diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml new file mode 100644 index 0000000000..0535ccd7dc --- /dev/null +++ b/.github/workflows/CI.yml @@ -0,0 +1,33 @@ +name: CI + +on: [push, pull_request] + +jobs: + java-8: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up JDK 1.8 + uses: actions/setup-java@v1 + with: + java-version: 1.8 + - name: Test + run: | + cd h2 + echo $JAVA_OPTS + export JAVA_OPTS=-Xmx512m + ./build.sh jar testCI + java-11: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: 11 + - name: Test + run: | + cd h2 + echo $JAVA_OPTS + export JAVA_OPTS=-Xmx512m + ./build.sh jar testCI diff --git a/.lift.toml b/.lift.toml new file mode 100644 index 0000000000..3c7beccf52 --- /dev/null +++ b/.lift.toml @@ -0,0 +1,8 @@ +# Config file for SonaType Lift analysis tool +# +# config reference here: https://help.sonatype.com/lift/configuration-reference +# + +# Tell sonatype where our pom file lives, so it can build it again +# +build = "maven -f h2/pom.xml compile" \ No newline at end of file diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 0163cc4f8c..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,32 +0,0 @@ -language: java - -script: ./build.sh jar testTravis - -cache: - directories: - - $HOME/.m2/repository - -matrix: - include: - - jdk: openjdk11 - dist: trusty - group: edge - sudo: required - before_script: - - "cd h2" - - "echo $JAVA_OPTS" - - "export JAVA_OPTS=-Xmx512m" - - jdk: oraclejdk8 - dist: trusty - group: edge - sudo: required - before_script: - - "cd h2" - - "echo $JAVA_OPTS" - - "export JAVA_OPTS=-Xmx512m" - - jdk: openjdk7 - dist: trusty - group: edge - sudo: required - before_script: - - "cd h2" diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000000..eed8e4b1a1 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,552 @@ +H2 is dual licensed and available under the MPL 2.0 
(Mozilla Public License +Version 2.0) or under the EPL 1.0 (Eclipse Public License). + +------------------------------------------------------------------------------- + +Mozilla Public License, version 2.0 + +1. Definitions + + 1.1. “Contributor” + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + + 1.2. “Contributor Version” + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + + 1.3. “Contribution” + means Covered Software of a particular Contributor. + + 1.4. “Covered Software” + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, + and Modifications of such Source Code Form, in each case + including portions thereof. + + 1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms + of a Secondary License. + + 1.6. “Executable Form” + means any form of the work other than Source Code Form. + + 1.7. “Larger Work” + means a work that combines Covered Software with other material, + in a separate file or files, that is not Covered Software. + + 1.8. “License” + means this document. + + 1.9. “Licensable” + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, + any and all of the rights conveyed by this License. + + 1.10. “Modifications” + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + + 1.11. 
“Patent Claims” of a Contributor + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + + 1.12. “Secondary License” + means either the GNU General Public License, Version 2.0, the + GNU Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those licenses. + + 1.13. “Source Code Form” + means the form of the work preferred for making modifications. + + 1.14. “You” (or “Your”) + means an individual or a legal entity exercising rights under this License. + For legal entities, “You” includes any entity that controls, + is controlled by, or is under common control with You. For purposes of + this definition, “control” means (a) the power, direct or indirect, + to cause the direction or management of such entity, whether by contract + or otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + +2. License Grants and Conditions + + 2.1. Grants + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, + or as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, + offer for sale, have made, import, and otherwise transfer either + its Contributions or its Contributor Version. + + 2.2. 
Effective Date + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor + first distributes such Contribution. + + 2.3. Limitations on Grant Scope + The licenses granted in this Section 2 are the only rights granted + under this License. No additional rights or licenses will be implied + from the distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted + by a Contributor: + + a. for any code that a Contributor has removed from + Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its + Contributor Version); or + + c. under Patent Claims infringed by Covered Software in the + absence of its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + + 2.4. Subsequent Licenses + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License + (if permitted under the terms of Section 3.3). + + 2.5. Representation + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights + to grant the rights to its Contributions conveyed by this License. + + 2.6. Fair Use + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, + or other equivalents. + + 2.7. Conditions + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the + licenses granted in Section 2.1. + +3. Responsibilities + + 3.1. 
Distribution of Source Form + All distribution of Covered Software in Source Code Form, including + any Modifications that You create or to which You contribute, must be + under the terms of this License. You must inform recipients that the + Source Code Form of the Covered Software is governed by the terms + of this License, and how they can obtain a copy of this License. + You may not attempt to alter or restrict the recipients’ rights + in the Source Code Form. + + 3.2. Distribution of Executable Form + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more than + the cost of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients’ rights in the Source Code Form under this License. + + 3.3. Distribution of a Larger Work + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of + Covered Software with a work governed by one or more Secondary Licenses, + and the Covered Software is not Incompatible With Secondary Licenses, + this License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the + Covered Software under the terms of either this License or such + Secondary License(s). + + 3.4. 
Notices + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, + or limitations of liability) contained within the Source Code Form of + the Covered Software, except that You may alter any license notices to + the extent required to remedy known factual inaccuracies. + + 3.5. Application of Additional Terms + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of + Covered Software. However, You may do so only on Your own behalf, + and not on behalf of any Contributor. You must make it absolutely clear + that any such warranty, support, indemnity, or liability obligation is + offered by You alone, and You hereby agree to indemnify every Contributor + for any liability incurred by such Contributor as a result of warranty, + support, indemnity or liability terms You offer. You may include + additional disclaimers of warranty and limitations of liability + specific to any jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + +If it is impossible for You to comply with any of the terms of this License +with respect to some or all of the Covered Software due to statute, +judicial order, or regulation then You must: (a) comply with the terms of +this License to the maximum extent possible; and (b) describe the limitations +and the code they affect. Such description must be placed in a text file +included with all distributions of the Covered Software under this License. +Except to the extent prohibited by statute or regulation, such description +must be sufficiently detailed for a recipient of ordinary skill +to be able to understand it. + +5. Termination + + 5.1. The rights granted under this License will terminate automatically + if You fail to comply with any of its terms. 
However, if You become + compliant, then the rights granted under this License from a particular + Contributor are reinstated (a) provisionally, unless and until such + Contributor explicitly and finally terminates Your grants, and (b) on an + ongoing basis, if such Contributor fails to notify You of the + non-compliance by some reasonable means prior to 60 days after You have + come back into compliance. Moreover, Your grants from a particular + Contributor are reinstated on an ongoing basis if such Contributor + notifies You of the non-compliance by some reasonable means, + this is the first time You have received notice of non-compliance with + this License from such Contributor, and You become compliant prior to + 30 days after Your receipt of the notice. + + 5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted + to You by any and all Contributors for the Covered Software under + Section 2.1 of this License shall terminate. + + 5.3. In the event of termination under Sections 5.1 or 5.2 above, all + end user license agreements (excluding distributors and resellers) which + have been validly granted by You or Your distributors under this License + prior to termination shall survive termination. + +6. Disclaimer of Warranty + +Covered Software is provided under this License on an “as is” basis, without +warranty of any kind, either expressed, implied, or statutory, including, +without limitation, warranties that the Covered Software is free of defects, +merchantable, fit for a particular purpose or non-infringing. The entire risk +as to the quality and performance of the Covered Software is with You. 
+Should any Covered Software prove defective in any respect, You +(not any Contributor) assume the cost of any necessary servicing, repair, +or correction. This disclaimer of warranty constitutes an essential part of +this License. No use of any Covered Software is authorized under this +License except under this disclaimer. + +7. Limitation of Liability + +Under no circumstances and under no legal theory, whether tort +(including negligence), contract, or otherwise, shall any Contributor, or +anyone who distributes Covered Software as permitted above, be liable to +You for any direct, indirect, special, incidental, or consequential damages +of any character including, without limitation, damages for lost profits, +loss of goodwill, work stoppage, computer failure or malfunction, or any and +all other commercial damages or losses, even if such party shall have been +informed of the possibility of such damages. This limitation of liability +shall not apply to liability for death or personal injury resulting from +such party’s negligence to the extent applicable law prohibits such +limitation. Some jurisdictions do not allow the exclusion or limitation of +incidental or consequential damages, so this exclusion and limitation may +not apply to You. + +8. Litigation + +Any litigation relating to this License may be brought only in the courts of +a jurisdiction where the defendant maintains its principal place of business +and such litigation shall be governed by laws of that jurisdiction, without +reference to its conflict-of-law provisions. Nothing in this Section shall +prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + +This License represents the complete agreement concerning the subject matter +hereof. If any provision of this License is held to be unenforceable, +such provision shall be reformed only to the extent necessary to make it +enforceable. 
Any law or regulation which provides that the language of a +contract shall be construed against the drafter shall not be used to construe +this License against a Contributor. + +10. Versions of the License + + 10.1. New Versions + Mozilla Foundation is the license steward. Except as provided in + Section 10.3, no one other than the license steward has the right to + modify or publish new versions of this License. Each version will be + given a distinguishing version number. + + 10.2. Effect of New Versions + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published + by the license steward. + + 10.3. Modified Versions + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + + 10.4. Distributing Source Code Form that is + Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this + License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the terms of the + Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed + with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to +look for such a notice. + +You may add additional accurate notices of copyright ownership. 
+ +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible With Secondary Licenses”, + as defined by the Mozilla Public License, v. 2.0. + +------------------------------------------------------------------------------- + +Eclipse Public License, Version 1.0 (EPL-1.0) + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC +LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM +CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial code and + documentation distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are +distributed by that particular Contributor. A Contribution 'originates' +from a Contributor if it was added to the Program by such Contributor itself +or anyone acting on such Contributor's behalf. Contributions do not include +additions to the Program which: (i) are separate modules of software +distributed in conjunction with the Program under their own license agreement, +and (ii) are not derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents " mean patent claims licensable by a Contributor which are +necessarily infringed by the use or sale of its Contribution alone or +when combined with the Program. + +"Program" means the Contributions distributed in accordance with +this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, +including all Contributors. + +2. 
GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby grants + Recipient a non-exclusive, worldwide, royalty-free copyright license to + reproduce, prepare derivative works of, publicly display, publicly + perform, distribute and sublicense the Contribution of such + Contributor, if any, and such derivative works, + in source code and object code form. + + b) Subject to the terms of this Agreement, each Contributor hereby grants + Recipient a non-exclusive, worldwide, royalty-free patent license under + Licensed Patents to make, use, sell, offer to sell, import and + otherwise transfer the Contribution of such Contributor, if any, + in source code and object code form. This patent license shall apply + to the combination of the Contribution and the Program if, at the time + the Contribution is added by the Contributor, such addition of the + Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. + No hardware per se is licensed hereunder. + + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. + Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby assumes + sole responsibility to secure any other intellectual property rights + needed, if any. For example, if a third party patent license is + required to allow Recipient to distribute the Program, it is + Recipient's responsibility to acquire that license + before distributing the Program. 
+ + d) Each Contributor represents that to its knowledge it has sufficient + copyright rights in its Contribution, if any, to grant the copyright + license set forth in this Agreement. + +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under +its own license agreement, provided that: + + a) it complies with the terms and conditions of this Agreement; and + + b) its license agreement: + + i) effectively disclaims on behalf of all Contributors all warranties + and conditions, express and implied, including warranties or + conditions of title and non-infringement, and implied warranties or + conditions of merchantability and fitness for a particular purpose; + + ii) effectively excludes on behalf of all Contributors all liability + for damages, including direct, indirect, special, incidental and + consequential damages, such as lost profits; + + iii) states that any provisions which differ from this Agreement are + offered by that Contributor alone and not by any other party; and + + iv) states that source code for the Program is available from such + Contributor, and informs licensees how to obtain it in a reasonable + manner on or through a medium customarily used for software exchange. + +When the Program is made available in source code form: + + a) it must be made available under this Agreement; and + b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained +within the Program. + +Each Contributor must identify itself as the originator of its Contribution, +if any, in a manner that reasonably allows subsequent Recipients to +identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with +respect to end users, business partners and the like. 
While this license is +intended to facilitate the commercial use of the Program, the Contributor who +includes the Program in a commercial product offering should do so in a manner +which does not create potential liability for other Contributors. Therefore, +if a Contributor includes the Program in a commercial product offering, +such Contributor ("Commercial Contributor") hereby agrees to defend and +indemnify every other Contributor ("Indemnified Contributor") against any +losses, damages and costs (collectively "Losses") arising from claims, +lawsuits and other legal actions brought by a third party against the +Indemnified Contributor to the extent caused by the acts or omissions of +such Commercial Contributor in connection with its distribution of the Program +in a commercial product offering. The obligations in this section do not apply +to any claims or Losses relating to any actual or alleged intellectual +property infringement. In order to qualify, an Indemnified Contributor must: +a) promptly notify the Commercial Contributor in writing of such claim, +and b) allow the Commercial Contributor to control, and cooperate with the +Commercial Contributor in, the defense and any related settlement +negotiations. The Indemnified Contributor may participate in any such +claim at its own expense. + +For example, a Contributor might include the Program in a commercial product +offering, Product X. That Contributor is then a Commercial Contributor. +If that Commercial Contributor then makes performance claims, or offers +warranties related to Product X, those performance claims and warranties +are such Commercial Contributor's responsibility alone. Under this section, +the Commercial Contributor would have to defend claims against the other +Contributors related to those performance claims and warranties, and if a +court requires any other Contributor to pay any damages as a result, +the Commercial Contributor must pay those damages. + +5. 
NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. +Each Recipient is solely responsible for determining the appropriateness of +using and distributing the Program and assumes all risks associated with its +exercise of rights under this Agreement , including but not limited to the +risks and costs of program errors, compliance with applicable laws, damage to +or loss of data, programs or equipment, and unavailability +or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY +CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION +LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE +EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under +applicable law, it shall not affect the validity or enforceability of the +remainder of the terms of this Agreement, and without further action by +the parties hereto, such provision shall be reformed to the minimum extent +necessary to make such provision valid and enforceable. 
+ +If Recipient institutes patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Program itself +(excluding combinations of the Program with other software or hardware) +infringes such Recipient's patent(s), then such Recipient's rights granted +under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to +comply with any of the material terms or conditions of this Agreement and +does not cure such failure in a reasonable period of time after becoming +aware of such noncompliance. If all Recipient's rights under this +Agreement terminate, Recipient agrees to cease use and distribution of the +Program as soon as reasonably practicable. However, Recipient's obligations +under this Agreement and any licenses granted by Recipient relating to the +Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, +but in order to avoid inconsistency the Agreement is copyrighted and may +only be modified in the following manner. The Agreement Steward reserves +the right to publish new versions (including revisions) of this Agreement +from time to time. No one other than the Agreement Steward has the right to +modify this Agreement. The Eclipse Foundation is the initial +Agreement Steward. The Eclipse Foundation may assign the responsibility to +serve as the Agreement Steward to a suitable separate entity. Each new version +of the Agreement will be given a distinguishing version number. The Program +(including Contributions) may always be distributed subject to the version +of the Agreement under which it was received. In addition, after a new version +of the Agreement is published, Contributor may elect to distribute the Program +(including its Contributions) under the new version. 
Except as expressly +stated in Sections 2(a) and 2(b) above, Recipient receives no rights or +licenses to the intellectual property of any Contributor under this Agreement, +whether expressly, by implication, estoppel or otherwise. All rights in the +Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the +intellectual property laws of the United States of America. No party to +this Agreement will bring a legal action under this Agreement more than one +year after the cause of action arose. Each party waives its rights to a +jury trial in any resulting litigation. diff --git a/README.md b/README.md index d812afff0b..70de378686 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,40 @@ -# Welcome to H2, the Java SQL database. [![Build Status](https://travis-ci.org/h2database/h2database.svg?branch=master)](https://travis-ci.org/h2database/h2database) +[![CI](h2/src/docsrc/images/h2-logo-2.png)](https://github.com/h2database/h2database/actions?query=workflow%3ACI) +# Welcome to H2, the Java SQL database. ## The main features of H2 are: -1. Very fast, open source, JDBC API -2. Embedded and server modes; in-memory databases -3. Browser based Console application -4. 
Small footprint: around 2 MB jar file size - -More information: http://h2database.com - -## Features - -| | [H2](http://www.h2database.com/) | [Derby](http://db.apache.org/derby) | [HSQLDB](http://hsqldb.org) | [MySQL](https://www.mysql.com/) | [PostgreSQL](https://www.postgresql.org) | -|--------------------------------|---------|---------|---------|-------|---------| -| Pure Java | Yes | Yes | Yes | No | No | -| Memory Mode | Yes | Yes | Yes | No | No | -| Encrypted Database | Yes | Yes | Yes | No | No | -| ODBC Driver | Yes | No | No | Yes | Yes | -| Fulltext Search | Yes | No | No | Yes | Yes | -| Multi Version Concurrency | Yes | No | Yes | Yes | Yes | -| Footprint (embedded database) | ~2 MB | ~3 MB | ~1.5 MB | — | — | -| Footprint (JDBC client driver) | ~500 KB | ~600 KB | ~1.5 MB | ~1 MB | ~700 KB | +* Very fast, open source, JDBC API +* Embedded and server modes; disk-based or in-memory databases +* Transaction support, multi-version concurrency +* Browser based Console application +* Encrypted databases +* Fulltext search +* Pure Java with small footprint: around 2.5 MB jar file size +* ODBC driver + +More information: https://h2database.com + +## Downloads + +[Download latest version](https://h2database.com/html/download.html) or add to `pom.xml`: + +```XML + + com.h2database + h2 + 2.1.210 + +``` + +## Documentation + +* [Tutorial](https://h2database.com/html/tutorial.html) +* [SQL commands](https://h2database.com/html/commands.html) +* [Functions](https://h2database.com/html/functions.html), [aggregate functions](https://h2database.com/html/functions-aggregate.html), [window functions](https://h2database.com/html/functions-window.html) +* [Data types](https://h2database.com/html/datatypes.html) + +## Support + +* [Issue tracker](https://github.com/h2database/h2database/issues) for bug reports and feature requests +* [Mailing list / forum](https://groups.google.com/g/h2-database) for questions about H2 +* ['h2' tag on Stack 
Overflow](https://stackoverflow.com/questions/tagged/h2) for other questions (Hibernate with H2 etc.) diff --git a/h2/.gitignore b/h2/.gitignore index 05251400f0..b90461133b 100644 --- a/h2/.gitignore +++ b/h2/.gitignore @@ -14,5 +14,4 @@ test.out.txt .idea/ *.log target/ -src/main/org/h2/res/help.csv _tmp* diff --git a/h2/MAVEN.md b/h2/MAVEN.md index 40c291a2eb..427fa8a622 100644 --- a/h2/MAVEN.md +++ b/h2/MAVEN.md @@ -5,7 +5,7 @@ Welcome to H2, the Java SQL database. The main features of H2 are: * Very fast, open source, JDBC API * Embedded and server modes; in-memory databases * Browser based Console application -* Small footprint: around 2 MB jar file size +* Small footprint: around 2.5 MB jar file size ## Experimental Building & Testing with Maven diff --git a/h2/build.sh b/h2/build.sh index 7196287ca4..558a7945ab 100755 --- a/h2/build.sh +++ b/h2/build.sh @@ -1,11 +1,16 @@ #!/bin/sh if [ -z "$JAVA_HOME" ] ; then - if [ -d "/System/Library/Frameworks/JavaVM.framework/Home" ] ; then - export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Home - else - echo "Error: JAVA_HOME is not defined." + if [[ "$OSTYPE" == "darwin"* ]]; then + if [ -d "/System/Library/Frameworks/JavaVM.framework/Home" ] ; then + export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Home + else + export JAVA_HOME=`/usr/libexec/java_home` + fi fi fi +if [ -z "$JAVA_HOME" ] ; then + echo "Error: JAVA_HOME is not defined." +fi if [ "$1" = "clean" ] ; then rm -rf temp bin ; fi if [ ! -d "temp" ] ; then mkdir temp ; fi if [ ! 
-d "bin" ] ; then mkdir bin ; fi diff --git a/h2/pom.xml b/h2/pom.xml index e2113c5b8d..a0c0085569 100644 --- a/h2/pom.xml +++ b/h2/pom.xml @@ -4,10 +4,10 @@ com.h2database h2 - 1.4.200-SNAPSHOT + 2.1.210 jar H2 Database Engine - http://www.h2database.com + https://h2database.com H2 Database Engine @@ -37,10 +37,17 @@ - 1.7 - 1.7 - 4.2.0 - 1.6.0 + 1.8 + 1.8 + 8.0.1 + 1.17.0 + 5.6.2 + 8.5.2 + 5.0.0 + 42.2.14 + 4.0.1 + 5.0.0 + 1.7.30 UTF-8 @@ -50,22 +57,27 @@ javax.servlet javax.servlet-api - 3.1.0 + ${javax.servlet.version} + + + jakarta.servlet + jakarta.servlet-api + ${jakarta.servlet.version} org.apache.lucene lucene-core - 5.5.5 + ${lucene.version} org.apache.lucene lucene-analyzers-common - 5.5.5 + ${lucene.version} org.apache.lucene lucene-queryparser - 5.5.5 + ${lucene.version} org.slf4j @@ -85,7 +97,7 @@ org.locationtech.jts jts-core - 1.15.0 + ${jts.version} @@ -93,26 +105,26 @@ org.slf4j - slf4j-simple + slf4j-nop ${slf4j.version} test org.postgresql postgresql - 42.2.5.jre7 + ${pgjdbc.version} test - junit - junit - 4.12 + org.junit.jupiter + junit-jupiter-engine + ${junit.version} test org.ow2.asm asm - 7.0 + ${asm.version} test @@ -140,7 +152,7 @@ com.sun tools system - 1.7 + 1.8 ${java.home}/../lib/tools.jar @@ -157,7 +169,7 @@ com.sun tools system - 1.7 + 1.8 ${java.home}/../Classes/classes.jar @@ -181,7 +193,6 @@ **/*.js org/h2/res/help.csv org/h2/res/javadoc.properties - org/h2/server/pg/pg_catalog.sql META-INF/** @@ -189,11 +200,17 @@ src/java9/precompiled META-INF/versions/9 + + src/java10/precompiled + META-INF/versions/10 + src/test + org/h2/test/bench/test.properties + org/h2/test/script/testScrip.sql org/h2/test/scripts/**/*.sql org/h2/samples/newsfeed.sql org/h2/samples/optimizations.sql @@ -204,7 +221,7 @@ org.apache.maven.plugins maven-jar-plugin - 3.1.0 + 3.1.2 @@ -238,53 +255,10 @@ - - org.apache.maven.plugins maven-surefire-plugin - 2.22.0 + 2.22.2 TestAllJunit.java @@ -295,5 +269,4 @@ - diff --git 
a/h2/src/docsrc/help/information_schema.csv b/h2/src/docsrc/help/information_schema.csv new file mode 100644 index 0000000000..8008bb1e46 --- /dev/null +++ b/h2/src/docsrc/help/information_schema.csv @@ -0,0 +1,1022 @@ +# Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +# and the EPL 1.0 (https://h2database.com/html/license.html). +# Initial Developer: H2 Group + +"TABLE_NAME","COLUMN_NAME","DESCRIPTION" + +# Tables and views + +"CHECK_CONSTRAINTS",," +Contains CHECK clauses of check and domain constraints. +" + +"COLLATIONS",," +Contains available collations. +" + +"COLUMNS",," +Contains information about columns of tables. +" + +"COLUMN_PRIVILEGES",," +Contains information about privileges of columns. +H2 doesn't have per-column privileges, so this view actually contains privileges of their tables. +" + +"CONSTANTS",," +Contains information about constants. +" + +"CONSTRAINT_COLUMN_USAGE",," +Contains information about columns used in constraints. +" + +"DOMAINS",," +Contains information about domains. +" + +"DOMAIN_CONSTRAINTS",," +Contains basic information about domain constraints. +See also INFORMATION_SCHEMA.CHECK_CONSTRAINTS. +" + +"ELEMENT_TYPES",," +Contains information about types of array elements. +" + +"ENUM_VALUES",," +Contains information about enum values. +" + +"FIELDS",," +Contains information about fields of row values. +" + +"INDEXES",," +Contains information about indexes. +" + +"INDEX_COLUMNS",," +Contains information about columns used in indexes. +" + +"INFORMATION_SCHEMA_CATALOG_NAME",," +Contains a single row with the name of catalog (database name). +" + +"IN_DOUBT",," +Contains information about prepared transactions. +" + +"KEY_COLUMN_USAGE",," +Contains information about columns used by primary key, unique, or referential constraint. +" + +"LOCKS",," +Contains information about tables locked by sessions. +" + +"PARAMETERS",," +Contains information about parameters of routines. 
+" + +"QUERY_STATISTICS",," +Contains statistics of queries when query statistics gathering is enabled. +" + +"REFERENTIAL_CONSTRAINTS",," +Contains additional information about referential constraints. +" + +"RIGHTS",," +Contains information about granted rights and roles. +" + +"ROLES",," +Contains information about roles. +" + +"ROUTINES",," +Contains information about user-defined routines, including aggregate functions. +" + +"SCHEMATA",," +Contains information about schemas. +" + +"SEQUENCES",," +Contains information about sequences. +" + +"SESSIONS",," +Contains information about sessions. +Only users with ADMIN privileges can see all sessions, other users can see only own session. +" + +"SESSION_STATE",," +Contains the state of the current session. +" + +"SETTINGS",," +Contains values of various settings. +" + +"SYNONYMS",," +Contains information about table synonyms. +" + +"TABLES",," +Contains information about tables. +See also INFORMATION_SCHEMA.COLUMNS. +" + +"TABLE_CONSTRAINTS",," +Contains basic information about table constraints (check, primary key, unique, and referential). +" + +"TABLE_PRIVILEGES",," +Contains information about privileges of tables. +See INFORMATION_SCHEMA.CHECK_CONSTRAINTS, INFORMATION_SCHEMA.KEY_COLUMN_USAGE, +and INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS for additional information. +" + +"TRIGGERS",," +Contains information about triggers. +" + +"USERS",," +Contains information about users. +Only users with ADMIN privileges can see all users, other users can see only themselves. +" + +"VIEWS",," +Contains additional information about views. +See INFORMATION_SCHEMA.TABLES for basic information. +" + +# Common columns with data type information + +,"DATA_TYPE"," +The SQL data type name. +" + +,"CHARACTER_MAXIMUM_LENGTH"," +The maximum length in characters for character string data types. +For binary string data types contains the same value as CHARACTER_OCTET_LENGTH. 
+" + +,"CHARACTER_OCTET_LENGTH"," +The maximum length in bytes for binary string data types. +For character string data types contains the same value as CHARACTER_MAXIMUM_LENGTH. +" + +,"NUMERIC_PRECISION"," +The precision for numeric data types. +" + +,"NUMERIC_PRECISION_RADIX"," +The radix of precision (2 or 10) for numeric data types. +" + +,"NUMERIC_SCALE"," +The scale for numeric data types. +" + +,"DATETIME_PRECISION"," +The fractional seconds precision for datetime data types. +" + +,"INTERVAL_TYPE"," +The data type of interval qualifier for interval data types. +" + +,"INTERVAL_PRECISION"," +The leading field precision for interval data types. +" + +,"CHARACTER_SET_CATALOG"," +The catalog (database name) for character string data types. +" + +,"CHARACTER_SET_SCHEMA"," +The name of public schema for character string data types. +" + +,"CHARACTER_SET_NAME"," +The 'Unicode' for character string data types. +" + +,"COLLATION_CATALOG"," +The catalog (database name) for character string data types. +" + +,"COLLATION_SCHEMA"," +The name of public schema for character string data types. +" + +,"COLLATION_NAME"," +The name of collation for character string data types. +" + +,"MAXIMUM_CARDINALITY"," +The maximum cardinality for array data types. +" + +,"DTD_IDENTIFIER"," +The data type identifier to read additional information from INFORMATION_SCHEMA.ELEMENT_TYPES for array data types, +INFORMATION_SCHEMA.ENUM_VALUES for ENUM data type, and INFORMATION_SCHEMA.FIELDS for row value data types. +" + +,"DECLARED_DATA_TYPE"," +The declared SQL data type name for numeric data types. +" + +,"DECLARED_NUMERIC_PRECISION"," +The declared precision, if any, for numeric data types. +" + +,"DECLARED_NUMERIC_SCALE"," +The declared scale, if any, for numeric data types. +" + +,"GEOMETRY_TYPE"," +The geometry type constraint, if any, for geometry data types. +" + +,"GEOMETRY_SRID"," +The geometry SRID (Spatial Reference Identifier) constraint, if any, for geometry data types. 
+" + +# Other common fields + +,"CONSTRAINT_CATALOG"," +The catalog (database name). +" + +,"CONSTRAINT_SCHEMA"," +The schema of the constraint. +" + +,"CONSTRAINT_NAME"," +The name of the constraint. +" + +,"DOMAIN_CATALOG"," +The catalog (database name). +" + +,"DOMAIN_SCHEMA"," +The schema of domain. +" + +,"DOMAIN_NAME"," +The name of domain. +" + +,"INDEX_CATALOG"," +The catalog (database name). +" + +,"INDEX_SCHEMA"," +The schema of the index. +" + +,"INDEX_NAME"," +The name of the index. +" + +,"OBJECT_CATALOG"," +The catalog (database name). +" + +,"OBJECT_SCHEMA"," +The schema of the object. +" + +,"OBJECT_NAME"," +The name of the object. +" + +,"OBJECT_TYPE"," +The TYPE of the object ('CONSTANT', 'DOMAIN', 'TABLE', or 'ROUTINE'). +" + +,"SPECIFIC_CATALOG"," +The catalog (database name). +" + +,"SPECIFIC_SCHEMA"," +The schema of the overloaded version of routine. +" + +,"SPECIFIC_NAME"," +The name of the overloaded version of routine. +" + +,"TABLE_CATALOG"," +The catalog (database name). +" + +,"TABLE_SCHEMA"," +The schema of the table. +" + +,"TABLE_NAME"," +The name of the table. +" + +,"COLUMN_NAME"," +The name of the column. +" + +,"ORDINAL_POSITION"," +The ordinal position (1-based). +" + +,"GRANTOR"," +NULL. +" + +,"GRANTEE"," +The name of grantee. +" + +,"PRIVILEGE_TYPE"," +'SELECT', 'INSERT', 'UPDATE', or 'DELETE'. +" + +,"IS_GRANTABLE"," +Whether grantee may grant rights to this object to others ('YES' or 'NO'). +" + +,"REMARKS"," +Optional remarks. +" + +,"SESSION_ID"," +The identifier of the session. +" + +# Individual fields + +"CHECK_CONSTRAINTS","CHECK_CLAUSE"," +The SQL of CHECK clause. +" + +"COLLATIONS","PAD_ATTRIBUTE"," +'NO PAD'. +" + +"COLLATIONS","LANGUAGE_TAG"," +The language tag. +" + +"COLUMNS","COLUMN_DEFAULT"," +The SQL of DEFAULT expression, if any. +" + +"COLUMNS","IS_NULLABLE"," +Whether column may contain NULL value ('YES' or 'NO'). +" + +"COLUMNS","DOMAIN_CATALOG"," +The catalog for columns with domain. 
+" + +"COLUMNS","DOMAIN_SCHEMA"," +The schema of domain for columns with domain. +" + +"COLUMNS","DOMAIN_NAME"," +The name of domain for columns with domain. +" + +"COLUMNS","IS_IDENTITY"," +Whether column is an identity column ('YES' or 'NO'). +" + +"COLUMNS","IDENTITY_GENERATION"," +Identity generation ('ALWAYS' or 'BY DEFAULT') for identity columns. +" + +"COLUMNS","IDENTITY_START"," +The initial start value for identity columns. +" + +"COLUMNS","IDENTITY_INCREMENT"," +The increment value for identity columns. +" + +"COLUMNS","IDENTITY_MAXIMUM"," +The maximum value for identity columns. +" + +"COLUMNS","IDENTITY_MINIMUM"," +The minimum value for identity columns. +" + +"COLUMNS","IDENTITY_CYCLE"," +Whether identity values are cycled ('YES' or 'NO') for identity columns. +" + +"COLUMNS","IS_GENERATED"," +Whether column is a generated column ('ALWAYS' or 'NEVER'). +" + +"COLUMNS","GENERATION_EXPRESSION"," +The SQL of GENERATED ALWAYS AS expression for generated columns. +" + +"COLUMNS","IDENTITY_BASE"," +The current base value for identity columns. +" + +"COLUMNS","IDENTITY_CACHE"," +The cache size for identity columns. +" + +"COLUMNS","COLUMN_ON_UPDATE"," +The SQL of ON UPDATE expression, if any. +" + +"COLUMNS","IS_VISIBLE"," +Whether column is visible (included into SELECT *). +" + +"COLUMNS","DEFAULT_ON_NULL"," +Whether value of DEFAULT expression is used when NULL value is inserted. +" + +"COLUMNS","SELECTIVITY"," +The selectivity of a column (0-100), used to choose the best index. +" + +"CONSTANTS","CONSTANT_CATALOG"," +The catalog (database name). +" + +"CONSTANTS","CONSTANT_SCHEMA"," +The schema of the constant. +" + +"CONSTANTS","CONSTANT_NAME"," +The name of the constant. +" + +"CONSTANTS","VALUE_DEFINITION"," +The SQL of value. +" + +"DOMAINS","DOMAIN_DEFAULT"," +The SQL of DEFAULT expression, if any. +" + +"DOMAINS","DOMAIN_ON_UPDATE"," +The SQL of ON UPDATE expression, if any. 
+" + +"DOMAINS","PARENT_DOMAIN_CATALOG"," +The catalog (database name) for domains with parent domain. +" + +"DOMAINS","PARENT_DOMAIN_SCHEMA"," +The schema of parent domain for domains with parent domain. +" + +"DOMAINS","PARENT_DOMAIN_NAME"," +The name of parent domain for domains with parent domain. +" + +"DOMAIN_CONSTRAINTS","IS_DEFERRABLE"," +'NO'. +" + +"DOMAIN_CONSTRAINTS","INITIALLY_DEFERRED"," +'NO'. +" + +"ELEMENT_TYPES","COLLECTION_TYPE_IDENTIFIER"," +The DTD_IDENTIFIER value of the object. +" + +"ENUM_VALUES","ENUM_IDENTIFIER"," +The DTD_IDENTIFIER value of the object. +" + +"ENUM_VALUES","VALUE_NAME"," +The name of enum value. +" + +"ENUM_VALUES","VALUE_ORDINAL"," +The ordinal of enum value. +" + +"FIELDS","ROW_IDENTIFIER"," +The DTD_IDENTIFIER value of the object. +" + +"FIELDS","FIELD_NAME"," +The name of the field of the row value. +" + +"INDEXES","INDEX_TYPE_NAME"," +The type of the index ('PRIMARY KEY', 'UNIQUE INDEX', 'SPATIAL INDEX', etc.) +" + +"INDEXES","IS_GENERATED"," +Whether index is generated by a constraint and belongs to it. +" + +"INDEXES","INDEX_CLASS"," +The Java class name of index implementation. +" + +"INDEX_COLUMNS","ORDERING_SPECIFICATION"," +'ASC' or 'DESC'. +" + +"INDEX_COLUMNS","NULL_ORDERING"," +'FIRST', 'LAST', or NULL. +" + +"INDEX_COLUMNS","IS_UNIQUE"," +Whether this column is a part of unique column list of a unique index (TRUE or FALSE). +" + +"INFORMATION_SCHEMA_CATALOG_NAME","CATALOG_NAME"," +The catalog (database name). +" + +"IN_DOUBT","TRANSACTION_NAME"," +The name of prepared transaction. +" + +"IN_DOUBT","TRANSACTION_STATE"," +The state of prepared transaction ('IN_DOUBT', 'COMMIT', or 'ROLLBACK'). +" + +"KEY_COLUMN_USAGE","POSITION_IN_UNIQUE_CONSTRAINT"," +The ordinal position in the referenced unique constraint (1-based). +" + +"LOCKS","LOCK_TYPE"," +'READ' or 'WRITE'. +" + +"PARAMETERS","PARAMETER_MODE"," +'IN'. +" + +"PARAMETERS","IS_RESULT"," +'NO'. 
+" + +"PARAMETERS","AS_LOCATOR"," +'YES' for LOBs, 'NO' for others. +" + +"PARAMETERS","PARAMETER_NAME"," +The name of the parameter. +" + +"PARAMETERS","PARAMETER_DEFAULT"," +NULL. +" + +"QUERY_STATISTICS","SQL_STATEMENT"," +The SQL statement. +" + +"QUERY_STATISTICS","EXECUTION_COUNT"," +The execution count. +" + +"QUERY_STATISTICS","MIN_EXECUTION_TIME"," +The minimum execution time in milliseconds. +" + +"QUERY_STATISTICS","MAX_EXECUTION_TIME"," +The maximum execution time in milliseconds. +" + +"QUERY_STATISTICS","CUMULATIVE_EXECUTION_TIME"," +The total execution time in milliseconds. +" + +"QUERY_STATISTICS","AVERAGE_EXECUTION_TIME"," +The average execution time in milliseconds. +" + +"QUERY_STATISTICS","STD_DEV_EXECUTION_TIME"," +The standard deviation of execution time in milliseconds. +" + +"QUERY_STATISTICS","MIN_ROW_COUNT"," +The minimum number of rows. +" + +"QUERY_STATISTICS","MAX_ROW_COUNT"," +The maximum number of rows. +" + +"QUERY_STATISTICS","CUMULATIVE_ROW_COUNT"," +The total number of rows. +" + +"QUERY_STATISTICS","AVERAGE_ROW_COUNT"," +The average number of rows. +" + +"QUERY_STATISTICS","STD_DEV_ROW_COUNT"," +The standard deviation of number of rows. +" + +"REFERENTIAL_CONSTRAINTS","UNIQUE_CONSTRAINT_CATALOG"," +The catalog (database name). +" + +"REFERENTIAL_CONSTRAINTS","UNIQUE_CONSTRAINT_SCHEMA"," +The schema of referenced unique constraint. +" + +"REFERENTIAL_CONSTRAINTS","UNIQUE_CONSTRAINT_NAME"," +The name of referenced unique constraint. +" + +"REFERENTIAL_CONSTRAINTS","MATCH_OPTION"," +'NONE'. +" + +"REFERENTIAL_CONSTRAINTS","UPDATE_RULE"," +The rule for UPDATE in referenced table ('RESTRICT', 'CASCADE', 'SET DEFAULT', or 'SET NULL'). +" + +"REFERENTIAL_CONSTRAINTS","DELETE_RULE"," +The rule for DELETE in referenced table ('RESTRICT', 'CASCADE', 'SET DEFAULT', or 'SET NULL'). +" + +"RIGHTS","GRANTEETYPE"," +'USER' if grantee is a user, 'ROLE' if grantee is a role. 
+" + +"RIGHTS","GRANTEDROLE"," +The name of the granted role for role grants. +" + +"RIGHTS","RIGHTS"," +The set of rights ('SELECT', 'DELETE', 'INSERT', 'UPDATE', or 'ALTER ANY SCHEMA' separated with ', ') for table grants. +" + +"ROLES","ROLE_NAME"," +The name of the role. +" + +"ROUTINES","ROUTINE_CATALOG"," +The catalog (database name). +" + +"ROUTINES","ROUTINE_SCHEMA"," +The schema of the routine. +" + +"ROUTINES","ROUTINE_NAME"," +The name of the routine. +" + +"ROUTINES","ROUTINE_TYPE"," +'PROCEDURE', 'FUNCTION', or 'AGGREGATE'. +" + +"ROUTINES","ROUTINE_BODY"," +'EXTERNAL'. +" + +"ROUTINES","ROUTINE_DEFINITION"," +Source code or NULL if not applicable or user doesn't have ADMIN privileges. +" + +"ROUTINES","EXTERNAL_NAME"," +The name of the class or method. +" + +"ROUTINES","EXTERNAL_LANGUAGE"," +'JAVA'. +" + +"ROUTINES","PARAMETER_STYLE"," +'GENERAL'. +" + +"ROUTINES","IS_DETERMINISTIC"," +Whether routine is deterministic ('YES' or 'NO'). +" + +"SCHEMATA","CATALOG_NAME"," +The catalog (database name). +" + +"SCHEMATA","SCHEMA_NAME"," +The schema name. +" + +"SCHEMATA","SCHEMA_OWNER"," +The name of schema owner. +" + +"SCHEMATA","DEFAULT_CHARACTER_SET_CATALOG"," +The catalog (database name). +" + +"SCHEMATA","DEFAULT_CHARACTER_SET_SCHEMA"," +The name of public schema. +" + +"SCHEMATA","DEFAULT_CHARACTER_SET_NAME"," +'Unicode'. +" + +"SCHEMATA","SQL_PATH"," +NULL. +" + +"SCHEMATA","DEFAULT_COLLATION_NAME"," +The name of database collation. +" + +"SEQUENCES","SEQUENCE_CATALOG"," +The catalog (database name). +" + +"SEQUENCES","SEQUENCE_SCHEMA"," +The schema of the sequence. +" + +"SEQUENCES","SEQUENCE_NAME"," +The name of the sequence. +" + +"SEQUENCES","START_VALUE"," +The initial start value. +" + +"SEQUENCES","MINIMUM_VALUE"," +The minimum value. +" + +"SEQUENCES","MAXIMUM_VALUE"," +The maximum value. +" + +"SEQUENCES","INCREMENT"," +The increment value. +" + +"SEQUENCES","CYCLE_OPTION"," +Whether values are cycled ('YES' or 'NO'). 
+" + +"SEQUENCES","BASE_VALUE"," +The current base value. +" + +"SEQUENCES","CACHE"," +The cache size. +" + +"SESSIONS","USER_NAME"," +The name of the user. +" + +"SESSIONS","SERVER"," +The name of the server used by remote connection. +" + +"SESSIONS","CLIENT_ADDR"," +The client address and port used by remote connection. +" + +"SESSIONS","CLIENT_INFO"," +Additional client information provided by remote connection. +" + +"SESSIONS","SESSION_START"," +When this session was started. +" + +"SESSIONS","ISOLATION_LEVEL"," +The isolation level of the session ('READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ', 'SNAPSHOT', +or 'SERIALIZABLE'). +" + +"SESSIONS","EXECUTING_STATEMENT"," +The currently executing statement, if any. +" + +"SESSIONS","EXECUTING_STATEMENT_START"," +When the current command was started, if any. +" + +"SESSIONS","CONTAINS_UNCOMMITTED"," +Whether the session contains any uncommitted changes. +" + +"SESSIONS","SESSION_STATE"," +The state of the session ('RUNNING', 'SLEEP', etc.) +" + +"SESSIONS","BLOCKER_ID"," +The identifier or blocking session, if any. +" + +"SESSIONS","SLEEP_SINCE"," +When the last command was finished if session is sleeping. +" + +"SESSION_STATE","STATE_KEY"," +The key. +" + +"SESSION_STATE","STATE_COMMAND"," +The SQL command that can be used to restore the state. +" + +"SETTINGS","SETTING_NAME"," +The name of the setting. +" + +"SETTINGS","SETTING_VALUE"," +The value of the setting. +" + +"SYNONYMS","SYNONYM_CATALOG"," +The catalog (database name). +" + +"SYNONYMS","SYNONYM_SCHEMA"," +The schema of the synonym. +" + +"SYNONYMS","SYNONYM_NAME"," +The name of the synonym. +" + +"SYNONYMS","SYNONYM_FOR"," +The name of the referenced table. +" + +"SYNONYMS","SYNONYM_FOR_SCHEMA"," +The name of the referenced schema. +" + +"SYNONYMS","TYPE_NAME"," +'SYNONYM'. +" + +"SYNONYMS","STATUS"," +'VALID'. +" + +"TABLES","TABLE_TYPE"," +'BASE TABLE', 'VIEW', 'GLOBAL TEMPORARY', or 'LOCAL TEMPORARY'. 
+" + +"TABLES","IS_INSERTABLE_INTO"," +Whether the table is insertable ('YES' or 'NO'). +" + +"TABLES","COMMIT_ACTION"," +'DELETE', 'DROP', or 'PRESERVE' for temporary tables. +" + +"TABLES","STORAGE_TYPE"," +'CACHED' for regular persisted tables, 'MEMORY' for in-memory tables or persisted tables with in-memory indexes, +'GLOBAL TEMPORARY' or 'LOCAL TEMPORARY' for temporary tables, 'EXTERNAL' for tables with external table engines, +or 'TABLE LINK' for linked tables. +" + +"TABLES","LAST_MODIFICATION"," +The sequence number of the last modification, if applicable. +" + +"TABLES","TABLE_CLASS"," +The Java class name of implementation. +" + +"TABLES","ROW_COUNT_ESTIMATE"," +The approximate number of rows if known or some default value if unknown. +For regular tables contains the total number of rows including the uncommitted rows. +" + +"TABLE_CONSTRAINTS","CONSTRAINT_TYPE"," +'CHECK', 'PRIMARY KEY', 'UNIQUE', or 'REFERENTIAL'. +" + +"TABLE_CONSTRAINTS","IS_DEFERRABLE"," +'NO'. +" + +"TABLE_CONSTRAINTS","INITIALLY_DEFERRED"," +'NO'. +" + +"TABLE_CONSTRAINTS","ENFORCED"," +'YES' for non-referential constraints. +'YES' for referential constraints when checks for referential integrity are enabled for both the referenced and +referencing tables and 'NO' when they are disabled. +" + +"TABLE_PRIVILEGES","WITH_HIERARCHY"," +'NO'. +" + +"TRIGGERS","TRIGGER_CATALOG"," +The catalog (database name). +" + +"TRIGGERS","TRIGGER_SCHEMA"," +The schema of the trigger. +" + +"TRIGGERS","TRIGGER_NAME"," +The name of the trigger. +" + +"TRIGGERS","EVENT_MANIPULATION"," +'INSERT', 'UPDATE', 'DELETE', or 'SELECT'. +" + +"TRIGGERS","EVENT_OBJECT_CATALOG"," +The catalog (database name). +" + +"TRIGGERS","EVENT_OBJECT_SCHEMA"," +The schema of the table. +" + +"TRIGGERS","EVENT_OBJECT_TABLE"," +The name of the table. +" + +"TRIGGERS","ACTION_ORIENTATION"," +'ROW' or 'STATEMENT'. +" + +"TRIGGERS","ACTION_TIMING"," +'BEFORE', 'AFTER', or 'INSTEAD OF'. 
+" + +"TRIGGERS","IS_ROLLBACK"," +Whether this trigger is executed on rollback. +" + +"TRIGGERS","JAVA_CLASS"," +The Java class name. +" + +"TRIGGERS","QUEUE_SIZE"," +The size of the queue (is not actually used). +" + +"TRIGGERS","NO_WAIT"," +Whether trigger is defined with NO WAIT clause (is not actually used). +" + +"USERS","USER_NAME"," +The name of the user. +" + +"USERS","IS_ADMIN"," +Whether user has ADMIN privileges. +" + +"VIEWS","VIEW_DEFINITION"," +The query SQL, if applicable. +" + +"VIEWS","CHECK_OPTION"," +'NONE'. +" + +"VIEWS","IS_UPDATABLE"," +'NO'. +" + +"VIEWS","INSERTABLE_INTO"," +'NO'. +" + +"VIEWS","IS_TRIGGER_UPDATABLE"," +Whether the view has INSTEAD OF trigger for UPDATE ('YES' or 'NO'). +" + +"VIEWS","IS_TRIGGER_DELETABLE"," +Whether the view has INSTEAD OF trigger for DELETE ('YES' or 'NO'). +" + +"VIEWS","IS_TRIGGER_INSERTABLE_INTO"," +Whether the view has INSTEAD OF trigger for INSERT ('YES' or 'NO'). +" + +"VIEWS","STATUS"," +'VALID' or 'INVALID'. +" diff --git a/h2/src/docsrc/html/advanced.html b/h2/src/docsrc/html/advanced.html index c1d050129c..68e865b1ff 100644 --- a/h2/src/docsrc/html/advanced.html +++ b/h2/src/docsrc/html/advanced.html @@ -1,7 +1,7 @@ @@ -49,8 +49,6 @@

Advanced

Run as Windows Service
ODBC Driver
- - Using H2 in Microsoft .NET
ACID
@@ -83,12 +81,8 @@

Advanced

Pluggable File System

Split File System
- - Database Upgrade
Java Objects Serialization
- - Custom Data Types Handler API
Limits and Limitations
@@ -98,7 +92,10 @@

Result Sets

Statements that Return a Result Set

-The following statements return a result set: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP. +The following statements return a result set: SELECT, TABLE, VALUES, +EXPLAIN, CALL, SCRIPT, SHOW, HELP. +EXECUTE may return either a result set or an update count. +Result of a WITH statement depends on inner command. All other statements return an update count.

@@ -149,18 +146,6 @@

When to use CLOB/BLOB

that don't involve this column.

-

Large Object Compression

-

-The following feature is only available for the PageStore storage engine. -For the MVStore engine (the default for H2 version 1.4.x), -append ;COMPRESS=TRUE to the database URL instead. -CLOB and BLOB values can be compressed by using -SET COMPRESS_LOB. -The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write -operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, -then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster. -

-

Linked Tables

This database supports linked tables, which means tables that don't exist in the current database but @@ -229,55 +214,60 @@

Transaction Isolation

Transaction isolation is provided for all data manipulation language (DML) statements.

-Please note that with default MVStore storage engine table level locking is not used. -Instead, rows are locked for update, and read committed is used in all cases -except for explicitly selected read uncommitted transaction isolation level. -

-

-This database supports the following transaction isolation levels: +H2 supports read uncommitted, read committed, repeatable read, snapshot, +and serializable (partially, see below) isolation levels:

    -
  • Read Committed
    +
  • Read uncommitted
    + Dirty reads, non-repeatable reads, and phantom reads are possible. + To enable, execute the SQL statement + SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ UNCOMMITTED +
  • +
  • Read committed
    This is the default level. - Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. - Higher concurrency is possible when using this level.
    - To enable, execute the SQL statement SET LOCK_MODE 3
    - or append ;LOCK_MODE=3 to the database URL: jdbc:h2:~/test;LOCK_MODE=3 -
  • -Serializable
    - Both read locks and write locks are kept until the transaction commits. - To enable, execute the SQL statement SET LOCK_MODE 1
    - or append ;LOCK_MODE=1 to the database URL: jdbc:h2:~/test;LOCK_MODE=1 -
  • Read Uncommitted
    - This level means that transaction isolation is disabled. - This level is not supported by PageStore engine if multi-threaded mode is enabled. -
    - To enable, execute the SQL statement SET LOCK_MODE 0
    - or append ;LOCK_MODE=0 to the database URL: jdbc:h2:~/test;LOCK_MODE=0 + Dirty reads aren't possible; non-repeatable reads and phantom reads are possible. + To enable, execute the SQL statement + SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED +
  • +
  • Repeatable read
    + Dirty reads and non-repeatable reads aren't possible, phantom reads are possible. + To enable, execute the SQL statement + SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ +
  • +
  • Snapshot
    + Dirty reads, non-repeatable reads, and phantom reads aren't possible. + This isolation level is very expensive in databases with many tables. + To enable, execute the SQL statement + SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SNAPSHOT +
  • +
  • Serializable
    + Dirty reads, non-repeatable reads, and phantom reads aren't possible. + Note that this isolation level in H2 currently doesn't ensure equivalence of concurrent and serializable execution + of transactions that perform write operations. + This isolation level is very expensive in databases with many tables. + To enable, execute the SQL statement + SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE
-

-When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited. -

    -
  • Dirty Reads
    +
  • Dirty reads
    Means a connection can read uncommitted changes made by another connection.
    - Possible with: read uncommitted -
  • Non-Repeatable Reads
    + Possible with: read uncommitted. +
  • Non-repeatable reads
    A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result.
    - Possible with: read uncommitted, read committed -
  • Phantom Reads
    + Possible with: read uncommitted, read committed. +
  • Phantom reads
    A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row.
    - Possible with: read uncommitted, read committed + Possible with: read uncommitted, read committed, repeatable read.

Multi-Version Concurrency Control (MVCC)

-With default MVStore engine delete, insert and update operations only issue a shared lock on the table. +Insert and update operations only issue a shared lock on the table. An exclusive lock is still used when adding or removing columns or when dropping the table. Connections only 'see' committed data, and own changes. That means, if connection A updates a row but doesn't commit this change yet, connection B will see the old value. @@ -286,22 +276,6 @@

Multi-Version Concurrency Control (MVCC)

database waits until it can apply the change, but at most until the lock timeout expires.

-

Table Level Locking (PageStore engine)

-

-With PageStore engine to make sure all connections only see consistent data, table level locking is used. -This mechanism does not allow high concurrency, but is very fast. -Shared locks and exclusive locks are supported. -Before reading from a table, the database tries to add a shared lock to the table -(this is only possible if there is no exclusive lock on the object by another connection). -If the shared lock is added successfully, the table can be read. It is allowed that -other connections also have a shared lock on the same object. If a connection wants -to write to a table (update or delete a row), an exclusive lock is required. To get the -exclusive lock, other connection must not have any locks on the object. After the -connection commits, all locks are released. -This database keeps all locks in memory. -When a lock is released, and multiple connections are waiting for it, one of them is picked at random. -

-

Lock Timeout

If a connection cannot get a lock on an object, the connection waits for some amount @@ -387,7 +361,7 @@

Detect Which Cluster Instances are Running

To find out which cluster nodes are currently running, execute the following SQL statement:

-SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME='CLUSTER'
+SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'CLUSTER'
 

If the result is '' (two single quotes), then the cluster mode is disabled. Otherwise, the list of @@ -419,7 +393,7 @@

Clustering Algorithm and Limitations

Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE). However, they can be used in read-only statements and the result can then be used for modifying statements. -Using auto-increment and identity columns is currently not supported. +Identity columns aren't supported. Instead, sequence values need to be manually requested and then used to insert data (using two statements).

@@ -479,177 +453,250 @@

Keywords / Reserved Words

- - - - - - - + + + + + + + + + + + - + - + + + - + - + + + + + - + - + - + + + - + - + - + + + - + + + + + + + - + - + - + + + + + - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + + + - + - + - + - + - - - + - + - + - + + + - + - + - + - + - + - + - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - + - + - + + + - - - - - - - + + + + + + + + + + + - - - + + + - + - + - + + + - + - + + + + + + + + + - + + + - + - + - + + + + +
KeywordH2SQL:​2011SQL:​2008SQL:​2003SQL:​1999SQL-92KeywordH2SQL Standard
2016201120082003199992
ALL++++++
+++++++
ANDCS+++++
+++++++
ANY+++++++
ARRAY+++++
++++++
ASCS+++++
+++++++
ASYMMETRIC+++++NR
AUTHORIZATION+++++++
BETWEENCS+++NR+
+++++NR+
BOTHCS+++++
CS++++++
CASE++++++
+++++++
CAST+++++++
CHECK++++++
+++++++
CONSTRAINT++++++
+++++++
CROSS++++++
+++++++
CURRENT_CATALOG++++
CURRENT_DATE++++++
+++++++
CURRENT_PATH++++++
CURRENT_ROLE++++++
CURRENT_SCHEMA++++
CURRENT_TIME++++++
+++++++
CURRENT_TIMESTAMP++++++
+++++++
CURRENT_USER++++++
+++++++
DAY+++++++
DEFAULT+++++++
DISTINCT++++++
+++++++
ELSE+++++++
END+++++++
EXCEPT++++++
+++++++
EXISTS++++NR+
+++++NR+
FALSE++++++
+++++++
FETCH++++++
+++++++
FILTERCS+++
CS++++
FOR++++++
+++++++
FOREIGN++++++
+++++++
FROM++++++
+++++++
FULL++++++
+++++++
GROUP++++++
+++++++
GROUPSCS+
CS++
HAVING++++++
+++++++
HOUR+++++++
IF+
+
ILIKECS
CS
INCS+++++
+++++++
INNER++++++
+++++++
INTERSECT++++++
INTERSECTS+
+++++++
INTERVAL++++++
+++++++
IS++++++
+++++++
JOIN++++++
+++++++
KEY+NRNRNRNR++
LEADINGCS+++++
CS++++++
LEFTCS+++++
+++++++
LIKE++++++
+++++++
LIMIT++
MS+
LOCALTIME+++++
++++++
LOCALTIMESTAMP+++++
++++++
MINUS+
MS
MINUTE+++++++
MONTH+++++++
NATURAL++++++
+++++++
NOT++++++
+++++++
NULL++++++
+++++++
OFFSET+++
++++
ON++++++
+++++++
ORCS+++++
+++++++
ORDER++++++
+++++++
OVERCS+++
CS++++
PARTITIONCS+++
CS++++
PRIMARY++++++
+++++++
QUALIFY+
+
RANGECS+++
CS++++
REGEXPCS
CS
RIGHTCS+++++
+++++++
ROW+++++
_ROWID_+
++++++
ROWNUM+
+
ROWSCS+++++
CS++++++
SECOND+++++++
SELECT++++++
SYSDATECS
SYSTIMECS
SYSTIMESTAMPCS
+++++++
SESSION_USER++++++
SET+++++++
SOME+++++++
SYMMETRIC+++++NR
SYSTEM_USER+++++++
TABLE++++++
TODAYCS
+++++++
TO+++++++
TOPCS
MS
CS
TRAILINGCS+++++
CS++++++
TRUE++++++
+++++++
UESCAPE+++++
UNION++++++
+++++++
UNIQUE++++++
+++++++
UNKNOWN+++++++
USER+++++++
USING+++++++
VALUE+++++++
VALUES++++++
+++++++
WHEN+++++++
WHERE++++++
+++++++
WINDOW++++
+++++
WITH++++++
+++++++
YEAR+++++++
_ROWID_+

-Some keywords in H2 are context-sensitive (CS), such keywords may be used as identifiers in some places, +Mode-sensitive keywords (MS) are keywords only in some compatibility modes. +

+
  • LIMIT is a keyword only in Regular, Legacy, DB2, HSQLDB, MariaDB, MySQL, and PostgreSQL compatibility modes. It is an identifier in Strict, Derby, MSSQLServer, and Oracle compatibility modes.
  • MINUS is a keyword only in Regular, Legacy, DB2, HSQLDB, and Oracle compatibility modes. +It is an identifier in Strict, Derby, MSSQLServer, MariaDB, MySQL, and PostgreSQL compatibility modes. +
  • TOP is a context-sensitive keyword (can be either keyword or identifier) +only in Regular, Legacy, HSQLDB, and MSSQLServer compatibility modes. +It is an identifier unconditionally in Strict, Derby, DB2, MariaDB, MySQL, Oracle, and PostgreSQL compatibility modes. +
+

+Context-sensitive keywords (CS) can be used as identifiers in some places, but cannot be used as identifiers in others. +Normal keywords (+) are always treated as keywords. +

+

Most keywords in H2 are also reserved (+) or non-reserved (NR) words in the SQL Standard. Newer versions of H2 may have more keywords than older ones. +Reserved words from the SQL Standard are potential candidates for keywords in future versions.

+

There is a compatibility setting +SET NON_KEYWORDS +that can be used as a temporary workaround for applications that use keywords as unquoted identifiers.

+

Standards Compliance

This database tries to be as standards-compliant as possible. For the SQL language, ANSI/ISO is the main

Using Microsoft Access

Tools - Options - Edit/Find - ODBC fields.

-

Using H2 in Microsoft .NET

-

-The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. -You can access a H2 database on .NET using the JDBC API, or using the ADO.NET interface. -

- -

Using the ADO.NET API on .NET

-

-An implementation of the ADO.NET interface is available in the open source project -H2Sharp. -

- -

Using the JDBC API on .NET

-
  • Install the .NET Framework from Microsoft. - Mono has not yet been tested. -
  • Install IKVM.NET. -
  • Copy the h2*.jar file to ikvm/bin -
  • Run the H2 Console using: - ikvm -jar h2*.jar -
  • Convert the H2 Console to an .exe file using: - ikvmc -target:winexe h2*.jar. - You may ignore the warnings. -
  • Create a .dll file using (change the version accordingly): - ikvmc.exe -target:library -version:1.0.69.0 h2*.jar -
-

-If you want your C# application use H2, you need to add the h2.dll and the -IKVM.OpenJDK.ClassLibrary.dll to your C# solution. Here some sample code: -

-
-using System;
-using java.sql;
-
-class Test
-{
-    static public void Main()
-    {
-        org.h2.Driver.load();
-        Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "sa");
-        Statement stat = conn.createStatement();
-        ResultSet rs = stat.executeQuery("SELECT 'Hello World'");
-        while (rs.next())
-        {
-            Console.WriteLine(rs.getString(1));
-        }
-    }
-}
-
-

ACID

In the database world, ACID stands for: @@ -932,7 +930,8 @@

Isolation

For H2, as with most other database systems, the default isolation level is 'read committed'. This provides better performance, but also means that transactions are not completely isolated. H2 supports the transaction isolation levels 'read uncommitted', 'read committed', 'repeatable read', and 'serializable'.

Durability

@@ -1042,7 +1041,8 @@

Using the Recover Tool

For each database in the current directory, a text file will be created. This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript tool or a -RUNSCRIPT FROM SQL statement. The script includes at least one +RUNSCRIPT SQL statement. +The script includes at least one CREATE USER statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script. @@ -1370,9 +1370,10 @@

Protection against Remote Access

If you enable remote access using -webAllowOthers, please ensure the web server can only be accessed from trusted networks. +If this option is specified, -webExternalNames should be also specified with +comma-separated list of external names or addresses of this server. The options -baseDir don't protect -access to the tools section, prevent remote shutdown of the web server, -changes to the preferences, the saved connection settings, +access to the saved connection settings, or access to other databases accessible from the system.

@@ -1574,7 +1575,9 @@

Spatial Features

Here is an example SQL script to create a table with a spatial column and index:

-CREATE TABLE GEO_TABLE(GID SERIAL, THE_GEOM GEOMETRY);
+CREATE TABLE GEO_TABLE(
+    GID BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+    THE_GEOM GEOMETRY);
 INSERT INTO GEO_TABLE(THE_GEOM) VALUES
     ('POINT(500 505)'),
     ('LINESTRING(550 551, 525 512, 565 566)'),
@@ -1600,14 +1603,13 @@ 

Spatial Features

'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))'; -- Result SELECT - "GEO_TABLE"."GID", - "GEO_TABLE"."THE_GEOM" + "PUBLIC"."GEO_TABLE"."GID", + "PUBLIC"."GEO_TABLE"."THE_GEOM" FROM "PUBLIC"."GEO_TABLE" - /* PUBLIC.GEO_TABLE_SPATIAL_INDEX: - THE_GEOM && - 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))'::Geometry */ -WHERE INTERSECTS("THE_GEOM", - 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))'::Geometry) + /* PUBLIC.GEO_TABLE_SPATIAL_INDEX: THE_GEOM && + GEOMETRY 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))' */ +WHERE "THE_GEOM" && + GEOMETRY 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))'

For persistent databases, the spatial index is stored on disk; @@ -1644,7 +1646,7 @@

Recursive Queries

WITH LINK(ID, NAME, LEVEL) AS ( SELECT ID, NAME, 0 FROM FOLDER WHERE PARENT IS NULL UNION ALL - SELECT FOLDER.ID, IFNULL(LINK.NAME || '/', '') || FOLDER.NAME, LEVEL + 1 + SELECT FOLDER.ID, COALESCE(LINK.NAME || '/', '') || FOLDER.NAME, LEVEL + 1 FROM LINK INNER JOIN FOLDER ON LINK.ID = FOLDER.PARENT ) SELECT NAME FROM LINK WHERE NAME IS NOT NULL ORDER BY ID; @@ -1693,7 +1695,7 @@

Settings Read from System Properties

For a complete list of settings, see -SysProperties. +SysProperties.

Setting the Server Bind Address

@@ -1709,16 +1711,16 @@

Pluggable File System

This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. -Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. +Internally, the interfaces are very similar to the Java 7 NIO2 API. The following file systems are included:

-
  • zip: read-only zip-file based file system. Format: zip:/zipFileName!/fileName. +
    • file: the default file system that uses FileChannel. +
    • zip: read-only zip-file based file system. Format: zip:~/zipFileName!/fileName.
    • split: file system that splits files in 1 GB files (stackable with other file systems). -
    • nio: file system that uses FileChannel instead of RandomAccessFile (faster in some operating systems).
    • nioMapped: file system that uses memory mapped files (faster in some operating systems). Please note that there currently is a file size limitation of 2 GB when using this file system. - To work around this limitation, combine it with the split file system: split:nioMapped:test. -
    • async: experimental file system that uses AsynchronousFileChannel instead of RandomAccessFile (faster in some operating systems). + To work around this limitation, combine it with the split file system: split:nioMapped:~/test. +
    • async: experimental file system that uses AsynchronousFileChannel instead of FileChannel (faster in some operating systems).
    • memFS: in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself).
    • memLZF: compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself).
    • nioMemFS: stores data outside of the VM's heap - useful for large memory DBs without incurring GC costs. @@ -1732,9 +1734,8 @@

      Pluggable File System

      The default value is 1%.

    -As an example, to use the nio file system with PageStore storage engine, -use the following database URL: jdbc:h2:nio:~/test;MV_STORE=FALSE. -With MVStore storage engine nio file system is used by default. +As an example, to use the async: file system +use the following database URL: jdbc:h2:async:~/test.

    To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase, @@ -1762,47 +1763,10 @@

    Split File System

    However this can be changed if required, by specifying the block size in the file name. The file name format is: split:<x>:<fileName> where the file size per block is 2^x. For 1 MiB block sizes, use x = 20 (because 2^20 is 1 MiB). -The following file name means the logical file is split into 1 MiB blocks: split:20:test.h2.db. +The following file name means the logical file is split into 1 MiB blocks: split:20:~/test.h2.db. An example database URL for this case is jdbc:h2:split:20:~/test.

    -

    Database Upgrade

    -

    -In version 1.2, H2 introduced a new file store implementation which is incompatible to the one used in versions < 1.2. -To automatically convert databases to the new file store, it is necessary to include an additional jar file. -The file can be found at http://h2database.com/h2mig_pagestore_addon.jar . -If this file is in the classpath, every connect to an older database will result in a conversion process. -

    -

    -The conversion itself is done internally via 'script to' and 'runscript from'. After the conversion process, the files will be -renamed from -

    -
      -
    • dbName.data.db to dbName.data.db.backup -
    • dbName.index.db to dbName.index.db.backup -
    -

    -by default. Also, the temporary script will be written to the database directory instead of a temporary directory. -Both defaults can be customized via -

    -
      -
    • org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean) -
    • org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean) -
    -

    -prior opening a database connection. -

    -

    -Since version 1.2.140 it is possible to let the old h2 classes (v 1.2.128) connect to the database. -The automatic upgrade .jar file must be present, and the URL must start with jdbc:h2v1_1: -(the JDBC driver class is org.h2.upgrade.v1_1.Driver). -If the database should automatically connect using the old version if a database with the old format exists -(without upgrade), and use the new version otherwise, then append ;NO_UPGRADE=TRUE -to the database URL. -Please note the old driver did not process the system property "h2.baseDir" correctly, -so that using this setting is not supported when upgrading. -

    -

    Java Objects Serialization

    Java objects serialization is enabled by default for columns of type OTHER, using standard Java serialization/deserialization semantics. @@ -1812,7 +1776,7 @@

    Java Objects Serialization

    Serialization and deserialization of java objects is customizable both at system level and at database level providing a -JavaObjectSerializer implementation: +JavaObjectSerializer implementation:

    • @@ -1830,30 +1794,6 @@

      Java Objects Serialization

    -

    Custom Data Types Handler API

    -

    -It is possible to extend the type system of the database by providing your own implementation -of minimal required API basically consisting of type identification and conversion routines. -

    -

    -In order to enable this feature, set the system property h2.customDataTypesHandler (default: null) to the fully qualified name of the class providing -CustomDataTypesHandler interface implementation.
    -The instance of that class will be created by H2 and used to: -

    -
      -
    • resolve the names and identifiers of extrinsic data types. -
    • -
    • convert values of extrinsic data types to and from values of built-in types. -
    • -
    • provide order of the data types. -
    • -
    -

    This is a system-level setting, i.e. affects all the databases.

    - -

    Note: Please keep in mind that this feature may not possibly provide the same ABI stability level as other features as it exposes many of the H2 internals. You may be required to update your code occasionally due to internal changes in H2 if you are going to use this feature. -

    - -

    Limits and Limitations

    This database has the following known limitations: @@ -1869,28 +1809,29 @@

    Limits and Limitations

    An example database URL is: jdbc:h2:split:~/test.
  • The maximum number of rows per table is 2^64.
  • The maximum number of open transactions is 65535. +
  • The maximum number of columns in a table or expressions in a SELECT statement is 16384. +The actual possible number can be smaller if their definitions are too long. +
  • The maximum length of an identifier (table name, column name, and so on) is 256 characters. +
  • The maximum length of CHARACTER, CHARACTER VARYING and VARCHAR_IGNORECASE values and columns +is 1048576 characters. +
  • The maximum length of BINARY, BINARY VARYING, JAVA_OBJECT, GEOMETRY, and JSON values and columns +is 1048576 bytes. +
  • The maximum precision of NUMERIC and DECFLOAT values and columns is 100000. +
  • The maximum length of an ENUM value is 1048576 characters, the maximum number of ENUM values is 65536. +
  • The maximum cardinality of an ARRAY value or column is 65536. +
  • The maximum degree of a ROW value or column is 16384. +
  • The maximum index of parameter is 100000.
  • Main memory requirements: The larger the database, the more main memory is required. - With the current storage mechanism (the page store), - the minimum main memory required is around 1 MB for each 8 GB database file size.
  • Limit on the complexity of SQL statements. -Statements of the following form will result in a stack overflow exception: -
    -SELECT * FROM DUAL WHERE X = 1
    -OR X = 2 OR X = 2 OR X = 2 OR X = 2 OR X = 2
    --- repeat previous line 500 times --
    -
    +Very complex expressions may result in a stack overflow exception.
  • There is no limit for the following entities, except the memory and storage capacity: - maximum identifier length (table name, column name, and so on); - maximum number of tables, columns, indexes, triggers, and other database objects; - maximum statement length, number of parameters per statement, tables per statement, expressions - in order by, group by, having, and so on; + maximum number of tables, indexes, triggers, and other database objects; + maximum statement length, tables per statement; maximum rows per query; - maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; - maximum row length, index row length, select row length; - maximum length of a varchar column, decimal column, literal in a statement. + maximum indexes per table, lob columns per table, and so on; + maximum row length, index row length, select row length.
  • Querying from the metadata tables is slow if there are many tables (thousands). -
  • For limitations on data types, see the documentation of the respective Java data type - or the data type documentation of this database. +
  • For other limitations on data types, see the data type documentation of this database.
diff --git a/h2/src/docsrc/html/architecture.html b/h2/src/docsrc/html/architecture.html index f9b294679f..af4ccdca18 100644 --- a/h2/src/docsrc/html/architecture.html +++ b/h2/src/docsrc/html/architecture.html @@ -1,7 +1,7 @@ @@ -50,6 +50,7 @@

Introduction

Top-down Overview

Working from the top down, the layers look like this: +

  • JDBC driver.
  • Connection/session management.
  • SQL Parser. @@ -59,7 +60,6 @@

    Top-down Overview

  • B-tree engine and page-based storage allocation.
  • Filesystem abstraction.
-

JDBC Driver

@@ -69,6 +69,7 @@

JDBC Driver

Connection/session management

The primary classes of interest are: +

@@ -79,7 +80,6 @@

Connection/session management

PackageDescription
org.h2.engine.Databasethe root/global class
org.h2.engine.SessionRemote remote session
-

Parser

@@ -95,14 +95,15 @@

Command execution and planning

Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. - +

+

The primary packages of interest are: +

PackageDescription
org.h2.command.ddlCommands that modify schema data structures
org.h2.command.dmlCommands that modify data
-

Table/Index/Constraints

@@ -110,18 +111,18 @@

Table/Index/Constraints

The primary packages of interest are: +

PackageDescription
org.h2.tableImplementations of different kinds of tables
org.h2.indexImplementations of different kinds of indices
-

Undo log, redo log, and transactions layer

We have a transaction log, which is shared among all sessions. See also https://en.wikipedia.org/wiki/Transaction_log -http://h2database.com/html/grammar.html#set_log +https://h2database.com/html/grammar.html#set_log

We also have an undo log, which is per session, to undo an operation (an update that fails for example) diff --git a/h2/src/docsrc/html/build.html b/h2/src/docsrc/html/build.html index c516e77c5d..87a588d72b 100644 --- a/h2/src/docsrc/html/build.html +++ b/h2/src/docsrc/html/build.html @@ -1,7 +1,7 @@ @@ -25,8 +25,6 @@

Build

Environment
Building the Software
- - Build Targets
Using Maven 2
@@ -49,29 +47,27 @@

Portability

Environment

To run this database, a Java Runtime Environment (JRE) version 8 or higher is required.

To create the database executables, the following software stack was used. To use this database, it is not required to install this software however.

Building the Software

-You need to install a JDK, for example the Oracle JDK version 7 or 8. +You need to install a JDK, for example the Oracle JDK version 8. Ensure that Java binary directory is included in the PATH environment variable, and that the environment variable JAVA_HOME points to your Java installation. On the command line, go to the directory h2 and execute the following command: @@ -95,28 +91,9 @@

Building the Software

./build.sh - -

Build Targets

-

-The build system can generate smaller jar files as well. The following targets are currently supported: -

-
  • jarClient - creates the file h2client.jar. This only contains the JDBC client. -
  • jarSmall - creates the file h2small.jar. - This only contains the embedded database. Debug information is disabled. -
  • javadocImpl creates the Javadocs of the implementation. -
-

-To create the file h2client.jar, go to the directory h2 and execute the following command: -

-
-build jarClient
-
-

Using Apache Lucene

-Apache Lucene 5.5.5 is used for testing. -Newer versions up to 7.7.1 can also be used. +Apache Lucene 8.5.2 is used for testing.

Using Maven 2

@@ -134,7 +111,7 @@

Using a Central Repository

New versions of this database are first uploaded to http://hsql.sourceforge.net/m2-repo/ and then automatically -synchronized with the main Maven repository; +synchronized with the main Maven repository; however after a new release it may take a few hours before they are available there.

Maven Plugin to Start and Stop the TCP Server

@@ -175,7 +152,7 @@

Using Eclipse

To create an Eclipse project for H2, use the following steps:

-
  • Install Git and Eclipse. +
    • Install Git and Eclipse.
    • Get the H2 source code from Github:
      git clone https://github.com/h2database/h2database
    • Download all dependencies:
      @@ -206,7 +183,7 @@

      Submitting Source Code Changes

      If you'd like to contribute bug fixes or new features, please consider the following guidelines to simplify merging them:

      -
      • Only use Java 7 features (do not use Java 8/9/etc) (see Environment). +
        • Only use Java 8 features (do not use Java 9/10/etc) (see Environment).
        • Follow the coding style used in the project, and use Checkstyle (see above) to verify. For example, do not use tabs (use spaces instead). The checkstyle configuration is in src/installer/checkstyle.xml. @@ -223,7 +200,7 @@

          Submitting Source Code Changes

        • Verify that you did not break other features: run the test cases by executing build test.
        • Provide end user documentation if required (src/docsrc/html/*). -
        • Document grammar changes in src/docsrc/help/help.csv +
        • Document grammar changes in src/main/org/h2/res/help.csv
        • Provide a change log entry (src/docsrc/html/changelog.html).
        • Verify the spelling using build spellcheck. If required add the new words to src/tools/org/h2/build/doc/dictionary.txt. @@ -237,13 +214,13 @@

          Submitting Source Code Changes

          For legal reasons, patches need to be public in the form of an issue report or attachment or in the form of an email - to the group. + to the group. Significant contributions need to include the following statement:

          "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 -(http://h2database.com/html/license.html)." +(https://h2database.com/html/license.html)."

          Reporting Problems or Requests

          @@ -254,17 +231,18 @@

          Reporting Problems or Requests

          • For bug reports, please provide a short, self contained, correct (compilable), example of the problem.
          • Feature requests are always welcome, even if the feature is already on the - roadmap. Your mail will help prioritize feature requests. + issue tracker + you can comment it. If you urgently need a feature, consider providing a patch.
          • Before posting problems, check the FAQ and do a Google search.
          • When got an unexpected exception, please try the - Error Analyzer tool. If this doesn't help, + Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s).
          • When sending source code, please use a public web clipboard such as Pastebin or - Mystic Paste + Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but so that the problem can still be reproduced. @@ -279,7 +257,7 @@

            Reporting Problems or Requests

            Google Drive.
          • Google Group versus issue tracking: Use the - Google Group + Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an issue, @@ -290,7 +268,7 @@

            Reporting Problems or Requests

            -XX:+HeapDumpOnOutOfMemoryError (to create a heap dump file on out of memory) and a memory analysis tool such as the - Eclipse Memory Analyzer (MAT). + Eclipse Memory Analyzer (MAT).
  • It may take a few days to get an answer. Please do not double post.
          @@ -298,14 +276,9 @@

          Automated Build

          This build process is automated and runs regularly. The build process includes running the tests and code coverage, using the command line -./build.sh clean jar coverage -Dh2.ftpPassword=... uploadBuild. -The last results are available here: +./build.sh jar testCI. +The results are available on CI workflow page.

          -

          Generating Railroad Diagrams

          diff --git a/h2/src/docsrc/html/changelog.html b/h2/src/docsrc/html/changelog.html index 6a36a27766..23c1e63e38 100644 --- a/h2/src/docsrc/html/changelog.html +++ b/h2/src/docsrc/html/changelog.html @@ -1,7 +1,7 @@ @@ -21,1466 +21,1230 @@

          Change Log

          Next Version (unreleased)

            -
          • - +
          • Nothing yet...
          -

          Version 1.4.199 (2019-03-13)

          +

          Version 2.1.210 (2022-01-17)

            -
          • PR #1807: Reduce code duplication and remove h2.mixedGeometries +
          • PR #3381: Add IDENTITY() and SCOPE_IDENTITY() to LEGACY mode
          • -
          • PR #1806: Improve SELECT FOR UPDATE documentation +
          • Issue #3376: Data cannot be read after insert of clob data > MAX_LENGTH_INPLACE_LOB with data change delta table
          • -
          • PR #1804: Lift limit of 10 characters on enum value (1.4.198 regression) +
          • PR #3377: Add -webExternalNames setting and fix WebServer.getConnection()
          • -
          • PR #1803: Do not rely on index sorting in SELECT FOR UPDATE +
          • PR #3367: Use faster checks of dimension systems of geometries
          • -
          • Issue #1800: Remove experimental status from window functions +
          • PR #3369: Added v2 changes in migration docs
          • -
          • PR #1799: Fire triggers after row locking and remove some leftovers +
          • Issue #3361: MemoryEstimator.estimateMemory() can return negative size
          • -
          • PR #1798: Reuse some string builders, remove StatementBuilder and other minor changes +
          • PR #3362: Use BufferedReader instead of BufferedInputStream to avoid Illegal seek exception
          • -
          • Issue #1795: 1.4.198 regression with batch updates and transactions +
          • Issue #3353: Wrong rownum() scope for DML with change delta table
          • -
          • PR #1794: Ask password in Shell in secure way and improve database creation information in tutorial +
          • PR #3352: make Javadoc happier
          • -
          • PR #1791: Move commands to commands.html and other changes +
          • Issue #3344: Changelog could link to github issue
          • -
          • Issue #1774: H2 Browser configuration is unclear and fails on KUbuntu +
          • Issue #3340: JDBC index type seems wrong
          • -
          • PR #1790: Do not convert standard TRIM function to non-standard functions +
          • Issue #3336: FT_INIT error when mode=MySQL
          • -
          • Issue #1787: Non-standard MERGE throws LOCK_TIMEOUT_1 on violation of some constraints +
          • Issue #3334: Regression with CREATE ALIAS - Parameter "#2" is not set
          • -
          • PR #1784: improve database not found error +
          • Issue #3321: Insert Primary Key after import CSV Data does not work
          • -
          • Issue #1740: Enhancement Request: h2 server: do not swallow exceptions +
          • PR #3323: Tokenize SQL before parsing and preserve tokens for recompilation
          • -
          • Issue #1616: Metadata and scripts should be persisted with unconditionally quoted identifiers +
          • PR #3320: Add Servlet 5-compatible servlet for H2 Console
          • -
          • PR #1779: Improve isSimpleIdentifier() and enquoteIdentifier() +
          • Issue #918: Parser fails recognising set operations in correlated subqueries
          • -
          • PR #1776: Improve DATABASE_TO_LOWER handling +
          • Issue #2050: PostgreSQL with recursive fail with union in the final query
          • -
          • Issue #1771: NPE in Comparison.createIndexConditions +
          • PR #3316: Update copyright years
          • -
          • PR #1772: Fix newlines in test scripts +
          • PR #3315: Never put read locks into lockSharedSessions and other minor changes
          • -
          • Issue #1762: NullPointerException in Parser. Introduced in 1.4.198 +
          • Issue #492: H2 does not correctly parse <parenthesized joined table>
          • -
          • PR #1768: Add more context-sensitive keywords +
          • Issue #3311: Parser creates wrong join graph in some cases and uses wrong tables for column mapping
          • -
          • Issue #1758: sequence restart issue with 1.4.198 +
          • FORCE_JOIN_ORDER setting is removed
          • -
          • Issue #1759: SELECT … FOR UPDATE returns old data in 1.4.198 +
          • Issue #1983: Official build script is not compatible with Java 13
          • -
          • PR #1756: Fix DISTINCT ON in presence of ORDER BY +
          • PR #3305: Add UNIQUE(VALUE) and remove some non-standard keywords
          • -
          • PR #1754: Fix window functions in JOIN with ON condition +
          • PR #3299: Remove useless StringBuilder.toString() call
          • -
          • Issue #1751: making it easier to open console and create local databases -
          • -
          • Issue #1750: JOIN t ON t.col IN (SELECT ...) throws AssertionError +
          • PR #3298: Delete unused sqlTypes array
          -

          Version 1.4.198 (2019-02-22)

          +

          Version 2.0.206 (2022-01-04)

            -
          • Issue #1746: Infinite loop in TestConcurrent.testConcurrentChangeAndGetVersion() -
          • -
          • Issue #1739: Table and view names not case sensitive when using DATABASE_TO_UPPER=FALSE -
          • -
          • Issue #848: H2 PostgreSQL Compatibility Mode: lowercase metadata -
          • -
          • Issue #485: Problem is in invalid case for schema's IGNORECASE=true;DATABASE_TO_UPPER=false -
          • -
          • Issue #1742, PR #1743: Assorted small changes -
          • -
          • PR #1738: Reduce memory allocation in getSQL() methods -
          • -
          • PR #1737: more javadoc updates -
          • -
          • Issue #1735: Creating views with DATABASE_TO_UPPER=FALSE fails -
          • -
          • Issue #1732: source.html does not work +
          • Issue #3322: Create linked table fails when the table contains a Geometry with a data type specified
          • -
          • Issue #1730: Show error in H2 Console if specified driver is not compatible with URL +
          • Issue #3297: Unexpected GROUP BY results with indexed IGNORECASE column
          • -
          • Issue #1590: Error on executing "DELETE FROM table1 WHERE ID = ?; DELETE FROM table2 WHERE ID = ?;" -
          • -
          • Issue #1727: Support ISODOW as identifier for the extract function additional to ISO_DAY_OF_WEEK -
          • -
          • PR #1580, #1726: Disable remote database creation by default -
          • -
          • PR #1725: Add partial implementation of standard LISTAGG aggregate function -
          • -
          • PR #1722: Fix window definition lookup in some queries -
          • -
          • PR #1721: Fix derived column list in complex queries -
          • -
          • Issue #1718: Window function and values clause don't work well together -
          • -
          • Issue #1592: Index out of bounds exception in Page.getKey() +
          + +

          Version 2.0.204 (2021-12-21)

          +
            +
          • Issue #3291: Add Legacy and Strict modes
          • -
          • PR #1716: Improve documentation of some DML commands +
          • Issue #3287: SELECT statement works on 1.4.200 but fails on 2.0.202 with "Column XYZ must be in the GROUP BY list"
          • -
          • Issue #1715: Postgres mode: Domain "regproc" already exists +
          • PR #3284: Remove unused UNDO_LOG setting
          • -
          • PR #1714: Assorted changes +
          • Issue #3251: Table with GEOMETRY column can't have a TriggerAdapter-based trigger any more
          • -
          • PR #1713: Remove DataType.defaultDisplaySize and fix display size in TypeInfo +
          • PR #3281: DateTimeFormatter-based FORMATDATETIME and PARSEDATETIME and other changes
          • -
          • PR #1711: Add QUALIFY clause to SELECT command +
          • Issue #3246: Spatial predicates with comparison are broken in MySQL compatibility mode
          • -
          • Issue #1708: CREATE TABLE AS doesn't support column lists without data types +
          • Issue #3270: org.h2.jdbc.JdbcSQLFeatureNotSupportedException: Feature not supported: "Unsafe comparison or cast"
          • -
          • PR #1707: Fix sort order and ENUM data type in external results +
          • Issue #3268 / PR #3275: Add TO_DATE and TO_TIMESTAMP to PostgreSQL compatibility mode
          • -
          • PR #1706: Add hypothetical set functions +
          • PR #3274: Remove some dead code and unused params
          • -
          • PR #1705: Fix GROUP_CONCAT with variable separator +
          • Issue #3266: Oracle compatibility NUMBER without precision and scale should have variable scale
          • -
          • PR #1704: Fix return type of PERCENTILE_CONT and MEDIAN +
          • Issue #3263: Unable to store BigDecimal with negative scale in NUMERIC(19,6) column
          • -
          • PR #1701: Add PERCENTILE_CONT and PERCENTILE_DISC inverse distribution functions +
          • PR #3261: Small optimization for MIN and MAX
          • -
          • Issues #1297, #1697: Failure on concurrent session closure +
          • Issue #3258 / PR #3259: Prevent incorrect optimization of COUNT(*) and other changes
          • -
          • Issue #1297: removeOldTempIndexes on PageStore causes NullPointerException +
          • PR #3255: Throw proper exception when type of argument isn't known
          • -
          • Issue #1354: TestCrashAPI: another NPE +
          • Issue #3249: Multi-column assignment with subquery throws exception when subquery doesn't return any rows
          • -
          • PR #1695: Reduce memory for TestMVTempResult to 64m +
          • PR #3248: Remove redundant uniqueness check, correct version in pom
          • -
          • Issue #1691: Append mode causes OOME in MVPlainTempResult +
          • PR #3247: Avoid AIOBE exception in TestCrashAPI and in Transaction
          • -
          • PR #1692: Use MVTempResult unconditionally +
          • Issue #3241: ResultSetMetaData::getColumnTypeName should produce the correct ARRAY type
          • -
          • Issue #1689: Use separate constants for data types in Data, ValueDataType, and Transfer +
          • Issue #3204: H2 Tools Web Console: Unicode 32
          • -
          • PR #1687: MVMap minor cleanup +
          • Issue #3227: Regression when referencing outer joined column from correlated subquery
          • -
          • PR #1686: Fix a regression with ENUM data type +
          • Issue #3237: Can no longer cast CHAR(n) to BOOLEAN with n > 1
          • -
          • PR #1685: Fix CHAR in PostgreSQL mode and refactor some code +
          • Issue #3235: Regression in IN predicate with empty in list
          • -
          • Issue #1681: IN () doesn't work with row values when data types are not exactly the same +
          • Issue #3236: NullPointerException in DatabaseMetaData::getIndexInfo when querying the info for views
          • -
          • Issue #1320: OOME / GC overhead in IndexCursor.nextCursor() +
          • Issue #3233: General error when using NULL predicate on _ROWID_ column
          • -
          • PR #1680: Assorted fixes for ALTER TABLE ALTER COLUMN +
          • Issue #3223: TRUNC(v, p) with negative precisions no longer works
          • -
          • PR #1679: Use TestScript for testSimple +
          • Issue #3221: NullPointerException when creating domain
          • -
          • Issue #1677: Unable to use VALUES keyword in WHERE clause +
          • Issue #3186: ResultSetMetaData.getSchemaName() returns empty string for aliased columns
          • -
          • Issue #1672: Deadlock on MVStore close in TestOutOfMemory +
          + +

          Version 2.0.202 (2021-11-25)

          +
            +
          • Issue #3206: CVE Vulnerability CVE-2018-14335
          • -
          • Issue #1665: TestCrashAPI: NPE with ENUM in MINUS operator +
          • Issue #3174: Add keyword AUTOCOMMIT on create linked table to control the commit mode
          • -
          • Issue #1602: Combine type, precision, scale, display size and extTypeInfo into one object +
          • Issue #3130: Precision of NUMERIC values isn't verified in the Oracle compatibility mode
          • -
          • PR #1671: Assorted changes +
          • Issue #3122: Documentation: Syntax diagram for RENAME CONSTRAINT incorrect
          • -
          • Issue #1668: MySQL compatibility DATE() function should return NULL on error +
          • PR #3129: remove LOB compression
          • -
          • Issue #1604: TestCrashAPI: PreparedStatement.getGeneratedKeys() is already closed +
          • PR #3127: Cleanups post PageStore removal
          • -
          • PR #1667: Detect NULL values and overflow in window frame bounds +
          • PR #3126: Change nested classes to static nested classes where possible
          • -
          • PR #1664: Allow any expressions in window frames +
          • PR #3125: Strongly typed LobStorageMap
          • -
          • Issue #1576: H2 Console should not display precision and scale for data types that don't have them +
          • PR #3124: Remove PageStore engine
          • -
          • PR #1662: Fix Alter Table Drop Column In View when table name is wrapped by Double Quotes +
          • Issue #3118: SHUTDOWN COMPACT causes 2PC to corrupt database in a simulated crash
          • -
          • PR #1660: Optimize window aggregates with AND UNBOUNDED FOLLOWING and no exclusions +
          • Issue #3115: Infinite loop then OOM in org.h2.mvstore.tx.Transaction.waitFor() when deadlock occurs
          • -
          • PR #1658: Assorted small changes +
          • Issue #3113: Data lost when 2 threads read/write TransactionStore and close it normally even if MVStore autoCommit disabled
          • -
          • PR #1657: Failure to stop background thread +
          • PR #3110: Fix possible int overflow and minor doc change
          • -
          • PR #1656: Optimize window aggregates with ORDER BY + UNBOUNDED PRECEDING + no exclusions +
          • Issue #3036: A database that contains BLOBs might grow without being able to be compacted
          • -
          • Issue #1654: OOM in TestMemoryUsage, in big mode +
          • Issue #3097: Possible MVStore compaction issue
          • -
          • Issue #1651: TIMESTAMP values near DST may be changed in MVStore database due to UTC-based PageStore format in some temporary storages +
          • PR #3096: Add separate LOB data layer for values
          • -
          • PR #1650: Fix race in MVStore.close() +
          • Issue #3093: ROWNUM filter doesn't work with more than one table
          • -
          • Issue #1212: TestDiskFull: The file is locked +
          • PR #3087: Add "CONVERT TO CHARACTER SET" to compatibility modes
          • -
          • PR #1648: Add functions ARRAY_CAT(), ARRAY_APPEND() and ARRAY_SLICE() +
          • Issue #3080: Complex Query returns different results depending on the number of arguments in the IN clause
          • -
          • PR #1646: In preparation to a release +
          • Issue #3066: Very high DB opening/closing times
          • -
          • PR #1643: more javadoc update +
          • PR #3077: Add CREATE UNIQUE INDEX ... INCLUDE
          • -
          • PR #1642: update javadoc +
          • Issue #3061 / PR #3074: GROUP BY using column index for MySQL/MariaDB/PostgreSQL compatibility modes
          • -
          • PR #1641: Update copyright years +
          • PR #3067: Restrict identity data types and result limitation clauses to compatibility modes
          • -
          • PR #1640: Suggest ANY(?) instead of variable IN() again +
          • PR #3065: Remove duplicate method IOUtils.getBufferedReader
          • -
          • PR #1638: Add support for Java 11 to test suite +
          • Issue #3055: Phantom table leftover after INSERT .. WITH
          • -
          • PR #1637: Remove explicit unboxing +
          • PR #3062: Add ALTER DOMAIN RENAME CONSTRAINT command
          • -
          • PR #1635: Optimize UUID to VARCHAR conversion and use correct time check in Engine.openSession() +
          • Issue #3059: ALTER TABLE DROP CONSTRAINT doesn't check owner of constraint
          • -
          • Issue #1632: TestMVTableEngine failure +
          • Issue #3054: Add binary set aggregate functions
          • -
          • PR #1631: Prepare to release: javadoc cleanup +
          • Issue #3049: Java value getters of ValueNull should throw exceptions
          • -
          • PR #1630: fix duplicate words typos in comments and javadoc +
          • Issue #3046: SYSTEM_RANGE can't handle bind variable as step size and produces wrong error message
          • -
          • PR #1627: Use lock to protect append buffer +
          • Issue #3033: NPE during BLOB read after 2PC rollback
          • -
          • Issue #1618: GROUP BY does not work with two identical columns in selected expressions +
          • PR #3034: Don't evaluate ValueTimestampTimeZone at begin and end of each command
          • -
          • Issue #1619: Two-phase commit regression in MASTER +
          • PR #3029: Optimize row storage in MVStore and other changes
          • -
          • PR #1626: fix doc +
          • PR #3028: Remove back compatibility
          • -
          • PR #1625: Prepare to release: javadoc cleanup, fix maven build, fix javadoc build +
          • PR #3025: Switch from Travis CI to GitHub Workflows
          • -
          • Issue #1620: UUIDs are unexpectedly sorted as signed +
          • PR #3024: Add initial version of upgrade utility
          • -
          • PR #1614: Use bulk .addAll() operation +
          • Issue #3017: ROUND() does not set correct precision and scale of result
          • -
          • PR #1613: Add explicit table query +
          • Issue #3003: CREATE TABLE ... AS SELECT ... FROM creates invalid column definition when aggregate functions are used
          • -
          • Issue #1608: ARRAY and row value expression should not be the same +
          • Issue #3008: TestCrashAPI: Exception in Arrays.sort() called by LocalResult.done()
          • -
          • Issue #1606: Quantified comparison predicate doesn't work correctly on primary key column +
          • Issue #3006 / PR #3007: Unlock meta during query execution in CREATE TABLE AS query
          • -
          • Issue #1057: Very slow execution with subquery and connection parameter LAZY_QUERY_EXECUTION=1 +
          • PR #3001: PostgreSQL compatibility: UPDATE with FROM
          • -
          • Issue #1072: Very slow execution with join and connection parameter LAZY_QUERY_EXECUTION=1 +
          • PR #2998: Fix off-by-one error with -webAdminPassword in Server
          • -
          • PR #1601: Return BIGINT from ROWNUM(), ROW_NUMBER() and rank functions +
          • PR #2995: Add FETCH_SIZE clause to CREATE LINKED TABLE
          • -
          • PR #1599: cleanup StringUtils.cache +
          • Issue #2907 / PR #2994: Prevent "Chunk not found" on LOB operations
          • -
          • PR #1598: Minor changes in parser and documentation +
          • PR #2993: Update copyright years
          • -
          • PR #1597: Remove SysProperties.CHECK preconditions around simple assertions +
          • Issue #2991: TestCrashAPI: NPE in ScriptCommand.dumpDomains()
          • -
          • PR #1596: Improve SQL Standard compliance in LOB precision parsing +
          • Issue #2950 / PR #2987: Issue commit() right before "non-transactional" DDL command starts
          • -
          • Issue #1594: DBSettings.optimizeIsNull and dead code in IndexCursor.getMax() +
          • PR #2980: Assorted minor changes
          • -
          • PR #1591: Use multi-catch java 7 language construction to simplify code +
          • PR #2966: H2 2.0.201: Linked Tables freeze the Database and freeze the Server Process
          • -
          • Issue #1582: h2 not using best index for >= +
          • Issue #2972: Memory leak due to negative Page memory in the MVStore
          • -
          • PR #1588: Add support for java.time.Period +
          • PR #2971: create skeleton of migration to V2 document
          • -
          • Issue #446: FILE_READ from classpath not working because of 0 byte file length +
          • Issue #2967: MVStore: averageSize int overflow in the class ObjectDataType
          • -
          • PR #1579: fix unintentional append mode disruption +
          • Issue #2963: Syntax error for large hexadecimal constants with DATABASE_TO_UPPER=false
          • -
          • Issue #1573: DELETE FROM w/ ROWNUM and subquery +
          • Issue #2961: Accept CREATE PRIMARY KEY only in metadata or in quirks mode
          • -
          • Issue #187: SHUTDOWN DEFRAG corrupts splitted file database +
          • Issue #2960: Reject invalid CREATE { UNIQUE | HASH } SPATIAL INDEX
          • -
          • PR #1571: Optimizing ConditionAndOr queries +
          • Issue #2958: TableLink is broken for Oracle database after pull request #2903
          • -
          • Issue #1565: SOME / ANY conflict +
          • PR #2955: Prevent incorrect index sorting
          • -
          • PR #1564: Refactor Expression implementations +
          • PR #2951: Add documentation for INFORMATION_SCHEMA
          • -
          • Issue #1561: Incorrect documentation and strange fallback value of SysProperties.FILE_ENCODING +
          • PR #2943: some small prep for next release
          • -
          • Issue #1566: MVStore implements Closeable/AutoCloseable +
          • PR #2948: Add support of Infinity, -Infinity, and NaN to DECFLOAT data type
          • -
          • Issue #1550: OutOfMemoryError during "shutdown defrag" +
          • Issue #2947: Encoding of Unicode and special characters in error messages
          • -
          • Issue #1440: OOM when executing "shutdown compact" in server mode +
          • Issue #2891: Fix import of unnamed referential constraints from SQL scripts generated by older versions of H2
          • -
          • Issue #1561: Incorrect documentation and strange fallback value of SysProperties.FILE_ENCODING +
          • Issue #2812: Unexpected result for query that compares an integer with a string
          • -
          • PR #1557: increase lock timeout to TestConcurrentUpdate due to Travis failures +
          • Issue #2936: Add data type conversion code from datetime and UUID values to JSON
          • -
          • Issue #1554: REGEXP_REPLACE - accept 'g' flag in PostgreSQL compatibility mode +
          • Issue #2935: ENUM ARRAY isn't read properly from persisted data
          • -
          • Issue #950: Comparison between databases in README.md and in features.html +
          • Issue #2923: Combination of fileName() with fileStore() should throw an exception
          • -
          • Issue #1549: [RFE] Implement locking modes (select for update) +
          • Issue #2928: JSON_ARRAYAGG and all NULL values
          • -
          • PR #1548: Add AsynchronousFileChannel-based experimental FilePathAsync +
          • PR #2918: Removal of unnecessary lock
          • -
          • PR #1547: Speedup unused chunks collection +
          • Issue #2911: org.h2.mvstore.MVStoreException: Transaction was illegally transitioned from ROLLING_BACK to ROLLED_BACK
          • -
          • PR #1546: Tiny optimization: use `System.arraycopy` when possible +
          • Issue #1022: JdbcDatabaseMetaData.getPseudoColumns() should be implemented
          • -
          • PR #1545: Export datetime value functions to SQL using standard syntax +
          • Issue #2914: (T1.A = T2.B) OR (T1.A = T2.C) should be optimized to T1.A IN(T2.B, T2.C) to allow index conditions
          • -
          • Issue #1371: NPE in CacheLRU +
          • PR #2903: Assorted changes
          • -
          • Issue #1534: Typo in message +
          • Issue #2901: PgServer returns less rows when fetchSize is set
          • -
          • Issue #1527: Parser performance: Excessive use of regular expressions to validate column names +
          • Issue #2894: NPE in DROP SCHEMA when unique constraint is removed before linked referential constraint
          • -
          • PR #1543: MVStore assorted re-factorings +
          • Issue #2888: H2 should pass time zone of client to the server
          • -
          • PR #1538: Add support for newer Lucene versions without recompilation +
          • PR #2890: Fixed possible eternal wait(0)
          • -
          • Issue #1536: CURRENT_TIMESTAMP result doesn't change under Transactions +
          • Issue #2846: GRANT SELECT, INSERT, UPDATE, DELETE incorrectly gives privileges to drop a table
          • -
          • Issue #239: Consider supporting Lucene 5 indexes +
          • Issue #2882: NPE in UPDATE with SELECT UNION
          • -
          • PR #1520: Fixes bug in PutIfAbsentDecisionMaker +
          • PR #2881: Store users and roles together and user-defined functions and aggregates together
          • -
          • Issue #1518: ENUM and VIEW with filtering on enum column +
          • Issue #2878: Disallow spatial indexes in PageStore databases
          • -
          • Issue #1516: Array element reference array[index] should be 1-based +
          • PR #2874: Use 64-bit row counts in results and other changes
          • -
          • Issue #1512: TestMVTableEngine.testLowRetentionTime(): NPE in VersionedValue.Type +
          • Issue #2866: New INFORMATION_SCHEMA should not use keywords as column names
          • -
          • PR #1513: Assorted minor changes +
          • Issue #2867: PageStore + Lazy + INSERT ... SELECT cause infinite loop
          • -
          • PR #1510: Add optional EXCEPT clause to wildcards +
          • PR #2869: Normalize binary geometry literals and improve EWKB representation of POLYGON EMPTY
          • -
          • PR #1509: Use domain term everywhere +
          • Issue #2860: CHAR columns in PgCatalogTable have incorrect length
          • -
          • Issue #1507: Add INFORMATION_SCHEMA.COLUMNS.COLUMN_TYPE qualification for domains +
          • Issue #2848: Add support for standard <listagg overflow clause>
          • -
          • Issue #1499: TestScript::envelope.sql failure in “big” mode +
          • Issue #2858: Throw 22001 on attempt to use getString() or getBytes() on LOB object longer than 1,048,576 chars/octets
          • -
          • Issue #1498: NPE in SimpleResultSet.getColumnCount() +
          • Issue #2854: Define limits for identifiers, number of columns, etc.
          • -
          • Issue #1495: MERGE statement doesn't affect any rows when Oracle UPDATE .. WHERE .. DELETE .. WHERE is used +
          • PR #2853: Small optimization for Page compression / decompression
          • -
          • Issue #1493: MERGE statement fails when it updates more than one row +
          • Issue #2832: Define length limits for non-LOB data types
          • -
          • Issue #1492: Unnecessary restriction on MERGE USING statement when ON clause doesn't reference any target table columns +
          • Issue #2842: Querying view that uses LTRIM/RTRIM results in a syntax error
          • -
          • Issue #1491: Unnecessary restriction on MERGE USING statement when ON predicate doesn't match inserted row +
          • Issue #2841: Call to STRINGDECODE results in StringIndexOutOfBoundsException
          • -
          • Issue #1490: NullPointerException when running invalid MERGE statement +
          • Issue #2839: Querying a view that uses the POSITION() function results in an unexpected syntax error
          • -
          • Issue #1489: MERGE USING documentation has misleading railroad diagram +
          • Issue #2838: INSERT() with NULL arguments for the original string and string to be added results in NPE
          • -
          • Issue #1488: Improve documentation of window and some other functions +
          • Issue #2837: ROUND() function should reject invalid number of digits immediately
          • -
          • Issue #1485: Default window frame in presence of ORDER BY is RANGE .., not ROWS +
          • Issue #2835: Calling math functions with a string argument results in a NullPointerException
          • -
          • PR #1484: New tests, reimplemented EXCLUDE clause, and assorted changes +
          • Issue #2833: MERGE INTO causes an unexpected syntax error
          • -
          • Issue #1338: MSSQLServer compatibility enhancements +
          • Issue #2831: Restore YEAR data type for MySQL compatibility mode
          • -
          • PR #1480: Update Maven build instruction and fix some problems +
          • Issue #2822: Suspicious logic in Database.closeImpl()
          • -
          • PR #1478: Upgrade maven-surefire-plugin +
          • Issue #2829: Incorrect manifest entries in sources jar
          • -
          • PR #1476: Add TransactionStore to MVStore jar +
          • Issue #2828: Parser can't parse NOT in simple when operand
          • -
          • Issue #1475: Dropping column used by a view produces misleading error message +
          • Issue #2826: Table with a generated column cycle results in a NullPointerException
          • -
          • Issue #1473: TestScript needs better detection of sorted result +
          • Issue #2825: Query with % operator results in a ClassCastException
          • -
          • PR #1471: issue 1350: TestCrashAPI: PageStore.freeListPagesPerList +
          • Issue #2818: TableFilter.getValue() can read value of delegated column faster
          • -
          • PR #1470: Fix window functions in queries with HAVING +
          • Issue #2816: Query on view that uses the BETWEEN operator results in an unexpected syntax error
          • -
          • PR #1469: Forbid incorrect nesting of aggregates and window functions +
          • PR #2815: Remove BINARY_COLLATION and UUID_COLLATION settings
          • -
          • Issue #1437: Generated as Identity has a different behaviour. +
          • Issue #2813: Query with CASE operator unexpectedly results in "Column must be in the GROUP BY list" error
          • -
          • PR #1467: Fix subtraction of timestamps +
          • Issue #2811: Update build numbers and data format versions
          • -
          • PR #1464: Assorted minor changes in window processing code +
          • Issue #2674: OPTIMIZE_IN_SELECT shouldn't convert value to incompatible data types
          • -
          • PR #1463: Fix some window aggregates and reduce amount of collecting implementations +
          • Issue #2803: Disallow comparison operations between incomparable data types
          • -
          • PR #1462: Separate aggregate and window code in some places +
          • Issue #2561: Separate normal functions and table value functions
          • -
          • PR #1461: Add WINDOW clause support +
          • Issue #2804: NPE in ConditionNot.getNotIfPossible()
          • -
          • Issue #1427: Scalability problem in MVSpatialIndex +
          • Issue #2801: Instances of TableView objects leaking
          • -
          • PR #1459: Improve window clause correctness checks +
          • PR #2799: Additional bit functions BITNAND, BITNOR, BITXNOR, BITCOUNT, ULSHIFT, URSHIFT, ROTATELEFT, ROTATERIGHT, BIT_NAND_AGG, BIT_NOR_AGG, and BIT_XNOR_AGG.
          • -
          • PR #1457: Add NTILE(), LEAD() and LAG() window functions +
          • PR #2798: Complete separation of Function class
          • -
          • PR #1456: Add experimental implementation of remaining types of window frames +
          • Issue #2795: Sporadic issues with trigger during concurrent insert in 1.4.199/1.4.200
          • -
          • PR #1454: Add FIRST_VALUE(), LAST_VALUE(), and NTH_VALUE() +
          • PR #2796: Assorted refactorings
          • -
          • PR #1453, Issue #1161: Add ROW_NUMBER(), RANK(), DENSE_RANK(), PERCENT_RANK(), and CUME_DIST() window functions +
          • Issue #2786: Failure in CREATE TABLE AS leaves inconsistent transaction if some rows were successfully inserted
          • -
          • PR #1452: Reset aggregates before reuse +
          • Issue #2790: Examples in documentation of CREATE ALIAS should use standard literals only
          • -
          • PR #1451: Add experimental support for aggregates with OVER (ORDER BY *) +
          • Issue #2787: CONCAT and CONCAT_WS functions
          • -
          • PR #1450: Evaluate window aggregates only once for each partition +
          • PR #2784: Oracle REGEXP_REPLACE support
          • -
          • PR #1449: Move more code from Aggregate and JavaAggregate to AbstractAggregate +
          • Issue #2780: Remove SCOPE_GENERATED_KEYS setting
          • -
          • PR #1448: Add experimental implementation of grouped window queries +
          • PR #2779: Fix incorrect FK restrictions and other changes
          • -
          • PR #1447: Refactor OVER() processing code and fix some issues +
          • PR #2778: Assorted changes
          • -
          • PR #1446: fix : The French messages are bad generated (not contain DB message) +
          • Issue #2776: Referential constraint can create a unique constraint in the wrong schema
          • -
          • PR #1445: Use PostGIS-compatible format for SRID-only constraint in GEOMETRY +
          • Issue #2771: Add documented DEFAULT ON NULL flag for all types of columns
          • -
          • PR #1444: Add experimental unoptimized support for OVER ([PARTITION BY ...]) in aggregates +
          • Issue #2742 / PR #2768: Better separation of MVStore aimed at smaller h2-mvstore jar
          • -
          • PR #1442: Bugfix - Release MVStore lock and file resources rightly even if errors when compacting database +
          • Issue #2764: Identity columns don't accept large numbers
          • -
          • PR #1441: Add GEOMETRY type subtypes with type and SRID constraints +
          • IDENTITY() function is removed, SCOPE_IDENTITY() is now available only in MSSQLServer compatibility mode.
          • -
          • PR #1434: Add support for ENUM in CAST and other changes +
          • Issue #2757: Intermittent TestFileSystem failures
          • -
          • PR #1431: Fix some inconsistencies in documentation and improve mvn build +
          • Issue #2758: Issues with sequences
          • -
          • PR #1428: Add support for M and ZM dimensions to GEOMETRY data type +
          • PR #2756: Prevent DROP NOT NULL for identity columns
          • -
          • Issue #1405: Introduce LocalResult factory +
          • Issue #2753: UPDATE statement changes value of GENERATED ALWAYS AS IDENTITY columns
          • -
          • PR #1422: Add ENVELOPE aggregate function +
          • PR #2751: Add comment explaining seemingly dummy operation
          • -
          • Issue #1421: Remove old-style outer join +
          • PR #2750: Use RFC 4122 compliant UUID comparison by default
          • -
          • PR #1419: Assorted minor changes +
          • PR #2748: PgServer set type text to NULL value
          • -
          • PR #1414: DEFRAG and COMPACT mixup +
          • Issue #2746: Old TCP clients with current server
          • -
          • PR #1413: improvements to MVStore garbage collection +
          • PR #2745: PgServer can send bool in binary mode
          • -
          • PR #1412: Added org.h2.store.fs package to exported osgi bundles +
          • PR #2744: Remove jarSmall and jarClient targets
          • -
          • PR #1409: Map all remaining error codes to custom exception classes +
          • PR #2743: Add IS_TRIGGER_UPDATABLE and other similar fields to INFORMATION_SCHEMA
          • -
          • Issue #1407: Add a MODE() aggregate function +
          • PR #2738: Fix VIEWS.VIEW_DEFINITION and support it for other databases in H2 Console
          • -
          • PR #1402: Duplicate conditions in column check constraint +
          • PR #2737: Assorted changes
          • -
          • PR #1399: Add more subclasses of SQLException and use it for some error codes +
          • PR #2734: Update dependencies and fix ResultSetMetaData.isSigned()
          • -
          • PR #1397: Add DATEADD return type detection +
          • PR #2733: Replace h2.sortNullsHigh with DEFAULT_NULL_ORDERING setting
          • -
          • Issue #1393: Add INFORMATION_SCHEMA.COLUMNS.IS_VISIBLE +
          • PR #2731: Fix spelling errors in German translation
          • -
          • PR #1392: Some refactoring and assorted minor optimizations +
          • PR #2728: Add and use DATA_TYPE_SQL() function and remove INFORMATION_SCHEMA.PARAMETERS.REMARKS
          • -
          • PR #1388: Extract UnaryOperation from Operation and other changes +
          • Issue #1015: ENUM and arithmetic operators
          • -
          • PR #1386: DISK_SPACE_USED() for MVStore and other minor changes +
          • Issue #2711: Store normalized names of data types in metadata
          • -
          • PR #1385: split up the rather large convertTo method +
          • PR #2722: Implement getRowCount() for some INFORMATION_SCHEMA tables
          • -
          • PR #1384: Throw exception if unknown mode is specified in database URL +
          • PR #2721: Improve LOCKS, SESSIONS, and USERS and optimize COUNT(*) on other isolation levels in some cases
          • -
          • Issue #1365, PR #1382: Parse more date-time literals for compatibility with other databases +
          • Issue #2655: TestCrashAPI: AssertionError at MVPrimaryIndex.<init>
          • -
          • PR #1381: Minor fixes for INTERVAL data type +
          • Issue #2716: Fix URL of Maven repository
          • -
          • PR #1380: Improve documentation of intervals +
          • Issue #2715: Mention `DB_CLOSE_DELAY=-1` flag in JDBC URL on the "Cheat Sheet" page
          • -
          • Issue #1189: "Merge into using" parameters aren't found +
          • PR #2714: fixed few code smells discovered by PVS-Studio
          • -
          • Issue #1377: org.h2.api.Interval and TIME leftovers +
          • Issue #2712: `NOT LIKE` to a sub-query doesn't work
          • -
          • PR #1376: TestMultiThreadedKernel is back +
          • PR #2710: PgServer: set oid and attnum in RowDescription
          • -
          • PR #1373: INTERVAL data type +
          • Issue #2254: Add standard DECFLOAT data type
          • -
          • Issue #1369: In MSSQL Server Mode generated UUID fields need NEWID() function +
          • PR #2708: Add declared data type attributes to the INFORMATION_SCHEMA
          • -
          • Issue #756: FunctionsMySql is not in the main jar +
          • Issue #2706: Empty comments / remarks on objects
          • -
          • PR #1368: Parse BINARY VARYING, BINARY LARGE OBJECT, and CHARACTER LARGE OBJECT +
          • PR #2705: Return standard-compliant DATA_TYPE for strings
          • -
          • PR #1367: Assorted changes with SELECT output limitation clauses +
          • PR #2703: Fix case-insensitive comparison issues with national characters
          • -
          • Issue #1363: Why H2 requires random own packages in OSGi bundle description? +
          • Issue #2701: Subquery with FETCH should not accept global conditions
          • -
          • Issue #1192: Add an Automatic-Module-Name +
          • Issue #2699: Remove FUNCTIONS_IN_SCHEMA setting
          • -
          • Issue #1361, PR #1362: Add limited support for MONEY and SMALLMONEY in compatibility modes +
          • Issue #452: Add possibility to use user-defined aggregate functions with schema
          • -
          • Issue #1327: mvn build misses some resources +
          • PR #2695: Refactor handling of parentheses in getSQL() methods
          • -
          • PR #1359: Add system property to return OffsetDateTime from ResultSet.getObject() +
          • PR #2693: disallow VARCHAR_IGNORECASE in PostgreSQL mode
          • -
          • PR #1357: Simplify execution flow in some places +
          • Issue #2407: Implement CHAR whitespace handling correctly
          • -
          • PR #1356: Fix NPE in Query.initExpression() +
          • PR #2685: Check existing data in ALTER DOMAIN ADD CONSTRAINT
          • -
          • PR #1355: Assorted changes in MetaTable +
          • PR #2683: Fix data types in Transfer
          • -
          • Issue #1352: TestCrashAPI: Prepared.getObjectId() was called before +
          • PR #2681: Report user functions in standard ROUTINES and PARAMETERS views
          • -
          • PR #1349: Changes in conversion and comparison methods of Value +
          • PR #2680: Reimplement remaining DatabaseMetaData methods and fix precision of binary numeric types
          • -
          • Issue #1346: Exception when using IN condition for enums +
          • PR #2679: Reimplement getTables(), getTableTypes(), and getColumns()
          • -
          • PR #1345: Replace some init methods with constructors +
          • PR #2678: Reimplement getPrimaryKeys(), getBestRowIdentifier(), getIndexInfo() and others
          • -
          • PR #1344: Streamline last chunk verification on startup +
          • PR #2675: Reimplement getImportedKeys(), getExportedKeys(), and getCrossReferences()
          • -
          • PR #1341: Optimize MVSecondaryIndex.convertToKey() +
          • PR #2673: Reimplement some metadata methods
          • -
          • PR #1340: NoSuchElementException instead of returning null +
          • PR #2672: Forward DatabaseMetaData calls to server
          • -
          • PR #1339: Add support of TIMESTAMP WITH TIME ZONE to addition and subtraction operators +
          • Issue #2329: Content of INFORMATION_SCHEMA should be listed as VIEWS
          • -
          • PR #1337: Streamline Value comparison +
          • PR #2668: Sequence generator data type option and length parameter for JSON data type
          • -
          • PR #1336: Minor refactorings +
          • PR #2666: Add ALTER DOMAIN RENAME command
          • -
          • Issue #1332: Constraint name not set correctly +
          • PR #2663: Add ALTER DOMAIN { SET | DROP } { DEFAULT | ON UPDATE }
          • -
          • Rename fields to reflect actual type +
          • PR #2661: Don't allow construction of incomplete ARRAY and ROW data types
          • -
          • Issue #1331: Regression in Database.updateMeta() +
          • Issue #2659: NULLIF with row values
          • -
          • Issue #1323: Slow update after altering table in 1.4.197 +
          • PR #2658: Extract date-time and some other groups of functions into own classes
          • -
          • PR #1326: Add support of PERCENT in FETCH and TOP clauses +
          • PR #2656: add `_int2` and `_int4` for PgServer
          • -
          • PR #1325: Optimize WITH TIES in some queries and specify data types for KEY_COLUMN_USAGE +
          • PR #2654: Move out JSON, cardinality, ABS, MOD, FLOOR, and CEIL functions from the Function class
          • -
          • PR #1321: Do not add rows before OFFSET to result if possible +
          • PR #2653: Use full TypeInfo for conversions between PG and H2 data types
          • -
          • PR #1319: Treat NEXTVAL as an auto-generated key +
          • PR #2652: Add "SHOW ALL"
          • -
          • PR #1318: Mode append for MVPlainTempResult +
          • PR #2651: add `pg_type.typelem` and `pg_type.typdelim`
          • -
          • PR #1314: Add ALTER VIEW RENAME command +
          • PR #2649: Extract some groups of functions from Function class
          • -
          • PR #1313, issue #1315: Bugfix - using default locale encoding issue in conversion between varchar and varbinary value, and checking javac output text issue in SourceCompiler +
          • PR #2646: Add some PostgreSQL compatibility features
          • -
          • PR #1312: Add Java 9+ support to NIO_CLEANER_HACK +
          • PR #2645: Add CURRENT_PATH, CURRENT_ROLE, SESSION_USER, and SYSTEM_USER
          • -
          • PR #1311: Fix minor issues with ResultSet.getObject(..., Class) and WITH TIES +
          • Issue #2643: Send PG_TYPE_TEXTARRAY values to ODBC drivers properly
          • -
          • Issue #1298: TestKillRestartMulti: A map named undoLog.2 already exists +
          • PR #2642: Throw proper exceptions from array element reference and TRIM_ARRAY
          • -
          • Issue #1307: Invalid value "null" for parameter "calendar" [90008-193] +
          • PR #2640: German translations
          • -
          • PR #1306: Add initial implementation of WITH TIES clause +
          • Issue #2108: Add possible candidates in different case to table not found exception
          • -
          • PR #1304: Update changelog and fix building of documentation +
          • Issue #2633: Multi-column UPDATE assignment needs to be reimplemented
          • -
          • PR #1302: Use OpenJDK instead of OracleJDK 10 in Travis builds due to Travis problem +
          • PR #2635: Implement REGEXP_SUBSTR function
          • -
          • Issue #1032: Error when executing "SELECT DISTINCT ON" +
          • PR #2632: Improve ROW data type
          • -
          • Issue #1295: ConditionInSelect violates requirements of LocalResult +
          • PR #2630: fix: quoted VALUE in documentation
          • -
          • PR #1296: Assorted minor changes +
          • Issue #2628: Cached SQL throws JdbcSQLSyntaxErrorException if executed with different parameter values than before
          • -
          • PR #1293: Move HELP and SHOW tests into own files +
          • Issue #2611: Add quantified distinct predicate
          • -
          • PR #1291: Fix update count for REPLACE and move some SQL tests into separate files +
          • Issue #2620: LOBs in triggers
          • -
          • PR #1290: Do not load the whole LOBs into memory for comparison operation +
          • PR #2619: ARRAY_MAX_CARDINALITY and TRIM_ARRAY functions
          • -
          • Issue #408: DISTINCT does not properly work with ORDER BY on function like LOWER +
          • PR #2617: Add Feature F262: Extended CASE expression
          • -
          • PR #1286: Fix MVTempResult implementations for results with invisible columns +
          • PR #2615: Add feature T461: Symmetric BETWEEN predicate
          • -
          • Issue #1284: Nanoseconds of timestamps from old H2 versions are not read properly +
          • PR #2614: Fix support of multi-dimensional arrays in Java functions
          • -
          • PR #1283: Clean up interaction between LocalResult and ResultExternal +
          • Issue #2608: Improve concatenation operation for multiple operands
          • -
          • Issue #1265: OOME is not handled properly in TraceObject.logAndConvert() +
          • PR #2605: Assorted minor changes
          • -
          • Issue #1061: Regression: Braces after WITH clause not allowed anymore +
          • Issue #2602: H2 doesn't allow to create trigger from Java source code if there are nested classes
          • -
          • PR #1277: Assorted changes in Parser +
          • PR #2601: Add field SLEEP_SINCE to INFORMATION_SCHEMA.SESSIONS table
          • -
          • PR #1276: Improve support of ARRAY and SQLXML in JDBC layer +
          • Issue #1973: Standard MERGE statement doesn't work with views
          • -
          • PR #1275: Do not quote other lower case characters +
          • Issue #2552: MERGE statement should process each row only once
          • -
          • PR #1274: Use token type in Parser instead of string comparisons +
          • Issue #2548: Wrong update count when MERGE statement visits matched rows more than once
          • -
          • PR #1272: Reduce code duplication in Parser +
          • Issue #2394: H2 does not accept DCL after source merge table
          • -
          • PR #1271: Minor memory leak +
          • Issue #2196: Standard MERGE statement doesn't release the source view
          • -
          • PR #1270: drop TableView isPersistent field +
          • Issue #2567: ARRAY-returning Java functions don't return the proper data type
          • -
          • PR #1269: Eliminate commit of empty batch in some tests +
          • Issue #2584: Regression in NULL handling in multiple AND or OR conditions
          • -
          • Issue #1266: Add INFORMATION_SCHEMA.COLUMNS.DATETIME_PRECISION +
          • PR #2577: PgServer: `array_to_string()` and `set join_collapse_limit`
          • -
          • Issue #1261: How to discover stored enum types through INFORMATION_SCHEMA +
          • PR #2568: Add BIT_XOR_AGG aggregate function
          • -
          • Issue #1258: Failing to remove index when using schema.table +
          • PR #2565: Assorted minor changes
          • -
          • PR #1256: misc tiny refactorings +
          • PR #2563: defrag is not contributing much, remove from test run
          • -
          • PR #1255: Minor changes in MERGE USING, DATE_TRUNC, and EXTRACT +
          • PR #2562: new exception MVStoreException
          • -
          • Issue #1214: Internal compiler believes that "3 warnings" is an error +
          • PR #2557: don't throw IllegalStateException in checkOpen
          • -
          • PR #1252: Assorted minor changes +
          • PR #2554: Reenable mvstore TestCrashAPI
          • -
          • PR #1251: Fix SQL representation of CAST for types with fractional seconds precision +
          • Issue #2556: TestOutOfMemory: Table "STUFF" not found
          • -
          • PR #1250: Batch append mode for MVMap +
          • PR #2555: Move current datetime value functions into own class
          • -
          • PR #1248: StringIndexOutOfBoundsException due to undoLog map +
          • PR #2547: split up the ValueLob classes
          • -
          • PR #1246: Detect disabled tests +
          • PR #2542: Pipelining mvstore chunk creation / save
          • -
          • PR #1242: Add implementation of SQLXML interface +
          • Issue #2550: NullPointerException with MERGE containing unknown column in AND condition of WHEN
          • -
          • PR #1241: Various tweaks in attempting to fix TestDiskFull test +
          • Issue #2546: Disallow empty CASE specifications and END CASE
          • -
          • PR #1240: Optimise ValueLobDB comparison methods +
          • Issue #2530: Long query with many AND expressions causes StackOverflowError
          • -
          • PR #1239: Don't try to find tools.jar on Java 9+ +
          • PR #2543: Improve case specification support and fix some issues with it
          • -
          • PR #1238: remove unfinished android API +
          • Issue #2539: Replace non-standard functions with standard code directly in Parser
          • -
          • PR #1237: remove JaQu +
          • Issue #2521: Disallow untyped arrays
          • -
          • PR #1236: remove STORE_LOCAL_TIME code +
          • Issue #2532: Duplicate column names in derived table should be acceptable in the presence of a derived column list that removes ambiguities
          • -
          • PR #1235: Do not use deprecated Class.newInstance() +
          • PR #2527: Feature: allow @ meta commands from Console
          • -
          • PR #1234: Fix NPE in Parser.parseMergeUsing() +
          • PR #2526: Reduce I/O during database presence check and restrict some compatibility settings to their modes
          • -
          • PR #1233: Simplify old lob ValueLob class +
          • PR #2525: Restore support of third-party drivers in the Shell tool
          • -
          • Issue #1227: lob growth in pagestore mode +
          • Issue #1710: getHigherType() returns incorrect type for some arguments
          • -
          • PR #1230: clean up some javadoc and some throws clauses +
          • PR #2516: SHUTDOWN IMMEDIATELY should be a normal shut down
          • -
          • PR #1229: Create UndoLog only when necessary and remove outdated code +
          • PR #2515: Fix nested comments in ScriptReader
          • -
          • PR #1228: Remove some PageStore+MVCC leftovers +
          • Issue #2511: Restrict Oracle compatibility functions to Oracle compatibility mode
          • -
          • PR #1226: Fix inconsistencies in checks for transaction isolation level +
          • PR #2508: Minor refactoring around Tx isolation level
          • -
          • PR #1224: Enable Java 10 testing on Travis +
          • PR #2505: Assorted changes in DATEADD, DATEDIFF, DATE_TRUNC, and EXTRACT
          • -
          • PR #1223: Fix issues with testing on latest Java versions +
          • Issue #2502: Combination of DML with data change delta table skips subsequent update
          • -
          • PR #1222: Leftovers handling +
          • PR #2499: Performance fix for PageStore under concurrent load
          • -
          • Issue #1220: JDK-9 build fails due to usage of java.xml.bind in external authentication +
          • PR #2498: Add some PostgreSQL compatibility features mentioned in issue #2450
          • -
          • PR #1218: Test utilities only once during TestAll +
          • Issue #2496: Error when using empty JSON_OBJECT() or JSON_ARRAY() functions
          • -
          • PR #1217: Postpone session.endStatement() until after commit +
          • PR #2495: Fix JSON_OBJECT grammar in documentation
          • -
          • PR #1213: KillRestart fix +
          • Issue #2493 / PR #2494: Replace ColumnNamer with mode-specific generation of column names for views
          • -
          • PR #1211: Assorted minor changes +
          • PR #2492: Assorted changes in parser, keywords, and ILIKE condition
          • -
          • Issue #1204: Always use MVCC with MVStore and never use it with PageStore +
          • PR #2490: Replace pg_catalog.sql with PgCatalogTable and use DATABASE_TO_LOWER in PG Server
          • -
          • PR #1206: Forbid reconnects in non-regular modes in TestScript +
          • Issue #2488 / PR #2489: Mark version functions as not deterministic
          • -
          • PR #1205: Misc test fixes +
          • Issue #2481: Convert TO to keyword
          • -
          • Issue #1198: Enable MULTI_THREADED by default for MVStore mode +
          • PR #2476: Add some PostgreSQL compatibility features mentioned in issue #2450
          • -
          • Issue #1195: Calling setBytes to set VARCHAR field fails +
          • PR #2479: Recognize absolute path on Windows without drive letter
          • -
          • PR #1197: Fix or suppress errors in tests +
          • Issue #2475: Select order by clause is exported with non-portable SQL
          • -
          • PR #1194: TestKillRestartMulti: A map named undoLog-1 already exists +
          • Issue #2472: Updating column to empty string in Oracle mode with prepared statement does not result in null
          • -
          • PR #1193: enable TestRandomSQL on non-memory databases +
          • PR #2468: MVStore scalability improvements
          • -
          • PR #1191: External authentication with datasource issue +
          • PR #2466: Add partial support for MySQL COLLATE and CHARACTER statements
          • -
          • PR #1188: Undo log split to reduce contention +
          • Issue #2464: `client_encoding='utf-8'` (single quoted) from `node-postgres` not recognized
          • -
          • PR #1186: TransactionMap::sizeAsLong() optimized - temp map eliminated +
          • Issue #2461: Support for binary_float and binary_double type aliases
          • -
          • PR #1185: Improve naming of the object id field in Prepared +
          • Issue #2460: Exception when accessing empty arrays
          • -
          • Issue #1196: Feature request for MS SQL Server Compatibility Mode +
          • Issue #2318: Remove incorrect rows from DatabaseMetaData.getTypeInfo() and INFORMATION_SCHEMA.TYPE_INFO
          • -
          • Issue #1177: Resource leak in Recover tool +
          • Issue #2455: `bytea` column incorrectly read by `psycopg2`
          • -
          • PR #1183: Improve concurrency of connection pool with wait-free implementation +
          • PR #2456: Add standard array value constructor by query
          • -
          • Issue #1073: H2 v1.4.197 fails to open an existing database with the error [Unique index or primary key violation: "PRIMARY KEY ON """".PAGE_INDEX"] +
          • PR #2451: Add some PostgreSQL compatibility features mentioned in issue #2450
          • -
          • PR #1179: Drop TransactionMap.readLogId +
          • Issue #2448: Change default data type name from DOUBLE to DOUBLE PRECISION
          • -
          • PR #1181: Improve CURRENT_TIMESTAMP and add LOCALTIME and LOCALTIMESTAMP +
          • PR #2452: Do not use unsafe and unnecessary FROM DUAL internally
          • -
          • PR #1176: Magic value replacement with constant +
          • PR #2449: Add support for standard trigraphs
          • -
          • PR #1171: Introduce last committed value into a VersionedValue +
          • Issue #2439: StringIndexOutOfBoundsException when using TO_CHAR
          • -
          • PR #1175: tighten test conditions - do not ignore any exceptions +
          • Issue #2444: WHEN NOT MATCHED THEN INSERT should accept only one row
          • -
          • PR #1174: Remove mapid +
          • Issue #2434: Next value expression should return the same value within a processed row
          • -
          • PR #1173: protect first background exception encountered and relate it to clients +
          • PR #2437: Assorted changes in MVStore
          • -
          • PR #1172: Yet another attempt to tighten that testing loop +
          • Issue #2430: Postgres `bytea` column should be read with and without `forceBinary`
          • -
          • PR #1170: Add support of CONTINUE | RESTART IDENTITY to TRUNCATE TABLE +
          • Issue #2267: BINARY and VARBINARY should be different
          • -
          • Issue #1168: ARRAY_CONTAINS() returning incorrect results when inside subquery with Long elements. +
          • Issue #2266: CHAR and BINARY should have length 1 by default
          • -
          • PR #1167: MVStore: Undo log synchronization removal +
          • PR #2426: Add MD5 and all SHA-1, SHA-2, and SHA-3 digests to the HASH() function
          • -
          • PR #1166: Add SRID support to EWKT format +
          • Issue #2424: 0 should not be accepted as a length of data type
          • -
          • PR #1165: Optimize isTargetRowFound() and buildColumnListFromOnCondition() in MergeUsing +
          • Issue #2378: JAVA_OBJECT and TableLink
          • -
          • PR #1164: More fixes for parsing of MERGE USING and other changes in Parser +
          • Issue #2417: Casts between binary strings and non-string data types
          • -
          • PR #1154: Support for external authentication +
          • Issue #2416: OTHER and JAVA_OBJECT
          • -
          • PR #1162: Reduce allocation of temporary strings +
          • Issue #2379: SQL export can change data type of a constant
          • -
          • PR #1158: make fields final +
          • Issue #2411: ArrayIndexOutOfBoundsException when HAVING and duplicate columns in SELECT
          • -
          • Issue #1129: TestCrashAPI / TestFuzzOptimizations throw OOME on Travis in PageStore mode +
          • Issue #2194: Add own enumeration of data types to API
          • -
          • PR #1156: Add support for SQL:2003 WITH [NO] DATA to CREATE TABLE AS +
          • PR #2408: Descending MVMap and TransactionMap cursor
          • -
          • PR #1149: fix deadlock between OnExitDatabaseCloser.DATABASES and Engine.DATABASES +
          • Issue #2399: Cast to ARRAY with a nested ARRAY does not check the maximum cardinality of the nested ARRAY
          • -
          • PR #1152: skip intermediate DbException object when creating SQLException +
          • Issue #2402: Remove old ValueLob and DbUpgrade
          • -
          • PR #1144: Add missing schema name with recursive view +
          • Issue #2400: Inconsistent data type conversion between strings and LOBs
          • -
          • Issue #1091: get rid of the "New" class +
          • PR #2398: Add expandable flags for SQL generation methods
          • -
          • PR #1147: Assorted minor optimizations +
          • PR #2395: Fix for two recent page format bugs
          • -
          • PR #1145: Reduce code duplication +
          • PR #2386: Chunk occupancy mask
          • -
          • PR #1142: Misc small fixes +
          • PR #2385: Memory estimate
          • -
          • PR #1141: Assorted optimizations and fixes +
          • PR #2381: Follow up REPEATABLE_READ-related changes
          • -
          • PR #1138, #1139: Fix a memory leak caused by DatabaseCloser objects +
          • PR #2380: use JIRA tracker URLs for JDK bugs
          • -
          • PR #1137: Step toward making transaction commit atomic +
          • PR #2376: Fix IN condition with row value expressions in its right side
          • -
          • PR #1136: Assorted minor optimizations +
          • Issue #2367 / PR #2370: fix backward compatibility with 1.4.200
          • -
          • PR #1134: Detect possible overflow in integer division and optimize some code +
          • Issue #2371: REPEATABLE READ isolation level does not work in MVStore
          • -
          • PR #1133: Implement Comparable<Value> in CompareMode and optimize ValueHashMap.keys() +
          • Issue #2363: Soft links in -baseDir and database path cause error 90028
          • -
          • PR #1132: Reduce allocation of ExpressionVisitor instances +
          • Issue #2364: TestScript datatypes/timestamp-with-time-zone.sql fails if TZ=Europe/Berlin
          • -
          • PR #1130: Improve TestScript and TestCrashAPI +
          • Issue #2359: Complete implementation of generated columns
          • -
          • PR #1128: Fix ON DUPLICATE KEY UPDATE with ENUM +
          • PR #2361: Fix unused result
          • -
          • PR #1127: Update JdbcDatabaseMetaData.getSQLKeywords() and perform some minor optimizations +
          • PR #2353: Push binary search operation from Page to DataType
          • -
          • PR #1126: Fix an issue with code coverage and building of documentation +
          • Issue #2348: Add USING clause to ALTER COLUMN CHANGE DATA TYPE
          • -
          • PR #1123: Fix TCP version check +
          • Issue #2350: License Problem in POM
          • -
          • PR #1122: Assorted changes +
          • Issue #2345: Add standard SET TIME ZONE command to set current time zone of the session
          • -
          • PR #1121: Add some protection to ValueHashMap against hashes with the same less significant bits +
          • PR #2341: Cleanup file backend sync
          • -
          • Issue #1097: H2 10x slower than HSQLDB and 6x than Apache Derby for specific query with GROUP BY and DISTINCT subquery +
          • Issue #2343: Domain-based domains: Domain not found after reconnection
          • -
          • Issue #1093: Use temporary files for ResultSet buffer tables in MVStore +
          • Issue #2338: Domains should not support NULL constraints
          • -
          • PR #1117: Fix sorting with distinct in ResultTempTable +
          • Issue #2334: build target mavenInstallLocal broken since commit 7cbbd55e
          • -
          • Issue #1095: Add support for INSERT IGNORE INTO <table> (<columns>) SELECT in MySQL Mode +
          • Issue #2335: TestDateTimeUtils fails if system timezone has DST in the future
          • -
          • PR #1114: Minor cleanup and formatting fixes +
          • Issue #2330: Syntax error with parenthesized expression in GROUP BY clause
          • -
          • PR #1112: Improve test scripts +
          • Issue #2256: <interval value expression> with datetime subtraction
          • -
          • PR #1111: Use a better fix for issue with SRID +
          • Issue #2325: H2 does not parse nested bracketed comments correctly
          • -
          • Issue #1107: Restore support of DATETIME2 with specified fractional seconds precision +
          • Issue #466: Confusion about INFORMATION_SCHEMA content related to UNIQUE constraints
          • -
          • Issue #1106: Get rid of SwitchSource +
          • PR #2323: Assorted changes
          • -
          • PR #1105: Assorted minor changes +
          • Issue #2320: Remove SAMPLE_SIZE clause from SELECT
          • -
          • Issue #1102: CREATE SYNONYM rejects valid definition +
          • Issue #2301: Add compatibility setting to accept some keywords as identifiers
          • -
          • PR #1103: Remove redundant synchronization +
          • PR #2317: Replace CHECK_COLUMN_USAGE with CONSTRAINT_COLUMN_USAGE and other changes
          • -
          • Issue #1048: 1.4.197 regression. org.h2.jdbc.JdbcSQLException: Timeout trying to lock table "SYS" +
          • Issue #2315: Sequence must remember its original START WITH value
          • -
          • PR #1101: Move some tests in better place and add an additional test for 2PC +
          • Issue #2313: DISTINCT does not work in ordered aggregate functions
          • -
          • PR #1100: Fix Insert.prepareUpdateCondition() for PageStore +
          • PR #2306: Add support for RESTART of sequence without initial value
          • -
          • PR #1098: Fix some issues with NULLS FIRST / LAST +
          • Issue #2304: NPE in multiple define commands in one statement after upgrade from H2 1.4.197
          • -
          • Issue #1089: Parser does not quote words INTERSECTS, DUAL, TOP +
          • PR #2303: Assorted minor changes
          • -
          • Issue #230: Renaming a column does not update foreign key constraint +
          • Issue #2286: Inline check constraints not in INFORMATION_SCHEMA
          • -
          • Issue #1091: Get rid of the New class +
          • PR #2300: Continue generification of MVStore codebase
          • -
          • PR #1087: improve performance of planning large queries +
          • PR #2298: add some minimal security documentation
          • -
          • PR #1085: Add tests for simple one-column index sorting +
          • PR #2292: synchronize fileBase subclasses use of position
          • -
          • PR #1084: re-enable some pagestore testing +
          • PR #2238: Some MVStore refactoring
          • -
          • PR #1083: Assorted changes +
          • Issue #2288: ConcurrentModificationException during commit
          • -
          • Issue #394: Recover tool places COLLATION and BINARY_COLLATION after temporary tables +
          • Issue #2293: Remove TestClearReferences and workarounds for old versions of Apache Tomcat
          • -
          • PR #1081: Session.getTransactionId should return a more distinguishing value +
          • Issue #2288: ConcurrentModificationException during commit
          • -
          • Improve the script-based unit testing to check the error code of the exception thrown. +
          • PR #2284: Remove unrelated information from README and add some information about H2
          • -
          • Issue #1041: Support OR syntax while creating trigger +
          • PR #2282: add PostgreSQL compatible variable STATEMENT_TIMEOUT
          • -
          • Issue #1023: MVCC and existing page store file +
          • PR #2280: little comment
          • -
          • Issue #1003: Decrypting database with incorrect password renders the database corrupt +
          • Issue #2205: H2 1.4.200 split FS issue
          • -
          • Issue #873: No error when `=` in equal condition when column is not of array type +
          • Issue #2272: UpdatableView and obtaining the Generated Keys
          • -
          • Issue #1069: Failed to add DATETIME(3) column since 1.4.197 +
          • PR #2276: Split up filesystem classes
          • -
          • Issue #456: H2 table privileges referring to old schema after schema rename +
          • PR #2275: improve detection of JAVA_HOME on Mac OS
          • -
          • Issue #1062: Concurrent update in table "SYS" caused by Analyze.analyzeTable() +
          • Issue #2268: Numeric division needs better algorithm for scale selection
          • -
          • Yet another fix to Page memory accounting +
          • Issue #2270: IGNORE_UNKNOWN_SETTINGS is ignored
          • -
          • Replace MVStore.ASSERT variable with assertions +
          • PR #2269: Fix existence check of non-persistent databases
          • -
          • Issue #1063: Leftover comments about enhanced for loops +
          • Issue #1910: BinaryOperation should evaluate precision and scale properly
          • -
          • PR #1059: Assorted minor changes +
          • PR #2264: Clean up redundant parts of file system abstraction
          • -
          • PR #1058: Txcommit atomic +
          • PR #2262: add setting AUTO_COMPACT_FILL_RATE
          • -
          • Issue #1038: ora_hash function implementation off by one +
          • Issue #2255 / PR #2259: Use NIO2 in main sources and build
          • -
          • PR #1054: Introduce overflow bit in tx state +
          • PR #2257: Catch java.lang.NoClassDefFoundError
          • -
          • Issue #1047: Support DISTINCT in custom aggregate functions +
          • Issue #2241: Mark H2-specific and compatibility only clauses in documentation
          • -
          • PR #1051: Atomic change of transaction state +
          • PR #2246: Update third-party drivers
          • -
          • PR #1046: Split off Transaction TransactionMap VersionedValue +
          • Issue #2239 / PR #2236: Add NETWORK_TIMEOUT setting for SO_TIMEOUT
          • -
          • PR #1045: TransactionStore move into separate org.h2.mvstore.tx package +
          • PR #2235: Don't use RandomAccessFile in FilePathNio
          • -
          • PR #1044: Encapsulate TransactionStore.store field in preparation to a move +
          • Issue #2233: "Prepared.getObjectId() was called before" when granting on multiple tables
          • -
          • PR #1040: generate less garbage for String substring+trim +
          • PR #2230: Add factory methods for Row
          • -
          • PR #1035: Minor free space accounting changes +
          • Issue #2226, PR #2227: Remove support of Apache Ignite
          • -
          • Issue #1034: MERGE USING should not require the same column count in tables +
          • PR #2224: Update some hyperlinks and use https in them where possible
          • -
          • PR #1033: Fix issues with BUILTIN_ALIAS_OVERRIDE=1 +
          • PR #2223: Fix data change delta tables in views
          • -
          • PR #1031: Drop schema rights together with schema +
          • Issue #1943: Deadlock in TestTriggersConstraints
          • -
          • PR #1029: No need to remove orphaned LOBs when the db is read-only +
          • PR #2219: do not retry failed DDL commands
          • -
          • Issue #1027: Add support for fully qualified names in MySQL compatibility mode +
          • PR #2214: Fix TRACE_LEVEL_FILE=4 for in-memory databases
          • -
          • Issue #178: INSERT ON DUPLICATE KEY UPDATE returns wrong generated key +
          • PR #2216: Add FileChannel.lock in the connection URL summary
          • -
          • PR #1025: Remove BitField and replace its usages with BitSet +
          • PR #2215: Add white-space: pre to tables with query results
          • -
          • Issue #1019: Console incorrectly sorts BigDecimal columns alphanumerically +
          • Issue #2213: NUMERIC scale can be larger than a precision
          • -
          • PR #1021: Update JdbcDatabaseMetaData to JDBC 4.1 (Java 7) +
          • PR #2212: Get rid of multi-version CurrentTimestamp and fix negative scale of NUMERIC
          • -
          • Issue #992: 1.4.197 client cannot use DatabaseMetaData with 1.4.196 and older server +
          • PR #2210: Meta table extras
          • -
          • Issue #1016: ResultSet.getObject() should return enum value, not ordinal +
          • PR #2209: Add standard expressions with interval qualifier
          • -
          • Issue #1012: NPE when querying INFORMATION_SCHEMA.COLUMNS on a view that references an ENUM column +
          • PR #2195: Feature abort_session function
          • -
          • Issue #1010: MERGE USING table not found with qualified table +
          • PR #2201: Add padding to negative years and other changes
          • -
          • PR #1009: Fix ARRAY_AGG with ORDER BY and refactor aggregates +
          • PR #2197: Add some additional methods from JDBC 4.2 and return 4.2 as supported version
          • -
          • Issue #1006: "Empty enums are not allowed" in 1.4.197 (mode=MYSQL) +
          • PR #2193: Require Java 8 and remove Java 7 support
          • -
          • PR #1007: Copy also SRID in ValueGeometry.getGeometry() +
          • Issue #2191: NPE with H2 v1.4.200 repeatable read select queries
          • -
          • PR #1004: Preserve type names in more places especially for UUID +
          • Issue #1390: Add standard-compliant ARRAY data type syntax
          • -
          • Issue #1000: Regression in INFORMATION_SCHEMA.CONSTRAINTS.CONSTRAINT_TYPE content +
          • PR #2186: Refactor Parser.parseColumnWithType() and fix some minor issues with CAST
          • -
          • Issue #997: Can not delete from tables with enums +
          • Issue #2181: SET EXCLUSIVE quirks
          • -
          • Issue #994: Too much column in result set for GENERATED_KEYS on table with DEFAULT +
          • PR #2173: Move snapshots from Transaction to TransactionMap
          • -
          • PR #993: Fix some compiler warnings and improve assert*() methods +
          • Issue #2175: Regression: NPE in ResultSet#getTime(int)
          • -
          • PR #991: Generate shorter queries in JdbcDatabaseMetaData.getTables() and remove some dead code +
          • Issue #2171: Wrong PostgreSQL compatibility syntax for the creation of indexes
          • -
          • PR #989: Fix more issues with range table and improve its documentation +
          • PR #2169: Clean up some find methods of indexes and fix minor issues with them
          -

          Version 1.4.197 (2018-03-18)

          +

          Version 1.4.200 (2019-10-14)

            -
          • PR #988: Fix RangeTable.getRowCount() for non-default step -
          • -
          • PR #987: ValueBoolean constants are not cleared and may be used directly -
          • -
          • PR #986: Check parameters in JdbcPreparedStatement.addBatch() -
          • -
          • PR #984: Minor refactorings in Parser -
          • -
          • PR #983: Code cleanups via IntelliJ IDEA inspections -
          • -
          • Issue #960: Implement remaining time unit in "date_trunc" function -
          • -
          • Issue #933: MVStore background writer endless loop -
          • -
          • PR #981: Reorganize date-time functions -
          • -
          • PR #980: Add Parser.toString() method for improved debugging experience -
          • -
          • PR #979: Remove support of TCP protocol versions 6 and 7 -
          • -
          • PR #977: Add database versions to javadoc of TCP protocol versions and update dictionary.txt -
          • -
          • PR #976: Add and use incrementDateValue() and decrementDateValue() -
          • -
          • Issue #974: Inline PRIMARY KEY definition loses its name -
          • -
          • PR #972: Add META-INF/versions to all non-Android jars that use Bits -
          • -
          • PR #971: Update ASM from 6.1-beta to 6.1 -
          • -
          • PR #970: Added support for ENUM in prepared statement where clause -
          • -
          • PR #968: Assorted changes -
          • -
          • PR #967: Adds ARRAY_AGG function -
          • -
          • PR #966: Do not include help and images in client jar -
          • -
          • PR #965: Do not include mvstore.DataUtils in client jar and other changes -
          • -
          • PR #964: Fix TestFunctions.testToCharFromDateTime() -
          • -
          • PR #963 / Issue #962: Improve documentation of compatibility modes and fix ssl URL description -
          • -
          • Issue #219: H2 mode MySQL- ON UPDATE CURRENT_TIMESTAMP not supported -
          • -
          • PR #958: More fixes for PgServer -
          • -
          • PR #957: Update database size information and links in README.md -
          • -
          • PR #956: Move tests added in 821117f1db120a265647a063dca13ab5bee98efc to a proper place -
          • -
          • PR #955: Support getObject(?, Class) in generated keys -
          • -
          • PR #954: Avoid incorrect reads in iterators of TransactionMap -
          • -
          • PR #952: Optimize arguments for MVMap.init() -
          • -
          • PR #949: Fix table borders in PDF and other changes -
          • -
          • PR #948: Fix some grammar descriptions and ALTER TABLE DROP COLUMN parsing -
          • -
          • PR #947: Fix building of documentation and use modern names of Java versions -
          • -
          • PR #943: Assorted changes in documentation and a fix for current-time.sql -
          • -
          • PR #942: Fix page numbers in TOC in PDF and move System Tables into own HTML / section in PDF -
          • -
          • PR #941: Use >> syntax in median.sql and move out more tests from testScript.sql -
          • -
          • PR #940: add Support for MySQL: DROP INDEX index_name ON tbl_name -
          • -
          • PR #939: Short syntax for SQL tests -
          • -
          • Issue #935: The "date_trunc" function is not recognized for 'day' -
          • -
          • PR #936: Fix font size, line length, TOC, and many broken links in PDF -
          • -
          • PR #931: Assorted changes in documentation -
          • -
          • PR #930: Use Math.log10() and remove Mode.getOracle() -
          • -
          • PR #929: Remove Mode.supportOffsetFetch -
          • -
          • PR #928: Show information about office configuration instead of fallback PDF generation mode -
          • -
          • PR #926: Describe datetime fields in documentation -
          • -
          • PR #925: Fix time overflow in DATEADD +
          • PR #2168: Add non-standard SNAPSHOT isolation level to MVStore databases
          • -
          • Issue #416: Add support for DROP SCHEMA x { RESTRICT | CASCADE } +
          • Issue #2165: Problem with secondary index on SERIALIZABLE isolation level
          • -
          • PR #922: Parse and treat fractional seconds precision as described in SQL standard +
          • Issue #2161: Remove undocumented PageStore-only FILE_LOCK=SERIALIZED
          • -
          • Issue #919: Add support for mixing adding constraints and columns in multi-add ALTER TABLE statement +
          • PR #2155: Reduce code duplication
          • -
          • PR #916: Implement TABLE_CONSTRAINTS and REFERENTIAL_CONSTRAINTS from the SQL standard +
          • Issue #1894: Confusing error message when database creation is disallowed
          • -
          • PR #915: Implement INFORMATION_SCHEMA.KEY_COLUMN_USAGE from SQL standard +
          • Issue #2123: Random failures in TestTransactionStore
          • -
          • PR #914: don't allow null values in ConcurrentArrayList +
          • Issue #2153: Different behavior in SET LOCK_TIMEOUT after 1.4.197
          • -
          • PR #913: Assorted changes in tests and documentation +
          • Issue #2150: Remove MULTI_THREADED setting and use multi-threaded MVStore and single-threaded PageStore backends
          • -
          • Issue #755: Missing FLOAT(precision)? +
          • Issue #216: Support READ UNCOMMITTED isolation level in MVStore mode
          • -
          • PR #911: Add support for MySQL-style ALTER TABLE ADD ... FIRST +
          • Issue #678: Support REPEATABLE READ isolation level in MVStore mode
          • -
          • Issue #409: Support derived column list syntax on table alias +
          • Issue #174: Support SERIALIZABLE isolation level in MVStore mode
          • -
          • PR #908: remove dead code +
          • Issue #2144: MVStore: read uncommitted doesn't see committed rows
          • -
          • PR #907: Nest joins only if required and fix some issues with complex joins +
          • Issue #2142: CURRVAL / CURRENT VALUE FOR should return the value for the current session
          • -
          • PR #906: Fix obscure error on non-standard SELECT * FROM A LEFT JOIN B NATURAL JOIN C +
          • Issue #2136: ConstraintCheck concurrency regression
          • -
          • PR #805: Move some JOIN tests from testScript.sql to own file +
          • PR #2137: Don't use SYSTEM_RANGE for SELECT without a FROM
          • -
          • PR #804: Remove unused parameters from readJoin() and readTableFilter() +
          • PR #2134: Assorted fixes and other changes in DateTimeUtils
          • -
          • Issue #322: CSVREAD WHERE clause containing ORs duplicates number of rows +
          • PR #2133: Optimize COUNT([ALL] constant) and other changes
          • -
          • PR #902: Remove DbSettings.nestedJoins +
          • PR #2132: Typo and another bug in MVStore.readStoreHeader()
          • -
          • PR #900: Convert duplicate anonymous classes in TableFilter to nested for reuse +
          • Issue #2130: Group-sorted query returns invalid results with duplicate grouped columns in select list
          • -
          • PR #899: Fix ON DUPLICATE KEY UPDATE for inserts with multiple rows +
          • Issue #2120: Add IF EXISTS clause to column name in ALTER TABLE ALTER COLUMN statement
          • -
          • PR #898: Parse TIME WITHOUT TIME ZONE and fix TIMESTAMP as column name +
          • Issue #521: Add support for the TIME WITH TIME ZONE data type
          • -
          • PR #897: Update JTS to version 1.15.0 from LocationTech +
          • PR #2127: Fix race condition / performance issue during snapshotting
          • -
          • PR #896: Assorted changes in help.csv +
          • Issue #2124: MVStore build is broken
          • -
          • PR #895: Parse more variants of timestamps with time zones +
          • PR #2122: Add support for LMT in time zones and fix large years in datetime values
          • -
          • PR #893: TIMESTAMP WITHOUT TIME ZONE, TIMEZONE_HOUR, and TIMEZONE_MINUTE +
          • Issue #2067: Incorrect chunk space allocation during chunks movement
          • -
          • PR #892: Assorted minor changes in Parser +
          • PR #2066: Not so happy path - "four alternatives" implementation
          • -
          • PR #891: Update documentation of date-time types and clean up related code a bit +
          • PR #2121: Reduce code duplication for datetime API with custom Calendar instances
          • -
          • PR #890: Implement conversions for TIMESTAMP WITH TIME ZONE +
          • PR #2119: SQL: statement read consistency
          • -
          • PR #888: Fix two-phase commit in MVStore +
          • Issue #2116: Empty IN() operator should result in error (MSSQL)
          • -
          • Issue #884: Wrong test Resources path in pom.xml +
          • Issue #2036: CAST from TIME to TIMESTAMP returns incorrect result
          • -
          • PR #886: Fix building of documentation +
          • PR #2114: Assorted changes
          • -
          • PR #883: Add support for TIMESTAMP WITH TIME ZONE to FORMATDATETIME +
          • PR #2113: Add feature F411: Time zone specification
          • -
          • PR #881: Reimplement dateValueFromDate() and nanosFromDate() without a Calendar +
          • PR #2111: CURRENT_CATALOG, SET CATALOG and other changes
          • -
          • PR #880: Assorted date-time related changes +
          • Issue #2109: IW date formatting does not produce proper output
          • -
          • PR #879: Reimplement TO_DATE without a Calendar and fix a lot of bugs and incompatibilities +
          • PR #2104: Fix ordinary grouping set with parentheses and empty grouping set in GROUP BY
          • -
          • PR #878: Fix IYYY in TO_CHAR and reimplement TRUNCATE without a Calendar +
          • Issue #2103: Add QUOTE_IDENT() function to enquote an identifier in SQL
          • -
          • PR #877: Reimplement TO_CHAR without a Calendar and fix 12 AM / 12 PM in it +
          • Issue #2075: Add EXECUTE IMMEDIATE implementation
          • -
          • PR #876: Test out of memory +
          • PR #2101: Fix infinite loop in Schema.removeChildrenAndResources()
          • -
          • PR #875: Improve date-time related parts of documentation +
          • Issue #2096: Convert LEFT and RIGHT to keywords and disallow comma before closing parenthesis
          • -
          • PR #872: Assorted date-time related changes +
          • PR #2098: Fix typos
          • -
          • PR #871: Fix OOME in Transfer.readValue() with large CLOB V2 +
          • Issue #1305 / PR #2097: Remove unused and outdated website translation infrastructure
          • -
          • PR #867: TestOutOfMemory stability +
          • PR #2093: CURRENT VALUE FOR and other sequence-related changes
          • -
          • Issue #834: Add support for the SQL standard FILTER clause on aggregate functions +
          • PR #2092: Allow to simulate usage of multiple catalogs by one connection
          • -
          • PR #864: Minor changes in DateUtils and Function +
          • PR #2091: Oracle mode now uses DECIMAL with NEXTVAL
          • -
          • PR #863: Polish: use isEmpty() to check whether the collection is empty or not. +
          • Issue #2088: Division by zero caused by evaluation of global conditions before local conditions
          • -
          • PR #862: Convert constraint type into enum +
          • Issue #2086: TCP_QUICKACK on server socket
          • -
          • PR #861: Avoid resource leak +
          • Issue #2073: TableLink should not pass queries to DatabaseMetaData.getColumns()
          • -
          • PR #860: IndexCursor inList +
          • Issue #2074: MySQL and MSSQLServer Mode: TRUNCATE TABLE should always RESTART IDENTITY
          • -
          • PR #858 / Issue #690 and others: Return all generated rows and columns from getGeneratedKeys() +
          • Issue #2063: MySQL mode: "drop foreign key if exists" support
          • -
          • Make the JDBC client independent of the database engine +
          • PR #2061: Use VirtualTable as a base class for RangeTable
          • -
          • PR #857: Do not write each SQL error multiple times in TestScript +
          • PR #2059: Parse IN predicate with multiple subqueries correctly
          • -
          • PR #856: Fix TestDateTimeUtils.testDayOfWeek() and example with ANY(?) +
          • PR #2057: Fix TestCrashAPI failure with Statement.enquoteIdentifier()
          • -
          • PR #855: Reimplement DATEADD without a Calendar and fix some incompatibilities +
          • PR #2056: Happy path: speed up database opening
          • -
          • PR #854: Improve test stability +
          • Issue #2051: The website shows outdated information about the storage engine
          • -
          • PR #851: Reimplement DATEDIFF without a Calendar +
          • PR #2049: bugfix - mvstore data lost issue when partial write occurs
          • -
          • Issue #502: SQL "= ANY (?)" supported? +
          • PR #2047: File maintenance
          • -
          • PR #849: Encode date and time in fast and proper way in PgServerThread +
          • PR #2046: Recovery mode
          • -
          • PR #847: Reimplement remaining parts of EXTRACT, ISO_YEAR, etc without a Calendar +
          • Issue #2044: setTransactionIsolation always call commit() even if transaction is auto-commit
          • -
          • PR #846: Read known fields directly in DateTimeUtils.getDatePart() +
          • Issue #2042: Add possibility to specify generated columns for query in web console
          • -
          • Issue #832: Extract EPOCH from a timestamp +
          • Issue #2040: INFORMATION_SCHEMA.SETTINGS contains irrelevant settings
          • -
          • PR #844: Add simple implementations of isWrapperFor() and unwrap() to JdbcDataSource +
          • PR #2038: MVMap: lock reduction on updates
          • -
          • PR #843: Add MEDIAN to help.csv and fix building of documentation +
          • PR #2037: Fix SYS_GUID, RAWTOHEX, and HEXTORAW in Oracle mode
          • -
          • PR #841: Support indexes with nulls last for MEDIAN aggregate +
          • Issue #2016: ExpressionColumn.mapColumns() performance complexity is quadratic
          • -
          • PR #840: Add MEDIAN aggregate +
          • Issue #2028: Sporadic inconsistent state after concurrent UPDATE in 1.4.199
          • -
          • PR #839: TestTools should not leave testing thread in interrupted state +
          • PR #2033: Assorted changes
          • -
          • PR #838: (tests) Excessive calls to Runtime.getRuntime().gc() cause OOM for no reason +
          • Issue #2025: Incorrect query result when (OFFSET + FETCH) > Integer.MAX_VALUE
          • -
          • Don't use substring when doing StringBuffer#append +
          • PR #2023: traverseDown() code deduplication
          • -
          • PR #837: Use StringUtils.replaceAll() in Function.replace() +
          • PR #2022: Mvmap minor cleanup
          • -
          • PR #836: Allow to read invalid February 29 dates with LocalDate as March 1 +
          • Issue #2020: Wrong implementation of IN predicate with subquery
          • -
          • PR #835: Inline getTimeTry() into DateTimeUtils.getMillis() +
          • PR #2003: Change dead chunks determination algorithm
          • -
          • PR #827: Use dateValueFromDate() and nanosFromDate() in parseTimestamp() +
          • Issue #2013: DECIMAL is casted to double in ROUND function
          • -
          • Issue #115: to_char fails with pattern FM0D099 +
          • PR #2011: ZonedDateTime and (INTERVAL / INTERVAL)
          • -
          • PR #825: Merge code for parsing and formatting timestamp values +
          • Issue #1997: TestRandomSQL failure with ClassCastException
          • -
          • Enums for ConstraintActionType, UnionType, and OpType +
          • Issue #2007: PostgreSQL compatibility mode: support ON CONFLICT DO NOTHING
          • -
          • PR #824: Add partial support for INSERT IGNORE in MySQL mode +
          • Issue #1927: Do not allow commit() when auto-commit is enabled
          • -
          • PR #823: Use ValueByte.getInt() and ValueShort.getInt() in convertTo() +
          • PR #1998: Reduce TxCounter memory footprint
          • -
          • PR #820: Fix some compiler warnings +
          • PR #1999: Make RootReference lock re-entrant
          • -
          • PR #818: Fixes for remaining issues with boolean parameters +
          • PR #2001: Test improvements, OOME elimination
          • -
          • Use enum for file lock method +
          • Issue #1995: Obscure condition in MVPrimaryIndex.extractPKFromRow()
          • -
          • PR #817: Parse also 1 as true and 0 as false in Utils.parseBoolean() +
          • Issue #1975: Add client ip address to information_schema
          • -
          • PR #815: Fix count of completed statements +
          • PR #1982: Hindi language translation added
          • -
          • PR #814: Method.isVarArgs() is available on all supported platforms +
          • Issue #1985: Add thread number to TCP server thread names
          • -
          • Issue #812: TIME values should be in range 0:00:00.000000000 - 23:59:59.999999999? +
          • Do not allow empty password for management DB
          • -
          • PR #811: Issues with Boolean.parseBoolean() +
          • Issue #1978: getGeneratedKeys() can use the same rules as FINAL TABLE
          • -
          • PR #809: Use type constants from LocalDateTimeUtils directly +
          • PR #1977: Change JSON literals and add support for compound character literals
          • -
          • PR #808: Use HmacSHA256 provided by JRE +
          • PR #1974: Use proleptic Gregorian calendar for datetime values
          • -
          • PR #807: Use SHA-256 provided by JRE / Android and use rotateLeft / Right in Fog +
          • Issue #1847: Add support for data change delta tables
          • -
          • PR #806: Implement setBytes() and setString() with offset and len +
          • PR #1971: Add maximum cardinality parameter to ARRAY data type
          • -
          • PR #805: Improve support of TIMESTAMP WITH TIME ZONE +
          • PR #1970: Switch from log map rename to "committed" marker log record
          • -
          • PR #803: Use new ArrayList(Collection) and assertThrows() +
          • PR #1969: Add unique predicate
          • -
          • PR #802: Use Arrays.copyOf() and Arrays.copyOfRange() +
          • Issue #1963: Expression.addFilterConditions() with outer joins
          • -
          • PR #801: Fix NULL support in PgServer for primitive types too +
          • PR #1966: Add standard CURRENT_SCHEMA function
          • -
          • PR #800: More fixes in date-time types for ODBC drivers +
          • PR #1964: Add Feature T571: Truth value tests
          • -
          • PR #798: Add partial support of DATE, TIME, and TIMESTAMP data types to PgServer +
          • PR #1962: Fix data types of optimized conditions
          • -
          • PR #799: Use result of ArrayList.remove() +
          • PR #1961: Failure to open DB after improper shutdown
          • -
          • PR #797: Add ceilingKey() and floorKey() to TransactionMap (version 2) +
          • Issue #1957: NullPointerException with DISTINCT and ORDER BY CASE
          • -
          • PR #796: Add MDY to DateStyle in PgServerThread +
          • PR #1956: Fix row value handling in the null predicate
          • -
          • PR #794: Sort files in generated jars +
          • PR #1955: Add standard UNKNOWN literal
          • -
          • PR #793: Change return type of Value.getBoolean() to boolean (unwrapped) +
          • Issue #1952: Connection.setSchema doesn't work with query cache
          • -
          • PR #792: Inherit classpath from parent process +
          • PR #1951: Assorted changes
          • -
          • PR #791: Switch to JaCoCo code coverage +
          • PR #1950: Fix NULL handling in ARRAY_AGG
          • -
          • PR #788: Update lists of keywords +
          • PR #1949: Extract aggregate and window functions into own pages in documentation
          • -
          • PR #789: Map DATE in Oracle mode to ValueTimestamp +
          • PR #1948: Add standard LOG() function with two arguments
          • -
          • PR #787: Assorted changes +
          • Issue #1935: Improve file locking on shared filesystems like SMB
          • -
          • PR #785: Optimize NULL handling in MVSecondaryIndex.add() +
          • PR #1946: Reimplement table value constructor on top of Query
          • -
          • PR #783: Add Bits implementation for Java 9 and later versions +
          • PR #1945: Fix IN (SELECT UNION with OFFSET/FETCH)
          • -
          • PR #784: Hardcoded port numbers should not be used in unit tests +
          • Issue #1942: MySQL Mode: convertInsertNullToZero should be turned off by default?
          • -
          • PR #780: Close JavaFileManager after use. +
          • Issue #1940: MySQL Mode: Modify column from NOT NULL to NULL syntax
          • -
          • PR #782: Leftover shared lock after release +
          • PR #1941: Extract OFFSET / FETCH handling from Select and SelectUnion to Query
          • -
          • PR #781: Locks left behind after commit +
          • Issue #1938: Regression with CREATE OR REPLACE VIEW. Causes "Duplicate column name" exception.
          • -
          • PR #778: Reduce code duplication +
          • PR #1937: Get rid of FunctionCursorResultSet
          • -
          • PR #775: Fix building of documentation and zip +
          • Issue #1932: Incoherence between DbSettings.mvStore and getSettings()
          • -
          • PR #774: Assorted changes +
          • PR #1931: Fix wildcard expansion for multiple schemas
          • -
          • PR #773: Better checks for arguments of partial LOB reading methods +
          • PR #1930: Move PageStore table engine into own package
          • -
          • PR #772: getBinaryStream() and getCharacterStream() with pos and length +
          • PR #1929: Initial implementation of type predicate and other changes
          • -
          • Issue #754: Make NUMERIC type read as NUMERIC +
          • PR #1926: Assorted improvements for BINARY data type
          • -
          • PR #768: Add DataUtils.parseChecksummedMap() +
          • Issue #1925: Support SQL Server binary literal syntax
          • -
          • PR #769: Do not copy result of DataUtils.parseMap() to a new maps +
          • Issue #1918: MySQL: CREATE TABLE with both CHARSET and COMMENT failed
          • -
          • PR #766: Minor clean up of DateTimeUtils +
          • Issue #1913: MySQL: auto_increment changing SQL not supported
          • -
          • PR #764: Make use of try-with-resources statement +
          • Issue #1585: The translate function on DB2 mode could have parameters order changed
          • -
          • Issue #406: Return from ResultSet.getObject not in line with JDBC specification +
          • PR #1914: Change storage and network format of JSON to byte[]
          • -
          • Issue #710: Misleading exception message when INSERT has no value for self referential 'AS' column +
          • Issue #1911: Foreign key constraint does not prevent table being dropped
          • -
          • PR #763: Add DataUtils.getMapName() +
          • PR #1909: Add JSON_OBJECTAGG and JSON_ARRAYAGG aggregate functions
          • -
          • PR #762: Add row deletion confirmation to web console +
          • PR #1908: Cast VARCHAR to JSON properly and require FORMAT JSON in literals
          • -
          • PR #760: Assorted minor optimizations +
          • PR #1906: Add JSON_OBJECT and JSON_ARRAY functions
          • -
          • PR #759: Improve the look of error messages in web console +
          • Issue #1887: Infinite recursion in ConditionAndOr.java
          • -
          • PR #758: Allocate less amount of garbage +
          • Issue #1903: MSSQLServer Mode - Support Update TOP(X)
          • -
          • PR #757: Fix handling of UUID in Datatype.readValue() +
          • Issue #1900: Support SQLServer stored procedure execution syntax
          • -
          • PR #753: Optimize StringUtils.trim() and remove StringUtils.equals() +
          • PR #1898: Add IS JSON predicate
          • -
          • PR #752: Use predefined charsets instead of names where possible +
          • Issue #1896: MSSQLServer compatibility mode - GETDATE() incorrectly omits time
          • -
          • PR #750: Use AtomicIntegerArray and StandardCharsets +
          • PR #1895: Add standard array concatenation operation
          • -
          • PR #749: Fix some build checks in sources +
          • Issue #1892: Window aggregate functions return incorrect result without window ordering and with ROWS unit
          • -
          • Issue #740: TestWeb hangups if webSSL=true specified in configuration +
          • Issue #1890: ArrayIndexOutOfBoundsException in MVSortedTempResult.getKey
          • -
          • Issue #736: Copyright years in sources +
          • Issue #308: Mode MySQL and LAST_INSERT_ID with argument
          • -
          • Issue #744: TestFile failure on Java 9 and Java 10 +
          • Issue #1883: Suspicious code in Session.getLocks()
          • -
          • PR #741: More cleanups in LocalDateTimeUtils and other minor changes +
          • Issue #1878: OPTIMIZE_REUSE_RESULTS causes incorrect result after rollback since 1.4.198
          • -
          • PR #743: Change REGEXP_REPLACE mode for MariaDB and PostgreSQL +
          • PR #1880: Collation names like CHARSET_* recognition
          • -
          • Issue #646: NPE in CREATE VIEW WITH RECURSIVE & NON_RECURSIVE CTE +
          • Issue #1844: MySQL Compatibility: create table error when primary key has comment
          • -
          • PR #738: Copy javadoc to *BackwardsCompat to fix building of documentation +
          • PR #1873: Concurrency in database metadata
          • -
          • PR #735: Add support of java.time.Instant V2 +
          • Issue #1864: Failing to format NotSerializableException corrupting the database
          • -
          • PR #733: Remove JPA/ORM configuration txt files as they're already integrated +
          • PR #1868: add more checking to TestFileLock
          • -
          • PR #732: Fix == +
          • Issue #1819: Trace.db file exceed file size limit (64MB)
          • -
          • PR #730: Implement enquoteIdentifier() and isSimpleIdentifier() from JDBC 4.3 +
          • Issue #1861: Use COALESCE in named columns join for some data types
          • -
          • PR #729: Grammar documentation change +
          • PR #1860: Additional fix for deadlock on shutdown (exclusively in PageStore mode)
          • -
          • PR #727: Integer/Long.compare(x, y) can be used to compare primitive values +
          • Issue #1855: Wrong qualified asterisked projections in named column join
          • -
          • PR #726: Fixes in tests +
          • Issue #1854: Wrong asterisked projection and result in named column right outer join
          • -
          • Issue #725: FilePathMem.tryLock() fails since Java 9 +
          • Issue #1852: Named column joins doesn't work with the VALUES constructor and derived column lists
          • -
          • PR #723: Clean up LocalDateTimeUtils +
          • Issue #1851: Wrong asterisked projection in named column joins
          • -
          • PR #724: Use StringBuilder instead of StringBuffer +
          • PR #1850: Duplicate map identifiers
          • -
          • PR #720: DROP TABLE RESTRICT shouldn't drop foreign keys in other tables +
          • PR #1849: Reimplement MVStore.findOldChunks() with PriorityQueue
          • -
          • PR #722: Assorted minor changes +
          • PR #1848: Reimplement MVStore.findChunksToMove() with PriorityQueue
          • -
          • Issue #638: Oracle mode: incompatible regexp back-reference syntax +
          • Issue #1843: Named columns join syntax is not supported
          • -
          • Make ALL a reserved keyword +
          • Issue #1841: Deadlock during concurrent shutdown attempts with 1.4.199
          • -
          • Issue #311: Avoid calling list.toArray(new X[list.size()]) for performance +
          • Issue #1834: NUMERIC does not preserve its scale for some values
          • -
          • PR #715: Better getObject error message +
          • PR #1838: Implement conversion from JSON to GEOMETRY
          • -
          • PR #714: SecureRandom is already synchronized +
          • PR #1837: Implement conversion from GEOMETRY to JSON
          • -
          • PR #712: Return properly encoded UUID from SimpleResultSet.getBytes() +
          • PR #1836: Add LSHIFT and RSHIFT function
          • -
          • PR #711: TestFunctions less english dependent +
          • PR #1833: Add BITNOT function
          • -
          • Issue #644: Year changing from negative -509 to a positive 510. +
          • PR #1832: JSON validation and normalization
          • -
          • PR #706: SIGNAL function +
          • PR #1829: MVStore chunks occupancy rate calculation fixes
          • -
          • PR #704: added Javascript support for Triggers' source +
          • PR #1828: Basis for implementation of SQL/JSON standard
          • -
          • Issue #694: Oracle syntax for adding NOT NULL constraint not supported. -
          • -
          • Issue #699: When using an index for sorting, the index is ignored when also using NULLS LAST/FIRST -
          • -
          • Issue #697: FilePathDisk.newInputStream fails for contextual class loading -
          • -
          • Issue #695: jdbc:postgresql protocol connection issue in H2 Console Application in case of redshift driver in classpath -
          • -
          • Fix 'file not closed' when using FILE_READ -
          • -
          • Fix bug in LinkSchema tool when object exists with same name in different schemas -
          • -
          • Issue #675: Fix date operations on Locales with non-Gregorian calendars -
          • -
          • Fix removal of LOB when rolling back transaction on a table containing more than one LOB column. -
          • -
          • Issue #654: List ENUM type values in INFORMATION_SCHEMA.COLUMNS -
          • -
          • Issue #650: Simple nested SELECT causes error for table with TIMESTAMP WITH TIMEZONE column -
          • -
          • Issue #654: List ENUM type values in INFORMATION_SCHEMA.COLUMNS -
          • -
          • Issue #668: Fail of an update command on large table with ENUM column -
          • -
          • Issue #662: column called CONSTRAINT is not properly escaped when storing to metadata -
          • -
          • Issue #660: Outdated java version mentioned on http://h2database.com/html/build.html#providing_patches -
          • -
          • Issue #643: H2 doesn't use index when I use IN and EQUAL in one query -
          • -
          • Reset transaction start timestamp on ROLLBACK -
          • -
          • Issue #632: CREATE OR REPLACE VIEW creates incorrect columns names -
          • -
          • Issue #630: Integer overflow in CacheLRU can cause unrestricted cache growth -
          • -
          • Issue #497: Fix TO_DATE in cases of 'inline' text. E.g. the "T" and "Z" in to_date('2017-04-21T00:00:00Z', 'YYYY-MM-DD"T"HH24:MI:SS"Z"') -
          • -
          • Fix bug in MySQL/ORACLE-syntax silently corrupting the modified column in cases of setting the 'NULL'- or 'NOT NULL'-constraint. E.g. alter table T modify C NULL; -
          • -
          • Issue #570: MySQL compatibility for ALTER TABLE .. DROP INDEX -
          • -
          • Issue #537: Include the COLUMN name in message "Numeric value out of range" -
          • -
          • Issue #600: ROW_NUMBER() behaviour change in H2 1.4.195 -
          • -
          • Fix a bunch of race conditions found by vmlens.com, thank you to vmlens for giving us a license. -
          • -
          • PR #597: Support more types in getObject -
          • -
          • Issue #591: Generated SQL from WITH-CTEs does not include a table identifier -
          • -
          • PR #593: Make it possible to create a cluster without using temporary files. -
          • -
          • PR #592: "Connection is broken: "unexpected status 16777216" [90067-192]" message when using older h2 releases as client -
          • -
          • Issue #585: MySQL mode DELETE statements compatibility -
          • -
          • PR #586: remove extra tx preparation -
          • -
          • PR #568: Implement MetaData.getColumns() for synonyms. -
          • -
          • Issue #581: org.h2.tools.RunScript assumes -script parameter is part of protocol -
          • -
          • Fix a deadlock in the TransactionStore -
          • -
          • PR #579: Disallow BLOB type in PostgreSQL mode -
          • -
          • Issue #576: Common Table Expression (CTE): WITH supports INSERT, UPDATE, MERGE, DELETE, CREATE TABLE ... -
          • -
          • Issue #493: Query with distinct/limit/offset subquery returns unexpected rows -
          • -
          • Issue #575: Support for full text search in multithreaded mode -
          • -
          • Issue #569: ClassCastException when filtering on ENUM value in WHERE clause -
          • -
          • Issue #539: Allow override of builtin functions/aliases -
          • -
          • Issue #535: Allow explicit paths on Windows without drive letter -
          • -
          • Issue #549: Removed UNION ALL requirements for CTE -
          • -
          • Issue #548: Table synonym support -
          • -
          • Issue #531: Rollback and delayed meta save. -
          • -
          • Issue #515: "Unique index or primary key violation" in TestMvccMultiThreaded -
          • -
          • Issue #458: TIMESTAMPDIFF() test failing. Handling of timestamp literals. -
          • -
          • PR #546: Fixes the missing file tree.js in the web console -
          • -
          • Issue #543: Prepare statement with regexp will not refresh parameter after metadata change -
          • -
          • PR #536: Support TIMESTAMP_WITH_TIMEZONE 2014 JDBC type -
          • -
          • Fix bug in parsing ANALYZE TABLE xxx SAMPLE_SIZE yyy -
          • -
          • Add padding for CHAR(N) values in PostgreSQL mode -
          • -
          • Issue #89: Add DB2 timestamp format compatibility -
          • -
          - -

          Version 1.4.196 (2017-06-10)

          -
            -
          • Issue#479 Allow non-recursive CTEs (WITH statements), patch from stumc -
          • -
          • Fix startup issue when using "CHECK" as a column name. -
          • -
          • Issue #423: ANALYZE performed multiple times on one table during execution of the same statement. -
          • -
          • Issue #426: Support ANALYZE TABLE statement -
          • -
          • Issue #438: Fix slow logging via SLF4J (TRACE_LEVEL_FILE=4). -
          • -
          • Issue #472: Support CREATE SEQUENCE ... ORDER as a NOOP for Oracle compatibility -
          • -
          • Issue #479: Allow non-recursive Common Table Expressions (CTE) -
          • -
          • On Mac OS X, with IPv6 and no network connection, the Console tool was not working as expected. -
          • -
          - -

          Version 1.4.195 (2017-04-23)

          -
            -
          • Lazy query execution support. +
          • PR #1827: Add support for Lucene 8.0.0
          • -
          • Added API for handling custom data types (System property "h2.customDataTypesHandler", API org.h2.api.CustomDataTypesHandler). +
          • Issue #1820: Performance problem on commit
          • -
          • Added support for invisible columns. +
          • Issue #1822: Use https:// in h2database.com hyperlinks
          • -
          • Added an ENUM data type, with syntax similar to that of MySQL. +
          • PR #1817: Assorted minor changes in documentation and other places
          • -
          • MVStore: for object data types, the cache size memory estimation - was sometimes far off in a read-only scenario. - This could result in inefficient cache usage. +
          • PR #1812: An IllegalStateException that wraps EOFException is thrown when partial writes happens
          diff --git a/h2/src/docsrc/html/cheatSheet.html b/h2/src/docsrc/html/cheatSheet.html index 16237e5696..7226e3b749 100644 --- a/h2/src/docsrc/html/cheatSheet.html +++ b/h2/src/docsrc/html/cheatSheet.html @@ -1,7 +1,7 @@ @@ -108,18 +108,18 @@

          H2 Database Engine Cheat Sheet

          Using H2

          -
          diff --git a/h2/src/docsrc/html/faq.html b/h2/src/docsrc/html/faq.html index fcda24aa16..932ef197ac 100644 --- a/h2/src/docsrc/html/faq.html +++ b/h2/src/docsrc/html/faq.html @@ -1,7 +1,7 @@ @@ -68,14 +68,13 @@

          Are there Known Bugs? When is the Next Release?

          USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new timezone. -
        • Tomcat and Glassfish 3 set most static fields (final or non-final) to null when - unloading a web application. This can cause a NullPointerException in H2 versions - 1.1.107 and older, and may still not work in newer versions. Please report it if you - run into this issue. In Tomcat >= 6.0 this behavior can be disabled by setting the - system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false, - however Tomcat may then run out of memory. A known workaround is to - put the h2*.jar file in a shared lib directory +
        • Old versions of Tomcat and Glassfish 3 set most static fields (final or non-final) to null when + unloading a web application. This can cause a NullPointerException. + In Tomcat >= 6.0 this behavior can be disabled by setting the + system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false. + A known workaround is to put the h2*.jar file in a shared lib directory (common/lib). + Tomcat 8.5 and newer versions don't clear fields and don't have such property.
        • Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3. @@ -181,8 +180,6 @@

          Is it Reliable?

        • The PostgreSQL server
        • Clustering (there are cases were transaction isolation can be broken due to timing issues, for example one session overtaking another session). -
        • Multi-threading within the old PageStore engine using SET MULTI_THREADED=1. - Default MVStore engine is multi-threaded by default.
        • Compatibility modes for other databases (only some features are implemented).
        • The soft reference cache (CACHE_TYPE=SOFT_LRU). It might not improve performance, and out of memory issues have been reported. @@ -240,10 +237,9 @@

          Column Names are Incorrect?

          This is not a bug. According to the JDBC specification, the method ResultSetMetaData.getColumnName() should return the name of the column and not the alias name. If you need the alias name, use -ResultSetMetaData.getColumnLabel(). +ResultSetMetaData.getColumnLabel(). Some other databases don't work like this yet (they don't follow the JDBC specification). -If you need compatibility with those databases, use the Compatibility Mode, -or append ;ALIAS_COLUMN_NAME=TRUE to the database URL. +If you need compatibility with those databases, use the Compatibility Mode.

          This also applies to DatabaseMetaData calls that return a result set. @@ -280,7 +276,7 @@

          How to Contribute to this Project?

          code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the -feature request list. +feature request list. I suggest starting with very small features that are easy to implement. Keep in mind to provide test cases as well.

          diff --git a/h2/src/docsrc/html/features.html b/h2/src/docsrc/html/features.html index 4c802f1476..5d1f7c7f22 100644 --- a/h2/src/docsrc/html/features.html +++ b/h2/src/docsrc/html/features.html @@ -1,7 +1,7 @@ @@ -67,8 +67,8 @@

          Features

          Read Only Databases
          Read Only Databases in Zip or Jar File
          - - Computed Columns / Function Based Index
          + + Generated Columns (Computed Columns) / Function Based Index
          Multi-Dimensional Indexes
          @@ -100,8 +100,8 @@

          Main Features

          Additional Features

          • Disk based or in-memory databases and tables, read-only database support, temporary tables -
          • Transaction support (read committed), 2-phase-commit -
          • Multiple connections, table level locking +
          • Transaction support (read uncommitted, read committed, repeatable read, snapshot), 2-phase-commit +
          • Multiple connections, row-level locking
          • Cost based optimizer, using a genetic algorithm for complex queries, zero-administration
          • Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set @@ -116,7 +116,7 @@

            SQL Support

          • Triggers and Java functions / stored procedures
          • Many built-in functions, including XML and lossless data compression
          • Wide range of data types including large objects (BLOB/CLOB) and arrays -
          • Sequence and autoincrement columns, computed columns (can be used for function based indexes) +
          • Sequences and identity columns, generated columns (can be used for function based indexes)
          • ORDER BY, GROUP BY, HAVING, UNION, OFFSET / FETCH (including PERCENT and WITH TIES), LIMIT, TOP, DISTINCT / DISTINCT ON (...)
          • Window functions @@ -142,7 +142,7 @@

            Security Features

            Other Features and Tools

              -
            • Small footprint (around 2 MB), low memory requirements +
            • Small footprint (around 2.5 MB), low memory requirements
            • Multiple index types (b-tree, tree, hash)
            • Support for multi-dimensional indexes
            • CSV (comma separated values) file support @@ -162,7 +162,7 @@

              Other Features and Tools

              H2 in Use

              For a list of applications that work with or use H2, see: -Links. +Links.

              Connection Modes

              @@ -184,6 +184,15 @@

              Embedded Mode

              There is no limit on the number of database open concurrently, or on the number of open connections.

              +

              +In embedded mode I/O operations can be performed by the application's threads that execute a SQL command. +The application must not interrupt these threads, as it can lead to database corruption, +because the JVM closes the I/O handle during thread interruption. +Consider other ways to control execution of your application. +When interrupts are possible the async: +file system can be used as a workaround, but full safety is not guaranteed. +It's recommended to use the client-server model instead, the client side may interrupt its own threads. +

              The database is embedded in the application @@ -278,7 +287,7 @@

              Database URL Overview

              File locking methods - jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|NO}
              + jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|FS|NO}
              jdbc:h2:file:~/private;CIPHER=AES;FILE_LOCK=SOCKET
              @@ -410,6 +419,8 @@

              In-Memory Databases

              To keep the database open, add ;DB_CLOSE_DELAY=-1 to the database URL. To keep the content of an in-memory database as long as the virtual machine is alive, use jdbc:h2:mem:test;DB_CLOSE_DELAY=-1. +This may create a memory leak; when you need to remove the database, use +the SHUTDOWN command.

              Database Files Encryption

              @@ -594,7 +605,7 @@

              Changing Other Settings when Opening a Connection

              Adding ;setting=value at the end of a database URL is the same as executing the statement SET setting value just after connecting. For a list of supported settings, see SQL Grammar -or the DbSettings javadoc. +or the DbSettings javadoc.

              Custom File Access Mode

              @@ -641,34 +652,16 @@

              Multithreading Support

              An application can use multiple threads that access the same database at the same time. -With default MVStore engine threads that use different connections can use the database concurrently. -With PageStore engine requests to the same database are synchronized, -that means that if one thread executes a long running query, the other threads need to wait. -Concurrent database usage may be enabled for PageStore or disabled for MVStore -with MULTI_THREADED setting. -Note that multi-threaded mode for PageStore engine is not tested well and has some issues; -it should be used with caution. +Threads that use different connections can use the database concurrently.

              Locking, Lock-Timeout, Deadlocks

              -Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. -In this case, table level locking is not used. - -If multi-version concurrency is not used, -the database uses table level locks to give each connection a consistent state of the data. -There are two kinds of locks: read locks (shared locks) and write locks (exclusive locks). -All locks are released when the transaction commits or rolls back. -When using the default transaction isolation level 'read committed', read locks are already released after each statement. -

              -If a connection wants to reads from a table, and there is no write lock on the table, -then a read lock is added to the table. If there is a write lock, then this connection waits -for the other connection to release the lock. If a connection cannot get a lock for a specified time, -then a lock timeout exception is thrown. -

              Usually, SELECT statements will generate read locks. This includes subqueries. -Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data, +Statements that modify data use write locks on the modified rows. +It is also possible to issue write locks without modifying data, using the statement SELECT ... FOR UPDATE. +Data definition statements may issue exclusive locks on tables. The statements COMMIT and ROLLBACK releases all open locks. The commands SAVEPOINT and @@ -689,18 +682,18 @@

              Locking, Lock-Timeout, Deadlocks

              SCRIPT; - Write + Write (row-level) SELECT * FROM TEST WHERE 1=0 FOR UPDATE; - Write + Write (row-level) INSERT INTO TEST VALUES(1, 'Hello');
              INSERT INTO TEST SELECT * FROM TEST;
              UPDATE TEST SET NAME='Hi';
              DELETE FROM TEST; - Write + Exclusive ALTER TABLE TEST ...;
              CREATE INDEX ... ON TEST ...;
              DROP INDEX ...; @@ -714,16 +707,6 @@

              Locking, Lock-Timeout, Deadlocks

              SET DEFAULT_LOCK_TIMEOUT <milliseconds>. The default lock timeout is persistent.

              -

              Avoiding Deadlocks

              -

              -To avoid deadlocks, ensure that all transactions lock the tables in the same order -(for example in alphabetical order), and avoid upgrading read locks to write locks. -Both can be achieved using explicitly locking tables using SELECT ... FOR UPDATE. -

              -Note that delete, insert and update operations issue table level locks with PageStore engine, -but does not issue them with default MVStore engine. -

              -

              Database File Layout

              The following files are created for persistent databases: @@ -731,14 +714,32 @@

              Database File Layout

              + + -
              File NameDescriptionNumber of Files
              - test.h2.db + test.mv.db Database file.
              Contains the transaction log, indexes, and data for all tables.
              - Format: <database>.h2.db + Format: <database>.mv.db
              1 per database
              + test.newFile + + Temporary file for database compaction.
              + Contains the new MVStore file.
              + Format: <database>.newFile +
              + 0 or 1 per database +
              + test.tempFile + + Temporary file for database compaction.
              + Contains the temporary MVStore file.
              + Format: <database>.tempFile +
              + 0 or 1 per database +
              test.lock.db @@ -754,19 +755,10 @@

              Database File Layout

              Trace file (if the trace option is enabled).
              Contains trace information.
              Format: <database>.trace.db
              - Renamed to <database>.trace.db.old is too big. + Renamed to <database>.trace.db.old if too big.
              0 or 1 per database
              - test.lobs.db/* - - Directory containing one file for each
              - BLOB or CLOB value larger than a certain size.
              - Format: <id>.t<tableId>.lob.db -
              - 1 per large object -
              test.123.temp.db @@ -819,31 +811,101 @@

              Compatibility

              (example: jdbc:h2:~/test;IGNORECASE=TRUE).

              -

              Compatibility Modes

              +

              Compatibility Modes

              For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases are implemented in this way. Here is the list of currently supported modes and the differences to the regular mode:

              +

              REGULAR Compatibility mode

              +

              +This mode is used by default. +

              +
              • Empty IN predicate is allowed. +
              • TOP clause in SELECT is allowed. +
              • OFFSET/LIMIT clauses are allowed. +
              • MINUS can be used instead of EXCEPT. +
              • IDENTITY can be used as a data type. +
              • Legacy SERIAL and BIGSERIAL data types are supported. +
              • AUTO_INCREMENT clause can be used instead of GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY. +
              + +

              STRICT Compatibility Mode

              +

              +To use the STRICT mode, use the database URL jdbc:h2:~/test;MODE=STRICT +or the SQL statement SET MODE STRICT. +In this mode some deprecated features are disabled. +

              +

              +If your application or library uses only the H2 or it generates different SQL for different database systems +it is recommended to use this compatibility mode in unit tests +to reduce possibility of accidental misuse of such features. +This mode cannot be used as SQL validator, however. +

              +

              +It is not recommended to enable this mode in production builds of libraries, +because this mode may become more restrictive in future releases of H2 that may break your library +if it will be used together with newer version of H2. +

              +
              • Empty IN predicate is disallowed. +
              • TOP and OFFSET/LIMIT clauses are disallowed, only OFFSET/FETCH can be used. +
              • MINUS cannot be used instead of EXCEPT. +
              • IDENTITY cannot be used as a data type and AUTO_INCREMENT clause cannot be specified. +Use GENERATED BY DEFAULT AS IDENTITY clause instead. +
              • SERIAL and BIGSERIAL data types are disallowed. +Use INTEGER GENERATED BY DEFAULT AS IDENTITY or BIGINT GENERATED BY DEFAULT AS IDENTITY instead. +
              + +

              LEGACY Compatibility Mode

              +

              +To use the LEGACY mode, use the database URL jdbc:h2:~/test;MODE=LEGACY +or the SQL statement SET MODE LEGACY. +In this mode some compatibility features for applications written for H2 1.X are enabled. +This mode doesn't provide full compatibility with H2 1.X. +

              +
              • Empty IN predicate is allowed. +
              • TOP clause in SELECT is allowed. +
              • OFFSET/LIMIT clauses are allowed. +
              • MINUS can be used instead of EXCEPT. +
              • IDENTITY can be used as a data type. +
              • MS SQL Server-style IDENTITY clause is supported. +
              • Legacy SERIAL and BIGSERIAL data types are supported. +
              • AUTO_INCREMENT clause can be used instead of GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY. +
              • If a value for identity column was specified in an INSERT command +the base value of sequence generator of this column is updated if current value of generator was smaller +(larger for generators with negative increment) than the inserted value. +
              • Identity columns have implicit DEFAULT ON NULL clause. +It means a NULL value may be specified for this column in INSERT command and it will be treated as DEFAULT. +
              • Oracle-style CURRVAL and NEXTVAL can be used on sequences. +
              • TOP clause can be used in DELETE and UPDATE. +
              • Non-standard Oracle-style WHERE clause can be used in standard MERGE command. +
              • Attempt to reference a non-unique set of columns from a referential constraint +will create an UNIQUE constraint on them automatically. +
              • Unsafe comparison operators between numeric and boolean values are allowed. +
              • IDENTITY() and SCOPE_IDENTITY() are supported, but both are implemented like SCOPE_IDENTITY() +
              +

              DB2 Compatibility Mode

              -To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2 +To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2;DEFAULT_NULL_ORDERING=HIGH or the SQL statement SET MODE DB2.

              • For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -
              • Concatenating NULL with another value - results in the other value.
              • Support the pseudo-table SYSIBM.SYSDUMMY1.
              • Timestamps with dash between date and time are supported.
              • Datetime value functions return the same value within a command. +
              • Second and third arguments of TRANSLATE() function are swapped. +
              • LIMIT / OFFSET clauses are supported. +
              • MINUS can be used instead of EXCEPT. +
              • Unsafe comparison operators between numeric and boolean values are allowed.

              Derby Compatibility Mode

              -To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby +To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby;DEFAULT_NULL_ORDERING=HIGH or the SQL statement SET MODE Derby.

              • For aliased columns, ResultSetMetaData.getColumnName() @@ -851,26 +913,22 @@

                Derby Compatibility Mode

                null.
              • For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. -
              • Concatenating NULL with another value - results in the other value.
              • Support the pseudo-table SYSIBM.SYSDUMMY1.
              • Datetime value functions return the same value within a command.

              HSQLDB Compatibility Mode

              -To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB +To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB;DEFAULT_NULL_ORDERING=FIRST or the SQL statement SET MODE HSQLDB.

              -
              • For aliased columns, ResultSetMetaData.getColumnName() - returns the alias name and getTableName() returns - null. -
              • When converting the scale of decimal data, the number is only converted if the new scale is - smaller than the current scale. Usually, the scale is converted and 0s are added if required. -
              • For unique indexes, NULL is distinct. - That means only one row with NULL in one of the columns is allowed. -
              • Text can be concatenated using '+'. +
                • Text can be concatenated using '+'. +
                • NULL value works like DEFAULT value in assignments to identity columns.
                • Datetime value functions return the same value within a command. +
                • TOP clause in SELECT is supported. +
                • LIMIT / OFFSET clauses are supported. +
                • MINUS can be used instead of EXCEPT. +
                • Unsafe comparison operators between numeric and boolean values are allowed.

                MS SQL Server Compatibility Mode

                @@ -884,40 +942,92 @@

                MS SQL Server Compatibility Mode

              • Identifiers may be quoted using square brackets as in [Test].
              • For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. -
              • Concatenating NULL with another value - results in the other value.
              • Text can be concatenated using '+'. +
              • Arguments of LOG() function are swapped.
              • MONEY data type is treated like NUMERIC(19, 4) data type. SMALLMONEY data type is treated like NUMERIC(10, 4) data type.
              • IDENTITY can be used for automatic id generation on column level.
              • Table hints are discarded. Example: SELECT * FROM table WITH (NOLOCK).
              • Datetime value functions return the same value within a command. +
              • 0x literals are parsed as binary string literals. +
              • TRUNCATE TABLE restarts next values of generated columns. +
              • TOP clause in SELECT, UPDATE, and DELETE is supported. +
              • Unsafe comparison operators between numeric and boolean values are allowed. +
              + +

              MariaDB Compatibility Mode

              +

              +To use the MariaDB mode, use the database URL jdbc:h2:~/test;MODE=MariaDB;DATABASE_TO_LOWER=TRUE. +When case-insensitive identifiers are needed append ;CASE_INSENSITIVE_IDENTIFIERS=TRUE to URL. +Do not change value of DATABASE_TO_LOWER after creation of database. +

              +
              • Creating indexes in the CREATE TABLE statement is allowed using + INDEX(..) or KEY(..). + Example: create table test(id int primary key, name varchar(255), key idx_name(name)); +
              • When converting a floating point number to an integer, the fractional + digits are not truncated, but the value is rounded. +
              • ON DUPLICATE KEY UPDATE is supported in INSERT statements, due to this feature VALUES has special non-standard + meaning in some contexts. +
              • INSERT IGNORE is partially supported and may be used to skip rows with duplicate keys if ON DUPLICATE KEY + UPDATE is not specified. +
              • REPLACE INTO is partially supported. +
              • Spaces are trimmed from the right side of CHAR values. +
              • REGEXP_REPLACE() uses \ for back-references. +
              • Datetime value functions return the same value within a command. +
              • 0x literals are parsed as binary string literals. +
              • Unrelated expressions in ORDER BY clause of DISTINCT queries are allowed. +
              • Some MariaDB-specific ALTER TABLE commands are partially supported. +
              • TRUNCATE TABLE restarts next values of generated columns. +
              • NEXT VALUE FOR returns different values when invoked multiple times within the same row. +
              • If value of an identity column was manually specified, its sequence is updated to generate values after +inserted. +
              • NULL value works like DEFAULT value is assignments to identity columns. +
              • LIMIT / OFFSET clauses are supported. +
              • AUTO_INCREMENT clause can be used. +
              • YEAR data type is treated like SMALLINT data type. +
              • GROUP BY clause can contain 1-based positions of expressions from the SELECT list. +
              • Unsafe comparison operators between numeric and boolean values are allowed.
              +

              +Text comparison in MariaDB is case insensitive by default, while in H2 it is case sensitive (as in most other databases). +H2 does support case insensitive text comparison, but it needs to be set separately, +using SET IGNORECASE TRUE. +This affects comparison using =, LIKE, REGEXP. +

              MySQL Compatibility Mode

              To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL;DATABASE_TO_LOWER=TRUE. -Use this mode for compatibility with MariaDB too. When case-insensitive identifiers are needed append ;CASE_INSENSITIVE_IDENTIFIERS=TRUE to URL. Do not change value of DATABASE_TO_LOWER after creation of database.

              -
              • When inserting data, if a column is defined to be NOT NULL - and NULL is inserted, - then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. - Usually, this operation is not allowed and an exception is thrown. -
              • Creating indexes in the CREATE TABLE statement is allowed using +
                • Creating indexes in the CREATE TABLE statement is allowed using INDEX(..) or KEY(..). Example: create table test(id int primary key, name varchar(255), key idx_name(name));
                • When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. -
                • Concatenating NULL with another value - results in the other value.
                • ON DUPLICATE KEY UPDATE is supported in INSERT statements, due to this feature VALUES has special non-standard meaning is some contexts.
                • INSERT IGNORE is partially supported and may be used to skip rows with duplicate keys if ON DUPLICATE KEY UPDATE is not specified. -
                • REGEXP_REPLACE() uses \ for back-references for compatibility with MariaDB. +
                • REPLACE INTO is partially supported. +
                • Spaces are trimmed from the right side of CHAR values. +
                • REGEXP_REPLACE() uses \ for back-references.
                • Datetime value functions return the same value within a command. +
                • 0x literals are parsed as binary string literals. +
                • Unrelated expressions in ORDER BY clause of DISTINCT queries are allowed. +
                • Some MySQL-specific ALTER TABLE commands are partially supported. +
                • TRUNCATE TABLE restarts next values of generated columns. +
                • If value of an identity column was manually specified, its sequence is updated to generate values after +inserted. +
                • NULL value works like DEFAULT value is assignments to identity columns. +
                • Referential constraints don't require an existing primary key or unique constraint on referenced columns +and create a unique constraint automatically if such constraint doesn't exist. +
                • LIMIT / OFFSET clauses are supported. +
                • AUTO_INCREMENT clause can be used. +
                • YEAR data type is treated like SMALLINT data type. +
                • GROUP BY clause can contain 1-based positions of expressions from the SELECT list. +
                • Unsafe comparison operators between numeric and boolean values are allowed.

                Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases). @@ -928,7 +1038,7 @@

                MySQL Compatibility Mode

                Oracle Compatibility Mode

                -To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle +To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle;DEFAULT_NULL_ORDERING=HIGH or the SQL statement SET MODE Oracle.

                • For aliased columns, ResultSetMetaData.getColumnName() @@ -937,17 +1047,23 @@

                  Oracle Compatibility Mode

                • When using unique indexes, multiple rows with NULL in all columns are allowed, however it is not allowed to have multiple rows with the same values otherwise. -
                • Concatenating NULL with another value +
                • Empty strings are treated like NULL values, concatenating NULL with another value results in the other value. -
                • Empty strings are treated like NULL values.
                • REGEXP_REPLACE() uses \ for back-references. +
                • RAWTOHEX() converts character strings to hexadecimal representation of their UTF-8 encoding. +
                • HEXTORAW() decodes a hexadecimal character string to a binary string.
                • DATE data type is treated like TIMESTAMP(0) data type.
                • Datetime value functions return the same value within a command. +
                • ALTER TABLE MODIFY COLUMN command is partially supported. +
                • SEQUENCE.NEXTVAL and SEQUENCE.CURRVAL are supported and return values with DECIMAL/NUMERIC data type. +
                • Merge when matched clause may have WHERE clause. +
                • MINUS can be used instead of EXCEPT.

                PostgreSQL Compatibility Mode

                -To use the PostgreSQL mode, use the database URL jdbc:h2:~/test;MODE=PostgreSQL;DATABASE_TO_LOWER=TRUE. +To use the PostgreSQL mode, use the database URL +jdbc:h2:~/test;MODE=PostgreSQL;DATABASE_TO_LOWER=TRUE;DEFAULT_NULL_ORDERING=HIGH. Do not change value of DATABASE_TO_LOWER after creation of database.

                • For aliased columns, ResultSetMetaData.getColumnName() @@ -955,8 +1071,8 @@

                  PostgreSQL Compatibility Mode

                  null.
                • When converting a floating point number to an integer, the fractional digits are not be truncated, but the value is rounded. -
                • The system columns CTID and - OID are supported. +
                • The system columns ctid and + oid are supported.
                • LOG(x) is base 10 in this mode.
                • REGEXP_REPLACE():
                    @@ -964,23 +1080,17 @@

                    PostgreSQL Compatibility Mode

                  • does not throw an exception when the flagsString parameter contains a 'g';
                  • replaces only the first matched substring in the absence of the 'g' flag in the flagsString parameter.
                  -
                • Fixed-width strings are padded with spaces. +
                • LIMIT / OFFSET clauses are supported. +
                • Legacy SERIAL and BIGSERIAL data types are supported. +
                • ON CONFLICT DO NOTHING is supported in INSERT statements. +
                • Spaces are trimmed from the right side of CHAR values, but CHAR values in result sets are right-padded with + spaces to the declared length.
                • MONEY data type is treated like NUMERIC(19, 2) data type.
                • Datetime value functions return the same value within a transaction.
                • ARRAY_SLICE() out of bounds parameters are silently corrected.
                • EXTRACT function with DOW field returns (0-6), Sunday is 0. -
                - -

                Ignite Compatibility Mode

                -

                -To use the Ignite mode, use the database URL jdbc:h2:~/test;MODE=Ignite -or the SQL statement SET MODE Ignite. -

                -
                • Creating indexes in the CREATE TABLE statement is allowed using - INDEX(..) or KEY(..). - Example: create table test(id int primary key, name varchar(255), key idx_name(name)); -
                • AFFINITY KEY and SHARD KEY keywords may be used in index definition. -
                • Datetime value functions return the same value within a transaction. +
                • UPDATE with FROM is supported. +
                • GROUP BY clause can contain 1-based positions of expressions from the SELECT list.

                Auto-Reconnect

                @@ -1053,10 +1163,11 @@

                Automatic Mixed Mode

                Page Size

                -The page size for new databases is 2 KB (2048), unless the page size is set +The page size for new databases is 4 KiB (4096 bytes), unless the page size is set explicitly in the database URL using PAGE_SIZE= when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created. +The page size of encrypted databases must be a multiple of 4096 (4096, 8192, …).

                Using the Trace Options

                @@ -1122,7 +1233,7 @@

                Java Code Generation

                12-20 20:58:09 jdbc[0]: /**/dbMeta3.getURL(); 12-20 20:58:09 jdbc[0]: -/**/dbMeta3.getTables(null, "", null, new String[]{"TABLE", "VIEW"}); +/**/dbMeta3.getTables(null, "", null, new String[]{"BASE TABLE", "VIEW"}); ...

                @@ -1221,26 +1332,32 @@

                Opening a Corrupted Database

                The exceptions are logged, but opening the database will continue.

                -

                Computed Columns / Function Based Index

                +

                Generated Columns (Computed Columns) / Function Based Index

                -A computed column is a column whose value is calculated before storing. +Each column is either a base column or a generated column. +A generated column is a column whose value is calculated before storing and cannot be assigned directly. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time:

                -CREATE TABLE TEST(ID INT, NAME VARCHAR, LAST_MOD TIMESTAMP AS NOW());
                +CREATE TABLE TEST(
                +    ID INT,
                +    NAME VARCHAR,
                +    LAST_MOD TIMESTAMP WITH TIME ZONE
                +        GENERATED ALWAYS AS CURRENT_TIMESTAMP
                +);
                 

                Function indexes are not directly supported by this database, but they can be emulated -by using computed columns. For example, if an index on the upper-case version of -a column is required, create a computed column with the upper-case version of the original column, +by using generated columns. For example, if an index on the upper-case version of +a column is required, create a generated column with the upper-case version of the original column, and create an index for this column:

                 CREATE TABLE ADDRESS(
                     ID INT PRIMARY KEY,
                     NAME VARCHAR,
                -    UPPER_NAME VARCHAR AS UPPER(NAME)
                +    UPPER_NAME VARCHAR GENERATED ALWAYS AS UPPER(NAME)
                 );
                 CREATE INDEX IDX_U_NAME ON ADDRESS(UPPER_NAME);
                 
                @@ -1265,7 +1382,7 @@

                Multi-Dimensional Indexes

                Currently, Z-order (also called N-order or Morton-order) is used; Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. -The scalar value is indexed using a B-Tree index (usually using a computed column). +The scalar value is indexed using a B-Tree index (usually using a generated column).

                The method can result in a drastic performance improvement over just using an index on the first column. Depending on the @@ -1315,18 +1432,20 @@

                Referencing a Compiled Method

                Declaring Functions as Source Code

                When defining a function alias with source code, the database tries to compile -the source code using the Sun Java compiler (the class com.sun.tools.javac.Main) -if the tools.jar is in the classpath. If not, javac is run as a separate process. +the source code using the Java compiler (the class javax.tool.ToolProvider.getSystemJavaCompiler()) +if it is in the classpath. If not, javac is run as a separate process. Only the source code is stored in the database; the class is compiled each time the database is re-opened. -Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well. +Source code can be passed as dollar quoted text ($$source code$$) to avoid escaping problems. +If you use some third-party script processing tool, use standard single quotes instead and don't forget to repeat +each single quotation mark twice within the source code. Example:

                -CREATE ALIAS NEXT_PRIME AS $$
                +CREATE ALIAS NEXT_PRIME AS '
                 String nextPrime(String value) {
                     return new BigInteger(value).nextProbablePrime().toString();
                 }
                -$$;
                +';
                 

                By default, the three packages java.util, java.math, java.sql are imported. @@ -1336,13 +1455,13 @@

                Declaring Functions as Source Code

                and separated with the tag @CODE:

                -CREATE ALIAS IP_ADDRESS AS $$
                +CREATE ALIAS IP_ADDRESS AS '
                 import java.net.*;
                 @CODE
                 String ipAddress(String host) throws Exception {
                     return InetAddress.getByName(host).getHostAddress();
                 }
                -$$;
                +';
                 

                The following template is used to create a complete Java class: @@ -1622,7 +1741,7 @@

                Cache Settings

                is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE overrides this value (even if larger than the physical memory). To get the current used maximum cache size, use the query -SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'info.CACHE_MAX_SIZE' +SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'info.CACHE_MAX_SIZE'

                An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE=TQ to the database URL. diff --git a/h2/src/docsrc/html/fragments.html b/h2/src/docsrc/html/fragments.html index 3a2aa0cf42..b35432e0f1 100644 --- a/h2/src/docsrc/html/fragments.html +++ b/h2/src/docsrc/html/fragments.html @@ -1,6 +1,6 @@ @@ -27,7 +27,7 @@

              -

              Details

              -

              Click on the header to switch between railroad diagram and BNF.

              + +

              Click on the header of the grammar element to switch between railroad diagram and BNF.

              +

              Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red, +don't use it unless you need it for compatibility with other databases or old versions of H2.

              Literals

              diff --git a/h2/src/docsrc/html/history.html b/h2/src/docsrc/html/history.html index dc398f4afa..b5068a54c6 100644 --- a/h2/src/docsrc/html/history.html +++ b/h2/src/docsrc/html/history.html @@ -1,7 +1,7 @@ @@ -17,11 +17,9 @@
              -

              History and Roadmap

              +

              History

              Change Log
              - - Roadmap
              History of this Database Engine
              @@ -31,18 +29,8 @@

              History and Roadmap

              Change Log

              -The up-to-date change log is available at - -http://www.h2database.com/html/changelog.html - -

              - -

              Roadmap

              -

              -The current roadmap is available at - -http://www.h2database.com/html/roadmap.html - +The up-to-date change log is available +here

              History of this Database Engine

              @@ -100,16 +88,16 @@

              Supporters

              • Martin Wildam, Austria
              • tagtraum industries incorporated, USA -
              • TimeWriter, Netherlands -
              • Cognitect, USA -
              • Code 42 Software, Inc., Minneapolis -
              • Code Lutin, France +
              • TimeWriter, Netherlands +
              • Cognitect, USA +
              • Code 42 Software, Inc., Minneapolis +
              • Code Lutin, France
              • NetSuxxess GmbH, Germany -
              • Poker Copilot, Steve McLeod, Germany -
              • SkyCash, Poland -
              • Lumber-mill, Inc., Japan -
              • StockMarketEye, USA -
              • Eckenfelder GmbH & Co.KG, Germany +
              • Poker Copilot, Steve McLeod, Germany +
              • SkyCash, Poland +
              • Lumber-mill, Inc., Japan +
              • StockMarketEye, USA +
              • Eckenfelder GmbH & Co.KG, Germany
              • Jun Iyama, Japan
              • Steven Branda, USA
              • Anthony Goubard, Netherlands @@ -125,7 +113,7 @@

                Supporters

              • Elisabetta Berlini, Italy
              • William Gilbert, USA
              • Antonio Dieguez Rojas, Chile -
              • Ontology Works, USA +
              • Ontology Works, USA
              • Pete Haidinyak, USA
              • William Osmond, USA
              • Joachim Ansorg, Germany diff --git a/h2/src/docsrc/html/installation.html b/h2/src/docsrc/html/installation.html index 65c3f82af8..f787f957ed 100644 --- a/h2/src/docsrc/html/installation.html +++ b/h2/src/docsrc/html/installation.html @@ -1,7 +1,7 @@ @@ -36,7 +36,7 @@

                Requirements

                Database Engine

                • Windows XP or Vista, Mac OS X, or Linux -
                • Oracle Java 7 or newer +
                • Oracle Java 8 or newer
                • Recommended Windows file system: NTFS (FAT32 only supports files up to 4 GB)
                @@ -47,11 +47,8 @@

                H2 Console

                Supported Platforms

                As this database is written in Java, it can run on many different platforms. -It is tested with Java 7. -Currently, the database is developed and tested on Windows 8 -and Mac OS X using Java 7, but it also works in many other operating systems -and using other Java runtime environments. -All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported. +It is tested with Java 8 and 11. +All major operating systems (Windows, Mac OS X, Linux, ...) are supported.

                Installing the Software

                diff --git a/h2/src/docsrc/html/license.html b/h2/src/docsrc/html/license.html index d928bb1e61..1f228df8f1 100644 --- a/h2/src/docsrc/html/license.html +++ b/h2/src/docsrc/html/license.html @@ -1,7 +1,7 @@ @@ -30,8 +30,8 @@

                License

                Summary and License FAQ

                -H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) -or under the EPL 1.0 (Eclipse Public License). +H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) +or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL.

                  @@ -47,7 +47,7 @@

                  Summary and License FAQ

                  However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was in fact just HSQLDB. It seems 'bungisoft' does not exist any more, but you can use the -Wayback Machine and visit old web pages of http://www.bungisoft.com. +Wayback Machine and visit old web pages of http://www.bungisoft.com.

                  About porting the source code to another language (for example C# or C++): converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code. @@ -62,11 +62,11 @@

                  Summary and License FAQ

                   This software contains unmodified binary redistributions for
                  -H2 database engine (http://www.h2database.com/),
                  +H2 database engine (https://h2database.com/),
                   which is dual licensed and available under the MPL 2.0
                   (Mozilla Public License) or under the EPL 1.0 (Eclipse Public License).
                   An original copy of the license agreement can be found at:
                  -http://www.h2database.com/html/license.html
                  +https://h2database.com/html/license.html
                   

                  Mozilla Public License Version 2.0

                  @@ -158,7 +158,7 @@

                  Exhibit A - Source Code Form License Notice

                   This Source Code Form is subject to the terms of the Mozilla
                   Public License, v. 2.0. If a copy of the MPL was not distributed
                  -with this file, you can obtain one at http://mozilla.org/MPL/2.0
                  +with this file, you can obtain one at https://mozilla.org/MPL/2.0
                   

                  If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.

                  You may add additional accurate notices of copyright ownership.

                  @@ -395,9 +395,9 @@

                  7. GENERAL

                  Export Control Classification Number (ECCN)

                  -As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. +As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. However, for legal reasons, we can make no warranty that this information is correct. -For details, see also the Apache Software Foundation Export Classifications page. +For details, see also the Apache Software Foundation Export Classifications page.

              diff --git a/h2/src/docsrc/html/links.html b/h2/src/docsrc/html/links.html index 9750456308..98cf0cad6a 100644 --- a/h2/src/docsrc/html/links.html +++ b/h2/src/docsrc/html/links.html @@ -1,7 +1,7 @@ @@ -36,7 +36,7 @@

              Links

              Quotes

              - + Quote: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. @@ -45,34 +45,34 @@

              Quotes

              Books

              - + Seam In Action

              Extensions

              - + Grails H2 Database Plugin
              - + h2osgi: OSGi for the H2 Database
              - + H2Sharp: ADO.NET interface for the H2 database engine
              A spatial extension of the H2 database.

              Blog Articles, Videos

              - + Youtube: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2
              Analyzing CSVs with H2 in under 10 minutes (2009-12-07)
              - + Efficient sorting and iteration on large databases (2009-06-15)
              Porting Flexive to the H2 Database (2008-12-05)
              H2 Database with GlassFish (2008-11-24)
              - + H2 Database - Performance Tracing (2008-04-30)
              Open Source Databases Comparison (2007-09-11)
              @@ -86,13 +86,13 @@

              Blog Articles, Videos

              The Codist: Write Your Own Database, Again (2006-11-13)

              Project Pages

              - + Ohloh
              - + Freshmeat Project Page
              - + Wikipedia
              - + Java Source Net
              Linux Package Manager
              @@ -109,7 +109,7 @@

              Database Frontends / Tools

              SQL query tool.

              -

              +

              DbVisualizer
              Database tool.

              @@ -119,7 +119,7 @@

              Database Frontends / Tools

              Database utility written in Java.

              -

              +

              Flyway
              The agile database migration framework for Java.

              @@ -140,17 +140,17 @@

              Database Frontends / Tools

              HenPlus is a SQL shell written in Java.

              -

              +

              JDBC lint
              Helps write correct and efficient code when using the JDBC API.

              -

              +

              OpenOffice
              Base is OpenOffice.org's database application. It provides access to relational data sources.

              -

              +

              RazorSQL
              An SQL query tool, database browser, SQL editor, and database administration tool.

              @@ -160,7 +160,7 @@

              Database Frontends / Tools

              Universal Database Frontend.

              -

              +

              SQL Workbench/J
              Free DBMS-independent SQL tool.

              @@ -170,7 +170,7 @@

              Database Frontends / Tools

              Graphical tool to view the structure of a database, browse the data, issue SQL commands etc.

              -

              +

              SQuirreL DB Copy Plugin
              Tool to copy data from one database to another.

              @@ -182,7 +182,7 @@

              Products and Projects

              Visual business process modeling and simulation software for business users.

              -

              +

              Adeptia BPM
              A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows.

              @@ -192,7 +192,7 @@

              Products and Projects

              Process-centric, services-based application integration suite.

              -

              +

              Aejaks
              A server-side scripting environment to build AJAX enabled web applications.

              @@ -202,17 +202,17 @@

              Products and Projects

              A web framework that let's you write dynamic web applications with Zen-like simplicity.

              -

              +

              Apache Cayenne
              Open source persistence framework providing object-relational mapping (ORM) and remoting services.

              -

              +

              Apache Jackrabbit
              Open source implementation of the Java Content Repository API (JCR).

              -

              +

              Apache OpenJPA
              Open source implementation of the Java Persistence API (JPA).

              @@ -222,7 +222,7 @@

              Products and Projects

              Helps building web applications.

              -

              +

              BGBlitz
              The Swiss army knife of Backgammon.

              @@ -238,7 +238,7 @@

              Products and Projects

              JSR 168 compliant bookmarks management portlet application.

              -

              +

              Claros inTouch
              Ajax communication suite with mail, addresses, notes, IM, and rss reader.

              @@ -269,7 +269,7 @@

              Products and Projects

              Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets).

              -

              +

              District Health Information Software 2 (DHIS)
              The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities. @@ -280,7 +280,7 @@

              Products and Projects

              Open source Java Object Relational Mapping tool.

              -

              +

              Eclipse CDO
              The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution. @@ -291,7 +291,7 @@

              Products and Projects

              Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http://www.osoa.org).

              -

              +

              FIT4Data
              A testing framework for data management applications built on the Java implementation of FIT.

              @@ -306,7 +306,7 @@

              Products and Projects

              GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing.

              -

              +

              GBIF Integrated Publishing Toolkit (IPT)
              The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data: taxon primary occurrence data, @@ -323,7 +323,7 @@

              Products and Projects

              Fun-to-play games with a simple interface.

              -

              +

              GridGain
              GridGain is easy to use Cloud Application Platform that enables development of highly scalable distributed Java and Scala applications @@ -340,12 +340,12 @@

              Products and Projects

              High-Availability JDBC: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver.

              -

              +

              Hibernate
              Relational persistence for idiomatic Java (O-R mapping tool).

              -

              +

              Hibicius
              Online Banking Client for the HBCI protocol.

              @@ -367,12 +367,12 @@

              Products and Projects

              Java Spatial. Jaspa potentially brings around 200 spatial functions.

              -

              +

              Java Simon
              Simple Monitoring API.

              -

              +

              JBoss jBPM
              A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration.

              @@ -393,7 +393,7 @@

              Products and Projects

              Free, multi platform, open source GIS based on the GIS framework of uDig.

              -

              +

              Jena
              Java framework for building Semantic Web applications.

              @@ -403,8 +403,8 @@

              Products and Projects

              Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern.

              -

              -jOOQ (Java Object Oriented Querying)
              +

              +jOOQ (JOOQ Object Oriented Querying)
              jOOQ is a fluent API for typesafe SQL query construction and execution

              @@ -413,7 +413,7 @@

              Products and Projects

              A Scala-based, secure, developer friendly web framework.

              -

              +

              LiquiBase
              A tool to manage database changes and refactorings.

              @@ -423,7 +423,7 @@

              Products and Projects

              Build automation and management tool.

              -

              +

              localdb
              A tool that locates the full file path of the folder containing the database files.

              @@ -449,7 +449,7 @@

              Products and Projects

              Java web app that provides dynamic web content and Java libraries access from JavaScript.

              -

              +

              MyTunesRss
              MyTunesRSS lets you listen to your music wherever you are.

              @@ -485,7 +485,7 @@

              Products and Projects

              understand the application structure.

              -

              +

              Ontology Works
              This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), @@ -510,7 +510,7 @@

              Products and Projects

              OpenGroove is a groupware program that allows users to synchronize data.

              -

              +

              OpenSocial Development Environment (OSDE)
              Development tool for OpenSocial application.

              @@ -522,10 +522,10 @@

              Products and Projects

              P5H2
              -A library for the Processing programming language and environment. +A library for the Processing programming language and environment.

              -

              +

              Phase-6
              A computer based learning software.

              @@ -545,7 +545,7 @@

              Products and Projects

              Open source database benchmark.

              -

              +

              Poormans
              Very basic CMS running as a SWT application and generating static html pages.

              @@ -556,7 +556,7 @@

              Products and Projects

              programmed in CFML into Java bytecode and executes it on a servlet engine.

              -

              +

              Razuna
              Open source Digital Asset Management System with integrated Web Content Management.

              @@ -576,7 +576,7 @@

              Products and Projects

              ETL (Extract-Transform-Load) and script execution tool.

              -

              +

              Sesar
              Dependency Injection Container with Aspect Oriented Programming.

              @@ -591,7 +591,7 @@

              Products and Projects

              A free, light-weight, java data access framework.

              -

              +

              ShapeLogic
              Toolkit for declarative programming, image processing and computer vision.

              @@ -616,7 +616,7 @@

              Products and Projects

              A web-enabled, database independent, data synchronization/replication software.

              -

              +

              SmartFoxServer
              Platform for developing multiuser applications and games with Macromedia Flash.

              @@ -631,7 +631,7 @@

              Products and Projects

              Simple object relational mapping.

              -

              +

              Springfuse
              Code generation For Spring, Spring MVC & Hibernate.

              @@ -658,10 +658,10 @@

              Products and Projects

              Event (stream) processing kernel.

              -

              +

              SUSE Manager, part of Linux Enterprise Server 11
              The SUSE Manager - + eases the burden of compliance with regulatory requirements and corporate policies.

              @@ -670,7 +670,7 @@

              Products and Projects

              Easy-to-use backup solution for your iTunes library.

              -

              +

              TimeWriter
              TimeWriter is a very flexible program for time administration / time tracking. The older versions used dBase tables. @@ -678,7 +678,7 @@

              Products and Projects

              TimeWriter is delivered in Dutch and English.

              -

              +

              weblica
              Desktop CMS.

              @@ -688,7 +688,7 @@

              Products and Projects

              Collaborative and realtime interactive media platform for the web.

              -

              +

              Werkzeugkasten
              Minimum Java Toolset.

              @@ -699,7 +699,7 @@

              Products and Projects

              for building applications composed from server components - view providers.

              -

              +

              Volunteer database
              A database front end to register volunteers, partnership and donation for a Non Profit organization.

              diff --git a/h2/src/docsrc/html/main.html b/h2/src/docsrc/html/main.html index 620b1203f5..ea060a9132 100644 --- a/h2/src/docsrc/html/main.html +++ b/h2/src/docsrc/html/main.html @@ -1,7 +1,7 @@ diff --git a/h2/src/docsrc/html/mainWeb.html b/h2/src/docsrc/html/mainWeb.html index a563112cce..07f12b2267 100644 --- a/h2/src/docsrc/html/mainWeb.html +++ b/h2/src/docsrc/html/mainWeb.html @@ -1,7 +1,7 @@ @@ -13,8 +13,8 @@ H2 Database Engine - - + + @@ -29,7 +29,7 @@

              H2 Database Engine

            • Very fast, open source, JDBC API
            • Embedded and server modes; in-memory databases
            • Browser based Console application -
            • Small footprint: around 2 MB jar file size +
            • Small footprint: around 2.5 MB jar file size
            @@ -40,14 +40,14 @@

            Download

            Version ${version} (${versionDate})
            - Download this database + Download this database - Windows Installer (5 MB) + Windows Installer (6.7 MB)
            - Download this database + Download this database - All Platforms (zip, 8 MB) + All Platforms (zip, 9.5 MB)
            All Downloads @@ -61,8 +61,7 @@

            Download

            Support

            Stack Overflow (tag H2)

            - Google Group English, - Japanese

            + Google Group

            For non-technical issues, use:
            + +
            + + +

            Contents

            + + Introduction
            + + Upgrading
            + + File Format
            + + Data types
            + + Identity columns and sequences
            + + INFORMATION_SCHEMA
            + + General
            + +

            Introduction

            + +

            +Between version 1.4.200 and version 2.0.202 there have been considerable changes, such that a simple update is +not possible. +

            + +

            +It would have been nice to write some kind of migration tool, or auto-detect the file and upgrade. Unfortunately, this +is purely a volunteer-run project, so this is just the way it has to be. There exists a migration tool H2MigrationTool available +on GitHub, but it hasn't been tested by our team. Use at +your own risk. +

            + +

            Upgrading

            + +

            +The official way to upgrade is to export it into SQL script with the +SCRIPT command +USING YOUR CURRENT VERSION OF H2. +

            + +

            +Then create a fresh database USING THE NEW VERSION OF H2, then perform a +RUNSCRIPT to load your data. +You may need to specify FROM_1X flag, see documentation of this command for details. +

            + +

            MVStore file format

            + +

            +The MVStore file format we use (i.e. the default) is still mostly the same, but some subtle changes have been made +to the undo logs, +for the purposes of improving crash safety and also read/write performance. +

            + +

            Data types

            + +

            +The maximum length of CHARACTER +and CHARACTER VARYING data types +is 1,048,576 characters. For larger values use +CHARACTER LARGE OBJECT. +

            + +

            +BINARY +and BINARY VARYING +are now different data types. BINARY means fixed-length data type and its default length is 1. +The maximum length of binary strings is 1,048,576 bytes. For larger values use +BINARY LARGE OBJECT +

            + +

            +NUMERIC / DECIMAL / DEC without parameters +now have scale 0. For a variable-scale data type see +DECFLOAT. +Negative scale isn't allowed for these data types any more. +The maximum precision is now 100,000. +

            + +

            +ENUM values now have 1-based ordinal numbers. +

            + +

            +Arrays are now typed. +Arrays with mixed types of elements aren't supported. +In some cases they can be replaced with a new ROW +data type. +

            + +

            +All non-standard data types, with exception for TINYINT, JAVA_OBJECT, ENUM, GEOMETRY, JSON, and UUID are deprecated. +

            + +

            Identity columns and sequences

            + +

            +Various legacy vendor-specific declarations and expressions are deprecated +and may not work at all depending on compatibility mode. +

            + +

            +Identity columns should be normally declared with GENERATED BY DEFAULT AS IDENTITY or GENERATED ALWAYS AS IDENTITY +clauses, options may also be specified. +GENERATED ALWAYS AS IDENTITY columns cannot be assigned to a user-provided value +unless OVERRIDING SYSTEM VALUE is specified. +

            + +

            +NULL cannot be specified as a value for IDENTITY column to force identity generation +(with exception for some compatibility modes). +Use DEFAULT or simply exclude this column from insert column list. +

            + +

            +IDENTITY() and SCOPE_IDENTITY() aren't available in Regular mode. If you need to get a generated value, +you need to use data change delta tables +or Statement.getGeneratedKeys(). +

            + +

            +Undocumented Oracle-style .NEXTVAL and .CURRVAL expressions are restricted to Oracle compatibility mode. +Other functions are deprecated for Regular mode. +Use sequence value expression instead. +

            + +

            INFORMATION_SCHEMA

            + +

            +INFORMATION_SCHEMA in H2 is now compliant with the SQL Standard and other database systems, +but it isn't compliant with previous versions of H2. +You may need to update your queries. +

            + +

            General

            + +

            +There are a lot more SQL keywords now. Many SQL statements feature far better support of SQL-Standard behaviour. +There is a NON_KEYWORDS setting that +can be used as a temporary workaround if your application uses them as unquoted identifiers. +

            + +

            +Numeric and boolean values aren't comparable. It means you need to use TRUE, FALSE, or UNKNOWN (NULL) +as boolean literals. 1 and 0 don't work any more (with exception for some compatibility modes). +

            + +

            +Some other non-standard SQL syntax has been restricted to related compatibility modes. +Since H2 2.0.204 there is a LEGACY compatibility mode that provides some limited compatibility with previous versions. +

            + +

            +Various deprecated grammar elements are marked in red in documentation. Please, avoid their usage. +

            + +

            +Migrating an old database to the new version works most of the time. However, there are a couple of important changes in the new version to keep in mind: +

            + +
              +
            • Oracle-style units were never supported officially without being in Oracle compatibility mode, although some worked before. For example, the length of the VARCHAR datatype can no longer be specified using CHAR, only CHARACTERS or OCTETS. CHAR and BYTE need to be used in Oracle compatibility mode. +
            • IDENTITY syntax changed when type is specified: if the type for IDENTITY is specified, then the clause needs to be expanded as INTEGER GENERATED ALWAYS AS IDENTITY. Using just INTEGER IDENTITY no longer works. +
            • LOG connection setting removed: PageStore was removed from H2 so the "LOG=0" setting at the end of the URL (like +"jdbc:h2:file:/tmp/test;LOG=0") is no longer available. +
            + +
            diff --git a/h2/src/docsrc/html/mvstore.html b/h2/src/docsrc/html/mvstore.html index 660828e4df..a5fd229d05 100644 --- a/h2/src/docsrc/html/mvstore.html +++ b/h2/src/docsrc/html/mvstore.html @@ -1,7 +1,7 @@ @@ -59,7 +59,7 @@

            MVStore

            Overview

            The MVStore is a persistent, log structured key-value store. -It is planned to be the next storage subsystem of H2, +It is used as the default storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL.

            • MVStore stands for "multi-version store". @@ -605,7 +605,7 @@

              Chunk Format

              Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). -There is a +There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first.

              diff --git a/h2/src/docsrc/html/navigation.js b/h2/src/docsrc/html/navigation.js index eac75414d6..1262d1bf5f 100644 --- a/h2/src/docsrc/html/navigation.js +++ b/h2/src/docsrc/html/navigation.js @@ -1,7 +1,7 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group */ function scroll() { diff --git a/h2/src/docsrc/html/performance.html b/h2/src/docsrc/html/performance.html index 8ed6bbb5a4..54d1b4ba15 100644 --- a/h2/src/docsrc/html/performance.html +++ b/h2/src/docsrc/html/performance.html @@ -1,7 +1,7 @@ @@ -52,54 +52,54 @@

              Performance Comparison

              Embedded

              - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + +
              Test CaseUnitH2HSQLDBDerby
              Simple: Initms101919078280
              Simple: Query (random)ms13048731912
              Simple: Query (sequential)ms83518395415
              Simple: Update (sequential)ms961233321759
              Simple: Delete (sequential)ms950192232016
              Simple: Memory UsageMB21108
              BenchA: Initms91921337528
              BenchA: Transactionsms121922978541
              BenchA: Memory UsageMB12157
              BenchB: Initms90519938049
              BenchB: Transactionsms10915831165
              BenchB: Memory UsageMB17118
              BenchC: Initms249140038064
              BenchC: Transactionsms19798032840
              BenchC: Memory UsageMB19229
              Executed statements#193099519309951930995
              Total timems1367320686105569
              Statements per second#1412269334718291
              Simple: Initms102125106762
              Simple: Query (random)ms5136532035
              Simple: Query (sequential)ms134422107665
              Simple: Update (sequential)ms164230407034
              Simple: Delete (sequential)ms169723109981
              Simple: Memory UsageMB181513
              BenchA: Initms80128776576
              BenchA: Transactionsms136926294987
              BenchA: Memory UsageMB12159
              BenchB: Initms96625447161
              BenchB: Transactionsms3412316815
              BenchB: Memory UsageMB141010
              BenchC: Initms263031447420
              BenchC: Transactionsms173217422735
              BenchC: Memory UsageMB193411
              Executed statements#222203222220322222032
              Total timems140562597563171
              Statements per second#/s1580848554535174

              Client-Server

              - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + +
              Test CaseUnitH2 (Server)HSQLDBDerbyPostgreSQLMySQL
              Simple: Initms1633817198278603015629409
              Simple: Query (random)ms33992582619033153342
              Simple: Query (sequential)ms2184118699423473077432611
              Simple: Update (sequential)ms69137745285763269811350
              Simple: Delete (sequential)ms80519751422024448016555
              Simple: Memory UsageMB2211901
              BenchA: Initms1299614720247222637526060
              BenchA: Transactionsms1013410250184522145315877
              BenchA: Memory UsageMB1315901
              BenchB: Initms1526416889285463161029747
              BenchB: Transactionsms30173376184227711433
              BenchB: Memory UsageMB17121111
              BenchC: Initms1402010407176551952017532
              BenchC: Transactionsms50763160641160634530
              BenchC: Memory UsageMB19211111
              Executed statements#19309951930995193099519309951930995
              Total timems117049114777244803249215188446
              Statements per second#16497168237887774810246
              Test CaseUnitH2HSQLDBDerbyPostgreSQLMySQL
              Simple: Initms27989480554714232972109482
              Simple: Query (random)ms4821598414741408915140
              Simple: Query (sequential)ms33656491129599935676143536
              Simple: Update (sequential)ms987823565314182611350676
              Simple: Delete (sequential)ms1305628584439552098564647
              Simple: Memory UsageMB18151524
              BenchA: Initms20993425253833527794107723
              BenchA: Transactionsms1654929255289952311365036
              BenchA: Memory UsageMB12181114
              BenchB: Initms26785487723975632369115398
              BenchB: Transactionsms8981004619168181794
              BenchB: Memory UsageMB16111225
              BenchC: Initms1826626865393252454770531
              BenchC: Transactionsms656977839412891619150
              BenchC: Memory UsageMB17351327
              Executed statements#22220322222032222203222220322222032
              Total timems179460320546390994237392763113
              Statements per second#/s123816932568393602911

              Benchmark Results and Comments

              H2

              -Version 1.4.177 (2014-04-12) was used for the test. +Version 2.0.202 (2021-11-25) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. @@ -108,14 +108,14 @@

              H2

              HSQLDB

              -Version 2.3.2 was used for the test. +Version 2.5.1 was used for the test. Cached tables are used in this test (hsqldb.default_table_type=cached), and the write delay is 1 second (SET WRITE_DELAY 1).

              Derby

              -Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. +Version 10.14.2.0 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified: leaving autocommit on is a problem for Derby. @@ -132,33 +132,42 @@

              Derby

              PostgreSQL

              -Version 9.1.5 was used for the test. +Version 13.4 was used for the test. The following options where changed in postgresql.conf: -fsync = off, commit_delay = 1000. +fsync = off, commit_delay = 100000 (microseconds). PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.

              MySQL

              -Version 5.1.65-log was used for the test. +Version 8.0.27 was used for the test. MySQL was run with the InnoDB backend. -The setting innodb_flush_log_at_trx_commit -(found in the my.ini / my.cnf file) was set to 0. Otherwise (and by default), MySQL is slow -(around 140 statements per second in this test) because it tries to flush the data to disk for each commit. + The settings innodb_flush_log_at_trx_commit and sync_binlog +(found in the my.ini / community-mysql-server.cnf file) were set to 0. Otherwise +(and by default), MySQL is slow (around 140 statements per second in this test) +because it tries to flush the data to disk for each commit. For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Too bad this setting is not listed in the configuration wizard, and it is always overwritten when using the wizard. -You need to change this setting manually in the file my.ini / my.cnf, and then restart the service. +You need to change those settings manually in the file my.ini / community-mysql-server.cnf, +and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.

              +

              SQLite

              +

              +SQLite 3.36.0.2 was tested, but the results are not published currently, +because it's about 50 times slower than H2 in embedded mode. +Any tips on how to configure SQLite for higher performance are welcome. +

              +

              Firebird

              -Firebird 1.5 (default installation) was tested, but the results are not published currently. -It is possible to run the performance test with the Firebird database, -and any information on how to configure Firebird for higher performance are welcome. +Firebird 3.0 (default installation) was tested, but failed on the multi-threaded part of the test. +It is likely possible to run the performance test with the Firebird database, +and any information on how to configure Firebird for this is welcome.

              Why Oracle / MS SQL Server / DB2 are Not Listed

              @@ -166,7 +175,6 @@

              Why Oracle / MS SQL Server / DB2 are Not Listed

              The license of these databases does not allow to publish benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. -SQLite was not tested because the JDBC driver doesn't support transactions.

              About this Benchmark

              @@ -210,8 +218,7 @@

              Comparing Embedded with Server Databases

              Test Platform

              -This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. -The JVM used is Sun JDK 1.6. +This test is run on Fedora v.34 with Oracle JVM 1.8 and SSD drive.

              Multiple Runs

              @@ -401,7 +408,7 @@

              How Data is Stored Internally

              then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, -then a hidden auto-increment column of type BIGINT is added to the table, +then a hidden identity column of type BIGINT is added to the table, which is used as the key for the data b-tree. All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB columns, which are stored externally). @@ -491,7 +498,7 @@

              Prepared Statements and IN(...)

               PreparedStatement prep = conn.prepareStatement(
                   "SELECT * FROM TEST WHERE ID = ANY(?)");
              -prep.setObject(1, new Object[] { "1", "2" });
              +prep.setObject(1, new Long[] { 1L, 2L });
               ResultSet rs = prep.executeQuery();
               
              @@ -512,7 +519,7 @@

              Data Types

              Each data type has different storage and performance characteristics:

              • The DECIMAL/NUMERIC type is slower - and requires more storage than the REAL and DOUBLE types. + and requires more storage than the REAL and DOUBLE PRECISION types.
              • Text types are slower to read, write, and compare than numeric types and generally require more storage.
              • See Large Objects for information on BINARY vs. BLOB @@ -749,7 +756,8 @@

                How Data is Stored and How Indexes Work

            Access by row id is fast because the data is sorted by this key. -Please note the row id is not available until after the row was added (that means, it can not be used in computed columns or constraints). +Please note the row id is not available until after the row was added +(that means, it can not be used in generated columns or constraints). If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT: @@ -872,19 +880,6 @@

            Using Multiple Indexes

            Fast Database Import

            -To speed up large imports, consider using the following options temporarily: -

            -
            • SET LOG 0 (disabling the transaction log) -
            • SET CACHE_SIZE (a large cache is faster) -
            • SET LOCK_MODE 0 (disable locking) -
            • SET UNDO_LOG 0 (disable the session undo log) -
            -

            -These options can be set in the database URL: -jdbc:h2:~/test;LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0. -Most of those options are not recommended for regular use, that means you need to reset them after use. -

            -

            If you have to import a lot of rows, use a PreparedStatement or use CSV import. Please note that CREATE TABLE(...) ... AS SELECT ... is faster than CREATE TABLE(...); INSERT INTO ... SELECT .... diff --git a/h2/src/docsrc/html/quickstart.html b/h2/src/docsrc/html/quickstart.html index 08e7ce009c..5bb4fc0a41 100644 --- a/h2/src/docsrc/html/quickstart.html +++ b/h2/src/docsrc/html/quickstart.html @@ -1,7 +1,7 @@ diff --git a/h2/src/docsrc/html/roadmap.html b/h2/src/docsrc/html/roadmap.html deleted file mode 100644 index caa5ffdd03..0000000000 --- a/h2/src/docsrc/html/roadmap.html +++ /dev/null @@ -1,532 +0,0 @@ - - - - - - - -Roadmap - - - - - -
            - - -

            Roadmap

            -

            -New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests. -Of course, patches are always welcome, but are not always applied as is. -See also Providing Patches. -

            - -

            Version 1.5.x: Planned Changes

            -
            • Replace file password hash with file encryption key; validate encryption key when connecting. -
            • Remove "set binary collation" feature. -
            • Remove the encryption algorithm XTEA. -
            • Disallow referencing other tables in a table (via constraints for example). -
            • Remove PageStore features like compress_lob. -
            - -

            Version 1.4.x: Planned Changes

            -
            • Change license to MPL 2.0. -
            • Automatic migration from 1.3 databases to 1.4. -
            • Option to disable the file name suffix somehow (issue 447). -
            - -

            Priority 1

            -
            • Bugfixes. -
            • Server side cursors. -
            - -

            Priority 2

            -
            • Support hints for the optimizer (which index to use, enforce the join order). -
            • Full outer joins. -
            • Access rights: remember the owner of an object. - Create, alter and drop privileges. - COMMENT: allow owner of object to change it. - Issue 208: Access rights for schemas. -
            • Test multi-threaded in-memory db access. -
            • Support GRANT SELECT, UPDATE ON [schemaName.] *. -
            • Migrate database tool (also from other database engines). For Oracle, maybe use - DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL. -
            • Clustering: support mixed clustering mode (one embedded, others in server mode). -
            • Clustering: reads should be randomly distributed (optional) or to a designated database on RAM (parameter: READ_FROM=3). -
            • PostgreSQL catalog: use BEFORE SELECT triggers instead of views over metadata tables. -
            • Test very large databases and LOBs (up to 256 GB). -
            • Store all temp files in the temp directory. -
            • Make DDL (Data Definition) operations transactional. -
            • Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED). -
            • Groovy Stored Procedures: http://groovy.codehaus.org/GSQL -
            • Add a migration guide (list differences between databases). -
            • Optimization: automatic index creation suggestion using the trace file? -
            • Fulltext search Lucene: analyzer configuration, mergeFactor. -
            • Compression performance: don't allocate buffers, compress / expand in to out buffer. -
            • Rebuild index functionality to shrink index size and improve performance. -
            • Console: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA). -
            • Test performance again with SQL Server, Oracle, DB2. -
            • Test with Spatial DB in a box / JTS: http://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification. -
            • Improve documentation of MVCC (Multi Version Concurrency Control). -
            • Find a tool to view large text file (larger than 100 MB), with find, page up and down (like less), truncate before / after. -
            • Implement, test, document XAConnection and so on. -
            • Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption). -
            • CHECK: find out what makes CHECK=TRUE slow, move to assertions. -
            • Drop with invalidate views (so that source code is not lost). Check what other databases do exactly. -
            • Index usage for (ID, NAME)=(1, 'Hi'); document. -
            • Set a connection read only (Connection.setReadOnly) or using a connection parameter. -
            • Access rights: finer grained access control (grant access for specific functions). -
            • Version check: docs / web console (using Javascript), and maybe in the library (using TCP/IP). -
            • Web server classloader: override findResource / getResourceFrom. -
            • Cost for embedded temporary view is calculated wrong, if result is constant. -
            • Count index range query (count(*) where id between 10 and 20). -
            • Performance: update in-place. -
            • Clustering: when a database is back alive, automatically synchronize with the master (requires readable transaction log). -
            • Database file name suffix: a way to use no or a different suffix (for example using a slash). -
            • Eclipse plugin. -
            • Asynchronous queries to support publish/subscribe: SELECT ... FOR READ WAIT [maxMillisToWait]. - See also MS SQL Server "Query Notification". -
            • Fulltext search (native): reader / tokenizer / filter. -
            • Linked schema using CSV files: one schema for a directory of files; support indexes for CSV files. -
            • iReport to support H2. -
            • Include SMTP (mail) client (alert on cluster failure, low disk space,...). -
            • Option for SCRIPT to append to a file. -
            • JSON parser and functions. -
            • Copy database: tool with config GUI and batch mode, extensible (example: compare). -
            • Document, implement tool for long running transactions using user-defined compensation statements. -
            • Support SET TABLE DUAL READONLY. -
            • Events for: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http://docs.openlinksw.com/virtuoso/fn_dbev_startup.html -
            • Optimization: simpler log compression. -
            • Support more standard INFORMATION_SCHEMA tables, as defined in SQL standard. -
            • Compatibility: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby: division by zero. HSQLDB: 0.0e1 / 0.0e1 is NaN. -
            • Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R). -
            • Custom class loader to reload functions on demand. -
            • Test http://mysql-je.sourceforge.net/ -
            • H2 Console: the webclient could support more features like phpMyAdmin. -
            • Support Oracle functions: TO_NUMBER. -
            • Work on the Java to C converter. -
            • The HELP information schema can be directly exposed in the Console. -
            • Maybe use the 0x1234 notation for binary fields, see MS SQL Server. -
            • Support Oracle CONNECT BY in some way: http://www.adp-gmbh.ch/ora/sql/connect_by.html http://philip.greenspun.com/sql/trees.html -
            • SQL 2003: http://www.wiscorp.com/sql_2003_standard.zip -
            • Version column (number/sequence and timestamp based). -
            • Test and document UPDATE TEST SET (ID, NAME) = (SELECT ID*10, NAME || '!' FROM TEST T WHERE T.ID=TEST.ID). -
            • Max memory rows / max undo log size: use block count / row size not row count. -
            • Implement point-in-time recovery. -
            • Support PL/SQL (programming language / control flow statements). -
            • LIKE: improved version for larger texts (currently using naive search). -
            • Throw an exception when the application calls getInt on a Long (optional). -
            • Default date format for input and output (local date constants). -
            • Document ROWNUM usage for reports: SELECT ROWNUM, * FROM (subquery). -
            • File system that writes to two file systems (replication, replicating file system). -
            • Standalone tool to get relevant system properties and add it to the trace output. -
            • Support 'call proc(1=value)' (PostgreSQL, Oracle). -
            • Console: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?). -
            • Console: autocomplete Ctrl+Space inserts template. -
            • Option to encrypt .trace.db file. -
            • Auto-Update feature for database, .jar file. -
            • ResultSet SimpleResultSet.readFromURL(String url): id varchar, state varchar, released timestamp. -
            • Partial indexing (see PostgreSQL). -
            • Add GUI to build a custom version (embedded, fulltext,...) using build flags. -
            • http://rubyforge.org/projects/hypersonic/ -
            • Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app). -
            • Table order: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility). -
            • Backup tool should work with other databases as well. -
            • Console: -ifExists doesn't work for the console. Add a flag to disable other dbs. -
            • Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess). -
            • Java static code analysis: https://pmd.github.io/ -
            • Compatibility for CREATE SCHEMA AUTHORIZATION. -
            • Implement Clob / Blob truncate and the remaining functionality. -
            • File locking: writing a system property to detect concurrent access from the same VM (different classloaders). -
            • Pure SQL triggers (example: update parent table if the child table is changed). -
            • Add H2 to Gem (Ruby install system). -
            • Support linked JCR tables. -
            • Native fulltext search: min word length; store word positions. -
            • Add an option to the SCRIPT command to generate only portable / standard SQL. -
            • Updatable views: create 'instead of' triggers automatically if possible (simple cases first). -
            • Improve create index performance. -
            • Compact databases without having to close the database (vacuum). -
            • Implement more JDBC 4.0 features. -
            • Support TRANSFORM / PIVOT as in MS Access. -
            • Support updatable views with join on primary keys (to extend a table). -
            • Public interface for functions (not public static). -
            • Support reading the transaction log. -
            • Feature matrix. -
            • Updatable result set on table without primary key or unique index. -
            • Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString') -
            • Support nested transactions (possibly using savepoints internally). -
            • Add a benchmark for bigger databases, and one for many users. -
            • Compression in the result set over TCP/IP. -
            • Support curtimestamp (like curtime, curdate). -
            • Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options. -
            • Release locks (shared or exclusive) on demand -
            • Support OUTER UNION -
            • Support parameterized views (similar to CSVREAD, but using just SQL for the definition) -
            • A way (JDBC driver) to map an URL (jdbc:h2map:c1) to a connection object -
            • Support dynamic linked schema (automatically adding/updating/removing tables) -
            • Clustering: adding a node should be very fast and without interrupting clients (very short lock) -
            • Compatibility: # is the start of a single line comment (MySQL) but date quote (Access). Mode specific -
            • Run benchmarks with Android, Java 7, java -server -
            • Optimizations: faster hash function for strings. -
            • DatabaseEventListener: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality -
            • Benchmark: add a graph to show how databases scale (performance/database size) -
            • Implement a SQLData interface to map your data over to a custom object -
            • In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers = true) -
            • Support multiple directories (on different hard drives) for the same database -
            • Server protocol: use challenge response authentication, but client sends hash(user+password) encrypted with response -
            • Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server) -
            • Support native XML data type - see http://en.wikipedia.org/wiki/SQL/XML -
            • Support triggers with a string property or option: SpringTrigger, OSGITrigger -
            • MySQL compatibility: update test1 t1, test2 t2 set t1.id = t2.id where t1.id = t2.id; -
            • Ability to resize the cache array when resizing the cache -
            • Time based cache writing (one second after writing the log) -
            • Check state of H2 driver for DDLUtils: http://issues.apache.org/jira/browse/DDLUTILS-185 -
            • Index usage for REGEXP LIKE. -
            • Compatibility: add a role DBA (like ADMIN). -
            • Better support multiple processors for in-memory databases. -
            • Support N'text' -
            • Support compatibility for jdbc:hsqldb:res: -
            • HSQLDB compatibility: automatically convert to the next 'higher' data type. - Example: cast(2000000000 as int) + cast(2000000000 as int); - (HSQLDB: long; PostgreSQL: integer out of range) -
            • Provide a Java SQL builder with standard and H2 syntax -
            • Trace: write OS, file system, JVM,... when opening the database -
            • Support indexes for views (probably requires materialized views) -
            • Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters -
            • Server: use one listener (detect if the request comes from a PG or TCP client) -
            • Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200 -
            • Sequence: PostgreSQL compatibility (rename, create) http://www.postgresql.org/docs/8.2/static/sql-altersequence.html -
            • Support a special trigger on all tables to allow building a transaction log reader. -
            • File system with a background writer thread; test if this is faster -
            • Better document the source code (high level documentation). -
            • Support select * from dual a left join dual b on b.x=(select max(x) from dual) -
            • Optimization: don't lock when the database is read-only -
            • Issue 146: Support merge join. -
            • Integrate spatial functions from http://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download -
            • Cluster: hot deploy (adding a node at runtime). -
            • Support DatabaseMetaData.insertsAreDetected: updatable result sets should detect inserts. -
            • Native search: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping -
            • Improve documentation of access rights. -
            • Support opening a database that is in the classpath, maybe using a new file system. Workaround: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation(). -
            • Remember the user defined data type (domain) of a column. -
            • Auto-server: add option to define the port range or list. -
            • Support Jackcess (MS Access databases) -
            • Built-in methods to write large objects (BLOB and CLOB): FILE_WRITE('test.txt', 'Hello World') -
            • Improve time to open large databases (see mail 'init time for distributed setup') -
            • Move Maven 2 repository from hsql.sf.net to h2database.sf.net -
            • Java 1.5 tool: JdbcUtils.closeSilently(s1, s2,...) -
            • Optimize A=? OR B=? to UNION if the cost is lower. -
            • Javadoc: document design patterns used -
            • Support custom collators, for example for natural sort (for text that contains numbers). -
            • Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt) -
            • Convert SQL-injection-2.txt to html document, include SQLInjection.java sample -
            • Support OUT parameters in user-defined procedures. -
            • Web site design: http://www.igniterealtime.org/projects/openfire/index.jsp -
            • HSQLDB compatibility: Openfire server uses: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; - CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC -
            • Translation: use ${.} in help.csv -
            • Translated .pdf -
            • Recovery tool: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file -
            • RECOVER=2 to backup the database, run recovery, open the database -
            • Recovery should work with encrypted databases -
            • Corruption: new error code, add help -
            • Space reuse: after init, scan all storages and free those that don't belong to a live database object -
            • Access rights: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects) -
            • Support NOCACHE table option (Oracle). -
            • Support table partitioning. -
            • The database should be kept open for a longer time when using the server mode. -
            • Javadocs: for each tool, add a copy & paste sample in the class level. -
            • Javadocs: add @author tags. -
            • Fluent API for tools: Server.createTcpServer().setPort(9081).setPassword(password).start(); -
            • MySQL compatibility: real SQL statement for DESCRIBE TEST -
            • Use a default delay of 1 second before closing a database. -
            • Write (log) to system table before adding to internal data structures. -
            • Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case). -
            • Oracle compatibility: support NLS_DATE_FORMAT. -
            • Support for Thread.interrupt to cancel running statements. -
            • Cluster: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process). -
            • H2 Console: support CLOB/BLOB download using a link. -
            • Support flashback queries as in Oracle. -
            • Import / Export of fixed width text files. -
            • HSQLDB compatibility: automatic data type for SUM if the value is too big (by default use the same type as the data). -
            • Improve the optimizer to select the right index for special cases: where id between 2 and 4 and booleanColumn -
            • Linked tables: make hidden columns available (Oracle: rowid and ora_rowscn columns). -
            • H2 Console: in-place autocomplete. -
            • Support large databases: split database files to multiple directories / disks (similar to tablespaces). -
            • H2 Console: support configuration option for fixed width (monospace) font. -
            • Native fulltext search: support analyzers (specially for Chinese, Japanese). -
            • Automatically compact databases from time to time (as a background process). -
            • Test Eclipse DTP. -
            • H2 Console: autocomplete: keep the previous setting -
            • executeBatch: option to stop at the first failed statement. -
            • Implement OLAP features as described here: http://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5 -
            • Support Oracle ROWID (unique identifier for each row). -
            • MySQL compatibility: alter table add index i(c), add constraint c foreign key(c) references t(c); -
            • Server mode: improve performance for batch updates. -
            • Applets: support read-only databases in a zip file (accessed as a resource). -
            • Long running queries / errors / trace system table. -
            • Better document FTL_SEARCH, FTL_SEARCH_DATA. -
            • Sequences: CURRVAL should be session specific. Compatibility with PostgreSQL. -
            • Index creation using deterministic functions. -
            • ANALYZE: for unique indexes that allow null, count the number of null. -
            • MySQL compatibility: multi-table delete: DELETE .. FROM .. [,...] USING - See http://dev.mysql.com/doc/refman/5.0/en/delete.html -
            • AUTO_SERVER: support changing IP addresses (disable a network while the database is open). -
            • Avoid using java.util.Calendar internally because it's slow, complicated, and buggy. -
            • Support TRUNCATE .. CASCADE like PostgreSQL. -
            • Fulltext search: lazy result generation using SimpleRowSource. -
            • Fulltext search: support alternative syntax: WHERE FTL_CONTAINS(name, 'hello'). -
            • MySQL compatibility: support INSERT INTO table SET column1 = value1, column2 = value2 -
            • Docs: add a one line description for each functions and SQL statements at the top (in the link section). -
            • Javadoc search: weight for titles should be higher ('random' should list Functions as the best match). -
            • Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes. -
            • Issue 50: Oracle compatibility: support calling 0-parameters functions without parenthesis. Make constants obsolete. -
            • MySQL, HSQLDB compatibility: support where 'a'=1 (not supported by Derby, PostgreSQL) -
            • Finer granularity for SLF4J trace - See http://code.google.com/p/h2database/issues/detail?id=62 -
            • Add database creation date and time to the database. -
            • Support ASSERTION. -
            • MySQL compatibility: support comparing 1='a' -
            • Support PostgreSQL lock modes: http://www.postgresql.org/docs/8.3/static/explicit-locking.html -
            • PostgreSQL compatibility: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver. -
            • RunScript should be able to read from system in (or quiet mode for Shell). -
            • Natural join: support select x from dual natural join dual. -
            • Support using system properties in database URLs (may be a security problem). -
            • Natural join: somehow support this: select a.x, b.x, x from dual a natural join dual b -
            • Use the Java service provider mechanism to register file systems and function libraries. -
            • MySQL compatibility: for auto_increment columns, convert 0 to next value (as when inserting NULL). -
            • Optimization for multi-column IN: use an index if possible. Example: (A, B) IN((1, 2), (2, 3)). -
            • Optimization for EXISTS: convert to inner join or IN(..) if possible. -
            • Serialized file lock: support long running queries. -
            • Network: use 127.0.0.1 if other addresses don't work. -
            • Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication. -
            • Support reading JCR data: one table per node type; query table; cache option -
            • OSGi: create a sample application, test, document. -
            • help.csv: use complete examples for functions; run as test case. -
            • Functions to calculate the memory and disk space usage of a table, a row, or a value. -
            • Re-implement PooledConnection; use a lightweight connection object. -
            • Doclet: convert tests in javadocs to a java class. -
            • Doclet: format fields like methods, but support sorting by name and value. -
            • Doclet: shrink the html files. -
            • Allow to scan index backwards starting with a value (to better support ORDER BY DESC). -
            • Java Service Wrapper: try http://yajsw.sourceforge.net/ -
            • Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE. -
            • Use a lazy and auto-close input stream (open resource when reading, close on eof). -
            • Connection pool: 'reset session' command (delete temp tables, rollback, auto-commit true). -
            • Improve SQL documentation, see http://www.w3schools.com/sql/ -
            • MySQL compatibility: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL. -
            • Sybase/DB2/Oracle compatibility: support out parameters in stored procedures - See http://code.google.com/p/h2database/issues/detail?id=83 -
            • Combine Server and Console tool (only keep Server). -
            • Store the Lucene index in the database itself. -
            • MVCC: compare concurrent update behavior with PostgreSQL and Oracle. -
            • HSQLDB compatibility: CREATE FUNCTION (maybe using a Function interface). -
            • HSQLDB compatibility: support CALL "java.lang.Math.sqrt"(2.0) -
            • Support comma as the decimal separator in the CSV tool. -
            • Compatibility: Java functions with SQLJ Part1 http://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz -
            • Compatibility: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation. -
            • CACHE_SIZE: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache. -
            • PostgreSQL compatibility: when in PG mode, treat BYTEA data like PG. -
            • Support =ANY(array) as in PostgreSQL. See also http://www.postgresql.org/docs/8.0/interactive/arrays.html -
            • IBM DB2 compatibility: support PREVIOUS VALUE FOR sequence. -
            • Compatibility: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer). -
            • FTP: document the server, including -ftpTask option to execute / kill remote processes -
            • FTP: problems with multithreading? -
            • FTP: implement SFTP / FTPS -
            • FTP: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file). -
            • Improve database file locking (maybe use native file locking). The current approach seems to be problematic - if the file system is on a remote share (see Google Group 'Lock file modification time is in the future'). -
            • Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE. -
            • Issue 107: Prefer using the ORDER BY index if LIMIT is used. -
            • An index on (id, name) should be used for a query: select * from t where s=? order by i -
            • Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). - See PostgreSQL. -
            • Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true). -
            • Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2). -
            • Fast alter table add column. -
            • Improve concurrency for in-memory database operations. -
            • Issue 122: Support for connection aliases for remote tcp connections. -
            • Fast scrambling (strong encryption doesn't help if the password is included in the application). -
            • H2 Console: support -webPassword to require a password to access preferences or shutdown. -
            • Issue 126: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number. -
            • Issue 127: Support activation/deactivation of triggers -
            • Issue 130: Custom log event listeners -
            • Issue 132: Use Java enum trigger type. -
            • Issue 134: IBM DB2 compatibility: session global variables. -
            • Cluster: support load balance with values for each server / auto detect. -
            • FTL_SET_OPTION(keyString, valueString) with key stopWords at first. -
            • Pluggable access control mechanism. -
            • Fulltext search (Lucene): support streaming CLOB data. -
            • Document/example how to create and read an encrypted script file. -
            • Fulltext search (Lucene): only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible. -
            • Support a way to create or read compressed encrypted script files using an API. -
            • The network client should better detect if the server is not an H2 server and fail early. -
            • H2 Console: support CLOB/BLOB upload. -
            • Database file lock: detect hibernate / standby / very slow threads (compare system time). -
            • Automatic detection of redundant indexes. -
            • Maybe reject join without "on" (except natural join). -
            • Implement GiST (Generalized Search Tree for Secondary Storage). -
            • Function to read a number of bytes/characters from a BLOB or CLOB. -
            • Issue 156: Support SELECT ? UNION SELECT ?. -
            • Automatic mixed mode: support a port range list (to avoid firewall problems). -
            • Support the pseudo column rowid, oid, _rowid_. -
            • H2 Console / large result sets: stream early instead of keeping a whole result in-memory -
            • Support TRUNCATE for linked tables. -
            • UNION: evaluate INTERSECT before UNION (like most other databases except Oracle). -
            • Delay creating the information schema, and share metadata columns. -
            • TCP Server: use a nonce (number used once) to protect unencrypted channels against replay attacks. -
            • Simplify running scripts and recovery: CREATE FORCE USER (overwrites an existing user). -
            • Support CREATE DATABASE LINK (a custom JDBC driver is already supported). -
            • Support large GROUP BY operations. Issue 216. -
            • Issue 163: Allow to create foreign keys on metadata types. -
            • Logback: write a native DBAppender. -
            • Cache size: don't use more cache than what is available. -
            • Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread. -
            • Tree index: Instead of an AVL tree, use a general balanced trees or a scapegoat tree. -
            • User defined functions: allow to store the bytecode (of just the class, or the jar file of the extension) in the database. -
            • Compatibility: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL. -
            • Optimizer: WHERE X=? AND Y IN(?), it always uses the index on Y. Should be cost based. -
            • Common Table Expression (CTE) / recursive queries: support parameters. Issue 314. -
            • Oracle compatibility: support INSERT ALL. -
            • Issue 178: Optimizer: index usage when both ascending and descending indexes are available. -
            • Issue 179: Related subqueries in HAVING clause. -
            • IBM DB2 compatibility: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero. -
            • Maybe use a different page layout: keep the data at the head of the page, and ignore the tail - (don't store / read it). This may increase write / read performance depending on the file system. -
            • Indexes of temporary tables are currently kept in-memory. Is this how it should be? -
            • The Shell tool should support the same built-in commands as the H2 Console. -
            • Maybe use PhantomReference instead of finalize. -
            • Database file name suffix: should only have one dot by default. Example: .h2db -
            • Issue 196: Function based indexes -
            • Fix the disk space leak (killing the process at the exact right moment will increase - the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java -
            • Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way. -
            • ODBC: encrypted databases are not supported because the ;CIPHER= can not be set. -
            • Support CLOB and BLOB update, specially conn.createBlob().setBinaryStream(1); -
            • Optimizer: index usage when both ascending and descending indexes are available. Issue 178. -
            • Issue 306: Support schema specific domains. -
            • Triggers: support user defined execution order. Oracle: - CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT - ON TEST FOR EACH ROW FOLLOWS TEST_1. - SQL specifies that multiple triggers should be fired in time-of-creation order. - PostgreSQL uses name order, which was judged to be more convenient. - Derby: triggers are fired in the order in which they were created. -
            • PostgreSQL compatibility: combine "users" and "roles". See: - http://www.postgresql.org/docs/8.1/interactive/user-manag.html -
            • Improve documentation of system properties: only list the property names, default values, and description. -
            • Support running totals / cumulative sum using SUM(..) OVER(..). -
            • Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize) -
            • Triggers: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others). -
            • Common Table Expression (CTE) / recursive queries: support INSERT INTO ... SELECT ... Issue 219. -
            • Common Table Expression (CTE) / recursive queries: support non-recursive queries. Issue 217. -
            • Common Table Expression (CTE) / recursive queries: avoid endless loop. Issue 218. -
            • Common Table Expression (CTE) / recursive queries: support multiple named queries. Issue 220. -
            • Common Table Expression (CTE) / recursive queries: identifier scope may be incorrect. Issue 222. -
            • Log long running transactions (similar to long running statements). -
            • Parameter data type is data type of other operand. Issue 205. -
            • Some combinations of nested join with right outer join are not supported. -
            • DatabaseEventListener.openConnection(id) and closeConnection(id). -
            • Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, - or to prevent to login with the same username and password from different IPs. - Possibly using the DatabaseEventListener API, or a new API. -
            • Compatibility for data type CHAR (Derby, HSQLDB). Issue 212. -
            • Optimizer: use a histogram of the data, specially for non-normal distributions. -
            • Trigger: allow declaring as source code (like functions). -
            • User defined aggregate: allow declaring as source code (like functions). -
            • The error "table not found" is sometimes caused by using the wrong database. - Add "(this database is empty)" to the exception message if applicable. -
            • MySQL + PostgreSQL compatibility: support string literal escape with \n. -
            • PostgreSQL compatibility: support string literal escape with double \\. -
            • Document the TCP server "management_db". Maybe include the IP address of the client. -
            • Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main -
            • If a database object was not found in the current schema, but one with the same name existed in another schema, include that in the error message. -
            • Optimization to use an index for OR when using multiple keys: where (key1 = ? and key2 = ?) OR (key1 = ? and key2 = ?) -
            • Issue 302: Support optimizing queries with both inner and outer joins, as in: - select * from test a inner join test b on a.id=b.id inner join o on o.id=a.id where b.x=1 - (the optimizer should swap a and b here). - See also TestNestedJoins, tag "swapInnerJoinTables". -
            • Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...; -
            • nioMapped file system: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example). -
            • Column as parameter of function table. Issue 228. -
            • Connection pool: detect ;AUTOCOMMIT=FALSE in the database URL, and if set, - disable autocommit for all connections. -
            • Compatibility with MS Access: support "&" to concatenate text. -
            • The BACKUP statement should not synchronize on the database, and therefore should not block other users. -
            • Document the database file format. -
            • Support reading LOBs. -
            • Require appending DANGEROUS=TRUE when using certain dangerous settings such as - LOG=0, LOG=1, LOCK_MODE=0, disabling FILE_LOCK,... -
            • Support UDT (user defined types) similar to how Apache Derby supports it: - check constraint, allow to use it in Java functions as parameters (return values already seem to work). -
            • Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file, - optional compatibility with current encrypted database files). -
            • Issue 229: SELECT with simple OR tests uses tableScan when it could use indexes. -
            • GROUP BY queries should use a temporary table if there are too many rows. -
            • BLOB: support random access when reading. -
            • CLOB: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form). -
            • Compatibility: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...). -
            • Compatibility with MySQL: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...). -
            • Compatibility with MySQL: support non-strict mode (sql_mode = "") any data - that is too large for the column will just be truncated or set to the default value. -
            • The full condition should be sent to the linked table, not just the indexed condition. - Example: TestLinkedTableFullCondition -
            • Compatibility with IBM DB2: CREATE PROCEDURE. -
            • Compatibility with IBM DB2: SQL cursors. -
            • Single-column primary key values are always stored explicitly. This is not required. -
            • Compatibility with MySQL: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8). -
            • CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true. -
            • Compatibility for ARRAY data type (Oracle: VARRAY(n) of VARCHAR(m); HSQLDB: VARCHAR(n) ARRAY; Postgres: VARCHAR(n)[]). -
            • PostgreSQL compatible array literal syntax: ARRAY[['a', 'b'], ['c', 'd']] -
            • PostgreSQL compatibility: UPDATE with FROM. -
            • Issue 297: Oracle compatibility for "at time zone". -
            • IBM DB2 compatibility: IDENTITY_VAL_LOCAL(). -
            • Support SQL/XML data type. -
            • Support concurrent opening of databases. -
            • Improved error message and diagnostics in case of network configuration problems. -
            • TRUNCATE should reset the identity columns by default in MySQL and MS SQL Server compatibility modes (and possibly other). -
            • Adding a primary key should make the columns 'not null' unless there is a row with null - (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby). -
            • ARRAY data type: support Integer[] and so on in Java functions (currently only Object[] is supported). -
            • MySQL compatibility: LOCK TABLES a READ, b READ - see also http://dev.mysql.com/doc/refman/5.0/en/lock-tables.html -
            • The HTML to PDF converter should use http://code.google.com/p/wkhtmltopdf/ -
            • Issue 303: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)". -
            • MySQL compatibility: update test1 t1, test2 t2 set t1.name=t2.name where t1.id=t2.id. -
            • Issue 283: Improve performance of H2 on Android. -
            • Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s). -
            • Column compression option - see http://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d -
            • MS SQL Server compatibility: support @@ROWCOUNT. -
            • Issue 311: Serialized lock mode: executeQuery of write operations fails. -
            • PostgreSQL compatibility: support PgAdmin III (specially the function current_setting). -
            • Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). -
            • Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase). -
            • TRANSACTION_ID() for in-memory databases. -
            • TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL). -
            • Support [INNER | OUTER] JOIN USING(column [,...]). -
            • Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle) -
            • GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby). -
            • Index conditions: WHERE AGE>1 should not scan through all rows with AGE=1. -
            • PHP support: H2 should support PDO, or test with PostgreSQL PDO. -
            • Outer joins: if no column of the outer join table is referenced, the outer join table could be removed from the query. -
            • Cluster: allow using auto-increment and identity columns by ensuring executed in lock-step. -
            • MySQL compatibility: index names only need to be unique for the given table. -
            • Issue 352: constraints: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, - and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases. -
            • Issue 348: Oracle compatibility: division should return a decimal result. -
            • Read rows on demand: instead of reading the whole row, only read up to that column that is requested. - Keep a pointer to the data area and the column id that is already read. -
            • Long running transactions: log session id when detected. -
            • Optimization: "select id from test" should use the index on id even without "order by". -
            • Sybase SQL Anywhere compatibility: SELECT TOP ... START AT ... -
            • Issue 390: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR -
            - -

            Not Planned

            -
              -
            • HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility. -
            • String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively. -
            • In prepared statements, identifier names (table names and so on) can not be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements. -
            - -
            - diff --git a/h2/src/docsrc/html/search.js b/h2/src/docsrc/html/search.js index 3b3779d5d3..6d32a658d3 100644 --- a/h2/src/docsrc/html/search.js +++ b/h2/src/docsrc/html/search.js @@ -1,7 +1,7 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group */ var pages = new Array(); diff --git a/h2/src/docsrc/html/security.html b/h2/src/docsrc/html/security.html new file mode 100644 index 0000000000..fe8d29f841 --- /dev/null +++ b/h2/src/docsrc/html/security.html @@ -0,0 +1,73 @@ + + + + + + +Features + + + + + +
            + + +

            Securing your H2

            + + + Introduction
            + + Network exposed
            + + Alias / Stored Procedures
            + + Grants / Roles / Permissions
            + + Encrypted storage
            + +

            Introduction

            +

            +H2 is __not__ designed to be run in an adversarial environment. You should absolutely not expose your H2 server to untrusted connections. +

            +

            +Running H2 in embedded mode is the best choice - it is not externally exposed. +

            + +

            Network exposed

            +

            +When running an H2 server in TCP mode, first prize is to run with it only listening to connections on localhost (i.e. 127.0.0.1). +

            +

            +Second prize is running listening to restricted ports on a secured network. +

            +

            +If you expose H2 to the broader Internet, you can secure the connection with SSL, but this is a rather tricky thing to get right, between JVM bugs, certificates and choosing a decent cipher. +

            + +

            Alias / Stored procedures

            +

            +Anything created with CREATE ALIAS can do anything the JVM can do, which includes reading/writing from the filesystem on the machine the JVM is running on. +

            + +

            Grants / Roles / Permissions

            +

            +GRANT / REVOKE TODO +

            + +

            Encrypted storage

            +

            +Encrypting your on-disk database will provide a small measure of security to your stored data. +You should not assume that this is any kind of real security against a determined opponent however, +since there are many repeated data structures that will allow someone with resources and time to extract the secret key. +

            +

            +Also the secret key is visible to anything that can read the memory of the process. +

            + +
            + diff --git a/h2/src/docsrc/html/source.html b/h2/src/docsrc/html/source.html index eacf989a59..5b8f130680 100644 --- a/h2/src/docsrc/html/source.html +++ b/h2/src/docsrc/html/source.html @@ -1,6 +1,6 @@ @@ -10,7 +10,10 @@ // @@ -40,6 +40,9 @@ function getVersion(build) { if (build == 64) { return '1.0/version-1.0.' + build; + } else if (build > 200) { + var b = build + 1; + return Math.floor(b / 100) + '.' + Math.floor(b % 100 / 10) + '.' + build; } else if (build >= 177) { return '1.4.' + build; } else if (build >= 146 && build != 147) { @@ -67,7 +70,7 @@ code = code.replace('HY', '50'); code = code.replace('C', '1'); code = code.replace('T', '2'); - get('more').src = 'http://h2database.com/javadoc/org/h2/constant/ErrorCode.html#c' + code; + get('more').src = 'https://h2database.com/javadoc/org/h2/constant/ErrorCode.html#c' + code; } function go(file, line) { @@ -84,7 +87,7 @@ } else { url = 'https://github.com/h2database/h2database/tree/' if (build && build > 0) { - url += 'version-' + getVersion(build) + '/h2'; + url += 'version-' + getVersion(parseInt(build)) + '/h2'; } else { var tag = 'master/h2'; } @@ -114,7 +117,7 @@ hasData = true; idx = errorCode.indexOf("-"); build = parseInt(errorCode.substring(idx + 1)); - get('version').innerHTML = getVersion(build); + get('version').innerHTML = getVersion(parseInt(build)); errorCode = errorCode.substring(0, idx); while (errorCode.length > 1 && errorCode.charAt(0) == '0') { errorCode = errorCode.substring(1); diff --git a/h2/src/docsrc/html/stylesheet.css b/h2/src/docsrc/html/stylesheet.css index 8dffc6ce81..a30f4d5adc 100644 --- a/h2/src/docsrc/html/stylesheet.css +++ b/h2/src/docsrc/html/stylesheet.css @@ -1,7 +1,7 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group */ td, input, select, textarea, body, code, pre, td, th { @@ -283,6 +283,23 @@ td.index { vertical-align: top; } +div.ruleCompat code { + border-color: coral; + background-color: mistyrose; +} + +div.ruleH2 code { + border-color: lightseagreen; +} + +span.ruleCompat { + color: darkred; +} + +span.ruleH2 { + color: green; +} + .c { padding: 1px 3px; margin: 0px 0px; diff --git a/h2/src/docsrc/html/stylesheetPdf.css b/h2/src/docsrc/html/stylesheetPdf.css index 12f550cfde..dacc282997 100644 --- a/h2/src/docsrc/html/stylesheetPdf.css +++ b/h2/src/docsrc/html/stylesheetPdf.css @@ -1,7 +1,7 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group */ td, input, select, textarea, body, code, pre, td, th { @@ -152,3 +152,11 @@ td.index { border-collapse: collapse; vertical-align: top; } + +span.ruleCompat { + color: darkred; +} + +span.ruleH2 { + color: green; +} diff --git a/h2/src/docsrc/html/systemtables.html b/h2/src/docsrc/html/systemtables.html index baba02e0f5..fa19549629 100644 --- a/h2/src/docsrc/html/systemtables.html +++ b/h2/src/docsrc/html/systemtables.html @@ -1,7 +1,7 @@ @@ -19,19 +19,60 @@

            System Tables

            +

            Index

            + +

            +Information Schema +

            + + + + + + + +
            + + ${item.table}
            +
            +
            + + ${item.table}
            +
            +
            + + ${item.table}
            +
            +
            + +

            -Information Schema
            Range Table

            Information Schema

            -The system tables in the schema INFORMATION_SCHEMA contain the meta data -of all tables in the database as well as the current settings. +The system tables and views in the schema INFORMATION_SCHEMA contain the meta data +of all tables, views, domains, and other objects in the database as well as the current settings. +This documentation describes the default new version of INFORMATION_SCHEMA for H2 2.0. +Old TCP clients (1.4.200 and below) see the legacy version of INFORMATION_SCHEMA, +because they can't work with the new one. The legacy version is not documented.

            + -

            ${item.topic}

            -

            ${item.syntax}

            +

            ${item.table}

            +

            ${item.description}

            + + +${item.columns} + +

            Range Table

            diff --git a/h2/src/docsrc/html/tutorial.html b/h2/src/docsrc/html/tutorial.html index 9ad45f9979..3dadf0f822 100644 --- a/h2/src/docsrc/html/tutorial.html +++ b/h2/src/docsrc/html/tutorial.html @@ -1,7 +1,7 @@ @@ -45,8 +45,6 @@

            Tutorial

            Using H2 with jOOQ
            Using Databases in Web Applications
            - - Android
            CSV (Comma Separated Values) Support
            @@ -301,7 +299,7 @@

            Special H2 Console Syntax

            @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, - @procedure_columns, @schemas, @super_tables, @super_types, + @procedure_columns, @pseudo_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns @@ -317,10 +315,13 @@

            Special H2 Console Syntax

            - @generated insert into test() values(); + @generated insert into test() values();
            + @generated(1) insert into test() values();
            + @generated(ID, "TIMESTAMP") insert into test() values(); Show the result of Statement.getGeneratedKeys(). + Names or one-based indexes of required columns can be optionally specified. @@ -597,13 +598,13 @@

            Stopping a TCP Server from Another Process

            To stop the server from the command line, run:

            -java org.h2.tools.Server -tcpShutdown tcp://localhost:9092
            +java org.h2.tools.Server -tcpShutdown tcp://localhost:9092 -tcpPassword password
             

            To stop the server from a user application, use the following code:

            -org.h2.tools.Server.shutdownTcpServer("tcp://localhost:9094");
            +org.h2.tools.Server.shutdownTcpServer("tcp://localhost:9092", "password", false, false);
             

            This function will only stop the TCP server. @@ -611,7 +612,7 @@

            Stopping a TCP Server from Another Process

            To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. -Shutting down a TCP server can be protected using the option -tcpPassword +Shutting down a TCP server is protected using the option -tcpPassword (the same password must be used to start and stop the TCP server).

            @@ -662,7 +663,7 @@

            To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many cases. -See also H2Platform. +See also H2Platform.

            Using Apache ActiveMQ

            @@ -682,7 +683,7 @@

            Using H2 within NetBeans

            There is a known issue when using the Netbeans SQL Execution Window: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. -This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. +This is a problem for queries that modify state, such as SELECT NEXT VALUE FOR SEQ. In this case, two sequence values are allocated instead of just one.

            @@ -700,7 +701,7 @@

            Using H2 with jOOQ

            then run the jOOQ code generator on the command line using this command:

            -java -cp jooq.jar;jooq-meta.jar;jooq-codegen.jar;h2-1.3.158.jar;.
            +java -cp jooq.jar;jooq-meta.jar;jooq-codegen.jar;h2-1.4.199.jar;.
             org.jooq.util.GenerationTool /codegen.xml
             

            @@ -708,7 +709,7 @@

            Using H2 with jOOQ

             <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
            -<configuration xmlns="http://www.jooq.org/xsd/jooq-codegen-2.3.0.xsd">
            +<configuration xmlns="http://www.jooq.org/xsd/jooq-codegen-3.11.0.xsd">
                 <jdbc>
                     <driver>org.h2.Driver</driver>
                     <url>jdbc:h2:~/test</url>
            @@ -716,14 +717,11 @@ 

            Using H2 with jOOQ

            <password></password> </jdbc> <generator> - <name>org.jooq.util.DefaultGenerator</name> <database> - <name>org.jooq.util.h2.H2Database</name> <includes>.*</includes> <excludes></excludes> <inputSchema>PUBLIC</inputSchema> </database> - <generate></generate> <target> <packageName>org.jooq.h2.generated</packageName> <directory>./src</directory> @@ -735,16 +733,16 @@

            Using H2 with jOOQ

            Using the generated source, you can query the database as follows:

            -Factory create = new H2Factory(connection);
            +DSLContext dsl = DSL.using(connection);
             Result<UserRecord> result =
            -create.selectFrom(USER)
            +dsl.selectFrom(USER)
                 .where(NAME.like("Johnny%"))
                 .orderBy(ID)
                 .fetch();
             

            -See more details on jOOQ Homepage -and in the jOOQ Tutorial +See more details on jOOQ Homepage +and in the jOOQ Tutorial

            Using Databases in Web Applications

            @@ -791,6 +789,15 @@

            Using a Servlet Listener to Start and Stop a Database

            </listener>

            +If your servlet container is already Servlet 5-compatible, use the following +snippet instead: +

            +
            +<listener>
            +    <listener-class>org.h2.server.web.JakartaDbStarter</listener-class>
            +</listener>
            +
            +

            For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc:h2:~/test, @@ -875,62 +882,15 @@

            Using the H2 Console Servlet

            For details, see also src/tools/WEB-INF/web.xml.

            -To create a web application with just the H2 Console, run the following command: -

            -
            -build warConsole
            -
            - -

            Android

            -

            -You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. -So far, only very few tests and benchmarks were run, but it seems that performance is similar to SQLite, -except for opening and closing a database, which is not yet optimized in H2 -(H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). -Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. -So far, only very few tests have been run, and everything seems to work as expected. -Fulltext search was not yet tested, however the native fulltext search should work. -

            -

            -Reasons to use H2 instead of SQLite are: -

            -
            • Full Unicode support including UPPER() and LOWER(). -
            • Streaming API for BLOB and CLOB data. -
            • Fulltext search. -
            • Multiple connections. -
            • User defined functions and triggers. -
            • Database file encryption. -
            • Reading and writing CSV files (this feature can be used outside the database as well). -
            • Referential integrity and check constraints. -
            • Better data type and SQL support. -
            • In-memory databases, read-only databases, linked tables. -
            • Better compatibility with other databases which simplifies porting applications. -
            • Possibly better performance (so far for read operations). -
            • Server mode (accessing a database on a different machine over TCP/IP). -
            -

            -Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). -Both the regular H2 jar file and the smaller h2small-*.jar can be used. -To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) -or build.bat jarSmall (Windows). +If your application is already Servlet 5-compatible, use the servlet class +org.h2.server.web.JakartaWebServlet instead.

            -The database files needs to be stored in a place that is accessible for the application. -Example: +To create a web application with just the H2 Console, run the following command:

            -String url = "jdbc:h2:/data/data/" +
            -    "com.example.hello" +
            -    "/data/hello" +
            -    ";FILE_LOCK=FS" +
            -    ";PAGE_SIZE=1024" +
            -    ";CACHE_SIZE=8192";
            -conn = DriverManager.getConnection(url);
            -...
            +build warConsole
             
            -

            -Limitations: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android. -

            CSV (Comma Separated Values) Support

            @@ -1065,6 +1025,15 @@

            Restore from a Script

            need to be available on the server side.

            +

            +If the script was generated by H2 1.4.200 or an older version, add the VARIABLE_BINARY option to import it +into a more recent version. +

            + +
            +java org.h2.tools.RunScript -url jdbc:h2:~/test -user sa -script test.zip -options compression zip variable_binary
            +
            +

            Online Backup

            The BACKUP SQL statement and the Backup tool both create a zip file @@ -1192,7 +1161,7 @@

            Using OpenOffice Base

          This can be done by create it using the NetBeans OpenOffice plugin. -See also Extensions Development. +See also Extensions Development.

          Java Web Start / JNLP

          @@ -1216,7 +1185,7 @@

          Using a Connection Pool

          A simple connection pool is included in H2. It is based on the Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, -for example the Apache Commons DBCP. +for example the Apache Commons DBCP. For H2, it is about twice as fast to get a connection from the built-in connection pool than to get one using DriverManager.getConnection(). The built-in connection pool is used as follows:

          @@ -1302,8 +1271,7 @@

          Using the Native Fulltext Search

          Using the Apache Lucene Fulltext Search

          To use the Apache Lucene full text search, you need the Lucene library in the classpath. -Apache Lucene 5.5.5 or later version up to 7.7.1 is required. -Newer versions may also work, but were not tested. +Apache Lucene 8.5.2 or binary compatible version is required. How to do that depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. @@ -1392,7 +1360,7 @@

          User-Defined Variables

           SET @TOTAL = NULL;
          -SELECT X, SET(@TOTAL, IFNULL(@TOTAL, 1.) * X) F FROM SYSTEM_RANGE(1, 50);
          +SELECT X, SET(@TOTAL, COALESCE(@TOTAL, 1.) * X) F FROM SYSTEM_RANGE(1, 50);
           

          Variables that are not set evaluate to NULL. @@ -1404,30 +1372,35 @@

          User-Defined Variables

          Date and Time

          -Date, time and timestamp values support ISO 8601 formatting, including time zone: +Date, time and timestamp values support standard literals:

          -CALL TIMESTAMP '2008-01-01 12:00:00+01:00';
          +VALUES (
          +    DATE '2008-01-01',
          +    TIME '12:00:00',
          +    TIME WITH TIME ZONE '12:00:00+01:00',
          +    TIMESTAMP '2008-01-01 12:00:00',
          +    TIMESTAMP WITH TIME ZONE '2008-01-01 12:00:00+01:00'
          +);
           

          -If the time zone is not set, the value is parsed using the current time zone setting of the system. -Date and time information is stored in H2 database files with or without time zone information depending on used data type. +ISO 8601-style datetime formats with T instead of space between date and time parts are also supported.

          -
            -
          • -With TIMESTAMP data type if the database is opened using another system time zone, the date and time will be the same. -That means if you store the value '2000-01-01 12:00:00' in one time zone, then close the database -and open the database again in a different time zone, you will also get '2000-01-01 12:00:00'. -Please note that changing the time zone after the H2 driver is loaded is not supported. -
          • -
          • -With TIMESTAMP WITH TIME ZONE data type time zone offset is stored and if you store the value -'2008-01-01 12:00:00+01:00' it remains the same even if you close and reopen the database with a different time zone. -If you store the value with specified time zone name like '2008-01-01 12:00:00 Europe/Berlin' this name will be -converted to time zone offset. +

            +TIME and TIMESTAMP values are preserved without time zone information as local time. +That means if you store the value '2000-01-01 12:00:00' in one time zone, then change time zone of the session +you will also get '2000-01-01 12:00:00', the value will not be adjusted to the new time zone, +therefore its absolute value in UTC may be different. +

            +

            +TIME WITH TIME ZONE and TIMESTAMP WITH TIME ZONE values preserve the specified time zone offset +and if you store the value '2008-01-01 12:00:00+01:00' it also remains the same +even if you change time zone of the session, +and because it has a time zone offset its absolute value in UTC will be the same. +TIMESTAMP WITH TIME ZONE values may also be specified with a time zone name like '2008-01-01 12:00:00 Europe/Berlin'. +In that case this name will be converted into time zone offset. Names of time zones are not stored. -

          • -
          +

          Using Spring

          Using the TCP Server

          diff --git a/h2/src/docsrc/index.html b/h2/src/docsrc/index.html index 46d6a9e8a3..2e09c2fef2 100644 --- a/h2/src/docsrc/index.html +++ b/h2/src/docsrc/index.html @@ -1,7 +1,7 @@ diff --git a/h2/src/docsrc/javadoc/animate.js b/h2/src/docsrc/javadoc/animate.js deleted file mode 100644 index 099a5aee1a..0000000000 --- a/h2/src/docsrc/javadoc/animate.js +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ - -function on(id) { - return switchTag(id, 'titleOff', 'detailOn'); -} - -function off(id) { - return switchTag(id, '', 'detail'); -} - -function allDetails() { - for (i = 0;; i++) { - x = document.getElementById('_' + i); - if (x == null) { - break; - } - switchTag(i, 'titleOff', 'detailOn'); - } - return false; -} - -function switchTag(id, title, detail) { - if (document.getElementById('__' + id) != null) { - document.getElementById('__' + id).className = title; - document.getElementById('_' + id).className = detail; - } - return false; -} - -function openLink() { - page = new String(self.document.location); - var pos = page.lastIndexOf("#") + 1; - if (pos == 0) { - return; - } - var ref = page.substr(pos); - link = decodeURIComponent(ref); - el = document.getElementById(link); - if (el.nodeName.toLowerCase() == 'h4') { - // constant - return true; - } - el = el.parentNode.parentNode; - window.scrollTo(0, el.offsetTop); - on(el.id.substr(2)); - return false; -} \ No newline at end of file diff --git a/h2/src/docsrc/javadoc/classes.html b/h2/src/docsrc/javadoc/classes.html deleted file mode 100644 index 0c3fa818d5..0000000000 --- a/h2/src/docsrc/javadoc/classes.html +++ /dev/null @@ -1,98 +0,0 @@ - - - - - - - H2 Documentation - - - - - - -
          -
          - - - -
          - diff --git a/h2/src/docsrc/javadoc/index.html b/h2/src/docsrc/javadoc/index.html deleted file mode 100644 index 37a47494b3..0000000000 --- a/h2/src/docsrc/javadoc/index.html +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - H2 Documentation - - - - - - - -<body> - Sorry, Lynx is not supported -</body> - - - diff --git a/h2/src/docsrc/javadoc/overview.html b/h2/src/docsrc/javadoc/overview.html deleted file mode 100644 index 4f7f694957..0000000000 --- a/h2/src/docsrc/javadoc/overview.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - API Overview - - - - - -
          -
          - -

          API Overview

          - -

          JDBC API

          - -

          -Use the JDBC API to connect to a database and execute queries. -

          - -

          Tools API

          - -

          -The Tools API can be used to do maintenance operations, -such as deleting database files or changing the database file password, -that do not require a connection to the database. -

          - -
          - - diff --git a/h2/src/docsrc/javadoc/stylesheet.css b/h2/src/docsrc/javadoc/stylesheet.css deleted file mode 100644 index 0243c17ca4..0000000000 --- a/h2/src/docsrc/javadoc/stylesheet.css +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ - -td, input, select, textarea, body, code, pre, td, th { - font: 13px/1.4 Arial, sans-serif; - font-weight: normal; -} - -pre { - background-color: #ece9d8; - border: 1px solid rgb(172, 168, 153); - padding: 4px; -} - -body { - margin: 0px; - max-width: 800px; -} - -h1 { - background-color: #0000bb; - padding: 2px 4px 2px 4px; - margin-top: 11px; - color: #fff; - font-size: 22px; - line-height: normal; -} - -h2 { - font-size: 19px; -} - -h3 { - font-size: 16px; -} - -h4 { - font-size: 13px; -} - -hr { - color: #CCC; - background-color: #CCC; - height: 1px; - border: 0px solid blue; -} - -.menu { - margin: 10px 10px 10px 10px; -} - -.block { - border: 0px; -} - -.titleOff { - display: none; -} - -.detail { - border: 0px; - display: none; -} - -.detailOn { - border: 0px; -} - -td.return { - white-space:nowrap; - width: 1%; -} - -td.method { - width: 99%; -} - -.deprecated { - text-decoration: line-through; -} - -.methodText { - color: #000000; - font-weight: normal; - margin: 0px 0px 0px 20px; -} - -.method { -} - -.fieldText { - margin: 6px 20px 6px 20px; -} - -.methodName { - font-weight: bold; -} - -.itemTitle { -} - -.item { - margin: 0px 0px 0px 20px; -} - -table { - background-color: #ffffff; - border-collapse: collapse; - border: 1px solid #aca899; -} - -th { - text-align: left; - background-color: #ece9d8; - border: 1px solid #aca899; - padding: 2px; -} - -td { - background-color: #ffffff; - text-align: left; - vertical-align:top; - border: 1px solid #aca899; - padding: 2px; -} - - -ul, ol { - list-style-position: outside; - padding-left: 20px; -} - -li { - 
margin-top: 8px; - line-height: 100%; -} - -a { - text-decoration: none; - color: #0000ff; -} - -a:hover { - text-decoration: underline; -} - -table.content { - width: 100%; - height: 100%; - border: 0px; -} - -tr.content { - border:0px; - border-left:1px solid #aca899; -} - -td.content { - border:0px; - border-left:1px solid #aca899; -} - -.contentDiv { - margin:10px; -} - - - diff --git a/h2/src/docsrc/text/_docs_en.utf8.txt b/h2/src/docsrc/text/_docs_en.utf8.txt deleted file mode 100644 index 88040a0a2a..0000000000 --- a/h2/src/docsrc/text/_docs_en.utf8.txt +++ /dev/null @@ -1,12510 +0,0 @@ -@advanced_1000_h1 -Advanced - -@advanced_1001_a - Result Sets - -@advanced_1002_a - Large Objects - -@advanced_1003_a - Linked Tables - -@advanced_1004_a - Spatial Features - -@advanced_1005_a - Recursive Queries - -@advanced_1006_a - Updatable Views - -@advanced_1007_a - Transaction Isolation - -@advanced_1008_a - Multi-Version Concurrency Control (MVCC) - -@advanced_1009_a - Clustering / High Availability - -@advanced_1010_a - Two Phase Commit - -@advanced_1011_a - Compatibility - -@advanced_1012_a - Standards Compliance - -@advanced_1013_a - Run as Windows Service - -@advanced_1014_a - ODBC Driver - -@advanced_1015_a - Using H2 in Microsoft .NET - -@advanced_1016_a - ACID - -@advanced_1017_a - Durability Problems - -@advanced_1018_a - Using the Recover Tool - -@advanced_1019_a - File Locking Protocols - -@advanced_1020_a - Using Passwords - -@advanced_1021_a - Password Hash - -@advanced_1022_a - Protection against SQL Injection - -@advanced_1023_a - Protection against Remote Access - -@advanced_1024_a - Restricting Class Loading and Usage - -@advanced_1025_a - Security Protocols - -@advanced_1026_a - TLS Connections - -@advanced_1027_a - Universally Unique Identifiers (UUID) - -@advanced_1028_a - Settings Read from System Properties - -@advanced_1029_a - Setting the Server Bind Address - -@advanced_1030_a - Pluggable File System - -@advanced_1031_a - Split File System - 
-@advanced_1032_a - Database Upgrade - -@advanced_1033_a - Java Objects Serialization - -@advanced_1034_a - Custom Data Types Handler API - -@advanced_1035_a - Limits and Limitations - -@advanced_1036_a - Glossary and Links - -@advanced_1037_h2 -Result Sets - -@advanced_1038_h3 -Statements that Return a Result Set - -@advanced_1039_p - The following statements return a result set: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP. All other statements return an update count. - -@advanced_1040_h3 -Limiting the Number of Rows - -@advanced_1041_p - Before the result is returned to the application, all rows are read by the database. Server side cursors are not supported currently. If only the first few rows are interesting for the application, then the result set size should be limited to improve the performance. This can be done using LIMIT in a query (example: SELECT * FROM TEST LIMIT 100), or by using Statement.setMaxRows(max). - -@advanced_1042_h3 -Large Result Sets and External Sorting - -@advanced_1043_p - For large result set, the result is buffered to disk. The threshold can be defined using the statement SET MAX_MEMORY_ROWS. If ORDER BY is used, the sorting is done using an external sort algorithm. In this case, each block of rows is sorted using quick sort, then written to disk; when reading the data, the blocks are merged together. - -@advanced_1044_h2 -Large Objects - -@advanced_1045_h3 -Storing and Reading Large Objects - -@advanced_1046_p - If it is possible that the objects don't fit into memory, then the data type CLOB (for textual data) or BLOB (for binary data) should be used. For these data types, the objects are not fully read into memory, by using streams. To store a BLOB, use PreparedStatement.setBinaryStream. To store a CLOB, use PreparedStatement.setCharacterStream. To read a BLOB, use ResultSet.getBinaryStream, and to read a CLOB, use ResultSet.getCharacterStream. 
When using the client/server mode, large BLOB and CLOB data is stored in a temporary file on the client side. - -@advanced_1047_h3 -When to use CLOB/BLOB - -@advanced_1048_p - By default, this database stores large LOB (CLOB and BLOB) objects separate from the main table data. Small LOB objects are stored in-place, the threshold can be set using MAX_LENGTH_INPLACE_LOB, but there is still an overhead to use CLOB/BLOB. Because of this, BLOB and CLOB should never be used for columns with a maximum size below about 200 bytes. The best threshold depends on the use case; reading in-place objects is faster than reading from separate files, but slows down the performance of operations that don't involve this column. - -@advanced_1049_h3 -Large Object Compression - -@advanced_1050_p - The following feature is only available for the PageStore storage engine. For the MVStore engine (the default for H2 version 1.4.x), append ;COMPRESS=TRUE to the database URL instead. CLOB and BLOB values can be compressed by using SET COMPRESS_LOB. The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster. - -@advanced_1051_h2 -Linked Tables - -@advanced_1052_p - This database supports linked tables, which means tables that don't exist in the current database but are just links to another database. To create such a link, use the CREATE LINKED TABLE statement: - -@advanced_1053_p - You can then access the table in the usual way. Whenever the linked table is accessed, the database issues specific queries over JDBC. Using the example above, if you issue the query SELECT * FROM LINK WHERE ID=1, then the following query is run against the PostgreSQL database: SELECT * FROM TEST WHERE ID=?. 
The same happens for insert and update statements. Only simple statements are executed against the target database, that means no joins (queries that contain joins are converted to simple queries). Prepared statements are used where possible. - -@advanced_1054_p - To view the statements that are executed against the target table, set the trace level to 3. - -@advanced_1055_p - If multiple linked tables point to the same database (using the same database URL), the connection is shared. To disable this, set the system property h2.shareLinkedConnections=false. - -@advanced_1056_p - The statement CREATE LINKED TABLE supports an optional schema name parameter. - -@advanced_1057_p - The following are not supported because they may result in a deadlock: creating a linked table to the same database, and creating a linked table to another database using the server mode if the other database is open in the same server (use the embedded mode instead). - -@advanced_1058_p - Data types that are not supported in H2 are also not supported for linked tables, for example unsigned data types if the value is outside the range of the signed type. In such cases, the columns needs to be cast to a supported type. - -@advanced_1059_h2 -Updatable Views - -@advanced_1060_p - By default, views are not updatable. To make a view updatable, use an "instead of" trigger as follows: - -@advanced_1061_p - Update the base table(s) within the trigger as required. For details, see the sample application org.h2.samples.UpdatableView. - -@advanced_1062_h2 -Transaction Isolation - -@advanced_1063_p - Please note that most data definition language (DDL) statements, such as "create table", commit the current transaction. See the Grammar for details. - -@advanced_1064_p - Transaction isolation is provided for all data manipulation language (DML) statements. - -@advanced_1065_p - Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. 
Instead, rows are locked for update, and read committed is used in all cases (changing the isolation level has no effect). - -@advanced_1066_p - This database supports the following transaction isolation levels: - -@advanced_1067_b -Read Committed - -@advanced_1068_li - This is the default level. Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. Higher concurrency is possible when using this level. - -@advanced_1069_li - To enable, execute the SQL statement SET LOCK_MODE 3 - -@advanced_1070_li - or append ;LOCK_MODE=3 to the database URL: jdbc:h2:~/test;LOCK_MODE=3 - -@advanced_1071_b -Serializable - -@advanced_1072_li - Both read locks and write locks are kept until the transaction commits. To enable, execute the SQL statement SET LOCK_MODE 1 - -@advanced_1073_li - or append ;LOCK_MODE=1 to the database URL: jdbc:h2:~/test;LOCK_MODE=1 - -@advanced_1074_b -Read Uncommitted - -@advanced_1075_li - This level means that transaction isolation is disabled. - -@advanced_1076_li - To enable, execute the SQL statement SET LOCK_MODE 0 - -@advanced_1077_li - or append ;LOCK_MODE=0 to the database URL: jdbc:h2:~/test;LOCK_MODE=0 - -@advanced_1078_p - When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited. - -@advanced_1079_b -Dirty Reads - -@advanced_1080_li - Means a connection can read uncommitted changes made by another connection. - -@advanced_1081_li - Possible with: read uncommitted - -@advanced_1082_b -Non-Repeatable Reads - -@advanced_1083_li - A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result. 
- -@advanced_1084_li - Possible with: read uncommitted, read committed - -@advanced_1085_b -Phantom Reads - -@advanced_1086_li - A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row. - -@advanced_1087_li - Possible with: read uncommitted, read committed - -@advanced_1088_h3 -Table Level Locking - -@advanced_1089_p - The database allows multiple concurrent connections to the same database. To make sure all connections only see consistent data, table level locking is used by default. This mechanism does not allow high concurrency, but is very fast. Shared locks and exclusive locks are supported. Before reading from a table, the database tries to add a shared lock to the table (this is only possible if there is no exclusive lock on the object by another connection). If the shared lock is added successfully, the table can be read. It is allowed that other connections also have a shared lock on the same object. If a connection wants to write to a table (update or delete a row), an exclusive lock is required. To get the exclusive lock, other connection must not have any locks on the object. After the connection commits, all locks are released. This database keeps all locks in memory. When a lock is released, and multiple connections are waiting for it, one of them is picked at random. - -@advanced_1090_h3 -Lock Timeout - -@advanced_1091_p - If a connection cannot get a lock on an object, the connection waits for some amount of time (the lock timeout). During this time, hopefully the connection holding the lock commits and it is then possible to get the lock. If this is not possible because the other connection does not release the lock for some time, the unsuccessful connection will get a lock timeout exception. The lock timeout can be set individually for each connection. 
- -@advanced_1092_h2 -Multi-Version Concurrency Control (MVCC) - -@advanced_1093_p - The MVCC feature allows higher concurrency than using (table level or row level) locks. When using MVCC in this database, delete, insert and update operations will only issue a shared lock on the table. An exclusive lock is still used when adding or removing columns, when dropping the table, and when using SELECT ... FOR UPDATE. Connections only 'see' committed data, and own changes. That means, if connection A updates a row but doesn't commit this change yet, connection B will see the old value. Only when the change is committed, the new value is visible by other connections (read committed). If multiple connections concurrently try to update the same row, the database waits until it can apply the change, but at most until the lock timeout expires. - -@advanced_1094_p - To use the MVCC feature, append ;MVCC=TRUE to the database URL: - -@advanced_1095_p - The setting must be specified in the first connection (the one that opens the database). It is not possible to enable or disable this setting while the database is already open. - -@advanced_1096_p - If MVCC is enabled, changing the lock mode (LOCK_MODE) has no effect. - -@advanced_1097_div - The MVCC mode is enabled by default in version 1.4.x, with the default MVStore storage engine. MVCC is disabled by default when using the PageStore storage engine (which is the default in version 1.3.x). The following applies when using the PageStore storage engine: The MVCC feature is not fully tested yet. The limitations of the MVCC mode are: with the PageStore storage engine, it can not be used at the same time as MULTI_THREADED=TRUE; the complete undo log (the list of uncommitted changes) must fit in memory when using multi-version concurrency. The setting MAX_MEMORY_UNDO has no effect. Clustering / High Availability - -@advanced_1098_p - This database supports a simple clustering / high availability mechanism. 
The architecture is: two database servers run on two different computers, and on both computers is a copy of the same database. If both servers run, each database operation is executed on both computers. If one server fails (power, hardware or network failure), the other server can still continue to work. From this point on, the operations will be executed only on one server until the other server is back up. - -@advanced_1099_p - Clustering can only be used in the server mode (the embedded mode does not support clustering). The cluster can be re-created using the CreateCluster tool without stopping the remaining server. Applications that are still connected are automatically disconnected, however when appending ;AUTO_RECONNECT=TRUE, they will recover from that. - -@advanced_1100_p - To initialize the cluster, use the following steps: - -@advanced_1101_li -Create a database - -@advanced_1102_li -Use the CreateCluster tool to copy the database to another location and initialize the clustering. Afterwards, you have two databases containing the same data. - -@advanced_1103_li -Start two servers (one for each copy of the database) - -@advanced_1104_li -You are now ready to connect to the databases with the client application(s) - -@advanced_1105_h3 -Using the CreateCluster Tool - -@advanced_1106_p - To understand how clustering works, please try out the following example. In this example, the two databases reside on the same computer, but usually, the databases will be on different servers. - -@advanced_1107_li -Create two directories: server1, server2. Each directory will simulate a directory on a computer. - -@advanced_1108_li -Start a TCP server pointing to the first directory. You can do this using the command line: - -@advanced_1109_li -Start a second TCP server pointing to the second directory. This will simulate a server running on a second (redundant) computer. 
You can do this using the command line: - -@advanced_1110_li -Use the CreateCluster tool to initialize clustering. This will automatically create a new, empty database if it does not exist. Run the tool on the command line: - -@advanced_1111_li -You can now connect to the databases using an application or the H2 Console using the JDBC URL jdbc:h2:tcp://localhost:9101,localhost:9102/~/test - -@advanced_1112_li -If you stop a server (by killing the process), you will notice that the other machine continues to work, and therefore the database is still accessible. - -@advanced_1113_li -To restore the cluster, you first need to delete the database that failed, then restart the server that was stopped, and re-run the CreateCluster tool. - -@advanced_1114_h3 -Detect Which Cluster Instances are Running - -@advanced_1115_p - To find out which cluster nodes are currently running, execute the following SQL statement: - -@advanced_1116_p - If the result is '' (two single quotes), then the cluster mode is disabled. Otherwise, the list of servers is returned, enclosed in single quote. Example: 'server1:9191,server2:9191'. - -@advanced_1117_p - It is also possible to get the list of servers by using Connection.getClientInfo(). - -@advanced_1118_p - The property list returned from getClientInfo() contains a numServers property that returns the number of servers that are in the connection list. To get the actual servers, getClientInfo() also has properties server0..serverX, where serverX is the number of servers minus 1. - -@advanced_1119_p - Example: To get the 2nd server in the connection list one uses getClientInfo('server1'). Note: The serverX property only returns IP addresses and ports and not hostnames. - -@advanced_1120_h3 -Clustering Algorithm and Limitations - -@advanced_1121_p - Read-only queries are only executed against the first cluster node, but all other statements are executed against all nodes. 
There is currently no load balancing made to avoid problems with transactions. The following functions may yield different results on different cluster nodes and must be executed with care: UUID(), RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND() [when not using a seed]. Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE). However, they can be used in read-only statements and the result can then be used for modifying statements. Using auto-increment and identity columns is currently not supported. Instead, sequence values need to be manually requested and then used to insert data (using two statements). - -@advanced_1122_p - When using the cluster modes, result sets are read fully in memory by the client, so that there is no problem if the server dies that executed the query. Result sets must fit in memory on the client side. - -@advanced_1123_p - The SQL statement SET AUTOCOMMIT FALSE is not supported in the cluster mode. To disable autocommit, the method Connection.setAutoCommit(false) needs to be called. - -@advanced_1124_p - It is possible that a transaction from one connection overtakes a transaction from a different connection. Depending on the operations, this might result in different results, for example when conditionally incrementing a value in a row. - -@advanced_1125_h2 -Two Phase Commit - -@advanced_1126_p - The two phase commit protocol is supported. 
2-phase-commit works as follows: - -@advanced_1127_li -Autocommit needs to be switched off - -@advanced_1128_li -A transaction is started, for example by inserting a row - -@advanced_1129_li -The transaction is marked 'prepared' by executing the SQL statement PREPARE COMMIT transactionName - -@advanced_1130_li -The transaction can now be committed or rolled back - -@advanced_1131_li -If a problem occurs before the transaction was successfully committed or rolled back (for example because a network problem occurred), the transaction is in the state 'in-doubt' - -@advanced_1132_li -When re-connecting to the database, the in-doubt transactions can be listed with SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT - -@advanced_1133_li -Each transaction in this list must now be committed or rolled back by executing COMMIT TRANSACTION transactionName or ROLLBACK TRANSACTION transactionName - -@advanced_1134_li -The database needs to be closed and re-opened to apply the changes - -@advanced_1135_h2 -Compatibility - -@advanced_1136_p - This database is (up to a certain point) compatible to other databases such as HSQLDB, MySQL and PostgreSQL. There are certain areas where H2 is incompatible. - -@advanced_1137_h3 -Transaction Commit when Autocommit is On - -@advanced_1138_p - At this time, this database engine commits a transaction (if autocommit is switched on) just before returning the result. For a query, this means the transaction is committed even before the application scans through the result set, and before the result set is closed. Other database engines may commit the transaction in this case when the result set is closed. - -@advanced_1139_h3 -Keywords / Reserved Words - -@advanced_1140_p - There is a list of keywords that can't be used as identifiers (table names, column names and so on), unless they are quoted (surrounded with double quotes). 
The list is currently: - -@advanced_1141_code - CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, TRUE, UNION, UNIQUE, WHERE - -@advanced_1142_p - Certain words of this list are keywords because they are functions that can be used without '()' for compatibility, for example CURRENT_TIMESTAMP. - -@advanced_1143_h2 -Standards Compliance - -@advanced_1144_p - This database tries to be as much standard compliant as possible. For the SQL language, ANSI/ISO is the main standard. There are several versions that refer to the release date: SQL-92, SQL:1999, and SQL:2003. Unfortunately, the standard documentation is not freely available. Another problem is that important features are not standardized. Whenever this is the case, this database tries to be compatible to other databases. - -@advanced_1145_h3 -Supported Character Sets, Character Encoding, and Unicode - -@advanced_1146_p - H2 internally uses Unicode, and supports all character encoding systems and character sets supported by the virtual machine you use. - -@advanced_1147_h2 -Run as Windows Service - -@advanced_1148_p - Using a native wrapper / adapter, Java applications can be run as a Windows Service. There are various tools available to do that. The Java Service Wrapper from Tanuki Software, Inc. is included in the installation. Batch files are provided to install, start, stop and uninstall the H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application. The batch files are located in the directory h2/service. - -@advanced_1149_p - The service wrapper bundled with H2 is a 32-bit version. To use a 64-bit version of Windows (x64), you need to use a 64-bit version of the wrapper, for example the one from Simon Krenger. 
- -@advanced_1150_p - When running the database as a service, absolute path should be used. Using ~ in the database URL is problematic in this case, because it means to use the home directory of the current user. The service might run without or with the wrong user, so that the database files might end up in an unexpected place. - -@advanced_1151_h3 -Install the Service - -@advanced_1152_p - The service needs to be registered as a Windows Service first. To do that, double click on 1_install_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. - -@advanced_1153_h3 -Start the Service - -@advanced_1154_p - You can start the H2 Database Engine Service using the service manager of Windows, or by double clicking on 2_start_service.bat. Please note that the batch file does not print an error message if the service is not installed. - -@advanced_1155_h3 -Connect to the H2 Console - -@advanced_1156_p - After installing and starting the service, you can connect to the H2 Console application using a browser. Double clicking on 3_start_browser.bat to do that. The default port (8082) is hard coded in the batch file. - -@advanced_1157_h3 -Stop the Service - -@advanced_1158_p - To stop the service, double click on 4_stop_service.bat. Please note that the batch file does not print an error message if the service is not installed or started. - -@advanced_1159_h3 -Uninstall the Service - -@advanced_1160_p - To uninstall the service, double click on 5_uninstall_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. - -@advanced_1161_h3 -Additional JDBC drivers - -@advanced_1162_p - To use other databases (for example MySQL), the location of the JDBC drivers of those databases need to be added to the environment variables H2DRIVERS or CLASSPATH before installing the service. 
Multiple drivers can be set; each entry needs to be separated with a ; (Windows) or : (other operating systems). Spaces in the path names are supported. The settings must not be quoted. - -@advanced_1163_h2 -ODBC Driver - -@advanced_1164_p - This database does not come with its own ODBC driver at this time, but it supports the PostgreSQL network protocol. Therefore, the PostgreSQL ODBC driver can be used. Support for the PostgreSQL network protocol is quite new and should be viewed as experimental. It should not be used for production applications. - -@advanced_1165_p - To use the PostgreSQL ODBC driver on 64 bit versions of Windows, first run c:/windows/syswow64/odbcad32.exe. At this point you set up your DSN just like you would on any other system. See also: Re: ODBC Driver on Windows 64 bit - -@advanced_1166_h3 -ODBC Installation - -@advanced_1167_p - First, the ODBC driver must be installed. Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*) or newer is recommended. The Windows version of the PostgreSQL ODBC driver is available at http://www.postgresql.org/ftp/odbc/versions/msi. - -@advanced_1168_h3 -Starting the Server - -@advanced_1169_p - After installing the ODBC driver, start the H2 Server using the command line: - -@advanced_1170_p - The PG Server (PG for PostgreSQL protocol) is started as well. By default, databases are stored in the current working directory where the server is started. Use -baseDir to save databases in another directory, for example the user home directory: - -@advanced_1171_p - The PG server can be started and stopped from within a Java application as follows: - -@advanced_1172_p - By default, only connections from localhost are allowed. To allow remote connections, use -pgAllowOthers when starting the server. - -@advanced_1173_p - To map an ODBC database name to a different JDBC database name, use the option -key when starting the server. Please note only one mapping is allowed. 
The following will map the ODBC database named TEST to the database URL jdbc:h2:~/data/test;cipher=aes: - -@advanced_1174_h3 -ODBC Configuration - -@advanced_1175_p - After installing the driver, a new Data Source must be added. In Windows, run odbcad32.exe to open the Data Source Administrator. Then click on 'Add...' and select the PostgreSQL Unicode driver. Then click 'Finish'. You will be able to change the connection properties. The property column represents the property key in the odbc.ini file (which may be different from the GUI). - -@advanced_1176_th -Property - -@advanced_1177_th -Example - -@advanced_1178_th -Remarks - -@advanced_1179_td -Data Source - -@advanced_1180_td -H2 Test - -@advanced_1181_td -The name of the ODBC Data Source - -@advanced_1182_td -Database - -@advanced_1183_td -~/test;ifexists=true - -@advanced_1184_td - The database name. This can include connections settings. By default, the database is stored in the current working directory where the Server is started except when the -baseDir setting is used. The name must be at least 3 characters. - -@advanced_1185_td -Servername - -@advanced_1186_td -localhost - -@advanced_1187_td -The server name or IP address. - -@advanced_1188_td -By default, only remote connections are allowed - -@advanced_1189_td -Username - -@advanced_1190_td -sa - -@advanced_1191_td -The database user name. - -@advanced_1192_td -SSL - -@advanced_1193_td -false (disabled) - -@advanced_1194_td -At this time, SSL is not supported. - -@advanced_1195_td -Port - -@advanced_1196_td -5435 - -@advanced_1197_td -The port where the PG Server is listening. - -@advanced_1198_td -Password - -@advanced_1199_td -sa - -@advanced_1200_td -The database password. - -@advanced_1201_p - To improve performance, please enable 'server side prepare' under Options / Datasource / Page 2 / Server side prepare. - -@advanced_1202_p - Afterwards, you may use this data source. 
- -@advanced_1203_h3 -PG Protocol Support Limitations - -@advanced_1204_p - At this time, only a subset of the PostgreSQL network protocol is implemented. Also, there may be compatibility problems on the SQL level, with the catalog, or with text encoding. Problems are fixed as they are found. Currently, statements can not be canceled when using the PG protocol. Also, H2 does not provide index meta over ODBC. - -@advanced_1205_p - PostgreSQL ODBC Driver Setup requires a database password; that means it is not possible to connect to H2 databases without password. This is a limitation of the ODBC driver. - -@advanced_1206_h3 -Security Considerations - -@advanced_1207_p - Currently, the PG Server does not support challenge response or encrypt passwords. This may be a problem if an attacker can listen to the data transferred between the ODBC driver and the server, because the password is readable to the attacker. Also, it is currently not possible to use encrypted SSL connections. Therefore the ODBC driver should not be used where security is important. - -@advanced_1208_p - The first connection that opens a database using the PostgreSQL server needs to be an administrator user. Subsequent connections don't need to be opened by an administrator. - -@advanced_1209_h3 -Using Microsoft Access - -@advanced_1210_p - When using Microsoft Access to edit data in a linked H2 table, you may need to enable the following option: Tools - Options - Edit/Find - ODBC fields. - -@advanced_1211_h2 -Using H2 in Microsoft .NET - -@advanced_1212_p - The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. You can access a H2 database on .NET using the JDBC API, or using the ADO.NET interface. - -@advanced_1213_h3 -Using the ADO.NET API on .NET - -@advanced_1214_p - An implementation of the ADO.NET interface is available in the open source project H2Sharp. 
- -@advanced_1215_h3 -Using the JDBC API on .NET - -@advanced_1216_li -Install the .NET Framework from Microsoft. Mono has not yet been tested. - -@advanced_1217_li -Install IKVM.NET. - -@advanced_1218_li -Copy the h2*.jar file to ikvm/bin - -@advanced_1219_li -Run the H2 Console using: ikvm -jar h2*.jar - -@advanced_1220_li -Convert the H2 Console to an .exe file using: ikvmc -target:winexe h2*.jar. You may ignore the warnings. - -@advanced_1221_li -Create a .dll file using (change the version accordingly): ikvmc.exe -target:library -version:1.0.69.0 h2*.jar - -@advanced_1222_p - If you want your C# application use H2, you need to add the h2.dll and the IKVM.OpenJDK.ClassLibrary.dll to your C# solution. Here some sample code: - -@advanced_1223_h2 -ACID - -@advanced_1224_p - In the database world, ACID stands for: - -@advanced_1225_li -Atomicity: transactions must be atomic, meaning either all tasks are performed or none. - -@advanced_1226_li -Consistency: all operations must comply with the defined constraints. - -@advanced_1227_li -Isolation: transactions must be isolated from each other. - -@advanced_1228_li -Durability: committed transaction will not be lost. - -@advanced_1229_h3 -Atomicity - -@advanced_1230_p - Transactions in this database are always atomic. - -@advanced_1231_h3 -Consistency - -@advanced_1232_p - By default, this database is always in a consistent state. Referential integrity rules are enforced except when explicitly disabled. - -@advanced_1233_h3 -Isolation - -@advanced_1234_p - For H2, as with most other database systems, the default isolation level is 'read committed'. This provides better performance, but also means that transactions are not completely isolated. H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'. - -@advanced_1235_h3 -Durability - -@advanced_1236_p - This database does not guarantee that all committed transactions survive a power failure. 
Tests show that all databases sometimes lose transactions on power failure (for details, see below). Where losing transactions is not acceptable, a laptop or UPS (uninterruptible power supply) should be used. If durability is required for all possible cases of hardware failure, clustering should be used, such as the H2 clustering mode. - -@advanced_1237_h2 -Durability Problems - -@advanced_1238_p - Complete durability means all committed transaction survive a power failure. Some databases claim they can guarantee durability, but such claims are wrong. A durability test was run against H2, HSQLDB, PostgreSQL, and Derby. All of those databases sometimes lose committed transactions. The test is included in the H2 download, see org.h2.test.poweroff.Test. - -@advanced_1239_h3 -Ways to (Not) Achieve Durability - -@advanced_1240_p - Making sure that committed transactions are not lost is more complicated than it seems first. To guarantee complete durability, a database must ensure that the log record is on the hard drive before the commit call returns. To do that, databases use different methods. One is to use the 'synchronous write' file access mode. In Java, RandomAccessFile supports the modes rws and rwd: - -@advanced_1241_code -rwd - -@advanced_1242_li -: every update to the file's content is written synchronously to the underlying storage device. - -@advanced_1243_code -rws - -@advanced_1244_li -: in addition to rwd, every update to the metadata is written synchronously. - -@advanced_1245_p - A test (org.h2.test.poweroff.TestWrite) with one of those modes achieves around 50 thousand write operations per second. Even when the operating system write buffer is disabled, the write rate is around 50 thousand operations per second. This feature does not force changes to disk because it does not flush all buffers. The test updates the same byte in the file again and again. 
If the hard drive was able to write at this rate, then the disk would need to make at least 50 thousand revolutions per second, or 3 million RPM (revolutions per minute). There are no such hard drives. The hard drive used for the test is about 7200 RPM, or about 120 revolutions per second. There is an overhead, so the maximum write rate must be lower than that. - -@advanced_1246_p - Calling fsync flushes the buffers. There are two ways to do that in Java: - -@advanced_1247_code -FileDescriptor.sync() - -@advanced_1248_li -. The documentation says that this forces all system buffers to synchronize with the underlying device. This method is supposed to return after all in-memory modified copies of buffers associated with this file descriptor have been written to the physical medium. - -@advanced_1249_code -FileChannel.force() - -@advanced_1250_li -. This method is supposed to force any updates to this channel's file to be written to the storage device that contains it. - -@advanced_1251_p - By default, MySQL calls fsync for each commit. When using one of those methods, only around 60 write operations per second can be achieved, which is consistent with the RPM rate of the hard drive used. Unfortunately, even when calling FileDescriptor.sync() or FileChannel.force(), data is not always persisted to the hard drive, because most hard drives do not obey fsync(): see Your Hard Drive Lies to You. In Mac OS X, fsync does not flush hard drive buffers. See Bad fsync?. So the situation is confusing, and tests prove there is a problem. - -@advanced_1252_p - Trying to flush hard drive buffers is hard, and if you do the performance is very bad. First you need to make sure that the hard drive actually flushes all buffers. Tests show that this can not be done in a reliable way. Then the maximum number of transactions is around 60 per second. Because of those reasons, the default behavior of H2 is to delay writing committed transactions. 
- -@advanced_1253_p - In H2, after a power failure, a bit more than one second of committed transactions may be lost. To change the behavior, use SET WRITE_DELAY and CHECKPOINT SYNC. Most other databases support commit delay as well. In the performance comparison, commit delay was used for all databases that support it. - -@advanced_1254_h3 -Running the Durability Test - -@advanced_1255_p - To test the durability / non-durability of this and other databases, you can use the test application in the package org.h2.test.poweroff. Two computers with network connection are required to run this test. One computer just listens, while the test application is run (and power is cut) on the other computer. The computer with the listener application opens a TCP/IP port and listens for an incoming connection. The second computer first connects to the listener, and then created the databases and starts inserting records. The connection is set to 'autocommit', which means after each inserted record a commit is performed automatically. Afterwards, the test computer notifies the listener that this record was inserted successfully. The listener computer displays the last inserted record number every 10 seconds. Now, switch off the power manually, then restart the computer, and run the application again. You will find out that in most cases, none of the databases contains all the records that the listener computer knows about. For details, please consult the source code of the listener and test application. - -@advanced_1256_h2 -Using the Recover Tool - -@advanced_1257_p - The Recover tool can be used to extract the contents of a database file, even if the database is corrupted. It also extracts the content of the transaction log and large objects (CLOB or BLOB). To run the tool, type on the command line: - -@advanced_1258_p - For each database in the current directory, a text file will be created. 
This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript tool or a RUNSCRIPT FROM SQL statement. The script includes at least one CREATE USER statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script. - -@advanced_1259_p - The Recover tool creates a SQL script from database file. It also processes the transaction log. - -@advanced_1260_p - To verify the database can recover at any time, append ;RECOVER_TEST=64 to the database URL in your test environment. This will simulate an application crash after each 64 writes to the database file. A log file named databaseName.h2.db.log is created that lists the operations. The recovery is tested using an in-memory file system, that means it may require a larger heap setting. - -@advanced_1261_h2 -File Locking Protocols - -@advanced_1262_p - Multiple concurrent connections to the same database are supported, however a database file can only be open for reading and writing (in embedded mode) by one process at the same time. Otherwise, the processes would overwrite each others data and corrupt the database file. To protect against this problem, whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database stops normally, this lock file is deleted. - -@advanced_1263_p - In special cases (if the process did not terminate normally, for example because there was a power failure), the lock file is not deleted by the process that created it. That means the existence of the lock file is not a safe protocol for file locking. 
However, this software uses a challenge-response protocol to protect the database files. There are two methods (algorithms) implemented to provide both security (that is, the same database files cannot be opened by two processes at the same time) and simplicity (that is, the lock file does not need to be deleted manually by the user). The two methods are 'file method' and 'socket methods'. - -@advanced_1264_p - The file locking protocols (except the file locking method 'FS') have the following limitation: if a shared file system is used, and the machine with the lock owner is sent to sleep (standby or hibernate), another machine may take over. If the machine that originally held the lock wakes up, the database may become corrupt. If this situation can occur, the application must ensure the database is closed when the application is put to sleep. - -@advanced_1265_h3 -File Locking Method 'File' - -@advanced_1266_p - The default method for database file locking for version 1.3 and older is the 'File Method'. The algorithm is: - -@advanced_1267_li -If the lock file does not exist, it is created (using the atomic operation File.createNewFile). Then, the process waits a little bit (20 ms) and checks the file again. If the file was changed during this time, the operation is aborted. This protects against a race condition when one process deletes the lock file just after another one create it, and a third process creates the file again. It does not occur if there are only two writers. - -@advanced_1268_li - If the file can be created, a random number is inserted together with the locking method ('file'). Afterwards, a watchdog thread is started that checks regularly (every second once by default) if the file was deleted or modified by another (challenger) thread / process. Whenever that occurs, the file is overwritten with the old data. 
The watchdog thread runs with high priority so that a change to the lock file does not get through undetected even if the system is very busy. However, the watchdog thread does use very little resources (CPU time), because it waits most of the time. Also, the watchdog only reads from the hard disk and does not write to it. - -@advanced_1269_li - If the lock file exists and was recently modified, the process waits for some time (up to two seconds). If it was still changed, an exception is thrown (database is locked). This is done to eliminate race conditions with many concurrent writers. Afterwards, the file is overwritten with a new version (challenge). After that, the thread waits for 2 seconds. If there is a watchdog thread protecting the file, he will overwrite the change and this process will fail to lock the database. However, if there is no watchdog thread, the lock file will still be as written by this thread. In this case, the file is deleted and atomically created again. The watchdog thread is started in this case and the file is locked. - -@advanced_1270_p - This algorithm is tested with over 100 concurrent threads. In some cases, when there are many concurrent threads trying to lock the database, they block each other (meaning the file cannot be locked by any of them) for some time. However, the file never gets locked by two threads at the same time. However using that many concurrent threads / processes is not the common use case. Generally, an application should throw an error to the user if it cannot open a database, and not try again in a (fast) loop. - -@advanced_1271_h3 -File Locking Method 'Socket' - -@advanced_1272_p - There is a second locking mechanism implemented, but disabled by default. To use it, append ;FILE_LOCK=SOCKET to the database URL. The algorithm is: - -@advanced_1273_li -If the lock file does not exist, it is created. Then a server socket is opened on a defined port, and kept open. 
The port and IP address of the process that opened the database is written into the lock file. - -@advanced_1274_li -If the lock file exists, and the lock method is 'file', then the software switches to the 'file' method. - -@advanced_1275_li -If the lock file exists, and the lock method is 'socket', then the process checks if the port is in use. If the original process is still running, the port is in use and this process throws an exception (database is in use). If the original process died (for example due to a power failure, or abnormal termination of the virtual machine), then the port was released. The new process deletes the lock file and starts again. - -@advanced_1276_p - This method does not require a watchdog thread actively polling (reading) the same file every second. The problem with this method is, if the file is stored on a network share, two processes (running on different computers) could still open the same database files, if they do not have a direct TCP/IP connection. - -@advanced_1277_h3 -File Locking Method 'FS' - -@advanced_1278_p - This is the default mode for version 1.4 and newer. This database file locking mechanism uses native file system lock on the database file. No *.lock.db file is created in this case, and no background thread is started. This mechanism may not work on all systems as expected. Some systems allow to lock the same file multiple times within the same virtual machine, and on some system native file locking is not supported or files are not unlocked after a power failure. - -@advanced_1279_p - To enable this feature, append ;FILE_LOCK=FS to the database URL. - -@advanced_1280_p - This feature is relatively new. When using it for production, please ensure your system does in fact lock files as expected. - -@advanced_1281_h2 -Using Passwords - -@advanced_1282_h3 -Using Secure Passwords - -@advanced_1283_p - Remember that weak passwords can be broken regardless of the encryption and security protocols. 
Don't use passwords that can be found in a dictionary. Appending numbers does not make passwords secure. A way to create good passwords that can be remembered is: take the first letters of a sentence, use upper and lower case characters, and creatively include special characters (but it's more important to use a long password than to use special characters). Example: - -@advanced_1284_code -i'sE2rtPiUKtT - -@advanced_1285_p - from the sentence it's easy to remember this password if you know the trick. - -@advanced_1286_h3 -Passwords: Using Char Arrays instead of Strings - -@advanced_1287_p - Java strings are immutable objects and cannot be safely 'destroyed' by the application. After creating a string, it will remain in the main memory of the computer at least until it is garbage collected. The garbage collection cannot be controlled by the application, and even if it is garbage collected the data may still remain in memory. It might also be possible that the part of memory containing the password is swapped to disk (if not enough main memory is available), which is a problem if the attacker has access to the swap file of the operating system. - -@advanced_1288_p - It is a good idea to use char arrays instead of strings for passwords. Char arrays can be cleared (filled with zeros) after use, and therefore the password will not be stored in the swap file. - -@advanced_1289_p - This database supports using char arrays instead of string to pass user and file passwords. The following code can be used to do that: - -@advanced_1290_p - This example requires Java 1.6. When using Swing, use javax.swing.JPasswordField. - -@advanced_1291_h3 -Passing the User Name and/or Password in the URL - -@advanced_1292_p - Instead of passing the user name as a separate parameter as in Connection conn = DriverManager. getConnection("jdbc:h2:~/test", "sa", "123"); the user name (and/or password) can be supplied in the URL itself: Connection conn = DriverManager. 
getConnection("jdbc:h2:~/test;USER=sa;PASSWORD=123"); The settings in the URL override the settings passed as a separate parameter. - -@advanced_1293_h2 -Password Hash - -@advanced_1294_p - Sometimes the database password needs to be stored in a configuration file (for example in the web.xml file). In addition to connecting with the plain text password, this database supports connecting with the password hash. This means that only the hash of the password (and not the plain text password) needs to be stored in the configuration file. This will only protect others from reading or re-constructing the plain text password (even if they have access to the configuration file); it does not protect others from accessing the database using the password hash. - -@advanced_1295_p - To connect using the password hash instead of plain text password, append ;PASSWORD_HASH=TRUE to the database URL, and replace the password with the password hash. To calculate the password hash from a plain text password, run the following command within the H2 Console tool: @password_hash <upperCaseUserName> <password>. As an example, if the user name is sa and the password is test, run the command @password_hash SA test. Then use the resulting password hash as you would use the plain text password. When using an encrypted database, then the user password and file password need to be hashed separately. To calculate the hash of the file password, run: @password_hash file <filePassword>. - -@advanced_1296_h2 -Protection against SQL Injection - -@advanced_1297_h3 -What is SQL Injection - -@advanced_1298_p - This database engine provides a solution for the security vulnerability known as 'SQL Injection'. Here is a short description of what SQL injection means. 
Some applications build SQL statements with embedded user input such as: - -@advanced_1299_p - If this mechanism is used anywhere in the application, and user input is not correctly filtered or encoded, it is possible for a user to inject SQL functionality or statements by using specially built input such as (in this example) this password: ' OR ''='. In this case the statement becomes: - -@advanced_1300_p - Which is always true no matter what the password stored in the database is. For more information about SQL Injection, see Glossary and Links. - -@advanced_1301_h3 -Disabling Literals - -@advanced_1302_p - SQL Injection is not possible if user input is not directly embedded in SQL statements. A simple solution for the problem above is to use a prepared statement: - -@advanced_1303_p - This database provides a way to enforce usage of parameters when passing user input to the database. This is done by disabling embedded literals in SQL statements. To do this, execute the statement: - -@advanced_1304_p - Afterwards, SQL statements with text and number literals are not allowed any more. That means, SQL statement of the form WHERE NAME='abc' or WHERE CustomerId=10 will fail. It is still possible to use prepared statements and parameters as described above. Also, it is still possible to generate SQL statements dynamically, and use the Statement API, as long as the SQL statements do not include literals. There is also a second mode where number literals are allowed: SET ALLOW_LITERALS NUMBERS. To allow all literals, execute SET ALLOW_LITERALS ALL (this is the default setting). Literals can only be enabled or disabled by an administrator. - -@advanced_1305_h3 -Using Constants - -@advanced_1306_p - Disabling literals also means disabling hard-coded 'constant' literals. This database supports defining constants using the CREATE CONSTANT command. Constants can be defined only when literals are enabled, but used even when literals are disabled. 
To avoid name clashes with column names, constants can be defined in other schemas: - -@advanced_1307_p - Even when literals are enabled, it is better to use constants instead of hard-coded number or text literals in queries or views. With constants, typos are found at compile time, the source code is easier to understand and change. - -@advanced_1308_h3 -Using the ZERO() Function - -@advanced_1309_p - It is not required to create a constant for the number 0 as there is already a built-in function ZERO(): - -@advanced_1310_h2 -Protection against Remote Access - -@advanced_1311_p - By default this database does not allow connections from other machines when starting the H2 Console, the TCP server, or the PG server. Remote access can be enabled using the command line options -webAllowOthers, -tcpAllowOthers, -pgAllowOthers. - -@advanced_1312_p - If you enable remote access using -tcpAllowOthers or -pgAllowOthers, please also consider using the options -baseDir, -ifExists, so that remote users can not create new databases or access existing databases with weak passwords. When using the option -baseDir, only databases within that directory may be accessed. Ensure the existing accessible databases are protected using strong passwords. - -@advanced_1313_p - If you enable remote access using -webAllowOthers, please ensure the web server can only be accessed from trusted networks. The options -baseDir, -ifExists don't protect access to the tools section, prevent remote shutdown of the web server, changes to the preferences, the saved connection settings, or access to other databases accessible from the system. - -@advanced_1314_h2 -Restricting Class Loading and Usage - -@advanced_1315_p - By default there is no restriction on loading classes and executing Java code for admins. 
That means an admin may call system functions such as System.setProperty by executing: - -@advanced_1316_p - To restrict users (including admins) from loading classes and executing code, the list of allowed classes can be set in the system property h2.allowedClasses in the form of a comma separated list of classes or patterns (items ending with *). By default all classes are allowed. Example: - -@advanced_1317_p - This mechanism is used for all user classes, including database event listeners, trigger classes, user-defined functions, user-defined aggregate functions, and JDBC driver classes (with the exception of the H2 driver) when using the H2 Console. - -@advanced_1318_h2 -Security Protocols - -@advanced_1319_p - The following paragraphs document the security protocols used in this database. These descriptions are very technical and only intended for security experts that already know the underlying security primitives. - -@advanced_1320_h3 -User Password Encryption - -@advanced_1321_p - When a user tries to connect to a database, the combination of user name, @, and password are hashed using SHA-256, and this hash value is transmitted to the database. This step does not protect against an attacker that re-uses the value if he is able to listen to the (unencrypted) transmission between the client and the server. But, the passwords are never transmitted as plain text, even when using an unencrypted connection between client and server. That means if a user reuses the same password for different things, this password is still protected up to some point. See also 'RFC 2617 - HTTP Authentication: Basic and Digest Access Authentication' for more information. - -@advanced_1322_p - When a new database or user is created, a new random salt value is generated. The size of the salt is 64 bits. Using the random salt reduces the risk of an attacker pre-calculating hash values for many different (commonly used) passwords. 
- -@advanced_1323_p - The combination of user-password hash value (see above) and salt is hashed using SHA-256. The resulting value is stored in the database. When a user tries to connect to the database, the database combines user-password hash value with the stored salt value and calculates the hash value. Other products use multiple iterations (hash the hash value again and again), but this is not done in this product to reduce the risk of denial of service attacks (where the attacker tries to connect with bogus passwords, and the server spends a lot of time calculating the hash value for each password). The reasoning is: if the attacker has access to the hashed passwords, he also has access to the data in plain text, and therefore does not need the password any more. If the data is protected by storing it on another computer and only accessible remotely, then the iteration count is not required at all. - -@advanced_1324_h3 -File Encryption - -@advanced_1325_p - The database files can be encrypted using the AES-128 algorithm. - -@advanced_1326_p - When a user tries to connect to an encrypted database, the combination of file@ and the file password is hashed using SHA-256. This hash value is transmitted to the server. - -@advanced_1327_p - When a new database file is created, a new cryptographically secure random salt value is generated. The size of the salt is 64 bits. The combination of the file password hash and the salt value is hashed 1024 times using SHA-256. The reason for the iteration is to make it harder for an attacker to calculate hash values for common passwords. - -@advanced_1328_p - The resulting hash value is used as the key for the block cipher algorithm. Then, an initialization vector (IV) key is calculated by hashing the key again using SHA-256. This is to make sure the IV is unknown to the attacker. The reason for using a secret IV is to protect against watermark attacks. 
- -@advanced_1329_p - Before saving a block of data (each block is 8 bytes long), the following operations are executed: first, the IV is calculated by encrypting the block number with the IV key (using the same block cipher algorithm). This IV is combined with the plain text using XOR. The resulting data is encrypted using the AES-128 algorithm. - -@advanced_1330_p - When decrypting, the operation is done in reverse. First, the block is decrypted using the key, and then the IV is calculated combined with the decrypted text using XOR. - -@advanced_1331_p - Therefore, the block cipher mode of operation is CBC (cipher-block chaining), but each chain is only one block long. The advantage over the ECB (electronic codebook) mode is that patterns in the data are not revealed, and the advantage over multi block CBC is that flipped cipher text bits are not propagated to flipped plaintext bits in the next block. - -@advanced_1332_p - Database encryption is meant for securing the database while it is not in use (stolen laptop and so on). It is not meant for cases where the attacker has access to files while the database is in use. When he has write access, he can for example replace pieces of files with pieces of older versions and manipulate data like this. - -@advanced_1333_p - File encryption slows down the performance of the database engine. Compared to unencrypted mode, database operations take about 2.5 times longer using AES (embedded mode). - -@advanced_1334_h3 -Wrong Password / User Name Delay - -@advanced_1335_p - To protect against remote brute force password attacks, the delay after each unsuccessful login gets double as long. Use the system properties h2.delayWrongPasswordMin and h2.delayWrongPasswordMax to change the minimum (the default is 250 milliseconds) or maximum delay (the default is 4000 milliseconds, or 4 seconds). The delay only applies for those using the wrong password. 
Normally there is no delay for a user that knows the correct password, with one exception: after using the wrong password, there is a delay of up to (randomly distributed) the same delay as for a wrong password. This is to protect against parallel brute force attacks, so that an attacker needs to wait for the whole delay. Delays are synchronized. This is also required to protect against parallel attacks. - -@advanced_1336_p - There is only one exception message for both wrong user and for wrong password, to make it harder to get the list of user names. It is not possible from the stack trace to see if the user name was wrong or the password. - -@advanced_1337_h3 -HTTPS Connections - -@advanced_1338_p - The web server supports HTTP and HTTPS connections using SSLServerSocket. There is a default self-certified certificate to support an easy starting point, but custom certificates are supported as well. - -@advanced_1339_h2 -TLS Connections - -@advanced_1340_p - Remote TLS connections are supported using the Java Secure Socket Extension (SSLServerSocket, SSLSocket). By default, anonymous TLS is enabled. - -@advanced_1341_p - To use your own keystore, set the system properties javax.net.ssl.keyStore and javax.net.ssl.keyStorePassword before starting the H2 server and client. See also Customizing the Default Key and Trust Stores, Store Types, and Store Passwords for more information. - -@advanced_1342_p - To disable anonymous TLS, set the system property h2.enableAnonymousTLS to false. - -@advanced_1343_h2 -Universally Unique Identifiers (UUID) - -@advanced_1344_p - This database supports UUIDs. Also supported is a function to create new UUIDs using a cryptographically strong pseudo random number generator. With random UUIDs, the chance of two having the same value can be calculated using the probability theory. See also 'Birthday Paradox'. Standardized randomly generated UUIDs have 122 random bits. 
4 bits are used for the version (Randomly generated UUID), and 2 bits for the variant (Leach-Salz). This database supports generating such UUIDs using the built-in function RANDOM_UUID() or UUID(). Here is a small program to estimate the probability of having two identical UUIDs after generating a number of values: - -@advanced_1345_p - Some values are: - -@advanced_1346_th -Number of UUIs - -@advanced_1347_th -Probability of Duplicates - -@advanced_1348_td -2^36=68'719'476'736 - -@advanced_1349_td -0.000'000'000'000'000'4 - -@advanced_1350_td -2^41=2'199'023'255'552 - -@advanced_1351_td -0.000'000'000'000'4 - -@advanced_1352_td -2^46=70'368'744'177'664 - -@advanced_1353_td -0.000'000'000'4 - -@advanced_1354_p - To help non-mathematicians understand what those numbers mean, here a comparison: one's annual risk of being hit by a meteorite is estimated to be one chance in 17 billion, that means the probability is about 0.000'000'000'06. - -@advanced_1355_h2 -Spatial Features - -@advanced_1356_p - H2 supports the geometry data type and spatial indexes if the JTS Topology Suite is in the classpath. To run the H2 Console tool with the JTS tool, you need to download the JTS-CORE 1.14.0 jar file and place it in the h2 bin directory. Then edit the h2.sh file as follows: - -@advanced_1357_p - Here is an example SQL script to create a table with a spatial column and index: - -@advanced_1358_p - To query the table using geometry envelope intersection, use the operation &&, as in PostGIS: - -@advanced_1359_p - You can verify that the spatial index is used using the "explain plan" feature: - -@advanced_1360_p - For persistent databases, the spatial index is stored on disk; for in-memory databases, the index is kept in memory. - -@advanced_1361_h2 -Recursive Queries - -@advanced_1362_p - H2 has experimental support for recursive queries using so called "common table expressions" (CTE). 
Examples: - -@advanced_1363_p - Limitations: Recursive queries need to be of the type UNION ALL, and the recursion needs to be on the second part of the query. No tables or views with the name of the table expression may exist. Different table expression names need to be used when using multiple distinct table expressions within the same transaction and for the same session. All columns of the table expression are of type VARCHAR, and may need to be cast to the required data type. Views with recursive queries are not supported. Subqueries and INSERT INTO ... FROM with recursive queries are not supported. Parameters are only supported within the last SELECT statement (a workaround is to use session variables like @start within the table expression). The syntax is: - -@advanced_1364_h2 -Settings Read from System Properties - -@advanced_1365_p - Some settings of the database can be set on the command line using -DpropertyName=value. It is usually not required to change those settings manually. The settings are case sensitive. Example: - -@advanced_1366_p - The current value of the settings can be read in the table INFORMATION_SCHEMA.SETTINGS. - -@advanced_1367_p - For a complete list of settings, see SysProperties. - -@advanced_1368_h2 -Setting the Server Bind Address - -@advanced_1369_p - Usually server sockets accept connections on any/all local addresses. This may be a problem on multi-homed hosts. To bind only to one address, use the system property h2.bindAddress. This setting is used for both regular server sockets and for TLS server sockets. IPv4 and IPv6 address formats are supported. - -@advanced_1370_h2 -Pluggable File System - -@advanced_1371_p - This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. 
The following file systems are included: - -@advanced_1372_code -zip: - -@advanced_1373_li - read-only zip-file based file system. Format: zip:/zipFileName!/fileName. - -@advanced_1374_code -split: - -@advanced_1375_li - file system that splits files in 1 GB files (stackable with other file systems). - -@advanced_1376_code -nio: - -@advanced_1377_li - file system that uses FileChannel instead of RandomAccessFile (faster in some operating systems). - -@advanced_1378_code -nioMapped: - -@advanced_1379_li - file system that uses memory mapped files (faster in some operating systems). Please note that there currently is a file size limitation of 2 GB when using this file system. To work around this limitation, combine it with the split file system: split:nioMapped:test. - -@advanced_1380_code -memFS: - -@advanced_1381_li - in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself). - -@advanced_1382_code -memLZF: - -@advanced_1383_li - compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself). - -@advanced_1384_code -nioMemFS: - -@advanced_1385_li - stores data outside of the VM's heap - useful for large memory DBs without incurring GC costs. - -@advanced_1386_code -nioMemLZF: - -@advanced_1387_li - stores compressed data outside of the VM's heap - useful for large memory DBs without incurring GC costs. Use "nioMemLZF:12:" to tweak the % of blocks that are stored uncompressed. If you size this to your working set correctly, compressed storage is roughly the same performance as uncompressed. The default value is 1%. - -@advanced_1388_p - As an example, to use the the nio file system, use the following database URL: jdbc:h2:nio:~/test. - -@advanced_1389_p - To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase, and call the method FilePath.register before using it. 
- -@advanced_1390_p - For input streams (but not for random access files), URLs may be used in addition to the registered file systems. Example: jar:file:///c:/temp/example.zip!/org/example/nested.csv. To read a stream from the classpath, use the prefix classpath:, as in classpath:/org/h2/samples/newsfeed.sql. - -@advanced_1391_h2 -Split File System - -@advanced_1392_p - The file system prefix split: is used to split logical files into multiple physical files, for example so that a database can get larger than the maximum file system size of the operating system. If the logical file is larger than the maximum file size, then the file is split as follows: - -@advanced_1393_code -<fileName> - -@advanced_1394_li - (first block, is always created) - -@advanced_1395_code -<fileName>.1.part - -@advanced_1396_li - (second block) - -@advanced_1397_p - More physical files (*.2.part, *.3.part) are automatically created / deleted if needed. The maximum physical file size of a block is 2^30 bytes, which is also called 1 GiB or 1 GB. However this can be changed if required, by specifying the block size in the file name. The file name format is: split:<x>:<fileName> where the file size per block is 2^x. For 1 MiB block sizes, use x = 20 (because 2^20 is 1 MiB). The following file name means the logical file is split into 1 MiB blocks: split:20:test.h2.db. An example database URL for this case is jdbc:h2:split:20:~/test. - -@advanced_1398_h2 -Database Upgrade - -@advanced_1399_p - In version 1.2, H2 introduced a new file store implementation which is incompatible to the one used in versions < 1.2. To automatically convert databases to the new file store, it is necessary to include an additional jar file. The file can be found at http://h2database.com/h2mig_pagestore_addon.jar . If this file is in the classpath, every connect to an older database will result in a conversion process. 
- -@advanced_1400_p - The conversion itself is done internally via 'script to' and 'runscript from'. After the conversion process, the files will be renamed from - -@advanced_1401_code -dbName.data.db - -@advanced_1402_li - to dbName.data.db.backup - -@advanced_1403_code -dbName.index.db - -@advanced_1404_li - to dbName.index.db.backup - -@advanced_1405_p - by default. Also, the temporary script will be written to the database directory instead of a temporary directory. Both defaults can be customized via - -@advanced_1406_code -org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean) - -@advanced_1407_code -org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean) - -@advanced_1408_p - prior opening a database connection. - -@advanced_1409_p - Since version 1.2.140 it is possible to let the old h2 classes (v 1.2.128) connect to the database. The automatic upgrade .jar file must be present, and the URL must start with jdbc:h2v1_1: (the JDBC driver class is org.h2.upgrade.v1_1.Driver). If the database should automatically connect using the old version if a database with the old format exists (without upgrade), and use the new version otherwise, then append ;NO_UPGRADE=TRUE to the database URL. Please note the old driver did not process the system property "h2.baseDir" correctly, so that using this setting is not supported when upgrading. - -@advanced_1410_h2 -Java Objects Serialization - -@advanced_1411_p - Java objects serialization is enabled by default for columns of type OTHER, using standard Java serialization/deserialization semantics. - -@advanced_1412_p - To disable this feature set the system property h2.serializeJavaObject=false (default: true). 
- -@advanced_1413_p - Serialization and deserialization of java objects is customizable both at system level and at database level providing a JavaObjectSerializer implementation: - -@advanced_1414_li - At system level set the system property h2.javaObjectSerializer with the Fully Qualified Name of the JavaObjectSerializer interface implementation. It will be used over the entire JVM session to (de)serialize java objects being stored in column of type OTHER. Example h2.javaObjectSerializer=com.acme.SerializerClassName. - -@advanced_1415_li - At database level execute the SQL statement SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName' or append ;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName' to the database URL: jdbc:h2:~/test;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'. - -@advanced_1416_p - Please note that this SQL statement can only be executed before any tables are defined. - -@advanced_1417_h2 -Custom Data Types Handler API - -@advanced_1418_p - It is possible to extend the type system of the database by providing your own implementation of minimal required API basically consisting of type identification and conversion routines. - -@advanced_1419_p - In order to enable this feature, set the system property h2.customDataTypesHandler (default: null) to the fully qualified name of the class providing CustomDataTypesHandler interface implementation. - -@advanced_1420_p - The instance of that class will be created by H2 and used to: - -@advanced_1421_li -resolve the names and identifiers of extrinsic data types. - -@advanced_1422_li -convert values of extrinsic data types to and from values of built-in types. - -@advanced_1423_li -provide order of the data types. - -@advanced_1424_p -This is a system-level setting, i.e. affects all the databases. - -@advanced_1425_b -Note: - -@advanced_1426_p -Please keep in mind that this feature may not possibly provide the same ABI stability level as other features as it exposes many of the H2 internals. 
You may be required to update your code occasionally due to internal changes in H2 if you are going to use this feature. - -@advanced_1427_h2 -Limits and Limitations - -@advanced_1428_p - This database has the following known limitations: - -@advanced_1429_li -Database file size limit: 4 TB (using the default page size of 2 KB) or higher (when using a larger page size). This limit is including CLOB and BLOB data. - -@advanced_1430_li -The maximum file size for FAT or FAT32 file systems is 4 GB. That means when using FAT or FAT32, the limit is 4 GB for the data. This is the limitation of the file system. The database does provide a workaround for this problem, it is to use the file name prefix split:. In that case files are split into files of 1 GB by default. An example database URL is: jdbc:h2:split:~/test. - -@advanced_1431_li -The maximum number of rows per table is 2^64. - -@advanced_1432_li -The maximum number of open transactions is 65535. - -@advanced_1433_li -Main memory requirements: The larger the database, the more main memory is required. With the current storage mechanism (the page store), the minimum main memory required is around 1 MB for each 8 GB database file size. - -@advanced_1434_li -Limit on the complexity of SQL statements. Statements of the following form will result in a stack overflow exception: - -@advanced_1435_li -There is no limit for the following entities, except the memory and storage capacity: maximum identifier length (table name, column name, and so on); maximum number of tables, columns, indexes, triggers, and other database objects; maximum statement length, number of parameters per statement, tables per statement, expressions in order by, group by, having, and so on; maximum rows per query; maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; maximum row length, index row length, select row length; maximum length of a varchar column, decimal column, literal in a statement. 
- -@advanced_1436_li -Querying from the metadata tables is slow if there are many tables (thousands). - -@advanced_1437_li -For limitations on data types, see the documentation of the respective Java data type or the data type documentation of this database. - -@advanced_1438_h2 -Glossary and Links - -@advanced_1439_th -Term - -@advanced_1440_th -Description - -@advanced_1441_td -AES-128 - -@advanced_1442_td -A block encryption algorithm. See also: Wikipedia: AES - -@advanced_1443_td -Birthday Paradox - -@advanced_1444_td -Describes the higher than expected probability that two persons in a room have the same birthday. Also valid for randomly generated UUIDs. See also: Wikipedia: Birthday Paradox - -@advanced_1445_td -Digest - -@advanced_1446_td -Protocol to protect a password (but not to protect data). See also: RFC 2617: HTTP Digest Access Authentication - -@advanced_1447_td -GCJ - -@advanced_1448_td -Compiler for Java. GNU Compiler for the Java and NativeJ (commercial) - -@advanced_1449_td -HTTPS - -@advanced_1450_td -A protocol to provide security to HTTP connections. See also: RFC 2818: HTTP Over TLS - -@advanced_1451_td -Modes of Operation - -@advanced_1452_a -Wikipedia: Block cipher modes of operation - -@advanced_1453_td -Salt - -@advanced_1454_td -Random number to increase the security of passwords. See also: Wikipedia: Key derivation function - -@advanced_1455_td -SHA-256 - -@advanced_1456_td -A cryptographic one-way hash function. See also: Wikipedia: SHA hash functions - -@advanced_1457_td -SQL Injection - -@advanced_1458_td -A security vulnerability where an application embeds SQL statements or expressions in user input. See also: Wikipedia: SQL Injection - -@advanced_1459_td -Watermark Attack - -@advanced_1460_td -Security problem of certain encryption programs where the existence of certain data can be proven without decrypting. 
For more information, search in the internet for 'watermark attack cryptoloop' - -@advanced_1461_td -SSL/TLS - -@advanced_1462_td -Secure Sockets Layer / Transport Layer Security. See also: Java Secure Socket Extension (JSSE) - -@architecture_1000_h1 -Architecture - -@architecture_1001_a - Introduction - -@architecture_1002_a - Top-down overview - -@architecture_1003_a - JDBC driver - -@architecture_1004_a - Connection/session management - -@architecture_1005_a - Command execution and planning - -@architecture_1006_a - Table/index/constraints - -@architecture_1007_a - Undo log, redo log, and transactions layer - -@architecture_1008_a - B-tree engine and page-based storage allocation - -@architecture_1009_a - Filesystem abstraction - -@architecture_1010_h2 -Introduction - -@architecture_1011_p - H2 implements an embedded and standalone ANSI-SQL89 compliant SQL engine on top of a B-tree based disk store. - -@architecture_1012_p - As of October 2013, Thomas is still working on our next-generation storage engine called MVStore. This will in time replace the B-tree based storage engine. - -@architecture_1013_h2 -Top-down Overview - -@architecture_1014_p - Working from the top down, the layers look like this: - -@architecture_1015_li -JDBC driver. - -@architecture_1016_li -Connection/session management. - -@architecture_1017_li -SQL Parser. - -@architecture_1018_li -Command execution and planning. - -@architecture_1019_li -Table/Index/Constraints. - -@architecture_1020_li -Undo log, redo log, and transactions layer. - -@architecture_1021_li -B-tree engine and page-based storage allocation. - -@architecture_1022_li -Filesystem abstraction. 
- -@architecture_1023_h2 -JDBC Driver - -@architecture_1024_p - The JDBC driver implementation lives in org.h2.jdbc, org.h2.jdbcx - -@architecture_1025_h2 -Connection/session management - -@architecture_1026_p - The primary classes of interest are: - -@architecture_1027_th -Package - -@architecture_1028_th -Description - -@architecture_1029_td -org.h2.engine.Database - -@architecture_1030_td -the root/global class - -@architecture_1031_td -org.h2.engine.SessionInterface - -@architecture_1032_td -abstracts over the differences between embedded and remote sessions - -@architecture_1033_td -org.h2.engine.Session - -@architecture_1034_td -local/embedded session - -@architecture_1035_td -org.h2.engine.SessionRemote - -@architecture_1036_td -remote session - -@architecture_1037_h2 -Parser - -@architecture_1038_p - The parser lives in org.h2.command.Parser. It uses a straightforward recursive-descent design. - -@architecture_1039_p - See Wikipedia Recursive-descent parser page. - -@architecture_1040_h2 -Command execution and planning - -@architecture_1041_p - Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. The primary packages of interest are: - -@architecture_1042_th -Package - -@architecture_1043_th -Description - -@architecture_1044_td -org.h2.command.ddl - -@architecture_1045_td -Commands that modify schema data structures - -@architecture_1046_td -org.h2.command.dml - -@architecture_1047_td -Commands that modify data - -@architecture_1048_h2 -Table/Index/Constraints - -@architecture_1049_p - One thing to note here is that indexes are simply stored as special kinds of tables. 
- -@architecture_1050_p - The primary packages of interest are: - -@architecture_1051_th -Package - -@architecture_1052_th -Description - -@architecture_1053_td -org.h2.table - -@architecture_1054_td -Implementations of different kinds of tables - -@architecture_1055_td -org.h2.index - -@architecture_1056_td -Implementations of different kinds of indices - -@architecture_1057_h2 -Undo log, redo log, and transactions layer - -@architecture_1058_p - We have a transaction log, which is shared among all sessions. See also http://en.wikipedia.org/wiki/Transaction_log http://h2database.com/html/grammar.html#set_log - -@architecture_1059_p - We also have an undo log, which is per session, to undo an operation (an update that fails for example) and to rollback a transaction. Theoretically, the transaction log could be used, but for simplicity, H2 currently uses it's own "list of operations" (usually in-memory). - -@architecture_1060_p - With the MVStore, this is no longer needed (just the transaction log). - -@architecture_1061_h2 -B-tree engine and page-based storage allocation. - -@architecture_1062_p - The primary package of interest is org.h2.store. - -@architecture_1063_p - This implements a storage mechanism which allocates pages of storage (typically 2k in size) and also implements a b-tree over those pages to allow fast retrieval and update. - -@architecture_1064_h2 -Filesystem abstraction. - -@architecture_1065_p - The primary class of interest is org.h2.store.FileStore. - -@architecture_1066_p - This implements an abstraction of a random-access file. This allows the higher layers to treat in-memory vs. on-disk vs. zip-file databases the same. 
- -@build_1000_h1 -Build - -@build_1001_a - Portability - -@build_1002_a - Environment - -@build_1003_a - Building the Software - -@build_1004_a - Build Targets - -@build_1005_a - Using Maven 2 - -@build_1006_a - Using Eclipse - -@build_1007_a - Translating - -@build_1008_a - Submitting Source Code Changes - -@build_1009_a - Reporting Problems or Requests - -@build_1010_a - Automated Build - -@build_1011_a - Generating Railroad Diagrams - -@build_1012_h2 -Portability - -@build_1013_p - This database is written in Java and therefore works on many platforms. It can also be compiled to a native executable using GCJ. - -@build_1014_h2 -Environment - -@build_1015_p - To run this database, a Java Runtime Environment (JRE) version 1.7 or higher is required. - -@build_1016_p - To create the database executables, the following software stack was used. To use this database, it is not required to install this software however. - -@build_1017_li -Mac OS X and Windows - -@build_1018_a -Oracle JDK Version 1.7 - -@build_1019_a -Eclipse - -@build_1020_li -Eclipse Plugins: Subclipse, Eclipse Checkstyle Plug-in, EclEmma Java Code Coverage - -@build_1021_a -Emma Java Code Coverage - -@build_1022_a -Mozilla Firefox - -@build_1023_a -OpenOffice - -@build_1024_a -NSIS - -@build_1025_li - (Nullsoft Scriptable Install System) - -@build_1026_a -Maven - -@build_1027_h2 -Building the Software - -@build_1028_p - You need to install a JDK, for example the Oracle JDK version 1.7 or 1.8. Ensure that Java binary directory is included in the PATH environment variable, and that the environment variable JAVA_HOME points to your Java installation. On the command line, go to the directory h2 and execute the following command: - -@build_1029_p - For Linux and OS X, use ./build.sh instead of build. - -@build_1030_p - You will get a list of targets. 
If you want to build the jar file, execute (Windows): - -@build_1031_p - To run the build tool in shell mode, use the command line option - as in ./build.sh -. - -@build_1032_h3 -Switching the Source Code - -@build_1033_p - The source code uses Java 1.7 features. To switch the source code to the installed version of Java, run: - -@build_1034_h2 -Build Targets - -@build_1035_p - The build system can generate smaller jar files as well. The following targets are currently supported: - -@build_1036_code -jarClient - -@build_1037_li - creates the file h2client.jar. This only contains the JDBC client. - -@build_1038_code -jarSmall - -@build_1039_li - creates the file h2small.jar. This only contains the embedded database. Debug information is disabled. - -@build_1040_code -jarJaqu - -@build_1041_li - creates the file h2jaqu.jar. This only contains the JaQu (Java Query) implementation. All other jar files do not include JaQu. - -@build_1042_code -javadocImpl - -@build_1043_li - creates the Javadocs of the implementation. - -@build_1044_p - To create the file h2client.jar, go to the directory h2 and execute the following command: - -@build_1045_h3 -Using Apache Lucene - -@build_1046_p - Apache Lucene 3.6.2 is used for testing. Newer versions may work, however they are not tested. - -@build_1047_h2 -Using Maven 2 - -@build_1048_h3 -Using a Central Repository - -@build_1049_p - You can include the database in your Maven 2 project as a dependency. Example: - -@build_1050_p - New versions of this database are first uploaded to http://hsql.sourceforge.net/m2-repo/ and then automatically synchronized with the main Maven repository; however after a new release it may take a few hours before they are available there. - -@build_1051_h3 -Maven Plugin to Start and Stop the TCP Server - -@build_1052_p - A Maven plugin to start and stop the H2 TCP server is available from Laird Nelson at GitHub. 
To start the H2 server, use: - -@build_1053_p - To stop the H2 server, use: - -@build_1054_h3 -Using Snapshot Version - -@build_1055_p - To build a h2-*-SNAPSHOT.jar file and upload it the to the local Maven 2 repository, execute the following command: - -@build_1056_p - Afterwards, you can include the database in your Maven 2 project as a dependency: - -@build_1057_h2 -Using Eclipse - -@build_1058_p - To create an Eclipse project for H2, use the following steps: - -@build_1059_li -Install Git and Eclipse. - -@build_1060_li -Get the H2 source code from Github: - -@build_1061_code -git clone https://github.com/h2database/h2database - -@build_1062_li -Download all dependencies: - -@build_1063_code -build.bat download - -@build_1064_li -(Windows) - -@build_1065_code -./build.sh download - -@build_1066_li -(otherwise) - -@build_1067_li -In Eclipse, create a new Java project from existing source code: File, New, Project, Java Project, Create project from existing source. - -@build_1068_li -Select the h2 folder, click Next and Finish. - -@build_1069_li -To resolve com.sun.javadoc import statements, you may need to manually add the file <java.home>/../lib/tools.jar to the build path. - -@build_1070_h2 -Translating - -@build_1071_p - The translation of this software is split into the following parts: - -@build_1072_li -H2 Console: src/main/org/h2/server/web/res/_text_*.prop - -@build_1073_li -Error messages: src/main/org/h2/res/_messages_*.prop - -@build_1074_p - To translate the H2 Console, start it and select Preferences / Translate. After you are done, send the translated *.prop file to the Google Group. The web site is currently translated using Google. - -@build_1075_h2 -Submitting Source Code Changes - -@build_1076_p - If you'd like to contribute bug fixes or new features, please consider the following guidelines to simplify merging them: - -@build_1077_li -Only use Java 7 features (do not use Java 8/9/etc) (see Environment). 
- -@build_1078_li -Follow the coding style used in the project, and use Checkstyle (see above) to verify. For example, do not use tabs (use spaces instead). The checkstyle configuration is in src/installer/checkstyle.xml. - -@build_1079_li -A template of the Eclipse settings are in src/installer/eclipse.settings/*. If you want to use them, you need to copy them to the .settings directory. The formatting options (eclipseCodeStyle) are also included. - -@build_1080_li -Please provide test cases and integrate them into the test suite. For Java level tests, see src/test/org/h2/test/TestAll.java. For SQL level tests, see src/test/org/h2/test/test.in.txt or testSimple.in.txt. - -@build_1081_li -The test cases should cover at least 90% of the changed and new code; use a code coverage tool to verify that (see above). or use the build target coverage. - -@build_1082_li -Verify that you did not break other features: run the test cases by executing build test. - -@build_1083_li -Provide end user documentation if required (src/docsrc/html/*). - -@build_1084_li -Document grammar changes in src/docsrc/help/help.csv - -@build_1085_li -Provide a change log entry (src/docsrc/html/changelog.html). - -@build_1086_li -Verify the spelling using build spellcheck. If required add the new words to src/tools/org/h2/build/doc/dictionary.txt. - -@build_1087_li -Run src/installer/buildRelease to find and fix formatting errors. - -@build_1088_li -Verify the formatting using build docs and build javadoc. - -@build_1089_li -Submit changes using GitHub's "pull requests". You'll require a free GitHub account. If you are not familiar with pull requests, please read GitHub's Using pull requests page. - -@build_1090_p - For legal reasons, patches need to be public in the form of an issue report or attachment or in the form of an email to the group. 
Significant contributions need to include the following statement: - -@build_1091_p - "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 (http://h2database.com/html/license.html)." - -@build_1092_h2 -Reporting Problems or Requests - -@build_1093_p - Please consider the following checklist if you have a question, want to report a problem, or if you have a feature request: - -@build_1094_li -For bug reports, please provide a short, self contained, correct (compilable), example of the problem. - -@build_1095_li -Feature requests are always welcome, even if the feature is already on the roadmap. Your mail will help prioritize feature requests. If you urgently need a feature, consider providing a patch. - -@build_1096_li -Before posting problems, check the FAQ and do a Google search. - -@build_1097_li -When got an unexpected exception, please try the Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s). - -@build_1098_li -When sending source code, please use a public web clipboard such as Pastebin, Cl1p, or Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but so that the problem can still be reproduced. As a template, use: HelloWorld.java. Method that simply call other methods should be avoided, as well as unnecessary exception handling. Please use the JDBC API and no external tools or libraries. The test should include all required initialization code, and should be started with the main method. - -@build_1099_li -For large attachments, use a public temporary storage such as Rapidshare. - -@build_1100_li -Google Group versus issue tracking: Use the Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an issue, but you don't need to (sending an email to the group is enough). 
Please note that only few people monitor the issue tracking system. - -@build_1101_li -For out-of-memory problems, please analyze the problem yourself first, for example using the command line option -XX:+HeapDumpOnOutOfMemoryError (to create a heap dump file on out of memory) and a memory analysis tool such as the Eclipse Memory Analyzer (MAT). - -@build_1102_li -It may take a few days to get an answers. Please do not double post. - -@build_1103_h2 -Automated Build - -@build_1104_p - This build process is automated and runs regularly. The build process includes running the tests and code coverage, using the command line ./build.sh clean jar coverage -Dh2.ftpPassword=... uploadBuild. The last results are available here: - -@build_1105_a -Test Output - -@build_1106_a -Code Coverage Summary - -@build_1107_a -Code Coverage Details (download, 1.3 MB) - -@build_1108_a -Build Newsfeed - -@build_1109_h2 -Generating Railroad Diagrams - -@build_1110_p - The railroad diagrams of the SQL grammar are HTML, formatted as nested tables. The diagrams are generated as follows: - -@build_1111_li -The BNF parser (org.h2.bnf.Bnf) reads and parses the BNF from the file help.csv. - -@build_1112_li -The page parser (org.h2.server.web.PageParser) reads the template HTML file and fills in the diagrams. - -@build_1113_li -The rail images (one straight, four junctions, two turns) are generated using a simple Java application. - -@build_1114_p - To generate railroad diagrams for other grammars, see the package org.h2.jcr. This package is used to generate the SQL-2 railroad diagrams for the JCR 2.0 specification. 
- -@changelog_1000_h1 -Change Log - -@changelog_1001_h2 -Next Version (unreleased) - -@changelog_1002_li -Issue #654: List ENUM type values in INFORMATION_SCHEMA.COLUMNS - -@changelog_1003_li -Issue #668: Fail of an update command on large table with ENUM column - -@changelog_1004_li -Issue #662: column called CONSTRAINT is not properly escaped when storing to metadata - -@changelog_1005_li -Issue #660: Outdated java version mentioned on http://h2database.com/html/build.html#providing_patches - -@changelog_1006_li -Issue #643: H2 doesn't use index when I use IN and EQUAL in one query - -@changelog_1007_li -Reset transaction start timestamp on ROLLBACK - -@changelog_1008_li -Issue #632: CREATE OR REPLACE VIEW creates incorrect columns names - -@changelog_1009_li -Issue #630: Integer overflow in CacheLRU can cause unrestricted cache growth - -@changelog_1010_li -Issue #497: Fix TO_DATE in cases of 'inline' text. E.g. the "T" and "Z" in to_date('2017-04-21T00:00:00Z', 'YYYY-MM-DD"T"HH24:MI:SS"Z"') - -@changelog_1011_li -Fix bug in MySQL/ORACLE-syntax silently corrupting the modified column in cases of setting the 'NULL'- or 'NOT NULL'-constraint. E.g. alter table T modify C NULL; - -@changelog_1012_li -Issue #570: MySQL compatibility for ALTER TABLE .. DROP INDEX - -@changelog_1013_li -Issue #537: Include the COLUMN name in message "Numeric value out of range" - -@changelog_1014_li -Issue #600: ROW_NUMBER() behaviour change in H2 1.4.195 - -@changelog_1015_li -Fix a bunch of race conditions found by vmlens.com, thank you to vmlens for giving us a license. - -@changelog_1016_li -PR #597: Support more types in getObject - -@changelog_1017_li -Issue #591: Generated SQL from WITH-CTEs does not include a table identifier - -@changelog_1018_li -PR #593: Make it possible to create a cluster without using temporary files. 
- -@changelog_1019_li -PR #592: "Connection is broken: "unexpected status 16777216" [90067-192]" message when using older h2 releases as client - -@changelog_1020_li -Issue #585: MySQL mode DELETE statements compatibility - -@changelog_1021_li -PR #586: remove extra tx preparation - -@changelog_1022_li -PR #568: Implement MetaData.getColumns() for synonyms. - -@changelog_1023_li -Issue #581: org.h2.tools.RunScript assumes -script parameter is part of protocol - -@changelog_1024_li -Fix a deadlock in the TransactionStore - -@changelog_1025_li -PR #579: Disallow BLOB type in PostgreSQL mode - -@changelog_1026_li -Issue #576: Common Table Expression (CTE): WITH supports INSERT, UPDATE, MERGE, DELETE, CREATE TABLE ... - -@changelog_1027_li -Issue #493: Query with distinct/limit/offset subquery returns unexpected rows - -@changelog_1028_li -Issue #575: Support for full text search in multithreaded mode - -@changelog_1029_li -Issue #569: ClassCastException when filtering on ENUM value in WHERE clause - -@changelog_1030_li -Issue #539: Allow override of builtin functions/aliases - -@changelog_1031_li -Issue #535: Allow explicit paths on Windows without drive letter - -@changelog_1032_li -Issue #549: Removed UNION ALL requirements for CTE - -@changelog_1033_li -Issue #548: Table synonym support - -@changelog_1034_li -Issue #531: Rollback and delayed meta save. - -@changelog_1035_li -Issue #515: "Unique index or primary key violation" in TestMvccMultiThreaded - -@changelog_1036_li -Issue #458: TIMESTAMPDIFF() test failing. Handling of timestamp literals. 
- -@changelog_1037_li -PR #546: Fixes the missing file tree.js in the web console - -@changelog_1038_li -Issue #543: Prepare statement with regexp will not refresh parameter after metadata change - -@changelog_1039_li -PR #536: Support TIMESTAMP_WITH_TIMEZONE 2014 JDBC type - -@changelog_1040_li -Fix bug in parsing ANALYZE TABLE xxx SAMPLE_SIZE yyy - -@changelog_1041_li -Add padding for CHAR(N) values in PostgreSQL mode - -@changelog_1042_li -Issue #89: Add DB2 timestamp format compatibility - -@changelog_1043_h2 -Version 1.4.196 (2017-06-10) - -@changelog_1044_li -Issue#479 Allow non-recursive CTEs (WITH statements), patch from stumc - -@changelog_1045_li -Fix startup issue when using "CHECK" as a column name. - -@changelog_1046_li -Issue #423: ANALYZE performed multiple times on one table during execution of the same statement. - -@changelog_1047_li -Issue #426: Support ANALYZE TABLE statement - -@changelog_1048_li -Issue #438: Fix slow logging via SLF4J (TRACE_LEVEL_FILE=4). - -@changelog_1049_li -Issue #472: Support CREATE SEQUENCE ... ORDER as a NOOP for Oracle compatibility - -@changelog_1050_li -Issue #479: Allow non-recursive Common Table Expressions (CTE) - -@changelog_1051_li -On Mac OS X, with IPv6 and no network connection, the Console tool was not working as expected. - -@changelog_1052_h2 -Version 1.4.195 (2017-04-23) - -@changelog_1053_li -Lazy query execution support. - -@changelog_1054_li -Added API for handling custom data types (System property "h2.customDataTypesHandler", API org.h2.api.CustomDataTypesHandler). - -@changelog_1055_li -Added support for invisible columns. - -@changelog_1056_li -Added an ENUM data type, with syntax similar to that of MySQL. - -@changelog_1057_li -MVStore: for object data types, the cache size memory estimation was sometimes far off in a read-only scenario. This could result in inefficient cache usage. 
- -@changelog_1058_h2 -Version 1.4.194 (2017-03-10) - -@changelog_1059_li -Issue #453: MVStore setCacheSize() should also limit the cacheChunkRef. - -@changelog_1060_li -Issue #448: Newly added TO_DATE and TO_TIMESTAMP functions have wrong datatype. - -@changelog_1061_li -The "nioMemLZF" filesystem now supports an extra option "nioMemLZF:12:" to tweak the size of the compress later cache. - -@changelog_1062_li -Various multi-threading fixes and optimisations to the "nioMemLZF" filesystem. - -@changelog_1063_strong -[API CHANGE] #439: the JDBC type of TIMESTAMP WITH TIME ZONE changed from Types.OTHER (1111) to Types.TIMESTAMP_WITH_TIMEZONE (2014) - -@changelog_1064_li -#430: Subquery not cached if number of rows exceeds MAX_MEMORY_ROWS. - -@changelog_1065_li -#411: "TIMEZONE" should be "TIME ZONE" in type "TIMESTAMP WITH TIMEZONE". - -@changelog_1066_li -PR #418, Implement Connection#createArrayOf and PreparedStatement#setArray. - -@changelog_1067_li -PR #427, Add MySQL compatibility functions UNIX_TIMESTAMP, FROM_UNIXTIME and DATE. - -@changelog_1068_li -#429: Tables not found : Fix some Turkish locale bugs around uppercasing. - -@changelog_1069_li -Fixed bug in metadata locking, obscure combination of DDL and SELECT SEQUENCE.NEXTVAL required. - -@changelog_1070_li -Added index hints: SELECT * FROM TEST USE INDEX (idx1, idx2). - -@changelog_1071_li -Add a test case to ensure that spatial index is used with and order by command by Fortin N. - -@changelog_1072_li -Fix multi-threaded mode update exception "NullPointerException", test case by Anatolii K. - -@changelog_1073_li -Fix multi-threaded mode insert exception "Unique index or primary key violation", test case by Anatolii K. - -@changelog_1074_li -Implement ILIKE operator for case-insensitive matching. - -@changelog_1075_li -Optimise LIKE queries for the common cases of '%Foo' and '%Foo%'. - -@changelog_1076_li -Issue #387: H2 MSSQL Compatibility Mode - Support uniqueidentifier. 
- -@changelog_1077_li -Issue #401: NPE in "SELECT DISTINCT * ORDER BY". - -@changelog_1078_li -Added BITGET function. - -@changelog_1079_li -Fixed bug in FilePathRetryOnInterrupt that caused infinite loop. - -@changelog_1080_li -PR #389, Handle LocalTime with nanosecond resolution, patch by katzyn. - -@changelog_1081_li -PR #382, Recover for "page store" H2 breaks LOBs consistency, patch by vitalus. - -@changelog_1082_li -PR #393, Run tests on Travis, patch by marschall. - -@changelog_1083_li -Fix bug in REGEX_REPLACE, not parsing the mode parameter. - -@changelog_1084_li -ResultSet.getObject(..., Class) threw a ClassNotFoundException if the JTS suite was not in the classpath. - -@changelog_1085_li -File systems: the "cache:" file system, and the compressed in-memory file systems memLZF and nioMemLZF did not correctly support concurrent reading and writing. - -@changelog_1086_li -TIMESTAMP WITH TIMEZONE: serialization for the PageStore was broken. - -@changelog_1087_h2 -Version 1.4.193 (2016-10-31) - -@changelog_1088_li -PR #386: Add JSR-310 Support (introduces JTS dependency fixed in 1.4.194) - -@changelog_1089_li -WARNING: THE MERGE BELOW WILL AFFECT ANY 'TIMESTAMP WITH TIMEZONE' INDEXES. You will need to drop and recreate any such indexes. - -@changelog_1090_li -PR #364: fix compare TIMESTAMP WITH TIMEZONE - -@changelog_1091_li -Fix bug in picking the right index for INSERT..ON DUPLICATE KEY UPDATE when there are both UNIQUE and PRIMARY KEY constraints. - -@changelog_1092_li -Issue #380: Error Analyzer doesn't show source code - -@changelog_1093_li -Remove the "TIMESTAMP UTC" datatype, an experiment that was never finished. - -@changelog_1094_li -PR #363: Added support to define last IDENTIFIER on a Trigger. 
- -@changelog_1095_li -PR #366: Tests for timestamps - -@changelog_1096_li -PR #361: Improve TimestampWithTimeZone javadoc - -@changelog_1097_li -PR #360: Change getters in TimestampWithTimeZone to int - -@changelog_1098_li -PR #359: Added missing source encoding. Assuming UTF-8. - -@changelog_1099_li -PR #353: Add support for converting JAVA_OBJECT to UUID - -@changelog_1100_li -PR #358: Add support for getObject(int|String, Class) - -@changelog_1101_li -PR #357: Server: use xdg-open to open the WebConsole in the user's preferred browser on Linux - -@changelog_1102_li -PR #356: Support for BEFORE and AFTER clauses when using multiple columns in ALTER TABLE ADD - -@changelog_1103_li -PR #351: Respect format codes from Bind message when sending results - -@changelog_1104_li -ignore summary line when compiling stored procedure - -@changelog_1105_li -PR #348: pg: send RowDescription in response to Describe (statement variant), patch by kostya-sh - -@changelog_1106_li -PR #337: Update russian translation, patch by avp1983 - -@changelog_1107_li -PR #329: Update to servlet API version 3.1.0 from 3.0.1, patch by Mat Booth - -@changelog_1108_li -PR #331: ChangeFileEncryption progress logging ignores -quiet flag, patch by Stefan Bodewig - -@changelog_1109_li -PR #325: Make Row an interface - -@changelog_1110_li -PR #323: Regular expression functions (REGEXP_REPLACE, REGEXP_LIKE) enhancement, patch by Akkuzin - -@changelog_1111_li -Use System.nanoTime for measuring query statistics - -@changelog_1112_li -Issue #324: Deadlock when sending BLOBs over TCP - -@changelog_1113_li -Fix for creating and accessing views in MULTITHREADED mode, test-case courtesy of Daniel Rosenbaum - -@changelog_1114_li -Issue #266: Spatial index not updating, fixed by merging PR #267 - -@changelog_1115_li -PR #302: add support for "with"-subqueries into "join" & "sub-query" statements - -@changelog_1116_li -Issue #299: Nested derived tables did not always work as expected. 
- -@changelog_1117_li -Use interfaces to replace the java version templating, idea from Lukas Eder. - -@changelog_1118_li -Issue #295: JdbcResultSet.getObject(int, Class) returns null instead of throwing. - -@changelog_1119_li -Mac OS X: Console tool process did not stop on exit. - -@changelog_1120_li -MVStoreTool: add "repair" feature. - -@changelog_1121_li -Garbage collection of unused chunks should be faster still. - -@changelog_1122_li -MVStore / transaction store: opening a store in read-only mode does no longer loop. - -@changelog_1123_li -MVStore: disabled the file system cache by default, because it limits concurrency when using larger databases and many threads. To re-enable, use the file name prefix "cache:". - -@changelog_1124_li -MVStore: add feature to set the cache concurrency. - -@changelog_1125_li -File system nioMemFS: support concurrent reads. - -@changelog_1126_li -File systems: the compressed in-memory file systems now compress better. - -@changelog_1127_li -LIRS cache: improved hit rate because now added entries get hot if they were in the non-resident part of the cache before. - -@changelog_1128_h2 -Version 1.4.192 Beta (2016-05-26) - -@changelog_1129_li -Java 6 is no longer supported (the jar files are compiled for Java 7). - -@changelog_1130_li -Garbage collection of unused chunks should now be faster. - -@changelog_1131_li -Prevent people using unsupported combination of auto-increment columns and clustering mode. - -@changelog_1132_li -Support for DB2 time format, patch by Niklas Mehner - -@changelog_1133_li -Added support for Connection.setClientInfo() in compatibility modes for DB2, Postgresql, Oracle and MySQL. - -@changelog_1134_li -Issue #249: Clarify license declaration in Maven POM xml - -@changelog_1135_li -Fix NullPointerException in querying spatial data through a sub-select. - -@changelog_1136_li -Fix bug where a lock on the SYS table was not released when closing a session that contained a temp table with an LOB column. 
- -@changelog_1137_li -Issue #255: ConcurrentModificationException with multiple threads in embedded mode and temporary LOBs - -@changelog_1138_li -Issue #235: Anonymous SSL connections fail in many situations - -@changelog_1139_li -Fix race condition in FILE_LOCK=SOCKET, which could result in the watchdog thread not running - -@changelog_1140_li -Experimental support for datatype TIMESTAMP WITH TIMEZONE - -@changelog_1141_li -Add support for ALTER TABLE ... RENAME CONSTRAINT .. TO ... - -@changelog_1142_li -Add support for PostgreSQL ALTER TABLE ... RENAME COLUMN .. TO ... - -@changelog_1143_li -Add support for ALTER SCHEMA [ IF EXISTS ] - -@changelog_1144_li -Add support for ALTER TABLE [ IF EXISTS ] - -@changelog_1145_li -Add support for ALTER VIEW [ IF EXISTS ] - -@changelog_1146_li -Add support for ALTER INDEX [ IF EXISTS ] - -@changelog_1147_li -Add support for ALTER SEQUENCE [ IF EXISTS ] - -@changelog_1148_li -Improve performance of cleaning up temp tables - patch from Eric Faulhaber. - -@changelog_1149_li -Fix bug where table locks were not dropped when the connection closed - -@changelog_1150_li -Fix extra CPU usage caused by query planner enhancement in 1.4.191 - -@changelog_1151_li -improve performance of queries that use LIKE 'foo%' - 10x in the case of one of my queries - -@changelog_1152_li -The function IFNULL did not always return the result in the right data type. - -@changelog_1153_li -Issue #231: Possible infinite loop when initializing the ObjectDataType class when concurrently writing into MVStore. - -@changelog_1154_h2 -Version 1.4.191 Beta (2016-01-21) - -@changelog_1155_li -TO_DATE and TO_TIMESTAMP functions. Thanks a lot to Sam Blume for the patch! - -@changelog_1156_li -Issue #229: DATEDIFF does not work for 'WEEK'. - -@changelog_1157_li -Issue #156: Add support for getGeneratedKeys() when executing commands via PreparedStatement#executeBatch. - -@changelog_1158_li -Issue #195: The new Maven uses a .cmd file instead of a .bat file. 
- -@changelog_1159_li -Issue #212: EXPLAIN PLAN for UPDATE statement did not display LIMIT expression. - -@changelog_1160_li -Support OFFSET without LIMIT in SELECT. - -@changelog_1161_li -Improve error message for METHOD_NOT_FOUND_1/90087. - -@changelog_1162_li -CLOB and BLOB objects of removed rows were sometimes kept in the database file. - -@changelog_1163_li -Server mode: executing "shutdown" left a thread on the server. - -@changelog_1164_li -The condition "in(select...)" did not work correctly in some cases if the subquery had an "order by". - -@changelog_1165_li -Issue #184: The Platform-independent zip had Windows line endings in Linux scripts. - -@changelog_1166_li -Issue #186: The "script" command did not include sequences of temporary tables. - -@changelog_1167_li -Issue #115: to_char fails with pattern FM0D099. - -@changelog_1168_h2 -Version 1.4.190 Beta (2015-10-11) - -@changelog_1169_li -Pull request #183: optimizer hints (so far without special SQL syntax). - -@changelog_1170_li -Issue #180: In MVCC mode, executing UPDATE and SELECT ... FOR UPDATE simultaneously silently can drop rows. - -@changelog_1171_li -PageStore storage: the cooperative file locking mechanism did not always work as expected (with very slow computers). - -@changelog_1172_li -Temporary CLOB and BLOB objects are now removed while the database is open (and not just when closing the database). - -@changelog_1173_li -MVStore CLOB and BLOB larger than about 25 MB: An exception could be thrown when using the MVStore storage. - -@changelog_1174_li -Add FILE_WRITE function. Patch provided by Nicolas Fortin (Lab-STICC - CNRS UMR 6285 and Ecole Centrale de Nantes) - -@changelog_1175_h2 -Version 1.4.189 Beta (2015-09-13) - -@changelog_1176_li -Add support for dropping multiple columns in ALTER TABLE DROP COLUMN... - -@changelog_1177_li -Fix bug in XA management when doing rollback after prepare. Patch by Stephane Lacoin. 
- -@changelog_1178_li -MVStore CLOB and BLOB: An exception with the message "Block not found" could be thrown when using the MVStore storage, when copying LOB objects (for example due to "alter table" on a table with a LOB object), and then re-opening the database. - -@changelog_1179_li -Fix for issue #171: Broken QueryStatisticsData duration data when trace level smaller than TraceSystem.INFO - -@changelog_1180_li -Pull request #170: Added SET QUERY_STATISTICS_MAX_ENTRIES - -@changelog_1181_li -Pull request #165: Fix compatibility postgresql function string_agg - -@changelog_1182_li -Pull request #163: improved performance when not using the default timezone. - -@changelog_1183_li -Local temporary tables with many rows did not work correctly due to automatic analyze. - -@changelog_1184_li -Server mode: concurrently using the same connection could throw an exception "Connection is broken: unexpected status". - -@changelog_1185_li -Performance improvement for metadata queries that join against the COLUMNS metadata table. - -@changelog_1186_li -An ArrayIndexOutOfBoundsException was thrown in some cases when opening an old version 1.3 database, or an 1.4 database with both "mv_store=false" and the system property "h2.storeLocalTime" set to false. It mainly showed up with an index on a time, date, or timestamp column. The system property "h2.storeLocalTime" is no longer supported (MVStore databases always store local time, and PageStore now databases never do). - -@changelog_1187_h2 -Version 1.4.188 Beta (2015-08-01) - -@changelog_1188_li -Server mode: CLOB processing for texts larger than about 1 MB sometimes did not work. - -@changelog_1189_li -Server mode: BLOB processing for binaries larger than 2 GB did not work. - -@changelog_1190_li -Multi-threaded processing: concurrent deleting the same row could throw the exception "Row not found when trying to delete". 
- -@changelog_1191_li -MVStore transactions: a thread could see a change of a different thread within a different map. Pull request #153. - -@changelog_1192_li -H2 Console: improved IBM DB2 compatibility. - -@changelog_1193_li -A thread deadlock detector (disabled by default) can help detect and analyze Java level deadlocks. To enable, set the system property "h2.threadDeadlockDetector" to true. - -@changelog_1194_li -Performance improvement for metadata queries that join against the COLUMNS metadata table. - -@changelog_1195_li -MVStore: power failure could corrupt the store, if writes were re-ordered. - -@changelog_1196_li -For compatibility with other databases, support for (double and float) -0.0 has been removed. 0.0 is used instead. - -@changelog_1197_li -Fix for #134, Column name with a # character. Patch by bradmesserle. - -@changelog_1198_li -In version 1.4.186, "order by" was broken in some cases due to the change "Make the planner use indexes for sorting when doing a GROUP BY". The change was reverted. - -@changelog_1199_li -Pull request #146: Improved CompareMode. - -@changelog_1200_li -Fix for #144, JdbcResultSet.setFetchDirection() throws "Feature not supported". - -@changelog_1201_li -Fix for issue #143, deadlock between two sessions hitting the same sequence on a column. - -@changelog_1202_li -Pull request #137: SourceCompiler should not throw a syntax error on javac warning. - -@changelog_1203_li -MVStore: out of memory while storing could corrupt the store (theoretically, a rollback would be possible, but this case is not yet implemented). - -@changelog_1204_li -The compressed in-memory file systems (memLZF:) could not be used in the MVStore. - -@changelog_1205_li -The in-memory file systems (memFS: and memLZF:) did not support files larger than 2 GB due to an integer overflow. 
- -@changelog_1206_li -Pull request #138: Added the simple Oracle function: ORA_HASH (+ tests) #138 - -@changelog_1207_li -Timestamps in the trace log follow the format (yyyy-MM-dd HH:mm:ss) instead of the old format (MM-dd HH:mm:ss). Patch by Richard Bull. - -@changelog_1208_li -Pull request #125: Improved Oracle compatibility with "truncate" with timestamps and dates. - -@changelog_1209_li -Pull request #127: Linked tables now support geometry columns. - -@changelog_1210_li -ABS(CAST(0.0 AS DOUBLE)) returned -0.0 instead of 0.0. - -@changelog_1211_li -BNF auto-completion failed with unquoted identifiers. - -@changelog_1212_li -Oracle compatibility: empty strings were not converted to NULL when using prepared statements. - -@changelog_1213_li -PostgreSQL compatibility: new syntax "create index ... using ...". - -@changelog_1214_li -There was a bug in DataType.convertToValue when reading a ResultSet from a ResultSet. - -@changelog_1215_li -Pull request #116: Improved concurrency in the trace system. - -@changelog_1216_li -Issue 609: the spatial index did not support NULL. - -@changelog_1217_li -Granting a schema is now supported. - -@changelog_1218_li -Linked tables did not work when a function-based index is present (Oracle). - -@changelog_1219_li -Creating a user with a null password, salt, or hash threw a NullPointerException. - -@changelog_1220_li -Foreign key: don't add a single column index if column is leading key of existing index. - -@changelog_1221_li -Pull request #4: Creating and removing temporary tables was getting slower and slower over time, because an internal object id was allocated but never de-allocated. - -@changelog_1222_li -Issue 609: the spatial index did not support NULL with update and delete operations. - -@changelog_1223_li -Pull request #2: Add external metadata type support (table type "external") - -@changelog_1224_li -MS SQL Server: the CONVERT method did not work in views and derived tables. 
- -@changelog_1225_li -Java 8 compatibility for "regexp_replace". - -@changelog_1226_li -When in cluster mode, and one of the nodes goes down, we need to log the problem with priority "error", not "debug" - -@changelog_1227_h2 -Version 1.4.187 Beta (2015-04-10) - -@changelog_1228_li -MVStore: concurrent changes to the same row could result in the exception "The transaction log might be corrupt for key ...". This could only be reproduced with 3 or more threads. - -@changelog_1229_li -Results with CLOB or BLOB data are no longer reused. - -@changelog_1230_li -References to BLOB and CLOB objects now have a timeout. The configuration setting is LOB_TIMEOUT (default 5 minutes). This should avoid growing the database file if there are many queries that return BLOB or CLOB objects, and the database is not closed for a longer time. - -@changelog_1231_li -MVStore: when committing a session that removed LOB values, changes were flushed unnecessarily. - -@changelog_1232_li -Issue 610: possible integer overflow in WriteBuffer.grow(). - -@changelog_1233_li -Issue 609: the spatial index did not support NULL (ClassCastException). - -@changelog_1234_li -MVStore: in some cases, CLOB/BLOB data blocks were removed incorrectly when opening a database. - -@changelog_1235_li -MVStore: updates that affected many rows were were slow in some cases if there was a secondary index. - -@changelog_1236_li -Using "runscript" with autocommit disabled could result in a lock timeout on the internal table "SYS". - -@changelog_1237_li -Issue 603: there was a memory leak when using H2 in a web application. Apache Tomcat logged an error message: "The web application ... created a ThreadLocal with key of type [org.h2.util.DateTimeUtils$1]". - -@changelog_1238_li -When using the MVStore, running a SQL script generate by the Recover tool from a PageStore file failed with a strange error message (NullPointerException), now a clear error message is shown. 
- -@changelog_1239_li -Issue 605: with version 1.4.186, opening a database could result in an endless loop in LobStorageMap.init. - -@changelog_1240_li -Queries that use the same table alias multiple times now work. Before, the select expression list was expanded incorrectly. Example: "select * from a as x, b as x". - -@changelog_1241_li -The MySQL compatibility feature "insert ... on duplicate key update" did not work with a non-default schema. - -@changelog_1242_li -Issue 599: the condition "in(x, y)" could not be used in the select list when using "group by". - -@changelog_1243_li -The LIRS cache could grow larger than the allocated memory. - -@changelog_1244_li -A new file system implementation that re-opens the file if it was closed due to the application calling Thread.interrupt(). File name prefix "retry:". Please note it is strongly recommended to avoid calling Thread.interrupt; this is a problem for various libraries, including Apache Lucene. - -@changelog_1245_li -MVStore: use RandomAccessFile file system if the file name starts with "file:". - -@changelog_1246_li -Allow DATEADD to take a long value for count when manipulating milliseconds. - -@changelog_1247_li -When using MV_STORE=TRUE and the SET CACHE_SIZE setting, the cache size was incorrectly set, so that it was effectively 1024 times smaller than it should be. - -@changelog_1248_li -Concurrent CREATE TABLE... IF NOT EXISTS in the presence of MULTI_THREAD=TRUE could throw an exception. - -@changelog_1249_li -Fix bug in MVStore when creating lots of temporary tables, where we could run out of transaction IDs. - -@changelog_1250_li -Add support for PostgreSQL STRING_AGG function. Patch by Fred Aquiles. - -@changelog_1251_li -Fix bug in "jdbc:h2:nioMemFS" isRoot() function. Also, the page size was increased to 64 KB. - -@changelog_1252_h2 -Version 1.4.186 Beta (2015-03-02) - -@changelog_1253_li -The Servlet API 3.0.1 is now used, instead of 2.4. 
- -@changelog_1254_li -MVStore: old chunks no longer removed in append-only mode. - -@changelog_1255_li -MVStore: the cache for page references could grow far too big, resulting in out of memory in some cases. - -@changelog_1256_li -MVStore: orphaned lob objects were not correctly removed in some cases, making the database grow unnecessarily. - -@changelog_1257_li -MVStore: the maximum cache size was artificially limited to 2 GB (due to an integer overflow). - -@changelog_1258_li -MVStore / TransactionStore: concurrent updates could result in a "Too many open transactions" exception. - -@changelog_1259_li -StringUtils.toUpperEnglish now has a small cache. This should speed up reading from a ResultSet when using the column name. - -@changelog_1260_li -MVStore: up to 65535 open transactions are now supported. Previously, the limit was at most 65535 transactions between the oldest open and the newest open transaction (which was quite a strange limit). - -@changelog_1261_li -The default limit for in-place LOB objects was changed from 128 to 256 bytes. This is because each read creates a reference to a LOB, and maintaining the references is a big overhead. With the higher limit, less references are needed. - -@changelog_1262_li -Tables without columns didn't work. (The use case for such tables is testing.) - -@changelog_1263_li -The LIRS cache now resizes the table automatically in all cases and no longer needs the averageMemory configuration. - -@changelog_1264_li -Creating a linked table from an MVStore database to a non-MVStore database created a second (non-MVStore) database file. - -@changelog_1265_li -In version 1.4.184, a bug was introduced that broke queries that have both joins and wildcards, for example: select * from dual join(select x from dual) on 1=1 - -@changelog_1266_li -Issue 598: parser fails on timestamp "24:00:00.1234" - prevent the creation of out-of-range time values. - -@changelog_1267_li -Allow declaring triggers as source code (like functions). 
Patch by Sylvain Cuaz. - -@changelog_1268_li -Make the planner use indexes for sorting when doing a GROUP BY where all of the GROUP BY columns are not mentioned in the select. Patch by Frederico (zepfred). - -@changelog_1269_li -PostgreSQL compatibility: generate_series (as an alias for system_range). Patch by litailang. - -@changelog_1270_li -Fix missing "column" type in right-hand parameter in ConditionIn. Patch by Arnaud Thimel. - -@changelog_1271_h2 -Version 1.4.185 Beta (2015-01-16) - -@changelog_1272_li -In version 1.4.184, "group by" ignored the table name, and could pick a select column by mistake. Example: select 0 as x from system_range(1, 2) d group by d.x; - -@changelog_1273_li -New connection setting "REUSE_SPACE" (default: true). If disabled, all changes are appended to the database file, and existing content is never overwritten. This allows to rollback to a previous state of the database by truncating the database file. - -@changelog_1274_li -Issue 587: MVStore: concurrent compaction and store operations could result in an IllegalStateException. - -@changelog_1275_li -Issue 594: Profiler.copyInThread does not work properly. - -@changelog_1276_li -Script tool: Now, SCRIPT ... TO is always used (for higher speed and lower disk space usage). - -@changelog_1277_li -Script tool: Fix parsing of BLOCKSIZE parameter, original patch by Ken Jorissen. - -@changelog_1278_li -Fix bug in PageStore#commit method - when the ignoreBigLog flag was set, the logic that cleared the flag could never be reached, resulting in performance degradation. Reported by Alexander Nesterov. - -@changelog_1279_li -Issue 552: Implement BIT_AND and BIT_OR aggregate functions. - -@changelog_1280_h2 -Version 1.4.184 Beta (2014-12-19) - -@changelog_1281_li -In version 1.3.183, indexes were not used if the table contains columns with a default value generated by a sequence. This includes tables with identity and auto-increment columns. 
This bug was introduced by supporting "rownum" in views and derived tables. - -@changelog_1282_li -MVStore: imported BLOB and CLOB data sometimes disappeared. This was caused by a bug in the ObjectDataType comparison. - -@changelog_1283_li -Reading from a StreamStore now throws an IOException if the underlying data doesn't exist. - -@changelog_1284_li -MVStore: if there is an exception while saving, the store is now in all cases immediately closed. - -@changelog_1285_li -MVStore: the dump tool could go into an endless loop for some files. - -@changelog_1286_li -MVStore: recovery for a database with many CLOB or BLOB entries is now much faster. - -@changelog_1287_li -Group by with a quoted select column name alias didn't work. Example: select 1 "a" from dual group by "a" - -@changelog_1288_li -Auto-server mode: the host name is now stored in the .lock.db file. - -@changelog_1289_h2 -Version 1.4.183 Beta (2014-12-13) - -@changelog_1290_li -MVStore: the default auto-commit buffer size is now about twice as big. This should reduce the database file size after inserting a lot of data. - -@changelog_1291_li -The built-in functions "power" and "radians" now always return a double. - -@changelog_1292_li -Using "row_number" or "rownum" in views or derived tables had unexpected results if the outer query contained constraints for the given view. Example: select b.nr, b.id from (select row_number() over() as nr, a.id as id from (select id from test order by name) as a) as b where b.id = 1 - -@changelog_1293_li -MVStore: the Recover tool can now deal with more types of corruption in the file. - -@changelog_1294_li -MVStore: the TransactionStore now first needs to be initialized before it can be used. - -@changelog_1295_li -Views and derived tables with equality and range conditions on the same columns did not work properly. 
example: select x from (select x from (select 1 as x) where x > 0 and x < 2) where x = 1 - -@changelog_1296_li -The database URL setting PAGE_SIZE setting is now also used for the MVStore. - -@changelog_1297_li -MVStore: the default page split size for persistent stores is now 4096 (it was 16 KB so far). This should reduce the database file size for most situations (in some cases, less than half the size of the previous version). - -@changelog_1298_li -With query literals disabled, auto-analyze of a table with CLOB or BLOB did not work. - -@changelog_1299_li -MVStore: use a mark and sweep GC algorithm instead of reference counting, to ensure used chunks are never overwrite, even if the reference counting algorithm does not work properly. - -@changelog_1300_li -In the multi-threaded mode, updating the column selectivity ("analyze") in the background sometimes did not work. - -@changelog_1301_li -In the multi-threaded mode, database metadata operations did sometimes not work if the schema was changed at the same time (for example, if tables were dropped). - -@changelog_1302_li -Some CLOB and BLOB values could no longer be read when the original row was removed (even when using the MVCC mode). - -@changelog_1303_li -The MVStoreTool could throw an IllegalArgumentException. - -@changelog_1304_li -Improved performance for some date / time / timestamp conversion operations. Thanks to Sergey Evdokimov for reporting the problem. - -@changelog_1305_li -H2 Console: the built-in web server did not work properly if an unknown file was requested. - -@changelog_1306_li -MVStore: the jar file is renamed to "h2-mvstore-*.jar" and is deployed to Maven separately. - -@changelog_1307_li -MVStore: support for concurrent reads and writes is now enabled by default. - -@changelog_1308_li -Server mode: the transfer buffer size has been changed from 16 KB to 64 KB, after it was found that this improves performance on Linux quite a lot. 
- -@changelog_1309_li -H2 Console and server mode: SSL is now disabled and TLS is used to protect against the Poodle SSLv3 vulnerability. The system property to disable secure anonymous connections is now "h2.enableAnonymousTLS". The default certificate is still self-signed, so you need to manually install another one if you want to avoid man in the middle attacks. - -@changelog_1310_li -MVStore: the R-tree did not correctly measure the memory usage. - -@changelog_1311_li -MVStore: compacting a store with an R-tree did not always work. - -@changelog_1312_li -Issue 581: When running in LOCK_MODE=0, JdbcDatabaseMetaData#supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED) should return false - -@changelog_1313_li -Fix bug which could generate deadlocks when multiple connections accessed the same table. - -@changelog_1314_li -Some places in the code were not respecting the value set in the "SET MAX_MEMORY_ROWS x" command - -@changelog_1315_li -Fix bug which could generate a NegativeArraySizeException when performing large (>40M) row union operations - -@changelog_1316_li -Fix "USE schema" command for MySQL compatibility, patch by mfulton - -@changelog_1317_li -Parse and ignore the ROW_FORMAT=DYNAMIC MySQL syntax, patch by mfulton - -@changelog_1318_h2 -Version 1.4.182 Beta (2014-10-17) - -@changelog_1319_li -MVStore: improved error messages and logging; improved behavior if there is an error when serializing objects. - -@changelog_1320_li -OSGi: the MVStore packages are now exported. - -@changelog_1321_li -With the MVStore option, when using multiple threads that concurrently create indexes or tables, it was relatively easy to get a lock timeout on the "SYS" table. - -@changelog_1322_li -When using the multi-threaded option, the exception "Unexpected code path" could be thrown, specially if the option "analyze_auto" was set to a low value. 
- -@changelog_1323_li -In the server mode, when reading from a CLOB or BLOB, if the connection was closed, a NullPointerException could be thrown instead of an exception saying the connection is closed. - -@changelog_1324_li -DatabaseMetaData.getProcedures and getProcedureColumns could throw an exception if a user defined class is not available. - -@changelog_1325_li -Issue 584: the error message for a wrong sequence definition was wrong. - -@changelog_1326_li -CSV tool: the rowSeparator option is no longer supported, as the same can be achieved with the lineSeparator. - -@changelog_1327_li -Descending indexes on MVStore tables did not work properly. - -@changelog_1328_li -Issue 579: Conditions on the "_rowid_" pseudo-column didn't use an index when using the MVStore. - -@changelog_1329_li -Fixed documentation that "offset" and "fetch" are also keywords since version 1.4.x. - -@changelog_1330_li -The Long.MIN_VALUE could not be parsed for auto-increment (identity) columns. - -@changelog_1331_li -Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in other JDBC classes. - -@changelog_1332_li -Issue 572: MySQL compatibility for "order by" in update statements. - -@changelog_1333_li -The change in JDBC escape processing in version 1.4.181 affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax "{t 'time}", or "{ts 'timestamp'}", or "{d 'data'}", then both the client and the server need to be upgraded to version 1.4.181 or later. - -@changelog_1334_h2 -Version 1.4.181 Beta (2014-08-06) - -@changelog_1335_li -Improved MySQL compatibility by supporting "use schema". Thanks a lot to Karl Pietrzak for the patch! - -@changelog_1336_li -Writing to the trace file is now faster, specially with the debug level. - -@changelog_1337_li -The database option "defrag_always=true" did not work with the MVStore. 
- -@changelog_1338_li -The JDBC escape syntax {ts 'value'} did not interpret the value as a timestamp. The same for {d 'value'} (for date) and {t 'value'} (for time). Thanks to Lukas Eder for reporting the issue. The following problem was detected after version 1.4.181 was released: The change in JDBC escape processing affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax {t 'time'}, or {ts 'timestamp'}, or {d 'date'}, then both the client and the server need to be upgraded to version 1.4.181 or later. - -@changelog_1339_li -File system abstraction: support replacing existing files using move (currently not for Windows). - -@changelog_1340_li -The statement "shutdown defrag" now compresses the database (with the MVStore). This command can greatly reduce the file size, and is relatively fast, but is not incremental. - -@changelog_1341_li -The MVStore now automatically compacts the store in the background if there is no read or write activity, which should (after some time; sometimes about one minute) reduce the file size. This is still work in progress, feedback is welcome! - -@changelog_1342_li -Change default value of PAGE_SIZE from 2048 to 4096 to more closely match most file systems block size (PageStore only; the MVStore already used 4096). - -@changelog_1343_li -Auto-scale MAX_MEMORY_ROWS and CACHE_SIZE settings by the amount of available RAM. Gives a better out of box experience for people with more powerful machines. - -@changelog_1344_li -Handle tabs like 4 spaces in web console, patch by Martin Grajcar. - -@changelog_1345_li -Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in JdbcConnection.java, patch by BigMichi1. - -@changelog_1346_h2 -Version 1.4.180 Beta (2014-07-13) - -@changelog_1347_li -MVStore: the store is now auto-compacted automatically up to some point, to avoid very large file sizes. This area is still work in progress. 
- -@changelog_1348_li -Sequences of temporary tables (auto-increment or identity columns) were persisted unnecessarily in the database file, and were not removed when re-opening the database. - -@changelog_1349_li -MVStore: an IndexOutOfBoundsException could sometimes occur MVMap.openVersion when concurrently accessing the store. - -@changelog_1350_li -The LIRS cache now re-sizes the internal hash map if needed. - -@changelog_1351_li -Optionally persist session history in the H2 console. (patch from Martin Grajcar) - -@changelog_1352_li -Add client-info property to get the number of servers currently in the cluster and which servers that are available. (patch from Nikolaj Fogh) - -@changelog_1353_li -Fix bug in changing encrypted DB password that kept the file handle open when the wrong password was supplied. (test case from Jens Hohmuth). - -@changelog_1354_li -Issue 567: H2 hangs for a long time then (sometimes) recovers. Introduce a queue when doing table locking to prevent session starvation. - -@cheatSheet_1000_h1 -H2 Database Engine Cheat Sheet - -@cheatSheet_1001_h2 -Using H2 - -@cheatSheet_1002_a -H2 - -@cheatSheet_1003_li - is open source, free to use and distribute. - -@cheatSheet_1004_a -Download - -@cheatSheet_1005_li -: jar, installer (Windows), zip. - -@cheatSheet_1006_li -To start the H2 Console tool, double click the jar file, or run java -jar h2*.jar, h2.bat, or h2.sh. - -@cheatSheet_1007_a -A new database is automatically created - -@cheatSheet_1008_a -by default - -@cheatSheet_1009_li -. - -@cheatSheet_1010_a -Closing the last connection closes the database - -@cheatSheet_1011_li -. 
- -@cheatSheet_1012_h2 -Documentation - -@cheatSheet_1013_p - Reference: SQL grammar, functions, data types, tools, API - -@cheatSheet_1014_a -Features - -@cheatSheet_1015_p -: fulltext search, encryption, read-only (zip/jar), CSV, auto-reconnect, triggers, user functions - -@cheatSheet_1016_a -Database URLs - -@cheatSheet_1017_a -Embedded - -@cheatSheet_1018_code -jdbc:h2:~/test - -@cheatSheet_1019_p - 'test' in the user home directory - -@cheatSheet_1020_code -jdbc:h2:/data/test - -@cheatSheet_1021_p - 'test' in the directory /data - -@cheatSheet_1022_code -jdbc:h2:test - -@cheatSheet_1023_p - in the current(!) working directory - -@cheatSheet_1024_a -In-Memory - -@cheatSheet_1025_code -jdbc:h2:mem:test - -@cheatSheet_1026_p - multiple connections in one process - -@cheatSheet_1027_code -jdbc:h2:mem: - -@cheatSheet_1028_p - unnamed private; one connection - -@cheatSheet_1029_a -Server Mode - -@cheatSheet_1030_code -jdbc:h2:tcp://localhost/~/test - -@cheatSheet_1031_p - user home dir - -@cheatSheet_1032_code -jdbc:h2:tcp://localhost//data/test - -@cheatSheet_1033_p - absolute dir - -@cheatSheet_1034_a -Server start - -@cheatSheet_1035_p -:java -cp *.jar org.h2.tools.Server - -@cheatSheet_1036_a -Settings - -@cheatSheet_1037_code -jdbc:h2:..;MODE=MySQL - -@cheatSheet_1038_a -compatibility (or HSQLDB,...) - -@cheatSheet_1039_code -jdbc:h2:..;TRACE_LEVEL_FILE=3 - -@cheatSheet_1040_a -log to *.trace.db - -@cheatSheet_1041_a -Using the JDBC API - -@cheatSheet_1042_a -Connection Pool - -@cheatSheet_1043_a -Maven 2 - -@cheatSheet_1044_a -Hibernate - -@cheatSheet_1045_p - hibernate.cfg.xml (or use the HSQLDialect): - -@cheatSheet_1046_a -TopLink and Glassfish - -@cheatSheet_1047_p - Datasource class: org.h2.jdbcx.JdbcDataSource - -@cheatSheet_1048_code -oracle.toplink.essentials.platform. 
- -@cheatSheet_1049_code -database.H2Platform - -@download_1000_h1 -Downloads - -@download_1001_h3 -Version 1.4.196 (2017-06-10) - -@download_1002_a -Windows Installer - -@download_1003_a -Platform-Independent Zip - -@download_1004_h3 -Version 1.4.195 (2017-04-23), Last Stable - -@download_1005_a -Windows Installer - -@download_1006_a -Platform-Independent Zip - -@download_1007_h3 -Old Versions - -@download_1008_a -Platform-Independent Zip - -@download_1009_h3 -Jar File - -@download_1010_a -Maven.org - -@download_1011_a -Sourceforge.net - -@download_1012_h3 -Maven (Binary, Javadoc, and Source) - -@download_1013_a -Binary - -@download_1014_a -Javadoc - -@download_1015_a -Sources - -@download_1016_h3 -Database Upgrade Helper File - -@download_1017_a -Upgrade database from 1.1 to the current version - -@download_1018_h3 -Git Source Repository - -@download_1019_a -Github - -@download_1020_p - For details about changes, see the Change Log. - -@download_1021_h3 -News and Project Information - -@download_1022_a -Atom Feed - -@download_1023_a -RSS Feed - -@download_1024_a -DOAP File - -@download_1025_p - (what is this) - -@faq_1000_h1 -Frequently Asked Questions - -@faq_1001_a - I Have a Problem or Feature Request - -@faq_1002_a - Are there Known Bugs? When is the Next Release? - -@faq_1003_a - Is this Database Engine Open Source? - -@faq_1004_a - Is Commercial Support Available? - -@faq_1005_a - How to Create a New Database? - -@faq_1006_a - How to Connect to a Database? - -@faq_1007_a - Where are the Database Files Stored? - -@faq_1008_a - What is the Size Limit (Maximum Size) of a Database? - -@faq_1009_a - Is it Reliable? - -@faq_1010_a - Why is Opening my Database Slow? - -@faq_1011_a - My Query is Slow - -@faq_1012_a - H2 is Very Slow - -@faq_1013_a - Column Names are Incorrect? - -@faq_1014_a - Float is Double? - -@faq_1015_a - Is the GCJ Version Stable? Faster? - -@faq_1016_a - How to Translate this Project? - -@faq_1017_a - How to Contribute to this Project? 
- -@faq_1018_h3 -I Have a Problem or Feature Request - -@faq_1019_p - Please read the support checklist. - -@faq_1020_h3 -Are there Known Bugs? When is the Next Release? - -@faq_1021_p - Usually, bugs get fixes as they are found. There is a release every few weeks. Here is the list of known and confirmed issues: - -@faq_1022_li -When opening a database file in a timezone that has different daylight saving rules: the time part of dates where the daylight saving doesn't match will differ. This is not a problem within regions that use the same rules (such as, within USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new timezone. - -@faq_1023_li -Apache Harmony: there seems to be a bug in Harmony that affects H2. See HARMONY-6505. - -@faq_1024_li -Tomcat and Glassfish 3 set most static fields (final or non-final) to null when unloading a web application. This can cause a NullPointerException in H2 versions 1.1.107 and older, and may still not work in newer versions. Please report it if you run into this issue. In Tomcat >= 6.0 this behavior can be disabled by setting the system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false, however Tomcat may then run out of memory. A known workaround is to put the h2*.jar file in a shared lib directory (common/lib). - -@faq_1025_li -Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3. - -@faq_1026_li -When using Install4j before 4.1.4 on Linux and enabling pack200, the h2*.jar becomes corrupted by the install process, causing application failure. A workaround is to add an empty file h2*.jar.nopack next to the h2*.jar file. This problem is solved in Install4j 4.1.4. 
- -@faq_1027_p - For a complete list, see Open Issues. - -@faq_1028_h3 -Is this Database Engine Open Source? - -@faq_1029_p - Yes. It is free to use and distribute, and the source code is included. See also under license. - -@faq_1030_h3 -Is Commercial Support Available? - -@faq_1031_p - No, currently commercial support is not available. - -@faq_1032_h3 -How to Create a New Database? - -@faq_1033_p - By default, a new database is automatically created if it does not yet exist. See Creating New Databases. - -@faq_1034_h3 -How to Connect to a Database? - -@faq_1035_p - The database driver is org.h2.Driver, and the database URL starts with jdbc:h2:. To connect to a database using JDBC, use the following code: - -@faq_1036_h3 -Where are the Database Files Stored? - -@faq_1037_p - When using database URLs like jdbc:h2:~/test, the database is stored in the user directory. For Windows, this is usually C:\Documents and Settings\<userName> or C:\Users\<userName>. If the base directory is not set (as in jdbc:h2:./test), the database files are stored in the directory where the application is started (the current working directory). When using the H2 Console application from the start menu, this is <Installation Directory>/bin. The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL jdbc:h2:file:./data/sample, the database is stored in the directory data (relative to the current working directory). The directory is created automatically if it does not yet exist. It is also possible to use the fully qualified directory name (and for Windows, drive name). Example: jdbc:h2:file:C:/data/test - -@faq_1038_h3 -What is the Size Limit (Maximum Size) of a Database? - -@faq_1039_p - See Limits and Limitations. - -@faq_1040_h3 -Is it Reliable? - -@faq_1041_p - That is not easy to say. It is still a quite new product. A lot of tests have been written, and the code coverage of these tests is higher than 80% for each package. 
Randomized stress tests are run regularly. But there are probably still bugs that have not yet been found (as with most software). Some features are known to be dangerous, they are only supported for situations where performance is more important than reliability. Those dangerous features are: - -@faq_1042_li -Disabling the transaction log or FileDescriptor.sync() using LOG=0 or LOG=1. - -@faq_1043_li -Using the transaction isolation level READ_UNCOMMITTED (LOCK_MODE 0) while at the same time using multiple connections. - -@faq_1044_li -Disabling database file protection using (setting FILE_LOCK to NO in the database URL). - -@faq_1045_li -Disabling referential integrity using SET REFERENTIAL_INTEGRITY FALSE. - -@faq_1046_p - In addition to that, running out of memory should be avoided. In older versions, OutOfMemory errors while using the database could corrupt a databases. - -@faq_1047_p - This database is well tested using automated test cases. The tests run every night and run for more than one hour. But not all areas of this database are equally well tested. When using one of the following features for production, please ensure your use case is well tested (if possible with automated test cases). The areas that are not well tested are: - -@faq_1048_li -Platforms other than Windows, Linux, Mac OS X, or JVMs other than Oracle 1.6, 1.7, 1.8. - -@faq_1049_li -The features AUTO_SERVER and AUTO_RECONNECT. - -@faq_1050_li -Cluster mode, 2-phase commit, savepoints. - -@faq_1051_li -Fulltext search. - -@faq_1052_li -Operations on LOBs over 2 GB. - -@faq_1053_li -The optimizer may not always select the best plan. - -@faq_1054_li -Using the ICU4J collator. - -@faq_1055_p - Areas considered experimental are: - -@faq_1056_li -The PostgreSQL server - -@faq_1057_li -Clustering (there are cases were transaction isolation can be broken due to timing issues, for example one session overtaking another session). 
- -@faq_1058_li -Multi-threading within the engine using SET MULTI_THREADED=1. - -@faq_1059_li -Compatibility modes for other databases (only some features are implemented). - -@faq_1060_li -The soft reference cache (CACHE_TYPE=SOFT_LRU). It might not improve performance, and out of memory issues have been reported. - -@faq_1061_p - Some users have reported that after a power failure, the database cannot be opened sometimes. In this case, use a backup of the database or the Recover tool. Please report such problems. The plan is that the database automatically recovers in all situations. - -@faq_1062_h3 -Why is Opening my Database Slow? - -@faq_1063_p - To find out what the problem is, use the H2 Console and click on "Test Connection" instead of "Login". After the "Login Successful" appears, click on it (it's a link). This will list the top stack traces. Then either analyze this yourself, or post those stack traces in the Google Group. - -@faq_1064_p - Other possible reasons are: the database is very big (many GB), or contains linked tables that are slow to open. - -@faq_1065_h3 -My Query is Slow - -@faq_1066_p - Slow SELECT (or DELETE, UPDATE, MERGE) statement can have multiple reasons. Follow this checklist: - -@faq_1067_li -Run ANALYZE (see documentation for details). - -@faq_1068_li -Run the query with EXPLAIN and check if indexes are used (see documentation for details). - -@faq_1069_li -If required, create additional indexes and try again using ANALYZE and EXPLAIN. - -@faq_1070_li -If it doesn't help please report the problem. - -@faq_1071_h3 -H2 is Very Slow - -@faq_1072_p - By default, H2 closes the database when the last connection is closed. If your application closes the only connection after each operation, the database is opened and closed a lot, which is quite slow. There are multiple ways to solve this problem, see Database Performance Tuning. - -@faq_1073_h3 -Column Names are Incorrect? 
- -@faq_1074_p - For the query SELECT ID AS X FROM TEST the method ResultSetMetaData.getColumnName() returns ID, I expect it to return X. What's wrong? - -@faq_1075_p - This is not a bug. According the the JDBC specification, the method ResultSetMetaData.getColumnName() should return the name of the column and not the alias name. If you need the alias name, use ResultSetMetaData.getColumnLabel(). Some other database don't work like this yet (they don't follow the JDBC specification). If you need compatibility with those databases, use the Compatibility Mode, or append ;ALIAS_COLUMN_NAME=TRUE to the database URL. - -@faq_1076_p - This also applies to DatabaseMetaData calls that return a result set. The columns in the JDBC API are column labels, not column names. - -@faq_1077_h3 -Float is Double? - -@faq_1078_p - For a table defined as CREATE TABLE TEST(X FLOAT) the method ResultSet.getObject() returns a java.lang.Double, I expect it to return a java.lang.Float. What's wrong? - -@faq_1079_p - This is not a bug. According the the JDBC specification, the JDBC data type FLOAT is equivalent to DOUBLE, and both are mapped to java.lang.Double. See also Mapping SQL and Java Types - 8.3.10 FLOAT. - -@faq_1080_h3 -Is the GCJ Version Stable? Faster? - -@faq_1081_p - The GCJ version is not as stable as the Java version. When running the regression test with the GCJ version, sometimes the application just stops at what seems to be a random point without error message. Currently, the GCJ version is also slower than when using the Sun VM. However, the startup of the GCJ version is faster than when using a VM. - -@faq_1082_h3 -How to Translate this Project? - -@faq_1083_p - For more information, see Build/Translating. - -@faq_1084_h3 -How to Contribute to this Project? - -@faq_1085_p - There are various way to help develop an open source project like H2. The first step could be to translate the error messages and the GUI to your native language. Then, you could provide patches. 
Please start with small patches. That could be adding a test case to improve the code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the feature request list. I suggest to start with very small features that are easy to implement. Keep in mind to provide test cases as well. - -@features_1000_h1 -Features - -@features_1001_a - Feature List - -@features_1002_a - Comparison to Other Database Engines - -@features_1003_a - H2 in Use - -@features_1004_a - Connection Modes - -@features_1005_a - Database URL Overview - -@features_1006_a - Connecting to an Embedded (Local) Database - -@features_1007_a - In-Memory Databases - -@features_1008_a - Database Files Encryption - -@features_1009_a - Database File Locking - -@features_1010_a - Opening a Database Only if it Already Exists - -@features_1011_a - Closing a Database - -@features_1012_a - Ignore Unknown Settings - -@features_1013_a - Changing Other Settings when Opening a Connection - -@features_1014_a - Custom File Access Mode - -@features_1015_a - Multiple Connections - -@features_1016_a - Database File Layout - -@features_1017_a - Logging and Recovery - -@features_1018_a - Compatibility - -@features_1019_a - Auto-Reconnect - -@features_1020_a - Automatic Mixed Mode - -@features_1021_a - Page Size - -@features_1022_a - Using the Trace Options - -@features_1023_a - Using Other Logging APIs - -@features_1024_a - Read Only Databases - -@features_1025_a - Read Only Databases in Zip or Jar File - -@features_1026_a - Computed Columns / Function Based Index - -@features_1027_a - Multi-Dimensional Indexes - -@features_1028_a - User-Defined Functions and Stored Procedures - -@features_1029_a - Pluggable or User-Defined Tables - -@features_1030_a - Triggers - -@features_1031_a - Compacting a Database - -@features_1032_a - Cache Settings - -@features_1033_h2 -Feature List 
- -@features_1034_h3 -Main Features - -@features_1035_li -Very fast database engine - -@features_1036_li -Open source - -@features_1037_li -Written in Java - -@features_1038_li -Supports standard SQL, JDBC API - -@features_1039_li -Embedded and Server mode, Clustering support - -@features_1040_li -Strong security features - -@features_1041_li -The PostgreSQL ODBC driver can be used - -@features_1042_li -Multi version concurrency - -@features_1043_h3 -Additional Features - -@features_1044_li -Disk based or in-memory databases and tables, read-only database support, temporary tables - -@features_1045_li -Transaction support (read committed), 2-phase-commit - -@features_1046_li -Multiple connections, table level locking - -@features_1047_li -Cost based optimizer, using a genetic algorithm for complex queries, zero-administration - -@features_1048_li -Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set - -@features_1049_li -Encrypted database (AES), SHA-256 password encryption, encryption functions, SSL - -@features_1050_h3 -SQL Support - -@features_1051_li -Support for multiple schemas, information schema - -@features_1052_li -Referential integrity / foreign key constraints with cascade, check constraints - -@features_1053_li -Inner and outer joins, subqueries, read only views and inline views - -@features_1054_li -Triggers and Java functions / stored procedures - -@features_1055_li -Many built-in functions, including XML and lossless data compression - -@features_1056_li -Wide range of data types including large objects (BLOB/CLOB) and arrays - -@features_1057_li -Sequence and autoincrement columns, computed columns (can be used for function based indexes) - -@features_1058_code -ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP - -@features_1059_li -Collation support, including support for the ICU4J library - -@features_1060_li -Support for users and roles - -@features_1061_li -Compatibility modes for 
IBM DB2, Apache Derby, HSQLDB, MS SQL Server, MySQL, Oracle, and PostgreSQL. - -@features_1062_h3 -Security Features - -@features_1063_li -Includes a solution for the SQL injection problem - -@features_1064_li -User password authentication uses SHA-256 and salt - -@features_1065_li -For server mode connections, user passwords are never transmitted in plain text over the network (even when using insecure connections; this only applies to the TCP server and not to the H2 Console however; it also doesn't apply if you set the password in the database URL) - -@features_1066_li -All database files (including script files that can be used to backup data) can be encrypted using the AES-128 encryption algorithm - -@features_1067_li -The remote JDBC driver supports TCP/IP connections over TLS - -@features_1068_li -The built-in web server supports connections over TLS - -@features_1069_li -Passwords can be sent to the database using char arrays instead of Strings - -@features_1070_h3 -Other Features and Tools - -@features_1071_li -Small footprint (smaller than 1.5 MB), low memory requirements - -@features_1072_li -Multiple index types (b-tree, tree, hash) - -@features_1073_li -Support for multi-dimensional indexes - -@features_1074_li -CSV (comma separated values) file support - -@features_1075_li -Support for linked tables, and a built-in virtual 'range' table - -@features_1076_li -Supports the EXPLAIN PLAN statement; sophisticated trace options - -@features_1077_li -Database closing can be delayed or disabled to improve the performance - -@features_1078_li -Web-based Console application (translated to many languages) with autocomplete - -@features_1079_li -The database can generate SQL script files - -@features_1080_li -Contains a recovery tool that can dump the contents of the database - -@features_1081_li -Support for variables (for example to calculate running totals) - -@features_1082_li -Automatic re-compilation of prepared statements - -@features_1083_li -Uses a small 
number of database files - -@features_1084_li -Uses a checksum for each record and log entry for data integrity - -@features_1085_li -Well tested (high code coverage, randomized stress tests) - -@features_1086_h2 -Comparison to Other Database Engines - -@features_1087_p - This comparison is based on H2 1.3, Apache Derby version 10.8, HSQLDB 2.2, MySQL 5.5, PostgreSQL 9.0. - -@features_1088_th -Feature - -@features_1089_th -H2 - -@features_1090_th -Derby - -@features_1091_th -HSQLDB - -@features_1092_th -MySQL - -@features_1093_th -PostgreSQL - -@features_1094_td -Pure Java - -@features_1095_td -Yes - -@features_1096_td -Yes - -@features_1097_td -Yes - -@features_1098_td -No - -@features_1099_td -No - -@features_1100_td -Embedded Mode (Java) - -@features_1101_td -Yes - -@features_1102_td -Yes - -@features_1103_td -Yes - -@features_1104_td -No - -@features_1105_td -No - -@features_1106_td -In-Memory Mode - -@features_1107_td -Yes - -@features_1108_td -Yes - -@features_1109_td -Yes - -@features_1110_td -No - -@features_1111_td -No - -@features_1112_td -Explain Plan - -@features_1113_td -Yes - -@features_1114_td -Yes *12 - -@features_1115_td -Yes - -@features_1116_td -Yes - -@features_1117_td -Yes - -@features_1118_td -Built-in Clustering / Replication - -@features_1119_td -Yes - -@features_1120_td -Yes - -@features_1121_td -No - -@features_1122_td -Yes - -@features_1123_td -Yes - -@features_1124_td -Encrypted Database - -@features_1125_td -Yes - -@features_1126_td -Yes *10 - -@features_1127_td -Yes *10 - -@features_1128_td -No - -@features_1129_td -No - -@features_1130_td -Linked Tables - -@features_1131_td -Yes - -@features_1132_td -No - -@features_1133_td -Partially *1 - -@features_1134_td -Partially *2 - -@features_1135_td -Yes - -@features_1136_td -ODBC Driver - -@features_1137_td -Yes - -@features_1138_td -No - -@features_1139_td -No - -@features_1140_td -Yes - -@features_1141_td -Yes - -@features_1142_td -Fulltext Search - -@features_1143_td -Yes - 
-@features_1144_td -Yes - -@features_1145_td -No - -@features_1146_td -Yes - -@features_1147_td -Yes - -@features_1148_td -Domains (User-Defined Types) - -@features_1149_td -Yes - -@features_1150_td -No - -@features_1151_td -Yes - -@features_1152_td -Yes - -@features_1153_td -Yes - -@features_1154_td -Files per Database - -@features_1155_td -Few - -@features_1156_td -Many - -@features_1157_td -Few - -@features_1158_td -Many - -@features_1159_td -Many - -@features_1160_td -Row Level Locking - -@features_1161_td -Yes *9 - -@features_1162_td -Yes - -@features_1163_td -Yes *9 - -@features_1164_td -Yes - -@features_1165_td -Yes - -@features_1166_td -Multi Version Concurrency - -@features_1167_td -Yes - -@features_1168_td -No - -@features_1169_td -Yes - -@features_1170_td -Yes - -@features_1171_td -Yes - -@features_1172_td -Multi-Threaded Processing - -@features_1173_td -No *11 - -@features_1174_td -Yes - -@features_1175_td -Yes - -@features_1176_td -Yes - -@features_1177_td -Yes - -@features_1178_td -Role Based Security - -@features_1179_td -Yes - -@features_1180_td -Yes *3 - -@features_1181_td -Yes - -@features_1182_td -Yes - -@features_1183_td -Yes - -@features_1184_td -Updatable Result Sets - -@features_1185_td -Yes - -@features_1186_td -Yes *7 - -@features_1187_td -Yes - -@features_1188_td -Yes - -@features_1189_td -Yes - -@features_1190_td -Sequences - -@features_1191_td -Yes - -@features_1192_td -Yes - -@features_1193_td -Yes - -@features_1194_td -No - -@features_1195_td -Yes - -@features_1196_td -Limit and Offset - -@features_1197_td -Yes - -@features_1198_td -Yes *13 - -@features_1199_td -Yes - -@features_1200_td -Yes - -@features_1201_td -Yes - -@features_1202_td -Window Functions - -@features_1203_td -No *15 - -@features_1204_td -No *15 - -@features_1205_td -No - -@features_1206_td -No - -@features_1207_td -Yes - -@features_1208_td -Temporary Tables - -@features_1209_td -Yes - -@features_1210_td -Yes *4 - -@features_1211_td -Yes - -@features_1212_td -Yes - 
-@features_1213_td -Yes - -@features_1214_td -Information Schema - -@features_1215_td -Yes - -@features_1216_td -No *8 - -@features_1217_td -Yes - -@features_1218_td -Yes - -@features_1219_td -Yes - -@features_1220_td -Computed Columns - -@features_1221_td -Yes - -@features_1222_td -Yes - -@features_1223_td -Yes - -@features_1224_td -Yes - -@features_1225_td -Yes *6 - -@features_1226_td -Case Insensitive Columns - -@features_1227_td -Yes - -@features_1228_td -Yes *14 - -@features_1229_td -Yes - -@features_1230_td -Yes - -@features_1231_td -Yes *6 - -@features_1232_td -Custom Aggregate Functions - -@features_1233_td -Yes - -@features_1234_td -No - -@features_1235_td -Yes - -@features_1236_td -No - -@features_1237_td -Yes - -@features_1238_td -CLOB/BLOB Compression - -@features_1239_td -Yes - -@features_1240_td -No - -@features_1241_td -No - -@features_1242_td -No - -@features_1243_td -Yes - -@features_1244_td -Footprint (jar/dll size) - -@features_1245_td -~1.5 MB *5 - -@features_1246_td -~3 MB - -@features_1247_td -~1.5 MB - -@features_1248_td -~4 MB - -@features_1249_td -~6 MB - -@features_1250_p - *1 HSQLDB supports text tables. - -@features_1251_p - *2 MySQL supports linked MySQL tables under the name 'federated tables'. - -@features_1252_p - *3 Derby support for roles based security and password checking as an option. - -@features_1253_p - *4 Derby only supports global temporary tables. - -@features_1254_p - *5 The default H2 jar file contains debug information, jar files for other databases do not. - -@features_1255_p - *6 PostgreSQL supports functional indexes. - -@features_1256_p - *7 Derby only supports updatable result sets if the query is not sorted. - -@features_1257_p - *8 Derby doesn't support standard compliant information schema tables. - -@features_1258_p - *9 When using MVCC (multi version concurrency). - -@features_1259_p - *10 Derby and HSQLDB don't hide data patterns well. 
- -@features_1260_p - *11 The MULTI_THREADED option is not enabled by default, and with version 1.3.x not supported when using MVCC. - -@features_1261_p - *12 Derby doesn't support the EXPLAIN statement, but it supports runtime statistics and retrieving statement execution plans. - -@features_1262_p - *13 Derby doesn't support the syntax LIMIT .. [OFFSET ..], however it supports FETCH FIRST .. ROW[S] ONLY. - -@features_1263_p - *14 Using collations. *15 Derby and H2 support ROW_NUMBER() OVER(). - -@features_1264_h3 -DaffodilDb and One$Db - -@features_1265_p - It looks like the development of this database has stopped. The last release was February 2006. - -@features_1266_h3 -McKoi - -@features_1267_p - It looks like the development of this database has stopped. The last release was August 2004. - -@features_1268_h2 -H2 in Use - -@features_1269_p - For a list of applications that work with or use H2, see: Links. - -@features_1270_h2 -Connection Modes - -@features_1271_p - The following connection modes are supported: - -@features_1272_li -Embedded mode (local connections using JDBC) - -@features_1273_li -Server mode (remote connections using JDBC or ODBC over TCP/IP) - -@features_1274_li -Mixed mode (local and remote connections at the same time) - -@features_1275_h3 -Embedded Mode - -@features_1276_p - In embedded mode, an application opens a database from within the same JVM using JDBC. This is the fastest and easiest connection mode. The disadvantage is that a database may only be open in one virtual machine (and class loader) at any time. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of database open concurrently, or on the number of open connections. - -@features_1277_h3 -Server Mode - -@features_1278_p - When using the server mode (sometimes called remote mode or client/server mode), an application opens a database remotely using the JDBC or ODBC API. 
A server needs to be started within the same or another virtual machine, or on another computer. Many applications can connect to the same database at the same time, by connecting to this server. Internally, the server process opens the database(s) in embedded mode. - -@features_1279_p - The server mode is slower than the embedded mode, because all data is transferred over TCP/IP. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of database open concurrently per server, or on the number of open connections. - -@features_1280_h3 -Mixed Mode - -@features_1281_p - The mixed mode is a combination of the embedded and the server mode. The first application that connects to a database does that in embedded mode, but also starts a server so that other applications (running in different processes or virtual machines) can concurrently access the same data. The local connections are as fast as if the database is used in just the embedded mode, while the remote connections are a bit slower. - -@features_1282_p - The server can be started and stopped from within the application (using the server API), or automatically (automatic mixed mode). When using the automatic mixed mode, all clients that want to connect to the database (no matter if it's an local or remote connection) can do so using the exact same database URL. - -@features_1283_h2 -Database URL Overview - -@features_1284_p - This database supports multiple connection modes and connection settings. This is achieved using different database URLs. Settings in the URLs are not case sensitive. 
- -@features_1285_th -Topic - -@features_1286_th -URL Format and Examples - -@features_1287_a -Embedded (local) connection - -@features_1288_td - jdbc:h2:[file:][<path>]<databaseName> - -@features_1289_td - jdbc:h2:~/test - -@features_1290_td - jdbc:h2:file:/data/sample - -@features_1291_td - jdbc:h2:file:C:/data/sample (Windows only) - -@features_1292_a -In-memory (private) - -@features_1293_td -jdbc:h2:mem: - -@features_1294_a -In-memory (named) - -@features_1295_td - jdbc:h2:mem:<databaseName> - -@features_1296_td - jdbc:h2:mem:test_mem - -@features_1297_a -Server mode (remote connections) - -@features_1298_a - using TCP/IP - -@features_1299_td - jdbc:h2:tcp://<server>[:<port>]/[<path>]<databaseName> - -@features_1300_td - jdbc:h2:tcp://localhost/~/test - -@features_1301_td - jdbc:h2:tcp://dbserv:8084/~/sample - -@features_1302_td - jdbc:h2:tcp://localhost/mem:test - -@features_1303_a -Server mode (remote connections) - -@features_1304_a - using TLS - -@features_1305_td - jdbc:h2:ssl://<server>[:<port>]/<databaseName> - -@features_1306_td - jdbc:h2:ssl://localhost:8085/~/sample; - -@features_1307_a -Using encrypted files - -@features_1308_td - jdbc:h2:<url>;CIPHER=AES - -@features_1309_td - jdbc:h2:ssl://localhost/~/test;CIPHER=AES - -@features_1310_td - jdbc:h2:file:~/secure;CIPHER=AES - -@features_1311_a -File locking methods - -@features_1312_td - jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|NO} - -@features_1313_td - jdbc:h2:file:~/private;CIPHER=AES;FILE_LOCK=SOCKET - -@features_1314_a -Only open if it already exists - -@features_1315_td - jdbc:h2:<url>;IFEXISTS=TRUE - -@features_1316_td - jdbc:h2:file:~/sample;IFEXISTS=TRUE - -@features_1317_a -Don't close the database when the VM exits - -@features_1318_td - jdbc:h2:<url>;DB_CLOSE_ON_EXIT=FALSE - -@features_1319_a -Execute SQL on connection - -@features_1320_td - jdbc:h2:<url>;INIT=RUNSCRIPT FROM '~/create.sql' - -@features_1321_td - jdbc:h2:file:~/sample;INIT=RUNSCRIPT FROM '~/create.sql'\;RUNSCRIPT FROM 
'~/populate.sql' - -@features_1322_a -User name and/or password - -@features_1323_td - jdbc:h2:<url>[;USER=<username>][;PASSWORD=<value>] - -@features_1324_td - jdbc:h2:file:~/sample;USER=sa;PASSWORD=123 - -@features_1325_a -Debug trace settings - -@features_1326_td - jdbc:h2:<url>;TRACE_LEVEL_FILE=<level 0..3> - -@features_1327_td - jdbc:h2:file:~/sample;TRACE_LEVEL_FILE=3 - -@features_1328_a -Ignore unknown settings - -@features_1329_td - jdbc:h2:<url>;IGNORE_UNKNOWN_SETTINGS=TRUE - -@features_1330_a -Custom file access mode - -@features_1331_td - jdbc:h2:<url>;ACCESS_MODE_DATA=rws - -@features_1332_a -Database in a zip file - -@features_1333_td - jdbc:h2:zip:<zipFileName>!/<databaseName> - -@features_1334_td - jdbc:h2:zip:~/db.zip!/test - -@features_1335_a -Compatibility mode - -@features_1336_td - jdbc:h2:<url>;MODE=<databaseType> - -@features_1337_td - jdbc:h2:~/test;MODE=MYSQL - -@features_1338_a -Auto-reconnect - -@features_1339_td - jdbc:h2:<url>;AUTO_RECONNECT=TRUE - -@features_1340_td - jdbc:h2:tcp://localhost/~/test;AUTO_RECONNECT=TRUE - -@features_1341_a -Automatic mixed mode - -@features_1342_td - jdbc:h2:<url>;AUTO_SERVER=TRUE - -@features_1343_td - jdbc:h2:~/test;AUTO_SERVER=TRUE - -@features_1344_a -Page size - -@features_1345_td - jdbc:h2:<url>;PAGE_SIZE=512 - -@features_1346_a -Changing other settings - -@features_1347_td - jdbc:h2:<url>;<setting>=<value>[;<setting>=<value>...] - -@features_1348_td - jdbc:h2:file:~/sample;TRACE_LEVEL_SYSTEM_OUT=3 - -@features_1349_h2 -Connecting to an Embedded (Local) Database - -@features_1350_p - The database URL for connecting to a local database is jdbc:h2:[file:][<path>]<databaseName>. The prefix file: is optional. If no or only a relative path is used, then the current working directory is used as a starting point. The case sensitivity of the path and database name depend on the operating system, however it is recommended to use lowercase letters only. 
The database name must be at least three characters long (a limitation of File.createTempFile). The database name must not contain a semicolon. To point to the user home directory, use ~/, as in: jdbc:h2:~/test. - -@features_1351_h2 -In-Memory Databases - -@features_1352_p - For certain use cases (for example: rapid prototyping, testing, high performance operations, read-only databases), it may not be required to persist data, or persist changes to the data. This database supports the in-memory mode, where the data is not persisted. - -@features_1353_p - In some cases, only one connection to a in-memory database is required. This means the database to be opened is private. In this case, the database URL is jdbc:h2:mem: Opening two connections within the same virtual machine means opening two different (private) databases. - -@features_1354_p - Sometimes multiple connections to the same in-memory database are required. In this case, the database URL must include a name. Example: jdbc:h2:mem:db1. Accessing the same database using this URL only works within the same virtual machine and class loader environment. - -@features_1355_p - To access an in-memory database from another process or from another computer, you need to start a TCP server in the same process as the in-memory database was created. The other processes then need to access the database over TCP/IP or TLS, using a database URL such as: jdbc:h2:tcp://localhost/mem:db1. - -@features_1356_p - By default, closing the last connection to a database closes the database. For an in-memory database, this means the content is lost. To keep the database open, add ;DB_CLOSE_DELAY=-1 to the database URL. To keep the content of an in-memory database as long as the virtual machine is alive, use jdbc:h2:mem:test;DB_CLOSE_DELAY=-1. - -@features_1357_h2 -Database Files Encryption - -@features_1358_p - The database files can be encrypted. 
Three encryption algorithms are supported: - -@features_1359_li -"AES" - also known as Rijndael, only AES-128 is implemented. - -@features_1360_li -"XTEA" - the 32 round version. - -@features_1361_li -"FOG" - pseudo-encryption only useful for hiding data from a text editor. - -@features_1362_p - To use file encryption, you need to specify the encryption algorithm (the 'cipher') and the file password (in addition to the user password) when connecting to the database. - -@features_1363_h3 -Creating a New Database with File Encryption - -@features_1364_p - By default, a new database is automatically created if it does not exist yet. To create an encrypted database, connect to it as it would already exist. - -@features_1365_h3 -Connecting to an Encrypted Database - -@features_1366_p - The encryption algorithm is set in the database URL, and the file password is specified in the password field, before the user password. A single space separates the file password and the user password; the file password itself may not contain spaces. File passwords and user passwords are case sensitive. Here is an example to connect to a password-encrypted database: - -@features_1367_h3 -Encrypting or Decrypting a Database - -@features_1368_p - To encrypt an existing database, use the ChangeFileEncryption tool. This tool can also decrypt an encrypted database, or change the file encryption key. The tool is available from within the H2 Console in the tools section, or you can run it from the command line. The following command line will encrypt the database test in the user home directory with the file password filepwd and the encryption algorithm AES: - -@features_1369_h2 -Database File Locking - -@features_1370_p - Whenever a database is opened, a lock file is created to signal other processes that the database is in use. If database is closed, or if the process that opened the database terminates, this lock file is deleted. 
- -@features_1371_p - The following file locking methods are implemented: - -@features_1372_li -The default method is FILE and uses a watchdog thread to protect the database file. The watchdog reads the lock file each second. - -@features_1373_li -The second method is SOCKET and opens a server socket. The socket method does not require reading the lock file every second. The socket method should only be used if the database files are only accessed by one (and always the same) computer. - -@features_1374_li -The third method is FS. This will use native file locking using FileChannel.lock. - -@features_1375_li -It is also possible to open the database without file locking; in this case it is up to the application to protect the database files. Failing to do so will result in a corrupted database. Using the method NO forces the database to not create a lock file at all. Please note that this is unsafe as another process is able to open the same database, possibly leading to data corruption. - -@features_1376_p - To open the database with a different file locking method, use the parameter FILE_LOCK. The following code opens the database with the 'socket' locking method: - -@features_1377_p - For more information about the algorithms, see Advanced / File Locking Protocols. - -@features_1378_h2 -Opening a Database Only if it Already Exists - -@features_1379_p - By default, when an application calls DriverManager.getConnection(url, ...) and the database specified in the URL does not yet exist, a new (empty) database is created. In some situations, it is better to restrict creating new databases, and only allow to open existing databases. To do this, add ;IFEXISTS=TRUE to the database URL. In this case, if the database does not already exist, an exception is thrown when trying to connect. The connection only succeeds when the database already exists. 
The complete URL may look like this: - -@features_1380_h2 -Closing a Database - -@features_1381_h3 -Delayed Database Closing - -@features_1382_p - Usually, a database is closed when the last connection to it is closed. In some situations this slows down the application, for example when it is not possible to keep at least one connection open. The automatic closing of a database can be delayed or disabled with the SQL statement SET DB_CLOSE_DELAY <seconds>. The parameter <seconds> specifies the number of seconds to keep a database open after the last connection to it was closed. The following statement will keep a database open for 10 seconds after the last connection was closed: - -@features_1383_p - The value -1 means the database is not closed automatically. The value 0 is the default and means the database is closed when the last connection is closed. This setting is persistent and can be set by an administrator only. It is possible to set the value in the database URL: jdbc:h2:~/test;DB_CLOSE_DELAY=10. - -@features_1384_h3 -Don't Close a Database when the VM Exits - -@features_1385_p - By default, a database is closed when the last connection is closed. However, if it is never closed, the database is closed when the virtual machine exits normally, using a shutdown hook. In some situations, the database should not be closed in this case, for example because the database is still used at virtual machine shutdown (to store the shutdown process in the database for example). For those cases, the automatic closing of the database can be disabled in the database URL. The first connection (the one that is opening the database) needs to set the option in the database URL (it is not possible to change the setting afterwards). 
The database URL to disable database closing on exit is: - -@features_1386_h2 -Execute SQL on Connection - -@features_1387_p - Sometimes, particularly for in-memory databases, it is useful to be able to execute DDL or DML commands automatically when a client connects to a database. This functionality is enabled via the INIT property. Note that multiple commands may be passed to INIT, but the semicolon delimiter must be escaped, as in the example below. - -@features_1388_p - Please note the double backslash is only required in a Java or properties file. In a GUI, or in an XML file, only one backslash is required: - -@features_1389_p - Backslashes within the init script (for example within a runscript statement, to specify the folder names in Windows) need to be escaped as well (using a second backslash). It might be simpler to avoid backslashes in folder names for this reason; use forward slashes instead. - -@features_1390_h2 -Ignore Unknown Settings - -@features_1391_p - Some applications (for example OpenOffice.org Base) pass some additional parameters when connecting to the database. Why those parameters are passed is unknown. The parameters PREFERDOSLIKELINEENDS and IGNOREDRIVERPRIVILEGES are such examples; they are simply ignored to improve the compatibility with OpenOffice.org. If an application passes other parameters when connecting to the database, usually the database throws an exception saying the parameter is not supported. It is possible to ignored such parameters by adding ;IGNORE_UNKNOWN_SETTINGS=TRUE to the database URL. - -@features_1392_h2 -Changing Other Settings when Opening a Connection - -@features_1393_p - In addition to the settings already described, other database settings can be passed in the database URL. Adding ;setting=value at the end of a database URL is the same as executing the statement SET setting value just after connecting. For a list of supported settings, see SQL Grammar or the DbSettings javadoc. 
- -@features_1394_h2 -Custom File Access Mode - -@features_1395_p - Usually, the database opens the database file with the access mode rw, meaning read-write (except for read only databases, where the mode r is used). To open a database in read-only mode if the database file is not read-only, use ACCESS_MODE_DATA=r. Also supported are rws and rwd. This setting must be specified in the database URL: - -@features_1396_p - For more information see Durability Problems. On many operating systems the access mode rws does not guarantee that the data is written to the disk. - -@features_1397_h2 -Multiple Connections - -@features_1398_h3 -Opening Multiple Databases at the Same Time - -@features_1399_p - An application can open multiple databases at the same time, including multiple connections to the same database. The number of open database is only limited by the memory available. - -@features_1400_h3 -Multiple Connections to the Same Database: Client/Server - -@features_1401_p - If you want to access the same database at the same time from different processes or computers, you need to use the client / server mode. In this case, one process acts as the server, and the other processes (that could reside on other computers as well) connect to the server via TCP/IP (or TLS over TCP/IP for improved security). - -@features_1402_h3 -Multithreading Support - -@features_1403_p - This database is multithreading-safe. If an application is multi-threaded, it does not need to worry about synchronizing access to the database. An application should normally use one connection per thread. This database synchronizes access to the same connection, but other databases may not do this. To get higher concurrency, you need to use multiple connections. - -@features_1404_p - By default, requests to the same database are synchronized. 
That means an application can use multiple threads that access the same database at the same time, however if one thread executes a long running query, the other threads need to wait. To enable concurrent database usage, see the setting MULTI_THREADED. - -@features_1405_h3 -Locking, Lock-Timeout, Deadlocks - -@features_1406_p - Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. If multi-version concurrency is not used, the database uses table level locks to give each connection a consistent state of the data. There are two kinds of locks: read locks (shared locks) and write locks (exclusive locks). All locks are released when the transaction commits or rolls back. When using the default transaction isolation level 'read committed', read locks are already released after each statement. - -@features_1407_p - If a connection wants to reads from a table, and there is no write lock on the table, then a read lock is added to the table. If there is a write lock, then this connection waits for the other connection to release the lock. If a connection cannot get a lock for a specified time, then a lock timeout exception is thrown. - -@features_1408_p - Usually, SELECT statements will generate read locks. This includes subqueries. Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data, using the statement SELECT ... FOR UPDATE. The statements COMMIT and ROLLBACK releases all open locks. The commands SAVEPOINT and ROLLBACK TO SAVEPOINT don't affect locks. The locks are also released when the autocommit mode changes, and for connections with autocommit set to true (this is the default), locks are released after each statement. 
The following statements generate locks: - -@features_1409_th -Type of Lock - -@features_1410_th -SQL Statement - -@features_1411_td -Read - -@features_1412_td -SELECT * FROM TEST; - -@features_1413_td - CALL SELECT MAX(ID) FROM TEST; - -@features_1414_td - SCRIPT; - -@features_1415_td -Write - -@features_1416_td -SELECT * FROM TEST WHERE 1=0 FOR UPDATE; - -@features_1417_td -Write - -@features_1418_td -INSERT INTO TEST VALUES(1, 'Hello'); - -@features_1419_td - INSERT INTO TEST SELECT * FROM TEST; - -@features_1420_td - UPDATE TEST SET NAME='Hi'; - -@features_1421_td - DELETE FROM TEST; - -@features_1422_td -Write - -@features_1423_td -ALTER TABLE TEST ...; - -@features_1424_td - CREATE INDEX ... ON TEST ...; - -@features_1425_td - DROP INDEX ...; - -@features_1426_p - The number of seconds until a lock timeout exception is thrown can be set separately for each connection using the SQL command SET LOCK_TIMEOUT <milliseconds>. The initial lock timeout (that is the timeout used for new connections) can be set using the SQL command SET DEFAULT_LOCK_TIMEOUT <milliseconds>. The default lock timeout is persistent. - -@features_1427_h3 -Avoiding Deadlocks - -@features_1428_p - To avoid deadlocks, ensure that all transactions lock the tables in the same order (for example in alphabetical order), and avoid upgrading read locks to write locks. Both can be achieved using explicitly locking tables using SELECT ... FOR UPDATE. - -@features_1429_h2 -Database File Layout - -@features_1430_p - The following files are created for persistent databases: - -@features_1431_th -File Name - -@features_1432_th -Description - -@features_1433_th -Number of Files - -@features_1434_td - test.h2.db - -@features_1435_td - Database file. - -@features_1436_td - Contains the transaction log, indexes, and data for all tables. - -@features_1437_td - Format: <database>.h2.db - -@features_1438_td - 1 per database - -@features_1439_td - test.lock.db - -@features_1440_td - Database lock file. 
- -@features_1441_td - Automatically (re-)created while the database is in use. - -@features_1442_td - Format: <database>.lock.db - -@features_1443_td - 1 per database (only if in use) - -@features_1444_td - test.trace.db - -@features_1445_td - Trace file (if the trace option is enabled). - -@features_1446_td - Contains trace information. - -@features_1447_td - Format: <database>.trace.db - -@features_1448_td - Renamed to <database>.trace.db.old is too big. - -@features_1449_td - 0 or 1 per database - -@features_1450_td - test.lobs.db/* - -@features_1451_td - Directory containing one file for each - -@features_1452_td - BLOB or CLOB value larger than a certain size. - -@features_1453_td - Format: <id>.t<tableId>.lob.db - -@features_1454_td - 1 per large object - -@features_1455_td - test.123.temp.db - -@features_1456_td - Temporary file. - -@features_1457_td - Contains a temporary blob or a large result set. - -@features_1458_td - Format: <database>.<id>.temp.db - -@features_1459_td - 1 per object - -@features_1460_h3 -Moving and Renaming Database Files - -@features_1461_p - Database name and location are not stored inside the database files. - -@features_1462_p - While a database is closed, the files can be moved to another directory, and they can be renamed as well (as long as all files of the same database start with the same name and the respective extensions are unchanged). - -@features_1463_p - As there is no platform specific data in the files, they can be moved to other operating systems without problems. - -@features_1464_h3 -Backup - -@features_1465_p - When the database is closed, it is possible to backup the database files. - -@features_1466_p - To backup data while the database is running, the SQL commands SCRIPT and BACKUP can be used. - -@features_1467_h2 -Logging and Recovery - -@features_1468_p - Whenever data is modified in the database and those changes are committed, the changes are written to the transaction log (except for in-memory objects). 
The changes to the main data area itself are usually written later on, to optimize disk access. If there is a power failure, the main data area is not up-to-date, but because the changes are in the transaction log, the next time the database is opened, the changes are re-applied automatically. - -@features_1469_h2 -Compatibility - -@features_1470_p - All database engines behave a little bit different. Where possible, H2 supports the ANSI SQL standard, and tries to be compatible to other databases. There are still a few differences however: - -@features_1471_p - In MySQL text columns are case insensitive by default, while in H2 they are case sensitive. However H2 supports case insensitive columns as well. To create the tables with case insensitive texts, append IGNORECASE=TRUE to the database URL (example: jdbc:h2:~/test;IGNORECASE=TRUE). - -@features_1472_h3 -Compatibility Modes - -@features_1473_p - For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases are implemented in this way. Here is the list of currently supported modes and the differences to the regular mode: - -@features_1474_h3 -DB2 Compatibility Mode - -@features_1475_p - To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2 or the SQL statement SET MODE DB2. - -@features_1476_li -For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1477_li -Support for the syntax [OFFSET .. ROW] [FETCH ... ONLY] as an alternative for LIMIT .. OFFSET. - -@features_1478_li -Concatenating NULL with another value results in the other value. - -@features_1479_li -Support the pseudo-table SYSIBM.SYSDUMMY1. - -@features_1480_h3 -Derby Compatibility Mode - -@features_1481_p - To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby or the SQL statement SET MODE Derby. 
- -@features_1482_li -For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1483_li -For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1484_li -Concatenating NULL with another value results in the other value. - -@features_1485_li -Support the pseudo-table SYSIBM.SYSDUMMY1. - -@features_1486_h3 -HSQLDB Compatibility Mode - -@features_1487_p - To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB or the SQL statement SET MODE HSQLDB. - -@features_1488_li -For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1489_li -When converting the scale of decimal data, the number is only converted if the new scale is smaller than the current scale. Usually, the scale is converted and 0s are added if required. - -@features_1490_li -For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1491_li -Text can be concatenated using '+'. - -@features_1492_h3 -MS SQL Server Compatibility Mode - -@features_1493_p - To use the MS SQL Server mode, use the database URL jdbc:h2:~/test;MODE=MSSQLServer or the SQL statement SET MODE MSSQLServer. - -@features_1494_li -For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1495_li -Identifiers may be quoted using square brackets as in [Test]. - -@features_1496_li -For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1497_li -Concatenating NULL with another value results in the other value. - -@features_1498_li -Text can be concatenated using '+'. - -@features_1499_h3 -MySQL Compatibility Mode - -@features_1500_p - To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL or the SQL statement SET MODE MySQL. 
- -@features_1501_li -When inserting data, if a column is defined to be NOT NULL and NULL is inserted, then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. Usually, this operation is not allowed and an exception is thrown. - -@features_1502_li -Creating indexes in the CREATE TABLE statement is allowed using INDEX(..) or KEY(..). Example: create table test(id int primary key, name varchar(255), key idx_name(name)); - -@features_1503_li -Meta data calls return identifiers in lower case. - -@features_1504_li -When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. - -@features_1505_li -Concatenating NULL with another value results in the other value. - -@features_1506_p - Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases). H2 does support case insensitive text comparison, but it needs to be set separately, using SET IGNORECASE TRUE. This affects comparison using =, LIKE, REGEXP. - -@features_1507_h3 -Oracle Compatibility Mode - -@features_1508_p - To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle or the SQL statement SET MODE Oracle. - -@features_1509_li -For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1510_li -When using unique indexes, multiple rows with NULL in all columns are allowed, however it is not allowed to have multiple rows with the same values otherwise. - -@features_1511_li -Concatenating NULL with another value results in the other value. - -@features_1512_li -Empty strings are treated like NULL values. - -@features_1513_h3 -PostgreSQL Compatibility Mode - -@features_1514_p - To use the PostgreSQL mode, use the database URL jdbc:h2:~/test;MODE=PostgreSQL or the SQL statement SET MODE PostgreSQL. 
- -@features_1515_li -For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1516_li -When converting a floating point number to an integer, the fractional digits are not be truncated, but the value is rounded. - -@features_1517_li -The system columns CTID and OID are supported. - -@features_1518_li -LOG(x) is base 10 in this mode. - -@features_1519_h2 -Auto-Reconnect - -@features_1520_p - The auto-reconnect feature causes the JDBC driver to reconnect to the database if the connection is lost. The automatic re-connect only occurs when auto-commit is enabled; if auto-commit is disabled, an exception is thrown. To enable this mode, append ;AUTO_RECONNECT=TRUE to the database URL. - -@features_1521_p - Re-connecting will open a new session. After an automatic re-connect, variables and local temporary tables definitions (excluding data) are re-created. The contents of the system table INFORMATION_SCHEMA.SESSION_STATE contains all client side state that is re-created. - -@features_1522_p - If another connection uses the database in exclusive mode (enabled using SET EXCLUSIVE 1 or SET EXCLUSIVE 2), then this connection will try to re-connect until the exclusive mode ends. - -@features_1523_h2 -Automatic Mixed Mode - -@features_1524_p - Multiple processes can access the same database without having to start the server manually. To do that, append ;AUTO_SERVER=TRUE to the database URL. You can use the same database URL independent of whether the database is already open or not. This feature doesn't work with in-memory databases. Example database URL: - -@features_1525_p - Use the same URL for all connections to this database. Internally, when using this mode, the first connection to the database is made in embedded mode, and additionally a server is started internally (as a daemon thread). If the database is already open in another process, the server mode is used automatically. 
The IP address and port of the server are stored in the file .lock.db, that's why in-memory databases can't be supported. - -@features_1526_p - The application that opens the first connection to the database uses the embedded mode, which is faster than the server mode. Therefore the main application should open the database first if possible. The first connection automatically starts a server on a random port. This server allows remote connections, however only to this database (to ensure that, the client reads .lock.db file and sends the the random key that is stored there to the server). When the first connection is closed, the server stops. If other (remote) connections are still open, one of them will then start a server (auto-reconnect is enabled automatically). - -@features_1527_p - All processes need to have access to the database files. If the first connection is closed (the connection that started the server), open transactions of other connections will be rolled back (this may not be a problem if you don't disable autocommit). Explicit client/server connections (using jdbc:h2:tcp:// or ssl://) are not supported. This mode is not supported for in-memory databases. - -@features_1528_p - Here is an example how to use this mode. Application 1 and 2 are not necessarily started on the same computer, but they need to have access to the database files. Application 1 and 2 are typically two different processes (however they could run within the same process). - -@features_1529_p - When using this feature, by default the server uses any free TCP port. The port can be set manually using AUTO_SERVER_PORT=9090. - -@features_1530_h2 -Page Size - -@features_1531_p - The page size for new databases is 2 KB (2048), unless the page size is set explicitly in the database URL using PAGE_SIZE= when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created. 
- -@features_1532_h2 -Using the Trace Options - -@features_1533_p - To find problems in an application, it is sometimes good to see what database operations where executed. This database offers the following trace features: - -@features_1534_li -Trace to System.out and/or to a file - -@features_1535_li -Support for trace levels OFF, ERROR, INFO, DEBUG - -@features_1536_li -The maximum size of the trace file can be set - -@features_1537_li -It is possible to generate Java source code from the trace file - -@features_1538_li -Trace can be enabled at runtime by manually creating a file - -@features_1539_h3 -Trace Options - -@features_1540_p - The simplest way to enable the trace option is setting it in the database URL. There are two settings, one for System.out (TRACE_LEVEL_SYSTEM_OUT) tracing, and one for file tracing (TRACE_LEVEL_FILE). The trace levels are 0 for OFF, 1 for ERROR (the default), 2 for INFO, and 3 for DEBUG. A database URL with both levels set to DEBUG is: - -@features_1541_p - The trace level can be changed at runtime by executing the SQL command SET TRACE_LEVEL_SYSTEM_OUT level (for System.out tracing) or SET TRACE_LEVEL_FILE level (for file tracing). Example: - -@features_1542_h3 -Setting the Maximum Size of the Trace File - -@features_1543_p - When using a high trace level, the trace file can get very big quickly. The default size limit is 16 MB, if the trace file exceeds this limit, it is renamed to .old and a new file is created. If another such file exists, it is deleted. To limit the size to a certain number of megabytes, use SET TRACE_MAX_FILE_SIZE mb. Example: - -@features_1544_h3 -Java Code Generation - -@features_1545_p - When setting the trace level to INFO or DEBUG, Java source code is generated as well. This simplifies reproducing problems. 
The trace file looks like this: - -@features_1546_p - To filter the Java source code, use the ConvertTraceFile tool as follows: - -@features_1547_p - The generated file Test.java will contain the Java source code. The generated source code may be too large to compile (the size of a Java method is limited). If this is the case, the source code needs to be split in multiple methods. The password is not listed in the trace file and therefore not included in the source code. - -@features_1548_h2 -Using Other Logging APIs - -@features_1549_p - By default, this database uses its own native 'trace' facility. This facility is called 'trace' and not 'log' within this database to avoid confusion with the transaction log. Trace messages can be written to both file and System.out. In most cases, this is sufficient, however sometimes it is better to use the same facility as the application, for example Log4j. To do that, this database support SLF4J. - -@features_1550_a -SLF4J - -@features_1551_p - is a simple facade for various logging APIs and allows to plug in the desired implementation at deployment time. SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), Java logging, x4juli, and Simple Log. - -@features_1552_p - To enable SLF4J, set the file trace level to 4 in the database URL: - -@features_1553_p - Changing the log mechanism is not possible after the database is open, that means executing the SQL statement SET TRACE_LEVEL_FILE 4 when the database is already open will not have the desired effect. To use SLF4J, all required jar files need to be in the classpath. The logger name is h2database. If it does not work, check the file <database>.trace.db for error messages. - -@features_1554_h2 -Read Only Databases - -@features_1555_p - If the database files are read-only, then the database is read-only as well. It is not possible to create new tables, add or modify data in this database. Only SELECT and CALL statements are allowed. 
To create a read-only database, close the database. Then, make the database file read-only. When you open the database now, it is read-only. There are two ways an application can find out whether database is read-only: by calling Connection.isReadOnly() or by executing the SQL statement CALL READONLY(). - -@features_1556_p - Using the Custom Access Mode r the database can also be opened in read-only mode, even if the database file is not read only. - -@features_1557_h2 -Read Only Databases in Zip or Jar File - -@features_1558_p - To create a read-only database in a zip file, first create a regular persistent database, and then create a backup. The database must not have pending changes, that means you need to close all connections to the database first. To speed up opening the read-only database and running queries, the database should be closed using SHUTDOWN DEFRAG. If you are using a database named test, an easy way to create a zip file is using the Backup tool. You can start the tool from the command line, or from within the H2 Console (Tools - Backup). Please note that the database must be closed when the backup is created. Therefore, the SQL statement BACKUP TO can not be used. - -@features_1559_p - When the zip file is created, you can open the database in the zip file using the following database URL: - -@features_1560_p - Databases in zip files are read-only. The performance for some queries will be slower than when using a regular database, because random access in zip files is not supported (only streaming). How much this affects the performance depends on the queries and the data. The database is not read in memory; therefore large databases are supported as well. The same indexes are used as when using a regular database. - -@features_1561_p - If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files, because random access in compressed files is not possible. 
See also the sample application ReadOnlyDatabaseInZip. - -@features_1562_h3 -Opening a Corrupted Database - -@features_1563_p - If a database cannot be opened because the boot info (the SQL script that is run at startup) is corrupted, then the database can be opened by specifying a database event listener. The exceptions are logged, but opening the database will continue. - -@features_1564_h2 -Computed Columns / Function Based Index - -@features_1565_p - A computed column is a column whose value is calculated before storing. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time: - -@features_1566_p - Function indexes are not directly supported by this database, but they can be emulated by using computed columns. For example, if an index on the upper-case version of a column is required, create a computed column with the upper-case version of the original column, and create an index for this column: - -@features_1567_p - When inserting data, it is not required (and not allowed) to specify a value for the upper-case version of the column, because the value is generated. But you can use the column when querying the table: - -@features_1568_h2 -Multi-Dimensional Indexes - -@features_1569_p - A tool is provided to execute efficient multi-dimension (spatial) range queries. This database does not support a specialized spatial index (R-Tree or similar). Instead, the B-Tree index is used. For each record, the multi-dimensional key is converted (mapped) to a single dimensional (scalar) value. This value specifies the location on a space-filling curve. - -@features_1570_p - Currently, Z-order (also called N-order or Morton-order) is used; Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. 
The scalar value is indexed using a B-Tree index (usually using a computed column). - -@features_1571_p - The method can result in a drastic performance improvement over just using an index on the first column. Depending on the data and number of dimensions, the improvement is usually higher than factor 5. The tool generates a SQL query from a specified multi-dimensional range. The method used is not database dependent, and the tool can easily be ported to other databases. For an example how to use the tool, please have a look at the sample code provided in TestMultiDimension.java. - -@features_1572_h2 -User-Defined Functions and Stored Procedures - -@features_1573_p - In addition to the built-in functions, this database supports user-defined Java functions. In this database, Java functions can be used as stored procedures as well. A function must be declared (registered) before it can be used. A function can be defined using source code, or as a reference to a compiled class that is available in the classpath. By default, the function aliases are stored in the current schema. - -@features_1574_h3 -Referencing a Compiled Method - -@features_1575_p - When referencing a method, the class must already be compiled and included in the classpath where the database is running. Only static Java methods are supported; both the class and the method must be public. Example Java class: - -@features_1576_p - The Java function must be registered in the database by calling CREATE ALIAS ... FOR: - -@features_1577_p - For a complete sample application, see src/test/org/h2/samples/Function.java. - -@features_1578_h3 -Declaring Functions as Source Code - -@features_1579_p - When defining a function alias with source code, the database tries to compile the source code using the Sun Java compiler (the class com.sun.tools.javac.Main) if the tools.jar is in the classpath. If not, javac is run as a separate process. 
Only the source code is stored in the database; the class is compiled each time the database is re-opened. Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well. Example: - -@features_1580_p - By default, the three packages java.util, java.math, java.sql are imported. The method name (nextPrime in the example above) is ignored. Method overloading is not supported when declaring functions as source code, that means only one method may be declared for an alias. If different import statements are required, they must be declared at the beginning and separated with the tag @CODE: - -@features_1581_p - The following template is used to create a complete Java class: - -@features_1582_h3 -Method Overloading - -@features_1583_p - Multiple methods may be bound to a SQL function if the class is already compiled and included in the classpath. Each Java method must have a different number of arguments. Method overloading is not supported when declaring functions as source code. - -@features_1584_h3 -Function Data Type Mapping - -@features_1585_p - Functions that accept non-nullable parameters such as int will not be called if one of those parameters is NULL. Instead, the result of the function is NULL. If the function should be called if a parameter is NULL, you need to use java.lang.Integer instead. - -@features_1586_p - SQL types are mapped to Java classes and vice-versa as in the JDBC API. For details, see Data Types. There are a few special cases: java.lang.Object is mapped to OTHER (a serialized object). Therefore, java.lang.Object can not be used to match all SQL types (matching all SQL types is not supported). The second special case is Object[]: arrays of any class are mapped to ARRAY. Objects of type org.h2.value.Value (the internal value class) are passed through without conversion. 
- -@features_1587_h3 -Functions That Require a Connection - -@features_1588_p - If the first parameter of a Java function is a java.sql.Connection, then the connection to the database is provided. This connection does not need to be closed before returning. When calling the method from within the SQL statement, this connection parameter does not need to be (can not be) specified. - -@features_1589_h3 -Functions Throwing an Exception - -@features_1590_p - If a function throws an exception, then the current statement is rolled back and the exception is thrown to the application. SQLExceptions are directly re-thrown to the calling application; all other exceptions are first converted to a SQLException. - -@features_1591_h3 -Functions Returning a Result Set - -@features_1592_p - Functions may return a result set. Such a function can be called with the CALL statement: - -@features_1593_h3 -Using SimpleResultSet - -@features_1594_p - A function can create a result set using the SimpleResultSet tool: - -@features_1595_h3 -Using a Function as a Table - -@features_1596_p - A function that returns a result set can be used like a table. However, in this case the function is called at least twice: first while parsing the statement to collect the column names (with parameters set to null where not known at compile time). And then, while executing the statement to get the data (maybe multiple times if this is a join). If the function is called just to get the column list, the URL of the connection passed to the function is jdbc:columnlist:connection. Otherwise, the URL of the connection is jdbc:default:connection. - -@features_1597_h2 -Pluggable or User-Defined Tables - -@features_1598_p - For situations where you need to expose other data-sources to the SQL engine as a table, there are "pluggable tables". For some examples, have a look at the code in org.h2.test.db.TestTableEngines. 
- -@features_1599_p - In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine interface e.g. something like this: - -@features_1600_p - and then create the table from SQL like this: - -@features_1601_p - It is also possible to pass in parameters to the table engine, like so: - -@features_1602_p - In which case the parameters are passed down in the tableEngineParams field of the CreateTableData object. - -@features_1603_p - It is also possible to specify default table engine params on schema creation: - -@features_1604_p - Params from the schema are used when CREATE TABLE issued on this schema does not have its own engine params specified. - -@features_1605_h2 -Triggers - -@features_1606_p - This database supports Java triggers that are called before or after a row is updated, inserted or deleted. Triggers can be used for complex consistency checks, or to update related data in the database. It is also possible to use triggers to simulate materialized views. For a complete sample application, see src/test/org/h2/samples/TriggerSample.java. A Java trigger must implement the interface org.h2.api.Trigger. The trigger class must be available in the classpath of the database engine (when using the server mode, it must be in the classpath of the server). - -@features_1607_p - The connection can be used to query or update data in other tables. The trigger then needs to be defined in the database: - -@features_1608_p - The trigger can be used to veto a change by throwing a SQLException. - -@features_1609_p - As an alternative to implementing the Trigger interface, an application can extend the abstract class org.h2.tools.TriggerAdapter. This allows the use of the ResultSet interface within trigger implementations. In this case, only the fire method needs to be implemented: - -@features_1610_h2 -Compacting a Database - -@features_1611_p - Empty space in the database file is re-used automatically. 
When closing the database, the database is automatically compacted for up to 200 milliseconds by default. To compact more, use the SQL statement SHUTDOWN COMPACT. However re-creating the database may further reduce the database size because this will re-build the indexes. Here is a sample function to do this: - -@features_1612_p - See also the sample application org.h2.samples.Compact. The commands SCRIPT / RUNSCRIPT can be used as well to create a backup of a database and re-build the database from the script. - -@features_1613_h2 -Cache Settings - -@features_1614_p - The database keeps most frequently used data in the main memory. The amount of memory used for caching can be changed using the setting CACHE_SIZE. This setting can be set in the database connection URL (jdbc:h2:~/test;CACHE_SIZE=131072), or it can be changed at runtime using SET CACHE_SIZE size. The size of the cache, as represented by CACHE_SIZE is measured in KB, with each KB being 1024 bytes. This setting has no effect for in-memory databases. For persistent databases, the setting is stored in the database and re-used when the database is opened the next time. However, when opening an existing database, the cache size is set to at most half the amount of memory available for the virtual machine (Runtime.getRuntime().maxMemory()), even if the cache size setting stored in the database is larger; however the setting stored in the database is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE overrides this value (even if larger than the physical memory). To get the current used maximum cache size, use the query SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'info.CACHE_MAX_SIZE' - -@features_1615_p - An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE=TQ to the database URL. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. 
- -@features_1616_p - Also included is an experimental second level soft reference cache. Rows in this cache are only garbage collected on low memory. By default the second level cache is disabled. To enable it, use the prefix SOFT_. Example: jdbc:h2:~/test;CACHE_TYPE=SOFT_LRU. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. - -@features_1617_p - To get information about page reads and writes, and the current caching algorithm in use, call SELECT * FROM INFORMATION_SCHEMA.SETTINGS. The number of pages read / written is listed. - -@fragments_1000_div -    - -@fragments_1001_label -Search: - -@fragments_1002_label -Highlight keyword(s) - -@fragments_1003_a -Home - -@fragments_1004_a -Download - -@fragments_1005_a -Cheat Sheet - -@fragments_1006_b -Documentation - -@fragments_1007_a -Quickstart - -@fragments_1008_a -Installation - -@fragments_1009_a -Tutorial - -@fragments_1010_a -Features - -@fragments_1011_a -Performance - -@fragments_1012_a -Advanced - -@fragments_1013_b -Reference - -@fragments_1014_a -SQL Grammar - -@fragments_1015_a -Functions - -@fragments_1016_a -Data Types - -@fragments_1017_a -Javadoc - -@fragments_1018_a -PDF (1 MB) - -@fragments_1019_b -Support - -@fragments_1020_a -FAQ - -@fragments_1021_a -Error Analyzer - -@fragments_1022_a -Google Group (English) - -@fragments_1023_a -Google Group (Japanese) - -@fragments_1024_a -Google Group (Chinese) - -@fragments_1025_b -Appendix - -@fragments_1026_a -History & Roadmap - -@fragments_1027_a -License - -@fragments_1028_a -Build - -@fragments_1029_a -Links - -@fragments_1030_a -JaQu - -@fragments_1031_a -MVStore - -@fragments_1032_a -Architecture - -@fragments_1033_td -  - -@frame_1000_h1 -H2 Database Engine - -@frame_1001_p - Welcome to H2, the free SQL database. 
The main features of H2 are: - -@frame_1002_li -It is free to use for everybody, source code is included - -@frame_1003_li -Written in Java, but also available as native executable - -@frame_1004_li -JDBC and (partial) ODBC API - -@frame_1005_li -Embedded and client/server modes - -@frame_1006_li -Clustering is supported - -@frame_1007_li -A web client is included - -@frame_1008_h2 -No Javascript - -@frame_1009_p - If you are not automatically redirected to the main page, then Javascript is currently disabled or your browser does not support Javascript. Some features (for example the integrated search) require Javascript. - -@frame_1010_p - Please enable Javascript, or go ahead without it: H2 Database Engine - -@history_1000_h1 -History and Roadmap - -@history_1001_a - Change Log - -@history_1002_a - Roadmap - -@history_1003_a - History of this Database Engine - -@history_1004_a - Why Java - -@history_1005_a - Supporters - -@history_1006_h2 -Change Log - -@history_1007_p - The up-to-date change log is available at http://www.h2database.com/html/changelog.html - -@history_1008_h2 -Roadmap - -@history_1009_p - The current roadmap is available at http://www.h2database.com/html/roadmap.html - -@history_1010_h2 -History of this Database Engine - -@history_1011_p - The development of H2 was started in May 2004, but it was first published on December 14th 2005. The original author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. In 2001, he joined PointBase Inc. where he wrote PointBase Micro, a commercial Java SQL database. At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed to continue to work on the Hypersonic SQL codebase. The name H2 stands for Hypersonic 2, however H2 does not share code with Hypersonic SQL or HSQLDB. H2 is built from scratch. 
- -@history_1012_h2 -Why Java - -@history_1013_p - The main reasons to use a Java database are: - -@history_1014_li -Very simple to integrate in Java applications - -@history_1015_li -Support for many different platforms - -@history_1016_li -More secure than native applications (no buffer overflows) - -@history_1017_li -User defined functions (or triggers) run very fast - -@history_1018_li -Unicode support - -@history_1019_p - Some think Java is too slow for low level operations, but this is no longer true. Garbage collection for example is now faster than manual memory management. - -@history_1020_p - Developing Java code is faster than developing C or C++ code. When using Java, most time can be spent on improving the algorithms instead of porting the code to different platforms or doing memory management. Features such as Unicode and network libraries are already built-in. In Java, writing secure code is easier because buffer overflows can not occur. Features such as reflection can be used for randomized testing. - -@history_1021_p - Java is future proof: a lot of companies support Java. Java is now open source. - -@history_1022_p - To increase the portability and ease of use, this software depends on very few libraries. Features that are not available in open source Java implementations (such as Swing) are not used, or only used for optional features. - -@history_1023_h2 -Supporters - -@history_1024_p - Many thanks for those who reported bugs, gave valuable feedback, spread the word, and translated this project. - -@history_1025_p - Also many thanks to the donors. To become a donor, use PayPal (at the very bottom of the main web page). 
Donators are: - -@history_1026_li -Martin Wildam, Austria - -@history_1027_a -tagtraum industries incorporated, USA - -@history_1028_a -TimeWriter, Netherlands - -@history_1029_a -Cognitect, USA - -@history_1030_a -Code 42 Software, Inc., Minneapolis - -@history_1031_a -Code Lutin, France - -@history_1032_a -NetSuxxess GmbH, Germany - -@history_1033_a -Poker Copilot, Steve McLeod, Germany - -@history_1034_a -SkyCash, Poland - -@history_1035_a -Lumber-mill, Inc., Japan - -@history_1036_a -StockMarketEye, USA - -@history_1037_a -Eckenfelder GmbH & Co.KG, Germany - -@history_1038_li -Jun Iyama, Japan - -@history_1039_li -Steven Branda, USA - -@history_1040_li -Anthony Goubard, Netherlands - -@history_1041_li -Richard Hickey, USA - -@history_1042_li -Alessio Jacopo D'Adamo, Italy - -@history_1043_li -Ashwin Jayaprakash, USA - -@history_1044_li -Donald Bleyl, USA - -@history_1045_li -Frank Berger, Germany - -@history_1046_li -Florent Ramiere, France - -@history_1047_li -Antonio Casqueiro, Portugal - -@history_1048_li -Oliver Computing LLC, USA - -@history_1049_li -Harpal Grover Consulting Inc., USA - -@history_1050_li -Elisabetta Berlini, Italy - -@history_1051_li -William Gilbert, USA - -@history_1052_li -Antonio Dieguez Rojas, Chile - -@history_1053_a -Ontology Works, USA - -@history_1054_li -Pete Haidinyak, USA - -@history_1055_li -William Osmond, USA - -@history_1056_li -Joachim Ansorg, Germany - -@history_1057_li -Oliver Soerensen, Germany - -@history_1058_li -Christos Vasilakis, Greece - -@history_1059_li -Fyodor Kupolov, Denmark - -@history_1060_li -Jakob Jenkov, Denmark - -@history_1061_li -Stéphane Chartrand, Switzerland - -@history_1062_li -Glenn Kidd, USA - -@history_1063_li -Gustav Trede, Sweden - -@history_1064_li -Joonas Pulakka, Finland - -@history_1065_li -Bjorn Darri Sigurdsson, Iceland - -@history_1066_li -Gray Watson, USA - -@history_1067_li -Erik Dick, Germany - -@history_1068_li -Pengxiang Shao, China - -@history_1069_li -Bilingual Marketing Group, 
USA - -@history_1070_li -Philippe Marschall, Switzerland - -@history_1071_li -Knut Staring, Norway - -@history_1072_li -Theis Borg, Denmark - -@history_1073_li -Mark De Mendonca Duske, USA - -@history_1074_li -Joel A. Garringer, USA - -@history_1075_li -Olivier Chafik, France - -@history_1076_li -Rene Schwietzke, Germany - -@history_1077_li -Jalpesh Patadia, USA - -@history_1078_li -Takanori Kawashima, Japan - -@history_1079_li -Terrence JC Huang, China - -@history_1080_a -JiaDong Huang, Australia - -@history_1081_li -Laurent van Roy, Belgium - -@history_1082_li -Qian Chen, China - -@history_1083_li -Clinton Hyde, USA - -@history_1084_li -Kritchai Phromros, Thailand - -@history_1085_li -Alan Thompson, USA - -@history_1086_li -Ladislav Jech, Czech Republic - -@history_1087_li -Dimitrijs Fedotovs, Latvia - -@history_1088_li -Richard Manley-Reeve, United Kingdom - -@history_1089_li -Daniel Cyr, ThirdHalf.com, LLC, USA - -@history_1090_li -Peter Jünger, Germany - -@history_1091_li -Dan Keegan, USA - -@history_1092_li -Rafel Israels, Germany - -@history_1093_li -Fabien Todescato, France - -@history_1094_li -Cristan Meijer, Netherlands - -@history_1095_li -Adam McMahon, USA - -@history_1096_li -Fábio Gomes Lisboa Gomes, Brasil - -@history_1097_li -Lyderic Landry, England - -@history_1098_li -Mederp, Morocco - -@history_1099_li -Joaquim Golay, Switzerland - -@history_1100_li -Clemens Quoss, Germany - -@history_1101_li -Kervin Pierre, USA - -@history_1102_li -Jake Bellotti, Australia - -@history_1103_li -Arun Chittanoor, USA - -@installation_1000_h1 -Installation - -@installation_1001_a - Requirements - -@installation_1002_a - Supported Platforms - -@installation_1003_a - Installing the Software - -@installation_1004_a - Directory Structure - -@installation_1005_h2 -Requirements - -@installation_1006_p - To run this database, the following software stack is known to work. Other software most likely also works, but is not tested as much. 
- -@installation_1007_h3 -Database Engine - -@installation_1008_li -Windows XP or Vista, Mac OS X, or Linux - -@installation_1009_li -Oracle Java 7 or newer - -@installation_1010_li -Recommended Windows file system: NTFS (FAT32 only supports files up to 4 GB) - -@installation_1011_h3 -H2 Console - -@installation_1012_li -Mozilla Firefox - -@installation_1013_h2 -Supported Platforms - -@installation_1014_p - As this database is written in Java, it can run on many different platforms. It is tested with Java 7. Currently, the database is developed and tested on Windows 8 and Mac OS X using Java 7, but it also works in many other operating systems and using other Java runtime environments. All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported. - -@installation_1015_h2 -Installing the Software - -@installation_1016_p - To install the software, run the installer or unzip it to a directory of your choice. - -@installation_1017_h2 -Directory Structure - -@installation_1018_p - After installing, you should get the following directory structure: - -@installation_1019_th -Directory - -@installation_1020_th -Contents - -@installation_1021_td -bin - -@installation_1022_td -JAR and batch files - -@installation_1023_td -docs - -@installation_1024_td -Documentation - -@installation_1025_td -docs/html - -@installation_1026_td -HTML pages - -@installation_1027_td -docs/javadoc - -@installation_1028_td -Javadoc files - -@installation_1029_td -ext - -@installation_1030_td -External dependencies (downloaded when building) - -@installation_1031_td -service - -@installation_1032_td -Tools to run the database as a Windows Service - -@installation_1033_td -src - -@installation_1034_td -Source files - -@installation_1035_td -src/docsrc - -@installation_1036_td -Documentation sources - -@installation_1037_td -src/installer - -@installation_1038_td -Installer, shell, and release build script - -@installation_1039_td -src/main - 
-@installation_1040_td -Database engine source code - -@installation_1041_td -src/test - -@installation_1042_td -Test source code - -@installation_1043_td -src/tools - -@installation_1044_td -Tools and database adapters source code - -@jaqu_1000_h1 -JaQu - -@jaqu_1001_a - What is JaQu - -@jaqu_1002_a - Differences to Other Data Access Tools - -@jaqu_1003_a - Current State - -@jaqu_1004_a - Building the JaQu Library - -@jaqu_1005_a - Requirements - -@jaqu_1006_a - Example Code - -@jaqu_1007_a - Configuration - -@jaqu_1008_a - Natural Syntax - -@jaqu_1009_a - Other Ideas - -@jaqu_1010_a - Similar Projects - -@jaqu_1011_h2 -What is JaQu - -@jaqu_1012_p - Note: This project is currently in maintenance mode. A friendly fork of JaQu is available under the name iciql. - -@jaqu_1013_p - JaQu stands for Java Query and allows to access databases using pure Java. JaQu provides a fluent interface (or internal DSL). JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a Microsoft .NET technology). The following JaQu code: - -@jaqu_1014_p - stands for the SQL statement: - -@jaqu_1015_h2 -Differences to Other Data Access Tools - -@jaqu_1016_p - Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java, auto-complete in the IDE is supported. Type checking is performed by the compiler. JaQu fully protects against SQL injection. - -@jaqu_1017_p - JaQu is meant as replacement for JDBC and SQL and not as much as a replacement for tools like Hibernate. With JaQu, you don't write SQL statements as strings. JaQu is much smaller and simpler than other persistence frameworks such as Hibernate, but it also does not provide all the features of those. Unlike iBatis and Hibernate, no XML or annotation based configuration is required; instead the configuration (if required at all) is done in pure Java, within the application. - -@jaqu_1018_p - JaQu does not require or contain any data caching mechanism. 
Like JDBC and iBatis, JaQu provides full control over when and what SQL statements are executed (but without having to write SQL statements as strings). - -@jaqu_1019_h3 -Restrictions - -@jaqu_1020_p - Primitive types (e.g. boolean, int, long, double) are not supported. Use java.lang.Boolean, Integer, Long, Double instead. - -@jaqu_1021_h3 -Why in Java? - -@jaqu_1022_p - Most applications are written in Java. Mixing Java and another language (for example Scala or Groovy) in the same application is complicated: you would need to split the application and database code, and write adapter / wrapper code. - -@jaqu_1023_h2 -Current State - -@jaqu_1024_p - Currently, JaQu is only tested with the H2 database. The API may change in future versions. JaQu is not part of the h2 jar file, however the source code is included in H2, under: - -@jaqu_1025_code -src/test/org/h2/test/jaqu/* - -@jaqu_1026_li - (samples and tests) - -@jaqu_1027_code -src/tools/org/h2/jaqu/* - -@jaqu_1028_li - (framework) - -@jaqu_1029_h2 -Building the JaQu Library - -@jaqu_1030_p - To create the JaQu jar file, run: build jarJaqu. This will create the file bin/h2jaqu.jar. - -@jaqu_1031_h2 -Requirements - -@jaqu_1032_p - JaQu requires Java 6. Annotations are not needed. Currently, JaQu is only tested with the H2 database engine, however in theory it should work with any database that supports the JDBC API. - -@jaqu_1033_h2 -Example Code - -@jaqu_1034_h2 -Configuration - -@jaqu_1035_p - JaQu does not require any configuration when using the default field to column mapping. To define table indices, or if you want to map a class to a table with a different name, or a field to a column with another name, create a function called define in the data class. Example: - -@jaqu_1036_p - The method define() contains the mapping definition. It is called once when the class is used for the first time. Like annotations, the mapping is defined in the class itself. 
Unlike when using annotations, the compiler can check the syntax even for multi-column objects (multi-column indexes, multi-column primary keys and so on). Because the definition is written in Java, the configuration can be set at runtime, which is not possible using annotations. Unlike XML mapping configuration, the configuration is integrated in the class itself. - -@jaqu_1037_h2 -Natural Syntax - -@jaqu_1038_p -The plan is to support more natural (pure Java) syntax in conditions. To do that, the condition class is de-compiled to a SQL condition. A proof of concept decompiler is included (but it doesn't fully work yet; patches are welcome). The planned syntax is: - -@jaqu_1039_h2 -Other Ideas - -@jaqu_1040_p - This project has just been started, and nothing is fixed yet. Some ideas are: - -@jaqu_1041_li -Support queries on collections (instead of using a database). - -@jaqu_1042_li -Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA). - -@jaqu_1043_li -Internally use a JPA implementation (for example Hibernate) instead of SQL directly. - -@jaqu_1044_li -Use PreparedStatements and cache them. 
- -@jaqu_1045_h2 -Similar Projects - -@jaqu_1046_a -iciql (a friendly fork of JaQu) - -@jaqu_1047_a -Cement Framework - -@jaqu_1048_a -Dreamsource ORM - -@jaqu_1049_a -Empire-db - -@jaqu_1050_a -JEQUEL: Java Embedded QUEry Language - -@jaqu_1051_a -Joist - -@jaqu_1052_a -jOOQ - -@jaqu_1053_a -JoSQL - -@jaqu_1054_a -LIQUidFORM - -@jaqu_1055_a -Quaere (Alias implementation) - -@jaqu_1056_a -Quaere - -@jaqu_1057_a -Querydsl - -@jaqu_1058_a -Squill - -@license_1000_h1 -License - -@license_1001_a - Summary and License FAQ - -@license_1002_a - Mozilla Public License Version 2.0 - -@license_1003_a - Eclipse Public License - Version 1.0 - -@license_1004_a - Export Control Classification Number (ECCN) - -@license_1005_h2 -Summary and License FAQ - -@license_1006_p - H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL. - -@license_1007_li -You can use H2 for free. - -@license_1008_li -You can integrate it into your applications (including in commercial applications) and distribute it. - -@license_1009_li -Files containing only your code are not covered by this license (it is 'commercial friendly'). - -@license_1010_li -Modifications to the H2 source code must be published. - -@license_1011_li -You don't need to provide the source code of H2 if you did not modify anything. - -@license_1012_li -If you distribute a binary that includes H2, you need to add a disclaimer of liability - see the example below. - -@license_1013_p - However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was in fact just HSQLDB. 
It seems 'bungisoft' does not exist any more, but you can use the Wayback Machine and visit old web pages of http://www.bungisoft.com. - -@license_1014_p - About porting the source code to another language (for example C# or C++): converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code. - -@license_1015_p - If you distribute a binary that includes H2, you need to add the license and a disclaimer of liability (as you should do for your own code). You should add a disclaimer for each open source library you use. For example, add a file 3rdparty_license.txt in the directory where the jar files are, and list all open source libraries, each one with its license and disclaimer. For H2, a simple solution is to copy the following text below. You may also include a copy of the complete license. - -@license_1016_h2 -Mozilla Public License Version 2.0 - -@license_1017_h3 -1. Definitions - -@license_1018_p -1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. - -@license_1019_p -1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. - -@license_1020_p -1.3. "Contribution" means Covered Software of a particular Contributor. - -@license_1021_p -1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. - -@license_1022_p -1.5. "Incompatible With Secondary Licenses" means - -@license_1023_p -a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - -@license_1024_p -b. 
that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. - -@license_1025_p -1.6. "Executable Form" means any form of the work other than Source Code Form. - -@license_1026_p -1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. - -@license_1027_p -1.8. "License" means this document. - -@license_1028_p -1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. - -@license_1029_p -1.10. "Modifications" means any of the following: - -@license_1030_p -a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or - -@license_1031_p -b. any new file in Source Code Form that contains any Covered Software. - -@license_1032_p -1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. - -@license_1033_p -1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. - -@license_1034_p -1.13. "Source Code Form" means the form of the work preferred for making modifications. - -@license_1035_p -1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. 
For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -@license_1036_h3 -2. License Grants and Conditions - -@license_1037_h4 -2.1. Grants - -@license_1038_p -Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - -@license_1039_p -under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and - -@license_1040_p -under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. - -@license_1041_h4 -2.2. Effective Date - -@license_1042_p -The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. - -@license_1043_h4 -2.3. Limitations on Grant Scope - -@license_1044_p -The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. 
Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: - -@license_1045_p -for any code that a Contributor has removed from Covered Software; or - -@license_1046_p -for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - -@license_1047_p -under Patent Claims infringed by Covered Software in the absence of its Contributions. - -@license_1048_p -This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). - -@license_1049_h4 -2.4. Subsequent Licenses - -@license_1050_p -No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). - -@license_1051_h4 -2.5. Representation - -@license_1052_p -Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. - -@license_1053_h4 -2.6. Fair Use - -@license_1054_p -This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. - -@license_1055_h4 -2.7. Conditions - -@license_1056_p -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. - -@license_1057_h3 -3. Responsibilities - -@license_1058_h4 -3.1. Distribution of Source Form - -@license_1059_p -All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. 
You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. - -@license_1060_h4 -3.2. Distribution of Executable Form - -@license_1061_p -If You distribute Covered Software in Executable Form then: - -@license_1062_p -such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - -@license_1063_p -You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. - -@license_1064_h4 -3.3. Distribution of a Larger Work - -@license_1065_p -You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). - -@license_1066_h4 -3.4. 
Notices - -@license_1067_p -You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. - -@license_1068_h4 -3.5. Application of Additional Terms - -@license_1069_p -You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. - -@license_1070_h3 -4. Inability to Comply Due to Statute or Regulation - -@license_1071_p -If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. - -@license_1072_h3 -5. Termination - -@license_1073_p -5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. 
However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. - -@license_1074_p -5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. - -@license_1075_p -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. - -@license_1076_h3 -6. Disclaimer of Warranty - -@license_1077_p -Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. 
Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. - -@license_1078_h3 -7. Limitation of Liability - -@license_1079_p -Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. - -@license_1080_h3 -8. Litigation - -@license_1081_p -Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. - -@license_1082_h3 -9. Miscellaneous - -@license_1083_p -This License represents the complete agreement concerning the subject matter hereof. 
If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. - -@license_1084_h3 -10. Versions of the License - -@license_1085_h4 -10.1. New Versions - -@license_1086_p -Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. - -@license_1087_h4 -10.2. Effect of New Versions - -@license_1088_p -You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. - -@license_1089_h4 -10.3. Modified Versions - -@license_1090_p -If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). - -@license_1091_h4 -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - -@license_1092_p -If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. - -@license_1093_h3 -Exhibit A - Source Code Form License Notice - -@license_1094_p -If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. 
- -@license_1095_p -You may add additional accurate notices of copyright ownership. - -@license_1096_h3 -Exhibit B - "Incompatible With Secondary Licenses" Notice - -@license_1097_h2 -Eclipse Public License - Version 1.0 - -@license_1098_p - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -@license_1099_h3 -1. DEFINITIONS - -@license_1100_p - "Contribution" means: - -@license_1101_p - a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and - -@license_1102_p - b) in the case of each subsequent Contributor: - -@license_1103_p - i) changes to the Program, and - -@license_1104_p - ii) additions to the Program; - -@license_1105_p - where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. - -@license_1106_p - "Contributor" means any person or entity that distributes the Program. - -@license_1107_p - "Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. - -@license_1108_p - "Program" means the Contributions distributed in accordance with this Agreement. - -@license_1109_p - "Recipient" means anyone who receives the Program under this Agreement, including all Contributors. - -@license_1110_h3 -2. 
GRANT OF RIGHTS - -@license_1111_p - a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. - -@license_1112_p - b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. - -@license_1113_p - c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. 
- -@license_1114_p - d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. - -@license_1115_h3 -3. REQUIREMENTS - -@license_1116_p - A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: - -@license_1117_p - a) it complies with the terms and conditions of this Agreement; and - -@license_1118_p - b) its license agreement: - -@license_1119_p - i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; - -@license_1120_p - ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; - -@license_1121_p - iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and - -@license_1122_p - iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. - -@license_1123_p - When the Program is made available in source code form: - -@license_1124_p - a) it must be made available under this Agreement; and - -@license_1125_p - b) a copy of this Agreement must be included with each copy of the Program. - -@license_1126_p - Contributors may not remove or alter any copyright notices contained within the Program. - -@license_1127_p - Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. - -@license_1128_h3 -4. 
COMMERCIAL DISTRIBUTION - -@license_1129_p - Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. - -@license_1130_p - For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. 
Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. - -@license_1131_h3 -5. NO WARRANTY - -@license_1132_p - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. - -@license_1133_h3 -6. DISCLAIMER OF LIABILITY - -@license_1134_p - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -@license_1135_h3 -7. 
GENERAL - -@license_1136_p - If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. - -@license_1137_p - If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. - -@license_1138_p - All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. - -@license_1139_p - Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. 
Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. - -@license_1140_p - This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. - -@license_1141_h2 -Export Control Classification Number (ECCN) - -@license_1142_p - As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. However, for legal reasons, we can make no warranty that this information is correct. For details, see also the Apache Software Foundation Export Classifications page. - -@links_1000_h1 -Links - -@links_1001_p - If you want to add a link, please send it to the support email address or post it to the group. - -@links_1002_a - Quotes - -@links_1003_a - Books - -@links_1004_a - Extensions - -@links_1005_a - Blog Articles, Videos - -@links_1006_a - Database Frontends / Tools - -@links_1007_a - Products and Projects - -@links_1008_h2 -Quotes - -@links_1009_a - Quote - -@links_1010_p -: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. 
But, in less than 15 minutes I had H2 up and working with little recoding of the SQL. Thanks..... " - -@links_1011_h2 -Books - -@links_1012_a - Seam In Action - -@links_1013_h2 -Extensions - -@links_1014_a - Grails H2 Database Plugin - -@links_1015_a - h2osgi: OSGi for the H2 Database - -@links_1016_a - H2Sharp: ADO.NET interface for the H2 database engine - -@links_1017_a - A spatial extension of the H2 database. - -@links_1018_h2 -Blog Articles, Videos - -@links_1019_a - Youtube: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2 - -@links_1020_a - Analyzing CSVs with H2 in under 10 minutes (2009-12-07) - -@links_1021_a - Efficient sorting and iteration on large databases (2009-06-15) - -@links_1022_a - Porting Flexive to the H2 Database (2008-12-05) - -@links_1023_a - H2 Database with GlassFish (2008-11-24) - -@links_1024_a - H2 Database - Performance Tracing (2008-04-30) - -@links_1025_a - Open Source Databases Comparison (2007-09-11) - -@links_1026_a - The Codist: The Open Source Frameworks I Use (2007-07-23) - -@links_1027_a - The Codist: SQL Injections: How Not To Get Stuck (2007-05-08) - -@links_1028_a - David Coldrick's Weblog: New Version of H2 Database Released (2007-01-06) - -@links_1029_a - The Codist: Write Your Own Database, Again (2006-11-13) - -@links_1030_h2 -Project Pages - -@links_1031_a - Ohloh - -@links_1032_a - Freshmeat Project Page - -@links_1033_a - Wikipedia - -@links_1034_a - Java Source Net - -@links_1035_a - Linux Package Manager - -@links_1036_h2 -Database Frontends / Tools - -@links_1037_a - Dataflyer - -@links_1038_p - A tool to browse databases and export data. - -@links_1039_a - DB Solo - -@links_1040_p - SQL query tool. - -@links_1041_a - DbVisualizer - -@links_1042_p - Database tool. - -@links_1043_a - Execute Query - -@links_1044_p - Database utility written in Java. - -@links_1045_a - Flyway - -@links_1046_p - The agile database migration framework for Java. 
- -@links_1047_a - [fleXive] - -@links_1048_p - JavaEE 5 open source framework for the development of complex and evolving (web-)applications. - -@links_1049_a - JDBC Console - -@links_1050_p - This small webapp gives an ability to execute SQL against datasources bound in container's JNDI. Based on H2 Console. - -@links_1051_a - HenPlus - -@links_1052_p - HenPlus is a SQL shell written in Java. - -@links_1053_a - JDBC lint - -@links_1054_p - Helps write correct and efficient code when using the JDBC API. - -@links_1055_a - OpenOffice - -@links_1056_p - Base is OpenOffice.org's database application. It provides access to relational data sources. - -@links_1057_a - RazorSQL - -@links_1058_p - An SQL query tool, database browser, SQL editor, and database administration tool. - -@links_1059_a - SQL Developer - -@links_1060_p - Universal Database Frontend. - -@links_1061_a - SQL Workbench/J - -@links_1062_p - Free DBMS-independent SQL tool. - -@links_1063_a - SQuirreL SQL Client - -@links_1064_p - Graphical tool to view the structure of a database, browse the data, issue SQL commands etc. - -@links_1065_a - SQuirreL DB Copy Plugin - -@links_1066_p - Tool to copy data from one database to another. - -@links_1067_h2 -Products and Projects - -@links_1068_a - AccuProcess - -@links_1069_p - Visual business process modeling and simulation software for business users. - -@links_1070_a - Adeptia BPM - -@links_1071_p - A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows. - -@links_1072_a - Adeptia Integration - -@links_1073_p - Process-centric, services-based application integration suite. - -@links_1074_a - Aejaks - -@links_1075_p - A server-side scripting environment to build AJAX enabled web applications. - -@links_1076_a - Axiom Stack - -@links_1077_p - A web framework that let's you write dynamic web applications with Zen-like simplicity. 
- -@links_1078_a - Apache Cayenne - -@links_1079_p - Open source persistence framework providing object-relational mapping (ORM) and remoting services. - -@links_1080_a - Apache Jackrabbit - -@links_1081_p - Open source implementation of the Java Content Repository API (JCR). - -@links_1082_a - Apache OpenJPA - -@links_1083_p - Open source implementation of the Java Persistence API (JPA). - -@links_1084_a - AppFuse - -@links_1085_p - Helps building web applications. - -@links_1086_a - BGBlitz - -@links_1087_p - The Swiss army knife of Backgammon. - -@links_1088_a - Bonita - -@links_1089_p - Open source workflow solution for handing long-running, user-oriented processes providing out of the box workflow and business process management features. - -@links_1090_a - Bookmarks Portlet - -@links_1091_p - JSR 168 compliant bookmarks management portlet application. - -@links_1092_a - Claros inTouch - -@links_1093_p - Ajax communication suite with mail, addresses, notes, IM, and rss reader. - -@links_1094_a - CrashPlan PRO Server - -@links_1095_p - Easy and cross platform backup solution for business and service providers. - -@links_1096_a - DataNucleus - -@links_1097_p - Java persistent objects. - -@links_1098_a - DbUnit - -@links_1099_p - A JUnit extension (also usable with Ant) targeted for database-driven projects. - -@links_1100_a - DiffKit - -@links_1101_p - DiffKit is a tool for comparing two tables of data, field-by-field. DiffKit is like the Unix diff utility, but for tables instead of lines of text. - -@links_1102_a - Dinamica Framework - -@links_1103_p - Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets). - -@links_1104_a - District Health Information Software 2 (DHIS) - -@links_1105_p - The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities. 
- -@links_1106_a - Ebean ORM Persistence Layer - -@links_1107_p - Open source Java Object Relational Mapping tool. - -@links_1108_a - Eclipse CDO - -@links_1109_p - The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution. - -@links_1110_a - Fabric3 - -@links_1111_p - Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http://www.osoa.org). - -@links_1112_a - FIT4Data - -@links_1113_p - A testing framework for data management applications built on the Java implementation of FIT. - -@links_1114_a - Flux - -@links_1115_p - Java job scheduler, file transfer, workflow, and BPM. - -@links_1116_a - GeoServer - -@links_1117_p - GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing. - -@links_1118_a - GBIF Integrated Publishing Toolkit (IPT) - -@links_1119_p - The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data: taxon primary occurrence data, taxon checklists and general resource metadata. - -@links_1120_a - GNU Gluco Control - -@links_1121_p - Helps you to manage your diabetes. - -@links_1122_a - Golden T Studios - -@links_1123_p - Fun-to-play games with a simple interface. - -@links_1124_a - GridGain - -@links_1125_p - GridGain is easy to use Cloud Application Platform that enables development of highly scalable distributed Java and Scala applications that auto-scale on any grid or cloud infrastructure. - -@links_1126_a - Group Session - -@links_1127_p - Open source web groupware. 
- -@links_1128_a - HA-JDBC - -@links_1129_p - High-Availability JDBC: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver. - -@links_1130_a - Hibernate - -@links_1131_p - Relational persistence for idiomatic Java (O-R mapping tool). - -@links_1132_a - Hibicius - -@links_1133_p - Online Banking Client for the HBCI protocol. - -@links_1134_a - ImageMapper - -@links_1135_p - ImageMapper frees users from having to use file browsers to view their images. They get fast access to images and easy cataloguing of them via a user friendly interface. - -@links_1136_a - JAMWiki - -@links_1137_p - Java-based Wiki engine. - -@links_1138_a - Jaspa - -@links_1139_p - Java Spatial. Jaspa potentially brings around 200 spatial functions. - -@links_1140_a - Java Simon - -@links_1141_p - Simple Monitoring API. - -@links_1142_a - JBoss jBPM - -@links_1143_p - A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration. - -@links_1144_a - JBoss Jopr - -@links_1145_p - An enterprise management solution for JBoss middleware projects and other application technologies. - -@links_1146_a - JGeocoder - -@links_1147_p - Free Java geocoder. Geocoding is the process of estimating a latitude and longitude for a given location. - -@links_1148_a - JGrass - -@links_1149_p - Java Geographic Resources Analysis Support System. Free, multi platform, open source GIS based on the GIS framework of uDig. - -@links_1150_a - Jena - -@links_1151_p - Java framework for building Semantic Web applications. - -@links_1152_a - JMatter - -@links_1153_p - Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern. 
- -@links_1154_a - jOOQ (Java Object Oriented Querying) - -@links_1155_p - jOOQ is a fluent API for typesafe SQL query construction and execution - -@links_1156_a - Liftweb - -@links_1157_p - A Scala-based, secure, developer friendly web framework. - -@links_1158_a - LiquiBase - -@links_1159_p - A tool to manage database changes and refactorings. - -@links_1160_a - Luntbuild - -@links_1161_p - Build automation and management tool. - -@links_1162_a - localdb - -@links_1163_p - A tool that locates the full file path of the folder containing the database files. - -@links_1164_a - Magnolia - -@links_1165_p - Microarray Data Management and Export System for PFGRC (Pathogen Functional Genomics Resource Center) Microarrays. - -@links_1166_a - MiniConnectionPoolManager - -@links_1167_p - A lightweight standalone JDBC connection pool manager. - -@links_1168_a - Mr. Persister - -@links_1169_p - Simple, small and fast object relational mapping. - -@links_1170_a - Myna Application Server - -@links_1171_p - Java web app that provides dynamic web content and Java libraries access from JavaScript. - -@links_1172_a - MyTunesRss - -@links_1173_p - MyTunesRSS lets you listen to your music wherever you are. - -@links_1174_a - NCGC CurveFit - -@links_1175_p - From: NIH Chemical Genomics Center, National Institutes of Health, USA. An open source application in the life sciences research field. This application handles chemical structures and biological responses of thousands of compounds with the potential to handle million+ compounds. It utilizes an embedded H2 database to enable flexible query/retrieval of all data including advanced chemical substructure and similarity searching. The application highlights an automated curve fitting and classification algorithm that outperforms commercial packages in the field. Commercial alternatives are typically small desktop software that handle a few dose response curves at a time. 
A couple of commercial packages that do handle several thousand curves are very expensive tools (>60k USD) that require manual curation of analysis by the user; require a license to Oracle; lack advanced query/retrieval; and the ability to handle chemical structures. - -@links_1176_a - Nuxeo - -@links_1177_p - Standards-based, open source platform for building ECM applications. - -@links_1178_a - nWire - -@links_1179_p - Eclipse plug-in which expedites Java development. It's main purpose is to help developers find code quicker and easily understand how it relates to the rest of the application, thus, understand the application structure. - -@links_1180_a - Ontology Works - -@links_1181_p - This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), semantic information fusion and semantic federation of legacy databases, ontology-based domain modeling, and management of the distributed enterprise. - -@links_1182_a - Ontoprise OntoBroker - -@links_1183_p - SemanticWeb-Middleware. It supports all W3C Semantic Web recommendations: OWL, RDF, RDFS, SPARQL, and F-Logic. - -@links_1184_a - Open Anzo - -@links_1185_p - Semantic Application Server. - -@links_1186_a - OpenGroove - -@links_1187_p - OpenGroove is a groupware program that allows users to synchronize data. - -@links_1188_a - OpenSocial Development Environment (OSDE) - -@links_1189_p - Development tool for OpenSocial application. - -@links_1190_a - Orion - -@links_1191_p - J2EE Application Server. - -@links_1192_a - P5H2 - -@links_1193_p - A library for the Processing programming language and environment. - -@links_1194_a - Phase-6 - -@links_1195_p - A computer based learning software. - -@links_1196_a - Pickle - -@links_1197_p - Pickle is a Java library containing classes for persistence, concurrency, and logging. - -@links_1198_a - Piman - -@links_1199_p - Water treatment projects data management. 
- -@links_1200_a - PolePosition - -@links_1201_p - Open source database benchmark. - -@links_1202_a - Poormans - -@links_1203_p - Very basic CMS running as a SWT application and generating static html pages. - -@links_1204_a - Railo - -@links_1205_p - Railo is an alternative engine for the Cold Fusion Markup Language, that compiles code programmed in CFML into Java bytecode and executes it on a servlet engine. - -@links_1206_a - Razuna - -@links_1207_p - Open source Digital Asset Management System with integrated Web Content Management. - -@links_1208_a - RIFE - -@links_1209_p - A full-stack web application framework with tools and APIs to implement most common web features. - -@links_1210_a - Sava - -@links_1211_p - Open-source web-based content management system. - -@links_1212_a - Scriptella - -@links_1213_p - ETL (Extract-Transform-Load) and script execution tool. - -@links_1214_a - Sesar - -@links_1215_p - Dependency Injection Container with Aspect Oriented Programming. - -@links_1216_a - SemmleCode - -@links_1217_p - Eclipse plugin to help you improve software quality. - -@links_1218_a - SeQuaLite - -@links_1219_p - A free, light-weight, java data access framework. - -@links_1220_a - ShapeLogic - -@links_1221_p - Toolkit for declarative programming, image processing and computer vision. - -@links_1222_a - Shellbook - -@links_1223_p - Desktop publishing application. - -@links_1224_a - Signsoft intelliBO - -@links_1225_p - Persistence middleware supporting the JDO specification. - -@links_1226_a - SimpleORM - -@links_1227_p - Simple Java Object Relational Mapping. - -@links_1228_a - SymmetricDS - -@links_1229_p - A web-enabled, database independent, data synchronization/replication software. - -@links_1230_a - SmartFoxServer - -@links_1231_p - Platform for developing multiuser applications and games with Macromedia Flash. 
- -@links_1232_a - Social Bookmarks Friend Finder - -@links_1233_p - A GUI application that allows you to find users with similar bookmarks to the user specified (for delicious.com). - -@links_1234_a - sormula - -@links_1235_p - Simple object relational mapping. - -@links_1236_a - Springfuse - -@links_1237_p - Code generation For Spring, Spring MVC & Hibernate. - -@links_1238_a - SQLOrm - -@links_1239_p - Java Object Relation Mapping. - -@links_1240_a - StelsCSV and StelsXML - -@links_1241_p - StelsCSV is a CSV JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on text files. StelsXML is a XML JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on XML files. Both use H2 as the SQL engine. - -@links_1242_a - StorYBook - -@links_1243_p - A summary-based tool for novelist and script writers. It helps to keep the overview over the various traces a story has. - -@links_1244_a - StreamCruncher - -@links_1245_p - Event (stream) processing kernel. - -@links_1246_a - SUSE Manager, part of Linux Enterprise Server 11 - -@links_1247_p - The SUSE Manager eases the burden of compliance with regulatory requirements and corporate policies. - -@links_1248_a - Tune Backup - -@links_1249_p - Easy-to-use backup solution for your iTunes library. - -@links_1250_a - TimeWriter - -@links_1251_p - TimeWriter is a very flexible program for time administration / time tracking. The older versions used dBase tables. The new version 5 is completely rewritten, now using the H2 database. TimeWriter is delivered in Dutch and English. - -@links_1252_a - weblica - -@links_1253_p - Desktop CMS. - -@links_1254_a - Web of Web - -@links_1255_p - Collaborative and realtime interactive media platform for the web. - -@links_1256_a - Werkzeugkasten - -@links_1257_p - Minimum Java Toolset. 
- -@links_1258_a - VPDA - -@links_1259_p - View providers driven applications is a Java based application framework for building applications composed from server components - view providers. - -@links_1260_a - Volunteer database - -@links_1261_p - A database front end to register volunteers, partnership and donation for a Non Profit organization. - -@mainWeb_1000_h1 -H2 Database Engine - -@mainWeb_1001_p - Welcome to H2, the Java SQL database. The main features of H2 are: - -@mainWeb_1002_li -Very fast, open source, JDBC API - -@mainWeb_1003_li -Embedded and server modes; in-memory databases - -@mainWeb_1004_li -Browser based Console application - -@mainWeb_1005_li -Small footprint: around 1.5 MB jar file size - -@mainWeb_1006_h2 -Download - -@mainWeb_1007_td - Version 1.4.196 (2017-06-10) - -@mainWeb_1008_a -Windows Installer (5 MB) - -@mainWeb_1009_a -All Platforms (zip, 8 MB) - -@mainWeb_1010_a -All Downloads - -@mainWeb_1011_td -    - -@mainWeb_1012_h2 -Support - -@mainWeb_1013_a -Stack Overflow (tag H2) - -@mainWeb_1014_a -Google Group English - -@mainWeb_1015_p -, Japanese - -@mainWeb_1016_p - For non-technical issues, use: - -@mainWeb_1017_h2 -Features - -@mainWeb_1018_th -H2 - -@mainWeb_1019_a -Derby - -@mainWeb_1020_a -HSQLDB - -@mainWeb_1021_a -MySQL - -@mainWeb_1022_a -PostgreSQL - -@mainWeb_1023_td -Pure Java - -@mainWeb_1024_td -Yes - -@mainWeb_1025_td -Yes - -@mainWeb_1026_td -Yes - -@mainWeb_1027_td -No - -@mainWeb_1028_td -No - -@mainWeb_1029_td -Memory Mode - -@mainWeb_1030_td -Yes - -@mainWeb_1031_td -Yes - -@mainWeb_1032_td -Yes - -@mainWeb_1033_td -No - -@mainWeb_1034_td -No - -@mainWeb_1035_td -Encrypted Database - -@mainWeb_1036_td -Yes - -@mainWeb_1037_td -Yes - -@mainWeb_1038_td -Yes - -@mainWeb_1039_td -No - -@mainWeb_1040_td -No - -@mainWeb_1041_td -ODBC Driver - -@mainWeb_1042_td -Yes - -@mainWeb_1043_td -No - -@mainWeb_1044_td -No - -@mainWeb_1045_td -Yes - -@mainWeb_1046_td -Yes - -@mainWeb_1047_td -Fulltext Search - -@mainWeb_1048_td 
-Yes - -@mainWeb_1049_td -No - -@mainWeb_1050_td -No - -@mainWeb_1051_td -Yes - -@mainWeb_1052_td -Yes - -@mainWeb_1053_td -Multi Version Concurrency - -@mainWeb_1054_td -Yes - -@mainWeb_1055_td -No - -@mainWeb_1056_td -Yes - -@mainWeb_1057_td -Yes - -@mainWeb_1058_td -Yes - -@mainWeb_1059_td -Footprint (jar/dll size) - -@mainWeb_1060_td -~1 MB - -@mainWeb_1061_td -~2 MB - -@mainWeb_1062_td -~1 MB - -@mainWeb_1063_td -~4 MB - -@mainWeb_1064_td -~6 MB - -@mainWeb_1065_p - See also the detailed comparison. - -@mainWeb_1066_h2 -News - -@mainWeb_1067_b -Newsfeeds: - -@mainWeb_1068_a -Full text (Atom) - -@mainWeb_1069_p - or Header only (RSS). - -@mainWeb_1070_b -Email Newsletter: - -@mainWeb_1071_p - Subscribe to H2 Database News (Google account required) to get informed about new releases. Your email address is only used in this context. - -@mainWeb_1072_td -  - -@mainWeb_1073_h2 -Contribute - -@mainWeb_1074_p - You can contribute to the development of H2 by sending feedback and bug reports, or translate the H2 Console application (for details, start the H2 Console and select Options / Translate). To donate money, click on the PayPal button below. You will be listed as a supporter: - -@main_1000_h1 -H2 Database Engine - -@main_1001_p - Welcome to H2, the free Java SQL database engine. - -@main_1002_a -Quickstart - -@main_1003_p - Get a fast overview. - -@main_1004_a -Tutorial - -@main_1005_p - Go through the samples. - -@main_1006_a -Features - -@main_1007_p - See what this database can do and how to use these features. 
- -@mvstore_1000_h1 -MVStore - -@mvstore_1001_a - Overview - -@mvstore_1002_a - Example Code - -@mvstore_1003_a - Store Builder - -@mvstore_1004_a - R-Tree - -@mvstore_1005_a - Features - -@mvstore_1006_a -- Maps - -@mvstore_1007_a -- Versions - -@mvstore_1008_a -- Transactions - -@mvstore_1009_a -- In-Memory Performance and Usage - -@mvstore_1010_a -- Pluggable Data Types - -@mvstore_1011_a -- BLOB Support - -@mvstore_1012_a -- R-Tree and Pluggable Map Implementations - -@mvstore_1013_a -- Concurrent Operations and Caching - -@mvstore_1014_a -- Log Structured Storage - -@mvstore_1015_a -- Off-Heap and Pluggable Storage - -@mvstore_1016_a -- File System Abstraction, File Locking and Online Backup - -@mvstore_1017_a -- Encrypted Files - -@mvstore_1018_a -- Tools - -@mvstore_1019_a -- Exception Handling - -@mvstore_1020_a -- Storage Engine for H2 - -@mvstore_1021_a - File Format - -@mvstore_1022_a - Similar Projects and Differences to Other Storage Engines - -@mvstore_1023_a - Current State - -@mvstore_1024_a - Requirements - -@mvstore_1025_h2 -Overview - -@mvstore_1026_p - The MVStore is a persistent, log structured key-value store. It is planned to be the next storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL. - -@mvstore_1027_li -MVStore stands for "multi-version store". - -@mvstore_1028_li -Each store contains a number of maps that can be accessed using the java.util.Map interface. - -@mvstore_1029_li -Both file-based persistence and in-memory operation are supported. - -@mvstore_1030_li -It is intended to be fast, simple to use, and small. - -@mvstore_1031_li -Concurrent read and write operations are supported. - -@mvstore_1032_li -Transactions are supported (including concurrent transactions and 2-phase commit). - -@mvstore_1033_li -The tool is very modular. 
It supports pluggable data types and serialization, pluggable storage (to a file, to off-heap memory), pluggable map implementations (B-tree, R-tree, concurrent B-tree currently), BLOB storage, and a file system abstraction to support encrypted files and zip files. - -@mvstore_1034_h2 -Example Code - -@mvstore_1035_p - The following sample code shows how to use the tool: - -@mvstore_1036_h2 -Store Builder - -@mvstore_1037_p - The MVStore.Builder provides a fluid interface to build a store if configuration options are needed. Example usage: - -@mvstore_1038_p - The list of available options is: - -@mvstore_1039_li -autoCommitBufferSize: the size of the write buffer. - -@mvstore_1040_li -autoCommitDisabled: to disable auto-commit. - -@mvstore_1041_li -backgroundExceptionHandler: a handler for exceptions that could occur while writing in the background. - -@mvstore_1042_li -cacheSize: the cache size in MB. - -@mvstore_1043_li -compress: compress the data when storing using a fast algorithm (LZF). - -@mvstore_1044_li -compressHigh: compress the data when storing using a slower algorithm (Deflate). - -@mvstore_1045_li -encryptionKey: the key for file encryption. - -@mvstore_1046_li -fileName: the name of the file, for file based stores. - -@mvstore_1047_li -fileStore: the storage implementation to use. - -@mvstore_1048_li -pageSplitSize: the point where pages are split. - -@mvstore_1049_li -readOnly: open the file in read-only mode. - -@mvstore_1050_h2 -R-Tree - -@mvstore_1051_p - The MVRTreeMap is an R-tree implementation that supports fast spatial queries. It can be used as follows: - -@mvstore_1052_p - The default number of dimensions is 2. To use a different number of dimensions, call new MVRTreeMap.Builder<String>().dimensions(3). The minimum number of dimensions is 1, the maximum is 32. - -@mvstore_1053_h2 -Features - -@mvstore_1054_h3 -Maps - -@mvstore_1055_p - Each store contains a set of named maps. 
A map is sorted by key, and supports the common lookup operations, including access to the first and last key, iterate over some or all keys, and so on. - -@mvstore_1056_p - Also supported, and very uncommon for maps, is fast index lookup: the entries of the map can be be efficiently accessed like a random-access list (get the entry at the given index), and the index of a key can be calculated efficiently. That also means getting the median of two keys is very fast, and a range of keys can be counted very quickly. The iterator supports fast skipping. This is possible because internally, each map is organized in the form of a counted B+-tree. - -@mvstore_1057_p - In database terms, a map can be used like a table, where the key of the map is the primary key of the table, and the value is the row. A map can also represent an index, where the key of the map is the key of the index, and the value of the map is the primary key of the table (for non-unique indexes, the key of the map must also contain the primary key). - -@mvstore_1058_h3 -Versions - -@mvstore_1059_p - A version is a snapshot of all the data of all maps at a given point in time. Creating a snapshot is fast: only those pages that are changed after a snapshot are copied. This behavior is also called COW (copy on write). Old versions are readable. Rollback to an old version is supported. - -@mvstore_1060_p - The following sample code show how to create a store, open a map, add some data, and access the current and an old version: - -@mvstore_1061_h3 -Transactions - -@mvstore_1062_p - To support multiple concurrent open transactions, a transaction utility is included, the TransactionStore. The tool supports PostgreSQL style "read committed" transaction isolation with savepoints, two-phase commit, and other features typically available in a database. There is no limit on the size of a transaction (the log is written to disk for large or long running transactions). 
- -@mvstore_1063_p - Internally, this utility stores the old versions of changed entries in a separate map, similar to a transaction log, except that entries of a closed transaction are removed, and the log is usually not stored for short transactions. For common use cases, the storage overhead of this utility is very small compared to the overhead of a regular transaction log. - -@mvstore_1064_h3 -In-Memory Performance and Usage - -@mvstore_1065_p - Performance of in-memory operations is about 50% slower than java.util.TreeMap. - -@mvstore_1066_p - The memory overhead for large maps is slightly better than for the regular map implementations, but there is a higher overhead per map. For maps with less than about 25 entries, the regular map implementations need less memory. - -@mvstore_1067_p - If no file name is specified, the store operates purely in memory. Except for persisting data, all features are supported in this mode (multi-versioning, index lookup, R-tree and so on). If a file name is specified, all operations occur in memory (with the same performance characteristics) until data is persisted. - -@mvstore_1068_p - As in all map implementations, keys need to be immutable, that means changing the key object after an entry has been added is not allowed. If a file name is specified, the value may also not be changed after adding an entry, because it might be serialized (which could happen at any time when autocommit is enabled). - -@mvstore_1069_h3 -Pluggable Data Types - -@mvstore_1070_p - Serialization is pluggable. The default serialization currently supports many common data types, and uses Java serialization for other objects. The following classes are currently directly supported: Boolean, Byte, Short, Character, Integer, Long, Float, Double, BigInteger, BigDecimal, String, UUID, Date and arrays (both primitive arrays and object arrays). For serialized objects, the size estimate is adjusted using an exponential moving average. 
- -@mvstore_1071_p - Parameterized data types are supported (for example one could build a string data type that limits the length). - -@mvstore_1072_p - The storage engine itself does not have any length limits, so that keys, values, pages, and chunks can be very big (as big as fits in memory). Also, there is no inherent limit to the number of maps and chunks. Due to using a log structured storage, there is no special case handling for large keys or pages. - -@mvstore_1073_h3 -BLOB Support - -@mvstore_1074_p - There is a mechanism that stores large binary objects by splitting them into smaller blocks. This allows to store objects that don't fit in memory. Streaming as well as random access reads on such objects are supported. This tool is written on top of the store, using only the map interface. - -@mvstore_1075_h3 -R-Tree and Pluggable Map Implementations - -@mvstore_1076_p - The map implementation is pluggable. In addition to the default MVMap (multi-version map), there is a multi-version R-tree map implementation for spatial operations. - -@mvstore_1077_h3 -Concurrent Operations and Caching - -@mvstore_1078_p - Concurrent reads and writes are supported. All such read operations can occur in parallel. Concurrent reads from the page cache, as well as concurrent reads from the file system are supported. Write operations first read the relevant pages from disk to memory (this can happen concurrently), and only then modify the data. The in-memory parts of write operations are synchronized. Writing changes to the file can occur concurrently to modifying the data, as writing operates on a snapshot. - -@mvstore_1079_p - Caching is done on the page level. The page cache is a concurrent LIRS cache, which should be resistant against scan operations. - -@mvstore_1080_p - For fully scalable concurrent write operations to a map (in-memory and to disk), the map could be split into multiple maps in different stores ('sharding'). 
The plan is to add such a mechanism later when needed. - -@mvstore_1081_h3 -Log Structured Storage - -@mvstore_1082_p - Internally, changes are buffered in memory, and once enough changes have accumulated, they are written in one continuous disk write operation. Compared to traditional database storage engines, this should improve write performance for file systems and storage systems that do not efficiently support small random writes, such as Btrfs, as well as SSDs. (According to a test, write throughput of a common SSD increases with write block size, until a block size of 2 MB, and then does not further increase.) By default, changes are automatically written when more than a number of pages are modified, and once every second in a background thread, even if only little data was changed. Changes can also be written explicitly by calling commit(). - -@mvstore_1083_p - When storing, all changed pages are serialized, optionally compressed using the LZF algorithm, and written sequentially to a free area of the file. Each such change set is called a chunk. All parent pages of the changed B-trees are stored in this chunk as well, so that each chunk also contains the root of each changed map (which is the entry point for reading this version of the data). There is no separate index: all data is stored as a list of pages. Per store, there is one additional map that contains the metadata (the list of maps, where the root page of each map is stored, and the list of chunks). - -@mvstore_1084_p - There are usually two write operations per chunk: one to store the chunk data (the pages), and one to update the file header (so it points to the latest chunk). If the chunk is appended at the end of the file, the file header is only written at the end of the chunk. There is no transaction log, no undo log, and there are no in-place updates (however, unused chunks are overwritten by default). 
- -@mvstore_1085_p - Old data is kept for at least 45 seconds (configurable), so that there are no explicit sync operations required to guarantee data consistency. An application can also sync explicitly when needed. To reuse disk space, the chunks with the lowest amount of live data are compacted (the live data is stored again in the next chunk). To improve data locality and disk space usage, the plan is to automatically defragment and compact data. - -@mvstore_1086_p - Compared to traditional storage engines (that use a transaction log, undo log, and main storage area), the log structured storage is simpler, more flexible, and typically needs less disk operations per change, as data is only written once instead of twice or 3 times, and because the B-tree pages are always full (they are stored next to each other) and can be easily compressed. But temporarily, disk space usage might actually be a bit higher than for a regular database, as disk space is not immediately re-used (there are no in-place updates). - -@mvstore_1087_h3 -Off-Heap and Pluggable Storage - -@mvstore_1088_p - Storage is pluggable. Unless pure in-memory operation is used, the default storage is to a single file. - -@mvstore_1089_p - An off-heap storage implementation is available. This storage keeps the data in the off-heap memory, meaning outside of the regular garbage collected heap. This allows to use very large in-memory stores without having to increase the JVM heap, which would increase Java garbage collection pauses a lot. Memory is allocated using ByteBuffer.allocateDirect. One chunk is allocated at a time (each chunk is usually a few MB large), so that allocation cost is low. To use the off-heap storage, call: - -@mvstore_1090_h3 -File System Abstraction, File Locking and Online Backup - -@mvstore_1091_p - The file system is pluggable. The same file system abstraction is used as H2 uses. The file can be encrypted using a encrypting file system wrapper. 
Other file system implementations support reading from a compressed zip or jar file. The file system abstraction closely matches the Java 7 file system API. - -@mvstore_1092_p - Each store may only be opened once within a JVM. When opening a store, the file is locked in exclusive mode, so that the file can only be changed from within one process. Files can be opened in read-only mode, in which case a shared lock is used. - -@mvstore_1093_p - The persisted data can be backed up at any time, even during write operations (online backup). To do that, automatic disk space reuse needs to be first disabled, so that new data is always appended at the end of the file. Then, the file can be copied. The file handle is available to the application. It is recommended to use the utility class FileChannelInputStream to do this. For encrypted databases, both the encrypted (raw) file content, as well as the clear text content, can be backed up. - -@mvstore_1094_h3 -Encrypted Files - -@mvstore_1095_p - File encryption ensures the data can only be read with the correct password. Data can be encrypted as follows: - -@mvstore_1096_p - The following algorithms and settings are used: - -@mvstore_1097_li -The password char array is cleared after use, to reduce the risk that the password is stolen even if the attacker has access to the main memory. - -@mvstore_1098_li -The password is hashed according to the PBKDF2 standard, using the SHA-256 hash algorithm. - -@mvstore_1099_li -The length of the salt is 64 bits, so that an attacker can not use a pre-calculated password hash table (rainbow table). It is generated using a cryptographically secure random number generator. - -@mvstore_1100_li -To speed up opening an encrypted stores on Android, the number of PBKDF2 iterations is 10. The higher the value, the better the protection against brute-force password cracking attacks, but the slower is opening a file. 
- -@mvstore_1101_li -The file itself is encrypted using the standardized disk encryption mode XTS-AES. Only little more than one AES-128 round per block is needed. - -@mvstore_1102_h3 -Tools - -@mvstore_1103_p - There is a tool, the MVStoreTool, to dump the contents of a file. - -@mvstore_1104_h3 -Exception Handling - -@mvstore_1105_p - This tool does not throw checked exceptions. Instead, unchecked exceptions are thrown if needed. The error message always contains the version of the tool. The following exceptions can occur: - -@mvstore_1106_code -IllegalStateException - -@mvstore_1107_li - if a map was already closed or an IO exception occurred, for example if the file was locked, is already closed, could not be opened or closed, if reading or writing failed, if the file is corrupt, or if there is an internal error in the tool. For such exceptions, an error code is added so that the application can distinguish between different error cases. - -@mvstore_1108_code -IllegalArgumentException - -@mvstore_1109_li - if a method was called with an illegal argument. - -@mvstore_1110_code -UnsupportedOperationException - -@mvstore_1111_li - if a method was called that is not supported, for example trying to modify a read-only map. - -@mvstore_1112_code -ConcurrentModificationException - -@mvstore_1113_li - if a map is modified concurrently. - -@mvstore_1114_h3 -Storage Engine for H2 - -@mvstore_1115_p - For H2 version 1.4 and newer, the MVStore is the default storage engine (supporting SQL, JDBC, transactions, MVCC, and so on). For older versions, append ;MV_STORE=TRUE to the database URL. Even though it can be used with the default table level locking, by default the MVCC mode is enabled when using the MVStore. - -@mvstore_1116_h2 -File Format - -@mvstore_1117_p - The data is stored in one file. The file contains two file headers (for safety), and a number of chunks. The file headers are one block each; a block is 4096 bytes. 
Each chunk is at least one block, but typically 200 blocks or more. Data is stored in the chunks in the form of a log structured storage. There is one chunk for every version. - -@mvstore_1118_p - Each chunk contains a number of B-tree pages. As an example, the following code: - -@mvstore_1119_p - will result in the following two chunks (excluding metadata): - -@mvstore_1120_b -Chunk 1: - -@mvstore_1121_p - - Page 1: (root) node with 2 entries pointing to page 2 and 3 - -@mvstore_1122_p - - Page 2: leaf with 140 entries (keys 0 - 139) - -@mvstore_1123_p - - Page 3: leaf with 260 entries (keys 140 - 399) - -@mvstore_1124_b -Chunk 2: - -@mvstore_1125_p - - Page 4: (root) node with 2 entries pointing to page 5 and 3 - -@mvstore_1126_p - - Page 5: leaf with 140 entries (keys 0 - 139) - -@mvstore_1127_p - That means each chunk contains the changes of one version: the new version of the changed pages and the parent pages, recursively, up to the root page. Pages in subsequent chunks refer to pages in earlier chunks. - -@mvstore_1128_h3 -File Header - -@mvstore_1129_p - There are two file headers, which normally contain the exact same data. But once in a while, the file headers are updated, and writing could partially fail, which could corrupt a header. That's why there is a second header. Only the file headers are updated in this way (called "in-place update"). The headers contain the following data: - -@mvstore_1130_p - The data is stored in the form of a key-value pair. Each value is stored as a hexadecimal number. The entries are: - -@mvstore_1131_li -H: The entry "H:2" stands for the the H2 database. - -@mvstore_1132_li -block: The block number where one of the newest chunks starts (but not necessarily the newest). - -@mvstore_1133_li -blockSize: The block size of the file; currently always hex 1000, which is decimal 4096, to match the disk sector length of modern hard disks. 
- -@mvstore_1134_li -chunk: The chunk id, which is normally the same value as the version; however, the chunk id might roll over to 0, while the version doesn't. - -@mvstore_1135_li -created: The number of milliseconds since 1970 when the file was created. - -@mvstore_1136_li -format: The file format number. Currently 1. - -@mvstore_1137_li -version: The version number of the chunk. - -@mvstore_1138_li -fletcher: The Fletcher-32 checksum of the header. - -@mvstore_1139_p - When opening the file, both headers are read and the checksum is verified. If both headers are valid, the one with the newer version is used. The chunk with the latest version is then detected (details about this see below), and the rest of the metadata is read from there. If the chunk id, block and version are not stored in the file header, then the latest chunk lookup starts with the last chunk in the file. - -@mvstore_1140_h3 -Chunk Format - -@mvstore_1141_p - There is one chunk per version. Each chunk consists of a header, the pages that were modified in this version, and a footer. The pages contain the actual data of the maps. The pages inside a chunk are stored right after the header, next to each other (unaligned). The size of a chunk is a multiple of the block size. The footer is stored in the last 128 bytes of the chunk. - -@mvstore_1142_p - The footer allows to verify that the chunk is completely written (a chunk is written as one write operation), and allows to find the start position of the very last chunk in the file. The chunk header and footer contain the following data: - -@mvstore_1143_p - The fields of the chunk header and footer are: - -@mvstore_1144_li -chunk: The chunk id. - -@mvstore_1145_li -block: The first block of the chunk (multiply by the block size to get the position in the file). - -@mvstore_1146_li -len: The size of the chunk in number of blocks. - -@mvstore_1147_li -map: The id of the newest map; incremented when a new map is created. 
- -@mvstore_1148_li -max: The sum of all maximum page sizes (see page format). - -@mvstore_1149_li -next: The predicted start block of the next chunk. - -@mvstore_1150_li -pages: The number of pages in the chunk. - -@mvstore_1151_li -root: The position of the metadata root page (see page format). - -@mvstore_1152_li -time: The time the chunk was written, in milliseconds after the file was created. - -@mvstore_1153_li -version: The version this chunk represents. - -@mvstore_1154_li -fletcher: The checksum of the footer. - -@mvstore_1155_p - Chunks are never updated in-place. Each chunk contains the pages that were changed in that version (there is one chunk per version, see above), plus all the parent nodes of those pages, recursively, up to the root page. If an entry in a map is changed, removed, or added, then the respective page is copied, modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented. This mechanism is called copy-on-write, and is similar to how the Btrfs file system works. Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first. - -@mvstore_1156_p - How the newest chunk is located when opening a store: The file header contains the position of a recent chunk, but not always the newest one. This is to reduce the number of file header updates. After opening the file, the file headers, and the chunk footer of the very last chunk (at the end of the file) are read. From those candidates, the header of the most recent chunk is read. If it contains a "next" pointer (see above), those chunk's header and footer are read as well. 
If it turns out to be a newer valid chunk, this is repeated until the newest chunk is found.
- -@mvstore_1167_li -values (byte array; leaf pages only): All values, stored depending on the data type. - -@mvstore_1168_p - Even though this is not required by the file format, pages are stored in the following order: For each map, the root page is stored first, then the internal nodes (if there are any), and then the leaf pages. This should speed up reads for media where sequential reads are faster than random access reads. The metadata map is stored at the end of a chunk. - -@mvstore_1169_p - Pointers to pages are stored as a long, using a special format: 26 bits for the chunk id, 32 bits for the offset within the chunk, 5 bits for the length code, 1 bit for the page type (leaf or internal node). The page type is encoded so that when clearing or removing a map, leaf pages don't have to be read (internal nodes do have to be read in order to know where all the pages are; but in a typical B-tree the vast majority of the pages are leaf pages). The absolute file position is not included so that chunks can be moved within the file without having to change page pointers; only the chunk metadata needs to be changed. The length code is a number from 0 to 31, where 0 means the maximum length of the page is 32 bytes, 1 means 48 bytes, 2: 64, 3: 96, 4: 128, 5: 192, and so on until 31 which means longer than 1 MB. That way, reading a page only requires one read operation (except for very large pages). The sum of the maximum length of all pages is stored in the chunk metadata (field "max"), and when a page is marked as removed, the live maximum length is adjusted. This allows to estimate the amount of free space within a block, in addition to the number of free pages. - -@mvstore_1170_p - The total number of entries in child pages are kept to allow efficient range counting, lookup by index, and skip operations. The pages form a counted B-tree. - -@mvstore_1171_p - Data compression: The data after the page type are optionally compressed using the LZF algorithm. 
- -@mvstore_1172_h3 -Metadata Map - -@mvstore_1173_p - In addition to the user maps, there is one metadata map that contains names and positions of user maps, and chunk metadata. The very last page of a chunk contains the root page of that metadata map. The exact position of this root page is stored in the chunk header. This page (directly or indirectly) points to the root pages of all other maps. The metadata map of a store with a map named "data", and one chunk, contains the following entries: - -@mvstore_1174_li -chunk.1: The metadata of chunk 1. This is the same data as the chunk header, plus the number of live pages, and the maximum live length. - -@mvstore_1175_li -map.1: The metadata of map 1. The entries are: name, createVersion, and type. - -@mvstore_1176_li -name.data: The map id of the map named "data". The value is "1". - -@mvstore_1177_li -root.1: The root position of map 1. - -@mvstore_1178_li -setting.storeVersion: The store version (a user defined value). - -@mvstore_1179_h2 -Similar Projects and Differences to Other Storage Engines - -@mvstore_1180_p - Unlike similar storage engines like LevelDB and Kyoto Cabinet, the MVStore is written in Java and can easily be embedded in a Java and Android application. - -@mvstore_1181_p - The MVStore is somewhat similar to the Berkeley DB Java Edition because it is also written in Java, and is also a log structured storage, but the H2 license is more liberal. - -@mvstore_1182_p - Like SQLite 3, the MVStore keeps all data in one file. Unlike SQLite 3, the MVStore uses is a log structured storage. The plan is to make the MVStore both easier to use as well as faster than SQLite 3. In a recent (very simple) test, the MVStore was about twice as fast as SQLite 3 on Android. - -@mvstore_1183_p - The API of the MVStore is similar to MapDB (previously known as JDBM) from Jan Kotek, and some code is shared between MVStore and MapDB. However, unlike MapDB, the MVStore uses is a log structured storage. 
The MVStore does not have a record size limit. - -@mvstore_1184_h2 -Current State - -@mvstore_1185_p - The code is still experimental at this stage. The API as well as the behavior may partially change. Features may be added and removed (even though the main features will stay). - -@mvstore_1186_h2 -Requirements - -@mvstore_1187_p - The MVStore is included in the latest H2 jar file. - -@mvstore_1188_p - There are no special requirements to use it. The MVStore should run on any JVM as well as on Android. - -@mvstore_1189_p - To build just the MVStore (without the database engine), run: - -@mvstore_1190_p - This will create the file bin/h2mvstore-1.4.196.jar (about 200 KB). - -@performance_1000_h1 -Performance - -@performance_1001_a - Performance Comparison - -@performance_1002_a - PolePosition Benchmark - -@performance_1003_a - Database Performance Tuning - -@performance_1004_a - Using the Built-In Profiler - -@performance_1005_a - Application Profiling - -@performance_1006_a - Database Profiling - -@performance_1007_a - Statement Execution Plans - -@performance_1008_a - How Data is Stored and How Indexes Work - -@performance_1009_a - Fast Database Import - -@performance_1010_h2 -Performance Comparison - -@performance_1011_p - In many cases H2 is faster than other (open source and not open source) database engines. Please note this is mostly a single connection benchmark run on one computer, with many very simple operations running against the database. This benchmark does not include very complex queries. The embedded mode of H2 is faster than the client-server mode because the per-statement overhead is greatly reduced. 
- -@performance_1012_h3 -Embedded - -@performance_1013_th -Test Case - -@performance_1014_th -Unit - -@performance_1015_th -H2 - -@performance_1016_th -HSQLDB - -@performance_1017_th -Derby - -@performance_1018_td -Simple: Init - -@performance_1019_td -ms - -@performance_1020_td -1019 - -@performance_1021_td -1907 - -@performance_1022_td -8280 - -@performance_1023_td -Simple: Query (random) - -@performance_1024_td -ms - -@performance_1025_td -1304 - -@performance_1026_td -873 - -@performance_1027_td -1912 - -@performance_1028_td -Simple: Query (sequential) - -@performance_1029_td -ms - -@performance_1030_td -835 - -@performance_1031_td -1839 - -@performance_1032_td -5415 - -@performance_1033_td -Simple: Update (sequential) - -@performance_1034_td -ms - -@performance_1035_td -961 - -@performance_1036_td -2333 - -@performance_1037_td -21759 - -@performance_1038_td -Simple: Delete (sequential) - -@performance_1039_td -ms - -@performance_1040_td -950 - -@performance_1041_td -1922 - -@performance_1042_td -32016 - -@performance_1043_td -Simple: Memory Usage - -@performance_1044_td -MB - -@performance_1045_td -21 - -@performance_1046_td -10 - -@performance_1047_td -8 - -@performance_1048_td -BenchA: Init - -@performance_1049_td -ms - -@performance_1050_td -919 - -@performance_1051_td -2133 - -@performance_1052_td -7528 - -@performance_1053_td -BenchA: Transactions - -@performance_1054_td -ms - -@performance_1055_td -1219 - -@performance_1056_td -2297 - -@performance_1057_td -8541 - -@performance_1058_td -BenchA: Memory Usage - -@performance_1059_td -MB - -@performance_1060_td -12 - -@performance_1061_td -15 - -@performance_1062_td -7 - -@performance_1063_td -BenchB: Init - -@performance_1064_td -ms - -@performance_1065_td -905 - -@performance_1066_td -1993 - -@performance_1067_td -8049 - -@performance_1068_td -BenchB: Transactions - -@performance_1069_td -ms - -@performance_1070_td -1091 - -@performance_1071_td -583 - -@performance_1072_td -1165 - -@performance_1073_td 
-BenchB: Memory Usage - -@performance_1074_td -MB - -@performance_1075_td -17 - -@performance_1076_td -11 - -@performance_1077_td -8 - -@performance_1078_td -BenchC: Init - -@performance_1079_td -ms - -@performance_1080_td -2491 - -@performance_1081_td -4003 - -@performance_1082_td -8064 - -@performance_1083_td -BenchC: Transactions - -@performance_1084_td -ms - -@performance_1085_td -1979 - -@performance_1086_td -803 - -@performance_1087_td -2840 - -@performance_1088_td -BenchC: Memory Usage - -@performance_1089_td -MB - -@performance_1090_td -19 - -@performance_1091_td -22 - -@performance_1092_td -9 - -@performance_1093_td -Executed statements - -@performance_1094_td -# - -@performance_1095_td -1930995 - -@performance_1096_td -1930995 - -@performance_1097_td -1930995 - -@performance_1098_td -Total time - -@performance_1099_td -ms - -@performance_1100_td -13673 - -@performance_1101_td -20686 - -@performance_1102_td -105569 - -@performance_1103_td -Statements per second - -@performance_1104_td -# - -@performance_1105_td -141226 - -@performance_1106_td -93347 - -@performance_1107_td -18291 - -@performance_1108_h3 -Client-Server - -@performance_1109_th -Test Case - -@performance_1110_th -Unit - -@performance_1111_th -H2 (Server) - -@performance_1112_th -HSQLDB - -@performance_1113_th -Derby - -@performance_1114_th -PostgreSQL - -@performance_1115_th -MySQL - -@performance_1116_td -Simple: Init - -@performance_1117_td -ms - -@performance_1118_td -16338 - -@performance_1119_td -17198 - -@performance_1120_td -27860 - -@performance_1121_td -30156 - -@performance_1122_td -29409 - -@performance_1123_td -Simple: Query (random) - -@performance_1124_td -ms - -@performance_1125_td -3399 - -@performance_1126_td -2582 - -@performance_1127_td -6190 - -@performance_1128_td -3315 - -@performance_1129_td -3342 - -@performance_1130_td -Simple: Query (sequential) - -@performance_1131_td -ms - -@performance_1132_td -21841 - -@performance_1133_td -18699 - -@performance_1134_td -42347 - 
-@performance_1135_td -30774 - -@performance_1136_td -32611 - -@performance_1137_td -Simple: Update (sequential) - -@performance_1138_td -ms - -@performance_1139_td -6913 - -@performance_1140_td -7745 - -@performance_1141_td -28576 - -@performance_1142_td -32698 - -@performance_1143_td -11350 - -@performance_1144_td -Simple: Delete (sequential) - -@performance_1145_td -ms - -@performance_1146_td -8051 - -@performance_1147_td -9751 - -@performance_1148_td -42202 - -@performance_1149_td -44480 - -@performance_1150_td -16555 - -@performance_1151_td -Simple: Memory Usage - -@performance_1152_td -MB - -@performance_1153_td -22 - -@performance_1154_td -11 - -@performance_1155_td -9 - -@performance_1156_td -0 - -@performance_1157_td -1 - -@performance_1158_td -BenchA: Init - -@performance_1159_td -ms - -@performance_1160_td -12996 - -@performance_1161_td -14720 - -@performance_1162_td -24722 - -@performance_1163_td -26375 - -@performance_1164_td -26060 - -@performance_1165_td -BenchA: Transactions - -@performance_1166_td -ms - -@performance_1167_td -10134 - -@performance_1168_td -10250 - -@performance_1169_td -18452 - -@performance_1170_td -21453 - -@performance_1171_td -15877 - -@performance_1172_td -BenchA: Memory Usage - -@performance_1173_td -MB - -@performance_1174_td -13 - -@performance_1175_td -15 - -@performance_1176_td -9 - -@performance_1177_td -0 - -@performance_1178_td -1 - -@performance_1179_td -BenchB: Init - -@performance_1180_td -ms - -@performance_1181_td -15264 - -@performance_1182_td -16889 - -@performance_1183_td -28546 - -@performance_1184_td -31610 - -@performance_1185_td -29747 - -@performance_1186_td -BenchB: Transactions - -@performance_1187_td -ms - -@performance_1188_td -3017 - -@performance_1189_td -3376 - -@performance_1190_td -1842 - -@performance_1191_td -2771 - -@performance_1192_td -1433 - -@performance_1193_td -BenchB: Memory Usage - -@performance_1194_td -MB - -@performance_1195_td -17 - -@performance_1196_td -12 - -@performance_1197_td 
-11 - -@performance_1198_td -1 - -@performance_1199_td -1 - -@performance_1200_td -BenchC: Init - -@performance_1201_td -ms - -@performance_1202_td -14020 - -@performance_1203_td -10407 - -@performance_1204_td -17655 - -@performance_1205_td -19520 - -@performance_1206_td -17532 - -@performance_1207_td -BenchC: Transactions - -@performance_1208_td -ms - -@performance_1209_td -5076 - -@performance_1210_td -3160 - -@performance_1211_td -6411 - -@performance_1212_td -6063 - -@performance_1213_td -4530 - -@performance_1214_td -BenchC: Memory Usage - -@performance_1215_td -MB - -@performance_1216_td -19 - -@performance_1217_td -21 - -@performance_1218_td -11 - -@performance_1219_td -1 - -@performance_1220_td -1 - -@performance_1221_td -Executed statements - -@performance_1222_td -# - -@performance_1223_td -1930995 - -@performance_1224_td -1930995 - -@performance_1225_td -1930995 - -@performance_1226_td -1930995 - -@performance_1227_td -1930995 - -@performance_1228_td -Total time - -@performance_1229_td -ms - -@performance_1230_td -117049 - -@performance_1231_td -114777 - -@performance_1232_td -244803 - -@performance_1233_td -249215 - -@performance_1234_td -188446 - -@performance_1235_td -Statements per second - -@performance_1236_td -# - -@performance_1237_td -16497 - -@performance_1238_td -16823 - -@performance_1239_td -7887 - -@performance_1240_td -7748 - -@performance_1241_td -10246 - -@performance_1242_h3 -Benchmark Results and Comments - -@performance_1243_h4 -H2 - -@performance_1244_p - Version 1.4.177 (2014-04-12) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. The advantage of buffering is: there is no limit on the result set size. - -@performance_1245_h4 -HSQLDB - -@performance_1246_p - Version 2.3.2 was used for the test. 
Cached tables are used in this test (hsqldb.default_table_type=cached), and the write delay is 1 second (SET WRITE_DELAY 1). - -@performance_1247_h4 -Derby - -@performance_1248_p - Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified: leaving autocommit on is a problem for Derby. If it is switched off during the whole test, the results are about 20% better for Derby. Derby calls FileChannel.force(false), but only twice per log file (not on each commit). Disabling this call improves performance for Derby by about 2%. Unlike H2, Derby does not call FileDescriptor.sync() on each checkpoint. Derby supports a testing mode (system property derby.system.durability=test) where durability is disabled. According to the documentation, this setting should be used for testing only, as the database may not recover after a crash. Enabling this setting improves performance by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less than half as fast as H2 in default mode. - -@performance_1249_h4 -PostgreSQL - -@performance_1250_p - Version 9.1.5 was used for the test. The following options where changed in postgresql.conf: fsync = off, commit_delay = 1000. PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. - -@performance_1251_h4 -MySQL - -@performance_1252_p - Version 5.1.65-log was used for the test. MySQL was run with the InnoDB backend. The setting innodb_flush_log_at_trx_commit (found in the my.ini / my.cnf file) was set to 0. Otherwise (and by default), MySQL is slow (around 140 statements per second in this test) because it tries to flush the data to disk for each commit. 
For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Too bad this setting is not listed in the configuration wizard, and it always overwritten when using the wizard. You need to change this setting manually in the file my.ini / my.cnf, and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. - -@performance_1253_h4 -Firebird - -@performance_1254_p - Firebird 1.5 (default installation) was tested, but the results are not published currently. It is possible to run the performance test with the Firebird database, and any information on how to configure Firebird for higher performance are welcome. - -@performance_1255_h4 -Why Oracle / MS SQL Server / DB2 are Not Listed - -@performance_1256_p - The license of these databases does not allow to publish benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. SQLite was not tested because the JDBC driver doesn't support transactions. - -@performance_1257_h3 -About this Benchmark - -@performance_1258_h4 -How to Run - -@performance_1259_p - This test was as follows: - -@performance_1260_h4 -Separate Process per Database - -@performance_1261_p - For each database, a new process is started, to ensure the previous test does not impact the current test. - -@performance_1262_h4 -Number of Connections - -@performance_1263_p - This is mostly a single-connection benchmark. BenchB uses multiple connections; the other tests use one connection. - -@performance_1264_h4 -Real-World Tests - -@performance_1265_p - Good benchmarks emulate real-world use cases. This benchmark includes 4 test cases: BenchSimple uses one table and many small updates / deletes. BenchA is similar to the TPC-A test, but single connection / single threaded (see also: www.tpc.org). 
BenchB is similar to the TPC-B test, using multiple connections (one thread per connection). BenchC is similar to the TPC-C test, but single connection / single threaded. - -@performance_1266_h4 -Comparing Embedded with Server Databases - -@performance_1267_p - This is mainly a benchmark for embedded databases (where the application runs in the same virtual machine as the database engine). However MySQL and PostgreSQL are not Java databases and cannot be embedded into a Java application. For the Java databases, both embedded and server modes are tested. - -@performance_1268_h4 -Test Platform - -@performance_1269_p - This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. The JVM used is Sun JDK 1.6. - -@performance_1270_h4 -Multiple Runs - -@performance_1271_p - When a Java benchmark is run first, the code is not fully compiled and therefore runs slower than when running multiple times. A benchmark should always run the same test multiple times and ignore the first run(s). This benchmark runs three times, but only the last run is measured. - -@performance_1272_h4 -Memory Usage - -@performance_1273_p - It is not enough to measure the time taken, the memory usage is important as well. Performance can be improved by using a bigger cache, but the amount of memory is limited. HSQLDB tables are kept fully in memory by default; this benchmark uses 'disk based' tables for all databases. Unfortunately, it is not so easy to calculate the memory usage of PostgreSQL and MySQL, because they run in a different process than the test. This benchmark currently does not print memory usage of those databases. - -@performance_1274_h4 -Delayed Operations - -@performance_1275_p - Some databases delay some operations (for example flushing the buffers) until after the benchmark is run. This benchmark waits between each database tested, and each database runs in a different process (sequentially). 
- -@performance_1276_h4 -Transaction Commit / Durability - -@performance_1277_p - Durability means transaction committed to the database will not be lost. Some databases (for example MySQL) try to enforce this by default by calling fsync() to flush the buffers, but most hard drives don't actually flush all data. Calling the method slows down transaction commit a lot, but doesn't always make data durable. When comparing the results, it is important to think about the effect. Many database suggest to 'batch' operations when possible. This benchmark switches off autocommit when loading the data, and calls commit after each 1000 inserts. However many applications need 'short' transactions at runtime (a commit after each update). This benchmark commits after each update / delete in the simple benchmark, and after each business transaction in the other benchmarks. For databases that support delayed commits, a delay of one second is used. - -@performance_1278_h4 -Using Prepared Statements - -@performance_1279_p - Wherever possible, the test cases use prepared statements. - -@performance_1280_h4 -Currently Not Tested: Startup Time - -@performance_1281_p - The startup time of a database engine is important as well for embedded use. This time is not measured currently. Also, not tested is the time used to create a database and open an existing database. Here, one (wrapper) connection is opened at the start, and for each step a new connection is opened and then closed. - -@performance_1282_h2 -PolePosition Benchmark - -@performance_1283_p - The PolePosition is an open source benchmark. The algorithms are all quite simple. It was developed / sponsored by db4o. This test was not run for a longer time, so please be aware that the results below are for older database versions (H2 version 1.1, HSQLDB 1.8, Java 1.4). 
- -@performance_1284_th -Test Case - -@performance_1285_th -Unit - -@performance_1286_th -H2 - -@performance_1287_th -HSQLDB - -@performance_1288_th -MySQL - -@performance_1289_td -Melbourne write - -@performance_1290_td -ms - -@performance_1291_td -369 - -@performance_1292_td -249 - -@performance_1293_td -2022 - -@performance_1294_td -Melbourne read - -@performance_1295_td -ms - -@performance_1296_td -47 - -@performance_1297_td -49 - -@performance_1298_td -93 - -@performance_1299_td -Melbourne read_hot - -@performance_1300_td -ms - -@performance_1301_td -24 - -@performance_1302_td -43 - -@performance_1303_td -95 - -@performance_1304_td -Melbourne delete - -@performance_1305_td -ms - -@performance_1306_td -147 - -@performance_1307_td -133 - -@performance_1308_td -176 - -@performance_1309_td -Sepang write - -@performance_1310_td -ms - -@performance_1311_td -965 - -@performance_1312_td -1201 - -@performance_1313_td -3213 - -@performance_1314_td -Sepang read - -@performance_1315_td -ms - -@performance_1316_td -765 - -@performance_1317_td -948 - -@performance_1318_td -3455 - -@performance_1319_td -Sepang read_hot - -@performance_1320_td -ms - -@performance_1321_td -789 - -@performance_1322_td -859 - -@performance_1323_td -3563 - -@performance_1324_td -Sepang delete - -@performance_1325_td -ms - -@performance_1326_td -1384 - -@performance_1327_td -1596 - -@performance_1328_td -6214 - -@performance_1329_td -Bahrain write - -@performance_1330_td -ms - -@performance_1331_td -1186 - -@performance_1332_td -1387 - -@performance_1333_td -6904 - -@performance_1334_td -Bahrain query_indexed_string - -@performance_1335_td -ms - -@performance_1336_td -336 - -@performance_1337_td -170 - -@performance_1338_td -693 - -@performance_1339_td -Bahrain query_string - -@performance_1340_td -ms - -@performance_1341_td -18064 - -@performance_1342_td -39703 - -@performance_1343_td -41243 - -@performance_1344_td -Bahrain query_indexed_int - -@performance_1345_td -ms - -@performance_1346_td 
-104 - -@performance_1347_td -134 - -@performance_1348_td -678 - -@performance_1349_td -Bahrain update - -@performance_1350_td -ms - -@performance_1351_td -191 - -@performance_1352_td -87 - -@performance_1353_td -159 - -@performance_1354_td -Bahrain delete - -@performance_1355_td -ms - -@performance_1356_td -1215 - -@performance_1357_td -729 - -@performance_1358_td -6812 - -@performance_1359_td -Imola retrieve - -@performance_1360_td -ms - -@performance_1361_td -198 - -@performance_1362_td -194 - -@performance_1363_td -4036 - -@performance_1364_td -Barcelona write - -@performance_1365_td -ms - -@performance_1366_td -413 - -@performance_1367_td -832 - -@performance_1368_td -3191 - -@performance_1369_td -Barcelona read - -@performance_1370_td -ms - -@performance_1371_td -119 - -@performance_1372_td -160 - -@performance_1373_td -1177 - -@performance_1374_td -Barcelona query - -@performance_1375_td -ms - -@performance_1376_td -20 - -@performance_1377_td -5169 - -@performance_1378_td -101 - -@performance_1379_td -Barcelona delete - -@performance_1380_td -ms - -@performance_1381_td -388 - -@performance_1382_td -319 - -@performance_1383_td -3287 - -@performance_1384_td -Total - -@performance_1385_td -ms - -@performance_1386_td -26724 - -@performance_1387_td -53962 - -@performance_1388_td -87112 - -@performance_1389_p - There are a few problems with the PolePosition test: - -@performance_1390_li - HSQLDB uses in-memory tables by default while H2 uses persistent tables. The HSQLDB version included in PolePosition does not support changing this, so you need to replace poleposition-0.20/lib/hsqldb.jar with a newer version (for example hsqldb-1.8.0.7.jar), and then use the setting hsqldb.connecturl=jdbc:hsqldb:file:data/hsqldb/dbbench2;hsqldb.default_table_type=cached;sql.enforce_size=true in the file Jdbc.properties. - -@performance_1391_li -HSQLDB keeps the database open between tests, while H2 closes the database (losing all the cache). 
To change that, use the database URL jdbc:h2:file:data/h2/dbbench;DB_CLOSE_DELAY=-1 - -@performance_1392_li -The amount of cache memory is quite important, specially for the PolePosition test. Unfortunately, the PolePosition test does not take this into account. - -@performance_1393_h2 -Database Performance Tuning - -@performance_1394_h3 -Keep Connections Open or Use a Connection Pool - -@performance_1395_p - If your application opens and closes connections a lot (for example, for each request), you should consider using a connection pool. Opening a connection using DriverManager.getConnection is specially slow if the database is closed. By default the database is closed if the last connection is closed. - -@performance_1396_p - If you open and close connections a lot but don't want to use a connection pool, consider keeping a 'sentinel' connection open for as long as the application runs, or use delayed database closing. See also Closing a database. - -@performance_1397_h3 -Use a Modern JVM - -@performance_1398_p - Newer JVMs are faster. Upgrading to the latest version of your JVM can provide a "free" boost to performance. Switching from the default Client JVM to the Server JVM using the -server command-line option improves performance at the cost of a slight increase in start-up time. - -@performance_1399_h3 -Virus Scanners - -@performance_1400_p - Some virus scanners scan files every time they are accessed. It is very important for performance that database files are not scanned for viruses. The database engine never interprets the data stored in the files as programs, that means even if somebody would store a virus in a database file, this would be harmless (when the virus does not run, it cannot spread). Some virus scanners allow to exclude files by suffix. Ensure files ending with .db are not scanned. 
- -@performance_1401_h3 -Using the Trace Options - -@performance_1402_p - If the performance hot spots are in the database engine, in many cases the performance can be optimized by creating additional indexes, or changing the schema. Sometimes the application does not directly generate the SQL statements, for example if an O/R mapping tool is used. To view the SQL statements and JDBC API calls, you can use the trace options. For more information, see Using the Trace Options. - -@performance_1403_h3 -Index Usage - -@performance_1404_p - This database uses indexes to improve the performance of SELECT, UPDATE, DELETE. If a column is used in the WHERE clause of a query, and if an index exists on this column, then the index can be used. Multi-column indexes are used if all or the first columns of the index are used. Both equality lookup and range scans are supported. Indexes are used to order result sets, but only if the condition uses the same index or no index at all. The results are sorted in memory if required. Indexes are created automatically for primary key and unique constraints. Indexes are also created for foreign key constraints, if required. For other columns, indexes need to be created manually using the CREATE INDEX statement. - -@performance_1405_h3 -Index Hints - -@performance_1406_p - If you have determined that H2 is not using the optimal index for your query, you can use index hints to force H2 to use specific indexes. - -@performance_1407_p -Only indexes in the list will be used when choosing an index to use on the given table. There is no significance to order in this list. - -@performance_1408_p - It is possible that no index in the list is chosen, in which case a full table scan will be used. - -@performance_1409_p -An empty list of index names forces a full table scan to be performed. - -@performance_1410_p -Each index in the list must exist. 
- -@performance_1411_h3 -How Data is Stored Internally - -@performance_1412_p - For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT, then the data of the table is organized in this way. This is sometimes also called a "clustered index" or "index organized table". - -@performance_1413_p - H2 internally stores table data and indexes in the form of b-trees. Each b-tree stores entries as a list of unique keys (one or more columns) and data (zero or more columns). The table data is always organized in the form of a "data b-tree" with a single column key of type long. If a single column primary key of type BIGINT, INT, SMALLINT, TINYINT is specified when creating the table (or just after creating the table, but before inserting any rows), then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, then a hidden auto-increment column of type BIGINT is added to the table, which is used as the key for the data b-tree. All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB columns, which are stored externally). - -@performance_1414_p - For each additional index, one new "index b-tree" is created. The key of this b-tree consists of the indexed columns, plus the key of the data b-tree. If a primary key is created after the table has been created, or if the primary key contains multiple column, or if the primary key is not of the data types listed above, then the primary key is stored in a new index b-tree. - -@performance_1415_h3 -Optimizer - -@performance_1416_p - This database uses a cost based optimizer. For simple and queries and queries with medium complexity (less than 7 tables in the join), the expected cost (running time) of all possible plans is calculated, and the plan with the lowest cost is used. 
For more complex queries, the algorithm first tries all possible combinations for the first few tables, and the remaining tables are added using a greedy algorithm (this works well for most joins). Afterwards a genetic algorithm is used to test at most 2000 distinct plans. Only left-deep plans are evaluated. - -@performance_1417_h3 -Expression Optimization - -@performance_1418_p - After the statement is parsed, all expressions are simplified automatically if possible. Operations are evaluated only once if all parameters are constant. Functions are also optimized, but only if the function is constant (always returns the same result for the same parameter values). If the WHERE clause is always false, then the table is not accessed at all. - -@performance_1419_h3 -COUNT(*) Optimization - -@performance_1420_p - If the query only counts all rows of a table, then the data is not accessed. However, this is only possible if no WHERE clause is used, that means it only works for queries of the form SELECT COUNT(*) FROM table. - -@performance_1421_h3 -Updating Optimizer Statistics / Column Selectivity - -@performance_1422_p - When executing a query, at most one index per join can be used. If the same table is joined multiple times, for each join only one index is used (the same index could be used for both joins, or each join could use a different index). Example: for the query SELECT * FROM TEST T1, TEST T2 WHERE T1.NAME='A' AND T2.ID=T1.ID, two indexes can be used, in this case the index on NAME for T1 and the index on ID for T2. - -@performance_1423_p - If a table has multiple indexes, sometimes more than one index could be used. Example: if there is a table TEST(ID, NAME, FIRSTNAME) and an index on each column, then two indexes could be used for the query SELECT * FROM TEST WHERE NAME='A' AND FIRSTNAME='B', the index on NAME or the index on FIRSTNAME. It is not possible to use both indexes at the same time. Which index is used depends on the selectivity of the column. 
The selectivity describes the 'uniqueness' of values in a column. A selectivity of 100 means each value appears only once, and a selectivity of 1 means the same value appears in many or most rows. For the query above, the index on NAME should be used if the table contains more distinct names than first names. - -@performance_1424_p - The SQL statement ANALYZE can be used to automatically estimate the selectivity of the columns in the tables. This command should be run from time to time to improve the query plans generated by the optimizer. - -@performance_1425_h3 -In-Memory (Hash) Indexes - -@performance_1426_p - Using in-memory indexes, specially in-memory hash indexes, can speed up queries and data manipulation. - -@performance_1427_p -In-memory indexes are automatically used for in-memory databases, but can also be created for persistent databases using CREATE MEMORY TABLE. In many cases, the rows itself will also be kept in-memory. Please note this may cause memory problems for large tables. - -@performance_1428_p - In-memory hash indexes are backed by a hash table and are usually faster than regular indexes. However, hash indexes only supports direct lookup (WHERE ID = ?) but not range scan (WHERE ID < ?). To use hash indexes, use HASH as in: CREATE UNIQUE HASH INDEX and CREATE TABLE ...(ID INT PRIMARY KEY HASH,...). - -@performance_1429_h3 -Use Prepared Statements - -@performance_1430_p - If possible, use prepared statements with parameters. - -@performance_1431_h3 -Prepared Statements and IN(...) - -@performance_1432_p - Avoid generating SQL statements with a variable size IN(...) list. Instead, use a prepared statement with arrays as in the following example: - -@performance_1433_h3 -Optimization Examples - -@performance_1434_p - See src/test/org/h2/samples/optimizations.sql for a few examples of queries that benefit from special optimizations built into the database. 
- -@performance_1435_h3 -Cache Size and Type - -@performance_1436_p - By default the cache size of H2 is quite small. Consider using a larger cache size, or enable the second level soft reference cache. See also Cache Settings. - -@performance_1437_h3 -Data Types - -@performance_1438_p - Each data type has different storage and performance characteristics: - -@performance_1439_li -The DECIMAL/NUMERIC type is slower and requires more storage than the REAL and DOUBLE types. - -@performance_1440_li -Text types are slower to read, write, and compare than numeric types and generally require more storage. - -@performance_1441_li -See Large Objects for information on BINARY vs. BLOB and VARCHAR vs. CLOB performance. - -@performance_1442_li -Parsing and formatting takes longer for the TIME, DATE, and TIMESTAMP types than the numeric types. - -@performance_1443_code -SMALLINT/TINYINT/BOOLEAN - -@performance_1444_li - are not significantly smaller or faster to work with than INTEGER in most modes. - -@performance_1445_h3 -Sorted Insert Optimization - -@performance_1446_p - To reduce disk space usage and speed up table creation, an optimization for sorted inserts is available. When used, b-tree pages are split at the insertion point. To use this optimization, add SORTED before the SELECT statement: - -@performance_1447_h2 -Using the Built-In Profiler - -@performance_1448_p - A very simple Java profiler is built-in. To use it, use the following template: - -@performance_1449_h2 -Application Profiling - -@performance_1450_h3 -Analyze First - -@performance_1451_p - Before trying to optimize performance, it is important to understand where the problem is (what part of the application is slow). Blind optimization or optimization based on guesses should be avoided, because usually it is not an efficient strategy. There are various ways to analyze an application. Sometimes two implementations can be compared using System.currentTimeMillis(). 
But this does not work for complex applications with many modules, and for memory problems. - -@performance_1452_p - A simple way to profile an application is to use the built-in profiling tool of java. Example: - -@performance_1453_p - Unfortunately, it is only possible to profile the application from start to end. Another solution is to create a number of full thread dumps. To do that, first run jps -l to get the process id, and then run jstack <pid> or kill -QUIT <pid> (Linux) or press Ctrl+C (Windows). - -@performance_1454_p - A simple profiling tool is included in H2. To use it, the application needs to be changed slightly. Example: - -@performance_1455_p - The profiler is built into the H2 Console tool, to analyze databases that open slowly. To use it, run the H2 Console, and then click on 'Test Connection'. Afterwards, click on "Test successful" and you get the most common stack traces, which helps to find out why it took so long to connect. You will only get the stack traces if opening the database took more than a few seconds. - -@performance_1456_h2 -Database Profiling - -@performance_1457_p - The ConvertTraceFile tool generates SQL statement statistics at the end of the SQL script file. The format used is similar to the profiling data generated when using java -Xrunhprof. For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE=2). The easiest way to set the trace level is to append the setting to the database URL, for example: jdbc:h2:~/test;TRACE_LEVEL_FILE=2 or jdbc:h2:tcp://localhost/~/test;TRACE_LEVEL_FILE=2. As an example, execute the following script using the H2 Console: - -@performance_1458_p - After running the test case, convert the .trace.db file using the ConvertTraceFile tool. The trace file is located in the same directory as the database file. 
- -@performance_1459_p - The generated file test.sql will contain the SQL statements as well as the following profiling data (results vary): - -@performance_1460_h2 -Statement Execution Plans - -@performance_1461_p - The SQL statement EXPLAIN displays the indexes and optimizations the database uses for a statement. The following statements support EXPLAIN: SELECT, UPDATE, DELETE, MERGE, INSERT. The following query shows that the database uses the primary key index to search for rows: - -@performance_1462_p - For joins, the tables in the execution plan are sorted in the order they are processed. The following query shows the database first processes the table INVOICE (using the primary key). For each row, it will additionally check that the value of the column AMOUNT is larger than zero, and for those rows the database will search in the table CUSTOMER (using the primary key). The query plan contains some redundancy so it is a valid statement. - -@performance_1463_h3 -Displaying the Scan Count - -@performance_1464_code -EXPLAIN ANALYZE - -@performance_1465_p - additionally shows the scanned rows per table and pages read from disk per table or index. This will actually execute the query, unlike EXPLAIN which only prepares it. The following query scanned 1000 rows, and to do that had to read 85 pages from the data area of the table. Running the query twice will not list the pages read from disk, because they are now in the cache. The tableScan means this query doesn't use an index. - -@performance_1466_p - The cache will prevent the pages from being read twice. H2 reads all columns of the row unless only the columns in the index are read. Except for large CLOB and BLOB, which are not stored in the table. - -@performance_1467_h3 -Special Optimizations - -@performance_1468_p - For certain queries, the database doesn't need to read all rows, or doesn't need to sort the result even if ORDER BY is used. 
- -@performance_1469_p - For queries of the form SELECT COUNT(*), MIN(ID), MAX(ID) FROM TEST, the query plan includes the line /* direct lookup */ if the data can be read from an index. - -@performance_1470_p - For queries of the form SELECT DISTINCT CUSTOMER_ID FROM INVOICE, the query plan includes the line /* distinct */ if there is an non-unique or multi-column index on this column, and if this column has a low selectivity. - -@performance_1471_p - For queries of the form SELECT * FROM TEST ORDER BY ID, the query plan includes the line /* index sorted */ to indicate there is no separate sorting required. - -@performance_1472_p - For queries of the form SELECT * FROM TEST GROUP BY ID ORDER BY ID, the query plan includes the line /* group sorted */ to indicate there is no separate sorting required. - -@performance_1473_h2 -How Data is Stored and How Indexes Work - -@performance_1474_p - Internally, each row in a table is identified by a unique number, the row id. The rows of a table are stored with the row id as the key. The row id is a number of type long. If a table has a single column primary key of type INT or BIGINT, then the value of this column is the row id, otherwise the database generates the row id automatically. There is a (non-standard) way to access the row id: using the _ROWID_ pseudo-column: - -@performance_1475_p - The data is stored in the database as follows: - -@performance_1476_th -_ROWID_ - -@performance_1477_th -FIRST_NAME - -@performance_1478_th -NAME - -@performance_1479_th -CITY - -@performance_1480_th -PHONE - -@performance_1481_td -1 - -@performance_1482_td -John - -@performance_1483_td -Miller - -@performance_1484_td -Berne - -@performance_1485_td -123 456 789 - -@performance_1486_td -2 - -@performance_1487_td -Philip - -@performance_1488_td -Jones - -@performance_1489_td -Berne - -@performance_1490_td -123 012 345 - -@performance_1491_p - Access by row id is fast because the data is sorted by this key. 
Please note the row id is not available until after the row was added (that means, it can not be used in computed columns or constraints). If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT: - -@performance_1492_h3 -Indexes - -@performance_1493_p - An index internally is basically just a table that contains the indexed column(s), plus the row id: - -@performance_1494_p - In the index, the data is sorted by the indexed columns. So this index contains the following data: - -@performance_1495_th -CITY - -@performance_1496_th -NAME - -@performance_1497_th -FIRST_NAME - -@performance_1498_th -_ROWID_ - -@performance_1499_td -Berne - -@performance_1500_td -Jones - -@performance_1501_td -Philip - -@performance_1502_td -2 - -@performance_1503_td -Berne - -@performance_1504_td -Miller - -@performance_1505_td -John - -@performance_1506_td -1 - -@performance_1507_p - When the database uses an index to query the data, it searches the index for the given data, and (if required) reads the remaining columns in the main data table (retrieved using the row id). An index on city, name, and first name (multi-column index) allows to quickly search for rows when the city, name, and first name are known. If only the city and name, or only the city is known, then this index is also used (so creating an additional index on just the city is not needed). This index is also used when reading all rows, sorted by the indexed columns. 
However, if only the first name is known, then this index is not used: - -@performance_1508_p - If your application often queries the table for a phone number, then it makes sense to create an additional index on it: - -@performance_1509_p - This index contains the phone number, and the row id: - -@performance_1510_th -PHONE - -@performance_1511_th -_ROWID_ - -@performance_1512_td -123 012 345 - -@performance_1513_td -2 - -@performance_1514_td -123 456 789 - -@performance_1515_td -1 - -@performance_1516_h3 -Using Multiple Indexes - -@performance_1517_p - Within a query, only one index per logical table is used. Using the condition PHONE = '123 567 789' OR CITY = 'Berne' would use a table scan instead of first using the index on the phone number and then the index on the city. It makes sense to write two queries and combine them using UNION. In this case, each individual query uses a different index: - -@performance_1518_h2 -Fast Database Import - -@performance_1519_p - To speed up large imports, consider using the following options temporarily: - -@performance_1520_code -SET LOG 0 - -@performance_1521_li - (disabling the transaction log) - -@performance_1522_code -SET CACHE_SIZE - -@performance_1523_li - (a large cache is faster) - -@performance_1524_code -SET LOCK_MODE 0 - -@performance_1525_li - (disable locking) - -@performance_1526_code -SET UNDO_LOG 0 - -@performance_1527_li - (disable the session undo log) - -@performance_1528_p - These options can be set in the database URL: jdbc:h2:~/test;LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0. Most of those options are not recommended for regular use, that means you need to reset them after use. - -@performance_1529_p - If you have to import a lot of rows, use a PreparedStatement or use CSV import. Please note that CREATE TABLE(...) ... AS SELECT ... is faster than CREATE TABLE(...); INSERT INTO ... SELECT .... 
- -@quickstart_1000_h1 -Quickstart - -@quickstart_1001_a - Embedding H2 in an Application - -@quickstart_1002_a - The H2 Console Application - -@quickstart_1003_h2 -Embedding H2 in an Application - -@quickstart_1004_p - This database can be used in embedded mode, or in server mode. To use it in embedded mode, you need to: - -@quickstart_1005_li -Add the h2*.jar to the classpath (H2 does not have any dependencies) - -@quickstart_1006_li -Use the JDBC driver class: org.h2.Driver - -@quickstart_1007_li -The database URL jdbc:h2:~/test opens the database test in your user home directory - -@quickstart_1008_li -A new database is automatically created - -@quickstart_1009_h2 -The H2 Console Application - -@quickstart_1010_p - The Console lets you access a SQL database using a browser interface. - -@quickstart_1011_p - If you don't have Windows XP, or if something does not work as expected, please see the detailed description in the Tutorial. - -@quickstart_1012_h3 -Step-by-Step - -@quickstart_1013_h4 -Installation - -@quickstart_1014_p - Install the software using the Windows Installer (if you did not yet do that). - -@quickstart_1015_h4 -Start the Console - -@quickstart_1016_p - Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]: - -@quickstart_1017_p - A new console window appears: - -@quickstart_1018_p - Also, a new browser page should open with the URL http://localhost:8082. You may get a security warning from the firewall. If you don't want other computers in the network to access the database on your machine, you can let the firewall block these connections. Only local connections are required at this time. - -@quickstart_1019_h4 -Login - -@quickstart_1020_p - Select [Generic H2] and click [Connect]: - -@quickstart_1021_p - You are now logged in. - -@quickstart_1022_h4 -Sample - -@quickstart_1023_p - Click on the [Sample SQL Script]: - -@quickstart_1024_p - The SQL commands appear in the command area. 
- -@quickstart_1025_h4 -Execute - -@quickstart_1026_p - Click [Run] - -@quickstart_1027_p - On the left side, a new entry TEST is added below the database icon. The operations and results of the statements are shown below the script. - -@quickstart_1028_h4 -Disconnect - -@quickstart_1029_p - Click on [Disconnect]: - -@quickstart_1030_p - to close the connection. - -@quickstart_1031_h4 -End - -@quickstart_1032_p - Close the console window. For more information, see the Tutorial. - -@roadmap_1000_h1 -Roadmap - -@roadmap_1001_p - New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests. Of course, patches are always welcome, but are not always applied as is. See also Providing Patches. - -@roadmap_1002_h2 -Version 1.5.x: Planned Changes - -@roadmap_1003_li -Replace file password hash with file encryption key; validate encryption key when connecting. - -@roadmap_1004_li -Remove "set binary collation" feature. - -@roadmap_1005_li -Remove the encryption algorithm XTEA. - -@roadmap_1006_li -Disallow referencing other tables in a table (via constraints for example). - -@roadmap_1007_li -Remove PageStore features like compress_lob. - -@roadmap_1008_h2 -Version 1.4.x: Planned Changes - -@roadmap_1009_li -Change license to MPL 2.0. - -@roadmap_1010_li -Automatic migration from 1.3 databases to 1.4. - -@roadmap_1011_li -Option to disable the file name suffix somehow (issue 447). - -@roadmap_1012_h2 -Priority 1 - -@roadmap_1013_li -Bugfixes. - -@roadmap_1014_li -More tests with MULTI_THREADED=1 (and MULTI_THREADED with MVCC): Online backup (using the 'backup' statement). - -@roadmap_1015_li -Server side cursors. - -@roadmap_1016_h2 -Priority 2 - -@roadmap_1017_li -Support hints for the optimizer (which index to use, enforce the join order). - -@roadmap_1018_li -Full outer joins. - -@roadmap_1019_li -Access rights: remember the owner of an object. Create, alter and drop privileges. 
COMMENT: allow owner of object to change it. Issue 208: Access rights for schemas. - -@roadmap_1020_li -Test multi-threaded in-memory db access. - -@roadmap_1021_li -MySQL, MS SQL Server compatibility: support case sensitive (mixed case) identifiers without quotes. - -@roadmap_1022_li -Support GRANT SELECT, UPDATE ON [schemaName.] *. - -@roadmap_1023_li -Migrate database tool (also from other database engines). For Oracle, maybe use DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL. - -@roadmap_1024_li -Clustering: support mixed clustering mode (one embedded, others in server mode). - -@roadmap_1025_li -Clustering: reads should be randomly distributed (optional) or to a designated database on RAM (parameter: READ_FROM=3). - -@roadmap_1026_li -Window functions: RANK() and DENSE_RANK(), partition using OVER(). select *, count(*) over() as fullCount from ... limit 4; - -@roadmap_1027_li -PostgreSQL catalog: use BEFORE SELECT triggers instead of views over metadata tables. - -@roadmap_1028_li -Compatibility: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211. - -@roadmap_1029_li -Test very large databases and LOBs (up to 256 GB). - -@roadmap_1030_li -Store all temp files in the temp directory. - -@roadmap_1031_li -Don't use temp files, specially not deleteOnExit (bug 4513817: File.deleteOnExit consumes memory). Also to allow opening client / server (remote) connections when using LOBs. - -@roadmap_1032_li -Make DDL (Data Definition) operations transactional. - -@roadmap_1033_li -Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED). - -@roadmap_1034_li -Groovy Stored Procedures: http://groovy.codehaus.org/GSQL - -@roadmap_1035_li -Add a migration guide (list differences between databases). - -@roadmap_1036_li -Optimization: automatic index creation suggestion using the trace file? - -@roadmap_1037_li -Fulltext search Lucene: analyzer configuration, mergeFactor. 
- -@roadmap_1038_li -Compression performance: don't allocate buffers, compress / expand in to out buffer. - -@roadmap_1039_li -Rebuild index functionality to shrink index size and improve performance. - -@roadmap_1040_li -Console: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA). - -@roadmap_1041_li -Test performance again with SQL Server, Oracle, DB2. - -@roadmap_1042_li -Test with Spatial DB in a box / JTS: http://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification. - -@roadmap_1043_li -Write more tests and documentation for MVCC (Multi Version Concurrency Control). - -@roadmap_1044_li -Find a tool to view large text file (larger than 100 MB), with find, page up and down (like less), truncate before / after. - -@roadmap_1045_li -Implement, test, document XAConnection and so on. - -@roadmap_1046_li -Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption). - -@roadmap_1047_li -CHECK: find out what makes CHECK=TRUE slow, move to CHECK2. - -@roadmap_1048_li -Drop with invalidate views (so that source code is not lost). Check what other databases do exactly. - -@roadmap_1049_li -Index usage for (ID, NAME)=(1, 'Hi'); document. - -@roadmap_1050_li -Set a connection read only (Connection.setReadOnly) or using a connection parameter. - -@roadmap_1051_li -Access rights: finer grained access control (grant access for specific functions). - -@roadmap_1052_li -ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]). - -@roadmap_1053_li -Version check: docs / web console (using Javascript), and maybe in the library (using TCP/IP). - -@roadmap_1054_li -Web server classloader: override findResource / getResourceFrom. - -@roadmap_1055_li -Cost for embedded temporary view is calculated wrong, if result is constant. - -@roadmap_1056_li -Count index range query (count(*) where id between 10 and 20). - -@roadmap_1057_li -Performance: update in-place. 
- -@roadmap_1058_li -Clustering: when a database is back alive, automatically synchronize with the master (requires readable transaction log). - -@roadmap_1059_li -Database file name suffix: a way to use no or a different suffix (for example using a slash). - -@roadmap_1060_li -Eclipse plugin. - -@roadmap_1061_li -Asynchronous queries to support publish/subscribe: SELECT ... FOR READ WAIT [maxMillisToWait]. See also MS SQL Server "Query Notification". - -@roadmap_1062_li -Fulltext search (native): reader / tokenizer / filter. - -@roadmap_1063_li -Linked schema using CSV files: one schema for a directory of files; support indexes for CSV files. - -@roadmap_1064_li -iReport to support H2. - -@roadmap_1065_li -Include SMTP (mail) client (alert on cluster failure, low disk space,...). - -@roadmap_1066_li -Option for SCRIPT to only process one or a set of schemas or tables, and append to a file. - -@roadmap_1067_li -JSON parser and functions. - -@roadmap_1068_li -Copy database: tool with config GUI and batch mode, extensible (example: compare). - -@roadmap_1069_li -Document, implement tool for long running transactions using user-defined compensation statements. - -@roadmap_1070_li -Support SET TABLE DUAL READONLY. - -@roadmap_1071_li -GCJ: what is the state now? - -@roadmap_1072_li -Events for: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http://docs.openlinksw.com/virtuoso/fn_dbev_startup.html - -@roadmap_1073_li -Optimization: simpler log compression. - -@roadmap_1074_li -Support standard INFORMATION_SCHEMA tables, as defined in http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - specially KEY_COLUMN_USAGE: http://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif - -@roadmap_1075_li -Compatibility: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby: division by zero. HSQLDB: 0.0e1 / 0.0e1 is NaN. 
- -@roadmap_1076_li -Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R). - -@roadmap_1077_li -Custom class loader to reload functions on demand. - -@roadmap_1078_li -Test http://mysql-je.sourceforge.net/ - -@roadmap_1079_li -H2 Console: the webclient could support more features like phpMyAdmin. - -@roadmap_1080_li -Support Oracle functions: TO_NUMBER. - -@roadmap_1081_li -Work on the Java to C converter. - -@roadmap_1082_li -The HELP information schema can be directly exposed in the Console. - -@roadmap_1083_li -Maybe use the 0x1234 notation for binary fields, see MS SQL Server. - -@roadmap_1084_li -Support Oracle CONNECT BY in some way: http://www.adp-gmbh.ch/ora/sql/connect_by.html http://philip.greenspun.com/sql/trees.html - -@roadmap_1085_li -SQL Server 2005, Oracle: support COUNT(*) OVER(). See http://www.orafusion.com/art_anlytc.htm - -@roadmap_1086_li -SQL 2003: http://www.wiscorp.com/sql_2003_standard.zip - -@roadmap_1087_li -Version column (number/sequence and timestamp based). - -@roadmap_1088_li -Optimize getGeneratedKey: send last identity after each execute (server). - -@roadmap_1089_li -Test and document UPDATE TEST SET (ID, NAME) = (SELECT ID*10, NAME || '!' FROM TEST T WHERE T.ID=TEST.ID). - -@roadmap_1090_li -Max memory rows / max undo log size: use block count / row size not row count. - -@roadmap_1091_li -Implement point-in-time recovery. - -@roadmap_1092_li -Support PL/SQL (programming language / control flow statements). - -@roadmap_1093_li -LIKE: improved version for larger texts (currently using naive search). - -@roadmap_1094_li -Throw an exception when the application calls getInt on a Long (optional). - -@roadmap_1095_li -Default date format for input and output (local date constants). - -@roadmap_1096_li -Document ROWNUM usage for reports: SELECT ROWNUM, * FROM (subquery). 
- -@roadmap_1097_li -File system that writes to two file systems (replication, replicating file system). - -@roadmap_1098_li -Standalone tool to get relevant system properties and add it to the trace output. - -@roadmap_1099_li -Support 'call proc(1=value)' (PostgreSQL, Oracle). - -@roadmap_1100_li -Console: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?). - -@roadmap_1101_li -Console: autocomplete Ctrl+Space inserts template. - -@roadmap_1102_li -Option to encrypt .trace.db file. - -@roadmap_1103_li -Auto-Update feature for database, .jar file. - -@roadmap_1104_li -ResultSet SimpleResultSet.readFromURL(String url): id varchar, state varchar, released timestamp. - -@roadmap_1105_li -Partial indexing (see PostgreSQL). - -@roadmap_1106_li -Add GUI to build a custom version (embedded, fulltext,...) using build flags. - -@roadmap_1107_li -http://rubyforge.org/projects/hypersonic/ - -@roadmap_1108_li -Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app). - -@roadmap_1109_li -Table order: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility). - -@roadmap_1110_li -Backup tool should work with other databases as well. - -@roadmap_1111_li -Console: -ifExists doesn't work for the console. Add a flag to disable other dbs. - -@roadmap_1112_li -Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess). - -@roadmap_1113_li -Java static code analysis: http://pmd.sourceforge.net/ - -@roadmap_1114_li -Java static code analysis: http://www.eclipse.org/tptp/ - -@roadmap_1115_li -Compatibility for CREATE SCHEMA AUTHORIZATION. - -@roadmap_1116_li -Implement Clob / Blob truncate and the remaining functionality. - -@roadmap_1117_li -Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ... - -@roadmap_1118_li -File locking: writing a system property to detect concurrent access from the same VM (different classloaders). 
- -@roadmap_1119_li -Pure SQL triggers (example: update parent table if the child table is changed). - -@roadmap_1120_li -Add H2 to Gem (Ruby install system). - -@roadmap_1121_li -Support linked JCR tables. - -@roadmap_1122_li -Native fulltext search: min word length; store word positions. - -@roadmap_1123_li -Add an option to the SCRIPT command to generate only portable / standard SQL. - -@roadmap_1124_li -Updatable views: create 'instead of' triggers automatically if possible (simple cases first). - -@roadmap_1125_li -Improve create index performance. - -@roadmap_1126_li -Compact databases without having to close the database (vacuum). - -@roadmap_1127_li -Implement more JDBC 4.0 features. - -@roadmap_1128_li -Support TRANSFORM / PIVOT as in MS Access. - -@roadmap_1129_li -SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...). - -@roadmap_1130_li -Support updatable views with join on primary keys (to extend a table). - -@roadmap_1131_li -Public interface for functions (not public static). - -@roadmap_1132_li -Support reading the transaction log. - -@roadmap_1133_li -Feature matrix as in i-net software. - -@roadmap_1134_li -Updatable result set on table without primary key or unique index. - -@roadmap_1135_li -Compatibility with Derby and PostgreSQL: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221. - -@roadmap_1136_li -Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString') - -@roadmap_1137_li -Support data type INTERVAL - -@roadmap_1138_li -Support nested transactions (possibly using savepoints internally). - -@roadmap_1139_li -Add a benchmark for bigger databases, and one for many users. - -@roadmap_1140_li -Compression in the result set over TCP/IP. - -@roadmap_1141_li -Support curtimestamp (like curtime, curdate). - -@roadmap_1142_li -Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options. 
- -@roadmap_1143_li -Release locks (shared or exclusive) on demand - -@roadmap_1144_li -Support OUTER UNION - -@roadmap_1145_li -Support parameterized views (similar to CSVREAD, but using just SQL for the definition) - -@roadmap_1146_li -A way (JDBC driver) to map an URL (jdbc:h2map:c1) to a connection object - -@roadmap_1147_li -Support dynamic linked schema (automatically adding/updating/removing tables) - -@roadmap_1148_li -Clustering: adding a node should be very fast and without interrupting clients (very short lock) - -@roadmap_1149_li -Compatibility: # is the start of a single line comment (MySQL) but date quote (Access). Mode specific - -@roadmap_1150_li -Run benchmarks with Android, Java 7, java -server - -@roadmap_1151_li -Optimizations: faster hash function for strings. - -@roadmap_1152_li -DatabaseEventListener: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality - -@roadmap_1153_li -Benchmark: add a graph to show how databases scale (performance/database size) - -@roadmap_1154_li -Implement a SQLData interface to map your data over to a custom object - -@roadmap_1155_li -In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers = true) - -@roadmap_1156_li -Support multiple directories (on different hard drives) for the same database - -@roadmap_1157_li -Server protocol: use challenge response authentication, but client sends hash(user+password) encrypted with response - -@roadmap_1158_li -Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server) - -@roadmap_1159_li -Support native XML data type - see http://en.wikipedia.org/wiki/SQL/XML - -@roadmap_1160_li -Support triggers with a string property or option: SpringTrigger, OSGITrigger - -@roadmap_1161_li -MySQL compatibility: update test1 t1, test2 t2 set t1.id = t2.id where t1.id = t2.id; - -@roadmap_1162_li -Ability to resize the cache array when resizing the cache - -@roadmap_1163_li 
-Time based cache writing (one second after writing the log) - -@roadmap_1164_li -Check state of H2 driver for DDLUtils: http://issues.apache.org/jira/browse/DDLUTILS-185 - -@roadmap_1165_li -Index usage for REGEXP LIKE. - -@roadmap_1166_li -Compatibility: add a role DBA (like ADMIN). - -@roadmap_1167_li -Better support multiple processors for in-memory databases. - -@roadmap_1168_li -Support N'text' - -@roadmap_1169_li -Support compatibility for jdbc:hsqldb:res: - -@roadmap_1170_li -HSQLDB compatibility: automatically convert to the next 'higher' data type. Example: cast(2000000000 as int) + cast(2000000000 as int); (HSQLDB: long; PostgreSQL: integer out of range) - -@roadmap_1171_li -Provide an Java SQL builder with standard and H2 syntax - -@roadmap_1172_li -Trace: write OS, file system, JVM,... when opening the database - -@roadmap_1173_li -Support indexes for views (probably requires materialized views) - -@roadmap_1174_li -Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters - -@roadmap_1175_li -Server: use one listener (detect if the request comes from an PG or TCP client) - -@roadmap_1176_li -Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200 - -@roadmap_1177_li -Sequence: PostgreSQL compatibility (rename, create) http://www.postgresql.org/docs/8.2/static/sql-altersequence.html - -@roadmap_1178_li -DISTINCT: support large result sets by sorting on all columns (additionally) and then removing duplicates. - -@roadmap_1179_li -Support a special trigger on all tables to allow building a transaction log reader. - -@roadmap_1180_li -File system with a background writer thread; test if this is faster - -@roadmap_1181_li -Better document the source code (high level documentation). - -@roadmap_1182_li -Support select * from dual a left join dual b on b.x=(select max(x) from dual) - -@roadmap_1183_li -Optimization: don't lock when the database is read-only - -@roadmap_1184_li -Issue 146: Support merge join. 
- -@roadmap_1185_li -Integrate spatial functions from http://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download - -@roadmap_1186_li -Cluster: hot deploy (adding a node at runtime). - -@roadmap_1187_li -Support DatabaseMetaData.insertsAreDetected: updatable result sets should detect inserts. - -@roadmap_1188_li -Oracle: support DECODE method (convert to CASE WHEN). - -@roadmap_1189_li -Native search: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping - -@roadmap_1190_li -Improve documentation of access rights. - -@roadmap_1191_li -Support opening a database that is in the classpath, maybe using a new file system. Workaround: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation(). - -@roadmap_1192_li -Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others). - -@roadmap_1193_li -Remember the user defined data type (domain) of a column. - -@roadmap_1194_li -MVCC: support multi-threaded kernel with multi-version concurrency. - -@roadmap_1195_li -Auto-server: add option to define the port range or list. - -@roadmap_1196_li -Support Jackcess (MS Access databases) - -@roadmap_1197_li -Built-in methods to write large objects (BLOB and CLOB): FILE_WRITE('test.txt', 'Hello World') - -@roadmap_1198_li -Improve time to open large databases (see mail 'init time for distributed setup') - -@roadmap_1199_li -Move Maven 2 repository from hsql.sf.net to h2database.sf.net - -@roadmap_1200_li -Java 1.5 tool: JdbcUtils.closeSilently(s1, s2,...) - -@roadmap_1201_li -Optimize A=? OR B=? to UNION if the cost is lower. - -@roadmap_1202_li -Javadoc: document design patterns used - -@roadmap_1203_li -Support custom collators, for example for natural sort (for text that contains numbers). 
- -@roadmap_1204_li -Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt) - -@roadmap_1205_li -Convert SQL-injection-2.txt to html document, include SQLInjection.java sample - -@roadmap_1206_li -Support OUT parameters in user-defined procedures. - -@roadmap_1207_li -Web site design: http://www.igniterealtime.org/projects/openfire/index.jsp - -@roadmap_1208_li -HSQLDB compatibility: Openfire server uses: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC - -@roadmap_1209_li -Translation: use ?? in help.csv - -@roadmap_1210_li -Translated .pdf - -@roadmap_1211_li -Recovery tool: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file - -@roadmap_1212_li -Issue 357: support getGeneratedKeys to return multiple rows when used with batch updates. This is supported by MySQL, but not Derby. Both PostgreSQL and HSQLDB don't support getGeneratedKeys. Also support it when using INSERT ... SELECT. - -@roadmap_1213_li -RECOVER=2 to backup the database, run recovery, open the database - -@roadmap_1214_li -Recovery should work with encrypted databases - -@roadmap_1215_li -Corruption: new error code, add help - -@roadmap_1216_li -Space reuse: after init, scan all storages and free those that don't belong to a live database object - -@roadmap_1217_li -Access rights: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects) - -@roadmap_1218_li -Support NOCACHE table option (Oracle). - -@roadmap_1219_li -Support table partitioning. - -@roadmap_1220_li -Add regular javadocs (using the default doclet, but another css) to the homepage. - -@roadmap_1221_li -The database should be kept open for a longer time when using the server mode. - -@roadmap_1222_li -Javadocs: for each tool, add a copy & paste sample in the class level. - -@roadmap_1223_li -Javadocs: add @author tags. 
- -@roadmap_1224_li -Fluent API for tools: Server.createTcpServer().setPort(9081).setPassword(password).start(); - -@roadmap_1225_li -MySQL compatibility: real SQL statement for DESCRIBE TEST - -@roadmap_1226_li -Use a default delay of 1 second before closing a database. - -@roadmap_1227_li -Write (log) to system table before adding to internal data structures. - -@roadmap_1228_li -Support direct lookup for MIN and MAX when using WHERE (see todo.txt / Direct Lookup). - -@roadmap_1229_li -Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case). - -@roadmap_1230_li -MVCC should not be memory bound (uncommitted data is kept in memory in the delta index; maybe using a regular b-tree index solves the problem). - -@roadmap_1231_li -Oracle compatibility: support NLS_DATE_FORMAT. - -@roadmap_1232_li -Support for Thread.interrupt to cancel running statements. - -@roadmap_1233_li -Cluster: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process). - -@roadmap_1234_li -H2 Console: support CLOB/BLOB download using a link. - -@roadmap_1235_li -Support flashback queries as in Oracle. - -@roadmap_1236_li -Import / Export of fixed with text files. - -@roadmap_1237_li -HSQLDB compatibility: automatic data type for SUM if value is the value is too big (by default use the same type as the data). - -@roadmap_1238_li -Improve the optimizer to select the right index for special cases: where id between 2 and 4 and booleanColumn - -@roadmap_1239_li -Linked tables: make hidden columns available (Oracle: rowid and ora_rowscn columns). - -@roadmap_1240_li -H2 Console: in-place autocomplete. - -@roadmap_1241_li -Support large databases: split database files to multiple directories / disks (similar to tablespaces). - -@roadmap_1242_li -H2 Console: support configuration option for fixed width (monospace) font. 
- -@roadmap_1243_li -Native fulltext search: support analyzers (specially for Chinese, Japanese). - -@roadmap_1244_li -Automatically compact databases from time to time (as a background process). - -@roadmap_1245_li -Test Eclipse DTP. - -@roadmap_1246_li -H2 Console: autocomplete: keep the previous setting - -@roadmap_1247_li -executeBatch: option to stop at the first failed statement. - -@roadmap_1248_li -Implement OLAP features as described here: http://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5 - -@roadmap_1249_li -Support Oracle ROWID (unique identifier for each row). - -@roadmap_1250_li -MySQL compatibility: alter table add index i(c), add constraint c foreign key(c) references t(c); - -@roadmap_1251_li -Server mode: improve performance for batch updates. - -@roadmap_1252_li -Applets: support read-only databases in a zip file (accessed as a resource). - -@roadmap_1253_li -Long running queries / errors / trace system table. - -@roadmap_1254_li -H2 Console should support JaQu directly. - -@roadmap_1255_li -Better document FTL_SEARCH, FTL_SEARCH_DATA. - -@roadmap_1256_li -Sequences: CURRVAL should be session specific. Compatibility with PostgreSQL. - -@roadmap_1257_li -Index creation using deterministic functions. - -@roadmap_1258_li -ANALYZE: for unique indexes that allow null, count the number of null. - -@roadmap_1259_li -MySQL compatibility: multi-table delete: DELETE .. FROM .. [,...] USING - See http://dev.mysql.com/doc/refman/5.0/en/delete.html - -@roadmap_1260_li -AUTO_SERVER: support changing IP addresses (disable a network while the database is open). - -@roadmap_1261_li -Avoid using java.util.Calendar internally because it's slow, complicated, and buggy. - -@roadmap_1262_li -Support TRUNCATE .. CASCADE like PostgreSQL. - -@roadmap_1263_li -Fulltext search: lazy result generation using SimpleRowSource. - -@roadmap_1264_li -Fulltext search: support alternative syntax: WHERE FTL_CONTAINS(name, 'hello'). 
- -@roadmap_1265_li -MySQL compatibility: support REPLACE, see http://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73. - -@roadmap_1266_li -MySQL compatibility: support INSERT INTO table SET column1 = value1, column2 = value2 - -@roadmap_1267_li -Docs: add a one line description for each functions and SQL statements at the top (in the link section). - -@roadmap_1268_li -Javadoc search: weight for titles should be higher ('random' should list Functions as the best match). - -@roadmap_1269_li -Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes. - -@roadmap_1270_li -Issue 50: Oracle compatibility: support calling 0-parameters functions without parenthesis. Make constants obsolete. - -@roadmap_1271_li -MySQL, HSQLDB compatibility: support where 'a'=1 (not supported by Derby, PostgreSQL) - -@roadmap_1272_li -Finer granularity for SLF4J trace - See http://code.google.com/p/h2database/issues/detail?id=62 - -@roadmap_1273_li -Add database creation date and time to the database. - -@roadmap_1274_li -Support ASSERTION. - -@roadmap_1275_li -MySQL compatibility: support comparing 1='a' - -@roadmap_1276_li -Support PostgreSQL lock modes: http://www.postgresql.org/docs/8.3/static/explicit-locking.html - -@roadmap_1277_li -PostgreSQL compatibility: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver. - -@roadmap_1278_li -RunScript should be able to read from system in (or quite mode for Shell). - -@roadmap_1279_li -Natural join: support select x from dual natural join dual. - -@roadmap_1280_li -Support using system properties in database URLs (may be a security problem). - -@roadmap_1281_li -Natural join: somehow support this: select a.x, b.x, x from dual a natural join dual b - -@roadmap_1282_li -Use the Java service provider mechanism to register file systems and function libraries. 
- -@roadmap_1283_li -MySQL compatibility: for auto_increment columns, convert 0 to next value (as when inserting NULL). - -@roadmap_1284_li -Optimization for multi-column IN: use an index if possible. Example: (A, B) IN((1, 2), (2, 3)). - -@roadmap_1285_li -Optimization for EXISTS: convert to inner join or IN(..) if possible. - -@roadmap_1286_li -Functions: support hashcode(value); cryptographic and fast - -@roadmap_1287_li -Serialized file lock: support long running queries. - -@roadmap_1288_li -Network: use 127.0.0.1 if other addresses don't work. - -@roadmap_1289_li -Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication. - -@roadmap_1290_li -Support reading JCR data: one table per node type; query table; cache option - -@roadmap_1291_li -OSGi: create a sample application, test, document. - -@roadmap_1292_li -help.csv: use complete examples for functions; run as test case. - -@roadmap_1293_li -Functions to calculate the memory and disk space usage of a table, a row, or a value. - -@roadmap_1294_li -Re-implement PooledConnection; use a lightweight connection object. - -@roadmap_1295_li -Doclet: convert tests in javadocs to a java class. - -@roadmap_1296_li -Doclet: format fields like methods, but support sorting by name and value. - -@roadmap_1297_li -Doclet: shrink the html files. - -@roadmap_1298_li -MySQL compatibility: support SET NAMES 'latin1' - See also http://code.google.com/p/h2database/issues/detail?id=56 - -@roadmap_1299_li -Allow to scan index backwards starting with a value (to better support ORDER BY DESC). - -@roadmap_1300_li -Java Service Wrapper: try http://yajsw.sourceforge.net/ - -@roadmap_1301_li -Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE. - -@roadmap_1302_li -Use a lazy and auto-close input stream (open resource when reading, close on eof). 
- -@roadmap_1303_li -Connection pool: 'reset session' command (delete temp tables, rollback, auto-commit true). - -@roadmap_1304_li -Improve SQL documentation, see http://www.w3schools.com/sql/ - -@roadmap_1305_li -MySQL compatibility: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL. - -@roadmap_1306_li -MS SQL Server compatibility: support DATEPART syntax. - -@roadmap_1307_li -Sybase/DB2/Oracle compatibility: support out parameters in stored procedures - See http://code.google.com/p/h2database/issues/detail?id=83 - -@roadmap_1308_li -Support INTERVAL data type (see Oracle and others). - -@roadmap_1309_li -Combine Server and Console tool (only keep Server). - -@roadmap_1310_li -Store the Lucene index in the database itself. - -@roadmap_1311_li -Support standard MERGE statement: http://en.wikipedia.org/wiki/Merge_%28SQL%29 - -@roadmap_1312_li -Oracle compatibility: support DECODE(x, ...). - -@roadmap_1313_li -MVCC: compare concurrent update behavior with PostgreSQL and Oracle. - -@roadmap_1314_li -HSQLDB compatibility: CREATE FUNCTION (maybe using a Function interface). - -@roadmap_1315_li -HSQLDB compatibility: support CALL "java.lang.Math.sqrt"(2.0) - -@roadmap_1316_li -Support comma as the decimal separator in the CSV tool. - -@roadmap_1317_li -Compatibility: Java functions with SQLJ Part1 http://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz - -@roadmap_1318_li -Compatibility: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation. - -@roadmap_1319_li -CACHE_SIZE: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache. - -@roadmap_1320_li -Support date/time/timestamp as documented in http://en.wikipedia.org/wiki/ISO_8601 - -@roadmap_1321_li -PostgreSQL compatibility: when in PG mode, treat BYTEA data like PG. - -@roadmap_1322_li -Support =ANY(array) as in PostgreSQL. 
See also http://www.postgresql.org/docs/8.0/interactive/arrays.html - -@roadmap_1323_li -IBM DB2 compatibility: support PREVIOUS VALUE FOR sequence. - -@roadmap_1324_li -Compatibility: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer). - -@roadmap_1325_li -Oracle compatibility: support CREATE SYNONYM table FOR schema.table. - -@roadmap_1326_li -FTP: document the server, including -ftpTask option to execute / kill remote processes - -@roadmap_1327_li -FTP: problems with multithreading? - -@roadmap_1328_li -FTP: implement SFTP / FTPS - -@roadmap_1329_li -FTP: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file). - -@roadmap_1330_li -More secure default configuration if remote access is enabled. - -@roadmap_1331_li -Improve database file locking (maybe use native file locking). The current approach seems to be problematic if the file system is on a remote share (see Google Group 'Lock file modification time is in the future'). - -@roadmap_1332_li -Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE. - -@roadmap_1333_li -Issue 107: Prefer using the ORDER BY index if LIMIT is used. - -@roadmap_1334_li -An index on (id, name) should be used for a query: select * from t where s=? order by i - -@roadmap_1335_li -Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL. - -@roadmap_1336_li -Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true). - -@roadmap_1337_li -Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2). - -@roadmap_1338_li -Fast alter table add column. - -@roadmap_1339_li -Improve concurrency for in-memory database operations. - -@roadmap_1340_li -Issue 122: Support for connection aliases for remote tcp connections. 
- -@roadmap_1341_li -Fast scrambling (strong encryption doesn't help if the password is included in the application). - -@roadmap_1342_li -H2 Console: support -webPassword to require a password to access preferences or shutdown. - -@roadmap_1343_li -Issue 126: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number. - -@roadmap_1344_li -Issue 127: Support activation/deactivation of triggers - -@roadmap_1345_li -Issue 130: Custom log event listeners - -@roadmap_1346_li -Issue 131: IBM DB2 compatibility: sysibm.sysdummy1 - -@roadmap_1347_li -Issue 132: Use Java enum trigger type. - -@roadmap_1348_li -Issue 134: IBM DB2 compatibility: session global variables. - -@roadmap_1349_li -Cluster: support load balance with values for each server / auto detect. - -@roadmap_1350_li -FTL_SET_OPTION(keyString, valueString) with key stopWords at first. - -@roadmap_1351_li -Pluggable access control mechanism. - -@roadmap_1352_li -Fulltext search (Lucene): support streaming CLOB data. - -@roadmap_1353_li -Document/example how to create and read an encrypted script file. - -@roadmap_1354_li -Check state of http://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins). - -@roadmap_1355_li -Fulltext search (Lucene): only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible. - -@roadmap_1356_li -Support a way to create or read compressed encrypted script files using an API. - -@roadmap_1357_li -Scripting language support (Javascript). - -@roadmap_1358_li -The network client should better detect if the server is not an H2 server and fail early. - -@roadmap_1359_li -H2 Console: support CLOB/BLOB upload. - -@roadmap_1360_li -Database file lock: detect hibernate / standby / very slow threads (compare system time). - -@roadmap_1361_li -Automatic detection of redundant indexes. 
- -@roadmap_1362_li -Maybe reject join without "on" (except natural join). - -@roadmap_1363_li -Implement GiST (Generalized Search Tree for Secondary Storage). - -@roadmap_1364_li -Function to read a number of bytes/characters from an BLOB or CLOB. - -@roadmap_1365_li -Issue 156: Support SELECT ? UNION SELECT ?. - -@roadmap_1366_li -Automatic mixed mode: support a port range list (to avoid firewall problems). - -@roadmap_1367_li -Support the pseudo column rowid, oid, _rowid_. - -@roadmap_1368_li -H2 Console / large result sets: stream early instead of keeping a whole result in-memory - -@roadmap_1369_li -Support TRUNCATE for linked tables. - -@roadmap_1370_li -UNION: evaluate INTERSECT before UNION (like most other database except Oracle). - -@roadmap_1371_li -Delay creating the information schema, and share metadata columns. - -@roadmap_1372_li -TCP Server: use a nonce (number used once) to protect unencrypted channels against replay attacks. - -@roadmap_1373_li -Simplify running scripts and recovery: CREATE FORCE USER (overwrites an existing user). - -@roadmap_1374_li -Support CREATE DATABASE LINK (a custom JDBC driver is already supported). - -@roadmap_1375_li -Support large GROUP BY operations. Issue 216. - -@roadmap_1376_li -Issue 163: Allow to create foreign keys on metadata types. - -@roadmap_1377_li -Logback: write a native DBAppender. - -@roadmap_1378_li -Cache size: don't use more cache than what is available. - -@roadmap_1379_li -Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread. - -@roadmap_1380_li -Tree index: Instead of an AVL tree, use a general balanced trees or a scapegoat tree. - -@roadmap_1381_li -User defined functions: allow to store the bytecode (of just the class, or the jar file of the extension) in the database. - -@roadmap_1382_li -Compatibility: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL. - -@roadmap_1383_li -Optimizer: WHERE X=? 
AND Y IN(?), it always uses the index on Y. Should be cost based. - -@roadmap_1384_li -Common Table Expression (CTE) / recursive queries: support parameters. Issue 314. - -@roadmap_1385_li -Oracle compatibility: support INSERT ALL. - -@roadmap_1386_li -Issue 178: Optimizer: index usage when both ascending and descending indexes are available. - -@roadmap_1387_li -Issue 179: Related subqueries in HAVING clause. - -@roadmap_1388_li -IBM DB2 compatibility: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero. - -@roadmap_1389_li -Creating primary key: always create a constraint. - -@roadmap_1390_li -Maybe use a different page layout: keep the data at the head of the page, and ignore the tail (don't store / read it). This may increase write / read performance depending on the file system. - -@roadmap_1391_li -Indexes of temporary tables are currently kept in-memory. Is this how it should be? - -@roadmap_1392_li -The Shell tool should support the same built-in commands as the H2 Console. - -@roadmap_1393_li -Maybe use PhantomReference instead of finalize. - -@roadmap_1394_li -Database file name suffix: should only have one dot by default. Example: .h2db - -@roadmap_1395_li -Issue 196: Function based indexes - -@roadmap_1396_li -ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName. - -@roadmap_1397_li -Fix the disk space leak (killing the process at the exact right moment will increase the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java - -@roadmap_1398_li -ROWNUM: Oracle compatibility when used within a subquery. Issue 198. - -@roadmap_1399_li -Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way. - -@roadmap_1400_li -ODBC: encrypted databases are not supported because the ;CIPHER= can not be set. 
- -@roadmap_1401_li -Support CLOB and BLOB update, specially conn.createBlob().setBinaryStream(1); - -@roadmap_1402_li -Optimizer: index usage when both ascending and descending indexes are available. Issue 178. - -@roadmap_1403_li -Issue 306: Support schema specific domains. - -@roadmap_1404_li -Triggers: support user defined execution order. Oracle: CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT ON TEST FOR EACH ROW FOLLOWS TEST_1. SQL specifies that multiple triggers should be fired in time-of-creation order. PostgreSQL uses name order, which was judged to be more convenient. Derby: triggers are fired in the order in which they were created. - -@roadmap_1405_li -PostgreSQL compatibility: combine "users" and "roles". See: http://www.postgresql.org/docs/8.1/interactive/user-manag.html - -@roadmap_1406_li -Improve documentation of system properties: only list the property names, default values, and description. - -@roadmap_1407_li -Support running totals / cumulative sum using SUM(..) OVER(..). - -@roadmap_1408_li -Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize) - -@roadmap_1409_li -Triggers: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others). - -@roadmap_1410_li -Common Table Expression (CTE) / recursive queries: support INSERT INTO ... SELECT ... Issue 219. - -@roadmap_1411_li -Common Table Expression (CTE) / recursive queries: support non-recursive queries. Issue 217. - -@roadmap_1412_li -Common Table Expression (CTE) / recursive queries: avoid endless loop. Issue 218. - -@roadmap_1413_li -Common Table Expression (CTE) / recursive queries: support multiple named queries. Issue 220. - -@roadmap_1414_li -Common Table Expression (CTE) / recursive queries: identifier scope may be incorrect. Issue 222. - -@roadmap_1415_li -Log long running transactions (similar to long running statements). 
- -@roadmap_1416_li -Parameter data type is data type of other operand. Issue 205. - -@roadmap_1417_li -Some combinations of nested join with right outer join are not supported. - -@roadmap_1418_li -DatabaseEventListener.openConnection(id) and closeConnection(id). - -@roadmap_1419_li -Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, or to prevent to login with the same username and password from different IPs. Possibly using the DatabaseEventListener API, or a new API. - -@roadmap_1420_li -Compatibility for data type CHAR (Derby, HSQLDB). Issue 212. - -@roadmap_1421_li -Compatibility with MySQL TIMESTAMPDIFF. Issue 209. - -@roadmap_1422_li -Optimizer: use a histogram of the data, specially for non-normal distributions. - -@roadmap_1423_li -Trigger: allow declaring as source code (like functions). - -@roadmap_1424_li -User defined aggregate: allow declaring as source code (like functions). - -@roadmap_1425_li -The error "table not found" is sometimes caused by using the wrong database. Add "(this database is empty)" to the exception message if applicable. - -@roadmap_1426_li -MySQL + PostgreSQL compatibility: support string literal escape with \n. - -@roadmap_1427_li -PostgreSQL compatibility: support string literal escape with double \\. - -@roadmap_1428_li -Document the TCP server "management_db". Maybe include the IP address of the client. - -@roadmap_1429_li -Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main - -@roadmap_1430_li -If a database object was not found in the current schema, but one with the same name existed in another schema, included that in the error message. - -@roadmap_1431_li -Optimization to use an index for OR when using multiple keys: where (key1 = ? and key2 = ?) OR (key1 = ? and key2 = ?) 
- -@roadmap_1432_li -Issue 302: Support optimizing queries with both inner and outer joins, as in: select * from test a inner join test b on a.id=b.id inner join o on o.id=a.id where b.x=1 (the optimizer should swap a and b here). See also TestNestedJoins, tag "swapInnerJoinTables". - -@roadmap_1433_li -JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool). - -@roadmap_1434_li -Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...; - -@roadmap_1435_li -nioMapped file system: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example). - -@roadmap_1436_li -Column as parameter of function table. Issue 228. - -@roadmap_1437_li -Connection pool: detect ;AUTOCOMMIT=FALSE in the database URL, and if set, disable autocommit for all connections. - -@roadmap_1438_li -Compatibility with MS Access: support "&" to concatenate text. - -@roadmap_1439_li -The BACKUP statement should not synchronize on the database, and therefore should not block other users. - -@roadmap_1440_li -Document the database file format. - -@roadmap_1441_li -Support reading LOBs. - -@roadmap_1442_li -Require appending DANGEROUS=TRUE when using certain dangerous settings such as LOG=0, LOG=1, LOCK_MODE=0, disabling FILE_LOCK,... - -@roadmap_1443_li -Support UDT (user defined types) similar to how Apache Derby supports it: check constraint, allow to use it in Java functions as parameters (return values already seem to work). - -@roadmap_1444_li -Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file, optional compatibility with current encrypted database files). - -@roadmap_1445_li -Issue 229: SELECT with simple OR tests uses tableScan when it could use indexes. - -@roadmap_1446_li -GROUP BY queries should use a temporary table if there are too many rows. 
- -@roadmap_1447_li -BLOB: support random access when reading. - -@roadmap_1448_li -CLOB: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form). - -@roadmap_1449_li -Compatibility: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...). - -@roadmap_1450_li -Compatibility with MySQL: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...). - -@roadmap_1451_li -Compatibility with MySQL: support non-strict mode (sql_mode = "") any data that is too large for the column will just be truncated or set to the default value. - -@roadmap_1452_li -The full condition should be sent to the linked table, not just the indexed condition. Example: TestLinkedTableFullCondition - -@roadmap_1453_li -Compatibility with IBM DB2: CREATE PROCEDURE. - -@roadmap_1454_li -Compatibility with IBM DB2: SQL cursors. - -@roadmap_1455_li -Single-column primary key values are always stored explicitly. This is not required. - -@roadmap_1456_li -Compatibility with MySQL: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8). - -@roadmap_1457_li -CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true. - -@roadmap_1458_li -Optimization for large lists for column IN(1, 2, 3, 4,...) - currently an list is used, could potentially use a hash set (maybe only for a part of the values - the ones that can be evaluated). - -@roadmap_1459_li -Compatibility for ARRAY data type (Oracle: VARRAY(n) of VARCHAR(m); HSQLDB: VARCHAR(n) ARRAY; Postgres: VARCHAR(n)[]). - -@roadmap_1460_li -PostgreSQL compatible array literal syntax: ARRAY[['a', 'b'], ['c', 'd']] - -@roadmap_1461_li -PostgreSQL compatibility: UPDATE with FROM. - -@roadmap_1462_li -Issue 297: Oracle compatibility for "at time zone". - -@roadmap_1463_li -IBM DB2 compatibility: IDENTITY_VAL_LOCAL(). - -@roadmap_1464_li -Support SQL/XML. 
- -@roadmap_1465_li -Support concurrent opening of databases. - -@roadmap_1466_li -Improved error message and diagnostics in case of network configuration problems. - -@roadmap_1467_li -TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases). - -@roadmap_1468_li -Adding a primary key should make the columns 'not null' unless if there is a row with null (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby). - -@roadmap_1469_li -ARRAY data type: support Integer[] and so on in Java functions (currently only Object[] is supported). - -@roadmap_1470_li -MySQL compatibility: LOCK TABLES a READ, b READ - see also http://dev.mysql.com/doc/refman/5.0/en/lock-tables.html - -@roadmap_1471_li -The HTML to PDF converter should use http://code.google.com/p/wkhtmltopdf/ - -@roadmap_1472_li -Issue 303: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)". - -@roadmap_1473_li -MySQL compatibility: update test1 t1, test2 t2 set t1.name=t2.name where t1.id=t2.id. - -@roadmap_1474_li -Issue 283: Improve performance of H2 on Android. - -@roadmap_1475_li -Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s). - -@roadmap_1476_li -Column compression option - see http://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d - -@roadmap_1477_li -PostgreSQL compatibility: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID). - -@roadmap_1478_li -MS SQL Server compatibility: support @@ROWCOUNT. - -@roadmap_1479_li -PostgreSQL compatibility: LOG(x) is LOG10(x) and not LN(x). - -@roadmap_1480_li -Issue 311: Serialized lock mode: executeQuery of write operations fails. - -@roadmap_1481_li -PostgreSQL compatibility: support PgAdmin III (specially the function current_setting). - -@roadmap_1482_li -MySQL compatibility: support TIMESTAMPADD. - -@roadmap_1483_li -Support SELECT ... 
FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). - -@roadmap_1484_li -Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). - -@roadmap_1485_li -Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase). - -@roadmap_1486_li -TRANSACTION_ID() for in-memory databases. - -@roadmap_1487_li -TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL). - -@roadmap_1488_li -Support [INNER | OUTER] JOIN USING(column [,...]). - -@roadmap_1489_li -Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle) - -@roadmap_1490_li -GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby). - -@roadmap_1491_li -Sybase / MS SQL Server compatibility: CONVERT(..) parameters are swapped. - -@roadmap_1492_li -Index conditions: WHERE AGE>1 should not scan through all rows with AGE=1. - -@roadmap_1493_li -PHP support: H2 should support PDO, or test with PostgreSQL PDO. - -@roadmap_1494_li -Outer joins: if no column of the outer join table is referenced, the outer join table could be removed from the query. - -@roadmap_1495_li -Cluster: allow using auto-increment and identity columns by ensuring executed in lock-step. - -@roadmap_1496_li -MySQL compatibility: index names only need to be unique for the given table. - -@roadmap_1497_li -Issue 352: constraints: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases. - -@roadmap_1498_li -Oracle compatibility: support MEDIAN aggregate function. - -@roadmap_1499_li -Issue 348: Oracle compatibility: division should return a decimal result. - -@roadmap_1500_li -Read rows on demand: instead of reading the whole row, only read up to that column that is requested. 
Keep a pointer to the data area and the column id that is already read. - -@roadmap_1501_li -Long running transactions: log session id when detected. - -@roadmap_1502_li -Optimization: "select id from test" should use the index on id even without "order by". - -@roadmap_1503_li -Issue 362: LIMIT support for UPDATE statements (MySQL compatibility). - -@roadmap_1504_li -Sybase SQL Anywhere compatibility: SELECT TOP ... START AT ... - -@roadmap_1505_li -Use Java 6 SQLException subclasses. - -@roadmap_1506_li -Issue 390: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR - -@roadmap_1507_li -Use Java 6 exceptions: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,.. - -@roadmap_1508_h2 -Not Planned - -@roadmap_1509_li -HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility. - -@roadmap_1510_li -String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively. - -@roadmap_1511_li -In prepared statements, identifier names (table names and so on) cannot be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements. 
- -@sourceError_1000_h1 -Error Analyzer - -@sourceError_1001_a -Home - -@sourceError_1002_a -Input - -@sourceError_1003_h2 -  Details  Source Code - -@sourceError_1004_p -Paste the error message and stack trace below and click on 'Details' or 'Source Code': - -@sourceError_1005_b -Error Code: - -@sourceError_1006_b -Product Version: - -@sourceError_1007_b -Message: - -@sourceError_1008_b -More Information: - -@sourceError_1009_b -Stack Trace: - -@sourceError_1010_b -Source File: - -@sourceError_1011_p - Inline - -@tutorial_1000_h1 -Tutorial - -@tutorial_1001_a - Starting and Using the H2 Console - -@tutorial_1002_a - Special H2 Console Syntax - -@tutorial_1003_a - Settings of the H2 Console - -@tutorial_1004_a - Connecting to a Database using JDBC - -@tutorial_1005_a - Creating New Databases - -@tutorial_1006_a - Using the Server - -@tutorial_1007_a - Using Hibernate - -@tutorial_1008_a - Using TopLink and Glassfish - -@tutorial_1009_a - Using EclipseLink - -@tutorial_1010_a - Using Apache ActiveMQ - -@tutorial_1011_a - Using H2 within NetBeans - -@tutorial_1012_a - Using H2 with jOOQ - -@tutorial_1013_a - Using Databases in Web Applications - -@tutorial_1014_a - Android - -@tutorial_1015_a - CSV (Comma Separated Values) Support - -@tutorial_1016_a - Upgrade, Backup, and Restore - -@tutorial_1017_a - Command Line Tools - -@tutorial_1018_a - The Shell Tool - -@tutorial_1019_a - Using OpenOffice Base - -@tutorial_1020_a - Java Web Start / JNLP - -@tutorial_1021_a - Using a Connection Pool - -@tutorial_1022_a - Fulltext Search - -@tutorial_1023_a - User-Defined Variables - -@tutorial_1024_a - Date and Time - -@tutorial_1025_a - Using Spring - -@tutorial_1026_a - OSGi - -@tutorial_1027_a - Java Management Extension (JMX) - -@tutorial_1028_h2 -Starting and Using the H2 Console - -@tutorial_1029_p - The H2 Console application lets you access a database using a browser. This can be a H2 database, or another database that supports the JDBC API. 
- -@tutorial_1030_p - This is a client/server application, so both a server and a client (a browser) are required to run it. - -@tutorial_1031_p - Depending on your platform and environment, there are multiple ways to start the H2 Console: - -@tutorial_1032_th -OS - -@tutorial_1033_th -Start - -@tutorial_1034_td -Windows - -@tutorial_1035_td - Click [Start], [All Programs], [H2], and [H2 Console (Command Line)] - -@tutorial_1036_td - An icon will be added to the system tray: - -@tutorial_1037_td - If you don't get the window and the system tray icon, then maybe Java is not installed correctly (in this case, try another way to start the application). A browser window should open and point to the login page at http://localhost:8082. - -@tutorial_1038_td -Windows - -@tutorial_1039_td - Open a file browser, navigate to h2/bin, and double click on h2.bat. - -@tutorial_1040_td - A console window appears. If there is a problem, you will see an error message in this window. A browser window will open and point to the login page (URL: http://localhost:8082). - -@tutorial_1041_td -Any - -@tutorial_1042_td - Double click on the h2*.jar file. This only works if the .jar suffix is associated with Java. - -@tutorial_1043_td -Any - -@tutorial_1044_td - Open a console window, navigate to the directory h2/bin, and type: - -@tutorial_1045_h3 -Firewall - -@tutorial_1046_p - If you start the server, you may get a security warning from the firewall (if you have installed one). If you don't want other computers in the network to access the application on your machine, you can let the firewall block those connections. The connection from the local machine will still work. Only if you want other computers to access the database on this computer, you need allow remote connections in the firewall. - -@tutorial_1047_p - It has been reported that when using Kaspersky 7.0 with firewall, the H2 Console is very slow when connecting over the IP address. 
A workaround is to connect using 'localhost'. - -@tutorial_1048_p - A small firewall is already built into the server: other computers may not connect to the server by default. To change this, go to 'Preferences' and select 'Allow connections from other computers'. - -@tutorial_1049_h3 -Testing Java - -@tutorial_1050_p - To find out which version of Java is installed, open a command prompt and type: - -@tutorial_1051_p - If you get an error message, you may need to add the Java binary directory to the path environment variable. - -@tutorial_1052_h3 -Error Message 'Port may be in use' - -@tutorial_1053_p - You can only start one instance of the H2 Console, otherwise you will get the following error message: "The Web server could not be started. Possible cause: another server is already running...". It is possible to start multiple console applications on the same computer (using different ports), but this is usually not required as the console supports multiple concurrent connections. - -@tutorial_1054_h3 -Using another Port - -@tutorial_1055_p - If the default port of the H2 Console is already in use by another application, then a different port needs to be configured. The settings are stored in a properties file. For details, see Settings of the H2 Console. The relevant entry is webPort. - -@tutorial_1056_p - If no port is specified for the TCP and PG servers, each service will try to listen on its default port. If the default port is already in use, a random port is used. - -@tutorial_1057_h3 -Connecting to the Server using a Browser - -@tutorial_1058_p - If the server started successfully, you can connect to it using a web browser. Javascript needs to be enabled. If you started the server on the same computer as the browser, open the URL http://localhost:8082. If you want to connect to the application from another computer, you need to provide the IP address of the server, for example: http://192.168.0.2:8082. 
If you enabled TLS on the server side, the URL needs to start with https://. - -@tutorial_1059_h3 -Multiple Concurrent Sessions - -@tutorial_1060_p - Multiple concurrent browser sessions are supported. As the database objects reside on the server, the amount of concurrent work is limited by the memory available to the server application. - -@tutorial_1061_h3 -Login - -@tutorial_1062_p - At the login page, you need to provide connection information to connect to a database. Set the JDBC driver class of your database, the JDBC URL, user name, and password. If you are done, click [Connect]. - -@tutorial_1063_p - You can save and reuse previously saved settings. The settings are stored in a properties file (see Settings of the H2 Console). - -@tutorial_1064_h3 -Error Messages - -@tutorial_1065_p - Error messages are shown in red. You can show/hide the stack trace of the exception by clicking on the message. - -@tutorial_1066_h3 -Adding Database Drivers - -@tutorial_1067_p - To register additional JDBC drivers (MySQL, PostgreSQL, HSQLDB,...), add the jar file names to the environment variables H2DRIVERS or CLASSPATH. Example (Windows): to add the HSQLDB JDBC driver C:\Programs\hsqldb\lib\hsqldb.jar, set the environment variable H2DRIVERS to C:\Programs\hsqldb\lib\hsqldb.jar. - -@tutorial_1068_p - Multiple drivers can be set; entries need to be separated by ; (Windows) or : (other operating systems). Spaces in the path names are supported. The settings must not be quoted. - -@tutorial_1069_h3 -Using the H2 Console - -@tutorial_1070_p - The H2 Console application has three main panels: the toolbar on top, the tree on the left, and the query/result panel on the right. The database objects (for example, tables) are listed on the left. Type a SQL command in the query panel and click [Run]. The result appears just below the command. 
- -@tutorial_1071_h3 -Inserting Table Names or Column Names - -@tutorial_1072_p - To insert table and column names into the script, click on the item in the tree. If you click on a table while the query is empty, then SELECT * FROM ... is added. While typing a query, the table that was used is expanded in the tree. For example if you type SELECT * FROM TEST T WHERE T. then the table TEST is expanded. - -@tutorial_1073_h3 -Disconnecting and Stopping the Application - -@tutorial_1074_p - To log out of the database, click [Disconnect] in the toolbar panel. However, the server is still running and ready to accept new sessions. - -@tutorial_1075_p - To stop the server, right click on the system tray icon and select [Exit]. If you don't have the system tray icon, navigate to [Preferences] and click [Shutdown], press [Ctrl]+[C] in the console where the server was started (Windows), or close the console window. - -@tutorial_1076_h2 -Special H2 Console Syntax - -@tutorial_1077_p - The H2 Console supports a few built-in commands. Those are interpreted within the H2 Console, so they work with any database. Built-in commands need to be at the beginning of a statement (before any remarks), otherwise they are not parsed correctly. If in doubt, add ; before the command. - -@tutorial_1078_th -Command(s) - -@tutorial_1079_th -Description - -@tutorial_1080_td - @autocommit_true; - -@tutorial_1081_td - @autocommit_false; - -@tutorial_1082_td - Enable or disable autocommit. - -@tutorial_1083_td - @cancel; - -@tutorial_1084_td - Cancel the currently running statement. - -@tutorial_1085_td - @columns null null TEST; - -@tutorial_1086_td - @index_info null null TEST; - -@tutorial_1087_td - @tables; - -@tutorial_1088_td - @tables null null TEST; - -@tutorial_1089_td - Call the corresponding DatabaseMetaData.get method. Patterns are case sensitive (usually identifiers are uppercase). For information about the parameters, see the Javadoc documentation. 
Missing parameters at the end of the line are set to null. The complete list of metadata commands is: @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, @procedure_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns - -@tutorial_1090_td - @edit select * from test; - -@tutorial_1091_td - Use an updatable result set. - -@tutorial_1092_td - @generated insert into test() values(); - -@tutorial_1093_td - Show the result of Statement.getGeneratedKeys(). - -@tutorial_1094_td - @history; - -@tutorial_1095_td - List the command history. - -@tutorial_1096_td - @info; - -@tutorial_1097_td - Display the result of various Connection and DatabaseMetaData methods. - -@tutorial_1098_td - @list select * from test; - -@tutorial_1099_td - Show the result set in list format (each column on its own line, with row numbers). - -@tutorial_1100_td - @loop 1000 select ?, ?/*rnd*/; - -@tutorial_1101_td - @loop 1000 @statement select ?; - -@tutorial_1102_td - Run the statement this many times. Parameters (?) are set using a loop from 0 up to x - 1. Random values are used for each ?/*rnd*/. A Statement object is used instead of a PreparedStatement if @statement is used. Result sets are read until ResultSet.next() returns false. Timing information is printed. - -@tutorial_1103_td - @maxrows 20; - -@tutorial_1104_td - Set the maximum number of rows to display. - -@tutorial_1105_td - @memory; - -@tutorial_1106_td - Show the used and free memory. This will call System.gc(). - -@tutorial_1107_td - @meta select 1; - -@tutorial_1108_td - List the ResultSetMetaData after running the query. - -@tutorial_1109_td - @parameter_meta select ?; - -@tutorial_1110_td - Show the result of the PreparedStatement.getParameterMetaData() calls. The statement is not executed. 
- -@tutorial_1111_td - @prof_start; - -@tutorial_1112_td - call hash('SHA256', '', 1000000); - -@tutorial_1113_td - @prof_stop; - -@tutorial_1114_td - Start/stop the built-in profiling tool. The top 3 stack traces of the statement(s) between start and stop are listed (if there are 3). - -@tutorial_1115_td - @prof_start; - -@tutorial_1116_td - @sleep 10; - -@tutorial_1117_td - @prof_stop; - -@tutorial_1118_td - Sleep for a number of seconds. Used to profile a long running query or operation that is running in another session (but in the same process). - -@tutorial_1119_td - @transaction_isolation; - -@tutorial_1120_td - @transaction_isolation 2; - -@tutorial_1121_td - Display (without parameters) or change (with parameters 1, 2, 4, 8) the transaction isolation level. - -@tutorial_1122_h2 -Settings of the H2 Console - -@tutorial_1123_p - The settings of the H2 Console are stored in a configuration file called .h2.server.properties in you user home directory. For Windows installations, the user home directory is usually C:\Documents and Settings\[username] or C:\Users\[username]. The configuration file contains the settings of the application and is automatically created when the H2 Console is first started. Supported settings are: - -@tutorial_1124_code -webAllowOthers - -@tutorial_1125_li -: allow other computers to connect. - -@tutorial_1126_code -webPort - -@tutorial_1127_li -: the port of the H2 Console - -@tutorial_1128_code -webSSL - -@tutorial_1129_li -: use encrypted TLS (HTTPS) connections. - -@tutorial_1130_p - In addition to those settings, the properties of the last recently used connection are listed in the form <number>=<name>|<driver>|<url>|<user> using the escape character \. Example: 1=Generic H2 (Embedded)|org.h2.Driver|jdbc\:h2\:~/test|sa - -@tutorial_1131_h2 -Connecting to a Database using JDBC - -@tutorial_1132_p - To connect to a database, a Java application first needs to load the database driver, and then get a connection. 
A simple way to do that is using the following code: - -@tutorial_1133_p - This code first loads the driver (Class.forName(...)) and then opens a connection (using DriverManager.getConnection()). The driver name is "org.h2.Driver". The database URL always needs to start with jdbc:h2: to be recognized by this database. The second parameter in the getConnection() call is the user name (sa for System Administrator in this example). The third parameter is the password. In this database, user names are not case sensitive, but passwords are. - -@tutorial_1134_h2 -Creating New Databases - -@tutorial_1135_p - By default, if the database specified in the URL does not yet exist, a new (empty) database is created automatically. The user that created the database automatically becomes the administrator of this database. - -@tutorial_1136_p - Auto-creating new databases can be disabled, see Opening a Database Only if it Already Exists. - -@tutorial_1137_h2 -Using the Server - -@tutorial_1138_p - H2 currently supports three servers: a web server (for the H2 Console), a TCP server (for client/server connections) and a PG server (for PostgreSQL clients). Please note that only the web server supports browser connections. The servers can be started in different ways, one is using the Server tool. Starting the server doesn't open a database - databases are opened as soon as a client connects. - -@tutorial_1139_h3 -Starting the Server Tool from Command Line - -@tutorial_1140_p - To start the Server tool from the command line with the default settings, run: - -@tutorial_1141_p - This will start the tool with the default options. To get the list of options and default values, run: - -@tutorial_1142_p - There are options available to use other ports, and start or not start parts. 
- -@tutorial_1143_h3 -Connecting to the TCP Server - -@tutorial_1144_p - To remotely connect to a database using the TCP server, use the following driver and database URL: - -@tutorial_1145_li -JDBC driver class: org.h2.Driver - -@tutorial_1146_li -Database URL: jdbc:h2:tcp://localhost/~/test - -@tutorial_1147_p - For details about the database URL, see also in Features. Please note that you can't connect with a web browser to this URL. You can only connect using an H2 client (over JDBC). - -@tutorial_1148_h3 -Starting the TCP Server within an Application - -@tutorial_1149_p - Servers can also be started and stopped from within an application. Sample code: - -@tutorial_1150_h3 -Stopping a TCP Server from Another Process - -@tutorial_1151_p - The TCP server can be stopped from another process. To stop the server from the command line, run: - -@tutorial_1152_p - To stop the server from a user application, use the following code: - -@tutorial_1153_p - This function will only stop the TCP server. If other servers were started in the same process, they will continue to run. To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. Shutting down a TCP server can be protected using the option -tcpPassword (the same password must be used to start and stop the TCP server). - -@tutorial_1154_h2 -Using Hibernate - -@tutorial_1155_p - This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect, or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy. A patch for Hibernate has been submitted and is now applied. You can rename it to H2Dialect.java and include this as a patch in your application, or upgrade to a version of Hibernate where this is fixed. - -@tutorial_1156_p - When using Hibernate, try to use the H2Dialect if possible. 
When using the H2Dialect, compatibility modes such as MODE=MySQL are not supported. When using such a compatibility mode, use the Hibernate dialect for the corresponding database instead of the H2Dialect; but please note H2 does not support all features of all databases. - -@tutorial_1157_h2 -Using TopLink and Glassfish - -@tutorial_1158_p - To use H2 with Glassfish (or Sun AS), set the Datasource Classname to org.h2.jdbcx.JdbcDataSource. You can set this in the GUI at Application Server - Resources - JDBC - Connection Pools, or by editing the file sun-resources.xml: at element jdbc-connection-pool, set the attribute datasource-classname to org.h2.jdbcx.JdbcDataSource. - -@tutorial_1159_p - The H2 database is compatible with HSQLDB and PostgreSQL. To take advantage of H2 specific features, use the H2Platform. The source code of this platform is included in H2 at src/tools/oracle/toplink/essentials/platform/database/DatabasePlatform.java.txt. You will need to copy this file to your application, and rename it to .java. To enable it, change the following setting in persistence.xml: - -@tutorial_1160_p - In old versions of Glassfish, the property name is toplink.platform.class.name. - -@tutorial_1161_p - To use H2 within Glassfish, copy the h2*.jar to the directory glassfish/glassfish/lib. - -@tutorial_1162_h2 -Using EclipseLink - -@tutorial_1163_p - To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many case. See also H2Platform. - -@tutorial_1164_h2 -Using Apache ActiveMQ - -@tutorial_1165_p - When using H2 as the backend database for Apache ActiveMQ, please use the TransactDatabaseLocker instead of the default locking mechanism. Otherwise the database file will grow without bounds. 
The problem is that the default locking mechanism uses an uncommitted UPDATE transaction, which keeps the transaction log from shrinking (causes the database file to grow). Instead of using an UPDATE statement, the TransactDatabaseLocker uses SELECT ... FOR UPDATE which is not problematic. To use it, change the ApacheMQ configuration element <jdbcPersistenceAdapter> element, property databaseLocker="org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker". However, using the MVCC mode will again result in the same problem. Therefore, please do not use the MVCC mode in this case. Another (more dangerous) solution is to set useDatabaseLock to false. - -@tutorial_1166_h2 -Using H2 within NetBeans - -@tutorial_1167_p - The project H2 Database Engine Support For NetBeans allows you to start and stop the H2 server from within the IDE. - -@tutorial_1168_p - There is a known issue when using the Netbeans SQL Execution Window: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. In this case, two sequence values are allocated instead of just one. - -@tutorial_1169_h2 -Using H2 with jOOQ - -@tutorial_1170_p - jOOQ adds a thin layer on top of JDBC, allowing for type-safe SQL construction, including advanced SQL, stored procedures and advanced data types. jOOQ takes your database schema as a base for code generation. If this is your example schema: - -@tutorial_1171_p - then run the jOOQ code generator on the command line using this command: - -@tutorial_1172_p - ...where codegen.xml is on the classpath and contains this information - -@tutorial_1173_p - Using the generated source, you can query the database as follows: - -@tutorial_1174_p - See more details on jOOQ Homepage and in the jOOQ Tutorial - -@tutorial_1175_h2 -Using Databases in Web Applications - -@tutorial_1176_p - There are multiple ways to access a database from within web applications. 
Here are some examples if you use Tomcat or JBoss. - -@tutorial_1177_h3 -Embedded Mode - -@tutorial_1178_p - The (currently) simplest solution is to use the database in the embedded mode, that means open a connection in your application when it starts (a good solution is using a Servlet Listener, see below), or when a session starts. A database can be accessed from multiple sessions and applications at the same time, as long as they run in the same process. Most Servlet Containers (for example Tomcat) are just using one process, so this is not a problem (unless you run Tomcat in clustered mode). Tomcat uses multiple threads and multiple classloaders. If multiple applications access the same database at the same time, you need to put the database jar in the shared/lib or server/lib directory. It is a good idea to open the database when the web application starts, and close it when the web application stops. If using multiple applications, only one (any) of them needs to do that. In the application, an idea is to use one connection per Session, or even one connection per request (action). Those connections should be closed after use if possible (but it's not that bad if they don't get closed). - -@tutorial_1179_h3 -Server Mode - -@tutorial_1180_p - The server mode is similar, but it allows you to run the server in another process. - -@tutorial_1181_h3 -Using a Servlet Listener to Start and Stop a Database - -@tutorial_1182_p - Add the h2*.jar file to your web application, and add the following snippet to your web.xml file (between the context-param and the filter section): - -@tutorial_1183_p - For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc:h2:~/test, user name sa, and password sa. 
If you want to use this connection within your servlet, you can access as follows: - -@tutorial_1184_code -DbStarter - -@tutorial_1185_p - can also start the TCP server, however this is disabled by default. To enable it, use the parameter db.tcpServer in the file web.xml. Here is the complete list of options. These options need to be placed between the description tag and the listener / filter tags: - -@tutorial_1186_p - When the web application is stopped, the database connection will be closed automatically. If the TCP server is started within the DbStarter, it will also be stopped automatically. - -@tutorial_1187_h3 -Using the H2 Console Servlet - -@tutorial_1188_p - The H2 Console is a standalone application and includes its own web server, but it can be used as a servlet as well. To do that, include the h2*.jar file in your application, and add the following configuration to your web.xml: - -@tutorial_1189_p - For details, see also src/tools/WEB-INF/web.xml. - -@tutorial_1190_p - To create a web application with just the H2 Console, run the following command: - -@tutorial_1191_h2 -Android - -@tutorial_1192_p - You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. So far, only very few tests and benchmarks were run, but it seems that performance is similar to SQLite, except for opening and closing a database, which is not yet optimized in H2 (H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. So far, only very few tests have been run, and everything seems to work as expected. Fulltext search was not yet tested, however the native fulltext search should work. - -@tutorial_1193_p - Reasons to use H2 instead of SQLite are: - -@tutorial_1194_li -Full Unicode support including UPPER() and LOWER(). - -@tutorial_1195_li -Streaming API for BLOB and CLOB data. - -@tutorial_1196_li -Fulltext search. 
- -@tutorial_1197_li -Multiple connections. - -@tutorial_1198_li -User defined functions and triggers. - -@tutorial_1199_li -Database file encryption. - -@tutorial_1200_li -Reading and writing CSV files (this feature can be used outside the database as well). - -@tutorial_1201_li -Referential integrity and check constraints. - -@tutorial_1202_li -Better data type and SQL support. - -@tutorial_1203_li -In-memory databases, read-only databases, linked tables. - -@tutorial_1204_li -Better compatibility with other databases which simplifies porting applications. - -@tutorial_1205_li -Possibly better performance (so far for read operations). - -@tutorial_1206_li -Server mode (accessing a database on a different machine over TCP/IP). - -@tutorial_1207_p - Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). Both the regular H2 jar file and the smaller h2small-*.jar can be used. To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) or build.bat jarSmall (Windows). - -@tutorial_1208_p - The database files needs to be stored in a place that is accessible for the application. Example: - -@tutorial_1209_p - Limitations: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android. - -@tutorial_1210_h2 -CSV (Comma Separated Values) Support - -@tutorial_1211_p - The CSV file support can be used inside the database using the functions CSVREAD and CSVWRITE, or it can be used outside the database as a standalone tool. - -@tutorial_1212_h3 -Reading a CSV File from Within a Database - -@tutorial_1213_p - A CSV file can be read using the function CSVREAD. Example: - -@tutorial_1214_p - Please note for performance reason, CSVREAD should not be used inside a join. Instead, import the data first (possibly into a temporary table), create the required indexes if necessary, and then query this table. 
- -@tutorial_1215_h3 -Importing Data from a CSV File - -@tutorial_1216_p - A fast way to load or import data (sometimes called 'bulk load') from a CSV file is to combine table creation with import. Optionally, the column names and data types can be set when creating the table. Another option is to use INSERT INTO ... SELECT. - -@tutorial_1217_h3 -Writing a CSV File from Within a Database - -@tutorial_1218_p - The built-in function CSVWRITE can be used to create a CSV file from a query. Example: - -@tutorial_1219_h3 -Writing a CSV File from a Java Application - -@tutorial_1220_p - The Csv tool can be used in a Java application even when not using a database at all. Example: - -@tutorial_1221_h3 -Reading a CSV File from a Java Application - -@tutorial_1222_p - It is possible to read a CSV file without opening a database. Example: - -@tutorial_1223_h2 -Upgrade, Backup, and Restore - -@tutorial_1224_h3 -Database Upgrade - -@tutorial_1225_p - The recommended way to upgrade from one version of the database engine to the next version is to create a backup of the database (in the form of a SQL script) using the old engine, and then execute the SQL script using the new engine. - -@tutorial_1226_h3 -Backup using the Script Tool - -@tutorial_1227_p - The recommended way to backup a database is to create a compressed SQL script file. This will result in a small, human readable, and database version independent backup. Creating the script will also verify the checksums of the database file. The Script tool is ran as follows: - -@tutorial_1228_p - It is also possible to use the SQL command SCRIPT to create the backup of the database. For more information about the options, see the SQL command SCRIPT. The backup can be done remotely, however the file will be created on the server side. The built in FTP server could be used to retrieve the file from the server. 
- -@tutorial_1229_h3 -Restore from a Script - -@tutorial_1230_p - To restore a database from a SQL script file, you can use the RunScript tool: - -@tutorial_1231_p - For more information about the options, see the SQL command RUNSCRIPT. The restore can be done remotely, however the file needs to be on the server side. The built in FTP server could be used to copy the file to the server. It is also possible to use the SQL command RUNSCRIPT to execute a SQL script. SQL script files may contain references to other script files, in the form of RUNSCRIPT commands. However, when using the server mode, the referenced script files need to be available on the server side. - -@tutorial_1232_h3 -Online Backup - -@tutorial_1233_p - The BACKUP SQL statement and the Backup tool both create a zip file with the database file. However, the contents of this file are not human readable. - -@tutorial_1234_p - The resulting backup is transactionally consistent, meaning the consistency and atomicity rules apply. - -@tutorial_1235_p - The Backup tool (org.h2.tools.Backup) can not be used to create an online backup; the database must not be in use while running this program. - -@tutorial_1236_p - Creating a backup by copying the database files while the database is running is not supported, except if the file systems support creating snapshots. With other file systems, it can't be guaranteed that the data is copied in the right order. - -@tutorial_1237_h2 -Command Line Tools - -@tutorial_1238_p - This database comes with a number of command line tools. To get more information about a tool, start it with the parameter '-?', for example: - -@tutorial_1239_p - The command line tools are: - -@tutorial_1240_code -Backup - -@tutorial_1241_li - creates a backup of a database. - -@tutorial_1242_code -ChangeFileEncryption - -@tutorial_1243_li - allows changing the file encryption password or algorithm of a database. 
- -@tutorial_1244_code -Console - -@tutorial_1245_li - starts the browser based H2 Console. - -@tutorial_1246_code -ConvertTraceFile - -@tutorial_1247_li - converts a .trace.db file to a Java application and SQL script. - -@tutorial_1248_code -CreateCluster - -@tutorial_1249_li - creates a cluster from a standalone database. - -@tutorial_1250_code -DeleteDbFiles - -@tutorial_1251_li - deletes all files belonging to a database. - -@tutorial_1252_code -Recover - -@tutorial_1253_li - helps recovering a corrupted database. - -@tutorial_1254_code -Restore - -@tutorial_1255_li - restores a backup of a database. - -@tutorial_1256_code -RunScript - -@tutorial_1257_li - runs a SQL script against a database. - -@tutorial_1258_code -Script - -@tutorial_1259_li - allows converting a database to a SQL script for backup or migration. - -@tutorial_1260_code -Server - -@tutorial_1261_li - is used in the server mode to start a H2 server. - -@tutorial_1262_code -Shell - -@tutorial_1263_li - is a command line database tool. - -@tutorial_1264_p - The tools can also be called from an application by calling the main or another public method. For details, see the Javadoc documentation. - -@tutorial_1265_h2 -The Shell Tool - -@tutorial_1266_p - The Shell tool is a simple interactive command line tool. To start it, type: - -@tutorial_1267_p - You will be asked for a database URL, JDBC driver, user name, and password. The connection setting can also be set as command line parameters. After connecting, you will get the list of options. The built-in commands don't need to end with a semicolon, but SQL statements are only executed if the line ends with a semicolon ;. This allows to enter multi-line statements: - -@tutorial_1268_p - By default, results are printed as a table. For results with many column, consider using the list mode: - -@tutorial_1269_h2 -Using OpenOffice Base - -@tutorial_1270_p - OpenOffice.org Base supports database access over the JDBC API. 
To connect to a H2 database using OpenOffice Base, you first need to add the JDBC driver to OpenOffice. The steps to connect to a H2 database are: - -@tutorial_1271_li -Start OpenOffice Writer, go to [Tools], [Options] - -@tutorial_1272_li -Make sure you have selected a Java runtime environment in OpenOffice.org / Java - -@tutorial_1273_li -Click [Class Path...], [Add Archive...] - -@tutorial_1274_li -Select your h2 jar file (location is up to you, could be wherever you choose) - -@tutorial_1275_li -Click [OK] (as much as needed), stop OpenOffice (including the Quickstarter) - -@tutorial_1276_li -Start OpenOffice Base - -@tutorial_1277_li -Connect to an existing database; select [JDBC]; [Next] - -@tutorial_1278_li -Example datasource URL: jdbc:h2:~/test - -@tutorial_1279_li -JDBC driver class: org.h2.Driver - -@tutorial_1280_p - Now you can access the database stored in the current users home directory. - -@tutorial_1281_p - To use H2 in NeoOffice (OpenOffice without X11): - -@tutorial_1282_li -In NeoOffice, go to [NeoOffice], [Preferences] - -@tutorial_1283_li -Look for the page under [NeoOffice], [Java] - -@tutorial_1284_li -Click [Class Path], [Add Archive...] - -@tutorial_1285_li -Select your h2 jar file (location is up to you, could be wherever you choose) - -@tutorial_1286_li -Click [OK] (as much as needed), restart NeoOffice. - -@tutorial_1287_p - Now, when creating a new database using the "Database Wizard" : - -@tutorial_1288_li -Click [File], [New], [Database]. - -@tutorial_1289_li -Select [Connect to existing database] and the select [JDBC]. Click next. - -@tutorial_1290_li -Example datasource URL: jdbc:h2:~/test - -@tutorial_1291_li -JDBC driver class: org.h2.Driver - -@tutorial_1292_p - Another solution to use H2 in NeoOffice is: - -@tutorial_1293_li -Package the h2 jar within an extension package - -@tutorial_1294_li -Install it as a Java extension in NeoOffice - -@tutorial_1295_p - This can be done by create it using the NetBeans OpenOffice plugin. 
See also Extensions Development. - -@tutorial_1296_h2 -Java Web Start / JNLP - -@tutorial_1297_p - When using Java Web Start / JNLP (Java Network Launch Protocol), permissions tags must be set in the .jnlp file, and the application .jar file must be signed. Otherwise, when trying to write to the file system, the following exception will occur: java.security.AccessControlException: access denied (java.io.FilePermission ... read). Example permission tags: - -@tutorial_1298_h2 -Using a Connection Pool - -@tutorial_1299_p - For H2, opening a connection is fast if the database is already open. Still, using a connection pool improves performance if you open and close connections a lot. A simple connection pool is included in H2. It is based on the Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, for example the Apache Commons DBCP. For H2, it is about twice as fast to get a connection from the built-in connection pool than to get one using DriverManager.getConnection(). The built-in connection pool is used as follows: - -@tutorial_1300_h2 -Fulltext Search - -@tutorial_1301_p - H2 includes two fulltext search implementations. One is using Apache Lucene, and the other (the native implementation) stores the index data in special tables in the database. - -@tutorial_1302_h3 -Using the Native Fulltext Search - -@tutorial_1303_p - To initialize, call: - -@tutorial_1304_p - You need to initialize it in each database where you want to use it. Afterwards, you can create a fulltext index for a table using: - -@tutorial_1305_p - PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. 
To search the index, use the following query: - -@tutorial_1306_p - This will produce a result set that contains the query needed to retrieve the data: - -@tutorial_1307_p - To drop an index on a table: - -@tutorial_1308_p - To get the raw data, use FT_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FT_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0]; - -@tutorial_1309_p - You can also call the index from within a Java application: - -@tutorial_1310_h3 -Using the Apache Lucene Fulltext Search - -@tutorial_1311_p - To use the Apache Lucene full text search, you need the Lucene library in the classpath. Currently, Apache Lucene 3.6.2 is used for testing. Newer versions may work, however they are not tested. How to do that depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. To initialize the Lucene fulltext search in a database, call: - -@tutorial_1312_p - You need to initialize it in each database where you want to use it. Afterwards, you can create a full text index for a table using: - -@tutorial_1313_p - PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. To search the index, use the following query: - -@tutorial_1314_p - This will produce a result set that contains the query needed to retrieve the data: - -@tutorial_1315_p - To drop an index on a table (be warned that this will re-index all of the full-text indices for the entire database): - -@tutorial_1316_p - To get the raw data, use FTL_SEARCH_DATA('Hello', 0, 0);. 
The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FTL_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0]; - -@tutorial_1317_p - You can also call the index from within a Java application: - -@tutorial_1318_p - The Lucene fulltext search supports searching in specific column only. Column names must be uppercase (except if the original columns are double quoted). For column names starting with an underscore (_), another underscore needs to be added. Example: - -@tutorial_1319_h2 -User-Defined Variables - -@tutorial_1320_p - This database supports user-defined variables. Variables start with @ and can be used wherever expressions or parameters are allowed. Variables are not persisted and session scoped, that means only visible from within the session in which they are defined. A value is usually assigned using the SET command: - -@tutorial_1321_p - The value can also be changed using the SET() method. This is useful in queries: - -@tutorial_1322_p - Variables that are not set evaluate to NULL. The data type of a user-defined variable is the data type of the value assigned to it, that means it is not necessary (or possible) to declare variable names before using them. There are no restrictions on the assigned values; large objects (LOBs) are supported as well. Rolling back a transaction does not affect the value of a user-defined variable. - -@tutorial_1323_h2 -Date and Time - -@tutorial_1324_p - Date, time and timestamp values support ISO 8601 formatting, including time zone: - -@tutorial_1325_p - If the time zone is not set, the value is parsed using the current time zone setting of the system. Date and time information is stored in H2 database files without time zone information. If the database is opened using another system time zone, the date and time will be the same. 
That means if you store the value '2000-01-01 12:00:00' in one time zone, then close the database and open the database again in a different time zone, you will also get '2000-01-01 12:00:00'. Please note that changing the time zone after the H2 driver is loaded is not supported. - -@tutorial_1326_h2 -Using Spring - -@tutorial_1327_h3 -Using the TCP Server - -@tutorial_1328_p - Use the following configuration to start and stop the H2 TCP server using the Spring Framework: - -@tutorial_1329_p - The destroy-method will help prevent exceptions on hot-redeployment or when restarting the server. - -@tutorial_1330_h3 -Error Code Incompatibility - -@tutorial_1331_p - There is an incompatibility with the Spring JdbcTemplate and H2 version 1.3.154 and newer, because of a change in the error code. This will cause the JdbcTemplate to not detect a duplicate key condition, and so a DataIntegrityViolationException is thrown instead of DuplicateKeyException. See also the issue SPR-8235. The workaround is to add the following XML file to the root of the classpath: - -@tutorial_1332_h2 -OSGi - -@tutorial_1333_p - The standard H2 jar can be dropped in as a bundle in an OSGi container. H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification. The H2 Data Source Factory service is registered with the following properties: OSGI_JDBC_DRIVER_CLASS=org.h2.Driver and OSGI_JDBC_DRIVER_NAME=H2 JDBC Driver. The OSGI_JDBC_DRIVER_VERSION property reflects the version of the driver as is. - -@tutorial_1334_p - The following standard configuration properties are supported: JDBC_USER, JDBC_PASSWORD, JDBC_DESCRIPTION, JDBC_DATASOURCE_NAME, JDBC_NETWORK_PROTOCOL, JDBC_URL, JDBC_SERVER_NAME, JDBC_PORT_NUMBER. Any other standard property will be rejected. Non-standard properties will be passed on to H2 in the connection URL. 
- -@tutorial_1335_h2 -Java Management Extension (JMX) - -@tutorial_1336_p - Management over JMX is supported, but not enabled by default. To enable JMX, append ;JMX=TRUE to the database URL when opening the database. Various tools support JMX, one such tool is the jconsole. When opening the jconsole, connect to the process where the database is open (when using the server mode, you need to connect to the server process). Then go to the MBeans section. Under org.h2 you will find one entry per database. The object name of the entry is the database short name, plus the path (each colon is replaced with an underscore character). - -@tutorial_1337_p - The following attributes and operations are supported: - -@tutorial_1338_code -CacheSize - -@tutorial_1339_li -: the cache size currently in use in KB. - -@tutorial_1340_code -CacheSizeMax - -@tutorial_1341_li - (read/write): the maximum cache size in KB. - -@tutorial_1342_code -Exclusive - -@tutorial_1343_li -: whether this database is open in exclusive mode or not. - -@tutorial_1344_code -FileReadCount - -@tutorial_1345_li -: the number of file read operations since the database was opened. - -@tutorial_1346_code -FileSize - -@tutorial_1347_li -: the file size in KB. - -@tutorial_1348_code -FileWriteCount - -@tutorial_1349_li -: the number of file write operations since the database was opened. - -@tutorial_1350_code -FileWriteCountTotal - -@tutorial_1351_li -: the number of file write operations since the database was created. - -@tutorial_1352_code -LogMode - -@tutorial_1353_li - (read/write): the current transaction log mode. See SET LOG for details. - -@tutorial_1354_code -Mode - -@tutorial_1355_li -: the compatibility mode (REGULAR if no compatibility mode is used). - -@tutorial_1356_code -MultiThreaded - -@tutorial_1357_li -: true if multi-threaded is enabled. - -@tutorial_1358_code -Mvcc - -@tutorial_1359_li -: true if MVCC is enabled. 
- -@tutorial_1360_code -ReadOnly - -@tutorial_1361_li -: true if the database is read-only. - -@tutorial_1362_code -TraceLevel - -@tutorial_1363_li - (read/write): the file trace level. - -@tutorial_1364_code -Version - -@tutorial_1365_li -: the database version in use. - -@tutorial_1366_code -listSettings - -@tutorial_1367_li -: list the database settings. - -@tutorial_1368_code -listSessions - -@tutorial_1369_li -: list the open sessions, including currently executing statement (if any) and locked tables (if any). - -@tutorial_1370_p - To enable JMX, you may need to set the system properties com.sun.management.jmxremote and com.sun.management.jmxremote.port as required by the JVM. - diff --git a/h2/src/docsrc/text/_docs_ja.utf8.txt b/h2/src/docsrc/text/_docs_ja.utf8.txt deleted file mode 100644 index 5de4e01c0b..0000000000 --- a/h2/src/docsrc/text/_docs_ja.utf8.txt +++ /dev/null @@ -1,12512 +0,0 @@ -@advanced_1000_h1 -#Advanced - -@advanced_1001_a -@advanced_1000_h1 -#Advanced -# Result Sets - -@advanced_1002_a -# Large Objects - -@advanced_1003_a -# Linked Tables - -@advanced_1004_a -# Spatial Features - -@advanced_1005_a -# Recursive Queries - -@advanced_1006_a -# Updatable Views - -@advanced_1007_a -# Transaction Isolation - -@advanced_1008_a -# Multi-Version Concurrency Control (MVCC) - -@advanced_1009_a -# Clustering / High Availability - -@advanced_1010_a -# Two Phase Commit - -@advanced_1011_a -# Compatibility - -@advanced_1012_a -# Standards Compliance - -@advanced_1013_a -# Run as Windows Service - -@advanced_1014_a -# ODBC Driver - -@advanced_1015_a -# Using H2 in Microsoft .NET - -@advanced_1016_a -# ACID - -@advanced_1017_a -# Durability Problems - -@advanced_1018_a -# Using the Recover Tool - -@advanced_1019_a -# File Locking Protocols - -@advanced_1020_a -# Using Passwords - -@advanced_1021_a -# Password Hash - -@advanced_1022_a -# Protection against SQL Injection - -@advanced_1023_a -# Protection against Remote Access - -@advanced_1024_a -# 
Restricting Class Loading and Usage - -@advanced_1025_a -# Security Protocols - -@advanced_1026_a -# TLS Connections - -@advanced_1027_a -# Universally Unique Identifiers (UUID) - -@advanced_1028_a -# Settings Read from System Properties - -@advanced_1029_a -# Setting the Server Bind Address - -@advanced_1030_a -# Pluggable File System - -@advanced_1031_a -# Split File System - -@advanced_1032_a -# Database Upgrade - -@advanced_1033_a -# Java Objects Serialization - -@advanced_1034_a -# Custom Data Types Handler API - -@advanced_1035_a -# Limits and Limitations - -@advanced_1036_a -# Glossary and Links - -@advanced_1037_h2 -Result Sets - -@advanced_1038_h3 -#Statements that Return a Result Set - -@advanced_1039_p -# The following statements return a result set: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP. All other statements return an update count. - -@advanced_1040_h3 -行数�?�制�? - -@advanced_1041_p -# Before the result is returned to the application, all rows are read by the database. Server side cursors are not supported currently. If only the first few rows are interesting for the application, then the result set size should be limited to improve the performance. This can be done using LIMIT in a query (example: SELECT * FROM TEST LIMIT 100), or by using Statement.setMaxRows(max). - -@advanced_1042_h3 -大�??�?�Result Set �?�外部ソート - -@advanced_1043_p -# For large result set, the result is buffered to disk. The threshold can be defined using the statement SET MAX_MEMORY_ROWS. If ORDER BY is used, the sorting is done using an external sort algorithm. In this case, each block of rows is sorted using quick sort, then written to disk; when reading the data, the blocks are merged together. - -@advanced_1044_h2 -大�??�?�オブジェクト - -@advanced_1045_h3 -大�??�?�オブジェクト�?�ソート�?�読�?�込�?� - -@advanced_1046_p -# If it is possible that the objects don't fit into memory, then the data type CLOB (for textual data) or BLOB (for binary data) should be used. 
For these data types, the objects are not fully read into memory, by using streams. To store a BLOB, use PreparedStatement.setBinaryStream. To store a CLOB, use PreparedStatement.setCharacterStream. To read a BLOB, use ResultSet.getBinaryStream, and to read a CLOB, use ResultSet.getCharacterStream. When using the client/server mode, large BLOB and CLOB data is stored in a temporary file on the client side. - -@advanced_1047_h3 -#When to use CLOB/BLOB - -@advanced_1048_p -# By default, this database stores large LOB (CLOB and BLOB) objects separate from the main table data. Small LOB objects are stored in-place, the threshold can be set using MAX_LENGTH_INPLACE_LOB, but there is still an overhead to use CLOB/BLOB. Because of this, BLOB and CLOB should never be used for columns with a maximum size below about 200 bytes. The best threshold depends on the use case; reading in-place objects is faster than reading from separate files, but slows down the performance of operations that don't involve this column. - -@advanced_1049_h3 -#Large Object Compression - -@advanced_1050_p -# The following feature is only available for the PageStore storage engine. For the MVStore engine (the default for H2 version 1.4.x), append ;COMPRESS=TRUE to the database URL instead. CLOB and BLOB values can be compressed by using SET COMPRESS_LOB. The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster. - -@advanced_1051_h2 -リンクテーブル - -@advanced_1052_p -# This database supports linked tables, which means tables that don't exist in the current database but are just links to another database. 
To create such a link, use the CREATE LINKED TABLE statement: - -@advanced_1053_p -# You can then access the table in the usual way. Whenever the linked table is accessed, the database issues specific queries over JDBC. Using the example above, if you issue the query SELECT * FROM LINK WHERE ID=1, then the following query is run against the PostgreSQL database: SELECT * FROM TEST WHERE ID=?. The same happens for insert and update statements. Only simple statements are executed against the target database, that means no joins (queries that contain joins are converted to simple queries). Prepared statements are used where possible. - -@advanced_1054_p -# To view the statements that are executed against the target table, set the trace level to 3. - -@advanced_1055_p -# If multiple linked tables point to the same database (using the same database URL), the connection is shared. To disable this, set the system property h2.shareLinkedConnections=false. - -@advanced_1056_p -# The statement CREATE LINKED TABLE supports an optional schema name parameter. - -@advanced_1057_p -# The following are not supported because they may result in a deadlock: creating a linked table to the same database, and creating a linked table to another database using the server mode if the other database is open in the same server (use the embedded mode instead). - -@advanced_1058_p -# Data types that are not supported in H2 are also not supported for linked tables, for example unsigned data types if the value is outside the range of the signed type. In such cases, the columns needs to be cast to a supported type. - -@advanced_1059_h2 -#Updatable Views - -@advanced_1060_p -# By default, views are not updatable. To make a view updatable, use an "instead of" trigger as follows: - -@advanced_1061_p -# Update the base table(s) within the trigger as required. For details, see the sample application org.h2.samples.UpdatableView. 
- -@advanced_1062_h2 -トランザクション分離 - -@advanced_1063_p -# Please note that most data definition language (DDL) statements, such as "create table", commit the current transaction. See the Grammar for details. - -@advanced_1064_p -# Transaction isolation is provided for all data manipulation language (DML) statements. - -@advanced_1065_p -# Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. Instead, rows are locked for update, and read committed is used in all cases (changing the isolation level has no effect). - -@advanced_1066_p -# This database supports the following transaction isolation levels: - -@advanced_1067_b -Read Committed (コミット済�?�読�?��?�り) - -@advanced_1068_li -# This is the default level. Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. Higher concurrency is possible when using this level. - -@advanced_1069_li -# To enable, execute the SQL statement SET LOCK_MODE 3 - -@advanced_1070_li -# or append ;LOCK_MODE=3 to the database URL: jdbc:h2:~/test;LOCK_MODE=3 - -@advanced_1071_b -Serializable (直列化) - -@advanced_1072_li -# Both read locks and write locks are kept until the transaction commits. To enable, execute the SQL statement SET LOCK_MODE 1 - -@advanced_1073_li -# or append ;LOCK_MODE=1 to the database URL: jdbc:h2:~/test;LOCK_MODE=1 - -@advanced_1074_b -Read Uncommitted (�?�コミット読�?��?�り) - -@advanced_1075_li -# This level means that transaction isolation is disabled. - -@advanced_1076_li -# To enable, execute the SQL statement SET LOCK_MODE 0 - -@advanced_1077_li -# or append ;LOCK_MODE=0 to the database URL: jdbc:h2:~/test;LOCK_MODE=0 - -@advanced_1078_p -# When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited. 
- -@advanced_1079_b -Dirty Reads (ダーティリード) - -@advanced_1080_li -# Means a connection can read uncommitted changes made by another connection. - -@advanced_1081_li -# Possible with: read uncommitted - -@advanced_1082_b -Non-Repeatable Reads (�??復�?�?�能読�?��?�り) - -@advanced_1083_li -# A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result. - -@advanced_1084_li -# Possible with: read uncommitted, read committed - -@advanced_1085_b -Phantom Reads (ファントムリード) - -@advanced_1086_li -# A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row. - -@advanced_1087_li -# Possible with: read uncommitted, read committed - -@advanced_1088_h3 -#Multi-Version Concurrency Control (MVCC) - -@advanced_1089_p -# The database allows multiple concurrent connections to the same database. To make sure all connections only see consistent data, table level locking is used by default. This mechanism does not allow high concurrency, but is very fast. Shared locks and exclusive locks are supported. Before reading from a table, the database tries to add a shared lock to the table (this is only possible if there is no exclusive lock on the object by another connection). If the shared lock is added successfully, the table can be read. It is allowed that other connections also have a shared lock on the same object. If a connection wants to write to a table (update or delete a row), an exclusive lock is required. To get the exclusive lock, other connection must not have any locks on the object. After the connection commits, all locks are released. This database keeps all locks in memory. When a lock is released, and multiple connections are waiting for it, one of them is picked at random. 
- -@advanced_1090_h3 -#Table Level Locking (PageStore engine) - -@advanced_1091_p -# If a connection cannot get a lock on an object, the connection waits for some amount of time (the lock timeout). During this time, hopefully the connection holding the lock commits and it is then possible to get the lock. If this is not possible because the other connection does not release the lock for some time, the unsuccessful connection will get a lock timeout exception. The lock timeout can be set individually for each connection. - -@advanced_1092_h2 -#Multi-Version Concurrency Control (MVCC) - -@advanced_1093_p -# The MVCC feature allows higher concurrency than using (table level or row level) locks. When using MVCC in this database, delete, insert and update operations will only issue a shared lock on the table. An exclusive lock is still used when adding or removing columns, when dropping the table, and when using SELECT ... FOR UPDATE. Connections only 'see' committed data, and own changes. That means, if connection A updates a row but doesn't commit this change yet, connection B will see the old value. Only when the change is committed, the new value is visible by other connections (read committed). If multiple connections concurrently try to update the same row, the database waits until it can apply the change, but at most until the lock timeout expires. - -@advanced_1094_p -# To use the MVCC feature, append ;MVCC=TRUE to the database URL: - -@advanced_1095_p -# The setting must be specified in the first connection (the one that opens the database). It is not possible to enable or disable this setting while the database is already open. - -@advanced_1096_p -# If MVCC is enabled, changing the lock mode (LOCK_MODE) has no effect. - -@advanced_1097_div -# The MVCC mode is enabled by default in version 1.4.x, with the default MVStore storage engine. MVCC is disabled by default when using the PageStore storage engine (which is the default in version 1.3.x). 
The following applies when using the PageStore storage engine: The MVCC feature is not fully tested yet. The limitations of the MVCC mode are: with the PageStore storage engine, it can not be used at the same time as MULTI_THREADED=TRUE; the complete undo log (the list of uncommitted changes) must fit in memory when using multi-version concurrency. The setting MAX_MEMORY_UNDO has no effect. Clustering / High Availability - -@advanced_1098_p -# This database supports a simple clustering / high availability mechanism. The architecture is: two database servers run on two different computers, and on both computers is a copy of the same database. If both servers run, each database operation is executed on both computers. If one server fails (power, hardware or network failure), the other server can still continue to work. From this point on, the operations will be executed only on one server until the other server is back up. - -@advanced_1099_p -# Clustering can only be used in the server mode (the embedded mode does not support clustering). The cluster can be re-created using the CreateCluster tool without stopping the remaining server. Applications that are still connected are automatically disconnected, however when appending ;AUTO_RECONNECT=TRUE, they will recover from that. - -@advanced_1100_p -# To initialize the cluster, use the following steps: - -@advanced_1101_li -#Create a database - -@advanced_1102_li -#Use the CreateCluster tool to copy the database to another location and initialize the clustering. Afterwards, you have two databases containing the same data. - -@advanced_1103_li -#Start two servers (one for each copy of the database) - -@advanced_1104_li -#You are now ready to connect to the databases with the client application(s) - -@advanced_1105_h3 -CreateClusterツールを使用�?�る - -@advanced_1106_p -# To understand how clustering works, please try out the following example. 
In this example, the two databases reside on the same computer, but usually, the databases will be on different servers. - -@advanced_1107_li -#Create two directories: server1, server2. Each directory will simulate a directory on a computer. - -@advanced_1108_li -#Start a TCP server pointing to the first directory. You can do this using the command line: - -@advanced_1109_li -#Start a second TCP server pointing to the second directory. This will simulate a server running on a second (redundant) computer. You can do this using the command line: - -@advanced_1110_li -#Use the CreateCluster tool to initialize clustering. This will automatically create a new, empty database if it does not exist. Run the tool on the command line: - -@advanced_1111_li -#You can now connect to the databases using an application or the H2 Console using the JDBC URL jdbc:h2:tcp://localhost:9101,localhost:9102/~/test - -@advanced_1112_li -#If you stop a server (by killing the process), you will notice that the other machine continues to work, and therefore the database is still accessible. - -@advanced_1113_li -#To restore the cluster, you first need to delete the database that failed, then restart the server that was stopped, and re-run the CreateCluster tool. - -@advanced_1114_h3 -#Detect Which Cluster Instances are Running - -@advanced_1115_p -# To find out which cluster nodes are currently running, execute the following SQL statement: - -@advanced_1116_p -# If the result is '' (two single quotes), then the cluster mode is disabled. Otherwise, the list of servers is returned, enclosed in single quote. Example: 'server1:9191,server2:9191'. - -@advanced_1117_p -# It is also possible to get the list of servers by using Connection.getClientInfo(). - -@advanced_1118_p -# The property list returned from getClientInfo() contains a numServers property that returns the number of servers that are in the connection list. 
To get the actual servers, getClientInfo() also has properties server0..serverX, where serverX is the number of servers minus 1. - -@advanced_1119_p -# Example: To get the 2nd server in the connection list one uses getClientInfo('server1'). Note: The serverX property only returns IP addresses and ports and not hostnames. - -@advanced_1120_h3 -クラスタリングアルゴリズム�?�制�? - -@advanced_1121_p -# Read-only queries are only executed against the first cluster node, but all other statements are executed against all nodes. There is currently no load balancing made to avoid problems with transactions. The following functions may yield different results on different cluster nodes and must be executed with care: UUID(), RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND() [when not using a seed]. Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE). However, they can be used in read-only statements and the result can then be used for modifying statements. Using auto-increment and identity columns is currently not supported. Instead, sequence values need to be manually requested and then used to insert data (using two statements). - -@advanced_1122_p -# When using the cluster modes, result sets are read fully in memory by the client, so that there is no problem if the server dies that executed the query. Result sets must fit in memory on the client side. - -@advanced_1123_p -# The SQL statement SET AUTOCOMMIT FALSE is not supported in the cluster mode. To disable autocommit, the method Connection.setAutoCommit(false) needs to be called. - -@advanced_1124_p -# It is possible that a transaction from one connection overtakes a transaction from a different connection. Depending on the operations, this might result in different results, for example when conditionally incrementing a value in a row. 
- -@advanced_1125_h2 -2フェーズコミット - -@advanced_1126_p -# The two phase commit protocol is supported. 2-phase-commit works as follows: - -@advanced_1127_li -#Autocommit needs to be switched off - -@advanced_1128_li -#A transaction is started, for example by inserting a row - -@advanced_1129_li -#The transaction is marked 'prepared' by executing the SQL statement PREPARE COMMIT transactionName - -@advanced_1130_li -#The transaction can now be committed or rolled back - -@advanced_1131_li -#If a problem occurs before the transaction was successfully committed or rolled back (for example because a network problem occurred), the transaction is in the state 'in-doubt' - -@advanced_1132_li -#When re-connecting to the database, the in-doubt transactions can be listed with SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT - -@advanced_1133_li -#Each transaction in this list must now be committed or rolled back by executing COMMIT TRANSACTION transactionName or ROLLBACK TRANSACTION transactionName - -@advanced_1134_li -#The database needs to be closed and re-opened to apply the changes - -@advanced_1135_h2 -互�?�性 - -@advanced_1136_p -# This database is (up to a certain point) compatible to other databases such as HSQLDB, MySQL and PostgreSQL. There are certain areas where H2 is incompatible. - -@advanced_1137_h3 -オートコミット�?�ON�?�時�?�トランザクションコミット - -@advanced_1138_p -# At this time, this database engine commits a transaction (if autocommit is switched on) just before returning the result. For a query, this means the transaction is committed even before the application scans through the result set, and before the result set is closed. Other database engines may commit the transaction in this case when the result set is closed. - -@advanced_1139_h3 -キーワード / 予約語 - -@advanced_1140_p -# There is a list of keywords that can't be used as identifiers (table names, column names and so on), unless they are quoted (surrounded with double quotes). 
The list is currently: - -@advanced_1141_code -# CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, TRUE, UNION, UNIQUE, WHERE - -@advanced_1142_p -# Certain words of this list are keywords because they are functions that can be used without '()' for compatibility, for example CURRENT_TIMESTAMP. - -@advanced_1143_h2 -#Standards Compliance - -@advanced_1144_p -# This database tries to be as much standard compliant as possible. For the SQL language, ANSI/ISO is the main standard. There are several versions that refer to the release date: SQL-92, SQL:1999, and SQL:2003. Unfortunately, the standard documentation is not freely available. Another problem is that important features are not standardized. Whenever this is the case, this database tries to be compatible to other databases. - -@advanced_1145_h3 -#Supported Character Sets, Character Encoding, and Unicode - -@advanced_1146_p -# H2 internally uses Unicode, and supports all character encoding systems and character sets supported by the virtual machine you use. - -@advanced_1147_h2 -Windowsサービス�?��?��?�実行�?�る - -@advanced_1148_p -# Using a native wrapper / adapter, Java applications can be run as a Windows Service. There are various tools available to do that. The Java Service Wrapper from Tanuki Software, Inc. is included in the installation. Batch files are provided to install, start, stop and uninstall the H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application. The batch files are located in the directory h2/service. - -@advanced_1149_p -# The service wrapper bundled with H2 is a 32-bit version. To use a 64-bit version of Windows (x64), you need to use a 64-bit version of the wrapper, for example the one from Simon Krenger. 
- -@advanced_1150_p -# When running the database as a service, absolute path should be used. Using ~ in the database URL is problematic in this case, because it means to use the home directory of the current user. The service might run without or with the wrong user, so that the database files might end up in an unexpected place. - -@advanced_1151_h3 -サービスをインストールする - -@advanced_1152_p -# The service needs to be registered as a Windows Service first. To do that, double click on 1_install_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. - -@advanced_1153_h3 -サービスを起動する - -@advanced_1154_p -# You can start the H2 Database Engine Service using the service manager of Windows, or by double clicking on 2_start_service.bat. Please note that the batch file does not print an error message if the service is not installed. - -@advanced_1155_h3 -H2コンソールに接続する - -@advanced_1156_p -# After installing and starting the service, you can connect to the H2 Console application using a browser. Double clicking on 3_start_browser.bat to do that. The default port (8082) is hard coded in the batch file. - -@advanced_1157_h3 -サービスを終了する - -@advanced_1158_p -# To stop the service, double click on 4_stop_service.bat. Please note that the batch file does not print an error message if the service is not installed or started. - -@advanced_1159_h3 -サービスのアンインストール - -@advanced_1160_p -# To uninstall the service, double click on 5_uninstall_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. - -@advanced_1161_h3 -#Additional JDBC drivers - -@advanced_1162_p -# To use other databases (for example MySQL), the location of the JDBC drivers of those databases need to be added to the environment variables H2DRIVERS or CLASSPATH before installing the service. 
Multiple drivers can be set; each entry needs to be separated with a ; (Windows) or : (other operating systems). Spaces in the path names are supported. The settings must not be quoted. - -@advanced_1163_h2 -ODBCドライ�? - -@advanced_1164_p -# This database does not come with its own ODBC driver at this time, but it supports the PostgreSQL network protocol. Therefore, the PostgreSQL ODBC driver can be used. Support for the PostgreSQL network protocol is quite new and should be viewed as experimental. It should not be used for production applications. - -@advanced_1165_p -# To use the PostgreSQL ODBC driver on 64 bit versions of Windows, first run c:/windows/syswow64/odbcad32.exe. At this point you set up your DSN just like you would on any other system. See also: Re: ODBC Driver on Windows 64 bit - -@advanced_1166_h3 -ODBCインストール - -@advanced_1167_p -# First, the ODBC driver must be installed. Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*) or newer is recommended. The Windows version of the PostgreSQL ODBC driver is available at http://www.postgresql.org/ftp/odbc/versions/msi. - -@advanced_1168_h3 -サー�?ー�?�起動 - -@advanced_1169_p -# After installing the ODBC driver, start the H2 Server using the command line: - -@advanced_1170_p -# The PG Server (PG for PostgreSQL protocol) is started as well. By default, databases are stored in the current working directory where the server is started. Use -baseDir to save databases in another directory, for example the user home directory: - -@advanced_1171_p -# The PG server can be started and stopped from within a Java application as follows: - -@advanced_1172_p -# By default, only connections from localhost are allowed. To allow remote connections, use -pgAllowOthers when starting the server. - -@advanced_1173_p -# To map an ODBC database name to a different JDBC database name, use the option -key when starting the server. Please note only one mapping is allowed. 
The following will map the ODBC database named TEST to the database URL jdbc:h2:~/data/test;cipher=aes: - -@advanced_1174_h3 -ODBC設定 - -@advanced_1175_p -# After installing the driver, a new Data Source must be added. In Windows, run odbcad32.exe to open the Data Source Administrator. Then click on 'Add...' and select the PostgreSQL Unicode driver. Then click 'Finish'. You will be able to change the connection properties. The property column represents the property key in the odbc.ini file (which may be different from the GUI). - -@advanced_1176_th -プロパティ - -@advanced_1177_th -例 - -@advanced_1178_th -コメント - -@advanced_1179_td -Data Source - -@advanced_1180_td -#~/test;ifexists=true - -@advanced_1181_td -# The database name. This can include connections settings. By default, the database is stored in the current working directory where the Server is started except when the -baseDir setting is used. The name must be at least 3 characters. - -@advanced_1182_td -#Servername - -@advanced_1183_td -#~/test;ifexists=true - -@advanced_1184_td -# The database name. This can include connections settings. By default, the database is stored in the current working directory where the Server is started except when the -baseDir setting is used. The name must be at least 3 characters. - -@advanced_1185_td -#Servername - -@advanced_1186_td -#Username - -@advanced_1187_td -サーバー名またはIPアドレス - -@advanced_1188_td -デフォルトでは、リモート接続は許可されていません。 - -@advanced_1189_td -#Username - -@advanced_1190_td -#false (disabled) - -@advanced_1191_td -データベースのユーザー名 
- -@advanced_1192_td -#SSL - -@advanced_1193_td -#false (disabled) - -@advanced_1194_td -現時点では、SSLはサポートされていません。 - -@advanced_1195_td -Port - -@advanced_1196_td -5435 - -@advanced_1197_td -PGサーバーが傾聴しているポート - -@advanced_1198_td -Password - -@advanced_1199_td -sa - -@advanced_1200_td -データベースパスワード - -@advanced_1201_p -# To improve performance, please enable 'server side prepare' under Options / Datasource / Page 2 / Server side prepare. - -@advanced_1202_p -# Afterwards, you may use this data source. - -@advanced_1203_h3 -PGプロトコルサポートの制限 - -@advanced_1204_p -# At this time, only a subset of the PostgreSQL network protocol is implemented. Also, there may be compatibility problems on the SQL level, with the catalog, or with text encoding. Problems are fixed as they are found. Currently, statements can not be canceled when using the PG protocol. Also, H2 does not provide index meta over ODBC. - -@advanced_1205_p -# PostgreSQL ODBC Driver Setup requires a database password; that means it is not possible to connect to H2 databases without password. This is a limitation of the ODBC driver. - -@advanced_1206_h3 -#Using Microsoft Access - -@advanced_1207_p -# Currently, the PG Server does not support challenge response or encrypt passwords. This may be a problem if an attacker can listen to the data transferred between the ODBC driver and the server, because the password is readable to the attacker. Also, it is currently not possible to use encrypted SSL connections. Therefore the ODBC driver should not be used where security is important. - -@advanced_1208_p -# The first connection that opens a database using the PostgreSQL server needs to be an administrator user. Subsequent connections don't need to be opened by an administrator. 
- -@advanced_1209_h3 -#Using Microsoft Access - -@advanced_1210_p -# When using Microsoft Access to edit data in a linked H2 table, you may need to enable the following option: Tools - Options - Edit/Find - ODBC fields. - -@advanced_1211_h2 -#Using H2 in Microsoft .NET - -@advanced_1212_p -# The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. You can access a H2 database on .NET using the JDBC API, or using the ADO.NET interface. - -@advanced_1213_h3 -#Using the ADO.NET API on .NET - -@advanced_1214_p -# An implementation of the ADO.NET interface is available in the open source project H2Sharp. - -@advanced_1215_h3 -#Using the JDBC API on .NET - -@advanced_1216_li -#Install the .NET Framework from Microsoft. Mono has not yet been tested. - -@advanced_1217_li -#Install IKVM.NET. - -@advanced_1218_li -#Copy the h2*.jar file to ikvm/bin - -@advanced_1219_li -#Run the H2 Console using: ikvm -jar h2*.jar - -@advanced_1220_li -#Convert the H2 Console to an .exe file using: ikvmc -target:winexe h2*.jar. You may ignore the warnings. - -@advanced_1221_li -#Create a .dll file using (change the version accordingly): ikvmc.exe -target:library -version:1.0.69.0 h2*.jar - -@advanced_1222_p -# If you want your C# application use H2, you need to add the h2.dll and the IKVM.OpenJDK.ClassLibrary.dll to your C# solution. Here some sample code: - -@advanced_1223_h2 -ACID - -@advanced_1224_p -# In the database world, ACID stands for: - -@advanced_1225_li -#Atomicity: transactions must be atomic, meaning either all tasks are performed or none. - -@advanced_1226_li -#Consistency: all operations must comply with the defined constraints. - -@advanced_1227_li -#Isolation: transactions must be isolated from each other. - -@advanced_1228_li -#Durability: committed transaction will not be lost. - -@advanced_1229_h3 -Atomicity (原�?性) - -@advanced_1230_p -# Transactions in this database are always atomic. 
- -@advanced_1231_h3 -Consistency (一貫性) - -@advanced_1232_p -# By default, this database is always in a consistent state. Referential integrity rules are enforced except when explicitly disabled. - -@advanced_1233_h3 -Isolation (独立性 / 分離性) - -@advanced_1234_p -# For H2, as with most other database systems, the default isolation level is 'read committed'. This provides better performance, but also means that transactions are not completely isolated. H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'. - -@advanced_1235_h3 -Durability (永続性) - -@advanced_1236_p -# This database does not guarantee that all committed transactions survive a power failure. Tests show that all databases sometimes lose transactions on power failure (for details, see below). Where losing transactions is not acceptable, a laptop or UPS (uninterruptible power supply) should be used. If durability is required for all possible cases of hardware failure, clustering should be used, such as the H2 clustering mode. - -@advanced_1237_h2 -永続性問題 - -@advanced_1238_p -# Complete durability means all committed transaction survive a power failure. Some databases claim they can guarantee durability, but such claims are wrong. A durability test was run against H2, HSQLDB, PostgreSQL, and Derby. All of those databases sometimes lose committed transactions. The test is included in the H2 download, see org.h2.test.poweroff.Test. - -@advanced_1239_h3 -永続性を実現する (しない) 方法 - -@advanced_1240_p -# Making sure that committed transactions are not lost is more complicated than it seems first. To guarantee complete durability, a database must ensure that the log record is on the hard drive before the commit call returns. To do that, databases use different methods. One is to use the 'synchronous write' file access mode. 
In Java, RandomAccessFile supports the modes rws and rwd: - -@advanced_1241_code -#rwd - -@advanced_1242_li -#: every update to the file's content is written synchronously to the underlying storage device. - -@advanced_1243_code -#rws - -@advanced_1244_li -#: in addition to rwd, every update to the metadata is written synchronously. - -@advanced_1245_p -# A test (org.h2.test.poweroff.TestWrite) with one of those modes achieves around 50 thousand write operations per second. Even when the operating system write buffer is disabled, the write rate is around 50 thousand operations per second. This feature does not force changes to disk because it does not flush all buffers. The test updates the same byte in the file again and again. If the hard drive was able to write at this rate, then the disk would need to make at least 50 thousand revolutions per second, or 3 million RPM (revolutions per minute). There are no such hard drives. The hard drive used for the test is about 7200 RPM, or about 120 revolutions per second. There is an overhead, so the maximum write rate must be lower than that. - -@advanced_1246_p -# Calling fsync flushes the buffers. There are two ways to do that in Java: - -@advanced_1247_code -#FileDescriptor.sync() - -@advanced_1248_li -#. The documentation says that this forces all system buffers to synchronize with the underlying device. This method is supposed to return after all in-memory modified copies of buffers associated with this file descriptor have been written to the physical medium. - -@advanced_1249_code -#FileChannel.force() - -@advanced_1250_li -#. This method is supposed to force any updates to this channel's file to be written to the storage device that contains it. - -@advanced_1251_p -# By default, MySQL calls fsync for each commit. When using one of those methods, only around 60 write operations per second can be achieved, which is consistent with the RPM rate of the hard drive used. 
Unfortunately, even when calling FileDescriptor.sync() or FileChannel.force(), data is not always persisted to the hard drive, because most hard drives do not obey fsync(): see Your Hard Drive Lies to You. In Mac OS X, fsync does not flush hard drive buffers. See Bad fsync?. So the situation is confusing, and tests prove there is a problem. - -@advanced_1252_p -# Trying to flush hard drive buffers is hard, and if you do the performance is very bad. First you need to make sure that the hard drive actually flushes all buffers. Tests show that this can not be done in a reliable way. Then the maximum number of transactions is around 60 per second. Because of those reasons, the default behavior of H2 is to delay writing committed transactions. - -@advanced_1253_p -# In H2, after a power failure, a bit more than one second of committed transactions may be lost. To change the behavior, use SET WRITE_DELAY and CHECKPOINT SYNC. Most other databases support commit delay as well. In the performance comparison, commit delay was used for all databases that support it. - -@advanced_1254_h3 -永続性テストを実行�?�る - -@advanced_1255_p -# To test the durability / non-durability of this and other databases, you can use the test application in the package org.h2.test.poweroff. Two computers with network connection are required to run this test. One computer just listens, while the test application is run (and power is cut) on the other computer. The computer with the listener application opens a TCP/IP port and listens for an incoming connection. The second computer first connects to the listener, and then created the databases and starts inserting records. The connection is set to 'autocommit', which means after each inserted record a commit is performed automatically. Afterwards, the test computer notifies the listener that this record was inserted successfully. The listener computer displays the last inserted record number every 10 seconds. 
Now, switch off the power manually, then restart the computer, and run the application again. You will find out that in most cases, none of the databases contains all the records that the listener computer knows about. For details, please consult the source code of the listener and test application. - -@advanced_1256_h2 -リカ�?ーツールを使用�?�る - -@advanced_1257_p -# The Recover tool can be used to extract the contents of a database file, even if the database is corrupted. It also extracts the content of the transaction log and large objects (CLOB or BLOB). To run the tool, type on the command line: - -@advanced_1258_p -# For each database in the current directory, a text file will be created. This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript tool or a RUNSCRIPT FROM SQL statement. The script includes at least one CREATE USER statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script. - -@advanced_1259_p -# The Recover tool creates a SQL script from database file. It also processes the transaction log. - -@advanced_1260_p -# To verify the database can recover at any time, append ;RECOVER_TEST=64 to the database URL in your test environment. This will simulate an application crash after each 64 writes to the database file. A log file named databaseName.h2.db.log is created that lists the operations. The recovery is tested using an in-memory file system, that means it may require a larger heap setting. - -@advanced_1261_h2 -ファイルロックプロトコル - -@advanced_1262_p -# Multiple concurrent connections to the same database are supported, however a database file can only be open for reading and writing (in embedded mode) by one process at the same time. 
Otherwise, the processes would overwrite each others data and corrupt the database file. To protect against this problem, whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database stops normally, this lock file is deleted. - -@advanced_1263_p -# In special cases (if the process did not terminate normally, for example because there was a power failure), the lock file is not deleted by the process that created it. That means the existence of the lock file is not a safe protocol for file locking. However, this software uses a challenge-response protocol to protect the database files. There are two methods (algorithms) implemented to provide both security (that is, the same database files cannot be opened by two processes at the same time) and simplicity (that is, the lock file does not need to be deleted manually by the user). The two methods are 'file method' and 'socket methods'. - -@advanced_1264_p -# The file locking protocols (except the file locking method 'FS') have the following limitation: if a shared file system is used, and the machine with the lock owner is sent to sleep (standby or hibernate), another machine may take over. If the machine that originally held the lock wakes up, the database may become corrupt. If this situation can occur, the application must ensure the database is closed when the application is put to sleep. - -@advanced_1265_h3 -ファイルロックメソッド "File" - -@advanced_1266_p -# The default method for database file locking for version 1.3 and older is the 'File Method'. The algorithm is: - -@advanced_1267_li -#If the lock file does not exist, it is created (using the atomic operation File.createNewFile). Then, the process waits a little bit (20 ms) and checks the file again. If the file was changed during this time, the operation is aborted. 
This protects against a race condition when one process deletes the lock file just after another one create it, and a third process creates the file again. It does not occur if there are only two writers. - -@advanced_1268_li -# If the file can be created, a random number is inserted together with the locking method ('file'). Afterwards, a watchdog thread is started that checks regularly (every second once by default) if the file was deleted or modified by another (challenger) thread / process. Whenever that occurs, the file is overwritten with the old data. The watchdog thread runs with high priority so that a change to the lock file does not get through undetected even if the system is very busy. However, the watchdog thread does use very little resources (CPU time), because it waits most of the time. Also, the watchdog only reads from the hard disk and does not write to it. - -@advanced_1269_li -# If the lock file exists and was recently modified, the process waits for some time (up to two seconds). If it was still changed, an exception is thrown (database is locked). This is done to eliminate race conditions with many concurrent writers. Afterwards, the file is overwritten with a new version (challenge). After that, the thread waits for 2 seconds. If there is a watchdog thread protecting the file, he will overwrite the change and this process will fail to lock the database. However, if there is no watchdog thread, the lock file will still be as written by this thread. In this case, the file is deleted and atomically created again. The watchdog thread is started in this case and the file is locked. - -@advanced_1270_p -# This algorithm is tested with over 100 concurrent threads. In some cases, when there are many concurrent threads trying to lock the database, they block each other (meaning the file cannot be locked by any of them) for some time. However, the file never gets locked by two threads at the same time. 
However using that many concurrent threads / processes is not the common use case. Generally, an application should throw an error to the user if it cannot open a database, and not try again in a (fast) loop. - -@advanced_1271_h3 -ファイルロックメソッド "Socket" - -@advanced_1272_p -# There is a second locking mechanism implemented, but disabled by default. To use it, append ;FILE_LOCK=SOCKET to the database URL. The algorithm is: - -@advanced_1273_li -#If the lock file does not exist, it is created. Then a server socket is opened on a defined port, and kept open. The port and IP address of the process that opened the database is written into the lock file. - -@advanced_1274_li -#If the lock file exists, and the lock method is 'file', then the software switches to the 'file' method. - -@advanced_1275_li -#If the lock file exists, and the lock method is 'socket', then the process checks if the port is in use. If the original process is still running, the port is in use and this process throws an exception (database is in use). If the original process died (for example due to a power failure, or abnormal termination of the virtual machine), then the port was released. The new process deletes the lock file and starts again. - -@advanced_1276_p -# This method does not require a watchdog thread actively polling (reading) the same file every second. The problem with this method is, if the file is stored on a network share, two processes (running on different computers) could still open the same database files, if they do not have a direct TCP/IP connection. - -@advanced_1277_h3 -#File Locking Method 'FS' - -@advanced_1278_p -# This is the default mode for version 1.4 and newer. This database file locking mechanism uses native file system lock on the database file. No *.lock.db file is created in this case, and no background thread is started. This mechanism may not work on all systems as expected. 
Some systems allow locking the same file multiple times within the same virtual machine, and on some systems native file locking is not supported or files are not unlocked after a power failure. - -@advanced_1279_p -# To enable this feature, append ;FILE_LOCK=FS to the database URL. - -@advanced_1280_p -# This feature is relatively new. When using it for production, please ensure your system does in fact lock files as expected. - -@advanced_1281_h2 -パスワードを使用する - -@advanced_1282_h3 -安全なパスワードを使用する - -@advanced_1283_p -# Remember that weak passwords can be broken regardless of the encryption and security protocols. Don't use passwords that can be found in a dictionary. Appending numbers does not make passwords secure. A way to create good passwords that can be remembered is: take the first letters of a sentence, use upper and lower case characters, and creatively include special characters (but it's more important to use a long password than to use special characters). Example: - -@advanced_1284_code -#i'sE2rtPiUKtT - -@advanced_1285_p -# from the sentence it's easy to remember this password if you know the trick. - -@advanced_1286_h3 -パスワード: Stringの代わりにChar Arraysを使用する - -@advanced_1287_p -# Java strings are immutable objects and cannot be safely 'destroyed' by the application. After creating a string, it will remain in the main memory of the computer at least until it is garbage collected. The garbage collection cannot be controlled by the application, and even if it is garbage collected the data may still remain in memory. It might also be possible that the part of memory containing the password is swapped to disk (if not enough main memory is available), which is a problem if the attacker has access to the swap file of the operating system. - -@advanced_1288_p -# It is a good idea to use char arrays instead of strings for passwords. Char arrays can be cleared (filled with zeros) after use, and therefore the password will not be stored in the swap file. 
- -@advanced_1289_p -# This database supports using char arrays instead of string to pass user and file passwords. The following code can be used to do that: - -@advanced_1290_p -# This example requires Java 1.6. When using Swing, use javax.swing.JPasswordField. - -@advanced_1291_h3 -ユーザー�?? �?� (�?��?��?�) パスワードをURL�?��?証�?�る - -@advanced_1292_p -# Instead of passing the user name as a separate parameter as in Connection conn = DriverManager. getConnection("jdbc:h2:~/test", "sa", "123"); the user name (and/or password) can be supplied in the URL itself: Connection conn = DriverManager. getConnection("jdbc:h2:~/test;USER=sa;PASSWORD=123"); The settings in the URL override the settings passed as a separate parameter. - -@advanced_1293_h2 -#Password Hash - -@advanced_1294_p -# Sometimes the database password needs to be stored in a configuration file (for example in the web.xml file). In addition to connecting with the plain text password, this database supports connecting with the password hash. This means that only the hash of the password (and not the plain text password) needs to be stored in the configuration file. This will only protect others from reading or re-constructing the plain text password (even if they have access to the configuration file); it does not protect others from accessing the database using the password hash. - -@advanced_1295_p -# To connect using the password hash instead of plain text password, append ;PASSWORD_HASH=TRUE to the database URL, and replace the password with the password hash. To calculate the password hash from a plain text password, run the following command within the H2 Console tool: @password_hash <upperCaseUserName> <password>. As an example, if the user name is sa and the password is test, run the command @password_hash SA test. Then use the resulting password hash as you would use the plain text password. When using an encrypted database, then the user password and file password need to be hashed separately. 
To calculate the hash of the file password, run: @password_hash file <filePassword>. - -@advanced_1296_h2 -SQLインジェクションに対する防御 - -@advanced_1297_h3 -SQLインジェクションとは - -@advanced_1298_p -# This database engine provides a solution for the security vulnerability known as 'SQL Injection'. Here is a short description of what SQL injection means. Some applications build SQL statements with embedded user input such as: - -@advanced_1299_p -# If this mechanism is used anywhere in the application, and user input is not correctly filtered or encoded, it is possible for a user to inject SQL functionality or statements by using specially built input such as (in this example) this password: ' OR ''='. In this case the statement becomes: - -@advanced_1300_p -# Which is always true no matter what the password stored in the database is. For more information about SQL Injection, see Glossary and Links. - -@advanced_1301_h3 -リテラルを無効にする - -@advanced_1302_p -# SQL Injection is not possible if user input is not directly embedded in SQL statements. A simple solution for the problem above is to use a prepared statement: - -@advanced_1303_p -# This database provides a way to enforce usage of parameters when passing user input to the database. This is done by disabling embedded literals in SQL statements. To do this, execute the statement: - -@advanced_1304_p -# Afterwards, SQL statements with text and number literals are not allowed any more. That means, SQL statements of the form WHERE NAME='abc' or WHERE CustomerId=10 will fail. It is still possible to use prepared statements and parameters as described above. Also, it is still possible to generate SQL statements dynamically, and use the Statement API, as long as the SQL statements do not include literals. There is also a second mode where number literals are allowed: SET ALLOW_LITERALS NUMBERS. To allow all literals, execute SET ALLOW_LITERALS ALL (this is the default setting). 
Literals can only be enabled or disabled by an administrator. - -@advanced_1305_h3 -定数を使用する - -@advanced_1306_p -# Disabling literals also means disabling hard-coded 'constant' literals. This database supports defining constants using the CREATE CONSTANT command. Constants can be defined only when literals are enabled, but used even when literals are disabled. To avoid name clashes with column names, constants can be defined in other schemas: - -@advanced_1307_p -# Even when literals are enabled, it is better to use constants instead of hard-coded number or text literals in queries or views. With constants, typos are found at compile time, the source code is easier to understand and change. - -@advanced_1308_h3 -ZERO() 関数を使用する - -@advanced_1309_p -# It is not required to create a constant for the number 0 as there is already a built-in function ZERO(): - -@advanced_1310_h2 -#Protection against Remote Access - -@advanced_1311_p -# By default this database does not allow connections from other machines when starting the H2 Console, the TCP server, or the PG server. Remote access can be enabled using the command line options -webAllowOthers, -tcpAllowOthers, -pgAllowOthers. - -@advanced_1312_p -# If you enable remote access using -tcpAllowOthers or -pgAllowOthers, please also consider using the options -baseDir, -ifExists, so that remote users cannot create new databases or access existing databases with weak passwords. When using the option -baseDir, only databases within that directory may be accessed. Ensure the existing accessible databases are protected using strong passwords. - -@advanced_1313_p -# If you enable remote access using -webAllowOthers, please ensure the web server can only be accessed from trusted networks. The options -baseDir, -ifExists don't protect access to the tools section, prevent remote shutdown of the web server, changes to the preferences, the saved connection settings, or access to other databases accessible from the system. 
- -@advanced_1314_h2 -#Restricting Class Loading and Usage - -@advanced_1315_p -# By default there is no restriction on loading classes and executing Java code for admins. That means an admin may call system functions such as System.setProperty by executing: - -@advanced_1316_p -# To restrict users (including admins) from loading classes and executing code, the list of allowed classes can be set in the system property h2.allowedClasses in the form of a comma separated list of classes or patterns (items ending with *). By default all classes are allowed. Example: - -@advanced_1317_p -# This mechanism is used for all user classes, including database event listeners, trigger classes, user-defined functions, user-defined aggregate functions, and JDBC driver classes (with the exception of the H2 driver) when using the H2 Console. - -@advanced_1318_h2 -セキュリティプロトコル - -@advanced_1319_p -# The following paragraphs document the security protocols used in this database. These descriptions are very technical and only intended for security experts that already know the underlying security primitives. - -@advanced_1320_h3 -ユーザーパスワードの暗号化 - -@advanced_1321_p -# When a user tries to connect to a database, the combination of user name, @, and password are hashed using SHA-256, and this hash value is transmitted to the database. This step does not protect against an attacker that re-uses the value if he is able to listen to the (unencrypted) transmission between the client and the server. But, the passwords are never transmitted as plain text, even when using an unencrypted connection between client and server. That means if a user reuses the same password for different things, this password is still protected up to some point. See also 'RFC 2617 - HTTP Authentication: Basic and Digest Access Authentication' for more information. - -@advanced_1322_p -# When a new database or user is created, a new random salt value is generated. The size of the salt is 64 bits. 
Using the random salt reduces the risk of an attacker pre-calculating hash values for many different (commonly used) passwords. - -@advanced_1323_p -# The combination of user-password hash value (see above) and salt is hashed using SHA-256. The resulting value is stored in the database. When a user tries to connect to the database, the database combines user-password hash value with the stored salt value and calculates the hash value. Other products use multiple iterations (hash the hash value again and again), but this is not done in this product to reduce the risk of denial of service attacks (where the attacker tries to connect with bogus passwords, and the server spends a lot of time calculating the hash value for each password). The reasoning is: if the attacker has access to the hashed passwords, he also has access to the data in plain text, and therefore does not need the password any more. If the data is protected by storing it on another computer and only accessible remotely, then the iteration count is not required at all. - -@advanced_1324_h3 -ファイル暗号化 - -@advanced_1325_p -# The database files can be encrypted using the AES-128 algorithm. - -@advanced_1326_p -# When a user tries to connect to an encrypted database, the combination of file@ and the file password is hashed using SHA-256. This hash value is transmitted to the server. - -@advanced_1327_p -# When a new database file is created, a new cryptographically secure random salt value is generated. The size of the salt is 64 bits. The combination of the file password hash and the salt value is hashed 1024 times using SHA-256. The reason for the iteration is to make it harder for an attacker to calculate hash values for common passwords. - -@advanced_1328_p -# The resulting hash value is used as the key for the block cipher algorithm. Then, an initialization vector (IV) key is calculated by hashing the key again using SHA-256. This is to make sure the IV is unknown to the attacker. 
The reason for using a secret IV is to protect against watermark attacks. - -@advanced_1329_p -# Before saving a block of data (each block is 8 bytes long), the following operations are executed: first, the IV is calculated by encrypting the block number with the IV key (using the same block cipher algorithm). This IV is combined with the plain text using XOR. The resulting data is encrypted using the AES-128 algorithm. - -@advanced_1330_p -# When decrypting, the operation is done in reverse. First, the block is decrypted using the key, and then the IV is calculated combined with the decrypted text using XOR. - -@advanced_1331_p -# Therefore, the block cipher mode of operation is CBC (cipher-block chaining), but each chain is only one block long. The advantage over the ECB (electronic codebook) mode is that patterns in the data are not revealed, and the advantage over multi block CBC is that flipped cipher text bits are not propagated to flipped plaintext bits in the next block. - -@advanced_1332_p -# Database encryption is meant for securing the database while it is not in use (stolen laptop and so on). It is not meant for cases where the attacker has access to files while the database is in use. When he has write access, he can for example replace pieces of files with pieces of older versions and manipulate data like this. - -@advanced_1333_p -# File encryption slows down the performance of the database engine. Compared to unencrypted mode, database operations take about 2.5 times longer using AES (embedded mode). - -@advanced_1334_h3 -#Wrong Password / User Name Delay - -@advanced_1335_p -# To protect against remote brute force password attacks, the delay after each unsuccessful login gets double as long. Use the system properties h2.delayWrongPasswordMin and h2.delayWrongPasswordMax to change the minimum (the default is 250 milliseconds) or maximum delay (the default is 4000 milliseconds, or 4 seconds). The delay only applies for those using the wrong password. 
Normally there is no delay for a user that knows the correct password, with one exception: after using the wrong password, there is a delay of up to (randomly distributed) the same delay as for a wrong password. This is to protect against parallel brute force attacks, so that an attacker needs to wait for the whole delay. Delays are synchronized. This is also required to protect against parallel attacks. - -@advanced_1336_p -# There is only one exception message for both wrong user and for wrong password, to make it harder to get the list of user names. It is not possible from the stack trace to see if the user name was wrong or the password. - -@advanced_1337_h3 -HTTPS 接続 - -@advanced_1338_p -# The web server supports HTTP and HTTPS connections using SSLServerSocket. There is a default self-certified certificate to support an easy starting point, but custom certificates are supported as well. - -@advanced_1339_h2 -#TLS Connections - -@advanced_1340_p -# Remote TLS connections are supported using the Java Secure Socket Extension (SSLServerSocket, SSLSocket). By default, anonymous TLS is enabled. - -@advanced_1341_p -# To use your own keystore, set the system properties javax.net.ssl.keyStore and javax.net.ssl.keyStorePassword before starting the H2 server and client. See also Customizing the Default Key and Trust Stores, Store Types, and Store Passwords for more information. - -@advanced_1342_p -# To disable anonymous TLS, set the system property h2.enableAnonymousTLS to false. - -@advanced_1343_h2 -汎用一意識別子 (UUID) - -@advanced_1344_p -# This database supports UUIDs. Also supported is a function to create new UUIDs using a cryptographically strong pseudo random number generator. With random UUIDs, the chance of two having the same value can be calculated using the probability theory. See also 'Birthday Paradox'. Standardized randomly generated UUIDs have 122 random bits. 
4 bits are used for the version (Randomly generated UUID), and 2 bits for the variant (Leach-Salz). This database supports generating such UUIDs using the built-in function RANDOM_UUID() or UUID(). Here is a small program to estimate the probability of having two identical UUIDs after generating a number of values: - -@advanced_1345_p -# Some values are: - -@advanced_1346_th -#Number of UUIs - -@advanced_1347_th -#Probability of Duplicates - -@advanced_1348_td -#2^36=68'719'476'736 - -@advanced_1349_td -#0.000'000'000'000'000'4 - -@advanced_1350_td -#2^41=2'199'023'255'552 - -@advanced_1351_td -#0.000'000'000'000'4 - -@advanced_1352_td -#2^46=70'368'744'177'664 - -@advanced_1353_td -#0.000'000'000'4 - -@advanced_1354_p -# To help non-mathematicians understand what those numbers mean, here a comparison: one's annual risk of being hit by a meteorite is estimated to be one chance in 17 billion, that means the probability is about 0.000'000'000'06. - -@advanced_1355_h2 -#Spatial Features - -@advanced_1356_p -# H2 supports the geometry data type and spatial indexes if the JTS Topology Suite is in the classpath. To run the H2 Console tool with the JTS tool, you need to download the JTS-CORE 1.14.0 jar file and place it in the h2 bin directory. Then edit the h2.sh file as follows: - -@advanced_1357_p -# Here is an example SQL script to create a table with a spatial column and index: - -@advanced_1358_p -# To query the table using geometry envelope intersection, use the operation &&, as in PostGIS: - -@advanced_1359_p -# You can verify that the spatial index is used using the "explain plan" feature: - -@advanced_1360_p -# For persistent databases, the spatial index is stored on disk; for in-memory databases, the index is kept in memory. - -@advanced_1361_h2 -#Recursive Queries - -@advanced_1362_p -# H2 has experimental support for recursive queries using so called "common table expressions" (CTE). 
Examples: - -@advanced_1363_p -# Limitations: Recursive queries need to be of the type UNION ALL, and the recursion needs to be on the second part of the query. No tables or views with the name of the table expression may exist. Different table expression names need to be used when using multiple distinct table expressions within the same transaction and for the same session. All columns of the table expression are of type VARCHAR, and may need to be cast to the required data type. Views with recursive queries are not supported. Subqueries and INSERT INTO ... FROM with recursive queries are not supported. Parameters are only supported within the last SELECT statement (a workaround is to use session variables like @start within the table expression). The syntax is: - -@advanced_1364_h2 -#Setting the Server Bind Address - -@advanced_1365_p -# Some settings of the database can be set on the command line using -DpropertyName=value. It is usually not required to change those settings manually. The settings are case sensitive. Example: - -@advanced_1366_p -# The current value of the settings can be read in the table INFORMATION_SCHEMA.SETTINGS. - -@advanced_1367_p -# For a complete list of settings, see SysProperties. - -@advanced_1368_h2 -#Setting the Server Bind Address - -@advanced_1369_p -# Usually server sockets accept connections on any/all local addresses. This may be a problem on multi-homed hosts. To bind only to one address, use the system property h2.bindAddress. This setting is used for both regular server sockets and for TLS server sockets. IPv4 and IPv6 address formats are supported. - -@advanced_1370_h2 -#Pluggable File System - -@advanced_1371_p -# This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. 
The following file systems are included: - -@advanced_1372_code -#zip: - -@advanced_1373_li -# read-only zip-file based file system. Format: zip:/zipFileName!/fileName. - -@advanced_1374_code -#split: - -@advanced_1375_li -# file system that splits files in 1 GB files (stackable with other file systems). - -@advanced_1376_code -#nio: - -@advanced_1377_li -# file system that uses FileChannel instead of RandomAccessFile (faster in some operating systems). - -@advanced_1378_code -#nioMapped: - -@advanced_1379_li -# file system that uses memory mapped files (faster in some operating systems). Please note that there currently is a file size limitation of 2 GB when using this file system. To work around this limitation, combine it with the split file system: split:nioMapped:test. - -@advanced_1380_code -#memFS: - -@advanced_1381_li -# in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself). - -@advanced_1382_code -#memLZF: - -@advanced_1383_li -# compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself). - -@advanced_1384_code -#nioMemFS: - -@advanced_1385_li -# stores data outside of the VM's heap - useful for large memory DBs without incurring GC costs. - -@advanced_1386_code -#nioMemLZF: - -@advanced_1387_li -# stores compressed data outside of the VM's heap - useful for large memory DBs without incurring GC costs. Use "nioMemLZF:12:" to tweak the % of blocks that are stored uncompressed. If you size this to your working set correctly, compressed storage is roughly the same performance as uncompressed. The default value is 1%. - -@advanced_1388_p -# As an example, to use the the nio file system, use the following database URL: jdbc:h2:nio:~/test. - -@advanced_1389_p -# To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase, and call the method FilePath.register before using it. 
- -@advanced_1390_p -# For input streams (but not for random access files), URLs may be used in addition to the registered file systems. Example: jar:file:///c:/temp/example.zip!/org/example/nested.csv. To read a stream from the classpath, use the prefix classpath:, as in classpath:/org/h2/samples/newsfeed.sql. - -@advanced_1391_h2 -#Split File System - -@advanced_1392_p -# The file system prefix split: is used to split logical files into multiple physical files, for example so that a database can get larger than the maximum file system size of the operating system. If the logical file is larger than the maximum file size, then the file is split as follows: - -@advanced_1393_code -#<fileName> - -@advanced_1394_li -# (first block, is always created) - -@advanced_1395_code -#<fileName>.1.part - -@advanced_1396_li -# (second block) - -@advanced_1397_p -# More physical files (*.2.part, *.3.part) are automatically created / deleted if needed. The maximum physical file size of a block is 2^30 bytes, which is also called 1 GiB or 1 GB. However this can be changed if required, by specifying the block size in the file name. The file name format is: split:<x>:<fileName> where the file size per block is 2^x. For 1 MiB block sizes, use x = 20 (because 2^20 is 1 MiB). The following file name means the logical file is split into 1 MiB blocks: split:20:test.h2.db. An example database URL for this case is jdbc:h2:split:20:~/test. - -@advanced_1398_h2 -データベース�?�アップグレー - -@advanced_1399_p -# In version 1.2, H2 introduced a new file store implementation which is incompatible to the one used in versions < 1.2. To automatically convert databases to the new file store, it is necessary to include an additional jar file. The file can be found at http://h2database.com/h2mig_pagestore_addon.jar . If this file is in the classpath, every connect to an older database will result in a conversion process. 
- -@advanced_1400_p -# The conversion itself is done internally via 'script to' and 'runscript from'. After the conversion process, the files will be renamed from - -@advanced_1401_code -#dbName.data.db - -@advanced_1402_li -# to dbName.data.db.backup - -@advanced_1403_code -#dbName.index.db - -@advanced_1404_li -# to dbName.index.db.backup - -@advanced_1405_p -# by default. Also, the temporary script will be written to the database directory instead of a temporary directory. Both defaults can be customized via - -@advanced_1406_code -#org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean) - -@advanced_1407_code -#org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean) - -@advanced_1408_p -# prior opening a database connection. - -@advanced_1409_p -# Since version 1.2.140 it is possible to let the old h2 classes (v 1.2.128) connect to the database. The automatic upgrade .jar file must be present, and the URL must start with jdbc:h2v1_1: (the JDBC driver class is org.h2.upgrade.v1_1.Driver). If the database should automatically connect using the old version if a database with the old format exists (without upgrade), and use the new version otherwise, then append ;NO_UPGRADE=TRUE to the database URL. Please note the old driver did not process the system property "h2.baseDir" correctly, so that using this setting is not supported when upgrading. - -@advanced_1410_h2 -#Java Objects Serialization - -@advanced_1411_p -# Java objects serialization is enabled by default for columns of type OTHER, using standard Java serialization/deserialization semantics. - -@advanced_1412_p -# To disable this feature set the system property h2.serializeJavaObject=false (default: true). 
- -@advanced_1413_p -# Serialization and deserialization of java objects is customizable both at system level and at database level providing a JavaObjectSerializer implementation: - -@advanced_1414_li -# At system level set the system property h2.javaObjectSerializer with the Fully Qualified Name of the JavaObjectSerializer interface implementation. It will be used over the entire JVM session to (de)serialize java objects being stored in column of type OTHER. Example h2.javaObjectSerializer=com.acme.SerializerClassName. - -@advanced_1415_li -# At database level execute the SQL statement SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName' or append ;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName' to the database URL: jdbc:h2:~/test;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'. - -@advanced_1416_p -# Please note that this SQL statement can only be executed before any tables are defined. - -@advanced_1417_h2 -#Custom Data Types Handler API - -@advanced_1418_p -# It is possible to extend the type system of the database by providing your own implementation of minimal required API basically consisting of type identification and conversion routines. - -@advanced_1419_p -# In order to enable this feature, set the system property h2.customDataTypesHandler (default: null) to the fully qualified name of the class providing CustomDataTypesHandler interface implementation. - -@advanced_1420_p -# The instance of that class will be created by H2 and used to: - -@advanced_1421_li -#resolve the names and identifiers of extrinsic data types. - -@advanced_1422_li -#convert values of extrinsic data types to and from values of built-in types. - -@advanced_1423_li -#provide order of the data types. - -@advanced_1424_p -#This is a system-level setting, i.e. affects all the databases. 
- -@advanced_1425_b -#Note: - -@advanced_1426_p -#Please keep in mind that this feature may not possibly provide the same ABI stability level as other features as it exposes many of the H2 internals. You may be required to update your code occasionally due to internal changes in H2 if you are going to use this feature. - -@advanced_1427_h2 -#Limits and Limitations - -@advanced_1428_p -# This database has the following known limitations: - -@advanced_1429_li -#Database file size limit: 4 TB (using the default page size of 2 KB) or higher (when using a larger page size). This limit is including CLOB and BLOB data. - -@advanced_1430_li -#The maximum file size for FAT or FAT32 file systems is 4 GB. That means when using FAT or FAT32, the limit is 4 GB for the data. This is the limitation of the file system. The database does provide a workaround for this problem, it is to use the file name prefix split:. In that case files are split into files of 1 GB by default. An example database URL is: jdbc:h2:split:~/test. - -@advanced_1431_li -#The maximum number of rows per table is 2^64. - -@advanced_1432_li -#The maximum number of open transactions is 65535. - -@advanced_1433_li -#Main memory requirements: The larger the database, the more main memory is required. With the current storage mechanism (the page store), the minimum main memory required is around 1 MB for each 8 GB database file size. - -@advanced_1434_li -#Limit on the complexity of SQL statements. 
Statements of the following form will result in a stack overflow exception: - -@advanced_1435_li -#There is no limit for the following entities, except the memory and storage capacity: maximum identifier length (table name, column name, and so on); maximum number of tables, columns, indexes, triggers, and other database objects; maximum statement length, number of parameters per statement, tables per statement, expressions in order by, group by, having, and so on; maximum rows per query; maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; maximum row length, index row length, select row length; maximum length of a varchar column, decimal column, literal in a statement. - -@advanced_1436_li -#Querying from the metadata tables is slow if there are many tables (thousands). - -@advanced_1437_li -#For limitations on data types, see the documentation of the respective Java data type or the data type documentation of this database. - -@advanced_1438_h2 -用語集とリンク - -@advanced_1439_th -用語 - -@advanced_1440_th -説明 - -@advanced_1441_td -AES-128 - -@advanced_1442_td -#A block encryption algorithm. See also: Wikipedia: AES - -@advanced_1443_td -Birthday Paradox - -@advanced_1444_td -#Describes the higher than expected probability that two persons in a room have the same birthday. Also valid for randomly generated UUIDs. See also: Wikipedia: Birthday Paradox - -@advanced_1445_td -Digest - -@advanced_1446_td -#Protocol to protect a password (but not to protect data). See also: RFC 2617: HTTP Digest Access Authentication - -@advanced_1447_td -GCJ - -@advanced_1448_td -#Compiler for Java. GNU Compiler for the Java and NativeJ (commercial) - -@advanced_1449_td -HTTPS - -@advanced_1450_td -#A protocol to provide security to HTTP connections. 
See also: RFC 2818: HTTP Over TLS - -@advanced_1451_td -Modes of Operation - -@advanced_1452_a -#Wikipedia: Block cipher modes of operation - -@advanced_1453_td -Salt - -@advanced_1454_td -#Random number to increase the security of passwords. See also: Wikipedia: Key derivation function - -@advanced_1455_td -SHA-256 - -@advanced_1456_td -#A cryptographic one-way hash function. See also: Wikipedia: SHA hash functions - -@advanced_1457_td -SQLインジェクション - -@advanced_1458_td -#A security vulnerability where an application embeds SQL statements or expressions in user input. See also: Wikipedia: SQL Injection - -@advanced_1459_td -Watermark Attack (透かし攻撃) - -@advanced_1460_td -#Security problem of certain encryption programs where the existence of certain data can be proven without decrypting. For more information, search in the internet for 'watermark attack cryptoloop' - -@advanced_1461_td -SSL/TLS - -@advanced_1462_td -#Secure Sockets Layer / Transport Layer Security. See also: Java Secure Socket Extension (JSSE) - -@architecture_1000_h1 -#Architecture - -@architecture_1001_a -# Introduction - -@architecture_1002_a -# Top-down overview - -@architecture_1003_a -# JDBC driver - -@architecture_1004_a -# Connection/session management - -@architecture_1005_a -# Command execution and planning - -@architecture_1006_a -# Table/index/constraints - -@architecture_1007_a -# Undo log, redo log, and transactions layer - -@architecture_1008_a -# B-tree engine and page-based storage allocation - -@architecture_1009_a -# Filesystem abstraction - -@architecture_1010_h2 -#Introduction - -@architecture_1011_p -# H2 implements an embedded and standalone ANSI-SQL89 compliant SQL engine on top of a B-tree based disk store. - -@architecture_1012_p -# As of October 2013, Thomas is still working on our next-generation storage engine called MVStore. This will in time replace the B-tree based storage engine. 
- -@architecture_1013_h2 -#Top-down Overview - -@architecture_1014_p -# Working from the top down, the layers look like this: - -@architecture_1015_li -#JDBC driver. - -@architecture_1016_li -#Connection/session management. - -@architecture_1017_li -#SQL Parser. - -@architecture_1018_li -#Command execution and planning. - -@architecture_1019_li -#Table/Index/Constraints. - -@architecture_1020_li -#Undo log, redo log, and transactions layer. - -@architecture_1021_li -#B-tree engine and page-based storage allocation. - -@architecture_1022_li -#Filesystem abstraction. - -@architecture_1023_h2 -#JDBC Driver - -@architecture_1024_p -# The JDBC driver implementation lives in org.h2.jdbc, org.h2.jdbcx - -@architecture_1025_h2 -#Connection/session management - -@architecture_1026_p -# The primary classes of interest are: - -@architecture_1027_th -#Package - -@architecture_1028_th -説明 - -@architecture_1029_td -#org.h2.engine.Database - -@architecture_1030_td -#the root/global class - -@architecture_1031_td -#org.h2.engine.SessionInterface - -@architecture_1032_td -#abstracts over the differences between embedded and remote sessions - -@architecture_1033_td -#org.h2.engine.Session - -@architecture_1034_td -#local/embedded session - -@architecture_1035_td -#org.h2.engine.SessionRemote - -@architecture_1036_td -#remote session - -@architecture_1037_h2 -#Parser - -@architecture_1038_p -# The parser lives in org.h2.command.Parser. It uses a straightforward recursive-descent design. - -@architecture_1039_p -# See Wikipedia Recursive-descent parser page. - -@architecture_1040_h2 -#Command execution and planning - -@architecture_1041_p -# Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. 
The primary packages of interest are: - -@architecture_1042_th -#Package - -@architecture_1043_th -説明 - -@architecture_1044_td -#org.h2.command.ddl - -@architecture_1045_td -#Commands that modify schema data structures - -@architecture_1046_td -#org.h2.command.dml - -@architecture_1047_td -#Commands that modify data - -@architecture_1048_h2 -#Table/Index/Constraints - -@architecture_1049_p -# One thing to note here is that indexes are simply stored as special kinds of tables. - -@architecture_1050_p -# The primary packages of interest are: - -@architecture_1051_th -#Package - -@architecture_1052_th -説明 - -@architecture_1053_td -#org.h2.table - -@architecture_1054_td -#Implementations of different kinds of tables - -@architecture_1055_td -#org.h2.index - -@architecture_1056_td -#Implementations of different kinds of indices - -@architecture_1057_h2 -#Undo log, redo log, and transactions layer - -@architecture_1058_p -# We have a transaction log, which is shared among all sessions. See also http://en.wikipedia.org/wiki/Transaction_log http://h2database.com/html/grammar.html#set_log - -@architecture_1059_p -# We also have an undo log, which is per session, to undo an operation (an update that fails for example) and to rollback a transaction. Theoretically, the transaction log could be used, but for simplicity, H2 currently uses its own "list of operations" (usually in-memory). - -@architecture_1060_p -# With the MVStore, this is no longer needed (just the transaction log). - -@architecture_1061_h2 -#B-tree engine and page-based storage allocation. - -@architecture_1062_p -# The primary package of interest is org.h2.store. - -@architecture_1063_p -# This implements a storage mechanism which allocates pages of storage (typically 2k in size) and also implements a b-tree over those pages to allow fast retrieval and update. - -@architecture_1064_h2 -#Filesystem abstraction. - -@architecture_1065_p -# The primary class of interest is org.h2.store.FileStore. 
- -@architecture_1066_p -# This implements an abstraction of a random-access file. This allows the higher layers to treat in-memory vs. on-disk vs. zip-file databases the same. - -@build_1000_h1 -ビルド - -@build_1001_a -# Portability - -@build_1002_a -# Environment - -@build_1003_a -# Building the Software - -@build_1004_a -# Build Targets - -@build_1005_a -# Using Maven 2 - -@build_1006_a -# Using Eclipse - -@build_1007_a -# Translating - -@build_1008_a -# Submitting Source Code Changes - -@build_1009_a -# Reporting Problems or Requests - -@build_1010_a -# Automated Build - -@build_1011_a -# Generating Railroad Diagrams - -@build_1012_h2 -ポータビリティ - -@build_1013_p -# This database is written in Java and therefore works on many platforms. It can also be compiled to a native executable using GCJ. - -@build_1014_h2 -環境 - -@build_1015_p -# To run this database, a Java Runtime Environment (JRE) version 1.7 or higher is required. - -@build_1016_p -# To create the database executables, the following software stack was used. To use this database, it is not required to install this software however. - -@build_1017_li -#Mac OS X and Windows - -@build_1018_a -#Oracle JDK Version 1.7 - -@build_1019_a -#Eclipse - -@build_1020_li -#Eclipse Plugins: Subclipse, Eclipse Checkstyle Plug-in, EclEmma Java Code Coverage - -@build_1021_a -#Emma Java Code Coverage - -@build_1022_a -#Mozilla Firefox - -@build_1023_a -#OpenOffice - -@build_1024_a -#NSIS - -@build_1025_li -# (Nullsoft Scriptable Install System) - -@build_1026_a -#Maven - -@build_1027_h2 -ソフトウェアのビルド - -@build_1028_p -# You need to install a JDK, for example the Oracle JDK version 1.7 or 1.8. Ensure that Java binary directory is included in the PATH environment variable, and that the environment variable JAVA_HOME points to your Java installation. On the command line, go to the directory h2 and execute the following command: - -@build_1029_p -# For Linux and OS X, use ./build.sh instead of build. 
- -@build_1030_p -# You will get a list of targets. If you want to build the jar file, execute (Windows): - -@build_1031_p -# To run the build tool in shell mode, use the command line option - as in ./build.sh -. - -@build_1032_h3 -#Switching the Source Code - -@build_1033_p -# The source code uses Java 1.7 features. To switch the source code to the installed version of Java, run: - -@build_1034_h2 -#Build Targets - -@build_1035_p -# The build system can generate smaller jar files as well. The following targets are currently supported: - -@build_1036_code -#jarClient - -@build_1037_li -# creates the file h2client.jar. This only contains the JDBC client. - -@build_1038_code -#jarSmall - -@build_1039_li -# creates the file h2small.jar. This only contains the embedded database. Debug information is disabled. - -@build_1040_code -#jarJaqu - -@build_1041_li -# creates the file h2jaqu.jar. This only contains the JaQu (Java Query) implementation. All other jar files do not include JaQu. - -@build_1042_code -#javadocImpl - -@build_1043_li -# creates the Javadocs of the implementation. - -@build_1044_p -# To create the file h2client.jar, go to the directory h2 and execute the following command: - -@build_1045_h3 -#Using Apache Lucene - -@build_1046_p -# Apache Lucene 3.6.2 is used for testing. Newer versions may work, however they are not tested. - -@build_1047_h2 -Maven 2 の利用 - -@build_1048_h3 -Centralリポジトリの利用 - -@build_1049_p -# You can include the database in your Maven 2 project as a dependency. Example: - -@build_1050_p -# New versions of this database are first uploaded to http://hsql.sourceforge.net/m2-repo/ and then automatically synchronized with the main Maven repository; however after a new release it may take a few hours before they are available there. - -@build_1051_h3 -#Maven Plugin to Start and Stop the TCP Server - -@build_1052_p -# A Maven plugin to start and stop the H2 TCP server is available from Laird Nelson at GitHub. 
To start the H2 server, use: - -@build_1053_p -# To stop the H2 server, use: - -@build_1054_h3 -スナップショットバージョンの利用 - -@build_1055_p -# To build a h2-*-SNAPSHOT.jar file and upload it to the local Maven 2 repository, execute the following command: - -@build_1056_p -# Afterwards, you can include the database in your Maven 2 project as a dependency: - -@build_1057_h2 -#Using Eclipse - -@build_1058_p -# To create an Eclipse project for H2, use the following steps: - -@build_1059_li -#Install Git and Eclipse. - -@build_1060_li -#Get the H2 source code from Github: - -@build_1061_code -#git clone https://github.com/h2database/h2database - -@build_1062_li -#Download all dependencies: - -@build_1063_code -#build.bat download - -@build_1064_li -#(Windows) - -@build_1065_code -#./build.sh download - -@build_1066_li -#(otherwise) - -@build_1067_li -#In Eclipse, create a new Java project from existing source code: File, New, Project, Java Project, Create project from existing source. - -@build_1068_li -#Select the h2 folder, click Next and Finish. - -@build_1069_li -#To resolve com.sun.javadoc import statements, you may need to manually add the file <java.home>/../lib/tools.jar to the build path. - -@build_1070_h2 -#Translating - -@build_1071_p -# The translation of this software is split into the following parts: - -@build_1072_li -#H2 Console: src/main/org/h2/server/web/res/_text_*.prop - -@build_1073_li -#Error messages: src/main/org/h2/res/_messages_*.prop - -@build_1074_p -# To translate the H2 Console, start it and select Preferences / Translate. After you are done, send the translated *.prop file to the Google Group. The web site is currently translated using Google. - -@build_1075_h2 -#Submitting Source Code Changes - -@build_1076_p -# If you'd like to contribute bug fixes or new features, please consider the following guidelines to simplify merging them: - -@build_1077_li -#Only use Java 7 features (do not use Java 8/9/etc) (see Environment). 
- -@build_1078_li -#Follow the coding style used in the project, and use Checkstyle (see above) to verify. For example, do not use tabs (use spaces instead). The checkstyle configuration is in src/installer/checkstyle.xml. - -@build_1079_li -#A template of the Eclipse settings are in src/installer/eclipse.settings/*. If you want to use them, you need to copy them to the .settings directory. The formatting options (eclipseCodeStyle) are also included. - -@build_1080_li -#Please provide test cases and integrate them into the test suite. For Java level tests, see src/test/org/h2/test/TestAll.java. For SQL level tests, see src/test/org/h2/test/test.in.txt or testSimple.in.txt. - -@build_1081_li -#The test cases should cover at least 90% of the changed and new code; use a code coverage tool to verify that (see above). or use the build target coverage. - -@build_1082_li -#Verify that you did not break other features: run the test cases by executing build test. - -@build_1083_li -#Provide end user documentation if required (src/docsrc/html/*). - -@build_1084_li -#Document grammar changes in src/docsrc/help/help.csv - -@build_1085_li -#Provide a change log entry (src/docsrc/html/changelog.html). - -@build_1086_li -#Verify the spelling using build spellcheck. If required add the new words to src/tools/org/h2/build/doc/dictionary.txt. - -@build_1087_li -#Run src/installer/buildRelease to find and fix formatting errors. - -@build_1088_li -#Verify the formatting using build docs and build javadoc. - -@build_1089_li -#Submit changes using GitHub's "pull requests". You'll require a free GitHub account. If you are not familiar with pull requests, please read GitHub's Using pull requests page. - -@build_1090_p -# For legal reasons, patches need to be public in the form of an issue report or attachment or in the form of an email to the group. 
Significant contributions need to include the following statement: - -@build_1091_p -# "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 (http://h2database.com/html/license.html)." - -@build_1092_h2 -#Reporting Problems or Requests - -@build_1093_p -# Please consider the following checklist if you have a question, want to report a problem, or if you have a feature request: - -@build_1094_li -#For bug reports, please provide a short, self contained, correct (compilable), example of the problem. - -@build_1095_li -#Feature requests are always welcome, even if the feature is already on the roadmap. Your mail will help prioritize feature requests. If you urgently need a feature, consider providing a patch. - -@build_1096_li -#Before posting problems, check the FAQ and do a Google search. - -@build_1097_li -#When you get an unexpected exception, please try the Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s). - -@build_1098_li -#When sending source code, please use a public web clipboard such as Pastebin, Cl1p, or Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but so that the problem can still be reproduced. As a template, use: HelloWorld.java. Methods that simply call other methods should be avoided, as well as unnecessary exception handling. Please use the JDBC API and no external tools or libraries. The test should include all required initialization code, and should be started with the main method. - -@build_1099_li -#For large attachments, use a public temporary storage such as Rapidshare. - -@build_1100_li -#Google Group versus issue tracking: Use the Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an issue, but you don't need to (sending an email to the group is enough). 
Please note that only a few people monitor the issue tracking system. - -@build_1101_li -#For out-of-memory problems, please analyze the problem yourself first, for example using the command line option -XX:+HeapDumpOnOutOfMemoryError (to create a heap dump file on out of memory) and a memory analysis tool such as the Eclipse Memory Analyzer (MAT). - -@build_1102_li -#It may take a few days to get an answer. Please do not double post. - -@build_1103_h2 -#Automated Build - -@build_1104_p -# This build process is automated and runs regularly. The build process includes running the tests and code coverage, using the command line ./build.sh clean jar coverage -Dh2.ftpPassword=... uploadBuild. The last results are available here: - -@build_1105_a -#Test Output - -@build_1106_a -#Code Coverage Summary - -@build_1107_a -#Code Coverage Details (download, 1.3 MB) - -@build_1108_a -#Build Newsfeed - -@build_1109_h2 -#Generating Railroad Diagrams - -@build_1110_p -# The railroad diagrams of the SQL grammar are HTML, formatted as nested tables. The diagrams are generated as follows: - -@build_1111_li -#The BNF parser (org.h2.bnf.Bnf) reads and parses the BNF from the file help.csv. - -@build_1112_li -#The page parser (org.h2.server.web.PageParser) reads the template HTML file and fills in the diagrams. - -@build_1113_li -#The rail images (one straight, four junctions, two turns) are generated using a simple Java application. - -@build_1114_p -# To generate railroad diagrams for other grammars, see the package org.h2.jcr. This package is used to generate the SQL-2 railroad diagrams for the JCR 2.0 specification. 
- -@changelog_1000_h1 -変更履歴 - -@changelog_1001_h2 -#Next Version (unreleased) - -@changelog_1002_li -#Issue #654: List ENUM type values in INFORMATION_SCHEMA.COLUMNS - -@changelog_1003_li -#Issue #668: Fail of an update command on large table with ENUM column - -@changelog_1004_li -#Issue #662: column called CONSTRAINT is not properly escaped when storing to metadata - -@changelog_1005_li -#Issue #660: Outdated java version mentioned on http://h2database.com/html/build.html#providing_patches - -@changelog_1006_li -#Issue #643: H2 doesn't use index when I use IN and EQUAL in one query - -@changelog_1007_li -#Reset transaction start timestamp on ROLLBACK - -@changelog_1008_li -#Issue #632: CREATE OR REPLACE VIEW creates incorrect columns names - -@changelog_1009_li -#Issue #630: Integer overflow in CacheLRU can cause unrestricted cache growth - -@changelog_1010_li -#Issue #497: Fix TO_DATE in cases of 'inline' text. E.g. the "T" and "Z" in to_date('2017-04-21T00:00:00Z', 'YYYY-MM-DD"T"HH24:MI:SS"Z"') - -@changelog_1011_li -#Fix bug in MySQL/ORACLE-syntax silently corrupting the modified column in cases of setting the 'NULL'- or 'NOT NULL'-constraint. E.g. alter table T modify C NULL; - -@changelog_1012_li -#Issue #570: MySQL compatibility for ALTER TABLE .. DROP INDEX - -@changelog_1013_li -#Issue #537: Include the COLUMN name in message "Numeric value out of range" - -@changelog_1014_li -#Issue #600: ROW_NUMBER() behaviour change in H2 1.4.195 - -@changelog_1015_li -#Fix a bunch of race conditions found by vmlens.com, thank you to vmlens for giving us a license. - -@changelog_1016_li -#PR #597: Support more types in getObject - -@changelog_1017_li -#Issue #591: Generated SQL from WITH-CTEs does not include a table identifier - -@changelog_1018_li -#PR #593: Make it possible to create a cluster without using temporary files. 
- -@changelog_1019_li -#PR #592: "Connection is broken: "unexpected status 16777216" [90067-192]" message when using older h2 releases as client - -@changelog_1020_li -#Issue #585: MySQL mode DELETE statements compatibility - -@changelog_1021_li -#PR #586: remove extra tx preparation - -@changelog_1022_li -#PR #568: Implement MetaData.getColumns() for synonyms. - -@changelog_1023_li -#Issue #581: org.h2.tools.RunScript assumes -script parameter is part of protocol - -@changelog_1024_li -#Fix a deadlock in the TransactionStore - -@changelog_1025_li -#PR #579: Disallow BLOB type in PostgreSQL mode - -@changelog_1026_li -#Issue #576: Common Table Expression (CTE): WITH supports INSERT, UPDATE, MERGE, DELETE, CREATE TABLE ... - -@changelog_1027_li -#Issue #493: Query with distinct/limit/offset subquery returns unexpected rows - -@changelog_1028_li -#Issue #575: Support for full text search in multithreaded mode - -@changelog_1029_li -#Issue #569: ClassCastException when filtering on ENUM value in WHERE clause - -@changelog_1030_li -#Issue #539: Allow override of builtin functions/aliases - -@changelog_1031_li -#Issue #535: Allow explicit paths on Windows without drive letter - -@changelog_1032_li -#Issue #549: Removed UNION ALL requirements for CTE - -@changelog_1033_li -#Issue #548: Table synonym support - -@changelog_1034_li -#Issue #531: Rollback and delayed meta save. - -@changelog_1035_li -#Issue #515: "Unique index or primary key violation" in TestMvccMultiThreaded - -@changelog_1036_li -#Issue #458: TIMESTAMPDIFF() test failing. Handling of timestamp literals. 
- -@changelog_1037_li -#PR #546: Fixes the missing file tree.js in the web console - -@changelog_1038_li -#Issue #543: Prepare statement with regexp will not refresh parameter after metadata change - -@changelog_1039_li -#PR #536: Support TIMESTAMP_WITH_TIMEZONE 2014 JDBC type - -@changelog_1040_li -#Fix bug in parsing ANALYZE TABLE xxx SAMPLE_SIZE yyy - -@changelog_1041_li -#Add padding for CHAR(N) values in PostgreSQL mode - -@changelog_1042_li -#Issue #89: Add DB2 timestamp format compatibility - -@changelog_1043_h2 -#Version 1.4.196 (2017-06-10) - -@changelog_1044_li -#Issue#479 Allow non-recursive CTEs (WITH statements), patch from stumc - -@changelog_1045_li -#Fix startup issue when using "CHECK" as a column name. - -@changelog_1046_li -#Issue #423: ANALYZE performed multiple times on one table during execution of the same statement. - -@changelog_1047_li -#Issue #426: Support ANALYZE TABLE statement - -@changelog_1048_li -#Issue #438: Fix slow logging via SLF4J (TRACE_LEVEL_FILE=4). - -@changelog_1049_li -#Issue #472: Support CREATE SEQUENCE ... ORDER as a NOOP for Oracle compatibility - -@changelog_1050_li -#Issue #479: Allow non-recursive Common Table Expressions (CTE) - -@changelog_1051_li -#On Mac OS X, with IPv6 and no network connection, the Console tool was not working as expected. - -@changelog_1052_h2 -#Version 1.4.195 (2017-04-23) - -@changelog_1053_li -#Lazy query execution support. - -@changelog_1054_li -#Added API for handling custom data types (System property "h2.customDataTypesHandler", API org.h2.api.CustomDataTypesHandler). - -@changelog_1055_li -#Added support for invisible columns. - -@changelog_1056_li -#Added an ENUM data type, with syntax similar to that of MySQL. - -@changelog_1057_li -#MVStore: for object data types, the cache size memory estimation was sometimes far off in a read-only scenario. This could result in inefficient cache usage. 
- -@changelog_1058_h2 -#Version 1.4.194 (2017-03-10) - -@changelog_1059_li -#Issue #453: MVStore setCacheSize() should also limit the cacheChunkRef. - -@changelog_1060_li -#Issue #448: Newly added TO_DATE and TO_TIMESTAMP functions have wrong datatype. - -@changelog_1061_li -#The "nioMemLZF" filesystem now supports an extra option "nioMemLZF:12:" to tweak the size of the compress later cache. - -@changelog_1062_li -#Various multi-threading fixes and optimisations to the "nioMemLZF" filesystem. - -@changelog_1063_strong -#[API CHANGE] #439: the JDBC type of TIMESTAMP WITH TIME ZONE changed from Types.OTHER (1111) to Types.TIMESTAMP_WITH_TIMEZONE (2014) - -@changelog_1064_li -#PR #1637: Remove explicit unboxing - -@changelog_1065_li -#PR #1635: Optimize UUID to VARCHAR conversion and use correct time check in Engine.openSession() - -@changelog_1066_li -#PR #418, Implement Connection#createArrayOf and PreparedStatement#setArray. - -@changelog_1067_li -#PR #427, Add MySQL compatibility functions UNIX_TIMESTAMP, FROM_UNIXTIME and DATE. - -@changelog_1068_li -#PR #1630: fix duplicate words typos in comments and javadoc - -@changelog_1069_li -#Fixed bug in metadata locking, obscure combination of DDL and SELECT SEQUENCE.NEXTVAL required. - -@changelog_1070_li -#Added index hints: SELECT * FROM TEST USE INDEX (idx1, idx2). - -@changelog_1071_li -#Add a test case to ensure that spatial index is used with and order by command by Fortin N. - -@changelog_1072_li -#Fix multi-threaded mode update exception "NullPointerException", test case by Anatolii K. - -@changelog_1073_li -#Fix multi-threaded mode insert exception "Unique index or primary key violation", test case by Anatolii K. - -@changelog_1074_li -#Implement ILIKE operator for case-insensitive matching. - -@changelog_1075_li -#Optimise LIKE queries for the common cases of '%Foo' and '%Foo%'. - -@changelog_1076_li -#Issue #387: H2 MSSQL Compatibility Mode - Support uniqueidentifier. 
- -@changelog_1077_li -#Issue #401: NPE in "SELECT DISTINCT * ORDER BY". - -@changelog_1078_li -#Added BITGET function. - -@changelog_1079_li -#Fixed bug in FilePathRetryOnInterrupt that caused infinite loop. - -@changelog_1080_li -#PR #389, Handle LocalTime with nanosecond resolution, patch by katzyn. - -@changelog_1081_li -#PR #382, Recover for "page store" H2 breaks LOBs consistency, patch by vitalus. - -@changelog_1082_li -#PR #393, Run tests on Travis, patch by marschall. - -@changelog_1083_li -#Fix bug in REGEX_REPLACE, not parsing the mode parameter. - -@changelog_1084_li -#ResultSet.getObject(..., Class) threw a ClassNotFoundException if the JTS suite was not in the classpath. - -@changelog_1085_li -#File systems: the "cache:" file system, and the compressed in-memory file systems memLZF and nioMemLZF did not correctly support concurrent reading and writing. - -@changelog_1086_li -#TIMESTAMP WITH TIMEZONE: serialization for the PageStore was broken. - -@changelog_1087_h2 -#Version 1.4.193 (2016-10-31) - -@changelog_1088_li -#PR #386: Add JSR-310 Support (introduces JTS dependency fixed in 1.4.194) - -@changelog_1089_li -#WARNING: THE MERGE BELOW WILL AFFECT ANY 'TIMESTAMP WITH TIMEZONE' INDEXES. You will need to drop and recreate any such indexes. - -@changelog_1090_li -#PR #364: fix compare TIMESTAMP WITH TIMEZONE - -@changelog_1091_li -#Fix bug in picking the right index for INSERT..ON DUPLICATE KEY UPDATE when there are both UNIQUE and PRIMARY KEY constraints. - -@changelog_1092_li -#Issue #380: Error Analyzer doesn't show source code - -@changelog_1093_li -#Remove the "TIMESTAMP UTC" datatype, an experiment that was never finished. - -@changelog_1094_li -#PR #363: Added support to define last IDENTIFIER on a Trigger. 
- -@changelog_1095_li -#PR #366: Tests for timestamps - -@changelog_1096_li -#PR #361: Improve TimestampWithTimeZone javadoc - -@changelog_1097_li -#PR #360: Change getters in TimestampWithTimeZone to int - -@changelog_1098_li -#PR #359: Added missing source encoding. Assuming UTF-8. - -@changelog_1099_li -#PR #353: Add support for converting JAVA_OBJECT to UUID - -@changelog_1100_li -#PR #358: Add support for getObject(int|String, Class) - -@changelog_1101_li -#PR #357: Server: use xdg-open to open the WebConsole in the user's preferred browser on Linux - -@changelog_1102_li -#PR #356: Support for BEFORE and AFTER clauses when using multiple columns in ALTER TABLE ADD - -@changelog_1103_li -#PR #351: Respect format codes from Bind message when sending results - -@changelog_1104_li -#ignore summary line when compiling stored procedure - -@changelog_1105_li -#PR #348: pg: send RowDescription in response to Describe (statement variant), patch by kostya-sh - -@changelog_1106_li -#PR #337: Update russian translation, patch by avp1983 - -@changelog_1107_li -#PR #329: Update to servlet API version 3.1.0 from 3.0.1, patch by Mat Booth - -@changelog_1108_li -#PR #331: ChangeFileEncryption progress logging ignores -quiet flag, patch by Stefan Bodewig - -@changelog_1109_li -#PR #325: Make Row an interface - -@changelog_1110_li -#PR #323: Regular expression functions (REGEXP_REPLACE, REGEXP_LIKE) enhancement, patch by Akkuzin - -@changelog_1111_li -#Use System.nanoTime for measuring query statistics - -@changelog_1112_li -#Issue #324: Deadlock when sending BLOBs over TCP - -@changelog_1113_li -#Fix for creating and accessing views in MULTITHREADED mode, test-case courtesy of Daniel Rosenbaum - -@changelog_1114_li -#Issue #266: Spatial index not updating, fixed by merging PR #267 - -@changelog_1115_li -#PR #302: add support for "with"-subqueries into "join" & "sub-query" statements - -@changelog_1116_li -#Issue #299: Nested derived tables did not always work as expected. 
- -@changelog_1117_li -#Use interfaces to replace the java version templating, idea from Lukas Eder. - -@changelog_1118_li -#Issue #295: JdbcResultSet.getObject(int, Class) returns null instead of throwing. - -@changelog_1119_li -#Mac OS X: Console tool process did not stop on exit. - -@changelog_1120_li -#MVStoreTool: add "repair" feature. - -@changelog_1121_li -#Garbage collection of unused chunks should be faster still. - -@changelog_1122_li -#MVStore / transaction store: opening a store in read-only mode does no longer loop. - -@changelog_1123_li -#MVStore: disabled the file system cache by default, because it limits concurrency when using larger databases and many threads. To re-enable, use the file name prefix "cache:". - -@changelog_1124_li -#MVStore: add feature to set the cache concurrency. - -@changelog_1125_li -#File system nioMemFS: support concurrent reads. - -@changelog_1126_li -#File systems: the compressed in-memory file systems now compress better. - -@changelog_1127_li -#LIRS cache: improved hit rate because now added entries get hot if they were in the non-resident part of the cache before. - -@changelog_1128_h2 -#Version 1.4.192 Beta (2016-05-26) - -@changelog_1129_li -#Java 6 is no longer supported (the jar files are compiled for Java 7). - -@changelog_1130_li -#Garbage collection of unused chunks should now be faster. - -@changelog_1131_li -#Prevent people using unsupported combination of auto-increment columns and clustering mode. - -@changelog_1132_li -#Support for DB2 time format, patch by Niklas Mehner - -@changelog_1133_li -#Added support for Connection.setClientInfo() in compatibility modes for DB2, Postgresql, Oracle and MySQL. - -@changelog_1134_li -#Issue #249: Clarify license declaration in Maven POM xml - -@changelog_1135_li -#Fix NullPointerException in querying spatial data through a sub-select. 
- -@changelog_1136_li -#Fix bug where a lock on the SYS table was not released when closing a session that contained a temp table with an LOB column. - -@changelog_1137_li -#Issue #255: ConcurrentModificationException with multiple threads in embedded mode and temporary LOBs - -@changelog_1138_li -#Issue #235: Anonymous SSL connections fail in many situations - -@changelog_1139_li -#Fix race condition in FILE_LOCK=SOCKET, which could result in the watchdog thread not running - -@changelog_1140_li -#Experimental support for datatype TIMESTAMP WITH TIMEZONE - -@changelog_1141_li -#Add support for ALTER TABLE ... RENAME CONSTRAINT .. TO ... - -@changelog_1142_li -#Add support for PostgreSQL ALTER TABLE ... RENAME COLUMN .. TO ... - -@changelog_1143_li -#Add support for ALTER SCHEMA [ IF EXISTS ] - -@changelog_1144_li -#Add support for ALTER TABLE [ IF EXISTS ] - -@changelog_1145_li -#Add support for ALTER VIEW [ IF EXISTS ] - -@changelog_1146_li -#Add support for ALTER INDEX [ IF EXISTS ] - -@changelog_1147_li -#Add support for ALTER SEQUENCE [ IF EXISTS ] - -@changelog_1148_li -#Improve performance of cleaning up temp tables - patch from Eric Faulhaber. - -@changelog_1149_li -#Fix bug where table locks were not dropped when the connection closed - -@changelog_1150_li -#Fix extra CPU usage caused by query planner enhancement in 1.4.191 - -@changelog_1151_li -#improve performance of queries that use LIKE 'foo%' - 10x in the case of one of my queries - -@changelog_1152_li -#The function IFNULL did not always return the result in the right data type. - -@changelog_1153_li -#Issue #231: Possible infinite loop when initializing the ObjectDataType class when concurrently writing into MVStore. - -@changelog_1154_h2 -#Version 1.4.191 Beta (2016-01-21) - -@changelog_1155_li -#TO_DATE and TO_TIMESTAMP functions. Thanks a lot to Sam Blume for the patch! - -@changelog_1156_li -#Issue #229: DATEDIFF does not work for 'WEEK'. 
- -@changelog_1157_li -#Issue #156: Add support for getGeneratedKeys() when executing commands via PreparedStatement#executeBatch. - -@changelog_1158_li -#Issue #195: The new Maven uses a .cmd file instead of a .bat file. - -@changelog_1159_li -#Issue #212: EXPLAIN PLAN for UPDATE statement did not display LIMIT expression. - -@changelog_1160_li -#Support OFFSET without LIMIT in SELECT. - -@changelog_1161_li -#Improve error message for METHOD_NOT_FOUND_1/90087. - -@changelog_1162_li -#CLOB and BLOB objects of removed rows were sometimes kept in the database file. - -@changelog_1163_li -#Server mode: executing "shutdown" left a thread on the server. - -@changelog_1164_li -#The condition "in(select...)" did not work correctly in some cases if the subquery had an "order by". - -@changelog_1165_li -#Issue #184: The Platform-independent zip had Windows line endings in Linux scripts. - -@changelog_1166_li -#Issue #186: The "script" command did not include sequences of temporary tables. - -@changelog_1167_li -#Issue #115: to_char fails with pattern FM0D099. - -@changelog_1168_h2 -#Version 1.4.190 Beta (2015-10-11) - -@changelog_1169_li -#Pull request #183: optimizer hints (so far without special SQL syntax). - -@changelog_1170_li -#Issue #180: In MVCC mode, executing UPDATE and SELECT ... FOR UPDATE simultaneously silently can drop rows. - -@changelog_1171_li -#PageStore storage: the cooperative file locking mechanism did not always work as expected (with very slow computers). - -@changelog_1172_li -#Temporary CLOB and BLOB objects are now removed while the database is open (and not just when closing the database). - -@changelog_1173_li -#MVStore CLOB and BLOB larger than about 25 MB: An exception could be thrown when using the MVStore storage. - -@changelog_1174_li -#Add FILE_WRITE function. 
Patch provided by Nicolas Fortin (Lab-STICC - CNRS UMR 6285 and Ecole Centrale de Nantes) - -@changelog_1175_h2 -#Version 1.4.189 Beta (2015-09-13) - -@changelog_1176_li -#Add support for dropping multiple columns in ALTER TABLE DROP COLUMN... - -@changelog_1177_li -#Fix bug in XA management when doing rollback after prepare. Patch by Stephane Lacoin. - -@changelog_1178_li -#MVStore CLOB and BLOB: An exception with the message "Block not found" could be thrown when using the MVStore storage, when copying LOB objects (for example due to "alter table" on a table with a LOB object), and then re-opening the database. - -@changelog_1179_li -#Fix for issue #171: Broken QueryStatisticsData duration data when trace level smaller than TraceSystem.INFO - -@changelog_1180_li -#Pull request #170: Added SET QUERY_STATISTICS_MAX_ENTRIES - -@changelog_1181_li -#Pull request #165: Fix compatibility postgresql function string_agg - -@changelog_1182_li -#Pull request #163: improved performance when not using the default timezone. - -@changelog_1183_li -#Local temporary tables with many rows did not work correctly due to automatic analyze. - -@changelog_1184_li -#Server mode: concurrently using the same connection could throw an exception "Connection is broken: unexpected status". - -@changelog_1185_li -#Performance improvement for metadata queries that join against the COLUMNS metadata table. - -@changelog_1186_li -#An ArrayIndexOutOfBoundsException was thrown in some cases when opening an old version 1.3 database, or an 1.4 database with both "mv_store=false" and the system property "h2.storeLocalTime" set to false. It mainly showed up with an index on a time, date, or timestamp column. The system property "h2.storeLocalTime" is no longer supported (MVStore databases always store local time, and PageStore now databases never do). 
- -@changelog_1187_h2 -#Version 1.4.188 Beta (2015-08-01) - -@changelog_1188_li -#Server mode: CLOB processing for texts larger than about 1 MB sometimes did not work. - -@changelog_1189_li -#Server mode: BLOB processing for binaries larger than 2 GB did not work. - -@changelog_1190_li -#Multi-threaded processing: concurrent deleting the same row could throw the exception "Row not found when trying to delete". - -@changelog_1191_li -#MVStore transactions: a thread could see a change of a different thread within a different map. Pull request #153. - -@changelog_1192_li -#H2 Console: improved IBM DB2 compatibility. - -@changelog_1193_li -#A thread deadlock detector (disabled by default) can help detect and analyze Java level deadlocks. To enable, set the system property "h2.threadDeadlockDetector" to true. - -@changelog_1194_li -#Performance improvement for metadata queries that join against the COLUMNS metadata table. - -@changelog_1195_li -#MVStore: power failure could corrupt the store, if writes were re-ordered. - -@changelog_1196_li -#For compatibility with other databases, support for (double and float) -0.0 has been removed. 0.0 is used instead. - -@changelog_1197_li -#Fix for #134, Column name with a # character. Patch by bradmesserle. - -@changelog_1198_li -#In version 1.4.186, "order by" was broken in some cases due to the change "Make the planner use indexes for sorting when doing a GROUP BY". The change was reverted. - -@changelog_1199_li -#Pull request #146: Improved CompareMode. - -@changelog_1200_li -#Fix for #144, JdbcResultSet.setFetchDirection() throws "Feature not supported". - -@changelog_1201_li -#Fix for issue #143, deadlock between two sessions hitting the same sequence on a column. - -@changelog_1202_li -#Pull request #137: SourceCompiler should not throw a syntax error on javac warning. 
- -@changelog_1203_li -#MVStore: out of memory while storing could corrupt the store (theoretically, a rollback would be possible, but this case is not yet implemented). - -@changelog_1204_li -#The compressed in-memory file systems (memLZF:) could not be used in the MVStore. - -@changelog_1205_li -#The in-memory file systems (memFS: and memLZF:) did not support files larger than 2 GB due to an integer overflow. - -@changelog_1206_li -#Pull request #138: Added the simple Oracle function: ORA_HASH (+ tests) #138 - -@changelog_1207_li -#Timestamps in the trace log follow the format (yyyy-MM-dd HH:mm:ss) instead of the old format (MM-dd HH:mm:ss). Patch by Richard Bull. - -@changelog_1208_li -#Pull request #125: Improved Oracle compatibility with "truncate" with timestamps and dates. - -@changelog_1209_li -#Pull request #127: Linked tables now support geometry columns. - -@changelog_1210_li -#ABS(CAST(0.0 AS DOUBLE)) returned -0.0 instead of 0.0. - -@changelog_1211_li -#BNF auto-completion failed with unquoted identifiers. - -@changelog_1212_li -#Oracle compatibility: empty strings were not converted to NULL when using prepared statements. - -@changelog_1213_li -#PostgreSQL compatibility: new syntax "create index ... using ...". - -@changelog_1214_li -#There was a bug in DataType.convertToValue when reading a ResultSet from a ResultSet. - -@changelog_1215_li -#Pull request #116: Improved concurrency in the trace system. - -@changelog_1216_li -#Issue 609: the spatial index did not support NULL. - -@changelog_1217_li -#Granting a schema is now supported. - -@changelog_1218_li -#Linked tables did not work when a function-based index is present (Oracle). - -@changelog_1219_li -#Creating a user with a null password, salt, or hash threw a NullPointerException. - -@changelog_1220_li -#Foreign key: don't add a single column index if column is leading key of existing index. 
- -@changelog_1221_li -#Pull request #4: Creating and removing temporary tables was getting slower and slower over time, because an internal object id was allocated but never de-allocated. - -@changelog_1222_li -#Issue 609: the spatial index did not support NULL with update and delete operations. - -@changelog_1223_li -#Pull request #2: Add external metadata type support (table type "external") - -@changelog_1224_li -#MS SQL Server: the CONVERT method did not work in views and derived tables. - -@changelog_1225_li -#Java 8 compatibility for "regexp_replace". - -@changelog_1226_li -#When in cluster mode, and one of the nodes goes down, we need to log the problem with priority "error", not "debug" - -@changelog_1227_h2 -#Version 1.4.187 Beta (2015-04-10) - -@changelog_1228_li -#MVStore: concurrent changes to the same row could result in the exception "The transaction log might be corrupt for key ...". This could only be reproduced with 3 or more threads. - -@changelog_1229_li -#Results with CLOB or BLOB data are no longer reused. - -@changelog_1230_li -#References to BLOB and CLOB objects now have a timeout. The configuration setting is LOB_TIMEOUT (default 5 minutes). This should avoid growing the database file if there are many queries that return BLOB or CLOB objects, and the database is not closed for a longer time. - -@changelog_1231_li -#MVStore: when committing a session that removed LOB values, changes were flushed unnecessarily. - -@changelog_1232_li -#Issue 610: possible integer overflow in WriteBuffer.grow(). - -@changelog_1233_li -#Issue 609: the spatial index did not support NULL (ClassCastException). - -@changelog_1234_li -#MVStore: in some cases, CLOB/BLOB data blocks were removed incorrectly when opening a database. - -@changelog_1235_li -#MVStore: updates that affected many rows were were slow in some cases if there was a secondary index. 
- -@changelog_1236_li -#Using "runscript" with autocommit disabled could result in a lock timeout on the internal table "SYS". - -@changelog_1237_li -#Issue 603: there was a memory leak when using H2 in a web application. Apache Tomcat logged an error message: "The web application ... created a ThreadLocal with key of type [org.h2.util.DateTimeUtils$1]". - -@changelog_1238_li -#When using the MVStore, running a SQL script generate by the Recover tool from a PageStore file failed with a strange error message (NullPointerException), now a clear error message is shown. - -@changelog_1239_li -#Issue 605: with version 1.4.186, opening a database could result in an endless loop in LobStorageMap.init. - -@changelog_1240_li -#Queries that use the same table alias multiple times now work. Before, the select expression list was expanded incorrectly. Example: "select * from a as x, b as x". - -@changelog_1241_li -#The MySQL compatibility feature "insert ... on duplicate key update" did not work with a non-default schema. - -@changelog_1242_li -#Issue 599: the condition "in(x, y)" could not be used in the select list when using "group by". - -@changelog_1243_li -#The LIRS cache could grow larger than the allocated memory. - -@changelog_1244_li -#A new file system implementation that re-opens the file if it was closed due to the application calling Thread.interrupt(). File name prefix "retry:". Please note it is strongly recommended to avoid calling Thread.interrupt; this is a problem for various libraries, including Apache Lucene. - -@changelog_1245_li -#MVStore: use RandomAccessFile file system if the file name starts with "file:". - -@changelog_1246_li -#Allow DATEADD to take a long value for count when manipulating milliseconds. - -@changelog_1247_li -#When using MV_STORE=TRUE and the SET CACHE_SIZE setting, the cache size was incorrectly set, so that it was effectively 1024 times smaller than it should be. - -@changelog_1248_li -#Concurrent CREATE TABLE... 
IF NOT EXISTS in the presence of MULTI_THREAD=TRUE could throw an exception. - -@changelog_1249_li -#Fix bug in MVStore when creating lots of temporary tables, where we could run out of transaction IDs. - -@changelog_1250_li -#Add support for PostgreSQL STRING_AGG function. Patch by Fred Aquiles. - -@changelog_1251_li -#Fix bug in "jdbc:h2:nioMemFS" isRoot() function. Also, the page size was increased to 64 KB. - -@changelog_1252_h2 -#Version 1.4.186 Beta (2015-03-02) - -@changelog_1253_li -#The Servlet API 3.0.1 is now used, instead of 2.4. - -@changelog_1254_li -#MVStore: old chunks no longer removed in append-only mode. - -@changelog_1255_li -#MVStore: the cache for page references could grow far too big, resulting in out of memory in some cases. - -@changelog_1256_li -#MVStore: orphaned lob objects were not correctly removed in some cases, making the database grow unnecessarily. - -@changelog_1257_li -#MVStore: the maximum cache size was artificially limited to 2 GB (due to an integer overflow). - -@changelog_1258_li -#MVStore / TransactionStore: concurrent updates could result in a "Too many open transactions" exception. - -@changelog_1259_li -#StringUtils.toUpperEnglish now has a small cache. This should speed up reading from a ResultSet when using the column name. - -@changelog_1260_li -#MVStore: up to 65535 open transactions are now supported. Previously, the limit was at most 65535 transactions between the oldest open and the newest open transaction (which was quite a strange limit). - -@changelog_1261_li -#The default limit for in-place LOB objects was changed from 128 to 256 bytes. This is because each read creates a reference to a LOB, and maintaining the references is a big overhead. With the higher limit, less references are needed. - -@changelog_1262_li -#Tables without columns didn't work. (The use case for such tables is testing.) 
- -@changelog_1263_li -#The LIRS cache now resizes the table automatically in all cases and no longer needs the averageMemory configuration. - -@changelog_1264_li -#Creating a linked table from an MVStore database to a non-MVStore database created a second (non-MVStore) database file. - -@changelog_1265_li -#In version 1.4.184, a bug was introduced that broke queries that have both joins and wildcards, for example: select * from dual join(select x from dual) on 1=1 - -@changelog_1266_li -#Issue 598: parser fails on timestamp "24:00:00.1234" - prevent the creation of out-of-range time values. - -@changelog_1267_li -#Allow declaring triggers as source code (like functions). Patch by Sylvain Cuaz. - -@changelog_1268_li -#Make the planner use indexes for sorting when doing a GROUP BY where all of the GROUP BY columns are not mentioned in the select. Patch by Frederico (zepfred). - -@changelog_1269_li -#PostgreSQL compatibility: generate_series (as an alias for system_range). Patch by litailang. - -@changelog_1270_li -#Fix missing "column" type in right-hand parameter in ConditionIn. Patch by Arnaud Thimel. - -@changelog_1271_h2 -#Version 1.4.185 Beta (2015-01-16) - -@changelog_1272_li -#In version 1.4.184, "group by" ignored the table name, and could pick a select column by mistake. Example: select 0 as x from system_range(1, 2) d group by d.x; - -@changelog_1273_li -#New connection setting "REUSE_SPACE" (default: true). If disabled, all changes are appended to the database file, and existing content is never overwritten. This allows to rollback to a previous state of the database by truncating the database file. - -@changelog_1274_li -#Issue 587: MVStore: concurrent compaction and store operations could result in an IllegalStateException. - -@changelog_1275_li -#Issue 594: Profiler.copyInThread does not work properly. - -@changelog_1276_li -#Script tool: Now, SCRIPT ... TO is always used (for higher speed and lower disk space usage). 
- -@changelog_1277_li -#Script tool: Fix parsing of BLOCKSIZE parameter, original patch by Ken Jorissen. - -@changelog_1278_li -#Fix bug in PageStore#commit method - when the ignoreBigLog flag was set, the logic that cleared the flag could never be reached, resulting in performance degradation. Reported by Alexander Nesterov. - -@changelog_1279_li -#Issue 552: Implement BIT_AND and BIT_OR aggregate functions. - -@changelog_1280_h2 -#Version 1.4.184 Beta (2014-12-19) - -@changelog_1281_li -#In version 1.3.183, indexes were not used if the table contains columns with a default value generated by a sequence. This includes tables with identity and auto-increment columns. This bug was introduced by supporting "rownum" in views and derived tables. - -@changelog_1282_li -#MVStore: imported BLOB and CLOB data sometimes disappeared. This was caused by a bug in the ObjectDataType comparison. - -@changelog_1283_li -#Reading from a StreamStore now throws an IOException if the underlying data doesn't exist. - -@changelog_1284_li -#MVStore: if there is an exception while saving, the store is now in all cases immediately closed. - -@changelog_1285_li -#MVStore: the dump tool could go into an endless loop for some files. - -@changelog_1286_li -#MVStore: recovery for a database with many CLOB or BLOB entries is now much faster. - -@changelog_1287_li -#Group by with a quoted select column name alias didn't work. Example: select 1 "a" from dual group by "a" - -@changelog_1288_li -#Auto-server mode: the host name is now stored in the .lock.db file. - -@changelog_1289_h2 -#Version 1.4.183 Beta (2014-12-13) - -@changelog_1290_li -#MVStore: the default auto-commit buffer size is now about twice as big. This should reduce the database file size after inserting a lot of data. - -@changelog_1291_li -#The built-in functions "power" and "radians" now always return a double. 
- -@changelog_1292_li -#Using "row_number" or "rownum" in views or derived tables had unexpected results if the outer query contained constraints for the given view. Example: select b.nr, b.id from (select row_number() over() as nr, a.id as id from (select id from test order by name) as a) as b where b.id = 1 - -@changelog_1293_li -#MVStore: the Recover tool can now deal with more types of corruption in the file. - -@changelog_1294_li -#MVStore: the TransactionStore now first needs to be initialized before it can be used. - -@changelog_1295_li -#Views and derived tables with equality and range conditions on the same columns did not work properly. example: select x from (select x from (select 1 as x) where x > 0 and x < 2) where x = 1 - -@changelog_1296_li -#The database URL setting PAGE_SIZE setting is now also used for the MVStore. - -@changelog_1297_li -#MVStore: the default page split size for persistent stores is now 4096 (it was 16 KB so far). This should reduce the database file size for most situations (in some cases, less than half the size of the previous version). - -@changelog_1298_li -#With query literals disabled, auto-analyze of a table with CLOB or BLOB did not work. - -@changelog_1299_li -#MVStore: use a mark and sweep GC algorithm instead of reference counting, to ensure used chunks are never overwrite, even if the reference counting algorithm does not work properly. - -@changelog_1300_li -#In the multi-threaded mode, updating the column selectivity ("analyze") in the background sometimes did not work. - -@changelog_1301_li -#In the multi-threaded mode, database metadata operations did sometimes not work if the schema was changed at the same time (for example, if tables were dropped). - -@changelog_1302_li -#Some CLOB and BLOB values could no longer be read when the original row was removed (even when using the MVCC mode). - -@changelog_1303_li -#The MVStoreTool could throw an IllegalArgumentException. 
- -@changelog_1304_li -#Improved performance for some date / time / timestamp conversion operations. Thanks to Sergey Evdokimov for reporting the problem. - -@changelog_1305_li -#H2 Console: the built-in web server did not work properly if an unknown file was requested. - -@changelog_1306_li -#MVStore: the jar file is renamed to "h2-mvstore-*.jar" and is deployed to Maven separately. - -@changelog_1307_li -#MVStore: support for concurrent reads and writes is now enabled by default. - -@changelog_1308_li -#Server mode: the transfer buffer size has been changed from 16 KB to 64 KB, after it was found that this improves performance on Linux quite a lot. - -@changelog_1309_li -#H2 Console and server mode: SSL is now disabled and TLS is used to protect against the Poodle SSLv3 vulnerability. The system property to disable secure anonymous connections is now "h2.enableAnonymousTLS". The default certificate is still self-signed, so you need to manually install another one if you want to avoid man in the middle attacks. - -@changelog_1310_li -#MVStore: the R-tree did not correctly measure the memory usage. - -@changelog_1311_li -#MVStore: compacting a store with an R-tree did not always work. - -@changelog_1312_li -#Issue 581: When running in LOCK_MODE=0, JdbcDatabaseMetaData#supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED) should return false - -@changelog_1313_li -#Fix bug which could generate deadlocks when multiple connections accessed the same table. 
- -@changelog_1314_li -#Some places in the code were not respecting the value set in the "SET MAX_MEMORY_ROWS x" command - -@changelog_1315_li -#Fix bug which could generate a NegativeArraySizeException when performing large (>40M) row union operations - -@changelog_1316_li -#Fix "USE schema" command for MySQL compatibility, patch by mfulton - -@changelog_1317_li -#Parse and ignore the ROW_FORMAT=DYNAMIC MySQL syntax, patch by mfulton - -@changelog_1318_h2 -#Version 1.4.182 Beta (2014-10-17) - -@changelog_1319_li -#MVStore: improved error messages and logging; improved behavior if there is an error when serializing objects. - -@changelog_1320_li -#OSGi: the MVStore packages are now exported. - -@changelog_1321_li -#With the MVStore option, when using multiple threads that concurrently create indexes or tables, it was relatively easy to get a lock timeout on the "SYS" table. - -@changelog_1322_li -#When using the multi-threaded option, the exception "Unexpected code path" could be thrown, specially if the option "analyze_auto" was set to a low value. - -@changelog_1323_li -#In the server mode, when reading from a CLOB or BLOB, if the connection was closed, a NullPointerException could be thrown instead of an exception saying the connection is closed. - -@changelog_1324_li -#DatabaseMetaData.getProcedures and getProcedureColumns could throw an exception if a user defined class is not available. - -@changelog_1325_li -#Issue 584: the error message for a wrong sequence definition was wrong. - -@changelog_1326_li -#CSV tool: the rowSeparator option is no longer supported, as the same can be achieved with the lineSeparator. - -@changelog_1327_li -#Descending indexes on MVStore tables did not work properly. - -@changelog_1328_li -#Issue 579: Conditions on the "_rowid_" pseudo-column didn't use an index when using the MVStore. - -@changelog_1329_li -#Fixed documentation that "offset" and "fetch" are also keywords since version 1.4.x. 
- -@changelog_1330_li -#The Long.MIN_VALUE could not be parsed for auto-increment (identity) columns. - -@changelog_1331_li -#Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in other JDBC classes. - -@changelog_1332_li -#Issue 572: MySQL compatibility for "order by" in update statements. - -@changelog_1333_li -#The change in JDBC escape processing in version 1.4.181 affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax "{t 'time}", or "{ts 'timestamp'}", or "{d 'data'}", then both the client and the server need to be upgraded to version 1.4.181 or later. - -@changelog_1334_h2 -#Version 1.4.181 Beta (2014-08-06) - -@changelog_1335_li -#Improved MySQL compatibility by supporting "use schema". Thanks a lot to Karl Pietrzak for the patch! - -@changelog_1336_li -#Writing to the trace file is now faster, specially with the debug level. - -@changelog_1337_li -#The database option "defrag_always=true" did not work with the MVStore. - -@changelog_1338_li -#The JDBC escape syntax {ts 'value'} did not interpret the value as a timestamp. The same for {d 'value'} (for date) and {t 'value'} (for time). Thanks to Lukas Eder for reporting the issue. The following problem was detected after version 1.4.181 was released: The change in JDBC escape processing affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax {t 'time'}, or {ts 'timestamp'}, or {d 'date'}, then both the client and the server need to be upgraded to version 1.4.181 or later. - -@changelog_1339_li -#File system abstraction: support replacing existing files using move (currently not for Windows). - -@changelog_1340_li -#The statement "shutdown defrag" now compresses the database (with the MVStore). This command can greatly reduce the file size, and is relatively fast, but is not incremental. 
- -@changelog_1341_li -#The MVStore now automatically compacts the store in the background if there is no read or write activity, which should (after some time; sometimes about one minute) reduce the file size. This is still work in progress, feedback is welcome! - -@changelog_1342_li -#Change default value of PAGE_SIZE from 2048 to 4096 to more closely match most file systems block size (PageStore only; the MVStore already used 4096). - -@changelog_1343_li -#Auto-scale MAX_MEMORY_ROWS and CACHE_SIZE settings by the amount of available RAM. Gives a better out of box experience for people with more powerful machines. - -@changelog_1344_li -#Handle tabs like 4 spaces in web console, patch by Martin Grajcar. - -@changelog_1345_li -#Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in JdbcConnection.java, patch by BigMichi1. - -@changelog_1346_h2 -#Version 1.4.180 Beta (2014-07-13) - -@changelog_1347_li -#MVStore: the store is now auto-compacted automatically up to some point, to avoid very large file sizes. This area is still work in progress. - -@changelog_1348_li -#Sequences of temporary tables (auto-increment or identity columns) were persisted unnecessarily in the database file, and were not removed when re-opening the database. - -@changelog_1349_li -#MVStore: an IndexOutOfBoundsException could sometimes occur MVMap.openVersion when concurrently accessing the store. - -@changelog_1350_li -#The LIRS cache now re-sizes the internal hash map if needed. - -@changelog_1351_li -#Optionally persist session history in the H2 console. (patch from Martin Grajcar) - -@changelog_1352_li -#Add client-info property to get the number of servers currently in the cluster and which servers that are available. (patch from Nikolaj Fogh) - -@changelog_1353_li -#Fix bug in changing encrypted DB password that kept the file handle open when the wrong password was supplied. (test case from Jens Hohmuth). 
- -@changelog_1354_li -#Issue 567: H2 hangs for a long time then (sometimes) recovers. Introduce a queue when doing table locking to prevent session starvation. - -@cheatSheet_1000_h1 -#H2 Database Engine Cheat Sheet - -@cheatSheet_1001_h2 -#Using H2 - -@cheatSheet_1002_a -H2 - -@cheatSheet_1003_li -# is open source, free to use and distribute. - -@cheatSheet_1004_a -ダウンロード - -@cheatSheet_1005_li -#: jar, installer (Windows), zip. - -@cheatSheet_1006_li -#To start the H2 Console tool, double click the jar file, or run java -jar h2*.jar, h2.bat, or h2.sh. - -@cheatSheet_1007_a -#A new database is automatically created - -@cheatSheet_1008_a -#by default - -@cheatSheet_1009_li -#. - -@cheatSheet_1010_a -#Closing the last connection closes the database - -@cheatSheet_1011_li -#. - -@cheatSheet_1012_h2 -ドキュメント - -@cheatSheet_1013_p -# Reference: SQL grammar, functions, data types, tools, API - -@cheatSheet_1014_a -特徴 - -@cheatSheet_1015_p -#: fulltext search, encryption, read-only (zip/jar), CSV, auto-reconnect, triggers, user functions - -@cheatSheet_1016_a -#Database URLs - -@cheatSheet_1017_a -#Embedded - -@cheatSheet_1018_code -jdbc:h2:~/test - -@cheatSheet_1019_p -# 'test' in the user home directory - -@cheatSheet_1020_code -#jdbc:h2:/data/test - -@cheatSheet_1021_p -# 'test' in the directory /data - -@cheatSheet_1022_code -#jdbc:h2:test - -@cheatSheet_1023_p -# in the current(!) 
working directory - -@cheatSheet_1024_a -#In-Memory - -@cheatSheet_1025_code -#jdbc:h2:mem:test - -@cheatSheet_1026_p -# multiple connections in one process - -@cheatSheet_1027_code -jdbc:h2:mem: - -@cheatSheet_1028_p -# unnamed private; one connection - -@cheatSheet_1029_a -サー�?ーモード - -@cheatSheet_1030_code -#jdbc:h2:tcp://localhost/~/test - -@cheatSheet_1031_p -# user home dir - -@cheatSheet_1032_code -#jdbc:h2:tcp://localhost//data/test - -@cheatSheet_1033_p -# absolute dir - -@cheatSheet_1034_a -#Server start - -@cheatSheet_1035_p -#:java -cp *.jar org.h2.tools.Server - -@cheatSheet_1036_a -#Settings - -@cheatSheet_1037_code -#jdbc:h2:..;MODE=MySQL - -@cheatSheet_1038_a -#compatibility (or HSQLDB,...) - -@cheatSheet_1039_code -#jdbc:h2:..;TRACE_LEVEL_FILE=3 - -@cheatSheet_1040_a -#log to *.trace.db - -@cheatSheet_1041_a -#Using the JDBC API - -@cheatSheet_1042_a -#Connection Pool - -@cheatSheet_1043_a -#Maven 2 - -@cheatSheet_1044_a -#Hibernate - -@cheatSheet_1045_p -# hibernate.cfg.xml (or use the HSQLDialect): - -@cheatSheet_1046_a -#TopLink and Glassfish - -@cheatSheet_1047_p -# Datasource class: org.h2.jdbcx.JdbcDataSource - -@cheatSheet_1048_code -#oracle.toplink.essentials.platform. 
- -@cheatSheet_1049_code -#database.H2Platform - -@download_1000_h1 -ダウンロード - -@download_1001_h3 -#Version 1.4.196 (2017-06-10) - -@download_1002_a -Windows Installer - -@download_1003_a -Platform-Independent Zip - -@download_1004_h3 -#Version 1.4.195 (2017-04-23), Last Stable - -@download_1005_a -Windows Installer - -@download_1006_a -Platform-Independent Zip - -@download_1007_h3 -#Old Versions - -@download_1008_a -Platform-Independent Zip - -@download_1009_h3 -#Jar File - -@download_1010_a -#Maven.org - -@download_1011_a -#Sourceforge.net - -@download_1012_h3 -#Maven (Binary, Javadoc, and Source) - -@download_1013_a -#Binary - -@download_1014_a -#Javadoc - -@download_1015_a -#Sources - -@download_1016_h3 -#Database Upgrade Helper File - -@download_1017_a -#Upgrade database from 1.1 to the current version - -@download_1018_h3 -#Git Source Repository - -@download_1019_a -#Github - -@download_1020_p -# For details about changes, see the Change Log. - -@download_1021_h3 -#News and Project Information - -@download_1022_a -#Atom Feed - -@download_1023_a -#RSS Feed - -@download_1024_a -#DOAP File - -@download_1025_p -# (what is this) - -@faq_1000_h1 -F A Q - -@faq_1001_a -# I Have a Problem or Feature Request - -@faq_1002_a -# Are there Known Bugs? When is the Next Release? - -@faq_1003_a -# Is this Database Engine Open Source? - -@faq_1004_a -# Is Commercial Support Available? - -@faq_1005_a -# How to Create a New Database? - -@faq_1006_a -# How to Connect to a Database? - -@faq_1007_a -# Where are the Database Files Stored? - -@faq_1008_a -# What is the Size Limit (Maximum Size) of a Database? - -@faq_1009_a -# Is it Reliable? - -@faq_1010_a -# Why is Opening my Database Slow? - -@faq_1011_a -# My Query is Slow - -@faq_1012_a -# H2 is Very Slow - -@faq_1013_a -# Column Names are Incorrect? - -@faq_1014_a -# Float is Double? - -@faq_1015_a -# Is the GCJ Version Stable? Faster? - -@faq_1016_a -# How to Translate this Project? 
- -@faq_1017_a -# How to Contribute to this Project? - -@faq_1018_h3 -#I Have a Problem or Feature Request - -@faq_1019_p -# Please read the support checklist. - -@faq_1020_h3 -#Are there Known Bugs? When is the Next Release? - -@faq_1021_p -# Usually, bugs get fixes as they are found. There is a release every few weeks. Here is the list of known and confirmed issues: - -@faq_1022_li -#When opening a database file in a timezone that has different daylight saving rules: the time part of dates where the daylight saving doesn't match will differ. This is not a problem within regions that use the same rules (such as, within USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new timezone. - -@faq_1023_li -#Apache Harmony: there seems to be a bug in Harmony that affects H2. See HARMONY-6505. - -@faq_1024_li -#Tomcat and Glassfish 3 set most static fields (final or non-final) to null when unloading a web application. This can cause a NullPointerException in H2 versions 1.1.107 and older, and may still not work in newer versions. Please report it if you run into this issue. In Tomcat >= 6.0 this behavior can be disabled by setting the system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false, however Tomcat may then run out of memory. A known workaround is to put the h2*.jar file in a shared lib directory (common/lib). - -@faq_1025_li -#Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3. - -@faq_1026_li -#When using Install4j before 4.1.4 on Linux and enabling pack200, the h2*.jar becomes corrupted by the install process, causing application failure. 
A workaround is to add an empty file h2*.jar.nopack next to the h2*.jar file. This problem is solved in Install4j 4.1.4. - -@faq_1027_p -# For a complete list, see Open Issues. - -@faq_1028_h3 -�?��?�データベースエンジン�?�オープンソース�?��?��?�? - -@faq_1029_p -# Yes. It is free to use and distribute, and the source code is included. See also under license. - -@faq_1030_h3 -#Is Commercial Support Available? - -@faq_1031_p -# No, currently commercial support is not available. - -@faq_1032_h3 -新�?データベース�?�構築方法�?�? - -@faq_1033_p -# By default, a new database is automatically created if it does not yet exist. See Creating New Databases. - -@faq_1034_h3 -データベース�?��?�接続方法�?�? - -@faq_1035_p -# The database driver is org.h2.Driver, and the database URL starts with jdbc:h2:. To connect to a database using JDBC, use the following code: - -@faq_1036_h3 -データベース�?�ファイル�?��?��?��?��?存�?�れ�?��?��?�? - -@faq_1037_p -# When using database URLs like jdbc:h2:~/test, the database is stored in the user directory. For Windows, this is usually C:\Documents and Settings\<userName> or C:\Users\<userName>. If the base directory is not set (as in jdbc:h2:./test), the database files are stored in the directory where the application is started (the current working directory). When using the H2 Console application from the start menu, this is <Installation Directory>/bin. The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL jdbc:h2:file:./data/sample, the database is stored in the directory data (relative to the current working directory). The directory is created automatically if it does not yet exist. It is also possible to use the fully qualified directory name (and for Windows, drive name). Example: jdbc:h2:file:C:/data/test - -@faq_1038_h3 -#What is the Size Limit (Maximum Size) of a Database? - -@faq_1039_p -# See Limits and Limitations. - -@faq_1040_h3 -�?�れ�?�信頼�?��??るデータベース�?��?��?�? - -@faq_1041_p -# That is not easy to say. 
It is still a quite new product. A lot of tests have been written, and the code coverage of these tests is higher than 80% for each package. Randomized stress tests are run regularly. But there are probably still bugs that have not yet been found (as with most software). Some features are known to be dangerous, they are only supported for situations where performance is more important than reliability. Those dangerous features are: - -@faq_1042_li -#Disabling the transaction log or FileDescriptor.sync() using LOG=0 or LOG=1. - -@faq_1043_li -#Using the transaction isolation level READ_UNCOMMITTED (LOCK_MODE 0) while at the same time using multiple connections. - -@faq_1044_li -#Disabling database file protection using (setting FILE_LOCK to NO in the database URL). - -@faq_1045_li -#Disabling referential integrity using SET REFERENTIAL_INTEGRITY FALSE. - -@faq_1046_p -# In addition to that, running out of memory should be avoided. In older versions, OutOfMemory errors while using the database could corrupt a databases. - -@faq_1047_p -# This database is well tested using automated test cases. The tests run every night and run for more than one hour. But not all areas of this database are equally well tested. When using one of the following features for production, please ensure your use case is well tested (if possible with automated test cases). The areas that are not well tested are: - -@faq_1048_li -#Platforms other than Windows, Linux, Mac OS X, or JVMs other than Oracle 1.6, 1.7, 1.8. - -@faq_1049_li -#The features AUTO_SERVER and AUTO_RECONNECT. - -@faq_1050_li -#Cluster mode, 2-phase commit, savepoints. - -@faq_1051_li -#Fulltext search. - -@faq_1052_li -#Operations on LOBs over 2 GB. - -@faq_1053_li -#The optimizer may not always select the best plan. - -@faq_1054_li -#Using the ICU4J collator. 
- -@faq_1055_p -# Areas considered experimental are: - -@faq_1056_li -#The PostgreSQL server - -@faq_1057_li -#Clustering (there are cases were transaction isolation can be broken due to timing issues, for example one session overtaking another session). - -@faq_1058_li -#Multi-threading within the engine using SET MULTI_THREADED=1. - -@faq_1059_li -#Compatibility modes for other databases (only some features are implemented). - -@faq_1060_li -#The soft reference cache (CACHE_TYPE=SOFT_LRU). It might not improve performance, and out of memory issues have been reported. - -@faq_1061_p -# Some users have reported that after a power failure, the database cannot be opened sometimes. In this case, use a backup of the database or the Recover tool. Please report such problems. The plan is that the database automatically recovers in all situations. - -@faq_1062_h3 -#Why is Opening my Database Slow? - -@faq_1063_p -# To find out what the problem is, use the H2 Console and click on "Test Connection" instead of "Login". After the "Login Successful" appears, click on it (it's a link). This will list the top stack traces. Then either analyze this yourself, or post those stack traces in the Google Group. - -@faq_1064_p -# Other possible reasons are: the database is very big (many GB), or contains linked tables that are slow to open. - -@faq_1065_h3 -#My Query is Slow - -@faq_1066_p -# Slow SELECT (or DELETE, UPDATE, MERGE) statement can have multiple reasons. Follow this checklist: - -@faq_1067_li -#Run ANALYZE (see documentation for details). - -@faq_1068_li -#Run the query with EXPLAIN and check if indexes are used (see documentation for details). - -@faq_1069_li -#If required, create additional indexes and try again using ANALYZE and EXPLAIN. - -@faq_1070_li -#If it doesn't help please report the problem. - -@faq_1071_h3 -#H2 is Very Slow - -@faq_1072_p -# By default, H2 closes the database when the last connection is closed. 
If your application closes the only connection after each operation, the database is opened and closed a lot, which is quite slow. There are multiple ways to solve this problem, see Database Performance Tuning. - -@faq_1073_h3 -#Column Names are Incorrect? - -@faq_1074_p -# For the query SELECT ID AS X FROM TEST the method ResultSetMetaData.getColumnName() returns ID, I expect it to return X. What's wrong? - -@faq_1075_p -# This is not a bug. According the the JDBC specification, the method ResultSetMetaData.getColumnName() should return the name of the column and not the alias name. If you need the alias name, use ResultSetMetaData.getColumnLabel(). Some other database don't work like this yet (they don't follow the JDBC specification). If you need compatibility with those databases, use the Compatibility Mode, or append ;ALIAS_COLUMN_NAME=TRUE to the database URL. - -@faq_1076_p -# This also applies to DatabaseMetaData calls that return a result set. The columns in the JDBC API are column labels, not column names. - -@faq_1077_h3 -#Float is Double? - -@faq_1078_p -# For a table defined as CREATE TABLE TEST(X FLOAT) the method ResultSet.getObject() returns a java.lang.Double, I expect it to return a java.lang.Float. What's wrong? - -@faq_1079_p -# This is not a bug. According the the JDBC specification, the JDBC data type FLOAT is equivalent to DOUBLE, and both are mapped to java.lang.Double. See also Mapping SQL and Java Types - 8.3.10 FLOAT. - -@faq_1080_h3 -#Is the GCJ Version Stable? Faster? - -@faq_1081_p -# The GCJ version is not as stable as the Java version. When running the regression test with the GCJ version, sometimes the application just stops at what seems to be a random point without error message. Currently, the GCJ version is also slower than when using the Sun VM. However, the startup of the GCJ version is faster than when using a VM. - -@faq_1082_h3 -�?��?�プロジェクト�?�翻訳方法�?�? - -@faq_1083_p -# For more information, see Build/Translating. 
- -@faq_1084_h3 -#How to Contribute to this Project? - -@faq_1085_p -# There are various way to help develop an open source project like H2. The first step could be to translate the error messages and the GUI to your native language. Then, you could provide patches. Please start with small patches. That could be adding a test case to improve the code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the feature request list. I suggest to start with very small features that are easy to implement. Keep in mind to provide test cases as well. - -@features_1000_h1 -特徴 - -@features_1001_a -# Feature List - -@features_1002_a -# Comparison to Other Database Engines - -@features_1003_a -# H2 in Use - -@features_1004_a -# Connection Modes - -@features_1005_a -# Database URL Overview - -@features_1006_a -# Connecting to an Embedded (Local) Database - -@features_1007_a -# In-Memory Databases - -@features_1008_a -# Database Files Encryption - -@features_1009_a -# Database File Locking - -@features_1010_a -# Opening a Database Only if it Already Exists - -@features_1011_a -# Closing a Database - -@features_1012_a -# Ignore Unknown Settings - -@features_1013_a -# Changing Other Settings when Opening a Connection - -@features_1014_a -# Custom File Access Mode - -@features_1015_a -# Multiple Connections - -@features_1016_a -# Database File Layout - -@features_1017_a -# Logging and Recovery - -@features_1018_a -# Compatibility - -@features_1019_a -# Auto-Reconnect - -@features_1020_a -# Automatic Mixed Mode - -@features_1021_a -# Page Size - -@features_1022_a -# Using the Trace Options - -@features_1023_a -# Using Other Logging APIs - -@features_1024_a -# Read Only Databases - -@features_1025_a -# Read Only Databases in Zip or Jar File - -@features_1026_a -# Computed Columns / Function Based Index - -@features_1027_a -# 
Multi-Dimensional Indexes - -@features_1028_a -# User-Defined Functions and Stored Procedures - -@features_1029_a -# Pluggable or User-Defined Tables - -@features_1030_a -# Triggers - -@features_1031_a -# Compacting a Database - -@features_1032_a -# Cache Settings - -@features_1033_h2 -特徴一覧 - -@features_1034_h3 -主�?�特徴 - -@features_1035_li -#Very fast database engine - -@features_1036_li -#Open source - -@features_1037_li -#Written in Java - -@features_1038_li -#Supports standard SQL, JDBC API - -@features_1039_li -#Embedded and Server mode, Clustering support - -@features_1040_li -#Strong security features - -@features_1041_li -#The PostgreSQL ODBC driver can be used - -@features_1042_li -#Multi version concurrency - -@features_1043_h3 -追加�?�れ�?�特徴 - -@features_1044_li -#Disk based or in-memory databases and tables, read-only database support, temporary tables - -@features_1045_li -#Transaction support (read committed), 2-phase-commit - -@features_1046_li -#Multiple connections, table level locking - -@features_1047_li -#Cost based optimizer, using a genetic algorithm for complex queries, zero-administration - -@features_1048_li -#Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set - -@features_1049_li -#Encrypted database (AES), SHA-256 password encryption, encryption functions, SSL - -@features_1050_h3 -SQLサ�?ート - -@features_1051_li -#Support for multiple schemas, information schema - -@features_1052_li -#Referential integrity / foreign key constraints with cascade, check constraints - -@features_1053_li -#Inner and outer joins, subqueries, read only views and inline views - -@features_1054_li -#Triggers and Java functions / stored procedures - -@features_1055_li -#Many built-in functions, including XML and lossless data compression - -@features_1056_li -#Wide range of data types including large objects (BLOB/CLOB) and arrays - -@features_1057_li -#Sequence and autoincrement columns, computed 
columns (can be used for function based indexes) - -@features_1058_code -ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP - -@features_1059_li -#Collation support, including support for the ICU4J library - -@features_1060_li -#Support for users and roles - -@features_1061_li -#Compatibility modes for IBM DB2, Apache Derby, HSQLDB, MS SQL Server, MySQL, Oracle, and PostgreSQL. - -@features_1062_h3 -セキュリティ�?�特徴 - -@features_1063_li -#Includes a solution for the SQL injection problem - -@features_1064_li -#User password authentication uses SHA-256 and salt - -@features_1065_li -#For server mode connections, user passwords are never transmitted in plain text over the network (even when using insecure connections; this only applies to the TCP server and not to the H2 Console however; it also doesn't apply if you set the password in the database URL) - -@features_1066_li -#All database files (including script files that can be used to backup data) can be encrypted using the AES-128 encryption algorithm - -@features_1067_li -#The remote JDBC driver supports TCP/IP connections over TLS - -@features_1068_li -#The built-in web server supports connections over TLS - -@features_1069_li -#Passwords can be sent to the database using char arrays instead of Strings - -@features_1070_h3 -他�?�特徴�?�ツール - -@features_1071_li -#Small footprint (smaller than 1.5 MB), low memory requirements - -@features_1072_li -#Multiple index types (b-tree, tree, hash) - -@features_1073_li -#Support for multi-dimensional indexes - -@features_1074_li -#CSV (comma separated values) file support - -@features_1075_li -#Support for linked tables, and a built-in virtual 'range' table - -@features_1076_li -#Supports the EXPLAIN PLAN statement; sophisticated trace options - -@features_1077_li -#Database closing can be delayed or disabled to improve the performance - -@features_1078_li -#Web-based Console application (translated to many languages) with autocomplete - -@features_1079_li -#The database can generate 
SQL script files - -@features_1080_li -#Contains a recovery tool that can dump the contents of the database - -@features_1081_li -#Support for variables (for example to calculate running totals) - -@features_1082_li -#Automatic re-compilation of prepared statements - -@features_1083_li -#Uses a small number of database files - -@features_1084_li -#Uses a checksum for each record and log entry for data integrity - -@features_1085_li -#Well tested (high code coverage, randomized stress tests) - -@features_1086_h2 -#H2 in Use - -@features_1087_p -# This comparison is based on H2 1.3, Apache Derby version 10.8, HSQLDB 2.2, MySQL 5.5, PostgreSQL 9.0. - -@features_1088_th -#Feature - -@features_1089_th -H2 - -@features_1090_th -Derby - -@features_1091_th -HSQLDB - -@features_1092_th -MySQL - -@features_1093_th -PostgreSQL - -@features_1094_td -Pure Java - -@features_1095_td -対応 - -@features_1096_td -対応 - -@features_1097_td -対応 - -@features_1098_td -�?�対応 - -@features_1099_td -�?�対応 - -@features_1100_td -エンベッドモード (Java) - -@features_1101_td -対応 - -@features_1102_td -対応 - -@features_1103_td -対応 - -@features_1104_td -�?�対応 - -@features_1105_td -�?�対応 - -@features_1106_td -#In-Memory Mode - -@features_1107_td -# jdbc:h2:~/test - -@features_1108_td - jdbc:h2:file:/data/sample - -@features_1109_td -# jdbc:h2:file:C:/data/sample (Windows only) - -@features_1110_td -�?�対応 - -@features_1111_td -�?�対応 - -@features_1112_td -#Explain Plan - -@features_1113_td -# jdbc:h2:mem:<databaseName> - -@features_1114_td -#Yes *12 - -@features_1115_td -対応 - -@features_1116_td -対応 - -@features_1117_td -# jdbc:h2:tcp://<server>[:<port>]/[<path>]<databaseName> - -@features_1118_td -#Built-in Clustering / Replication - -@features_1119_td -# jdbc:h2:tcp://dbserv:8084/~/sample - -@features_1120_td -# jdbc:h2:tcp://localhost/mem:test - -@features_1121_td -�?�対応 - -@features_1122_td -対応 - -@features_1123_td -# jdbc:h2:ssl://<server>[:<port>]/[<path>]<databaseName> - -@features_1124_td -# 
jdbc:h2:ssl://localhost:8085/~/sample; - -@features_1125_td -対応 - -@features_1126_td -#Yes *10 - -@features_1127_td -#Yes *10 - -@features_1128_td -# jdbc:h2:file:~/secure;CIPHER=AES - -@features_1129_td -�?�対応 - -@features_1130_td -# jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|NO} - -@features_1131_td -# jdbc:h2:file:~/private;CIPHER=AES;FILE_LOCK=SOCKET - -@features_1132_td -�?�対応 - -@features_1133_td -#Partially *1 - -@features_1134_td -#Partially *2 - -@features_1135_td -対応 - -@features_1136_td -# jdbc:h2:<url>;DB_CLOSE_ON_EXIT=FALSE - -@features_1137_td -対応 - -@features_1138_td - jdbc:h2:<url>;INIT=RUNSCRIPT FROM '~/create.sql' - -@features_1139_td -# jdbc:h2:file:~/sample;INIT=RUNSCRIPT FROM '~/create.sql'\;RUNSCRIPT FROM '~/populate.sql' - -@features_1140_td -対応 - -@features_1141_td -# jdbc:h2:<url>[;USER=<username>][;PASSWORD=<value>] - -@features_1142_td -# jdbc:h2:file:~/sample;USER=sa;PASSWORD=123 - -@features_1143_td -対応 - -@features_1144_td -# jdbc:h2:<url>;TRACE_LEVEL_FILE=<level 0..3> - -@features_1145_td -# jdbc:h2:file:~/sample;TRACE_LEVEL_FILE=3 - -@features_1146_td -対応 - -@features_1147_td -# jdbc:h2:<url>;IGNORE_UNKNOWN_SETTINGS=TRUE - -@features_1148_td -#Domains (User-Defined Types) - -@features_1149_td -# jdbc:h2:<url>;ACCESS_MODE_DATA=rws - -@features_1150_td -�?�対応 - -@features_1151_td -# jdbc:h2:zip:<zipFileName>!/<databaseName> - -@features_1152_td -# jdbc:h2:zip:~/db.zip!/test - -@features_1153_td -対応 - -@features_1154_td -# jdbc:h2:<url>;MODE=<databaseType> - -@features_1155_td -# jdbc:h2:~/test;MODE=MYSQL - -@features_1156_td -多 - -@features_1157_td -# jdbc:h2:<url>;AUTO_RECONNECT=TRUE - -@features_1158_td -# jdbc:h2:tcp://localhost/~/test;AUTO_RECONNECT=TRUE - -@features_1159_td -多 - -@features_1160_td -#Row Level Locking - -@features_1161_td -#Yes *9 - -@features_1162_td -対応 - -@features_1163_td -#Yes *9 - -@features_1164_td -対応 - -@features_1165_td -# jdbc:h2:<url>;<setting>=<value>[;<setting>=<value>...] 
- -@features_1166_td -#Multi Version Concurrency - -@features_1167_td -対応 - -@features_1168_td -�?�対応 - -@features_1169_td -対応 - -@features_1170_td -対応 - -@features_1171_td -対応 - -@features_1172_td -#Multi-Threaded Processing - -@features_1173_td -#No *11 - -@features_1174_td -対応 - -@features_1175_td -対応 - -@features_1176_td -対応 - -@features_1177_td -対応 - -@features_1178_td -#Role Based Security - -@features_1179_td -対応 - -@features_1180_td -#Yes *3 - -@features_1181_td -対応 - -@features_1182_td -対応 - -@features_1183_td -対応 - -@features_1184_td -#Updatable Result Sets - -@features_1185_td -対応 - -@features_1186_td -#Yes *7 - -@features_1187_td -対応 - -@features_1188_td -対応 - -@features_1189_td -対応 - -@features_1190_td -#Sequences - -@features_1191_td -対応 - -@features_1192_td -対応 - -@features_1193_td -対応 - -@features_1194_td -�?�対応 - -@features_1195_td -対応 - -@features_1196_td -#Limit and Offset - -@features_1197_td -対応 - -@features_1198_td -#Yes *13 - -@features_1199_td -対応 - -@features_1200_td -対応 - -@features_1201_td -対応 - -@features_1202_td -#Window Functions - -@features_1203_td -#No *15 - -@features_1204_td -#No *15 - -@features_1205_td -�?�対応 - -@features_1206_td -�?�対応 - -@features_1207_td -対応 - -@features_1208_td -#Temporary Tables - -@features_1209_td -対応 - -@features_1210_td -#Yes *4 - -@features_1211_td -対応 - -@features_1212_td -対応 - -@features_1213_td -対応 - -@features_1214_td -#Information Schema - -@features_1215_td -対応 - -@features_1216_td -#No *8 - -@features_1217_td -対応 - -@features_1218_td -対応 - -@features_1219_td -対応 - -@features_1220_td -#Computed Columns - -@features_1221_td -対応 - -@features_1222_td -対応 - -@features_1223_td -対応 - -@features_1224_td -対応 - -@features_1225_td -#Yes *6 - -@features_1226_td -#Case Insensitive Columns - -@features_1227_td -対応 - -@features_1228_td -#Yes *14 - -@features_1229_td -対応 - -@features_1230_td -#SELECT * FROM TEST; - -@features_1231_td -#Yes *6 - -@features_1232_td -#Custom Aggregate Functions - 
-@features_1233_td -対応 - -@features_1234_td -#SELECT * FROM TEST WHERE 1=0 FOR UPDATE; - -@features_1235_td -対応 - -@features_1236_td -#INSERT INTO TEST VALUES(1, 'Hello'); - -@features_1237_td -# INSERT INTO TEST SELECT * FROM TEST; - -@features_1238_td -#CLOB/BLOB Compression - -@features_1239_td -# DELETE FROM TEST; - -@features_1240_td -�?�対応 - -@features_1241_td -#ALTER TABLE TEST ...; - -@features_1242_td -# CREATE INDEX ... ON TEST ...; - -@features_1243_td -# DROP INDEX ...; - -@features_1244_td -フットプリント (jar/dll size) - -@features_1245_td -#~1.5 MB *5 - -@features_1246_td -#~3 MB - -@features_1247_td -#~1.5 MB - -@features_1248_td -#~4 MB - -@features_1249_td -#~6 MB - -@features_1250_p -# *1 HSQLDB supports text tables. - -@features_1251_p -# *2 MySQL supports linked MySQL tables under the name 'federated tables'. - -@features_1252_p -# *3 Derby support for roles based security and password checking as an option. - -@features_1253_p -# *4 Derby only supports global temporary tables. - -@features_1254_p -# *5 The default H2 jar file contains debug information, jar files for other databases do not. - -@features_1255_p -# *6 PostgreSQL supports functional indexes. - -@features_1256_p -# *7 Derby only supports updatable result sets if the query is not sorted. - -@features_1257_p -# *8 Derby doesn't support standard compliant information schema tables. - -@features_1258_p -# *9 When using MVCC (multi version concurrency). - -@features_1259_p -# *10 Derby and HSQLDB don't hide data patterns well. - -@features_1260_p -# *11 The MULTI_THREADED option is not enabled by default, and with version 1.3.x not supported when using MVCC. - -@features_1261_p -# *12 Derby doesn't support the EXPLAIN statement, but it supports runtime statistics and retrieving statement execution plans. - -@features_1262_p -# *13 Derby doesn't support the syntax LIMIT .. [OFFSET ..], however it supports FETCH FIRST .. ROW[S] ONLY. - -@features_1263_p -# *14 Using collations. 
*15 Derby and H2 support ROW_NUMBER() OVER(). - -@features_1264_h3 -DaffodilDb�?�One$Db - -@features_1265_p -# It looks like the development of this database has stopped. The last release was February 2006. - -@features_1266_h3 -McKoi - -@features_1267_p -# It looks like the development of this database has stopped. The last release was August 2004. - -@features_1268_h2 -#H2 in Use - -@features_1269_p -# For a list of applications that work with or use H2, see: Links. - -@features_1270_h2 -接続モード - -@features_1271_p -# The following connection modes are supported: - -@features_1272_li -#Embedded mode (local connections using JDBC) - -@features_1273_li -#Server mode (remote connections using JDBC or ODBC over TCP/IP) - -@features_1274_li -#Mixed mode (local and remote connections at the same time) - -@features_1275_h3 -エンベッドモード - -@features_1276_p -# In embedded mode, an application opens a database from within the same JVM using JDBC. This is the fastest and easiest connection mode. The disadvantage is that a database may only be open in one virtual machine (and class loader) at any time. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of database open concurrently, or on the number of open connections. - -@features_1277_h3 -サー�?ーモード - -@features_1278_p -# When using the server mode (sometimes called remote mode or client/server mode), an application opens a database remotely using the JDBC or ODBC API. A server needs to be started within the same or another virtual machine, or on another computer. Many applications can connect to the same database at the same time, by connecting to this server. Internally, the server process opens the database(s) in embedded mode. - -@features_1279_p -# The server mode is slower than the embedded mode, because all data is transferred over TCP/IP. As in all modes, both persistent and in-memory databases are supported. 
There is no limit on the number of database open concurrently per server, or on the number of open connections. - -@features_1280_h3 -#Mixed Mode - -@features_1281_p -# The mixed mode is a combination of the embedded and the server mode. The first application that connects to a database does that in embedded mode, but also starts a server so that other applications (running in different processes or virtual machines) can concurrently access the same data. The local connections are as fast as if the database is used in just the embedded mode, while the remote connections are a bit slower. - -@features_1282_p -# The server can be started and stopped from within the application (using the server API), or automatically (automatic mixed mode). When using the automatic mixed mode, all clients that want to connect to the database (no matter if it's an local or remote connection) can do so using the exact same database URL. - -@features_1283_h2 -データベースURL概�? - -@features_1284_p -# This database supports multiple connection modes and connection settings. This is achieved using different database URLs. Settings in the URLs are not case sensitive. 
- -@features_1285_th -トピック - -@features_1286_th -URLフォーマット�?�例 - -@features_1287_a -エンベッド (ローカル) 接続 - -@features_1288_td -# jdbc:h2:[file:][<path>]<databaseName> - -@features_1289_td -# jdbc:h2:~/test - -@features_1290_td -# jdbc:h2:file:/data/sample - -@features_1291_td -# jdbc:h2:file:C:/data/sample (Windows only) - -@features_1292_a -#In-memory (private) - -@features_1293_td -jdbc:h2:mem: - -@features_1294_a -#In-memory (named) - -@features_1295_td -# jdbc:h2:mem:<databaseName> - -@features_1296_td -# jdbc:h2:mem:test_mem - -@features_1297_a -#Server mode (remote connections) - -@features_1298_a -# using TCP/IP - -@features_1299_td -# jdbc:h2:tcp://<server>[:<port>]/[<path>]<databaseName> - -@features_1300_td -# jdbc:h2:tcp://localhost/~/test - -@features_1301_td -# jdbc:h2:tcp://dbserv:8084/~/sample - -@features_1302_td -# jdbc:h2:tcp://localhost/mem:test - -@features_1303_a -#Server mode (remote connections) - -@features_1304_a -# using TLS - -@features_1305_td -# jdbc:h2:ssl://<server>[:<port>]/<databaseName> - -@features_1306_td -# jdbc:h2:ssl://localhost:8085/~/sample; - -@features_1307_a -#Using encrypted files - -@features_1308_td -# jdbc:h2:<url>;CIPHER=AES - -@features_1309_td -# jdbc:h2:ssl://localhost/~/test;CIPHER=AES - -@features_1310_td -# jdbc:h2:file:~/secure;CIPHER=AES - -@features_1311_a -#File locking methods - -@features_1312_td -# jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|NO} - -@features_1313_td -# jdbc:h2:file:~/private;CIPHER=AES;FILE_LOCK=SOCKET - -@features_1314_a -#Only open if it already exists - -@features_1315_td -# jdbc:h2:<url>;IFEXISTS=TRUE - -@features_1316_td -# jdbc:h2:file:~/sample;IFEXISTS=TRUE - -@features_1317_a -#Don't close the database when the VM exits - -@features_1318_td -# jdbc:h2:<url>;DB_CLOSE_ON_EXIT=FALSE - -@features_1319_a -#Execute SQL on connection - -@features_1320_td -# jdbc:h2:<url>;INIT=RUNSCRIPT FROM '~/create.sql' - -@features_1321_td -# jdbc:h2:file:~/sample;INIT=RUNSCRIPT FROM '~/create.sql'\;RUNSCRIPT 
FROM '~/populate.sql' - -@features_1322_a -#User name and/or password - -@features_1323_td -# jdbc:h2:<url>[;USER=<username>][;PASSWORD=<value>] - -@features_1324_td -# jdbc:h2:file:~/sample;USER=sa;PASSWORD=123 - -@features_1325_a -#Debug trace settings - -@features_1326_td -# jdbc:h2:<url>;TRACE_LEVEL_FILE=<level 0..3> - -@features_1327_td -# jdbc:h2:file:~/sample;TRACE_LEVEL_FILE=3 - -@features_1328_a -#Ignore unknown settings - -@features_1329_td -# jdbc:h2:<url>;IGNORE_UNKNOWN_SETTINGS=TRUE - -@features_1330_a -#Custom file access mode - -@features_1331_td -# jdbc:h2:<url>;ACCESS_MODE_DATA=rws - -@features_1332_a -#Database in a zip file - -@features_1333_td -# jdbc:h2:zip:<zipFileName>!/<databaseName> - -@features_1334_td -# jdbc:h2:zip:~/db.zip!/test - -@features_1335_a -#Compatibility mode - -@features_1336_td -# jdbc:h2:<url>;MODE=<databaseType> - -@features_1337_td -# jdbc:h2:~/test;MODE=MYSQL - -@features_1338_a -#Auto-reconnect - -@features_1339_td -# jdbc:h2:<url>;AUTO_RECONNECT=TRUE - -@features_1340_td -# jdbc:h2:tcp://localhost/~/test;AUTO_RECONNECT=TRUE - -@features_1341_a -#Automatic mixed mode - -@features_1342_td -# jdbc:h2:<url>;AUTO_SERVER=TRUE - -@features_1343_td -# jdbc:h2:~/test;AUTO_SERVER=TRUE - -@features_1344_a -#Page size - -@features_1345_td -# jdbc:h2:<url>;PAGE_SIZE=512 - -@features_1346_a -#Changing other settings - -@features_1347_td -# jdbc:h2:<url>;<setting>=<value>[;<setting>=<value>...] - -@features_1348_td -# jdbc:h2:file:~/sample;TRACE_LEVEL_SYSTEM_OUT=3 - -@features_1349_h2 -エンベッド (ローカル) データベース�?�接続 - -@features_1350_p -# The database URL for connecting to a local database is jdbc:h2:[file:][<path>]<databaseName>. The prefix file: is optional. If no or only a relative path is used, then the current working directory is used as a starting point. The case sensitivity of the path and database name depend on the operating system, however it is recommended to use lowercase letters only. 
The database name must be at least three characters long (a limitation of File.createTempFile). The database name must not contain a semicolon. To point to the user home directory, use ~/, as in: jdbc:h2:~/test. - -@features_1351_h2 -#In-Memory Databases - -@features_1352_p -# For certain use cases (for example: rapid prototyping, testing, high performance operations, read-only databases), it may not be required to persist data, or persist changes to the data. This database supports the in-memory mode, where the data is not persisted. - -@features_1353_p -# In some cases, only one connection to a in-memory database is required. This means the database to be opened is private. In this case, the database URL is jdbc:h2:mem: Opening two connections within the same virtual machine means opening two different (private) databases. - -@features_1354_p -# Sometimes multiple connections to the same in-memory database are required. In this case, the database URL must include a name. Example: jdbc:h2:mem:db1. Accessing the same database using this URL only works within the same virtual machine and class loader environment. - -@features_1355_p -# To access an in-memory database from another process or from another computer, you need to start a TCP server in the same process as the in-memory database was created. The other processes then need to access the database over TCP/IP or TLS, using a database URL such as: jdbc:h2:tcp://localhost/mem:db1. - -@features_1356_p -# By default, closing the last connection to a database closes the database. For an in-memory database, this means the content is lost. To keep the database open, add ;DB_CLOSE_DELAY=-1 to the database URL. To keep the content of an in-memory database as long as the virtual machine is alive, use jdbc:h2:mem:test;DB_CLOSE_DELAY=-1. - -@features_1357_h2 -#Database Files Encryption - -@features_1358_p -# The database files can be encrypted. 
Three encryption algorithms are supported: - -@features_1359_li -#"AES" - also known as Rijndael, only AES-128 is implemented. - -@features_1360_li -#"XTEA" - the 32 round version. - -@features_1361_li -#"FOG" - pseudo-encryption only useful for hiding data from a text editor. - -@features_1362_p -# To use file encryption, you need to specify the encryption algorithm (the 'cipher') and the file password (in addition to the user password) when connecting to the database. - -@features_1363_h3 -#Creating a New Database with File Encryption - -@features_1364_p -# By default, a new database is automatically created if it does not exist yet. To create an encrypted database, connect to it as it would already exist. - -@features_1365_h3 -#Connecting to an Encrypted Database - -@features_1366_p -# The encryption algorithm is set in the database URL, and the file password is specified in the password field, before the user password. A single space separates the file password and the user password; the file password itself may not contain spaces. File passwords and user passwords are case sensitive. Here is an example to connect to a password-encrypted database: - -@features_1367_h3 -#Encrypting or Decrypting a Database - -@features_1368_p -# To encrypt an existing database, use the ChangeFileEncryption tool. This tool can also decrypt an encrypted database, or change the file encryption key. The tool is available from within the H2 Console in the tools section, or you can run it from the command line. The following command line will encrypt the database test in the user home directory with the file password filepwd and the encryption algorithm AES: - -@features_1369_h2 -データベースファイルロック - -@features_1370_p -# Whenever a database is opened, a lock file is created to signal other processes that the database is in use. If database is closed, or if the process that opened the database terminates, this lock file is deleted. 
- -@features_1371_p -# The following file locking methods are implemented: - -@features_1372_li -#The default method is FILE and uses a watchdog thread to protect the database file. The watchdog reads the lock file each second. - -@features_1373_li -#The second method is SOCKET and opens a server socket. The socket method does not require reading the lock file every second. The socket method should only be used if the database files are only accessed by one (and always the same) computer. - -@features_1374_li -#The third method is FS. This will use native file locking using FileChannel.lock. - -@features_1375_li -#It is also possible to open the database without file locking; in this case it is up to the application to protect the database files. Failing to do so will result in a corrupted database. Using the method NO forces the database to not create a lock file at all. Please note that this is unsafe as another process is able to open the same database, possibly leading to data corruption. - -@features_1376_p -# To open the database with a different file locking method, use the parameter FILE_LOCK. The following code opens the database with the 'socket' locking method: - -@features_1377_p -# For more information about the algorithms, see Advanced / File Locking Protocols. - -@features_1378_h2 -#Page Size - -@features_1379_p -# By default, when an application calls DriverManager.getConnection(url, ...) and the database specified in the URL does not yet exist, a new (empty) database is created. In some situations, it is better to restrict creating new databases, and only allow to open existing databases. To do this, add ;IFEXISTS=TRUE to the database URL. In this case, if the database does not already exist, an exception is thrown when trying to connect. The connection only succeeds when the database already exists. 
The complete URL may look like this: - -@features_1380_h2 -#Closing a Database - -@features_1381_h3 -データベースの遅延終了 - -@features_1382_p -# Usually, a database is closed when the last connection to it is closed. In some situations this slows down the application, for example when it is not possible to keep at least one connection open. The automatic closing of a database can be delayed or disabled with the SQL statement SET DB_CLOSE_DELAY <seconds>. The parameter <seconds> specifies the number of seconds to keep a database open after the last connection to it was closed. The following statement will keep a database open for 10 seconds after the last connection was closed: - -@features_1383_p -# The value -1 means the database is not closed automatically. The value 0 is the default and means the database is closed when the last connection is closed. This setting is persistent and can be set by an administrator only. It is possible to set the value in the database URL: jdbc:h2:~/test;DB_CLOSE_DELAY=10. - -@features_1384_h3 -#Don't Close a Database when the VM Exits - -@features_1385_p -# By default, a database is closed when the last connection is closed. However, if it is never closed, the database is closed when the virtual machine exits normally, using a shutdown hook. In some situations, the database should not be closed in this case, for example because the database is still used at virtual machine shutdown (to store the shutdown process in the database for example). For those cases, the automatic closing of the database can be disabled in the database URL. The first connection (the one that is opening the database) needs to set the option in the database URL (it is not possible to change the setting afterwards). 
The database URL to disable database closing on exit is: - -@features_1386_h2 -#Execute SQL on Connection - -@features_1387_p -# Sometimes, particularly for in-memory databases, it is useful to be able to execute DDL or DML commands automatically when a client connects to a database. This functionality is enabled via the INIT property. Note that multiple commands may be passed to INIT, but the semicolon delimiter must be escaped, as in the example below. - -@features_1388_p -# Please note the double backslash is only required in a Java or properties file. In a GUI, or in an XML file, only one backslash is required: - -@features_1389_p -# Backslashes within the init script (for example within a runscript statement, to specify the folder names in Windows) need to be escaped as well (using a second backslash). It might be simpler to avoid backslashes in folder names for this reason; use forward slashes instead. - -@features_1390_h2 -未知�?�設定を無視 - -@features_1391_p -# Some applications (for example OpenOffice.org Base) pass some additional parameters when connecting to the database. Why those parameters are passed is unknown. The parameters PREFERDOSLIKELINEENDS and IGNOREDRIVERPRIVILEGES are such examples; they are simply ignored to improve the compatibility with OpenOffice.org. If an application passes other parameters when connecting to the database, usually the database throws an exception saying the parameter is not supported. It is possible to ignored such parameters by adding ;IGNORE_UNKNOWN_SETTINGS=TRUE to the database URL. - -@features_1392_h2 -接続�?�開始�?�れ�?�時�?�他�?�設定を変更�?�る - -@features_1393_p -# In addition to the settings already described, other database settings can be passed in the database URL. Adding ;setting=value at the end of a database URL is the same as executing the statement SET setting value just after connecting. For a list of supported settings, see SQL Grammar or the DbSettings javadoc. 
 - -@features_1394_h2 -カスタムファイル アクセスモード - -@features_1395_p -# Usually, the database opens the database file with the access mode rw, meaning read-write (except for read only databases, where the mode r is used). To open a database in read-only mode if the database file is not read-only, use ACCESS_MODE_DATA=r. Also supported are rws and rwd. This setting must be specified in the database URL: - -@features_1396_p -# For more information see Durability Problems. On many operating systems the access mode rws does not guarantee that the data is written to the disk. - -@features_1397_h2 -複数の接続 - -@features_1398_h3 -同時に複数のデータベースを開く - -@features_1399_p -# An application can open multiple databases at the same time, including multiple connections to the same database. The number of open databases is only limited by the memory available. - -@features_1400_h3 -同じデータベースへの複数の接続: クライアント/サーバー - -@features_1401_p -# If you want to access the same database at the same time from different processes or computers, you need to use the client / server mode. In this case, one process acts as the server, and the other processes (that could reside on other computers as well) connect to the server via TCP/IP (or TLS over TCP/IP for improved security). - -@features_1402_h3 -マルチスレッドサポート - -@features_1403_p -# This database is multithreading-safe. If an application is multi-threaded, it does not need to worry about synchronizing access to the database. An application should normally use one connection per thread. This database synchronizes access to the same connection, but other databases may not do this. To get higher concurrency, you need to use multiple connections. - -@features_1404_p -# By default, requests to the same database are synchronized. That means an application can use multiple threads that access the same database at the same time, however if one thread executes a long running query, the other threads need to wait. 
To enable concurrent database usage, see the setting MULTI_THREADED. - -@features_1405_h3 -ロック、ロックタイムアウト、デッドロック - -@features_1406_p -# Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. If multi-version concurrency is not used, the database uses table level locks to give each connection a consistent state of the data. There are two kinds of locks: read locks (shared locks) and write locks (exclusive locks). All locks are released when the transaction commits or rolls back. When using the default transaction isolation level 'read committed', read locks are already released after each statement. - -@features_1407_p -# If a connection wants to read from a table, and there is no write lock on the table, then a read lock is added to the table. If there is a write lock, then this connection waits for the other connection to release the lock. If a connection cannot get a lock for a specified time, then a lock timeout exception is thrown. - -@features_1408_p -# Usually, SELECT statements will generate read locks. This includes subqueries. Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data, using the statement SELECT ... FOR UPDATE. The statements COMMIT and ROLLBACK release all open locks. The commands SAVEPOINT and ROLLBACK TO SAVEPOINT don't affect locks. The locks are also released when the autocommit mode changes, and for connections with autocommit set to true (this is the default), locks are released after each statement. 
The following statements generate locks: - -@features_1409_th -ロック�?�種類 - -@features_1410_th -SQLステートメント - -@features_1411_td -Read - -@features_1412_td -#SELECT * FROM TEST; - -@features_1413_td -# CALL SELECT MAX(ID) FROM TEST; - -@features_1414_td -# SCRIPT; - -@features_1415_td -Write - -@features_1416_td -#SELECT * FROM TEST WHERE 1=0 FOR UPDATE; - -@features_1417_td -Write - -@features_1418_td -#INSERT INTO TEST VALUES(1, 'Hello'); - -@features_1419_td -# INSERT INTO TEST SELECT * FROM TEST; - -@features_1420_td -# UPDATE TEST SET NAME='Hi'; - -@features_1421_td -# DELETE FROM TEST; - -@features_1422_td -Write - -@features_1423_td -#ALTER TABLE TEST ...; - -@features_1424_td -# CREATE INDEX ... ON TEST ...; - -@features_1425_td -# DROP INDEX ...; - -@features_1426_p -# The number of seconds until a lock timeout exception is thrown can be set separately for each connection using the SQL command SET LOCK_TIMEOUT <milliseconds>. The initial lock timeout (that is the timeout used for new connections) can be set using the SQL command SET DEFAULT_LOCK_TIMEOUT <milliseconds>. The default lock timeout is persistent. - -@features_1427_h3 -#Avoiding Deadlocks - -@features_1428_p -# To avoid deadlocks, ensure that all transactions lock the tables in the same order (for example in alphabetical order), and avoid upgrading read locks to write locks. Both can be achieved using explicitly locking tables using SELECT ... FOR UPDATE. - -@features_1429_h2 -データベースファイルレイアウト - -@features_1430_p -# The following files are created for persistent databases: - -@features_1431_th -ファイル�?? - -@features_1432_th -説明 - -@features_1433_th -ファイル数 - -@features_1434_td -# test.h2.db - -@features_1435_td -# Database file. - -@features_1436_td -# Contains the transaction log, indexes, and data for all tables. - -@features_1437_td -# Format: <database>.h2.db - -@features_1438_td -# 1 per database - -@features_1439_td -# test.lock.db - -@features_1440_td -# Database lock file. 
 - -@features_1441_td -# Automatically (re-)created while the database is in use. - -@features_1442_td -# Format: <database>.lock.db - -@features_1443_td -# 1 per database (only if in use) - -@features_1444_td -# test.trace.db - -@features_1445_td -# Trace file (if the trace option is enabled). - -@features_1446_td -# Contains trace information. - -@features_1447_td -# Format: <database>.trace.db - -@features_1448_td -# Renamed to <database>.trace.db.old if too big. - -@features_1449_td -# 0 or 1 per database - -@features_1450_td -# test.lobs.db/* - -@features_1451_td -# Directory containing one file for each - -@features_1452_td -# BLOB or CLOB value larger than a certain size. - -@features_1453_td -# Format: <id>.t<tableId>.lob.db - -@features_1454_td -# 1 per large object - -@features_1455_td -# test.123.temp.db - -@features_1456_td -# Temporary file. - -@features_1457_td -# Contains a temporary blob or a large result set. - -@features_1458_td -# Format: <database>.<id>.temp.db - -@features_1459_td -# 1 per object - -@features_1460_h3 -データベースファイルの移動と改名 - -@features_1461_p -# Database name and location are not stored inside the database files. - -@features_1462_p -# While a database is closed, the files can be moved to another directory, and they can be renamed as well (as long as all files of the same database start with the same name and the respective extensions are unchanged). - -@features_1463_p -# As there is no platform specific data in the files, they can be moved to other operating systems without problems. - -@features_1464_h3 -バックアップ - -@features_1465_p -# When the database is closed, it is possible to backup the database files. - -@features_1466_p -# To backup data while the database is running, the SQL commands SCRIPT and BACKUP can be used. 
 - -@features_1467_h2 -ログとリカバリー - -@features_1468_p -# Whenever data is modified in the database and those changes are committed, the changes are written to the transaction log (except for in-memory objects). The changes to the main data area itself are usually written later on, to optimize disk access. If there is a power failure, the main data area is not up-to-date, but because the changes are in the transaction log, the next time the database is opened, the changes are re-applied automatically. - -@features_1469_h2 -互換性 - -@features_1470_p -# All database engines behave a little bit different. Where possible, H2 supports the ANSI SQL standard, and tries to be compatible to other databases. There are still a few differences however: - -@features_1471_p -# In MySQL text columns are case insensitive by default, while in H2 they are case sensitive. However H2 supports case insensitive columns as well. To create the tables with case insensitive texts, append IGNORECASE=TRUE to the database URL (example: jdbc:h2:~/test;IGNORECASE=TRUE). - -@features_1472_h3 -互換モード - -@features_1473_p -# For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases are implemented in this way. Here is the list of currently supported modes and the differences to the regular mode: - -@features_1474_h3 -#DB2 Compatibility Mode - -@features_1475_p -# To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2 or the SQL statement SET MODE DB2. - -@features_1476_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1477_li -#Support for the syntax [OFFSET .. ROW] [FETCH ... ONLY] as an alternative for LIMIT .. OFFSET. - -@features_1478_li -#Concatenating NULL with another value results in the other value. - -@features_1479_li -#Support the pseudo-table SYSIBM.SYSDUMMY1. 
- -@features_1480_h3 -#Derby Compatibility Mode - -@features_1481_p -# To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby or the SQL statement SET MODE Derby. - -@features_1482_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1483_li -#For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1484_li -#Concatenating NULL with another value results in the other value. - -@features_1485_li -#Support the pseudo-table SYSIBM.SYSDUMMY1. - -@features_1486_h3 -#HSQLDB Compatibility Mode - -@features_1487_p -# To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB or the SQL statement SET MODE HSQLDB. - -@features_1488_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1489_li -#When converting the scale of decimal data, the number is only converted if the new scale is smaller than the current scale. Usually, the scale is converted and 0s are added if required. - -@features_1490_li -#For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1491_li -#Text can be concatenated using '+'. - -@features_1492_h3 -#MS SQL Server Compatibility Mode - -@features_1493_p -# To use the MS SQL Server mode, use the database URL jdbc:h2:~/test;MODE=MSSQLServer or the SQL statement SET MODE MSSQLServer. - -@features_1494_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1495_li -#Identifiers may be quoted using square brackets as in [Test]. - -@features_1496_li -#For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1497_li -#Concatenating NULL with another value results in the other value. 
- -@features_1498_li -#Text can be concatenated using '+'. - -@features_1499_h3 -#MySQL Compatibility Mode - -@features_1500_p -# To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL or the SQL statement SET MODE MySQL. - -@features_1501_li -#When inserting data, if a column is defined to be NOT NULL and NULL is inserted, then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. Usually, this operation is not allowed and an exception is thrown. - -@features_1502_li -#Creating indexes in the CREATE TABLE statement is allowed using INDEX(..) or KEY(..). Example: create table test(id int primary key, name varchar(255), key idx_name(name)); - -@features_1503_li -#Meta data calls return identifiers in lower case. - -@features_1504_li -#When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. - -@features_1505_li -#Concatenating NULL with another value results in the other value. - -@features_1506_p -# Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases). H2 does support case insensitive text comparison, but it needs to be set separately, using SET IGNORECASE TRUE. This affects comparison using =, LIKE, REGEXP. - -@features_1507_h3 -#Oracle Compatibility Mode - -@features_1508_p -# To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle or the SQL statement SET MODE Oracle. - -@features_1509_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1510_li -#When using unique indexes, multiple rows with NULL in all columns are allowed, however it is not allowed to have multiple rows with the same values otherwise. - -@features_1511_li -#Concatenating NULL with another value results in the other value. - -@features_1512_li -#Empty strings are treated like NULL values. 
 - -@features_1513_h3 -#PostgreSQL Compatibility Mode - -@features_1514_p -# To use the PostgreSQL mode, use the database URL jdbc:h2:~/test;MODE=PostgreSQL or the SQL statement SET MODE PostgreSQL. - -@features_1515_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1516_li -#When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. - -@features_1517_li -#The system columns CTID and OID are supported. - -@features_1518_li -#LOG(x) is base 10 in this mode. - -@features_1519_h2 -#Auto-Reconnect - -@features_1520_p -# The auto-reconnect feature causes the JDBC driver to reconnect to the database if the connection is lost. The automatic re-connect only occurs when auto-commit is enabled; if auto-commit is disabled, an exception is thrown. To enable this mode, append ;AUTO_RECONNECT=TRUE to the database URL. - -@features_1521_p -# Re-connecting will open a new session. After an automatic re-connect, variables and local temporary tables definitions (excluding data) are re-created. The contents of the system table INFORMATION_SCHEMA.SESSION_STATE contains all client side state that is re-created. - -@features_1522_p -# If another connection uses the database in exclusive mode (enabled using SET EXCLUSIVE 1 or SET EXCLUSIVE 2), then this connection will try to re-connect until the exclusive mode ends. - -@features_1523_h2 -#Automatic Mixed Mode - -@features_1524_p -# Multiple processes can access the same database without having to start the server manually. To do that, append ;AUTO_SERVER=TRUE to the database URL. You can use the same database URL independent of whether the database is already open or not. This feature doesn't work with in-memory databases. Example database URL: - -@features_1525_p -# Use the same URL for all connections to this database. 
Internally, when using this mode, the first connection to the database is made in embedded mode, and additionally a server is started internally (as a daemon thread). If the database is already open in another process, the server mode is used automatically. The IP address and port of the server are stored in the file .lock.db, that's why in-memory databases can't be supported. - -@features_1526_p -# The application that opens the first connection to the database uses the embedded mode, which is faster than the server mode. Therefore the main application should open the database first if possible. The first connection automatically starts a server on a random port. This server allows remote connections, however only to this database (to ensure that, the client reads .lock.db file and sends the random key that is stored there to the server). When the first connection is closed, the server stops. If other (remote) connections are still open, one of them will then start a server (auto-reconnect is enabled automatically). - -@features_1527_p -# All processes need to have access to the database files. If the first connection is closed (the connection that started the server), open transactions of other connections will be rolled back (this may not be a problem if you don't disable autocommit). Explicit client/server connections (using jdbc:h2:tcp:// or ssl://) are not supported. This mode is not supported for in-memory databases. - -@features_1528_p -# Here is an example how to use this mode. Application 1 and 2 are not necessarily started on the same computer, but they need to have access to the database files. Application 1 and 2 are typically two different processes (however they could run within the same process). - -@features_1529_p -# When using this feature, by default the server uses any free TCP port. The port can be set manually using AUTO_SERVER_PORT=9090. 
 - -@features_1530_h2 -#Page Size - -@features_1531_p -# The page size for new databases is 2 KB (2048), unless the page size is set explicitly in the database URL using PAGE_SIZE= when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created. - -@features_1532_h2 -トレースオプションを使用する - -@features_1533_p -# To find problems in an application, it is sometimes good to see what database operations were executed. This database offers the following trace features: - -@features_1534_li -#Trace to System.out and/or to a file - -@features_1535_li -#Support for trace levels OFF, ERROR, INFO, DEBUG - -@features_1536_li -#The maximum size of the trace file can be set - -@features_1537_li -#It is possible to generate Java source code from the trace file - -@features_1538_li -#Trace can be enabled at runtime by manually creating a file - -@features_1539_h3 -トレースオプション - -@features_1540_p -# The simplest way to enable the trace option is setting it in the database URL. There are two settings, one for System.out (TRACE_LEVEL_SYSTEM_OUT) tracing, and one for file tracing (TRACE_LEVEL_FILE). The trace levels are 0 for OFF, 1 for ERROR (the default), 2 for INFO, and 3 for DEBUG. A database URL with both levels set to DEBUG is: - -@features_1541_p -# The trace level can be changed at runtime by executing the SQL command SET TRACE_LEVEL_SYSTEM_OUT level (for System.out tracing) or SET TRACE_LEVEL_FILE level (for file tracing). Example: - -@features_1542_h3 -トレースファイルの最大サイズを設定 - -@features_1543_p -# When using a high trace level, the trace file can get very big quickly. The default size limit is 16 MB, if the trace file exceeds this limit, it is renamed to .old and a new file is created. If another such file exists, it is deleted. To limit the size to a certain number of megabytes, use SET TRACE_MAX_FILE_SIZE mb. Example: - -@features_1544_h3 -Javaコード生成 
 - -@features_1545_p -# When setting the trace level to INFO or DEBUG, Java source code is generated as well. This simplifies reproducing problems. The trace file looks like this: - -@features_1546_p -# To filter the Java source code, use the ConvertTraceFile tool as follows: - -@features_1547_p -# The generated file Test.java will contain the Java source code. The generated source code may be too large to compile (the size of a Java method is limited). If this is the case, the source code needs to be split in multiple methods. The password is not listed in the trace file and therefore not included in the source code. - -@features_1548_h2 -#Using Other Logging APIs - -@features_1549_p -# By default, this database uses its own native 'trace' facility. This facility is called 'trace' and not 'log' within this database to avoid confusion with the transaction log. Trace messages can be written to both file and System.out. In most cases, this is sufficient, however sometimes it is better to use the same facility as the application, for example Log4j. To do that, this database supports SLF4J. - -@features_1550_a -#SLF4J - -@features_1551_p -# is a simple facade for various logging APIs and allows to plug in the desired implementation at deployment time. SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), Java logging, x4juli, and Simple Log. - -@features_1552_p -# To enable SLF4J, set the file trace level to 4 in the database URL: - -@features_1553_p -# Changing the log mechanism is not possible after the database is open, that means executing the SQL statement SET TRACE_LEVEL_FILE 4 when the database is already open will not have the desired effect. To use SLF4J, all required jar files need to be in the classpath. The logger name is h2database. If it does not work, check the file <database>.trace.db for error messages. 
 - -@features_1554_h2 -読み取り専用データベース - -@features_1555_p -# If the database files are read-only, then the database is read-only as well. It is not possible to create new tables, add or modify data in this database. Only SELECT and CALL statements are allowed. To create a read-only database, close the database. Then, make the database file read-only. When you open the database now, it is read-only. There are two ways an application can find out whether the database is read-only: by calling Connection.isReadOnly() or by executing the SQL statement CALL READONLY(). - -@features_1556_p -# Using the Custom Access Mode r the database can also be opened in read-only mode, even if the database file is not read only. - -@features_1557_h2 -#Read Only Databases in Zip or Jar File - -@features_1558_p -# To create a read-only database in a zip file, first create a regular persistent database, and then create a backup. The database must not have pending changes, that means you need to close all connections to the database first. To speed up opening the read-only database and running queries, the database should be closed using SHUTDOWN DEFRAG. If you are using a database named test, an easy way to create a zip file is using the Backup tool. You can start the tool from the command line, or from within the H2 Console (Tools - Backup). Please note that the database must be closed when the backup is created. Therefore, the SQL statement BACKUP TO can not be used. - -@features_1559_p -# When the zip file is created, you can open the database in the zip file using the following database URL: - -@features_1560_p -# Databases in zip files are read-only. The performance for some queries will be slower than when using a regular database, because random access in zip files is not supported (only streaming). How much this affects the performance depends on the queries and the data. The database is not read in memory; therefore large databases are supported as well. 
The same indexes are used as when using a regular database. - -@features_1561_p -# If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files, because random access in compressed files is not possible. See also the sample application ReadOnlyDatabaseInZip. - -@features_1562_h3 -破�??�?��?�データベースを開�?? - -@features_1563_p -# If a database cannot be opened because the boot info (the SQL script that is run at startup) is corrupted, then the database can be opened by specifying a database event listener. The exceptions are logged, but opening the database will continue. - -@features_1564_h2 -computed column / ベースインデックス�?�機能 - -@features_1565_p -# A computed column is a column whose value is calculated before storing. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time: - -@features_1566_p -# Function indexes are not directly supported by this database, but they can be emulated by using computed columns. For example, if an index on the upper-case version of a column is required, create a computed column with the upper-case version of the original column, and create an index for this column: - -@features_1567_p -# When inserting data, it is not required (and not allowed) to specify a value for the upper-case version of the column, because the value is generated. But you can use the column when querying the table: - -@features_1568_h2 -多次元インデックス - -@features_1569_p -# A tool is provided to execute efficient multi-dimension (spatial) range queries. This database does not support a specialized spatial index (R-Tree or similar). Instead, the B-Tree index is used. For each record, the multi-dimensional key is converted (mapped) to a single dimensional (scalar) value. This value specifies the location on a space-filling curve. 
- -@features_1570_p -# Currently, Z-order (also called N-order or Morton-order) is used; Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. The scalar value is indexed using a B-Tree index (usually using a computed column). - -@features_1571_p -# The method can result in a drastic performance improvement over just using an index on the first column. Depending on the data and number of dimensions, the improvement is usually higher than factor 5. The tool generates a SQL query from a specified multi-dimensional range. The method used is not database dependent, and the tool can easily be ported to other databases. For an example how to use the tool, please have a look at the sample code provided in TestMultiDimension.java. - -@features_1572_h2 -ユーザー定義�?�関数�?�ストアドプロシージャ - -@features_1573_p -# In addition to the built-in functions, this database supports user-defined Java functions. In this database, Java functions can be used as stored procedures as well. A function must be declared (registered) before it can be used. A function can be defined using source code, or as a reference to a compiled class that is available in the classpath. By default, the function aliases are stored in the current schema. - -@features_1574_h3 -#Referencing a Compiled Method - -@features_1575_p -# When referencing a method, the class must already be compiled and included in the classpath where the database is running. Only static Java methods are supported; both the class and the method must be public. Example Java class: - -@features_1576_p -# The Java function must be registered in the database by calling CREATE ALIAS ... FOR: - -@features_1577_p -# For a complete sample application, see src/test/org/h2/samples/Function.java. 
- -@features_1578_h3 -#Declaring Functions as Source Code - -@features_1579_p -# When defining a function alias with source code, the database tries to compile the source code using the Sun Java compiler (the class com.sun.tools.javac.Main) if the tools.jar is in the classpath. If not, javac is run as a separate process. Only the source code is stored in the database; the class is compiled each time the database is re-opened. Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well. Example: - -@features_1580_p -# By default, the three packages java.util, java.math, java.sql are imported. The method name (nextPrime in the example above) is ignored. Method overloading is not supported when declaring functions as source code, that means only one method may be declared for an alias. If different import statements are required, they must be declared at the beginning and separated with the tag @CODE: - -@features_1581_p -# The following template is used to create a complete Java class: - -@features_1582_h3 -#Method Overloading - -@features_1583_p -# Multiple methods may be bound to a SQL function if the class is already compiled and included in the classpath. Each Java method must have a different number of arguments. Method overloading is not supported when declaring functions as source code. - -@features_1584_h3 -データタイプマッピング関数 - -@features_1585_p -# Functions that accept non-nullable parameters such as int will not be called if one of those parameters is NULL. Instead, the result of the function is NULL. If the function should be called if a parameter is NULL, you need to use java.lang.Integer instead. - -@features_1586_p -# SQL types are mapped to Java classes and vice-versa as in the JDBC API. For details, see Data Types. There are a few special cases: java.lang.Object is mapped to OTHER (a serialized object). 
Therefore, java.lang.Object can not be used to match all SQL types (matching all SQL types is not supported). The second special case is Object[]: arrays of any class are mapped to ARRAY. Objects of type org.h2.value.Value (the internal value class) are passed through without conversion. - -@features_1587_h3 -#Functions That Require a Connection - -@features_1588_p -# If the first parameter of a Java function is a java.sql.Connection, then the connection to database is provided. This connection does not need to be closed before returning. When calling the method from within the SQL statement, this connection parameter does not need to be (can not be) specified. - -@features_1589_h3 -#Functions Throwing an Exception - -@features_1590_p -# If a function throws an exception, then the current statement is rolled back and the exception is thrown to the application. SQLException are directly re-thrown to the calling application; all other exceptions are first converted to a SQLException. - -@features_1591_h3 -#Functions Returning a Result Set - -@features_1592_p -# Functions may returns a result set. Such a function can be called with the CALL statement: - -@features_1593_h3 -SimpleResultSetを使用�?�る - -@features_1594_p -# A function can create a result set using the SimpleResultSet tool: - -@features_1595_h3 -関数をテーブル�?��?��?�使用�?�る - -@features_1596_p -# A function that returns a result set can be used like a table. However, in this case the function is called at least twice: first while parsing the statement to collect the column names (with parameters set to null where not known at compile time). And then, while executing the statement to get the data (maybe multiple times if this is a join). If the function is called just to get the column list, the URL of the connection passed to the function is jdbc:columnlist:connection. Otherwise, the URL of the connection is jdbc:default:connection. 
- -@features_1597_h2 -#Pluggable or User-Defined Tables - -@features_1598_p -# For situations where you need to expose other data-sources to the SQL engine as a table, there are "pluggable tables". For some examples, have a look at the code in org.h2.test.db.TestTableEngines. - -@features_1599_p -# In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine interface e.g. something like this: - -@features_1600_p -# and then create the table from SQL like this: - -@features_1601_p -# It is also possible to pass in parameters to the table engine, like so: - -@features_1602_p -# In which case the parameters are passed down in the tableEngineParams field of the CreateTableData object. - -@features_1603_p -# It is also possible to specify default table engine params on schema creation: - -@features_1604_p -# Params from the schema are used when CREATE TABLE issued on this schema does not have its own engine params specified. - -@features_1605_h2 -トリガー - -@features_1606_p -# This database supports Java triggers that are called before or after a row is updated, inserted or deleted. Triggers can be used for complex consistency checks, or to update related data in the database. It is also possible to use triggers to simulate materialized views. For a complete sample application, see src/test/org/h2/samples/TriggerSample.java. A Java trigger must implement the interface org.h2.api.Trigger. The trigger class must be available in the classpath of the database engine (when using the server mode, it must be in the classpath of the server). - -@features_1607_p -# The connection can be used to query or update data in other tables. The trigger then needs to be defined in the database: - -@features_1608_p -# The trigger can be used to veto a change by throwing a SQLException. - -@features_1609_p -# As an alternative to implementing the Trigger interface, an application can extend the abstract class org.h2.tools.TriggerAdapter. 
This will allows to use the ResultSet interface within trigger implementations. In this case, only the fire method needs to be implemented: - -@features_1610_h2 -データベースをコンパクト�?��?�る - -@features_1611_p -# Empty space in the database file re-used automatically. When closing the database, the database is automatically compacted for up to 200 milliseconds by default. To compact more, use the SQL statement SHUTDOWN COMPACT. However re-creating the database may further reduce the database size because this will re-build the indexes. Here is a sample function to do this: - -@features_1612_p -# See also the sample application org.h2.samples.Compact. The commands SCRIPT / RUNSCRIPT can be used as well to create a backup of a database and re-build the database from the script. - -@features_1613_h2 -キャッシュ�?�設定 - -@features_1614_p -# The database keeps most frequently used data in the main memory. The amount of memory used for caching can be changed using the setting CACHE_SIZE. This setting can be set in the database connection URL (jdbc:h2:~/test;CACHE_SIZE=131072), or it can be changed at runtime using SET CACHE_SIZE size. The size of the cache, as represented by CACHE_SIZE is measured in KB, with each KB being 1024 bytes. This setting has no effect for in-memory databases. For persistent databases, the setting is stored in the database and re-used when the database is opened the next time. However, when opening an existing database, the cache size is set to at most half the amount of memory available for the virtual machine (Runtime.getRuntime().maxMemory()), even if the cache size setting stored in the database is larger; however the setting stored in the database is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE overrides this value (even if larger than the physical memory). 
To get the current used maximum cache size, use the query SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'info.CACHE_MAX_SIZE' - -@features_1615_p -# An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE=TQ to the database URL. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. - -@features_1616_p -# Also included is an experimental second level soft reference cache. Rows in this cache are only garbage collected on low memory. By default the second level cache is disabled. To enable it, use the prefix SOFT_. Example: jdbc:h2:~/test;CACHE_TYPE=SOFT_LRU. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. - -@features_1617_p -# To get information about page reads and writes, and the current caching algorithm in use, call SELECT * FROM INFORMATION_SCHEMA.SETTINGS. The number of pages read / written is listed. 
- -@fragments_1000_div -#    - -@fragments_1001_label -#Search: - -@fragments_1002_label -#Highlight keyword(s) - -@fragments_1003_a -ホーム - -@fragments_1004_a -ダウンロード - -@fragments_1005_a -#Cheat Sheet - -@fragments_1006_b -ドキュメント - -@fragments_1007_a -クイックスタート - -@fragments_1008_a -インストール - -@fragments_1009_a -�?ュートリアル - -@fragments_1010_a -特徴 - -@fragments_1011_a -パフォーマンス - -@fragments_1012_a -#Advanced - -@fragments_1013_b -#Reference - -@fragments_1014_a -#SQL Grammar - -@fragments_1015_a -#Functions - -@fragments_1016_a -データ型 - -@fragments_1017_a -#Javadoc - -@fragments_1018_a -#PDF (1 MB) - -@fragments_1019_b -サ�?ート - -@fragments_1020_a -#FAQ - -@fragments_1021_a -#Error Analyzer - -@fragments_1022_a -#Google Group (English) - -@fragments_1023_a -#Google Group (Japanese) - -@fragments_1024_a -#Google Group (Chinese) - -@fragments_1025_b -#Appendix - -@fragments_1026_a -#History & Roadmap - -@fragments_1027_a -#History & Roadmap - -@fragments_1028_a -ビルド - -@fragments_1029_a -#Links - -@fragments_1030_a -#JaQu - -@fragments_1031_a -#MVStore - -@fragments_1032_a -#Architecture - -@fragments_1033_td -  - -@frame_1000_h1 -H2 データベース エンジン - -@frame_1001_p -# Welcome to H2, the free SQL database. The main feature of H2 are: - -@frame_1002_li -#It is free to use for everybody, source code is included - -@frame_1003_li -#Written in Java, but also available as native executable - -@frame_1004_li -#JDBC and (partial) ODBC API - -@frame_1005_li -#Embedded and client/server modes - -@frame_1006_li -#Clustering is supported - -@frame_1007_li -#A web client is included - -@frame_1008_h2 -#No Javascript - -@frame_1009_p -# If you are not automatically redirected to the main page, then Javascript is currently disabled or your browser does not support Javascript. Some features (for example the integrated search) require Javascript. 
- -@frame_1010_p -# Please enable Javascript, or go ahead without it: H2 Database Engine - -@history_1000_h1 -歴�?��?�ロードマップ - -@history_1001_a -# Change Log - -@history_1002_a -# Roadmap - -@history_1003_a -# History of this Database Engine - -@history_1004_a -# Why Java - -@history_1005_a -# Supporters - -@history_1006_h2 -変更履歴 - -@history_1007_p -# The up-to-date change log is available at http://www.h2database.com/html/changelog.html - -@history_1008_h2 -ロードマップ - -@history_1009_p -# The current roadmap is available at http://www.h2database.com/html/roadmap.html - -@history_1010_h2 -�?��?�データベースエンジン�?�歴�?� - -@history_1011_p -# The development of H2 was started in May 2004, but it was first published on December 14th 2005. The original author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. In 2001, he joined PointBase Inc. where he wrote PointBase Micro, a commercial Java SQL database. At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed to continued to work on the Hypersonic SQL codebase. The name H2 stands for Hypersonic 2, however H2 does not share code with Hypersonic SQL or HSQLDB. H2 is built from scratch. - -@history_1012_h2 -�?��?�Java�?��?��?� - -@history_1013_p -# The main reasons to use a Java database are: - -@history_1014_li -#Very simple to integrate in Java applications - -@history_1015_li -#Support for many different platforms - -@history_1016_li -#More secure than native applications (no buffer overflows) - -@history_1017_li -#User defined functions (or triggers) run very fast - -@history_1018_li -#Unicode support - -@history_1019_p -# Some think Java is too slow for low level operations, but this is no longer true. Garbage collection for example is now faster than manual memory management. - -@history_1020_p -# Developing Java code is faster than developing C or C++ code. 
When using Java, most time can be spent on improving the algorithms instead of porting the code to different platforms or doing memory management. Features such as Unicode and network libraries are already built-in. In Java, writing secure code is easier because buffer overflows can not occur. Features such as reflection can be used for randomized testing. - -@history_1021_p -# Java is future proof: a lot of companies support Java. Java is now open source. - -@history_1022_p -# To increase the portability and ease of use, this software depends on very few libraries. Features that are not available in open source Java implementations (such as Swing) are not used, or only used for optional features. - -@history_1023_h2 -支�?�者 - -@history_1024_p -# Many thanks for those who reported bugs, gave valuable feedback, spread the word, and translated this project. - -@history_1025_p -# Also many thanks to the donors. To become a donor, use PayPal (at the very bottom of the main web page). Donators are: - -@history_1026_li -#Martin Wildam, Austria - -@history_1027_a -#tagtraum industries incorporated, USA - -@history_1028_a -#TimeWriter, Netherlands - -@history_1029_a -#Cognitect, USA - -@history_1030_a -#Code 42 Software, Inc., Minneapolis - -@history_1031_a -#Code Lutin, France - -@history_1032_a -#NetSuxxess GmbH, Germany - -@history_1033_a -#Poker Copilot, Steve McLeod, Germany - -@history_1034_a -#SkyCash, Poland - -@history_1035_a -#Lumber-mill, Inc., Japan - -@history_1036_a -#StockMarketEye, USA - -@history_1037_a -#Eckenfelder GmbH & Co.KG, Germany - -@history_1038_li -#Jun Iyama, Japan - -@history_1039_li -#Steven Branda, USA - -@history_1040_li -#Anthony Goubard, Netherlands - -@history_1041_li -#Richard Hickey, USA - -@history_1042_li -#Alessio Jacopo D'Adamo, Italy - -@history_1043_li -#Ashwin Jayaprakash, USA - -@history_1044_li -#Donald Bleyl, USA - -@history_1045_li -#Frank Berger, Germany - -@history_1046_li -#Florent Ramiere, France - -@history_1047_li 
-#Antonio Casqueiro, Portugal - -@history_1048_li -#Oliver Computing LLC, USA - -@history_1049_li -#Harpal Grover Consulting Inc., USA - -@history_1050_li -#Elisabetta Berlini, Italy - -@history_1051_li -#William Gilbert, USA - -@history_1052_li -#Antonio Dieguez Rojas, Chile - -@history_1053_a -#Ontology Works, USA - -@history_1054_li -#Pete Haidinyak, USA - -@history_1055_li -#William Osmond, USA - -@history_1056_li -#Joachim Ansorg, Germany - -@history_1057_li -#Oliver Soerensen, Germany - -@history_1058_li -#Christos Vasilakis, Greece - -@history_1059_li -#Fyodor Kupolov, Denmark - -@history_1060_li -#Jakob Jenkov, Denmark - -@history_1061_li -#Stéphane Chartrand, Switzerland - -@history_1062_li -#Glenn Kidd, USA - -@history_1063_li -#Gustav Trede, Sweden - -@history_1064_li -#Joonas Pulakka, Finland - -@history_1065_li -#Bjorn Darri Sigurdsson, Iceland - -@history_1066_li -#Gray Watson, USA - -@history_1067_li -#Erik Dick, Germany - -@history_1068_li -#Pengxiang Shao, China - -@history_1069_li -#Bilingual Marketing Group, USA - -@history_1070_li -#Philippe Marschall, Switzerland - -@history_1071_li -#Knut Staring, Norway - -@history_1072_li -#Theis Borg, Denmark - -@history_1073_li -#Mark De Mendonca Duske, USA - -@history_1074_li -#Joel A. 
Garringer, USA - -@history_1075_li -#Olivier Chafik, France - -@history_1076_li -#Rene Schwietzke, Germany - -@history_1077_li -#Jalpesh Patadia, USA - -@history_1078_li -#Takanori Kawashima, Japan - -@history_1079_li -#Terrence JC Huang, China - -@history_1080_a -#JiaDong Huang, Australia - -@history_1081_li -#Laurent van Roy, Belgium - -@history_1082_li -#Qian Chen, China - -@history_1083_li -#Clinton Hyde, USA - -@history_1084_li -#Kritchai Phromros, Thailand - -@history_1085_li -#Alan Thompson, USA - -@history_1086_li -#Ladislav Jech, Czech Republic - -@history_1087_li -#Dimitrijs Fedotovs, Latvia - -@history_1088_li -#Richard Manley-Reeve, United Kingdom - -@history_1089_li -#Daniel Cyr, ThirdHalf.com, LLC, USA - -@history_1090_li -#Peter Jünger, Germany - -@history_1091_li -#Dan Keegan, USA - -@history_1092_li -#Rafel Israels, Germany - -@history_1093_li -#Fabien Todescato, France - -@history_1094_li -#Cristan Meijer, Netherlands - -@history_1095_li -#Adam McMahon, USA - -@history_1096_li -#Fábio Gomes Lisboa Gomes, Brasil - -@history_1097_li -#Lyderic Landry, England - -@history_1098_li -#Mederp, Morocco - -@history_1099_li -#Joaquim Golay, Switzerland - -@history_1100_li -#Clemens Quoss, Germany - -@history_1101_li -#Kervin Pierre, USA - -@history_1102_li -#Jake Bellotti, Australia - -@history_1103_li -#Arun Chittanoor, USA - -@installation_1000_h1 -インストール - -@installation_1001_a -# Requirements - -@installation_1002_a -# Supported Platforms - -@installation_1003_a -# Installing the Software - -@installation_1004_a -# Directory Structure - -@installation_1005_h2 -必�?�?�件 - -@installation_1006_p -# To run this database, the following software stack is known to work. Other software most likely also works, but is not tested as much. 
- -@installation_1007_h3 -#Database Engine - -@installation_1008_li -#Windows XP or Vista, Mac OS X, or Linux - -@installation_1009_li -#Oracle Java 7 or newer - -@installation_1010_li -#Recommended Windows file system: NTFS (FAT32 only supports files up to 4 GB) - -@installation_1011_h3 -#H2 Console - -@installation_1012_li -#Mozilla Firefox - -@installation_1013_h2 -サ�?ート�?�れ�?��?�るプラットフォーム - -@installation_1014_p -# As this database is written in Java, it can run on many different platforms. It is tested with Java 7. Currently, the database is developed and tested on Windows 8 and Mac OS X using Java 7, but it also works in many other operating systems and using other Java runtime environments. All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported. - -@installation_1015_h2 -ソフトウェア�?�インストール - -@installation_1016_p -# To install the software, run the installer or unzip it to a directory of your choice. - -@installation_1017_h2 -ディレクトリ構�? - -@installation_1018_p -# After installing, you should get the following directory structure: - -@installation_1019_th -ディレクトリ - -@installation_1020_th -コンテンツ - -@installation_1021_td -bin - -@installation_1022_td -JAR�?�batchファイル - -@installation_1023_td -docs - -@installation_1024_td -ドキュメント - -@installation_1025_td -docs/html - -@installation_1026_td -HTMLページ - -@installation_1027_td -docs/javadoc - -@installation_1028_td -Javadocファイル - -@installation_1029_td -#ext - -@installation_1030_td -#External dependencies (downloaded when building) - -@installation_1031_td -service - -@installation_1032_td -Windows Service�?��?��?�データベースを実行�?�るツール - -@installation_1033_td -src - -@installation_1034_td -Sourceファイル - -@installation_1035_td -#src/docsrc - -@installation_1036_td -#Documentation sources - -@installation_1037_td -#src/installer - -@installation_1038_td -#Installer, shell, and release build script - -@installation_1039_td -#src/main - -@installation_1040_td -#Database engine 
source code - -@installation_1041_td -#src/test - -@installation_1042_td -#Test source code - -@installation_1043_td -#src/tools - -@installation_1044_td -#Tools and database adapters source code - -@jaqu_1000_h1 -#JaQu - -@jaqu_1001_a -# What is JaQu - -@jaqu_1002_a -# Differences to Other Data Access Tools - -@jaqu_1003_a -# Current State - -@jaqu_1004_a -# Building the JaQu Library - -@jaqu_1005_a -# Requirements - -@jaqu_1006_a -# Example Code - -@jaqu_1007_a -# Configuration - -@jaqu_1008_a -# Natural Syntax - -@jaqu_1009_a -# Other Ideas - -@jaqu_1010_a -# Similar Projects - -@jaqu_1011_h2 -#What is JaQu - -@jaqu_1012_p -# Note: This project is currently in maintenance mode. A friendly fork of JaQu is available under the name iciql. - -@jaqu_1013_p -# JaQu stands for Java Query and allows to access databases using pure Java. JaQu provides a fluent interface (or internal DSL). JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a Microsoft .NET technology). The following JaQu code: - -@jaqu_1014_p -# stands for the SQL statement: - -@jaqu_1015_h2 -#Differences to Other Data Access Tools - -@jaqu_1016_p -# Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java, auto-complete in the IDE is supported. Type checking is performed by the compiler. JaQu fully protects against SQL injection. - -@jaqu_1017_p -# JaQu is meant as replacement for JDBC and SQL and not as much as a replacement for tools like Hibernate. With JaQu, you don't write SQL statements as strings. JaQu is much smaller and simpler than other persistence frameworks such as Hibernate, but it also does not provide all the features of those. Unlike iBatis and Hibernate, no XML or annotation based configuration is required; instead the configuration (if required at all) is done in pure Java, within the application. - -@jaqu_1018_p -# JaQu does not require or contain any data caching mechanism. 
Like JDBC and iBatis, JaQu provides full control over when and what SQL statements are executed (but without having to write SQL statements as strings). - -@jaqu_1019_h3 -#Restrictions - -@jaqu_1020_p -# Primitive types (eg. boolean, int, long, double) are not supported. Use java.lang.Boolean, Integer, Long, Double instead. - -@jaqu_1021_h3 -#Why in Java? - -@jaqu_1022_p -# Most applications are written in Java. Mixing Java and another language (for example Scala or Groovy) in the same application is complicated: you would need to split the application and database code, and write adapter / wrapper code. - -@jaqu_1023_h2 -#Current State - -@jaqu_1024_p -# Currently, JaQu is only tested with the H2 database. The API may change in future versions. JaQu is not part of the h2 jar file, however the source code is included in H2, under: - -@jaqu_1025_code -#src/test/org/h2/test/jaqu/* - -@jaqu_1026_li -# (samples and tests) - -@jaqu_1027_code -#src/tools/org/h2/jaqu/* - -@jaqu_1028_li -# (framework) - -@jaqu_1029_h2 -#Building the JaQu Library - -@jaqu_1030_p -# To create the JaQu jar file, run: build jarJaqu. This will create the file bin/h2jaqu.jar. - -@jaqu_1031_h2 -必�?�?�件 - -@jaqu_1032_p -# JaQu requires Java 6. Annotations are not need. Currently, JaQu is only tested with the H2 database engine, however in theory it should work with any database that supports the JDBC API. - -@jaqu_1033_h2 -#Example Code - -@jaqu_1034_h2 -#Configuration - -@jaqu_1035_p -# JaQu does not require any configuration when using the default field to column mapping. To define table indices, or if you want to map a class to a table with a different name, or a field to a column with another name, create a function called define in the data class. Example: - -@jaqu_1036_p -# The method define() contains the mapping definition. It is called once when the class is used for the first time. Like annotations, the mapping is defined in the class itself. 
Unlike when using annotations, the compiler can check the syntax even for multi-column objects (multi-column indexes, multi-column primary keys and so on). Because the definition is written in Java, the configuration can be set at runtime, which is not possible using annotations. Unlike XML mapping configuration, the configuration is integrated in the class itself. - -@jaqu_1037_h2 -#Natural Syntax - -@jaqu_1038_p -#The plan is to support more natural (pure Java) syntax in conditions. To do that, the condition class is de-compiled to a SQL condition. A proof of concept decompiler is included (but it doesn't fully work yet; patches are welcome). The planned syntax is: - -@jaqu_1039_h2 -#Other Ideas - -@jaqu_1040_p -# This project has just been started, and nothing is fixed yet. Some ideas are: - -@jaqu_1041_li -#Support queries on collections (instead of using a database). - -@jaqu_1042_li -#Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA). - -@jaqu_1043_li -#Internally use a JPA implementation (for example Hibernate) instead of SQL directly. - -@jaqu_1044_li -#Use PreparedStatements and cache them. 
- -@jaqu_1045_h2 -#Similar Projects - -@jaqu_1046_a -#iciql (a friendly fork of JaQu) - -@jaqu_1047_a -#Cement Framework - -@jaqu_1048_a -#Dreamsource ORM - -@jaqu_1049_a -#Empire-db - -@jaqu_1050_a -#JEQUEL: Java Embedded QUEry Language - -@jaqu_1051_a -#Joist - -@jaqu_1052_a -#jOOQ - -@jaqu_1053_a -#JoSQL - -@jaqu_1054_a -#LIQUidFORM - -@jaqu_1055_a -#Quaere (Alias implementation) - -@jaqu_1056_a -#Quaere - -@jaqu_1057_a -#Querydsl - -@jaqu_1058_a -#Squill - -@license_1000_h1 -ライセンス - -@license_1001_a -# Summary and License FAQ - -@license_1002_a -# Mozilla Public License Version 2.0 - -@license_1003_a -# Eclipse Public License - Version 1.0 - -@license_1004_a -# Export Control Classification Number (ECCN) - -@license_1005_h2 -#Summary and License FAQ - -@license_1006_p -# H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL. - -@license_1007_li -#You can use H2 for free. - -@license_1008_li -#You can integrate it into your applications (including in commercial applications) and distribute it. - -@license_1009_li -#Files containing only your code are not covered by this license (it is 'commercial friendly'). - -@license_1010_li -#Modifications to the H2 source code must be published. - -@license_1011_li -#You don't need to provide the source code of H2 if you did not modify anything. - -@license_1012_li -#If you distribute a binary that includes H2, you need to add a disclaimer of liability - see the example below. - -@license_1013_p -# However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was in fact just HSQLDB. 
It seems 'bungisoft' does not exist any more, but you can use the Wayback Machine and visit old web pages of http://www.bungisoft.com. - -@license_1014_p -# About porting the source code to another language (for example C# or C++): converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code. - -@license_1015_p -# If you distribute a binary that includes H2, you need to add the license and a disclaimer of liability (as you should do for your own code). You should add a disclaimer for each open source library you use. For example, add a file 3rdparty_license.txt in the directory where the jar files are, and list all open source libraries, each one with its license and disclaimer. For H2, a simple solution is to copy the following text below. You may also include a copy of the complete license. - -@license_1016_h2 -#Mozilla Public License Version 2.0 - -@license_1017_h3 -#1. Definitions - -@license_1018_p -#1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. - -@license_1019_p -#1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. - -@license_1020_p -#1.3. "Contribution" means Covered Software of a particular Contributor. - -@license_1021_p -#1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. - -@license_1022_p -#1.5. "Incompatible With Secondary Licenses" means - -@license_1023_p -#a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - -@license_1024_p -#b. 
that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. - -@license_1025_p -#1.6. "Executable Form" means any form of the work other than Source Code Form. - -@license_1026_p -#1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. - -@license_1027_p -#1.8. "License" means this document. - -@license_1028_p -#1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. - -@license_1029_p -#1.10. "Modifications" means any of the following: - -@license_1030_p -#a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or - -@license_1031_p -#b. any new file in Source Code Form that contains any Covered Software. - -@license_1032_p -#1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. - -@license_1033_p -#1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. - -@license_1034_p -#1.13. "Source Code Form" means the form of the work preferred for making modifications. - -@license_1035_p -#1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. 
For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -@license_1036_h3 -#2. License Grants and Conditions - -@license_1037_h4 -#2.1. Grants - -@license_1038_p -#Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - -@license_1039_p -#under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and - -@license_1040_p -#under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. - -@license_1041_h4 -#2.2. Effective Date - -@license_1042_p -#The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. - -@license_1043_h4 -#2.3. Limitations on Grant Scope - -@license_1044_p -#The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. 
Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: - -@license_1045_p -#for any code that a Contributor has removed from Covered Software; or - -@license_1046_p -#for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - -@license_1047_p -#under Patent Claims infringed by Covered Software in the absence of its Contributions. - -@license_1048_p -#This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). - -@license_1049_h4 -#2.4. Subsequent Licenses - -@license_1050_p -#No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). - -@license_1051_h4 -#2.5. Representation - -@license_1052_p -#Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. - -@license_1053_h4 -#2.6. Fair Use - -@license_1054_p -#This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. - -@license_1055_h4 -#2.7. Conditions - -@license_1056_p -#Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. - -@license_1057_h3 -#3. Responsibilities - -@license_1058_h4 -#3.1. Distribution of Source Form - -@license_1059_p -#All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. 
You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. - -@license_1060_h4 -#3.2. Distribution of Executable Form - -@license_1061_p -#If You distribute Covered Software in Executable Form then: - -@license_1062_p -#such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - -@license_1063_p -#You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. - -@license_1064_h4 -#3.3. Distribution of a Larger Work - -@license_1065_p -#You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). - -@license_1066_h4 -#3.4. 
Notices - -@license_1067_p -#You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. - -@license_1068_h4 -#3.5. Application of Additional Terms - -@license_1069_p -#You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. - -@license_1070_h3 -#4. Inability to Comply Due to Statute or Regulation - -@license_1071_p -#If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. - -@license_1072_h3 -#5. Termination - -@license_1073_p -#5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. 
However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. - -@license_1074_p -#5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. - -@license_1075_p -#5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. - -@license_1076_h3 -#6. Disclaimer of Warranty - -@license_1077_p -#Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. 
Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. - -@license_1078_h3 -#7. Limitation of Liability - -@license_1079_p -#Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. - -@license_1080_h3 -#8. Litigation - -@license_1081_p -#Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. - -@license_1082_h3 -#9. Miscellaneous - -@license_1083_p -#This License represents the complete agreement concerning the subject matter hereof. 
If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. - -@license_1084_h3 -#10. Versions of the License - -@license_1085_h4 -#10.1. New Versions - -@license_1086_p -#Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. - -@license_1087_h4 -#10.2. Effect of New Versions - -@license_1088_p -#You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. - -@license_1089_h4 -#10.3. Modified Versions - -@license_1090_p -#If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). - -@license_1091_h4 -#10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - -@license_1092_p -#If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. 
- -@license_1093_h3 -#Exhibit A - Source Code Form License Notice - -@license_1094_p -#If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. - -@license_1095_p -#You may add additional accurate notices of copyright ownership. - -@license_1096_h3 -#Exhibit B - "Incompatible With Secondary Licenses" Notice - -@license_1097_h2 -#Eclipse Public License - Version 1.0 - -@license_1098_p -# THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -@license_1099_h3 -#1. DEFINITIONS - -@license_1100_p -# "Contribution" means: - -@license_1101_p -# a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and - -@license_1102_p -# b) in the case of each subsequent Contributor: - -@license_1103_p -# i) changes to the Program, and - -@license_1104_p -# ii) additions to the Program; - -@license_1105_p -# where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. - -@license_1106_p -# "Contributor" means any person or entity that distributes the Program. - -@license_1107_p -# "Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. 
- -@license_1108_p -# "Program" means the Contributions distributed in accordance with this Agreement. - -@license_1109_p -# "Recipient" means anyone who receives the Program under this Agreement, including all Contributors. - -@license_1110_h3 -#2. GRANT OF RIGHTS - -@license_1111_p -# a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. - -@license_1112_p -# b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. - -@license_1113_p -# c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. 
For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. - -@license_1114_p -# d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. - -@license_1115_h3 -#3. REQUIREMENTS - -@license_1116_p -# A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: - -@license_1117_p -# a) it complies with the terms and conditions of this Agreement; and - -@license_1118_p -# b) its license agreement: - -@license_1119_p -# i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; - -@license_1120_p -# ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; - -@license_1121_p -# iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and - -@license_1122_p -# iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. - -@license_1123_p -# When the Program is made available in source code form: - -@license_1124_p -# a) it must be made available under this Agreement; and - -@license_1125_p -# b) a copy of this Agreement must be included with each copy of the Program. - -@license_1126_p -# Contributors may not remove or alter any copyright notices contained within the Program. 
- -@license_1127_p -# Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. - -@license_1128_h3 -#4. COMMERCIAL DISTRIBUTION - -@license_1129_p -# Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. - -@license_1130_p -# For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. 
If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. - -@license_1131_h3 -#5. NO WARRANTY - -@license_1132_p -# EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. - -@license_1133_h3 -#6. DISCLAIMER OF LIABILITY - -@license_1134_p -# EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -@license_1135_h3 -#7. 
GENERAL - -@license_1136_p -# If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. - -@license_1137_p -# If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. - -@license_1138_p -# All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. - -@license_1139_p -# Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. 
Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. - -@license_1140_p -# This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. - -@license_1141_h2 -#Export Control Classification Number (ECCN) - -@license_1142_p -# As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. However, for legal reasons, we can make no warranty that this information is correct. For details, see also the Apache Software Foundation Export Classifications page. - -@links_1000_h1 -#Links - -@links_1001_p -# If you want to add a link, please send it to the support email address or post it to the group. - -@links_1002_a -# Quotes - -@links_1003_a -# Books - -@links_1004_a -# Extensions - -@links_1005_a -# Blog Articles, Videos - -@links_1006_a -# Database Frontends / Tools - -@links_1007_a -# Products and Projects - -@links_1008_h2 -#Quotes - -@links_1009_a -# Quote - -@links_1010_p -#: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. 
But, in less than 15 minutes I had H2 up and working with little recoding of the SQL. Thanks..... " - -@links_1011_h2 -#Books - -@links_1012_a -# Seam In Action - -@links_1013_h2 -#Extensions - -@links_1014_a -# Grails H2 Database Plugin - -@links_1015_a -# h2osgi: OSGi for the H2 Database - -@links_1016_a -# H2Sharp: ADO.NET interface for the H2 database engine - -@links_1017_a -# A spatial extension of the H2 database. - -@links_1018_h2 -#Blog Articles, Videos - -@links_1019_a -# Youtube: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2 - -@links_1020_a -# Analyzing CSVs with H2 in under 10 minutes (2009-12-07) - -@links_1021_a -# Efficient sorting and iteration on large databases (2009-06-15) - -@links_1022_a -# Porting Flexive to the H2 Database (2008-12-05) - -@links_1023_a -# H2 Database with GlassFish (2008-11-24) - -@links_1024_a -# H2 Database - Performance Tracing (2008-04-30) - -@links_1025_a -# Open Source Databases Comparison (2007-09-11) - -@links_1026_a -# The Codist: The Open Source Frameworks I Use (2007-07-23) - -@links_1027_a -# The Codist: SQL Injections: How Not To Get Stuck (2007-05-08) - -@links_1028_a -# David Coldrick's Weblog: New Version of H2 Database Released (2007-01-06) - -@links_1029_a -# The Codist: Write Your Own Database, Again (2006-11-13) - -@links_1030_h2 -#Project Pages - -@links_1031_a -# Ohloh - -@links_1032_a -# Freshmeat Project Page - -@links_1033_a -# Wikipedia - -@links_1034_a -# Java Source Net - -@links_1035_a -# Linux Package Manager - -@links_1036_h2 -#Database Frontends / Tools - -@links_1037_a -# Dataflyer - -@links_1038_p -# A tool to browse databases and export data. - -@links_1039_a -# DB Solo - -@links_1040_p -# SQL query tool. - -@links_1041_a -# DbVisualizer - -@links_1042_p -# Database tool. - -@links_1043_a -# Execute Query - -@links_1044_p -# Database utility written in Java. - -@links_1045_a -# Flyway - -@links_1046_p -# The agile database migration framework for Java. 
- -@links_1047_a -# [fleXive] - -@links_1048_p -# JavaEE 5 open source framework for the development of complex and evolving (web-)applications. - -@links_1049_a -# JDBC Console - -@links_1050_p -# This small webapp gives an ability to execute SQL against datasources bound in container's JNDI. Based on H2 Console. - -@links_1051_a -# HenPlus - -@links_1052_p -# HenPlus is a SQL shell written in Java. - -@links_1053_a -# JDBC lint - -@links_1054_p -# Helps write correct and efficient code when using the JDBC API. - -@links_1055_a -# OpenOffice - -@links_1056_p -# Base is OpenOffice.org's database application. It provides access to relational data sources. - -@links_1057_a -# RazorSQL - -@links_1058_p -# An SQL query tool, database browser, SQL editor, and database administration tool. - -@links_1059_a -# SQL Developer - -@links_1060_p -# Universal Database Frontend. - -@links_1061_a -# SQL Workbench/J - -@links_1062_p -# Free DBMS-independent SQL tool. - -@links_1063_a -# SQuirreL SQL Client - -@links_1064_p -# Graphical tool to view the structure of a database, browse the data, issue SQL commands etc. - -@links_1065_a -# SQuirreL DB Copy Plugin - -@links_1066_p -# Tool to copy data from one database to another. - -@links_1067_h2 -#Products and Projects - -@links_1068_a -# AccuProcess - -@links_1069_p -# Visual business process modeling and simulation software for business users. - -@links_1070_a -# Adeptia BPM - -@links_1071_p -# A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows. - -@links_1072_a -# Adeptia Integration - -@links_1073_p -# Process-centric, services-based application integration suite. - -@links_1074_a -# Aejaks - -@links_1075_p -# A server-side scripting environment to build AJAX enabled web applications. - -@links_1076_a -# Axiom Stack - -@links_1077_p -# A web framework that let's you write dynamic web applications with Zen-like simplicity. 
- -@links_1078_a -# Apache Cayenne - -@links_1079_p -# Open source persistence framework providing object-relational mapping (ORM) and remoting services. - -@links_1080_a -# Apache Jackrabbit - -@links_1081_p -# Open source implementation of the Java Content Repository API (JCR). - -@links_1082_a -# Apache OpenJPA - -@links_1083_p -# Open source implementation of the Java Persistence API (JPA). - -@links_1084_a -# AppFuse - -@links_1085_p -# Helps building web applications. - -@links_1086_a -# BGBlitz - -@links_1087_p -# The Swiss army knife of Backgammon. - -@links_1088_a -# Bonita - -@links_1089_p -# Open source workflow solution for handing long-running, user-oriented processes providing out of the box workflow and business process management features. - -@links_1090_a -# Bookmarks Portlet - -@links_1091_p -# JSR 168 compliant bookmarks management portlet application. - -@links_1092_a -# Claros inTouch - -@links_1093_p -# Ajax communication suite with mail, addresses, notes, IM, and rss reader. - -@links_1094_a -# CrashPlan PRO Server - -@links_1095_p -# Easy and cross platform backup solution for business and service providers. - -@links_1096_a -# DataNucleus - -@links_1097_p -# Java persistent objects. - -@links_1098_a -# DbUnit - -@links_1099_p -# A JUnit extension (also usable with Ant) targeted for database-driven projects. - -@links_1100_a -# DiffKit - -@links_1101_p -# DiffKit is a tool for comparing two tables of data, field-by-field. DiffKit is like the Unix diff utility, but for tables instead of lines of text. - -@links_1102_a -# Dinamica Framework - -@links_1103_p -# Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets). - -@links_1104_a -# District Health Information Software 2 (DHIS) - -@links_1105_p -# The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities. 
- -@links_1106_a -# Ebean ORM Persistence Layer - -@links_1107_p -# Open source Java Object Relational Mapping tool. - -@links_1108_a -# Eclipse CDO - -@links_1109_p -# The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution. - -@links_1110_a -# Fabric3 - -@links_1111_p -# Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http://www.osoa.org). - -@links_1112_a -# FIT4Data - -@links_1113_p -# A testing framework for data management applications built on the Java implementation of FIT. - -@links_1114_a -# Flux - -@links_1115_p -# Java job scheduler, file transfer, workflow, and BPM. - -@links_1116_a -# GeoServer - -@links_1117_p -# GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing. - -@links_1118_a -# GBIF Integrated Publishing Toolkit (IPT) - -@links_1119_p -# The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data: taxon primary occurrence data, taxon checklists and general resource metadata. - -@links_1120_a -# GNU Gluco Control - -@links_1121_p -# Helps you to manage your diabetes. - -@links_1122_a -# Golden T Studios - -@links_1123_p -# Fun-to-play games with a simple interface. - -@links_1124_a -# GridGain - -@links_1125_p -# GridGain is easy to use Cloud Application Platform that enables development of highly scalable distributed Java and Scala applications that auto-scale on any grid or cloud infrastructure. - -@links_1126_a -# Group Session - -@links_1127_p -# Open source web groupware. 
- -@links_1128_a -# HA-JDBC - -@links_1129_p -# High-Availability JDBC: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver. - -@links_1130_a -# Hibernate - -@links_1131_p -# Relational persistence for idiomatic Java (O-R mapping tool). - -@links_1132_a -# Hibicius - -@links_1133_p -# Online Banking Client for the HBCI protocol. - -@links_1134_a -# ImageMapper - -@links_1135_p -# ImageMapper frees users from having to use file browsers to view their images. They get fast access to images and easy cataloguing of them via a user friendly interface. - -@links_1136_a -# JAMWiki - -@links_1137_p -# Java-based Wiki engine. - -@links_1138_a -# Jaspa - -@links_1139_p -# Java Spatial. Jaspa potentially brings around 200 spatial functions. - -@links_1140_a -# Java Simon - -@links_1141_p -# Simple Monitoring API. - -@links_1142_a -# JBoss jBPM - -@links_1143_p -# A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration. - -@links_1144_a -# JBoss Jopr - -@links_1145_p -# An enterprise management solution for JBoss middleware projects and other application technologies. - -@links_1146_a -# JGeocoder - -@links_1147_p -# Free Java geocoder. Geocoding is the process of estimating a latitude and longitude for a given location. - -@links_1148_a -# JGrass - -@links_1149_p -# Java Geographic Resources Analysis Support System. Free, multi platform, open source GIS based on the GIS framework of uDig. - -@links_1150_a -# Jena - -@links_1151_p -# Java framework for building Semantic Web applications. - -@links_1152_a -# JMatter - -@links_1153_p -# Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern. 
- -@links_1154_a -# jOOQ (Java Object Oriented Querying) - -@links_1155_p -# jOOQ is a fluent API for typesafe SQL query construction and execution - -@links_1156_a -# Liftweb - -@links_1157_p -# A Scala-based, secure, developer friendly web framework. - -@links_1158_a -# LiquiBase - -@links_1159_p -# A tool to manage database changes and refactorings. - -@links_1160_a -# Luntbuild - -@links_1161_p -# Build automation and management tool. - -@links_1162_a -# localdb - -@links_1163_p -# A tool that locates the full file path of the folder containing the database files. - -@links_1164_a -# Magnolia - -@links_1165_p -# Microarray Data Management and Export System for PFGRC (Pathogen Functional Genomics Resource Center) Microarrays. - -@links_1166_a -# MiniConnectionPoolManager - -@links_1167_p -# A lightweight standalone JDBC connection pool manager. - -@links_1168_a -# Mr. Persister - -@links_1169_p -# Simple, small and fast object relational mapping. - -@links_1170_a -# Myna Application Server - -@links_1171_p -# Java web app that provides dynamic web content and Java libraries access from JavaScript. - -@links_1172_a -# MyTunesRss - -@links_1173_p -# MyTunesRSS lets you listen to your music wherever you are. - -@links_1174_a -# NCGC CurveFit - -@links_1175_p -# From: NIH Chemical Genomics Center, National Institutes of Health, USA. An open source application in the life sciences research field. This application handles chemical structures and biological responses of thousands of compounds with the potential to handle million+ compounds. It utilizes an embedded H2 database to enable flexible query/retrieval of all data including advanced chemical substructure and similarity searching. The application highlights an automated curve fitting and classification algorithm that outperforms commercial packages in the field. Commercial alternatives are typically small desktop software that handle a few dose response curves at a time. 
A couple of commercial packages that do handle several thousand curves are very expensive tools (>60k USD) that require manual curation of analysis by the user; require a license to Oracle; lack advanced query/retrieval; and the ability to handle chemical structures. - -@links_1176_a -# Nuxeo - -@links_1177_p -# Standards-based, open source platform for building ECM applications. - -@links_1178_a -# nWire - -@links_1179_p -# Eclipse plug-in which expedites Java development. Its main purpose is to help developers find code quicker and easily understand how it relates to the rest of the application, thus, understand the application structure. - -@links_1180_a -# Ontology Works - -@links_1181_p -# This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), semantic information fusion and semantic federation of legacy databases, ontology-based domain modeling, and management of the distributed enterprise. - -@links_1182_a -# Ontoprise OntoBroker - -@links_1183_p -# SemanticWeb-Middleware. It supports all W3C Semantic Web recommendations: OWL, RDF, RDFS, SPARQL, and F-Logic. - -@links_1184_a -# Open Anzo - -@links_1185_p -# Semantic Application Server. - -@links_1186_a -# OpenGroove - -@links_1187_p -# OpenGroove is a groupware program that allows users to synchronize data. - -@links_1188_a -# OpenSocial Development Environment (OSDE) - -@links_1189_p -# Development tool for OpenSocial application. - -@links_1190_a -# Orion - -@links_1191_p -# J2EE Application Server. - -@links_1192_a -# P5H2 - -@links_1193_p -# A library for the Processing programming language and environment. - -@links_1194_a -# Phase-6 - -@links_1195_p -# A computer based learning software. - -@links_1196_a -# Pickle - -@links_1197_p -# Pickle is a Java library containing classes for persistence, concurrency, and logging. - -@links_1198_a -# Piman - -@links_1199_p -# Water treatment projects data management. 
- -@links_1200_a -# PolePosition - -@links_1201_p -# Open source database benchmark. - -@links_1202_a -# Poormans - -@links_1203_p -# Very basic CMS running as a SWT application and generating static html pages. - -@links_1204_a -# Railo - -@links_1205_p -# Railo is an alternative engine for the Cold Fusion Markup Language, that compiles code programmed in CFML into Java bytecode and executes it on a servlet engine. - -@links_1206_a -# Razuna - -@links_1207_p -# Open source Digital Asset Management System with integrated Web Content Management. - -@links_1208_a -# RIFE - -@links_1209_p -# A full-stack web application framework with tools and APIs to implement most common web features. - -@links_1210_a -# Sava - -@links_1211_p -# Open-source web-based content management system. - -@links_1212_a -# Scriptella - -@links_1213_p -# ETL (Extract-Transform-Load) and script execution tool. - -@links_1214_a -# Sesar - -@links_1215_p -# Dependency Injection Container with Aspect Oriented Programming. - -@links_1216_a -# SemmleCode - -@links_1217_p -# Eclipse plugin to help you improve software quality. - -@links_1218_a -# SeQuaLite - -@links_1219_p -# A free, light-weight, java data access framework. - -@links_1220_a -# ShapeLogic - -@links_1221_p -# Toolkit for declarative programming, image processing and computer vision. - -@links_1222_a -# Shellbook - -@links_1223_p -# Desktop publishing application. - -@links_1224_a -# Signsoft intelliBO - -@links_1225_p -# Persistence middleware supporting the JDO specification. - -@links_1226_a -# SimpleORM - -@links_1227_p -# Simple Java Object Relational Mapping. - -@links_1228_a -# SymmetricDS - -@links_1229_p -# A web-enabled, database independent, data synchronization/replication software. - -@links_1230_a -# SmartFoxServer - -@links_1231_p -# Platform for developing multiuser applications and games with Macromedia Flash. 
- -@links_1232_a -# Social Bookmarks Friend Finder - -@links_1233_p -# A GUI application that allows you to find users with similar bookmarks to the user specified (for delicious.com). - -@links_1234_a -# sormula - -@links_1235_p -# Simple object relational mapping. - -@links_1236_a -# Springfuse - -@links_1237_p -# Code generation For Spring, Spring MVC & Hibernate. - -@links_1238_a -# SQLOrm - -@links_1239_p -# Java Object Relation Mapping. - -@links_1240_a -# StelsCSV and StelsXML - -@links_1241_p -# StelsCSV is a CSV JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on text files. StelsXML is a XML JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on XML files. Both use H2 as the SQL engine. - -@links_1242_a -# StorYBook - -@links_1243_p -# A summary-based tool for novelist and script writers. It helps to keep the overview over the various traces a story has. - -@links_1244_a -# StreamCruncher - -@links_1245_p -# Event (stream) processing kernel. - -@links_1246_a -# SUSE Manager, part of Linux Enterprise Server 11 - -@links_1247_p -# The SUSE Manager eases the burden of compliance with regulatory requirements and corporate policies. - -@links_1248_a -# Tune Backup - -@links_1249_p -# Easy-to-use backup solution for your iTunes library. - -@links_1250_a -# TimeWriter - -@links_1251_p -# TimeWriter is a very flexible program for time administration / time tracking. The older versions used dBase tables. The new version 5 is completely rewritten, now using the H2 database. TimeWriter is delivered in Dutch and English. - -@links_1252_a -# weblica - -@links_1253_p -# Desktop CMS. - -@links_1254_a -# Web of Web - -@links_1255_p -# Collaborative and realtime interactive media platform for the web. - -@links_1256_a -# Werkzeugkasten - -@links_1257_p -# Minimum Java Toolset. 
- -@links_1258_a -# VPDA - -@links_1259_p -# View providers driven applications is a Java based application framework for building applications composed from server components - view providers. - -@links_1260_a -# Volunteer database - -@links_1261_p -# A database front end to register volunteers, partnership and donation for a Non Profit organization. - -@mainWeb_1000_h1 -H2 データベース エンジン - -@mainWeb_1001_p -# Welcome to H2, the Java SQL database. The main features of H2 are: - -@mainWeb_1002_li -#Very fast, open source, JDBC API - -@mainWeb_1003_li -#Embedded and server modes; in-memory databases - -@mainWeb_1004_li -#Browser based Console application - -@mainWeb_1005_li -#Small footprint: around 1.5 MB jar file size - -@mainWeb_1006_h2 -ダウンロード - -@mainWeb_1007_td -# Version 1.4.196 (2017-06-10) - -@mainWeb_1008_a -#Windows Installer (5 MB) - -@mainWeb_1009_a -#All Platforms (zip, 8 MB) - -@mainWeb_1010_a -#All Downloads - -@mainWeb_1011_td -    - -@mainWeb_1012_h2 -サポート - -@mainWeb_1013_a -#Stack Overflow (tag H2) - -@mainWeb_1014_a -#Google Group English - -@mainWeb_1015_p -#, Japanese - -@mainWeb_1016_p -# For non-technical issues, use: - -@mainWeb_1017_h2 -特徴 - -@mainWeb_1018_th -H2 - -@mainWeb_1019_a -Derby - -@mainWeb_1020_a -HSQLDB - -@mainWeb_1021_a -MySQL - -@mainWeb_1022_a -PostgreSQL - -@mainWeb_1023_td -Pure Java - -@mainWeb_1024_td -対応 - -@mainWeb_1025_td -対応 - -@mainWeb_1026_td -対応 - -@mainWeb_1027_td -非対応 - -@mainWeb_1028_td -非対応 - -@mainWeb_1029_td -#Memory Mode - -@mainWeb_1030_td -対応 - -@mainWeb_1031_td -対応 - -@mainWeb_1032_td -対応 - -@mainWeb_1033_td -非対応 - -@mainWeb_1034_td -非対応 - -@mainWeb_1035_td -暗号化データベース - -@mainWeb_1036_td -対応 - -@mainWeb_1037_td -対応 - -@mainWeb_1038_td -対応 - -@mainWeb_1039_td -非対応 - -@mainWeb_1040_td -非対応 - -@mainWeb_1041_td -ODBCドライバ - -@mainWeb_1042_td -対応 - -@mainWeb_1043_td -非対応 - -@mainWeb_1044_td -非対応 - -@mainWeb_1045_td -対応 - -@mainWeb_1046_td -対応 - -@mainWeb_1047_td -フルテキストサーチ 
- -@mainWeb_1048_td -対応 - -@mainWeb_1049_td -非対応 - -@mainWeb_1050_td -非対応 - -@mainWeb_1051_td -対応 - -@mainWeb_1052_td -対応 - -@mainWeb_1053_td -#Multi Version Concurrency - -@mainWeb_1054_td -対応 - -@mainWeb_1055_td -非対応 - -@mainWeb_1056_td -対応 - -@mainWeb_1057_td -対応 - -@mainWeb_1058_td -対応 - -@mainWeb_1059_td -#Footprint (embedded) - -@mainWeb_1060_td -#~1 MB - -@mainWeb_1061_td -#~2 MB - -@mainWeb_1062_td -#~1 MB - -@mainWeb_1063_td -#~4 MB - -@mainWeb_1064_td -#~6 MB - -@mainWeb_1065_p -# See also the detailed comparison. - -@mainWeb_1066_h2 -ニュース - -@mainWeb_1067_b -ニュースフィード: - -@mainWeb_1068_a -#Full text (Atom) - -@mainWeb_1069_p -# or Header only (RSS). - -@mainWeb_1070_b -Email ニュースレター: - -@mainWeb_1071_p -# Subscribe to H2 Database News (Google account required) to get informed about new releases. Your email address is only used in this context. - -@mainWeb_1072_td -  - -@mainWeb_1073_h2 -寄稿する - -@mainWeb_1074_p -# You can contribute to the development of H2 by sending feedback and bug reports, or translate the H2 Console application (for details, start the H2 Console and select Options / Translate). To donate money, click on the PayPal button below. You will be listed as a supporter: - -@main_1000_h1 -H2 データベース エンジン - -@main_1001_p -# Welcome to H2, the free Java SQL database engine. - -@main_1002_a -クイックスタート - -@main_1003_p -# Get a fast overview. - -@main_1004_a -チュートリアル - -@main_1005_p -# Go through the samples. - -@main_1006_a -特徴 - -@main_1007_p -# See what this database can do and how to use these features. 
- -@mvstore_1000_h1 -#MVStore - -@mvstore_1001_a -# Overview - -@mvstore_1002_a -# Example Code - -@mvstore_1003_a -# Store Builder - -@mvstore_1004_a -# R-Tree - -@mvstore_1005_a -# Features - -@mvstore_1006_a -#- Maps - -@mvstore_1007_a -#- Versions - -@mvstore_1008_a -#- Transactions - -@mvstore_1009_a -#- In-Memory Performance and Usage - -@mvstore_1010_a -#- Pluggable Data Types - -@mvstore_1011_a -#- BLOB Support - -@mvstore_1012_a -#- R-Tree and Pluggable Map Implementations - -@mvstore_1013_a -#- Concurrent Operations and Caching - -@mvstore_1014_a -#- Log Structured Storage - -@mvstore_1015_a -#- Off-Heap and Pluggable Storage - -@mvstore_1016_a -#- File System Abstraction, File Locking and Online Backup - -@mvstore_1017_a -#- Encrypted Files - -@mvstore_1018_a -#- Tools - -@mvstore_1019_a -#- Exception Handling - -@mvstore_1020_a -#- Storage Engine for H2 - -@mvstore_1021_a -# File Format - -@mvstore_1022_a -# Similar Projects and Differences to Other Storage Engines - -@mvstore_1023_a -# Current State - -@mvstore_1024_a -# Requirements - -@mvstore_1025_h2 -#Overview - -@mvstore_1026_p -# The MVStore is a persistent, log structured key-value store. It is planned to be the next storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL. - -@mvstore_1027_li -#MVStore stands for "multi-version store". - -@mvstore_1028_li -#Each store contains a number of maps that can be accessed using the java.util.Map interface. - -@mvstore_1029_li -#Both file-based persistence and in-memory operation are supported. - -@mvstore_1030_li -#It is intended to be fast, simple to use, and small. - -@mvstore_1031_li -#Concurrent read and write operations are supported. - -@mvstore_1032_li -#Transactions are supported (including concurrent transactions and 2-phase commit). - -@mvstore_1033_li -#The tool is very modular. 
It supports pluggable data types and serialization, pluggable storage (to a file, to off-heap memory), pluggable map implementations (B-tree, R-tree, concurrent B-tree currently), BLOB storage, and a file system abstraction to support encrypted files and zip files. - -@mvstore_1034_h2 -#Example Code - -@mvstore_1035_p -# The following sample code shows how to use the tool: - -@mvstore_1036_h2 -#Store Builder - -@mvstore_1037_p -# The MVStore.Builder provides a fluid interface to build a store if configuration options are needed. Example usage: - -@mvstore_1038_p -# The list of available options is: - -@mvstore_1039_li -#autoCommitBufferSize: the size of the write buffer. - -@mvstore_1040_li -#autoCommitDisabled: to disable auto-commit. - -@mvstore_1041_li -#backgroundExceptionHandler: a handler for exceptions that could occur while writing in the background. - -@mvstore_1042_li -#cacheSize: the cache size in MB. - -@mvstore_1043_li -#compress: compress the data when storing using a fast algorithm (LZF). - -@mvstore_1044_li -#compressHigh: compress the data when storing using a slower algorithm (Deflate). - -@mvstore_1045_li -#encryptionKey: the key for file encryption. - -@mvstore_1046_li -#fileName: the name of the file, for file based stores. - -@mvstore_1047_li -#fileStore: the storage implementation to use. - -@mvstore_1048_li -#pageSplitSize: the point where pages are split. - -@mvstore_1049_li -#readOnly: open the file in read-only mode. - -@mvstore_1050_h2 -#R-Tree - -@mvstore_1051_p -# The MVRTreeMap is an R-tree implementation that supports fast spatial queries. It can be used as follows: - -@mvstore_1052_p -# The default number of dimensions is 2. To use a different number of dimensions, call new MVRTreeMap.Builder<String>().dimensions(3). The minimum number of dimensions is 1, the maximum is 32. - -@mvstore_1053_h2 -特徴 - -@mvstore_1054_h3 -#Maps - -@mvstore_1055_p -# Each store contains a set of named maps. 
A map is sorted by key, and supports the common lookup operations, including access to the first and last key, iterate over some or all keys, and so on. - -@mvstore_1056_p -# Also supported, and very uncommon for maps, is fast index lookup: the entries of the map can be efficiently accessed like a random-access list (get the entry at the given index), and the index of a key can be calculated efficiently. That also means getting the median of two keys is very fast, and a range of keys can be counted very quickly. The iterator supports fast skipping. This is possible because internally, each map is organized in the form of a counted B+-tree. - -@mvstore_1057_p -# In database terms, a map can be used like a table, where the key of the map is the primary key of the table, and the value is the row. A map can also represent an index, where the key of the map is the key of the index, and the value of the map is the primary key of the table (for non-unique indexes, the key of the map must also contain the primary key). - -@mvstore_1058_h3 -#Versions - -@mvstore_1059_p -# A version is a snapshot of all the data of all maps at a given point in time. Creating a snapshot is fast: only those pages that are changed after a snapshot are copied. This behavior is also called COW (copy on write). Old versions are readable. Rollback to an old version is supported. - -@mvstore_1060_p -# The following sample code shows how to create a store, open a map, add some data, and access the current and an old version: - -@mvstore_1061_h3 -#Transactions - -@mvstore_1062_p -# To support multiple concurrent open transactions, a transaction utility is included, the TransactionStore. The tool supports PostgreSQL style "read committed" transaction isolation with savepoints, two-phase commit, and other features typically available in a database. There is no limit on the size of a transaction (the log is written to disk for large or long running transactions). 
- -@mvstore_1063_p -# Internally, this utility stores the old versions of changed entries in a separate map, similar to a transaction log, except that entries of a closed transaction are removed, and the log is usually not stored for short transactions. For common use cases, the storage overhead of this utility is very small compared to the overhead of a regular transaction log. - -@mvstore_1064_h3 -#In-Memory Performance and Usage - -@mvstore_1065_p -# Performance of in-memory operations is about 50% slower than java.util.TreeMap. - -@mvstore_1066_p -# The memory overhead for large maps is slightly better than for the regular map implementations, but there is a higher overhead per map. For maps with less than about 25 entries, the regular map implementations need less memory. - -@mvstore_1067_p -# If no file name is specified, the store operates purely in memory. Except for persisting data, all features are supported in this mode (multi-versioning, index lookup, R-tree and so on). If a file name is specified, all operations occur in memory (with the same performance characteristics) until data is persisted. - -@mvstore_1068_p -# As in all map implementations, keys need to be immutable, that means changing the key object after an entry has been added is not allowed. If a file name is specified, the value may also not be changed after adding an entry, because it might be serialized (which could happen at any time when autocommit is enabled). - -@mvstore_1069_h3 -#Pluggable Data Types - -@mvstore_1070_p -# Serialization is pluggable. The default serialization currently supports many common data types, and uses Java serialization for other objects. The following classes are currently directly supported: Boolean, Byte, Short, Character, Integer, Long, Float, Double, BigInteger, BigDecimal, String, UUID, Date and arrays (both primitive arrays and object arrays). For serialized objects, the size estimate is adjusted using an exponential moving average. 
- -@mvstore_1071_p -# Parameterized data types are supported (for example one could build a string data type that limits the length). - -@mvstore_1072_p -# The storage engine itself does not have any length limits, so that keys, values, pages, and chunks can be very big (as big as fits in memory). Also, there is no inherent limit to the number of maps and chunks. Due to using a log structured storage, there is no special case handling for large keys or pages. - -@mvstore_1073_h3 -#BLOB Support - -@mvstore_1074_p -# There is a mechanism that stores large binary objects by splitting them into smaller blocks. This allows to store objects that don't fit in memory. Streaming as well as random access reads on such objects are supported. This tool is written on top of the store, using only the map interface. - -@mvstore_1075_h3 -#R-Tree and Pluggable Map Implementations - -@mvstore_1076_p -# The map implementation is pluggable. In addition to the default MVMap (multi-version map), there is a multi-version R-tree map implementation for spatial operations. - -@mvstore_1077_h3 -#Concurrent Operations and Caching - -@mvstore_1078_p -# Concurrent reads and writes are supported. All such read operations can occur in parallel. Concurrent reads from the page cache, as well as concurrent reads from the file system are supported. Write operations first read the relevant pages from disk to memory (this can happen concurrently), and only then modify the data. The in-memory parts of write operations are synchronized. Writing changes to the file can occur concurrently to modifying the data, as writing operates on a snapshot. - -@mvstore_1079_p -# Caching is done on the page level. The page cache is a concurrent LIRS cache, which should be resistant against scan operations. - -@mvstore_1080_p -# For fully scalable concurrent write operations to a map (in-memory and to disk), the map could be split into multiple maps in different stores ('sharding'). 
The plan is to add such a mechanism later when needed. - -@mvstore_1081_h3 -#Log Structured Storage - -@mvstore_1082_p -# Internally, changes are buffered in memory, and once enough changes have accumulated, they are written in one continuous disk write operation. Compared to traditional database storage engines, this should improve write performance for file systems and storage systems that do not efficiently support small random writes, such as Btrfs, as well as SSDs. (According to a test, write throughput of a common SSD increases with write block size, until a block size of 2 MB, and then does not further increase.) By default, changes are automatically written when more than a number of pages are modified, and once every second in a background thread, even if only little data was changed. Changes can also be written explicitly by calling commit(). - -@mvstore_1083_p -# When storing, all changed pages are serialized, optionally compressed using the LZF algorithm, and written sequentially to a free area of the file. Each such change set is called a chunk. All parent pages of the changed B-trees are stored in this chunk as well, so that each chunk also contains the root of each changed map (which is the entry point for reading this version of the data). There is no separate index: all data is stored as a list of pages. Per store, there is one additional map that contains the metadata (the list of maps, where the root page of each map is stored, and the list of chunks). - -@mvstore_1084_p -# There are usually two write operations per chunk: one to store the chunk data (the pages), and one to update the file header (so it points to the latest chunk). If the chunk is appended at the end of the file, the file header is only written at the end of the chunk. There is no transaction log, no undo log, and there are no in-place updates (however, unused chunks are overwritten by default). 
- -@mvstore_1085_p -# Old data is kept for at least 45 seconds (configurable), so that there are no explicit sync operations required to guarantee data consistency. An application can also sync explicitly when needed. To reuse disk space, the chunks with the lowest amount of live data are compacted (the live data is stored again in the next chunk). To improve data locality and disk space usage, the plan is to automatically defragment and compact data. - -@mvstore_1086_p -# Compared to traditional storage engines (that use a transaction log, undo log, and main storage area), the log structured storage is simpler, more flexible, and typically needs less disk operations per change, as data is only written once instead of twice or 3 times, and because the B-tree pages are always full (they are stored next to each other) and can be easily compressed. But temporarily, disk space usage might actually be a bit higher than for a regular database, as disk space is not immediately re-used (there are no in-place updates). - -@mvstore_1087_h3 -#Off-Heap and Pluggable Storage - -@mvstore_1088_p -# Storage is pluggable. Unless pure in-memory operation is used, the default storage is to a single file. - -@mvstore_1089_p -# An off-heap storage implementation is available. This storage keeps the data in the off-heap memory, meaning outside of the regular garbage collected heap. This allows to use very large in-memory stores without having to increase the JVM heap, which would increase Java garbage collection pauses a lot. Memory is allocated using ByteBuffer.allocateDirect. One chunk is allocated at a time (each chunk is usually a few MB large), so that allocation cost is low. To use the off-heap storage, call: - -@mvstore_1090_h3 -#File System Abstraction, File Locking and Online Backup - -@mvstore_1091_p -# The file system is pluggable. The same file system abstraction is used as H2 uses. The file can be encrypted using an encrypting file system wrapper. 
Other file system implementations support reading from a compressed zip or jar file. The file system abstraction closely matches the Java 7 file system API. - -@mvstore_1092_p -# Each store may only be opened once within a JVM. When opening a store, the file is locked in exclusive mode, so that the file can only be changed from within one process. Files can be opened in read-only mode, in which case a shared lock is used. - -@mvstore_1093_p -# The persisted data can be backed up at any time, even during write operations (online backup). To do that, automatic disk space reuse needs to be first disabled, so that new data is always appended at the end of the file. Then, the file can be copied. The file handle is available to the application. It is recommended to use the utility class FileChannelInputStream to do this. For encrypted databases, both the encrypted (raw) file content, as well as the clear text content, can be backed up. - -@mvstore_1094_h3 -#Encrypted Files - -@mvstore_1095_p -# File encryption ensures the data can only be read with the correct password. Data can be encrypted as follows: - -@mvstore_1096_p -# The following algorithms and settings are used: - -@mvstore_1097_li -#The password char array is cleared after use, to reduce the risk that the password is stolen even if the attacker has access to the main memory. - -@mvstore_1098_li -#The password is hashed according to the PBKDF2 standard, using the SHA-256 hash algorithm. - -@mvstore_1099_li -#The length of the salt is 64 bits, so that an attacker can not use a pre-calculated password hash table (rainbow table). It is generated using a cryptographically secure random number generator. - -@mvstore_1100_li -#To speed up opening an encrypted stores on Android, the number of PBKDF2 iterations is 10. The higher the value, the better the protection against brute-force password cracking attacks, but the slower is opening a file. 
- -@mvstore_1101_li -#The file itself is encrypted using the standardized disk encryption mode XTS-AES. Only little more than one AES-128 round per block is needed. - -@mvstore_1102_h3 -#Tools - -@mvstore_1103_p -# There is a tool, the MVStoreTool, to dump the contents of a file. - -@mvstore_1104_h3 -#Exception Handling - -@mvstore_1105_p -# This tool does not throw checked exceptions. Instead, unchecked exceptions are thrown if needed. The error message always contains the version of the tool. The following exceptions can occur: - -@mvstore_1106_code -#IllegalStateException - -@mvstore_1107_li -# if a map was already closed or an IO exception occurred, for example if the file was locked, is already closed, could not be opened or closed, if reading or writing failed, if the file is corrupt, or if there is an internal error in the tool. For such exceptions, an error code is added so that the application can distinguish between different error cases. - -@mvstore_1108_code -#IllegalArgumentException - -@mvstore_1109_li -# if a method was called with an illegal argument. - -@mvstore_1110_code -#UnsupportedOperationException - -@mvstore_1111_li -# if a method was called that is not supported, for example trying to modify a read-only map. - -@mvstore_1112_code -#ConcurrentModificationException - -@mvstore_1113_li -# if a map is modified concurrently. - -@mvstore_1114_h3 -#Storage Engine for H2 - -@mvstore_1115_p -# For H2 version 1.4 and newer, the MVStore is the default storage engine (supporting SQL, JDBC, transactions, MVCC, and so on). For older versions, append ;MV_STORE=TRUE to the database URL. Even though it can be used with the default table level locking, by default the MVCC mode is enabled when using the MVStore. - -@mvstore_1116_h2 -#File Format - -@mvstore_1117_p -# The data is stored in one file. The file contains two file headers (for safety), and a number of chunks. The file headers are one block each; a block is 4096 bytes. 
Each chunk is at least one block, but typically 200 blocks or more. Data is stored in the chunks in the form of a log structured storage. There is one chunk for every version. - -@mvstore_1118_p -# Each chunk contains a number of B-tree pages. As an example, the following code: - -@mvstore_1119_p -# will result in the following two chunks (excluding metadata): - -@mvstore_1120_b -#Chunk 1: - -@mvstore_1121_p -# - Page 1: (root) node with 2 entries pointing to page 2 and 3 - -@mvstore_1122_p -# - Page 2: leaf with 140 entries (keys 0 - 139) - -@mvstore_1123_p -# - Page 3: leaf with 260 entries (keys 140 - 399) - -@mvstore_1124_b -#Chunk 2: - -@mvstore_1125_p -# - Page 4: (root) node with 2 entries pointing to page 5 and 3 - -@mvstore_1126_p -# - Page 5: leaf with 140 entries (keys 0 - 139) - -@mvstore_1127_p -# That means each chunk contains the changes of one version: the new version of the changed pages and the parent pages, recursively, up to the root page. Pages in subsequent chunks refer to pages in earlier chunks. - -@mvstore_1128_h3 -#File Header - -@mvstore_1129_p -# There are two file headers, which normally contain the exact same data. But once in a while, the file headers are updated, and writing could partially fail, which could corrupt a header. That's why there is a second header. Only the file headers are updated in this way (called "in-place update"). The headers contain the following data: - -@mvstore_1130_p -# The data is stored in the form of a key-value pair. Each value is stored as a hexadecimal number. The entries are: - -@mvstore_1131_li -#H: The entry "H:2" stands for the H2 database. - -@mvstore_1132_li -#block: The block number where one of the newest chunks starts (but not necessarily the newest). - -@mvstore_1133_li -#blockSize: The block size of the file; currently always hex 1000, which is decimal 4096, to match the disk sector length of modern hard disks. 
- -@mvstore_1134_li -#chunk: The chunk id, which is normally the same value as the version; however, the chunk id might roll over to 0, while the version doesn't. - -@mvstore_1135_li -#created: The number of milliseconds since 1970 when the file was created. - -@mvstore_1136_li -#format: The file format number. Currently 1. - -@mvstore_1137_li -#version: The version number of the chunk. - -@mvstore_1138_li -#fletcher: The Fletcher-32 checksum of the header. - -@mvstore_1139_p -# When opening the file, both headers are read and the checksum is verified. If both headers are valid, the one with the newer version is used. The chunk with the latest version is then detected (details about this see below), and the rest of the metadata is read from there. If the chunk id, block and version are not stored in the file header, then the latest chunk lookup starts with the last chunk in the file. - -@mvstore_1140_h3 -#Chunk Format - -@mvstore_1141_p -# There is one chunk per version. Each chunk consists of a header, the pages that were modified in this version, and a footer. The pages contain the actual data of the maps. The pages inside a chunk are stored right after the header, next to each other (unaligned). The size of a chunk is a multiple of the block size. The footer is stored in the last 128 bytes of the chunk. - -@mvstore_1142_p -# The footer allows to verify that the chunk is completely written (a chunk is written as one write operation), and allows to find the start position of the very last chunk in the file. The chunk header and footer contain the following data: - -@mvstore_1143_p -# The fields of the chunk header and footer are: - -@mvstore_1144_li -#chunk: The chunk id. - -@mvstore_1145_li -#block: The first block of the chunk (multiply by the block size to get the position in the file). - -@mvstore_1146_li -#len: The size of the chunk in number of blocks. - -@mvstore_1147_li -#map: The id of the newest map; incremented when a new map is created. 
- -@mvstore_1148_li -#max: The sum of all maximum page sizes (see page format). - -@mvstore_1149_li -#next: The predicted start block of the next chunk. - -@mvstore_1150_li -#pages: The number of pages in the chunk. - -@mvstore_1151_li -#root: The position of the metadata root page (see page format). - -@mvstore_1152_li -#time: The time the chunk was written, in milliseconds after the file was created. - -@mvstore_1153_li -#version: The version this chunk represents. - -@mvstore_1154_li -#fletcher: The checksum of the footer. - -@mvstore_1155_p -# Chunks are never updated in-place. Each chunk contains the pages that were changed in that version (there is one chunk per version, see above), plus all the parent nodes of those pages, recursively, up to the root page. If an entry in a map is changed, removed, or added, then the respective page is copied, modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented. This mechanism is called copy-on-write, and is similar to how the Btrfs file system works. Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first. - -@mvstore_1156_p -# How the newest chunk is located when opening a store: The file header contains the position of a recent chunk, but not always the newest one. This is to reduce the number of file header updates. After opening the file, the file headers, and the chunk footer of the very last chunk (at the end of the file) are read. From those candidates, the header of the most recent chunk is read. If it contains a "next" pointer (see above), those chunk's header and footer are read as well. 
If it turned out to be a newer valid chunk, this is repeated, until the newest chunk was found. Before writing a chunk, the position of the next chunk is predicted based on the assumption that the next chunk will be of the same size as the current one. When the next chunk is written, and the previous prediction turned out to be incorrect, the file header is updated as well. In any case, the file header is updated if the next chain gets longer than 20 hops. - -@mvstore_1157_h3 -#Page Format - -@mvstore_1158_p -# Each map is a B-tree, and the map data is stored in (B-tree-) pages. There are leaf pages that contain the key-value pairs of the map, and internal nodes, which only contain keys and pointers to leaf pages. The root of a tree is either a leaf or an internal node. Unlike file header and chunk header and footer, the page data is not human readable. Instead, it is stored as byte arrays, with long (8 bytes), int (4 bytes), short (2 bytes), and variable size int and long (1 to 5 / 10 bytes). The page format is: - -@mvstore_1159_li -#length (int): Length of the page in bytes. - -@mvstore_1160_li -#checksum (short): Checksum (chunk id xor offset within the chunk xor page length). - -@mvstore_1161_li -#mapId (variable size int): The id of the map this page belongs to. - -@mvstore_1162_li -#len (variable size int): The number of keys in the page. - -@mvstore_1163_li -#type (byte): The page type (0 for leaf page, 1 for internal node; plus 2 if the keys and values are compressed with the LZF algorithm, or plus 6 if the keys and values are compressed with the Deflate algorithm). - -@mvstore_1164_li -#children (array of long; internal nodes only): The position of the children. - -@mvstore_1165_li -#childCounts (array of variable size long; internal nodes only): The total number of entries for the given child page. - -@mvstore_1166_li -#keys (byte array): All keys, stored depending on the data type. 
- -@mvstore_1167_li -#values (byte array; leaf pages only): All values, stored depending on the data type. - -@mvstore_1168_p -# Even though this is not required by the file format, pages are stored in the following order: For each map, the root page is stored first, then the internal nodes (if there are any), and then the leaf pages. This should speed up reads for media where sequential reads are faster than random access reads. The metadata map is stored at the end of a chunk. - -@mvstore_1169_p -# Pointers to pages are stored as a long, using a special format: 26 bits for the chunk id, 32 bits for the offset within the chunk, 5 bits for the length code, 1 bit for the page type (leaf or internal node). The page type is encoded so that when clearing or removing a map, leaf pages don't have to be read (internal nodes do have to be read in order to know where all the pages are; but in a typical B-tree the vast majority of the pages are leaf pages). The absolute file position is not included so that chunks can be moved within the file without having to change page pointers; only the chunk metadata needs to be changed. The length code is a number from 0 to 31, where 0 means the maximum length of the page is 32 bytes, 1 means 48 bytes, 2: 64, 3: 96, 4: 128, 5: 192, and so on until 31 which means longer than 1 MB. That way, reading a page only requires one read operation (except for very large pages). The sum of the maximum length of all pages is stored in the chunk metadata (field "max"), and when a page is marked as removed, the live maximum length is adjusted. This allows to estimate the amount of free space within a block, in addition to the number of free pages. - -@mvstore_1170_p -# The total number of entries in child pages are kept to allow efficient range counting, lookup by index, and skip operations. The pages form a counted B-tree. - -@mvstore_1171_p -# Data compression: The data after the page type are optionally compressed using the LZF algorithm. 
- -@mvstore_1172_h3 -#Metadata Map - -@mvstore_1173_p -# In addition to the user maps, there is one metadata map that contains names and positions of user maps, and chunk metadata. The very last page of a chunk contains the root page of that metadata map. The exact position of this root page is stored in the chunk header. This page (directly or indirectly) points to the root pages of all other maps. The metadata map of a store with a map named "data", and one chunk, contains the following entries: - -@mvstore_1174_li -#chunk.1: The metadata of chunk 1. This is the same data as the chunk header, plus the number of live pages, and the maximum live length. - -@mvstore_1175_li -#map.1: The metadata of map 1. The entries are: name, createVersion, and type. - -@mvstore_1176_li -#name.data: The map id of the map named "data". The value is "1". - -@mvstore_1177_li -#root.1: The root position of map 1. - -@mvstore_1178_li -#setting.storeVersion: The store version (a user defined value). - -@mvstore_1179_h2 -#Similar Projects and Differences to Other Storage Engines - -@mvstore_1180_p -# Unlike similar storage engines like LevelDB and Kyoto Cabinet, the MVStore is written in Java and can easily be embedded in a Java and Android application. - -@mvstore_1181_p -# The MVStore is somewhat similar to the Berkeley DB Java Edition because it is also written in Java, and is also a log structured storage, but the H2 license is more liberal. - -@mvstore_1182_p -# Like SQLite 3, the MVStore keeps all data in one file. Unlike SQLite 3, the MVStore uses a log structured storage. The plan is to make the MVStore both easier to use as well as faster than SQLite 3. In a recent (very simple) test, the MVStore was about twice as fast as SQLite 3 on Android. - -@mvstore_1183_p -# The API of the MVStore is similar to MapDB (previously known as JDBM) from Jan Kotek, and some code is shared between MVStore and MapDB. However, unlike MapDB, the MVStore uses a log structured storage. 
The MVStore does not have a record size limit. - -@mvstore_1184_h2 -#Current State - -@mvstore_1185_p -# The code is still experimental at this stage. The API as well as the behavior may partially change. Features may be added and removed (even though the main features will stay). - -@mvstore_1186_h2 必要条件 - -@mvstore_1187_p -# The MVStore is included in the latest H2 jar file. - -@mvstore_1188_p -# There are no special requirements to use it. The MVStore should run on any JVM as well as on Android. - -@mvstore_1189_p -# To build just the MVStore (without the database engine), run: - -@mvstore_1190_p -# This will create the file bin/h2mvstore-1.4.196.jar (about 200 KB). - -@performance_1000_h1 -パフォーマンス - -@performance_1001_a -# Performance Comparison - -@performance_1002_a -# PolePosition Benchmark - -@performance_1003_a -# Database Performance Tuning - -@performance_1004_a -# Using the Built-In Profiler - -@performance_1005_a -# Application Profiling - -@performance_1006_a -# Database Profiling - -@performance_1007_a -# Statement Execution Plans - -@performance_1008_a -# How Data is Stored and How Indexes Work - -@performance_1009_a -# Fast Database Import - -@performance_1010_h2 -#Performance Comparison - -@performance_1011_p -# In many cases H2 is faster than other (open source and not open source) database engines. Please note this is mostly a single connection benchmark run on one computer, with many very simple operations running against the database. This benchmark does not include very complex queries. The embedded mode of H2 is faster than the client-server mode because the per-statement overhead is greatly reduced. 
- -@performance_1012_h3 -#Embedded - -@performance_1013_th -#Test Case - -@performance_1014_th -#Unit - -@performance_1015_th -H2 - -@performance_1016_th -HSQLDB - -@performance_1017_th -Derby - -@performance_1018_td -#Simple: Init - -@performance_1019_td -#ms - -@performance_1020_td -#1019 - -@performance_1021_td -#1907 - -@performance_1022_td -#8280 - -@performance_1023_td -#Simple: Query (random) - -@performance_1024_td -#ms - -@performance_1025_td -#1304 - -@performance_1026_td -#873 - -@performance_1027_td -#1912 - -@performance_1028_td -#Simple: Query (sequential) - -@performance_1029_td -#ms - -@performance_1030_td -#835 - -@performance_1031_td -#1839 - -@performance_1032_td -#5415 - -@performance_1033_td -#Simple: Update (sequential) - -@performance_1034_td -#ms - -@performance_1035_td -#961 - -@performance_1036_td -#2333 - -@performance_1037_td -#21759 - -@performance_1038_td -#Simple: Delete (sequential) - -@performance_1039_td -#ms - -@performance_1040_td -#950 - -@performance_1041_td -#1922 - -@performance_1042_td -#32016 - -@performance_1043_td -#Simple: Memory Usage - -@performance_1044_td -#MB - -@performance_1045_td -#21 - -@performance_1046_td -#10 - -@performance_1047_td -#8 - -@performance_1048_td -#BenchA: Init - -@performance_1049_td -#ms - -@performance_1050_td -#919 - -@performance_1051_td -#2133 - -@performance_1052_td -#7528 - -@performance_1053_td -#BenchA: Transactions - -@performance_1054_td -#ms - -@performance_1055_td -#1219 - -@performance_1056_td -#2297 - -@performance_1057_td -#8541 - -@performance_1058_td -#BenchA: Memory Usage - -@performance_1059_td -#MB - -@performance_1060_td -#12 - -@performance_1061_td -#15 - -@performance_1062_td -#7 - -@performance_1063_td -#BenchB: Init - -@performance_1064_td -#ms - -@performance_1065_td -#905 - -@performance_1066_td -#1993 - -@performance_1067_td -#8049 - -@performance_1068_td -#BenchB: Transactions - -@performance_1069_td -#ms - -@performance_1070_td -#1091 - -@performance_1071_td -#583 
- -@performance_1072_td -#1165 - -@performance_1073_td -#BenchB: Memory Usage - -@performance_1074_td -#MB - -@performance_1075_td -#17 - -@performance_1076_td -#11 - -@performance_1077_td -#8 - -@performance_1078_td -#BenchC: Init - -@performance_1079_td -#ms - -@performance_1080_td -#2491 - -@performance_1081_td -#4003 - -@performance_1082_td -#8064 - -@performance_1083_td -#BenchC: Transactions - -@performance_1084_td -#ms - -@performance_1085_td -#1979 - -@performance_1086_td -#803 - -@performance_1087_td -#2840 - -@performance_1088_td -#BenchC: Memory Usage - -@performance_1089_td -#MB - -@performance_1090_td -#19 - -@performance_1091_td -#22 - -@performance_1092_td -#9 - -@performance_1093_td -#Executed statements - -@performance_1094_td -## - -@performance_1095_td -#1930995 - -@performance_1096_td -#1930995 - -@performance_1097_td -#1930995 - -@performance_1098_td -#Total time - -@performance_1099_td -#ms - -@performance_1100_td -#13673 - -@performance_1101_td -#20686 - -@performance_1102_td -#105569 - -@performance_1103_td -#Statements per second - -@performance_1104_td -## - -@performance_1105_td -#141226 - -@performance_1106_td -#93347 - -@performance_1107_td -#18291 - -@performance_1108_h3 -#Client-Server - -@performance_1109_th -#Test Case - -@performance_1110_th -#Unit - -@performance_1111_th -#H2 (Server) - -@performance_1112_th -HSQLDB - -@performance_1113_th -Derby - -@performance_1114_th -PostgreSQL - -@performance_1115_th -MySQL - -@performance_1116_td -#Simple: Init - -@performance_1117_td -#ms - -@performance_1118_td -#16338 - -@performance_1119_td -#17198 - -@performance_1120_td -#27860 - -@performance_1121_td -#30156 - -@performance_1122_td -#29409 - -@performance_1123_td -#Simple: Query (random) - -@performance_1124_td -#ms - -@performance_1125_td -#3399 - -@performance_1126_td -#2582 - -@performance_1127_td -#6190 - -@performance_1128_td -#3315 - -@performance_1129_td -#3342 - -@performance_1130_td -#Simple: Query (sequential) - 
-@performance_1131_td -#ms - -@performance_1132_td -#21841 - -@performance_1133_td -#18699 - -@performance_1134_td -#42347 - -@performance_1135_td -#30774 - -@performance_1136_td -#32611 - -@performance_1137_td -#Simple: Update (sequential) - -@performance_1138_td -#ms - -@performance_1139_td -#6913 - -@performance_1140_td -#7745 - -@performance_1141_td -#28576 - -@performance_1142_td -#32698 - -@performance_1143_td -#11350 - -@performance_1144_td -#Simple: Delete (sequential) - -@performance_1145_td -#ms - -@performance_1146_td -#8051 - -@performance_1147_td -#9751 - -@performance_1148_td -#42202 - -@performance_1149_td -#44480 - -@performance_1150_td -#16555 - -@performance_1151_td -#Simple: Memory Usage - -@performance_1152_td -#MB - -@performance_1153_td -#22 - -@performance_1154_td -#11 - -@performance_1155_td -#9 - -@performance_1156_td -#0 - -@performance_1157_td -#1 - -@performance_1158_td -#BenchA: Init - -@performance_1159_td -#ms - -@performance_1160_td -#12996 - -@performance_1161_td -#14720 - -@performance_1162_td -#24722 - -@performance_1163_td -#26375 - -@performance_1164_td -#26060 - -@performance_1165_td -#BenchA: Transactions - -@performance_1166_td -#ms - -@performance_1167_td -#10134 - -@performance_1168_td -#10250 - -@performance_1169_td -#18452 - -@performance_1170_td -#21453 - -@performance_1171_td -#15877 - -@performance_1172_td -#BenchA: Memory Usage - -@performance_1173_td -#MB - -@performance_1174_td -#13 - -@performance_1175_td -#15 - -@performance_1176_td -#9 - -@performance_1177_td -#0 - -@performance_1178_td -#1 - -@performance_1179_td -#BenchB: Init - -@performance_1180_td -#ms - -@performance_1181_td -#15264 - -@performance_1182_td -#16889 - -@performance_1183_td -#28546 - -@performance_1184_td -#31610 - -@performance_1185_td -#29747 - -@performance_1186_td -#BenchB: Transactions - -@performance_1187_td -#ms - -@performance_1188_td -#3017 - -@performance_1189_td -#3376 - -@performance_1190_td -#1842 - -@performance_1191_td -#2771 - 
-@performance_1192_td -#1433 - -@performance_1193_td -#BenchB: Memory Usage - -@performance_1194_td -#MB - -@performance_1195_td -#17 - -@performance_1196_td -#12 - -@performance_1197_td -#11 - -@performance_1198_td -#1 - -@performance_1199_td -#1 - -@performance_1200_td -#BenchC: Init - -@performance_1201_td -#ms - -@performance_1202_td -#14020 - -@performance_1203_td -#10407 - -@performance_1204_td -#17655 - -@performance_1205_td -#19520 - -@performance_1206_td -#17532 - -@performance_1207_td -#BenchC: Transactions - -@performance_1208_td -#ms - -@performance_1209_td -#5076 - -@performance_1210_td -#3160 - -@performance_1211_td -#6411 - -@performance_1212_td -#6063 - -@performance_1213_td -#4530 - -@performance_1214_td -#BenchC: Memory Usage - -@performance_1215_td -#MB - -@performance_1216_td -#19 - -@performance_1217_td -#21 - -@performance_1218_td -#11 - -@performance_1219_td -#1 - -@performance_1220_td -#1 - -@performance_1221_td -#Executed statements - -@performance_1222_td -## - -@performance_1223_td -#1930995 - -@performance_1224_td -#1930995 - -@performance_1225_td -#1930995 - -@performance_1226_td -#1930995 - -@performance_1227_td -#1930995 - -@performance_1228_td -#Total time - -@performance_1229_td -#ms - -@performance_1230_td -#117049 - -@performance_1231_td -#114777 - -@performance_1232_td -#244803 - -@performance_1233_td -#249215 - -@performance_1234_td -#188446 - -@performance_1235_td -#Statements per second - -@performance_1236_td -## - -@performance_1237_td -#16497 - -@performance_1238_td -#16823 - -@performance_1239_td -#7887 - -@performance_1240_td -#7748 - -@performance_1241_td -#10246 - -@performance_1242_h3 -#Benchmark Results and Comments - -@performance_1243_h4 -H2 - -@performance_1244_p -# Version 1.4.177 (2014-04-12) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. 
One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. The advantage of buffering is: there is no limit on the result set size. - -@performance_1245_h4 -HSQLDB - -@performance_1246_p -# Version 2.3.2 was used for the test. Cached tables are used in this test (hsqldb.default_table_type=cached), and the write delay is 1 second (SET WRITE_DELAY 1). - -@performance_1247_h4 -Derby - -@performance_1248_p -# Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified: leaving autocommit on is a problem for Derby. If it is switched off during the whole test, the results are about 20% better for Derby. Derby calls FileChannel.force(false), but only twice per log file (not on each commit). Disabling this call improves performance for Derby by about 2%. Unlike H2, Derby does not call FileDescriptor.sync() on each checkpoint. Derby supports a testing mode (system property derby.system.durability=test) where durability is disabled. According to the documentation, this setting should be used for testing only, as the database may not recover after a crash. Enabling this setting improves performance by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less than half as fast as H2 in default mode. - -@performance_1249_h4 -PostgreSQL - -@performance_1250_p -# Version 9.1.5 was used for the test. The following options where changed in postgresql.conf: fsync = off, commit_delay = 1000. PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. - -@performance_1251_h4 -MySQL - -@performance_1252_p -# Version 5.1.65-log was used for the test. 
MySQL was run with the InnoDB backend. The setting innodb_flush_log_at_trx_commit (found in the my.ini / my.cnf file) was set to 0. Otherwise (and by default), MySQL is slow (around 140 statements per second in this test) because it tries to flush the data to disk for each commit. For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Too bad this setting is not listed in the configuration wizard, and it is always overwritten when using the wizard. You need to change this setting manually in the file my.ini / my.cnf, and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. - -@performance_1253_h4 -#Firebird - -@performance_1254_p -# Firebird 1.5 (default installation) was tested, but the results are not published currently. It is possible to run the performance test with the Firebird database, and any information on how to configure Firebird for higher performance is welcome. - -@performance_1255_h4 -#Why Oracle / MS SQL Server / DB2 are Not Listed - -@performance_1256_p -# The license of these databases does not allow to publish benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. SQLite was not tested because the JDBC driver doesn't support transactions. - -@performance_1257_h3 -#About this Benchmark - -@performance_1258_h4 -#How to Run - -@performance_1259_p -# This test was as follows: - -@performance_1260_h4 -#Separate Process per Database - -@performance_1261_p -# For each database, a new process is started, to ensure the previous test does not impact the current test. - -@performance_1262_h4 -#Number of Connections - -@performance_1263_p -# This is mostly a single-connection benchmark. BenchB uses multiple connections; the other tests use one connection. 
- -@performance_1264_h4 -#Real-World Tests - -@performance_1265_p -# Good benchmarks emulate real-world use cases. This benchmark includes 4 test cases: BenchSimple uses one table and many small updates / deletes. BenchA is similar to the TPC-A test, but single connection / single threaded (see also: www.tpc.org). BenchB is similar to the TPC-B test, using multiple connections (one thread per connection). BenchC is similar to the TPC-C test, but single connection / single threaded. - -@performance_1266_h4 -#Comparing Embedded with Server Databases - -@performance_1267_p -# This is mainly a benchmark for embedded databases (where the application runs in the same virtual machine as the database engine). However MySQL and PostgreSQL are not Java databases and cannot be embedded into a Java application. For the Java databases, both embedded and server modes are tested. - -@performance_1268_h4 -#Test Platform - -@performance_1269_p -# This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. The JVM used is Sun JDK 1.6. - -@performance_1270_h4 -#Multiple Runs - -@performance_1271_p -# When a Java benchmark is run first, the code is not fully compiled and therefore runs slower than when running multiple times. A benchmark should always run the same test multiple times and ignore the first run(s). This benchmark runs three times, but only the last run is measured. - -@performance_1272_h4 -#Memory Usage - -@performance_1273_p -# It is not enough to measure the time taken, the memory usage is important as well. Performance can be improved by using a bigger cache, but the amount of memory is limited. HSQLDB tables are kept fully in memory by default; this benchmark uses 'disk based' tables for all databases. Unfortunately, it is not so easy to calculate the memory usage of PostgreSQL and MySQL, because they run in a different process than the test. This benchmark currently does not print memory usage of those databases. 
- -@performance_1274_h4 -#Delayed Operations - -@performance_1275_p -# Some databases delay some operations (for example flushing the buffers) until after the benchmark is run. This benchmark waits between each database tested, and each database runs in a different process (sequentially). - -@performance_1276_h4 -#Transaction Commit / Durability - -@performance_1277_p -# Durability means transaction committed to the database will not be lost. Some databases (for example MySQL) try to enforce this by default by calling fsync() to flush the buffers, but most hard drives don't actually flush all data. Calling the method slows down transaction commit a lot, but doesn't always make data durable. When comparing the results, it is important to think about the effect. Many database suggest to 'batch' operations when possible. This benchmark switches off autocommit when loading the data, and calls commit after each 1000 inserts. However many applications need 'short' transactions at runtime (a commit after each update). This benchmark commits after each update / delete in the simple benchmark, and after each business transaction in the other benchmarks. For databases that support delayed commits, a delay of one second is used. - -@performance_1278_h4 -#Using Prepared Statements - -@performance_1279_p -# Wherever possible, the test cases use prepared statements. - -@performance_1280_h4 -#Currently Not Tested: Startup Time - -@performance_1281_p -# The startup time of a database engine is important as well for embedded use. This time is not measured currently. Also, not tested is the time used to create a database and open an existing database. Here, one (wrapper) connection is opened at the start, and for each step a new connection is opened and then closed. - -@performance_1282_h2 -#PolePosition Benchmark - -@performance_1283_p -# The PolePosition is an open source benchmark. The algorithms are all quite simple. It was developed / sponsored by db4o. 
This test was not run for a longer time, so please be aware that the results below are for older database versions (H2 version 1.1, HSQLDB 1.8, Java 1.4). - -@performance_1284_th -#Test Case - -@performance_1285_th -#Unit - -@performance_1286_th -H2 - -@performance_1287_th -HSQLDB - -@performance_1288_th -MySQL - -@performance_1289_td -#Melbourne write - -@performance_1290_td -#ms - -@performance_1291_td -#369 - -@performance_1292_td -#249 - -@performance_1293_td -#2022 - -@performance_1294_td -#Melbourne read - -@performance_1295_td -#ms - -@performance_1296_td -#47 - -@performance_1297_td -#49 - -@performance_1298_td -#93 - -@performance_1299_td -#Melbourne read_hot - -@performance_1300_td -#ms - -@performance_1301_td -#24 - -@performance_1302_td -#43 - -@performance_1303_td -#95 - -@performance_1304_td -#Melbourne delete - -@performance_1305_td -#ms - -@performance_1306_td -#147 - -@performance_1307_td -#133 - -@performance_1308_td -#176 - -@performance_1309_td -#Sepang write - -@performance_1310_td -#ms - -@performance_1311_td -#965 - -@performance_1312_td -#1201 - -@performance_1313_td -#3213 - -@performance_1314_td -#Sepang read - -@performance_1315_td -#ms - -@performance_1316_td -#765 - -@performance_1317_td -#948 - -@performance_1318_td -#3455 - -@performance_1319_td -#Sepang read_hot - -@performance_1320_td -#ms - -@performance_1321_td -#789 - -@performance_1322_td -#859 - -@performance_1323_td -#3563 - -@performance_1324_td -#Sepang delete - -@performance_1325_td -#ms - -@performance_1326_td -#1384 - -@performance_1327_td -#1596 - -@performance_1328_td -#6214 - -@performance_1329_td -#Bahrain write - -@performance_1330_td -#ms - -@performance_1331_td -#1186 - -@performance_1332_td -#1387 - -@performance_1333_td -#6904 - -@performance_1334_td -#Bahrain query_indexed_string - -@performance_1335_td -#ms - -@performance_1336_td -#336 - -@performance_1337_td -#170 - -@performance_1338_td -#693 - -@performance_1339_td -#Bahrain query_string - 
-@performance_1340_td -#ms - -@performance_1341_td -#18064 - -@performance_1342_td -#39703 - -@performance_1343_td -#41243 - -@performance_1344_td -#Bahrain query_indexed_int - -@performance_1345_td -#ms - -@performance_1346_td -#104 - -@performance_1347_td -#134 - -@performance_1348_td -#678 - -@performance_1349_td -#Bahrain update - -@performance_1350_td -#ms - -@performance_1351_td -#191 - -@performance_1352_td -#87 - -@performance_1353_td -#159 - -@performance_1354_td -#Bahrain delete - -@performance_1355_td -#ms - -@performance_1356_td -#1215 - -@performance_1357_td -#729 - -@performance_1358_td -#6812 - -@performance_1359_td -#Imola retrieve - -@performance_1360_td -#ms - -@performance_1361_td -#198 - -@performance_1362_td -#194 - -@performance_1363_td -#4036 - -@performance_1364_td -#Barcelona write - -@performance_1365_td -#ms - -@performance_1366_td -#413 - -@performance_1367_td -#832 - -@performance_1368_td -#3191 - -@performance_1369_td -#Barcelona read - -@performance_1370_td -#ms - -@performance_1371_td -#119 - -@performance_1372_td -#160 - -@performance_1373_td -#1177 - -@performance_1374_td -#Barcelona query - -@performance_1375_td -#ms - -@performance_1376_td -#20 - -@performance_1377_td -#5169 - -@performance_1378_td -#101 - -@performance_1379_td -#Barcelona delete - -@performance_1380_td -#ms - -@performance_1381_td -#388 - -@performance_1382_td -#319 - -@performance_1383_td -#3287 - -@performance_1384_td -#Total - -@performance_1385_td -#ms - -@performance_1386_td -#26724 - -@performance_1387_td -#53962 - -@performance_1388_td -#87112 - -@performance_1389_p -# There are a few problems with the PolePosition test: - -@performance_1390_li -# HSQLDB uses in-memory tables by default while H2 uses persistent tables. 
The HSQLDB version included in PolePosition does not support changing this, so you need to replace poleposition-0.20/lib/hsqldb.jar with a newer version (for example hsqldb-1.8.0.7.jar), and then use the setting hsqldb.connecturl=jdbc:hsqldb:file:data/hsqldb/dbbench2;hsqldb.default_table_type=cached;sql.enforce_size=true in the file Jdbc.properties. - -@performance_1391_li -#HSQLDB keeps the database open between tests, while H2 closes the database (losing all the cache). To change that, use the database URL jdbc:h2:file:data/h2/dbbench;DB_CLOSE_DELAY=-1 - -@performance_1392_li -#The amount of cache memory is quite important, specially for the PolePosition test. Unfortunately, the PolePosition test does not take this into account. - -@performance_1393_h2 -#Database Performance Tuning - -@performance_1394_h3 -#Keep Connections Open or Use a Connection Pool - -@performance_1395_p -# If your application opens and closes connections a lot (for example, for each request), you should consider using a connection pool. Opening a connection using DriverManager.getConnection is specially slow if the database is closed. By default the database is closed if the last connection is closed. - -@performance_1396_p -# If you open and close connections a lot but don't want to use a connection pool, consider keeping a 'sentinel' connection open for as long as the application runs, or use delayed database closing. See also Closing a database. - -@performance_1397_h3 -#Use a Modern JVM - -@performance_1398_p -# Newer JVMs are faster. Upgrading to the latest version of your JVM can provide a "free" boost to performance. Switching from the default Client JVM to the Server JVM using the -server command-line option improves performance at the cost of a slight increase in start-up time. - -@performance_1399_h3 -#Virus Scanners - -@performance_1400_p -# Some virus scanners scan files every time they are accessed. 
It is very important for performance that database files are not scanned for viruses. The database engine never interprets the data stored in the files as programs, that means even if somebody would store a virus in a database file, this would be harmless (when the virus does not run, it cannot spread). Some virus scanners allow to exclude files by suffix. Ensure files ending with .db are not scanned. - -@performance_1401_h3 -トレースオプションを使用�?�る - -@performance_1402_p -# If the performance hot spots are in the database engine, in many cases the performance can be optimized by creating additional indexes, or changing the schema. Sometimes the application does not directly generate the SQL statements, for example if an O/R mapping tool is used. To view the SQL statements and JDBC API calls, you can use the trace options. For more information, see Using the Trace Options. - -@performance_1403_h3 -#Index Usage - -@performance_1404_p -# This database uses indexes to improve the performance of SELECT, UPDATE, DELETE. If a column is used in the WHERE clause of a query, and if an index exists on this column, then the index can be used. Multi-column indexes are used if all or the first columns of the index are used. Both equality lookup and range scans are supported. Indexes are used to order result sets, but only if the condition uses the same index or no index at all. The results are sorted in memory if required. Indexes are created automatically for primary key and unique constraints. Indexes are also created for foreign key constraints, if required. For other columns, indexes need to be created manually using the CREATE INDEX statement. - -@performance_1405_h3 -#Index Hints - -@performance_1406_p -# If you have determined that H2 is not using the optimal index for your query, you can use index hints to force H2 to use specific indexes. - -@performance_1407_p -#Only indexes in the list will be used when choosing an index to use on the given table. 
There is no significance to order in this list. - -@performance_1408_p -# It is possible that no index in the list is chosen, in which case a full table scan will be used. - -@performance_1409_p -#An empty list of index names forces a full table scan to be performed. - -@performance_1410_p -#Each index in the list must exist. - -@performance_1411_h3 -#How Data is Stored Internally - -@performance_1412_p -# For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT, then the data of the table is organized in this way. This is sometimes also called a "clustered index" or "index organized table". - -@performance_1413_p -# H2 internally stores table data and indexes in the form of b-trees. Each b-tree stores entries as a list of unique keys (one or more columns) and data (zero or more columns). The table data is always organized in the form of a "data b-tree" with a single column key of type long. If a single column primary key of type BIGINT, INT, SMALLINT, TINYINT is specified when creating the table (or just after creating the table, but before inserting any rows), then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, then a hidden auto-increment column of type BIGINT is added to the table, which is used as the key for the data b-tree. All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB columns, which are stored externally). - -@performance_1414_p -# For each additional index, one new "index b-tree" is created. The key of this b-tree consists of the indexed columns, plus the key of the data b-tree. 
If a primary key is created after the table has been created, or if the primary key contains multiple columns, or if the primary key is not of the data types listed above, then the primary key is stored in a new index b-tree. - -@performance_1415_h3 -#Optimizer - -@performance_1416_p -# This database uses a cost based optimizer. For simple queries and queries with medium complexity (less than 7 tables in the join), the expected cost (running time) of all possible plans is calculated, and the plan with the lowest cost is used. For more complex queries, the algorithm first tries all possible combinations for the first few tables, and the remaining tables are added using a greedy algorithm (this works well for most joins). Afterwards a genetic algorithm is used to test at most 2000 distinct plans. Only left-deep plans are evaluated. - -@performance_1417_h3 -#Expression Optimization - -@performance_1418_p -# After the statement is parsed, all expressions are simplified automatically if possible. Operations are evaluated only once if all parameters are constant. Functions are also optimized, but only if the function is constant (always returns the same result for the same parameter values). If the WHERE clause is always false, then the table is not accessed at all. - -@performance_1419_h3 -#COUNT(*) Optimization - -@performance_1420_p -# If the query only counts all rows of a table, then the data is not accessed. However, this is only possible if no WHERE clause is used, that means it only works for queries of the form SELECT COUNT(*) FROM table. - -@performance_1421_h3 -#Updating Optimizer Statistics / Column Selectivity - -@performance_1422_p -# When executing a query, at most one index per join can be used. If the same table is joined multiple times, for each join only one index is used (the same index could be used for both joins, or each join could use a different index).
Example: for the query SELECT * FROM TEST T1, TEST T2 WHERE T1.NAME='A' AND T2.ID=T1.ID, two indexes can be used, in this case the index on NAME for T1 and the index on ID for T2. - -@performance_1423_p -# If a table has multiple indexes, sometimes more than one index could be used. Example: if there is a table TEST(ID, NAME, FIRSTNAME) and an index on each column, then two indexes could be used for the query SELECT * FROM TEST WHERE NAME='A' AND FIRSTNAME='B', the index on NAME or the index on FIRSTNAME. It is not possible to use both indexes at the same time. Which index is used depends on the selectivity of the column. The selectivity describes the 'uniqueness' of values in a column. A selectivity of 100 means each value appears only once, and a selectivity of 1 means the same value appears in many or most rows. For the query above, the index on NAME should be used if the table contains more distinct names than first names. - -@performance_1424_p -# The SQL statement ANALYZE can be used to automatically estimate the selectivity of the columns in the tables. This command should be run from time to time to improve the query plans generated by the optimizer. - -@performance_1425_h3 -#In-Memory (Hash) Indexes - -@performance_1426_p -# Using in-memory indexes, specially in-memory hash indexes, can speed up queries and data manipulation. - -@performance_1427_p -#In-memory indexes are automatically used for in-memory databases, but can also be created for persistent databases using CREATE MEMORY TABLE. In many cases, the rows themselves will also be kept in-memory. Please note this may cause memory problems for large tables. - -@performance_1428_p -# In-memory hash indexes are backed by a hash table and are usually faster than regular indexes. However, hash indexes only support direct lookup (WHERE ID = ?) but not range scan (WHERE ID < ?). To use hash indexes, use HASH as in: CREATE UNIQUE HASH INDEX and CREATE TABLE ...(ID INT PRIMARY KEY HASH,...).
- -@performance_1429_h3 -#Use Prepared Statements - -@performance_1430_p -# If possible, use prepared statements with parameters. - -@performance_1431_h3 -#Prepared Statements and IN(...) - -@performance_1432_p -# Avoid generating SQL statements with a variable size IN(...) list. Instead, use a prepared statement with arrays as in the following example: - -@performance_1433_h3 -#Optimization Examples - -@performance_1434_p -# See src/test/org/h2/samples/optimizations.sql for a few examples of queries that benefit from special optimizations built into the database. - -@performance_1435_h3 -#Cache Size and Type - -@performance_1436_p -# By default the cache size of H2 is quite small. Consider using a larger cache size, or enable the second level soft reference cache. See also Cache Settings. - -@performance_1437_h3 -データ型 - -@performance_1438_p -# Each data type has different storage and performance characteristics: - -@performance_1439_li -#The DECIMAL/NUMERIC type is slower and requires more storage than the REAL and DOUBLE types. - -@performance_1440_li -#Text types are slower to read, write, and compare than numeric types and generally require more storage. - -@performance_1441_li -#See Large Objects for information on BINARY vs. BLOB and VARCHAR vs. CLOB performance. - -@performance_1442_li -#Parsing and formatting takes longer for the TIME, DATE, and TIMESTAMP types than the numeric types. - -@performance_1443_code -#SMALLINT/TINYINT/BOOLEAN - -@performance_1444_li -# are not significantly smaller or faster to work with than INTEGER in most modes. - -@performance_1445_h3 -#Sorted Insert Optimization - -@performance_1446_p -# To reduce disk space usage and speed up table creation, an optimization for sorted inserts is available. When used, b-tree pages are split at the insertion point. 
To use this optimization, add SORTED before the SELECT statement: - -@performance_1447_h2 -#Using the Built-In Profiler - -@performance_1448_p -# A very simple Java profiler is built-in. To use it, use the following template: - -@performance_1449_h2 -#Application Profiling - -@performance_1450_h3 -#Analyze First - -@performance_1451_p -# Before trying to optimize performance, it is important to understand where the problem is (what part of the application is slow). Blind optimization or optimization based on guesses should be avoided, because usually it is not an efficient strategy. There are various ways to analyze an application. Sometimes two implementations can be compared using System.currentTimeMillis(). But this does not work for complex applications with many modules, and for memory problems. - -@performance_1452_p -# A simple way to profile an application is to use the built-in profiling tool of java. Example: - -@performance_1453_p -# Unfortunately, it is only possible to profile the application from start to end. Another solution is to create a number of full thread dumps. To do that, first run jps -l to get the process id, and then run jstack <pid> or kill -QUIT <pid> (Linux) or press Ctrl+C (Windows). - -@performance_1454_p -# A simple profiling tool is included in H2. To use it, the application needs to be changed slightly. Example: - -@performance_1455_p -# The profiler is built into the H2 Console tool, to analyze databases that open slowly. To use it, run the H2 Console, and then click on 'Test Connection'. Afterwards, click on "Test successful" and you get the most common stack traces, which helps to find out why it took so long to connect. You will only get the stack traces if opening the database took more than a few seconds. - -@performance_1456_h2 -#Database Profiling - -@performance_1457_p -# The ConvertTraceFile tool generates SQL statement statistics at the end of the SQL script file. 
The format used is similar to the profiling data generated when using java -Xrunhprof. For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE=2). The easiest way to set the trace level is to append the setting to the database URL, for example: jdbc:h2:~/test;TRACE_LEVEL_FILE=2 or jdbc:h2:tcp://localhost/~/test;TRACE_LEVEL_FILE=2. As an example, execute the following script using the H2 Console: - -@performance_1458_p -# After running the test case, convert the .trace.db file using the ConvertTraceFile tool. The trace file is located in the same directory as the database file. - -@performance_1459_p -# The generated file test.sql will contain the SQL statements as well as the following profiling data (results vary): - -@performance_1460_h2 -#Statement Execution Plans - -@performance_1461_p -# The SQL statement EXPLAIN displays the indexes and optimizations the database uses for a statement. The following statements support EXPLAIN: SELECT, UPDATE, DELETE, MERGE, INSERT. The following query shows that the database uses the primary key index to search for rows: - -@performance_1462_p -# For joins, the tables in the execution plan are sorted in the order they are processed. The following query shows the database first processes the table INVOICE (using the primary key). For each row, it will additionally check that the value of the column AMOUNT is larger than zero, and for those rows the database will search in the table CUSTOMER (using the primary key). The query plan contains some redundancy so it is a valid statement. - -@performance_1463_h3 -#Displaying the Scan Count - -@performance_1464_code -#EXPLAIN ANALYZE - -@performance_1465_p -# additionally shows the scanned rows per table and pages read from disk per table or index. This will actually execute the query, unlike EXPLAIN which only prepares it. The following query scanned 1000 rows, and to do that had to read 85 pages from the data area of the table.
Running the query twice will not list the pages read from disk, because they are now in the cache. The tableScan means this query doesn't use an index. - -@performance_1466_p -# The cache will prevent the pages from being read twice. H2 reads all columns of the row unless only the columns in the index are read. Except for large CLOB and BLOB, which are not stored in the table. - -@performance_1467_h3 -#Special Optimizations - -@performance_1468_p -# For certain queries, the database doesn't need to read all rows, or doesn't need to sort the result even if ORDER BY is used. - -@performance_1469_p -# For queries of the form SELECT COUNT(*), MIN(ID), MAX(ID) FROM TEST, the query plan includes the line /* direct lookup */ if the data can be read from an index. - -@performance_1470_p -# For queries of the form SELECT DISTINCT CUSTOMER_ID FROM INVOICE, the query plan includes the line /* distinct */ if there is a non-unique or multi-column index on this column, and if this column has a low selectivity. - -@performance_1471_p -# For queries of the form SELECT * FROM TEST ORDER BY ID, the query plan includes the line /* index sorted */ to indicate there is no separate sorting required. - -@performance_1472_p -# For queries of the form SELECT * FROM TEST GROUP BY ID ORDER BY ID, the query plan includes the line /* group sorted */ to indicate there is no separate sorting required. - -@performance_1473_h2 -#How Data is Stored and How Indexes Work - -@performance_1474_p -# Internally, each row in a table is identified by a unique number, the row id. The rows of a table are stored with the row id as the key. The row id is a number of type long. If a table has a single column primary key of type INT or BIGINT, then the value of this column is the row id, otherwise the database generates the row id automatically.
There is a (non-standard) way to access the row id: using the _ROWID_ pseudo-column: - -@performance_1475_p -# The data is stored in the database as follows: - -@performance_1476_th -#_ROWID_ - -@performance_1477_th -#FIRST_NAME - -@performance_1478_th -#NAME - -@performance_1479_th -#CITY - -@performance_1480_th -#PHONE - -@performance_1481_td -#1 - -@performance_1482_td -#John - -@performance_1483_td -#Miller - -@performance_1484_td -#Berne - -@performance_1485_td -#123 456 789 - -@performance_1486_td -#2 - -@performance_1487_td -#Philip - -@performance_1488_td -#Jones - -@performance_1489_td -#Berne - -@performance_1490_td -#123 012 345 - -@performance_1491_p -# Access by row id is fast because the data is sorted by this key. Please note the row id is not available until after the row was added (that means, it can not be used in computed columns or constraints). If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT: - -@performance_1492_h3 -#Indexes - -@performance_1493_p -# An index internally is basically just a table that contains the indexed column(s), plus the row id: - -@performance_1494_p -# In the index, the data is sorted by the indexed columns. 
So this index contains the following data: - -@performance_1495_th -#CITY - -@performance_1496_th -#NAME - -@performance_1497_th -#FIRST_NAME - -@performance_1498_th -#_ROWID_ - -@performance_1499_td -#Berne - -@performance_1500_td -#Jones - -@performance_1501_td -#Philip - -@performance_1502_td -#2 - -@performance_1503_td -#Berne - -@performance_1504_td -#Miller - -@performance_1505_td -#John - -@performance_1506_td -#1 - -@performance_1507_p -# When the database uses an index to query the data, it searches the index for the given data, and (if required) reads the remaining columns in the main data table (retrieved using the row id). An index on city, name, and first name (multi-column index) allows to quickly search for rows when the city, name, and first name are known. If only the city and name, or only the city is known, then this index is also used (so creating an additional index on just the city is not needed). This index is also used when reading all rows, sorted by the indexed columns. However, if only the first name is known, then this index is not used: - -@performance_1508_p -# If your application often queries the table for a phone number, then it makes sense to create an additional index on it: - -@performance_1509_p -# This index contains the phone number, and the row id: - -@performance_1510_th -#PHONE - -@performance_1511_th -#_ROWID_ - -@performance_1512_td -#123 012 345 - -@performance_1513_td -#2 - -@performance_1514_td -#123 456 789 - -@performance_1515_td -#1 - -@performance_1516_h3 -#Using Multiple Indexes - -@performance_1517_p -# Within a query, only one index per logical table is used. Using the condition PHONE = '123 567 789' OR CITY = 'Berne' would use a table scan instead of first using the index on the phone number and then the index on the city. It makes sense to write two queries and combine them using UNION.
In this case, each individual query uses a different index: - -@performance_1518_h2 -#Fast Database Import - -@performance_1519_p -# To speed up large imports, consider using the following options temporarily: - -@performance_1520_code -#SET LOG 0 - -@performance_1521_li -# (disabling the transaction log) - -@performance_1522_code -#SET CACHE_SIZE - -@performance_1523_li -# (a large cache is faster) - -@performance_1524_code -#SET LOCK_MODE 0 - -@performance_1525_li -# (disable locking) - -@performance_1526_code -#SET UNDO_LOG 0 - -@performance_1527_li -# (disable the session undo log) - -@performance_1528_p -# These options can be set in the database URL: jdbc:h2:~/test;LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0. Most of those options are not recommended for regular use, that means you need to reset them after use. - -@performance_1529_p -# If you have to import a lot of rows, use a PreparedStatement or use CSV import. Please note that CREATE TABLE(...) ... AS SELECT ... is faster than CREATE TABLE(...); INSERT INTO ... SELECT .... - -@quickstart_1000_h1 -クイックスタート - -@quickstart_1001_a -# Embedding H2 in an Application - -@quickstart_1002_a -# The H2 Console Application - -@quickstart_1003_h2 -アプリケーション�?�エンベッドH2 - -@quickstart_1004_p -# This database can be used in embedded mode, or in server mode. To use it in embedded mode, you need to: - -@quickstart_1005_li -#Add the h2*.jar to the classpath (H2 does not have any dependencies) - -@quickstart_1006_li -#Use the JDBC driver class: org.h2.Driver - -@quickstart_1007_li -#The database URL jdbc:h2:~/test opens the database test in your user home directory - -@quickstart_1008_li -#A new database is automatically created - -@quickstart_1009_h2 -H2 コンソール アプリケーション - -@quickstart_1010_p -# The Console lets you access a SQL database using a browser interface. - -@quickstart_1011_p -# If you don't have Windows XP, or if something does not work as expected, please see the detailed description in the Tutorial. 
- -@quickstart_1012_h3 -手順 - -@quickstart_1013_h4 -インストール - -@quickstart_1014_p -# Install the software using the Windows Installer (if you did not yet do that). - -@quickstart_1015_h4 -コンソールを起動�?�る - -@quickstart_1016_p -# Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]: - -@quickstart_1017_p -# A new console window appears: - -@quickstart_1018_p -# Also, a new browser page should open with the URL http://localhost:8082. You may get a security warning from the firewall. If you don't want other computers in the network to access the database on your machine, you can let the firewall block these connections. Only local connections are required at this time. - -@quickstart_1019_h4 -ログイン - -@quickstart_1020_p -# Select [Generic H2] and click [Connect]: - -@quickstart_1021_p -# You are now logged in. - -@quickstart_1022_h4 -サンプル - -@quickstart_1023_p -# Click on the [Sample SQL Script]: - -@quickstart_1024_p -# The SQL commands appear in the command area. - -@quickstart_1025_h4 -実行�?�る - -@quickstart_1026_p -# Click [Run] - -@quickstart_1027_p -# On the left side, a new entry TEST is added below the database icon. The operations and results of the statements are shown below the script. - -@quickstart_1028_h4 -切断 - -@quickstart_1029_p -# Click on [Disconnect]: - -@quickstart_1030_p -# to close the connection. - -@quickstart_1031_h4 -終了 - -@quickstart_1032_p -# Close the console window. For more information, see the Tutorial. - -@roadmap_1000_h1 -ロードマップ - -@roadmap_1001_p -# New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests. Of course, patches are always welcome, but are not always applied as is. See also Providing Patches. - -@roadmap_1002_h2 -#Version 1.5.x: Planned Changes - -@roadmap_1003_li -#Replace file password hash with file encryption key; validate encryption key when connecting. - -@roadmap_1004_li -#Remove "set binary collation" feature. 
- -@roadmap_1005_li -#Remove the encryption algorithm XTEA. - -@roadmap_1006_li -#Disallow referencing other tables in a table (via constraints for example). - -@roadmap_1007_li -#Remove PageStore features like compress_lob. - -@roadmap_1008_h2 -#Version 1.4.x: Planned Changes - -@roadmap_1009_li -#Change license to MPL 2.0. - -@roadmap_1010_li -#Automatic migration from 1.3 databases to 1.4. - -@roadmap_1011_li -#Option to disable the file name suffix somehow (issue 447). - -@roadmap_1012_h2 -#Priority 1 - -@roadmap_1013_li -#Bugfixes. - -@roadmap_1014_li -#More tests with MULTI_THREADED=1 (and MULTI_THREADED with MVCC): Online backup (using the 'backup' statement). - -@roadmap_1015_li -#Server side cursors. - -@roadmap_1016_h2 -#Priority 2 - -@roadmap_1017_li -#Support hints for the optimizer (which index to use, enforce the join order). - -@roadmap_1018_li -#Full outer joins. - -@roadmap_1019_li -#Access rights: remember the owner of an object. Create, alter and drop privileges. COMMENT: allow owner of object to change it. Issue 208: Access rights for schemas. - -@roadmap_1020_li -#Test multi-threaded in-memory db access. - -@roadmap_1021_li -#MySQL, MS SQL Server compatibility: support case sensitive (mixed case) identifiers without quotes. - -@roadmap_1022_li -#Support GRANT SELECT, UPDATE ON [schemaName.] *. - -@roadmap_1023_li -#Migrate database tool (also from other database engines). For Oracle, maybe use DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL. - -@roadmap_1024_li -#Clustering: support mixed clustering mode (one embedded, others in server mode). - -@roadmap_1025_li -#Clustering: reads should be randomly distributed (optional) or to a designated database on RAM (parameter: READ_FROM=3). - -@roadmap_1026_li -#Window functions: RANK() and DENSE_RANK(), partition using OVER(). select *, count(*) over() as fullCount from ... limit 4; - -@roadmap_1027_li -#PostgreSQL catalog: use BEFORE SELECT triggers instead of views over metadata tables. 
- -@roadmap_1028_li -#Compatibility: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211. - -@roadmap_1029_li -#Test very large databases and LOBs (up to 256 GB). - -@roadmap_1030_li -#Store all temp files in the temp directory. - -@roadmap_1031_li -#Don't use temp files, specially not deleteOnExit (bug 4513817: File.deleteOnExit consumes memory). Also to allow opening client / server (remote) connections when using LOBs. - -@roadmap_1032_li -#Make DDL (Data Definition) operations transactional. - -@roadmap_1033_li -#Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED). - -@roadmap_1034_li -#Groovy Stored Procedures: http://groovy.codehaus.org/GSQL - -@roadmap_1035_li -#Add a migration guide (list differences between databases). - -@roadmap_1036_li -#Optimization: automatic index creation suggestion using the trace file? - -@roadmap_1037_li -#Fulltext search Lucene: analyzer configuration, mergeFactor. - -@roadmap_1038_li -#Compression performance: don't allocate buffers, compress / expand in to out buffer. - -@roadmap_1039_li -#Rebuild index functionality to shrink index size and improve performance. - -@roadmap_1040_li -#Console: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA). - -@roadmap_1041_li -#Test performance again with SQL Server, Oracle, DB2. - -@roadmap_1042_li -#Test with Spatial DB in a box / JTS: http://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification. - -@roadmap_1043_li -#Write more tests and documentation for MVCC (Multi Version Concurrency Control). - -@roadmap_1044_li -#Find a tool to view large text file (larger than 100 MB), with find, page up and down (like less), truncate before / after. - -@roadmap_1045_li -#Implement, test, document XAConnection and so on. - -@roadmap_1046_li -#Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption). 
- -@roadmap_1047_li -#CHECK: find out what makes CHECK=TRUE slow, move to CHECK2. - -@roadmap_1048_li -#Drop with invalidate views (so that source code is not lost). Check what other databases do exactly. - -@roadmap_1049_li -#Index usage for (ID, NAME)=(1, 'Hi'); document. - -@roadmap_1050_li -#Set a connection read only (Connection.setReadOnly) or using a connection parameter. - -@roadmap_1051_li -#Access rights: finer grained access control (grant access for specific functions). - -@roadmap_1052_li -#ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]). - -@roadmap_1053_li -#Version check: docs / web console (using Javascript), and maybe in the library (using TCP/IP). - -@roadmap_1054_li -#Web server classloader: override findResource / getResourceFrom. - -@roadmap_1055_li -#Cost for embedded temporary view is calculated wrong, if result is constant. - -@roadmap_1056_li -#Count index range query (count(*) where id between 10 and 20). - -@roadmap_1057_li -#Performance: update in-place. - -@roadmap_1058_li -#Clustering: when a database is back alive, automatically synchronize with the master (requires readable transaction log). - -@roadmap_1059_li -#Database file name suffix: a way to use no or a different suffix (for example using a slash). - -@roadmap_1060_li -#Eclipse plugin. - -@roadmap_1061_li -#Asynchronous queries to support publish/subscribe: SELECT ... FOR READ WAIT [maxMillisToWait]. See also MS SQL Server "Query Notification". - -@roadmap_1062_li -#Fulltext search (native): reader / tokenizer / filter. - -@roadmap_1063_li -#Linked schema using CSV files: one schema for a directory of files; support indexes for CSV files. - -@roadmap_1064_li -#iReport to support H2. - -@roadmap_1065_li -#Include SMTP (mail) client (alert on cluster failure, low disk space,...). - -@roadmap_1066_li -#Option for SCRIPT to only process one or a set of schemas or tables, and append to a file. - -@roadmap_1067_li -#JSON parser and functions. 
- -@roadmap_1068_li -#Copy database: tool with config GUI and batch mode, extensible (example: compare). - -@roadmap_1069_li -#Document, implement tool for long running transactions using user-defined compensation statements. - -@roadmap_1070_li -#Support SET TABLE DUAL READONLY. - -@roadmap_1071_li -#GCJ: what is the state now? - -@roadmap_1072_li -#Events for: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http://docs.openlinksw.com/virtuoso/fn_dbev_startup.html - -@roadmap_1073_li -#Optimization: simpler log compression. - -@roadmap_1074_li -#Support standard INFORMATION_SCHEMA tables, as defined in http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - specially KEY_COLUMN_USAGE: http://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif - -@roadmap_1075_li -#Compatibility: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby: division by zero. HSQLDB: 0.0e1 / 0.0e1 is NaN. - -@roadmap_1076_li -#Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R). - -@roadmap_1077_li -#Custom class loader to reload functions on demand. - -@roadmap_1078_li -#Test http://mysql-je.sourceforge.net/ - -@roadmap_1079_li -#H2 Console: the webclient could support more features like phpMyAdmin. - -@roadmap_1080_li -#Support Oracle functions: TO_NUMBER. - -@roadmap_1081_li -#Work on the Java to C converter. - -@roadmap_1082_li -#The HELP information schema can be directly exposed in the Console. - -@roadmap_1083_li -#Maybe use the 0x1234 notation for binary fields, see MS SQL Server. - -@roadmap_1084_li -#Support Oracle CONNECT BY in some way: http://www.adp-gmbh.ch/ora/sql/connect_by.html http://philip.greenspun.com/sql/trees.html - -@roadmap_1085_li -#SQL Server 2005, Oracle: support COUNT(*) OVER(). 
See http://www.orafusion.com/art_anlytc.htm - -@roadmap_1086_li -#SQL 2003: http://www.wiscorp.com/sql_2003_standard.zip - -@roadmap_1087_li -#Version column (number/sequence and timestamp based). - -@roadmap_1088_li -#Optimize getGeneratedKey: send last identity after each execute (server). - -@roadmap_1089_li -#Test and document UPDATE TEST SET (ID, NAME) = (SELECT ID*10, NAME || '!' FROM TEST T WHERE T.ID=TEST.ID). - -@roadmap_1090_li -#Max memory rows / max undo log size: use block count / row size not row count. - -@roadmap_1091_li -#Implement point-in-time recovery. - -@roadmap_1092_li -#Support PL/SQL (programming language / control flow statements). - -@roadmap_1093_li -#LIKE: improved version for larger texts (currently using naive search). - -@roadmap_1094_li -#Throw an exception when the application calls getInt on a Long (optional). - -@roadmap_1095_li -#Default date format for input and output (local date constants). - -@roadmap_1096_li -#Document ROWNUM usage for reports: SELECT ROWNUM, * FROM (subquery). - -@roadmap_1097_li -#File system that writes to two file systems (replication, replicating file system). - -@roadmap_1098_li -#Standalone tool to get relevant system properties and add it to the trace output. - -@roadmap_1099_li -#Support 'call proc(1=value)' (PostgreSQL, Oracle). - -@roadmap_1100_li -#Console: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?). - -@roadmap_1101_li -#Console: autocomplete Ctrl+Space inserts template. - -@roadmap_1102_li -#Option to encrypt .trace.db file. - -@roadmap_1103_li -#Auto-Update feature for database, .jar file. - -@roadmap_1104_li -#ResultSet SimpleResultSet.readFromURL(String url): id varchar, state varchar, released timestamp. - -@roadmap_1105_li -#Partial indexing (see PostgreSQL). - -@roadmap_1106_li -#Add GUI to build a custom version (embedded, fulltext,...) using build flags. 
- -@roadmap_1107_li -#http://rubyforge.org/projects/hypersonic/ - -@roadmap_1108_li -#Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app). - -@roadmap_1109_li -#Table order: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility). - -@roadmap_1110_li -#Backup tool should work with other databases as well. - -@roadmap_1111_li -#Console: -ifExists doesn't work for the console. Add a flag to disable other dbs. - -@roadmap_1112_li -#Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess). - -@roadmap_1113_li -#Java static code analysis: http://pmd.sourceforge.net/ - -@roadmap_1114_li -#Java static code analysis: http://www.eclipse.org/tptp/ - -@roadmap_1115_li -#Compatibility for CREATE SCHEMA AUTHORIZATION. - -@roadmap_1116_li -#Implement Clob / Blob truncate and the remaining functionality. - -@roadmap_1117_li -#Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ... - -@roadmap_1118_li -#File locking: writing a system property to detect concurrent access from the same VM (different classloaders). - -@roadmap_1119_li -#Pure SQL triggers (example: update parent table if the child table is changed). - -@roadmap_1120_li -#Add H2 to Gem (Ruby install system). - -@roadmap_1121_li -#Support linked JCR tables. - -@roadmap_1122_li -#Native fulltext search: min word length; store word positions. - -@roadmap_1123_li -#Add an option to the SCRIPT command to generate only portable / standard SQL. - -@roadmap_1124_li -#Updatable views: create 'instead of' triggers automatically if possible (simple cases first). - -@roadmap_1125_li -#Improve create index performance. - -@roadmap_1126_li -#Compact databases without having to close the database (vacuum). - -@roadmap_1127_li -#Implement more JDBC 4.0 features. - -@roadmap_1128_li -#Support TRANSFORM / PIVOT as in MS Access. 
- -@roadmap_1129_li -#SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...). - -@roadmap_1130_li -#Support updatable views with join on primary keys (to extend a table). - -@roadmap_1131_li -#Public interface for functions (not public static). - -@roadmap_1132_li -#Support reading the transaction log. - -@roadmap_1133_li -#Feature matrix as in i-net software. - -@roadmap_1134_li -#Updatable result set on table without primary key or unique index. - -@roadmap_1135_li -#Compatibility with Derby and PostgreSQL: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221. - -@roadmap_1136_li -#Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString') - -@roadmap_1137_li -#Support data type INTERVAL - -@roadmap_1138_li -#Support nested transactions (possibly using savepoints internally). - -@roadmap_1139_li -#Add a benchmark for bigger databases, and one for many users. - -@roadmap_1140_li -#Compression in the result set over TCP/IP. - -@roadmap_1141_li -#Support curtimestamp (like curtime, curdate). - -@roadmap_1142_li -#Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options. - -@roadmap_1143_li -#Release locks (shared or exclusive) on demand - -@roadmap_1144_li -#Support OUTER UNION - -@roadmap_1145_li -#Support parameterized views (similar to CSVREAD, but using just SQL for the definition) - -@roadmap_1146_li -#A way (JDBC driver) to map an URL (jdbc:h2map:c1) to a connection object - -@roadmap_1147_li -#Support dynamic linked schema (automatically adding/updating/removing tables) - -@roadmap_1148_li -#Clustering: adding a node should be very fast and without interrupting clients (very short lock) - -@roadmap_1149_li -#Compatibility: # is the start of a single line comment (MySQL) but date quote (Access). Mode specific - -@roadmap_1150_li -#Run benchmarks with Android, Java 7, java -server - -@roadmap_1151_li -#Optimizations: faster hash function for strings. 
- -@roadmap_1152_li -#DatabaseEventListener: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality - -@roadmap_1153_li -#Benchmark: add a graph to show how databases scale (performance/database size) - -@roadmap_1154_li -#Implement a SQLData interface to map your data over to a custom object - -@roadmap_1155_li -#In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers = true) - -@roadmap_1156_li -#Support multiple directories (on different hard drives) for the same database - -@roadmap_1157_li -#Server protocol: use challenge response authentication, but client sends hash(user+password) encrypted with response - -@roadmap_1158_li -#Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server) - -@roadmap_1159_li -#Support native XML data type - see http://en.wikipedia.org/wiki/SQL/XML - -@roadmap_1160_li -#Support triggers with a string property or option: SpringTrigger, OSGITrigger - -@roadmap_1161_li -#MySQL compatibility: update test1 t1, test2 t2 set t1.id = t2.id where t1.id = t2.id; - -@roadmap_1162_li -#Ability to resize the cache array when resizing the cache - -@roadmap_1163_li -#Time based cache writing (one second after writing the log) - -@roadmap_1164_li -#Check state of H2 driver for DDLUtils: http://issues.apache.org/jira/browse/DDLUTILS-185 - -@roadmap_1165_li -#Index usage for REGEXP LIKE. - -@roadmap_1166_li -#Compatibility: add a role DBA (like ADMIN). - -@roadmap_1167_li -#Better support multiple processors for in-memory databases. - -@roadmap_1168_li -#Support N'text' - -@roadmap_1169_li -#Support compatibility for jdbc:hsqldb:res: - -@roadmap_1170_li -#HSQLDB compatibility: automatically convert to the next 'higher' data type. 
Example: cast(2000000000 as int) + cast(2000000000 as int); (HSQLDB: long; PostgreSQL: integer out of range) - -@roadmap_1171_li -#Provide an Java SQL builder with standard and H2 syntax - -@roadmap_1172_li -#Trace: write OS, file system, JVM,... when opening the database - -@roadmap_1173_li -#Support indexes for views (probably requires materialized views) - -@roadmap_1174_li -#Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters - -@roadmap_1175_li -#Server: use one listener (detect if the request comes from an PG or TCP client) - -@roadmap_1176_li -#Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200 - -@roadmap_1177_li -#Sequence: PostgreSQL compatibility (rename, create) http://www.postgresql.org/docs/8.2/static/sql-altersequence.html - -@roadmap_1178_li -#DISTINCT: support large result sets by sorting on all columns (additionally) and then removing duplicates. - -@roadmap_1179_li -#Support a special trigger on all tables to allow building a transaction log reader. - -@roadmap_1180_li -#File system with a background writer thread; test if this is faster - -@roadmap_1181_li -#Better document the source code (high level documentation). - -@roadmap_1182_li -#Support select * from dual a left join dual b on b.x=(select max(x) from dual) - -@roadmap_1183_li -#Optimization: don't lock when the database is read-only - -@roadmap_1184_li -#Issue 146: Support merge join. - -@roadmap_1185_li -#Integrate spatial functions from http://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download - -@roadmap_1186_li -#Cluster: hot deploy (adding a node at runtime). - -@roadmap_1187_li -#Support DatabaseMetaData.insertsAreDetected: updatable result sets should detect inserts. - -@roadmap_1188_li -#Oracle: support DECODE method (convert to CASE WHEN). - -@roadmap_1189_li -#Native search: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping - -@roadmap_1190_li -#Improve documentation of access rights. 
- -@roadmap_1191_li -#Support opening a database that is in the classpath, maybe using a new file system. Workaround: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation(). - -@roadmap_1192_li -#Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others). - -@roadmap_1193_li -#Remember the user defined data type (domain) of a column. - -@roadmap_1194_li -#MVCC: support multi-threaded kernel with multi-version concurrency. - -@roadmap_1195_li -#Auto-server: add option to define the port range or list. - -@roadmap_1196_li -#Support Jackcess (MS Access databases) - -@roadmap_1197_li -#Built-in methods to write large objects (BLOB and CLOB): FILE_WRITE('test.txt', 'Hello World') - -@roadmap_1198_li -#Improve time to open large databases (see mail 'init time for distributed setup') - -@roadmap_1199_li -#Move Maven 2 repository from hsql.sf.net to h2database.sf.net - -@roadmap_1200_li -#Java 1.5 tool: JdbcUtils.closeSilently(s1, s2,...) - -@roadmap_1201_li -#Optimize A=? OR B=? to UNION if the cost is lower. - -@roadmap_1202_li -#Javadoc: document design patterns used - -@roadmap_1203_li -#Support custom collators, for example for natural sort (for text that contains numbers). - -@roadmap_1204_li -#Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt) - -@roadmap_1205_li -#Convert SQL-injection-2.txt to html document, include SQLInjection.java sample - -@roadmap_1206_li -#Support OUT parameters in user-defined procedures. - -@roadmap_1207_li -#Web site design: http://www.igniterealtime.org/projects/openfire/index.jsp - -@roadmap_1208_li -#HSQLDB compatibility: Openfire server uses: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC - -@roadmap_1209_li -#Translation: use ?? 
in help.csv - -@roadmap_1210_li -#Translated .pdf - -@roadmap_1211_li -#Recovery tool: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file - -@roadmap_1212_li -#Issue 357: support getGeneratedKeys to return multiple rows when used with batch updates. This is supported by MySQL, but not Derby. Both PostgreSQL and HSQLDB don't support getGeneratedKeys. Also support it when using INSERT ... SELECT. - -@roadmap_1213_li -#RECOVER=2 to backup the database, run recovery, open the database - -@roadmap_1214_li -#Recovery should work with encrypted databases - -@roadmap_1215_li -#Corruption: new error code, add help - -@roadmap_1216_li -#Space reuse: after init, scan all storages and free those that don't belong to a live database object - -@roadmap_1217_li -#Access rights: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects) - -@roadmap_1218_li -#Support NOCACHE table option (Oracle). - -@roadmap_1219_li -#Support table partitioning. - -@roadmap_1220_li -#Add regular javadocs (using the default doclet, but another css) to the homepage. - -@roadmap_1221_li -#The database should be kept open for a longer time when using the server mode. - -@roadmap_1222_li -#Javadocs: for each tool, add a copy & paste sample in the class level. - -@roadmap_1223_li -#Javadocs: add @author tags. - -@roadmap_1224_li -#Fluent API for tools: Server.createTcpServer().setPort(9081).setPassword(password).start(); - -@roadmap_1225_li -#MySQL compatibility: real SQL statement for DESCRIBE TEST - -@roadmap_1226_li -#Use a default delay of 1 second before closing a database. - -@roadmap_1227_li -#Write (log) to system table before adding to internal data structures. - -@roadmap_1228_li -#Support direct lookup for MIN and MAX when using WHERE (see todo.txt / Direct Lookup). 
- -@roadmap_1229_li -#Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case). - -@roadmap_1230_li -#MVCC should not be memory bound (uncommitted data is kept in memory in the delta index; maybe using a regular b-tree index solves the problem). - -@roadmap_1231_li -#Oracle compatibility: support NLS_DATE_FORMAT. - -@roadmap_1232_li -#Support for Thread.interrupt to cancel running statements. - -@roadmap_1233_li -#Cluster: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process). - -@roadmap_1234_li -#H2 Console: support CLOB/BLOB download using a link. - -@roadmap_1235_li -#Support flashback queries as in Oracle. - -@roadmap_1236_li -#Import / Export of fixed with text files. - -@roadmap_1237_li -#HSQLDB compatibility: automatic data type for SUM if value is the value is too big (by default use the same type as the data). - -@roadmap_1238_li -#Improve the optimizer to select the right index for special cases: where id between 2 and 4 and booleanColumn - -@roadmap_1239_li -#Linked tables: make hidden columns available (Oracle: rowid and ora_rowscn columns). - -@roadmap_1240_li -#H2 Console: in-place autocomplete. - -@roadmap_1241_li -#Support large databases: split database files to multiple directories / disks (similar to tablespaces). - -@roadmap_1242_li -#H2 Console: support configuration option for fixed width (monospace) font. - -@roadmap_1243_li -#Native fulltext search: support analyzers (specially for Chinese, Japanese). - -@roadmap_1244_li -#Automatically compact databases from time to time (as a background process). - -@roadmap_1245_li -#Test Eclipse DTP. - -@roadmap_1246_li -#H2 Console: autocomplete: keep the previous setting - -@roadmap_1247_li -#executeBatch: option to stop at the first failed statement. 
- -@roadmap_1248_li -#Implement OLAP features as described here: http://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5 - -@roadmap_1249_li -#Support Oracle ROWID (unique identifier for each row). - -@roadmap_1250_li -#MySQL compatibility: alter table add index i(c), add constraint c foreign key(c) references t(c); - -@roadmap_1251_li -#Server mode: improve performance for batch updates. - -@roadmap_1252_li -#Applets: support read-only databases in a zip file (accessed as a resource). - -@roadmap_1253_li -#Long running queries / errors / trace system table. - -@roadmap_1254_li -#H2 Console should support JaQu directly. - -@roadmap_1255_li -#Better document FTL_SEARCH, FTL_SEARCH_DATA. - -@roadmap_1256_li -#Sequences: CURRVAL should be session specific. Compatibility with PostgreSQL. - -@roadmap_1257_li -#Index creation using deterministic functions. - -@roadmap_1258_li -#ANALYZE: for unique indexes that allow null, count the number of null. - -@roadmap_1259_li -#MySQL compatibility: multi-table delete: DELETE .. FROM .. [,...] USING - See http://dev.mysql.com/doc/refman/5.0/en/delete.html - -@roadmap_1260_li -#AUTO_SERVER: support changing IP addresses (disable a network while the database is open). - -@roadmap_1261_li -#Avoid using java.util.Calendar internally because it's slow, complicated, and buggy. - -@roadmap_1262_li -#Support TRUNCATE .. CASCADE like PostgreSQL. - -@roadmap_1263_li -#Fulltext search: lazy result generation using SimpleRowSource. - -@roadmap_1264_li -#Fulltext search: support alternative syntax: WHERE FTL_CONTAINS(name, 'hello'). - -@roadmap_1265_li -#MySQL compatibility: support REPLACE, see http://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73. - -@roadmap_1266_li -#MySQL compatibility: support INSERT INTO table SET column1 = value1, column2 = value2 - -@roadmap_1267_li -#Docs: add a one line description for each functions and SQL statements at the top (in the link section). 
- -@roadmap_1268_li -#Javadoc search: weight for titles should be higher ('random' should list Functions as the best match). - -@roadmap_1269_li -#Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes. - -@roadmap_1270_li -#Issue 50: Oracle compatibility: support calling 0-parameters functions without parenthesis. Make constants obsolete. - -@roadmap_1271_li -#MySQL, HSQLDB compatibility: support where 'a'=1 (not supported by Derby, PostgreSQL) - -@roadmap_1272_li -#Finer granularity for SLF4J trace - See http://code.google.com/p/h2database/issues/detail?id=62 - -@roadmap_1273_li -#Add database creation date and time to the database. - -@roadmap_1274_li -#Support ASSERTION. - -@roadmap_1275_li -#MySQL compatibility: support comparing 1='a' - -@roadmap_1276_li -#Support PostgreSQL lock modes: http://www.postgresql.org/docs/8.3/static/explicit-locking.html - -@roadmap_1277_li -#PostgreSQL compatibility: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver. - -@roadmap_1278_li -#RunScript should be able to read from system in (or quite mode for Shell). - -@roadmap_1279_li -#Natural join: support select x from dual natural join dual. - -@roadmap_1280_li -#Support using system properties in database URLs (may be a security problem). - -@roadmap_1281_li -#Natural join: somehow support this: select a.x, b.x, x from dual a natural join dual b - -@roadmap_1282_li -#Use the Java service provider mechanism to register file systems and function libraries. - -@roadmap_1283_li -#MySQL compatibility: for auto_increment columns, convert 0 to next value (as when inserting NULL). - -@roadmap_1284_li -#Optimization for multi-column IN: use an index if possible. Example: (A, B) IN((1, 2), (2, 3)). - -@roadmap_1285_li -#Optimization for EXISTS: convert to inner join or IN(..) if possible. 
- -@roadmap_1286_li -#Functions: support hashcode(value); cryptographic and fast - -@roadmap_1287_li -#Serialized file lock: support long running queries. - -@roadmap_1288_li -#Network: use 127.0.0.1 if other addresses don't work. - -@roadmap_1289_li -#Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication. - -@roadmap_1290_li -#Support reading JCR data: one table per node type; query table; cache option - -@roadmap_1291_li -#OSGi: create a sample application, test, document. - -@roadmap_1292_li -#help.csv: use complete examples for functions; run as test case. - -@roadmap_1293_li -#Functions to calculate the memory and disk space usage of a table, a row, or a value. - -@roadmap_1294_li -#Re-implement PooledConnection; use a lightweight connection object. - -@roadmap_1295_li -#Doclet: convert tests in javadocs to a java class. - -@roadmap_1296_li -#Doclet: format fields like methods, but support sorting by name and value. - -@roadmap_1297_li -#Doclet: shrink the html files. - -@roadmap_1298_li -#MySQL compatibility: support SET NAMES 'latin1' - See also http://code.google.com/p/h2database/issues/detail?id=56 - -@roadmap_1299_li -#Allow to scan index backwards starting with a value (to better support ORDER BY DESC). - -@roadmap_1300_li -#Java Service Wrapper: try http://yajsw.sourceforge.net/ - -@roadmap_1301_li -#Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE. - -@roadmap_1302_li -#Use a lazy and auto-close input stream (open resource when reading, close on eof). - -@roadmap_1303_li -#Connection pool: 'reset session' command (delete temp tables, rollback, auto-commit true). - -@roadmap_1304_li -#Improve SQL documentation, see http://www.w3schools.com/sql/ - -@roadmap_1305_li -#MySQL compatibility: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL. 
- -@roadmap_1306_li -#MS SQL Server compatibility: support DATEPART syntax. - -@roadmap_1307_li -#Sybase/DB2/Oracle compatibility: support out parameters in stored procedures - See http://code.google.com/p/h2database/issues/detail?id=83 - -@roadmap_1308_li -#Support INTERVAL data type (see Oracle and others). - -@roadmap_1309_li -#Combine Server and Console tool (only keep Server). - -@roadmap_1310_li -#Store the Lucene index in the database itself. - -@roadmap_1311_li -#Support standard MERGE statement: http://en.wikipedia.org/wiki/Merge_%28SQL%29 - -@roadmap_1312_li -#Oracle compatibility: support DECODE(x, ...). - -@roadmap_1313_li -#MVCC: compare concurrent update behavior with PostgreSQL and Oracle. - -@roadmap_1314_li -#HSQLDB compatibility: CREATE FUNCTION (maybe using a Function interface). - -@roadmap_1315_li -#HSQLDB compatibility: support CALL "java.lang.Math.sqrt"(2.0) - -@roadmap_1316_li -#Support comma as the decimal separator in the CSV tool. - -@roadmap_1317_li -#Compatibility: Java functions with SQLJ Part1 http://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz - -@roadmap_1318_li -#Compatibility: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation. - -@roadmap_1319_li -#CACHE_SIZE: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache. - -@roadmap_1320_li -#Support date/time/timestamp as documented in http://en.wikipedia.org/wiki/ISO_8601 - -@roadmap_1321_li -#PostgreSQL compatibility: when in PG mode, treat BYTEA data like PG. - -@roadmap_1322_li -#Support =ANY(array) as in PostgreSQL. See also http://www.postgresql.org/docs/8.0/interactive/arrays.html - -@roadmap_1323_li -#IBM DB2 compatibility: support PREVIOUS VALUE FOR sequence. - -@roadmap_1324_li -#Compatibility: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer). 
- -@roadmap_1325_li -#Oracle compatibility: support CREATE SYNONYM table FOR schema.table. - -@roadmap_1326_li -#FTP: document the server, including -ftpTask option to execute / kill remote processes - -@roadmap_1327_li -#FTP: problems with multithreading? - -@roadmap_1328_li -#FTP: implement SFTP / FTPS - -@roadmap_1329_li -#FTP: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file). - -@roadmap_1330_li -#More secure default configuration if remote access is enabled. - -@roadmap_1331_li -#Improve database file locking (maybe use native file locking). The current approach seems to be problematic if the file system is on a remote share (see Google Group 'Lock file modification time is in the future'). - -@roadmap_1332_li -#Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE. - -@roadmap_1333_li -#Issue 107: Prefer using the ORDER BY index if LIMIT is used. - -@roadmap_1334_li -#An index on (id, name) should be used for a query: select * from t where s=? order by i - -@roadmap_1335_li -#Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL. - -@roadmap_1336_li -#Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true). - -@roadmap_1337_li -#Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2). - -@roadmap_1338_li -#Fast alter table add column. - -@roadmap_1339_li -#Improve concurrency for in-memory database operations. - -@roadmap_1340_li -#Issue 122: Support for connection aliases for remote tcp connections. - -@roadmap_1341_li -#Fast scrambling (strong encryption doesn't help if the password is included in the application). - -@roadmap_1342_li -#H2 Console: support -webPassword to require a password to access preferences or shutdown. - -@roadmap_1343_li -#Issue 126: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number. 
- -@roadmap_1344_li -#Issue 127: Support activation/deactivation of triggers - -@roadmap_1345_li -#Issue 130: Custom log event listeners - -@roadmap_1346_li -#Issue 131: IBM DB2 compatibility: sysibm.sysdummy1 - -@roadmap_1347_li -#Issue 132: Use Java enum trigger type. - -@roadmap_1348_li -#Issue 134: IBM DB2 compatibility: session global variables. - -@roadmap_1349_li -#Cluster: support load balance with values for each server / auto detect. - -@roadmap_1350_li -#FTL_SET_OPTION(keyString, valueString) with key stopWords at first. - -@roadmap_1351_li -#Pluggable access control mechanism. - -@roadmap_1352_li -#Fulltext search (Lucene): support streaming CLOB data. - -@roadmap_1353_li -#Document/example how to create and read an encrypted script file. - -@roadmap_1354_li -#Check state of http://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins). - -@roadmap_1355_li -#Fulltext search (Lucene): only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible. - -@roadmap_1356_li -#Support a way to create or read compressed encrypted script files using an API. - -@roadmap_1357_li -#Scripting language support (Javascript). - -@roadmap_1358_li -#The network client should better detect if the server is not an H2 server and fail early. - -@roadmap_1359_li -#H2 Console: support CLOB/BLOB upload. - -@roadmap_1360_li -#Database file lock: detect hibernate / standby / very slow threads (compare system time). - -@roadmap_1361_li -#Automatic detection of redundant indexes. - -@roadmap_1362_li -#Maybe reject join without "on" (except natural join). - -@roadmap_1363_li -#Implement GiST (Generalized Search Tree for Secondary Storage). - -@roadmap_1364_li -#Function to read a number of bytes/characters from an BLOB or CLOB. - -@roadmap_1365_li -#Issue 156: Support SELECT ? UNION SELECT ?. - -@roadmap_1366_li -#Automatic mixed mode: support a port range list (to avoid firewall problems). 
- -@roadmap_1367_li -#Support the pseudo column rowid, oid, _rowid_. - -@roadmap_1368_li -#H2 Console / large result sets: stream early instead of keeping a whole result in-memory - -@roadmap_1369_li -#Support TRUNCATE for linked tables. - -@roadmap_1370_li -#UNION: evaluate INTERSECT before UNION (like most other database except Oracle). - -@roadmap_1371_li -#Delay creating the information schema, and share metadata columns. - -@roadmap_1372_li -#TCP Server: use a nonce (number used once) to protect unencrypted channels against replay attacks. - -@roadmap_1373_li -#Simplify running scripts and recovery: CREATE FORCE USER (overwrites an existing user). - -@roadmap_1374_li -#Support CREATE DATABASE LINK (a custom JDBC driver is already supported). - -@roadmap_1375_li -#Support large GROUP BY operations. Issue 216. - -@roadmap_1376_li -#Issue 163: Allow to create foreign keys on metadata types. - -@roadmap_1377_li -#Logback: write a native DBAppender. - -@roadmap_1378_li -#Cache size: don't use more cache than what is available. - -@roadmap_1379_li -#Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread. - -@roadmap_1380_li -#Tree index: Instead of an AVL tree, use a general balanced trees or a scapegoat tree. - -@roadmap_1381_li -#User defined functions: allow to store the bytecode (of just the class, or the jar file of the extension) in the database. - -@roadmap_1382_li -#Compatibility: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL. - -@roadmap_1383_li -#Optimizer: WHERE X=? AND Y IN(?), it always uses the index on Y. Should be cost based. - -@roadmap_1384_li -#Common Table Expression (CTE) / recursive queries: support parameters. Issue 314. - -@roadmap_1385_li -#Oracle compatibility: support INSERT ALL. - -@roadmap_1386_li -#Issue 178: Optimizer: index usage when both ascending and descending indexes are available. - -@roadmap_1387_li -#Issue 179: Related subqueries in HAVING clause. 
- -@roadmap_1388_li -#IBM DB2 compatibility: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero. - -@roadmap_1389_li -#Creating primary key: always create a constraint. - -@roadmap_1390_li -#Maybe use a different page layout: keep the data at the head of the page, and ignore the tail (don't store / read it). This may increase write / read performance depending on the file system. - -@roadmap_1391_li -#Indexes of temporary tables are currently kept in-memory. Is this how it should be? - -@roadmap_1392_li -#The Shell tool should support the same built-in commands as the H2 Console. - -@roadmap_1393_li -#Maybe use PhantomReference instead of finalize. - -@roadmap_1394_li -#Database file name suffix: should only have one dot by default. Example: .h2db - -@roadmap_1395_li -#Issue 196: Function based indexes - -@roadmap_1396_li -#ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName. - -@roadmap_1397_li -#Fix the disk space leak (killing the process at the exact right moment will increase the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java - -@roadmap_1398_li -#ROWNUM: Oracle compatibility when used within a subquery. Issue 198. - -@roadmap_1399_li -#Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way. - -@roadmap_1400_li -#ODBC: encrypted databases are not supported because the ;CIPHER= can not be set. - -@roadmap_1401_li -#Support CLOB and BLOB update, specially conn.createBlob().setBinaryStream(1); - -@roadmap_1402_li -#Optimizer: index usage when both ascending and descending indexes are available. Issue 178. - -@roadmap_1403_li -#Issue 306: Support schema specific domains. - -@roadmap_1404_li -#Triggers: support user defined execution order. Oracle: CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT ON TEST FOR EACH ROW FOLLOWS TEST_1. SQL specifies that multiple triggers should be fired in time-of-creation order. PostgreSQL uses name order, which was judged to be more convenient. 
Derby: triggers are fired in the order in which they were created. - -@roadmap_1405_li -#PostgreSQL compatibility: combine "users" and "roles". See: http://www.postgresql.org/docs/8.1/interactive/user-manag.html - -@roadmap_1406_li -#Improve documentation of system properties: only list the property names, default values, and description. - -@roadmap_1407_li -#Support running totals / cumulative sum using SUM(..) OVER(..). - -@roadmap_1408_li -#Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize) - -@roadmap_1409_li -#Triggers: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others). - -@roadmap_1410_li -#Common Table Expression (CTE) / recursive queries: support INSERT INTO ... SELECT ... Issue 219. - -@roadmap_1411_li -#Common Table Expression (CTE) / recursive queries: support non-recursive queries. Issue 217. - -@roadmap_1412_li -#Common Table Expression (CTE) / recursive queries: avoid endless loop. Issue 218. - -@roadmap_1413_li -#Common Table Expression (CTE) / recursive queries: support multiple named queries. Issue 220. - -@roadmap_1414_li -#Common Table Expression (CTE) / recursive queries: identifier scope may be incorrect. Issue 222. - -@roadmap_1415_li -#Log long running transactions (similar to long running statements). - -@roadmap_1416_li -#Parameter data type is data type of other operand. Issue 205. - -@roadmap_1417_li -#Some combinations of nested join with right outer join are not supported. - -@roadmap_1418_li -#DatabaseEventListener.openConnection(id) and closeConnection(id). - -@roadmap_1419_li -#Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, or to prevent to login with the same username and password from different IPs. Possibly using the DatabaseEventListener API, or a new API. 
- -@roadmap_1420_li -#Compatibility for data type CHAR (Derby, HSQLDB). Issue 212. - -@roadmap_1421_li -#Compatibility with MySQL TIMESTAMPDIFF. Issue 209. - -@roadmap_1422_li -#Optimizer: use a histogram of the data, specially for non-normal distributions. - -@roadmap_1423_li -#Trigger: allow declaring as source code (like functions). - -@roadmap_1424_li -#User defined aggregate: allow declaring as source code (like functions). - -@roadmap_1425_li -#The error "table not found" is sometimes caused by using the wrong database. Add "(this database is empty)" to the exception message if applicable. - -@roadmap_1426_li -#MySQL + PostgreSQL compatibility: support string literal escape with \n. - -@roadmap_1427_li -#PostgreSQL compatibility: support string literal escape with double \\. - -@roadmap_1428_li -#Document the TCP server "management_db". Maybe include the IP address of the client. - -@roadmap_1429_li -#Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main - -@roadmap_1430_li -#If a database object was not found in the current schema, but one with the same name existed in another schema, included that in the error message. - -@roadmap_1431_li -#Optimization to use an index for OR when using multiple keys: where (key1 = ? and key2 = ?) OR (key1 = ? and key2 = ?) - -@roadmap_1432_li -#Issue 302: Support optimizing queries with both inner and outer joins, as in: select * from test a inner join test b on a.id=b.id inner join o on o.id=a.id where b.x=1 (the optimizer should swap a and b here). See also TestNestedJoins, tag "swapInnerJoinTables". - -@roadmap_1433_li -#JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool). - -@roadmap_1434_li -#Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... 
SET SCHEMA ...; - -@roadmap_1435_li -#nioMapped file system: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example). - -@roadmap_1436_li -#Column as parameter of function table. Issue 228. - -@roadmap_1437_li -#Connection pool: detect ;AUTOCOMMIT=FALSE in the database URL, and if set, disable autocommit for all connections. - -@roadmap_1438_li -#Compatibility with MS Access: support "&" to concatenate text. - -@roadmap_1439_li -#The BACKUP statement should not synchronize on the database, and therefore should not block other users. - -@roadmap_1440_li -#Document the database file format. - -@roadmap_1441_li -#Support reading LOBs. - -@roadmap_1442_li -#Require appending DANGEROUS=TRUE when using certain dangerous settings such as LOG=0, LOG=1, LOCK_MODE=0, disabling FILE_LOCK,... - -@roadmap_1443_li -#Support UDT (user defined types) similar to how Apache Derby supports it: check constraint, allow to use it in Java functions as parameters (return values already seem to work). - -@roadmap_1444_li -#Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file, optional compatibility with current encrypted database files). - -@roadmap_1445_li -#Issue 229: SELECT with simple OR tests uses tableScan when it could use indexes. - -@roadmap_1446_li -#GROUP BY queries should use a temporary table if there are too many rows. - -@roadmap_1447_li -#BLOB: support random access when reading. - -@roadmap_1448_li -#CLOB: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form). - -@roadmap_1449_li -#Compatibility: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...). - -@roadmap_1450_li -#Compatibility with MySQL: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...). 
- -@roadmap_1451_li -#Compatibility with MySQL: support non-strict mode (sql_mode = "") any data that is too large for the column will just be truncated or set to the default value. - -@roadmap_1452_li -#The full condition should be sent to the linked table, not just the indexed condition. Example: TestLinkedTableFullCondition - -@roadmap_1453_li -#Compatibility with IBM DB2: CREATE PROCEDURE. - -@roadmap_1454_li -#Compatibility with IBM DB2: SQL cursors. - -@roadmap_1455_li -#Single-column primary key values are always stored explicitly. This is not required. - -@roadmap_1456_li -#Compatibility with MySQL: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8). - -@roadmap_1457_li -#CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true. - -@roadmap_1458_li -#Optimization for large lists for column IN(1, 2, 3, 4,...) - currently an list is used, could potentially use a hash set (maybe only for a part of the values - the ones that can be evaluated). - -@roadmap_1459_li -#Compatibility for ARRAY data type (Oracle: VARRAY(n) of VARCHAR(m); HSQLDB: VARCHAR(n) ARRAY; Postgres: VARCHAR(n)[]). - -@roadmap_1460_li -#PostgreSQL compatible array literal syntax: ARRAY[['a', 'b'], ['c', 'd']] - -@roadmap_1461_li -#PostgreSQL compatibility: UPDATE with FROM. - -@roadmap_1462_li -#Issue 297: Oracle compatibility for "at time zone". - -@roadmap_1463_li -#IBM DB2 compatibility: IDENTITY_VAL_LOCAL(). - -@roadmap_1464_li -#Support SQL/XML. - -@roadmap_1465_li -#Support concurrent opening of databases. - -@roadmap_1466_li -#Improved error message and diagnostics in case of network configuration problems. - -@roadmap_1467_li -#TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases). - -@roadmap_1468_li -#Adding a primary key should make the columns 'not null' unless if there is a row with null (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby). 
- -@roadmap_1469_li -#ARRAY data type: support Integer[] and so on in Java functions (currently only Object[] is supported). - -@roadmap_1470_li -#MySQL compatibility: LOCK TABLES a READ, b READ - see also http://dev.mysql.com/doc/refman/5.0/en/lock-tables.html - -@roadmap_1471_li -#The HTML to PDF converter should use http://code.google.com/p/wkhtmltopdf/ - -@roadmap_1472_li -#Issue 303: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)". - -@roadmap_1473_li -#MySQL compatibility: update test1 t1, test2 t2 set t1.name=t2.name where t1.id=t2.id. - -@roadmap_1474_li -#Issue 283: Improve performance of H2 on Android. - -@roadmap_1475_li -#Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s). - -@roadmap_1476_li -#Column compression option - see http://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d - -@roadmap_1477_li -#PostgreSQL compatibility: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID). - -@roadmap_1478_li -#MS SQL Server compatibility: support @@ROWCOUNT. - -@roadmap_1479_li -#PostgreSQL compatibility: LOG(x) is LOG10(x) and not LN(x). - -@roadmap_1480_li -#Issue 311: Serialized lock mode: executeQuery of write operations fails. - -@roadmap_1481_li -#PostgreSQL compatibility: support PgAdmin III (specially the function current_setting). - -@roadmap_1482_li -#MySQL compatibility: support TIMESTAMPADD. - -@roadmap_1483_li -#Support SELECT ... FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). - -@roadmap_1484_li -#Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). - -@roadmap_1485_li -#Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase). - -@roadmap_1486_li -#TRANSACTION_ID() for in-memory databases. 
- -@roadmap_1487_li -#TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL). - -@roadmap_1488_li -#Support [INNER | OUTER] JOIN USING(column [,...]). - -@roadmap_1489_li -#Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle) - -@roadmap_1490_li -#GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby). - -@roadmap_1491_li -#Sybase / MS SQL Server compatibility: CONVERT(..) parameters are swapped. - -@roadmap_1492_li -#Index conditions: WHERE AGE>1 should not scan through all rows with AGE=1. - -@roadmap_1493_li -#PHP support: H2 should support PDO, or test with PostgreSQL PDO. - -@roadmap_1494_li -#Outer joins: if no column of the outer join table is referenced, the outer join table could be removed from the query. - -@roadmap_1495_li -#Cluster: allow using auto-increment and identity columns by ensuring executed in lock-step. - -@roadmap_1496_li -#MySQL compatibility: index names only need to be unique for the given table. - -@roadmap_1497_li -#Issue 352: constraints: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases. - -@roadmap_1498_li -#Oracle compatibility: support MEDIAN aggregate function. - -@roadmap_1499_li -#Issue 348: Oracle compatibility: division should return a decimal result. - -@roadmap_1500_li -#Read rows on demand: instead of reading the whole row, only read up to that column that is requested. Keep an pointer to the data area and the column id that is already read. - -@roadmap_1501_li -#Long running transactions: log session id when detected. - -@roadmap_1502_li -#Optimization: "select id from test" should use the index on id even without "order by". - -@roadmap_1503_li -#Issue 362: LIMIT support for UPDATE statements (MySQL compatibility). - -@roadmap_1504_li -#Sybase SQL Anywhere compatibility: SELECT TOP ... 
START AT ... - -@roadmap_1505_li -#Use Java 6 SQLException subclasses. - -@roadmap_1506_li -#Issue 390: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR - -@roadmap_1507_li -#Use Java 6 exceptions: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,.. - -@roadmap_1508_h2 -#Not Planned - -@roadmap_1509_li -#HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility. - -@roadmap_1510_li -#String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively. - -@roadmap_1511_li -#In prepared statements, identifier names (table names and so on) can not be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements. - -@sourceError_1000_h1 -#Error Analyzer - -@sourceError_1001_a -ホーム - -@sourceError_1002_a -#Input - -@sourceError_1003_h2 -#  Details  Source Code - -@sourceError_1004_p -#Paste the error message and stack trace below and click on 'Details' or 'Source Code': - -@sourceError_1005_b -#Error Code: - -@sourceError_1006_b -#Product Version: - -@sourceError_1007_b -#Message: - -@sourceError_1008_b -#More Information: - -@sourceError_1009_b -#Stack Trace: - -@sourceError_1010_b -#Source File: - -@sourceError_1011_p -# Inline - -@tutorial_1000_h1 -�?ュートリアル - -@tutorial_1001_a -# Starting and Using the H2 Console - -@tutorial_1002_a -# Special H2 Console Syntax - -@tutorial_1003_a -# Settings of the H2 Console - -@tutorial_1004_a -# Connecting to a Database using JDBC - -@tutorial_1005_a -# Creating New Databases - -@tutorial_1006_a -# Using the Server - -@tutorial_1007_a -# Using Hibernate - -@tutorial_1008_a -# Using TopLink and Glassfish - -@tutorial_1009_a -# Using EclipseLink - -@tutorial_1010_a -# Using Apache ActiveMQ - -@tutorial_1011_a -# Using H2 within NetBeans - -@tutorial_1012_a -# Using H2 with jOOQ - -@tutorial_1013_a -# Using Databases 
in Web Applications - -@tutorial_1014_a -# Android - -@tutorial_1015_a -# CSV (Comma Separated Values) Support - -@tutorial_1016_a -# Upgrade, Backup, and Restore - -@tutorial_1017_a -# Command Line Tools - -@tutorial_1018_a -# The Shell Tool - -@tutorial_1019_a -# Using OpenOffice Base - -@tutorial_1020_a -# Java Web Start / JNLP - -@tutorial_1021_a -# Using a Connection Pool - -@tutorial_1022_a -# Fulltext Search - -@tutorial_1023_a -# User-Defined Variables - -@tutorial_1024_a -# Date and Time - -@tutorial_1025_a -# Using Spring - -@tutorial_1026_a -# OSGi - -@tutorial_1027_a -# Java Management Extension (JMX) - -@tutorial_1028_h2 -起動�?�H2コンソール�?�使用 - -@tutorial_1029_p -# The H2 Console application lets you access a database using a browser. This can be a H2 database, or another database that supports the JDBC API. - -@tutorial_1030_p -# This is a client/server application, so both a server and a client (a browser) are required to run it. - -@tutorial_1031_p -# Depending on your platform and environment, there are multiple ways to start the H2 Console: - -@tutorial_1032_th -OS - -@tutorial_1033_th -起動 - -@tutorial_1034_td -Windows - -@tutorial_1035_td -# Click [Start], [All Programs], [H2], and [H2 Console (Command Line)] - -@tutorial_1036_td -# An icon will be added to the system tray: - -@tutorial_1037_td -# If you don't get the window and the system tray icon, then maybe Java is not installed correctly (in this case, try another way to start the application). A browser window should open and point to the login page at http://localhost:8082. - -@tutorial_1038_td -Windows - -@tutorial_1039_td -# Open a file browser, navigate to h2/bin, and double click on h2.bat. - -@tutorial_1040_td -# A console window appears. If there is a problem, you will see an error message in this window. A browser window will open and point to the login page (URL: http://localhost:8082). - -@tutorial_1041_td -Any - -@tutorial_1042_td -# Double click on the h2*.jar file. 
This only works if the .jar suffix is associated with Java. - -@tutorial_1043_td -Any - -@tutorial_1044_td -# Open a console window, navigate to the directory h2/bin, and type: - -@tutorial_1045_h3 -ファイアウォール - -@tutorial_1046_p -# If you start the server, you may get a security warning from the firewall (if you have installed one). If you don't want other computers in the network to access the application on your machine, you can let the firewall block those connections. The connection from the local machine will still work. Only if you want other computers to access the database on this computer, you need to allow remote connections in the firewall. - -@tutorial_1047_p -# It has been reported that when using Kaspersky 7.0 with firewall, the H2 Console is very slow when connecting over the IP address. A workaround is to connect using 'localhost'. - -@tutorial_1048_p -# A small firewall is already built into the server: other computers may not connect to the server by default. To change this, go to 'Preferences' and select 'Allow connections from other computers'. - -@tutorial_1049_h3 -Javaをテストする - -@tutorial_1050_p -# To find out which version of Java is installed, open a command prompt and type: - -@tutorial_1051_p -# If you get an error message, you may need to add the Java binary directory to the path environment variable. - -@tutorial_1052_h3 -#Error Message 'Port may be in use' - -@tutorial_1053_p -# You can only start one instance of the H2 Console, otherwise you will get the following error message: "The Web server could not be started. Possible cause: another server is already running...". It is possible to start multiple console applications on the same computer (using different ports), but this is usually not required as the console supports multiple concurrent connections.
- -@tutorial_1054_h3 -他�?��?ートを使用�?�る - -@tutorial_1055_p -# If the default port of the H2 Console is already in use by another application, then a different port needs to be configured. The settings are stored in a properties file. For details, see Settings of the H2 Console. The relevant entry is webPort. - -@tutorial_1056_p -# If no port is specified for the TCP and PG servers, each service will try to listen on its default port. If the default port is already in use, a random port is used. - -@tutorial_1057_h3 -ブラウザを使用�?��?�サー�?ー�?�接続 - -@tutorial_1058_p -# If the server started successfully, you can connect to it using a web browser. Javascript needs to be enabled. If you started the server on the same computer as the browser, open the URL http://localhost:8082. If you want to connect to the application from another computer, you need to provide the IP address of the server, for example: http://192.168.0.2:8082. If you enabled TLS on the server side, the URL needs to start with https://. - -@tutorial_1059_h3 -複数�?��?�時セッション - -@tutorial_1060_p -# Multiple concurrent browser sessions are supported. As that the database objects reside on the server, the amount of concurrent work is limited by the memory available to the server application. - -@tutorial_1061_h3 -ログイン - -@tutorial_1062_p -# At the login page, you need to provide connection information to connect to a database. Set the JDBC driver class of your database, the JDBC URL, user name, and password. If you are done, click [Connect]. - -@tutorial_1063_p -# You can save and reuse previously saved settings. The settings are stored in a properties file (see Settings of the H2 Console). - -@tutorial_1064_h3 -エラーメッセージ - -@tutorial_1065_p -# Error messages in are shown in red. You can show/hide the stack trace of the exception by clicking on the message. 
- -@tutorial_1066_h3 -データベースドライ�?�?�追加 - -@tutorial_1067_p -# To register additional JDBC drivers (MySQL, PostgreSQL, HSQLDB,...), add the jar file names to the environment variables H2DRIVERS or CLASSPATH. Example (Windows): to add the HSQLDB JDBC driver C:\Programs\hsqldb\lib\hsqldb.jar, set the environment variable H2DRIVERS to C:\Programs\hsqldb\lib\hsqldb.jar. - -@tutorial_1068_p -# Multiple drivers can be set; entries need to be separated by ; (Windows) or : (other operating systems). Spaces in the path names are supported. The settings must not be quoted. - -@tutorial_1069_h3 -#Using the H2 Console - -@tutorial_1070_p -# The H2 Console application has three main panels: the toolbar on top, the tree on the left, and the query/result panel on the right. The database objects (for example, tables) are listed on the left. Type a SQL command in the query panel and click [Run]. The result appears just below the command. - -@tutorial_1071_h3 -テーブル�??�?�?��?��?�カラム�??をインサート�?�る - -@tutorial_1072_p -# To insert table and column names into the script, click on the item in the tree. If you click on a table while the query is empty, then SELECT * FROM ... is added. While typing a query, the table that was used is expanded in the tree. For example if you type SELECT * FROM TEST T WHERE T. then the table TEST is expanded. - -@tutorial_1073_h3 -切断�?�アプリケーション�?�終了 - -@tutorial_1074_p -# To log out of the database, click [Disconnect] in the toolbar panel. However, the server is still running and ready to accept new sessions. - -@tutorial_1075_p -# To stop the server, right click on the system tray icon and select [Exit]. If you don't have the system tray icon, navigate to [Preferences] and click [Shutdown], press [Ctrl]+[C] in the console where the server was started (Windows), or close the console window. - -@tutorial_1076_h2 -#Special H2 Console Syntax - -@tutorial_1077_p -# The H2 Console supports a few built-in commands. 
Those are interpreted within the H2 Console, so they work with any database. Built-in commands need to be at the beginning of a statement (before any remarks), otherwise they are not parsed correctly. If in doubt, add ; before the command. - -@tutorial_1078_th -#Command(s) - -@tutorial_1079_th -説明 - -@tutorial_1080_td -# @autocommit_true; - -@tutorial_1081_td -# @autocommit_false; - -@tutorial_1082_td -# Enable or disable autocommit. - -@tutorial_1083_td -# @cancel; - -@tutorial_1084_td -# Cancel the currently running statement. - -@tutorial_1085_td -# @columns null null TEST; - -@tutorial_1086_td -# @index_info null null TEST; - -@tutorial_1087_td -# @tables; - -@tutorial_1088_td -# @tables null null TEST; - -@tutorial_1089_td -# Call the corresponding DatabaseMetaData.get method. Patterns are case sensitive (usually identifiers are uppercase). For information about the parameters, see the Javadoc documentation. Missing parameters at the end of the line are set to null. The complete list of metadata commands is: @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, @procedure_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns - -@tutorial_1090_td -# @edit select * from test; - -@tutorial_1091_td -# Use an updatable result set. - -@tutorial_1092_td -# @generated insert into test() values(); - -@tutorial_1093_td -# Show the result of Statement.getGeneratedKeys(). - -@tutorial_1094_td -# @history; - -@tutorial_1095_td -# List the command history. - -@tutorial_1096_td -# @info; - -@tutorial_1097_td -# Display the result of various Connection and DatabaseMetaData methods. - -@tutorial_1098_td -# @list select * from test; - -@tutorial_1099_td -# Show the result set in list format (each column on its own line, with row numbers). 
- -@tutorial_1100_td -# @loop 1000 select ?, ?/*rnd*/; - -@tutorial_1101_td -# @loop 1000 @statement select ?; - -@tutorial_1102_td -# Run the statement this many times. Parameters (?) are set using a loop from 0 up to x - 1. Random values are used for each ?/*rnd*/. A Statement object is used instead of a PreparedStatement if @statement is used. Result sets are read until ResultSet.next() returns false. Timing information is printed. - -@tutorial_1103_td -# @maxrows 20; - -@tutorial_1104_td -# Set the maximum number of rows to display. - -@tutorial_1105_td -# @memory; - -@tutorial_1106_td -# Show the used and free memory. This will call System.gc(). - -@tutorial_1107_td -# @meta select 1; - -@tutorial_1108_td -# List the ResultSetMetaData after running the query. - -@tutorial_1109_td -# @parameter_meta select ?; - -@tutorial_1110_td -# Show the result of the PreparedStatement.getParameterMetaData() calls. The statement is not executed. - -@tutorial_1111_td -# @prof_start; - -@tutorial_1112_td -# call hash('SHA256', '', 1000000); - -@tutorial_1113_td -# @prof_stop; - -@tutorial_1114_td -# Start/stop the built-in profiling tool. The top 3 stack traces of the statement(s) between start and stop are listed (if there are 3). - -@tutorial_1115_td -# @prof_start; - -@tutorial_1116_td -# @sleep 10; - -@tutorial_1117_td -# @prof_stop; - -@tutorial_1118_td -# Sleep for a number of seconds. Used to profile a long running query or operation that is running in another session (but in the same process). - -@tutorial_1119_td -# @transaction_isolation; - -@tutorial_1120_td -# @transaction_isolation 2; - -@tutorial_1121_td -# Display (without parameters) or change (with parameters 1, 2, 4, 8) the transaction isolation level. - -@tutorial_1122_h2 -#Settings of the H2 Console - -@tutorial_1123_p -# The settings of the H2 Console are stored in a configuration file called .h2.server.properties in you user home directory. 
For Windows installations, the user home directory is usually C:\Documents and Settings\[username] or C:\Users\[username]. The configuration file contains the settings of the application and is automatically created when the H2 Console is first started. Supported settings are: - -@tutorial_1124_code -#webAllowOthers - -@tutorial_1125_li -#: allow other computers to connect. - -@tutorial_1126_code -#webPort - -@tutorial_1127_li -#: the port of the H2 Console - -@tutorial_1128_code -#webSSL - -@tutorial_1129_li -#: use encrypted TLS (HTTPS) connections. - -@tutorial_1130_p -# In addition to those settings, the properties of the last recently used connection are listed in the form <number>=<name>|<driver>|<url>|<user> using the escape character \. Example: 1=Generic H2 (Embedded)|org.h2.Driver|jdbc\:h2\:~/test|sa - -@tutorial_1131_h2 -JDBCを使用�?��?�データベース�?�接続 - -@tutorial_1132_p -# To connect to a database, a Java application first needs to load the database driver, and then get a connection. A simple way to do that is using the following code: - -@tutorial_1133_p -# This code first loads the driver (Class.forName(...)) and then opens a connection (using DriverManager.getConnection()). The driver name is "org.h2.Driver". The database URL always needs to start with jdbc:h2: to be recognized by this database. The second parameter in the getConnection() call is the user name (sa for System Administrator in this example). The third parameter is the password. In this database, user names are not case sensitive, but passwords are. - -@tutorial_1134_h2 -新�?��?�データベースを作�?�?�る - -@tutorial_1135_p -# By default, if the database specified in the URL does not yet exist, a new (empty) database is created automatically. The user that created the database automatically becomes the administrator of this database. - -@tutorial_1136_p -# Auto-creating new database can be disabled, see Opening a Database Only if it Already Exists. 
- -@tutorial_1137_h2 -サーバーを使用する - -@tutorial_1138_p -# H2 currently supports three servers: a web server (for the H2 Console), a TCP server (for client/server connections) and a PG server (for PostgreSQL clients). Please note that only the web server supports browser connections. The servers can be started in different ways, one is using the Server tool. Starting the server doesn't open a database - databases are opened as soon as a client connects. - -@tutorial_1139_h3 -#Starting the Server Tool from Command Line - -@tutorial_1140_p -# To start the Server tool from the command line with the default settings, run: - -@tutorial_1141_p -# This will start the tool with the default options. To get the list of options and default values, run: - -@tutorial_1142_p -# There are options available to use other ports, and start or not start parts. - -@tutorial_1143_h3 -TCPサーバーに接続する - -@tutorial_1144_p -# To remotely connect to a database using the TCP server, use the following driver and database URL: - -@tutorial_1145_li -#JDBC driver class: org.h2.Driver - -@tutorial_1146_li -#Database URL: jdbc:h2:tcp://localhost/~/test - -@tutorial_1147_p -# For details about the database URL, see also in Features. Please note that you can't connect with a web browser to this URL. You can only connect using an H2 client (over JDBC). - -@tutorial_1148_h3 -#Starting the TCP Server within an Application - -@tutorial_1149_p -# Servers can also be started and stopped from within an application. Sample code: - -@tutorial_1150_h3 -他の過程からTCPサーバーを終了する - -@tutorial_1151_p -# The TCP server can be stopped from another process. To stop the server from the command line, run: - -@tutorial_1152_p -# To stop the server from a user application, use the following code: - -@tutorial_1153_p -# This function will only stop the TCP server. If other servers were started in the same process, they will continue to run.
To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. Shutting down a TCP server can be protected using the option -tcpPassword (the same password must be used to start and stop the TCP server). - -@tutorial_1154_h2 -Hibernateを使用�?�る - -@tutorial_1155_p -# This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect, or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy. A patch for Hibernate has been submitted and is now applied. You can rename it to H2Dialect.java and include this as a patch in your application, or upgrade to a version of Hibernate where this is fixed. - -@tutorial_1156_p -# When using Hibernate, try to use the H2Dialect if possible. When using the H2Dialect, compatibility modes such as MODE=MySQL are not supported. When using such a compatibility mode, use the Hibernate dialect for the corresponding database instead of the H2Dialect; but please note H2 does not support all features of all databases. - -@tutorial_1157_h2 -#Using TopLink and Glassfish - -@tutorial_1158_p -# To use H2 with Glassfish (or Sun AS), set the Datasource Classname to org.h2.jdbcx.JdbcDataSource. You can set this in the GUI at Application Server - Resources - JDBC - Connection Pools, or by editing the file sun-resources.xml: at element jdbc-connection-pool, set the attribute datasource-classname to org.h2.jdbcx.JdbcDataSource. - -@tutorial_1159_p -# The H2 database is compatible with HSQLDB and PostgreSQL. To take advantage of H2 specific features, use the H2Platform. The source code of this platform is included in H2 at src/tools/oracle/toplink/essentials/platform/database/DatabasePlatform.java.txt. You will need to copy this file to your application, and rename it to .java. 
To enable it, change the following setting in persistence.xml: - -@tutorial_1160_p -# In old versions of Glassfish, the property name is toplink.platform.class.name. - -@tutorial_1161_p -# To use H2 within Glassfish, copy the h2*.jar to the directory glassfish/glassfish/lib. - -@tutorial_1162_h2 -#Using EclipseLink - -@tutorial_1163_p -# To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many case. See also H2Platform. - -@tutorial_1164_h2 -#Using Apache ActiveMQ - -@tutorial_1165_p -# When using H2 as the backend database for Apache ActiveMQ, please use the TransactDatabaseLocker instead of the default locking mechanism. Otherwise the database file will grow without bounds. The problem is that the default locking mechanism uses an uncommitted UPDATE transaction, which keeps the transaction log from shrinking (causes the database file to grow). Instead of using an UPDATE statement, the TransactDatabaseLocker uses SELECT ... FOR UPDATE which is not problematic. To use it, change the ApacheMQ configuration element <jdbcPersistenceAdapter> element, property databaseLocker="org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker". However, using the MVCC mode will again result in the same problem. Therefore, please do not use the MVCC mode in this case. Another (more dangerous) solution is to set useDatabaseLock to false. - -@tutorial_1166_h2 -#Using H2 within NetBeans - -@tutorial_1167_p -# The project H2 Database Engine Support For NetBeans allows you to start and stop the H2 server from within the IDE. - -@tutorial_1168_p -# There is a known issue when using the Netbeans SQL Execution Window: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. 
In this case, two sequence values are allocated instead of just one. - -@tutorial_1169_h2 -#Using H2 with jOOQ - -@tutorial_1170_p -# jOOQ adds a thin layer on top of JDBC, allowing for type-safe SQL construction, including advanced SQL, stored procedures and advanced data types. jOOQ takes your database schema as a base for code generation. If this is your example schema: - -@tutorial_1171_p -# then run the jOOQ code generator on the command line using this command: - -@tutorial_1172_p -# ...where codegen.xml is on the classpath and contains this information - -@tutorial_1173_p -# Using the generated source, you can query the database as follows: - -@tutorial_1174_p -# See more details on jOOQ Homepage and in the jOOQ Tutorial - -@tutorial_1175_h2 -Webアプリケーション�?� データベースを使用�?�る - -@tutorial_1176_p -# There are multiple ways to access a database from within web applications. Here are some examples if you use Tomcat or JBoss. - -@tutorial_1177_h3 -エンベッドモード - -@tutorial_1178_p -# The (currently) simplest solution is to use the database in the embedded mode, that means open a connection in your application when it starts (a good solution is using a Servlet Listener, see below), or when a session starts. A database can be accessed from multiple sessions and applications at the same time, as long as they run in the same process. Most Servlet Containers (for example Tomcat) are just using one process, so this is not a problem (unless you run Tomcat in clustered mode). Tomcat uses multiple threads and multiple classloaders. If multiple applications access the same database at the same time, you need to put the database jar in the shared/lib or server/lib directory. It is a good idea to open the database when the web application starts, and close it when the web application stops. If using multiple applications, only one (any) of them needs to do that. In the application, an idea is to use one connection per Session, or even one connection per request (action). 
Those connections should be closed after use if possible (but it's not that bad if they don't get closed). - -@tutorial_1179_h3 -サー�?ーモード - -@tutorial_1180_p -# The server mode is similar, but it allows you to run the server in another process. - -@tutorial_1181_h3 -データベース�?�起動�?�終了�?�Servletリスナーを使用�?�る - -@tutorial_1182_p -# Add the h2*.jar file to your web application, and add the following snippet to your web.xml file (between the context-param and the filter section): - -@tutorial_1183_p -# For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc:h2:~/test, user name sa, and password sa. If you want to use this connection within your servlet, you can access as follows: - -@tutorial_1184_code -#DbStarter - -@tutorial_1185_p -# can also start the TCP server, however this is disabled by default. To enable it, use the parameter db.tcpServer in the file web.xml. Here is the complete list of options. These options need to be placed between the description tag and the listener / filter tags: - -@tutorial_1186_p -# When the web application is stopped, the database connection will be closed automatically. If the TCP server is started within the DbStarter, it will also be stopped automatically. - -@tutorial_1187_h3 -#Using the H2 Console Servlet - -@tutorial_1188_p -# The H2 Console is a standalone application and includes its own web server, but it can be used as a servlet as well. To do that, include the the h2*.jar file in your application, and add the following configuration to your web.xml: - -@tutorial_1189_p -# For details, see also src/tools/WEB-INF/web.xml. - -@tutorial_1190_p -# To create a web application with just the H2 Console, run the following command: - -@tutorial_1191_h2 -#Android - -@tutorial_1192_p -# You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. 
So far, only very few tests and benchmarks were run, but it seems that performance is similar to SQLite, except for opening and closing a database, which is not yet optimized in H2 (H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. So far, only very few tests have been run, and everything seems to work as expected. Fulltext search was not yet tested, however the native fulltext search should work. - -@tutorial_1193_p -# Reasons to use H2 instead of SQLite are: - -@tutorial_1194_li -#Full Unicode support including UPPER() and LOWER(). - -@tutorial_1195_li -#Streaming API for BLOB and CLOB data. - -@tutorial_1196_li -#Fulltext search. - -@tutorial_1197_li -#Multiple connections. - -@tutorial_1198_li -#User defined functions and triggers. - -@tutorial_1199_li -#Database file encryption. - -@tutorial_1200_li -#Reading and writing CSV files (this feature can be used outside the database as well). - -@tutorial_1201_li -#Referential integrity and check constraints. - -@tutorial_1202_li -#Better data type and SQL support. - -@tutorial_1203_li -#In-memory databases, read-only databases, linked tables. - -@tutorial_1204_li -#Better compatibility with other databases which simplifies porting applications. - -@tutorial_1205_li -#Possibly better performance (so far for read operations). - -@tutorial_1206_li -#Server mode (accessing a database on a different machine over TCP/IP). - -@tutorial_1207_p -# Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). Both the regular H2 jar file and the smaller h2small-*.jar can be used. To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) or build.bat jarSmall (Windows). - -@tutorial_1208_p -# The database files needs to be stored in a place that is accessible for the application. 
Example: - -@tutorial_1209_p -# Limitations: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android. - -@tutorial_1210_h2 -CSV (Comma Separated Values) サ�?ート - -@tutorial_1211_p -# The CSV file support can be used inside the database using the functions CSVREAD and CSVWRITE, or it can be used outside the database as a standalone tool. - -@tutorial_1212_h3 -データベース内�?�らCSVファイルを読�?�込む - -@tutorial_1213_p -# A CSV file can be read using the function CSVREAD. Example: - -@tutorial_1214_p -# Please note for performance reason, CSVREAD should not be used inside a join. Instead, import the data first (possibly into a temporary table), create the required indexes if necessary, and then query this table. - -@tutorial_1215_h3 -#Importing Data from a CSV File - -@tutorial_1216_p -# A fast way to load or import data (sometimes called 'bulk load') from a CSV file is to combine table creation with import. Optionally, the column names and data types can be set when creating the table. Another option is to use INSERT INTO ... SELECT. - -@tutorial_1217_h3 -#Importing Data from a CSV File - -@tutorial_1218_p -# The built-in function CSVWRITE can be used to create a CSV file from a query. Example: - -@tutorial_1219_h3 -Javaアプリケーション�?�らCSVファイル�?�書�??込む - -@tutorial_1220_p -# The Csv tool can be used in a Java application even when not using a database at all. Example: - -@tutorial_1221_h3 -Javaアプリケーション�?�らCSVファイルを読�?�込む - -@tutorial_1222_p -# It is possible to read a CSV file without opening a database. Example: - -@tutorial_1223_h2 -アップグレード�? �?ックアップ�?修復 - -@tutorial_1224_h3 -データベース�?�アップグレー - -@tutorial_1225_p -# The recommended way to upgrade from one version of the database engine to the next version is to create a backup of the database (in the form of a SQL script) using the old engine, and then execute the SQL script using the new engine. 
- -@tutorial_1226_h3 -�?ックアップ - -@tutorial_1227_p -# The recommended way to backup a database is to create a compressed SQL script file. This will result in a small, human readable, and database version independent backup. Creating the script will also verify the checksums of the database file. The Script tool is ran as follows: - -@tutorial_1228_p -# It is also possible to use the SQL command SCRIPT to create the backup of the database. For more information about the options, see the SQL command SCRIPT. The backup can be done remotely, however the file will be created on the server side. The built in FTP server could be used to retrieve the file from the server. - -@tutorial_1229_h3 -修復 - -@tutorial_1230_p -# To restore a database from a SQL script file, you can use the RunScript tool: - -@tutorial_1231_p -# For more information about the options, see the SQL command RUNSCRIPT. The restore can be done remotely, however the file needs to be on the server side. The built in FTP server could be used to copy the file to the server. It is also possible to use the SQL command RUNSCRIPT to execute a SQL script. SQL script files may contain references to other script files, in the form of RUNSCRIPT commands. However, when using the server mode, the references script files need to be available on the server side. - -@tutorial_1232_h3 -オンライン�?ックアップ - -@tutorial_1233_p -# The BACKUP SQL statement and the Backup tool both create a zip file with the database file. However, the contents of this file are not human readable. - -@tutorial_1234_p -# The resulting backup is transactionally consistent, meaning the consistency and atomicity rules apply. - -@tutorial_1235_p -# The Backup tool (org.h2.tools.Backup) can not be used to create a online backup; the database must not be in use while running this program. - -@tutorial_1236_p -# Creating a backup by copying the database files while the database is running is not supported, except if the file systems support creating snapshots. 
With other file systems, it can't be guaranteed that the data is copied in the right order. - -@tutorial_1237_h2 -#Command Line Tools - -@tutorial_1238_p -# This database comes with a number of command line tools. To get more information about a tool, start it with the parameter '-?', for example: - -@tutorial_1239_p -# The command line tools are: - -@tutorial_1240_code -�?ックアップ - -@tutorial_1241_li -# creates a backup of a database. - -@tutorial_1242_code -#ChangeFileEncryption - -@tutorial_1243_li -# allows changing the file encryption password or algorithm of a database. - -@tutorial_1244_code -#Console - -@tutorial_1245_li -# starts the browser based H2 Console. - -@tutorial_1246_code -#ConvertTraceFile - -@tutorial_1247_li -# converts a .trace.db file to a Java application and SQL script. - -@tutorial_1248_code -#CreateCluster - -@tutorial_1249_li -# creates a cluster from a standalone database. - -@tutorial_1250_code -#DeleteDbFiles - -@tutorial_1251_li -# deletes all files belonging to a database. - -@tutorial_1252_code -#Recover - -@tutorial_1253_li -# helps recovering a corrupted database. - -@tutorial_1254_code -#Restore - -@tutorial_1255_li -# restores a backup of a database. - -@tutorial_1256_code -#RunScript - -@tutorial_1257_li -# runs a SQL script against a database. - -@tutorial_1258_code -#Script - -@tutorial_1259_li -# allows converting a database to a SQL script for backup or migration. - -@tutorial_1260_code -#Script - -@tutorial_1261_li -# is used in the server mode to start a H2 server. - -@tutorial_1262_code -#Shell - -@tutorial_1263_li -# is a command line database tool. - -@tutorial_1264_p -# The tools can also be called from an application by calling the main or another public method. For details, see the Javadoc documentation. - -@tutorial_1265_h2 -#The Shell Tool - -@tutorial_1266_p -# The Shell tool is a simple interactive command line tool. 
To start it, type: - -@tutorial_1267_p -# You will be asked for a database URL, JDBC driver, user name, and password. The connection setting can also be set as command line parameters. After connecting, you will get the list of options. The built-in commands don't need to end with a semicolon, but SQL statements are only executed if the line ends with a semicolon ;. This allows to enter multi-line statements: - -@tutorial_1268_p -# By default, results are printed as a table. For results with many column, consider using the list mode: - -@tutorial_1269_h2 -OpenOffice Baseを使用�?�る - -@tutorial_1270_p -# OpenOffice.org Base supports database access over the JDBC API. To connect to a H2 database using OpenOffice Base, you first need to add the JDBC driver to OpenOffice. The steps to connect to a H2 database are: - -@tutorial_1271_li -#Start OpenOffice Writer, go to [Tools], [Options] - -@tutorial_1272_li -#Make sure you have selected a Java runtime environment in OpenOffice.org / Java - -@tutorial_1273_li -#Click [Class Path...], [Add Archive...] - -@tutorial_1274_li -#Select your h2 jar file (location is up to you, could be wherever you choose) - -@tutorial_1275_li -#Click [OK] (as much as needed), stop OpenOffice (including the Quickstarter) - -@tutorial_1276_li -#Start OpenOffice Base - -@tutorial_1277_li -#Connect to an existing database; select [JDBC]; [Next] - -@tutorial_1278_li -#Example datasource URL: jdbc:h2:~/test - -@tutorial_1279_li -#JDBC driver class: org.h2.Driver - -@tutorial_1280_p -# Now you can access the database stored in the current users home directory. - -@tutorial_1281_p -# To use H2 in NeoOffice (OpenOffice without X11): - -@tutorial_1282_li -#In NeoOffice, go to [NeoOffice], [Preferences] - -@tutorial_1283_li -#Look for the page under [NeoOffice], [Java] - -@tutorial_1284_li -#Click [Class Path], [Add Archive...] 
- -@tutorial_1285_li -#Select your h2 jar file (location is up to you, could be wherever you choose) - -@tutorial_1286_li -#Click [OK] (as much as needed), restart NeoOffice. - -@tutorial_1287_p -# Now, when creating a new database using the "Database Wizard" : - -@tutorial_1288_li -#Click [File], [New], [Database]. - -@tutorial_1289_li -#Select [Connect to existing database] and the select [JDBC]. Click next. - -@tutorial_1290_li -#Example datasource URL: jdbc:h2:~/test - -@tutorial_1291_li -#JDBC driver class: org.h2.Driver - -@tutorial_1292_p -# Another solution to use H2 in NeoOffice is: - -@tutorial_1293_li -#Package the h2 jar within an extension package - -@tutorial_1294_li -#Install it as a Java extension in NeoOffice - -@tutorial_1295_p -# This can be done by create it using the NetBeans OpenOffice plugin. See also Extensions Development. - -@tutorial_1296_h2 -Java Web Start / JNLP - -@tutorial_1297_p -# When using Java Web Start / JNLP (Java Network Launch Protocol), permissions tags must be set in the .jnlp file, and the application .jar file must be signed. Otherwise, when trying to write to the file system, the following exception will occur: java.security.AccessControlException: access denied (java.io.FilePermission ... read). Example permission tags: - -@tutorial_1298_h2 -#Using a Connection Pool - -@tutorial_1299_p -# For H2, opening a connection is fast if the database is already open. Still, using a connection pool improves performance if you open and close connections a lot. A simple connection pool is included in H2. It is based on the Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, for example the Apache Commons DBCP. 
For H2, it is about twice as faster to get a connection from the built-in connection pool than to get one using DriverManager.getConnection().The build-in connection pool is used as follows: - -@tutorial_1300_h2 -#Using a Connection Pool - -@tutorial_1301_p -# H2 includes two fulltext search implementations. One is using Apache Lucene, and the other (the native implementation) stores the index data in special tables in the database. - -@tutorial_1302_h3 -#Using the Native Fulltext Search - -@tutorial_1303_p -# To initialize, call: - -@tutorial_1304_p -# You need to initialize it in each database where you want to use it. Afterwards, you can create a fulltext index for a table using: - -@tutorial_1305_p -# PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. To search the index, use the following query: - -@tutorial_1306_p -# This will produce a result set that contains the query needed to retrieve the data: - -@tutorial_1307_p -# To drop an index on a table: - -@tutorial_1308_p -# To get the raw data, use FT_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FT_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0]; - -@tutorial_1309_p -# You can also call the index from within a Java application: - -@tutorial_1310_h3 -#Using the Apache Lucene Fulltext Search - -@tutorial_1311_p -# To use the Apache Lucene full text search, you need the Lucene library in the classpath. Currently, Apache Lucene 3.6.2 is used for testing. Newer versions may work, however they are not tested. How to do that depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. 
To initialize the Lucene fulltext search in a database, call: - -@tutorial_1312_p -# You need to initialize it in each database where you want to use it. Afterwards, you can create a full text index for a table using: - -@tutorial_1313_p -# PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. To search the index, use the following query: - -@tutorial_1314_p -# This will produce a result set that contains the query needed to retrieve the data: - -@tutorial_1315_p -# To drop an index on a table (be warned that this will re-index all of the full-text indices for the entire database): - -@tutorial_1316_p -# To get the raw data, use FTL_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FTL_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0]; - -@tutorial_1317_p -# You can also call the index from within a Java application: - -@tutorial_1318_p -# The Lucene fulltext search supports searching in specific column only. Column names must be uppercase (except if the original columns are double quoted). For column names starting with an underscore (_), another underscore needs to be added. Example: - -@tutorial_1319_h2 -#User-Defined Variables - -@tutorial_1320_p -# This database supports user-defined variables. Variables start with @ and can be used wherever expressions or parameters are allowed. Variables are not persisted and session scoped, that means only visible from within the session in which they are defined. A value is usually assigned using the SET command: - -@tutorial_1321_p -# The value can also be changed using the SET() method. This is useful in queries: - -@tutorial_1322_p -# Variables that are not set evaluate to NULL. 
The data type of a user-defined variable is the data type of the value assigned to it, that means it is not necessary (or possible) to declare variable names before using them. There are no restrictions on the assigned values; large objects (LOBs) are supported as well. Rolling back a transaction does not affect the value of a user-defined variable. - -@tutorial_1323_h2 -#Date and Time - -@tutorial_1324_p -# Date, time and timestamp values support ISO 8601 formatting, including time zone: - -@tutorial_1325_p -# If the time zone is not set, the value is parsed using the current time zone setting of the system. Date and time information is stored in H2 database files without time zone information. If the database is opened using another system time zone, the date and time will be the same. That means if you store the value '2000-01-01 12:00:00' in one time zone, then close the database and open the database again in a different time zone, you will also get '2000-01-01 12:00:00'. Please note that changing the time zone after the H2 driver is loaded is not supported. - -@tutorial_1326_h2 -#Using Spring - -@tutorial_1327_h3 -#Using the TCP Server - -@tutorial_1328_p -# Use the following configuration to start and stop the H2 TCP server using the Spring Framework: - -@tutorial_1329_p -# The destroy-method will help prevent exceptions on hot-redeployment or when restarting the server. - -@tutorial_1330_h3 -#Error Code Incompatibility - -@tutorial_1331_p -# There is an incompatibility with the Spring JdbcTemplate and H2 version 1.3.154 and newer, because of a change in the error code. This will cause the JdbcTemplate to not detect a duplicate key condition, and so a DataIntegrityViolationException is thrown instead of DuplicateKeyException. See also the issue SPR-8235. The workaround is to add the following XML file to the root of the classpath: - -@tutorial_1332_h2 -#OSGi - -@tutorial_1333_p -# The standard H2 jar can be dropped in as a bundle in an OSGi container. 
H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification. The H2 Data Source Factory service is registered with the following properties: OSGI_JDBC_DRIVER_CLASS=org.h2.Driver and OSGI_JDBC_DRIVER_NAME=H2 JDBC Driver. The OSGI_JDBC_DRIVER_VERSION property reflects the version of the driver as is. - -@tutorial_1334_p -# The following standard configuration properties are supported: JDBC_USER, JDBC_PASSWORD, JDBC_DESCRIPTION, JDBC_DATASOURCE_NAME, JDBC_NETWORK_PROTOCOL, JDBC_URL, JDBC_SERVER_NAME, JDBC_PORT_NUMBER. Any other standard property will be rejected. Non-standard properties will be passed on to H2 in the connection URL. - -@tutorial_1335_h2 -#Java Management Extension (JMX) - -@tutorial_1336_p -# Management over JMX is supported, but not enabled by default. To enable JMX, append ;JMX=TRUE to the database URL when opening the database. Various tools support JMX, one such tool is the jconsole. When opening the jconsole, connect to the process where the database is open (when using the server mode, you need to connect to the server process). Then go to the MBeans section. Under org.h2 you will find one entry per database. The object name of the entry is the database short name, plus the path (each colon is replaced with an underscore character). - -@tutorial_1337_p -# The following attributes and operations are supported: - -@tutorial_1338_code -#CacheSize - -@tutorial_1339_li -#: the cache size currently in use in KB. - -@tutorial_1340_code -#CacheSizeMax - -@tutorial_1341_li -# (read/write): the maximum cache size in KB. - -@tutorial_1342_code -#Exclusive - -@tutorial_1343_li -#: whether this database is open in exclusive mode or not. - -@tutorial_1344_code -#FileReadCount - -@tutorial_1345_li -#: the number of file read operations since the database was opened. - -@tutorial_1346_code -#FileSize - -@tutorial_1347_li -#: the file size in KB. 
- -@tutorial_1348_code -#FileWriteCount - -@tutorial_1349_li -#: the number of file write operations since the database was opened. - -@tutorial_1350_code -#FileWriteCountTotal - -@tutorial_1351_li -#: the number of file write operations since the database was created. - -@tutorial_1352_code -#LogMode - -@tutorial_1353_li -# (read/write): the current transaction log mode. See SET LOG for details. - -@tutorial_1354_code -#Mode - -@tutorial_1355_li -#: the compatibility mode (REGULAR if no compatibility mode is used). - -@tutorial_1356_code -#MultiThreaded - -@tutorial_1357_li -#: true if multi-threaded is enabled. - -@tutorial_1358_code -#Mvcc - -@tutorial_1359_li -#: true if MVCC is enabled. - -@tutorial_1360_code -#ReadOnly - -@tutorial_1361_li -#: true if the database is read-only. - -@tutorial_1362_code -#TraceLevel - -@tutorial_1363_li -# (read/write): the file trace level. - -@tutorial_1364_code -#Version - -@tutorial_1365_li -#: the database version in use. - -@tutorial_1366_code -#listSettings - -@tutorial_1367_li -#: list the database settings. - -@tutorial_1368_code -#listSessions - -@tutorial_1369_li -#: list the open sessions, including currently executing statement (if any) and locked tables (if any). - -@tutorial_1370_p -# To enable JMX, you may need to set the system properties com.sun.management.jmxremote and com.sun.management.jmxremote.port as required by the JVM. 
- diff --git a/h2/src/docsrc/textbase/_docs_en.properties b/h2/src/docsrc/textbase/_docs_en.properties deleted file mode 100644 index b5a5e32a5f..0000000000 --- a/h2/src/docsrc/textbase/_docs_en.properties +++ /dev/null @@ -1,4170 +0,0 @@ -advanced_1000_h1=Advanced -advanced_1001_a=\ Result Sets -advanced_1002_a=\ Large Objects -advanced_1003_a=\ Linked Tables -advanced_1004_a=\ Spatial Features -advanced_1005_a=\ Recursive Queries -advanced_1006_a=\ Updatable Views -advanced_1007_a=\ Transaction Isolation -advanced_1008_a=\ Multi-Version Concurrency Control (MVCC) -advanced_1009_a=\ Clustering / High Availability -advanced_1010_a=\ Two Phase Commit -advanced_1011_a=\ Compatibility -advanced_1012_a=\ Standards Compliance -advanced_1013_a=\ Run as Windows Service -advanced_1014_a=\ ODBC Driver -advanced_1015_a=\ Using H2 in Microsoft .NET -advanced_1016_a=\ ACID -advanced_1017_a=\ Durability Problems -advanced_1018_a=\ Using the Recover Tool -advanced_1019_a=\ File Locking Protocols -advanced_1020_a=\ Using Passwords -advanced_1021_a=\ Password Hash -advanced_1022_a=\ Protection against SQL Injection -advanced_1023_a=\ Protection against Remote Access -advanced_1024_a=\ Restricting Class Loading and Usage -advanced_1025_a=\ Security Protocols -advanced_1026_a=\ TLS Connections -advanced_1027_a=\ Universally Unique Identifiers (UUID) -advanced_1028_a=\ Settings Read from System Properties -advanced_1029_a=\ Setting the Server Bind Address -advanced_1030_a=\ Pluggable File System -advanced_1031_a=\ Split File System -advanced_1032_a=\ Database Upgrade -advanced_1033_a=\ Java Objects Serialization -advanced_1034_a=\ Custom Data Types Handler API -advanced_1035_a=\ Limits and Limitations -advanced_1036_a=\ Glossary and Links -advanced_1037_h2=Result Sets -advanced_1038_h3=Statements that Return a Result Set -advanced_1039_p=\ The following statements return a result set\: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP. All other statements return an update count. 
-advanced_1040_h3=Limiting the Number of Rows -advanced_1041_p=\ Before the result is returned to the application, all rows are read by the database. Server side cursors are not supported currently. If only the first few rows are interesting for the application, then the result set size should be limited to improve the performance. This can be done using LIMIT in a query (example\: SELECT * FROM TEST LIMIT 100), or by using Statement.setMaxRows(max). -advanced_1042_h3=Large Result Sets and External Sorting -advanced_1043_p=\ For large result set, the result is buffered to disk. The threshold can be defined using the statement SET MAX_MEMORY_ROWS. If ORDER BY is used, the sorting is done using an external sort algorithm. In this case, each block of rows is sorted using quick sort, then written to disk; when reading the data, the blocks are merged together. -advanced_1044_h2=Large Objects -advanced_1045_h3=Storing and Reading Large Objects -advanced_1046_p=\ If it is possible that the objects don't fit into memory, then the data type CLOB (for textual data) or BLOB (for binary data) should be used. For these data types, the objects are not fully read into memory, by using streams. To store a BLOB, use PreparedStatement.setBinaryStream. To store a CLOB, use PreparedStatement.setCharacterStream. To read a BLOB, use ResultSet.getBinaryStream, and to read a CLOB, use ResultSet.getCharacterStream. When using the client/server mode, large BLOB and CLOB data is stored in a temporary file on the client side. -advanced_1047_h3=When to use CLOB/BLOB -advanced_1048_p=\ By default, this database stores large LOB (CLOB and BLOB) objects separate from the main table data. Small LOB objects are stored in-place, the threshold can be set using MAX_LENGTH_INPLACE_LOB, but there is still an overhead to use CLOB/BLOB. Because of this, BLOB and CLOB should never be used for columns with a maximum size below about 200 bytes. 
The best threshold depends on the use case; reading in-place objects is faster than reading from separate files, but slows down the performance of operations that don't involve this column. -advanced_1049_h3=Large Object Compression -advanced_1050_p=\ The following feature is only available for the PageStore storage engine. For the MVStore engine (the default for H2 version 1.4.x), append ;COMPRESS\=TRUE to the database URL instead. CLOB and BLOB values can be compressed by using SET COMPRESS_LOB. The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster. -advanced_1051_h2=Linked Tables -advanced_1052_p=\ This database supports linked tables, which means tables that don't exist in the current database but are just links to another database. To create such a link, use the CREATE LINKED TABLE statement\: -advanced_1053_p=\ You can then access the table in the usual way. Whenever the linked table is accessed, the database issues specific queries over JDBC. Using the example above, if you issue the query SELECT * FROM LINK WHERE ID\=1, then the following query is run against the PostgreSQL database\: SELECT * FROM TEST WHERE ID\=?. The same happens for insert and update statements. Only simple statements are executed against the target database, that means no joins (queries that contain joins are converted to simple queries). Prepared statements are used where possible. -advanced_1054_p=\ To view the statements that are executed against the target table, set the trace level to 3. -advanced_1055_p=\ If multiple linked tables point to the same database (using the same database URL), the connection is shared. To disable this, set the system property h2.shareLinkedConnections\=false. 
-advanced_1056_p=\ The statement CREATE LINKED TABLE supports an optional schema name parameter. -advanced_1057_p=\ The following are not supported because they may result in a deadlock\: creating a linked table to the same database, and creating a linked table to another database using the server mode if the other database is open in the same server (use the embedded mode instead). -advanced_1058_p=\ Data types that are not supported in H2 are also not supported for linked tables, for example unsigned data types if the value is outside the range of the signed type. In such cases, the columns needs to be cast to a supported type. -advanced_1059_h2=Updatable Views -advanced_1060_p=\ By default, views are not updatable. To make a view updatable, use an "instead of" trigger as follows\: -advanced_1061_p=\ Update the base table(s) within the trigger as required. For details, see the sample application org.h2.samples.UpdatableView. -advanced_1062_h2=Transaction Isolation -advanced_1063_p=\ Please note that most data definition language (DDL) statements, such as "create table", commit the current transaction. See the Grammar for details. -advanced_1064_p=\ Transaction isolation is provided for all data manipulation language (DML) statements. -advanced_1065_p=\ Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. Instead, rows are locked for update, and read committed is used in all cases (changing the isolation level has no effect). -advanced_1066_p=\ This database supports the following transaction isolation levels\: -advanced_1067_b=Read Committed -advanced_1068_li=\ This is the default level. Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. Higher concurrency is possible when using this level. 
-advanced_1069_li=\ To enable, execute the SQL statement SET LOCK_MODE 3 -advanced_1070_li=\ or append ;LOCK_MODE\=3 to the database URL\: jdbc\:h2\:~/test;LOCK_MODE\=3 -advanced_1071_b=Serializable -advanced_1072_li=\ Both read locks and write locks are kept until the transaction commits. To enable, execute the SQL statement SET LOCK_MODE 1 -advanced_1073_li=\ or append ;LOCK_MODE\=1 to the database URL\: jdbc\:h2\:~/test;LOCK_MODE\=1 -advanced_1074_b=Read Uncommitted -advanced_1075_li=\ This level means that transaction isolation is disabled. -advanced_1076_li=\ To enable, execute the SQL statement SET LOCK_MODE 0 -advanced_1077_li=\ or append ;LOCK_MODE\=0 to the database URL\: jdbc\:h2\:~/test;LOCK_MODE\=0 -advanced_1078_p=\ When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited. -advanced_1079_b=Dirty Reads -advanced_1080_li=\ Means a connection can read uncommitted changes made by another connection. -advanced_1081_li=\ Possible with\: read uncommitted -advanced_1082_b=Non-Repeatable Reads -advanced_1083_li=\ A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result. -advanced_1084_li=\ Possible with\: read uncommitted, read committed -advanced_1085_b=Phantom Reads -advanced_1086_li=\ A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row. -advanced_1087_li=\ Possible with\: read uncommitted, read committed -advanced_1088_h3=Table Level Locking -advanced_1089_p=\ The database allows multiple concurrent connections to the same database. To make sure all connections only see consistent data, table level locking is used by default. This mechanism does not allow high concurrency, but is very fast. Shared locks and exclusive locks are supported. 
Before reading from a table, the database tries to add a shared lock to the table (this is only possible if there is no exclusive lock on the object by another connection). If the shared lock is added successfully, the table can be read. It is allowed that other connections also have a shared lock on the same object. If a connection wants to write to a table (update or delete a row), an exclusive lock is required. To get the exclusive lock, other connection must not have any locks on the object. After the connection commits, all locks are released. This database keeps all locks in memory. When a lock is released, and multiple connections are waiting for it, one of them is picked at random. -advanced_1090_h3=Lock Timeout -advanced_1091_p=\ If a connection cannot get a lock on an object, the connection waits for some amount of time (the lock timeout). During this time, hopefully the connection holding the lock commits and it is then possible to get the lock. If this is not possible because the other connection does not release the lock for some time, the unsuccessful connection will get a lock timeout exception. The lock timeout can be set individually for each connection. -advanced_1092_h2=Multi-Version Concurrency Control (MVCC) -advanced_1093_p=\ The MVCC feature allows higher concurrency than using (table level or row level) locks. When using MVCC in this database, delete, insert and update operations will only issue a shared lock on the table. An exclusive lock is still used when adding or removing columns, when dropping the table, and when using SELECT ... FOR UPDATE. Connections only 'see' committed data, and own changes. That means, if connection A updates a row but doesn't commit this change yet, connection B will see the old value. Only when the change is committed, the new value is visible by other connections (read committed). 
If multiple connections concurrently try to update the same row, the database waits until it can apply the change, but at most until the lock timeout expires. -advanced_1094_p=\ To use the MVCC feature, append ;MVCC\=TRUE to the database URL\: -advanced_1095_p=\ The setting must be specified in the first connection (the one that opens the database). It is not possible to enable or disable this setting while the database is already open. -advanced_1096_p=\ If MVCC is enabled, changing the lock mode (LOCK_MODE) has no effect. -advanced_1097_div=\ The MVCC mode is enabled by default in version 1.4.x, with the default MVStore storage engine. MVCC is disabled by default when using the PageStore storage engine (which is the default in version 1.3.x). The following applies when using the PageStore storage engine\: The MVCC feature is not fully tested yet. The limitations of the MVCC mode are\: with the PageStore storage engine, it can not be used at the same time as MULTI_THREADED\=TRUE; the complete undo log (the list of uncommitted changes) must fit in memory when using multi-version concurrency. The setting MAX_MEMORY_UNDO has no effect. Clustering / High Availability -advanced_1098_p=\ This database supports a simple clustering / high availability mechanism. The architecture is\: two database servers run on two different computers, and on both computers is a copy of the same database. If both servers run, each database operation is executed on both computers. If one server fails (power, hardware or network failure), the other server can still continue to work. From this point on, the operations will be executed only on one server until the other server is back up. -advanced_1099_p=\ Clustering can only be used in the server mode (the embedded mode does not support clustering). The cluster can be re-created using the CreateCluster tool without stopping the remaining server. 
Applications that are still connected are automatically disconnected, however when appending ;AUTO_RECONNECT\=TRUE, they will recover from that. -advanced_1100_p=\ To initialize the cluster, use the following steps\: -advanced_1101_li=Create a database -advanced_1102_li=Use the CreateCluster tool to copy the database to another location and initialize the clustering. Afterwards, you have two databases containing the same data. -advanced_1103_li=Start two servers (one for each copy of the database) -advanced_1104_li=You are now ready to connect to the databases with the client application(s) -advanced_1105_h3=Using the CreateCluster Tool -advanced_1106_p=\ To understand how clustering works, please try out the following example. In this example, the two databases reside on the same computer, but usually, the databases will be on different servers. -advanced_1107_li=Create two directories\: server1, server2. Each directory will simulate a directory on a computer. -advanced_1108_li=Start a TCP server pointing to the first directory. You can do this using the command line\: -advanced_1109_li=Start a second TCP server pointing to the second directory. This will simulate a server running on a second (redundant) computer. You can do this using the command line\: -advanced_1110_li=Use the CreateCluster tool to initialize clustering. This will automatically create a new, empty database if it does not exist. Run the tool on the command line\: -advanced_1111_li=You can now connect to the databases using an application or the H2 Console using the JDBC URL jdbc\:h2\:tcp\://localhost\:9101,localhost\:9102/~/test -advanced_1112_li=If you stop a server (by killing the process), you will notice that the other machine continues to work, and therefore the database is still accessible. -advanced_1113_li=To restore the cluster, you first need to delete the database that failed, then restart the server that was stopped, and re-run the CreateCluster tool. 
-advanced_1114_h3=Detect Which Cluster Instances are Running -advanced_1115_p=\ To find out which cluster nodes are currently running, execute the following SQL statement\: -advanced_1116_p=\ If the result is '' (two single quotes), then the cluster mode is disabled. Otherwise, the list of servers is returned, enclosed in single quote. Example\: 'server1\:9191,server2\:9191'. -advanced_1117_p=\ It is also possible to get the list of servers by using Connection.getClientInfo(). -advanced_1118_p=\ The property list returned from getClientInfo() contains a numServers property that returns the number of servers that are in the connection list. To get the actual servers, getClientInfo() also has properties server0..serverX, where serverX is the number of servers minus 1. -advanced_1119_p=\ Example\: To get the 2nd server in the connection list one uses getClientInfo('server1'). Note\: The serverX property only returns IP addresses and ports and not hostnames. -advanced_1120_h3=Clustering Algorithm and Limitations -advanced_1121_p=\ Read-only queries are only executed against the first cluster node, but all other statements are executed against all nodes. There is currently no load balancing made to avoid problems with transactions. The following functions may yield different results on different cluster nodes and must be executed with care\: UUID(), RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND() [when not using a seed]. Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE). However, they can be used in read-only statements and the result can then be used for modifying statements. Using auto-increment and identity columns is currently not supported. Instead, sequence values need to be manually requested and then used to insert data (using two statements). 
-advanced_1122_p=\ When using the cluster modes, result sets are read fully in memory by the client, so that there is no problem if the server dies that executed the query. Result sets must fit in memory on the client side. -advanced_1123_p=\ The SQL statement SET AUTOCOMMIT FALSE is not supported in the cluster mode. To disable autocommit, the method Connection.setAutoCommit(false) needs to be called. -advanced_1124_p=\ It is possible that a transaction from one connection overtakes a transaction from a different connection. Depending on the operations, this might result in different results, for example when conditionally incrementing a value in a row. -advanced_1125_h2=Two Phase Commit -advanced_1126_p=\ The two phase commit protocol is supported. 2-phase-commit works as follows\: -advanced_1127_li=Autocommit needs to be switched off -advanced_1128_li=A transaction is started, for example by inserting a row -advanced_1129_li=The transaction is marked 'prepared' by executing the SQL statement PREPARE COMMIT transactionName -advanced_1130_li=The transaction can now be committed or rolled back -advanced_1131_li=If a problem occurs before the transaction was successfully committed or rolled back (for example because a network problem occurred), the transaction is in the state 'in-doubt' -advanced_1132_li=When re-connecting to the database, the in-doubt transactions can be listed with SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT -advanced_1133_li=Each transaction in this list must now be committed or rolled back by executing COMMIT TRANSACTION transactionName or ROLLBACK TRANSACTION transactionName -advanced_1134_li=The database needs to be closed and re-opened to apply the changes -advanced_1135_h2=Compatibility -advanced_1136_p=\ This database is (up to a certain point) compatible to other databases such as HSQLDB, MySQL and PostgreSQL. There are certain areas where H2 is incompatible. 
-advanced_1137_h3=Transaction Commit when Autocommit is On -advanced_1138_p=\ At this time, this database engine commits a transaction (if autocommit is switched on) just before returning the result. For a query, this means the transaction is committed even before the application scans through the result set, and before the result set is closed. Other database engines may commit the transaction in this case when the result set is closed. -advanced_1139_h3=Keywords / Reserved Words -advanced_1140_p=\ There is a list of keywords that can't be used as identifiers (table names, column names and so on), unless they are quoted (surrounded with double quotes). The list is currently\: -advanced_1141_code=\ CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, TRUE, UNION, UNIQUE, WHERE -advanced_1142_p=\ Certain words of this list are keywords because they are functions that can be used without '()' for compatibility, for example CURRENT_TIMESTAMP. -advanced_1143_h2=Standards Compliance -advanced_1144_p=\ This database tries to be as much standard compliant as possible. For the SQL language, ANSI/ISO is the main standard. There are several versions that refer to the release date\: SQL-92, SQL\:1999, and SQL\:2003. Unfortunately, the standard documentation is not freely available. Another problem is that important features are not standardized. Whenever this is the case, this database tries to be compatible to other databases. -advanced_1145_h3=Supported Character Sets, Character Encoding, and Unicode -advanced_1146_p=\ H2 internally uses Unicode, and supports all character encoding systems and character sets supported by the virtual machine you use. 
-advanced_1147_h2=Run as Windows Service -advanced_1148_p=\ Using a native wrapper / adapter, Java applications can be run as a Windows Service. There are various tools available to do that. The Java Service Wrapper from Tanuki Software, Inc. is included in the installation. Batch files are provided to install, start, stop and uninstall the H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application. The batch files are located in the directory h2/service. -advanced_1149_p=\ The service wrapper bundled with H2 is a 32-bit version. To use a 64-bit version of Windows (x64), you need to use a 64-bit version of the wrapper, for example the one from Simon Krenger. -advanced_1150_p=\ When running the database as a service, an absolute path should be used. Using ~ in the database URL is problematic in this case, because it means to use the home directory of the current user. The service might run without or with the wrong user, so that the database files might end up in an unexpected place. -advanced_1151_h3=Install the Service -advanced_1152_p=\ The service needs to be registered as a Windows Service first. To do that, double click on 1_install_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. -advanced_1153_h3=Start the Service -advanced_1154_p=\ You can start the H2 Database Engine Service using the service manager of Windows, or by double clicking on 2_start_service.bat. Please note that the batch file does not print an error message if the service is not installed. -advanced_1155_h3=Connect to the H2 Console -advanced_1156_p=\ After installing and starting the service, you can connect to the H2 Console application using a browser. Double click on 3_start_browser.bat to do that. The default port (8082) is hard coded in the batch file. -advanced_1157_h3=Stop the Service -advanced_1158_p=\ To stop the service, double click on 4_stop_service.bat. 
Please note that the batch file does not print an error message if the service is not installed or started. -advanced_1159_h3=Uninstall the Service -advanced_1160_p=\ To uninstall the service, double click on 5_uninstall_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. -advanced_1161_h3=Additional JDBC drivers -advanced_1162_p=\ To use other databases (for example MySQL), the location of the JDBC drivers of those databases need to be added to the environment variables H2DRIVERS or CLASSPATH before installing the service. Multiple drivers can be set; each entry needs to be separated with a ; (Windows) or \: (other operating systems). Spaces in the path names are supported. The settings must not be quoted. -advanced_1163_h2=ODBC Driver -advanced_1164_p=\ This database does not come with its own ODBC driver at this time, but it supports the PostgreSQL network protocol. Therefore, the PostgreSQL ODBC driver can be used. Support for the PostgreSQL network protocol is quite new and should be viewed as experimental. It should not be used for production applications. -advanced_1165_p=\ To use the PostgreSQL ODBC driver on 64 bit versions of Windows, first run c\:/windows/syswow64/odbcad32.exe. At this point you set up your DSN just like you would on any other system. See also\: Re\: ODBC Driver on Windows 64 bit -advanced_1166_h3=ODBC Installation -advanced_1167_p=\ First, the ODBC driver must be installed. Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*) or newer is recommended. The Windows version of the PostgreSQL ODBC driver is available at http\://www.postgresql.org/ftp/odbc/versions/msi. -advanced_1168_h3=Starting the Server -advanced_1169_p=\ After installing the ODBC driver, start the H2 Server using the command line\: -advanced_1170_p=\ The PG Server (PG for PostgreSQL protocol) is started as well. 
By default, databases are stored in the current working directory where the server is started. Use -baseDir to save databases in another directory, for example the user home directory\: -advanced_1171_p=\ The PG server can be started and stopped from within a Java application as follows\: -advanced_1172_p=\ By default, only connections from localhost are allowed. To allow remote connections, use -pgAllowOthers when starting the server. -advanced_1173_p=\ To map an ODBC database name to a different JDBC database name, use the option -key when starting the server. Please note only one mapping is allowed. The following will map the ODBC database named TEST to the database URL jdbc\:h2\:~/data/test;cipher\=aes\: -advanced_1174_h3=ODBC Configuration -advanced_1175_p=\ After installing the driver, a new Data Source must be added. In Windows, run odbcad32.exe to open the Data Source Administrator. Then click on 'Add...' and select the PostgreSQL Unicode driver. Then click 'Finish'. You will be able to change the connection properties. The property column represents the property key in the odbc.ini file (which may be different from the GUI). -advanced_1176_th=Property -advanced_1177_th=Example -advanced_1178_th=Remarks -advanced_1179_td=Data Source -advanced_1180_td=H2 Test -advanced_1181_td=The name of the ODBC Data Source -advanced_1182_td=Database -advanced_1183_td=~/test;ifexists\=true -advanced_1184_td=\ The database name. This can include connections settings. By default, the database is stored in the current working directory where the Server is started except when the -baseDir setting is used. The name must be at least 3 characters. -advanced_1185_td=Servername -advanced_1186_td=localhost -advanced_1187_td=The server name or IP address. -advanced_1188_td=By default, only remote connections are allowed -advanced_1189_td=Username -advanced_1190_td=sa -advanced_1191_td=The database user name. 
-advanced_1192_td=SSL -advanced_1193_td=false (disabled) -advanced_1194_td=At this time, SSL is not supported. -advanced_1195_td=Port -advanced_1196_td=5435 -advanced_1197_td=The port where the PG Server is listening. -advanced_1198_td=Password -advanced_1199_td=sa -advanced_1200_td=The database password. -advanced_1201_p=\ To improve performance, please enable 'server side prepare' under Options / Datasource / Page 2 / Server side prepare. -advanced_1202_p=\ Afterwards, you may use this data source. -advanced_1203_h3=PG Protocol Support Limitations -advanced_1204_p=\ At this time, only a subset of the PostgreSQL network protocol is implemented. Also, there may be compatibility problems on the SQL level, with the catalog, or with text encoding. Problems are fixed as they are found. Currently, statements can not be canceled when using the PG protocol. Also, H2 does not provide index meta over ODBC. -advanced_1205_p=\ PostgreSQL ODBC Driver Setup requires a database password; that means it is not possible to connect to H2 databases without password. This is a limitation of the ODBC driver. -advanced_1206_h3=Security Considerations -advanced_1207_p=\ Currently, the PG Server does not support challenge response or encrypt passwords. This may be a problem if an attacker can listen to the data transferred between the ODBC driver and the server, because the password is readable to the attacker. Also, it is currently not possible to use encrypted SSL connections. Therefore the ODBC driver should not be used where security is important. -advanced_1208_p=\ The first connection that opens a database using the PostgreSQL server needs to be an administrator user. Subsequent connections don't need to be opened by an administrator. -advanced_1209_h3=Using Microsoft Access -advanced_1210_p=\ When using Microsoft Access to edit data in a linked H2 table, you may need to enable the following option\: Tools - Options - Edit/Find - ODBC fields. 
-advanced_1211_h2=Using H2 in Microsoft .NET -advanced_1212_p=\ The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. You can access an H2 database on .NET using the JDBC API, or using the ADO.NET interface. -advanced_1213_h3=Using the ADO.NET API on .NET -advanced_1214_p=\ An implementation of the ADO.NET interface is available in the open source project H2Sharp. -advanced_1215_h3=Using the JDBC API on .NET -advanced_1216_li=Install the .NET Framework from Microsoft. Mono has not yet been tested. -advanced_1217_li=Install IKVM.NET. -advanced_1218_li=Copy the h2*.jar file to ikvm/bin -advanced_1219_li=Run the H2 Console using\: ikvm -jar h2*.jar -advanced_1220_li=Convert the H2 Console to an .exe file using\: ikvmc -target\:winexe h2*.jar. You may ignore the warnings. -advanced_1221_li=Create a .dll file using (change the version accordingly)\: ikvmc.exe -target\:library -version\:1.0.69.0 h2*.jar -advanced_1222_p=\ If you want your C\# application to use H2, you need to add the h2.dll and the IKVM.OpenJDK.ClassLibrary.dll to your C\# solution. Here is some sample code\: -advanced_1223_h2=ACID -advanced_1224_p=\ In the database world, ACID stands for\: -advanced_1225_li=Atomicity\: transactions must be atomic, meaning either all tasks are performed or none. -advanced_1226_li=Consistency\: all operations must comply with the defined constraints. -advanced_1227_li=Isolation\: transactions must be isolated from each other. -advanced_1228_li=Durability\: committed transactions will not be lost. -advanced_1229_h3=Atomicity -advanced_1230_p=\ Transactions in this database are always atomic. -advanced_1231_h3=Consistency -advanced_1232_p=\ By default, this database is always in a consistent state. Referential integrity rules are enforced except when explicitly disabled. -advanced_1233_h3=Isolation -advanced_1234_p=\ For H2, as with most other database systems, the default isolation level is 'read committed'. 
This provides better performance, but also means that transactions are not completely isolated. H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'. -advanced_1235_h3=Durability -advanced_1236_p=\ This database does not guarantee that all committed transactions survive a power failure. Tests show that all databases sometimes lose transactions on power failure (for details, see below). Where losing transactions is not acceptable, a laptop or UPS (uninterruptible power supply) should be used. If durability is required for all possible cases of hardware failure, clustering should be used, such as the H2 clustering mode. -advanced_1237_h2=Durability Problems -advanced_1238_p=\ Complete durability means all committed transaction survive a power failure. Some databases claim they can guarantee durability, but such claims are wrong. A durability test was run against H2, HSQLDB, PostgreSQL, and Derby. All of those databases sometimes lose committed transactions. The test is included in the H2 download, see org.h2.test.poweroff.Test. -advanced_1239_h3=Ways to (Not) Achieve Durability -advanced_1240_p=\ Making sure that committed transactions are not lost is more complicated than it seems first. To guarantee complete durability, a database must ensure that the log record is on the hard drive before the commit call returns. To do that, databases use different methods. One is to use the 'synchronous write' file access mode. In Java, RandomAccessFile supports the modes rws and rwd\: -advanced_1241_code=rwd -advanced_1242_li=\: every update to the file's content is written synchronously to the underlying storage device. -advanced_1243_code=rws -advanced_1244_li=\: in addition to rwd, every update to the metadata is written synchronously. -advanced_1245_p=\ A test (org.h2.test.poweroff.TestWrite) with one of those modes achieves around 50 thousand write operations per second. 
Even when the operating system write buffer is disabled, the write rate is around 50 thousand operations per second. This feature does not force changes to disk because it does not flush all buffers. The test updates the same byte in the file again and again. If the hard drive was able to write at this rate, then the disk would need to make at least 50 thousand revolutions per second, or 3 million RPM (revolutions per minute). There are no such hard drives. The hard drive used for the test is about 7200 RPM, or about 120 revolutions per second. There is an overhead, so the maximum write rate must be lower than that. -advanced_1246_p=\ Calling fsync flushes the buffers. There are two ways to do that in Java\: -advanced_1247_code=FileDescriptor.sync() -advanced_1248_li=. The documentation says that this forces all system buffers to synchronize with the underlying device. This method is supposed to return after all in-memory modified copies of buffers associated with this file descriptor have been written to the physical medium. -advanced_1249_code=FileChannel.force() -advanced_1250_li=. This method is supposed to force any updates to this channel's file to be written to the storage device that contains it. -advanced_1251_p=\ By default, MySQL calls fsync for each commit. When using one of those methods, only around 60 write operations per second can be achieved, which is consistent with the RPM rate of the hard drive used. Unfortunately, even when calling FileDescriptor.sync() or FileChannel.force(), data is not always persisted to the hard drive, because most hard drives do not obey fsync()\: see Your Hard Drive Lies to You. In Mac OS X, fsync does not flush hard drive buffers. See Bad fsync?. So the situation is confusing, and tests prove there is a problem. -advanced_1252_p=\ Trying to flush hard drive buffers is hard, and if you do the performance is very bad. First you need to make sure that the hard drive actually flushes all buffers. 
Tests show that this can not be done in a reliable way. Then the maximum number of transactions is around 60 per second. Because of those reasons, the default behavior of H2 is to delay writing committed transactions. -advanced_1253_p=\ In H2, after a power failure, a bit more than one second of committed transactions may be lost. To change the behavior, use SET WRITE_DELAY and CHECKPOINT SYNC. Most other databases support commit delay as well. In the performance comparison, commit delay was used for all databases that support it. -advanced_1254_h3=Running the Durability Test -advanced_1255_p=\ To test the durability / non-durability of this and other databases, you can use the test application in the package org.h2.test.poweroff. Two computers with network connection are required to run this test. One computer just listens, while the test application is run (and power is cut) on the other computer. The computer with the listener application opens a TCP/IP port and listens for an incoming connection. The second computer first connects to the listener, and then created the databases and starts inserting records. The connection is set to 'autocommit', which means after each inserted record a commit is performed automatically. Afterwards, the test computer notifies the listener that this record was inserted successfully. The listener computer displays the last inserted record number every 10 seconds. Now, switch off the power manually, then restart the computer, and run the application again. You will find out that in most cases, none of the databases contains all the records that the listener computer knows about. For details, please consult the source code of the listener and test application. -advanced_1256_h2=Using the Recover Tool -advanced_1257_p=\ The Recover tool can be used to extract the contents of a database file, even if the database is corrupted. It also extracts the content of the transaction log and large objects (CLOB or BLOB). 
To run the tool, type on the command line\: -advanced_1258_p=\ For each database in the current directory, a text file will be created. This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript tool or a RUNSCRIPT FROM SQL statement. The script includes at least one CREATE USER statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script. -advanced_1259_p=\ The Recover tool creates a SQL script from database file. It also processes the transaction log. -advanced_1260_p=\ To verify the database can recover at any time, append ;RECOVER_TEST\=64 to the database URL in your test environment. This will simulate an application crash after each 64 writes to the database file. A log file named databaseName.h2.db.log is created that lists the operations. The recovery is tested using an in-memory file system, that means it may require a larger heap setting. -advanced_1261_h2=File Locking Protocols -advanced_1262_p=\ Multiple concurrent connections to the same database are supported, however a database file can only be open for reading and writing (in embedded mode) by one process at the same time. Otherwise, the processes would overwrite each others data and corrupt the database file. To protect against this problem, whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database stops normally, this lock file is deleted. -advanced_1263_p=\ In special cases (if the process did not terminate normally, for example because there was a power failure), the lock file is not deleted by the process that created it. 
That means the existence of the lock file is not a safe protocol for file locking. However, this software uses a challenge-response protocol to protect the database files. There are two methods (algorithms) implemented to provide both security (that is, the same database files cannot be opened by two processes at the same time) and simplicity (that is, the lock file does not need to be deleted manually by the user). The two methods are 'file method' and 'socket method'. -advanced_1264_p=\ The file locking protocols (except the file locking method 'FS') have the following limitation\: if a shared file system is used, and the machine with the lock owner is sent to sleep (standby or hibernate), another machine may take over. If the machine that originally held the lock wakes up, the database may become corrupt. If this situation can occur, the application must ensure the database is closed when the application is put to sleep. -advanced_1265_h3=File Locking Method 'File' -advanced_1266_p=\ The default method for database file locking for version 1.3 and older is the 'File Method'. The algorithm is\: -advanced_1267_li=If the lock file does not exist, it is created (using the atomic operation File.createNewFile). Then, the process waits a little bit (20 ms) and checks the file again. If the file was changed during this time, the operation is aborted. This protects against a race condition when one process deletes the lock file just after another one creates it, and a third process creates the file again. It does not occur if there are only two writers. -advanced_1268_li=\ If the file can be created, a random number is inserted together with the locking method ('file'). Afterwards, a watchdog thread is started that checks regularly (once every second by default) if the file was deleted or modified by another (challenger) thread / process. Whenever that occurs, the file is overwritten with the old data. 
The watchdog thread runs with high priority so that a change to the lock file does not get through undetected even if the system is very busy. However, the watchdog thread does use very little resources (CPU time), because it waits most of the time. Also, the watchdog only reads from the hard disk and does not write to it. -advanced_1269_li=\ If the lock file exists and was recently modified, the process waits for some time (up to two seconds). If it was still changed, an exception is thrown (database is locked). This is done to eliminate race conditions with many concurrent writers. Afterwards, the file is overwritten with a new version (challenge). After that, the thread waits for 2 seconds. If there is a watchdog thread protecting the file, he will overwrite the change and this process will fail to lock the database. However, if there is no watchdog thread, the lock file will still be as written by this thread. In this case, the file is deleted and atomically created again. The watchdog thread is started in this case and the file is locked. -advanced_1270_p=\ This algorithm is tested with over 100 concurrent threads. In some cases, when there are many concurrent threads trying to lock the database, they block each other (meaning the file cannot be locked by any of them) for some time. However, the file never gets locked by two threads at the same time. However using that many concurrent threads / processes is not the common use case. Generally, an application should throw an error to the user if it cannot open a database, and not try again in a (fast) loop. -advanced_1271_h3=File Locking Method 'Socket' -advanced_1272_p=\ There is a second locking mechanism implemented, but disabled by default. To use it, append ;FILE_LOCK\=SOCKET to the database URL. The algorithm is\: -advanced_1273_li=If the lock file does not exist, it is created. Then a server socket is opened on a defined port, and kept open. 
The port and IP address of the process that opened the database is written into the lock file. -advanced_1274_li=If the lock file exists, and the lock method is 'file', then the software switches to the 'file' method. -advanced_1275_li=If the lock file exists, and the lock method is 'socket', then the process checks if the port is in use. If the original process is still running, the port is in use and this process throws an exception (database is in use). If the original process died (for example due to a power failure, or abnormal termination of the virtual machine), then the port was released. The new process deletes the lock file and starts again. -advanced_1276_p=\ This method does not require a watchdog thread actively polling (reading) the same file every second. The problem with this method is, if the file is stored on a network share, two processes (running on different computers) could still open the same database files, if they do not have a direct TCP/IP connection. -advanced_1277_h3=File Locking Method 'FS' -advanced_1278_p=\ This is the default mode for version 1.4 and newer. This database file locking mechanism uses native file system lock on the database file. No *.lock.db file is created in this case, and no background thread is started. This mechanism may not work on all systems as expected. Some systems allow to lock the same file multiple times within the same virtual machine, and on some system native file locking is not supported or files are not unlocked after a power failure. -advanced_1279_p=\ To enable this feature, append ;FILE_LOCK\=FS to the database URL. -advanced_1280_p=\ This feature is relatively new. When using it for production, please ensure your system does in fact lock files as expected. -advanced_1281_h2=Using Passwords -advanced_1282_h3=Using Secure Passwords -advanced_1283_p=\ Remember that weak passwords can be broken regardless of the encryption and security protocols. Don't use passwords that can be found in a dictionary. 
Appending numbers does not make passwords secure. A way to create good passwords that can be remembered is\: take the first letters of a sentence, use upper and lower case characters, and creatively include special characters (but it's more important to use a long password than to use special characters). Example\: -advanced_1284_code=i'sE2rtPiUKtT -advanced_1285_p=\ from the sentence it's easy to remember this password if you know the trick. -advanced_1286_h3=Passwords\: Using Char Arrays instead of Strings -advanced_1287_p=\ Java strings are immutable objects and cannot be safely 'destroyed' by the application. After creating a string, it will remain in the main memory of the computer at least until it is garbage collected. The garbage collection cannot be controlled by the application, and even if it is garbage collected the data may still remain in memory. It might also be possible that the part of memory containing the password is swapped to disk (if not enough main memory is available), which is a problem if the attacker has access to the swap file of the operating system. -advanced_1288_p=\ It is a good idea to use char arrays instead of strings for passwords. Char arrays can be cleared (filled with zeros) after use, and therefore the password will not be stored in the swap file. -advanced_1289_p=\ This database supports using char arrays instead of string to pass user and file passwords. The following code can be used to do that\: -advanced_1290_p=\ This example requires Java 1.6. When using Swing, use javax.swing.JPasswordField. -advanced_1291_h3=Passing the User Name and/or Password in the URL -advanced_1292_p=\ Instead of passing the user name as a separate parameter as in Connection conn \= DriverManager. getConnection("jdbc\:h2\:~/test", "sa", "123"); the user name (and/or password) can be supplied in the URL itself\: Connection conn \= DriverManager. 
getConnection("jdbc\:h2\:~/test;USER\=sa;PASSWORD\=123"); The settings in the URL override the settings passed as a separate parameter. -advanced_1293_h2=Password Hash -advanced_1294_p=\ Sometimes the database password needs to be stored in a configuration file (for example in the web.xml file). In addition to connecting with the plain text password, this database supports connecting with the password hash. This means that only the hash of the password (and not the plain text password) needs to be stored in the configuration file. This will only protect others from reading or re-constructing the plain text password (even if they have access to the configuration file); it does not protect others from accessing the database using the password hash. -advanced_1295_p=\ To connect using the password hash instead of plain text password, append ;PASSWORD_HASH\=TRUE to the database URL, and replace the password with the password hash. To calculate the password hash from a plain text password, run the following command within the H2 Console tool\: @password_hash <upperCaseUserName> <password>. As an example, if the user name is sa and the password is test, run the command @password_hash SA test. Then use the resulting password hash as you would use the plain text password. When using an encrypted database, then the user password and file password need to be hashed separately. To calculate the hash of the file password, run\: @password_hash file <filePassword>. -advanced_1296_h2=Protection against SQL Injection -advanced_1297_h3=What is SQL Injection -advanced_1298_p=\ This database engine provides a solution for the security vulnerability known as 'SQL Injection'. Here is a short description of what SQL injection means. 
Some applications build SQL statements with embedded user input such as\: -advanced_1299_p=\ If this mechanism is used anywhere in the application, and user input is not correctly filtered or encoded, it is possible for a user to inject SQL functionality or statements by using specially built input such as (in this example) this password\: ' OR ''\='. In this case the statement becomes\: -advanced_1300_p=\ Which is always true no matter what the password stored in the database is. For more information about SQL Injection, see Glossary and Links. -advanced_1301_h3=Disabling Literals -advanced_1302_p=\ SQL Injection is not possible if user input is not directly embedded in SQL statements. A simple solution for the problem above is to use a prepared statement\: -advanced_1303_p=\ This database provides a way to enforce usage of parameters when passing user input to the database. This is done by disabling embedded literals in SQL statements. To do this, execute the statement\: -advanced_1304_p=\ Afterwards, SQL statements with text and number literals are not allowed any more. That means, SQL statement of the form WHERE NAME\='abc' or WHERE CustomerId\=10 will fail. It is still possible to use prepared statements and parameters as described above. Also, it is still possible to generate SQL statements dynamically, and use the Statement API, as long as the SQL statements do not include literals. There is also a second mode where number literals are allowed\: SET ALLOW_LITERALS NUMBERS. To allow all literals, execute SET ALLOW_LITERALS ALL (this is the default setting). Literals can only be enabled or disabled by an administrator. -advanced_1305_h3=Using Constants -advanced_1306_p=\ Disabling literals also means disabling hard-coded 'constant' literals. This database supports defining constants using the CREATE CONSTANT command. Constants can be defined only when literals are enabled, but used even when literals are disabled. 
To avoid name clashes with column names, constants can be defined in other schemas\: -advanced_1307_p=\ Even when literals are enabled, it is better to use constants instead of hard-coded number or text literals in queries or views. With constants, typos are found at compile time, the source code is easier to understand and change. -advanced_1308_h3=Using the ZERO() Function -advanced_1309_p=\ It is not required to create a constant for the number 0 as there is already a built-in function ZERO()\: -advanced_1310_h2=Protection against Remote Access -advanced_1311_p=\ By default this database does not allow connections from other machines when starting the H2 Console, the TCP server, or the PG server. Remote access can be enabled using the command line options -webAllowOthers, -tcpAllowOthers, -pgAllowOthers. -advanced_1312_p=\ If you enable remote access using -tcpAllowOthers or -pgAllowOthers, please also consider using the options -baseDir, -ifExists, so that remote users can not create new databases or access existing databases with weak passwords. When using the option -baseDir, only databases within that directory may be accessed. Ensure the existing accessible databases are protected using strong passwords. -advanced_1313_p=\ If you enable remote access using -webAllowOthers, please ensure the web server can only be accessed from trusted networks. The options -baseDir, -ifExists don't protect access to the tools section, prevent remote shutdown of the web server, changes to the preferences, the saved connection settings, or access to other databases accessible from the system. -advanced_1314_h2=Restricting Class Loading and Usage -advanced_1315_p=\ By default there is no restriction on loading classes and executing Java code for admins. 
That means an admin may call system functions such as System.setProperty by executing\: -advanced_1316_p=\ To restrict users (including admins) from loading classes and executing code, the list of allowed classes can be set in the system property h2.allowedClasses in the form of a comma separated list of classes or patterns (items ending with *). By default all classes are allowed. Example\: -advanced_1317_p=\ This mechanism is used for all user classes, including database event listeners, trigger classes, user-defined functions, user-defined aggregate functions, and JDBC driver classes (with the exception of the H2 driver) when using the H2 Console. -advanced_1318_h2=Security Protocols -advanced_1319_p=\ The following paragraphs document the security protocols used in this database. These descriptions are very technical and only intended for security experts that already know the underlying security primitives. -advanced_1320_h3=User Password Encryption -advanced_1321_p=\ When a user tries to connect to a database, the combination of user name, @, and password are hashed using SHA-256, and this hash value is transmitted to the database. This step does not protect against an attacker that re-uses the value if he is able to listen to the (unencrypted) transmission between the client and the server. But, the passwords are never transmitted as plain text, even when using an unencrypted connection between client and server. That means if a user reuses the same password for different things, this password is still protected up to some point. See also 'RFC 2617 - HTTP Authentication\: Basic and Digest Access Authentication' for more information. -advanced_1322_p=\ When a new database or user is created, a new random salt value is generated. The size of the salt is 64 bits. Using the random salt reduces the risk of an attacker pre-calculating hash values for many different (commonly used) passwords. 
-advanced_1323_p=\ The combination of user-password hash value (see above) and salt is hashed using SHA-256. The resulting value is stored in the database. When a user tries to connect to the database, the database combines user-password hash value with the stored salt value and calculates the hash value. Other products use multiple iterations (hash the hash value again and again), but this is not done in this product to reduce the risk of denial of service attacks (where the attacker tries to connect with bogus passwords, and the server spends a lot of time calculating the hash value for each password). The reasoning is\: if the attacker has access to the hashed passwords, he also has access to the data in plain text, and therefore does not need the password any more. If the data is protected by storing it on another computer and only accessible remotely, then the iteration count is not required at all. -advanced_1324_h3=File Encryption -advanced_1325_p=\ The database files can be encrypted using the AES-128 algorithm. -advanced_1326_p=\ When a user tries to connect to an encrypted database, the combination of file@ and the file password is hashed using SHA-256. This hash value is transmitted to the server. -advanced_1327_p=\ When a new database file is created, a new cryptographically secure random salt value is generated. The size of the salt is 64 bits. The combination of the file password hash and the salt value is hashed 1024 times using SHA-256. The reason for the iteration is to make it harder for an attacker to calculate hash values for common passwords. -advanced_1328_p=\ The resulting hash value is used as the key for the block cipher algorithm. Then, an initialization vector (IV) key is calculated by hashing the key again using SHA-256. This is to make sure the IV is unknown to the attacker. The reason for using a secret IV is to protect against watermark attacks. 
-advanced_1329_p=\ Before saving a block of data (each block is 8 bytes long), the following operations are executed\: first, the IV is calculated by encrypting the block number with the IV key (using the same block cipher algorithm). This IV is combined with the plain text using XOR. The resulting data is encrypted using the AES-128 algorithm. -advanced_1330_p=\ When decrypting, the operation is done in reverse. First, the block is decrypted using the key, and then the IV is calculated combined with the decrypted text using XOR. -advanced_1331_p=\ Therefore, the block cipher mode of operation is CBC (cipher-block chaining), but each chain is only one block long. The advantage over the ECB (electronic codebook) mode is that patterns in the data are not revealed, and the advantage over multi block CBC is that flipped cipher text bits are not propagated to flipped plaintext bits in the next block. -advanced_1332_p=\ Database encryption is meant for securing the database while it is not in use (stolen laptop and so on). It is not meant for cases where the attacker has access to files while the database is in use. When he has write access, he can for example replace pieces of files with pieces of older versions and manipulate data like this. -advanced_1333_p=\ File encryption slows down the performance of the database engine. Compared to unencrypted mode, database operations take about 2.5 times longer using AES (embedded mode). -advanced_1334_h3=Wrong Password / User Name Delay -advanced_1335_p=\ To protect against remote brute force password attacks, the delay after each unsuccessful login gets double as long. Use the system properties h2.delayWrongPasswordMin and h2.delayWrongPasswordMax to change the minimum (the default is 250 milliseconds) or maximum delay (the default is 4000 milliseconds, or 4 seconds). The delay only applies for those using the wrong password. 
Normally there is no delay for a user that knows the correct password, with one exception\: after using the wrong password, there is a delay of up to (randomly distributed) the same delay as for a wrong password. This is to protect against parallel brute force attacks, so that an attacker needs to wait for the whole delay. Delays are synchronized. This is also required to protect against parallel attacks. -advanced_1336_p=\ There is only one exception message for both wrong user and for wrong password, to make it harder to get the list of user names. It is not possible from the stack trace to see if the user name was wrong or the password. -advanced_1337_h3=HTTPS Connections -advanced_1338_p=\ The web server supports HTTP and HTTPS connections using SSLServerSocket. There is a default self-certified certificate to support an easy starting point, but custom certificates are supported as well. -advanced_1339_h2=TLS Connections -advanced_1340_p=\ Remote TLS connections are supported using the Java Secure Socket Extension (SSLServerSocket, SSLSocket). By default, anonymous TLS is enabled. -advanced_1341_p=\ To use your own keystore, set the system properties javax.net.ssl.keyStore and javax.net.ssl.keyStorePassword before starting the H2 server and client. See also Customizing the Default Key and Trust Stores, Store Types, and Store Passwords for more information. -advanced_1342_p=\ To disable anonymous TLS, set the system property h2.enableAnonymousTLS to false. -advanced_1343_h2=Universally Unique Identifiers (UUID) -advanced_1344_p=\ This database supports UUIDs. Also supported is a function to create new UUIDs using a cryptographically strong pseudo random number generator. With random UUIDs, the chance of two having the same value can be calculated using the probability theory. See also 'Birthday Paradox'. Standardized randomly generated UUIDs have 122 random bits. 4 bits are used for the version (Randomly generated UUID), and 2 bits for the variant (Leach-Salz). 
This database supports generating such UUIDs using the built-in function RANDOM_UUID() or UUID(). Here is a small program to estimate the probability of having two identical UUIDs after generating a number of values\: -advanced_1345_p=\ Some values are\: -advanced_1346_th=Number of UUIs -advanced_1347_th=Probability of Duplicates -advanced_1348_td=2^36\=68'719'476'736 -advanced_1349_td=0.000'000'000'000'000'4 -advanced_1350_td=2^41\=2'199'023'255'552 -advanced_1351_td=0.000'000'000'000'4 -advanced_1352_td=2^46\=70'368'744'177'664 -advanced_1353_td=0.000'000'000'4 -advanced_1354_p=\ To help non-mathematicians understand what those numbers mean, here a comparison\: one's annual risk of being hit by a meteorite is estimated to be one chance in 17 billion, that means the probability is about 0.000'000'000'06. -advanced_1355_h2=Spatial Features -advanced_1356_p=\ H2 supports the geometry data type and spatial indexes if the JTS Topology Suite is in the classpath. To run the H2 Console tool with the JTS tool, you need to download the JTS-CORE 1.14.0 jar file and place it in the h2 bin directory. Then edit the h2.sh file as follows\: -advanced_1357_p=\ Here is an example SQL script to create a table with a spatial column and index\: -advanced_1358_p=\ To query the table using geometry envelope intersection, use the operation &&, as in PostGIS\: -advanced_1359_p=\ You can verify that the spatial index is used using the "explain plan" feature\: -advanced_1360_p=\ For persistent databases, the spatial index is stored on disk; for in-memory databases, the index is kept in memory. -advanced_1361_h2=Recursive Queries -advanced_1362_p=\ H2 has experimental support for recursive queries using so called "common table expressions" (CTE). Examples\: -advanced_1363_p=\ Limitations\: Recursive queries need to be of the type UNION ALL, and the recursion needs to be on the second part of the query. No tables or views with the name of the table expression may exist. 
Different table expression names need to be used when using multiple distinct table expressions within the same transaction and for the same session. All columns of the table expression are of type VARCHAR, and may need to be cast to the required data type. Views with recursive queries are not supported. Subqueries and INSERT INTO ... FROM with recursive queries are not supported. Parameters are only supported within the last SELECT statement (a workaround is to use session variables like @start within the table expression). The syntax is\: -advanced_1364_h2=Settings Read from System Properties -advanced_1365_p=\ Some settings of the database can be set on the command line using -DpropertyName\=value. It is usually not required to change those settings manually. The settings are case sensitive. Example\: -advanced_1366_p=\ The current value of the settings can be read in the table INFORMATION_SCHEMA.SETTINGS. -advanced_1367_p=\ For a complete list of settings, see SysProperties. -advanced_1368_h2=Setting the Server Bind Address -advanced_1369_p=\ Usually server sockets accept connections on any/all local addresses. This may be a problem on multi-homed hosts. To bind only to one address, use the system property h2.bindAddress. This setting is used for both regular server sockets and for TLS server sockets. IPv4 and IPv6 address formats are supported. -advanced_1370_h2=Pluggable File System -advanced_1371_p=\ This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. The following file systems are included\: -advanced_1372_code=zip\: -advanced_1373_li=\ read-only zip-file based file system. Format\: zip\:/zipFileName\!/fileName. -advanced_1374_code=split\: -advanced_1375_li=\ file system that splits files in 1 GB files (stackable with other file systems). 
-advanced_1376_code=nio\: -advanced_1377_li=\ file system that uses FileChannel instead of RandomAccessFile (faster in some operating systems). -advanced_1378_code=nioMapped\: -advanced_1379_li=\ file system that uses memory mapped files (faster in some operating systems). Please note that there currently is a file size limitation of 2 GB when using this file system. To work around this limitation, combine it with the split file system\: split\:nioMapped\:test. -advanced_1380_code=memFS\: -advanced_1381_li=\ in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself). -advanced_1382_code=memLZF\: -advanced_1383_li=\ compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself). -advanced_1384_code=nioMemFS\: -advanced_1385_li=\ stores data outside of the VM's heap - useful for large memory DBs without incurring GC costs. -advanced_1386_code=nioMemLZF\: -advanced_1387_li=\ stores compressed data outside of the VM's heap - useful for large memory DBs without incurring GC costs. Use "nioMemLZF\:12\:" to tweak the % of blocks that are stored uncompressed. If you size this to your working set correctly, compressed storage is roughly the same performance as uncompressed. The default value is 1%. -advanced_1388_p=\ As an example, to use the the nio file system, use the following database URL\: jdbc\:h2\:nio\:~/test. -advanced_1389_p=\ To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase, and call the method FilePath.register before using it. -advanced_1390_p=\ For input streams (but not for random access files), URLs may be used in addition to the registered file systems. Example\: jar\:file\:///c\:/temp/example.zip\!/org/example/nested.csv. To read a stream from the classpath, use the prefix classpath\:, as in classpath\:/org/h2/samples/newsfeed.sql. 
-advanced_1391_h2=Split File System -advanced_1392_p=\ The file system prefix split\: is used to split logical files into multiple physical files, for example so that a database can get larger than the maximum file system size of the operating system. If the logical file is larger than the maximum file size, then the file is split as follows\: -advanced_1393_code=<fileName> -advanced_1394_li=\ (first block, is always created) -advanced_1395_code=<fileName>.1.part -advanced_1396_li=\ (second block) -advanced_1397_p=\ More physical files (*.2.part, *.3.part) are automatically created / deleted if needed. The maximum physical file size of a block is 2^30 bytes, which is also called 1 GiB or 1 GB. However this can be changed if required, by specifying the block size in the file name. The file name format is\: split\:<x>\:<fileName> where the file size per block is 2^x. For 1 MiB block sizes, use x \= 20 (because 2^20 is 1 MiB). The following file name means the logical file is split into 1 MiB blocks\: split\:20\:test.h2.db. An example database URL for this case is jdbc\:h2\:split\:20\:~/test. -advanced_1398_h2=Database Upgrade -advanced_1399_p=\ In version 1.2, H2 introduced a new file store implementation which is incompatible to the one used in versions < 1.2. To automatically convert databases to the new file store, it is necessary to include an additional jar file. The file can be found at http\://h2database.com/h2mig_pagestore_addon.jar . If this file is in the classpath, every connect to an older database will result in a conversion process. -advanced_1400_p=\ The conversion itself is done internally via 'script to' and 'runscript from'. After the conversion process, the files will be renamed from -advanced_1401_code=dbName.data.db -advanced_1402_li=\ to dbName.data.db.backup -advanced_1403_code=dbName.index.db -advanced_1404_li=\ to dbName.index.db.backup -advanced_1405_p=\ by default. 
Also, the temporary script will be written to the database directory instead of a temporary directory. Both defaults can be customized via -advanced_1406_code=org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean) -advanced_1407_code=org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean) -advanced_1408_p=\ prior opening a database connection. -advanced_1409_p=\ Since version 1.2.140 it is possible to let the old h2 classes (v 1.2.128) connect to the database. The automatic upgrade .jar file must be present, and the URL must start with jdbc\:h2v1_1\: (the JDBC driver class is org.h2.upgrade.v1_1.Driver). If the database should automatically connect using the old version if a database with the old format exists (without upgrade), and use the new version otherwise, then append ;NO_UPGRADE\=TRUE to the database URL. Please note the old driver did not process the system property "h2.baseDir" correctly, so that using this setting is not supported when upgrading. -advanced_1410_h2=Java Objects Serialization -advanced_1411_p=\ Java objects serialization is enabled by default for columns of type OTHER, using standard Java serialization/deserialization semantics. -advanced_1412_p=\ To disable this feature set the system property h2.serializeJavaObject\=false (default\: true). -advanced_1413_p=\ Serialization and deserialization of java objects is customizable both at system level and at database level providing a JavaObjectSerializer implementation\: -advanced_1414_li=\ At system level set the system property h2.javaObjectSerializer with the Fully Qualified Name of the JavaObjectSerializer interface implementation. It will be used over the entire JVM session to (de)serialize java objects being stored in column of type OTHER. Example h2.javaObjectSerializer\=com.acme.SerializerClassName. 
-advanced_1415_li=\ At database level execute the SQL statement SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName' or append ;JAVA_OBJECT_SERIALIZER\='com.acme.SerializerClassName' to the database URL\: jdbc\:h2\:~/test;JAVA_OBJECT_SERIALIZER\='com.acme.SerializerClassName'. -advanced_1416_p=\ Please note that this SQL statement can only be executed before any tables are defined. -advanced_1417_h2=Custom Data Types Handler API -advanced_1418_p=\ It is possible to extend the type system of the database by providing your own implementation of minimal required API basically consisting of type identification and conversion routines. -advanced_1419_p=\ In order to enable this feature, set the system property h2.customDataTypesHandler (default\: null) to the fully qualified name of the class providing CustomDataTypesHandler interface implementation. -advanced_1420_p=\ The instance of that class will be created by H2 and used to\: -advanced_1421_li=resolve the names and identifiers of extrinsic data types. -advanced_1422_li=convert values of extrinsic data types to and from values of built-in types. -advanced_1423_li=provide order of the data types. -advanced_1424_p=This is a system-level setting, i.e. affects all the databases. -advanced_1425_b=Note\: -advanced_1426_p=Please keep in mind that this feature may not possibly provide the same ABI stability level as other features as it exposes many of the H2 internals. You may be required to update your code occasionally due to internal changes in H2 if you are going to use this feature. -advanced_1427_h2=Limits and Limitations -advanced_1428_p=\ This database has the following known limitations\: -advanced_1429_li=Database file size limit\: 4 TB (using the default page size of 2 KB) or higher (when using a larger page size). This limit is including CLOB and BLOB data. -advanced_1430_li=The maximum file size for FAT or FAT32 file systems is 4 GB. That means when using FAT or FAT32, the limit is 4 GB for the data. 
This is the limitation of the file system. The database does provide a workaround for this problem, it is to use the file name prefix split\:. In that case files are split into files of 1 GB by default. An example database URL is\: jdbc\:h2\:split\:~/test. -advanced_1431_li=The maximum number of rows per table is 2^64. -advanced_1432_li=The maximum number of open transactions is 65535. -advanced_1433_li=Main memory requirements\: The larger the database, the more main memory is required. With the current storage mechanism (the page store), the minimum main memory required is around 1 MB for each 8 GB database file size. -advanced_1434_li=Limit on the complexity of SQL statements. Statements of the following form will result in a stack overflow exception\: -advanced_1435_li=There is no limit for the following entities, except the memory and storage capacity\: maximum identifier length (table name, column name, and so on); maximum number of tables, columns, indexes, triggers, and other database objects; maximum statement length, number of parameters per statement, tables per statement, expressions in order by, group by, having, and so on; maximum rows per query; maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; maximum row length, index row length, select row length; maximum length of a varchar column, decimal column, literal in a statement. -advanced_1436_li=Querying from the metadata tables is slow if there are many tables (thousands). -advanced_1437_li=For limitations on data types, see the documentation of the respective Java data type or the data type documentation of this database. -advanced_1438_h2=Glossary and Links -advanced_1439_th=Term -advanced_1440_th=Description -advanced_1441_td=AES-128 -advanced_1442_td=A block encryption algorithm. 
See also\: Wikipedia\: AES -advanced_1443_td=Birthday Paradox -advanced_1444_td=Describes the higher than expected probability that two persons in a room have the same birthday. Also valid for randomly generated UUIDs. See also\: Wikipedia\: Birthday Paradox -advanced_1445_td=Digest -advanced_1446_td=Protocol to protect a password (but not to protect data). See also\: RFC 2617\: HTTP Digest Access Authentication -advanced_1447_td=GCJ -advanced_1448_td=Compiler for Java. GNU Compiler for the Java and NativeJ (commercial) -advanced_1449_td=HTTPS -advanced_1450_td=A protocol to provide security to HTTP connections. See also\: RFC 2818\: HTTP Over TLS -advanced_1451_td=Modes of Operation -advanced_1452_a=Wikipedia\: Block cipher modes of operation -advanced_1453_td=Salt -advanced_1454_td=Random number to increase the security of passwords. See also\: Wikipedia\: Key derivation function -advanced_1455_td=SHA-256 -advanced_1456_td=A cryptographic one-way hash function. See also\: Wikipedia\: SHA hash functions -advanced_1457_td=SQL Injection -advanced_1458_td=A security vulnerability where an application embeds SQL statements or expressions in user input. See also\: Wikipedia\: SQL Injection -advanced_1459_td=Watermark Attack -advanced_1460_td=Security problem of certain encryption programs where the existence of certain data can be proven without decrypting. For more information, search in the internet for 'watermark attack cryptoloop' -advanced_1461_td=SSL/TLS -advanced_1462_td=Secure Sockets Layer / Transport Layer Security. 
See also\: Java Secure Socket Extension (JSSE) -architecture_1000_h1=Architecture -architecture_1001_a=\ Introduction -architecture_1002_a=\ Top-down overview -architecture_1003_a=\ JDBC driver -architecture_1004_a=\ Connection/session management -architecture_1005_a=\ Command execution and planning -architecture_1006_a=\ Table/index/constraints -architecture_1007_a=\ Undo log, redo log, and transactions layer -architecture_1008_a=\ B-tree engine and page-based storage allocation -architecture_1009_a=\ Filesystem abstraction -architecture_1010_h2=Introduction -architecture_1011_p=\ H2 implements an embedded and standalone ANSI-SQL89 compliant SQL engine on top of a B-tree based disk store. -architecture_1012_p=\ As of October 2013, Thomas is still working on our next-generation storage engine called MVStore. This will in time replace the B-tree based storage engine. -architecture_1013_h2=Top-down Overview -architecture_1014_p=\ Working from the top down, the layers look like this\: -architecture_1015_li=JDBC driver. -architecture_1016_li=Connection/session management. -architecture_1017_li=SQL Parser. -architecture_1018_li=Command execution and planning. -architecture_1019_li=Table/Index/Constraints. -architecture_1020_li=Undo log, redo log, and transactions layer. -architecture_1021_li=B-tree engine and page-based storage allocation. -architecture_1022_li=Filesystem abstraction. 
-architecture_1023_h2=JDBC Driver -architecture_1024_p=\ The JDBC driver implementation lives in org.h2.jdbc, org.h2.jdbcx -architecture_1025_h2=Connection/session management -architecture_1026_p=\ The primary classes of interest are\: -architecture_1027_th=Package -architecture_1028_th=Description -architecture_1029_td=org.h2.engine.Database -architecture_1030_td=the root/global class -architecture_1031_td=org.h2.engine.SessionInterface -architecture_1032_td=abstracts over the differences between embedded and remote sessions -architecture_1033_td=org.h2.engine.Session -architecture_1034_td=local/embedded session -architecture_1035_td=org.h2.engine.SessionRemote -architecture_1036_td=remote session -architecture_1037_h2=Parser -architecture_1038_p=\ The parser lives in org.h2.command.Parser. It uses a straightforward recursive-descent design. -architecture_1039_p=\ See Wikipedia Recursive-descent parser page. -architecture_1040_h2=Command execution and planning -architecture_1041_p=\ Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. The primary packages of interest are\: -architecture_1042_th=Package -architecture_1043_th=Description -architecture_1044_td=org.h2.command.ddl -architecture_1045_td=Commands that modify schema data structures -architecture_1046_td=org.h2.command.dml -architecture_1047_td=Commands that modify data -architecture_1048_h2=Table/Index/Constraints -architecture_1049_p=\ One thing to note here is that indexes are simply stored as special kinds of tables. 
-architecture_1050_p=\ The primary packages of interest are\: -architecture_1051_th=Package -architecture_1052_th=Description -architecture_1053_td=org.h2.table -architecture_1054_td=Implementations of different kinds of tables -architecture_1055_td=org.h2.index -architecture_1056_td=Implementations of different kinds of indices -architecture_1057_h2=Undo log, redo log, and transactions layer -architecture_1058_p=\ We have a transaction log, which is shared among all sessions. See also http\://en.wikipedia.org/wiki/Transaction_log http\://h2database.com/html/grammar.html\#set_log -architecture_1059_p=\ We also have an undo log, which is per session, to undo an operation (an update that fails for example) and to rollback a transaction. Theoretically, the transaction log could be used, but for simplicity, H2 currently uses it's own "list of operations" (usually in-memory). -architecture_1060_p=\ With the MVStore, this is no longer needed (just the transaction log). -architecture_1061_h2=B-tree engine and page-based storage allocation. -architecture_1062_p=\ The primary package of interest is org.h2.store. -architecture_1063_p=\ This implements a storage mechanism which allocates pages of storage (typically 2k in size) and also implements a b-tree over those pages to allow fast retrieval and update. -architecture_1064_h2=Filesystem abstraction. -architecture_1065_p=\ The primary class of interest is org.h2.store.FileStore. -architecture_1066_p=\ This implements an abstraction of a random-access file. This allows the higher layers to treat in-memory vs. on-disk vs. zip-file databases the same. 
-build_1000_h1=Build -build_1001_a=\ Portability -build_1002_a=\ Environment -build_1003_a=\ Building the Software -build_1004_a=\ Build Targets -build_1005_a=\ Using Maven 2 -build_1006_a=\ Using Eclipse -build_1007_a=\ Translating -build_1008_a=\ Submitting Source Code Changes -build_1009_a=\ Reporting Problems or Requests -build_1010_a=\ Automated Build -build_1011_a=\ Generating Railroad Diagrams -build_1012_h2=Portability -build_1013_p=\ This database is written in Java and therefore works on many platforms. It can also be compiled to a native executable using GCJ. -build_1014_h2=Environment -build_1015_p=\ To run this database, a Java Runtime Environment (JRE) version 1.7 or higher is required. -build_1016_p=\ To create the database executables, the following software stack was used. To use this database, it is not required to install this software however. -build_1017_li=Mac OS X and Windows -build_1018_a=Oracle JDK Version 1.7 -build_1019_a=Eclipse -build_1020_li=Eclipse Plugins\: Subclipse, Eclipse Checkstyle Plug-in, EclEmma Java Code Coverage -build_1021_a=Emma Java Code Coverage -build_1022_a=Mozilla Firefox -build_1023_a=OpenOffice -build_1024_a=NSIS -build_1025_li=\ (Nullsoft Scriptable Install System) -build_1026_a=Maven -build_1027_h2=Building the Software -build_1028_p=\ You need to install a JDK, for example the Oracle JDK version 1.7 or 1.8. Ensure that Java binary directory is included in the PATH environment variable, and that the environment variable JAVA_HOME points to your Java installation. On the command line, go to the directory h2 and execute the following command\: -build_1029_p=\ For Linux and OS X, use ./build.sh instead of build. -build_1030_p=\ You will get a list of targets. If you want to build the jar file, execute (Windows)\: -build_1031_p=\ To run the build tool in shell mode, use the command line option - as in ./build.sh -. -build_1032_h3=Switching the Source Code -build_1033_p=\ The source code uses Java 1.7 features. 
To switch the source code to the installed version of Java, run\: -build_1034_h2=Build Targets -build_1035_p=\ The build system can generate smaller jar files as well. The following targets are currently supported\: -build_1036_code=jarClient -build_1037_li=\ creates the file h2client.jar. This only contains the JDBC client. -build_1038_code=jarSmall -build_1039_li=\ creates the file h2small.jar. This only contains the embedded database. Debug information is disabled. -build_1040_code=jarJaqu -build_1041_li=\ creates the file h2jaqu.jar. This only contains the JaQu (Java Query) implementation. All other jar files do not include JaQu. -build_1042_code=javadocImpl -build_1043_li=\ creates the Javadocs of the implementation. -build_1044_p=\ To create the file h2client.jar, go to the directory h2 and execute the following command\: -build_1045_h3=Using Apache Lucene -build_1046_p=\ Apache Lucene 3.6.2 is used for testing. Newer versions may work, however they are not tested. -build_1047_h2=Using Maven 2 -build_1048_h3=Using a Central Repository -build_1049_p=\ You can include the database in your Maven 2 project as a dependency. Example\: -build_1050_p=\ New versions of this database are first uploaded to http\://hsql.sourceforge.net/m2-repo/ and then automatically synchronized with the main Maven repository; however after a new release it may take a few hours before they are available there. -build_1051_h3=Maven Plugin to Start and Stop the TCP Server -build_1052_p=\ A Maven plugin to start and stop the H2 TCP server is available from Laird Nelson at GitHub. 
To start the H2 server, use\: -build_1053_p=\ To stop the H2 server, use\: -build_1054_h3=Using Snapshot Version -build_1055_p=\ To build a h2-*-SNAPSHOT.jar file and upload it the to the local Maven 2 repository, execute the following command\: -build_1056_p=\ Afterwards, you can include the database in your Maven 2 project as a dependency\: -build_1057_h2=Using Eclipse -build_1058_p=\ To create an Eclipse project for H2, use the following steps\: -build_1059_li=Install Git and Eclipse. -build_1060_li=Get the H2 source code from Github\: -build_1061_code=git clone https\://github.com/h2database/h2database -build_1062_li=Download all dependencies\: -build_1063_code=build.bat download -build_1064_li=(Windows) -build_1065_code=./build.sh download -build_1066_li=(otherwise) -build_1067_li=In Eclipse, create a new Java project from existing source code\: File, New, Project, Java Project, Create project from existing source. -build_1068_li=Select the h2 folder, click Next and Finish. -build_1069_li=To resolve com.sun.javadoc import statements, you may need to manually add the file <java.home>/../lib/tools.jar to the build path. -build_1070_h2=Translating -build_1071_p=\ The translation of this software is split into the following parts\: -build_1072_li=H2 Console\: src/main/org/h2/server/web/res/_text_*.prop -build_1073_li=Error messages\: src/main/org/h2/res/_messages_*.prop -build_1074_p=\ To translate the H2 Console, start it and select Preferences / Translate. After you are done, send the translated *.prop file to the Google Group. The web site is currently translated using Google. -build_1075_h2=Submitting Source Code Changes -build_1076_p=\ If you'd like to contribute bug fixes or new features, please consider the following guidelines to simplify merging them\: -build_1077_li=Only use Java 7 features (do not use Java 8/9/etc) (see Environment). -build_1078_li=Follow the coding style used in the project, and use Checkstyle (see above) to verify. 
For example, do not use tabs (use spaces instead). The checkstyle configuration is in src/installer/checkstyle.xml. -build_1079_li=A template of the Eclipse settings are in src/installer/eclipse.settings/*. If you want to use them, you need to copy them to the .settings directory. The formatting options (eclipseCodeStyle) are also included. -build_1080_li=Please provide test cases and integrate them into the test suite. For Java level tests, see src/test/org/h2/test/TestAll.java. For SQL level tests, see src/test/org/h2/test/test.in.txt or testSimple.in.txt. -build_1081_li=The test cases should cover at least 90% of the changed and new code; use a code coverage tool to verify that (see above). or use the build target coverage. -build_1082_li=Verify that you did not break other features\: run the test cases by executing build test. -build_1083_li=Provide end user documentation if required (src/docsrc/html/*). -build_1084_li=Document grammar changes in src/docsrc/help/help.csv -build_1085_li=Provide a change log entry (src/docsrc/html/changelog.html). -build_1086_li=Verify the spelling using build spellcheck. If required add the new words to src/tools/org/h2/build/doc/dictionary.txt. -build_1087_li=Run src/installer/buildRelease to find and fix formatting errors. -build_1088_li=Verify the formatting using build docs and build javadoc. -build_1089_li=Submit changes using GitHub's "pull requests". You'll require a free GitHub account. If you are not familiar with pull requests, please read GitHub's Using pull requests page. -build_1090_p=\ For legal reasons, patches need to be public in the form of an issue report or attachment or in the form of an email to the group. Significant contributions need to include the following statement\: -build_1091_p=\ "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 (http\://h2database.com/html/license.html)." 
-build_1092_h2=Reporting Problems or Requests -build_1093_p=\ Please consider the following checklist if you have a question, want to report a problem, or if you have a feature request\: -build_1094_li=For bug reports, please provide a short, self contained, correct (compilable), example of the problem. -build_1095_li=Feature requests are always welcome, even if the feature is already on the roadmap. Your mail will help prioritize feature requests. If you urgently need a feature, consider providing a patch. -build_1096_li=Before posting problems, check the FAQ and do a Google search. -build_1097_li=When got an unexpected exception, please try the Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s). -build_1098_li=When sending source code, please use a public web clipboard such as Pastebin, Cl1p, or Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but so that the problem can still be reproduced. As a template, use\: HelloWorld.java. Method that simply call other methods should be avoided, as well as unnecessary exception handling. Please use the JDBC API and no external tools or libraries. The test should include all required initialization code, and should be started with the main method. -build_1099_li=For large attachments, use a public temporary storage such as Rapidshare. -build_1100_li=Google Group versus issue tracking\: Use the Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an issue, but you don't need to (sending an email to the group is enough). Please note that only few people monitor the issue tracking system. 
-build_1101_li=For out-of-memory problems, please analyze the problem yourself first, for example using the command line option -XX\:+HeapDumpOnOutOfMemoryError (to create a heap dump file on out of memory) and a memory analysis tool such as the Eclipse Memory Analyzer (MAT). -build_1102_li=It may take a few days to get an answers. Please do not double post. -build_1103_h2=Automated Build -build_1104_p=\ This build process is automated and runs regularly. The build process includes running the tests and code coverage, using the command line ./build.sh clean jar coverage -Dh2.ftpPassword\=... uploadBuild. The last results are available here\: -build_1105_a=Test Output -build_1106_a=Code Coverage Summary -build_1107_a=Code Coverage Details (download, 1.3 MB) -build_1108_a=Build Newsfeed -build_1109_h2=Generating Railroad Diagrams -build_1110_p=\ The railroad diagrams of the SQL grammar are HTML, formatted as nested tables. The diagrams are generated as follows\: -build_1111_li=The BNF parser (org.h2.bnf.Bnf) reads and parses the BNF from the file help.csv. -build_1112_li=The page parser (org.h2.server.web.PageParser) reads the template HTML file and fills in the diagrams. -build_1113_li=The rail images (one straight, four junctions, two turns) are generated using a simple Java application. -build_1114_p=\ To generate railroad diagrams for other grammars, see the package org.h2.jcr. This package is used to generate the SQL-2 railroad diagrams for the JCR 2.0 specification. 
-changelog_1000_h1=Change Log -changelog_1001_h2=Next Version (unreleased) -changelog_1002_li=Issue \#654\: List ENUM type values in INFORMATION_SCHEMA.COLUMNS -changelog_1003_li=Issue \#668\: Fail of an update command on large table with ENUM column -changelog_1004_li=Issue \#662\: column called CONSTRAINT is not properly escaped when storing to metadata -changelog_1005_li=Issue \#660\: Outdated java version mentioned on http\://h2database.com/html/build.html\#providing_patches -changelog_1006_li=Issue \#643\: H2 doesn't use index when I use IN and EQUAL in one query -changelog_1007_li=Reset transaction start timestamp on ROLLBACK -changelog_1008_li=Issue \#632\: CREATE OR REPLACE VIEW creates incorrect columns names -changelog_1009_li=Issue \#630\: Integer overflow in CacheLRU can cause unrestricted cache growth -changelog_1010_li=Issue \#497\: Fix TO_DATE in cases of 'inline' text. E.g. the "T" and "Z" in to_date('2017-04-21T00\:00\:00Z', 'YYYY-MM-DD"T"HH24\:MI\:SS"Z"') -changelog_1011_li=Fix bug in MySQL/ORACLE-syntax silently corrupting the modified column in cases of setting the 'NULL'- or 'NOT NULL'-constraint. E.g. alter table T modify C NULL; -changelog_1012_li=Issue \#570\: MySQL compatibility for ALTER TABLE .. DROP INDEX -changelog_1013_li=Issue \#537\: Include the COLUMN name in message "Numeric value out of range" -changelog_1014_li=Issue \#600\: ROW_NUMBER() behaviour change in H2 1.4.195 -changelog_1015_li=Fix a bunch of race conditions found by vmlens.com, thank you to vmlens for giving us a license. -changelog_1016_li=PR \#597\: Support more types in getObject -changelog_1017_li=Issue \#591\: Generated SQL from WITH-CTEs does not include a table identifier -changelog_1018_li=PR \#593\: Make it possible to create a cluster without using temporary files. 
-changelog_1019_li=PR \#592\: "Connection is broken\: "unexpected status 16777216" [90067-192]" message when using older h2 releases as client -changelog_1020_li=Issue \#585\: MySQL mode DELETE statements compatibility -changelog_1021_li=PR \#586\: remove extra tx preparation -changelog_1022_li=PR \#568\: Implement MetaData.getColumns() for synonyms. -changelog_1023_li=Issue \#581\: org.h2.tools.RunScript assumes -script parameter is part of protocol -changelog_1024_li=Fix a deadlock in the TransactionStore -changelog_1025_li=PR \#579\: Disallow BLOB type in PostgreSQL mode -changelog_1026_li=Issue \#576\: Common Table Expression (CTE)\: WITH supports INSERT, UPDATE, MERGE, DELETE, CREATE TABLE ... -changelog_1027_li=Issue \#493\: Query with distinct/limit/offset subquery returns unexpected rows -changelog_1028_li=Issue \#575\: Support for full text search in multithreaded mode -changelog_1029_li=Issue \#569\: ClassCastException when filtering on ENUM value in WHERE clause -changelog_1030_li=Issue \#539\: Allow override of builtin functions/aliases -changelog_1031_li=Issue \#535\: Allow explicit paths on Windows without drive letter -changelog_1032_li=Issue \#549\: Removed UNION ALL requirements for CTE -changelog_1033_li=Issue \#548\: Table synonym support -changelog_1034_li=Issue \#531\: Rollback and delayed meta save. -changelog_1035_li=Issue \#515\: "Unique index or primary key violation" in TestMvccMultiThreaded -changelog_1036_li=Issue \#458\: TIMESTAMPDIFF() test failing. Handling of timestamp literals. 
-changelog_1037_li=PR \#546\: Fixes the missing file tree.js in the web console -changelog_1038_li=Issue \#543\: Prepare statement with regexp will not refresh parameter after metadata change -changelog_1039_li=PR \#536\: Support TIMESTAMP_WITH_TIMEZONE 2014 JDBC type -changelog_1040_li=Fix bug in parsing ANALYZE TABLE xxx SAMPLE_SIZE yyy -changelog_1041_li=Add padding for CHAR(N) values in PostgreSQL mode -changelog_1042_li=Issue \#89\: Add DB2 timestamp format compatibility -changelog_1043_h2=Version 1.4.196 (2017-06-10) -changelog_1044_li=Issue\#479 Allow non-recursive CTEs (WITH statements), patch from stumc -changelog_1045_li=Fix startup issue when using "CHECK" as a column name. -changelog_1046_li=Issue \#423\: ANALYZE performed multiple times on one table during execution of the same statement. -changelog_1047_li=Issue \#426\: Support ANALYZE TABLE statement -changelog_1048_li=Issue \#438\: Fix slow logging via SLF4J (TRACE_LEVEL_FILE\=4). -changelog_1049_li=Issue \#472\: Support CREATE SEQUENCE ... ORDER as a NOOP for Oracle compatibility -changelog_1050_li=Issue \#479\: Allow non-recursive Common Table Expressions (CTE) -changelog_1051_li=On Mac OS X, with IPv6 and no network connection, the Console tool was not working as expected. -changelog_1052_h2=Version 1.4.195 (2017-04-23) -changelog_1053_li=Lazy query execution support. -changelog_1054_li=Added API for handling custom data types (System property "h2.customDataTypesHandler", API org.h2.api.CustomDataTypesHandler). -changelog_1055_li=Added support for invisible columns. -changelog_1056_li=Added an ENUM data type, with syntax similar to that of MySQL. -changelog_1057_li=MVStore\: for object data types, the cache size memory estimation was sometimes far off in a read-only scenario. This could result in inefficient cache usage. -changelog_1058_h2=Version 1.4.194 (2017-03-10) -changelog_1059_li=Issue \#453\: MVStore setCacheSize() should also limit the cacheChunkRef. 
-changelog_1060_li=Issue \#448\: Newly added TO_DATE and TO_TIMESTAMP functions have wrong datatype. -changelog_1061_li=The "nioMemLZF" filesystem now supports an extra option "nioMemLZF\:12\:" to tweak the size of the compress later cache. -changelog_1062_li=Various multi-threading fixes and optimisations to the "nioMemLZF" filesystem. -changelog_1063_strong=[API CHANGE] \#439\: the JDBC type of TIMESTAMP WITH TIME ZONE changed from Types.OTHER (1111) to Types.TIMESTAMP_WITH_TIMEZONE (2014) -changelog_1064_li=\#430\: Subquery not cached if number of rows exceeds MAX_MEMORY_ROWS. -changelog_1065_li=\#411\: "TIMEZONE" should be "TIME ZONE" in type "TIMESTAMP WITH TIMEZONE". -changelog_1066_li=PR \#418, Implement Connection\#createArrayOf and PreparedStatement\#setArray. -changelog_1067_li=PR \#427, Add MySQL compatibility functions UNIX_TIMESTAMP, FROM_UNIXTIME and DATE. -changelog_1068_li=\#429\: Tables not found \: Fix some Turkish locale bugs around uppercasing. -changelog_1069_li=Fixed bug in metadata locking, obscure combination of DDL and SELECT SEQUENCE.NEXTVAL required. -changelog_1070_li=Added index hints\: SELECT * FROM TEST USE INDEX (idx1, idx2). -changelog_1071_li=Add a test case to ensure that spatial index is used with and order by command by Fortin N. -changelog_1072_li=Fix multi-threaded mode update exception "NullPointerException", test case by Anatolii K. -changelog_1073_li=Fix multi-threaded mode insert exception "Unique index or primary key violation", test case by Anatolii K. -changelog_1074_li=Implement ILIKE operator for case-insensitive matching. -changelog_1075_li=Optimise LIKE queries for the common cases of '%Foo' and '%Foo%'. -changelog_1076_li=Issue \#387\: H2 MSSQL Compatibility Mode - Support uniqueidentifier. -changelog_1077_li=Issue \#401\: NPE in "SELECT DISTINCT * ORDER BY". -changelog_1078_li=Added BITGET function. -changelog_1079_li=Fixed bug in FilePathRetryOnInterrupt that caused infinite loop. 
-changelog_1080_li=PR \#389, Handle LocalTime with nanosecond resolution, patch by katzyn. -changelog_1081_li=PR \#382, Recover for "page store" H2 breaks LOBs consistency, patch by vitalus. -changelog_1082_li=PR \#393, Run tests on Travis, patch by marschall. -changelog_1083_li=Fix bug in REGEX_REPLACE, not parsing the mode parameter. -changelog_1084_li=ResultSet.getObject(..., Class) threw a ClassNotFoundException if the JTS suite was not in the classpath. -changelog_1085_li=File systems\: the "cache\:" file system, and the compressed in-memory file systems memLZF and nioMemLZF did not correctly support concurrent reading and writing. -changelog_1086_li=TIMESTAMP WITH TIMEZONE\: serialization for the PageStore was broken. -changelog_1087_h2=Version 1.4.193 (2016-10-31) -changelog_1088_li=PR \#386\: Add JSR-310 Support (introduces JTS dependency fixed in 1.4.194) -changelog_1089_li=WARNING\: THE MERGE BELOW WILL AFFECT ANY 'TIMESTAMP WITH TIMEZONE' INDEXES. You will need to drop and recreate any such indexes. -changelog_1090_li=PR \#364\: fix compare TIMESTAMP WITH TIMEZONE -changelog_1091_li=Fix bug in picking the right index for INSERT..ON DUPLICATE KEY UPDATE when there are both UNIQUE and PRIMARY KEY constraints. -changelog_1092_li=Issue \#380\: Error Analyzer doesn't show source code -changelog_1093_li=Remove the "TIMESTAMP UTC" datatype, an experiment that was never finished. -changelog_1094_li=PR \#363\: Added support to define last IDENTIFIER on a Trigger. -changelog_1095_li=PR \#366\: Tests for timestamps -changelog_1096_li=PR \#361\: Improve TimestampWithTimeZone javadoc -changelog_1097_li=PR \#360\: Change getters in TimestampWithTimeZone to int -changelog_1098_li=PR \#359\: Added missing source encoding. Assuming UTF-8. 
-changelog_1099_li=PR \#353\: Add support for converting JAVA_OBJECT to UUID -changelog_1100_li=PR \#358\: Add support for getObject(int|String, Class) -changelog_1101_li=PR \#357\: Server\: use xdg-open to open the WebConsole in the user's preferred browser on Linux -changelog_1102_li=PR \#356\: Support for BEFORE and AFTER clauses when using multiple columns in ALTER TABLE ADD -changelog_1103_li=PR \#351\: Respect format codes from Bind message when sending results -changelog_1104_li=ignore summary line when compiling stored procedure -changelog_1105_li=PR \#348\: pg\: send RowDescription in response to Describe (statement variant), patch by kostya-sh -changelog_1106_li=PR \#337\: Update russian translation, patch by avp1983 -changelog_1107_li=PR \#329\: Update to servlet API version 3.1.0 from 3.0.1, patch by Mat Booth -changelog_1108_li=PR \#331\: ChangeFileEncryption progress logging ignores -quiet flag, patch by Stefan Bodewig -changelog_1109_li=PR \#325\: Make Row an interface -changelog_1110_li=PR \#323\: Regular expression functions (REGEXP_REPLACE, REGEXP_LIKE) enhancement, patch by Akkuzin -changelog_1111_li=Use System.nanoTime for measuring query statistics -changelog_1112_li=Issue \#324\: Deadlock when sending BLOBs over TCP -changelog_1113_li=Fix for creating and accessing views in MULTITHREADED mode, test-case courtesy of Daniel Rosenbaum -changelog_1114_li=Issue \#266\: Spatial index not updating, fixed by merging PR \#267 -changelog_1115_li=PR \#302\: add support for "with"-subqueries into "join" & "sub-query" statements -changelog_1116_li=Issue \#299\: Nested derived tables did not always work as expected. -changelog_1117_li=Use interfaces to replace the java version templating, idea from Lukas Eder. -changelog_1118_li=Issue \#295\: JdbcResultSet.getObject(int, Class) returns null instead of throwing. -changelog_1119_li=Mac OS X\: Console tool process did not stop on exit. -changelog_1120_li=MVStoreTool\: add "repair" feature. 
-changelog_1121_li=Garbage collection of unused chunks should be faster still. -changelog_1122_li=MVStore / transaction store\: opening a store in read-only mode does no longer loop. -changelog_1123_li=MVStore\: disabled the file system cache by default, because it limits concurrency when using larger databases and many threads. To re-enable, use the file name prefix "cache\:". -changelog_1124_li=MVStore\: add feature to set the cache concurrency. -changelog_1125_li=File system nioMemFS\: support concurrent reads. -changelog_1126_li=File systems\: the compressed in-memory file systems now compress better. -changelog_1127_li=LIRS cache\: improved hit rate because now added entries get hot if they were in the non-resident part of the cache before. -changelog_1128_h2=Version 1.4.192 Beta (2016-05-26) -changelog_1129_li=Java 6 is no longer supported (the jar files are compiled for Java 7). -changelog_1130_li=Garbage collection of unused chunks should now be faster. -changelog_1131_li=Prevent people using unsupported combination of auto-increment columns and clustering mode. -changelog_1132_li=Support for DB2 time format, patch by Niklas Mehner -changelog_1133_li=Added support for Connection.setClientInfo() in compatibility modes for DB2, Postgresql, Oracle and MySQL. -changelog_1134_li=Issue \#249\: Clarify license declaration in Maven POM xml -changelog_1135_li=Fix NullPointerException in querying spatial data through a sub-select. -changelog_1136_li=Fix bug where a lock on the SYS table was not released when closing a session that contained a temp table with an LOB column. 
-changelog_1137_li=Issue \#255\: ConcurrentModificationException with multiple threads in embedded mode and temporary LOBs -changelog_1138_li=Issue \#235\: Anonymous SSL connections fail in many situations -changelog_1139_li=Fix race condition in FILE_LOCK\=SOCKET, which could result in the watchdog thread not running -changelog_1140_li=Experimental support for datatype TIMESTAMP WITH TIMEZONE -changelog_1141_li=Add support for ALTER TABLE ... RENAME CONSTRAINT .. TO ... -changelog_1142_li=Add support for PostgreSQL ALTER TABLE ... RENAME COLUMN .. TO ... -changelog_1143_li=Add support for ALTER SCHEMA [ IF EXISTS ] -changelog_1144_li=Add support for ALTER TABLE [ IF EXISTS ] -changelog_1145_li=Add support for ALTER VIEW [ IF EXISTS ] -changelog_1146_li=Add support for ALTER INDEX [ IF EXISTS ] -changelog_1147_li=Add support for ALTER SEQUENCE [ IF EXISTS ] -changelog_1148_li=Improve performance of cleaning up temp tables - patch from Eric Faulhaber. -changelog_1149_li=Fix bug where table locks were not dropped when the connection closed -changelog_1150_li=Fix extra CPU usage caused by query planner enhancement in 1.4.191 -changelog_1151_li=improve performance of queries that use LIKE 'foo%' - 10x in the case of one of my queries -changelog_1152_li=The function IFNULL did not always return the result in the right data type. -changelog_1153_li=Issue \#231\: Possible infinite loop when initializing the ObjectDataType class when concurrently writing into MVStore. -changelog_1154_h2=Version 1.4.191 Beta (2016-01-21) -changelog_1155_li=TO_DATE and TO_TIMESTAMP functions. Thanks a lot to Sam Blume for the patch\! -changelog_1156_li=Issue \#229\: DATEDIFF does not work for 'WEEK'. -changelog_1157_li=Issue \#156\: Add support for getGeneratedKeys() when executing commands via PreparedStatement\#executeBatch. -changelog_1158_li=Issue \#195\: The new Maven uses a .cmd file instead of a .bat file. 
-changelog_1159_li=Issue \#212\: EXPLAIN PLAN for UPDATE statement did not display LIMIT expression. -changelog_1160_li=Support OFFSET without LIMIT in SELECT. -changelog_1161_li=Improve error message for METHOD_NOT_FOUND_1/90087. -changelog_1162_li=CLOB and BLOB objects of removed rows were sometimes kept in the database file. -changelog_1163_li=Server mode\: executing "shutdown" left a thread on the server. -changelog_1164_li=The condition "in(select...)" did not work correctly in some cases if the subquery had an "order by". -changelog_1165_li=Issue \#184\: The Platform-independent zip had Windows line endings in Linux scripts. -changelog_1166_li=Issue \#186\: The "script" command did not include sequences of temporary tables. -changelog_1167_li=Issue \#115\: to_char fails with pattern FM0D099. -changelog_1168_h2=Version 1.4.190 Beta (2015-10-11) -changelog_1169_li=Pull request \#183\: optimizer hints (so far without special SQL syntax). -changelog_1170_li=Issue \#180\: In MVCC mode, executing UPDATE and SELECT ... FOR UPDATE simultaneously silently can drop rows. -changelog_1171_li=PageStore storage\: the cooperative file locking mechanism did not always work as expected (with very slow computers). -changelog_1172_li=Temporary CLOB and BLOB objects are now removed while the database is open (and not just when closing the database). -changelog_1173_li=MVStore CLOB and BLOB larger than about 25 MB\: An exception could be thrown when using the MVStore storage. -changelog_1174_li=Add FILE_WRITE function. Patch provided by Nicolas Fortin (Lab-STICC - CNRS UMR 6285 and Ecole Centrale de Nantes) -changelog_1175_h2=Version 1.4.189 Beta (2015-09-13) -changelog_1176_li=Add support for dropping multiple columns in ALTER TABLE DROP COLUMN... -changelog_1177_li=Fix bug in XA management when doing rollback after prepare. Patch by Stephane Lacoin. 
-changelog_1178_li=MVStore CLOB and BLOB\: An exception with the message "Block not found" could be thrown when using the MVStore storage, when copying LOB objects (for example due to "alter table" on a table with a LOB object), and then re-opening the database. -changelog_1179_li=Fix for issue \#171\: Broken QueryStatisticsData duration data when trace level smaller than TraceSystem.INFO -changelog_1180_li=Pull request \#170\: Added SET QUERY_STATISTICS_MAX_ENTRIES -changelog_1181_li=Pull request \#165\: Fix compatibility postgresql function string_agg -changelog_1182_li=Pull request \#163\: improved performance when not using the default timezone. -changelog_1183_li=Local temporary tables with many rows did not work correctly due to automatic analyze. -changelog_1184_li=Server mode\: concurrently using the same connection could throw an exception "Connection is broken\: unexpected status". -changelog_1185_li=Performance improvement for metadata queries that join against the COLUMNS metadata table. -changelog_1186_li=An ArrayIndexOutOfBoundsException was thrown in some cases when opening an old version 1.3 database, or an 1.4 database with both "mv_store\=false" and the system property "h2.storeLocalTime" set to false. It mainly showed up with an index on a time, date, or timestamp column. The system property "h2.storeLocalTime" is no longer supported (MVStore databases always store local time, and PageStore now databases never do). -changelog_1187_h2=Version 1.4.188 Beta (2015-08-01) -changelog_1188_li=Server mode\: CLOB processing for texts larger than about 1 MB sometimes did not work. -changelog_1189_li=Server mode\: BLOB processing for binaries larger than 2 GB did not work. -changelog_1190_li=Multi-threaded processing\: concurrent deleting the same row could throw the exception "Row not found when trying to delete". -changelog_1191_li=MVStore transactions\: a thread could see a change of a different thread within a different map. Pull request \#153. 
-changelog_1192_li=H2 Console\: improved IBM DB2 compatibility. -changelog_1193_li=A thread deadlock detector (disabled by default) can help detect and analyze Java level deadlocks. To enable, set the system property "h2.threadDeadlockDetector" to true. -changelog_1194_li=Performance improvement for metadata queries that join against the COLUMNS metadata table. -changelog_1195_li=MVStore\: power failure could corrupt the store, if writes were re-ordered. -changelog_1196_li=For compatibility with other databases, support for (double and float) -0.0 has been removed. 0.0 is used instead. -changelog_1197_li=Fix for \#134, Column name with a \# character. Patch by bradmesserle. -changelog_1198_li=In version 1.4.186, "order by" was broken in some cases due to the change "Make the planner use indexes for sorting when doing a GROUP BY". The change was reverted. -changelog_1199_li=Pull request \#146\: Improved CompareMode. -changelog_1200_li=Fix for \#144, JdbcResultSet.setFetchDirection() throws "Feature not supported". -changelog_1201_li=Fix for issue \#143, deadlock between two sessions hitting the same sequence on a column. -changelog_1202_li=Pull request \#137\: SourceCompiler should not throw a syntax error on javac warning. -changelog_1203_li=MVStore\: out of memory while storing could corrupt the store (theoretically, a rollback would be possible, but this case is not yet implemented). -changelog_1204_li=The compressed in-memory file systems (memLZF\:) could not be used in the MVStore. -changelog_1205_li=The in-memory file systems (memFS\: and memLZF\:) did not support files larger than 2 GB due to an integer overflow. -changelog_1206_li=Pull request \#138\: Added the simple Oracle function\: ORA_HASH (+ tests) \#138 -changelog_1207_li=Timestamps in the trace log follow the format (yyyy-MM-dd HH\:mm\:ss) instead of the old format (MM-dd HH\:mm\:ss). Patch by Richard Bull. 
-changelog_1208_li=Pull request \#125\: Improved Oracle compatibility with "truncate" with timestamps and dates. -changelog_1209_li=Pull request \#127\: Linked tables now support geometry columns. -changelog_1210_li=ABS(CAST(0.0 AS DOUBLE)) returned -0.0 instead of 0.0. -changelog_1211_li=BNF auto-completion failed with unquoted identifiers. -changelog_1212_li=Oracle compatibility\: empty strings were not converted to NULL when using prepared statements. -changelog_1213_li=PostgreSQL compatibility\: new syntax "create index ... using ...". -changelog_1214_li=There was a bug in DataType.convertToValue when reading a ResultSet from a ResultSet. -changelog_1215_li=Pull request \#116\: Improved concurrency in the trace system. -changelog_1216_li=Issue 609\: the spatial index did not support NULL. -changelog_1217_li=Granting a schema is now supported. -changelog_1218_li=Linked tables did not work when a function-based index is present (Oracle). -changelog_1219_li=Creating a user with a null password, salt, or hash threw a NullPointerException. -changelog_1220_li=Foreign key\: don't add a single column index if column is leading key of existing index. -changelog_1221_li=Pull request \#4\: Creating and removing temporary tables was getting slower and slower over time, because an internal object id was allocated but never de-allocated. -changelog_1222_li=Issue 609\: the spatial index did not support NULL with update and delete operations. -changelog_1223_li=Pull request \#2\: Add external metadata type support (table type "external") -changelog_1224_li=MS SQL Server\: the CONVERT method did not work in views and derived tables. -changelog_1225_li=Java 8 compatibility for "regexp_replace". 
-changelog_1226_li=When in cluster mode, and one of the nodes goes down, we need to log the problem with priority "error", not "debug" -changelog_1227_h2=Version 1.4.187 Beta (2015-04-10) -changelog_1228_li=MVStore\: concurrent changes to the same row could result in the exception "The transaction log might be corrupt for key ...". This could only be reproduced with 3 or more threads. -changelog_1229_li=Results with CLOB or BLOB data are no longer reused. -changelog_1230_li=References to BLOB and CLOB objects now have a timeout. The configuration setting is LOB_TIMEOUT (default 5 minutes). This should avoid growing the database file if there are many queries that return BLOB or CLOB objects, and the database is not closed for a longer time. -changelog_1231_li=MVStore\: when committing a session that removed LOB values, changes were flushed unnecessarily. -changelog_1232_li=Issue 610\: possible integer overflow in WriteBuffer.grow(). -changelog_1233_li=Issue 609\: the spatial index did not support NULL (ClassCastException). -changelog_1234_li=MVStore\: in some cases, CLOB/BLOB data blocks were removed incorrectly when opening a database. -changelog_1235_li=MVStore\: updates that affected many rows were slow in some cases if there was a secondary index. -changelog_1236_li=Using "runscript" with autocommit disabled could result in a lock timeout on the internal table "SYS". -changelog_1237_li=Issue 603\: there was a memory leak when using H2 in a web application. Apache Tomcat logged an error message\: "The web application ... created a ThreadLocal with key of type [org.h2.util.DateTimeUtils$1]". -changelog_1238_li=When using the MVStore, running a SQL script generated by the Recover tool from a PageStore file failed with a strange error message (NullPointerException), now a clear error message is shown. -changelog_1239_li=Issue 605\: with version 1.4.186, opening a database could result in an endless loop in LobStorageMap.init. 
-changelog_1240_li=Queries that use the same table alias multiple times now work. Before, the select expression list was expanded incorrectly. Example\: "select * from a as x, b as x". -changelog_1241_li=The MySQL compatibility feature "insert ... on duplicate key update" did not work with a non-default schema. -changelog_1242_li=Issue 599\: the condition "in(x, y)" could not be used in the select list when using "group by". -changelog_1243_li=The LIRS cache could grow larger than the allocated memory. -changelog_1244_li=A new file system implementation that re-opens the file if it was closed due to the application calling Thread.interrupt(). File name prefix "retry\:". Please note it is strongly recommended to avoid calling Thread.interrupt; this is a problem for various libraries, including Apache Lucene. -changelog_1245_li=MVStore\: use RandomAccessFile file system if the file name starts with "file\:". -changelog_1246_li=Allow DATEADD to take a long value for count when manipulating milliseconds. -changelog_1247_li=When using MV_STORE\=TRUE and the SET CACHE_SIZE setting, the cache size was incorrectly set, so that it was effectively 1024 times smaller than it should be. -changelog_1248_li=Concurrent CREATE TABLE... IF NOT EXISTS in the presence of MULTI_THREAD\=TRUE could throw an exception. -changelog_1249_li=Fix bug in MVStore when creating lots of temporary tables, where we could run out of transaction IDs. -changelog_1250_li=Add support for PostgreSQL STRING_AGG function. Patch by Fred Aquiles. -changelog_1251_li=Fix bug in "jdbc\:h2\:nioMemFS" isRoot() function. Also, the page size was increased to 64 KB. -changelog_1252_h2=Version 1.4.186 Beta (2015-03-02) -changelog_1253_li=The Servlet API 3.0.1 is now used, instead of 2.4. -changelog_1254_li=MVStore\: old chunks no longer removed in append-only mode. -changelog_1255_li=MVStore\: the cache for page references could grow far too big, resulting in out of memory in some cases. 
-changelog_1256_li=MVStore\: orphaned lob objects were not correctly removed in some cases, making the database grow unnecessarily. -changelog_1257_li=MVStore\: the maximum cache size was artificially limited to 2 GB (due to an integer overflow). -changelog_1258_li=MVStore / TransactionStore\: concurrent updates could result in a "Too many open transactions" exception. -changelog_1259_li=StringUtils.toUpperEnglish now has a small cache. This should speed up reading from a ResultSet when using the column name. -changelog_1260_li=MVStore\: up to 65535 open transactions are now supported. Previously, the limit was at most 65535 transactions between the oldest open and the newest open transaction (which was quite a strange limit). -changelog_1261_li=The default limit for in-place LOB objects was changed from 128 to 256 bytes. This is because each read creates a reference to a LOB, and maintaining the references is a big overhead. With the higher limit, less references are needed. -changelog_1262_li=Tables without columns didn't work. (The use case for such tables is testing.) -changelog_1263_li=The LIRS cache now resizes the table automatically in all cases and no longer needs the averageMemory configuration. -changelog_1264_li=Creating a linked table from an MVStore database to a non-MVStore database created a second (non-MVStore) database file. -changelog_1265_li=In version 1.4.184, a bug was introduced that broke queries that have both joins and wildcards, for example\: select * from dual join(select x from dual) on 1\=1 -changelog_1266_li=Issue 598\: parser fails on timestamp "24\:00\:00.1234" - prevent the creation of out-of-range time values. -changelog_1267_li=Allow declaring triggers as source code (like functions). Patch by Sylvain Cuaz. -changelog_1268_li=Make the planner use indexes for sorting when doing a GROUP BY where all of the GROUP BY columns are not mentioned in the select. Patch by Frederico (zepfred). 
-changelog_1269_li=PostgreSQL compatibility\: generate_series (as an alias for system_range). Patch by litailang. -changelog_1270_li=Fix missing "column" type in right-hand parameter in ConditionIn. Patch by Arnaud Thimel. -changelog_1271_h2=Version 1.4.185 Beta (2015-01-16) -changelog_1272_li=In version 1.4.184, "group by" ignored the table name, and could pick a select column by mistake. Example\: select 0 as x from system_range(1, 2) d group by d.x; -changelog_1273_li=New connection setting "REUSE_SPACE" (default\: true). If disabled, all changes are appended to the database file, and existing content is never overwritten. This allows to rollback to a previous state of the database by truncating the database file. -changelog_1274_li=Issue 587\: MVStore\: concurrent compaction and store operations could result in an IllegalStateException. -changelog_1275_li=Issue 594\: Profiler.copyInThread does not work properly. -changelog_1276_li=Script tool\: Now, SCRIPT ... TO is always used (for higher speed and lower disk space usage). -changelog_1277_li=Script tool\: Fix parsing of BLOCKSIZE parameter, original patch by Ken Jorissen. -changelog_1278_li=Fix bug in PageStore\#commit method - when the ignoreBigLog flag was set, the logic that cleared the flag could never be reached, resulting in performance degradation. Reported by Alexander Nesterov. -changelog_1279_li=Issue 552\: Implement BIT_AND and BIT_OR aggregate functions. -changelog_1280_h2=Version 1.4.184 Beta (2014-12-19) -changelog_1281_li=In version 1.3.183, indexes were not used if the table contains columns with a default value generated by a sequence. This includes tables with identity and auto-increment columns. This bug was introduced by supporting "rownum" in views and derived tables. -changelog_1282_li=MVStore\: imported BLOB and CLOB data sometimes disappeared. This was caused by a bug in the ObjectDataType comparison. 
-changelog_1283_li=Reading from a StreamStore now throws an IOException if the underlying data doesn't exist. -changelog_1284_li=MVStore\: if there is an exception while saving, the store is now in all cases immediately closed. -changelog_1285_li=MVStore\: the dump tool could go into an endless loop for some files. -changelog_1286_li=MVStore\: recovery for a database with many CLOB or BLOB entries is now much faster. -changelog_1287_li=Group by with a quoted select column name alias didn't work. Example\: select 1 "a" from dual group by "a" -changelog_1288_li=Auto-server mode\: the host name is now stored in the .lock.db file. -changelog_1289_h2=Version 1.4.183 Beta (2014-12-13) -changelog_1290_li=MVStore\: the default auto-commit buffer size is now about twice as big. This should reduce the database file size after inserting a lot of data. -changelog_1291_li=The built-in functions "power" and "radians" now always return a double. -changelog_1292_li=Using "row_number" or "rownum" in views or derived tables had unexpected results if the outer query contained constraints for the given view. Example\: select b.nr, b.id from (select row_number() over() as nr, a.id as id from (select id from test order by name) as a) as b where b.id \= 1 -changelog_1293_li=MVStore\: the Recover tool can now deal with more types of corruption in the file. -changelog_1294_li=MVStore\: the TransactionStore now first needs to be initialized before it can be used. -changelog_1295_li=Views and derived tables with equality and range conditions on the same columns did not work properly. example\: select x from (select x from (select 1 as x) where x > 0 and x < 2) where x \= 1 -changelog_1296_li=The database URL setting PAGE_SIZE setting is now also used for the MVStore. -changelog_1297_li=MVStore\: the default page split size for persistent stores is now 4096 (it was 16 KB so far). 
This should reduce the database file size for most situations (in some cases, less than half the size of the previous version). -changelog_1298_li=With query literals disabled, auto-analyze of a table with CLOB or BLOB did not work. -changelog_1299_li=MVStore\: use a mark and sweep GC algorithm instead of reference counting, to ensure used chunks are never overwritten, even if the reference counting algorithm does not work properly. -changelog_1300_li=In the multi-threaded mode, updating the column selectivity ("analyze") in the background sometimes did not work. -changelog_1301_li=In the multi-threaded mode, database metadata operations did sometimes not work if the schema was changed at the same time (for example, if tables were dropped). -changelog_1302_li=Some CLOB and BLOB values could no longer be read when the original row was removed (even when using the MVCC mode). -changelog_1303_li=The MVStoreTool could throw an IllegalArgumentException. -changelog_1304_li=Improved performance for some date / time / timestamp conversion operations. Thanks to Sergey Evdokimov for reporting the problem. -changelog_1305_li=H2 Console\: the built-in web server did not work properly if an unknown file was requested. -changelog_1306_li=MVStore\: the jar file is renamed to "h2-mvstore-*.jar" and is deployed to Maven separately. -changelog_1307_li=MVStore\: support for concurrent reads and writes is now enabled by default. -changelog_1308_li=Server mode\: the transfer buffer size has been changed from 16 KB to 64 KB, after it was found that this improves performance on Linux quite a lot. -changelog_1309_li=H2 Console and server mode\: SSL is now disabled and TLS is used to protect against the Poodle SSLv3 vulnerability. The system property to disable secure anonymous connections is now "h2.enableAnonymousTLS". The default certificate is still self-signed, so you need to manually install another one if you want to avoid man in the middle attacks. 
-changelog_1310_li=MVStore\: the R-tree did not correctly measure the memory usage. -changelog_1311_li=MVStore\: compacting a store with an R-tree did not always work. -changelog_1312_li=Issue 581\: When running in LOCK_MODE\=0, JdbcDatabaseMetaData\#supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED) should return false -changelog_1313_li=Fix bug which could generate deadlocks when multiple connections accessed the same table. -changelog_1314_li=Some places in the code were not respecting the value set in the "SET MAX_MEMORY_ROWS x" command -changelog_1315_li=Fix bug which could generate a NegativeArraySizeException when performing large (>40M) row union operations -changelog_1316_li=Fix "USE schema" command for MySQL compatibility, patch by mfulton -changelog_1317_li=Parse and ignore the ROW_FORMAT\=DYNAMIC MySQL syntax, patch by mfulton -changelog_1318_h2=Version 1.4.182 Beta (2014-10-17) -changelog_1319_li=MVStore\: improved error messages and logging; improved behavior if there is an error when serializing objects. -changelog_1320_li=OSGi\: the MVStore packages are now exported. -changelog_1321_li=With the MVStore option, when using multiple threads that concurrently create indexes or tables, it was relatively easy to get a lock timeout on the "SYS" table. -changelog_1322_li=When using the multi-threaded option, the exception "Unexpected code path" could be thrown, specially if the option "analyze_auto" was set to a low value. -changelog_1323_li=In the server mode, when reading from a CLOB or BLOB, if the connection was closed, a NullPointerException could be thrown instead of an exception saying the connection is closed. -changelog_1324_li=DatabaseMetaData.getProcedures and getProcedureColumns could throw an exception if a user defined class is not available. -changelog_1325_li=Issue 584\: the error message for a wrong sequence definition was wrong. 
-changelog_1326_li=CSV tool\: the rowSeparator option is no longer supported, as the same can be achieved with the lineSeparator. -changelog_1327_li=Descending indexes on MVStore tables did not work properly. -changelog_1328_li=Issue 579\: Conditions on the "_rowid_" pseudo-column didn't use an index when using the MVStore. -changelog_1329_li=Fixed documentation that "offset" and "fetch" are also keywords since version 1.4.x. -changelog_1330_li=The Long.MIN_VALUE could not be parsed for auto-increment (identity) columns. -changelog_1331_li=Issue 573\: Add implementation for Methods "isWrapperFor()" and "unwrap()" in other JDBC classes. -changelog_1332_li=Issue 572\: MySQL compatibility for "order by" in update statements. -changelog_1333_li=The change in JDBC escape processing in version 1.4.181 affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax "{t 'time'}", or "{ts 'timestamp'}", or "{d 'date'}", then both the client and the server need to be upgraded to version 1.4.181 or later. -changelog_1334_h2=Version 1.4.181 Beta (2014-08-06) -changelog_1335_li=Improved MySQL compatibility by supporting "use schema". Thanks a lot to Karl Pietrzak for the patch\! -changelog_1336_li=Writing to the trace file is now faster, specially with the debug level. -changelog_1337_li=The database option "defrag_always\=true" did not work with the MVStore. -changelog_1338_li=The JDBC escape syntax {ts 'value'} did not interpret the value as a timestamp. The same for {d 'value'} (for date) and {t 'value'} (for time). Thanks to Lukas Eder for reporting the issue. The following problem was detected after version 1.4.181 was released\: The change in JDBC escape processing affects both the parser (which is running on the server) and the JDBC API (which is running on the client). 
If you (or a tool you use) use the syntax {t 'time'}, or {ts 'timestamp'}, or {d 'date'}, then both the client and the server need to be upgraded to version 1.4.181 or later. -changelog_1339_li=File system abstraction\: support replacing existing files using move (currently not for Windows). -changelog_1340_li=The statement "shutdown defrag" now compresses the database (with the MVStore). This command can greatly reduce the file size, and is relatively fast, but is not incremental. -changelog_1341_li=The MVStore now automatically compacts the store in the background if there is no read or write activity, which should (after some time; sometimes about one minute) reduce the file size. This is still work in progress, feedback is welcome\! -changelog_1342_li=Change default value of PAGE_SIZE from 2048 to 4096 to more closely match most file systems block size (PageStore only; the MVStore already used 4096). -changelog_1343_li=Auto-scale MAX_MEMORY_ROWS and CACHE_SIZE settings by the amount of available RAM. Gives a better out of box experience for people with more powerful machines. -changelog_1344_li=Handle tabs like 4 spaces in web console, patch by Martin Grajcar. -changelog_1345_li=Issue 573\: Add implementation for Methods "isWrapperFor()" and "unwrap()" in JdbcConnection.java, patch by BigMichi1. -changelog_1346_h2=Version 1.4.180 Beta (2014-07-13) -changelog_1347_li=MVStore\: the store is now auto-compacted automatically up to some point, to avoid very large file sizes. This area is still work in progress. -changelog_1348_li=Sequences of temporary tables (auto-increment or identity columns) were persisted unnecessarily in the database file, and were not removed when re-opening the database. -changelog_1349_li=MVStore\: an IndexOutOfBoundsException could sometimes occur MVMap.openVersion when concurrently accessing the store. -changelog_1350_li=The LIRS cache now re-sizes the internal hash map if needed. 
-changelog_1351_li=Optionally persist session history in the H2 console. (patch from Martin Grajcar) -changelog_1352_li=Add client-info property to get the number of servers currently in the cluster and which servers that are available. (patch from Nikolaj Fogh) -changelog_1353_li=Fix bug in changing encrypted DB password that kept the file handle open when the wrong password was supplied. (test case from Jens Hohmuth). -changelog_1354_li=Issue 567\: H2 hangs for a long time then (sometimes) recovers. Introduce a queue when doing table locking to prevent session starvation. -cheatSheet_1000_h1=H2 Database Engine Cheat Sheet -cheatSheet_1001_h2=Using H2 -cheatSheet_1002_a=H2 -cheatSheet_1003_li=\ is open source, free to use and distribute. -cheatSheet_1004_a=Download -cheatSheet_1005_li=\: jar, installer (Windows), zip. -cheatSheet_1006_li=To start the H2 Console tool, double click the jar file, or run java -jar h2*.jar, h2.bat, or h2.sh. -cheatSheet_1007_a=A new database is automatically created -cheatSheet_1008_a=by default -cheatSheet_1009_li=. -cheatSheet_1010_a=Closing the last connection closes the database -cheatSheet_1011_li=. -cheatSheet_1012_h2=Documentation -cheatSheet_1013_p=\ Reference\: SQL grammar, functions, data types, tools, API -cheatSheet_1014_a=Features -cheatSheet_1015_p=\: fulltext search, encryption, read-only (zip/jar), CSV, auto-reconnect, triggers, user functions -cheatSheet_1016_a=Database URLs -cheatSheet_1017_a=Embedded -cheatSheet_1018_code=jdbc\:h2\:~/test -cheatSheet_1019_p=\ 'test' in the user home directory -cheatSheet_1020_code=jdbc\:h2\:/data/test -cheatSheet_1021_p=\ 'test' in the directory /data -cheatSheet_1022_code=jdbc\:h2\:test -cheatSheet_1023_p=\ in the current(\!) 
working directory -cheatSheet_1024_a=In-Memory -cheatSheet_1025_code=jdbc\:h2\:mem\:test -cheatSheet_1026_p=\ multiple connections in one process -cheatSheet_1027_code=jdbc\:h2\:mem\: -cheatSheet_1028_p=\ unnamed private; one connection -cheatSheet_1029_a=Server Mode -cheatSheet_1030_code=jdbc\:h2\:tcp\://localhost/~/test -cheatSheet_1031_p=\ user home dir -cheatSheet_1032_code=jdbc\:h2\:tcp\://localhost//data/test -cheatSheet_1033_p=\ absolute dir -cheatSheet_1034_a=Server start -cheatSheet_1035_p=\:java -cp *.jar org.h2.tools.Server -cheatSheet_1036_a=Settings -cheatSheet_1037_code=jdbc\:h2\:..;MODE\=MySQL -cheatSheet_1038_a=compatibility (or HSQLDB,...) -cheatSheet_1039_code=jdbc\:h2\:..;TRACE_LEVEL_FILE\=3 -cheatSheet_1040_a=log to *.trace.db -cheatSheet_1041_a=Using the JDBC API -cheatSheet_1042_a=Connection Pool -cheatSheet_1043_a=Maven 2 -cheatSheet_1044_a=Hibernate -cheatSheet_1045_p=\ hibernate.cfg.xml (or use the HSQLDialect)\: -cheatSheet_1046_a=TopLink and Glassfish -cheatSheet_1047_p=\ Datasource class\: org.h2.jdbcx.JdbcDataSource -cheatSheet_1048_code=oracle.toplink.essentials.platform. -cheatSheet_1049_code=database.H2Platform -download_1000_h1=Downloads -download_1001_h3=Version 1.4.196 (2017-06-10) -download_1002_a=Windows Installer -download_1003_a=Platform-Independent Zip -download_1004_h3=Version 1.4.195 (2017-04-23), Last Stable -download_1005_a=Windows Installer -download_1006_a=Platform-Independent Zip -download_1007_h3=Old Versions -download_1008_a=Platform-Independent Zip -download_1009_h3=Jar File -download_1010_a=Maven.org -download_1011_a=Sourceforge.net -download_1012_h3=Maven (Binary, Javadoc, and Source) -download_1013_a=Binary -download_1014_a=Javadoc -download_1015_a=Sources -download_1016_h3=Database Upgrade Helper File -download_1017_a=Upgrade database from 1.1 to the current version -download_1018_h3=Git Source Repository -download_1019_a=Github -download_1020_p=\ For details about changes, see the Change Log. 
-download_1021_h3=News and Project Information -download_1022_a=Atom Feed -download_1023_a=RSS Feed -download_1024_a=DOAP File -download_1025_p=\ (what is this) -faq_1000_h1=Frequently Asked Questions -faq_1001_a=\ I Have a Problem or Feature Request -faq_1002_a=\ Are there Known Bugs? When is the Next Release? -faq_1003_a=\ Is this Database Engine Open Source? -faq_1004_a=\ Is Commercial Support Available? -faq_1005_a=\ How to Create a New Database? -faq_1006_a=\ How to Connect to a Database? -faq_1007_a=\ Where are the Database Files Stored? -faq_1008_a=\ What is the Size Limit (Maximum Size) of a Database? -faq_1009_a=\ Is it Reliable? -faq_1010_a=\ Why is Opening my Database Slow? -faq_1011_a=\ My Query is Slow -faq_1012_a=\ H2 is Very Slow -faq_1013_a=\ Column Names are Incorrect? -faq_1014_a=\ Float is Double? -faq_1015_a=\ Is the GCJ Version Stable? Faster? -faq_1016_a=\ How to Translate this Project? -faq_1017_a=\ How to Contribute to this Project? -faq_1018_h3=I Have a Problem or Feature Request -faq_1019_p=\ Please read the support checklist. -faq_1020_h3=Are there Known Bugs? When is the Next Release? -faq_1021_p=\ Usually, bugs get fixed as they are found. There is a release every few weeks. Here is the list of known and confirmed issues\: -faq_1022_li=When opening a database file in a timezone that has different daylight saving rules\: the time part of dates where the daylight saving doesn't match will differ. This is not a problem within regions that use the same rules (such as, within USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new timezone. -faq_1023_li=Apache Harmony\: there seems to be a bug in Harmony that affects H2. See HARMONY-6505. -faq_1024_li=Tomcat and Glassfish 3 set most static fields (final or non-final) to null when unloading a web application. 
This can cause a NullPointerException in H2 versions 1.1.107 and older, and may still not work in newer versions. Please report it if you run into this issue. In Tomcat >\= 6.0 this behavior can be disabled by setting the system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES\=false, however Tomcat may then run out of memory. A known workaround is to put the h2*.jar file in a shared lib directory (common/lib). -faq_1025_li=Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3. -faq_1026_li=When using Install4j before 4.1.4 on Linux and enabling pack200, the h2*.jar becomes corrupted by the install process, causing application failure. A workaround is to add an empty file h2*.jar.nopack next to the h2*.jar file. This problem is solved in Install4j 4.1.4. -faq_1027_p=\ For a complete list, see Open Issues. -faq_1028_h3=Is this Database Engine Open Source? -faq_1029_p=\ Yes. It is free to use and distribute, and the source code is included. See also under license. -faq_1030_h3=Is Commercial Support Available? -faq_1031_p=\ No, currently commercial support is not available. -faq_1032_h3=How to Create a New Database? -faq_1033_p=\ By default, a new database is automatically created if it does not yet exist. See Creating New Databases. -faq_1034_h3=How to Connect to a Database? -faq_1035_p=\ The database driver is org.h2.Driver, and the database URL starts with jdbc\:h2\:. To connect to a database using JDBC, use the following code\: -faq_1036_h3=Where are the Database Files Stored? -faq_1037_p=\ When using database URLs like jdbc\:h2\:~/test, the database is stored in the user directory. For Windows, this is usually C\:\\Documents and Settings\\<userName> or C\:\\Users\\<userName>. 
If the base directory is not set (as in jdbc\:h2\:./test), the database files are stored in the directory where the application is started (the current working directory). When using the H2 Console application from the start menu, this is <Installation Directory>/bin. The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL jdbc\:h2\:file\:./data/sample, the database is stored in the directory data (relative to the current working directory). The directory is created automatically if it does not yet exist. It is also possible to use the fully qualified directory name (and for Windows, drive name). Example\: jdbc\:h2\:file\:C\:/data/test -faq_1038_h3=What is the Size Limit (Maximum Size) of a Database? -faq_1039_p=\ See Limits and Limitations. -faq_1040_h3=Is it Reliable? -faq_1041_p=\ That is not easy to say. It is still a quite new product. A lot of tests have been written, and the code coverage of these tests is higher than 80% for each package. Randomized stress tests are run regularly. But there are probably still bugs that have not yet been found (as with most software). Some features are known to be dangerous, they are only supported for situations where performance is more important than reliability. Those dangerous features are\: -faq_1042_li=Disabling the transaction log or FileDescriptor.sync() using LOG\=0 or LOG\=1. -faq_1043_li=Using the transaction isolation level READ_UNCOMMITTED (LOCK_MODE 0) while at the same time using multiple connections. -faq_1044_li=Disabling database file protection using (setting FILE_LOCK to NO in the database URL). -faq_1045_li=Disabling referential integrity using SET REFERENTIAL_INTEGRITY FALSE. -faq_1046_p=\ In addition to that, running out of memory should be avoided. In older versions, OutOfMemory errors while using the database could corrupt a database. -faq_1047_p=\ This database is well tested using automated test cases. 
The tests run every night and run for more than one hour. But not all areas of this database are equally well tested. When using one of the following features for production, please ensure your use case is well tested (if possible with automated test cases). The areas that are not well tested are\: -faq_1048_li=Platforms other than Windows, Linux, Mac OS X, or JVMs other than Oracle 1.6, 1.7, 1.8. -faq_1049_li=The features AUTO_SERVER and AUTO_RECONNECT. -faq_1050_li=Cluster mode, 2-phase commit, savepoints. -faq_1051_li=Fulltext search. -faq_1052_li=Operations on LOBs over 2 GB. -faq_1053_li=The optimizer may not always select the best plan. -faq_1054_li=Using the ICU4J collator. -faq_1055_p=\ Areas considered experimental are\: -faq_1056_li=The PostgreSQL server -faq_1057_li=Clustering (there are cases where transaction isolation can be broken due to timing issues, for example one session overtaking another session). -faq_1058_li=Multi-threading within the engine using SET MULTI_THREADED\=1. -faq_1059_li=Compatibility modes for other databases (only some features are implemented). -faq_1060_li=The soft reference cache (CACHE_TYPE\=SOFT_LRU). It might not improve performance, and out of memory issues have been reported. -faq_1061_p=\ Some users have reported that after a power failure, the database cannot be opened sometimes. In this case, use a backup of the database or the Recover tool. Please report such problems. The plan is that the database automatically recovers in all situations. -faq_1062_h3=Why is Opening my Database Slow? -faq_1063_p=\ To find out what the problem is, use the H2 Console and click on "Test Connection" instead of "Login". After the "Login Successful" appears, click on it (it's a link). This will list the top stack traces. Then either analyze this yourself, or post those stack traces in the Google Group. -faq_1064_p=\ Other possible reasons are\: the database is very big (many GB), or contains linked tables that are slow to open. 
-faq_1065_h3=My Query is Slow -faq_1066_p=\ Slow SELECT (or DELETE, UPDATE, MERGE) statement can have multiple reasons. Follow this checklist\: -faq_1067_li=Run ANALYZE (see documentation for details). -faq_1068_li=Run the query with EXPLAIN and check if indexes are used (see documentation for details). -faq_1069_li=If required, create additional indexes and try again using ANALYZE and EXPLAIN. -faq_1070_li=If it doesn't help please report the problem. -faq_1071_h3=H2 is Very Slow -faq_1072_p=\ By default, H2 closes the database when the last connection is closed. If your application closes the only connection after each operation, the database is opened and closed a lot, which is quite slow. There are multiple ways to solve this problem, see Database Performance Tuning. -faq_1073_h3=Column Names are Incorrect? -faq_1074_p=\ For the query SELECT ID AS X FROM TEST the method ResultSetMetaData.getColumnName() returns ID, I expect it to return X. What's wrong? -faq_1075_p=\ This is not a bug. According to the JDBC specification, the method ResultSetMetaData.getColumnName() should return the name of the column and not the alias name. If you need the alias name, use ResultSetMetaData.getColumnLabel(). Some other databases don't work like this yet (they don't follow the JDBC specification). If you need compatibility with those databases, use the Compatibility Mode, or append ;ALIAS_COLUMN_NAME\=TRUE to the database URL. -faq_1076_p=\ This also applies to DatabaseMetaData calls that return a result set. The columns in the JDBC API are column labels, not column names. -faq_1077_h3=Float is Double? -faq_1078_p=\ For a table defined as CREATE TABLE TEST(X FLOAT) the method ResultSet.getObject() returns a java.lang.Double, I expect it to return a java.lang.Float. What's wrong? -faq_1079_p=\ This is not a bug. According to the JDBC specification, the JDBC data type FLOAT is equivalent to DOUBLE, and both are mapped to java.lang.Double. 
See also Mapping SQL and Java Types - 8.3.10 FLOAT. -faq_1080_h3=Is the GCJ Version Stable? Faster? -faq_1081_p=\ The GCJ version is not as stable as the Java version. When running the regression test with the GCJ version, sometimes the application just stops at what seems to be a random point without error message. Currently, the GCJ version is also slower than when using the Sun VM. However, the startup of the GCJ version is faster than when using a VM. -faq_1082_h3=How to Translate this Project? -faq_1083_p=\ For more information, see Build/Translating. -faq_1084_h3=How to Contribute to this Project? -faq_1085_p=\ There are various way to help develop an open source project like H2. The first step could be to translate the error messages and the GUI to your native language. Then, you could provide patches. Please start with small patches. That could be adding a test case to improve the code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the feature request list. I suggest to start with very small features that are easy to implement. Keep in mind to provide test cases as well. 
-features_1000_h1=Features -features_1001_a=\ Feature List -features_1002_a=\ Comparison to Other Database Engines -features_1003_a=\ H2 in Use -features_1004_a=\ Connection Modes -features_1005_a=\ Database URL Overview -features_1006_a=\ Connecting to an Embedded (Local) Database -features_1007_a=\ In-Memory Databases -features_1008_a=\ Database Files Encryption -features_1009_a=\ Database File Locking -features_1010_a=\ Opening a Database Only if it Already Exists -features_1011_a=\ Closing a Database -features_1012_a=\ Ignore Unknown Settings -features_1013_a=\ Changing Other Settings when Opening a Connection -features_1014_a=\ Custom File Access Mode -features_1015_a=\ Multiple Connections -features_1016_a=\ Database File Layout -features_1017_a=\ Logging and Recovery -features_1018_a=\ Compatibility -features_1019_a=\ Auto-Reconnect -features_1020_a=\ Automatic Mixed Mode -features_1021_a=\ Page Size -features_1022_a=\ Using the Trace Options -features_1023_a=\ Using Other Logging APIs -features_1024_a=\ Read Only Databases -features_1025_a=\ Read Only Databases in Zip or Jar File -features_1026_a=\ Computed Columns / Function Based Index -features_1027_a=\ Multi-Dimensional Indexes -features_1028_a=\ User-Defined Functions and Stored Procedures -features_1029_a=\ Pluggable or User-Defined Tables -features_1030_a=\ Triggers -features_1031_a=\ Compacting a Database -features_1032_a=\ Cache Settings -features_1033_h2=Feature List -features_1034_h3=Main Features -features_1035_li=Very fast database engine -features_1036_li=Open source -features_1037_li=Written in Java -features_1038_li=Supports standard SQL, JDBC API -features_1039_li=Embedded and Server mode, Clustering support -features_1040_li=Strong security features -features_1041_li=The PostgreSQL ODBC driver can be used -features_1042_li=Multi version concurrency -features_1043_h3=Additional Features -features_1044_li=Disk based or in-memory databases and tables, read-only database support, temporary 
tables -features_1045_li=Transaction support (read committed), 2-phase-commit -features_1046_li=Multiple connections, table level locking -features_1047_li=Cost based optimizer, using a genetic algorithm for complex queries, zero-administration -features_1048_li=Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set -features_1049_li=Encrypted database (AES), SHA-256 password encryption, encryption functions, SSL -features_1050_h3=SQL Support -features_1051_li=Support for multiple schemas, information schema -features_1052_li=Referential integrity / foreign key constraints with cascade, check constraints -features_1053_li=Inner and outer joins, subqueries, read only views and inline views -features_1054_li=Triggers and Java functions / stored procedures -features_1055_li=Many built-in functions, including XML and lossless data compression -features_1056_li=Wide range of data types including large objects (BLOB/CLOB) and arrays -features_1057_li=Sequence and autoincrement columns, computed columns (can be used for function based indexes) -features_1058_code=ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP -features_1059_li=Collation support, including support for the ICU4J library -features_1060_li=Support for users and roles -features_1061_li=Compatibility modes for IBM DB2, Apache Derby, HSQLDB, MS SQL Server, MySQL, Oracle, and PostgreSQL. 
-features_1062_h3=Security Features -features_1063_li=Includes a solution for the SQL injection problem -features_1064_li=User password authentication uses SHA-256 and salt -features_1065_li=For server mode connections, user passwords are never transmitted in plain text over the network (even when using insecure connections; this only applies to the TCP server and not to the H2 Console however; it also doesn't apply if you set the password in the database URL) -features_1066_li=All database files (including script files that can be used to backup data) can be encrypted using the AES-128 encryption algorithm -features_1067_li=The remote JDBC driver supports TCP/IP connections over TLS -features_1068_li=The built-in web server supports connections over TLS -features_1069_li=Passwords can be sent to the database using char arrays instead of Strings -features_1070_h3=Other Features and Tools -features_1071_li=Small footprint (smaller than 1.5 MB), low memory requirements -features_1072_li=Multiple index types (b-tree, tree, hash) -features_1073_li=Support for multi-dimensional indexes -features_1074_li=CSV (comma separated values) file support -features_1075_li=Support for linked tables, and a built-in virtual 'range' table -features_1076_li=Supports the EXPLAIN PLAN statement; sophisticated trace options -features_1077_li=Database closing can be delayed or disabled to improve the performance -features_1078_li=Web-based Console application (translated to many languages) with autocomplete -features_1079_li=The database can generate SQL script files -features_1080_li=Contains a recovery tool that can dump the contents of the database -features_1081_li=Support for variables (for example to calculate running totals) -features_1082_li=Automatic re-compilation of prepared statements -features_1083_li=Uses a small number of database files -features_1084_li=Uses a checksum for each record and log entry for data integrity -features_1085_li=Well tested (high code coverage, 
randomized stress tests) -features_1086_h2=Comparison to Other Database Engines -features_1087_p=\ This comparison is based on H2 1.3, Apache Derby version 10.8, HSQLDB 2.2, MySQL 5.5, PostgreSQL 9.0. -features_1088_th=Feature -features_1089_th=H2 -features_1090_th=Derby -features_1091_th=HSQLDB -features_1092_th=MySQL -features_1093_th=PostgreSQL -features_1094_td=Pure Java -features_1095_td=Yes -features_1096_td=Yes -features_1097_td=Yes -features_1098_td=No -features_1099_td=No -features_1100_td=Embedded Mode (Java) -features_1101_td=Yes -features_1102_td=Yes -features_1103_td=Yes -features_1104_td=No -features_1105_td=No -features_1106_td=In-Memory Mode -features_1107_td=Yes -features_1108_td=Yes -features_1109_td=Yes -features_1110_td=No -features_1111_td=No -features_1112_td=Explain Plan -features_1113_td=Yes -features_1114_td=Yes *12 -features_1115_td=Yes -features_1116_td=Yes -features_1117_td=Yes -features_1118_td=Built-in Clustering / Replication -features_1119_td=Yes -features_1120_td=Yes -features_1121_td=No -features_1122_td=Yes -features_1123_td=Yes -features_1124_td=Encrypted Database -features_1125_td=Yes -features_1126_td=Yes *10 -features_1127_td=Yes *10 -features_1128_td=No -features_1129_td=No -features_1130_td=Linked Tables -features_1131_td=Yes -features_1132_td=No -features_1133_td=Partially *1 -features_1134_td=Partially *2 -features_1135_td=Yes -features_1136_td=ODBC Driver -features_1137_td=Yes -features_1138_td=No -features_1139_td=No -features_1140_td=Yes -features_1141_td=Yes -features_1142_td=Fulltext Search -features_1143_td=Yes -features_1144_td=Yes -features_1145_td=No -features_1146_td=Yes -features_1147_td=Yes -features_1148_td=Domains (User-Defined Types) -features_1149_td=Yes -features_1150_td=No -features_1151_td=Yes -features_1152_td=Yes -features_1153_td=Yes -features_1154_td=Files per Database -features_1155_td=Few -features_1156_td=Many -features_1157_td=Few -features_1158_td=Many -features_1159_td=Many 
-features_1160_td=Row Level Locking -features_1161_td=Yes *9 -features_1162_td=Yes -features_1163_td=Yes *9 -features_1164_td=Yes -features_1165_td=Yes -features_1166_td=Multi Version Concurrency -features_1167_td=Yes -features_1168_td=No -features_1169_td=Yes -features_1170_td=Yes -features_1171_td=Yes -features_1172_td=Multi-Threaded Processing -features_1173_td=No *11 -features_1174_td=Yes -features_1175_td=Yes -features_1176_td=Yes -features_1177_td=Yes -features_1178_td=Role Based Security -features_1179_td=Yes -features_1180_td=Yes *3 -features_1181_td=Yes -features_1182_td=Yes -features_1183_td=Yes -features_1184_td=Updatable Result Sets -features_1185_td=Yes -features_1186_td=Yes *7 -features_1187_td=Yes -features_1188_td=Yes -features_1189_td=Yes -features_1190_td=Sequences -features_1191_td=Yes -features_1192_td=Yes -features_1193_td=Yes -features_1194_td=No -features_1195_td=Yes -features_1196_td=Limit and Offset -features_1197_td=Yes -features_1198_td=Yes *13 -features_1199_td=Yes -features_1200_td=Yes -features_1201_td=Yes -features_1202_td=Window Functions -features_1203_td=No *15 -features_1204_td=No *15 -features_1205_td=No -features_1206_td=No -features_1207_td=Yes -features_1208_td=Temporary Tables -features_1209_td=Yes -features_1210_td=Yes *4 -features_1211_td=Yes -features_1212_td=Yes -features_1213_td=Yes -features_1214_td=Information Schema -features_1215_td=Yes -features_1216_td=No *8 -features_1217_td=Yes -features_1218_td=Yes -features_1219_td=Yes -features_1220_td=Computed Columns -features_1221_td=Yes -features_1222_td=Yes -features_1223_td=Yes -features_1224_td=Yes -features_1225_td=Yes *6 -features_1226_td=Case Insensitive Columns -features_1227_td=Yes -features_1228_td=Yes *14 -features_1229_td=Yes -features_1230_td=Yes -features_1231_td=Yes *6 -features_1232_td=Custom Aggregate Functions -features_1233_td=Yes -features_1234_td=No -features_1235_td=Yes -features_1236_td=No -features_1237_td=Yes -features_1238_td=CLOB/BLOB Compression 
-features_1239_td=Yes -features_1240_td=No -features_1241_td=No -features_1242_td=No -features_1243_td=Yes -features_1244_td=Footprint (jar/dll size) -features_1245_td=~1.5 MB *5 -features_1246_td=~3 MB -features_1247_td=~1.5 MB -features_1248_td=~4 MB -features_1249_td=~6 MB -features_1250_p=\ *1 HSQLDB supports text tables. -features_1251_p=\ *2 MySQL supports linked MySQL tables under the name 'federated tables'. -features_1252_p=\ *3 Derby support for roles based security and password checking as an option. -features_1253_p=\ *4 Derby only supports global temporary tables. -features_1254_p=\ *5 The default H2 jar file contains debug information, jar files for other databases do not. -features_1255_p=\ *6 PostgreSQL supports functional indexes. -features_1256_p=\ *7 Derby only supports updatable result sets if the query is not sorted. -features_1257_p=\ *8 Derby doesn't support standard compliant information schema tables. -features_1258_p=\ *9 When using MVCC (multi version concurrency). -features_1259_p=\ *10 Derby and HSQLDB don't hide data patterns well. -features_1260_p=\ *11 The MULTI_THREADED option is not enabled by default, and with version 1.3.x not supported when using MVCC. -features_1261_p=\ *12 Derby doesn't support the EXPLAIN statement, but it supports runtime statistics and retrieving statement execution plans. -features_1262_p=\ *13 Derby doesn't support the syntax LIMIT .. [OFFSET ..], however it supports FETCH FIRST .. ROW[S] ONLY. -features_1263_p=\ *14 Using collations. *15 Derby and H2 support ROW_NUMBER() OVER(). -features_1264_h3=DaffodilDb and One$Db -features_1265_p=\ It looks like the development of this database has stopped. The last release was February 2006. -features_1266_h3=McKoi -features_1267_p=\ It looks like the development of this database has stopped. The last release was August 2004. -features_1268_h2=H2 in Use -features_1269_p=\ For a list of applications that work with or use H2, see\: Links. 
-features_1270_h2=Connection Modes -features_1271_p=\ The following connection modes are supported\: -features_1272_li=Embedded mode (local connections using JDBC) -features_1273_li=Server mode (remote connections using JDBC or ODBC over TCP/IP) -features_1274_li=Mixed mode (local and remote connections at the same time) -features_1275_h3=Embedded Mode -features_1276_p=\ In embedded mode, an application opens a database from within the same JVM using JDBC. This is the fastest and easiest connection mode. The disadvantage is that a database may only be open in one virtual machine (and class loader) at any time. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of database open concurrently, or on the number of open connections. -features_1277_h3=Server Mode -features_1278_p=\ When using the server mode (sometimes called remote mode or client/server mode), an application opens a database remotely using the JDBC or ODBC API. A server needs to be started within the same or another virtual machine, or on another computer. Many applications can connect to the same database at the same time, by connecting to this server. Internally, the server process opens the database(s) in embedded mode. -features_1279_p=\ The server mode is slower than the embedded mode, because all data is transferred over TCP/IP. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of database open concurrently per server, or on the number of open connections. -features_1280_h3=Mixed Mode -features_1281_p=\ The mixed mode is a combination of the embedded and the server mode. The first application that connects to a database does that in embedded mode, but also starts a server so that other applications (running in different processes or virtual machines) can concurrently access the same data. 
The local connections are as fast as if the database is used in just the embedded mode, while the remote connections are a bit slower. -features_1282_p=\ The server can be started and stopped from within the application (using the server API), or automatically (automatic mixed mode). When using the automatic mixed mode, all clients that want to connect to the database (no matter if it's an local or remote connection) can do so using the exact same database URL. -features_1283_h2=Database URL Overview -features_1284_p=\ This database supports multiple connection modes and connection settings. This is achieved using different database URLs. Settings in the URLs are not case sensitive. -features_1285_th=Topic -features_1286_th=URL Format and Examples -features_1287_a=Embedded (local) connection -features_1288_td=\ jdbc\:h2\:[file\:][<path>]<databaseName> -features_1289_td=\ jdbc\:h2\:~/test -features_1290_td=\ jdbc\:h2\:file\:/data/sample -features_1291_td=\ jdbc\:h2\:file\:C\:/data/sample (Windows only) -features_1292_a=In-memory (private) -features_1293_td=jdbc\:h2\:mem\: -features_1294_a=In-memory (named) -features_1295_td=\ jdbc\:h2\:mem\:<databaseName> -features_1296_td=\ jdbc\:h2\:mem\:test_mem -features_1297_a=Server mode (remote connections) -features_1298_a=\ using TCP/IP -features_1299_td=\ jdbc\:h2\:tcp\://<server>[\:<port>]/[<path>]<databaseName> -features_1300_td=\ jdbc\:h2\:tcp\://localhost/~/test -features_1301_td=\ jdbc\:h2\:tcp\://dbserv\:8084/~/sample -features_1302_td=\ jdbc\:h2\:tcp\://localhost/mem\:test -features_1303_a=Server mode (remote connections) -features_1304_a=\ using TLS -features_1305_td=\ jdbc\:h2\:ssl\://<server>[\:<port>]/<databaseName> -features_1306_td=\ jdbc\:h2\:ssl\://localhost\:8085/~/sample; -features_1307_a=Using encrypted files -features_1308_td=\ jdbc\:h2\:<url>;CIPHER\=AES -features_1309_td=\ jdbc\:h2\:ssl\://localhost/~/test;CIPHER\=AES -features_1310_td=\ jdbc\:h2\:file\:~/secure;CIPHER\=AES -features_1311_a=File 
locking methods -features_1312_td=\ jdbc\:h2\:<url>;FILE_LOCK\={FILE|SOCKET|NO} -features_1313_td=\ jdbc\:h2\:file\:~/private;CIPHER\=AES;FILE_LOCK\=SOCKET -features_1314_a=Only open if it already exists -features_1315_td=\ jdbc\:h2\:<url>;IFEXISTS\=TRUE -features_1316_td=\ jdbc\:h2\:file\:~/sample;IFEXISTS\=TRUE -features_1317_a=Don't close the database when the VM exits -features_1318_td=\ jdbc\:h2\:<url>;DB_CLOSE_ON_EXIT\=FALSE -features_1319_a=Execute SQL on connection -features_1320_td=\ jdbc\:h2\:<url>;INIT\=RUNSCRIPT FROM '~/create.sql' -features_1321_td=\ jdbc\:h2\:file\:~/sample;INIT\=RUNSCRIPT FROM '~/create.sql'\\;RUNSCRIPT FROM '~/populate.sql' -features_1322_a=User name and/or password -features_1323_td=\ jdbc\:h2\:<url>[;USER\=<username>][;PASSWORD\=<value>] -features_1324_td=\ jdbc\:h2\:file\:~/sample;USER\=sa;PASSWORD\=123 -features_1325_a=Debug trace settings -features_1326_td=\ jdbc\:h2\:<url>;TRACE_LEVEL_FILE\=<level 0..3> -features_1327_td=\ jdbc\:h2\:file\:~/sample;TRACE_LEVEL_FILE\=3 -features_1328_a=Ignore unknown settings -features_1329_td=\ jdbc\:h2\:<url>;IGNORE_UNKNOWN_SETTINGS\=TRUE -features_1330_a=Custom file access mode -features_1331_td=\ jdbc\:h2\:<url>;ACCESS_MODE_DATA\=rws -features_1332_a=Database in a zip file -features_1333_td=\ jdbc\:h2\:zip\:<zipFileName>\!/<databaseName> -features_1334_td=\ jdbc\:h2\:zip\:~/db.zip\!/test -features_1335_a=Compatibility mode -features_1336_td=\ jdbc\:h2\:<url>;MODE\=<databaseType> -features_1337_td=\ jdbc\:h2\:~/test;MODE\=MYSQL -features_1338_a=Auto-reconnect -features_1339_td=\ jdbc\:h2\:<url>;AUTO_RECONNECT\=TRUE -features_1340_td=\ jdbc\:h2\:tcp\://localhost/~/test;AUTO_RECONNECT\=TRUE -features_1341_a=Automatic mixed mode -features_1342_td=\ jdbc\:h2\:<url>;AUTO_SERVER\=TRUE -features_1343_td=\ jdbc\:h2\:~/test;AUTO_SERVER\=TRUE -features_1344_a=Page size -features_1345_td=\ jdbc\:h2\:<url>;PAGE_SIZE\=512 -features_1346_a=Changing other settings -features_1347_td=\ 
jdbc\:h2\:<url>;<setting>\=<value>[;<setting>\=<value>...] -features_1348_td=\ jdbc\:h2\:file\:~/sample;TRACE_LEVEL_SYSTEM_OUT\=3 -features_1349_h2=Connecting to an Embedded (Local) Database -features_1350_p=\ The database URL for connecting to a local database is jdbc\:h2\:[file\:][<path>]<databaseName>. The prefix file\: is optional. If no or only a relative path is used, then the current working directory is used as a starting point. The case sensitivity of the path and database name depend on the operating system, however it is recommended to use lowercase letters only. The database name must be at least three characters long (a limitation of File.createTempFile). The database name must not contain a semicolon. To point to the user home directory, use ~/, as in\: jdbc\:h2\:~/test. -features_1351_h2=In-Memory Databases -features_1352_p=\ For certain use cases (for example\: rapid prototyping, testing, high performance operations, read-only databases), it may not be required to persist data, or persist changes to the data. This database supports the in-memory mode, where the data is not persisted. -features_1353_p=\ In some cases, only one connection to a in-memory database is required. This means the database to be opened is private. In this case, the database URL is jdbc\:h2\:mem\: Opening two connections within the same virtual machine means opening two different (private) databases. -features_1354_p=\ Sometimes multiple connections to the same in-memory database are required. In this case, the database URL must include a name. Example\: jdbc\:h2\:mem\:db1. Accessing the same database using this URL only works within the same virtual machine and class loader environment. -features_1355_p=\ To access an in-memory database from another process or from another computer, you need to start a TCP server in the same process as the in-memory database was created. 
The other processes then need to access the database over TCP/IP or TLS, using a database URL such as\: jdbc\:h2\:tcp\://localhost/mem\:db1. -features_1356_p=\ By default, closing the last connection to a database closes the database. For an in-memory database, this means the content is lost. To keep the database open, add ;DB_CLOSE_DELAY\=-1 to the database URL. To keep the content of an in-memory database as long as the virtual machine is alive, use jdbc\:h2\:mem\:test;DB_CLOSE_DELAY\=-1. -features_1357_h2=Database Files Encryption -features_1358_p=\ The database files can be encrypted. Three encryption algorithms are supported\: -features_1359_li="AES" - also known as Rijndael, only AES-128 is implemented. -features_1360_li="XTEA" - the 32 round version. -features_1361_li="FOG" - pseudo-encryption only useful for hiding data from a text editor. -features_1362_p=\ To use file encryption, you need to specify the encryption algorithm (the 'cipher') and the file password (in addition to the user password) when connecting to the database. -features_1363_h3=Creating a New Database with File Encryption -features_1364_p=\ By default, a new database is automatically created if it does not exist yet. To create an encrypted database, connect to it as it would already exist. -features_1365_h3=Connecting to an Encrypted Database -features_1366_p=\ The encryption algorithm is set in the database URL, and the file password is specified in the password field, before the user password. A single space separates the file password and the user password; the file password itself may not contain spaces. File passwords and user passwords are case sensitive. Here is an example to connect to a password-encrypted database\: -features_1367_h3=Encrypting or Decrypting a Database -features_1368_p=\ To encrypt an existing database, use the ChangeFileEncryption tool. This tool can also decrypt an encrypted database, or change the file encryption key. 
The tool is available from within the H2 Console in the tools section, or you can run it from the command line. The following command line will encrypt the database test in the user home directory with the file password filepwd and the encryption algorithm AES\: -features_1369_h2=Database File Locking -features_1370_p=\ Whenever a database is opened, a lock file is created to signal other processes that the database is in use. If database is closed, or if the process that opened the database terminates, this lock file is deleted. -features_1371_p=\ The following file locking methods are implemented\: -features_1372_li=The default method is FILE and uses a watchdog thread to protect the database file. The watchdog reads the lock file each second. -features_1373_li=The second method is SOCKET and opens a server socket. The socket method does not require reading the lock file every second. The socket method should only be used if the database files are only accessed by one (and always the same) computer. -features_1374_li=The third method is FS. This will use native file locking using FileChannel.lock. -features_1375_li=It is also possible to open the database without file locking; in this case it is up to the application to protect the database files. Failing to do so will result in a corrupted database. Using the method NO forces the database to not create a lock file at all. Please note that this is unsafe as another process is able to open the same database, possibly leading to data corruption. -features_1376_p=\ To open the database with a different file locking method, use the parameter FILE_LOCK. The following code opens the database with the 'socket' locking method\: -features_1377_p=\ For more information about the algorithms, see Advanced / File Locking Protocols. -features_1378_h2=Opening a Database Only if it Already Exists -features_1379_p=\ By default, when an application calls DriverManager.getConnection(url, ...) 
and the database specified in the URL does not yet exist, a new (empty) database is created. In some situations, it is better to restrict creating new databases, and only allow to open existing databases. To do this, add ;IFEXISTS\=TRUE to the database URL. In this case, if the database does not already exist, an exception is thrown when trying to connect. The connection only succeeds when the database already exists. The complete URL may look like this\: -features_1380_h2=Closing a Database -features_1381_h3=Delayed Database Closing -features_1382_p=\ Usually, a database is closed when the last connection to it is closed. In some situations this slows down the application, for example when it is not possible to keep at least one connection open. The automatic closing of a database can be delayed or disabled with the SQL statement SET DB_CLOSE_DELAY <seconds>. The parameter <seconds> specifies the number of seconds to keep a database open after the last connection to it was closed. The following statement will keep a database open for 10 seconds after the last connection was closed\: -features_1383_p=\ The value -1 means the database is not closed automatically. The value 0 is the default and means the database is closed when the last connection is closed. This setting is persistent and can be set by an administrator only. It is possible to set the value in the database URL\: jdbc\:h2\:~/test;DB_CLOSE_DELAY\=10. -features_1384_h3=Don't Close a Database when the VM Exits -features_1385_p=\ By default, a database is closed when the last connection is closed. However, if it is never closed, the database is closed when the virtual machine exits normally, using a shutdown hook. In some situations, the database should not be closed in this case, for example because the database is still used at virtual machine shutdown (to store the shutdown process in the database for example). For those cases, the automatic closing of the database can be disabled in the database URL. 
The first connection (the one that is opening the database) needs to set the option in the database URL (it is not possible to change the setting afterwards). The database URL to disable database closing on exit is\: -features_1386_h2=Execute SQL on Connection -features_1387_p=\ Sometimes, particularly for in-memory databases, it is useful to be able to execute DDL or DML commands automatically when a client connects to a database. This functionality is enabled via the INIT property. Note that multiple commands may be passed to INIT, but the semicolon delimiter must be escaped, as in the example below. -features_1388_p=\ Please note the double backslash is only required in a Java or properties file. In a GUI, or in an XML file, only one backslash is required\: -features_1389_p=\ Backslashes within the init script (for example within a runscript statement, to specify the folder names in Windows) need to be escaped as well (using a second backslash). It might be simpler to avoid backslashes in folder names for this reason; use forward slashes instead. -features_1390_h2=Ignore Unknown Settings -features_1391_p=\ Some applications (for example OpenOffice.org Base) pass some additional parameters when connecting to the database. Why those parameters are passed is unknown. The parameters PREFERDOSLIKELINEENDS and IGNOREDRIVERPRIVILEGES are such examples; they are simply ignored to improve the compatibility with OpenOffice.org. If an application passes other parameters when connecting to the database, usually the database throws an exception saying the parameter is not supported. It is possible to ignore such parameters by adding ;IGNORE_UNKNOWN_SETTINGS\=TRUE to the database URL. -features_1392_h2=Changing Other Settings when Opening a Connection -features_1393_p=\ In addition to the settings already described, other database settings can be passed in the database URL. 
Adding ;setting\=value at the end of a database URL is the same as executing the statement SET setting value just after connecting. For a list of supported settings, see SQL Grammar or the DbSettings javadoc. -features_1394_h2=Custom File Access Mode -features_1395_p=\ Usually, the database opens the database file with the access mode rw, meaning read-write (except for read only databases, where the mode r is used). To open a database in read-only mode if the database file is not read-only, use ACCESS_MODE_DATA\=r. Also supported are rws and rwd. This setting must be specified in the database URL\: -features_1396_p=\ For more information see Durability Problems. On many operating systems the access mode rws does not guarantee that the data is written to the disk. -features_1397_h2=Multiple Connections -features_1398_h3=Opening Multiple Databases at the Same Time -features_1399_p=\ An application can open multiple databases at the same time, including multiple connections to the same database. The number of open database is only limited by the memory available. -features_1400_h3=Multiple Connections to the Same Database\: Client/Server -features_1401_p=\ If you want to access the same database at the same time from different processes or computers, you need to use the client / server mode. In this case, one process acts as the server, and the other processes (that could reside on other computers as well) connect to the server via TCP/IP (or TLS over TCP/IP for improved security). -features_1402_h3=Multithreading Support -features_1403_p=\ This database is multithreading-safe. If an application is multi-threaded, it does not need to worry about synchronizing access to the database. An application should normally use one connection per thread. This database synchronizes access to the same connection, but other databases may not do this. To get higher concurrency, you need to use multiple connections. 
-features_1404_p=\ By default, requests to the same database are synchronized. That means an application can use multiple threads that access the same database at the same time, however if one thread executes a long running query, the other threads need to wait. To enable concurrent database usage, see the setting MULTI_THREADED. -features_1405_h3=Locking, Lock-Timeout, Deadlocks -features_1406_p=\ Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. If multi-version concurrency is not used, the database uses table level locks to give each connection a consistent state of the data. There are two kinds of locks\: read locks (shared locks) and write locks (exclusive locks). All locks are released when the transaction commits or rolls back. When using the default transaction isolation level 'read committed', read locks are already released after each statement. -features_1407_p=\ If a connection wants to read from a table, and there is no write lock on the table, then a read lock is added to the table. If there is a write lock, then this connection waits for the other connection to release the lock. If a connection cannot get a lock for a specified time, then a lock timeout exception is thrown. -features_1408_p=\ Usually, SELECT statements will generate read locks. This includes subqueries. Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data, using the statement SELECT ... FOR UPDATE. The statements COMMIT and ROLLBACK release all open locks. The commands SAVEPOINT and ROLLBACK TO SAVEPOINT don't affect locks. The locks are also released when the autocommit mode changes, and for connections with autocommit set to true (this is the default), locks are released after each statement. 
The following statements generate locks\: -features_1409_th=Type of Lock -features_1410_th=SQL Statement -features_1411_td=Read -features_1412_td=SELECT * FROM TEST; -features_1413_td=\ CALL SELECT MAX(ID) FROM TEST; -features_1414_td=\ SCRIPT; -features_1415_td=Write -features_1416_td=SELECT * FROM TEST WHERE 1\=0 FOR UPDATE; -features_1417_td=Write -features_1418_td=INSERT INTO TEST VALUES(1, 'Hello'); -features_1419_td=\ INSERT INTO TEST SELECT * FROM TEST; -features_1420_td=\ UPDATE TEST SET NAME\='Hi'; -features_1421_td=\ DELETE FROM TEST; -features_1422_td=Write -features_1423_td=ALTER TABLE TEST ...; -features_1424_td=\ CREATE INDEX ... ON TEST ...; -features_1425_td=\ DROP INDEX ...; -features_1426_p=\ The number of seconds until a lock timeout exception is thrown can be set separately for each connection using the SQL command SET LOCK_TIMEOUT <milliseconds>. The initial lock timeout (that is the timeout used for new connections) can be set using the SQL command SET DEFAULT_LOCK_TIMEOUT <milliseconds>. The default lock timeout is persistent. -features_1427_h3=Avoiding Deadlocks -features_1428_p=\ To avoid deadlocks, ensure that all transactions lock the tables in the same order (for example in alphabetical order), and avoid upgrading read locks to write locks. Both can be achieved using explicitly locking tables using SELECT ... FOR UPDATE. -features_1429_h2=Database File Layout -features_1430_p=\ The following files are created for persistent databases\: -features_1431_th=File Name -features_1432_th=Description -features_1433_th=Number of Files -features_1434_td=\ test.h2.db -features_1435_td=\ Database file. -features_1436_td=\ Contains the transaction log, indexes, and data for all tables. -features_1437_td=\ Format\: <database>.h2.db -features_1438_td=\ 1 per database -features_1439_td=\ test.lock.db -features_1440_td=\ Database lock file. -features_1441_td=\ Automatically (re-)created while the database is in use. 
-features_1442_td=\ Format\: <database>.lock.db -features_1443_td=\ 1 per database (only if in use) -features_1444_td=\ test.trace.db -features_1445_td=\ Trace file (if the trace option is enabled). -features_1446_td=\ Contains trace information. -features_1447_td=\ Format\: <database>.trace.db -features_1448_td=\ Renamed to <database>.trace.db.old if it is too big. -features_1449_td=\ 0 or 1 per database -features_1450_td=\ test.lobs.db/* -features_1451_td=\ Directory containing one file for each -features_1452_td=\ BLOB or CLOB value larger than a certain size. -features_1453_td=\ Format\: <id>.t<tableId>.lob.db -features_1454_td=\ 1 per large object -features_1455_td=\ test.123.temp.db -features_1456_td=\ Temporary file. -features_1457_td=\ Contains a temporary blob or a large result set. -features_1458_td=\ Format\: <database>.<id>.temp.db -features_1459_td=\ 1 per object -features_1460_h3=Moving and Renaming Database Files -features_1461_p=\ Database name and location are not stored inside the database files. -features_1462_p=\ While a database is closed, the files can be moved to another directory, and they can be renamed as well (as long as all files of the same database start with the same name and the respective extensions are unchanged). -features_1463_p=\ As there is no platform specific data in the files, they can be moved to other operating systems without problems. -features_1464_h3=Backup -features_1465_p=\ When the database is closed, it is possible to backup the database files. -features_1466_p=\ To backup data while the database is running, the SQL commands SCRIPT and BACKUP can be used. -features_1467_h2=Logging and Recovery -features_1468_p=\ Whenever data is modified in the database and those changes are committed, the changes are written to the transaction log (except for in-memory objects). The changes to the main data area itself are usually written later on, to optimize disk access. 
If there is a power failure, the main data area is not up-to-date, but because the changes are in the transaction log, the next time the database is opened, the changes are re-applied automatically. -features_1469_h2=Compatibility -features_1470_p=\ All database engines behave a little bit different. Where possible, H2 supports the ANSI SQL standard, and tries to be compatible to other databases. There are still a few differences however\: -features_1471_p=\ In MySQL text columns are case insensitive by default, while in H2 they are case sensitive. However H2 supports case insensitive columns as well. To create the tables with case insensitive texts, append IGNORECASE\=TRUE to the database URL (example\: jdbc\:h2\:~/test;IGNORECASE\=TRUE). -features_1472_h3=Compatibility Modes -features_1473_p=\ For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases are implemented in this way. Here is the list of currently supported modes and the differences to the regular mode\: -features_1474_h3=DB2 Compatibility Mode -features_1475_p=\ To use the IBM DB2 mode, use the database URL jdbc\:h2\:~/test;MODE\=DB2 or the SQL statement SET MODE DB2. -features_1476_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1477_li=Support for the syntax [OFFSET .. ROW] [FETCH ... ONLY] as an alternative for LIMIT .. OFFSET. -features_1478_li=Concatenating NULL with another value results in the other value. -features_1479_li=Support the pseudo-table SYSIBM.SYSDUMMY1. -features_1480_h3=Derby Compatibility Mode -features_1481_p=\ To use the Apache Derby mode, use the database URL jdbc\:h2\:~/test;MODE\=Derby or the SQL statement SET MODE Derby. -features_1482_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1483_li=For unique indexes, NULL is distinct. 
That means only one row with NULL in one of the columns is allowed. -features_1484_li=Concatenating NULL with another value results in the other value. -features_1485_li=Support the pseudo-table SYSIBM.SYSDUMMY1. -features_1486_h3=HSQLDB Compatibility Mode -features_1487_p=\ To use the HSQLDB mode, use the database URL jdbc\:h2\:~/test;MODE\=HSQLDB or the SQL statement SET MODE HSQLDB. -features_1488_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1489_li=When converting the scale of decimal data, the number is only converted if the new scale is smaller than the current scale. Usually, the scale is converted and 0s are added if required. -features_1490_li=For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. -features_1491_li=Text can be concatenated using '+'. -features_1492_h3=MS SQL Server Compatibility Mode -features_1493_p=\ To use the MS SQL Server mode, use the database URL jdbc\:h2\:~/test;MODE\=MSSQLServer or the SQL statement SET MODE MSSQLServer. -features_1494_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1495_li=Identifiers may be quoted using square brackets as in [Test]. -features_1496_li=For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. -features_1497_li=Concatenating NULL with another value results in the other value. -features_1498_li=Text can be concatenated using '+'. -features_1499_h3=MySQL Compatibility Mode -features_1500_p=\ To use the MySQL mode, use the database URL jdbc\:h2\:~/test;MODE\=MySQL or the SQL statement SET MODE MySQL. -features_1501_li=When inserting data, if a column is defined to be NOT NULL and NULL is inserted, then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. Usually, this operation is not allowed and an exception is thrown. 
-features_1502_li=Creating indexes in the CREATE TABLE statement is allowed using INDEX(..) or KEY(..). Example\: create table test(id int primary key, name varchar(255), key idx_name(name)); -features_1503_li=Meta data calls return identifiers in lower case. -features_1504_li=When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. -features_1505_li=Concatenating NULL with another value results in the other value. -features_1506_p=\ Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases). H2 does support case insensitive text comparison, but it needs to be set separately, using SET IGNORECASE TRUE. This affects comparison using \=, LIKE, REGEXP. -features_1507_h3=Oracle Compatibility Mode -features_1508_p=\ To use the Oracle mode, use the database URL jdbc\:h2\:~/test;MODE\=Oracle or the SQL statement SET MODE Oracle. -features_1509_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1510_li=When using unique indexes, multiple rows with NULL in all columns are allowed, however it is not allowed to have multiple rows with the same values otherwise. -features_1511_li=Concatenating NULL with another value results in the other value. -features_1512_li=Empty strings are treated like NULL values. -features_1513_h3=PostgreSQL Compatibility Mode -features_1514_p=\ To use the PostgreSQL mode, use the database URL jdbc\:h2\:~/test;MODE\=PostgreSQL or the SQL statement SET MODE PostgreSQL. -features_1515_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1516_li=When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. -features_1517_li=The system columns CTID and OID are supported. -features_1518_li=LOG(x) is base 10 in this mode. 
-features_1519_h2=Auto-Reconnect -features_1520_p=\ The auto-reconnect feature causes the JDBC driver to reconnect to the database if the connection is lost. The automatic re-connect only occurs when auto-commit is enabled; if auto-commit is disabled, an exception is thrown. To enable this mode, append ;AUTO_RECONNECT\=TRUE to the database URL. -features_1521_p=\ Re-connecting will open a new session. After an automatic re-connect, variables and local temporary tables definitions (excluding data) are re-created. The contents of the system table INFORMATION_SCHEMA.SESSION_STATE contains all client side state that is re-created. -features_1522_p=\ If another connection uses the database in exclusive mode (enabled using SET EXCLUSIVE 1 or SET EXCLUSIVE 2), then this connection will try to re-connect until the exclusive mode ends. -features_1523_h2=Automatic Mixed Mode -features_1524_p=\ Multiple processes can access the same database without having to start the server manually. To do that, append ;AUTO_SERVER\=TRUE to the database URL. You can use the same database URL independent of whether the database is already open or not. This feature doesn't work with in-memory databases. Example database URL\: -features_1525_p=\ Use the same URL for all connections to this database. Internally, when using this mode, the first connection to the database is made in embedded mode, and additionally a server is started internally (as a daemon thread). If the database is already open in another process, the server mode is used automatically. The IP address and port of the server are stored in the file .lock.db, that's why in-memory databases can't be supported. -features_1526_p=\ The application that opens the first connection to the database uses the embedded mode, which is faster than the server mode. Therefore the main application should open the database first if possible. The first connection automatically starts a server on a random port. 
This server allows remote connections, however only to this database (to ensure that, the client reads .lock.db file and sends the random key that is stored there to the server). When the first connection is closed, the server stops. If other (remote) connections are still open, one of them will then start a server (auto-reconnect is enabled automatically). -features_1527_p=\ All processes need to have access to the database files. If the first connection is closed (the connection that started the server), open transactions of other connections will be rolled back (this may not be a problem if you don't disable autocommit). Explicit client/server connections (using jdbc\:h2\:tcp\:// or ssl\://) are not supported. This mode is not supported for in-memory databases. -features_1528_p=\ Here is an example how to use this mode. Application 1 and 2 are not necessarily started on the same computer, but they need to have access to the database files. Application 1 and 2 are typically two different processes (however they could run within the same process). -features_1529_p=\ When using this feature, by default the server uses any free TCP port. The port can be set manually using AUTO_SERVER_PORT\=9090. -features_1530_h2=Page Size -features_1531_p=\ The page size for new databases is 2 KB (2048), unless the page size is set explicitly in the database URL using PAGE_SIZE\= when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created. -features_1532_h2=Using the Trace Options -features_1533_p=\ To find problems in an application, it is sometimes good to see what database operations were executed. 
This database offers the following trace features\: -features_1534_li=Trace to System.out and/or to a file -features_1535_li=Support for trace levels OFF, ERROR, INFO, DEBUG -features_1536_li=The maximum size of the trace file can be set -features_1537_li=It is possible to generate Java source code from the trace file -features_1538_li=Trace can be enabled at runtime by manually creating a file -features_1539_h3=Trace Options -features_1540_p=\ The simplest way to enable the trace option is setting it in the database URL. There are two settings, one for System.out (TRACE_LEVEL_SYSTEM_OUT) tracing, and one for file tracing (TRACE_LEVEL_FILE). The trace levels are 0 for OFF, 1 for ERROR (the default), 2 for INFO, and 3 for DEBUG. A database URL with both levels set to DEBUG is\: -features_1541_p=\ The trace level can be changed at runtime by executing the SQL command SET TRACE_LEVEL_SYSTEM_OUT level (for System.out tracing) or SET TRACE_LEVEL_FILE level (for file tracing). Example\: -features_1542_h3=Setting the Maximum Size of the Trace File -features_1543_p=\ When using a high trace level, the trace file can get very big quickly. The default size limit is 16 MB, if the trace file exceeds this limit, it is renamed to .old and a new file is created. If another such file exists, it is deleted. To limit the size to a certain number of megabytes, use SET TRACE_MAX_FILE_SIZE mb. Example\: -features_1544_h3=Java Code Generation -features_1545_p=\ When setting the trace level to INFO or DEBUG, Java source code is generated as well. This simplifies reproducing problems. The trace file looks like this\: -features_1546_p=\ To filter the Java source code, use the ConvertTraceFile tool as follows\: -features_1547_p=\ The generated file Test.java will contain the Java source code. The generated source code may be too large to compile (the size of a Java method is limited). If this is the case, the source code needs to be split in multiple methods. 
The password is not listed in the trace file and therefore not included in the source code. -features_1548_h2=Using Other Logging APIs -features_1549_p=\ By default, this database uses its own native 'trace' facility. This facility is called 'trace' and not 'log' within this database to avoid confusion with the transaction log. Trace messages can be written to both file and System.out. In most cases, this is sufficient, however sometimes it is better to use the same facility as the application, for example Log4j. To do that, this database support SLF4J. -features_1550_a=SLF4J -features_1551_p=\ is a simple facade for various logging APIs and allows to plug in the desired implementation at deployment time. SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), Java logging, x4juli, and Simple Log. -features_1552_p=\ To enable SLF4J, set the file trace level to 4 in the database URL\: -features_1553_p=\ Changing the log mechanism is not possible after the database is open, that means executing the SQL statement SET TRACE_LEVEL_FILE 4 when the database is already open will not have the desired effect. To use SLF4J, all required jar files need to be in the classpath. The logger name is h2database. If it does not work, check the file <database>.trace.db for error messages. -features_1554_h2=Read Only Databases -features_1555_p=\ If the database files are read-only, then the database is read-only as well. It is not possible to create new tables, add or modify data in this database. Only SELECT and CALL statements are allowed. To create a read-only database, close the database. Then, make the database file read-only. When you open the database now, it is read-only. There are two ways an application can find out whether database is read-only\: by calling Connection.isReadOnly() or by executing the SQL statement CALL READONLY(). 
-features_1556_p=\ Using the Custom Access Mode r the database can also be opened in read-only mode, even if the database file is not read only. -features_1557_h2=Read Only Databases in Zip or Jar File -features_1558_p=\ To create a read-only database in a zip file, first create a regular persistent database, and then create a backup. The database must not have pending changes, that means you need to close all connections to the database first. To speed up opening the read-only database and running queries, the database should be closed using SHUTDOWN DEFRAG. If you are using a database named test, an easy way to create a zip file is using the Backup tool. You can start the tool from the command line, or from within the H2 Console (Tools - Backup). Please note that the database must be closed when the backup is created. Therefore, the SQL statement BACKUP TO can not be used. -features_1559_p=\ When the zip file is created, you can open the database in the zip file using the following database URL\: -features_1560_p=\ Databases in zip files are read-only. The performance for some queries will be slower than when using a regular database, because random access in zip files is not supported (only streaming). How much this affects the performance depends on the queries and the data. The database is not read in memory; therefore large databases are supported as well. The same indexes are used as when using a regular database. -features_1561_p=\ If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files, because random access in compressed files is not possible. See also the sample application ReadOnlyDatabaseInZip. -features_1562_h3=Opening a Corrupted Database -features_1563_p=\ If a database cannot be opened because the boot info (the SQL script that is run at startup) is corrupted, then the database can be opened by specifying a database event listener. 
The exceptions are logged, but opening the database will continue. -features_1564_h2=Computed Columns / Function Based Index -features_1565_p=\ A computed column is a column whose value is calculated before storing. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time\: -features_1566_p=\ Function indexes are not directly supported by this database, but they can be emulated by using computed columns. For example, if an index on the upper-case version of a column is required, create a computed column with the upper-case version of the original column, and create an index for this column\: -features_1567_p=\ When inserting data, it is not required (and not allowed) to specify a value for the upper-case version of the column, because the value is generated. But you can use the column when querying the table\: -features_1568_h2=Multi-Dimensional Indexes -features_1569_p=\ A tool is provided to execute efficient multi-dimension (spatial) range queries. This database does not support a specialized spatial index (R-Tree or similar). Instead, the B-Tree index is used. For each record, the multi-dimensional key is converted (mapped) to a single dimensional (scalar) value. This value specifies the location on a space-filling curve. -features_1570_p=\ Currently, Z-order (also called N-order or Morton-order) is used; Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. The scalar value is indexed using a B-Tree index (usually using a computed column). -features_1571_p=\ The method can result in a drastic performance improvement over just using an index on the first column. Depending on the data and number of dimensions, the improvement is usually higher than factor 5. The tool generates a SQL query from a specified multi-dimensional range. 
The method used is not database dependent, and the tool can easily be ported to other databases. For an example how to use the tool, please have a look at the sample code provided in TestMultiDimension.java. -features_1572_h2=User-Defined Functions and Stored Procedures -features_1573_p=\ In addition to the built-in functions, this database supports user-defined Java functions. In this database, Java functions can be used as stored procedures as well. A function must be declared (registered) before it can be used. A function can be defined using source code, or as a reference to a compiled class that is available in the classpath. By default, the function aliases are stored in the current schema. -features_1574_h3=Referencing a Compiled Method -features_1575_p=\ When referencing a method, the class must already be compiled and included in the classpath where the database is running. Only static Java methods are supported; both the class and the method must be public. Example Java class\: -features_1576_p=\ The Java function must be registered in the database by calling CREATE ALIAS ... FOR\: -features_1577_p=\ For a complete sample application, see src/test/org/h2/samples/Function.java. -features_1578_h3=Declaring Functions as Source Code -features_1579_p=\ When defining a function alias with source code, the database tries to compile the source code using the Sun Java compiler (the class com.sun.tools.javac.Main) if the tools.jar is in the classpath. If not, javac is run as a separate process. Only the source code is stored in the database; the class is compiled each time the database is re-opened. Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well. Example\: -features_1580_p=\ By default, the three packages java.util, java.math, java.sql are imported. The method name (nextPrime in the example above) is ignored. 
Method overloading is not supported when declaring functions as source code, that means only one method may be declared for an alias. If different import statements are required, they must be declared at the beginning and separated with the tag @CODE\: -features_1581_p=\ The following template is used to create a complete Java class\: -features_1582_h3=Method Overloading -features_1583_p=\ Multiple methods may be bound to a SQL function if the class is already compiled and included in the classpath. Each Java method must have a different number of arguments. Method overloading is not supported when declaring functions as source code. -features_1584_h3=Function Data Type Mapping -features_1585_p=\ Functions that accept non-nullable parameters such as int will not be called if one of those parameters is NULL. Instead, the result of the function is NULL. If the function should be called if a parameter is NULL, you need to use java.lang.Integer instead. -features_1586_p=\ SQL types are mapped to Java classes and vice-versa as in the JDBC API. For details, see Data Types. There are a few special cases\: java.lang.Object is mapped to OTHER (a serialized object). Therefore, java.lang.Object can not be used to match all SQL types (matching all SQL types is not supported). The second special case is Object[]\: arrays of any class are mapped to ARRAY. Objects of type org.h2.value.Value (the internal value class) are passed through without conversion. -features_1587_h3=Functions That Require a Connection -features_1588_p=\ If the first parameter of a Java function is a java.sql.Connection, then the connection to database is provided. This connection does not need to be closed before returning. When calling the method from within the SQL statement, this connection parameter does not need to be (can not be) specified. 
-features_1589_h3=Functions Throwing an Exception -features_1590_p=\ If a function throws an exception, then the current statement is rolled back and the exception is thrown to the application. SQLExceptions are directly re-thrown to the calling application; all other exceptions are first converted to a SQLException. -features_1591_h3=Functions Returning a Result Set -features_1592_p=\ Functions may return a result set. Such a function can be called with the CALL statement\: -features_1593_h3=Using SimpleResultSet -features_1594_p=\ A function can create a result set using the SimpleResultSet tool\: -features_1595_h3=Using a Function as a Table -features_1596_p=\ A function that returns a result set can be used like a table. However, in this case the function is called at least twice\: first while parsing the statement to collect the column names (with parameters set to null where not known at compile time). And then, while executing the statement to get the data (maybe multiple times if this is a join). If the function is called just to get the column list, the URL of the connection passed to the function is jdbc\:columnlist\:connection. Otherwise, the URL of the connection is jdbc\:default\:connection. -features_1597_h2=Pluggable or User-Defined Tables -features_1598_p=\ For situations where you need to expose other data-sources to the SQL engine as a table, there are "pluggable tables". For some examples, have a look at the code in org.h2.test.db.TestTableEngines. -features_1599_p=\ In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine interface e.g. something like this\: -features_1600_p=\ and then create the table from SQL like this\: -features_1601_p=\ It is also possible to pass in parameters to the table engine, like so\: -features_1602_p=\ In which case the parameters are passed down in the tableEngineParams field of the CreateTableData object. 
-features_1603_p=\ It is also possible to specify default table engine params on schema creation\: -features_1604_p=\ Params from the schema are used when CREATE TABLE issued on this schema does not have its own engine params specified. -features_1605_h2=Triggers -features_1606_p=\ This database supports Java triggers that are called before or after a row is updated, inserted or deleted. Triggers can be used for complex consistency checks, or to update related data in the database. It is also possible to use triggers to simulate materialized views. For a complete sample application, see src/test/org/h2/samples/TriggerSample.java. A Java trigger must implement the interface org.h2.api.Trigger. The trigger class must be available in the classpath of the database engine (when using the server mode, it must be in the classpath of the server). -features_1607_p=\ The connection can be used to query or update data in other tables. The trigger then needs to be defined in the database\: -features_1608_p=\ The trigger can be used to veto a change by throwing a SQLException. -features_1609_p=\ As an alternative to implementing the Trigger interface, an application can extend the abstract class org.h2.tools.TriggerAdapter. This allows you to use the ResultSet interface within trigger implementations. In this case, only the fire method needs to be implemented\: -features_1610_h2=Compacting a Database -features_1611_p=\ Empty space in the database file is re-used automatically. When closing the database, the database is automatically compacted for up to 200 milliseconds by default. To compact more, use the SQL statement SHUTDOWN COMPACT. However re-creating the database may further reduce the database size because this will re-build the indexes. Here is a sample function to do this\: -features_1612_p=\ See also the sample application org.h2.samples.Compact. 
The commands SCRIPT / RUNSCRIPT can be used as well to create a backup of a database and re-build the database from the script. -features_1613_h2=Cache Settings -features_1614_p=\ The database keeps most frequently used data in the main memory. The amount of memory used for caching can be changed using the setting CACHE_SIZE. This setting can be set in the database connection URL (jdbc\:h2\:~/test;CACHE_SIZE\=131072), or it can be changed at runtime using SET CACHE_SIZE size. The size of the cache, as represented by CACHE_SIZE is measured in KB, with each KB being 1024 bytes. This setting has no effect for in-memory databases. For persistent databases, the setting is stored in the database and re-used when the database is opened the next time. However, when opening an existing database, the cache size is set to at most half the amount of memory available for the virtual machine (Runtime.getRuntime().maxMemory()), even if the cache size setting stored in the database is larger; however the setting stored in the database is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE overrides this value (even if larger than the physical memory). To get the current used maximum cache size, use the query SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME \= 'info.CACHE_MAX_SIZE' -features_1615_p=\ An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE\=TQ to the database URL. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. -features_1616_p=\ Also included is an experimental second level soft reference cache. Rows in this cache are only garbage collected on low memory. By default the second level cache is disabled. To enable it, use the prefix SOFT_. Example\: jdbc\:h2\:~/test;CACHE_TYPE\=SOFT_LRU. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. 
-features_1617_p=\ To get information about page reads and writes, and the current caching algorithm in use, call SELECT * FROM INFORMATION_SCHEMA.SETTINGS. The number of pages read / written is listed. -fragments_1000_div=\   &\#x25b2; -fragments_1001_label=Search\: -fragments_1002_label=Highlight keyword(s) -fragments_1003_a=Home -fragments_1004_a=Download -fragments_1005_a=Cheat Sheet -fragments_1006_b=Documentation -fragments_1007_a=Quickstart -fragments_1008_a=Installation -fragments_1009_a=Tutorial -fragments_1010_a=Features -fragments_1011_a=Performance -fragments_1012_a=Advanced -fragments_1013_b=Reference -fragments_1014_a=SQL Grammar -fragments_1015_a=Functions -fragments_1016_a=Data Types -fragments_1017_a=Javadoc -fragments_1018_a=PDF (1 MB) -fragments_1019_b=Support -fragments_1020_a=FAQ -fragments_1021_a=Error Analyzer -fragments_1022_a=Google Group (English) -fragments_1023_a=Google Group (Japanese) -fragments_1024_a=Google Group (Chinese) -fragments_1025_b=Appendix -fragments_1026_a=History & Roadmap -fragments_1027_a=License -fragments_1028_a=Build -fragments_1029_a=Links -fragments_1030_a=JaQu -fragments_1031_a=MVStore -fragments_1032_a=Architecture -fragments_1033_td=  -frame_1000_h1=H2 Database Engine -frame_1001_p=\ Welcome to H2, the free SQL database. The main feature of H2 are\: -frame_1002_li=It is free to use for everybody, source code is included -frame_1003_li=Written in Java, but also available as native executable -frame_1004_li=JDBC and (partial) ODBC API -frame_1005_li=Embedded and client/server modes -frame_1006_li=Clustering is supported -frame_1007_li=A web client is included -frame_1008_h2=No Javascript -frame_1009_p=\ If you are not automatically redirected to the main page, then Javascript is currently disabled or your browser does not support Javascript. Some features (for example the integrated search) require Javascript. 
-frame_1010_p=\ Please enable Javascript, or go ahead without it\: H2 Database Engine -history_1000_h1=History and Roadmap -history_1001_a=\ Change Log -history_1002_a=\ Roadmap -history_1003_a=\ History of this Database Engine -history_1004_a=\ Why Java -history_1005_a=\ Supporters -history_1006_h2=Change Log -history_1007_p=\ The up-to-date change log is available at http\://www.h2database.com/html/changelog.html -history_1008_h2=Roadmap -history_1009_p=\ The current roadmap is available at http\://www.h2database.com/html/roadmap.html -history_1010_h2=History of this Database Engine -history_1011_p=\ The development of H2 was started in May 2004, but it was first published on December 14th 2005. The original author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. In 2001, he joined PointBase Inc. where he wrote PointBase Micro, a commercial Java SQL database. At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed to continue to work on the Hypersonic SQL codebase. The name H2 stands for Hypersonic 2, however H2 does not share code with Hypersonic SQL or HSQLDB. H2 is built from scratch. -history_1012_h2=Why Java -history_1013_p=\ The main reasons to use a Java database are\: -history_1014_li=Very simple to integrate in Java applications -history_1015_li=Support for many different platforms -history_1016_li=More secure than native applications (no buffer overflows) -history_1017_li=User defined functions (or triggers) run very fast -history_1018_li=Unicode support -history_1019_p=\ Some think Java is too slow for low level operations, but this is no longer true. Garbage collection for example is now faster than manual memory management. -history_1020_p=\ Developing Java code is faster than developing C or C++ code. When using Java, most time can be spent on improving the algorithms instead of porting the code to different platforms or doing memory management. 
Features such as Unicode and network libraries are already built-in. In Java, writing secure code is easier because buffer overflows can not occur. Features such as reflection can be used for randomized testing. -history_1021_p=\ Java is future proof\: a lot of companies support Java. Java is now open source. -history_1022_p=\ To increase the portability and ease of use, this software depends on very few libraries. Features that are not available in open source Java implementations (such as Swing) are not used, or only used for optional features. -history_1023_h2=Supporters -history_1024_p=\ Many thanks for those who reported bugs, gave valuable feedback, spread the word, and translated this project. -history_1025_p=\ Also many thanks to the donors. To become a donor, use PayPal (at the very bottom of the main web page). Donators are\: -history_1026_li=Martin Wildam, Austria -history_1027_a=tagtraum industries incorporated, USA -history_1028_a=TimeWriter, Netherlands -history_1029_a=Cognitect, USA -history_1030_a=Code 42 Software, Inc., Minneapolis -history_1031_a=Code Lutin, France -history_1032_a=NetSuxxess GmbH, Germany -history_1033_a=Poker Copilot, Steve McLeod, Germany -history_1034_a=SkyCash, Poland -history_1035_a=Lumber-mill, Inc., Japan -history_1036_a=StockMarketEye, USA -history_1037_a=Eckenfelder GmbH & Co.KG, Germany -history_1038_li=Jun Iyama, Japan -history_1039_li=Steven Branda, USA -history_1040_li=Anthony Goubard, Netherlands -history_1041_li=Richard Hickey, USA -history_1042_li=Alessio Jacopo D'Adamo, Italy -history_1043_li=Ashwin Jayaprakash, USA -history_1044_li=Donald Bleyl, USA -history_1045_li=Frank Berger, Germany -history_1046_li=Florent Ramiere, France -history_1047_li=Antonio Casqueiro, Portugal -history_1048_li=Oliver Computing LLC, USA -history_1049_li=Harpal Grover Consulting Inc., USA -history_1050_li=Elisabetta Berlini, Italy -history_1051_li=William Gilbert, USA -history_1052_li=Antonio Dieguez Rojas, Chile 
-history_1053_a=Ontology Works, USA -history_1054_li=Pete Haidinyak, USA -history_1055_li=William Osmond, USA -history_1056_li=Joachim Ansorg, Germany -history_1057_li=Oliver Soerensen, Germany -history_1058_li=Christos Vasilakis, Greece -history_1059_li=Fyodor Kupolov, Denmark -history_1060_li=Jakob Jenkov, Denmark -history_1061_li=Stéphane Chartrand, Switzerland -history_1062_li=Glenn Kidd, USA -history_1063_li=Gustav Trede, Sweden -history_1064_li=Joonas Pulakka, Finland -history_1065_li=Bjorn Darri Sigurdsson, Iceland -history_1066_li=Gray Watson, USA -history_1067_li=Erik Dick, Germany -history_1068_li=Pengxiang Shao, China -history_1069_li=Bilingual Marketing Group, USA -history_1070_li=Philippe Marschall, Switzerland -history_1071_li=Knut Staring, Norway -history_1072_li=Theis Borg, Denmark -history_1073_li=Mark De Mendonca Duske, USA -history_1074_li=Joel A. Garringer, USA -history_1075_li=Olivier Chafik, France -history_1076_li=Rene Schwietzke, Germany -history_1077_li=Jalpesh Patadia, USA -history_1078_li=Takanori Kawashima, Japan -history_1079_li=Terrence JC Huang, China -history_1080_a=JiaDong Huang, Australia -history_1081_li=Laurent van Roy, Belgium -history_1082_li=Qian Chen, China -history_1083_li=Clinton Hyde, USA -history_1084_li=Kritchai Phromros, Thailand -history_1085_li=Alan Thompson, USA -history_1086_li=Ladislav Jech, Czech Republic -history_1087_li=Dimitrijs Fedotovs, Latvia -history_1088_li=Richard Manley-Reeve, United Kingdom -history_1089_li=Daniel Cyr, ThirdHalf.com, LLC, USA -history_1090_li=Peter Jünger, Germany -history_1091_li=Dan Keegan, USA -history_1092_li=Rafel Israels, Germany -history_1093_li=Fabien Todescato, France -history_1094_li=Cristan Meijer, Netherlands -history_1095_li=Adam McMahon, USA -history_1096_li=Fábio Gomes Lisboa Gomes, Brasil -history_1097_li=Lyderic Landry, England -history_1098_li=Mederp, Morocco -history_1099_li=Joaquim Golay, Switzerland -history_1100_li=Clemens Quoss, Germany -history_1101_li=Kervin 
Pierre, USA -history_1102_li=Jake Bellotti, Australia -history_1103_li=Arun Chittanoor, USA -installation_1000_h1=Installation -installation_1001_a=\ Requirements -installation_1002_a=\ Supported Platforms -installation_1003_a=\ Installing the Software -installation_1004_a=\ Directory Structure -installation_1005_h2=Requirements -installation_1006_p=\ To run this database, the following software stack is known to work. Other software most likely also works, but is not tested as much. -installation_1007_h3=Database Engine -installation_1008_li=Windows XP or Vista, Mac OS X, or Linux -installation_1009_li=Oracle Java 7 or newer -installation_1010_li=Recommended Windows file system\: NTFS (FAT32 only supports files up to 4 GB) -installation_1011_h3=H2 Console -installation_1012_li=Mozilla Firefox -installation_1013_h2=Supported Platforms -installation_1014_p=\ As this database is written in Java, it can run on many different platforms. It is tested with Java 7. Currently, the database is developed and tested on Windows 8 and Mac OS X using Java 7, but it also works in many other operating systems and using other Java runtime environments. All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported. -installation_1015_h2=Installing the Software -installation_1016_p=\ To install the software, run the installer or unzip it to a directory of your choice. 
-installation_1017_h2=Directory Structure -installation_1018_p=\ After installing, you should get the following directory structure\: -installation_1019_th=Directory -installation_1020_th=Contents -installation_1021_td=bin -installation_1022_td=JAR and batch files -installation_1023_td=docs -installation_1024_td=Documentation -installation_1025_td=docs/html -installation_1026_td=HTML pages -installation_1027_td=docs/javadoc -installation_1028_td=Javadoc files -installation_1029_td=ext -installation_1030_td=External dependencies (downloaded when building) -installation_1031_td=service -installation_1032_td=Tools to run the database as a Windows Service -installation_1033_td=src -installation_1034_td=Source files -installation_1035_td=src/docsrc -installation_1036_td=Documentation sources -installation_1037_td=src/installer -installation_1038_td=Installer, shell, and release build script -installation_1039_td=src/main -installation_1040_td=Database engine source code -installation_1041_td=src/test -installation_1042_td=Test source code -installation_1043_td=src/tools -installation_1044_td=Tools and database adapters source code -jaqu_1000_h1=JaQu -jaqu_1001_a=\ What is JaQu -jaqu_1002_a=\ Differences to Other Data Access Tools -jaqu_1003_a=\ Current State -jaqu_1004_a=\ Building the JaQu Library -jaqu_1005_a=\ Requirements -jaqu_1006_a=\ Example Code -jaqu_1007_a=\ Configuration -jaqu_1008_a=\ Natural Syntax -jaqu_1009_a=\ Other Ideas -jaqu_1010_a=\ Similar Projects -jaqu_1011_h2=What is JaQu -jaqu_1012_p=\ Note\: This project is currently in maintenance mode. A friendly fork of JaQu is available under the name iciql. -jaqu_1013_p=\ JaQu stands for Java Query and allows to access databases using pure Java. JaQu provides a fluent interface (or internal DSL). JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a Microsoft .NET technology). 
The following JaQu code\: -jaqu_1014_p=\ stands for the SQL statement\: -jaqu_1015_h2=Differences to Other Data Access Tools -jaqu_1016_p=\ Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java, auto-complete in the IDE is supported. Type checking is performed by the compiler. JaQu fully protects against SQL injection. -jaqu_1017_p=\ JaQu is meant as replacement for JDBC and SQL and not as much as a replacement for tools like Hibernate. With JaQu, you don't write SQL statements as strings. JaQu is much smaller and simpler than other persistence frameworks such as Hibernate, but it also does not provide all the features of those. Unlike iBatis and Hibernate, no XML or annotation based configuration is required; instead the configuration (if required at all) is done in pure Java, within the application. -jaqu_1018_p=\ JaQu does not require or contain any data caching mechanism. Like JDBC and iBatis, JaQu provides full control over when and what SQL statements are executed (but without having to write SQL statements as strings). -jaqu_1019_h3=Restrictions -jaqu_1020_p=\ Primitive types (eg. boolean, int, long, double) are not supported. Use java.lang.Boolean, Integer, Long, Double instead. -jaqu_1021_h3=Why in Java? -jaqu_1022_p=\ Most applications are written in Java. Mixing Java and another language (for example Scala or Groovy) in the same application is complicated\: you would need to split the application and database code, and write adapter / wrapper code. -jaqu_1023_h2=Current State -jaqu_1024_p=\ Currently, JaQu is only tested with the H2 database. The API may change in future versions. JaQu is not part of the h2 jar file, however the source code is included in H2, under\: -jaqu_1025_code=src/test/org/h2/test/jaqu/* -jaqu_1026_li=\ (samples and tests) -jaqu_1027_code=src/tools/org/h2/jaqu/* -jaqu_1028_li=\ (framework) -jaqu_1029_h2=Building the JaQu Library -jaqu_1030_p=\ To create the JaQu jar file, run\: build jarJaqu. 
This will create the file bin/h2jaqu.jar. -jaqu_1031_h2=Requirements -jaqu_1032_p=\ JaQu requires Java 6. Annotations are not need. Currently, JaQu is only tested with the H2 database engine, however in theory it should work with any database that supports the JDBC API. -jaqu_1033_h2=Example Code -jaqu_1034_h2=Configuration -jaqu_1035_p=\ JaQu does not require any configuration when using the default field to column mapping. To define table indices, or if you want to map a class to a table with a different name, or a field to a column with another name, create a function called define in the data class. Example\: -jaqu_1036_p=\ The method define() contains the mapping definition. It is called once when the class is used for the first time. Like annotations, the mapping is defined in the class itself. Unlike when using annotations, the compiler can check the syntax even for multi-column objects (multi-column indexes, multi-column primary keys and so on). Because the definition is written in Java, the configuration can be set at runtime, which is not possible using annotations. Unlike XML mapping configuration, the configuration is integrated in the class itself. -jaqu_1037_h2=Natural Syntax -jaqu_1038_p=The plan is to support more natural (pure Java) syntax in conditions. To do that, the condition class is de-compiled to a SQL condition. A proof of concept decompiler is included (but it doesn't fully work yet; patches are welcome). The planned syntax is\: -jaqu_1039_h2=Other Ideas -jaqu_1040_p=\ This project has just been started, and nothing is fixed yet. Some ideas are\: -jaqu_1041_li=Support queries on collections (instead of using a database). -jaqu_1042_li=Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA). -jaqu_1043_li=Internally use a JPA implementation (for example Hibernate) instead of SQL directly. -jaqu_1044_li=Use PreparedStatements and cache them. 
-jaqu_1045_h2=Similar Projects -jaqu_1046_a=iciql (a friendly fork of JaQu) -jaqu_1047_a=Cement Framework -jaqu_1048_a=Dreamsource ORM -jaqu_1049_a=Empire-db -jaqu_1050_a=JEQUEL\: Java Embedded QUEry Language -jaqu_1051_a=Joist -jaqu_1052_a=jOOQ -jaqu_1053_a=JoSQL -jaqu_1054_a=LIQUidFORM -jaqu_1055_a=Quaere (Alias implementation) -jaqu_1056_a=Quaere -jaqu_1057_a=Querydsl -jaqu_1058_a=Squill -license_1000_h1=License -license_1001_a=\ Summary and License FAQ -license_1002_a=\ Mozilla Public License Version 2.0 -license_1003_a=\ Eclipse Public License - Version 1.0 -license_1004_a=\ Export Control Classification Number (ECCN) -license_1005_h2=Summary and License FAQ -license_1006_p=\ H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL. -license_1007_li=You can use H2 for free. -license_1008_li=You can integrate it into your applications (including in commercial applications) and distribute it. -license_1009_li=Files containing only your code are not covered by this license (it is 'commercial friendly'). -license_1010_li=Modifications to the H2 source code must be published. -license_1011_li=You don't need to provide the source code of H2 if you did not modify anything. -license_1012_li=If you distribute a binary that includes H2, you need to add a disclaimer of liability - see the example below. -license_1013_p=\ However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB\: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was in fact just HSQLDB. It seems 'bungisoft' does not exist any more, but you can use the Wayback Machine and visit old web pages of http\://www.bungisoft.com. 
-license_1014_p=\ About porting the source code to another language (for example C\# or C++)\: converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code. -license_1015_p=\ If you distribute a binary that includes H2, you need to add the license and a disclaimer of liability (as you should do for your own code). You should add a disclaimer for each open source library you use. For example, add a file 3rdparty_license.txt in the directory where the jar files are, and list all open source libraries, each one with its license and disclaimer. For H2, a simple solution is to copy the following text below. You may also include a copy of the complete license. -license_1016_h2=Mozilla Public License Version 2.0 -license_1017_h3=1. Definitions -license_1018_p=1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -license_1019_p=1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. -license_1020_p=1.3. "Contribution" means Covered Software of a particular Contributor. -license_1021_p=1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -license_1022_p=1.5. "Incompatible With Secondary Licenses" means -license_1023_p=a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or -license_1024_p=b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. -license_1025_p=1.6. 
"Executable Form" means any form of the work other than Source Code Form. -license_1026_p=1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. -license_1027_p=1.8. "License" means this document. -license_1028_p=1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. -license_1029_p=1.10. "Modifications" means any of the following\: -license_1030_p=a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or -license_1031_p=b. any new file in Source Code Form that contains any Covered Software. -license_1032_p=1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. -license_1033_p=1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -license_1034_p=1.13. "Source Code Form" means the form of the work preferred for making modifications. -license_1035_p=1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. 
For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. -license_1036_h3=2. License Grants and Conditions -license_1037_h4=2.1. Grants -license_1038_p=Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license\: -license_1039_p=under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and -license_1040_p=under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. -license_1041_h4=2.2. Effective Date -license_1042_p=The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. -license_1043_h4=2.3. Limitations on Grant Scope -license_1044_p=The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor\: -license_1045_p=for any code that a Contributor has removed from Covered Software; or -license_1046_p=for infringements caused by\: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or -license_1047_p=under Patent Claims infringed by Covered Software in the absence of its Contributions. 
-license_1048_p=This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). -license_1049_h4=2.4. Subsequent Licenses -license_1050_p=No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). -license_1051_h4=2.5. Representation -license_1052_p=Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. -license_1053_h4=2.6. Fair Use -license_1054_p=This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. -license_1055_h4=2.7. Conditions -license_1056_p=Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. -license_1057_h3=3. Responsibilities -license_1058_h4=3.1. Distribution of Source Form -license_1059_p=All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. -license_1060_h4=3.2. 
Distribution of Executable Form -license_1061_p=If You distribute Covered Software in Executable Form then\: -license_1062_p=such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and -license_1063_p=You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. -license_1064_h4=3.3. Distribution of a Larger Work -license_1065_p=You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). -license_1066_h4=3.4. Notices -license_1067_p=You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. -license_1068_h4=3.5. 
Application of Additional Terms -license_1069_p=You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. -license_1070_h3=4. Inability to Comply Due to Statute or Regulation -license_1071_p=If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must\: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. -license_1072_h3=5. Termination -license_1073_p=5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. 
Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. -license_1074_p=5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. -license_1075_p=5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. -license_1076_h3=6. Disclaimer of Warranty -license_1077_p=Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. -license_1078_h3=7. 
Limitation of Liability -license_1079_p=Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. -license_1080_h3=8. Litigation -license_1081_p=Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. -license_1082_h3=9. Miscellaneous -license_1083_p=This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. -license_1084_h3=10. Versions of the License -license_1085_h4=10.1. New Versions -license_1086_p=Mozilla Foundation is the license steward. 
Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. -license_1087_h4=10.2. Effect of New Versions -license_1088_p=You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. -license_1089_h4=10.3. Modified Versions -license_1090_p=If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). -license_1091_h4=10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses -license_1092_p=If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. -license_1093_h3=Exhibit A - Source Code Form License Notice -license_1094_p=If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. -license_1095_p=You may add additional accurate notices of copyright ownership. -license_1096_h3=Exhibit B - "Incompatible With Secondary Licenses" Notice -license_1097_h2=Eclipse Public License - Version 1.0 -license_1098_p=\ THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. -license_1099_h3=1. 
DEFINITIONS -license_1100_p=\ "Contribution" means\: -license_1101_p=\ a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and -license_1102_p=\ b) in the case of each subsequent Contributor\: -license_1103_p=\ i) changes to the Program, and -license_1104_p=\ ii) additions to the Program; -license_1105_p=\ where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which\: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. -license_1106_p=\ "Contributor" means any person or entity that distributes the Program. -license_1107_p=\ "Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. -license_1108_p=\ "Program" means the Contributions distributed in accordance with this Agreement. -license_1109_p=\ "Recipient" means anyone who receives the Program under this Agreement, including all Contributors. -license_1110_h3=2. GRANT OF RIGHTS -license_1111_p=\ a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. 
-license_1112_p=\ b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. -license_1113_p=\ c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. -license_1114_p=\ d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. -license_1115_h3=3. 
REQUIREMENTS -license_1116_p=\ A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that\: -license_1117_p=\ a) it complies with the terms and conditions of this Agreement; and -license_1118_p=\ b) its license agreement\: -license_1119_p=\ i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; -license_1120_p=\ ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; -license_1121_p=\ iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and -license_1122_p=\ iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. -license_1123_p=\ When the Program is made available in source code form\: -license_1124_p=\ a) it must be made available under this Agreement; and -license_1125_p=\ b) a copy of this Agreement must be included with each copy of the Program. -license_1126_p=\ Contributors may not remove or alter any copyright notices contained within the Program. -license_1127_p=\ Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. -license_1128_h3=4. COMMERCIAL DISTRIBUTION -license_1129_p=\ Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. 
While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must\: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. -license_1130_p=\ For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. -license_1131_h3=5. 
NO WARRANTY -license_1132_p=\ EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. -license_1133_h3=6. DISCLAIMER OF LIABILITY -license_1134_p=\ EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. -license_1135_h3=7. GENERAL -license_1136_p=\ If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. 
-license_1137_p=\ If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. -license_1138_p=\ All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. -license_1139_p=\ Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. 
Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. -license_1140_p=\ This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. -license_1141_h2=Export Control Classification Number (ECCN) -license_1142_p=\ As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. However, for legal reasons, we can make no warranty that this information is correct. For details, see also the Apache Software Foundation Export Classifications page. -links_1000_h1=Links -links_1001_p=\ If you want to add a link, please send it to the support email address or post it to the group. -links_1002_a=\ Quotes -links_1003_a=\ Books -links_1004_a=\ Extensions -links_1005_a=\ Blog Articles, Videos -links_1006_a=\ Database Frontends / Tools -links_1007_a=\ Products and Projects -links_1008_h2=Quotes -links_1009_a=\ Quote -links_1010_p=\: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. But, in less than 15 minutes I had H2 up and working with little recoding of the SQL. Thanks..... " -links_1011_h2=Books -links_1012_a=\ Seam In Action -links_1013_h2=Extensions -links_1014_a=\ Grails H2 Database Plugin -links_1015_a=\ h2osgi\: OSGi for the H2 Database -links_1016_a=\ H2Sharp\: ADO.NET interface for the H2 database engine -links_1017_a=\ A spatial extension of the H2 database. 
-links_1018_h2=Blog Articles, Videos -links_1019_a=\ Youtube\: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2 -links_1020_a=\ Analyzing CSVs with H2 in under 10 minutes (2009-12-07) -links_1021_a=\ Efficient sorting and iteration on large databases (2009-06-15) -links_1022_a=\ Porting Flexive to the H2 Database (2008-12-05) -links_1023_a=\ H2 Database with GlassFish (2008-11-24) -links_1024_a=\ H2 Database - Performance Tracing (2008-04-30) -links_1025_a=\ Open Source Databases Comparison (2007-09-11) -links_1026_a=\ The Codist\: The Open Source Frameworks I Use (2007-07-23) -links_1027_a=\ The Codist\: SQL Injections\: How Not To Get Stuck (2007-05-08) -links_1028_a=\ David Coldrick's Weblog\: New Version of H2 Database Released (2007-01-06) -links_1029_a=\ The Codist\: Write Your Own Database, Again (2006-11-13) -links_1030_h2=Project Pages -links_1031_a=\ Ohloh -links_1032_a=\ Freshmeat Project Page -links_1033_a=\ Wikipedia -links_1034_a=\ Java Source Net -links_1035_a=\ Linux Package Manager -links_1036_h2=Database Frontends / Tools -links_1037_a=\ Dataflyer -links_1038_p=\ A tool to browse databases and export data. -links_1039_a=\ DB Solo -links_1040_p=\ SQL query tool. -links_1041_a=\ DbVisualizer -links_1042_p=\ Database tool. -links_1043_a=\ Execute Query -links_1044_p=\ Database utility written in Java. -links_1045_a=\ Flyway -links_1046_p=\ The agile database migration framework for Java. -links_1047_a=\ [fleXive] -links_1048_p=\ JavaEE 5 open source framework for the development of complex and evolving (web-)applications. -links_1049_a=\ JDBC Console -links_1050_p=\ This small webapp gives an ability to execute SQL against datasources bound in container's JNDI. Based on H2 Console. -links_1051_a=\ HenPlus -links_1052_p=\ HenPlus is a SQL shell written in Java. -links_1053_a=\ JDBC lint -links_1054_p=\ Helps write correct and efficient code when using the JDBC API. 
-links_1055_a=\ OpenOffice -links_1056_p=\ Base is OpenOffice.org's database application. It provides access to relational data sources. -links_1057_a=\ RazorSQL -links_1058_p=\ An SQL query tool, database browser, SQL editor, and database administration tool. -links_1059_a=\ SQL Developer -links_1060_p=\ Universal Database Frontend. -links_1061_a=\ SQL Workbench/J -links_1062_p=\ Free DBMS-independent SQL tool. -links_1063_a=\ SQuirreL SQL Client -links_1064_p=\ Graphical tool to view the structure of a database, browse the data, issue SQL commands etc. -links_1065_a=\ SQuirreL DB Copy Plugin -links_1066_p=\ Tool to copy data from one database to another. -links_1067_h2=Products and Projects -links_1068_a=\ AccuProcess -links_1069_p=\ Visual business process modeling and simulation software for business users. -links_1070_a=\ Adeptia BPM -links_1071_p=\ A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows. -links_1072_a=\ Adeptia Integration -links_1073_p=\ Process-centric, services-based application integration suite. -links_1074_a=\ Aejaks -links_1075_p=\ A server-side scripting environment to build AJAX enabled web applications. -links_1076_a=\ Axiom Stack -links_1077_p=\ A web framework that let's you write dynamic web applications with Zen-like simplicity. -links_1078_a=\ Apache Cayenne -links_1079_p=\ Open source persistence framework providing object-relational mapping (ORM) and remoting services. -links_1080_a=\ Apache Jackrabbit -links_1081_p=\ Open source implementation of the Java Content Repository API (JCR). -links_1082_a=\ Apache OpenJPA -links_1083_p=\ Open source implementation of the Java Persistence API (JPA). -links_1084_a=\ AppFuse -links_1085_p=\ Helps building web applications. -links_1086_a=\ BGBlitz -links_1087_p=\ The Swiss army knife of Backgammon. 
-links_1088_a=\ Bonita -links_1089_p=\ Open source workflow solution for handing long-running, user-oriented processes providing out of the box workflow and business process management features. -links_1090_a=\ Bookmarks Portlet -links_1091_p=\ JSR 168 compliant bookmarks management portlet application. -links_1092_a=\ Claros inTouch -links_1093_p=\ Ajax communication suite with mail, addresses, notes, IM, and rss reader. -links_1094_a=\ CrashPlan PRO Server -links_1095_p=\ Easy and cross platform backup solution for business and service providers. -links_1096_a=\ DataNucleus -links_1097_p=\ Java persistent objects. -links_1098_a=\ DbUnit -links_1099_p=\ A JUnit extension (also usable with Ant) targeted for database-driven projects. -links_1100_a=\ DiffKit -links_1101_p=\ DiffKit is a tool for comparing two tables of data, field-by-field. DiffKit is like the Unix diff utility, but for tables instead of lines of text. -links_1102_a=\ Dinamica Framework -links_1103_p=\ Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets). -links_1104_a=\ District Health Information Software 2 (DHIS) -links_1105_p=\ The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities. -links_1106_a=\ Ebean ORM Persistence Layer -links_1107_p=\ Open source Java Object Relational Mapping tool. -links_1108_a=\ Eclipse CDO -links_1109_p=\ The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution. -links_1110_a=\ Fabric3 -links_1111_p=\ Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http\://www.osoa.org). -links_1112_a=\ FIT4Data -links_1113_p=\ A testing framework for data management applications built on the Java implementation of FIT. 
-links_1114_a=\ Flux -links_1115_p=\ Java job scheduler, file transfer, workflow, and BPM. -links_1116_a=\ GeoServer -links_1117_p=\ GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing. -links_1118_a=\ GBIF Integrated Publishing Toolkit (IPT) -links_1119_p=\ The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data\: taxon primary occurrence data, taxon checklists and general resource metadata. -links_1120_a=\ GNU Gluco Control -links_1121_p=\ Helps you to manage your diabetes. -links_1122_a=\ Golden T Studios -links_1123_p=\ Fun-to-play games with a simple interface. -links_1124_a=\ GridGain -links_1125_p=\ GridGain is easy to use Cloud Application Platform that enables development of highly scalable distributed Java and Scala applications that auto-scale on any grid or cloud infrastructure. -links_1126_a=\ Group Session -links_1127_p=\ Open source web groupware. -links_1128_a=\ HA-JDBC -links_1129_p=\ High-Availability JDBC\: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver. -links_1130_a=\ Hibernate -links_1131_p=\ Relational persistence for idiomatic Java (O-R mapping tool). -links_1132_a=\ Hibicius -links_1133_p=\ Online Banking Client for the HBCI protocol. -links_1134_a=\ ImageMapper -links_1135_p=\ ImageMapper frees users from having to use file browsers to view their images. They get fast access to images and easy cataloguing of them via a user friendly interface. -links_1136_a=\ JAMWiki -links_1137_p=\ Java-based Wiki engine. -links_1138_a=\ Jaspa -links_1139_p=\ Java Spatial. Jaspa potentially brings around 200 spatial functions. -links_1140_a=\ Java Simon -links_1141_p=\ Simple Monitoring API. 
-links_1142_a=\ JBoss jBPM -links_1143_p=\ A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration. -links_1144_a=\ JBoss Jopr -links_1145_p=\ An enterprise management solution for JBoss middleware projects and other application technologies. -links_1146_a=\ JGeocoder -links_1147_p=\ Free Java geocoder. Geocoding is the process of estimating a latitude and longitude for a given location. -links_1148_a=\ JGrass -links_1149_p=\ Java Geographic Resources Analysis Support System. Free, multi platform, open source GIS based on the GIS framework of uDig. -links_1150_a=\ Jena -links_1151_p=\ Java framework for building Semantic Web applications. -links_1152_a=\ JMatter -links_1153_p=\ Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern. -links_1154_a=\ jOOQ (Java Object Oriented Querying) -links_1155_p=\ jOOQ is a fluent API for typesafe SQL query construction and execution -links_1156_a=\ Liftweb -links_1157_p=\ A Scala-based, secure, developer friendly web framework. -links_1158_a=\ LiquiBase -links_1159_p=\ A tool to manage database changes and refactorings. -links_1160_a=\ Luntbuild -links_1161_p=\ Build automation and management tool. -links_1162_a=\ localdb -links_1163_p=\ A tool that locates the full file path of the folder containing the database files. -links_1164_a=\ Magnolia -links_1165_p=\ Microarray Data Management and Export System for PFGRC (Pathogen Functional Genomics Resource Center) Microarrays. -links_1166_a=\ MiniConnectionPoolManager -links_1167_p=\ A lightweight standalone JDBC connection pool manager. -links_1168_a=\ Mr. Persister -links_1169_p=\ Simple, small and fast object relational mapping. -links_1170_a=\ Myna Application Server -links_1171_p=\ Java web app that provides dynamic web content and Java libraries access from JavaScript. 
-links_1172_a=\ MyTunesRss -links_1173_p=\ MyTunesRSS lets you listen to your music wherever you are. -links_1174_a=\ NCGC CurveFit -links_1175_p=\ From\: NIH Chemical Genomics Center, National Institutes of Health, USA. An open source application in the life sciences research field. This application handles chemical structures and biological responses of thousands of compounds with the potential to handle million+ compounds. It utilizes an embedded H2 database to enable flexible query/retrieval of all data including advanced chemical substructure and similarity searching. The application highlights an automated curve fitting and classification algorithm that outperforms commercial packages in the field. Commercial alternatives are typically small desktop software that handle a few dose response curves at a time. A couple of commercial packages that do handle several thousand curves are very expensive tools (>60k USD) that require manual curation of analysis by the user; require a license to Oracle; lack advanced query/retrieval; and the ability to handle chemical structures. -links_1176_a=\ Nuxeo -links_1177_p=\ Standards-based, open source platform for building ECM applications. -links_1178_a=\ nWire -links_1179_p=\ Eclipse plug-in which expedites Java development. It's main purpose is to help developers find code quicker and easily understand how it relates to the rest of the application, thus, understand the application structure. -links_1180_a=\ Ontology Works -links_1181_p=\ This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), semantic information fusion and semantic federation of legacy databases, ontology-based domain modeling, and management of the distributed enterprise. -links_1182_a=\ Ontoprise OntoBroker -links_1183_p=\ SemanticWeb-Middleware. It supports all W3C Semantic Web recommendations\: OWL, RDF, RDFS, SPARQL, and F-Logic. 
-links_1184_a=\ Open Anzo -links_1185_p=\ Semantic Application Server. -links_1186_a=\ OpenGroove -links_1187_p=\ OpenGroove is a groupware program that allows users to synchronize data. -links_1188_a=\ OpenSocial Development Environment (OSDE) -links_1189_p=\ Development tool for OpenSocial application. -links_1190_a=\ Orion -links_1191_p=\ J2EE Application Server. -links_1192_a=\ P5H2 -links_1193_p=\ A library for the Processing programming language and environment. -links_1194_a=\ Phase-6 -links_1195_p=\ A computer based learning software. -links_1196_a=\ Pickle -links_1197_p=\ Pickle is a Java library containing classes for persistence, concurrency, and logging. -links_1198_a=\ Piman -links_1199_p=\ Water treatment projects data management. -links_1200_a=\ PolePosition -links_1201_p=\ Open source database benchmark. -links_1202_a=\ Poormans -links_1203_p=\ Very basic CMS running as a SWT application and generating static html pages. -links_1204_a=\ Railo -links_1205_p=\ Railo is an alternative engine for the Cold Fusion Markup Language, that compiles code programmed in CFML into Java bytecode and executes it on a servlet engine. -links_1206_a=\ Razuna -links_1207_p=\ Open source Digital Asset Management System with integrated Web Content Management. -links_1208_a=\ RIFE -links_1209_p=\ A full-stack web application framework with tools and APIs to implement most common web features. -links_1210_a=\ Sava -links_1211_p=\ Open-source web-based content management system. -links_1212_a=\ Scriptella -links_1213_p=\ ETL (Extract-Transform-Load) and script execution tool. -links_1214_a=\ Sesar -links_1215_p=\ Dependency Injection Container with Aspect Oriented Programming. -links_1216_a=\ SemmleCode -links_1217_p=\ Eclipse plugin to help you improve software quality. -links_1218_a=\ SeQuaLite -links_1219_p=\ A free, light-weight, java data access framework. 
-links_1220_a=\ ShapeLogic -links_1221_p=\ Toolkit for declarative programming, image processing and computer vision. -links_1222_a=\ Shellbook -links_1223_p=\ Desktop publishing application. -links_1224_a=\ Signsoft intelliBO -links_1225_p=\ Persistence middleware supporting the JDO specification. -links_1226_a=\ SimpleORM -links_1227_p=\ Simple Java Object Relational Mapping. -links_1228_a=\ SymmetricDS -links_1229_p=\ A web-enabled, database independent, data synchronization/replication software. -links_1230_a=\ SmartFoxServer -links_1231_p=\ Platform for developing multiuser applications and games with Macromedia Flash. -links_1232_a=\ Social Bookmarks Friend Finder -links_1233_p=\ A GUI application that allows you to find users with similar bookmarks to the user specified (for delicious.com). -links_1234_a=\ sormula -links_1235_p=\ Simple object relational mapping. -links_1236_a=\ Springfuse -links_1237_p=\ Code generation For Spring, Spring MVC & Hibernate. -links_1238_a=\ SQLOrm -links_1239_p=\ Java Object Relation Mapping. -links_1240_a=\ StelsCSV and StelsXML -links_1241_p=\ StelsCSV is a CSV JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on text files. StelsXML is a XML JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on XML files. Both use H2 as the SQL engine. -links_1242_a=\ StorYBook -links_1243_p=\ A summary-based tool for novelist and script writers. It helps to keep the overview over the various traces a story has. -links_1244_a=\ StreamCruncher -links_1245_p=\ Event (stream) processing kernel. -links_1246_a=\ SUSE Manager, part of Linux Enterprise Server 11 -links_1247_p=\ The SUSE Manager eases the burden of compliance with regulatory requirements and corporate policies. -links_1248_a=\ Tune Backup -links_1249_p=\ Easy-to-use backup solution for your iTunes library. 
-links_1250_a=\ TimeWriter -links_1251_p=\ TimeWriter is a very flexible program for time administration / time tracking. The older versions used dBase tables. The new version 5 is completely rewritten, now using the H2 database. TimeWriter is delivered in Dutch and English. -links_1252_a=\ weblica -links_1253_p=\ Desktop CMS. -links_1254_a=\ Web of Web -links_1255_p=\ Collaborative and realtime interactive media platform for the web. -links_1256_a=\ Werkzeugkasten -links_1257_p=\ Minimum Java Toolset. -links_1258_a=\ VPDA -links_1259_p=\ View providers driven applications is a Java based application framework for building applications composed from server components - view providers. -links_1260_a=\ Volunteer database -links_1261_p=\ A database front end to register volunteers, partnership and donation for a Non Profit organization. -mainWeb_1000_h1=H2 Database Engine -mainWeb_1001_p=\ Welcome to H2, the Java SQL database. The main features of H2 are\: -mainWeb_1002_li=Very fast, open source, JDBC API -mainWeb_1003_li=Embedded and server modes; in-memory databases -mainWeb_1004_li=Browser based Console application -mainWeb_1005_li=Small footprint\: around 1.5 MB jar file size -mainWeb_1006_h2=Download -mainWeb_1007_td=\ Version 1.4.196 (2017-06-10) -mainWeb_1008_a=Windows Installer (5 MB) -mainWeb_1009_a=All Platforms (zip, 8 MB) -mainWeb_1010_a=All Downloads -mainWeb_1011_td=    -mainWeb_1012_h2=Support -mainWeb_1013_a=Stack Overflow (tag H2) -mainWeb_1014_a=Google Group English -mainWeb_1015_p=, Japanese -mainWeb_1016_p=\ For non-technical issues, use\: -mainWeb_1017_h2=Features -mainWeb_1018_th=H2 -mainWeb_1019_a=Derby -mainWeb_1020_a=HSQLDB -mainWeb_1021_a=MySQL -mainWeb_1022_a=PostgreSQL -mainWeb_1023_td=Pure Java -mainWeb_1024_td=Yes -mainWeb_1025_td=Yes -mainWeb_1026_td=Yes -mainWeb_1027_td=No -mainWeb_1028_td=No -mainWeb_1029_td=Memory Mode -mainWeb_1030_td=Yes -mainWeb_1031_td=Yes -mainWeb_1032_td=Yes -mainWeb_1033_td=No -mainWeb_1034_td=No 
-mainWeb_1035_td=Encrypted Database -mainWeb_1036_td=Yes -mainWeb_1037_td=Yes -mainWeb_1038_td=Yes -mainWeb_1039_td=No -mainWeb_1040_td=No -mainWeb_1041_td=ODBC Driver -mainWeb_1042_td=Yes -mainWeb_1043_td=No -mainWeb_1044_td=No -mainWeb_1045_td=Yes -mainWeb_1046_td=Yes -mainWeb_1047_td=Fulltext Search -mainWeb_1048_td=Yes -mainWeb_1049_td=No -mainWeb_1050_td=No -mainWeb_1051_td=Yes -mainWeb_1052_td=Yes -mainWeb_1053_td=Multi Version Concurrency -mainWeb_1054_td=Yes -mainWeb_1055_td=No -mainWeb_1056_td=Yes -mainWeb_1057_td=Yes -mainWeb_1058_td=Yes -mainWeb_1059_td=Footprint (jar/dll size) -mainWeb_1060_td=~1 MB -mainWeb_1061_td=~2 MB -mainWeb_1062_td=~1 MB -mainWeb_1063_td=~4 MB -mainWeb_1064_td=~6 MB -mainWeb_1065_p=\ See also the detailed comparison. -mainWeb_1066_h2=News -mainWeb_1067_b=Newsfeeds\: -mainWeb_1068_a=Full text (Atom) -mainWeb_1069_p=\ or Header only (RSS). -mainWeb_1070_b=Email Newsletter\: -mainWeb_1071_p=\ Subscribe to H2 Database News (Google account required) to get informed about new releases. Your email address is only used in this context. -mainWeb_1072_td=  -mainWeb_1073_h2=Contribute -mainWeb_1074_p=\ You can contribute to the development of H2 by sending feedback and bug reports, or translate the H2 Console application (for details, start the H2 Console and select Options / Translate). To donate money, click on the PayPal button below. You will be listed as a supporter\: -main_1000_h1=H2 Database Engine -main_1001_p=\ Welcome to H2, the free Java SQL database engine. -main_1002_a=Quickstart -main_1003_p=\ Get a fast overview. -main_1004_a=Tutorial -main_1005_p=\ Go through the samples. -main_1006_a=Features -main_1007_p=\ See what this database can do and how to use these features. 
-mvstore_1000_h1=MVStore -mvstore_1001_a=\ Overview -mvstore_1002_a=\ Example Code -mvstore_1003_a=\ Store Builder -mvstore_1004_a=\ R-Tree -mvstore_1005_a=\ Features -mvstore_1006_a=- Maps -mvstore_1007_a=- Versions -mvstore_1008_a=- Transactions -mvstore_1009_a=- In-Memory Performance and Usage -mvstore_1010_a=- Pluggable Data Types -mvstore_1011_a=- BLOB Support -mvstore_1012_a=- R-Tree and Pluggable Map Implementations -mvstore_1013_a=- Concurrent Operations and Caching -mvstore_1014_a=- Log Structured Storage -mvstore_1015_a=- Off-Heap and Pluggable Storage -mvstore_1016_a=- File System Abstraction, File Locking and Online Backup -mvstore_1017_a=- Encrypted Files -mvstore_1018_a=- Tools -mvstore_1019_a=- Exception Handling -mvstore_1020_a=- Storage Engine for H2 -mvstore_1021_a=\ File Format -mvstore_1022_a=\ Similar Projects and Differences to Other Storage Engines -mvstore_1023_a=\ Current State -mvstore_1024_a=\ Requirements -mvstore_1025_h2=Overview -mvstore_1026_p=\ The MVStore is a persistent, log structured key-value store. It is planned to be the next storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL. -mvstore_1027_li=MVStore stands for "multi-version store". -mvstore_1028_li=Each store contains a number of maps that can be accessed using the java.util.Map interface. -mvstore_1029_li=Both file-based persistence and in-memory operation are supported. -mvstore_1030_li=It is intended to be fast, simple to use, and small. -mvstore_1031_li=Concurrent read and write operations are supported. -mvstore_1032_li=Transactions are supported (including concurrent transactions and 2-phase commit). -mvstore_1033_li=The tool is very modular. It supports pluggable data types and serialization, pluggable storage (to a file, to off-heap memory), pluggable map implementations (B-tree, R-tree, concurrent B-tree currently), BLOB storage, and a file system abstraction to support encrypted files and zip files. 
-mvstore_1034_h2=Example Code -mvstore_1035_p=\ The following sample code shows how to use the tool\: -mvstore_1036_h2=Store Builder -mvstore_1037_p=\ The MVStore.Builder provides a fluid interface to build a store if configuration options are needed. Example usage\: -mvstore_1038_p=\ The list of available options is\: -mvstore_1039_li=autoCommitBufferSize\: the size of the write buffer. -mvstore_1040_li=autoCommitDisabled\: to disable auto-commit. -mvstore_1041_li=backgroundExceptionHandler\: a handler for exceptions that could occur while writing in the background. -mvstore_1042_li=cacheSize\: the cache size in MB. -mvstore_1043_li=compress\: compress the data when storing using a fast algorithm (LZF). -mvstore_1044_li=compressHigh\: compress the data when storing using a slower algorithm (Deflate). -mvstore_1045_li=encryptionKey\: the key for file encryption. -mvstore_1046_li=fileName\: the name of the file, for file based stores. -mvstore_1047_li=fileStore\: the storage implementation to use. -mvstore_1048_li=pageSplitSize\: the point where pages are split. -mvstore_1049_li=readOnly\: open the file in read-only mode. -mvstore_1050_h2=R-Tree -mvstore_1051_p=\ The MVRTreeMap is an R-tree implementation that supports fast spatial queries. It can be used as follows\: -mvstore_1052_p=\ The default number of dimensions is 2. To use a different number of dimensions, call new MVRTreeMap.Builder<String>().dimensions(3). The minimum number of dimensions is 1, the maximum is 32. -mvstore_1053_h2=Features -mvstore_1054_h3=Maps -mvstore_1055_p=\ Each store contains a set of named maps. A map is sorted by key, and supports the common lookup operations, including access to the first and last key, iterate over some or all keys, and so on. 
-mvstore_1056_p=\ Also supported, and very uncommon for maps, is fast index lookup\: the entries of the map can be efficiently accessed like a random-access list (get the entry at the given index), and the index of a key can be calculated efficiently. That also means getting the median of two keys is very fast, and a range of keys can be counted very quickly. The iterator supports fast skipping. This is possible because internally, each map is organized in the form of a counted B+-tree. -mvstore_1057_p=\ In database terms, a map can be used like a table, where the key of the map is the primary key of the table, and the value is the row. A map can also represent an index, where the key of the map is the key of the index, and the value of the map is the primary key of the table (for non-unique indexes, the key of the map must also contain the primary key). -mvstore_1058_h3=Versions -mvstore_1059_p=\ A version is a snapshot of all the data of all maps at a given point in time. Creating a snapshot is fast\: only those pages that are changed after a snapshot are copied. This behavior is also called COW (copy on write). Old versions are readable. Rollback to an old version is supported. -mvstore_1060_p=\ The following sample code shows how to create a store, open a map, add some data, and access the current and an old version\: -mvstore_1061_h3=Transactions -mvstore_1062_p=\ To support multiple concurrent open transactions, a transaction utility is included, the TransactionStore. The tool supports PostgreSQL style "read committed" transaction isolation with savepoints, two-phase commit, and other features typically available in a database. There is no limit on the size of a transaction (the log is written to disk for large or long running transactions). 
-mvstore_1063_p=\ Internally, this utility stores the old versions of changed entries in a separate map, similar to a transaction log, except that entries of a closed transaction are removed, and the log is usually not stored for short transactions. For common use cases, the storage overhead of this utility is very small compared to the overhead of a regular transaction log. -mvstore_1064_h3=In-Memory Performance and Usage -mvstore_1065_p=\ Performance of in-memory operations is about 50% slower than java.util.TreeMap. -mvstore_1066_p=\ The memory overhead for large maps is slightly better than for the regular map implementations, but there is a higher overhead per map. For maps with less than about 25 entries, the regular map implementations need less memory. -mvstore_1067_p=\ If no file name is specified, the store operates purely in memory. Except for persisting data, all features are supported in this mode (multi-versioning, index lookup, R-tree and so on). If a file name is specified, all operations occur in memory (with the same performance characteristics) until data is persisted. -mvstore_1068_p=\ As in all map implementations, keys need to be immutable, that means changing the key object after an entry has been added is not allowed. If a file name is specified, the value may also not be changed after adding an entry, because it might be serialized (which could happen at any time when autocommit is enabled). -mvstore_1069_h3=Pluggable Data Types -mvstore_1070_p=\ Serialization is pluggable. The default serialization currently supports many common data types, and uses Java serialization for other objects. The following classes are currently directly supported\: Boolean, Byte, Short, Character, Integer, Long, Float, Double, BigInteger, BigDecimal, String, UUID, Date and arrays (both primitive arrays and object arrays). For serialized objects, the size estimate is adjusted using an exponential moving average. 
-mvstore_1071_p=\ Parameterized data types are supported (for example one could build a string data type that limits the length). -mvstore_1072_p=\ The storage engine itself does not have any length limits, so that keys, values, pages, and chunks can be very big (as big as fits in memory). Also, there is no inherent limit to the number of maps and chunks. Due to using a log structured storage, there is no special case handling for large keys or pages. -mvstore_1073_h3=BLOB Support -mvstore_1074_p=\ There is a mechanism that stores large binary objects by splitting them into smaller blocks. This allows to store objects that don't fit in memory. Streaming as well as random access reads on such objects are supported. This tool is written on top of the store, using only the map interface. -mvstore_1075_h3=R-Tree and Pluggable Map Implementations -mvstore_1076_p=\ The map implementation is pluggable. In addition to the default MVMap (multi-version map), there is a multi-version R-tree map implementation for spatial operations. -mvstore_1077_h3=Concurrent Operations and Caching -mvstore_1078_p=\ Concurrent reads and writes are supported. All such read operations can occur in parallel. Concurrent reads from the page cache, as well as concurrent reads from the file system are supported. Write operations first read the relevant pages from disk to memory (this can happen concurrently), and only then modify the data. The in-memory parts of write operations are synchronized. Writing changes to the file can occur concurrently to modifying the data, as writing operates on a snapshot. -mvstore_1079_p=\ Caching is done on the page level. The page cache is a concurrent LIRS cache, which should be resistant against scan operations. -mvstore_1080_p=\ For fully scalable concurrent write operations to a map (in-memory and to disk), the map could be split into multiple maps in different stores ('sharding'). The plan is to add such a mechanism later when needed. 
-mvstore_1081_h3=Log Structured Storage -mvstore_1082_p=\ Internally, changes are buffered in memory, and once enough changes have accumulated, they are written in one continuous disk write operation. Compared to traditional database storage engines, this should improve write performance for file systems and storage systems that do not efficiently support small random writes, such as Btrfs, as well as SSDs. (According to a test, write throughput of a common SSD increases with write block size, until a block size of 2 MB, and then does not further increase.) By default, changes are automatically written when more than a number of pages are modified, and once every second in a background thread, even if only little data was changed. Changes can also be written explicitly by calling commit(). -mvstore_1083_p=\ When storing, all changed pages are serialized, optionally compressed using the LZF algorithm, and written sequentially to a free area of the file. Each such change set is called a chunk. All parent pages of the changed B-trees are stored in this chunk as well, so that each chunk also contains the root of each changed map (which is the entry point for reading this version of the data). There is no separate index\: all data is stored as a list of pages. Per store, there is one additional map that contains the metadata (the list of maps, where the root page of each map is stored, and the list of chunks). -mvstore_1084_p=\ There are usually two write operations per chunk\: one to store the chunk data (the pages), and one to update the file header (so it points to the latest chunk). If the chunk is appended at the end of the file, the file header is only written at the end of the chunk. There is no transaction log, no undo log, and there are no in-place updates (however, unused chunks are overwritten by default). 
-mvstore_1085_p=\ Old data is kept for at least 45 seconds (configurable), so that there are no explicit sync operations required to guarantee data consistency. An application can also sync explicitly when needed. To reuse disk space, the chunks with the lowest amount of live data are compacted (the live data is stored again in the next chunk). To improve data locality and disk space usage, the plan is to automatically defragment and compact data. -mvstore_1086_p=\ Compared to traditional storage engines (that use a transaction log, undo log, and main storage area), the log structured storage is simpler, more flexible, and typically needs less disk operations per change, as data is only written once instead of twice or 3 times, and because the B-tree pages are always full (they are stored next to each other) and can be easily compressed. But temporarily, disk space usage might actually be a bit higher than for a regular database, as disk space is not immediately re-used (there are no in-place updates). -mvstore_1087_h3=Off-Heap and Pluggable Storage -mvstore_1088_p=\ Storage is pluggable. Unless pure in-memory operation is used, the default storage is to a single file. -mvstore_1089_p=\ An off-heap storage implementation is available. This storage keeps the data in the off-heap memory, meaning outside of the regular garbage collected heap. This allows to use very large in-memory stores without having to increase the JVM heap, which would increase Java garbage collection pauses a lot. Memory is allocated using ByteBuffer.allocateDirect. One chunk is allocated at a time (each chunk is usually a few MB large), so that allocation cost is low. To use the off-heap storage, call\: -mvstore_1090_h3=File System Abstraction, File Locking and Online Backup -mvstore_1091_p=\ The file system is pluggable. The same file system abstraction is used as H2 uses. The file can be encrypted using a encrypting file system wrapper. 
Other file system implementations support reading from a compressed zip or jar file. The file system abstraction closely matches the Java 7 file system API. -mvstore_1092_p=\ Each store may only be opened once within a JVM. When opening a store, the file is locked in exclusive mode, so that the file can only be changed from within one process. Files can be opened in read-only mode, in which case a shared lock is used. -mvstore_1093_p=\ The persisted data can be backed up at any time, even during write operations (online backup). To do that, automatic disk space reuse needs to be first disabled, so that new data is always appended at the end of the file. Then, the file can be copied. The file handle is available to the application. It is recommended to use the utility class FileChannelInputStream to do this. For encrypted databases, both the encrypted (raw) file content, as well as the clear text content, can be backed up. -mvstore_1094_h3=Encrypted Files -mvstore_1095_p=\ File encryption ensures the data can only be read with the correct password. Data can be encrypted as follows\: -mvstore_1096_p=\ The following algorithms and settings are used\: -mvstore_1097_li=The password char array is cleared after use, to reduce the risk that the password is stolen even if the attacker has access to the main memory. -mvstore_1098_li=The password is hashed according to the PBKDF2 standard, using the SHA-256 hash algorithm. -mvstore_1099_li=The length of the salt is 64 bits, so that an attacker can not use a pre-calculated password hash table (rainbow table). It is generated using a cryptographically secure random number generator. -mvstore_1100_li=To speed up opening an encrypted stores on Android, the number of PBKDF2 iterations is 10. The higher the value, the better the protection against brute-force password cracking attacks, but the slower is opening a file. -mvstore_1101_li=The file itself is encrypted using the standardized disk encryption mode XTS-AES. 
Only little more than one AES-128 round per block is needed. -mvstore_1102_h3=Tools -mvstore_1103_p=\ There is a tool, the MVStoreTool, to dump the contents of a file. -mvstore_1104_h3=Exception Handling -mvstore_1105_p=\ This tool does not throw checked exceptions. Instead, unchecked exceptions are thrown if needed. The error message always contains the version of the tool. The following exceptions can occur\: -mvstore_1106_code=IllegalStateException -mvstore_1107_li=\ if a map was already closed or an IO exception occurred, for example if the file was locked, is already closed, could not be opened or closed, if reading or writing failed, if the file is corrupt, or if there is an internal error in the tool. For such exceptions, an error code is added so that the application can distinguish between different error cases. -mvstore_1108_code=IllegalArgumentException -mvstore_1109_li=\ if a method was called with an illegal argument. -mvstore_1110_code=UnsupportedOperationException -mvstore_1111_li=\ if a method was called that is not supported, for example trying to modify a read-only map. -mvstore_1112_code=ConcurrentModificationException -mvstore_1113_li=\ if a map is modified concurrently. -mvstore_1114_h3=Storage Engine for H2 -mvstore_1115_p=\ For H2 version 1.4 and newer, the MVStore is the default storage engine (supporting SQL, JDBC, transactions, MVCC, and so on). For older versions, append ;MV_STORE\=TRUE to the database URL. Even though it can be used with the default table level locking, by default the MVCC mode is enabled when using the MVStore. -mvstore_1116_h2=File Format -mvstore_1117_p=\ The data is stored in one file. The file contains two file headers (for safety), and a number of chunks. The file headers are one block each; a block is 4096 bytes. Each chunk is at least one block, but typically 200 blocks or more. Data is stored in the chunks in the form of a log structured storage. There is one chunk for every version. 
-mvstore_1118_p=\ Each chunk contains a number of B-tree pages. As an example, the following code\: -mvstore_1119_p=\ will result in the following two chunks (excluding metadata)\: -mvstore_1120_b=Chunk 1\: -mvstore_1121_p=\ - Page 1\: (root) node with 2 entries pointing to page 2 and 3 -mvstore_1122_p=\ - Page 2\: leaf with 140 entries (keys 0 - 139) -mvstore_1123_p=\ - Page 3\: leaf with 260 entries (keys 140 - 399) -mvstore_1124_b=Chunk 2\: -mvstore_1125_p=\ - Page 4\: (root) node with 2 entries pointing to page 5 and 3 -mvstore_1126_p=\ - Page 5\: leaf with 140 entries (keys 0 - 139) -mvstore_1127_p=\ That means each chunk contains the changes of one version\: the new version of the changed pages and the parent pages, recursively, up to the root page. Pages in subsequent chunks refer to pages in earlier chunks. -mvstore_1128_h3=File Header -mvstore_1129_p=\ There are two file headers, which normally contain the exact same data. But once in a while, the file headers are updated, and writing could partially fail, which could corrupt a header. That's why there is a second header. Only the file headers are updated in this way (called "in-place update"). The headers contain the following data\: -mvstore_1130_p=\ The data is stored in the form of a key-value pair. Each value is stored as a hexadecimal number. The entries are\: -mvstore_1131_li=H\: The entry "H\:2" stands for the H2 database. -mvstore_1132_li=block\: The block number where one of the newest chunks starts (but not necessarily the newest). -mvstore_1133_li=blockSize\: The block size of the file; currently always hex 1000, which is decimal 4096, to match the disk sector length of modern hard disks. -mvstore_1134_li=chunk\: The chunk id, which is normally the same value as the version; however, the chunk id might roll over to 0, while the version doesn't. -mvstore_1135_li=created\: The number of milliseconds since 1970 when the file was created. -mvstore_1136_li=format\: The file format number. 
Currently 1. -mvstore_1137_li=version\: The version number of the chunk. -mvstore_1138_li=fletcher\: The Fletcher-32 checksum of the header. -mvstore_1139_p=\ When opening the file, both headers are read and the checksum is verified. If both headers are valid, the one with the newer version is used. The chunk with the latest version is then detected (details about this see below), and the rest of the metadata is read from there. If the chunk id, block and version are not stored in the file header, then the latest chunk lookup starts with the last chunk in the file. -mvstore_1140_h3=Chunk Format -mvstore_1141_p=\ There is one chunk per version. Each chunk consists of a header, the pages that were modified in this version, and a footer. The pages contain the actual data of the maps. The pages inside a chunk are stored right after the header, next to each other (unaligned). The size of a chunk is a multiple of the block size. The footer is stored in the last 128 bytes of the chunk. -mvstore_1142_p=\ The footer allows to verify that the chunk is completely written (a chunk is written as one write operation), and allows to find the start position of the very last chunk in the file. The chunk header and footer contain the following data\: -mvstore_1143_p=\ The fields of the chunk header and footer are\: -mvstore_1144_li=chunk\: The chunk id. -mvstore_1145_li=block\: The first block of the chunk (multiply by the block size to get the position in the file). -mvstore_1146_li=len\: The size of the chunk in number of blocks. -mvstore_1147_li=map\: The id of the newest map; incremented when a new map is created. -mvstore_1148_li=max\: The sum of all maximum page sizes (see page format). -mvstore_1149_li=next\: The predicted start block of the next chunk. -mvstore_1150_li=pages\: The number of pages in the chunk. -mvstore_1151_li=root\: The position of the metadata root page (see page format). 
-mvstore_1152_li=time\: The time the chunk was written, in milliseconds after the file was created. -mvstore_1153_li=version\: The version this chunk represents. -mvstore_1154_li=fletcher\: The checksum of the footer. -mvstore_1155_p=\ Chunks are never updated in-place. Each chunk contains the pages that were changed in that version (there is one chunk per version, see above), plus all the parent nodes of those pages, recursively, up to the root page. If an entry in a map is changed, removed, or added, then the respective page is copied, modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented. This mechanism is called copy-on-write, and is similar to how the Btrfs file system works. Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first. -mvstore_1156_p=\ How the newest chunk is located when opening a store\: The file header contains the position of a recent chunk, but not always the newest one. This is to reduce the number of file header updates. After opening the file, the file headers, and the chunk footer of the very last chunk (at the end of the file) are read. From those candidates, the header of the most recent chunk is read. If it contains a "next" pointer (see above), those chunk's header and footer are read as well. If it turned out to be a newer valid chunk, this is repeated, until the newest chunk was found. Before writing a chunk, the position of the next chunk is predicted based on the assumption that the next chunk will be of the same size as the current one. 
When the next chunk is written, and the previous prediction turned out to be incorrect, the file header is updated as well. In any case, the file header is updated if the next chain gets longer than 20 hops. -mvstore_1157_h3=Page Format -mvstore_1158_p=\ Each map is a B-tree, and the map data is stored in (B-tree-) pages. There are leaf pages that contain the key-value pairs of the map, and internal nodes, which only contain keys and pointers to leaf pages. The root of a tree is either a leaf or an internal node. Unlike file header and chunk header and footer, the page data is not human readable. Instead, it is stored as byte arrays, with long (8 bytes), int (4 bytes), short (2 bytes), and variable size int and long (1 to 5 / 10 bytes). The page format is\: -mvstore_1159_li=length (int)\: Length of the page in bytes. -mvstore_1160_li=checksum (short)\: Checksum (chunk id xor offset within the chunk xor page length). -mvstore_1161_li=mapId (variable size int)\: The id of the map this page belongs to. -mvstore_1162_li=len (variable size int)\: The number of keys in the page. -mvstore_1163_li=type (byte)\: The page type (0 for leaf page, 1 for internal node; plus 2 if the keys and values are compressed with the LZF algorithm, or plus 6 if the keys and values are compressed with the Deflate algorithm). -mvstore_1164_li=children (array of long; internal nodes only)\: The position of the children. -mvstore_1165_li=childCounts (array of variable size long; internal nodes only)\: The total number of entries for the given child page. -mvstore_1166_li=keys (byte array)\: All keys, stored depending on the data type. -mvstore_1167_li=values (byte array; leaf pages only)\: All values, stored depending on the data type. -mvstore_1168_p=\ Even though this is not required by the file format, pages are stored in the following order\: For each map, the root page is stored first, then the internal nodes (if there are any), and then the leaf pages. 
This should speed up reads for media where sequential reads are faster than random access reads. The metadata map is stored at the end of a chunk. -mvstore_1169_p=\ Pointers to pages are stored as a long, using a special format\: 26 bits for the chunk id, 32 bits for the offset within the chunk, 5 bits for the length code, 1 bit for the page type (leaf or internal node). The page type is encoded so that when clearing or removing a map, leaf pages don't have to be read (internal nodes do have to be read in order to know where all the pages are; but in a typical B-tree the vast majority of the pages are leaf pages). The absolute file position is not included so that chunks can be moved within the file without having to change page pointers; only the chunk metadata needs to be changed. The length code is a number from 0 to 31, where 0 means the maximum length of the page is 32 bytes, 1 means 48 bytes, 2\: 64, 3\: 96, 4\: 128, 5\: 192, and so on until 31 which means longer than 1 MB. That way, reading a page only requires one read operation (except for very large pages). The sum of the maximum length of all pages is stored in the chunk metadata (field "max"), and when a page is marked as removed, the live maximum length is adjusted. This allows to estimate the amount of free space within a block, in addition to the number of free pages. -mvstore_1170_p=\ The total number of entries in child pages are kept to allow efficient range counting, lookup by index, and skip operations. The pages form a counted B-tree. -mvstore_1171_p=\ Data compression\: The data after the page type are optionally compressed using the LZF algorithm. -mvstore_1172_h3=Metadata Map -mvstore_1173_p=\ In addition to the user maps, there is one metadata map that contains names and positions of user maps, and chunk metadata. The very last page of a chunk contains the root page of that metadata map. The exact position of this root page is stored in the chunk header. 
This page (directly or indirectly) points to the root pages of all other maps. The metadata map of a store with a map named "data", and one chunk, contains the following entries\: -mvstore_1174_li=chunk.1\: The metadata of chunk 1. This is the same data as the chunk header, plus the number of live pages, and the maximum live length. -mvstore_1175_li=map.1\: The metadata of map 1. The entries are\: name, createVersion, and type. -mvstore_1176_li=name.data\: The map id of the map named "data". The value is "1". -mvstore_1177_li=root.1\: The root position of map 1. -mvstore_1178_li=setting.storeVersion\: The store version (a user defined value). -mvstore_1179_h2=Similar Projects and Differences to Other Storage Engines -mvstore_1180_p=\ Unlike similar storage engines like LevelDB and Kyoto Cabinet, the MVStore is written in Java and can easily be embedded in a Java and Android application. -mvstore_1181_p=\ The MVStore is somewhat similar to the Berkeley DB Java Edition because it is also written in Java, and is also a log structured storage, but the H2 license is more liberal. -mvstore_1182_p=\ Like SQLite 3, the MVStore keeps all data in one file. Unlike SQLite 3, the MVStore uses a log structured storage. The plan is to make the MVStore both easier to use as well as faster than SQLite 3. In a recent (very simple) test, the MVStore was about twice as fast as SQLite 3 on Android. -mvstore_1183_p=\ The API of the MVStore is similar to MapDB (previously known as JDBM) from Jan Kotek, and some code is shared between MVStore and MapDB. However, unlike MapDB, the MVStore uses a log structured storage. The MVStore does not have a record size limit. -mvstore_1184_h2=Current State -mvstore_1185_p=\ The code is still experimental at this stage. The API as well as the behavior may partially change. Features may be added and removed (even though the main features will stay). -mvstore_1186_h2=Requirements -mvstore_1187_p=\ The MVStore is included in the latest H2 jar file. 
-mvstore_1188_p=\ There are no special requirements to use it. The MVStore should run on any JVM as well as on Android. -mvstore_1189_p=\ To build just the MVStore (without the database engine), run\: -mvstore_1190_p=\ This will create the file bin/h2mvstore-1.4.196.jar (about 200 KB). -performance_1000_h1=Performance -performance_1001_a=\ Performance Comparison -performance_1002_a=\ PolePosition Benchmark -performance_1003_a=\ Database Performance Tuning -performance_1004_a=\ Using the Built-In Profiler -performance_1005_a=\ Application Profiling -performance_1006_a=\ Database Profiling -performance_1007_a=\ Statement Execution Plans -performance_1008_a=\ How Data is Stored and How Indexes Work -performance_1009_a=\ Fast Database Import -performance_1010_h2=Performance Comparison -performance_1011_p=\ In many cases H2 is faster than other (open source and not open source) database engines. Please note this is mostly a single connection benchmark run on one computer, with many very simple operations running against the database. This benchmark does not include very complex queries. The embedded mode of H2 is faster than the client-server mode because the per-statement overhead is greatly reduced. 
-performance_1012_h3=Embedded -performance_1013_th=Test Case -performance_1014_th=Unit -performance_1015_th=H2 -performance_1016_th=HSQLDB -performance_1017_th=Derby -performance_1018_td=Simple\: Init -performance_1019_td=ms -performance_1020_td=1019 -performance_1021_td=1907 -performance_1022_td=8280 -performance_1023_td=Simple\: Query (random) -performance_1024_td=ms -performance_1025_td=1304 -performance_1026_td=873 -performance_1027_td=1912 -performance_1028_td=Simple\: Query (sequential) -performance_1029_td=ms -performance_1030_td=835 -performance_1031_td=1839 -performance_1032_td=5415 -performance_1033_td=Simple\: Update (sequential) -performance_1034_td=ms -performance_1035_td=961 -performance_1036_td=2333 -performance_1037_td=21759 -performance_1038_td=Simple\: Delete (sequential) -performance_1039_td=ms -performance_1040_td=950 -performance_1041_td=1922 -performance_1042_td=32016 -performance_1043_td=Simple\: Memory Usage -performance_1044_td=MB -performance_1045_td=21 -performance_1046_td=10 -performance_1047_td=8 -performance_1048_td=BenchA\: Init -performance_1049_td=ms -performance_1050_td=919 -performance_1051_td=2133 -performance_1052_td=7528 -performance_1053_td=BenchA\: Transactions -performance_1054_td=ms -performance_1055_td=1219 -performance_1056_td=2297 -performance_1057_td=8541 -performance_1058_td=BenchA\: Memory Usage -performance_1059_td=MB -performance_1060_td=12 -performance_1061_td=15 -performance_1062_td=7 -performance_1063_td=BenchB\: Init -performance_1064_td=ms -performance_1065_td=905 -performance_1066_td=1993 -performance_1067_td=8049 -performance_1068_td=BenchB\: Transactions -performance_1069_td=ms -performance_1070_td=1091 -performance_1071_td=583 -performance_1072_td=1165 -performance_1073_td=BenchB\: Memory Usage -performance_1074_td=MB -performance_1075_td=17 -performance_1076_td=11 -performance_1077_td=8 -performance_1078_td=BenchC\: Init -performance_1079_td=ms -performance_1080_td=2491 -performance_1081_td=4003 
-performance_1082_td=8064 -performance_1083_td=BenchC\: Transactions -performance_1084_td=ms -performance_1085_td=1979 -performance_1086_td=803 -performance_1087_td=2840 -performance_1088_td=BenchC\: Memory Usage -performance_1089_td=MB -performance_1090_td=19 -performance_1091_td=22 -performance_1092_td=9 -performance_1093_td=Executed statements -performance_1094_td=\# -performance_1095_td=1930995 -performance_1096_td=1930995 -performance_1097_td=1930995 -performance_1098_td=Total time -performance_1099_td=ms -performance_1100_td=13673 -performance_1101_td=20686 -performance_1102_td=105569 -performance_1103_td=Statements per second -performance_1104_td=\# -performance_1105_td=141226 -performance_1106_td=93347 -performance_1107_td=18291 -performance_1108_h3=Client-Server -performance_1109_th=Test Case -performance_1110_th=Unit -performance_1111_th=H2 (Server) -performance_1112_th=HSQLDB -performance_1113_th=Derby -performance_1114_th=PostgreSQL -performance_1115_th=MySQL -performance_1116_td=Simple\: Init -performance_1117_td=ms -performance_1118_td=16338 -performance_1119_td=17198 -performance_1120_td=27860 -performance_1121_td=30156 -performance_1122_td=29409 -performance_1123_td=Simple\: Query (random) -performance_1124_td=ms -performance_1125_td=3399 -performance_1126_td=2582 -performance_1127_td=6190 -performance_1128_td=3315 -performance_1129_td=3342 -performance_1130_td=Simple\: Query (sequential) -performance_1131_td=ms -performance_1132_td=21841 -performance_1133_td=18699 -performance_1134_td=42347 -performance_1135_td=30774 -performance_1136_td=32611 -performance_1137_td=Simple\: Update (sequential) -performance_1138_td=ms -performance_1139_td=6913 -performance_1140_td=7745 -performance_1141_td=28576 -performance_1142_td=32698 -performance_1143_td=11350 -performance_1144_td=Simple\: Delete (sequential) -performance_1145_td=ms -performance_1146_td=8051 -performance_1147_td=9751 -performance_1148_td=42202 -performance_1149_td=44480 
-performance_1150_td=16555 -performance_1151_td=Simple\: Memory Usage -performance_1152_td=MB -performance_1153_td=22 -performance_1154_td=11 -performance_1155_td=9 -performance_1156_td=0 -performance_1157_td=1 -performance_1158_td=BenchA\: Init -performance_1159_td=ms -performance_1160_td=12996 -performance_1161_td=14720 -performance_1162_td=24722 -performance_1163_td=26375 -performance_1164_td=26060 -performance_1165_td=BenchA\: Transactions -performance_1166_td=ms -performance_1167_td=10134 -performance_1168_td=10250 -performance_1169_td=18452 -performance_1170_td=21453 -performance_1171_td=15877 -performance_1172_td=BenchA\: Memory Usage -performance_1173_td=MB -performance_1174_td=13 -performance_1175_td=15 -performance_1176_td=9 -performance_1177_td=0 -performance_1178_td=1 -performance_1179_td=BenchB\: Init -performance_1180_td=ms -performance_1181_td=15264 -performance_1182_td=16889 -performance_1183_td=28546 -performance_1184_td=31610 -performance_1185_td=29747 -performance_1186_td=BenchB\: Transactions -performance_1187_td=ms -performance_1188_td=3017 -performance_1189_td=3376 -performance_1190_td=1842 -performance_1191_td=2771 -performance_1192_td=1433 -performance_1193_td=BenchB\: Memory Usage -performance_1194_td=MB -performance_1195_td=17 -performance_1196_td=12 -performance_1197_td=11 -performance_1198_td=1 -performance_1199_td=1 -performance_1200_td=BenchC\: Init -performance_1201_td=ms -performance_1202_td=14020 -performance_1203_td=10407 -performance_1204_td=17655 -performance_1205_td=19520 -performance_1206_td=17532 -performance_1207_td=BenchC\: Transactions -performance_1208_td=ms -performance_1209_td=5076 -performance_1210_td=3160 -performance_1211_td=6411 -performance_1212_td=6063 -performance_1213_td=4530 -performance_1214_td=BenchC\: Memory Usage -performance_1215_td=MB -performance_1216_td=19 -performance_1217_td=21 -performance_1218_td=11 -performance_1219_td=1 -performance_1220_td=1 -performance_1221_td=Executed statements 
-performance_1222_td=\# -performance_1223_td=1930995 -performance_1224_td=1930995 -performance_1225_td=1930995 -performance_1226_td=1930995 -performance_1227_td=1930995 -performance_1228_td=Total time -performance_1229_td=ms -performance_1230_td=117049 -performance_1231_td=114777 -performance_1232_td=244803 -performance_1233_td=249215 -performance_1234_td=188446 -performance_1235_td=Statements per second -performance_1236_td=\# -performance_1237_td=16497 -performance_1238_td=16823 -performance_1239_td=7887 -performance_1240_td=7748 -performance_1241_td=10246 -performance_1242_h3=Benchmark Results and Comments -performance_1243_h4=H2 -performance_1244_p=\ Version 1.4.177 (2014-04-12) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. The advantage of buffering is\: there is no limit on the result set size. -performance_1245_h4=HSQLDB -performance_1246_p=\ Version 2.3.2 was used for the test. Cached tables are used in this test (hsqldb.default_table_type\=cached), and the write delay is 1 second (SET WRITE_DELAY 1). -performance_1247_h4=Derby -performance_1248_p=\ Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified\: leaving autocommit on is a problem for Derby. If it is switched off during the whole test, the results are about 20% better for Derby. Derby calls FileChannel.force(false), but only twice per log file (not on each commit). Disabling this call improves performance for Derby by about 2%. Unlike H2, Derby does not call FileDescriptor.sync() on each checkpoint. 
Derby supports a testing mode (system property derby.system.durability\=test) where durability is disabled. According to the documentation, this setting should be used for testing only, as the database may not recover after a crash. Enabling this setting improves performance by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less than half as fast as H2 in default mode. -performance_1249_h4=PostgreSQL -performance_1250_p=\ Version 9.1.5 was used for the test. The following options where changed in postgresql.conf\: fsync \= off, commit_delay \= 1000. PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. -performance_1251_h4=MySQL -performance_1252_p=\ Version 5.1.65-log was used for the test. MySQL was run with the InnoDB backend. The setting innodb_flush_log_at_trx_commit (found in the my.ini / my.cnf file) was set to 0. Otherwise (and by default), MySQL is slow (around 140 statements per second in this test) because it tries to flush the data to disk for each commit. For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Too bad this setting is not listed in the configuration wizard, and it always overwritten when using the wizard. You need to change this setting manually in the file my.ini / my.cnf, and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. -performance_1253_h4=Firebird -performance_1254_p=\ Firebird 1.5 (default installation) was tested, but the results are not published currently. It is possible to run the performance test with the Firebird database, and any information on how to configure Firebird for higher performance are welcome. 
-performance_1255_h4=Why Oracle / MS SQL Server / DB2 are Not Listed -performance_1256_p=\ The license of these databases does not allow to publish benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. SQLite was not tested because the JDBC driver doesn't support transactions. -performance_1257_h3=About this Benchmark -performance_1258_h4=How to Run -performance_1259_p=\ This test was as follows\: -performance_1260_h4=Separate Process per Database -performance_1261_p=\ For each database, a new process is started, to ensure the previous test does not impact the current test. -performance_1262_h4=Number of Connections -performance_1263_p=\ This is mostly a single-connection benchmark. BenchB uses multiple connections; the other tests use one connection. -performance_1264_h4=Real-World Tests -performance_1265_p=\ Good benchmarks emulate real-world use cases. This benchmark includes 4 test cases\: BenchSimple uses one table and many small updates / deletes. BenchA is similar to the TPC-A test, but single connection / single threaded (see also\: www.tpc.org). BenchB is similar to the TPC-B test, using multiple connections (one thread per connection). BenchC is similar to the TPC-C test, but single connection / single threaded. -performance_1266_h4=Comparing Embedded with Server Databases -performance_1267_p=\ This is mainly a benchmark for embedded databases (where the application runs in the same virtual machine as the database engine). However MySQL and PostgreSQL are not Java databases and cannot be embedded into a Java application. For the Java databases, both embedded and server modes are tested. -performance_1268_h4=Test Platform -performance_1269_p=\ This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. The JVM used is Sun JDK 1.6. 
-performance_1270_h4=Multiple Runs -performance_1271_p=\ When a Java benchmark is run first, the code is not fully compiled and therefore runs slower than when running multiple times. A benchmark should always run the same test multiple times and ignore the first run(s). This benchmark runs three times, but only the last run is measured. -performance_1272_h4=Memory Usage -performance_1273_p=\ It is not enough to measure the time taken, the memory usage is important as well. Performance can be improved by using a bigger cache, but the amount of memory is limited. HSQLDB tables are kept fully in memory by default; this benchmark uses 'disk based' tables for all databases. Unfortunately, it is not so easy to calculate the memory usage of PostgreSQL and MySQL, because they run in a different process than the test. This benchmark currently does not print memory usage of those databases. -performance_1274_h4=Delayed Operations -performance_1275_p=\ Some databases delay some operations (for example flushing the buffers) until after the benchmark is run. This benchmark waits between each database tested, and each database runs in a different process (sequentially). -performance_1276_h4=Transaction Commit / Durability -performance_1277_p=\ Durability means transaction committed to the database will not be lost. Some databases (for example MySQL) try to enforce this by default by calling fsync() to flush the buffers, but most hard drives don't actually flush all data. Calling the method slows down transaction commit a lot, but doesn't always make data durable. When comparing the results, it is important to think about the effect. Many database suggest to 'batch' operations when possible. This benchmark switches off autocommit when loading the data, and calls commit after each 1000 inserts. However many applications need 'short' transactions at runtime (a commit after each update). 
This benchmark commits after each update / delete in the simple benchmark, and after each business transaction in the other benchmarks. For databases that support delayed commits, a delay of one second is used. -performance_1278_h4=Using Prepared Statements -performance_1279_p=\ Wherever possible, the test cases use prepared statements. -performance_1280_h4=Currently Not Tested\: Startup Time -performance_1281_p=\ The startup time of a database engine is important as well for embedded use. This time is not measured currently. Also, not tested is the time used to create a database and open an existing database. Here, one (wrapper) connection is opened at the start, and for each step a new connection is opened and then closed. -performance_1282_h2=PolePosition Benchmark -performance_1283_p=\ The PolePosition is an open source benchmark. The algorithms are all quite simple. It was developed / sponsored by db4o. This test was not run for a longer time, so please be aware that the results below are for older database versions (H2 version 1.1, HSQLDB 1.8, Java 1.4). 
-performance_1284_th=Test Case -performance_1285_th=Unit -performance_1286_th=H2 -performance_1287_th=HSQLDB -performance_1288_th=MySQL -performance_1289_td=Melbourne write -performance_1290_td=ms -performance_1291_td=369 -performance_1292_td=249 -performance_1293_td=2022 -performance_1294_td=Melbourne read -performance_1295_td=ms -performance_1296_td=47 -performance_1297_td=49 -performance_1298_td=93 -performance_1299_td=Melbourne read_hot -performance_1300_td=ms -performance_1301_td=24 -performance_1302_td=43 -performance_1303_td=95 -performance_1304_td=Melbourne delete -performance_1305_td=ms -performance_1306_td=147 -performance_1307_td=133 -performance_1308_td=176 -performance_1309_td=Sepang write -performance_1310_td=ms -performance_1311_td=965 -performance_1312_td=1201 -performance_1313_td=3213 -performance_1314_td=Sepang read -performance_1315_td=ms -performance_1316_td=765 -performance_1317_td=948 -performance_1318_td=3455 -performance_1319_td=Sepang read_hot -performance_1320_td=ms -performance_1321_td=789 -performance_1322_td=859 -performance_1323_td=3563 -performance_1324_td=Sepang delete -performance_1325_td=ms -performance_1326_td=1384 -performance_1327_td=1596 -performance_1328_td=6214 -performance_1329_td=Bahrain write -performance_1330_td=ms -performance_1331_td=1186 -performance_1332_td=1387 -performance_1333_td=6904 -performance_1334_td=Bahrain query_indexed_string -performance_1335_td=ms -performance_1336_td=336 -performance_1337_td=170 -performance_1338_td=693 -performance_1339_td=Bahrain query_string -performance_1340_td=ms -performance_1341_td=18064 -performance_1342_td=39703 -performance_1343_td=41243 -performance_1344_td=Bahrain query_indexed_int -performance_1345_td=ms -performance_1346_td=104 -performance_1347_td=134 -performance_1348_td=678 -performance_1349_td=Bahrain update -performance_1350_td=ms -performance_1351_td=191 -performance_1352_td=87 -performance_1353_td=159 -performance_1354_td=Bahrain delete -performance_1355_td=ms 
-performance_1356_td=1215 -performance_1357_td=729 -performance_1358_td=6812 -performance_1359_td=Imola retrieve -performance_1360_td=ms -performance_1361_td=198 -performance_1362_td=194 -performance_1363_td=4036 -performance_1364_td=Barcelona write -performance_1365_td=ms -performance_1366_td=413 -performance_1367_td=832 -performance_1368_td=3191 -performance_1369_td=Barcelona read -performance_1370_td=ms -performance_1371_td=119 -performance_1372_td=160 -performance_1373_td=1177 -performance_1374_td=Barcelona query -performance_1375_td=ms -performance_1376_td=20 -performance_1377_td=5169 -performance_1378_td=101 -performance_1379_td=Barcelona delete -performance_1380_td=ms -performance_1381_td=388 -performance_1382_td=319 -performance_1383_td=3287 -performance_1384_td=Total -performance_1385_td=ms -performance_1386_td=26724 -performance_1387_td=53962 -performance_1388_td=87112 -performance_1389_p=\ There are a few problems with the PolePosition test\: -performance_1390_li=\ HSQLDB uses in-memory tables by default while H2 uses persistent tables. The HSQLDB version included in PolePosition does not support changing this, so you need to replace poleposition-0.20/lib/hsqldb.jar with a newer version (for example hsqldb-1.8.0.7.jar), and then use the setting hsqldb.connecturl\=jdbc\:hsqldb\:file\:data/hsqldb/dbbench2;hsqldb.default_table_type\=cached;sql.enforce_size\=true in the file Jdbc.properties. -performance_1391_li=HSQLDB keeps the database open between tests, while H2 closes the database (losing all the cache). To change that, use the database URL jdbc\:h2\:file\:data/h2/dbbench;DB_CLOSE_DELAY\=-1 -performance_1392_li=The amount of cache memory is quite important, specially for the PolePosition test. Unfortunately, the PolePosition test does not take this into account. 
-performance_1393_h2=Database Performance Tuning -performance_1394_h3=Keep Connections Open or Use a Connection Pool -performance_1395_p=\ If your application opens and closes connections a lot (for example, for each request), you should consider using a connection pool. Opening a connection using DriverManager.getConnection is specially slow if the database is closed. By default the database is closed if the last connection is closed. -performance_1396_p=\ If you open and close connections a lot but don't want to use a connection pool, consider keeping a 'sentinel' connection open for as long as the application runs, or use delayed database closing. See also Closing a database. -performance_1397_h3=Use a Modern JVM -performance_1398_p=\ Newer JVMs are faster. Upgrading to the latest version of your JVM can provide a "free" boost to performance. Switching from the default Client JVM to the Server JVM using the -server command-line option improves performance at the cost of a slight increase in start-up time. -performance_1399_h3=Virus Scanners -performance_1400_p=\ Some virus scanners scan files every time they are accessed. It is very important for performance that database files are not scanned for viruses. The database engine never interprets the data stored in the files as programs, that means even if somebody would store a virus in a database file, this would be harmless (when the virus does not run, it cannot spread). Some virus scanners allow to exclude files by suffix. Ensure files ending with .db are not scanned. -performance_1401_h3=Using the Trace Options -performance_1402_p=\ If the performance hot spots are in the database engine, in many cases the performance can be optimized by creating additional indexes, or changing the schema. Sometimes the application does not directly generate the SQL statements, for example if an O/R mapping tool is used. To view the SQL statements and JDBC API calls, you can use the trace options. 
For more information, see Using the Trace Options. -performance_1403_h3=Index Usage -performance_1404_p=\ This database uses indexes to improve the performance of SELECT, UPDATE, DELETE. If a column is used in the WHERE clause of a query, and if an index exists on this column, then the index can be used. Multi-column indexes are used if all or the first columns of the index are used. Both equality lookup and range scans are supported. Indexes are used to order result sets, but only if the condition uses the same index or no index at all. The results are sorted in memory if required. Indexes are created automatically for primary key and unique constraints. Indexes are also created for foreign key constraints, if required. For other columns, indexes need to be created manually using the CREATE INDEX statement. -performance_1405_h3=Index Hints -performance_1406_p=\ If you have determined that H2 is not using the optimal index for your query, you can use index hints to force H2 to use specific indexes. -performance_1407_p=Only indexes in the list will be used when choosing an index to use on the given table. There is no significance to order in this list. -performance_1408_p=\ It is possible that no index in the list is chosen, in which case a full table scan will be used. -performance_1409_p=An empty list of index names forces a full table scan to be performed. -performance_1410_p=Each index in the list must exist. -performance_1411_h3=How Data is Stored Internally -performance_1412_p=\ For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT, then the data of the table is organized in this way. This is sometimes also called a "clustered index" or "index organized table". -performance_1413_p=\ H2 internally stores table data and indexes in the form of b-trees. Each b-tree stores entries as a list of unique keys (one or more columns) and data (zero or more columns). 
The table data is always organized in the form of a "data b-tree" with a single column key of type long. If a single column primary key of type BIGINT, INT, SMALLINT, TINYINT is specified when creating the table (or just after creating the table, but before inserting any rows), then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, then a hidden auto-increment column of type BIGINT is added to the table, which is used as the key for the data b-tree. All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB columns, which are stored externally). -performance_1414_p=\ For each additional index, one new "index b-tree" is created. The key of this b-tree consists of the indexed columns, plus the key of the data b-tree. If a primary key is created after the table has been created, or if the primary key contains multiple column, or if the primary key is not of the data types listed above, then the primary key is stored in a new index b-tree. -performance_1415_h3=Optimizer -performance_1416_p=\ This database uses a cost based optimizer. For simple and queries and queries with medium complexity (less than 7 tables in the join), the expected cost (running time) of all possible plans is calculated, and the plan with the lowest cost is used. For more complex queries, the algorithm first tries all possible combinations for the first few tables, and the remaining tables added using a greedy algorithm (this works well for most joins). Afterwards a genetic algorithm is used to test at most 2000 distinct plans. Only left-deep plans are evaluated. -performance_1417_h3=Expression Optimization -performance_1418_p=\ After the statement is parsed, all expressions are simplified automatically if possible. Operations are evaluated only once if all parameters are constant. 
Functions are also optimized, but only if the function is constant (always returns the same result for the same parameter values). If the WHERE clause is always false, then the table is not accessed at all. -performance_1419_h3=COUNT(*) Optimization -performance_1420_p=\ If the query only counts all rows of a table, then the data is not accessed. However, this is only possible if no WHERE clause is used, that means it only works for queries of the form SELECT COUNT(*) FROM table. -performance_1421_h3=Updating Optimizer Statistics / Column Selectivity -performance_1422_p=\ When executing a query, at most one index per join can be used. If the same table is joined multiple times, for each join only one index is used (the same index could be used for both joins, or each join could use a different index). Example\: for the query SELECT * FROM TEST T1, TEST T2 WHERE T1.NAME\='A' AND T2.ID\=T1.ID, two index can be used, in this case the index on NAME for T1 and the index on ID for T2. -performance_1423_p=\ If a table has multiple indexes, sometimes more than one index could be used. Example\: if there is a table TEST(ID, NAME, FIRSTNAME) and an index on each column, then two indexes could be used for the query SELECT * FROM TEST WHERE NAME\='A' AND FIRSTNAME\='B', the index on NAME or the index on FIRSTNAME. It is not possible to use both indexes at the same time. Which index is used depends on the selectivity of the column. The selectivity describes the 'uniqueness' of values in a column. A selectivity of 100 means each value appears only once, and a selectivity of 1 means the same value appears in many or most rows. For the query above, the index on NAME should be used if the table contains more distinct names than first names. -performance_1424_p=\ The SQL statement ANALYZE can be used to automatically estimate the selectivity of the columns in the tables. This command should be run from time to time to improve the query plans generated by the optimizer. 
-performance_1425_h3=In-Memory (Hash) Indexes -performance_1426_p=\ Using in-memory indexes, specially in-memory hash indexes, can speed up queries and data manipulation. -performance_1427_p=In-memory indexes are automatically used for in-memory databases, but can also be created for persistent databases using CREATE MEMORY TABLE. In many cases, the rows itself will also be kept in-memory. Please note this may cause memory problems for large tables. -performance_1428_p=\ In-memory hash indexes are backed by a hash table and are usually faster than regular indexes. However, hash indexes only supports direct lookup (WHERE ID \= ?) but not range scan (WHERE ID < ?). To use hash indexes, use HASH as in\: CREATE UNIQUE HASH INDEX and CREATE TABLE ...(ID INT PRIMARY KEY HASH,...). -performance_1429_h3=Use Prepared Statements -performance_1430_p=\ If possible, use prepared statements with parameters. -performance_1431_h3=Prepared Statements and IN(...) -performance_1432_p=\ Avoid generating SQL statements with a variable size IN(...) list. Instead, use a prepared statement with arrays as in the following example\: -performance_1433_h3=Optimization Examples -performance_1434_p=\ See src/test/org/h2/samples/optimizations.sql for a few examples of queries that benefit from special optimizations built into the database. -performance_1435_h3=Cache Size and Type -performance_1436_p=\ By default the cache size of H2 is quite small. Consider using a larger cache size, or enable the second level soft reference cache. See also Cache Settings. -performance_1437_h3=Data Types -performance_1438_p=\ Each data type has different storage and performance characteristics\: -performance_1439_li=The DECIMAL/NUMERIC type is slower and requires more storage than the REAL and DOUBLE types. -performance_1440_li=Text types are slower to read, write, and compare than numeric types and generally require more storage. -performance_1441_li=See Large Objects for information on BINARY vs. 
BLOB and VARCHAR vs. CLOB performance. -performance_1442_li=Parsing and formatting takes longer for the TIME, DATE, and TIMESTAMP types than the numeric types. -performance_1443_code=SMALLINT/TINYINT/BOOLEAN -performance_1444_li=\ are not significantly smaller or faster to work with than INTEGER in most modes. -performance_1445_h3=Sorted Insert Optimization -performance_1446_p=\ To reduce disk space usage and speed up table creation, an optimization for sorted inserts is available. When used, b-tree pages are split at the insertion point. To use this optimization, add SORTED before the SELECT statement\: -performance_1447_h2=Using the Built-In Profiler -performance_1448_p=\ A very simple Java profiler is built-in. To use it, use the following template\: -performance_1449_h2=Application Profiling -performance_1450_h3=Analyze First -performance_1451_p=\ Before trying to optimize performance, it is important to understand where the problem is (what part of the application is slow). Blind optimization or optimization based on guesses should be avoided, because usually it is not an efficient strategy. There are various ways to analyze an application. Sometimes two implementations can be compared using System.currentTimeMillis(). But this does not work for complex applications with many modules, and for memory problems. -performance_1452_p=\ A simple way to profile an application is to use the built-in profiling tool of java. Example\: -performance_1453_p=\ Unfortunately, it is only possible to profile the application from start to end. Another solution is to create a number of full thread dumps. To do that, first run jps -l to get the process id, and then run jstack <pid> or kill -QUIT <pid> (Linux) or press Ctrl+C (Windows). -performance_1454_p=\ A simple profiling tool is included in H2. To use it, the application needs to be changed slightly. Example\: -performance_1455_p=\ The profiler is built into the H2 Console tool, to analyze databases that open slowly. 
To use it, run the H2 Console, and then click on 'Test Connection'. Afterwards, click on "Test successful" and you get the most common stack traces, which helps to find out why it took so long to connect. You will only get the stack traces if opening the database took more than a few seconds. -performance_1456_h2=Database Profiling -performance_1457_p=\ The ConvertTraceFile tool generates SQL statement statistics at the end of the SQL script file. The format used is similar to the profiling data generated when using java -Xrunhprof. For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE\=2). The easiest way to set the trace level is to append the setting to the database URL, for example\: jdbc\:h2\:~/test;TRACE_LEVEL_FILE\=2 or jdbc\:h2\:tcp\://localhost/~/test;TRACE_LEVEL_FILE\=2. As an example, execute the the following script using the H2 Console\: -performance_1458_p=\ After running the test case, convert the .trace.db file using the ConvertTraceFile tool. The trace file is located in the same directory as the database file. -performance_1459_p=\ The generated file test.sql will contain the SQL statements as well as the following profiling data (results vary)\: -performance_1460_h2=Statement Execution Plans -performance_1461_p=\ The SQL statement EXPLAIN displays the indexes and optimizations the database uses for a statement. The following statements support EXPLAIN\: SELECT, UPDATE, DELETE, MERGE, INSERT. The following query shows that the database uses the primary key index to search for rows\: -performance_1462_p=\ For joins, the tables in the execution plan are sorted in the order they are processed. The following query shows the database first processes the table INVOICE (using the primary key). For each row, it will additionally check that the value of the column AMOUNT is larger than zero, and for those rows the database will search in the table CUSTOMER (using the primary key). 
The query plan contains some redundancy so it is a valid statement. -performance_1463_h3=Displaying the Scan Count -performance_1464_code=EXPLAIN ANALYZE -performance_1465_p=\ additionally shows the scanned rows per table and pages read from disk per table or index. This will actually execute the query, unlike EXPLAIN which only prepares it. The following query scanned 1000 rows, and to do that had to read 85 pages from the data area of the table. Running the query twice will not list the pages read from disk, because they are now in the cache. The tableScan means this query doesn't use an index. -performance_1466_p=\ The cache will prevent the pages are read twice. H2 reads all columns of the row unless only the columns in the index are read. Except for large CLOB and BLOB, which are not store in the table. -performance_1467_h3=Special Optimizations -performance_1468_p=\ For certain queries, the database doesn't need to read all rows, or doesn't need to sort the result even if ORDER BY is used. -performance_1469_p=\ For queries of the form SELECT COUNT(*), MIN(ID), MAX(ID) FROM TEST, the query plan includes the line /* direct lookup */ if the data can be read from an index. -performance_1470_p=\ For queries of the form SELECT DISTINCT CUSTOMER_ID FROM INVOICE, the query plan includes the line /* distinct */ if there is an non-unique or multi-column index on this column, and if this column has a low selectivity. -performance_1471_p=\ For queries of the form SELECT * FROM TEST ORDER BY ID, the query plan includes the line /* index sorted */ to indicate there is no separate sorting required. -performance_1472_p=\ For queries of the form SELECT * FROM TEST GROUP BY ID ORDER BY ID, the query plan includes the line /* group sorted */ to indicate there is no separate sorting required. -performance_1473_h2=How Data is Stored and How Indexes Work -performance_1474_p=\ Internally, each row in a table is identified by a unique number, the row id. 
The rows of a table are stored with the row id as the key. The row id is a number of type long. If a table has a single column primary key of type INT or BIGINT, then the value of this column is the row id, otherwise the database generates the row id automatically. There is a (non-standard) way to access the row id\: using the _ROWID_ pseudo-column\: -performance_1475_p=\ The data is stored in the database as follows\: -performance_1476_th=_ROWID_ -performance_1477_th=FIRST_NAME -performance_1478_th=NAME -performance_1479_th=CITY -performance_1480_th=PHONE -performance_1481_td=1 -performance_1482_td=John -performance_1483_td=Miller -performance_1484_td=Berne -performance_1485_td=123 456 789 -performance_1486_td=2 -performance_1487_td=Philip -performance_1488_td=Jones -performance_1489_td=Berne -performance_1490_td=123 012 345 -performance_1491_p=\ Access by row id is fast because the data is sorted by this key. Please note the row id is not available until after the row was added (that means, it can not be used in computed columns or constraints). If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT\: -performance_1492_h3=Indexes -performance_1493_p=\ An index internally is basically just a table that contains the indexed column(s), plus the row id\: -performance_1494_p=\ In the index, the data is sorted by the indexed columns. 
So this index contains the following data\: -performance_1495_th=CITY -performance_1496_th=NAME -performance_1497_th=FIRST_NAME -performance_1498_th=_ROWID_ -performance_1499_td=Berne -performance_1500_td=Jones -performance_1501_td=Philip -performance_1502_td=2 -performance_1503_td=Berne -performance_1504_td=Miller -performance_1505_td=John -performance_1506_td=1 -performance_1507_p=\ When the database uses an index to query the data, it searches the index for the given data, and (if required) reads the remaining columns in the main data table (retrieved using the row id). An index on city, name, and first name (multi-column index) allows to quickly search for rows when the city, name, and first name are known. If only the city and name, or only the city is known, then this index is also used (so creating an additional index on just the city is not needed). This index is also used when reading all rows, sorted by the indexed columns. However, if only the first name is known, then this index is not used\: -performance_1508_p=\ If your application often queries the table for a phone number, then it makes sense to create an additional index on it\: -performance_1509_p=\ This index contains the phone number, and the row id\: -performance_1510_th=PHONE -performance_1511_th=_ROWID_ -performance_1512_td=123 012 345 -performance_1513_td=2 -performance_1514_td=123 456 789 -performance_1515_td=1 -performance_1516_h3=Using Multiple Indexes -performance_1517_p=\ Within a query, only one index per logical table is used. Using the condition PHONE \= '123 567 789' OR CITY \= 'Berne' would use a table scan instead of first using the index on the phone number and then the index on the city. It makes sense to write two queries and combine then using UNION. 
In this case, each individual query uses a different index\: -performance_1518_h2=Fast Database Import -performance_1519_p=\ To speed up large imports, consider using the following options temporarily\: -performance_1520_code=SET LOG 0 -performance_1521_li=\ (disabling the transaction log) -performance_1522_code=SET CACHE_SIZE -performance_1523_li=\ (a large cache is faster) -performance_1524_code=SET LOCK_MODE 0 -performance_1525_li=\ (disable locking) -performance_1526_code=SET UNDO_LOG 0 -performance_1527_li=\ (disable the session undo log) -performance_1528_p=\ These options can be set in the database URL\: jdbc\:h2\:~/test;LOG\=0;CACHE_SIZE\=65536;LOCK_MODE\=0;UNDO_LOG\=0. Most of those options are not recommended for regular use, that means you need to reset them after use. -performance_1529_p=\ If you have to import a lot of rows, use a PreparedStatement or use CSV import. Please note that CREATE TABLE(...) ... AS SELECT ... is faster than CREATE TABLE(...); INSERT INTO ... SELECT .... -quickstart_1000_h1=Quickstart -quickstart_1001_a=\ Embedding H2 in an Application -quickstart_1002_a=\ The H2 Console Application -quickstart_1003_h2=Embedding H2 in an Application -quickstart_1004_p=\ This database can be used in embedded mode, or in server mode. To use it in embedded mode, you need to\: -quickstart_1005_li=Add the h2*.jar to the classpath (H2 does not have any dependencies) -quickstart_1006_li=Use the JDBC driver class\: org.h2.Driver -quickstart_1007_li=The database URL jdbc\:h2\:~/test opens the database test in your user home directory -quickstart_1008_li=A new database is automatically created -quickstart_1009_h2=The H2 Console Application -quickstart_1010_p=\ The Console lets you access a SQL database using a browser interface. -quickstart_1011_p=\ If you don't have Windows XP, or if something does not work as expected, please see the detailed description in the Tutorial. 
-quickstart_1012_h3=Step-by-Step -quickstart_1013_h4=Installation -quickstart_1014_p=\ Install the software using the Windows Installer (if you did not yet do that). -quickstart_1015_h4=Start the Console -quickstart_1016_p=\ Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]\: -quickstart_1017_p=\ A new console window appears\: -quickstart_1018_p=\ Also, a new browser page should open with the URL http\://localhost\:8082. You may get a security warning from the firewall. If you don't want other computers in the network to access the database on your machine, you can let the firewall block these connections. Only local connections are required at this time. -quickstart_1019_h4=Login -quickstart_1020_p=\ Select [Generic H2] and click [Connect]\: -quickstart_1021_p=\ You are now logged in. -quickstart_1022_h4=Sample -quickstart_1023_p=\ Click on the [Sample SQL Script]\: -quickstart_1024_p=\ The SQL commands appear in the command area. -quickstart_1025_h4=Execute -quickstart_1026_p=\ Click [Run] -quickstart_1027_p=\ On the left side, a new entry TEST is added below the database icon. The operations and results of the statements are shown below the script. -quickstart_1028_h4=Disconnect -quickstart_1029_p=\ Click on [Disconnect]\: -quickstart_1030_p=\ to close the connection. -quickstart_1031_h4=End -quickstart_1032_p=\ Close the console window. For more information, see the Tutorial. -roadmap_1000_h1=Roadmap -roadmap_1001_p=\ New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests. Of course, patches are always welcome, but are not always applied as is. See also Providing Patches. -roadmap_1002_h2=Version 1.5.x\: Planned Changes -roadmap_1003_li=Replace file password hash with file encryption key; validate encryption key when connecting. -roadmap_1004_li=Remove "set binary collation" feature. -roadmap_1005_li=Remove the encryption algorithm XTEA. 
-roadmap_1006_li=Disallow referencing other tables in a table (via constraints for example). -roadmap_1007_li=Remove PageStore features like compress_lob. -roadmap_1008_h2=Version 1.4.x\: Planned Changes -roadmap_1009_li=Change license to MPL 2.0. -roadmap_1010_li=Automatic migration from 1.3 databases to 1.4. -roadmap_1011_li=Option to disable the file name suffix somehow (issue 447). -roadmap_1012_h2=Priority 1 -roadmap_1013_li=Bugfixes. -roadmap_1014_li=More tests with MULTI_THREADED\=1 (and MULTI_THREADED with MVCC)\: Online backup (using the 'backup' statement). -roadmap_1015_li=Server side cursors. -roadmap_1016_h2=Priority 2 -roadmap_1017_li=Support hints for the optimizer (which index to use, enforce the join order). -roadmap_1018_li=Full outer joins. -roadmap_1019_li=Access rights\: remember the owner of an object. Create, alter and drop privileges. COMMENT\: allow owner of object to change it. Issue 208\: Access rights for schemas. -roadmap_1020_li=Test multi-threaded in-memory db access. -roadmap_1021_li=MySQL, MS SQL Server compatibility\: support case sensitive (mixed case) identifiers without quotes. -roadmap_1022_li=Support GRANT SELECT, UPDATE ON [schemaName.] *. -roadmap_1023_li=Migrate database tool (also from other database engines). For Oracle, maybe use DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL. -roadmap_1024_li=Clustering\: support mixed clustering mode (one embedded, others in server mode). -roadmap_1025_li=Clustering\: reads should be randomly distributed (optional) or to a designated database on RAM (parameter\: READ_FROM\=3). -roadmap_1026_li=Window functions\: RANK() and DENSE_RANK(), partition using OVER(). select *, count(*) over() as fullCount from ... limit 4; -roadmap_1027_li=PostgreSQL catalog\: use BEFORE SELECT triggers instead of views over metadata tables. -roadmap_1028_li=Compatibility\: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211. 
-roadmap_1029_li=Test very large databases and LOBs (up to 256 GB). -roadmap_1030_li=Store all temp files in the temp directory. -roadmap_1031_li=Don't use temp files, specially not deleteOnExit (bug 4513817\: File.deleteOnExit consumes memory). Also to allow opening client / server (remote) connections when using LOBs. -roadmap_1032_li=Make DDL (Data Definition) operations transactional. -roadmap_1033_li=Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED). -roadmap_1034_li=Groovy Stored Procedures\: http\://groovy.codehaus.org/GSQL -roadmap_1035_li=Add a migration guide (list differences between databases). -roadmap_1036_li=Optimization\: automatic index creation suggestion using the trace file? -roadmap_1037_li=Fulltext search Lucene\: analyzer configuration, mergeFactor. -roadmap_1038_li=Compression performance\: don't allocate buffers, compress / expand in to out buffer. -roadmap_1039_li=Rebuild index functionality to shrink index size and improve performance. -roadmap_1040_li=Console\: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA). -roadmap_1041_li=Test performance again with SQL Server, Oracle, DB2. -roadmap_1042_li=Test with Spatial DB in a box / JTS\: http\://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification. -roadmap_1043_li=Write more tests and documentation for MVCC (Multi Version Concurrency Control). -roadmap_1044_li=Find a tool to view large text file (larger than 100 MB), with find, page up and down (like less), truncate before / after. -roadmap_1045_li=Implement, test, document XAConnection and so on. -roadmap_1046_li=Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption). -roadmap_1047_li=CHECK\: find out what makes CHECK\=TRUE slow, move to CHECK2. -roadmap_1048_li=Drop with invalidate views (so that source code is not lost). Check what other databases do exactly. -roadmap_1049_li=Index usage for (ID, NAME)\=(1, 'Hi'); document. 
-roadmap_1050_li=Set a connection read only (Connection.setReadOnly) or using a connection parameter. -roadmap_1051_li=Access rights\: finer grained access control (grant access for specific functions). -roadmap_1052_li=ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]). -roadmap_1053_li=Version check\: docs / web console (using Javascript), and maybe in the library (using TCP/IP). -roadmap_1054_li=Web server classloader\: override findResource / getResourceFrom. -roadmap_1055_li=Cost for embedded temporary view is calculated wrong, if result is constant. -roadmap_1056_li=Count index range query (count(*) where id between 10 and 20). -roadmap_1057_li=Performance\: update in-place. -roadmap_1058_li=Clustering\: when a database is back alive, automatically synchronize with the master (requires readable transaction log). -roadmap_1059_li=Database file name suffix\: a way to use no or a different suffix (for example using a slash). -roadmap_1060_li=Eclipse plugin. -roadmap_1061_li=Asynchronous queries to support publish/subscribe\: SELECT ... FOR READ WAIT [maxMillisToWait]. See also MS SQL Server "Query Notification". -roadmap_1062_li=Fulltext search (native)\: reader / tokenizer / filter. -roadmap_1063_li=Linked schema using CSV files\: one schema for a directory of files; support indexes for CSV files. -roadmap_1064_li=iReport to support H2. -roadmap_1065_li=Include SMTP (mail) client (alert on cluster failure, low disk space,...). -roadmap_1066_li=Option for SCRIPT to only process one or a set of schemas or tables, and append to a file. -roadmap_1067_li=JSON parser and functions. -roadmap_1068_li=Copy database\: tool with config GUI and batch mode, extensible (example\: compare). -roadmap_1069_li=Document, implement tool for long running transactions using user-defined compensation statements. -roadmap_1070_li=Support SET TABLE DUAL READONLY. -roadmap_1071_li=GCJ\: what is the state now? 
-roadmap_1072_li=Events for\: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http\://docs.openlinksw.com/virtuoso/fn_dbev_startup.html -roadmap_1073_li=Optimization\: simpler log compression. -roadmap_1074_li=Support standard INFORMATION_SCHEMA tables, as defined in http\://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - specially KEY_COLUMN_USAGE\: http\://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http\://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif -roadmap_1075_li=Compatibility\: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby\: division by zero. HSQLDB\: 0.0e1 / 0.0e1 is NaN. -roadmap_1076_li=Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R). -roadmap_1077_li=Custom class loader to reload functions on demand. -roadmap_1078_li=Test http\://mysql-je.sourceforge.net/ -roadmap_1079_li=H2 Console\: the webclient could support more features like phpMyAdmin. -roadmap_1080_li=Support Oracle functions\: TO_NUMBER. -roadmap_1081_li=Work on the Java to C converter. -roadmap_1082_li=The HELP information schema can be directly exposed in the Console. -roadmap_1083_li=Maybe use the 0x1234 notation for binary fields, see MS SQL Server. -roadmap_1084_li=Support Oracle CONNECT BY in some way\: http\://www.adp-gmbh.ch/ora/sql/connect_by.html http\://philip.greenspun.com/sql/trees.html -roadmap_1085_li=SQL Server 2005, Oracle\: support COUNT(*) OVER(). See http\://www.orafusion.com/art_anlytc.htm -roadmap_1086_li=SQL 2003\: http\://www.wiscorp.com/sql_2003_standard.zip -roadmap_1087_li=Version column (number/sequence and timestamp based). -roadmap_1088_li=Optimize getGeneratedKey\: send last identity after each execute (server). -roadmap_1089_li=Test and document UPDATE TEST SET (ID, NAME) \= (SELECT ID*10, NAME || '\!' FROM TEST T WHERE T.ID\=TEST.ID). 
-roadmap_1090_li=Max memory rows / max undo log size\: use block count / row size not row count. -roadmap_1091_li=Implement point-in-time recovery. -roadmap_1092_li=Support PL/SQL (programming language / control flow statements). -roadmap_1093_li=LIKE\: improved version for larger texts (currently using naive search). -roadmap_1094_li=Throw an exception when the application calls getInt on a Long (optional). -roadmap_1095_li=Default date format for input and output (local date constants). -roadmap_1096_li=Document ROWNUM usage for reports\: SELECT ROWNUM, * FROM (subquery). -roadmap_1097_li=File system that writes to two file systems (replication, replicating file system). -roadmap_1098_li=Standalone tool to get relevant system properties and add it to the trace output. -roadmap_1099_li=Support 'call proc(1\=value)' (PostgreSQL, Oracle). -roadmap_1100_li=Console\: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?). -roadmap_1101_li=Console\: autocomplete Ctrl+Space inserts template. -roadmap_1102_li=Option to encrypt .trace.db file. -roadmap_1103_li=Auto-Update feature for database, .jar file. -roadmap_1104_li=ResultSet SimpleResultSet.readFromURL(String url)\: id varchar, state varchar, released timestamp. -roadmap_1105_li=Partial indexing (see PostgreSQL). -roadmap_1106_li=Add GUI to build a custom version (embedded, fulltext,...) using build flags. -roadmap_1107_li=http\://rubyforge.org/projects/hypersonic/ -roadmap_1108_li=Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app). -roadmap_1109_li=Table order\: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility). -roadmap_1110_li=Backup tool should work with other databases as well. -roadmap_1111_li=Console\: -ifExists doesn't work for the console. Add a flag to disable other dbs. -roadmap_1112_li=Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess). 
-roadmap_1113_li=Java static code analysis\: http\://pmd.sourceforge.net/ -roadmap_1114_li=Java static code analysis\: http\://www.eclipse.org/tptp/ -roadmap_1115_li=Compatibility for CREATE SCHEMA AUTHORIZATION. -roadmap_1116_li=Implement Clob / Blob truncate and the remaining functionality. -roadmap_1117_li=Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ... -roadmap_1118_li=File locking\: writing a system property to detect concurrent access from the same VM (different classloaders). -roadmap_1119_li=Pure SQL triggers (example\: update parent table if the child table is changed). -roadmap_1120_li=Add H2 to Gem (Ruby install system). -roadmap_1121_li=Support linked JCR tables. -roadmap_1122_li=Native fulltext search\: min word length; store word positions. -roadmap_1123_li=Add an option to the SCRIPT command to generate only portable / standard SQL. -roadmap_1124_li=Updatable views\: create 'instead of' triggers automatically if possible (simple cases first). -roadmap_1125_li=Improve create index performance. -roadmap_1126_li=Compact databases without having to close the database (vacuum). -roadmap_1127_li=Implement more JDBC 4.0 features. -roadmap_1128_li=Support TRANSFORM / PIVOT as in MS Access. -roadmap_1129_li=SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...). -roadmap_1130_li=Support updatable views with join on primary keys (to extend a table). -roadmap_1131_li=Public interface for functions (not public static). -roadmap_1132_li=Support reading the transaction log. -roadmap_1133_li=Feature matrix as in i-net software. -roadmap_1134_li=Updatable result set on table without primary key or unique index. -roadmap_1135_li=Compatibility with Derby and PostgreSQL\: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221. 
-roadmap_1136_li=Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString') -roadmap_1137_li=Support data type INTERVAL -roadmap_1138_li=Support nested transactions (possibly using savepoints internally). -roadmap_1139_li=Add a benchmark for bigger databases, and one for many users. -roadmap_1140_li=Compression in the result set over TCP/IP. -roadmap_1141_li=Support curtimestamp (like curtime, curdate). -roadmap_1142_li=Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options. -roadmap_1143_li=Release locks (shared or exclusive) on demand -roadmap_1144_li=Support OUTER UNION -roadmap_1145_li=Support parameterized views (similar to CSVREAD, but using just SQL for the definition) -roadmap_1146_li=A way (JDBC driver) to map an URL (jdbc\:h2map\:c1) to a connection object -roadmap_1147_li=Support dynamic linked schema (automatically adding/updating/removing tables) -roadmap_1148_li=Clustering\: adding a node should be very fast and without interrupting clients (very short lock) -roadmap_1149_li=Compatibility\: \# is the start of a single line comment (MySQL) but date quote (Access). Mode specific -roadmap_1150_li=Run benchmarks with Android, Java 7, java -server -roadmap_1151_li=Optimizations\: faster hash function for strings. 
-roadmap_1152_li=DatabaseEventListener\: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality -roadmap_1153_li=Benchmark\: add a graph to show how databases scale (performance/database size) -roadmap_1154_li=Implement a SQLData interface to map your data over to a custom object -roadmap_1155_li=In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers \= true) -roadmap_1156_li=Support multiple directories (on different hard drives) for the same database -roadmap_1157_li=Server protocol\: use challenge response authentication, but client sends hash(user+password) encrypted with response -roadmap_1158_li=Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server) -roadmap_1159_li=Support native XML data type - see http\://en.wikipedia.org/wiki/SQL/XML -roadmap_1160_li=Support triggers with a string property or option\: SpringTrigger, OSGITrigger -roadmap_1161_li=MySQL compatibility\: update test1 t1, test2 t2 set t1.id \= t2.id where t1.id \= t2.id; -roadmap_1162_li=Ability to resize the cache array when resizing the cache -roadmap_1163_li=Time based cache writing (one second after writing the log) -roadmap_1164_li=Check state of H2 driver for DDLUtils\: http\://issues.apache.org/jira/browse/DDLUTILS-185 -roadmap_1165_li=Index usage for REGEXP LIKE. -roadmap_1166_li=Compatibility\: add a role DBA (like ADMIN). -roadmap_1167_li=Better support multiple processors for in-memory databases. -roadmap_1168_li=Support N'text' -roadmap_1169_li=Support compatibility for jdbc\:hsqldb\:res\: -roadmap_1170_li=HSQLDB compatibility\: automatically convert to the next 'higher' data type. Example\: cast(2000000000 as int) + cast(2000000000 as int); (HSQLDB\: long; PostgreSQL\: integer out of range) -roadmap_1171_li=Provide an Java SQL builder with standard and H2 syntax -roadmap_1172_li=Trace\: write OS, file system, JVM,... 
when opening the database -roadmap_1173_li=Support indexes for views (probably requires materialized views) -roadmap_1174_li=Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters -roadmap_1175_li=Server\: use one listener (detect if the request comes from an PG or TCP client) -roadmap_1176_li=Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200 -roadmap_1177_li=Sequence\: PostgreSQL compatibility (rename, create) http\://www.postgresql.org/docs/8.2/static/sql-altersequence.html -roadmap_1178_li=DISTINCT\: support large result sets by sorting on all columns (additionally) and then removing duplicates. -roadmap_1179_li=Support a special trigger on all tables to allow building a transaction log reader. -roadmap_1180_li=File system with a background writer thread; test if this is faster -roadmap_1181_li=Better document the source code (high level documentation). -roadmap_1182_li=Support select * from dual a left join dual b on b.x\=(select max(x) from dual) -roadmap_1183_li=Optimization\: don't lock when the database is read-only -roadmap_1184_li=Issue 146\: Support merge join. -roadmap_1185_li=Integrate spatial functions from http\://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download -roadmap_1186_li=Cluster\: hot deploy (adding a node at runtime). -roadmap_1187_li=Support DatabaseMetaData.insertsAreDetected\: updatable result sets should detect inserts. -roadmap_1188_li=Oracle\: support DECODE method (convert to CASE WHEN). -roadmap_1189_li=Native search\: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping -roadmap_1190_li=Improve documentation of access rights. -roadmap_1191_li=Support opening a database that is in the classpath, maybe using a new file system. Workaround\: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation(). -roadmap_1192_li=Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others). 
-roadmap_1193_li=Remember the user defined data type (domain) of a column. -roadmap_1194_li=MVCC\: support multi-threaded kernel with multi-version concurrency. -roadmap_1195_li=Auto-server\: add option to define the port range or list. -roadmap_1196_li=Support Jackcess (MS Access databases) -roadmap_1197_li=Built-in methods to write large objects (BLOB and CLOB)\: FILE_WRITE('test.txt', 'Hello World') -roadmap_1198_li=Improve time to open large databases (see mail 'init time for distributed setup') -roadmap_1199_li=Move Maven 2 repository from hsql.sf.net to h2database.sf.net -roadmap_1200_li=Java 1.5 tool\: JdbcUtils.closeSilently(s1, s2,...) -roadmap_1201_li=Optimize A\=? OR B\=? to UNION if the cost is lower. -roadmap_1202_li=Javadoc\: document design patterns used -roadmap_1203_li=Support custom collators, for example for natural sort (for text that contains numbers). -roadmap_1204_li=Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt) -roadmap_1205_li=Convert SQL-injection-2.txt to html document, include SQLInjection.java sample -roadmap_1206_li=Support OUT parameters in user-defined procedures. -roadmap_1207_li=Web site design\: http\://www.igniterealtime.org/projects/openfire/index.jsp -roadmap_1208_li=HSQLDB compatibility\: Openfire server uses\: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC -roadmap_1209_li=Translation\: use ?? in help.csv -roadmap_1210_li=Translated .pdf -roadmap_1211_li=Recovery tool\: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file -roadmap_1212_li=Issue 357\: support getGeneratedKeys to return multiple rows when used with batch updates. This is supported by MySQL, but not Derby. Both PostgreSQL and HSQLDB don't support getGeneratedKeys. Also support it when using INSERT ... SELECT. 
-roadmap_1213_li=RECOVER\=2 to backup the database, run recovery, open the database -roadmap_1214_li=Recovery should work with encrypted databases -roadmap_1215_li=Corruption\: new error code, add help -roadmap_1216_li=Space reuse\: after init, scan all storages and free those that don't belong to a live database object -roadmap_1217_li=Access rights\: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects) -roadmap_1218_li=Support NOCACHE table option (Oracle). -roadmap_1219_li=Support table partitioning. -roadmap_1220_li=Add regular javadocs (using the default doclet, but another css) to the homepage. -roadmap_1221_li=The database should be kept open for a longer time when using the server mode. -roadmap_1222_li=Javadocs\: for each tool, add a copy & paste sample in the class level. -roadmap_1223_li=Javadocs\: add @author tags. -roadmap_1224_li=Fluent API for tools\: Server.createTcpServer().setPort(9081).setPassword(password).start(); -roadmap_1225_li=MySQL compatibility\: real SQL statement for DESCRIBE TEST -roadmap_1226_li=Use a default delay of 1 second before closing a database. -roadmap_1227_li=Write (log) to system table before adding to internal data structures. -roadmap_1228_li=Support direct lookup for MIN and MAX when using WHERE (see todo.txt / Direct Lookup). -roadmap_1229_li=Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case). -roadmap_1230_li=MVCC should not be memory bound (uncommitted data is kept in memory in the delta index; maybe using a regular b-tree index solves the problem). -roadmap_1231_li=Oracle compatibility\: support NLS_DATE_FORMAT. -roadmap_1232_li=Support for Thread.interrupt to cancel running statements. -roadmap_1233_li=Cluster\: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process). -roadmap_1234_li=H2 Console\: support CLOB/BLOB download using a link. 
-roadmap_1235_li=Support flashback queries as in Oracle. -roadmap_1236_li=Import / Export of fixed with text files. -roadmap_1237_li=HSQLDB compatibility\: automatic data type for SUM if value is the value is too big (by default use the same type as the data). -roadmap_1238_li=Improve the optimizer to select the right index for special cases\: where id between 2 and 4 and booleanColumn -roadmap_1239_li=Linked tables\: make hidden columns available (Oracle\: rowid and ora_rowscn columns). -roadmap_1240_li=H2 Console\: in-place autocomplete. -roadmap_1241_li=Support large databases\: split database files to multiple directories / disks (similar to tablespaces). -roadmap_1242_li=H2 Console\: support configuration option for fixed width (monospace) font. -roadmap_1243_li=Native fulltext search\: support analyzers (specially for Chinese, Japanese). -roadmap_1244_li=Automatically compact databases from time to time (as a background process). -roadmap_1245_li=Test Eclipse DTP. -roadmap_1246_li=H2 Console\: autocomplete\: keep the previous setting -roadmap_1247_li=executeBatch\: option to stop at the first failed statement. -roadmap_1248_li=Implement OLAP features as described here\: http\://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5 -roadmap_1249_li=Support Oracle ROWID (unique identifier for each row). -roadmap_1250_li=MySQL compatibility\: alter table add index i(c), add constraint c foreign key(c) references t(c); -roadmap_1251_li=Server mode\: improve performance for batch updates. -roadmap_1252_li=Applets\: support read-only databases in a zip file (accessed as a resource). -roadmap_1253_li=Long running queries / errors / trace system table. -roadmap_1254_li=H2 Console should support JaQu directly. -roadmap_1255_li=Better document FTL_SEARCH, FTL_SEARCH_DATA. -roadmap_1256_li=Sequences\: CURRVAL should be session specific. Compatibility with PostgreSQL. -roadmap_1257_li=Index creation using deterministic functions. 
-roadmap_1258_li=ANALYZE\: for unique indexes that allow null, count the number of null. -roadmap_1259_li=MySQL compatibility\: multi-table delete\: DELETE .. FROM .. [,...] USING - See http\://dev.mysql.com/doc/refman/5.0/en/delete.html -roadmap_1260_li=AUTO_SERVER\: support changing IP addresses (disable a network while the database is open). -roadmap_1261_li=Avoid using java.util.Calendar internally because it's slow, complicated, and buggy. -roadmap_1262_li=Support TRUNCATE .. CASCADE like PostgreSQL. -roadmap_1263_li=Fulltext search\: lazy result generation using SimpleRowSource. -roadmap_1264_li=Fulltext search\: support alternative syntax\: WHERE FTL_CONTAINS(name, 'hello'). -roadmap_1265_li=MySQL compatibility\: support REPLACE, see http\://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73. -roadmap_1266_li=MySQL compatibility\: support INSERT INTO table SET column1 \= value1, column2 \= value2 -roadmap_1267_li=Docs\: add a one line description for each functions and SQL statements at the top (in the link section). -roadmap_1268_li=Javadoc search\: weight for titles should be higher ('random' should list Functions as the best match). -roadmap_1269_li=Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes. -roadmap_1270_li=Issue 50\: Oracle compatibility\: support calling 0-parameters functions without parenthesis. Make constants obsolete. -roadmap_1271_li=MySQL, HSQLDB compatibility\: support where 'a'\=1 (not supported by Derby, PostgreSQL) -roadmap_1272_li=Finer granularity for SLF4J trace - See http\://code.google.com/p/h2database/issues/detail?id\=62 -roadmap_1273_li=Add database creation date and time to the database. -roadmap_1274_li=Support ASSERTION. 
-roadmap_1275_li=MySQL compatibility\: support comparing 1\='a' -roadmap_1276_li=Support PostgreSQL lock modes\: http\://www.postgresql.org/docs/8.3/static/explicit-locking.html -roadmap_1277_li=PostgreSQL compatibility\: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver. -roadmap_1278_li=RunScript should be able to read from system in (or quite mode for Shell). -roadmap_1279_li=Natural join\: support select x from dual natural join dual. -roadmap_1280_li=Support using system properties in database URLs (may be a security problem). -roadmap_1281_li=Natural join\: somehow support this\: select a.x, b.x, x from dual a natural join dual b -roadmap_1282_li=Use the Java service provider mechanism to register file systems and function libraries. -roadmap_1283_li=MySQL compatibility\: for auto_increment columns, convert 0 to next value (as when inserting NULL). -roadmap_1284_li=Optimization for multi-column IN\: use an index if possible. Example\: (A, B) IN((1, 2), (2, 3)). -roadmap_1285_li=Optimization for EXISTS\: convert to inner join or IN(..) if possible. -roadmap_1286_li=Functions\: support hashcode(value); cryptographic and fast -roadmap_1287_li=Serialized file lock\: support long running queries. -roadmap_1288_li=Network\: use 127.0.0.1 if other addresses don't work. -roadmap_1289_li=Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication. -roadmap_1290_li=Support reading JCR data\: one table per node type; query table; cache option -roadmap_1291_li=OSGi\: create a sample application, test, document. -roadmap_1292_li=help.csv\: use complete examples for functions; run as test case. -roadmap_1293_li=Functions to calculate the memory and disk space usage of a table, a row, or a value. -roadmap_1294_li=Re-implement PooledConnection; use a lightweight connection object. -roadmap_1295_li=Doclet\: convert tests in javadocs to a java class. 
-roadmap_1296_li=Doclet\: format fields like methods, but support sorting by name and value. -roadmap_1297_li=Doclet\: shrink the html files. -roadmap_1298_li=MySQL compatibility\: support SET NAMES 'latin1' - See also http\://code.google.com/p/h2database/issues/detail?id\=56 -roadmap_1299_li=Allow to scan index backwards starting with a value (to better support ORDER BY DESC). -roadmap_1300_li=Java Service Wrapper\: try http\://yajsw.sourceforge.net/ -roadmap_1301_li=Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE. -roadmap_1302_li=Use a lazy and auto-close input stream (open resource when reading, close on eof). -roadmap_1303_li=Connection pool\: 'reset session' command (delete temp tables, rollback, auto-commit true). -roadmap_1304_li=Improve SQL documentation, see http\://www.w3schools.com/sql/ -roadmap_1305_li=MySQL compatibility\: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL. -roadmap_1306_li=MS SQL Server compatibility\: support DATEPART syntax. -roadmap_1307_li=Sybase/DB2/Oracle compatibility\: support out parameters in stored procedures - See http\://code.google.com/p/h2database/issues/detail?id\=83 -roadmap_1308_li=Support INTERVAL data type (see Oracle and others). -roadmap_1309_li=Combine Server and Console tool (only keep Server). -roadmap_1310_li=Store the Lucene index in the database itself. -roadmap_1311_li=Support standard MERGE statement\: http\://en.wikipedia.org/wiki/Merge_%28SQL%29 -roadmap_1312_li=Oracle compatibility\: support DECODE(x, ...). -roadmap_1313_li=MVCC\: compare concurrent update behavior with PostgreSQL and Oracle. -roadmap_1314_li=HSQLDB compatibility\: CREATE FUNCTION (maybe using a Function interface). -roadmap_1315_li=HSQLDB compatibility\: support CALL "java.lang.Math.sqrt"(2.0) -roadmap_1316_li=Support comma as the decimal separator in the CSV tool. 
-roadmap_1317_li=Compatibility\: Java functions with SQLJ Part1 http\://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz -roadmap_1318_li=Compatibility\: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation. -roadmap_1319_li=CACHE_SIZE\: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache. -roadmap_1320_li=Support date/time/timestamp as documented in http\://en.wikipedia.org/wiki/ISO_8601 -roadmap_1321_li=PostgreSQL compatibility\: when in PG mode, treat BYTEA data like PG. -roadmap_1322_li=Support \=ANY(array) as in PostgreSQL. See also http\://www.postgresql.org/docs/8.0/interactive/arrays.html -roadmap_1323_li=IBM DB2 compatibility\: support PREVIOUS VALUE FOR sequence. -roadmap_1324_li=Compatibility\: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer). -roadmap_1325_li=Oracle compatibility\: support CREATE SYNONYM table FOR schema.table. -roadmap_1326_li=FTP\: document the server, including -ftpTask option to execute / kill remote processes -roadmap_1327_li=FTP\: problems with multithreading? -roadmap_1328_li=FTP\: implement SFTP / FTPS -roadmap_1329_li=FTP\: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file). -roadmap_1330_li=More secure default configuration if remote access is enabled. -roadmap_1331_li=Improve database file locking (maybe use native file locking). The current approach seems to be problematic if the file system is on a remote share (see Google Group 'Lock file modification time is in the future'). -roadmap_1332_li=Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE. -roadmap_1333_li=Issue 107\: Prefer using the ORDER BY index if LIMIT is used. -roadmap_1334_li=An index on (id, name) should be used for a query\: select * from t where s\=? 
order by i -roadmap_1335_li=Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL. -roadmap_1336_li=Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true). -roadmap_1337_li=Maybe disallow \= within database names (jdbc\:h2\:mem\:MODE\=DB2 means database name MODE\=DB2). -roadmap_1338_li=Fast alter table add column. -roadmap_1339_li=Improve concurrency for in-memory database operations. -roadmap_1340_li=Issue 122\: Support for connection aliases for remote tcp connections. -roadmap_1341_li=Fast scrambling (strong encryption doesn't help if the password is included in the application). -roadmap_1342_li=H2 Console\: support -webPassword to require a password to access preferences or shutdown. -roadmap_1343_li=Issue 126\: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number. -roadmap_1344_li=Issue 127\: Support activation/deactivation of triggers -roadmap_1345_li=Issue 130\: Custom log event listeners -roadmap_1346_li=Issue 131\: IBM DB2 compatibility\: sysibm.sysdummy1 -roadmap_1347_li=Issue 132\: Use Java enum trigger type. -roadmap_1348_li=Issue 134\: IBM DB2 compatibility\: session global variables. -roadmap_1349_li=Cluster\: support load balance with values for each server / auto detect. -roadmap_1350_li=FTL_SET_OPTION(keyString, valueString) with key stopWords at first. -roadmap_1351_li=Pluggable access control mechanism. -roadmap_1352_li=Fulltext search (Lucene)\: support streaming CLOB data. -roadmap_1353_li=Document/example how to create and read an encrypted script file. -roadmap_1354_li=Check state of http\://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins). -roadmap_1355_li=Fulltext search (Lucene)\: only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible. 
-roadmap_1356_li=Support a way to create or read compressed encrypted script files using an API. -roadmap_1357_li=Scripting language support (Javascript). -roadmap_1358_li=The network client should better detect if the server is not an H2 server and fail early. -roadmap_1359_li=H2 Console\: support CLOB/BLOB upload. -roadmap_1360_li=Database file lock\: detect hibernate / standby / very slow threads (compare system time). -roadmap_1361_li=Automatic detection of redundant indexes. -roadmap_1362_li=Maybe reject join without "on" (except natural join). -roadmap_1363_li=Implement GiST (Generalized Search Tree for Secondary Storage). -roadmap_1364_li=Function to read a number of bytes/characters from an BLOB or CLOB. -roadmap_1365_li=Issue 156\: Support SELECT ? UNION SELECT ?. -roadmap_1366_li=Automatic mixed mode\: support a port range list (to avoid firewall problems). -roadmap_1367_li=Support the pseudo column rowid, oid, _rowid_. -roadmap_1368_li=H2 Console / large result sets\: stream early instead of keeping a whole result in-memory -roadmap_1369_li=Support TRUNCATE for linked tables. -roadmap_1370_li=UNION\: evaluate INTERSECT before UNION (like most other database except Oracle). -roadmap_1371_li=Delay creating the information schema, and share metadata columns. -roadmap_1372_li=TCP Server\: use a nonce (number used once) to protect unencrypted channels against replay attacks. -roadmap_1373_li=Simplify running scripts and recovery\: CREATE FORCE USER (overwrites an existing user). -roadmap_1374_li=Support CREATE DATABASE LINK (a custom JDBC driver is already supported). -roadmap_1375_li=Support large GROUP BY operations. Issue 216. -roadmap_1376_li=Issue 163\: Allow to create foreign keys on metadata types. -roadmap_1377_li=Logback\: write a native DBAppender. -roadmap_1378_li=Cache size\: don't use more cache than what is available. -roadmap_1379_li=Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread. 
-roadmap_1380_li=Tree index\: Instead of an AVL tree, use a general balanced trees or a scapegoat tree. -roadmap_1381_li=User defined functions\: allow to store the bytecode (of just the class, or the jar file of the extension) in the database. -roadmap_1382_li=Compatibility\: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL. -roadmap_1383_li=Optimizer\: WHERE X\=? AND Y IN(?), it always uses the index on Y. Should be cost based. -roadmap_1384_li=Common Table Expression (CTE) / recursive queries\: support parameters. Issue 314. -roadmap_1385_li=Oracle compatibility\: support INSERT ALL. -roadmap_1386_li=Issue 178\: Optimizer\: index usage when both ascending and descending indexes are available. -roadmap_1387_li=Issue 179\: Related subqueries in HAVING clause. -roadmap_1388_li=IBM DB2 compatibility\: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero. -roadmap_1389_li=Creating primary key\: always create a constraint. -roadmap_1390_li=Maybe use a different page layout\: keep the data at the head of the page, and ignore the tail (don't store / read it). This may increase write / read performance depending on the file system. -roadmap_1391_li=Indexes of temporary tables are currently kept in-memory. Is this how it should be? -roadmap_1392_li=The Shell tool should support the same built-in commands as the H2 Console. -roadmap_1393_li=Maybe use PhantomReference instead of finalize. -roadmap_1394_li=Database file name suffix\: should only have one dot by default. Example\: .h2db -roadmap_1395_li=Issue 196\: Function based indexes -roadmap_1396_li=ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName. -roadmap_1397_li=Fix the disk space leak (killing the process at the exact right moment will increase the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java -roadmap_1398_li=ROWNUM\: Oracle compatibility when used within a subquery. Issue 198. 
-roadmap_1399_li=Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way. -roadmap_1400_li=ODBC\: encrypted databases are not supported because the ;CIPHER\= can not be set. -roadmap_1401_li=Support CLOB and BLOB update, specially conn.createBlob().setBinaryStream(1); -roadmap_1402_li=Optimizer\: index usage when both ascending and descending indexes are available. Issue 178. -roadmap_1403_li=Issue 306\: Support schema specific domains. -roadmap_1404_li=Triggers\: support user defined execution order. Oracle\: CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT ON TEST FOR EACH ROW FOLLOWS TEST_1. SQL specifies that multiple triggers should be fired in time-of-creation order. PostgreSQL uses name order, which was judged to be more convenient. Derby\: triggers are fired in the order in which they were created. -roadmap_1405_li=PostgreSQL compatibility\: combine "users" and "roles". See\: http\://www.postgresql.org/docs/8.1/interactive/user-manag.html -roadmap_1406_li=Improve documentation of system properties\: only list the property names, default values, and description. -roadmap_1407_li=Support running totals / cumulative sum using SUM(..) OVER(..). -roadmap_1408_li=Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize) -roadmap_1409_li=Triggers\: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others). -roadmap_1410_li=Common Table Expression (CTE) / recursive queries\: support INSERT INTO ... SELECT ... Issue 219. -roadmap_1411_li=Common Table Expression (CTE) / recursive queries\: support non-recursive queries. Issue 217. -roadmap_1412_li=Common Table Expression (CTE) / recursive queries\: avoid endless loop. Issue 218. -roadmap_1413_li=Common Table Expression (CTE) / recursive queries\: support multiple named queries. Issue 220. 
-roadmap_1414_li=Common Table Expression (CTE) / recursive queries\: identifier scope may be incorrect. Issue 222. -roadmap_1415_li=Log long running transactions (similar to long running statements). -roadmap_1416_li=Parameter data type is data type of other operand. Issue 205. -roadmap_1417_li=Some combinations of nested join with right outer join are not supported. -roadmap_1418_li=DatabaseEventListener.openConnection(id) and closeConnection(id). -roadmap_1419_li=Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, or to prevent to login with the same username and password from different IPs. Possibly using the DatabaseEventListener API, or a new API. -roadmap_1420_li=Compatibility for data type CHAR (Derby, HSQLDB). Issue 212. -roadmap_1421_li=Compatibility with MySQL TIMESTAMPDIFF. Issue 209. -roadmap_1422_li=Optimizer\: use a histogram of the data, specially for non-normal distributions. -roadmap_1423_li=Trigger\: allow declaring as source code (like functions). -roadmap_1424_li=User defined aggregate\: allow declaring as source code (like functions). -roadmap_1425_li=The error "table not found" is sometimes caused by using the wrong database. Add "(this database is empty)" to the exception message if applicable. -roadmap_1426_li=MySQL + PostgreSQL compatibility\: support string literal escape with \\n. -roadmap_1427_li=PostgreSQL compatibility\: support string literal escape with double \\\\. -roadmap_1428_li=Document the TCP server "management_db". Maybe include the IP address of the client. -roadmap_1429_li=Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main -roadmap_1430_li=If a database object was not found in the current schema, but one with the same name existed in another schema, included that in the error message. -roadmap_1431_li=Optimization to use an index for OR when using multiple keys\: where (key1 \= ? and key2 \= ?) OR (key1 \= ? and key2 \= ?) 
-roadmap_1432_li=Issue 302\: Support optimizing queries with both inner and outer joins, as in\: select * from test a inner join test b on a.id\=b.id inner join o on o.id\=a.id where b.x\=1 (the optimizer should swap a and b here). See also TestNestedJoins, tag "swapInnerJoinTables". -roadmap_1433_li=JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool). -roadmap_1434_li=Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...; -roadmap_1435_li=nioMapped file system\: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example). -roadmap_1436_li=Column as parameter of function table. Issue 228. -roadmap_1437_li=Connection pool\: detect ;AUTOCOMMIT\=FALSE in the database URL, and if set, disable autocommit for all connections. -roadmap_1438_li=Compatibility with MS Access\: support "&" to concatenate text. -roadmap_1439_li=The BACKUP statement should not synchronize on the database, and therefore should not block other users. -roadmap_1440_li=Document the database file format. -roadmap_1441_li=Support reading LOBs. -roadmap_1442_li=Require appending DANGEROUS\=TRUE when using certain dangerous settings such as LOG\=0, LOG\=1, LOCK_MODE\=0, disabling FILE_LOCK,... -roadmap_1443_li=Support UDT (user defined types) similar to how Apache Derby supports it\: check constraint, allow to use it in Java functions as parameters (return values already seem to work). -roadmap_1444_li=Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file, optional compatibility with current encrypted database files). -roadmap_1445_li=Issue 229\: SELECT with simple OR tests uses tableScan when it could use indexes. -roadmap_1446_li=GROUP BY queries should use a temporary table if there are too many rows. 
-roadmap_1447_li=BLOB\: support random access when reading. -roadmap_1448_li=CLOB\: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form). -roadmap_1449_li=Compatibility\: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...). -roadmap_1450_li=Compatibility with MySQL\: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...). -roadmap_1451_li=Compatibility with MySQL\: support non-strict mode (sql_mode \= "") any data that is too large for the column will just be truncated or set to the default value. -roadmap_1452_li=The full condition should be sent to the linked table, not just the indexed condition. Example\: TestLinkedTableFullCondition -roadmap_1453_li=Compatibility with IBM DB2\: CREATE PROCEDURE. -roadmap_1454_li=Compatibility with IBM DB2\: SQL cursors. -roadmap_1455_li=Single-column primary key values are always stored explicitly. This is not required. -roadmap_1456_li=Compatibility with MySQL\: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8). -roadmap_1457_li=CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true. -roadmap_1458_li=Optimization for large lists for column IN(1, 2, 3, 4,...) - currently an list is used, could potentially use a hash set (maybe only for a part of the values - the ones that can be evaluated). -roadmap_1459_li=Compatibility for ARRAY data type (Oracle\: VARRAY(n) of VARCHAR(m); HSQLDB\: VARCHAR(n) ARRAY; Postgres\: VARCHAR(n)[]). -roadmap_1460_li=PostgreSQL compatible array literal syntax\: ARRAY[['a', 'b'], ['c', 'd']] -roadmap_1461_li=PostgreSQL compatibility\: UPDATE with FROM. -roadmap_1462_li=Issue 297\: Oracle compatibility for "at time zone". -roadmap_1463_li=IBM DB2 compatibility\: IDENTITY_VAL_LOCAL(). -roadmap_1464_li=Support SQL/XML. -roadmap_1465_li=Support concurrent opening of databases. 
-roadmap_1466_li=Improved error message and diagnostics in case of network configuration problems. -roadmap_1467_li=TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases). -roadmap_1468_li=Adding a primary key should make the columns 'not null' unless if there is a row with null (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby). -roadmap_1469_li=ARRAY data type\: support Integer[] and so on in Java functions (currently only Object[] is supported). -roadmap_1470_li=MySQL compatibility\: LOCK TABLES a READ, b READ - see also http\://dev.mysql.com/doc/refman/5.0/en/lock-tables.html -roadmap_1471_li=The HTML to PDF converter should use http\://code.google.com/p/wkhtmltopdf/ -roadmap_1472_li=Issue 303\: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)". -roadmap_1473_li=MySQL compatibility\: update test1 t1, test2 t2 set t1.name\=t2.name where t1.id\=t2.id. -roadmap_1474_li=Issue 283\: Improve performance of H2 on Android. -roadmap_1475_li=Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s). -roadmap_1476_li=Column compression option - see http\://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d -roadmap_1477_li=PostgreSQL compatibility\: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID). -roadmap_1478_li=MS SQL Server compatibility\: support @@ROWCOUNT. -roadmap_1479_li=PostgreSQL compatibility\: LOG(x) is LOG10(x) and not LN(x). -roadmap_1480_li=Issue 311\: Serialized lock mode\: executeQuery of write operations fails. -roadmap_1481_li=PostgreSQL compatibility\: support PgAdmin III (specially the function current_setting). -roadmap_1482_li=MySQL compatibility\: support TIMESTAMPADD. -roadmap_1483_li=Support SELECT ... FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). -roadmap_1484_li=Support SELECT ... 
FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). -roadmap_1485_li=Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase). -roadmap_1486_li=TRANSACTION_ID() for in-memory databases. -roadmap_1487_li=TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL). -roadmap_1488_li=Support [INNER | OUTER] JOIN USING(column [,...]). -roadmap_1489_li=Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle) -roadmap_1490_li=GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby). -roadmap_1491_li=Sybase / MS SQL Server compatibility\: CONVERT(..) parameters are swapped. -roadmap_1492_li=Index conditions\: WHERE AGE>1 should not scan through all rows with AGE\=1. -roadmap_1493_li=PHP support\: H2 should support PDO, or test with PostgreSQL PDO. -roadmap_1494_li=Outer joins\: if no column of the outer join table is referenced, the outer join table could be removed from the query. -roadmap_1495_li=Cluster\: allow using auto-increment and identity columns by ensuring executed in lock-step. -roadmap_1496_li=MySQL compatibility\: index names only need to be unique for the given table. -roadmap_1497_li=Issue 352\: constraints\: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases. -roadmap_1498_li=Oracle compatibility\: support MEDIAN aggregate function. -roadmap_1499_li=Issue 348\: Oracle compatibility\: division should return a decimal result. -roadmap_1500_li=Read rows on demand\: instead of reading the whole row, only read up to that column that is requested. Keep an pointer to the data area and the column id that is already read. -roadmap_1501_li=Long running transactions\: log session id when detected. 
-roadmap_1502_li=Optimization\: "select id from test" should use the index on id even without "order by". -roadmap_1503_li=Issue 362\: LIMIT support for UPDATE statements (MySQL compatibility). -roadmap_1504_li=Sybase SQL Anywhere compatibility\: SELECT TOP ... START AT ... -roadmap_1505_li=Use Java 6 SQLException subclasses. -roadmap_1506_li=Issue 390\: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR -roadmap_1507_li=Use Java 6 exceptions\: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,.. -roadmap_1508_h2=Not Planned -roadmap_1509_li=HSQLDB (did) support this\: select id i from test where i<0 (other databases don't). Supporting it may break compatibility. -roadmap_1510_li=String.intern (so that Strings can be compared with \=\=) will not be used because some VMs have problems when used extensively. -roadmap_1511_li=In prepared statements, identifier names (table names and so on) can not be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements. 
-sourceError_1000_h1=Error Analyzer -sourceError_1001_a=Home -sourceError_1002_a=Input -sourceError_1003_h2=  Details  Source Code -sourceError_1004_p=Paste the error message and stack trace below and click on 'Details' or 'Source Code'\: -sourceError_1005_b=Error Code\: -sourceError_1006_b=Product Version\: -sourceError_1007_b=Message\: -sourceError_1008_b=More Information\: -sourceError_1009_b=Stack Trace\: -sourceError_1010_b=Source File\: -sourceError_1011_p=\ Inline -tutorial_1000_h1=Tutorial -tutorial_1001_a=\ Starting and Using the H2 Console -tutorial_1002_a=\ Special H2 Console Syntax -tutorial_1003_a=\ Settings of the H2 Console -tutorial_1004_a=\ Connecting to a Database using JDBC -tutorial_1005_a=\ Creating New Databases -tutorial_1006_a=\ Using the Server -tutorial_1007_a=\ Using Hibernate -tutorial_1008_a=\ Using TopLink and Glassfish -tutorial_1009_a=\ Using EclipseLink -tutorial_1010_a=\ Using Apache ActiveMQ -tutorial_1011_a=\ Using H2 within NetBeans -tutorial_1012_a=\ Using H2 with jOOQ -tutorial_1013_a=\ Using Databases in Web Applications -tutorial_1014_a=\ Android -tutorial_1015_a=\ CSV (Comma Separated Values) Support -tutorial_1016_a=\ Upgrade, Backup, and Restore -tutorial_1017_a=\ Command Line Tools -tutorial_1018_a=\ The Shell Tool -tutorial_1019_a=\ Using OpenOffice Base -tutorial_1020_a=\ Java Web Start / JNLP -tutorial_1021_a=\ Using a Connection Pool -tutorial_1022_a=\ Fulltext Search -tutorial_1023_a=\ User-Defined Variables -tutorial_1024_a=\ Date and Time -tutorial_1025_a=\ Using Spring -tutorial_1026_a=\ OSGi -tutorial_1027_a=\ Java Management Extension (JMX) -tutorial_1028_h2=Starting and Using the H2 Console -tutorial_1029_p=\ The H2 Console application lets you access a database using a browser. This can be a H2 database, or another database that supports the JDBC API. -tutorial_1030_p=\ This is a client/server application, so both a server and a client (a browser) are required to run it. 
-tutorial_1031_p=\ Depending on your platform and environment, there are multiple ways to start the H2 Console\: -tutorial_1032_th=OS -tutorial_1033_th=Start -tutorial_1034_td=Windows -tutorial_1035_td=\ Click [Start], [All Programs], [H2], and [H2 Console (Command Line)] -tutorial_1036_td=\ An icon will be added to the system tray\: -tutorial_1037_td=\ If you don't get the window and the system tray icon, then maybe Java is not installed correctly (in this case, try another way to start the application). A browser window should open and point to the login page at http\://localhost\:8082. -tutorial_1038_td=Windows -tutorial_1039_td=\ Open a file browser, navigate to h2/bin, and double click on h2.bat. -tutorial_1040_td=\ A console window appears. If there is a problem, you will see an error message in this window. A browser window will open and point to the login page (URL\: http\://localhost\:8082). -tutorial_1041_td=Any -tutorial_1042_td=\ Double click on the h2*.jar file. This only works if the .jar suffix is associated with Java. -tutorial_1043_td=Any -tutorial_1044_td=\ Open a console window, navigate to the directory h2/bin, and type\: -tutorial_1045_h3=Firewall -tutorial_1046_p=\ If you start the server, you may get a security warning from the firewall (if you have installed one). If you don't want other computers in the network to access the application on your machine, you can let the firewall block those connections. The connection from the local machine will still work. Only if you want other computers to access the database on this computer, you need allow remote connections in the firewall. -tutorial_1047_p=\ It has been reported that when using Kaspersky 7.0 with firewall, the H2 Console is very slow when connecting over the IP address. A workaround is to connect using 'localhost'. -tutorial_1048_p=\ A small firewall is already built into the server\: other computers may not connect to the server by default. 
To change this, go to 'Preferences' and select 'Allow connections from other computers'. -tutorial_1049_h3=Testing Java -tutorial_1050_p=\ To find out which version of Java is installed, open a command prompt and type\: -tutorial_1051_p=\ If you get an error message, you may need to add the Java binary directory to the path environment variable. -tutorial_1052_h3=Error Message 'Port may be in use' -tutorial_1053_p=\ You can only start one instance of the H2 Console, otherwise you will get the following error message\: "The Web server could not be started. Possible cause\: another server is already running...". It is possible to start multiple console applications on the same computer (using different ports), but this is usually not required as the console supports multiple concurrent connections. -tutorial_1054_h3=Using another Port -tutorial_1055_p=\ If the default port of the H2 Console is already in use by another application, then a different port needs to be configured. The settings are stored in a properties file. For details, see Settings of the H2 Console. The relevant entry is webPort. -tutorial_1056_p=\ If no port is specified for the TCP and PG servers, each service will try to listen on its default port. If the default port is already in use, a random port is used. -tutorial_1057_h3=Connecting to the Server using a Browser -tutorial_1058_p=\ If the server started successfully, you can connect to it using a web browser. Javascript needs to be enabled. If you started the server on the same computer as the browser, open the URL http\://localhost\:8082. If you want to connect to the application from another computer, you need to provide the IP address of the server, for example\: http\://192.168.0.2\:8082. If you enabled TLS on the server side, the URL needs to start with https\://. -tutorial_1059_h3=Multiple Concurrent Sessions -tutorial_1060_p=\ Multiple concurrent browser sessions are supported. 
As that the database objects reside on the server, the amount of concurrent work is limited by the memory available to the server application. -tutorial_1061_h3=Login -tutorial_1062_p=\ At the login page, you need to provide connection information to connect to a database. Set the JDBC driver class of your database, the JDBC URL, user name, and password. If you are done, click [Connect]. -tutorial_1063_p=\ You can save and reuse previously saved settings. The settings are stored in a properties file (see Settings of the H2 Console). -tutorial_1064_h3=Error Messages -tutorial_1065_p=\ Error messages in are shown in red. You can show/hide the stack trace of the exception by clicking on the message. -tutorial_1066_h3=Adding Database Drivers -tutorial_1067_p=\ To register additional JDBC drivers (MySQL, PostgreSQL, HSQLDB,...), add the jar file names to the environment variables H2DRIVERS or CLASSPATH. Example (Windows)\: to add the HSQLDB JDBC driver C\:\\Programs\\hsqldb\\lib\\hsqldb.jar, set the environment variable H2DRIVERS to C\:\\Programs\\hsqldb\\lib\\hsqldb.jar. -tutorial_1068_p=\ Multiple drivers can be set; entries need to be separated by ; (Windows) or \: (other operating systems). Spaces in the path names are supported. The settings must not be quoted. -tutorial_1069_h3=Using the H2 Console -tutorial_1070_p=\ The H2 Console application has three main panels\: the toolbar on top, the tree on the left, and the query/result panel on the right. The database objects (for example, tables) are listed on the left. Type a SQL command in the query panel and click [Run]. The result appears just below the command. -tutorial_1071_h3=Inserting Table Names or Column Names -tutorial_1072_p=\ To insert table and column names into the script, click on the item in the tree. If you click on a table while the query is empty, then SELECT * FROM ... is added. While typing a query, the table that was used is expanded in the tree. 
For example if you type SELECT * FROM TEST T WHERE T. then the table TEST is expanded. -tutorial_1073_h3=Disconnecting and Stopping the Application -tutorial_1074_p=\ To log out of the database, click [Disconnect] in the toolbar panel. However, the server is still running and ready to accept new sessions. -tutorial_1075_p=\ To stop the server, right click on the system tray icon and select [Exit]. If you don't have the system tray icon, navigate to [Preferences] and click [Shutdown], press [Ctrl]+[C] in the console where the server was started (Windows), or close the console window. -tutorial_1076_h2=Special H2 Console Syntax -tutorial_1077_p=\ The H2 Console supports a few built-in commands. Those are interpreted within the H2 Console, so they work with any database. Built-in commands need to be at the beginning of a statement (before any remarks), otherwise they are not parsed correctly. If in doubt, add ; before the command. -tutorial_1078_th=Command(s) -tutorial_1079_th=Description -tutorial_1080_td=\ @autocommit_true; -tutorial_1081_td=\ @autocommit_false; -tutorial_1082_td=\ Enable or disable autocommit. -tutorial_1083_td=\ @cancel; -tutorial_1084_td=\ Cancel the currently running statement. -tutorial_1085_td=\ @columns null null TEST; -tutorial_1086_td=\ @index_info null null TEST; -tutorial_1087_td=\ @tables; -tutorial_1088_td=\ @tables null null TEST; -tutorial_1089_td=\ Call the corresponding DatabaseMetaData.get method. Patterns are case sensitive (usually identifiers are uppercase). For information about the parameters, see the Javadoc documentation. Missing parameters at the end of the line are set to null. 
The complete list of metadata commands is\: @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, @procedure_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns -tutorial_1090_td=\ @edit select * from test; -tutorial_1091_td=\ Use an updatable result set. -tutorial_1092_td=\ @generated insert into test() values(); -tutorial_1093_td=\ Show the result of Statement.getGeneratedKeys(). -tutorial_1094_td=\ @history; -tutorial_1095_td=\ List the command history. -tutorial_1096_td=\ @info; -tutorial_1097_td=\ Display the result of various Connection and DatabaseMetaData methods. -tutorial_1098_td=\ @list select * from test; -tutorial_1099_td=\ Show the result set in list format (each column on its own line, with row numbers). -tutorial_1100_td=\ @loop 1000 select ?, ?/*rnd*/; -tutorial_1101_td=\ @loop 1000 @statement select ?; -tutorial_1102_td=\ Run the statement this many times. Parameters (?) are set using a loop from 0 up to x - 1. Random values are used for each ?/*rnd*/. A Statement object is used instead of a PreparedStatement if @statement is used. Result sets are read until ResultSet.next() returns false. Timing information is printed. -tutorial_1103_td=\ @maxrows 20; -tutorial_1104_td=\ Set the maximum number of rows to display. -tutorial_1105_td=\ @memory; -tutorial_1106_td=\ Show the used and free memory. This will call System.gc(). -tutorial_1107_td=\ @meta select 1; -tutorial_1108_td=\ List the ResultSetMetaData after running the query. -tutorial_1109_td=\ @parameter_meta select ?; -tutorial_1110_td=\ Show the result of the PreparedStatement.getParameterMetaData() calls. The statement is not executed. 
-tutorial_1111_td=\ @prof_start; -tutorial_1112_td=\ call hash('SHA256', '', 1000000); -tutorial_1113_td=\ @prof_stop; -tutorial_1114_td=\ Start/stop the built-in profiling tool. The top 3 stack traces of the statement(s) between start and stop are listed (if there are 3). -tutorial_1115_td=\ @prof_start; -tutorial_1116_td=\ @sleep 10; -tutorial_1117_td=\ @prof_stop; -tutorial_1118_td=\ Sleep for a number of seconds. Used to profile a long running query or operation that is running in another session (but in the same process). -tutorial_1119_td=\ @transaction_isolation; -tutorial_1120_td=\ @transaction_isolation 2; -tutorial_1121_td=\ Display (without parameters) or change (with parameters 1, 2, 4, 8) the transaction isolation level. -tutorial_1122_h2=Settings of the H2 Console -tutorial_1123_p=\ The settings of the H2 Console are stored in a configuration file called .h2.server.properties in you user home directory. For Windows installations, the user home directory is usually C\:\\Documents and Settings\\[username] or C\:\\Users\\[username]. The configuration file contains the settings of the application and is automatically created when the H2 Console is first started. Supported settings are\: -tutorial_1124_code=webAllowOthers -tutorial_1125_li=\: allow other computers to connect. -tutorial_1126_code=webPort -tutorial_1127_li=\: the port of the H2 Console -tutorial_1128_code=webSSL -tutorial_1129_li=\: use encrypted TLS (HTTPS) connections. -tutorial_1130_p=\ In addition to those settings, the properties of the last recently used connection are listed in the form <number>\=<name>|<driver>|<url>|<user> using the escape character \\. Example\: 1\=Generic H2 (Embedded)|org.h2.Driver|jdbc\\\:h2\\\:~/test|sa -tutorial_1131_h2=Connecting to a Database using JDBC -tutorial_1132_p=\ To connect to a database, a Java application first needs to load the database driver, and then get a connection. 
A simple way to do that is using the following code\: -tutorial_1133_p=\ This code first loads the driver (Class.forName(...)) and then opens a connection (using DriverManager.getConnection()). The driver name is "org.h2.Driver". The database URL always needs to start with jdbc\:h2\: to be recognized by this database. The second parameter in the getConnection() call is the user name (sa for System Administrator in this example). The third parameter is the password. In this database, user names are not case sensitive, but passwords are. -tutorial_1134_h2=Creating New Databases -tutorial_1135_p=\ By default, if the database specified in the URL does not yet exist, a new (empty) database is created automatically. The user that created the database automatically becomes the administrator of this database. -tutorial_1136_p=\ Auto-creating new database can be disabled, see Opening a Database Only if it Already Exists. -tutorial_1137_h2=Using the Server -tutorial_1138_p=\ H2 currently supports three server\: a web server (for the H2 Console), a TCP server (for client/server connections) and an PG server (for PostgreSQL clients). Please note that only the web server supports browser connections. The servers can be started in different ways, one is using the Server tool. Starting the server doesn't open a database - databases are opened as soon as a client connects. -tutorial_1139_h3=Starting the Server Tool from Command Line -tutorial_1140_p=\ To start the Server tool from the command line with the default settings, run\: -tutorial_1141_p=\ This will start the tool with the default options. To get the list of options and default values, run\: -tutorial_1142_p=\ There are options available to use other ports, and start or not start parts. 
-tutorial_1143_h3=Connecting to the TCP Server -tutorial_1144_p=\ To remotely connect to a database using the TCP server, use the following driver and database URL\: -tutorial_1145_li=JDBC driver class\: org.h2.Driver -tutorial_1146_li=Database URL\: jdbc\:h2\:tcp\://localhost/~/test -tutorial_1147_p=\ For details about the database URL, see also in Features. Please note that you can't connection with a web browser to this URL. You can only connect using a H2 client (over JDBC). -tutorial_1148_h3=Starting the TCP Server within an Application -tutorial_1149_p=\ Servers can also be started and stopped from within an application. Sample code\: -tutorial_1150_h3=Stopping a TCP Server from Another Process -tutorial_1151_p=\ The TCP server can be stopped from another process. To stop the server from the command line, run\: -tutorial_1152_p=\ To stop the server from a user application, use the following code\: -tutorial_1153_p=\ This function will only stop the TCP server. If other server were started in the same process, they will continue to run. To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. Shutting down a TCP server can be protected using the option -tcpPassword (the same password must be used to start and stop the TCP server). -tutorial_1154_h2=Using Hibernate -tutorial_1155_p=\ This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect, or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy. A patch for Hibernate has been submitted and is now applied. You can rename it to H2Dialect.java and include this as a patch in your application, or upgrade to a version of Hibernate where this is fixed. -tutorial_1156_p=\ When using Hibernate, try to use the H2Dialect if possible. 
When using the H2Dialect, compatibility modes such as MODE\=MySQL are not supported. When using such a compatibility mode, use the Hibernate dialect for the corresponding database instead of the H2Dialect; but please note H2 does not support all features of all databases. -tutorial_1157_h2=Using TopLink and Glassfish -tutorial_1158_p=\ To use H2 with Glassfish (or Sun AS), set the Datasource Classname to org.h2.jdbcx.JdbcDataSource. You can set this in the GUI at Application Server - Resources - JDBC - Connection Pools, or by editing the file sun-resources.xml\: at element jdbc-connection-pool, set the attribute datasource-classname to org.h2.jdbcx.JdbcDataSource. -tutorial_1159_p=\ The H2 database is compatible with HSQLDB and PostgreSQL. To take advantage of H2 specific features, use the H2Platform. The source code of this platform is included in H2 at src/tools/oracle/toplink/essentials/platform/database/DatabasePlatform.java.txt. You will need to copy this file to your application, and rename it to .java. To enable it, change the following setting in persistence.xml\: -tutorial_1160_p=\ In old versions of Glassfish, the property name is toplink.platform.class.name. -tutorial_1161_p=\ To use H2 within Glassfish, copy the h2*.jar to the directory glassfish/glassfish/lib. -tutorial_1162_h2=Using EclipseLink -tutorial_1163_p=\ To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many case. See also H2Platform. -tutorial_1164_h2=Using Apache ActiveMQ -tutorial_1165_p=\ When using H2 as the backend database for Apache ActiveMQ, please use the TransactDatabaseLocker instead of the default locking mechanism. Otherwise the database file will grow without bounds. 
The problem is that the default locking mechanism uses an uncommitted UPDATE transaction, which keeps the transaction log from shrinking (causes the database file to grow). Instead of using an UPDATE statement, the TransactDatabaseLocker uses SELECT ... FOR UPDATE which is not problematic. To use it, change the ApacheMQ configuration element <jdbcPersistenceAdapter> element, property databaseLocker\="org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker". However, using the MVCC mode will again result in the same problem. Therefore, please do not use the MVCC mode in this case. Another (more dangerous) solution is to set useDatabaseLock to false. -tutorial_1166_h2=Using H2 within NetBeans -tutorial_1167_p=\ The project H2 Database Engine Support For NetBeans allows you to start and stop the H2 server from within the IDE. -tutorial_1168_p=\ There is a known issue when using the Netbeans SQL Execution Window\: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. In this case, two sequence values are allocated instead of just one. -tutorial_1169_h2=Using H2 with jOOQ -tutorial_1170_p=\ jOOQ adds a thin layer on top of JDBC, allowing for type-safe SQL construction, including advanced SQL, stored procedures and advanced data types. jOOQ takes your database schema as a base for code generation. If this is your example schema\: -tutorial_1171_p=\ then run the jOOQ code generator on the command line using this command\: -tutorial_1172_p=\ ...where codegen.xml is on the classpath and contains this information -tutorial_1173_p=\ Using the generated source, you can query the database as follows\: -tutorial_1174_p=\ See more details on jOOQ Homepage and in the jOOQ Tutorial -tutorial_1175_h2=Using Databases in Web Applications -tutorial_1176_p=\ There are multiple ways to access a database from within web applications. 
Here are some examples if you use Tomcat or JBoss. -tutorial_1177_h3=Embedded Mode -tutorial_1178_p=\ The (currently) simplest solution is to use the database in the embedded mode, that means open a connection in your application when it starts (a good solution is using a Servlet Listener, see below), or when a session starts. A database can be accessed from multiple sessions and applications at the same time, as long as they run in the same process. Most Servlet Containers (for example Tomcat) are just using one process, so this is not a problem (unless you run Tomcat in clustered mode). Tomcat uses multiple threads and multiple classloaders. If multiple applications access the same database at the same time, you need to put the database jar in the shared/lib or server/lib directory. It is a good idea to open the database when the web application starts, and close it when the web application stops. If using multiple applications, only one (any) of them needs to do that. In the application, an idea is to use one connection per Session, or even one connection per request (action). Those connections should be closed after use if possible (but it's not that bad if they don't get closed). -tutorial_1179_h3=Server Mode -tutorial_1180_p=\ The server mode is similar, but it allows you to run the server in another process. -tutorial_1181_h3=Using a Servlet Listener to Start and Stop a Database -tutorial_1182_p=\ Add the h2*.jar file to your web application, and add the following snippet to your web.xml file (between the context-param and the filter section)\: -tutorial_1183_p=\ For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc\:h2\:~/test, user name sa, and password sa. If you want to use this connection within your servlet, you can access as follows\: -tutorial_1184_code=DbStarter -tutorial_1185_p=\ can also start the TCP server, however this is disabled by default. 
To enable it, use the parameter db.tcpServer in the file web.xml. Here is the complete list of options. These options need to be placed between the description tag and the listener / filter tags\: -tutorial_1186_p=\ When the web application is stopped, the database connection will be closed automatically. If the TCP server is started within the DbStarter, it will also be stopped automatically. -tutorial_1187_h3=Using the H2 Console Servlet -tutorial_1188_p=\ The H2 Console is a standalone application and includes its own web server, but it can be used as a servlet as well. To do that, include the the h2*.jar file in your application, and add the following configuration to your web.xml\: -tutorial_1189_p=\ For details, see also src/tools/WEB-INF/web.xml. -tutorial_1190_p=\ To create a web application with just the H2 Console, run the following command\: -tutorial_1191_h2=Android -tutorial_1192_p=\ You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. So far, only very few tests and benchmarks were run, but it seems that performance is similar to SQLite, except for opening and closing a database, which is not yet optimized in H2 (H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. So far, only very few tests have been run, and everything seems to work as expected. Fulltext search was not yet tested, however the native fulltext search should work. -tutorial_1193_p=\ Reasons to use H2 instead of SQLite are\: -tutorial_1194_li=Full Unicode support including UPPER() and LOWER(). -tutorial_1195_li=Streaming API for BLOB and CLOB data. -tutorial_1196_li=Fulltext search. -tutorial_1197_li=Multiple connections. -tutorial_1198_li=User defined functions and triggers. -tutorial_1199_li=Database file encryption. -tutorial_1200_li=Reading and writing CSV files (this feature can be used outside the database as well). 
-tutorial_1201_li=Referential integrity and check constraints. -tutorial_1202_li=Better data type and SQL support. -tutorial_1203_li=In-memory databases, read-only databases, linked tables. -tutorial_1204_li=Better compatibility with other databases which simplifies porting applications. -tutorial_1205_li=Possibly better performance (so far for read operations). -tutorial_1206_li=Server mode (accessing a database on a different machine over TCP/IP). -tutorial_1207_p=\ Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). Both the regular H2 jar file and the smaller h2small-*.jar can be used. To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) or build.bat jarSmall (Windows). -tutorial_1208_p=\ The database files needs to be stored in a place that is accessible for the application. Example\: -tutorial_1209_p=\ Limitations\: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android. -tutorial_1210_h2=CSV (Comma Separated Values) Support -tutorial_1211_p=\ The CSV file support can be used inside the database using the functions CSVREAD and CSVWRITE, or it can be used outside the database as a standalone tool. -tutorial_1212_h3=Reading a CSV File from Within a Database -tutorial_1213_p=\ A CSV file can be read using the function CSVREAD. Example\: -tutorial_1214_p=\ Please note for performance reason, CSVREAD should not be used inside a join. Instead, import the data first (possibly into a temporary table), create the required indexes if necessary, and then query this table. -tutorial_1215_h3=Importing Data from a CSV File -tutorial_1216_p=\ A fast way to load or import data (sometimes called 'bulk load') from a CSV file is to combine table creation with import. Optionally, the column names and data types can be set when creating the table. Another option is to use INSERT INTO ... SELECT. 
-tutorial_1217_h3=Writing a CSV File from Within a Database -tutorial_1218_p=\ The built-in function CSVWRITE can be used to create a CSV file from a query. Example\: -tutorial_1219_h3=Writing a CSV File from a Java Application -tutorial_1220_p=\ The Csv tool can be used in a Java application even when not using a database at all. Example\: -tutorial_1221_h3=Reading a CSV File from a Java Application -tutorial_1222_p=\ It is possible to read a CSV file without opening a database. Example\: -tutorial_1223_h2=Upgrade, Backup, and Restore -tutorial_1224_h3=Database Upgrade -tutorial_1225_p=\ The recommended way to upgrade from one version of the database engine to the next version is to create a backup of the database (in the form of a SQL script) using the old engine, and then execute the SQL script using the new engine. -tutorial_1226_h3=Backup using the Script Tool -tutorial_1227_p=\ The recommended way to backup a database is to create a compressed SQL script file. This will result in a small, human readable, and database version independent backup. Creating the script will also verify the checksums of the database file. The Script tool is ran as follows\: -tutorial_1228_p=\ It is also possible to use the SQL command SCRIPT to create the backup of the database. For more information about the options, see the SQL command SCRIPT. The backup can be done remotely, however the file will be created on the server side. The built in FTP server could be used to retrieve the file from the server. -tutorial_1229_h3=Restore from a Script -tutorial_1230_p=\ To restore a database from a SQL script file, you can use the RunScript tool\: -tutorial_1231_p=\ For more information about the options, see the SQL command RUNSCRIPT. The restore can be done remotely, however the file needs to be on the server side. The built in FTP server could be used to copy the file to the server. It is also possible to use the SQL command RUNSCRIPT to execute a SQL script. 
SQL script files may contain references to other script files, in the form of RUNSCRIPT commands. However, when using the server mode, the references script files need to be available on the server side. -tutorial_1232_h3=Online Backup -tutorial_1233_p=\ The BACKUP SQL statement and the Backup tool both create a zip file with the database file. However, the contents of this file are not human readable. -tutorial_1234_p=\ The resulting backup is transactionally consistent, meaning the consistency and atomicity rules apply. -tutorial_1235_p=\ The Backup tool (org.h2.tools.Backup) can not be used to create a online backup; the database must not be in use while running this program. -tutorial_1236_p=\ Creating a backup by copying the database files while the database is running is not supported, except if the file systems support creating snapshots. With other file systems, it can't be guaranteed that the data is copied in the right order. -tutorial_1237_h2=Command Line Tools -tutorial_1238_p=\ This database comes with a number of command line tools. To get more information about a tool, start it with the parameter '-?', for example\: -tutorial_1239_p=\ The command line tools are\: -tutorial_1240_code=Backup -tutorial_1241_li=\ creates a backup of a database. -tutorial_1242_code=ChangeFileEncryption -tutorial_1243_li=\ allows changing the file encryption password or algorithm of a database. -tutorial_1244_code=Console -tutorial_1245_li=\ starts the browser based H2 Console. -tutorial_1246_code=ConvertTraceFile -tutorial_1247_li=\ converts a .trace.db file to a Java application and SQL script. -tutorial_1248_code=CreateCluster -tutorial_1249_li=\ creates a cluster from a standalone database. -tutorial_1250_code=DeleteDbFiles -tutorial_1251_li=\ deletes all files belonging to a database. -tutorial_1252_code=Recover -tutorial_1253_li=\ helps recovering a corrupted database. -tutorial_1254_code=Restore -tutorial_1255_li=\ restores a backup of a database. 
-tutorial_1256_code=RunScript -tutorial_1257_li=\ runs a SQL script against a database. -tutorial_1258_code=Script -tutorial_1259_li=\ allows converting a database to a SQL script for backup or migration. -tutorial_1260_code=Server -tutorial_1261_li=\ is used in the server mode to start a H2 server. -tutorial_1262_code=Shell -tutorial_1263_li=\ is a command line database tool. -tutorial_1264_p=\ The tools can also be called from an application by calling the main or another public method. For details, see the Javadoc documentation. -tutorial_1265_h2=The Shell Tool -tutorial_1266_p=\ The Shell tool is a simple interactive command line tool. To start it, type\: -tutorial_1267_p=\ You will be asked for a database URL, JDBC driver, user name, and password. The connection setting can also be set as command line parameters. After connecting, you will get the list of options. The built-in commands don't need to end with a semicolon, but SQL statements are only executed if the line ends with a semicolon ;. This allows to enter multi-line statements\: -tutorial_1268_p=\ By default, results are printed as a table. For results with many column, consider using the list mode\: -tutorial_1269_h2=Using OpenOffice Base -tutorial_1270_p=\ OpenOffice.org Base supports database access over the JDBC API. To connect to a H2 database using OpenOffice Base, you first need to add the JDBC driver to OpenOffice. The steps to connect to a H2 database are\: -tutorial_1271_li=Start OpenOffice Writer, go to [Tools], [Options] -tutorial_1272_li=Make sure you have selected a Java runtime environment in OpenOffice.org / Java -tutorial_1273_li=Click [Class Path...], [Add Archive...] 
-tutorial_1274_li=Select your h2 jar file (location is up to you, could be wherever you choose) -tutorial_1275_li=Click [OK] (as much as needed), stop OpenOffice (including the Quickstarter) -tutorial_1276_li=Start OpenOffice Base -tutorial_1277_li=Connect to an existing database; select [JDBC]; [Next] -tutorial_1278_li=Example datasource URL\: jdbc\:h2\:~/test -tutorial_1279_li=JDBC driver class\: org.h2.Driver -tutorial_1280_p=\ Now you can access the database stored in the current users home directory. -tutorial_1281_p=\ To use H2 in NeoOffice (OpenOffice without X11)\: -tutorial_1282_li=In NeoOffice, go to [NeoOffice], [Preferences] -tutorial_1283_li=Look for the page under [NeoOffice], [Java] -tutorial_1284_li=Click [Class Path], [Add Archive...] -tutorial_1285_li=Select your h2 jar file (location is up to you, could be wherever you choose) -tutorial_1286_li=Click [OK] (as much as needed), restart NeoOffice. -tutorial_1287_p=\ Now, when creating a new database using the "Database Wizard" \: -tutorial_1288_li=Click [File], [New], [Database]. -tutorial_1289_li=Select [Connect to existing database] and the select [JDBC]. Click next. -tutorial_1290_li=Example datasource URL\: jdbc\:h2\:~/test -tutorial_1291_li=JDBC driver class\: org.h2.Driver -tutorial_1292_p=\ Another solution to use H2 in NeoOffice is\: -tutorial_1293_li=Package the h2 jar within an extension package -tutorial_1294_li=Install it as a Java extension in NeoOffice -tutorial_1295_p=\ This can be done by create it using the NetBeans OpenOffice plugin. See also Extensions Development. -tutorial_1296_h2=Java Web Start / JNLP -tutorial_1297_p=\ When using Java Web Start / JNLP (Java Network Launch Protocol), permissions tags must be set in the .jnlp file, and the application .jar file must be signed. Otherwise, when trying to write to the file system, the following exception will occur\: java.security.AccessControlException\: access denied (java.io.FilePermission ... read). 
Example permission tags\: -tutorial_1298_h2=Using a Connection Pool -tutorial_1299_p=\ For H2, opening a connection is fast if the database is already open. Still, using a connection pool improves performance if you open and close connections a lot. A simple connection pool is included in H2. It is based on the Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, for example the Apache Commons DBCP. For H2, it is about twice as faster to get a connection from the built-in connection pool than to get one using DriverManager.getConnection().The build-in connection pool is used as follows\: -tutorial_1300_h2=Fulltext Search -tutorial_1301_p=\ H2 includes two fulltext search implementations. One is using Apache Lucene, and the other (the native implementation) stores the index data in special tables in the database. -tutorial_1302_h3=Using the Native Fulltext Search -tutorial_1303_p=\ To initialize, call\: -tutorial_1304_p=\ You need to initialize it in each database where you want to use it. Afterwards, you can create a fulltext index for a table using\: -tutorial_1305_p=\ PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. To search the index, use the following query\: -tutorial_1306_p=\ This will produce a result set that contains the query needed to retrieve the data\: -tutorial_1307_p=\ To drop an index on a table\: -tutorial_1308_p=\ To get the raw data, use FT_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). 
To join a table, use a join as in\: SELECT T.* FROM FT_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE\='TEST' AND T.ID\=FT.KEYS[0]; -tutorial_1309_p=\ You can also call the index from within a Java application\: -tutorial_1310_h3=Using the Apache Lucene Fulltext Search -tutorial_1311_p=\ To use the Apache Lucene full text search, you need the Lucene library in the classpath. Currently, Apache Lucene 3.6.2 is used for testing. Newer versions may work, however they are not tested. How to do that depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. To initialize the Lucene fulltext search in a database, call\: -tutorial_1312_p=\ You need to initialize it in each database where you want to use it. Afterwards, you can create a full text index for a table using\: -tutorial_1313_p=\ PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. To search the index, use the following query\: -tutorial_1314_p=\ This will produce a result set that contains the query needed to retrieve the data\: -tutorial_1315_p=\ To drop an index on a table (be warned that this will re-index all of the full-text indices for the entire database)\: -tutorial_1316_p=\ To get the raw data, use FTL_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in\: SELECT T.* FROM FTL_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE\='TEST' AND T.ID\=FT.KEYS[0]; -tutorial_1317_p=\ You can also call the index from within a Java application\: -tutorial_1318_p=\ The Lucene fulltext search supports searching in specific column only. Column names must be uppercase (except if the original columns are double quoted). 
For column names starting with an underscore (_), another underscore needs to be added. Example\: -tutorial_1319_h2=User-Defined Variables -tutorial_1320_p=\ This database supports user-defined variables. Variables start with @ and can be used wherever expressions or parameters are allowed. Variables are not persisted and session scoped, that means only visible from within the session in which they are defined. A value is usually assigned using the SET command\: -tutorial_1321_p=\ The value can also be changed using the SET() method. This is useful in queries\: -tutorial_1322_p=\ Variables that are not set evaluate to NULL. The data type of a user-defined variable is the data type of the value assigned to it, that means it is not necessary (or possible) to declare variable names before using them. There are no restrictions on the assigned values; large objects (LOBs) are supported as well. Rolling back a transaction does not affect the value of a user-defined variable. -tutorial_1323_h2=Date and Time -tutorial_1324_p=\ Date, time and timestamp values support ISO 8601 formatting, including time zone\: -tutorial_1325_p=\ If the time zone is not set, the value is parsed using the current time zone setting of the system. Date and time information is stored in H2 database files without time zone information. If the database is opened using another system time zone, the date and time will be the same. That means if you store the value '2000-01-01 12\:00\:00' in one time zone, then close the database and open the database again in a different time zone, you will also get '2000-01-01 12\:00\:00'. Please note that changing the time zone after the H2 driver is loaded is not supported. 
-tutorial_1326_h2=Using Spring -tutorial_1327_h3=Using the TCP Server -tutorial_1328_p=\ Use the following configuration to start and stop the H2 TCP server using the Spring Framework\: -tutorial_1329_p=\ The destroy-method will help prevent exceptions on hot-redeployment or when restarting the server. -tutorial_1330_h3=Error Code Incompatibility -tutorial_1331_p=\ There is an incompatibility with the Spring JdbcTemplate and H2 version 1.3.154 and newer, because of a change in the error code. This will cause the JdbcTemplate to not detect a duplicate key condition, and so a DataIntegrityViolationException is thrown instead of DuplicateKeyException. See also the issue SPR-8235. The workaround is to add the following XML file to the root of the classpath\: -tutorial_1332_h2=OSGi -tutorial_1333_p=\ The standard H2 jar can be dropped in as a bundle in an OSGi container. H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification. The H2 Data Source Factory service is registered with the following properties\: OSGI_JDBC_DRIVER_CLASS\=org.h2.Driver and OSGI_JDBC_DRIVER_NAME\=H2 JDBC Driver. The OSGI_JDBC_DRIVER_VERSION property reflects the version of the driver as is. -tutorial_1334_p=\ The following standard configuration properties are supported\: JDBC_USER, JDBC_PASSWORD, JDBC_DESCRIPTION, JDBC_DATASOURCE_NAME, JDBC_NETWORK_PROTOCOL, JDBC_URL, JDBC_SERVER_NAME, JDBC_PORT_NUMBER. Any other standard property will be rejected. Non-standard properties will be passed on to H2 in the connection URL. -tutorial_1335_h2=Java Management Extension (JMX) -tutorial_1336_p=\ Management over JMX is supported, but not enabled by default. To enable JMX, append ;JMX\=TRUE to the database URL when opening the database. Various tools support JMX, one such tool is the jconsole. When opening the jconsole, connect to the process where the database is open (when using the server mode, you need to connect to the server process). 
Then go to the MBeans section. Under org.h2 you will find one entry per database. The object name of the entry is the database short name, plus the path (each colon is replaced with an underscore character). -tutorial_1337_p=\ The following attributes and operations are supported\: -tutorial_1338_code=CacheSize -tutorial_1339_li=\: the cache size currently in use in KB. -tutorial_1340_code=CacheSizeMax -tutorial_1341_li=\ (read/write)\: the maximum cache size in KB. -tutorial_1342_code=Exclusive -tutorial_1343_li=\: whether this database is open in exclusive mode or not. -tutorial_1344_code=FileReadCount -tutorial_1345_li=\: the number of file read operations since the database was opened. -tutorial_1346_code=FileSize -tutorial_1347_li=\: the file size in KB. -tutorial_1348_code=FileWriteCount -tutorial_1349_li=\: the number of file write operations since the database was opened. -tutorial_1350_code=FileWriteCountTotal -tutorial_1351_li=\: the number of file write operations since the database was created. -tutorial_1352_code=LogMode -tutorial_1353_li=\ (read/write)\: the current transaction log mode. See SET LOG for details. -tutorial_1354_code=Mode -tutorial_1355_li=\: the compatibility mode (REGULAR if no compatibility mode is used). -tutorial_1356_code=MultiThreaded -tutorial_1357_li=\: true if multi-threaded is enabled. -tutorial_1358_code=Mvcc -tutorial_1359_li=\: true if MVCC is enabled. -tutorial_1360_code=ReadOnly -tutorial_1361_li=\: true if the database is read-only. -tutorial_1362_code=TraceLevel -tutorial_1363_li=\ (read/write)\: the file trace level. -tutorial_1364_code=Version -tutorial_1365_li=\: the database version in use. -tutorial_1366_code=listSettings -tutorial_1367_li=\: list the database settings. -tutorial_1368_code=listSessions -tutorial_1369_li=\: list the open sessions, including currently executing statement (if any) and locked tables (if any). 
-tutorial_1370_p=\ To enable JMX, you may need to set the system properties com.sun.management.jmxremote and com.sun.management.jmxremote.port as required by the JVM. diff --git a/h2/src/docsrc/textbase/_messages_en.prop b/h2/src/docsrc/textbase/_messages_en.prop deleted file mode 100644 index 06f821581c..0000000000 --- a/h2/src/docsrc/textbase/_messages_en.prop +++ /dev/null @@ -1,184 +0,0 @@ -.translator=Thomas Mueller -02000=No data is available -07001=Invalid parameter count for {0}, expected count: {1} -08000=Error opening database: {0} -21S02=Column count does not match -22001=Value too long for column {0}: {1} -22003=Numeric value out of range: {0} -22004=Numeric value out of range: {0} in column {1} -22007=Cannot parse {0} constant {1} -22012=Division by zero: {0} -22013=Invalid PRECEDING or FOLLOWING size in window function: {0} -22018=Data conversion error converting {0} -22025=Error in LIKE ESCAPE: {0} -22030=Value not permitted for column {0}: {1} -22031=Value not a member of enumerators {0}: {1} -22032=Empty enums are not allowed -22033=Duplicate enumerators are not allowed for enum types: {0} -23502=NULL not allowed for column {0} -23503=Referential integrity constraint violation: {0} -23505=Unique index or primary key violation: {0} -23506=Referential integrity constraint violation: {0} -23507=No default value is set for column {0} -23513=Check constraint violation: {0} -23514=Check constraint invalid: {0} -28000=Wrong user name or password -40001=Deadlock detected. The current transaction was rolled back. 
Details: {0} -42000=Syntax error in SQL statement {0} -42001=Syntax error in SQL statement {0}; expected {1} -42S01=Table {0} already exists -42S02=Table {0} not found -42S11=Index {0} already exists -42S12=Index {0} not found -42S21=Duplicate column name {0} -42S22=Column {0} not found -42S31=Identical expressions should be used; expected {0}, found {1} -57014=Statement was canceled or the session timed out -90000=Function {0} must return a result set -90001=Method is not allowed for a query. Use execute or executeQuery instead of executeUpdate -90002=Method is only allowed for a query. Use execute or executeUpdate instead of executeQuery -90003=Hexadecimal string with odd number of characters: {0} -90004=Hexadecimal string contains non-hex character: {0} -90006=Sequence {0} has run out of numbers -90007=The object is already closed -90008=Invalid value {0} for parameter {1} -90009=Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) -90010=Invalid TO_CHAR format {0} -90011=A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. -90012=Parameter {0} is not set -90013=Database {0} not found -90014=Error parsing {0} -90015=SUM or AVG on wrong data type for {0} -90016=Column {0} must be in the GROUP BY list -90017=Attempt to define a second primary key -90018=The connection was not closed by the application and is garbage collected -90019=Cannot drop the current user -90020=Database may be already in use: {0}. 
Possible solutions: close all other connection(s); use the server mode -90021=This combination of database settings is not supported: {0} -90022=Function {0} not found -90023=Column {0} must not be nullable -90024=Error while renaming file {0} to {1} -90025=Cannot delete file {0} -90026=Serialization failed, cause: {0} -90027=Deserialization failed, cause: {0} -90028=IO Exception: {0} -90029=Currently not on an updatable row -90030=File corrupted while reading record: {0}. Possible solution: use the recovery tool -90031=IO Exception: {0}; {1} -90032=User {0} not found -90033=User {0} already exists -90034=Log file error: {0}, cause: {1} -90035=Sequence {0} already exists -90036=Sequence {0} not found -90037=View {0} not found -90038=View {0} already exists -90039=This CLOB or BLOB reference timed out: {0} -90040=Admin rights are required for this operation -90041=Trigger {0} already exists -90042=Trigger {0} not found -90043=Error creating or initializing trigger {0} object, class {1}, cause: {2}; see root cause for details -90044=Error executing trigger {0}, class {1}, cause : {2}; see root cause for details -90045=Constraint {0} already exists -90046=URL format error; must be {0} but is {1} -90047=Version mismatch, driver version is {0} but server version is {1} -90048=Unsupported database file version or invalid file header in file {0} -90049=Encryption error in file {0} -90050=Wrong password format, must be: file password user password -90051=Scale(${0}) must not be bigger than precision({1}) -90052=Subquery is not a single column query -90053=Scalar subquery contains more than one row -90054=Invalid use of aggregate function {0} -90055=Unsupported cipher {0} -90056=Function {0}: Invalid date format: {1} -90057=Constraint {0} not found -90058=Commit or rollback is not allowed within a trigger -90059=Ambiguous column name {0} -90060=Unsupported file lock method {0} -90061=Exception opening port {0} (port may be in use), cause: {1} -90062=Error while creating 
file {0} -90063=Savepoint is invalid: {0} -90064=Savepoint is unnamed -90065=Savepoint is named -90066=Duplicate property {0} -90067=Connection is broken: {0} -90068=Order by expression {0} must be in the result list in this case -90069=Role {0} already exists -90070=Role {0} not found -90071=User or role {0} not found -90072=Roles and rights cannot be mixed -90073=Matching Java methods must have different parameter counts: {0} and {1} -90074=Role {0} already granted -90075=Column is part of the index {0} -90076=Function alias {0} already exists -90077=Function alias {0} not found -90078=Schema {0} already exists -90079=Schema {0} not found -90080=Schema name must match -90081=Column {0} contains null values -90082=Sequence {0} belongs to a table -90083=Column may be referenced by {0} -90084=Cannot drop last column {0} -90085=Index {0} belongs to constraint {1} -90086=Class {0} not found -90087=Method {0} not found -90088=Unknown mode {0} -90089=Collation cannot be changed because there is a data table: {0} -90090=Schema {0} cannot be dropped -90091=Role {0} cannot be dropped -90093=Clustering error - database currently runs in standalone mode -90094=Clustering error - database currently runs in cluster mode, server list: {0} -90095=String format error: {0} -90096=Not enough rights for object {0} -90097=The database is read only -90098=The database has been closed -90099=Error setting database event listener {0}, cause: {1} -90101=Wrong XID format: {0} -90102=Unsupported compression options: {0} -90103=Unsupported compression algorithm: {0} -90104=Compression error -90105=Exception calling user-defined function: {0} -90106=Cannot truncate {0} -90107=Cannot drop {0} because {1} depends on it -90108=Out of memory. 
-90109=View {0} is invalid: {1} -90110=Comparing ARRAY to scalar value -90111=Error accessing linked table with SQL statement {0}, cause: {1} -90112=Row not found when trying to delete from index {0} -90113=Unsupported connection setting {0} -90114=Constant {0} already exists -90115=Constant {0} not found -90116=Literals of this kind are not allowed -90117=Remote connections to this server are not allowed, see -tcpAllowOthers -90118=Cannot drop table {0} -90119=Domain {0} already exists -90120=Domain {0} not found -90121=Database is already closed (to disable automatic closing at VM shutdown, add ";DB_CLOSE_ON_EXIT=FALSE" to the db URL) -90122=The WITH TIES clause is not allowed without a corresponding ORDER BY clause. -90123=Cannot mix indexed and non-indexed parameters -90124=File not found: {0} -90125=Invalid class, expected {0} but got {1} -90126=Database is not persistent -90127=The result set is not updatable. The query must select all columns from a unique key. Only one table may be selected. -90128=The result set is not scrollable and can not be reset. You may need to use conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ..). -90129=Transaction {0} not found -90130=This method is not allowed for a prepared statement; use a regular statement instead. -90131=Concurrent update in table {0}: another transaction has updated or deleted the same row -90132=Aggregate {0} not found -90133=Cannot change the setting {0} when the database is already open -90134=Access to the class {0} is denied -90135=The database is open in exclusive mode; can not open additional connections -90136=Window not found: {0} -90137=Can only assign to a variable, not to: {0} -90138=Invalid database name: {0} -90139=The public static Java method was not found: {0} -90140=The result set is readonly. You may need to use conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 
-90141=Serializer cannot be changed because there is a data table: {0} -90142=Step size must not be zero -90143=Row {1} not found in primary index {0} -90144=Authenticator not enabled on database {0} -90145=FOR UPDATE is not allowed in DISTINCT or grouped select -90146=Database {0} not found, and IFEXISTS=true, so we can't auto-create it -HY000=General error: {0} -HY004=Unknown data type: {0} -HYC00=Feature not supported: {0} -HYT00=Timeout trying to lock table {0} diff --git a/h2/src/docsrc/textbase/_text_en.prop b/h2/src/docsrc/textbase/_text_en.prop deleted file mode 100644 index 792bbb2859..0000000000 --- a/h2/src/docsrc/textbase/_text_en.prop +++ /dev/null @@ -1,163 +0,0 @@ -.translator=Thomas Mueller -a.help=Help -a.language=English -a.lynxNotSupported=Sorry, Lynx not supported yet -a.password=Password -a.remoteConnectionsDisabled=Sorry, remote connections ('webAllowOthers') are disabled on this server. -a.title=H2 Console -a.tools=Tools -a.user=User Name -admin.executing=Executing -admin.ip=IP -admin.lastAccess=Last Access -admin.lastQuery=Last Query -admin.no=no -admin.notConnected=not connected -admin.url=URL -admin.yes=yes -adminAllow=Allowed clients -adminConnection=Connection security -adminHttp=Use unencrypted HTTP connections -adminHttps=Use encrypted SSL (HTTPS) connections -adminLocal=Only allow local connections -adminLogin=Administration Login -adminLoginCancel=Cancel -adminLoginOk=OK -adminLogout=Logout -adminOthers=Allow connections from other computers -adminPort=Port number -adminPortWeb=Web server port number -adminRestart=Changes take effect after restarting the server. -adminSave=Save -adminSessions=Active Sessions -adminShutdown=Shutdown -adminTitle=H2 Console Preferences -adminTranslateHelp=Translate or improve the translation of the H2 Console. 
-adminTranslateStart=Translate -helpAction=Action -helpAddAnotherRow=Add another row -helpAddDrivers=Adding Database Drivers -helpAddDriversText=Additional database drivers can be registered by adding the Jar file location of the driver to the environment variables H2DRIVERS or CLASSPATH. Example (Windows): to add the database driver library C:/Programs/hsqldb/lib/hsqldb.jar, set the environment variable H2DRIVERS to C:/Programs/hsqldb/lib/hsqldb.jar. -helpAddRow=Add a new row -helpCommandHistory=Shows the Command History -helpCreateTable=Create a new table -helpDeleteRow=Remove a row -helpDisconnect=Disconnects from the database -helpDisplayThis=Displays this Help Page -helpDropTable=Delete the table if it exists -helpExecuteCurrent=Executes the current SQL statement -helpExecuteSelected=Executes the SQL statement defined by the text selection -helpIcon=Icon -helpImportantCommands=Important Commands -helpOperations=Operations -helpQuery=Query the table -helpSampleSQL=Sample SQL Script -helpStatements=SQL statements -helpUpdate=Change data in a row -helpWithColumnsIdName=with ID and NAME columns -key.alt=Alt -key.ctrl=Ctrl -key.enter=Enter -key.shift=Shift -key.space=Space -login.connect=Connect -login.driverClass=Driver Class -login.driverNotFound=Database driver not found
          See in the Help for how to add drivers -login.goAdmin=Preferences -login.jdbcUrl=JDBC URL -login.language=Language -login.login=Login -login.remove=Remove -login.save=Save -login.savedSetting=Saved Settings -login.settingName=Setting Name -login.testConnection=Test Connection -login.testSuccessful=Test successful -login.welcome=H2 Console -result.1row=1 row -result.autoCommitOff=Auto commit is now OFF -result.autoCommitOn=Auto commit is now ON -result.bytes=bytes -result.characters=characters -result.maxrowsSet=Max rowcount is set -result.noRows=no rows -result.noRunningStatement=There is currently no running statement -result.rows=rows -result.statementWasCanceled=The statement was canceled -result.updateCount=Update count -resultEdit.action=Action -resultEdit.add=Add -resultEdit.cancel=Cancel -resultEdit.delete=Delete -resultEdit.edit=Edit -resultEdit.editResult=Edit -resultEdit.save=Save -toolbar.all=All -toolbar.autoCommit=Auto commit -toolbar.autoComplete=Auto complete -toolbar.autoComplete.full=Full -toolbar.autoComplete.normal=Normal -toolbar.autoComplete.off=Off -toolbar.autoSelect=Auto select -toolbar.autoSelect.off=Off -toolbar.autoSelect.on=On -toolbar.cancelStatement=Cancel the current statement -toolbar.clear=Clear -toolbar.commit=Commit -toolbar.disconnect=Disconnect -toolbar.history=Command history -toolbar.maxRows=Max rows -toolbar.refresh=Refresh -toolbar.rollback=Rollback -toolbar.run=Run -toolbar.runSelected=Run Selected -toolbar.sqlStatement=SQL statement -tools.backup=Backup -tools.backup.help=Creates a backup of a database. -tools.changeFileEncryption=ChangeFileEncryption -tools.changeFileEncryption.help=Allows changing the database file encryption password and algorithm. -tools.cipher=Cipher (AES or XTEA) -tools.commandLine=Command line -tools.convertTraceFile=ConvertTraceFile -tools.convertTraceFile.help=Converts a .trace.db file to a Java application and SQL script. 
-tools.createCluster=CreateCluster -tools.createCluster.help=Creates a cluster from a standalone database. -tools.databaseName=Database name -tools.decryptionPassword=Decryption password -tools.deleteDbFiles=DeleteDbFiles -tools.deleteDbFiles.help=Deletes all files belonging to a database. -tools.directory=Directory -tools.encryptionPassword=Encryption password -tools.javaDirectoryClassName=Java directory and class name -tools.recover=Recover -tools.recover.help=Helps recovering a corrupted database. -tools.restore=Restore -tools.restore.help=Restores a database backup. -tools.result=Result -tools.run=Run -tools.runScript=RunScript -tools.runScript.help=Runs a SQL script. -tools.script=Script -tools.script.help=Allows to convert a database to a SQL script for backup or migration. -tools.scriptFileName=Script file name -tools.serverList=Server list -tools.sourceDatabaseName=Source database name -tools.sourceDatabaseURL=Source database URL -tools.sourceDirectory=Source directory -tools.sourceFileName=Source file name -tools.sourceScriptFileName=Source script file name -tools.targetDatabaseName=Target database name -tools.targetDatabaseURL=Target database URL -tools.targetDirectory=Target directory -tools.targetFileName=Target file name -tools.targetScriptFileName=Target script file name -tools.traceFileName=Trace file name -tree.admin=Admin -tree.current=Current value -tree.hashed=Hashed -tree.increment=Increment -tree.indexes=Indexes -tree.nonUnique=Non unique -tree.sequences=Sequences -tree.unique=Unique -tree.users=Users diff --git a/h2/src/installer/buildRelease.bat b/h2/src/installer/buildRelease.bat index 144888313d..5a82084ff2 100644 --- a/h2/src/installer/buildRelease.bat +++ b/h2/src/installer/buildRelease.bat @@ -11,9 +11,8 @@ mkdir ..\h2web rmdir /s /q bin 2>nul rmdir /s /q temp 2>nul -call java16 >nul 2>nul call build -quiet compile -call build -quiet spellcheck javadocImpl jarClient +call build -quiet spellcheck javadocImpl call build -quiet clean 
compile installer mavenDeployCentral rem call build -quiet compile benchmark diff --git a/h2/src/installer/buildRelease.sh b/h2/src/installer/buildRelease.sh index 042a55d174..8782e23845 100755 --- a/h2/src/installer/buildRelease.sh +++ b/h2/src/installer/buildRelease.sh @@ -8,7 +8,7 @@ rm -rf bin rm -rf temp ./build.sh -quiet compile -./build.sh -quiet spellcheck javadocImpl jarClient +./build.sh -quiet spellcheck javadocImpl ./build.sh -quiet clean compile installer mavenDeployCentral # ./build.sh -quiet compile benchmark diff --git a/h2/src/installer/client/MANIFEST.MF b/h2/src/installer/client/MANIFEST.MF deleted file mode 100644 index 1a5b868211..0000000000 --- a/h2/src/installer/client/MANIFEST.MF +++ /dev/null @@ -1,39 +0,0 @@ -Manifest-Version: 1.0 -Implementation-Title: H2 Database Client -Implementation-URL: http://www.h2database.com -Implementation-Version: ${version} -Build-Jdk: ${buildJdk} -Created-By: ${createdBy} -Automatic-Module-Name: com.h2database.client -Bundle-ManifestVersion: 2 -Bundle-Name: H2 Database Client -Bundle-SymbolicName: com.h2database.client -Bundle-Vendor: H2 Group -Bundle-Version: ${version} -Bundle-License: http://www.h2database.com/html/license.html -Bundle-Category: jdbc -Multi-Release: true -Import-Package: javax.crypto, - javax.crypto.spec, - javax.naming;resolution:=optional, - javax.naming.spi;resolution:=optional, - javax.net, - javax.net.ssl, - javax.sql, - javax.transaction.xa;resolution:=optional, - javax.xml.parsers;resolution:=optional, - javax.xml.stream;resolution:=optional, - javax.xml.transform;resolution:=optional, - javax.xml.transform.dom;resolution:=optional, - javax.xml.transform.sax;resolution:=optional, - javax.xml.transform.stax;resolution:=optional, - javax.xml.transform.stream;resolution:=optional, - org.w3c.dom;resolution:=optional, - org.xml.sax;resolution:=optional, - org.locationtech.jts.geom;version="1.15.0";resolution:=optional, - org.locationtech.jts.io;version="1.15.0";resolution:=optional 
-Export-Package: org.h2;version="${version}", - org.h2.api;version="${version}", - org.h2.jdbc;version="${version}", - org.h2.jdbcx;version="${version}", - org.h2.tools;version="${version}" diff --git a/h2/src/installer/h2.nsi b/h2/src/installer/h2.nsi index d1fa6c380e..ffaf509fd9 100644 --- a/h2/src/installer/h2.nsi +++ b/h2/src/installer/h2.nsi @@ -1,3 +1,4 @@ + Unicode True !include "MUI.nsh" SetCompressor /SOLID lzma diff --git a/h2/src/installer/h2.sh b/h2/src/installer/h2.sh old mode 100644 new mode 100755 diff --git a/h2/src/installer/mvstore/MANIFEST.MF b/h2/src/installer/mvstore/MANIFEST.MF index 8bd928821d..a470ceb294 100644 --- a/h2/src/installer/mvstore/MANIFEST.MF +++ b/h2/src/installer/mvstore/MANIFEST.MF @@ -1,18 +1,18 @@ Manifest-Version: 1.0 Implementation-Title: H2 MVStore -Implementation-URL: http://www.h2database.com +Implementation-URL: https://h2database.com Implementation-Version: ${version} Build-Jdk: ${buildJdk} Created-By: ${createdBy} Automatic-Module-Name: com.h2database.mvstore Bundle-Description: The MVStore is a persistent, log structured key-value store. 
-Bundle-DocURL: http://h2database.com/html/mvstore.html +Bundle-DocURL: https://h2database.com/html/mvstore.html Bundle-ManifestVersion: 2 Bundle-Name: H2 MVStore Bundle-SymbolicName: com.h2database.mvstore Bundle-Vendor: H2 Group Bundle-Version: ${version} -Bundle-License: http://www.h2database.com/html/license.html +Bundle-License: https://h2database.com/html/license.html Bundle-Category: utility Multi-Release: true Import-Package: javax.crypto, diff --git a/h2/src/installer/pom-mvstore-template.xml b/h2/src/installer/pom-mvstore-template.xml index f467e90074..2a2b2cede1 100644 --- a/h2/src/installer/pom-mvstore-template.xml +++ b/h2/src/installer/pom-mvstore-template.xml @@ -5,12 +5,17 @@ @version@ jar H2 MVStore - http://www.h2database.com/html/mvstore.html + https://h2database.com/html/mvstore.html H2 MVStore - MPL 2.0 or EPL 1.0 - http://h2database.com/html/license.html + MPL 2.0 + https://www.mozilla.org/en-US/MPL/2.0/ + repo + + + EPL 1.0 + https://opensource.org/licenses/eclipse-1.0.php repo diff --git a/h2/src/installer/pom-template.xml b/h2/src/installer/pom-template.xml index d9902ddc16..132a1a8f91 100644 --- a/h2/src/installer/pom-template.xml +++ b/h2/src/installer/pom-template.xml @@ -5,12 +5,17 @@ @version@ jar H2 Database Engine - http://www.h2database.com + https://h2database.com H2 Database Engine - MPL 2.0 or EPL 1.0 - http://h2database.com/html/license.html + MPL 2.0 + https://www.mozilla.org/en-US/MPL/2.0/ + repo + + + EPL 1.0 + https://opensource.org/licenses/eclipse-1.0.php repo diff --git a/h2/src/installer/release.txt b/h2/src/installer/release.txt index 441d7ae290..54bc01212d 100644 --- a/h2/src/installer/release.txt +++ b/h2/src/installer/release.txt @@ -1,9 +1,5 @@ # Checklist for a release -## Switch to Java 1.7 - - . setjava.sh 1.7 - ## Formatting, Spellchecking, Javadocs git pull @@ -15,32 +11,11 @@ Fix typos, add new words to dictionary.txt: Add documentation for all public methods. 
Make methods private if possible: - ./build.sh javadocImpl + ./build.sh clean compile javadocImpl Ensure lines are not overly long: - ./build.sh docs - -## JDBC Client Jar File Size Verification - -The JDBC client is supposed to not have dependencies to the database engine. -To verify, run - - ./build.sh clean jarClient - -If this fails with eg. "Expected file size 400 - 600 KB, got: 1687", then -find out where the dependency is, and resolve. As follows: -start by renaming Database to Database2: - - mv src/main/org/h2/engine/Database.java src/main/org/h2/engine/Database2.java - ./build.sh clean jarClient - -This will fail, the first error is for example can not compile Session because Database was not found. -So rename Session to Session2 and try again. -This will fail again, the first error is different, now for example can not compile ResultInterface -because Session was not found. Now, ResultInterface should not depend on the Session. -So this needs to be fixed (the JDBC API shouldn't indirectly depend on it). -After everything is resolved, rename the classes back. + ./build.sh clean compile docs ## MVStore Jar File Size Verification @@ -53,17 +28,17 @@ The file size should be about 300 KB: ## Changing Version Numbers Update org.h2.engine.Constants.java: - if the last build was stable (the normal case): - set BUILD_DATE_STABLE to current BUILD_DATE - set BUILD_ID_STABLE to current BUILD_ID change the version and build number: set BUILD_DATE to today - increment BUILD_ID + increment BUILD_ID, the value must be even (for example, 202) + set VERSION_MAJOR / VERSION_MINOR to the new version number if the last TCP_PROTOCOL_VERSION_## doesn't have a release date set it to current BUILD_DATE + check and update if necessary links to the latest releases in previous + series of releases and their checksums in download.html -Update h2/pom.xml. - set ...-SNAPSHOT to the next version +Update README.md. 
+ set version to the new version Update changelog.html: * create a new "Next Version (unreleased)" with an empty list @@ -72,9 +47,12 @@ Update changelog.html: Update newsfeed.sql: * add new version, for example: - * (146, '1.4.197', '2017-06-10'), + * (150, '1.4.200', '2019-10-14'), * remove oldest entry in that list +Update download-archive.html: + * add new version under Distribution section + ## Skipped * Minor version change: change sourceError.html and source.html @@ -86,8 +64,7 @@ The following can be skipped currently; benchmarks should probably be removed: ## Build the Release -Switch to Java 1.7. -In Build.java, comment "-Xdoclint:none", but don't commit that change. +In Build.java, comment "-Xdoclint:...", but don't commit that change. Run the following commands: Non-Windows: @@ -116,13 +93,12 @@ Github: create a release. Newsletter: send (always to BCC!), the following: - h2-database-jp@googlegroups.com; h2-database@googlegroups.com; h2database-news@googlegroups.com; ... + h2-database@googlegroups.com; h2database-news@googlegroups.com; ... Create tweet at http://twitter.com ## Sign files and publish files on Maven Central -Switch to Java 1.7. In Build.java, comment "-Xdoclint:none", but don't commit that change. ./build.sh clean compile jar mavenDeployCentral @@ -153,4 +129,10 @@ In Build.java, comment "-Xdoclint:none", but don't commit that change. Update statistics. -Change version in pom.xml, commit. +Change version in pom.xml, commit, add version-*.*.*** tag. + +Update org.h2.engine.Constants.java: + increment BUILD_ID again, the value must be odd (for example, 203) +Update h2/pom.xml. + set ...-SNAPSHOT to the next version (with this odd third number) +Commit. 
diff --git a/h2/src/installer/small/MANIFEST.MF b/h2/src/installer/small/MANIFEST.MF deleted file mode 100644 index b83fb48ea7..0000000000 --- a/h2/src/installer/small/MANIFEST.MF +++ /dev/null @@ -1,48 +0,0 @@ -Manifest-Version: 1.0 -Implementation-Title: H2 Database embedded Engine -Implementation-URL: http://www.h2database.com -Implementation-Version: ${version} -Build-Jdk: ${buildJdk} -Created-By: ${createdBy} -Automatic-Module-Name: com.h2database.small -Bundle-Activator: org.h2.util.DbDriverActivator -Bundle-ManifestVersion: 2 -Bundle-Name: H2 Database embedded Engine -Bundle-SymbolicName: com.h2database.small -Bundle-Vendor: H2 Group -Bundle-Version: ${version} -Bundle-License: http://www.h2database.com/html/license.html -Bundle-Category: jdbc -Multi-Release: true -Import-Package: javax.crypto, - javax.crypto.spec, - javax.management, - javax.naming;resolution:=optional, - javax.naming.directory;resolution:=optional, - javax.net, - javax.net.ssl, - javax.script;resolution:=optional, - javax.security.auth.callback;resolution:=optional, - javax.security.auth.login;resolution:=optional, - javax.sql, - javax.tools;resolution:=optional, - javax.xml.parsers;resolution:=optional, - javax.xml.stream;resolution:=optional, - javax.xml.transform;resolution:=optional, - javax.xml.transform.dom;resolution:=optional, - javax.xml.transform.sax;resolution:=optional, - javax.xml.transform.stax;resolution:=optional, - javax.xml.transform.stream;resolution:=optional, - org.w3c.dom;resolution:=optional, - org.xml.sax;resolution:=optional, - org.xml.sax.helpers;resolution:=optional, - org.locationtech.jts.geom;version="1.15.0";resolution:=optional, - org.locationtech.jts.io;version="1.15.0";resolution:=optional, - org.osgi.framework;version="1.5", - org.osgi.service.jdbc;version="1.0";resolution:=optional, - org.slf4j;version="[1.6.0,1.7.0)";resolution:=optional -Export-Package: org.h2;version="${version}", - org.h2.api;version="${version}", - org.h2.jdbc;version="${version}", - 
org.h2.tools;version="${version}" -Provide-Capability: osgi.service;objectClass:List=org.osgi.service.jdbc.DataSourceFactory diff --git a/h2/src/installer/source-manifest.mf b/h2/src/installer/source-manifest.mf index 63022f8fe7..bb3c215b5a 100644 --- a/h2/src/installer/source-manifest.mf +++ b/h2/src/installer/source-manifest.mf @@ -1,7 +1,7 @@ Manifest-Version: 1.0 Bundle-ManifestVersion: 2 Bundle-Name: H2 Database Engine Sources -Bundle-SymbolicName: org.h2.source +Bundle-SymbolicName: com.h2database.source Bundle-Vendor: H2 Group Bundle-Version: ${version} -Eclipse-SourceBundle: org.h2;version="${version}" \ No newline at end of file +Eclipse-SourceBundle: com.h2database;version="${version}" diff --git a/h2/src/installer/source-mvstore-manifest.mf b/h2/src/installer/source-mvstore-manifest.mf new file mode 100644 index 0000000000..48c80436f9 --- /dev/null +++ b/h2/src/installer/source-mvstore-manifest.mf @@ -0,0 +1,7 @@ +Manifest-Version: 1.0 +Bundle-ManifestVersion: 2 +Bundle-Name: H2 MVStore Sources +Bundle-SymbolicName: com.h2database.mvstore.source +Bundle-Vendor: H2 Group +Bundle-Version: ${version} +Eclipse-SourceBundle: com.h2database.mvstore;version="${version}" diff --git a/h2/src/java10/precompiled/org/h2/util/Utils10.class b/h2/src/java10/precompiled/org/h2/util/Utils10.class new file mode 100644 index 0000000000..1ae38e89d7 Binary files /dev/null and b/h2/src/java10/precompiled/org/h2/util/Utils10.class differ diff --git a/h2/src/java10/src/org/h2/util/Utils10.java b/h2/src/java10/src/org/h2/util/Utils10.java new file mode 100644 index 0000000000..2ba397e893 --- /dev/null +++ b/h2/src/java10/src/org/h2/util/Utils10.java @@ -0,0 +1,72 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.util; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.net.Socket; +import java.nio.charset.Charset; + +import jdk.net.ExtendedSocketOptions; + +/** + * Utilities with specialized implementations for Java 10 and later versions. + * + * This class contains implementations for Java 10 and later versions. + */ +public final class Utils10 { + + /** + * Converts the buffer's contents into a string by decoding the bytes using + * the specified {@link java.nio.charset.Charset charset}. + * + * @param baos + * the buffer to decode + * @param charset + * the charset to use + * @return the decoded string + */ + public static String byteArrayOutputStreamToString(ByteArrayOutputStream baos, Charset charset) { + return baos.toString(charset); + } + + /** + * Returns the value of TCP_QUICKACK option. + * + * @param socket + * the socket + * @return the current value of TCP_QUICKACK option + * @throws IOException + * on I/O exception + * @throws UnsupportedOperationException + * if TCP_QUICKACK is not supported + */ + public static boolean getTcpQuickack(Socket socket) throws IOException { + return socket.getOption(ExtendedSocketOptions.TCP_QUICKACK); + } + + /** + * Sets the value of TCP_QUICKACK option. + * + * @param socket + * the socket + * @param value + * the value to set + * @return whether operation was successful + */ + public static boolean setTcpQuickack(Socket socket, boolean value) { + try { + socket.setOption(ExtendedSocketOptions.TCP_QUICKACK, value); + return true; + } catch (Throwable t) { + return false; + } + } + + private Utils10() { + } + +} diff --git a/h2/src/java10/src/org/h2/util/package.html b/h2/src/java10/src/org/h2/util/package.html new file mode 100644 index 0000000000..5860dd0957 --- /dev/null +++ b/h2/src/java10/src/org/h2/util/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

          + +Internal utility classes reimplemented for Java 10 and later versions. + +

          \ No newline at end of file diff --git a/h2/src/java9/precompiled/org/h2/util/CurrentTimestamp.class b/h2/src/java9/precompiled/org/h2/util/CurrentTimestamp.class deleted file mode 100644 index f86c0b4f74..0000000000 Binary files a/h2/src/java9/precompiled/org/h2/util/CurrentTimestamp.class and /dev/null differ diff --git a/h2/src/java9/src/org/h2/util/Bits.java b/h2/src/java9/src/org/h2/util/Bits.java index d422a25cb0..fc323a8abf 100644 --- a/h2/src/java9/src/org/h2/util/Bits.java +++ b/h2/src/java9/src/org/h2/util/Bits.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/java9/src/org/h2/util/CurrentTimestamp.java b/h2/src/java9/src/org/h2/util/CurrentTimestamp.java deleted file mode 100644 index 7241a2c3a1..0000000000 --- a/h2/src/java9/src/org/h2/util/CurrentTimestamp.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.util; - -import java.time.Instant; - -import org.h2.value.ValueTimestampTimeZone; - -public final class CurrentTimestamp { - - /** - * Returns current timestamp. - * - * @return current timestamp - */ - public static ValueTimestampTimeZone get() { - Instant now = Instant.now(); - long second = now.getEpochSecond(); - int nano = now.getNano(); - /* - * This code intentionally does not support properly dates before UNIX - * epoch and time zone offsets with seconds because such support is not - * required for current dates. 
- */ - int offsetSec = DateTimeUtils.getTimeZoneOffset(second * 1_000 + nano / 1_000_000) / 1000; - second += offsetSec; - return ValueTimestampTimeZone.fromDateValueAndNanos( - DateTimeUtils.dateValueFromAbsoluteDay(second / DateTimeUtils.SECONDS_PER_DAY), - second % DateTimeUtils.SECONDS_PER_DAY * 1_000_000_000 + nano, (short) (offsetSec / 60)); - } - - private CurrentTimestamp() { - } - -} diff --git a/h2/src/java9/src/org/h2/util/package.html b/h2/src/java9/src/org/h2/util/package.html index 6514a84e97..9ef3d9ca4e 100644 --- a/h2/src/java9/src/org/h2/util/package.html +++ b/h2/src/java9/src/org/h2/util/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/META-INF/MANIFEST.MF b/h2/src/main/META-INF/MANIFEST.MF index 62136bc25f..c4a0ae3b15 100644 --- a/h2/src/main/META-INF/MANIFEST.MF +++ b/h2/src/main/META-INF/MANIFEST.MF @@ -1,6 +1,6 @@ Manifest-Version: 1.0 Implementation-Title: H2 Database Engine -Implementation-URL: http://www.h2database.com +Implementation-URL: https://h2database.com Implementation-Version: ${version} Build-Jdk: ${buildJdk} Created-By: ${createdBy} @@ -12,7 +12,7 @@ Bundle-Name: H2 Database Engine Bundle-SymbolicName: com.h2database Bundle-Vendor: H2 Group Bundle-Version: ${version} -Bundle-License: http://www.h2database.com/html/license.html +Bundle-License: https://h2database.com/html/license.html Bundle-Category: jdbc Multi-Release: true Import-Package: javax.crypto, @@ -28,6 +28,8 @@ Import-Package: javax.crypto, javax.security.auth.login;resolution:=optional, javax.servlet;resolution:=optional, javax.servlet.http;resolution:=optional, + jakarta.servlet;resolution:=optional, + jakarta.servlet.http;resolution:=optional, javax.sql, javax.tools;resolution:=optional, javax.transaction.xa;resolution:=optional, @@ -41,19 +43,18 @@ Import-Package: javax.crypto, org.w3c.dom;resolution:=optional, org.xml.sax;resolution:=optional, org.xml.sax.helpers;resolution:=optional, - org.apache.lucene.analysis;version="[5.5.5,8.0.0)";resolution:=optional, 
- org.apache.lucene.analysis.standard;version="[5.5.5,8.0.0)";resolution:=optional, - org.apache.lucene.document;version="[5.5.5,8.0.0)";resolution:=optional, - org.apache.lucene.index;version="[5.5.5,8.0.0)";resolution:=optional, - org.apache.lucene.queryparser;version="[5.5.5,8.0.0)";resolution:=optional, - org.apache.lucene.search;version="[5.5.5,8.0.0)";resolution:=optional, - org.apache.lucene.store;version="[5.5.5,8.0.0)";resolution:=optional, - org.apache.lucene.util;version="[5.5.5,8.0.0)";resolution:=optional, - org.locationtech.jts.geom;version="1.15.0";resolution:=optional, - org.locationtech.jts.io;version="1.15.0";resolution:=optional, + org.apache.lucene.analysis;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.analysis.standard;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.document;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.index;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.queryparser;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.search;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.store;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.util;version="[8.5.2,9.0.0)";resolution:=optional, + org.locationtech.jts.geom;version="1.17.0";resolution:=optional, org.osgi.framework;version="1.5", org.osgi.service.jdbc;version="1.0";resolution:=optional, - org.slf4j;version="[1.6.0,1.7.0)";resolution:=optional + org.slf4j;version="[1.7.0,1.8.0)";resolution:=optional Export-Package: org.h2;version="${version}", org.h2.api;version="${version}", org.h2.constant;version="${version}", diff --git a/h2/src/main/org/h2/Driver.java b/h2/src/main/org/h2/Driver.java index 71b92d1624..a0660fc5fd 100644 --- a/h2/src/main/org/h2/Driver.java +++ b/h2/src/main/org/h2/Driver.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2; @@ -11,10 +11,10 @@ import java.sql.SQLException; import java.util.Properties; import java.util.logging.Logger; +import org.h2.api.ErrorCode; import org.h2.engine.Constants; import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; -import org.h2.upgrade.DbUpgrade; /** * The database driver. An application should not use this class directly. The @@ -49,26 +49,18 @@ public class Driver implements java.sql.Driver, JdbcDriverBackwardsCompat { * @param url the database URL * @param info the connection properties * @return the new connection or null if the URL is not supported + * @throws SQLException on connection exception or if URL is {@code null} */ @Override public Connection connect(String url, Properties info) throws SQLException { - try { - if (info == null) { - info = new Properties(); - } - if (!acceptsURL(url)) { - return null; - } - if (url.equals(DEFAULT_URL)) { - return DEFAULT_CONNECTION.get(); - } - Connection c = DbUpgrade.connectOrUpgrade(url, info); - if (c != null) { - return c; - } - return new JdbcConnection(url, info); - } catch (Exception e) { - throw DbException.toSQLException(e); + if (url == null) { + throw DbException.getJdbcSQLException(ErrorCode.URL_FORMAT_ERROR_2, null, Constants.URL_FORMAT, null); + } else if (url.startsWith(Constants.START_URL)) { + return new JdbcConnection(url, info, null, null, false); + } else if (url.equals(DEFAULT_URL)) { + return DEFAULT_CONNECTION.get(); + } else { + return null; } } @@ -78,17 +70,19 @@ public Connection connect(String url, Properties info) throws SQLException { * * @param url the database URL * @return if the driver understands the URL + * @throws SQLException if URL is {@code null} */ @Override - public boolean acceptsURL(String url) { - if (url != null) { - if (url.startsWith(Constants.START_URL)) { - 
return true; - } else if (url.equals(DEFAULT_URL)) { - return DEFAULT_CONNECTION.get() != null; - } + public boolean acceptsURL(String url) throws SQLException { + if (url == null) { + throw DbException.getJdbcSQLException(ErrorCode.URL_FORMAT_ERROR_2, null, Constants.URL_FORMAT, null); + } else if (url.startsWith(Constants.START_URL)) { + return true; + } else if (url.equals(DEFAULT_URL)) { + return DEFAULT_CONNECTION.get() != null; + } else { + return false; } - return false; } /** @@ -147,6 +141,7 @@ public Logger getParentLogger() { /** * INTERNAL + * @return instance of the driver registered with the DriverManager */ public static synchronized Driver load() { try { @@ -178,6 +173,7 @@ public static synchronized void unload() { * INTERNAL * Sets, on a per-thread basis, the default-connection for * user-defined functions. + * @param c to set default to */ public static void setDefaultConnection(Connection c) { if (c == null) { @@ -189,6 +185,7 @@ public static void setDefaultConnection(Connection c) { /** * INTERNAL + * @param thread to set context class loader for */ public static void setThreadContextClassLoader(Thread thread) { // Apache Tomcat: use the classloader of the driver to avoid the diff --git a/h2/src/main/org/h2/JdbcDriverBackwardsCompat.java b/h2/src/main/org/h2/JdbcDriverBackwardsCompat.java index d7284fd321..4d033fd00c 100644 --- a/h2/src/main/org/h2/JdbcDriverBackwardsCompat.java +++ b/h2/src/main/org/h2/JdbcDriverBackwardsCompat.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2; diff --git a/h2/src/main/org/h2/api/Aggregate.java b/h2/src/main/org/h2/api/Aggregate.java index 419d6a2b4b..6169d0cec4 100644 --- a/h2/src/main/org/h2/api/Aggregate.java +++ b/h2/src/main/org/h2/api/Aggregate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; @@ -19,8 +19,11 @@ public interface Aggregate { * A new object is created for each invocation. * * @param conn a connection to the database + * @throws SQLException on SQL exception */ - void init(Connection conn) throws SQLException; + default void init(Connection conn) throws SQLException { + // Do nothing by default + } /** * This method must return the H2 data type, {@link org.h2.value.Value}, @@ -40,6 +43,7 @@ public interface Aggregate { * those are passed as array. * * @param value the value(s) for this row + * @throws SQLException on failure */ void add(Object value) throws SQLException; @@ -49,6 +53,7 @@ public interface Aggregate { * more values were added since its previous invocation. * * @return the aggregated value + * @throws SQLException on failure */ Object getResult() throws SQLException; diff --git a/h2/src/main/org/h2/api/AggregateFunction.java b/h2/src/main/org/h2/api/AggregateFunction.java index 509303c0e5..916853edcd 100644 --- a/h2/src/main/org/h2/api/AggregateFunction.java +++ b/h2/src/main/org/h2/api/AggregateFunction.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.api; @@ -24,8 +24,11 @@ public interface AggregateFunction { * A new object is created for each invocation. * * @param conn a connection to the database + * @throws SQLException on SQL exception */ - void init(Connection conn) throws SQLException; + default void init(Connection conn) throws SQLException { + // Do nothing by default + } /** * This method must return the SQL type of the method, given the SQL type of @@ -34,6 +37,7 @@ public interface AggregateFunction { * * @param inputTypes the SQL type of the parameters, {@link java.sql.Types} * @return the SQL type of the result + * @throws SQLException on failure */ int getType(int[] inputTypes) throws SQLException; @@ -43,6 +47,7 @@ public interface AggregateFunction { * those are passed as array. * * @param value the value(s) for this row + * @throws SQLException on failure */ void add(Object value) throws SQLException; @@ -52,6 +57,7 @@ public interface AggregateFunction { * more values were added since its previous invocation. * * @return the aggregated value + * @throws SQLException on failure */ Object getResult() throws SQLException; diff --git a/h2/src/main/org/h2/api/CredentialsValidator.java b/h2/src/main/org/h2/api/CredentialsValidator.java index 2752d624d3..79dae86059 100644 --- a/h2/src/main/org/h2/api/CredentialsValidator.java +++ b/h2/src/main/org/h2/api/CredentialsValidator.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: Alessandro Ventura */ package org.h2.api; diff --git a/h2/src/main/org/h2/api/CustomDataTypesHandler.java b/h2/src/main/org/h2/api/CustomDataTypesHandler.java deleted file mode 100644 index c3fd3c8d69..0000000000 --- a/h2/src/main/org/h2/api/CustomDataTypesHandler.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.api; - -import org.h2.store.DataHandler; -import org.h2.value.DataType; -import org.h2.value.ExtTypeInfo; -import org.h2.value.TypeInfo; -import org.h2.value.Value; - -/** - * Custom data type handler - * Provides means to plug-in custom data types support - * - * Please keep in mind that this feature may not possibly - * provide the same ABI stability level as other features - * as it exposes many of the H2 internals. You may be - * required to update your code occasionally due to internal - * changes in H2 if you are going to use this feature - */ -public interface CustomDataTypesHandler { - /** - * Get custom data type given its name - * - * @param name data type name - * @return custom data type - */ - DataType getDataTypeByName(String name); - - /** - * Get custom data type given its integer id - * - * @param type identifier of a data type - * @return custom data type - */ - DataType getDataTypeById(int type); - - /** - * Get type info for the given data type identity. 
- * - * @param type identifier of a data type - * @param precision precision - * @param scale scale - * @param extTypeInfo the extended type information, or null - * @return type information - */ - TypeInfo getTypeInfoById(int type, long precision, int scale, ExtTypeInfo extTypeInfo); - - /** - * Get order for custom data type given its integer id - * - * @param type identifier of a data type - * @return order associated with custom data type - */ - int getDataTypeOrder(int type); - - /** - * Convert the provided source value into value of given target data type - * Shall implement conversions to and from custom data types. - * - * @param source source value - * @param targetType identifier of target data type - * @return converted value - */ - Value convert(Value source, int targetType); - - /** - * Get custom data type class name given its integer id - * - * @param type identifier of a data type - * @return class name - */ - String getDataTypeClassName(int type); - - /** - * Get custom data type identifier given corresponding Java class - * @param cls Java class object - * @return type identifier - */ - int getTypeIdFromClass(Class cls); - - /** - * Get {@link org.h2.value.Value} object - * corresponding to given data type identifier and data. - * - * @param type custom data type identifier - * @param data underlying data type value - * @param dataHandler data handler object - * @return Value object - */ - Value getValue(int type, Object data, DataHandler dataHandler); - - /** - * Converts {@link org.h2.value.Value} object - * to the specified class. 
- * - * @param value the value to convert - * @param cls the target class - * @return result - */ - Object getObject(Value value, Class cls); - - /** - * Checks if type supports add operation - * - * @param type custom data type identifier - * @return True, if custom data type supports add operation - */ - boolean supportsAdd(int type); - - /** - * Get compatible type identifier that would not overflow - * after many add operations. - * - * @param type identifier of a type - * @return resulting type identifier - */ - int getAddProofType(int type); -} diff --git a/h2/src/main/org/h2/api/DatabaseEventListener.java b/h2/src/main/org/h2/api/DatabaseEventListener.java index 2651b9e35e..67f3c8eb9e 100644 --- a/h2/src/main/org/h2/api/DatabaseEventListener.java +++ b/h2/src/main/org/h2/api/DatabaseEventListener.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; @@ -12,7 +12,7 @@ * A class that implements this interface can get notified about exceptions * and other events. A database event listener can be registered when * connecting to a database. Example database URL: - * jdbc:h2:test;DATABASE_EVENT_LISTENER='com.acme.DbListener' + * jdbc:h2:./test;DATABASE_EVENT_LISTENER='com.acme.DbListener' */ public interface DatabaseEventListener extends EventListener { @@ -66,13 +66,15 @@ public interface DatabaseEventListener extends EventListener { * * @param url - the database URL */ - void init(String url); + default void init(String url) { + } /** - * This method is called after the database has been opened. It is save to + * This method is called after the database has been opened. It is safe to * connect to the database and execute statements at this point. 
*/ - void opened(); + default void opened() { + } /** * This method is called if an exception occurred. @@ -80,7 +82,8 @@ public interface DatabaseEventListener extends EventListener { * @param e the exception * @param sql the SQL statement */ - void exceptionThrown(SQLException e, String sql); + default void exceptionThrown(SQLException e, String sql) { + } /** * This method is called for long running events, such as recovering, @@ -93,15 +96,17 @@ public interface DatabaseEventListener extends EventListener { * @param state the state * @param name the object name * @param x the current position - * @param max the highest possible value (might be 0) + * @param max the highest possible value or 0 if unknown */ - void setProgress(int state, String name, int x, int max); + default void setProgress(int state, String name, long x, long max) { + } /** - * This method is called before the database is closed normally. It is save + * This method is called before the database is closed normally. It is safe * to connect to the database and execute statements at this point, however * the connection must be closed before the method returns. */ - void closingDatabase(); + default void closingDatabase() { + } } diff --git a/h2/src/main/org/h2/api/ErrorCode.java b/h2/src/main/org/h2/api/ErrorCode.java index 66800aa487..bb74ebef80 100644 --- a/h2/src/main/org/h2/api/ErrorCode.java +++ b/h2/src/main/org/h2/api/ErrorCode.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; @@ -212,6 +212,17 @@ public class ErrorCode { */ public static final int ENUM_DUPLICATE = 22033; + /** + * The error with code 22034 is thrown when an + * attempt is made to read non-existing element of an array. 
+ * + * Example: + *
          +     * VALUES ARRAY[1, 2][3]
          +     * 
          + */ + public static final int ARRAY_ELEMENT_ERROR_2 = 22034; + // 23: constraint violation /** @@ -281,7 +292,7 @@ public class ErrorCode { * The error with code 23513 is thrown when * a check constraint is violated. Example: *
          -     * CREATE TABLE TEST(ID INT CHECK ID>0);
          +     * CREATE TABLE TEST(ID INT CHECK (ID>0));
                * INSERT INTO TEST VALUES(0);
                * 
          */ @@ -289,7 +300,7 @@ public class ErrorCode { /** * The error with code 23514 is thrown when - * evaluation of a check constraint resulted in a error. + * evaluation of a check constraint resulted in an error. */ public static final int CHECK_CONSTRAINT_INVALID = 23514; @@ -317,7 +328,7 @@ public class ErrorCode { * sessions are also possible. To solve deadlock problems, an application * should lock tables always in the same order, such as always lock table A * before locking table B. For details, see Wikipedia Deadlock. + * href="https://en.wikipedia.org/wiki/Deadlock">Wikipedia Deadlock. */ public static final int DEADLOCK_1 = 40001; @@ -367,6 +378,30 @@ public class ErrorCode { */ public static final int TABLE_OR_VIEW_NOT_FOUND_1 = 42102; + /** + * The error with code 42103 is thrown when + * trying to query, modify or drop a table or view that does not exists + * in this schema and database but similar names were found. A common cause + * is that the names are written in different case. + * Example: + *
          +     * SELECT * FROM ABC;
          +     * 
          + */ + public static final int TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2 = 42103; + + /** + * The error with code 42104 is thrown when + * trying to query, modify or drop a table or view that does not exists + * in this schema and database but it is empty anyway. A common cause is + * that the wrong database was opened. + * Example: + *
          +     * SELECT * FROM ABC;
          +     * 
          + */ + public static final int TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 = 42104; + /** * The error with code 42111 is thrown when * trying to create an index if an index with the same name already exists. @@ -422,6 +457,41 @@ public class ErrorCode { */ public static final int IDENTICAL_EXPRESSIONS_SHOULD_BE_USED = 42131; + /** + * The error with code 42602 is thrown when + * invalid name of identifier is used. + * Example: + *
          +     * statement.enquoteIdentifier("\"", true);
          +     * 
          + */ + public static final int INVALID_NAME_1 = 42602; + + /** + * The error with code 42622 is thrown when + * name of identifier is too long. + * Example: + *
          +     * char[] c = new char[1000];
          +     * Arrays.fill(c, 'A');
          +     * statement.executeQuery("SELECT 1 " + new String(c));
          +     * 
          + */ + public static final int NAME_TOO_LONG_2 = 42622; + + // 54: program limit exceeded + + /** + * The error with code 54011 is thrown when + * too many columns were specified in a table, select statement, + * or row value. + * Example: + *
          +     * CREATE TABLE TEST(C1 INTEGER, C2 INTEGER, ..., C20000 INTEGER);
          +     * 
          + */ + public static final int TOO_MANY_COLUMNS_1 = 54011; + // 0A: feature not supported // HZ: remote database access @@ -539,10 +609,9 @@ public class ErrorCode { /** * The error with code 90005 is thrown when - * trying to create a trigger and using the combination of SELECT - * and FOR EACH ROW, which we do not support. + * trying to create a trigger with invalid combination of flags. */ - public static final int TRIGGER_SELECT_AND_ROW_BASED_NOT_SUPPORTED = 90005; + public static final int INVALID_TRIGGER_FLAGS_1 = 90005; /** * The error with code 90006 is thrown when @@ -572,7 +641,7 @@ public class ErrorCode { * trying to create a sequence with an invalid combination * of attributes (min value, max value, start value, etc). */ - public static final int SEQUENCE_ATTRIBUTES_INVALID = 90009; + public static final int SEQUENCE_ATTRIBUTES_INVALID_7 = 90009; /** * The error with code 90010 is thrown when @@ -735,13 +804,22 @@ public class ErrorCode { public static final int FUNCTION_NOT_FOUND_1 = 90022; /** - * The error with code 90023 is thrown when - * trying to set a primary key on a nullable column. - * Example: + * The error with code 90023 is thrown when trying to set a + * primary key on a nullable column or when trying to drop NOT NULL + * constraint on primary key or identity column. + * Examples: *
                * CREATE TABLE TEST(ID INT, NAME VARCHAR);
                * ALTER TABLE TEST ADD CONSTRAINT PK PRIMARY KEY(ID);
                * 
          + *
          +     * CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR);
          +     * ALTER TABLE TEST ALTER COLUMN ID DROP NOT NULL;
          +     * 
          + *
          +     * CREATE TABLE TEST(ID INT GENERATED ALWAYS AS IDENTITY, NAME VARCHAR);
          +     * ALTER TABLE TEST ALTER COLUMN ID DROP NOT NULL;
          +     * 
          */ public static final int COLUMN_MUST_NOT_BE_NULLABLE_1 = 90023; @@ -1000,30 +1078,15 @@ public class ErrorCode { */ public static final int WRONG_PASSWORD_FORMAT = 90050; - /** - * The error with code 90051 is thrown when - * trying to use a scale that is > precision. - * Example: - *
          -     * CREATE TABLE TABLE1 ( FAIL NUMBER(6,24) );
          -     * 
          - */ - public static final int INVALID_VALUE_SCALE_PRECISION = 90051; + // 90051 was removed /** - * The error with code 90052 is thrown when - * a subquery that is used as a value contains more than one column. - * Example of wrong usage: - *
          -     * CREATE TABLE TEST(ID INT);
          -     * INSERT INTO TEST VALUES(1), (2);
          -     * SELECT * FROM TEST WHERE ID IN (SELECT 1, 2 FROM DUAL);
          -     * 
          - * Correct: + * The error with code 90052 is thrown when a single-column + * subquery is expected but a subquery with other number of columns was + * specified. + * Example: *
          -     * CREATE TABLE TEST(ID INT);
          -     * INSERT INTO TEST VALUES(1), (2);
          -     * SELECT * FROM TEST WHERE ID IN (1, 2);
          +     * VALUES ARRAY(SELECT A, B FROM TEST)
                * 
          */ public static final int SUBQUERY_IS_NOT_SINGLE_COLUMN = 90052; @@ -1394,11 +1457,14 @@ public class ErrorCode { /** * The error with code 90085 is thrown when * trying to manually drop an index that was generated by the system - * because of a unique or referential constraint. To find out what - * constraint causes the problem, run: + * because of a unique or referential constraint. To find + * the owner of the index without attempt to drop it run *
          -     * SELECT * FROM INFORMATION_SCHEMA.CONSTRAINTS
          -     * WHERE UNIQUE_INDEX_NAME = '<index name>';
          +     * SELECT CONSTRAINT_SCHEMA, CONSTRAINT_NAME
          +     * FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE
          +     * WHERE INDEX_SCHEMA = '<index schema>'
          +     * AND INDEX_NAME = '<index name>'
          +     * FETCH FIRST ROW ONLY
                * 
          * Example of wrong usage: *
          @@ -1648,14 +1714,14 @@ public class ErrorCode {
           
               /**
                * The error with code 90110 is thrown when
          -     * trying to compare an array value against a non-array value.
          +     * trying to compare values of incomparable data types.
                * Example:
                * 
                * CREATE TABLE test (id INT NOT NULL, name VARCHAR);
                * select * from test where id = (1, 2);
                * 
          */ - public static final int COMPARING_ARRAY_TO_SCALAR = 90110; + public static final int TYPES_ARE_NOT_COMPARABLE_2 = 90110; /** * The error with code 90111 is thrown when @@ -1751,8 +1817,8 @@ public class ErrorCode { * Example: *
                * CREATE DOMAIN INTEGER AS VARCHAR;
          -     * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
          -     * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
          +     * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
          +     * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
                * 
          */ public static final int DOMAIN_ALREADY_EXISTS_1 = 90119; @@ -1959,7 +2025,7 @@ public class ErrorCode { * The error with code 90137 is thrown when * trying to assign a value to something that is not a variable. *
          -     * SELECT AMOUNT, SET(@V, IFNULL(@V, 0)+AMOUNT) FROM TEST;
          +     * SELECT AMOUNT, SET(@V, COALESCE(@V, 0)+AMOUNT) FROM TEST;
                * 
          */ public static final int CAN_ONLY_ASSIGN_TO_VARIABLE_1 = 90137; @@ -2021,7 +2087,6 @@ public class ErrorCode { /** * The error with code 90143 is thrown when * trying to fetch a row from the primary index and the row is not there. - * Can happen in MULTI_THREADED=1 case. */ public static final int ROW_NOT_FOUND_IN_PRIMARY_INDEX = 90143; @@ -2049,18 +2114,133 @@ public class ErrorCode { */ public static final int FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT = 90145; - /** * The error with code 90146 is thrown when trying to open a + * database that does not exist using the flag IFEXISTS=TRUE + *
          +     * jdbc:h2:./database_that_does_not_exist
          +     * 
          + */ + public static final int DATABASE_NOT_FOUND_WITH_IF_EXISTS_1 = 90146; + + /** + * The error with code 90147 is thrown when trying to execute a + * statement which closes the transaction (such as commit and rollback) and + * autocommit mode is on. + * + * @see org.h2.engine.SysProperties#FORCE_AUTOCOMMIT_OFF_ON_COMMIT + */ + public static final int METHOD_DISABLED_ON_AUTOCOMMIT_TRUE = 90147; + + /** + * The error with code 90148 is thrown when trying to access + * the current value of a sequence before execution of NEXT VALUE FOR + * sequenceName in the current session. Example: + * + *
          +     * SELECT CURRENT VALUE FOR SEQUENCE XYZ;
          +     * 
          + */ + public static final int CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1 = 90148; + + /** + * The error with code 90149 is thrown when trying to open a * database that does not exist remotely without enabling remote database - * creation first, or using the flag IFEXISTS=TRUE + * creation first. *
                * jdbc:h2:./database_that_does_not_exist
                * 
          */ - public static final int DATABASE_NOT_FOUND_2 = 90146; + public static final int REMOTE_DATABASE_NOT_FOUND_1 = 90149; + + /** + * The error with code 90150 is thrown when + * trying to use an invalid precision. + * Example: + *
          +     * CREATE TABLE TABLE1 ( FAIL INTERVAL YEAR(20) );
          +     * 
          + */ + public static final int INVALID_VALUE_PRECISION = 90150; - // next is 90147 + /** + * The error with code 90151 is thrown when + * trying to use an invalid scale or fractional seconds precision. + * Example: + *
          +     * CREATE TABLE TABLE1 ( FAIL TIME(10) );
          +     * 
          + */ + public static final int INVALID_VALUE_SCALE = 90151; + + /** + * The error with code 90152 is thrown when trying to manually + * drop a unique or primary key constraint that is referenced by a foreign + * key constraint without a CASCADE clause. + * + *
          +     * CREATE TABLE PARENT(ID INT CONSTRAINT P1 PRIMARY KEY);
          +     * CREATE TABLE CHILD(ID INT CONSTRAINT P2 PRIMARY KEY, CHILD INT CONSTRAINT C REFERENCES PARENT);
          +     * ALTER TABLE PARENT DROP CONSTRAINT P1 RESTRICT;
          +     * 
          + */ + public static final int CONSTRAINT_IS_USED_BY_CONSTRAINT_2 = 90152; + + /** + * The error with code 90153 is thrown when trying to reference + * a column of another data type when data types aren't comparable or don't + * have a session-independent compare order between each other. + * + *
          +     * CREATE TABLE PARENT(T TIMESTAMP UNIQUE);
          +     * CREATE TABLE CHILD(T TIMESTAMP WITH TIME ZONE REFERENCES PARENT(T));
          +     * 
          + */ + public static final int UNCOMPARABLE_REFERENCED_COLUMN_2 = 90153; + + /** + * The error with code 90154 is thrown when trying to assign a + * value to a generated column. + * + *
          +     * CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1));
          +     * INSERT INTO TEST(A, B) VALUES (1, 1);
          +     * 
          + */ + public static final int GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 = 90154; + + /** + * The error with code 90155 is thrown when trying to create a + * referential constraint that can update a referenced generated column. + * + *
          +     * CREATE TABLE PARENT(ID INT PRIMARY KEY, K INT GENERATED ALWAYS AS (ID) UNIQUE);
          +     * CREATE TABLE CHILD(ID INT PRIMARY KEY, P INT);
          +     * ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE SET NULL;
          +     * 
          + */ + public static final int GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 = 90155; + + /** + * The error with code 90156 is thrown when trying to create a + * view or a table from a select and some expression doesn't have a column + * name or alias when it is required by a compatibility mode. + * + *
          +     * SET MODE DB2;
          +     * CREATE TABLE T1(A INT, B INT);
          +     * CREATE TABLE T2 AS (SELECT A + B FROM T1) WITH DATA;
          +     * 
          + */ + public static final int COLUMN_ALIAS_IS_NOT_SPECIFIED_1 = 90156; + + /** + * The error with code 90157 is thrown when the integer + * index that is used in the GROUP BY is not in the SELECT list + */ + public static final int GROUP_BY_NOT_IN_THE_RESULT = 90157; + + // next is 90158 private ErrorCode() { // utility class @@ -2068,6 +2248,8 @@ private ErrorCode() { /** * INTERNAL + * @param errorCode to check + * @return true if provided code is common, false otherwise */ public static boolean isCommon(int errorCode) { // this list is sorted alphabetically @@ -2086,6 +2268,8 @@ public static boolean isCommon(int errorCode) { case SYNTAX_ERROR_2: case TABLE_OR_VIEW_ALREADY_EXISTS_1: case TABLE_OR_VIEW_NOT_FOUND_1: + case TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2: + case TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1: case VALUE_TOO_LONG_2: return true; } @@ -2094,6 +2278,8 @@ public static boolean isCommon(int errorCode) { /** * INTERNAL + * @param errorCode to get state for + * @return error state */ public static String getState(int errorCode) { // To convert SQLState to error code, replace @@ -2113,9 +2299,14 @@ public static String getState(int errorCode) { // 21: cardinality violation case COLUMN_COUNT_DOES_NOT_MATCH: return "21S02"; + // 22: data exception + case ARRAY_ELEMENT_ERROR_2: return "2202E"; + // 42: syntax error or access rule violation case TABLE_OR_VIEW_ALREADY_EXISTS_1: return "42S01"; case TABLE_OR_VIEW_NOT_FOUND_1: return "42S02"; + case TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2: return "42S03"; + case TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1: return "42S04"; case INDEX_ALREADY_EXISTS_1: return "42S11"; case INDEX_NOT_FOUND_1: return "42S12"; case DUPLICATE_COLUMN_NAME_1: return "42S21"; diff --git a/h2/src/main/org/h2/api/H2Type.java b/h2/src/main/org/h2/api/H2Type.java new file mode 100644 index 0000000000..ecc61311e8 --- /dev/null +++ b/h2/src/main/org/h2/api/H2Type.java @@ -0,0 +1,321 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.api; + +import java.sql.SQLType; + +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Data types of H2. + */ +public final class H2Type implements SQLType { + + // Character strings + + /** + * The CHARACTER data type. + */ + public static final H2Type CHAR = new H2Type(TypeInfo.getTypeInfo(Value.CHAR), "CHARACTER"); + + /** + * The CHARACTER VARYING data type. + */ + public static final H2Type VARCHAR = new H2Type(TypeInfo.TYPE_VARCHAR, "CHARACTER VARYING"); + + /** + * The CHARACTER LARGE OBJECT data type. + */ + public static final H2Type CLOB = new H2Type(TypeInfo.TYPE_CLOB, "CHARACTER LARGE OBJECT"); + + /** + * The VARCHAR_IGNORECASE data type. + */ + public static final H2Type VARCHAR_IGNORECASE = new H2Type(TypeInfo.TYPE_VARCHAR_IGNORECASE, "VARCHAR_IGNORECASE"); + + // Binary strings + + /** + * The BINARY data type. + */ + public static final H2Type BINARY = new H2Type(TypeInfo.getTypeInfo(Value.BINARY), "BINARY"); + + /** + * The BINARY VARYING data type. + */ + public static final H2Type VARBINARY = new H2Type(TypeInfo.TYPE_VARBINARY, "BINARY VARYING"); + + /** + * The BINARY LARGE OBJECT data type. + */ + public static final H2Type BLOB = new H2Type(TypeInfo.TYPE_BLOB, "BINARY LARGE OBJECT"); + + // Boolean + + /** + * The BOOLEAN data type + */ + public static final H2Type BOOLEAN = new H2Type(TypeInfo.TYPE_BOOLEAN, "BOOLEAN"); + + // Exact numeric data types + + /** + * The TINYINT data type. + */ + public static final H2Type TINYINT = new H2Type(TypeInfo.TYPE_TINYINT, "TINYINT"); + + /** + * The SMALLINT data type. + */ + public static final H2Type SMALLINT = new H2Type(TypeInfo.TYPE_SMALLINT, "SMALLINT"); + + /** + * The INTEGER data type. 
+ */ + public static final H2Type INTEGER = new H2Type(TypeInfo.TYPE_INTEGER, "INTEGER"); + + /** + * The BIGINT data type. + */ + public static final H2Type BIGINT = new H2Type(TypeInfo.TYPE_BIGINT, "BIGINT"); + + /** + * The NUMERIC data type. + */ + public static final H2Type NUMERIC = new H2Type(TypeInfo.TYPE_NUMERIC_FLOATING_POINT, "NUMERIC"); + + // Approximate numeric data types + + /** + * The REAL data type. + */ + public static final H2Type REAL = new H2Type(TypeInfo.TYPE_REAL, "REAL"); + + /** + * The DOUBLE PRECISION data type. + */ + public static final H2Type DOUBLE_PRECISION = new H2Type(TypeInfo.TYPE_DOUBLE, "DOUBLE PRECISION"); + + // Decimal floating-point type + + /** + * The DECFLOAT data type. + */ + public static final H2Type DECFLOAT = new H2Type(TypeInfo.TYPE_DECFLOAT, "DECFLOAT"); + + // Date-time data types + + /** + * The DATE data type. + */ + public static final H2Type DATE = new H2Type(TypeInfo.TYPE_DATE, "DATE"); + + /** + * The TIME data type. + */ + public static final H2Type TIME = new H2Type(TypeInfo.TYPE_TIME, "TIME"); + + /** + * The TIME WITH TIME ZONE data type. + */ + public static final H2Type TIME_WITH_TIME_ZONE = new H2Type(TypeInfo.TYPE_TIME_TZ, "TIME WITH TIME ZONE"); + + /** + * The TIMESTAMP data type. + */ + public static final H2Type TIMESTAMP = new H2Type(TypeInfo.TYPE_TIMESTAMP, "TIMESTAMP"); + + /** + * The TIMESTAMP WITH TIME ZONE data type. + */ + public static final H2Type TIMESTAMP_WITH_TIME_ZONE = new H2Type(TypeInfo.TYPE_TIMESTAMP_TZ, + "TIMESTAMP WITH TIME ZONE"); + + // Intervals + + /** + * The INTERVAL YEAR data type. + */ + public static final H2Type INTERVAL_YEAR = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_YEAR), "INTERVAL_YEAR"); + + /** + * The INTERVAL MONTH data type. + */ + public static final H2Type INTERVAL_MONTH = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_MONTH), + "INTERVAL_MONTH"); + + /** + * The INTERVAL DAY data type. 
+ */ + public static final H2Type INTERVAL_DAY = new H2Type(TypeInfo.TYPE_INTERVAL_DAY, "INTERVAL_DAY"); + + /** + * The INTERVAL HOUR data type. + */ + public static final H2Type INTERVAL_HOUR = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_HOUR), "INTERVAL_HOUR"); + + /** + * The INTERVAL MINUTE data type. + */ + public static final H2Type INTERVAL_MINUTE = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_MINUTE), + "INTERVAL_MINUTE"); + + /** + * The INTERVAL SECOND data type. + */ + public static final H2Type INTERVAL_SECOND = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_SECOND), + "INTERVAL_SECOND"); + + /** + * The INTERVAL YEAR TO MONTH data type. + */ + public static final H2Type INTERVAL_YEAR_TO_MONTH = new H2Type(TypeInfo.TYPE_INTERVAL_YEAR_TO_MONTH, + "INTERVAL_YEAR_TO_MONTH"); + + /** + * The INTERVAL DAY TO HOUR data type. + */ + public static final H2Type INTERVAL_DAY_TO_HOUR = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_DAY_TO_HOUR), + "INTERVAL_DAY_TO_HOUR"); + + /** + * The INTERVAL DAY TO MINUTE data type. + */ + public static final H2Type INTERVAL_DAY_TO_MINUTE = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_DAY_TO_MINUTE), + "INTERVAL_DAY_TO_MINUTE"); + + /** + * The INTERVAL DAY TO SECOND data type. + */ + public static final H2Type INTERVAL_DAY_TO_SECOND = new H2Type(TypeInfo.TYPE_INTERVAL_DAY_TO_SECOND, + "INTERVAL_DAY_TO_SECOND"); + + /** + * The INTERVAL HOUR TO MINUTE data type. + */ + public static final H2Type INTERVAL_HOUR_TO_MINUTE = new H2Type( // + TypeInfo.getTypeInfo(Value.INTERVAL_HOUR_TO_MINUTE), "INTERVAL_HOUR_TO_MINUTE"); + + /** + * The INTERVAL HOUR TO SECOND data type. + */ + public static final H2Type INTERVAL_HOUR_TO_SECOND = new H2Type(TypeInfo.TYPE_INTERVAL_HOUR_TO_SECOND, + "INTERVAL_HOUR_TO_SECOND"); + + /** + * The INTERVAL MINUTE TO SECOND data type. 
+ */ + public static final H2Type INTERVAL_MINUTE_TO_SECOND = new H2Type( + TypeInfo.getTypeInfo(Value.INTERVAL_MINUTE_TO_SECOND), "INTERVAL_MINUTE_TO_SECOND"); + + // Other JDBC + + /** + * The JAVA_OBJECT data type. + */ + public static final H2Type JAVA_OBJECT = new H2Type(TypeInfo.TYPE_JAVA_OBJECT, "JAVA_OBJECT"); + + // Other non-standard + + /** + * The ENUM data type. + */ + public static final H2Type ENUM = new H2Type(TypeInfo.TYPE_ENUM_UNDEFINED, "ENUM"); + + /** + * The GEOMETRY data type. + */ + public static final H2Type GEOMETRY = new H2Type(TypeInfo.TYPE_GEOMETRY, "GEOMETRY"); + + /** + * The JSON data type. + */ + public static final H2Type JSON = new H2Type(TypeInfo.TYPE_JSON, "JSON"); + + /** + * The UUID data type. + */ + public static final H2Type UUID = new H2Type(TypeInfo.TYPE_UUID, "UUID"); + + // Collections + + // Use arrayOf() for ARRAY + + // Use row() for ROW + + /** + * Returns ARRAY data type with the specified component type. + * + * @param componentType + * the type of elements + * @return ARRAY data type + */ + public static H2Type array(H2Type componentType) { + return new H2Type(TypeInfo.getTypeInfo(Value.ARRAY, -1L, -1, componentType.typeInfo), + "array(" + componentType.field + ')'); + } + + /** + * Returns ROW data type with specified types of fields and default names. + * + * @param fieldTypes + * the type of fields + * @return ROW data type + */ + public static H2Type row(H2Type... 
fieldTypes) { + int degree = fieldTypes.length; + TypeInfo[] row = new TypeInfo[degree]; + StringBuilder builder = new StringBuilder("row("); + for (int i = 0; i < degree; i++) { + H2Type t = fieldTypes[i]; + row[i] = t.typeInfo; + if (i > 0) { + builder.append(", "); + } + builder.append(t.field); + } + return new H2Type(TypeInfo.getTypeInfo(Value.ROW, -1L, -1, new ExtTypeInfoRow(row)), + builder.append(')').toString()); + } + + private TypeInfo typeInfo; + + private String field; + + private H2Type(TypeInfo typeInfo, String field) { + this.typeInfo = typeInfo; + this.field = "H2Type." + field; + } + + @Override + public String getName() { + return typeInfo.toString(); + } + + @Override + public String getVendor() { + return "com.h2database"; + } + + /** + * Returns the vendor specific type number for the data type. The returned + * value is actual only for the current version of H2. + * + * @return the vendor specific data type + */ + @Override + public Integer getVendorTypeNumber() { + return typeInfo.getValueType(); + } + + @Override + public String toString() { + return field; + } + +} diff --git a/h2/src/main/org/h2/api/Interval.java b/h2/src/main/org/h2/api/Interval.java index 16c8ef6da0..42024b9466 100644 --- a/h2/src/main/org/h2/api/Interval.java +++ b/h2/src/main/org/h2/api/Interval.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; diff --git a/h2/src/main/org/h2/api/IntervalQualifier.java b/h2/src/main/org/h2/api/IntervalQualifier.java index c78ff268ca..1772d1790e 100644 --- a/h2/src/main/org/h2/api/IntervalQualifier.java +++ b/h2/src/main/org/h2/api/IntervalQualifier.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; @@ -251,87 +251,102 @@ public String toString() { * @return full type name */ public String getTypeName(int precision, int scale) { - StringBuilder b = new StringBuilder("INTERVAL "); + return getTypeName(new StringBuilder(), precision, scale, false).toString(); + } + + /** + * Appends full type name to the specified string builder. + * + * @param builder string builder + * @param precision precision, or {@code -1} + * @param scale fractional seconds precision, or {@code -1} + * @param qualifierOnly if {@code true}, don't add the INTERVAL prefix + * @return the specified string builder + */ + public StringBuilder getTypeName(StringBuilder builder, int precision, int scale, boolean qualifierOnly) { + if (!qualifierOnly) { + builder.append("INTERVAL "); + } switch (this) { case YEAR: case MONTH: case DAY: case HOUR: case MINUTE: - b.append(string); + builder.append(string); if (precision > 0) { - b.append('(').append(precision).append(')'); + builder.append('(').append(precision).append(')'); } break; case SECOND: - b.append(string); + builder.append(string); if (precision > 0 || scale >= 0) { - b.append('(').append(precision > 0 ? precision : 2); + builder.append('(').append(precision > 0 ? 
precision : 2); if (scale >= 0) { - b.append(", ").append(scale); + builder.append(", ").append(scale); } - b.append(')'); + builder.append(')'); } break; case YEAR_TO_MONTH: - b.append("YEAR"); + builder.append("YEAR"); if (precision > 0) { - b.append('(').append(precision).append(')'); + builder.append('(').append(precision).append(')'); } - b.append(" TO MONTH"); + builder.append(" TO MONTH"); break; case DAY_TO_HOUR: - b.append("DAY"); + builder.append("DAY"); if (precision > 0) { - b.append('(').append(precision).append(')'); + builder.append('(').append(precision).append(')'); } - b.append(" TO HOUR"); + builder.append(" TO HOUR"); break; case DAY_TO_MINUTE: - b.append("DAY"); + builder.append("DAY"); if (precision > 0) { - b.append('(').append(precision).append(')'); + builder.append('(').append(precision).append(')'); } - b.append(" TO MINUTE"); + builder.append(" TO MINUTE"); break; case DAY_TO_SECOND: - b.append("DAY"); + builder.append("DAY"); if (precision > 0) { - b.append('(').append(precision).append(')'); + builder.append('(').append(precision).append(')'); } - b.append(" TO SECOND"); + builder.append(" TO SECOND"); if (scale >= 0) { - b.append('(').append(scale).append(')'); + builder.append('(').append(scale).append(')'); } break; case HOUR_TO_MINUTE: - b.append("HOUR"); + builder.append("HOUR"); if (precision > 0) { - b.append('(').append(precision).append(')'); + builder.append('(').append(precision).append(')'); } - b.append(" TO MINUTE"); + builder.append(" TO MINUTE"); break; case HOUR_TO_SECOND: - b.append("HOUR"); + builder.append("HOUR"); if (precision > 0) { - b.append('(').append(precision).append(')'); + builder.append('(').append(precision).append(')'); } - b.append(" TO SECOND"); + builder.append(" TO SECOND"); if (scale >= 0) { - b.append('(').append(scale).append(')'); + builder.append('(').append(scale).append(')'); } break; case MINUTE_TO_SECOND: - b.append("MINUTE"); + builder.append("MINUTE"); if (precision > 0) { - 
b.append('(').append(precision).append(')'); + builder.append('(').append(precision).append(')'); } - b.append(" TO SECOND"); + builder.append(" TO SECOND"); if (scale >= 0) { - b.append('(').append(scale).append(')'); + builder.append('(').append(scale).append(')'); } } - return b.toString(); + return builder; } } diff --git a/h2/src/main/org/h2/api/JavaObjectSerializer.java b/h2/src/main/org/h2/api/JavaObjectSerializer.java index 98d387ce57..9daa53065d 100644 --- a/h2/src/main/org/h2/api/JavaObjectSerializer.java +++ b/h2/src/main/org/h2/api/JavaObjectSerializer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; @@ -18,6 +18,7 @@ public interface JavaObjectSerializer { * * @param obj the object to serialize * @return the byte array of the serialized object + * @throws Exception on failure */ byte[] serialize(Object obj) throws Exception; @@ -26,6 +27,7 @@ public interface JavaObjectSerializer { * * @param bytes the byte array of the serialized object * @return the object + * @throws Exception on failure */ Object deserialize(byte[] bytes) throws Exception; diff --git a/h2/src/main/org/h2/api/TableEngine.java b/h2/src/main/org/h2/api/TableEngine.java index 9bc9f0e959..497b291949 100644 --- a/h2/src/main/org/h2/api/TableEngine.java +++ b/h2/src/main/org/h2/api/TableEngine.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.api; -import org.h2.table.Table; import org.h2.command.ddl.CreateTableData; +import org.h2.table.Table; /** * A class that implements this interface can create custom table diff --git a/h2/src/main/org/h2/api/TimestampWithTimeZone.java b/h2/src/main/org/h2/api/TimestampWithTimeZone.java deleted file mode 100644 index ed7f82adff..0000000000 --- a/h2/src/main/org/h2/api/TimestampWithTimeZone.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.api; - -import java.io.Serializable; -import org.h2.util.DateTimeUtils; -import org.h2.value.ValueTimestampTimeZone; - -/** - * How we expose "TIMESTAMP WITH TIME ZONE" in our ResultSets. - */ -public class TimestampWithTimeZone implements Serializable, Cloneable { - - /** - * The serial version UID. - */ - private static final long serialVersionUID = 4413229090646777107L; - - /** - * A bit field with bits for the year, month, and day (see DateTimeUtils for - * encoding) - */ - private final long dateValue; - /** - * The nanoseconds since midnight. - */ - private final long timeNanos; - /** - * Time zone offset from UTC in minutes, range of -12hours to +12hours - */ - private final short timeZoneOffsetMins; - - public TimestampWithTimeZone(long dateValue, long timeNanos, short timeZoneOffsetMins) { - this.dateValue = dateValue; - this.timeNanos = timeNanos; - this.timeZoneOffsetMins = timeZoneOffsetMins; - } - - /** - * @return the year-month-day bit field - */ - public long getYMD() { - return dateValue; - } - - /** - * Gets the year. - * - *

          The year is in the specified time zone and not UTC. So for - * {@code 2015-12-31 19:00:00.00-10:00} the value returned - * will be {@code 2015} even though in UTC the year is {@code 2016}.

          - * - * @return the year - */ - public int getYear() { - return DateTimeUtils.yearFromDateValue(dateValue); - } - - /** - * Gets the month 1-based. - * - *

          The month is in the specified time zone and not UTC. So for - * {@code 2015-12-31 19:00:00.00-10:00} the value returned - * is {@code 12} even though in UTC the month is {@code 1}.

          - * - * @return the month - */ - public int getMonth() { - return DateTimeUtils.monthFromDateValue(dateValue); - } - - /** - * Gets the day of month 1-based. - * - *

          The day of month is in the specified time zone and not UTC. So for - * {@code 2015-12-31 19:00:00.00-10:00} the value returned - * is {@code 31} even though in UTC the day of month is {@code 1}.

          - * - * @return the day of month - */ - public int getDay() { - return DateTimeUtils.dayFromDateValue(dateValue); - } - - /** - * Gets the nanoseconds since midnight. - * - *

          The nanoseconds are relative to midnight in the specified - * time zone. So for {@code 2016-09-24 00:00:00.000000001-00:01} the - * value returned is {@code 1} even though {@code 60000000001} - * nanoseconds have passed since midnight in UTC.

          - * - * @return the nanoseconds since midnight - */ - public long getNanosSinceMidnight() { - return timeNanos; - } - - /** - * The time zone offset in minutes. - * - * @return the offset - */ - public short getTimeZoneOffsetMins() { - return timeZoneOffsetMins; - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(ValueTimestampTimeZone.MAXIMUM_PRECISION); - DateTimeUtils.appendTimestampTimeZone(builder, dateValue, timeNanos, timeZoneOffsetMins); - return builder.toString(); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (int) (dateValue ^ (dateValue >>> 32)); - result = prime * result + (int) (timeNanos ^ (timeNanos >>> 32)); - result = prime * result + timeZoneOffsetMins; - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimestampWithTimeZone other = (TimestampWithTimeZone) obj; - if (dateValue != other.dateValue) { - return false; - } - if (timeNanos != other.timeNanos) { - return false; - } - if (timeZoneOffsetMins != other.timeZoneOffsetMins) { - return false; - } - return true; - } - -} diff --git a/h2/src/main/org/h2/api/Trigger.java b/h2/src/main/org/h2/api/Trigger.java index 19df8322bd..37a1cb74c2 100644 --- a/h2/src/main/org/h2/api/Trigger.java +++ b/h2/src/main/org/h2/api/Trigger.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.api; @@ -49,9 +49,12 @@ public interface Trigger { * operation is performed * @param type the operation type: INSERT, UPDATE, DELETE, SELECT, or a * combination (this parameter is a bit field) + * @throws SQLException on SQL exception */ - void init(Connection conn, String schemaName, String triggerName, - String tableName, boolean before, int type) throws SQLException; + default void init(Connection conn, String schemaName, String triggerName, + String tableName, boolean before, int type) throws SQLException { + // Does nothing by default + } /** * This method is called for each triggered action. The method is called @@ -82,12 +85,20 @@ void fire(Connection conn, Object[] oldRow, Object[] newRow) * This method is called when the database is closed. * If the method throws an exception, it will be logged, but * closing the database will continue. + * + * @throws SQLException on SQL exception */ - void close() throws SQLException; + default void close() throws SQLException { + // Does nothing by default + } /** * This method is called when the trigger is dropped. + * + * @throws SQLException on SQL exception */ - void remove() throws SQLException; + default void remove() throws SQLException { + // Does nothing by default + } } diff --git a/h2/src/main/org/h2/api/UserToRolesMapper.java b/h2/src/main/org/h2/api/UserToRolesMapper.java index 0b24428a03..55d59468e2 100644 --- a/h2/src/main/org/h2/api/UserToRolesMapper.java +++ b/h2/src/main/org/h2/api/UserToRolesMapper.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: Alessandro Ventura */ package org.h2.api; diff --git a/h2/src/main/org/h2/api/package.html b/h2/src/main/org/h2/api/package.html index 3c1df7bf37..3dd9f31c6c 100644 --- a/h2/src/main/org/h2/api/package.html +++ b/h2/src/main/org/h2/api/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/bnf/Bnf.java b/h2/src/main/org/h2/bnf/Bnf.java index 89071cacaa..3faccea4e4 100644 --- a/h2/src/main/org/h2/bnf/Bnf.java +++ b/h2/src/main/org/h2/bnf/Bnf.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; @@ -14,8 +14,8 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.StringTokenizer; - import org.h2.bnf.context.DbContextRule; +import org.h2.command.dml.Help; import org.h2.tools.Csv; import org.h2.util.StringUtils; import org.h2.util.Utils; @@ -45,6 +45,8 @@ public class Bnf { * * @param csv if not specified, the help.csv is used * @return a new instance + * @throws SQLException on failure + * @throws IOException on failure */ public static Bnf getInstance(Reader csv) throws SQLException, IOException { Bnf bnf = new Bnf(); @@ -75,10 +77,9 @@ private void addFixedRule(String name, int fixedType) { private RuleHead addRule(String topic, String section, Rule rule) { RuleHead head = new RuleHead(section, topic, rule); String key = StringUtils.toLowerEnglish(topic.trim().replace(' ', '_')); - if (ruleMap.get(key) != null) { + if (ruleMap.putIfAbsent(key, head) != null) { throw new AssertionError("already exists: " + topic); } - ruleMap.put(key, head); return head; } @@ -94,7 +95,7 @@ private void parse(Reader reader) throws SQLException, IOException { continue; } String topic = rs.getString("TOPIC"); - syntax = rs.getString("SYNTAX").trim(); + 
syntax = Help.stripAnnotationsFromSyntax(rs.getString("SYNTAX")); currentTopic = section; tokens = tokenize(); index = 0; @@ -118,9 +119,10 @@ private void parse(Reader reader) throws SQLException, IOException { addFixedRule("@hms@", RuleFixed.HMS); addFixedRule("@nanos@", RuleFixed.NANOS); addFixedRule("anything_except_single_quote", RuleFixed.ANY_EXCEPT_SINGLE_QUOTE); + addFixedRule("single_character", RuleFixed.ANY_EXCEPT_SINGLE_QUOTE); addFixedRule("anything_except_double_quote", RuleFixed.ANY_EXCEPT_DOUBLE_QUOTE); addFixedRule("anything_until_end_of_line", RuleFixed.ANY_UNTIL_EOL); - addFixedRule("anything_until_end_comment", RuleFixed.ANY_UNTIL_END); + addFixedRule("anything_until_comment_start_or_end", RuleFixed.ANY_UNTIL_END); addFixedRule("anything_except_two_dollar_signs", RuleFixed.ANY_EXCEPT_2_DOLLAR); addFixedRule("anything", RuleFixed.ANY_WORD); addFixedRule("@hex_start@", RuleFixed.HEX_START); @@ -130,6 +132,7 @@ private void parse(Reader reader) throws SQLException, IOException { addFixedRule("@digit@", RuleFixed.DIGIT); addFixedRule("@open_bracket@", RuleFixed.OPEN_BRACKET); addFixedRule("@close_bracket@", RuleFixed.CLOSE_BRACKET); + addFixedRule("json_text", RuleFixed.JSON_TEXT); } /** @@ -210,6 +213,28 @@ private Rule parseList() { return r; } + private RuleExtension parseExtension(boolean compatibility) { + read(); + Rule r; + if (firstChar == '[') { + read(); + r = parseOr(); + r = new RuleOptional(r); + if (firstChar != ']') { + throw new AssertionError("expected ], got " + currentToken + " syntax:" + syntax); + } + } else if (firstChar == '{') { + read(); + r = parseOr(); + if (firstChar != '}') { + throw new AssertionError("expected }, got " + currentToken + " syntax:" + syntax); + } + } else { + r = parseOr(); + } + return new RuleExtension(r, compatibility); + } + private Rule parseToken() { Rule r; if ((firstChar >= 'A' && firstChar <= 'Z') @@ -218,24 +243,30 @@ private Rule parseToken() { r = new RuleElement(currentToken, currentTopic); 
} else if (firstChar == '[') { read(); - Rule r2 = parseOr(); - r = new RuleOptional(r2); + r = parseOr(); + r = new RuleOptional(r); if (firstChar != ']') { - throw new AssertionError("expected ], got " + currentToken - + " syntax:" + syntax); + throw new AssertionError("expected ], got " + currentToken + " syntax:" + syntax); } } else if (firstChar == '{') { read(); r = parseOr(); if (firstChar != '}') { - throw new AssertionError("expected }, got " + currentToken - + " syntax:" + syntax); + throw new AssertionError("expected }, got " + currentToken + " syntax:" + syntax); + } + } else if (firstChar == '@') { + if ("@commaDots@".equals(currentToken)) { + r = new RuleList(new RuleElement(",", currentTopic), lastRepeat, false); + r = new RuleRepeat(r, true); + } else if ("@dots@".equals(currentToken)) { + r = new RuleRepeat(lastRepeat, false); + } else if ("@c@".equals(currentToken)) { + r = parseExtension(true); + } else if ("@h2@".equals(currentToken)) { + r = parseExtension(false); + } else { + r = new RuleElement(currentToken, currentTopic); } - } else if ("@commaDots@".equals(currentToken)) { - r = new RuleList(new RuleElement(",", currentTopic), lastRepeat, false); - r = new RuleRepeat(r, true); - } else if ("@dots@".equals(currentToken)) { - r = new RuleRepeat(lastRepeat, false); } else { r = new RuleElement(currentToken, currentTopic); } @@ -254,6 +285,19 @@ private void read() { } } + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < index; i++) { + builder.append(tokens[i]).append(' '); + } + builder.append("[*]"); + for (int i = index; i < tokens.length; i++) { + builder.append(' ').append(tokens[i]); + } + return builder.toString(); + } + private String[] tokenize() { ArrayList list = new ArrayList<>(); syntax = StringUtils.replaceAll(syntax, "yyyy-MM-dd", "@ymd@"); diff --git a/h2/src/main/org/h2/bnf/BnfVisitor.java b/h2/src/main/org/h2/bnf/BnfVisitor.java index ec3058843a..1a8ec01d6f 100644 --- 
a/h2/src/main/org/h2/bnf/BnfVisitor.java +++ b/h2/src/main/org/h2/bnf/BnfVisitor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; @@ -51,4 +51,19 @@ public interface BnfVisitor { */ void visitRuleOptional(Rule rule); + /** + * Visit an OR list of optional rules. + * + * @param list the optional rules + */ + void visitRuleOptional(ArrayList list); + + /** + * Visit a rule with non-standard extension. + * + * @param rule the rule + * @param compatibility whether this rule exists for compatibility only + */ + void visitRuleExtension(Rule rule, boolean compatibility); + } diff --git a/h2/src/main/org/h2/bnf/Rule.java b/h2/src/main/org/h2/bnf/Rule.java index 194a2ef892..0070e4e28b 100644 --- a/h2/src/main/org/h2/bnf/Rule.java +++ b/h2/src/main/org/h2/bnf/Rule.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; diff --git a/h2/src/main/org/h2/bnf/RuleElement.java b/h2/src/main/org/h2/bnf/RuleElement.java index 7c72e8620d..aca908583b 100644 --- a/h2/src/main/org/h2/bnf/RuleElement.java +++ b/h2/src/main/org/h2/bnf/RuleElement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf; @@ -77,4 +77,9 @@ public boolean autoComplete(Sentence sentence) { return link.autoComplete(sentence); } + @Override + public String toString() { + return name; + } + } diff --git a/h2/src/main/org/h2/bnf/RuleExtension.java b/h2/src/main/org/h2/bnf/RuleExtension.java new file mode 100644 index 0000000000..217a946da7 --- /dev/null +++ b/h2/src/main/org/h2/bnf/RuleExtension.java @@ -0,0 +1,49 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.bnf; + +import java.util.HashMap; + +/** + * Represents a non-standard syntax. + */ +public class RuleExtension implements Rule { + + private final Rule rule; + private final boolean compatibility; + + private boolean mapSet; + + public RuleExtension(Rule rule, boolean compatibility) { + this.rule = rule; + this.compatibility = compatibility; + } + + @Override + public void accept(BnfVisitor visitor) { + visitor.visitRuleExtension(rule, compatibility); + } + + @Override + public void setLinks(HashMap ruleMap) { + if (!mapSet) { + rule.setLinks(ruleMap); + mapSet = true; + } + } + @Override + public boolean autoComplete(Sentence sentence) { + sentence.stopIfRequired(); + rule.autoComplete(sentence); + return true; + } + + @Override + public String toString() { + return (compatibility ? "@c@ " : "@h2@ ") + rule.toString(); + } + +} diff --git a/h2/src/main/org/h2/bnf/RuleFixed.java b/h2/src/main/org/h2/bnf/RuleFixed.java index aa364bc68c..8557e0ae52 100644 --- a/h2/src/main/org/h2/bnf/RuleFixed.java +++ b/h2/src/main/org/h2/bnf/RuleFixed.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf; @@ -22,6 +22,7 @@ public class RuleFixed implements Rule { public static final int HEX_START = 10, CONCAT = 11; public static final int AZ_UNDERSCORE = 12, AF = 13, DIGIT = 14; public static final int OPEN_BRACKET = 15, CLOSE_BRACKET = 16; + public static final int JSON_TEXT = 17; private final int type; @@ -115,6 +116,7 @@ public boolean autoComplete(Sentence sentence) { } break; case ANY_WORD: + case JSON_TEXT: while (s.length() > 0 && !Bnf.startWithSpace(s)) { s = s.substring(1); } @@ -208,4 +210,9 @@ public boolean autoComplete(Sentence sentence) { return false; } + @Override + public String toString() { + return "#" + type; + } + } diff --git a/h2/src/main/org/h2/bnf/RuleHead.java b/h2/src/main/org/h2/bnf/RuleHead.java index 2ff9cec25b..95891bd1a0 100644 --- a/h2/src/main/org/h2/bnf/RuleHead.java +++ b/h2/src/main/org/h2/bnf/RuleHead.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; diff --git a/h2/src/main/org/h2/bnf/RuleList.java b/h2/src/main/org/h2/bnf/RuleList.java index 30469affd8..30e8f67893 100644 --- a/h2/src/main/org/h2/bnf/RuleList.java +++ b/h2/src/main/org/h2/bnf/RuleList.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf; @@ -15,8 +15,8 @@ */ public class RuleList implements Rule { - private final boolean or; - private final ArrayList list; + final boolean or; + final ArrayList list; private boolean mapSet; public RuleList(Rule first, Rule next, boolean or) { @@ -71,4 +71,20 @@ public boolean autoComplete(Sentence sentence) { return true; } + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + for (int i = 0, l = list.size(); i < l; i++) { + if (i > 0) { + if (or) { + builder.append(" | "); + } else { + builder.append(' '); + } + } + builder.append(list.get(i).toString()); + } + return builder.toString(); + } + } diff --git a/h2/src/main/org/h2/bnf/RuleOptional.java b/h2/src/main/org/h2/bnf/RuleOptional.java index 63f4874513..52cfee7f42 100644 --- a/h2/src/main/org/h2/bnf/RuleOptional.java +++ b/h2/src/main/org/h2/bnf/RuleOptional.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; @@ -20,6 +20,13 @@ public RuleOptional(Rule rule) { @Override public void accept(BnfVisitor visitor) { + if (rule instanceof RuleList) { + RuleList ruleList = (RuleList) rule; + if (ruleList.or) { + visitor.visitRuleOptional(ruleList.list); + return; + } + } visitor.visitRuleOptional(rule); } @@ -37,4 +44,9 @@ public boolean autoComplete(Sentence sentence) { return true; } + @Override + public String toString() { + return '[' + rule.toString() + ']'; + } + } diff --git a/h2/src/main/org/h2/bnf/RuleRepeat.java b/h2/src/main/org/h2/bnf/RuleRepeat.java index 59957791f2..347d03a8e7 100644 --- a/h2/src/main/org/h2/bnf/RuleRepeat.java +++ b/h2/src/main/org/h2/bnf/RuleRepeat.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; @@ -44,4 +44,9 @@ public boolean autoComplete(Sentence sentence) { return true; } + @Override + public String toString() { + return comma ? ", ..." : " ..."; + } + } diff --git a/h2/src/main/org/h2/bnf/Sentence.java b/h2/src/main/org/h2/bnf/Sentence.java index 556e3fef9e..a0993b0892 100644 --- a/h2/src/main/org/h2/bnf/Sentence.java +++ b/h2/src/main/org/h2/bnf/Sentence.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; @@ -8,7 +8,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Objects; -import java.util.concurrent.TimeUnit; import org.h2.bnf.context.DbSchema; import org.h2.bnf.context.DbTableOrView; @@ -37,7 +36,7 @@ public class Sentence { */ public static final int FUNCTION = 2; - private static final long MAX_PROCESSING_TIME = 100; + private static final int MAX_PROCESSING_TIME = 100; /** * The map of next tokens in the form type#tokenName token. @@ -65,7 +64,7 @@ public class Sentence { * Start the timer to make sure processing doesn't take too long. 
*/ public void start() { - stopAtNs = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(MAX_PROCESSING_TIME); + stopAtNs = System.nanoTime() + MAX_PROCESSING_TIME * 1_000_000L; } /** @@ -74,7 +73,7 @@ public void start() { * If processing is stopped, this methods throws an IllegalStateException */ public void stopIfRequired() { - if (System.nanoTime() > stopAtNs) { + if (System.nanoTime() - stopAtNs > 0L) { throw new IllegalStateException(); } } diff --git a/h2/src/main/org/h2/bnf/context/DbColumn.java b/h2/src/main/org/h2/bnf/context/DbColumn.java index 594dd4218d..db187c3e0a 100644 --- a/h2/src/main/org/h2/bnf/context/DbColumn.java +++ b/h2/src/main/org/h2/bnf/context/DbColumn.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf.context; @@ -64,6 +64,7 @@ private DbColumn(DbContents contents, ResultSet rs, boolean procedureColumn) * @param contents the database contents * @param rs the result set * @return the column + * @throws SQLException on failure */ public static DbColumn getProcedureColumn(DbContents contents, ResultSet rs) throws SQLException { @@ -76,6 +77,7 @@ public static DbColumn getProcedureColumn(DbContents contents, ResultSet rs) * @param contents the database contents * @param rs the result set * @return the column + * @throws SQLException on failure */ public static DbColumn getColumn(DbContents contents, ResultSet rs) throws SQLException { diff --git a/h2/src/main/org/h2/bnf/context/DbContents.java b/h2/src/main/org/h2/bnf/context/DbContents.java index ed1a9fa4ba..1cedefb0da 100644 --- a/h2/src/main/org/h2/bnf/context/DbContents.java +++ b/h2/src/main/org/h2/bnf/context/DbContents.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf.context; @@ -11,6 +11,7 @@ import java.sql.SQLException; import java.util.ArrayList; +import org.h2.engine.Session; import org.h2.jdbc.JdbcConnection; import org.h2.util.ParserUtil; import org.h2.util.StringUtils; @@ -36,88 +37,109 @@ public class DbContents { private boolean databaseToUpper, databaseToLower; + private boolean mayHaveStandardViews = true; + /** - * @return The default schema. + * @return the default schema. */ public DbSchema getDefaultSchema() { return defaultSchema; } /** - * @return True if this is an Apache Derby database. + * @return true if this is an Apache Derby database. */ public boolean isDerby() { return isDerby; } /** - * @return True if this is a Firebird database. + * @return true if this is a Firebird database. */ public boolean isFirebird() { return isFirebird; } /** - * @return True if this is a H2 database. + * @return true if this is a H2 database. */ public boolean isH2() { return isH2; } /** - * @return True if this is a MS SQL Server database. + * @return true if this is a MS SQL Server database. */ public boolean isMSSQLServer() { return isMSSQLServer; } /** - * @return True if this is a MySQL database. + * @return true if this is a MySQL database. */ public boolean isMySQL() { return isMySQL; } /** - * @return True if this is an Oracle database. + * @return true if this is an Oracle database. */ public boolean isOracle() { return isOracle; } /** - * @return True if this is a PostgreSQL database. + * @return true if this is a PostgreSQL database. */ public boolean isPostgreSQL() { return isPostgreSQL; } /** - * @return True if this is an SQLite database. + * @return true if this is an SQLite database. 
*/ public boolean isSQLite() { return isSQLite; } /** - * @return True if this is an IBM DB2 database. + * @return true if this is an IBM DB2 database. */ public boolean isDB2() { return isDB2; } /** - * @return The list of schemas. + * @return the list of schemas. */ public DbSchema[] getSchemas() { return schemas; } + /** + * Returns whether standard INFORMATION_SCHEMA.VIEWS may be supported. + * + * @return whether standard INFORMATION_SCHEMA.VIEWS may be supported + */ + public boolean mayHaveStandardViews() { + return mayHaveStandardViews; + } + + /** + * @param mayHaveStandardViews + * whether standard INFORMATION_SCHEMA.VIEWS is detected as + * supported + */ + public void setMayHaveStandardViews(boolean mayHaveStandardViews) { + this.mayHaveStandardViews = mayHaveStandardViews; + } + /** * Read the contents of this database from the database meta data. * * @param url the database URL * @param conn the connection + * @throws SQLException on failure */ public synchronized void readContents(String url, Connection conn) throws SQLException { @@ -133,7 +155,7 @@ public synchronized void readContents(String url, Connection conn) isFirebird = url.startsWith("jdbc:firebirdsql:"); isMSSQLServer = url.startsWith("jdbc:sqlserver:"); if (isH2) { - JdbcConnection.Settings settings = ((JdbcConnection) conn).getSettings(); + Session.StaticSettings settings = ((JdbcConnection) conn).getStaticSettings(); databaseToUpper = settings.databaseToUpper; databaseToLower = settings.databaseToLower; }else if (isMySQL || isPostgreSQL) { @@ -232,7 +254,9 @@ private String[] getSchemaNames(DatabaseMetaData meta) throws SQLException { private String getDefaultSchemaName(DatabaseMetaData meta) { String defaultSchemaName = ""; try { - if (isOracle) { + if (isH2) { + return meta.storesLowerCaseIdentifiers() ? 
"public" : "PUBLIC"; + } else if (isOracle) { return meta.getUserName(); } else if (isPostgreSQL) { return "public"; @@ -243,15 +267,8 @@ private String getDefaultSchemaName(DatabaseMetaData meta) { } else if (isFirebird) { return null; } - ResultSet rs = meta.getSchemas(); - int index = rs.findColumn("IS_DEFAULT"); - while (rs.next()) { - if (rs.getBoolean(index)) { - defaultSchemaName = rs.getString("TABLE_SCHEM"); - } - } } catch (SQLException e) { - // IS_DEFAULT not found + // Ignore } return defaultSchemaName; } diff --git a/h2/src/main/org/h2/bnf/context/DbContextRule.java b/h2/src/main/org/h2/bnf/context/DbContextRule.java index f37431aec4..1d295cdb42 100644 --- a/h2/src/main/org/h2/bnf/context/DbContextRule.java +++ b/h2/src/main/org/h2/bnf/context/DbContextRule.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf.context; @@ -172,9 +172,7 @@ public boolean autoComplete(Sentence sentence) { name = column.getQuotedName(); compare = query; } - if (compare.startsWith(name) && - (columnType == null || - column.getDataType().contains(columnType))) { + if (compare.startsWith(name) && testColumnType(column)) { String b = s.substring(name.length()); if (best == null || b.length() < best.length()) { best = b; @@ -199,8 +197,7 @@ public boolean autoComplete(Sentence sentence) { for (DbColumn column : table.getColumns()) { String name = StringUtils.toUpperEnglish(column .getName()); - if (columnType == null - || column.getDataType().contains(columnType)) { + if (testColumnType(column)) { if (up.startsWith(name)) { String b = s.substring(name.length()); if (best == null || b.length() < best.length()) { @@ -226,7 +223,7 @@ public boolean autoComplete(Sentence sentence) { autoCompleteProcedure(sentence); break; default: - throw DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } if (!s.equals(query)) { while (Bnf.startWithSpace(s)) { @@ -237,6 +234,21 @@ public boolean autoComplete(Sentence sentence) { } return false; } + + private boolean testColumnType(DbColumn column) { + if (columnType == null) { + return true; + } + String type = column.getDataType(); + if (columnType.contains("CHAR") || columnType.contains("CLOB")) { + return type.contains("CHAR") || type.contains("CLOB"); + } + if (columnType.contains("BINARY") || columnType.contains("BLOB")) { + return type.contains("BINARY") || type.contains("BLOB"); + } + return type.contains(columnType); + } + private void autoCompleteProcedure(Sentence sentence) { DbSchema schema = sentence.getLastMatchedSchema(); if (schema == null) { diff --git a/h2/src/main/org/h2/bnf/context/DbProcedure.java b/h2/src/main/org/h2/bnf/context/DbProcedure.java index eb719e123b..0e9a71c2b7 100644 --- a/h2/src/main/org/h2/bnf/context/DbProcedure.java +++ 
b/h2/src/main/org/h2/bnf/context/DbProcedure.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf.context; @@ -71,6 +71,7 @@ public boolean isReturnsResult() { * Read the column for this table from the database meta data. * * @param meta the database meta data + * @throws SQLException on failure */ void readParameters(DatabaseMetaData meta) throws SQLException { ResultSet rs = meta.getProcedureColumns(null, schema.name, name, null); diff --git a/h2/src/main/org/h2/bnf/context/DbSchema.java b/h2/src/main/org/h2/bnf/context/DbSchema.java index 7a4ee503d0..f37e06fbe1 100644 --- a/h2/src/main/org/h2/bnf/context/DbSchema.java +++ b/h2/src/main/org/h2/bnf/context/DbSchema.java @@ -1,14 +1,16 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf.context; +import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; import java.util.ArrayList; import org.h2.engine.SysProperties; @@ -21,6 +23,13 @@ */ public class DbSchema { + private static final String COLUMNS_QUERY_H2_197 = "SELECT COLUMN_NAME, ORDINAL_POSITION, COLUMN_TYPE " + + "FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = ?1 AND TABLE_NAME = ?2"; + + private static final String COLUMNS_QUERY_H2_202 = "SELECT COLUMN_NAME, ORDINAL_POSITION, " + + "DATA_TYPE_SQL(?1, ?2, 'TABLE', ORDINAL_POSITION) COLUMN_TYPE " + + "FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = ?1 AND TABLE_NAME = ?2"; + /** * The schema name. */ @@ -64,7 +73,7 @@ public class DbSchema { if (name == null) { // firebird isSystem = true; - } else if ("INFORMATION_SCHEMA".equals(name)) { + } else if ("INFORMATION_SCHEMA".equalsIgnoreCase(name)) { isSystem = true; } else if (!contents.isH2() && StringUtils.toUpperEnglish(name).startsWith("INFO")) { @@ -105,6 +114,7 @@ public DbProcedure[] getProcedures() { * * @param meta the database meta data * @param tableTypes the table types to read + * @throws SQLException on failure */ public void readTables(DatabaseMetaData meta, String[] tableTypes) throws SQLException { @@ -120,10 +130,7 @@ public void readTables(DatabaseMetaData meta, String[] tableTypes) rs.close(); tables = list.toArray(new DbTableOrView[0]); if (tables.length < SysProperties.CONSOLE_MAX_TABLES_LIST_COLUMNS) { - try (PreparedStatement ps = contents.isH2() ? meta.getConnection().prepareStatement( - "SELECT COLUMN_NAME, ORDINAL_POSITION, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS" - + " WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?") - : null) { + try (PreparedStatement ps = contents.isH2() ? 
prepareColumnsQueryH2(meta.getConnection()) : null) { for (DbTableOrView tab : tables) { try { tab.readColumns(meta, ps); @@ -139,6 +146,14 @@ public void readTables(DatabaseMetaData meta, String[] tableTypes) } } + private static PreparedStatement prepareColumnsQueryH2(Connection connection) throws SQLException { + try { + return connection.prepareStatement(COLUMNS_QUERY_H2_202); + } catch (SQLSyntaxErrorException ex) { + return connection.prepareStatement(COLUMNS_QUERY_H2_197); + } + } + /** * Read all procedures in the database. * diff --git a/h2/src/main/org/h2/bnf/context/DbTableOrView.java b/h2/src/main/org/h2/bnf/context/DbTableOrView.java index 4886859baf..e97ffe4385 100644 --- a/h2/src/main/org/h2/bnf/context/DbTableOrView.java +++ b/h2/src/main/org/h2/bnf/context/DbTableOrView.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf.context; @@ -91,6 +91,7 @@ public String getQuotedName() { * @param meta the database meta data * @param ps prepared statement with custom query for H2 database, null for * others + * @throws SQLException on failure */ public void readColumns(DatabaseMetaData meta, PreparedStatement ps) throws SQLException { ResultSet rs; diff --git a/h2/src/main/org/h2/bnf/context/package.html b/h2/src/main/org/h2/bnf/context/package.html index 50292459ba..0a6386fb30 100644 --- a/h2/src/main/org/h2/bnf/context/package.html +++ b/h2/src/main/org/h2/bnf/context/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/bnf/package.html b/h2/src/main/org/h2/bnf/package.html index 6805463de3..36296736e3 100644 --- a/h2/src/main/org/h2/bnf/package.html +++ b/h2/src/main/org/h2/bnf/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/command/Command.java b/h2/src/main/org/h2/command/Command.java index 83c35a1f2b..f26fb686b8 100644 --- a/h2/src/main/org/h2/command/Command.java +++ b/h2/src/main/org/h2/command/Command.java @@ -1,34 +1,37 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command; import java.sql.SQLException; import java.util.ArrayList; -import java.util.concurrent.TimeUnit; - +import java.util.Set; import org.h2.api.ErrorCode; import org.h2.engine.Constants; import org.h2.engine.Database; +import org.h2.engine.DbObject; +import org.h2.engine.Mode.CharPadding; import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.ParameterInterface; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.result.ResultInterface; import org.h2.result.ResultWithGeneratedKeys; import org.h2.result.ResultWithPaddedStrings; -import org.h2.util.MathUtils; +import org.h2.util.Utils; /** * Represents a SQL statement. This object is only used on the server side. */ public abstract class Command implements CommandInterface { + /** * The session. */ - protected final Session session; + protected final SessionLocal session; /** * The last start time. @@ -49,7 +52,7 @@ public abstract class Command implements CommandInterface { private boolean canReuse; - Command(Session session, String sql) { + Command(SessionLocal session, String sql) { this.session = session; this.sql = sql; trace = session.getDatabase().getTrace(Trace.COMMAND); @@ -71,11 +74,6 @@ public abstract class Command implements CommandInterface { @Override public abstract boolean isQuery(); - /** - * Prepare join batching. - */ - public abstract void prepareJoinBatch(); - /** * Get the list of parameters. * @@ -102,12 +100,16 @@ public abstract class Command implements CommandInterface { * Execute an updating statement (for example insert, delete, or update), if * this is possible. 
* - * @return the update count + * @param generatedKeysRequest + * {@code false} if generated keys are not needed, {@code true} if + * generated keys should be configured automatically, {@code int[]} + * to specify column indices to return generated keys from, or + * {@code String[]} to specify column names to return generated keys + * from + * @return the update count and generated keys, if any * @throws DbException if the command is not an updating statement */ - public int update() { - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_QUERY); - } + public abstract ResultWithGeneratedKeys update(Object generatedKeysRequest); /** * Execute a query statement, if this is possible. @@ -116,9 +118,7 @@ public int update() { * @return the local result set * @throws DbException if the command is not a query */ - public ResultInterface query(@SuppressWarnings("unused") int maxrows) { - throw DbException.get(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY); - } + public abstract ResultInterface query(long maxrows); @Override public final ResultInterface getMetaData() { @@ -130,7 +130,7 @@ public final ResultInterface getMetaData() { */ void start() { if (trace.isInfoEnabled() || session.getDatabase().getQueryStatistics()) { - startTimeNanos = System.nanoTime(); + startTimeNanos = Utils.currentNanoTime(); } } @@ -152,17 +152,12 @@ protected void checkCanceled() { @Override public void stop() { - session.setCurrentCommand(null, false); - if (!isTransactional()) { - session.commit(true); - } else if (session.getAutoCommit()) { + commitIfNonTransactional(); + if (isTransactional() && session.getAutoCommit()) { session.commit(false); - } else { - session.unlockReadLocks(); } - session.endStatement(); - if (trace.isInfoEnabled() && startTimeNanos > 0) { - long timeMillis = (System.nanoTime() - startTimeNanos) / 1000 / 1000; + if (trace.isInfoEnabled() && startTimeNanos != 0L) { + long timeMillis = (System.nanoTime() - startTimeNanos) / 1_000_000L; if (timeMillis > 
Constants.SLOW_QUERY_LIMIT_MS) { trace.info("slow query: {0} ms", timeMillis); } @@ -171,41 +166,38 @@ public void stop() { /** * Execute a query and return the result. - * This method prepares everything and calls {@link #query(int)} finally. + * This method prepares everything and calls {@link #query(long)} finally. * * @param maxrows the maximum number of rows to return * @param scrollable if the result set must be scrollable (ignored) * @return the result set */ @Override - public ResultInterface executeQuery(int maxrows, boolean scrollable) { - startTimeNanos = 0; - long start = 0; + public ResultInterface executeQuery(long maxrows, boolean scrollable) { + startTimeNanos = 0L; + long start = 0L; Database database = session.getDatabase(); - Object sync = database.isMultiThreaded() || database.getStore() != null ? session : database; session.waitIfExclusiveModeEnabled(); boolean callStop = true; - boolean writing = !isReadOnly(); - if (writing) { - while (!database.beforeWriting()) { - // wait - } - } //noinspection SynchronizationOnLocalVariableOrMethodParameter - synchronized (sync) { - session.startStatementWithinTransaction(); - session.setCurrentCommand(this, false); + synchronized (session) { + session.startStatementWithinTransaction(this); + Session oldSession = session.setThreadLocalSession(); try { while (true) { database.checkPowerOff(); try { ResultInterface result = query(maxrows); callStop = !result.isLazy(); - if (database.getMode().padFixedLengthStrings) { + if (database.getMode().charPadding == CharPadding.IN_RESULT_SETS) { return ResultWithPaddedStrings.get(result); } return result; } catch (DbException e) { + // cannot retry DDL + if (isCurrentCommandADefineCommand()) { + throw e; + } start = filterConcurrentUpdate(e, start); } catch (OutOfMemoryError e) { callStop = false; @@ -231,12 +223,11 @@ public ResultInterface executeQuery(int maxrows, boolean scrollable) { database.checkPowerOff(); throw e; } finally { + 
session.resetThreadLocalSession(oldSession); + session.endStatement(); if (callStop) { stop(); } - if (writing) { - database.afterWriting(); - } } } } @@ -245,32 +236,25 @@ public ResultInterface executeQuery(int maxrows, boolean scrollable) { public ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest) { long start = 0; Database database = session.getDatabase(); - Object sync = database.isMultiThreaded() || database.getStore() != null ? session : database; session.waitIfExclusiveModeEnabled(); boolean callStop = true; - boolean writing = !isReadOnly(); - if (writing) { - while (!database.beforeWriting()) { - // wait - } - } //noinspection SynchronizationOnLocalVariableOrMethodParameter - synchronized (sync) { - Session.Savepoint rollback = session.setSavepoint(); - session.startStatementWithinTransaction(); - session.setCurrentCommand(this, generatedKeysRequest); + synchronized (session) { + commitIfNonTransactional(); + SessionLocal.Savepoint rollback = session.setSavepoint(); + session.startStatementWithinTransaction(this); DbException ex = null; + Session oldSession = session.setThreadLocalSession(); try { while (true) { database.checkPowerOff(); try { - int updateCount = update(); - if (!Boolean.FALSE.equals(generatedKeysRequest)) { - return new ResultWithGeneratedKeys.WithKeys(updateCount, - session.getGeneratedKeys().getKeys(session)); - } - return ResultWithGeneratedKeys.of(updateCount); + return update(generatedKeysRequest); } catch (DbException e) { + // cannot retry DDL + if (isCurrentCommandADefineCommand()) { + throw e; + } start = filterConcurrentUpdate(e, start); } catch (OutOfMemoryError e) { callStop = false; @@ -302,7 +286,9 @@ public ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest) { ex = e; throw e; } finally { + session.resetThreadLocalSession(oldSession); try { + session.endStatement(); if (callStop) { stop(); } @@ -312,49 +298,32 @@ public ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest) { } 
else { ex.addSuppressed(nested); } - } finally { - if (writing) { - database.afterWriting(); - } } } } } + private void commitIfNonTransactional() { + if (!isTransactional()) { + boolean autoCommit = session.getAutoCommit(); + session.commit(true); + if (!autoCommit && session.getAutoCommit()) { + session.begin(); + } + } + } + private long filterConcurrentUpdate(DbException e, long start) { int errorCode = e.getErrorCode(); - if (errorCode != ErrorCode.CONCURRENT_UPDATE_1 && - errorCode != ErrorCode.ROW_NOT_FOUND_IN_PRIMARY_INDEX && - errorCode != ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1) { + if (errorCode != ErrorCode.CONCURRENT_UPDATE_1 && errorCode != ErrorCode.ROW_NOT_FOUND_IN_PRIMARY_INDEX + && errorCode != ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1) { throw e; } - long now = System.nanoTime(); - if (start != 0 && TimeUnit.NANOSECONDS.toMillis(now - start) > session.getLockTimeout()) { + long now = Utils.currentNanoTime(); + if (start != 0L && now - start > session.getLockTimeout() * 1_000_000L) { throw DbException.get(ErrorCode.LOCK_TIMEOUT_1, e); } - // Only in PageStore mode we need to sleep here to avoid busy wait loop - Database database = session.getDatabase(); - if (database.getStore() == null) { - int sleep = 1 + MathUtils.randomInt(10); - while (true) { - try { - if (database.isMultiThreaded()) { - Thread.sleep(sleep); - } else { - // although nobody going to notify us - // it is vital to give up lock on a database - database.wait(sleep); - } - } catch (InterruptedException e1) { - // ignore - } - long slept = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - now); - if (slept >= sleep) { - break; - } - } - } - return start == 0 ? now : start; + return start == 0L ? 
now : start; } @Override @@ -364,7 +333,7 @@ public void close() { @Override public void cancel() { - this.cancel = true; + cancel = true; } @Override @@ -400,4 +369,13 @@ public void reuse() { public void setCanReuse(boolean canReuse) { this.canReuse = canReuse; } + + public abstract Set getDependencies(); + + /** + * Is the command we just tried to execute a DefineCommand (i.e. DDL). + * + * @return true if yes + */ + protected abstract boolean isCurrentCommandADefineCommand(); } diff --git a/h2/src/main/org/h2/command/CommandContainer.java b/h2/src/main/org/h2/command/CommandContainer.java index 2b63655763..30fcf5bc53 100644 --- a/h2/src/main/org/h2/command/CommandContainer.java +++ b/h2/src/main/org/h2/command/CommandContainer.java @@ -1,22 +1,39 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Set; import org.h2.api.DatabaseEventListener; -import org.h2.command.dml.Explain; -import org.h2.command.dml.Query; -import org.h2.engine.Session; +import org.h2.api.ErrorCode; +import org.h2.command.ddl.DefineCommand; +import org.h2.command.dml.DataChangeStatement; +import org.h2.engine.Database; +import org.h2.engine.DbObject; +import org.h2.engine.DbSettings; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; import org.h2.expression.Parameter; import org.h2.expression.ParameterInterface; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.result.LocalResult; import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; +import org.h2.result.ResultWithGeneratedKeys; +import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable.ResultOption; +import org.h2.table.Table; import org.h2.table.TableView; +import org.h2.util.StringUtils; +import org.h2.util.Utils; import org.h2.value.Value; -import org.h2.value.ValueNull; /** * Represents a single SQL statements. @@ -24,6 +41,42 @@ */ public class CommandContainer extends Command { + /** + * Collector of generated keys. + */ + private static final class GeneratedKeysCollector implements ResultTarget { + + private final int[] indexes; + private final LocalResult result; + + GeneratedKeysCollector(int[] indexes, LocalResult result) { + this.indexes = indexes; + this.result = result; + } + + @Override + public void limitsWereApplied() { + // Nothing to do + } + + @Override + public long getRowCount() { + // Not required + return 0L; + } + + @Override + public void addRow(Value... 
values) { + int length = indexes.length; + Value[] row = new Value[length]; + for (int i = 0; i < length; i++) { + row[i] = values[indexes[i]]; + } + result.addRow(row); + } + + } + private Prepared prepared; private boolean readOnlyKnown; private boolean readOnly; @@ -34,7 +87,7 @@ public class CommandContainer extends Command { * @param session the session * @param prepared prepared statement */ - static void clearCTE(Session session, Prepared prepared) { + static void clearCTE(SessionLocal session, Prepared prepared) { List cteCleanups = prepared.getCteCleanups(); if (cteCleanups != null) { clearCTE(session, cteCleanups); @@ -47,7 +100,7 @@ static void clearCTE(Session session, Prepared prepared) { * @param session the session * @param views list of view */ - static void clearCTE(Session session, List views) { + static void clearCTE(SessionLocal session, List views) { for (TableView view : views) { // check if view was previously deleted as their name is set to // null @@ -57,7 +110,7 @@ static void clearCTE(Session session, List views) { } } - CommandContainer(Session session, String sql, Prepared prepared) { + public CommandContainer(SessionLocal session, String sql, Prepared prepared) { super(session, sql); prepared.setCommand(this); this.prepared = prepared; @@ -78,38 +131,19 @@ public boolean isQuery() { return prepared.isQuery(); } - @Override - public void prepareJoinBatch() { - if (session.isJoinBatchEnabled()) { - prepareJoinBatch(prepared); - } - } - - private static void prepareJoinBatch(Prepared prepared) { - if (prepared.isQuery()) { - int type = prepared.getType(); - - if (type == CommandInterface.SELECT) { - ((Query) prepared).prepareJoinBatch(); - } else if (type == CommandInterface.EXPLAIN || - type == CommandInterface.EXPLAIN_ANALYZE) { - prepareJoinBatch(((Explain) prepared).getCommand()); - } - } - } - private void recompileIfRequired() { if (prepared.needRecompile()) { // TODO test with 'always recompile' prepared.setModificationMetaId(0); 
String sql = prepared.getSQL(); + ArrayList tokens = prepared.getSQLTokens(); ArrayList oldParams = prepared.getParameters(); Parser parser = new Parser(session); - prepared = parser.parse(sql); + prepared = parser.parse(sql, tokens); long mod = prepared.getModificationMetaId(); prepared.setModificationMetaId(0); ArrayList newParams = prepared.getParameters(); - for (int i = 0, size = newParams.size(); i < size; i++) { + for (int i = 0, size = Math.min(newParams.size(), oldParams.size()); i < size; i++) { Parameter old = oldParams.get(i); if (old.isValueSet()) { Value v = old.getValue(session); @@ -119,25 +153,102 @@ private void recompileIfRequired() { } prepared.prepare(); prepared.setModificationMetaId(mod); - prepareJoinBatch(); } } @Override - public int update() { + public ResultWithGeneratedKeys update(Object generatedKeysRequest) { recompileIfRequired(); setProgress(DatabaseEventListener.STATE_STATEMENT_START); start(); - session.setLastScopeIdentity(ValueNull.INSTANCE); prepared.checkParameters(); - int updateCount = prepared.update(); - prepared.trace(startTimeNanos, updateCount); + ResultWithGeneratedKeys result; + if (generatedKeysRequest != null && !Boolean.FALSE.equals(generatedKeysRequest)) { + if (prepared instanceof DataChangeStatement && prepared.getType() != CommandInterface.DELETE) { + result = executeUpdateWithGeneratedKeys((DataChangeStatement) prepared, + generatedKeysRequest); + } else { + result = new ResultWithGeneratedKeys.WithKeys(prepared.update(), new LocalResult()); + } + } else { + result = ResultWithGeneratedKeys.of(prepared.update()); + } + prepared.trace(startTimeNanos, result.getUpdateCount()); setProgress(DatabaseEventListener.STATE_STATEMENT_END); - return updateCount; + return result; + } + + private ResultWithGeneratedKeys executeUpdateWithGeneratedKeys(DataChangeStatement statement, + Object generatedKeysRequest) { + Database db = session.getDatabase(); + Table table = statement.getTable(); + ArrayList expressionColumns; + 
if (Boolean.TRUE.equals(generatedKeysRequest)) { + expressionColumns = Utils.newSmallArrayList(); + Column[] columns = table.getColumns(); + Index primaryKey = table.findPrimaryKey(); + for (Column column : columns) { + Expression e; + if (column.isIdentity() + || ((e = column.getEffectiveDefaultExpression()) != null && !e.isConstant()) + || (primaryKey != null && primaryKey.getColumnIndex(column) >= 0)) { + expressionColumns.add(new ExpressionColumn(db, column)); + } + } + } else if (generatedKeysRequest instanceof int[]) { + int[] indexes = (int[]) generatedKeysRequest; + Column[] columns = table.getColumns(); + int cnt = columns.length; + expressionColumns = new ArrayList<>(indexes.length); + for (int idx : indexes) { + if (idx < 1 || idx > cnt) { + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "Index: " + idx); + } + expressionColumns.add(new ExpressionColumn(db, columns[idx - 1])); + } + } else if (generatedKeysRequest instanceof String[]) { + String[] names = (String[]) generatedKeysRequest; + expressionColumns = new ArrayList<>(names.length); + for (String name : names) { + Column column = table.findColumn(name); + if (column == null) { + DbSettings settings = db.getSettings(); + if (settings.databaseToUpper) { + column = table.findColumn(StringUtils.toUpperEnglish(name)); + } else if (settings.databaseToLower) { + column = table.findColumn(StringUtils.toLowerEnglish(name)); + } + search: if (column == null) { + for (Column c : table.getColumns()) { + if (c.getName().equalsIgnoreCase(name)) { + column = c; + break search; + } + } + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, name); + } + } + expressionColumns.add(new ExpressionColumn(db, column)); + } + } else { + throw DbException.getInternalError(); + } + int columnCount = expressionColumns.size(); + if (columnCount == 0) { + return new ResultWithGeneratedKeys.WithKeys(statement.update(), new LocalResult()); + } + int[] indexes = new int[columnCount]; + ExpressionColumn[] expressions = 
expressionColumns.toArray(new ExpressionColumn[0]); + for (int i = 0; i < columnCount; i++) { + indexes[i] = expressions[i].getColumn().getColumnId(); + } + LocalResult result = new LocalResult(session, expressions, columnCount, columnCount); + return new ResultWithGeneratedKeys.WithKeys( + statement.update(new GeneratedKeysCollector(indexes, result), ResultOption.FINAL), result); } @Override - public ResultInterface query(int maxrows) { + public ResultInterface query(long maxrows) { recompileIfRequired(); setProgress(DatabaseEventListener.STATE_STATEMENT_START); start(); @@ -192,4 +303,15 @@ void clearCTE() { clearCTE(session, prepared); } + @Override + public Set getDependencies() { + HashSet dependencies = new HashSet<>(); + prepared.collectDependencies(dependencies); + return dependencies; + } + + @Override + protected boolean isCurrentCommandADefineCommand() { + return prepared instanceof DefineCommand; + } } diff --git a/h2/src/main/org/h2/command/CommandInterface.java b/h2/src/main/org/h2/command/CommandInterface.java index 2cc4492cf5..fbe1223ad7 100644 --- a/h2/src/main/org/h2/command/CommandInterface.java +++ b/h2/src/main/org/h2/command/CommandInterface.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command; @@ -13,7 +13,7 @@ /** * Represents a SQL statement. */ -public interface CommandInterface { +public interface CommandInterface extends AutoCloseable { /** * The type for unknown statement. @@ -28,47 +28,47 @@ public interface CommandInterface { int ALTER_INDEX_RENAME = 1; /** - * The type of a ALTER SCHEMA RENAME statement. + * The type of an ALTER SCHEMA RENAME statement. */ int ALTER_SCHEMA_RENAME = 2; /** - * The type of a ALTER TABLE ADD CHECK statement. 
+ * The type of an ALTER TABLE ADD CHECK statement. */ int ALTER_TABLE_ADD_CONSTRAINT_CHECK = 3; /** - * The type of a ALTER TABLE ADD UNIQUE statement. + * The type of an ALTER TABLE ADD UNIQUE statement. */ int ALTER_TABLE_ADD_CONSTRAINT_UNIQUE = 4; /** - * The type of a ALTER TABLE ADD FOREIGN KEY statement. + * The type of an ALTER TABLE ADD FOREIGN KEY statement. */ int ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL = 5; /** - * The type of a ALTER TABLE ADD PRIMARY KEY statement. + * The type of an ALTER TABLE ADD PRIMARY KEY statement. */ int ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY = 6; /** - * The type of a ALTER TABLE ADD statement. + * The type of an ALTER TABLE ADD statement. */ int ALTER_TABLE_ADD_COLUMN = 7; /** - * The type of a ALTER TABLE ALTER COLUMN SET NOT NULL statement. + * The type of an ALTER TABLE ALTER COLUMN SET NOT NULL statement. */ int ALTER_TABLE_ALTER_COLUMN_NOT_NULL = 8; /** - * The type of a ALTER TABLE ALTER COLUMN DROP NOT NULL statement. + * The type of an ALTER TABLE ALTER COLUMN DROP NOT NULL statement. */ int ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL = 9; /** - * The type of a ALTER TABLE ALTER COLUMN SET DEFAULT and ALTER TABLE ALTER + * The type of an ALTER TABLE ALTER COLUMN SET DEFAULT and ALTER TABLE ALTER * COLUMN DROP DEFAULT statements. */ int ALTER_TABLE_ALTER_COLUMN_DEFAULT = 10; @@ -80,52 +80,52 @@ public interface CommandInterface { int ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE = 11; /** - * The type of a ALTER TABLE DROP COLUMN statement. + * The type of an ALTER TABLE DROP COLUMN statement. */ int ALTER_TABLE_DROP_COLUMN = 12; /** - * The type of a ALTER TABLE ALTER COLUMN SELECTIVITY statement. + * The type of an ALTER TABLE ALTER COLUMN SELECTIVITY statement. */ int ALTER_TABLE_ALTER_COLUMN_SELECTIVITY = 13; /** - * The type of a ALTER TABLE DROP CONSTRAINT statement. + * The type of an ALTER TABLE DROP CONSTRAINT statement. */ int ALTER_TABLE_DROP_CONSTRAINT = 14; /** - * The type of a ALTER TABLE RENAME statement. 
+ * The type of an ALTER TABLE RENAME statement. */ int ALTER_TABLE_RENAME = 15; /** - * The type of a ALTER TABLE ALTER COLUMN RENAME statement. + * The type of an ALTER TABLE ALTER COLUMN RENAME statement. */ int ALTER_TABLE_ALTER_COLUMN_RENAME = 16; /** - * The type of a ALTER USER ADMIN statement. + * The type of an ALTER USER ADMIN statement. */ int ALTER_USER_ADMIN = 17; /** - * The type of a ALTER USER RENAME statement. + * The type of an ALTER USER RENAME statement. */ int ALTER_USER_RENAME = 18; /** - * The type of a ALTER USER SET PASSWORD statement. + * The type of an ALTER USER SET PASSWORD statement. */ int ALTER_USER_SET_PASSWORD = 19; /** - * The type of a ALTER VIEW statement. + * The type of an ALTER VIEW statement. */ int ALTER_VIEW = 20; /** - * The type of a ANALYZE statement. + * The type of an ANALYZE statement. */ int ANALYZE = 21; @@ -292,12 +292,12 @@ public interface CommandInterface { // dml operations /** - * The type of a ALTER SEQUENCE statement. + * The type of an ALTER SEQUENCE statement. */ int ALTER_SEQUENCE = 54; /** - * The type of a ALTER TABLE SET REFERENTIAL_INTEGRITY statement. + * The type of an ALTER TABLE SET REFERENTIAL_INTEGRITY statement. */ int ALTER_TABLE_SET_REFERENTIAL_INTEGRITY = 55; @@ -317,17 +317,17 @@ public interface CommandInterface { int DELETE = 58; /** - * The type of a EXECUTE statement. + * The type of an EXECUTE statement. */ int EXECUTE = 59; /** - * The type of a EXPLAIN statement. + * The type of an EXPLAIN statement. */ int EXPLAIN = 60; /** - * The type of a INSERT statement. + * The type of an INSERT statement. */ int INSERT = 61; @@ -367,7 +367,7 @@ public interface CommandInterface { int SET = 67; /** - * The type of a UPDATE statement. + * The type of an UPDATE statement. */ int UPDATE = 68; @@ -454,18 +454,17 @@ public interface CommandInterface { int SHUTDOWN_DEFRAG = 84; /** - * The type of a ALTER TABLE RENAME CONSTRAINT statement. + * The type of an ALTER TABLE RENAME CONSTRAINT statement. 
*/ int ALTER_TABLE_RENAME_CONSTRAINT = 85; - /** - * The type of a EXPLAIN ANALYZE statement. + * The type of an EXPLAIN ANALYZE statement. */ int EXPLAIN_ANALYZE = 86; /** - * The type of a ALTER TABLE ALTER COLUMN SET INVISIBLE statement. + * The type of an ALTER TABLE ALTER COLUMN SET INVISIBLE statement. */ int ALTER_TABLE_ALTER_COLUMN_VISIBILITY = 87; @@ -480,10 +479,68 @@ public interface CommandInterface { int DROP_SYNONYM = 89; /** - * The type of a ALTER TABLE ALTER COLUMN SET ON UPDATE statement. + * The type of an ALTER TABLE ALTER COLUMN SET ON UPDATE statement. */ int ALTER_TABLE_ALTER_COLUMN_ON_UPDATE = 90; + /** + * The type of an EXECUTE IMMEDIATELY statement. + */ + int EXECUTE_IMMEDIATELY = 91; + + /** + * The type of ALTER DOMAIN ADD CONSTRAINT statement. + */ + int ALTER_DOMAIN_ADD_CONSTRAINT = 92; + + /** + * The type of ALTER DOMAIN DROP CONSTRAINT statement. + */ + int ALTER_DOMAIN_DROP_CONSTRAINT = 93; + + /** + * The type of an ALTER DOMAIN SET DEFAULT and ALTER DOMAIN DROP DEFAULT + * statements. + */ + int ALTER_DOMAIN_DEFAULT = 94; + + /** + * The type of an ALTER DOMAIN SET ON UPDATE and ALTER DOMAIN DROP ON UPDATE + * statements. + */ + int ALTER_DOMAIN_ON_UPDATE = 95; + + /** + * The type of an ALTER DOMAIN RENAME statement. + */ + int ALTER_DOMAIN_RENAME = 96; + + /** + * The type of a HELP statement. + */ + int HELP = 97; + + /** + * The type of an ALTER TABLE ALTER COLUMN DROP EXPRESSION statement. + */ + int ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION = 98; + + /** + * The type of an ALTER TABLE ALTER COLUMN DROP IDENTITY statement. + */ + int ALTER_TABLE_ALTER_COLUMN_DROP_IDENTITY = 99; + + /** + * The type of ALTER TABLE ALTER COLUMN SET DEFAULT ON NULL and ALTER TABLE + * ALTER COLUMN DROP DEFAULT ON NULL statements. + */ + int ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL = 100; + + /** + * The type of an ALTER DOMAIN RENAME CONSTRAINT statement. + */ + int ALTER_DOMAIN_RENAME_CONSTRAINT = 101; + /** * Get command type. 
* @@ -512,19 +569,19 @@ public interface CommandInterface { * @param scrollable if the result set must be scrollable * @return the result */ - ResultInterface executeQuery(int maxRows, boolean scrollable); + ResultInterface executeQuery(long maxRows, boolean scrollable); /** * Execute the statement * * @param generatedKeysRequest - * {@code false} if generated keys are not needed, {@code true} if - * generated keys should be configured automatically, {@code int[]} - * to specify column indices to return generated keys from, or - * {@code String[]} to specify column names to return generated keys - * from + * {@code null} or {@code false} if generated keys are not + * needed, {@code true} if generated keys should be configured + * automatically, {@code int[]} to specify column indices to + * return generated keys from, or {@code String[]} to specify + * column names to return generated keys from * - * @return the update count + * @return the update count and generated keys, if any */ ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest); @@ -536,6 +593,7 @@ public interface CommandInterface { /** * Close the statement. */ + @Override void close(); /** @@ -549,4 +607,5 @@ public interface CommandInterface { * @return the empty result */ ResultInterface getMetaData(); + } diff --git a/h2/src/main/org/h2/command/CommandList.java b/h2/src/main/org/h2/command/CommandList.java index 2ed6a70e61..f3d17e1162 100644 --- a/h2/src/main/org/h2/command/CommandList.java +++ b/h2/src/main/org/h2/command/CommandList.java @@ -1,16 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command; import java.util.ArrayList; - -import org.h2.engine.Session; +import java.util.HashSet; +import java.util.Set; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; import org.h2.expression.Parameter; import org.h2.expression.ParameterInterface; import org.h2.result.ResultInterface; +import org.h2.result.ResultWithGeneratedKeys; /** * Represents a list of SQL statements. @@ -23,7 +26,7 @@ class CommandList extends Command { private String remaining; private Command remainingCommand; - CommandList(Session session, String sql, CommandContainer command, ArrayList commands, + CommandList(SessionLocal session, String sql, CommandContainer command, ArrayList commands, ArrayList parameters, String remaining) { super(session, sql); this.command = command; @@ -52,25 +55,20 @@ private void executeRemaining() { if (remainingCommand.isQuery()) { remainingCommand.query(0); } else { - remainingCommand.update(); + remainingCommand.update(null); } } } @Override - public int update() { - int updateCount = command.executeUpdate(false).getUpdateCount(); + public ResultWithGeneratedKeys update(Object generatedKeysRequest) { + ResultWithGeneratedKeys result = command.executeUpdate(null); executeRemaining(); - return updateCount; - } - - @Override - public void prepareJoinBatch() { - command.prepareJoinBatch(); + return result; } @Override - public ResultInterface query(int maxrows) { + public ResultInterface query(long maxrows) { ResultInterface result = command.query(maxrows); executeRemaining(); return result; @@ -112,4 +110,17 @@ public int getCommandType() { return command.getCommandType(); } + @Override + public Set getDependencies() { + HashSet dependencies = new HashSet<>(); + for (Prepared prepared : commands) { + prepared.collectDependencies(dependencies); + } + return dependencies; + } + + @Override + protected boolean isCurrentCommandADefineCommand() { + return command.isCurrentCommandADefineCommand(); + 
} } diff --git a/h2/src/main/org/h2/command/CommandRemote.java b/h2/src/main/org/h2/command/CommandRemote.java index abe2386cb9..7807ef4b7a 100644 --- a/h2/src/main/org/h2/command/CommandRemote.java +++ b/h2/src/main/org/h2/command/CommandRemote.java @@ -1,14 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command; import java.io.IOException; import java.util.ArrayList; - -import org.h2.engine.Constants; import org.h2.engine.GeneratedKeysMode; import org.h2.engine.SessionRemote; import org.h2.engine.SysProperties; @@ -22,6 +20,7 @@ import org.h2.util.Utils; import org.h2.value.Transfer; import org.h2.value.Value; +import org.h2.value.ValueLob; import org.h2.value.ValueNull; /** @@ -58,8 +57,7 @@ public CommandRemote(SessionRemote session, @Override public void stop() { - // Must never be called, because remote result is not lazy. - throw DbException.throwInternalError(); + // Ignore } private void prepare(SessionRemote s, boolean createParams) { @@ -68,14 +66,9 @@ private void prepare(SessionRemote s, boolean createParams) { try { Transfer transfer = transferList.get(i); - boolean v16 = s.getClientVersion() >= Constants.TCP_PROTOCOL_VERSION_16; - if (createParams) { - s.traceOperation(v16 ? "SESSION_PREPARE_READ_PARAMS2" - : "SESSION_PREPARE_READ_PARAMS", id); - transfer.writeInt( - v16 ? 
SessionRemote.SESSION_PREPARE_READ_PARAMS2 - : SessionRemote.SESSION_PREPARE_READ_PARAMS) + s.traceOperation("SESSION_PREPARE_READ_PARAMS2", id); + transfer.writeInt(SessionRemote.SESSION_PREPARE_READ_PARAMS2) .writeInt(id).writeString(sql); } else { s.traceOperation("SESSION_PREPARE", id); @@ -86,7 +79,7 @@ private void prepare(SessionRemote s, boolean createParams) { isQuery = transfer.readBoolean(); readonly = transfer.readBoolean(); - cmdType = v16 && createParams ? transfer.readInt() : UNKNOWN; + cmdType = createParams ? transfer.readInt() : UNKNOWN; int paramCount = transfer.readInt(); if (createParams) { @@ -155,7 +148,7 @@ public ResultInterface getMetaData() { } @Override - public ResultInterface executeQuery(int maxRows, boolean scrollable) { + public ResultInterface executeQuery(long maxRows, boolean scrollable) { checkParameters(); synchronized (session) { int objectId = session.getNextId(); @@ -165,8 +158,8 @@ public ResultInterface executeQuery(int maxRows, boolean scrollable) { Transfer transfer = transferList.get(i); try { session.traceOperation("COMMAND_EXECUTE_QUERY", id); - transfer.writeInt(SessionRemote.COMMAND_EXECUTE_QUERY). - writeInt(id).writeInt(objectId).writeInt(maxRows); + transfer.writeInt(SessionRemote.COMMAND_EXECUTE_QUERY).writeInt(id).writeInt(objectId); + transfer.writeRowCount(maxRows); int fetch; if (session.isClustered() || scrollable) { fetch = Integer.MAX_VALUE; @@ -198,11 +191,11 @@ public ResultInterface executeQuery(int maxRows, boolean scrollable) { @Override public ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest) { checkParameters(); - boolean supportsGeneratedKeys = session.isSupportsGeneratedKeys(); - boolean readGeneratedKeys = supportsGeneratedKeys && !Boolean.FALSE.equals(generatedKeysRequest); + int generatedKeysMode = GeneratedKeysMode.valueOf(generatedKeysRequest); + boolean readGeneratedKeys = generatedKeysMode != GeneratedKeysMode.NONE; int objectId = readGeneratedKeys ? 
session.getNextId() : 0; synchronized (session) { - int updateCount = 0; + long updateCount = 0L; ResultRemote generatedKeys = null; boolean autoCommit = false; for (int i = 0, count = 0; i < transferList.size(); i++) { @@ -212,30 +205,27 @@ public ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest) { session.traceOperation("COMMAND_EXECUTE_UPDATE", id); transfer.writeInt(SessionRemote.COMMAND_EXECUTE_UPDATE).writeInt(id); sendParameters(transfer); - if (supportsGeneratedKeys) { - int mode = GeneratedKeysMode.valueOf(generatedKeysRequest); - transfer.writeInt(mode); - switch (mode) { - case GeneratedKeysMode.COLUMN_NUMBERS: { - int[] keys = (int[]) generatedKeysRequest; - transfer.writeInt(keys.length); - for (int key : keys) { - transfer.writeInt(key); - } - break; - } - case GeneratedKeysMode.COLUMN_NAMES: { - String[] keys = (String[]) generatedKeysRequest; - transfer.writeInt(keys.length); - for (String key : keys) { - transfer.writeString(key); - } - break; + transfer.writeInt(generatedKeysMode); + switch (generatedKeysMode) { + case GeneratedKeysMode.COLUMN_NUMBERS: { + int[] keys = (int[]) generatedKeysRequest; + transfer.writeInt(keys.length); + for (int key : keys) { + transfer.writeInt(key); } + break; + } + case GeneratedKeysMode.COLUMN_NAMES: { + String[] keys = (String[]) generatedKeysRequest; + transfer.writeInt(keys.length); + for (String key : keys) { + transfer.writeString(key); } + break; + } } session.done(transfer); - updateCount = transfer.readInt(); + updateCount = transfer.readRowCount(); autoCommit = transfer.readBoolean(); if (readGeneratedKeys) { int columnCount = transfer.readInt(); @@ -300,8 +290,8 @@ public void close() { try { for (ParameterInterface p : parameters) { Value v = p.getParamValue(); - if (v != null) { - v.remove(); + if (v instanceof ValueLob) { + ((ValueLob) v).remove(); } } } catch (DbException e) { diff --git a/h2/src/main/org/h2/command/Parser.java b/h2/src/main/org/h2/command/Parser.java index 
4c0121735a..6aa8a51d37 100644 --- a/h2/src/main/org/h2/command/Parser.java +++ b/h2/src/main/org/h2/command/Parser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group * * Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 @@ -8,77 +8,150 @@ */ package org.h2.command; +import static org.h2.command.Token.ASTERISK; +import static org.h2.command.Token.AT; +import static org.h2.command.Token.BIGGER; +import static org.h2.command.Token.BIGGER_EQUAL; +import static org.h2.command.Token.CLOSE_BRACE; +import static org.h2.command.Token.CLOSE_BRACKET; +import static org.h2.command.Token.CLOSE_PAREN; +import static org.h2.command.Token.COLON; +import static org.h2.command.Token.COLON_COLON; +import static org.h2.command.Token.COLON_EQ; +import static org.h2.command.Token.COMMA; +import static org.h2.command.Token.CONCATENATION; +import static org.h2.command.Token.DOT; +import static org.h2.command.Token.END_OF_INPUT; +import static org.h2.command.Token.EQUAL; +import static org.h2.command.Token.LITERAL; +import static org.h2.command.Token.MINUS_SIGN; +import static org.h2.command.Token.NOT_EQUAL; +import static org.h2.command.Token.NOT_TILDE; +import static org.h2.command.Token.OPEN_BRACE; +import static org.h2.command.Token.OPEN_BRACKET; +import static org.h2.command.Token.OPEN_PAREN; +import static org.h2.command.Token.PARAMETER; +import static org.h2.command.Token.PERCENT; +import static org.h2.command.Token.PLUS_SIGN; +import static org.h2.command.Token.SEMICOLON; +import static org.h2.command.Token.SLASH; +import static org.h2.command.Token.SMALLER; +import static org.h2.command.Token.SMALLER_EQUAL; +import static org.h2.command.Token.SPATIAL_INTERSECTS; +import static org.h2.command.Token.TILDE; +import 
static org.h2.command.Token.TOKENS; import static org.h2.util.ParserUtil.ALL; +import static org.h2.util.ParserUtil.AND; +import static org.h2.util.ParserUtil.ANY; import static org.h2.util.ParserUtil.ARRAY; +import static org.h2.util.ParserUtil.AS; +import static org.h2.util.ParserUtil.ASYMMETRIC; +import static org.h2.util.ParserUtil.AUTHORIZATION; +import static org.h2.util.ParserUtil.BETWEEN; import static org.h2.util.ParserUtil.CASE; +import static org.h2.util.ParserUtil.CAST; import static org.h2.util.ParserUtil.CHECK; import static org.h2.util.ParserUtil.CONSTRAINT; import static org.h2.util.ParserUtil.CROSS; +import static org.h2.util.ParserUtil.CURRENT_CATALOG; import static org.h2.util.ParserUtil.CURRENT_DATE; +import static org.h2.util.ParserUtil.CURRENT_PATH; +import static org.h2.util.ParserUtil.CURRENT_ROLE; +import static org.h2.util.ParserUtil.CURRENT_SCHEMA; import static org.h2.util.ParserUtil.CURRENT_TIME; import static org.h2.util.ParserUtil.CURRENT_TIMESTAMP; import static org.h2.util.ParserUtil.CURRENT_USER; +import static org.h2.util.ParserUtil.DAY; +import static org.h2.util.ParserUtil.DEFAULT; import static org.h2.util.ParserUtil.DISTINCT; +import static org.h2.util.ParserUtil.ELSE; +import static org.h2.util.ParserUtil.END; import static org.h2.util.ParserUtil.EXCEPT; import static org.h2.util.ParserUtil.EXISTS; import static org.h2.util.ParserUtil.FALSE; import static org.h2.util.ParserUtil.FETCH; +import static org.h2.util.ParserUtil.FIRST_KEYWORD; import static org.h2.util.ParserUtil.FOR; import static org.h2.util.ParserUtil.FOREIGN; import static org.h2.util.ParserUtil.FROM; import static org.h2.util.ParserUtil.FULL; import static org.h2.util.ParserUtil.GROUP; import static org.h2.util.ParserUtil.HAVING; +import static org.h2.util.ParserUtil.HOUR; import static org.h2.util.ParserUtil.IDENTIFIER; import static org.h2.util.ParserUtil.IF; +import static org.h2.util.ParserUtil.IN; import static org.h2.util.ParserUtil.INNER; import static 
org.h2.util.ParserUtil.INTERSECT; -import static org.h2.util.ParserUtil.INTERSECTS; import static org.h2.util.ParserUtil.INTERVAL; import static org.h2.util.ParserUtil.IS; import static org.h2.util.ParserUtil.JOIN; +import static org.h2.util.ParserUtil.KEY; +import static org.h2.util.ParserUtil.LAST_KEYWORD; +import static org.h2.util.ParserUtil.LEFT; import static org.h2.util.ParserUtil.LIKE; import static org.h2.util.ParserUtil.LIMIT; import static org.h2.util.ParserUtil.LOCALTIME; import static org.h2.util.ParserUtil.LOCALTIMESTAMP; import static org.h2.util.ParserUtil.MINUS; +import static org.h2.util.ParserUtil.MINUTE; +import static org.h2.util.ParserUtil.MONTH; import static org.h2.util.ParserUtil.NATURAL; import static org.h2.util.ParserUtil.NOT; import static org.h2.util.ParserUtil.NULL; import static org.h2.util.ParserUtil.OFFSET; import static org.h2.util.ParserUtil.ON; +import static org.h2.util.ParserUtil.OR; import static org.h2.util.ParserUtil.ORDER; import static org.h2.util.ParserUtil.PRIMARY; import static org.h2.util.ParserUtil.QUALIFY; +import static org.h2.util.ParserUtil.RIGHT; import static org.h2.util.ParserUtil.ROW; import static org.h2.util.ParserUtil.ROWNUM; +import static org.h2.util.ParserUtil.SECOND; import static org.h2.util.ParserUtil.SELECT; +import static org.h2.util.ParserUtil.SESSION_USER; +import static org.h2.util.ParserUtil.SET; +import static org.h2.util.ParserUtil.SOME; +import static org.h2.util.ParserUtil.SYMMETRIC; +import static org.h2.util.ParserUtil.SYSTEM_USER; import static org.h2.util.ParserUtil.TABLE; +import static org.h2.util.ParserUtil.TO; import static org.h2.util.ParserUtil.TRUE; import static org.h2.util.ParserUtil.UNION; import static org.h2.util.ParserUtil.UNIQUE; +import static org.h2.util.ParserUtil.UNKNOWN; +import static org.h2.util.ParserUtil.USER; +import static org.h2.util.ParserUtil.USING; +import static org.h2.util.ParserUtil.VALUE; import static org.h2.util.ParserUtil.VALUES; +import static 
org.h2.util.ParserUtil.WHEN; import static org.h2.util.ParserUtil.WHERE; import static org.h2.util.ParserUtil.WINDOW; import static org.h2.util.ParserUtil.WITH; +import static org.h2.util.ParserUtil.YEAR; import static org.h2.util.ParserUtil._ROWID_; -import java.math.BigDecimal; -import java.math.BigInteger; import java.nio.charset.Charset; import java.text.Collator; import java.util.ArrayList; import java.util.Arrays; +import java.util.BitSet; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; - +import java.util.TreeSet; import org.h2.api.ErrorCode; import org.h2.api.IntervalQualifier; import org.h2.api.Trigger; +import org.h2.command.ddl.AlterDomainAddConstraint; +import org.h2.command.ddl.AlterDomainDropConstraint; +import org.h2.command.ddl.AlterDomainExpressions; +import org.h2.command.ddl.AlterDomainRename; +import org.h2.command.ddl.AlterDomainRenameConstraint; import org.h2.command.ddl.AlterIndexRename; import org.h2.command.ddl.AlterSchemaRename; import org.h2.command.ddl.AlterSequence; @@ -124,7 +197,6 @@ import org.h2.command.ddl.DropView; import org.h2.command.ddl.GrantRevoke; import org.h2.command.ddl.PrepareProcedure; -import org.h2.command.ddl.SchemaCommand; import org.h2.command.ddl.SequenceOptions; import org.h2.command.ddl.SetComment; import org.h2.command.ddl.TruncateTable; @@ -132,47 +204,65 @@ import org.h2.command.dml.BackupCommand; import org.h2.command.dml.Call; import org.h2.command.dml.CommandWithValues; +import org.h2.command.dml.DataChangeStatement; import org.h2.command.dml.Delete; +import org.h2.command.dml.ExecuteImmediate; import org.h2.command.dml.ExecuteProcedure; import org.h2.command.dml.Explain; +import org.h2.command.dml.Help; import org.h2.command.dml.Insert; import org.h2.command.dml.Merge; import org.h2.command.dml.MergeUsing; import org.h2.command.dml.NoOperation; -import 
org.h2.command.dml.Query; -import org.h2.command.dml.Replace; import org.h2.command.dml.RunScriptCommand; import org.h2.command.dml.ScriptCommand; -import org.h2.command.dml.Select; -import org.h2.command.dml.SelectOrderBy; -import org.h2.command.dml.SelectUnion; import org.h2.command.dml.Set; +import org.h2.command.dml.SetClauseList; +import org.h2.command.dml.SetSessionCharacteristics; import org.h2.command.dml.SetTypes; import org.h2.command.dml.TransactionCommand; import org.h2.command.dml.Update; +import org.h2.command.query.Query; +import org.h2.command.query.QueryOrderBy; +import org.h2.command.query.Select; +import org.h2.command.query.SelectUnion; +import org.h2.command.query.TableValueConstructor; import org.h2.constraint.ConstraintActionType; +import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.Domain; -import org.h2.engine.FunctionAlias; +import org.h2.engine.DbSettings; +import org.h2.engine.IsolationLevel; import org.h2.engine.Mode; import org.h2.engine.Mode.ModeEnum; import org.h2.engine.Procedure; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.User; -import org.h2.engine.UserAggregate; import org.h2.expression.Alias; +import org.h2.expression.ArrayConstructorByQuery; +import org.h2.expression.ArrayElementReference; import org.h2.expression.BinaryOperation; import org.h2.expression.BinaryOperation.OpType; +import org.h2.expression.ConcatenationOperation; +import org.h2.expression.DomainValueExpression; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionList; +import org.h2.expression.ExpressionWithFlags; +import org.h2.expression.ExpressionWithVariableParameters; +import org.h2.expression.FieldReference; +import org.h2.expression.Format; +import org.h2.expression.Format.FormatEnum; import org.h2.expression.Parameter; 
import org.h2.expression.Rownum; +import org.h2.expression.SearchedCase; import org.h2.expression.SequenceValue; +import org.h2.expression.SimpleCase; import org.h2.expression.Subquery; +import org.h2.expression.TimeZoneOperation; +import org.h2.expression.TypedValueExpression; import org.h2.expression.UnaryOperation; import org.h2.expression.ValueExpression; import org.h2.expression.Variable; @@ -181,6 +271,7 @@ import org.h2.expression.aggregate.Aggregate; import org.h2.expression.aggregate.AggregateType; import org.h2.expression.aggregate.JavaAggregate; +import org.h2.expression.aggregate.ListaggArguments; import org.h2.expression.analysis.DataAnalysisOperation; import org.h2.expression.analysis.Window; import org.h2.expression.analysis.WindowFrame; @@ -190,58 +281,128 @@ import org.h2.expression.analysis.WindowFrameUnits; import org.h2.expression.analysis.WindowFunction; import org.h2.expression.analysis.WindowFunctionType; +import org.h2.expression.condition.BetweenPredicate; +import org.h2.expression.condition.BooleanTest; import org.h2.expression.condition.CompareLike; +import org.h2.expression.condition.CompareLike.LikeType; import org.h2.expression.condition.Comparison; import org.h2.expression.condition.ConditionAndOr; -import org.h2.expression.condition.ConditionExists; +import org.h2.expression.condition.ConditionAndOrN; import org.h2.expression.condition.ConditionIn; import org.h2.expression.condition.ConditionInParameter; -import org.h2.expression.condition.ConditionInSelect; +import org.h2.expression.condition.ConditionInQuery; +import org.h2.expression.condition.ConditionLocalAndGlobal; import org.h2.expression.condition.ConditionNot; -import org.h2.expression.function.Function; -import org.h2.expression.function.FunctionCall; +import org.h2.expression.condition.ExistsPredicate; +import org.h2.expression.condition.IsJsonPredicate; +import org.h2.expression.condition.NullPredicate; +import org.h2.expression.condition.TypePredicate; +import 
org.h2.expression.condition.UniquePredicate; +import org.h2.expression.function.ArrayFunction; +import org.h2.expression.function.BitFunction; +import org.h2.expression.function.BuiltinFunctions; +import org.h2.expression.function.CSVWriteFunction; +import org.h2.expression.function.CardinalityExpression; +import org.h2.expression.function.CastSpecification; +import org.h2.expression.function.CoalesceFunction; +import org.h2.expression.function.CompatibilitySequenceValueFunction; +import org.h2.expression.function.CompressFunction; +import org.h2.expression.function.ConcatFunction; +import org.h2.expression.function.CryptFunction; +import org.h2.expression.function.CurrentDateTimeValueFunction; +import org.h2.expression.function.CurrentGeneralValueSpecification; +import org.h2.expression.function.DBObjectFunction; +import org.h2.expression.function.DataTypeSQLFunction; +import org.h2.expression.function.DateTimeFormatFunction; +import org.h2.expression.function.DateTimeFunction; +import org.h2.expression.function.DayMonthNameFunction; +import org.h2.expression.function.FileFunction; +import org.h2.expression.function.HashFunction; import org.h2.expression.function.JavaFunction; -import org.h2.expression.function.TableFunction; +import org.h2.expression.function.JsonConstructorFunction; +import org.h2.expression.function.LengthFunction; +import org.h2.expression.function.MathFunction; +import org.h2.expression.function.MathFunction1; +import org.h2.expression.function.MathFunction2; +import org.h2.expression.function.NullIfFunction; +import org.h2.expression.function.RandFunction; +import org.h2.expression.function.RegexpFunction; +import org.h2.expression.function.SessionControlFunction; +import org.h2.expression.function.SetFunction; +import org.h2.expression.function.SignalFunction; +import org.h2.expression.function.SoundexFunction; +import org.h2.expression.function.StringFunction; +import org.h2.expression.function.StringFunction1; +import 
org.h2.expression.function.StringFunction2; +import org.h2.expression.function.SubstringFunction; +import org.h2.expression.function.SysInfoFunction; +import org.h2.expression.function.TableInfoFunction; +import org.h2.expression.function.ToCharFunction; +import org.h2.expression.function.TrimFunction; +import org.h2.expression.function.TruncateValueFunction; +import org.h2.expression.function.XMLFunction; +import org.h2.expression.function.table.ArrayTableFunction; +import org.h2.expression.function.table.CSVReadFunction; +import org.h2.expression.function.table.JavaTableFunction; +import org.h2.expression.function.table.LinkSchemaFunction; +import org.h2.expression.function.table.TableFunction; import org.h2.index.Index; import org.h2.message.DbException; +import org.h2.mode.FunctionsPostgreSQL; +import org.h2.mode.ModeFunction; +import org.h2.mode.OnDuplicateKeyValues; +import org.h2.mode.Regclass; import org.h2.result.SortOrder; +import org.h2.schema.Domain; +import org.h2.schema.FunctionAlias; import org.h2.schema.Schema; import org.h2.schema.Sequence; +import org.h2.schema.UserAggregate; +import org.h2.schema.UserDefinedFunction; import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable; +import org.h2.table.DataChangeDeltaTable.ResultOption; +import org.h2.table.DualTable; import org.h2.table.FunctionTable; import org.h2.table.IndexColumn; import org.h2.table.IndexHints; import org.h2.table.RangeTable; import org.h2.table.Table; import org.h2.table.TableFilter; -import org.h2.table.TableFilter.TableFilterVisitor; import org.h2.table.TableView; +import org.h2.util.HasSQL; import org.h2.util.IntervalUtils; import org.h2.util.ParserUtil; import org.h2.util.StringUtils; import org.h2.util.Utils; import org.h2.util.geometry.EWKTUtils; +import org.h2.util.json.JSONItemType; +import org.h2.util.json.JsonConstructorUtils; import org.h2.value.CompareMode; import org.h2.value.DataType; -import org.h2.value.ExtTypeInfo; import org.h2.value.ExtTypeInfoEnum; 
import org.h2.value.ExtTypeInfoGeometry; +import org.h2.value.ExtTypeInfoNumeric; +import org.h2.value.ExtTypeInfoRow; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueArray; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueBytes; +import org.h2.value.ValueBigint; import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; -import org.h2.value.ValueInt; +import org.h2.value.ValueDouble; +import org.h2.value.ValueGeometry; +import org.h2.value.ValueInteger; import org.h2.value.ValueInterval; -import org.h2.value.ValueLong; +import org.h2.value.ValueJson; import org.h2.value.ValueNull; +import org.h2.value.ValueNumeric; import org.h2.value.ValueRow; -import org.h2.value.ValueString; import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; import org.h2.value.ValueTimestamp; import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueUuid; +import org.h2.value.ValueVarchar; /** * The parser is used to convert a SQL statement string to an command object. @@ -256,361 +417,8 @@ public class Parser { "WITH statement supports only SELECT, TABLE, VALUES, " + "CREATE TABLE, INSERT, UPDATE, MERGE or DELETE statements"; - // used during the tokenizer phase - private static final int CHAR_END = 1, CHAR_VALUE = 2, CHAR_QUOTED = 3; - private static final int CHAR_NAME = 4, CHAR_SPECIAL_1 = 5, - CHAR_SPECIAL_2 = 6; - private static final int CHAR_STRING = 7, CHAR_DOT = 8, - CHAR_DOLLAR_QUOTED_STRING = 9; - - // this are token types, see also types in ParserUtil - - /** - * Token with parameter. - */ - private static final int PARAMETER = WITH + 1; - - /** - * End of input. - */ - private static final int END = PARAMETER + 1; - - /** - * Token with value. - */ - private static final int VALUE = END + 1; - - /** - * The token "=". - */ - private static final int EQUAL = VALUE + 1; - - /** - * The token ">=". - */ - private static final int BIGGER_EQUAL = EQUAL + 1; - - /** - * The token ">". 
- */ - private static final int BIGGER = BIGGER_EQUAL + 1; - - /** - * The token "<". - */ - private static final int SMALLER = BIGGER + 1; - - /** - * The token "<=". - */ - private static final int SMALLER_EQUAL = SMALLER + 1; - - /** - * The token "<>" or "!=". - */ - private static final int NOT_EQUAL = SMALLER_EQUAL + 1; - - /** - * The token "@". - */ - private static final int AT = NOT_EQUAL + 1; - - /** - * The token "-". - */ - private static final int MINUS_SIGN = AT + 1; - - /** - * The token "+". - */ - private static final int PLUS_SIGN = MINUS_SIGN + 1; - - /** - * The token "||". - */ - private static final int STRING_CONCAT = PLUS_SIGN + 1; - - /** - * The token "(". - */ - private static final int OPEN_PAREN = STRING_CONCAT + 1; - - /** - * The token ")". - */ - private static final int CLOSE_PAREN = OPEN_PAREN + 1; - - /** - * The token "&&". - */ - private static final int SPATIAL_INTERSECTS = CLOSE_PAREN + 1; - - /** - * The token "*". - */ - private static final int ASTERISK = SPATIAL_INTERSECTS + 1; - - /** - * The token ",". - */ - private static final int COMMA = ASTERISK + 1; - - /** - * The token ".". - */ - private static final int DOT = COMMA + 1; - - /** - * The token "{". - */ - private static final int OPEN_BRACE = DOT + 1; - - /** - * The token "}". - */ - private static final int CLOSE_BRACE = OPEN_BRACE + 1; - - /** - * The token "/". - */ - private static final int SLASH = CLOSE_BRACE + 1; - - /** - * The token "%". - */ - private static final int PERCENT = SLASH + 1; - - /** - * The token ";". - */ - private static final int SEMICOLON = PERCENT + 1; - - /** - * The token ":". - */ - private static final int COLON = SEMICOLON + 1; - - /** - * The token "[". - */ - private static final int OPEN_BRACKET = COLON + 1; - - /** - * The token "]". - */ - private static final int CLOSE_BRACKET = OPEN_BRACKET + 1; - - /** - * The token "~". - */ - private static final int TILDE = CLOSE_BRACKET + 1; - - /** - * The token "::". 
- */ - private static final int COLON_COLON = TILDE + 1; - - /** - * The token ":=". - */ - private static final int COLON_EQ = COLON_COLON + 1; - - /** - * The token "!~". - */ - private static final int NOT_TILDE = COLON_EQ + 1; - - private static final String[] TOKENS = { - // Unused - null, - // KEYWORD - null, - // IDENTIFIER - null, - // ALL - "ALL", - // ARRAY - "ARRAY", - // CASE - "CASE", - // CHECK - "CHECK", - // CONSTRAINT - "CONSTRAINT", - // CROSS - "CROSS", - // CURRENT_DATE - "CURRENT_DATE", - // CURRENT_TIME - "CURRENT_TIME", - // CURRENT_TIMESTAMP - "CURRENT_TIMESTAMP", - // CURRENT_USER - "CURRENT_USER", - // DISTINCT - "DISTINCT", - // EXCEPT - "EXCEPT", - // EXISTS - "EXISTS", - // FALSE - "FALSE", - // FETCH - "FETCH", - // FOR - "FOR", - // FOREIGN - "FOREIGN", - // FROM - "FROM", - // FULL - "FULL", - // GROUP - "GROUP", - // HAVING - "HAVING", - // IF - "IF", - // INNER - "INNER", - // INTERSECT - "INTERSECT", - // INTERSECTS - "INTERSECTS", - // INTERVAL - "INTERVAL", - // IS - "IS", - // JOIN - "JOIN", - // LIKE - "LIKE", - // LIMIT - "LIMIT", - // LOCALTIME - "LOCALTIME", - // LOCALTIMESTAMP - "LOCALTIMESTAMP", - // MINUS - "MINUS", - // NATURAL - "NATURAL", - // NOT - "NOT", - // NULL - "NULL", - // OFFSET - "OFFSET", - // ON - "ON", - // ORDER - "ORDER", - // PRIMARY - "PRIMARY", - // QUALIFY - "QUALIFY", - // ROW - "ROW", - // _ROWID_ - "_ROWID_", - // ROWNUM - "ROWNUM", - // SELECT - "SELECT", - // TABLE - "TABLE", - // TRUE - "TRUE", - // UNION - "UNION", - // UNIQUE - "UNIQUE", - // VALUES - "VALUES", - // WHERE - "WHERE", - // WINDOW - "WINDOW", - // WITH - "WITH", - // PARAMETER - "?", - // END - null, - // VALUE - null, - // EQUAL - "=", - // BIGGER_EQUAL - ">=", - // BIGGER - ">", - // SMALLER - "<", - // SMALLER_EQUAL - "<=", - // NOT_EQUAL - "<>", - // AT - "@", - // MINUS_SIGN - "-", - // PLUS_SIGN - "+", - // STRING_CONCAT - "||", - // OPEN_PAREN - "(", - // CLOSE_PAREN - ")", - // SPATIAL_INTERSECTS - "&&", - // ASTERISK - 
"*", - // COMMA - ",", - // DOT - ".", - // OPEN_BRACE - "{", - // CLOSE_BRACE - "}", - // SLASH - "/", - // PERCENT - "%", - // SEMICOLON - ";", - // COLON - ":", - // OPEN_BRACKET - "[", - // CLOSE_BRACKET - "]", - // TILDE - "~", - // COLON_COLON - "::", - // COLON_EQ - ":=", - // NOT_TILDE - "!~", - // End - }; - - private static final Comparator TABLE_FILTER_COMPARATOR = - new Comparator() { - @Override - public int compare(TableFilter o1, TableFilter o2) { - if (o1 == o2) - return 0; - assert o1.getOrderInFrom() != o2.getOrderInFrom(); - return o1.getOrderInFrom() > o2.getOrderInFrom() ? 1 : -1; - } - }; - private final Database database; - private final Session session; + private final SessionLocal session; /** * @see org.h2.engine.DbSettings#databaseToLower @@ -621,42 +429,102 @@ public int compare(TableFilter o1, TableFilter o2) { */ private final boolean identifiersToUpper; - /** indicates character-type for each char in sqlCommand */ - private int[] characterTypes; + /** + * @see org.h2.engine.SessionLocal#isVariableBinary() + */ + private final boolean variableBinary; + + private final BitSet nonKeywords; + + ArrayList tokens; + int tokenIndex; + Token token; private int currentTokenType; private String currentToken; - private boolean currentTokenQuoted; - private Value currentValue; - private String originalSQL; - /** copy of originalSQL, with comments blanked out */ private String sqlCommand; - /** cached array if chars from sqlCommand */ - private char[] sqlCommandChars; - /** index into sqlCommand of previous token */ - private int lastParseIndex; - /** index into sqlCommand of current token */ - private int parseIndex; private CreateView createView; private Prepared currentPrepared; private Select currentSelect; + private List cteCleanups; private ArrayList parameters; - private ArrayList indexedParameterList; private ArrayList suppliedParameters; - private ArrayList suppliedParameterList; private String schemaName; private ArrayList expectedList; 
private boolean rightsChecked; private boolean recompileAlways; private boolean literalsChecked; private int orderInFrom; + private boolean parseDomainConstraint; + + /** + * Parses the specified collection of non-keywords. + * + * @param nonKeywords array of non-keywords in upper case + * @return bit set of non-keywords, or {@code null} + */ + public static BitSet parseNonKeywords(String[] nonKeywords) { + if (nonKeywords.length == 0) { + return null; + } + BitSet set = new BitSet(); + for (String nonKeyword : nonKeywords) { + int index = Arrays.binarySearch(TOKENS, FIRST_KEYWORD, LAST_KEYWORD + 1, nonKeyword); + if (index >= 0) { + set.set(index); + } + } + return set.isEmpty() ? null : set; + } + + /** + * Formats a comma-separated list of keywords. + * + * @param nonKeywords bit set of non-keywords, or {@code null} + * @return comma-separated list of non-keywords + */ + public static String formatNonKeywords(BitSet nonKeywords) { + if (nonKeywords == null || nonKeywords.isEmpty()) { + return ""; + } + StringBuilder builder = new StringBuilder(); + for (int i = -1; (i = nonKeywords.nextSetBit(i + 1)) >= 0;) { + if (i >= FIRST_KEYWORD && i <= LAST_KEYWORD) { + if (builder.length() > 0) { + builder.append(','); + } + builder.append(TOKENS[i]); + } + } + return builder.toString(); + } - public Parser(Session session) { + /** + * Creates a new instance of parser. + * + * @param session the session + */ + public Parser(SessionLocal session) { this.database = session.getDatabase(); - this.identifiersToLower = database.getSettings().databaseToLower; - this.identifiersToUpper = database.getSettings().databaseToUpper; + DbSettings settings = database.getSettings(); + this.identifiersToLower = settings.databaseToLower; + this.identifiersToUpper = settings.databaseToUpper; + this.variableBinary = session.isVariableBinary(); + this.nonKeywords = session.getNonKeywords(); this.session = session; } + /** + * Creates a new instance of parser for special use cases. 
+ */ + public Parser() { + database = null; + identifiersToLower = false; + identifiersToUpper = false; + variableBinary = false; + nonKeywords = null; + session = null; + } + /** * Parse the statement and prepare it for execution. * @@ -664,9 +532,9 @@ public Parser(Session session) { * @return the prepared object */ public Prepared prepare(String sql) { - Prepared p = parse(sql); + Prepared p = parse(sql, null); p.prepare(); - if (currentTokenType != END) { + if (currentTokenType != END_OF_INPUT) { throw getSyntaxError(); } return p; @@ -680,8 +548,8 @@ public Prepared prepare(String sql) { */ public Command prepareCommand(String sql) { try { - Prepared p = parse(sql); - if (currentTokenType != SEMICOLON && currentTokenType != END) { + Prepared p = parse(sql, null); + if (currentTokenType != SEMICOLON && currentTokenType != END_OF_INPUT) { addExpected(SEMICOLON); throw getSyntaxError(); } @@ -691,55 +559,58 @@ public Command prepareCommand(String sql) { CommandContainer.clearCTE(session, p); throw t; } - if (parseIndex < sql.length()) { - sql = sql.substring(0, parseIndex); + int sqlIndex = token.start(); + if (sqlIndex < sql.length()) { + sql = sql.substring(0, sqlIndex); } CommandContainer c = new CommandContainer(session, sql, p); - if (currentTokenType == SEMICOLON) { - String remaining = originalSQL.substring(parseIndex); - if (!StringUtils.isWhitespaceOrEmpty(remaining)) { - return prepareCommandList(c, sql, remaining); - } + while (currentTokenType == SEMICOLON) { + read(); + } + if (currentTokenType != END_OF_INPUT) { + int offset = token.start(); + return prepareCommandList(c, p, sql, sqlCommand.substring(offset), getRemainingTokens(offset)); } return c; } catch (DbException e) { - throw e.addSQL(originalSQL); + throw e.addSQL(sqlCommand); } } - private CommandList prepareCommandList(CommandContainer command, String sql, String remaining) { + private CommandList prepareCommandList(CommandContainer command, Prepared p, String sql, String remainingSql, + 
ArrayList remainingTokens) { try { ArrayList list = Utils.newSmallArrayList(); - boolean stop = false; - do { - if (stop) { - return new CommandList(session, sql, command, list, parameters, remaining); + for (;;) { + if (p instanceof DefineCommand) { + // Next commands may depend on results of this command. + return new CommandList(session, sql, command, list, parameters, remainingSql); } suppliedParameters = parameters; - suppliedParameterList = indexedParameterList; - Prepared p; try { - p = parse(remaining); + p = parse(remainingSql, remainingTokens); } catch (DbException ex) { // This command may depend on results of previous commands. if (ex.getErrorCode() == ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS) { throw ex; } - return new CommandList(session, sql, command, list, parameters, remaining); - } - if (p instanceof DefineCommand) { - // Next commands may depend on results of this command. - stop = true; + return new CommandList(session, sql, command, list, parameters, remainingSql); } list.add(p); - if (currentTokenType == END) { - break; - } - if (currentTokenType != SEMICOLON) { + if (currentTokenType != SEMICOLON && currentTokenType != END_OF_INPUT) { addExpected(SEMICOLON); throw getSyntaxError(); } - } while (!StringUtils.isWhitespaceOrEmpty(remaining = originalSQL.substring(parseIndex))); + while (currentTokenType == SEMICOLON) { + read(); + } + if (currentTokenType == END_OF_INPUT) { + break; + } + int offset = token.start(); + remainingSql = sqlCommand.substring(offset); + remainingTokens = getRemainingTokens(offset); + } return new CommandList(session, sql, command, list, parameters, null); } catch (Throwable t) { command.clearCTE(); @@ -747,13 +618,26 @@ private CommandList prepareCommandList(CommandContainer command, String sql, Str } } + private ArrayList getRemainingTokens(int offset) { + List subList = tokens.subList(tokenIndex, tokens.size()); + ArrayList remainingTokens = new ArrayList<>(subList); + subList.clear(); + tokens.add(new 
Token.EndOfInputToken(offset)); + for (Token token : remainingTokens) { + token.subtractFromStart(offset); + } + return remainingTokens; + } + /** * Parse the statement, but don't prepare it for execution. * * @param sql the SQL statement to parse + * @param tokens tokens, or null * @return the prepared object */ - Prepared parse(String sql) { + Prepared parse(String sql, ArrayList tokens) { + initialize(sql, tokens, false); Prepared p; try { // first, try the fast variant @@ -761,6 +645,7 @@ Prepared parse(String sql) { } catch (DbException e) { if (e.getErrorCode() == ErrorCode.SYNTAX_ERROR_1) { // now, get the detailed exception + resetTokenIndex(); p = parse(sql, true); } else { throw e.addSQL(sql); @@ -772,57 +657,74 @@ Prepared parse(String sql) { } private Prepared parse(String sql, boolean withExpectedList) { - initialize(sql); if (withExpectedList) { expectedList = new ArrayList<>(); } else { expectedList = null; } - parameters = suppliedParameters != null ? suppliedParameters : Utils.newSmallArrayList(); - indexedParameterList = suppliedParameterList; + parameters = suppliedParameters != null ? suppliedParameters : Utils.newSmallArrayList(); currentSelect = null; currentPrepared = null; createView = null; + cteCleanups = null; recompileAlways = false; read(); - return parsePrepared(); + Prepared p; + try { + p = parsePrepared(); + p.setCteCleanups(cteCleanups); + } catch (Throwable t) { + if (cteCleanups != null) { + CommandContainer.clearCTE(session, cteCleanups); + } + throw t; + } + return p; } private Prepared parsePrepared() { - int start = lastParseIndex; + int start = tokenIndex; Prepared c = null; switch (currentTokenType) { - case END: + case END_OF_INPUT: case SEMICOLON: c = new NoOperation(session); - setSQL(c, null, start); + setSQL(c, start); return c; case PARAMETER: // read the ? 
as a parameter - readTerm(); // this is an 'out' parameter - set a dummy value - parameters.get(0).setValue(ValueNull.INSTANCE); + readParameter().setValue(ValueNull.INSTANCE); read(EQUAL); + start = tokenIndex; read("CALL"); c = parseCall(); break; case OPEN_PAREN: - case FROM: case SELECT: case TABLE: case VALUES: - c = parseSelect(); + c = parseQuery(); break; case WITH: read(); - c = parseWithStatementOrQuery(); + c = parseWithStatementOrQuery(start); + break; + case SET: + read(); + c = parseSet(); break; case IDENTIFIER: - if (currentTokenQuoted) { + if (token.isQuoted()) { break; } - switch (currentToken.charAt(0)) { - case 'a': + /* + * Convert a-z to A-Z. This method is safe, because only A-Z + * characters are considered below. + * + * Unquoted identifier is never empty. + */ + switch (currentToken.charAt(0) & 0xffdf) { case 'A': if (readIf("ALTER")) { c = parseAlter(); @@ -830,7 +732,6 @@ private Prepared parsePrepared() { c = parseAnalyze(); } break; - case 'b': case 'B': if (readIf("BACKUP")) { c = parseBackup(); @@ -838,7 +739,6 @@ private Prepared parsePrepared() { c = parseBegin(); } break; - case 'c': case 'C': if (readIf("COMMIT")) { c = parseCommit(); @@ -852,58 +752,61 @@ private Prepared parsePrepared() { c = parseComment(); } break; - case 'd': case 'D': if (readIf("DELETE")) { - c = parseDelete(); + c = parseDelete(start); } else if (readIf("DROP")) { c = parseDrop(); } else if (readIf("DECLARE")) { // support for DECLARE GLOBAL TEMPORARY TABLE... c = parseCreate(); - } else if (readIf("DEALLOCATE")) { + } else if (database.getMode().getEnum() != ModeEnum.MSSQLServer && readIf("DEALLOCATE")) { + /* + * PostgreSQL-style DEALLOCATE is disabled in MSSQLServer + * mode because PostgreSQL-style EXECUTE is redefined in + * this mode. 
+ */ c = parseDeallocate(); } break; - case 'e': case 'E': if (readIf("EXPLAIN")) { c = parseExplain(); - } else if (readIf("EXECUTE")) { - c = parseExecute(); + } else if (database.getMode().getEnum() != ModeEnum.MSSQLServer) { + if (readIf("EXECUTE")) { + c = parseExecutePostgre(); + } + } else { + if (readIf("EXEC") || readIf("EXECUTE")) { + c = parseExecuteSQLServer(); + } } break; - case 'g': case 'G': if (readIf("GRANT")) { c = parseGrantRevoke(CommandInterface.GRANT); } break; - case 'h': case 'H': if (readIf("HELP")) { c = parseHelp(); } break; - case 'i': case 'I': if (readIf("INSERT")) { - c = parseInsert(); + c = parseInsert(start); } break; - case 'm': case 'M': if (readIf("MERGE")) { - c = parseMerge(); + c = parseMerge(start); } break; - case 'p': case 'P': if (readIf("PREPARE")) { c = parsePrepare(); } break; - case 'r': case 'R': if (readIf("ROLLBACK")) { c = parseRollback(); @@ -913,15 +816,12 @@ private Prepared parsePrepared() { c = parseRunScript(); } else if (readIf("RELEASE")) { c = parseReleaseSavepoint(); - } else if (readIf("REPLACE")) { - c = parseReplace(); + } else if (database.getMode().replaceInto && readIf("REPLACE")) { + c = parseReplace(start); } break; - case 's': case 'S': - if (readIf("SET")) { - c = parseSet(); - } else if (readIf("SAVEPOINT")) { + if (readIf("SAVEPOINT")) { c = parseSavepoint(); } else if (readIf("SCRIPT")) { c = parseScript(); @@ -931,16 +831,14 @@ private Prepared parsePrepared() { c = parseShow(); } break; - case 't': case 'T': if (readIf("TRUNCATE")) { c = parseTruncate(); } break; - case 'u': case 'U': if (readIf("UPDATE")) { - c = parseUpdate(); + c = parseUpdate(start); } else if (readIf("USE")) { c = parseUse(); } @@ -950,16 +848,15 @@ private Prepared parsePrepared() { if (c == null) { throw getSyntaxError(); } - if (indexedParameterList != null) { - for (int i = 0, size = indexedParameterList.size(); - i < size; i++) { - if (indexedParameterList.get(i) == null) { - indexedParameterList.set(i, new 
Parameter(i)); + if (parameters != null) { + for (int i = 0, size = parameters.size(); i < size; i++) { + if (parameters.get(i) == null) { + parameters.set(i, new Parameter(i)); } } - parameters = indexedParameterList; } - if (readIf(OPEN_BRACE)) { + boolean withParamValues = readIf(OPEN_BRACE); + if (withParamValues) { do { int index = (int) readLong() - 1; if (index < 0 || index >= parameters.size()) { @@ -980,21 +877,22 @@ private Prepared parsePrepared() { } parameters.clear(); } - setSQL(c, null, start); + if (withParamValues || c.getSQL() == null) { + setSQL(c, start); + } return c; } private DbException getSyntaxError() { if (expectedList == null || expectedList.isEmpty()) { - return DbException.getSyntaxError(sqlCommand, parseIndex); + return DbException.getSyntaxError(sqlCommand, token.start()); } - return DbException.getSyntaxError(sqlCommand, parseIndex, - StringUtils.join(new StringBuilder(), expectedList, ", ").toString()); + return DbException.getSyntaxError(sqlCommand, token.start(), String.join(", ", expectedList)); } private Prepared parseBackup() { BackupCommand command = new BackupCommand(session); - read("TO"); + read(TO); command.setFileName(readExpression()); return command; } @@ -1023,13 +921,11 @@ private TransactionCommand parseBegin() { private TransactionCommand parseCommit() { TransactionCommand command; if (readIf("TRANSACTION")) { - command = new TransactionCommand(session, - CommandInterface.COMMIT_TRANSACTION); - command.setTransactionName(readUniqueIdentifier()); + command = new TransactionCommand(session, CommandInterface.COMMIT_TRANSACTION); + command.setTransactionName(readIdentifier()); return command; } - command = new TransactionCommand(session, - CommandInterface.COMMIT); + command = new TransactionCommand(session, CommandInterface.COMMIT); readIf("WORK"); return command; } @@ -1051,43 +947,51 @@ private TransactionCommand parseShutdown() { private TransactionCommand parseRollback() { TransactionCommand command; if 
(readIf("TRANSACTION")) { - command = new TransactionCommand(session, - CommandInterface.ROLLBACK_TRANSACTION); - command.setTransactionName(readUniqueIdentifier()); + command = new TransactionCommand(session, CommandInterface.ROLLBACK_TRANSACTION); + command.setTransactionName(readIdentifier()); return command; } - if (readIf("TO")) { + readIf("WORK"); + if (readIf(TO)) { read("SAVEPOINT"); - command = new TransactionCommand(session, - CommandInterface.ROLLBACK_TO_SAVEPOINT); - command.setSavepointName(readUniqueIdentifier()); + command = new TransactionCommand(session, CommandInterface.ROLLBACK_TO_SAVEPOINT); + command.setSavepointName(readIdentifier()); } else { - readIf("WORK"); - command = new TransactionCommand(session, - CommandInterface.ROLLBACK); + command = new TransactionCommand(session, CommandInterface.ROLLBACK); } return command; } private Prepared parsePrepare() { if (readIf("COMMIT")) { - TransactionCommand command = new TransactionCommand(session, - CommandInterface.PREPARE_COMMIT); - command.setTransactionName(readUniqueIdentifier()); + TransactionCommand command = new TransactionCommand(session, CommandInterface.PREPARE_COMMIT); + command.setTransactionName(readIdentifier()); return command; } - String procedureName = readAliasIdentifier(); + return parsePrepareProcedure(); + } + + private Prepared parsePrepareProcedure() { + if (database.getMode().getEnum() == ModeEnum.MSSQLServer) { + throw getSyntaxError(); + /* + * PostgreSQL-style PREPARE is disabled in MSSQLServer mode + * because PostgreSQL-style EXECUTE is redefined in this + * mode. 
+ */ + } + String procedureName = readIdentifier(); if (readIf(OPEN_PAREN)) { ArrayList list = Utils.newSmallArrayList(); for (int i = 0;; i++) { - Column column = parseColumnForTable("C" + i, true, false); + Column column = parseColumnForTable("C" + i, true); list.add(column); - if (!readIfMore(true)) { + if (!readIfMore()) { break; } } } - read("AS"); + read(AS); Prepared prep = parsePrepared(); PrepareProcedure command = new PrepareProcedure(session); command.setProcedureName(procedureName); @@ -1096,16 +1000,15 @@ private Prepared parsePrepare() { } private TransactionCommand parseSavepoint() { - TransactionCommand command = new TransactionCommand(session, - CommandInterface.SAVEPOINT); - command.setSavepointName(readUniqueIdentifier()); + TransactionCommand command = new TransactionCommand(session, CommandInterface.SAVEPOINT); + command.setSavepointName(readIdentifier()); return command; } private Prepared parseReleaseSavepoint() { Prepared command = new NoOperation(session); readIf("SAVEPOINT"); - readUniqueIdentifier(); + readIdentifier(); return command; } @@ -1165,168 +1068,143 @@ private Schema getSchemaWithDefault() { } private Column readTableColumn(TableFilter filter) { - boolean rowId = false; - String columnName = null; - if (currentTokenType == _ROWID_) { - read(); - rowId = true; - } else { - columnName = readColumnIdentifier(); + String columnName = readIdentifier(); + if (readIf(DOT)) { + columnName = readTableColumn(filter, columnName); + } + return filter.getTable().getColumn(columnName); + } + + private String readTableColumn(TableFilter filter, String tableAlias) { + String columnName = readIdentifier(); + if (readIf(DOT)) { + String schema = tableAlias; + tableAlias = columnName; + columnName = readIdentifier(); if (readIf(DOT)) { - String tableAlias = columnName; - if (currentTokenType == _ROWID_) { - read(); - rowId = true; - } else { - columnName = readColumnIdentifier(); - if (readIf(DOT)) { - String schema = tableAlias; - tableAlias = 
columnName; - if (currentTokenType == _ROWID_) { - read(); - rowId = true; - } else { - columnName = readColumnIdentifier(); - if (readIf(DOT)) { - if (!equalsToken(schema, database.getShortName())) { - throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, schema); - } - schema = tableAlias; - tableAlias = columnName; - if (currentTokenType == _ROWID_) { - read(); - rowId = true; - } else { - columnName = readColumnIdentifier(); - } - } - } - if (!equalsToken(schema, filter.getTable().getSchema().getName())) { - throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schema); - } - } - } - if (!equalsToken(tableAlias, filter.getTableAlias())) { - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableAlias); - } + checkDatabaseName(schema); + schema = tableAlias; + tableAlias = columnName; + columnName = readIdentifier(); + } + if (!equalsToken(schema, filter.getTable().getSchema().getName())) { + throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schema); } } - return rowId ? filter.getRowIdColumn() : filter.getTable().getColumn(columnName); + if (!equalsToken(tableAlias, filter.getTableAlias())) { + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableAlias); + } + return columnName; } - private Update parseUpdate() { + private Update parseUpdate(int start) { Update command = new Update(session); currentPrepared = command; - int start = lastParseIndex; - TableFilter filter = readSimpleTableFilter(0, null); + Expression fetch = null; + if (database.getMode().topInDML && readIf("TOP")) { + read(OPEN_PAREN); + fetch = readTerm().optimize(session); + read(CLOSE_PAREN); + } + TableFilter filter = readSimpleTableFilter(); command.setTableFilter(filter); - parseUpdateSetClause(command, filter, start, true); + command.setSetClauseList(readUpdateSetClause(filter)); + if (database.getMode().allowUsingFromClauseInUpdateStatement && readIf(FROM)) { + TableFilter fromTable = readTablePrimary(); + command.setFromTableFilter(fromTable); + } + if (readIf(WHERE)) 
{ + command.setCondition(readExpression()); + } + if (fetch == null) { + // for MySQL compatibility + // (this syntax is supported, but ignored) + readIfOrderBy(); + fetch = readFetchOrLimit(); + } + command.setFetch(fetch); + setSQL(command, start); return command; } - private void parseUpdateSetClause(Update command, TableFilter filter, int start, boolean allowExtensions) { - read("SET"); - if (readIf(OPEN_PAREN)) { - ArrayList columns = Utils.newSmallArrayList(); - do { - Column column = readTableColumn(filter); - columns.add(column); - } while (readIfMore(true)); - read(EQUAL); - Expression expression = readExpression(); - if (columns.size() == 1 && expression.getType().getValueType() != Value.ROW) { - // the expression is parsed as a simple value - command.setAssignment(columns.get(0), expression); + private SetClauseList readUpdateSetClause(TableFilter filter) { + read(SET); + SetClauseList list = new SetClauseList(filter.getTable()); + do { + if (readIf(OPEN_PAREN)) { + ArrayList columns = Utils.newSmallArrayList(); + do { + columns.add(readTableColumn(filter)); + } while (readIfMore()); + read(EQUAL); + list.addMultiple(columns, readExpression()); } else { - for (int i = 0, size = columns.size(); i < size; i++) { - Column column = columns.get(i); - Function f = Function.getFunction(database, "ARRAY_GET"); - f.setParameter(0, expression); - f.setParameter(1, ValueExpression.get(ValueInt.get(i + 1))); - f.doneWithParameters(); - command.setAssignment(column, f); - } - } - } else { - do { Column column = readTableColumn(filter); read(EQUAL); - command.setAssignment(column, readExpressionOrDefault()); - } while (readIf(COMMA)); - } - if (readIf(WHERE)) { - Expression condition = readExpression(); - command.setCondition(condition); - } - if (allowExtensions) { - if (readIf(ORDER)) { - // for MySQL compatibility - // (this syntax is supported, but ignored) - read("BY"); - parseSimpleOrderList(); + list.addSingle(column, readExpressionOrDefault()); } - if 
(readIf(LIMIT)) { - Expression limit = readTerm().optimize(session); - command.setLimit(limit); - } - } - setSQL(command, "UPDATE", start); + } while (readIf(COMMA)); + return list; } - private TableFilter readSimpleTableFilter(int orderInFrom, Collection excludeTokens) { - Table table = readTableOrView(); - String alias = null; - if (readIf("AS")) { - alias = readAliasIdentifier(); - } else if (currentTokenType == IDENTIFIER) { - if (!equalsTokenIgnoreCase(currentToken, "SET") - && (excludeTokens == null || !isTokenInList(excludeTokens))) { - // SET is not a keyword (PostgreSQL supports it as a table name) - alias = readAliasIdentifier(); - } - } - return new TableFilter(session, table, alias, rightsChecked, - currentSelect, orderInFrom, null); + private TableFilter readSimpleTableFilter() { + return new TableFilter(session, readTableOrView(), readFromAlias(null), rightsChecked, currentSelect, 0, null); } - private Delete parseDelete() { + private Delete parseDelete(int start) { Delete command = new Delete(session); - Expression limit = null; - if (readIf("TOP")) { - limit = readTerm().optimize(session); + Expression fetch = null; + if (database.getMode().topInDML && readIf("TOP")) { + fetch = readTerm().optimize(session); } currentPrepared = command; - int start = lastParseIndex; if (!readIf(FROM) && database.getMode().getEnum() == ModeEnum.MySQL) { readIdentifierWithSchema(); read(FROM); } - TableFilter filter = readSimpleTableFilter(0, null); - command.setTableFilter(filter); + command.setTableFilter(readSimpleTableFilter()); if (readIf(WHERE)) { command.setCondition(readExpression()); } - if (limit == null && readIf(LIMIT)) { - limit = readTerm().optimize(session); + if (fetch == null) { + fetch = readFetchOrLimit(); } - command.setLimit(limit); - setSQL(command, "DELETE", start); + command.setFetch(fetch); + setSQL(command, start); return command; } + private Expression readFetchOrLimit() { + Expression fetch = null; + if (readIf(FETCH)) { + if 
(!readIf("FIRST")) { + read("NEXT"); + } + if (readIf(ROW) || readIf("ROWS")) { + fetch = ValueExpression.get(ValueInteger.get(1)); + } else { + fetch = readExpression().optimize(session); + if (!readIf(ROW)) { + read("ROWS"); + } + } + read("ONLY"); + } else if (database.getMode().limit && readIf(LIMIT)) { + fetch = readTerm().optimize(session); + } + return fetch; + } + private IndexColumn[] parseIndexColumnList() { ArrayList columns = Utils.newSmallArrayList(); do { - IndexColumn column = new IndexColumn(); - column.columnName = readColumnIdentifier(); - column.sortType = parseSortType(); - columns.add(column); - } while (readIfMore(true)); + columns.add(new IndexColumn(readIdentifier(), parseSortType())); + } while (readIfMore()); return columns.toArray(new IndexColumn[0]); } private int parseSortType() { - int sortType = parseSimpleSortType(); + int sortType = !readIf("ASC") && readIf("DESC") ? SortOrder.DESCENDING : SortOrder.ASCENDING; if (readIf("NULLS")) { if (readIf("FIRST")) { sortType |= SortOrder.NULLS_FIRST; @@ -1338,19 +1216,11 @@ private int parseSortType() { return sortType; } - private int parseSimpleSortType() { - if (!readIf("ASC") && readIf("DESC")) { - return SortOrder.DESCENDING; - } - return SortOrder.ASCENDING; - } - private String[] parseColumnList() { ArrayList columns = Utils.newSmallArrayList(); do { - String columnName = readColumnIdentifier(); - columns.add(columnName); - } while (readIfMore(false)); + columns.add(readIdentifier()); + } while (readIfMore()); return columns.toArray(new String[0]); } @@ -1361,10 +1231,10 @@ private Column[] parseColumnList(Table table) { do { Column column = parseColumn(table); if (!set.add(column)) { - throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column.getSQL(false)); + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column.getTraceSQL()); } columns.add(column); - } while (readIfMore(false)); + } while (readIfMore()); } return columns.toArray(new Column[0]); } @@ -1374,45 +1244,29 
@@ private Column parseColumn(Table table) { read(); return table.getRowIdColumn(); } - return table.getColumn(readColumnIdentifier()); + return table.getColumn(readIdentifier()); } /** * Read comma or closing brace. * - * @param strict - * if {@code false} additional comma before brace is allowed * @return {@code true} if comma is read, {@code false} if brace is read */ - private boolean readIfMore(boolean strict) { + private boolean readIfMore() { if (readIf(COMMA)) { - return strict || !readIf(CLOSE_PAREN); + return true; } read(CLOSE_PAREN); return false; } private Prepared parseHelp() { - Select select = new Select(session, null); - select.setWildcard(); - String informationSchema = database.sysIdentifier("INFORMATION_SCHEMA"); - Table table = database.getSchema(informationSchema) - .resolveTableOrView(session, database.sysIdentifier("HELP")); - Function function = Function.getFunction(database, "UPPER"); - function.setParameter(0, new ExpressionColumn(database, informationSchema, - database.sysIdentifier("HELP"), database.sysIdentifier("TOPIC"), false)); - function.doneWithParameters(); - TableFilter filter = new TableFilter(session, table, null, rightsChecked, select, 0, null); - select.addTableFilter(filter, true); - while (currentTokenType != END) { - String s = currentToken; + HashSet conditions = new HashSet<>(); + while (currentTokenType != END_OF_INPUT) { + conditions.add(StringUtils.toUpperEnglish(currentToken)); read(); - CompareLike like = new CompareLike(database, function, - ValueExpression.get(ValueString.get('%' + s + '%')), null, false); - select.addCondition(like); } - select.init(); - return select; + return new Help(session, conditions.toArray(new String[0])); } private Prepared parseShow() { @@ -1420,63 +1274,98 @@ private Prepared parseShow() { StringBuilder buff = new StringBuilder("SELECT "); if (readIf("CLIENT_ENCODING")) { // for PostgreSQL compatibility - buff.append("'UNICODE' AS CLIENT_ENCODING FROM DUAL"); + buff.append("'UNICODE' 
CLIENT_ENCODING"); } else if (readIf("DEFAULT_TRANSACTION_ISOLATION")) { // for PostgreSQL compatibility - buff.append("'read committed' AS DEFAULT_TRANSACTION_ISOLATION " + - "FROM DUAL"); + buff.append("'read committed' DEFAULT_TRANSACTION_ISOLATION"); } else if (readIf("TRANSACTION")) { // for PostgreSQL compatibility read("ISOLATION"); read("LEVEL"); - buff.append("'read committed' AS TRANSACTION_ISOLATION " + - "FROM DUAL"); + buff.append("LOWER(ISOLATION_LEVEL) TRANSACTION_ISOLATION FROM INFORMATION_SCHEMA.SESSIONS " + + "WHERE SESSION_ID = SESSION_ID()"); } else if (readIf("DATESTYLE")) { // for PostgreSQL compatibility - buff.append("'ISO' AS DATESTYLE FROM DUAL"); + buff.append("'ISO' DATESTYLE"); + } else if (readIf("SEARCH_PATH")) { + // for PostgreSQL compatibility + String[] searchPath = session.getSchemaSearchPath(); + StringBuilder searchPathBuff = new StringBuilder(); + if (searchPath != null) { + for (int i = 0; i < searchPath.length; i ++) { + if (i > 0) { + searchPathBuff.append(", "); + } + ParserUtil.quoteIdentifier(searchPathBuff, searchPath[i], HasSQL.QUOTE_ONLY_WHEN_REQUIRED); + } + } + StringUtils.quoteStringSQL(buff, searchPathBuff.toString()); + buff.append(" SEARCH_PATH"); } else if (readIf("SERVER_VERSION")) { // for PostgreSQL compatibility - buff.append("'" + Constants.PG_VERSION + "' AS SERVER_VERSION FROM DUAL"); + buff.append("'" + Constants.PG_VERSION + "' SERVER_VERSION"); } else if (readIf("SERVER_ENCODING")) { // for PostgreSQL compatibility - buff.append("'UTF8' AS SERVER_ENCODING FROM DUAL"); + buff.append("'UTF8' SERVER_ENCODING"); + } else if (readIf("SSL")) { + // for PostgreSQL compatibility + buff.append("'off' SSL"); } else if (readIf("TABLES")) { // for MySQL compatibility String schema = database.getMainSchema().getName(); if (readIf(FROM)) { - schema = readUniqueIdentifier(); + schema = readIdentifier(); } buff.append("TABLE_NAME, TABLE_SCHEMA FROM " + "INFORMATION_SCHEMA.TABLES " + "WHERE TABLE_SCHEMA=? 
ORDER BY TABLE_NAME"); - paramValues.add(ValueString.get(schema)); + paramValues.add(ValueVarchar.get(schema)); } else if (readIf("COLUMNS")) { // for MySQL compatibility read(FROM); String tableName = readIdentifierWithSchema(); String schemaName = getSchema().getName(); - paramValues.add(ValueString.get(tableName)); + paramValues.add(ValueVarchar.get(tableName)); if (readIf(FROM)) { - schemaName = readUniqueIdentifier(); - } - buff.append("C.COLUMN_NAME FIELD, " - + "C.TYPE_NAME || '(' || C.NUMERIC_PRECISION || ')' TYPE, " + schemaName = readIdentifier(); + } + buff.append("C.COLUMN_NAME FIELD, "); + boolean oldInformationSchema = session.isOldInformationSchema(); + buff.append(oldInformationSchema + ? "C.COLUMN_TYPE" + : "DATA_TYPE_SQL(?2, ?1, 'TABLE', C.DTD_IDENTIFIER)"); + buff.append(" TYPE, " + "C.IS_NULLABLE \"NULL\", " + "CASE (SELECT MAX(I.INDEX_TYPE_NAME) FROM " - + "INFORMATION_SCHEMA.INDEXES I " - + "WHERE I.TABLE_SCHEMA=C.TABLE_SCHEMA " - + "AND I.TABLE_NAME=C.TABLE_NAME " - + "AND I.COLUMN_NAME=C.COLUMN_NAME)" + + "INFORMATION_SCHEMA.INDEXES I "); + if (!oldInformationSchema) { + buff.append("JOIN INFORMATION_SCHEMA.INDEX_COLUMNS IC "); + } + buff.append("WHERE I.TABLE_SCHEMA=C.TABLE_SCHEMA " + + "AND I.TABLE_NAME=C.TABLE_NAME "); + if (oldInformationSchema) { + buff.append("AND I.COLUMN_NAME=C.COLUMN_NAME"); + } else { + buff.append("AND IC.TABLE_SCHEMA=C.TABLE_SCHEMA " + + "AND IC.TABLE_NAME=C.TABLE_NAME " + + "AND IC.INDEX_SCHEMA=I.INDEX_SCHEMA " + + "AND IC.INDEX_NAME=I.INDEX_NAME " + + "AND IC.COLUMN_NAME=C.COLUMN_NAME"); + } + buff.append(')' + "WHEN 'PRIMARY KEY' THEN 'PRI' " - + "WHEN 'UNIQUE INDEX' THEN 'UNI' ELSE '' END KEY, " - + "IFNULL(COLUMN_DEFAULT, 'NULL') DEFAULT " + + "WHEN 'UNIQUE INDEX' THEN 'UNI' ELSE '' END `KEY`, " + + "COALESCE(COLUMN_DEFAULT, 'NULL') `DEFAULT` " + "FROM INFORMATION_SCHEMA.COLUMNS C " - + "WHERE C.TABLE_NAME=? AND C.TABLE_SCHEMA=? 
" + + "WHERE C.TABLE_NAME=?1 AND C.TABLE_SCHEMA=?2 " + "ORDER BY C.ORDINAL_POSITION"); - paramValues.add(ValueString.get(schemaName)); + paramValues.add(ValueVarchar.get(schemaName)); } else if (readIf("DATABASES") || readIf("SCHEMAS")) { // for MySQL compatibility buff.append("SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA"); + } else if (database.getMode().getEnum() == ModeEnum.PostgreSQL && readIf("ALL")) { + // for PostgreSQL compatibility + buff.append("NAME, SETTING FROM PG_CATALOG.PG_SETTINGS"); } boolean b = session.getAllowLiterals(); try { @@ -1489,7 +1378,7 @@ private Prepared parseShow() { } } - private static Prepared prepare(Session s, String sql, + private static Prepared prepare(SessionLocal s, String sql, ArrayList paramValues) { Prepared prep = s.prepare(sql); ArrayList params = prep.getParameters(); @@ -1502,113 +1391,174 @@ private static Prepared prepare(Session s, String sql, return prep; } - private boolean isSelect() { - int start = lastParseIndex; - while (readIf(OPEN_PAREN)) { - // need to read ahead, it could be a nested union: - // ((select 1) union (select 1)) - } - boolean select; - switch (currentTokenType) { - case FROM: + private boolean isDerivedTable() { + int offset = tokenIndex; + int level = 0; + while (tokens.get(offset).tokenType() == OPEN_PAREN) { + level++; + offset++; + } + boolean query = isDirectQuery(offset); + s: if (query && level > 0) { + offset = scanToCloseParen(offset + 1); + if (offset < 0) { + query = false; + break s; + } + for (;;) { + switch (tokens.get(offset).tokenType()) { + case SEMICOLON: + case END_OF_INPUT: + query = false; + break s; + case OPEN_PAREN: + offset = scanToCloseParen(offset + 1); + if (offset < 0) { + query = false; + break s; + } + break; + case CLOSE_PAREN: + if (--level == 0) { + break s; + } + offset++; + break; + case JOIN: + query = false; + break s; + default: + offset++; + } + } + } + return query; + } + + private boolean isQuery() { + int offset = tokenIndex; + int level = 0; + 
while (tokens.get(offset).tokenType() == OPEN_PAREN) { + level++; + offset++; + } + boolean query = isDirectQuery(offset); + s: if (query && level > 0) { + offset++; + do { + offset = scanToCloseParen(offset); + if (offset < 0) { + query = false; + break s; + } + switch (tokens.get(offset).tokenType()) { + default: + query = false; + break s; + case END_OF_INPUT: + case SEMICOLON: + case CLOSE_PAREN: + case ORDER: + case OFFSET: + case FETCH: + case LIMIT: + case UNION: + case EXCEPT: + case MINUS: + case INTERSECT: + } + } while (--level > 0); + } + return query; + } + + private int scanToCloseParen(int offset) { + for (int level = 0;;) { + switch (tokens.get(offset).tokenType()) { + case SEMICOLON: + case END_OF_INPUT: + return -1; + case OPEN_PAREN: + level++; + break; + case CLOSE_PAREN: + if (--level < 0) { + return offset + 1; + } + } + offset++; + } + } + + private boolean isQueryQuick() { + int offset = tokenIndex; + while (tokens.get(offset).tokenType() == OPEN_PAREN) { + offset++; + } + return isDirectQuery(offset); + } + + private boolean isDirectQuery(int offset) { + boolean query; + switch (tokens.get(offset).tokenType()) { case SELECT: case VALUES: case WITH: - select = true; + query = true; break; case TABLE: - read(); - select = !readIf(OPEN_PAREN); + query = tokens.get(offset + 1).tokenType() != OPEN_PAREN; break; default: - select = false; + query = false; } - parseIndex = start; - read(); - return select; + return query; } - private Prepared parseMerge() { - int start = lastParseIndex; + private Prepared parseMerge(int start) { read("INTO"); - List excludeIdentifiers = Arrays.asList("USING", "KEY"); - TableFilter targetTableFilter = readSimpleTableFilter(0, excludeIdentifiers); - if (readIf("USING")) { + TableFilter targetTableFilter = readSimpleTableFilter(); + if (readIf(USING)) { return parseMergeUsing(targetTableFilter, start); } - Merge command = new Merge(session); + return parseMergeInto(targetTableFilter, start); + } + + private Prepared 
parseMergeInto(TableFilter targetTableFilter, int start) { + Merge command = new Merge(session, false); currentPrepared = command; - command.setTargetTableFilter(targetTableFilter); - Table table = command.getTargetTable(); + command.setTable(targetTableFilter.getTable()); + Table table = command.getTable(); if (readIf(OPEN_PAREN)) { - if (isSelect()) { - command.setQuery(parseSelect()); + if (isQueryQuick()) { + command.setQuery(parseQuery()); read(CLOSE_PAREN); return command; } - Column[] columns = parseColumnList(table); - command.setColumns(columns); + command.setColumns(parseColumnList(table)); } - if (readIf("KEY")) { + if (readIf(KEY)) { read(OPEN_PAREN); - Column[] keys = parseColumnList(table); - command.setKeys(keys); + command.setKeys(parseColumnList(table)); } if (readIf(VALUES)) { parseValuesForCommand(command); } else { - command.setQuery(parseSelect()); + command.setQuery(parseQuery()); } + setSQL(command, start); return command; } private MergeUsing parseMergeUsing(TableFilter targetTableFilter, int start) { MergeUsing command = new MergeUsing(session, targetTableFilter); currentPrepared = command; - - if (readIf(OPEN_PAREN)) { - /* a select query is supplied */ - if (isSelect()) { - command.setQuery(parseSelect()); - read(CLOSE_PAREN); - } - String queryAlias = readFromAlias(null, null); - if (queryAlias == null) { - queryAlias = Constants.PREFIX_QUERY_ALIAS + parseIndex; - } - command.setQueryAlias(queryAlias); - - String[] querySQLOutput = {null}; - List columnTemplateList = TableView.createQueryColumnTemplateList(null, command.getQuery(), - querySQLOutput); - TableView temporarySourceTableView = createCTEView( - queryAlias, querySQLOutput[0], - columnTemplateList, false/* no recursion */, - false/* do not add to session */, - true /* isTemporary */ - ); - TableFilter sourceTableFilter = new TableFilter(session, - temporarySourceTableView, queryAlias, - rightsChecked, (Select) command.getQuery(), 0, null); - 
command.setSourceTableFilter(sourceTableFilter); - } else { - /* Its a table name, simulate a query by building a select query for the table */ - TableFilter sourceTableFilter = readSimpleTableFilter(0, null); - command.setSourceTableFilter(sourceTableFilter); - - Select preparedQuery = new Select(session, null); - preparedQuery.setWildcard(); - TableFilter filter = new TableFilter(session, sourceTableFilter.getTable(), - sourceTableFilter.getTableAlias(), rightsChecked, preparedQuery, 0, null); - preparedQuery.addTableFilter(filter, true); - preparedQuery.init(); - command.setQuery(preparedQuery); - } + command.setSourceTableFilter(readTableReference()); read(ON); Expression condition = readExpression(); command.setOnCondition(condition); - read("WHEN"); + read(WHEN); do { boolean matched = readIf("MATCHED"); if (matched) { @@ -1616,90 +1566,153 @@ private MergeUsing parseMergeUsing(TableFilter targetTableFilter, int start) { } else { parseWhenNotMatched(command); } - } while (readIf("WHEN")); + } while (readIf(WHEN)); - setSQL(command, "MERGE", start); + setSQL(command, start); return command; } private void parseWhenMatched(MergeUsing command) { - Expression and = readIf("AND") ? readExpression() : null; + Expression and = readIf(AND) ? 
readExpression() : null; read("THEN"); - int startMatched = lastParseIndex; - Update updateCommand = null; + MergeUsing.When when; if (readIf("UPDATE")) { - updateCommand = new Update(session); - TableFilter filter = command.getTargetTableFilter(); - updateCommand.setTableFilter(filter); - parseUpdateSetClause(updateCommand, filter, startMatched, false); - startMatched = lastParseIndex; - } - Delete deleteCommand = null; - if (readIf("DELETE")) { - deleteCommand = new Delete(session); - deleteCommand.setTableFilter(command.getTargetTableFilter()); - if (readIf(WHERE)) { - deleteCommand.setCondition(readExpression()); - } - setSQL(deleteCommand, "DELETE", startMatched); - } - if (updateCommand != null || deleteCommand != null) { - MergeUsing.WhenMatched when = new MergeUsing.WhenMatched(command); - when.setAndCondition(and); - when.setUpdateCommand(updateCommand); - when.setDeleteCommand(deleteCommand); - command.addWhen(when); + MergeUsing.WhenMatchedThenUpdate update = command.new WhenMatchedThenUpdate(); + update.setSetClauseList(readUpdateSetClause(command.getTargetTableFilter())); + when = update; } else { - throw getSyntaxError(); + read("DELETE"); + when = command.new WhenMatchedThenDelete(); + } + if (and == null && database.getMode().mergeWhere && readIf(WHERE)) { + and = readExpression(); } + when.setAndCondition(and); + command.addWhen(when); } private void parseWhenNotMatched(MergeUsing command) { read(NOT); read("MATCHED"); - Expression and = readIf("AND") ? readExpression() : null; + Expression and = readIf(AND) ? 
readExpression() : null; read("THEN"); - if (readIf("INSERT")) { - Insert insertCommand = new Insert(session); - insertCommand.setTable(command.getTargetTable()); - parseInsertGivenTable(insertCommand, command.getTargetTable()); - MergeUsing.WhenNotMatched when = new MergeUsing.WhenNotMatched(command); - when.setAndCondition(and); - when.setInsertCommand(insertCommand); - command.addWhen(when); - } else { - throw getSyntaxError(); + read("INSERT"); + Column[] columns = readIf(OPEN_PAREN) ? parseColumnList(command.getTargetTableFilter().getTable()) : null; + Boolean overridingSystem = readIfOverriding(); + read(VALUES); + read(OPEN_PAREN); + ArrayList values = Utils.newSmallArrayList(); + if (!readIf(CLOSE_PAREN)) { + do { + values.add(readExpressionOrDefault()); + } while (readIfMore()); } + MergeUsing.WhenNotMatched when = command.new WhenNotMatched(columns, overridingSystem, + values.toArray(new Expression[0])); + when.setAndCondition(and); + command.addWhen(when); } - private Insert parseInsert() { + private Insert parseInsert(int start) { Insert command = new Insert(session); currentPrepared = command; - if (database.getMode().onDuplicateKeyUpdate && readIf("IGNORE")) { + Mode mode = database.getMode(); + if (mode.onDuplicateKeyUpdate && readIf("IGNORE")) { command.setIgnore(true); } read("INTO"); Table table = readTableOrView(); command.setTable(table); - Insert returnedCommand = parseInsertGivenTable(command, table); - if (returnedCommand != null) { - return returnedCommand; + Column[] columns = null; + if (readIf(OPEN_PAREN)) { + if (isQueryQuick()) { + command.setQuery(parseQuery()); + read(CLOSE_PAREN); + return command; + } + columns = parseColumnList(table); + command.setColumns(columns); + } + Boolean overridingSystem = readIfOverriding(); + command.setOverridingSystem(overridingSystem); + boolean requireQuery = false; + if (readIf("DIRECT")) { + requireQuery = true; + command.setInsertFromSelect(true); + } + if (readIf("SORTED")) { + requireQuery = 
true; + } + readValues: { + if (!requireQuery) { + if (overridingSystem == null && readIf(DEFAULT)) { + read(VALUES); + command.addRow(new Expression[0]); + break readValues; + } + if (readIf(VALUES)) { + parseValuesForCommand(command); + break readValues; + } + if (readIf(SET)) { + parseInsertSet(command, table, columns); + break readValues; + } + } + command.setQuery(parseQuery()); + } + if (mode.onDuplicateKeyUpdate || mode.insertOnConflict || mode.isolationLevelInSelectOrInsertStatement) { + parseInsertCompatibility(command, table, mode); + } + setSQL(command, start); + return command; + } + + private Boolean readIfOverriding() { + Boolean overridingSystem = null; + if (readIf("OVERRIDING")) { + if (readIf(USER)) { + overridingSystem = Boolean.FALSE; + } else { + read("SYSTEM"); + overridingSystem = Boolean.TRUE; + } + read(VALUE); + } + return overridingSystem; + } + + private void parseInsertSet(Insert command, Table table, Column[] columns) { + if (columns != null) { + throw getSyntaxError(); } - if (database.getMode().onDuplicateKeyUpdate) { + ArrayList columnList = Utils.newSmallArrayList(); + ArrayList values = Utils.newSmallArrayList(); + do { + columnList.add(parseColumn(table)); + read(EQUAL); + values.add(readExpressionOrDefault()); + } while (readIf(COMMA)); + command.setColumns(columnList.toArray(new Column[0])); + command.addRow(values.toArray(new Expression[0])); + } + + private void parseInsertCompatibility(Insert command, Table table, Mode mode) { + if (mode.onDuplicateKeyUpdate) { if (readIf(ON)) { read("DUPLICATE"); - read("KEY"); + read(KEY); read("UPDATE"); do { - String columnName = readColumnIdentifier(); + String columnName = readIdentifier(); if (readIf(DOT)) { String schemaOrTableName = columnName; - String tableOrColumnName = readColumnIdentifier(); + String tableOrColumnName = readIdentifier(); if (readIf(DOT)) { if (!table.getSchema().getName().equals(schemaOrTableName)) { throw DbException.get(ErrorCode.SCHEMA_NAME_MUST_MATCH); } - 
columnName = readColumnIdentifier(); + columnName = readIdentifier(); } else { columnName = tableOrColumnName; tableOrColumnName = schemaOrTableName; @@ -1714,76 +1727,42 @@ private Insert parseInsert() { } while (readIf(COMMA)); } } - if (database.getMode().isolationLevelInSelectOrInsertStatement) { - parseIsolationClause(); - } - return command; - } - - private Insert parseInsertGivenTable(Insert command, Table table) { - Column[] columns = null; - if (readIf(OPEN_PAREN)) { - if (isSelect()) { - command.setQuery(parseSelect()); - read(CLOSE_PAREN); - return command; + if (mode.insertOnConflict) { + if (readIf(ON)) { + read("CONFLICT"); + read("DO"); + read("NOTHING"); + command.setIgnore(true); } - columns = parseColumnList(table); - command.setColumns(columns); } - if (readIf("DIRECT")) { - command.setInsertFromSelect(true); - } - if (readIf("SORTED")) { - command.setSortedInsertMode(true); - } - if (readIf("DEFAULT")) { - read(VALUES); - command.addRow(new Expression[0]); - } else if (readIf(VALUES)) { - parseValuesForCommand(command); - } else if (readIf("SET")) { - if (columns != null) { - throw getSyntaxError(); - } - ArrayList columnList = Utils.newSmallArrayList(); - ArrayList values = Utils.newSmallArrayList(); - do { - columnList.add(parseColumn(table)); - read(EQUAL); - values.add(readExpressionOrDefault()); - } while (readIf(COMMA)); - command.setColumns(columnList.toArray(new Column[0])); - command.addRow(values.toArray(new Expression[0])); - } else { - command.setQuery(parseSelect()); + if (mode.isolationLevelInSelectOrInsertStatement) { + parseIsolationClause(); } - return null; } /** * MySQL compatibility. REPLACE is similar to MERGE. 
*/ - private Replace parseReplace() { - Replace command = new Replace(session); + private Merge parseReplace(int start) { + Merge command = new Merge(session, true); currentPrepared = command; read("INTO"); Table table = readTableOrView(); command.setTable(table); if (readIf(OPEN_PAREN)) { - if (isSelect()) { - command.setQuery(parseSelect()); + if (isQueryQuick()) { + command.setQuery(parseQuery()); read(CLOSE_PAREN); return command; } - Column[] columns = parseColumnList(table); - command.setColumns(columns); + command.setColumns(parseColumnList(table)); } if (readIf(VALUES)) { parseValuesForCommand(command); } else { - command.setQuery(parseSelect()); + command.setQuery(parseQuery()); } + setSQL(command, start); return command; } @@ -1801,56 +1780,49 @@ private void parseValuesForCommand(CommandWithValues command) { if (multiColumn) { if (!readIf(CLOSE_PAREN)) { do { - values.add(readIf("DEFAULT") ? null : readExpression()); - } while (readIfMore(false)); + values.add(readExpressionOrDefault()); + } while (readIfMore()); } } else { - values.add(readIf("DEFAULT") ? 
null : readExpression()); + values.add(readExpressionOrDefault()); } command.addRow(values.toArray(new Expression[0])); } while (readIf(COMMA)); } - private TableFilter readTableFilter() { + private TableFilter readTablePrimary() { Table table; String alias = null; label: if (readIf(OPEN_PAREN)) { - if (isSelect()) { - Query query = parseSelectUnion(); - read(CLOSE_PAREN); - query.setParameterList(new ArrayList<>(parameters)); - query.init(); - Session s; - if (createView != null) { - s = database.getSystemSession(); - } else { - s = session; - } - alias = session.getNextSystemIdentifier(sqlCommand); - table = TableView.createTempView(s, session.getUser(), alias, - query, currentSelect); + if (isDerivedTable()) { + // Derived table + return readDerivedTableWithCorrelation(); } else { - TableFilter top; - top = readTableFilter(); - top = readJoin(top); + // Parenthesized joined table + TableFilter tableFilter = readTableReference(); read(CLOSE_PAREN); - alias = readFromAlias(null); - if (alias != null) { - top.setAlias(alias); - ArrayList derivedColumnNames = readDerivedColumnNames(); - if (derivedColumnNames != null) { - top.setDerivedColumns(derivedColumnNames); - } - } - return top; + return readCorrelation(tableFilter); } } else if (readIf(VALUES)) { - table = parseValuesTable(0).getTable(); + TableValueConstructor query = parseValues(); + alias = session.getNextSystemIdentifier(sqlCommand); + table = query.toTable(alias, null, parameters, createView != null, currentSelect); } else if (readIf(TABLE)) { + // Table function derived table read(OPEN_PAREN); - table = readTableFunction("TABLE", null, database.getMainSchema()); + ArrayTableFunction function = readTableFunction(ArrayTableFunction.TABLE); + table = new FunctionTable(database.getMainSchema(), session, function); } else { - String tableName = readIdentifierWithSchema(null); + boolean quoted = token.isQuoted(); + String tableName = readIdentifier(); + int backupIndex = tokenIndex; + schemaName = null; + if 
(readIf(DOT)) { + tableName = readIdentifierWithSchema2(tableName); + } else if (!quoted && readIf(TABLE)) { + table = readDataChangeDeltaTable(upperName(tableName), backupIndex); + break label; + } Schema schema; if (schemaName == null) { schema = null; @@ -1858,21 +1830,21 @@ private TableFilter readTableFilter() { schema = findSchema(schemaName); if (schema == null) { if (isDualTable(tableName)) { - table = getDualTable(false); + table = new DualTable(database); break label; } throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName); } } - boolean foundLeftBracket = readIf(OPEN_PAREN); - if (foundLeftBracket && readIf("INDEX")) { + boolean foundLeftParen = readIf(OPEN_PAREN); + if (foundLeftParen && readIf("INDEX")) { // Sybase compatibility with // "select * from test (index table1_index)" readIdentifierWithSchema(null); read(CLOSE_PAREN); - foundLeftBracket = false; + foundLeftParen = false; } - if (foundLeftBracket) { + if (foundLeftParen) { Schema mainSchema = database.getMainSchema(); if (equalsToken(tableName, RangeTable.NAME) || equalsToken(tableName, RangeTable.ALIAS)) { @@ -1882,14 +1854,13 @@ private TableFilter readTableFilter() { if (readIf(COMMA)) { Expression step = readExpression(); read(CLOSE_PAREN); - table = new RangeTable(mainSchema, min, max, step, - false); + table = new RangeTable(mainSchema, min, max, step); } else { read(CLOSE_PAREN); - table = new RangeTable(mainSchema, min, max, false); + table = new RangeTable(mainSchema, min, max); } } else { - table = readTableFunction(tableName, schema, mainSchema); + table = new FunctionTable(mainSchema, session, readTableFunction(tableName, schema)); } } else { table = readTableOrView(tableName); @@ -1897,32 +1868,73 @@ private TableFilter readTableFilter() { } ArrayList derivedColumnNames = null; IndexHints indexHints = null; - // for backward compatibility, handle case where USE is a table alias - if (readIf("USE")) { - if (readIf("INDEX")) { - indexHints = parseIndexHints(table); - } 
else { - alias = "USE"; + if (readIfUseIndex()) { + indexHints = parseIndexHints(table); + } else { + alias = readFromAlias(alias); + if (alias != null) { derivedColumnNames = readDerivedColumnNames(); + if (readIfUseIndex()) { + indexHints = parseIndexHints(table); + } + } + } + return buildTableFilter(table, alias, derivedColumnNames, indexHints); + } + + private TableFilter readCorrelation(TableFilter tableFilter) { + String alias = readFromAlias(null); + if (alias != null) { + tableFilter.setAlias(alias); + ArrayList derivedColumnNames = readDerivedColumnNames(); + if (derivedColumnNames != null) { + tableFilter.setDerivedColumns(derivedColumnNames); } + } + return tableFilter; + } + + private TableFilter readDerivedTableWithCorrelation() { + Query query = parseQueryExpression(); + read(CLOSE_PAREN); + Table table; + String alias; + ArrayList derivedColumnNames = null; + IndexHints indexHints = null; + if (readIfUseIndex()) { + alias = session.getNextSystemIdentifier(sqlCommand); + table = query.toTable(alias, null, parameters, createView != null, currentSelect); + indexHints = parseIndexHints(table); } else { - alias = readFromAlias(alias); + alias = readFromAlias(null); if (alias != null) { derivedColumnNames = readDerivedColumnNames(); - // if alias present, a second chance to parse index hints - if (readIf("USE")) { - read("INDEX"); + Column[] columnTemplates = null; + if (derivedColumnNames != null) { + query.init(); + columnTemplates = TableView.createQueryColumnTemplateList( + derivedColumnNames.toArray(new String[0]), query, new String[1]) + .toArray(new Column[0]); + } + table = query.toTable(alias, columnTemplates, parameters, createView != null, currentSelect); + if (readIfUseIndex()) { indexHints = parseIndexHints(table); } + } else { + alias = session.getNextSystemIdentifier(sqlCommand); + table = query.toTable(alias, null, parameters, createView != null, currentSelect); } } + return buildTableFilter(table, alias, derivedColumnNames, indexHints); + 
} + private TableFilter buildTableFilter(Table table, String alias, ArrayList derivedColumnNames, + IndexHints indexHints) { if (database.getMode().discardWithTableHints) { discardWithTableHints(); } - // inherit alias for CTE as views from table name - if (table.isView() && table.isTableExpression() && alias == null) { + if (alias == null && table.isView() && table.isTableExpression()) { alias = table.getName(); } TableFilter filter = new TableFilter(session, table, alias, rightsChecked, @@ -1933,16 +1945,95 @@ private TableFilter readTableFilter() { return filter; } - private Table readTableFunction(String tableName, Schema schema, Schema mainSchema) { - Expression expr = readFunction(schema, tableName); - if (!(expr instanceof FunctionCall)) { + private Table readDataChangeDeltaTable(String resultOptionName, int backupIndex) { + read(OPEN_PAREN); + int start = tokenIndex; + DataChangeStatement statement; + ResultOption resultOption = ResultOption.FINAL; + switch (resultOptionName) { + case "OLD": + resultOption = ResultOption.OLD; + if (readIf("UPDATE")) { + statement = parseUpdate(start); + } else if (readIf("DELETE")) { + statement = parseDelete(start); + } else if (readIf("MERGE")) { + statement = (DataChangeStatement) parseMerge(start); + } else if (database.getMode().replaceInto && readIf("REPLACE")) { + statement = parseReplace(start); + } else { + throw getSyntaxError(); + } + break; + case "NEW": + resultOption = ResultOption.NEW; + //$FALL-THROUGH$ + case "FINAL": + if (readIf("INSERT")) { + statement = parseInsert(start); + } else if (readIf("UPDATE")) { + statement = parseUpdate(start); + } else if (readIf("MERGE")) { + statement = (DataChangeStatement) parseMerge(start); + } else if (database.getMode().replaceInto && readIf("REPLACE")) { + statement = parseReplace(start); + } else { + throw getSyntaxError(); + } + break; + default: + setTokenIndex(backupIndex); + addExpected("OLD TABLE"); + addExpected("NEW TABLE"); + addExpected("FINAL TABLE"); 
throw getSyntaxError(); } - FunctionCall call = (FunctionCall) expr; - if (!call.isDeterministic()) { + read(CLOSE_PAREN); + if (currentSelect != null) { + // Lobs aren't copied, so use it for more safety + currentSelect.setNeverLazy(true); + } + return new DataChangeDeltaTable(getSchemaWithDefault(), session, statement, resultOption); + } + + private TableFunction readTableFunction(String name, Schema schema) { + if (schema == null) { + switch (upperName(name)) { + case "UNNEST": + return readUnnestFunction(); + case "TABLE_DISTINCT": + return readTableFunction(ArrayTableFunction.TABLE_DISTINCT); + case "CSVREAD": + recompileAlways = true; + return readParameters(new CSVReadFunction()); + case "LINK_SCHEMA": + recompileAlways = true; + return readParameters(new LinkSchemaFunction()); + } + } + FunctionAlias functionAlias = getFunctionAliasWithinPath(name, schema); + if (!functionAlias.isDeterministic()) { recompileAlways = true; } - return new FunctionTable(mainSchema, session, expr, call); + ArrayList argList = Utils.newSmallArrayList(); + if (!readIf(CLOSE_PAREN)) { + do { + argList.add(readExpression()); + } while (readIfMore()); + } + return new JavaTableFunction(functionAlias, argList.toArray(new Expression[0])); + } + + private boolean readIfUseIndex() { + int start = tokenIndex; + if (!readIf("USE")) { + return false; + } + if (!readIf("INDEX")) { + setTokenIndex(start); + return false; + } + return true; } private IndexHints parseIndexHints(Table table) { @@ -1953,34 +2044,24 @@ private IndexHints parseIndexHints(Table table) { String indexName = readIdentifierWithSchema(); Index index = table.getIndex(indexName); indexNames.add(index.getName()); - } while (readIfMore(true)); + } while (readIfMore()); } return IndexHints.createUseIndexHints(indexNames); } - private String readFromAlias(String alias, List excludeIdentifiers) { - if (readIf("AS")) { - alias = readAliasIdentifier(); - } else if (currentTokenType == IDENTIFIER - && (excludeIdentifiers == null 
|| !isTokenInList(excludeIdentifiers))) { - alias = readAliasIdentifier(); + private String readFromAlias(String alias) { + if (readIf(AS) || isIdentifier()) { + alias = readIdentifier(); } return alias; } - private String readFromAlias(String alias) { - // left and right are not keywords (because they are functions as - // well) - List excludeIdentifiers = Arrays.asList("LEFT", "RIGHT"); - return readFromAlias(alias, excludeIdentifiers); - } - private ArrayList readDerivedColumnNames() { if (readIf(OPEN_PAREN)) { ArrayList derivedColumnNames = new ArrayList<>(); do { - derivedColumnNames.add(readAliasIdentifier()); - } while (readIfMore(true)); + derivedColumnNames.add(readIdentifier()); + } while (readIfMore()); return derivedColumnNames; } return null; @@ -1991,7 +2072,7 @@ private void discardWithTableHints() { read(OPEN_PAREN); do { discardTableHint(); - } while (readIfMore(true)); + } while (readIfMore()); } } @@ -2000,7 +2081,7 @@ private void discardTableHint() { if (readIf(OPEN_PAREN)) { do { readExpression(); - } while (readIfMore(true)); + } while (readIfMore()); } else { read(EQUAL); readExpression(); @@ -2013,15 +2094,13 @@ private void discardTableHint() { private Prepared parseTruncate() { read(TABLE); Table table = readTableOrView(); - boolean restart; + boolean restart = database.getMode().truncateTableRestartIdentity; if (readIf("CONTINUE")) { read("IDENTITY"); restart = false; } else if (readIf("RESTART")) { read("IDENTITY"); restart = true; - } else { - restart = false; } TruncateTable command = new TruncateTable(session); command.setTable(table); @@ -2062,7 +2141,7 @@ private Prepared parseComment() { type = DbObject.SEQUENCE; } else if (readIf("TRIGGER")) { type = DbObject.TRIGGER; - } else if (readIf("USER")) { + } else if (readIf(USER)) { type = DbObject.USER; } else if (readIf("DOMAIN")) { type = DbObject.DOMAIN; @@ -2073,29 +2152,29 @@ private Prepared parseComment() { String objectName; if (column) { // can't use 
readIdentifierWithSchema() because - // it would not read schema.table.column correctly - // if the db name is equal to the schema name - ArrayList list = Utils.newSmallArrayList(); - do { - list.add(readUniqueIdentifier()); - } while (readIf(DOT)); - schemaName = session.getCurrentSchemaName(); - if (list.size() == 4) { - if (!equalsToken(database.getShortName(), list.remove(0))) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, - "database name"); + // it would not read [catalog.]schema.table.column correctly + objectName = readIdentifier(); + String tmpSchemaName = null; + read(DOT); + boolean allowEmpty = database.getMode().allowEmptySchemaValuesAsDefaultSchema; + String columnName = allowEmpty && currentTokenType == DOT ? null : readIdentifier(); + if (readIf(DOT)) { + tmpSchemaName = objectName; + objectName = columnName; + columnName = allowEmpty && currentTokenType == DOT ? null : readIdentifier(); + if (readIf(DOT)) { + checkDatabaseName(tmpSchemaName); + tmpSchemaName = objectName; + objectName = columnName; + columnName = readIdentifier(); } } - if (list.size() == 3) { - schemaName = list.remove(0); + if (columnName == null || objectName == null) { + throw DbException.getSyntaxError(sqlCommand, token.start(), "table.column"); } - if (list.size() != 2) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, - "table.column"); - } - objectName = list.get(0); + schemaName = tmpSchemaName != null ? 
tmpSchemaName : session.getCurrentSchemaName(); command.setColumn(true); - command.setColumnName(list.get(1)); + command.setColumnName(columnName); } else { objectName = readIdentifierWithSchema(); } @@ -2110,15 +2189,11 @@ private Prepared parseComment() { private Prepared parseDrop() { if (readIf(TABLE)) { boolean ifExists = readIfExists(false); - String tableName = readIdentifierWithSchema(); - DropTable command = new DropTable(session, getSchema()); - command.setTableName(tableName); - while (readIf(COMMA)) { - tableName = readIdentifierWithSchema(); - DropTable next = new DropTable(session, getSchema()); - next.setTableName(tableName); - command.addNextDropTable(next); - } + DropTable command = new DropTable(session); + do { + String tableName = readIdentifierWithSchema(); + command.addTable(getSchema(), tableName); + } while (readIf(COMMA)); ifExists = readIfExists(ifExists); command.setIfExists(ifExists); if (readIf("CASCADE")) { @@ -2127,6 +2202,7 @@ private Prepared parseDrop() { } else if (readIf("RESTRICT")) { command.setDropAction(ConstraintActionType.RESTRICT); } else if (readIf("IGNORE")) { + // TODO SET_DEFAULT works in the same way as CASCADE command.setDropAction(ConstraintActionType.SET_DEFAULT); } return command; @@ -2142,10 +2218,10 @@ private Prepared parseDrop() { readIdentifierWithSchema(); } return command; - } else if (readIf("USER")) { + } else if (readIf(USER)) { boolean ifExists = readIfExists(false); DropUser command = new DropUser(session); - command.setUserName(readUniqueIdentifier()); + command.setUserName(readIdentifier()); ifExists = readIfExists(ifExists); readIf("CASCADE"); command.setIfExists(ifExists); @@ -2189,7 +2265,7 @@ private Prepared parseDrop() { } else if (readIf("ROLE")) { boolean ifExists = readIfExists(false); DropRole command = new DropRole(session); - command.setRoleName(readUniqueIdentifier()); + command.setRoleName(readIdentifier()); ifExists = readIfExists(ifExists); command.setIfExists(ifExists); return 
command; @@ -2205,7 +2281,7 @@ private Prepared parseDrop() { } else if (readIf("SCHEMA")) { boolean ifExists = readIfExists(false); DropSchema command = new DropSchema(session); - command.setSchemaName(readUniqueIdentifier()); + command.setSchemaName(readIdentifier()); ifExists = readIfExists(ifExists); command.setIfExists(ifExists); ConstraintActionType dropAction = parseCascadeOrRestrict(); @@ -2240,10 +2316,11 @@ private Prepared parseDrop() { private DropDomain parseDropDomain() { boolean ifExists = readIfExists(false); - DropDomain command = new DropDomain(session); - command.setTypeName(readUniqueIdentifier()); + String domainName = readIdentifierWithSchema(); + DropDomain command = new DropDomain(session, getSchema()); + command.setDomainName(domainName); ifExists = readIfExists(ifExists); - command.setIfExists(ifExists); + command.setIfDomainExists(ifExists); ConstraintActionType dropAction = parseCascadeOrRestrict(); if (dropAction != null) { command.setDropAction(dropAction); @@ -2253,100 +2330,121 @@ private DropDomain parseDropDomain() { private DropAggregate parseDropAggregate() { boolean ifExists = readIfExists(false); - DropAggregate command = new DropAggregate(session); - command.setName(readUniqueIdentifier()); + String name = readIdentifierWithSchema(); + DropAggregate command = new DropAggregate(session, getSchema()); + command.setName(name); ifExists = readIfExists(ifExists); command.setIfExists(ifExists); return command; } - private TableFilter readJoin(TableFilter top) { - TableFilter last = top; - while (true) { - TableFilter join; - if (readIf("RIGHT")) { + private TableFilter readTableReference() { + for (TableFilter top, last = top = readTablePrimary(), join;; last = join) { + switch (currentTokenType) { + case RIGHT: { + read(); readIf("OUTER"); read(JOIN); // the right hand side is the 'inner' table usually - join = readTableFilter(); - join = readJoin(join); - Expression on = null; - if (readIf(ON)) { - on = readExpression(); - } + 
join = readTableReference(); + Expression on = readJoinSpecification(top, join, true); addJoin(join, top, true, on); top = join; - } else if (readIf("LEFT")) { + break; + } + case LEFT: { + read(); readIf("OUTER"); read(JOIN); - join = readTableFilter(); - join = readJoin(join); - Expression on = null; - if (readIf(ON)) { - on = readExpression(); - } + join = readTableReference(); + Expression on = readJoinSpecification(top, join, false); addJoin(top, join, true, on); - } else if (readIf(FULL)) { + break; + } + case FULL: + read(); throw getSyntaxError(); - } else if (readIf(INNER)) { + case INNER: { + read(); read(JOIN); - join = readTableFilter(); - top = readJoin(top); - Expression on = null; - if (readIf(ON)) { - on = readExpression(); - } + join = readTableReference(); + Expression on = readJoinSpecification(top, join, false); addJoin(top, join, false, on); - } else if (readIf(JOIN)) { - join = readTableFilter(); - top = readJoin(top); - Expression on = null; - if (readIf(ON)) { - on = readExpression(); - } + break; + } + case JOIN: { + read(); + join = readTableReference(); + Expression on = readJoinSpecification(top, join, false); addJoin(top, join, false, on); - } else if (readIf(CROSS)) { + break; + } + case CROSS: { + read(); read(JOIN); - join = readTableFilter(); + join = readTablePrimary(); addJoin(top, join, false, null); - } else if (readIf(NATURAL)) { + break; + } + case NATURAL: { + read(); read(JOIN); - join = readTableFilter(); - Column[] tableCols = last.getTable().getColumns(); - Column[] joinCols = join.getTable().getColumns(); - String tableSchema = last.getTable().getSchema().getName(); - String joinSchema = join.getTable().getSchema().getName(); + join = readTablePrimary(); Expression on = null; - for (Column tc : tableCols) { - String tableColumnName = tc.getName(); - for (Column c : joinCols) { - String joinColumnName = c.getName(); - if (equalsToken(tableColumnName, joinColumnName)) { - join.addNaturalJoinColumn(c); - Expression 
tableExpr = new ExpressionColumn( - database, tableSchema, - last.getTableAlias(), tableColumnName, false); - Expression joinExpr = new ExpressionColumn( - database, joinSchema, join.getTableAlias(), - joinColumnName, false); - Expression equal = new Comparison(session, - Comparison.EQUAL, tableExpr, joinExpr); - if (on == null) { - on = equal; - } else { - on = new ConditionAndOr(ConditionAndOr.AND, on, - equal); - } - } + for (Column column1 : last.getTable().getColumns()) { + Column column2 = join.getColumn(last.getColumnName(column1), true); + if (column2 != null) { + on = addJoinColumn(on, last, join, column1, column2, false); } } addJoin(top, join, false, on); - } else { break; } - last = join; + default: + if (expectedList != null) { + // FULL is intentionally excluded + addMultipleExpected(RIGHT, LEFT, INNER, JOIN, CROSS, NATURAL); + } + return top; + } + } + } + + private Expression readJoinSpecification(TableFilter filter1, TableFilter filter2, boolean rightJoin) { + Expression on = null; + if (readIf(ON)) { + on = readExpression(); + } else if (readIf(USING)) { + read(OPEN_PAREN); + do { + String columnName = readIdentifier(); + on = addJoinColumn(on, filter1, filter2, filter1.getColumn(columnName, false), + filter2.getColumn(columnName, false), rightJoin); + } while (readIfMore()); + } + return on; + } + + private Expression addJoinColumn(Expression on, TableFilter filter1, TableFilter filter2, Column column1, + Column column2, boolean rightJoin) { + if (rightJoin) { + filter1.addCommonJoinColumns(column1, column2, filter2); + filter2.addCommonJoinColumnToExclude(column2); + } else { + filter1.addCommonJoinColumns(column1, column1, filter1); + filter2.addCommonJoinColumnToExclude(column2); + } + Expression tableExpr = new ExpressionColumn(database, filter1.getSchemaName(), filter1.getTableAlias(), + filter1.getColumnName(column1)); + Expression joinExpr = new ExpressionColumn(database, filter2.getSchemaName(), filter2.getTableAlias(), + 
filter2.getColumnName(column2)); + Expression equal = new Comparison(Comparison.EQUAL, tableExpr, joinExpr, false); + if (on == null) { + on = equal; + } else { + on = new ConditionAndOr(ConditionAndOr.AND, on, equal); } - return top; + return on; } /** @@ -2361,8 +2459,8 @@ private TableFilter readJoin(TableFilter top) { */ private void addJoin(TableFilter top, TableFilter join, boolean outer, Expression on) { if (join.getJoin() != null) { - String joinTable = Constants.PREFIX_JOIN + parseIndex; - TableFilter n = new TableFilter(session, getDualTable(true), + String joinTable = Constants.PREFIX_JOIN + token.start(); + TableFilter n = new TableFilter(session, new DualTable(database), joinTable, rightsChecked, currentSelect, join.getOrderInFrom(), null); n.setNestedJoin(join); @@ -2371,9 +2469,12 @@ private void addJoin(TableFilter top, TableFilter join, boolean outer, Expressio top.addJoin(join, outer, on); } - private Prepared parseExecute() { + private Prepared parseExecutePostgre() { + if (readIf("IMMEDIATE")) { + return new ExecuteImmediate(session, readExpression()); + } ExecuteProcedure command = new ExecuteProcedure(session); - String procedureName = readAliasIdentifier(); + String procedureName = readIdentifier(); Procedure p = session.getProcedure(procedureName); if (p == null) { throw DbException.get(ErrorCode.FUNCTION_ALIAS_NOT_FOUND_1, @@ -2383,7 +2484,7 @@ private Prepared parseExecute() { if (readIf(OPEN_PAREN)) { for (int i = 0;; i++) { command.setExpression(i, readExpression()); - if (!readIfMore(true)) { + if (!readIfMore()) { break; } } @@ -2391,11 +2492,46 @@ private Prepared parseExecute() { return command; } - private DeallocateProcedure parseDeallocate() { + private Prepared parseExecuteSQLServer() { + Call command = new Call(session); + currentPrepared = command; + String schemaName = null; + String name = readIdentifier(); + if (readIf(DOT)) { + schemaName = name; + name = readIdentifier(); + if (readIf(DOT)) { + 
checkDatabaseName(schemaName); + schemaName = name; + name = readIdentifier(); + } + } + FunctionAlias functionAlias = getFunctionAliasWithinPath(name, + schemaName != null ? database.getSchema(schemaName) : null); + Expression[] args; + ArrayList argList = Utils.newSmallArrayList(); + if (currentTokenType != SEMICOLON && currentTokenType != END_OF_INPUT) { + do { + argList.add(readExpression()); + } while (readIf(COMMA)); + } + args = argList.toArray(new Expression[0]); + command.setExpression(new JavaFunction(functionAlias, args)); + return command; + } + + private FunctionAlias getFunctionAliasWithinPath(String name, Schema schema) { + UserDefinedFunction userDefinedFunction = findUserDefinedFunctionWithinPath(schema, name); + if (userDefinedFunction instanceof FunctionAlias) { + return (FunctionAlias) userDefinedFunction; + } + throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, name); + } + + private DeallocateProcedure parseDeallocate() { readIf("PLAN"); - String procedureName = readAliasIdentifier(); DeallocateProcedure command = new DeallocateProcedure(session); - command.setProcedureName(procedureName); + command.setProcedureName(readIdentifier()); return command; } @@ -2409,25 +2545,25 @@ private Explain parseExplain() { } } switch (currentTokenType) { - case FROM: case SELECT: case TABLE: case VALUES: case WITH: case OPEN_PAREN: - Query query = parseSelect(); + Query query = parseQuery(); query.setNeverLazy(true); command.setCommand(query); break; default: + int start = tokenIndex; if (readIf("DELETE")) { - command.setCommand(parseDelete()); + command.setCommand(parseDelete(start)); } else if (readIf("UPDATE")) { - command.setCommand(parseUpdate()); + command.setCommand(parseUpdate(start)); } else if (readIf("INSERT")) { - command.setCommand(parseInsert()); + command.setCommand(parseInsert(start)); } else if (readIf("MERGE")) { - command.setCommand(parseMerge()); + command.setCommand(parseMerge(start)); } else { throw getSyntaxError(); } @@ -2435,9 
+2571,9 @@ private Explain parseExplain() { return command; } - private Query parseSelect() { + private Query parseQuery() { int paramIndex = parameters.size(); - Query command = parseSelectUnion(); + Query command = parseQueryExpression(); int size = parameters.size(); ArrayList params = new ArrayList<>(size); for (int i = paramIndex; i < size; i++) { @@ -2448,7 +2584,7 @@ private Query parseSelect() { return command; } - private Prepared parseWithStatementOrQuery() { + private Prepared parseWithStatementOrQuery(int start) { int paramIndex = parameters.size(); Prepared command = parseWith(); int size = parameters.size(); @@ -2461,12 +2597,36 @@ private Prepared parseWithStatementOrQuery() { Query query = (Query) command; query.init(); } + setSQL(command, start); + return command; + } + + private Query parseQueryExpression() { + Query query; + if (readIf(WITH)) { + try { + query = (Query) parseWith(); + } catch (ClassCastException e) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_1, "WITH statement supports only query in this context"); + } + // recursive can not be lazy + query.setNeverLazy(true); + } else { + query = parseQueryExpressionBodyAndEndOfQuery(); + } + return query; + } + + private Query parseQueryExpressionBodyAndEndOfQuery() { + int start = tokenIndex; + Query command = parseQueryExpressionBody(); + parseEndOfQuery(command); + setSQL(command, start); return command; } - private Query parseSelectUnion() { - int start = lastParseIndex; - Query command = parseSelectSub(); + private Query parseQueryExpressionBody() { + Query command = parseQueryTerm(); for (;;) { SelectUnion.UnionType type; if (readIf(UNION)) { @@ -2478,15 +2638,19 @@ private Query parseSelectUnion() { } } else if (readIf(EXCEPT) || readIf(MINUS)) { type = SelectUnion.UnionType.EXCEPT; - } else if (readIf(INTERSECT)) { - type = SelectUnion.UnionType.INTERSECT; } else { break; } - command = new SelectUnion(session, type, command, parseSelectSub()); + command = new SelectUnion(session, 
type, command, parseQueryTerm()); + } + return command; + } + + private Query parseQueryTerm() { + Query command = parseQueryPrimary(); + while (readIf(INTERSECT)) { + command = new SelectUnion(session, SelectUnion.UnionType.INTERSECT, command, parseQueryPrimary()); } - parseEndOfQuery(command); - setSQL(command, null, start); return command; } @@ -2497,12 +2661,12 @@ private void parseEndOfQuery(Query command) { if (command instanceof Select) { currentSelect = (Select) command; } - ArrayList orderList = Utils.newSmallArrayList(); + ArrayList orderList = Utils.newSmallArrayList(); do { - boolean canBeNumber = !readIf(EQUAL); - SelectOrderBy order = new SelectOrderBy(); + boolean canBeNumber = currentTokenType == LITERAL; + QueryOrderBy order = new QueryOrderBy(); Expression expr = readExpression(); - if (canBeNumber && expr instanceof ValueExpression && expr.getType().getValueType() == Value.INT) { + if (canBeNumber && expr instanceof ValueExpression && expr.getType().getValueType() == Value.INTEGER) { order.columnIndexExpr = expr; } else if (expr instanceof Parameter) { recompileAlways = true; @@ -2516,7 +2680,7 @@ private void parseEndOfQuery(Query command) { command.setOrder(orderList); currentSelect = oldSelect; } - if (command.getLimit() == null) { + if (command.getFetch() == null) { // make sure aggregate functions will not work here Select temp = currentSelect; currentSelect = null; @@ -2535,10 +2699,9 @@ private void parseEndOfQuery(Query command) { read("NEXT"); } if (readIf(ROW) || readIf("ROWS")) { - command.setLimit(ValueExpression.get(ValueInt.get(1))); + command.setFetch(ValueExpression.get(ValueInteger.get(1))); } else { - Expression limit = readExpression().optimize(session); - command.setLimit(limit); + command.setFetch(readExpression().optimize(session)); if (readIf("PERCENT")) { command.setFetchPercent(true); } @@ -2554,23 +2717,17 @@ private void parseEndOfQuery(Query command) { } } // MySQL-style LIMIT / OFFSET - if (!hasOffsetOrFetch && 
readIf(LIMIT)) { + if (!hasOffsetOrFetch && database.getMode().limit && readIf(LIMIT)) { Expression limit = readExpression().optimize(session); - command.setLimit(limit); if (readIf(OFFSET)) { - Expression offset = readExpression().optimize(session); - command.setOffset(offset); + command.setOffset(readExpression().optimize(session)); } else if (readIf(COMMA)) { // MySQL: [offset, ] rowcount Expression offset = limit; limit = readExpression().optimize(session); command.setOffset(offset); - command.setLimit(limit); } - } - if (readIf("SAMPLE_SIZE")) { - Expression sampleSize = readExpression().optimize(session); - command.setSampleSize(sampleSize); + command.setFetch(limit); } currentSelect = temp; } @@ -2601,7 +2758,7 @@ private void parseIsolationClause() { if (readIf("RR") || readIf("RS")) { // concurrent-access-resolution clause if (readIf("USE")) { - read("AND"); + read(AND); read("KEEP"); if (readIf("SHARE") || readIf("UPDATE") || readIf("EXCLUSIVE")) { @@ -2615,86 +2772,64 @@ private void parseIsolationClause() { } } - private Query parseSelectSub() { + private Query parseQueryPrimary() { if (readIf(OPEN_PAREN)) { - Query command = parseSelectUnion(); + Query command = parseQueryExpressionBodyAndEndOfQuery(); read(CLOSE_PAREN); return command; } - if (readIf(WITH)) { - Query query; - try { - query = (Query) parseWith(); - } catch (ClassCastException e) { - throw DbException.get(ErrorCode.SYNTAX_ERROR_1, - "WITH statement supports only SELECT (query) in this context"); - } - // recursive can not be lazy - query.setNeverLazy(true); - return query; + int start = tokenIndex; + if (readIf(SELECT)) { + return parseSelect(start); + } else if (readIf(TABLE)) { + return parseExplicitTable(start); } - return parseSelectSimple(); + read(VALUES); + return parseValues(); } - private void parseSelectSimpleFromPart(Select command) { + private void parseSelectFromPart(Select command) { do { - TableFilter filter = readTableFilter(); - parseJoinTableFilter(filter, command); - 
} while (readIf(COMMA)); - - // Parser can reorder joined table filters, need to explicitly sort them - // to get the order as it was in the original query. - if (session.isForceJoinOrder()) { - Collections.sort(command.getTopFilters(), TABLE_FILTER_COMPARATOR); - } - } - - private void parseJoinTableFilter(TableFilter top, final Select command) { - top = readJoin(top); - command.addTableFilter(top, true); - boolean isOuter = false; - while (true) { - TableFilter n = top.getNestedJoin(); - if (n != null) { - n.visit(new TableFilterVisitor() { - @Override - public void accept(TableFilter f) { - command.addTableFilter(f, false); + TableFilter top = readTableReference(); + command.addTableFilter(top, true); + boolean isOuter = false; + for (;;) { + TableFilter n = top.getNestedJoin(); + if (n != null) { + n.visit(f -> command.addTableFilter(f, false)); + } + TableFilter join = top.getJoin(); + if (join == null) { + break; + } + isOuter = isOuter | join.isJoinOuter(); + if (isOuter) { + command.addTableFilter(join, false); + } else { + // make flat so the optimizer can work better + Expression on = join.getJoinCondition(); + if (on != null) { + command.addCondition(on); } - }); - } - TableFilter join = top.getJoin(); - if (join == null) { - break; - } - isOuter = isOuter | join.isJoinOuter(); - if (isOuter) { - command.addTableFilter(join, false); - } else { - // make flat so the optimizer can work better - Expression on = join.getJoinCondition(); - if (on != null) { - command.addCondition(on); + join.removeJoinCondition(); + top.removeJoin(); + command.addTableFilter(join, true); } - join.removeJoinCondition(); - top.removeJoin(); - command.addTableFilter(join, true); + top = join; } - top = join; - } + } while (readIf(COMMA)); } - private void parseSelectSimpleSelectPart(Select command) { - Select temp = currentSelect; - // make sure aggregate functions will not work in TOP and LIMIT - currentSelect = null; - if (readIf("TOP")) { + private void 
parseSelectExpressions(Select command) { + if (database.getMode().topInSelect && readIf("TOP")) { + Select temp = currentSelect; + // make sure aggregate functions will not work in TOP and LIMIT + currentSelect = null; // can't read more complex expressions here because // SELECT TOP 1 +? A FROM TEST could mean // SELECT TOP (1+?) A FROM TEST or // SELECT TOP 1 (+?) AS A FROM TEST - Expression limit = readTerm().optimize(session); - command.setLimit(limit); + command.setFetch(readTerm().optimize(session)); if (readIf("PERCENT")) { command.setFetchPercent(true); } @@ -2702,20 +2837,15 @@ private void parseSelectSimpleSelectPart(Select command) { read("TIES"); command.setWithTies(true); } - } else if (readIf(LIMIT)) { - Expression offset = readTerm().optimize(session); - command.setOffset(offset); - Expression limit = readTerm().optimize(session); - command.setLimit(limit); + currentSelect = temp; } - currentSelect = temp; if (readIf(DISTINCT)) { if (readIf(ON)) { read(OPEN_PAREN); ArrayList distinctExpressions = Utils.newSmallArrayList(); do { distinctExpressions.add(readExpression()); - } while (readIfMore(true)); + } while (readIfMore()); command.setDistinct(distinctExpressions.toArray(new Expression[0])); } else { command.setDistinct(); @@ -2728,69 +2858,49 @@ private void parseSelectSimpleSelectPart(Select command) { if (readIf(ASTERISK)) { expressions.add(parseWildcard(null, null)); } else { - Expression expr = readExpression(); - if (readIf("AS") || currentTokenType == IDENTIFIER) { - String alias = readAliasIdentifier(); - boolean aliasColumnName = database.getSettings().aliasColumnName; - aliasColumnName |= database.getMode().aliasColumnName; - expr = new Alias(expr, alias, aliasColumnName); + switch (currentTokenType) { + case FROM: + case WHERE: + case GROUP: + case HAVING: + case WINDOW: + case QUALIFY: + case ORDER: + case OFFSET: + case FETCH: + case CLOSE_PAREN: + case SEMICOLON: + case END_OF_INPUT: + break; + default: + Expression expr = 
readExpression(); + if (readIf(AS) || isIdentifier()) { + expr = new Alias(expr, readIdentifier(), database.getMode().aliasColumnName); + } + expressions.add(expr); } - expressions.add(expr); } } while (readIf(COMMA)); command.setExpressions(expressions); } - private Select parseSelectSimple() { - boolean fromFirst; - if (readIf(SELECT)) { - fromFirst = false; - } else if (readIf(FROM)) { - fromFirst = true; - } else if (readIf(TABLE)) { - int start = lastParseIndex; - Table table = readTableOrView(); - Select command = new Select(session, currentSelect); - TableFilter filter = new TableFilter(session, table, null, rightsChecked, - command, orderInFrom++, null); - command.addTableFilter(filter, true); - ArrayList expressions = new ArrayList<>(); - expressions.add(new Wildcard(null, null)); - command.setExpressions(expressions); - setSQL(command, "TABLE", start); - return command; - } else if (readIf(VALUES)) { - return parseValues(); - } else { - throw getSyntaxError(); - } + private Select parseSelect(int start) { Select command = new Select(session, currentSelect); - int start = lastParseIndex; Select oldSelect = currentSelect; Prepared oldPrepared = currentPrepared; currentSelect = command; currentPrepared = command; - if (fromFirst) { - parseSelectSimpleFromPart(command); - read(SELECT); - parseSelectSimpleSelectPart(command); + parseSelectExpressions(command); + if (!readIf(FROM)) { + // select without FROM + TableFilter filter = new TableFilter(session, new DualTable(database), null, rightsChecked, + currentSelect, 0, null); + command.addTableFilter(filter, true); } else { - parseSelectSimpleSelectPart(command); - if (!readIf(FROM)) { - // select without FROM: convert to SELECT ... 
FROM - // SYSTEM_RANGE(1,1) - Table dual = getDualTable(false); - TableFilter filter = new TableFilter(session, dual, null, - rightsChecked, currentSelect, 0, - null); - command.addTableFilter(filter, true); - } else { - parseSelectSimpleFromPart(command); - } + parseSelectFromPart(command); } if (readIf(WHERE)) { - Expression condition = readExpression(); - command.addCondition(condition); + command.addCondition(readExpressionWithGlobalConditions()); } // the group by is read for the outer select (or not a select) // so that columns that are not grouped can be used @@ -2800,309 +2910,541 @@ private Select parseSelectSimple() { command.setGroupQuery(); ArrayList list = Utils.newSmallArrayList(); do { - Expression expr = readExpression(); - list.add(expr); + if (isToken(OPEN_PAREN) && isOrdinaryGroupingSet()) { + if (!readIf(CLOSE_PAREN)) { + do { + list.add(readExpression()); + } while (readIfMore()); + } + } else { + Expression expr = readExpression(); + if (database.getMode().groupByColumnIndex && expr instanceof ValueExpression && + expr.getType().getValueType() == Value.INTEGER) { + ArrayList expressions = command.getExpressions(); + for (Expression e : expressions) { + if (e instanceof Wildcard) { + throw getSyntaxError(); + } + } + int idx = expr.getValue(session).getInt(); + if (idx < 1 || idx > expressions.size()) { + throw DbException.get(ErrorCode.GROUP_BY_NOT_IN_THE_RESULT, Integer.toString(idx), + Integer.toString(expressions.size())); + } + list.add(expressions.get(idx-1)); + } else { + list.add(expr); + } + } } while (readIf(COMMA)); - command.setGroupBy(list); + if (!list.isEmpty()) { + command.setGroupBy(list); + } } currentSelect = command; if (readIf(HAVING)) { command.setGroupQuery(); - Expression condition = readExpression(); - command.setHaving(condition); + command.setHaving(readExpressionWithGlobalConditions()); } if (readIf(WINDOW)) { do { - int index = parseIndex; - String name = readAliasIdentifier(); - read("AS"); + int sqlIndex = 
token.start(); + String name = readIdentifier(); + read(AS); Window w = readWindowSpecification(); if (!currentSelect.addWindow(name, w)) { - throw DbException.getSyntaxError(sqlCommand, index, "unique identifier"); + throw DbException.getSyntaxError(sqlCommand, sqlIndex, "unique identifier"); } } while (readIf(COMMA)); } if (readIf(QUALIFY)) { command.setWindowQuery(); - Expression condition = readExpression(); - command.setQualify(condition); + command.setQualify(readExpressionWithGlobalConditions()); } command.setParameterList(parameters); currentSelect = oldSelect; currentPrepared = oldPrepared; - setSQL(command, "SELECT", start); + setSQL(command, start); return command; } - private Table getDualTable(boolean noColumns) { - Schema main = database.getMainSchema(); - Expression one = ValueExpression.get(ValueLong.get(1)); - return new RangeTable(main, one, one, noColumns); + /** + * Checks whether current opening parenthesis can be a start of ordinary + * grouping set. This method reads this parenthesis if it is. 
+ * + * @return whether current opening parenthesis can be a start of ordinary + * grouping set + */ + private boolean isOrdinaryGroupingSet() { + int offset = scanToCloseParen(tokenIndex + 1); + if (offset < 0) { + // Try to parse as expression to get better syntax error + return false; + } + switch (tokens.get(offset).tokenType()) { + // End of query + case CLOSE_PAREN: + case SEMICOLON: + case END_OF_INPUT: + // Next grouping element + case COMMA: + // Next select clause + case HAVING: + case WINDOW: + case QUALIFY: + // Next query expression body clause + case UNION: + case EXCEPT: + case MINUS: + case INTERSECT: + // Next query expression clause + case ORDER: + case OFFSET: + case FETCH: + case LIMIT: + case FOR: + setTokenIndex(tokenIndex + 1); + return true; + default: + return false; + } + } + + private Query parseExplicitTable(int start) { + Table table = readTableOrView(); + Select command = new Select(session, currentSelect); + TableFilter filter = new TableFilter(session, table, null, rightsChecked, + command, orderInFrom++, null); + command.addTableFilter(filter, true); + command.setExplicitTable(); + setSQL(command, start); + return command; } - private void setSQL(Prepared command, String start, int startIndex) { - int endIndex = lastParseIndex; - String sql; - if (start != null) { - StringBuilder builder = new StringBuilder(start.length() + endIndex - startIndex + 1) - .append(start).append(' '); - sql = StringUtils.trimSubstring(builder, originalSQL, startIndex, endIndex).toString(); + private void setSQL(Prepared command, int start) { + String s = sqlCommand; + int beginIndex = tokens.get(start).start(); + int endIndex = token.start(); + while (beginIndex < endIndex && s.charAt(beginIndex) <= ' ') { + beginIndex++; + } + while (beginIndex < endIndex && s.charAt(endIndex - 1) <= ' ') { + endIndex--; + } + s = s.substring(beginIndex, endIndex); + ArrayList commandTokens; + if (start == 0 && currentTokenType == END_OF_INPUT) { + commandTokens = 
tokens; + if (beginIndex != 0) { + for (int i = 0, l = commandTokens.size() - 1; i < l; i++) { + commandTokens.get(i).subtractFromStart(beginIndex); + } + } + token.setStart(s.length()); + sqlCommand = s; } else { - sql = StringUtils.trimSubstring(originalSQL, startIndex, endIndex); + List subList = tokens.subList(start, tokenIndex); + commandTokens = new ArrayList<>(subList.size() + 1); + for (int i = start; i < tokenIndex; i++) { + Token t = tokens.get(i).clone(); + t.subtractFromStart(beginIndex); + commandTokens.add(t); + } + commandTokens.add(new Token.EndOfInputToken(s.length())); } - command.setSQL(sql); + command.setSQL(s, commandTokens); } private Expression readExpressionOrDefault() { - if (readIf("DEFAULT")) { - return ValueExpression.getDefault(); + if (readIf(DEFAULT)) { + return ValueExpression.DEFAULT; } return readExpression(); } + private Expression readExpressionWithGlobalConditions() { + Expression r = readCondition(); + if (readIf(AND)) { + r = readAnd(new ConditionAndOr(ConditionAndOr.AND, r, readCondition())); + } else if (readIf("_LOCAL_AND_GLOBAL_")) { + r = readAnd(new ConditionLocalAndGlobal(r, readCondition())); + } + return readExpressionPart2(r); + } + private Expression readExpression() { - Expression r = readAnd(); - while (readIf("OR")) { - r = new ConditionAndOr(ConditionAndOr.OR, r, readAnd()); + return readExpressionPart2(readAnd(readCondition())); + } + + private Expression readExpressionPart2(Expression r1) { + if (!readIf(OR)) { + return r1; } - return r; + Expression r2 = readAnd(readCondition()); + if (!readIf(OR)) { + return new ConditionAndOr(ConditionAndOr.OR, r1, r2); + } + // Above logic to avoid allocating an ArrayList for the common case. + // We combine into ConditionAndOrN here rather than letting the optimisation + // pass do it, to avoid StackOverflowError during stuff like mapColumns. 
+ final ArrayList expressions = new ArrayList<>(); + expressions.add(r1); + expressions.add(r2); + do { + expressions.add(readAnd(readCondition())); + } + while (readIf(OR)); + return new ConditionAndOrN(ConditionAndOr.OR, expressions); } - private Expression readAnd() { - Expression r = readCondition(); - while (readIf("AND")) { - r = new ConditionAndOr(ConditionAndOr.AND, r, readCondition()); + private Expression readAnd(Expression r) { + if (!readIf(AND)) { + return r; } - return r; + Expression expr2 = readCondition(); + if (!readIf(AND)) { + return new ConditionAndOr(ConditionAndOr.AND, r, expr2); + } + // Above logic to avoid allocating an ArrayList for the common case. + // We combine into ConditionAndOrN here rather than letting the optimisation + // pass do it, to avoid StackOverflowError during stuff like mapColumns. + final ArrayList expressions = new ArrayList<>(); + expressions.add(r); + expressions.add(expr2); + do { + expressions.add(readCondition()); + } + while (readIf(AND)); + return new ConditionAndOrN(ConditionAndOr.AND, expressions); } private Expression readCondition() { - if (readIf(NOT)) { + switch (currentTokenType) { + case NOT: + read(); return new ConditionNot(readCondition()); - } - if (readIf(EXISTS)) { + case EXISTS: { + read(); read(OPEN_PAREN); - Query query = parseSelect(); + Query query = parseQuery(); // can not reduce expression because it might be a union except // query with distinct read(CLOSE_PAREN); - return new ConditionExists(query); + return new ExistsPredicate(query); } - if (readIf(INTERSECTS)) { + case UNIQUE: { + read(); read(OPEN_PAREN); - Expression r1 = readConcat(); - read(COMMA); - Expression r2 = readConcat(); + Query query = parseQuery(); read(CLOSE_PAREN); - return new Comparison(session, Comparison.SPATIAL_INTERSECTS, r1, - r2); + return new UniquePredicate(query); } - Expression r = readConcat(); - while (true) { + default: + int index = tokenIndex; + if (readIf("INTERSECTS")) { + if (readIf(OPEN_PAREN)) { 
+ Expression r1 = readConcat(); + read(COMMA); + Expression r2 = readConcat(); + read(CLOSE_PAREN); + return new Comparison(Comparison.SPATIAL_INTERSECTS, r1, r2, false); + } else { + setTokenIndex(index); + } + } + if (expectedList != null) { + addMultipleExpected(NOT, EXISTS, UNIQUE); + addExpected("INTERSECTS"); + } + } + Expression l, c = readConcat(); + do { + l = c; // special case: NOT NULL is not part of an expression (as in CREATE // TABLE TEST(ID INT DEFAULT 0 NOT NULL)) - int backup = parseIndex; + int backup = tokenIndex; boolean not = readIf(NOT); if (not && isToken(NULL)) { // this really only works for NOT NULL! - parseIndex = backup; - currentToken = "NOT"; - currentTokenType = NOT; + setTokenIndex(backup); break; } - if (readIf(LIKE)) { - Expression b = readConcat(); - Expression esc = null; - if (readIf("ESCAPE")) { - esc = readConcat(); - } - recompileAlways = true; - r = new CompareLike(database, r, b, esc, false); - } else if (readIf("ILIKE")) { - Function function = Function.getFunction(database, "CAST"); - function.setDataType(new Column("X", Value.STRING_IGNORECASE)); - function.setParameter(0, r); - r = function; - Expression b = readConcat(); - Expression esc = null; - if (readIf("ESCAPE")) { - esc = readConcat(); - } - recompileAlways = true; - r = new CompareLike(database, r, b, esc, false); - } else if (readIf("REGEXP")) { - Expression b = readConcat(); - recompileAlways = true; - r = new CompareLike(database, r, b, null, true); - } else if (readIf(IS)) { - if (readIf(NOT)) { - if (readIf(NULL)) { - r = new Comparison(session, Comparison.IS_NOT_NULL, r, - null); - } else if (readIf(DISTINCT)) { - read(FROM); - r = new Comparison(session, Comparison.EQUAL_NULL_SAFE, - r, readConcat()); - } else { - r = new Comparison(session, - Comparison.NOT_EQUAL_NULL_SAFE, r, readConcat()); - } - } else if (readIf(NULL)) { - r = new Comparison(session, Comparison.IS_NULL, r, null); - } else if (readIf(DISTINCT)) { - read(FROM); - r = new 
Comparison(session, Comparison.NOT_EQUAL_NULL_SAFE, - r, readConcat()); - } else { - r = new Comparison(session, Comparison.EQUAL_NULL_SAFE, r, - readConcat()); - } - } else if (readIf("IN")) { - read(OPEN_PAREN); - if (readIf(CLOSE_PAREN)) { - if (database.getMode().prohibitEmptyInPredicate) { - throw getSyntaxError(); - } - r = ValueExpression.get(ValueBoolean.FALSE); - } else { - if (isSelect()) { - Query query = parseSelect(); - r = new ConditionInSelect(database, r, query, false, - Comparison.EQUAL); - } else { - ArrayList v = Utils.newSmallArrayList(); - Expression last; - do { - last = readExpression(); - v.add(last); - } while (readIf(COMMA)); - if (v.size() == 1 && (last instanceof Subquery)) { - Subquery s = (Subquery) last; - Query q = s.getQuery(); - r = new ConditionInSelect(database, r, q, false, - Comparison.EQUAL); - } else { - r = new ConditionIn(database, r, v); - } - } - read(CLOSE_PAREN); - } - } else if (readIf("BETWEEN")) { - Expression low = readConcat(); - read("AND"); - Expression high = readConcat(); - Expression condLow = new Comparison(session, - Comparison.SMALLER_EQUAL, low, r); - Expression condHigh = new Comparison(session, - Comparison.BIGGER_EQUAL, high, r); - r = new ConditionAndOr(ConditionAndOr.AND, condLow, condHigh); - } else { - if (not) { - throw getSyntaxError(); - } - int compareType = getCompareType(currentTokenType); - if (compareType < 0) { - break; + c = readConditionRightHandSide(l, not, false); + } while (c != null); + return l; + } + + private Expression readConditionRightHandSide(Expression r, boolean not, boolean whenOperand) { + if (!not && readIf(IS)) { + r = readConditionIs(r, whenOperand); + } else { + switch (currentTokenType) { + case BETWEEN: { + read(); + boolean symmetric = readIf(SYMMETRIC); + if (!symmetric) { + readIf(ASYMMETRIC); } + Expression a = readConcat(); + read(AND); + r = new BetweenPredicate(r, not, whenOperand, symmetric, a, readConcat()); + break; + } + case IN: read(); - int start = 
lastParseIndex; - if (readIf(ALL)) { - read(OPEN_PAREN); - if (isSelect()) { - Query query = parseSelect(); - r = new ConditionInSelect(database, r, query, true, compareType); - read(CLOSE_PAREN); - } else { - parseIndex = start; - read(); - r = new Comparison(session, compareType, r, readConcat()); + r = readInPredicate(r, not, whenOperand); + break; + case LIKE: { + read(); + r = readLikePredicate(r, LikeType.LIKE, not, whenOperand); + break; + } + default: + if (readIf("ILIKE")) { + r = readLikePredicate(r, LikeType.ILIKE, not, whenOperand); + } else if (readIf("REGEXP")) { + Expression b = readConcat(); + recompileAlways = true; + r = new CompareLike(database, r, not, whenOperand, b, null, LikeType.REGEXP); + } else if (not) { + if (whenOperand) { + return null; } - } else if (readIf("ANY") || readIf("SOME")) { - read(OPEN_PAREN); - if (currentTokenType == PARAMETER && compareType == 0) { - Parameter p = readParameter(); - r = new ConditionInParameter(database, r, p); - read(CLOSE_PAREN); - } else if (isSelect()) { - Query query = parseSelect(); - r = new ConditionInSelect(database, r, query, false, compareType); - read(CLOSE_PAREN); - } else { - parseIndex = start; - read(); - r = new Comparison(session, compareType, r, readConcat()); + if (expectedList != null) { + addMultipleExpected(BETWEEN, IN, LIKE); } + throw getSyntaxError(); } else { - r = new Comparison(session, compareType, r, readConcat()); + int compareType = getCompareType(currentTokenType); + if (compareType < 0) { + return null; + } + read(); + r = readComparison(r, compareType, whenOperand); } } - if (not) { - r = new ConditionNot(r); - } } return r; } - private Expression readConcat() { - Expression r = readSum(); - while (true) { - if (readIf(STRING_CONCAT)) { - r = new BinaryOperation(OpType.CONCAT, r, readSum()); - } else if (readIf(TILDE)) { - if (readIf(ASTERISK)) { - Function function = Function.getFunction(database, "CAST"); - function.setDataType(new Column("X", - 
Value.STRING_IGNORECASE)); - function.setParameter(0, r); - r = function; - } - r = new CompareLike(database, r, readSum(), null, true); - } else if (readIf(NOT_TILDE)) { - if (readIf(ASTERISK)) { - Function function = Function.getFunction(database, "CAST"); - function.setDataType(new Column("X", - Value.STRING_IGNORECASE)); - function.setParameter(0, r); - r = function; - } - r = new ConditionNot(new CompareLike(database, r, readSum(), - null, true)); + private Expression readConditionIs(Expression left, boolean whenOperand) { + boolean isNot = readIf(NOT); + switch (currentTokenType) { + case NULL: + read(); + left = new NullPredicate(left, isNot, whenOperand); + break; + case DISTINCT: + read(); + read(FROM); + left = readComparison(left, isNot ? Comparison.EQUAL_NULL_SAFE : Comparison.NOT_EQUAL_NULL_SAFE, + whenOperand); + break; + case TRUE: + read(); + left = new BooleanTest(left, isNot, whenOperand, true); + break; + case FALSE: + read(); + left = new BooleanTest(left, isNot, whenOperand, false); + break; + case UNKNOWN: + read(); + left = new BooleanTest(left, isNot, whenOperand, null); + break; + default: + if (readIf("OF")) { + left = readTypePredicate(left, isNot, whenOperand); + } else if (readIf("JSON")) { + left = readJsonPredicate(left, isNot, whenOperand); } else { - return r; + if (expectedList != null) { + addMultipleExpected(NULL, DISTINCT, TRUE, FALSE, UNKNOWN); + } + /* + * Databases that were created in 1.4.199 and older + * versions can contain invalid generated IS [ NOT ] + * expressions. + */ + if (whenOperand || !session.isQuirksMode()) { + throw getSyntaxError(); + } + left = new Comparison(isNot ? 
Comparison.NOT_EQUAL_NULL_SAFE : Comparison.EQUAL_NULL_SAFE, left, + readConcat(), false); } } + return left; } - private Expression readSum() { - Expression r = readFactor(); - while (true) { - if (readIf(PLUS_SIGN)) { - r = new BinaryOperation(OpType.PLUS, r, readFactor()); - } else if (readIf(MINUS_SIGN)) { - r = new BinaryOperation(OpType.MINUS, r, readFactor()); - } else { - return r; - } - } + private TypePredicate readTypePredicate(Expression left, boolean not, boolean whenOperand) { + read(OPEN_PAREN); + ArrayList typeList = Utils.newSmallArrayList(); + do { + typeList.add(parseDataType()); + } while (readIfMore()); + return new TypePredicate(left, not, whenOperand, typeList.toArray(new TypeInfo[0])); } - private Expression readFactor() { - Expression r = readTerm(); - while (true) { - if (readIf(ASTERISK)) { - r = new BinaryOperation(OpType.MULTIPLY, r, readTerm()); - } else if (readIf(SLASH)) { - r = new BinaryOperation(OpType.DIVIDE, r, readTerm()); - } else if (readIf(PERCENT)) { - r = new BinaryOperation(OpType.MODULUS, r, readTerm()); - } else { - return r; + private Expression readInPredicate(Expression left, boolean not, boolean whenOperand) { + read(OPEN_PAREN); + if (!whenOperand && database.getMode().allowEmptyInPredicate && readIf(CLOSE_PAREN)) { + return ValueExpression.getBoolean(not); + } + ArrayList v; + if (isQuery()) { + Query query = parseQuery(); + if (!readIfMore()) { + return new ConditionInQuery(left, not, whenOperand, query, false, Comparison.EQUAL); } + v = Utils.newSmallArrayList(); + v.add(new Subquery(query)); + } else { + v = Utils.newSmallArrayList(); } - } + do { + v.add(readExpression()); + } while (readIfMore()); + return new ConditionIn(left, not, whenOperand, v); + } + + private IsJsonPredicate readJsonPredicate(Expression left, boolean not, boolean whenOperand) { + JSONItemType itemType; + if (readIf(VALUE)) { + itemType = JSONItemType.VALUE; + } else if (readIf(ARRAY)) { + itemType = JSONItemType.ARRAY; + } else if 
(readIf("OBJECT")) { + itemType = JSONItemType.OBJECT; + } else if (readIf("SCALAR")) { + itemType = JSONItemType.SCALAR; + } else { + itemType = JSONItemType.VALUE; + } + boolean unique = false; + if (readIf(WITH)) { + read(UNIQUE); + readIf("KEYS"); + unique = true; + } else if (readIf("WITHOUT")) { + read(UNIQUE); + readIf("KEYS"); + } + return new IsJsonPredicate(left, not, whenOperand, unique, itemType); + } + + private Expression readLikePredicate(Expression left, LikeType likeType, boolean not, boolean whenOperand) { + Expression right = readConcat(); + Expression esc = readIf("ESCAPE") ? readConcat() : null; + recompileAlways = true; + return new CompareLike(database, left, not, whenOperand, right, esc, likeType); + } + + private Expression readComparison(Expression left, int compareType, boolean whenOperand) { + int start = tokenIndex; + if (readIf(ALL)) { + read(OPEN_PAREN); + if (isQuery()) { + Query query = parseQuery(); + left = new ConditionInQuery(left, false, whenOperand, query, true, compareType); + read(CLOSE_PAREN); + } else { + setTokenIndex(start); + left = new Comparison(compareType, left, readConcat(), whenOperand); + } + } else if (readIf(ANY) || readIf(SOME)) { + read(OPEN_PAREN); + if (currentTokenType == PARAMETER && compareType == Comparison.EQUAL) { + Parameter p = readParameter(); + left = new ConditionInParameter(left, false, whenOperand, p); + read(CLOSE_PAREN); + } else if (isQuery()) { + Query query = parseQuery(); + left = new ConditionInQuery(left, false, whenOperand, query, false, compareType); + read(CLOSE_PAREN); + } else { + setTokenIndex(start); + left = new Comparison(compareType, left, readConcat(), whenOperand); + } + } else { + left = new Comparison(compareType, left, readConcat(), whenOperand); + } + return left; + } + + private Expression readConcat() { + Expression op1 = readSum(); + for (;;) { + switch (currentTokenType) { + case CONCATENATION: { + read(); + Expression op2 = readSum(); + if (readIf(CONCATENATION)) { 
+ ConcatenationOperation c = new ConcatenationOperation(); + c.addParameter(op1); + c.addParameter(op2); + do { + c.addParameter(readSum()); + } while (readIf(CONCATENATION)); + c.doneWithParameters(); + op1 = c; + } else { + op1 = new ConcatenationOperation(op1, op2); + } + break; + } + case TILDE: // PostgreSQL compatibility + op1 = readTildeCondition(op1, false); + break; + case NOT_TILDE: // PostgreSQL compatibility + op1 = readTildeCondition(op1, true); + break; + default: + // Don't add compatibility operators + addExpected(CONCATENATION); + return op1; + } + } + } + + private Expression readSum() { + Expression r = readFactor(); + while (true) { + if (readIf(PLUS_SIGN)) { + r = new BinaryOperation(OpType.PLUS, r, readFactor()); + } else if (readIf(MINUS_SIGN)) { + r = new BinaryOperation(OpType.MINUS, r, readFactor()); + } else { + return r; + } + } + } + + private Expression readFactor() { + Expression r = readTerm(); + while (true) { + if (readIf(ASTERISK)) { + r = new BinaryOperation(OpType.MULTIPLY, r, readTerm()); + } else if (readIf(SLASH)) { + r = new BinaryOperation(OpType.DIVIDE, r, readTerm()); + } else if (readIf(PERCENT)) { + r = new MathFunction(r, readTerm(), MathFunction.MOD); + } else { + return r; + } + } + } + + private Expression readTildeCondition(Expression r, boolean not) { + read(); + if (readIf(ASTERISK)) { + r = new CastSpecification(r, TypeInfo.TYPE_VARCHAR_IGNORECASE); + } + return new CompareLike(database, r, not, false, readSum(), null, LikeType.REGEXP); + } private Expression readAggregate(AggregateType aggregateType, String aggregateName) { if (currentSelect == null) { + expectedList = null; throw getSyntaxError(); } Aggregate r; @@ -3121,44 +3463,70 @@ private Expression readAggregate(AggregateType aggregateType, String aggregateNa } } break; + case COVAR_POP: + case COVAR_SAMP: + case CORR: + case REGR_SLOPE: + case REGR_INTERCEPT: + case REGR_COUNT: + case REGR_R2: + case REGR_AVGX: + case REGR_AVGY: + case REGR_SXX: + case 
REGR_SYY: + case REGR_SXY: + r = new Aggregate(aggregateType, new Expression[] { readExpression(), readNextArgument() }, + currentSelect, false); + break; + case HISTOGRAM: + r = new Aggregate(aggregateType, new Expression[] { readExpression() }, currentSelect, false); + break; case LISTAGG: { boolean distinct = readDistinctAgg(); - Expression arg = readExpression(), separator = null; - ArrayList orderByList = null; - if (equalsToken("STRING_AGG", aggregateName)) { + Expression arg = readExpression(); + ListaggArguments extraArguments = new ListaggArguments(); + ArrayList orderByList; + if ("STRING_AGG".equals(aggregateName)) { // PostgreSQL compatibility: string_agg(expression, delimiter) read(COMMA); - separator = readExpression(); - if (readIf(ORDER)) { - read("BY"); - orderByList = parseSimpleOrderList(); - } - } else if (equalsToken("GROUP_CONCAT", aggregateName)){ - if (readIf(ORDER)) { - read("BY"); - orderByList = parseSimpleOrderList(); - } + extraArguments.setSeparator(readString()); + orderByList = readIfOrderBy(); + } else if ("GROUP_CONCAT".equals(aggregateName)) { + orderByList = readIfOrderBy(); if (readIf("SEPARATOR")) { - separator = readExpression(); + extraArguments.setSeparator(readString()); } } else { if (readIf(COMMA)) { - separator = readExpression(); + extraArguments.setSeparator(readString()); } if (readIf(ON)) { read("OVERFLOW"); - read("ERROR"); + if (readIf("TRUNCATE")) { + extraArguments.setOnOverflowTruncate(true); + if (currentTokenType == LITERAL) { + extraArguments.setFilter(readString()); + } + if (!readIf(WITH)) { + read("WITHOUT"); + extraArguments.setWithoutCount(true); + } + read("COUNT"); + } else { + read("ERROR"); + } } + orderByList = null; } - Expression[] args = separator == null ? 
new Expression[] { arg } : new Expression[] { arg, separator }; - int index = lastParseIndex; + Expression[] args = new Expression[] { arg }; + int index = tokenIndex; read(CLOSE_PAREN); if (orderByList == null && isToken("WITHIN")) { - r = readWithinGroup(aggregateType, args, distinct, false); + r = readWithinGroup(aggregateType, args, distinct, extraArguments, false, false); } else { - parseIndex = index; - read(); + setTokenIndex(index); r = new Aggregate(AggregateType.LISTAGG, args, currentSelect, distinct); + r.setExtraArguments(extraArguments); if (orderByList != null) { r.setOrderByList(orderByList); } @@ -3168,10 +3536,7 @@ private Expression readAggregate(AggregateType aggregateType, String aggregateNa case ARRAY_AGG: { boolean distinct = readDistinctAgg(); r = new Aggregate(AggregateType.ARRAY_AGG, new Expression[] { readExpression() }, currentSelect, distinct); - if (readIf(ORDER)) { - read("BY"); - r.setOrderByList(parseSimpleOrderList()); - } + r.setOrderByList(readIfOrderBy()); break; } case RANK: @@ -3184,30 +3549,30 @@ private Expression readAggregate(AggregateType aggregateType, String aggregateNa ArrayList expressions = Utils.newSmallArrayList(); do { expressions.add(readExpression()); - } while (readIfMore(true)); - r = readWithinGroup(aggregateType, expressions.toArray(new Expression[0]), false, true); + } while (readIfMore()); + r = readWithinGroup(aggregateType, expressions.toArray(new Expression[0]), false, null, true, false); break; } case PERCENTILE_CONT: case PERCENTILE_DISC: { Expression num = readExpression(); read(CLOSE_PAREN); - r = readWithinGroup(aggregateType, new Expression[] { num }, false, false); + r = readWithinGroup(aggregateType, new Expression[] { num }, false, null, false, true); break; } case MODE: { if (readIf(CLOSE_PAREN)) { - r = readWithinGroup(AggregateType.MODE, new Expression[0], false, false); + r = readWithinGroup(AggregateType.MODE, new Expression[0], false, null, false, true); } else { Expression expr = 
readExpression(); - r = new Aggregate(aggregateType, new Expression[0], currentSelect, false); + r = new Aggregate(AggregateType.MODE, new Expression[0], currentSelect, false); if (readIf(ORDER)) { read("BY"); Expression expr2 = readExpression(); - String sql = expr.getSQL(true), sql2 = expr2.getSQL(true); + String sql = expr.getSQL(HasSQL.DEFAULT_SQL_FLAGS), sql2 = expr2.getSQL(HasSQL.DEFAULT_SQL_FLAGS); if (!sql.equals(sql2)) { throw DbException.getSyntaxError(ErrorCode.IDENTICAL_EXPRESSIONS_SHOULD_BE_USED, sqlCommand, - lastParseIndex, sql, sql2); + token.start(), sql, sql2); } readAggregateOrder(r, expr, true); } else { @@ -3216,6 +3581,28 @@ private Expression readAggregate(AggregateType aggregateType, String aggregateNa } break; } + case JSON_OBJECTAGG: { + boolean withKey = readIf(KEY); + Expression key = readExpression(); + if (withKey) { + read(VALUE); + } else if (!readIf(VALUE)) { + read(COLON); + } + Expression value = readExpression(); + r = new Aggregate(AggregateType.JSON_OBJECTAGG, new Expression[] { key, value }, currentSelect, false); + readJsonObjectFunctionFlags(r, false); + break; + } + case JSON_ARRAYAGG: { + boolean distinct = readDistinctAgg(); + r = new Aggregate(AggregateType.JSON_ARRAYAGG, new Expression[] { readExpression() }, currentSelect, + distinct); + r.setOrderByList(readIfOrderBy()); + r.setFlags(JsonConstructorUtils.JSON_ABSENT_ON_NULL); + readJsonObjectFunctionFlags(r, true); + break; + } default: boolean distinct = readDistinctAgg(); r = new Aggregate(aggregateType, new Expression[] { readExpression() }, currentSelect, distinct); @@ -3227,90 +3614,91 @@ private Expression readAggregate(AggregateType aggregateType, String aggregateNa } private Aggregate readWithinGroup(AggregateType aggregateType, Expression[] args, boolean distinct, - boolean forHypotheticalSet) { + Object extraArguments, boolean forHypotheticalSet, boolean simple) { read("WITHIN"); read(GROUP); read(OPEN_PAREN); read(ORDER); read("BY"); Aggregate r = new 
Aggregate(aggregateType, args, currentSelect, distinct); + r.setExtraArguments(extraArguments); if (forHypotheticalSet) { int count = args.length; - ArrayList orderList = new ArrayList<>(count); + ArrayList orderList = new ArrayList<>(count); for (int i = 0; i < count; i++) { if (i > 0) { read(COMMA); } - SelectOrderBy order = new SelectOrderBy(); - order.expression = readExpression(); - order.sortType = parseSimpleSortType(); - orderList.add(order); + orderList.add(parseSortSpecification()); } r.setOrderByList(orderList); - } else { + } else if (simple) { readAggregateOrder(r, readExpression(), true); + } else { + r.setOrderByList(parseSortSpecificationList()); } return r; } private void readAggregateOrder(Aggregate r, Expression expr, boolean parseSortType) { - ArrayList orderList = new ArrayList<>(1); - SelectOrderBy order = new SelectOrderBy(); + ArrayList orderList = new ArrayList<>(1); + QueryOrderBy order = new QueryOrderBy(); order.expression = expr; if (parseSortType) { - order.sortType = parseSimpleSortType(); + order.sortType = parseSortType(); } orderList.add(order); r.setOrderByList(orderList); } - private ArrayList parseSimpleOrderList() { - ArrayList orderList = Utils.newSmallArrayList(); + private ArrayList readIfOrderBy() { + if (readIf(ORDER)) { + read("BY"); + return parseSortSpecificationList(); + } + return null; + } + + private ArrayList parseSortSpecificationList() { + ArrayList orderList = Utils.newSmallArrayList(); do { - SelectOrderBy order = new SelectOrderBy(); - order.expression = readExpression(); - order.sortType = parseSortType(); - orderList.add(order); + orderList.add(parseSortSpecification()); } while (readIf(COMMA)); return orderList; } - private JavaFunction readJavaFunction(Schema schema, String functionName, boolean throwIfNotFound) { - FunctionAlias functionAlias; - if (schema != null) { - functionAlias = schema.findFunction(functionName); - } else { - functionAlias = findFunctionAlias(session.getCurrentSchemaName(), - 
functionName); - } - if (functionAlias == null) { - if (throwIfNotFound) { - throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, functionName); - } else { - return null; + private QueryOrderBy parseSortSpecification() { + QueryOrderBy order = new QueryOrderBy(); + order.expression = readExpression(); + order.sortType = parseSortType(); + return order; + } + + private Expression readUserDefinedFunctionIf(Schema schema, String functionName) { + UserDefinedFunction userDefinedFunction = findUserDefinedFunctionWithinPath(schema, functionName); + if (userDefinedFunction == null) { + return null; + } else if (userDefinedFunction instanceof FunctionAlias) { + FunctionAlias functionAlias = (FunctionAlias) userDefinedFunction; + ArrayList argList = Utils.newSmallArrayList(); + if (!readIf(CLOSE_PAREN)) { + do { + argList.add(readExpression()); + } while (readIfMore()); } - } - Expression[] args; - ArrayList argList = Utils.newSmallArrayList(); - if (!readIf(CLOSE_PAREN)) { + return new JavaFunction(functionAlias, argList.toArray(new Expression[0])); + } else { + UserAggregate aggregate = (UserAggregate) userDefinedFunction; + boolean distinct = readDistinctAgg(); + ArrayList params = Utils.newSmallArrayList(); do { - argList.add(readExpression()); - } while (readIfMore(true)); + params.add(readExpression()); + } while (readIfMore()); + Expression[] list = params.toArray(new Expression[0]); + JavaAggregate agg = new JavaAggregate(aggregate, list, currentSelect, distinct); + readFilterAndOver(agg); + return agg; } - args = argList.toArray(new Expression[0]); - return new JavaFunction(functionAlias, args); - } - - private JavaAggregate readJavaAggregate(UserAggregate aggregate) { - boolean distinct = readDistinctAgg(); - ArrayList params = Utils.newSmallArrayList(); - do { - params.add(readExpression()); - } while (readIfMore(true)); - Expression[] list = params.toArray(new Expression[0]); - JavaAggregate agg = new JavaAggregate(aggregate, list, currentSelect, distinct); - 
readFilterAndOver(agg); - return agg; } private boolean readDistinctAgg() { @@ -3344,20 +3732,20 @@ private void readOver(DataAnalysisOperation operation) { } private Window readWindowNameOrSpecification() { - return isToken(OPEN_PAREN) ? readWindowSpecification() : new Window(readAliasIdentifier(), null, null, null); + return isToken(OPEN_PAREN) ? readWindowSpecification() : new Window(readIdentifier(), null, null, null); } private Window readWindowSpecification() { read(OPEN_PAREN); String parent = null; if (currentTokenType == IDENTIFIER) { - String token = currentToken; - if (currentTokenQuoted || ( // - !equalsToken(token, "PARTITION") // - && !equalsToken(token, "ROWS") // - && !equalsToken(token, "RANGE") // - && !equalsToken(token, "GROUPS"))) { - parent = token; + String current = currentToken; + if (token.isQuoted() || ( // + !equalsToken(current, "PARTITION") // + && !equalsToken(current, "ROWS") // + && !equalsToken(current, "RANGE") // + && !equalsToken(current, "GROUPS"))) { + parent = current; read(); } } @@ -3370,11 +3758,7 @@ private Window readWindowSpecification() { partitionBy.add(expr); } while (readIf(COMMA)); } - ArrayList orderBy = null; - if (readIf(ORDER)) { - read("BY"); - orderBy = parseSimpleOrderList(); - } + ArrayList orderBy = readIfOrderBy(); WindowFrame frame = readWindowFrame(); read(CLOSE_PAREN); return new Window(parent, partitionBy, orderBy, frame); @@ -3392,15 +3776,15 @@ private WindowFrame readWindowFrame() { return null; } WindowFrameBound starting, following; - if (readIf("BETWEEN")) { + if (readIf(BETWEEN)) { starting = readWindowFrameRange(); - read("AND"); + read(AND); following = readWindowFrameRange(); } else { starting = readWindowFrameStarting(); following = null; } - int idx = lastParseIndex; + int sqlIndex = token.start(); WindowFrameExclusion exclusion = WindowFrameExclusion.EXCLUDE_NO_OTHERS; if (readIf("EXCLUDE")) { if (readIf("CURRENT")) { @@ -3417,7 +3801,7 @@ private WindowFrame readWindowFrame() { } 
WindowFrame frame = new WindowFrame(units, starting, following, exclusion); if (!frame.isValid()) { - throw DbException.getSyntaxError(sqlCommand, idx); + throw DbException.getSyntaxError(sqlCommand, sqlIndex); } return frame; } @@ -3456,226 +3840,827 @@ private WindowFrameBound readWindowFrameRange() { return new WindowFrameBound(WindowFrameBoundType.FOLLOWING, value); } - private AggregateType getAggregateType(String name) { - if (!identifiersToUpper) { - // if not yet converted to uppercase, do it now - name = StringUtils.toUpperEnglish(name); - } - return Aggregate.getAggregateType(name); - } - private Expression readFunction(Schema schema, String name) { + String upperName = upperName(name); if (schema != null) { - return readJavaFunction(schema, name, true); + return readFunctionWithSchema(schema, name, upperName); } boolean allowOverride = database.isAllowBuiltinAliasOverride(); if (allowOverride) { - JavaFunction jf = readJavaFunction(null, name, false); - if (jf != null) { - return jf; + Expression e = readUserDefinedFunctionIf(null, name); + if (e != null) { + return e; } } - AggregateType agg = getAggregateType(name); + AggregateType agg = Aggregate.getAggregateType(upperName); if (agg != null) { - return readAggregate(agg, name); - } - Function function = Function.getFunction(database, name); - if (function == null) { - WindowFunction windowFunction = readWindowFunction(name); - if (windowFunction != null) { - return windowFunction; - } - UserAggregate aggregate = database.findAggregate(name); - if (aggregate != null) { - return readJavaAggregate(aggregate); - } - if (allowOverride) { - throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, name); - } - return readJavaFunction(null, name, true); + return readAggregate(agg, upperName); } - switch (function.getFunctionType()) { - case Function.CAST: { - function.setParameter(0, readExpression()); - read("AS"); - Column type = parseColumnWithType(null, false); - function.setDataType(type); - 
read(CLOSE_PAREN); - break; + Expression e = readBuiltinFunctionIf(upperName); + if (e != null) { + return e; } - case Function.CONVERT: { - if (database.getMode().swapConvertFunctionParameters) { - Column type = parseColumnWithType(null, false); - function.setDataType(type); - read(COMMA); - function.setParameter(0, readExpression()); - read(CLOSE_PAREN); - } else { - function.setParameter(0, readExpression()); - read(COMMA); - Column type = parseColumnWithType(null, false); - function.setDataType(type); - read(CLOSE_PAREN); - } - break; + e = readWindowFunction(upperName); + if (e != null) { + return e; } - case Function.EXTRACT: { - function.setParameter(0, ValueExpression.get(ValueString.get(currentToken))); - read(); - read(FROM); - function.setParameter(1, readExpression()); - read(CLOSE_PAREN); - break; + e = readCompatibilityFunction(upperName); + if (e != null) { + return e; } - case Function.DATE_ADD: - case Function.DATE_DIFF: { - if (currentTokenType == VALUE) { - function.setParameter(0, ValueExpression.get(currentValue.convertTo(Value.STRING))); - } else { - function.setParameter(0, ValueExpression.get(ValueString.get(currentToken))); + if (!allowOverride) { + e = readUserDefinedFunctionIf(null, name); + if (e != null) { + return e; + } + } + throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, name); + } + + private Expression readFunctionWithSchema(Schema schema, String name, String upperName) { + if (database.getMode().getEnum() == ModeEnum.PostgreSQL + && schema.getName().equals(database.sysIdentifier("PG_CATALOG"))) { + FunctionsPostgreSQL function = FunctionsPostgreSQL.getFunction(upperName); + if (function != null) { + return readParameters(function); + } + } + Expression function = readUserDefinedFunctionIf(schema, name); + if (function != null) { + return function; + } + throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, name); + } + + private Expression readCompatibilityFunction(String name) { + switch (name) { + // || + case 
"ARRAY_APPEND": + case "ARRAY_CAT": + return new ConcatenationOperation(readExpression(), readLastArgument()); + // [] + case "ARRAY_GET": + return new ArrayElementReference(readExpression(), readLastArgument()); + // CARDINALITY + case "ARRAY_LENGTH": + return new CardinalityExpression(readSingleArgument(), false); + // Simple case + case "DECODE": { + Expression caseOperand = readExpression(); + boolean canOptimize = caseOperand.isConstant() && !caseOperand.getValue(session).containsNull(); + Expression a = readNextArgument(), b = readNextArgument(); + SimpleCase.SimpleWhen when = decodeToWhen(caseOperand, canOptimize, a, b), current = when; + Expression elseResult = null; + while (readIf(COMMA)) { + a = readExpression(); + if (readIf(COMMA)) { + b = readExpression(); + SimpleCase.SimpleWhen next = decodeToWhen(caseOperand, canOptimize, a, b); + current.setWhen(next); + current = next; + } else { + elseResult = a; + break; + } } - read(); - read(COMMA); - function.setParameter(1, readExpression()); - read(COMMA); - function.setParameter(2, readExpression()); read(CLOSE_PAREN); - break; - } - case Function.SUBSTRING: { - // Different variants include: - // SUBSTRING(X,1) - // SUBSTRING(X,1,1) - // SUBSTRING(X FROM 1 FOR 1) -- Postgres - // SUBSTRING(X FROM 1) -- Postgres - // SUBSTRING(X FOR 1) -- Postgres - function.setParameter(0, readExpression()); - if (readIf(FROM)) { - function.setParameter(1, readExpression()); - if (readIf(FOR)) { - function.setParameter(2, readExpression()); - } - } else if (readIf(FOR)) { - function.setParameter(1, ValueExpression.get(ValueInt.get(0))); - function.setParameter(2, readExpression()); + return new SimpleCase(caseOperand, when, elseResult); + } + // Searched case + case "CASEWHEN": + return readCompatibilityCase(readExpression()); + case "NVL2": + return readCompatibilityCase(new NullPredicate(readExpression(), true, false)); + // Cast specification + case "CONVERT": { + Expression arg; + Column column; + if 
(database.getMode().swapConvertFunctionParameters) { + column = parseColumnWithType(null); + arg = readNextArgument(); } else { + arg = readExpression(); read(COMMA); - function.setParameter(1, readExpression()); - if (readIf(COMMA)) { - function.setParameter(2, readExpression()); - } + column = parseColumnWithType(null); } read(CLOSE_PAREN); - break; - } - case Function.POSITION: { + return new CastSpecification(arg, column); + } + // COALESCE + case "IFNULL": + return new CoalesceFunction(CoalesceFunction.COALESCE, readExpression(), readLastArgument()); + case "NVL": + return readCoalesceFunction(CoalesceFunction.COALESCE); + // CURRENT_CATALOG + case "DATABASE": + read(CLOSE_PAREN); + return new CurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_CATALOG); + // CURRENT_DATE + case "CURDATE": + case "SYSDATE": + case "TODAY": + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, true, name); + // CURRENT_SCHEMA + case "SCHEMA": + read(CLOSE_PAREN); + return new CurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_SCHEMA); + // CURRENT_TIMESTAMP + case "SYSTIMESTAMP": + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP, true, name); + // EXTRACT + case "DAY": + case "DAY_OF_MONTH": + case "DAYOFMONTH": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.DAY, readSingleArgument(), null); + case "DAY_OF_WEEK": + case "DAYOFWEEK": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.DAY_OF_WEEK, readSingleArgument(), + null); + case "DAY_OF_YEAR": + case "DAYOFYEAR": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.DAY_OF_YEAR, readSingleArgument(), + null); + case "HOUR": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.HOUR, readSingleArgument(), null); + case "ISO_DAY_OF_WEEK": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.ISO_DAY_OF_WEEK, + 
readSingleArgument(), null); + case "ISO_WEEK": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.ISO_WEEK, readSingleArgument(), + null); + case "ISO_YEAR": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.ISO_WEEK_YEAR, readSingleArgument(), + null); + case "MINUTE": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.MINUTE, readSingleArgument(), null); + case "MONTH": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.MONTH, readSingleArgument(), null); + case "QUARTER": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.QUARTER, readSingleArgument(), // + null); + case "SECOND": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.SECOND, readSingleArgument(), null); + case "WEEK": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.WEEK, readSingleArgument(), null); + case "YEAR": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.YEAR, readSingleArgument(), null); + // LOCALTIME + case "CURTIME": + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, true, "CURTIME"); + case "SYSTIME": + read(CLOSE_PAREN); + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, false, "SYSTIME"); + // LOCALTIMESTAMP + case "NOW": + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, true, "NOW"); + // LOCATE + case "INSTR": { + Expression arg1 = readExpression(); + return new StringFunction(readNextArgument(), arg1, readIfArgument(), StringFunction.LOCATE); + } + case "POSITION": { // can't read expression because IN would be read too early - function.setParameter(0, readConcat()); + Expression arg1 = readConcat(); if (!readIf(COMMA)) { - read("IN"); - } - function.setParameter(1, readExpression()); - read(CLOSE_PAREN); - break; + read(IN); + } + return new StringFunction(arg1, readSingleArgument(), null, 
StringFunction.LOCATE); + } + // LOWER + case "LCASE": + return new StringFunction1(readSingleArgument(), StringFunction1.LOWER); + // SUBSTRING + case "SUBSTR": + return readSubstringFunction(); + // TRIM + case "LTRIM": + return new TrimFunction(readSingleArgument(), null, TrimFunction.LEADING); + case "RTRIM": + return new TrimFunction(readSingleArgument(), null, TrimFunction.TRAILING); + // UPPER + case "UCASE": + return new StringFunction1(readSingleArgument(), StringFunction1.UPPER); + // Sequence value + case "CURRVAL": + return readCompatibilitySequenceValueFunction(true); + case "NEXTVAL": + return readCompatibilitySequenceValueFunction(false); + default: + return null; + } + } + + private T readParameters(T expression) { + if (!readIf(CLOSE_PAREN)) { + do { + expression.addParameter(readExpression()); + } while (readIfMore()); + } + expression.doneWithParameters(); + return expression; + } + + private SimpleCase.SimpleWhen decodeToWhen(Expression caseOperand, boolean canOptimize, Expression whenOperand, + Expression result) { + if (!canOptimize && (!whenOperand.isConstant() || whenOperand.getValue(session).containsNull())) { + whenOperand = new Comparison(Comparison.EQUAL_NULL_SAFE, caseOperand, whenOperand, true); } - case Function.TRIM: { - int flags; - boolean needFrom = false; - if (readIf("LEADING")) { - flags = Function.TRIM_LEADING; - needFrom = true; - } else if (readIf("TRAILING")) { - flags = Function.TRIM_TRAILING; - needFrom = true; + return new SimpleCase.SimpleWhen(whenOperand, result); + } + + private Expression readCompatibilityCase(Expression when) { + return new SearchedCase(new Expression[] { when, readNextArgument(), readLastArgument() }); + } + + private Expression readCompatibilitySequenceValueFunction(boolean current) { + Expression arg1 = readExpression(), arg2 = readIf(COMMA) ? 
readExpression() : null; + read(CLOSE_PAREN); + return new CompatibilitySequenceValueFunction(arg1, arg2, current); + } + + private Expression readBuiltinFunctionIf(String upperName) { + switch (upperName) { + case "ABS": + return new MathFunction(readSingleArgument(), null, MathFunction.ABS); + case "MOD": + return new MathFunction(readExpression(), readLastArgument(), MathFunction.MOD); + case "SIN": + return new MathFunction1(readSingleArgument(), MathFunction1.SIN); + case "COS": + return new MathFunction1(readSingleArgument(), MathFunction1.COS); + case "TAN": + return new MathFunction1(readSingleArgument(), MathFunction1.TAN); + case "COT": + return new MathFunction1(readSingleArgument(), MathFunction1.COT); + case "SINH": + return new MathFunction1(readSingleArgument(), MathFunction1.SINH); + case "COSH": + return new MathFunction1(readSingleArgument(), MathFunction1.COSH); + case "TANH": + return new MathFunction1(readSingleArgument(), MathFunction1.TANH); + case "ASIN": + return new MathFunction1(readSingleArgument(), MathFunction1.ASIN); + case "ACOS": + return new MathFunction1(readSingleArgument(), MathFunction1.ACOS); + case "ATAN": + return new MathFunction1(readSingleArgument(), MathFunction1.ATAN); + case "ATAN2": + return new MathFunction2(readExpression(), readLastArgument(), MathFunction2.ATAN2); + case "LOG": { + Expression arg1 = readExpression(); + if (readIf(COMMA)) { + return new MathFunction2(arg1, readSingleArgument(), MathFunction2.LOG); } else { - needFrom = readIf("BOTH"); - flags = Function.TRIM_LEADING | Function.TRIM_TRAILING; - } - Expression p0, space = null; - function.setFlags(flags); - if (needFrom) { - if (!readIf(FROM)) { - space = readExpression(); - read(FROM); - } - p0 = readExpression(); + read(CLOSE_PAREN); + return new MathFunction1(arg1, + database.getMode().logIsLogBase10 ? 
MathFunction1.LOG10 : MathFunction1.LN); + } + } + case "LOG10": + return new MathFunction1(readSingleArgument(), MathFunction1.LOG10); + case "LN": + return new MathFunction1(readSingleArgument(), MathFunction1.LN); + case "EXP": + return new MathFunction1(readSingleArgument(), MathFunction1.EXP); + case "POWER": + return new MathFunction2(readExpression(), readLastArgument(), MathFunction2.POWER); + case "SQRT": + return new MathFunction1(readSingleArgument(), MathFunction1.SQRT); + case "FLOOR": + return new MathFunction(readSingleArgument(), null, MathFunction.FLOOR); + case "CEIL": + case "CEILING": + return new MathFunction(readSingleArgument(), null, MathFunction.CEIL); + case "ROUND": + return new MathFunction(readExpression(), readIfArgument(), MathFunction.ROUND); + case "ROUNDMAGIC": + return new MathFunction(readSingleArgument(), null, MathFunction.ROUNDMAGIC); + case "SIGN": + return new MathFunction(readSingleArgument(), null, MathFunction.SIGN); + case "TRUNC": + case "TRUNCATE": + return new MathFunction(readExpression(), readIfArgument(), MathFunction.TRUNC); + case "DEGREES": + return new MathFunction1(readSingleArgument(), MathFunction1.DEGREES); + case "RADIANS": + return new MathFunction1(readSingleArgument(), MathFunction1.RADIANS); + case "BITAND": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITAND); + case "BITOR": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITOR); + case "BITXOR": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITXOR); + case "BITNOT": + return new BitFunction(readSingleArgument(), null, BitFunction.BITNOT); + case "BITNAND": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITNAND); + case "BITNOR": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITNOR); + case "BITXNOR": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITXNOR); + case "BITGET": + return 
new BitFunction(readExpression(), readLastArgument(), BitFunction.BITGET); + case "BITCOUNT": + return new BitFunction(readSingleArgument(), null, BitFunction.BITCOUNT); + case "LSHIFT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.LSHIFT); + case "RSHIFT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.RSHIFT); + case "ULSHIFT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.ULSHIFT); + case "URSHIFT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.URSHIFT); + case "ROTATELEFT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.ROTATELEFT); + case "ROTATERIGHT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.ROTATERIGHT); + case "EXTRACT": { + int field = readDateTimeField(); + read(FROM); + return new DateTimeFunction(DateTimeFunction.EXTRACT, field, readSingleArgument(), null); + } + case "DATE_TRUNC": + return new DateTimeFunction(DateTimeFunction.DATE_TRUNC, readDateTimeField(), readLastArgument(), null); + case "DATEADD": + case "TIMESTAMPADD": + return new DateTimeFunction(DateTimeFunction.DATEADD, readDateTimeField(), readNextArgument(), + readLastArgument()); + case "DATEDIFF": + case "TIMESTAMPDIFF": + return new DateTimeFunction(DateTimeFunction.DATEDIFF, readDateTimeField(), readNextArgument(), + readLastArgument()); + case "FORMATDATETIME": + return readDateTimeFormatFunction(DateTimeFormatFunction.FORMATDATETIME); + case "PARSEDATETIME": + return readDateTimeFormatFunction(DateTimeFormatFunction.PARSEDATETIME); + case "DAYNAME": + return new DayMonthNameFunction(readSingleArgument(), DayMonthNameFunction.DAYNAME); + case "MONTHNAME": + return new DayMonthNameFunction(readSingleArgument(), DayMonthNameFunction.MONTHNAME); + case "CARDINALITY": + return new CardinalityExpression(readSingleArgument(), false); + case "ARRAY_MAX_CARDINALITY": + return new 
CardinalityExpression(readSingleArgument(), true); + case "LOCATE": + return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.LOCATE); + case "INSERT": + return new StringFunction(readExpression(), readNextArgument(), readNextArgument(), readLastArgument(), + StringFunction.INSERT); + case "REPLACE": + return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.REPLACE); + case "LPAD": + return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.LPAD); + case "RPAD": + return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.RPAD); + case "TRANSLATE": + return new StringFunction(readExpression(), readNextArgument(), readLastArgument(), + StringFunction.TRANSLATE); + case "UPPER": + return new StringFunction1(readSingleArgument(), StringFunction1.UPPER); + case "LOWER": + return new StringFunction1(readSingleArgument(), StringFunction1.LOWER); + case "ASCII": + return new StringFunction1(readSingleArgument(), StringFunction1.ASCII); + case "CHAR": + case "CHR": + return new StringFunction1(readSingleArgument(), StringFunction1.CHAR); + case "STRINGENCODE": + return new StringFunction1(readSingleArgument(), StringFunction1.STRINGENCODE); + case "STRINGDECODE": + return new StringFunction1(readSingleArgument(), StringFunction1.STRINGDECODE); + case "STRINGTOUTF8": + return new StringFunction1(readSingleArgument(), StringFunction1.STRINGTOUTF8); + case "UTF8TOSTRING": + return new StringFunction1(readSingleArgument(), StringFunction1.UTF8TOSTRING); + case "HEXTORAW": + return new StringFunction1(readSingleArgument(), StringFunction1.HEXTORAW); + case "RAWTOHEX": + return new StringFunction1(readSingleArgument(), StringFunction1.RAWTOHEX); + case "SPACE": + return new StringFunction1(readSingleArgument(), StringFunction1.SPACE); + case "QUOTE_IDENT": + return new StringFunction1(readSingleArgument(), 
StringFunction1.QUOTE_IDENT); + case "SUBSTRING": + return readSubstringFunction(); + case "TO_CHAR": { + Expression arg1 = readExpression(), arg2, arg3; + if (readIf(COMMA)) { + arg2 = readExpression(); + arg3 = readIf(COMMA) ? readExpression() : null; } else { - if (readIf(FROM)) { - p0 = readExpression(); - } else { - p0 = readExpression(); - if (readIf(FROM)) { - space = p0; - p0 = readExpression(); - } - } - } - if (!needFrom && space == null && readIf(COMMA)) { - space = readExpression(); - } - function.setParameter(0, p0); - if (space != null) { - function.setParameter(1, space); + arg3 = arg2 = null; } read(CLOSE_PAREN); - break; - } - case Function.TABLE: - case Function.TABLE_DISTINCT: { - int i = 0; - ArrayList columns = Utils.newSmallArrayList(); - do { - String columnName = readAliasIdentifier(); - Column column = parseColumnWithType(columnName, false); - columns.add(column); - read(EQUAL); - function.setParameter(i, readExpression()); - i++; - } while (readIfMore(true)); - TableFunction tf = (TableFunction) function; - tf.setColumns(columns); - break; - } - case Function.UNNEST: { - ArrayList columns = Utils.newSmallArrayList(); - if (!readIf(CLOSE_PAREN)) { - int i = 0; + return new ToCharFunction(arg1, arg2, arg3); + } + case "REPEAT": + return new StringFunction2(readExpression(), readLastArgument(), StringFunction2.REPEAT); + case "CHAR_LENGTH": + case "CHARACTER_LENGTH": + case "LENGTH": + return new LengthFunction(readIfSingleArgument(), LengthFunction.CHAR_LENGTH); + case "OCTET_LENGTH": + return new LengthFunction(readIfSingleArgument(), LengthFunction.OCTET_LENGTH); + case "BIT_LENGTH": + return new LengthFunction(readIfSingleArgument(), LengthFunction.BIT_LENGTH); + case "TRIM": + return readTrimFunction(); + case "REGEXP_LIKE": + return readParameters(new RegexpFunction(RegexpFunction.REGEXP_LIKE)); + case "REGEXP_REPLACE": + return readParameters(new RegexpFunction(RegexpFunction.REGEXP_REPLACE)); + case "REGEXP_SUBSTR": + return 
readParameters(new RegexpFunction(RegexpFunction.REGEXP_SUBSTR)); + case "XMLATTR": + return readParameters(new XMLFunction(XMLFunction.XMLATTR)); + case "XMLCDATA": + return readParameters(new XMLFunction(XMLFunction.XMLCDATA)); + case "XMLCOMMENT": + return readParameters(new XMLFunction(XMLFunction.XMLCOMMENT)); + case "XMLNODE": + return readParameters(new XMLFunction(XMLFunction.XMLNODE)); + case "XMLSTARTDOC": + return readParameters(new XMLFunction(XMLFunction.XMLSTARTDOC)); + case "XMLTEXT": + return readParameters(new XMLFunction(XMLFunction.XMLTEXT)); + case "TRIM_ARRAY": + return new ArrayFunction(readExpression(), readLastArgument(), null, ArrayFunction.TRIM_ARRAY); + case "ARRAY_CONTAINS": + return new ArrayFunction(readExpression(), readLastArgument(), null, ArrayFunction.ARRAY_CONTAINS); + case "ARRAY_SLICE": + return new ArrayFunction(readExpression(), readNextArgument(), readLastArgument(), + ArrayFunction.ARRAY_SLICE); + case "COMPRESS": + return new CompressFunction(readExpression(), readIfArgument(), CompressFunction.COMPRESS); + case "EXPAND": + return new CompressFunction(readSingleArgument(), null, CompressFunction.EXPAND); + case "SOUNDEX": + return new SoundexFunction(readSingleArgument(), null, SoundexFunction.SOUNDEX); + case "DIFFERENCE": + return new SoundexFunction(readExpression(), readLastArgument(), SoundexFunction.DIFFERENCE); + case "JSON_OBJECT": { + JsonConstructorFunction function = new JsonConstructorFunction(false); + if (currentTokenType != CLOSE_PAREN && !readJsonObjectFunctionFlags(function, false)) { do { - function.setParameter(i++, readExpression()); - columns.add(new Column("C" + i, Value.NULL)); - } while (readIfMore(true)); - } - if (readIf(WITH)) { - read("ORDINALITY"); - columns.add(new Column("NORD", Value.INT)); + boolean withKey = readIf(KEY); + function.addParameter(readExpression()); + if (withKey) { + read(VALUE); + } else if (!readIf(VALUE)) { + read(COLON); + } + function.addParameter(readExpression()); + } 
while (readIf(COMMA)); + readJsonObjectFunctionFlags(function, false); } - TableFunction tf = (TableFunction) function; - tf.setColumns(columns); - break; + read(CLOSE_PAREN); + function.doneWithParameters(); + return function; } - default: - if (!readIf(CLOSE_PAREN)) { - int i = 0; + case "JSON_ARRAY": { + JsonConstructorFunction function = new JsonConstructorFunction(true); + function.setFlags(JsonConstructorUtils.JSON_ABSENT_ON_NULL); + if (currentTokenType != CLOSE_PAREN && !readJsonObjectFunctionFlags(function, true)) { do { - function.setParameter(i++, readExpression()); - } while (readIfMore(true)); + function.addParameter(readExpression()); + } while (readIf(COMMA)); + readJsonObjectFunctionFlags(function, true); } + read(CLOSE_PAREN); + function.doneWithParameters(); + return function; + } + case "ENCRYPT": + return new CryptFunction(readExpression(), readNextArgument(), readLastArgument(), CryptFunction.ENCRYPT); + case "DECRYPT": + return new CryptFunction(readExpression(), readNextArgument(), readLastArgument(), CryptFunction.DECRYPT); + case "COALESCE": + return readCoalesceFunction(CoalesceFunction.COALESCE); + case "GREATEST": + return readCoalesceFunction(CoalesceFunction.GREATEST); + case "LEAST": + return readCoalesceFunction(CoalesceFunction.LEAST); + case "NULLIF": + return new NullIfFunction(readExpression(), readLastArgument()); + case "CONCAT": + return readConcatFunction(ConcatFunction.CONCAT); + case "CONCAT_WS": + return readConcatFunction(ConcatFunction.CONCAT_WS); + case "HASH": + return new HashFunction(readExpression(), readNextArgument(), readIfArgument(), HashFunction.HASH); + case "ORA_HASH": { + Expression arg1 = readExpression(); + if (readIfMore()) { + return new HashFunction(arg1, readExpression(), readIfArgument(), HashFunction.ORA_HASH); + } + return new HashFunction(arg1, HashFunction.ORA_HASH); + } + case "RAND": + case "RANDOM": + return new RandFunction(readIfSingleArgument(), RandFunction.RAND); + case "SECURE_RAND": + 
return new RandFunction(readSingleArgument(), RandFunction.SECURE_RAND); + case "RANDOM_UUID": + case "UUID": + read(CLOSE_PAREN); + return new RandFunction(null, RandFunction.RANDOM_UUID); + case "ABORT_SESSION": + return new SessionControlFunction(readIfSingleArgument(), SessionControlFunction.ABORT_SESSION); + case "CANCEL_SESSION": + return new SessionControlFunction(readIfSingleArgument(), SessionControlFunction.CANCEL_SESSION); + case "AUTOCOMMIT": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.AUTOCOMMIT); + case "DATABASE_PATH": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.DATABASE_PATH); + case "H2VERSION": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.H2VERSION); + case "LOCK_MODE": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.LOCK_MODE); + case "LOCK_TIMEOUT": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.LOCK_TIMEOUT); + case "MEMORY_FREE": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.MEMORY_FREE); + case "MEMORY_USED": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.MEMORY_USED); + case "READONLY": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.READONLY); + case "SESSION_ID": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.SESSION_ID); + case "TRANSACTION_ID": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.TRANSACTION_ID); + case "DISK_SPACE_USED": + return new TableInfoFunction(readIfSingleArgument(), null, TableInfoFunction.DISK_SPACE_USED); + case "ESTIMATED_ENVELOPE": + return new TableInfoFunction(readExpression(), readLastArgument(), TableInfoFunction.ESTIMATED_ENVELOPE); + case "FILE_READ": + return new FileFunction(readExpression(), readIfArgument(), FileFunction.FILE_READ); + case "FILE_WRITE": + return new FileFunction(readExpression(), readLastArgument(), FileFunction.FILE_WRITE); + case "DATA_TYPE_SQL": + return new 
DataTypeSQLFunction(readExpression(), readNextArgument(), readNextArgument(), + readLastArgument()); + case "DB_OBJECT_ID": + return new DBObjectFunction(readExpression(), readNextArgument(), readIfArgument(), + DBObjectFunction.DB_OBJECT_ID); + case "DB_OBJECT_SQL": + return new DBObjectFunction(readExpression(), readNextArgument(), readIfArgument(), + DBObjectFunction.DB_OBJECT_SQL); + case "CSVWRITE": + return readParameters(new CSVWriteFunction()); + case "SIGNAL": + return new SignalFunction(readExpression(), readLastArgument()); + case "TRUNCATE_VALUE": + return new TruncateValueFunction(readExpression(), readNextArgument(), readLastArgument()); + case "ZERO": + read(CLOSE_PAREN); + return ValueExpression.get(ValueInteger.get(0)); + case "PI": + read(CLOSE_PAREN); + return ValueExpression.get(ValueDouble.get(Math.PI)); } - function.doneWithParameters(); - return function; + ModeFunction function = ModeFunction.getFunction(database, upperName); + return function != null ? readParameters(function) : null; } - private WindowFunction readWindowFunction(String name) { - if (!identifiersToUpper) { - // if not yet converted to uppercase, do it now - name = StringUtils.toUpperEnglish(name); + private Expression readDateTimeFormatFunction(int function) { + DateTimeFormatFunction f = new DateTimeFormatFunction(function); + f.addParameter(readExpression()); + read(COMMA); + f.addParameter(readExpression()); + if (readIf(COMMA)) { + f.addParameter(readExpression()); + if (readIf(COMMA)) { + f.addParameter(readExpression()); + } } - WindowFunctionType type = WindowFunctionType.get(name); - if (type == null) { - return null; + read(CLOSE_PAREN); + f.doneWithParameters(); + return f; + } + + private Expression readTrimFunction() { + int flags; + boolean needFrom = false; + if (readIf("LEADING")) { + flags = TrimFunction.LEADING; + needFrom = true; + } else if (readIf("TRAILING")) { + flags = TrimFunction.TRAILING; + needFrom = true; + } else { + needFrom = readIf("BOTH"); + 
flags = TrimFunction.LEADING | TrimFunction.TRAILING; } - if (currentSelect == null) { - throw getSyntaxError(); + Expression from, space = null; + if (needFrom) { + if (!readIf(FROM)) { + space = readExpression(); + read(FROM); + } + from = readExpression(); + } else { + if (readIf(FROM)) { + from = readExpression(); + } else { + from = readExpression(); + if (readIf(FROM)) { + space = from; + from = readExpression(); + } else if (readIf(COMMA)) { + space = readExpression(); + } + } + } + read(CLOSE_PAREN); + return new TrimFunction(from, space, flags); + } + + private ArrayTableFunction readUnnestFunction() { + ArrayTableFunction f = new ArrayTableFunction(ArrayTableFunction.UNNEST); + ArrayList columns = Utils.newSmallArrayList(); + if (!readIf(CLOSE_PAREN)) { + int i = 0; + do { + Expression expr = readExpression(); + TypeInfo columnType = TypeInfo.TYPE_NULL; + if (expr.isConstant()) { + expr = expr.optimize(session); + TypeInfo exprType = expr.getType(); + if (exprType.getValueType() == Value.ARRAY) { + columnType = (TypeInfo) exprType.getExtTypeInfo(); + } + } + f.addParameter(expr); + columns.add(new Column("C" + ++i, columnType)); + } while (readIfMore()); + } + if (readIf(WITH)) { + read("ORDINALITY"); + columns.add(new Column("NORD", TypeInfo.TYPE_INTEGER)); + } + f.setColumns(columns); + f.doneWithParameters(); + return f; + } + + private ArrayTableFunction readTableFunction(int functionType) { + ArrayTableFunction f = new ArrayTableFunction(functionType); + ArrayList columns = Utils.newSmallArrayList(); + do { + columns.add(parseColumnWithType(readIdentifier())); + read(EQUAL); + f.addParameter(readExpression()); + } while (readIfMore()); + f.setColumns(columns); + f.doneWithParameters(); + return f; + } + + private Expression readSingleArgument() { + Expression arg = readExpression(); + read(CLOSE_PAREN); + return arg; + } + + private Expression readNextArgument() { + read(COMMA); + return readExpression(); + } + + private Expression readLastArgument() 
{ + read(COMMA); + Expression arg = readExpression(); + read(CLOSE_PAREN); + return arg; + } + + private Expression readIfSingleArgument() { + Expression arg; + if (readIf(CLOSE_PAREN)) { + arg = null; + } else { + arg = readExpression(); + read(CLOSE_PAREN); + } + return arg; + } + + private Expression readIfArgument() { + Expression arg = readIf(COMMA) ? readExpression() : null; + read(CLOSE_PAREN); + return arg; + } + + private Expression readCoalesceFunction(int function) { + CoalesceFunction f = new CoalesceFunction(function); + f.addParameter(readExpression()); + while (readIfMore()) { + f.addParameter(readExpression()); + } + f.doneWithParameters(); + return f; + } + + private Expression readConcatFunction(int function) { + ConcatFunction f = new ConcatFunction(function); + f.addParameter(readExpression()); + f.addParameter(readNextArgument()); + if (function == ConcatFunction.CONCAT_WS) { + f.addParameter(readNextArgument()); + } + while (readIfMore()) { + f.addParameter(readExpression()); + } + f.doneWithParameters(); + return f; + } + + private Expression readSubstringFunction() { + // Standard variants are: + // SUBSTRING(X FROM 1) + // SUBSTRING(X FROM 1 FOR 1) + // Different non-standard variants include: + // SUBSTRING(X,1) + // SUBSTRING(X,1,1) + // SUBSTRING(X FOR 1) -- Postgres + SubstringFunction function = new SubstringFunction(); + function.addParameter(readExpression()); + if (readIf(FROM)) { + function.addParameter(readExpression()); + if (readIf(FOR)) { + function.addParameter(readExpression()); + } + } else if (readIf(FOR)) { + function.addParameter(ValueExpression.get(ValueInteger.get(1))); + function.addParameter(readExpression()); + } else { + read(COMMA); + function.addParameter(readExpression()); + if (readIf(COMMA)) { + function.addParameter(readExpression()); + } + } + read(CLOSE_PAREN); + function.doneWithParameters(); + return function; + } + + private int readDateTimeField() { + int field = -1; + switch (currentTokenType) { + case 
IDENTIFIER: + if (!token.isQuoted()) { + field = DateTimeFunction.getField(currentToken); + } + break; + case LITERAL: + if (token.value(session).getValueType() == Value.VARCHAR) { + field = DateTimeFunction.getField(token.value(session).getString()); + } + break; + case YEAR: + field = DateTimeFunction.YEAR; + break; + case MONTH: + field = DateTimeFunction.MONTH; + break; + case DAY: + field = DateTimeFunction.DAY; + break; + case HOUR: + field = DateTimeFunction.HOUR; + break; + case MINUTE: + field = DateTimeFunction.MINUTE; + break; + case SECOND: + field = DateTimeFunction.SECOND; + } + if (field < 0) { + addExpected("date-time field"); + throw getSyntaxError(); + } + read(); + return field; + } + + private WindowFunction readWindowFunction(String name) { + WindowFunctionType type = WindowFunctionType.get(name); + if (type == null) { + return null; + } + if (currentSelect == null) { + throw getSyntaxError(); } int numArgs = WindowFunction.getMinArgumentCount(type); Expression[] args = null; @@ -3709,15 +4694,14 @@ private WindowFunction readWindowFunction(String name) { } read(CLOSE_PAREN); WindowFunction function = new WindowFunction(type, currentSelect, args); - if (type == WindowFunctionType.NTH_VALUE) { - readFromFirstOrLast(function); - } switch (type) { + case NTH_VALUE: + readFromFirstOrLast(function); + //$FALL-THROUGH$ case LEAD: case LAG: case FIRST_VALUE: case LAST_VALUE: - case NTH_VALUE: readRespectOrIgnoreNulls(function); //$FALL-THROUGH$ default: @@ -3743,52 +4727,99 @@ private void readRespectOrIgnoreNulls(WindowFunction function) { } } - private Expression readKeywordFunction(String name) { + private boolean readJsonObjectFunctionFlags(ExpressionWithFlags function, boolean forArray) { + int start = tokenIndex; + boolean result = false; + int flags = function.getFlags(); + if (readIf(NULL)) { + if (readIf(ON)) { + read(NULL); + flags &= ~JsonConstructorUtils.JSON_ABSENT_ON_NULL; + result = true; + } else { + setTokenIndex(start); + return 
false; + } + } else if (readIf("ABSENT")) { + if (readIf(ON)) { + read(NULL); + flags |= JsonConstructorUtils.JSON_ABSENT_ON_NULL; + result = true; + } else { + setTokenIndex(start); + return false; + } + } + if (!forArray) { + if (readIf(WITH)) { + read(UNIQUE); + read("KEYS"); + flags |= JsonConstructorUtils.JSON_WITH_UNIQUE_KEYS; + result = true; + } else if (readIf("WITHOUT")) { + if (readIf(UNIQUE)) { + read("KEYS"); + flags &= ~JsonConstructorUtils.JSON_WITH_UNIQUE_KEYS; + result = true; + } else if (result) { + throw getSyntaxError(); + } else { + setTokenIndex(start); + return false; + } + } + } + if (result) { + function.setFlags(flags); + } + return result; + } + + private Expression readKeywordCompatibilityFunctionOrColumn() { + boolean nonKeyword = nonKeywords != null && nonKeywords.get(currentTokenType); + String name = currentToken; + read(); if (readIf(OPEN_PAREN)) { - return readFunction(null, name); - } else { - return readFunctionWithoutParameters(name); + return readCompatibilityFunction(upperName(name)); + } else if (nonKeyword) { + return readIf(DOT) ? 
readTermObjectDot(name) : new ExpressionColumn(database, null, null, name); } + throw getSyntaxError(); } - private Expression readFunctionWithoutParameters(String name) { + private Expression readCurrentDateTimeValueFunction(int function, boolean hasParen, String name) { + int scale = -1; + if (hasParen) { + if (function != CurrentDateTimeValueFunction.CURRENT_DATE && currentTokenType != CLOSE_PAREN) { + scale = readInt(); + if (scale < 0 || scale > ValueTime.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", + /* compile-time constant */ "" + ValueTime.MAXIMUM_SCALE); + } + } + read(CLOSE_PAREN); + } if (database.isAllowBuiltinAliasOverride()) { - FunctionAlias functionAlias = database.getSchema(session.getCurrentSchemaName()).findFunction(name); + FunctionAlias functionAlias = database.getSchema(session.getCurrentSchemaName()) + .findFunction(name != null ? name : CurrentDateTimeValueFunction.getName(function)); if (functionAlias != null) { - return new JavaFunction(functionAlias, new Expression[0]); + return new JavaFunction(functionAlias, + scale >= 0 ? 
new Expression[] { ValueExpression.get(ValueInteger.get(scale)) } + : new Expression[0]); } } - Function function = Function.getFunction(database, name); - function.doneWithParameters(); - return function; + return new CurrentDateTimeValueFunction(function, scale); } - private Expression readWildcardRowidOrSequenceValue(String schema, String objectName) { + private Expression readIfWildcardRowidOrSequencePseudoColumn(String schema, String objectName) { if (readIf(ASTERISK)) { return parseWildcard(schema, objectName); } if (readIf(_ROWID_)) { - return new ExpressionColumn(database, schema, objectName, Column.ROWID, true); - } - if (schema == null) { - schema = session.getCurrentSchemaName(); + return new ExpressionColumn(database, schema, objectName); } - if (readIf("NEXTVAL")) { - Sequence sequence = findSequence(schema, objectName); - if (sequence != null) { - return new SequenceValue(sequence); - } - } else if (readIf("CURRVAL")) { - Sequence sequence = findSequence(schema, objectName); - if (sequence != null) { - Function function = Function.getFunction(database, "CURRVAL"); - function.setParameter(0, ValueExpression.get(ValueString - .get(sequence.getSchema().getName()))); - function.setParameter(1, ValueExpression.get(ValueString - .get(sequence.getName()))); - function.doneWithParameters(); - return function; - } + if (database.getMode().nextvalAndCurrvalPseudoColumns) { + return readIfSequencePseudoColumn(schema, objectName); } return null; } @@ -3800,120 +4831,110 @@ private Wildcard parseWildcard(String schema, String objectName) { ArrayList exceptColumns = Utils.newSmallArrayList(); do { String s = null, t = null; - String name = readColumnIdentifier(); + String name = readIdentifier(); if (readIf(DOT)) { t = name; - name = readColumnIdentifier(); + name = readIdentifier(); if (readIf(DOT)) { s = t; t = name; - name = readColumnIdentifier(); + name = readIdentifier(); if (readIf(DOT)) { - if (!equalsToken(database.getShortName(), s)) { - throw 
DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, s); - } + checkDatabaseName(s); s = t; t = name; - name = readColumnIdentifier(); + name = readIdentifier(); } } } - exceptColumns.add(new ExpressionColumn(database, s, t, name, false)); - } while (readIfMore(true)); + exceptColumns.add(new ExpressionColumn(database, s, t, name)); + } while (readIfMore()); wildcard.setExceptColumns(exceptColumns); } return wildcard; } + private SequenceValue readIfSequencePseudoColumn(String schema, String objectName) { + if (schema == null) { + schema = session.getCurrentSchemaName(); + } + if (isToken("NEXTVAL")) { + Sequence sequence = findSequence(schema, objectName); + if (sequence != null) { + read(); + return new SequenceValue(sequence, getCurrentPrepared()); + } + } else if (isToken("CURRVAL")) { + Sequence sequence = findSequence(schema, objectName); + if (sequence != null) { + read(); + return new SequenceValue(sequence); + } + } + return null; + } + private Expression readTermObjectDot(String objectName) { - Expression expr = readWildcardRowidOrSequenceValue(null, objectName); + Expression expr = readIfWildcardRowidOrSequencePseudoColumn(null, objectName); if (expr != null) { return expr; } - String name = readColumnIdentifier(); - Schema s = database.findSchema(objectName); + String name = readIdentifier(); if (readIf(OPEN_PAREN)) { - return readFunction(s, name); + return readFunction(database.getSchema(objectName), name); } else if (readIf(DOT)) { String schema = objectName; objectName = name; - expr = readWildcardRowidOrSequenceValue(schema, objectName); + expr = readIfWildcardRowidOrSequencePseudoColumn(schema, objectName); if (expr != null) { return expr; } - name = readColumnIdentifier(); + name = readIdentifier(); if (readIf(OPEN_PAREN)) { - String databaseName = schema; - if (!equalsToken(database.getShortName(), databaseName)) { - throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, - databaseName); - } - schema = objectName; - return 
readFunction(database.getSchema(schema), name); + checkDatabaseName(schema); + return readFunction(database.getSchema(objectName), name); } else if (readIf(DOT)) { - String databaseName = schema; - if (!equalsToken(database.getShortName(), databaseName)) { - throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, - databaseName); - } + checkDatabaseName(schema); schema = objectName; objectName = name; - expr = readWildcardRowidOrSequenceValue(schema, objectName); + expr = readIfWildcardRowidOrSequencePseudoColumn(schema, objectName); if (expr != null) { return expr; } - name = readColumnIdentifier(); - return new ExpressionColumn(database, schema, objectName, name, false); + name = readIdentifier(); } - return new ExpressionColumn(database, schema, objectName, name, false); + return new ExpressionColumn(database, schema, objectName, name); } - return new ExpressionColumn(database, null, objectName, name, false); + return new ExpressionColumn(database, null, objectName, name); } - private Parameter readParameter() { - // there must be no space between ? 
and the number - boolean indexed = Character.isDigit(sqlCommandChars[parseIndex]); + private void checkDatabaseName(String databaseName) { + if (!database.getIgnoreCatalogs() && !equalsToken(database.getShortName(), databaseName)) { + throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, databaseName); + } + } + private Parameter readParameter() { + int index = ((Token.ParameterToken) token).index(); + read(); Parameter p; - if (indexed) { - readParameterIndex(); - if (indexedParameterList == null) { - if (parameters == null) { - // this can occur when parsing expressions only (for - // example check constraints) - throw getSyntaxError(); - } else if (!parameters.isEmpty()) { - throw DbException - .get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS); - } - indexedParameterList = Utils.newSmallArrayList(); - } - int index = currentValue.getInt() - 1; - if (index < 0 || index >= Constants.MAX_PARAMETER_INDEX) { - throw DbException.getInvalidValueException( - "parameter index", index + 1); - } - if (indexedParameterList.size() <= index) { - indexedParameterList.ensureCapacity(index + 1); - while (indexedParameterList.size() <= index) { - indexedParameterList.add(null); - } - } - p = indexedParameterList.get(index); - if (p == null) { - p = new Parameter(index); - indexedParameterList.set(index, p); - parameters.add(p); - } - read(); - } else { - read(); - if (indexedParameterList != null) { - throw DbException - .get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS); + if (parameters == null) { + parameters = Utils.newSmallArrayList(); + } + if (index > Constants.MAX_PARAMETER_INDEX) { + throw DbException.getInvalidValueException("parameter index", index); + } + index--; + if (parameters.size() <= index) { + parameters.ensureCapacity(index + 1); + while (parameters.size() < index) { + parameters.add(null); } - p = new Parameter(parameters.size()); + p = new Parameter(index); parameters.add(p); + } else if ((p = parameters.get(index)) == null) { + p = new 
Parameter(index); + parameters.set(index, p); } return p; } @@ -3923,71 +4944,34 @@ private Expression readTerm() { switch (currentTokenType) { case AT: read(); - r = new Variable(session, readAliasIdentifier()); + r = new Variable(session, readIdentifier()); if (readIf(COLON_EQ)) { - Expression value = readExpression(); - Function function = Function.getFunction(database, "SET"); - function.setParameter(0, r); - function.setParameter(1, value); - r = function; + r = new SetFunction(r, readExpression()); } break; case PARAMETER: r = readParameter(); break; + case TABLE: case SELECT: - case FROM: case WITH: - r = new Subquery(parseSelect()); - break; - case TABLE: - int index = lastParseIndex; - read(); - if (readIf(OPEN_PAREN)) { - r = readFunction(null, "TABLE"); - } else { - parseIndex = index; - read(); - r = new Subquery(parseSelect()); - } - break; - case IDENTIFIER: - String name = currentToken; - if (currentTokenQuoted) { - read(); - if (readIf(OPEN_PAREN)) { - r = readFunction(null, name); - } else if (readIf(DOT)) { - r = readTermObjectDot(name); - } else { - r = new ExpressionColumn(database, null, null, name, false); - } - } else { - read(); - if (readIf(DOT)) { - r = readTermObjectDot(name); - } else if (readIf(OPEN_PAREN)) { - r = readFunction(null, name); - } else { - r = readTermWithIdentifier(name); - } - } + r = new Subquery(parseQuery()); break; case MINUS_SIGN: read(); - if (currentTokenType == VALUE) { - r = ValueExpression.get(currentValue.negate()); + if (currentTokenType == LITERAL) { + r = ValueExpression.get(token.value(session).negate()); int rType = r.getType().getValueType(); - if (rType == Value.LONG && + if (rType == Value.BIGINT && r.getValue(session).getLong() == Integer.MIN_VALUE) { // convert Integer.MIN_VALUE to type 'int' // (Integer.MAX_VALUE+1 is of type 'long') - r = ValueExpression.get(ValueInt.get(Integer.MIN_VALUE)); - } else if (rType == Value.DECIMAL && + r = ValueExpression.get(ValueInteger.get(Integer.MIN_VALUE)); + } 
else if (rType == Value.NUMERIC && r.getValue(session).getBigDecimal().compareTo(Value.MIN_LONG_DECIMAL) == 0) { // convert Long.MIN_VALUE to type 'long' // (Long.MAX_VALUE+1 is of type 'decimal') - r = ValueExpression.get(ValueLong.MIN); + r = ValueExpression.get(ValueBigint.MIN); } read(); } else { @@ -4001,34 +4985,51 @@ private Expression readTerm() { case OPEN_PAREN: read(); if (readIf(CLOSE_PAREN)) { - r = ValueExpression.get(ValueRow.getEmpty()); + r = ValueExpression.get(ValueRow.EMPTY); + } else if (isQuery()) { + r = new Subquery(parseQuery()); + read(CLOSE_PAREN); } else { r = readExpression(); - if (readIfMore(true)) { + if (readIfMore()) { ArrayList list = Utils.newSmallArrayList(); list.add(r); - if (!readIf(CLOSE_PAREN)) { - do { - list.add(readExpression()); - } while (readIfMore(false)); - } + do { + list.add(readExpression()); + } while (readIfMore()); r = new ExpressionList(list.toArray(new Expression[0]), false); + } else if (r instanceof BinaryOperation) { + BinaryOperation binaryOperation = (BinaryOperation) r; + if (binaryOperation.getOperationType() == OpType.MINUS) { + TypeInfo ti = readIntervalQualifier(); + if (ti != null) { + binaryOperation.setForcedType(ti); + } + } } } + if (readIf(DOT)) { + r = new FieldReference(r, readIdentifier()); + } break; case ARRAY: read(); - read(OPEN_BRACKET); - if (readIf(CLOSE_BRACKET)) { - r = ValueExpression.get(ValueArray.getEmpty()); - } else { - ArrayList list = Utils.newSmallArrayList(); - list.add(readExpression()); - while (readIf(COMMA)) { - list.add(readExpression()); + if (readIf(OPEN_BRACKET)) { + if (readIf(CLOSE_BRACKET)) { + r = ValueExpression.get(ValueArray.EMPTY); + } else { + ArrayList list = Utils.newSmallArrayList(); + do { + list.add(readExpression()); + } while (readIf(COMMA)); + read(CLOSE_BRACKET); + r = new ExpressionList(list.toArray(new Expression[0]), true); } - read(CLOSE_BRACKET); - r = new ExpressionList(list.toArray(new Expression[0]), true); + } else { + read(OPEN_PAREN); 
+ Query q = parseQuery(); + read(CLOSE_PAREN); + r = new ArrayConstructorByQuery(q); } break; case INTERVAL: @@ -4039,23 +5040,27 @@ private Expression readTerm() { read(); read(OPEN_PAREN); if (readIf(CLOSE_PAREN)) { - r = ValueExpression.get(ValueRow.getEmpty()); + r = ValueExpression.get(ValueRow.EMPTY); } else { ArrayList list = Utils.newSmallArrayList(); do { list.add(readExpression()); - } while (readIfMore(true)); + } while (readIfMore()); r = new ExpressionList(list.toArray(new Expression[0]), false); } break; } case TRUE: read(); - r = ValueExpression.get(ValueBoolean.TRUE); + r = ValueExpression.TRUE; break; case FALSE: read(); - r = ValueExpression.get(ValueBoolean.FALSE); + r = ValueExpression.FALSE; + break; + case UNKNOWN: + read(); + r = TypedValueExpression.UNKNOWN; break; case ROWNUM: read(); @@ -4065,209 +5070,385 @@ private Expression readTerm() { if (currentSelect == null && currentPrepared == null) { throw getSyntaxError(); } - r = new Rownum(currentSelect == null ? 
currentPrepared - : currentSelect); + r = new Rownum(getCurrentPrepared()); break; case NULL: read(); - r = ValueExpression.getNull(); + r = ValueExpression.NULL; break; case _ROWID_: read(); - r = new ExpressionColumn(database, null, null, Column.ROWID, true); + r = new ExpressionColumn(database, null, null); break; - case VALUE: - r = ValueExpression.get(currentValue); + case LITERAL: + r = ValueExpression.get(token.value(session)); read(); break; case VALUES: if (database.getMode().onDuplicateKeyUpdate) { - read(); - r = readKeywordFunction("VALUES"); - } else { - r = new Subquery(parseSelect()); + if (currentPrepared instanceof Insert) { + r = readOnDuplicateKeyValues(((Insert) currentPrepared).getTable(), null); + break; + } else if (currentPrepared instanceof Update) { + Update update = (Update) currentPrepared; + r = readOnDuplicateKeyValues(update.getTable(), update); + break; + } } + r = new Subquery(parseQuery()); break; case CASE: read(); r = readCase(); break; + case CAST: { + read(); + read(OPEN_PAREN); + Expression arg = readExpression(); + read(AS); + Column column = parseColumnWithType(null); + read(CLOSE_PAREN); + r = new CastSpecification(arg, column); + break; + } + case CURRENT_CATALOG: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_CATALOG); case CURRENT_DATE: read(); - r = readKeywordFunction("CURRENT_DATE"); + r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, readIf(OPEN_PAREN), null); break; + case CURRENT_PATH: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_PATH); + case CURRENT_ROLE: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_ROLE); + case CURRENT_SCHEMA: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_SCHEMA); case CURRENT_TIME: read(); - r = readKeywordFunction("CURRENT_TIME"); + r = 
readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIME, readIf(OPEN_PAREN), null); break; case CURRENT_TIMESTAMP: read(); - r = readKeywordFunction("CURRENT_TIMESTAMP"); + r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP, readIf(OPEN_PAREN), + null); break; case CURRENT_USER: + case USER: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_USER); + case SESSION_USER: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.SESSION_USER); + case SYSTEM_USER: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.SYSTEM_USER); + case ANY: + case SOME: read(); - r = readKeywordFunction("USER"); + read(OPEN_PAREN); + return readAggregate(AggregateType.ANY, "ANY"); + case DAY: + case HOUR: + case MINUTE: + case MONTH: + case SECOND: + case YEAR: + r = readKeywordCompatibilityFunctionOrColumn(); + break; + case LEFT: + r = readColumnIfNotFunction(); + if (r == null) { + r = new StringFunction2(readExpression(), readLastArgument(), StringFunction2.LEFT); + } break; case LOCALTIME: read(); - r = readKeywordFunction("LOCALTIME"); + r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, readIf(OPEN_PAREN), null); break; case LOCALTIMESTAMP: read(); - r = readKeywordFunction("LOCALTIMESTAMP"); + r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, readIf(OPEN_PAREN), // + null); + break; + case RIGHT: + r = readColumnIfNotFunction(); + if (r == null) { + r = new StringFunction2(readExpression(), readLastArgument(), StringFunction2.RIGHT); + } + break; + case SET: + r = readColumnIfNotFunction(); + if (r == null) { + r = readSetFunction(); + } break; + case VALUE: + if (parseDomainConstraint) { + read(); + r = new DomainValueExpression(); + break; + } + //$FALL-THROUGH$ default: - throw getSyntaxError(); + if (!isIdentifier()) { + throw getSyntaxError(); + } + //$FALL-THROUGH$ + case 
IDENTIFIER: + String name = currentToken; + boolean quoted = token.isQuoted(); + read(); + if (readIf(OPEN_PAREN)) { + r = readFunction(null, name); + } else if (readIf(DOT)) { + r = readTermObjectDot(name); + } else if (quoted) { + r = new ExpressionColumn(database, null, null, name); + } else { + r = readTermWithIdentifier(name, quoted); + } + break; } if (readIf(OPEN_BRACKET)) { - Function function = Function.getFunction(database, "ARRAY_GET"); - function.setParameter(0, r); - function.setParameter(1, readExpression()); - r = function; + r = new ArrayElementReference(r, readExpression()); read(CLOSE_BRACKET); } - if (readIf(COLON_COLON)) { - // PostgreSQL compatibility - if (isToken("PG_CATALOG")) { - read("PG_CATALOG"); - read(DOT); - } - if (readIf("REGCLASS")) { - FunctionAlias f = findFunctionAlias(database.getMainSchema().getName(), "PG_GET_OID"); - if (f == null) { - throw getSyntaxError(); + colonColon: if (readIf(COLON_COLON)) { + if (database.getMode().getEnum() == ModeEnum.PostgreSQL) { + // PostgreSQL compatibility + if (isToken("PG_CATALOG")) { + read("PG_CATALOG"); + read(DOT); + } + if (readIf("REGCLASS")) { + r = new Regclass(r); + break colonColon; } - Expression[] args = { r }; - r = new JavaFunction(f, args); - } else { - Column col = parseColumnWithType(null, false); - Function function = Function.getFunction(database, "CAST"); - function.setDataType(col); - function.setParameter(0, r); - r = function; } + r = new CastSpecification(r, parseColumnWithType(null)); } - return r; + for (;;) { + TypeInfo ti = readIntervalQualifier(); + if (ti != null) { + r = new CastSpecification(r, ti); + } + int index = tokenIndex; + if (readIf("AT")) { + if (readIf("TIME")) { + read("ZONE"); + r = new TimeZoneOperation(r, readExpression()); + continue; + } else if (readIf("LOCAL")) { + r = new TimeZoneOperation(r, null); + continue; + } else { + setTokenIndex(index); + } + } else if (readIf("FORMAT")) { + if (readIf("JSON")) { + r = new Format(r, 
FormatEnum.JSON); + continue; + } else { + setTokenIndex(index); + } + } + break; + } + return r; } - private Expression readTermWithIdentifier(String name) { - // Unquoted identifier is never empty - char ch = name.charAt(0); - if (!identifiersToUpper) { - /* - * Convert a-z to A-Z. This method is safe, because only A-Z - * characters are considered below. - */ - ch &= 0xffdf; + private Expression readCurrentGeneralValueSpecification(int specification) { + read(); + if (readIf(OPEN_PAREN)) { + read(CLOSE_PAREN); + } + return new CurrentGeneralValueSpecification(specification); + } + + private Expression readColumnIfNotFunction() { + boolean nonKeyword = nonKeywords != null && nonKeywords.get(currentTokenType); + String name = currentToken; + read(); + if (readIf(OPEN_PAREN)) { + return null; + } else if (nonKeyword) { + return readIf(DOT) ? readTermObjectDot(name) : new ExpressionColumn(database, null, null, name); } - switch (ch) { + throw getSyntaxError(); + } + + private Expression readSetFunction() { + SetFunction function = new SetFunction(readExpression(), readLastArgument()); + if (database.isAllowBuiltinAliasOverride()) { + FunctionAlias functionAlias = database.getSchema(session.getCurrentSchemaName()).findFunction( + function.getName()); + if (functionAlias != null) { + return new JavaFunction(functionAlias, + new Expression[] { function.getSubexpression(0), function.getSubexpression(1) }); + } + } + return function; + } + + private Expression readOnDuplicateKeyValues(Table table, Update update) { + read(); + read(OPEN_PAREN); + Column c = readTableColumn(new TableFilter(session, table, null, rightsChecked, null, 0, null)); + read(CLOSE_PAREN); + return new OnDuplicateKeyValues(c, update); + } + + private Expression readTermWithIdentifier(String name, boolean quoted) { + /* + * Convert a-z to A-Z. This method is safe, because only A-Z + * characters are considered below. + * + * Unquoted identifier is never empty. 
+ */ + switch (name.charAt(0) & 0xffdf) { case 'C': - if (database.getMode().getEnum() == ModeEnum.DB2 && equalsToken("CURRENT", name)) { - return parseDB2SpecialRegisters(name); + if (equalsToken("CURRENT", name)) { + int index = tokenIndex; + if (readIf(VALUE) && readIf(FOR)) { + return new SequenceValue(readSequence()); + } + setTokenIndex(index); + if (database.getMode().getEnum() == ModeEnum.DB2) { + return parseDB2SpecialRegisters(name); + } } break; case 'D': - if (currentTokenType == VALUE && currentValue.getValueType() == Value.STRING && + if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR && (equalsToken("DATE", name) || equalsToken("D", name))) { - String date = currentValue.getString(); + String date = token.value(session).getString(); read(); return ValueExpression.get(ValueDate.parse(date)); } break; case 'E': - if (currentTokenType == VALUE && currentValue.getValueType() == Value.STRING && equalsToken("E", name)) { - String text = currentValue.getString(); + if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR // + && equalsToken("E", name)) { + String text = token.value(session).getString(); // the PostgreSQL ODBC driver uses // LIKE E'PROJECT\\_DATA' instead of LIKE // 'PROJECT\_DATA' // N: SQL-92 "National Language" strings text = StringUtils.replaceAll(text, "\\\\", "\\"); read(); - return ValueExpression.get(ValueString.get(text)); + return ValueExpression.get(ValueVarchar.get(text)); } break; - case 'N': - if (equalsToken("NEXT", name) && readIf("VALUE")) { - read(FOR); - return new SequenceValue(readSequence()); - } else if (currentTokenType == VALUE && currentValue.getValueType() == Value.STRING - && equalsToken("N", name)) { - // SQL-92 "National Language" strings - String text = currentValue.getString(); - read(); - return ValueExpression.get(ValueString.get(text)); + case 'G': + if (currentTokenType == LITERAL) { + int t = token.value(session).getValueType(); + if (t 
== Value.VARCHAR && equalsToken("GEOMETRY", name)) { + ValueExpression v = ValueExpression.get(ValueGeometry.get(token.value(session).getString())); + read(); + return v; + } else if (t == Value.VARBINARY && equalsToken("GEOMETRY", name)) { + ValueExpression v = ValueExpression + .get(ValueGeometry.getFromEWKB(token.value(session).getBytesNoCopy())); + read(); + return v; + } } break; - case 'S': - if (equalsToken("SYSDATE", name)) { - return readFunctionWithoutParameters("CURRENT_TIMESTAMP"); - } else if (equalsToken("SYSTIME", name)) { - return readFunctionWithoutParameters("CURRENT_TIME"); - } else if (equalsToken("SYSTIMESTAMP", name)) { - return readFunctionWithoutParameters("CURRENT_TIMESTAMP"); + case 'J': + if (currentTokenType == LITERAL) { + int t = token.value(session).getValueType(); + if (t == Value.VARCHAR && equalsToken("JSON", name)) { + ValueExpression v = ValueExpression.get(ValueJson.fromJson(token.value(session).getString())); + read(); + return v; + } else if (t == Value.VARBINARY && equalsToken("JSON", name)) { + ValueExpression v = ValueExpression.get(ValueJson.fromJson(token.value(session).getBytesNoCopy())); + read(); + return v; + } + } + break; + case 'N': + if (equalsToken("NEXT", name)) { + int index = tokenIndex; + if (readIf(VALUE) && readIf(FOR)) { + return new SequenceValue(readSequence(), getCurrentPrepared()); + } + setTokenIndex(index); } break; case 'T': if (equalsToken("TIME", name)) { - boolean without = readIf("WITHOUT"); - if (without) { + if (readIf(WITH)) { read("TIME"); read("ZONE"); - } - if (currentTokenType == VALUE && currentValue.getValueType() == Value.STRING) { - String time = currentValue.getString(); + if (currentTokenType != LITERAL || token.value(session).getValueType() != Value.VARCHAR) { + throw getSyntaxError(); + } + String time = token.value(session).getString(); read(); - return ValueExpression.get(ValueTime.parse(time)); - } else if (without) { - throw getSyntaxError(); + return 
ValueExpression.get(ValueTimeTimeZone.parse(time)); + } else { + boolean without = readIf("WITHOUT"); + if (without) { + read("TIME"); + read("ZONE"); + } + if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR) { + String time = token.value(session).getString(); + read(); + return ValueExpression.get(ValueTime.parse(time)); + } else if (without) { + throw getSyntaxError(); + } } } else if (equalsToken("TIMESTAMP", name)) { if (readIf(WITH)) { read("TIME"); read("ZONE"); - if (currentTokenType != VALUE || currentValue.getValueType() != Value.STRING) { + if (currentTokenType != LITERAL || token.value(session).getValueType() != Value.VARCHAR) { throw getSyntaxError(); } - String timestamp = currentValue.getString(); + String timestamp = token.value(session).getString(); read(); - return ValueExpression.get(ValueTimestampTimeZone.parse(timestamp)); + return ValueExpression.get(ValueTimestampTimeZone.parse(timestamp, session)); } else { boolean without = readIf("WITHOUT"); if (without) { read("TIME"); read("ZONE"); } - if (currentTokenType == VALUE && currentValue.getValueType() == Value.STRING) { - String timestamp = currentValue.getString(); + if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR) { + String timestamp = token.value(session).getString(); read(); - return ValueExpression.get(ValueTimestamp.parse(timestamp, database.getMode())); + return ValueExpression.get(ValueTimestamp.parse(timestamp, session)); } else if (without) { throw getSyntaxError(); } } - } else if (equalsToken("TODAY", name)) { - return readFunctionWithoutParameters("CURRENT_DATE"); - } else if (currentTokenType == VALUE && currentValue.getValueType() == Value.STRING) { + } else if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR) { if (equalsToken("T", name)) { - String time = currentValue.getString(); + String time = token.value(session).getString(); read(); return 
ValueExpression.get(ValueTime.parse(time)); } else if (equalsToken("TS", name)) { - String timestamp = currentValue.getString(); + String timestamp = token.value(session).getString(); read(); - return ValueExpression.get(ValueTimestamp.parse(timestamp, database.getMode())); + return ValueExpression.get(ValueTimestamp.parse(timestamp, session)); } } break; - case 'X': - if (currentTokenType == VALUE && currentValue.getValueType() == Value.STRING && equalsToken("X", name)) { - byte[] buffer = StringUtils.convertHexToBytes(currentValue.getString()); + case 'U': + if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR + && (equalsToken("UUID", name))) { + String uuid = token.value(session).getString(); read(); - return ValueExpression.get(ValueBytes.getNoCopy(buffer)); + return ValueExpression.get(ValueUuid.get(uuid)); } break; } - return new ExpressionColumn(database, null, null, name, false); + return new ExpressionColumn(database, null, null, name, quoted); + } + + private Prepared getCurrentPrepared() { + return currentPrepared; } private Expression readInterval() { @@ -4275,51 +5456,81 @@ private Expression readInterval() { if (!negative) { readIf(PLUS_SIGN); } - String s = readString(); + if (currentTokenType != LITERAL || token.value(session).getValueType() != Value.VARCHAR) { + addExpected("string"); + throw getSyntaxError(); + } + String s = token.value(session).getString(); + read(); IntervalQualifier qualifier; - if (readIf("YEAR")) { - if (readIf("TO")) { - read("MONTH"); + switch (currentTokenType) { + case YEAR: + read(); + if (readIf(TO)) { + read(MONTH); qualifier = IntervalQualifier.YEAR_TO_MONTH; } else { qualifier = IntervalQualifier.YEAR; } - } else if (readIf("MONTH")) { + break; + case MONTH: + read(); qualifier = IntervalQualifier.MONTH; - } else if (readIf("DAY")) { - if (readIf("TO")) { - if (readIf("HOUR")) { + break; + case DAY: + read(); + if (readIf(TO)) { + switch (currentTokenType) { + case HOUR: qualifier 
= IntervalQualifier.DAY_TO_HOUR; - } else if (readIf("MINUTE")) { + break; + case MINUTE: qualifier = IntervalQualifier.DAY_TO_MINUTE; - } else { - read("SECOND"); + break; + case SECOND: qualifier = IntervalQualifier.DAY_TO_SECOND; + break; + default: + throw intervalDayError(); } + read(); } else { qualifier = IntervalQualifier.DAY; } - } else if (readIf("HOUR")) { - if (readIf("TO")) { - if (readIf("MINUTE")) { + break; + case HOUR: + read(); + if (readIf(TO)) { + switch (currentTokenType) { + case MINUTE: qualifier = IntervalQualifier.HOUR_TO_MINUTE; - } else { - read("SECOND"); + break; + case SECOND: qualifier = IntervalQualifier.HOUR_TO_SECOND; + break; + default: + throw intervalHourError(); } + read(); } else { qualifier = IntervalQualifier.HOUR; } - } else if (readIf("MINUTE")) { - if (readIf("TO")) { - read("SECOND"); + break; + case MINUTE: + read(); + if (readIf(TO)) { + read(SECOND); qualifier = IntervalQualifier.MINUTE_TO_SECOND; } else { qualifier = IntervalQualifier.MINUTE; } - } else { - read("SECOND"); + break; + case SECOND: + read(); qualifier = IntervalQualifier.SECOND; + break; + default: + throw intervalQualifierError(); } try { return ValueExpression.get(IntervalUtils.parseInterval(qualifier, negative, s)); @@ -4334,70 +5545,77 @@ private Expression parseDB2SpecialRegisters(String name) { if (readIf(WITH)) { read("TIME"); read("ZONE"); - return readKeywordFunction("CURRENT_TIMESTAMP"); + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP, + readIf(OPEN_PAREN), null); } - return readKeywordFunction("LOCALTIMESTAMP"); + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, readIf(OPEN_PAREN), + null); } else if (readIf("TIME")) { // Time with fractional seconds is not supported by DB2 - return readFunctionWithoutParameters("CURRENT_TIME"); + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, false, null); } else if (readIf("DATE")) { - return 
readFunctionWithoutParameters("CURRENT_DATE"); + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, false, null); } // No match, parse CURRENT as a column - return new ExpressionColumn(database, null, null, name, false); + return new ExpressionColumn(database, null, null, name); } private Expression readCase() { - if (readIf("END")) { - readIf(CASE); - return ValueExpression.getNull(); - } - if (readIf("ELSE")) { - Expression elsePart = readExpression().optimize(session); - read("END"); - readIf(CASE); - return elsePart; - } - int i; - Function function; - if (readIf("WHEN")) { - function = Function.getFunction(database, "CASE"); - function.setParameter(0, null); - i = 1; + Expression c; + if (readIf(WHEN)) { + SearchedCase searched = new SearchedCase(); do { - function.setParameter(i++, readExpression()); + Expression condition = readExpression(); read("THEN"); - function.setParameter(i++, readExpression()); - } while (readIf("WHEN")); + searched.addParameter(condition); + searched.addParameter(readExpression()); + } while (readIf(WHEN)); + if (readIf(ELSE)) { + searched.addParameter(readExpression()); + } + searched.doneWithParameters(); + c = searched; } else { - Expression expr = readExpression(); - if (readIf("END")) { - readIf(CASE); - return ValueExpression.getNull(); - } - if (readIf("ELSE")) { - Expression elsePart = readExpression().optimize(session); - read("END"); - readIf(CASE); - return elsePart; - } - function = Function.getFunction(database, "CASE"); - function.setParameter(0, expr); - i = 1; - read("WHEN"); + Expression caseOperand = readExpression(); + read(WHEN); + SimpleCase.SimpleWhen when = readSimpleWhenClause(caseOperand), current = when; + while (readIf(WHEN)) { + SimpleCase.SimpleWhen next = readSimpleWhenClause(caseOperand); + current.setWhen(next); + current = next; + } + c = new SimpleCase(caseOperand, when, readIf(ELSE) ? 
readExpression() : null); + } + read(END); + return c; + } + + private SimpleCase.SimpleWhen readSimpleWhenClause(Expression caseOperand) { + Expression whenOperand = readWhenOperand(caseOperand); + if (readIf(COMMA)) { + ArrayList operands = Utils.newSmallArrayList(); + operands.add(whenOperand); do { - function.setParameter(i++, readExpression()); - read("THEN"); - function.setParameter(i++, readExpression()); - } while (readIf("WHEN")); + operands.add(readWhenOperand(caseOperand)); + } while (readIf(COMMA)); + read("THEN"); + return new SimpleCase.SimpleWhen(operands.toArray(new Expression[0]), readExpression()); } - if (readIf("ELSE")) { - function.setParameter(i, readExpression()); + read("THEN"); + return new SimpleCase.SimpleWhen(whenOperand, readExpression()); + } + + private Expression readWhenOperand(Expression caseOperand) { + int backup = tokenIndex; + boolean not = readIf(NOT); + Expression whenOperand = readConditionRightHandSide(caseOperand, not, true); + if (whenOperand == null) { + if (not) { + setTokenIndex(backup); + } + whenOperand = readExpression(); } - read("END"); - readIf("CASE"); - function.doneWithParameters(); - return function; + return whenOperand; } private int readNonNegativeInt() { @@ -4416,22 +5634,23 @@ private int readInt() { } else if (currentTokenType == PLUS_SIGN) { read(); } - if (currentTokenType != VALUE) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, "integer"); + if (currentTokenType != LITERAL) { + throw DbException.getSyntaxError(sqlCommand, token.start(), "integer"); } + Value value = token.value(session); if (minus) { // must do that now, otherwise Integer.MIN_VALUE would not work - currentValue = currentValue.negate(); + value = value.negate(); } - int i = currentValue.getInt(); + int i = value.getInt(); read(); return i; } - private long readNonNegativeLong() { + private long readPositiveLong() { long v = readLong(); - if (v < 0) { - throw DbException.getInvalidValueException("non-negative long", v); + 
if (v <= 0) { + throw DbException.getInvalidValueException("positive long", v); } return v; } @@ -4444,14 +5663,15 @@ private long readLong() { } else if (currentTokenType == PLUS_SIGN) { read(); } - if (currentTokenType != VALUE) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, "long"); + if (currentTokenType != LITERAL) { + throw DbException.getSyntaxError(sqlCommand, token.start(), "long"); } + Value value = token.value(session); if (minus) { // must do that now, otherwise Long.MIN_VALUE would not work - currentValue = currentValue.negate(); + value = value.negate(); } - long i = currentValue.getLong(); + long i = value.getLong(); read(); return i; } @@ -4465,40 +5685,60 @@ private boolean readBooleanSetting() { case FALSE: read(); return false; - case VALUE: - boolean result = currentValue.getBoolean(); + case LITERAL: + boolean result = token.value(session).getBoolean(); read(); return result; } if (readIf("OFF")) { return false; } else { + if (expectedList != null) { + addMultipleExpected(ON, TRUE, FALSE); + } throw getSyntaxError(); } } private String readString() { - Expression expr = readExpression().optimize(session); - if (!(expr instanceof ValueExpression)) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, "string"); + int sqlIndex = token.start(); + Expression expr = readExpression(); + try { + String s = expr.optimize(session).getValue(session).getString(); + if (s == null || s.length() <= Constants.MAX_STRING_LENGTH) { + return s; + } + } catch (DbException e) { } - return expr.getValue(session).getString(); + throw DbException.getSyntaxError(sqlCommand, sqlIndex, "character string"); } // TODO: why does this function allow defaultSchemaName=null - which resets // the parser schemaName for everyone ? 
private String readIdentifierWithSchema(String defaultSchemaName) { - String s = readColumnIdentifier(); + String s = readIdentifier(); schemaName = defaultSchemaName; if (readIf(DOT)) { - schemaName = s; - s = readColumnIdentifier(); + s = readIdentifierWithSchema2(s); } - if (currentTokenType == DOT) { - if (equalsToken(schemaName, database.getShortName())) { - read(); - schemaName = s; - s = readColumnIdentifier(); + return s; + } + + private String readIdentifierWithSchema2(String s) { + schemaName = s; + if (database.getMode().allowEmptySchemaValuesAsDefaultSchema && readIf(DOT)) { + if (equalsToken(schemaName, database.getShortName()) || database.getIgnoreCatalogs()) { + schemaName = session.getCurrentSchemaName(); + s = readIdentifier(); + } + } else { + s = readIdentifier(); + if (currentTokenType == DOT) { + if (equalsToken(schemaName, database.getShortName()) || database.getIgnoreCatalogs()) { + read(); + schemaName = s; + s = readIdentifier(); + } } } return s; @@ -4508,26 +5748,15 @@ private String readIdentifierWithSchema() { return readIdentifierWithSchema(session.getCurrentSchemaName()); } - private String readAliasIdentifier() { - return readColumnIdentifier(); - } - - private String readUniqueIdentifier() { - return readColumnIdentifier(); - } - - private String readColumnIdentifier() { - if (currentTokenType != IDENTIFIER) { + private String readIdentifier() { + if (!isIdentifier()) { /* * Sometimes a new keywords are introduced. During metadata * initialization phase keywords are accepted as identifiers to * allow migration from older versions. - * - * PageStore's LobStorageBackend also needs this in databases that - * were created in 1.4.197 and older versions. 
*/ - if (!session.getDatabase().isStarting() || !isKeyword(currentToken)) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, "identifier"); + if (!session.isQuirksMode() || !isKeyword(currentTokenType)) { + throw DbException.getSyntaxError(sqlCommand, token.start(), "identifier"); } } String s = currentToken; @@ -4536,7 +5765,7 @@ private String readColumnIdentifier() { } private void read(String expected) { - if (currentTokenQuoted || !equalsToken(expected, currentToken)) { + if (token.isQuoted() || !equalsToken(expected, currentToken)) { addExpected(expected); throw getSyntaxError(); } @@ -4551,12 +5780,12 @@ private void read(int tokenType) { read(); } - private boolean readIf(String token) { - if (!currentTokenQuoted && equalsToken(token, currentToken)) { + private boolean readIf(String tokenName) { + if (!token.isQuoted() && equalsToken(tokenName, currentToken)) { read(); return true; } - addExpected(token); + addExpected(tokenName); return false; } @@ -4569,11 +5798,11 @@ private boolean readIf(int tokenType) { return false; } - private boolean isToken(String token) { - if (!currentTokenQuoted && equalsToken(token, currentToken)) { + private boolean isToken(String tokenName) { + if (!token.isQuoted() && equalsToken(tokenName, currentToken)) { return true; } - addExpected(token); + addExpected(tokenName); return false; } @@ -4592,16 +5821,8 @@ private boolean equalsToken(String a, String b) { return a.equals(b) || !identifiersToUpper && a.equalsIgnoreCase(b); } - private static boolean equalsTokenIgnoreCase(String a, String b) { - if (a == null) { - return b == null; - } else - return a.equals(b) || a.equalsIgnoreCase(b); - } - - private boolean isTokenInList(Collection upperCaseTokenList) { - String upperCaseCurrentToken = currentToken.toUpperCase(); - return upperCaseTokenList.contains(upperCaseCurrentToken); + private boolean isIdentifier() { + return currentTokenType == IDENTIFIER || nonKeywords != null && nonKeywords.get(currentTokenType); } 
private void addExpected(String token) { @@ -4616,713 +5837,194 @@ private void addExpected(int tokenType) { } } + private void addMultipleExpected(int ... tokenTypes) { + for (int tokenType : tokenTypes) { + expectedList.add(TOKENS[tokenType]); + } + } + private void read() { - currentTokenQuoted = false; if (expectedList != null) { expectedList.clear(); } - int[] types = characterTypes; - lastParseIndex = parseIndex; - int i = parseIndex; - int type = types[i]; - while (type == 0) { - type = types[++i]; - } - int start = i; - char[] chars = sqlCommandChars; - char c = chars[i++]; - currentToken = ""; - switch (type) { - case CHAR_NAME: - while (true) { - type = types[i]; - if (type != CHAR_NAME && type != CHAR_VALUE) { - break; - } - i++; - } - currentTokenType = ParserUtil.getSaveTokenType(sqlCommand, !identifiersToUpper, start, i, false); - if (currentTokenType == IDENTIFIER) { - currentToken = StringUtils.cache(sqlCommand.substring(start, i)); - } else { - currentToken = TOKENS[currentTokenType]; - } - parseIndex = i; - return; - case CHAR_QUOTED: { - String result = null; - while (true) { - for (int begin = i;; i++) { - if (chars[i] == c) { - if (result == null) { - result = sqlCommand.substring(begin, i); - } else { - result += sqlCommand.substring(begin - 1, i); - } - break; - } - } - if (chars[++i] != c) { - break; - } - i++; + int size = tokens.size(); + if (tokenIndex + 1 < size) { + token = tokens.get(++tokenIndex); + currentTokenType = token.tokenType(); + currentToken = token.asIdentifier(); + if (currentToken != null && currentToken.length() > Constants.MAX_IDENTIFIER_LENGTH) { + throw DbException.get(ErrorCode.NAME_TOO_LONG_2, currentToken.substring(0, 32), + "" + Constants.MAX_IDENTIFIER_LENGTH); + } else if (currentTokenType == LITERAL) { + checkLiterals(); } - currentToken = StringUtils.cache(result); - parseIndex = i; - currentTokenQuoted = true; - currentTokenType = IDENTIFIER; - return; + } else { + throw getSyntaxError(); } - case 
CHAR_SPECIAL_2: - if (types[i] == CHAR_SPECIAL_2) { - char c1 = chars[i++]; - currentTokenType = getSpecialType2(c, c1); - } else { - currentTokenType = getSpecialType1(c); - } - parseIndex = i; - return; - case CHAR_SPECIAL_1: - currentTokenType = getSpecialType1(c); - parseIndex = i; - return; - case CHAR_VALUE: - if (c == '0' && (chars[i] == 'X' || chars[i] == 'x')) { - // hex number - long number = 0; - start += 2; - i++; - while (true) { - c = chars[i]; - if (c >= '0' && c <= '9') { - number = (number << 4) + c - '0'; - } else if (c >= 'A' && c <= 'F') { - number = (number << 4) + c - ('A' - 10); - } else if (c >= 'a' && c <= 'f') { - number = (number << 4) + c - ('a' - 10); - } else { - checkLiterals(false); - currentValue = ValueInt.get((int) number); - currentTokenType = VALUE; - currentToken = "0"; - parseIndex = i; - return; - } - if (number > Integer.MAX_VALUE) { - readHexDecimal(start, i); - return; - } - i++; - } - } - long number = c - '0'; - loop: while (true) { - c = chars[i]; - if (c < '0' || c > '9') { - switch (c) { - case '.': - case 'E': - case 'e': - readDecimal(start, i, false); - break loop; - case 'L': - case 'l': - readDecimal(start, i, true); - break loop; - } - checkLiterals(false); - currentValue = ValueInt.get((int) number); - currentTokenType = VALUE; - currentToken = "0"; - parseIndex = i; - break; - } - number = number * 10 + (c - '0'); - if (number > Integer.MAX_VALUE) { - readDecimal(start, i, true); - break; - } - i++; - } - return; - case CHAR_DOT: - if (types[i] != CHAR_VALUE) { - currentTokenType = DOT; - currentToken = "."; - parseIndex = i; - return; - } - readDecimal(i - 1, i, false); - return; - case CHAR_STRING: { - String result = null; - while (true) { - for (int begin = i;; i++) { - if (chars[i] == '\'') { - if (result == null) { - result = sqlCommand.substring(begin, i); - } else { - result += sqlCommand.substring(begin - 1, i); - } - break; - } - } - if (chars[++i] != '\'') { - break; - } - i++; + } + + private void 
checkLiterals() { + if (!literalsChecked && session != null && !session.getAllowLiterals()) { + int allowed = database.getAllowLiterals(); + if (allowed == Constants.ALLOW_LITERALS_NONE + || ((token instanceof Token.CharacterStringToken || token instanceof Token.BinaryStringToken) + && allowed != Constants.ALLOW_LITERALS_ALL)) { + throw DbException.get(ErrorCode.LITERALS_ARE_NOT_ALLOWED); } - currentToken = "'"; - checkLiterals(true); - currentValue = ValueString.get(result, database.getMode().treatEmptyStringsAsNull); - parseIndex = i; - currentTokenType = VALUE; - return; } - case CHAR_DOLLAR_QUOTED_STRING: { - int begin = i - 1; - while (types[i] == CHAR_DOLLAR_QUOTED_STRING) { - i++; - } - String result = sqlCommand.substring(begin, i); - currentToken = "'"; - checkLiterals(true); - currentValue = ValueString.get(result, database.getMode().treatEmptyStringsAsNull); - parseIndex = i; - currentTokenType = VALUE; - return; - } - case CHAR_END: - currentTokenType = END; - parseIndex = i; - return; - default: - throw getSyntaxError(); + } + + private void initialize(String sql, ArrayList tokens, boolean stopOnCloseParen) { + if (sql == null) { + sql = ""; } + sqlCommand = sql; + this.tokens = tokens == null ? 
new Tokenizer(database, identifiersToUpper, identifiersToLower, nonKeywords) + .tokenize(sql, stopOnCloseParen) : tokens; + resetTokenIndex(); } - private void readParameterIndex() { - int i = parseIndex; + private void resetTokenIndex() { + tokenIndex = -1; + token = null; + currentTokenType = -1; + currentToken = null; + } - char[] chars = sqlCommandChars; - char c = chars[i++]; - long number = c - '0'; - while (true) { - c = chars[i]; - if (c < '0' || c > '9') { - currentValue = ValueInt.get((int) number); - currentTokenType = VALUE; - currentToken = "0"; - parseIndex = i; - break; - } - number = number * 10 + (c - '0'); - if (number > Integer.MAX_VALUE) { - throw DbException.getInvalidValueException( - "parameter index", number); + void setTokenIndex(int index) { + if (index != tokenIndex) { + if (expectedList != null) { + expectedList.clear(); } - i++; + token = tokens.get(index); + tokenIndex = index; + currentTokenType = token.tokenType(); + currentToken = token.asIdentifier(); } } - private void checkLiterals(boolean text) { - if (!literalsChecked && !session.getAllowLiterals()) { - int allowed = database.getAllowLiterals(); - if (allowed == Constants.ALLOW_LITERALS_NONE || - (text && allowed != Constants.ALLOW_LITERALS_ALL)) { - throw DbException.get(ErrorCode.LITERALS_ARE_NOT_ALLOWED); - } - } + private static boolean isKeyword(int tokenType) { + return tokenType >= FIRST_KEYWORD && tokenType <= LAST_KEYWORD; } - private void readHexDecimal(int start, int i) { - char[] chars = sqlCommandChars; - char c; - do { - c = chars[++i]; - } while ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')); - parseIndex = i; - String sub = sqlCommand.substring(start, i); - BigDecimal bd = new BigDecimal(new BigInteger(sub, 16)); - checkLiterals(false); - currentValue = ValueDecimal.get(bd); - currentTokenType = VALUE; - } - - private void readDecimal(int start, int i, boolean integer) { - char[] chars = sqlCommandChars; - int[] types = characterTypes; - // go until the 
first non-number - while (true) { - int t = types[i]; - if (t == CHAR_DOT) { - integer = false; - } else if (t != CHAR_VALUE) { - break; - } - i++; + private boolean isKeyword(String s) { + return ParserUtil.isKeyword(s, !identifiersToUpper); + } + + private String upperName(String name) { + return identifiersToUpper ? name : StringUtils.toUpperEnglish(name); + } + + private Column parseColumnForTable(String columnName, boolean defaultNullable) { + Column column; + Mode mode = database.getMode(); + if (mode.identityDataType && readIf("IDENTITY")) { + column = new Column(columnName, TypeInfo.TYPE_BIGINT); + parseCompatibilityIdentityOptions(column); + column.setPrimaryKey(true); + } else if (mode.serialDataTypes && readIf("BIGSERIAL")) { + column = new Column(columnName, TypeInfo.TYPE_BIGINT); + column.setIdentityOptions(new SequenceOptions(), false); + } else if (mode.serialDataTypes && readIf("SERIAL")) { + column = new Column(columnName, TypeInfo.TYPE_INTEGER); + column.setIdentityOptions(new SequenceOptions(), false); + } else { + column = parseColumnWithType(columnName); + } + if (readIf("INVISIBLE")) { + column.setVisible(false); + } else if (readIf("VISIBLE")) { + column.setVisible(true); } - char c = chars[i]; - if (c == 'E' || c == 'e') { - integer = false; - c = chars[++i]; - if (c == '+' || c == '-') { - i++; + boolean defaultOnNull = false; + NullConstraintType nullConstraint = parseNotNullConstraint(); + defaultIdentityGeneration: if (!column.isIdentity()) { + if (readIf(AS)) { + column.setGeneratedExpression(readExpression()); + } else if (readIf(DEFAULT)) { + if (readIf(ON)) { + read(NULL); + defaultOnNull = true; + break defaultIdentityGeneration; + } + column.setDefaultExpression(session, readExpression()); + } else if (readIf("GENERATED")) { + boolean always = readIf("ALWAYS"); + if (!always) { + read("BY"); + read(DEFAULT); + } + read(AS); + if (readIf("IDENTITY")) { + SequenceOptions options = new SequenceOptions(); + if (readIf(OPEN_PAREN)) { + 
parseSequenceOptions(options, null, false, false); + read(CLOSE_PAREN); + } + column.setIdentityOptions(options, always); + break defaultIdentityGeneration; + } else if (!always) { + throw getSyntaxError(); + } else { + column.setGeneratedExpression(readExpression()); + } } - if (types[i] != CHAR_VALUE) { - throw getSyntaxError(); + if (!column.isGenerated() && readIf(ON)) { + read("UPDATE"); + column.setOnUpdateExpression(session, readExpression()); } - while (types[++i] == CHAR_VALUE) { - // go until the first non-number + nullConstraint = parseNotNullConstraint(nullConstraint); + if (parseCompatibilityIdentity(column, mode)) { + nullConstraint = parseNotNullConstraint(nullConstraint); } } - parseIndex = i; - checkLiterals(false); - BigDecimal bd; - if (integer && i - start <= 19) { - BigInteger bi = new BigInteger(sqlCommand.substring(start, i)); - if (bi.compareTo(ValueLong.MAX_BI) <= 0) { - // parse constants like "10000000L" - c = chars[i]; - if (c == 'L' || c == 'l') { - parseIndex++; - } - currentValue = ValueLong.get(bi.longValue()); - currentTokenType = VALUE; - return; + switch (nullConstraint) { + case NULL_IS_ALLOWED: + if (column.isIdentity()) { + throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, column.getName()); } - bd = new BigDecimal(bi); - } else { - try { - bd = new BigDecimal(sqlCommandChars, start, i - start); - } catch (NumberFormatException e) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, sqlCommand.substring(start, i)); - } - } - currentValue = ValueDecimal.get(bd); - currentTokenType = VALUE; - } - - private void initialize(String sql) { - if (sql == null) { - sql = ""; - } - originalSQL = sql; - sqlCommand = sql; - int len = sql.length() + 1; - char[] command = new char[len]; - int[] types = new int[len]; - len--; - sql.getChars(0, len, command, 0); - boolean changed = false; - command[len] = ' '; - int startLoop = 0; - int lastType = 0; - for (int i = 0; i < len; i++) { - char c = command[i]; - int type = 
0; - switch (c) { - case '/': - if (command[i + 1] == '*') { - // block comment - changed = true; - command[i] = ' '; - command[i + 1] = ' '; - startLoop = i; - i += 2; - checkRunOver(i, len, startLoop); - while (command[i] != '*' || command[i + 1] != '/') { - command[i++] = ' '; - checkRunOver(i, len, startLoop); - } - command[i] = ' '; - command[i + 1] = ' '; - i++; - } else if (command[i + 1] == '/') { - // single line comment - changed = true; - startLoop = i; - while (true) { - c = command[i]; - if (c == '\n' || c == '\r' || i >= len - 1) { - break; - } - command[i++] = ' '; - checkRunOver(i, len, startLoop); - } - } else { - type = CHAR_SPECIAL_1; - } - break; - case '-': - if (command[i + 1] == '-') { - // single line comment - changed = true; - startLoop = i; - while (true) { - c = command[i]; - if (c == '\n' || c == '\r' || i >= len - 1) { - break; - } - command[i++] = ' '; - checkRunOver(i, len, startLoop); - } - } else { - type = CHAR_SPECIAL_1; - } - break; - case '$': - if (command[i + 1] == '$' && (i == 0 || command[i - 1] <= ' ')) { - // dollar quoted string - changed = true; - command[i] = ' '; - command[i + 1] = ' '; - startLoop = i; - i += 2; - checkRunOver(i, len, startLoop); - while (command[i] != '$' || command[i + 1] != '$') { - types[i++] = CHAR_DOLLAR_QUOTED_STRING; - checkRunOver(i, len, startLoop); - } - command[i] = ' '; - command[i + 1] = ' '; - i++; - } else { - if (lastType == CHAR_NAME || lastType == CHAR_VALUE) { - // $ inside an identifier is supported - type = CHAR_NAME; - } else { - // but not at the start, to support PostgreSQL $1 - type = CHAR_SPECIAL_1; - } - } - break; - case '(': - case ')': - case '{': - case '}': - case '*': - case ',': - case ';': - case '+': - case '%': - case '?': - case '@': - case ']': - type = CHAR_SPECIAL_1; - break; - case '!': - case '<': - case '>': - case '|': - case '=': - case ':': - case '&': - case '~': - type = CHAR_SPECIAL_2; - break; - case '.': - type = CHAR_DOT; - break; - case '\'': - 
type = types[i] = CHAR_STRING; - startLoop = i; - while (command[++i] != '\'') { - checkRunOver(i, len, startLoop); - } - break; - case '[': - if (database.getMode().squareBracketQuotedNames) { - // SQL Server alias for " - command[i] = '"'; - changed = true; - type = types[i] = CHAR_QUOTED; - startLoop = i; - while (command[++i] != ']') { - checkRunOver(i, len, startLoop); - } - command[i] = '"'; - } else { - type = CHAR_SPECIAL_1; - } - break; - case '`': - // MySQL alias for ", but not case sensitive - type = types[i] = CHAR_QUOTED; - startLoop = i; - while (command[++i] != '`') { - checkRunOver(i, len, startLoop); - c = command[i]; - if (identifiersToUpper || identifiersToLower) { - char u = identifiersToUpper ? Character.toUpperCase(c) : Character.toLowerCase(c); - if (u != c) { - command[i] = u; - changed = true; - } - } - } - break; - case '"': - type = types[i] = CHAR_QUOTED; - startLoop = i; - while (command[++i] != '"') { - checkRunOver(i, len, startLoop); - } - break; - case '_': - type = CHAR_NAME; - break; - case '#': - if (database.getMode().supportPoundSymbolForColumnNames) { - type = CHAR_NAME; - } else { - type = CHAR_SPECIAL_1; - } - break; - default: - if (c >= 'a' && c <= 'z') { - if (identifiersToUpper) { - command[i] = (char) (c - ('a' - 'A')); - changed = true; - } - type = CHAR_NAME; - } else if (c >= 'A' && c <= 'Z') { - if (identifiersToLower) { - command[i] = (char) (c + ('a' - 'A')); - changed = true; - } - type = CHAR_NAME; - } else if (c >= '0' && c <= '9') { - type = CHAR_VALUE; - } else { - if (c <= ' ' || Character.isSpaceChar(c)) { - // whitespace - } else if (Character.isJavaIdentifierPart(c)) { - type = CHAR_NAME; - if (identifiersToUpper || identifiersToLower) { - char u = identifiersToUpper ? 
Character.toUpperCase(c) : Character.toLowerCase(c); - if (u != c) { - command[i] = u; - changed = true; - } - } - } else { - type = CHAR_SPECIAL_1; - } - } - } - types[i] = type; - lastType = type; - } - sqlCommandChars = command; - types[len] = CHAR_END; - characterTypes = types; - if (changed) { - sqlCommand = new String(command); - } - parseIndex = 0; - } - - private void checkRunOver(int i, int len, int startLoop) { - if (i >= len) { - parseIndex = startLoop; - throw getSyntaxError(); - } - } - - private int getSpecialType1(char c0) { - switch (c0) { - case '?': - case '$': - return PARAMETER; - case '@': - return AT; - case '+': - return PLUS_SIGN; - case '-': - return MINUS_SIGN; - case '*': - return ASTERISK; - case ',': - return COMMA; - case '{': - return OPEN_BRACE; - case '}': - return CLOSE_BRACE; - case '/': - return SLASH; - case '%': - return PERCENT; - case ';': - return SEMICOLON; - case ':': - return COLON; - case '[': - return OPEN_BRACKET; - case ']': - return CLOSE_BRACKET; - case '~': - return TILDE; - case '(': - return OPEN_PAREN; - case ')': - return CLOSE_PAREN; - case '<': - return SMALLER; - case '>': - return BIGGER; - case '=': - return EQUAL; - default: - throw getSyntaxError(); - } - } - - private int getSpecialType2(char c0, char c1) { - switch (c0) { - case ':': - if (c1 == ':') { - return COLON_COLON; - } else if (c1 == '=') { - return COLON_EQ; - } - break; - case '>': - if (c1 == '=') { - return BIGGER_EQUAL; - } - break; - case '<': - if (c1 == '=') { - return SMALLER_EQUAL; - } else if (c1 == '>') { - return NOT_EQUAL; - } - break; - case '!': - if (c1 == '=') { - return NOT_EQUAL; - } else if (c1 == '~') { - return NOT_TILDE; - } - break; - case '|': - if (c1 == '|') { - return STRING_CONCAT; - } - break; - case '&': - if (c1 == '&') { - return SPATIAL_INTERSECTS; + column.setNullable(true); + break; + case NULL_IS_NOT_ALLOWED: + column.setNullable(false); + break; + case NO_NULL_CONSTRAINT_FOUND: + if (!column.isIdentity()) 
{ + column.setNullable(defaultNullable); } break; - } - throw getSyntaxError(); - } - - private boolean isKeyword(String s) { - return ParserUtil.isKeyword(s, !identifiersToUpper); - } - - private Column parseColumnForTable(String columnName, - boolean defaultNullable, boolean forTable) { - Column column; - boolean isIdentity = readIf("IDENTITY"); - if (isIdentity || readIf("BIGSERIAL")) { - // Check if any of them are disallowed in the current Mode - if (isIdentity && database.getMode(). - disallowedTypes.contains("IDENTITY")) { - throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, - currentToken); - } - column = new Column(columnName, Value.LONG); - column.setOriginalSQL("IDENTITY"); - parseAutoIncrement(column); - // PostgreSQL compatibility - if (!database.getMode().serialColumnIsNotPK) { - column.setPrimaryKey(true); - } - } else if (readIf("SERIAL")) { - column = new Column(columnName, Value.INT); - column.setOriginalSQL("SERIAL"); - parseAutoIncrement(column); - // PostgreSQL compatibility - if (!database.getMode().serialColumnIsNotPK) { - column.setPrimaryKey(true); - } - } else { - column = parseColumnWithType(columnName, forTable); - } - if (readIf("INVISIBLE")) { - column.setVisible(false); - } else if (readIf("VISIBLE")) { - column.setVisible(true); - } - NullConstraintType nullConstraint = parseNotNullConstraint(); - switch (nullConstraint) { - case NULL_IS_ALLOWED: - column.setNullable(true); - break; - case NULL_IS_NOT_ALLOWED: - column.setNullable(false); - break; - case NO_NULL_CONSTRAINT_FOUND: - // domains may be defined as not nullable - column.setNullable(defaultNullable & column.isNullable()); - break; default: throw DbException.get(ErrorCode.UNKNOWN_MODE_1, "Internal Error - unhandled case: " + nullConstraint.name()); } - if (readIf("AS")) { - if (isIdentity) { - getSyntaxError(); - } - Expression expr = readExpression(); - column.setComputedExpression(expr); - } else if (readIf("DEFAULT")) { - Expression defaultExpression = 
readExpression(); - column.setDefaultExpression(session, defaultExpression); - } else if (readIf("GENERATED")) { - if (!readIf("ALWAYS")) { - read("BY"); - read("DEFAULT"); - } - read("AS"); - read("IDENTITY"); - SequenceOptions options = new SequenceOptions(); - if (readIf(OPEN_PAREN)) { - parseSequenceOptions(options, null, true); - read(CLOSE_PAREN); + if (!defaultOnNull) { + if (readIf(DEFAULT)) { + read(ON); + read(NULL); + defaultOnNull = true; + } else if (readIf("NULL_TO_DEFAULT")) { + defaultOnNull = true; } - column.setAutoIncrementOptions(options); - } - if (readIf(ON)) { - read("UPDATE"); - Expression onUpdateExpression = readExpression(); - column.setOnUpdateExpression(session, onUpdateExpression); - } - if (NullConstraintType.NULL_IS_NOT_ALLOWED == parseNotNullConstraint()) { - column.setNullable(false); } - if (readIf("AUTO_INCREMENT") || readIf("BIGSERIAL") || readIf("SERIAL")) { - parseAutoIncrement(column); - parseNotNullConstraint(); - } else if (readIf("IDENTITY")) { - parseAutoIncrement(column); - column.setPrimaryKey(true); - parseNotNullConstraint(); - } - if (readIf("NULL_TO_DEFAULT")) { - column.setConvertNullToDefault(true); + if (defaultOnNull) { + column.setDefaultOnNull(true); } - if (readIf("SEQUENCE")) { - Sequence sequence = readSequence(); - column.setSequence(sequence); + if (!column.isGenerated()) { + if (readIf("SEQUENCE")) { + column.setSequence(readSequence(), column.isGeneratedAlways()); + } } if (readIf("SELECTIVITY")) { - int value = readNonNegativeInt(); - column.setSelectivity(value); + column.setSelectivity(readNonNegativeInt()); + } + if (mode.getEnum() == ModeEnum.MySQL) { + if (readIf("CHARACTER")) { + readIf(SET); + readMySQLCharset(); + } + if (readIf("COLLATE")) { + readMySQLCharset(); + } } String comment = readCommentIf(); if (comment != null) { @@ -5331,16 +6033,16 @@ private Column parseColumnForTable(String columnName, return column; } - private void parseAutoIncrement(Column column) { + private void 
parseCompatibilityIdentityOptions(Column column) { SequenceOptions options = new SequenceOptions(); if (readIf(OPEN_PAREN)) { - options.setStartValue(ValueExpression.get(ValueLong.get(readLong()))); + options.setStartValue(ValueExpression.get(ValueBigint.get(readLong()))); if (readIf(COMMA)) { - options.setIncrement(ValueExpression.get(ValueLong.get(readLong()))); + options.setIncrement(ValueExpression.get(ValueBigint.get(readLong()))); } read(CLOSE_PAREN); } - column.setAutoIncrementOptions(options); + column.setIdentityOptions(options, false); } private String readCommentIf() { @@ -5351,373 +6053,627 @@ private String readCommentIf() { return null; } - private Column parseColumnWithType(String columnName, boolean forTable) { - String original = currentToken; - boolean regular = false; - int originalPrecision = -1, originalScale = -1; - if (readIf("LONG")) { - if (readIf("RAW")) { - original = "LONG RAW"; + private Column parseColumnWithType(String columnName) { + TypeInfo typeInfo = readIfDataType(); + if (typeInfo == null) { + String domainName = readIdentifierWithSchema(); + return getColumnWithDomain(columnName, getSchema().getDomain(domainName)); + } + return new Column(columnName, typeInfo); + } + + private TypeInfo parseDataType() { + TypeInfo typeInfo = readIfDataType(); + if (typeInfo == null) { + addExpected("data type"); + throw getSyntaxError(); + } + return typeInfo; + } + + private TypeInfo readIfDataType() { + TypeInfo typeInfo = readIfDataType1(); + if (typeInfo != null) { + while (readIf(ARRAY)) { + typeInfo = parseArrayType(typeInfo); } - } else if (readIf("DOUBLE")) { - if (readIf("PRECISION")) { - original = "DOUBLE PRECISION"; + } + return typeInfo; + } + + private TypeInfo readIfDataType1() { + switch (currentTokenType) { + case IDENTIFIER: + if (token.isQuoted()) { + return null; } - } else if (readIf("CHARACTER")) { - if (readIf("VARYING")) { - original = "CHARACTER VARYING"; - } else if (readIf("LARGE")) { - read("OBJECT"); - original = 
"CHARACTER LARGE OBJECT"; + break; + case INTERVAL: { + read(); + TypeInfo typeInfo = readIntervalQualifier(); + if (typeInfo == null) { + throw intervalQualifierError(); + } + return typeInfo; + } + case NULL: + read(); + return TypeInfo.TYPE_NULL; + case ROW: + read(); + return parseRowType(); + case ARRAY: + // Partial compatibility with 1.4.200 and older versions + if (session.isQuirksMode()) { + read(); + return parseArrayType(TypeInfo.TYPE_VARCHAR); + } + addExpected("data type"); + throw getSyntaxError(); + default: + if (isKeyword(currentToken)) { + break; } - } else if (readIf("BINARY")) { + addExpected("data type"); + throw getSyntaxError(); + } + int index = tokenIndex; + String originalCase = currentToken; + read(); + if (currentTokenType == DOT) { + setTokenIndex(index); + return null; + } + String original = upperName(originalCase); + switch (original) { + case "BINARY": if (readIf("VARYING")) { original = "BINARY VARYING"; } else if (readIf("LARGE")) { read("OBJECT"); original = "BINARY LARGE OBJECT"; + } else if (variableBinary) { + original = "VARBINARY"; } - } else if (readIf("TIME")) { - if (readIf(OPEN_PAREN)) { - originalScale = readNonNegativeInt(); - if (originalScale > ValueTime.MAXIMUM_SCALE) { - throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION, Integer.toString(originalScale)); - } - read(CLOSE_PAREN); + break; + case "CHAR": + if (readIf("VARYING")) { + original = "CHAR VARYING"; + } else if (readIf("LARGE")) { + read("OBJECT"); + original = "CHAR LARGE OBJECT"; } - if (readIf("WITHOUT")) { - read("TIME"); - read("ZONE"); - original = "TIME WITHOUT TIME ZONE"; + break; + case "CHARACTER": + if (readIf("VARYING")) { + original = "CHARACTER VARYING"; + } else if (readIf("LARGE")) { + read("OBJECT"); + original = "CHARACTER LARGE OBJECT"; } - } else if (readIf("TIMESTAMP")) { - if (readIf(OPEN_PAREN)) { - originalScale = readNonNegativeInt(); - // Allow non-standard TIMESTAMP(..., ...) 
syntax - if (readIf(COMMA)) { - originalScale = readNonNegativeInt(); - } - if (originalScale > ValueTimestamp.MAXIMUM_SCALE) { - throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION, Integer.toString(originalScale)); - } - read(CLOSE_PAREN); + break; + case "DATETIME": + case "DATETIME2": + return parseDateTimeType(false); + case "DEC": + case "DECIMAL": + return parseNumericType(true); + case "DECFLOAT": + return parseDecfloatType(); + case "DOUBLE": + if (readIf("PRECISION")) { + original = "DOUBLE PRECISION"; } - if (readIf(WITH)) { - read("TIME"); - read("ZONE"); - original = "TIMESTAMP WITH TIME ZONE"; - } else if (readIf("WITHOUT")) { - read("TIME"); - read("ZONE"); - original = "TIMESTAMP WITHOUT TIME ZONE"; + break; + case "ENUM": + return parseEnumType(); + case "FLOAT": + return parseFloatType(); + case "GEOMETRY": + return parseGeometryType(); + case "LONG": + if (readIf("RAW")) { + original = "LONG RAW"; } - } else if (readIf(INTERVAL)) { - if (readIf("YEAR")) { - if (readIf(OPEN_PAREN)) { - originalPrecision = readNonNegativeInt(); - read(CLOSE_PAREN); - } - if (readIf("TO")) { - read("MONTH"); - original = "INTERVAL YEAR TO MONTH"; + break; + case "NATIONAL": + if (readIf("CHARACTER")) { + if (readIf("VARYING")) { + original = "NATIONAL CHARACTER VARYING"; + } else if (readIf("LARGE")) { + read("OBJECT"); + original = "NATIONAL CHARACTER LARGE OBJECT"; } else { - original = "INTERVAL YEAR"; - } - } else if (readIf("MONTH")) { - if (readIf(OPEN_PAREN)) { - originalPrecision = readNonNegativeInt(); - read(CLOSE_PAREN); + original = "NATIONAL CHARACTER"; } - original = "INTERVAL MONTH"; - } else if (readIf("DAY")) { - if (readIf(OPEN_PAREN)) { - originalPrecision = readNonNegativeInt(); - read(CLOSE_PAREN); + } else { + read("CHAR"); + if (readIf("VARYING")) { + original = "NATIONAL CHAR VARYING"; + } else { + original = "NATIONAL CHAR"; } - if (readIf("TO")) { - if (readIf("HOUR")) { - original = "INTERVAL DAY TO HOUR"; - } else if 
(readIf("MINUTE")) { - original = "INTERVAL DAY TO MINUTE"; - } else { - read("SECOND"); - if (readIf(OPEN_PAREN)) { - originalScale = readNonNegativeInt(); - read(CLOSE_PAREN); + } + break; + case "NCHAR": + if (readIf("VARYING")) { + original = "NCHAR VARYING"; + } else if (readIf("LARGE")) { + read("OBJECT"); + original = "NCHAR LARGE OBJECT"; + } + break; + case "NUMBER": + if (database.getMode().disallowedTypes.contains("NUMBER")) { + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "NUMBER"); + } + if (!isToken(OPEN_PAREN)) { + return TypeInfo.getTypeInfo(Value.DECFLOAT, 40, -1, null); + } + //$FALL-THROUGH$ + case "NUMERIC": + return parseNumericType(false); + case "SMALLDATETIME": + return parseDateTimeType(true); + case "TIME": + return parseTimeType(); + case "TIMESTAMP": + return parseTimestampType(); + } + // Domain names can't have multiple words without quotes + if (originalCase.length() == original.length()) { + Domain domain = database.getSchema(session.getCurrentSchemaName()).findDomain(originalCase); + if (domain != null) { + setTokenIndex(index); + return null; + } + } + Mode mode = database.getMode(); + DataType dataType = DataType.getTypeByName(original, mode); + if (dataType == null || mode.disallowedTypes.contains(original)) { + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, original); + } + long precision; + int scale; + if (dataType.specialPrecisionScale) { + precision = dataType.defaultPrecision; + scale = dataType.defaultScale; + } else { + precision = -1L; + scale = -1; + } + int t = dataType.type; + if (database.getIgnoreCase() && t == Value.VARCHAR && !equalsToken("VARCHAR_CASESENSITIVE", original)) { + dataType = DataType.getDataType(t = Value.VARCHAR_IGNORECASE); + } + if ((dataType.supportsPrecision || dataType.supportsScale) && readIf(OPEN_PAREN)) { + if (!readIf("MAX")) { + if (dataType.supportsPrecision) { + precision = readPrecision(t); + if (precision < dataType.minPrecision) { + throw 
getInvalidPrecisionException(dataType, precision); + } else if (precision > dataType.maxPrecision) + badPrecision: { + if (session.isQuirksMode() || session.isTruncateLargeLength()) { + switch (dataType.type) { + case Value.CHAR: + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.BINARY: + case Value.VARBINARY: + case Value.JAVA_OBJECT: + case Value.JSON: + precision = dataType.maxPrecision; + break badPrecision; + } } - original = "INTERVAL DAY TO SECOND"; + throw getInvalidPrecisionException(dataType, precision); } - } else { - original = "INTERVAL DAY"; - } - } else if (readIf("HOUR")) { - if (readIf(OPEN_PAREN)) { - originalPrecision = readNonNegativeInt(); - read(CLOSE_PAREN); - } - if (readIf("TO")) { - if (readIf("MINUTE")) { - original = "INTERVAL HOUR TO MINUTE"; - } else { - read("SECOND"); - if (readIf(OPEN_PAREN)) { - originalScale = readNonNegativeInt(); - read(CLOSE_PAREN); + if (dataType.supportsScale) { + if (readIf(COMMA)) { + scale = readInt(); + if (scale < dataType.minScale || scale > dataType.maxScale) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), + Integer.toString(dataType.minScale), Integer.toString(dataType.maxScale)); + } } - original = "INTERVAL HOUR TO SECOND"; } } else { - original = "INTERVAL HOUR"; - } - } else if (readIf("MINUTE")) { - if (readIf(OPEN_PAREN)) { - originalPrecision = readNonNegativeInt(); - read(CLOSE_PAREN); - } - if (readIf("TO")) { - read("SECOND"); - if (readIf(OPEN_PAREN)) { - originalScale = readNonNegativeInt(); - read(CLOSE_PAREN); + scale = readInt(); + if (scale < dataType.minScale || scale > dataType.maxScale) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), + Integer.toString(dataType.minScale), Integer.toString(dataType.maxScale)); } - original = "INTERVAL MINUTE TO SECOND"; + } + } + read(CLOSE_PAREN); + } + if (mode.allNumericTypesHavePrecision && DataType.isNumericType(dataType.type)) { + if (readIf(OPEN_PAREN)) { + 
// Support for MySQL: INT(11), MEDIUMINT(8) and so on. + // Just ignore the precision. + readNonNegativeInt(); + read(CLOSE_PAREN); + } + readIf("UNSIGNED"); + } + if (mode.forBitData && DataType.isStringType(t)) { + if (readIf(FOR)) { + read("BIT"); + read("DATA"); + dataType = DataType.getDataType(t = Value.VARBINARY); + } + } + return TypeInfo.getTypeInfo(t, precision, scale, null); + } + + private static DbException getInvalidPrecisionException(DataType dataType, long precision) { + return DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Long.toString(precision), + Long.toString(dataType.minPrecision), Long.toString(dataType.maxPrecision)); + } + + private static Column getColumnWithDomain(String columnName, Domain domain) { + Column column = new Column(columnName, domain.getDataType()); + column.setComment(domain.getComment()); + column.setDomain(domain); + return column; + } + + private TypeInfo parseFloatType() { + int type = Value.DOUBLE; + int precision; + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); + if (precision < 1 || precision > 53) { + throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Integer.toString(precision), "1", "53"); + } + if (precision <= 24) { + type = Value.REAL; + } + } else { + precision = 0; + } + return TypeInfo.getTypeInfo(type, precision, -1, null); + } + + private TypeInfo parseNumericType(boolean decimal) { + long precision = -1L; + int scale = -1; + if (readIf(OPEN_PAREN)) { + precision = readPrecision(Value.NUMERIC); + if (precision < 1) { + throw getInvalidNumericPrecisionException(precision); + } else if (precision > Constants.MAX_NUMERIC_PRECISION) { + if (session.isQuirksMode() || session.isTruncateLargeLength()) { + precision = Constants.MAX_NUMERIC_PRECISION; } else { - original = "INTERVAL MINUTE"; + throw getInvalidNumericPrecisionException(precision); } - } else { - read("SECOND"); - if (readIf(OPEN_PAREN)) { - originalPrecision = readNonNegativeInt(); - if (readIf(COMMA)) 
{ - originalScale = readNonNegativeInt(); - } - read(CLOSE_PAREN); + } + if (readIf(COMMA)) { + scale = readInt(); + if (scale < 0 || scale > ValueNumeric.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), + "0", "" + ValueNumeric.MAXIMUM_SCALE); } - original = "INTERVAL SECOND"; } - } else { - regular = true; + read(CLOSE_PAREN); } - long precision = -1; - ExtTypeInfo extTypeInfo = null; + return TypeInfo.getTypeInfo(Value.NUMERIC, precision, scale, decimal ? ExtTypeInfoNumeric.DECIMAL : null); + } + + private TypeInfo parseDecfloatType() { + long precision = -1L; + if (readIf(OPEN_PAREN)) { + precision = readPrecision(Value.DECFLOAT); + if (precision < 1 || precision > Constants.MAX_NUMERIC_PRECISION) { + throw getInvalidNumericPrecisionException(precision); + } + read(CLOSE_PAREN); + } + return TypeInfo.getTypeInfo(Value.DECFLOAT, precision, -1, null); + } + + private static DbException getInvalidNumericPrecisionException(long precision) { + return DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Long.toString(precision), "1", + "" + Constants.MAX_NUMERIC_PRECISION); + } + + private TypeInfo parseTimeType() { int scale = -1; - String comment = null; - Column templateColumn = null; - DataType dataType; - if (!identifiersToUpper) { - original = StringUtils.toUpperEnglish(original); - } - Domain domain = database.findDomain(original); - if (domain != null) { - templateColumn = domain.getColumn(); - TypeInfo type = templateColumn.getType(); - dataType = DataType.getDataType(type.getValueType()); - comment = templateColumn.getComment(); - original = forTable ? 
domain.getSQL(true) : templateColumn.getOriginalSQL(); - precision = type.getPrecision(); - scale = type.getScale(); - extTypeInfo = type.getExtTypeInfo(); - } else { - Mode mode = database.getMode(); - dataType = DataType.getTypeByName(original, mode); - if (dataType == null || mode.disallowedTypes.contains(original)) { - throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, - currentToken); + if (readIf(OPEN_PAREN)) { + scale = readNonNegativeInt(); + if (scale > ValueTime.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", + /* Folds to a constant */ "" + ValueTime.MAXIMUM_SCALE); } + read(CLOSE_PAREN); } - if (database.getIgnoreCase() && dataType.type == Value.STRING && - !equalsToken("VARCHAR_CASESENSITIVE", original)) { - original = "VARCHAR_IGNORECASE"; - dataType = DataType.getTypeByName(original, database.getMode()); + int type = Value.TIME; + if (readIf(WITH)) { + read("TIME"); + read("ZONE"); + type = Value.TIME_TZ; + } else if (readIf("WITHOUT")) { + read("TIME"); + read("ZONE"); } - if (regular) { - read(); + return TypeInfo.getTypeInfo(type, -1L, scale, null); + } + + private TypeInfo parseTimestampType() { + int scale = -1; + if (readIf(OPEN_PAREN)) { + scale = readNonNegativeInt(); + // Allow non-standard TIMESTAMP(..., ...) syntax + if (readIf(COMMA)) { + scale = readNonNegativeInt(); + } + if (scale > ValueTimestamp.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", + /* Folds to a constant */ "" + ValueTimestamp.MAXIMUM_SCALE); + } + read(CLOSE_PAREN); } - precision = precision == -1 ? dataType.defaultPrecision : precision; - scale = scale == -1 ? 
dataType.defaultScale : scale; - if (dataType.supportsPrecision || dataType.supportsScale) { - int t = dataType.type; - if (t == Value.TIME || t == Value.TIMESTAMP || t == Value.TIMESTAMP_TZ) { - if (originalScale >= 0) { - scale = originalScale; - switch (t) { - case Value.TIME: - if (original.equals("TIME WITHOUT TIME ZONE")) { - original = "TIME(" + originalScale + ") WITHOUT TIME ZONE"; - } else { - original = original + '(' + originalScale + ')'; - } - break; - case Value.TIMESTAMP: - if (original.equals("TIMESTAMP WITHOUT TIME ZONE")) { - original = "TIMESTAMP(" + originalScale + ") WITHOUT TIME ZONE"; - } else { - original = original + '(' + originalScale + ')'; - } - break; - case Value.TIMESTAMP_TZ: - original = "TIMESTAMP(" + originalScale + ") WITH TIME ZONE"; - break; - } - } else if (original.equals("DATETIME") || original.equals("DATETIME2")) { + int type = Value.TIMESTAMP; + if (readIf(WITH)) { + read("TIME"); + read("ZONE"); + type = Value.TIMESTAMP_TZ; + } else if (readIf("WITHOUT")) { + read("TIME"); + read("ZONE"); + } + return TypeInfo.getTypeInfo(type, -1L, scale, null); + } + + private TypeInfo parseDateTimeType(boolean smallDateTime) { + int scale; + if (smallDateTime) { + scale = 0; + } else { + scale = -1; + if (readIf(OPEN_PAREN)) { + scale = readNonNegativeInt(); + if (scale > ValueTimestamp.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", + /* folds to a constant */ "" + ValueTimestamp.MAXIMUM_SCALE); + } + read(CLOSE_PAREN); + } + } + return TypeInfo.getTypeInfo(Value.TIMESTAMP, -1L, scale, null); + } + + private TypeInfo readIntervalQualifier() { + IntervalQualifier qualifier; + int precision = -1, scale = -1; + switch (currentTokenType) { + case YEAR: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); + } + if (readIf(TO)) { + read(MONTH); + qualifier = IntervalQualifier.YEAR_TO_MONTH; + } else { + qualifier = IntervalQualifier.YEAR; + } + 
break; + case MONTH: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); + } + qualifier = IntervalQualifier.MONTH; + break; + case DAY: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); + } + if (readIf(TO)) { + switch (currentTokenType) { + case HOUR: + read(); + qualifier = IntervalQualifier.DAY_TO_HOUR; + break; + case MINUTE: + read(); + qualifier = IntervalQualifier.DAY_TO_MINUTE; + break; + case SECOND: + read(); if (readIf(OPEN_PAREN)) { - originalScale = readNonNegativeInt(); - if (originalScale > ValueTime.MAXIMUM_SCALE) { - throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION, - Integer.toString(originalScale)); - } + scale = readNonNegativeInt(); read(CLOSE_PAREN); - scale = originalScale; - original = original + '(' + originalScale + ')'; - } - } else if (original.equals("SMALLDATETIME")) { - scale = 0; - } - } else if (DataType.isIntervalType(t)) { - if (originalPrecision >= 0 || originalScale >= 0) { - IntervalQualifier qualifier = IntervalQualifier.valueOf(t - Value.INTERVAL_YEAR); - original = qualifier.getTypeName(originalPrecision, originalScale); - if (originalPrecision >= 0) { - if (originalPrecision <= 0 || originalPrecision > ValueInterval.MAXIMUM_PRECISION) { - throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION, - Integer.toString(originalPrecision)); - } - precision = originalPrecision; - } - if (originalScale >= 0) { - if (originalScale > ValueInterval.MAXIMUM_SCALE) { - throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION, - Integer.toString(originalScale)); - } - scale = originalScale; } + qualifier = IntervalQualifier.DAY_TO_SECOND; + break; + default: + throw intervalDayError(); } - } else if (readIf(OPEN_PAREN)) { - if (!readIf("MAX")) { - long p = readPrecision(); - original += "(" + p; - if (dataType.supportsScale) { - if (readIf(COMMA)) { - scale = readInt(); - original += ", " + scale; - } else { - scale = 0; - } + 
} else { + qualifier = IntervalQualifier.DAY; + } + break; + case HOUR: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); + } + if (readIf(TO)) { + switch (currentTokenType) { + case MINUTE: + read(); + qualifier = IntervalQualifier.HOUR_TO_MINUTE; + break; + case SECOND: + read(); + if (readIf(OPEN_PAREN)) { + scale = readNonNegativeInt(); + read(CLOSE_PAREN); } - precision = p; - original += ")"; + qualifier = IntervalQualifier.HOUR_TO_SECOND; + break; + default: + throw intervalHourError(); } - read(CLOSE_PAREN); + } else { + qualifier = IntervalQualifier.HOUR; } - } else if (dataType.type == Value.DOUBLE && original.equals("FLOAT")) { + break; + case MINUTE: + read(); if (readIf(OPEN_PAREN)) { - int p = readNonNegativeInt(); + precision = readNonNegativeInt(); read(CLOSE_PAREN); - if (p > 53) { - throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION, Integer.toString(p)); - } - if (p <= 24) { - dataType = DataType.getDataType(Value.FLOAT); - } - original = original + '(' + p + ')'; } - } else if (dataType.type == Value.ENUM) { - if (extTypeInfo == null) { - String[] enumerators = null; + if (readIf(TO)) { + read(SECOND); if (readIf(OPEN_PAREN)) { - java.util.List enumeratorList = new ArrayList<>(); - String enumerator0 = readString(); - enumeratorList.add(enumerator0); - while (readIfMore(true)) { - String enumeratorN = readString(); - enumeratorList.add(enumeratorN); - } - enumerators = enumeratorList.toArray(new String[0]); + scale = readNonNegativeInt(); + read(CLOSE_PAREN); } - try { - extTypeInfo = new ExtTypeInfoEnum(enumerators); - } catch (DbException e) { - throw e.addSQL(original); + qualifier = IntervalQualifier.MINUTE_TO_SECOND; + } else { + qualifier = IntervalQualifier.MINUTE; + } + break; + case SECOND: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + if (readIf(COMMA)) { + scale = readNonNegativeInt(); } - original += extTypeInfo.getCreateSQL(); + read(CLOSE_PAREN); 
} - } else if (dataType.type == Value.GEOMETRY) { - if (extTypeInfo == null) { - if (readIf(OPEN_PAREN)) { - int type = 0; - if (currentTokenType != IDENTIFIER || currentTokenQuoted) { - throw getSyntaxError(); - } - if (!readIf("GEOMETRY")) { - try { - type = EWKTUtils.parseGeometryType(currentToken); - read(); - if (type / 1_000 == 0 && currentTokenType == IDENTIFIER && !currentTokenQuoted) { - type += EWKTUtils.parseDimensionSystem(currentToken) * 1_000; - read(); - } - } catch (IllegalArgumentException ex) { - throw getSyntaxError(); - } - } - Integer srid = null; - if (readIf(COMMA)) { - srid = readInt(); + qualifier = IntervalQualifier.SECOND; + break; + default: + return null; + } + if (precision >= 0) { + if (precision == 0 || precision > ValueInterval.MAXIMUM_PRECISION) { + throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Integer.toString(precision), "1", + /* Folds to a constant */ "" + ValueInterval.MAXIMUM_PRECISION); + } + } + if (scale >= 0) { + if (scale > ValueInterval.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", + /* Folds to a constant */ "" + ValueInterval.MAXIMUM_SCALE); + } + } + return TypeInfo.getTypeInfo(qualifier.ordinal() + Value.INTERVAL_YEAR, precision, scale, null); + } + + private DbException intervalQualifierError() { + if (expectedList != null) { + addMultipleExpected(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND); + } + return getSyntaxError(); + } + + private DbException intervalDayError() { + if (expectedList != null) { + addMultipleExpected(HOUR, MINUTE, SECOND); + } + return getSyntaxError(); + } + + private DbException intervalHourError() { + if (expectedList != null) { + addMultipleExpected(MINUTE, SECOND); + } + return getSyntaxError(); + } + + private TypeInfo parseArrayType(TypeInfo componentType) { + int precision = -1; + if (readIf(OPEN_BRACKET)) { + // Maximum cardinality may be zero + precision = readNonNegativeInt(); + if (precision > 
Constants.MAX_ARRAY_CARDINALITY) { + throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Integer.toString(precision), "0", + /* Folds to a constant */ "" + Constants.MAX_ARRAY_CARDINALITY); + } + read(CLOSE_BRACKET); + } + return TypeInfo.getTypeInfo(Value.ARRAY, precision, -1, componentType); + } + + private TypeInfo parseEnumType() { + read(OPEN_PAREN); + ArrayList enumeratorList = new ArrayList<>(); + do { + enumeratorList.add(readString()); + } while (readIfMore()); + return TypeInfo.getTypeInfo(Value.ENUM, -1L, -1, new ExtTypeInfoEnum(enumeratorList.toArray(new String[0]))); + } + + private TypeInfo parseGeometryType() { + ExtTypeInfoGeometry extTypeInfo; + if (readIf(OPEN_PAREN)) { + int type = 0; + if (currentTokenType != IDENTIFIER || token.isQuoted()) { + throw getSyntaxError(); + } + if (!readIf("GEOMETRY")) { + try { + type = EWKTUtils.parseGeometryType(currentToken); + read(); + if (type / 1_000 == 0 && currentTokenType == IDENTIFIER && !token.isQuoted()) { + type += EWKTUtils.parseDimensionSystem(currentToken) * 1_000; + read(); } - read(CLOSE_PAREN); - extTypeInfo = new ExtTypeInfoGeometry(type, srid); - original += extTypeInfo.getCreateSQL(); + } catch (IllegalArgumentException ex) { + throw getSyntaxError(); } } - } else if (readIf(OPEN_PAREN)) { - // Support for MySQL: INT(11), MEDIUMINT(8) and so on. - // Just ignore the precision. 
- readNonNegativeInt(); + Integer srid = null; + if (readIf(COMMA)) { + srid = readInt(); + } read(CLOSE_PAREN); + extTypeInfo = new ExtTypeInfoGeometry(type, srid); + } else { + extTypeInfo = null; } - if (readIf(FOR)) { - read("BIT"); - read("DATA"); - if (dataType.type == Value.STRING) { - dataType = DataType.getTypeByName("BINARY", database.getMode()); - } - } - // MySQL compatibility - readIf("UNSIGNED"); - int type = dataType.type; - if (scale > precision && dataType.supportsPrecision && dataType.supportsScale - && !DataType.isIntervalType(type)) { - throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION, - Integer.toString(scale), Long.toString(precision)); - } - - Column column = new Column(columnName, TypeInfo.getTypeInfo(type, precision, scale, extTypeInfo)); - if (templateColumn != null) { - column.setNullable(templateColumn.isNullable()); - column.setDefaultExpression(session, - templateColumn.getDefaultExpression()); - int selectivity = templateColumn.getSelectivity(); - if (selectivity != Constants.SELECTIVITY_DEFAULT) { - column.setSelectivity(selectivity); - } - Expression checkConstraint = templateColumn.getCheckConstraint( - session, columnName); - column.addCheckConstraint(session, checkConstraint); - } - column.setComment(comment); - column.setOriginalSQL(original); - if (forTable) { - column.setDomain(domain); - } - return column; + return TypeInfo.getTypeInfo(Value.GEOMETRY, -1L, -1, extTypeInfo); + } + + private TypeInfo parseRowType() { + read(OPEN_PAREN); + LinkedHashMap fields = new LinkedHashMap<>(); + do { + String name = readIdentifier(); + if (fields.putIfAbsent(name, parseDataType()) != null) { + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, name); + } + } while (readIfMore()); + return TypeInfo.getTypeInfo(Value.ROW, -1L, -1, new ExtTypeInfoRow(fields)); } - private long readPrecision() { - long p = readNonNegativeLong(); - if (currentTokenType == IDENTIFIER && !currentTokenQuoted && currentToken.length() == 1) { + 
private long readPrecision(int valueType) { + long p = readPositiveLong(); + if (currentTokenType != IDENTIFIER || token.isQuoted()) { + return p; + } + if ((valueType == Value.BLOB || valueType == Value.CLOB) && currentToken.length() == 1) { long mul; - char ch = currentToken.charAt(0); - switch (identifiersToUpper ? ch : Character.toUpperCase(ch)) { + /* + * Convert a-z to A-Z. This method is safe, because only A-Z + * characters are considered below. + */ + switch (currentToken.charAt(0) & 0xffdf) { case 'K': mul = 1L << 10; break; @@ -5741,14 +6697,19 @@ private long readPrecision() { } p *= mul; read(); + if (currentTokenType != IDENTIFIER || token.isQuoted()) { + return p; + } } - if (currentTokenType == IDENTIFIER && !currentTokenQuoted) { - // Standard char length units - if (!readIf("CHARACTERS") && !readIf("OCTETS") && - // Oracle syntax - !readIf("CHAR")) { - // Oracle syntax - readIf("BYTE"); + switch (valueType) { + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.CLOB: + case Value.CHAR: + if (!readIf("CHARACTERS") && !readIf("OCTETS")) { + if (database.getMode().charAndByteLengthUnits && !readIf("CHAR")) { + readIf("BYTE"); + } } } return p; @@ -5756,7 +6717,7 @@ private long readPrecision() { private Prepared parseCreate() { boolean orReplace = false; - if (readIf("OR")) { + if (readIf(OR)) { read("REPLACE"); orReplace = true; } @@ -5767,7 +6728,7 @@ private Prepared parseCreate() { return parseCreateFunctionAlias(force); } else if (readIf("SEQUENCE")) { return parseCreateSequence(); - } else if (readIf("USER")) { + } else if (readIf(USER)) { return parseCreateUser(); } else if (readIf("TRIGGER")) { return parseCreateTrigger(force); @@ -5824,8 +6785,8 @@ private Prepared parseCreate() { String indexName = null; Schema oldSchema = null; boolean ifNotExists = false; - if (readIf(PRIMARY)) { - read("KEY"); + if (session.isQuirksMode() && readIf(PRIMARY)) { + read(KEY); if (readIf("HASH")) { hash = true; } @@ -5841,53 +6802,69 @@ 
private Prepared parseCreate() { } if (readIf("HASH")) { hash = true; - } - if (readIf("SPATIAL")) { + } else if (!unique && readIf("SPATIAL")) { spatial = true; } - if (readIf("INDEX")) { - if (!isToken(ON)) { - ifNotExists = readIfNotExists(); - indexName = readIdentifierWithSchema(null); - oldSchema = getSchema(); - } - } else { - throw getSyntaxError(); + read("INDEX"); + if (!isToken(ON)) { + ifNotExists = readIfNotExists(); + indexName = readIdentifierWithSchema(null); + oldSchema = getSchema(); } } read(ON); String tableName = readIdentifierWithSchema(); checkSchema(oldSchema); - CreateIndex command = new CreateIndex(session, getSchema()); - command.setIfNotExists(ifNotExists); - command.setPrimaryKey(primaryKey); - command.setTableName(tableName); - command.setUnique(unique); - command.setIndexName(indexName); - command.setComment(readCommentIf()); - read(OPEN_PAREN); - command.setIndexColumns(parseIndexColumnList()); - - if (readIf("USING")) { - if (hash) { - throw getSyntaxError(); - } - if (spatial) { + String comment = readCommentIf(); + if (!readIf(OPEN_PAREN)) { + // PostgreSQL compatibility + if (hash || spatial) { throw getSyntaxError(); } + read(USING); if (readIf("BTREE")) { // default - } else if (readIf("RTREE")) { - spatial = true; } else if (readIf("HASH")) { hash = true; } else { - throw getSyntaxError(); + read("RTREE"); + spatial = true; } - + read(OPEN_PAREN); } + CreateIndex command = new CreateIndex(session, getSchema()); + command.setIfNotExists(ifNotExists); + command.setPrimaryKey(primaryKey); + command.setTableName(tableName); command.setHash(hash); command.setSpatial(spatial); + command.setIndexName(indexName); + command.setComment(comment); + IndexColumn[] columns; + int uniqueColumnCount = 0; + if (spatial) { + columns = new IndexColumn[] { new IndexColumn(readIdentifier()) }; + if (unique) { + uniqueColumnCount = 1; + } + read(CLOSE_PAREN); + } else { + columns = parseIndexColumnList(); + if (unique) { + uniqueColumnCount = 
columns.length; + if (readIf("INCLUDE")) { + read(OPEN_PAREN); + IndexColumn[] columnsToInclude = parseIndexColumnList(); + int nonUniqueCount = columnsToInclude.length; + columns = Arrays.copyOf(columns, uniqueColumnCount + nonUniqueCount); + System.arraycopy(columnsToInclude, 0, columns, uniqueColumnCount, nonUniqueCount); + } + } else if (primaryKey) { + uniqueColumnCount = columns.length; + } + } + command.setIndexColumns(columns); + command.setUniqueColumnCount(uniqueColumnCount); return command; } } @@ -5908,15 +6885,6 @@ private boolean addRoleOrRight(GrantRevoke command) { } else if (readIf("UPDATE")) { command.addRight(Right.UPDATE); return true; - } else if (readIf(ALL)) { - command.addRight(Right.ALL); - return true; - } else if (readIf("ALTER")) { - read("ANY"); - read("SCHEMA"); - command.addRight(Right.ALTER_ANY_SCHEMA); - command.addTable(null); - return false; } else if (readIf("CONNECT")) { // ignore this right return true; @@ -5924,7 +6892,7 @@ private boolean addRoleOrRight(GrantRevoke command) { // ignore this right return true; } else { - command.addRoleName(readUniqueIdentifier()); + command.addRoleName(readIdentifier()); return false; } } @@ -5932,20 +6900,31 @@ private boolean addRoleOrRight(GrantRevoke command) { private GrantRevoke parseGrantRevoke(int operationType) { GrantRevoke command = new GrantRevoke(session); command.setOperationType(operationType); - boolean tableClauseExpected = addRoleOrRight(command); - while (readIf(COMMA)) { - addRoleOrRight(command); - if (command.isRightMode() && command.isRoleMode()) { - throw DbException - .get(ErrorCode.ROLES_AND_RIGHT_CANNOT_BE_MIXED); + boolean tableClauseExpected; + if (readIf(ALL)) { + readIf("PRIVILEGES"); + command.addRight(Right.ALL); + tableClauseExpected = true; + } else if (readIf("ALTER")) { + read(ANY); + read("SCHEMA"); + command.addRight(Right.ALTER_ANY_SCHEMA); + command.addTable(null); + tableClauseExpected = false; + } else { + tableClauseExpected = 
addRoleOrRight(command); + while (readIf(COMMA)) { + if (addRoleOrRight(command) != tableClauseExpected) { + throw DbException.get(ErrorCode.ROLES_AND_RIGHT_CANNOT_BE_MIXED); + } } } if (tableClauseExpected) { if (readIf(ON)) { if (readIf("SCHEMA")) { - Schema schema = database.getSchema(readAliasIdentifier()); - command.setSchema(schema); + command.setSchema(database.getSchema(readIdentifier())); } else { + readIf(TABLE); do { Table table = readTableOrView(); command.addTable(table); @@ -5953,113 +6932,105 @@ private GrantRevoke parseGrantRevoke(int operationType) { } } } - if (operationType == CommandInterface.GRANT) { - read("TO"); - } else { - read(FROM); - } - command.setGranteeName(readUniqueIdentifier()); - return command; - } - - private Select parseValues() { - Select command = new Select(session, currentSelect); - currentSelect = command; - TableFilter filter = parseValuesTable(0); - command.setWildcard(); - command.addTableFilter(filter, true); + read(operationType == CommandInterface.GRANT ? 
TO : FROM); + command.setGranteeName(readIdentifier()); return command; } - private TableFilter parseValuesTable(int orderInFrom) { - Schema mainSchema = database.getMainSchema(); - TableFunction tf = (TableFunction) Function.getFunction(database, "TABLE"); - ArrayList columns = Utils.newSmallArrayList(); + private TableValueConstructor parseValues() { ArrayList> rows = Utils.newSmallArrayList(); - do { - int i = 0; - ArrayList row = Utils.newSmallArrayList(); - boolean multiColumn; - if (readIf(ROW)) { - read(OPEN_PAREN); - multiColumn = true; - } else { - multiColumn = readIf(OPEN_PAREN); - } - do { - Expression expr = readExpression(); - expr = expr.optimize(session); - TypeInfo type = expr.getType(); - Column column; - String columnName = "C" + (i + 1); - if (rows.isEmpty()) { - if (type.getValueType() == Value.UNKNOWN) { - type = TypeInfo.TYPE_STRING; - } - column = new Column(columnName, type); - columns.add(column); - } else { - if (i >= columns.size()) { - throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); - } - type = Value.getHigherType(columns.get(i).getType(), type); - column = new Column(columnName, type); - columns.set(i, column); - } - row.add(expr); - i++; - } while (multiColumn && readIfMore(true)); - rows.add(row); - } while (readIf(COMMA)); - int columnCount = columns.size(); - int rowCount = rows.size(); - for (ArrayList row : rows) { + ArrayList row = parseValuesRow(Utils.newSmallArrayList()); + rows.add(row); + int columnCount = row.size(); + while (readIf(COMMA)) { + row = parseValuesRow(new ArrayList<>(columnCount)); if (row.size() != columnCount) { throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); } + rows.add(row); } - for (int i = 0; i < columnCount; i++) { - Column c = columns.get(i); - if (c.getType().getValueType() == Value.UNKNOWN) { - c = new Column(c.getName(), Value.STRING); - columns.set(i, c); - } - Expression[] array = new Expression[rowCount]; - for (int j = 0; j < rowCount; j++) { - array[j] = 
rows.get(j).get(i); - } - ExpressionList list = new ExpressionList(array, false); - tf.setParameter(i, list); + return new TableValueConstructor(session, rows); + } + + private ArrayList parseValuesRow(ArrayList row) { + if (readIf(ROW)) { + read(OPEN_PAREN); + } else if (!readIf(OPEN_PAREN)) { + row.add(readExpression()); + return row; } - tf.setColumns(columns); - tf.doneWithParameters(); - Table table = new FunctionTable(mainSchema, session, tf, tf); - return new TableFilter(session, table, null, rightsChecked, currentSelect, orderInFrom, null); + do { + row.add(readExpression()); + } while (readIfMore()); + return row; } private Call parseCall() { Call command = new Call(session); currentPrepared = command; - command.setExpression(readExpression()); + int index = tokenIndex; + boolean canBeFunction; + switch (currentTokenType) { + case IDENTIFIER: + canBeFunction = true; + break; + case TABLE: + read(); + read(OPEN_PAREN); + command.setTableFunction(readTableFunction(ArrayTableFunction.TABLE)); + return command; + default: + canBeFunction = false; + } + try { + command.setExpression(readExpression()); + } catch (DbException e) { + if (canBeFunction && e.getErrorCode() == ErrorCode.FUNCTION_NOT_FOUND_1) { + setTokenIndex(index); + String schemaName = null, name = readIdentifier(); + if (readIf(DOT)) { + schemaName = name; + name = readIdentifier(); + if (readIf(DOT)) { + checkDatabaseName(schemaName); + schemaName = name; + name = readIdentifier(); + } + } + read(OPEN_PAREN); + Schema schema = schemaName != null ? 
database.getSchema(schemaName) : null; + command.setTableFunction(readTableFunction(name, schema)); + return command; + } + throw e; + } return command; } private CreateRole parseCreateRole() { CreateRole command = new CreateRole(session); command.setIfNotExists(readIfNotExists()); - command.setRoleName(readUniqueIdentifier()); + command.setRoleName(readIdentifier()); return command; } private CreateSchema parseCreateSchema() { CreateSchema command = new CreateSchema(session); command.setIfNotExists(readIfNotExists()); - command.setSchemaName(readUniqueIdentifier()); - if (readIf("AUTHORIZATION")) { - command.setAuthorization(readUniqueIdentifier()); + String authorization; + if (readIf(AUTHORIZATION)) { + authorization = readIdentifier(); + command.setSchemaName(authorization); + command.setAuthorization(authorization); } else { - command.setAuthorization(session.getUser().getName()); + command.setSchemaName(readIdentifier()); + if (readIf(AUTHORIZATION)) { + authorization = readIdentifier(); + } else { + authorization = session.getUser().getName(); + } } + command.setAuthorization(authorization); if (readIf(WITH)) { command.setTableEngineParams(readTableEngineParams()); } @@ -6069,7 +7040,7 @@ private CreateSchema parseCreateSchema() { private ArrayList readTableEngineParams() { ArrayList tableEngineParams = Utils.newSmallArrayList(); do { - tableEngineParams.add(readUniqueIdentifier()); + tableEngineParams.add(readIdentifier()); } while (readIf(COMMA)); return tableEngineParams; } @@ -6081,7 +7052,7 @@ private CreateSequence parseCreateSequence() { command.setIfNotExists(ifNotExists); command.setSequenceName(sequenceName); SequenceOptions options = new SequenceOptions(); - parseSequenceOptions(options, command, true); + parseSequenceOptions(options, command, true, false); command.setOptions(options); return command; } @@ -6095,10 +7066,6 @@ private boolean readIfNotExists() { return false; } - private boolean readIfAffinity() { - return readIf("AFFINITY") || 
readIf("SHARD"); - } - private CreateConstant parseCreateConstant() { boolean ifNotExists = readIfNotExists(); String constantName = readIdentifierWithSchema(); @@ -6107,7 +7074,7 @@ private CreateConstant parseCreateConstant() { throw DbException.get(ErrorCode.CONSTANT_ALREADY_EXISTS_1, constantName); } - read("VALUE"); + read(VALUE); Expression expr = readExpression(); CreateConstant command = new CreateConstant(session, schema); command.setConstantName(constantName); @@ -6118,35 +7085,71 @@ private CreateConstant parseCreateConstant() { private CreateAggregate parseCreateAggregate(boolean force) { boolean ifNotExists = readIfNotExists(); - CreateAggregate command = new CreateAggregate(session); - command.setForce(force); - String name = readIdentifierWithSchema(); - if (isKeyword(name) || Function.getFunction(database, name) != null || - getAggregateType(name) != null) { - throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, - name); + String name = readIdentifierWithSchema(), upperName; + if (isKeyword(name) || BuiltinFunctions.isBuiltinFunction(database, upperName = upperName(name)) + || Aggregate.getAggregateType(upperName) != null) { + throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, name); } + CreateAggregate command = new CreateAggregate(session, getSchema()); + command.setForce(force); command.setName(name); - command.setSchema(getSchema()); command.setIfNotExists(ifNotExists); read(FOR); - command.setJavaClassMethod(readUniqueIdentifier()); + command.setJavaClassMethod(readStringOrIdentifier()); return command; } private CreateDomain parseCreateDomain() { boolean ifNotExists = readIfNotExists(); - CreateDomain command = new CreateDomain(session); - command.setTypeName(readUniqueIdentifier()); - read("AS"); - Column col = parseColumnForTable("VALUE", true, false); - if (readIf(CHECK)) { - Expression expr = readExpression(); - col.addCheckConstraint(session, expr); - } - col.rename(null); - command.setColumn(col); + String 
domainName = readIdentifierWithSchema(); + Schema schema = getSchema(); + CreateDomain command = new CreateDomain(session, schema); command.setIfNotExists(ifNotExists); + command.setTypeName(domainName); + readIf(AS); + TypeInfo dataType = readIfDataType(); + if (dataType != null) { + command.setDataType(dataType); + } else { + String parentDomainName = readIdentifierWithSchema(); + command.setParentDomain(getSchema().getDomain(parentDomainName)); + } + if (readIf(DEFAULT)) { + command.setDefaultExpression(readExpression()); + } + if (readIf(ON)) { + read("UPDATE"); + command.setOnUpdateExpression(readExpression()); + } + // Compatibility with 1.4.200 and older versions + if (readIf("SELECTIVITY")) { + readNonNegativeInt(); + } + String comment = readCommentIf(); + if (comment != null) { + command.setComment(comment); + } + for (;;) { + String constraintName; + if (readIf(CONSTRAINT)) { + constraintName = readIdentifier(); + read(CHECK); + } else if (readIf(CHECK)) { + constraintName = null; + } else { + break; + } + AlterDomainAddConstraint constraint = new AlterDomainAddConstraint(session, schema, ifNotExists); + constraint.setConstraintName(constraintName); + constraint.setDomainName(domainName); + parseDomainConstraint = true; + try { + constraint.setCheckExpression(readExpression()); + } finally { + parseDomainConstraint = false; + } + command.addConstraintCommand(constraint); + } return command; } @@ -6169,6 +7172,7 @@ private CreateTrigger parseCreateTrigger(boolean force) { } int typeMask = 0; boolean onRollback = false; + boolean allowOr = database.getMode().getEnum() == ModeEnum.PostgreSQL; do { if (readIf("INSERT")) { typeMask |= Trigger.INSERT; @@ -6183,9 +7187,7 @@ private CreateTrigger parseCreateTrigger(boolean force) { } else { throw getSyntaxError(); } - } while (readIf(COMMA) - || (database.getMode().getEnum() == ModeEnum.PostgreSQL - && readIf("OR"))); + } while (readIf(COMMA) || allowOr && readIf(OR)); read(ON); String tableName = 
readIdentifierWithSchema(); checkSchema(schema); @@ -6200,20 +7202,21 @@ private CreateTrigger parseCreateTrigger(boolean force) { command.setTableName(tableName); if (readIf(FOR)) { read("EACH"); - read(ROW); - command.setRowBased(true); - } else { - command.setRowBased(false); + if (readIf(ROW)) { + command.setRowBased(true); + } else { + read("STATEMENT"); + } } if (readIf("QUEUE")) { command.setQueueSize(readNonNegativeInt()); } command.setNoWait(readIf("NOWAIT")); - if (readIf("AS")) { + if (readIf(AS)) { command.setTriggerSource(readString()); } else { read("CALL"); - command.setTriggerClassName(readUniqueIdentifier()); + command.setTriggerClassName(readStringOrIdentifier()); } return command; } @@ -6221,7 +7224,7 @@ private CreateTrigger parseCreateTrigger(boolean force) { private CreateUser parseCreateUser() { CreateUser command = new CreateUser(session); command.setIfNotExists(readIfNotExists()); - command.setUserName(readUniqueIdentifier()); + command.setUserName(readIdentifier()); command.setComment(readCommentIf()); if (readIf("PASSWORD")) { command.setPassword(readExpression()); @@ -6232,8 +7235,7 @@ private CreateUser parseCreateUser() { } else if (readIf("IDENTIFIED")) { read("BY"); // uppercase if not quoted - command.setPassword(ValueExpression.get(ValueString - .get(readColumnIdentifier()))); + command.setPassword(ValueExpression.get(ValueVarchar.get(readIdentifier()))); } else { throw getSyntaxError(); } @@ -6253,31 +7255,55 @@ private CreateFunctionAlias parseCreateFunctionAlias(boolean force) { } else { aliasName = readIdentifierWithSchema(); } - final boolean newAliasSameNameAsBuiltin = Function.getFunction(database, aliasName) != null; - if (database.isAllowBuiltinAliasOverride() && newAliasSameNameAsBuiltin) { - // fine - } else if (isKeyword(aliasName) || - newAliasSameNameAsBuiltin || - getAggregateType(aliasName) != null) { - throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, - aliasName); - } - CreateFunctionAlias command = 
new CreateFunctionAlias(session, - getSchema()); + String upperName = upperName(aliasName); + if (isReservedFunctionName(upperName)) { + throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, aliasName); + } + CreateFunctionAlias command = new CreateFunctionAlias(session, getSchema()); command.setForce(force); command.setAliasName(aliasName); command.setIfNotExists(ifNotExists); command.setDeterministic(readIf("DETERMINISTIC")); - command.setBufferResultSetToLocalTemp(!readIf("NOBUFFER")); - if (readIf("AS")) { + // Compatibility with old versions of H2 + readIf("NOBUFFER"); + if (readIf(AS)) { command.setSource(readString()); } else { read(FOR); - command.setJavaClassMethod(readUniqueIdentifier()); + command.setJavaClassMethod(readStringOrIdentifier()); } return command; } + private String readStringOrIdentifier() { + return currentTokenType != IDENTIFIER ? readString() : readIdentifier(); + } + + private boolean isReservedFunctionName(String name) { + int tokenType = ParserUtil.getTokenType(name, false, false); + if (tokenType != ParserUtil.IDENTIFIER) { + if (database.isAllowBuiltinAliasOverride()) { + switch (tokenType) { + case CURRENT_DATE: + case CURRENT_TIME: + case CURRENT_TIMESTAMP: + case DAY: + case HOUR: + case LOCALTIME: + case LOCALTIMESTAMP: + case MINUTE: + case MONTH: + case SECOND: + case YEAR: + return false; + } + } + return true; + } + return Aggregate.getAggregateType(name) != null + || BuiltinFunctions.isBuiltinFunction(database, name) && !database.isAllowBuiltinAliasOverride(); + } + private Prepared parseWith() { List viewsCreated = new ArrayList<>(); try { @@ -6303,42 +7329,28 @@ private Prepared parseWith1(List viewsCreated) { // Reverse the order of constructed CTE views - as the destruction order // (since later created view may depend on previously created views - // we preserve that dependency order in the destruction sequence ) - // used in setCteCleanups. 
- Collections.reverse(viewsCreated); - - int parentheses = 0; - while (readIf(OPEN_PAREN)) { - parentheses++; - } - if (isToken(SELECT) || isToken(VALUES)) { - p = parseWithQuery(); - } else if (isToken(TABLE)) { - int index = lastParseIndex; - read(); - if (!isToken(OPEN_PAREN)) { - parseIndex = index; - read(); - p = parseWithQuery(); - } else { - throw DbException.get(ErrorCode.SYNTAX_ERROR_1, WITH_STATEMENT_SUPPORTS_LIMITED_SUB_STATEMENTS); - } + // used in setCteCleanups. + Collections.reverse(viewsCreated); + + int start = tokenIndex; + if (isQueryQuick()) { + p = parseWithQuery(); } else if (readIf("INSERT")) { - p = parseInsert(); + p = parseInsert(start); p.setPrepareAlways(true); } else if (readIf("UPDATE")) { - p = parseUpdate(); + p = parseUpdate(start); p.setPrepareAlways(true); } else if (readIf("MERGE")) { - p = parseMerge(); + p = parseMerge(start); p.setPrepareAlways(true); } else if (readIf("DELETE")) { - p = parseDelete(); + p = parseDelete(start); p.setPrepareAlways(true); } else if (readIf("CREATE")) { if (!isToken(TABLE)) { throw DbException.get(ErrorCode.SYNTAX_ERROR_1, WITH_STATEMENT_SUPPORTS_LIMITED_SUB_STATEMENTS); - } p = parseCreate(); p.setPrepareAlways(true); @@ -6346,20 +7358,20 @@ private Prepared parseWith1(List viewsCreated) { throw DbException.get(ErrorCode.SYNTAX_ERROR_1, WITH_STATEMENT_SUPPORTS_LIMITED_SUB_STATEMENTS); } - for (; parentheses > 0; parentheses--) { - read(CLOSE_PAREN); - } // Clean up temporary views starting with last to first (in case of // dependencies) - but only if they are not persistent. 
if (isTemporary) { - p.setCteCleanups(viewsCreated); + if (cteCleanups == null) { + cteCleanups = new ArrayList<>(viewsCreated.size()); + } + cteCleanups.addAll(viewsCreated); } return p; } private Prepared parseWithQuery() { - Query query = parseSelectUnion(); + Query query = parseQueryExpressionBodyAndEndOfQuery(); query.setPrepareAlways(true); query.setNeverLazy(true); return query; @@ -6378,7 +7390,7 @@ private TableView parseSingleCommonTableExpression(boolean isTemporary) { for (String c : cols) { // we don't really know the type of the column, so STRING will // have to do, UNKNOWN does not work here - columns.add(new Column(c, Value.STRING)); + columns.add(new Column(c, TypeInfo.TYPE_VARCHAR)); } } @@ -6400,7 +7412,7 @@ private TableView parseSingleCommonTableExpression(boolean isTemporary) { cteViewName); } if (!isTemporary) { - oldViewFound.lock(session, true, true); + oldViewFound.lock(session, Table.EXCLUSIVE_LOCK); database.removeSchemaObject(session, oldViewFound); } else { @@ -6416,11 +7428,11 @@ private TableView parseSingleCommonTableExpression(boolean isTemporary) { Table recursiveTable = TableView.createShadowTableForRecursiveTableExpression( isTemporary, session, cteViewName, schema, columns, database); List columnTemplateList; - String[] querySQLOutput = {null}; + String[] querySQLOutput = new String[1]; try { - read("AS"); + read(AS); read(OPEN_PAREN); - Query withQuery = parseSelect(); + Query withQuery = parseQuery(); if (!isTemporary) { withQuery.session = session; } @@ -6441,9 +7453,8 @@ private TableView parseSingleCommonTableExpression(boolean isTemporary) { private TableView createCTEView(String cteViewName, String querySQL, List columnTemplateList, boolean allowRecursiveQueryDetection, boolean addViewToSession, boolean isTemporary) { - Database db = session.getDatabase(); Schema schema = getSchemaWithDefault(); - int id = db.allocateObjectId(); + int id = database.allocateObjectId(); Column[] columnTemplateArray = 
columnTemplateList.toArray(new Column[0]); // No easy way to determine if this is a recursive query up front, so we just compile @@ -6457,9 +7468,9 @@ private TableView createCTEView(String cteViewName, String querySQL, isTemporary); if (!view.isRecursiveQueryDetected() && allowRecursiveQueryDetection) { if (!isTemporary) { - db.addSchemaObject(session, view); - view.lock(session, true, true); - db.removeSchemaObject(session, view); + database.addSchemaObject(session, view); + view.lock(session, Table.EXCLUSIVE_LOCK); + database.removeSchemaObject(session, view); } else { session.removeLocalTempTable(view); } @@ -6469,7 +7480,7 @@ private TableView createCTEView(String cteViewName, String querySQL, isTemporary); } // both removeSchemaObject and removeLocalTempTable hold meta locks - db.unlockMeta(session); + database.unlockMeta(session); } view.setTableExpression(true); view.setTemporary(isTemporary); @@ -6477,9 +7488,9 @@ private TableView createCTEView(String cteViewName, String querySQL, view.setOnCommitDrop(false); if (addViewToSession) { if (!isTemporary) { - db.addSchemaObject(session, view); + database.addSchemaObject(session, view); view.unlock(session); - db.unlockMeta(session); + database.unlockMeta(session); } else { session.addLocalTempTable(view); } @@ -6503,23 +7514,22 @@ private CreateView parseCreateView(boolean force, boolean orReplace) { String[] cols = parseColumnList(); command.setColumnNames(cols); } - String select = StringUtils.cache(sqlCommand - .substring(parseIndex)); - read("AS"); + read(AS); + String select = StringUtils.cache(sqlCommand.substring(token.start())); try { Query query; - session.setParsingCreateView(true, viewName); + session.setParsingCreateView(true); try { - query = parseSelect(); + query = parseQuery(); query.prepare(); } finally { - session.setParsingCreateView(false, viewName); + session.setParsingCreateView(false); } command.setSelect(query); } catch (DbException e) { if (force) { command.setSelectSQL(select); - 
while (currentTokenType != END) { + while (currentTokenType != END_OF_INPUT) { read(); } } else { @@ -6544,7 +7554,7 @@ private TransactionCommand parseCheckpoint() { private Prepared parseAlter() { if (readIf(TABLE)) { return parseAlterTable(); - } else if (readIf("USER")) { + } else if (readIf(USER)) { return parseAlterUser(); } else if (readIf("INDEX")) { return parseAlterIndex(); @@ -6554,6 +7564,8 @@ private Prepared parseAlter() { return parseAlterSequence(); } else if (readIf("VIEW")) { return parseAlterView(); + } else if (readIf("DOMAIN")) { + return parseAlterDomain(); } throw getSyntaxError(); } @@ -6573,13 +7585,115 @@ private AlterIndexRename parseAlterIndex() { command.setOldName(indexName); command.setIfExists(ifExists); read("RENAME"); - read("TO"); + read(TO); String newName = readIdentifierWithSchema(old.getName()); checkSchema(old); command.setNewName(newName); return command; } + private DefineCommand parseAlterDomain() { + boolean ifDomainExists = readIfExists(false); + String domainName = readIdentifierWithSchema(); + Schema schema = getSchema(); + if (readIf("ADD")) { + boolean ifNotExists = false; + String constraintName = null; + String comment = null; + if (readIf(CONSTRAINT)) { + ifNotExists = readIfNotExists(); + constraintName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + comment = readCommentIf(); + } + read(CHECK); + AlterDomainAddConstraint command = new AlterDomainAddConstraint(session, schema, ifNotExists); + command.setDomainName(domainName); + command.setConstraintName(constraintName); + parseDomainConstraint = true; + try { + command.setCheckExpression(readExpression()); + } finally { + parseDomainConstraint = false; + } + command.setIfDomainExists(ifDomainExists); + command.setComment(comment); + if (readIf("NOCHECK")) { + command.setCheckExisting(false); + } else { + readIf(CHECK); + command.setCheckExisting(true); + } + return command; + } else if (readIf("DROP")) { + if (readIf(CONSTRAINT)) { + 
boolean ifConstraintExists = readIfExists(false); + String constraintName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + AlterDomainDropConstraint command = new AlterDomainDropConstraint(session, getSchema(), + ifConstraintExists); + command.setConstraintName(constraintName); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + return command; + } else if (readIf(DEFAULT)) { + AlterDomainExpressions command = new AlterDomainExpressions(session, schema, + CommandInterface.ALTER_DOMAIN_DEFAULT); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setExpression(null); + return command; + } else if (readIf(ON)) { + read("UPDATE"); + AlterDomainExpressions command = new AlterDomainExpressions(session, schema, + CommandInterface.ALTER_DOMAIN_ON_UPDATE); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setExpression(null); + return command; + } + } else if (readIf("RENAME")) { + if (readIf(CONSTRAINT)) { + String constraintName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + read(TO); + AlterDomainRenameConstraint command = new AlterDomainRenameConstraint(session, schema); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setConstraintName(constraintName); + command.setNewConstraintName(readIdentifier()); + return command; + } + read(TO); + String newName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + AlterDomainRename command = new AlterDomainRename(session, getSchema()); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setNewDomainName(newName); + return command; + } else { + read(SET); + if (readIf(DEFAULT)) { + AlterDomainExpressions command = new AlterDomainExpressions(session, schema, + CommandInterface.ALTER_DOMAIN_DEFAULT); + command.setDomainName(domainName); + 
command.setIfDomainExists(ifDomainExists); + command.setExpression(readExpression()); + return command; + } else if (readIf(ON)) { + read("UPDATE"); + AlterDomainExpressions command = new AlterDomainExpressions(session, schema, + CommandInterface.ALTER_DOMAIN_ON_UPDATE); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setExpression(readExpression()); + return command; + } + } + throw getSyntaxError(); + } + private DefineCommand parseAlterView() { boolean ifExists = readIfExists(false); String viewName = readIdentifierWithSchema(); @@ -6589,11 +7703,11 @@ private DefineCommand parseAlterView() { throw DbException.get(ErrorCode.VIEW_NOT_FOUND_1, viewName); } if (readIf("RENAME")) { - read("TO"); + read(TO); String newName = readIdentifierWithSchema(schema.getName()); checkSchema(schema); AlterTableRename command = new AlterTableRename(session, getSchema()); - command.setOldTableName(viewName); + command.setTableName(viewName); command.setNewTableName(newName); command.setIfTableExists(ifExists); return command; @@ -6612,7 +7726,7 @@ private Prepared parseAlterSchema() { String schemaName = readIdentifierWithSchema(); Schema old = getSchema(); read("RENAME"); - read("TO"); + read(TO); String newName = readIdentifierWithSchema(old.getName()); Schema schema = findSchema(schemaName); if (schema == null) { @@ -6635,64 +7749,102 @@ private AlterSequence parseAlterSequence() { command.setSequenceName(sequenceName); command.setIfExists(ifExists); SequenceOptions options = new SequenceOptions(); - parseSequenceOptions(options, null, false); + parseSequenceOptions(options, null, false, false); command.setOptions(options); return command; } - private void parseSequenceOptions(SequenceOptions options, CreateSequence command, boolean forCreate) { + private boolean parseSequenceOptions(SequenceOptions options, CreateSequence command, boolean allowDataType, + boolean forAlterColumn) { + boolean result = false; for (;;) { - if 
(readIf(forCreate ? "START" : "RESTART")) { - readIf(WITH); + if (allowDataType && readIf(AS)) { + TypeInfo dataType = parseDataType(); + if (!DataType.isNumericType(dataType.getValueType())) { + throw DbException.getUnsupportedException(dataType + .getSQL(new StringBuilder("CREATE SEQUENCE AS "), HasSQL.TRACE_SQL_FLAGS).toString()); + } + options.setDataType(dataType); + } else if (readIf("START")) { + read(WITH); options.setStartValue(readExpression()); - } else if (readIf("INCREMENT")) { - readIf("BY"); - options.setIncrement(readExpression()); - } else if (readIf("MINVALUE")) { - options.setMinValue(readExpression()); - } else if (readIf("NOMINVALUE")) { - options.setMinValue(ValueExpression.getNull()); - } else if (readIf("MAXVALUE")) { - options.setMaxValue(readExpression()); - } else if (readIf("NOMAXVALUE")) { - options.setMaxValue(ValueExpression.getNull()); - } else if (readIf("CYCLE")) { - options.setCycle(true); - } else if (readIf("NOCYCLE")) { - options.setCycle(false); - } else if (readIf("NO")) { - if (readIf("MINVALUE")) { - options.setMinValue(ValueExpression.getNull()); - } else if (readIf("MAXVALUE")) { - options.setMaxValue(ValueExpression.getNull()); - } else if (readIf("CYCLE")) { - options.setCycle(false); - } else if (readIf("CACHE")) { - options.setCacheSize(ValueExpression.get(ValueLong.get(1))); + } else if (readIf("RESTART")) { + options.setRestartValue(readIf(WITH) ? 
readExpression() : ValueExpression.DEFAULT); + } else if (command != null && parseCreateSequenceOption(command)) { + // + } else if (forAlterColumn) { + int index = tokenIndex; + if (readIf(SET)) { + if (!parseBasicSequenceOption(options)) { + setTokenIndex(index); + break; + } } else { break; } + } else if (!parseBasicSequenceOption(options)) { + break; + } + result = true; + } + return result; + } + + private boolean parseCreateSequenceOption(CreateSequence command) { + if (readIf("BELONGS_TO_TABLE")) { + command.setBelongsToTable(true); + } else if (readIf(ORDER)) { + // Oracle compatibility + } else { + return false; + } + return true; + } + + private boolean parseBasicSequenceOption(SequenceOptions options) { + if (readIf("INCREMENT")) { + readIf("BY"); + options.setIncrement(readExpression()); + } else if (readIf("MINVALUE")) { + options.setMinValue(readExpression()); + } else if (readIf("MAXVALUE")) { + options.setMaxValue(readExpression()); + } else if (readIf("CYCLE")) { + options.setCycle(Sequence.Cycle.CYCLE); + } else if (readIf("NO")) { + if (readIf("MINVALUE")) { + options.setMinValue(ValueExpression.NULL); + } else if (readIf("MAXVALUE")) { + options.setMaxValue(ValueExpression.NULL); + } else if (readIf("CYCLE")) { + options.setCycle(Sequence.Cycle.NO_CYCLE); } else if (readIf("CACHE")) { - options.setCacheSize(readExpression()); - } else if (readIf("NOCACHE")) { - options.setCacheSize(ValueExpression.get(ValueLong.get(1))); - } else if (command != null) { - if (readIf("BELONGS_TO_TABLE")) { - command.setBelongsToTable(true); - } else if (readIf(ORDER)) { - // Oracle compatibility - } else { - break; - } + options.setCacheSize(ValueExpression.get(ValueBigint.get(1))); } else { - break; + throw getSyntaxError(); } + } else if (readIf("EXHAUSTED")) { + options.setCycle(Sequence.Cycle.EXHAUSTED); + } else if (readIf("CACHE")) { + options.setCacheSize(readExpression()); + // Various compatibility options + } else if (readIf("NOMINVALUE")) { + 
options.setMinValue(ValueExpression.NULL); + } else if (readIf("NOMAXVALUE")) { + options.setMaxValue(ValueExpression.NULL); + } else if (readIf("NOCYCLE")) { + options.setCycle(Sequence.Cycle.NO_CYCLE); + } else if (readIf("NOCACHE")) { + options.setCacheSize(ValueExpression.get(ValueBigint.get(1))); + } else { + return false; } + return true; } private AlterUser parseAlterUser() { - String userName = readUniqueIdentifier(); - if (readIf("SET")) { + String userName = readIdentifier(); + if (readIf(SET)) { AlterUser command = new AlterUser(session); command.setType(CommandInterface.ALTER_USER_SET_PASSWORD); command.setUser(database.getUser(userName)); @@ -6707,12 +7859,11 @@ private AlterUser parseAlterUser() { } return command; } else if (readIf("RENAME")) { - read("TO"); + read(TO); AlterUser command = new AlterUser(session); command.setType(CommandInterface.ALTER_USER_RENAME); command.setUser(database.getUser(userName)); - String newName = readUniqueIdentifier(); - command.setNewName(newName); + command.setNewName(readIdentifier()); return command; } else if (readIf("ADMIN")) { AlterUser command = new AlterUser(session); @@ -6733,27 +7884,21 @@ private AlterUser parseAlterUser() { private void readIfEqualOrTo() { if (!readIf(EQUAL)) { - readIf("TO"); + readIf(TO); } } private Prepared parseSet() { if (readIf(AT)) { Set command = new Set(session, SetTypes.VARIABLE); - command.setString(readAliasIdentifier()); + command.setString(readIdentifier()); readIfEqualOrTo(); command.setExpression(readExpression()); return command; } else if (readIf("AUTOCOMMIT")) { readIfEqualOrTo(); - boolean value = readBooleanSetting(); - int setting = value ? CommandInterface.SET_AUTOCOMMIT_TRUE - : CommandInterface.SET_AUTOCOMMIT_FALSE; - return new TransactionCommand(session, setting); - } else if (readIf("MVCC")) { - readIfEqualOrTo(); - readBooleanSetting(); - return new NoOperation(session); + return new TransactionCommand(session, readBooleanSetting() ? 
CommandInterface.SET_AUTOCOMMIT_TRUE + : CommandInterface.SET_AUTOCOMMIT_FALSE); } else if (readIf("EXCLUSIVE")) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.EXCLUSIVE); @@ -6761,9 +7906,8 @@ private Prepared parseSet() { return command; } else if (readIf("IGNORECASE")) { readIfEqualOrTo(); - boolean value = readBooleanSetting(); Set command = new Set(session, SetTypes.IGNORECASE); - command.setInt(value ? 1 : 0); + command.setInt(readBooleanSetting() ? 1 : 0); return command; } else if (readIf("PASSWORD")) { readIfEqualOrTo(); @@ -6784,16 +7928,7 @@ private Prepared parseSet() { } else if (readIf("MODE")) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.MODE); - command.setString(readAliasIdentifier()); - return command; - } else if (readIf("COMPRESS_LOB")) { - readIfEqualOrTo(); - Set command = new Set(session, SetTypes.COMPRESS_LOB); - if (currentTokenType == VALUE) { - command.setString(readString()); - } else { - command.setString(readUniqueIdentifier()); - } + command.setString(readIdentifier()); return command; } else if (readIf("DATABASE")) { readIfEqualOrTo(); @@ -6802,12 +7937,6 @@ private Prepared parseSet() { } else if (readIf("COLLATION")) { readIfEqualOrTo(); return parseSetCollation(); - } else if (readIf("BINARY_COLLATION")) { - readIfEqualOrTo(); - return parseSetBinaryCollation(SetTypes.BINARY_COLLATION); - } else if (readIf("UUID_COLLATION")) { - readIfEqualOrTo(); - return parseSetBinaryCollation(SetTypes.UUID_COLLATION); } else if (readIf("CLUSTER")) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.CLUSTER); @@ -6821,161 +7950,174 @@ private Prepared parseSet() { } else if (readIf("ALLOW_LITERALS")) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.ALLOW_LITERALS); - if (readIf("NONE")) { - command.setInt(Constants.ALLOW_LITERALS_NONE); - } else if (readIf(ALL)) { - command.setInt(Constants.ALLOW_LITERALS_ALL); + int v; + if (readIf(ALL)) { + v = Constants.ALLOW_LITERALS_ALL; + } else if 
(readIf("NONE")) { + v = Constants.ALLOW_LITERALS_NONE; } else if (readIf("NUMBERS")) { - command.setInt(Constants.ALLOW_LITERALS_NUMBERS); + v = Constants.ALLOW_LITERALS_NUMBERS; } else { - command.setInt(readNonNegativeInt()); + v = readNonNegativeInt(); } + command.setInt(v); return command; } else if (readIf("DEFAULT_TABLE_TYPE")) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.DEFAULT_TABLE_TYPE); + int v; if (readIf("MEMORY")) { - command.setInt(Table.TYPE_MEMORY); + v = Table.TYPE_MEMORY; } else if (readIf("CACHED")) { - command.setInt(Table.TYPE_CACHED); + v = Table.TYPE_CACHED; } else { - command.setInt(readNonNegativeInt()); + v = readNonNegativeInt(); } + command.setInt(v); return command; - } else if (readIf("CREATE")) { - readIfEqualOrTo(); - // Derby compatibility (CREATE=TRUE in the database URL) - read(); - return new NoOperation(session); - } else if (readIf("HSQLDB.DEFAULT_TABLE_TYPE")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("PAGE_STORE")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("CACHE_TYPE")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("FILE_LOCK")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("DB_CLOSE_ON_EXIT")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("AUTO_SERVER")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("AUTO_SERVER_PORT")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("AUTO_RECONNECT")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("ASSERT")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("ACCESS_MODE_DATA")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("OPEN_NEW")) { - readIfEqualOrTo(); 
- read(); - return new NoOperation(session); - } else if (readIf("JMX")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("PAGE_SIZE")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("RECOVER")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("NAMES")) { - // Quercus PHP MySQL driver compatibility - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("SCOPE_GENERATED_KEYS")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); } else if (readIf("SCHEMA")) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.SCHEMA); - command.setString(readAliasIdentifier()); + command.setExpression(readExpressionOrIdentifier()); return command; - } else if (readIf("DATESTYLE")) { - // PostgreSQL compatibility + } else if (readIf("CATALOG")) { readIfEqualOrTo(); - if (!readIf("ISO")) { - String s = readString(); - if (!equalsToken(s, "ISO")) { - throw getSyntaxError(); - } - } - return new NoOperation(session); - } else if (readIf("SEARCH_PATH") || - readIf(SetTypes.getTypeName(SetTypes.SCHEMA_SEARCH_PATH))) { + Set command = new Set(session, SetTypes.CATALOG); + command.setExpression(readExpressionOrIdentifier()); + return command; + } else if (readIf(SetTypes.getTypeName(SetTypes.SCHEMA_SEARCH_PATH))) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.SCHEMA_SEARCH_PATH); ArrayList list = Utils.newSmallArrayList(); do { - list.add(readAliasIdentifier()); + list.add(readIdentifier()); } while (readIf(COMMA)); command.setStringArray(list.toArray(new String[0])); return command; } else if (readIf("JAVA_OBJECT_SERIALIZER")) { readIfEqualOrTo(); - return parseSetJavaObjectSerializer(); + Set command = new Set(session, SetTypes.JAVA_OBJECT_SERIALIZER); + command.setString(readString()); + return command; + } else if (readIf("IGNORE_CATALOGS")) { + readIfEqualOrTo(); + Set command = new Set(session, 
SetTypes.IGNORE_CATALOGS); + command.setInt(readBooleanSetting() ? 1 : 0); + return command; + } else if (readIf("SESSION")) { + read("CHARACTERISTICS"); + read(AS); + read("TRANSACTION"); + return parseSetTransactionMode(); + } else if (readIf("TRANSACTION")) { + // TODO should affect only the current transaction + return parseSetTransactionMode(); + } else if (readIf("TIME")) { + read("ZONE"); + Set command = new Set(session, SetTypes.TIME_ZONE); + if (!readIf("LOCAL")) { + command.setExpression(readExpression()); + } + return command; + } else if (readIf("NON_KEYWORDS")) { + readIfEqualOrTo(); + Set command = new Set(session, SetTypes.NON_KEYWORDS); + ArrayList list = Utils.newSmallArrayList(); + if (currentTokenType != END_OF_INPUT && currentTokenType != SEMICOLON) { + do { + if (currentTokenType < IDENTIFIER || currentTokenType > LAST_KEYWORD) { + throw getSyntaxError(); + } + list.add(StringUtils.toUpperEnglish(currentToken)); + read(); + } while (readIf(COMMA)); + } + command.setStringArray(list.toArray(new String[0])); + return command; + } else if (readIf("DEFAULT_NULL_ORDERING")) { + readIfEqualOrTo(); + Set command = new Set(session, SetTypes.DEFAULT_NULL_ORDERING); + command.setString(readIdentifier()); + return command; + } else if (readIf("LOG")) { + throw DbException.getUnsupportedException("LOG"); } else { - if (isToken("LOGSIZE")) { - // HSQLDB compatibility - currentToken = SetTypes.getTypeName(SetTypes.MAX_LOG_SIZE); + String upperName = upperName(currentToken); + if (ConnectionInfo.isIgnoredByParser(upperName)) { + read(); + readIfEqualOrTo(); + read(); + return new NoOperation(session); } - if (isToken("FOREIGN_KEY_CHECKS")) { - // MySQL compatibility - currentToken = SetTypes - .getTypeName(SetTypes.REFERENTIAL_INTEGRITY); + int type = SetTypes.getType(upperName); + if (type >= 0) { + read(); + readIfEqualOrTo(); + Set command = new Set(session, type); + command.setExpression(readExpression()); + return command; } - String typeName = 
currentToken; - if (!identifiersToUpper) { - typeName = StringUtils.toUpperEnglish(typeName); + ModeEnum modeEnum = database.getMode().getEnum(); + if (modeEnum != ModeEnum.REGULAR) { + Prepared command = readSetCompatibility(modeEnum); + if (command != null) { + return command; + } } - int type = SetTypes.getType(typeName); - if (type < 0) { - throw getSyntaxError(); + if (session.isQuirksMode()) { + switch (upperName) { + case "BINARY_COLLATION": + case "UUID_COLLATION": + read(); + readIfEqualOrTo(); + readIdentifier(); + return new NoOperation(session); + } } - read(); - readIfEqualOrTo(); - Set command = new Set(session, type); - command.setExpression(readExpression()); - return command; + throw getSyntaxError(); + } + } + + private Prepared parseSetTransactionMode() { + IsolationLevel isolationLevel; + read("ISOLATION"); + read("LEVEL"); + if (readIf("READ")) { + if (readIf("UNCOMMITTED")) { + isolationLevel = IsolationLevel.READ_UNCOMMITTED; + } else { + read("COMMITTED"); + isolationLevel = IsolationLevel.READ_COMMITTED; + } + } else if (readIf("REPEATABLE")) { + read("READ"); + isolationLevel = IsolationLevel.REPEATABLE_READ; + } else if (readIf("SNAPSHOT")) { + isolationLevel = IsolationLevel.SNAPSHOT; + } else { + read("SERIALIZABLE"); + isolationLevel = IsolationLevel.SERIALIZABLE; + } + return new SetSessionCharacteristics(session, isolationLevel); + } + + private Expression readExpressionOrIdentifier() { + if (isIdentifier()) { + return ValueExpression.get(ValueVarchar.get(readIdentifier())); } + return readExpression(); } private Prepared parseUse() { readIfEqualOrTo(); Set command = new Set(session, SetTypes.SCHEMA); - command.setString(readAliasIdentifier()); + command.setExpression(ValueExpression.get(ValueVarchar.get(readIdentifier()))); return command; } private Set parseSetCollation() { Set command = new Set(session, SetTypes.COLLATION); - String name = readAliasIdentifier(); + String name = readIdentifier(); command.setString(name); if 
(equalsToken(name, CompareMode.OFF)) { return command; @@ -7000,21 +8142,88 @@ private Set parseSetCollation() { return command; } - private Set parseSetBinaryCollation(int type) { - String name = readAliasIdentifier(); - if (equalsToken(name, CompareMode.UNSIGNED) || equalsToken(name, CompareMode.SIGNED)) { - Set command = new Set(session, type); - command.setString(name); - return command; + private Prepared readSetCompatibility(ModeEnum modeEnum) { + switch (modeEnum) { + case Derby: + if (readIf("CREATE")) { + readIfEqualOrTo(); + // (CREATE=TRUE in the database URL) + read(); + return new NoOperation(session); + } + break; + case HSQLDB: + if (readIf("LOGSIZE")) { + readIfEqualOrTo(); + Set command = new Set(session, SetTypes.MAX_LOG_SIZE); + command.setExpression(readExpression()); + return command; + } + break; + case MySQL: + if (readIf("FOREIGN_KEY_CHECKS")) { + readIfEqualOrTo(); + Set command = new Set(session, SetTypes.REFERENTIAL_INTEGRITY); + command.setExpression(readExpression()); + return command; + } else if (readIf("NAMES")) { + // Quercus PHP MySQL driver compatibility + readIfEqualOrTo(); + read(); + return new NoOperation(session); + } + break; + case PostgreSQL: + if (readIf("STATEMENT_TIMEOUT")) { + readIfEqualOrTo(); + Set command = new Set(session, SetTypes.QUERY_TIMEOUT); + command.setInt(readNonNegativeInt()); + return command; + } else if (readIf("CLIENT_ENCODING") || readIf("CLIENT_MIN_MESSAGES") || readIf("JOIN_COLLAPSE_LIMIT")) { + readIfEqualOrTo(); + read(); + return new NoOperation(session); + } else if (readIf("DATESTYLE")) { + readIfEqualOrTo(); + if (!readIf("ISO")) { + String s = readString(); + if (!equalsToken(s, "ISO")) { + throw getSyntaxError(); + } + } + return new NoOperation(session); + } else if (readIf("SEARCH_PATH")) { + readIfEqualOrTo(); + Set command = new Set(session, SetTypes.SCHEMA_SEARCH_PATH); + ArrayList list = Utils.newSmallArrayList(); + String pgCatalog = database.sysIdentifier("PG_CATALOG"); + boolean 
hasPgCatalog = false; + do { + // some PG clients will send single-quoted alias + String s = currentTokenType == LITERAL ? readString() : readIdentifier(); + if ("$user".equals(s)) { + continue; + } + if (pgCatalog.equals(s)) { + hasPgCatalog = true; + } + list.add(s); + } while (readIf(COMMA)); + // If "pg_catalog" is not in the path then it will be searched before + // searching any of the path items. See + // https://www.postgresql.org/docs/8.2/runtime-config-client.html + if (!hasPgCatalog) { + if (database.findSchema(pgCatalog) != null) { + list.add(0, pgCatalog); + } + } + command.setStringArray(list.toArray(new String[0])); + return command; + } + break; + default: } - throw DbException.getInvalidValueException(SetTypes.getTypeName(type), name); - } - - private Set parseSetJavaObjectSerializer() { - Set command = new Set(session, SetTypes.JAVA_OBJECT_SERIALIZER); - String name = readString(); - command.setString(name); - return command; + return null; } private RunScriptCommand parseRunScript() { @@ -7022,10 +8231,10 @@ private RunScriptCommand parseRunScript() { read(FROM); command.setFileNameExpr(readExpression()); if (readIf("COMPRESSION")) { - command.setCompressionAlgorithm(readUniqueIdentifier()); + command.setCompressionAlgorithm(readIdentifier()); } if (readIf("CIPHER")) { - command.setCipher(readUniqueIdentifier()); + command.setCipher(readIdentifier()); if (readIf("PASSWORD")) { command.setPassword(readExpression()); } @@ -7033,12 +8242,22 @@ private RunScriptCommand parseRunScript() { if (readIf("CHARSET")) { command.setCharset(Charset.forName(readString())); } + if (readIf("FROM_1X")) { + command.setFrom1X(); + } else { + if (readIf("QUIRKS_MODE")) { + command.setQuirksMode(true); + } + if (readIf("VARIABLE_BINARY")) { + command.setVariableBinary(true); + } + } return command; } private ScriptCommand parseScript() { ScriptCommand command = new ScriptCommand(session); - boolean data = true, passwords = true, settings = true; + boolean data = true, 
passwords = true, settings = true, version = true; boolean dropTables = false, simple = false, withColumns = false; if (readIf("NODATA")) { data = false; @@ -7056,6 +8275,9 @@ private ScriptCommand parseScript() { if (readIf("NOSETTINGS")) { settings = false; } + if (readIf("NOVERSION")) { + version = false; + } if (readIf("DROP")) { dropTables = true; } @@ -7066,16 +8288,17 @@ private ScriptCommand parseScript() { command.setData(data); command.setPasswords(passwords); command.setSettings(settings); + command.setVersion(version); command.setDrop(dropTables); command.setSimple(simple); command.setWithColumns(withColumns); - if (readIf("TO")) { + if (readIf(TO)) { command.setFileNameExpr(readExpression()); if (readIf("COMPRESSION")) { - command.setCompressionAlgorithm(readUniqueIdentifier()); + command.setCompressionAlgorithm(readIdentifier()); } if (readIf("CIPHER")) { - command.setCipher(readUniqueIdentifier()); + command.setCipher(readIdentifier()); if (readIf("PASSWORD")) { command.setPassword(readExpression()); } @@ -7087,7 +8310,7 @@ private ScriptCommand parseScript() { if (readIf("SCHEMA")) { HashSet schemaNames = new HashSet<>(); do { - schemaNames.add(readUniqueIdentifier()); + schemaNames.add(readIdentifier()); } while (readIf(COMMA)); command.setSchemaNames(schemaNames); } else if (readIf(TABLE)) { @@ -7107,7 +8330,7 @@ private ScriptCommand parseScript() { * @return {@code true} if the table is DUAL special table. Otherwise returns {@code false}. 
* @see Wikipedia: DUAL table */ - boolean isDualTable(String tableName) { + private boolean isDualTable(String tableName) { return ((schemaName == null || equalsToken(schemaName, "SYS")) && equalsToken("DUAL", tableName)) || (database.getMode().sysDummy1 && (schemaName == null || equalsToken(schemaName, "SYSIBM")) && equalsToken("SYSDUMMY1", tableName)); @@ -7140,24 +8363,88 @@ private Table readTableOrView(String tableName) { } } } - if (isDualTable(tableName)) { - return getDualTable(false); - } - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + if (isDualTable(tableName)) { + return new DualTable(database); + } + + throw getTableOrViewNotFoundDbException(tableName); + } + + private DbException getTableOrViewNotFoundDbException(String tableName) { + if (schemaName != null) { + return getTableOrViewNotFoundDbException(schemaName, tableName); + } + + String currentSchemaName = session.getCurrentSchemaName(); + String[] schemaSearchPath = session.getSchemaSearchPath(); + if (schemaSearchPath == null) { + return getTableOrViewNotFoundDbException(Collections.singleton(currentSchemaName), tableName); + } + + LinkedHashSet schemaNames = new LinkedHashSet<>(); + schemaNames.add(currentSchemaName); + schemaNames.addAll(Arrays.asList(schemaSearchPath)); + return getTableOrViewNotFoundDbException(schemaNames, tableName); + } + + private DbException getTableOrViewNotFoundDbException(String schemaName, String tableName) { + return getTableOrViewNotFoundDbException(Collections.singleton(schemaName), tableName); + } + + private DbException getTableOrViewNotFoundDbException( + java.util.Set schemaNames, String tableName) { + if (database == null || database.getFirstUserTable() == null) { + return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, tableName); + } + + if (database.getSettings().caseInsensitiveIdentifiers) { + return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + + java.util.Set candidates = new 
TreeSet<>(); + for (String schemaName : schemaNames) { + findTableNameCandidates(schemaName, tableName, candidates); + } + + if (candidates.isEmpty()) { + return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + + return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2, + tableName, + String.join(", ", candidates)); + } + + private void findTableNameCandidates(String schemaName, String tableName, java.util.Set candidates) { + Schema schema = database.getSchema(schemaName); + String ucTableName = StringUtils.toUpperEnglish(tableName); + Collection allTablesAndViews = schema.getAllTablesAndViews(session); + for (Table candidate : allTablesAndViews) { + String candidateName = candidate.getName(); + if (ucTableName.equals(StringUtils.toUpperEnglish(candidateName))) { + candidates.add(candidateName); + } + } } - private FunctionAlias findFunctionAlias(String schema, String aliasName) { - FunctionAlias functionAlias = database.getSchema(schema).findFunction( - aliasName); - if (functionAlias != null) { - return functionAlias; + private UserDefinedFunction findUserDefinedFunctionWithinPath(Schema schema, String name) { + if (schema != null) { + return schema.findFunctionOrAggregate(name); + } + schema = database.getSchema(session.getCurrentSchemaName()); + UserDefinedFunction userDefinedFunction = schema.findFunctionOrAggregate(name); + if (userDefinedFunction != null) { + return userDefinedFunction; } String[] schemaNames = session.getSchemaSearchPath(); if (schemaNames != null) { - for (String n : schemaNames) { - functionAlias = database.getSchema(n).findFunction(aliasName); - if (functionAlias != null) { - return functionAlias; + for (String schemaName : schemaNames) { + Schema schemaFromPath = database.getSchema(schemaName); + if (schemaFromPath != schema) { + userDefinedFunction = schemaFromPath.findFunctionOrAggregate(name); + if (userDefinedFunction != null) { + return userDefinedFunction; + } } } } @@ -7201,292 +8488,438 @@ 
private Prepared parseAlterTable() { String tableName = readIdentifierWithSchema(); Schema schema = getSchema(); if (readIf("ADD")) { - Prepared command = parseAlterTableAddConstraintIf(tableName, - schema, ifTableExists); + Prepared command = parseTableConstraintIf(tableName, schema, ifTableExists); if (command != null) { return command; } return parseAlterTableAddColumn(tableName, schema, ifTableExists); - } else if (readIf("SET")) { - read("REFERENTIAL_INTEGRITY"); - int type = CommandInterface.ALTER_TABLE_SET_REFERENTIAL_INTEGRITY; - boolean value = readBooleanSetting(); - AlterTableSet command = new AlterTableSet(session, - schema, type, value); + } else if (readIf(SET)) { + return parseAlterTableSet(schema, tableName, ifTableExists); + } else if (readIf("RENAME")) { + return parseAlterTableRename(schema, tableName, ifTableExists); + } else if (readIf("DROP")) { + return parseAlterTableDrop(schema, tableName, ifTableExists); + } else if (readIf("ALTER")) { + return parseAlterTableAlter(schema, tableName, ifTableExists); + } else { + Mode mode = database.getMode(); + if (mode.alterTableExtensionsMySQL || mode.alterTableModifyColumn) { + return parseAlterTableCompatibility(schema, tableName, ifTableExists, mode); + } + } + throw getSyntaxError(); + } + + private Prepared parseAlterTableAlter(Schema schema, String tableName, boolean ifTableExists) { + readIf("COLUMN"); + boolean ifExists = readIfExists(false); + String columnName = readIdentifier(); + Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists, ifExists); + if (readIf("RENAME")) { + read(TO); + AlterTableRenameColumn command = new AlterTableRenameColumn( + session, schema); command.setTableName(tableName); command.setIfTableExists(ifTableExists); - if (readIf(CHECK)) { - command.setCheckExisting(true); - } else if (readIf("NOCHECK")) { - command.setCheckExisting(false); - } + command.setIfExists(ifExists); + command.setOldColumnName(columnName); + String newName = 
readIdentifier(); + command.setNewColumnName(newName); return command; - } else if (readIf("RENAME")) { - if (readIf("COLUMN")) { - // PostgreSQL syntax - String columnName = readColumnIdentifier(); - read("TO"); - AlterTableRenameColumn command = new AlterTableRenameColumn( - session, schema); - command.setTableName(tableName); - command.setIfTableExists(ifTableExists); - command.setOldColumnName(columnName); - String newName = readColumnIdentifier(); - command.setNewColumnName(newName); - return command; - } else if (readIf(CONSTRAINT)) { - String constraintName = readIdentifierWithSchema(schema.getName()); - checkSchema(schema); - read("TO"); - AlterTableRenameConstraint command = new AlterTableRenameConstraint( - session, schema); - command.setConstraintName(constraintName); - String newName = readColumnIdentifier(); - command.setNewConstraintName(newName); - return commandIfTableExists(schema, tableName, ifTableExists, command); - } else { - read("TO"); - String newName = readIdentifierWithSchema(schema.getName()); - checkSchema(schema); - AlterTableRename command = new AlterTableRename(session, - getSchema()); - command.setOldTableName(tableName); - command.setNewTableName(newName); - command.setIfTableExists(ifTableExists); - command.setHidden(readIf("HIDDEN")); - return command; - } } else if (readIf("DROP")) { - if (readIf(CONSTRAINT)) { - boolean ifExists = readIfExists(false); - String constraintName = readIdentifierWithSchema(schema.getName()); - ifExists = readIfExists(ifExists); - checkSchema(schema); - AlterTableDropConstraint command = new AlterTableDropConstraint( - session, getSchema(), ifExists); - command.setConstraintName(constraintName); - return commandIfTableExists(schema, tableName, ifTableExists, command); - } else if (readIf(FOREIGN)) { - // MySQL compatibility - read("KEY"); - String constraintName = readIdentifierWithSchema(schema.getName()); - checkSchema(schema); - AlterTableDropConstraint command = new AlterTableDropConstraint( - 
session, getSchema(), false); - command.setConstraintName(constraintName); - return commandIfTableExists(schema, tableName, ifTableExists, command); - } else if (readIf("INDEX")) { - // MySQL compatibility - String indexOrConstraintName = readIdentifierWithSchema(schema.getName()); - final SchemaCommand command; - if (schema.findIndex(session, indexOrConstraintName) != null) { - DropIndex dropIndexCommand = new DropIndex(session, getSchema()); - dropIndexCommand.setIndexName(indexOrConstraintName); - command = dropIndexCommand; - } else { - AlterTableDropConstraint dropCommand = new AlterTableDropConstraint( - session, getSchema(), false/*ifExists*/); - dropCommand.setConstraintName(indexOrConstraintName); - command = dropCommand; - } - return commandIfTableExists(schema, tableName, ifTableExists, command); - } else if (readIf(PRIMARY)) { - read("KEY"); - Table table = tableIfTableExists(schema, tableName, ifTableExists); - if (table == null) { - return new NoOperation(session); - } - Index idx = table.getPrimaryKey(); - DropIndex command = new DropIndex(session, schema); - command.setIndexName(idx.getName()); - return command; - } else { - readIf("COLUMN"); - boolean ifExists = readIfExists(false); - ArrayList columnsToRemove = new ArrayList<>(); - Table table = tableIfTableExists(schema, tableName, ifTableExists); - // For Oracle compatibility - open bracket required - boolean openingBracketDetected = readIf(OPEN_PAREN); - do { - String columnName = readColumnIdentifier(); - if (table != null) { - if (!ifExists || table.doesColumnExist(columnName)) { - Column column = table.getColumn(columnName); - columnsToRemove.add(column); - } - } - } while (readIf(COMMA)); - if (openingBracketDetected) { - // For Oracle compatibility - close bracket - read(CLOSE_PAREN); - } - if (table == null || columnsToRemove.isEmpty()) { - return new NoOperation(session); + if (readIf(DEFAULT)) { + if (readIf(ON)) { + read(NULL); + AlterTableAlterColumn command = new 
AlterTableAlterColumn(session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumn(column); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL); + command.setBooleanFlag(false); + return command; } + return getAlterTableAlterColumnDropDefaultExpression(schema, tableName, ifTableExists, column, + CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT); + } else if (readIf("EXPRESSION")) { + return getAlterTableAlterColumnDropDefaultExpression(schema, tableName, ifTableExists, column, + CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION); + } else if (readIf("IDENTITY")) { + return getAlterTableAlterColumnDropDefaultExpression(schema, tableName, ifTableExists, column, + CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_IDENTITY); + } + if (readIf(ON)) { + read("UPDATE"); AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); - command.setType(CommandInterface.ALTER_TABLE_DROP_COLUMN); command.setTableName(tableName); command.setIfTableExists(ifTableExists); - command.setColumnsToRemove(columnsToRemove); + command.setOldColumn(column); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE); + command.setDefaultExpression(null); return command; } - } else if (readIf("CHANGE")) { - // MySQL compatibility - readIf("COLUMN"); - String columnName = readColumnIdentifier(); - String newColumnName = readColumnIdentifier(); - Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists); - boolean nullable = column == null ? true : column.isNullable(); - // new column type ignored. RENAME and MODIFY are - // a single command in MySQL but two different commands in H2. 
- parseColumnForTable(newColumnName, nullable, true); - AlterTableRenameColumn command = new AlterTableRenameColumn(session, schema); + read(NOT); + read(NULL); + AlterTableAlterColumn command = new AlterTableAlterColumn( + session, schema); command.setTableName(tableName); command.setIfTableExists(ifTableExists); - command.setOldColumnName(columnName); - command.setNewColumnName(newColumnName); + command.setOldColumn(column); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL); return command; - } else if (readIf("MODIFY")) { - // MySQL compatibility (optional) - readIf("COLUMN"); - // Oracle specifies (but will not require) an opening parenthesis - boolean hasOpeningBracket = readIf(OPEN_PAREN); - String columnName = readColumnIdentifier(); - AlterTableAlterColumn command; - NullConstraintType nullConstraint = parseNotNullConstraint(); - switch (nullConstraint) { - case NULL_IS_ALLOWED: - case NULL_IS_NOT_ALLOWED: - command = new AlterTableAlterColumn(session, schema); - command.setTableName(tableName); - command.setIfTableExists(ifTableExists); - Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists); - command.setOldColumn(column); - if (nullConstraint == NullConstraintType.NULL_IS_ALLOWED) { - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL); - } else { - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NOT_NULL); + } else if (readIf("TYPE")) { + // PostgreSQL compatibility + return parseAlterTableAlterColumnDataType(schema, tableName, columnName, ifTableExists, ifExists); + } else if (readIf("SELECTIVITY")) { + AlterTableAlterColumn command = new AlterTableAlterColumn( + session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_SELECTIVITY); + command.setOldColumn(column); + command.setSelectivity(readExpression()); + return command; + } + Prepared command = 
parseAlterTableAlterColumnIdentity(schema, tableName, ifTableExists, column); + if (command != null) { + return command; + } + if (readIf(SET)) { + return parseAlterTableAlterColumnSet(schema, tableName, ifTableExists, ifExists, columnName, column); + } + return parseAlterTableAlterColumnType(schema, tableName, columnName, ifTableExists, ifExists, true); + } + + private Prepared getAlterTableAlterColumnDropDefaultExpression(Schema schema, String tableName, + boolean ifTableExists, Column column, int type) { + AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumn(column); + command.setType(type); + command.setDefaultExpression(null); + return command; + } + + private Prepared parseAlterTableAlterColumnIdentity(Schema schema, String tableName, boolean ifTableExists, + Column column) { + int index = tokenIndex; + Boolean always = null; + if (readIf(SET) && readIf("GENERATED")) { + if (readIf("ALWAYS")) { + always = true; + } else { + read("BY"); + read(DEFAULT); + always = false; + } + } else { + setTokenIndex(index); + } + SequenceOptions options = new SequenceOptions(); + if (!parseSequenceOptions(options, null, false, true) && always == null) { + return null; + } + if (column == null) { + return new NoOperation(session); + } + if (!column.isIdentity()) { + AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + parseAlterColumnUsingIf(command); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE); + command.setOldColumn(column); + Column newColumn = column.getClone(); + newColumn.setIdentityOptions(options, always != null && always); + command.setNewColumn(newColumn); + return command; + } + AlterSequence command = new AlterSequence(session, schema); + command.setColumn(column, always); + command.setOptions(options); + return 
commandIfTableExists(schema, tableName, ifTableExists, command); + } + + private Prepared parseAlterTableAlterColumnSet(Schema schema, String tableName, boolean ifTableExists, + boolean ifExists, String columnName, Column column) { + if (readIf("DATA")) { + read("TYPE"); + return parseAlterTableAlterColumnDataType(schema, tableName, columnName, ifTableExists, ifExists); + } + AlterTableAlterColumn command = new AlterTableAlterColumn( + session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumn(column); + NullConstraintType nullConstraint = parseNotNullConstraint(); + switch (nullConstraint) { + case NULL_IS_ALLOWED: + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL); + break; + case NULL_IS_NOT_ALLOWED: + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NOT_NULL); + break; + case NO_NULL_CONSTRAINT_FOUND: + if (readIf(DEFAULT)) { + if (readIf(ON)) { + read(NULL); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL); + command.setBooleanFlag(true); + break; } - break; - case NO_NULL_CONSTRAINT_FOUND: - command = parseAlterTableAlterColumnType(schema, tableName, columnName, ifTableExists); - break; - default: - throw DbException.get(ErrorCode.UNKNOWN_MODE_1, - "Internal Error - unhandled case: " + nullConstraint.name()); + Expression defaultExpression = readExpression(); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT); + command.setDefaultExpression(defaultExpression); + } else if (readIf(ON)) { + read("UPDATE"); + Expression onUpdateExpression = readExpression(); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE); + command.setDefaultExpression(onUpdateExpression); + } else if (readIf("INVISIBLE")) { + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY); + command.setBooleanFlag(false); + } else if (readIf("VISIBLE")) { + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY); + 
command.setBooleanFlag(true); } - if(hasOpeningBracket) { - read(CLOSE_PAREN); + break; + default: + throw DbException.get(ErrorCode.UNKNOWN_MODE_1, + "Internal Error - unhandled case: " + nullConstraint.name()); + } + return command; + } + + private Prepared parseAlterTableDrop(Schema schema, String tableName, boolean ifTableExists) { + if (readIf(CONSTRAINT)) { + boolean ifExists = readIfExists(false); + String constraintName = readIdentifierWithSchema(schema.getName()); + ifExists = readIfExists(ifExists); + checkSchema(schema); + AlterTableDropConstraint command = new AlterTableDropConstraint(session, getSchema(), ifExists); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setConstraintName(constraintName); + ConstraintActionType dropAction = parseCascadeOrRestrict(); + if (dropAction != null) { + command.setDropAction(dropAction); } return command; - } else if (readIf("ALTER")) { - readIf("COLUMN"); - String columnName = readColumnIdentifier(); - Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists); - if (readIf("RENAME")) { - read("TO"); - AlterTableRenameColumn command = new AlterTableRenameColumn( - session, schema); - command.setTableName(tableName); - command.setIfTableExists(ifTableExists); - command.setOldColumnName(columnName); - String newName = readColumnIdentifier(); - command.setNewColumnName(newName); + } else if (readIf(PRIMARY)) { + read(KEY); + Table table = tableIfTableExists(schema, tableName, ifTableExists); + if (table == null) { + return new NoOperation(session); + } + Index idx = table.getPrimaryKey(); + DropIndex command = new DropIndex(session, schema); + command.setIndexName(idx.getName()); + return command; + } else if (database.getMode().alterTableExtensionsMySQL) { + Prepared command = parseAlterTableDropCompatibility(schema, tableName, ifTableExists); + if (command != null) { return command; - } else if (readIf("DROP")) { - if (readIf("DEFAULT")) { - 
AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); - command.setTableName(tableName); - command.setIfTableExists(ifTableExists); - command.setOldColumn(column); - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT); - command.setDefaultExpression(null); - return command; + } + } + readIf("COLUMN"); + boolean ifExists = readIfExists(false); + ArrayList columnsToRemove = new ArrayList<>(); + Table table = tableIfTableExists(schema, tableName, ifTableExists); + // For Oracle compatibility - open bracket required + boolean openingBracketDetected = readIf(OPEN_PAREN); + do { + String columnName = readIdentifier(); + if (table != null) { + Column column = table.getColumn(columnName, ifExists); + if (column != null) { + columnsToRemove.add(column); } - if (readIf(ON)) { - read("UPDATE"); - AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); - command.setTableName(tableName); - command.setIfTableExists(ifTableExists); - command.setOldColumn(column); - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE); - command.setDefaultExpression(null); - return command; + } + } while (readIf(COMMA)); + if (openingBracketDetected) { + // For Oracle compatibility - close bracket + read(CLOSE_PAREN); + } + if (table == null || columnsToRemove.isEmpty()) { + return new NoOperation(session); + } + AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + command.setType(CommandInterface.ALTER_TABLE_DROP_COLUMN); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setColumnsToRemove(columnsToRemove); + return command; + } + + private Prepared parseAlterTableDropCompatibility(Schema schema, String tableName, boolean ifTableExists) { + if (readIf(FOREIGN)) { + read(KEY); + // For MariaDB + boolean ifExists = readIfExists(false); + String constraintName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + AlterTableDropConstraint command = 
new AlterTableDropConstraint(session, getSchema(), ifExists); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setConstraintName(constraintName); + return command; + } else if (readIf("INDEX")) { + // For MariaDB + boolean ifExists = readIfExists(false); + String indexOrConstraintName = readIdentifierWithSchema(schema.getName()); + if (schema.findIndex(session, indexOrConstraintName) != null) { + DropIndex dropIndexCommand = new DropIndex(session, getSchema()); + dropIndexCommand.setIndexName(indexOrConstraintName); + return commandIfTableExists(schema, tableName, ifTableExists, dropIndexCommand); + } else { + AlterTableDropConstraint dropCommand = new AlterTableDropConstraint(session, getSchema(), ifExists); + dropCommand.setTableName(tableName); + dropCommand.setIfTableExists(ifTableExists); + dropCommand.setConstraintName(indexOrConstraintName); + return dropCommand; + } + } + return null; + } + + private Prepared parseAlterTableRename(Schema schema, String tableName, boolean ifTableExists) { + if (readIf("COLUMN")) { + // PostgreSQL syntax + String columnName = readIdentifier(); + read(TO); + AlterTableRenameColumn command = new AlterTableRenameColumn( + session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumnName(columnName); + command.setNewColumnName(readIdentifier()); + return command; + } else if (readIf(CONSTRAINT)) { + String constraintName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + read(TO); + AlterTableRenameConstraint command = new AlterTableRenameConstraint(session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setConstraintName(constraintName); + command.setNewConstraintName(readIdentifier()); + return command; + } else { + read(TO); + String newName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + AlterTableRename command = new 
AlterTableRename(session, + getSchema()); + command.setTableName(tableName); + command.setNewTableName(newName); + command.setIfTableExists(ifTableExists); + command.setHidden(readIf("HIDDEN")); + return command; + } + } + + private Prepared parseAlterTableSet(Schema schema, String tableName, boolean ifTableExists) { + read("REFERENTIAL_INTEGRITY"); + int type = CommandInterface.ALTER_TABLE_SET_REFERENTIAL_INTEGRITY; + boolean value = readBooleanSetting(); + AlterTableSet command = new AlterTableSet(session, + schema, type, value); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + if (readIf(CHECK)) { + command.setCheckExisting(true); + } else if (readIf("NOCHECK")) { + command.setCheckExisting(false); + } + return command; + } + + private Prepared parseAlterTableCompatibility(Schema schema, String tableName, boolean ifTableExists, Mode mode) { + if (mode.alterTableExtensionsMySQL) { + if (readIf("AUTO_INCREMENT")) { + readIf(EQUAL); + Expression restart = readExpression(); + Table table = tableIfTableExists(schema, tableName, ifTableExists); + if (table == null) { + return new NoOperation(session); } - read(NOT); - read(NULL); - AlterTableAlterColumn command = new AlterTableAlterColumn( - session, schema); + Index idx = table.findPrimaryKey(); + if (idx != null) { + for (IndexColumn ic : idx.getIndexColumns()) { + Column column = ic.column; + if (column.isIdentity()) { + AlterSequence command = new AlterSequence(session, schema); + command.setColumn(column, null); + SequenceOptions options = new SequenceOptions(); + options.setRestartValue(restart); + command.setOptions(options); + return command; + } + } + } + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "AUTO_INCREMENT PRIMARY KEY"); + } else if (readIf("CHANGE")) { + readIf("COLUMN"); + String columnName = readIdentifier(); + String newColumnName = readIdentifier(); + Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists, false); + boolean nullable 
= column == null ? true : column.isNullable(); + // new column type ignored. RENAME and MODIFY are + // a single command in MySQL but two different commands in H2. + parseColumnForTable(newColumnName, nullable); + AlterTableRenameColumn command = new AlterTableRenameColumn(session, schema); command.setTableName(tableName); command.setIfTableExists(ifTableExists); - command.setOldColumn(column); - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL); + command.setOldColumnName(columnName); + command.setNewColumnName(newColumnName); return command; - } else if (readIf("TYPE")) { - // PostgreSQL compatibility - return parseAlterTableAlterColumnDataType(schema, tableName, columnName, ifTableExists); - } else if (readIf("SET")) { - if (readIf("DATA")) { - read("TYPE"); - return parseAlterTableAlterColumnDataType(schema, tableName, columnName, ifTableExists); - } - AlterTableAlterColumn command = new AlterTableAlterColumn( - session, schema); + } else if (readIf("CONVERT")) { + readIf(TO); + readIf("CHARACTER"); + readIf(SET); + readMySQLCharset(); + + if (readIf("COLLATE")) { + readMySQLCharset(); + } + + return new NoOperation(session); + } + } + if (mode.alterTableModifyColumn && readIf("MODIFY")) { + // MySQL compatibility (optional) + readIf("COLUMN"); + // Oracle specifies (but will not require) an opening parenthesis + boolean hasOpeningBracket = readIf(OPEN_PAREN); + String columnName = readIdentifier(); + AlterTableAlterColumn command; + NullConstraintType nullConstraint = parseNotNullConstraint(); + switch (nullConstraint) { + case NULL_IS_ALLOWED: + case NULL_IS_NOT_ALLOWED: + command = new AlterTableAlterColumn(session, schema); command.setTableName(tableName); command.setIfTableExists(ifTableExists); + Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists, false); command.setOldColumn(column); - NullConstraintType nullConstraint = parseNotNullConstraint(); - switch (nullConstraint) { - case NULL_IS_ALLOWED: + if 
(nullConstraint == NullConstraintType.NULL_IS_ALLOWED) { command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL); - break; - case NULL_IS_NOT_ALLOWED: + } else { command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NOT_NULL); - break; - case NO_NULL_CONSTRAINT_FOUND: - if (readIf("DEFAULT")) { - Expression defaultExpression = readExpression(); - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT); - command.setDefaultExpression(defaultExpression); - } else if (readIf(ON)) { - read("UPDATE"); - Expression onUpdateExpression = readExpression(); - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE); - command.setDefaultExpression(onUpdateExpression); - } else if (readIf("INVISIBLE")) { - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY); - command.setVisible(false); - } else if (readIf("VISIBLE")) { - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY); - command.setVisible(true); - } - break; - default: - throw DbException.get(ErrorCode.UNKNOWN_MODE_1, - "Internal Error - unhandled case: " + nullConstraint.name()); } - return command; - } else if (readIf("RESTART")) { - readIf(WITH); - Expression start = readExpression(); - AlterSequence command = new AlterSequence(session, schema); - command.setColumn(column); - SequenceOptions options = new SequenceOptions(); - options.setStartValue(start); - command.setOptions(options); - return commandIfTableExists(schema, tableName, ifTableExists, command); - } else if (readIf("SELECTIVITY")) { - AlterTableAlterColumn command = new AlterTableAlterColumn( - session, schema); - command.setTableName(tableName); - command.setIfTableExists(ifTableExists); - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_SELECTIVITY); - command.setOldColumn(column); - command.setSelectivity(readExpression()); - return command; - } else { - return parseAlterTableAlterColumnType(schema, tableName, columnName, ifTableExists); + break; + case 
NO_NULL_CONSTRAINT_FOUND: + command = parseAlterTableAlterColumnType(schema, tableName, columnName, ifTableExists, false, + mode.getEnum() != ModeEnum.MySQL); + break; + default: + throw DbException.get(ErrorCode.UNKNOWN_MODE_1, + "Internal Error - unhandled case: " + nullConstraint.name()); + } + if (hasOpeningBracket) { + read(CLOSE_PAREN); } + return command; } throw getSyntaxError(); } @@ -7494,15 +8927,18 @@ private Prepared parseAlterTable() { private Table tableIfTableExists(Schema schema, String tableName, boolean ifTableExists) { Table table = schema.resolveTableOrView(session, tableName); if (table == null && !ifTableExists) { - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + throw getTableOrViewNotFoundDbException(schema.getName(), tableName); } return table; } private Column columnIfTableExists(Schema schema, String tableName, - String columnName, boolean ifTableExists) { + String columnName, boolean ifTableExists, boolean ifExists) { Table table = tableIfTableExists(schema, tableName, ifTableExists); - return table == null ? null : table.getColumn(columnName); + if (table == null) { + return null; + } + return table.getColumn(columnName, ifExists); } private Prepared commandIfTableExists(Schema schema, String tableName, @@ -7513,15 +8949,12 @@ private Prepared commandIfTableExists(Schema schema, String tableName, } private AlterTableAlterColumn parseAlterTableAlterColumnType(Schema schema, - String tableName, String columnName, boolean ifTableExists) { - Column oldColumn = columnIfTableExists(schema, tableName, columnName, ifTableExists); + String tableName, String columnName, boolean ifTableExists, boolean ifExists, boolean preserveNotNull) { + Column oldColumn = columnIfTableExists(schema, tableName, columnName, ifTableExists, ifExists); Column newColumn = parseColumnForTable(columnName, - oldColumn == null ? 
true : oldColumn.isNullable(), true); - if (readIf(CHECK)) { - Expression expr = readExpression(); - newColumn.addCheckConstraint(session, expr); - } + !preserveNotNull || oldColumn == null || oldColumn.isNullable()); AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + parseAlterColumnUsingIf(command); command.setTableName(tableName); command.setIfTableExists(ifTableExists); command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE); @@ -7531,9 +8964,9 @@ private AlterTableAlterColumn parseAlterTableAlterColumnType(Schema schema, } private AlterTableAlterColumn parseAlterTableAlterColumnDataType(Schema schema, - String tableName, String columnName, boolean ifTableExists) { - Column oldColumn = columnIfTableExists(schema, tableName, columnName, ifTableExists); - Column newColumn = parseColumnWithType(columnName, true); + String tableName, String columnName, boolean ifTableExists, boolean ifExists) { + Column oldColumn = columnIfTableExists(schema, tableName, columnName, ifTableExists, ifExists); + Column newColumn = parseColumnWithType(columnName); if (oldColumn != null) { if (!oldColumn.isNullable()) { newColumn.setNullable(false); @@ -7543,15 +8976,20 @@ private AlterTableAlterColumn parseAlterTableAlterColumnDataType(Schema schema, } Expression e = oldColumn.getDefaultExpression(); if (e != null) { - newColumn.setDefaultExpression(session, e); + if (oldColumn.isGenerated()) { + newColumn.setGeneratedExpression(e); + } else { + newColumn.setDefaultExpression(session, e); + } } e = oldColumn.getOnUpdateExpression(); if (e != null) { newColumn.setOnUpdateExpression(session, e); } - e = oldColumn.getCheckConstraint(session, columnName); - if (e != null) { - newColumn.addCheckConstraint(session, e); + Sequence s = oldColumn.getSequence(); + if (s != null) { + newColumn.setIdentityOptions(new SequenceOptions(s, newColumn.getType()), + oldColumn.isGeneratedAlways()); } String c = oldColumn.getComment(); if (c != null) { @@ -7559,6 
+8997,7 @@ private AlterTableAlterColumn parseAlterTableAlterColumnDataType(Schema schema, } } AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + parseAlterColumnUsingIf(command); command.setTableName(tableName); command.setIfTableExists(ifTableExists); command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE); @@ -7579,22 +9018,29 @@ private AlterTableAlterColumn parseAlterTableAddColumn(String tableName, command.setIfNotExists(false); do { parseTableColumnDefinition(command, schema, tableName, false); - } while (readIfMore(true)); + } while (readIfMore()); } else { boolean ifNotExists = readIfNotExists(); command.setIfNotExists(ifNotExists); parseTableColumnDefinition(command, schema, tableName, false); + parseAlterColumnUsingIf(command); } if (readIf("BEFORE")) { - command.setAddBefore(readColumnIdentifier()); + command.setAddBefore(readIdentifier()); } else if (readIf("AFTER")) { - command.setAddAfter(readColumnIdentifier()); + command.setAddAfter(readIdentifier()); } else if (readIf("FIRST")) { command.setAddFirst(); } return command; } + private void parseAlterColumnUsingIf(AlterTableAlterColumn command) { + if (readIf(USING)) { + command.setUsingExpression(readExpression()); + } + } + private ConstraintActionType parseAction() { ConstraintActionType result = parseCascadeOrRestrict(); if (result != null) { @@ -7604,11 +9050,11 @@ private ConstraintActionType parseAction() { read("ACTION"); return ConstraintActionType.RESTRICT; } - read("SET"); + read(SET); if (readIf(NULL)) { return ConstraintActionType.SET_NULL; } - read("DEFAULT"); + read(DEFAULT); return ConstraintActionType.SET_DEFAULT; } @@ -7622,28 +9068,22 @@ private ConstraintActionType parseCascadeOrRestrict() { } } - private DefineCommand parseAlterTableAddConstraintIf(String tableName, - Schema schema, boolean ifTableExists) { + private DefineCommand parseTableConstraintIf(String tableName, Schema schema, boolean ifTableExists) { String constraintName = null, 
comment = null; boolean ifNotExists = false; - boolean allowIndexDefinition = database.getMode().indexDefinitionInCreateTable; - boolean allowAffinityKey = database.getMode().allowAffinityKey; if (readIf(CONSTRAINT)) { ifNotExists = readIfNotExists(); constraintName = readIdentifierWithSchema(schema.getName()); checkSchema(schema); comment = readCommentIf(); - allowIndexDefinition = true; } - if (readIf(PRIMARY)) { - read("KEY"); - AlterTableAddConstraint command = new AlterTableAddConstraint( - session, schema, ifNotExists); - command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY); - command.setComment(comment); - command.setConstraintName(constraintName); - command.setTableName(tableName); - command.setIfTableExists(ifTableExists); + AlterTableAddConstraint command; + switch (currentTokenType) { + case PRIMARY: + read(); + read(KEY); + command = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY, ifNotExists); if (readIf("HASH")) { command.setPrimaryKeyHash(true); } @@ -7653,66 +9093,41 @@ private DefineCommand parseAlterTableAddConstraintIf(String tableName, String indexName = readIdentifierWithSchema(); command.setIndex(getSchema().findIndex(session, indexName)); } - return command; - } else if (allowIndexDefinition && (isToken("INDEX") || isToken("KEY"))) { - // MySQL - // need to read ahead, as it could be a column name - int start = lastParseIndex; + break; + case UNIQUE: read(); - if (DataType.getTypeByName(currentToken, database.getMode()) != null) { - // known data type - parseIndex = start; - read(); - return null; - } - CreateIndex command = new CreateIndex(session, schema); - command.setComment(comment); - command.setTableName(tableName); - command.setIfTableExists(ifTableExists); - if (!readIf(OPEN_PAREN)) { - command.setIndexName(readUniqueIdentifier()); - read(OPEN_PAREN); - } - command.setIndexColumns(parseIndexColumnList()); // MySQL compatibility - if (readIf("USING")) { - 
read("BTREE"); + boolean compatibility = database.getMode().indexDefinitionInCreateTable; + if (compatibility) { + if (!readIf(KEY)) { + readIf("INDEX"); + } + if (!isToken(OPEN_PAREN)) { + constraintName = readIdentifier(); + } } - return command; - } else if (allowAffinityKey && readIfAffinity()) { - read("KEY"); read(OPEN_PAREN); - CreateIndex command = createAffinityIndex(schema, tableName, parseIndexColumnList()); - command.setIfTableExists(ifTableExists); - return command; - } - AlterTableAddConstraint command; - if (readIf(CHECK)) { - command = new AlterTableAddConstraint(session, schema, ifNotExists); - command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK); - command.setCheckExpression(readExpression()); - } else if (readIf(UNIQUE)) { - readIf("KEY"); - readIf("INDEX"); - command = new AlterTableAddConstraint(session, schema, ifNotExists); - command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE); - if (!readIf(OPEN_PAREN)) { - constraintName = readUniqueIdentifier(); - read(OPEN_PAREN); + command = new AlterTableAddConstraint(session, schema, CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE, + ifNotExists); + if (readIf(VALUE)) { + read(CLOSE_PAREN); + command.setIndexColumns(null); + } else { + command.setIndexColumns(parseIndexColumnList()); } - command.setIndexColumns(parseIndexColumnList()); if (readIf("INDEX")) { String indexName = readIdentifierWithSchema(); command.setIndex(getSchema().findIndex(session, indexName)); } - // MySQL compatibility - if (readIf("USING")) { + if (compatibility && readIf(USING)) { read("BTREE"); } - } else if (readIf(FOREIGN)) { - command = new AlterTableAddConstraint(session, schema, ifNotExists); - command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL); - read("KEY"); + break; + case FOREIGN: + read(); + command = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL, ifNotExists); + read(KEY); read(OPEN_PAREN); 
command.setIndexColumns(parseIndexColumnList()); if (readIf("INDEX")) { @@ -7721,17 +9136,57 @@ private DefineCommand parseAlterTableAddConstraintIf(String tableName, } read("REFERENCES"); parseReferences(command, schema, tableName); - } else { - if (constraintName != null) { + break; + case CHECK: + read(); + command = new AlterTableAddConstraint(session, schema, CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK, + ifNotExists); + command.setCheckExpression(readExpression()); + break; + default: + if (constraintName == null) { + Mode mode = database.getMode(); + if (mode.indexDefinitionInCreateTable) { + int start = tokenIndex; + if (readIf(KEY) || readIf("INDEX")) { + // MySQL + // need to read ahead, as it could be a column name + if (DataType.getTypeByName(currentToken, mode) == null) { + CreateIndex createIndex = new CreateIndex(session, schema); + createIndex.setComment(comment); + createIndex.setTableName(tableName); + createIndex.setIfTableExists(ifTableExists); + if (!readIf(OPEN_PAREN)) { + createIndex.setIndexName(readIdentifier()); + read(OPEN_PAREN); + } + createIndex.setIndexColumns(parseIndexColumnList()); + // MySQL compatibility + if (readIf(USING)) { + read("BTREE"); + } + return createIndex; + } else { + // known data type + setTokenIndex(start); + } + } + } + return null; + } else { + if (expectedList != null) { + addMultipleExpected(PRIMARY, UNIQUE, FOREIGN, CHECK); + } throw getSyntaxError(); } - return null; } - if (readIf("NOCHECK")) { - command.setCheckExisting(false); - } else { - readIf(CHECK); - command.setCheckExisting(true); + if (command.getType() != CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY) { + if (readIf("NOCHECK")) { + command.setCheckExisting(false); + } else { + readIf(CHECK); + command.setCheckExisting(true); + } } command.setTableName(tableName); command.setIfTableExists(ifTableExists); @@ -7805,6 +9260,17 @@ private CreateLinkedTable parseCreateLinkedTable(boolean temp, } else if (readIf("READONLY")) { 
command.setReadOnly(true); } + if (readIf("FETCH_SIZE")) { + command.setFetchSize(readNonNegativeInt()); + } + if(readIf("AUTOCOMMIT")){ + if(readIf("ON")) { + command.setAutoCommit(true); + } + else if(readIf("OFF")){ + command.setAutoCommit(false); + } + } return command; } @@ -7830,49 +9296,18 @@ private CreateTable parseCreateTable(boolean temp, boolean globalTemp, if (!readIf(CLOSE_PAREN)) { do { parseTableColumnDefinition(command, schema, tableName, true); - } while (readIfMore(false)); + } while (readIfMore()); } } - // Allows "COMMENT='comment'" in DDL statements (MySQL syntax) - if (readIf("COMMENT")) { - if (readIf(EQUAL)) { - // read the complete string comment, but nothing with it for now - readString(); - } + if (database.getMode().getEnum() == ModeEnum.MySQL) { + parseCreateTableMySQLTableOptions(command); } if (readIf("ENGINE")) { - if (readIf(EQUAL)) { - // map MySQL engine types onto H2 behavior - String tableEngine = readUniqueIdentifier(); - if ("InnoDb".equalsIgnoreCase(tableEngine)) { - // ok - } else if (!"MyISAM".equalsIgnoreCase(tableEngine)) { - throw DbException.getUnsupportedException(tableEngine); - } - } else { - command.setTableEngine(readUniqueIdentifier()); - } + command.setTableEngine(readIdentifier()); } if (readIf(WITH)) { command.setTableEngineParams(readTableEngineParams()); } - // MySQL compatibility - if (readIf("AUTO_INCREMENT")) { - read(EQUAL); - if (currentTokenType != VALUE || - currentValue.getValueType() != Value.INT) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, - "integer"); - } - read(); - } - readIf("DEFAULT"); - if (readIf("CHARSET")) { - read(EQUAL); - if (!readIf("UTF8")) { - read("UTF8MB4"); - } - } if (temp) { if (readIf(ON)) { read("COMMIT"); @@ -7899,124 +9334,200 @@ private CreateTable parseCreateTable(boolean temp, boolean globalTemp, if (readIf("HIDDEN")) { command.setHidden(true); } - if (readIf("AS")) { - if (readIf("SORTED")) { - command.setSortedInsertMode(true); - } - 
command.setQuery(parseSelect()); + if (readIf(AS)) { + readIf("SORTED"); + command.setQuery(parseQuery()); if (readIf(WITH)) { command.setWithNoData(readIf("NO")); read("DATA"); } } - // for MySQL compatibility - if (readIf("ROW_FORMAT")) { - if (readIf(EQUAL)) { - readColumnIdentifier(); - } - } return command; } private void parseTableColumnDefinition(CommandWithColumns command, Schema schema, String tableName, boolean forCreateTable) { - DefineCommand c = parseAlterTableAddConstraintIf(tableName, schema, false); + DefineCommand c = parseTableConstraintIf(tableName, schema, false); if (c != null) { command.addConstraintCommand(c); - } else { - String columnName = readColumnIdentifier(); - if (forCreateTable && (currentTokenType == COMMA || currentTokenType == CLOSE_PAREN)) { - command.addColumn(new Column(columnName, TypeInfo.TYPE_UNKNOWN)); - return; - } - Column column = parseColumnForTable(columnName, true, true); - if (column.isAutoIncrement() && column.isPrimaryKey()) { - column.setPrimaryKey(false); - IndexColumn[] cols = { new IndexColumn() }; - cols[0].columnName = column.getName(); - AlterTableAddConstraint pk = new AlterTableAddConstraint( - session, schema, false); - pk.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY); - pk.setTableName(tableName); - pk.setIndexColumns(cols); - command.addConstraintCommand(pk); - } - command.addColumn(column); - String constraintName = null; + return; + } + String columnName = readIdentifier(); + if (forCreateTable && (currentTokenType == COMMA || currentTokenType == CLOSE_PAREN)) { + command.addColumn(new Column(columnName, TypeInfo.TYPE_UNKNOWN)); + return; + } + Column column = parseColumnForTable(columnName, true); + if (column.hasIdentityOptions() && column.isPrimaryKey()) { + command.addConstraintCommand(newPrimaryKeyConstraintCommand(session, schema, tableName, column)); + } + command.addColumn(column); + readColumnConstraints(command, schema, tableName, column); + } + + /** + * Create a new 
alter table command. + * + * @param session the session + * @param schema the schema + * @param tableName the table + * @param column the column + * @return the command + */ + public static AlterTableAddConstraint newPrimaryKeyConstraintCommand(SessionLocal session, Schema schema, + String tableName, Column column) { + column.setPrimaryKey(false); + AlterTableAddConstraint pk = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY, false); + pk.setTableName(tableName); + pk.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) }); + return pk; + } + + private void readColumnConstraints(CommandWithColumns command, Schema schema, String tableName, Column column) { + String comment = column.getComment(); + boolean hasPrimaryKey = false, hasNotNull = false; + NullConstraintType nullType; + Mode mode = database.getMode(); + for (;;) { + String constraintName; if (readIf(CONSTRAINT)) { - constraintName = readColumnIdentifier(); + constraintName = readIdentifier(); + } else if (comment == null && (comment = readCommentIf()) != null) { + // Compatibility: COMMENT may be specified appear after some constraint + column.setComment(comment); + continue; + } else { + constraintName = null; } - // For compatibility with Apache Ignite. 
- boolean allowAffinityKey = database.getMode().allowAffinityKey; - boolean affinity = allowAffinityKey && readIfAffinity(); - if (readIf(PRIMARY)) { - read("KEY"); + if (!hasPrimaryKey && readIf(PRIMARY)) { + read(KEY); + hasPrimaryKey = true; boolean hash = readIf("HASH"); - IndexColumn[] cols = { new IndexColumn() }; - cols[0].columnName = column.getName(); - AlterTableAddConstraint pk = new AlterTableAddConstraint( - session, schema, false); + AlterTableAddConstraint pk = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY, false); pk.setConstraintName(constraintName); pk.setPrimaryKeyHash(hash); - pk.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY); pk.setTableName(tableName); - pk.setIndexColumns(cols); + pk.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) }); command.addConstraintCommand(pk); - if (readIf("AUTO_INCREMENT")) { - parseAutoIncrement(column); - } - if (database.getMode().useIdentityAsAutoIncrement) { - if (readIf(NOT)) { - read(NULL); - column.setNullable(false); - } - if (readIf("IDENTITY")) { - parseAutoIncrement(column); - } - } - if (affinity) { - CreateIndex idx = createAffinityIndex(schema, tableName, cols); - command.addConstraintCommand(idx); - } - } else if (affinity) { - read("KEY"); - IndexColumn[] cols = { new IndexColumn() }; - cols[0].columnName = column.getName(); - CreateIndex idx = createAffinityIndex(schema, tableName, cols); - command.addConstraintCommand(idx); } else if (readIf(UNIQUE)) { - AlterTableAddConstraint unique = new AlterTableAddConstraint( - session, schema, false); + AlterTableAddConstraint unique = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE, false); unique.setConstraintName(constraintName); - unique.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE); - IndexColumn[] cols = { new IndexColumn() }; - cols[0].columnName = columnName; - unique.setIndexColumns(cols); + 
unique.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) }); unique.setTableName(tableName); command.addConstraintCommand(unique); - } - if (NullConstraintType.NULL_IS_NOT_ALLOWED == parseNotNullConstraint()) { - column.setNullable(false); - } - if (readIf(CHECK)) { - Expression expr = readExpression(); - column.addCheckConstraint(session, expr); - } - if (readIf("REFERENCES")) { - AlterTableAddConstraint ref = new AlterTableAddConstraint( - session, schema, false); + } else if (!hasNotNull + && (nullType = parseNotNullConstraint()) != NullConstraintType.NO_NULL_CONSTRAINT_FOUND) { + hasNotNull = true; + if (nullType == NullConstraintType.NULL_IS_NOT_ALLOWED) { + column.setNullable(false); + } else if (nullType == NullConstraintType.NULL_IS_ALLOWED) { + if (column.isIdentity()) { + throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, column.getName()); + } + column.setNullable(true); + } + } else if (readIf(CHECK)) { + AlterTableAddConstraint check = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK, false); + check.setConstraintName(constraintName); + check.setTableName(tableName); + check.setCheckExpression(readExpression()); + command.addConstraintCommand(check); + } else if (readIf("REFERENCES")) { + AlterTableAddConstraint ref = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL, false); ref.setConstraintName(constraintName); - ref.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL); - IndexColumn[] cols = { new IndexColumn() }; - cols[0].columnName = columnName; - ref.setIndexColumns(cols); + ref.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) }); ref.setTableName(tableName); parseReferences(ref, schema, tableName); command.addConstraintCommand(ref); + } else if (constraintName == null) { + if (column.getIdentityOptions() != null || !parseCompatibilityIdentity(column, mode)) { + return; + } + } 
else { + throw getSyntaxError(); + } + } + } + + private boolean parseCompatibilityIdentity(Column column, Mode mode) { + if (mode.autoIncrementClause && readIf("AUTO_INCREMENT")) { + parseCompatibilityIdentityOptions(column); + return true; + } + if (mode.identityClause && readIf("IDENTITY")) { + parseCompatibilityIdentityOptions(column); + return true; + } + return false; + } + + private void parseCreateTableMySQLTableOptions(CreateTable command) { + boolean requireNext = false; + for (;;) { + if (readIf("AUTO_INCREMENT")) { + readIf(EQUAL); + Expression value = readExpression(); + set: { + AlterTableAddConstraint primaryKey = command.getPrimaryKey(); + if (primaryKey != null) { + for (IndexColumn ic : primaryKey.getIndexColumns()) { + String columnName = ic.columnName; + for (Column column : command.getColumns()) { + if (database.equalsIdentifiers(column.getName(), columnName)) { + SequenceOptions options = column.getIdentityOptions(); + if (options != null) { + options.setStartValue(value); + break set; + } + } + } + } + } + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "AUTO_INCREMENT PRIMARY KEY"); + } + } else if (readIf(DEFAULT)) { + if (readIf("CHARACTER")) { + read(SET); + } else { + readIf("CHARSET"); + readIf("COLLATE"); + } + readMySQLCharset(); + } else if (readIf("CHARACTER")) { + read(SET); + readMySQLCharset(); + } else if (readIf("COLLATE")) { + readMySQLCharset(); + } else if (readIf("CHARSET")) { + readMySQLCharset(); + } else if (readIf("COMMENT")) { + readIf(EQUAL); + command.setComment(readString()); + } else if (readIf("ENGINE")) { + readIf(EQUAL); + readIdentifier(); + } else if (readIf("ROW_FORMAT")) { + readIf(EQUAL); + readIdentifier(); + } else if (requireNext) { + throw getSyntaxError(); + } else { + break; } + requireNext = readIf(COMMA); } } + private void readMySQLCharset() { + readIf(EQUAL); + readIdentifier(); + } + /** * Enumeration describing null constraints */ @@ -8024,33 +9535,40 @@ private enum NullConstraintType { 
NULL_IS_ALLOWED, NULL_IS_NOT_ALLOWED, NO_NULL_CONSTRAINT_FOUND } + private NullConstraintType parseNotNullConstraint(NullConstraintType nullConstraint) { + if (nullConstraint == NullConstraintType.NO_NULL_CONSTRAINT_FOUND) { + nullConstraint = parseNotNullConstraint(); + } + return nullConstraint; + } + private NullConstraintType parseNotNullConstraint() { - NullConstraintType nullConstraint = NullConstraintType.NO_NULL_CONSTRAINT_FOUND; - if (isToken(NOT) || isToken(NULL)) { - if (readIf(NOT)) { - read(NULL); - nullConstraint = NullConstraintType.NULL_IS_NOT_ALLOWED; - } else { - read(NULL); + NullConstraintType nullConstraint; + if (readIf(NOT)) { + read(NULL); + nullConstraint = NullConstraintType.NULL_IS_NOT_ALLOWED; + } else if (readIf(NULL)) { + nullConstraint = NullConstraintType.NULL_IS_ALLOWED; + } else { + return NullConstraintType.NO_NULL_CONSTRAINT_FOUND; + } + if (database.getMode().getEnum() == ModeEnum.Oracle) { + nullConstraint = parseNotNullCompatibility(nullConstraint); + } + return nullConstraint; + } + + private NullConstraintType parseNotNullCompatibility(NullConstraintType nullConstraint) { + if (readIf("ENABLE")) { + if (!readIf("VALIDATE") && readIf("NOVALIDATE")) { + // Turn off constraint, allow NULLs nullConstraint = NullConstraintType.NULL_IS_ALLOWED; } - if (database.getMode().getEnum() == ModeEnum.Oracle) { - if (readIf("ENABLE")) { - // Leave constraint 'as is' - readIf("VALIDATE"); - // Turn off constraint, allow NULLs - if (readIf("NOVALIDATE")) { - nullConstraint = NullConstraintType.NULL_IS_ALLOWED; - } - } - // Turn off constraint, allow NULLs - if (readIf("DISABLE")) { - nullConstraint = NullConstraintType.NULL_IS_ALLOWED; - // ignore validate - readIf("VALIDATE"); - // ignore novalidate - readIf("NOVALIDATE"); - } + } else if (readIf("DISABLE")) { + // Turn off constraint, allow NULLs + nullConstraint = NullConstraintType.NULL_IS_ALLOWED; + if (!readIf("VALIDATE")) { + readIf("NOVALIDATE"); } } return nullConstraint; @@ 
-8074,14 +9592,6 @@ private CreateSynonym parseCreateSynonym(boolean orReplace) { return command; } - private CreateIndex createAffinityIndex(Schema schema, String tableName, IndexColumn[] indexColumns) { - CreateIndex idx = new CreateIndex(session, schema); - idx.setTableName(tableName); - idx.setIndexColumns(indexColumns); - idx.setAffinity(true); - return idx; - } - private static int getCompareType(int tokenType) { switch (tokenType) { case EQUAL: @@ -8107,38 +9617,19 @@ private static int getCompareType(int tokenType) { * Add double quotes around an identifier if required. * * @param s the identifier - * @param alwaysQuote quote all identifiers + * @param sqlFlags formatting flags * @return the quoted identifier */ - public static String quoteIdentifier(String s, boolean alwaysQuote) { + public static String quoteIdentifier(String s, int sqlFlags) { if (s == null) { return "\"\""; } - if (!alwaysQuote && ParserUtil.isSimpleIdentifier(s, false, false)) { + if ((sqlFlags & HasSQL.QUOTE_ONLY_WHEN_REQUIRED) != 0 && ParserUtil.isSimpleIdentifier(s, false, false)) { return s; } return StringUtils.quoteIdentifier(s); } - /** - * Add double quotes around an identifier if required and appends it to the - * specified string builder. 
- * - * @param builder string builder to append to - * @param s the identifier - * @param alwaysQuote quote all identifiers - * @return the specified builder - */ - public static StringBuilder quoteIdentifier(StringBuilder builder, String s, boolean alwaysQuote) { - if (s == null) { - return builder.append("\"\""); - } - if (!alwaysQuote && ParserUtil.isSimpleIdentifier(s, false, false)) { - return builder.append(s); - } - return StringUtils.quoteIdentifier(builder, s); - } - public void setLiteralsChecked(boolean literalsChecked) { this.literalsChecked = literalsChecked; } @@ -8147,8 +9638,8 @@ public void setRightsChecked(boolean rightsChecked) { this.rightsChecked = rightsChecked; } - public void setSuppliedParameterList(ArrayList suppliedParameterList) { - this.suppliedParameterList = suppliedParameterList; + public void setSuppliedParameters(ArrayList suppliedParameters) { + this.suppliedParameters = suppliedParameters; } /** @@ -8159,11 +9650,29 @@ public void setSuppliedParameterList(ArrayList suppliedParameterList) */ public Expression parseExpression(String sql) { parameters = Utils.newSmallArrayList(); - initialize(sql); + initialize(sql, null, false); read(); return readExpression(); } + /** + * Parse a SQL code snippet that represents an expression for a domain constraint. + * + * @param sql the code snippet + * @return the expression object + */ + public Expression parseDomainConstraintExpression(String sql) { + parameters = Utils.newSmallArrayList(); + initialize(sql, null, false); + read(); + try { + parseDomainConstraint = true; + return readExpression(); + } finally { + parseDomainConstraint = false; + } + } + /** * Parse a SQL code snippet that represents a table name. 
* @@ -8172,13 +9681,69 @@ public Expression parseExpression(String sql) { */ public Table parseTableName(String sql) { parameters = Utils.newSmallArrayList(); - initialize(sql); + initialize(sql, null, false); read(); return readTableOrView(); } + /** + * Parses a list of column names or numbers in parentheses. + * + * @param sql the source SQL + * @param offset the initial offset + * @return the array of column names ({@code String[]}) or numbers + * ({@code int[]}) + * @throws DbException on syntax error + */ + public Object parseColumnList(String sql, int offset) { + initialize(sql, null, true); + for (int i = 0, l = tokens.size(); i < l; i++) { + if (tokens.get(i).start() >= offset) { + setTokenIndex(i); + break; + } + } + read(OPEN_PAREN); + if (readIf(CLOSE_PAREN)) { + return Utils.EMPTY_INT_ARRAY; + } + if (isIdentifier()) { + ArrayList list = Utils.newSmallArrayList(); + do { + if (!isIdentifier()) { + throw getSyntaxError(); + } + list.add(currentToken); + read(); + } while (readIfMore()); + return list.toArray(new String[0]); + } else if (currentTokenType == LITERAL) { + ArrayList list = Utils.newSmallArrayList(); + do { + list.add(readInt()); + } while (readIfMore()); + int count = list.size(); + int[] array = new int[count]; + for (int i = 0; i < count; i++) { + array[i] = list.get(i); + } + return array; + } else { + throw getSyntaxError(); + } + } + + /** + * Returns the last parse index. + * + * @return the last parse index + */ + public int getLastParseIndex() { + return token.start(); + } + @Override public String toString() { - return StringUtils.addAsterisk(sqlCommand, parseIndex); + return StringUtils.addAsterisk(sqlCommand, token.start()); } } diff --git a/h2/src/main/org/h2/command/Prepared.java b/h2/src/main/org/h2/command/Prepared.java index 6e32e7713f..f9a88835d9 100644 --- a/h2/src/main/org/h2/command/Prepared.java +++ b/h2/src/main/org/h2/command/Prepared.java @@ -1,24 +1,25 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.Parameter; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.result.ResultInterface; import org.h2.table.TableView; -import org.h2.util.MathUtils; -import org.h2.value.Value; +import org.h2.util.HasSQL; /** * A prepared statement. @@ -28,13 +29,18 @@ public abstract class Prepared { /** * The session. */ - protected Session session; + protected SessionLocal session; /** * The SQL string. */ protected String sqlStatement; + /** + * The SQL tokens. + */ + protected ArrayList sqlTokens; + /** * Whether to create a new object (for indexes). */ @@ -73,7 +79,7 @@ public abstract class Prepared { * * @param session the session */ - public Prepared(Session session) { + public Prepared(SessionLocal session) { this.session = session; modificationMetaId = session.getDatabase().getModificationMetaId(); } @@ -173,7 +179,7 @@ protected void checkParameters() { if (persistedObjectId < 0) { // restore original persistedObjectId on Command re-run // i.e. 
due to concurrent update - persistedObjectId = -persistedObjectId - 1; + persistedObjectId = ~persistedObjectId; } if (parameters != null) { for (Parameter param : parameters) { @@ -213,7 +219,7 @@ public void prepare() { * @return the update count * @throws DbException if it is a query */ - public int update() { + public long update() { throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_QUERY); } @@ -225,7 +231,7 @@ public int update() { * @throws DbException if it is not a query */ @SuppressWarnings("unused") - public ResultInterface query(int maxrows) { + public ResultInterface query(long maxrows) { throw DbException.get(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY); } @@ -233,9 +239,11 @@ public ResultInterface query(int maxrows) { * Set the SQL statement. * * @param sql the SQL statement + * @param sqlTokens the SQL tokens */ - public void setSQL(String sql) { + public final void setSQL(String sql, ArrayList sqlTokens) { this.sqlStatement = sql; + this.sqlTokens = sqlTokens; } /** @@ -243,10 +251,19 @@ public void setSQL(String sql) { * * @return the SQL statement */ - public String getSQL() { + public final String getSQL() { return sqlStatement; } + /** + * Get the SQL tokens. + * + * @return the SQL tokens + */ + public final ArrayList getSQLTokens() { + return sqlTokens; + } + /** * Get the object id to use for the database object that is created in this * statement. This id is only set when the object is already persisted. @@ -254,7 +271,7 @@ public String getSQL() { * * @return the object id or 0 if not set */ - protected int getPersistedObjectId() { + public int getPersistedObjectId() { int id = persistedObjectId; return id >= 0 ? 
id : 0; } @@ -271,19 +288,19 @@ protected int getObjectId() { if (id == 0) { id = session.getDatabase().allocateObjectId(); } else if (id < 0) { - throw DbException.throwInternalError("Prepared.getObjectId() was called before"); + throw DbException.getInternalError("Prepared.getObjectId() was called before"); } - persistedObjectId = -persistedObjectId - 1; // while negative, it can be restored later + persistedObjectId = ~persistedObjectId; // while negative, it can be restored later return id; } /** * Get the SQL statement with the execution plan. * - * @param alwaysQuote quote all identifiers + * @param sqlFlags formatting flags * @return the execution plan */ - public String getPlanSQL(boolean alwaysQuote) { + public String getPlanSQL(int sqlFlags) { return null; } @@ -315,7 +332,7 @@ public void setPersistedObjectId(int i) { * * @param currentSession the new session */ - public void setSession(Session currentSession) { + public void setSession(SessionLocal currentSession) { this.session = currentSession; } @@ -326,19 +343,17 @@ public void setSession(Session currentSession) { * @param startTimeNanos when the statement was started * @param rowCount the query or update row count */ - void trace(long startTimeNanos, int rowCount) { + void trace(long startTimeNanos, long rowCount) { if (session.getTrace().isInfoEnabled() && startTimeNanos > 0) { long deltaTimeNanos = System.nanoTime() - startTimeNanos; String params = Trace.formatParams(parameters); - session.getTrace().infoSQL(sqlStatement, params, rowCount, - deltaTimeNanos / 1000 / 1000); + session.getTrace().infoSQL(sqlStatement, params, rowCount, deltaTimeNanos / 1_000_000L); } // startTime_nanos can be zero for the command that actually turns on // statistics if (session.getDatabase().getQueryStatistics() && startTimeNanos != 0) { long deltaTimeNanos = System.nanoTime() - startTimeNanos; - session.getDatabase().getQueryStatisticsData(). 
- update(toString(), deltaTimeNanos, rowCount); + session.getDatabase().getQueryStatisticsData().update(toString(), deltaTimeNanos, rowCount); } } @@ -379,11 +394,8 @@ public long getCurrentRowNumber() { */ private void setProgress() { if ((currentRowNumber & 127) == 0) { - session.getDatabase().setProgress( - DatabaseEventListener.STATE_STATEMENT_PROGRESS, - sqlStatement, - // TODO update interface - MathUtils.convertLongToInt(currentRowNumber), 0); + session.getDatabase().setProgress(DatabaseEventListener.STATE_STATEMENT_PROGRESS, sqlStatement, + currentRowNumber, 0L); } } @@ -397,36 +409,14 @@ public String toString() { return sqlStatement; } - /** - * Get the SQL snippet of the value list. - * - * @param values the value list - * @return the SQL snippet - */ - protected static String getSQL(Value[] values) { - StringBuilder builder = new StringBuilder(); - for (int i = 0, l = values.length; i < l; i++) { - if (i > 0) { - builder.append(", "); - } - Value v = values[i]; - if (v != null) { - v.getSQL(builder); - } - } - return builder.toString(); - } - /** * Get the SQL snippet of the expression list. 
* * @param list the expression list * @return the SQL snippet */ - protected static String getSimpleSQL(Expression[] list) { - StringBuilder builder = new StringBuilder(); - Expression.writeExpressions(builder, list, false); - return builder.toString(); + public static String getSimpleSQL(Expression[] list) { + return Expression.writeExpressions(new StringBuilder(), list, HasSQL.TRACE_SQL_FLAGS).toString(); } /** @@ -437,7 +427,7 @@ protected static String getSimpleSQL(Expression[] list) { * @param values the values of the row * @return the exception */ - protected DbException setRow(DbException e, int rowId, String values) { + protected DbException setRow(DbException e, long rowId, String values) { StringBuilder buff = new StringBuilder(); if (sqlStatement != null) { buff.append(sqlStatement); @@ -470,7 +460,14 @@ public void setCteCleanups(List cteCleanups) { this.cteCleanups = cteCleanups; } - public Session getSession() { + public final SessionLocal getSession() { return session; } + + /** + * Find and collect all DbObjects, this Prepared depends on. + * + * @param dependencies collection of dependencies to populate + */ + public void collectDependencies(HashSet dependencies) {} } diff --git a/h2/src/main/org/h2/command/Token.java b/h2/src/main/org/h2/command/Token.java new file mode 100644 index 0000000000..888a7e776a --- /dev/null +++ b/h2/src/main/org/h2/command/Token.java @@ -0,0 +1,757 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command; + +import static org.h2.util.ParserUtil.IDENTIFIER; +import static org.h2.util.ParserUtil.LAST_KEYWORD; + +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueInteger; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; + +/** + * Token. + */ +public abstract class Token implements Cloneable { + + /** + * Token with parameter. + */ + static final int PARAMETER = LAST_KEYWORD + 1; + + /** + * End of input. + */ + static final int END_OF_INPUT = PARAMETER + 1; + + /** + * Token with literal. + */ + static final int LITERAL = END_OF_INPUT + 1; + + /** + * The token "=". + */ + static final int EQUAL = LITERAL + 1; + + /** + * The token ">=". + */ + static final int BIGGER_EQUAL = EQUAL + 1; + + /** + * The token ">". + */ + static final int BIGGER = BIGGER_EQUAL + 1; + + /** + * The token "<". + */ + static final int SMALLER = BIGGER + 1; + + /** + * The token "<=". + */ + static final int SMALLER_EQUAL = SMALLER + 1; + + /** + * The token "<>" or "!=". + */ + static final int NOT_EQUAL = SMALLER_EQUAL + 1; + + /** + * The token "@". + */ + static final int AT = NOT_EQUAL + 1; + + /** + * The token "-". + */ + static final int MINUS_SIGN = AT + 1; + + /** + * The token "+". + */ + static final int PLUS_SIGN = MINUS_SIGN + 1; + + /** + * The token "||". + */ + static final int CONCATENATION = PLUS_SIGN + 1; + + /** + * The token "(". + */ + static final int OPEN_PAREN = CONCATENATION + 1; + + /** + * The token ")". + */ + static final int CLOSE_PAREN = OPEN_PAREN + 1; + + /** + * The token "&&". + */ + static final int SPATIAL_INTERSECTS = CLOSE_PAREN + 1; + + /** + * The token "*". + */ + static final int ASTERISK = SPATIAL_INTERSECTS + 1; + + /** + * The token ",". + */ + static final int COMMA = ASTERISK + 1; + + /** + * The token ".". 
+ */ + static final int DOT = COMMA + 1; + + /** + * The token "{". + */ + static final int OPEN_BRACE = DOT + 1; + + /** + * The token "}". + */ + static final int CLOSE_BRACE = OPEN_BRACE + 1; + + /** + * The token "/". + */ + static final int SLASH = CLOSE_BRACE + 1; + + /** + * The token "%". + */ + static final int PERCENT = SLASH + 1; + + /** + * The token ";". + */ + static final int SEMICOLON = PERCENT + 1; + + /** + * The token ":". + */ + static final int COLON = SEMICOLON + 1; + + /** + * The token "[". + */ + static final int OPEN_BRACKET = COLON + 1; + + /** + * The token "]". + */ + static final int CLOSE_BRACKET = OPEN_BRACKET + 1; + + /** + * The token "~". + */ + static final int TILDE = CLOSE_BRACKET + 1; + + /** + * The token "::". + */ + static final int COLON_COLON = TILDE + 1; + + /** + * The token ":=". + */ + static final int COLON_EQ = COLON_COLON + 1; + + /** + * The token "!~". + */ + static final int NOT_TILDE = COLON_EQ + 1; + + static final String[] TOKENS = { + // Unused + null, + // KEYWORD + null, + // IDENTIFIER + null, + // ALL + "ALL", + // AND + "AND", + // ANY + "ANY", + // ARRAY + "ARRAY", + // AS + "AS", + // ASYMMETRIC + "ASYMMETRIC", + // AUTHORIZATION + "AUTHORIZATION", + // BETWEEN + "BETWEEN", + // CASE + "CASE", + // CAST + "CAST", + // CHECK + "CHECK", + // CONSTRAINT + "CONSTRAINT", + // CROSS + "CROSS", + // CURRENT_CATALOG + "CURRENT_CATALOG", + // CURRENT_DATE + "CURRENT_DATE", + // CURRENT_PATH + "CURRENT_PATH", + // CURRENT_ROLE + "CURRENT_ROLE", + // CURRENT_SCHEMA + "CURRENT_SCHEMA", + // CURRENT_TIME + "CURRENT_TIME", + // CURRENT_TIMESTAMP + "CURRENT_TIMESTAMP", + // CURRENT_USER + "CURRENT_USER", + // DAY + "DAY", + // DEFAULT + "DEFAULT", + // DISTINCT + "DISTINCT", + // ELSE + "ELSE", + // END + "END", + // EXCEPT + "EXCEPT", + // EXISTS + "EXISTS", + // FALSE + "FALSE", + // FETCH + "FETCH", + // FOR + "FOR", + // FOREIGN + "FOREIGN", + // FROM + "FROM", + // FULL + "FULL", + // GROUP + "GROUP", + // 
HAVING + "HAVING", + // HOUR + "HOUR", + // IF + "IF", + // IN + "IN", + // INNER + "INNER", + // INTERSECT + "INTERSECT", + // INTERVAL + "INTERVAL", + // IS + "IS", + // JOIN + "JOIN", + // KEY + "KEY", + // LEFT + "LEFT", + // LIKE + "LIKE", + // LIMIT + "LIMIT", + // LOCALTIME + "LOCALTIME", + // LOCALTIMESTAMP + "LOCALTIMESTAMP", + // MINUS + "MINUS", + // MINUTE + "MINUTE", + // MONTH + "MONTH", + // NATURAL + "NATURAL", + // NOT + "NOT", + // NULL + "NULL", + // OFFSET + "OFFSET", + // ON + "ON", + // OR + "OR", + // ORDER + "ORDER", + // PRIMARY + "PRIMARY", + // QUALIFY + "QUALIFY", + // RIGHT + "RIGHT", + // ROW + "ROW", + // ROWNUM + "ROWNUM", + // SECOND + "SECOND", + // SELECT + "SELECT", + // SESSION_USER + "SESSION_USER", + // SET + "SET", + // SOME + "SOME", + // SYMMETRIC + "SYMMETRIC", + // SYSTEM_USER + "SYSTEM_USER", + // TABLE + "TABLE", + // TO + "TO", + // TRUE + "TRUE", + // UESCAPE + "UESCAPE", + // UNION + "UNION", + // UNIQUE + "UNIQUE", + // UNKNOWN + "UNKNOWN", + // USER + "USER", + // USING + "USING", + // VALUE + "VALUE", + // VALUES + "VALUES", + // WHEN + "WHEN", + // WHERE + "WHERE", + // WINDOW + "WINDOW", + // WITH + "WITH", + // YEAR + "YEAR", + // _ROWID_ + "_ROWID_", + // PARAMETER + "?", + // END_OF_INPUT + null, + // LITERAL + null, + // EQUAL + "=", + // BIGGER_EQUAL + ">=", + // BIGGER + ">", + // SMALLER + "<", + // SMALLER_EQUAL + "<=", + // NOT_EQUAL + "<>", + // AT + "@", + // MINUS_SIGN + "-", + // PLUS_SIGN + "+", + // CONCATENATION + "||", + // OPEN_PAREN + "(", + // CLOSE_PAREN + ")", + // SPATIAL_INTERSECTS + "&&", + // ASTERISK + "*", + // COMMA + ",", + // DOT + ".", + // OPEN_BRACE + "{", + // CLOSE_BRACE + "}", + // SLASH + "/", + // PERCENT + "%", + // SEMICOLON + ";", + // COLON + ":", + // OPEN_BRACKET + "[", + // CLOSE_BRACKET + "]", + // TILDE + "~", + // COLON_COLON + "::", + // COLON_EQ + ":=", + // NOT_TILDE + "!~", + // End + }; + + static class IdentifierToken extends Token { + + private String 
identifier; + + private final boolean quoted; + + private boolean unicode; + + IdentifierToken(int start, String identifier, boolean quoted, boolean unicode) { + super(start); + this.identifier = identifier; + this.quoted = quoted; + this.unicode = unicode; + } + + @Override + int tokenType() { + return IDENTIFIER; + } + + @Override + String asIdentifier() { + return identifier; + } + + @Override + boolean isQuoted() { + return quoted; + } + + @Override + boolean needsUnicodeConversion() { + return unicode; + } + + @Override + void convertUnicode(int uescape) { + if (unicode) { + identifier = StringUtils.decodeUnicodeStringSQL(identifier, uescape); + unicode = false; + } else { + throw DbException.getInternalError(); + } + } + + @Override + public String toString() { + return quoted ? StringUtils.quoteIdentifier(identifier) : identifier; + } + + } + + static final class KeywordToken extends Token { + + private final int type; + + KeywordToken(int start, int type) { + super(start); + this.type = type; + } + + @Override + int tokenType() { + return type; + } + + @Override + String asIdentifier() { + return TOKENS[type]; + } + + @Override + public String toString() { + return TOKENS[type]; + } + + } + + static final class KeywordOrIdentifierToken extends Token { + + private final int type; + + private final String identifier; + + KeywordOrIdentifierToken(int start, int type, String identifier) { + super(start); + this.type = type; + this.identifier = identifier; + } + + @Override + int tokenType() { + return type; + } + + @Override + String asIdentifier() { + return identifier; + } + + @Override + public String toString() { + return identifier; + } + + } + + static abstract class LiteralToken extends Token { + + Value value; + + LiteralToken(int start) { + super(start); + } + + @Override + final int tokenType() { + return LITERAL; + } + + @Override + public final String toString() { + return value(null).getTraceSQL(); + } + + } + + static final class BinaryStringToken 
extends LiteralToken { + + private final byte[] string; + + BinaryStringToken(int start, byte[] string) { + super(start); + this.string = string; + } + + @Override + Value value(CastDataProvider provider) { + if (value == null) { + value = ValueVarbinary.getNoCopy(string); + } + return value; + } + + } + + static final class CharacterStringToken extends LiteralToken { + + String string; + + private boolean unicode; + + CharacterStringToken(int start, String string, boolean unicode) { + super(start); + this.string = string; + this.unicode = unicode; + } + + @Override + Value value(CastDataProvider provider) { + if (value == null) { + value = ValueVarchar.get(string, provider); + } + return value; + } + + @Override + boolean needsUnicodeConversion() { + return unicode; + } + + @Override + void convertUnicode(int uescape) { + if (unicode) { + string = StringUtils.decodeUnicodeStringSQL(string, uescape); + unicode = false; + } else { + throw DbException.getInternalError(); + } + } + + } + + static final class IntegerToken extends LiteralToken { + + private final int number; + + IntegerToken(int start, int number) { + super(start); + this.number = number; + } + + @Override + Value value(CastDataProvider provider) { + if (value == null) { + value = ValueInteger.get(number); + } + return value; + } + + } + + static final class BigintToken extends LiteralToken { + + private final long number; + + BigintToken(int start, long number) { + super(start); + this.number = number; + } + + @Override + Value value(CastDataProvider provider) { + if (value == null) { + value = ValueBigint.get(number); + } + return value; + } + + } + + static final class ValueToken extends LiteralToken { + + ValueToken(int start, Value value) { + super(start); + this.value = value; + } + + @Override + Value value(CastDataProvider provider) { + return value; + } + + } + + static final class ParameterToken extends Token { + + int index; + + ParameterToken(int start, int index) { + super(start); + 
this.index = index; + } + + @Override + int tokenType() { + return PARAMETER; + } + + @Override + String asIdentifier() { + return "?"; + } + + int index() { + return index; + } + + @Override + public String toString() { + return index == 0 ? "?" : "?" + index; + } + + } + + static final class EndOfInputToken extends Token { + + EndOfInputToken(int start) { + super(start); + } + + @Override + int tokenType() { + return END_OF_INPUT; + } + + } + + private int start; + + Token(int start) { + this.start = start; + } + + final int start() { + return start; + } + + final void setStart(int offset) { + start = offset; + } + + final void subtractFromStart(int offset) { + start -= offset; + } + + abstract int tokenType(); + + String asIdentifier() { + return null; + } + + boolean isQuoted() { + return false; + } + + Value value(CastDataProvider provider) { + return null; + } + + boolean needsUnicodeConversion() { + return false; + } + + void convertUnicode(int uescape) { + throw DbException.getInternalError(); + } + + @Override + protected Token clone() { + try { + return (Token) super.clone(); + } catch (CloneNotSupportedException e) { + throw DbException.getInternalError(); + } + } + +} diff --git a/h2/src/main/org/h2/command/Tokenizer.java b/h2/src/main/org/h2/command/Tokenizer.java new file mode 100644 index 0000000000..f0c413e546 --- /dev/null +++ b/h2/src/main/org/h2/command/Tokenizer.java @@ -0,0 +1,1400 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command; + +import static org.h2.command.Token.ASTERISK; +import static org.h2.command.Token.AT; +import static org.h2.command.Token.BIGGER; +import static org.h2.command.Token.BIGGER_EQUAL; +import static org.h2.command.Token.CLOSE_BRACE; +import static org.h2.command.Token.CLOSE_BRACKET; +import static org.h2.command.Token.CLOSE_PAREN; +import static org.h2.command.Token.COLON; +import static org.h2.command.Token.COLON_COLON; +import static org.h2.command.Token.COLON_EQ; +import static org.h2.command.Token.COMMA; +import static org.h2.command.Token.CONCATENATION; +import static org.h2.command.Token.DOT; +import static org.h2.command.Token.EQUAL; +import static org.h2.command.Token.MINUS_SIGN; +import static org.h2.command.Token.NOT_EQUAL; +import static org.h2.command.Token.NOT_TILDE; +import static org.h2.command.Token.OPEN_BRACE; +import static org.h2.command.Token.OPEN_BRACKET; +import static org.h2.command.Token.OPEN_PAREN; +import static org.h2.command.Token.PERCENT; +import static org.h2.command.Token.PLUS_SIGN; +import static org.h2.command.Token.SEMICOLON; +import static org.h2.command.Token.SLASH; +import static org.h2.command.Token.SMALLER; +import static org.h2.command.Token.SMALLER_EQUAL; +import static org.h2.command.Token.SPATIAL_INTERSECTS; +import static org.h2.command.Token.TILDE; +import static org.h2.util.ParserUtil.ALL; +import static org.h2.util.ParserUtil.AND; +import static org.h2.util.ParserUtil.ANY; +import static org.h2.util.ParserUtil.ARRAY; +import static org.h2.util.ParserUtil.AS; +import static org.h2.util.ParserUtil.ASYMMETRIC; +import static org.h2.util.ParserUtil.AUTHORIZATION; +import static org.h2.util.ParserUtil.BETWEEN; +import static org.h2.util.ParserUtil.CASE; +import static org.h2.util.ParserUtil.CAST; +import static org.h2.util.ParserUtil.CHECK; +import static org.h2.util.ParserUtil.CONSTRAINT; +import static org.h2.util.ParserUtil.CROSS; +import static 
org.h2.util.ParserUtil.CURRENT_CATALOG; +import static org.h2.util.ParserUtil.CURRENT_DATE; +import static org.h2.util.ParserUtil.CURRENT_PATH; +import static org.h2.util.ParserUtil.CURRENT_ROLE; +import static org.h2.util.ParserUtil.CURRENT_SCHEMA; +import static org.h2.util.ParserUtil.CURRENT_TIME; +import static org.h2.util.ParserUtil.CURRENT_TIMESTAMP; +import static org.h2.util.ParserUtil.CURRENT_USER; +import static org.h2.util.ParserUtil.DAY; +import static org.h2.util.ParserUtil.DEFAULT; +import static org.h2.util.ParserUtil.DISTINCT; +import static org.h2.util.ParserUtil.ELSE; +import static org.h2.util.ParserUtil.END; +import static org.h2.util.ParserUtil.EXCEPT; +import static org.h2.util.ParserUtil.EXISTS; +import static org.h2.util.ParserUtil.FALSE; +import static org.h2.util.ParserUtil.FETCH; +import static org.h2.util.ParserUtil.FOR; +import static org.h2.util.ParserUtil.FOREIGN; +import static org.h2.util.ParserUtil.FROM; +import static org.h2.util.ParserUtil.FULL; +import static org.h2.util.ParserUtil.GROUP; +import static org.h2.util.ParserUtil.HAVING; +import static org.h2.util.ParserUtil.HOUR; +import static org.h2.util.ParserUtil.IDENTIFIER; +import static org.h2.util.ParserUtil.IF; +import static org.h2.util.ParserUtil.IN; +import static org.h2.util.ParserUtil.INNER; +import static org.h2.util.ParserUtil.INTERSECT; +import static org.h2.util.ParserUtil.INTERVAL; +import static org.h2.util.ParserUtil.IS; +import static org.h2.util.ParserUtil.JOIN; +import static org.h2.util.ParserUtil.KEY; +import static org.h2.util.ParserUtil.LEFT; +import static org.h2.util.ParserUtil.LIKE; +import static org.h2.util.ParserUtil.LIMIT; +import static org.h2.util.ParserUtil.LOCALTIME; +import static org.h2.util.ParserUtil.LOCALTIMESTAMP; +import static org.h2.util.ParserUtil.MINUS; +import static org.h2.util.ParserUtil.MINUTE; +import static org.h2.util.ParserUtil.MONTH; +import static org.h2.util.ParserUtil.NATURAL; +import static org.h2.util.ParserUtil.NOT; 
+import static org.h2.util.ParserUtil.NULL; +import static org.h2.util.ParserUtil.OFFSET; +import static org.h2.util.ParserUtil.ON; +import static org.h2.util.ParserUtil.OR; +import static org.h2.util.ParserUtil.ORDER; +import static org.h2.util.ParserUtil.PRIMARY; +import static org.h2.util.ParserUtil.QUALIFY; +import static org.h2.util.ParserUtil.RIGHT; +import static org.h2.util.ParserUtil.ROW; +import static org.h2.util.ParserUtil.ROWNUM; +import static org.h2.util.ParserUtil.SECOND; +import static org.h2.util.ParserUtil.SELECT; +import static org.h2.util.ParserUtil.SESSION_USER; +import static org.h2.util.ParserUtil.SET; +import static org.h2.util.ParserUtil.SOME; +import static org.h2.util.ParserUtil.SYMMETRIC; +import static org.h2.util.ParserUtil.SYSTEM_USER; +import static org.h2.util.ParserUtil.TABLE; +import static org.h2.util.ParserUtil.TO; +import static org.h2.util.ParserUtil.TRUE; +import static org.h2.util.ParserUtil.UESCAPE; +import static org.h2.util.ParserUtil.UNION; +import static org.h2.util.ParserUtil.UNIQUE; +import static org.h2.util.ParserUtil.UNKNOWN; +import static org.h2.util.ParserUtil.USER; +import static org.h2.util.ParserUtil.USING; +import static org.h2.util.ParserUtil.VALUE; +import static org.h2.util.ParserUtil.VALUES; +import static org.h2.util.ParserUtil.WHEN; +import static org.h2.util.ParserUtil.WHERE; +import static org.h2.util.ParserUtil.WINDOW; +import static org.h2.util.ParserUtil.WITH; +import static org.h2.util.ParserUtil.YEAR; +import static org.h2.util.ParserUtil._ROWID_; + +import java.io.ByteArrayOutputStream; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.ListIterator; + +import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.value.ValueBigint; +import org.h2.value.ValueDecfloat; +import org.h2.value.ValueNumeric; + +/** + * Tokenizer. 
+ */ +public final class Tokenizer { + + private final CastDataProvider provider; + + private final boolean identifiersToUpper; + + private final boolean identifiersToLower; + + private final BitSet nonKeywords; + + Tokenizer(CastDataProvider provider, boolean identifiersToUpper, boolean identifiersToLower, BitSet nonKeywords) { + this.provider = provider; + this.identifiersToUpper = identifiersToUpper; + this.identifiersToLower = identifiersToLower; + this.nonKeywords = nonKeywords; + } + + ArrayList tokenize(String sql, boolean stopOnCloseParen) { + ArrayList tokens = new ArrayList<>(); + int end = sql.length() - 1; + boolean foundUnicode = false; + int lastParameter = 0; + loop: for (int i = 0; i <= end;) { + int tokenStart = i; + char c = sql.charAt(i); + Token token; + switch (c) { + case '!': + if (i < end) { + char c2 = sql.charAt(++i); + if (c2 == '=') { + token = new Token.KeywordToken(tokenStart, NOT_EQUAL); + break; + } + if (c2 == '~') { + token = new Token.KeywordToken(tokenStart, NOT_TILDE); + break; + } + } + throw DbException.getSyntaxError(sql, tokenStart); + case '"': + case '`': + i = readQuotedIdentifier(sql, end, tokenStart, i, c, false, tokens); + continue loop; + case '#': + if (provider.getMode().supportPoundSymbolForColumnNames) { + i = readIdentifier(sql, end, tokenStart, i, c, tokens); + continue loop; + } + throw DbException.getSyntaxError(sql, tokenStart); + case '$': + if (i < end) { + char c2 = sql.charAt(i + 1); + if (c2 == '$') { + i += 2; + int stringEnd = sql.indexOf("$$", i); + if (stringEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + token = new Token.CharacterStringToken(tokenStart, sql.substring(i, stringEnd), false); + i = stringEnd + 1; + } else { + i = parseParameterIndex(sql, end, i, tokens); + lastParameter = assignParameterIndex(tokens, lastParameter); + continue loop; + } + } else { + token = new Token.ParameterToken(tokenStart, 0); + } + break; + case '%': + token = new 
Token.KeywordToken(tokenStart, PERCENT); + break; + case '&': + if (i < end && sql.charAt(i + 1) == '&') { + i++; + token = new Token.KeywordToken(tokenStart, SPATIAL_INTERSECTS); + break; + } + throw DbException.getSyntaxError(sql, tokenStart); + case '\'': + i = readCharacterString(sql, tokenStart, end, i, false, tokens); + continue loop; + case '(': + token = new Token.KeywordToken(tokenStart, OPEN_PAREN); + break; + case ')': + token = new Token.KeywordToken(tokenStart, CLOSE_PAREN); + if (stopOnCloseParen) { + tokens.add(token); + end = skipWhitespace(sql, end, i + 1) - 1; + break loop; + } + break; + case '*': + token = new Token.KeywordToken(tokenStart, ASTERISK); + break; + case '+': + token = new Token.KeywordToken(tokenStart, PLUS_SIGN); + break; + case ',': + token = new Token.KeywordToken(tokenStart, COMMA); + break; + case '-': + if (i < end && sql.charAt(i + 1) == '-') { + i = skipSimpleComment(sql, end, i); + continue loop; + } else { + token = new Token.KeywordToken(tokenStart, MINUS_SIGN); + } + break; + case '.': + if (i < end) { + char c2 = sql.charAt(i + 1); + if (c2 >= '0' && c2 <= '9') { + i = readNumeric(sql, tokenStart, end, i + 1, c2, false, false, tokens); + continue loop; + } + } + token = new Token.KeywordToken(tokenStart, DOT); + break; + case '/': + if (i < end) { + char c2 = sql.charAt(i + 1); + if (c2 == '*') { + i = skipBracketedComment(sql, tokenStart, end, i); + continue loop; + } else if (c2 == '/') { + i = skipSimpleComment(sql, end, i); + continue loop; + } + } + token = new Token.KeywordToken(tokenStart, SLASH); + break; + case '0': + if (i < end) { + char c2 = sql.charAt(i + 1); + if (c2 == 'X' || c2 == 'x') { + i = readHexNumber(sql, provider, tokenStart, end, i + 2, tokens); + continue loop; + } + } + //$FALL-THROUGH$ + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + i = readNumeric(sql, tokenStart, end, i + 1, c, tokens); + continue loop; + case ':': + if (i < 
end) { + char c2 = sql.charAt(i + 1); + if (c2 == ':') { + i++; + token = new Token.KeywordToken(tokenStart, COLON_COLON); + break; + } else if (c2 == '=') { + i++; + token = new Token.KeywordToken(tokenStart, COLON_EQ); + break; + } + } + token = new Token.KeywordToken(tokenStart, COLON); + break; + case ';': + token = new Token.KeywordToken(tokenStart, SEMICOLON); + break; + case '<': + if (i < end) { + char c2 = sql.charAt(i + 1); + if (c2 == '=') { + i++; + token = new Token.KeywordToken(tokenStart, SMALLER_EQUAL); + break; + } + if (c2 == '>') { + i++; + token = new Token.KeywordToken(tokenStart, NOT_EQUAL); + break; + } + } + token = new Token.KeywordToken(tokenStart, SMALLER); + break; + case '=': + token = new Token.KeywordToken(tokenStart, EQUAL); + break; + case '>': + if (i < end && sql.charAt(i + 1) == '=') { + i++; + token = new Token.KeywordToken(tokenStart, BIGGER_EQUAL); + break; + } + token = new Token.KeywordToken(tokenStart, BIGGER); + break; + case '?': { + if (i + 1 < end && sql.charAt(i + 1) == '?') { + char c3 = sql.charAt(i + 2); + if (c3 == '(') { + i += 2; + token = new Token.KeywordToken(tokenStart, OPEN_BRACKET); + break; + } + if (c3 == ')') { + i += 2; + token = new Token.KeywordToken(tokenStart, CLOSE_BRACKET); + break; + } + } + i = parseParameterIndex(sql, end, i, tokens); + lastParameter = assignParameterIndex(tokens, lastParameter); + continue loop; + } + case '@': + token = new Token.KeywordToken(tokenStart, AT); + break; + case 'A': + case 'a': + i = readA(sql, end, tokenStart, i, tokens); + continue loop; + case 'B': + case 'b': + i = readB(sql, end, tokenStart, i, tokens); + continue loop; + case 'C': + case 'c': + i = readC(sql, end, tokenStart, i, tokens); + continue loop; + case 'D': + case 'd': + i = readD(sql, end, tokenStart, i, tokens); + continue loop; + case 'E': + case 'e': + i = readE(sql, end, tokenStart, i, tokens); + continue loop; + case 'F': + case 'f': + i = readF(sql, end, tokenStart, i, tokens); + continue 
loop; + case 'G': + case 'g': + i = readG(sql, end, tokenStart, i, tokens); + continue loop; + case 'H': + case 'h': + i = readH(sql, end, tokenStart, i, tokens); + continue loop; + case 'I': + case 'i': + i = readI(sql, end, tokenStart, i, tokens); + continue loop; + case 'J': + case 'j': + i = readJ(sql, end, tokenStart, i, tokens); + continue loop; + case 'K': + case 'k': + i = readK(sql, end, tokenStart, i, tokens); + continue loop; + case 'L': + case 'l': + i = readL(sql, end, tokenStart, i, tokens); + continue loop; + case 'M': + case 'm': + i = readM(sql, end, tokenStart, i, tokens); + continue loop; + case 'N': + case 'n': + if (i < end && sql.charAt(i + 1) == '\'') { + i = readCharacterString(sql, tokenStart, end, i + 1, false, tokens); + } else { + i = readN(sql, end, tokenStart, i, tokens); + } + continue loop; + case 'O': + case 'o': + i = readO(sql, end, tokenStart, i, tokens); + continue loop; + case 'P': + case 'p': + i = readP(sql, end, tokenStart, i, tokens); + continue loop; + case 'Q': + case 'q': + i = readQ(sql, end, tokenStart, i, tokens); + continue loop; + case 'R': + case 'r': + i = readR(sql, end, tokenStart, i, tokens); + continue loop; + case 'S': + case 's': + i = readS(sql, end, tokenStart, i, tokens); + continue loop; + case 'T': + case 't': + i = readT(sql, end, tokenStart, i, tokens); + continue loop; + case 'U': + case 'u': + if (i + 1 < end && sql.charAt(i + 1) == '&') { + char c3 = sql.charAt(i + 2); + if (c3 == '"') { + i = readQuotedIdentifier(sql, end, tokenStart, i + 2, '"', true, tokens); + foundUnicode = true; + continue loop; + } else if (c3 == '\'') { + i = readCharacterString(sql, tokenStart, end, i + 2, true, tokens); + foundUnicode = true; + continue loop; + } + } + i = readU(sql, end, tokenStart, i, tokens); + continue loop; + case 'V': + case 'v': + i = readV(sql, end, tokenStart, i, tokens); + continue loop; + case 'W': + case 'w': + i = readW(sql, end, tokenStart, i, tokens); + continue loop; + case 'X': + case 
'x': + if (i < end && sql.charAt(i + 1) == '\'') { + i = readBinaryString(sql, tokenStart, end, i + 1, tokens); + } else { + i = readIdentifier(sql, end, tokenStart, i, c, tokens); + } + continue loop; + case 'Y': + case 'y': + i = readY(sql, end, tokenStart, i, tokens); + continue loop; + case 'Z': + case 'z': + i = readIdentifier(sql, end, tokenStart, i, c, tokens); + continue loop; + case '[': + if (provider.getMode().squareBracketQuotedNames) { + int identifierEnd = sql.indexOf(']', ++i); + if (identifierEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + token = new Token.IdentifierToken(tokenStart, sql.substring(i, identifierEnd), true, false); + i = identifierEnd; + } else { + token = new Token.KeywordToken(tokenStart, OPEN_BRACKET); + } + break; + case ']': + token = new Token.KeywordToken(tokenStart, CLOSE_BRACKET); + break; + case '_': + i = read_(sql, end, tokenStart, i, tokens); + continue loop; + case '{': + token = new Token.KeywordToken(tokenStart, OPEN_BRACE); + break; + case '|': + if (i < end && sql.charAt(++i) == '|') { + token = new Token.KeywordToken(tokenStart, CONCATENATION); + break; + } + throw DbException.getSyntaxError(sql, tokenStart); + case '}': + token = new Token.KeywordToken(tokenStart, CLOSE_BRACE); + break; + case '~': + token = new Token.KeywordToken(tokenStart, TILDE); + break; + default: + if (c <= ' ') { + i++; + continue loop; + } else { + int cp = Character.isHighSurrogate(c) ? 
sql.codePointAt(i++) : c; + if (Character.isSpaceChar(cp)) { + continue loop; + } + if (Character.isJavaIdentifierStart(cp)) { + i = readIdentifier(sql, end, tokenStart, i, cp, tokens); + continue loop; + } + throw DbException.getSyntaxError(sql, tokenStart); + } + } + tokens.add(token); + i++; + } + if (foundUnicode) { + processUescape(sql, tokens); + } + tokens.add(new Token.EndOfInputToken(end + 1)); + return tokens; + } + + private int readIdentifier(String sql, int end, int tokenStart, int i, int cp, ArrayList tokens) { + if (cp >= Character.MIN_SUPPLEMENTARY_CODE_POINT) { + i++; + } + int endIndex = findIdentifierEnd(sql, end, i + Character.charCount(cp) - 1); + tokens.add(new Token.IdentifierToken(tokenStart, extractIdentifier(sql, tokenStart, endIndex), false, false)); + return endIndex; + } + + private int readA(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (length == 2) { + type = (sql.charAt(tokenStart + 1) & 0xffdf) == 'S' ? AS : IDENTIFIER; + } else { + if (eq("ALL", sql, tokenStart, length)) { + type = ALL; + } else if (eq("AND", sql, tokenStart, length)) { + type = AND; + } else if (eq("ANY", sql, tokenStart, length)) { + type = ANY; + } else if (eq("ARRAY", sql, tokenStart, length)) { + type = ARRAY; + } else if (eq("ASYMMETRIC", sql, tokenStart, length)) { + type = ASYMMETRIC; + } else if (eq("AUTHORIZATION", sql, tokenStart, length)) { + type = AUTHORIZATION; + } else { + type = IDENTIFIER; + } + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readB(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("BETWEEN", sql, tokenStart, length) ? 
BETWEEN : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readC(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("CASE", sql, tokenStart, length)) { + type = CASE; + } else if (eq("CAST", sql, tokenStart, length)) { + type = CAST; + } else if (eq("CHECK", sql, tokenStart, length)) { + type = CHECK; + } else if (eq("CONSTRAINT", sql, tokenStart, length)) { + type = CONSTRAINT; + } else if (eq("CROSS", sql, tokenStart, length)) { + type = CROSS; + } else if (length >= 12 && eq("CURRENT_", sql, tokenStart, 8)) { + type = getTokenTypeCurrent(sql, tokenStart, length); + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private static int getTokenTypeCurrent(String s, int tokenStart, int length) { + tokenStart += 8; + switch (length) { + case 12: + if (eqCurrent("CURRENT_DATE", s, tokenStart, length)) { + return CURRENT_DATE; + } else if (eqCurrent("CURRENT_PATH", s, tokenStart, length)) { + return CURRENT_PATH; + } else if (eqCurrent("CURRENT_ROLE", s, tokenStart, length)) { + return CURRENT_ROLE; + } else if (eqCurrent("CURRENT_TIME", s, tokenStart, length)) { + return CURRENT_TIME; + } else if (eqCurrent("CURRENT_USER", s, tokenStart, length)) { + return CURRENT_USER; + } + break; + case 14: + if (eqCurrent("CURRENT_SCHEMA", s, tokenStart, length)) { + return CURRENT_SCHEMA; + } + break; + case 15: + if (eqCurrent("CURRENT_CATALOG", s, tokenStart, length)) { + return CURRENT_CATALOG; + } + break; + case 17: + if (eqCurrent("CURRENT_TIMESTAMP", s, tokenStart, length)) { + return CURRENT_TIMESTAMP; + } + } + return IDENTIFIER; + } + + private static boolean eqCurrent(String expected, String s, int start, int length) { + for (int i = 8; i < length; i++) { + if (expected.charAt(i) != (s.charAt(start++) & 0xffdf)) { + return 
false; + } + } + return true; + } + + private int readD(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("DAY", sql, tokenStart, length)) { + type = DAY; + } else if (eq("DEFAULT", sql, tokenStart, length)) { + type = DEFAULT; + } else if (eq("DISTINCT", sql, tokenStart, length)) { + type = DISTINCT; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readE(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("ELSE", sql, tokenStart, length)) { + type = ELSE; + } else if (eq("END", sql, tokenStart, length)) { + type = END; + } else if (eq("EXCEPT", sql, tokenStart, length)) { + type = EXCEPT; + } else if (eq("EXISTS", sql, tokenStart, length)) { + type = EXISTS; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readF(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("FETCH", sql, tokenStart, length)) { + type = FETCH; + } else if (eq("FROM", sql, tokenStart, length)) { + type = FROM; + } else if (eq("FOR", sql, tokenStart, length)) { + type = FOR; + } else if (eq("FOREIGN", sql, tokenStart, length)) { + type = FOREIGN; + } else if (eq("FULL", sql, tokenStart, length)) { + type = FULL; + } else if (eq("FALSE", sql, tokenStart, length)) { + type = FALSE; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readG(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = 
eq("GROUP", sql, tokenStart, length) ? GROUP : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readH(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("HAVING", sql, tokenStart, length)) { + type = HAVING; + } else if (eq("HOUR", sql, tokenStart, length)) { + type = HOUR; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readI(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (length == 2) { + switch ((sql.charAt(tokenStart + 1) & 0xffdf)) { + case 'F': + type = IF; + break; + case 'N': + type = IN; + break; + case 'S': + type = IS; + break; + default: + type = IDENTIFIER; + } + } else { + if (eq("INNER", sql, tokenStart, length)) { + type = INNER; + } else if (eq("INTERSECT", sql, tokenStart, length)) { + type = INTERSECT; + } else if (eq("INTERVAL", sql, tokenStart, length)) { + type = INTERVAL; + } else { + type = IDENTIFIER; + } + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readJ(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("JOIN", sql, tokenStart, length) ? JOIN : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readK(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("KEY", sql, tokenStart, length) ? 
KEY : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readL(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("LEFT", sql, tokenStart, length)) { + type = LEFT; + } else if (eq("LIMIT", sql, tokenStart, length)) { + type = provider.getMode().limit ? LIMIT : IDENTIFIER; + } else if (eq("LIKE", sql, tokenStart, length)) { + type = LIKE; + } else if (eq("LOCALTIME", sql, tokenStart, length)) { + type = LOCALTIME; + } else if (eq("LOCALTIMESTAMP", sql, tokenStart, length)) { + type = LOCALTIMESTAMP; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readM(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("MINUS", sql, tokenStart, length)) { + type = provider.getMode().minusIsExcept ? 
MINUS : IDENTIFIER; + } else if (eq("MINUTE", sql, tokenStart, length)) { + type = MINUTE; + } else if (eq("MONTH", sql, tokenStart, length)) { + type = MONTH; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readN(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("NOT", sql, tokenStart, length)) { + type = NOT; + } else if (eq("NATURAL", sql, tokenStart, length)) { + type = NATURAL; + } else if (eq("NULL", sql, tokenStart, length)) { + type = NULL; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readO(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (length == 2) { + switch ((sql.charAt(tokenStart + 1) & 0xffdf)) { + case 'N': + type = ON; + break; + case 'R': + type = OR; + break; + default: + type = IDENTIFIER; + } + } else { + if (eq("OFFSET", sql, tokenStart, length)) { + type = OFFSET; + } else if (eq("ORDER", sql, tokenStart, length)) { + type = ORDER; + } else { + type = IDENTIFIER; + } + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readP(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("PRIMARY", sql, tokenStart, length) ? PRIMARY : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readQ(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("QUALIFY", sql, tokenStart, length) ? 
QUALIFY : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readR(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("RIGHT", sql, tokenStart, length)) { + type = RIGHT; + } else if (eq("ROW", sql, tokenStart, length)) { + type = ROW; + } else if (eq("ROWNUM", sql, tokenStart, length)) { + type = ROWNUM; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readS(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("SECOND", sql, tokenStart, length)) { + type = SECOND; + } else if (eq("SELECT", sql, tokenStart, length)) { + type = SELECT; + } else if (eq("SESSION_USER", sql, tokenStart, length)) { + type = SESSION_USER; + } else if (eq("SET", sql, tokenStart, length)) { + type = SET; + } else if (eq("SOME", sql, tokenStart, length)) { + type = SOME; + } else if (eq("SYMMETRIC", sql, tokenStart, length)) { + type = SYMMETRIC; + } else if (eq("SYSTEM_USER", sql, tokenStart, length)) { + type = SYSTEM_USER; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readT(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (length == 2) { + type = (sql.charAt(tokenStart + 1) & 0xffdf) == 'O' ? 
TO : IDENTIFIER; + } else { + if (eq("TABLE", sql, tokenStart, length)) { + type = TABLE; + } else if (eq("TRUE", sql, tokenStart, length)) { + type = TRUE; + } else { + type = IDENTIFIER; + } + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readU(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("UESCAPE", sql, tokenStart, length)) { + type = UESCAPE; + } else if (eq("UNION", sql, tokenStart, length)) { + type = UNION; + } else if (eq("UNIQUE", sql, tokenStart, length)) { + type = UNIQUE; + } else if (eq("UNKNOWN", sql, tokenStart, length)) { + type = UNKNOWN; + } else if (eq("USER", sql, tokenStart, length)) { + type = USER; + } else if (eq("USING", sql, tokenStart, length)) { + type = USING; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readV(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("VALUE", sql, tokenStart, length)) { + type = VALUE; + } else if (eq("VALUES", sql, tokenStart, length)) { + type = VALUES; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readW(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("WHEN", sql, tokenStart, length)) { + type = WHEN; + } else if (eq("WHERE", sql, tokenStart, length)) { + type = WHERE; + } else if (eq("WINDOW", sql, tokenStart, length)) { + type = WINDOW; + } else if (eq("WITH", sql, tokenStart, length)) { + type = WITH; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + 
private int readY(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("YEAR", sql, tokenStart, length) ? YEAR : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int read_(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int type = endIndex - tokenStart == 7 && "_ROWID_".regionMatches(true, 1, sql, tokenStart + 1, 6) ? _ROWID_ + : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readIdentifierOrKeyword(String sql, int tokenStart, ArrayList tokens, int endIndex, int type) { + Token token; + if (type == IDENTIFIER) { + token = new Token.IdentifierToken(tokenStart, extractIdentifier(sql, tokenStart, endIndex), false, false); + } else if (nonKeywords != null && nonKeywords.get(type)) { + token = new Token.KeywordOrIdentifierToken(tokenStart, type, extractIdentifier(sql, tokenStart, endIndex)); + } else { + token = new Token.KeywordToken(tokenStart, type); + } + tokens.add(token); + return endIndex; + } + + private static boolean eq(String expected, String s, int start, int length) { + if (length != expected.length()) { + return false; + } + for (int i = 1; i < length; i++) { + if (expected.charAt(i) != (s.charAt(++start) & 0xffdf)) { + return false; + } + } + return true; + } + + private int findIdentifierEnd(String sql, int end, int i) { + i++; + for (;;) { + int cp; + if (i > end || (!Character.isJavaIdentifierPart(cp = sql.codePointAt(i)) + && (cp != '#' || !provider.getMode().supportPoundSymbolForColumnNames))) { + break; + } + i += Character.charCount(cp); + } + return i; + } + + private String extractIdentifier(String sql, int beginIndex, int endIndex) { + return convertCase(sql.substring(beginIndex, endIndex)); + } + + private int readQuotedIdentifier(String sql, int end, int 
tokenStart, int i, char c, boolean unicode, + ArrayList tokens) { + int identifierEnd = sql.indexOf(c, ++i); + if (identifierEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + String s = sql.substring(i, identifierEnd); + i = identifierEnd + 1; + if (i <= end && sql.charAt(i) == c) { + StringBuilder builder = new StringBuilder(s); + do { + identifierEnd = sql.indexOf(c, i + 1); + if (identifierEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + builder.append(sql, i, identifierEnd); + i = identifierEnd + 1; + } while (i <= end && sql.charAt(i) == c); + s = builder.toString(); + } + if (c == '`') { + s = convertCase(s); + } + tokens.add(new Token.IdentifierToken(tokenStart, s, true, unicode)); + return i; + } + + private String convertCase(String s) { + if (identifiersToUpper) { + s = StringUtils.toUpperEnglish(s); + } else if (identifiersToLower) { + s = StringUtils.toLowerEnglish(s); + } + return s; + } + + private static int readBinaryString(String sql, int tokenStart, int end, int i, ArrayList tokens) { + ByteArrayOutputStream result = new ByteArrayOutputStream(); + int stringEnd; + do { + stringEnd = sql.indexOf('\'', ++i); + if (stringEnd < 0 || stringEnd < end && sql.charAt(stringEnd + 1) == '\'') { + throw DbException.getSyntaxError(sql, tokenStart); + } + StringUtils.convertHexWithSpacesToBytes(result, sql, i, stringEnd); + i = skipWhitespace(sql, end, stringEnd + 1); + } while (i <= end && sql.charAt(i) == '\''); + tokens.add(new Token.BinaryStringToken(tokenStart, result.toByteArray())); + return i; + } + + private static int readCharacterString(String sql, int tokenStart, int end, int i, boolean unicode, + ArrayList tokens) { + String s = null; + StringBuilder builder = null; + int stringEnd; + do { + stringEnd = sql.indexOf('\'', ++i); + if (stringEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + if (s == null) { + s = sql.substring(i, stringEnd); + } else { + if (builder == null) { + builder = 
new StringBuilder(s); + } + builder.append(sql, i, stringEnd); + } + i = stringEnd + 1; + if (i <= end && sql.charAt(i) == '\'') { + if (builder == null) { + builder = new StringBuilder(s); + } + do { + stringEnd = sql.indexOf('\'', i + 1); + if (stringEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + builder.append(sql, i, stringEnd); + i = stringEnd + 1; + } while (i <= end && sql.charAt(i) == '\''); + } + i = skipWhitespace(sql, end, i); + } while (i <= end && sql.charAt(i) == '\''); + if (builder != null) { + s = builder.toString(); + } + tokens.add(new Token.CharacterStringToken(tokenStart, s, unicode)); + return i; + } + + private static int skipWhitespace(String sql, int end, int i) { + while (i <= end) { + int cp = sql.codePointAt(i); + if (!Character.isWhitespace(cp)) { + if (cp == '/' && i < end) { + char c2 = sql.charAt(i + 1); + if (c2 == '*') { + i = skipBracketedComment(sql, i, end, i); + continue; + } else if (c2 == '/') { + i = skipSimpleComment(sql, end, i); + continue; + } + } + break; + } + i += Character.charCount(cp); + } + return i; + } + + private static int readHexNumber(String sql, CastDataProvider provider, int tokenStart, int end, int i, + ArrayList tokens) { + if (provider.getMode().zeroExLiteralsAreBinaryStrings) { + int start = i; + for (char c; i <= end + && (((c = sql.charAt(i)) >= '0' && c <= '9') || ((c &= 0xffdf) >= 'A' && c <= 'F'));) { + i++; + } + if (i <= end && Character.isJavaIdentifierPart(sql.codePointAt(i))) { + throw DbException.get(ErrorCode.HEX_STRING_WRONG_1, sql.substring(start, i + 1)); + } + tokens.add(new Token.BinaryStringToken(start, StringUtils.convertHexToBytes(sql.substring(start, i)))); + return i; + } else { + if (i > end) { + throw DbException.getSyntaxError(sql, tokenStart, "Hex number"); + } + int start = i; + long number = 0; + char c; + do { + c = sql.charAt(i); + if (c >= '0' && c <= '9') { + number = (number << 4) + c - '0'; + // Convert a-z to A-Z + } else if ((c &= 0xffdf) >= 
'A' && c <= 'F') { + number = (number << 4) + c - ('A' - 10); + } else if (i == start) { + throw DbException.getSyntaxError(sql, tokenStart, "Hex number"); + } else { + break; + } + if (number > Integer.MAX_VALUE) { + while (++i <= end + && (((c = sql.charAt(i)) >= '0' && c <= '9') || ((c &= 0xffdf) >= 'A' && c <= 'F'))) { + } + return finishBigInteger(sql, tokenStart, end, i, start, i <= end && c == 'L', 16, tokens); + } + } while (++i <= end); + + boolean bigint = i <= end && c == 'L'; + if (bigint) { + i++; + } + if (i <= end && Character.isJavaIdentifierPart(sql.codePointAt(i))) { + throw DbException.getSyntaxError(sql, tokenStart, "Hex number"); + } + tokens.add(bigint ? new Token.BigintToken(start, number) : new Token.IntegerToken(start, (int) number)); + return i; + } + } + + private static int readNumeric(String sql, int tokenStart, int end, int i, char c, ArrayList tokens) { + long number = c - '0'; + for (; i <= end; i++) { + c = sql.charAt(i); + if (c < '0' || c > '9') { + switch (c) { + case '.': + return readNumeric(sql, tokenStart, end, i, c, false, false, tokens); + case 'E': + case 'e': + return readNumeric(sql, tokenStart, end, i, c, false, true, tokens); + case 'L': + case 'l': + return finishBigInteger(sql, tokenStart, end, i, tokenStart, true, 10, tokens); + } + break; + } + number = number * 10 + (c - '0'); + if (number > Integer.MAX_VALUE) { + return readNumeric(sql, tokenStart, end, i, c, true, false, tokens); + } + } + tokens.add(new Token.IntegerToken(tokenStart, (int) number)); + return i; + } + + private static int readNumeric(String sql, int tokenStart, int end, int i, char c, boolean integer, + boolean approximate, ArrayList tokens) { + if (!approximate) { + while (++i <= end) { + c = sql.charAt(i); + if (c == '.') { + integer = false; + } else if (c < '0' || c > '9') { + break; + } + } + } + if (i <= end && (c == 'E' || c == 'e')) { + integer = false; + approximate = true; + if (i == end) { + throw DbException.getSyntaxError(sql, 
tokenStart); + } + c = sql.charAt(++i); + if (c == '+' || c == '-') { + if (i == end) { + throw DbException.getSyntaxError(sql, tokenStart); + } + c = sql.charAt(++i); + } + if (c < '0' || c > '9') { + throw DbException.getSyntaxError(sql, tokenStart); + } + while (++i <= end && (c = sql.charAt(i)) >= '0' && c <= '9') { + // go until the first non-number + } + } + if (integer) { + return finishBigInteger(sql, tokenStart, end, i, tokenStart, i < end && c == 'L' || c == 'l', 10, tokens); + } + BigDecimal bd; + String string = sql.substring(tokenStart, i); + try { + bd = new BigDecimal(string); + } catch (NumberFormatException e) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, string); + } + tokens.add(new Token.ValueToken(tokenStart, approximate ? ValueDecfloat.get(bd) : ValueNumeric.get(bd))); + return i; + } + + private static int finishBigInteger(String sql, int tokenStart, int end, int i, int start, boolean asBigint, + int radix, ArrayList tokens) { + int endIndex = i; + if (asBigint) { + i++; + } + if (radix == 16 && i <= end && Character.isJavaIdentifierPart(sql.codePointAt(i))) { + throw DbException.getSyntaxError(sql, tokenStart, "Hex number"); + } + BigInteger bigInteger = new BigInteger(sql.substring(start, endIndex), radix); + Token token; + if (bigInteger.compareTo(ValueBigint.MAX_BI) > 0) { + if (asBigint) { + throw DbException.getSyntaxError(sql, tokenStart); + } + token = new Token.ValueToken(tokenStart, ValueNumeric.get(bigInteger)); + } else { + token = new Token.BigintToken(start, bigInteger.longValue()); + } + tokens.add(token); + return i; + } + + private static int skipBracketedComment(String sql, int tokenStart, int end, int i) { + i += 2; + for (int level = 1; level > 0;) { + for (;;) { + if (i >= end) { + throw DbException.getSyntaxError(sql, tokenStart); + } + char c = sql.charAt(i++); + if (c == '*') { + if (sql.charAt(i) == '/') { + level--; + i++; + break; + } + } else if (c == '/' && sql.charAt(i) == '*') { + level++; + 
i++; + } + } + } + return i; + } + + private static int skipSimpleComment(String sql, int end, int i) { + i += 2; + for (char c; i <= end && (c = sql.charAt(i)) != '\n' && c != '\r'; i++) { + // + } + return i; + } + + private static int parseParameterIndex(String sql, int end, int i, ArrayList tokens) { + int tokenStart = i; + long number = 0; + for (char c; ++i <= end && (c = sql.charAt(i)) >= '0' && c <= '9';) { + number = number * 10 + (c - '0'); + if (number > Integer.MAX_VALUE) { + throw DbException.getInvalidValueException("parameter index", number); + } + } + if (i > tokenStart + 1 && number == 0) { + throw DbException.getInvalidValueException("parameter index", number); + } + tokens.add(new Token.ParameterToken(tokenStart, (int) number)); + return i; + } + + private static int assignParameterIndex(ArrayList tokens, int lastParameter) { + Token.ParameterToken parameter = (Token.ParameterToken) tokens.get(tokens.size() - 1); + if (parameter.index == 0) { + if (lastParameter < 0) { + throw DbException.get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS); + } + parameter.index = ++lastParameter; + } else if (lastParameter > 0) { + throw DbException.get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS); + } else { + lastParameter = -1; + } + return lastParameter; + } + + private static void processUescape(String sql, ArrayList tokens) { + ListIterator i = tokens.listIterator(); + while (i.hasNext()) { + Token token = i.next(); + if (token.needsUnicodeConversion()) { + int uescape = '\\'; + condition: if (i.hasNext()) { + Token t2 = i.next(); + if (t2.tokenType() == UESCAPE) { + i.remove(); + if (i.hasNext()) { + Token t3 = i.next(); + i.remove(); + if (t3 instanceof Token.CharacterStringToken) { + String s = ((Token.CharacterStringToken) t3).string; + if (s.codePointCount(0, s.length()) == 1) { + int escape = s.codePointAt(0); + if (!Character.isWhitespace(escape) && (escape < '0' || escape > '9') + && (escape < 'A' || escape > 'F') && (escape < 'a' || 
escape > 'f')) { + switch (escape) { + default: + uescape = escape; + break condition; + case '"': + case '\'': + case '+': + } + } + } + } + } + throw DbException.getSyntaxError(sql, t2.start() + 7, "''"); + } + } + token.convertUnicode(uescape); + } + } + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterDomain.java b/h2/src/main/org/h2/command/ddl/AlterDomain.java new file mode 100644 index 0000000000..4b96f6828d --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomain.java @@ -0,0 +1,111 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import java.util.function.BiPredicate; + +import org.h2.api.ErrorCode; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; +import org.h2.table.Column; +import org.h2.table.Table; + +/** + * The base class for ALTER DOMAIN commands. + */ +public abstract class AlterDomain extends SchemaOwnerCommand { + + /** + * Processes all columns and domains that use the specified domain. 
+ * + * @param session + * the session + * @param domain + * the domain to process + * @param columnProcessor + * column handler + * @param domainProcessor + * domain handler + * @param recompileExpressions + * whether processed expressions need to be recompiled + */ + public static void forAllDependencies(SessionLocal session, Domain domain, + BiPredicate columnProcessor, BiPredicate domainProcessor, + boolean recompileExpressions) { + Database db = session.getDatabase(); + for (Schema schema : db.getAllSchemasNoMeta()) { + for (Domain targetDomain : schema.getAllDomains()) { + if (targetDomain.getDomain() == domain) { + if (domainProcessor == null || domainProcessor.test(domain, targetDomain)) { + if (recompileExpressions) { + domain.prepareExpressions(session); + } + db.updateMeta(session, targetDomain); + } + } + } + for (Table t : schema.getAllTablesAndViews(null)) { + if (forTable(session, domain, columnProcessor, recompileExpressions, t)) { + db.updateMeta(session, t); + } + } + } + for (Table t : session.getLocalTempTables()) { + forTable(session, domain, columnProcessor, recompileExpressions, t); + } + } + + private static boolean forTable(SessionLocal session, Domain domain, BiPredicate columnProcessor, + boolean recompileExpressions, Table t) { + boolean modified = false; + for (Column targetColumn : t.getColumns()) { + if (targetColumn.getDomain() == domain) { + boolean m = columnProcessor == null || columnProcessor.test(domain, targetColumn); + if (m) { + if (recompileExpressions) { + targetColumn.prepareExpressions(session); + } + modified = true; + } + } + } + return modified; + } + + String domainName; + + boolean ifDomainExists; + + AlterDomain(SessionLocal session, Schema schema) { + super(session, schema); + } + + public final void setDomainName(String domainName) { + this.domainName = domainName; + } + + public final void setIfDomainExists(boolean b) { + ifDomainExists = b; + } + + @Override + final long update(Schema schema) { + Domain domain = 
getSchema().findDomain(domainName); + if (domain == null) { + if (ifDomainExists) { + return 0; + } + throw DbException.get(ErrorCode.DOMAIN_NOT_FOUND_1, domainName); + } + return update(schema, domain); + } + + abstract long update(Schema schema, Domain domain); + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainAddConstraint.java b/h2/src/main/org/h2/command/ddl/AlterDomainAddConstraint.java new file mode 100644 index 0000000000..d8b8bcef52 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomainAddConstraint.java @@ -0,0 +1,105 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.constraint.ConstraintDomain; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; + +/** + * This class represents the statement ALTER DOMAIN ADD CONSTRAINT + */ +public class AlterDomainAddConstraint extends AlterDomain { + + private String constraintName; + private Expression checkExpression; + private String comment; + private boolean checkExisting; + private final boolean ifNotExists; + + public AlterDomainAddConstraint(SessionLocal session, Schema schema, boolean ifNotExists) { + super(session, schema); + this.ifNotExists = ifNotExists; + } + + private String generateConstraintName(Domain domain) { + if (constraintName == null) { + constraintName = getSchema().getUniqueDomainConstraintName(session, domain); + } + return constraintName; + } + + @Override + long update(Schema schema, Domain domain) { + try { + return tryUpdate(schema, domain); + } finally { + getSchema().freeUniqueName(constraintName); + } + } + + /** + * Try to execute the statement. 
+ * + * @param schema the schema + * @param domain the domain + * @return the update count + */ + private int tryUpdate(Schema schema, Domain domain) { + if (constraintName != null && schema.findConstraint(session, constraintName) != null) { + if (ifNotExists) { + return 0; + } + throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, constraintName); + } + Database db = session.getDatabase(); + db.lockMeta(session); + + int id = getObjectId(); + String name = generateConstraintName(domain); + ConstraintDomain constraint = new ConstraintDomain(schema, id, name, domain); + constraint.setExpression(session, checkExpression); + if (checkExisting) { + constraint.checkExistingData(session); + } + constraint.setComment(comment); + db.addSchemaObject(session, constraint); + domain.addConstraint(constraint); + return 0; + } + + public void setConstraintName(String constraintName) { + this.constraintName = constraintName; + } + + public String getConstraintName() { + return constraintName; + } + + @Override + public int getType() { + return CommandInterface.ALTER_DOMAIN_ADD_CONSTRAINT; + } + + public void setCheckExpression(Expression expression) { + this.checkExpression = expression; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public void setCheckExisting(boolean b) { + this.checkExisting = b; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainDropConstraint.java b/h2/src/main/org/h2/command/ddl/AlterDomainDropConstraint.java new file mode 100644 index 0000000000..df9efaa5a8 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomainDropConstraint.java @@ -0,0 +1,54 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.constraint.ConstraintDomain; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; + +/** + * This class represents the statement ALTER DOMAIN DROP CONSTRAINT + */ +public class AlterDomainDropConstraint extends AlterDomain { + + private String constraintName; + private final boolean ifConstraintExists; + + public AlterDomainDropConstraint(SessionLocal session, Schema schema, boolean ifConstraintExists) { + super(session, schema); + this.ifConstraintExists = ifConstraintExists; + } + + public void setConstraintName(String string) { + constraintName = string; + } + + @Override + long update(Schema schema, Domain domain) { + Constraint constraint = schema.findConstraint(session, constraintName); + if (constraint == null || constraint.getConstraintType() != Type.DOMAIN + || ((ConstraintDomain) constraint).getDomain() != domain) { + if (!ifConstraintExists) { + throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, constraintName); + } + } else { + session.getDatabase().removeSchemaObject(session, constraint); + } + return 0; + } + + @Override + public int getType() { + return CommandInterface.ALTER_DOMAIN_DROP_CONSTRAINT; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainExpressions.java b/h2/src/main/org/h2/command/ddl/AlterDomainExpressions.java new file mode 100644 index 0000000000..a5d519e379 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomainExpressions.java @@ -0,0 +1,92 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.command.CommandInterface; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; +import org.h2.table.Column; +import org.h2.table.ColumnTemplate; + +/** + * This class represents the statements + * ALTER DOMAIN SET DEFAULT + * ALTER DOMAIN DROP DEFAULT + * ALTER DOMAIN SET ON UPDATE + * ALTER DOMAIN DROP ON UPDATE + */ +public class AlterDomainExpressions extends AlterDomain { + + private final int type; + + private Expression expression; + + public AlterDomainExpressions(SessionLocal session, Schema schema, int type) { + super(session, schema); + this.type = type; + } + + public void setExpression(Expression expression) { + this.expression = expression; + } + + @Override + long update(Schema schema, Domain domain) { + switch (type) { + case CommandInterface.ALTER_DOMAIN_DEFAULT: + domain.setDefaultExpression(session, expression); + break; + case CommandInterface.ALTER_DOMAIN_ON_UPDATE: + domain.setOnUpdateExpression(session, expression); + break; + default: + throw DbException.getInternalError("type=" + type); + } + if (expression != null) { + forAllDependencies(session, domain, this::copyColumn, this::copyDomain, true); + } + session.getDatabase().updateMeta(session, domain); + return 0; + } + + private boolean copyColumn(Domain domain, Column targetColumn) { + return copyExpressions(session, domain, targetColumn); + } + + private boolean copyDomain(Domain domain, Domain targetDomain) { + return copyExpressions(session, domain, targetDomain); + } + + private boolean copyExpressions(SessionLocal session, Domain domain, ColumnTemplate targetColumn) { + switch (type) { + case CommandInterface.ALTER_DOMAIN_DEFAULT: { + Expression e = domain.getDefaultExpression(); + if (e != null && targetColumn.getDefaultExpression() == null) { + 
targetColumn.setDefaultExpression(session, e); + return true; + } + break; + } + case CommandInterface.ALTER_DOMAIN_ON_UPDATE: { + Expression e = domain.getOnUpdateExpression(); + if (e != null && targetColumn.getOnUpdateExpression() == null) { + targetColumn.setOnUpdateExpression(session, e); + return true; + } + } + } + return false; + } + + @Override + public int getType() { + return type; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainRename.java b/h2/src/main/org/h2/command/ddl/AlterDomainRename.java new file mode 100644 index 0000000000..f0b65e9705 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomainRename.java @@ -0,0 +1,52 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; + +/** + * This class represents the statement + * ALTER DOMAIN RENAME + */ +public class AlterDomainRename extends AlterDomain { + + private String newDomainName; + + public AlterDomainRename(SessionLocal session, Schema schema) { + super(session, schema); + } + + public void setNewDomainName(String name) { + newDomainName = name; + } + + @Override + long update(Schema schema, Domain domain) { + Domain d = schema.findDomain(newDomainName); + if (d != null) { + if (domain != d) { + throw DbException.get(ErrorCode.DOMAIN_ALREADY_EXISTS_1, newDomainName); + } + if (newDomainName.equals(domain.getName())) { + return 0; + } + } + session.getDatabase().renameSchemaObject(session, domain, newDomainName); + forAllDependencies(session, domain, null, null, false); + return 0; + } + + @Override + public int getType() { + return CommandInterface.ALTER_DOMAIN_RENAME; + } + +} diff --git 
a/h2/src/main/org/h2/command/ddl/AlterDomainRenameConstraint.java b/h2/src/main/org/h2/command/ddl/AlterDomainRenameConstraint.java new file mode 100644 index 0000000000..3f4cfbad23 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomainRenameConstraint.java @@ -0,0 +1,59 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.constraint.ConstraintDomain; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; + +/** + * This class represents the statement + * ALTER DOMAIN RENAME CONSTRAINT + */ +public class AlterDomainRenameConstraint extends AlterDomain { + + private String constraintName; + private String newConstraintName; + + public AlterDomainRenameConstraint(SessionLocal session, Schema schema) { + super(session, schema); + } + + public void setConstraintName(String string) { + constraintName = string; + } + + public void setNewConstraintName(String newName) { + this.newConstraintName = newName; + } + + @Override + long update(Schema schema, Domain domain) { + Constraint constraint = getSchema().findConstraint(session, constraintName); + if (constraint == null || constraint.getConstraintType() != Type.DOMAIN + || ((ConstraintDomain) constraint).getDomain() != domain) { + throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, constraintName); + } + if (getSchema().findConstraint(session, newConstraintName) != null + || newConstraintName.equals(constraintName)) { + throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, newConstraintName); + } + session.getDatabase().renameSchemaObject(session, constraint, newConstraintName); + return 0; + } + + @Override + 
public int getType() { + return CommandInterface.ALTER_DOMAIN_RENAME_CONSTRAINT; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterIndexRename.java b/h2/src/main/org/h2/command/ddl/AlterIndexRename.java index 7ffb35304d..a09d820ce2 100644 --- a/h2/src/main/org/h2/command/ddl/AlterIndexRename.java +++ b/h2/src/main/org/h2/command/ddl/AlterIndexRename.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -9,7 +9,7 @@ import org.h2.command.CommandInterface; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.index.Index; import org.h2.message.DbException; import org.h2.schema.Schema; @@ -25,7 +25,7 @@ public class AlterIndexRename extends DefineCommand { private String oldIndexName; private String newIndexName; - public AlterIndexRename(Session session) { + public AlterIndexRename(SessionLocal session) { super(session); } @@ -46,8 +46,7 @@ public void setNewName(String name) { } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); Index oldIndex = oldSchema.findIndex(session, oldIndexName); if (oldIndex == null) { @@ -62,7 +61,7 @@ public int update() { throw DbException.get(ErrorCode.INDEX_ALREADY_EXISTS_1, newIndexName); } - session.getUser().checkRight(oldIndex.getTable(), Right.ALL); + session.getUser().checkTableRight(oldIndex.getTable(), Right.SCHEMA_OWNER); db.renameSchemaObject(session, oldIndex, newIndexName); return 0; } diff --git a/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java b/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java index 0328fb8972..3ce0b0fb3b 100644 --- 
a/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java +++ b/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java @@ -1,20 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; +import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.SchemaObject; -import java.util.ArrayList; - /** * This class represents the statement * ALTER SCHEMA RENAME @@ -24,7 +23,7 @@ public class AlterSchemaRename extends DefineCommand { private Schema oldSchema; private String newSchemaName; - public AlterSchemaRename(Session session) { + public AlterSchemaRename(SessionLocal session) { super(session); } @@ -37,23 +36,23 @@ public void setNewName(String name) { } @Override - public int update() { - session.commit(true); + public long update() { + session.getUser().checkSchemaAdmin(); Database db = session.getDatabase(); if (!oldSchema.canDrop()) { - throw DbException.get(ErrorCode.SCHEMA_CAN_NOT_BE_DROPPED_1, - oldSchema.getName()); + throw DbException.get(ErrorCode.SCHEMA_CAN_NOT_BE_DROPPED_1, oldSchema.getName()); } - if (db.findSchema(newSchemaName) != null || - newSchemaName.equals(oldSchema.getName())) { - throw DbException.get(ErrorCode.SCHEMA_ALREADY_EXISTS_1, - newSchemaName); + if (db.findSchema(newSchemaName) != null || newSchemaName.equals(oldSchema.getName())) { + throw DbException.get(ErrorCode.SCHEMA_ALREADY_EXISTS_1, newSchemaName); } - session.getUser().checkSchemaAdmin(); db.renameDatabaseObject(session, oldSchema, newSchemaName); - ArrayList all = 
db.getAllSchemaObjects(); - for (SchemaObject schemaObject : all) { - db.updateMeta(session, schemaObject); + ArrayList all = new ArrayList<>(); + for (Schema schema : db.getAllSchemas()) { + schema.getAll(all); + for (SchemaObject schemaObject : all) { + db.updateMeta(session, schemaObject); + } + all.clear(); } return 0; } diff --git a/h2/src/main/org/h2/command/ddl/AlterSequence.java b/h2/src/main/org/h2/command/ddl/AlterSequence.java index 9b251e785d..706672a7c1 100644 --- a/h2/src/main/org/h2/command/ddl/AlterSequence.java +++ b/h2/src/main/org/h2/command/ddl/AlterSequence.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,21 +8,22 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.Sequence; import org.h2.table.Column; -import org.h2.table.Table; /** * This class represents the statement ALTER SEQUENCE. 
*/ -public class AlterSequence extends SchemaCommand { +public class AlterSequence extends SchemaOwnerCommand { private boolean ifExists; - private Table table; + private Column column; + + private Boolean always; private String sequenceName; @@ -30,8 +31,9 @@ public class AlterSequence extends SchemaCommand { private SequenceOptions options; - public AlterSequence(Session session, Schema schema) { + public AlterSequence(SessionLocal session, Schema schema) { super(session, schema); + transactional = true; } public void setIfExists(boolean b) { @@ -51,18 +53,26 @@ public boolean isTransactional() { return true; } - public void setColumn(Column column) { - table = column.getTable(); + /** + * Set the column + * + * @param column the column + * @param always whether value should be always generated, or null if "set + * generated is not specified + */ + public void setColumn(Column column, Boolean always) { + this.column = column; + this.always = always; sequence = column.getSequence(); if (sequence == null && !ifExists) { - throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, column.getSQL(false)); + throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, column.getTraceSQL()); } } @Override - public int update() { + long update(Schema schema) { if (sequence == null) { - sequence = getSchema().findSequence(sequenceName); + sequence = schema.findSequence(sequenceName); if (sequence == null) { if (!ifExists) { throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, sequenceName); @@ -70,22 +80,21 @@ public int update() { return 0; } } - if (table != null) { - session.getUser().checkRight(table, Right.ALL); - } - Boolean cycle = options.getCycle(); - if (cycle != null) { - sequence.setCycle(cycle); - } - Long cache = options.getCacheSize(session); - if (cache != null) { - sequence.setCacheSize(cache); - } - if (options.isRangeSet()) { - sequence.modify(options.getStartValue(session), options.getMinValue(sequence, session), - options.getMaxValue(sequence, session), 
options.getIncrement(session)); + if (column != null) { + session.getUser().checkTableRight(column.getTable(), Right.SCHEMA_OWNER); } + options.setDataType(sequence.getDataType()); + Long startValue = options.getStartValue(session); + sequence.modify( + options.getRestartValue(session, startValue != null ? startValue : sequence.getStartValue()), + startValue, + options.getMinValue(sequence, session), options.getMaxValue(sequence, session), + options.getIncrement(session), options.getCycle(), options.getCacheSize(session)); sequence.flush(session); + if (column != null && always != null) { + column.setSequence(sequence, always); + session.getDatabase().updateMeta(session, column.getTable()); + } return 0; } diff --git a/h2/src/main/org/h2/command/ddl/AlterTable.java b/h2/src/main/org/h2/command/ddl/AlterTable.java new file mode 100644 index 0000000000..2cfbd7ff85 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterTable.java @@ -0,0 +1,51 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.engine.Right; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.table.Table; + +/** + * The base class for ALTER TABLE commands. 
+ */ +public abstract class AlterTable extends SchemaCommand { + + String tableName; + + boolean ifTableExists; + + AlterTable(SessionLocal session, Schema schema) { + super(session, schema); + } + + public final void setTableName(String tableName) { + this.tableName = tableName; + } + + public final void setIfTableExists(boolean b) { + ifTableExists = b; + } + + @Override + public final long update() { + Table table = getSchema().findTableOrView(session, tableName); + if (table == null) { + if (ifTableExists) { + return 0; + } + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); + return update(table); + } + + abstract long update(Table table); + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java b/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java index 71d0fb8f6b..05c425b2e0 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java @@ -1,13 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; @@ -19,7 +17,7 @@ import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.index.Index; import org.h2.index.IndexType; @@ -29,16 +27,17 @@ import org.h2.table.IndexColumn; import org.h2.table.Table; import org.h2.table.TableFilter; +import org.h2.util.HasSQL; +import org.h2.value.DataType; /** * This class represents the statement * ALTER TABLE ADD CONSTRAINT */ -public class AlterTableAddConstraint extends SchemaCommand { +public class AlterTableAddConstraint extends AlterTable { - private int type; + private final int type; private String constraintName; - private String tableName; private IndexColumn[] indexColumns; private ConstraintActionType deleteAction = ConstraintActionType.RESTRICT; private ConstraintActionType updateAction = ConstraintActionType.RESTRICT; @@ -50,34 +49,34 @@ public class AlterTableAddConstraint extends SchemaCommand { private String comment; private boolean checkExisting; private boolean primaryKeyHash; - private boolean ifTableExists; private final boolean ifNotExists; private final ArrayList createdIndexes = new ArrayList<>(); + private ConstraintUnique createdUniqueConstraint; - public AlterTableAddConstraint(Session session, Schema schema, - boolean ifNotExists) { + public AlterTableAddConstraint(SessionLocal session, Schema schema, int type, boolean ifNotExists) { super(session, schema); this.ifNotExists = ifNotExists; - } - - public void setIfTableExists(boolean b) { - ifTableExists = b; + this.type = type; } private String generateConstraintName(Table table) { if (constraintName == null) { - constraintName = getSchema().getUniqueConstraintName( - session, table); + 
constraintName = getSchema().getUniqueConstraintName(session, table); } return constraintName; } @Override - public int update() { + public long update(Table table) { try { - return tryUpdate(); + return tryUpdate(table); } catch (DbException e) { try { + if (createdUniqueConstraint != null) { + Index index = createdUniqueConstraint.getIndex(); + session.getDatabase().removeSchemaObject(session, createdUniqueConstraint); + createdIndexes.remove(index); + } for (Index index : createdIndexes) { session.getDatabase().removeSchemaObject(session, index); } @@ -95,28 +94,25 @@ public int update() { * * @return the update count */ - private int tryUpdate() { - if (!transactional) { - session.commit(true); - } - Database db = session.getDatabase(); - Table table = getSchema().findTableOrView(session, tableName); - if (table == null) { - if (ifTableExists) { - return 0; - } - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); - } + private int tryUpdate(Table table) { if (constraintName != null && getSchema().findConstraint(session, constraintName) != null) { if (ifNotExists) { return 0; } - throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, - constraintName); + /** + * 1.4.200 and older databases don't always have a unique constraint + * for each referential constraint, so these constraints are created + * and they may use the same generated name as some other not yet + * initialized constraint that may lead to a name conflict. 
+ */ + if (!session.isQuirksMode()) { + throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, constraintName); + } + constraintName = null; } - session.getUser().checkRight(table, Right.ALL); + Database db = session.getDatabase(); db.lockMeta(session); - table.lock(session, true, true); + table.lock(session, Table.EXCLUSIVE_LOCK); Constraint constraint; switch (type) { case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY: { @@ -148,8 +144,8 @@ private int tryUpdate() { session, table, Constants.PREFIX_PRIMARY_KEY); int indexId = session.getDatabase().allocateObjectId(); try { - index = table.addIndex(session, indexName, indexId, - indexColumns, indexType, true, null); + index = table.addIndex(session, indexName, indexId, indexColumns, indexColumns.length, indexType, + true, null); } finally { getSchema().freeUniqueName(indexName); } @@ -164,28 +160,28 @@ private int tryUpdate() { constraint = pk; break; } - case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE: { - IndexColumn.mapColumns(indexColumns, table); - boolean isOwner = false; - if (index != null && canUseUniqueIndex(index, table, indexColumns)) { - isOwner = true; - index.getIndexType().setBelongsToConstraint(true); - } else { - index = getUniqueIndex(table, indexColumns); - if (index == null) { - index = createIndex(table, indexColumns, true); - isOwner = true; + case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE: + if (indexColumns == null) { + Column[] columns = table.getColumns(); + int columnCount = columns.length; + ArrayList list = new ArrayList<>(columnCount); + for (int i = 0; i < columnCount; i++) { + Column c = columns[i]; + if (c.getVisible()) { + IndexColumn indexColumn = new IndexColumn(c.getName()); + indexColumn.column = c; + list.add(indexColumn); + } + } + if (list.isEmpty()) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_1, "UNIQUE(VALUE) on table without columns"); } + indexColumns = list.toArray(new IndexColumn[0]); + } else { + 
IndexColumn.mapColumns(indexColumns, table); } - int id = getObjectId(); - String name = generateConstraintName(table); - ConstraintUnique unique = new ConstraintUnique(getSchema(), id, - name, table, false); - unique.setColumns(indexColumns); - unique.setIndex(index, isOwner); - constraint = unique; + constraint = createUniqueConstraint(table, index, indexColumns, false); break; - } case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK: { int id = getObjectId(); String name = generateConstraintName(table); @@ -206,14 +202,63 @@ private int tryUpdate() { if (refTable == null) { throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, refTableName); } - session.getUser().checkRight(refTable, Right.ALL); + if (refTable != table) { + session.getUser().checkTableRight(refTable, Right.SCHEMA_OWNER); + } if (!refTable.canReference()) { StringBuilder builder = new StringBuilder("Reference "); - refTable.getSQL(builder, false); + refTable.getSQL(builder, HasSQL.TRACE_SQL_FLAGS); throw DbException.getUnsupportedException(builder.toString()); } boolean isOwner = false; IndexColumn.mapColumns(indexColumns, table); + if (refIndexColumns == null) { + refIndexColumns = refTable.getPrimaryKey().getIndexColumns(); + } else { + IndexColumn.mapColumns(refIndexColumns, refTable); + } + int columnCount = indexColumns.length; + if (refIndexColumns.length != columnCount) { + throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); + } + for (IndexColumn indexColumn : indexColumns) { + Column column = indexColumn.column; + if (column.isGeneratedAlways()) { + switch (deleteAction) { + case SET_DEFAULT: + case SET_NULL: + throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2, + column.getSQLWithTable(new StringBuilder(), HasSQL.TRACE_SQL_FLAGS).toString(), + "ON DELETE " + deleteAction.getSqlName()); + default: + // All other actions are allowed + } + switch (updateAction) { + case CASCADE: + case SET_DEFAULT: + case SET_NULL: + throw 
DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2, + column.getSQLWithTable(new StringBuilder(), HasSQL.TRACE_SQL_FLAGS).toString(), + "ON UPDATE " + updateAction.getSqlName()); + default: + // All other actions are allowed + } + } + } + for (int i = 0; i < columnCount; i++) { + Column column1 = indexColumns[i].column, column2 = refIndexColumns[i].column; + if (!DataType.areStableComparable(column1.getType(), column2.getType())) { + throw DbException.get(ErrorCode.UNCOMPARABLE_REFERENCED_COLUMN_2, column1.getCreateSQL(), + column2.getCreateSQL()); + } + } + ConstraintUnique unique = getUniqueConstraint(refTable, refIndexColumns); + if (unique == null && !session.isQuirksMode() + && !session.getMode().createUniqueConstraintForReferencedColumns) { + throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, IndexColumn.writeColumns( + new StringBuilder("PRIMARY KEY | UNIQUE ("), refIndexColumns, HasSQL.TRACE_SQL_FLAGS) + .append(')').toString()); + } if (index != null && canUseIndex(index, table, indexColumns, false)) { isOwner = true; index.getIndexType().setBelongsToConstraint(true); @@ -224,30 +269,6 @@ private int tryUpdate() { isOwner = true; } } - if (refIndexColumns == null) { - Index refIdx = refTable.getPrimaryKey(); - refIndexColumns = refIdx.getIndexColumns(); - } else { - IndexColumn.mapColumns(refIndexColumns, refTable); - } - if (refIndexColumns.length != indexColumns.length) { - throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); - } - boolean isRefOwner = false; - if (refIndex != null && refIndex.getTable() == refTable && - canUseIndex(refIndex, refTable, refIndexColumns, false)) { - isRefOwner = true; - refIndex.getIndexType().setBelongsToConstraint(true); - } else { - refIndex = null; - } - if (refIndex == null) { - refIndex = getIndex(refTable, refIndexColumns, false); - if (refIndex == null) { - refIndex = createIndex(refTable, refIndexColumns, true); - isRefOwner = true; - } - } int id = getObjectId(); String 
name = generateConstraintName(table); ConstraintReferential refConstraint = new ConstraintReferential(getSchema(), @@ -256,7 +277,12 @@ private int tryUpdate() { refConstraint.setIndex(index, isOwner); refConstraint.setRefTable(refTable); refConstraint.setRefColumns(refIndexColumns); - refConstraint.setRefIndex(refIndex, isRefOwner); + if (unique == null) { + unique = createUniqueConstraint(refTable, refIndex, refIndexColumns, true); + addConstraintToTable(db, refTable, unique); + createdUniqueConstraint = unique; + } + refConstraint.setRefConstraint(unique); if (checkExisting) { refConstraint.checkExistingData(session); } @@ -267,17 +293,55 @@ private int tryUpdate() { break; } default: - throw DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } // parent relationship is already set with addConstraint constraint.setComment(comment); + addConstraintToTable(db, table, constraint); + return 0; + } + + private ConstraintUnique createUniqueConstraint(Table table, Index index, IndexColumn[] indexColumns, + boolean forForeignKey) { + boolean isOwner = false; + if (index != null && canUseIndex(index, table, indexColumns, true)) { + isOwner = true; + index.getIndexType().setBelongsToConstraint(true); + } else { + index = getIndex(table, indexColumns, true); + if (index == null) { + index = createIndex(table, indexColumns, true); + isOwner = true; + } + } + int id; + String name; + Schema tableSchema = table.getSchema(); + if (forForeignKey) { + id = session.getDatabase().allocateObjectId(); + try { + tableSchema.reserveUniqueName(constraintName); + name = tableSchema.getUniqueConstraintName(session, table); + } finally { + tableSchema.freeUniqueName(constraintName); + } + } else { + id = getObjectId(); + name = generateConstraintName(table); + } + ConstraintUnique unique = new ConstraintUnique(tableSchema, id, name, table, false); + unique.setColumns(indexColumns); + unique.setIndex(index, isOwner); + return unique; + } + 
+ private void addConstraintToTable(Database db, Table table, Constraint constraint) { if (table.isTemporary() && !table.isGlobalTemporary()) { session.addLocalTempTableConstraint(constraint); } else { db.addSchemaObject(session, constraint); } table.addConstraint(constraint); - return 0; } private Index createIndex(Table t, IndexColumn[] cols, boolean unique) { @@ -295,8 +359,8 @@ private Index createIndex(Table t, IndexColumn[] cols, boolean unique) { String indexName = t.getSchema().getUniqueIndexName(session, t, prefix + "_INDEX_"); try { - Index index = t.addIndex(session, indexName, indexId, cols, - indexType, true, null); + Index index = t.addIndex(session, indexName, indexId, cols, unique ? cols.length : 0, indexType, true, + null); createdIndexes.add(index); return index; } finally { @@ -312,79 +376,58 @@ public void setUpdateAction(ConstraintActionType action) { this.updateAction = action; } - private static Index getUniqueIndex(Table t, IndexColumn[] cols) { - if (t.getIndexes() == null) { - return null; - } - for (Index idx : t.getIndexes()) { - if (canUseUniqueIndex(idx, t, cols)) { - return idx; + private static ConstraintUnique getUniqueConstraint(Table t, IndexColumn[] cols) { + ArrayList constraints = t.getConstraints(); + if (constraints != null) { + for (Constraint constraint : constraints) { + if (constraint.getTable() == t) { + Constraint.Type constraintType = constraint.getConstraintType(); + if (constraintType == Constraint.Type.PRIMARY_KEY || constraintType == Constraint.Type.UNIQUE) { + if (canUseIndex(constraint.getIndex(), t, cols, true)) { + return (ConstraintUnique) constraint; + } + } + } } } return null; } - private static Index getIndex(Table t, IndexColumn[] cols, boolean moreColumnOk) { - if (t.getIndexes() == null) { - return null; - } - for (Index idx : t.getIndexes()) { - if (canUseIndex(idx, t, cols, moreColumnOk)) { - return idx; + private static Index getIndex(Table t, IndexColumn[] cols, boolean unique) { + ArrayList indexes 
= t.getIndexes(); + Index index = null; + if (indexes != null) { + for (Index idx : indexes) { + if (canUseIndex(idx, t, cols, unique)) { + if (index == null || idx.getIndexColumns().length < index.getIndexColumns().length) { + index = idx; + } + } } } - return null; + return index; } - - // all cols must be in the index key, the order doesn't matter and there - // must be no other fields in the index key - private static boolean canUseUniqueIndex(Index idx, Table table, IndexColumn[] cols) { - if (idx.getTable() != table || !idx.getIndexType().isUnique()) { + private static boolean canUseIndex(Index index, Table table, IndexColumn[] cols, boolean unique) { + if (index.getTable() != table) { return false; } - Column[] indexCols = idx.getColumns(); - HashSet indexColsSet = new HashSet<>(); - Collections.addAll(indexColsSet, indexCols); - HashSet colsSet = new HashSet<>(); - for (IndexColumn c : cols) { - colsSet.add(c.column); - } - return colsSet.equals(indexColsSet); - } - - private static boolean canUseIndex(Index existingIndex, Table table, - IndexColumn[] cols, boolean moreColumnsOk) { - if (existingIndex.getTable() != table || existingIndex.getCreateSQL() == null) { - // can't use the scan index or index of another table - return false; - } - Column[] indexCols = existingIndex.getColumns(); - - if (moreColumnsOk) { - if (indexCols.length < cols.length) { + int allowedColumns; + if (unique) { + allowedColumns = index.getUniqueColumnCount(); + if (allowedColumns != cols.length) { return false; } - for (IndexColumn col : cols) { - // all columns of the list must be part of the index, - // but not all columns of the index need to be part of the list - // holes are not allowed (index=a,b,c & list=a,b is ok; - // but list=a,c is not) - int idx = existingIndex.getColumnIndex(col.column); - if (idx < 0 || idx >= cols.length) { - return false; - } - } } else { - if (indexCols.length != cols.length) { + if (index.getCreateSQL() == null || (allowedColumns = 
index.getColumns().length) != cols.length) { return false; } - for (IndexColumn col : cols) { - // all columns of the list must be part of the index - int idx = existingIndex.getColumnIndex(col.column); - if (idx < 0) { - return false; - } + } + for (IndexColumn col : cols) { + // all columns of the list must be part of the index + int i = index.getColumnIndex(col.column); + if (i < 0 || i >= allowedColumns) { + return false; } } return true; @@ -398,10 +441,6 @@ public String getConstraintName() { return constraintName; } - public void setType(int type) { - this.type = type; - } - @Override public int getType() { return type; @@ -411,10 +450,6 @@ public void setCheckExpression(Expression expression) { this.checkExpression = expression; } - public void setTableName(String tableName) { - this.tableName = tableName; - } - public void setIndexColumns(IndexColumn[] indexColumns) { this.indexColumns = indexColumns; } diff --git a/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java b/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java index 5676a8aed3..ebb8baa2ef 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java @@ -1,14 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; import java.util.HashSet; - import org.h2.api.ErrorCode; +import org.h2.command.CommandContainer; import org.h2.command.CommandInterface; import org.h2.command.Parser; import org.h2.command.Prepared; @@ -19,7 +19,7 @@ import org.h2.engine.Database; import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; import org.h2.index.Index; @@ -35,6 +35,7 @@ import org.h2.table.Table; import org.h2.table.TableBase; import org.h2.table.TableView; +import org.h2.util.HasSQL; import org.h2.util.Utils; /** @@ -42,11 +43,12 @@ * ALTER TABLE ADD, * ALTER TABLE ADD IF NOT EXISTS, * ALTER TABLE ALTER COLUMN, - * ALTER TABLE ALTER COLUMN RESTART, * ALTER TABLE ALTER COLUMN SELECTIVITY, * ALTER TABLE ALTER COLUMN SET DEFAULT, - * ALTER TABLE ALTER COLUMN SET NOT NULL, + * ALTER TABLE ALTER COLUMN DROP DEFAULT, + * ALTER TABLE ALTER COLUMN DROP EXPRESSION, * ALTER TABLE ALTER COLUMN SET NULL, + * ALTER TABLE ALTER COLUMN DROP NULL, * ALTER TABLE ALTER COLUMN SET VISIBLE, * ALTER TABLE ALTER COLUMN SET INVISIBLE, * ALTER TABLE DROP COLUMN @@ -62,6 +64,7 @@ public class AlterTableAlterColumn extends CommandWithColumns { */ private Expression defaultExpression; private Expression newSelectivity; + private Expression usingExpression; private boolean addFirst; private String addBefore; private String addAfter; @@ -69,9 +72,9 @@ public class AlterTableAlterColumn extends CommandWithColumns { private boolean ifNotExists; private ArrayList columnsToAdd; private ArrayList columnsToRemove; - private boolean newVisibility; + private boolean booleanFlag; - public AlterTableAlterColumn(Session session, Schema schema) { + public AlterTableAlterColumn(SessionLocal session, Schema schema) { super(session, schema); } @@ -103,8 +106,7 @@ public void setAddAfter(String 
after) { } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); Table table = getSchema().resolveTableOrView(session, tableName); if (table == null) { @@ -113,9 +115,9 @@ public int update() { } throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); } - session.getUser().checkRight(table, Right.ALL); + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); table.checkSupportAlter(); - table.lock(session, true, true); + table.lock(session, Table.EXCLUSIVE_LOCK); if (newColumn != null) { checkDefaultReferencesTable(table, newColumn.getDefaultExpression()); checkClustering(newColumn); @@ -128,7 +130,7 @@ public int update() { } switch (type) { case CommandInterface.ALTER_TABLE_ALTER_COLUMN_NOT_NULL: { - if (!oldColumn.isNullable()) { + if (oldColumn == null || !oldColumn.isNullable()) { // no change break; } @@ -138,7 +140,7 @@ public int update() { break; } case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL: { - if (oldColumn.isNullable()) { + if (oldColumn == null || oldColumn.isNullable()) { // no change break; } @@ -147,33 +149,72 @@ public int update() { db.updateMeta(session, table); break; } - case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT: { - Sequence sequence = oldColumn == null ? 
null : oldColumn.getSequence(); - checkDefaultReferencesTable(table, defaultExpression); - oldColumn.setSequence(null); - oldColumn.setDefaultExpression(session, defaultExpression); + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT: + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION: { + if (oldColumn == null) { + break; + } + if (oldColumn.isIdentity()) { + break; + } + if (defaultExpression != null) { + if (oldColumn.isGenerated()) { + break; + } + checkDefaultReferencesTable(table, defaultExpression); + oldColumn.setDefaultExpression(session, defaultExpression); + } else { + if (type == CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION != oldColumn.isGenerated()) { + break; + } + oldColumn.setDefaultExpression(session, null); + } + db.updateMeta(session, table); + break; + } + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_IDENTITY: { + if (oldColumn == null) { + break; + } + Sequence sequence = oldColumn.getSequence(); + if (sequence == null) { + break; + } + oldColumn.setSequence(null, false); removeSequence(table, sequence); db.updateMeta(session, table); break; } case CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE: { - checkDefaultReferencesTable(table, defaultExpression); - oldColumn.setOnUpdateExpression(session, defaultExpression); + if (oldColumn == null) { + break; + } + if (defaultExpression != null) { + if (oldColumn.isIdentity() || oldColumn.isGenerated()) { + break; + } + checkDefaultReferencesTable(table, defaultExpression); + oldColumn.setOnUpdateExpression(session, defaultExpression); + } else { + oldColumn.setOnUpdateExpression(session, null); + } db.updateMeta(session, table); break; } case CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE: { + if (oldColumn == null) { + break; + } // if the change is only increasing the precision, then we don't // need to copy the table because the length is only a constraint, // and does not affect the storage structure. 
- if (oldColumn.isWideningConversion(newColumn)) { - convertAutoIncrementColumn(table, newColumn); + if (oldColumn.isWideningConversion(newColumn) && usingExpression == null) { + convertIdentityColumn(table, newColumn); oldColumn.copy(newColumn); db.updateMeta(session, table); } else { - oldColumn.setSequence(null); + oldColumn.setSequence(null, false); oldColumn.setDefaultExpression(session, null); - oldColumn.setConvertNullToDefault(false); if (oldColumn.isNullable() && !newColumn.isNullable()) { checkNoNullValues(table); } else if (!oldColumn.isNullable() && newColumn.isNullable()) { @@ -182,8 +223,8 @@ public int update() { if (oldColumn.getVisible() ^ newColumn.getVisible()) { oldColumn.setVisible(newColumn.getVisible()); } - convertAutoIncrementColumn(table, newColumn); - copyData(table); + convertIdentityColumn(table, newColumn); + copyData(table, null, true); } table.setModified(); break; @@ -203,26 +244,43 @@ public int update() { } case CommandInterface.ALTER_TABLE_DROP_COLUMN: { if (table.getColumns().length - columnsToRemove.size() < 1) { - throw DbException.get(ErrorCode.CANNOT_DROP_LAST_COLUMN, columnsToRemove.get(0).getSQL(false)); + throw DbException.get(ErrorCode.CANNOT_DROP_LAST_COLUMN, columnsToRemove.get(0).getTraceSQL()); } table.dropMultipleColumnsConstraintsAndIndexes(session, columnsToRemove); - copyData(table); + copyData(table, null, false); break; } case CommandInterface.ALTER_TABLE_ALTER_COLUMN_SELECTIVITY: { + if (oldColumn == null) { + break; + } int value = newSelectivity.optimize(session).getValue(session).getInt(); oldColumn.setSelectivity(value); db.updateMeta(session, table); break; } - case CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY: { - oldColumn.setVisible(newVisibility); - table.setModified(); - db.updateMeta(session, table); + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY: + if (oldColumn == null) { + break; + } + if (oldColumn.getVisible() != booleanFlag) { + oldColumn.setVisible(booleanFlag); + 
table.setModified(); + db.updateMeta(session, table); + } + break; + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL: + if (oldColumn == null) { + break; + } + if (oldColumn.isDefaultOnNull() != booleanFlag) { + oldColumn.setDefaultOnNull(booleanFlag); + table.setModified(); + db.updateMeta(session, table); + } break; - } default: - DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } return 0; } @@ -236,28 +294,27 @@ private static void checkDefaultReferencesTable(Table table, Expression defaultE .getDependenciesVisitor(dependencies); defaultExpression.isEverything(visitor); if (dependencies.contains(table)) { - throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, defaultExpression.getSQL(false)); + throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, defaultExpression.getTraceSQL()); } } private void checkClustering(Column c) { if (!Constants.CLUSTERING_DISABLED .equals(session.getDatabase().getCluster()) - && c.isAutoIncrement()) { + && c.hasIdentityOptions()) { throw DbException.getUnsupportedException( - "CLUSTERING && auto-increment columns"); + "CLUSTERING && identity columns"); } } - private void convertAutoIncrementColumn(Table table, Column c) { - if (c.isAutoIncrement()) { + private void convertIdentityColumn(Table table, Column c) { + if (c.hasIdentityOptions()) { if (c.isPrimaryKey()) { - c.setOriginalSQL("IDENTITY"); - } else { - int objId = getObjectId(); - c.convertAutoIncrementToSequence(session, getSchema(), objId, - table.isTemporary()); + addConstraintCommand( + Parser.newPrimaryKeyConstraintCommand(session, table.getSchema(), table.getName(), c)); } + int objId = getObjectId(); + c.initializeSequence(session, getSchema(), objId, table.isTemporary()); } } @@ -270,10 +327,6 @@ private void removeSequence(Table table, Sequence sequence) { } } - private void copyData(Table table) { - copyData(table, null, false); - } - private void copyData(Table table, ArrayList sequences, 
boolean createConstraints) { if (table.isTemporary()) { throw DbException.getUnsupportedException("TEMP TABLE"); @@ -295,8 +348,8 @@ private void copyData(Table table, ArrayList sequences, boolean create checkViews(table, newTable); } catch (DbException e) { StringBuilder builder = new StringBuilder("DROP TABLE "); - newTable.getSQL(builder, true); - execute(builder.toString(), true); + newTable.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); + execute(builder.toString()); throw e; } String tableName = table.getName(); @@ -305,8 +358,8 @@ private void copyData(Table table, ArrayList sequences, boolean create table.removeDependentView(view); } StringBuilder builder = new StringBuilder("DROP TABLE "); - table.getSQL(builder, true).append(" IGNORE"); - execute(builder.toString(), true); + table.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append(" IGNORE"); + execute(builder.toString()); db.renameSchemaObject(session, newTable, tableName); for (DbObject child : newTable.getChildren()) { if (child instanceof Sequence) { @@ -336,7 +389,7 @@ private void copyData(Table table, ArrayList sequences, boolean create } for (TableView view : dependentViews) { String sql = view.getCreateSQL(true, true); - execute(sql, true); + execute(sql); } } @@ -345,7 +398,8 @@ private Table cloneTableStructure(Table table, Column[] columns, Database db, for (Column col : columns) { newColumns.add(col.getClone()); } - if (type == CommandInterface.ALTER_TABLE_DROP_COLUMN) { + switch (type) { + case CommandInterface.ALTER_TABLE_DROP_COLUMN: for (Column removeCol : columnsToRemove) { Column foundCol = null; for (Column newCol : newColumns) { @@ -355,11 +409,12 @@ private Table cloneTableStructure(Table table, Column[] columns, Database db, } } if (foundCol == null) { - throw DbException.throwInternalError(removeCol.getCreateSQL()); + throw DbException.getInternalError(removeCol.getCreateSQL()); } newColumns.remove(foundCol); } - } else if (type == CommandInterface.ALTER_TABLE_ADD_COLUMN) { + break; + 
case CommandInterface.ALTER_TABLE_ADD_COLUMN: { int position; if (addFirst) { position = 0; @@ -375,9 +430,10 @@ private Table cloneTableStructure(Table table, Column[] columns, Database db, newColumns.add(position++, column); } } - } else if (type == CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE) { - int position = oldColumn.getColumnId(); - newColumns.set(position, newColumn); + break; + } + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE: + newColumns.set(oldColumn.getColumnId(), newColumn); } // create a table object in order to get the SQL statement @@ -394,33 +450,40 @@ private Table cloneTableStructure(Table table, Column[] columns, Database db, data.persistData = table.isPersistData(); data.persistIndexes = table.isPersistIndexes(); data.isHidden = table.isHidden(); - data.create = true; data.session = session; Table newTable = getSchema().createTable(data); newTable.setComment(table.getComment()); - String newTableSQL = newTable.getCreateSQL(); - StringBuilder columnList = new StringBuilder(); + String newTableSQL = newTable.getCreateSQLForMeta(); + StringBuilder columnNames = new StringBuilder(); + StringBuilder columnValues = new StringBuilder(); for (Column nc : newColumns) { - if (columnList.length() > 0) { - columnList.append(", "); - } - if (type == CommandInterface.ALTER_TABLE_ADD_COLUMN && - columnsToAdd != null && columnsToAdd.contains(nc)) { - Expression def = nc.getDefaultExpression(); - if (def == null) { - columnList.append("NULL"); - } else { - def.getSQL(columnList, true); + if (nc.isGenerated()) { + continue; + } + switch (type) { + case CommandInterface.ALTER_TABLE_ADD_COLUMN: + if (columnsToAdd != null && columnsToAdd.contains(nc)) { + if (usingExpression != null) { + usingExpression.getUnenclosedSQL(addColumn(nc, columnNames, columnValues), + HasSQL.DEFAULT_SQL_FLAGS); + } + continue; + } + break; + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE: + if (nc.equals(newColumn) && usingExpression != null) { + 
usingExpression.getUnenclosedSQL(addColumn(nc, columnNames, columnValues), + HasSQL.DEFAULT_SQL_FLAGS); + continue; } - } else { - nc.getSQL(columnList, true); } + nc.getSQL(addColumn(nc, columnNames, columnValues), HasSQL.DEFAULT_SQL_FLAGS); } String newTableName = newTable.getName(); Schema newTableSchema = newTable.getSchema(); newTable.removeChildrenAndResources(session); - execute(newTableSQL, true); + execute(newTableSQL); newTable = newTableSchema.getTableOrView(session, newTableName); ArrayList children = Utils.newSmallArrayList(); ArrayList triggers = Utils.newSmallArrayList(); @@ -441,9 +504,9 @@ private Table cloneTableStructure(Table table, Column[] columns, Database db, if (child instanceof TableView) { continue; } else if (child.getType() == DbObject.TABLE_OR_VIEW) { - DbException.throwInternalError(); + throw DbException.getInternalError(); } - String quotedName = Parser.quoteIdentifier(tempName + "_" + child.getName(), true); + String quotedName = Parser.quoteIdentifier(tempName + "_" + child.getName(), HasSQL.DEFAULT_SQL_FLAGS); String sql = null; if (child instanceof ConstraintReferential) { ConstraintReferential r = (ConstraintReferential) child; @@ -463,7 +526,7 @@ private Table cloneTableStructure(Table table, Column[] columns, Database db, if (child instanceof ConstraintUnique) { ConstraintUnique constraint = (ConstraintUnique) child; if (constraint.getConstraintType() == Constraint.Type.PRIMARY_KEY) { - index = constraint.getUniqueIndex(); + index = constraint.getIndex(); } } else if (child instanceof Index) { index = (Index) child; @@ -471,7 +534,7 @@ private Table cloneTableStructure(Table table, Column[] columns, Database db, if (index != null && TableBase.getMainIndexColumn(index.getIndexType(), index.getIndexColumns()) != SearchRow.ROWID_INDEX) { - execute(sql, true); + execute(sql); hasDelegateIndex = true; continue; } @@ -480,30 +543,28 @@ private Table cloneTableStructure(Table table, Column[] columns, Database db, } } } - 
StringBuilder buff = new StringBuilder(); - buff.append("INSERT INTO "); - newTable.getSQL(buff, true); - buff.append(" SELECT "); - if (columnList.length() == 0) { + StringBuilder builder = newTable.getSQL(new StringBuilder(128).append("INSERT INTO "), // + HasSQL.DEFAULT_SQL_FLAGS) + .append('(').append(columnNames).append(") OVERRIDING SYSTEM VALUE SELECT "); + if (columnValues.length() == 0) { // special case: insert into test select * from - buff.append('*'); + builder.append('*'); } else { - buff.append(columnList); + builder.append(columnValues); } - buff.append(" FROM "); - table.getSQL(buff, true); + table.getSQL(builder.append(" FROM "), HasSQL.DEFAULT_SQL_FLAGS); try { - execute(buff.toString(), true); + execute(builder.toString()); } catch (Throwable t) { // data was not inserted due to data conversion error or some // unexpected reason - StringBuilder builder = new StringBuilder("DROP TABLE "); - newTable.getSQL(builder, true); - execute(builder.toString(), true); + builder = new StringBuilder("DROP TABLE "); + newTable.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); + execute(builder.toString()); throw t; } for (String sql : children) { - execute(sql, true); + execute(sql); } table.setModified(); // remove the sequences from the columns (except dropped columns) @@ -512,15 +573,26 @@ private Table cloneTableStructure(Table table, Column[] columns, Database db, Sequence seq = col.getSequence(); if (seq != null) { table.removeSequence(seq); - col.setSequence(null); + col.setSequence(null, false); } } for (String sql : triggers) { - execute(sql, true); + execute(sql); } return newTable; } + private static StringBuilder addColumn(Column column, StringBuilder columnNames, StringBuilder columnValues) { + if (columnNames.length() > 0) { + columnNames.append(", "); + } + column.getSQL(columnNames, HasSQL.DEFAULT_SQL_FLAGS); + if (columnValues.length() > 0) { + columnValues.append(", "); + } + return columnValues; + } + /** * Check that all views and other dependent 
objects. */ @@ -560,43 +632,44 @@ private void checkViewsAreValid(DbObject tableOrView) { try { session.prepare(sql); } catch (DbException e) { - throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, e, view.getSQL(false)); + throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, e, view.getTraceSQL()); } checkViewsAreValid(view); } } } - private void execute(String sql, boolean ddl) { + private void execute(String sql) { Prepared command = session.prepare(sql); - command.update(); - if (ddl) { - session.commit(true); - } + CommandContainer commandContainer = new CommandContainer(session, sql, command); + commandContainer.executeUpdate(null); } private void checkNullable(Table table) { + if (oldColumn.isIdentity()) { + throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, oldColumn.getName()); + } for (Index index : table.getIndexes()) { if (index.getColumnIndex(oldColumn) < 0) { continue; } IndexType indexType = index.getIndexType(); - if (indexType.isPrimaryKey() || indexType.isHash()) { - throw DbException.get(ErrorCode.COLUMN_IS_PART_OF_INDEX_1, index.getSQL(false)); + if (indexType.isPrimaryKey()) { + throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, oldColumn.getName()); } } } private void checkNoNullValues(Table table) { StringBuilder builder = new StringBuilder("SELECT COUNT(*) FROM "); - table.getSQL(builder, true).append(" WHERE "); - oldColumn.getSQL(builder, true).append(" IS NULL"); + table.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append(" WHERE "); + oldColumn.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append(" IS NULL"); String sql = builder.toString(); Prepared command = session.prepare(sql); ResultInterface result = command.query(0); result.next(); if (result.currentRow()[0].getInt() > 0) { - throw DbException.get(ErrorCode.COLUMN_CONTAINS_NULL_VALUES_1, oldColumn.getSQL(false)); + throw DbException.get(ErrorCode.COLUMN_CONTAINS_NULL_VALUES_1, oldColumn.getTraceSQL()); } } @@ -617,6 +690,15 @@ public void 
setDefaultExpression(Expression defaultExpression) { this.defaultExpression = defaultExpression; } + /** + * Set using expression. + * + * @param usingExpression using expression + */ + public void setUsingExpression(Expression usingExpression) { + this.usingExpression = usingExpression; + } + public void setNewColumn(Column newColumn) { this.newColumn = newColumn; } @@ -642,7 +724,7 @@ public void setColumnsToRemove(ArrayList columnsToRemove) { this.columnsToRemove = columnsToRemove; } - public void setVisible(boolean visible) { - this.newVisibility = visible; + public void setBooleanFlag(boolean booleanFlag) { + this.booleanFlag = booleanFlag; } } diff --git a/h2/src/main/org/h2/command/ddl/AlterTableDropConstraint.java b/h2/src/main/org/h2/command/ddl/AlterTableDropConstraint.java index fd6f571e38..32a7390e02 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableDropConstraint.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableDropConstraint.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,41 +8,67 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.constraint.ConstraintActionType; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; +import org.h2.table.Table; /** * This class represents the statement * ALTER TABLE DROP CONSTRAINT */ -public class AlterTableDropConstraint extends SchemaCommand { +public class AlterTableDropConstraint extends AlterTable { private String constraintName; private final boolean ifExists; + private ConstraintActionType dropAction; - public AlterTableDropConstraint(Session session, Schema schema, - boolean ifExists) { + public AlterTableDropConstraint(SessionLocal session, Schema schema, boolean ifExists) { super(session, schema); this.ifExists = ifExists; + dropAction = session.getDatabase().getSettings().dropRestrict ? 
+ ConstraintActionType.RESTRICT : ConstraintActionType.CASCADE; } public void setConstraintName(String string) { constraintName = string; } + public void setDropAction(ConstraintActionType dropAction) { + this.dropAction = dropAction; + } + @Override - public int update() { - session.commit(true); + public long update(Table table) { Constraint constraint = getSchema().findConstraint(session, constraintName); - if (constraint == null) { + Type constraintType; + if (constraint == null || (constraintType = constraint.getConstraintType()) == Type.DOMAIN + || constraint.getTable() != table) { if (!ifExists) { throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, constraintName); } } else { - session.getUser().checkRight(constraint.getTable(), Right.ALL); - session.getUser().checkRight(constraint.getRefTable(), Right.ALL); + Table refTable = constraint.getRefTable(); + if (refTable != table) { + session.getUser().checkTableRight(refTable, Right.SCHEMA_OWNER); + } + if (constraintType == Type.PRIMARY_KEY || constraintType == Type.UNIQUE) { + for (Constraint c : constraint.getTable().getConstraints()) { + if (c.getReferencedConstraint() == constraint) { + if (dropAction == ConstraintActionType.RESTRICT) { + throw DbException.get(ErrorCode.CONSTRAINT_IS_USED_BY_CONSTRAINT_2, + constraint.getTraceSQL(), c.getTraceSQL()); + } + Table t = c.getTable(); + if (t != table && t != refTable) { + session.getUser().checkTableRight(t, Right.SCHEMA_OWNER); + } + } + } + } session.getDatabase().removeSchemaObject(session, constraint); } return 0; diff --git a/h2/src/main/org/h2/command/ddl/AlterTableRename.java b/h2/src/main/org/h2/command/ddl/AlterTableRename.java index 58d3086ff7..948b4878d2 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableRename.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableRename.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,8 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.Table; @@ -18,57 +17,38 @@ * This class represents the statement * ALTER TABLE RENAME */ -public class AlterTableRename extends SchemaCommand { +public class AlterTableRename extends AlterTable { - private boolean ifTableExists; - private String oldTableName; private String newTableName; private boolean hidden; - public AlterTableRename(Session session, Schema schema) { + public AlterTableRename(SessionLocal session, Schema schema) { super(session, schema); } - public void setIfTableExists(boolean b) { - ifTableExists = b; - } - - public void setOldTableName(String name) { - oldTableName = name; - } - public void setNewTableName(String name) { newTableName = name; } @Override - public int update() { - session.commit(true); + public long update(Table table) { Database db = session.getDatabase(); - Table oldTable = getSchema().findTableOrView(session, oldTableName); - if (oldTable == null) { - if (ifTableExists) { - return 0; - } - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, oldTableName); - } - session.getUser().checkRight(oldTable, Right.ALL); Table t = getSchema().findTableOrView(session, newTableName); - if (t != null && hidden && newTableName.equals(oldTable.getName())) { + if (t != null && hidden && newTableName.equals(table.getName())) { if (!t.isHidden()) { t.setHidden(hidden); - oldTable.setHidden(true); - db.updateMeta(session, oldTable); + table.setHidden(true); + db.updateMeta(session, table); } return 0; } - if (t != null || 
newTableName.equals(oldTable.getName())) { + if (t != null || newTableName.equals(table.getName())) { throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, newTableName); } - if (oldTable.isTemporary()) { + if (table.isTemporary()) { throw DbException.getUnsupportedException("temp table"); } - db.renameSchemaObject(session, oldTable, newTableName); + db.renameSchemaObject(session, table, newTableName); return 0; } diff --git a/h2/src/main/org/h2/command/ddl/AlterTableRenameColumn.java b/h2/src/main/org/h2/command/ddl/AlterTableRenameColumn.java index de98a36336..104d514108 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableRenameColumn.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableRenameColumn.java @@ -1,19 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; -import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.constraint.ConstraintReferential; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.message.DbException; +import org.h2.engine.SessionLocal; import org.h2.schema.Schema; import org.h2.table.Column; import org.h2.table.Table; @@ -22,23 +18,18 @@ * This class represents the statement * ALTER TABLE ALTER COLUMN RENAME */ -public class AlterTableRenameColumn extends SchemaCommand { +public class AlterTableRenameColumn extends AlterTable { - private boolean ifTableExists; - private String tableName; + private boolean ifExists; private String oldName; private String newName; - public AlterTableRenameColumn(Session session, Schema schema) { + public AlterTableRenameColumn(SessionLocal session, Schema schema) { super(session, schema); } - public void setIfTableExists(boolean b) { - this.ifTableExists = b; - } - - public void setTableName(String tableName) { - this.tableName = tableName; + public void setIfExists(boolean b) { + this.ifExists = b; } public void setOldColumnName(String oldName) { @@ -50,27 +41,15 @@ public void setNewColumnName(String newName) { } @Override - public int update() { - session.commit(true); - Database db = session.getDatabase(); - Table table = getSchema().findTableOrView(session, tableName); - if (table == null) { - if (ifTableExists) { - return 0; - } - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + public long update(Table table) { + Column column = table.getColumn(oldName, ifExists); + if (column == null) { + return 0; } - Column column = table.getColumn(oldName); - session.getUser().checkRight(table, Right.ALL); table.checkSupportAlter(); - - // we need to update CHECK constraint - // since it might reference the name of the column - Expression 
newCheckExpr = column.getCheckConstraint(session, newName); table.renameColumn(column, newName); - column.removeCheckConstraint(); - column.addCheckConstraint(session, newCheckExpr); table.setModified(); + Database db = session.getDatabase(); db.updateMeta(session, table); // if we have foreign key constraints pointing at this table, we need to update them diff --git a/h2/src/main/org/h2/command/ddl/AlterTableRenameConstraint.java b/h2/src/main/org/h2/command/ddl/AlterTableRenameConstraint.java index 04ac7c34b5..3dce7f3a6c 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableRenameConstraint.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableRenameConstraint.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,46 +8,53 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; import org.h2.message.DbException; import org.h2.schema.Schema; +import org.h2.table.Table; /** * This class represents the statement * ALTER TABLE RENAME CONSTRAINT */ -public class AlterTableRenameConstraint extends SchemaCommand { +public class AlterTableRenameConstraint extends AlterTable { private String constraintName; private String newConstraintName; - public AlterTableRenameConstraint(Session session, Schema schema) { + public AlterTableRenameConstraint(SessionLocal session, Schema schema) { super(session, schema); } public void setConstraintName(String string) { constraintName = string; } + public void setNewConstraintName(String 
newName) { this.newConstraintName = newName; } @Override - public int update() { - session.commit(true); + public long update(Table table) { Constraint constraint = getSchema().findConstraint(session, constraintName); - if (constraint == null) { + Database db = session.getDatabase(); + if (constraint == null || constraint.getConstraintType() == Type.DOMAIN || constraint.getTable() != table) { throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, constraintName); } - if (getSchema().findConstraint(session, newConstraintName) != null || - newConstraintName.equals(constraintName)) { - throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, - newConstraintName); + if (getSchema().findConstraint(session, newConstraintName) != null + || newConstraintName.equals(constraintName)) { + throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, newConstraintName); + } + User user = session.getUser(); + Table refTable = constraint.getRefTable(); + if (refTable != table) { + user.checkTableRight(refTable, Right.SCHEMA_OWNER); } - session.getUser().checkRight(constraint.getTable(), Right.ALL); - session.getUser().checkRight(constraint.getRefTable(), Right.ALL); - session.getDatabase().renameSchemaObject(session, constraint, newConstraintName); + db.renameSchemaObject(session, constraint, newConstraintName); return 0; } diff --git a/h2/src/main/org/h2/command/ddl/AlterUser.java b/h2/src/main/org/h2/command/ddl/AlterUser.java index 5922a22cfb..adaf83ea64 100644 --- a/h2/src/main/org/h2/command/ddl/AlterUser.java +++ b/h2/src/main/org/h2/command/ddl/AlterUser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.User; import org.h2.expression.Expression; import org.h2.message.DbException; @@ -29,7 +29,7 @@ public class AlterUser extends DefineCommand { private Expression hash; private boolean admin; - public AlterUser(Session session) { + public AlterUser(SessionLocal session) { super(session); } @@ -62,8 +62,7 @@ public void setPassword(Expression password) { } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); switch (type) { case CommandInterface.ALTER_USER_SET_PASSWORD: @@ -85,13 +84,10 @@ public int update() { break; case CommandInterface.ALTER_USER_ADMIN: session.getUser().checkAdmin(); - if (!admin) { - user.checkOwnsNoSchemas(); - } user.setAdmin(admin); break; default: - DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } db.updateMeta(session, user); return 0; diff --git a/h2/src/main/org/h2/command/ddl/AlterView.java b/h2/src/main/org/h2/command/ddl/AlterView.java index a82ef402a0..27360167c4 100644 --- a/h2/src/main/org/h2/command/ddl/AlterView.java +++ b/h2/src/main/org/h2/command/ddl/AlterView.java @@ -1,13 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.command.CommandInterface; -import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.table.TableView; @@ -20,7 +19,7 @@ public class AlterView extends DefineCommand { private boolean ifExists; private TableView view; - public AlterView(Session session) { + public AlterView(SessionLocal session) { super(session); } @@ -33,12 +32,11 @@ public void setView(TableView view) { } @Override - public int update() { - session.commit(true); + public long update() { if (view == null && ifExists) { return 0; } - session.getUser().checkRight(view, Right.ALL); + session.getUser().checkSchemaOwner(view.getSchema()); DbException e = view.recompile(session, false, true); if (e != null) { throw e; diff --git a/h2/src/main/org/h2/command/ddl/Analyze.java b/h2/src/main/org/h2/command/ddl/Analyze.java index bcfccaf396..166d319685 100644 --- a/h2/src/main/org/h2/command/ddl/Analyze.java +++ b/h2/src/main/org/h2/command/ddl/Analyze.java @@ -1,25 +1,25 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; -import java.util.ArrayList; +import java.util.Arrays; + import org.h2.command.CommandInterface; -import org.h2.command.Prepared; +import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.expression.Parameter; -import org.h2.result.ResultInterface; +import org.h2.engine.SessionLocal; +import org.h2.index.Cursor; +import org.h2.result.Row; +import org.h2.schema.Schema; import org.h2.table.Column; import org.h2.table.Table; import org.h2.table.TableType; import org.h2.value.DataType; import org.h2.value.Value; -import org.h2.value.ValueInt; -import org.h2.value.ValueNull; /** * This class represents the statements @@ -27,6 +27,105 @@ */ public class Analyze extends DefineCommand { + private static final class SelectivityData { + + private long distinctCount; + + /** + * The number of occupied slots, excluding the zero element (if any). + */ + private int size; + + private int[] elements; + + /** + * Whether the zero element is present. 
+ */ + private boolean zeroElement; + + private int maxSize; + + SelectivityData() { + elements = new int[8]; + maxSize = 7; + } + + void add(Value v) { + int currentSize = currentSize(); + if (currentSize >= Constants.SELECTIVITY_DISTINCT_COUNT) { + size = 0; + Arrays.fill(elements, 0); + zeroElement = false; + distinctCount += currentSize; + } + int hash = v.hashCode(); + if (hash == 0) { + zeroElement = true; + } else { + if (size >= maxSize) { + rehash(); + } + add(hash); + } + } + + int getSelectivity(long count) { + int s; + if (count == 0) { + s = 0; + } else { + s = (int) (100 * (distinctCount + currentSize()) / count); + if (s <= 0) { + s = 1; + } + } + return s; + } + + private int currentSize() { + int size = this.size; + if (zeroElement) { + size++; + } + return size; + } + + private void add(int element) { + int len = elements.length; + int mask = len - 1; + int index = element & mask; + int plus = 1; + do { + int k = elements[index]; + if (k == 0) { + // found an empty record + size++; + elements[index] = element; + return; + } else if (k == element) { + // existing element + return; + } + index = (index + plus++) & mask; + } while (plus <= len); + // no space, ignore + } + + private void rehash() { + size = 0; + int[] oldElements = elements; + int len = oldElements.length << 1; + elements = new int[len]; + maxSize = (int) (len * 90L / 100); + for (int k : oldElements) { + if (k != 0) { + add(k); + } + } + } + + } + /** * The sample size. 
*/ @@ -36,7 +135,7 @@ public class Analyze extends DefineCommand { */ private Table table; - public Analyze(Session session) { + public Analyze(SessionLocal session) { super(session); sampleRows = session.getDatabase().getSettings().analyzeSample; } @@ -46,15 +145,16 @@ public void setTable(Table table) { } @Override - public int update() { - session.commit(true); + public long update() { session.getUser().checkAdmin(); Database db = session.getDatabase(); if (table != null) { analyzeTable(session, table, sampleRows, true); } else { - for (Table table : db.getAllTablesAndViews(false)) { - analyzeTable(session, table, sampleRows, true); + for (Schema schema : db.getAllSchemasNoMeta()) { + for (Table table : schema.getAllTablesAndViews(null)) { + analyzeTable(session, table, sampleRows, true); + } } } return 0; @@ -68,75 +168,57 @@ public int update() { * @param sample the number of sample rows * @param manual whether the command was called by the user */ - public static void analyzeTable(Session session, Table table, int sample, - boolean manual) { - if (table.getTableType() != TableType.TABLE || - table.isHidden() || session == null) { - return; - } - if (!manual) { - if (session.getDatabase().isSysTableLocked()) { - return; - } - if (table.hasSelectTrigger()) { - return; - } - } - if (table.isTemporary() && !table.isGlobalTemporary() - && session.findLocalTempTable(table.getName()) == null) { - return; - } - if (table.isLockedExclusively() && !table.isLockedExclusivelyBy(session)) { - return; - } - if (!session.getUser().hasRight(table, Right.SELECT)) { - return; - } - if (session.getCancel() != 0) { - // if the connection is closed and there is something to undo + public static void analyzeTable(SessionLocal session, Table table, int sample, boolean manual) { + if (table.getTableType() != TableType.TABLE // + || table.isHidden() // + || session == null // + || !manual && (session.getDatabase().isSysTableLocked() || table.hasSelectTrigger()) // + || 
table.isTemporary() && !table.isGlobalTemporary() // + && session.findLocalTempTable(table.getName()) == null // + || table.isLockedExclusively() && !table.isLockedExclusivelyBy(session) + || !session.getUser().hasTableRight(table, Right.SELECT) // + // if the connection is closed and there is something to undo + || session.getCancel() != 0) { return; } + table.lock(session, Table.READ_LOCK); Column[] columns = table.getColumns(); - if (columns.length == 0) { + int columnCount = columns.length; + if (columnCount == 0) { return; } - Database db = session.getDatabase(); - StringBuilder buff = new StringBuilder("SELECT "); - for (int i = 0, l = columns.length; i < l; i++) { - if (i > 0) { - buff.append(", "); + Cursor cursor = table.getScanIndex(session).find(session, null, null); + if (cursor.next()) { + SelectivityData[] array = new SelectivityData[columnCount]; + for (int i = 0; i < columnCount; i++) { + Column col = columns[i]; + if (!DataType.isLargeObject(col.getType().getValueType())) { + array[i] = new SelectivityData(); + } } - Column col = columns[i]; - if (DataType.isLargeObject(col.getType().getValueType())) { - // can not index LOB columns, so calculating - // the selectivity is not required - buff.append("MAX(NULL)"); - } else { - buff.append("SELECTIVITY("); - col.getSQL(buff, true).append(')'); + long rowNumber = 0; + do { + Row row = cursor.get(); + for (int i = 0; i < columnCount; i++) { + SelectivityData selectivity = array[i]; + if (selectivity != null) { + selectivity.add(row.getValue(i)); + } + } + rowNumber++; + } while ((sample <= 0 || rowNumber < sample) && cursor.next()); + for (int i = 0; i < columnCount; i++) { + SelectivityData selectivity = array[i]; + if (selectivity != null) { + columns[i].setSelectivity(selectivity.getSelectivity(rowNumber)); + } } - } - buff.append(" FROM "); - table.getSQL(buff, true); - if (sample > 0) { - buff.append(" FETCH FIRST ROW ONLY SAMPLE_SIZE ? 
"); - } - String sql = buff.toString(); - Prepared command = session.prepare(sql); - if (sample > 0) { - ArrayList params = command.getParameters(); - params.get(0).setValue(ValueInt.get(sample)); - } - ResultInterface result = command.query(0); - result.next(); - for (int j = 0; j < columns.length; j++) { - Value v = result.currentRow()[j]; - if (v != ValueNull.INSTANCE) { - int selectivity = v.getInt(); - columns[j].setSelectivity(selectivity); + } else { + for (int i = 0; i < columnCount; i++) { + columns[i].setSelectivity(0); } } - db.updateMeta(session, table); + session.getDatabase().updateMeta(session, table); } public void setTop(int top) { diff --git a/h2/src/main/org/h2/command/ddl/CommandWithColumns.java b/h2/src/main/org/h2/command/ddl/CommandWithColumns.java index 5465b2504d..b8cb76ec80 100644 --- a/h2/src/main/org/h2/command/ddl/CommandWithColumns.java +++ b/h2/src/main/org/h2/command/ddl/CommandWithColumns.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -10,7 +10,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Constants; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.Sequence; @@ -23,7 +23,7 @@ public abstract class CommandWithColumns extends SchemaCommand { private AlterTableAddConstraint primaryKey; - protected CommandWithColumns(Session session, Schema schema) { + protected CommandWithColumns(SessionLocal session, Schema schema) { super(session, schema); } @@ -86,7 +86,7 @@ protected void createConstraints() { } /** - * For the given list of columns, create sequences for auto-increment + * For the given list of columns, create sequences for identity * columns (if needed), and then get the list of all sequences of the * columns. * @@ -98,11 +98,11 @@ protected ArrayList generateSequences(ArrayList columns, boole ArrayList sequences = new ArrayList<>(columns == null ? 
0 : columns.size()); if (columns != null) { for (Column c : columns) { - if (c.isAutoIncrement()) { + if (c.hasIdentityOptions()) { int objId = session.getDatabase().allocateObjectId(); - c.convertAutoIncrementToSequence(session, getSchema(), objId, temporary); + c.initializeSequence(session, getSchema(), objId, temporary); if (!Constants.CLUSTERING_DISABLED.equals(session.getDatabase().getCluster())) { - throw DbException.getUnsupportedException("CLUSTERING && auto-increment columns"); + throw DbException.getUnsupportedException("CLUSTERING && identity columns"); } } Sequence seq = c.getSequence(); @@ -158,4 +158,8 @@ private boolean setPrimaryKey(AlterTableAddConstraint primaryKey) { return false; } + public AlterTableAddConstraint getPrimaryKey() { + return primaryKey; + } + } diff --git a/h2/src/main/org/h2/command/ddl/CreateAggregate.java b/h2/src/main/org/h2/command/ddl/CreateAggregate.java index b7bf48d4f3..000f09fe05 100644 --- a/h2/src/main/org/h2/command/ddl/CreateAggregate.java +++ b/h2/src/main/org/h2/command/ddl/CreateAggregate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,50 +8,43 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.UserAggregate; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; +import org.h2.schema.UserAggregate; /** * This class represents the statement * CREATE AGGREGATE */ -public class CreateAggregate extends DefineCommand { +public class CreateAggregate extends SchemaCommand { - private Schema schema; private String name; private String javaClassMethod; private boolean ifNotExists; private boolean force; - public CreateAggregate(Session session) { - super(session); + public CreateAggregate(SessionLocal session, Schema schema) { + super(session, schema); } @Override - public int update() { - session.commit(true); + public long update() { session.getUser().checkAdmin(); Database db = session.getDatabase(); - if (db.findAggregate(name) != null || schema.findFunction(name) != null) { + Schema schema = getSchema(); + if (schema.findFunctionOrAggregate(name) != null) { if (!ifNotExists) { - throw DbException.get( - ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, name); + throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, name); } } else { int id = getObjectId(); - UserAggregate aggregate = new UserAggregate( - db, id, name, javaClassMethod, force); - db.addDatabaseObject(session, aggregate); + UserAggregate aggregate = new UserAggregate(schema, id, name, javaClassMethod, force); + db.addSchemaObject(session, aggregate); } return 0; } - public void setSchema(Schema schema) { - this.schema = schema; - } - public void setName(String name) { this.name = name; } diff --git a/h2/src/main/org/h2/command/ddl/CreateConstant.java b/h2/src/main/org/h2/command/ddl/CreateConstant.java index 8e3e31a075..a66b8c3a23 100644 --- a/h2/src/main/org/h2/command/ddl/CreateConstant.java +++ 
b/h2/src/main/org/h2/command/ddl/CreateConstant.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.schema.Constant; @@ -19,13 +19,13 @@ * This class represents the statement * CREATE CONSTANT */ -public class CreateConstant extends SchemaCommand { +public class CreateConstant extends SchemaOwnerCommand { private String constantName; private Expression expression; private boolean ifNotExists; - public CreateConstant(Session session, Schema schema) { + public CreateConstant(SessionLocal session, Schema schema) { super(session, schema); } @@ -34,18 +34,16 @@ public void setIfNotExists(boolean ifNotExists) { } @Override - public int update() { - session.commit(true); - session.getUser().checkAdmin(); + long update(Schema schema) { Database db = session.getDatabase(); - if (getSchema().findConstant(constantName) != null) { + if (schema.findConstant(constantName) != null) { if (ifNotExists) { return 0; } throw DbException.get(ErrorCode.CONSTANT_ALREADY_EXISTS_1, constantName); } int id = getObjectId(); - Constant constant = new Constant(getSchema(), id, constantName); + Constant constant = new Constant(schema, id, constantName); expression = expression.optimize(session); Value value = expression.getValue(session); constant.setValue(value); diff --git a/h2/src/main/org/h2/command/ddl/CreateDomain.java b/h2/src/main/org/h2/command/ddl/CreateDomain.java index f1d10cead3..2af747f546 100644 --- 
a/h2/src/main/org/h2/command/ddl/CreateDomain.java +++ b/h2/src/main/org/h2/command/ddl/CreateDomain.java @@ -1,78 +1,113 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; +import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.engine.Database; -import org.h2.engine.Domain; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; import org.h2.message.DbException; -import org.h2.table.Column; +import org.h2.schema.Domain; +import org.h2.schema.Schema; import org.h2.table.Table; +import org.h2.util.HasSQL; +import org.h2.util.Utils; import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; /** * This class represents the statement * CREATE DOMAIN */ -public class CreateDomain extends DefineCommand { +public class CreateDomain extends SchemaOwnerCommand { private String typeName; - private Column column; private boolean ifNotExists; - public CreateDomain(Session session) { - super(session); + private TypeInfo dataType; + + private Domain parentDomain; + + private Expression defaultExpression; + + private Expression onUpdateExpression; + + private String comment; + + private ArrayList constraintCommands; + + public CreateDomain(SessionLocal session, Schema schema) { + super(session, schema); } public void setTypeName(String name) { this.typeName = name; } - public void setColumn(Column column) { - this.column = column; - } - public void setIfNotExists(boolean ifNotExists) { this.ifNotExists = ifNotExists; } + public void setDataType(TypeInfo dataType) { + this.dataType = dataType; + } + + public void setParentDomain(Domain parentDomain) { + 
this.parentDomain = parentDomain; + } + + public void setDefaultExpression(Expression defaultExpression) { + this.defaultExpression = defaultExpression; + } + + public void setOnUpdateExpression(Expression onUpdateExpression) { + this.onUpdateExpression = onUpdateExpression; + } + + public void setComment(String comment) { + this.comment = comment; + } + @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); - Database db = session.getDatabase(); - session.getUser().checkAdmin(); - if (db.findDomain(typeName) != null) { + long update(Schema schema) { + if (schema.findDomain(typeName) != null) { if (ifNotExists) { return 0; } - throw DbException.get( - ErrorCode.DOMAIN_ALREADY_EXISTS_1, - typeName); + throw DbException.get(ErrorCode.DOMAIN_ALREADY_EXISTS_1, typeName); } - DataType builtIn = DataType.getTypeByName(typeName, session.getDatabase().getMode()); - if (builtIn != null) { - if (!builtIn.hidden) { - throw DbException.get( - ErrorCode.DOMAIN_ALREADY_EXISTS_1, - typeName); - } - Table table = session.getDatabase().getFirstUserTable(); - if (table != null) { - StringBuilder builder = new StringBuilder(typeName).append(" ("); - table.getSQL(builder, false).append(')'); - throw DbException.get(ErrorCode.DOMAIN_ALREADY_EXISTS_1, builder.toString()); + if (typeName.indexOf(' ') < 0) { + DataType builtIn = DataType.getTypeByName(typeName, session.getDatabase().getMode()); + if (builtIn != null) { + if (session.getDatabase().equalsIdentifiers(typeName, Value.getTypeName(builtIn.type))) { + throw DbException.get(ErrorCode.DOMAIN_ALREADY_EXISTS_1, typeName); + } + Table table = session.getDatabase().getFirstUserTable(); + if (table != null) { + StringBuilder builder = new StringBuilder(typeName).append(" ("); + table.getSQL(builder, HasSQL.TRACE_SQL_FLAGS).append(')'); + throw DbException.get(ErrorCode.DOMAIN_ALREADY_EXISTS_1, builder.toString()); + } } } int id = getObjectId(); - Domain type = new Domain(db, id, typeName); - 
type.setColumn(column); - db.addDatabaseObject(session, type); + Domain domain = new Domain(schema, id, typeName); + domain.setDataType(dataType != null ? dataType : parentDomain.getDataType()); + domain.setDomain(parentDomain); + domain.setDefaultExpression(session, defaultExpression); + domain.setOnUpdateExpression(session, onUpdateExpression); + domain.setComment(comment); + schema.getDatabase().addSchemaObject(session, domain); + if (constraintCommands != null) { + for (AlterDomainAddConstraint command : constraintCommands) { + command.update(); + } + } return 0; } @@ -81,4 +116,16 @@ public int getType() { return CommandInterface.CREATE_DOMAIN; } + /** + * Add a constraint command. + * + * @param command the command to add + */ + public void addConstraintCommand(AlterDomainAddConstraint command) { + if (constraintCommands == null) { + constraintCommands = Utils.newSmallArrayList(); + } + constraintCommands.add(command); + } + } diff --git a/h2/src/main/org/h2/command/ddl/CreateFunctionAlias.java b/h2/src/main/org/h2/command/ddl/CreateFunctionAlias.java index 9796c56e15..0641dbce33 100644 --- a/h2/src/main/org/h2/command/ddl/CreateFunctionAlias.java +++ b/h2/src/main/org/h2/command/ddl/CreateFunctionAlias.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,9 +8,9 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.FunctionAlias; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; +import org.h2.schema.FunctionAlias; import org.h2.schema.Schema; import org.h2.util.StringUtils; @@ -26,33 +26,27 @@ public class CreateFunctionAlias extends SchemaCommand { private boolean ifNotExists; private boolean force; private String source; - private boolean bufferResultSetToLocalTemp = true; - public CreateFunctionAlias(Session session, Schema schema) { + public CreateFunctionAlias(SessionLocal session, Schema schema) { super(session, schema); } @Override - public int update() { - session.commit(true); + public long update() { session.getUser().checkAdmin(); Database db = session.getDatabase(); - if (getSchema().findFunction(aliasName) != null) { + Schema schema = getSchema(); + if (schema.findFunctionOrAggregate(aliasName) != null) { if (!ifNotExists) { - throw DbException.get( - ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, aliasName); + throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, aliasName); } } else { int id = getObjectId(); FunctionAlias functionAlias; if (javaClassMethod != null) { - functionAlias = FunctionAlias.newInstance(getSchema(), id, - aliasName, javaClassMethod, force, - bufferResultSetToLocalTemp); + functionAlias = FunctionAlias.newInstance(schema, id, aliasName, javaClassMethod, force); } else { - functionAlias = FunctionAlias.newInstanceFromSource( - getSchema(), id, aliasName, source, force, - bufferResultSetToLocalTemp); + functionAlias = FunctionAlias.newInstanceFromSource(schema, id, aliasName, source, force); } functionAlias.setDeterministic(deterministic); db.addSchemaObject(session, functionAlias); @@ -85,15 +79,6 @@ public void setDeterministic(boolean deterministic) { this.deterministic = 
deterministic; } - /** - * Should the return value ResultSet be buffered in a local temporary file? - * - * @param b the new value - */ - public void setBufferResultSetToLocalTemp(boolean b) { - this.bufferResultSetToLocalTemp = b; - } - public void setSource(String source) { this.source = source; } diff --git a/h2/src/main/org/h2/command/ddl/CreateIndex.java b/h2/src/main/org/h2/command/ddl/CreateIndex.java index b94b388a39..cf00511c40 100644 --- a/h2/src/main/org/h2/command/ddl/CreateIndex.java +++ b/h2/src/main/org/h2/command/ddl/CreateIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -10,7 +10,7 @@ import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.index.IndexType; import org.h2.message.DbException; import org.h2.schema.Schema; @@ -26,12 +26,13 @@ public class CreateIndex extends SchemaCommand { private String tableName; private String indexName; private IndexColumn[] indexColumns; - private boolean primaryKey, unique, hash, spatial, affinity; + private int uniqueColumnCount; + private boolean primaryKey, hash, spatial; private boolean ifTableExists; private boolean ifNotExists; private String comment; - public CreateIndex(Session session, Schema schema) { + public CreateIndex(SessionLocal session, Schema schema) { super(session, schema); } @@ -56,10 +57,7 @@ public void setIndexColumns(IndexColumn[] columns) { } @Override - public int update() { - if (!transactional) { - session.commit(true); - } + public long update() { Database db = session.getDatabase(); boolean persistent = db.isPersistent(); Table table = 
getSchema().findTableOrView(session, tableName); @@ -75,8 +73,8 @@ public int update() { } throw DbException.get(ErrorCode.INDEX_ALREADY_EXISTS_1, indexName); } - session.getUser().checkRight(table, Right.ALL); - table.lock(session, true, true); + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); + table.lock(session, Table.EXCLUSIVE_LOCK); if (!table.isPersistIndexes()) { persistent = false; } @@ -96,16 +94,13 @@ public int update() { throw DbException.get(ErrorCode.SECOND_PRIMARY_KEY); } indexType = IndexType.createPrimaryKey(persistent, hash); - } else if (unique) { + } else if (uniqueColumnCount > 0) { indexType = IndexType.createUnique(persistent, hash); - } else if (affinity) { - indexType = IndexType.createAffinity(); } else { indexType = IndexType.createNonUnique(persistent, hash, spatial); } IndexColumn.mapColumns(indexColumns, table); - table.addIndex(session, indexName, id, indexColumns, indexType, create, - comment); + table.addIndex(session, indexName, id, indexColumns, uniqueColumnCount, indexType, create, comment); return 0; } @@ -113,8 +108,8 @@ public void setPrimaryKey(boolean b) { this.primaryKey = b; } - public void setUnique(boolean b) { - this.unique = b; + public void setUniqueColumnCount(int uniqueColumnCount) { + this.uniqueColumnCount = uniqueColumnCount; } public void setHash(boolean b) { @@ -125,10 +120,6 @@ public void setSpatial(boolean b) { this.spatial = b; } - public void setAffinity(boolean b) { - this.affinity = b; - } - public void setComment(String comment) { this.comment = comment; } diff --git a/h2/src/main/org/h2/command/ddl/CreateLinkedTable.java b/h2/src/main/org/h2/command/ddl/CreateLinkedTable.java index 14640af80e..d7ea31eaac 100644 --- a/h2/src/main/org/h2/command/ddl/CreateLinkedTable.java +++ b/h2/src/main/org/h2/command/ddl/CreateLinkedTable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.TableLink; @@ -28,8 +28,10 @@ public class CreateLinkedTable extends SchemaCommand { private boolean temporary; private boolean globalTemporary; private boolean readOnly; + private int fetchSize; + private boolean autocommit = true; - public CreateLinkedTable(Session session, Schema schema) { + public CreateLinkedTable(SessionLocal session, Schema schema) { super(session, schema); } @@ -61,11 +63,28 @@ public void setIfNotExists(boolean ifNotExists) { this.ifNotExists = ifNotExists; } + /** + * Specify the number of rows fetched by the linked table command + * + * @param fetchSize to set + */ + public void setFetchSize(int fetchSize) { + this.fetchSize = fetchSize; + } + + /** + * Specify if the autocommit mode is activated or not + * + * @param mode to set + */ + public void setAutoCommit(boolean mode) { + this.autocommit= mode; + } + @Override - public int update() { - session.commit(true); - Database db = session.getDatabase(); + public long update() { session.getUser().checkAdmin(); + Database db = session.getDatabase(); if (getSchema().resolveTableOrView(session, tableName) != null) { if (ifNotExists) { return 0; @@ -80,6 +99,10 @@ public int update() { table.setGlobalTemporary(globalTemporary); table.setComment(comment); table.setReadOnly(readOnly); + if (fetchSize > 0) { + table.setFetchSize(fetchSize); + } + table.setAutoCommit(autocommit); if (temporary && !globalTemporary) { session.addLocalTempTable(table); } else { diff --git a/h2/src/main/org/h2/command/ddl/CreateRole.java 
b/h2/src/main/org/h2/command/ddl/CreateRole.java index 7e17c38111..3add534252 100644 --- a/h2/src/main/org/h2/command/ddl/CreateRole.java +++ b/h2/src/main/org/h2/command/ddl/CreateRole.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,8 +8,9 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; +import org.h2.engine.RightOwner; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; /** @@ -21,7 +22,7 @@ public class CreateRole extends DefineCommand { private String roleName; private boolean ifNotExists; - public CreateRole(Session session) { + public CreateRole(SessionLocal session) { super(session); } @@ -34,18 +35,18 @@ public void setRoleName(String name) { } @Override - public int update() { + public long update() { session.getUser().checkAdmin(); - session.commit(true); Database db = session.getDatabase(); - if (db.findUser(roleName) != null) { - throw DbException.get(ErrorCode.USER_ALREADY_EXISTS_1, roleName); - } - if (db.findRole(roleName) != null) { - if (ifNotExists) { - return 0; + RightOwner rightOwner = db.findUserOrRole(roleName); + if (rightOwner != null) { + if (rightOwner instanceof Role) { + if (ifNotExists) { + return 0; + } + throw DbException.get(ErrorCode.ROLE_ALREADY_EXISTS_1, roleName); } - throw DbException.get(ErrorCode.ROLE_ALREADY_EXISTS_1, roleName); + throw DbException.get(ErrorCode.USER_ALREADY_EXISTS_1, roleName); } int id = getObjectId(); Role role = new Role(db, id, roleName, false); diff --git a/h2/src/main/org/h2/command/ddl/CreateSchema.java b/h2/src/main/org/h2/command/ddl/CreateSchema.java index 
172cbc4f4a..fbab006152 100644 --- a/h2/src/main/org/h2/command/ddl/CreateSchema.java +++ b/h2/src/main/org/h2/command/ddl/CreateSchema.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -9,8 +9,8 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.User; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; @@ -25,7 +25,7 @@ public class CreateSchema extends DefineCommand { private boolean ifNotExists; private ArrayList tableEngineParams; - public CreateSchema(Session session) { + public CreateSchema(SessionLocal session) { super(session); } @@ -34,14 +34,12 @@ public void setIfNotExists(boolean ifNotExists) { } @Override - public int update() { + public long update() { session.getUser().checkSchemaAdmin(); - session.commit(true); Database db = session.getDatabase(); - User user = db.getUser(authorization); - // during DB startup, the Right/Role records have not yet been loaded - if (!db.isStarting()) { - user.checkSchemaAdmin(); + RightOwner owner = db.findUserOrRole(authorization); + if (owner == null) { + throw DbException.get(ErrorCode.USER_OR_ROLE_NOT_FOUND_1, authorization); } if (db.findSchema(schemaName) != null) { if (ifNotExists) { @@ -50,7 +48,7 @@ public int update() { throw DbException.get(ErrorCode.SCHEMA_ALREADY_EXISTS_1, schemaName); } int id = getObjectId(); - Schema schema = new Schema(db, id, schemaName, user, false); + Schema schema = new Schema(db, id, schemaName, owner, false); schema.setTableEngineParams(tableEngineParams); db.addDatabaseObject(session, schema); 
return 0; diff --git a/h2/src/main/org/h2/command/ddl/CreateSequence.java b/h2/src/main/org/h2/command/ddl/CreateSequence.java index 1210e7844d..896a326337 100644 --- a/h2/src/main/org/h2/command/ddl/CreateSequence.java +++ b/h2/src/main/org/h2/command/ddl/CreateSequence.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.Sequence; @@ -16,7 +16,7 @@ /** * This class represents the statement CREATE SEQUENCE. */ -public class CreateSequence extends SchemaCommand { +public class CreateSequence extends SchemaOwnerCommand { private String sequenceName; @@ -26,8 +26,9 @@ public class CreateSequence extends SchemaCommand { private boolean belongsToTable; - public CreateSequence(Session session, Schema schema) { + public CreateSequence(SessionLocal session, Schema schema) { super(session, schema); + transactional = true; } public void setSequenceName(String sequenceName) { @@ -43,19 +44,16 @@ public void setOptions(SequenceOptions options) { } @Override - public int update() { - session.commit(true); + long update(Schema schema) { Database db = session.getDatabase(); - if (getSchema().findSequence(sequenceName) != null) { + if (schema.findSequence(sequenceName) != null) { if (ifNotExists) { return 0; } throw DbException.get(ErrorCode.SEQUENCE_ALREADY_EXISTS_1, sequenceName); } int id = getObjectId(); - Sequence sequence = new Sequence(getSchema(), id, sequenceName, options.getStartValue(session), - 
options.getIncrement(session), options.getCacheSize(session), options.getMinValue(null, session), - options.getMaxValue(null, session), Boolean.TRUE.equals(options.getCycle()), belongsToTable); + Sequence sequence = new Sequence(session, schema, id, sequenceName, options, belongsToTable); db.addSchemaObject(session, sequence); return 0; } diff --git a/h2/src/main/org/h2/command/ddl/CreateSynonym.java b/h2/src/main/org/h2/command/ddl/CreateSynonym.java index a708f63520..5f94ad93b4 100644 --- a/h2/src/main/org/h2/command/ddl/CreateSynonym.java +++ b/h2/src/main/org/h2/command/ddl/CreateSynonym.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.TableSynonym; @@ -17,14 +17,14 @@ * This class represents the statement * CREATE SYNONYM */ -public class CreateSynonym extends SchemaCommand { +public class CreateSynonym extends SchemaOwnerCommand { private final CreateSynonymData data = new CreateSynonymData(); private boolean ifNotExists; private boolean orReplace; private String comment; - public CreateSynonym(Session session, Schema schema) { + public CreateSynonym(SessionLocal session, Schema schema) { super(session, schema); } @@ -47,16 +47,12 @@ public void setIfNotExists(boolean ifNotExists) { public void setOrReplace(boolean orReplace) { this.orReplace = orReplace; } @Override - public int update() { - if (!transactional) { - session.commit(true); - } - session.getUser().checkAdmin(); + long update(Schema 
schema) { Database db = session.getDatabase(); data.session = session; db.lockMeta(session); - if (getSchema().findTableOrView(session, data.synonymName) != null) { + if (schema.findTableOrView(session, data.synonymName) != null) { throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, data.synonymName); } diff --git a/h2/src/main/org/h2/command/ddl/CreateSynonymData.java b/h2/src/main/org/h2/command/ddl/CreateSynonymData.java index fccefbd039..6e1122d749 100644 --- a/h2/src/main/org/h2/command/ddl/CreateSynonymData.java +++ b/h2/src/main/org/h2/command/ddl/CreateSynonymData.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.schema.Schema; /** @@ -39,6 +39,6 @@ public class CreateSynonymData { /** * The session. */ - public Session session; + public SessionLocal session; } diff --git a/h2/src/main/org/h2/command/ddl/CreateTable.java b/h2/src/main/org/h2/command/ddl/CreateTable.java index 1c5616eb63..213b178702 100644 --- a/h2/src/main/org/h2/command/ddl/CreateTable.java +++ b/h2/src/main/org/h2/command/ddl/CreateTable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -10,17 +10,16 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.command.dml.Insert; -import org.h2.command.dml.Query; +import org.h2.command.query.Query; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.Sequence; import org.h2.table.Column; import org.h2.table.Table; -import org.h2.util.ColumnNamer; import org.h2.value.Value; /** @@ -35,10 +34,9 @@ public class CreateTable extends CommandWithColumns { private boolean onCommitTruncate; private Query asQuery; private String comment; - private boolean sortedInsertMode; private boolean withNoData; - public CreateTable(Session session, Schema schema) { + public CreateTable(SessionLocal session, Schema schema) { super(session, schema); data.persistIndexes = true; data.persistData = true; @@ -61,24 +59,29 @@ public void addColumn(Column column) { data.columns.add(column); } + public ArrayList getColumns() { + return data.columns; + } + public void setIfNotExists(boolean ifNotExists) { this.ifNotExists = ifNotExists; } @Override - public int update() { - if (!transactional) { - session.commit(true); + public long update() { + Schema schema = getSchema(); + boolean isSessionTemporary = data.temporary && !data.globalTemporary; + if (!isSessionTemporary) { + session.getUser().checkSchemaOwner(schema); } Database db = session.getDatabase(); if (!db.isPersistent()) { data.persistIndexes = false; } - boolean isSessionTemporary = data.temporary && !data.globalTemporary; if (!isSessionTemporary) { db.lockMeta(session); } - if (getSchema().resolveTableOrView(session, data.tableName) != null) { + if (schema.resolveTableOrView(session, data.tableName) != null) { if (ifNotExists) { return 0; } @@ -102,9 +105,8 @@ public int update() { } 
changePrimaryKeysToNotNull(data.columns); data.id = getObjectId(); - data.create = create; data.session = session; - Table table = getSchema().createTable(data); + Table table = schema.createTable(data); ArrayList sequences = generateSequences(data.columns, data.temporary); table.setComment(comment); if (isSessionTemporary) { @@ -121,28 +123,12 @@ public int update() { } try { for (Column c : data.columns) { - c.prepareExpression(session); + c.prepareExpressions(session); } for (Sequence sequence : sequences) { table.addSequence(sequence); } createConstraints(); - if (asQuery != null && !withNoData) { - boolean old = session.isUndoLogEnabled(); - try { - session.setUndoLogEnabled(false); - session.startStatementWithinTransaction(); - Insert insert = new Insert(session); - insert.setSortedInsertMode(sortedInsertMode); - insert.setQuery(asQuery); - insert.setTable(table); - insert.setInsertFromSelect(true); - insert.prepare(); - insert.update(); - } finally { - session.setUndoLogEnabled(old); - } - } HashSet set = new HashSet<>(); table.addDependencies(set); for (DbObject obj : set) { @@ -164,6 +150,40 @@ public int update() { } } } + if (asQuery != null && !withNoData) { + boolean flushSequences = false; + if (!isSessionTemporary) { + db.unlockMeta(session); + for (Column c : table.getColumns()) { + Sequence s = c.getSequence(); + if (s != null) { + flushSequences = true; + s.setTemporary(true); + } + } + } + try { + session.startStatementWithinTransaction(null); + Insert insert = new Insert(session); + insert.setQuery(asQuery); + insert.setTable(table); + insert.setInsertFromSelect(true); + insert.prepare(); + insert.update(); + } finally { + session.endStatement(); + } + if (flushSequences) { + db.lockMeta(session); + for (Column c : table.getColumns()) { + Sequence s = c.getSequence(); + if (s != null) { + s.setTemporary(false); + s.flush(session); + } + } + } + } } catch (DbException e) { try { db.checkPowerOff(); @@ -182,12 +202,9 @@ public int update() { 
private void generateColumnsFromQuery() { int columnCount = asQuery.getColumnCount(); ArrayList expressions = asQuery.getExpressions(); - ColumnNamer columnNamer= new ColumnNamer(session); for (int i = 0; i < columnCount; i++) { Expression expr = expressions.get(i); - String name = columnNamer.getColumnName(expr, i, expr.getAlias()); - Column col = new Column(name, expr.getType()); - addColumn(col); + addColumn(new Column(expr.getColumnNameForView(session, i), expr.getType())); } } @@ -224,10 +241,6 @@ public void setPersistData(boolean persistData) { } } - public void setSortedInsertMode(boolean sortedInsertMode) { - this.sortedInsertMode = sortedInsertMode; - } - public void setWithNoData(boolean withNoData) { this.withNoData = withNoData; } diff --git a/h2/src/main/org/h2/command/ddl/CreateTableData.java b/h2/src/main/org/h2/command/ddl/CreateTableData.java index 13c16598ea..7549b15175 100644 --- a/h2/src/main/org/h2/command/ddl/CreateTableData.java +++ b/h2/src/main/org/h2/command/ddl/CreateTableData.java @@ -1,13 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.schema.Schema; import org.h2.table.Column; @@ -56,15 +56,10 @@ public class CreateTableData { */ public boolean persistData; - /** - * Whether to create a new table. - */ - public boolean create; - /** * The session. */ - public Session session; + public SessionLocal session; /** * The table engine to use for creating the table. 
diff --git a/h2/src/main/org/h2/command/ddl/CreateTrigger.java b/h2/src/main/org/h2/command/ddl/CreateTrigger.java index eebc88c126..9b098fe3e8 100644 --- a/h2/src/main/org/h2/command/ddl/CreateTrigger.java +++ b/h2/src/main/org/h2/command/ddl/CreateTrigger.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -9,7 +9,7 @@ import org.h2.api.Trigger; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.TriggerObject; @@ -36,7 +36,7 @@ public class CreateTrigger extends SchemaCommand { private boolean force; private boolean onRollback; - public CreateTrigger(Session session, Schema schema) { + public CreateTrigger(SessionLocal session, Schema schema) { super(session, schema); } @@ -85,8 +85,8 @@ public void setIfNotExists(boolean ifNotExists) { } @Override - public int update() { - session.commit(true); + public long update() { + session.getUser().checkAdmin(); Database db = session.getDatabase(); if (getSchema().findTrigger(triggerName) != null) { if (ifNotExists) { @@ -96,10 +96,18 @@ public int update() { ErrorCode.TRIGGER_ALREADY_EXISTS_1, triggerName); } - if ((typeMask & Trigger.SELECT) == Trigger.SELECT && rowBased) { - throw DbException.get( - ErrorCode.TRIGGER_SELECT_AND_ROW_BASED_NOT_SUPPORTED, - triggerName); + if ((typeMask & Trigger.SELECT) != 0) { + if (rowBased) { + throw DbException.get(ErrorCode.INVALID_TRIGGER_FLAGS_1, "SELECT + FOR EACH ROW"); + } + if (onRollback) { + throw DbException.get(ErrorCode.INVALID_TRIGGER_FLAGS_1, "SELECT + ROLLBACK"); + } + } else if ((typeMask & 
(Trigger.INSERT | Trigger.UPDATE | Trigger.DELETE)) == 0) { + if (onRollback) { + throw DbException.get(ErrorCode.INVALID_TRIGGER_FLAGS_1, "(!INSERT & !UPDATE & !DELETE) + ROLLBACK"); + } + throw DbException.getInternalError(); } int id = getObjectId(); Table table = getSchema().getTableOrView(session, tableName); diff --git a/h2/src/main/org/h2/command/ddl/CreateUser.java b/h2/src/main/org/h2/command/ddl/CreateUser.java index 966d71dcef..17983aad07 100644 --- a/h2/src/main/org/h2/command/ddl/CreateUser.java +++ b/h2/src/main/org/h2/command/ddl/CreateUser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,12 +8,15 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; import org.h2.engine.User; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.security.SHA256; import org.h2.util.StringUtils; +import org.h2.value.DataType; +import org.h2.value.Value; /** * This class represents the statement @@ -29,7 +32,7 @@ public class CreateUser extends DefineCommand { private boolean ifNotExists; private String comment; - public CreateUser(Session session) { + public CreateUser(SessionLocal session) { super(session); } @@ -53,12 +56,17 @@ public void setPassword(Expression password) { * @param salt the salt * @param hash the hash */ - static void setSaltAndHash(User user, Session session, Expression salt, Expression hash) { + static void setSaltAndHash(User user, SessionLocal session, Expression salt, Expression hash) { user.setSaltAndHash(getByteArray(session, salt), getByteArray(session, 
hash)); } - private static byte[] getByteArray(Session session, Expression e) { - String s = e.optimize(session).getValue(session).getString(); + private static byte[] getByteArray(SessionLocal session, Expression e) { + Value value = e.optimize(session).getValue(session); + if (DataType.isBinaryStringType(value.getValueType())) { + byte[] b = value.getBytes(); + return b == null ? new byte[0] : b; + } + String s = value.getString(); return s == null ? new byte[0] : StringUtils.convertHexToBytes(s); } @@ -69,7 +77,7 @@ private static byte[] getByteArray(Session session, Expression e) { * @param session the session * @param password the password */ - static void setPassword(User user, Session session, Expression password) { + static void setPassword(User user, SessionLocal session, Expression password) { String pwd = password.optimize(session).getValue(session).getString(); char[] passwordChars = pwd == null ? new char[0] : pwd.toCharArray(); byte[] userPasswordHash; @@ -83,18 +91,18 @@ static void setPassword(User user, Session session, Expression password) { } @Override - public int update() { + public long update() { session.getUser().checkAdmin(); - session.commit(true); Database db = session.getDatabase(); - if (db.findRole(userName) != null) { - throw DbException.get(ErrorCode.ROLE_ALREADY_EXISTS_1, userName); - } - if (db.findUser(userName) != null) { - if (ifNotExists) { - return 0; + RightOwner rightOwner = db.findUserOrRole(userName); + if (rightOwner != null) { + if (rightOwner instanceof User) { + if (ifNotExists) { + return 0; + } + throw DbException.get(ErrorCode.USER_ALREADY_EXISTS_1, userName); } - throw DbException.get(ErrorCode.USER_ALREADY_EXISTS_1, userName); + throw DbException.get(ErrorCode.ROLE_ALREADY_EXISTS_1, userName); } int id = getObjectId(); User user = new User(db, id, userName, false); @@ -105,7 +113,7 @@ public int update() { } else if (password != null) { setPassword(user, session, password); } else { - throw 
DbException.throwInternalError(); + throw DbException.getInternalError(); } db.addDatabaseObject(session, user); return 0; diff --git a/h2/src/main/org/h2/command/ddl/CreateView.java b/h2/src/main/org/h2/command/ddl/CreateView.java index 5c3663ec2a..dc397ae3da 100644 --- a/h2/src/main/org/h2/command/ddl/CreateView.java +++ b/h2/src/main/org/h2/command/ddl/CreateView.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,9 +8,9 @@ import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.command.dml.Query; +import org.h2.command.query.Query; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Parameter; import org.h2.message.DbException; import org.h2.schema.Schema; @@ -18,14 +18,14 @@ import org.h2.table.Table; import org.h2.table.TableType; import org.h2.table.TableView; +import org.h2.util.HasSQL; import org.h2.value.TypeInfo; -import org.h2.value.Value; /** * This class represents the statement * CREATE VIEW */ -public class CreateView extends SchemaCommand { +public class CreateView extends SchemaOwnerCommand { private Query select; private String viewName; @@ -37,7 +37,7 @@ public class CreateView extends SchemaCommand { private boolean force; private boolean isTableExpression; - public CreateView(Session session, Schema schema) { + public CreateView(SessionLocal session, Schema schema) { super(session, schema); } @@ -78,12 +78,10 @@ public void setTableExpression(boolean isTableExpression) { } @Override - public int update() { - session.commit(true); - session.getUser().checkAdmin(); + long update(Schema schema) { Database db = 
session.getDatabase(); TableView view = null; - Table old = getSchema().findTableOrView(session, viewName); + Table old = schema.findTableOrView(session, viewName); if (old != null) { if (ifNotExists) { return 0; @@ -102,7 +100,7 @@ public int update() { if (params != null && !params.isEmpty()) { throw DbException.getUnsupportedException("parameters in views"); } - querySQL = select.getPlanSQL(true); + querySQL = select.getPlanSQL(HasSQL.DEFAULT_SQL_FLAGS); } Column[] columnTemplatesAsUnknowns = null; Column[] columnTemplatesAsStrings = null; @@ -113,16 +111,16 @@ public int update() { // non table expressions are fine to use unknown column type columnTemplatesAsUnknowns[i] = new Column(columnNames[i], TypeInfo.TYPE_UNKNOWN); // table expressions can't have unknown types - so we use string instead - columnTemplatesAsStrings[i] = new Column(columnNames[i], Value.STRING); + columnTemplatesAsStrings[i] = new Column(columnNames[i], TypeInfo.TYPE_VARCHAR); } } if (view == null) { if (isTableExpression) { - view = TableView.createTableViewMaybeRecursive(getSchema(), id, viewName, querySQL, null, + view = TableView.createTableViewMaybeRecursive(schema, id, viewName, querySQL, null, columnTemplatesAsStrings, session, false /* literalsChecked */, isTableExpression, false/*isTemporary*/, db); } else { - view = new TableView(getSchema(), id, viewName, querySQL, null, columnTemplatesAsUnknowns, session, + view = new TableView(schema, id, viewName, querySQL, null, columnTemplatesAsUnknowns, session, false/* allow recursive */, false/* literalsChecked */, isTableExpression, false/*temporary*/); } } else { diff --git a/h2/src/main/org/h2/command/ddl/DeallocateProcedure.java b/h2/src/main/org/h2/command/ddl/DeallocateProcedure.java index 3375531495..dad6d054cb 100644 --- a/h2/src/main/org/h2/command/ddl/DeallocateProcedure.java +++ b/h2/src/main/org/h2/command/ddl/DeallocateProcedure.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.command.CommandInterface; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; /** * This class represents the statement @@ -16,12 +16,12 @@ public class DeallocateProcedure extends DefineCommand { private String procedureName; - public DeallocateProcedure(Session session) { + public DeallocateProcedure(SessionLocal session) { super(session); } @Override - public int update() { + public long update() { session.removeProcedure(procedureName); return 0; } diff --git a/h2/src/main/org/h2/command/ddl/DefineCommand.java b/h2/src/main/org/h2/command/ddl/DefineCommand.java index 834ca1d408..cf10794d56 100644 --- a/h2/src/main/org/h2/command/ddl/DefineCommand.java +++ b/h2/src/main/org/h2/command/ddl/DefineCommand.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.result.ResultInterface; /** @@ -26,7 +26,7 @@ public abstract class DefineCommand extends Prepared { * * @param session the session */ - DefineCommand(Session session) { + DefineCommand(SessionLocal session) { super(session); } diff --git a/h2/src/main/org/h2/command/ddl/DropAggregate.java b/h2/src/main/org/h2/command/ddl/DropAggregate.java index e2a8cba706..08cd6d5741 100644 --- a/h2/src/main/org/h2/command/ddl/DropAggregate.java +++ b/h2/src/main/org/h2/command/ddl/DropAggregate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,35 +8,34 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.UserAggregate; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.schema.UserAggregate; /** * This class represents the statement * DROP AGGREGATE */ -public class DropAggregate extends DefineCommand { +public class DropAggregate extends SchemaOwnerCommand { private String name; private boolean ifExists; - public DropAggregate(Session session) { - super(session); + public DropAggregate(SessionLocal session, Schema schema) { + super(session, schema); } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); + long update(Schema schema) { Database db = session.getDatabase(); - UserAggregate aggregate = db.findAggregate(name); + UserAggregate aggregate = schema.findAggregate(name); if (aggregate == null) { 
if (!ifExists) { throw DbException.get(ErrorCode.AGGREGATE_NOT_FOUND_1, name); } } else { - db.removeDatabaseObject(session, aggregate); + db.removeSchemaObject(session, aggregate); } return 0; } diff --git a/h2/src/main/org/h2/command/ddl/DropConstant.java b/h2/src/main/org/h2/command/ddl/DropConstant.java index 634d58c2e9..565031ee60 100644 --- a/h2/src/main/org/h2/command/ddl/DropConstant.java +++ b/h2/src/main/org/h2/command/ddl/DropConstant.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Constant; import org.h2.schema.Schema; @@ -17,12 +17,12 @@ * This class represents the statement * DROP CONSTANT */ -public class DropConstant extends SchemaCommand { +public class DropConstant extends SchemaOwnerCommand { private String constantName; private boolean ifExists; - public DropConstant(Session session, Schema schema) { + public DropConstant(SessionLocal session, Schema schema) { super(session, schema); } @@ -35,11 +35,9 @@ public void setConstantName(String constantName) { } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); + long update(Schema schema) { Database db = session.getDatabase(); - Constant constant = getSchema().findConstant(constantName); + Constant constant = schema.findConstant(constantName); if (constant == null) { if (!ifExists) { throw DbException.get(ErrorCode.CONSTANT_NOT_FOUND_1, constantName); diff --git a/h2/src/main/org/h2/command/ddl/DropDatabase.java 
b/h2/src/main/org/h2/command/ddl/DropDatabase.java index 8a083da087..a46fae9f6a 100644 --- a/h2/src/main/org/h2/command/ddl/DropDatabase.java +++ b/h2/src/main/org/h2/command/ddl/DropDatabase.java @@ -1,23 +1,27 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; +import java.util.Collection; import org.h2.command.CommandInterface; import org.h2.engine.Database; import org.h2.engine.DbObject; +import org.h2.engine.Right; +import org.h2.engine.RightOwner; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.User; import org.h2.schema.Schema; import org.h2.schema.SchemaObject; import org.h2.schema.Sequence; import org.h2.table.Table; import org.h2.table.TableType; +import org.h2.value.ValueNull; /** * This class represents the statement @@ -28,12 +32,12 @@ public class DropDatabase extends DefineCommand { private boolean dropAllObjects; private boolean deleteFiles; - public DropDatabase(Session session) { + public DropDatabase(SessionLocal session) { super(session); } @Override - public int update() { + public long update() { if (dropAllObjects) { dropAllObjects(); } @@ -44,8 +48,8 @@ public int update() { } private void dropAllObjects() { - session.getUser().checkAdmin(); - session.commit(true); + User user = session.getUser(); + user.checkAdmin(); Database db = session.getDatabase(); db.lockMeta(session); @@ -53,7 +57,7 @@ private void dropAllObjects() { // so we might need to loop over them multiple times. boolean runLoopAgain; do { - ArrayList
          tables = db.getAllTablesAndViews(false); + ArrayList
          tables = db.getAllTablesAndViews(); ArrayList
          toRemove = new ArrayList<>(tables.size()); for (Table t : tables) { if (t.getName() != null && @@ -94,54 +98,54 @@ private void dropAllObjects() { } while (runLoopAgain); // TODO session-local temp tables are not removed - for (Schema schema : db.getAllSchemas()) { + Collection schemas = db.getAllSchemasNoMeta(); + for (Schema schema : schemas) { if (schema.canDrop()) { db.removeDatabaseObject(session, schema); } } ArrayList list = new ArrayList<>(); - for (SchemaObject obj : db.getAllSchemaObjects(DbObject.SEQUENCE)) { - // ignore these. the ones we want to drop will get dropped when we - // drop their associated tables, and we will ignore the problematic - // ones that belong to session-local temp tables. - if (!((Sequence) obj).getBelongsToTable()) { - list.add(obj); + for (Schema schema : schemas) { + for (Sequence sequence : schema.getAllSequences()) { + // ignore these. the ones we want to drop will get dropped when we + // drop their associated tables, and we will ignore the problematic + // ones that belong to session-local temp tables. 
+ if (!sequence.getBelongsToTable()) { + list.add(sequence); + } } } // maybe constraints and triggers on system tables will be allowed in // the future - list.addAll(db.getAllSchemaObjects(DbObject.CONSTRAINT)); - list.addAll(db.getAllSchemaObjects(DbObject.TRIGGER)); - list.addAll(db.getAllSchemaObjects(DbObject.CONSTANT)); - list.addAll(db.getAllSchemaObjects(DbObject.FUNCTION_ALIAS)); + addAll(schemas, DbObject.CONSTRAINT, list); + addAll(schemas, DbObject.TRIGGER, list); + addAll(schemas, DbObject.CONSTANT, list); + // Function aliases and aggregates are stored together + addAll(schemas, DbObject.FUNCTION_ALIAS, list); + addAll(schemas, DbObject.DOMAIN, list); for (SchemaObject obj : list) { - if (obj.isHidden()) { + if (!obj.getSchema().isValid() || obj.isHidden()) { continue; } db.removeSchemaObject(session, obj); } - for (User user : db.getAllUsers()) { - if (user != session.getUser()) { - db.removeDatabaseObject(session, user); + Role publicRole = db.getPublicRole(); + for (RightOwner rightOwner : db.getAllUsersAndRoles()) { + if (rightOwner != user && rightOwner != publicRole) { + db.removeDatabaseObject(session, rightOwner); } } - for (Role role : db.getAllRoles()) { - String sql = role.getCreateSQL(); - // the role PUBLIC must not be dropped - if (sql != null) { - db.removeDatabaseObject(session, role); - } + for (Right right : db.getAllRights()) { + db.removeDatabaseObject(session, right); } - ArrayList dbObjects = new ArrayList<>(); - dbObjects.addAll(db.getAllRights()); - dbObjects.addAll(db.getAllAggregates()); - dbObjects.addAll(db.getAllDomains()); - for (DbObject obj : dbObjects) { - String sql = obj.getCreateSQL(); - // the role PUBLIC must not be dropped - if (sql != null) { - db.removeDatabaseObject(session, obj); - } + for (SessionLocal s : db.getSessions(false)) { + s.setLastIdentity(ValueNull.INSTANCE); + } + } + + private static void addAll(Collection schemas, int type, ArrayList list) { + for (Schema schema : schemas) { + 
schema.getAll(type, list); } } diff --git a/h2/src/main/org/h2/command/ddl/DropDomain.java b/h2/src/main/org/h2/command/ddl/DropDomain.java index 365e2a678d..8426dc2390 100644 --- a/h2/src/main/org/h2/command/ddl/DropDomain.java +++ b/h2/src/main/org/h2/command/ddl/DropDomain.java @@ -1,38 +1,36 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; +import java.util.ArrayList; + import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.constraint.ConstraintActionType; -import org.h2.engine.Database; -import org.h2.engine.Domain; -import org.h2.engine.Session; +import org.h2.constraint.ConstraintDomain; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; import org.h2.table.Column; +import org.h2.table.ColumnTemplate; import org.h2.table.Table; /** - * This class represents the statement - * DROP DOMAIN + * This class represents the statement DROP DOMAIN */ -public class DropDomain extends DefineCommand { +public class DropDomain extends AlterDomain { - private String typeName; - private boolean ifExists; private ConstraintActionType dropAction; - public DropDomain(Session session) { - super(session); - dropAction = session.getDatabase().getSettings().dropRestrict ? - ConstraintActionType.RESTRICT : ConstraintActionType.CASCADE; - } - - public void setIfExists(boolean ifExists) { - this.ifExists = ifExists; + public DropDomain(SessionLocal session, Schema schema) { + super(session, schema); + dropAction = session.getDatabase().getSettings().dropRestrict ? 
ConstraintActionType.RESTRICT + : ConstraintActionType.CASCADE; } public void setDropAction(ConstraintActionType dropAction) { @@ -40,40 +38,66 @@ public void setDropAction(ConstraintActionType dropAction) { } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); - Database db = session.getDatabase(); - Domain type = db.findDomain(typeName); - if (type == null) { - if (!ifExists) { - throw DbException.get(ErrorCode.DOMAIN_NOT_FOUND_1, typeName); + long update(Schema schema, Domain domain) { + forAllDependencies(session, domain, this::copyColumn, this::copyDomain, true); + session.getDatabase().removeSchemaObject(session, domain); + return 0; + } + + private boolean copyColumn(Domain domain, Column targetColumn) { + Table targetTable = targetColumn.getTable(); + if (dropAction == ConstraintActionType.RESTRICT) { + throw DbException.get(ErrorCode.CANNOT_DROP_2, domainName, targetTable.getCreateSQL()); + } + String columnName = targetColumn.getName(); + ArrayList constraints = domain.getConstraints(); + if (constraints != null && !constraints.isEmpty()) { + for (ConstraintDomain constraint : constraints) { + Expression checkCondition = constraint.getCheckConstraint(session, columnName); + AlterTableAddConstraint check = new AlterTableAddConstraint(session, targetTable.getSchema(), + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK, false); + check.setTableName(targetTable.getName()); + check.setCheckExpression(checkCondition); + check.update(); } - } else { - for (Table t : db.getAllTablesAndViews(false)) { - boolean modified = false; - for (Column c : t.getColumns()) { - Domain domain = c.getDomain(); - if (domain != null && domain.getName().equals(typeName)) { - if (dropAction == ConstraintActionType.RESTRICT) { - throw DbException.get(ErrorCode.CANNOT_DROP_2, typeName, t.getCreateSQL()); - } - c.setOriginalSQL(type.getColumn().getOriginalSQL()); - c.setDomain(null); - modified = true; - } - } - if (modified) { - 
db.updateMeta(session, t); - } + } + copyExpressions(session, domain, targetColumn); + return true; + } + + private boolean copyDomain(Domain domain, Domain targetDomain) { + if (dropAction == ConstraintActionType.RESTRICT) { + throw DbException.get(ErrorCode.CANNOT_DROP_2, domainName, targetDomain.getTraceSQL()); + } + ArrayList constraints = domain.getConstraints(); + if (constraints != null && !constraints.isEmpty()) { + for (ConstraintDomain constraint : constraints) { + Expression checkCondition = constraint.getCheckConstraint(session, null); + AlterDomainAddConstraint check = new AlterDomainAddConstraint(session, targetDomain.getSchema(), // + false); + check.setDomainName(targetDomain.getName()); + check.setCheckExpression(checkCondition); + check.update(); } - db.removeDatabaseObject(session, type); } - return 0; + copyExpressions(session, domain, targetDomain); + return true; } - public void setTypeName(String name) { - this.typeName = name; + private static boolean copyExpressions(SessionLocal session, Domain domain, ColumnTemplate targetColumn) { + targetColumn.setDomain(domain.getDomain()); + Expression e = domain.getDefaultExpression(); + boolean modified = false; + if (e != null && targetColumn.getDefaultExpression() == null) { + targetColumn.setDefaultExpression(session, e); + modified = true; + } + e = domain.getOnUpdateExpression(); + if (e != null && targetColumn.getOnUpdateExpression() == null) { + targetColumn.setOnUpdateExpression(session, e); + modified = true; + } + return modified; } @Override diff --git a/h2/src/main/org/h2/command/ddl/DropFunctionAlias.java b/h2/src/main/org/h2/command/ddl/DropFunctionAlias.java index 978653e353..2a9fb641de 100644 --- a/h2/src/main/org/h2/command/ddl/DropFunctionAlias.java +++ b/h2/src/main/org/h2/command/ddl/DropFunctionAlias.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,30 +8,28 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.FunctionAlias; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; +import org.h2.schema.FunctionAlias; import org.h2.schema.Schema; /** * This class represents the statement * DROP ALIAS */ -public class DropFunctionAlias extends SchemaCommand { +public class DropFunctionAlias extends SchemaOwnerCommand { private String aliasName; private boolean ifExists; - public DropFunctionAlias(Session session, Schema schema) { + public DropFunctionAlias(SessionLocal session, Schema schema) { super(session, schema); } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); + long update(Schema schema) { Database db = session.getDatabase(); - FunctionAlias functionAlias = getSchema().findFunction(aliasName); + FunctionAlias functionAlias = schema.findFunction(aliasName); if (functionAlias == null) { if (!ifExists) { throw DbException.get(ErrorCode.FUNCTION_ALIAS_NOT_FOUND_1, aliasName); diff --git a/h2/src/main/org/h2/command/ddl/DropIndex.java b/h2/src/main/org/h2/command/ddl/DropIndex.java index 20d4f65db6..37b66aa011 100644 --- a/h2/src/main/org/h2/command/ddl/DropIndex.java +++ b/h2/src/main/org/h2/command/ddl/DropIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -12,7 +12,7 @@ import org.h2.constraint.Constraint; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.index.Index; import org.h2.message.DbException; import org.h2.schema.Schema; @@ -27,7 +27,7 @@ public class DropIndex extends SchemaCommand { private String indexName; private boolean ifExists; - public DropIndex(Session session, Schema schema) { + public DropIndex(SessionLocal session, Schema schema) { super(session, schema); } @@ -40,8 +40,7 @@ public void setIndexName(String indexName) { } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); Index index = getSchema().findIndex(session, indexName); if (index == null) { @@ -50,7 +49,7 @@ public int update() { } } else { Table table = index.getTable(); - session.getUser().checkRight(index.getTable(), Right.ALL); + session.getUser().checkTableRight(index.getTable(), Right.SCHEMA_OWNER); Constraint pkConstraint = null; ArrayList constraints = table.getConstraints(); for (int i = 0; constraints != null && i < constraints.size(); i++) { @@ -58,11 +57,15 @@ public int update() { if (cons.usesIndex(index)) { // can drop primary key index (for compatibility) if (Constraint.Type.PRIMARY_KEY == cons.getConstraintType()) { + for (Constraint c : constraints) { + if (c.getReferencedConstraint() == cons) { + throw DbException.get(ErrorCode.INDEX_BELONGS_TO_CONSTRAINT_2, indexName, + cons.getName()); + } + } pkConstraint = cons; } else { - throw DbException.get( - ErrorCode.INDEX_BELONGS_TO_CONSTRAINT_2, - indexName, cons.getName()); + throw DbException.get(ErrorCode.INDEX_BELONGS_TO_CONSTRAINT_2, indexName, cons.getName()); } } } diff --git a/h2/src/main/org/h2/command/ddl/DropRole.java b/h2/src/main/org/h2/command/ddl/DropRole.java index 06104d25da..5fdac3838c 100644 --- 
a/h2/src/main/org/h2/command/ddl/DropRole.java +++ b/h2/src/main/org/h2/command/ddl/DropRole.java @@ -1,16 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; /** @@ -22,7 +21,7 @@ public class DropRole extends DefineCommand { private String roleName; private boolean ifExists; - public DropRole(Session session) { + public DropRole(SessionLocal session) { super(session); } @@ -31,19 +30,19 @@ public void setRoleName(String roleName) { } @Override - public int update() { + public long update() { session.getUser().checkAdmin(); - session.commit(true); Database db = session.getDatabase(); - if (roleName.equals(Constants.PUBLIC_ROLE_NAME)) { - throw DbException.get(ErrorCode.ROLE_CAN_NOT_BE_DROPPED_1, roleName); - } Role role = db.findRole(roleName); if (role == null) { if (!ifExists) { throw DbException.get(ErrorCode.ROLE_NOT_FOUND_1, roleName); } } else { + if (role == db.getPublicRole()) { + throw DbException.get(ErrorCode.ROLE_CAN_NOT_BE_DROPPED_1, roleName); + } + role.checkOwnsNoSchemas(); db.removeDatabaseObject(session, role); } return 0; diff --git a/h2/src/main/org/h2/command/ddl/DropSchema.java b/h2/src/main/org/h2/command/ddl/DropSchema.java index 93614ff905..3a8ea29ce1 100644 --- a/h2/src/main/org/h2/command/ddl/DropSchema.java +++ b/h2/src/main/org/h2/command/ddl/DropSchema.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -10,7 +10,7 @@ import org.h2.command.CommandInterface; import org.h2.constraint.ConstraintActionType; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.SchemaObject; @@ -25,7 +25,7 @@ public class DropSchema extends DefineCommand { private boolean ifExists; private ConstraintActionType dropAction; - public DropSchema(Session session) { + public DropSchema(SessionLocal session) { super(session); dropAction = session.getDatabase().getSettings().dropRestrict ? ConstraintActionType.RESTRICT : ConstraintActionType.CASCADE; @@ -36,9 +36,7 @@ public void setSchemaName(String name) { } @Override - public int update() { - session.getUser().checkSchemaAdmin(); - session.commit(true); + public long update() { Database db = session.getDatabase(); Schema schema = db.findSchema(schemaName); if (schema == null) { @@ -46,6 +44,7 @@ public int update() { throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName); } } else { + session.getUser().checkSchemaOwner(schema); if (!schema.canDrop()) { throw DbException.get(ErrorCode.SCHEMA_CAN_NOT_BE_DROPPED_1, schemaName); } diff --git a/h2/src/main/org/h2/command/ddl/DropSequence.java b/h2/src/main/org/h2/command/ddl/DropSequence.java index 4395a590b3..451c628fee 100644 --- a/h2/src/main/org/h2/command/ddl/DropSequence.java +++ b/h2/src/main/org/h2/command/ddl/DropSequence.java @@ -1,14 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.Sequence; @@ -17,12 +16,12 @@ * This class represents the statement * DROP SEQUENCE */ -public class DropSequence extends SchemaCommand { +public class DropSequence extends SchemaOwnerCommand { private String sequenceName; private boolean ifExists; - public DropSequence(Session session, Schema schema) { + public DropSequence(SessionLocal session, Schema schema) { super(session, schema); } @@ -35,11 +34,8 @@ public void setSequenceName(String sequenceName) { } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); - Database db = session.getDatabase(); - Sequence sequence = getSchema().findSequence(sequenceName); + long update(Schema schema) { + Sequence sequence = schema.findSequence(sequenceName); if (sequence == null) { if (!ifExists) { throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, sequenceName); @@ -48,7 +44,7 @@ public int update() { if (sequence.getBelongsToTable()) { throw DbException.get(ErrorCode.SEQUENCE_BELONGS_TO_A_TABLE_1, sequenceName); } - db.removeSchemaObject(session, sequence); + session.getDatabase().removeSchemaObject(session, sequence); } return 0; } diff --git a/h2/src/main/org/h2/command/ddl/DropSynonym.java b/h2/src/main/org/h2/command/ddl/DropSynonym.java index 28fc9217a1..fcab524f5e 100644 --- a/h2/src/main/org/h2/command/ddl/DropSynonym.java +++ b/h2/src/main/org/h2/command/ddl/DropSynonym.java @@ -1,13 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.TableSynonym; @@ -16,12 +16,12 @@ * This class represents the statement * DROP SYNONYM */ -public class DropSynonym extends SchemaCommand { +public class DropSynonym extends SchemaOwnerCommand { private String synonymName; private boolean ifExists; - public DropSynonym(Session session, Schema schema) { + public DropSynonym(SessionLocal session, Schema schema) { super(session, schema); } @@ -30,11 +30,8 @@ public void setSynonymName(String name) { } @Override - public int update() { - session.commit(true); - session.getUser().checkAdmin(); - - TableSynonym synonym = getSchema().getSynonym(synonymName); + long update(Schema schema) { + TableSynonym synonym = schema.getSynonym(synonymName); if (synonym == null) { if (!ifExists) { throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, synonymName); diff --git a/h2/src/main/org/h2/command/ddl/DropTable.java b/h2/src/main/org/h2/command/ddl/DropTable.java index a2ee9e815c..c907d56e2b 100644 --- a/h2/src/main/org/h2/command/ddl/DropTable.java +++ b/h2/src/main/org/h2/command/ddl/DropTable.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import org.h2.api.ErrorCode; @@ -14,128 +15,117 @@ import org.h2.constraint.ConstraintActionType; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.Table; import org.h2.table.TableView; -import org.h2.util.StringUtils; +import org.h2.util.Utils; /** * This class represents the statement * DROP TABLE */ -public class DropTable extends SchemaCommand { +public class DropTable extends DefineCommand { private boolean ifExists; - private String tableName; - private Table table; - private DropTable next; private ConstraintActionType dropAction; - public DropTable(Session session, Schema schema) { - super(session, schema); + private final ArrayList tables = Utils.newSmallArrayList(); + + public DropTable(SessionLocal session) { + super(session); dropAction = session.getDatabase().getSettings().dropRestrict ? ConstraintActionType.RESTRICT : ConstraintActionType.CASCADE; } - /** - * Chain another drop table statement to this statement. - * - * @param drop the statement to add - */ - public void addNextDropTable(DropTable drop) { - if (next == null) { - next = drop; - } else { - next.addNextDropTable(drop); - } - } - public void setIfExists(boolean b) { ifExists = b; - if (next != null) { - next.setIfExists(b); - } } - public void setTableName(String tableName) { - this.tableName = tableName; + /** + * Add a table to drop. 
+ * + * @param schema the schema + * @param tableName the table name + */ + public void addTable(Schema schema, String tableName) { + tables.add(new SchemaAndTable(schema, tableName)); } - private void prepareDrop() { - table = getSchema().findTableOrView(session, tableName); - if (table == null) { - if (!ifExists) { - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); - } - } else { - session.getUser().checkRight(table, Right.ALL); - if (!table.canDrop()) { - throw DbException.get(ErrorCode.CANNOT_DROP_TABLE_1, tableName); + private boolean prepareDrop() { + HashSet
          tablesToDrop = new HashSet<>(); + for (SchemaAndTable schemaAndTable : tables) { + String tableName = schemaAndTable.tableName; + Table table = schemaAndTable.schema.findTableOrView(session, tableName); + if (table == null) { + if (!ifExists) { + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + } else { + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); + if (!table.canDrop()) { + throw DbException.get(ErrorCode.CANNOT_DROP_TABLE_1, tableName); + } + tablesToDrop.add(table); } + } + if (tablesToDrop.isEmpty()) { + return false; + } + for (Table table : tablesToDrop) { + ArrayList dependencies = new ArrayList<>(); if (dropAction == ConstraintActionType.RESTRICT) { - ArrayList dependencies = new ArrayList<>(); CopyOnWriteArrayList dependentViews = table.getDependentViews(); if (dependentViews != null && !dependentViews.isEmpty()) { for (TableView v : dependentViews) { - dependencies.add(v.getName()); + if (!tablesToDrop.contains(v)) { + dependencies.add(v.getName()); + } } } - if (session.getDatabase() - .getSettings().standardDropTableRestrict) { - final List constraints = table.getConstraints(); - if (constraints != null && !constraints.isEmpty()) { - for (Constraint c : constraints) { - if (c.getTable() != table) { - dependencies.add(c.getName()); - } + final List constraints = table.getConstraints(); + if (constraints != null && !constraints.isEmpty()) { + for (Constraint c : constraints) { + if (!tablesToDrop.contains(c.getTable())) { + dependencies.add(c.getName()); } } } if (!dependencies.isEmpty()) { - throw DbException.get(ErrorCode.CANNOT_DROP_2, tableName, - StringUtils.join(new StringBuilder(), dependencies, ", ").toString()); + throw DbException.get(ErrorCode.CANNOT_DROP_2, table.getName(), String.join(", ", dependencies)); } - } - table.lock(session, true, true); - } - if (next != null) { - next.prepareDrop(); + table.lock(session, Table.EXCLUSIVE_LOCK); } + return true; } private void executeDrop() { - 
// need to get the table again, because it may be dropped already - // meanwhile (dependent object, or same object) - table = getSchema().findTableOrView(session, tableName); - - if (table != null) { - table.setModified(); - Database db = session.getDatabase(); - db.lockMeta(session); - db.removeSchemaObject(session, table); - } - if (next != null) { - next.executeDrop(); + for (SchemaAndTable schemaAndTable : tables) { + // need to get the table again, because it may be dropped already + // meanwhile (dependent object, or same object) + Table table = schemaAndTable.schema.findTableOrView(session, schemaAndTable.tableName); + if (table != null) { + table.setModified(); + Database db = session.getDatabase(); + db.lockMeta(session); + db.removeSchemaObject(session, table); + } } } @Override - public int update() { - session.commit(true); - prepareDrop(); - executeDrop(); + public long update() { + if (prepareDrop()) { + executeDrop(); + } return 0; } public void setDropAction(ConstraintActionType dropAction) { this.dropAction = dropAction; - if (next != null) { - next.setDropAction(dropAction); - } } @Override @@ -143,4 +133,17 @@ public int getType() { return CommandInterface.DROP_TABLE; } + private static final class SchemaAndTable { + + final Schema schema; + + final String tableName; + + SchemaAndTable(Schema schema, String tableName) { + this.schema = schema; + this.tableName = tableName; + } + + } + } diff --git a/h2/src/main/org/h2/command/ddl/DropTrigger.java b/h2/src/main/org/h2/command/ddl/DropTrigger.java index 94ede93ae3..3e304bd5ce 100644 --- a/h2/src/main/org/h2/command/ddl/DropTrigger.java +++ b/h2/src/main/org/h2/command/ddl/DropTrigger.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -9,7 +9,7 @@ import org.h2.command.CommandInterface; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.TriggerObject; @@ -24,7 +24,7 @@ public class DropTrigger extends SchemaCommand { private String triggerName; private boolean ifExists; - public DropTrigger(Session session, Schema schema) { + public DropTrigger(SessionLocal session, Schema schema) { super(session, schema); } @@ -37,8 +37,7 @@ public void setTriggerName(String triggerName) { } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); TriggerObject trigger = getSchema().findTrigger(triggerName); if (trigger == null) { @@ -47,7 +46,7 @@ public int update() { } } else { Table table = trigger.getTable(); - session.getUser().checkRight(table, Right.ALL); + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); db.removeSchemaObject(session, trigger); } return 0; diff --git a/h2/src/main/org/h2/command/ddl/DropUser.java b/h2/src/main/org/h2/command/ddl/DropUser.java index 46d3126ba9..3f72099e46 100644 --- a/h2/src/main/org/h2/command/ddl/DropUser.java +++ b/h2/src/main/org/h2/command/ddl/DropUser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,8 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; import org.h2.engine.User; import org.h2.message.DbException; @@ -21,7 +22,7 @@ public class DropUser extends DefineCommand { private boolean ifExists; private String userName; - public DropUser(Session session) { + public DropUser(SessionLocal session) { super(session); } @@ -34,9 +35,8 @@ public void setUserName(String userName) { } @Override - public int update() { + public long update() { session.getUser().checkAdmin(); - session.commit(true); Database db = session.getDatabase(); User user = db.findUser(userName); if (user == null) { @@ -46,8 +46,8 @@ public int update() { } else { if (user == session.getUser()) { int adminUserCount = 0; - for (User u : db.getAllUsers()) { - if (u.isAdmin()) { + for (RightOwner rightOwner : db.getAllUsersAndRoles()) { + if (rightOwner instanceof User && ((User) rightOwner).isAdmin()) { adminUserCount++; } } diff --git a/h2/src/main/org/h2/command/ddl/DropView.java b/h2/src/main/org/h2/command/ddl/DropView.java index 8c059cf223..35c8462e4b 100644 --- a/h2/src/main/org/h2/command/ddl/DropView.java +++ b/h2/src/main/org/h2/command/ddl/DropView.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -10,8 +10,7 @@ import org.h2.command.CommandInterface; import org.h2.constraint.ConstraintActionType; import org.h2.engine.DbObject; -import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.Table; @@ -28,7 +27,7 @@ public class DropView extends SchemaCommand { private boolean ifExists; private ConstraintActionType dropAction; - public DropView(Session session, Schema schema) { + public DropView(SessionLocal session, Schema schema) { super(session, schema); dropAction = session.getDatabase().getSettings().dropRestrict ? ConstraintActionType.RESTRICT : @@ -48,8 +47,7 @@ public void setViewName(String viewName) { } @Override - public int update() { - session.commit(true); + public long update() { Table view = getSchema().findTableOrView(session, viewName); if (view == null) { if (!ifExists) { @@ -59,7 +57,7 @@ public int update() { if (TableType.VIEW != view.getTableType()) { throw DbException.get(ErrorCode.VIEW_NOT_FOUND_1, viewName); } - session.getUser().checkRight(view, Right.ALL); + session.getUser().checkSchemaOwner(view.getSchema()); if (dropAction == ConstraintActionType.RESTRICT) { for (DbObject child : view.getChildren()) { @@ -75,7 +73,7 @@ public int update() { TableView tableView = (TableView) view; ArrayList
          copyOfDependencies = new ArrayList<>(tableView.getTables()); - view.lock(session, true, true); + view.lock(session, Table.EXCLUSIVE_LOCK); session.getDatabase().removeSchemaObject(session, view); // remove dependent table expressions diff --git a/h2/src/main/org/h2/command/ddl/GrantRevoke.java b/h2/src/main/org/h2/command/ddl/GrantRevoke.java index 8ec093f6c2..3fc52cf5d0 100644 --- a/h2/src/main/org/h2/command/ddl/GrantRevoke.java +++ b/h2/src/main/org/h2/command/ddl/GrantRevoke.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -14,7 +14,8 @@ import org.h2.engine.Right; import org.h2.engine.RightOwner; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.Table; @@ -36,7 +37,7 @@ public class GrantRevoke extends DefineCommand { private Schema schema; private RightOwner grantee; - public GrantRevoke(Session session) { + public GrantRevoke(SessionLocal session) { super(session); } @@ -67,21 +68,18 @@ public void addRoleName(String roleName) { public void setGranteeName(String granteeName) { Database db = session.getDatabase(); - grantee = db.findUser(granteeName); + grantee = db.findUserOrRole(granteeName); if (grantee == null) { - grantee = db.findRole(granteeName); - if (grantee == null) { - throw DbException.get(ErrorCode.USER_OR_ROLE_NOT_FOUND_1, granteeName); - } + throw DbException.get(ErrorCode.USER_OR_ROLE_NOT_FOUND_1, granteeName); } } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); + public long update() { Database db = session.getDatabase(); + User user 
= session.getUser(); if (roleNames != null) { + user.checkAdmin(); for (String name : roleNames) { Role grantedRole = db.findRole(name); if (grantedRole == null) { @@ -92,16 +90,26 @@ public int update() { } else if (operationType == CommandInterface.REVOKE) { revokeRole(grantedRole); } else { - DbException.throwInternalError("type=" + operationType); + throw DbException.getInternalError("type=" + operationType); } } } else { + if ((rightMask & Right.ALTER_ANY_SCHEMA) != 0) { + user.checkAdmin(); + } else { + if (schema != null) { + user.checkSchemaOwner(schema); + } + for (Table table : tables) { + user.checkSchemaOwner(table.getSchema()); + } + } if (operationType == CommandInterface.GRANT) { grantRight(); } else if (operationType == CommandInterface.REVOKE) { revokeRight(); } else { - DbException.throwInternalError("type=" + operationType); + throw DbException.getInternalError("type=" + operationType); } } return 0; @@ -120,7 +128,10 @@ private void grantRight(DbObject object) { Database db = session.getDatabase(); Right right = grantee.getRightForObject(object); if (right == null) { - int id = getObjectId(); + int id = getPersistedObjectId(); + if (id == 0) { + id = session.getDatabase().allocateObjectId(); + } right = new Right(db, id, grantee, rightMask, object); grantee.grantRight(object, right); db.addDatabaseObject(session, right); @@ -138,7 +149,7 @@ private void grantRole(Role grantedRole) { Role granteeRole = (Role) grantee; if (grantedRole.isRoleGranted(granteeRole)) { // cyclic role grants are not allowed - throw DbException.get(ErrorCode.ROLE_ALREADY_GRANTED_1, grantedRole.getSQL(false)); + throw DbException.get(ErrorCode.ROLE_ALREADY_GRANTED_1, grantedRole.getTraceSQL()); } } Database db = session.getDatabase(); @@ -211,17 +222,4 @@ public int getType() { return operationType; } - /** - * @return true if this command is using Roles - */ - public boolean isRoleMode() { - return roleNames != null; - } - - /** - * @return true if this command is using 
Rights - */ - public boolean isRightMode() { - return rightMask != 0; - } } diff --git a/h2/src/main/org/h2/command/ddl/PrepareProcedure.java b/h2/src/main/org/h2/command/ddl/PrepareProcedure.java index 289d6b2c5b..028ab2fcae 100644 --- a/h2/src/main/org/h2/command/ddl/PrepareProcedure.java +++ b/h2/src/main/org/h2/command/ddl/PrepareProcedure.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -10,7 +10,7 @@ import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Procedure; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Parameter; /** @@ -22,7 +22,7 @@ public class PrepareProcedure extends DefineCommand { private String procedureName; private Prepared prepared; - public PrepareProcedure(Session session) { + public PrepareProcedure(SessionLocal session) { super(session); } @@ -32,7 +32,7 @@ public void checkParameters() { } @Override - public int update() { + public long update() { Procedure proc = new Procedure(procedureName, prepared); prepared.setParameterList(parameters); prepared.setPrepareAlways(prepareAlways); diff --git a/h2/src/main/org/h2/command/ddl/SchemaCommand.java b/h2/src/main/org/h2/command/ddl/SchemaCommand.java index eb364c59ee..14cf2c772c 100644 --- a/h2/src/main/org/h2/command/ddl/SchemaCommand.java +++ b/h2/src/main/org/h2/command/ddl/SchemaCommand.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.schema.Schema; /** @@ -21,7 +21,7 @@ public abstract class SchemaCommand extends DefineCommand { * @param session the session * @param schema the schema */ - public SchemaCommand(Session session, Schema schema) { + public SchemaCommand(SessionLocal session, Schema schema) { super(session); this.schema = schema; } @@ -31,7 +31,7 @@ public SchemaCommand(Session session, Schema schema) { * * @return the schema */ - protected Schema getSchema() { + protected final Schema getSchema() { return schema; } diff --git a/h2/src/main/org/h2/command/ddl/SchemaOwnerCommand.java b/h2/src/main/org/h2/command/ddl/SchemaOwnerCommand.java new file mode 100644 index 0000000000..28d432e625 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/SchemaOwnerCommand.java @@ -0,0 +1,38 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.engine.SessionLocal; +import org.h2.schema.Schema; + +/** + * This class represents a non-transaction statement that involves a schema and + * requires schema owner rights. + */ +abstract class SchemaOwnerCommand extends SchemaCommand { + + /** + * Create a new command. 
+ * + * @param session + * the session + * @param schema + * the schema + */ + SchemaOwnerCommand(SessionLocal session, Schema schema) { + super(session, schema); + } + + @Override + public final long update() { + Schema schema = getSchema(); + session.getUser().checkSchemaOwner(schema); + return update(schema); + } + + abstract long update(Schema schema); + +} diff --git a/h2/src/main/org/h2/command/ddl/SequenceOptions.java b/h2/src/main/org/h2/command/ddl/SequenceOptions.java index 2b44063100..801db6e1bd 100644 --- a/h2/src/main/org/h2/command/ddl/SequenceOptions.java +++ b/h2/src/main/org/h2/command/ddl/SequenceOptions.java @@ -1,15 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; -import org.h2.engine.Session; +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ValueExpression; +import org.h2.message.DbException; import org.h2.schema.Sequence; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueBigint; import org.h2.value.ValueNull; /** @@ -17,19 +21,27 @@ */ public class SequenceOptions { + private TypeInfo dataType; + private Expression start; + private Expression restart; + private Expression increment; private Expression maxValue; private Expression minValue; - private Boolean cycle; + private Sequence.Cycle cycle; private Expression cacheSize; - private static Long getLong(Session session, Expression expr) { + private long[] bounds; + + private final Sequence oldSequence; + + private static Long getLong(SessionLocal session, Expression expr) { if (expr != null) { Value value = expr.optimize(session).getValue(session); if (value != ValueNull.INSTANCE) { @@ -39,14 
+51,72 @@ private static Long getLong(Session session, Expression expr) { return null; } + /** + * Creates new instance of sequence options. + */ + public SequenceOptions() { + oldSequence = null; + } + + /** + * Creates new instance of sequence options. + * + * @param oldSequence + * the sequence to copy options from + * @param dataType + * the new data type + */ + public SequenceOptions(Sequence oldSequence, TypeInfo dataType) { + this.oldSequence = oldSequence; + this.dataType = dataType; + // Check data type correctness immediately + getBounds(); + } + + public TypeInfo getDataType() { + if (oldSequence != null) { + synchronized (oldSequence) { + copyFromOldSequence(); + } + } + return dataType; + } + + private void copyFromOldSequence() { + long bounds[] = getBounds(); + long min = Math.max(oldSequence.getMinValue(), bounds[0]); + long max = Math.min(oldSequence.getMaxValue(), bounds[1]); + if (max < min) { + min = bounds[0]; + max = bounds[1]; + } + minValue = ValueExpression.get(ValueBigint.get(min)); + maxValue = ValueExpression.get(ValueBigint.get(max)); + long v = oldSequence.getStartValue(); + if (v >= min && v <= max) { + start = ValueExpression.get(ValueBigint.get(v)); + } + v = oldSequence.getBaseValue(); + if (v >= min && v <= max) { + restart = ValueExpression.get(ValueBigint.get(v)); + } + increment = ValueExpression.get(ValueBigint.get(oldSequence.getIncrement())); + cycle = oldSequence.getCycle(); + cacheSize = ValueExpression.get(ValueBigint.get(oldSequence.getCacheSize())); + } + + public void setDataType(TypeInfo dataType) { + this.dataType = dataType; + } + /** * Gets start value. * * @param session The session to calculate the value. * @return start value or {@code null} if value is not defined. 
*/ - public Long getStartValue(Session session) { - return getLong(session, start); + public Long getStartValue(SessionLocal session) { + return check(getLong(session, start)); } /** @@ -58,14 +128,38 @@ public void setStartValue(Expression start) { this.start = start; } + /** + * Gets restart value. + * + * @param session + * the session to calculate the value + * @param startValue + * the start value to use if restart without value is specified + * @return restart value or {@code null} if value is not defined. + */ + public Long getRestartValue(SessionLocal session, long startValue) { + return check(restart == ValueExpression.DEFAULT ? (Long) startValue : getLong(session, restart)); + } + + /** + * Sets restart value expression, or {@link ValueExpression#DEFAULT}. + * + * @param restart + * RESTART WITH value expression, or + * {@link ValueExpression#DEFAULT} for simple RESTART + */ + public void setRestartValue(Expression restart) { + this.restart = restart; + } + /** * Gets increment value. * * @param session The session to calculate the value. * @return increment value or {@code null} if value is not defined. */ - public Long getIncrement(Session session) { - return getLong(session, increment); + public Long getIncrement(SessionLocal session) { + return check(getLong(session, increment)); } /** @@ -84,12 +178,15 @@ public void setIncrement(Expression increment) { * @param session The session to calculate the value. * @return max value when the MAXVALUE expression is set, otherwise returns default max value. */ - public Long getMaxValue(Sequence sequence, Session session) { - if (maxValue == ValueExpression.getNull() && sequence != null) { - return Sequence.getDefaultMaxValue(getCurrentStart(sequence, session), - increment != null ? 
getIncrement(session) : sequence.getIncrement()); + public Long getMaxValue(Sequence sequence, SessionLocal session) { + Long v; + if (maxValue == ValueExpression.NULL && sequence != null) { + v = Sequence.getDefaultMaxValue(getCurrentStart(sequence, session), + increment != null ? getIncrement(session) : sequence.getIncrement(), getBounds()); + } else { + v = getLong(session, maxValue); } - return getLong(session, maxValue); + return check(v); } /** @@ -108,12 +205,15 @@ public void setMaxValue(Expression maxValue) { * @param session The session to calculate the value. * @return min value when the MINVALUE expression is set, otherwise returns default min value. */ - public Long getMinValue(Sequence sequence, Session session) { - if (minValue == ValueExpression.getNull() && sequence != null) { - return Sequence.getDefaultMinValue(getCurrentStart(sequence, session), - increment != null ? getIncrement(session) : sequence.getIncrement()); + public Long getMinValue(Sequence sequence, SessionLocal session) { + Long v; + if (minValue == ValueExpression.NULL && sequence != null) { + v = Sequence.getDefaultMinValue(getCurrentStart(sequence, session), + increment != null ? getIncrement(session) : sequence.getIncrement(), getBounds()); + } else { + v = getLong(session, minValue); } - return getLong(session, minValue); + return check(v); } /** @@ -125,21 +225,115 @@ public void setMinValue(Expression minValue) { this.minValue = minValue; } + private Long check(Long value) { + if (value == null) { + return null; + } else { + long[] bounds = getBounds(); + long v = value; + if (v < bounds[0] || v > bounds[1]) { + throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, Long.toString(v)); + } + } + return value; + } + + public long[] getBounds() { + long[] bounds = this.bounds; + if (bounds == null) { + this.bounds = bounds = getBounds(dataType); + } + return bounds; + } + /** - * Gets cycle flag. + * Get the bounds (min, max) of a data type. 
* - * @return cycle flag value or {@code null} if value is not defined. + * @param dataType the data type + * @return the bounds (an array with 2 elements) */ - public Boolean getCycle() { + public static long[] getBounds(TypeInfo dataType) { + long min, max; + switch (dataType.getValueType()) { + case Value.TINYINT: + min = Byte.MIN_VALUE; + max = Byte.MAX_VALUE; + break; + case Value.SMALLINT: + min = Short.MIN_VALUE; + max = Short.MAX_VALUE; + break; + case Value.INTEGER: + min = Integer.MIN_VALUE; + max = Integer.MAX_VALUE; + break; + case Value.BIGINT: + min = Long.MIN_VALUE; + max = Long.MAX_VALUE; + break; + case Value.REAL: + min = -0x100_0000; + max = 0x100_0000; + break; + case Value.DOUBLE: + min = -0x20_0000_0000_0000L; + max = 0x20_0000_0000_0000L; + break; + case Value.NUMERIC: { + if (dataType.getScale() != 0) { + throw DbException.getUnsupportedException(dataType.getTraceSQL()); + } + long p = (dataType.getPrecision() - dataType.getScale()); + if (p <= 0) { + throw DbException.getUnsupportedException(dataType.getTraceSQL()); + } else if (p > 18) { + min = Long.MIN_VALUE; + max = Long.MAX_VALUE; + } else { + max = 10; + for (int i = 1; i < p; i++) { + max *= 10; + } + min = - --max; + } + break; + } + case Value.DECFLOAT: { + long p = dataType.getPrecision(); + if (p > 18) { + min = Long.MIN_VALUE; + max = Long.MAX_VALUE; + } else { + max = 10; + for (int i = 1; i < p; i++) { + max *= 10; + } + min = -max; + } + break; + } + default: + throw DbException.getUnsupportedException(dataType.getTraceSQL()); + } + long bounds[] = { min, max }; + return bounds; + } + + /** + * Gets cycle option. + * + * @return cycle option value or {@code null} if is not defined. + */ + public Sequence.Cycle getCycle() { return cycle; } /** - * Sets cycle flag. + * Sets cycle option. * - * @param cycle flag value. + * @param cycle option value. 
*/ - public void setCycle(Boolean cycle) { + public void setCycle(Sequence.Cycle cycle) { this.cycle = cycle; } @@ -149,7 +343,7 @@ public void setCycle(Boolean cycle) { * @param session The session to calculate the value. * @return cache size or {@code null} if value is not defined. */ - public Long getCacheSize(Session session) { + public Long getCacheSize(SessionLocal session) { return getLong(session, cacheSize); } @@ -162,11 +356,7 @@ public void setCacheSize(Expression cacheSize) { this.cacheSize = cacheSize; } - boolean isRangeSet() { - return start != null || minValue != null || maxValue != null || increment != null; - } - - private long getCurrentStart(Sequence sequence, Session session) { - return start != null ? getStartValue(session) : sequence.getCurrentValue() + sequence.getIncrement(); + private long getCurrentStart(Sequence sequence, SessionLocal session) { + return start != null ? getStartValue(session) : sequence.getBaseValue(); } } diff --git a/h2/src/main/org/h2/command/ddl/SetComment.java b/h2/src/main/org/h2/command/ddl/SetComment.java index 22bfc8b34c..ba936cc766 100644 --- a/h2/src/main/org/h2/command/ddl/SetComment.java +++ b/h2/src/main/org/h2/command/ddl/SetComment.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -10,9 +10,10 @@ import org.h2.engine.Comment; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.message.DbException; +import org.h2.schema.Schema; import org.h2.table.Table; /** @@ -28,69 +29,97 @@ public class SetComment extends DefineCommand { private int objectType; private Expression expr; - public SetComment(Session session) { + public SetComment(SessionLocal session) { super(session); } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); - session.getUser().checkAdmin(); DbObject object = null; int errorCode = ErrorCode.GENERAL_ERROR_1; if (schemaName == null) { schemaName = session.getCurrentSchemaName(); } switch (objectType) { - case DbObject.CONSTANT: - object = db.getSchema(schemaName).getConstant(objectName); + case DbObject.CONSTANT: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.getConstant(objectName); break; - case DbObject.CONSTRAINT: - object = db.getSchema(schemaName).getConstraint(objectName); + } + case DbObject.CONSTRAINT: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.getConstraint(objectName); break; - case DbObject.FUNCTION_ALIAS: - object = db.getSchema(schemaName).findFunction(objectName); + } + case DbObject.FUNCTION_ALIAS: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.findFunction(objectName); errorCode = ErrorCode.FUNCTION_ALIAS_NOT_FOUND_1; break; - case DbObject.INDEX: - object = db.getSchema(schemaName).getIndex(objectName); + } + case DbObject.INDEX: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.getIndex(objectName); break; + 
} case DbObject.ROLE: + session.getUser().checkAdmin(); schemaName = null; object = db.findRole(objectName); errorCode = ErrorCode.ROLE_NOT_FOUND_1; break; - case DbObject.SCHEMA: + case DbObject.SCHEMA: { schemaName = null; - object = db.findSchema(objectName); - errorCode = ErrorCode.SCHEMA_NOT_FOUND_1; + Schema schema = db.getSchema(objectName); + session.getUser().checkSchemaOwner(schema); + object = schema; break; - case DbObject.SEQUENCE: - object = db.getSchema(schemaName).getSequence(objectName); + } + case DbObject.SEQUENCE: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.getSequence(objectName); break; - case DbObject.TABLE_OR_VIEW: - object = db.getSchema(schemaName).getTableOrView(session, objectName); + } + case DbObject.TABLE_OR_VIEW: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.getTableOrView(session, objectName); break; - case DbObject.TRIGGER: - object = db.getSchema(schemaName).findTrigger(objectName); + } + case DbObject.TRIGGER: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.findTrigger(objectName); errorCode = ErrorCode.TRIGGER_NOT_FOUND_1; break; + } case DbObject.USER: + session.getUser().checkAdmin(); schemaName = null; object = db.getUser(objectName); break; - case DbObject.DOMAIN: - schemaName = null; - object = db.findDomain(objectName); - errorCode = ErrorCode.DOMAIN_ALREADY_EXISTS_1; + case DbObject.DOMAIN: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.findDomain(objectName); + errorCode = ErrorCode.DOMAIN_NOT_FOUND_1; break; + } default: } if (object == null) { throw DbException.get(errorCode, objectName); } String text = expr.optimize(session).getValue(session).getString(); + if (text != null && text.isEmpty()) { + text = null; + } if (column) { Table table = (Table) object; 
table.getColumn(columnName).setComment(text); diff --git a/h2/src/main/org/h2/command/ddl/TruncateTable.java b/h2/src/main/org/h2/command/ddl/TruncateTable.java index 065189e6d7..6bb244f6b7 100644 --- a/h2/src/main/org/h2/command/ddl/TruncateTable.java +++ b/h2/src/main/org/h2/command/ddl/TruncateTable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Sequence; import org.h2.table.Column; @@ -24,7 +24,7 @@ public class TruncateTable extends DefineCommand { private boolean restart; - public TruncateTable(Session session) { + public TruncateTable(SessionLocal session) { super(session); } @@ -37,27 +37,23 @@ public void setRestart(boolean restart) { } @Override - public int update() { - session.commit(true); + public long update() { if (!table.canTruncate()) { - throw DbException.get(ErrorCode.CANNOT_TRUNCATE_1, table.getSQL(false)); + throw DbException.get(ErrorCode.CANNOT_TRUNCATE_1, table.getTraceSQL()); } - session.getUser().checkRight(table, Right.DELETE); - table.lock(session, true, true); - table.truncate(session); + session.getUser().checkTableRight(table, Right.DELETE); + table.lock(session, Table.EXCLUSIVE_LOCK); + long result = table.truncate(session); if (restart) { for (Column column : table.getColumns()) { Sequence sequence = column.getSequence(); if (sequence != null) { - long min = sequence.getMinValue(); - if (min != sequence.getCurrentValue()) { - sequence.modify(min, null, null, null); - 
session.getDatabase().updateMeta(session, sequence); - } + sequence.modify(sequence.getStartValue(), null, null, null, null, null, null); + session.getDatabase().updateMeta(session, sequence); } } } - return 0; + return result; } @Override diff --git a/h2/src/main/org/h2/command/ddl/package.html b/h2/src/main/org/h2/command/ddl/package.html index c33afa7e0c..9862a68694 100644 --- a/h2/src/main/org/h2/command/ddl/package.html +++ b/h2/src/main/org/h2/command/ddl/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/command/dml/AlterTableSet.java b/h2/src/main/org/h2/command/dml/AlterTableSet.java index 97c9ebb7ee..9d3a3c1a14 100644 --- a/h2/src/main/org/h2/command/dml/AlterTableSet.java +++ b/h2/src/main/org/h2/command/dml/AlterTableSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; @@ -9,7 +9,7 @@ import org.h2.command.CommandInterface; import org.h2.command.ddl.SchemaCommand; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.Table; @@ -27,7 +27,7 @@ public class AlterTableSet extends SchemaCommand { private final boolean value; private boolean checkExisting; - public AlterTableSet(Session session, Schema schema, int type, boolean value) { + public AlterTableSet(SessionLocal session, Schema schema, int type, boolean value) { super(session, schema); this.type = type; this.value = value; @@ -51,7 +51,7 @@ public void setTableName(String tableName) { } @Override - public int update() { + public long update() { Table table = getSchema().resolveTableOrView(session, tableName); if (table == null) { if (ifTableExists) { @@ -59,15 +59,15 @@ public int update() { } throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); } - session.getUser().checkRight(table, Right.ALL); - table.lock(session, true, true); + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); + table.lock(session, Table.EXCLUSIVE_LOCK); switch (type) { case CommandInterface.ALTER_TABLE_SET_REFERENTIAL_INTEGRITY: table.setCheckForeignKeyConstraints(session, value, value ? checkExisting : false); break; default: - DbException.throwInternalError("type="+type); + throw DbException.getInternalError("type="+type); } return 0; } diff --git a/h2/src/main/org/h2/command/dml/BackupCommand.java b/h2/src/main/org/h2/command/dml/BackupCommand.java index bb4990ddde..709147da4d 100644 --- a/h2/src/main/org/h2/command/dml/BackupCommand.java +++ b/h2/src/main/org/h2/command/dml/BackupCommand.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; @@ -11,20 +11,18 @@ import java.util.ArrayList; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; -import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.mvstore.MVStore; -import org.h2.mvstore.db.MVTableEngine.Store; +import org.h2.mvstore.db.Store; import org.h2.result.ResultInterface; import org.h2.store.FileLister; -import org.h2.store.PageStore; import org.h2.store.fs.FileUtils; import org.h2.util.IOUtils; @@ -36,7 +34,7 @@ public class BackupCommand extends Prepared { private Expression fileNameExpr; - public BackupCommand(Session session) { + public BackupCommand(SessionLocal session) { super(session); } @@ -45,7 +43,7 @@ public void setFileName(Expression fileName) { } @Override - public int update() { + public long update() { String name = fileNameExpr.getValue(session).getString(); session.getUser().checkAdmin(); backupTo(name); @@ -59,18 +57,12 @@ private void backupTo(String fileName) { } try { Store store = db.getStore(); - if (store != null) { - store.flush(); - } + store.flush(); String name = db.getName(); name = FileUtils.getName(name); try (OutputStream zip = FileUtils.newOutputStream(fileName, false)) { ZipOutputStream out = new ZipOutputStream(zip); db.flush(); - if (db.getPageStore() != null) { - String fn = db.getName() + Constants.SUFFIX_PAGE_FILE; - backupPageStore(out, fn, db.getPageStore()); - } // synchronize on the database, to avoid concurrent temp file // creation / deletion / backup String base = FileUtils.getParent(db.getName()); @@ -80,10 +72,7 @@ private 
void backupTo(String fileName) { dir = FileLister.getDir(dir); ArrayList fileList = FileLister.getDatabaseFiles(dir, name, true); for (String n : fileList) { - if (n.endsWith(Constants.SUFFIX_LOB_FILE)) { - backupFile(out, base, n); - } - if (n.endsWith(Constants.SUFFIX_MV_FILE) && store != null) { + if (n.endsWith(Constants.SUFFIX_MV_FILE)) { MVStore s = store.getMvStore(); boolean before = s.getReuseSpace(); s.setReuseSpace(false); @@ -103,40 +92,12 @@ private void backupTo(String fileName) { } } - private void backupPageStore(ZipOutputStream out, String fileName, - PageStore store) throws IOException { - Database db = session.getDatabase(); - fileName = FileUtils.getName(fileName); - out.putNextEntry(new ZipEntry(fileName)); - int pos = 0; - try { - store.setBackup(true); - while (true) { - pos = store.copyDirect(pos, out); - if (pos < 0) { - break; - } - int max = store.getPageCount(); - db.setProgress(DatabaseEventListener.STATE_BACKUP_FILE, fileName, pos, max); - } - } finally { - store.setBackup(false); - } - out.closeEntry(); - } - - private static void backupFile(ZipOutputStream out, String base, String fn) - throws IOException { - InputStream in = FileUtils.newInputStream(fn); - backupFile(out, base, fn, in); - } - private static void backupFile(ZipOutputStream out, String base, String fn, InputStream in) throws IOException { String f = FileUtils.toRealPath(fn); base = FileUtils.toRealPath(base); if (!f.startsWith(base)) { - DbException.throwInternalError(f + " does not start with " + base); + throw DbException.getInternalError(f + " does not start with " + base); } f = f.substring(base.length()); f = correctFileName(f); diff --git a/h2/src/main/org/h2/command/dml/Call.java b/h2/src/main/org/h2/command/dml/Call.java index 475a19396d..7302298328 100644 --- a/h2/src/main/org/h2/command/dml/Call.java +++ b/h2/src/main/org/h2/command/dml/Call.java @@ -1,17 +1,21 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; import org.h2.command.CommandInterface; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.expression.Alias; import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; +import org.h2.expression.function.table.TableFunction; import org.h2.result.LocalResult; import org.h2.result.ResultInterface; +import org.h2.table.Column; import org.h2.value.Value; /** @@ -20,36 +24,34 @@ */ public class Call extends Prepared { - private boolean isResultSet; private Expression expression; + + private TableFunction tableFunction; + private Expression[] expressions; - public Call(Session session) { + public Call(SessionLocal session) { super(session); } @Override public ResultInterface queryMeta() { - LocalResult result; - if (isResultSet) { - Expression[] expr = expression.getExpressionColumns(session); - result = session.getDatabase().getResultFactory().create(session, expr, expr.length); - } else { - result = session.getDatabase().getResultFactory().create(session, expressions, 1); - } + int columnCount = expressions.length; + LocalResult result = new LocalResult(session, expressions, columnCount, columnCount); result.done(); return result; } @Override - public int update() { - Value v = expression.getValue(session); - int type = v.getValueType(); - switch (type) { - case Value.RESULT_SET: + public long update() { + if (tableFunction != null) { // this will throw an exception // methods returning a result set may not be called like this. 
return super.update(); + } + Value v = expression.getValue(session); + int type = v.getValueType(); + switch (type) { case Value.UNKNOWN: case Value.NULL: return 0; @@ -59,26 +61,36 @@ public int update() { } @Override - public ResultInterface query(int maxrows) { + public ResultInterface query(long maxrows) { setCurrentRowNumber(1); - Value v = expression.getValue(session); - if (isResultSet) { - return v.getResult(); + if (tableFunction != null) { + return tableFunction.getValue(session); } - LocalResult result = session.getDatabase().getResultFactory().create(session, expressions, 1); - Value[] row = { v }; - result.addRow(row); + LocalResult result = new LocalResult(session, expressions, 1, 1); + result.addRow(expression.getValue(session)); result.done(); return result; } @Override public void prepare() { - expression = expression.optimize(session); - expressions = new Expression[] { expression }; - isResultSet = expression.getType().getValueType() == Value.RESULT_SET; - if (isResultSet) { + if (tableFunction != null) { prepareAlways = true; + tableFunction.optimize(session); + ResultInterface result = tableFunction.getValueTemplate(session); + int columnCount = result.getVisibleColumnCount(); + expressions = new Expression[columnCount]; + for (int i = 0; i < columnCount; i++) { + String name = result.getColumnName(i); + String alias = result.getAlias(i); + Expression e = new ExpressionColumn(session.getDatabase(), new Column(name, result.getColumnType(i))); + if (!alias.equals(name)) { + e = new Alias(e, alias, false); + } + expressions[i] = e; + } + } else { + expressions = new Expression[] { expression = expression.optimize(session) }; } } @@ -86,6 +98,10 @@ public void setExpression(Expression expression) { this.expression = expression; } + public void setTableFunction(TableFunction tableFunction) { + this.tableFunction = tableFunction; + } + @Override public boolean isQuery() { return true; @@ -98,7 +114,7 @@ public boolean isTransactional() { @Override 
public boolean isReadOnly() { - return expression.isEverything(ExpressionVisitor.READONLY_VISITOR); + return tableFunction == null && expression.isEverything(ExpressionVisitor.READONLY_VISITOR); } @@ -109,7 +125,7 @@ public int getType() { @Override public boolean isCacheable() { - return !isResultSet; + return tableFunction == null; } } diff --git a/h2/src/main/org/h2/command/dml/CommandWithValues.java b/h2/src/main/org/h2/command/dml/CommandWithValues.java index 751e8a0195..592981ae33 100644 --- a/h2/src/main/org/h2/command/dml/CommandWithValues.java +++ b/h2/src/main/org/h2/command/dml/CommandWithValues.java @@ -1,21 +1,20 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; import java.util.ArrayList; -import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.util.Utils; /** * Command that supports VALUES clause. */ -public abstract class CommandWithValues extends Prepared { +public abstract class CommandWithValues extends DataChangeStatement { /** * Expression data for the VALUES clause. @@ -28,7 +27,7 @@ public abstract class CommandWithValues extends Prepared { * @param session * the session */ - protected CommandWithValues(Session session) { + protected CommandWithValues(SessionLocal session) { super(session); } diff --git a/h2/src/main/org/h2/command/dml/DataChangeStatement.java b/h2/src/main/org/h2/command/dml/DataChangeStatement.java new file mode 100644 index 0000000000..a2b53970f4 --- /dev/null +++ b/h2/src/main/org/h2/command/dml/DataChangeStatement.java @@ -0,0 +1,75 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import org.h2.command.Prepared; +import org.h2.engine.SessionLocal; +import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; +import org.h2.table.DataChangeDeltaTable.ResultOption; +import org.h2.table.Table; + +/** + * Data change statement. + */ +public abstract class DataChangeStatement extends Prepared { + + /** + * Creates new instance of DataChangeStatement. + * + * @param session + * the session + */ + protected DataChangeStatement(SessionLocal session) { + super(session); + } + + /** + * Return the name of this statement. + * + * @return the short name of this statement. + */ + public abstract String getStatementName(); + + /** + * Return the target table. + * + * @return the target table + */ + public abstract Table getTable(); + + @Override + public final boolean isTransactional() { + return true; + } + + @Override + public final ResultInterface queryMeta() { + return null; + } + + @Override + public boolean isCacheable() { + return true; + } + + @Override + public final long update() { + return update(null, null); + } + + /** + * Execute the statement with specified delta change collector and collection mode. + * + * @param deltaChangeCollector + * target result + * @param deltaChangeCollectionMode + * collection mode + * @return the update count + */ + public abstract long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode); + +} diff --git a/h2/src/main/org/h2/command/dml/Delete.java b/h2/src/main/org/h2/command/dml/Delete.java index e53cb5c8a2..832ba22dc2 100644 --- a/h2/src/main/org/h2/command/dml/Delete.java +++ b/h2/src/main/org/h2/command/dml/Delete.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; @@ -9,14 +9,17 @@ import org.h2.api.Trigger; import org.h2.command.CommandInterface; -import org.h2.command.Prepared; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; -import org.h2.result.ResultInterface; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultTarget; import org.h2.result.Row; -import org.h2.result.RowList; +import org.h2.table.DataChangeDeltaTable.ResultOption; import org.h2.table.PlanItem; import org.h2.table.Table; import org.h2.table.TableFilter; @@ -27,106 +30,66 @@ * This class represents the statement * DELETE */ -public class Delete extends Prepared { +public final class Delete extends FilteredDataChangeStatement { - private Expression condition; - private TableFilter targetTableFilter; - - /** - * The limit expression as specified in the LIMIT or TOP clause. - */ - private Expression limitExpr; - /** - * This table filter is for MERGE..USING support - not used in stand-alone DML - */ - private TableFilter sourceTableFilter; - - private HashSet keysFilter; - - public Delete(Session session) { + public Delete(SessionLocal session) { super(session); } - public void setTableFilter(TableFilter tableFilter) { - this.targetTableFilter = tableFilter; - } - - public void setCondition(Expression condition) { - this.condition = condition; - } - - public Expression getCondition() { - return this.condition; - } - - /** - * Sets the keys filter. 
- * - * @param keysFilter the keys filter - */ - public void setKeysFilter(HashSet keysFilter) { - this.keysFilter = keysFilter; - } @Override - public int update() { + public long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { targetTableFilter.startQuery(session); targetTableFilter.reset(); Table table = targetTableFilter.getTable(); - session.getUser().checkRight(table, Right.DELETE); + session.getUser().checkTableRight(table, Right.DELETE); table.fire(session, Trigger.DELETE, true); - table.lock(session, true, false); - int limitRows = -1; - if (limitExpr != null) { - Value v = limitExpr.getValue(session); - if (v != ValueNull.INSTANCE) { - limitRows = v.getInt(); + table.lock(session, Table.WRITE_LOCK); + long limitRows = -1; + if (fetchExpr != null) { + Value v = fetchExpr.getValue(session); + if (v == ValueNull.INSTANCE || (limitRows = v.getLong()) < 0) { + throw DbException.getInvalidValueException("FETCH", v); } } - try (RowList rows = new RowList(session)) { + try (LocalResult rows = LocalResult.forTable(session, table)) { setCurrentRowNumber(0); - int count = 0; - while (limitRows != 0 && targetTableFilter.next()) { - setCurrentRowNumber(rows.size() + 1); - if (condition == null || condition.getBooleanValue(session)) { - Row row = targetTableFilter.get(); - if (keysFilter == null || keysFilter.contains(row.getKey())) { - if (table.isMVStore()) { - Row lockedRow = table.lockRow(session, row); - if (lockedRow == null) { - continue; - } - if (!row.hasSharedData(lockedRow)) { - row = lockedRow; - targetTableFilter.set(row); - if (condition != null && !condition.getBooleanValue(session)) { - continue; - } - } - } - if (!table.fireRow() || !table.fireBeforeRow(session, row, null)) { - rows.add(row); - } - count++; - if (limitRows >= 0 && count >= limitRows) { - break; + long count = 0; + while (nextRow(limitRows, count)) { + Row row = targetTableFilter.get(); + if (table.isRowLockable()) { + Row lockedRow = 
table.lockRow(session, row); + if (lockedRow == null) { + continue; + } + if (!row.hasSharedData(lockedRow)) { + row = lockedRow; + targetTableFilter.set(row); + if (condition != null && !condition.getBooleanValue(session)) { + continue; } } } + if (deltaChangeCollectionMode == ResultOption.OLD) { + deltaChangeCollector.addRow(row.getValueList()); + } + if (!table.fireRow() || !table.fireBeforeRow(session, row, null)) { + rows.addRowForTable(row); + } + count++; } - int rowScanCount = 0; - for (rows.reset(); rows.hasNext();) { + rows.done(); + long rowScanCount = 0; + while (rows.next()) { if ((++rowScanCount & 127) == 0) { checkCanceled(); } - Row row = rows.next(); + Row row = rows.currentRowForTable(); table.removeRow(session, row); - session.log(table, UndoLogRecord.DELETE, row); } if (table.fireRow()) { - for (rows.reset(); rows.hasNext();) { - Row row = rows.next(); - table.fireAfterRow(session, row, null, false); + for (rows.reset(); rows.next();) { + table.fireAfterRow(session, rows.currentRowForTable(), null, false); } } table.fire(session, Trigger.DELETE, false); @@ -135,77 +98,43 @@ public int update() { } @Override - public String getPlanSQL(boolean alwaysQuote) { - StringBuilder buff = new StringBuilder(); - buff.append("DELETE FROM "); - targetTableFilter.getPlanSQL(buff, false, alwaysQuote); - if (condition != null) { - buff.append("\nWHERE "); - condition.getUnenclosedSQL(buff, alwaysQuote); - } - if (limitExpr != null) { - buff.append("\nLIMIT ("); - limitExpr.getUnenclosedSQL(buff, alwaysQuote).append(')'); - } - return buff.toString(); + public String getPlanSQL(int sqlFlags) { + StringBuilder builder = new StringBuilder("DELETE FROM "); + targetTableFilter.getPlanSQL(builder, false, sqlFlags); + appendFilterCondition(builder, sqlFlags); + return builder.toString(); } @Override public void prepare() { if (condition != null) { condition.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); - if (sourceTableFilter != null) { - 
condition.mapColumns(sourceTableFilter, 0, Expression.MAP_INITIAL); + condition = condition.optimizeCondition(session); + if (condition != null) { + condition.createIndexConditions(session, targetTableFilter); } - condition = condition.optimize(session); - condition.createIndexConditions(session, targetTableFilter); } - TableFilter[] filters; - if (sourceTableFilter == null) { - filters = new TableFilter[] { targetTableFilter }; - } else { - filters = new TableFilter[] { targetTableFilter, sourceTableFilter }; - } - PlanItem item = targetTableFilter.getBestPlanItem(session, filters, 0, - new AllColumnsForPlan(filters)); + TableFilter[] filters = new TableFilter[] { targetTableFilter }; + PlanItem item = targetTableFilter.getBestPlanItem(session, filters, 0, new AllColumnsForPlan(filters)); targetTableFilter.setPlanItem(item); targetTableFilter.prepare(); } - @Override - public boolean isTransactional() { - return true; - } - - @Override - public ResultInterface queryMeta() { - return null; - } - @Override public int getType() { return CommandInterface.DELETE; } - public void setLimit(Expression limit) { - this.limitExpr = limit; - } - @Override - public boolean isCacheable() { - return true; + public String getStatementName() { + return "DELETE"; } - public void setSourceTableFilter(TableFilter sourceTableFilter) { - this.sourceTableFilter = sourceTableFilter; - } - - public TableFilter getTableFilter() { - return targetTableFilter; - } - - public TableFilter getSourceTableFilter() { - return sourceTableFilter; + @Override + public void collectDependencies(HashSet dependencies) { + ExpressionVisitor visitor = ExpressionVisitor.getDependenciesVisitor(dependencies); + if (condition != null) { + condition.isEverything(visitor); + } } - } diff --git a/h2/src/main/org/h2/command/dml/ExecuteImmediate.java b/h2/src/main/org/h2/command/dml/ExecuteImmediate.java new file mode 100644 index 0000000000..b9e5cfe66e --- /dev/null +++ 
b/h2/src/main/org/h2/command/dml/ExecuteImmediate.java @@ -0,0 +1,57 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.command.Prepared; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; + +/** + * This class represents the statement + * EXECUTE IMMEDIATE. + */ +public class ExecuteImmediate extends Prepared { + + private Expression statement; + + public ExecuteImmediate(SessionLocal session, Expression statement) { + super(session); + this.statement = statement.optimize(session); + } + + @Override + public long update() { + String sql = statement.getValue(session).getString(); + if (sql == null) { + throw DbException.getInvalidValueException("SQL command", null); + } + Prepared command = session.prepare(sql); + if (command.isQuery()) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_2, sql, ""); + } + return command.update(); + } + + @Override + public boolean isTransactional() { + return true; + } + + @Override + public int getType() { + return CommandInterface.EXECUTE_IMMEDIATELY; + } + + @Override + public ResultInterface queryMeta() { + return null; + } + +} diff --git a/h2/src/main/org/h2/command/dml/ExecuteProcedure.java b/h2/src/main/org/h2/command/dml/ExecuteProcedure.java index 4c9c7d3a2c..0313ea51fd 100644 --- a/h2/src/main/org/h2/command/dml/ExecuteProcedure.java +++ b/h2/src/main/org/h2/command/dml/ExecuteProcedure.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; @@ -10,7 +10,7 @@ import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Procedure; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.Parameter; import org.h2.result.ResultInterface; @@ -25,7 +25,7 @@ public class ExecuteProcedure extends Prepared { private final ArrayList expressions = Utils.newSmallArrayList(); private Procedure procedure; - public ExecuteProcedure(Session session) { + public ExecuteProcedure(SessionLocal session) { super(session); } @@ -61,14 +61,14 @@ public boolean isQuery() { } @Override - public int update() { + public long update() { setParameters(); Prepared prepared = procedure.getPrepared(); return prepared.update(); } @Override - public ResultInterface query(int limit) { + public ResultInterface query(long limit) { setParameters(); Prepared prepared = procedure.getPrepared(); return prepared.query(limit); diff --git a/h2/src/main/org/h2/command/dml/Explain.java b/h2/src/main/org/h2/command/dml/Explain.java index 062f59252c..ea677f528f 100644 --- a/h2/src/main/org/h2/command/dml/Explain.java +++ b/h2/src/main/org/h2/command/dml/Explain.java @@ -1,26 +1,28 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; +import java.util.HashSet; import java.util.Map; -import java.util.TreeMap; import java.util.Map.Entry; +import java.util.TreeMap; import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; -import org.h2.mvstore.db.MVTableEngine.Store; +import org.h2.mvstore.db.Store; import org.h2.result.LocalResult; import org.h2.result.ResultInterface; -import org.h2.store.PageStore; import org.h2.table.Column; -import org.h2.value.Value; -import org.h2.value.ValueString; +import org.h2.util.HasSQL; +import org.h2.value.TypeInfo; +import org.h2.value.ValueVarchar; /** * This class represents the statement @@ -32,7 +34,7 @@ public class Explain extends Prepared { private LocalResult result; private boolean executeCommand; - public Explain(Session session) { + public Explain(SessionLocal session) { super(session); } @@ -67,39 +69,28 @@ protected void checkParameters() { } @Override - public ResultInterface query(int maxrows) { - Column column = new Column("PLAN", Value.STRING); + public ResultInterface query(long maxrows) { Database db = session.getDatabase(); - ExpressionColumn expr = new ExpressionColumn(db, column); - Expression[] expressions = { expr }; - result = db.getResultFactory().create(session, expressions, 1); - boolean alwaysQuote = true; + Expression[] expressions = { new ExpressionColumn(db, new Column("PLAN", TypeInfo.TYPE_VARCHAR)) }; + result = new LocalResult(session, expressions, 1, 1); + int sqlFlags = HasSQL.ADD_PLAN_INFORMATION; if (maxrows >= 0) { String plan; if (executeCommand) { - PageStore store = null; - Store mvStore = null; + Store store = null; if (db.isPersistent()) { - store = db.getPageStore(); - if (store != null) { - store.statisticsStart(); - } - mvStore = 
db.getStore(); - if (mvStore != null) { - mvStore.statisticsStart(); - } + store = db.getStore(); + store.statisticsStart(); } if (command.isQuery()) { command.query(maxrows); } else { command.update(); } - plan = command.getPlanSQL(alwaysQuote); + plan = command.getPlanSQL(sqlFlags); Map statistics = null; if (store != null) { statistics = store.statisticsEnd(); - } else if (mvStore != null) { - statistics = mvStore.statisticsEnd(); } if (statistics != null) { int total = 0; @@ -125,7 +116,7 @@ public ResultInterface query(int maxrows) { } } } else { - plan = command.getPlanSQL(alwaysQuote); + plan = command.getPlanSQL(sqlFlags); } add(plan); } @@ -134,8 +125,7 @@ public ResultInterface query(int maxrows) { } private void add(String text) { - Value[] row = { ValueString.get(text) }; - result.addRow(row); + result.addRow(ValueVarchar.get(text)); } @Override @@ -157,4 +147,10 @@ public boolean isReadOnly() { public int getType() { return executeCommand ? CommandInterface.EXPLAIN_ANALYZE : CommandInterface.EXPLAIN; } + + @Override + public void collectDependencies(HashSet dependencies) { + command.collectDependencies(dependencies); + } + } diff --git a/h2/src/main/org/h2/command/dml/FilteredDataChangeStatement.java b/h2/src/main/org/h2/command/dml/FilteredDataChangeStatement.java new file mode 100644 index 0000000000..81995ce801 --- /dev/null +++ b/h2/src/main/org/h2/command/dml/FilteredDataChangeStatement.java @@ -0,0 +1,97 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.table.Table; +import org.h2.table.TableFilter; + +/** + * Data change statement with WHERE criteria and possibly limited number of + * rows. + */ +abstract class FilteredDataChangeStatement extends DataChangeStatement { + + /** + * The WHERE criteria. 
+ */ + Expression condition; + + /** + * The target table filter. + */ + TableFilter targetTableFilter; + + /** + * The expression with optional maximum number of rows. + */ + Expression fetchExpr; + + /** + * Creates new instance of FilteredDataChangeStatement. + * + * @param session + * the session + */ + FilteredDataChangeStatement(SessionLocal session) { + super(session); + } + + @Override + public final Table getTable() { + return targetTableFilter.getTable(); + } + + public final void setTableFilter(TableFilter tableFilter) { + this.targetTableFilter = tableFilter; + } + + public final TableFilter getTableFilter() { + return targetTableFilter; + } + + public final void setCondition(Expression condition) { + this.condition = condition; + } + + public final Expression getCondition() { + return this.condition; + } + + public void setFetch(Expression fetch) { + this.fetchExpr = fetch; + } + + final boolean nextRow(long limitRows, long count) { + if (limitRows < 0 || count < limitRows) { + while (targetTableFilter.next()) { + setCurrentRowNumber(count + 1); + if (condition == null || condition.getBooleanValue(session)) { + return true; + } + } + } + return false; + } + + final void appendFilterCondition(StringBuilder builder, int sqlFlags) { + if (condition != null) { + builder.append("\nWHERE "); + condition.getUnenclosedSQL(builder, sqlFlags); + } + if (fetchExpr != null) { + builder.append("\nFETCH FIRST "); + String count = fetchExpr.getSQL(sqlFlags, Expression.WITHOUT_PARENTHESES); + if ("1".equals(count)) { + builder.append("ROW ONLY"); + } else { + builder.append(count).append(" ROWS ONLY"); + } + } + } + +} diff --git a/h2/src/main/org/h2/command/dml/Help.java b/h2/src/main/org/h2/command/dml/Help.java new file mode 100644 index 0000000000..528909e31d --- /dev/null +++ b/h2/src/main/org/h2/command/dml/Help.java @@ -0,0 +1,161 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.sql.ResultSet; + +import org.h2.command.CommandInterface; +import org.h2.command.Prepared; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.table.Column; +import org.h2.tools.Csv; +import org.h2.util.Utils; +import org.h2.value.TypeInfo; +import org.h2.value.ValueVarchar; + +/** + * This class represents the statement CALL. + */ +public class Help extends Prepared { + + private final String[] conditions; + + private final Expression[] expressions; + + public Help(SessionLocal session, String[] conditions) { + super(session); + this.conditions = conditions; + Database db = session.getDatabase(); + expressions = new Expression[] { // + new ExpressionColumn(db, new Column("SECTION", TypeInfo.TYPE_VARCHAR)), // + new ExpressionColumn(db, new Column("TOPIC", TypeInfo.TYPE_VARCHAR)), // + new ExpressionColumn(db, new Column("SYNTAX", TypeInfo.TYPE_VARCHAR)), // + new ExpressionColumn(db, new Column("TEXT", TypeInfo.TYPE_VARCHAR)), // + }; + } + + @Override + public ResultInterface queryMeta() { + LocalResult result = new LocalResult(session, expressions, 4, 4); + result.done(); + return result; + } + + @Override + public ResultInterface query(long maxrows) { + LocalResult result = new LocalResult(session, expressions, 4, 4); + try { + ResultSet rs = getTable(); + loop: while (rs.next()) { + String topic = rs.getString(2).trim(); + for (String condition : conditions) { + if (!topic.contains(condition)) { + continue loop; + } + } + result.addRow( + 
// SECTION + ValueVarchar.get(rs.getString(1).trim(), session), + // TOPIC + ValueVarchar.get(topic, session), + // SYNTAX + ValueVarchar.get(stripAnnotationsFromSyntax(rs.getString(3)), session), + // TEXT + ValueVarchar.get(processHelpText(rs.getString(4)), session)); + } + } catch (Exception e) { + throw DbException.convert(e); + } + result.done(); + return result; + } + + /** + * Strip out the special annotations we use to help build the railroad/BNF diagrams + * @param s to process + * @return cleaned text + */ + public static String stripAnnotationsFromSyntax(String s) { + // SYNTAX column - Strip out the special annotations we use to + // help build the railroad/BNF diagrams. + return s.replaceAll("@c@ ", "").replaceAll("@h2@ ", "") + .replaceAll("@c@", "").replaceAll("@h2@", "").trim(); + } + + /** + * Sanitize value read from csv file (i.e. help.csv) + * @param s text to process + * @return text without wrapping quotes and trimmed + */ + public static String processHelpText(String s) { + int len = s.length(); + int end = 0; + for (; end < len; end++) { + char ch = s.charAt(end); + if (ch == '.') { + end++; + break; + } + if (ch == '"') { + do { + end++; + } while (end < len && s.charAt(end) != '"'); + } + } + s = s.substring(0, end); + return s.trim(); + } + + /** + * Returns HELP table. 
+ * + * @return HELP table with columns SECTION,TOPIC,SYNTAX,TEXT + * @throws IOException + * on I/O exception + */ + public static ResultSet getTable() throws IOException { + Reader reader = new InputStreamReader(new ByteArrayInputStream(Utils.getResource("/org/h2/res/help.csv"))); + Csv csv = new Csv(); + csv.setLineCommentCharacter('#'); + return csv.read(reader, null); + } + + @Override + public boolean isQuery() { + return true; + } + + @Override + public boolean isTransactional() { + return true; + } + + @Override + public boolean isReadOnly() { + return true; + } + + @Override + public int getType() { + return CommandInterface.CALL; + } + + @Override + public boolean isCacheable() { + return true; + } + +} diff --git a/h2/src/main/org/h2/command/dml/Insert.java b/h2/src/main/org/h2/command/dml/Insert.java index 8ab69733f8..aa350cc3ee 100644 --- a/h2/src/main/org/h2/command/dml/Insert.java +++ b/h2/src/main/org/h2/command/dml/Insert.java @@ -1,69 +1,74 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; -import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; +import java.util.Map.Entry; import org.h2.api.ErrorCode; import org.h2.api.Trigger; import org.h2.command.Command; import org.h2.command.CommandInterface; -import org.h2.engine.GeneratedKeys; -import org.h2.engine.Mode; +import org.h2.command.query.Query; +import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; import org.h2.expression.Parameter; import org.h2.expression.ValueExpression; import org.h2.expression.condition.Comparison; import org.h2.expression.condition.ConditionAndOr; import org.h2.index.Index; -import org.h2.index.PageDataIndex; import org.h2.message.DbException; import org.h2.mvstore.db.MVPrimaryIndex; import org.h2.result.ResultInterface; import org.h2.result.ResultTarget; import org.h2.result.Row; import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable; +import org.h2.table.DataChangeDeltaTable.ResultOption; import org.h2.table.Table; -import org.h2.table.TableFilter; +import org.h2.util.HasSQL; import org.h2.value.Value; -import org.h2.value.ValueNull; /** * This class represents the statement * INSERT */ -public class Insert extends CommandWithValues implements ResultTarget { +public final class Insert extends CommandWithValues implements ResultTarget { private Table table; private Column[] columns; private Query query; - private boolean sortedInsertMode; - private int rowNumber; + private long rowNumber; private boolean insertFromSelect; - /** - * This table filter is for MERGE..USING support - not used in stand-alone DML - */ - private TableFilter sourceTableFilter; + + private Boolean overridingSystem; /** * For MySQL-style INSERT ... 
ON DUPLICATE KEY UPDATE .... */ private HashMap duplicateKeyAssignmentMap; + private Value[] onDuplicateKeyRow; + /** - * For MySQL-style INSERT IGNORE + * For MySQL-style INSERT IGNORE and PostgreSQL-style ON CONFLICT DO + * NOTHING. */ private boolean ignore; - public Insert(Session session) { + private ResultTarget deltaChangeCollector; + + private ResultOption deltaChangeCollectionMode; + + public Insert(SessionLocal session) { super(session); } @@ -75,6 +80,11 @@ public void setCommand(Command command) { } } + @Override + public Table getTable() { + return table; + } + public void setTable(Table table) { this.table = table; } @@ -84,8 +94,10 @@ public void setColumns(Column[] columns) { } /** - * Sets MySQL-style INSERT IGNORE mode - * @param ignore ignore errors + * Sets MySQL-style INSERT IGNORE mode or PostgreSQL-style ON CONFLICT + * DO NOTHING. + * + * @param ignore ignore duplicates */ public void setIgnore(boolean ignore) { this.ignore = ignore; @@ -95,6 +107,10 @@ public void setQuery(Query query) { this.query = query; } + public void setOverridingSystem(Boolean overridingSystem) { + this.overridingSystem = overridingSystem; + } + /** * Keep a collection of the columns to pass to update if a duplicate key * happens, for MySQL-style INSERT ... ON DUPLICATE KEY UPDATE .... 
@@ -106,50 +122,32 @@ public void addAssignmentForDuplicate(Column column, Expression expression) { if (duplicateKeyAssignmentMap == null) { duplicateKeyAssignmentMap = new HashMap<>(); } - if (duplicateKeyAssignmentMap.containsKey(column)) { - throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, - column.getName()); + if (duplicateKeyAssignmentMap.putIfAbsent(column, expression) != null) { + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column.getName()); } - duplicateKeyAssignmentMap.put(column, expression); } @Override - public int update() { - Index index = null; - if (sortedInsertMode) { - if (!session.getDatabase().isMVStore()) { - /* - * Take exclusive lock, otherwise two different inserts running at - * the same time, the second might accidentally get - * sorted-insert-mode. - */ - table.lock(session, /* exclusive */true, /* forceLockEvenInMvcc */true); - } - index = table.getScanIndex(session); - index.setSortedInsertMode(true); - } + public long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + this.deltaChangeCollector = deltaChangeCollector; + this.deltaChangeCollectionMode = deltaChangeCollectionMode; try { return insertRows(); } finally { - if (index != null) { - index.setSortedInsertMode(false); - } + this.deltaChangeCollector = null; + this.deltaChangeCollectionMode = null; } } - private int insertRows() { - session.getUser().checkRight(table, Right.INSERT); + private long insertRows() { + session.getUser().checkTableRight(table, Right.INSERT); setCurrentRowNumber(0); table.fire(session, Trigger.INSERT, true); rowNumber = 0; - GeneratedKeys generatedKeys = session.getGeneratedKeys(); - generatedKeys.initialize(table); int listSize = valuesExpressionList.size(); if (listSize > 0) { - Mode mode = session.getDatabase().getMode(); int columnLen = columns.length; for (int x = 0; x < listSize; x++) { - generatedKeys.nextRow(); Row newRow = table.getTemplateRow(); Expression[] expr = 
valuesExpressionList.get(x); setCurrentRowNumber(x + 1); @@ -157,25 +155,21 @@ private int insertRows() { Column c = columns[i]; int index = c.getColumnId(); Expression e = expr[i]; - if (e != null) { - // e can be null (DEFAULT) - e = e.optimize(session); + if (e != ValueExpression.DEFAULT) { try { - Value v = c.convert(e.getValue(session), mode); - newRow.setValue(index, v); - if (e.isGeneratedKey()) { - generatedKeys.add(c); - } + newRow.setValue(index, e.getValue(session)); } catch (DbException ex) { throw setRow(ex, x, getSimpleSQL(expr)); } } } rowNumber++; - table.validateConvertUpdateSequence(session, newRow); - boolean done = table.fireBeforeRow(session, null, newRow); - if (!done) { - table.lock(session, true, false); + table.convertInsertRow(session, newRow, overridingSystem); + if (deltaChangeCollectionMode == ResultOption.NEW) { + deltaChangeCollector.addRow(newRow.getValueList().clone()); + } + if (!table.fireBeforeRow(session, null, newRow)) { + table.lock(session, Table.WRITE_LOCK); try { table.addRow(session, newRow); } catch (DbException de) { @@ -189,25 +183,24 @@ private int insertRows() { } continue; } - generatedKeys.confirmRow(newRow); - session.log(table, UndoLogRecord.INSERT, newRow); + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); table.fireAfterRow(session, null, newRow, false); + } else { + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); } } } else { - table.lock(session, true, false); + table.lock(session, Table.WRITE_LOCK); if (insertFromSelect) { query.query(0, this); } else { ResultInterface rows = query.query(0); while (rows.next()) { - generatedKeys.nextRow(); Value[] r = rows.currentRow(); try { - Row newRow = addRowImpl(r); - if (newRow != null) { - generatedKeys.confirmRow(newRow); - } + addRow(r); } catch (DbException de) { if (handleOnDuplicate(de, r)) { // MySQL returns 2 for 
updated row @@ -227,37 +220,30 @@ private int insertRows() { } @Override - public void addRow(Value[] values) { - addRowImpl(values); - } - - private Row addRowImpl(Value[] values) { + public void addRow(Value... values) { Row newRow = table.getTemplateRow(); setCurrentRowNumber(++rowNumber); - Mode mode = session.getDatabase().getMode(); for (int j = 0, len = columns.length; j < len; j++) { - Column c = columns[j]; - int index = c.getColumnId(); - try { - Value v = c.convert(values[j], mode); - newRow.setValue(index, v); - } catch (DbException ex) { - throw setRow(ex, rowNumber, getSQL(values)); - } + newRow.setValue(columns[j].getColumnId(), values[j]); + } + table.convertInsertRow(session, newRow, overridingSystem); + if (deltaChangeCollectionMode == ResultOption.NEW) { + deltaChangeCollector.addRow(newRow.getValueList().clone()); } - table.validateConvertUpdateSequence(session, newRow); - boolean done = table.fireBeforeRow(session, null, newRow); - if (!done) { + if (!table.fireBeforeRow(session, null, newRow)) { table.addRow(session, newRow); - session.log(table, UndoLogRecord.INSERT, newRow); + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); table.fireAfterRow(session, null, newRow, false); - return newRow; + } else { + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); } - return null; } @Override - public int getRowCount() { + public long getRowCount() { + // This method is not used in this class return rowNumber; } @@ -267,17 +253,14 @@ public void limitsWereApplied() { } @Override - public String getPlanSQL(boolean alwaysQuote) { + public String getPlanSQL(int sqlFlags) { StringBuilder builder = new StringBuilder("INSERT INTO "); - table.getSQL(builder, alwaysQuote).append('('); - Column.writeColumns(builder, columns, alwaysQuote); + table.getSQL(builder, sqlFlags).append('('); + Column.writeColumns(builder, 
columns, sqlFlags); builder.append(")\n"); if (insertFromSelect) { builder.append("DIRECT "); } - if (sortedInsertMode) { - builder.append("SORTED "); - } if (!valuesExpressionList.isEmpty()) { builder.append("VALUES "); int row = 0; @@ -288,12 +271,10 @@ public String getPlanSQL(boolean alwaysQuote) { if (row++ > 0) { builder.append(",\n"); } - builder.append('('); - Expression.writeExpressions(builder, expr, alwaysQuote); - builder.append(')'); + Expression.writeExpressions(builder.append('('), expr, sqlFlags).append(')'); } } else { - builder.append(query.getPlanSQL(alwaysQuote)); + builder.append(query.getPlanSQL(sqlFlags)); } return builder.toString(); } @@ -316,9 +297,6 @@ public void prepare() { for (int i = 0, len = expr.length; i < len; i++) { Expression e = expr[i]; if (e != null) { - if(sourceTableFilter!=null){ - e.mapColumns(sourceTableFilter, 0, Expression.MAP_INITIAL); - } e = e.optimize(session); if (e instanceof Parameter) { Parameter p = (Parameter) e; @@ -337,22 +315,13 @@ public void prepare() { } @Override - public boolean isTransactional() { - return true; - } - - @Override - public ResultInterface queryMeta() { - return null; - } - - public void setSortedInsertMode(boolean sortedInsertMode) { - this.sortedInsertMode = sortedInsertMode; + public int getType() { + return CommandInterface.INSERT; } @Override - public int getType() { - return CommandInterface.INSERT; + public String getStatementName() { + return "INSERT"; } public void setInsertFromSelect(boolean value) { @@ -361,8 +330,7 @@ public void setInsertFromSelect(boolean value) { @Override public boolean isCacheable() { - return duplicateKeyAssignmentMap == null || - duplicateKeyAssignmentMap.isEmpty(); + return duplicateKeyAssignmentMap == null; } /** @@ -374,22 +342,18 @@ private boolean handleOnDuplicate(DbException de, Value[] currentRow) { if (de.getErrorCode() != ErrorCode.DUPLICATE_KEY_1) { throw de; } - if (duplicateKeyAssignmentMap == null || - 
duplicateKeyAssignmentMap.isEmpty()) { + if (duplicateKeyAssignmentMap == null) { if (ignore) { return false; } throw de; } - ArrayList variableNames = new ArrayList<>( - duplicateKeyAssignmentMap.size()); + int columnCount = columns.length; Expression[] row = (currentRow == null) ? valuesExpressionList.get((int) getCurrentRowNumber() - 1) - : new Expression[columns.length]; - for (int i = 0; i < columns.length; i++) { - StringBuilder builder = table.getSQL(new StringBuilder(), true).append('.'); - String key = columns[i].getSQL(builder, true).toString(); - variableNames.add(key); + : new Expression[columnCount]; + onDuplicateKeyRow = new Value[table.getColumns().length]; + for (int i = 0; i < columnCount; i++) { Value value; if (currentRow != null) { value = currentRow[i]; @@ -397,20 +361,19 @@ private boolean handleOnDuplicate(DbException de, Value[] currentRow) { } else { value = row[i].getValue(session); } - session.setVariable(key, value); + onDuplicateKeyRow[columns[i].getColumnId()] = value; } StringBuilder builder = new StringBuilder("UPDATE "); - table.getSQL(builder, true).append(" SET "); + table.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append(" SET "); boolean f = false; - for (Column column : duplicateKeyAssignmentMap.keySet()) { + for (Entry entry : duplicateKeyAssignmentMap.entrySet()) { if (f) { builder.append(", "); } f = true; - Expression ex = duplicateKeyAssignmentMap.get(column); - column.getSQL(builder, true).append('='); - ex.getSQL(builder, true); + entry.getKey().getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append('='); + entry.getValue().getUnenclosedSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); } builder.append(" WHERE "); Index foundIndex = (Index) de.getSource(); @@ -418,18 +381,16 @@ private boolean handleOnDuplicate(DbException de, Value[] currentRow) { throw DbException.getUnsupportedException( "Unable to apply ON DUPLICATE KEY UPDATE, no index found!"); } - prepareUpdateCondition(foundIndex, row).getSQL(builder, true); + 
prepareUpdateCondition(foundIndex, row).getUnenclosedSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); String sql = builder.toString(); Update command = (Update) session.prepare(sql); - command.setUpdateToCurrentValuesReturnsZero(true); + command.setOnDuplicateKeyInsert(this); for (Parameter param : command.getParameters()) { Parameter insertParam = parameters.get(param.getIndex()); param.setValue(insertParam.getValue(session)); } boolean result = command.update() > 0; - for (String variableName : variableNames) { - session.setVariable(variableName, ValueNull.INSTANCE); - } + onDuplicateKeyRow = null; return result; } @@ -445,12 +406,6 @@ private Expression prepareUpdateCondition(Index foundIndex, Expression[] row) { MVPrimaryIndex foundMV = (MVPrimaryIndex) foundIndex; indexedColumns = new Column[] { foundMV.getIndexColumns()[foundMV .getMainIndexColumn()].column }; - } else if (foundIndex instanceof PageDataIndex) { - PageDataIndex foundPD = (PageDataIndex) foundIndex; - int mainIndexColumn = foundPD.getMainIndexColumn(); - indexedColumns = mainIndexColumn >= 0 - ? 
new Column[] { foundPD.getIndexColumns()[mainIndexColumn].column } - : foundIndex.getColumns(); } else { indexedColumns = foundIndex.getColumns(); } @@ -458,15 +413,14 @@ private Expression prepareUpdateCondition(Index foundIndex, Expression[] row) { Expression condition = null; for (Column column : indexedColumns) { ExpressionColumn expr = new ExpressionColumn(session.getDatabase(), - table.getSchema().getName(), table.getName(), - column.getName(), false); + table.getSchema().getName(), table.getName(), column.getName()); for (int i = 0; i < columns.length; i++) { - if (expr.getColumnName().equals(columns[i].getName())) { + if (expr.getColumnName(session, i).equals(columns[i].getName())) { if (condition == null) { - condition = new Comparison(session, Comparison.EQUAL, expr, row[i]); + condition = new Comparison(Comparison.EQUAL, expr, row[i], false); } else { condition = new ConditionAndOr(ConditionAndOr.AND, condition, - new Comparison(session, Comparison.EQUAL, expr, row[i])); + new Comparison(Comparison.EQUAL, expr, row[i], false)); } break; } @@ -475,8 +429,27 @@ private Expression prepareUpdateCondition(Index foundIndex, Expression[] row) { return condition; } - public void setSourceTableFilter(TableFilter sourceTableFilter) { - this.sourceTableFilter = sourceTableFilter; + /** + * Get the value to use for the specified column in case of a duplicate key. 
+ * + * @param columnIndex the column index + * @return the value + */ + public Value getOnDuplicateKeyValue(int columnIndex) { + return onDuplicateKeyRow[columnIndex]; } + @Override + public void collectDependencies(HashSet dependencies) { + ExpressionVisitor visitor = ExpressionVisitor.getDependenciesVisitor(dependencies); + if (!valuesExpressionList.isEmpty()) { + for (Expression[] expr : valuesExpressionList) { + for (Expression e : expr) { + e.isEverything(visitor); + } + } + } else { + query.isEverything(visitor); + } + } } diff --git a/h2/src/main/org/h2/command/dml/Merge.java b/h2/src/main/org/h2/command/dml/Merge.java index 4be0aa5774..7931be7085 100644 --- a/h2/src/main/org/h2/command/dml/Merge.java +++ b/h2/src/main/org/h2/command/dml/Merge.java @@ -1,48 +1,56 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; import java.util.ArrayList; +import java.util.HashSet; import org.h2.api.ErrorCode; import org.h2.api.Trigger; import org.h2.command.Command; import org.h2.command.CommandInterface; -import org.h2.command.Prepared; -import org.h2.engine.GeneratedKeys; -import org.h2.engine.Mode; +import org.h2.command.query.Query; +import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.Parameter; +import org.h2.expression.ValueExpression; import org.h2.index.Index; import org.h2.message.DbException; import org.h2.mvstore.db.MVPrimaryIndex; import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; import org.h2.result.Row; import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable; +import org.h2.table.DataChangeDeltaTable.ResultOption; import org.h2.table.Table; -import org.h2.table.TableFilter; +import org.h2.util.HasSQL; import org.h2.value.Value; +import org.h2.value.ValueNull; /** * This class represents the statement * MERGE + * or the MySQL compatibility statement + * REPLACE */ -public class Merge extends CommandWithValues { +public final class Merge extends CommandWithValues { - private Table targetTable; - private TableFilter targetTableFilter; + private boolean isReplace; + + private Table table; private Column[] columns; private Column[] keys; private Query query; - private Prepared update; + private Update update; - public Merge(Session session) { + public Merge(SessionLocal session, boolean isReplace) { super(session); + this.isReplace = isReplace; } @Override @@ -53,8 +61,13 @@ public void setCommand(Command command) { } } - public void setTargetTable(Table targetTable) { - this.targetTable = targetTable; + @Override + public Table getTable() { + return table; + } + + public void setTable(Table table) { + 
this.table = table; } public void setColumns(Column[] columns) { @@ -70,111 +83,115 @@ public void setQuery(Query query) { } @Override - public int update() { - int count; - session.getUser().checkRight(targetTable, Right.INSERT); - session.getUser().checkRight(targetTable, Right.UPDATE); + public long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + long count = 0; + session.getUser().checkTableRight(table, Right.INSERT); + session.getUser().checkTableRight(table, Right.UPDATE); setCurrentRowNumber(0); - GeneratedKeys generatedKeys = session.getGeneratedKeys(); - Mode mode = session.getDatabase().getMode(); if (!valuesExpressionList.isEmpty()) { // process values in list - count = 0; - generatedKeys.initialize(targetTable); for (int x = 0, size = valuesExpressionList.size(); x < size; x++) { setCurrentRowNumber(x + 1); - generatedKeys.nextRow(); Expression[] expr = valuesExpressionList.get(x); - Row newRow = targetTable.getTemplateRow(); + Row newRow = table.getTemplateRow(); for (int i = 0, len = columns.length; i < len; i++) { Column c = columns[i]; int index = c.getColumnId(); Expression e = expr[i]; - if (e != null) { - // e can be null (DEFAULT) + if (e != ValueExpression.DEFAULT) { try { - Value v = c.convert(e.getValue(session), mode); - newRow.setValue(index, v); - if (e.isGeneratedKey()) { - generatedKeys.add(c); - } + newRow.setValue(index, e.getValue(session)); } catch (DbException ex) { throw setRow(ex, count, getSimpleSQL(expr)); } } } - merge(newRow); - count++; + count += merge(newRow, expr, deltaChangeCollector, deltaChangeCollectionMode); } } else { // process select data for list query.setNeverLazy(true); ResultInterface rows = query.query(0); - count = 0; - targetTable.fire(session, Trigger.UPDATE | Trigger.INSERT, true); - targetTable.lock(session, true, false); + table.fire(session, Trigger.UPDATE | Trigger.INSERT, true); + table.lock(session, Table.WRITE_LOCK); while (rows.next()) { - count++; - 
generatedKeys.nextRow(); Value[] r = rows.currentRow(); - Row newRow = targetTable.getTemplateRow(); + Row newRow = table.getTemplateRow(); setCurrentRowNumber(count); for (int j = 0; j < columns.length; j++) { - Column c = columns[j]; - int index = c.getColumnId(); - try { - Value v = c.convert(r[j], mode); - newRow.setValue(index, v); - } catch (DbException ex) { - throw setRow(ex, count, getSQL(r)); - } + newRow.setValue(columns[j].getColumnId(), r[j]); } - merge(newRow); + count += merge(newRow, null, deltaChangeCollector, deltaChangeCollectionMode); } rows.close(); - targetTable.fire(session, Trigger.UPDATE | Trigger.INSERT, false); + table.fire(session, Trigger.UPDATE | Trigger.INSERT, false); } return count; } /** - * Merge the given row. + * Updates an existing row or inserts a new one. * - * @param row the row + * @param row row to replace + * @param expressions source expressions, or null + * @param deltaChangeCollector target result + * @param deltaChangeCollectionMode collection mode + * @return 1 if row was inserted, 1 if row was updated by a MERGE statement, + * and 2 if row was updated by a REPLACE statement */ - protected void merge(Row row) { - ArrayList k = update.getParameters(); - for (int i = 0; i < columns.length; i++) { - Column col = columns[i]; - Value v = row.getValue(col.getColumnId()); - Parameter p = k.get(i); - p.setValue(v); - } - for (int i = 0; i < keys.length; i++) { - Column col = keys[i]; - Value v = row.getValue(col.getColumnId()); - if (v == null) { - throw DbException.get(ErrorCode.COLUMN_CONTAINS_NULL_VALUES_1, col.getSQL(false)); + private int merge(Row row, Expression[] expressions, ResultTarget deltaChangeCollector, + ResultOption deltaChangeCollectionMode) { + long count; + if (update == null) { + // if there is no valid primary key, + // the REPLACE statement degenerates to an INSERT + count = 0; + } else { + ArrayList k = update.getParameters(); + int j = 0; + for (int i = 0, l = columns.length; i < l; i++) { + Column 
col = columns[i]; + if (col.isGeneratedAlways()) { + if (expressions == null || expressions[i] != ValueExpression.DEFAULT) { + throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1, + col.getSQLWithTable(new StringBuilder(), HasSQL.TRACE_SQL_FLAGS).toString()); + } + } else { + Value v = row.getValue(col.getColumnId()); + if (v == null) { + Expression defaultExpression = col.getEffectiveDefaultExpression(); + v = defaultExpression != null ? defaultExpression.getValue(session) : ValueNull.INSTANCE; + } + k.get(j++).setValue(v); + } + } + for (Column col : keys) { + Value v = row.getValue(col.getColumnId()); + if (v == null) { + throw DbException.get(ErrorCode.COLUMN_CONTAINS_NULL_VALUES_1, col.getTraceSQL()); + } + k.get(j++).setValue(v); } - Parameter p = k.get(columns.length + i); - p.setValue(v); + count = update.update(deltaChangeCollector, deltaChangeCollectionMode); } - - // try an update - int count = update.update(); - // if update fails try an insert if (count == 0) { try { - targetTable.validateConvertUpdateSequence(session, row); - boolean done = targetTable.fireBeforeRow(session, null, row); - if (!done) { - targetTable.lock(session, true, false); - targetTable.addRow(session, row); - session.getGeneratedKeys().confirmRow(row); - session.log(targetTable, UndoLogRecord.INSERT, row); - targetTable.fireAfterRow(session, null, row, false); + table.convertInsertRow(session, row, null); + if (deltaChangeCollectionMode == ResultOption.NEW) { + deltaChangeCollector.addRow(row.getValueList().clone()); + } + if (!table.fireBeforeRow(session, null, row)) { + table.lock(session, Table.WRITE_LOCK); + table.addRow(session, row); + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, row); + table.fireAfterRow(session, null, row, false); + } else { + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, row); } + return 1; } catch 
(DbException e) { if (e.getErrorCode() == ErrorCode.DUPLICATE_KEY_1) { // possibly a concurrent merge or insert @@ -202,26 +219,27 @@ protected void merge(Row row) { indexMatchesKeys = false; } if (indexMatchesKeys) { - throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, targetTable.getName()); + throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, table.getName()); } } } throw e; } - } else if (count != 1) { - throw DbException.get(ErrorCode.DUPLICATE_KEY_1, targetTable.getSQL(false)); + } else if (count == 1) { + return isReplace ? 2 : 1; } + throw DbException.get(ErrorCode.DUPLICATE_KEY_1, table.getTraceSQL()); } @Override - public String getPlanSQL(boolean alwaysQuote) { - StringBuilder builder = new StringBuilder("MERGE INTO "); - targetTable.getSQL(builder, alwaysQuote).append('('); - Column.writeColumns(builder, columns, alwaysQuote); + public String getPlanSQL(int sqlFlags) { + StringBuilder builder = new StringBuilder(isReplace ? "REPLACE INTO " : "MERGE INTO "); + table.getSQL(builder, sqlFlags).append('('); + Column.writeColumns(builder, columns, sqlFlags); builder.append(')'); - if (keys != null) { + if (!isReplace && keys != null) { builder.append(" KEY("); - Column.writeColumns(builder, keys, alwaysQuote); + Column.writeColumns(builder, keys, sqlFlags); builder.append(')'); } builder.append('\n'); @@ -232,12 +250,10 @@ public String getPlanSQL(boolean alwaysQuote) { if (row++ > 0) { builder.append(", "); } - builder.append('('); - Expression.writeExpressions(builder, expr, alwaysQuote); - builder.append(')'); + Expression.writeExpressions(builder.append('('), expr, sqlFlags).append(')'); } } else { - builder.append(query.getPlanSQL(alwaysQuote)); + builder.append(query.getPlanSQL(sqlFlags)); } return builder.toString(); } @@ -249,7 +265,7 @@ public void prepare() { // special case where table is used as a sequence columns = new Column[0]; } else { - columns = targetTable.getColumns(); + columns = table.getColumns(); } } if 
(!valuesExpressionList.isEmpty()) { @@ -271,52 +287,62 @@ public void prepare() { } } if (keys == null) { - Index idx = targetTable.getPrimaryKey(); + Index idx = table.getPrimaryKey(); if (idx == null) { throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, "PRIMARY KEY"); } keys = idx.getColumns(); } - StringBuilder builder = new StringBuilder("UPDATE "); - targetTable.getSQL(builder, true).append(" SET "); - Column.writeColumns(builder, columns, ", ", "=?", true).append(" WHERE "); - Column.writeColumns(builder, keys, " AND ", "=?", true); - update = session.prepare(builder.toString()); - } - - @Override - public boolean isTransactional() { - return true; - } - - @Override - public ResultInterface queryMeta() { - return null; + if (isReplace) { + // if there is no valid primary key, + // the REPLACE statement degenerates to an INSERT + for (Column key : keys) { + boolean found = false; + for (Column column : columns) { + if (column.getColumnId() == key.getColumnId()) { + found = true; + break; + } + } + if (!found) { + return; + } + } + } + StringBuilder builder = table.getSQL(new StringBuilder("UPDATE "), HasSQL.DEFAULT_SQL_FLAGS).append(" SET "); + boolean hasColumn = false; + for (int i = 0, l = columns.length; i < l; i++) { + Column column = columns[i]; + if (!column.isGeneratedAlways()) { + if (hasColumn) { + builder.append(", "); + } + hasColumn = true; + column.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append("=?"); + } + } + if (!hasColumn) { + throw DbException.getSyntaxError(sqlStatement, sqlStatement.length(), + "Valid MERGE INTO statement with at least one updatable column"); + } + Column.writeColumns(builder.append(" WHERE "), keys, " AND ", "=?", HasSQL.DEFAULT_SQL_FLAGS); + update = (Update) session.prepare(builder.toString()); } @Override public int getType() { - return CommandInterface.MERGE; + return isReplace ? 
CommandInterface.REPLACE : CommandInterface.MERGE; } @Override - public boolean isCacheable() { - return true; - } - - public Table getTargetTable() { - return targetTable; + public String getStatementName() { + return isReplace ? "REPLACE" : "MERGE"; } - public TableFilter getTargetTableFilter() { - return targetTableFilter; - } - - public void setTargetTableFilter(TableFilter targetTableFilter) { - this.targetTableFilter = targetTableFilter; - setTargetTable(targetTableFilter.getTable()); + @Override + public void collectDependencies(HashSet dependencies) { + if (query != null) { + query.collectDependencies(dependencies); + } } - - - } diff --git a/h2/src/main/org/h2/command/dml/MergeUsing.java b/h2/src/main/org/h2/command/dml/MergeUsing.java index 2690cd4c24..0dab851782 100644 --- a/h2/src/main/org/h2/command/dml/MergeUsing.java +++ b/h2/src/main/org/h2/command/dml/MergeUsing.java @@ -1,33 +1,37 @@ /* - * Copyright 2004-2017 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; import java.util.ArrayList; -import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import org.h2.api.ErrorCode; import org.h2.api.Trigger; import org.h2.command.CommandInterface; -import org.h2.command.Prepared; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.User; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; -import org.h2.expression.condition.ConditionAndOr; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.expression.ValueExpression; import org.h2.message.DbException; -import org.h2.result.ResultInterface; +import org.h2.result.LocalResult; +import org.h2.result.ResultTarget; import org.h2.result.Row; -import org.h2.result.RowImpl; import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable; +import org.h2.table.DataChangeDeltaTable.ResultOption; +import org.h2.table.PlanItem; import org.h2.table.Table; import org.h2.table.TableFilter; +import org.h2.util.HasSQL; import org.h2.util.Utils; -import org.h2.value.Value; /** * This class represents the statement syntax @@ -35,237 +39,13 @@ * * It does not replace the MERGE INTO... KEYS... form. */ -public class MergeUsing extends Prepared { - - /** - * Abstract WHEN command of the MERGE statement. - */ - public static abstract class When { - - /** - * The parent MERGE statement. - */ - final MergeUsing mergeUsing; - - /** - * AND condition of the command. - */ - Expression andCondition; - - When(MergeUsing mergeUsing) { - this.mergeUsing = mergeUsing; - } - - /** - * Sets the specified AND condition. - * - * @param andCondition AND condition to set - */ - public void setAndCondition(Expression andCondition) { - this.andCondition = andCondition; - } - - /** - * Reset updated keys if needs. 
- */ - void reset() { - // Nothing to do - } - - /** - * Merges rows. - * - * @return count of updated rows. - */ - abstract int merge(); - - /** - * Prepares WHEN command. - */ - void prepare() { - if (andCondition != null) { - andCondition.mapColumns(mergeUsing.sourceTableFilter, 2, Expression.MAP_INITIAL); - andCondition.mapColumns(mergeUsing.targetTableFilter, 1, Expression.MAP_INITIAL); - } - } - - /** - * Evaluates trigger mask (UPDATE, INSERT, DELETE). - * - * @return the trigger mask. - */ - abstract int evaluateTriggerMasks(); - - /** - * Checks user's INSERT, UPDATE, DELETE permission in appropriate cases. - */ - abstract void checkRights(); - - } - - public static final class WhenMatched extends When { - - private Update updateCommand; - - private Delete deleteCommand; - - private final HashSet updatedKeys = new HashSet<>(); - - public WhenMatched(MergeUsing mergeUsing) { - super(mergeUsing); - } - - public Prepared getUpdateCommand() { - return updateCommand; - } - - public void setUpdateCommand(Update updateCommand) { - this.updateCommand = updateCommand; - } - - public Prepared getDeleteCommand() { - return deleteCommand; - } - - public void setDeleteCommand(Delete deleteCommand) { - this.deleteCommand = deleteCommand; - } - - @Override - void reset() { - updatedKeys.clear(); - } - - @Override - int merge() { - int countUpdatedRows = 0; - if (updateCommand != null) { - countUpdatedRows += updateCommand.update(); - } - // under oracle rules these updates & delete combinations are - // allowed together - if (deleteCommand != null) { - countUpdatedRows += deleteCommand.update(); - updatedKeys.clear(); - } - return countUpdatedRows; - } - - @Override - void prepare() { - super.prepare(); - if (updateCommand != null) { - updateCommand.setSourceTableFilter(mergeUsing.sourceTableFilter); - updateCommand.setCondition(appendCondition(updateCommand, mergeUsing.onCondition)); - if (andCondition != null) { - 
updateCommand.setCondition(appendCondition(updateCommand, andCondition)); - } - updateCommand.prepare(); - } - if (deleteCommand != null) { - deleteCommand.setSourceTableFilter(mergeUsing.sourceTableFilter); - deleteCommand.setCondition(appendCondition(deleteCommand, mergeUsing.onCondition)); - if (andCondition != null) { - deleteCommand.setCondition(appendCondition(deleteCommand, andCondition)); - } - deleteCommand.prepare(); - if (updateCommand != null) { - updateCommand.setUpdatedKeysCollector(updatedKeys); - deleteCommand.setKeysFilter(updatedKeys); - } - } - } - - @Override - int evaluateTriggerMasks() { - int masks = 0; - if (updateCommand != null) { - masks |= Trigger.UPDATE; - } - if (deleteCommand != null) { - masks |= Trigger.DELETE; - } - return masks; - } - - @Override - void checkRights() { - User user = mergeUsing.getSession().getUser(); - if (updateCommand != null) { - user.checkRight(mergeUsing.targetTable, Right.UPDATE); - } - if (deleteCommand != null) { - user.checkRight(mergeUsing.targetTable, Right.DELETE); - } - } - - private static Expression appendCondition(Update updateCommand, Expression condition) { - Expression c = updateCommand.getCondition(); - return c == null ? condition : new ConditionAndOr(ConditionAndOr.AND, c, condition); - } - - private static Expression appendCondition(Delete deleteCommand, Expression condition) { - Expression c = deleteCommand.getCondition(); - return c == null ? condition : new ConditionAndOr(ConditionAndOr.AND, c, condition); - } - - } - - public static final class WhenNotMatched extends When { - - private Insert insertCommand; - - public WhenNotMatched(MergeUsing mergeUsing) { - super(mergeUsing); - } - - public Insert getInsertCommand() { - return insertCommand; - } - - public void setInsertCommand(Insert insertCommand) { - this.insertCommand = insertCommand; - } - - @Override - int merge() { - return andCondition == null || andCondition.getBooleanValue(mergeUsing.getSession()) ? 
- insertCommand.update() : 0; - } - - @Override - void prepare() { - super.prepare(); - insertCommand.setSourceTableFilter(mergeUsing.sourceTableFilter); - insertCommand.prepare(); - } - - @Override - int evaluateTriggerMasks() { - return Trigger.INSERT; - } - - @Override - void checkRights() { - mergeUsing.getSession().getUser().checkRight(mergeUsing.targetTable, Right.INSERT); - } - - } - - // Merge fields - /** - * Target table. - */ - Table targetTable; +public final class MergeUsing extends DataChangeStatement { /** * Target table filter. */ TableFilter targetTableFilter; - private Query query; - - // MergeUsing fields /** * Source table filter. */ @@ -275,60 +55,110 @@ void checkRights() { * ON condition expression. */ Expression onCondition; + private ArrayList when = Utils.newSmallArrayList(); - private String queryAlias; - private int countUpdatedRows; - private Select targetMatchQuery; + /** - * Contains mappings between _ROWID_ and ROW_NUMBER for processed rows. Row + * Contains _ROWID_ of processed rows. Row * identities are remembered to prevent duplicate updates of the same row. 
*/ - private final HashMap targetRowidsRemembered = new HashMap<>(); - private int sourceQueryRowNumber; + private final HashSet targetRowidsRemembered = new HashSet<>(); - - public MergeUsing(Session session, TableFilter targetTableFilter) { + public MergeUsing(SessionLocal session, TableFilter targetTableFilter) { super(session); - this.targetTable = targetTableFilter.getTable(); this.targetTableFilter = targetTableFilter; } @Override - public int update() { - countUpdatedRows = 0; - - // clear list of source table keys & rowids we have processed already + public long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + long countUpdatedRows = 0; targetRowidsRemembered.clear(); - - targetTableFilter.startQuery(session); - targetTableFilter.reset(); - + checkRights(); + setCurrentRowNumber(0); sourceTableFilter.startQuery(session); sourceTableFilter.reset(); - - sourceQueryRowNumber = 0; - checkRights(); + Table table = targetTableFilter.getTable(); + table.fire(session, evaluateTriggerMasks(), true); + table.lock(session, Table.WRITE_LOCK); setCurrentRowNumber(0); - for (When w : when) { - w.reset(); + long count = 0; + Row previousSource = null, missedSource = null; + boolean hasRowId = table.getRowIdColumn() != null; + while (sourceTableFilter.next()) { + Row source = sourceTableFilter.get(); + if (missedSource != null) { + if (source != missedSource) { + Row backupTarget = targetTableFilter.get(); + sourceTableFilter.set(missedSource); + targetTableFilter.set(table.getNullRow()); + countUpdatedRows += merge(true, deltaChangeCollector, deltaChangeCollectionMode); + sourceTableFilter.set(source); + targetTableFilter.set(backupTarget); + count++; + } + missedSource = null; + } + setCurrentRowNumber(count + 1); + boolean nullRow = targetTableFilter.isNullRow(); + if (!nullRow) { + Row targetRow = targetTableFilter.get(); + if (table.isRowLockable()) { + Row lockedRow = table.lockRow(session, targetRow); + if (lockedRow == null) { + 
if (previousSource != source) { + missedSource = source; + } + continue; + } + if (!targetRow.hasSharedData(lockedRow)) { + targetRow = lockedRow; + targetTableFilter.set(targetRow); + if (!onCondition.getBooleanValue(session)) { + if (previousSource != source) { + missedSource = source; + } + continue; + } + } + } + if (hasRowId) { + long targetRowId = targetRow.getKey(); + if (!targetRowidsRemembered.add(targetRowId)) { + throw DbException.get(ErrorCode.DUPLICATE_KEY_1, + "Merge using ON column expression, " + + "duplicate _ROWID_ target record already processed:_ROWID_=" + + targetRowId + ":in:" + + targetTableFilter.getTable()); + } + } + } + countUpdatedRows += merge(nullRow, deltaChangeCollector, deltaChangeCollectionMode); + count++; + previousSource = source; } - // process source select query data for row creation - ResultInterface rows = query.query(0); - targetTable.fire(session, evaluateTriggerMasks(), true); - targetTable.lock(session, true, false); - while (rows.next()) { - sourceQueryRowNumber++; - Value[] sourceRowValues = rows.currentRow(); - Row sourceRow = new RowImpl(sourceRowValues, 0); - setCurrentRowNumber(sourceQueryRowNumber); - - merge(sourceRow); + if (missedSource != null) { + sourceTableFilter.set(missedSource); + targetTableFilter.set(table.getNullRow()); + countUpdatedRows += merge(true, deltaChangeCollector, deltaChangeCollectionMode); } - rows.close(); - targetTable.fire(session, evaluateTriggerMasks(), false); + targetRowidsRemembered.clear(); + table.fire(session, evaluateTriggerMasks(), false); return countUpdatedRows; } + private int merge(boolean nullRow, ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + for (When w : when) { + if (w.getClass() == WhenNotMatched.class == nullRow) { + Expression condition = w.andCondition; + if (condition == null || condition.getBooleanValue(session)) { + w.merge(session, deltaChangeCollector, deltaChangeCollectionMode); + return 1; + } + } + } + return 0; + } + 
private int evaluateTriggerMasks() { int masks = 0; for (When w : when) { @@ -341,99 +171,59 @@ private void checkRights() { for (When w : when) { w.checkRights(); } - // check the underlying tables - session.getUser().checkRight(targetTable, Right.SELECT); - session.getUser().checkRight(sourceTableFilter.getTable(), Right.SELECT); - } - - /** - * Merge the given row. - * - * @param sourceRow the row - */ - protected void merge(Row sourceRow) { - // put the column values into the table filter - sourceTableFilter.set(sourceRow); - boolean found = isTargetRowFound(); - for (When w : when) { - if (w.getClass() == WhenNotMatched.class ^ found) { - countUpdatedRows += w.merge(); - } - } - } - - private boolean isTargetRowFound() { - boolean matched = false; - try (ResultInterface rows = targetMatchQuery.query(0)) { - while (rows.next()) { - Value targetRowId = rows.currentRow()[0]; - Integer number = targetRowidsRemembered.get(targetRowId); - // throw and exception if we have processed this _ROWID_ before... 
- if (number != null) { - throw DbException.get(ErrorCode.DUPLICATE_KEY_1, - "Merge using ON column expression, " + - "duplicate _ROWID_ target record already updated, deleted or inserted:_ROWID_=" - + targetRowId + ":in:" - + targetTableFilter.getTable() - + ":conflicting source row number:" - + number); - } - // remember the source column values we have used before (they - // are the effective ON clause keys - // and should not be repeated - targetRowidsRemembered.put(targetRowId, sourceQueryRowNumber); - matched = true; - } - } - return matched; + session.getUser().checkTableRight(targetTableFilter.getTable(), Right.SELECT); + session.getUser().checkTableRight(sourceTableFilter.getTable(), Right.SELECT); } @Override - public String getPlanSQL(boolean alwaysQuote) { + public String getPlanSQL(int sqlFlags) { StringBuilder builder = new StringBuilder("MERGE INTO "); - targetTable.getSQL(builder, alwaysQuote).append('\n').append("USING ").append(query.getPlanSQL(alwaysQuote)); - // TODO add aliases and WHEN clauses to make plan SQL more like original SQL + targetTableFilter.getPlanSQL(builder, false, sqlFlags); + builder.append('\n').append("USING "); + sourceTableFilter.getPlanSQL(builder, false, sqlFlags); + for (When w : when) { + w.getSQL(builder.append('\n'), sqlFlags); + } return builder.toString(); } @Override public void prepare() { - onCondition.addFilterConditions(sourceTableFilter, true); - onCondition.addFilterConditions(targetTableFilter, true); + onCondition.addFilterConditions(sourceTableFilter); + onCondition.addFilterConditions(targetTableFilter); - onCondition.mapColumns(sourceTableFilter, 2, Expression.MAP_INITIAL); - onCondition.mapColumns(targetTableFilter, 1, Expression.MAP_INITIAL); + onCondition.mapColumns(sourceTableFilter, 0, Expression.MAP_INITIAL); + onCondition.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); - // only do the optimize now - before we have already gathered the - // unoptimized column data onCondition = 
onCondition.optimize(session); - onCondition.createIndexConditions(session, sourceTableFilter); + // Create conditions only for target table onCondition.createIndexConditions(session, targetTableFilter); - query.prepare(); - - // Prepare each of the sub-commands ready to aid in the MERGE - // collaboration - targetTableFilter.doneWithIndexConditions(); - boolean forUpdate = false; - for (When w : when) { - w.prepare(); - if (w instanceof WhenNotMatched) { - forUpdate = true; + TableFilter[] filters = new TableFilter[] { sourceTableFilter, targetTableFilter }; + sourceTableFilter.addJoin(targetTableFilter, true, onCondition); + PlanItem item = sourceTableFilter.getBestPlanItem(session, filters, 0, new AllColumnsForPlan(filters)); + sourceTableFilter.setPlanItem(item); + sourceTableFilter.prepare(); + + boolean hasFinalNotMatched = false, hasFinalMatched = false; + for (Iterator i = when.iterator(); i.hasNext();) { + When w = i.next(); + if (!w.prepare(session)) { + i.remove(); + } else if (w.getClass() == WhenNotMatched.class) { + if (hasFinalNotMatched) { + i.remove(); + } else if (w.andCondition == null) { + hasFinalNotMatched = true; + } + } else { + if (hasFinalMatched) { + i.remove(); + } else if (w.andCondition == null) { + hasFinalMatched = true; + } } } - - // setup the targetMatchQuery - for detecting if the target row exists - targetMatchQuery = new Select(session, null); - ArrayList expressions = new ArrayList<>(1); - expressions.add(new ExpressionColumn(session.getDatabase(), targetTable.getSchema().getName(), - targetTableFilter.getTableAlias(), Column.ROWID, true)); - targetMatchQuery.setExpressions(expressions); - targetMatchQuery.addTableFilter(targetTableFilter, true); - targetMatchQuery.addCondition(onCondition); - targetMatchQuery.setForUpdate(forUpdate); - targetMatchQuery.init(); - targetMatchQuery.prepare(); } public void setSourceTableFilter(TableFilter sourceTableFilter) { @@ -465,55 +255,316 @@ public void addWhen(When w) { when.add(w); } - 
public void setQueryAlias(String alias) { - this.queryAlias = alias; - + @Override + public Table getTable() { + return targetTableFilter.getTable(); } - public String getQueryAlias() { - return this.queryAlias; - + public void setTargetTableFilter(TableFilter targetTableFilter) { + this.targetTableFilter = targetTableFilter; } - public Query getQuery() { - return query; + public TableFilter getTargetTableFilter() { + return targetTableFilter; } - public void setQuery(Query query) { - this.query = query; - } + // Prepared interface implementations - public void setTargetTableFilter(TableFilter targetTableFilter) { - this.targetTableFilter = targetTableFilter; + @Override + public int getType() { + return CommandInterface.MERGE; } - public TableFilter getTargetTableFilter() { - return targetTableFilter; + @Override + public String getStatementName() { + return "MERGE"; } - public Table getTargetTable() { - return targetTable; + @Override + public void collectDependencies(HashSet dependencies) { + dependencies.add(targetTableFilter.getTable()); + dependencies.add(sourceTableFilter.getTable()); + ExpressionVisitor visitor = ExpressionVisitor.getDependenciesVisitor(dependencies); + for (When w : when) { + w.collectDependencies(visitor); + } + onCondition.isEverything(visitor); } - public void setTargetTable(Table targetTable) { - this.targetTable = targetTable; + /** + * Abstract WHEN command of the MERGE statement. + */ + public abstract class When implements HasSQL { + + /** + * AND condition of the command. + */ + Expression andCondition; + + When() { + } + + /** + * Sets the specified AND condition. + * + * @param andCondition AND condition to set + */ + public void setAndCondition(Expression andCondition) { + this.andCondition = andCondition; + } + + /** + * Merges rows. 
+ * + * @param session + * the session + * @param deltaChangeCollector + * target result + * @param deltaChangeCollectionMode + * collection mode + */ + abstract void merge(SessionLocal session, ResultTarget deltaChangeCollector, + ResultOption deltaChangeCollectionMode); + + /** + * Prepares WHEN command. + * + * @param session + * the session + * @return {@code false} if this clause may be removed + */ + boolean prepare(SessionLocal session) { + if (andCondition != null) { + andCondition.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); + andCondition.mapColumns(sourceTableFilter, 0, Expression.MAP_INITIAL); + andCondition = andCondition.optimize(session); + if (andCondition.isConstant()) { + if (andCondition.getBooleanValue(session)) { + andCondition = null; + } else { + return false; + } + } + } + return true; + } + + /** + * Evaluates trigger mask (UPDATE, INSERT, DELETE). + * + * @return the trigger mask. + */ + abstract int evaluateTriggerMasks(); + + /** + * Checks user's INSERT, UPDATE, DELETE permission in appropriate cases. + */ + abstract void checkRights(); + + /** + * Find and collect all DbObjects, this When object depends on. 
+ * + * @param visitor the expression visitor + */ + void collectDependencies(ExpressionVisitor visitor) { + if (andCondition != null) { + andCondition.isEverything(visitor); + } + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + builder.append("WHEN "); + if (getClass() == WhenNotMatched.class) { + builder.append("NOT "); + } + builder.append("MATCHED"); + if (andCondition != null) { + andCondition.getUnenclosedSQL(builder.append(" AND "), sqlFlags); + } + return builder.append(" THEN "); + } + } - // Prepared interface implementations + public final class WhenMatchedThenDelete extends When { + + @Override + void merge(SessionLocal session, ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + TableFilter targetTableFilter = MergeUsing.this.targetTableFilter; + Table table = targetTableFilter.getTable(); + Row row = targetTableFilter.get(); + if (deltaChangeCollectionMode == ResultOption.OLD) { + deltaChangeCollector.addRow(row.getValueList()); + } + if (!table.fireRow() || !table.fireBeforeRow(session, row, null)) { + table.removeRow(session, row); + table.fireAfterRow(session, row, null, false); + } + } + + @Override + int evaluateTriggerMasks() { + return Trigger.DELETE; + } + + @Override + void checkRights() { + getSession().getUser().checkTableRight(targetTableFilter.getTable(), Right.DELETE); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return super.getSQL(builder, sqlFlags).append("DELETE"); + } - @Override - public boolean isTransactional() { - return true; } - @Override - public ResultInterface queryMeta() { - return null; + public final class WhenMatchedThenUpdate extends When { + + private SetClauseList setClauseList; + + public void setSetClauseList(SetClauseList setClauseList) { + this.setClauseList = setClauseList; + } + + @Override + void merge(SessionLocal session, ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + 
TableFilter targetTableFilter = MergeUsing.this.targetTableFilter; + Table table = targetTableFilter.getTable(); + try (LocalResult rows = LocalResult.forTable(session, table)) { + setClauseList.prepareUpdate(table, session, deltaChangeCollector, deltaChangeCollectionMode, rows, + targetTableFilter.get(), false); + Update.doUpdate(MergeUsing.this, session, table, rows); + } + } + + @Override + boolean prepare(SessionLocal session) { + boolean result = super.prepare(session); + setClauseList.mapAndOptimize(session, targetTableFilter, sourceTableFilter); + return result; + } + + @Override + int evaluateTriggerMasks() { + return Trigger.UPDATE; + } + + @Override + void checkRights() { + getSession().getUser().checkTableRight(targetTableFilter.getTable(), Right.UPDATE); + } + + @Override + void collectDependencies(ExpressionVisitor visitor) { + super.collectDependencies(visitor); + setClauseList.isEverything(visitor); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return setClauseList.getSQL(super.getSQL(builder, sqlFlags).append("UPDATE"), sqlFlags); + } + } - @Override - public int getType() { - return CommandInterface.MERGE; + public final class WhenNotMatched extends When { + + private Column[] columns; + + private final Boolean overridingSystem; + + private final Expression[] values; + + public WhenNotMatched(Column[] columns, Boolean overridingSystem, Expression[] values) { + this.columns = columns; + this.overridingSystem = overridingSystem; + this.values = values; + } + + @Override + void merge(SessionLocal session, ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + Table table = targetTableFilter.getTable(); + Row newRow = table.getTemplateRow(); + Expression[] expr = values; + for (int i = 0, len = columns.length; i < len; i++) { + Column c = columns[i]; + int index = c.getColumnId(); + Expression e = expr[i]; + if (e != ValueExpression.DEFAULT) { + try { + newRow.setValue(index, 
e.getValue(session)); + } catch (DbException ex) { + ex.addSQL("INSERT -- " + getSimpleSQL(expr)); + throw ex; + } + } + } + table.convertInsertRow(session, newRow, overridingSystem); + if (deltaChangeCollectionMode == ResultOption.NEW) { + deltaChangeCollector.addRow(newRow.getValueList().clone()); + } + if (!table.fireBeforeRow(session, null, newRow)) { + table.addRow(session, newRow); + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); + table.fireAfterRow(session, null, newRow, false); + } else { + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); + } + } + + @Override + boolean prepare(SessionLocal session) { + boolean result = super.prepare(session); + TableFilter targetTableFilter = MergeUsing.this.targetTableFilter, + sourceTableFilter = MergeUsing.this.sourceTableFilter; + if (columns == null) { + columns = targetTableFilter.getTable().getColumns(); + } + if (values.length != columns.length) { + throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); + } + for (int i = 0, len = values.length; i < len; i++) { + Expression e = values[i]; + e.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); + e.mapColumns(sourceTableFilter, 0, Expression.MAP_INITIAL); + e = e.optimize(session); + if (e instanceof Parameter) { + ((Parameter) e).setColumn(columns[i]); + } + values[i] = e; + } + return result; + } + + @Override + int evaluateTriggerMasks() { + return Trigger.INSERT; + } + + @Override + void checkRights() { + getSession().getUser().checkTableRight(targetTableFilter.getTable(), Right.INSERT); + } + + @Override + void collectDependencies(ExpressionVisitor visitor) { + super.collectDependencies(visitor); + for (Expression e : values) { + e.isEverything(visitor); + } + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + super.getSQL(builder, sqlFlags).append("INSERT ("); + 
Column.writeColumns(builder, columns, sqlFlags).append(")\nVALUES ("); + return Expression.writeExpressions(builder, values, sqlFlags).append(')'); + } + } } diff --git a/h2/src/main/org/h2/command/dml/NoOperation.java b/h2/src/main/org/h2/command/dml/NoOperation.java index 6168b051ce..803c52003d 100644 --- a/h2/src/main/org/h2/command/dml/NoOperation.java +++ b/h2/src/main/org/h2/command/dml/NoOperation.java @@ -1,13 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; import org.h2.command.CommandInterface; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.result.ResultInterface; /** @@ -15,12 +15,12 @@ */ public class NoOperation extends Prepared { - public NoOperation(Session session) { + public NoOperation(SessionLocal session) { super(session); } @Override - public int update() { + public long update() { return 0; } diff --git a/h2/src/main/org/h2/command/dml/Replace.java b/h2/src/main/org/h2/command/dml/Replace.java deleted file mode 100644 index fe3dd19455..0000000000 --- a/h2/src/main/org/h2/command/dml/Replace.java +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.command.dml; - -import java.util.ArrayList; - -import org.h2.api.ErrorCode; -import org.h2.api.Trigger; -import org.h2.command.Command; -import org.h2.command.CommandInterface; -import org.h2.command.Prepared; -import org.h2.engine.Mode; -import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; -import org.h2.expression.Expression; -import org.h2.expression.Parameter; -import org.h2.index.Index; -import org.h2.message.DbException; -import org.h2.result.ResultInterface; -import org.h2.result.Row; -import org.h2.table.Column; -import org.h2.table.Table; -import org.h2.value.Value; - -/** - * This class represents the MySQL-compatibility REPLACE statement - */ -public class Replace extends CommandWithValues { - - private Table table; - private Column[] columns; - private Column[] keys; - private Query query; - private Prepared update; - - public Replace(Session session) { - super(session); - } - - @Override - public void setCommand(Command command) { - super.setCommand(command); - if (query != null) { - query.setCommand(command); - } - } - - public void setTable(Table table) { - this.table = table; - } - - public void setColumns(Column[] columns) { - this.columns = columns; - } - - public void setKeys(Column[] keys) { - this.keys = keys; - } - - public void setQuery(Query query) { - this.query = query; - } - - @Override - public int update() { - int count = 0; - session.getUser().checkRight(table, Right.INSERT); - session.getUser().checkRight(table, Right.UPDATE); - setCurrentRowNumber(0); - Mode mode = session.getDatabase().getMode(); - if (!valuesExpressionList.isEmpty()) { - for (int x = 0, size = valuesExpressionList.size(); x < size; x++) { - setCurrentRowNumber(x + 1); - Expression[] expr = valuesExpressionList.get(x); - Row newRow = table.getTemplateRow(); - for (int i = 0, len = columns.length; i < len; i++) { - Column c = columns[i]; - int index = c.getColumnId(); - 
Expression e = expr[i]; - if (e != null) { - // e can be null (DEFAULT) - try { - Value v = c.convert(e.getValue(session), mode); - newRow.setValue(index, v); - } catch (DbException ex) { - throw setRow(ex, count, getSimpleSQL(expr)); - } - } - } - count += replace(newRow); - } - } else { - ResultInterface rows = query.query(0); - table.fire(session, Trigger.UPDATE | Trigger.INSERT, true); - table.lock(session, true, false); - while (rows.next()) { - Value[] r = rows.currentRow(); - Row newRow = table.getTemplateRow(); - setCurrentRowNumber(count); - for (int j = 0; j < columns.length; j++) { - Column c = columns[j]; - int index = c.getColumnId(); - try { - Value v = c.convert(r[j], mode); - newRow.setValue(index, v); - } catch (DbException ex) { - throw setRow(ex, count, getSQL(r)); - } - } - count += replace(newRow); - } - rows.close(); - table.fire(session, Trigger.UPDATE | Trigger.INSERT, false); - } - return count; - } - - /** - * Updates an existing row or inserts a new one. - * - * @param row row to replace - * @return 1 if row was inserted, 2 if row was updated - */ - private int replace(Row row) { - int count = update(row); - if (count == 0) { - try { - table.validateConvertUpdateSequence(session, row); - boolean done = table.fireBeforeRow(session, null, row); - if (!done) { - table.lock(session, true, false); - table.addRow(session, row); - session.log(table, UndoLogRecord.INSERT, row); - table.fireAfterRow(session, null, row, false); - } - return 1; - } catch (DbException e) { - if (e.getErrorCode() == ErrorCode.DUPLICATE_KEY_1) { - // possibly a concurrent replace or insert - Index index = (Index) e.getSource(); - if (index != null) { - // verify the index columns match the key - Column[] indexColumns = index.getColumns(); - boolean indexMatchesKeys = false; - if (indexColumns.length <= keys.length) { - for (int i = 0; i < indexColumns.length; i++) { - if (indexColumns[i] != keys[i]) { - indexMatchesKeys = false; - break; - } - } - } - if 
(indexMatchesKeys) { - throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, table.getName()); - } - } - } - throw e; - } - } else if (count == 1) { - return 2; - } - throw DbException.get(ErrorCode.DUPLICATE_KEY_1, table.getSQL(false)); - } - - private int update(Row row) { - // if there is no valid primary key, - // the statement degenerates to an INSERT - if (update == null) { - return 0; - } - ArrayList k = update.getParameters(); - for (int i = 0; i < columns.length; i++) { - Column col = columns[i]; - Value v = row.getValue(col.getColumnId()); - Parameter p = k.get(i); - p.setValue(v); - } - for (int i = 0; i < keys.length; i++) { - Column col = keys[i]; - Value v = row.getValue(col.getColumnId()); - if (v == null) { - throw DbException.get(ErrorCode.COLUMN_CONTAINS_NULL_VALUES_1, col.getSQL(false)); - } - Parameter p = k.get(columns.length + i); - p.setValue(v); - } - return update.update(); - } - - @Override - public String getPlanSQL(boolean alwaysQuote) { - StringBuilder builder = new StringBuilder("REPLACE INTO "); - table.getSQL(builder, alwaysQuote).append('('); - Column.writeColumns(builder, columns, alwaysQuote); - builder.append(')'); - builder.append('\n'); - if (!valuesExpressionList.isEmpty()) { - builder.append("VALUES "); - int row = 0; - for (Expression[] expr : valuesExpressionList) { - if (row++ > 0) { - builder.append(", "); - } - builder.append('('); - Expression.writeExpressions(builder, expr, alwaysQuote); - builder.append(')'); - } - } else { - builder.append(query.getPlanSQL(alwaysQuote)); - } - return builder.toString(); - } - - @Override - public void prepare() { - if (columns == null) { - if (!valuesExpressionList.isEmpty() && valuesExpressionList.get(0).length == 0) { - // special case where table is used as a sequence - columns = new Column[0]; - } else { - columns = table.getColumns(); - } - } - if (!valuesExpressionList.isEmpty()) { - for (Expression[] expr : valuesExpressionList) { - if (expr.length != columns.length) { - throw 
DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); - } - for (int i = 0; i < expr.length; i++) { - Expression e = expr[i]; - if (e != null) { - expr[i] = e.optimize(session); - } - } - } - } else { - query.prepare(); - if (query.getColumnCount() != columns.length) { - throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); - } - } - if (keys == null) { - Index idx = table.getPrimaryKey(); - if (idx == null) { - throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, "PRIMARY KEY"); - } - keys = idx.getColumns(); - } - // if there is no valid primary key, the statement degenerates to an - // INSERT - for (Column key : keys) { - boolean found = false; - for (Column column : columns) { - if (column.getColumnId() == key.getColumnId()) { - found = true; - break; - } - } - if (!found) { - return; - } - } - StringBuilder builder = new StringBuilder("UPDATE "); - table.getSQL(builder, true).append(" SET "); - Column.writeColumns(builder, columns, ", ", "=?", true).append(" WHERE "); - Column.writeColumns(builder, keys, " AND ", "=?", true); - update = session.prepare(builder.toString()); - } - - @Override - public boolean isTransactional() { - return true; - } - - @Override - public ResultInterface queryMeta() { - return null; - } - - @Override - public int getType() { - return CommandInterface.REPLACE; - } - - @Override - public boolean isCacheable() { - return true; - } - -} diff --git a/h2/src/main/org/h2/command/dml/RunScriptCommand.java b/h2/src/main/org/h2/command/dml/RunScriptCommand.java index aa91b6c7ee..1040e3d6e2 100644 --- a/h2/src/main/org/h2/command/dml/RunScriptCommand.java +++ b/h2/src/main/org/h2/command/dml/RunScriptCommand.java @@ -1,19 +1,18 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; -import java.io.BufferedReader; import java.io.IOException; -import java.io.InputStreamReader; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; +import org.h2.command.CommandContainer; import org.h2.command.CommandInterface; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.ResultInterface; import org.h2.util.ScriptReader; @@ -33,22 +32,35 @@ public class RunScriptCommand extends ScriptBase { private Charset charset = StandardCharsets.UTF_8; - public RunScriptCommand(Session session) { + private boolean quirksMode; + + private boolean variableBinary; + + private boolean from1X; + + public RunScriptCommand(SessionLocal session) { super(session); } @Override - public int update() { + public long update() { session.getUser().checkAdmin(); int count = 0; + boolean oldQuirksMode = session.isQuirksMode(); + boolean oldVariableBinary = session.isVariableBinary(); try { - openInput(); - BufferedReader reader = new BufferedReader(new InputStreamReader(in, charset)); + openInput(charset); // if necessary, strip the BOM from the front of the file reader.mark(1); if (reader.read() != UTF8_BOM) { reader.reset(); } + if (quirksMode) { + session.setQuirksMode(true); + } + if (variableBinary) { + session.setVariableBinary(true); + } ScriptReader r = new ScriptReader(reader); while (true) { String sql = r.readStatement(); @@ -65,21 +77,35 @@ public int update() { } catch (IOException e) { throw DbException.convertIOException(e, null); } finally { + if (quirksMode) { + session.setQuirksMode(oldQuirksMode); + } + if (variableBinary) { + session.setVariableBinary(oldVariableBinary); + } closeIO(); } return count; } private void execute(String sql) { + if (from1X) { + sql = sql.trim(); + if (sql.startsWith("INSERT INTO SYSTEM_LOB_STREAM VALUES(")) { + int idx = sql.indexOf(", NULL, '"); + if (idx 
>= 0) { + sql = new StringBuilder(sql.length() + 1).append(sql, 0, idx + 8).append("X'") + .append(sql, idx + 9, sql.length()).toString(); + } + } + } try { Prepared command = session.prepare(sql); - if (command.isQuery()) { - command.query(0); + CommandContainer commandContainer = new CommandContainer(session, sql, command); + if (commandContainer.isQuery()) { + commandContainer.executeQuery(0, false); } else { - command.update(); - } - if (session.getAutoCommit()) { - session.commit(false); + commandContainer.executeUpdate(null); } } catch (DbException e) { throw e.addSQL(sql); @@ -90,6 +116,34 @@ public void setCharset(Charset charset) { this.charset = charset; } + /** + * Enables or disables the quirks mode. + * + * @param quirksMode + * whether quirks mode should be enabled + */ + public void setQuirksMode(boolean quirksMode) { + this.quirksMode = quirksMode; + } + + /** + * Changes parsing of a BINARY data type. + * + * @param variableBinary + * {@code true} to parse BINARY as VARBINARY, {@code false} to + * parse it as is + */ + public void setVariableBinary(boolean variableBinary) { + this.variableBinary = variableBinary; + } + + /** + * Enables quirks for parsing scripts from H2 1.*.*. + */ + public void setFrom1X() { + variableBinary = quirksMode = from1X = true; + } + @Override public ResultInterface queryMeta() { return null; diff --git a/h2/src/main/org/h2/command/dml/ScriptBase.java b/h2/src/main/org/h2/command/dml/ScriptBase.java index 973180ee73..e1b99c039f 100644 --- a/h2/src/main/org/h2/command/dml/ScriptBase.java +++ b/h2/src/main/org/h2/command/dml/ScriptBase.java @@ -1,42 +1,38 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; -import java.io.BufferedInputStream; import java.io.BufferedOutputStream; +import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; +import java.io.InputStreamReader; import java.io.OutputStream; +import java.nio.charset.Charset; import org.h2.api.ErrorCode; -import org.h2.api.JavaObjectSerializer; import org.h2.command.Prepared; import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.SysProperties; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.security.SHA256; -import org.h2.store.DataHandler; import org.h2.store.FileStore; import org.h2.store.FileStoreInputStream; import org.h2.store.FileStoreOutputStream; -import org.h2.store.LobStorageBackend; import org.h2.store.fs.FileUtils; import org.h2.tools.CompressTool; import org.h2.util.IOUtils; -import org.h2.util.SmallLRUCache; import org.h2.util.StringUtils; -import org.h2.util.TempFileDeleter; -import org.h2.value.CompareMode; /** * This class is the base for RunScriptCommand and ScriptCommand. */ -abstract class ScriptBase extends Prepared implements DataHandler { +abstract class ScriptBase extends Prepared { /** * The default name of the script file if .zip compression is used. @@ -49,9 +45,9 @@ abstract class ScriptBase extends Prepared implements DataHandler { protected OutputStream out; /** - * The input stream. + * The input reader. */ - protected InputStream in; + protected BufferedReader reader; /** * The file name (if set). 
@@ -66,7 +62,7 @@ abstract class ScriptBase extends Prepared implements DataHandler { private FileStore store; private String compressionAlgorithm; - ScriptBase(Session session) { + ScriptBase(SessionLocal session) { super(session); } @@ -136,7 +132,7 @@ void openOutput() { } if (isEncrypted()) { initStore(); - out = new FileStoreOutputStream(store, this, compressionAlgorithm); + out = new FileStoreOutputStream(store, compressionAlgorithm); // always use a big buffer, otherwise end-of-block is written a lot out = new BufferedOutputStream(out, Constants.IO_BUFFER_SIZE_COMPRESS); } else { @@ -153,28 +149,30 @@ void openOutput() { /** * Open the input stream. + * + * @param charset the charset to use */ - void openInput() { + void openInput(Charset charset) { String file = getFileName(); if (file == null) { return; } + InputStream in; if (isEncrypted()) { initStore(); - in = new FileStoreInputStream(store, this, compressionAlgorithm != null, false); + in = new FileStoreInputStream(store, compressionAlgorithm != null, false); } else { - InputStream inStream; try { - inStream = FileUtils.newInputStream(file); + in = FileUtils.newInputStream(file); } catch (IOException e) { throw DbException.convertIOException(e, file); } - in = new BufferedInputStream(inStream, Constants.IO_BUFFER_SIZE); in = CompressTool.wrapInputStream(in, compressionAlgorithm, SCRIPT_SQL); if (in == null) { throw DbException.get(ErrorCode.FILE_NOT_FOUND_1, SCRIPT_SQL + " in " + file); } } + reader = new BufferedReader(new InputStreamReader(in, charset), Constants.IO_BUFFER_SIZE); } /** @@ -183,8 +181,8 @@ void openInput() { void closeIO() { IOUtils.closeSilently(out); out = null; - IOUtils.closeSilently(in); - in = null; + IOUtils.closeSilently(reader); + reader = null; if (store != null) { store.closeSilently(); store = null; @@ -196,73 +194,8 @@ public boolean needRecompile() { return false; } - @Override - public String getDatabasePath() { - return null; - } - - @Override - public FileStore 
openFile(String name, String mode, boolean mustExist) { - return null; - } - - @Override - public void checkPowerOff() { - session.getDatabase().checkPowerOff(); - } - - @Override - public void checkWritingAllowed() { - session.getDatabase().checkWritingAllowed(); - } - - @Override - public int getMaxLengthInplaceLob() { - return session.getDatabase().getMaxLengthInplaceLob(); - } - - @Override - public TempFileDeleter getTempFileDeleter() { - return session.getDatabase().getTempFileDeleter(); - } - - @Override - public String getLobCompressionAlgorithm(int type) { - return session.getDatabase().getLobCompressionAlgorithm(type); - } - public void setCompressionAlgorithm(String algorithm) { this.compressionAlgorithm = algorithm; } - @Override - public Object getLobSyncObject() { - return this; - } - - @Override - public SmallLRUCache getLobFileListCache() { - return null; - } - - @Override - public LobStorageBackend getLobStorage() { - return null; - } - - @Override - public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, - int off, int length) { - throw DbException.throwInternalError(); - } - - @Override - public JavaObjectSerializer getJavaObjectSerializer() { - return session.getDataHandler().getJavaObjectSerializer(); - } - - @Override - public CompareMode getCompareMode() { - return session.getDataHandler().getCompareMode(); - } } diff --git a/h2/src/main/org/h2/command/dml/ScriptCommand.java b/h2/src/main/org/h2/command/dml/ScriptCommand.java index f14d77bce6..d613e45079 100644 --- a/h2/src/main/org/h2/command/dml/ScriptCommand.java +++ b/h2/src/main/org/h2/command/dml/ScriptCommand.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; @@ -17,48 +17,54 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Comparator; import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; import org.h2.engine.Comment; import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.Domain; import org.h2.engine.Right; +import org.h2.engine.RightOwner; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.Setting; -import org.h2.engine.SysProperties; import org.h2.engine.User; -import org.h2.engine.UserAggregate; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.index.Cursor; import org.h2.index.Index; import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; import org.h2.result.LocalResult; import org.h2.result.ResultInterface; import org.h2.result.Row; import org.h2.schema.Constant; +import org.h2.schema.Domain; import org.h2.schema.Schema; import org.h2.schema.SchemaObject; import org.h2.schema.Sequence; import org.h2.schema.TriggerObject; +import org.h2.schema.UserDefinedFunction; import org.h2.table.Column; import org.h2.table.PlanItem; import org.h2.table.Table; import org.h2.table.TableType; +import org.h2.util.HasSQL; import org.h2.util.IOUtils; import org.h2.util.MathUtils; import org.h2.util.StringUtils; import org.h2.util.Utils; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueString; +import org.h2.value.ValueVarchar; /** * This class represents the statement @@ -66,6 +72,16 @@ */ public class ScriptCommand extends ScriptBase { + private static 
final Comparator BY_NAME_COMPARATOR = (o1, o2) -> { + if (o1 instanceof SchemaObject && o2 instanceof SchemaObject) { + int cmp = ((SchemaObject) o1).getSchema().getName().compareTo(((SchemaObject) o2).getSchema().getName()); + if (cmp != 0) { + return cmp; + } + } + return o1.getName().compareTo(o2.getName()); + }; + private Charset charset = StandardCharsets.UTF_8; private Set schemaNames; private Collection
          tables; @@ -79,6 +95,8 @@ public class ScriptCommand extends ScriptBase { private boolean drop; private boolean simple; private boolean withColumns; + private boolean version = true; + private LocalResult result; private String lineSeparatorString; private byte[] lineSeparator; @@ -87,7 +105,7 @@ public class ScriptCommand extends ScriptBase { private int nextLobId; private int lobBlockSize = Constants.IO_BUFFER_SIZE; - public ScriptCommand(Session session) { + public ScriptCommand(SessionLocal session) { super(session); } @@ -134,13 +152,12 @@ public ResultInterface queryMeta() { } private LocalResult createResult() { - Database db = session.getDatabase(); - return db.getResultFactory().create(session, - new Expression[] { new ExpressionColumn(db, new Column("SCRIPT", Value.STRING)) }, 1); + return new LocalResult(session, new Expression[] { + new ExpressionColumn(session.getDatabase(), new Column("SCRIPT", TypeInfo.TYPE_VARCHAR)) }, 1, 1); } @Override - public ResultInterface query(int maxrows) { + public ResultInterface query(long maxrows) { session.getUser().checkAdmin(); reset(); Database db = session.getDatabase(); @@ -160,6 +177,9 @@ public ResultInterface query(int maxrows) { if (out != null) { buffer = new byte[Constants.IO_BUFFER_SIZE]; } + if (version) { + add("-- H2 " + Constants.VERSION, true); + } if (settings) { for (Setting setting : db.getAllSettings()) { if (setting.getName().equals(SetTypes.getTypeName( @@ -174,42 +194,47 @@ public ResultInterface query(int maxrows) { if (out != null) { add("", true); } - for (User user : db.getAllUsers()) { - add(user.getCreateSQL(passwords), false); - } - for (Role role : db.getAllRoles()) { - add(role.getCreateSQL(true), false); + RightOwner[] rightOwners = db.getAllUsersAndRoles().toArray(new RightOwner[0]); + // ADMIN users first, other users next, roles last + Arrays.sort(rightOwners, (o1, o2) -> { + boolean b = o1 instanceof User; + if (b != o2 instanceof User) { + return b ? 
-1 : 1; + } + if (b) { + b = ((User) o1).isAdmin(); + if (b != ((User) o2).isAdmin()) { + return b ? -1 : 1; + } + } + return o1.getName().compareTo(o2.getName()); + }); + for (RightOwner rightOwner : rightOwners) { + if (rightOwner instanceof User) { + add(((User) rightOwner).getCreateSQL(passwords), false); + } else { + add(((Role) rightOwner).getCreateSQL(true), false); + } } + ArrayList schemas = new ArrayList<>(); for (Schema schema : db.getAllSchemas()) { if (excludeSchema(schema)) { continue; } + schemas.add(schema); add(schema.getCreateSQL(), false); } - for (Domain datatype : db.getAllDomains()) { - if (drop) { - add(datatype.getDropSQL(), false); + dumpDomains(schemas); + for (Schema schema : schemas) { + for (Constant constant : sorted(schema.getAllConstants(), Constant.class)) { + add(constant.getCreateSQL(), false); } - add(datatype.getCreateSQL(), false); - } - for (SchemaObject obj : db.getAllSchemaObjects( - DbObject.CONSTANT)) { - if (excludeSchema(obj.getSchema())) { - continue; - } - Constant constant = (Constant) obj; - add(constant.getCreateSQL(), false); } - final ArrayList
          tables = db.getAllTablesAndViews(false); + final ArrayList
          tables = db.getAllTablesAndViews(); // sort by id, so that views are after tables and views on views // after the base views - Collections.sort(tables, new Comparator
          () { - @Override - public int compare(Table t1, Table t2) { - return t1.getId() - t2.getId(); - } - }); + tables.sort(Comparator.comparingInt(Table::getId)); // Generate the DROP XXX ... IF EXISTS for (Table table : tables) { @@ -222,7 +247,7 @@ public int compare(Table t1, Table t2) { if (table.isHidden()) { continue; } - table.lock(session, false, false); + table.lock(session, Table.READ_LOCK); String sql = table.getCreateSQL(); if (sql == null) { // null for metadata tables @@ -232,32 +257,25 @@ public int compare(Table t1, Table t2) { add(table.getDropSQL(), false); } } - for (SchemaObject obj : db.getAllSchemaObjects( - DbObject.FUNCTION_ALIAS)) { - if (excludeSchema(obj.getSchema())) { - continue; - } - if (drop) { - add(obj.getDropSQL(), false); - } - add(obj.getCreateSQL(), false); - } - for (UserAggregate agg : db.getAllAggregates()) { - if (drop) { - add(agg.getDropSQL(), false); + for (Schema schema : schemas) { + for (UserDefinedFunction userDefinedFunction : sorted(schema.getAllFunctionsAndAggregates(), + UserDefinedFunction.class)) { + if (drop) { + add(userDefinedFunction.getDropSQL(), false); + } + add(userDefinedFunction.getCreateSQL(), false); } - add(agg.getCreateSQL(), false); } - for (SchemaObject obj : db.getAllSchemaObjects( - DbObject.SEQUENCE)) { - if (excludeSchema(obj.getSchema())) { - continue; - } - Sequence sequence = (Sequence) obj; - if (drop) { - add(sequence.getDropSQL(), false); + for (Schema schema : schemas) { + for (Sequence sequence : sorted(schema.getAllSequences(), Sequence.class)) { + if (sequence.getBelongsToTable()) { + continue; + } + if (drop) { + add(sequence.getDropSQL(), false); + } + add(sequence.getCreateSQL(), false); } - add(sequence.getCreateSQL(), false); } // Generate CREATE TABLE and INSERT...VALUES @@ -272,7 +290,7 @@ public int compare(Table t1, Table t2) { if (table.isHidden()) { continue; } - table.lock(session, false, false); + table.lock(session, Table.READ_LOCK); String createTableSql = 
table.getCreateSQL(); if (createTableSql == null) { // null for metadata tables @@ -289,10 +307,11 @@ public int compare(Table t1, Table t2) { } } if (TableType.TABLE == tableType) { - if (table.canGetRowCount()) { - StringBuilder builder = new StringBuilder("-- ").append(table.getRowCountApproximation()) + if (table.canGetRowCount(session)) { + StringBuilder builder = new StringBuilder("-- ") + .append(table.getRowCountApproximation(session)) .append(" +/- SELECT COUNT(*) FROM "); - table.getSQL(builder, false); + table.getSQL(builder, HasSQL.TRACE_SQL_FLAGS); add(builder.toString(), false); } if (data) { @@ -309,61 +328,41 @@ public int compare(Table t1, Table t2) { } if (tempLobTableCreated) { add("DROP TABLE IF EXISTS SYSTEM_LOB_STREAM", true); - add("CALL SYSTEM_COMBINE_BLOB(-1)", true); add("DROP ALIAS IF EXISTS SYSTEM_COMBINE_CLOB", true); add("DROP ALIAS IF EXISTS SYSTEM_COMBINE_BLOB", true); tempLobTableCreated = false; } // Generate CREATE CONSTRAINT ... - final ArrayList constraints = db.getAllSchemaObjects( - DbObject.CONSTRAINT); - Collections.sort(constraints, null); - for (SchemaObject obj : constraints) { - if (excludeSchema(obj.getSchema())) { - continue; - } - Constraint constraint = (Constraint) obj; - if (excludeTable(constraint.getTable())) { - continue; - } - if (constraint.getTable().isHidden()) { - continue; - } - if (Constraint.Type.PRIMARY_KEY != constraint.getConstraintType()) { - add(constraint.getCreateSQLWithoutIndexes(), false); + ArrayList constraints = new ArrayList<>(); + for (Schema schema : schemas) { + for (Constraint constraint : schema.getAllConstraints()) { + if (excludeTable(constraint.getTable())) { + continue; + } + Type constraintType = constraint.getConstraintType(); + if (constraintType != Type.DOMAIN && constraint.getTable().isHidden()) { + continue; + } + if (constraintType != Constraint.Type.PRIMARY_KEY) { + constraints.add(constraint); + } } } - // Generate CREATE TRIGGER ... 
- for (SchemaObject obj : db.getAllSchemaObjects(DbObject.TRIGGER)) { - if (excludeSchema(obj.getSchema())) { - continue; - } - TriggerObject trigger = (TriggerObject) obj; - if (excludeTable(trigger.getTable())) { - continue; - } - add(trigger.getCreateSQL(), false); + constraints.sort(null); + for (Constraint constraint : constraints) { + add(constraint.getCreateSQLWithoutIndexes(), false); } - // Generate GRANT ... - for (Right right : db.getAllRights()) { - DbObject object = right.getGrantedObject(); - if (object != null) { - if (object instanceof Schema) { - if (excludeSchema((Schema) object)) { - continue; - } - } else if (object instanceof Table) { - Table table = (Table) object; - if (excludeSchema(table.getSchema())) { - continue; - } - if (excludeTable(table)) { - continue; - } + // Generate CREATE TRIGGER ... + for (Schema schema : schemas) { + for (TriggerObject trigger : schema.getAllTriggers()) { + if (excludeTable(trigger.getTable())) { + continue; } + add(trigger.getCreateSQL(), false); } - add(right.getCreateSQL(), false); } + // Generate GRANT ... + dumpRights(db); // Generate COMMENT ON ... 
for (Comment comment : db.getAllComments()) { add(comment.getCreateSQL(), false); @@ -382,17 +381,139 @@ public int compare(Table t1, Table t2) { return r; } + private void dumpDomains(ArrayList schemas) throws IOException { + TreeMap> referencingDomains = new TreeMap<>(BY_NAME_COMPARATOR); + TreeSet known = new TreeSet<>(BY_NAME_COMPARATOR); + for (Schema schema : schemas) { + for (Domain domain : sorted(schema.getAllDomains(), Domain.class)) { + Domain parent = domain.getDomain(); + if (parent == null) { + addDomain(domain); + } else { + TreeSet set = referencingDomains.get(parent); + if (set == null) { + set = new TreeSet<>(BY_NAME_COMPARATOR); + referencingDomains.put(parent, set); + } + set.add(domain); + if (parent.getDomain() == null || !schemas.contains(parent.getSchema())) { + known.add(parent); + } + } + } + } + while (!referencingDomains.isEmpty()) { + TreeSet known2 = new TreeSet<>(BY_NAME_COMPARATOR); + for (Domain d : known) { + TreeSet set = referencingDomains.remove(d); + if (set != null) { + for (Domain d2 : set) { + addDomain(d2); + known2.add(d2); + } + } + } + known = known2; + } + } + + private void dumpRights(Database db) throws IOException { + Right[] rights = db.getAllRights().toArray(new Right[0]); + Arrays.sort(rights, (o1, o2) -> { + Role r1 = o1.getGrantedRole(), r2 = o2.getGrantedRole(); + if ((r1 == null) != (r2 == null)) { + return r1 == null ? -1 : 1; + } + if (r1 == null) { + DbObject g1 = o1.getGrantedObject(), g2 = o2.getGrantedObject(); + if ((g1 == null) != (g2 == null)) { + return g1 == null ? -1 : 1; + } + if (g1 != null) { + if (g1 instanceof Schema != g2 instanceof Schema) { + return g1 instanceof Schema ? 
-1 : 1; + } + int cmp = g1.getName().compareTo(g2.getName()); + if (cmp != 0) { + return cmp; + } + } + } else { + int cmp = r1.getName().compareTo(r2.getName()); + if (cmp != 0) { + return cmp; + } + } + return o1.getGrantee().getName().compareTo(o2.getGrantee().getName()); + }); + for (Right right : rights) { + DbObject object = right.getGrantedObject(); + if (object != null) { + if (object instanceof Schema) { + if (excludeSchema((Schema) object)) { + continue; + } + } else if (object instanceof Table) { + Table table = (Table) object; + if (excludeSchema(table.getSchema())) { + continue; + } + if (excludeTable(table)) { + continue; + } + } + } + add(right.getCreateSQL(), false); + } + } + + private void addDomain(Domain domain) throws IOException { + if (drop) { + add(domain.getDropSQL(), false); + } + add(domain.getCreateSQL(), false); + } + + private static T[] sorted(Collection collection, Class clazz) { + @SuppressWarnings("unchecked") + T[] array = collection.toArray((T[]) java.lang.reflect.Array.newInstance(clazz, 0)); + Arrays.sort(array, BY_NAME_COMPARATOR); + return array; + } + private int generateInsertValues(int count, Table table) throws IOException { PlanItem plan = table.getBestPlanItem(session, null, null, -1, null, null); Index index = plan.getIndex(); Cursor cursor = index.find(session, null, null); Column[] columns = table.getColumns(); + boolean withGenerated = false, withGeneratedAlwaysAsIdentity = false; + for (Column c : columns) { + if (c.isGeneratedAlways()) { + if (c.isIdentity()) { + withGeneratedAlwaysAsIdentity = true; + } else { + withGenerated = true; + } + } + } StringBuilder builder = new StringBuilder("INSERT INTO "); - table.getSQL(builder, true); - if (withColumns) { + table.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); + if (withGenerated || withGeneratedAlwaysAsIdentity || withColumns) { builder.append('('); - Column.writeColumns(builder, columns, true); + boolean needComma = false; + for (Column column : columns) { + if 
(!column.isGenerated()) { + if (needComma) { + builder.append(", "); + } + needComma = true; + column.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); + } + } builder.append(')'); + if (withGeneratedAlwaysAsIdentity) { + builder.append(" OVERRIDING SYSTEM VALUE"); + } } builder.append(" VALUES"); if (!simple) { @@ -401,6 +522,7 @@ private int generateInsertValues(int count, Table table) throws IOException { builder.append('('); String ins = builder.toString(); builder = null; + int columnCount = columns.length; while (cursor.next()) { Row row = cursor.get(); if (builder == null) { @@ -408,11 +530,16 @@ private int generateInsertValues(int count, Table table) throws IOException { } else { builder.append(",\n("); } - for (int j = 0; j < row.getColumnCount(); j++) { - if (j > 0) { + boolean needComma = false; + for (int i = 0; i < columnCount; i++) { + if (columns[i].isGenerated()) { + continue; + } + if (needComma) { builder.append(", "); } - Value v = row.getValue(j); + needComma = true; + Value v = row.getValue(i); if (v.getType().getPrecision() > lobBlockSize) { int id; if (v.getValueType() == Value.CLOB) { @@ -422,10 +549,10 @@ private int generateInsertValues(int count, Table table) throws IOException { id = writeLobStream(v); builder.append("SYSTEM_COMBINE_BLOB(").append(id).append(')'); } else { - v.getSQL(builder); + v.getSQL(builder, HasSQL.NO_CASTS); } } else { - v.getSQL(builder); + v.getSQL(builder, HasSQL.NO_CASTS); } } builder.append(')'); @@ -446,16 +573,15 @@ private int generateInsertValues(int count, Table table) throws IOException { private int writeLobStream(Value v) throws IOException { if (!tempLobTableCreated) { - add("CREATE TABLE IF NOT EXISTS SYSTEM_LOB_STREAM" + + add("CREATE CACHED LOCAL TEMPORARY TABLE IF NOT EXISTS SYSTEM_LOB_STREAM" + "(ID INT NOT NULL, PART INT NOT NULL, " + - "CDATA VARCHAR, BDATA BINARY)", + "CDATA VARCHAR, BDATA VARBINARY)", true); - add("CREATE PRIMARY KEY SYSTEM_LOB_STREAM_PRIMARY_KEY " + - "ON SYSTEM_LOB_STREAM(ID, 
PART)", true); - add("CREATE ALIAS IF NOT EXISTS " + "SYSTEM_COMBINE_CLOB FOR \"" + - this.getClass().getName() + ".combineClob\"", true); - add("CREATE ALIAS IF NOT EXISTS " + "SYSTEM_COMBINE_BLOB FOR \"" + - this.getClass().getName() + ".combineBlob\"", true); + add("ALTER TABLE SYSTEM_LOB_STREAM ADD CONSTRAINT SYSTEM_LOB_STREAM_PRIMARY_KEY PRIMARY KEY(ID, PART)", + true); + String className = getClass().getName(); + add("CREATE ALIAS IF NOT EXISTS " + "SYSTEM_COMBINE_CLOB FOR '" + className + ".combineClob'", true); + add("CREATE ALIAS IF NOT EXISTS " + "SYSTEM_COMBINE_BLOB FOR '" + className + ".combineBlob'", true); tempLobTableCreated = true; } int id = nextLobId++; @@ -466,7 +592,7 @@ private int writeLobStream(Value v) throws IOException { for (int i = 0;; i++) { StringBuilder buff = new StringBuilder(lobBlockSize * 2); buff.append("INSERT INTO SYSTEM_LOB_STREAM VALUES(").append(id) - .append(", ").append(i).append(", NULL, '"); + .append(", ").append(i).append(", NULL, X'"); int len = IOUtils.readFully(input, bytes, lobBlockSize); if (len <= 0) { break; @@ -499,7 +625,7 @@ private int writeLobStream(Value v) throws IOException { break; } default: - DbException.throwInternalError("type:" + v.getValueType()); + throw DbException.getInternalError("type:" + v.getValueType()); } return id; } @@ -512,6 +638,7 @@ private int writeLobStream(Value v) throws IOException { * @param conn a connection * @param id the lob id * @return a stream for the combined data + * @throws SQLException on failure */ public static InputStream combineBlob(Connection conn, int id) throws SQLException { @@ -543,7 +670,7 @@ public int read() throws IOException { } current = null; } catch (SQLException e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } } @@ -556,7 +683,7 @@ public void close() throws IOException { try { rs.close(); } catch (SQLException e) { - throw DbException.convertToIOException(e); + throw 
DataUtils.convertToIOException(e); } } }; @@ -569,6 +696,7 @@ public void close() throws IOException { * @param conn a connection * @param id the lob id * @return a reader for the combined data + * @throws SQLException on failure */ public static Reader combineClob(Connection conn, int id) throws SQLException { if (id < 0) { @@ -599,7 +727,7 @@ public int read() throws IOException { } current = null; } catch (SQLException e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } } @@ -612,7 +740,7 @@ public void close() throws IOException { try { rs.close(); } catch (SQLException e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } @Override @@ -649,7 +777,7 @@ private static ResultSet getLobStream(Connection conn, String column, int id) private void reset() { result = null; buffer = null; - lineSeparatorString = SysProperties.LINE_SEPARATOR; + lineSeparatorString = System.lineSeparator(); lineSeparator = lineSeparatorString.getBytes(charset); } @@ -659,7 +787,7 @@ private boolean excludeSchema(Schema schema) { } if (tables != null) { // if filtering on specific tables, only include those schemas - for (Table table : schema.getAllTablesAndViews()) { + for (Table table : schema.getAllTablesAndViews(session)) { if (tables.contains(table)) { return false; } @@ -699,12 +827,10 @@ private void add(String s, boolean insert) throws IOException { } out.write(buffer, 0, len); if (!insert) { - Value[] row = { ValueString.get(s) }; - result.addRow(row); + result.addRow(ValueVarchar.get(s)); } } else { - Value[] row = { ValueString.get(s) }; - result.addRow(row); + result.addRow(ValueVarchar.get(s)); } } @@ -716,6 +842,10 @@ public void setWithColumns(boolean withColumns) { this.withColumns = withColumns; } + public void setVersion(boolean version) { + this.version = version; + } + public void setCharset(Charset charset) { this.charset = charset; } diff --git 
a/h2/src/main/org/h2/command/dml/Set.java b/h2/src/main/org/h2/command/dml/Set.java index 0b1cca4038..d0020a7307 100644 --- a/h2/src/main/org/h2/command/dml/Set.java +++ b/h2/src/main/org/h2/command/dml/Set.java @@ -1,35 +1,39 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; import java.text.Collator; + import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; +import org.h2.command.Parser; import org.h2.command.Prepared; -import org.h2.compress.Compressor; import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.Mode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.Setting; import org.h2.expression.Expression; +import org.h2.expression.TimeZoneOperation; import org.h2.expression.ValueExpression; import org.h2.message.DbException; import org.h2.message.Trace; -import org.h2.result.LocalResultFactory; +import org.h2.mode.DefaultNullOrdering; import org.h2.result.ResultInterface; -import org.h2.result.RowFactory; import org.h2.schema.Schema; import org.h2.security.auth.AuthenticatorFactory; import org.h2.table.Table; -import org.h2.tools.CompressTool; -import org.h2.util.JdbcUtils; +import org.h2.util.DateTimeUtils; import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; import org.h2.value.CompareMode; -import org.h2.value.ValueInt; +import org.h2.value.DataType; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; /** * This class represents the statement @@ -42,7 +46,7 @@ public class Set extends Prepared { private String stringValue; private String[] stringValueList; - public Set(Session session, int type) { + public Set(SessionLocal session, int 
type) { super(session); this.type = type; } @@ -63,7 +67,14 @@ public boolean isTransactional() { case SetTypes.THROTTLE: case SetTypes.SCHEMA: case SetTypes.SCHEMA_SEARCH_PATH: + case SetTypes.CATALOG: case SetTypes.RETENTION_TIME: + case SetTypes.LAZY_QUERY_EXECUTION: + case SetTypes.NON_KEYWORDS: + case SetTypes.TIME_ZONE: + case SetTypes.VARIABLE_BINARY: + case SetTypes.TRUNCATE_LARGE_LENGTH: + case SetTypes.WRITE_DELAY: return true; default: } @@ -71,7 +82,7 @@ public boolean isTransactional() { } @Override - public int update() { + public long update() { Database database = session.getDatabase(); String name = SetTypes.getTypeName(type); switch (type) { @@ -79,22 +90,26 @@ public int update() { session.getUser().checkAdmin(); int value = getIntValue(); if (value < 0 || value > 2) { - throw DbException.getInvalidValueException("ALLOW_LITERALS", - getIntValue()); + throw DbException.getInvalidValueException("ALLOW_LITERALS", value); + } + synchronized (database) { + database.setAllowLiterals(value); + addOrUpdateSetting(name, null, value); } - database.setAllowLiterals(value); - addOrUpdateSetting(name, null, value); break; } - case SetTypes.CACHE_SIZE: - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("CACHE_SIZE", - getIntValue()); - } + case SetTypes.CACHE_SIZE: { session.getUser().checkAdmin(); - database.setCacheSize(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("CACHE_SIZE", value); + } + synchronized (database) { + database.setCacheSize(value); + addOrUpdateSetting(name, null, value); + } break; + } case SetTypes.CLUSTER: { if (Constants.CLUSTERING_ENABLED.equals(stringValue)) { // this value is used when connecting @@ -111,7 +126,7 @@ public int update() { database.setCluster(value); // use the system session so that the current transaction // (if any) is not committed - Session sysSession = database.getSystemSession(); + 
SessionLocal sysSession = database.getSystemSession(); synchronized (sysSession) { synchronized (database) { addOrUpdateSetting(sysSession, name, value, 0); @@ -123,13 +138,10 @@ public int update() { } case SetTypes.COLLATION: { session.getUser().checkAdmin(); - CompareMode currentMode = database.getCompareMode(); - final boolean binaryUnsigned = currentMode.isBinaryUnsigned(); - final boolean uuidUnsigned = currentMode.isUuidUnsigned(); CompareMode compareMode; StringBuilder buff = new StringBuilder(stringValue); if (stringValue.equals(CompareMode.OFF)) { - compareMode = CompareMode.getInstance(null, 0, binaryUnsigned, uuidUnsigned); + compareMode = CompareMode.getInstance(null, 0); } else { int strength = getIntValue(); buff.append(" STRENGTH "); @@ -142,72 +154,20 @@ public int update() { } else if (strength == Collator.TERTIARY) { buff.append("TERTIARY"); } - compareMode = CompareMode.getInstance(stringValue, strength, binaryUnsigned, uuidUnsigned); + compareMode = CompareMode.getInstance(stringValue, strength); } - CompareMode old = database.getCompareMode(); - if (old.equals(compareMode)) { - break; - } - Table table = database.getFirstUserTable(); - if (table != null) { - throw DbException.get(ErrorCode.COLLATION_CHANGE_WITH_DATA_TABLE_1, table.getSQL(false)); - } - addOrUpdateSetting(name, buff.toString(), 0); - database.setCompareMode(compareMode); - break; - } - case SetTypes.BINARY_COLLATION: { - session.getUser().checkAdmin(); - boolean unsigned; - if (stringValue.equals(CompareMode.SIGNED)) { - unsigned = false; - } else if (stringValue.equals(CompareMode.UNSIGNED)) { - unsigned = true; - } else { - throw DbException.getInvalidValueException("BINARY_COLLATION", stringValue); - } - CompareMode currentMode = database.getCompareMode(); - if (currentMode.isBinaryUnsigned() != unsigned) { - Table table = database.getFirstUserTable(); - if (table != null) { - throw DbException.get(ErrorCode.COLLATION_CHANGE_WITH_DATA_TABLE_1, table.getSQL(false)); + 
synchronized (database) { + CompareMode old = database.getCompareMode(); + if (old.equals(compareMode)) { + break; } - } - CompareMode newMode = CompareMode.getInstance(currentMode.getName(), - currentMode.getStrength(), unsigned, currentMode.isUuidUnsigned()); - addOrUpdateSetting(name, stringValue, 0); - database.setCompareMode(newMode); - break; - } - case SetTypes.UUID_COLLATION: { - session.getUser().checkAdmin(); - boolean unsigned; - if (stringValue.equals(CompareMode.SIGNED)) { - unsigned = false; - } else if (stringValue.equals(CompareMode.UNSIGNED)) { - unsigned = true; - } else { - throw DbException.getInvalidValueException("UUID_COLLATION", stringValue); - } - CompareMode currentMode = database.getCompareMode(); - if (currentMode.isUuidUnsigned() != unsigned) { Table table = database.getFirstUserTable(); if (table != null) { - throw DbException.get(ErrorCode.COLLATION_CHANGE_WITH_DATA_TABLE_1, table.getSQL(false)); + throw DbException.get(ErrorCode.COLLATION_CHANGE_WITH_DATA_TABLE_1, table.getTraceSQL()); } + addOrUpdateSetting(name, buff.toString(), 0); + database.setCompareMode(compareMode); } - CompareMode newMode = CompareMode.getInstance(currentMode.getName(), - currentMode.getStrength(), currentMode.isBinaryUnsigned(), unsigned); - addOrUpdateSetting(name, stringValue, 0); - database.setCompareMode(newMode); - break; - } - case SetTypes.COMPRESS_LOB: { - session.getUser().checkAdmin(); - int algo = CompressTool.getCompressAlgorithm(stringValue); - database.setLobCompressionAlgorithm(algo == Compressor.NO ? 
- null : stringValue); - addOrUpdateSetting(name, stringValue, 0); break; } case SetTypes.CREATE_BUILD: { @@ -216,7 +176,9 @@ public int update() { // just ignore the command if not starting // this avoids problems when running recovery scripts int value = getIntValue(); - addOrUpdateSetting(name, null, value); + synchronized (database) { + addOrUpdateSetting(name, null, value); + } } break; } @@ -226,44 +188,59 @@ public int update() { break; } case SetTypes.DB_CLOSE_DELAY: { - int x = getIntValue(); - if (x == -1) { + session.getUser().checkAdmin(); + int value = getIntValue(); + if (value == -1) { // -1 is a special value for in-memory databases, // which means "keep the DB alive and use the same // DB for all connections" - } else if (x < 0) { - throw DbException.getInvalidValueException("DB_CLOSE_DELAY", x); + } else if (value < 0) { + throw DbException.getInvalidValueException("DB_CLOSE_DELAY", value); + } + synchronized (database) { + database.setCloseDelay(value); + addOrUpdateSetting(name, null, value); } - session.getUser().checkAdmin(); - database.setCloseDelay(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); break; } - case SetTypes.DEFAULT_LOCK_TIMEOUT: - if (getIntValue() < 0) { - throw DbException.getInvalidValueException( - "DEFAULT_LOCK_TIMEOUT", getIntValue()); - } + case SetTypes.DEFAULT_LOCK_TIMEOUT: { session.getUser().checkAdmin(); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("DEFAULT_LOCK_TIMEOUT", value); + } + synchronized (database) { + addOrUpdateSetting(name, null, value); + } break; - case SetTypes.DEFAULT_TABLE_TYPE: + } + case SetTypes.DEFAULT_TABLE_TYPE: { session.getUser().checkAdmin(); - database.setDefaultTableType(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + synchronized (database) { + database.setDefaultTableType(value); + addOrUpdateSetting(name, null, value); + } 
break; + } case SetTypes.EXCLUSIVE: { session.getUser().checkAdmin(); int value = getIntValue(); switch (value) { case 0: - database.setExclusiveSession(null, false); + if (!database.unsetExclusiveSession(session)) { + throw DbException.get(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE); + } break; case 1: - database.setExclusiveSession(session, false); + if (!database.setExclusiveSession(session, false)) { + throw DbException.get(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE); + } break; case 2: - database.setExclusiveSession(session, true); + if (!database.setExclusiveSession(session, true)) { + throw DbException.get(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE); + } break; default: throw DbException.getInvalidValueException("EXCLUSIVE", value); @@ -272,89 +249,96 @@ public int update() { } case SetTypes.JAVA_OBJECT_SERIALIZER: { session.getUser().checkAdmin(); - Table table = database.getFirstUserTable(); - if (table != null) { - throw DbException.get(ErrorCode.JAVA_OBJECT_SERIALIZER_CHANGE_WITH_DATA_TABLE, table.getSQL(false)); + synchronized (database) { + Table table = database.getFirstUserTable(); + if (table != null) { + throw DbException.get(ErrorCode.JAVA_OBJECT_SERIALIZER_CHANGE_WITH_DATA_TABLE, + table.getTraceSQL()); + } + database.setJavaObjectSerializerName(stringValue); + addOrUpdateSetting(name, stringValue, 0); } - database.setJavaObjectSerializerName(stringValue); - addOrUpdateSetting(name, stringValue, 0); break; } - case SetTypes.IGNORECASE: + case SetTypes.IGNORECASE: { session.getUser().checkAdmin(); - database.setIgnoreCase(getIntValue() == 1); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + synchronized (database) { + database.setIgnoreCase(value == 1); + addOrUpdateSetting(name, null, value); + } break; - case SetTypes.LOCK_MODE: + } + case SetTypes.LOCK_MODE: { session.getUser().checkAdmin(); - database.setLockMode(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); - break; - case SetTypes.LOCK_TIMEOUT: - if 
(getIntValue() < 0) { - throw DbException.getInvalidValueException("LOCK_TIMEOUT", - getIntValue()); + int value = getIntValue(); + synchronized (database) { + database.setLockMode(value); + addOrUpdateSetting(name, null, value); } - session.setLockTimeout(getIntValue()); break; - case SetTypes.LOG: { + } + case SetTypes.LOCK_TIMEOUT: { int value = getIntValue(); - if (database.isPersistent() && value != database.getLogMode()) { - session.getUser().checkAdmin(); - database.setLogMode(value); + if (value < 0) { + throw DbException.getInvalidValueException("LOCK_TIMEOUT", value); } + session.setLockTimeout(value); break; } case SetTypes.MAX_LENGTH_INPLACE_LOB: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException( - "MAX_LENGTH_INPLACE_LOB", getIntValue()); - } session.getUser().checkAdmin(); - database.setMaxLengthInplaceLob(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("MAX_LENGTH_INPLACE_LOB", value); + } + synchronized (database) { + database.setMaxLengthInplaceLob(value); + addOrUpdateSetting(name, null, value); + } break; } - case SetTypes.MAX_LOG_SIZE: - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("MAX_LOG_SIZE", - getIntValue()); - } + case SetTypes.MAX_LOG_SIZE: { session.getUser().checkAdmin(); - database.setMaxLogSize((long) getIntValue() * 1024 * 1024); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("MAX_LOG_SIZE", value); + } break; + } case SetTypes.MAX_MEMORY_ROWS: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("MAX_MEMORY_ROWS", - getIntValue()); - } session.getUser().checkAdmin(); - database.setMaxMemoryRows(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw 
DbException.getInvalidValueException("MAX_MEMORY_ROWS", value); + } + synchronized (database) { + database.setMaxMemoryRows(value); + addOrUpdateSetting(name, null, value); + } break; } case SetTypes.MAX_MEMORY_UNDO: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("MAX_MEMORY_UNDO", - getIntValue()); - } session.getUser().checkAdmin(); - database.setMaxMemoryUndo(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("MAX_MEMORY_UNDO", value); + } + synchronized (database) { + addOrUpdateSetting(name, null, value); + } break; } case SetTypes.MAX_OPERATION_MEMORY: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException( - "MAX_OPERATION_MEMORY", getIntValue()); - } session.getUser().checkAdmin(); int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("MAX_OPERATION_MEMORY", value); + } database.setMaxOperationMemory(value); break; } - case SetTypes.MODE: + case SetTypes.MODE: { Mode mode = Mode.getInstance(stringValue); if (mode == null) { throw DbException.get(ErrorCode.UNKNOWN_MODE_1, stringValue); @@ -362,14 +346,6 @@ public int update() { if (database.getMode() != mode) { session.getUser().checkAdmin(); database.setMode(mode); - session.getColumnNamerConfiguration().configure(mode.getEnum()); - } - break; - case SetTypes.MULTI_THREADED: { - boolean v = getIntValue() == 1; - if (database.isMultiThreaded() != v) { - session.getUser().checkAdmin(); - database.setMultiThreaded(v); } break; } @@ -379,25 +355,22 @@ public int update() { break; } case SetTypes.QUERY_TIMEOUT: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("QUERY_TIMEOUT", - getIntValue()); - } int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("QUERY_TIMEOUT", value); + } session.setQueryTimeout(value); break; } case SetTypes.REDO_LOG_BINARY: { - int value = 
getIntValue(); - session.setRedoLogBinary(value == 1); + DbException.getUnsupportedException("MV_STORE + SET REDO_LOG_BINARY"); break; } case SetTypes.REFERENTIAL_INTEGRITY: { session.getUser().checkAdmin(); int value = getIntValue(); if (value < 0 || value > 1) { - throw DbException.getInvalidValueException( - "REFERENTIAL_INTEGRITY", getIntValue()); + throw DbException.getInvalidValueException("REFERENTIAL_INTEGRITY", value); } database.setReferentialIntegrity(value == 1); break; @@ -406,8 +379,7 @@ public int update() { session.getUser().checkAdmin(); int value = getIntValue(); if (value < 0 || value > 1) { - throw DbException.getInvalidValueException("QUERY_STATISTICS", - getIntValue()); + throw DbException.getInvalidValueException("QUERY_STATISTICS", value); } database.setQueryStatistics(value == 1); break; @@ -416,14 +388,13 @@ public int update() { session.getUser().checkAdmin(); int value = getIntValue(); if (value < 1) { - throw DbException.getInvalidValueException("QUERY_STATISTICS_MAX_ENTRIES", - getIntValue()); + throw DbException.getInvalidValueException("QUERY_STATISTICS_MAX_ENTRIES", value); } database.setQueryStatisticsMaxEntries(value); break; } case SetTypes.SCHEMA: { - Schema schema = database.getSchema(stringValue); + Schema schema = database.getSchema(expression.optimize(session).getValue(session).getString()); session.setCurrentSchema(schema); break; } @@ -431,6 +402,15 @@ public int update() { session.setSchemaSearchPath(stringValueList); break; } + case SetTypes.CATALOG: { + String shortName = database.getShortName(); + String value = expression.optimize(session).getValue(session).getString(); + if (value == null || !database.equalsIdentifiers(shortName, value) + && !database.equalsIdentifiers(shortName, value.trim())) { + throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, stringValue); + } + break; + } case SetTypes.TRACE_LEVEL_FILE: session.getUser().checkAdmin(); if (getPersistedObjectId() == 0) { @@ -450,31 +430,24 @@ public int 
update() { } break; case SetTypes.TRACE_MAX_FILE_SIZE: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException( - "TRACE_MAX_FILE_SIZE", getIntValue()); - } session.getUser().checkAdmin(); - int size = getIntValue() * 1024 * 1024; - database.getTraceSystem().setMaxFileSize(size); - addOrUpdateSetting(name, null, getIntValue()); - break; - } - case SetTypes.THROTTLE: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("THROTTLE", - getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("TRACE_MAX_FILE_SIZE", value); + } + int size = value * (1024 * 1024); + synchronized (database) { + database.getTraceSystem().setMaxFileSize(size); + addOrUpdateSetting(name, null, value); } - session.setThrottle(getIntValue()); break; } - case SetTypes.UNDO_LOG: { + case SetTypes.THROTTLE: { int value = getIntValue(); - if (value < 0 || value > 1) { - throw DbException.getInvalidValueException("UNDO_LOG", - getIntValue()); + if (value < 0) { + throw DbException.getInvalidValueException("THROTTLE", value); } - session.setUndoLogEnabled(value == 1); + session.setThrottle(value); break; } case SetTypes.VARIABLE: { @@ -483,54 +456,27 @@ public int update() { break; } case SetTypes.WRITE_DELAY: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("WRITE_DELAY", - getIntValue()); - } session.getUser().checkAdmin(); - database.setWriteDelay(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); - break; - } - case SetTypes.RETENTION_TIME: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("RETENTION_TIME", - getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("WRITE_DELAY", value); } - session.getUser().checkAdmin(); - database.setRetentionTime(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); - break; - } - case SetTypes.ROW_FACTORY: { - session.getUser().checkAdmin(); - 
String rowFactoryName = expression.getColumnName(); - Class rowFactoryClass = JdbcUtils.loadUserClass(rowFactoryName); - RowFactory rowFactory; - try { - rowFactory = rowFactoryClass.getDeclaredConstructor().newInstance(); - } catch (Exception e) { - throw DbException.convert(e); + synchronized (database) { + database.setWriteDelay(value); + addOrUpdateSetting(name, null, value); } - database.setRowFactory(rowFactory); break; } - case SetTypes.BATCH_JOINS: { + case SetTypes.RETENTION_TIME: { + session.getUser().checkAdmin(); int value = getIntValue(); - if (value != 0 && value != 1) { - throw DbException.getInvalidValueException("BATCH_JOINS", - getIntValue()); + if (value < 0) { + throw DbException.getInvalidValueException("RETENTION_TIME", value); } - session.setJoinBatchEnabled(value == 1); - break; - } - case SetTypes.FORCE_JOIN_ORDER: { - int value = getIntValue(); - if (value != 0 && value != 1) { - throw DbException.getInvalidValueException("FORCE_JOIN_ORDER", - value); + synchronized (database) { + database.setRetentionTime(value); + addOrUpdateSetting(name, null, value); } - session.setForceJoinOrder(value == 1); break; } case SetTypes.LAZY_QUERY_EXECUTION: { @@ -552,20 +498,18 @@ public int update() { database.setAllowBuiltinAliasOverride(value == 1); break; } - case SetTypes.COLUMN_NAME_RULES: { - session.getUser().checkAdmin(); - session.getColumnNamerConfiguration().configure(expression.getColumnName()); - break; - } case SetTypes.AUTHENTICATOR: { session.getUser().checkAdmin(); + boolean value = expression.optimize(session).getBooleanValue(session); try { - if (expression.getBooleanValue(session)) { - database.setAuthenticator(AuthenticatorFactory.createAuthenticator()); - } else { - database.setAuthenticator(null); + synchronized (database) { + if (value) { + database.setAuthenticator(AuthenticatorFactory.createAuthenticator()); + } else { + database.setAuthenticator(null); + } + addOrUpdateSetting(name, value ? 
"TRUE" : "FALSE", 0); } - addOrUpdateSetting(name,expression.getValue(session).getString(),0); } catch (Exception e) { // Errors during start are ignored to allow to open the database if (database.isStarting()) { @@ -577,21 +521,43 @@ public int update() { } break; } - case SetTypes.LOCAL_RESULT_FACTORY: { + case SetTypes.IGNORE_CATALOGS: { session.getUser().checkAdmin(); - String localResultFactoryName = expression.getColumnName(); - Class localResultFactoryClass = JdbcUtils.loadUserClass(localResultFactoryName); - LocalResultFactory localResultFactory; + int value = getIntValue(); + synchronized (database) { + database.setIgnoreCatalogs(value == 1); + addOrUpdateSetting(name, null, value); + } + break; + } + case SetTypes.NON_KEYWORDS: + session.setNonKeywords(Parser.parseNonKeywords(stringValueList)); + break; + case SetTypes.TIME_ZONE: + session.setTimeZone(expression == null ? DateTimeUtils.getTimeZone() + : parseTimeZone(expression.getValue(session))); + break; + case SetTypes.VARIABLE_BINARY: + session.setVariableBinary(expression.getBooleanValue(session)); + break; + case SetTypes.DEFAULT_NULL_ORDERING: { + DefaultNullOrdering defaultNullOrdering; try { - localResultFactory = localResultFactoryClass.getDeclaredConstructor().newInstance(); - database.setResultFactory(localResultFactory); - } catch (Exception e) { - throw DbException.convert(e); + defaultNullOrdering = DefaultNullOrdering.valueOf(StringUtils.toUpperEnglish(stringValue)); + } catch (RuntimeException e) { + throw DbException.getInvalidValueException("DEFAULT_NULL_ORDERING", stringValue); + } + if (database.getDefaultNullOrdering() != defaultNullOrdering) { + session.getUser().checkAdmin(); + database.setDefaultNullOrdering(defaultNullOrdering); } break; } + case SetTypes.TRUNCATE_LARGE_LENGTH: + session.setTruncateLargeLength(expression.getBooleanValue(session)); + break; default: - DbException.throwInternalError("type="+type); + throw DbException.getInternalError("type="+type); } // the meta 
data information has changed database.getNextModificationDataId(); @@ -601,13 +567,28 @@ public int update() { return 0; } + private static TimeZoneProvider parseTimeZone(Value v) { + if (DataType.isCharacterStringType(v.getValueType())) { + TimeZoneProvider timeZone; + try { + timeZone = TimeZoneProvider.ofId(v.getString()); + } catch (IllegalArgumentException ex) { + throw DbException.getInvalidValueException("TIME ZONE", v.getTraceSQL()); + } + return timeZone; + } else if (v == ValueNull.INSTANCE) { + throw DbException.getInvalidValueException("TIME ZONE", v); + } + return TimeZoneProvider.ofOffset(TimeZoneOperation.parseInterval(v)); + } + private int getIntValue() { expression = expression.optimize(session); return expression.getValue(session).getInt(); } public void setInt(int value) { - this.expression = ValueExpression.get(ValueInt.get(value)); + this.expression = ValueExpression.get(ValueInteger.get(value)); } public void setExpression(Expression expression) { @@ -618,9 +599,9 @@ private void addOrUpdateSetting(String name, String s, int v) { addOrUpdateSetting(session, name, s, v); } - private void addOrUpdateSetting(Session session, String name, String s, - int v) { + private void addOrUpdateSetting(SessionLocal session, String name, String s, int v) { Database database = session.getDatabase(); + assert Thread.holdsLock(database); if (database.isReadOnly()) { return; } diff --git a/h2/src/main/org/h2/command/dml/SetClauseList.java b/h2/src/main/org/h2/command/dml/SetClauseList.java new file mode 100644 index 0000000000..a17d38b825 --- /dev/null +++ b/h2/src/main/org/h2/command/dml/SetClauseList.java @@ -0,0 +1,404 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import java.util.ArrayList; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionList; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultTarget; +import org.h2.result.Row; +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.table.DataChangeDeltaTable.ResultOption; +import org.h2.table.Table; +import org.h2.util.HasSQL; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Set clause list. + */ +public final class SetClauseList implements HasSQL { + + private final Table table; + + private final UpdateAction[] actions; + + private boolean onUpdate; + + public SetClauseList(Table table) { + this.table = table; + actions = new UpdateAction[table.getColumns().length]; + } + + /** + * Add a single column. + * + * @param column the column + * @param expression the expression + */ + public void addSingle(Column column, Expression expression) { + int id = column.getColumnId(); + if (actions[id] != null) { + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column.getName()); + } + if (expression != ValueExpression.DEFAULT) { + actions[id] = new SetSimple(expression); + if (expression instanceof Parameter) { + ((Parameter) expression).setColumn(column); + } + } else { + actions[id] = SetClauseList.UpdateAction.SET_DEFAULT; + } + } + + /** + * Add multiple columns. + * + * @param columns the columns + * @param expression the expression (e.g. 
an expression list) + */ + public void addMultiple(ArrayList columns, Expression expression) { + int columnCount = columns.size(); + if (expression instanceof ExpressionList) { + ExpressionList expressions = (ExpressionList) expression; + if (!expressions.isArray()) { + if (columnCount != expressions.getSubexpressionCount()) { + throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); + } + for (int i = 0; i < columnCount; i++) { + addSingle(columns.get(i), expressions.getSubexpression(i)); + } + return; + } + } + if (columnCount == 1) { + // Row value special case + addSingle(columns.get(0), expression); + } else { + int[] cols = new int[columnCount]; + RowExpression row = new RowExpression(expression, cols); + int minId = table.getColumns().length - 1, maxId = 0; + for (int i = 0; i < columnCount; i++) { + int id = columns.get(i).getColumnId(); + if (id < minId) { + minId = id; + } + if (id > maxId) { + maxId = id; + } + } + for (int i = 0; i < columnCount; i++) { + Column column = columns.get(i); + int id = column.getColumnId(); + cols[i] = id; + if (actions[id] != null) { + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column.getName()); + } + actions[id] = new SetMultiple(row, i, id == minId, id == maxId); + } + } + } + + boolean prepareUpdate(Table table, SessionLocal session, ResultTarget deltaChangeCollector, + ResultOption deltaChangeCollectionMode, LocalResult rows, Row oldRow, + boolean updateToCurrentValuesReturnsZero) { + Column[] columns = table.getColumns(); + int columnCount = columns.length; + Row newRow = table.getTemplateRow(); + for (int i = 0; i < columnCount; i++) { + UpdateAction action = actions[i]; + Column column = columns[i]; + Value newValue; + if (action == null || action == UpdateAction.ON_UPDATE) { + newValue = column.isGenerated() ? null : oldRow.getValue(i); + } else if (action == UpdateAction.SET_DEFAULT) { + newValue = !column.isIdentity() ? 
null : oldRow.getValue(i); + } else { + newValue = action.update(session); + if (newValue == ValueNull.INSTANCE && column.isDefaultOnNull()) { + newValue = !column.isIdentity() ? null : oldRow.getValue(i); + } else if (column.isGeneratedAlways()) { + throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1, + column.getSQLWithTable(new StringBuilder(), TRACE_SQL_FLAGS).toString()); + } + } + newRow.setValue(i, newValue); + } + newRow.setKey(oldRow.getKey()); + table.convertUpdateRow(session, newRow, false); + boolean result = true; + if (onUpdate) { + if (!oldRow.hasSameValues(newRow)) { + for (int i = 0; i < columnCount; i++) { + if (actions[i] == UpdateAction.ON_UPDATE) { + newRow.setValue(i, columns[i].getEffectiveOnUpdateExpression().getValue(session)); + } else if (columns[i].isGenerated()) { + newRow.setValue(i, null); + } + } + // Convert on update expressions and reevaluate + // generated columns + table.convertUpdateRow(session, newRow, false); + } else if (updateToCurrentValuesReturnsZero) { + result = false; + } + } else if (updateToCurrentValuesReturnsZero && oldRow.hasSameValues(newRow)) { + result = false; + } + if (deltaChangeCollectionMode == ResultOption.OLD) { + deltaChangeCollector.addRow(oldRow.getValueList()); + } else if (deltaChangeCollectionMode == ResultOption.NEW) { + deltaChangeCollector.addRow(newRow.getValueList().clone()); + } + if (!table.fireRow() || !table.fireBeforeRow(session, oldRow, newRow)) { + rows.addRowForTable(oldRow); + rows.addRowForTable(newRow); + } + if (deltaChangeCollectionMode == ResultOption.FINAL) { + deltaChangeCollector.addRow(newRow.getValueList()); + } + return result; + } + + /** + * Check if this expression and all sub-expressions can fulfill a criteria. + * If any part returns false, the result is false. 
+ * + * @param visitor + * the visitor + * @return if the criteria can be fulfilled + */ + boolean isEverything(ExpressionVisitor visitor) { + for (UpdateAction action : actions) { + if (action != null) { + if (!action.isEverything(visitor)) { + return false; + } + } + } + return true; + } + + /** + * Map the columns and optimize expressions. + * + * @param session + * the session + * @param resolver1 + * the first column resolver + * @param resolver2 + * the second column resolver, or {@code null} + */ + void mapAndOptimize(SessionLocal session, ColumnResolver resolver1, ColumnResolver resolver2) { + Column[] columns = table.getColumns(); + boolean onUpdate = false; + for (int i = 0; i < actions.length; i++) { + UpdateAction action = actions[i]; + if (action != null) { + action.mapAndOptimize(session, resolver1, resolver2); + } else { + Column column = columns[i]; + if (column.getEffectiveOnUpdateExpression() != null) { + actions[i] = UpdateAction.ON_UPDATE; + onUpdate = true; + } + } + } + this.onUpdate = onUpdate; + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + Column[] columns = table.getColumns(); + builder.append("\nSET\n "); + boolean f = false; + for (int i = 0; i < actions.length; i++) { + UpdateAction action = actions[i]; + if (action != null && action != UpdateAction.ON_UPDATE) { + if (action.getClass() == SetMultiple.class) { + SetMultiple multiple = (SetMultiple) action; + if (multiple.first) { + if (f) { + builder.append(",\n "); + } + f = true; + RowExpression r = multiple.row; + builder.append('('); + int[] cols = r.columns; + for (int j = 0, l = cols.length; j < l; j++) { + if (j > 0) { + builder.append(", "); + } + columns[cols[j]].getSQL(builder, sqlFlags); + } + r.expression.getUnenclosedSQL(builder.append(") = "), sqlFlags); + } + } else { + if (f) { + builder.append(",\n "); + } + f = true; + Column column = columns[i]; + if (action != UpdateAction.SET_DEFAULT) { + action.getSQL(builder, sqlFlags, 
column); + } else { + column.getSQL(builder, sqlFlags).append(" = DEFAULT"); + } + } + } + } + return builder; + } + + private static class UpdateAction { + + static UpdateAction ON_UPDATE = new UpdateAction(); + + static UpdateAction SET_DEFAULT = new UpdateAction(); + + UpdateAction() { + } + + Value update(SessionLocal session) { + throw DbException.getInternalError(); + } + + boolean isEverything(ExpressionVisitor visitor) { + return true; + } + + void mapAndOptimize(SessionLocal session, ColumnResolver resolver1, ColumnResolver resolver2) { + // Do nothing + } + + void getSQL(StringBuilder builder, int sqlFlags, Column column) { + throw DbException.getInternalError(); + } + + } + + private static final class SetSimple extends UpdateAction { + + private Expression expression; + + SetSimple(Expression expression) { + this.expression = expression; + } + + @Override + Value update(SessionLocal session) { + return expression.getValue(session); + } + + @Override + boolean isEverything(ExpressionVisitor visitor) { + return expression.isEverything(visitor); + } + + @Override + void mapAndOptimize(SessionLocal session, ColumnResolver resolver1, ColumnResolver resolver2) { + expression.mapColumns(resolver1, 0, Expression.MAP_INITIAL); + if (resolver2 != null) { + expression.mapColumns(resolver2, 0, Expression.MAP_INITIAL); + } + expression = expression.optimize(session); + } + + @Override + void getSQL(StringBuilder builder, int sqlFlags, Column column) { + expression.getUnenclosedSQL(column.getSQL(builder, sqlFlags).append(" = "), sqlFlags); + } + + } + + private static final class RowExpression { + + Expression expression; + + final int[] columns; + + Value[] values; + + RowExpression(Expression expression, int[] columns) { + this.expression = expression; + this.columns = columns; + } + + boolean isEverything(ExpressionVisitor visitor) { + return expression.isEverything(visitor); + } + + void mapAndOptimize(SessionLocal session, ColumnResolver resolver1, 
ColumnResolver resolver2) { + expression.mapColumns(resolver1, 0, Expression.MAP_INITIAL); + if (resolver2 != null) { + expression.mapColumns(resolver2, 0, Expression.MAP_INITIAL); + } + expression = expression.optimize(session); + } + } + + private static final class SetMultiple extends UpdateAction { + + final RowExpression row; + + private final int position; + + boolean first; + + private boolean last; + + SetMultiple(RowExpression row, int position, boolean first, boolean last) { + this.row = row; + this.position = position; + this.first = first; + this.last = last; + } + + @Override + Value update(SessionLocal session) { + Value[] v; + if (first) { + Value value = row.expression.getValue(session); + if (value == ValueNull.INSTANCE) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, "NULL to assigned row value"); + } + row.values = v = value.convertToAnyRow().getList(); + if (v.length != row.columns.length) { + throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); + } + } else { + v = row.values; + if (last) { + row.values = null; + } + } + return v[position]; + } + + @Override + boolean isEverything(ExpressionVisitor visitor) { + return !first || row.isEverything(visitor); + } + + @Override + void mapAndOptimize(SessionLocal session, ColumnResolver resolver1, ColumnResolver resolver2) { + if (first) { + row.mapAndOptimize(session, resolver1, resolver2); + } + } + + } + +} diff --git a/h2/src/main/org/h2/command/dml/SetSessionCharacteristics.java b/h2/src/main/org/h2/command/dml/SetSessionCharacteristics.java new file mode 100644 index 0000000000..cb5efc62f7 --- /dev/null +++ b/h2/src/main/org/h2/command/dml/SetSessionCharacteristics.java @@ -0,0 +1,52 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import org.h2.command.CommandInterface; +import org.h2.command.Prepared; +import org.h2.engine.IsolationLevel; +import org.h2.engine.SessionLocal; +import org.h2.result.ResultInterface; + +/** + * This class represents the statement SET SESSION CHARACTERISTICS + */ +public class SetSessionCharacteristics extends Prepared { + + private final IsolationLevel isolationLevel; + + public SetSessionCharacteristics(SessionLocal session, IsolationLevel isolationLevel) { + super(session); + this.isolationLevel = isolationLevel; + } + + @Override + public boolean isTransactional() { + return false; + } + + @Override + public long update() { + session.setIsolationLevel(isolationLevel); + return 0; + } + + @Override + public boolean needRecompile() { + return false; + } + + @Override + public ResultInterface queryMeta() { + return null; + } + + @Override + public int getType() { + return CommandInterface.SET; + } + +} diff --git a/h2/src/main/org/h2/command/dml/SetTypes.java b/h2/src/main/org/h2/command/dml/SetTypes.java index 53549080a9..464ffc8674 100644 --- a/h2/src/main/org/h2/command/dml/SetTypes.java +++ b/h2/src/main/org/h2/command/dml/SetTypes.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; @@ -15,254 +15,234 @@ public class SetTypes { /** * The type of a SET IGNORECASE statement. */ - public static final int IGNORECASE = 1; + public static final int IGNORECASE = 0; /** * The type of a SET MAX_LOG_SIZE statement. */ - public static final int MAX_LOG_SIZE = 2; + public static final int MAX_LOG_SIZE = IGNORECASE + 1; /** * The type of a SET MODE statement. 
*/ - public static final int MODE = 3; + public static final int MODE = MAX_LOG_SIZE + 1; /** * The type of a SET READONLY statement. */ - public static final int READONLY = 4; + public static final int READONLY = MODE + 1; /** * The type of a SET LOCK_TIMEOUT statement. */ - public static final int LOCK_TIMEOUT = 5; + public static final int LOCK_TIMEOUT = READONLY + 1; /** * The type of a SET DEFAULT_LOCK_TIMEOUT statement. */ - public static final int DEFAULT_LOCK_TIMEOUT = 6; + public static final int DEFAULT_LOCK_TIMEOUT = LOCK_TIMEOUT + 1; /** * The type of a SET DEFAULT_TABLE_TYPE statement. */ - public static final int DEFAULT_TABLE_TYPE = 7; + public static final int DEFAULT_TABLE_TYPE = DEFAULT_LOCK_TIMEOUT + 1; /** * The type of a SET CACHE_SIZE statement. */ - public static final int CACHE_SIZE = 8; + public static final int CACHE_SIZE = DEFAULT_TABLE_TYPE + 1; /** * The type of a SET TRACE_LEVEL_SYSTEM_OUT statement. */ - public static final int TRACE_LEVEL_SYSTEM_OUT = 9; + public static final int TRACE_LEVEL_SYSTEM_OUT = CACHE_SIZE + 1; /** * The type of a SET TRACE_LEVEL_FILE statement. */ - public static final int TRACE_LEVEL_FILE = 10; + public static final int TRACE_LEVEL_FILE = TRACE_LEVEL_SYSTEM_OUT + 1; /** * The type of a SET TRACE_MAX_FILE_SIZE statement. */ - public static final int TRACE_MAX_FILE_SIZE = 11; + public static final int TRACE_MAX_FILE_SIZE = TRACE_LEVEL_FILE + 1; /** * The type of a SET COLLATION statement. */ - public static final int COLLATION = 12; + public static final int COLLATION = TRACE_MAX_FILE_SIZE + 1; /** * The type of a SET CLUSTER statement. */ - public static final int CLUSTER = 13; + public static final int CLUSTER = COLLATION + 1; /** * The type of a SET WRITE_DELAY statement. */ - public static final int WRITE_DELAY = 14; + public static final int WRITE_DELAY = CLUSTER + 1; /** * The type of a SET DATABASE_EVENT_LISTENER statement. 
*/ - public static final int DATABASE_EVENT_LISTENER = 15; + public static final int DATABASE_EVENT_LISTENER = WRITE_DELAY + 1; /** * The type of a SET MAX_MEMORY_ROWS statement. */ - public static final int MAX_MEMORY_ROWS = 16; + public static final int MAX_MEMORY_ROWS = DATABASE_EVENT_LISTENER + 1; /** * The type of a SET LOCK_MODE statement. */ - public static final int LOCK_MODE = 17; + public static final int LOCK_MODE = MAX_MEMORY_ROWS + 1; /** * The type of a SET DB_CLOSE_DELAY statement. */ - public static final int DB_CLOSE_DELAY = 18; - - /** - * The type of a SET LOG statement. - */ - public static final int LOG = 19; + public static final int DB_CLOSE_DELAY = LOCK_MODE + 1; /** * The type of a SET THROTTLE statement. */ - public static final int THROTTLE = 20; + public static final int THROTTLE = DB_CLOSE_DELAY + 1; /** * The type of a SET MAX_MEMORY_UNDO statement. */ - public static final int MAX_MEMORY_UNDO = 21; + public static final int MAX_MEMORY_UNDO = THROTTLE + 1; /** * The type of a SET MAX_LENGTH_INPLACE_LOB statement. */ - public static final int MAX_LENGTH_INPLACE_LOB = 22; - - /** - * The type of a SET COMPRESS_LOB statement. - */ - public static final int COMPRESS_LOB = 23; + public static final int MAX_LENGTH_INPLACE_LOB = MAX_MEMORY_UNDO + 1; /** * The type of a SET ALLOW_LITERALS statement. */ - public static final int ALLOW_LITERALS = 24; - - /** - * The type of a SET MULTI_THREADED statement. - */ - public static final int MULTI_THREADED = 25; + public static final int ALLOW_LITERALS = MAX_LENGTH_INPLACE_LOB + 1; /** * The type of a SET SCHEMA statement. */ - public static final int SCHEMA = 26; + public static final int SCHEMA = ALLOW_LITERALS + 1; /** * The type of a SET OPTIMIZE_REUSE_RESULTS statement. */ - public static final int OPTIMIZE_REUSE_RESULTS = 27; + public static final int OPTIMIZE_REUSE_RESULTS = SCHEMA + 1; /** * The type of a SET SCHEMA_SEARCH_PATH statement. 
*/ - public static final int SCHEMA_SEARCH_PATH = 28; - - /** - * The type of a SET UNDO_LOG statement. - */ - public static final int UNDO_LOG = 29; + public static final int SCHEMA_SEARCH_PATH = OPTIMIZE_REUSE_RESULTS + 1; /** * The type of a SET REFERENTIAL_INTEGRITY statement. */ - public static final int REFERENTIAL_INTEGRITY = 30; + public static final int REFERENTIAL_INTEGRITY = SCHEMA_SEARCH_PATH + 1; /** * The type of a SET MAX_OPERATION_MEMORY statement. */ - public static final int MAX_OPERATION_MEMORY = 31; + public static final int MAX_OPERATION_MEMORY = REFERENTIAL_INTEGRITY + 1; /** * The type of a SET EXCLUSIVE statement. */ - public static final int EXCLUSIVE = 32; + public static final int EXCLUSIVE = MAX_OPERATION_MEMORY + 1; /** * The type of a SET CREATE_BUILD statement. */ - public static final int CREATE_BUILD = 33; + public static final int CREATE_BUILD = EXCLUSIVE + 1; /** * The type of a SET \@VARIABLE statement. */ - public static final int VARIABLE = 34; + public static final int VARIABLE = CREATE_BUILD + 1; /** * The type of a SET QUERY_TIMEOUT statement. */ - public static final int QUERY_TIMEOUT = 35; + public static final int QUERY_TIMEOUT = VARIABLE + 1; /** * The type of a SET REDO_LOG_BINARY statement. */ - public static final int REDO_LOG_BINARY = 36; - - /** - * The type of a SET BINARY_COLLATION statement. - */ - public static final int BINARY_COLLATION = 37; + public static final int REDO_LOG_BINARY = QUERY_TIMEOUT + 1; /** * The type of a SET JAVA_OBJECT_SERIALIZER statement. */ - public static final int JAVA_OBJECT_SERIALIZER = 38; + public static final int JAVA_OBJECT_SERIALIZER = REDO_LOG_BINARY + 1; /** * The type of a SET RETENTION_TIME statement. */ - public static final int RETENTION_TIME = 39; + public static final int RETENTION_TIME = JAVA_OBJECT_SERIALIZER + 1; /** * The type of a SET QUERY_STATISTICS statement. 
*/ - public static final int QUERY_STATISTICS = 40; + public static final int QUERY_STATISTICS = RETENTION_TIME + 1; /** * The type of a SET QUERY_STATISTICS_MAX_ENTRIES statement. */ - public static final int QUERY_STATISTICS_MAX_ENTRIES = 41; + public static final int QUERY_STATISTICS_MAX_ENTRIES = QUERY_STATISTICS + 1; + + /** + * The type of SET LAZY_QUERY_EXECUTION statement. + */ + public static final int LAZY_QUERY_EXECUTION = QUERY_STATISTICS_MAX_ENTRIES + 1; /** - * The type of a SET ROW_FACTORY statement. + * The type of SET BUILTIN_ALIAS_OVERRIDE statement. */ - public static final int ROW_FACTORY = 42; + public static final int BUILTIN_ALIAS_OVERRIDE = LAZY_QUERY_EXECUTION + 1; /** - * The type of SET BATCH_JOINS statement. + * The type of a SET AUTHENTICATOR statement. */ - public static final int BATCH_JOINS = 43; + public static final int AUTHENTICATOR = BUILTIN_ALIAS_OVERRIDE + 1; /** - * The type of SET FORCE_JOIN_ORDER statement. + * The type of a SET IGNORE_CATALOGS statement. */ - public static final int FORCE_JOIN_ORDER = 44; + public static final int IGNORE_CATALOGS = AUTHENTICATOR + 1; /** - * The type of SET LAZY_QUERY_EXECUTION statement. + * The type of a SET CATALOG statement. */ - public static final int LAZY_QUERY_EXECUTION = 45; + public static final int CATALOG = IGNORE_CATALOGS + 1; /** - * The type of SET BUILTIN_ALIAS_OVERRIDE statement. + * The type of a SET NON_KEYWORDS statement. */ - public static final int BUILTIN_ALIAS_OVERRIDE = 46; + public static final int NON_KEYWORDS = CATALOG + 1; /** - * The type of a SET COLUMN_NAME_RULES statement. + * The type of a SET TIME ZONE statement. */ - public static final int COLUMN_NAME_RULES = 47; + public static final int TIME_ZONE = NON_KEYWORDS + 1; /** - * The type of a SET AUTHENTICATOR statement. + * The type of a SET VARIABLE_BINARY statement. 
*/ - public static final int AUTHENTICATOR = 48; + public static final int VARIABLE_BINARY = TIME_ZONE + 1; /** - * The type of a SET LOCAL_RESULT_FACTORY statement. + * The type of a SET DEFAULT_NULL_ORDERING statement. */ - public static final int LOCAL_RESULT_FACTORY = 49; + public static final int DEFAULT_NULL_ORDERING = VARIABLE_BINARY + 1; /** - * The type of a SET UUID_COLLATION statement. + * The type of a SET TRUNCATE_LARGE_LENGTH statement. */ - public static final int UUID_COLLATION = 50; + public static final int TRUNCATE_LARGE_LENGTH = DEFAULT_NULL_ORDERING + 1; - private static final int COUNT = UUID_COLLATION + 1; + private static final int COUNT = TRUNCATE_LARGE_LENGTH + 1; private static final ArrayList TYPES; @@ -272,58 +252,54 @@ private SetTypes() { static { ArrayList list = new ArrayList<>(COUNT); - list.add(null); - list.add(IGNORECASE, "IGNORECASE"); - list.add(MAX_LOG_SIZE, "MAX_LOG_SIZE"); - list.add(MODE, "MODE"); - list.add(READONLY, "READONLY"); - list.add(LOCK_TIMEOUT, "LOCK_TIMEOUT"); - list.add(DEFAULT_LOCK_TIMEOUT, "DEFAULT_LOCK_TIMEOUT"); - list.add(DEFAULT_TABLE_TYPE, "DEFAULT_TABLE_TYPE"); - list.add(CACHE_SIZE, "CACHE_SIZE"); - list.add(TRACE_LEVEL_SYSTEM_OUT, "TRACE_LEVEL_SYSTEM_OUT"); - list.add(TRACE_LEVEL_FILE, "TRACE_LEVEL_FILE"); - list.add(TRACE_MAX_FILE_SIZE, "TRACE_MAX_FILE_SIZE"); - list.add(COLLATION, "COLLATION"); - list.add(CLUSTER, "CLUSTER"); - list.add(WRITE_DELAY, "WRITE_DELAY"); - list.add(DATABASE_EVENT_LISTENER, "DATABASE_EVENT_LISTENER"); - list.add(MAX_MEMORY_ROWS, "MAX_MEMORY_ROWS"); - list.add(LOCK_MODE, "LOCK_MODE"); - list.add(DB_CLOSE_DELAY, "DB_CLOSE_DELAY"); - list.add(LOG, "LOG"); - list.add(THROTTLE, "THROTTLE"); - list.add(MAX_MEMORY_UNDO, "MAX_MEMORY_UNDO"); - list.add(MAX_LENGTH_INPLACE_LOB, "MAX_LENGTH_INPLACE_LOB"); - list.add(COMPRESS_LOB, "COMPRESS_LOB"); - list.add(ALLOW_LITERALS, "ALLOW_LITERALS"); - list.add(MULTI_THREADED, "MULTI_THREADED"); - list.add(SCHEMA, "SCHEMA"); - 
list.add(OPTIMIZE_REUSE_RESULTS, "OPTIMIZE_REUSE_RESULTS"); - list.add(SCHEMA_SEARCH_PATH, "SCHEMA_SEARCH_PATH"); - list.add(UNDO_LOG, "UNDO_LOG"); - list.add(REFERENTIAL_INTEGRITY, "REFERENTIAL_INTEGRITY"); - list.add(MAX_OPERATION_MEMORY, "MAX_OPERATION_MEMORY"); - list.add(EXCLUSIVE, "EXCLUSIVE"); - list.add(CREATE_BUILD, "CREATE_BUILD"); - list.add(VARIABLE, "@"); - list.add(QUERY_TIMEOUT, "QUERY_TIMEOUT"); - list.add(REDO_LOG_BINARY, "REDO_LOG_BINARY"); - list.add(BINARY_COLLATION, "BINARY_COLLATION"); - list.add(JAVA_OBJECT_SERIALIZER, "JAVA_OBJECT_SERIALIZER"); - list.add(RETENTION_TIME, "RETENTION_TIME"); - list.add(QUERY_STATISTICS, "QUERY_STATISTICS"); - list.add(QUERY_STATISTICS_MAX_ENTRIES, "QUERY_STATISTICS_MAX_ENTRIES"); - list.add(ROW_FACTORY, "ROW_FACTORY"); - list.add(BATCH_JOINS, "BATCH_JOINS"); - list.add(FORCE_JOIN_ORDER, "FORCE_JOIN_ORDER"); - list.add(LAZY_QUERY_EXECUTION, "LAZY_QUERY_EXECUTION"); - list.add(BUILTIN_ALIAS_OVERRIDE, "BUILTIN_ALIAS_OVERRIDE"); - list.add(COLUMN_NAME_RULES, "COLUMN_NAME_RULES"); - list.add(AUTHENTICATOR, "AUTHENTICATOR"); - list.add(LOCAL_RESULT_FACTORY, "LOCAL_RESULT_FACTORY"); - list.add(UUID_COLLATION, "UUID_COLLATION"); + list.add("IGNORECASE"); + list.add("MAX_LOG_SIZE"); + list.add("MODE"); + list.add("READONLY"); + list.add("LOCK_TIMEOUT"); + list.add("DEFAULT_LOCK_TIMEOUT"); + list.add("DEFAULT_TABLE_TYPE"); + list.add("CACHE_SIZE"); + list.add("TRACE_LEVEL_SYSTEM_OUT"); + list.add("TRACE_LEVEL_FILE"); + list.add("TRACE_MAX_FILE_SIZE"); + list.add("COLLATION"); + list.add("CLUSTER"); + list.add("WRITE_DELAY"); + list.add("DATABASE_EVENT_LISTENER"); + list.add("MAX_MEMORY_ROWS"); + list.add("LOCK_MODE"); + list.add("DB_CLOSE_DELAY"); + list.add("THROTTLE"); + list.add("MAX_MEMORY_UNDO"); + list.add("MAX_LENGTH_INPLACE_LOB"); + list.add("ALLOW_LITERALS"); + list.add("SCHEMA"); + list.add("OPTIMIZE_REUSE_RESULTS"); + list.add("SCHEMA_SEARCH_PATH"); + list.add("REFERENTIAL_INTEGRITY"); + 
list.add("MAX_OPERATION_MEMORY"); + list.add("EXCLUSIVE"); + list.add("CREATE_BUILD"); + list.add("@"); + list.add("QUERY_TIMEOUT"); + list.add("REDO_LOG_BINARY"); + list.add("JAVA_OBJECT_SERIALIZER"); + list.add("RETENTION_TIME"); + list.add("QUERY_STATISTICS"); + list.add("QUERY_STATISTICS_MAX_ENTRIES"); + list.add("LAZY_QUERY_EXECUTION"); + list.add("BUILTIN_ALIAS_OVERRIDE"); + list.add("AUTHENTICATOR"); + list.add("IGNORE_CATALOGS"); + list.add("CATALOG"); + list.add("NON_KEYWORDS"); + list.add("TIME ZONE"); + list.add("VARIABLE_BINARY"); + list.add("DEFAULT_NULL_ORDERING"); + list.add("TRUNCATE_LARGE_LENGTH"); TYPES = list; + assert(list.size() == COUNT); } /** diff --git a/h2/src/main/org/h2/command/dml/TransactionCommand.java b/h2/src/main/org/h2/command/dml/TransactionCommand.java index 9e95f501d1..c8fa171126 100644 --- a/h2/src/main/org/h2/command/dml/TransactionCommand.java +++ b/h2/src/main/org/h2/command/dml/TransactionCommand.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; @@ -8,7 +8,7 @@ import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.ResultInterface; @@ -21,7 +21,7 @@ public class TransactionCommand extends Prepared { private String savepointName; private String transactionName; - public TransactionCommand(Session session, int type) { + public TransactionCommand(SessionLocal session, int type) { super(session); this.type = type; } @@ -31,7 +31,7 @@ public void setSavepointName(String name) { } @Override - public int update() { + public long update() { switch (type) { case CommandInterface.SET_AUTOCOMMIT_TRUE: session.setAutoCommit(true); @@ -73,46 +73,27 @@ public int update() { session.getUser().checkAdmin(); session.setPreparedTransaction(transactionName, false); break; - case CommandInterface.SHUTDOWN_IMMEDIATELY: - session.getUser().checkAdmin(); - session.getDatabase().shutdownImmediately(); - break; case CommandInterface.SHUTDOWN: case CommandInterface.SHUTDOWN_COMPACT: - case CommandInterface.SHUTDOWN_DEFRAG: { - session.getUser().checkAdmin(); + case CommandInterface.SHUTDOWN_DEFRAG: session.commit(false); - if (type == CommandInterface.SHUTDOWN_COMPACT || - type == CommandInterface.SHUTDOWN_DEFRAG) { - session.getDatabase().setCompactMode(type); - } - // close the database, but don't update the persistent setting - session.getDatabase().setCloseDelay(0); - Database db = session.getDatabase(); + //$FALL-THROUGH$ + case CommandInterface.SHUTDOWN_IMMEDIATELY: { + session.getUser().checkAdmin(); // throttle, to allow testing concurrent // execution of shutdown and query session.throttle(); - for (Session s : db.getSessions(false)) { - if (db.isMultiThreaded()) { - synchronized (s) { - s.rollback(); - } - } else { - // if not multi-threaded, the session could already own - // the lock, which 
would result in a deadlock - // the other session can not concurrently do anything - // because the current session has locked the database - s.rollback(); - } - if (s != session) { - s.close(); - } + Database db = session.getDatabase(); + if (db.setExclusiveSession(session, true)) { + db.setCompactMode(type); + // close the database, but don't update the persistent setting + db.setCloseDelay(0); + session.close(); } - session.close(); break; } default: - DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } return 0; } diff --git a/h2/src/main/org/h2/command/dml/Update.java b/h2/src/main/org/h2/command/dml/Update.java index c8fcb2bfc7..26781c9594 100644 --- a/h2/src/main/org/h2/command/dml/Update.java +++ b/h2/src/main/org/h2/command/dml/Update.java @@ -1,33 +1,29 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; -import java.util.ArrayList; -import java.util.HashMap; import java.util.HashSet; -import java.util.Objects; -import org.h2.api.ErrorCode; import org.h2.api.Trigger; import org.h2.command.CommandInterface; import org.h2.command.Prepared; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; -import org.h2.expression.Parameter; -import org.h2.expression.ValueExpression; +import org.h2.expression.ExpressionVisitor; import org.h2.message.DbException; -import org.h2.result.ResultInterface; +import org.h2.result.LocalResult; +import org.h2.result.ResultTarget; import org.h2.result.Row; -import org.h2.result.RowList; -import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable.ResultOption; import org.h2.table.PlanItem; import org.h2.table.Table; import org.h2.table.TableFilter; -import org.h2.util.Utils; import org.h2.value.Value; import org.h2.value.ValueNull; @@ -35,274 +31,157 @@ * This class represents the statement * UPDATE */ -public class Update extends Prepared { +public final class Update extends FilteredDataChangeStatement { - private Expression condition; - private TableFilter targetTableFilter;// target of update - /** - * This table filter is for MERGE..USING support - not used in stand-alone DML - */ - private TableFilter sourceTableFilter; + private SetClauseList setClauseList; - /** The limit expression as specified in the LIMIT clause. 
*/ - private Expression limitExpr; + private Insert onDuplicateKeyInsert; - private boolean updateToCurrentValuesReturnsZero; + private TableFilter fromTableFilter; - private final ArrayList columns = Utils.newSmallArrayList(); - private final HashMap expressionMap = new HashMap<>(); - - private HashSet updatedKeysCollector; - - public Update(Session session) { + public Update(SessionLocal session) { super(session); } - public void setTableFilter(TableFilter tableFilter) { - this.targetTableFilter = tableFilter; - } - - public void setCondition(Expression condition) { - this.condition = condition; - } - - public Expression getCondition( ) { - return this.condition; + public void setSetClauseList(SetClauseList setClauseList) { + this.setClauseList = setClauseList; } - /** - * Add an assignment of the form column = expression. - * - * @param column the column - * @param expression the expression - */ - public void setAssignment(Column column, Expression expression) { - if (expressionMap.containsKey(column)) { - throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column - .getName()); - } - columns.add(column); - expressionMap.put(column, expression); - if (expression instanceof Parameter) { - Parameter p = (Parameter) expression; - p.setColumn(column); - } - } - - /** - * Sets the collector of updated keys. 
- * - * @param updatedKeysCollector the collector of updated keys - */ - public void setUpdatedKeysCollector(HashSet updatedKeysCollector) { - this.updatedKeysCollector = updatedKeysCollector; + public void setFromTableFilter(TableFilter tableFilter) { + this.fromTableFilter = tableFilter; } @Override - public int update() { + public long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { targetTableFilter.startQuery(session); targetTableFilter.reset(); - try (RowList rows = new RowList(session)) { - Table table = targetTableFilter.getTable(); - session.getUser().checkRight(table, Right.UPDATE); + Table table = targetTableFilter.getTable(); + try (LocalResult rows = LocalResult.forTable(session, table)) { + session.getUser().checkTableRight(table, Right.UPDATE); table.fire(session, Trigger.UPDATE, true); - table.lock(session, true, false); - int columnCount = table.getColumns().length; + table.lock(session, Table.WRITE_LOCK); // get the old rows, compute the new rows setCurrentRowNumber(0); - int count = 0; - Column[] columns = table.getColumns(); - int limitRows = -1; - if (limitExpr != null) { - Value v = limitExpr.getValue(session); - if (v != ValueNull.INSTANCE) { - limitRows = v.getInt(); + long count = 0; + long limitRows = -1; + if (fetchExpr != null) { + Value v = fetchExpr.getValue(session); + if (v == ValueNull.INSTANCE || (limitRows = v.getLong()) < 0) { + throw DbException.getInvalidValueException("FETCH", v); } } - while (targetTableFilter.next()) { - setCurrentRowNumber(count+1); - if (limitRows >= 0 && count >= limitRows) { - break; - } - if (condition == null || condition.getBooleanValue(session)) { - Row oldRow = targetTableFilter.get(); - if (table.isMVStore()) { - Row lockedRow = table.lockRow(session, oldRow); - if (lockedRow == null) { - continue; - } - if (!oldRow.hasSharedData(lockedRow)) { - oldRow = lockedRow; - targetTableFilter.set(oldRow); - if (condition != null && !condition.getBooleanValue(session)) { 
- continue; - } - } - } - Row newRow = table.getTemplateRow(); - boolean setOnUpdate = false; - for (int i = 0; i < columnCount; i++) { - Expression newExpr = expressionMap.get(columns[i]); - Column column = table.getColumn(i); - Value newValue; - if (newExpr == null) { - if (column.getOnUpdateExpression() != null) { - setOnUpdate = true; - } - newValue = oldRow.getValue(i); - } else if (newExpr == ValueExpression.getDefault()) { - newValue = table.getDefaultValue(session, column); - } else { - newValue = column.convert(newExpr.getValue(session), session.getDatabase().getMode()); - } - newRow.setValue(i, newValue); + while (nextRow(limitRows, count)) { + Row oldRow = targetTableFilter.get(); + if (table.isRowLockable()) { + Row lockedRow = table.lockRow(session, oldRow); + if (lockedRow == null) { + continue; } - long key = oldRow.getKey(); - newRow.setKey(key); - if (setOnUpdate || updateToCurrentValuesReturnsZero) { - setOnUpdate = false; - for (int i = 0; i < columnCount; i++) { - // Use equals here to detect changes from numeric 0 to 0.0 and similar - if (!Objects.equals(oldRow.getValue(i), newRow.getValue(i))) { - setOnUpdate = true; - break; - } - } - if (setOnUpdate) { - for (int i = 0; i < columnCount; i++) { - if (expressionMap.get(columns[i]) == null) { - Column column = table.getColumn(i); - if (column.getOnUpdateExpression() != null) { - newRow.setValue(i, table.getOnUpdateValue(session, column)); - } - } - } - } else if (updateToCurrentValuesReturnsZero) { - count--; - } - } - table.validateConvertUpdateSequence(session, newRow); - if (!table.fireRow() || !table.fireBeforeRow(session, oldRow, newRow)) { - rows.add(oldRow); - rows.add(newRow); - if (updatedKeysCollector != null) { - updatedKeysCollector.add(key); + if (!oldRow.hasSharedData(lockedRow)) { + oldRow = lockedRow; + targetTableFilter.set(oldRow); + if (condition != null && !condition.getBooleanValue(session)) { + continue; } } - count++; } - } - // TODO self referencing referential integrity 
constraints - // don't work if update is multi-row and 'inversed' the condition! - // probably need multi-row triggers with 'deleted' and 'inserted' - // at the same time. anyway good for sql compatibility - // TODO update in-place (but if the key changes, - // we need to update all indexes) before row triggers - - // the cached row is already updated - we need the old values - table.updateRows(this, session, rows); - if (table.fireRow()) { - for (rows.reset(); rows.hasNext();) { - Row o = rows.next(); - Row n = rows.next(); - table.fireAfterRow(session, o, n, false); + if (setClauseList.prepareUpdate(table, session, deltaChangeCollector, deltaChangeCollectionMode, + rows, oldRow, onDuplicateKeyInsert != null)) { + count++; } } + doUpdate(this, session, table, rows); table.fire(session, Trigger.UPDATE, false); return count; } } - @Override - public String getPlanSQL(boolean alwaysQuote) { - StringBuilder builder = new StringBuilder("UPDATE "); - targetTableFilter.getPlanSQL(builder, false, alwaysQuote).append("\nSET\n "); - for (int i = 0, size = columns.size(); i < size; i++) { - if (i > 0) { - builder.append(",\n "); + static void doUpdate(Prepared prepared, SessionLocal session, Table table, LocalResult rows) { + rows.done(); + // TODO self referencing referential integrity constraints + // don't work if update is multi-row and 'inversed' the condition! + // probably need multi-row triggers with 'deleted' and 'inserted' + // at the same time. 
anyway good for sql compatibility + // TODO update in-place (but if the key changes, + // we need to update all indexes) before row triggers + + // the cached row is already updated - we need the old values + table.updateRows(prepared, session, rows); + if (table.fireRow()) { + for (rows.reset(); rows.next();) { + Row o = rows.currentRowForTable(); + rows.next(); + Row n = rows.currentRowForTable(); + table.fireAfterRow(session, o, n, false); } - Column c = columns.get(i); - c.getSQL(builder, alwaysQuote).append(" = "); - expressionMap.get(c).getSQL(builder, alwaysQuote); } - if (condition != null) { - builder.append("\nWHERE "); - condition.getUnenclosedSQL(builder, alwaysQuote); - } - if (limitExpr != null) { - builder.append("\nLIMIT "); - limitExpr.getUnenclosedSQL(builder, alwaysQuote); + } + + @Override + public String getPlanSQL(int sqlFlags) { + StringBuilder builder = new StringBuilder("UPDATE "); + targetTableFilter.getPlanSQL(builder, false, sqlFlags); + if (fromTableFilter != null) { + builder.append("\nFROM "); + fromTableFilter.getPlanSQL(builder, false, sqlFlags); } + setClauseList.getSQL(builder, sqlFlags); + appendFilterCondition(builder, sqlFlags); return builder.toString(); } @Override public void prepare() { + if (fromTableFilter != null) { + targetTableFilter.addJoin(fromTableFilter, false, null); + } if (condition != null) { condition.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); - condition = condition.optimize(session); - condition.createIndexConditions(session, targetTableFilter); - } - for (Column c : columns) { - Expression e = expressionMap.get(c); - e.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); - if (sourceTableFilter!=null){ - e.mapColumns(sourceTableFilter, 0, Expression.MAP_INITIAL); + if (fromTableFilter != null) { + condition.mapColumns(fromTableFilter, 0, Expression.MAP_INITIAL); + } + condition = condition.optimizeCondition(session); + if (condition != null) { + condition.createIndexConditions(session, 
targetTableFilter); } - expressionMap.put(c, e.optimize(session)); } - TableFilter[] filters; - if(sourceTableFilter==null){ + setClauseList.mapAndOptimize(session, targetTableFilter, fromTableFilter); + TableFilter[] filters = null; + if (fromTableFilter == null) { filters = new TableFilter[] { targetTableFilter }; + } else { + filters = new TableFilter[] { targetTableFilter, fromTableFilter }; } - else{ - filters = new TableFilter[] { targetTableFilter, sourceTableFilter }; - } - PlanItem item = targetTableFilter.getBestPlanItem(session, filters, 0, - new AllColumnsForPlan(filters)); + PlanItem item = targetTableFilter.getBestPlanItem(session, filters, 0, new AllColumnsForPlan(filters)); targetTableFilter.setPlanItem(item); targetTableFilter.prepare(); } - @Override - public boolean isTransactional() { - return true; - } - - @Override - public ResultInterface queryMeta() { - return null; - } - @Override public int getType() { return CommandInterface.UPDATE; } - public void setLimit(Expression limit) { - this.limitExpr = limit; + @Override + public String getStatementName() { + return "UPDATE"; } @Override - public boolean isCacheable() { - return true; + public void collectDependencies(HashSet dependencies) { + ExpressionVisitor visitor = ExpressionVisitor.getDependenciesVisitor(dependencies); + if (condition != null) { + condition.isEverything(visitor); + } + setClauseList.isEverything(visitor); } - public TableFilter getSourceTableFilter() { - return sourceTableFilter; + public Insert getOnDuplicateKeyInsert() { + return onDuplicateKeyInsert; } - public void setSourceTableFilter(TableFilter sourceTableFilter) { - this.sourceTableFilter = sourceTableFilter; + void setOnDuplicateKeyInsert(Insert onDuplicateKeyInsert) { + this.onDuplicateKeyInsert = onDuplicateKeyInsert; } - /** - * Sets expected update count for update to current values case. 
- * - * @param updateToCurrentValuesReturnsZero if zero should be returned as update - * count if update set row to current values - */ - public void setUpdateToCurrentValuesReturnsZero(boolean updateToCurrentValuesReturnsZero) { - this.updateToCurrentValuesReturnsZero = updateToCurrentValuesReturnsZero; - } } diff --git a/h2/src/main/org/h2/command/dml/package.html b/h2/src/main/org/h2/command/dml/package.html index dd0ba849dc..077734e108 100644 --- a/h2/src/main/org/h2/command/dml/package.html +++ b/h2/src/main/org/h2/command/dml/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/command/package.html b/h2/src/main/org/h2/command/package.html index a49d11b5ab..6003e70e0d 100644 --- a/h2/src/main/org/h2/command/package.html +++ b/h2/src/main/org/h2/command/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/command/dml/AllColumnsForPlan.java b/h2/src/main/org/h2/command/query/AllColumnsForPlan.java similarity index 89% rename from h2/src/main/org/h2/command/dml/AllColumnsForPlan.java rename to h2/src/main/org/h2/command/query/AllColumnsForPlan.java index 44e1a78777..b5b34e5290 100644 --- a/h2/src/main/org/h2/command/dml/AllColumnsForPlan.java +++ b/h2/src/main/org/h2/command/query/AllColumnsForPlan.java @@ -1,9 +1,9 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ -package org.h2.command.dml; +package org.h2.command.query; import java.util.ArrayList; import java.util.HashMap; diff --git a/h2/src/main/org/h2/command/dml/Optimizer.java b/h2/src/main/org/h2/command/query/Optimizer.java similarity index 93% rename from h2/src/main/org/h2/command/dml/Optimizer.java rename to h2/src/main/org/h2/command/query/Optimizer.java index 6bd593ac64..83bd58699f 100644 --- a/h2/src/main/org/h2/command/dml/Optimizer.java +++ b/h2/src/main/org/h2/command/query/Optimizer.java @@ -1,14 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.command.dml; +package org.h2.command.query; import java.util.BitSet; import java.util.Random; -import java.util.concurrent.TimeUnit; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.table.Plan; import org.h2.table.PlanItem; @@ -41,7 +40,7 @@ class Optimizer { private final TableFilter[] filters; private final Expression condition; - private final Session session; + private final SessionLocal session; private Plan bestPlan; private TableFilter topFilter; @@ -49,7 +48,7 @@ class Optimizer { private Random random; private final AllColumnsForPlan allColumnsSet; - Optimizer(TableFilter[] filters, Expression condition, Session session) { + Optimizer(TableFilter[] filters, Expression condition, SessionLocal session) { this.filters = filters; this.condition = condition; this.session = session; @@ -78,7 +77,7 @@ private static int getMaxBruteForceFilters(int filterCount) { private void calculateBestPlan() { cost = -1; - if (filters.length == 1 || session.isForceJoinOrder()) { + if (filters.length == 1) { testPlan(filters); } else { startNs = 
System.nanoTime(); @@ -99,8 +98,10 @@ private void calculateFakePlan() { private boolean canStop(int x) { return (x & 127) == 0 - && cost >= 0 // don't calculate for simple queries (no rows or so) - && 10 * (System.nanoTime() - startNs) > cost * TimeUnit.MILLISECONDS.toNanos(1); + // don't calculate for simple queries (no rows or so) + && cost >= 0 + // 100 microseconds * cost + && System.nanoTime() - startNs > cost * 100_000L; } private void calculateBruteForceAll() { diff --git a/h2/src/main/org/h2/command/dml/Query.java b/h2/src/main/org/h2/command/query/Query.java similarity index 56% rename from h2/src/main/org/h2/command/dml/Query.java rename to h2/src/main/org/h2/command/query/Query.java index f194407c02..227e15a472 100644 --- a/h2/src/main/org/h2/command/dml/Query.java +++ b/h2/src/main/org/h2/command/query/Query.java @@ -1,38 +1,44 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ -package org.h2.command.dml; +package org.h2.command.query; + +import static org.h2.expression.Expression.WITHOUT_PARENTHESES; +import static org.h2.util.HasSQL.DEFAULT_SQL_FLAGS; import java.util.ArrayList; import java.util.HashSet; -import java.util.List; +import java.util.Iterator; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Database; -import org.h2.engine.Mode.ModeEnum; -import org.h2.engine.Session; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; import org.h2.expression.Alias; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; import org.h2.expression.Parameter; import org.h2.expression.ValueExpression; -import org.h2.expression.function.FunctionCall; import org.h2.message.DbException; +import org.h2.result.LocalResult; import org.h2.result.ResultInterface; import org.h2.result.ResultTarget; import org.h2.result.SortOrder; +import org.h2.table.Column; import org.h2.table.ColumnResolver; import org.h2.table.Table; import org.h2.table.TableFilter; -import org.h2.util.StringUtils; +import org.h2.table.TableView; import org.h2.util.Utils; +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; /** @@ -40,6 +46,34 @@ */ public abstract class Query extends Prepared { + /** + * Evaluated values of OFFSET and FETCH clauses. + */ + static final class OffsetFetch { + + /** + * OFFSET value. + */ + final long offset; + + /** + * FETCH value. + */ + final long fetch; + + /** + * Whether FETCH value is a PERCENT value. 
+ */ + final boolean fetchPercent; + + OffsetFetch(long offset, long fetch, boolean fetchPercent) { + this.offset = offset; + this.fetch = fetch; + this.fetchPercent = fetchPercent; + } + + } + /** * The column list, including invisible expressions such as order by expressions. */ @@ -55,7 +89,7 @@ public abstract class Query extends Prepared { /** * Describes elements of the ORDER BY clause of a query. */ - ArrayList orderList; + ArrayList orderList; /** * A sort order represents an ORDER BY clause in a query. @@ -63,9 +97,9 @@ public abstract class Query extends Prepared { SortOrder sort; /** - * The limit expression as specified in the LIMIT or TOP clause. + * The fetch expression as specified in the FETCH, LIMIT, or TOP clause. */ - Expression limitExpr; + Expression fetchExpr; /** * Whether limit expression specifies percentage of rows. @@ -78,15 +112,10 @@ public abstract class Query extends Prepared { boolean withTies; /** - * The offset expression as specified in the LIMIT .. OFFSET clause. + * The offset expression as specified in the OFFSET clause. */ Expression offsetExpr; - /** - * The sample size expression as specified in the SAMPLE_SIZE clause. - */ - Expression sampleSizeExpr; - /** * Whether the result must only contain distinct rows. */ @@ -97,15 +126,32 @@ public abstract class Query extends Prepared { */ boolean randomAccessResult; + /** + * The visible columns (the ones required in the result). + */ + int visibleColumnCount; + + /** + * Number of columns including visible columns and additional virtual + * columns for ORDER BY and DISTINCT ON clauses. This number does not + * include virtual columns for HAVING and QUALIFY. 
+ */ + int resultColumnCount; + private boolean noCache; - private int lastLimit; + private long lastLimit; private long lastEvaluated; private ResultInterface lastResult; + private Boolean lastExists; private Value[] lastParameters; private boolean cacheableChecked; private boolean neverLazy; - Query(Session session) { + boolean checkInit; + + boolean isPrepared; + + Query(SessionLocal session) { super(session); } @@ -124,10 +170,12 @@ public boolean isNeverLazy() { */ public abstract boolean isUnion(); - /** - * Prepare join batching. - */ - public abstract void prepareJoinBatch(); + @Override + public ResultInterface queryMeta() { + LocalResult result = new LocalResult(session, expressionArray, visibleColumnCount, resultColumnCount); + result.done(); + return result; + } /** * Execute the query without checking the cache. If a target is specified, @@ -138,11 +186,9 @@ public boolean isNeverLazy() { * @param target the target to write results to * @return the result */ - protected abstract ResultInterface queryWithoutCache(int limit, - ResultTarget target); + protected abstract ResultInterface queryWithoutCache(long limit, ResultTarget target); - private ResultInterface queryWithoutCacheLazyCheck(int limit, - ResultTarget target) { + private ResultInterface queryWithoutCacheLazyCheck(long limit, ResultTarget target) { boolean disableLazy = neverLazy && session.isLazyQueryExecution(); if (disableLazy) { session.setLazyQueryExecution(false); @@ -203,7 +249,7 @@ public int getCostAsExpression() { * * @param order the order by list */ - public void setOrder(ArrayList order) { + public void setOrder(ArrayList order) { orderList = order; } @@ -228,7 +274,21 @@ public boolean hasOrder() { * * @return the column count */ - public abstract int getColumnCount(); + public int getColumnCount() { + return visibleColumnCount; + } + + /** + * Returns data type of rows. 
+ * + * @return data type of rows + */ + public TypeInfo getRowDataType() { + if (visibleColumnCount == 1) { + return expressionArray[0].getType(); + } + return TypeInfo.getTypeInfo(Value.ROW, -1L, -1, new ExtTypeInfoRow(expressionArray, visibleColumnCount)); + } /** * Map the columns to the given column resolver. @@ -278,31 +338,33 @@ public abstract void addGlobalCondition(Parameter param, int columnId, */ public abstract boolean isEverything(ExpressionVisitor visitor); + @Override + public boolean isReadOnly() { + return isEverything(ExpressionVisitor.READONLY_VISITOR); + } + /** * Update all aggregate function values. * * @param s the session * @param stage select stage */ - public abstract void updateAggregate(Session s, int stage); + public abstract void updateAggregate(SessionLocal s, int stage); /** * Call the before triggers on all tables. */ public abstract void fireBeforeSelectTriggers(); - /** - * Set the distinct flag. - */ - public void setDistinct() { - distinct = true; - } - /** * Set the distinct flag only if it is possible, may be used as a possible * optimization only. 
*/ - public abstract void setDistinctIfPossible(); + public void setDistinctIfPossible() { + if (!isAnyDistinct() && offsetExpr == null && fetchExpr == null) { + distinct = true; + } + } /** * @return whether this query is a plain {@code DISTINCT} query @@ -354,8 +416,7 @@ public void disableCache() { this.noCache = true; } - private boolean sameResultAsLast(Session s, Value[] params, - Value[] lastParams, long lastEval) { + private boolean sameResultAsLast(Value[] params, Value[] lastParams, long lastEval) { if (!cacheableChecked) { long max = getMaxDataModificationId(); noCache = max == Long.MAX_VALUE; @@ -368,10 +429,9 @@ private boolean sameResultAsLast(Session s, Value[] params, if (noCache) { return false; } - Database db = s.getDatabase(); for (int i = 0; i < params.length; i++) { Value a = lastParams[i], b = params[i]; - if (a.getValueType() != b.getValueType() || !db.areEqual(a, b)) { + if (a.getValueType() != b.getValueType() || !session.areEqual(a, b)) { return false; } } @@ -381,7 +441,7 @@ private boolean sameResultAsLast(Session s, Value[] params, private Value[] getParameterValues() { ArrayList list = getParameters(); if (list == null) { - return new Value[0]; + return Value.EMPTY_VALUES; } int size = list.size(); Value[] params = new Value[size]; @@ -393,7 +453,7 @@ private Value[] getParameterValues() { } @Override - public final ResultInterface query(int maxrows) { + public final ResultInterface query(long maxrows) { return query(maxrows, null); } @@ -404,7 +464,7 @@ public final ResultInterface query(int maxrows) { * @param target the target result (null will return the result) * @return the result set (if the target is not set). 
*/ - public final ResultInterface query(int limit, ResultTarget target) { + public final ResultInterface query(long limit, ResultTarget target) { if (isUnion()) { // union doesn't always know the parameter list of the left and // right queries @@ -420,8 +480,7 @@ public final ResultInterface query(int limit, ResultTarget target) { if (isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { if (lastResult != null && !lastResult.isClosed() && limit == lastLimit) { - if (sameResultAsLast(session, params, lastParameters, - lastEvaluated)) { + if (sameResultAsLast(params, lastParameters, lastEvaluated)) { lastResult = lastResult.createShallowCopy(session); if (lastResult != null) { lastResult.reset(); @@ -434,7 +493,8 @@ public final ResultInterface query(int limit, ResultTarget target) { closeLastResult(); ResultInterface r = queryWithoutCacheLazyCheck(limit, target); lastResult = r; - this.lastEvaluated = now; + lastExists = null; + lastEvaluated = now; lastLimit = limit; return r; } @@ -445,49 +505,86 @@ private void closeLastResult() { } } + /** + * Execute the EXISTS predicate over the query. 
+ * + * @return EXISTS predicate result + */ + public final boolean exists() { + if (isUnion()) { + // union doesn't always know the parameter list of the left and + // right queries + return executeExists(); + } + fireBeforeSelectTriggers(); + if (noCache || !session.getDatabase().getOptimizeReuseResults()) { + return executeExists(); + } + Value[] params = getParameterValues(); + long now = session.getDatabase().getModificationDataId(); + if (isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + if (lastExists != null) { + if (sameResultAsLast(params, lastParameters, lastEvaluated)) { + return lastExists; + } + } + } + lastParameters = params; + boolean exists = executeExists(); + lastExists = exists; + lastResult = null; + lastEvaluated = now; + return exists; + } + + private boolean executeExists() { + ResultInterface r = queryWithoutCacheLazyCheck(1L, null); + boolean exists = r.hasNext(); + r.close(); + return exists; + } + /** * Initialize the order by list. This call may extend the expressions list. 
* - * @param session the session - * @param expressions the select list expressions * @param expressionSQL the select list SQL snippets - * @param orderList the order by list - * @param visible the number of visible columns in the select list * @param mustBeInResult all order by expressions must be in the select list * @param filters the table filters + * @return {@code true} if ORDER BY clause is preserved, {@code false} + * otherwise */ - static void initOrder(Session session, - ArrayList expressions, - ArrayList expressionSQL, - List orderList, - int visible, - boolean mustBeInResult, - ArrayList filters) { - for (SelectOrderBy o : orderList) { + boolean initOrder(ArrayList expressionSQL, boolean mustBeInResult, ArrayList filters) { + for (Iterator i = orderList.iterator(); i.hasNext();) { + QueryOrderBy o = i.next(); Expression e = o.expression; if (e == null) { continue; } - int idx = initExpression(session, expressions, expressionSQL, e, visible, mustBeInResult, filters); - o.columnIndexExpr = ValueExpression.get(ValueInt.get(idx + 1)); + if (e.isConstant()) { + i.remove(); + continue; + } + int idx = initExpression(expressionSQL, e, mustBeInResult, filters); + o.columnIndexExpr = ValueExpression.get(ValueInteger.get(idx + 1)); o.expression = expressions.get(idx).getNonAliasExpression(); } + if (orderList.isEmpty()) { + orderList = null; + return false; + } + return true; } /** * Initialize the 'ORDER BY' or 'DISTINCT' expressions. * - * @param session the session - * @param expressions the select list expressions * @param expressionSQL the select list SQL snippets * @param e the expression. - * @param visible the number of visible columns in the select list * @param mustBeInResult all order by expressions must be in the select list * @param filters the table filters. * @return index on the expression in the {@link #expressions} list. 
*/ - static int initExpression(Session session, ArrayList expressions, - ArrayList expressionSQL, Expression e, int visible, boolean mustBeInResult, + int initExpression(ArrayList expressionSQL, Expression e, boolean mustBeInResult, ArrayList filters) { Database db = session.getDatabase(); // special case: SELECT 1 AS A FROM DUAL ORDER BY A @@ -499,12 +596,12 @@ static int initExpression(Session session, ArrayList expressions, ExpressionColumn exprCol = (ExpressionColumn) e; String tableAlias = exprCol.getOriginalTableAliasName(); String col = exprCol.getOriginalColumnName(); - for (int j = 0; j < visible; j++) { + for (int j = 0, visible = getColumnCount(); j < visible; j++) { Expression ec = expressions.get(j); if (ec instanceof ExpressionColumn) { // select expression ExpressionColumn c = (ExpressionColumn) ec; - if (!db.equalsIdentifiers(col, c.getColumnName())) { + if (!db.equalsIdentifiers(col, c.getColumnName(session, j))) { continue; } if (tableAlias == null) { @@ -524,15 +621,15 @@ static int initExpression(Session session, ArrayList expressions, } } } else if (ec instanceof Alias) { - if (tableAlias == null && db.equalsIdentifiers(col, ec.getAlias())) { + if (tableAlias == null && db.equalsIdentifiers(col, ec.getAlias(session, j))) { return j; } Expression ec2 = ec.getNonAliasExpression(); if (ec2 instanceof ExpressionColumn) { ExpressionColumn c2 = (ExpressionColumn) ec2; - String ta = exprCol.getSQL(true); - String tb = c2.getSQL(true); - String s2 = c2.getColumnName(); + String ta = exprCol.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES); + String tb = c2.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES); + String s2 = c2.getColumnName(session, j); if (db.equalsIdentifiers(col, s2) && db.equalsIdentifiers(ta, tb)) { return j; } @@ -540,7 +637,7 @@ static int initExpression(Session session, ArrayList expressions, } } } else if (expressionSQL != null) { - String s = e.getSQL(true); + String s = e.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES); for (int j = 
0, size = expressionSQL.size(); j < size; j++) { if (db.equalsIdentifiers(expressionSQL.get(j), s)) { return j; @@ -548,13 +645,13 @@ static int initExpression(Session session, ArrayList expressions, } } if (expressionSQL == null - || mustBeInResult && session.getDatabase().getMode().getEnum() != ModeEnum.MySQL + || mustBeInResult && !db.getMode().allowUnrelatedOrderByExpressionsInDistinctQueries && !checkOrderOther(session, e, expressionSQL)) { - throw DbException.get(ErrorCode.ORDER_BY_NOT_IN_RESULT, e.getSQL(false)); + throw DbException.get(ErrorCode.ORDER_BY_NOT_IN_RESULT, e.getTraceSQL()); } int idx = expressions.size(); expressions.add(e); - expressionSQL.add(e.getSQL(true)); + expressionSQL.add(e.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES)); return idx; } @@ -570,22 +667,20 @@ static int initExpression(Session session, ArrayList expressions, * @return whether the specified expression should be allowed in ORDER BY * list of DISTINCT select */ - private static boolean checkOrderOther(Session session, Expression expr, ArrayList expressionSQL) { - if (expr.isConstant()) { - // ValueExpression or other + private static boolean checkOrderOther(SessionLocal session, Expression expr, ArrayList expressionSQL) { + if (expr == null || expr.isConstant()) { + // ValueExpression, null expression in CASE, or other return true; } - String exprSQL = expr.getSQL(true); + String exprSQL = expr.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES); for (String sql: expressionSQL) { if (session.getDatabase().equalsIdentifiers(exprSQL, sql)) { return true; } } int count = expr.getSubexpressionCount(); - if (expr instanceof FunctionCall) { - if (!((FunctionCall) expr).isDeterministic()) { - return false; - } + if (!expr.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + return false; } else if (count <= 0) { // Expression is an ExpressionColumn, Parameter, SequenceValue or // has other unsupported type without subexpressions @@ -600,19 +695,18 @@ private static boolean 
checkOrderOther(Session session, Expression expr, ArrayLi } /** - * Create a {@link SortOrder} object given the list of {@link SelectOrderBy} + * Create a {@link SortOrder} object given the list of {@link QueryOrderBy} * objects. * - * @param orderList a list of {@link SelectOrderBy} elements + * @param orderList a list of {@link QueryOrderBy} elements * @param expressionCount the number of columns in the query - * @return the {@link SortOrder} object */ - public SortOrder prepareOrder(ArrayList orderList, int expressionCount) { + void prepareOrder(ArrayList orderList, int expressionCount) { int size = orderList.size(); int[] index = new int[size]; int[] sortType = new int[size]; for (int i = 0; i < size; i++) { - SelectOrderBy o = orderList.get(i); + QueryOrderBy o = orderList.get(i); int idx; boolean reverse = false; Value v = o.columnIndexExpr.getValue(null); @@ -638,7 +732,48 @@ public SortOrder prepareOrder(ArrayList orderList, int expression } sortType[i] = type; } - return new SortOrder(session.getDatabase(), index, sortType, orderList); + sort = new SortOrder(session, index, sortType, orderList); + this.orderList = null; + } + + /** + * Removes constant expressions from the sort order. + * + * Some constants are detected only after optimization of expressions, this + * method removes them from the sort order only. They are currently + * preserved in the list of expressions. 
+ */ + void cleanupOrder() { + int sourceIndexes[] = sort.getQueryColumnIndexes(); + int count = sourceIndexes.length; + int constants = 0; + for (int i = 0; i < count; i++) { + if (expressions.get(sourceIndexes[i]).isConstant()) { + constants++; + } + } + if (constants == 0) { + return; + } + if (constants == count) { + sort = null; + return; + } + int size = count - constants; + int[] indexes = new int[size]; + int[] sortTypes = new int[size]; + int[] sourceSortTypes = sort.getSortTypes(); + ArrayList orderList = sort.getOrderList(); + for (int i = 0, j = 0; j < size; i++) { + if (!expressions.get(sourceIndexes[i]).isConstant()) { + indexes[j] = sourceIndexes[i]; + sortTypes[j] = sourceSortTypes[i]; + j++; + } else { + orderList.remove(j); + } + } + sort = new SortOrder(session, indexes, sortTypes, orderList); } @Override @@ -654,12 +789,12 @@ public Expression getOffset() { return offsetExpr; } - public void setLimit(Expression limit) { - this.limitExpr = limit; + public void setFetch(Expression fetch) { + this.fetchExpr = fetch; } - public Expression getLimit() { - return limitExpr; + public Expression getFetch() { + return fetchExpr; } public void setFetchPercent(boolean fetchPercent) { @@ -690,47 +825,38 @@ void addParameter(Parameter param) { parameters.add(param); } - public void setSampleSize(Expression sampleSize) { - this.sampleSizeExpr = sampleSize; - } - - /** - * Get the sample size, if set. 
- * - * @param session the session - * @return the sample size - */ - int getSampleSizeValue(Session session) { - if (sampleSizeExpr == null) { - return 0; - } - Value v = sampleSizeExpr.optimize(session).getValue(session); - if (v == ValueNull.INSTANCE) { - return 0; - } - return v.getInt(); - } - public final long getMaxDataModificationId() { ExpressionVisitor visitor = ExpressionVisitor.getMaxModificationIdVisitor(); isEverything(visitor); - return visitor.getMaxDataModificationId(); + return Math.max(visitor.getMaxDataModificationId(), session.getSnapshotDataModificationId()); } /** - * Appends query limits info to the plan. + * Appends ORDER BY, OFFSET, and FETCH clauses to the plan. * * @param builder query plan string builder. - * @param alwaysQuote quote all identifiers - */ - void appendLimitToSQL(StringBuilder builder, boolean alwaysQuote) { + * @param sqlFlags formatting flags + * @param expressions the array of expressions + */ + void appendEndOfQueryToSQL(StringBuilder builder, int sqlFlags, Expression[] expressions) { + if (sort != null) { + sort.getSQL(builder.append("\nORDER BY "), expressions, visibleColumnCount, sqlFlags); + } else if (orderList != null) { + builder.append("\nORDER BY "); + for (int i = 0, l = orderList.size(); i < l; i++) { + if (i > 0) { + builder.append(", "); + } + orderList.get(i).getSQL(builder, sqlFlags); + } + } if (offsetExpr != null) { - String count = StringUtils.unEnclose(offsetExpr.getSQL(alwaysQuote)); + String count = offsetExpr.getSQL(sqlFlags, WITHOUT_PARENTHESES); builder.append("\nOFFSET ").append(count).append("1".equals(count) ? " ROW" : " ROWS"); } - if (limitExpr != null) { + if (fetchExpr != null) { builder.append("\nFETCH ").append(offsetExpr != null ? 
"NEXT" : "FIRST"); - String count = StringUtils.unEnclose(limitExpr.getSQL(alwaysQuote)); + String count = fetchExpr.getSQL(sqlFlags, WITHOUT_PARENTHESES); boolean withCount = fetchPercent || !"1".equals(count); if (withCount) { builder.append(' ').append(count); @@ -743,4 +869,150 @@ void appendLimitToSQL(StringBuilder builder, boolean alwaysQuote) { } } + /** + * Evaluates OFFSET and FETCH expressions. + * + * @param maxRows + * additional limit + * @return the evaluated values + */ + OffsetFetch getOffsetFetch(long maxRows) { + long offset; + if (offsetExpr != null) { + Value v = offsetExpr.getValue(session); + if (v == ValueNull.INSTANCE || (offset = v.getLong()) < 0) { + throw DbException.getInvalidValueException("result OFFSET", v); + } + } else { + offset = 0; + } + long fetch = maxRows == 0 ? -1 : maxRows; + if (fetchExpr != null) { + Value v = fetchExpr.getValue(session); + long l; + if (v == ValueNull.INSTANCE || (l = v.getLong()) < 0) { + throw DbException.getInvalidValueException("result FETCH", v); + } + fetch = fetch < 0 ? l : Math.min(l, fetch); + } + boolean fetchPercent = this.fetchPercent; + if (fetchPercent) { + if (fetch > 100) { + throw DbException.getInvalidValueException("result FETCH PERCENT", fetch); + } + // 0 PERCENT means 0 + if (fetch == 0) { + fetchPercent = false; + } + } + return new OffsetFetch(offset, fetch, fetchPercent); + } + + /** + * Applies limits, if any, to a result and makes it ready for value + * retrieval. 
+ * + * @param result + * the result + * @param offset + * OFFSET value + * @param fetch + * FETCH value + * @param fetchPercent + * whether FETCH value is a PERCENT value + * @param target + * target result or null + * @return the result or null + */ + LocalResult finishResult(LocalResult result, long offset, long fetch, boolean fetchPercent, ResultTarget target) { + if (offset != 0) { + result.setOffset(offset); + } + if (fetch >= 0) { + result.setLimit(fetch); + result.setFetchPercent(fetchPercent); + if (withTies) { + result.setWithTies(sort); + } + } + result.done(); + if (randomAccessResult && !distinct) { + result = convertToDistinct(result); + } + if (target != null) { + while (result.next()) { + target.addRow(result.currentRow()); + } + result.close(); + return null; + } + return result; + } + + /** + * Convert a result into a distinct result, using the current columns. + * + * @param result the source + * @return the distinct result + */ + LocalResult convertToDistinct(ResultInterface result) { + LocalResult distinctResult = new LocalResult(session, expressionArray, visibleColumnCount, resultColumnCount); + distinctResult.setDistinct(); + result.reset(); + while (result.next()) { + distinctResult.addRow(result.currentRow()); + } + result.close(); + distinctResult.done(); + return distinctResult; + } + + /** + * Converts this query to a table or a view. + * + * @param alias alias name for the view + * @param columnTemplates column templates, or {@code null} + * @param parameters the parameters + * @param forCreateView if true, a system session will be used for the view + * @param topQuery the top level query + * @return the table or the view + */ + public Table toTable(String alias, Column[] columnTemplates, ArrayList parameters, + boolean forCreateView, Query topQuery) { + setParameterList(new ArrayList<>(parameters)); + if (!checkInit) { + init(); + } + return TableView.createTempView(forCreateView ? 
session.getDatabase().getSystemSession() : session, + session.getUser(), alias, columnTemplates, this, topQuery); + } + + @Override + public void collectDependencies(HashSet dependencies) { + ExpressionVisitor visitor = ExpressionVisitor.getDependenciesVisitor(dependencies); + isEverything(visitor); + } + + /** + * Check if this query will always return the same value and has no side + * effects. + * + * @return if this query will always return the same value and has no side + * effects. + */ + public boolean isConstantQuery() { + return !hasOrder() && (offsetExpr == null || offsetExpr.isConstant()) + && (fetchExpr == null || fetchExpr.isConstant()); + } + + /** + * If this query is determined as a single-row query, returns a replacement + * expression. + * + * @return the expression, or {@code null} + */ + public Expression getIfSingleRow() { + return null; + } + } diff --git a/h2/src/main/org/h2/command/dml/SelectOrderBy.java b/h2/src/main/org/h2/command/query/QueryOrderBy.java similarity index 60% rename from h2/src/main/org/h2/command/dml/SelectOrderBy.java rename to h2/src/main/org/h2/command/query/QueryOrderBy.java index c1bbe5181e..8606f30a69 100644 --- a/h2/src/main/org/h2/command/dml/SelectOrderBy.java +++ b/h2/src/main/org/h2/command/query/QueryOrderBy.java @@ -1,9 +1,9 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.command.dml; +package org.h2.command.query; import org.h2.expression.Expression; import org.h2.result.SortOrder; @@ -11,7 +11,7 @@ /** * Describes one element of the ORDER BY clause of a query. */ -public class SelectOrderBy { +public class QueryOrderBy { /** * The order by expression. 
@@ -34,15 +34,10 @@ public class SelectOrderBy { * Appends the order by expression to the specified builder. * * @param builder the string builder - * @param alwaysQuote quote all identifiers + * @param sqlFlags formatting flags */ - public void getSQL(StringBuilder builder, boolean alwaysQuote) { - if (expression != null) { - builder.append('='); - expression.getSQL(builder, alwaysQuote); - } else { - columnIndexExpr.getUnenclosedSQL(builder, alwaysQuote); - } + public void getSQL(StringBuilder builder, int sqlFlags) { + (expression != null ? expression : columnIndexExpr).getUnenclosedSQL(builder, sqlFlags); SortOrder.typeToString(builder, sortType); } diff --git a/h2/src/main/org/h2/command/dml/Select.java b/h2/src/main/org/h2/command/query/Select.java similarity index 71% rename from h2/src/main/org/h2/command/dml/Select.java rename to h2/src/main/org/h2/command/query/Select.java index f86cc2f34e..5b1b730dd1 100644 --- a/h2/src/main/org/h2/command/dml/Select.java +++ b/h2/src/main/org/h2/command/query/Select.java @@ -1,24 +1,31 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ -package org.h2.command.dml; +package org.h2.command.query; + +import static org.h2.expression.Expression.WITHOUT_PARENTHESES; +import static org.h2.util.HasSQL.ADD_PLAN_INFORMATION; +import static org.h2.util.HasSQL.DEFAULT_SQL_FLAGS; import java.util.ArrayList; import java.util.Arrays; import java.util.BitSet; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.Map.Entry; import org.h2.api.ErrorCode; import org.h2.api.Trigger; -import org.h2.command.Parser; import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.Mode.ExpressionNames; +import org.h2.engine.SessionLocal; import org.h2.expression.Alias; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionList; import org.h2.expression.ExpressionVisitor; import org.h2.expression.Parameter; import org.h2.expression.Wildcard; @@ -26,11 +33,13 @@ import org.h2.expression.analysis.Window; import org.h2.expression.condition.Comparison; import org.h2.expression.condition.ConditionAndOr; +import org.h2.expression.condition.ConditionLocalAndGlobal; +import org.h2.expression.function.CoalesceFunction; import org.h2.index.Cursor; import org.h2.index.Index; -import org.h2.index.IndexType; import org.h2.index.ViewIndex; import org.h2.message.DbException; +import org.h2.mode.DefaultNullOrdering; import org.h2.result.LazyResult; import org.h2.result.LocalResult; import org.h2.result.ResultInterface; @@ -41,17 +50,15 @@ import org.h2.table.Column; import org.h2.table.ColumnResolver; import org.h2.table.IndexColumn; -import org.h2.table.JoinBatch; import org.h2.table.Table; import org.h2.table.TableFilter; -import org.h2.table.TableFilter.TableFilterVisitor; import org.h2.table.TableType; import org.h2.table.TableView; -import org.h2.util.ColumnNamer; +import org.h2.util.ParserUtil; import org.h2.util.StringUtils; import 
org.h2.util.Utils; +import org.h2.value.DataType; import org.h2.value.Value; -import org.h2.value.ValueNull; import org.h2.value.ValueRow; /** @@ -97,11 +104,6 @@ public class Select extends Query { */ private Expression qualify; - /** - * The visible columns (the ones required in the result). - */ - int visibleColumnCount; - /** * {@code DISTINCT ON(...)} expressions. */ @@ -109,7 +111,6 @@ public class Select extends Query { private int[] distinctIndexes; - private int distinctColumnCount; private ArrayList group; /** @@ -133,6 +134,12 @@ public class Select extends Query { private int[] groupByCopies; + /** + * Whether this SELECT is an explicit table (TABLE tableName). It is used in + * {@link #getPlanSQL(int)} to generate SQL similar to original query. + */ + private boolean isExplicitTable; + /** * This flag is set when SELECT statement contains (non-window) aggregate * functions, GROUP BY clause or HAVING clause. @@ -140,17 +147,16 @@ public class Select extends Query { boolean isGroupQuery; private boolean isGroupSortedQuery; private boolean isWindowQuery; - private boolean isForUpdate, isForUpdateMvcc; + private boolean isForUpdate; private double cost; private boolean isQuickAggregateQuery, isDistinctQuery; - private boolean isPrepared, checkInit; private boolean sortUsingIndex; private boolean isGroupWindowStage2; private HashMap windows; - public Select(Session session, Select parentSelect) { + public Select(SessionLocal session, Select parentSelect) { super(session); this.parentSelect = parentSelect; } @@ -188,6 +194,14 @@ public void setExpressions(ArrayList expressions) { this.expressions = expressions; } + /** + * Convert this SELECT to an explicit table (TABLE tableName). + */ + public void setExplicitTable() { + setWildcard(); + isExplicitTable = true; + } + /** * Sets a wildcard expression as in "SELECT * FROM TEST". 
*/ @@ -229,7 +243,9 @@ public SelectGroups getGroupDataIfCurrent(boolean window) { return groupData != null && (window || groupData.isCurrentGroup()) ? groupData : null; } - @Override + /** + * Set the distinct flag. + */ public void setDistinct() { if (distinctExpressions != null) { throw DbException.getUnsupportedException("DISTINCT ON together with DISTINCT"); @@ -249,13 +265,6 @@ public void setDistinct(Expression[] distinctExpressions) { this.distinctExpressions = distinctExpressions; } - @Override - public void setDistinctIfPossible() { - if (!isAnyDistinct() && offsetExpr == null && limitExpr == null) { - distinct = true; - } - } - @Override public boolean isAnyDistinct() { return distinct || distinctExpressions != null; @@ -322,34 +331,31 @@ private LazyResult queryGroupSorted(int columnCount, ResultTarget result, long o * @return the row */ Value[] createGroupSortedRow(Value[] keyValues, int columnCount) { - Value[] row = new Value[columnCount]; - for (int j = 0; groupIndex != null && j < groupIndex.length; j++) { - row[groupIndex[j]] = keyValues[j]; - } - for (int j = 0; j < columnCount; j++) { - if (groupByExpression != null && groupByExpression[j]) { - continue; - } - Expression expr = expressions.get(j); - row[j] = expr.getValue(session); - } + Value[] row = constructGroupResultRow(keyValues, columnCount); if (isHavingNullOrFalse(row)) { return null; } - row = keepOnlyDistinct(row, columnCount); - return row; + return rowForResult(row, columnCount); } - private Value[] keepOnlyDistinct(Value[] row, int columnCount) { - if (columnCount == distinctColumnCount) { + /** + * Removes HAVING and QUALIFY columns from the row. 
+ * + * @param row + * the complete row + * @param columnCount + * the number of columns to keep + * @return the same or the truncated row + */ + private Value[] rowForResult(Value[] row, int columnCount) { + if (columnCount == resultColumnCount) { return row; } - // remove columns so that 'distinct' can filter duplicate rows - return Arrays.copyOf(row, distinctColumnCount); + return Arrays.copyOf(row, resultColumnCount); } private boolean isHavingNullOrFalse(Value[] row) { - return havingIndex >= 0 && !row[havingIndex].getBoolean(); + return havingIndex >= 0 && !row[havingIndex].isTrue(); } private Index getGroupSortedIndex() { @@ -412,19 +418,6 @@ private boolean isGroupSortedIndex(TableFilter tableFilter, Index index) { return true; } - private int getGroupByExpressionCount() { - if (groupByExpression == null) { - return 0; - } - int count = 0; - for (boolean b : groupByExpression) { - if (b) { - ++count; - } - } - return count; - } - boolean isConditionMetForUpdate() { if (isConditionMet()) { int count = filters.size(); @@ -435,7 +428,7 @@ boolean isConditionMetForUpdate() { Row row = tableFilter.get(); Table table = tableFilter.getTable(); // Views, function tables, links, etc. 
do not support locks - if (table.isMVStore()) { + if (table.isRowLockable()) { Row lockedRow = table.lockRow(session, row); if (lockedRow == null) { return false; @@ -510,13 +503,10 @@ private void initGroupData(int columnCount) { void setGroupData(final SelectGroups groupData) { this.groupData = groupData; - topTableFilter.visit(new TableFilterVisitor() { - @Override - public void accept(TableFilter f) { - Select s = f.getSelect(); - if (s != null) { - s.groupData = groupData; - } + topTableFilter.visit(f -> { + Select s = f.getSelect(); + if (s != null) { + s.groupData = groupData; } }); } @@ -524,16 +514,12 @@ public void accept(TableFilter f) { private void gatherGroup(int columnCount, int stage) { long rowNumber = 0; setCurrentRowNumber(0); - int sampleSize = getSampleSizeValue(session); while (topTableFilter.next()) { setCurrentRowNumber(rowNumber + 1); - if (isForUpdateMvcc ? isConditionMetForUpdate() : isConditionMet()) { + if (isForUpdate ? isConditionMetForUpdate() : isConditionMet()) { rowNumber++; groupData.nextSource(); updateAgg(columnCount, stage); - if (sampleSize > 0 && rowNumber >= sampleSize) { - break; - } } } groupData.done(); @@ -558,38 +544,42 @@ void updateAgg(int columnCount, int stage) { private void processGroupResult(int columnCount, LocalResult result, long offset, boolean quickOffset, boolean withHaving) { for (ValueRow currentGroupsKey; (currentGroupsKey = groupData.next()) != null;) { - Value[] keyValues = currentGroupsKey.getList(); - Value[] row = new Value[columnCount]; - for (int j = 0; groupIndex != null && j < groupIndex.length; j++) { - row[groupIndex[j]] = keyValues[j]; - } - for (int j = 0; j < columnCount; j++) { - if (groupByExpression != null && groupByExpression[j]) { - continue; - } - if (groupByCopies != null) { - int original = groupByCopies[j]; - if (original >= 0) { - row[j] = row[original]; - continue; - } - } - Expression expr = expressions.get(j); - row[j] = expr.getValue(session); - } + Value[] row = 
constructGroupResultRow(currentGroupsKey.getList(), columnCount); if (withHaving && isHavingNullOrFalse(row)) { continue; } - if (qualifyIndex >= 0 && !row[qualifyIndex].getBoolean()) { + if (qualifyIndex >= 0 && !row[qualifyIndex].isTrue()) { continue; } if (quickOffset && offset > 0) { offset--; continue; } - row = keepOnlyDistinct(row, columnCount); - result.addRow(row); + result.addRow(rowForResult(row, columnCount)); + } + } + + private Value[] constructGroupResultRow(Value[] keyValues, int columnCount) { + Value[] row = new Value[columnCount]; + if (groupIndex != null) { + for (int i = 0, l = groupIndex.length; i < l; i++) { + row[groupIndex[i]] = keyValues[i]; + } + } + for (int i = 0; i < columnCount; i++) { + if (groupByExpression != null && groupByExpression[i]) { + continue; + } + if (groupByCopies != null) { + int original = groupByCopies[i]; + if (original >= 0) { + row[i] = row[original]; + continue; + } + } + row[i] = expressions.get(i).getValue(session); } + return row; } /** @@ -605,7 +595,11 @@ private Index getSortIndex() { return null; } ArrayList sortColumns = Utils.newSmallArrayList(); - for (int idx : sort.getQueryColumnIndexes()) { + int[] queryColumnIndexes = sort.getQueryColumnIndexes(); + int queryIndexesLength = queryColumnIndexes.length; + int[] sortIndex = new int[queryIndexesLength]; + for (int i = 0, j = 0; i < queryIndexesLength; i++) { + int idx = queryColumnIndexes[i]; if (idx < 0 || idx >= expressions.size()) { throw DbException.getInvalidValueException("ORDER BY", idx + 1); } @@ -622,6 +616,7 @@ private Index getSortIndex() { return null; } sortColumns.add(exprCol.getColumn()); + sortIndex[j++] = i; } Column[] sortCols = sortColumns.toArray(new Column[0]); if (sortCols.length == 0) { @@ -630,8 +625,9 @@ private Index getSortIndex() { } ArrayList list = topTableFilter.getTable().getIndexes(); if (list != null) { - int[] sortTypes = sort.getSortTypesWithNullPosition(); - for (Index index : list) { + int[] sortTypes = 
sort.getSortTypesWithNullOrdering(); + DefaultNullOrdering defaultNullOrdering = session.getDatabase().getDefaultNullOrdering(); + loop: for (Index index : list) { if (index.getCreateSQL() == null) { // can't use the scan index continue; @@ -643,24 +639,22 @@ private Index getSortIndex() { if (indexCols.length < sortCols.length) { continue; } - boolean ok = true; for (int j = 0; j < sortCols.length; j++) { // the index and the sort order must start // with the exact same columns IndexColumn idxCol = indexCols[j]; Column sortCol = sortCols[j]; if (idxCol.column != sortCol) { - ok = false; - break; + continue loop; } - if (SortOrder.addExplicitNullPosition(idxCol.sortType) != sortTypes[j]) { - ok = false; - break; + int sortType = sortTypes[sortIndex[j]]; + if (sortCol.isNullable() + ? defaultNullOrdering.addExplicitNullOrdering(idxCol.sortType) != sortType + : (idxCol.sortType & SortOrder.DESCENDING) != (sortType & SortOrder.DESCENDING)) { + continue loop; } } - if (ok) { - return index; - } + return index; } } if (sortCols.length == 1 && sortCols[0].getColumnId() == -1) { @@ -687,7 +681,6 @@ private void queryDistinct(ResultTarget result, long offset, long limitRows, boo Index index = topTableFilter.getIndex(); SearchRow first = null; int columnIndex = index.getColumns()[0].getColumnId(); - int sampleSize = getSampleSizeValue(session); if (!quickOffset) { offset = 0; } @@ -700,20 +693,15 @@ private void queryDistinct(ResultTarget result, long offset, long limitRows, boo SearchRow found = cursor.getSearchRow(); Value value = found.getValue(columnIndex); if (first == null) { - first = topTableFilter.getTable().getTemplateSimpleRow(true); + first = index.getRowFactory().createRow(); } first.setValue(columnIndex, value); if (offset > 0) { offset--; continue; } - Value[] row = { value }; - result.addRow(row); - if ((sort == null || sortUsingIndex) && limitRows > 0 && - rowNumber >= limitRows && !withTies) { - break; - } - if (sampleSize > 0 && rowNumber >= sampleSize) { 
+ result.addRow(value); + if ((sort == null || sortUsingIndex) && limitRows > 0 && rowNumber >= limitRows && !withTies) { break; } } @@ -728,9 +716,7 @@ private LazyResult queryFlat(int columnCount, ResultTarget result, long offset, limitRows = Long.MAX_VALUE; } } - int sampleSize = getSampleSizeValue(session); - LazyResultQueryFlat lazyResult = new LazyResultQueryFlat(expressionArray, columnCount, sampleSize, - isForUpdateMvcc); + LazyResultQueryFlat lazyResult = new LazyResultQueryFlat(expressionArray, columnCount, isForUpdate); skipOffset(lazyResult, offset, quickOffset); if (result == null) { return lazyResult; @@ -777,50 +763,15 @@ private void queryQuick(int columnCount, ResultTarget result, boolean skipResult } @Override - public ResultInterface queryMeta() { - LocalResult result = session.getDatabase().getResultFactory().create(session, expressionArray, - visibleColumnCount); - result.done(); - return result; - } - - @Override - protected ResultInterface queryWithoutCache(int maxRows, ResultTarget target) { + protected ResultInterface queryWithoutCache(long maxRows, ResultTarget target) { disableLazyForJoinSubqueries(topTableFilter); - - int limitRows = maxRows == 0 ? -1 : maxRows; - if (limitExpr != null) { - Value v = limitExpr.getValue(session); - int l = v == ValueNull.INSTANCE ? 
-1 : v.getInt(); - if (limitRows < 0) { - limitRows = l; - } else if (l >= 0) { - limitRows = Math.min(l, limitRows); - } - } - boolean fetchPercent = this.fetchPercent; - if (fetchPercent) { - // Need to check it now, because negative limit has special treatment later - if (limitRows < 0 || limitRows > 100) { - throw DbException.getInvalidValueException("FETCH PERCENT", limitRows); - } - // 0 PERCENT means 0 - if (limitRows == 0) { - fetchPercent = false; - } - } - long offset; - if (offsetExpr != null) { - offset = offsetExpr.getValue(session).getLong(); - if (offset < 0) { - offset = 0; - } - } else { - offset = 0; - } + OffsetFetch offsetFetch = getOffsetFetch(maxRows); + long offset = offsetFetch.offset; + long fetch = offsetFetch.fetch; + boolean fetchPercent = offsetFetch.fetchPercent; boolean lazy = session.isLazyQueryExecution() && target == null && !isForUpdate && !isQuickAggregateQuery && - limitRows != 0 && !fetchPercent && !withTies && offset == 0 && isReadOnly(); + fetch != 0 && !fetchPercent && !withTies && offset == 0 && isReadOnly(); int columnCount = expressions.size(); LocalResult result = null; if (!lazy && (target == null || @@ -850,52 +801,45 @@ protected ResultInterface queryWithoutCache(int maxRows, ResultTarget target) { if (isWindowQuery || isGroupQuery && !isGroupSortedQuery) { result = createLocalResult(result); } - if (!lazy && (limitRows >= 0 || offset > 0)) { + if (!lazy && (fetch >= 0 || offset > 0)) { result = createLocalResult(result); } topTableFilter.startQuery(session); topTableFilter.reset(); - boolean exclusive = isForUpdate && !isForUpdateMvcc; - topTableFilter.lock(session, exclusive, exclusive); + topTableFilter.lock(session); ResultTarget to = result != null ? result : target; lazy &= to == null; LazyResult lazyResult = null; - if (limitRows != 0) { + if (fetch != 0) { // Cannot apply limit now if percent is specified - int limit = fetchPercent ? 
-1 : limitRows; - try { - if (isQuickAggregateQuery) { - queryQuick(columnCount, to, quickOffset && offset > 0); - } else if (isWindowQuery) { - if (isGroupQuery) { - queryGroupWindow(columnCount, result, offset, quickOffset); - } else { - queryWindow(columnCount, result, offset, quickOffset); - } - } else if (isGroupQuery) { - if (isGroupSortedQuery) { - lazyResult = queryGroupSorted(columnCount, to, offset, quickOffset); - } else { - queryGroup(columnCount, result, offset, quickOffset); - } - } else if (isDistinctQuery) { - queryDistinct(to, offset, limit, withTies, quickOffset); + long limit = fetchPercent ? -1 : fetch; + if (isQuickAggregateQuery) { + queryQuick(columnCount, to, quickOffset && offset > 0); + } else if (isWindowQuery) { + if (isGroupQuery) { + queryGroupWindow(columnCount, result, offset, quickOffset); } else { - lazyResult = queryFlat(columnCount, to, offset, limit, withTies, quickOffset); + queryWindow(columnCount, result, offset, quickOffset); } - if (quickOffset) { - offset = 0; - } - } finally { - if (!lazy) { - resetJoinBatchAfterQuery(); + } else if (isGroupQuery) { + if (isGroupSortedQuery) { + lazyResult = queryGroupSorted(columnCount, to, offset, quickOffset); + } else { + queryGroup(columnCount, result, offset, quickOffset); } + } else if (isDistinctQuery) { + queryDistinct(to, offset, limit, withTies, quickOffset); + } else { + lazyResult = queryFlat(columnCount, to, offset, limit, withTies, quickOffset); + } + if (quickOffset) { + offset = 0; } } - assert lazy == (lazyResult != null): lazy; + assert lazy == (lazyResult != null) : lazy; if (lazyResult != null) { - if (limitRows > 0) { - lazyResult.setLimit(limitRows); + if (fetch > 0) { + lazyResult.setLimit(fetch); } if (randomAccessResult) { return convertToDistinct(lazyResult); @@ -903,78 +847,27 @@ protected ResultInterface queryWithoutCache(int maxRows, ResultTarget target) { return lazyResult; } } - if (offset != 0) { - if (offset > Integer.MAX_VALUE) { - throw 
DbException.getInvalidValueException("OFFSET", offset); - } - result.setOffset((int) offset); - } - if (limitRows >= 0) { - result.setLimit(limitRows); - result.setFetchPercent(fetchPercent); - if (withTies) { - result.setWithTies(sort); - } - } if (result != null) { - result.done(); - if (randomAccessResult && !distinct) { - result = convertToDistinct(result); - } - if (target != null) { - while (result.next()) { - target.addRow(result.currentRow()); - } - result.close(); - return null; - } - return result; + return finishResult(result, offset, fetch, fetchPercent, target); } return null; } private void disableLazyForJoinSubqueries(final TableFilter top) { if (session.isLazyQueryExecution()) { - top.visit(new TableFilter.TableFilterVisitor() { - @Override - public void accept(TableFilter f) { - if (f != top && f.getTable().getTableType() == TableType.VIEW) { - ViewIndex idx = (ViewIndex) f.getIndex(); - if (idx != null && idx.getQuery() != null) { - idx.getQuery().setNeverLazy(true); - } + top.visit(f -> { + if (f != top && f.getTable().getTableType() == TableType.VIEW) { + ViewIndex idx = (ViewIndex) f.getIndex(); + if (idx != null && idx.getQuery() != null) { + idx.getQuery().setNeverLazy(true); } } }); } } - /** - * Reset the batch-join after the query result is closed. - */ - void resetJoinBatchAfterQuery() { - JoinBatch jb = getJoinBatch(); - if (jb != null) { - jb.reset(false); - } - } - private LocalResult createLocalResult(LocalResult old) { - return old != null ? 
old : session.getDatabase().getResultFactory().create(session, expressionArray, - visibleColumnCount); - } - - private LocalResult convertToDistinct(ResultInterface result) { - LocalResult distinctResult = session.getDatabase().getResultFactory().create(session, - expressionArray, visibleColumnCount); - distinctResult.setDistinct(); - result.reset(); - while (result.next()) { - distinctResult.addRow(result.currentRow()); - } - result.close(); - distinctResult.done(); - return distinctResult; + return old != null ? old : new LocalResult(session, expressionArray, visibleColumnCount, resultColumnCount); } private void expandColumnList() { @@ -998,7 +891,7 @@ private void expandColumnList() { exceptTableColumns = w.mapExceptColumns(); } for (TableFilter filter : filters) { - i = expandColumnList(filter, i, exceptTableColumns); + i = expandColumnList(filter, i, false, exceptTableColumns); } } else { Database db = session.getDatabase(); @@ -1019,26 +912,62 @@ private void expandColumnList() { if (filter == null) { throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableAlias); } - i = expandColumnList(filter, i, exceptTableColumns); + i = expandColumnList(filter, i, true, exceptTableColumns); } } } - private int expandColumnList(TableFilter filter, int index, HashMap except) { + private int expandColumnList(TableFilter filter, int index, boolean forAlias, + HashMap except) { + String schema = filter.getSchemaName(); String alias = filter.getTableAlias(); - for (Column c : filter.getTable().getColumns()) { - if (except != null && except.remove(c) != null) { - continue; + if (forAlias) { + for (Column c : filter.getTable().getColumns()) { + index = addExpandedColumn(filter, index, except, schema, alias, c); } - if (!c.getVisible()) { - continue; + } else { + LinkedHashMap commonJoinColumns = filter.getCommonJoinColumns(); + if (commonJoinColumns != null) { + TableFilter replacementFilter = filter.getCommonJoinColumnsFilter(); + String replacementSchema = 
replacementFilter.getSchemaName(); + String replacementAlias = replacementFilter.getTableAlias(); + for (Entry entry : commonJoinColumns.entrySet()) { + Column left = entry.getKey(), right = entry.getValue(); + if (!filter.isCommonJoinColumnToExclude(right) + && (except == null || except.remove(left) == null && except.remove(right) == null)) { + Database database = session.getDatabase(); + Expression e; + if (left == right + || DataType.hasTotalOrdering(left.getType().getValueType()) + && DataType.hasTotalOrdering(right.getType().getValueType())) { + e = new ExpressionColumn(database, replacementSchema, replacementAlias, + replacementFilter.getColumnName(right)); + } else { + e = new Alias(new CoalesceFunction(CoalesceFunction.COALESCE, + new ExpressionColumn(database, schema, alias, filter.getColumnName(left)), + new ExpressionColumn(database, replacementSchema, replacementAlias, + replacementFilter.getColumnName(right))), // + left.getName(), true); + } + expressions.add(index++, e); + } + } } - if (filter.isNaturalJoinColumn(c)) { - continue; + for (Column c : filter.getTable().getColumns()) { + if (commonJoinColumns == null || !commonJoinColumns.containsKey(c)) { + if (!filter.isCommonJoinColumnToExclude(c)) { + index = addExpandedColumn(filter, index, except, schema, alias, c); + } + } } - String name = filter.getDerivedColumnName(c); - ExpressionColumn ec = new ExpressionColumn( - session.getDatabase(), null, alias, name != null ? 
name : c.getName(), false); + } + return index; + } + + private int addExpandedColumn(TableFilter filter, int index, HashMap except, + String schema, String alias, Column c) { + if ((except == null || except.remove(c) == null) && c.getVisible()) { + ExpressionColumn ec = new ExpressionColumn(session.getDatabase(), schema, alias, filter.getColumnName(c)); expressions.add(index++, ec); } return index; @@ -1047,18 +976,20 @@ private int expandColumnList(TableFilter filter, int index, HashMap Constants.MAX_COLUMNS) { + throw DbException.get(ErrorCode.TOO_MANY_COLUMNS_1, "" + Constants.MAX_COLUMNS); + } ArrayList expressionSQL; if (distinctExpressions != null || orderList != null || group != null) { expressionSQL = new ArrayList<>(visibleColumnCount); for (int i = 0; i < visibleColumnCount; i++) { Expression expr = expressions.get(i); expr = expr.getNonAliasExpression(); - String sql = expr.getSQL(true); - expressionSQL.add(sql); + expressionSQL.add(expr.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES)); } } else { expressionSQL = null; @@ -1066,8 +997,7 @@ public void init() { if (distinctExpressions != null) { BitSet set = new BitSet(); for (Expression e : distinctExpressions) { - set.set(initExpression(session, expressions, expressionSQL, e, visibleColumnCount, false, - filters)); + set.set(initExpression(expressionSQL, e, false, filters)); } int idx = 0, cnt = set.cardinality(); distinctIndexes = new int[cnt]; @@ -1078,10 +1008,9 @@ public void init() { } } if (orderList != null) { - initOrder(session, expressions, expressionSQL, orderList, - visibleColumnCount, isAnyDistinct(), filters); + initOrder(expressionSQL, isAnyDistinct(), filters); } - distinctColumnCount = expressions.size(); + resultColumnCount = expressions.size(); if (having != null) { expressions.add(having); havingIndex = expressions.size() - 1; @@ -1110,10 +1039,17 @@ public void init() { if (group != null) { int size = group.size(); int expSize = expressionSQL.size(); + int fullExpSize = 
expressions.size(); + if (fullExpSize > expSize) { + expressionSQL.ensureCapacity(fullExpSize); + for (int i = expSize; i < fullExpSize; i++) { + expressionSQL.add(expressions.get(i).getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES)); + } + } groupIndex = new int[size]; for (int i = 0; i < size; i++) { Expression expr = group.get(i); - String sql = expr.getSQL(true); + String sql = expr.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES); int found = -1; for (int j = 0; j < expSize; j++) { String s2 = expressionSQL.get(j); @@ -1126,12 +1062,12 @@ public void init() { // special case: GROUP BY a column alias for (int j = 0; j < expSize; j++) { Expression e = expressions.get(j); - if (db.equalsIdentifiers(sql, e.getAlias())) { + if (db.equalsIdentifiers(sql, e.getAlias(session, j))) { found = mergeGroupByExpressions(db, j, expressionSQL, true); break; } - sql = expr.getAlias(); - if (db.equalsIdentifiers(sql, e.getAlias())) { + sql = expr.getAlias(session, j); + if (db.equalsIdentifiers(sql, e.getAlias(session, j))) { found = mergeGroupByExpressions(db, j, expressionSQL, true); break; } @@ -1176,8 +1112,9 @@ private void mapCondition(int index) { } } - private int mergeGroupByExpressions(Database db, int index, ArrayList expressionSQL, boolean scanPrevious) - { + private int mergeGroupByExpressions(Database db, int index, ArrayList expressionSQL, // + boolean scanPrevious) { + /* * -1: uniqueness of expression is not known yet * @@ -1227,35 +1164,36 @@ public void prepare() { return; } if (!checkInit) { - DbException.throwInternalError("not initialized"); + throw DbException.getInternalError("not initialized"); } if (orderList != null) { - sort = prepareOrder(orderList, expressions.size()); - orderList = null; + prepareOrder(orderList, expressions.size()); } - ColumnNamer columnNamer = new ColumnNamer(session); - for (int i = 0; i < expressions.size(); i++) { - Expression e = expressions.get(i); - String proposedColumnName = e.getAlias(); - String columnName = 
columnNamer.getColumnName(e, i, proposedColumnName); - // if the name changed, create an alias - if (!columnName.equals(proposedColumnName)) { - e = new Alias(e, columnName, true); + ExpressionNames expressionNames = session.getMode().expressionNames; + if (expressionNames == ExpressionNames.ORIGINAL_SQL || expressionNames == ExpressionNames.POSTGRESQL_STYLE) { + optimizeExpressionsAndPreserveAliases(); + } else { + for (int i = 0; i < expressions.size(); i++) { + expressions.set(i, expressions.get(i).optimize(session)); } - expressions.set(i, e.optimize(session)); + } + if (sort != null) { + cleanupOrder(); } if (condition != null) { - condition = condition.optimize(session); - for (TableFilter f : filters) { - // outer joins: must not add index conditions such as - // "c is null" - example: - // create table parent(p int primary key) as select 1; - // create table child(c int primary key, pc int); - // insert into child values(2, 1); - // select p, c from parent - // left outer join child on p = pc where c is null; - if (!f.isJoinOuter() && !f.isJoinOuterIndirect()) { - condition.createIndexConditions(session, f); + condition = condition.optimizeCondition(session); + if (condition != null) { + for (TableFilter f : filters) { + // outer joins: must not add index conditions such as + // "c is null" - example: + // create table parent(p int primary key) as select 1; + // create table child(c int primary key, pc int); + // insert into child values(2, 1); + // select p, c from parent + // left outer join child on p = pc where c is null; + if (!f.isJoinOuter() && !f.isJoinOuterIndirect()) { + condition.createIndexConditions(session, f); + } } } } @@ -1277,23 +1215,11 @@ public void prepare() { if (columnIndex != null && selectivity != Constants.SELECTIVITY_DEFAULT && selectivity < 20) { - // the first column must be ascending - boolean ascending = columnIndex. 
- getIndexColumns()[0].sortType == SortOrder.ASCENDING; Index current = topTableFilter.getIndex(); // if another index is faster - if (columnIndex.canFindNext() && ascending && - (current == null || - current.getIndexType().isScan() || - columnIndex == current)) { - IndexType type = columnIndex.getIndexType(); - // hash indexes don't work, and unique single column - // indexes don't work - if (!type.isHash() && (!type.isUnique() || - columnIndex.getColumns().length > 1)) { - topTableFilter.setIndex(columnIndex); - isDistinctQuery = true; - } + if (current == null || current.getIndexType().isScan() || columnIndex == current) { + topTableFilter.setIndex(columnIndex); + isDistinctQuery = true; } } } @@ -1330,48 +1256,36 @@ public void prepare() { } } } - if (sortUsingIndex && isForUpdateMvcc && !topTableFilter.getIndex().isRowIdIndex()) { + if (sortUsingIndex && isForUpdate && !topTableFilter.getIndex().isRowIdIndex()) { sortUsingIndex = false; } } - if (!isQuickAggregateQuery && isGroupQuery && - getGroupByExpressionCount() > 0) { + if (!isQuickAggregateQuery && isGroupQuery) { Index index = getGroupSortedIndex(); - Index current = topTableFilter.getIndex(); - if (index != null && current != null && (current.getIndexType().isScan() || - current == index)) { - topTableFilter.setIndex(index); - isGroupSortedQuery = true; + if (index != null) { + Index current = topTableFilter.getIndex(); + if (current != null && (current.getIndexType().isScan() || current == index)) { + topTableFilter.setIndex(index); + isGroupSortedQuery = true; + } } } expressionArray = expressions.toArray(new Expression[0]); isPrepared = true; } - @Override - public void prepareJoinBatch() { - ArrayList list = new ArrayList<>(); - TableFilter f = getTopTableFilter(); - do { - if (f.getNestedJoin() != null) { - // we do not support batching with nested joins - return; + private void optimizeExpressionsAndPreserveAliases() { + for (int i = 0; i < expressions.size(); i++) { + Expression e = 
expressions.get(i); + String alias = e.getAlias(session, i); + e = e.optimize(session); + if (!e.getAlias(session, i).equals(alias)) { + e = new Alias(e, alias, true); } - list.add(f); - f = f.getJoin(); - } while (f != null); - TableFilter[] fs = list.toArray(new TableFilter[0]); - // prepare join batch - JoinBatch jb = null; - for (int i = fs.length - 1; i >= 0; i--) { - jb = fs[i].prepareJoinBatch(jb, fs, i); + expressions.set(i, e); } } - public JoinBatch getJoinBatch() { - return getTopTableFilter().getJoinBatch(); - } - @Override public double getCost() { return cost; @@ -1450,7 +1364,7 @@ private void setEvaluatableRecursive(TableFilter f) { } @Override - public String getPlanSQL(boolean alwaysQuote) { + public String getPlanSQL(int sqlFlags) { // can not use the field sqlStatement because the parameter // indexes may be incorrect: ? may be in fact ?2 for a subquery // but indexes may be set manually as well @@ -1458,7 +1372,7 @@ public String getPlanSQL(boolean alwaysQuote) { StringBuilder builder = new StringBuilder(); for (TableFilter f : topFilters) { Table t = f.getTable(); - TableView tableView = t.isView() ? (TableView) t : null; + TableView tableView = t instanceof TableView ? (TableView) t : null; if (tableView != null && tableView.isRecursive() && tableView.isTableExpression()) { if (!tableView.isTemporary()) { @@ -1467,126 +1381,137 @@ public String getPlanSQL(boolean alwaysQuote) { // views. 
} else { builder.append("WITH RECURSIVE "); - t.getSchema().getSQL(builder, alwaysQuote).append('.'); - Parser.quoteIdentifier(builder, t.getName(), alwaysQuote).append('('); - Column.writeColumns(builder, t.getColumns(), alwaysQuote); + t.getSchema().getSQL(builder, sqlFlags).append('.'); + ParserUtil.quoteIdentifier(builder, t.getName(), sqlFlags).append('('); + Column.writeColumns(builder, t.getColumns(), sqlFlags); builder.append(") AS "); - t.getSQL(builder, alwaysQuote).append('\n'); + t.getSQL(builder, sqlFlags).append('\n'); } } } - builder.append("SELECT"); - if (isAnyDistinct()) { - builder.append(" DISTINCT"); - if (distinctExpressions != null) { - builder.append(" ON("); - Expression.writeExpressions(builder, distinctExpressions, alwaysQuote); - builder.append(')'); - } - } - for (int i = 0; i < visibleColumnCount; i++) { - if (i > 0) { - builder.append(','); + if (isExplicitTable) { + builder.append("TABLE "); + filters.get(0).getPlanSQL(builder, false, sqlFlags); + } else { + builder.append("SELECT"); + if (isAnyDistinct()) { + builder.append(" DISTINCT"); + if (distinctExpressions != null) { + Expression.writeExpressions(builder.append(" ON("), distinctExpressions, sqlFlags).append(')'); + } } - builder.append('\n'); - StringUtils.indent(builder, exprList[i].getSQL( alwaysQuote), 4, false); - } - builder.append("\nFROM "); - TableFilter filter = topTableFilter; - if (filter != null) { - int i = 0; - do { + for (int i = 0; i < visibleColumnCount; i++) { if (i > 0) { - builder.append('\n'); + builder.append(','); } - filter.getPlanSQL(builder, i++ > 0, alwaysQuote); - filter = filter.getJoin(); - } while (filter != null); - } else { - int i = 0; - for (TableFilter f : topFilters) { - do { - if (i > 0) { - builder.append('\n'); + builder.append('\n'); + StringUtils.indent(builder, exprList[i].getSQL(sqlFlags, WITHOUT_PARENTHESES), 4, false); + } + TableFilter filter = topTableFilter; + if (filter == null) { + int count = topFilters.size(); + if (count 
!= 1 || !topFilters.get(0).isNoFromClauseFilter()) { + builder.append("\nFROM "); + boolean isJoin = false; + for (int i = 0; i < count; i++) { + isJoin = getPlanFromFilter(builder, sqlFlags, topFilters.get(i), isJoin); } - f.getPlanSQL(builder, i++ > 0, alwaysQuote); - f = f.getJoin(); - } while (f != null); - } - } - if (condition != null) { - builder.append("\nWHERE "); - condition.getUnenclosedSQL(builder, alwaysQuote); - } - if (groupIndex != null) { - builder.append("\nGROUP BY "); - for (int i = 0, l = groupIndex.length; i < l; i++) { - if (i > 0) { - builder.append(", "); } - exprList[groupIndex[i]].getNonAliasExpression().getUnenclosedSQL(builder, alwaysQuote); + } else if (!filter.isNoFromClauseFilter()) { + getPlanFromFilter(builder.append("\nFROM "), sqlFlags, filter, false); } - } else if (group != null) { - builder.append("\nGROUP BY "); - for (int i = 0, l = group.size(); i < l; i++) { - if (i > 0) { - builder.append(", "); - } - group.get(i).getUnenclosedSQL(builder, alwaysQuote); + if (condition != null) { + getFilterSQL(builder, "\nWHERE ", condition, sqlFlags); } - } - getFilterSQL(builder, "\nHAVING ", exprList, having, havingIndex); - getFilterSQL(builder, "\nQUALIFY ", exprList, qualify, qualifyIndex); - if (sort != null) { - builder.append("\nORDER BY ").append( - sort.getSQL(exprList, visibleColumnCount, alwaysQuote)); - } - if (orderList != null) { - builder.append("\nORDER BY "); - for (int i = 0, l = orderList.size(); i < l; i++) { - if (i > 0) { - builder.append(", "); + if (groupIndex != null) { + builder.append("\nGROUP BY "); + for (int i = 0, l = groupIndex.length; i < l; i++) { + if (i > 0) { + builder.append(", "); + } + exprList[groupIndex[i]].getNonAliasExpression().getUnenclosedSQL(builder, sqlFlags); } - orderList.get(i).getSQL(builder, alwaysQuote); + } else if (group != null) { + builder.append("\nGROUP BY "); + for (int i = 0, l = group.size(); i < l; i++) { + if (i > 0) { + builder.append(", "); + } + 
group.get(i).getUnenclosedSQL(builder, sqlFlags); + } + } else emptyGroupingSet: if (isGroupQuery && having == null && havingIndex < 0) { + for (int i = 0; i < visibleColumnCount; i++) { + if (containsAggregate(exprList[i])) { + break emptyGroupingSet; + } + } + builder.append("\nGROUP BY ()"); } + getFilterSQL(builder, "\nHAVING ", exprList, having, havingIndex, sqlFlags); + getFilterSQL(builder, "\nQUALIFY ", exprList, qualify, qualifyIndex, sqlFlags); } - appendLimitToSQL(builder, alwaysQuote); - if (sampleSizeExpr != null) { - builder.append("\nSAMPLE_SIZE "); - sampleSizeExpr.getUnenclosedSQL(builder, alwaysQuote); - } + appendEndOfQueryToSQL(builder, sqlFlags, exprList); if (isForUpdate) { builder.append("\nFOR UPDATE"); } - if (isQuickAggregateQuery) { - builder.append("\n/* direct lookup */"); - } - if (isDistinctQuery) { - builder.append("\n/* distinct */"); - } - if (sortUsingIndex) { - builder.append("\n/* index sorted */"); - } - if (isGroupQuery) { - if (isGroupSortedQuery) { - builder.append("\n/* group sorted */"); + if ((sqlFlags & ADD_PLAN_INFORMATION) != 0) { + if (isQuickAggregateQuery) { + builder.append("\n/* direct lookup */"); + } + if (isDistinctQuery) { + builder.append("\n/* distinct */"); + } + if (sortUsingIndex) { + builder.append("\n/* index sorted */"); + } + if (isGroupQuery) { + if (isGroupSortedQuery) { + builder.append("\n/* group sorted */"); + } } + // builder.append("\n/* cost: " + cost + " */"); } - // buff.append("\n/* cost: " + cost + " */"); return builder.toString(); } + private static boolean getPlanFromFilter(StringBuilder builder, int sqlFlags, TableFilter f, boolean isJoin) { + do { + if (isJoin) { + builder.append('\n'); + } + f.getPlanSQL(builder, isJoin, sqlFlags); + isJoin = true; + } while ((f = f.getJoin()) != null); + return isJoin; + } + private static void getFilterSQL(StringBuilder builder, String sql, Expression[] exprList, Expression condition, - int conditionIndex) { + int conditionIndex, int sqlFlags) { 
if (condition != null) { - builder.append(sql); - condition.getUnenclosedSQL(builder, true); + getFilterSQL(builder, sql, condition, sqlFlags); } else if (conditionIndex >= 0) { - builder.append(sql); - exprList[conditionIndex].getUnenclosedSQL(builder, true); + getFilterSQL(builder, sql, exprList[conditionIndex], sqlFlags); } } + private static void getFilterSQL(StringBuilder builder, String sql, Expression condition, int sqlFlags) { + condition.getUnenclosedSQL(builder.append(sql), sqlFlags); + } + + private static boolean containsAggregate(Expression expression) { + if (expression instanceof DataAnalysisOperation) { + if (((DataAnalysisOperation) expression).isAggregate()) { + return true; + } + } + for (int i = 0, l = expression.getSubexpressionCount(); i < l; i++) { + if (containsAggregate(expression.getSubexpression(i))) { + return true; + } + } + return false; + } + public void setHaving(Expression having) { this.having = having; } @@ -1603,11 +1528,6 @@ public Expression getQualify() { return qualify; } - @Override - public int getColumnCount() { - return visibleColumnCount; - } - public TableFilter getTopTableFilter() { return topTableFilter; } @@ -1618,9 +1538,6 @@ public void setForUpdate(boolean b) { throw DbException.get(ErrorCode.FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT); } this.isForUpdate = b; - if (session.getDatabase().isMVStore()) { - isForUpdateMvcc = b; - } } @Override @@ -1683,59 +1600,54 @@ public boolean isGroupWindowStage2() { } @Override - public void addGlobalCondition(Parameter param, int columnId, - int comparisonType) { + public void addGlobalCondition(Parameter param, int columnId, int comparisonType) { addParameter(param); Expression comp; Expression col = expressions.get(columnId); col = col.getNonAliasExpression(); if (col.isEverything(ExpressionVisitor.QUERY_COMPARABLE_VISITOR)) { - comp = new Comparison(session, comparisonType, col, param); + comp = new Comparison(comparisonType, col, param, false); } else { // 
this condition will always evaluate to true, but need to // add the parameter, so it can be set later - comp = new Comparison(session, Comparison.EQUAL_NULL_SAFE, param, param); + comp = new Comparison(Comparison.EQUAL_NULL_SAFE, param, param, false); } comp = comp.optimize(session); - boolean addToCondition = true; if (isWindowQuery) { - if (qualify == null) { - qualify = comp; - } else { - qualify = new ConditionAndOr(ConditionAndOr.AND, comp, qualify); - } - return; - } - if (isGroupQuery) { - addToCondition = false; + qualify = addGlobalCondition(qualify, comp); + } else if (isGroupQuery) { for (int i = 0; groupIndex != null && i < groupIndex.length; i++) { if (groupIndex[i] == columnId) { - addToCondition = true; - break; + condition = addGlobalCondition(condition, comp); + return; } } - if (!addToCondition) { - if (havingIndex >= 0) { - having = expressions.get(havingIndex); - } - if (having == null) { - having = comp; - } else { - having = new ConditionAndOr(ConditionAndOr.AND, having, comp); - } + if (havingIndex >= 0) { + having = expressions.get(havingIndex); } + having = addGlobalCondition(having, comp); + } else { + condition = addGlobalCondition(condition, comp); } - if (addToCondition) { - if (condition == null) { - condition = comp; - } else { - condition = new ConditionAndOr(ConditionAndOr.AND, condition, comp); - } + } + + private static Expression addGlobalCondition(Expression condition, Expression additional) { + if (!(condition instanceof ConditionLocalAndGlobal)) { + return new ConditionLocalAndGlobal(condition, additional); + } + Expression oldLocal, oldGlobal; + if (condition.getSubexpressionCount() == 1) { + oldLocal = null; + oldGlobal = condition.getSubexpression(0); + } else { + oldLocal = condition.getSubexpression(0); + oldGlobal = condition.getSubexpression(1); } + return new ConditionLocalAndGlobal(oldLocal, new ConditionAndOr(ConditionAndOr.AND, oldGlobal, additional)); } @Override - public void updateAggregate(Session s, int stage) 
{ + public void updateAggregate(SessionLocal s, int stage) { for (Expression e : expressions) { e.updateAggregate(s, stage); } @@ -1805,11 +1717,6 @@ public boolean isEverything(ExpressionVisitor visitor) { return true; } - @Override - public boolean isReadOnly() { - return isEverything(ExpressionVisitor.READONLY_VISITOR); - } - @Override public boolean isCacheable() { @@ -1818,13 +1725,61 @@ public boolean isCacheable() { @Override public boolean allowGlobalConditions() { - return offsetExpr == null && (limitExpr == null && distinctExpressions == null || sort == null); + return offsetExpr == null && fetchExpr == null && distinctExpressions == null; } public SortOrder getSortOrder() { return sort; } + /** + * Returns parent select, or null. + * + * @return parent select, or null + */ + public Select getParentSelect() { + return parentSelect; + } + + @Override + public boolean isConstantQuery() { + if (!super.isConstantQuery() || distinctExpressions != null || condition != null || isGroupQuery + || isWindowQuery || !isNoFromClause()) { + return false; + } + for (int i = 0; i < visibleColumnCount; i++) { + if (!expressions.get(i).isConstant()) { + return false; + } + } + return true; + } + + @Override + public Expression getIfSingleRow() { + if (offsetExpr != null || fetchExpr != null || condition != null || isGroupQuery || isWindowQuery + || !isNoFromClause()) { + return null; + } + if (visibleColumnCount == 1) { + return expressions.get(0); + } + Expression[] array = new Expression[visibleColumnCount]; + for (int i = 0; i < visibleColumnCount; i++) { + array[i] = expressions.get(i); + } + return new ExpressionList(array, false); + } + + private boolean isNoFromClause() { + if (topTableFilter != null) { + return topTableFilter.isNoFromClauseFilter(); + } else if (topFilters.size() == 1) { + return topFilters.get(0).isNoFromClauseFilter(); + } + return false; + } + /** * Lazy execution for this select. 
*/ @@ -1834,7 +1789,7 @@ private abstract class LazyResultSelect extends LazyResult { int columnCount; LazyResultSelect(Expression[] expressions, int columnCount) { - super(expressions); + super(getSession(), expressions); this.columnCount = columnCount; setCurrentRowNumber(0); } @@ -1844,18 +1799,9 @@ public final int getVisibleColumnCount() { return visibleColumnCount; } - @Override - public void close() { - if (!isClosed()) { - super.close(); - resetJoinBatchAfterQuery(); - } - } - @Override public void reset() { super.reset(); - resetJoinBatchAfterQuery(); topTableFilter.reset(); setCurrentRowNumber(0); rowNumber = 0; @@ -1867,19 +1813,16 @@ public void reset() { */ private final class LazyResultQueryFlat extends LazyResultSelect { - private int sampleSize; - private boolean forUpdate; - LazyResultQueryFlat(Expression[] expressions, int columnCount, int sampleSize, boolean forUpdate) { + LazyResultQueryFlat(Expression[] expressions, int columnCount, boolean forUpdate) { super(expressions, columnCount); - this.sampleSize = sampleSize; this.forUpdate = forUpdate; } @Override protected Value[] fetchNextRow() { - while ((sampleSize <= 0 || rowNumber < sampleSize) && topTableFilter.next()) { + while (topTableFilter.next()) { setCurrentRowNumber(rowNumber + 1); // This method may lock rows if (forUpdate ? isConditionMetForUpdate() : isConditionMet()) { @@ -1897,7 +1840,7 @@ protected Value[] fetchNextRow() { @Override protected boolean skipNextRow() { - while ((sampleSize <= 0 || rowNumber < sampleSize) && topTableFilter.next()) { + while (topTableFilter.next()) { setCurrentRowNumber(rowNumber + 1); // This method does not lock rows if (isConditionMet()) { @@ -1923,7 +1866,6 @@ private final class LazyResultGroupSorted extends LazyResultSelect { setGroupData(SelectGroups.getInstance(getSession(), Select.this.expressions, isGroupQuery, groupIndex)); } else { - // TODO is this branch possible? 
updateAgg(columnCount, DataAnalysisOperation.STAGE_RESET); groupData.resetLazy(); } @@ -1942,9 +1884,10 @@ protected Value[] fetchNextRow() { setCurrentRowNumber(rowNumber + 1); if (isConditionMet()) { rowNumber++; - Value[] keyValues = new Value[groupIndex.length]; + int groupSize = groupIndex.length; + Value[] keyValues = new Value[groupSize]; // update group - for (int i = 0; i < groupIndex.length; i++) { + for (int i = 0; i < groupSize; i++) { int idx = groupIndex[i]; Expression expr = expressions.get(idx); keyValues[i] = expr.getValue(getSession()); @@ -1954,10 +1897,16 @@ protected Value[] fetchNextRow() { if (previousKeyValues == null) { previousKeyValues = keyValues; groupData.nextLazyGroup(); - } else if (!Arrays.equals(previousKeyValues, keyValues)) { - row = createGroupSortedRow(previousKeyValues, columnCount); - previousKeyValues = keyValues; - groupData.nextLazyGroup(); + } else { + SessionLocal session = getSession(); + for (int i = 0; i < groupSize; i++) { + if (session.compare(previousKeyValues[i], keyValues[i]) != 0) { + row = createGroupSortedRow(previousKeyValues, columnCount); + previousKeyValues = keyValues; + groupData.nextLazyGroup(); + break; + } + } } groupData.nextLazyRow(); updateAgg(columnCount, DataAnalysisOperation.STAGE_GROUP); @@ -1975,13 +1924,4 @@ protected Value[] fetchNextRow() { } } - /** - * Returns parent select, or null. - * - * @return parent select, or null - */ - public Select getParentSelect() { - return parentSelect; - } - } diff --git a/h2/src/main/org/h2/command/dml/SelectGroups.java b/h2/src/main/org/h2/command/query/SelectGroups.java similarity index 92% rename from h2/src/main/org/h2/command/dml/SelectGroups.java rename to h2/src/main/org/h2/command/query/SelectGroups.java index 2bb7d10f7b..ef5e1572ab 100644 --- a/h2/src/main/org/h2/command/dml/SelectGroups.java +++ b/h2/src/main/org/h2/command/query/SelectGroups.java @@ -1,9 +1,9 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.command.dml; +package org.h2.command.query; import java.util.ArrayList; import java.util.Arrays; @@ -13,7 +13,7 @@ import java.util.Map.Entry; import java.util.TreeMap; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.analysis.DataAnalysisOperation; import org.h2.expression.analysis.PartitionData; @@ -64,7 +64,7 @@ private static final class Grouped extends SelectGroups { */ private Iterator> cursor; - Grouped(Session session, ArrayList expressions, int[] groupIndex) { + Grouped(SessionLocal session, ArrayList expressions, int[] groupIndex) { super(session, expressions); this.groupIndex = groupIndex; } @@ -80,7 +80,7 @@ public void reset() { @Override public void nextSource() { if (groupIndex == null) { - currentGroupsKey = ValueRow.getEmpty(); + currentGroupsKey = ValueRow.EMPTY; } else { Value[] keyValues = new Value[groupIndex.length]; // update group @@ -114,7 +114,7 @@ void updateCurrentGroupExprData() { public void done() { super.done(); if (groupIndex == null && groupByData.size() == 0) { - groupByData.put(ValueRow.getEmpty(), createRow()); + groupByData.put(ValueRow.EMPTY, createRow()); } cursor = groupByData.entrySet().iterator(); } @@ -153,7 +153,7 @@ private static final class Plain extends SelectGroups { */ private Iterator cursor; - Plain(Session session, ArrayList expressions) { + Plain(SessionLocal session, ArrayList expressions) { super(session, expressions); } @@ -188,7 +188,7 @@ public ValueRow next() { if (cursor.hasNext()) { currentGroupByExprData = cursor.next(); currentGroupRowId++; - return ValueRow.getEmpty(); + return ValueRow.EMPTY; } return null; } @@ -197,7 +197,7 @@ public 
ValueRow next() { /** * The database session. */ - final Session session; + final SessionLocal session; /** * The query's column list, including invisible expressions such as order by expressions. @@ -243,12 +243,12 @@ public ValueRow next() { * the indexes of group expressions, or null * @return new instance of the grouped data. */ - public static SelectGroups getInstance(Session session, ArrayList expressions, boolean isGroupQuery, - int[] groupIndex) { + public static SelectGroups getInstance(SessionLocal session, ArrayList expressions, + boolean isGroupQuery, int[] groupIndex) { return isGroupQuery ? new Grouped(session, expressions, groupIndex) : new Plain(session, expressions); } - SelectGroups(Session session, ArrayList expressions) { + SelectGroups(SessionLocal session, ArrayList expressions) { this.session = session; this.expressions = expressions; } @@ -430,13 +430,4 @@ public void nextLazyRow() { currentGroupRowId++; } - /** - * Gets the query's column list, including invisible expressions - * such as order by expressions. - * - * @return Expressions. - */ - public ArrayList expressions() { - return expressions; - } } diff --git a/h2/src/main/org/h2/command/dml/SelectListColumnResolver.java b/h2/src/main/org/h2/command/query/SelectListColumnResolver.java similarity index 62% rename from h2/src/main/org/h2/command/dml/SelectListColumnResolver.java rename to h2/src/main/org/h2/command/query/SelectListColumnResolver.java index b531730a93..ec62787f09 100644 --- a/h2/src/main/org/h2/command/dml/SelectListColumnResolver.java +++ b/h2/src/main/org/h2/command/query/SelectListColumnResolver.java @@ -1,17 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ -package org.h2.command.dml; +package org.h2.command.query; import java.util.ArrayList; + +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.table.Column; import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.ColumnNamer; +import org.h2.value.TypeInfo; import org.h2.value.Value; /** @@ -19,7 +21,7 @@ * statement. It is used to resolve select column aliases in the HAVING clause. * Example: *

          - * SELECT X/3 AS A, COUNT(*) FROM SYSTEM_RANGE(1, 10) GROUP BY A HAVING A>2; + * SELECT X/3 AS A, COUNT(*) FROM SYSTEM_RANGE(1, 10) GROUP BY A HAVING A > 2; *

          * * @author Thomas Mueller @@ -36,13 +38,10 @@ public class SelectListColumnResolver implements ColumnResolver { columns = new Column[columnCount]; expressions = new Expression[columnCount]; ArrayList columnList = select.getExpressions(); - ColumnNamer columnNamer= new ColumnNamer(select.getSession()); + SessionLocal session = select.getSession(); for (int i = 0; i < columnCount; i++) { Expression expr = columnList.get(i); - String columnName = columnNamer.getColumnName(expr, i, expr.getAlias()); - Column column = new Column(columnName, Value.NULL); - column.setTable(null, i); - columns[i] = column; + columns[i] = new Column(expr.getAlias(session, i), TypeInfo.TYPE_NULL, null, i); expressions[i] = expr.getNonAliasExpression(); } } @@ -53,12 +52,13 @@ public Column[] getColumns() { } @Override - public String getDerivedColumnName(Column column) { - return null; - } - - @Override - public String getSchemaName() { + public Column findColumn(String name) { + Database db = select.getSession().getDatabase(); + for (Column column : columns) { + if (db.equalsIdentifiers(column.getName(), name)) { + return column; + } + } return null; } @@ -67,26 +67,6 @@ public Select getSelect() { return select; } - @Override - public Column[] getSystemColumns() { - return null; - } - - @Override - public Column getRowIdColumn() { - return null; - } - - @Override - public String getTableAlias() { - return null; - } - - @Override - public TableFilter getTableFilter() { - return null; - } - @Override public Value getValue(Column column) { return null; diff --git a/h2/src/main/org/h2/command/dml/SelectUnion.java b/h2/src/main/org/h2/command/query/SelectUnion.java similarity index 68% rename from h2/src/main/org/h2/command/dml/SelectUnion.java rename to h2/src/main/org/h2/command/query/SelectUnion.java index b1d15a3552..a1388eccfe 100644 --- a/h2/src/main/org/h2/command/dml/SelectUnion.java +++ b/h2/src/main/org/h2/command/query/SelectUnion.java @@ -1,22 +1,20 @@ /* - * Copyright 
2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.command.dml; +package org.h2.command.query; import java.util.ArrayList; import java.util.HashSet; import org.h2.api.ErrorCode; import org.h2.engine.Database; -import org.h2.engine.Mode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; import org.h2.expression.Parameter; -import org.h2.expression.ValueExpression; import org.h2.message.DbException; import org.h2.result.LazyResult; import org.h2.result.LocalResult; @@ -26,10 +24,8 @@ import org.h2.table.ColumnResolver; import org.h2.table.Table; import org.h2.table.TableFilter; -import org.h2.util.ColumnNamer; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueInt; -import org.h2.value.ValueNull; /** * Represents a union SELECT statement. 
@@ -70,10 +66,9 @@ public enum UnionType { */ final Query right; - private boolean isPrepared, checkInit; private boolean isForUpdate; - public SelectUnion(Session session, UnionType unionType, Query query, Query right) { + public SelectUnion(SessionLocal session, UnionType unionType, Query query, Query right) { super(session); this.unionType = unionType; this.left = query; @@ -85,12 +80,6 @@ public boolean isUnion() { return true; } - @Override - public void prepareJoinBatch() { - left.prepareJoinBatch(); - right.prepareJoinBatch(); - } - public UnionType getUnionType() { return unionType; } @@ -103,11 +92,6 @@ public Query getRight() { return right; } - @Override - public void setDistinctIfPossible() { - setDistinct(); - } - private Value[] convert(Value[] values, int columnCount) { Value[] newValues; if (columnCount == values.length) { @@ -118,51 +102,28 @@ private Value[] convert(Value[] values, int columnCount) { // for the value hash set newValues = new Value[columnCount]; } - Mode mode = session.getDatabase().getMode(); for (int i = 0; i < columnCount; i++) { Expression e = expressions.get(i); - newValues[i] = values[i].convertTo(e.getType(), mode, null); + newValues[i] = values[i].convertTo(e.getType(), session); } return newValues; } - @Override - public ResultInterface queryMeta() { - int columnCount = left.getColumnCount(); - LocalResult result = session.getDatabase().getResultFactory().create(session, expressionArray, columnCount); - result.done(); - return result; - } - public LocalResult getEmptyResult() { int columnCount = left.getColumnCount(); - return session.getDatabase().getResultFactory().create(session, expressionArray, columnCount); + return createLocalResult(columnCount); } @Override - protected ResultInterface queryWithoutCache(int maxRows, ResultTarget target) { - if (maxRows != 0) { - // maxRows is set (maxRows 0 means no limit) - int l; - if (limitExpr == null) { - l = -1; - } else { - Value v = limitExpr.getValue(session); - l = v == 
ValueNull.INSTANCE ? -1 : v.getInt(); - } - if (l < 0) { - // for limitExpr, 0 means no rows, and -1 means no limit - l = maxRows; - } else { - l = Math.min(l, maxRows); - } - limitExpr = ValueExpression.get(ValueInt.get(l)); - } + protected ResultInterface queryWithoutCache(long maxRows, ResultTarget target) { + OffsetFetch offsetFetch = getOffsetFetch(maxRows); + long offset = offsetFetch.offset; + long fetch = offsetFetch.fetch; + boolean fetchPercent = offsetFetch.fetchPercent; Database db = session.getDatabase(); if (db.getSettings().optimizeInsertFromSelect) { if (unionType == UnionType.UNION_ALL && target != null) { - if (sort == null && !distinct && maxRows == 0 && - offsetExpr == null && limitExpr == null) { + if (sort == null && !distinct && fetch < 0 && offset == 0) { left.query(0, target); right.query(0, target); return null; @@ -172,24 +133,17 @@ protected ResultInterface queryWithoutCache(int maxRows, ResultTarget target) { int columnCount = left.getColumnCount(); if (session.isLazyQueryExecution() && unionType == UnionType.UNION_ALL && !distinct && sort == null && !randomAccessResult && !isForUpdate && - offsetExpr == null && !fetchPercent && !withTies && isReadOnly()) { - int limit = -1; - if (limitExpr != null) { - Value v = limitExpr.getValue(session); - if (v != ValueNull.INSTANCE) { - limit = v.getInt(); - } - } + offset == 0 && !fetchPercent && !withTies && isReadOnly()) { // limit 0 means no rows - if (limit != 0) { + if (fetch != 0) { LazyResultUnion lazyResult = new LazyResultUnion(expressionArray, columnCount); - if (limit > 0) { - lazyResult.setLimit(limit); + if (fetch > 0) { + lazyResult.setLimit(fetch); } return lazyResult; } } - LocalResult result = db.getResultFactory().create(session, expressionArray, columnCount); + LocalResult result = createLocalResult(columnCount); if (sort != null) { result.setSortOrder(sort); } @@ -212,7 +166,7 @@ protected ResultInterface queryWithoutCache(int maxRows, ResultTarget target) { 
right.setDistinctIfPossible(); break; default: - DbException.throwInternalError("type=" + unionType); + throw DbException.getInternalError("type=" + unionType); } ResultInterface l = left.query(0); ResultInterface r = right.query(0); @@ -239,7 +193,7 @@ protected ResultInterface queryWithoutCache(int maxRows, ResultTarget target) { break; } case INTERSECT: { - LocalResult temp = db.getResultFactory().create(session, expressionArray, columnCount); + LocalResult temp = createLocalResult(columnCount); temp.setDistinct(); while (l.next()) { temp.addRow(convert(l.currentRow(), columnCount)); @@ -254,38 +208,21 @@ protected ResultInterface queryWithoutCache(int maxRows, ResultTarget target) { break; } default: - DbException.throwInternalError("type=" + unionType); - } - if (offsetExpr != null) { - result.setOffset(offsetExpr.getValue(session).getInt()); - } - if (limitExpr != null) { - Value v = limitExpr.getValue(session); - if (v != ValueNull.INSTANCE) { - result.setLimit(v.getInt()); - result.setFetchPercent(fetchPercent); - if (withTies) { - result.setWithTies(sort); - } - } + throw DbException.getInternalError("type=" + unionType); } l.close(); r.close(); - result.done(); - if (target != null) { - while (result.next()) { - target.addRow(result.currentRow()); - } - result.close(); - return null; - } - return result; + return finishResult(result, offset, fetch, fetchPercent, target); + } + + private LocalResult createLocalResult(int columnCount) { + return new LocalResult(session, expressionArray, columnCount, columnCount); } @Override public void init() { if (checkInit) { - DbException.throwInternalError(); + throw DbException.getInternalError(); } checkInit = true; left.init(); @@ -302,6 +239,7 @@ public void init() { Expression l = le.get(i); expressions.add(l); } + visibleColumnCount = len; if (withTies && !hasOrder()) { throw DbException.get(ErrorCode.WITH_TIES_WITHOUT_ORDER_BY); } @@ -314,7 +252,7 @@ public void prepare() { return; } if (!checkInit) { - 
DbException.throwInternalError("not initialized"); + throw DbException.getInternalError("not initialized"); } isPrepared = true; left.prepare(); @@ -324,20 +262,20 @@ public void prepare() { expressions = new ArrayList<>(len); ArrayList le = left.getExpressions(); ArrayList re = right.getExpressions(); - ColumnNamer columnNamer= new ColumnNamer(session); for (int i = 0; i < len; i++) { Expression l = le.get(i); Expression r = re.get(i); - String columnName = columnNamer.getColumnName(l, i, l.getAlias()); - Column col = new Column(columnName, Value.getHigherType(l.getType(), r.getType())); + Column col = new Column(l.getAlias(session, i), TypeInfo.getHigherType(l.getType(), r.getType())); Expression e = new ExpressionColumn(session.getDatabase(), col); expressions.add(e); } if (orderList != null) { - initOrder(session, expressions, null, orderList, getColumnCount(), true, null); - sort = prepareOrder(orderList, expressions.size()); - orderList = null; + if (initOrder(null, true, null)) { + prepareOrder(orderList, expressions.size()); + cleanupOrder(); + } } + resultColumnCount = expressions.size(); expressionArray = expressions.toArray(new Expression[0]); } @@ -360,11 +298,6 @@ public void setForUpdate(boolean forUpdate) { isForUpdate = forUpdate; } - @Override - public int getColumnCount() { - return left.getColumnCount(); - } - @Override public void mapColumns(ColumnResolver resolver, int level) { left.mapColumns(resolver, level); @@ -394,14 +327,14 @@ public void addGlobalCondition(Parameter param, int columnId, break; } default: - DbException.throwInternalError("type=" + unionType); + throw DbException.getInternalError("type=" + unionType); } } @Override - public String getPlanSQL(boolean alwaysQuote) { + public String getPlanSQL(int sqlFlags) { StringBuilder buff = new StringBuilder(); - buff.append('(').append(left.getPlanSQL(alwaysQuote)).append(')'); + buff.append('(').append(left.getPlanSQL(sqlFlags)).append(')'); switch (unionType) { case UNION_ALL: 
buff.append("\nUNION ALL\n"); @@ -416,18 +349,10 @@ public String getPlanSQL(boolean alwaysQuote) { buff.append("\nEXCEPT\n"); break; default: - DbException.throwInternalError("type=" + unionType); - } - buff.append('(').append(right.getPlanSQL(alwaysQuote)).append(')'); - Expression[] exprList = expressions.toArray(new Expression[0]); - if (sort != null) { - buff.append("\nORDER BY ").append(sort.getSQL(exprList, exprList.length, alwaysQuote)); - } - appendLimitToSQL(buff, alwaysQuote); - if (sampleSizeExpr != null) { - buff.append("\nSAMPLE_SIZE "); - sampleSizeExpr.getUnenclosedSQL(buff, alwaysQuote); + throw DbException.getInternalError("type=" + unionType); } + buff.append('(').append(right.getPlanSQL(sqlFlags)).append(')'); + appendEndOfQueryToSQL(buff, sqlFlags, expressions.toArray(new Expression[0])); if (isForUpdate) { buff.append("\nFOR UPDATE"); } @@ -440,12 +365,7 @@ public boolean isEverything(ExpressionVisitor visitor) { } @Override - public boolean isReadOnly() { - return left.isReadOnly() && right.isReadOnly(); - } - - @Override - public void updateAggregate(Session s, int stage) { + public void updateAggregate(SessionLocal s, int stage) { left.updateAggregate(s, stage); right.updateAggregate(s, stage); } @@ -461,6 +381,11 @@ public boolean allowGlobalConditions() { return left.allowGlobalConditions() && right.allowGlobalConditions(); } + @Override + public boolean isConstantQuery() { + return super.isConstantQuery() && left.isConstantQuery() && right.isConstantQuery(); + } + /** * Lazy execution for this union. 
*/ @@ -473,7 +398,7 @@ private final class LazyResultUnion extends LazyResult { boolean rightDone; LazyResultUnion(Expression[] expressions, int columnCount) { - super(expressions); + super(getSession(), expressions); this.columnCount = columnCount; } diff --git a/h2/src/main/org/h2/command/query/TableValueConstructor.java b/h2/src/main/org/h2/command/query/TableValueConstructor.java new file mode 100644 index 0000000000..82d171fa3c --- /dev/null +++ b/h2/src/main/org/h2/command/query/TableValueConstructor.java @@ -0,0 +1,400 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.query; + +import static org.h2.expression.Expression.WITHOUT_PARENTHESES; +import static org.h2.util.HasSQL.DEFAULT_SQL_FLAGS; + +import java.util.ArrayList; +import java.util.HashSet; + +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionList; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.table.Table; +import org.h2.table.TableFilter; +import org.h2.table.TableValueConstructorTable; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Table value constructor. + */ +public class TableValueConstructor extends Query { + + private final ArrayList> rows; + + /** + * The table. + */ + TableValueConstructorTable table; + + private TableValueColumnResolver columnResolver; + + private double cost; + + /** + * Creates new instance of table value constructor. 
+ * + * @param session + * the session + * @param rows + * the rows + */ + public TableValueConstructor(SessionLocal session, ArrayList> rows) { + super(session); + this.rows = rows; + if ((visibleColumnCount = rows.get(0).size()) > Constants.MAX_COLUMNS) { + throw DbException.get(ErrorCode.TOO_MANY_COLUMNS_1, "" + Constants.MAX_COLUMNS); + } + for (ArrayList row : rows) { + for (Expression column : row) { + if (!column.isConstant()) { + return; + } + } + } + createTable(); + } + + /** + * Appends visible columns of all rows to the specified result. + * + * @param session + * the session + * @param result + * the result + * @param columns + * the columns + * @param rows + * the rows with data + */ + public static void getVisibleResult(SessionLocal session, ResultTarget result, Column[] columns, + ArrayList> rows) { + int count = columns.length; + for (ArrayList row : rows) { + Value[] values = new Value[count]; + for (int i = 0; i < count; i++) { + values[i] = row.get(i).getValue(session).convertTo(columns[i].getType(), session); + } + result.addRow(values); + } + } + + /** + * Appends the SQL of the values to the specified string builder.. 
+ * + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @param rows + * the values + */ + public static void getValuesSQL(StringBuilder builder, int sqlFlags, ArrayList> rows) { + builder.append("VALUES "); + int rowCount = rows.size(); + for (int i = 0; i < rowCount; i++) { + if (i > 0) { + builder.append(", "); + } + Expression.writeExpressions(builder.append('('), rows.get(i), sqlFlags).append(')'); + } + } + + @Override + public boolean isUnion() { + return false; + } + + @Override + protected ResultInterface queryWithoutCache(long limit, ResultTarget target) { + OffsetFetch offsetFetch = getOffsetFetch(limit); + long offset = offsetFetch.offset; + long fetch = offsetFetch.fetch; + boolean fetchPercent = offsetFetch.fetchPercent; + int visibleColumnCount = this.visibleColumnCount, resultColumnCount = this.resultColumnCount; + LocalResult result = new LocalResult(session, expressionArray, visibleColumnCount, resultColumnCount); + if (sort != null) { + result.setSortOrder(sort); + } + if (distinct) { + result.setDistinct(); + } + Column[] columns = table.getColumns(); + if (visibleColumnCount == resultColumnCount) { + getVisibleResult(session, result, columns, rows); + } else { + for (ArrayList row : rows) { + Value[] values = new Value[resultColumnCount]; + for (int i = 0; i < visibleColumnCount; i++) { + values[i] = row.get(i).getValue(session).convertTo(columns[i].getType(), session); + } + columnResolver.currentRow = values; + for (int i = visibleColumnCount; i < resultColumnCount; i++) { + values[i] = expressionArray[i].getValue(session); + } + result.addRow(values); + } + columnResolver.currentRow = null; + } + return finishResult(result, offset, fetch, fetchPercent, target); + } + + @Override + public void init() { + if (checkInit) { + throw DbException.getInternalError(); + } + checkInit = true; + if (withTies && !hasOrder()) { + throw DbException.get(ErrorCode.WITH_TIES_WITHOUT_ORDER_BY); + } + } + + @Override + public void 
prepare() { + if (isPrepared) { + // sometimes a subquery is prepared twice (CREATE TABLE AS SELECT) + return; + } + if (!checkInit) { + throw DbException.getInternalError("not initialized"); + } + isPrepared = true; + if (columnResolver == null) { + createTable(); + } + if (orderList != null) { + ArrayList expressionsSQL = new ArrayList<>(); + for (Expression e : expressions) { + expressionsSQL.add(e.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES)); + } + if (initOrder(expressionsSQL, false, null)) { + prepareOrder(orderList, expressions.size()); + } + } + resultColumnCount = expressions.size(); + for (int i = 0; i < resultColumnCount; i++) { + expressions.get(i).mapColumns(columnResolver, 0, Expression.MAP_INITIAL); + } + for (int i = visibleColumnCount; i < resultColumnCount; i++) { + expressions.set(i, expressions.get(i).optimize(session)); + } + if (sort != null) { + cleanupOrder(); + } + expressionArray = expressions.toArray(new Expression[0]); + double cost = 0; + int columnCount = visibleColumnCount; + for (ArrayList r : rows) { + for (int i = 0; i < columnCount; i++) { + cost += r.get(i).getCost(); + } + } + this.cost = cost + rows.size(); + } + + private void createTable() { + int rowCount = rows.size(); + ArrayList row = rows.get(0); + int columnCount = row.size(); + TypeInfo[] types = new TypeInfo[columnCount]; + for (int c = 0; c < columnCount; c++) { + Expression e = row.get(c).optimize(session); + row.set(c, e); + TypeInfo type = e.getType(); + if (type.getValueType() == Value.UNKNOWN) { + type = TypeInfo.TYPE_VARCHAR; + } + types[c] = type; + } + for (int r = 1; r < rowCount; r++) { + row = rows.get(r); + for (int c = 0; c < columnCount; c++) { + Expression e = row.get(c).optimize(session); + row.set(c, e); + types[c] = TypeInfo.getHigherType(types[c], e.getType()); + } + } + Column[] columns = new Column[columnCount]; + for (int c = 0; c < columnCount;) { + TypeInfo type = types[c]; + columns[c] = new Column("C" + ++c, type); + } + Database database 
= session.getDatabase(); + ArrayList expressions = new ArrayList<>(columnCount); + for (int i = 0; i < columnCount; i++) { + expressions.add(new ExpressionColumn(database, null, null, columns[i].getName())); + } + this.expressions = expressions; + table = new TableValueConstructorTable(session.getDatabase().getMainSchema(), session, columns, rows); + columnResolver = new TableValueColumnResolver(); + } + + @Override + public double getCost() { + return cost; + } + + @Override + public HashSet
          getTables() { + HashSet
          tables = new HashSet<>(1, 1f); + tables.add(table); + return tables; + } + + @Override + public void setForUpdate(boolean forUpdate) { + throw DbException.get(ErrorCode.RESULT_SET_READONLY); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level) { + int columnCount = visibleColumnCount; + for (ArrayList row : rows) { + for (int i = 0; i < columnCount; i++) { + row.get(i).mapColumns(resolver, level, Expression.MAP_INITIAL); + } + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + int columnCount = visibleColumnCount; + for (ArrayList row : rows) { + for (int i = 0; i < columnCount; i++) { + row.get(i).setEvaluatable(tableFilter, b); + } + } + } + + @Override + public void addGlobalCondition(Parameter param, int columnId, int comparisonType) { + // Can't add + } + + @Override + public boolean allowGlobalConditions() { + return false; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + ExpressionVisitor v2 = visitor.incrementQueryLevel(1); + for (Expression e : expressionArray) { + if (!e.isEverything(v2)) { + return false; + } + } + return true; + } + + @Override + public void updateAggregate(SessionLocal s, int stage) { + int columnCount = visibleColumnCount; + for (ArrayList row : rows) { + for (int i = 0; i < columnCount; i++) { + row.get(i).updateAggregate(s, stage); + } + } + } + + @Override + public void fireBeforeSelectTriggers() { + // Nothing to do + } + + @Override + public String getPlanSQL(int sqlFlags) { + StringBuilder builder = new StringBuilder(); + getValuesSQL(builder, sqlFlags, rows); + appendEndOfQueryToSQL(builder, sqlFlags, expressionArray); + return builder.toString(); + } + + @Override + public Table toTable(String alias, Column[] columnTemplates, ArrayList parameters, + boolean forCreateView, Query topQuery) { + if (!hasOrder() && offsetExpr == null && fetchExpr == null && table != null) { + return table; + } + return super.toTable(alias, 
columnTemplates, parameters, forCreateView, topQuery); + } + + @Override + public boolean isConstantQuery() { + if (!super.isConstantQuery()) { + return false; + } + for (ArrayList row : rows) { + for (int i = 0; i < visibleColumnCount; i++) { + if (!row.get(i).isConstant()) { + return false; + } + } + } + return true; + } + + @Override + public Expression getIfSingleRow() { + if (offsetExpr != null || fetchExpr != null || rows.size() != 1) { + return null; + } + ArrayList row = rows.get(0); + if (visibleColumnCount == 1) { + return row.get(0); + } + Expression[] array = new Expression[visibleColumnCount]; + for (int i = 0; i < visibleColumnCount; i++) { + array[i] = row.get(i); + } + return new ExpressionList(array, false); + } + + private final class TableValueColumnResolver implements ColumnResolver { + + Value[] currentRow; + + TableValueColumnResolver() { + } + + @Override + public Column[] getColumns() { + return table.getColumns(); + } + + @Override + public Column findColumn(String name) { + return table.findColumn(name); + } + + @Override + public Value getValue(Column column) { + return currentRow[column.getColumnId()]; + } + + @Override + public Expression optimize(ExpressionColumn expressionColumn, Column column) { + return expressions.get(column.getColumnId()); + } + + } + +} diff --git a/h2/src/tools/org/h2/build/i18n/package.html b/h2/src/main/org/h2/command/query/package.html similarity index 75% rename from h2/src/tools/org/h2/build/i18n/package.html rename to h2/src/main/org/h2/command/query/package.html index 33ba9ff2df..80f0d16539 100644 --- a/h2/src/tools/org/h2/build/i18n/package.html +++ b/h2/src/main/org/h2/command/query/package.html @@ -1,7 +1,7 @@ @@ -9,6 +9,6 @@ Javadoc package documentation

          -Internationalization tools. +Contains queries.

          \ No newline at end of file diff --git a/h2/src/main/org/h2/compress/CompressDeflate.java b/h2/src/main/org/h2/compress/CompressDeflate.java index 7d3cf3165f..0a1f722a05 100644 --- a/h2/src/main/org/h2/compress/CompressDeflate.java +++ b/h2/src/main/org/h2/compress/CompressDeflate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.compress; @@ -11,7 +11,7 @@ import java.util.zip.Inflater; import org.h2.api.ErrorCode; -import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; /** * This is a wrapper class for the Deflater class. @@ -47,24 +47,24 @@ public void setOptions(String options) { deflater.setStrategy(strategy); } } catch (Exception e) { - throw DbException.get(ErrorCode.UNSUPPORTED_COMPRESSION_OPTIONS_1, options); + throw DataUtils.newMVStoreException(ErrorCode.UNSUPPORTED_COMPRESSION_OPTIONS_1, options); } } @Override - public int compress(byte[] in, int inLen, byte[] out, int outPos) { + public int compress(byte[] in, int inPos, int inLen, byte[] out, int outPos) { Deflater deflater = new Deflater(level); deflater.setStrategy(strategy); - deflater.setInput(in, 0, inLen); + deflater.setInput(in, inPos, inLen); deflater.finish(); int compressed = deflater.deflate(out, outPos, out.length - outPos); - while (compressed == 0) { + if (compressed == 0) { // the compressed length is 0, meaning compression didn't work // (sounds like a JDK bug) // try again, using the default strategy and compression level strategy = Deflater.DEFAULT_STRATEGY; level = Deflater.DEFAULT_COMPRESSION; - return compress(in, inLen, out, outPos); + return compress(in, inPos, inLen, out, outPos); } deflater.end(); return outPos + compressed; @@ -87,7 +87,7 @@ public void 
expand(byte[] in, int inPos, int inLen, byte[] out, int outPos, throw new DataFormatException(len + " " + outLen); } } catch (DataFormatException e) { - throw DbException.get(ErrorCode.COMPRESSION_ERROR, e); + throw DataUtils.newMVStoreException(ErrorCode.COMPRESSION_ERROR, e.getMessage(), e); } decompresser.end(); } diff --git a/h2/src/main/org/h2/compress/CompressLZF.java b/h2/src/main/org/h2/compress/CompressLZF.java index ba145a7414..952a4e53b8 100644 --- a/h2/src/main/org/h2/compress/CompressLZF.java +++ b/h2/src/main/org/h2/compress/CompressLZF.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * * This code is based on the LZF algorithm from Marc Lehmann. It is a * re-implementation of the C code: @@ -155,15 +155,16 @@ private static int hash(int h) { } @Override - public int compress(byte[] in, int inLen, byte[] out, int outPos) { - int inPos = 0; + public int compress(byte[] in, int inPos, int inLen, byte[] out, int outPos) { + int offset = inPos; + inLen += inPos; if (cachedHashTable == null) { cachedHashTable = new int[HASH_SIZE]; } int[] hashTab = cachedHashTable; int literals = 0; outPos++; - int future = first(in, 0); + int future = first(in, inPos); while (inPos < inLen - 4) { byte p2 = in[inPos + 2]; // next @@ -178,7 +179,7 @@ public int compress(byte[] in, int inLen, byte[] out, int outPos) { // && (((in[ref] & 255) << 8) | (in[ref + 1] & 255)) == // ((future >> 8) & 0xffff)) { if (ref < inPos - && ref > 0 + && ref > offset && (off = inPos - ref - 1) < MAX_OFF && in[ref + 2] == p2 && in[ref + 1] == (byte) (future >> 8) @@ -265,14 +266,15 @@ public int compress(byte[] in, int inLen, byte[] out, int outPos) { * @return the end position */ public int compress(ByteBuffer in, int inPos, byte[] out, int outPos) { - 
int inLen = in.capacity() - inPos; + int offset = inPos; + int inLen = in.capacity(); if (cachedHashTable == null) { cachedHashTable = new int[HASH_SIZE]; } int[] hashTab = cachedHashTable; int literals = 0; outPos++; - int future = first(in, 0); + int future = first(in, inPos); while (inPos < inLen - 4) { byte p2 = in.get(inPos + 2); // next @@ -287,7 +289,7 @@ public int compress(ByteBuffer in, int inPos, byte[] out, int outPos) { // && (((in[ref] & 255) << 8) | (in[ref + 1] & 255)) == // ((future >> 8) & 0xffff)) { if (ref < inPos - && ref > 0 + && ref > offset && (off = inPos - ref - 1) < MAX_OFF && in.get(ref + 2) == p2 && in.get(ref + 1) == (byte) (future >> 8) diff --git a/h2/src/main/org/h2/compress/CompressNo.java b/h2/src/main/org/h2/compress/CompressNo.java index 7fe0291465..df7c1fb4f9 100644 --- a/h2/src/main/org/h2/compress/CompressNo.java +++ b/h2/src/main/org/h2/compress/CompressNo.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.compress; @@ -23,8 +23,8 @@ public void setOptions(String options) { } @Override - public int compress(byte[] in, int inLen, byte[] out, int outPos) { - System.arraycopy(in, 0, out, outPos, inLen); + public int compress(byte[] in, int inPos, int inLen, byte[] out, int outPos) { + System.arraycopy(in, inPos, out, outPos, inLen); return outPos + inLen; } diff --git a/h2/src/main/org/h2/compress/Compressor.java b/h2/src/main/org/h2/compress/Compressor.java index 735ca104b9..4970ff0b57 100644 --- a/h2/src/main/org/h2/compress/Compressor.java +++ b/h2/src/main/org/h2/compress/Compressor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.compress; @@ -37,12 +37,13 @@ public interface Compressor { * Compress a number of bytes. * * @param in the input data + * @param inPos the offset at the input array * @param inLen the number of bytes to compress * @param out the output area * @param outPos the offset at the output array * @return the end position */ - int compress(byte[] in, int inLen, byte[] out, int outPos); + int compress(byte[] in, int inPos, int inLen, byte[] out, int outPos); /** * Expand a number of compressed bytes. diff --git a/h2/src/main/org/h2/compress/LZFInputStream.java b/h2/src/main/org/h2/compress/LZFInputStream.java index 6f8ad817ea..5586841b86 100644 --- a/h2/src/main/org/h2/compress/LZFInputStream.java +++ b/h2/src/main/org/h2/compress/LZFInputStream.java @@ -1,13 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.compress; import java.io.IOException; import java.io.InputStream; -import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; import org.h2.util.Utils; /** @@ -55,7 +55,7 @@ private void fillBuffer() throws IOException { try { decompress.expand(inBuffer, 0, len, buffer, 0, size); } catch (ArrayIndexOutOfBoundsException e) { - DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } this.bufferLength = size; } diff --git a/h2/src/main/org/h2/compress/LZFOutputStream.java b/h2/src/main/org/h2/compress/LZFOutputStream.java index 0db73e6fd1..e2b7aa2a04 100644 --- a/h2/src/main/org/h2/compress/LZFOutputStream.java +++ b/h2/src/main/org/h2/compress/LZFOutputStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.compress; @@ -54,7 +54,7 @@ public void write(int b) throws IOException { private void compressAndWrite(byte[] buff, int len) throws IOException { if (len > 0) { ensureOutput(len); - int compressed = compress.compress(buff, len, outBuffer, 0); + int compressed = compress.compress(buff, 0, len, outBuffer, 0); if (compressed > len) { writeInt(-len); out.write(buff, 0, len); diff --git a/h2/src/main/org/h2/compress/package.html b/h2/src/main/org/h2/compress/package.html index be064c3f52..3c1c6d9b1f 100644 --- a/h2/src/main/org/h2/compress/package.html +++ b/h2/src/main/org/h2/compress/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/constraint/Constraint.java b/h2/src/main/org/h2/constraint/Constraint.java index c220312c65..762b267643 100644 --- a/h2/src/main/org/h2/constraint/Constraint.java +++ b/h2/src/main/org/h2/constraint/Constraint.java @@ -1,27 +1,27 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.constraint; import java.util.HashSet; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; import org.h2.index.Index; import org.h2.message.Trace; import org.h2.result.Row; import org.h2.schema.Schema; -import org.h2.schema.SchemaObjectBase; +import org.h2.schema.SchemaObject; import org.h2.table.Column; import org.h2.table.Table; /** * The base class for constraint checking. 
*/ -public abstract class Constraint extends SchemaObjectBase implements - Comparable { +public abstract class Constraint extends SchemaObject implements Comparable { public enum Type { /** @@ -39,7 +39,11 @@ public enum Type { /** * The constraint type for referential constraints. */ - REFERENTIAL; + REFERENTIAL, + /** + * The constraint type for domain constraints. + */ + DOMAIN; /** * Get standard SQL type name. @@ -66,7 +70,9 @@ public String getSqlName() { Constraint(Schema schema, int id, String name, Table table) { super(schema, id, name, Trace.CONSTRAINT); this.table = table; - this.setTemporary(table.isTemporary()); + if (table != null) { + this.setTemporary(table.isTemporary()); + } } /** @@ -85,7 +91,7 @@ public String getSqlName() { * @param oldRow the old row * @param newRow the new row */ - public abstract void checkRow(Session session, Table t, Row oldRow, Row newRow); + public abstract void checkRow(SessionLocal session, Table t, Row oldRow, Row newRow); /** * Check if this constraint needs the specified index. @@ -110,6 +116,15 @@ public String getSqlName() { */ public abstract HashSet getReferencedColumns(Table table); + /** + * Returns the CHECK expression or null. + * + * @return the CHECK expression or null. + */ + public Expression getExpression() { + return null; + } + /** * Get the SQL statement to create this constraint. * @@ -130,7 +145,7 @@ public String getSqlName() { * * @param session the session */ - public abstract void checkExistingData(Session session); + public abstract void checkExistingData(SessionLocal session); /** * This method is called after a related table has changed @@ -139,16 +154,22 @@ public String getSqlName() { public abstract void rebuild(); /** - * Get the unique index used to enforce this constraint, or null if no index + * Get the index of this constraint in the source table, or null if no index * is used. 
* * @return the index */ - public abstract Index getUniqueIndex(); + public Index getIndex() { + return null; + } - @Override - public void checkRename() { - // ok + /** + * Returns the referenced unique constraint, or null. + * + * @return the referenced unique constraint, or null + */ + public ConstraintUnique getReferencedConstraint() { + return null; } @Override @@ -164,11 +185,6 @@ public Table getRefTable() { return table; } - @Override - public String getDropSQL() { - return null; - } - @Override public int compareTo(Constraint other) { if (this == other) { @@ -179,7 +195,7 @@ public int compareTo(Constraint other) { @Override public boolean isHidden() { - return table.isHidden(); + return table != null && table.isHidden(); } /** diff --git a/h2/src/main/org/h2/constraint/ConstraintActionType.java b/h2/src/main/org/h2/constraint/ConstraintActionType.java index ba9aab66fb..b5e3b8fc6c 100644 --- a/h2/src/main/org/h2/constraint/ConstraintActionType.java +++ b/h2/src/main/org/h2/constraint/ConstraintActionType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.constraint; diff --git a/h2/src/main/org/h2/constraint/ConstraintCheck.java b/h2/src/main/org/h2/constraint/ConstraintCheck.java index 888f13b983..a453b23705 100644 --- a/h2/src/main/org/h2/constraint/ConstraintCheck.java +++ b/h2/src/main/org/h2/constraint/ConstraintCheck.java @@ -1,13 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.constraint; import java.util.HashSet; import org.h2.api.ErrorCode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; import org.h2.index.Index; @@ -20,7 +20,6 @@ import org.h2.table.TableFilter; import org.h2.util.StringUtils; import org.h2.value.Value; -import org.h2.value.ValueNull; /** * A check constraint. @@ -50,7 +49,7 @@ public void setExpression(Expression expr) { @Override public String getCreateSQLForCopy(Table forTable, String quotedName) { StringBuilder buff = new StringBuilder("ALTER TABLE "); - forTable.getSQL(buff, true).append(" ADD CONSTRAINT "); + forTable.getSQL(buff, DEFAULT_SQL_FLAGS).append(" ADD CONSTRAINT "); if (forTable.isHidden()) { buff.append("IF NOT EXISTS "); } @@ -59,14 +58,14 @@ public String getCreateSQLForCopy(Table forTable, String quotedName) { buff.append(" COMMENT "); StringUtils.quoteStringSQL(buff, comment); } - buff.append(" CHECK("); - expr.getUnenclosedSQL(buff, true).append(") NOCHECK"); + buff.append(" CHECK"); + expr.getEnclosedSQL(buff, DEFAULT_SQL_FLAGS).append(" NOCHECK"); return buff.toString(); } private String getShortDescription() { StringBuilder builder = new StringBuilder().append(getName()).append(": "); - expr.getSQL(builder, false); + expr.getTraceSQL(); return builder.toString(); } @@ -77,11 +76,11 @@ public String getCreateSQLWithoutIndexes() { @Override public String getCreateSQL() { - return getCreateSQLForCopy(table, getSQL(true)); + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS)); } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { table.removeConstraint(this); database.removeMeta(session, getId()); filter = null; @@ -91,23 +90,24 @@ public void removeChildrenAndResources(Session session) { } @Override - public void checkRow(Session session, Table t, Row oldRow, 
Row newRow) { + public void checkRow(SessionLocal session, Table t, Row oldRow, Row newRow) { if (newRow == null) { return; } - filter.set(newRow); boolean b; try { - Value v = expr.getValue(session); + Value v; + synchronized (this) { + filter.set(newRow); + v = expr.getValue(session); + } // Both TRUE and NULL are ok - b = v == ValueNull.INSTANCE || v.getBoolean(); + b = v.isFalse(); } catch (DbException ex) { - throw DbException.get(ErrorCode.CHECK_CONSTRAINT_INVALID, ex, - getShortDescription()); + throw DbException.get(ErrorCode.CHECK_CONSTRAINT_INVALID, ex, getShortDescription()); } - if (!b) { - throw DbException.get(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, - getShortDescription()); + if (b) { + throw DbException.get(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, getShortDescription()); } } @@ -118,7 +118,7 @@ public boolean usesIndex(Index index) { @Override public void setIndexOwner(Index index) { - DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } @Override @@ -128,6 +128,7 @@ public HashSet getReferencedColumns(Table table) { return columns; } + @Override public Expression getExpression() { return expr; } @@ -138,14 +139,14 @@ public boolean isBefore() { } @Override - public void checkExistingData(Session session) { + public void checkExistingData(SessionLocal session) { if (session.getDatabase().isStarting()) { // don't check at startup return; } - StringBuilder builder = new StringBuilder().append("SELECT 1 FROM "); - filter.getTable().getSQL(builder, true).append(" WHERE NOT("); - expr.getSQL(builder, true).append(')'); + StringBuilder builder = new StringBuilder().append("SELECT NULL FROM "); + filter.getTable().getSQL(builder, DEFAULT_SQL_FLAGS).append(" WHERE NOT "); + expr.getSQL(builder, DEFAULT_SQL_FLAGS, Expression.AUTO_PARENTHESES); String sql = builder.toString(); ResultInterface r = session.prepare(sql).query(1); if (r.next()) { @@ -153,11 +154,6 @@ public void checkExistingData(Session session) { } } - 
@Override - public Index getUniqueIndex() { - return null; - } - @Override public void rebuild() { // nothing to do diff --git a/h2/src/main/org/h2/constraint/ConstraintDomain.java b/h2/src/main/org/h2/constraint/ConstraintDomain.java new file mode 100644 index 0000000000..c866c808bb --- /dev/null +++ b/h2/src/main/org/h2/constraint/ConstraintDomain.java @@ -0,0 +1,240 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.constraint; + +import java.util.HashSet; + +import org.h2.api.ErrorCode; +import org.h2.command.Parser; +import org.h2.command.ddl.AlterDomain; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.result.Row; +import org.h2.schema.Domain; +import org.h2.schema.Schema; +import org.h2.table.Column; +import org.h2.table.PlanItem; +import org.h2.table.Table; +import org.h2.table.TableFilter; +import org.h2.util.StringUtils; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A domain constraint. + */ +public class ConstraintDomain extends Constraint { + + private Domain domain; + + private Expression expr; + + private DomainColumnResolver resolver; + + public ConstraintDomain(Schema schema, int id, String name, Domain domain) { + super(schema, id, name, null); + this.domain = domain; + resolver = new DomainColumnResolver(domain.getDataType()); + } + + @Override + public Type getConstraintType() { + return Constraint.Type.DOMAIN; + } + + /** + * Returns the domain of this constraint. + * + * @return the domain + */ + public Domain getDomain() { + return domain; + } + + /** + * Set the expression. 
+ * + * @param session the session + * @param expr the expression + */ + public void setExpression(SessionLocal session, Expression expr) { + expr.mapColumns(resolver, 0, Expression.MAP_INITIAL); + expr = expr.optimize(session); + // check if the column is mapped + synchronized (this) { + resolver.setValue(ValueNull.INSTANCE); + expr.getValue(session); + } + this.expr = expr; + } + + @Override + public String getCreateSQLForCopy(Table forTable, String quotedName) { + throw DbException.getInternalError(toString()); + } + + @Override + public String getCreateSQLWithoutIndexes() { + return getCreateSQL(); + } + + @Override + public String getCreateSQL() { + StringBuilder builder = new StringBuilder("ALTER DOMAIN "); + domain.getSQL(builder, DEFAULT_SQL_FLAGS).append(" ADD CONSTRAINT "); + getSQL(builder, DEFAULT_SQL_FLAGS); + if (comment != null) { + builder.append(" COMMENT "); + StringUtils.quoteStringSQL(builder, comment); + } + builder.append(" CHECK"); + expr.getEnclosedSQL(builder, DEFAULT_SQL_FLAGS).append(" NOCHECK"); + return builder.toString(); + } + + @Override + public void removeChildrenAndResources(SessionLocal session) { + domain.removeConstraint(this); + database.removeMeta(session, getId()); + domain = null; + expr = null; + invalidate(); + } + + @Override + public void checkRow(SessionLocal session, Table t, Row oldRow, Row newRow) { + throw DbException.getInternalError(toString()); + } + + /** + * Check the specified value. + * + * @param session + * the session + * @param value + * the value to check + */ + public void check(SessionLocal session, Value value) { + Value v; + synchronized (this) { + resolver.setValue(value); + v = expr.getValue(session); + } + // Both TRUE and NULL are OK + if (v.isFalse()) { + throw DbException.get(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, expr.getTraceSQL()); + } + } + + /** + * Get the check constraint expression for this column. 
+ * + * @param session the session + * @param columnName the column name + * @return the expression + */ + public Expression getCheckConstraint(SessionLocal session, String columnName) { + String sql; + if (columnName != null) { + synchronized (this) { + try { + resolver.setColumnName(columnName); + sql = expr.getSQL(DEFAULT_SQL_FLAGS); + } finally { + resolver.resetColumnName(); + } + } + return new Parser(session).parseExpression(sql); + } else { + synchronized (this) { + sql = expr.getSQL(DEFAULT_SQL_FLAGS); + } + return new Parser(session).parseDomainConstraintExpression(sql); + } + } + + @Override + public boolean usesIndex(Index index) { + return false; + } + + @Override + public void setIndexOwner(Index index) { + throw DbException.getInternalError(toString()); + } + + @Override + public HashSet getReferencedColumns(Table table) { + HashSet columns = new HashSet<>(); + expr.isEverything(ExpressionVisitor.getColumnsVisitor(columns, table)); + return columns; + } + + @Override + public Expression getExpression() { + return expr; + } + + @Override + public boolean isBefore() { + return true; + } + + @Override + public void checkExistingData(SessionLocal session) { + if (session.getDatabase().isStarting()) { + // don't check at startup + return; + } + new CheckExistingData(session, domain); + } + + @Override + public void rebuild() { + // nothing to do + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return expr.isEverything(visitor); + } + + private class CheckExistingData { + + private final SessionLocal session; + + CheckExistingData(SessionLocal session, Domain domain) { + this.session = session; + checkDomain(null, domain); + } + + private boolean checkColumn(Domain domain, Column targetColumn) { + Table table = targetColumn.getTable(); + TableFilter filter = new TableFilter(session, table, null, true, null, 0, null); + TableFilter[] filters = { filter }; + PlanItem item = filter.getBestPlanItem(session, filters, 0, new 
AllColumnsForPlan(filters)); + filter.setPlanItem(item); + filter.prepare(); + filter.startQuery(session); + filter.reset(); + while (filter.next()) { + check(session, filter.getValue(targetColumn)); + } + return false; + } + + private boolean checkDomain(Domain domain, Domain targetDomain) { + AlterDomain.forAllDependencies(session, targetDomain, this::checkColumn, this::checkDomain, false); + return false; + } + + } + +} diff --git a/h2/src/main/org/h2/constraint/ConstraintReferential.java b/h2/src/main/org/h2/constraint/ConstraintReferential.java index e48ad4c145..7bdde5c130 100644 --- a/h2/src/main/org/h2/constraint/ConstraintReferential.java +++ b/h2/src/main/org/h2/constraint/ConstraintReferential.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.constraint; @@ -9,7 +9,7 @@ import java.util.HashSet; import org.h2.api.ErrorCode; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.Parameter; import org.h2.index.Cursor; @@ -37,9 +37,8 @@ public class ConstraintReferential extends Constraint { private ConstraintActionType updateAction = ConstraintActionType.RESTRICT; private Table refTable; private Index index; - private Index refIndex; + private ConstraintUnique refConstraint; private boolean indexOwner; - private boolean refIndexOwner; private String deleteSQL, updateSQL; private boolean skipOwnTable; @@ -78,7 +77,7 @@ public String getCreateSQLForCopy(Table forTable, String quotedName) { public String getCreateSQLForCopy(Table forTable, Table forRefTable, String quotedName, boolean internalIndex) { StringBuilder builder = new StringBuilder("ALTER TABLE "); - forTable.getSQL(builder, true).append(" ADD CONSTRAINT "); + forTable.getSQL(builder, DEFAULT_SQL_FLAGS).append(" ADD CONSTRAINT "); if (forTable.isHidden()) { builder.append("IF NOT EXISTS "); } @@ -90,26 +89,22 @@ public String getCreateSQLForCopy(Table forTable, Table forRefTable, IndexColumn[] cols = columns; IndexColumn[] refCols = refColumns; builder.append(" FOREIGN KEY("); - IndexColumn.writeColumns(builder, cols, true); + IndexColumn.writeColumns(builder, cols, DEFAULT_SQL_FLAGS); builder.append(')'); if (internalIndex && indexOwner && forTable == this.table) { builder.append(" INDEX "); - index.getSQL(builder, true); + index.getSQL(builder, DEFAULT_SQL_FLAGS); } builder.append(" REFERENCES "); if (this.table == this.refTable) { // self-referencing constraints: need to use new table - forTable.getSQL(builder, true); + forTable.getSQL(builder, DEFAULT_SQL_FLAGS); } else { - forRefTable.getSQL(builder, true); + forRefTable.getSQL(builder, DEFAULT_SQL_FLAGS); } builder.append('('); - 
IndexColumn.writeColumns(builder, refCols, true); + IndexColumn.writeColumns(builder, refCols, DEFAULT_SQL_FLAGS); builder.append(')'); - if (internalIndex && refIndexOwner && forTable == this.table) { - builder.append(" INDEX "); - refIndex.getSQL(builder, true); - } if (deleteAction != ConstraintActionType.RESTRICT) { builder.append(" ON DELETE ").append(deleteAction.getSqlName()); } @@ -130,11 +125,11 @@ public String getCreateSQLForCopy(Table forTable, Table forRefTable, */ private String getShortDescription(Index searchIndex, SearchRow check) { StringBuilder builder = new StringBuilder(getName()).append(": "); - table.getSQL(builder, false).append(" FOREIGN KEY("); - IndexColumn.writeColumns(builder, columns, false); + table.getSQL(builder, TRACE_SQL_FLAGS).append(" FOREIGN KEY("); + IndexColumn.writeColumns(builder, columns, TRACE_SQL_FLAGS); builder.append(") REFERENCES "); - refTable.getSQL(builder, false).append('('); - IndexColumn.writeColumns(builder, refColumns, false); + refTable.getSQL(builder, TRACE_SQL_FLAGS).append('('); + IndexColumn.writeColumns(builder, refColumns, TRACE_SQL_FLAGS); builder.append(')'); if (searchIndex != null && check != null) { builder.append(" ("); @@ -155,12 +150,12 @@ private String getShortDescription(Index searchIndex, SearchRow check) { @Override public String getCreateSQLWithoutIndexes() { - return getCreateSQLForCopy(table, refTable, getSQL(true), false); + return getCreateSQLForCopy(table, refTable, getSQL(DEFAULT_SQL_FLAGS), false); } @Override public String getCreateSQL() { - return getCreateSQLForCopy(table, getSQL(true)); + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS)); } public void setColumns(IndexColumn[] cols) { @@ -214,31 +209,27 @@ public void setIndex(Index index, boolean isOwner) { } /** - * Set the index of the referenced table to use for this constraint. + * Set the unique constraint of the referenced table to use for this + * constraint. 
* - * @param refIndex the index - * @param isRefOwner true if the index is generated by the system and - * belongs to this constraint + * @param refConstraint + * the unique constraint */ - public void setRefIndex(Index refIndex, boolean isRefOwner) { - this.refIndex = refIndex; - this.refIndexOwner = isRefOwner; + public void setRefConstraint(ConstraintUnique refConstraint) { + this.refConstraint = refConstraint; } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { table.removeConstraint(this); refTable.removeConstraint(this); if (indexOwner) { table.removeIndexOrTransferOwnership(session, index); } - if (refIndexOwner) { - refTable.removeIndexOrTransferOwnership(session, refIndex); - } database.removeMeta(session, getId()); refTable = null; index = null; - refIndex = null; + refConstraint = null; columns = null; refColumns = null; deleteSQL = null; @@ -248,7 +239,7 @@ public void removeChildrenAndResources(Session session) { } @Override - public void checkRow(Session session, Table t, Row oldRow, Row newRow) { + public void checkRow(SessionLocal session, Table t, Row oldRow, Row newRow) { if (!database.getReferentialIntegrity()) { return; } @@ -266,7 +257,7 @@ public void checkRow(Session session, Table t, Row oldRow, Row newRow) { } } - private void checkRowOwnTable(Session session, Row oldRow, Row newRow) { + private void checkRowOwnTable(SessionLocal session, Row oldRow, Row newRow) { if (newRow == null) { return; } @@ -279,7 +270,7 @@ private void checkRowOwnTable(Session session, Row oldRow, Row newRow) { return; } if (constraintColumnsEqual) { - if (!database.areEqual(v, oldRow.getValue(idx))) { + if (!session.areEqual(v, oldRow.getValue(idx))) { constraintColumnsEqual = false; } } @@ -298,7 +289,7 @@ private void checkRowOwnTable(Session session, Row oldRow, Row newRow) { Column refCol = refColumns[i].column; int refIdx = refCol.getColumnId(); Value r = 
newRow.getValue(refIdx); - if (!database.areEqual(r, v)) { + if (!session.areEqual(r, v)) { self = false; break; } @@ -313,18 +304,19 @@ private void checkRowOwnTable(Session session, Row oldRow, Row newRow) { Value v = newRow.getValue(idx); Column refCol = refColumns[i].column; int refIdx = refCol.getColumnId(); - check.setValue(refIdx, refCol.convert(v)); + check.setValue(refIdx, refCol.convert(session, v)); } + Index refIndex = refConstraint.getIndex(); if (!existsRow(session, refIndex, check, null)) { throw DbException.get(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, getShortDescription(refIndex, check)); } } - private boolean existsRow(Session session, Index searchIndex, + private boolean existsRow(SessionLocal session, Index searchIndex, SearchRow check, Row excluding) { Table searchTable = searchIndex.getTable(); - searchTable.lock(session, false, false); + searchTable.lock(session, Table.READ_LOCK); Cursor cursor = searchIndex.find(session, check, check); while (cursor.next()) { SearchRow found; @@ -339,7 +331,7 @@ private boolean existsRow(Session session, Index searchIndex, int idx = cols[i].getColumnId(); Value c = check.getValue(idx); Value f = found.getValue(idx); - if (searchTable.compareValues(c, f) != 0) { + if (searchTable.compareValues(session, c, f) != 0) { allEqual = false; break; } @@ -352,16 +344,16 @@ private boolean existsRow(Session session, Index searchIndex, } private boolean isEqual(Row oldRow, Row newRow) { - return refIndex.compareRows(oldRow, newRow) == 0; + return refConstraint.getIndex().compareRows(oldRow, newRow) == 0; } - private void checkRow(Session session, Row oldRow) { - SearchRow check = table.getTemplateSimpleRow(false); + private void checkRow(SessionLocal session, Row oldRow) { + SearchRow check = table.getRowFactory().createRow(); for (int i = 0, len = columns.length; i < len; i++) { Column refCol = refColumns[i].column; int refIdx = refCol.getColumnId(); Column col = columns[i].column; - Value v = 
col.convert(oldRow.getValue(refIdx)); + Value v = col.convert(session, oldRow.getValue(refIdx)); if (v == ValueNull.INSTANCE) { return; } @@ -375,7 +367,7 @@ private void checkRow(Session session, Row oldRow) { } } - private void checkRowRefTable(Session session, Row oldRow, Row newRow) { + private void checkRowRefTable(SessionLocal session, Row oldRow, Row newRow) { if (oldRow == null) { // this is an insert return; @@ -478,7 +470,7 @@ private void buildDeleteSQL() { StringBuilder builder = new StringBuilder(); if (deleteAction == ConstraintActionType.CASCADE) { builder.append("DELETE FROM "); - table.getSQL(builder, true); + table.getSQL(builder, DEFAULT_SQL_FLAGS); } else { appendUpdate(builder); } @@ -486,11 +478,11 @@ private void buildDeleteSQL() { deleteSQL = builder.toString(); } - private Prepared getUpdate(Session session) { + private Prepared getUpdate(SessionLocal session) { return prepare(session, updateSQL, updateAction); } - private Prepared getDelete(Session session) { + private Prepared getDelete(SessionLocal session) { return prepare(session, deleteSQL, deleteAction); } @@ -530,7 +522,7 @@ public void rebuild() { buildDeleteSQL(); } - private Prepared prepare(Session session, String sql, ConstraintActionType action) { + private Prepared prepare(SessionLocal session, String sql, ConstraintActionType action) { Prepared command = session.prepare(sql); if (action != ConstraintActionType.CASCADE) { ArrayList params = command.getParameters(); @@ -541,7 +533,7 @@ private Prepared prepare(Session session, String sql, ConstraintActionType actio if (action == ConstraintActionType.SET_NULL) { value = ValueNull.INSTANCE; } else { - Expression expr = column.getDefaultExpression(); + Expression expr = column.getEffectiveDefaultExpression(); if (expr == null) { throw DbException.get(ErrorCode.NO_DEFAULT_SET_1, column.getName()); } @@ -555,23 +547,13 @@ private Prepared prepare(Session session, String sql, ConstraintActionType actio private void 
appendUpdate(StringBuilder builder) { builder.append("UPDATE "); - table.getSQL(builder, true).append(" SET "); - for (int i = 0, l = columns.length; i < l; i++) { - if (i > 0) { - builder.append(", "); - } - columns[i].column.getSQL(builder, true).append("=?"); - } + table.getSQL(builder, DEFAULT_SQL_FLAGS).append(" SET "); + IndexColumn.writeColumns(builder, columns, ", ", "=?", IndexColumn.SQL_NO_ORDER); } private void appendWhere(StringBuilder builder) { builder.append(" WHERE "); - for (int i = 0, l = columns.length; i < l; i++) { - if (i > 0) { - builder.append(" AND "); - } - columns[i].column.getSQL(builder, true).append("=?"); - } + IndexColumn.writeColumns(builder, columns, " AND ", "=?", IndexColumn.SQL_NO_ORDER); } @Override @@ -581,17 +563,15 @@ public Table getRefTable() { @Override public boolean usesIndex(Index idx) { - return idx == index || idx == refIndex; + return idx == index; } @Override public void setIndexOwner(Index index) { if (this.index == index) { indexOwner = true; - } else if (this.refIndex == index) { - refIndexOwner = true; } else { - DbException.throwInternalError(index + " " + toString()); + throw DbException.getInternalError(index + " " + toString()); } } @@ -601,40 +581,50 @@ public boolean isBefore() { } @Override - public void checkExistingData(Session session) { + public void checkExistingData(SessionLocal session) { if (session.getDatabase().isStarting()) { // don't check at startup return; } - session.startStatementWithinTransaction(); StringBuilder builder = new StringBuilder("SELECT 1 FROM (SELECT "); - IndexColumn.writeColumns(builder, columns, true); + IndexColumn.writeColumns(builder, columns, IndexColumn.SQL_NO_ORDER); builder.append(" FROM "); - table.getSQL(builder, true).append(" WHERE "); - IndexColumn.writeColumns(builder, columns, " AND ", " IS NOT NULL ", true); + table.getSQL(builder, DEFAULT_SQL_FLAGS).append(" WHERE "); + IndexColumn.writeColumns(builder, columns, " AND ", " IS NOT NULL ", 
IndexColumn.SQL_NO_ORDER); builder.append(" ORDER BY "); - IndexColumn.writeColumns(builder, columns, true); + IndexColumn.writeColumns(builder, columns, DEFAULT_SQL_FLAGS); builder.append(") C WHERE NOT EXISTS(SELECT 1 FROM "); - refTable.getSQL(builder, true).append(" P WHERE "); + refTable.getSQL(builder, DEFAULT_SQL_FLAGS).append(" P WHERE "); for (int i = 0, l = columns.length; i < l; i++) { if (i > 0) { builder.append(" AND "); } builder.append("C."); - columns[i].getSQL(builder, true).append('=').append("P."); - refColumns[i].getSQL(builder, true); + columns[i].column.getSQL(builder, DEFAULT_SQL_FLAGS).append('=').append("P."); + refColumns[i].column.getSQL(builder, DEFAULT_SQL_FLAGS); } builder.append(')'); - ResultInterface r = session.prepare(builder.toString()).query(1); - if (r.next()) { - throw DbException.get(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, - getShortDescription(null, null)); + + session.startStatementWithinTransaction(null); + try { + ResultInterface r = session.prepare(builder.toString()).query(1); + if (r.next()) { + throw DbException.get(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, + getShortDescription(null, null)); + } + } finally { + session.endStatement(); } } @Override - public Index getUniqueIndex() { - return refIndex; + public Index getIndex() { + return index; + } + + @Override + public ConstraintUnique getReferencedConstraint() { + return refConstraint; } } diff --git a/h2/src/main/org/h2/constraint/ConstraintUnique.java b/h2/src/main/org/h2/constraint/ConstraintUnique.java index 9be82d98d1..3da09e09e8 100644 --- a/h2/src/main/org/h2/constraint/ConstraintUnique.java +++ b/h2/src/main/org/h2/constraint/ConstraintUnique.java @@ -1,12 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.constraint; +import java.util.ArrayList; import java.util.HashSet; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.index.Index; import org.h2.result.Row; import org.h2.schema.Schema; @@ -43,7 +44,7 @@ public String getCreateSQLForCopy(Table forTable, String quotedName) { private String getCreateSQLForCopy(Table forTable, String quotedName, boolean internalIndex) { StringBuilder builder = new StringBuilder("ALTER TABLE "); - forTable.getSQL(builder, true).append(" ADD CONSTRAINT "); + forTable.getSQL(builder, DEFAULT_SQL_FLAGS).append(" ADD CONSTRAINT "); if (forTable.isHidden()) { builder.append("IF NOT EXISTS "); } @@ -53,28 +54,22 @@ private String getCreateSQLForCopy(Table forTable, String quotedName, boolean in StringUtils.quoteStringSQL(builder, comment); } builder.append(' ').append(getConstraintType().getSqlName()).append('('); - for (int i = 0, l = columns.length; i < l; i++) { - if (i > 0) { - builder.append(", "); - } - columns[i].column.getSQL(builder, true); - } - builder.append(')'); + IndexColumn.writeColumns(builder, columns, DEFAULT_SQL_FLAGS).append(')'); if (internalIndex && indexOwner && forTable == this.table) { builder.append(" INDEX "); - index.getSQL(builder, true); + index.getSQL(builder, DEFAULT_SQL_FLAGS); } return builder.toString(); } @Override public String getCreateSQLWithoutIndexes() { - return getCreateSQLForCopy(table, getSQL(true), false); + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS), false); } @Override public String getCreateSQL() { - return getCreateSQLForCopy(table, getSQL(true)); + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS)); } public void setColumns(IndexColumn[] columns) { @@ -98,7 +93,16 @@ public void setIndex(Index index, boolean isOwner) { } @Override - public void removeChildrenAndResources(Session session) 
{ + public void removeChildrenAndResources(SessionLocal session) { + ArrayList constraints = table.getConstraints(); + if (constraints != null) { + constraints = new ArrayList<>(table.getConstraints()); + for (Constraint c : constraints) { + if (c.getReferencedConstraint() == this) { + database.removeSchemaObject(session, c); + } + } + } table.removeConstraint(this); if (indexOwner) { table.removeIndexOrTransferOwnership(session, index); @@ -111,7 +115,7 @@ public void removeChildrenAndResources(Session session) { } @Override - public void checkRow(Session session, Table t, Row oldRow, Row newRow) { + public void checkRow(SessionLocal session, Table t, Row oldRow, Row newRow) { // unique index check is enough } @@ -140,13 +144,13 @@ public boolean isBefore() { } @Override - public void checkExistingData(Session session) { + public void checkExistingData(SessionLocal session) { // no need to check: when creating the unique index any problems are // found } @Override - public Index getUniqueIndex() { + public Index getIndex() { return index; } diff --git a/h2/src/main/org/h2/constraint/DomainColumnResolver.java b/h2/src/main/org/h2/constraint/DomainColumnResolver.java new file mode 100644 index 0000000000..1d01e1afe5 --- /dev/null +++ b/h2/src/main/org/h2/constraint/DomainColumnResolver.java @@ -0,0 +1,72 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.constraint; + +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * The single column resolver resolves the VALUE column. + * It is used to parse a domain constraint. 
+ */ +public class DomainColumnResolver implements ColumnResolver { + + private final Column column; + private Value value; + private String name; + + public DomainColumnResolver(TypeInfo typeInfo) { + this.column = new Column("VALUE", typeInfo); + } + + public void setValue(Value value) { + this.value = value; + } + + @Override + public Value getValue(Column col) { + return value; + } + + @Override + public Column[] getColumns() { + return new Column[] { column }; + } + + @Override + public Column findColumn(String name) { + return null; + } + + void setColumnName(String newName) { + name = newName; + } + + void resetColumnName() { + name = null; + } + + /** + * Return column name to use or null. + * + * @return column name to use or null + */ + public String getColumnName() { + return name; + } + + /** + * Return the type of the column. + * + * @return the type of the column + */ + public TypeInfo getValueType() { + return column.getType(); + } + +} diff --git a/h2/src/main/org/h2/constraint/package.html b/h2/src/main/org/h2/constraint/package.html index 0cd694d51d..a7e1d88a70 100644 --- a/h2/src/main/org/h2/constraint/package.html +++ b/h2/src/main/org/h2/constraint/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/engine/CastDataProvider.java b/h2/src/main/org/h2/engine/CastDataProvider.java new file mode 100644 index 0000000000..9682dda61a --- /dev/null +++ b/h2/src/main/org/h2/engine/CastDataProvider.java @@ -0,0 +1,53 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.engine; + +import org.h2.api.JavaObjectSerializer; +import org.h2.util.TimeZoneProvider; +import org.h2.value.ValueTimestampTimeZone; + +/** + * Provides information for type casts and comparison operations. + */ +public interface CastDataProvider { + + /** + * Returns the current timestamp with maximum resolution. 
The value must be + * the same within a transaction or within execution of a command. + * + * @return the current timestamp for CURRENT_TIMESTAMP(9) + */ + ValueTimestampTimeZone currentTimestamp(); + + /** + * Returns the current time zone. + * + * @return the current time zone + */ + TimeZoneProvider currentTimeZone(); + + /** + * Returns the database mode. + * + * @return the database mode + */ + Mode getMode(); + + /** + * Returns the custom Java object serializer, or {@code null}. + * + * @return the custom Java object serializer, or {@code null} + */ + JavaObjectSerializer getJavaObjectSerializer(); + + /** + * Returns are ENUM values 0-based. + * + * @return are ENUM values 0-based + */ + boolean zeroBasedEnums(); + +} diff --git a/h2/src/main/org/h2/engine/Comment.java b/h2/src/main/org/h2/engine/Comment.java index e10a7f5601..e3af80fb67 100644 --- a/h2/src/main/org/h2/engine/Comment.java +++ b/h2/src/main/org/h2/engine/Comment.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; @@ -13,7 +13,7 @@ /** * Represents a database object comment. 
*/ -public class Comment extends DbObjectBase { +public final class Comment extends DbObject { private final int objectType; private final String quotedObjectName; @@ -22,12 +22,12 @@ public class Comment extends DbObjectBase { public Comment(Database database, int id, DbObject obj) { super(database, id, getKey(obj), Trace.DATABASE); this.objectType = obj.getType(); - this.quotedObjectName = obj.getSQL(true); + this.quotedObjectName = obj.getSQL(DEFAULT_SQL_FLAGS); } @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } private static String getTypeName(int type) { @@ -61,11 +61,6 @@ private static String getTypeName(int type) { } } - @Override - public String getDropSQL() { - return null; - } - @Override public String getCreateSQL() { StringBuilder buff = new StringBuilder("COMMENT ON "); @@ -85,13 +80,13 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { database.removeMeta(session, getId()); } @Override public void checkRename() { - DbException.throwInternalError(); + throw DbException.getInternalError(); } /** @@ -103,7 +98,7 @@ public void checkRename() { */ static String getKey(DbObject obj) { StringBuilder builder = new StringBuilder(getTypeName(obj.getType())).append(' '); - obj.getSQL(builder, true); + obj.getSQL(builder, DEFAULT_SQL_FLAGS); return builder.toString(); } diff --git a/h2/src/main/org/h2/engine/ConnectionInfo.java b/h2/src/main/org/h2/engine/ConnectionInfo.java index 08857009d1..fdd0ee260a 100644 --- a/h2/src/main/org/h2/engine/ConnectionInfo.java +++ b/h2/src/main/org/h2/engine/ConnectionInfo.java @@ -1,10 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; +import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.HashMap; @@ -15,19 +16,25 @@ import org.h2.command.dml.SetTypes; import org.h2.message.DbException; import org.h2.security.SHA256; -import org.h2.store.fs.FilePathEncrypt; -import org.h2.store.fs.FilePathRec; import org.h2.store.fs.FileUtils; +import org.h2.store.fs.encrypt.FilePathEncrypt; +import org.h2.store.fs.rec.FilePathRec; +import org.h2.util.IOUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.SortedProperties; import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; import org.h2.util.Utils; /** * Encapsulates the connection settings, including user name and password. */ public class ConnectionInfo implements Cloneable { + private static final HashSet KNOWN_SETTINGS; + private static final HashSet IGNORED_BY_PARSER; + private Properties prop = new Properties(); private String originalURL; private String url; @@ -36,6 +43,8 @@ public class ConnectionInfo implements Cloneable { private byte[] fileEncryptionKey; private byte[] userPasswordHash; + private TimeZoneProvider timeZone; + /** * The database name */ @@ -46,6 +55,8 @@ public class ConnectionInfo implements Cloneable { private boolean persistent; private boolean unnamed; + private NetworkConnectionInfo networkConnectionInfo; + /** * Create a connection info object. * @@ -62,17 +73,32 @@ public ConnectionInfo(String name) { * Create a connection info object. 
* * @param u the database URL (must start with jdbc:h2:) - * @param info the connection properties + * @param info the connection properties or {@code null} + * @param user the user name or {@code null} + * @param password + * the password as {@code String} or {@code char[]}, or + * {@code null} */ - public ConnectionInfo(String u, Properties info) { + public ConnectionInfo(String u, Properties info, String user, Object password) { u = remapURL(u); - this.originalURL = u; + originalURL = url = u; if (!u.startsWith(Constants.START_URL)) { - throw DbException.getInvalidValueException("url", u); + throw getFormatException(); + } + if (info != null) { + readProperties(info); + } + if (user != null) { + prop.put("USER", user); + } + if (password != null) { + prop.put("PASSWORD", password); } - this.url = u; - readProperties(info); readSettingsFromURL(); + Object timeZoneName = prop.remove("TIME ZONE"); + if (timeZoneName != null) { + timeZone = TimeZoneProvider.ofId(timeZoneName.toString()); + } setUserName(removeProperty("USER", "")); name = url.substring(Constants.START_URL.length()); parseName(); @@ -90,26 +116,73 @@ public ConnectionInfo(String u, Properties info) { } static { - String[] connectionTime = { "ACCESS_MODE_DATA", "AUTOCOMMIT", "CIPHER", - "CREATE", "CACHE_TYPE", "FILE_LOCK", "IGNORE_UNKNOWN_SETTINGS", - "IFEXISTS", "INIT", "MVCC", "PASSWORD", "RECOVER", "RECOVER_TEST", - "USER", "AUTO_SERVER", "AUTO_SERVER_PORT", "NO_UPGRADE", - "AUTO_RECONNECT", "OPEN_NEW", "PAGE_SIZE", "PASSWORD_HASH", "JMX", - "SCOPE_GENERATED_KEYS", "AUTHREALM", "AUTHZPWD" }; + String[] commonSettings = { // + "ACCESS_MODE_DATA", "AUTO_RECONNECT", "AUTO_SERVER", "AUTO_SERVER_PORT", // + "CACHE_TYPE", // + "FILE_LOCK", // + "JMX", // + "NETWORK_TIMEOUT", // + "OLD_INFORMATION_SCHEMA", "OPEN_NEW", // + "PAGE_SIZE", // + "RECOVER", // + }; + String[] settings = { // + "AUTHREALM", "AUTHZPWD", "AUTOCOMMIT", // + "CIPHER", "CREATE", // + "FORBID_CREATION", // + "IGNORE_UNKNOWN_SETTINGS", 
"IFEXISTS", "INIT", // + "NO_UPGRADE", // + "PASSWORD", "PASSWORD_HASH", // + "RECOVER_TEST", // + "USER" // + }; HashSet set = new HashSet<>(128); set.addAll(SetTypes.getTypes()); - for (String key : connectionTime) { - if (!set.add(key)) { - DbException.throwInternalError(key); + for (String setting : commonSettings) { + if (!set.add(setting)) { + throw DbException.getInternalError(setting); + } + } + for (String setting : settings) { + if (!set.add(setting)) { + throw DbException.getInternalError(setting); } } KNOWN_SETTINGS = set; + settings = new String[] { // + "ASSERT", // + "BINARY_COLLATION", // + "DB_CLOSE_ON_EXIT", // + "PAGE_STORE", // + "UUID_COLLATION", // + }; + set = new HashSet<>(32); + for (String setting : commonSettings) { + set.add(setting); + } + for (String setting : settings) { + set.add(setting); + } + IGNORED_BY_PARSER = set; } private static boolean isKnownSetting(String s) { return KNOWN_SETTINGS.contains(s); } + /** + * Returns whether setting with the specified name should be ignored by + * parser. 
+ * + * @param name + * the name of the setting + * @return whether setting with the specified name should be ignored by + * parser + */ + public static boolean isIgnoredByParser(String name) { + return IGNORED_BY_PARSER.contains(name); + } + @Override public ConnectionInfo clone() throws CloneNotSupportedException { ConnectionInfo clone = (ConnectionInfo) super.clone(); @@ -143,11 +216,7 @@ private void parseName() { persistent = true; } if (persistent && !remote) { - if ("/".equals(SysProperties.FILE_SEPARATOR)) { - name = name.replace('\\', '/'); - } else { - name = name.replace('/', '\\'); - } + name = IOUtils.nameSeparatorsToNative(name); } } @@ -163,7 +232,7 @@ public void setBaseDir(String dir) { boolean absolute = FileUtils.isAbsolute(name); String n; String prefix = null; - if (dir.endsWith(SysProperties.FILE_SEPARATOR)) { + if (dir.endsWith(File.separator)) { dir = dir.substring(0, dir.length() - 1); } if (absolute) { @@ -171,7 +240,7 @@ public void setBaseDir(String dir) { } else { n = FileUtils.unwrap(name); prefix = name.substring(0, name.length() - n.length()); - n = dir + SysProperties.FILE_SEPARATOR + n; + n = dir + File.separatorChar + n; } String normalizedName = FileUtils.unwrap(FileUtils.toRealPath(n)); if (normalizedName.equals(absDir) || !normalizedName.startsWith(absDir)) { @@ -190,7 +259,7 @@ public void setBaseDir(String dir) { absDir); } if (!absolute) { - name = prefix + dir + SysProperties.FILE_SEPARATOR + FileUtils.unwrap(name); + name = prefix + dir + File.separatorChar + FileUtils.unwrap(name); } } } @@ -245,11 +314,12 @@ private void readProperties(Properties info) { } private void readSettingsFromURL() { - DbSettings defaultSettings = DbSettings.getDefaultSettings(); + DbSettings defaultSettings = DbSettings.DEFAULT; int idx = url.indexOf(';'); if (idx >= 0) { String settings = url.substring(idx + 1); url = url.substring(0, idx); + String unknownSetting = null; String[] list = StringUtils.arraySplit(settings, ';', false); for 
(String setting : list) { if (setting.isEmpty()) { @@ -262,14 +332,19 @@ private void readSettingsFromURL() { String value = setting.substring(equal + 1); String key = setting.substring(0, equal); key = StringUtils.toUpperEnglish(key); - if (!isKnownSetting(key) && !defaultSettings.containsKey(key)) { - throw DbException.get(ErrorCode.UNSUPPORTED_SETTING_1, key); - } - String old = prop.getProperty(key); - if (old != null && !old.equals(value)) { - throw DbException.get(ErrorCode.DUPLICATE_PROPERTY_1, key); + if (isKnownSetting(key) || defaultSettings.containsKey(key)) { + String old = prop.getProperty(key); + if (old != null && !old.equals(value)) { + throw DbException.get(ErrorCode.DUPLICATE_PROPERTY_1, key); + } + prop.setProperty(key, value); + } else { + unknownSetting = key; } - prop.setProperty(key, value); + } + if (unknownSetting != null // + && !Utils.parseBoolean(prop.getProperty("IGNORE_UNKNOWN_SETTINGS"), false, false)) { + throw DbException.get(ErrorCode.UNSUPPORTED_SETTING_1, unknownSetting); } } } @@ -363,7 +438,7 @@ public boolean removeProperty(String key, boolean defaultValue) { */ String removeProperty(String key, String defaultValue) { if (SysProperties.CHECK && !isKnownSetting(key)) { - DbException.throwInternalError(key); + throw DbException.getInternalError(key); } Object x = prop.remove(key); return x == null ? 
defaultValue : x.toString(); @@ -379,31 +454,17 @@ public String getName() { return name; } if (nameNormalized == null) { - if (!SysProperties.IMPLICIT_RELATIVE_PATH) { - if (!FileUtils.isAbsolute(name)) { - if (!name.contains("./") && - !name.contains(".\\") && - !name.contains(":/") && - !name.contains(":\\")) { - // the name could start with "./", or - // it could start with a prefix such as "nio:./" - // for Windows, the path "\test" is not considered - // absolute as the drive letter is missing, - // but we consider it absolute - throw DbException.get( - ErrorCode.URL_RELATIVE_TO_CWD, - originalURL); - } - } - } - String suffix = Constants.SUFFIX_PAGE_FILE; - String n; - if (FileUtils.exists(name + suffix)) { - n = FileUtils.toRealPath(name + suffix); - } else { - suffix = Constants.SUFFIX_MV_FILE; - n = FileUtils.toRealPath(name + suffix); + if (!FileUtils.isAbsolute(name) && !name.contains("./") && !name.contains(".\\") && !name.contains(":/") + && !name.contains(":\\")) { + // the name could start with "./", or + // it could start with a prefix such as "nioMapped:./" + // for Windows, the path "\test" is not considered + // absolute as the drive letter is missing, + // but we consider it absolute + throw DbException.get(ErrorCode.URL_RELATIVE_TO_CWD, originalURL); } + String suffix = Constants.SUFFIX_MV_FILE; + String n = FileUtils.toRealPath(name + suffix); String fileName = FileUtils.getName(n); if (fileName.length() < suffix.length() + 1) { throw DbException.get(ErrorCode.INVALID_DATABASE_NAME_1, name); @@ -476,7 +537,7 @@ String getProperty(String key) { */ int getProperty(String key, int defaultValue) { if (SysProperties.CHECK && !isKnownSetting(key)) { - DbException.throwInternalError(key); + throw DbException.getInternalError(key); } String s = getProperty(key); return s == null ? 
defaultValue : Integer.parseInt(s); @@ -491,7 +552,7 @@ int getProperty(String key, int defaultValue) { */ public String getProperty(String key, String defaultValue) { if (SysProperties.CHECK && !isKnownSetting(key)) { - DbException.throwInternalError(key); + throw DbException.getInternalError(key); } String s = getProperty(key); return s == null ? defaultValue : s; @@ -609,13 +670,21 @@ public void setOriginalURL(String url) { } /** - * Generate an URL format exception. + * Returns the time zone. + * + * @return the time zone + */ + public TimeZoneProvider getTimeZone() { + return timeZone; + } + + /** + * Generate a URL format exception. * * @return the exception */ DbException getFormatException() { - String format = Constants.URL_FORMAT; - return DbException.get(ErrorCode.URL_FORMAT_ERROR_2, format, url); + return DbException.get(ErrorCode.URL_FORMAT_ERROR_2, Constants.URL_FORMAT, url); } /** @@ -629,9 +698,27 @@ public void setServerKey(String serverKey) { this.name = serverKey; } + /** + * Returns the network connection information, or {@code null}. + * + * @return the network connection information, or {@code null} + */ + public NetworkConnectionInfo getNetworkConnectionInfo() { + return networkConnectionInfo; + } + + /** + * Sets the network connection information. 
+ * + * @param networkConnectionInfo the network connection information + */ + public void setNetworkConnectionInfo(NetworkConnectionInfo networkConnectionInfo) { + this.networkConnectionInfo = networkConnectionInfo; + } + public DbSettings getDbSettings() { - DbSettings defaultSettings = DbSettings.getDefaultSettings(); - HashMap s = new HashMap<>(); + DbSettings defaultSettings = DbSettings.DEFAULT; + HashMap s = new HashMap<>(DbSettings.TABLE_SIZE); for (Object k : prop.keySet()) { String key = k.toString(); if (!isKnownSetting(key) && defaultSettings.containsKey(key)) { diff --git a/h2/src/main/org/h2/engine/Constants.java b/h2/src/main/org/h2/engine/Constants.java index 6a9815abcb..d71cf6b656 100644 --- a/h2/src/main/org/h2/engine/Constants.java +++ b/h2/src/main/org/h2/engine/Constants.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; @@ -15,22 +15,13 @@ public class Constants { /** * The build date is updated for each public release. */ - public static final String BUILD_DATE = "2019-03-13"; + public static final String BUILD_DATE = "2022-01-17"; /** - * The build date of the last stable release. + * Sequential version number. Even numbers are used for official releases, + * odd numbers are used for development builds. */ - public static final String BUILD_DATE_STABLE = "2018-03-18"; - - /** - * The build id is incremented for each public release. - */ - public static final int BUILD_ID = 199; - - /** - * The build id of the last stable release. - */ - public static final int BUILD_ID_STABLE = 197; + public static final int BUILD_ID = 210; /** * Whether this is a snapshot version. 
@@ -45,54 +36,6 @@ public class Constants { */ public static final String BUILD_VENDOR_AND_VERSION = null; - /** - * The TCP protocol version number 9. - * @since 1.3.158 (2011-07-17) - */ - public static final int TCP_PROTOCOL_VERSION_9 = 9; - - /** - * The TCP protocol version number 10. - * @since 1.3.162 (2011-11-26) - */ - public static final int TCP_PROTOCOL_VERSION_10 = 10; - - /** - * The TCP protocol version number 11. - * @since 1.3.163 (2011-12-30) - */ - public static final int TCP_PROTOCOL_VERSION_11 = 11; - - /** - * The TCP protocol version number 12. - * @since 1.3.168 (2012-07-13) - */ - public static final int TCP_PROTOCOL_VERSION_12 = 12; - - /** - * The TCP protocol version number 13. - * @since 1.3.174 (2013-10-19) - */ - public static final int TCP_PROTOCOL_VERSION_13 = 13; - - /** - * The TCP protocol version number 14. - * @since 1.3.176 (2014-04-05) - */ - public static final int TCP_PROTOCOL_VERSION_14 = 14; - - /** - * The TCP protocol version number 15. - * @since 1.4.178 Beta (2014-05-02) - */ - public static final int TCP_PROTOCOL_VERSION_15 = 15; - - /** - * The TCP protocol version number 16. - * @since 1.4.194 (2017-03-10) - */ - public static final int TCP_PROTOCOL_VERSION_16 = 16; - /** * The TCP protocol version number 17. * @since 1.4.197 (2018-03-18) @@ -105,25 +48,37 @@ public class Constants { */ public static final int TCP_PROTOCOL_VERSION_18 = 18; + /** + * The TCP protocol version number 19. + * @since 1.4.200 (2019-10-14) + */ + public static final int TCP_PROTOCOL_VERSION_19 = 19; + + /** + * The TCP protocol version number 20. + * @since 2.0.202 (2021-11-25) + */ + public static final int TCP_PROTOCOL_VERSION_20 = 20; + /** * Minimum supported version of TCP protocol. */ - public static final int TCP_PROTOCOL_VERSION_MIN_SUPPORTED = TCP_PROTOCOL_VERSION_9; + public static final int TCP_PROTOCOL_VERSION_MIN_SUPPORTED = TCP_PROTOCOL_VERSION_17; /** * Maximum supported version of TCP protocol. 
*/ - public static final int TCP_PROTOCOL_VERSION_MAX_SUPPORTED = TCP_PROTOCOL_VERSION_18; + public static final int TCP_PROTOCOL_VERSION_MAX_SUPPORTED = TCP_PROTOCOL_VERSION_20; /** * The major version of this database. */ - public static final int VERSION_MAJOR = 1; + public static final int VERSION_MAJOR = 2; /** * The minor version of this database. */ - public static final int VERSION_MINOR = 4; + public static final int VERSION_MINOR = 1; /** * The lock mode that means no locking is used at all. @@ -165,6 +120,11 @@ public class Constants { */ public static final int ALLOW_LITERALS_NUMBERS = 1; + /** + * SNAPSHOT isolation level of transaction. + */ + public static final int TRANSACTION_SNAPSHOT = 6; + /** * Whether searching in Blob values should be supported. */ @@ -175,11 +135,6 @@ public class Constants { */ public static final int CACHE_MIN_RECORDS = 16; - /** - * The default cache size in KB for each GB of RAM. - */ - public static final int CACHE_SIZE_DEFAULT = 64 * 1024; - /** * The default cache type. */ @@ -238,16 +193,6 @@ public class Constants { */ public static final int DEFAULT_MAX_LENGTH_INPLACE_LOB = 256; - /** - * The default value for the maximum transaction log size. - */ - public static final long DEFAULT_MAX_LOG_SIZE = 16 * 1024 * 1024; - - /** - * The default value for the MAX_MEMORY_UNDO setting. - */ - public static final int DEFAULT_MAX_MEMORY_UNDO = 50_000; - /** * The default for the setting MAX_OPERATION_MEMORY. */ @@ -310,48 +255,47 @@ public class Constants { public static final int LOCK_SLEEP = 1000; /** - * The highest possible parameter index. + * The maximum allowed length of identifiers. */ - public static final int MAX_PARAMETER_INDEX = 100_000; + public static final int MAX_IDENTIFIER_LENGTH = 256; /** - * The memory needed by a object of class Data + * The maximum number of columns in a table, select statement or row value. 
*/ - public static final int MEMORY_DATA = 24; + public static final int MAX_COLUMNS = 16_384; /** - * This value is used to calculate the average memory usage. + * The maximum allowed length for character string, binary string, and other + * data types based on them; excluding LOB data types. */ - public static final int MEMORY_FACTOR = 64; + public static final int MAX_STRING_LENGTH = 1024 * 1024; /** - * The memory needed by a regular object with at least one field. + * The maximum allowed precision of numeric data types. */ - // Java 6, 64 bit: 24 - // Java 6, 32 bit: 12 - public static final int MEMORY_OBJECT = 24; + public static final int MAX_NUMERIC_PRECISION = 100_000; /** - * The memory needed by an array. + * The maximum allowed cardinality of array. */ - public static final int MEMORY_ARRAY = 24; + public static final int MAX_ARRAY_CARDINALITY = 65_536; /** - * The memory needed by an object of class PageBtree. + * The highest possible parameter index. */ - public static final int MEMORY_PAGE_BTREE = - 112 + MEMORY_DATA + 2 * MEMORY_OBJECT; + public static final int MAX_PARAMETER_INDEX = 100_000; /** - * The memory needed by an object of class PageData. + * The memory needed by a regular object with at least one field. */ - public static final int MEMORY_PAGE_DATA = - 144 + MEMORY_DATA + 3 * MEMORY_OBJECT; + // Java 6, 64 bit: 24 + // Java 6, 32 bit: 12 + public static final int MEMORY_OBJECT = 24; /** - * The memory needed by an object of class PageDataOverflow. + * The memory needed by an array. */ - public static final int MEMORY_PAGE_DATA_OVERFLOW = 96 + MEMORY_DATA; + public static final int MEMORY_ARRAY = 24; /** * The memory needed by a pointer. @@ -365,11 +309,6 @@ public class Constants { */ public static final int MEMORY_ROW = 40; - /** - * The minimum write delay that causes commits to be delayed. - */ - public static final int MIN_WRITE_DELAY = 5; - /** * The name prefix used for indexes that are not explicitly named. 
*/ @@ -416,6 +355,16 @@ public class Constants { */ public static final String SCHEMA_MAIN = "PUBLIC"; + /** + * The identity of pg_catalog schema. + */ + public static final int PG_CATALOG_SCHEMA_ID = -1_000; + + /** + * The name of the pg_catalog schema. + */ + public static final String SCHEMA_PG_CATALOG = "PG_CATALOG"; + /** * The default selectivity (used if the selectivity is not calculated). */ @@ -448,22 +397,6 @@ public class Constants { */ public static final String START_URL = "jdbc:h2:"; - /** - * The file name suffix of all database files. - */ - public static final String SUFFIX_DB_FILE = ".db"; - - /** - * The file name suffix of large object files. - */ - public static final String SUFFIX_LOB_FILE = ".lob.db"; - - /** - * The suffix of the directory name used if LOB objects are stored in a - * directory. - */ - public static final String SUFFIX_LOBS_DIRECTORY = ".lobs.db"; - /** * The file name suffix of file lock files that are used to make sure a * database is open by only one process at any time. @@ -475,10 +408,6 @@ public class Constants { */ public static final String SUFFIX_OLD_DATABASE_FILE = ".data.db"; - /** - * The file name suffix of page files. - */ - public static final String SUFFIX_PAGE_FILE = ".h2.db"; /** * The file name suffix of a MVStore file. */ @@ -511,11 +440,6 @@ public class Constants { */ public static final int THROTTLE_DELAY = 50; - /** - * The maximum size of an undo log block. - */ - public static final int UNDO_BLOCK_SIZE = 1024 * 1024; - /** * The database URL format in simplified Backus-Naur form. */ @@ -550,44 +474,32 @@ public class Constants { */ public static final String PG_VERSION = "8.2.23"; - private Constants() { - // utility class - } - /** - * Get the version of this product, consisting of major version, minor + * The version of this product, consisting of major version, minor * version, and build id. 
- * - * @return the version number */ - public static String getVersion() { - String version = VERSION_MAJOR + "." + VERSION_MINOR + "." + BUILD_ID; + public static final String VERSION; + + /** + * The complete version number of this database, consisting of + * the major version, the minor version, the build id, and the build date. + */ + public static final String FULL_VERSION; + + static { + String version = VERSION_MAJOR + "." + VERSION_MINOR + '.' + BUILD_ID; if (BUILD_VENDOR_AND_VERSION != null) { - version += "_" + BUILD_VENDOR_AND_VERSION; + version += '_' + BUILD_VENDOR_AND_VERSION; } if (BUILD_SNAPSHOT) { version += "-SNAPSHOT"; } - return version; + VERSION = version; + FULL_VERSION = version + (" (" + BUILD_DATE + ')'); } - /** - * Get the last stable version name. - * - * @return the version number - */ - public static Object getVersionStable() { - return "1.4." + BUILD_ID_STABLE; - } - - /** - * Get the complete version number of this database, consisting of - * the major version, the minor version, the build id, and the build date. - * - * @return the complete version - */ - public static String getFullVersion() { - return getVersion() + " (" + BUILD_DATE + ")"; + private Constants() { + // utility class } } diff --git a/h2/src/main/org/h2/engine/Database.java b/h2/src/main/org/h2/engine/Database.java index dd332e96a5..f7d6958c4e 100644 --- a/h2/src/main/org/h2/engine/Database.java +++ b/h2/src/main/org/h2/engine/Database.java @@ -1,50 +1,49 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; -import java.io.IOException; import java.sql.SQLException; import java.util.ArrayList; -import java.util.Arrays; import java.util.BitSet; +import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; -import java.util.List; +import java.util.Map; import java.util.Objects; -import java.util.Properties; import java.util.Set; -import java.util.StringTokenizer; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; - import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.api.JavaObjectSerializer; import org.h2.api.TableEngine; import org.h2.command.CommandInterface; +import org.h2.command.Prepared; import org.h2.command.ddl.CreateTableData; import org.h2.command.dml.SetTypes; import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.engine.Mode.ModeEnum; import org.h2.index.Cursor; import org.h2.index.Index; import org.h2.index.IndexType; -import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.message.TraceSystem; -import org.h2.mvstore.MVStore; -import org.h2.mvstore.db.MVTableEngine; -import org.h2.result.LocalResultFactory; +import org.h2.mode.DefaultNullOrdering; +import org.h2.mode.PgCatalogSchema; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.db.LobStorageMap; +import org.h2.mvstore.db.Store; import org.h2.result.Row; import org.h2.result.RowFactory; import org.h2.result.SearchRow; +import org.h2.schema.InformationSchema; import org.h2.schema.Schema; import org.h2.schema.SchemaObject; import org.h2.schema.Sequence; @@ -55,16 +54,12 @@ import org.h2.store.FileLockMethod; import org.h2.store.FileStore; import org.h2.store.InDoubtTransaction; -import 
org.h2.store.LobStorageBackend; import org.h2.store.LobStorageFrontend; import org.h2.store.LobStorageInterface; -import org.h2.store.LobStorageMap; -import org.h2.store.PageStore; -import org.h2.store.WriterThread; import org.h2.store.fs.FileUtils; +import org.h2.store.fs.encrypt.FileEncrypt; import org.h2.table.Column; import org.h2.table.IndexColumn; -import org.h2.table.MetaTable; import org.h2.table.Table; import org.h2.table.TableLinkConnection; import org.h2.table.TableSynonym; @@ -75,16 +70,19 @@ import org.h2.util.JdbcUtils; import org.h2.util.MathUtils; import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.SmallLRUCache; import org.h2.util.SourceCompiler; import org.h2.util.StringUtils; import org.h2.util.TempFileDeleter; +import org.h2.util.TimeZoneProvider; import org.h2.util.Utils; +import org.h2.value.CaseInsensitiveConcurrentMap; import org.h2.value.CaseInsensitiveMap; import org.h2.value.CompareMode; -import org.h2.value.CaseInsensitiveConcurrentMap; -import org.h2.value.Value; -import org.h2.value.ValueInt; +import org.h2.value.TypeInfo; +import org.h2.value.ValueInteger; +import org.h2.value.ValueTimestampTimeZone; /** * There is one database object per open database. 
@@ -94,16 +92,16 @@ * * @since 2004-04-15 22:49 */ -public class Database implements DataHandler { +public final class Database implements DataHandler, CastDataProvider { private static int initialPowerOffCount; private static final boolean ASSERT; - private static final ThreadLocal META_LOCK_DEBUGGING; + private static final ThreadLocal META_LOCK_DEBUGGING; private static final ThreadLocal META_LOCK_DEBUGGING_DB; private static final ThreadLocal META_LOCK_DEBUGGING_STACK; - private static final Session[] EMPTY_SESSION_ARRAY = new Session[0]; + private static final SessionLocal[] EMPTY_SESSION_ARRAY = new SessionLocal[0]; static { boolean a = false; @@ -135,48 +133,47 @@ public class Database implements DataHandler { private final byte[] filePasswordHash; private final byte[] fileEncryptionKey; - private final HashMap roles = new HashMap<>(); - private final HashMap users = new HashMap<>(); - private final HashMap settings = new HashMap<>(); - private final HashMap schemas = new HashMap<>(); - private final HashMap rights = new HashMap<>(); - private final HashMap domains = new HashMap<>(); - private final HashMap aggregates = new HashMap<>(); - private final HashMap comments = new HashMap<>(); + private final ConcurrentHashMap usersAndRoles = new ConcurrentHashMap<>(); + private final ConcurrentHashMap settings = new ConcurrentHashMap<>(); + private final ConcurrentHashMap schemas = new ConcurrentHashMap<>(); + private final ConcurrentHashMap rights = new ConcurrentHashMap<>(); + private final ConcurrentHashMap comments = new ConcurrentHashMap<>(); + private final HashMap tableEngines = new HashMap<>(); - private final Set userSessions = - Collections.synchronizedSet(new HashSet()); - private final AtomicReference exclusiveSession = new AtomicReference<>(); + private final Set userSessions = Collections.synchronizedSet(new HashSet<>()); + private final AtomicReference exclusiveSession = new AtomicReference<>(); private final BitSet objectIds = new BitSet(); 
private final Object lobSyncObject = new Object(); - private Schema mainSchema; - private Schema infoSchema; + private final Schema mainSchema; + private final Schema infoSchema; + private final Schema pgCatalogSchema; private int nextSessionId; private int nextTempTableId; - private User systemUser; - private Session systemSession; - private Session lobSession; - private Table meta; - private Index metaIdIndex; + private final User systemUser; + private SessionLocal systemSession; + private SessionLocal lobSession; + private final Table meta; + private final Index metaIdIndex; private FileLock lock; - private WriterThread writer; private volatile boolean starting; - private TraceSystem traceSystem; - private Trace trace; + private final TraceSystem traceSystem; + private final Trace trace; private final FileLockMethod fileLockMethod; - private Role publicRole; + private final Role publicRole; private final AtomicLong modificationDataId = new AtomicLong(); private final AtomicLong modificationMetaId = new AtomicLong(); + /** + * Used to trigger the client side to reload some of the settings. 
+ */ + private final AtomicLong remoteSettingsId = new AtomicLong(); private CompareMode compareMode; private String cluster = Constants.CLUSTERING_DISABLED; private boolean readOnly; - private int writeDelay = Constants.DEFAULT_WRITE_DELAY; private DatabaseEventListener eventListener; private int maxMemoryRows = SysProperties.MAX_MEMORY_ROWS; - private int maxMemoryUndo = Constants.DEFAULT_MAX_MEMORY_UNDO; - private int lockMode = Constants.DEFAULT_LOCK_MODE; + private int lockMode; private int maxLengthInplaceLob; private int allowLiterals = Constants.ALLOW_LITERALS_ALL; @@ -186,14 +183,11 @@ public class Database implements DataHandler { private volatile boolean closing; private boolean ignoreCase; private boolean deleteFilesOnDisconnect; - private String lobCompressionAlgorithm; private boolean optimizeReuseResults = true; private final String cacheType; - private final String accessModeData; private boolean referentialIntegrity = true; private Mode mode = Mode.getRegular(); - /** ie. 
the MULTI_THREADED setting */ - private boolean multiThreaded; + private DefaultNullOrdering defaultNullOrdering = DefaultNullOrdering.LOW; private int maxOperationMemory = Constants.DEFAULT_MAX_OPERATION_MEMORY; private SmallLRUCache lobFileListCache; @@ -202,26 +196,13 @@ public class Database implements DataHandler { private Server server; private HashMap linkConnections; private final TempFileDeleter tempFileDeleter = TempFileDeleter.getInstance(); - private PageStore pageStore; - private Properties reconnectLastLock; - private volatile long reconnectCheckNext; - private volatile boolean reconnectChangePending; - private volatile int checkpointAllowed; - private volatile boolean checkpointRunning; - private final Object reconnectSync = new Object(); - private int cacheSize; private int compactMode; private SourceCompiler compiler; - private volatile boolean metaTablesInitialized; - private boolean flushOnEachCommit; - private LobStorageInterface lobStorage; + private final LobStorageInterface lobStorage; private final int pageSize; private int defaultTableType = Table.TYPE_CACHED; private final DbSettings dbSettings; - private final long reconnectCheckDelayNs; - private int logMode; - private MVTableEngine.Store store; - private int retentionTime; + private final Store store; private boolean allowBuiltinAliasOverride; private final AtomicReference backgroundException = new AtomicReference<>(); private JavaObjectSerializer javaObjectSerializer; @@ -230,8 +211,8 @@ public class Database implements DataHandler { private boolean queryStatistics; private int queryStatisticsMaxEntries = Constants.QUERY_STATISTICS_MAX_ENTRIES; private QueryStatisticsData queryStatisticsData; - private RowFactory rowFactory = RowFactory.DEFAULT; - private LocalResultFactory resultFactory = LocalResultFactory.DEFAULT; + private RowFactory rowFactory = RowFactory.getRowFactory(); + private boolean ignoreCatalogs; private Authenticator authenticator; @@ -241,82 +222,163 @@ public 
Database(ConnectionInfo ci, String cipher) { META_LOCK_DEBUGGING_DB.set(null); META_LOCK_DEBUGGING_STACK.set(null); } - String name = ci.getName(); + String databaseName = ci.getName(); this.dbSettings = ci.getDbSettings(); - this.reconnectCheckDelayNs = TimeUnit.MILLISECONDS.toNanos(dbSettings.reconnectCheckDelay); this.compareMode = CompareMode.getInstance(null, 0); this.persistent = ci.isPersistent(); this.filePasswordHash = ci.getFilePasswordHash(); this.fileEncryptionKey = ci.getFileEncryptionKey(); - this.databaseName = name; + this.databaseName = databaseName; this.databaseShortName = parseDatabaseShortName(); this.maxLengthInplaceLob = Constants.DEFAULT_MAX_LENGTH_INPLACE_LOB; this.cipher = cipher; - String lockMethodName = ci.getProperty("FILE_LOCK", null); - this.accessModeData = StringUtils.toLowerEnglish( - ci.getProperty("ACCESS_MODE_DATA", "rw")); this.autoServerMode = ci.getProperty("AUTO_SERVER", false); this.autoServerPort = ci.getProperty("AUTO_SERVER_PORT", 0); - int defaultCacheSize = Utils.scaleForAvailableMemory( - Constants.CACHE_SIZE_DEFAULT); - this.cacheSize = - ci.getProperty("CACHE_SIZE", defaultCacheSize); - this.pageSize = ci.getProperty("PAGE_SIZE", - Constants.DEFAULT_PAGE_SIZE); + pageSize = ci.getProperty("PAGE_SIZE", Constants.DEFAULT_PAGE_SIZE); + if (cipher != null && pageSize % FileEncrypt.BLOCK_SIZE != 0) { + throw DbException.getUnsupportedException("CIPHER && PAGE_SIZE=" + pageSize); + } + String accessModeData = StringUtils.toLowerEnglish(ci.getProperty("ACCESS_MODE_DATA", "rw")); if ("r".equals(accessModeData)) { readOnly = true; } - if (dbSettings.mvStore && lockMethodName == null) { - if (autoServerMode) { - fileLockMethod = FileLockMethod.FILE; - } else { - fileLockMethod = FileLockMethod.FS; + String lockMethodName = ci.getProperty("FILE_LOCK", null); + fileLockMethod = lockMethodName != null ? FileLock.getFileLockMethod(lockMethodName) : + autoServerMode ? 
FileLockMethod.FILE : FileLockMethod.FS; + this.databaseURL = ci.getURL(); + String s = ci.removeProperty("DATABASE_EVENT_LISTENER", null); + if (s != null) { + setEventListenerClass(StringUtils.trim(s, true, true, "'")); + } + s = ci.removeProperty("MODE", null); + if (s != null) { + mode = Mode.getInstance(s); + if (mode == null) { + throw DbException.get(ErrorCode.UNKNOWN_MODE_1, s); } - } else { - fileLockMethod = FileLock.getFileLockMethod(lockMethodName); } - if (dbSettings.mvStore && fileLockMethod == FileLockMethod.SERIALIZED) { - throw DbException.getUnsupportedException( - "MV_STORE combined with FILE_LOCK=SERIALIZED"); + s = ci.removeProperty("DEFAULT_NULL_ORDERING", null); + if (s != null) { + try { + defaultNullOrdering = DefaultNullOrdering.valueOf(StringUtils.toUpperEnglish(s)); + } catch (RuntimeException e) { + throw DbException.getInvalidValueException("DEFAULT_NULL_ORDERING", s); + } } - this.databaseURL = ci.getURL(); - String listener = ci.removeProperty("DATABASE_EVENT_LISTENER", null); - if (listener != null) { - listener = StringUtils.trim(listener, true, true, "'"); - setEventListenerClass(listener); - } - String modeName = ci.removeProperty("MODE", null); - if (modeName != null) { - mode = Mode.getInstance(modeName); - if (mode == null) { - throw DbException.get(ErrorCode.UNKNOWN_MODE_1, modeName); - } - } - this.logMode = - ci.getProperty("LOG", PageStore.LOG_MODE_SYNC); - this.javaObjectSerializerName = - ci.getProperty("JAVA_OBJECT_SERIALIZER", null); - this.multiThreaded = - ci.getProperty("MULTI_THREADED", dbSettings.mvStore); - this.allowBuiltinAliasOverride = - ci.getProperty("BUILTIN_ALIAS_OVERRIDE", false); - boolean closeAtVmShutdown = - dbSettings.dbCloseOnExit; - int traceLevelFile = - ci.getIntProperty(SetTypes.TRACE_LEVEL_FILE, - TraceSystem.DEFAULT_TRACE_LEVEL_FILE); - int traceLevelSystemOut = - ci.getIntProperty(SetTypes.TRACE_LEVEL_SYSTEM_OUT, + s = ci.getProperty("JAVA_OBJECT_SERIALIZER", null); + if (s != null) { + s = 
StringUtils.trim(s, true, true, "'"); + javaObjectSerializerName = s; + } + this.allowBuiltinAliasOverride = ci.getProperty("BUILTIN_ALIAS_OVERRIDE", false); + boolean closeAtVmShutdown = dbSettings.dbCloseOnExit; + int traceLevelFile = ci.getIntProperty(SetTypes.TRACE_LEVEL_FILE, TraceSystem.DEFAULT_TRACE_LEVEL_FILE); + int traceLevelSystemOut = ci.getIntProperty(SetTypes.TRACE_LEVEL_SYSTEM_OUT, TraceSystem.DEFAULT_TRACE_LEVEL_SYSTEM_OUT); - this.cacheType = StringUtils.toUpperEnglish( - ci.removeProperty("CACHE_TYPE", Constants.CACHE_TYPE_DEFAULT)); - openDatabase(traceLevelFile, traceLevelSystemOut, closeAtVmShutdown, ci); - } - - private void openDatabase(int traceLevelFile, int traceLevelSystemOut, - boolean closeAtVmShutdown, ConnectionInfo ci) { + this.cacheType = StringUtils.toUpperEnglish(ci.removeProperty("CACHE_TYPE", Constants.CACHE_TYPE_DEFAULT)); + this.ignoreCatalogs = ci.getProperty("IGNORE_CATALOGS", dbSettings.ignoreCatalogs); + this.lockMode = ci.getProperty("LOCK_MODE", Constants.DEFAULT_LOCK_MODE); + String traceFile; + if (persistent) { + if (readOnly) { + if (traceLevelFile >= TraceSystem.DEBUG) { + traceFile = Utils.getProperty("java.io.tmpdir", ".") + "/h2_" + System.currentTimeMillis() + + Constants.SUFFIX_TRACE_FILE; + } else { + traceFile = null; + } + } else { + traceFile = databaseName + Constants.SUFFIX_TRACE_FILE; + } + } else { + traceFile = null; + } + traceSystem = new TraceSystem(traceFile); + traceSystem.setLevelFile(traceLevelFile); + traceSystem.setLevelSystemOut(traceLevelSystemOut); + trace = traceSystem.getTrace(Trace.DATABASE); + trace.info("opening {0} (build {1})", databaseName, Constants.BUILD_ID); try { - open(traceLevelFile, traceLevelSystemOut, ci); + if (autoServerMode && (readOnly || !persistent || fileLockMethod == FileLockMethod.NO + || fileLockMethod == FileLockMethod.FS)) { + throw DbException.getUnsupportedException( + "AUTO_SERVER=TRUE && (readOnly || inMemory || FILE_LOCK=NO || FILE_LOCK=FS)"); + } + if 
(persistent) { + String lockFileName = databaseName + Constants.SUFFIX_LOCK_FILE; + if (readOnly) { + if (FileUtils.exists(lockFileName)) { + throw DbException.get(ErrorCode.DATABASE_ALREADY_OPEN_1, "Lock file exists: " + lockFileName); + } + } else if (fileLockMethod != FileLockMethod.NO && fileLockMethod != FileLockMethod.FS) { + lock = new FileLock(traceSystem, lockFileName, Constants.LOCK_SLEEP); + lock.lock(fileLockMethod); + if (autoServerMode) { + startServer(lock.getUniqueId()); + } + } + deleteOldTempFiles(); + } + starting = true; + if (dbSettings.mvStore) { + store = new Store(this); + } else { + throw new UnsupportedOperationException(); + } + starting = false; + systemUser = new User(this, 0, SYSTEM_USER_NAME, true); + systemUser.setAdmin(true); + mainSchema = new Schema(this, Constants.MAIN_SCHEMA_ID, sysIdentifier(Constants.SCHEMA_MAIN), systemUser, + true); + infoSchema = new InformationSchema(this, systemUser); + schemas.put(mainSchema.getName(), mainSchema); + schemas.put(infoSchema.getName(), infoSchema); + if (mode.getEnum() == ModeEnum.PostgreSQL) { + pgCatalogSchema = new PgCatalogSchema(this, systemUser); + schemas.put(pgCatalogSchema.getName(), pgCatalogSchema); + } else { + pgCatalogSchema = null; + } + publicRole = new Role(this, 0, sysIdentifier(Constants.PUBLIC_ROLE_NAME), true); + usersAndRoles.put(publicRole.getName(), publicRole); + systemSession = createSession(systemUser); + lobSession = createSession(systemUser); + Set settingKeys = dbSettings.getSettings().keySet(); + store.getTransactionStore().init(lobSession); + settingKeys.removeIf(name -> name.startsWith("PAGE_STORE_")); + CreateTableData data = createSysTableData(); + starting = true; + meta = mainSchema.createTable(data); + IndexColumn[] pkCols = IndexColumn.wrap(new Column[] { data.columns.get(0) }); + metaIdIndex = meta.addIndex(systemSession, "SYS_ID", 0, pkCols, 1, + IndexType.createPrimaryKey(false, false), true, null); + systemSession.commit(true); + objectIds.set(0); 
+ executeMeta(); + systemSession.commit(true); + store.getTransactionStore().endLeftoverTransactions(); + store.removeTemporaryMaps(objectIds); + recompileInvalidViews(); + starting = false; + if (!readOnly) { + // set CREATE_BUILD in a new database + String settingName = SetTypes.getTypeName(SetTypes.CREATE_BUILD); + Setting setting = settings.get(settingName); + if (setting == null) { + setting = new Setting(this, allocateObjectId(), settingName); + setting.setIntValue(Constants.BUILD_ID); + lockMeta(systemSession); + addDatabaseObject(systemSession, setting); + } + } + lobStorage = new LobStorageMap(this); + lobSession.commit(true); + systemSession.commit(true); + trace.info("opened {0}", databaseName); + if (persistent) { + int writeDelay = ci.getProperty("WRITE_DELAY", Constants.DEFAULT_WRITE_DELAY); + setWriteDelay(writeDelay); + } if (closeAtVmShutdown) { OnExitDatabaseCloser.register(this); } @@ -325,21 +387,17 @@ private void openDatabase(int traceLevelFile, int traceLevelSystemOut, if (e instanceof OutOfMemoryError) { e.fillInStackTrace(); } - boolean alreadyOpen = e instanceof DbException - && ((DbException) e).getErrorCode() == ErrorCode.DATABASE_ALREADY_OPEN_1; - if (alreadyOpen) { - stopServer(); - } - - if (traceSystem != null) { - if (e instanceof DbException && !alreadyOpen) { + if (e instanceof DbException) { + if (((DbException) e).getErrorCode() == ErrorCode.DATABASE_ALREADY_OPEN_1) { + stopServer(); + } else { // only write if the database is not already in use trace.error(e, "opening {0}", databaseName); } - traceSystem.close(); } - closeOpenFilesAndUnlock(false); - } catch(Throwable ex) { + traceSystem.close(); + closeOpenFilesAndUnlock(); + } catch (Throwable ex) { e.addSuppressed(ex); } throw DbException.convert(e); @@ -347,22 +405,10 @@ private void openDatabase(int traceLevelFile, int traceLevelSystemOut, } public int getLockTimeout() { - Setting setting = findSetting( - SetTypes.getTypeName(SetTypes.DEFAULT_LOCK_TIMEOUT)); + Setting 
setting = findSetting(SetTypes.getTypeName(SetTypes.DEFAULT_LOCK_TIMEOUT)); return setting == null ? Constants.INITIAL_LOCK_TIMEOUT : setting.getIntValue(); } - /** - * Create a new row for a table. - * - * @param data the values - * @param memory whether the row is in memory - * @return the created row - */ - public Row createRow(Value[] data, int memory) { - return rowFactory.createRow(data, memory); - } - public RowFactory getRowFactory() { return rowFactory; } @@ -371,14 +417,6 @@ public void setRowFactory(RowFactory rowFactory) { this.rowFactory = rowFactory; } - public LocalResultFactory getResultFactory() { - return resultFactory; - } - - public void setResultFactory(LocalResultFactory resultFactory) { - this.resultFactory = resultFactory; - } - public static void setInitialPowerOffCount(int count) { initialPowerOffCount = count; } @@ -390,145 +428,14 @@ public void setPowerOffCount(int count) { powerOffCount = count; } - public MVTableEngine.Store getStore() { + public Store getStore() { return store; } - public void setStore(MVTableEngine.Store store) { - this.store = store; - this.retentionTime = store.getMvStore().getRetentionTime(); - } - - /** - * Check if two values are equal with the current comparison mode. - * - * @param a the first value - * @param b the second value - * @return true if both objects are equal - */ - public boolean areEqual(Value a, Value b) { - // can not use equals because ValueDecimal 0.0 is not equal to 0.00. - return a.compareTo(b, mode, compareMode) == 0; - } - - /** - * Compare two values with the current comparison mode. The values may have - * different data types including NULL. - * - * @param a the first value - * @param b the second value - * @return 0 if both values are equal, -1 if the first value is smaller, and - * 1 otherwise - */ - public int compare(Value a, Value b) { - return a.compareTo(b, mode, compareMode); - } - - /** - * Compare two values with the current comparison mode. 
The values may have - * different data types including NULL. - * - * @param a the first value - * @param b the second value - * @param forEquality perform only check for equality (= or <>) - * @return 0 if both values are equal, -1 if the first value is smaller, 1 - * if the second value is larger, {@link Integer#MIN_VALUE} if order - * is not defined due to NULL comparison - */ - public int compareWithNull(Value a, Value b, boolean forEquality) { - return a.compareWithNull(b, forEquality, mode, compareMode); - } - - /** - * Compare two values with the current comparison mode. The values must be - * of the same type. - * - * @param a the first value - * @param b the second value - * @return 0 if both values are equal, -1 if the first value is smaller, and - * 1 otherwise - */ - public int compareTypeSafe(Value a, Value b) { - return a.compareTypeSafe(b, compareMode); - } - public long getModificationDataId() { return modificationDataId.get(); } - /** - * Set or reset the pending change flag in the .lock.db file. - * - * @param pending the new value of the flag - * @return true if the call was successful, - * false if another connection was faster - */ - private synchronized boolean reconnectModified(boolean pending) { - if (readOnly || lock == null || - fileLockMethod != FileLockMethod.SERIALIZED) { - return true; - } - try { - if (pending == reconnectChangePending) { - long now = System.nanoTime(); - if (now > reconnectCheckNext) { - if (pending) { - String pos = pageStore == null ? 
- null : Long.toString(pageStore.getWriteCountTotal()); - lock.setProperty("logPos", pos); - lock.save(); - } - reconnectCheckNext = now + reconnectCheckDelayNs; - } - return true; - } - Properties old = lock.load(); - if (pending) { - if (old.getProperty("changePending") != null) { - return false; - } - trace.debug("wait before writing"); - Thread.sleep(TimeUnit.NANOSECONDS.toMillis((long) (reconnectCheckDelayNs * 1.1))); - Properties now = lock.load(); - if (!now.equals(old)) { - // somebody else was faster - return false; - } - } - String pos = pageStore == null ? - null : Long.toString(pageStore.getWriteCountTotal()); - lock.setProperty("logPos", pos); - if (pending) { - lock.setProperty("changePending", "true-" + Math.random()); - } else { - lock.setProperty("changePending", null); - } - // ensure that the writer thread will - // not reset the flag before we are done - reconnectCheckNext = System.nanoTime() + - 2 * reconnectCheckDelayNs; - old = lock.save(); - if (pending) { - trace.debug("wait before writing again"); - Thread.sleep(TimeUnit.NANOSECONDS.toMillis((long) (reconnectCheckDelayNs * 1.1))); - Properties now = lock.load(); - if (!now.equals(old)) { - // somebody else was faster - return false; - } - } else { - Thread.sleep(1); - } - reconnectLastLock = old; - reconnectChangePending = pending; - reconnectCheckNext = System.nanoTime() + reconnectCheckDelayNs; - return true; - } catch (Exception e) { - trace.error(e, "pending {0}", pending); - return false; - } - } - public long getNextModificationDataId() { return modificationDataId.incrementAndGet(); } @@ -544,15 +451,26 @@ public long getNextModificationMetaId() { return modificationMetaId.incrementAndGet() - 1; } + public long getRemoteSettingsId() { + return remoteSettingsId.get(); + } + + public long getNextRemoteSettingsId() { + return remoteSettingsId.incrementAndGet(); + } + public int getPowerOffCount() { return powerOffCount; } @Override public void checkPowerOff() { - if (powerOffCount == 0) 
{ - return; + if (powerOffCount != 0) { + checkPowerOff2(); } + } + + private void checkPowerOff2() { if (powerOffCount > 1) { powerOffCount--; return; @@ -560,26 +478,11 @@ public void checkPowerOff() { if (powerOffCount != -1) { try { powerOffCount = -1; - stopWriter(); - if (store != null) { - store.closeImmediately(); - } - synchronized(this) { - if (pageStore != null) { - try { - pageStore.close(); - } catch (DbException e) { - // ignore - } - pageStore = null; - } - } + store.closeImmediately(); if (lock != null) { stopServer(); - if (fileLockMethod != FileLockMethod.SERIALIZED) { - // allow testing shutdown - lock.unlock(); - } + // allow testing shutdown + lock.unlock(); lock = null; } if (traceSystem != null) { @@ -589,23 +492,10 @@ public void checkPowerOff() { DbException.traceThrowable(e); } } - Engine.getInstance().close(databaseName); + Engine.close(databaseName); throw DbException.get(ErrorCode.DATABASE_IS_CLOSED); } - /** - * Check if a database with the given name exists. - * - * @param name the name of the database (including path) - * @return true if one exists - */ - static boolean exists(String name) { - if (FileUtils.exists(name + Constants.SUFFIX_PAGE_FILE)) { - return true; - } - return FileUtils.exists(name + Constants.SUFFIX_MV_FILE); - } - /** * Get the trace object for the given module id. * @@ -648,302 +538,126 @@ boolean validateFilePasswordHash(String testCipher, byte[] testHash) { private String parseDatabaseShortName() { String n = databaseName; - if (n.endsWith(":")) { - n = null; - } - if (n != null) { - StringTokenizer tokenizer = new StringTokenizer(n, "/\\:,;"); - while (tokenizer.hasMoreTokens()) { - n = tokenizer.nextToken(); + int l = n.length(), i = l; + loop: while (--i >= 0) { + char ch = n.charAt(i); + switch (ch) { + case '/': + case ':': + case '\\': + break loop; } } - if (n == null || n.isEmpty()) { - n = "unnamed"; - } - return dbSettings.databaseToUpper ? 
StringUtils.toUpperEnglish(n) - : dbSettings.databaseToLower ? StringUtils.toLowerEnglish(n) : n; + n = ++i == l ? "UNNAMED" : n.substring(i); + return StringUtils.truncateString( + dbSettings.databaseToUpper ? StringUtils.toUpperEnglish(n) + : dbSettings.databaseToLower ? StringUtils.toLowerEnglish(n) : n, + Constants.MAX_IDENTIFIER_LENGTH); } - private synchronized void open(int traceLevelFile, int traceLevelSystemOut, ConnectionInfo ci) { - if (persistent) { - String dataFileName = databaseName + Constants.SUFFIX_OLD_DATABASE_FILE; - boolean existsData = FileUtils.exists(dataFileName); - String pageFileName = databaseName + Constants.SUFFIX_PAGE_FILE; - String mvFileName = databaseName + Constants.SUFFIX_MV_FILE; - boolean existsPage = FileUtils.exists(pageFileName); - boolean existsMv = FileUtils.exists(mvFileName); - if (existsData && (!existsPage && !existsMv)) { - throw DbException.get( - ErrorCode.FILE_VERSION_ERROR_1, "Old database: " + - dataFileName + - " - please convert the database " + - "to a SQL script and re-create it."); - } - if (existsPage && !FileUtils.canWrite(pageFileName)) { - readOnly = true; - } - if (existsMv && !FileUtils.canWrite(mvFileName)) { - readOnly = true; - } - if (existsPage && !existsMv) { - dbSettings.mvStore = false; - // Need to re-init this because the first time we do it we don't - // know if we have an mvstore or a pagestore. 
- multiThreaded = ci.getProperty("MULTI_THREADED", false); - } - if (readOnly) { - if (traceLevelFile >= TraceSystem.DEBUG) { - String traceFile = Utils.getProperty("java.io.tmpdir", ".") + - "/" + "h2_" + System.currentTimeMillis(); - traceSystem = new TraceSystem(traceFile + - Constants.SUFFIX_TRACE_FILE); - } else { - traceSystem = new TraceSystem(null); - } - } else { - traceSystem = new TraceSystem(databaseName + - Constants.SUFFIX_TRACE_FILE); - } - traceSystem.setLevelFile(traceLevelFile); - traceSystem.setLevelSystemOut(traceLevelSystemOut); - trace = traceSystem.getTrace(Trace.DATABASE); - trace.info("opening {0} (build {1})", databaseName, Constants.BUILD_ID); - if (autoServerMode) { - if (readOnly || - fileLockMethod == FileLockMethod.NO || - fileLockMethod == FileLockMethod.SERIALIZED || - fileLockMethod == FileLockMethod.FS) { - throw DbException.getUnsupportedException( - "autoServerMode && (readOnly || " + - "fileLockMethod == NO || " + - "fileLockMethod == SERIALIZED || " + - "fileLockMethod == FS || " + - "inMemory)"); - } - } - String lockFileName = databaseName + Constants.SUFFIX_LOCK_FILE; - if (readOnly) { - if (FileUtils.exists(lockFileName)) { - throw DbException.get(ErrorCode.DATABASE_ALREADY_OPEN_1, - "Lock file exists: " + lockFileName); - } - } - if (!readOnly && fileLockMethod != FileLockMethod.NO) { - if (fileLockMethod != FileLockMethod.FS) { - lock = new FileLock(traceSystem, lockFileName, Constants.LOCK_SLEEP); - lock.lock(fileLockMethod); - if (autoServerMode) { - startServer(lock.getUniqueId()); - } - } - } - if (SysProperties.MODIFY_ON_WRITE) { - while (isReconnectNeeded()) { - // wait until others stopped writing - } - } else { - while (isReconnectNeeded() && !beforeWriting()) { - // wait until others stopped writing and - // until we can write (the file is not yet open - - // no need to re-connect) - } - } - deleteOldTempFiles(); - starting = true; - if (SysProperties.MODIFY_ON_WRITE) { - try { - getPageStore(); - } catch 
(DbException e) { - if (e.getErrorCode() != ErrorCode.DATABASE_IS_READ_ONLY) { - throw e; - } - pageStore = null; - while (!beforeWriting()) { - // wait until others stopped writing and - // until we can write (the file is not yet open - - // no need to re-connect) - } - getPageStore(); - } - } else { - getPageStore(); - } - starting = false; - if (store == null) { - writer = WriterThread.create(this, writeDelay); - } else { - setWriteDelay(writeDelay); - } - } else { - if (autoServerMode) { - throw DbException.getUnsupportedException( - "autoServerMode && inMemory"); - } - traceSystem = new TraceSystem(null); - trace = traceSystem.getTrace(Trace.DATABASE); - if (dbSettings.mvStore) { - getPageStore(); - } - } - if(store != null) { - store.getTransactionStore().init(); - } - systemUser = new User(this, 0, SYSTEM_USER_NAME, true); - mainSchema = new Schema(this, Constants.MAIN_SCHEMA_ID, sysIdentifier(Constants.SCHEMA_MAIN), systemUser, - true); - infoSchema = new Schema(this, Constants.INFORMATION_SCHEMA_ID, sysIdentifier("INFORMATION_SCHEMA"), systemUser, - true); - schemas.put(mainSchema.getName(), mainSchema); - schemas.put(infoSchema.getName(), infoSchema); - publicRole = new Role(this, 0, sysIdentifier(Constants.PUBLIC_ROLE_NAME), true); - roles.put(publicRole.getName(), publicRole); - systemUser.setAdmin(true); - systemSession = new Session(this, systemUser, ++nextSessionId); - lobSession = new Session(this, systemUser, ++nextSessionId); + private CreateTableData createSysTableData() { CreateTableData data = new CreateTableData(); ArrayList cols = data.columns; - Column columnId = new Column("ID", Value.INT); + Column columnId = new Column("ID", TypeInfo.TYPE_INTEGER); columnId.setNullable(false); cols.add(columnId); - cols.add(new Column("HEAD", Value.INT)); - cols.add(new Column("TYPE", Value.INT)); - cols.add(new Column("SQL", Value.STRING)); - boolean create = true; - if (pageStore != null) { - create = pageStore.isNew(); - } + cols.add(new Column("HEAD", 
TypeInfo.TYPE_INTEGER)); + cols.add(new Column("TYPE", TypeInfo.TYPE_INTEGER)); + cols.add(new Column("SQL", TypeInfo.TYPE_VARCHAR)); data.tableName = "SYS"; data.id = 0; data.temporary = false; data.persistData = persistent; data.persistIndexes = persistent; - data.create = create; data.isHidden = true; data.session = systemSession; - starting = true; - meta = mainSchema.createTable(data); - handleUpgradeIssues(); - IndexColumn[] pkCols = IndexColumn.wrap(new Column[] { columnId }); - metaIdIndex = meta.addIndex(systemSession, "SYS_ID", - 0, pkCols, IndexType.createPrimaryKey( - false, false), true, null); - systemSession.commit(true); - objectIds.set(0); + return data; + } + + private void executeMeta() { Cursor cursor = metaIdIndex.find(systemSession, null, null); - ArrayList records = new ArrayList<>((int) metaIdIndex.getRowCountApproximation()); + ArrayList firstRecords = new ArrayList<>(), domainRecords = new ArrayList<>(), + middleRecords = new ArrayList<>(), constraintRecords = new ArrayList<>(), + lastRecords = new ArrayList<>(); while (cursor.next()) { MetaRecord rec = new MetaRecord(cursor.get()); objectIds.set(rec.getId()); - records.add(rec); + switch (rec.getObjectType()) { + case DbObject.SETTING: + case DbObject.USER: + case DbObject.SCHEMA: + case DbObject.FUNCTION_ALIAS: + firstRecords.add(rec); + break; + case DbObject.DOMAIN: + domainRecords.add(rec); + break; + case DbObject.SEQUENCE: + case DbObject.CONSTANT: + case DbObject.TABLE_OR_VIEW: + case DbObject.INDEX: + middleRecords.add(rec); + break; + case DbObject.CONSTRAINT: + constraintRecords.add(rec); + break; + default: + lastRecords.add(rec); + } } - Collections.sort(records); synchronized (systemSession) { - for (MetaRecord rec : records) { - rec.execute(this, systemSession, eventListener); + executeMeta(firstRecords); + // Domains may depend on other domains + int count = domainRecords.size(); + if (count > 0) { + for (int j = 0;; count = j) { + DbException exception = null; + for (int i 
= 0; i < count; i++) { + MetaRecord rec = domainRecords.get(i); + try { + rec.prepareAndExecute(this, systemSession, eventListener); + } catch (DbException ex) { + if (exception == null) { + exception = ex; + } + domainRecords.set(j++, rec); + } + } + if (exception == null) { + break; + } + if (count == j) { + throw exception; + } + } } - } - systemSession.commit(true); - if (store != null) { - store.getTransactionStore().endLeftoverTransactions(); - store.removeTemporaryMaps(objectIds); - } - recompileInvalidViews(systemSession); - starting = false; - if (!readOnly) { - // set CREATE_BUILD in a new database - String name = SetTypes.getTypeName(SetTypes.CREATE_BUILD); - if (settings.get(name) == null) { - Setting setting = new Setting(this, allocateObjectId(), name); - setting.setIntValue(Constants.BUILD_ID); - lockMeta(systemSession); - addDatabaseObject(systemSession, setting); - } - setSortSetting(SetTypes.BINARY_COLLATION, SysProperties.SORT_BINARY_UNSIGNED, true); - setSortSetting(SetTypes.UUID_COLLATION, SysProperties.SORT_UUID_UNSIGNED, false); - // mark all ids used in the page store - if (pageStore != null) { - BitSet f = pageStore.getObjectIds(); - for (int i = 0, len = f.length(); i < len; i++) { - if (f.get(i) && !objectIds.get(i)) { - trace.info("unused object id: " + i); - objectIds.set(i); + executeMeta(middleRecords); + // Prepare, but don't create all constraints and sort them + count = constraintRecords.size(); + if (count > 0) { + ArrayList constraints = new ArrayList<>(count); + for (int i = 0; i < count; i++) { + Prepared prepared = constraintRecords.get(i).prepare(this, systemSession, eventListener); + if (prepared != null) { + constraints.add(prepared); } } + constraints.sort(MetaRecord.CONSTRAINTS_COMPARATOR); + // Create constraints in order (unique and primary key before + // all others) + for (Prepared constraint : constraints) { + MetaRecord.execute(this, constraint, eventListener, constraint.getSQL()); + } } - } - 
getLobStorage().init(); - systemSession.commit(true); - - trace.info("opened {0}", databaseName); - if (checkpointAllowed > 0) { - afterWriting(); + executeMeta(lastRecords); } } - /** - * Preserves a current default value of a sorting setting if it is not the - * same as default for older versions of H2 and if it was not modified by - * user. - * - * @param type - * setting type - * @param defValue - * current default value (may be modified via system properties) - * @param oldDefault - * default value for old versions - */ - private void setSortSetting(int type, boolean defValue, boolean oldDefault) { - if (defValue == oldDefault) { - return; - } - String name = SetTypes.getTypeName(type); - if (settings.get(name) == null) { - Setting setting = new Setting(this, allocateObjectId(), name); - setting.setStringValue(defValue ? CompareMode.UNSIGNED : CompareMode.SIGNED); - lockMeta(systemSession); - addDatabaseObject(systemSession, setting); - } - } - - private void handleUpgradeIssues() { - if (store != null && !isReadOnly()) { - MVStore mvStore = store.getMvStore(); - // Version 1.4.197 erroneously handles index on SYS_ID.ID as secondary - // and does not delegate to scan index as it should. - // This code will try to fix that by converging ROW_ID and ID, - // since they may have got out of sync, and by removing map "index.0", - // which corresponds to a secondary index. 
- if (mvStore.hasMap("index.0")) { - Index scanIndex = meta.getScanIndex(systemSession); - Cursor curs = scanIndex.find(systemSession, null, null); - List allMetaRows = new ArrayList<>(); - boolean needRepair = false; - while (curs.next()) { - Row row = curs.get(); - allMetaRows.add(row); - long rowId = row.getKey(); - int id = row.getValue(0).getInt(); - if (id != rowId) { - needRepair = true; - row.setKey(id); - } - } - if (needRepair) { - Row[] array = allMetaRows.toArray(new Row[0]); - Arrays.sort(array, new Comparator() { - @Override - public int compare(Row o1, Row o2) { - return Integer.compare(o1.getValue(0).getInt(), o2.getValue(0).getInt()); - } - }); - meta.truncate(systemSession); - for (Row row : array) { - meta.addRow(systemSession, row); - } - systemSession.commit(true); - } - mvStore.removeMap("index.0"); - mvStore.commit(); + private void executeMeta(ArrayList records) { + if (!records.isEmpty()) { + records.sort(null); + for (MetaRecord rec : records) { + rec.prepareAndExecute(this, systemSession, eventListener); } } } @@ -978,54 +692,52 @@ private void stopServer() { } } - private void recompileInvalidViews(Session session) { + private void recompileInvalidViews() { boolean atLeastOneRecompiledSuccessfully; do { atLeastOneRecompiledSuccessfully = false; - for (Table obj : getAllTablesAndViews(false)) { - if (obj instanceof TableView) { - TableView view = (TableView) obj; - if (view.isInvalid()) { - view.recompile(session, true, false); - if (!view.isInvalid()) { - atLeastOneRecompiledSuccessfully = true; + for (Schema schema : schemas.values()) { + for (Table obj : schema.getAllTablesAndViews(null)) { + if (obj instanceof TableView) { + TableView view = (TableView) obj; + if (view.isInvalid()) { + view.recompile(systemSession, true, false); + if (!view.isInvalid()) { + atLeastOneRecompiledSuccessfully = true; + } } } } } } while (atLeastOneRecompiledSuccessfully); - TableView.clearIndexCaches(session.getDatabase()); - } - - private void 
initMetaTables() { - if (metaTablesInitialized) { - return; - } - synchronized (infoSchema) { - if (!metaTablesInitialized) { - for (int type = 0, count = MetaTable.getMetaTableTypeCount(); - type < count; type++) { - MetaTable m = new MetaTable(infoSchema, -1 - type, type); - infoSchema.add(m); - } - metaTablesInitialized = true; - } - } + TableView.clearIndexCaches(this); } - private void addMeta(Session session, DbObject obj) { + private void addMeta(SessionLocal session, DbObject obj) { assert Thread.holdsLock(this); int id = obj.getId(); - if (id > 0 && !starting && !obj.isTemporary()) { - Row r = meta.getTemplateRow(); - MetaRecord.populateRowFromDBObject(obj, r); - synchronized (objectIds) { - objectIds.set(id); - } - if (SysProperties.CHECK) { - verifyMetaLocked(session); + if (id > 0 && !obj.isTemporary()) { + if (!isReadOnly()) { + Row r = meta.getTemplateRow(); + MetaRecord.populateRowFromDBObject(obj, r); + assert objectIds.get(id); + if (SysProperties.CHECK) { + verifyMetaLocked(session); + } + Cursor cursor = metaIdIndex.find(session, r, r); + if (!cursor.next()) { + meta.addRow(session, r); + } else { + assert starting; + Row oldRow = cursor.get(); + MetaRecord rec = new MetaRecord(oldRow); + assert rec.getId() == obj.getId(); + assert rec.getObjectType() == obj.getType(); + if (!rec.getSQL().equals(obj.getCreateSQLForMeta())) { + meta.updateRow(session, oldRow, r); + } + } } - meta.addRow(session, r); } } @@ -1034,10 +746,9 @@ private void addMeta(Session session, DbObject obj) { * * @param session the session */ - public void verifyMetaLocked(Session session) { - if (meta != null && !meta.isLockedExclusivelyBy(session) - && lockMode != Constants.LOCK_MODE_OFF) { - throw DbException.throwInternalError(); + public void verifyMetaLocked(SessionLocal session) { + if (lockMode != Constants.LOCK_MODE_OFF && meta != null && !meta.isLockedExclusivelyBy(session)) { + throw DbException.getInternalError(); } } @@ -1047,7 +758,7 @@ public void 
verifyMetaLocked(Session session) { * @param session the session * @return whether it was already locked before by this session */ - public boolean lockMeta(Session session) { + public boolean lockMeta(SessionLocal session) { // this method can not be synchronized on the database object, // as unlocking is also synchronized on the database object - // so if locking starts just before unlocking, locking could @@ -1056,27 +767,29 @@ public boolean lockMeta(Session session) { return true; } if (ASSERT) { - // If we are locking two different databases in the same stack, just ignore it. - // This only happens in TestLinkedTable where we connect to another h2 DB in the - // same process. - if (META_LOCK_DEBUGGING_DB.get() != null - && META_LOCK_DEBUGGING_DB.get() != this) { - final Session prev = META_LOCK_DEBUGGING.get(); - if (prev == null) { - META_LOCK_DEBUGGING.set(session); - META_LOCK_DEBUGGING_DB.set(this); - META_LOCK_DEBUGGING_STACK.set(new Throwable("Last meta lock granted in this stack trace, "+ - "this is debug information for following IllegalStateException")); - } else if (prev != session) { - META_LOCK_DEBUGGING_STACK.get().printStackTrace(); - throw new IllegalStateException("meta currently locked by " - + prev +", sessionid="+ prev.getId() - + " and trying to be locked by different session, " - + session +", sessionid="+ session.getId() + " on same thread"); - } + lockMetaAssertion(session); + } + return meta.lock(session, Table.EXCLUSIVE_LOCK); + } + + private void lockMetaAssertion(SessionLocal session) { + // If we are locking two different databases in the same stack, just ignore it. + // This only happens in TestLinkedTable where we connect to another h2 DB in the + // same process. 
+ if (META_LOCK_DEBUGGING_DB.get() != null && META_LOCK_DEBUGGING_DB.get() != this) { + final SessionLocal prev = META_LOCK_DEBUGGING.get(); + if (prev == null) { + META_LOCK_DEBUGGING.set(session); + META_LOCK_DEBUGGING_DB.set(this); + META_LOCK_DEBUGGING_STACK.set(new Throwable("Last meta lock granted in this stack trace, " + + "this is debug information for following IllegalStateException")); + } else if (prev != session) { + META_LOCK_DEBUGGING_STACK.get().printStackTrace(); + throw new IllegalStateException("meta currently locked by " + prev + ", sessionid=" + prev.getId() + + " and trying to be locked by different session, " + session + ", sessionid=" // + + session.getId() + " on same thread"); } } - return meta.lock(session, true, true); } /** @@ -1084,7 +797,7 @@ public boolean lockMeta(Session session) { * * @param session the session */ - public void unlockMeta(Session session) { + public void unlockMeta(SessionLocal session) { if (meta != null) { unlockMetaDebug(session); meta.unlock(session); @@ -1098,7 +811,7 @@ public void unlockMeta(Session session) { * * @param session the session */ - public void unlockMetaDebug(Session session) { + static void unlockMetaDebug(SessionLocal session) { if (ASSERT) { if (META_LOCK_DEBUGGING.get() == session) { META_LOCK_DEBUGGING.set(null); @@ -1114,17 +827,14 @@ public void unlockMetaDebug(Session session) { * @param session the session * @param id the id of the object to remove */ - public void removeMeta(Session session, int id) { + public void removeMeta(SessionLocal session, int id) { if (id > 0 && !starting) { - SearchRow r = meta.getTemplateSimpleRow(false); - r.setValue(0, ValueInt.get(id)); + SearchRow r = meta.getRowFactory().createRow(); + r.setValue(0, ValueInteger.get(id)); boolean wasLocked = lockMeta(session); try { Cursor cursor = metaIdIndex.find(session, r, r); if (cursor.next()) { - if (lockMode != Constants.LOCK_MODE_OFF && !wasLocked) { - throw DbException.throwInternalError(); - } Row found = 
cursor.get(); meta.removeRow(session, found); if (SysProperties.CHECK) { @@ -1138,19 +848,11 @@ public void removeMeta(Session session, int id) { unlockMeta(session); } } - if (isMVStore()) { - // release of the object id has to be postponed until the end of the transaction, - // otherwise it might be re-used prematurely, and it would make - // rollback impossible or lead to MVMaps name collision, - // so until then ids are accumulated within session - session.scheduleDatabaseObjectIdForRelease(id); - } else { - // but PageStore, on the other hand, for reasons unknown to me, - // requires immediate id release - synchronized (this) { - objectIds.clear(id); - } - } + // release of the object id has to be postponed until the end of the transaction, + // otherwise it might be re-used prematurely, and it would make + // rollback impossible or lead to MVMaps name collision, + // so until then ids are accumulated within session + session.scheduleDatabaseObjectIdForRelease(id); } } @@ -1158,44 +860,36 @@ public void removeMeta(Session session, int id) { * Mark some database ids as unused. 
* @param idsToRelease the ids to release */ - void releaseDatabaseObjectIds(BitSet idsToRelease) { + public void releaseDatabaseObjectIds(BitSet idsToRelease) { synchronized (objectIds) { objectIds.andNot(idsToRelease); } } @SuppressWarnings("unchecked") - private HashMap getMap(int type) { - HashMap result; + private Map getMap(int type) { + Map result; switch (type) { case DbObject.USER: - result = users; + case DbObject.ROLE: + result = usersAndRoles; break; case DbObject.SETTING: result = settings; break; - case DbObject.ROLE: - result = roles; - break; case DbObject.RIGHT: result = rights; break; case DbObject.SCHEMA: result = schemas; break; - case DbObject.DOMAIN: - result = domains; - break; case DbObject.COMMENT: result = comments; break; - case DbObject.AGGREGATE: - result = aggregates; - break; default: - throw DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } - return (HashMap) result; + return (Map) result; } /** @@ -1204,7 +898,7 @@ private HashMap getMap(int type) { * @param session the session * @param obj the object to add */ - public void addSchemaObject(Session session, SchemaObject obj) { + public void addSchemaObject(SessionLocal session, SchemaObject obj) { int id = obj.getId(); if (id > 0 && !starting) { checkWritingAllowed(); @@ -1222,12 +916,12 @@ public void addSchemaObject(Session session, SchemaObject obj) { * @param session the session * @param obj the object to add */ - public synchronized void addDatabaseObject(Session session, DbObject obj) { + public synchronized void addDatabaseObject(SessionLocal session, DbObject obj) { int id = obj.getId(); if (id > 0 && !starting) { checkWritingAllowed(); } - HashMap map = getMap(obj.getType()); + Map map = getMap(obj.getType()); if (obj.getType() == DbObject.USER) { User user = (User) obj; if (user.isAdmin() && systemUser.getName().equals(SYSTEM_USER_NAME)) { @@ -1236,23 +930,13 @@ public synchronized void addDatabaseObject(Session session, 
DbObject obj) { } String name = obj.getName(); if (SysProperties.CHECK && map.get(name) != null) { - DbException.throwInternalError("object already exists"); + throw DbException.getInternalError("object already exists"); } lockMeta(session); addMeta(session, obj); map.put(name, obj); } - /** - * Get the user defined aggregate function if it exists, or null if not. - * - * @param name the name of the user defined aggregate function - * @return the aggregate function or null - */ - public UserAggregate findAggregate(String name) { - return aggregates.get(name); - } - /** * Get the comment for the given database object if one exists, or null if * not. @@ -1275,7 +959,8 @@ public Comment findComment(DbObject object) { * @return the role or null */ public Role findRole(String roleName) { - return roles.get(StringUtils.toUpperEnglish(roleName)); + RightOwner rightOwner = findUserOrRole(roleName); + return rightOwner instanceof Role ? (Role) rightOwner : null; } /** @@ -1285,11 +970,10 @@ public Role findRole(String roleName) { * @return the schema or null */ public Schema findSchema(String schemaName) { - Schema schema = schemas.get(schemaName); - if (schema == infoSchema) { - initMetaTables(); + if (schemaName == null) { + return null; } - return schema; + return schemas.get(schemaName); } /** @@ -1309,17 +993,8 @@ public Setting findSetting(String name) { * @return the user or null */ public User findUser(String name) { - return users.get(StringUtils.toUpperEnglish(name)); - } - - /** - * Get the domain if it exists, or null if not. - * - * @param name the name of the domain - * @return the domain or null - */ - public Domain findDomain(String name) { - return domains.get(name); + RightOwner rightOwner = findUserOrRole(name); + return rightOwner instanceof User ? (User) rightOwner : null; } /** @@ -1338,21 +1013,33 @@ public User getUser(String name) { return user; } + /** + * Get the user or role if it exists, or {@code null} if not. 
+ * + * @param name the name of the user or role + * @return the user, the role, or {@code null} + */ + public RightOwner findUserOrRole(String name) { + return usersAndRoles.get(StringUtils.toUpperEnglish(name)); + } + /** * Create a session for the given user. * * @param user the user + * @param networkConnectionInfo the network connection information, or {@code null} * @return the session, or null if the database is currently closing * @throws DbException if the database is in exclusive mode */ - synchronized Session createSession(User user) { + synchronized SessionLocal createSession(User user, NetworkConnectionInfo networkConnectionInfo) { if (closing) { return null; } if (exclusiveSession.get() != null) { throw DbException.get(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE); } - Session session = new Session(this, user, ++nextSessionId); + SessionLocal session = createSession(user); + session.setNetworkConnectionInfo(networkConnectionInfo); userSessions.add(session); trace.info("connecting session #{0} to {1}", session.getId(), databaseName); if (delayedCloser != null) { @@ -1362,44 +1049,84 @@ synchronized Session createSession(User user) { return session; } + private SessionLocal createSession(User user) { + int id = ++nextSessionId; + return new SessionLocal(this, user, id); + } + /** * Remove a session. This method is called after the user has disconnected. 
* * @param session the session */ - public synchronized void removeSession(Session session) { + public synchronized void removeSession(SessionLocal session) { if (session != null) { exclusiveSession.compareAndSet(session, null); - userSessions.remove(session); - if (session != systemSession && session != lobSession) { + if (userSessions.remove(session)) { trace.info("disconnecting session #{0}", session.getId()); } } - if (userSessions.isEmpty() && - session != systemSession && session != lobSession) { - if (closeDelay == 0) { - close(false); - } else if (closeDelay < 0) { - return; - } else { - delayedCloser = new DelayedDatabaseCloser(this, closeDelay * 1000); + if (isUserSession(session)) { + if (userSessions.isEmpty()) { + if (closeDelay == 0) { + close(false); + } else if (closeDelay < 0) { + return; + } else { + delayedCloser = new DelayedDatabaseCloser(this, closeDelay * 1000); + } + } + if (session != null) { + trace.info("disconnected session #{0}", session.getId()); } } - if (session != systemSession && - session != lobSession && session != null) { - trace.info("disconnected session #{0}", session.getId()); - } } - private synchronized void closeAllSessionsException(Session except) { - Session[] all = userSessions.toArray(EMPTY_SESSION_ARRAY); - for (Session s : all) { + boolean isUserSession(SessionLocal session) { + return session != systemSession && session != lobSession; + } + + private synchronized void closeAllSessionsExcept(SessionLocal except) { + SessionLocal[] all = userSessions.toArray(EMPTY_SESSION_ARRAY); + for (SessionLocal s : all) { if (s != except) { - try { - // this will rollback outstanding transaction - s.close(); - } catch (DbException e) { - trace.error(e, "disconnecting session #{0}", s.getId()); + // indicate that session need to be closed ASAP + s.suspend(); + } + } + + int timeout = 2 * getLockTimeout(); + long start = System.currentTimeMillis(); + // 'sleep' should be strictly greater than zero, otherwise real time is not taken 
into consideration + // and the thread simply waits until notified + long sleep = Math.max(timeout / 20, 1); + boolean done = false; + while (!done) { + try { + // although nobody going to notify us + // it is vital to give up lock on a database + wait(sleep); + } catch (InterruptedException e1) { + // ignore + } + if (System.currentTimeMillis() - start > timeout) { + for (SessionLocal s : all) { + if (s != except && !s.isClosed()) { + try { + // this will rollback outstanding transaction + s.close(); + } catch (Throwable e) { + trace.error(e, "disconnecting session #{0}", s.getId()); + } + } + } + break; + } + done = true; + for (SessionLocal s : all) { + if (s != except && !s.isClosed()) { + done = false; + break; } } } @@ -1428,72 +1155,62 @@ void close(boolean fromShutdownHook) { } private void closeImpl(boolean fromShutdownHook) { - try { - synchronized (this) { - if (closing) { - return; - } - if (fileLockMethod == FileLockMethod.SERIALIZED && - !reconnectChangePending) { - // another connection may have written something - don't write - try { - closeOpenFilesAndUnlock(false); - } catch (DbException e) { - // ignore - } - traceSystem.close(); - return; - } + synchronized (this) { + if (closing || !fromShutdownHook && !userSessions.isEmpty()) { + return; + } + closing = true; + stopServer(); + if (!userSessions.isEmpty()) { + assert fromShutdownHook; + trace.info("closing {0} from shutdown hook", databaseName); + closeAllSessionsExcept(null); + } + trace.info("closing {0}", databaseName); + if (eventListener != null) { + // allow the event listener to connect to the database + closing = false; + DatabaseEventListener e = eventListener; + // set it to null, to make sure it's called only once + eventListener = null; + e.closingDatabase(); closing = true; - stopServer(); if (!userSessions.isEmpty()) { - if (!fromShutdownHook) { - return; - } - trace.info("closing {0} from shutdown hook", databaseName); - closeAllSessionsException(null); - } - trace.info("closing 
{0}", databaseName); - if (eventListener != null) { - // allow the event listener to connect to the database - closing = false; - DatabaseEventListener e = eventListener; - // set it to null, to make sure it's called only once - eventListener = null; - e.closingDatabase(); - if (!userSessions.isEmpty()) { - // if a connection was opened, we can't close the database - return; - } - closing = true; - } - if (!this.isReadOnly()) { - removeOrphanedLobs(); + trace.info("event listener {0} left connection open", e.getClass().getName()); + // if listener left an open connection + closeAllSessionsExcept(null); } } + if (!this.isReadOnly()) { + removeOrphanedLobs(); + } + } + try { try { if (systemSession != null) { if (powerOffCount != -1) { - for (Table table : getAllTablesAndViews(false)) { - if (table.isGlobalTemporary()) { - table.removeChildrenAndResources(systemSession); - } else { - table.close(systemSession); + for (Schema schema : schemas.values()) { + for (Table table : schema.getAllTablesAndViews(null)) { + if (table.isGlobalTemporary()) { + table.removeChildrenAndResources(systemSession); + } else { + table.close(systemSession); + } } } - for (SchemaObject obj : getAllSchemaObjects( - DbObject.SEQUENCE)) { - Sequence sequence = (Sequence) obj; - sequence.close(); + for (Schema schema : schemas.values()) { + for (Sequence sequence : schema.getAllSequences()) { + sequence.close(); + } } } - for (SchemaObject obj : getAllSchemaObjects( - DbObject.TRIGGER)) { - TriggerObject trigger = (TriggerObject) obj; - try { - trigger.close(); - } catch (SQLException e) { - trace.error(e, "close"); + for (Schema schema : schemas.values()) { + for (TriggerObject trigger : schema.getAllTriggers()) { + try { + trigger.close(); + } catch (SQLException e) { + trace.error(e, "close"); + } } } if (powerOffCount != -1) { @@ -1506,7 +1223,15 @@ private void closeImpl(boolean fromShutdownHook) { } tempFileDeleter.deleteAll(); try { - closeOpenFilesAndUnlock(true); + if (lobSession != 
null) { + lobSession.close(); + lobSession = null; + } + if (systemSession != null) { + systemSession.close(); + systemSession = null; + } + closeOpenFilesAndUnlock(); } catch (DbException e) { trace.error(e, "close"); } @@ -1524,7 +1249,7 @@ private void closeImpl(boolean fromShutdownHook) { } } } finally { - Engine.getInstance().close(databaseName); + Engine.close(databaseName); } } @@ -1533,119 +1258,39 @@ private void removeOrphanedLobs() { if (!persistent) { return; } - boolean lobStorageIsUsed = infoSchema.findTableOrView( - systemSession, LobStorageBackend.LOB_DATA_TABLE) != null; - lobStorageIsUsed |= store != null; - if (!lobStorageIsUsed) { - return; - } try { - getLobStorage(); - lobStorage.removeAllForTable( - LobStorageFrontend.TABLE_ID_SESSION_VARIABLE); + lobStorage.removeAllForTable(LobStorageFrontend.TABLE_ID_SESSION_VARIABLE); } catch (DbException e) { trace.error(e, "close"); } } - private void stopWriter() { - if (writer != null) { - writer.stopThread(); - writer = null; - } - } - /** * Close all open files and unlock the database. 
- * - * @param flush whether writing is allowed */ - private synchronized void closeOpenFilesAndUnlock(boolean flush) { + private synchronized void closeOpenFilesAndUnlock() { try { - stopWriter(); - if (pageStore != null) { - if (flush) { - try { - pageStore.checkpoint(); - if (!readOnly) { - lockMeta(pageStore.getPageStoreSession()); - pageStore.compact(compactMode); - unlockMeta(pageStore.getPageStoreSession()); - } - } catch (DbException e) { - if (ASSERT) { - int code = e.getErrorCode(); - if (code != ErrorCode.DATABASE_IS_CLOSED && - code != ErrorCode.LOCK_TIMEOUT_1 && - code != ErrorCode.IO_EXCEPTION_2) { - e.printStackTrace(); - } - } - trace.error(e, "close"); - } catch (Throwable t) { - if (ASSERT) { - t.printStackTrace(); - } - trace.error(t, "close"); - } - } - } - reconnectModified(false); - if (store != null) { - MVStore mvStore = store.getMvStore(); - if (mvStore != null && !mvStore.isClosed()) { - boolean compactFully = + if (!store.getMvStore().isClosed()) { + if (compactMode == CommandInterface.SHUTDOWN_IMMEDIATELY) { + store.closeImmediately(); + } else { + int allowedCompactionTime = compactMode == CommandInterface.SHUTDOWN_COMPACT || compactMode == CommandInterface.SHUTDOWN_DEFRAG || - getSettings().defragAlways; - if (!compactFully && !mvStore.isReadOnly()) { - if (dbSettings.maxCompactTime > 0) { - try { - store.compactFile(dbSettings.maxCompactTime); - } catch (Throwable t) { - trace.error(t, "compactFile"); - } - } else { - mvStore.commit(); - } - } - store.close(compactFully); + dbSettings.defragAlways ? 
-1 : dbSettings.maxCompactTime; + store.close(allowedCompactionTime); } } - if (systemSession != null) { - systemSession.close(); - systemSession = null; - } - if (lobSession != null) { - lobSession.close(); - lobSession = null; - } - closeFiles(); - if (persistent && lock == null && - fileLockMethod != FileLockMethod.NO && - fileLockMethod != FileLockMethod.FS) { - // everything already closed (maybe in checkPowerOff) - // don't delete temp files in this case because - // the database could be open now (even from within another process) - return; - } if (persistent) { - deleteOldTempFiles(); + // Don't delete temp files if everything is already closed + // (maybe in checkPowerOff), the database could be open now + // (even from within another process). + if (lock != null || fileLockMethod == FileLockMethod.NO || fileLockMethod == FileLockMethod.FS) { + deleteOldTempFiles(); + } } } finally { if (lock != null) { - if (fileLockMethod == FileLockMethod.SERIALIZED) { - // wait before deleting the .lock file, - // otherwise other connections can not detect that - if (lock.load().containsKey("changePending")) { - try { - Thread.sleep(TimeUnit.NANOSECONDS - .toMillis((long) (reconnectCheckDelayNs * 1.1))); - } catch (InterruptedException e) { - trace.error(e, "close"); - } - } - } lock.unlock(); lock = null; } @@ -1654,24 +1299,18 @@ private synchronized void closeOpenFilesAndUnlock(boolean flush) { private synchronized void closeFiles() { try { - if (store != null) { - store.closeImmediately(); - } - if (pageStore != null) { - pageStore.close(); - pageStore = null; - } + store.closeImmediately(); } catch (DbException e) { trace.error(e, "close"); } } - private void checkMetaFree(Session session, int id) { - SearchRow r = meta.getTemplateSimpleRow(false); - r.setValue(0, ValueInt.get(id)); + private void checkMetaFree(SessionLocal session, int id) { + SearchRow r = meta.getRowFactory().createRow(); + r.setValue(0, ValueInteger.get(id)); Cursor cursor = 
metaIdIndex.find(session, r, r); if (cursor.next()) { - DbException.throwInternalError(); + throw DbException.getInternalError(); } } @@ -1681,15 +1320,23 @@ private void checkMetaFree(Session session, int id) { * @return the id */ public int allocateObjectId() { - Object lock = isMVStore() ? objectIds : this; int i; - synchronized (lock) { + synchronized (objectIds) { i = objectIds.nextClearBit(0); objectIds.set(i); } return i; } + /** + * Returns system user. + * + * @return system user + */ + public User getSystemUser() { + return systemUser; + } + /** * Returns main schema (usually PUBLIC). * @@ -1699,75 +1346,30 @@ public Schema getMainSchema() { return mainSchema; } - public ArrayList getAllAggregates() { - return new ArrayList<>(aggregates.values()); - } - public ArrayList getAllComments() { return new ArrayList<>(comments.values()); } - - public int getAllowLiterals() { - if (starting) { - return Constants.ALLOW_LITERALS_ALL; - } - return allowLiterals; - } - - public ArrayList getAllRights() { - return new ArrayList<>(rights.values()); - } - - public ArrayList getAllRoles() { - return new ArrayList<>(roles.values()); - } - - /** - * Get all schema objects. - * - * @return all objects of all types - */ - public ArrayList getAllSchemaObjects() { - initMetaTables(); - ArrayList list = new ArrayList<>(); - for (Schema schema : schemas.values()) { - schema.getAll(list); - } - return list; - } - - /** - * Get all schema objects of the given type. 
- * - * @param type the object type - * @return all objects of that type - */ - public ArrayList getAllSchemaObjects(int type) { - if (type == DbObject.TABLE_OR_VIEW) { - initMetaTables(); - } - ArrayList list = new ArrayList<>(); - for (Schema schema : schemas.values()) { - schema.getAll(type, list); + + public int getAllowLiterals() { + if (starting) { + return Constants.ALLOW_LITERALS_ALL; } - return list; + return allowLiterals; + } + + public ArrayList getAllRights() { + return new ArrayList<>(rights.values()); } /** - * Get all tables and views. + * Get all tables and views. Meta data tables may be excluded. * - * @param includeMeta whether to force including the meta data tables (if - * true, metadata tables are always included; if false, metadata - * tables are only included if they are already initialized) * @return all objects of that type */ - public ArrayList
          getAllTablesAndViews(boolean includeMeta) { - if (includeMeta) { - initMetaTables(); - } + public ArrayList
          getAllTablesAndViews() { ArrayList
          list = new ArrayList<>(); for (Schema schema : schemas.values()) { - list.addAll(schema.getAllTablesAndViews()); + list.addAll(schema.getAllTablesAndViews(null)); } return list; } @@ -1785,39 +1387,20 @@ public ArrayList getAllSynonyms() { return list; } - /** - * Get the tables with the given name, if any. - * - * @param name the table name - * @return the list - */ - public ArrayList
          getTableOrViewByName(String name) { - // we expect that at most one table matches, at least in most cases - ArrayList
          list = new ArrayList<>(1); - for (Schema schema : schemas.values()) { - Table table = schema.getTableOrViewByName(name); - if (table != null) { - list.add(table); - } - } - return list; - } - - public ArrayList getAllSchemas() { - initMetaTables(); - return new ArrayList<>(schemas.values()); + public Collection getAllSchemas() { + return schemas.values(); } - public ArrayList getAllSettings() { - return new ArrayList<>(settings.values()); + public Collection getAllSchemasNoMeta() { + return schemas.values(); } - public ArrayList getAllDomains() { - return new ArrayList<>(domains.values()); + public Collection getAllSettings() { + return settings.values(); } - public ArrayList getAllUsers() { - return new ArrayList<>(users.values()); + public Collection getAllUsersAndRoles() { + return usersAndRoles.values(); } public String getCacheType() { @@ -1856,23 +1439,25 @@ public String getName() { * included * @return the list of sessions */ - public Session[] getSessions(boolean includingSystemSession) { - ArrayList list; - // need to synchronized on userSession, otherwise the list - // may contain null elements - synchronized (userSessions) { + public SessionLocal[] getSessions(boolean includingSystemSession) { + ArrayList list; + // need to synchronized on this database, + // otherwise the list may contain null elements + synchronized (this) { list = new ArrayList<>(userSessions); } - // copy, to ensure the reference is stable - Session sys = systemSession; - Session lob = lobSession; - if (includingSystemSession && sys != null) { - list.add(sys); - } - if (includingSystemSession && lob != null) { - list.add(lob); + if (includingSystemSession) { + // copy, to ensure the reference is stable + SessionLocal s = systemSession; + if (s != null) { + list.add(s); + } + s = lobSession; + if (s != null) { + list.add(s); + } } - return list.toArray(new Session[0]); + return list.toArray(new SessionLocal[0]); } /** @@ -1881,36 +1466,20 @@ public Session[] 
getSessions(boolean includingSystemSession) { * @param session the session * @param obj the database object */ - public void updateMeta(Session session, DbObject obj) { - if (isMVStore()) { - int id = obj.getId(); - if (id > 0) { - if (!starting && !obj.isTemporary()) { - Row newRow = meta.getTemplateRow(); - MetaRecord.populateRowFromDBObject(obj, newRow); - Row oldRow = metaIdIndex.getRow(session, id); - if (oldRow != null) { - meta.updateRow(session, oldRow, newRow); - } - } - // for temporary objects - synchronized (objectIds) { - objectIds.set(id); - } - } - } else { - boolean metaWasLocked = lockMeta(session); - synchronized (this) { - int id = obj.getId(); - removeMeta(session, id); - addMeta(session, obj); - // for temporary objects - if(id > 0) { - objectIds.set(id); + public void updateMeta(SessionLocal session, DbObject obj) { + int id = obj.getId(); + if (id > 0) { + if (!starting && !obj.isTemporary()) { + Row newRow = meta.getTemplateRow(); + MetaRecord.populateRowFromDBObject(obj, newRow); + Row oldRow = metaIdIndex.getRow(session, id); + if (oldRow != null) { + meta.updateRow(session, oldRow, newRow); } } - if (!metaWasLocked) { - unlockMeta(session); + // for temporary objects + synchronized (objectIds) { + objectIds.set(id); } } } @@ -1922,18 +1491,18 @@ public void updateMeta(Session session, DbObject obj) { * @param obj the object * @param newName the new name */ - public synchronized void renameSchemaObject(Session session, + public synchronized void renameSchemaObject(SessionLocal session, SchemaObject obj, String newName) { checkWritingAllowed(); obj.getSchema().rename(obj, newName); updateMetaAndFirstLevelChildren(session, obj); } - private synchronized void updateMetaAndFirstLevelChildren(Session session, DbObject obj) { + private synchronized void updateMetaAndFirstLevelChildren(SessionLocal session, DbObject obj) { ArrayList list = obj.getChildren(); Comment comment = findComment(obj); if (comment != null) { - 
DbException.throwInternalError(comment.toString()); + throw DbException.getInternalError(comment.toString()); } updateMeta(session, obj); // remember that this scans only one level deep! @@ -1953,17 +1522,17 @@ private synchronized void updateMetaAndFirstLevelChildren(Session session, DbObj * @param obj the object * @param newName the new name */ - public synchronized void renameDatabaseObject(Session session, + public synchronized void renameDatabaseObject(SessionLocal session, DbObject obj, String newName) { checkWritingAllowed(); int type = obj.getType(); - HashMap map = getMap(type); + Map map = getMap(type); if (SysProperties.CHECK) { if (!map.containsKey(obj.getName())) { - DbException.throwInternalError("not found: " + obj.getName()); + throw DbException.getInternalError("not found: " + obj.getName()); } if (obj.getName().equals(newName) || map.containsKey(newName)) { - DbException.throwInternalError("object already exists: " + newName); + throw DbException.getInternalError("object already exists: " + newName); } } obj.checkRename(); @@ -1973,24 +1542,6 @@ public synchronized void renameDatabaseObject(Session session, updateMetaAndFirstLevelChildren(session, obj); } - /** - * Create a temporary file in the database folder. 
- * - * @return the file name - */ - public String createTempFile() { - try { - boolean inTempDir = readOnly; - String name = databaseName; - if (!persistent) { - name = "memFS:" + name; - } - return FileUtils.createTempFile(name, Constants.SUFFIX_TEMP_FILE, inTempDir); - } catch (IOException e) { - throw DbException.convertIOException(e, databaseName); - } - } - private void deleteOldTempFiles() { String path = FileUtils.getParent(databaseName); for (String name : FileUtils.newDirectoryStream(path)) { @@ -2023,13 +1574,13 @@ public Schema getSchema(String schemaName) { * @param session the session * @param obj the object to remove */ - public synchronized void removeDatabaseObject(Session session, DbObject obj) { + public synchronized void removeDatabaseObject(SessionLocal session, DbObject obj) { checkWritingAllowed(); String objName = obj.getName(); int type = obj.getType(); - HashMap map = getMap(type); + Map map = getMap(type); if (SysProperties.CHECK && !map.containsKey(objName)) { - DbException.throwInternalError("not found: " + objName); + throw DbException.getInternalError("not found: " + objName); } Comment comment = findComment(obj); lockMeta(session); @@ -2061,16 +1612,16 @@ public Table getDependentTable(SchemaObject obj, Table except) { default: } HashSet set = new HashSet<>(); - for (Table t : getAllTablesAndViews(false)) { - if (except == t) { - continue; - } else if (TableType.VIEW == t.getTableType()) { - continue; - } - set.clear(); - t.addDependencies(set); - if (set.contains(obj)) { - return t; + for (Schema schema : schemas.values()) { + for (Table t : schema.getAllTablesAndViews(null)) { + if (except == t || TableType.VIEW == t.getTableType()) { + continue; + } + set.clear(); + t.addDependencies(set); + if (set.contains(obj)) { + return t; + } } } return null; @@ -2082,7 +1633,7 @@ public Table getDependentTable(SchemaObject obj, Table except) { * @param session the session * @param obj the object to be removed */ - public void 
removeSchemaObject(Session session, + public void removeSchemaObject(SessionLocal session, SchemaObject obj) { int type = obj.getType(); if (type == DbObject.TABLE_OR_VIEW) { @@ -2100,10 +1651,12 @@ public void removeSchemaObject(Session session, } } else if (type == DbObject.CONSTRAINT) { Constraint constraint = (Constraint) obj; - Table table = constraint.getTable(); - if (table.isTemporary() && !table.isGlobalTemporary()) { - session.removeLocalTempTableConstraint(constraint); - return; + if (constraint.getConstraintType() != Type.DOMAIN) { + Table table = constraint.getTable(); + if (table.isTemporary() && !table.isGlobalTemporary()) { + session.removeLocalTempTableConstraint(constraint); + return; + } } } checkWritingAllowed(); @@ -2119,7 +1672,7 @@ public void removeSchemaObject(Session session, Table t = getDependentTable(obj, null); if (t != null) { obj.getSchema().add(obj); - throw DbException.get(ErrorCode.CANNOT_DROP_2, obj.getSQL(false), t.getSQL(false)); + throw DbException.get(ErrorCode.CANNOT_DROP_2, obj.getTraceSQL(), t.getTraceSQL()); } obj.removeChildrenAndResources(session); } @@ -2145,13 +1698,7 @@ public synchronized void setCacheSize(int kb) { int max = MathUtils.convertLongToInt(Utils.getMemoryMax()) / 2; kb = Math.min(kb, max); } - cacheSize = kb; - if (pageStore != null) { - pageStore.setMaxCacheMemory(kb); - } - if (store != null) { - store.setCacheSize(Math.max(1, kb)); - } + store.setCacheSize(Math.max(1, kb)); } public synchronized void setMasterUser(User user) { @@ -2171,7 +1718,7 @@ public Role getPublicRole() { * @param session the session * @return a unique name */ - public synchronized String getTempTableName(String baseName, Session session) { + public synchronized String getTempTableName(String baseName, SessionLocal session) { String tempName; do { tempName = baseName + "_COPY_" + session.getId() + @@ -2193,11 +1740,6 @@ public void checkWritingAllowed() { if (readOnly) { throw DbException.get(ErrorCode.DATABASE_IS_READ_ONLY); } 
- if (fileLockMethod == FileLockMethod.SERIALIZED) { - if (!reconnectChangePending) { - throw DbException.get(ErrorCode.DATABASE_IS_READ_ONLY); - } - } } public boolean isReadOnly() { @@ -2205,27 +1747,15 @@ public boolean isReadOnly() { } public void setWriteDelay(int value) { - writeDelay = value; - if (writer != null) { - writer.setWriteDelay(value); - // TODO check if MIN_WRITE_DELAY is a good value - flushOnEachCommit = writeDelay < Constants.MIN_WRITE_DELAY; - } - if (store != null) { - int millis = value < 0 ? 0 : value; - store.getMvStore().setAutoCommitDelay(millis); - } + store.getMvStore().setAutoCommitDelay(value < 0 ? 0 : value); } public int getRetentionTime() { - return retentionTime; + return store.getMvStore().getRetentionTime(); } public void setRetentionTime(int value) { - retentionTime = value; - if (store != null) { - store.getMvStore().setRetentionTime(value); - } + store.getMvStore().setRetentionTime(value); } public void setAllowBuiltinAliasOverride(boolean b) { @@ -2236,25 +1766,13 @@ public boolean isAllowBuiltinAliasOverride() { return allowBuiltinAliasOverride; } - /** - * Check if flush-on-each-commit is enabled. - * - * @return true if it is - */ - public boolean getFlushOnEachCommit() { - return flushOnEachCommit; - } - /** * Get the list of in-doubt transactions. * * @return the list */ public ArrayList getInDoubtTransactions() { - if (store != null) { - return store.getInDoubtTransactions(); - } - return pageStore == null ? 
null : pageStore.getInDoubtTransactions(); + return store.getInDoubtTransactions(); } /** @@ -2263,41 +1781,23 @@ public ArrayList getInDoubtTransactions() { * @param session the session * @param transaction the name of the transaction */ - synchronized void prepareCommit(Session session, String transaction) { - if (readOnly) { - return; - } - if (store != null) { + synchronized void prepareCommit(SessionLocal session, String transaction) { + if (!readOnly) { store.prepareCommit(session, transaction); - return; - } - if (pageStore != null) { - pageStore.flushLog(); - pageStore.prepareCommit(session, transaction); } } /** - * Commit the current transaction of the given session. - * - * @param session the session + * If there is a background store thread, and if there wasn an exception in + * that thread, throw it now. */ - synchronized void commit(Session session) { - throwLastBackgroundException(); - if (readOnly) { - return; - } - if (pageStore != null) { - pageStore.commit(session); - } - session.setAllCommitted(); - } - - private void throwLastBackgroundException() { - DbException b = backgroundException.getAndSet(null); - if (b != null) { - // wrap the exception, so we see it was thrown here - throw DbException.get(b.getErrorCode(), b, b.getMessage()); + void throwLastBackgroundException() { + if (!store.getMvStore().isBackgroundThread()) { + DbException b = backgroundException.getAndSet(null); + if (b != null) { + // wrap the exception, so we see it was thrown here + throw DbException.get(b.getErrorCode(), b, b.getMessage()); + } } } @@ -2311,7 +1811,7 @@ public void setBackgroundException(DbException e) { } public Throwable getBackgroundException() { - IllegalStateException exception = store.getMvStore().getPanicException(); + MVStoreException exception = store.getMvStore().getPanicException(); if(exception != null) { return exception; } @@ -2323,13 +1823,7 @@ public Throwable getBackgroundException() { * Flush all pending changes to the transaction log. 
*/ public synchronized void flush() { - if (readOnly) { - return; - } - if (pageStore != null) { - pageStore.flushLog(); - } - if (store != null) { + if (!readOnly) { try { store.flush(); } catch (RuntimeException e) { @@ -2370,9 +1864,9 @@ public void setEventListenerClass(String className) { * @param state the {@link DatabaseEventListener} state * @param name the object name * @param x the current position - * @param max the highest value + * @param max the highest value or 0 if unknown */ - public void setProgress(int state, String name, int x, int max) { + public void setProgress(int state, String name, long x, long max) { if (eventListener != null) { try { eventListener.setProgress(state, name, x, max); @@ -2407,12 +1901,7 @@ public synchronized void sync() { if (readOnly) { return; } - if (store != null) { - store.sync(); - } - if (pageStore != null) { - pageStore.sync(); - } + store.sync(); } public int getMaxMemoryRows() { @@ -2423,29 +1912,14 @@ public void setMaxMemoryRows(int value) { this.maxMemoryRows = value; } - public void setMaxMemoryUndo(int value) { - this.maxMemoryUndo = value; - } - - public int getMaxMemoryUndo() { - return maxMemoryUndo; - } - public void setLockMode(int lockMode) { switch (lockMode) { case Constants.LOCK_MODE_OFF: - if (multiThreaded && !isMVStore()) { - // Currently the combination of MV_STORE=FALSE, LOCK_MODE=0 and - // MULTI_THREADED=TRUE is not supported. 
Also see code in - // JdbcDatabaseMetaData#supportsTransactionIsolationLevel(int) - throw DbException.get( - ErrorCode.UNSUPPORTED_SETTING_COMBINATION, - "MV_STORE=FALSE & LOCK_MODE=0 & MULTI_THREADED=TRUE"); - } - break; case Constants.LOCK_MODE_READ_COMMITTED: + break; case Constants.LOCK_MODE_TABLE: case Constants.LOCK_MODE_TABLE_GC: + lockMode = Constants.LOCK_MODE_READ_COMMITTED; break; default: throw DbException.getInvalidValueException("lock mode", lockMode); @@ -2461,7 +1935,7 @@ public void setCloseDelay(int value) { this.closeDelay = value; } - public Session getSystemSession() { + public SessionLocal getSystemSession() { return systemSession; } @@ -2495,23 +1969,17 @@ public boolean getIgnoreCase() { return ignoreCase; } - public synchronized void setDeleteFilesOnDisconnect(boolean b) { - this.deleteFilesOnDisconnect = b; + public void setIgnoreCatalogs(boolean b) { + ignoreCatalogs = b; } - @Override - public String getLobCompressionAlgorithm(int type) { - return lobCompressionAlgorithm; + public boolean getIgnoreCatalogs() { + return ignoreCatalogs; } - public void setLobCompressionAlgorithm(String stringValue) { - this.lobCompressionAlgorithm = stringValue; - } - public synchronized void setMaxLogSize(long value) { - if (pageStore != null) { - pageStore.setMaxLogSize(value); - } + public synchronized void setDeleteFilesOnDisconnect(boolean b) { + this.deleteFilesOnDisconnect = b; } public void setAllowLiterals(int value) { @@ -2591,15 +2059,6 @@ public boolean isStarting() { return starting; } - /** - * Check if MVStore backend is used for this database. - * - * @return {@code true} for MVStore, {@code false} for PageStore - */ - public boolean isMVStore() { - return dbSettings.mvStore; - } - /** * Called after the database has been opened and initialized. This method * notifies the event listener if one has been set. 
@@ -2608,34 +2067,24 @@ void opened() { if (eventListener != null) { eventListener.opened(); } - if (writer != null) { - writer.startThread(); - } } public void setMode(Mode mode) { this.mode = mode; + getNextRemoteSettingsId(); } + @Override public Mode getMode() { return mode; } - public boolean isMultiThreaded() { - return multiThreaded; + public void setDefaultNullOrdering(DefaultNullOrdering defaultNullOrdering) { + this.defaultNullOrdering = defaultNullOrdering; } - public void setMultiThreaded(boolean multiThreaded) { - if (multiThreaded && this.multiThreaded != multiThreaded) { - if (lockMode == Constants.LOCK_MODE_OFF && !isMVStore()) { - // Currently the combination of MV_STORE=FALSE, LOCK_MODE=0 and - // MULTI_THREADED=TRUE is not supported. - throw DbException.get( - ErrorCode.UNSUPPORTED_SETTING_COMBINATION, - "MV_STORE=FALSE & LOCK_MODE=0 & MULTI_THREADED=TRUE"); - } - } - this.multiThreaded = multiThreaded; + public DefaultNullOrdering getDefaultNullOrdering() { + return defaultNullOrdering; } public void setMaxOperationMemory(int maxOperationMemory) { @@ -2646,7 +2095,7 @@ public int getMaxOperationMemory() { return maxOperationMemory; } - public Session getExclusiveSession() { + public SessionLocal getExclusiveSession() { return exclusiveSession.get(); } @@ -2655,12 +2104,30 @@ public Session getExclusiveSession() { * * @param session the session * @param closeOthers whether other sessions are closed + * @return true if success or if database is in exclusive mode + * set by this session already, false otherwise */ - public void setExclusiveSession(Session session, boolean closeOthers) { - this.exclusiveSession.set(session); + public boolean setExclusiveSession(SessionLocal session, boolean closeOthers) { + if (exclusiveSession.get() != session && + !exclusiveSession.compareAndSet(null, session)) { + return false; + } if (closeOthers) { - closeAllSessionsException(session); + closeAllSessionsExcept(session); } + return true; + } + + /** + * Stop 
exclusive access the database by provided session. + * + * @param session the session + * @return true if success or if database is in non-exclusive mode already, + * false otherwise + */ + public boolean unsetExclusiveSession(SessionLocal session) { + return exclusiveSession.get() == null + || exclusiveSession.compareAndSet(session, null); } @Override @@ -2687,7 +2154,7 @@ public boolean isSysTableLocked() { * @param session the session * @return true if it is currently locked */ - public boolean isSysTableLockedBy(Session session) { + public boolean isSysTableLockedBy(SessionLocal session) { return meta == null || meta.isLockedExclusivelyBy(session); } @@ -2726,6 +2193,7 @@ public void shutdownImmediately() { // ignore } closeFiles(); + powerOffCount = 0; } @Override @@ -2733,30 +2201,6 @@ public TempFileDeleter getTempFileDeleter() { return tempFileDeleter; } - public PageStore getPageStore() { - if (dbSettings.mvStore) { - if (store == null) { - store = MVTableEngine.init(this); - } - return null; - } - synchronized (this) { - if (pageStore == null) { - pageStore = new PageStore(this, databaseName + - Constants.SUFFIX_PAGE_FILE, accessModeData, cacheSize); - if (pageSize != Constants.DEFAULT_PAGE_SIZE) { - pageStore.setPageSize(pageSize); - } - if (!readOnly && fileLockMethod == FileLockMethod.FS) { - pageStore.setLockFile(true); - } - pageStore.setLogMode(logMode); - pageStore.open(); - } - return pageStore; - } - } - /** * Get the first user defined table, excluding the LOB_BLOCKS table that the * Recover tool creates. 
@@ -2764,14 +2208,13 @@ public PageStore getPageStore() { * @return the table or null if no table is defined */ public Table getFirstUserTable() { - for (Table table : getAllTablesAndViews(false)) { - if (table.getCreateSQL() != null) { - if (table.isHidden()) { - // LOB tables + for (Schema schema : schemas.values()) { + for (Table table : schema.getAllTablesAndViews(null)) { + if (table.getCreateSQL() == null || table.isHidden()) { continue; } // exclude the LOB_MAP that the Recover tool creates - if (table.getSchema().getId() == Constants.INFORMATION_SCHEMA_ID + if (schema.getId() == Constants.INFORMATION_SCHEMA_ID && table.getName().equalsIgnoreCase("LOB_BLOCKS")) { continue; } @@ -2781,171 +2224,16 @@ public Table getFirstUserTable() { return null; } - /** - * Check if the contents of the database was changed and therefore it is - * required to re-connect. This method waits until pending changes are - * completed. If a pending change takes too long (more than 2 seconds), the - * pending change is broken (removed from the properties file). 
- * - * @return true if reconnecting is required - */ - public boolean isReconnectNeeded() { - if (fileLockMethod != FileLockMethod.SERIALIZED) { - return false; - } - if (reconnectChangePending) { - return false; - } - long now = System.nanoTime(); - if (now < reconnectCheckNext) { - return false; - } - reconnectCheckNext = now + reconnectCheckDelayNs; - if (lock == null) { - lock = new FileLock(traceSystem, databaseName + - Constants.SUFFIX_LOCK_FILE, Constants.LOCK_SLEEP); - } - try { - Properties prop = lock.load(), first = prop; - while (true) { - if (prop.equals(reconnectLastLock)) { - return false; - } - if (prop.getProperty("changePending", null) == null) { - break; - } - if (System.nanoTime() > - now + reconnectCheckDelayNs * 10) { - if (first.equals(prop)) { - // the writing process didn't update the file - - // it may have terminated - lock.setProperty("changePending", null); - lock.save(); - break; - } - } - trace.debug("delay (change pending)"); - Thread.sleep(TimeUnit.NANOSECONDS.toMillis(reconnectCheckDelayNs)); - prop = lock.load(); - } - reconnectLastLock = prop; - } catch (Exception e) { - // DbException, InterruptedException - trace.error(e, "readOnly {0}", readOnly); - // ignore - } - return true; - } - - /** - * Flush all changes when using the serialized mode, and if there are - * pending changes, and some time has passed. This switches to a new - * transaction log and resets the change pending flag in - * the .lock.db file. 
- */ - public void checkpointIfRequired() { - if (fileLockMethod != FileLockMethod.SERIALIZED || - readOnly || !reconnectChangePending || closing) { - return; - } - long now = System.nanoTime(); - if (now > reconnectCheckNext + reconnectCheckDelayNs) { - if (checkpointAllowed < 0) { - DbException.throwInternalError(Integer.toString(checkpointAllowed)); - } - synchronized (reconnectSync) { - if (checkpointAllowed > 0) { - return; - } - checkpointRunning = true; - } - synchronized (this) { - trace.debug("checkpoint start"); - flushSequences(); - checkpoint(); - reconnectModified(false); - trace.debug("checkpoint end"); - } - synchronized (reconnectSync) { - checkpointRunning = false; - } - } - } - - public boolean isFileLockSerialized() { - return fileLockMethod == FileLockMethod.SERIALIZED; - } - - private void flushSequences() { - for (SchemaObject obj : getAllSchemaObjects(DbObject.SEQUENCE)) { - Sequence sequence = (Sequence) obj; - sequence.flushWithoutMargin(); - } - } - /** * Flush all changes and open a new transaction log. */ public void checkpoint() { if (persistent) { - synchronized (this) { - if (pageStore != null) { - pageStore.checkpoint(); - } - } - if (store != null) { - store.flush(); - } + store.flush(); } getTempFileDeleter().deleteUnused(); } - /** - * This method is called before writing to the transaction log. 
- * - * @return true if the call was successful and writing is allowed, - * false if another connection was faster - */ - public boolean beforeWriting() { - if (fileLockMethod != FileLockMethod.SERIALIZED) { - return true; - } - while (checkpointRunning) { - try { - Thread.sleep(10 + (int) (Math.random() * 10)); - } catch (Exception e) { - // ignore InterruptedException - } - } - synchronized (reconnectSync) { - if (reconnectModified(true)) { - if (++checkpointAllowed > 20) { - throw DbException.throwInternalError(Integer.toString(checkpointAllowed)); - } - return true; - } - } - // make sure the next call to isReconnectNeeded() returns true - reconnectCheckNext = System.nanoTime() - 1; - reconnectLastLock = null; - return false; - } - - /** - * This method is called after updates are finished. - */ - public void afterWriting() { - if (fileLockMethod != FileLockMethod.SERIALIZED) { - return; - } - synchronized (reconnectSync) { - checkpointAllowed--; - } - if (checkpointAllowed < 0) { - throw DbException.throwInternalError(Integer.toString(checkpointAllowed)); - } - } - /** * Switch the database to read-only mode. 
* @@ -2968,70 +2256,13 @@ public SourceCompiler getCompiler() { @Override public LobStorageInterface getLobStorage() { - if (lobStorage == null) { - if (dbSettings.mvStore) { - lobStorage = new LobStorageMap(this); - } else { - lobStorage = new LobStorageBackend(this); - } - } return lobStorage; } - public JdbcConnection getLobConnectionForInit() { - String url = Constants.CONN_URL_INTERNAL; - JdbcConnection conn = new JdbcConnection( - systemSession, systemUser.getName(), url); - conn.setTraceLevel(TraceSystem.OFF); - return conn; - } - - public JdbcConnection getLobConnectionForRegularUse() { - String url = Constants.CONN_URL_INTERNAL; - JdbcConnection conn = new JdbcConnection( - lobSession, systemUser.getName(), url); - conn.setTraceLevel(TraceSystem.OFF); - return conn; - } - - public Session getLobSession() { + public SessionLocal getLobSession() { return lobSession; } - public void setLogMode(int log) { - if (log < 0 || log > 2) { - throw DbException.getInvalidValueException("LOG", log); - } - if (store != null) { - this.logMode = log; - return; - } - synchronized (this) { - if (pageStore != null) { - if (log != PageStore.LOG_MODE_SYNC || - pageStore.getLogMode() != PageStore.LOG_MODE_SYNC) { - // write the log mode in the trace file when enabling or - // disabling a dangerous mode - trace.error(null, "log {0}", log); - } - this.logMode = log; - pageStore.setLogMode(log); - } - } - } - - public int getLogMode() { - if (store != null) { - return logMode; - } - synchronized (this) { - if (pageStore != null) { - return pageStore.getLogMode(); - } - } - return PageStore.LOG_MODE_OFF; - } - public int getDefaultTableType() { return defaultTableType; } @@ -3052,7 +2283,20 @@ public DbSettings getSettings() { * @return the hash map */ public HashMap newStringMap() { - return dbSettings.caseInsensitiveIdentifiers ? new CaseInsensitiveMap() : new HashMap(); + return dbSettings.caseInsensitiveIdentifiers ? 
new CaseInsensitiveMap<>() : new HashMap<>(); + } + + /** + * Create a new hash map. Depending on the configuration, the key is case + * sensitive or case insensitive. + * + * @param the value type + * @param initialCapacity the initial capacity + * @return the hash map + */ + public HashMap newStringMap(int initialCapacity) { + return dbSettings.caseInsensitiveIdentifiers ? new CaseInsensitiveMap<>(initialCapacity) + : new HashMap<>(initialCapacity); } /** @@ -3063,8 +2307,8 @@ public HashMap newStringMap() { * @return the hash map */ public ConcurrentHashMap newConcurrentStringMap() { - return dbSettings.caseInsensitiveIdentifiers ? new CaseInsensitiveConcurrentMap() - : new ConcurrentHashMap(); + return dbSettings.caseInsensitiveIdentifiers ? new CaseInsensitiveConcurrentMap<>() + : new ConcurrentHashMap<>(); } /** @@ -3096,9 +2340,20 @@ private static boolean isUpperSysIdentifier(String upperName) { if (l == 0) { return false; } - for (int i = 0; i < l; i++) { - int ch = upperName.charAt(i); - if (ch < 'A' || ch > 'Z' && ch != '_') { + char c = upperName.charAt(0); + if (c < 'A' || c > 'Z') { + return false; + } + l--; + for (int i = 1; i < l; i++) { + c = upperName.charAt(i); + if ((c < 'A' || c > 'Z') && c != '_') { + return false; + } + } + if (l > 0) { + c = upperName.charAt(l); + if (c < 'A' || c > 'Z') { return false; } } @@ -3106,9 +2361,8 @@ private static boolean isUpperSysIdentifier(String upperName) { } @Override - public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, - int off, int length) { - throw DbException.throwInternalError(); + public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, int off, int length) { + throw DbException.getInternalError(); } public byte[] getFileEncryptionKey() { @@ -3154,6 +2408,7 @@ public void setJavaObjectSerializerName(String serializerName) { synchronized (this) { javaObjectSerializerInitialized = false; javaObjectSerializerName = serializerName; + getNextRemoteSettingsId(); } } @@ 
-3197,4 +2452,28 @@ public void setAuthenticator(Authenticator authenticator) { } this.authenticator=authenticator; } + + @Override + public ValueTimestampTimeZone currentTimestamp() { + Session session = SessionLocal.getThreadLocalSession(); + if (session != null) { + return session.currentTimestamp(); + } + throw DbException.getUnsupportedException("Unsafe comparison or cast"); + } + + @Override + public TimeZoneProvider currentTimeZone() { + Session session = SessionLocal.getThreadLocalSession(); + if (session != null) { + return session.currentTimeZone(); + } + throw DbException.getUnsupportedException("Unsafe comparison or cast"); + } + + @Override + public boolean zeroBasedEnums() { + return dbSettings.zeroBasedEnums; + } + } diff --git a/h2/src/main/org/h2/engine/DbObject.java b/h2/src/main/org/h2/engine/DbObject.java index a07476d584..7464f97794 100644 --- a/h2/src/main/org/h2/engine/DbObject.java +++ b/h2/src/main/org/h2/engine/DbObject.java @@ -1,146 +1,224 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; import java.util.ArrayList; + +import org.h2.command.Parser; +import org.h2.message.DbException; +import org.h2.message.Trace; import org.h2.table.Table; +import org.h2.util.HasSQL; +import org.h2.util.ParserUtil; /** * A database object such as a table, an index, or a user. */ -public interface DbObject { +public abstract class DbObject implements HasSQL { /** * The object is of the type table or view. */ - int TABLE_OR_VIEW = 0; + public static final int TABLE_OR_VIEW = 0; /** * This object is an index. */ - int INDEX = 1; + public static final int INDEX = 1; /** * This object is a user. */ - int USER = 2; + public static final int USER = 2; /** * This object is a sequence. 
*/ - int SEQUENCE = 3; + public static final int SEQUENCE = 3; /** * This object is a trigger. */ - int TRIGGER = 4; + public static final int TRIGGER = 4; /** * This object is a constraint (check constraint, unique constraint, or * referential constraint). */ - int CONSTRAINT = 5; + public static final int CONSTRAINT = 5; /** * This object is a setting. */ - int SETTING = 6; + public static final int SETTING = 6; /** * This object is a role. */ - int ROLE = 7; + public static final int ROLE = 7; /** * This object is a right. */ - int RIGHT = 8; + public static final int RIGHT = 8; /** * This object is an alias for a Java function. */ - int FUNCTION_ALIAS = 9; + public static final int FUNCTION_ALIAS = 9; /** * This object is a schema. */ - int SCHEMA = 10; + public static final int SCHEMA = 10; /** * This object is a constant. */ - int CONSTANT = 11; + public static final int CONSTANT = 11; /** * This object is a domain. */ - int DOMAIN = 12; + public static final int DOMAIN = 12; /** * This object is a comment. */ - int COMMENT = 13; + public static final int COMMENT = 13; /** * This object is a user-defined aggregate function. */ - int AGGREGATE = 14; + public static final int AGGREGATE = 14; /** * This object is a synonym. */ - int SYNONYM = 15; + public static final int SYNONYM = 15; /** - * Get the SQL name of this object (may be quoted). - * - * @param alwaysQuote quote all identifiers - * @return the SQL name + * The database. + */ + protected Database database; + + /** + * The trace module. */ - String getSQL(boolean alwaysQuote); + protected Trace trace; /** - * Appends the SQL name of this object (may be quoted) to the specified - * builder. + * The comment (if set). + */ + protected String comment; + + private int id; + + private String objectName; + + private long modificationId; + + private boolean temporary; + + /** + * Initialize some attributes of this object. 
* - * @param builder - * string builder - * @param alwaysQuote quote all identifiers - * @return the specified string builder + * @param db the database + * @param objectId the object id + * @param name the name + * @param traceModuleId the trace module id */ - StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote); + protected DbObject(Database db, int objectId, String name, int traceModuleId) { + this.database = db; + this.trace = db.getTrace(traceModuleId); + this.id = objectId; + this.objectName = name; + this.modificationId = db.getModificationMetaId(); + } + + /** + * Tell the object that is was modified. + */ + public final void setModified() { + this.modificationId = database == null ? -1 : database.getNextModificationMetaId(); + } + + public final long getModificationId() { + return modificationId; + } + + protected final void setObjectName(String name) { + objectName = name; + } + + @Override + public String getSQL(int sqlFlags) { + return Parser.quoteIdentifier(objectName, sqlFlags); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return ParserUtil.quoteIdentifier(builder, objectName, sqlFlags); + } /** * Get the list of dependent children (for tables, this includes indexes and * so on). * - * @return the list of children + * @return the list of children, or {@code null} */ - ArrayList getChildren(); + public ArrayList getChildren() { + return null; + } /** * Get the database. * * @return the database */ - Database getDatabase(); + public final Database getDatabase() { + return database; + } /** * Get the unique object id. * * @return the object id */ - int getId(); + public final int getId() { + return id; + } /** * Get the name. * * @return the name */ - String getName(); + public final String getName() { + return objectName; + } + + /** + * Set the main attributes to null to make sure the object is no longer + * used. 
+ */ + protected void invalidate() { + if (id == -1) { + throw DbException.getInternalError(); + } + setModified(); + id = -1; + database = null; + trace = null; + objectName = null; + } + + public final boolean isValid() { + return id != -1; + } /** * Build a SQL statement to re-create the object, or to create a copy of the @@ -150,74 +228,104 @@ public interface DbObject { * @param quotedName the quoted name * @return the SQL statement */ - String getCreateSQLForCopy(Table table, String quotedName); + public abstract String getCreateSQLForCopy(Table table, String quotedName); /** - * Construct the original CREATE ... SQL statement for this object. + * Construct the CREATE ... SQL statement for this object for meta table. * * @return the SQL statement */ - String getCreateSQL(); + public String getCreateSQLForMeta() { + return getCreateSQL(); + } + + /** + * Construct the CREATE ... SQL statement for this object. + * + * @return the SQL statement + */ + public abstract String getCreateSQL(); /** * Construct a DROP ... SQL statement for this object. * * @return the SQL statement */ - String getDropSQL(); + public String getDropSQL() { + return null; + } /** * Get the object type. * * @return the object type */ - int getType(); + public abstract int getType(); /** * Delete all dependent children objects and resources of this object. * * @param session the session */ - void removeChildrenAndResources(Session session); + public abstract void removeChildrenAndResources(SessionLocal session); /** * Check if renaming is allowed. Does nothing when allowed. */ - void checkRename(); + public void checkRename() { + // Allowed by default + } /** * Rename the object. * * @param newName the new name */ - void rename(String newName); + public void rename(String newName) { + checkRename(); + objectName = newName; + setModified(); + } /** * Check if this object is temporary (for example, a temporary table). 
* * @return true if is temporary */ - boolean isTemporary(); + public boolean isTemporary() { + return temporary; + } /** * Tell this object that it is temporary or not. * * @param temporary the new value */ - void setTemporary(boolean temporary); + public void setTemporary(boolean temporary) { + this.temporary = temporary; + } /** * Change the comment of this object. * * @param comment the new comment, or null for no comment */ - void setComment(String comment); + public void setComment(String comment) { + this.comment = comment != null && !comment.isEmpty() ? comment : null; + } /** * Get the current comment of this object. * * @return the comment, or null if not set */ - String getComment(); + public String getComment() { + return comment; + } + + @Override + public String toString() { + return objectName + ":" + id + ":" + super.toString(); + } } diff --git a/h2/src/main/org/h2/engine/DbObjectBase.java b/h2/src/main/org/h2/engine/DbObjectBase.java deleted file mode 100644 index 808c3ce5f4..0000000000 --- a/h2/src/main/org/h2/engine/DbObjectBase.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.util.ArrayList; -import org.h2.command.Parser; -import org.h2.message.DbException; -import org.h2.message.Trace; - -/** - * The base class for all database objects. - */ -public abstract class DbObjectBase implements DbObject { - - /** - * The database. - */ - protected Database database; - - /** - * The trace module. - */ - protected Trace trace; - - /** - * The comment (if set). - */ - protected String comment; - - private int id; - private String objectName; - private long modificationId; - private boolean temporary; - - /** - * Initialize some attributes of this object. 
- * - * @param db the database - * @param objectId the object id - * @param name the name - * @param traceModuleId the trace module id - */ - protected DbObjectBase(Database db, int objectId, String name, - int traceModuleId) { - this.database = db; - this.trace = db.getTrace(traceModuleId); - this.id = objectId; - this.objectName = name; - this.modificationId = db.getModificationMetaId(); - } - - /** - * Build a SQL statement to re-create this object. - * - * @return the SQL statement - */ - @Override - public abstract String getCreateSQL(); - - /** - * Build a SQL statement to drop this object. - * - * @return the SQL statement - */ - @Override - public abstract String getDropSQL(); - - /** - * Remove all dependent objects and free all resources (files, blocks in - * files) of this object. - * - * @param session the session - */ - @Override - public abstract void removeChildrenAndResources(Session session); - - /** - * Check if this object can be renamed. System objects may not be renamed. - */ - @Override - public abstract void checkRename(); - - /** - * Tell the object that is was modified. - */ - public void setModified() { - this.modificationId = database == null ? 
- -1 : database.getNextModificationMetaId(); - } - - public long getModificationId() { - return modificationId; - } - - protected void setObjectName(String name) { - objectName = name; - } - - @Override - public String getSQL(boolean alwaysQuote) { - return Parser.quoteIdentifier(objectName, alwaysQuote); - } - - @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - return Parser.quoteIdentifier(builder, objectName, alwaysQuote); - } - - @Override - public ArrayList getChildren() { - return null; - } - - @Override - public Database getDatabase() { - return database; - } - - @Override - public int getId() { - return id; - } - - @Override - public String getName() { - return objectName; - } - - /** - * Set the main attributes to null to make sure the object is no longer - * used. - */ - protected void invalidate() { - if (id == -1) { - throw DbException.throwInternalError(); - } - setModified(); - id = -1; - database = null; - trace = null; - objectName = null; - } - - public final boolean isValid() { - return id != -1; - } - - @Override - public void rename(String newName) { - checkRename(); - objectName = newName; - setModified(); - } - - @Override - public boolean isTemporary() { - return temporary; - } - - @Override - public void setTemporary(boolean temporary) { - this.temporary = temporary; - } - - @Override - public void setComment(String comment) { - this.comment = comment; - } - - @Override - public String getComment() { - return comment; - } - - @Override - public String toString() { - return objectName + ":" + id + ":" + super.toString(); - } - -} diff --git a/h2/src/main/org/h2/engine/DbSettings.java b/h2/src/main/org/h2/engine/DbSettings.java index 9c0f8a5a92..c4baedefe3 100644 --- a/h2/src/main/org/h2/engine/DbSettings.java +++ b/h2/src/main/org/h2/engine/DbSettings.java @@ -1,19 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; import java.util.HashMap; + import org.h2.api.ErrorCode; import org.h2.message.DbException; -import org.h2.util.Utils; /** * This class contains various database-level settings. To override the * documented default value for a database, append the setting in the database - * URL: "jdbc:h2:test;ALIAS_COLUMN_NAME=TRUE" when opening the first connection + * URL: "jdbc:h2:./test;ANALYZE_SAMPLE=1000" when opening the first connection * to the database. The settings can not be changed once the database is open. *

          * Some settings are a last resort and temporary solution to work around a @@ -24,23 +24,19 @@ */ public class DbSettings extends SettingsBase { - private static DbSettings defaultSettings; + /** + * The initial size of the hash table. + */ + static final int TABLE_SIZE = 64; /** - * Database setting ALIAS_COLUMN_NAME (default: false).
          - * When enabled, aliased columns (as in SELECT ID AS I FROM TEST) return the - * alias (I in this case) in ResultSetMetaData.getColumnName() and 'null' in - * getTableName(). If disabled, the real column name (ID in this case) and - * table name is returned. - *
          - * This setting only affects the default and the MySQL mode. When using - * any other mode, this feature is enabled for compatibility, even if this - * database setting is not enabled explicitly. + * INTERNAL. + * The default settings. Those must not be modified. */ - public final boolean aliasColumnName = get("ALIAS_COLUMN_NAME", false); + public static final DbSettings DEFAULT = new DbSettings(new HashMap<>(TABLE_SIZE)); /** - * Database setting ANALYZE_AUTO (default: 2000).
          + * Database setting ANALYZE_AUTO (default: 2000). * After changing this many rows, ANALYZE is automatically run for a table. * Automatically running ANALYZE is disabled if set to 0. If set to 1000, * then ANALYZE will run against each user table after about 1000 changes to @@ -51,13 +47,26 @@ public class DbSettings extends SettingsBase { public final int analyzeAuto = get("ANALYZE_AUTO", 2000); /** - * Database setting ANALYZE_SAMPLE (default: 10000).
          + * Database setting ANALYZE_SAMPLE (default: 10000). * The default sample size when analyzing a table. */ public final int analyzeSample = get("ANALYZE_SAMPLE", 10_000); /** - * Database setting DATABASE_TO_LOWER (default: false).
          + * Database setting AUTO_COMPACT_FILL_RATE + * (default: 90, which means 90%, 0 disables auto-compacting). + * Set the auto-compact target fill rate. If the average fill rate (the + * percentage of the storage space that contains active data) of the + * chunks is lower, then the chunks with a low fill rate are re-written. + * Also, if the percentage of empty space between chunks is higher than + * this value, then chunks at the end of the file are moved. Compaction + * stops if the target fill rate is reached. + * This setting only affects MVStore engine. + */ + public final int autoCompactFillRate = get("AUTO_COMPACT_FILL_RATE", 90); + + /** + * Database setting DATABASE_TO_LOWER (default: false). * When set to true unquoted identifiers and short name of database are * converted to lower case. Value of this setting should not be changed * after creation of database. Setting this to "true" is experimental. @@ -65,7 +74,7 @@ public class DbSettings extends SettingsBase { public final boolean databaseToLower; /** - * Database setting DATABASE_TO_UPPER (default: true).
          + * Database setting DATABASE_TO_UPPER (default: true). * When set to true unquoted identifiers and short name of database are * converted to upper case. */ @@ -73,21 +82,21 @@ public class DbSettings extends SettingsBase { /** * Database setting CASE_INSENSITIVE_IDENTIFIERS (default: - * false).
          + * false). * When set to true, all identifier names (table names, column names) are * case insensitive. Setting this to "true" is experimental. */ public final boolean caseInsensitiveIdentifiers = get("CASE_INSENSITIVE_IDENTIFIERS", false); /** - * Database setting DB_CLOSE_ON_EXIT (default: true).
          + * Database setting DB_CLOSE_ON_EXIT (default: true). * Close the database when the virtual machine exits normally, using a * shutdown hook. */ public final boolean dbCloseOnExit = get("DB_CLOSE_ON_EXIT", true); /** - * Database setting DEFAULT_CONNECTION (default: false).
          + * Database setting DEFAULT_CONNECTION (default: false). * Whether Java functions can use * DriverManager.getConnection("jdbc:default:connection") to * get a database connection. This feature is disabled by default for @@ -97,14 +106,14 @@ public class DbSettings extends SettingsBase { public final boolean defaultConnection = get("DEFAULT_CONNECTION", false); /** - * Database setting DEFAULT_ESCAPE (default: \).
          + * Database setting DEFAULT_ESCAPE (default: \). * The default escape character for LIKE comparisons. To select no escape * character, use an empty string. */ public final String defaultEscape = get("DEFAULT_ESCAPE", "\\"); /** - * Database setting DEFRAG_ALWAYS (default: false).
          + * Database setting DEFRAG_ALWAYS (default: false) * Each time the database is closed normally, it is fully defragmented (the * same as SHUTDOWN DEFRAG). If you execute SHUTDOWN COMPACT, then this * setting is ignored. @@ -112,41 +121,24 @@ public class DbSettings extends SettingsBase { public final boolean defragAlways = get("DEFRAG_ALWAYS", false); /** - * Database setting DROP_RESTRICT (default: true).
          - * Whether the default action for DROP TABLE, DROP VIEW, DROP SCHEMA, and - * DROP DOMAIN is RESTRICT. + * Database setting DROP_RESTRICT (default: true) + * Whether the default action for DROP TABLE, DROP VIEW, DROP SCHEMA, DROP + * DOMAIN, and DROP CONSTRAINT is RESTRICT. */ public final boolean dropRestrict = get("DROP_RESTRICT", true); - /** - * Database setting EARLY_FILTER (default: false).
          - * This setting allows table implementations to apply filter conditions - * early on. - */ - public final boolean earlyFilter = get("EARLY_FILTER", false); - /** * Database setting ESTIMATED_FUNCTION_TABLE_ROWS (default: - * 1000).
          + * 1000). * The estimated number of rows in a function table (for example, CSVREAD or * FTL_SEARCH). This value is used by the optimizer. */ public final int estimatedFunctionTableRows = get( "ESTIMATED_FUNCTION_TABLE_ROWS", 1000); - /** - * Database setting FUNCTIONS_IN_SCHEMA - * (default: true).
          - * If set, all functions are stored in a schema. Specially, the SCRIPT - * statement will always include the schema name in the CREATE ALIAS - * statement. This is not backward compatible with H2 versions 1.2.134 and - * older. - */ - public final boolean functionsInSchema = get("FUNCTIONS_IN_SCHEMA", true); - /** * Database setting LOB_TIMEOUT (default: 300000, - * which means 5 minutes).
          + * which means 5 minutes). * The number of milliseconds a temporary LOB reference is kept until it * times out. After the timeout, the LOB is no longer accessible using this * reference. @@ -154,21 +146,13 @@ public class DbSettings extends SettingsBase { public final int lobTimeout = get("LOB_TIMEOUT", 300_000); /** - * Database setting MAX_COMPACT_COUNT - * (default: Integer.MAX_VALUE).
          - * The maximum number of pages to move when closing a database. - */ - public final int maxCompactCount = get("MAX_COMPACT_COUNT", - Integer.MAX_VALUE); - - /** - * Database setting MAX_COMPACT_TIME (default: 200).
          + * Database setting MAX_COMPACT_TIME (default: 200). * The maximum time in milliseconds used to compact a database when closing. */ public final int maxCompactTime = get("MAX_COMPACT_TIME", 200); /** - * Database setting MAX_QUERY_TIMEOUT (default: 0).
          + * Database setting MAX_QUERY_TIMEOUT (default: 0). * The maximum timeout of a query in milliseconds. The default is 0, meaning * no limit. Please note the actual query timeout may be set to a lower * value. @@ -176,7 +160,7 @@ public class DbSettings extends SettingsBase { public final int maxQueryTimeout = get("MAX_QUERY_TIMEOUT", 0); /** - * Database setting OPTIMIZE_DISTINCT (default: true).
          + * Database setting OPTIMIZE_DISTINCT (default: true). * Improve the performance of simple DISTINCT queries if an index is * available for the given column. The optimization is used if: *

            @@ -191,7 +175,7 @@ public class DbSettings extends SettingsBase { /** * Database setting OPTIMIZE_EVALUATABLE_SUBQUERIES (default: - * true).
            + * true). * Optimize subqueries that are not dependent on the outer query. */ public final boolean optimizeEvaluatableSubqueries = get( @@ -199,7 +183,7 @@ public class DbSettings extends SettingsBase { /** * Database setting OPTIMIZE_INSERT_FROM_SELECT - * (default: true).
            + * (default: true). * Insert into table from query directly bypassing temporary disk storage. * This also applies to create table as select. */ @@ -207,63 +191,40 @@ public class DbSettings extends SettingsBase { "OPTIMIZE_INSERT_FROM_SELECT", true); /** - * Database setting OPTIMIZE_IN_LIST (default: true).
            + * Database setting OPTIMIZE_IN_LIST (default: true). * Optimize IN(...) and IN(SELECT ...) comparisons. This includes * optimization for SELECT, DELETE, and UPDATE. */ public final boolean optimizeInList = get("OPTIMIZE_IN_LIST", true); /** - * Database setting OPTIMIZE_IN_SELECT (default: true).
            + * Database setting OPTIMIZE_IN_SELECT (default: true). * Optimize IN(SELECT ...) comparisons. This includes * optimization for SELECT, DELETE, and UPDATE. */ public final boolean optimizeInSelect = get("OPTIMIZE_IN_SELECT", true); /** - * Database setting OPTIMIZE_OR (default: true).
            + * Database setting OPTIMIZE_OR (default: true). * Convert (C=? OR C=?) to (C IN(?, ?)). */ public final boolean optimizeOr = get("OPTIMIZE_OR", true); /** - * Database setting OPTIMIZE_TWO_EQUALS (default: true).
            + * Database setting OPTIMIZE_TWO_EQUALS (default: true). * Optimize expressions of the form A=B AND B=1. In this case, AND A=1 is * added so an index on A can be used. */ public final boolean optimizeTwoEquals = get("OPTIMIZE_TWO_EQUALS", true); /** - * Database setting OPTIMIZE_UPDATE (default: true).
            - * Speed up inserts, updates, and deletes by not reading all rows from a - * page unless necessary. - */ - public final boolean optimizeUpdate = get("OPTIMIZE_UPDATE", true); - - /** - * Database setting PAGE_STORE_MAX_GROWTH - * (default: 128 * 1024).
            - * The maximum number of pages the file grows at any time. - */ - public final int pageStoreMaxGrowth = get("PAGE_STORE_MAX_GROWTH", - 128 * 1024); - - /** - * Database setting PAGE_STORE_INTERNAL_COUNT - * (default: false).
            - * Update the row counts on a node level. - */ - public final boolean pageStoreInternalCount = get( - "PAGE_STORE_INTERNAL_COUNT", false); - - /** - * Database setting PAGE_STORE_TRIM (default: true).
            - * Trim the database size when closing. + * Database setting OPTIMIZE_SIMPLE_SINGLE_ROW_SUBQUERIES (default: true). + * Optimize expressions of the form (SELECT A) to A. */ - public final boolean pageStoreTrim = get("PAGE_STORE_TRIM", true); + public final boolean optimizeSimpleSingleRowSubqueries = get("OPTIMIZE_SIMPLE_SINGLE_ROW_SUBQUERIES", true); /** - * Database setting QUERY_CACHE_SIZE (default: 8).
            + * Database setting QUERY_CACHE_SIZE (default: 8). * The size of the query cache, in number of cached statements. Each session * has it's own cache with the given size. The cache is only used if the SQL * statement and all parameters match. Only the last returned result per @@ -276,22 +237,13 @@ public class DbSettings extends SettingsBase { public final int queryCacheSize = get("QUERY_CACHE_SIZE", 8); /** - * Database setting RECOMPILE_ALWAYS (default: false).
            + * Database setting RECOMPILE_ALWAYS (default: false). * Always recompile prepared statements. */ public final boolean recompileAlways = get("RECOMPILE_ALWAYS", false); /** - * Database setting RECONNECT_CHECK_DELAY (default: 200).
            - * Check the .lock.db file every this many milliseconds to detect that the - * database was changed. The process writing to the database must first - * notify a change in the .lock.db file, then wait twice this many - * milliseconds before updating the database. - */ - public final int reconnectCheckDelay = get("RECONNECT_CHECK_DELAY", 200); - - /** - * Database setting REUSE_SPACE (default: true).
            + * Database setting REUSE_SPACE (default: true). * If disabled, all changes are appended to the database file, and existing * content is never overwritten. This setting has no effect if the database * is already open. @@ -300,7 +252,7 @@ public class DbSettings extends SettingsBase { /** * Database setting SHARE_LINKED_CONNECTIONS - * (default: true).
            + * (default: true). * Linked connections should be shared, that means connections to the same * database should be used for all linked tables that connect to the same * database. @@ -310,41 +262,42 @@ public class DbSettings extends SettingsBase { /** * Database setting DEFAULT_TABLE_ENGINE - * (default: null).
            + * (default: null). * The default table engine to use for new tables. */ public final String defaultTableEngine = get("DEFAULT_TABLE_ENGINE", null); /** * Database setting MV_STORE - * (default: true).
            + * (default: true). * Use the MVStore storage engine. */ - public boolean mvStore = get("MV_STORE", true); + public final boolean mvStore = get("MV_STORE", true); /** * Database setting COMPRESS - * (default: false).
            + * (default: false). * Compress data when storing. */ public final boolean compressData = get("COMPRESS", false); /** - * Database setting STANDARD_DROP_TABLE_RESTRICT (default: - * false).
            - * true if DROP TABLE RESTRICT should fail if there's any - * foreign key referencing the table to be dropped. false if - * foreign keys referencing the table to be dropped should be silently - * dropped as well. + * Database setting IGNORE_CATALOGS + * (default: false). + * If set, all catalog names in identifiers are silently accepted + * without comparing them with the short name of the database. + */ + public final boolean ignoreCatalogs = get("IGNORE_CATALOGS", false); + + /** + * Database setting ZERO_BASED_ENUMS + * (default: false). + * If set, ENUM ordinal values are 0-based. */ - public final boolean standardDropTableRestrict = get( - "STANDARD_DROP_TABLE_RESTRICT", false); + public final boolean zeroBasedEnums = get("ZERO_BASED_ENUMS", false); private DbSettings(HashMap s) { super(s); - if (s.get("NESTED_JOINS") != null || Utils.getProperty("h2.nestedJoins", null) != null) { - throw DbException.getUnsupportedException("NESTED_JOINS setting is not available since 1.4.197"); - } boolean lower = get("DATABASE_TO_LOWER", false); boolean upperSet = containsKey("DATABASE_TO_UPPER"); boolean upper = get("DATABASE_TO_UPPER", true); @@ -369,21 +322,8 @@ private DbSettings(HashMap s) { * @param s the settings * @return the settings */ - public static DbSettings getInstance(HashMap s) { + static DbSettings getInstance(HashMap s) { return new DbSettings(s); } - /** - * INTERNAL. - * Get the default settings. Those must not be modified. 
- * - * @return the settings - */ - public static DbSettings getDefaultSettings() { - if (defaultSettings == null) { - defaultSettings = new DbSettings(new HashMap()); - } - return defaultSettings; - } - } diff --git a/h2/src/main/org/h2/engine/DelayedDatabaseCloser.java b/h2/src/main/org/h2/engine/DelayedDatabaseCloser.java index 881b93a9b3..2e6083f260 100644 --- a/h2/src/main/org/h2/engine/DelayedDatabaseCloser.java +++ b/h2/src/main/org/h2/engine/DelayedDatabaseCloser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; diff --git a/h2/src/main/org/h2/engine/Domain.java b/h2/src/main/org/h2/engine/Domain.java deleted file mode 100644 index 4522f33dfa..0000000000 --- a/h2/src/main/org/h2/engine/Domain.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import org.h2.message.DbException; -import org.h2.message.Trace; -import org.h2.table.Column; -import org.h2.table.Table; - -/** - * Represents a domain. 
- */ -public class Domain extends DbObjectBase { - - private Column column; - - public Domain(Database database, int id, String name) { - super(database, id, name, Trace.DATABASE); - } - - @Override - public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); - } - - @Override - public String getDropSQL() { - StringBuilder builder = new StringBuilder("DROP DOMAIN IF EXISTS "); - return getSQL(builder, true).toString(); - } - - @Override - public String getCreateSQL() { - StringBuilder builder = new StringBuilder("CREATE DOMAIN "); - getSQL(builder, true).append(" AS "); - builder.append(column.getCreateSQL()); - return builder.toString(); - } - - public Column getColumn() { - return column; - } - - @Override - public int getType() { - return DbObject.DOMAIN; - } - - @Override - public void removeChildrenAndResources(Session session) { - database.removeMeta(session, getId()); - } - - @Override - public void checkRename() { - // ok - } - - public void setColumn(Column column) { - this.column = column; - } - -} diff --git a/h2/src/main/org/h2/engine/Engine.java b/h2/src/main/org/h2/engine/Engine.java index 39ba76e9d8..2ee7732178 100644 --- a/h2/src/main/org/h2/engine/Engine.java +++ b/h2/src/main/org/h2/engine/Engine.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; @@ -16,11 +16,13 @@ import org.h2.security.auth.AuthenticationException; import org.h2.security.auth.AuthenticationInfo; import org.h2.security.auth.Authenticator; -import org.h2.store.FileLock; -import org.h2.store.FileLockMethod; +import org.h2.store.fs.FileUtils; +import org.h2.util.DateTimeUtils; import org.h2.util.MathUtils; import org.h2.util.ParserUtil; +import org.h2.util.StringUtils; import org.h2.util.ThreadDeadlockDetector; +import org.h2.util.TimeZoneProvider; import org.h2.util.Utils; /** @@ -28,27 +30,21 @@ * It is also responsible for opening and creating new databases. * This is a singleton class. */ -public class Engine implements SessionFactory { +public final class Engine { - private static final Engine INSTANCE = new Engine(); - private static final Map DATABASES = new HashMap<>(); + private static final Map DATABASES = new HashMap<>(); - private volatile long wrongPasswordDelay = - SysProperties.DELAY_WRONG_PASSWORD_MIN; - private boolean jmx; + private static volatile long WRONG_PASSWORD_DELAY = SysProperties.DELAY_WRONG_PASSWORD_MIN; - private Engine() { - // use getInstance() + private static boolean JMX; + + static { if (SysProperties.THREAD_DEADLOCK_DETECTOR) { ThreadDeadlockDetector.init(); } } - public static Engine getInstance() { - return INSTANCE; - } - - private Session openSession(ConnectionInfo ci, boolean ifExists, + private static SessionLocal openSession(ConnectionInfo ci, boolean ifExists, boolean forbidCreation, String cipher) { String name = ci.getName(); Database database; @@ -56,32 +52,64 @@ private Session openSession(ConnectionInfo ci, boolean ifExists, boolean openNew = ci.getProperty("OPEN_NEW", false); boolean opened = false; User user = null; - synchronized (DATABASES) { - if (openNew || ci.isUnnamedInMemory()) { - database = null; - } else { - database = DATABASES.get(name); + DatabaseHolder databaseHolder; + if (!ci.isUnnamedInMemory()) { + synchronized 
(DATABASES) { + databaseHolder = DATABASES.computeIfAbsent(name, (key) -> new DatabaseHolder()); } - if (database == null) { - if (ifExists && !Database.exists(name)) { - throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_2, name); + } else { + databaseHolder = new DatabaseHolder(); + } + synchronized (databaseHolder) { + database = databaseHolder.database; + if (database == null || openNew) { + if (ci.isPersistent()) { + String p = ci.getProperty("MV_STORE"); + String fileName; + if (p == null) { + fileName = name + Constants.SUFFIX_MV_FILE; + if (!FileUtils.exists(fileName)) { + throwNotFound(ifExists, forbidCreation, name); + fileName = name + Constants.SUFFIX_OLD_DATABASE_FILE; + if (FileUtils.exists(fileName)) { + throw DbException.getFileVersionError(fileName); + } + fileName = null; + } + } else { + fileName = name + Constants.SUFFIX_MV_FILE; + if (!FileUtils.exists(fileName)) { + throwNotFound(ifExists, forbidCreation, name); + fileName = null; + } + } + if (fileName != null && !FileUtils.canWrite(fileName)) { + ci.setProperty("ACCESS_MODE_DATA", "r"); + } + } else { + throwNotFound(ifExists, forbidCreation, name); } database = new Database(ci, cipher); opened = true; - if (database.getAllUsers().isEmpty()) { + boolean found = false; + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (rightOwner instanceof User) { + found = true; + break; + } + } + if (!found) { // users is the last thing we add, so if no user is around, // the database is new (or not initialized correctly) - user = new User(database, database.allocateObjectId(), - ci.getUserName(), false); + user = new User(database, database.allocateObjectId(), ci.getUserName(), false); user.setAdmin(true); user.setUserPasswordHash(ci.getUserPasswordHash()); database.setMasterUser(user); } - if (!ci.isUnnamedInMemory()) { - DATABASES.put(name, database); - } + databaseHolder.database = database; } } + if (opened) { // start the thread when already synchronizing on the database // 
otherwise a deadlock can occur when the writer thread @@ -132,11 +160,14 @@ private Session openSession(ConnectionInfo ci, boolean ifExists, //Prevent to set _PASSWORD ci.cleanAuthenticationInfo(); checkClustering(ci, database); - Session session = database.createSession(user); + SessionLocal session = database.createSession(user, ci.getNetworkConnectionInfo()); if (session == null) { // concurrently closing return null; } + if (ci.getProperty("OLD_INFORMATION_SCHEMA", false)) { + session.setOldInformationSchema(true); + } if (ci.getProperty("JMX", false)) { try { Utils.callStaticMethod( @@ -145,41 +176,30 @@ private Session openSession(ConnectionInfo ci, boolean ifExists, database.removeSession(session); throw DbException.get(ErrorCode.FEATURE_NOT_SUPPORTED_1, e, "JMX"); } - jmx = true; + JMX = true; } return session; } + private static void throwNotFound(boolean ifExists, boolean forbidCreation, String name) { + if (ifExists) { + throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_WITH_IF_EXISTS_1, name); + } + if (forbidCreation) { + throw DbException.get(ErrorCode.REMOTE_DATABASE_NOT_FOUND_1, name); + } + } + /** * Open a database connection with the given connection information. 
* * @param ci the connection information * @return the session */ - @Override - public Session createSession(ConnectionInfo ci) { - return INSTANCE.createSessionAndValidate(ci); - } - - private Session createSessionAndValidate(ConnectionInfo ci) { + public static SessionLocal createSession(ConnectionInfo ci) { try { - ConnectionInfo backup = null; - String lockMethodName = ci.getProperty("FILE_LOCK", null); - FileLockMethod fileLockMethod = FileLock.getFileLockMethod(lockMethodName); - if (fileLockMethod == FileLockMethod.SERIALIZED) { - // In serialized mode, database instance sharing is not possible - ci.setProperty("OPEN_NEW", "TRUE"); - try { - backup = ci.clone(); - } catch (CloneNotSupportedException e) { - throw DbException.convert(e); - } - } - Session session = openSession(ci); + SessionLocal session = openSession(ci); validateUserAndPassword(true); - if (backup != null) { - session.setConnectionInfo(backup); - } return session; } catch (DbException e) { if (e.getErrorCode() == ErrorCode.WRONG_USER_OR_PASSWORD) { @@ -189,23 +209,23 @@ private Session createSessionAndValidate(ConnectionInfo ci) { } } - private synchronized Session openSession(ConnectionInfo ci) { + private static SessionLocal openSession(ConnectionInfo ci) { boolean ifExists = ci.removeProperty("IFEXISTS", false); + boolean forbidCreation = ci.removeProperty("FORBID_CREATION", false); boolean ignoreUnknownSetting = ci.removeProperty( "IGNORE_UNKNOWN_SETTINGS", false); String cipher = ci.removeProperty("CIPHER", null); String init = ci.removeProperty("INIT", null); - Session session; + SessionLocal session; long start = System.nanoTime(); for (;;) { - session = openSession(ci, ifExists, cipher); + session = openSession(ci, ifExists, forbidCreation, cipher); if (session != null) { break; } // we found a database that is currently closing // wait a bit to avoid a busy loop (the method is synchronized) - if (System.nanoTime() - start > 60_000_000_000L) { - // retry at most 1 minute + if 
(System.nanoTime() - start > DateTimeUtils.NANOS_PER_MINUTE) { throw DbException.get(ErrorCode.DATABASE_ALREADY_OPEN_1, "Waited for database closing longer than 1 minute"); } @@ -217,21 +237,25 @@ private synchronized Session openSession(ConnectionInfo ci) { } synchronized (session) { session.setAllowLiterals(true); - DbSettings defaultSettings = DbSettings.getDefaultSettings(); + DbSettings defaultSettings = DbSettings.DEFAULT; for (String setting : ci.getKeys()) { if (defaultSettings.containsKey(setting)) { // database setting are only used when opening the database continue; } String value = ci.getProperty(setting); + StringBuilder builder = new StringBuilder("SET ").append(setting).append(' '); if (!ParserUtil.isSimpleIdentifier(setting, false, false)) { - throw DbException.get(ErrorCode.UNSUPPORTED_SETTING_1, setting); + if (!setting.equalsIgnoreCase("TIME ZONE")) { + throw DbException.get(ErrorCode.UNSUPPORTED_SETTING_1, setting); + } + StringUtils.quoteStringSQL(builder, value); + } else { + builder.append(value); } try { - CommandInterface command = session.prepareCommand( - "SET " + setting + ' ' + value, - Integer.MAX_VALUE); - command.executeUpdate(false); + CommandInterface command = session.prepareLocal(builder.toString()); + command.executeUpdate(null); } catch (DbException e) { if (e.getErrorCode() == ErrorCode.ADMIN_RIGHTS_REQUIRED) { session.getTrace().error(e, "admin rights required; user: \"" + @@ -245,11 +269,14 @@ private synchronized Session openSession(ConnectionInfo ci) { } } } + TimeZoneProvider timeZone = ci.getTimeZone(); + if (timeZone != null) { + session.setTimeZone(timeZone); + } if (init != null) { try { - CommandInterface command = session.prepareCommand(init, - Integer.MAX_VALUE); - command.executeUpdate(false); + CommandInterface command = session.prepareLocal(init); + command.executeUpdate(null); } catch (DbException e) { if (!ignoreUnknownSetting) { session.close(); @@ -292,8 +319,8 @@ private static void 
checkClustering(ConnectionInfo ci, Database database) { * * @param name the database name */ - void close(String name) { - if (jmx) { + static void close(String name) { + if (JMX) { try { Utils.callStaticMethod("org.h2.jmx.DatabaseInfo.unregisterMBean", name); } catch (Exception e) { @@ -322,14 +349,14 @@ void close(String name) { * @param correct if the user name or the password was correct * @throws DbException the exception 'wrong user or password' */ - private void validateUserAndPassword(boolean correct) { + private static void validateUserAndPassword(boolean correct) { int min = SysProperties.DELAY_WRONG_PASSWORD_MIN; if (correct) { - long delay = wrongPasswordDelay; + long delay = WRONG_PASSWORD_DELAY; if (delay > min && delay > 0) { // the first correct password must be blocked, // otherwise parallel attacks are possible - synchronized (INSTANCE) { + synchronized (Engine.class) { // delay up to the last delay // an attacker can't know how long it will be delay = MathUtils.secureRandomInt((int) delay); @@ -338,21 +365,21 @@ private void validateUserAndPassword(boolean correct) { } catch (InterruptedException e) { // ignore } - wrongPasswordDelay = min; + WRONG_PASSWORD_DELAY = min; } } } else { // this method is not synchronized on the Engine, so that // regular successful attempts are not blocked - synchronized (INSTANCE) { - long delay = wrongPasswordDelay; + synchronized (Engine.class) { + long delay = WRONG_PASSWORD_DELAY; int max = SysProperties.DELAY_WRONG_PASSWORD_MAX; if (max <= 0) { max = Integer.MAX_VALUE; } - wrongPasswordDelay += wrongPasswordDelay; - if (wrongPasswordDelay > max || wrongPasswordDelay < 0) { - wrongPasswordDelay = max; + WRONG_PASSWORD_DELAY += WRONG_PASSWORD_DELAY; + if (WRONG_PASSWORD_DELAY > max || WRONG_PASSWORD_DELAY < 0) { + WRONG_PASSWORD_DELAY = max; } if (min > 0) { // a bit more to protect against timing attacks @@ -368,4 +395,14 @@ private void validateUserAndPassword(boolean correct) { } } + private Engine() { + } + + 
private static final class DatabaseHolder { + + DatabaseHolder() { + } + + volatile Database database; + } } diff --git a/h2/src/main/org/h2/engine/GeneratedKeys.java b/h2/src/main/org/h2/engine/GeneratedKeys.java deleted file mode 100644 index 8c40df15d6..0000000000 --- a/h2/src/main/org/h2/engine/GeneratedKeys.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; -import org.h2.result.LocalResult; -import org.h2.result.Row; -import org.h2.table.Column; -import org.h2.table.Table; -import org.h2.util.StringUtils; -import org.h2.util.Utils; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * Class for gathering and processing of generated keys. - */ -public final class GeneratedKeys { - /** - * Data for result set with generated keys. - */ - private final ArrayList> data = Utils.newSmallArrayList(); - - /** - * Columns with generated keys in the current row. - */ - private final ArrayList row = Utils.newSmallArrayList(); - - /** - * All columns with generated keys. - */ - private final ArrayList allColumns = Utils.newSmallArrayList(); - - /** - * Request for keys gathering. {@code false} if generated keys are not needed, - * {@code true} if generated keys should be configured automatically, - * {@code int[]} to specify column indices to return generated keys from, or - * {@code String[]} to specify column names to return generated keys from. - */ - private Object generatedKeysRequest; - - /** - * Processed table. - */ - private Table table; - - /** - * Remembers columns with generated keys. 
- * - * @param column - * table column - */ - public void add(Column column) { - if (Boolean.FALSE.equals(generatedKeysRequest)) { - return; - } - row.add(column); - } - - /** - * Clears all information from previous runs and sets a new request for - * gathering of generated keys. - * - * @param generatedKeysRequest - * {@code false} if generated keys are not needed, {@code true} if - * generated keys should be configured automatically, {@code int[]} - * to specify column indices to return generated keys from, or - * {@code String[]} to specify column names to return generated keys - * from - */ - public void clear(Object generatedKeysRequest) { - this.generatedKeysRequest = generatedKeysRequest; - data.clear(); - row.clear(); - allColumns.clear(); - table = null; - } - - /** - * Saves row with generated keys if any. - * - * @param tableRow - * table row that was inserted - */ - public void confirmRow(Row tableRow) { - if (Boolean.FALSE.equals(generatedKeysRequest)) { - return; - } - int size = row.size(); - if (size > 0) { - if (size == 1) { - Column column = row.get(0); - data.add(Collections.singletonMap(column, tableRow.getValue(column.getColumnId()))); - if (!allColumns.contains(column)) { - allColumns.add(column); - } - } else { - HashMap map = new HashMap<>(); - for (Column column : row) { - map.put(column, tableRow.getValue(column.getColumnId())); - if (!allColumns.contains(column)) { - allColumns.add(column); - } - } - data.add(map); - } - row.clear(); - } - } - - /** - * Returns generated keys. 
- * - * @param session - * session - * @return local result with generated keys - */ - public LocalResult getKeys(Session session) { - Database db = session.getDatabase(); - if (Boolean.FALSE.equals(generatedKeysRequest)) { - clear(null); - return db.getResultFactory().create(); - } - ArrayList expressionColumns; - if (Boolean.TRUE.equals(generatedKeysRequest)) { - expressionColumns = new ArrayList<>(allColumns.size()); - for (Column column : allColumns) { - expressionColumns.add(new ExpressionColumn(db, column)); - } - } else if (generatedKeysRequest instanceof int[]) { - if (table != null) { - int[] indices = (int[]) generatedKeysRequest; - Column[] columns = table.getColumns(); - int cnt = columns.length; - allColumns.clear(); - expressionColumns = new ArrayList<>(indices.length); - for (int idx : indices) { - if (idx >= 1 && idx <= cnt) { - Column column = columns[idx - 1]; - expressionColumns.add(new ExpressionColumn(db, column)); - allColumns.add(column); - } - } - } else { - clear(null); - return db.getResultFactory().create(); - } - } else if (generatedKeysRequest instanceof String[]) { - if (table != null) { - String[] names = (String[]) generatedKeysRequest; - allColumns.clear(); - expressionColumns = new ArrayList<>(names.length); - for (String name : names) { - Column column; - search: if (table.doesColumnExist(name)) { - column = table.getColumn(name); - } else { - name = StringUtils.toUpperEnglish(name); - if (table.doesColumnExist(name)) { - column = table.getColumn(name); - } else { - for (Column c : table.getColumns()) { - if (c.getName().equalsIgnoreCase(name)) { - column = c; - break search; - } - } - continue; - } - } - expressionColumns.add(new ExpressionColumn(db, column)); - allColumns.add(column); - } - } else { - clear(null); - return db.getResultFactory().create(); - } - } else { - clear(null); - return db.getResultFactory().create(); - } - int columnCount = expressionColumns.size(); - if (columnCount == 0) { - clear(null); - return 
db.getResultFactory().create(); - } - LocalResult result = db.getResultFactory().create(session, - expressionColumns.toArray(new Expression[0]), columnCount); - for (Map map : data) { - Value[] row = new Value[columnCount]; - for (Map.Entry entry : map.entrySet()) { - int idx = allColumns.indexOf(entry.getKey()); - if (idx >= 0) { - row[idx] = entry.getValue(); - } - } - for (int i = 0; i < columnCount; i++) { - if (row[i] == null) { - row[i] = ValueNull.INSTANCE; - } - } - result.addRow(row); - } - clear(null); - return result; - } - - /** - * Initializes processing of the specified table. Should be called after - * {@code clear()}, but before other methods. - * - * @param table - * table - */ - public void initialize(Table table) { - this.table = table; - } - - /** - * Clears unsaved information about previous row, if any. Should be called - * before processing of a new row if previous row was not confirmed or simply - * always before each row. - */ - public void nextRow() { - row.clear(); - } - - @Override - public String toString() { - return allColumns + ": " + data.size(); - } - -} diff --git a/h2/src/main/org/h2/engine/GeneratedKeysMode.java b/h2/src/main/org/h2/engine/GeneratedKeysMode.java index 2e26d6f7da..bf5f707b7c 100644 --- a/h2/src/main/org/h2/engine/GeneratedKeysMode.java +++ b/h2/src/main/org/h2/engine/GeneratedKeysMode.java @@ -1,11 +1,10 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; -import org.h2.api.ErrorCode; import org.h2.message.DbException; /** @@ -37,28 +36,27 @@ public final class GeneratedKeysMode { * Determines mode of generated keys' gathering. 
* * @param generatedKeysRequest - * {@code false} if generated keys are not needed, {@code true} if - * generated keys should be configured automatically, {@code int[]} - * to specify column indices to return generated keys from, or - * {@code String[]} to specify column names to return generated keys - * from + * {@code null} or {@code false} if generated keys are not + * needed, {@code true} if generated keys should be configured + * automatically, {@code int[]} to specify column indices to + * return generated keys from, or {@code String[]} to specify + * column names to return generated keys from * @return mode for the specified generated keys request */ public static int valueOf(Object generatedKeysRequest) { - if (Boolean.FALSE.equals(generatedKeysRequest)) { + if (generatedKeysRequest == null || Boolean.FALSE.equals(generatedKeysRequest)) { return NONE; } if (Boolean.TRUE.equals(generatedKeysRequest)) { return AUTO; } if (generatedKeysRequest instanceof int[]) { - return COLUMN_NUMBERS; + return ((int[]) generatedKeysRequest).length > 0 ? COLUMN_NUMBERS : NONE; } if (generatedKeysRequest instanceof String[]) { - return COLUMN_NAMES; + return ((String[]) generatedKeysRequest).length > 0 ? COLUMN_NAMES : NONE; } - throw DbException.get(ErrorCode.INVALID_VALUE_2, - generatedKeysRequest == null ? "null" : generatedKeysRequest.toString()); + throw DbException.getInternalError(); } private GeneratedKeysMode() { diff --git a/h2/src/main/org/h2/engine/IsolationLevel.java b/h2/src/main/org/h2/engine/IsolationLevel.java new file mode 100644 index 0000000000..26309cbdca --- /dev/null +++ b/h2/src/main/org/h2/engine/IsolationLevel.java @@ -0,0 +1,162 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.engine; + +import java.sql.Connection; + +import org.h2.message.DbException; + +/** + * Level of isolation. 
+ */ +public enum IsolationLevel { + + /** + * Dirty reads, non-repeatable reads and phantom reads are allowed. + */ + READ_UNCOMMITTED(Connection.TRANSACTION_READ_UNCOMMITTED, Constants.LOCK_MODE_OFF), + + /** + * Dirty reads aren't allowed; non-repeatable reads and phantom reads are + * allowed. + */ + READ_COMMITTED(Connection.TRANSACTION_READ_COMMITTED, Constants.LOCK_MODE_READ_COMMITTED), + + /** + * Dirty reads and non-repeatable reads aren't allowed; phantom reads are + * allowed. + */ + REPEATABLE_READ(Connection.TRANSACTION_REPEATABLE_READ, Constants.LOCK_MODE_TABLE), + + /** + * Dirty reads, non-repeatable reads and phantom reads are'n allowed. + */ + SNAPSHOT(Constants.TRANSACTION_SNAPSHOT, Constants.LOCK_MODE_TABLE), + + /** + * Dirty reads, non-repeatable reads and phantom reads are'n allowed. + * Concurrent and serial execution of transactions with this isolation level + * should have the same effect. + */ + SERIALIZABLE(Connection.TRANSACTION_SERIALIZABLE, Constants.LOCK_MODE_TABLE); + + /** + * Returns the isolation level from LOCK_MODE equivalent for PageStore and + * old versions of H2. + * + * @param level + * the LOCK_MODE value + * @return the isolation level + */ + public static IsolationLevel fromJdbc(int level) { + switch (level) { + case Connection.TRANSACTION_READ_UNCOMMITTED: + return IsolationLevel.READ_UNCOMMITTED; + case Connection.TRANSACTION_READ_COMMITTED: + return IsolationLevel.READ_COMMITTED; + case Connection.TRANSACTION_REPEATABLE_READ: + return IsolationLevel.REPEATABLE_READ; + case Constants.TRANSACTION_SNAPSHOT: + return IsolationLevel.SNAPSHOT; + case Connection.TRANSACTION_SERIALIZABLE: + return IsolationLevel.SERIALIZABLE; + default: + throw DbException.getInvalidValueException("isolation level", level); + } + } + + /** + * Returns the isolation level from LOCK_MODE equivalent for PageStore and + * old versions of H2. 
+ * + * @param lockMode + * the LOCK_MODE value + * @return the isolation level + */ + public static IsolationLevel fromLockMode(int lockMode) { + switch (lockMode) { + case Constants.LOCK_MODE_OFF: + return IsolationLevel.READ_UNCOMMITTED; + case Constants.LOCK_MODE_READ_COMMITTED: + default: + return IsolationLevel.READ_COMMITTED; + case Constants.LOCK_MODE_TABLE: + case Constants.LOCK_MODE_TABLE_GC: + return IsolationLevel.SERIALIZABLE; + } + } + + /** + * Returns the isolation level from its SQL name. + * + * @param sql + * the SQL name + * @return the isolation level from its SQL name + */ + public static IsolationLevel fromSql(String sql) { + switch (sql) { + case "READ UNCOMMITTED": + return READ_UNCOMMITTED; + case "READ COMMITTED": + return READ_COMMITTED; + case "REPEATABLE READ": + return REPEATABLE_READ; + case "SNAPSHOT": + return SNAPSHOT; + case "SERIALIZABLE": + return SERIALIZABLE; + default: + throw DbException.getInvalidValueException("isolation level", sql); + } + } + + private final String sql; + + private final int jdbc, lockMode; + + private IsolationLevel(int jdbc, int lockMode) { + sql = name().replace('_', ' ').intern(); + this.jdbc = jdbc; + this.lockMode = lockMode; + } + + /** + * Returns the SQL representation of this isolation level. + * + * @return SQL representation of this isolation level + */ + public String getSQL() { + return sql; + } + + /** + * Returns the JDBC constant for this isolation level. + * + * @return the JDBC constant for this isolation level + */ + public int getJdbc() { + return jdbc; + } + + /** + * Returns the LOCK_MODE equivalent for PageStore and old versions of H2. + * + * @return the LOCK_MODE equivalent + */ + public int getLockMode() { + return lockMode; + } + + /** + * Returns whether a non-repeatable read phenomena is allowed. 
+ * + * @return whether a non-repeatable read phenomena is allowed + */ + public boolean allowNonRepeatableRead() { + return ordinal() < REPEATABLE_READ.ordinal(); + } + +} diff --git a/h2/src/main/org/h2/engine/MetaRecord.java b/h2/src/main/org/h2/engine/MetaRecord.java index 62fc5282d7..b0016e4202 100644 --- a/h2/src/main/org/h2/engine/MetaRecord.java +++ b/h2/src/main/org/h2/engine/MetaRecord.java @@ -1,18 +1,20 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; import java.sql.SQLException; +import java.util.Comparator; import org.h2.api.DatabaseEventListener; +import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.result.SearchRow; -import org.h2.value.ValueInt; -import org.h2.value.ValueString; +import org.h2.value.ValueInteger; +import org.h2.value.ValueVarchar; /** * A record in the system table of the database. @@ -20,6 +22,22 @@ */ public class MetaRecord implements Comparable { + /** + * Comparator for prepared constraints, sorts unique and primary key + * constraints first. + */ + static final Comparator CONSTRAINTS_COMPARATOR = (o1, o2) -> { + int t1 = o1.getType(), t2 = o2.getType(); + boolean u1 = t1 == CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY + || t1 == CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE; + boolean u2 = t2 == CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY + || t2 == CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE; + if (u1 == u2) { + return o1.getPersistedObjectId() - o2.getPersistedObjectId(); + } + return u1 ? 
-1 : 1; + }; + private final int id; private final int objectType; private final String sql; @@ -33,10 +51,10 @@ public class MetaRecord implements Comparable { * search row */ public static void populateRowFromDBObject(DbObject obj, SearchRow r) { - r.setValue(0, ValueInt.get(obj.getId())); - r.setValue(1, ValueInt.get(0)); - r.setValue(2, ValueInt.get(obj.getType())); - r.setValue(3, ValueString.get(obj.getCreateSQL())); + r.setValue(0, ValueInteger.get(obj.getId())); + r.setValue(1, ValueInteger.get(0)); + r.setValue(2, ValueInteger.get(obj.getType())); + r.setValue(3, ValueVarchar.get(obj.getCreateSQLForMeta())); } public MetaRecord(SearchRow r) { @@ -52,22 +70,60 @@ public MetaRecord(SearchRow r) { * @param systemSession the system session * @param listener the database event listener */ - void execute(Database db, Session systemSession, - DatabaseEventListener listener) { + void prepareAndExecute(Database db, SessionLocal systemSession, DatabaseEventListener listener) { + try { + Prepared command = systemSession.prepare(sql); + command.setPersistedObjectId(id); + command.update(); + } catch (DbException e) { + throwException(db, listener, e, sql); + } + } + + /** + * Prepares the meta data statement. + * + * @param db the database + * @param systemSession the system session + * @param listener the database event listener + * @return the prepared command + */ + Prepared prepare(Database db, SessionLocal systemSession, DatabaseEventListener listener) { try { Prepared command = systemSession.prepare(sql); command.setPersistedObjectId(id); + return command; + } catch (DbException e) { + throwException(db, listener, e, sql); + return null; + } + } + + /** + * Execute the meta data statement. 
+ * + * @param db the database + * @param command the prepared command + * @param listener the database event listener + * @param sql SQL + */ + static void execute(Database db, Prepared command, DatabaseEventListener listener, String sql) { + try { command.update(); } catch (DbException e) { - e = e.addSQL(sql); - SQLException s = e.getSQLException(); - db.getTrace(Trace.DATABASE).error(s, sql); - if (listener != null) { - listener.exceptionThrown(s, sql); - // continue startup in this case - } else { - throw e; - } + throwException(db, listener, e, sql); + } + } + + private static void throwException(Database db, DatabaseEventListener listener, DbException e, String sql) { + e = e.addSQL(sql); + SQLException s = e.getSQLException(); + db.getTrace(Trace.DATABASE).error(s, sql); + if (listener != null) { + listener.exceptionThrown(s, sql); + // continue startup in this case + } else { + throw e; } } @@ -140,14 +196,13 @@ private int getCreateOrder() { case DbObject.COMMENT: return 15; default: - throw DbException.throwInternalError("type="+objectType); + throw DbException.getInternalError("type=" + objectType); } } @Override public String toString() { - return "MetaRecord [id=" + id + ", objectType=" + objectType + - ", sql=" + sql + "]"; + return "MetaRecord [id=" + id + ", objectType=" + objectType + ", sql=" + sql + ']'; } } diff --git a/h2/src/main/org/h2/engine/Mode.java b/h2/src/main/org/h2/engine/Mode.java index a388f1c28f..26f875b976 100644 --- a/h2/src/main/org/h2/engine/Mode.java +++ b/h2/src/main/org/h2/engine/Mode.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; @@ -10,6 +10,7 @@ import java.util.HashMap; import java.util.Set; import java.util.regex.Pattern; + import org.h2.util.StringUtils; import org.h2.value.DataType; import org.h2.value.Value; @@ -21,7 +22,7 @@ public class Mode { public enum ModeEnum { - REGULAR, DB2, Derby, MSSQLServer, HSQLDB, MySQL, Oracle, PostgreSQL, Ignite, + REGULAR, STRICT, LEGACY, DB2, Derby, MariaDB, MSSQLServer, HSQLDB, MySQL, Oracle, PostgreSQL } /** @@ -48,6 +49,84 @@ public enum UniqueIndexNullsHandling { FORBID_ANY_DUPLICATES } + /** + * Generation of column names for expressions. + */ + public enum ExpressionNames { + /** + * Use optimized SQL representation of expression. + */ + OPTIMIZED_SQL, + + /** + * Use original SQL representation of expression. + */ + ORIGINAL_SQL, + + /** + * Generate empty name. + */ + EMPTY, + + /** + * Use ordinal number of a column. + */ + NUMBER, + + /** + * Use ordinal number of a column with C prefix. + */ + C_NUMBER, + + /** + * Use function name for functions and ?column? for other expressions + */ + POSTGRESQL_STYLE, + } + + /** + * Generation of column names for expressions to be used in a view. + */ + public enum ViewExpressionNames { + /** + * Use both specified and generated names as is. + */ + AS_IS, + + /** + * Throw exception for unspecified names. + */ + EXCEPTION, + + /** + * Use both specified and generated names as is, but replace too long + * generated names with {@code Name_exp_###}. + */ + MYSQL_STYLE, + } + + /** + * When CHAR values are right-padded with spaces. + */ + public enum CharPadding { + /** + * CHAR values are always right-padded with spaces. + */ + ALWAYS, + + /** + * Spaces are trimmed from the right side of CHAR values, but CHAR + * values in result sets are right-padded with spaces to the declared + * length + */ + IN_RESULT_SETS, + + /** + * Spaces are trimmed from the right side of CHAR values. 
+ */ + NEVER + } + private static final HashMap MODES = new HashMap<>(); // Modes are also documented in the features section @@ -60,14 +139,6 @@ public enum UniqueIndexNullsHandling { */ public boolean aliasColumnName; - /** - * When inserting data, if a column is defined to be NOT NULL and NULL is - * inserted, then a 0 (or empty string, or the current timestamp for - * timestamp columns) value is used. Usually, this operation is not allowed - * and an exception is thrown. - */ - public boolean convertInsertNullToZero; - /** * When converting the scale of decimal data, the number is only converted * if the new scale is smaller than the current scale. Usually, the scale is @@ -83,25 +154,13 @@ public enum UniqueIndexNullsHandling { */ public boolean indexDefinitionInCreateTable; - /** - * Meta data calls return identifiers in lower case. - */ - public boolean lowerCaseIdentifiers; - - /** - * Concatenation with NULL results in NULL. Usually, NULL is treated as an - * empty string if only one of the operands is NULL, and NULL is only - * returned if both operands are NULL. - */ - public boolean nullConcatIsNull; - /** * Identifiers may be quoted using square brackets as in [Test]. */ public boolean squareBracketQuotedNames; /** - * The system columns 'CTID' and 'OID' are supported. + * The system columns 'ctid' and 'oid' are supported. */ public boolean systemColumns; @@ -127,19 +186,19 @@ public enum UniqueIndexNullsHandling { public boolean allowPlusForStringConcat; /** - * The function LOG() uses base 10 instead of E. + * The single-argument function LOG() uses base 10 instead of E. */ public boolean logIsLogBase10; /** - * The function REGEXP_REPLACE() uses \ for back-references. + * Swap the parameters of LOG() function. */ - public boolean regexpReplaceBackslashReferences; + public boolean swapLogFunctionParameters; /** - * SERIAL and BIGSERIAL columns are not automatically primary keys. + * The function REGEXP_REPLACE() uses \ for back-references. 
*/ - public boolean serialColumnIsNotPK; + public boolean regexpReplaceBackslashReferences; /** * Swap the parameters of the CONVERT function. @@ -152,10 +211,20 @@ public enum UniqueIndexNullsHandling { public boolean isolationLevelInSelectOrInsertStatement; /** - * MySQL style INSERT ... ON DUPLICATE KEY UPDATE ... and INSERT IGNORE + * MySQL style INSERT ... ON DUPLICATE KEY UPDATE ... and INSERT IGNORE. */ public boolean onDuplicateKeyUpdate; + /** + * MySQL style REPLACE INTO. + */ + public boolean replaceInto; + + /** + * PostgreSQL style INSERT ... ON CONFLICT DO NOTHING. + */ + public boolean insertOnConflict; + /** * Pattern describing the keys the java.sql.Connection.setClientInfo() * method accepts. @@ -168,19 +237,14 @@ public enum UniqueIndexNullsHandling { public boolean supportPoundSymbolForColumnNames; /** - * Whether an empty list as in "NAME IN()" results in a syntax error. + * Whether IN predicate may have an empty value list. */ - public boolean prohibitEmptyInPredicate; + public boolean allowEmptyInPredicate; /** - * Whether AFFINITY KEY keywords are supported. + * How to pad or trim CHAR values. */ - public boolean allowAffinityKey; - - /** - * Whether to right-pad fixed strings with spaces. - */ - public boolean padFixedLengthStrings; + public CharPadding charPadding = CharPadding.ALWAYS; /** * Whether DB2 TIMESTAMP formats are allowed. @@ -193,21 +257,171 @@ public enum UniqueIndexNullsHandling { public boolean discardWithTableHints; /** - * Use "IDENTITY" as an alias for "auto_increment" (SQLServer style) + * If {@code true}, datetime value function return the same value within a + * transaction, if {@code false} datetime value functions return the same + * value within a command. */ - public boolean useIdentityAsAutoIncrement; + public boolean dateTimeValueWithinTransaction; /** - * Convert (VAR)CHAR to VAR(BINARY) and vice versa with UTF-8 encoding instead of HEX. 
+ * If {@code true} {@code 0x}-prefixed numbers are parsed as binary string + * literals, if {@code false} they are parsed as hexadecimal numeric values. */ - public boolean charToBinaryInUtf8; + public boolean zeroExLiteralsAreBinaryStrings; /** - * If {@code true}, datetime value function return the same value within a - * transaction, if {@code false} datetime value functions return the same - * value within a command. + * If {@code true} unrelated ORDER BY expression are allowed in DISTINCT + * queries, if {@code false} they are disallowed. */ - public boolean dateTimeValueWithinTransaction; + public boolean allowUnrelatedOrderByExpressionsInDistinctQueries; + + /** + * If {@code true} some additional non-standard ALTER TABLE commands are allowed. + */ + public boolean alterTableExtensionsMySQL; + + /** + * If {@code true} non-standard ALTER TABLE MODIFY COLUMN is allowed. + */ + public boolean alterTableModifyColumn; + + /** + * If {@code true} TRUNCATE TABLE uses RESTART IDENTITY by default. + */ + public boolean truncateTableRestartIdentity; + + /** + * If {@code true} NEXT VALUE FOR SEQUENCE, CURRENT VALUE FOR SEQUENCE, + * SEQUENCE.NEXTVAL, and SEQUENCE.CURRVAL return values with DECIMAL/NUMERIC + * data type instead of BIGINT. + */ + public boolean decimalSequences; + + /** + * If {@code true} constructs like 'CREATE TABLE CATALOG..TABLE_NAME' are allowed, + * the default schema is used. + */ + public boolean allowEmptySchemaValuesAsDefaultSchema; + + /** + * If {@code true} all numeric data types may have precision and 'UNSIGNED' + * clause. + */ + public boolean allNumericTypesHavePrecision; + + /** + * If {@code true} 'FOR BIT DATA' clauses are allowed for character string + * data types. + */ + public boolean forBitData; + + /** + * If {@code true} 'CHAR' and 'BYTE' length units are allowed. + */ + public boolean charAndByteLengthUnits; + + /** + * If {@code true}, sequence.NEXTVAL and sequence.CURRVAL pseudo columns are + * supported. 
+ */ + public boolean nextvalAndCurrvalPseudoColumns; + + /** + * If {@code true}, the next value expression returns different values when + * invoked multiple times within a row. This setting does not affect + * NEXTVAL() function. + */ + public boolean nextValueReturnsDifferentValues; + + /** + * If {@code true}, sequences of generated by default identity columns are + * updated when value is provided by user. + */ + public boolean updateSequenceOnManualIdentityInsertion; + + /** + * If {@code true}, last identity of the session is updated on insertion of + * a new value into identity column. + */ + public boolean takeInsertedIdentity; + + /** + * If {@code true}, last identity of the session is updated on generation of + * a new sequence value. + */ + public boolean takeGeneratedSequenceValue; + + /** + * If {@code true}, identity columns have DEFAULT ON NULL clause. + */ + public boolean identityColumnsHaveDefaultOnNull; + + /** + * If {@code true}, merge when matched clause may have WHERE clause. + */ + public boolean mergeWhere; + + /** + * If {@code true}, allow using from clause in update statement. + */ + public boolean allowUsingFromClauseInUpdateStatement; + + /** + * If {@code true}, referential constraints will create a unique constraint + * on referenced columns if it doesn't exist instead of throwing an + * exception. + */ + public boolean createUniqueConstraintForReferencedColumns; + + /** + * How column names are generated for expressions. + */ + public ExpressionNames expressionNames = ExpressionNames.OPTIMIZED_SQL; + + /** + * How column names are generated for views. + */ + public ViewExpressionNames viewExpressionNames = ViewExpressionNames.AS_IS; + + /** + * Whether TOP clause in SELECT queries is supported. + */ + public boolean topInSelect; + + /** + * Whether TOP clause in DML commands is supported. + */ + public boolean topInDML; + + /** + * Whether LIMIT / OFFSET clauses are supported. 
+ */ + public boolean limit; + + /** + * Whether MINUS can be used as EXCEPT. + */ + public boolean minusIsExcept; + + /** + * Whether IDENTITY pseudo data type is supported. + */ + public boolean identityDataType; + + /** + * Whether SERIAL and BIGSERIAL pseudo data types are supported. + */ + public boolean serialDataTypes; + + /** + * Whether SQL Server-style IDENTITY clause is supported. + */ + public boolean identityClause; + + /** + * Whether MySQL-style AUTO_INCREMENT clause is supported. + */ + public boolean autoIncrementClause; /** * An optional Set of hidden/disallowed column types. @@ -221,14 +435,59 @@ public enum UniqueIndexNullsHandling { */ public HashMap typeByNameMap = new HashMap<>(); + /** + * Allow to use GROUP BY n, where n is column index in the SELECT list, similar to ORDER BY + */ + public boolean groupByColumnIndex; + + /** + * Allow to compare numeric with BOOLEAN. + */ + public boolean numericWithBooleanComparison; + private final String name; private final ModeEnum modeEnum; static { Mode mode = new Mode(ModeEnum.REGULAR); - mode.nullConcatIsNull = true; + mode.allowEmptyInPredicate = true; mode.dateTimeValueWithinTransaction = true; + mode.topInSelect = true; + mode.limit = true; + mode.minusIsExcept = true; + mode.identityDataType = true; + mode.serialDataTypes = true; + mode.autoIncrementClause = true; + add(mode); + + mode = new Mode(ModeEnum.STRICT); + mode.dateTimeValueWithinTransaction = true; + add(mode); + + mode = new Mode(ModeEnum.LEGACY); + // Features of REGULAR mode + mode.allowEmptyInPredicate = true; + mode.dateTimeValueWithinTransaction = true; + mode.topInSelect = true; + mode.limit = true; + mode.minusIsExcept = true; + mode.identityDataType = true; + mode.serialDataTypes = true; + mode.autoIncrementClause = true; + // Legacy identity and sequence features + mode.identityClause = true; + mode.updateSequenceOnManualIdentityInsertion = true; + mode.takeInsertedIdentity = true; + mode.identityColumnsHaveDefaultOnNull = 
true; + mode.nextvalAndCurrvalPseudoColumns = true; + // Legacy DML features + mode.topInDML = true; + mode.mergeWhere = true; + // Legacy DDL features + mode.createUniqueConstraintForReferencedColumns = true; + // Legacy numeric with boolean comparison + mode.numericWithBooleanComparison = true; add(mode); mode = new Mode(ModeEnum.DB2); @@ -241,8 +500,14 @@ public enum UniqueIndexNullsHandling { mode.supportedClientInfoPropertiesRegEx = Pattern.compile("ApplicationName|ClientAccountingInformation|" + "ClientUser|ClientCorrelationToken"); - mode.prohibitEmptyInPredicate = true; mode.allowDB2TimestampFormat = true; + mode.forBitData = true; + mode.takeInsertedIdentity = true; + mode.expressionNames = ExpressionNames.NUMBER; + mode.viewExpressionNames = ViewExpressionNames.EXCEPTION; + mode.limit = true; + mode.minusIsExcept = true; + mode.numericWithBooleanComparison = true; add(mode); mode = new Mode(ModeEnum.Derby); @@ -252,19 +517,23 @@ public enum UniqueIndexNullsHandling { mode.isolationLevelInSelectOrInsertStatement = true; // Derby does not support client info properties as of version 10.12.1.1 mode.supportedClientInfoPropertiesRegEx = null; + mode.forBitData = true; + mode.takeInsertedIdentity = true; + mode.expressionNames = ExpressionNames.NUMBER; + mode.viewExpressionNames = ViewExpressionNames.EXCEPTION; add(mode); mode = new Mode(ModeEnum.HSQLDB); - mode.aliasColumnName = true; - mode.convertOnlyToSmallerScale = true; - mode.nullConcatIsNull = true; - mode.uniqueIndexNullsHandling = UniqueIndexNullsHandling.FORBID_ANY_DUPLICATES; mode.allowPlusForStringConcat = true; + mode.identityColumnsHaveDefaultOnNull = true; // HSQLDB does not support client info properties. 
See - // http://hsqldb.org/doc/apidocs/ - // org/hsqldb/jdbc/JDBCConnection.html# - // setClientInfo%28java.lang.String,%20java.lang.String%29 + // http://hsqldb.org/doc/apidocs/org/hsqldb/jdbc/JDBCConnection.html#setClientInfo-java.lang.String-java.lang.String- mode.supportedClientInfoPropertiesRegEx = null; + mode.expressionNames = ExpressionNames.C_NUMBER; + mode.topInSelect = true; + mode.limit = true; + mode.minusIsExcept = true; + mode.numericWithBooleanComparison = true; add(mode); mode = new Mode(ModeEnum.MSSQLServer); @@ -272,40 +541,89 @@ public enum UniqueIndexNullsHandling { mode.squareBracketQuotedNames = true; mode.uniqueIndexNullsHandling = UniqueIndexNullsHandling.FORBID_ANY_DUPLICATES; mode.allowPlusForStringConcat = true; + mode.swapLogFunctionParameters = true; mode.swapConvertFunctionParameters = true; mode.supportPoundSymbolForColumnNames = true; mode.discardWithTableHints = true; - mode.useIdentityAsAutoIncrement = true; // MS SQL Server does not support client info properties. 
See // https://msdn.microsoft.com/en-Us/library/dd571296%28v=sql.110%29.aspx mode.supportedClientInfoPropertiesRegEx = null; - DataType dt = DataType.createNumeric(19, 4, false); - dt.type = Value.DECIMAL; + mode.zeroExLiteralsAreBinaryStrings = true; + mode.truncateTableRestartIdentity = true; + mode.takeInsertedIdentity = true; + DataType dt = DataType.createNumeric(19, 4); + dt.type = Value.NUMERIC; dt.sqlType = Types.NUMERIC; - dt.name = "MONEY"; + dt.specialPrecisionScale = true; mode.typeByNameMap.put("MONEY", dt); - dt = DataType.createNumeric(10, 4, false); - dt.type = Value.DECIMAL; + dt = DataType.createNumeric(10, 4); + dt.type = Value.NUMERIC; dt.sqlType = Types.NUMERIC; - dt.name = "SMALLMONEY"; + dt.specialPrecisionScale = true; mode.typeByNameMap.put("SMALLMONEY", dt); + mode.typeByNameMap.put("UNIQUEIDENTIFIER", DataType.getDataType(Value.UUID)); + mode.allowEmptySchemaValuesAsDefaultSchema = true; + mode.expressionNames = ExpressionNames.EMPTY; + mode.viewExpressionNames = ViewExpressionNames.EXCEPTION; + mode.topInSelect = true; + mode.topInDML = true; + mode.identityClause = true; + mode.numericWithBooleanComparison = true; + add(mode); + + mode = new Mode(ModeEnum.MariaDB); + mode.indexDefinitionInCreateTable = true; + mode.regexpReplaceBackslashReferences = true; + mode.onDuplicateKeyUpdate = true; + mode.replaceInto = true; + mode.charPadding = CharPadding.NEVER; + mode.supportedClientInfoPropertiesRegEx = Pattern.compile(".*"); + mode.zeroExLiteralsAreBinaryStrings = true; + mode.allowUnrelatedOrderByExpressionsInDistinctQueries = true; + mode.alterTableExtensionsMySQL = true; + mode.alterTableModifyColumn = true; + mode.truncateTableRestartIdentity = true; + mode.allNumericTypesHavePrecision = true; + mode.nextValueReturnsDifferentValues = true; + mode.updateSequenceOnManualIdentityInsertion = true; + mode.takeInsertedIdentity = true; + mode.identityColumnsHaveDefaultOnNull = true; + mode.expressionNames = ExpressionNames.ORIGINAL_SQL; + 
mode.viewExpressionNames = ViewExpressionNames.MYSQL_STYLE; + mode.limit = true; + mode.autoIncrementClause = true; + mode.typeByNameMap.put("YEAR", DataType.getDataType(Value.SMALLINT)); + mode.groupByColumnIndex = true; + mode.numericWithBooleanComparison = true; add(mode); mode = new Mode(ModeEnum.MySQL); - mode.convertInsertNullToZero = true; mode.indexDefinitionInCreateTable = true; - mode.lowerCaseIdentifiers = true; - // Next one is for MariaDB mode.regexpReplaceBackslashReferences = true; mode.onDuplicateKeyUpdate = true; + mode.replaceInto = true; + mode.charPadding = CharPadding.NEVER; // MySQL allows to use any key for client info entries. See - // http://grepcode.com/file/repo1.maven.org/maven2/mysql/ - // mysql-connector-java/5.1.24/com/mysql/jdbc/ - // JDBC4CommentClientInfoProvider.java + // https://github.com/mysql/mysql-connector-j/blob/5.1.47/src/com/mysql/jdbc/JDBC4CommentClientInfoProvider.java mode.supportedClientInfoPropertiesRegEx = Pattern.compile(".*"); - mode.prohibitEmptyInPredicate = true; - mode.charToBinaryInUtf8 = true; + mode.zeroExLiteralsAreBinaryStrings = true; + mode.allowUnrelatedOrderByExpressionsInDistinctQueries = true; + mode.alterTableExtensionsMySQL = true; + mode.alterTableModifyColumn = true; + mode.truncateTableRestartIdentity = true; + mode.allNumericTypesHavePrecision = true; + mode.updateSequenceOnManualIdentityInsertion = true; + mode.takeInsertedIdentity = true; + mode.identityColumnsHaveDefaultOnNull = true; + mode.createUniqueConstraintForReferencedColumns = true; + mode.expressionNames = ExpressionNames.ORIGINAL_SQL; + mode.viewExpressionNames = ViewExpressionNames.MYSQL_STYLE; + mode.limit = true; + mode.autoIncrementClause = true; + mode.typeByNameMap.put("YEAR", DataType.getDataType(Value.SMALLINT)); + mode.groupByColumnIndex = true; + mode.numericWithBooleanComparison = true; add(mode); mode = new Mode(ModeEnum.Oracle); @@ -319,48 +637,59 @@ public enum UniqueIndexNullsHandling { // 
https://docs.oracle.com/database/121/JJDBC/jdbcvers.htm#JJDBC29006 mode.supportedClientInfoPropertiesRegEx = Pattern.compile(".*\\..*"); - mode.prohibitEmptyInPredicate = true; + mode.alterTableModifyColumn = true; + mode.decimalSequences = true; + mode.charAndByteLengthUnits = true; + mode.nextvalAndCurrvalPseudoColumns = true; + mode.mergeWhere = true; + mode.minusIsExcept = true; + mode.expressionNames = ExpressionNames.ORIGINAL_SQL; + mode.viewExpressionNames = ViewExpressionNames.EXCEPTION; + mode.typeByNameMap.put("BINARY_FLOAT", DataType.getDataType(Value.REAL)); + mode.typeByNameMap.put("BINARY_DOUBLE", DataType.getDataType(Value.DOUBLE)); dt = DataType.createDate(/* 2001-01-01 23:59:59 */ 19, 19, "DATE", false, 0, 0); dt.type = Value.TIMESTAMP; dt.sqlType = Types.TIMESTAMP; - dt.name = "DATE"; + dt.specialPrecisionScale = true; mode.typeByNameMap.put("DATE", dt); add(mode); mode = new Mode(ModeEnum.PostgreSQL); mode.aliasColumnName = true; - mode.nullConcatIsNull = true; mode.systemColumns = true; mode.logIsLogBase10 = true; mode.regexpReplaceBackslashReferences = true; - mode.serialColumnIsNotPK = true; + mode.insertOnConflict = true; // PostgreSQL only supports the ApplicationName property. 
See // https://github.com/hhru/postgres-jdbc/blob/master/postgresql-jdbc-9.2-1002.src/ // org/postgresql/jdbc4/AbstractJdbc4Connection.java mode.supportedClientInfoPropertiesRegEx = Pattern.compile("ApplicationName"); - mode.prohibitEmptyInPredicate = true; - mode.padFixedLengthStrings = true; + mode.charPadding = CharPadding.IN_RESULT_SETS; + mode.nextValueReturnsDifferentValues = true; + mode.takeGeneratedSequenceValue = true; + mode.expressionNames = ExpressionNames.POSTGRESQL_STYLE; + mode.allowUsingFromClauseInUpdateStatement = true; + mode.limit = true; + mode.serialDataTypes = true; // Enumerate all H2 types NOT supported by PostgreSQL: Set disallowedTypes = new java.util.HashSet<>(); disallowedTypes.add("NUMBER"); - disallowedTypes.add("IDENTITY"); disallowedTypes.add("TINYINT"); disallowedTypes.add("BLOB"); + disallowedTypes.add("VARCHAR_IGNORECASE"); mode.disallowedTypes = disallowedTypes; - dt = DataType.createNumeric(19, 2, false); - dt.type = Value.DECIMAL; + dt = DataType.getDataType(Value.JSON); + mode.typeByNameMap.put("JSONB", dt); + dt = DataType.createNumeric(19, 2); + dt.type = Value.NUMERIC; dt.sqlType = Types.NUMERIC; - dt.name = "MONEY"; + dt.specialPrecisionScale = true; mode.typeByNameMap.put("MONEY", dt); + dt = DataType.getDataType(Value.INTEGER); + mode.typeByNameMap.put("OID", dt); mode.dateTimeValueWithinTransaction = true; - add(mode); - - mode = new Mode(ModeEnum.Ignite); - mode.nullConcatIsNull = true; - mode.allowAffinityKey = true; - mode.indexDefinitionInCreateTable = true; - mode.dateTimeValueWithinTransaction = true; + mode.groupByColumnIndex = true; add(mode); } diff --git a/h2/src/main/org/h2/engine/OnExitDatabaseCloser.java b/h2/src/main/org/h2/engine/OnExitDatabaseCloser.java index bec37316d4..d8022ac6e2 100644 --- a/h2/src/main/org/h2/engine/OnExitDatabaseCloser.java +++ b/h2/src/main/org/h2/engine/OnExitDatabaseCloser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; diff --git a/h2/src/main/org/h2/engine/Procedure.java b/h2/src/main/org/h2/engine/Procedure.java index 86b95a756d..899309b6f6 100644 --- a/h2/src/main/org/h2/engine/Procedure.java +++ b/h2/src/main/org/h2/engine/Procedure.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; diff --git a/h2/src/main/org/h2/engine/QueryStatisticsData.java b/h2/src/main/org/h2/engine/QueryStatisticsData.java index 360e5a9aed..9d805e8a5f 100644 --- a/h2/src/main/org/h2/engine/QueryStatisticsData.java +++ b/h2/src/main/org/h2/engine/QueryStatisticsData.java @@ -1,12 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; @@ -20,15 +19,9 @@ public class QueryStatisticsData { private static final Comparator QUERY_ENTRY_COMPARATOR = - new Comparator() { - @Override - public int compare(QueryEntry o1, QueryEntry o2) { - return Long.signum(o1.lastUpdateTime - o2.lastUpdateTime); - } - }; + Comparator.comparingLong(q -> q.lastUpdateTime); - private final HashMap map = - new HashMap<>(); + private final HashMap map = new HashMap<>(); private int maxQueryEntries; @@ -45,7 +38,7 @@ public synchronized List getQueries() { // worry about external synchronization ArrayList list = new ArrayList<>(map.values()); // only return the newest 100 entries - Collections.sort(list, QUERY_ENTRY_COMPARATOR); + list.sort(QUERY_ENTRY_COMPARATOR); return list.subList(0, Math.min(list.size(), maxQueryEntries)); } @@ -57,8 +50,7 @@ public synchronized List getQueries() { * to execute * @param rowCount the query or update row count */ - public synchronized void update(String sqlStatement, long executionTimeNanos, - int rowCount) { + public synchronized void update(String sqlStatement, long executionTimeNanos, long rowCount) { QueryEntry entry = map.get(sqlStatement); if (entry == null) { entry = new QueryEntry(sqlStatement); @@ -71,7 +63,7 @@ public synchronized void update(String sqlStatement, long executionTimeNanos, if (map.size() > maxQueryEntries * 1.5f) { // Sort the entries by age ArrayList list = new ArrayList<>(map.values()); - Collections.sort(list, QUERY_ENTRY_COMPARATOR); + list.sort(QUERY_ENTRY_COMPARATOR); // Create a set of the oldest 1/3 of the entries HashSet oldestSet = new HashSet<>(list.subList(0, list.size() / 3)); @@ -126,12 +118,12 @@ public static final class QueryEntry { /** * The minimum number of rows. */ - public int rowCountMin; + public long rowCountMin; /** * The maximum number of rows. 
*/ - public int rowCountMax; + public long rowCountMax; /** * The total number of rows. @@ -149,8 +141,8 @@ public static final class QueryEntry { public double rowCountMean; // Using Welford's method, see also - // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance - // http://www.johndcook.com/standard_deviation.html + // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + // https://www.johndcook.com/blog/standard_deviation/ private double executionTimeM2Nanos; private double rowCountM2; @@ -165,7 +157,7 @@ public QueryEntry(String sql) { * @param timeNanos the execution time in nanos * @param rows the number of rows */ - void update(long timeNanos, int rows) { + void update(long timeNanos, long rows) { count++; executionTimeMinNanos = Math.min(timeNanos, executionTimeMinNanos); executionTimeMaxNanos = Math.max(timeNanos, executionTimeMaxNanos); diff --git a/h2/src/main/org/h2/engine/Right.java b/h2/src/main/org/h2/engine/Right.java index 7f5488b716..3f171b7559 100644 --- a/h2/src/main/org/h2/engine/Right.java +++ b/h2/src/main/org/h2/engine/Right.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; @@ -14,7 +14,7 @@ * An access right. Rights are regular database objects, but have generated * names. */ -public class Right extends DbObjectBase { +public final class Right extends DbObject { /** * The right bit mask that means: selecting from a table is allowed. @@ -41,6 +41,12 @@ public class Right extends DbObjectBase { */ public static final int ALTER_ANY_SCHEMA = 16; + /** + * The right bit mask that means: user is a schema owner. This mask isn't + * used in GRANT / REVOKE statements. 
+ */ + public static final int SCHEMA_OWNER = 32; + /** * The right bit mask that means: select, insert, update, delete, and update * for this object is allowed. @@ -73,16 +79,14 @@ public Right(Database db, int id, RightOwner grantee, Role grantedRole) { this.grantedRole = grantedRole; } - public Right(Database db, int id, RightOwner grantee, int grantedRight, - DbObject grantedObject) { + public Right(Database db, int id, RightOwner grantee, int grantedRight, DbObject grantedObject) { super(db, id, Integer.toString(id), Trace.USER); this.grantee = grantee; this.grantedRight = grantedRight; this.grantedObject = grantedObject; } - private static boolean appendRight(StringBuilder buff, int right, int mask, - String name, boolean comma) { + private static boolean appendRight(StringBuilder buff, int right, int mask, String name, boolean comma) { if ((right & mask) != 0) { if (comma) { buff.append(", "); @@ -102,9 +106,8 @@ public String getRights() { comma = appendRight(buff, grantedRight, SELECT, "SELECT", comma); comma = appendRight(buff, grantedRight, DELETE, "DELETE", comma); comma = appendRight(buff, grantedRight, INSERT, "INSERT", comma); - comma = appendRight(buff, grantedRight, ALTER_ANY_SCHEMA, - "ALTER ANY SCHEMA", comma); - appendRight(buff, grantedRight, UPDATE, "UPDATE", comma); + comma = appendRight(buff, grantedRight, UPDATE, "UPDATE", comma); + appendRight(buff, grantedRight, ALTER_ANY_SCHEMA, "ALTER ANY SCHEMA", comma); } return buff.toString(); } @@ -121,36 +124,31 @@ public DbObject getGrantee() { return grantee; } - @Override - public String getDropSQL() { - return null; - } - @Override public String getCreateSQLForCopy(Table table, String quotedName) { return getCreateSQLForCopy(table); } private String getCreateSQLForCopy(DbObject object) { - StringBuilder buff = new StringBuilder(); - buff.append("GRANT "); + StringBuilder builder = new StringBuilder(); + builder.append("GRANT "); if (grantedRole != null) { - grantedRole.getSQL(buff, true); + 
grantedRole.getSQL(builder, DEFAULT_SQL_FLAGS); } else { - buff.append(getRights()); + builder.append(getRights()); if (object != null) { if (object instanceof Schema) { - buff.append(" ON SCHEMA "); - object.getSQL(buff, true); + builder.append(" ON SCHEMA "); + object.getSQL(builder, DEFAULT_SQL_FLAGS); } else if (object instanceof Table) { - buff.append(" ON "); - object.getSQL(buff, true); + builder.append(" ON "); + object.getSQL(builder, DEFAULT_SQL_FLAGS); } } } - buff.append(" TO "); - grantee.getSQL(buff, true); - return buff.toString(); + builder.append(" TO "); + grantee.getSQL(builder, DEFAULT_SQL_FLAGS); + return builder.toString(); } @Override @@ -164,7 +162,7 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { if (grantedRole != null) { grantee.revokeRole(grantedRole); } else { @@ -179,7 +177,7 @@ public void removeChildrenAndResources(Session session) { @Override public void checkRename() { - DbException.throwInternalError(); + throw DbException.getInternalError(); } public void setRightMask(int rightMask) { diff --git a/h2/src/main/org/h2/engine/RightOwner.java b/h2/src/main/org/h2/engine/RightOwner.java index 0aedba5f38..bcd5e0ebfc 100644 --- a/h2/src/main/org/h2/engine/RightOwner.java +++ b/h2/src/main/org/h2/engine/RightOwner.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; @@ -10,13 +10,16 @@ import java.util.List; import java.util.Map.Entry; +import org.h2.api.ErrorCode; +import org.h2.message.DbException; +import org.h2.schema.Schema; import org.h2.table.Table; import org.h2.util.StringUtils; /** * A right owner (sometimes called principal). */ -public abstract class RightOwner extends DbObjectBase { +public abstract class RightOwner extends DbObject { /** * The map of granted roles. @@ -61,36 +64,69 @@ public boolean isRoleGranted(Role grantedRole) { } /** - * Check if a right is already granted to this object or to objects that - * were granted to this object. The rights for schemas takes - * precedence over rights of tables, in other words, the rights of schemas - * will be valid for every each table in the related schema. + * Checks if a right is already granted to this object or to objects that + * were granted to this object. The rights of schemas will be valid for + * every each table in the related schema. The ALTER ANY SCHEMA right gives + * all rights to all tables. 
* - * @param table the table to check - * @param rightMask the right mask to check + * @param table + * the table to check + * @param rightMask + * the right mask to check * @return true if the right was already granted */ - boolean isRightGrantedRecursive(Table table, int rightMask) { - Right right; + final boolean isTableRightGrantedRecursive(Table table, int rightMask) { + Schema schema = table.getSchema(); + if (schema.getOwner() == this) { + return true; + } if (grantedRights != null) { - if (table != null) { - right = grantedRights.get(table.getSchema()); - if (right != null) { - if ((right.getRightMask() & rightMask) == rightMask) { - return true; - } - } + Right right = grantedRights.get(null); + if (right != null && (right.getRightMask() & Right.ALTER_ANY_SCHEMA) == Right.ALTER_ANY_SCHEMA) { + return true; + } + right = grantedRights.get(schema); + if (right != null && (right.getRightMask() & rightMask) == rightMask) { + return true; } right = grantedRights.get(table); - if (right != null) { - if ((right.getRightMask() & rightMask) == rightMask) { + if (right != null && (right.getRightMask() & rightMask) == rightMask) { + return true; + } + } + if (grantedRoles != null) { + for (Role role : grantedRoles.keySet()) { + if (role.isTableRightGrantedRecursive(table, rightMask)) { return true; } } } + return false; + } + + /** + * Checks if a schema owner right is already granted to this object or to + * objects that were granted to this object. The ALTER ANY SCHEMA right + * gives rights to all schemas. 
+ * + * @param schema + * the schema to check, or {@code null} to check for ALTER ANY + * SCHEMA right only + * @return true if the right was already granted + */ + final boolean isSchemaRightGrantedRecursive(Schema schema) { + if (schema != null && schema.getOwner() == this) { + return true; + } + if (grantedRights != null) { + Right right = grantedRights.get(null); + if (right != null && (right.getRightMask() & Right.ALTER_ANY_SCHEMA) == Right.ALTER_ANY_SCHEMA) { + return true; + } + } if (grantedRoles != null) { - for (RightOwner role : grantedRoles.keySet()) { - if (role.isRightGrantedRecursive(table, rightMask)) { + for (Role role : grantedRoles.keySet()) { + if (role.isSchemaRightGrantedRecursive(schema)) { return true; } } @@ -205,4 +241,19 @@ public Right getRightForRole(Role role) { return grantedRoles.get(role); } + /** + * Check that this right owner does not own any schema. An exception is + * thrown if it owns one or more schemas. + * + * @throws DbException + * if this right owner owns a schema + */ + public final void checkOwnsNoSchemas() { + for (Schema s : database.getAllSchemas()) { + if (this == s.getOwner()) { + throw DbException.get(ErrorCode.CANNOT_DROP_2, getName(), s.getName()); + } + } + } + } diff --git a/h2/src/main/org/h2/engine/Role.java b/h2/src/main/org/h2/engine/Role.java index 186dd519dd..7fec06ca11 100644 --- a/h2/src/main/org/h2/engine/Role.java +++ b/h2/src/main/org/h2/engine/Role.java @@ -1,18 +1,21 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; +import java.util.ArrayList; + import org.h2.message.DbException; import org.h2.message.Trace; +import org.h2.schema.Schema; import org.h2.table.Table; /** * Represents a role. 
Roles can be granted to users, and to other roles. */ -public class Role extends RightOwner { +public final class Role extends RightOwner { private final boolean system; @@ -23,12 +26,7 @@ public Role(Database database, int id, String roleName, boolean system) { @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); - } - - @Override - public String getDropSQL() { - return null; + throw DbException.getInternalError(toString()); } /** @@ -41,12 +39,11 @@ public String getCreateSQL(boolean ifNotExists) { if (system) { return null; } - StringBuilder buff = new StringBuilder("CREATE ROLE "); + StringBuilder builder = new StringBuilder("CREATE ROLE "); if (ifNotExists) { - buff.append("IF NOT EXISTS "); + builder.append("IF NOT EXISTS "); } - getSQL(buff, true); - return buff.toString(); + return getSQL(builder, DEFAULT_SQL_FLAGS).toString(); } @Override @@ -60,15 +57,20 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { - for (User user : database.getAllUsers()) { - Right right = user.getRightForRole(this); - if (right != null) { - database.removeDatabaseObject(session, right); + public ArrayList getChildren() { + ArrayList children = new ArrayList<>(); + for (Schema schema : database.getAllSchemas()) { + if (schema.getOwner() == this) { + children.add(schema); } } - for (Role r2 : database.getAllRoles()) { - Right right = r2.getRightForRole(this); + return children; + } + + @Override + public void removeChildrenAndResources(SessionLocal session) { + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + Right right = rightOwner.getRightForRole(this); if (right != null) { database.removeDatabaseObject(session, right); } @@ -82,9 +84,4 @@ public void removeChildrenAndResources(Session session) { invalidate(); } - @Override - public void checkRename() { - // ok - } - } diff --git a/h2/src/main/org/h2/engine/Session.java 
b/h2/src/main/org/h2/engine/Session.java index 48395aee68..654458ceee 100644 --- a/h2/src/main/org/h2/engine/Session.java +++ b/h2/src/main/org/h2/engine/Session.java @@ -1,1906 +1,310 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; -import java.util.ArrayDeque; import java.util.ArrayList; -import java.util.BitSet; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import org.h2.api.ErrorCode; -import org.h2.command.Command; + import org.h2.command.CommandInterface; -import org.h2.command.Parser; -import org.h2.command.Prepared; -import org.h2.command.ddl.Analyze; -import org.h2.command.dml.Query; -import org.h2.constraint.Constraint; -import org.h2.index.Index; -import org.h2.index.ViewIndex; -import org.h2.jdbc.JdbcConnection; -import org.h2.message.DbException; +import org.h2.jdbc.meta.DatabaseMeta; import org.h2.message.Trace; -import org.h2.message.TraceSystem; -import org.h2.mvstore.MVMap; -import org.h2.mvstore.db.MVTable; -import org.h2.mvstore.db.MVTableEngine; -import org.h2.mvstore.tx.Transaction; -import org.h2.mvstore.tx.TransactionStore; -import org.h2.value.VersionedValue; import org.h2.result.ResultInterface; -import org.h2.result.Row; -import org.h2.result.SortOrder; -import org.h2.schema.Schema; import org.h2.store.DataHandler; -import org.h2.store.InDoubtTransaction; -import org.h2.store.LobStorageFrontend; -import org.h2.table.SubQueryInfo; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.table.TableType; -import 
org.h2.util.ColumnNamerConfiguration; -import org.h2.util.CurrentTimestamp; -import org.h2.util.SmallLRUCache; +import org.h2.util.NetworkConnectionInfo; +import org.h2.util.TimeZoneProvider; import org.h2.util.Utils; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; -import org.h2.value.ValueString; -import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueLob; /** - * A session represents an embedded database connection. When using the server - * mode, this object resides on the server side and communicates with a - * SessionRemote object on the client side. + * A local or remote session. A session represents a database connection. */ -public class Session extends SessionWithState implements TransactionStore.RollbackListener { - - public enum State { INIT, RUNNING, BLOCKED, SLEEP, CLOSED } +public abstract class Session implements CastDataProvider, AutoCloseable { /** - * This special log position means that the log entry has been written. + * Static settings. */ - public static final int LOG_WRITTEN = -1; + public static final class StaticSettings { - /** - * The prefix of generated identifiers. It may not have letters, because - * they are case sensitive. - */ - private static final String SYSTEM_IDENTIFIER_PREFIX = "_"; - private static int nextSerialId; + /** + * Whether unquoted identifiers are converted to upper case. + */ + public final boolean databaseToUpper; - private final int serialId = nextSerialId++; - private final Database database; - private ConnectionInfo connectionInfo; - private final User user; - private final int id; - private final ArrayList
          locks = Utils.newSmallArrayList(); - private UndoLog undoLog; - private boolean autoCommit = true; - private Random random; - private int lockTimeout; - private Value lastIdentity = ValueLong.get(0); - private Value lastScopeIdentity = ValueLong.get(0); - private Value lastTriggerIdentity; - private GeneratedKeys generatedKeys; - private int firstUncommittedLog = Session.LOG_WRITTEN; - private int firstUncommittedPos = Session.LOG_WRITTEN; - private HashMap savepoints; - private HashMap localTempTables; - private HashMap localTempTableIndexes; - private HashMap localTempTableConstraints; - private long throttleNs; - private long lastThrottle; - private Command currentCommand; - private boolean allowLiterals; - private String currentSchemaName; - private String[] schemaSearchPath; - private Trace trace; - private HashMap removeLobMap; - private int systemIdentifier; - private HashMap procedures; - private boolean undoLogEnabled = true; - private boolean redoLogBinary = true; - private boolean autoCommitAtTransactionEnd; - private String currentTransactionName; - private volatile long cancelAtNs; - private final long sessionStart = System.currentTimeMillis(); - private ValueTimestampTimeZone transactionStart; - private ValueTimestampTimeZone currentCommandStart; - private HashMap variables; - private HashSet temporaryResults; - private int queryTimeout; - private boolean commitOrRollbackDisabled; - private Table waitForLock; - private Thread waitForLockThread; - private int modificationId; - private int objectId; - private final int queryCacheSize; - private SmallLRUCache queryCache; - private long modificationMetaID = -1; - private SubQueryInfo subQueryInfo; - private ArrayDeque viewNameStack; - private int preparingQueryExpression; - private volatile SmallLRUCache viewIndexCache; - private HashMap subQueryIndexCache; - private boolean joinBatchEnabled; - private boolean forceJoinOrder; - private boolean lazyQueryExecution; - private 
ColumnNamerConfiguration columnNamerConfiguration; - /** - * Tables marked for ANALYZE after the current transaction is committed. - * Prevents us calling ANALYZE repeatedly in large transactions. - */ - private HashSet
          tablesToAnalyze; + /** + * Whether unquoted identifiers are converted to lower case. + */ + public final boolean databaseToLower; - /** - * Temporary LOBs from result sets. Those are kept for some time. The - * problem is that transactions are committed before the result is returned, - * and in some cases the next transaction is already started before the - * result is read (for example when using the server mode, when accessing - * metadata methods). We can't simply free those values up when starting the - * next transaction, because they would be removed too early. - */ - private LinkedList temporaryResultLobs; + /** + * Whether all identifiers are case insensitive. + */ + public final boolean caseInsensitiveIdentifiers; - /** - * The temporary LOBs that need to be removed on commit. - */ - private ArrayList temporaryLobs; + /** + * Creates new instance of static settings. + * + * @param databaseToUpper + * whether unquoted identifiers are converted to upper case + * @param databaseToLower + * whether unquoted identifiers are converted to lower case + * @param caseInsensitiveIdentifiers + * whether all identifiers are case insensitive + */ + public StaticSettings(boolean databaseToUpper, boolean databaseToLower, boolean caseInsensitiveIdentifiers) { + this.databaseToUpper = databaseToUpper; + this.databaseToLower = databaseToLower; + this.caseInsensitiveIdentifiers = caseInsensitiveIdentifiers; + } - private Transaction transaction; - private final AtomicReference state = new AtomicReference<>(State.INIT); - private long startStatement = -1; + } /** - * Set of database object ids to be released at the end of transaction + * Dynamic settings. 
*/ - private BitSet idsToRelease; + public static final class DynamicSettings { - public Session(Database database, User user, int id) { - this.database = database; - this.queryTimeout = database.getSettings().maxQueryTimeout; - this.queryCacheSize = database.getSettings().queryCacheSize; - this.user = user; - this.id = id; - this.lockTimeout = database.getLockTimeout(); - // PageStore creates a system session before initialization of the main schema - Schema mainSchema = database.getMainSchema(); - this.currentSchemaName = mainSchema != null ? mainSchema.getName() - : database.sysIdentifier(Constants.SCHEMA_MAIN); - this.columnNamerConfiguration = ColumnNamerConfiguration.getDefault(); - } + /** + * The database mode. + */ + public final Mode mode; - public void setLazyQueryExecution(boolean lazyQueryExecution) { - this.lazyQueryExecution = lazyQueryExecution; - } + /** + * The current time zone. + */ + public final TimeZoneProvider timeZone; - public boolean isLazyQueryExecution() { - return lazyQueryExecution; - } + /** + * Creates new instance of dynamic settings. + * + * @param mode + * the database mode + * @param timeZone + * the current time zone + */ + public DynamicSettings(Mode mode, TimeZoneProvider timeZone) { + this.mode = mode; + this.timeZone = timeZone; + } - public void setForceJoinOrder(boolean forceJoinOrder) { - this.forceJoinOrder = forceJoinOrder; } - public boolean isForceJoinOrder() { - return forceJoinOrder; - } + private ArrayList sessionState; - public void setJoinBatchEnabled(boolean joinBatchEnabled) { - this.joinBatchEnabled = joinBatchEnabled; - } + boolean sessionStateChanged; - public boolean isJoinBatchEnabled() { - return joinBatchEnabled; - } + private boolean sessionStateUpdating; - /** - * Create a new row for a table. 
- * - * @param data the values - * @param memory whether the row is in memory - * @return the created row - */ - public Row createRow(Value[] data, int memory) { - return database.createRow(data, memory); - } + volatile StaticSettings staticSettings; - /** - * Add a subquery info on top of the subquery info stack. - * - * @param masks the mask - * @param filters the filters - * @param filter the filter index - * @param sortOrder the sort order - */ - public void pushSubQueryInfo(int[] masks, TableFilter[] filters, int filter, - SortOrder sortOrder) { - subQueryInfo = new SubQueryInfo(subQueryInfo, masks, filters, filter, sortOrder); + Session() { } /** - * Remove the current subquery info from the stack. + * Get the list of the cluster servers for this session. + * + * @return A list of "ip:port" strings for the cluster servers in this + * session. */ - public void popSubQueryInfo() { - subQueryInfo = subQueryInfo.getUpper(); - } - - public SubQueryInfo getSubQueryInfo() { - return subQueryInfo; - } + public abstract ArrayList getClusterServers(); /** - * Stores name of currently parsed view in a stack so it can be determined - * during {@code prepare()}. + * Parse a command and prepare it for execution. * - * @param parsingView - * {@code true} to store one more name, {@code false} to remove it - * from stack - * @param viewName - * name of the view + * @param sql the SQL statement + * @param fetchSize the number of rows to fetch in one step + * @return the prepared command */ - public void setParsingCreateView(boolean parsingView, String viewName) { - if (viewNameStack == null) { - viewNameStack = new ArrayDeque<>(3); - } - if (parsingView) { - viewNameStack.push(viewName); - } else { - String name = viewNameStack.pop(); - assert viewName.equals(name); - } - } - - public String getParsingCreateViewName() { - return viewNameStack != null ? 
viewNameStack.peek() : null; - } - - public boolean isParsingCreateView() { - return viewNameStack != null && !viewNameStack.isEmpty(); - } + public abstract CommandInterface prepareCommand(String sql, int fetchSize); /** - * Optimize a query. This will remember the subquery info, clear it, prepare - * the query, and reset the subquery info. - * - * @param query the query to prepare + * Roll back pending transactions and close the session. */ - public void optimizeQueryExpression(Query query) { - // we have to hide current subQueryInfo if we are going to optimize - // query expression - SubQueryInfo tmp = subQueryInfo; - subQueryInfo = null; - preparingQueryExpression++; - try { - query.prepare(); - } finally { - subQueryInfo = tmp; - preparingQueryExpression--; - } - } - - public boolean isPreparingQueryExpression() { - assert preparingQueryExpression >= 0; - return preparingQueryExpression != 0; - } - @Override - public ArrayList getClusterServers() { - return new ArrayList<>(); - } - - public boolean setCommitOrRollbackDisabled(boolean x) { - boolean old = commitOrRollbackDisabled; - commitOrRollbackDisabled = x; - return old; - } - - private void initVariables() { - if (variables == null) { - variables = database.newStringMap(); - } - } + public abstract void close(); /** - * Set the value of the given variable for this session. 
+ * Get the trace object * - * @param name the name of the variable (may not be null) - * @param value the new value (may not be null) + * @return the trace object */ - public void setVariable(String name, Value value) { - initVariables(); - modificationId++; - Value old; - if (value == ValueNull.INSTANCE) { - old = variables.remove(name); - } else { - // link LOB values, to make sure we have our own object - value = value.copy(database, - LobStorageFrontend.TABLE_ID_SESSION_VARIABLE); - old = variables.put(name, value); - } - if (old != null) { - // remove the old value (in case it is a lob) - old.remove(); - } - } + public abstract Trace getTrace(); /** - * Get the value of the specified user defined variable. This method always - * returns a value; it returns ValueNull.INSTANCE if the variable doesn't - * exist. + * Check if close was called. * - * @param name the variable name - * @return the value, or NULL + * @return if the session has been closed */ - public Value getVariable(String name) { - initVariables(); - Value v = variables.get(name); - return v == null ? ValueNull.INSTANCE : v; - } + public abstract boolean isClosed(); /** - * Get the list of variable names that are set for this session. + * Get the data handler object. * - * @return the list of names + * @return the data handler */ - public String[] getVariableNames() { - if (variables == null) { - return new String[0]; - } - return variables.keySet().toArray(new String[variables.size()]); - } + public abstract DataHandler getDataHandler(); /** - * Get the local temporary table if one exists with that name, or null if - * not. + * Check whether this session has a pending transaction. * - * @param name the table name - * @return the table, or null + * @return true if it has */ - public Table findLocalTempTable(String name) { - if (localTempTables == null) { - return null; - } - return localTempTables.get(name); - } - - public ArrayList
          getLocalTempTables() { - if (localTempTables == null) { - return Utils.newSmallArrayList(); - } - return new ArrayList<>(localTempTables.values()); - } + public abstract boolean hasPendingTransaction(); /** - * Add a local temporary table to this session. - * - * @param table the table to add - * @throws DbException if a table with this name already exists + * Cancel the current or next command (called when closing a connection). */ - public void addLocalTempTable(Table table) { - if (localTempTables == null) { - localTempTables = database.newStringMap(); - } - if (localTempTables.get(table.getName()) != null) { - StringBuilder builder = new StringBuilder(); - table.getSQL(builder, false).append(" AS "); - Parser.quoteIdentifier(table.getName(), false); - throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, builder.toString()); - } - modificationId++; - localTempTables.put(table.getName(), table); - } + public abstract void cancel(); /** - * Drop and remove the given local temporary table from this session. + * Check if this session is in auto-commit mode. * - * @param table the table + * @return true if the session is in auto-commit mode */ - public void removeLocalTempTable(Table table) { - // Exception thrown in org.h2.engine.Database.removeMeta if line below - // is missing with TestGeneralCommonTableQueries - boolean wasLocked = database.lockMeta(this); - try { - modificationId++; - if (localTempTables != null) { - localTempTables.remove(table.getName()); - } - synchronized (database) { - table.removeChildrenAndResources(this); - } - } finally { - if (!wasLocked) { - database.unlockMeta(this); - } - } - } + public abstract boolean getAutoCommit(); /** - * Get the local temporary index if one exists with that name, or null if - * not. + * Set the auto-commit mode. This call doesn't commit the current + * transaction. 
* - * @param name the table name - * @return the table, or null + * @param autoCommit the new value */ - public Index findLocalTempTableIndex(String name) { - if (localTempTableIndexes == null) { - return null; - } - return localTempTableIndexes.get(name); - } - - public HashMap getLocalTempTableIndexes() { - if (localTempTableIndexes == null) { - return new HashMap<>(); - } - return localTempTableIndexes; - } + public abstract void setAutoCommit(boolean autoCommit); /** - * Add a local temporary index to this session. + * Add a temporary LOB, which is closed when the session commits. * - * @param index the index to add - * @throws DbException if a index with this name already exists + * @param v the value + * @return the specified value */ - public void addLocalTempTableIndex(Index index) { - if (localTempTableIndexes == null) { - localTempTableIndexes = database.newStringMap(); - } - if (localTempTableIndexes.get(index.getName()) != null) { - throw DbException.get(ErrorCode.INDEX_ALREADY_EXISTS_1, index.getSQL(false)); - } - localTempTableIndexes.put(index.getName(), index); - } + public abstract ValueLob addTemporaryLob(ValueLob v); /** - * Drop and remove the given local temporary index from this session. + * Check if this session is remote or embedded. * - * @param index the index + * @return true if this session is remote */ - public void removeLocalTempTableIndex(Index index) { - if (localTempTableIndexes != null) { - localTempTableIndexes.remove(index.getName()); - synchronized (database) { - index.removeChildrenAndResources(this); - } - } - } + public abstract boolean isRemote(); /** - * Get the local temporary constraint if one exists with that name, or - * null if not. + * Set current schema. 
* - * @param name the constraint name - * @return the constraint, or null + * @param schema the schema name */ - public Constraint findLocalTempTableConstraint(String name) { - if (localTempTableConstraints == null) { - return null; - } - return localTempTableConstraints.get(name); - } + public abstract void setCurrentSchemaName(String schema); /** - * Get the map of constraints for all constraints on local, temporary - * tables, if any. The map's keys are the constraints' names. + * Get current schema. * - * @return the map of constraints, or null + * @return the current schema name */ - public HashMap getLocalTempTableConstraints() { - if (localTempTableConstraints == null) { - return new HashMap<>(); - } - return localTempTableConstraints; - } + public abstract String getCurrentSchemaName(); /** - * Add a local temporary constraint to this session. + * Sets the network connection information if possible. * - * @param constraint the constraint to add - * @throws DbException if a constraint with the same name already exists + * @param networkConnectionInfo the network connection information */ - public void addLocalTempTableConstraint(Constraint constraint) { - if (localTempTableConstraints == null) { - localTempTableConstraints = database.newStringMap(); - } - String name = constraint.getName(); - if (localTempTableConstraints.get(name) != null) { - throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, constraint.getSQL(false)); - } - localTempTableConstraints.put(name, constraint); - } + public abstract void setNetworkConnectionInfo(NetworkConnectionInfo networkConnectionInfo); /** - * Drop and remove the given local temporary constraint from this session. + * Returns the isolation level. 
* - * @param constraint the constraint + * @return the isolation level */ - void removeLocalTempTableConstraint(Constraint constraint) { - if (localTempTableConstraints != null) { - localTempTableConstraints.remove(constraint.getName()); - synchronized (database) { - constraint.removeChildrenAndResources(this); - } - } - } - - @Override - public boolean getAutoCommit() { - return autoCommit; - } - - public User getUser() { - return user; - } - - @Override - public void setAutoCommit(boolean b) { - autoCommit = b; - } - - public int getLockTimeout() { - return lockTimeout; - } - - public void setLockTimeout(int lockTimeout) { - this.lockTimeout = lockTimeout; - } - - @Override - public synchronized CommandInterface prepareCommand(String sql, - int fetchSize) { - return prepareLocal(sql); - } + public abstract IsolationLevel getIsolationLevel(); /** - * Parse and prepare the given SQL statement. This method also checks the - * rights. + * Sets the isolation level. * - * @param sql the SQL statement - * @return the prepared statement + * @param isolationLevel the isolation level to set */ - public Prepared prepare(String sql) { - return prepare(sql, false, false); - } + public abstract void setIsolationLevel(IsolationLevel isolationLevel); /** - * Parse and prepare the given SQL statement. + * Returns static settings. These settings cannot be changed during + * lifecycle of session. * - * @param sql the SQL statement - * @param rightsChecked true if the rights have already been checked - * @param literalsChecked true if the sql string has already been checked - * for literals (only used if ALLOW_LITERALS NONE is set). 
- * @return the prepared statement + * @return static settings */ - public Prepared prepare(String sql, boolean rightsChecked, boolean literalsChecked) { - Parser parser = new Parser(this); - parser.setRightsChecked(rightsChecked); - parser.setLiteralsChecked(literalsChecked); - return parser.prepare(sql); - } + public abstract StaticSettings getStaticSettings(); /** - * Parse and prepare the given SQL statement. - * This method also checks if the connection has been closed. + * Returns dynamic settings. These settings can be changed during lifecycle + * of session. * - * @param sql the SQL statement - * @return the prepared statement - */ - public Command prepareLocal(String sql) { - if (isClosed()) { - throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, - "session closed"); - } - Command command; - if (queryCacheSize > 0) { - if (queryCache == null) { - queryCache = SmallLRUCache.newInstance(queryCacheSize); - modificationMetaID = database.getModificationMetaId(); - } else { - long newModificationMetaID = database.getModificationMetaId(); - if (newModificationMetaID != modificationMetaID) { - queryCache.clear(); - modificationMetaID = newModificationMetaID; - } - command = queryCache.get(sql); - if (command != null && command.canReuse()) { - command.reuse(); - return command; - } - } - } - Parser parser = new Parser(this); - try { - command = parser.prepareCommand(sql); - } finally { - // we can't reuse sub-query indexes, so just drop the whole cache - subQueryIndexCache = null; - } - command.prepareJoinBatch(); - if (queryCache != null) { - if (command.isCacheable()) { - queryCache.put(sql, command); - } - } - return command; - } - - /** - * Arranges for the specified database object id to be released - * at the end of the current transaction. 
- * @param id to be scheduled + * @return dynamic settings */ - void scheduleDatabaseObjectIdForRelease(int id) { - if (idsToRelease == null) { - idsToRelease = new BitSet(); - } - idsToRelease.set(id); - } - - public Database getDatabase() { - return database; - } - - @Override - public int getPowerOffCount() { - return database.getPowerOffCount(); - } - - @Override - public void setPowerOffCount(int count) { - database.setPowerOffCount(count); - } + public abstract DynamicSettings getDynamicSettings(); /** - * Commit the current transaction. If the statement was not a data - * definition statement, and if there are temporary tables that should be - * dropped or truncated at commit, this is done as well. + * Returns database meta information. * - * @param ddl if the statement was a data definition statement - */ - public void commit(boolean ddl) { - checkCommitRollback(); - - currentTransactionName = null; - transactionStart = null; - if (transaction != null) { - try { - // increment the data mod count, so that other sessions - // see the changes - // TODO should not rely on locking - if (!locks.isEmpty()) { - for (Table t : locks) { - if (t instanceof MVTable) { - ((MVTable) t).commit(); - } - } - } - transaction.commit(); - } finally { - transaction = null; - } - } else if (containsUncommitted()) { - // need to commit even if rollback is not possible - // (create/drop table and so on) - database.commit(this); - } - removeTemporaryLobs(true); - if (undoLog != null && undoLog.size() > 0) { - undoLog.clear(); - } - if (!ddl) { - // do not clean the temp tables if the last command was a - // create/drop - cleanTempTables(false); - if (autoCommitAtTransactionEnd) { - autoCommit = true; - autoCommitAtTransactionEnd = false; - } - } - - if (tablesToAnalyze != null) { - if (database.isMVStore()) { - // table analysis will cause a new transaction(s) to be opened, - // so we need to commit afterwards whatever leftovers might be - analyzeTables(); - commit(true); - } else 
{ - analyzeTables(); - } - } - endTransaction(); - } - - private void analyzeTables() { - int rowCount = getDatabase().getSettings().analyzeSample / 10; - for (Table table : tablesToAnalyze) { - Analyze.analyzeTable(this, table, rowCount, false); - } - // analyze can lock the meta - database.unlockMeta(this); - tablesToAnalyze = null; - } - - private void removeTemporaryLobs(boolean onTimeout) { - assert this != getDatabase().getLobSession() || Thread.holdsLock(this) || Thread.holdsLock(getDatabase()); - if (temporaryLobs != null) { - for (Value v : temporaryLobs) { - if (!v.isLinkedToTable()) { - v.remove(); - } - } - temporaryLobs.clear(); - } - if (temporaryResultLobs != null && !temporaryResultLobs.isEmpty()) { - long keepYoungerThan = System.nanoTime() - - TimeUnit.MILLISECONDS.toNanos(database.getSettings().lobTimeout); - while (!temporaryResultLobs.isEmpty()) { - TimeoutValue tv = temporaryResultLobs.getFirst(); - if (onTimeout && tv.created >= keepYoungerThan) { - break; - } - Value v = temporaryResultLobs.removeFirst().value; - if (!v.isLinkedToTable()) { - v.remove(); - } - } - } - } - - private void checkCommitRollback() { - if (commitOrRollbackDisabled && !locks.isEmpty()) { - throw DbException.get(ErrorCode.COMMIT_ROLLBACK_NOT_ALLOWED); - } - } - - private void endTransaction() { - if (removeLobMap != null && removeLobMap.size() > 0) { - if (database.getStore() == null) { - // need to flush the transaction log, because we can't unlink - // lobs if the commit record is not written - database.flush(); - } - for (Value v : removeLobMap.values()) { - v.remove(); - } - removeLobMap = null; - } - unlockAll(); - if (idsToRelease != null) { - database.releaseDatabaseObjectIds(idsToRelease); - idsToRelease = null; - } - } - - /** - * Fully roll back the current transaction. 
+ * @return database meta information */ - public void rollback() { - checkCommitRollback(); - currentTransactionName = null; - transactionStart = null; - boolean needCommit = undoLog != null && undoLog.size() > 0 || transaction != null; - if (needCommit) { - rollbackTo(null); - } - if (!locks.isEmpty() || needCommit) { - database.commit(this); - } - idsToRelease = null; - cleanTempTables(false); - if (autoCommitAtTransactionEnd) { - autoCommit = true; - autoCommitAtTransactionEnd = false; - } - endTransaction(); - } + public abstract DatabaseMeta getDatabaseMeta(); /** - * Partially roll back the current transaction. + * Returns whether INFORMATION_SCHEMA contains old-style tables. * - * @param savepoint the savepoint to which should be rolled back + * @return whether INFORMATION_SCHEMA contains old-style tables */ - public void rollbackTo(Savepoint savepoint) { - int index = savepoint == null ? 0 : savepoint.logIndex; - if (undoLog != null) { - while (undoLog.size() > index) { - UndoLogRecord entry = undoLog.getLast(); - entry.undo(this); - undoLog.removeLast(); - } - } - if (transaction != null) { - if (savepoint == null) { - transaction.rollback(); - transaction = null; - } else { - transaction.rollbackToSavepoint(savepoint.transactionSavepoint); - } - } - if (savepoints != null) { - String[] names = savepoints.keySet().toArray(new String[savepoints.size()]); - for (String name : names) { - Savepoint sp = savepoints.get(name); - int savepointIndex = sp.logIndex; - if (savepointIndex > index) { - savepoints.remove(name); - } - } - } - - // Because cache may have captured query result (in Query.lastResult), - // which is based on data from uncommitted transaction., - // It is not valid after rollback, therefore cache has to be cleared. 
- if(queryCache != null) { - queryCache.clear(); - } - } - - @Override - public boolean hasPendingTransaction() { - return undoLog != null && undoLog.size() > 0; - } + public abstract boolean isOldInformationSchema(); /** - * Create a savepoint to allow rolling back to this state. - * - * @return the savepoint + * Re-create the session state using the stored sessionState list. */ - public Savepoint setSavepoint() { - Savepoint sp = new Savepoint(); - if (undoLog != null) { - sp.logIndex = undoLog.size(); - } - if (database.getStore() != null) { - sp.transactionSavepoint = getStatementSavepoint(); - } - return sp; - } - - public int getId() { - return id; - } - - @Override - public void cancel() { - cancelAtNs = System.nanoTime(); - } - - @Override - public void close() { - // this is the only operation that can be invoked concurrently - // so, we should prevent double-closure - if (state.getAndSet(State.CLOSED) != State.CLOSED) { + void recreateSessionState() { + if (sessionState != null && !sessionState.isEmpty()) { + sessionStateUpdating = true; try { - database.checkPowerOff(); - - // release any open table locks - rollback(); - - removeTemporaryLobs(false); - cleanTempTables(true); - commit(true); // temp table removal may have opened new transaction - if (undoLog != null) { - undoLog.clear(); + for (String sql : sessionState) { + CommandInterface ci = prepareCommand(sql, Integer.MAX_VALUE); + ci.executeUpdate(null); } - // Table#removeChildrenAndResources can take the meta lock, - // and we need to unlock before we call removeSession(), which might - // want to take the meta lock using the system session. - database.unlockMeta(this); } finally { - database.removeSession(this); - } - } - } - - /** - * Add a lock for the given table. The object is unlocked on commit or - * rollback. 
- * - * @param table the table that is locked - */ - public void addLock(Table table) { - if (SysProperties.CHECK) { - if (locks.contains(table)) { - DbException.throwInternalError(table.toString()); + sessionStateUpdating = false; + sessionStateChanged = false; } } - locks.add(table); } /** - * Add an undo log entry to this session. - * - * @param table the table - * @param operation the operation type (see {@link UndoLogRecord}) - * @param row the row + * Read the session state if necessary. */ - public void log(Table table, short operation, Row row) { - if (table.isMVStore()) { + public void readSessionState() { + if (!sessionStateChanged || sessionStateUpdating) { return; } - if (undoLogEnabled) { - UndoLogRecord log = new UndoLogRecord(table, operation, row); - // called _after_ the row was inserted successfully into the table, - // otherwise rollback will try to rollback a not-inserted row - if (SysProperties.CHECK) { - int lockMode = database.getLockMode(); - if (lockMode != Constants.LOCK_MODE_OFF && - !database.isMVStore()) { - TableType tableType = log.getTable().getTableType(); - if (!locks.contains(log.getTable()) - && TableType.TABLE_LINK != tableType - && TableType.EXTERNAL_TABLE_ENGINE != tableType) { - DbException.throwInternalError(String.valueOf(tableType)); - } - } - } - if (undoLog == null) { - undoLog = new UndoLog(database); - } - undoLog.add(log); + sessionStateChanged = false; + sessionState = Utils.newSmallArrayList(); + CommandInterface ci = prepareCommand(!isOldInformationSchema() + ? "SELECT STATE_COMMAND FROM INFORMATION_SCHEMA.SESSION_STATE" + : "SELECT SQL FROM INFORMATION_SCHEMA.SESSION_STATE", Integer.MAX_VALUE); + ResultInterface result = ci.executeQuery(0, false); + while (result.next()) { + sessionState.add(result.currentRow()[0].getString()); } } /** - * Unlock all read locks. This is done if the transaction isolation mode is - * READ_COMMITTED. 
+ * Sets this session as thread local session, if this session is a local + * session. + * + * @return old thread local session, or {@code null} */ - public void unlockReadLocks() { - if (!database.isMVStore() && database.isMultiThreaded() && - database.getLockMode() == Constants.LOCK_MODE_READ_COMMITTED) { - for (Iterator
          iter = locks.iterator(); iter.hasNext(); ) { - Table t = iter.next(); - if (!t.isLockedExclusively()) { - t.unlock(this); - iter.remove(); - } - } - } + public Session setThreadLocalSession() { + return null; } /** - * Unlock just this table. + * Resets old thread local session. * - * @param t the table to unlock + * @param oldSession + * the old thread local session, or {@code null} */ - void unlock(Table t) { - locks.remove(t); - } - - private void unlockAll() { - if (undoLog != null && undoLog.size() > 0) { - DbException.throwInternalError(); - } - if (!locks.isEmpty()) { - for (Table t : locks) { - t.unlock(this); - } - locks.clear(); - } - database.unlockMetaDebug(this); - savepoints = null; - sessionStateChanged = true; - } - - private void cleanTempTables(boolean closeSession) { - if (localTempTables != null && localTempTables.size() > 0) { - if (database.isMVStore()) { - _cleanTempTables(closeSession); - } else { - synchronized (database) { - _cleanTempTables(closeSession); - } - } - } - } - - private void _cleanTempTables(boolean closeSession) { - Iterator
          it = localTempTables.values().iterator(); - while (it.hasNext()) { - Table table = it.next(); - if (closeSession || table.getOnCommitDrop()) { - modificationId++; - table.setModified(); - it.remove(); - // Exception thrown in org.h2.engine.Database.removeMeta - // if line below is missing with TestDeadlock - database.lockMeta(this); - table.removeChildrenAndResources(this); - if (closeSession) { - // need to commit, otherwise recovery might - // ignore the table removal - database.commit(this); - } - } else if (table.getOnCommitTruncate()) { - table.truncate(this); - } - } - } - - public Random getRandom() { - if (random == null) { - random = new Random(); - } - return random; - } - - @Override - public Trace getTrace() { - if (trace != null && !isClosed()) { - return trace; - } - String traceModuleName = "jdbc[" + id + "]"; - if (isClosed()) { - return new TraceSystem(null).getTrace(traceModuleName); - } - trace = database.getTraceSystem().getTrace(traceModuleName); - return trace; - } - - public void setLastIdentity(Value last) { - this.lastIdentity = last; - this.lastScopeIdentity = last; - } - - public Value getLastIdentity() { - return lastIdentity; - } - - public void setLastScopeIdentity(Value last) { - this.lastScopeIdentity = last; - } - - public Value getLastScopeIdentity() { - return lastScopeIdentity; - } - - public void setLastTriggerIdentity(Value last) { - this.lastTriggerIdentity = last; - } - - public Value getLastTriggerIdentity() { - return lastTriggerIdentity; - } - - public GeneratedKeys getGeneratedKeys() { - if (generatedKeys == null) { - generatedKeys = new GeneratedKeys(); - } - return generatedKeys; - } - - /** - * Called when a log entry for this session is added. The session keeps - * track of the first entry in the transaction log that is not yet - * committed. 
- * - * @param logId the transaction log id - * @param pos the position of the log entry in the transaction log - */ - public void addLogPos(int logId, int pos) { - if (firstUncommittedLog == Session.LOG_WRITTEN) { - firstUncommittedLog = logId; - firstUncommittedPos = pos; - } - } - - public int getFirstUncommittedLog() { - return firstUncommittedLog; - } - - /** - * This method is called after the transaction log has written the commit - * entry for this session. - */ - void setAllCommitted() { - firstUncommittedLog = Session.LOG_WRITTEN; - firstUncommittedPos = Session.LOG_WRITTEN; - } - - /** - * Whether the session contains any uncommitted changes. - * - * @return true if yes - */ - public boolean containsUncommitted() { - if (database.getStore() != null) { - return transaction != null && transaction.hasChanges(); - } - return firstUncommittedLog != Session.LOG_WRITTEN; - } - - /** - * Create a savepoint that is linked to the current log position. - * - * @param name the savepoint name - */ - public void addSavepoint(String name) { - if (savepoints == null) { - savepoints = database.newStringMap(); - } - savepoints.put(name, setSavepoint()); - } - - /** - * Undo all operations back to the log position of the given savepoint. - * - * @param name the savepoint name - */ - public void rollbackToSavepoint(String name) { - checkCommitRollback(); - currentTransactionName = null; - transactionStart = null; - if (savepoints == null) { - throw DbException.get(ErrorCode.SAVEPOINT_IS_INVALID_1, name); - } - Savepoint savepoint = savepoints.get(name); - if (savepoint == null) { - throw DbException.get(ErrorCode.SAVEPOINT_IS_INVALID_1, name); - } - rollbackTo(savepoint); - } - - /** - * Prepare the given transaction. 
- * - * @param transactionName the name of the transaction - */ - public void prepareCommit(String transactionName) { - if (containsUncommitted()) { - // need to commit even if rollback is not possible (create/drop - // table and so on) - database.prepareCommit(this, transactionName); - } - currentTransactionName = transactionName; - } - - /** - * Commit or roll back the given transaction. - * - * @param transactionName the name of the transaction - * @param commit true for commit, false for rollback - */ - public void setPreparedTransaction(String transactionName, boolean commit) { - if (currentTransactionName != null && - currentTransactionName.equals(transactionName)) { - if (commit) { - commit(false); - } else { - rollback(); - } - } else { - ArrayList list = database - .getInDoubtTransactions(); - int state = commit ? InDoubtTransaction.COMMIT - : InDoubtTransaction.ROLLBACK; - boolean found = false; - if (list != null) { - for (InDoubtTransaction p: list) { - if (p.getTransactionName().equals(transactionName)) { - p.setState(state); - found = true; - break; - } - } - } - if (!found) { - throw DbException.get(ErrorCode.TRANSACTION_NOT_FOUND_1, - transactionName); - } - } - } - - @Override - public boolean isClosed() { - return state.get() == State.CLOSED; - } - - public void setThrottle(int throttle) { - this.throttleNs = TimeUnit.MILLISECONDS.toNanos(throttle); - } - - /** - * Wait for some time if this session is throttled (slowed down). 
- */ - public void throttle() { - if (currentCommandStart == null) { - currentCommandStart = CurrentTimestamp.get(); - } - if (throttleNs == 0) { - return; - } - long time = System.nanoTime(); - if (lastThrottle + TimeUnit.MILLISECONDS.toNanos(Constants.THROTTLE_DELAY) > time) { - return; - } - State prevState = this.state.get(); - if (prevState != State.CLOSED) { - lastThrottle = time + throttleNs; - try { - state.compareAndSet(prevState, State.SLEEP); - Thread.sleep(TimeUnit.NANOSECONDS.toMillis(throttleNs)); - } catch (Exception e) { - // ignore InterruptedException - } finally { - state.compareAndSet(State.SLEEP, prevState); - } - } - } - - /** - * Set the current command of this session. This is done just before - * executing the statement. - * - * @param command the command - * @param generatedKeysRequest - * {@code false} if generated keys are not needed, {@code true} if - * generated keys should be configured automatically, {@code int[]} - * to specify column indices to return generated keys from, or - * {@code String[]} to specify column names to return generated keys - * from - */ - public void setCurrentCommand(Command command, Object generatedKeysRequest) { - currentCommand = command; - // Preserve generated keys in case of a new query due to possible nested - // queries in update - if (command != null && !command.isQuery()) { - getGeneratedKeys().clear(generatedKeysRequest); - } - if (command != null) { - if (queryTimeout > 0) { - currentCommandStart = CurrentTimestamp.get(); - long now = System.nanoTime(); - cancelAtNs = now + TimeUnit.MILLISECONDS.toNanos(queryTimeout); - } else { - currentCommandStart = null; - } - } - State currentState = state.get(); - if(currentState != State.CLOSED) { - state.compareAndSet(currentState, command == null ? State.SLEEP : State.RUNNING); - } - } - - /** - * Check if the current transaction is canceled by calling - * Statement.cancel() or because a session timeout was set and expired. 
- * - * @throws DbException if the transaction is canceled - */ - public void checkCanceled() { - throttle(); - if (cancelAtNs == 0) { - return; - } - long time = System.nanoTime(); - if (time >= cancelAtNs) { - cancelAtNs = 0; - throw DbException.get(ErrorCode.STATEMENT_WAS_CANCELED); - } - } - - /** - * Get the cancel time. - * - * @return the time or 0 if not set - */ - public long getCancel() { - return cancelAtNs; - } - - public Command getCurrentCommand() { - return currentCommand; - } - - public ValueTimestampTimeZone getCurrentCommandStart() { - if (currentCommandStart == null) { - currentCommandStart = CurrentTimestamp.get(); - } - return currentCommandStart; - } - - public boolean getAllowLiterals() { - return allowLiterals; - } - - public void setAllowLiterals(boolean b) { - this.allowLiterals = b; - } - - public void setCurrentSchema(Schema schema) { - modificationId++; - this.currentSchemaName = schema.getName(); - } - - @Override - public String getCurrentSchemaName() { - return currentSchemaName; - } - - @Override - public void setCurrentSchemaName(String schemaName) { - Schema schema = database.getSchema(schemaName); - setCurrentSchema(schema); - } - - /** - * Create an internal connection. This connection is used when initializing - * triggers, and when calling user defined functions. - * - * @param columnList if the url should be 'jdbc:columnlist:connection' - * @return the internal connection - */ - public JdbcConnection createConnection(boolean columnList) { - String url; - if (columnList) { - url = Constants.CONN_URL_COLUMNLIST; - } else { - url = Constants.CONN_URL_INTERNAL; - } - return new JdbcConnection(this, getUser().getName(), url); - } - - @Override - public DataHandler getDataHandler() { - return database; - } - - /** - * Remember that the given LOB value must be removed at commit. 
- * - * @param v the value - */ - public void removeAtCommit(Value v) { - final String key = v.toString(); - if (!v.isLinkedToTable()) { - DbException.throwInternalError(key); - } - if (removeLobMap == null) { - removeLobMap = new HashMap<>(); - } - removeLobMap.put(key, v); - } - - /** - * Do not remove this LOB value at commit any longer. - * - * @param v the value - */ - public void removeAtCommitStop(Value v) { - if (removeLobMap != null) { - removeLobMap.remove(v.toString()); - } - } - - /** - * Get the next system generated identifiers. The identifier returned does - * not occur within the given SQL statement. - * - * @param sql the SQL statement - * @return the new identifier - */ - public String getNextSystemIdentifier(String sql) { - String identifier; - do { - identifier = SYSTEM_IDENTIFIER_PREFIX + systemIdentifier++; - } while (sql.contains(identifier)); - return identifier; - } - - /** - * Add a procedure to this session. - * - * @param procedure the procedure to add - */ - public void addProcedure(Procedure procedure) { - if (procedures == null) { - procedures = database.newStringMap(); - } - procedures.put(procedure.getName(), procedure); - } - - /** - * Remove a procedure from this session. - * - * @param name the name of the procedure to remove - */ - public void removeProcedure(String name) { - if (procedures != null) { - procedures.remove(name); - } - } - - /** - * Get the procedure with the given name, or null - * if none exists. 
- * - * @param name the procedure name - * @return the procedure or null - */ - public Procedure getProcedure(String name) { - if (procedures == null) { - return null; - } - return procedures.get(name); - } - - public void setSchemaSearchPath(String[] schemas) { - modificationId++; - this.schemaSearchPath = schemas; - } - - public String[] getSchemaSearchPath() { - return schemaSearchPath; - } - - @Override - public int hashCode() { - return serialId; - } - - @Override - public String toString() { - return "#" + serialId + " (user: " + (user == null ? "" : user.getName()) + ")"; - } - - public void setUndoLogEnabled(boolean b) { - this.undoLogEnabled = b; - } - - public void setRedoLogBinary(boolean b) { - this.redoLogBinary = b; - } - - public boolean isUndoLogEnabled() { - return undoLogEnabled; - } - - /** - * Begin a transaction. - */ - public void begin() { - autoCommitAtTransactionEnd = true; - autoCommit = false; - } - - public long getSessionStart() { - return sessionStart; - } - - public ValueTimestampTimeZone getTransactionStart() { - if (transactionStart == null) { - transactionStart = CurrentTimestamp.get(); - } - return transactionStart; - } - - public Table[] getLocks() { - // copy the data without synchronizing - ArrayList
          copy = new ArrayList<>(locks.size()); - for (Table lock : locks) { - try { - copy.add(lock); - } catch (Exception e) { - // ignore - break; - } - } - return copy.toArray(new Table[0]); - } - - /** - * Wait if the exclusive mode has been enabled for another session. This - * method returns as soon as the exclusive mode has been disabled. - */ - public void waitIfExclusiveModeEnabled() { - // Even in exclusive mode, we have to let the LOB session proceed, or we - // will get deadlocks. - if (database.getLobSession() == this) { - return; - } - while (true) { - Session exclusive = database.getExclusiveSession(); - if (exclusive == null || exclusive == this) { - break; - } - if (Thread.holdsLock(exclusive)) { - // if another connection is used within the connection - break; - } - try { - Thread.sleep(100); - } catch (InterruptedException e) { - // ignore - } - } - } - - /** - * Get the view cache for this session. There are two caches: the subquery - * cache (which is only use for a single query, has no bounds, and is - * cleared after use), and the cache for regular views. - * - * @param subQuery true to get the subquery cache - * @return the view cache - */ - public Map getViewIndexCache(boolean subQuery) { - if (subQuery) { - // for sub-queries we don't need to use LRU because the cache should - // not grow too large for a single query (we drop the whole cache in - // the end of prepareLocal) - if (subQueryIndexCache == null) { - subQueryIndexCache = new HashMap<>(); - } - return subQueryIndexCache; - } - SmallLRUCache cache = viewIndexCache; - if (cache == null) { - viewIndexCache = cache = SmallLRUCache.newInstance(Constants.VIEW_INDEX_CACHE_SIZE); - } - return cache; - } - - /** - * Remember the result set and close it as soon as the transaction is - * committed (if it needs to be closed). This is done to delete temporary - * files as soon as possible, and free object ids of temporary tables. 
- * - * @param result the temporary result set - */ - public void addTemporaryResult(ResultInterface result) { - if (!result.needToClose()) { - return; - } - if (temporaryResults == null) { - temporaryResults = new HashSet<>(); - } - if (temporaryResults.size() < 100) { - // reference at most 100 result sets to avoid memory problems - temporaryResults.add(result); - } - } - - private void closeTemporaryResults() { - if (temporaryResults != null) { - for (ResultInterface result : temporaryResults) { - result.close(); - } - temporaryResults = null; - } - } - - public void setQueryTimeout(int queryTimeout) { - int max = database.getSettings().maxQueryTimeout; - if (max != 0 && (max < queryTimeout || queryTimeout == 0)) { - // the value must be at most max - queryTimeout = max; - } - this.queryTimeout = queryTimeout; - // must reset the cancel at here, - // otherwise it is still used - this.cancelAtNs = 0; - } - - public int getQueryTimeout() { - return queryTimeout; - } - - /** - * Set the table this session is waiting for, and the thread that is - * waiting. 
- * - * @param waitForLock the table - * @param waitForLockThread the current thread (the one that is waiting) - */ - public void setWaitForLock(Table waitForLock, Thread waitForLockThread) { - this.waitForLock = waitForLock; - this.waitForLockThread = waitForLockThread; - } - - public Table getWaitForLock() { - return waitForLock; - } - - public Thread getWaitForLockThread() { - return waitForLockThread; - } - - public int getModificationId() { - return modificationId; - } - - @Override - public boolean isReconnectNeeded(boolean write) { - while (true) { - boolean reconnect = database.isReconnectNeeded(); - if (reconnect) { - return true; - } - if (write) { - if (database.beforeWriting()) { - return false; - } - } else { - return false; - } - } - } - - @Override - public void afterWriting() { - database.afterWriting(); - } - - @Override - public SessionInterface reconnect(boolean write) { - readSessionState(); - close(); - Session newSession = Engine.getInstance().createSession(connectionInfo); - newSession.sessionState = sessionState; - newSession.recreateSessionState(); - if (write) { - while (!newSession.database.beforeWriting()) { - // wait until we are allowed to write - } - } - return newSession; - } - - public void setConnectionInfo(ConnectionInfo ci) { - connectionInfo = ci; - } - - public Value getTransactionId() { - if (database.getStore() != null) { - if (transaction == null || !transaction.hasChanges()) { - return ValueNull.INSTANCE; - } - return ValueString.get(Long.toString(getTransaction().getSequenceNum())); - } - if (!database.isPersistent()) { - return ValueNull.INSTANCE; - } - if (undoLog == null || undoLog.size() == 0) { - return ValueNull.INSTANCE; - } - return ValueString.get(firstUncommittedLog + "-" + firstUncommittedPos + - "-" + id); - } - - /** - * Get the next object id. 
- * - * @return the next object id - */ - public int nextObjectId() { - return objectId++; - } - - public boolean isRedoLogBinaryEnabled() { - return redoLogBinary; - } - - /** - * Get the transaction to use for this session. - * - * @return the transaction - */ - public Transaction getTransaction() { - if (transaction == null) { - MVTableEngine.Store store = database.getStore(); - if (store != null) { - if (store.getMvStore().isClosed()) { - Throwable backgroundException = database.getBackgroundException(); - database.shutdownImmediately(); - throw DbException.get(ErrorCode.DATABASE_IS_CLOSED, backgroundException); - } - transaction = store.getTransactionStore().begin(this, this.lockTimeout, id); - } - startStatement = -1; - } - return transaction; - } - - private long getStatementSavepoint() { - if (startStatement == -1) { - startStatement = getTransaction().setSavepoint(); - } - return startStatement; - } - - /** - * Start a new statement within a transaction. - */ - public void startStatementWithinTransaction() { - Transaction transaction = getTransaction(); - if(transaction != null) { - transaction.markStatementStart(); - } - startStatement = -1; - } - - /** - * Mark the statement as completed. This also close all temporary result - * set, and deletes all temporary files held by the result sets. - */ - public void endStatement() { - if(transaction != null) { - transaction.markStatementEnd(); - } - startStatement = -1; - closeTemporaryResults(); - } - - /** - * Clear the view cache for this session. 
- */ - public void clearViewIndexCache() { - viewIndexCache = null; - } - - @Override - public void addTemporaryLob(Value v) { - if (!DataType.isLargeObject(v.getValueType())) { - return; - } - if (v.getTableId() == LobStorageFrontend.TABLE_RESULT - || v.getTableId() == LobStorageFrontend.TABLE_TEMP) { - if (temporaryResultLobs == null) { - temporaryResultLobs = new LinkedList<>(); - } - temporaryResultLobs.add(new TimeoutValue(v)); - } else { - if (temporaryLobs == null) { - temporaryLobs = new ArrayList<>(); - } - temporaryLobs.add(v); - } - } - - @Override - public boolean isRemote() { - return false; - } - - /** - * Mark that the given table needs to be analyzed on commit. - * - * @param table the table - */ - public void markTableForAnalyze(Table table) { - if (tablesToAnalyze == null) { - tablesToAnalyze = new HashSet<>(); - } - tablesToAnalyze.add(table); - } - - public State getState() { - return getBlockingSessionId() != 0 ? State.BLOCKED : state.get(); - } - - public int getBlockingSessionId() { - return transaction == null ? 
0 : transaction.getBlockerId(); - } - - @Override - public void onRollback(MVMap map, Object key, - VersionedValue existingValue, - VersionedValue restoredValue) { - // Here we are relying on the fact that map which backs table's primary index - // has the same name as the table itself - MVTableEngine.Store store = database.getStore(); - if(store != null) { - MVTable table = store.getTable(map.getName()); - if (table != null) { - long recKey = ((ValueLong)key).getLong(); - Row oldRow = getRowFromVersionedValue(table, recKey, existingValue); - Row newRow = getRowFromVersionedValue(table, recKey, restoredValue); - table.fireAfterRow(this, oldRow, newRow, true); - - if (table.getContainsLargeObject()) { - if (oldRow != null) { - for (int i = 0, len = oldRow.getColumnCount(); i < len; i++) { - Value v = oldRow.getValue(i); - if (v.isLinkedToTable()) { - removeAtCommit(v); - } - } - } - if (newRow != null) { - for (int i = 0, len = newRow.getColumnCount(); i < len; i++) { - Value v = newRow.getValue(i); - if (v.isLinkedToTable()) { - removeAtCommitStop(v); - } - } - } - } - } - } - } - - private static Row getRowFromVersionedValue(MVTable table, long recKey, - VersionedValue versionedValue) { - Object value = versionedValue == null ? null : versionedValue.getCurrentValue(); - if (value == null) { - return null; - } - Row result; - if(value instanceof Row) { - result = (Row) value; - assert result.getKey() == recKey : result.getKey() + " != " + recKey; - } else { - ValueArray array = (ValueArray) value; - result = table.createRow(array.getList(), 0); - result.setKey(recKey); - } - return result; - } - - - /** - * Represents a savepoint (a position in a transaction to where one can roll - * back to). - */ - public static class Savepoint { - - /** - * The undo log index. - */ - int logIndex; - - /** - * The transaction savepoint id. - */ - long transactionSavepoint; - } - - /** - * An object with a timeout. 
- */ - public static class TimeoutValue { - - /** - * The time when this object was created. - */ - final long created = System.nanoTime(); - - /** - * The value. - */ - final Value value; - - TimeoutValue(Value v) { - this.value = v; - } - - } - - public ColumnNamerConfiguration getColumnNamerConfiguration() { - return columnNamerConfiguration; - } - - public void setColumnNamerConfiguration(ColumnNamerConfiguration columnNamerConfiguration) { - this.columnNamerConfiguration = columnNamerConfiguration; - } - - @Override - public boolean isSupportsGeneratedKeys() { - return true; + public void resetThreadLocalSession(Session oldSession) { } } diff --git a/h2/src/main/org/h2/engine/SessionFactory.java b/h2/src/main/org/h2/engine/SessionFactory.java deleted file mode 100644 index c996f1e238..0000000000 --- a/h2/src/main/org/h2/engine/SessionFactory.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.sql.SQLException; - -/** - * A class that implements this interface can create new database sessions. This - * exists so that the JDBC layer (the client) can be compiled without dependency - * to the core database engine. - */ -interface SessionFactory { - - /** - * Create a new session. - * - * @param ci the connection parameters - * @return the new session - */ - SessionInterface createSession(ConnectionInfo ci) throws SQLException; - -} diff --git a/h2/src/main/org/h2/engine/SessionInterface.java b/h2/src/main/org/h2/engine/SessionInterface.java deleted file mode 100644 index ba1fa8a31b..0000000000 --- a/h2/src/main/org/h2/engine/SessionInterface.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.io.Closeable; -import java.util.ArrayList; -import org.h2.command.CommandInterface; -import org.h2.message.Trace; -import org.h2.store.DataHandler; -import org.h2.value.Value; - -/** - * A local or remote session. A session represents a database connection. - */ -public interface SessionInterface extends Closeable { - - /** - * Get the list of the cluster servers for this session. - * - * @return A list of "ip:port" strings for the cluster servers in this - * session. - */ - ArrayList getClusterServers(); - - /** - * Parse a command and prepare it for execution. - * - * @param sql the SQL statement - * @param fetchSize the number of rows to fetch in one step - * @return the prepared command - */ - CommandInterface prepareCommand(String sql, int fetchSize); - - /** - * Roll back pending transactions and close the session. - */ - @Override - void close(); - - /** - * Get the trace object - * - * @return the trace object - */ - Trace getTrace(); - - /** - * Check if close was called. - * - * @return if the session has been closed - */ - boolean isClosed(); - - /** - * Get the number of disk operations before power failure is simulated. - * This is used for testing. If not set, 0 is returned - * - * @return the number of operations, or 0 - */ - int getPowerOffCount(); - - /** - * Set the number of disk operations before power failure is simulated. - * To disable the countdown, use 0. - * - * @param i the number of operations - */ - void setPowerOffCount(int i); - - /** - * Get the data handler object. - * - * @return the data handler - */ - DataHandler getDataHandler(); - - /** - * Check whether this session has a pending transaction. - * - * @return true if it has - */ - boolean hasPendingTransaction(); - - /** - * Cancel the current or next command (called when closing a connection). - */ - void cancel(); - - /** - * Check if the database changed and therefore reconnecting is required. 
- * - * @param write if the next operation may be writing - * @return true if reconnecting is required - */ - boolean isReconnectNeeded(boolean write); - - /** - * Close the connection and open a new connection. - * - * @param write if the next operation may be writing - * @return the new connection - */ - SessionInterface reconnect(boolean write); - - /** - * Called after writing has ended. It needs to be called after - * isReconnectNeeded(true) returned false. - */ - void afterWriting(); - - /** - * Check if this session is in auto-commit mode. - * - * @return true if the session is in auto-commit mode - */ - boolean getAutoCommit(); - - /** - * Set the auto-commit mode. This call doesn't commit the current - * transaction. - * - * @param autoCommit the new value - */ - void setAutoCommit(boolean autoCommit); - - /** - * Add a temporary LOB, which is closed when the session commits. - * - * @param v the value - */ - void addTemporaryLob(Value v); - - /** - * Check if this session is remote or embedded. - * - * @return true if this session is remote - */ - boolean isRemote(); - - /** - * Set current schema. - * - * @param schema the schema name - */ - void setCurrentSchemaName(String schema); - - /** - * Get current schema. - * - * @return the current schema name - */ - String getCurrentSchemaName(); - - /** - * Returns is this session supports generated keys. - * - * @return {@code true} if generated keys are supported, {@code false} if only - * {@code SCOPE_IDENTITY()} is supported - */ - boolean isSupportsGeneratedKeys(); - -} diff --git a/h2/src/main/org/h2/engine/SessionLocal.java b/h2/src/main/org/h2/engine/SessionLocal.java new file mode 100644 index 0000000000..8117c628da --- /dev/null +++ b/h2/src/main/org/h2/engine/SessionLocal.java @@ -0,0 +1,2069 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.engine; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.WeakHashMap; +import java.util.concurrent.atomic.AtomicReference; +import org.h2.api.ErrorCode; +import org.h2.api.JavaObjectSerializer; +import org.h2.command.Command; +import org.h2.command.CommandInterface; +import org.h2.command.Parser; +import org.h2.command.Prepared; +import org.h2.command.ddl.Analyze; +import org.h2.constraint.Constraint; +import org.h2.index.Index; +import org.h2.index.ViewIndex; +import org.h2.jdbc.JdbcConnection; +import org.h2.jdbc.meta.DatabaseMeta; +import org.h2.jdbc.meta.DatabaseMetaLocal; +import org.h2.message.DbException; +import org.h2.message.Trace; +import org.h2.message.TraceSystem; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.db.MVIndex; +import org.h2.mvstore.db.MVTable; +import org.h2.mvstore.db.Store; +import org.h2.mvstore.tx.Transaction; +import org.h2.mvstore.tx.TransactionStore; +import org.h2.result.Row; +import org.h2.schema.Schema; +import org.h2.schema.Sequence; +import org.h2.store.DataHandler; +import org.h2.store.InDoubtTransaction; +import org.h2.store.LobStorageFrontend; +import org.h2.table.Table; +import org.h2.util.DateTimeUtils; +import org.h2.util.HasSQL; +import org.h2.util.NetworkConnectionInfo; +import org.h2.util.SmallLRUCache; +import org.h2.util.TimeZoneProvider; +import org.h2.util.Utils; +import org.h2.value.Value; +import org.h2.value.ValueLob; +import org.h2.value.ValueNull; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarchar; +import org.h2.value.VersionedValue; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; +import 
org.h2.value.lob.LobDataInMemory; + +/** + * A session represents an embedded database connection. When using the server + * mode, this object resides on the server side and communicates with a + * SessionRemote object on the client side. + */ +public final class SessionLocal extends Session implements TransactionStore.RollbackListener { + + public enum State { INIT, RUNNING, BLOCKED, SLEEP, THROTTLED, SUSPENDED, CLOSED } + + private static final class SequenceAndPrepared { + + private final Sequence sequence; + + private final Prepared prepared; + + SequenceAndPrepared(Sequence sequence, Prepared prepared) { + this.sequence = sequence; + this.prepared = prepared; + } + + @Override + public int hashCode() { + return 31 * (31 + prepared.hashCode()) + sequence.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != SequenceAndPrepared.class) { + return false; + } + SequenceAndPrepared other = (SequenceAndPrepared) obj; + return sequence == other.sequence && prepared == other.prepared; + } + + } + + private static final class RowNumberAndValue { + + long rowNumber; + + Value nextValue; + + RowNumberAndValue(long rowNumber, Value nextValue) { + this.rowNumber = rowNumber; + this.nextValue = nextValue; + } + + } + + /** + * The prefix of generated identifiers. It may not have letters, because + * they are case sensitive. + */ + private static final String SYSTEM_IDENTIFIER_PREFIX = "_"; + private static int nextSerialId; + + /** + * Thread local session for comparison operations between different data types. 
+ */ + private static final ThreadLocal THREAD_LOCAL_SESSION = new ThreadLocal<>(); + + static Session getThreadLocalSession() { + Session session = THREAD_LOCAL_SESSION.get(); + if (session == null) { + THREAD_LOCAL_SESSION.remove(); + } + return session; + } + + private final int serialId = nextSerialId++; + private final Database database; + private final User user; + private final int id; + + private NetworkConnectionInfo networkConnectionInfo; + + private final ArrayList
          locks = Utils.newSmallArrayList(); + private boolean autoCommit = true; + private Random random; + private int lockTimeout; + + private HashMap nextValueFor; + private WeakHashMap currentValueFor; + private Value lastIdentity = ValueNull.INSTANCE; + + private HashMap savepoints; + private HashMap localTempTables; + private HashMap localTempTableIndexes; + private HashMap localTempTableConstraints; + private int throttleMs; + private long lastThrottleNs; + private Command currentCommand; + private boolean allowLiterals; + private String currentSchemaName; + private String[] schemaSearchPath; + private Trace trace; + private HashMap removeLobMap; + private int systemIdentifier; + private HashMap procedures; + private boolean autoCommitAtTransactionEnd; + private String currentTransactionName; + private volatile long cancelAtNs; + private final ValueTimestampTimeZone sessionStart; + private Instant commandStartOrEnd; + private ValueTimestampTimeZone currentTimestamp; + private HashMap variables; + private int queryTimeout; + private boolean commitOrRollbackDisabled; + private Table waitForLock; + private Thread waitForLockThread; + private int modificationId; + private int objectId; + private final int queryCacheSize; + private SmallLRUCache queryCache; + private long modificationMetaID = -1; + private int createViewLevel; + private volatile SmallLRUCache viewIndexCache; + private HashMap subQueryIndexCache; + private boolean lazyQueryExecution; + + private BitSet nonKeywords; + + private TimeZoneProvider timeZone; + + /** + * Tables marked for ANALYZE after the current transaction is committed. + * Prevents us calling ANALYZE repeatedly in large transactions. + */ + private HashSet
          tablesToAnalyze; + + /** + * Temporary LOBs from result sets. Those are kept for some time. The + * problem is that transactions are committed before the result is returned, + * and in some cases the next transaction is already started before the + * result is read (for example when using the server mode, when accessing + * metadata methods). We can't simply free those values up when starting the + * next transaction, because they would be removed too early. + */ + private LinkedList temporaryResultLobs; + + /** + * The temporary LOBs that need to be removed on commit. + */ + private ArrayList temporaryLobs; + + private Transaction transaction; + private final AtomicReference state = new AtomicReference<>(State.INIT); + private long startStatement = -1; + + /** + * Isolation level. + */ + private IsolationLevel isolationLevel = IsolationLevel.READ_COMMITTED; + + /** + * The snapshot data modification id. If isolation level doesn't allow + * non-repeatable reads the session uses a snapshot versions of data. After + * commit or rollback these snapshots are discarded and cached results of + * queries may became invalid. Commit and rollback allocate a new data + * modification id and store it here to forbid usage of older results. + */ + private long snapshotDataModificationId; + + /** + * Set of database object ids to be released at the end of transaction + */ + private BitSet idsToRelease; + + /** + * Whether length in definitions of data types is truncated. + */ + private boolean truncateLargeLength; + + /** + * Whether BINARY is parsed as VARBINARY. + */ + private boolean variableBinary; + + /** + * Whether INFORMATION_SCHEMA contains old-style tables. + */ + private boolean oldInformationSchema; + + /** + * Whether commands are executed in quirks mode to support scripts from older versions of H2. 
+ */ + private boolean quirksMode; + + public SessionLocal(Database database, User user, int id) { + this.database = database; + this.queryTimeout = database.getSettings().maxQueryTimeout; + this.queryCacheSize = database.getSettings().queryCacheSize; + this.user = user; + this.id = id; + this.lockTimeout = database.getLockTimeout(); + Schema mainSchema = database.getMainSchema(); + this.currentSchemaName = mainSchema != null ? mainSchema.getName() + : database.sysIdentifier(Constants.SCHEMA_MAIN); + timeZone = DateTimeUtils.getTimeZone(); + sessionStart = DateTimeUtils.currentTimestamp(timeZone, commandStartOrEnd = Instant.now()); + } + + public void setLazyQueryExecution(boolean lazyQueryExecution) { + this.lazyQueryExecution = lazyQueryExecution; + } + + public boolean isLazyQueryExecution() { + return lazyQueryExecution; + } + + /** + * This method is called before and after parsing of view definition and may + * be called recursively. + * + * @param parsingView + * {@code true} if this method is called before parsing of view + * definition, {@code false} if it is called after it. + */ + public void setParsingCreateView(boolean parsingView) { + createViewLevel += parsingView ? 1 : -1; + } + + public boolean isParsingCreateView() { + return createViewLevel != 0; + } + + @Override + public ArrayList getClusterServers() { + return new ArrayList<>(); + } + + public boolean setCommitOrRollbackDisabled(boolean x) { + boolean old = commitOrRollbackDisabled; + commitOrRollbackDisabled = x; + return old; + } + + private void initVariables() { + if (variables == null) { + variables = database.newStringMap(); + } + } + + /** + * Set the value of the given variable for this session. 
+ * + * @param name the name of the variable (may not be null) + * @param value the new value (may not be null) + */ + public void setVariable(String name, Value value) { + initVariables(); + modificationId++; + Value old; + if (value == ValueNull.INSTANCE) { + old = variables.remove(name); + } else { + if (value instanceof ValueLob) { + // link LOB values, to make sure we have our own object + value = ((ValueLob) value).copy(database, LobStorageFrontend.TABLE_ID_SESSION_VARIABLE); + } + old = variables.put(name, value); + } + if (old instanceof ValueLob) { + ((ValueLob) old).remove(); + } + } + + /** + * Get the value of the specified user defined variable. This method always + * returns a value; it returns ValueNull.INSTANCE if the variable doesn't + * exist. + * + * @param name the variable name + * @return the value, or NULL + */ + public Value getVariable(String name) { + initVariables(); + Value v = variables.get(name); + return v == null ? ValueNull.INSTANCE : v; + } + + /** + * Get the list of variable names that are set for this session. + * + * @return the list of names + */ + public String[] getVariableNames() { + if (variables == null) { + return new String[0]; + } + return variables.keySet().toArray(new String[0]); + } + + /** + * Get the local temporary table if one exists with that name, or null if + * not. + * + * @param name the table name + * @return the table, or null + */ + public Table findLocalTempTable(String name) { + if (localTempTables == null) { + return null; + } + return localTempTables.get(name); + } + + public List
          getLocalTempTables() { + if (localTempTables == null) { + return Collections.emptyList(); + } + return new ArrayList<>(localTempTables.values()); + } + + /** + * Add a local temporary table to this session. + * + * @param table the table to add + * @throws DbException if a table with this name already exists + */ + public void addLocalTempTable(Table table) { + if (localTempTables == null) { + localTempTables = database.newStringMap(); + } + if (localTempTables.putIfAbsent(table.getName(), table) != null) { + StringBuilder builder = new StringBuilder(); + table.getSQL(builder, HasSQL.TRACE_SQL_FLAGS).append(" AS "); + Parser.quoteIdentifier(table.getName(), HasSQL.TRACE_SQL_FLAGS); + throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, builder.toString()); + } + modificationId++; + } + + /** + * Drop and remove the given local temporary table from this session. + * + * @param table the table + */ + public void removeLocalTempTable(Table table) { + modificationId++; + if (localTempTables != null) { + localTempTables.remove(table.getName()); + } + synchronized (database) { + table.removeChildrenAndResources(this); + } + } + + /** + * Get the local temporary index if one exists with that name, or null if + * not. + * + * @param name the table name + * @return the table, or null + */ + public Index findLocalTempTableIndex(String name) { + if (localTempTableIndexes == null) { + return null; + } + return localTempTableIndexes.get(name); + } + + public HashMap getLocalTempTableIndexes() { + if (localTempTableIndexes == null) { + return new HashMap<>(); + } + return localTempTableIndexes; + } + + /** + * Add a local temporary index to this session. 
+ * + * @param index the index to add + * @throws DbException if a index with this name already exists + */ + public void addLocalTempTableIndex(Index index) { + if (localTempTableIndexes == null) { + localTempTableIndexes = database.newStringMap(); + } + if (localTempTableIndexes.putIfAbsent(index.getName(), index) != null) { + throw DbException.get(ErrorCode.INDEX_ALREADY_EXISTS_1, index.getTraceSQL()); + } + } + + /** + * Drop and remove the given local temporary index from this session. + * + * @param index the index + */ + public void removeLocalTempTableIndex(Index index) { + if (localTempTableIndexes != null) { + localTempTableIndexes.remove(index.getName()); + synchronized (database) { + index.removeChildrenAndResources(this); + } + } + } + + /** + * Get the local temporary constraint if one exists with that name, or + * null if not. + * + * @param name the constraint name + * @return the constraint, or null + */ + public Constraint findLocalTempTableConstraint(String name) { + if (localTempTableConstraints == null) { + return null; + } + return localTempTableConstraints.get(name); + } + + /** + * Get the map of constraints for all constraints on local, temporary + * tables, if any. The map's keys are the constraints' names. + * + * @return the map of constraints, or null + */ + public HashMap getLocalTempTableConstraints() { + if (localTempTableConstraints == null) { + return new HashMap<>(); + } + return localTempTableConstraints; + } + + /** + * Add a local temporary constraint to this session. 
+ * + * @param constraint the constraint to add + * @throws DbException if a constraint with the same name already exists + */ + public void addLocalTempTableConstraint(Constraint constraint) { + if (localTempTableConstraints == null) { + localTempTableConstraints = database.newStringMap(); + } + String name = constraint.getName(); + if (localTempTableConstraints.putIfAbsent(name, constraint) != null) { + throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, constraint.getTraceSQL()); + } + } + + /** + * Drop and remove the given local temporary constraint from this session. + * + * @param constraint the constraint + */ + void removeLocalTempTableConstraint(Constraint constraint) { + if (localTempTableConstraints != null) { + localTempTableConstraints.remove(constraint.getName()); + synchronized (database) { + constraint.removeChildrenAndResources(this); + } + } + } + + @Override + public boolean getAutoCommit() { + return autoCommit; + } + + public User getUser() { + return user; + } + + @Override + public void setAutoCommit(boolean b) { + autoCommit = b; + } + + public int getLockTimeout() { + return lockTimeout; + } + + public void setLockTimeout(int lockTimeout) { + this.lockTimeout = lockTimeout; + if (hasTransaction()) { + transaction.setTimeoutMillis(lockTimeout); + } + } + + @Override + public synchronized CommandInterface prepareCommand(String sql, + int fetchSize) { + return prepareLocal(sql); + } + + /** + * Parse and prepare the given SQL statement. This method also checks the + * rights. + * + * @param sql the SQL statement + * @return the prepared statement + */ + public Prepared prepare(String sql) { + return prepare(sql, false, false); + } + + /** + * Parse and prepare the given SQL statement. + * + * @param sql the SQL statement + * @param rightsChecked true if the rights have already been checked + * @param literalsChecked true if the sql string has already been checked + * for literals (only used if ALLOW_LITERALS NONE is set). 
+ * @return the prepared statement + */ + public Prepared prepare(String sql, boolean rightsChecked, boolean literalsChecked) { + Parser parser = new Parser(this); + parser.setRightsChecked(rightsChecked); + parser.setLiteralsChecked(literalsChecked); + return parser.prepare(sql); + } + + /** + * Parse and prepare the given SQL statement. + * This method also checks if the connection has been closed. + * + * @param sql the SQL statement + * @return the prepared statement + */ + public Command prepareLocal(String sql) { + if (isClosed()) { + throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, + "session closed"); + } + Command command; + if (queryCacheSize > 0) { + if (queryCache == null) { + queryCache = SmallLRUCache.newInstance(queryCacheSize); + modificationMetaID = database.getModificationMetaId(); + } else { + long newModificationMetaID = database.getModificationMetaId(); + if (newModificationMetaID != modificationMetaID) { + queryCache.clear(); + modificationMetaID = newModificationMetaID; + } + command = queryCache.get(sql); + if (command != null && command.canReuse()) { + command.reuse(); + return command; + } + } + } + Parser parser = new Parser(this); + try { + command = parser.prepareCommand(sql); + } finally { + // we can't reuse sub-query indexes, so just drop the whole cache + subQueryIndexCache = null; + } + if (queryCache != null) { + if (command.isCacheable()) { + queryCache.put(sql, command); + } + } + return command; + } + + /** + * Arranges for the specified database object id to be released + * at the end of the current transaction. + * @param id to be scheduled + */ + protected void scheduleDatabaseObjectIdForRelease(int id) { + if (idsToRelease == null) { + idsToRelease = new BitSet(); + } + idsToRelease.set(id); + } + + public Database getDatabase() { + return database; + } + + /** + * Commit the current transaction. 
If the statement was not a data + * definition statement, and if there are temporary tables that should be + * dropped or truncated at commit, this is done as well. + * + * @param ddl if the statement was a data definition statement + */ + public void commit(boolean ddl) { + beforeCommitOrRollback(); + if (hasTransaction()) { + try { + markUsedTablesAsUpdated(); + transaction.commit(); + removeTemporaryLobs(true); + endTransaction(); + } finally { + transaction = null; + } + if (!ddl) { + // do not clean the temp tables if the last command was a + // create/drop + cleanTempTables(false); + if (autoCommitAtTransactionEnd) { + autoCommit = true; + autoCommitAtTransactionEnd = false; + } + } + analyzeTables(); + } + } + + private void markUsedTablesAsUpdated() { + // TODO should not rely on locking + if (!locks.isEmpty()) { + for (Table t : locks) { + if (t instanceof MVTable) { + ((MVTable) t).commit(); + } + } + } + } + + private void analyzeTables() { + // On rare occasions it can be called concurrently (i.e. from close()) + // without proper locking, but instead of oversynchronizing + // we just skip this optional operation in such case + if (tablesToAnalyze != null && + Thread.holdsLock(this)) { + // take a local copy and clear because in rare cases we can call + // back into markTableForAnalyze while iterating here + HashSet
          tablesToAnalyzeLocal = tablesToAnalyze; + tablesToAnalyze = null; + int rowCount = getDatabase().getSettings().analyzeSample / 10; + for (Table table : tablesToAnalyzeLocal) { + Analyze.analyzeTable(this, table, rowCount, false); + } + // analyze can lock the meta + database.unlockMeta(this); + // table analysis opens a new transaction(s), + // so we need to commit afterwards whatever leftovers might be + commit(true); + } + } + + private void removeTemporaryLobs(boolean onTimeout) { + if (temporaryLobs != null) { + for (ValueLob v : temporaryLobs) { + if (!v.isLinkedToTable()) { + v.remove(); + } + } + temporaryLobs.clear(); + } + if (temporaryResultLobs != null && !temporaryResultLobs.isEmpty()) { + long keepYoungerThan = System.nanoTime() - database.getSettings().lobTimeout * 1_000_000L; + while (!temporaryResultLobs.isEmpty()) { + TimeoutValue tv = temporaryResultLobs.getFirst(); + if (onTimeout && tv.created - keepYoungerThan >= 0) { + break; + } + ValueLob v = temporaryResultLobs.removeFirst().value; + if (!v.isLinkedToTable()) { + v.remove(); + } + } + } + } + + private void beforeCommitOrRollback() { + if (commitOrRollbackDisabled && !locks.isEmpty()) { + throw DbException.get(ErrorCode.COMMIT_ROLLBACK_NOT_ALLOWED); + } + currentTransactionName = null; + currentTimestamp = null; + database.throwLastBackgroundException(); + } + + private void endTransaction() { + if (removeLobMap != null && !removeLobMap.isEmpty()) { + for (ValueLob v : removeLobMap.values()) { + v.remove(); + } + removeLobMap = null; + } + unlockAll(); + if (idsToRelease != null) { + database.releaseDatabaseObjectIds(idsToRelease); + idsToRelease = null; + } + if (hasTransaction() && !transaction.allowNonRepeatableRead()) { + snapshotDataModificationId = database.getNextModificationDataId(); + } + } + + /** + * Returns the data modification id of transaction's snapshot, or 0 if + * isolation level doesn't use snapshots. 
+ * + * @return the data modification id of transaction's snapshot, or 0 + */ + public long getSnapshotDataModificationId() { + return snapshotDataModificationId; + } + + /** + * Fully roll back the current transaction. + */ + public void rollback() { + beforeCommitOrRollback(); + if (hasTransaction()) { + rollbackTo(null); + } + idsToRelease = null; + cleanTempTables(false); + if (autoCommitAtTransactionEnd) { + autoCommit = true; + autoCommitAtTransactionEnd = false; + } + endTransaction(); + } + + /** + * Partially roll back the current transaction. + * + * @param savepoint the savepoint to which should be rolled back + */ + public void rollbackTo(Savepoint savepoint) { + int index = savepoint == null ? 0 : savepoint.logIndex; + if (hasTransaction()) { + markUsedTablesAsUpdated(); + if (savepoint == null) { + transaction.rollback(); + transaction = null; + } else { + transaction.rollbackToSavepoint(savepoint.transactionSavepoint); + } + } + if (savepoints != null) { + String[] names = savepoints.keySet().toArray(new String[0]); + for (String name : names) { + Savepoint sp = savepoints.get(name); + int savepointIndex = sp.logIndex; + if (savepointIndex > index) { + savepoints.remove(name); + } + } + } + + // Because cache may have captured query result (in Query.lastResult), + // which is based on data from uncommitted transaction., + // It is not valid after rollback, therefore cache has to be cleared. + if (queryCache != null) { + queryCache.clear(); + } + } + + @Override + public boolean hasPendingTransaction() { + return hasTransaction() && transaction.hasChanges() && transaction.getStatus() != Transaction.STATUS_PREPARED; + } + + /** + * Create a savepoint to allow rolling back to this state. 
+ * + * @return the savepoint + */ + public Savepoint setSavepoint() { + Savepoint sp = new Savepoint(); + sp.transactionSavepoint = getStatementSavepoint(); + return sp; + } + + public int getId() { + return id; + } + + @Override + public void cancel() { + cancelAtNs = Utils.currentNanoTime(); + } + + /** + * Cancel the transaction and close the session if needed. + */ + void suspend() { + cancel(); + if (transitionToState(State.SUSPENDED, false) == State.SLEEP) { + close(); + } + } + + @Override + public void close() { + // this is the only operation that can be invoked concurrently + // so, we should prevent double-closure + if (state.getAndSet(State.CLOSED) != State.CLOSED) { + try { + database.throwLastBackgroundException(); + + database.checkPowerOff(); + + // release any open table locks + if (hasPreparedTransaction()) { + if (currentTransactionName != null) { + removeLobMap = null; + } + endTransaction(); + } else { + rollback(); + removeTemporaryLobs(false); + cleanTempTables(true); + commit(true); // temp table removal may have opened new transaction + } + + // Table#removeChildrenAndResources can take the meta lock, + // and we need to unlock before we call removeSession(), which might + // want to take the meta lock using the system session. + database.unlockMeta(this); + } finally { + database.removeSession(this); + } + } + } + + /** + * Register table as locked within current transaction. + * Table is unlocked on commit or rollback. + * It also assumes that table will be modified by transaction. + * + * @param table the table that is locked + */ + public void registerTableAsLocked(Table table) { + if (SysProperties.CHECK) { + if (locks.contains(table)) { + throw DbException.getInternalError(table.toString()); + } + } + locks.add(table); + } + + /** + * Register table as updated within current transaction. + * This is used instead of table locking when lock mode is LOCK_MODE_OFF. 
+ * + * @param table to register + */ + public void registerTableAsUpdated(Table table) { + if (!locks.contains(table)) { + locks.add(table); + } + } + + /** + * Unlock just this table. + * + * @param t the table to unlock + */ + void unlock(Table t) { + locks.remove(t); + } + + + private boolean hasTransaction() { + return transaction != null; + } + + private void unlockAll() { + if (!locks.isEmpty()) { + Table[] array = locks.toArray(new Table[0]); + for (Table t : array) { + if (t != null) { + t.unlock(this); + } + } + locks.clear(); + } + Database.unlockMetaDebug(this); + savepoints = null; + sessionStateChanged = true; + } + + private void cleanTempTables(boolean closeSession) { + if (localTempTables != null && !localTempTables.isEmpty()) { + Iterator
          it = localTempTables.values().iterator(); + while (it.hasNext()) { + Table table = it.next(); + if (closeSession || table.getOnCommitDrop()) { + modificationId++; + table.setModified(); + it.remove(); + // Exception thrown in org.h2.engine.Database.removeMeta + // if line below is missing with TestDeadlock + database.lockMeta(this); + table.removeChildrenAndResources(this); + if (closeSession) { + database.throwLastBackgroundException(); + } + } else if (table.getOnCommitTruncate()) { + table.truncate(this); + } + } + } + } + + public Random getRandom() { + if (random == null) { + random = new Random(); + } + return random; + } + + @Override + public Trace getTrace() { + if (trace != null && !isClosed()) { + return trace; + } + String traceModuleName = "jdbc[" + id + "]"; + if (isClosed()) { + return new TraceSystem(null).getTrace(traceModuleName); + } + trace = database.getTraceSystem().getTrace(traceModuleName); + return trace; + } + + /** + * Returns the next value of the sequence in this session. 
+ * + * @param sequence + * the sequence + * @param prepared + * current prepared command, select, or {@code null} + * @return the next value of the sequence in this session + */ + public Value getNextValueFor(Sequence sequence, Prepared prepared) { + Value value; + Mode mode = database.getMode(); + if (mode.nextValueReturnsDifferentValues || prepared == null) { + value = sequence.getNext(this); + } else { + if (nextValueFor == null) { + nextValueFor = new HashMap<>(); + } + SequenceAndPrepared key = new SequenceAndPrepared(sequence, prepared); + RowNumberAndValue data = nextValueFor.get(key); + long rowNumber = prepared.getCurrentRowNumber(); + if (data != null) { + if (data.rowNumber == rowNumber) { + value = data.nextValue; + } else { + data.nextValue = value = sequence.getNext(this); + data.rowNumber = rowNumber; + } + } else { + value = sequence.getNext(this); + nextValueFor.put(key, new RowNumberAndValue(rowNumber, value)); + } + } + WeakHashMap currentValueFor = this.currentValueFor; + if (currentValueFor == null) { + this.currentValueFor = currentValueFor = new WeakHashMap<>(); + } + currentValueFor.put(sequence, value); + if (mode.takeGeneratedSequenceValue) { + lastIdentity = value; + } + return value; + } + + /** + * Returns the current value of the sequence in this session. 
+ * + * @param sequence + * the sequence + * @return the current value of the sequence in this session + * @throws DbException + * if current value is not defined + */ + public Value getCurrentValueFor(Sequence sequence) { + WeakHashMap currentValueFor = this.currentValueFor; + if (currentValueFor != null) { + Value value = currentValueFor.get(sequence); + if (value != null) { + return value; + } + } + throw DbException.get(ErrorCode.CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1, sequence.getTraceSQL()); + } + + public void setLastIdentity(Value last) { + this.lastIdentity = last; + } + + public Value getLastIdentity() { + return lastIdentity; + } + + /** + * Whether the session contains any uncommitted changes. + * + * @return true if yes + */ + public boolean containsUncommitted() { + return transaction != null && transaction.hasChanges(); + } + + /** + * Create a savepoint that is linked to the current log position. + * + * @param name the savepoint name + */ + public void addSavepoint(String name) { + if (savepoints == null) { + savepoints = database.newStringMap(); + } + savepoints.put(name, setSavepoint()); + } + + /** + * Undo all operations back to the log position of the given savepoint. + * + * @param name the savepoint name + */ + public void rollbackToSavepoint(String name) { + beforeCommitOrRollback(); + Savepoint savepoint; + if (savepoints == null || (savepoint = savepoints.get(name)) == null) { + throw DbException.get(ErrorCode.SAVEPOINT_IS_INVALID_1, name); + } + rollbackTo(savepoint); + } + + /** + * Prepare the given transaction. + * + * @param transactionName the name of the transaction + */ + public void prepareCommit(String transactionName) { + if (hasPendingTransaction()) { + // need to commit even if rollback is not possible (create/drop + // table and so on) + database.prepareCommit(this, transactionName); + } + currentTransactionName = transactionName; + } + + /** + * Checks presence of prepared transaction in this session. 
+ * + * @return {@code true} if there is a prepared transaction, + * {@code false} otherwise + */ + public boolean hasPreparedTransaction() { + return currentTransactionName != null; + } + + /** + * Commit or roll back the given transaction. + * + * @param transactionName the name of the transaction + * @param commit true for commit, false for rollback + */ + public void setPreparedTransaction(String transactionName, boolean commit) { + if (hasPreparedTransaction() && currentTransactionName.equals(transactionName)) { + if (commit) { + commit(false); + } else { + rollback(); + } + } else { + ArrayList list = database.getInDoubtTransactions(); + int state = commit ? InDoubtTransaction.COMMIT : InDoubtTransaction.ROLLBACK; + boolean found = false; + for (InDoubtTransaction p: list) { + if (p.getTransactionName().equals(transactionName)) { + p.setState(state); + found = true; + break; + } + } + if (!found) { + throw DbException.get(ErrorCode.TRANSACTION_NOT_FOUND_1, + transactionName); + } + } + } + + @Override + public boolean isClosed() { + return state.get() == State.CLOSED; + } + + public boolean isOpen() { + State current = state.get(); + checkSuspended(current); + return current != State.CLOSED; + } + + public void setThrottle(int throttle) { + this.throttleMs = throttle; + } + + /** + * Wait for some time if this session is throttled (slowed down). + */ + public void throttle() { + if (throttleMs == 0) { + return; + } + long time = System.nanoTime(); + if (lastThrottleNs != 0L && time - lastThrottleNs < Constants.THROTTLE_DELAY * 1_000_000L) { + return; + } + lastThrottleNs = Utils.nanoTimePlusMillis(time, throttleMs); + State prevState = transitionToState(State.THROTTLED, false); + try { + Thread.sleep(throttleMs); + } catch (InterruptedException ignore) { + } finally { + transitionToState(prevState, false); + } + } + + /** + * Set the current command of this session. This is done just before + * executing the statement. 
+ * + * @param command the command + */ + private void setCurrentCommand(Command command) { + State targetState = command == null ? State.SLEEP : State.RUNNING; + transitionToState(targetState, true); + if (isOpen()) { + currentCommand = command; + commandStartOrEnd = Instant.now(); + if (command != null) { + if (queryTimeout > 0) { + cancelAtNs = Utils.currentNanoTimePlusMillis(queryTimeout); + } + } else { + if (currentTimestamp != null && !database.getMode().dateTimeValueWithinTransaction) { + currentTimestamp = null; + } + if (nextValueFor != null) { + nextValueFor.clear(); + } + } + } + } + + private State transitionToState(State targetState, boolean checkSuspended) { + State currentState; + while((currentState = state.get()) != State.CLOSED && + (!checkSuspended || checkSuspended(currentState)) && + !state.compareAndSet(currentState, targetState)) {/**/} + return currentState; + } + + private boolean checkSuspended(State currentState) { + if (currentState == State.SUSPENDED) { + close(); + throw DbException.get(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE); + } + return true; + } + + /** + * Check if the current transaction is canceled by calling + * Statement.cancel() or because a session timeout was set and expired. + * + * @throws DbException if the transaction is canceled + */ + public void checkCanceled() { + throttle(); + long cancel = cancelAtNs; + if (cancel == 0L) { + return; + } + if (System.nanoTime() - cancel >= 0L) { + cancelAtNs = 0L; + throw DbException.get(ErrorCode.STATEMENT_WAS_CANCELED); + } + } + + /** + * Get the cancel time. 
+ * + * @return the time or 0 if not set + */ + public long getCancel() { + return cancelAtNs; + } + + public Command getCurrentCommand() { + return currentCommand; + } + + public ValueTimestampTimeZone getCommandStartOrEnd() { + return DateTimeUtils.currentTimestamp(timeZone, commandStartOrEnd); + } + + public boolean getAllowLiterals() { + return allowLiterals; + } + + public void setAllowLiterals(boolean b) { + this.allowLiterals = b; + } + + public void setCurrentSchema(Schema schema) { + modificationId++; + if (queryCache != null) { + queryCache.clear(); + } + this.currentSchemaName = schema.getName(); + } + + @Override + public String getCurrentSchemaName() { + return currentSchemaName; + } + + @Override + public void setCurrentSchemaName(String schemaName) { + Schema schema = database.getSchema(schemaName); + setCurrentSchema(schema); + } + + /** + * Create an internal connection. This connection is used when initializing + * triggers, and when calling user defined functions. + * + * @param columnList if the url should be 'jdbc:columnlist:connection' + * @return the internal connection + */ + public JdbcConnection createConnection(boolean columnList) { + String url; + if (columnList) { + url = Constants.CONN_URL_COLUMNLIST; + } else { + url = Constants.CONN_URL_INTERNAL; + } + return new JdbcConnection(this, getUser().getName(), url); + } + + @Override + public DataHandler getDataHandler() { + return database; + } + + /** + * Remember that the given LOB value must be removed at commit. + * + * @param v the value + */ + public void removeAtCommit(ValueLob v) { + if (v.isLinkedToTable()) { + if (removeLobMap == null) { + removeLobMap = new HashMap<>(); + } + removeLobMap.put(v.toString(), v); + } + } + + /** + * Do not remove this LOB value at commit any longer. 
+ * + * @param v the value + */ + public void removeAtCommitStop(ValueLob v) { + if (v.isLinkedToTable() && removeLobMap != null) { + removeLobMap.remove(v.toString()); + } + } + + /** + * Get the next system generated identifiers. The identifier returned does + * not occur within the given SQL statement. + * + * @param sql the SQL statement + * @return the new identifier + */ + public String getNextSystemIdentifier(String sql) { + String identifier; + do { + identifier = SYSTEM_IDENTIFIER_PREFIX + systemIdentifier++; + } while (sql.contains(identifier)); + return identifier; + } + + /** + * Add a procedure to this session. + * + * @param procedure the procedure to add + */ + public void addProcedure(Procedure procedure) { + if (procedures == null) { + procedures = database.newStringMap(); + } + procedures.put(procedure.getName(), procedure); + } + + /** + * Remove a procedure from this session. + * + * @param name the name of the procedure to remove + */ + public void removeProcedure(String name) { + if (procedures != null) { + procedures.remove(name); + } + } + + /** + * Get the procedure with the given name, or null + * if none exists. + * + * @param name the procedure name + * @return the procedure or null + */ + public Procedure getProcedure(String name) { + if (procedures == null) { + return null; + } + return procedures.get(name); + } + + public void setSchemaSearchPath(String[] schemas) { + modificationId++; + this.schemaSearchPath = schemas; + } + + public String[] getSchemaSearchPath() { + return schemaSearchPath; + } + + @Override + public int hashCode() { + return serialId; + } + + @Override + public String toString() { + return "#" + serialId + " (user: " + (user == null ? "" : user.getName()) + ", " + state.get() + ")"; + } + + /** + * Begin a transaction. + */ + public void begin() { + autoCommitAtTransactionEnd = true; + autoCommit = false; + } + + public ValueTimestampTimeZone getSessionStart() { + return sessionStart; + } + + public Set
          getLocks() { + /* + * This implementation needs to be lock-free. + */ + if (database.getLockMode() == Constants.LOCK_MODE_OFF || locks.isEmpty()) { + return Collections.emptySet(); + } + /* + * Do not use ArrayList.toArray(T[]) here, its implementation is not + * thread-safe. + */ + Object[] array = locks.toArray(); + /* + * The returned array may contain null elements and may contain + * duplicates due to concurrent remove(). + */ + switch (array.length) { + case 1: { + Object table = array[0]; + if (table != null) { + return Collections.singleton((Table) table); + } + } + //$FALL-THROUGH$ + case 0: + return Collections.emptySet(); + default: { + HashSet
          set = new HashSet<>(); + for (Object table : array) { + if (table != null) { + set.add((Table) table); + } + } + return set; + } + } + } + + /** + * Wait if the exclusive mode has been enabled for another session. This + * method returns as soon as the exclusive mode has been disabled. + */ + public void waitIfExclusiveModeEnabled() { + transitionToState(State.RUNNING, true); + // Even in exclusive mode, we have to let the LOB session proceed, or we + // will get deadlocks. + if (database.getLobSession() == this) { + return; + } + while (isOpen()) { + SessionLocal exclusive = database.getExclusiveSession(); + if (exclusive == null || exclusive == this) { + break; + } + if (Thread.holdsLock(exclusive)) { + // if another connection is used within the connection + break; + } + try { + Thread.sleep(100); + } catch (InterruptedException e) { + // ignore + } + } + } + + /** + * Get the view cache for this session. There are two caches: the subquery + * cache (which is only use for a single query, has no bounds, and is + * cleared after use), and the cache for regular views. 
+ * + * @param subQuery true to get the subquery cache + * @return the view cache + */ + public Map getViewIndexCache(boolean subQuery) { + if (subQuery) { + // for sub-queries we don't need to use LRU because the cache should + // not grow too large for a single query (we drop the whole cache in + // the end of prepareLocal) + if (subQueryIndexCache == null) { + subQueryIndexCache = new HashMap<>(); + } + return subQueryIndexCache; + } + SmallLRUCache cache = viewIndexCache; + if (cache == null) { + viewIndexCache = cache = SmallLRUCache.newInstance(Constants.VIEW_INDEX_CACHE_SIZE); + } + return cache; + } + + public void setQueryTimeout(int queryTimeout) { + int max = database.getSettings().maxQueryTimeout; + if (max != 0 && (max < queryTimeout || queryTimeout == 0)) { + // the value must be at most max + queryTimeout = max; + } + this.queryTimeout = queryTimeout; + // must reset the cancel at here, + // otherwise it is still used + cancelAtNs = 0L; + } + + public int getQueryTimeout() { + return queryTimeout; + } + + /** + * Set the table this session is waiting for, and the thread that is + * waiting. + * + * @param waitForLock the table + * @param waitForLockThread the current thread (the one that is waiting) + */ + public void setWaitForLock(Table waitForLock, Thread waitForLockThread) { + this.waitForLock = waitForLock; + this.waitForLockThread = waitForLockThread; + } + + public Table getWaitForLock() { + return waitForLock; + } + + public Thread getWaitForLockThread() { + return waitForLockThread; + } + + public int getModificationId() { + return modificationId; + } + + public Value getTransactionId() { + if (transaction == null || !transaction.hasChanges()) { + return ValueNull.INSTANCE; + } + return ValueVarchar.get(Long.toString(transaction.getSequenceNum())); + } + + /** + * Get the next object id. + * + * @return the next object id + */ + public int nextObjectId() { + return objectId++; + } + + /** + * Get the transaction to use for this session. 
+ * + * @return the transaction + */ + public Transaction getTransaction() { + if (transaction == null) { + Store store = database.getStore(); + if (store.getMvStore().isClosed()) { + Throwable backgroundException = database.getBackgroundException(); + database.shutdownImmediately(); + throw DbException.get(ErrorCode.DATABASE_IS_CLOSED, backgroundException); + } + transaction = store.getTransactionStore().begin(this, this.lockTimeout, id, isolationLevel); + startStatement = -1; + } + return transaction; + } + + private long getStatementSavepoint() { + if (startStatement == -1) { + startStatement = getTransaction().setSavepoint(); + } + return startStatement; + } + + /** + * Start a new statement within a transaction. + * @param command about to be started + */ + @SuppressWarnings("incomplete-switch") + public void startStatementWithinTransaction(Command command) { + Transaction transaction = getTransaction(); + if (transaction != null) { + HashSet>> maps = new HashSet<>(); + if (command != null) { + Set dependencies = command.getDependencies(); + switch (transaction.getIsolationLevel()) { + case SNAPSHOT: + case SERIALIZABLE: + if (!transaction.hasStatementDependencies()) { + for (Schema schema : database.getAllSchemasNoMeta()) { + for (Table table : schema.getAllTablesAndViews(null)) { + if (table instanceof MVTable) { + addTableToDependencies((MVTable)table, maps); + } + } + } + break; + } + //$FALL-THROUGH$ + case READ_COMMITTED: + case READ_UNCOMMITTED: + for (DbObject dependency : dependencies) { + if (dependency instanceof MVTable) { + addTableToDependencies((MVTable)dependency, maps); + } + } + break; + case REPEATABLE_READ: + HashSet processed = new HashSet<>(); + for (DbObject dependency : dependencies) { + if (dependency instanceof MVTable) { + addTableToDependencies((MVTable)dependency, maps, processed); + } + } + break; + } + } + transaction.markStatementStart(maps); + } + startStatement = -1; + if (command != null) { + setCurrentCommand(command); + } + 
} + + @SuppressWarnings({"unchecked", "rawtypes"}) + private static void addTableToDependencies(MVTable table, HashSet>> maps) { + for (Index index : table.getIndexes()) { + if (index instanceof MVIndex) { + maps.add(((MVIndex) index).getMVMap()); + } + } + } + + private static void addTableToDependencies(MVTable table, HashSet>> maps, + HashSet processed) { + if (!processed.add(table)) { + return; + } + addTableToDependencies(table, maps); + ArrayList constraints = table.getConstraints(); + if (constraints != null) { + for (Constraint constraint : constraints) { + Table ref = constraint.getTable(); + if (ref != table && ref instanceof MVTable) { + addTableToDependencies((MVTable) ref, maps, processed); + } + } + } + } + + /** + * Mark the statement as completed. This also close all temporary result + * set, and deletes all temporary files held by the result sets. + */ + public void endStatement() { + setCurrentCommand(null); + if (hasTransaction()) { + transaction.markStatementEnd(); + } + startStatement = -1; + } + + /** + * Clear the view cache for this session. + */ + public void clearViewIndexCache() { + viewIndexCache = null; + } + + @Override + public ValueLob addTemporaryLob(ValueLob v) { + LobData lobData = v.getLobData(); + if (lobData instanceof LobDataInMemory) { + return v; + } + int tableId = ((LobDataDatabase) lobData).getTableId(); + if (tableId == LobStorageFrontend.TABLE_RESULT || tableId == LobStorageFrontend.TABLE_TEMP) { + if (temporaryResultLobs == null) { + temporaryResultLobs = new LinkedList<>(); + } + temporaryResultLobs.add(new TimeoutValue(v)); + } else { + if (temporaryLobs == null) { + temporaryLobs = new ArrayList<>(); + } + temporaryLobs.add(v); + } + return v; + } + + @Override + public boolean isRemote() { + return false; + } + + /** + * Mark that the given table needs to be analyzed on commit. 
+ * + * @param table the table + */ + public void markTableForAnalyze(Table table) { + if (tablesToAnalyze == null) { + tablesToAnalyze = new HashSet<>(); + } + tablesToAnalyze.add(table); + } + + public State getState() { + return getBlockingSessionId() != 0 ? State.BLOCKED : state.get(); + } + + public int getBlockingSessionId() { + return transaction == null ? 0 : transaction.getBlockerId(); + } + + @Override + public void onRollback(MVMap> map, Object key, + VersionedValue existingValue, + VersionedValue restoredValue) { + // Here we are relying on the fact that map which backs table's primary index + // has the same name as the table itself + Store store = database.getStore(); + MVTable table = store.getTable(map.getName()); + if (table != null) { + Row oldRow = existingValue == null ? null : (Row) existingValue.getCurrentValue(); + Row newRow = restoredValue == null ? null : (Row) restoredValue.getCurrentValue(); + table.fireAfterRow(this, oldRow, newRow, true); + + if (table.getContainsLargeObject()) { + if (oldRow != null) { + for (int i = 0, len = oldRow.getColumnCount(); i < len; i++) { + Value v = oldRow.getValue(i); + if (v instanceof ValueLob) { + removeAtCommit((ValueLob) v); + } + } + } + if (newRow != null) { + for (int i = 0, len = newRow.getColumnCount(); i < len; i++) { + Value v = newRow.getValue(i); + if (v instanceof ValueLob) { + removeAtCommitStop((ValueLob) v); + } + } + } + } + } + } + + /** + * Represents a savepoint (a position in a transaction to where one can roll + * back to). + */ + public static class Savepoint { + + /** + * The undo log index. + */ + int logIndex; + + /** + * The transaction savepoint id. + */ + long transactionSavepoint; + } + + /** + * An LOB object with a timeout. + */ + public static class TimeoutValue { + + /** + * The time when this object was created. + */ + final long created = System.nanoTime(); + + /** + * The value. 
+ */ + final ValueLob value; + + TimeoutValue(ValueLob v) { + this.value = v; + } + + } + + /** + * Returns the network connection information, or {@code null}. + * + * @return the network connection information, or {@code null} + */ + public NetworkConnectionInfo getNetworkConnectionInfo() { + return networkConnectionInfo; + } + + @Override + public void setNetworkConnectionInfo(NetworkConnectionInfo networkConnectionInfo) { + this.networkConnectionInfo = networkConnectionInfo; + } + + @Override + public ValueTimestampTimeZone currentTimestamp() { + ValueTimestampTimeZone ts = currentTimestamp; + if (ts == null) { + currentTimestamp = ts = DateTimeUtils.currentTimestamp(timeZone, commandStartOrEnd); + } + return ts; + } + + @Override + public Mode getMode() { + return database.getMode(); + } + + @Override + public JavaObjectSerializer getJavaObjectSerializer() { + return database.getJavaObjectSerializer(); + } + + @Override + public IsolationLevel getIsolationLevel() { + return isolationLevel; + } + + @Override + public void setIsolationLevel(IsolationLevel isolationLevel) { + commit(false); + this.isolationLevel = isolationLevel; + } + + /** + * Gets bit set of non-keywords. + * + * @return set of non-keywords, or {@code null} + */ + public BitSet getNonKeywords() { + return nonKeywords; + } + + /** + * Sets bit set of non-keywords. 
+ * + * @param nonKeywords set of non-keywords, or {@code null} + */ + public void setNonKeywords(BitSet nonKeywords) { + this.nonKeywords = nonKeywords; + } + + @Override + public StaticSettings getStaticSettings() { + StaticSettings settings = staticSettings; + if (settings == null) { + DbSettings dbSettings = database.getSettings(); + staticSettings = settings = new StaticSettings(dbSettings.databaseToUpper, dbSettings.databaseToLower, + dbSettings.caseInsensitiveIdentifiers); + } + return settings; + } + + @Override + public DynamicSettings getDynamicSettings() { + return new DynamicSettings(database.getMode(), timeZone); + } + + @Override + public TimeZoneProvider currentTimeZone() { + return timeZone; + } + + /** + * Sets current time zone. + * + * @param timeZone time zone + */ + public void setTimeZone(TimeZoneProvider timeZone) { + if (!timeZone.equals(this.timeZone)) { + this.timeZone = timeZone; + ValueTimestampTimeZone ts = currentTimestamp; + if (ts != null) { + long dateValue = ts.getDateValue(); + long timeNanos = ts.getTimeNanos(); + int offsetSeconds = ts.getTimeZoneOffsetSeconds(); + currentTimestamp = DateTimeUtils.timestampTimeZoneAtOffset(dateValue, timeNanos, offsetSeconds, // + timeZone.getTimeZoneOffsetUTC( + DateTimeUtils.getEpochSeconds(dateValue, timeNanos, offsetSeconds))); + } + modificationId++; + } + } + + /** + * Check if two values are equal with the current comparison mode. + * + * @param a the first value + * @param b the second value + * @return true if both objects are equal + */ + public boolean areEqual(Value a, Value b) { + // can not use equals because ValueDecimal 0.0 is not equal to 0.00. + return a.compareTo(b, this, database.getCompareMode()) == 0; + } + + /** + * Compare two values with the current comparison mode. The values may have + * different data types including NULL. 
+ * + * @param a the first value + * @param b the second value + * @return 0 if both values are equal, -1 if the first value is smaller, and + * 1 otherwise + */ + public int compare(Value a, Value b) { + return a.compareTo(b, this, database.getCompareMode()); + } + + /** + * Compare two values with the current comparison mode. The values may have + * different data types including NULL. + * + * @param a the first value + * @param b the second value + * @param forEquality perform only check for equality (= or <>) + * @return 0 if both values are equal, -1 if the first value is smaller, 1 + * if the second value is larger, {@link Integer#MIN_VALUE} if order + * is not defined due to NULL comparison + */ + public int compareWithNull(Value a, Value b, boolean forEquality) { + return a.compareWithNull(b, forEquality, this, database.getCompareMode()); + } + + /** + * Compare two values with the current comparison mode. The values must be + * of the same type. + * + * @param a the first value + * @param b the second value + * @return 0 if both values are equal, -1 if the first value is smaller, and + * 1 otherwise + */ + public int compareTypeSafe(Value a, Value b) { + return a.compareTypeSafe(b, database.getCompareMode(), this); + } + + /** + * Changes parsing mode of data types with too large length. + * + * @param truncateLargeLength + * {@code true} to truncate to valid bound, {@code false} to + * throw an exception + */ + public void setTruncateLargeLength(boolean truncateLargeLength) { + this.truncateLargeLength = truncateLargeLength; + } + + /** + * Returns parsing mode of data types with too large length. + * + * @return {@code true} if large length is truncated, {@code false} if an + * exception is thrown + */ + public boolean isTruncateLargeLength() { + return truncateLargeLength; + } + + /** + * Changes parsing of a BINARY data type. 
+ * + * @param variableBinary + * {@code true} to parse BINARY as VARBINARY, {@code false} to + * parse it as is + */ + public void setVariableBinary(boolean variableBinary) { + this.variableBinary = variableBinary; + } + + /** + * Returns BINARY data type parsing mode. + * + * @return {@code true} if BINARY should be parsed as VARBINARY, + * {@code false} if it should be parsed as is + */ + public boolean isVariableBinary() { + return variableBinary; + } + + /** + * Changes INFORMATION_SCHEMA content. + * + * @param oldInformationSchema + * {@code true} to have old-style tables in INFORMATION_SCHEMA, + * {@code false} to have modern tables + */ + public void setOldInformationSchema(boolean oldInformationSchema) { + this.oldInformationSchema = oldInformationSchema; + } + + @Override + public boolean isOldInformationSchema() { + return oldInformationSchema; + } + + @Override + public DatabaseMeta getDatabaseMeta() { + return new DatabaseMetaLocal(this); + } + + @Override + public boolean zeroBasedEnums() { + return database.zeroBasedEnums(); + } + + /** + * Enables or disables the quirks mode. + * + * @param quirksMode + * whether quirks mode should be enabled + */ + public void setQuirksMode(boolean quirksMode) { + this.quirksMode = quirksMode; + } + + /** + * Returns whether quirks mode is enabled explicitly or implicitly. 
+ * + * @return {@code true} if database is starting or quirks mode was enabled + * explicitly, {@code false} otherwise + */ + public boolean isQuirksMode() { + return quirksMode || database.isStarting(); + } + + @Override + public Session setThreadLocalSession() { + Session oldSession = THREAD_LOCAL_SESSION.get(); + THREAD_LOCAL_SESSION.set(this); + return oldSession; + } + + @Override + public void resetThreadLocalSession(Session oldSession) { + if (oldSession == null) { + THREAD_LOCAL_SESSION.remove(); + } else { + THREAD_LOCAL_SESSION.set(oldSession); + } + } + +} diff --git a/h2/src/main/org/h2/engine/SessionRemote.java b/h2/src/main/org/h2/engine/SessionRemote.java index 284396ef9c..6045e111c1 100644 --- a/h2/src/main/org/h2/engine/SessionRemote.java +++ b/h2/src/main/org/h2/engine/SessionRemote.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; @@ -9,14 +9,18 @@ import java.net.Socket; import java.sql.SQLException; import java.util.ArrayList; - import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.api.JavaObjectSerializer; import org.h2.command.CommandInterface; import org.h2.command.CommandRemote; import org.h2.command.dml.SetTypes; +import org.h2.engine.Mode.ModeEnum; +import org.h2.expression.ParameterInterface; import org.h2.jdbc.JdbcException; +import org.h2.jdbc.meta.DatabaseMeta; +import org.h2.jdbc.meta.DatabaseMetaLegacy; +import org.h2.jdbc.meta.DatabaseMetaRemote; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.message.TraceSystem; @@ -24,24 +28,30 @@ import org.h2.store.DataHandler; import org.h2.store.FileStore; import org.h2.store.LobStorageFrontend; -import org.h2.store.LobStorageInterface; import org.h2.store.fs.FileUtils; +import org.h2.util.DateTimeUtils; import org.h2.util.JdbcUtils; import org.h2.util.MathUtils; import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.SmallLRUCache; import org.h2.util.StringUtils; import org.h2.util.TempFileDeleter; +import org.h2.util.TimeZoneProvider; import org.h2.util.Utils; import org.h2.value.CompareMode; import org.h2.value.Transfer; import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueLob; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarchar; /** * The client side part of a session when using the server mode. This object * communicates with a Session on the server side. 
*/ -public class SessionRemote extends SessionWithState implements DataHandler { +public final class SessionRemote extends Session implements DataHandler { public static final int SESSION_PREPARE = 0; public static final int SESSION_CLOSE = 1; @@ -54,7 +64,7 @@ public class SessionRemote extends SessionWithState implements DataHandler { public static final int COMMAND_COMMIT = 8; public static final int CHANGE_ID = 9; public static final int COMMAND_GET_META_DATA = 10; - public static final int SESSION_PREPARE_READ_PARAMS = 11; + // 11 was used for SESSION_PREPARE_READ_PARAMS public static final int SESSION_SET_ID = 12; public static final int SESSION_CANCEL_STATEMENT = 13; public static final int SESSION_CHECK_KEY = 14; @@ -62,14 +72,13 @@ public class SessionRemote extends SessionWithState implements DataHandler { public static final int SESSION_HAS_PENDING_TRANSACTION = 16; public static final int LOB_READ = 17; public static final int SESSION_PREPARE_READ_PARAMS2 = 18; + public static final int GET_JDBC_META = 19; public static final int STATUS_ERROR = 0; public static final int STATUS_OK = 1; public static final int STATUS_CLOSED = 2; public static final int STATUS_OK_STATE_CHANGED = 3; - private static SessionFactory sessionFactory; - private TraceSystem traceSystem; private Trace trace; private ArrayList transferList = Utils.newSmallArrayList(); @@ -84,19 +93,25 @@ public class SessionRemote extends SessionWithState implements DataHandler { private int clientVersion; private boolean autoReconnect; private int lastReconnect; - private SessionInterface embedded; + private Session embedded; private DatabaseEventListener eventListener; private LobStorageFrontend lobStorage; private boolean cluster; private TempFileDeleter tempFileDeleter; private JavaObjectSerializer javaObjectSerializer; - private volatile boolean javaObjectSerializerInitialized; private final CompareMode compareMode = CompareMode.getInstance(null, 0); + private final boolean 
oldInformationSchema; + + private String currentSchemaName; + + private volatile DynamicSettings dynamicSettings; + public SessionRemote(ConnectionInfo ci) { this.connectionInfo = ci; + oldInformationSchema = ci.getProperty("OLD_INFORMATION_SCHEMA", false); } @Override @@ -112,8 +127,8 @@ public ArrayList getClusterServers() { private Transfer initTransfer(ConnectionInfo ci, String db, String server) throws IOException { - Socket socket = NetUtils.createSocket(server, - Constants.DEFAULT_TCP_PORT, ci.isSSL()); + Socket socket = NetUtils.createSocket(server, Constants.DEFAULT_TCP_PORT, ci.isSSL(), + ci.getProperty("NETWORK_TIMEOUT", 0)); Transfer trans = new Transfer(this, socket); trans.setSSL(ci.isSSL()); trans.init(); @@ -133,19 +148,20 @@ private Transfer initTransfer(ConnectionInfo ci, String db, String server) done(trans); clientVersion = trans.readInt(); trans.setVersion(clientVersion); - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_14) { - if (ci.getFileEncryptionKey() != null) { - trans.writeBytes(ci.getFileEncryptionKey()); - } + if (ci.getFileEncryptionKey() != null) { + trans.writeBytes(ci.getFileEncryptionKey()); } trans.writeInt(SessionRemote.SESSION_SET_ID); trans.writeString(sessionId); - done(trans); - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_15) { - autoCommit = trans.readBoolean(); - } else { - autoCommit = true; + if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_20) { + TimeZoneProvider timeZone = ci.getTimeZone(); + if (timeZone == null) { + timeZone = DateTimeUtils.getTimeZone(); + } + trans.writeString(timeZone.getId()); } + done(trans); + autoCommit = trans.readBoolean(); return trans; } catch (DbException e) { trans.close(); @@ -155,9 +171,6 @@ private Transfer initTransfer(ConnectionInfo ci, String db, String server) @Override public boolean hasPendingTransaction() { - if (clientVersion < Constants.TCP_PROTOCOL_VERSION_10) { - return true; - } for (int i = 0, count = 0; i < transferList.size(); i++) { Transfer transfer 
= transferList.get(i); try { @@ -210,13 +223,18 @@ private void checkClusterDisableAutoCommit(String serverList) { CommandInterface c = prepareCommand( "SET CLUSTER " + serverList, Integer.MAX_VALUE); // this will set autoCommit to false - c.executeUpdate(false); + c.executeUpdate(null); // so we need to switch it on autoCommit = true; cluster = true; } } + /** + * Returns the TCP protocol version of remote connection. + * + * @return the TCP protocol version + */ public int getClientVersion() { return clientVersion; } @@ -295,30 +313,18 @@ private String getFilePrefix(String dir) { return buff.toString(); } - @Override - public int getPowerOffCount() { - return 0; - } - - @Override - public void setPowerOffCount(int count) { - throw DbException.getUnsupportedException("remote"); - } - /** * Open a new (remote or embedded) session. * * @param openNew whether to open a new session in any case * @return the session */ - public SessionInterface connectEmbeddedOrServer(boolean openNew) { + public Session connectEmbeddedOrServer(boolean openNew) { ConnectionInfo ci = connectionInfo; if (ci.isRemote()) { connectServer(ci); return this; } - // create the session using reflection, - // so that the JDBC layer can be compiled without it boolean autoServerMode = ci.getProperty("AUTO_SERVER", false); ConnectionInfo backup = null; try { @@ -329,11 +335,7 @@ public SessionInterface connectEmbeddedOrServer(boolean openNew) { if (openNew) { ci.setProperty("OPEN_NEW", "true"); } - if (sessionFactory == null) { - sessionFactory = (SessionFactory) Class.forName( - "org.h2.engine.Engine").getMethod("getInstance").invoke(null); - } - return sessionFactory.createSession(ci); + return Engine.createSession(ci); } catch (Exception re) { DbException e = DbException.convert(re); if (e.getErrorCode() == ErrorCode.DATABASE_ALREADY_OPEN_1) { @@ -446,11 +448,12 @@ private void connectServer(ConnectionInfo ci) { traceSystem.close(); throw e; } + getDynamicSettings(); } private void 
switchOffCluster() { CommandInterface ci = prepareCommand("SET CLUSTER ''", Integer.MAX_VALUE); - ci.executeUpdate(false); + ci.executeUpdate(null); } /** @@ -598,28 +601,45 @@ public int getCurrentId() { public void done(Transfer transfer) throws IOException { transfer.flush(); int status = transfer.readInt(); - if (status == STATUS_ERROR) { - String sqlstate = transfer.readString(); - String message = transfer.readString(); - String sql = transfer.readString(); - int errorCode = transfer.readInt(); - String stackTrace = transfer.readString(); - SQLException s = DbException.getJdbcSQLException(message, sql, sqlstate, errorCode, null, stackTrace); - if (errorCode == ErrorCode.CONNECTION_BROKEN_1) { - // allow re-connect - throw new IOException(s.toString(), s); - } - throw DbException.convert(s); - } else if (status == STATUS_CLOSED) { + switch (status) { + case STATUS_ERROR: + throw readException(transfer); + case STATUS_OK: + break; + case STATUS_CLOSED: transferList = null; - } else if (status == STATUS_OK_STATE_CHANGED) { + break; + case STATUS_OK_STATE_CHANGED: sessionStateChanged = true; - } else if (status == STATUS_OK) { - // ok - } else { - throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, - "unexpected status " + status); + currentSchemaName = null; + dynamicSettings = null; + break; + default: + throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, "unexpected status " + status); + } + } + + /** + * Reads an exception. 
+ * + * @param transfer + * the transfer object + * @return the exception + * @throws IOException + * on I/O exception + */ + public static DbException readException(Transfer transfer) throws IOException { + String sqlstate = transfer.readString(); + String message = transfer.readString(); + String sql = transfer.readString(); + int errorCode = transfer.readInt(); + String stackTrace = transfer.readString(); + SQLException s = DbException.getJdbcSQLException(message, sql, sqlstate, errorCode, null, stackTrace); + if (errorCode == ErrorCode.CONNECTION_BROKEN_1) { + // allow re-connect + throw new IOException(s.toString(), s); } + return DbException.convert(s); } /** @@ -663,11 +683,6 @@ public String getDatabasePath() { return ""; } - @Override - public String getLobCompressionAlgorithm(int type) { - return null; - } - @Override public int getMaxLengthInplaceLob() { return SysProperties.LOB_CLIENT_MAX_SIZE_MEMORY; @@ -722,22 +737,7 @@ public TempFileDeleter getTempFileDeleter() { } @Override - public boolean isReconnectNeeded(boolean write) { - return false; - } - - @Override - public SessionInterface reconnect(boolean write) { - return this; - } - - @Override - public void afterWriting() { - // nothing to do - } - - @Override - public LobStorageInterface getLobStorage() { + public LobStorageFrontend getLobStorage() { if (lobStorage == null) { lobStorage = new LobStorageFrontend(this); } @@ -754,9 +754,7 @@ public synchronized int readLob(long lobId, byte[] hmac, long offset, traceOperation("LOB_READ", (int) lobId); transfer.writeInt(SessionRemote.LOB_READ); transfer.writeLong(lobId); - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_12) { - transfer.writeBytes(hmac); - } + transfer.writeBytes(hmac); transfer.writeLong(offset); transfer.writeInt(length); done(transfer); @@ -775,84 +773,215 @@ public synchronized int readLob(long lobId, byte[] hmac, long offset, @Override public JavaObjectSerializer getJavaObjectSerializer() { - initJavaObjectSerializer(); + if 
(dynamicSettings == null) { + getDynamicSettings(); + } return javaObjectSerializer; } - private void initJavaObjectSerializer() { - if (javaObjectSerializerInitialized) { - return; + @Override + public ValueLob addTemporaryLob(ValueLob v) { + // do nothing + return v; + } + + @Override + public CompareMode getCompareMode() { + return compareMode; + } + + @Override + public boolean isRemote() { + return true; + } + + @Override + public String getCurrentSchemaName() { + String schema = currentSchemaName; + if (schema == null) { + synchronized (this) { + try (CommandInterface command = prepareCommand("CALL SCHEMA()", 1); + ResultInterface result = command.executeQuery(1, false)) { + result.next(); + currentSchemaName = schema = result.currentRow()[0].getString(); + } + } } - synchronized (this) { - if (javaObjectSerializerInitialized) { - return; + return schema; + } + + @Override + public synchronized void setCurrentSchemaName(String schema) { + currentSchemaName = null; + try (CommandInterface command = prepareCommand( + StringUtils.quoteIdentifier(new StringBuilder("SET SCHEMA "), schema).toString(), 0)) { + command.executeUpdate(null); + currentSchemaName = schema; + } + } + + @Override + public void setNetworkConnectionInfo(NetworkConnectionInfo networkConnectionInfo) { + // Not supported + } + + @Override + public IsolationLevel getIsolationLevel() { + if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_19) { + try (CommandInterface command = prepareCommand(!isOldInformationSchema() + ? 
"SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID()" + : "SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE ID = SESSION_ID()", 1); + ResultInterface result = command.executeQuery(1, false)) { + result.next(); + return IsolationLevel.fromSql(result.currentRow()[0].getString()); } - String serializerFQN = readSerializationSettings(); - if (serializerFQN != null) { - serializerFQN = serializerFQN.trim(); - if (!serializerFQN.isEmpty() && !serializerFQN.equals("null")) { - try { - javaObjectSerializer = (JavaObjectSerializer) JdbcUtils - .loadUserClass(serializerFQN).getDeclaredConstructor().newInstance(); - } catch (Exception e) { - throw DbException.convert(e); - } - } + } else { + try (CommandInterface command = prepareCommand("CALL LOCK_MODE()", 1); + ResultInterface result = command.executeQuery(1, false)) { + result.next(); + return IsolationLevel.fromLockMode(result.currentRow()[0].getInt()); } - javaObjectSerializerInitialized = true; } } - /** - * Read the serializer name from the persistent database settings. 
- * - * @return the serializer - */ - private String readSerializationSettings() { - String javaObjectSerializerFQN = null; - CommandInterface ci = prepareCommand( - "SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS "+ - " WHERE NAME='JAVA_OBJECT_SERIALIZER'", Integer.MAX_VALUE); - try { - ResultInterface result = ci.executeQuery(0, false); - if (result.next()) { - Value[] row = result.currentRow(); - javaObjectSerializerFQN = row[0].getString(); + @Override + public void setIsolationLevel(IsolationLevel isolationLevel) { + if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_19) { + try (CommandInterface command = prepareCommand( + "SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL " + isolationLevel.getSQL(), 0)) { + command.executeUpdate(null); + } + } else { + try (CommandInterface command = prepareCommand("SET LOCK_MODE ?", 0)) { + command.getParameters().get(0).setValue(ValueInteger.get(isolationLevel.getLockMode()), false); + command.executeUpdate(null); } - } finally { - ci.close(); } - return javaObjectSerializerFQN; } @Override - public void addTemporaryLob(Value v) { - // do nothing + public StaticSettings getStaticSettings() { + StaticSettings settings = staticSettings; + if (settings == null) { + boolean databaseToUpper = true, databaseToLower = false, caseInsensitiveIdentifiers = false; + try (CommandInterface command = getSettingsCommand(" IN (?, ?, ?)")) { + ArrayList parameters = command.getParameters(); + parameters.get(0).setValue(ValueVarchar.get("DATABASE_TO_UPPER"), false); + parameters.get(1).setValue(ValueVarchar.get("DATABASE_TO_LOWER"), false); + parameters.get(2).setValue(ValueVarchar.get("CASE_INSENSITIVE_IDENTIFIERS"), false); + try (ResultInterface result = command.executeQuery(0, false)) { + while (result.next()) { + Value[] row = result.currentRow(); + String value = row[1].getString(); + switch (row[0].getString()) { + case "DATABASE_TO_UPPER": + databaseToUpper = Boolean.valueOf(value); + break; + case "DATABASE_TO_LOWER": + 
databaseToLower = Boolean.valueOf(value); + break; + case "CASE_INSENSITIVE_IDENTIFIERS": + caseInsensitiveIdentifiers = Boolean.valueOf(value); + } + } + } + } + if (clientVersion < Constants.TCP_PROTOCOL_VERSION_18) { + caseInsensitiveIdentifiers = !databaseToUpper; + } + staticSettings = settings = new StaticSettings(databaseToUpper, databaseToLower, + caseInsensitiveIdentifiers); + } + return settings; + } + + @Override + public DynamicSettings getDynamicSettings() { + DynamicSettings settings = dynamicSettings; + if (settings == null) { + String modeName = ModeEnum.REGULAR.name(); + TimeZoneProvider timeZone = DateTimeUtils.getTimeZone(); + String javaObjectSerializerName = null; + try (CommandInterface command = getSettingsCommand(" IN (?, ?, ?)")) { + ArrayList parameters = command.getParameters(); + parameters.get(0).setValue(ValueVarchar.get("MODE"), false); + parameters.get(1).setValue(ValueVarchar.get("TIME ZONE"), false); + parameters.get(2).setValue(ValueVarchar.get("JAVA_OBJECT_SERIALIZER"), false); + try (ResultInterface result = command.executeQuery(0, false)) { + while (result.next()) { + Value[] row = result.currentRow(); + String value = row[1].getString(); + switch (row[0].getString()) { + case "MODE": + modeName = value; + break; + case "TIME ZONE": + timeZone = TimeZoneProvider.ofId(value); + break; + case "JAVA_OBJECT_SERIALIZER": + javaObjectSerializerName = value; + } + } + } + } + Mode mode = Mode.getInstance(modeName); + if (mode == null) { + mode = Mode.getRegular(); + } + dynamicSettings = settings = new DynamicSettings(mode, timeZone); + if (javaObjectSerializerName != null + && !(javaObjectSerializerName = javaObjectSerializerName.trim()).isEmpty() + && !javaObjectSerializerName.equals("null")) { + try { + javaObjectSerializer = (JavaObjectSerializer) JdbcUtils + .loadUserClass(javaObjectSerializerName).getDeclaredConstructor().newInstance(); + } catch (Exception e) { + throw DbException.convert(e); + } + } else { + 
javaObjectSerializer = null; + } + } + return settings; + } + + private CommandInterface getSettingsCommand(String args) { + return prepareCommand( + (!isOldInformationSchema() + ? "SELECT SETTING_NAME, SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME" + : "SELECT NAME, `VALUE` FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME") + args, + Integer.MAX_VALUE); } @Override - public CompareMode getCompareMode() { - return compareMode; + public ValueTimestampTimeZone currentTimestamp() { + return DateTimeUtils.currentTimestamp(getDynamicSettings().timeZone); } @Override - public boolean isRemote() { - return true; + public TimeZoneProvider currentTimeZone() { + return getDynamicSettings().timeZone; } @Override - public String getCurrentSchemaName() { - throw DbException.getUnsupportedException("getSchema && remote session"); + public Mode getMode() { + return getDynamicSettings().mode; } @Override - public void setCurrentSchemaName(String schema) { - throw DbException.getUnsupportedException("setSchema && remote session"); + public DatabaseMeta getDatabaseMeta() { + return clientVersion >= Constants.TCP_PROTOCOL_VERSION_20 ? new DatabaseMetaRemote(this, transferList) + : new DatabaseMetaLegacy(this); } @Override - public boolean isSupportsGeneratedKeys() { - return getClientVersion() >= Constants.TCP_PROTOCOL_VERSION_17; + public boolean isOldInformationSchema() { + return oldInformationSchema || clientVersion < Constants.TCP_PROTOCOL_VERSION_20; + } + + @Override + public boolean zeroBasedEnums() { + return false; } } diff --git a/h2/src/main/org/h2/engine/SessionWithState.java b/h2/src/main/org/h2/engine/SessionWithState.java deleted file mode 100644 index a793f9d363..0000000000 --- a/h2/src/main/org/h2/engine/SessionWithState.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.util.ArrayList; - -import org.h2.command.CommandInterface; -import org.h2.result.ResultInterface; -import org.h2.util.Utils; -import org.h2.value.Value; - -/** - * The base class for both remote and embedded sessions. - */ -abstract class SessionWithState implements SessionInterface { - - protected ArrayList sessionState; - protected boolean sessionStateChanged; - private boolean sessionStateUpdating; - - /** - * Re-create the session state using the stored sessionState list. - */ - protected void recreateSessionState() { - if (sessionState != null && !sessionState.isEmpty()) { - sessionStateUpdating = true; - try { - for (String sql : sessionState) { - CommandInterface ci = prepareCommand(sql, Integer.MAX_VALUE); - ci.executeUpdate(false); - } - } finally { - sessionStateUpdating = false; - sessionStateChanged = false; - } - } - } - - /** - * Read the session state if necessary. - */ - public void readSessionState() { - if (!sessionStateChanged || sessionStateUpdating) { - return; - } - sessionStateChanged = false; - sessionState = Utils.newSmallArrayList(); - CommandInterface ci = prepareCommand( - "SELECT * FROM INFORMATION_SCHEMA.SESSION_STATE", - Integer.MAX_VALUE); - ResultInterface result = ci.executeQuery(0, false); - while (result.next()) { - Value[] row = result.currentRow(); - sessionState.add(row[1].getString()); - } - } - -} diff --git a/h2/src/main/org/h2/engine/Setting.java b/h2/src/main/org/h2/engine/Setting.java index 03859e8592..3d8cc24576 100644 --- a/h2/src/main/org/h2/engine/Setting.java +++ b/h2/src/main/org/h2/engine/Setting.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; @@ -12,7 +12,7 @@ /** * A persistent database setting. */ -public class Setting extends DbObjectBase { +public final class Setting extends DbObject { private int intValue; private String stringValue; @@ -22,12 +22,12 @@ public Setting(Database database, int id, String settingName) { } @Override - public String getSQL(boolean alwaysQuote) { + public String getSQL(int sqlFlags) { return getName(); } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { return builder.append(getName()); } @@ -49,18 +49,13 @@ public String getStringValue() { @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); - } - - @Override - public String getDropSQL() { - return null; + throw DbException.getInternalError(toString()); } @Override public String getCreateSQL() { StringBuilder buff = new StringBuilder("SET "); - getSQL(buff, true).append(' '); + getSQL(buff, DEFAULT_SQL_FLAGS).append(' '); if (stringValue != null) { buff.append(stringValue); } else { @@ -75,7 +70,7 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { database.removeMeta(session, getId()); invalidate(); } diff --git a/h2/src/main/org/h2/engine/SettingsBase.java b/h2/src/main/org/h2/engine/SettingsBase.java index 8a07e047c1..2059dfdbb6 100644 --- a/h2/src/main/org/h2/engine/SettingsBase.java +++ b/h2/src/main/org/h2/engine/SettingsBase.java @@ -1,11 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; +import java.util.Arrays; +import java.util.Comparator; import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; import org.h2.api.ErrorCode; import org.h2.message.DbException; @@ -39,6 +43,16 @@ protected boolean get(String key, boolean defaultValue) { } } + /** + * Set an entry in the key-value pair. + * + * @param key the key + * @param value the value + */ + void set(String key, boolean value) { + settings.put(key, Boolean.toString(value)); + } + /** * Get the setting for the given key. * @@ -104,4 +118,16 @@ public HashMap getSettings() { return settings; } + /** + * Get all settings in alphabetical order. + * + * @return the settings + */ + public Entry[] getSortedSettings() { + @SuppressWarnings("unchecked") + Map.Entry[] entries = settings.entrySet().toArray(new Map.Entry[0]); + Arrays.sort(entries, Comparator.comparing(Entry::getKey)); + return entries; + } + } diff --git a/h2/src/main/org/h2/engine/SysProperties.java b/h2/src/main/org/h2/engine/SysProperties.java index c91caac185..bf07188c88 100644 --- a/h2/src/main/org/h2/engine/SysProperties.java +++ b/h2/src/main/org/h2/engine/SysProperties.java @@ -1,12 +1,10 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; -import java.io.File; - import org.h2.util.MathUtils; import org.h2.util.Utils; @@ -44,19 +42,7 @@ public class SysProperties { public static final String H2_BROWSER = "h2.browser"; /** - * System property file.separator.
          - * It is set by the system, and used to build absolute file names. - */ - public static final String FILE_SEPARATOR = File.separator; - - /** - * System property line.separator.
          - * It is set by the system, and used by the script and trace tools. - */ - public static final String LINE_SEPARATOR = System.lineSeparator(); - - /** - * System property user.home (empty string if not set).
          + * System property user.home (empty string if not set). * It is usually set by the system, and used as a replacement for ~ in file * names. */ @@ -64,31 +50,21 @@ public class SysProperties { Utils.getProperty("user.home", ""); /** - * System property {@code h2.preview} (default: false). - *

          - * Controls default values of other properties. If {@code true} default - * values of other properties are changed to planned defaults for the 1.5.x - * versions of H2. Some other functionality may be also enabled or disabled. - *

          - */ - public static final boolean PREVIEW = Utils.getProperty("h2.preview", false); - - /** - * System property h2.allowedClasses (default: *).
          + * System property h2.allowedClasses (default: *). * Comma separated list of class names or prefixes. */ public static final String ALLOWED_CLASSES = Utils.getProperty("h2.allowedClasses", "*"); /** - * System property h2.enableAnonymousTLS (default: true).
          + * System property h2.enableAnonymousTLS (default: true). * When using TLS connection, the anonymous cipher suites should be enabled. */ public static final boolean ENABLE_ANONYMOUS_TLS = Utils.getProperty("h2.enableAnonymousTLS", true); /** - * System property h2.bindAddress (default: null).
          + * System property h2.bindAddress (default: null). * The bind address to use. */ public static final String BIND_ADDRESS = @@ -96,7 +72,7 @@ public class SysProperties { /** * System property h2.check - * (default: true for JDK/JRE, false for Android).
          + * (default: true for JDK/JRE, false for Android). * Optional additional checks in the database engine. */ public static final boolean CHECK = @@ -104,7 +80,7 @@ public class SysProperties { /** * System property h2.clientTraceDirectory (default: - * trace.db/).
          + * trace.db/). * Directory where the trace files of the JDBC client are stored (only for * client / server). */ @@ -112,7 +88,8 @@ public class SysProperties { Utils.getProperty("h2.clientTraceDirectory", "trace.db/"); /** - * System property h2.collatorCacheSize (default: 32000).
          + * System property h2.collatorCacheSize (default: 3 + * 2000). * The cache size for collation keys (in elements). Used when a collator has * been set for the database. */ @@ -121,7 +98,7 @@ public class SysProperties { /** * System property h2.consoleTableIndexes - * (default: 100).
          + * (default: 100). * Up to this many tables, the column type and indexes are listed. */ public static final int CONSOLE_MAX_TABLES_LIST_INDEXES = @@ -129,7 +106,7 @@ public class SysProperties { /** * System property h2.consoleTableColumns - * (default: 500).
          + * (default: 500). * Up to this many tables, the column names are listed. */ public static final int CONSOLE_MAX_TABLES_LIST_COLUMNS = @@ -137,28 +114,28 @@ public class SysProperties { /** * System property h2.consoleProcedureColumns - * (default: 500).
          + * (default: 500). * Up to this many procedures, the column names are listed. */ public static final int CONSOLE_MAX_PROCEDURES_LIST_COLUMNS = Utils.getProperty("h2.consoleProcedureColumns", 300); /** - * System property h2.consoleStream (default: true).
          + * System property h2.consoleStream (default: true). * H2 Console: stream query results. */ public static final boolean CONSOLE_STREAM = Utils.getProperty("h2.consoleStream", true); /** - * System property h2.consoleTimeout (default: 1800000).
          + * System property h2.consoleTimeout (default: 1800000). * H2 Console: session timeout in milliseconds. The default is 30 minutes. */ public static final int CONSOLE_TIMEOUT = Utils.getProperty("h2.consoleTimeout", 30 * 60 * 1000); /** - * System property h2.dataSourceTraceLevel (default: 1).
          + * System property h2.dataSourceTraceLevel (default: 1). * The trace level of the data source implementation. Default is 1 for * error. */ @@ -167,7 +144,7 @@ public class SysProperties { /** * System property h2.delayWrongPasswordMin - * (default: 250).
          + * (default: 250). * The minimum delay in milliseconds before an exception is thrown for using * the wrong user name or password. This slows down brute force attacks. The * delay is reset to this value after a successful login. Unsuccessful @@ -179,7 +156,7 @@ public class SysProperties { /** * System property h2.delayWrongPasswordMax - * (default: 4000).
          + * (default: 4000). * The maximum delay in milliseconds before an exception is thrown for using * the wrong user name or password. This slows down brute force attacks. The * delay is reset after a successful login. The value 0 means there is no @@ -189,7 +166,7 @@ public class SysProperties { Utils.getProperty("h2.delayWrongPasswordMax", 4000); /** - * System property h2.javaSystemCompiler (default: true).
          + * System property h2.javaSystemCompiler (default: true). * Whether to use the Java system compiler * (ToolProvider.getSystemJavaCompiler()) if it is available to compile user * defined functions. If disabled or if the system compiler is not @@ -201,23 +178,15 @@ public class SysProperties { /** * System property h2.lobCloseBetweenReads - * (default: false).
          + * (default: false). * Close LOB files between read operations. */ public static boolean lobCloseBetweenReads = Utils.getProperty("h2.lobCloseBetweenReads", false); - /** - * System property h2.lobFilesPerDirectory - * (default: 256).
          - * Maximum number of LOB files per directory. - */ - public static final int LOB_FILES_PER_DIRECTORY = - Utils.getProperty("h2.lobFilesPerDirectory", 256); - /** * System property h2.lobClientMaxSizeMemory (default: - * 1048576).
          + * 1048576). * The maximum size of a LOB object to keep in memory on the client side * when using the server mode. */ @@ -225,7 +194,7 @@ public class SysProperties { Utils.getProperty("h2.lobClientMaxSizeMemory", 1024 * 1024); /** - * System property h2.maxFileRetry (default: 16).
          + * System property h2.maxFileRetry (default: 16). * Number of times to retry file delete and rename. in Windows, files can't * be deleted if they are open. Waiting a bit can help (sometimes the * Windows Explorer opens the files for a short time) may help. Sometimes, @@ -236,7 +205,7 @@ public class SysProperties { Math.max(1, Utils.getProperty("h2.maxFileRetry", 16)); /** - * System property h2.maxReconnect (default: 3).
          + * System property h2.maxReconnect (default: 3). * The maximum number of tries to reconnect in a row. */ public static final int MAX_RECONNECT = @@ -244,7 +213,7 @@ public class SysProperties { /** * System property h2.maxMemoryRows - * (default: 40000 per GB of available RAM).
          + * (default: 40000 per GB of available RAM). * The default maximum number of rows to be kept in memory in a result set. */ public static final int MAX_MEMORY_ROWS = @@ -252,7 +221,7 @@ public class SysProperties { /** * System property h2.maxTraceDataLength - * (default: 65535).
          + * (default: 65535). * The maximum size of a LOB value that is written as data to the trace * system. */ @@ -260,17 +229,7 @@ public class SysProperties { Utils.getProperty("h2.maxTraceDataLength", 65535); /** - * System property h2.modifyOnWrite (default: false).
          - * Only modify the database file when recovery is necessary, or when writing - * to the database. If disabled, opening the database always writes to the - * file (except if the database is read-only). When enabled, the serialized - * file lock is faster. - */ - public static final boolean MODIFY_ON_WRITE = - Utils.getProperty("h2.modifyOnWrite", false); - - /** - * System property h2.nioLoadMapped (default: false).
          + * System property h2.nioLoadMapped (default: false). * If the mapped buffer should be loaded when the file is opened. * This can improve performance. */ @@ -278,17 +237,17 @@ public class SysProperties { Utils.getProperty("h2.nioLoadMapped", false); /** - * System property h2.nioCleanerHack (default: false).
          + * System property h2.nioCleanerHack (default: false). * If enabled, use the reflection hack to un-map the mapped file if * possible. If disabled, System.gc() is called in a loop until the object * is garbage collected. See also - * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4724038 + * https://bugs.openjdk.java.net/browse/JDK-4724038 */ public static final boolean NIO_CLEANER_HACK = Utils.getProperty("h2.nioCleanerHack", false); /** - * System property h2.objectCache (default: true).
          + * System property h2.objectCache (default: true). * Cache commonly used values (numbers, strings). There is a shared cache * for all values. */ @@ -297,14 +256,14 @@ public class SysProperties { /** * System property h2.objectCacheMaxPerElementSize (default: - * 4096).
          + * 4096). * The maximum size (precision) of an object in the cache. */ public static final int OBJECT_CACHE_MAX_PER_ELEMENT_SIZE = Utils.getProperty("h2.objectCacheMaxPerElementSize", 4096); /** - * System property h2.objectCacheSize (default: 1024).
          + * System property h2.objectCacheSize (default: 1024). * The maximum number of objects in the cache. * This value must be a power of 2. */ @@ -319,56 +278,7 @@ public class SysProperties { } /** - * System property {@code h2.oldResultSetGetObject}, {@code true} by default - * unless {@code h2.preview} is enabled. - *

          - * If {@code true} return {@code Byte} and {@code Short} from - * {@code ResultSet#getObject(int)} and {@code ResultSet#getObject(String)} - * for {@code TINYINT} and {@code SMALLINT} values. - *

          - *

          - * If {@code false} return {@code Integer} for them as specified in JDBC - * specification (see Mapping from JDBC Types to Java Object Types). - *

          - */ - public static final boolean OLD_RESULT_SET_GET_OBJECT = Utils.getProperty("h2.oldResultSetGetObject", !PREVIEW); - - /** - * System property {@code h2.bigDecimalIsDecimal}, {@code true} by default - * unless {@code h2.preview} is enabled. - *

          - * If {@code true} map {@code BigDecimal} to {@code DECIMAL} type. - *

          - *

          - * If {@code false} map {@code BigDecimal} to {@code NUMERIC} as specified - * in JDBC specification (see Mapping from Java Object Types to JDBC Types). - *

          - */ - public static final boolean BIG_DECIMAL_IS_DECIMAL = Utils.getProperty("h2.bigDecimalIsDecimal", !PREVIEW); - - /** - * System property {@code h2.returnOffsetDateTime}, {@code false} by default - * unless {@code h2.preview} is enabled. - *

          - * If {@code true} {@link java.sql.ResultSet#getObject(int)} and - * {@link java.sql.ResultSet#getObject(String)} return - * {@code TIMESTAMP WITH TIME ZONE} values as - * {@code java.time.OffsetDateTime}. - *

          - *

          - * If {@code false} return them as {@code org.h2.api.TimestampWithTimeZone} - * instead. - *

          - *

          - * This property has effect only on Java 8 / Android API 26 and later - * versions. Without JSR-310 {@code org.h2.api.TimestampWithTimeZone} is - * used unconditionally. - *

          - */ - public static final boolean RETURN_OFFSET_DATE_TIME = Utils.getProperty("h2.returnOffsetDateTime", PREVIEW); - - /** - * System property h2.pgClientEncoding (default: UTF-8).
          + * System property h2.pgClientEncoding (default: UTF-8). * Default client encoding for PG server. It is used if the client does not * sends his encoding. */ @@ -376,14 +286,21 @@ public class SysProperties { Utils.getProperty("h2.pgClientEncoding", "UTF-8"); /** - * System property h2.prefixTempFile (default: h2.temp).
          + * System property h2.prefixTempFile (default: h2.temp). * The prefix for temporary files in the temp directory. */ public static final String PREFIX_TEMP_FILE = Utils.getProperty("h2.prefixTempFile", "h2.temp"); /** - * System property h2.serverCachedObjects (default: 64).
          + * System property h2.forceAutoCommitOffOnCommit (default: false). + * Throw error if transaction's auto-commit property is true when a commit is executed. + */ + public static boolean FORCE_AUTOCOMMIT_OFF_ON_COMMIT = + Utils.getProperty("h2.forceAutoCommitOffOnCommit", false); + + /** + * System property h2.serverCachedObjects (default: 64). * TCP Server: number of cached objects per session. */ public static final int SERVER_CACHED_OBJECTS = @@ -391,79 +308,38 @@ public class SysProperties { /** * System property h2.serverResultSetFetchSize - * (default: 100).
          + * (default: 100). * The default result set fetch size when using the server mode. */ public static final int SERVER_RESULT_SET_FETCH_SIZE = Utils.getProperty("h2.serverResultSetFetchSize", 100); /** - * System property h2.socketConnectRetry (default: 16).
          + * System property h2.socketConnectRetry (default: 16). * The number of times to retry opening a socket. Windows sometimes fails * to open a socket, see bug - * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6213296 + * https://bugs.openjdk.java.net/browse/JDK-6213296 */ public static final int SOCKET_CONNECT_RETRY = Utils.getProperty("h2.socketConnectRetry", 16); /** * System property h2.socketConnectTimeout - * (default: 2000).
          + * (default: 2000). * The timeout in milliseconds to connect to a server. */ public static final int SOCKET_CONNECT_TIMEOUT = Utils.getProperty("h2.socketConnectTimeout", 2000); /** - * System property h2.sortBinaryUnsigned - * (default: true).
          - * Whether binary data should be sorted in unsigned mode - * (0xff is larger than 0x00) by default in new databases. - */ - public static final boolean SORT_BINARY_UNSIGNED = - Utils.getProperty("h2.sortBinaryUnsigned", true); - - /** - * System property {@code h2.sortUuidUnsigned}, {@code false} by default - * unless {@code h2.preview} is enabled. - * Whether UUID data should be sorted in unsigned mode - * ('ffffffff-ffff-ffff-ffff-ffffffffffff' is larger than - * '00000000-0000-0000-0000-000000000000') by default in new databases. - */ - public static final boolean SORT_UUID_UNSIGNED = - Utils.getProperty("h2.sortUuidUnsigned", PREVIEW); - - /** - * System property h2.sortNullsHigh (default: false).
          - * Invert the default sorting behavior for NULL, such that NULL - * is at the end of a result set in an ascending sort and at - * the beginning of a result set in a descending sort. - */ - public static final boolean SORT_NULLS_HIGH = - Utils.getProperty("h2.sortNullsHigh", false); - - /** - * System property h2.splitFileSizeShift (default: 30).
          + * System property h2.splitFileSizeShift (default: 30). * The maximum file size of a split file is 1L << x. */ public static final long SPLIT_FILE_SIZE_SHIFT = Utils.getProperty("h2.splitFileSizeShift", 30); /** - * System property h2.syncMethod (default: sync).
          - * What method to call when closing the database, on checkpoint, and on - * CHECKPOINT SYNC. The following options are supported: - * "sync" (default): RandomAccessFile.getFD().sync(); - * "force": RandomAccessFile.getChannel().force(true); - * "forceFalse": RandomAccessFile.getChannel().force(false); - * "": do not call a method (fast but there is a risk of data loss - * on power failure). - */ - public static final String SYNC_METHOD = - Utils.getProperty("h2.syncMethod", "sync"); - - /** - * System property h2.traceIO (default: false).
          + * System property h2.traceIO (default: false). * Trace all I/O operations. */ public static final boolean TRACE_IO = @@ -471,23 +347,14 @@ public class SysProperties { /** * System property h2.threadDeadlockDetector - * (default: false).
          + * (default: false). * Detect thread deadlocks in a background thread. */ public static final boolean THREAD_DEADLOCK_DETECTOR = Utils.getProperty("h2.threadDeadlockDetector", false); /** - * System property h2.implicitRelativePath - * (default: false).
          - * If disabled, relative paths in database URLs need to be written as - * jdbc:h2:./test instead of jdbc:h2:test. - */ - public static final boolean IMPLICIT_RELATIVE_PATH = - Utils.getProperty("h2.implicitRelativePath", false); - - /** - * System property h2.urlMap (default: null).
          + * System property h2.urlMap (default: null). * A properties file that contains a mapping between database URLs. New * connections are written into the file. An empty value in the map means no * redirection is used for the given URL. @@ -497,49 +364,16 @@ public class SysProperties { /** * System property h2.useThreadContextClassLoader - * (default: false).
          + * (default: false). * Instead of using the default class loader when deserializing objects, the * current thread-context class loader will be used. */ public static final boolean USE_THREAD_CONTEXT_CLASS_LOADER = Utils.getProperty("h2.useThreadContextClassLoader", false); - /** - * System property h2.serializeJavaObject - * (default: true).
          - * If true, values of type OTHER will be stored in serialized form - * and have the semantics of binary data for all operations (such as sorting - * and conversion to string). - *
          - * If false, the objects will be serialized only for I/O operations - * and a few other special cases (for example when someone tries to get the - * value in binary form or when comparing objects that are not comparable - * otherwise). - *
          - * If the object implements the Comparable interface, the method compareTo - * will be used for sorting (but only if objects being compared have a - * common comparable super type). Otherwise the objects will be compared by - * type, and if they are the same by hashCode, and if the hash codes are - * equal, but objects are not, the serialized forms (the byte arrays) are - * compared. - *
          - * The string representation of the values use the toString method of - * object. - *
          - * In client-server mode, the server must have all required classes in the - * class path. On the client side, this setting is required to be disabled - * as well, to have correct string representation and display size. - *
          - * In embedded mode, no data copying occurs, so the user has to make - * defensive copy himself before storing, or ensure that the value object is - * immutable. - */ - public static boolean serializeJavaObject = - Utils.getProperty("h2.serializeJavaObject", true); - /** * System property h2.javaObjectSerializer - * (default: null).
          + * (default: null). * The JavaObjectSerializer class name for java objects being stored in * column of type OTHER. It must be the same on client and server to work * correctly. @@ -547,19 +381,9 @@ public class SysProperties { public static final String JAVA_OBJECT_SERIALIZER = Utils.getProperty("h2.javaObjectSerializer", null); - /** - * System property h2.customDataTypesHandler - * (default: null).
          - * The CustomDataTypesHandler class name that is used - * to provide support for user defined custom data types. - * It must be the same on client and server to work correctly. - */ - public static final String CUSTOM_DATA_TYPES_HANDLER = - Utils.getProperty("h2.customDataTypesHandler", null); - /** * System property h2.authConfigFile - * (default: null).
          + * (default: null). * authConfigFile define the URL of configuration file * of {@link org.h2.security.auth.DefaultAuthenticator} * @@ -575,6 +399,7 @@ private SysProperties() { /** * INTERNAL + * @param dir base directory */ public static void setBaseDir(String dir) { if (!dir.endsWith("/")) { @@ -585,6 +410,7 @@ public static void setBaseDir(String dir) { /** * INTERNAL + * @return base directory */ public static String getBaseDir() { return Utils.getProperty(H2_BASE_DIR, null); @@ -592,7 +418,7 @@ public static String getBaseDir() { /** * System property h2.scriptDirectory (default: empty - * string).
          + * string). * Relative or absolute directory where the script files are stored to or * read from. * diff --git a/h2/src/main/org/h2/engine/UndoLog.java b/h2/src/main/org/h2/engine/UndoLog.java deleted file mode 100644 index c301bd5d81..0000000000 --- a/h2/src/main/org/h2/engine/UndoLog.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.util.ArrayList; -import java.util.HashMap; - -import org.h2.store.Data; -import org.h2.store.FileStore; -import org.h2.table.Table; -import org.h2.util.Utils; - -/** - * Each session keeps a undo log if rollback is required. - */ -public class UndoLog { - - private final Database database; - private final ArrayList storedEntriesPos = Utils.newSmallArrayList(); - private final ArrayList records = Utils.newSmallArrayList(); - private FileStore file; - private Data rowBuff; - private int memoryUndo; - private int storedEntries; - private HashMap tables; - - /** - * Create a new undo log for the given session. - * - * @param database the database - */ - UndoLog(Database database) { - this.database = database; - } - - /** - * Get the number of active rows in this undo log. - * - * @return the number of rows - */ - int size() { - return storedEntries + records.size(); - } - - /** - * Clear the undo log. This method is called after the transaction is - * committed. - */ - void clear() { - records.clear(); - storedEntries = 0; - storedEntriesPos.clear(); - memoryUndo = 0; - if (file != null) { - file.closeAndDeleteSilently(); - file = null; - rowBuff = null; - } - } - - /** - * Get the last record and remove it from the list of operations. 
- * - * @return the last record - */ - public UndoLogRecord getLast() { - int i = records.size() - 1; - if (i < 0 && storedEntries > 0) { - int last = storedEntriesPos.size() - 1; - long pos = storedEntriesPos.remove(last); - long end = file.length(); - int bufferLength = (int) (end - pos); - Data buff = Data.create(database, bufferLength, true); - file.seek(pos); - file.readFully(buff.getBytes(), 0, bufferLength); - while (buff.length() < bufferLength) { - UndoLogRecord e = UndoLogRecord.loadFromBuffer(buff, this); - records.add(e); - memoryUndo++; - } - storedEntries -= records.size(); - file.setLength(pos); - file.seek(pos); - } - i = records.size() - 1; - UndoLogRecord entry = records.get(i); - if (entry.isStored()) { - int start = Math.max(0, i - database.getMaxMemoryUndo() / 2); - UndoLogRecord first = null; - for (int j = start; j <= i; j++) { - UndoLogRecord e = records.get(j); - if (e.isStored()) { - e.load(rowBuff, file, this); - memoryUndo++; - if (first == null) { - first = e; - } - } - } - for (int k = 0; k < i; k++) { - UndoLogRecord e = records.get(k); - e.invalidatePos(); - } - seek(first.getFilePos()); - } - return entry; - } - - /** - * Go to the right position in the file. - * - * @param filePos the position in the file - */ - void seek(long filePos) { - file.seek(filePos * Constants.FILE_BLOCK_SIZE); - } - - /** - * Remove the last record from the list of operations. - */ - void removeLast() { - int i = records.size() - 1; - UndoLogRecord r = records.remove(i); - if (!r.isStored()) { - memoryUndo--; - } - } - - /** - * Append an undo log entry to the log. 
- * - * @param entry the entry - */ - void add(UndoLogRecord entry) { - records.add(entry); - memoryUndo++; - if (memoryUndo > database.getMaxMemoryUndo() && - database.isPersistent() && - !database.isMVStore()) { - if (file == null) { - String fileName = database.createTempFile(); - file = database.openFile(fileName, "rw", false); - file.autoDelete(); - file.setCheckedWriting(false); - file.setLength(FileStore.HEADER_LENGTH); - } - Data buff = Data.create(database, Constants.DEFAULT_PAGE_SIZE, true); - for (int i = 0; i < records.size(); i++) { - UndoLogRecord r = records.get(i); - buff.checkCapacity(Constants.DEFAULT_PAGE_SIZE); - r.append(buff, this); - if (i == records.size() - 1 || buff.length() > Constants.UNDO_BLOCK_SIZE) { - storedEntriesPos.add(file.getFilePointer()); - file.write(buff.getBytes(), 0, buff.length()); - buff.reset(); - } - } - storedEntries += records.size(); - memoryUndo = 0; - records.clear(); - } - } - - /** - * Get the table id for this undo log. If the table is not registered yet, - * this is done as well. - * - * @param table the table - * @return the id - */ - int getTableId(Table table) { - int id = table.getId(); - if (tables == null) { - tables = new HashMap<>(); - } - // need to overwrite the old entry, because the old object - // might be deleted in the meantime - tables.put(id, table); - return id; - } - - /** - * Get the table for this id. The table must be registered for this undo log - * first by calling getTableId. - * - * @param id the table id - * @return the table object - */ - Table getTable(int id) { - return tables.get(id); - } - -} diff --git a/h2/src/main/org/h2/engine/UndoLogRecord.java b/h2/src/main/org/h2/engine/UndoLogRecord.java deleted file mode 100644 index c6c44d6ae7..0000000000 --- a/h2/src/main/org/h2/engine/UndoLogRecord.java +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.engine; - -import org.h2.api.ErrorCode; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.store.Data; -import org.h2.store.FileStore; -import org.h2.table.Table; -import org.h2.value.Value; - -/** - * An entry in a undo log. - */ -public class UndoLogRecord { - - /** - * Operation type meaning the row was inserted. - */ - public static final short INSERT = 0; - - /** - * Operation type meaning the row was deleted. - */ - public static final short DELETE = 1; - - private static final int IN_MEMORY = 0, STORED = 1, IN_MEMORY_INVALID = 2; - private Table table; - private Row row; - private short operation; - private short state; - private int filePos; - - /** - * Create a new undo log record - * - * @param table the table - * @param op the operation type - * @param row the row that was deleted or inserted - */ - UndoLogRecord(Table table, short op, Row row) { - this.table = table; - this.row = row; - this.operation = op; - this.state = IN_MEMORY; - } - - /** - * Check if the log record is stored in the file. - * - * @return true if it is - */ - boolean isStored() { - return state == STORED; - } - - /** - * Check if this undo log record can be store. Only record can be stored if - * the table has a unique index. - * - * @return if it can be stored - */ - boolean canStore() { - // if large transactions are enabled, this method is not called - return table.getUniqueIndex() != null; - } - - /** - * Un-do the operation. If the row was inserted before, it is deleted now, - * and vice versa. 
- * - * @param session the session - */ - void undo(Session session) { - Database db = session.getDatabase(); - switch (operation) { - case INSERT: - if (state == IN_MEMORY_INVALID) { - state = IN_MEMORY; - } - if (db.getLockMode() == Constants.LOCK_MODE_OFF) { - if (row.isDeleted()) { - // it might have been deleted by another thread - return; - } - } - try { - row.setDeleted(false); - table.removeRow(session, row); - table.fireAfterRow(session, row, null, true); - } catch (DbException e) { - if (session.getDatabase().getLockMode() == Constants.LOCK_MODE_OFF - && e.getErrorCode() == ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1) { - // it might have been deleted by another thread - // ignore - } else { - throw e; - } - } - break; - case DELETE: - try { - table.addRow(session, row); - table.fireAfterRow(session, null, row, true); - } catch (DbException e) { - if (session.getDatabase().getLockMode() == Constants.LOCK_MODE_OFF - && e.getSQLException().getErrorCode() == ErrorCode.DUPLICATE_KEY_1) { - // it might have been added by another thread - // ignore - } else { - throw e; - } - } - break; - default: - DbException.throwInternalError("op=" + operation); - } - } - - /** - * Append the row to the buffer. - * - * @param buff the buffer - * @param log the undo log - */ - void append(Data buff, UndoLog log) { - int p = buff.length(); - buff.writeInt(0); - buff.writeInt(operation); - buff.writeByte(row.isDeleted() ? (byte) 1 : (byte) 0); - buff.writeInt(log.getTableId(table)); - buff.writeLong(row.getKey()); - int count = row.getColumnCount(); - buff.writeInt(count); - for (int i = 0; i < count; i++) { - Value v = row.getValue(i); - buff.checkCapacity(buff.getValueLen(v)); - buff.writeValue(v); - } - buff.fillAligned(); - buff.setInt(p, (buff.length() - p) / Constants.FILE_BLOCK_SIZE); - } - - /** - * Save the row in the file using a buffer. 
- * - * @param buff the buffer - * @param file the file - * @param log the undo log - */ - void save(Data buff, FileStore file, UndoLog log) { - buff.reset(); - append(buff, log); - filePos = (int) (file.getFilePointer() / Constants.FILE_BLOCK_SIZE); - file.write(buff.getBytes(), 0, buff.length()); - row = null; - state = STORED; - } - - /** - * Load an undo log record row using a buffer. - * - * @param buff the buffer - * @param log the log - * @return the undo log record - */ - static UndoLogRecord loadFromBuffer(Data buff, UndoLog log) { - UndoLogRecord rec = new UndoLogRecord(null, (short) 0, null); - int pos = buff.length(); - int len = buff.readInt() * Constants.FILE_BLOCK_SIZE; - rec.load(buff, log); - buff.setPos(pos + len); - return rec; - } - - /** - * Load an undo log record row using a buffer. - * - * @param buff the buffer - * @param file the source file - * @param log the log - */ - void load(Data buff, FileStore file, UndoLog log) { - int min = Constants.FILE_BLOCK_SIZE; - log.seek(filePos); - buff.reset(); - file.readFully(buff.getBytes(), 0, min); - int len = buff.readInt() * Constants.FILE_BLOCK_SIZE; - buff.checkCapacity(len); - if (len - min > 0) { - file.readFully(buff.getBytes(), min, len - min); - } - int oldOp = operation; - load(buff, log); - if (operation != oldOp) { - DbException.throwInternalError("operation=" + operation + " op=" + oldOp); - } - } - - private void load(Data buff, UndoLog log) { - operation = (short) buff.readInt(); - boolean deleted = buff.readByte() == 1; - table = log.getTable(buff.readInt()); - long key = buff.readLong(); - int columnCount = buff.readInt(); - Value[] values = new Value[columnCount]; - for (int i = 0; i < columnCount; i++) { - values[i] = buff.readValue(); - } - row = getTable().getDatabase().createRow(values, Row.MEMORY_CALCULATE); - row.setKey(key); - row.setDeleted(deleted); - state = IN_MEMORY_INVALID; - } - - /** - * Get the table. 
- * - * @return the table - */ - public Table getTable() { - return table; - } - - /** - * Get the position in the file. - * - * @return the file position - */ - public long getFilePos() { - return filePos; - } - - /** - * Get the row that was deleted or inserted. - * - * @return the row - */ - public Row getRow() { - return row; - } - - /** - * Change the state from IN_MEMORY to IN_MEMORY_INVALID. This method is - * called if a later record was read from the temporary file, and therefore - * the position could have changed. - */ - void invalidatePos() { - if (this.state == IN_MEMORY) { - state = IN_MEMORY_INVALID; - } - } -} diff --git a/h2/src/main/org/h2/engine/User.java b/h2/src/main/org/h2/engine/User.java index 8e1260ddaf..312516a84f 100644 --- a/h2/src/main/org/h2/engine/User.java +++ b/h2/src/main/org/h2/engine/User.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; @@ -13,6 +13,7 @@ import org.h2.message.Trace; import org.h2.schema.Schema; import org.h2.security.SHA256; +import org.h2.table.DualTable; import org.h2.table.MetaTable; import org.h2.table.RangeTable; import org.h2.table.Table; @@ -25,7 +26,7 @@ /** * Represents a user object. 
*/ -public class User extends RightOwner { +public final class User extends RightOwner { private final boolean systemUser; private byte[] salt; @@ -76,7 +77,7 @@ public void setUserPasswordHash(byte[] userPasswordHash) { @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } @Override @@ -84,70 +85,6 @@ public String getCreateSQL() { return getCreateSQL(true); } - @Override - public String getDropSQL() { - return null; - } - - /** - * Checks that this user has the given rights for this database object. - * - * @param table the database object - * @param rightMask the rights required - * @throws DbException if this user does not have the required rights - */ - public void checkRight(Table table, int rightMask) { - if (!hasRight(table, rightMask)) { - throw DbException.get(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, table.getSQL(false)); - } - } - - /** - * See if this user has the given rights for this database object. - * - * @param table the database object, or null for schema-only check - * @param rightMask the rights required - * @return true if the user has the rights - */ - public boolean hasRight(Table table, int rightMask) { - if (rightMask != Right.SELECT && !systemUser && table != null) { - table.checkWritingAllowed(); - } - if (admin) { - return true; - } - Role publicRole = database.getPublicRole(); - if (publicRole.isRightGrantedRecursive(table, rightMask)) { - return true; - } - if (table instanceof MetaTable || table instanceof RangeTable) { - // everybody has access to the metadata information - return true; - } - if (table != null) { - if (hasRight(null, Right.ALTER_ANY_SCHEMA)) { - return true; - } - TableType tableType = table.getTableType(); - if (TableType.VIEW == tableType) { - TableView v = (TableView) table; - if (v.getOwner() == this) { - // the owner of a view has access: - // SELECT * FROM (SELECT * FROM ...) 
- return true; - } - } else if (tableType == null) { - // function table - return true; - } - if (table.isTemporary() && !table.isGlobalTemporary()) { - // the owner has all rights on local temporary tables - return true; - } - } - return isRightGrantedRecursive(table, rightMask); - } - /** * Get the CREATE SQL statement for this object. * @@ -157,7 +94,7 @@ public boolean hasRight(Table table, int rightMask) { */ public String getCreateSQL(boolean password) { StringBuilder buff = new StringBuilder("CREATE USER IF NOT EXISTS "); - getSQL(buff, true); + getSQL(buff, DEFAULT_SQL_FLAGS); if (comment != null) { buff.append(" COMMENT "); StringUtils.quoteStringSQL(buff, comment); @@ -195,8 +132,8 @@ boolean validateUserPasswordHash(byte[] userPasswordHash) { } /** - * Check if this user has admin rights. An exception is thrown if he does - * not have them. + * Checks if this user has admin rights. An exception is thrown if user + * doesn't have them. * * @throws DbException if this user is not an admin */ @@ -207,17 +144,101 @@ public void checkAdmin() { } /** - * Check if this user has schema admin rights. An exception is thrown if he - * does not have them. + * Checks if this user has schema admin rights for every schema. An + * exception is thrown if user doesn't have them. * * @throws DbException if this user is not a schema admin */ public void checkSchemaAdmin() { - if (!hasRight(null, Right.ALTER_ANY_SCHEMA)) { + if (!hasSchemaRight(null)) { throw DbException.get(ErrorCode.ADMIN_RIGHTS_REQUIRED); } } + /** + * Checks if this user has schema owner rights for the specified schema. An + * exception is thrown if user doesn't have them. 
+ * + * @param schema the schema + * @throws DbException if this user is not a schema owner + */ + public void checkSchemaOwner(Schema schema) { + if (!hasSchemaRight(schema)) { + throw DbException.get(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, schema.getTraceSQL()); + } + } + + /** + * See if this user has owner rights for the specified schema + * + * @param schema the schema + * @return true if the user has the rights + */ + private boolean hasSchemaRight(Schema schema) { + if (admin) { + return true; + } + Role publicRole = database.getPublicRole(); + if (publicRole.isSchemaRightGrantedRecursive(schema)) { + return true; + } + return isSchemaRightGrantedRecursive(schema); + } + + /** + * Checks that this user has the given rights for the specified table. + * + * @param table the table + * @param rightMask the rights required + * @throws DbException if this user does not have the required rights + */ + public void checkTableRight(Table table, int rightMask) { + if (!hasTableRight(table, rightMask)) { + throw DbException.get(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, table.getTraceSQL()); + } + } + + /** + * See if this user has the given rights for this database object. 
+ * + * @param table the database object, or null for schema-only check + * @param rightMask the rights required + * @return true if the user has the rights + */ + public boolean hasTableRight(Table table, int rightMask) { + if (rightMask != Right.SELECT && !systemUser) { + table.checkWritingAllowed(); + } + if (admin) { + return true; + } + Role publicRole = database.getPublicRole(); + if (publicRole.isTableRightGrantedRecursive(table, rightMask)) { + return true; + } + if (table instanceof MetaTable || table instanceof DualTable || table instanceof RangeTable) { + // everybody has access to the metadata information + return true; + } + TableType tableType = table.getTableType(); + if (TableType.VIEW == tableType) { + TableView v = (TableView) table; + if (v.getOwner() == this) { + // the owner of a view has access: + // SELECT * FROM (SELECT * FROM ...) + return true; + } + } else if (tableType == null) { + // function table + return true; + } + if (table.isTemporary() && !table.isGlobalTemporary()) { + // the owner has all rights on local temporary tables + return true; + } + return isTableRightGrantedRecursive(table, rightMask); + } + @Override public int getType() { return DbObject.USER; @@ -240,7 +261,7 @@ public ArrayList getChildren() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { for (Right right : database.getAllRights()) { if (right.getGrantee() == this) { database.removeDatabaseObject(session, right); @@ -253,23 +274,4 @@ public void removeChildrenAndResources(Session session) { invalidate(); } - @Override - public void checkRename() { - // ok - } - - /** - * Check that this user does not own any schema. An exception is thrown if - * he owns one or more schemas. 
- * - * @throws DbException if this user owns a schema - */ - public void checkOwnsNoSchemas() { - for (Schema s : database.getAllSchemas()) { - if (this == s.getOwner()) { - throw DbException.get(ErrorCode.CANNOT_DROP_2, getName(), s.getName()); - } - } - } - } diff --git a/h2/src/main/org/h2/engine/UserBuilder.java b/h2/src/main/org/h2/engine/UserBuilder.java index 4e4d5d19dd..658c80581d 100644 --- a/h2/src/main/org/h2/engine/UserBuilder.java +++ b/h2/src/main/org/h2/engine/UserBuilder.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; diff --git a/h2/src/main/org/h2/engine/package.html b/h2/src/main/org/h2/engine/package.html index 7fd976596c..09d0a56fed 100644 --- a/h2/src/main/org/h2/engine/package.html +++ b/h2/src/main/org/h2/engine/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/expression/Alias.java b/h2/src/main/org/h2/expression/Alias.java index 5024673c17..afae60cf28 100644 --- a/h2/src/main/org/h2/expression/Alias.java +++ b/h2/src/main/org/h2/expression/Alias.java @@ -1,21 +1,21 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.command.Parser; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; +import org.h2.util.ParserUtil; import org.h2.value.TypeInfo; import org.h2.value.Value; /** * A column alias as in SELECT 'Hello' AS NAME ... 
*/ -public class Alias extends Expression { +public final class Alias extends Expression { private final String alias; private Expression expr; @@ -33,7 +33,7 @@ public Expression getNonAliasExpression() { } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { return expr.getValue(session); } @@ -48,7 +48,7 @@ public void mapColumns(ColumnResolver resolver, int level, int state) { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { expr = expr.optimize(session); return this; } @@ -59,23 +59,28 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public boolean isAutoIncrement() { - return expr.isAutoIncrement(); + public boolean isIdentity() { + return expr.isIdentity(); } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - expr.getSQL(builder, alwaysQuote).append(" AS "); - return Parser.quoteIdentifier(builder, alias, alwaysQuote); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + expr.getUnenclosedSQL(builder, sqlFlags).append(" AS "); + return ParserUtil.quoteIdentifier(builder, alias, sqlFlags); } @Override - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { expr.updateAggregate(session, stage); } @Override - public String getAlias() { + public String getAlias(SessionLocal session, int columnIndex) { + return alias; + } + + @Override + public String getColumnNameForView(SessionLocal session, int columnIndex) { return alias; } @@ -94,6 +99,14 @@ public int getCost() { return expr.getCost(); } + @Override + public String getSchemaName() { + if (aliasColumnName) { + return null; + } + return expr.getSchemaName(); + } + @Override public String getTableName() { if (aliasColumnName) { @@ -103,11 +116,11 @@ public String getTableName() { } @Override - public String getColumnName() { + public 
String getColumnName(SessionLocal session, int columnIndex) { if (!(expr instanceof ExpressionColumn) || aliasColumnName) { return alias; } - return expr.getColumnName(); + return expr.getColumnName(session, columnIndex); } } diff --git a/h2/src/main/org/h2/expression/ArrayConstructorByQuery.java b/h2/src/main/org/h2/expression/ArrayConstructorByQuery.java new file mode 100644 index 0000000000..9ed16bd3e5 --- /dev/null +++ b/h2/src/main/org/h2/expression/ArrayConstructorByQuery.java @@ -0,0 +1,102 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import java.util.ArrayList; + +import org.h2.api.ErrorCode; +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; + +/** + * Array value constructor by query. + */ +public final class ArrayConstructorByQuery extends Expression { + + /** + * The subquery. + */ + private final Query query; + + private TypeInfo componentType, type; + + /** + * Creates new instance of array value constructor by query. 
+ * + * @param query + * the query + */ + public ArrayConstructorByQuery(Query query) { + this.query = query; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return StringUtils.indent(builder.append("ARRAY ("), query.getPlanSQL(sqlFlags), 4, false).append(')'); + } + + @Override + public Value getValue(SessionLocal session) { + query.setSession(session); + ArrayList values = new ArrayList<>(); + try (ResultInterface result = query.query(0)) { + while (result.next()) { + values.add(result.currentRow()[0]); + } + } + return ValueArray.get(componentType, values.toArray(new Value[0]), session); + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + query.mapColumns(resolver, level + 1); + } + + @Override + public Expression optimize(SessionLocal session) { + query.prepare(); + if (query.getColumnCount() != 1) { + throw DbException.get(ErrorCode.SUBQUERY_IS_NOT_SINGLE_COLUMN); + } + componentType = query.getExpressions().get(0).getType(); + type = TypeInfo.getTypeInfo(Value.ARRAY, -1L, -1, componentType); + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + query.setEvaluatable(tableFilter, value); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + query.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return query.isEverything(visitor); + } + + @Override + public int getCost() { + return query.getCostAsExpression(); + } + +} diff --git a/h2/src/main/org/h2/expression/ArrayElementReference.java b/h2/src/main/org/h2/expression/ArrayElementReference.java new file mode 100644 index 0000000000..d02245e968 --- /dev/null +++ b/h2/src/main/org/h2/expression/ArrayElementReference.java @@ -0,0 +1,67 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueNull; + +/** + * Array element reference. + */ +public final class ArrayElementReference extends Operation2 { + + public ArrayElementReference(Expression left, Expression right) { + super(left, right); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append('['); + return right.getUnenclosedSQL(builder, sqlFlags).append(']'); + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + Value r = right.getValue(session); + if (l != ValueNull.INSTANCE && r != ValueNull.INSTANCE) { + Value[] list = ((ValueArray) l).getList(); + int element = r.getInt(); + int cardinality = list.length; + if (element >= 1 && element <= cardinality) { + return list[element - 1]; + } + throw DbException.get(ErrorCode.ARRAY_ELEMENT_ERROR_2, Integer.toString(element), "1.." 
+ cardinality); + } + return ValueNull.INSTANCE; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + TypeInfo leftType = left.getType(); + switch (leftType.getValueType()) { + case Value.NULL: + return ValueExpression.NULL; + case Value.ARRAY: + type = (TypeInfo) leftType.getExtTypeInfo(); + if (left.isConstant() && right.isConstant()) { + return TypedValueExpression.get(getValue(session), type); + } + break; + default: + throw DbException.getInvalidExpressionTypeException("Array", left); + } + return this; + } + +} diff --git a/h2/src/main/org/h2/expression/BinaryOperation.java b/h2/src/main/org/h2/expression/BinaryOperation.java index 663c4f7e61..9c910515e6 100644 --- a/h2/src/main/org/h2/expression/BinaryOperation.java +++ b/h2/src/main/org/h2/expression/BinaryOperation.java @@ -1,36 +1,27 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.engine.Mode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.IntervalOperation.IntervalOpType; -import org.h2.expression.function.Function; +import org.h2.expression.function.DateTimeFunction; import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; import org.h2.value.DataType; import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueString; +import org.h2.value.ValueNumeric; /** * A mathematical expression, or string concatenation. 
*/ -public class BinaryOperation extends Expression { +public class BinaryOperation extends Operation2 { public enum OpType { - /** - * This operation represents a string concatenation as in - * 'Hello' || 'World'. - */ - CONCAT, - /** * This operation represents an addition as in 1 + 2. */ @@ -47,40 +38,47 @@ public enum OpType { MULTIPLY, /** - * This operation represents a division as in 4 * 2. - */ - DIVIDE, - - /** - * This operation represents a modulus as in 5 % 2. + * This operation represents a division as in 4 / 2. */ - MODULUS + DIVIDE } private OpType opType; - private Expression left, right; - private TypeInfo type; + private TypeInfo forcedType; private boolean convertRight = true; public BinaryOperation(OpType opType, Expression left, Expression right) { + super(left, right); this.opType = opType; - this.left = left; - this.right = right; + } + + /** + * Sets a forced data type of a datetime minus datetime operation. + * + * @param forcedType the forced data type + */ + public void setForcedType(TypeInfo forcedType) { + if (opType != OpType.MINUS) { + throw getUnexpectedForcedTypeException(); + } + this.forcedType = forcedType; + } + + @Override + public boolean needParentheses() { + return true; } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { // don't remove the space, otherwise it might end up some thing like // --1 which is a line remark - builder.append('('); - left.getSQL(builder, alwaysQuote).append(' ').append(getOperationToken()).append(' '); - return right.getSQL(builder, alwaysQuote).append(')'); + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(' ').append(getOperationToken()).append(' '); + return right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); } private String getOperationToken() { switch (opType) { - case CONCAT: - return "||"; case PLUS: return "+"; case MINUS: @@ -89,39 +87,19 @@ private String 
getOperationToken() { return "*"; case DIVIDE: return "/"; - case MODULUS: - return "%"; default: - throw DbException.throwInternalError("opType=" + opType); + throw DbException.getInternalError("opType=" + opType); } } @Override - public Value getValue(Session session) { - Mode mode = session.getDatabase().getMode(); - Value l = left.getValue(session).convertTo(type, mode, null); + public Value getValue(SessionLocal session) { + Value l = left.getValue(session).convertTo(type, session); Value r = right.getValue(session); if (convertRight) { - r = r.convertTo(type, mode, null); + r = r.convertTo(type, session); } switch (opType) { - case CONCAT: { - if (l == ValueNull.INSTANCE) { - if (mode.nullConcatIsNull) { - return ValueNull.INSTANCE; - } - return r; - } else if (r == ValueNull.INSTANCE) { - if (mode.nullConcatIsNull) { - return ValueNull.INSTANCE; - } - return l; - } - String s1 = l.getString(), s2 = r.getString(); - StringBuilder buff = new StringBuilder(s1.length() + s2.length()); - buff.append(s1).append(s2); - return ValueString.get(buff.toString()); - } case PLUS: if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { return ValueNull.INSTANCE; @@ -141,84 +119,132 @@ public Value getValue(Session session) { if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { return ValueNull.INSTANCE; } - return l.divide(r); - case MODULUS: - if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { - return ValueNull.INSTANCE; - } - return l.modulus(r); + return l.divide(r, type); default: - throw DbException.throwInternalError("type=" + opType); + throw DbException.getInternalError("type=" + opType); } } @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - left.mapColumns(resolver, level, state); - right.mapColumns(resolver, level, state); - } - - @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { left = left.optimize(session); right = right.optimize(session); - switch 
(opType) { - case CONCAT: { - TypeInfo l = left.getType(), r = right.getType(); - if (DataType.isStringType(l.getValueType()) && DataType.isStringType(r.getValueType())) { - long precision = l.getPrecision() + r.getPrecision(); - if (precision >= 0 && precision < Integer.MAX_VALUE) { - type = TypeInfo.getTypeInfo(Value.STRING, precision, 0, null); - break; - } + TypeInfo leftType = left.getType(), rightType = right.getType(); + int l = leftType.getValueType(), r = rightType.getValueType(); + if ((l == Value.NULL && r == Value.NULL) || (l == Value.UNKNOWN && r == Value.UNKNOWN)) { + // (? + ?) - use decimal by default (the most safe data type) or + // string when text concatenation with + is enabled + if (opType == OpType.PLUS && session.getDatabase().getMode().allowPlusForStringConcat) { + return new ConcatenationOperation(left, right).optimize(session); + } else { + type = TypeInfo.TYPE_NUMERIC_FLOATING_POINT; } - type = TypeInfo.TYPE_STRING; - break; + } else if (DataType.isIntervalType(l) || DataType.isIntervalType(r)) { + if (forcedType != null) { + throw getUnexpectedForcedTypeException(); + } + return optimizeInterval(l, r); + } else if (DataType.isDateTimeType(l) || DataType.isDateTimeType(r)) { + return optimizeDateTime(session, l, r); + } else if (forcedType != null) { + throw getUnexpectedForcedTypeException(); + } else { + int dataType = Value.getHigherOrder(l, r); + if (dataType == Value.NUMERIC) { + optimizeNumeric(leftType, rightType); + } else if (dataType == Value.DECFLOAT) { + optimizeDecfloat(leftType, rightType); + } else if (dataType == Value.ENUM) { + type = TypeInfo.TYPE_INTEGER; + } else if (DataType.isCharacterStringType(dataType) + && opType == OpType.PLUS && session.getDatabase().getMode().allowPlusForStringConcat) { + return new ConcatenationOperation(left, right).optimize(session); + } else { + type = TypeInfo.getTypeInfo(dataType); + } + } + if (left.isConstant() && right.isConstant()) { + return ValueExpression.get(getValue(session)); 
} + return this; + } + + private void optimizeNumeric(TypeInfo leftType, TypeInfo rightType) { + leftType = leftType.toNumericType(); + rightType = rightType.toNumericType(); + long leftPrecision = leftType.getPrecision(), rightPrecision = rightType.getPrecision(); + int leftScale = leftType.getScale(), rightScale = rightType.getScale(); + long precision; + int scale; + switch (opType) { case PLUS: case MINUS: + // Precision is implementation-defined. + // Scale must be max(leftScale, rightScale). + // Choose the largest scale and adjust the precision of other + // argument. + if (leftScale < rightScale) { + leftPrecision += rightScale - leftScale; + scale = rightScale; + } else { + rightPrecision += leftScale - rightScale; + scale = leftScale; + } + // Add one extra digit to the largest precision. + precision = Math.max(leftPrecision, rightPrecision) + 1; + break; case MULTIPLY: - case DIVIDE: - case MODULUS: - int l = left.getType().getValueType(); - int r = right.getType().getValueType(); - if ((l == Value.NULL && r == Value.NULL) || - (l == Value.UNKNOWN && r == Value.UNKNOWN)) { - // (? + ?) - use decimal by default (the most safe data type) or - // string when text concatenation with + is enabled - if (opType == OpType.PLUS && session.getDatabase(). - getMode().allowPlusForStringConcat) { - type = TypeInfo.TYPE_STRING; - opType = OpType.CONCAT; - } else { - type = TypeInfo.TYPE_DECIMAL_DEFAULT; - } - } else if (DataType.isIntervalType(l) || DataType.isIntervalType(r)) { - return optimizeInterval(session, l, r); - } else if (DataType.isDateTimeType(l) || DataType.isDateTimeType(r)) { - return optimizeDateTime(session, l, r); + // Precision is implementation-defined. + // Scale must be leftScale + rightScale. + // Use sum of precisions. + precision = leftPrecision + rightPrecision; + scale = leftScale + rightScale; + break; + case DIVIDE: { + // Precision and scale are implementation-defined. 
+ long scaleAsLong = leftScale - rightScale + rightPrecision * 2; + if (scaleAsLong >= ValueNumeric.MAXIMUM_SCALE) { + scale = ValueNumeric.MAXIMUM_SCALE; + } else if (scaleAsLong <= 0) { + scale = 0; } else { - int dataType = Value.getHigherOrder(l, r); - if (dataType == Value.ENUM) { - type = TypeInfo.TYPE_INT; - } else { - type = TypeInfo.getTypeInfo(dataType); - if (DataType.isStringType(dataType) && session.getDatabase().getMode().allowPlusForStringConcat) { - opType = OpType.CONCAT; - } - } + scale = (int) scaleAsLong; } + // Divider can be effectively multiplied by no more than + // 10^rightScale, so add rightScale to its precision and adjust the + // result to the changes in scale. + precision = leftPrecision + rightScale - leftScale + scale; break; + } default: - DbException.throwInternalError("type=" + opType); + throw DbException.getInternalError("type=" + opType); } - if (left.isConstant() && right.isConstant()) { - return ValueExpression.get(getValue(session)); + type = TypeInfo.getTypeInfo(Value.NUMERIC, precision, scale, null); + } + + private void optimizeDecfloat(TypeInfo leftType, TypeInfo rightType) { + leftType = leftType.toDecfloatType(); + rightType = rightType.toDecfloatType(); + long leftPrecision = leftType.getPrecision(), rightPrecision = rightType.getPrecision(); + long precision; + switch (opType) { + case PLUS: + case MINUS: + case DIVIDE: + // Add one extra digit to the largest precision. + precision = Math.max(leftPrecision, rightPrecision) + 1; + break; + case MULTIPLY: + // Use sum of precisions. 
+ precision = leftPrecision + rightPrecision; + break; + default: + throw DbException.getInternalError("type=" + opType); } - return this; + type = TypeInfo.getTypeInfo(Value.DECFLOAT, precision, 0, null); } - private Expression optimizeInterval(Session session, int l, int r) { + private Expression optimizeInterval(int l, int r) { boolean lInterval = false, lNumeric = false, lDateTime = false; if (DataType.isIntervalType(l)) { lInterval = true; @@ -277,8 +303,13 @@ private Expression optimizeInterval(Session session, int l, int r) { } break; case DIVIDE: - if (lInterval && rNumeric) { - return new IntervalOperation(IntervalOpType.INTERVAL_DIVIDE_NUMERIC, left, right); + if (lInterval) { + if (rNumeric) { + return new IntervalOperation(IntervalOpType.INTERVAL_DIVIDE_NUMERIC, left, right); + } else if (rInterval && DataType.isYearMonthIntervalType(l) == DataType.isYearMonthIntervalType(r)) { + // Non-standard + return new IntervalOperation(IntervalOpType.INTERVAL_DIVIDE_INTERVAL, left, right); + } } break; default: @@ -286,93 +317,78 @@ private Expression optimizeInterval(Session session, int l, int r) { throw getUnsupported(l, r); } - private Expression optimizeDateTime(Session session, int l, int r) { + private Expression optimizeDateTime(SessionLocal session, int l, int r) { switch (opType) { - case PLUS: - if (r != Value.getHigherOrder(l, r)) { - // order left and right: INT < TIME < DATE < TIMESTAMP + case PLUS: { + if (DataType.isDateTimeType(l)) { + if (DataType.isDateTimeType(r)) { + if (l > r) { + swap(); + int t = l; + l = r; + r = t; + } + return new CompatibilityDatePlusTimeOperation(right, left).optimize(session); + } swap(); int t = l; l = r; r = t; } switch (l) { - case Value.INT: { + case Value.INTEGER: // Oracle date add - Function f = Function.getFunction(session.getDatabase(), "DATEADD"); - f.setParameter(0, ValueExpression.get(ValueString.get("DAY"))); - f.setParameter(1, left); - f.setParameter(2, right); - f.doneWithParameters(); - return 
f.optimize(session); - } - case Value.DECIMAL: - case Value.FLOAT: - case Value.DOUBLE: { + return new DateTimeFunction(DateTimeFunction.DATEADD, DateTimeFunction.DAY, left, right) + .optimize(session); + case Value.NUMERIC: + case Value.REAL: + case Value.DOUBLE: + case Value.DECFLOAT: // Oracle date add - Function f = Function.getFunction(session.getDatabase(), "DATEADD"); - f.setParameter(0, ValueExpression.get(ValueString.get("SECOND"))); - left = new BinaryOperation(OpType.MULTIPLY, ValueExpression.get(ValueInt - .get(60 * 60 * 24)), left); - f.setParameter(1, left); - f.setParameter(2, right); - f.doneWithParameters(); - return f.optimize(session); - } - case Value.TIME: - if (r == Value.TIME || r == Value.TIMESTAMP_TZ) { - type = TypeInfo.getTypeInfo(r); - return this; - } else { // DATE, TIMESTAMP - type = TypeInfo.TYPE_TIMESTAMP; - return this; - } + return new DateTimeFunction(DateTimeFunction.DATEADD, DateTimeFunction.SECOND, + new BinaryOperation(OpType.MULTIPLY, ValueExpression.get(ValueInteger.get(60 * 60 * 24)), + left), right).optimize(session); } break; + } case MINUS: switch (l) { case Value.DATE: case Value.TIMESTAMP: case Value.TIMESTAMP_TZ: switch (r) { - case Value.INT: { + case Value.INTEGER: { + if (forcedType != null) { + throw getUnexpectedForcedTypeException(); + } // Oracle date subtract - Function f = Function.getFunction(session.getDatabase(), "DATEADD"); - f.setParameter(0, ValueExpression.get(ValueString.get("DAY"))); - right = new UnaryOperation(right); - right = right.optimize(session); - f.setParameter(1, right); - f.setParameter(2, left); - f.doneWithParameters(); - return f.optimize(session); + return new DateTimeFunction(DateTimeFunction.DATEADD, DateTimeFunction.DAY, + new UnaryOperation(right), left).optimize(session); } - case Value.DECIMAL: - case Value.FLOAT: - case Value.DOUBLE: { + case Value.NUMERIC: + case Value.REAL: + case Value.DOUBLE: + case Value.DECFLOAT: { + if (forcedType != null) { + throw 
getUnexpectedForcedTypeException(); + } // Oracle date subtract - Function f = Function.getFunction(session.getDatabase(), "DATEADD"); - f.setParameter(0, ValueExpression.get(ValueString.get("SECOND"))); - right = new BinaryOperation(OpType.MULTIPLY, ValueExpression.get(ValueInt - .get(60 * 60 * 24)), right); - right = new UnaryOperation(right); - right = right.optimize(session); - f.setParameter(1, right); - f.setParameter(2, left); - f.doneWithParameters(); - return f.optimize(session); + return new DateTimeFunction(DateTimeFunction.DATEADD, DateTimeFunction.SECOND, + new BinaryOperation(OpType.MULTIPLY, ValueExpression.get(ValueInteger.get(-60 * 60 * 24)), + right), left).optimize(session); } case Value.TIME: - type = TypeInfo.TYPE_TIMESTAMP; - return this; + case Value.TIME_TZ: case Value.DATE: case Value.TIMESTAMP: case Value.TIMESTAMP_TZ: - return new IntervalOperation(IntervalOpType.DATETIME_MINUS_DATETIME, left, right); + return new IntervalOperation(IntervalOpType.DATETIME_MINUS_DATETIME, left, right, forcedType); } break; case Value.TIME: - if (r == Value.TIME) { - return new IntervalOperation(IntervalOpType.DATETIME_MINUS_DATETIME, left, right); + case Value.TIME_TZ: + if (DataType.isDateTimeType(r)) { + return new IntervalOperation(IntervalOpType.DATETIME_MINUS_DATETIME, left, right, forcedType); } break; } @@ -403,7 +419,14 @@ private Expression optimizeDateTime(Session session, int l, int r) { private DbException getUnsupported(int l, int r) { return DbException.getUnsupportedException( - DataType.getDataType(l).name + ' ' + getOperationToken() + ' ' + DataType.getDataType(r).name); + Value.getTypeName(l) + ' ' + getOperationToken() + ' ' + Value.getTypeName(r)); + } + + private DbException getUnexpectedForcedTypeException() { + StringBuilder builder = getUnenclosedSQL(new StringBuilder(), TRACE_SQL_FLAGS); + int index = builder.length(); + return DbException.getSyntaxError( + IntervalOperation.getForcedTypeSQL(builder.append(' '), 
forcedType).toString(), index, ""); } private void swap() { @@ -412,48 +435,13 @@ private void swap() { right = temp; } - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - right.setEvaluatable(tableFilter, b); - } - - @Override - public TypeInfo getType() { - return type; - } - - @Override - public void updateAggregate(Session session, int stage) { - left.updateAggregate(session, stage); - right.updateAggregate(session, stage); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return left.isEverything(visitor) && right.isEverything(visitor); - } - - @Override - public int getCost() { - return left.getCost() + right.getCost() + 1; - } - - @Override - public int getSubexpressionCount() { - return 2; - } - - @Override - public Expression getSubexpression(int index) { - switch (index) { - case 0: - return left; - case 1: - return right; - default: - throw new IndexOutOfBoundsException(); - } + /** + * Returns the type of this binary operation. + * + * @return the type of this binary operation + */ + public OpType getOperationType() { + return opType; } } diff --git a/h2/src/main/org/h2/expression/CompatibilityDatePlusTimeOperation.java b/h2/src/main/org/h2/expression/CompatibilityDatePlusTimeOperation.java new file mode 100644 index 0000000000..f1f4132788 --- /dev/null +++ b/h2/src/main/org/h2/expression/CompatibilityDatePlusTimeOperation.java @@ -0,0 +1,117 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression; + +import static org.h2.util.DateTimeUtils.NANOS_PER_DAY; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDate; +import org.h2.value.ValueNull; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + * A compatibility mathematical operation with datetime values. + */ +public class CompatibilityDatePlusTimeOperation extends Operation2 { + + public CompatibilityDatePlusTimeOperation(Expression left, Expression right) { + super(left, right); + TypeInfo l = left.getType(), r = right.getType(); + int t; + switch (l.getValueType()) { + case Value.TIMESTAMP_TZ: + if (r.getValueType() == Value.TIME_TZ) { + throw DbException.getUnsupportedException("TIMESTAMP WITH TIME ZONE + TIME WITH TIME ZONE"); + } + //$FALL-THROUGH$ + case Value.TIME: + t = r.getValueType() == Value.DATE ? Value.TIMESTAMP : l.getValueType(); + break; + case Value.TIME_TZ: + if (r.getValueType() == Value.TIME_TZ) { + throw DbException.getUnsupportedException("TIME WITH TIME ZONE + TIME WITH TIME ZONE"); + } + t = r.getValueType() == Value.DATE ? Value.TIMESTAMP_TZ : l.getValueType(); + break; + case Value.TIMESTAMP: + t = r.getValueType() == Value.TIME_TZ ? 
Value.TIMESTAMP_TZ : Value.TIMESTAMP; + break; + default: + throw DbException.getUnsupportedException( + Value.getTypeName(l.getValueType()) + " + " + Value.getTypeName(r.getValueType())); + } + type = TypeInfo.getTypeInfo(t, 0L, Math.max(l.getScale(), r.getScale()), null); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(" + "); + return right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + Value r = right.getValue(session); + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + switch (l.getValueType()) { + case Value.TIME: + if (r.getValueType() == Value.DATE) { + return ValueTimestamp.fromDateValueAndNanos(((ValueDate) r).getDateValue(), // + ((ValueTime) l).getNanos()); + } + break; + case Value.TIME_TZ: + if (r.getValueType() == Value.DATE) { + ValueTimeTimeZone t = (ValueTimeTimeZone) l; + return ValueTimestampTimeZone.fromDateValueAndNanos(((ValueDate) r).getDateValue(), t.getNanos(), + t.getTimeZoneOffsetSeconds()); + } + break; + case Value.TIMESTAMP: { + if (r.getValueType() == Value.TIME_TZ) { + ValueTimestamp ts = (ValueTimestamp) l; + l = ValueTimestampTimeZone.fromDateValueAndNanos(ts.getDateValue(), ts.getTimeNanos(), + ((ValueTimeTimeZone) r).getTimeZoneOffsetSeconds()); + } + break; + } + } + long[] a = DateTimeUtils.dateAndTimeFromValue(l, session); + long dateValue = a[0], timeNanos = a[1] + + (r instanceof ValueTime ? 
((ValueTime) r).getNanos() : ((ValueTimeTimeZone) r).getNanos()); + if (timeNanos >= NANOS_PER_DAY) { + timeNanos -= NANOS_PER_DAY; + dateValue = DateTimeUtils.incrementDateValue(dateValue); + } + return DateTimeUtils.dateTimeToValue(l, dateValue, timeNanos); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + if (left.isConstant() && right.isConstant()) { + return ValueExpression.get(getValue(session)); + } + return this; + } + +} diff --git a/h2/src/main/org/h2/expression/ConcatenationOperation.java b/h2/src/main/org/h2/expression/ConcatenationOperation.java new file mode 100644 index 0000000000..18baaceb53 --- /dev/null +++ b/h2/src/main/org/h2/expression/ConcatenationOperation.java @@ -0,0 +1,281 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import java.util.Arrays; + +import org.h2.engine.SessionLocal; +import org.h2.expression.function.CastSpecification; +import org.h2.expression.function.ConcatFunction; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; + +/** + * Character string concatenation as in {@code 'Hello' || 'World'}, binary + * string concatenation as in {@code X'01' || X'AB'} or an array concatenation + * as in {@code ARRAY[1, 2] || 3}. 
+ */ +public final class ConcatenationOperation extends OperationN { + + public ConcatenationOperation() { + super(new Expression[4]); + } + + public ConcatenationOperation(Expression op1, Expression op2) { + super(new Expression[] { op1, op2 }); + argsCount = 2; + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + for (int i = 0, l = args.length; i < l; i++) { + if (i > 0) { + builder.append(" || "); + } + args[i].getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + return builder; + } + + @Override + public Value getValue(SessionLocal session) { + int l = args.length; + if (l == 2) { + Value v1 = args[0].getValue(session); + v1 = v1.convertTo(type, session); + if (v1 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + Value v2 = args[1].getValue(session); + v2 = v2.convertTo(type, session); + if (v2 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return getValue(session, v1, v2); + } + return getValue(session, l); + } + + private Value getValue(SessionLocal session, Value l, Value r) { + int valueType = type.getValueType(); + if (valueType == Value.VARCHAR) { + String s1 = l.getString(), s2 = r.getString(); + return ValueVarchar.get(new StringBuilder(s1.length() + s2.length()).append(s1).append(s2).toString()); + } else if (valueType == Value.VARBINARY) { + byte[] leftBytes = l.getBytesNoCopy(), rightBytes = r.getBytesNoCopy(); + int leftLength = leftBytes.length, rightLength = rightBytes.length; + byte[] bytes = Arrays.copyOf(leftBytes, leftLength + rightLength); + System.arraycopy(rightBytes, 0, bytes, leftLength, rightLength); + return ValueVarbinary.getNoCopy(bytes); + } else { + Value[] leftValues = ((ValueArray) l).getList(), rightValues = ((ValueArray) r).getList(); + int leftLength = leftValues.length, rightLength = rightValues.length; + Value[] values = Arrays.copyOf(leftValues, leftLength + rightLength); + 
System.arraycopy(rightValues, 0, values, leftLength, rightLength); + return ValueArray.get((TypeInfo) type.getExtTypeInfo(), values, session); + } + } + + private Value getValue(SessionLocal session, int l) { + Value[] values = new Value[l]; + for (int i = 0; i < l; i++) { + Value v = args[i].getValue(session).convertTo(type, session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + values[i] = v; + } + int valueType = type.getValueType(); + if (valueType == Value.VARCHAR) { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < l; i++) { + builder.append(values[i].getString()); + } + return ValueVarchar.get(builder.toString(), session); + } else if (valueType == Value.VARBINARY) { + int totalLength = 0; + for (int i = 0; i < l; i++) { + totalLength += values[i].getBytesNoCopy().length; + } + byte[] v = new byte[totalLength]; + int offset = 0; + for (int i = 0; i < l; i++) { + byte[] a = values[i].getBytesNoCopy(); + int length = a.length; + System.arraycopy(a, 0, v, offset, length); + offset += length; + } + return ValueVarbinary.getNoCopy(v); + } else { + int totalLength = 0; + for (int i = 0; i < l; i++) { + totalLength += ((ValueArray) values[i]).getList().length; + } + Value[] v = new Value[totalLength]; + int offset = 0; + for (int i = 0; i < l; i++) { + Value[] a = ((ValueArray) values[i]).getList(); + int length = a.length; + System.arraycopy(a, 0, v, offset, length); + offset += length; + } + return ValueArray.get((TypeInfo) type.getExtTypeInfo(), v, session); + } + } + + @Override + public Expression optimize(SessionLocal session) { + determineType(session); + inlineArguments(); + if (type.getValueType() == Value.VARCHAR && session.getMode().treatEmptyStringsAsNull) { + return new ConcatFunction(ConcatFunction.CONCAT, args).optimize(session); + } + int l = args.length; + boolean allConst = true, anyConst = false; + for (int i = 0; i < l; i++) { + if (args[i].isConstant()) { + anyConst = true; + } else { + allConst = 
false; + } + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + if (anyConst) { + int offset = 0; + for (int i = 0; i < l; i++) { + Expression arg1 = args[i]; + if (arg1.isConstant()) { + Value v1 = arg1.getValue(session).convertTo(type, session); + if (v1 == ValueNull.INSTANCE) { + return TypedValueExpression.get(ValueNull.INSTANCE, type); + } + if (isEmpty(v1)) { + continue; + } + for (Expression arg2; i + 1 < l && (arg2 = args[i + 1]).isConstant(); i++) { + Value v2 = arg2.getValue(session).convertTo(type, session); + if (v2 == ValueNull.INSTANCE) { + return TypedValueExpression.get(ValueNull.INSTANCE, type); + } + if (!isEmpty(v2)) { + v1 = getValue(session, v1, v2); + } + } + arg1 = ValueExpression.get(v1); + } + args[offset++] = arg1; + } + if (offset == 1) { + Expression arg = args[0]; + TypeInfo argType = arg.getType(); + if (TypeInfo.areSameTypes(type, argType)) { + return arg; + } + return new CastSpecification(arg, type); + } + argsCount = offset; + doneWithParameters(); + } + return this; + } + + private void determineType(SessionLocal session) { + int l = args.length; + boolean anyArray = false, allBinary = true, allCharacter = true; + for (int i = 0; i < l; i++) { + Expression arg = args[i].optimize(session); + args[i] = arg; + int t = arg.getType().getValueType(); + if (t == Value.ARRAY) { + anyArray = true; + allBinary = allCharacter = false; + } else if (t == Value.NULL) { + // Ignore NULL literals + } else if (DataType.isBinaryStringType(t)) { + allCharacter = false; + } else if (DataType.isCharacterStringType(t)) { + allBinary = false; + } else { + allBinary = allCharacter = false; + } + } + if (anyArray) { + type = TypeInfo.getTypeInfo(Value.ARRAY, -1, 0, TypeInfo.getHigherType(args).getExtTypeInfo()); + } else if (allBinary) { + long precision = getPrecision(0); + for (int i = 1; i < l; i++) { + precision = DataType.addPrecision(precision, getPrecision(i)); + } + type = 
TypeInfo.getTypeInfo(Value.VARBINARY, precision, 0, null); + } else if (allCharacter) { + long precision = getPrecision(0); + for (int i = 1; i < l; i++) { + precision = DataType.addPrecision(precision, getPrecision(i)); + } + type = TypeInfo.getTypeInfo(Value.VARCHAR, precision, 0, null); + } else { + type = TypeInfo.TYPE_VARCHAR; + } + } + + private long getPrecision(int i) { + TypeInfo t = args[i].getType(); + return t.getValueType() != Value.NULL ? t.getPrecision() : 0L; + } + + private void inlineArguments() { + int valueType = type.getValueType(); + int l = args.length; + int count = l; + for (int i = 0; i < l; i++) { + Expression arg = args[i]; + if (arg instanceof ConcatenationOperation && arg.getType().getValueType() == valueType) { + count += arg.getSubexpressionCount() - 1; + } + } + if (count > l) { + Expression[] newArguments = new Expression[count]; + for (int i = 0, offset = 0; i < l; i++) { + Expression arg = args[i]; + if (arg instanceof ConcatenationOperation && arg.getType().getValueType() == valueType) { + ConcatenationOperation c = (ConcatenationOperation) arg; + Expression[] innerArgs = c.args; + int innerLength = innerArgs.length; + System.arraycopy(innerArgs, 0, newArguments, offset, innerLength); + offset += innerLength; + } else { + newArguments[offset++] = arg; + } + } + args = newArguments; + argsCount = count; + } + } + + private static boolean isEmpty(Value v) { + int valueType = v.getValueType(); + if (valueType == Value.VARCHAR) { + return v.getString().isEmpty(); + } else if (valueType == Value.VARBINARY) { + return v.getBytesNoCopy().length == 0; + } else { + return ((ValueArray) v).getList().length == 0; + } + } + +} diff --git a/h2/src/main/org/h2/expression/DomainValueExpression.java b/h2/src/main/org/h2/expression/DomainValueExpression.java new file mode 100644 index 0000000000..e1831203e0 --- /dev/null +++ b/h2/src/main/org/h2/expression/DomainValueExpression.java @@ -0,0 +1,78 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.api.ErrorCode; +import org.h2.constraint.DomainColumnResolver; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.util.ParserUtil; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * An expression representing a value for domain constraint. + */ +public final class DomainValueExpression extends Operation0 { + + private DomainColumnResolver columnResolver; + + public DomainValueExpression() { + } + + @Override + public Value getValue(SessionLocal session) { + return columnResolver.getValue(null); + } + + @Override + public TypeInfo getType() { + return columnResolver.getValueType(); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + if (resolver instanceof DomainColumnResolver) { + columnResolver = (DomainColumnResolver) resolver; + } + } + + @Override + public Expression optimize(SessionLocal session) { + if (columnResolver == null) { + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "VALUE"); + } + return this; + } + + @Override + public boolean isValueSet() { + return columnResolver.getValue(null) != null; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (columnResolver != null) { + String name = columnResolver.getColumnName(); + if (name != null) { + return ParserUtil.quoteIdentifier(builder, name, sqlFlags); + } + } + return builder.append("VALUE"); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return true; + } + + @Override + public int getCost() { + return 1; + } + +} diff --git a/h2/src/main/org/h2/expression/Expression.java b/h2/src/main/org/h2/expression/Expression.java index 10ea90f70f..7718e6e6f0 100644 --- a/h2/src/main/org/h2/expression/Expression.java 
+++ b/h2/src/main/org/h2/expression/Expression.java @@ -1,26 +1,30 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; import java.util.List; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.result.ResultInterface; +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.engine.SessionLocal; +import org.h2.expression.function.NamedExpression; +import org.h2.message.DbException; import org.h2.table.Column; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; +import org.h2.util.HasSQL; +import org.h2.util.StringUtils; import org.h2.value.TypeInfo; +import org.h2.value.Typed; import org.h2.value.Value; -import org.h2.value.ValueCollectionBase; /** * An expression is a operation, a value, or a function in a query. */ -public abstract class Expression { +public abstract class Expression implements HasSQL, Typed { /** * Initial state for {@link #mapColumns(ColumnResolver, int, int)}. @@ -39,6 +43,22 @@ public abstract class Expression { */ public static final int MAP_IN_AGGREGATE = 2; + /** + * Wrap expression in parentheses only if it can't be safely included into + * other expressions without them. + */ + public static final int AUTO_PARENTHESES = 0; + + /** + * Wrap expression in parentheses unconditionally. + */ + public static final int WITH_PARENTHESES = 1; + + /** + * Do not wrap expression in parentheses. 
+ */ + public static final int WITHOUT_PARENTHESES = 2; + private boolean addedToFilter; /** @@ -46,16 +66,18 @@ public abstract class Expression { * * @param builder the builder to append the SQL to * @param expressions the list of expressions - * @param alwaysQuote quote all identifiers + * @param sqlFlags formatting flags + * @return the specified string builder */ - public static void writeExpressions(StringBuilder builder, List expressions, - boolean alwaysQuote) { + public static StringBuilder writeExpressions(StringBuilder builder, List expressions, + int sqlFlags) { for (int i = 0, length = expressions.size(); i < length; i++) { if (i > 0) { builder.append(", "); } - expressions.get(i).getSQL(builder, alwaysQuote); + expressions.get(i).getUnenclosedSQL(builder, sqlFlags); } + return builder; } /** @@ -63,9 +85,10 @@ public static void writeExpressions(StringBuilder builder, List 0) { builder.append(", "); @@ -74,9 +97,10 @@ public static void writeExpressions(StringBuilder builder, Expression[] expressi if (e == null) { builder.append("DEFAULT"); } else { - e.getSQL(builder, alwaysQuote); + e.getUnenclosedSQL(builder, sqlFlags); } } + return builder; } /** @@ -85,14 +109,15 @@ public static void writeExpressions(StringBuilder builder, Expression[] expressi * @param session the session * @return the result */ - public abstract Value getValue(Session session); + public abstract Value getValue(SessionLocal session); /** - * Returns the data type. The data type may not be known before the + * Returns the data type. The data type may be unknown before the * optimization phase. 
* * @return the data type */ + @Override public abstract TypeInfo getType(); /** @@ -111,7 +136,21 @@ public static void writeExpressions(StringBuilder builder, Expression[] expressi * @param session the session * @return the optimized expression */ - public abstract Expression optimize(Session session); + public abstract Expression optimize(SessionLocal session); + + /** + * Try to optimize or remove the condition. + * + * @param session the session + * @return the optimized condition, or {@code null} + */ + public final Expression optimizeCondition(SessionLocal session) { + Expression e = optimize(session); + if (e.isConstant()) { + return e.getBooleanValue(session) ? null : ValueExpression.FALSE; + } + return e; + } /** * Tell the expression columns whether the table filter can return values @@ -122,51 +161,86 @@ public static void writeExpressions(StringBuilder builder, Expression[] expressi */ public abstract void setEvaluatable(TableFilter tableFilter, boolean value); + @Override + public final String getSQL(int sqlFlags) { + return getSQL(new StringBuilder(), sqlFlags, AUTO_PARENTHESES).toString(); + } + + @Override + public final StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + /** - * Get the SQL statement of this expression. - * This may not always be the original SQL statement, - * specially after optimization. + * Get the SQL statement of this expression. This may not always be the + * original SQL statement, especially after optimization. 
* - * @param alwaysQuote quote all identifiers + * @param sqlFlags + * formatting flags + * @param parentheses + * parentheses mode * @return the SQL statement */ - public String getSQL(boolean alwaysQuote) { - return getSQL(new StringBuilder(), alwaysQuote).toString(); + public final String getSQL(int sqlFlags, int parentheses) { + return getSQL(new StringBuilder(), sqlFlags, parentheses).toString(); } /** - * Appends the SQL statement of this expression to the specified builder. - * This may not always be the original SQL statement, specially after - * optimization. + * Get the SQL statement of this expression. This may not always be the + * original SQL statement, especially after optimization. * * @param builder * string builder - * @param alwaysQuote quote all identifiers + * @param sqlFlags + * formatting flags + * @param parentheses + * parentheses mode * @return the specified string builder */ - public abstract StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote); + public final StringBuilder getSQL(StringBuilder builder, int sqlFlags, int parentheses) { + return parentheses == WITH_PARENTHESES || parentheses != WITHOUT_PARENTHESES && needParentheses() + ? getUnenclosedSQL(builder.append('('), sqlFlags).append(')') + : getUnenclosedSQL(builder, sqlFlags); + } + + /** + * Returns whether this expressions needs to be wrapped in parentheses when + * it is used as an argument of other expressions. + * + * @return {@code true} if it is + */ + public boolean needParentheses() { + return false; + } /** - * Appends the SQL statement of this expression to the specified builder. - * This may not always be the original SQL statement, specially after - * optimization. Enclosing '(' and ')' are removed. + * Get the SQL statement of this expression. This may not always be the + * original SQL statement, especially after optimization. Enclosing '(' and + * ')' are always appended. 
* * @param builder * string builder - * @param alwaysQuote - * quote all identifiers + * @param sqlFlags + * formatting flags * @return the specified string builder */ - public StringBuilder getUnenclosedSQL(StringBuilder builder, boolean alwaysQuote) { - int first = builder.length(); - int last = getSQL(builder, alwaysQuote).length() - 1; - if (last > first && builder.charAt(first) == '(' && builder.charAt(last) == ')') { - builder.setLength(last); - builder.deleteCharAt(first); - } - return builder; + public final StringBuilder getEnclosedSQL(StringBuilder builder, int sqlFlags) { + return getUnenclosedSQL(builder.append('('), sqlFlags).append(')'); } + /** + * Get the SQL statement of this expression. This may not always be the + * original SQL statement, especially after optimization. Enclosing '(' and + * ')' are never appended. + * + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @return the specified string builder + */ + public abstract StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags); + /** * Update an aggregate value. This method is called at statement execution * time. It is usually called once for each row, but if the expression is @@ -177,7 +251,7 @@ public StringBuilder getUnenclosedSQL(StringBuilder builder, boolean alwaysQuote * @param session the session * @param stage select stage */ - public abstract void updateAggregate(Session session, int stage); + public abstract void updateAggregate(SessionLocal session, int stage); /** * Check if this expression and all sub-expressions can fulfill a criteria. @@ -199,13 +273,13 @@ public StringBuilder getUnenclosedSQL(StringBuilder builder, boolean alwaysQuote /** * If it is possible, return the negated expression. This is used - * to optimize NOT expressions: NOT ID>10 can be converted to + * to optimize NOT expressions: NOT ID>10 can be converted to * ID<=10. Returns null if negating is not possible. 
* * @param session the session * @return the negated expression, or null */ - public Expression getNotIfPossible(@SuppressWarnings("unused") Session session) { + public Expression getNotIfPossible(@SuppressWarnings("unused") SessionLocal session) { // by default it is not possible return null; } @@ -220,30 +294,29 @@ public boolean isConstant() { } /** - * Is the value of a parameter set. + * Check if this expression will always return the NULL value. * - * @return true if set + * @return if the expression is constant NULL value */ - public boolean isValueSet() { + public boolean isNullConstant() { return false; } /** - * Check if this is an auto-increment column. + * Is the value of a parameter set. * - * @return true if it is an auto-increment column + * @return true if set */ - public boolean isAutoIncrement() { + public boolean isValueSet() { return false; } /** - * Check if this expression is an auto-generated key expression such as next - * value from a sequence. + * Check if this is an identity column. * - * @return whether this expression is an auto-generated key expression + * @return true if it is an identity column */ - public boolean isGeneratedKey() { + public boolean isIdentity() { return false; } @@ -255,8 +328,8 @@ public boolean isGeneratedKey() { * @param session the session * @return the result */ - public boolean getBooleanValue(Session session) { - return getValue(session).getBoolean(); + public boolean getBooleanValue(SessionLocal session) { + return getValue(session).isTrue(); } /** @@ -266,17 +339,19 @@ public boolean getBooleanValue(Session session) { * @param filter the table filter */ @SuppressWarnings("unused") - public void createIndexConditions(Session session, TableFilter filter) { + public void createIndexConditions(SessionLocal session, TableFilter filter) { // default is do nothing } /** * Get the column name or alias name of this expression. 
* + * @param session the session + * @param columnIndex 0-based column index * @return the column name */ - public String getColumnName() { - return getAlias(); + public String getColumnName(SessionLocal session, int columnIndex) { + return getAlias(session, columnIndex); } /** @@ -320,10 +395,55 @@ public String getTableAlias() { * Get the alias name of a column or SQL expression * if it is not an aliased expression. * + * @param session the session + * @param columnIndex 0-based column index * @return the alias name */ - public String getAlias() { - return getUnenclosedSQL(new StringBuilder(), false).toString(); + public String getAlias(SessionLocal session, int columnIndex) { + switch (session.getMode().expressionNames) { + default: { + String sql = getSQL(QUOTE_ONLY_WHEN_REQUIRED | NO_CASTS, WITHOUT_PARENTHESES); + if (sql.length() <= Constants.MAX_IDENTIFIER_LENGTH) { + return sql; + } + } + //$FALL-THROUGH$ + case C_NUMBER: + return "C" + (columnIndex + 1); + case EMPTY: + return ""; + case NUMBER: + return Integer.toString(columnIndex + 1); + case POSTGRESQL_STYLE: + if (this instanceof NamedExpression) { + return StringUtils.toLowerEnglish(((NamedExpression) this).getName()); + } + return "?column?"; + } + } + + /** + * Get the column name of this expression for a view. 
+ * + * @param session the session + * @param columnIndex 0-based column index + * @return the column name for a view + */ + public String getColumnNameForView(SessionLocal session, int columnIndex) { + switch (session.getMode().viewExpressionNames) { + case AS_IS: + default: + return getAlias(session, columnIndex); + case EXCEPTION: + throw DbException.get(ErrorCode.COLUMN_ALIAS_IS_NOT_SPECIFIED_1, getTraceSQL()); + case MYSQL_STYLE: { + String name = getSQL(QUOTE_ONLY_WHEN_REQUIRED | NO_CASTS, WITHOUT_PARENTHESES); + if (name.length() > 64) { + name = "Name_exp_" + (columnIndex + 1); + } + return name; + } + } } /** @@ -339,11 +459,9 @@ public Expression getNonAliasExpression() { * Add conditions to a table filter if they can be evaluated. * * @param filter the table filter - * @param outerJoin if the expression is part of an outer join */ - public void addFilterConditions(TableFilter filter, boolean outerJoin) { - if (!addedToFilter && !outerJoin && - isEverything(ExpressionVisitor.EVALUATABLE_VISITOR)) { + public void addFilterConditions(TableFilter filter) { + if (!addedToFilter && isEverything(ExpressionVisitor.EVALUATABLE_VISITOR)) { filter.addFilterCondition(this, false); addedToFilter = true; } @@ -356,77 +474,63 @@ public void addFilterConditions(TableFilter filter, boolean outerJoin) { */ @Override public String toString() { - return getSQL(false); + return getTraceSQL(); } /** - * If this expression consists of column expressions it should return them. + * Returns count of subexpressions. * - * @param session the session - * @return array of expression columns if applicable, null otherwise + * @return count of subexpressions */ - @SuppressWarnings("unused") - public Expression[] getExpressionColumns(Session session) { - return null; + public int getSubexpressionCount() { + return 0; } /** - * Extracts expression columns from ValueArray + * Returns subexpression with specified index. 
* - * @param session the current session - * @param value the value to extract columns from - * @return array of expression columns + * @param index 0-based index + * @return subexpression with specified index, may be null + * @throws IndexOutOfBoundsException if specified index is not valid */ - protected static Expression[] getExpressionColumns(Session session, ValueCollectionBase value) { - Value[] list = value.getList(); - ExpressionColumn[] expr = new ExpressionColumn[list.length]; - for (int i = 0, len = list.length; i < len; i++) { - Value v = list[i]; - Column col = new Column("C" + (i + 1), v.getType()); - expr[i] = new ExpressionColumn(session.getDatabase(), col); - } - return expr; + public Expression getSubexpression(int index) { + throw new IndexOutOfBoundsException(); } /** - * Extracts expression columns from the given result set. + * Return the resulting value of when operand for the current row. * - * @param session the session - * @param result the result - * @return an array of expression columns - */ - public static Expression[] getExpressionColumns(Session session, ResultInterface result) { - int columnCount = result.getVisibleColumnCount(); - Expression[] expressions = new Expression[columnCount]; - Database db = session == null ? null : session.getDatabase(); - for (int i = 0; i < columnCount; i++) { - String name = result.getColumnName(i); - TypeInfo type = result.getColumnType(i); - Column col = new Column(name, type); - Expression expr = new ExpressionColumn(db, col); - expressions[i] = expr; - } - return expressions; + * @param session + * the session + * @param left + * value on the left side + * @return the result + */ + public boolean getWhenValue(SessionLocal session, Value left) { + return session.compareWithNull(left, getValue(session), true) == 0; } /** - * Returns count of subexpressions. + * Appends the SQL statement of this when operand to the specified builder. 
* - * @return count of subexpressions + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @return the specified string builder */ - public int getSubexpressionCount() { - return 0; + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + return getUnenclosedSQL(builder.append(' '), sqlFlags); } /** - * Returns subexpression with specified index. + * Returns whether this expression is a right side of condition in a when + * operand. * - * @param index 0-based index - * @return subexpression with specified index - * @throws IndexOutOfBoundsException if specified index is not valid + * @return {@code true} if it is, {@code false} otherwise */ - public Expression getSubexpression(int index) { - throw new IndexOutOfBoundsException(); + public boolean isWhenConditionOperand() { + return false; } } diff --git a/h2/src/main/org/h2/expression/ExpressionColumn.java b/h2/src/main/org/h2/expression/ExpressionColumn.java index 4e01ebeef0..6a207b29cf 100644 --- a/h2/src/main/org/h2/expression/ExpressionColumn.java +++ b/h2/src/main/org/h2/expression/ExpressionColumn.java @@ -1,18 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; import org.h2.api.ErrorCode; -import org.h2.command.Parser; -import org.h2.command.dml.Select; -import org.h2.command.dml.SelectGroups; -import org.h2.command.dml.SelectListColumnResolver; +import org.h2.command.query.Select; +import org.h2.command.query.SelectGroups; +import org.h2.command.query.SelectListColumnResolver; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.expression.analysis.DataAnalysisOperation; import org.h2.expression.condition.Comparison; +import org.h2.expression.function.CurrentDateTimeValueFunction; import org.h2.index.IndexCondition; import org.h2.message.DbException; import org.h2.schema.Constant; @@ -21,63 +22,131 @@ import org.h2.table.ColumnResolver; import org.h2.table.Table; import org.h2.table.TableFilter; -import org.h2.value.ExtTypeInfo; +import org.h2.util.ParserUtil; +import org.h2.util.StringUtils; import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueBigint; import org.h2.value.ValueBoolean; -import org.h2.value.ValueNull; +import org.h2.value.ValueDecfloat; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueTinyint; /** - * A expression that represents a column of a table or view. + * A column reference expression that represents a column of a table or view. 
*/ -public class ExpressionColumn extends Expression { +public final class ExpressionColumn extends Expression { private final Database database; private final String schemaName; private final String tableAlias; private final String columnName; private final boolean rowId; + private final boolean quotedName; private ColumnResolver columnResolver; private int queryLevel; private Column column; - private String derivedName; + /** + * Creates a new column reference for metadata of queries; should not be + * used as normal expression. + * + * @param database + * the database + * @param column + * the column + */ public ExpressionColumn(Database database, Column column) { this.database = database; this.column = column; - this.schemaName = null; - this.tableAlias = null; - this.columnName = null; - this.rowId = column.isRowId(); + columnName = tableAlias = schemaName = null; + rowId = column.isRowId(); + quotedName = true; } - public ExpressionColumn(Database database, String schemaName, - String tableAlias, String columnName, boolean rowId) { + /** + * Creates a new instance of column reference for regular columns as normal + * expression. + * + * @param database + * the database + * @param schemaName + * the schema name, or {@code null} + * @param tableAlias + * the table alias name, table name, or {@code null} + * @param columnName + * the column name + */ + public ExpressionColumn(Database database, String schemaName, String tableAlias, String columnName) { + this(database, schemaName, tableAlias, columnName, true); + } + + /** + * Creates a new instance of column reference for regular columns as normal + * expression. 
+ * + * @param database + * the database + * @param schemaName + * the schema name, or {@code null} + * @param tableAlias + * the table alias name, table name, or {@code null} + * @param columnName + * the column name + * @param quotedName + * whether name was quoted + */ + public ExpressionColumn(Database database, String schemaName, String tableAlias, String columnName, + boolean quotedName) { this.database = database; this.schemaName = schemaName; this.tableAlias = tableAlias; this.columnName = columnName; - this.rowId = rowId; + rowId = false; + this.quotedName = quotedName; + } + + /** + * Creates a new instance of column reference for {@code _ROWID_} column as + * normal expression. + * + * @param database + * the database + * @param schemaName + * the schema name, or {@code null} + * @param tableAlias + * the table alias name, table name, or {@code null} + */ + public ExpressionColumn(Database database, String schemaName, String tableAlias) { + this.database = database; + this.schemaName = schemaName; + this.tableAlias = tableAlias; + columnName = Column.ROWID; + quotedName = rowId = true; } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { if (schemaName != null) { - Parser.quoteIdentifier(builder, schemaName, alwaysQuote).append('.'); + ParserUtil.quoteIdentifier(builder, schemaName, sqlFlags).append('.'); } if (tableAlias != null) { - Parser.quoteIdentifier(builder, tableAlias, alwaysQuote).append('.'); + ParserUtil.quoteIdentifier(builder, tableAlias, sqlFlags).append('.'); } if (column != null) { - if (derivedName != null) { - Parser.quoteIdentifier(builder, derivedName, alwaysQuote); + if (columnResolver != null && columnResolver.hasDerivedColumnList()) { + ParserUtil.quoteIdentifier(builder, columnResolver.getColumnName(column), sqlFlags); } else { - column.getSQL(builder, alwaysQuote); + column.getSQL(builder, sqlFlags); } } else if 
(rowId) { builder.append(columnName); } else { - Parser.quoteIdentifier(builder, columnName, alwaysQuote); + ParserUtil.quoteIdentifier(builder, columnName, sqlFlags); } return builder; } @@ -88,12 +157,10 @@ public TableFilter getTableFilter() { @Override public void mapColumns(ColumnResolver resolver, int level, int state) { - if (tableAlias != null && !database.equalsIdentifiers( - tableAlias, resolver.getTableAlias())) { + if (tableAlias != null && !database.equalsIdentifiers(tableAlias, resolver.getTableAlias())) { return; } - if (schemaName != null && !database.equalsIdentifiers( - schemaName, resolver.getSchemaName())) { + if (schemaName != null && !database.equalsIdentifiers(schemaName, resolver.getSchemaName())) { return; } if (rowId) { @@ -103,26 +170,14 @@ public void mapColumns(ColumnResolver resolver, int level, int state) { } return; } - for (Column col : resolver.getColumns()) { - String n = resolver.getDerivedColumnName(col); - boolean derived; - if (n == null) { - n = col.getName(); - derived = false; - } else { - derived = true; - } - if (database.equalsIdentifiers(columnName, n)) { - mapColumn(resolver, col, level); - if (derived) { - derivedName = n; - } - return; - } + Column col = resolver.findColumn(columnName); + if (col != null) { + mapColumn(resolver, col, level); + return; } Column[] columns = resolver.getSystemColumns(); for (int i = 0; columns != null && i < columns.length; i++) { - Column col = columns[i]; + col = columns[i]; if (database.equalsIdentifiers(columnName, col.getName())) { mapColumn(resolver, col, level); return; @@ -145,7 +200,7 @@ private void mapColumn(ColumnResolver resolver, Column col, int level) { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { if (columnResolver == null) { Schema schema = session.getDatabase().findSchema( tableAlias == null ? 
session.getCurrentSchemaName() : tableAlias); @@ -155,32 +210,52 @@ public Expression optimize(Session session) { return constant.getValue(); } } - throw getColumnException(ErrorCode.COLUMN_NOT_FOUND_1); + return optimizeOther(); } return columnResolver.optimize(this, column); } + private Expression optimizeOther() { + if (tableAlias == null && !quotedName) { + switch (StringUtils.toUpperEnglish(columnName)) { + case "SYSDATE": + case "TODAY": + return new CurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, -1); + case "SYSTIME": + return new CurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, -1); + case "SYSTIMESTAMP": + return new CurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP, -1); + } + } + throw getColumnException(ErrorCode.COLUMN_NOT_FOUND_1); + } + /** * Get exception to throw, with column and table info added + * * @param code SQL error code * @return DbException */ public DbException getColumnException(int code) { String name = columnName; if (tableAlias != null) { - name = tableAlias + '.' + name; if (schemaName != null) { - name = schemaName + '.' + name; + name = schemaName + '.' + tableAlias + '.' + name; + } else { + name = tableAlias + '.' 
+ name; } } return DbException.get(code, name); } @Override - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { Select select = columnResolver.getSelect(); if (select == null) { - throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getSQL(false)); + throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getTraceSQL()); + } + if (stage == DataAnalysisOperation.STAGE_RESET) { + return; } SelectGroups groupData = select.getGroupDataIfCurrent(false); if (groupData == null) { @@ -191,14 +266,14 @@ public void updateAggregate(Session session, int stage) { if (v == null) { groupData.setCurrentGroupExprData(this, columnResolver.getValue(column)); } else if (!select.isGroupWindowStage2()) { - if (!database.areEqual(columnResolver.getValue(column), v)) { - throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getSQL(false)); + if (!session.areEqual(columnResolver.getValue(column), v)) { + throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getTraceSQL()); } } } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { Select select = columnResolver.getSelect(); if (select != null) { SelectGroups groupData = select.getGroupDataIfCurrent(false); @@ -208,25 +283,16 @@ public Value getValue(Session session) { return v; } if (select.isGroupWindowStage2()) { - throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getSQL(false)); + throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getTraceSQL()); } } } Value value = columnResolver.getValue(column); if (value == null) { if (select == null) { - throw DbException.get(ErrorCode.NULL_NOT_ALLOWED, getSQL(false)); + throw DbException.get(ErrorCode.NULL_NOT_ALLOWED, getTraceSQL()); } else { - throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getSQL(false)); - } - } - /* - * ENUM values are stored as integers. 
- */ - if (value != ValueNull.INSTANCE) { - ExtTypeInfo extTypeInfo = column.getType().getExtTypeInfo(); - if (extTypeInfo != null) { - return extTypeInfo.cast(value); + throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getTraceSQL()); } } return value; @@ -234,7 +300,7 @@ public Value getValue(Session session) { @Override public TypeInfo getType() { - return column == null ? TypeInfo.TYPE_UNKNOWN : column.getType(); + return column != null ? column.getType() : rowId ? TypeInfo.TYPE_BIGINT : TypeInfo.TYPE_UNKNOWN; } @Override @@ -254,8 +320,14 @@ public String getOriginalTableAliasName() { } @Override - public String getColumnName() { - return columnName != null ? columnName : column.getName(); + public String getColumnName(SessionLocal session, int columnIndex) { + if (column != null) { + if (columnResolver != null) { + return columnResolver.getColumnName(column); + } + return column.getName(); + } + return columnName; } @Override @@ -271,25 +343,27 @@ public String getTableName() { } @Override - public String getAlias() { + public String getAlias(SessionLocal session, int columnIndex) { if (column != null) { if (columnResolver != null) { - String name = columnResolver.getDerivedColumnName(column); - if (name != null) { - return name; - } + return columnResolver.getColumnName(column); } return column.getName(); } if (tableAlias != null) { - return tableAlias + "." + columnName; + return tableAlias + '.' 
+ columnName; } return columnName; } @Override - public boolean isAutoIncrement() { - return column.getSequence() != null; + public String getColumnNameForView(SessionLocal session, int columnIndex) { + return getAlias(session, columnIndex); + } + + @Override + public boolean isIdentity() { + return column.isIdentity(); } @Override @@ -302,10 +376,6 @@ public boolean isEverything(ExpressionVisitor visitor) { switch (visitor.getType()) { case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: return false; - case ExpressionVisitor.READONLY: - case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.QUERY_COMPARABLE: - return true; case ExpressionVisitor.INDEPENDENT: return this.queryLevel < visitor.getQueryLevel(); case ExpressionVisitor.EVALUATABLE: @@ -330,18 +400,35 @@ public boolean isEverything(ExpressionVisitor visitor) { return true; case ExpressionVisitor.GET_COLUMNS1: if (column == null) { - throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, getSQL(false)); + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, getTraceSQL()); } visitor.addColumn1(column); return true; case ExpressionVisitor.GET_COLUMNS2: if (column == null) { - throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, getSQL(false)); + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, getTraceSQL()); } visitor.addColumn2(column); return true; + case ExpressionVisitor.DECREMENT_QUERY_LEVEL: { + if (column == null) { + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, getTraceSQL()); + } + if (visitor.getColumnResolvers().contains(columnResolver)) { + int decrement = visitor.getQueryLevel(); + if (decrement > 0) { + if (queryLevel > 0) { + queryLevel--; + return true; + } + throw DbException.getInternalError("queryLevel=0"); + } + return queryLevel > 0; + } + } + //$FALL-THROUGH$ default: - throw DbException.throwInternalError("type=" + visitor.getType()); + return true; } } @@ -351,20 +438,57 @@ public int getCost() { } @Override - public void createIndexConditions(Session session, TableFilter 
filter) { + public void createIndexConditions(SessionLocal session, TableFilter filter) { TableFilter tf = getTableFilter(); if (filter == tf && column.getType().getValueType() == Value.BOOLEAN) { - IndexCondition cond = IndexCondition.get( - Comparison.EQUAL, this, ValueExpression.get( - ValueBoolean.TRUE)); - filter.addIndexCondition(cond); + filter.addIndexCondition(IndexCondition.get(Comparison.EQUAL, this, ValueExpression.TRUE)); } } @Override - public Expression getNotIfPossible(Session session) { - return new Comparison(session, Comparison.EQUAL, this, - ValueExpression.get(ValueBoolean.FALSE)); + public Expression getNotIfPossible(SessionLocal session) { + Expression o = optimize(session); + if (o != this) { + return o.getNotIfPossible(session); + } + Value v; + switch (column.getType().getValueType()) { + case Value.BOOLEAN: + v = ValueBoolean.FALSE; + break; + case Value.TINYINT: + v = ValueTinyint.get((byte) 0); + break; + case Value.SMALLINT: + v = ValueSmallint.get((short) 0); + break; + case Value.INTEGER: + v = ValueInteger.get(0); + break; + case Value.BIGINT: + v = ValueBigint.get(0L); + break; + case Value.NUMERIC: + v = ValueNumeric.ZERO; + break; + case Value.REAL: + v = ValueReal.ZERO; + break; + case Value.DOUBLE: + v = ValueDouble.ZERO; + break; + case Value.DECFLOAT: + v = ValueDecfloat.ZERO; + break; + default: + /* + * Can be replaced with CAST(column AS BOOLEAN) = FALSE, but this + * replacement can't be optimized further, so it's better to leave + * NOT (column) as is. + */ + return null; + } + return new Comparison(Comparison.EQUAL, this, ValueExpression.get(v), false); } } diff --git a/h2/src/main/org/h2/expression/ExpressionList.java b/h2/src/main/org/h2/expression/ExpressionList.java index f7e92d8dce..25c38c160b 100644 --- a/h2/src/main/org/h2/expression/ExpressionList.java +++ b/h2/src/main/org/h2/expression/ExpressionList.java @@ -1,14 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.engine.Session; -import org.h2.table.Column; +import org.h2.engine.SessionLocal; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; +import org.h2.value.ExtTypeInfoRow; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueArray; @@ -18,10 +18,11 @@ * A list of expressions, as in (ID, NAME). * The result of this expression is a row or an array. */ -public class ExpressionList extends Expression { +public final class ExpressionList extends Expression { private final Expression[] list; private final boolean isArray; + private TypeInfo type; public ExpressionList(Expression[] list, boolean isArray) { this.list = list; @@ -29,17 +30,17 @@ public ExpressionList(Expression[] list, boolean isArray) { } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { Value[] v = new Value[list.length]; for (int i = 0; i < list.length; i++) { v[i] = list[i].getValue(session); } - return isArray ? ValueArray.get(v) : ValueRow.get(v); + return isArray ? ValueArray.get((TypeInfo) type.getExtTypeInfo(), v, session) : ValueRow.get(type, v); } @Override public TypeInfo getType() { - return isArray ? 
TypeInfo.TYPE_ARRAY : TypeInfo.TYPE_ROW; + return type; } @Override @@ -50,21 +51,28 @@ public void mapColumns(ColumnResolver resolver, int level, int state) { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { boolean allConst = true; - for (int i = 0; i < list.length; i++) { + int count = list.length; + for (int i = 0; i < count; i++) { Expression e = list[i].optimize(session); if (!e.isConstant()) { allConst = false; } list[i] = e; } + initializeType(); if (allConst) { return ValueExpression.get(getValue(session)); } return this; } + void initializeType() { + type = isArray ? TypeInfo.getTypeInfo(Value.ARRAY, list.length, 0, TypeInfo.getHigherType(list)) + : TypeInfo.getTypeInfo(Value.ROW, 0, 0, new ExtTypeInfoRow(list)); + } + @Override public void setEvaluatable(TableFilter tableFilter, boolean b) { for (Expression e : list) { @@ -73,14 +81,14 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append(isArray ? "ARRAY [" : "ROW ("); - writeExpressions(builder, list, alwaysQuote); - return builder.append(isArray ? ']' : ')'); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return isArray // + ? 
writeExpressions(builder.append("ARRAY ["), list, sqlFlags).append(']') + : writeExpressions(builder.append("ROW ("), list, sqlFlags).append(')'); } @Override - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { for (Expression e : list) { e.updateAggregate(session, stage); } @@ -105,17 +113,6 @@ public int getCost() { return cost; } - @Override - public Expression[] getExpressionColumns(Session session) { - ExpressionColumn[] expr = new ExpressionColumn[list.length]; - for (int i = 0; i < list.length; i++) { - Expression e = list[i]; - Column col = new Column("C" + (i + 1), e.getType()); - expr[i] = new ExpressionColumn(session.getDatabase(), col); - } - return expr; - } - @Override public boolean isConstant() { for (Expression e : list) { @@ -136,4 +133,8 @@ public Expression getSubexpression(int index) { return list[index]; } + public boolean isArray() { + return isArray; + } + } diff --git a/h2/src/main/org/h2/expression/ExpressionVisitor.java b/h2/src/main/org/h2/expression/ExpressionVisitor.java index 8a8f4642cf..7f2660fd7b 100644 --- a/h2/src/main/org/h2/expression/ExpressionVisitor.java +++ b/h2/src/main/org/h2/expression/ExpressionVisitor.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; import java.util.HashSet; -import org.h2.command.dml.AllColumnsForPlan; +import org.h2.command.query.AllColumnsForPlan; import org.h2.engine.DbObject; import org.h2.table.Column; import org.h2.table.ColumnResolver; @@ -17,7 +17,7 @@ * The visitor pattern is used to iterate through all expressions of a query * to optimize a statement. 
*/ -public class ExpressionVisitor { +public final class ExpressionVisitor { /** * Is the value independent on unset parameters or on columns of a higher @@ -137,6 +137,11 @@ public class ExpressionVisitor { */ public static final int GET_COLUMNS2 = 10; + /** + * Decrement query level of all expression columns. + */ + public static final int DECREMENT_QUERY_LEVEL = 11; + /** * The visitor singleton for the type QUERY_COMPARABLE. */ @@ -145,35 +150,31 @@ public class ExpressionVisitor { private final int type; private final int queryLevel; - private final HashSet dependencies; + private final HashSet set; private final AllColumnsForPlan columns1; private final Table table; private final long[] maxDataModificationId; private final ColumnResolver resolver; - private final HashSet columns2; private ExpressionVisitor(int type, int queryLevel, - HashSet dependencies, + HashSet set, AllColumnsForPlan columns1, Table table, ColumnResolver resolver, - long[] maxDataModificationId, - HashSet columns2) { + long[] maxDataModificationId) { this.type = type; this.queryLevel = queryLevel; - this.dependencies = dependencies; + this.set = set; this.columns1 = columns1; this.table = table; this.resolver = resolver; this.maxDataModificationId = maxDataModificationId; - this.columns2 = columns2; } private ExpressionVisitor(int type) { this.type = type; this.queryLevel = 0; - this.dependencies = null; + this.set = null; this.columns1 = null; - this.columns2 = null; this.table = null; this.resolver = null; this.maxDataModificationId = null; @@ -182,9 +183,8 @@ private ExpressionVisitor(int type) { private ExpressionVisitor(int type, int queryLevel) { this.type = type; this.queryLevel = queryLevel; - this.dependencies = null; + this.set = null; this.columns1 = null; - this.columns2 = null; this.table = null; this.resolver = null; this.maxDataModificationId = null; @@ -199,7 +199,7 @@ private ExpressionVisitor(int type, int queryLevel) { public static ExpressionVisitor 
getDependenciesVisitor( HashSet dependencies) { return new ExpressionVisitor(GET_DEPENDENCIES, 0, dependencies, null, - null, null, null, null); + null, null, null); } /** @@ -210,7 +210,7 @@ public static ExpressionVisitor getDependenciesVisitor( */ public static ExpressionVisitor getOptimizableVisitor(Table table) { return new ExpressionVisitor(OPTIMIZABLE_AGGREGATE, 0, null, - null, table, null, null, null); + null, table, null, null); } /** @@ -222,7 +222,7 @@ public static ExpressionVisitor getOptimizableVisitor(Table table) { */ public static ExpressionVisitor getNotFromResolverVisitor(ColumnResolver resolver) { return new ExpressionVisitor(NOT_FROM_RESOLVER, 0, null, null, null, - resolver, null, null); + resolver, null); } /** @@ -232,7 +232,7 @@ public static ExpressionVisitor getNotFromResolverVisitor(ColumnResolver resolve * @return the new visitor */ public static ExpressionVisitor getColumnsVisitor(AllColumnsForPlan columns) { - return new ExpressionVisitor(GET_COLUMNS1, 0, null, columns, null, null, null, null); + return new ExpressionVisitor(GET_COLUMNS1, 0, null, columns, null, null, null); } /** @@ -243,12 +243,28 @@ public static ExpressionVisitor getColumnsVisitor(AllColumnsForPlan columns) { * @return the new visitor */ public static ExpressionVisitor getColumnsVisitor(HashSet columns, Table table) { - return new ExpressionVisitor(GET_COLUMNS2, 0, null, null, table, null, null, columns); + return new ExpressionVisitor(GET_COLUMNS2, 0, columns, null, table, null, null); } public static ExpressionVisitor getMaxModificationIdVisitor() { return new ExpressionVisitor(SET_MAX_DATA_MODIFICATION_ID, 0, null, - null, null, null, new long[1], null); + null, null, null, new long[1]); + } + + /** + * Create a new visitor to decrement query level in columns with the + * specified resolvers. 
+ * + * @param columnResolvers + * column resolvers + * @param queryDecrement + * 0 to check whether operation is allowed, 1 to actually perform + * the decrement + * @return the new visitor + */ + public static ExpressionVisitor getDecrementQueryLevelVisitor(HashSet columnResolvers, + int queryDecrement) { + return new ExpressionVisitor(DECREMENT_QUERY_LEVEL, queryDecrement, columnResolvers, null, null, null, null); } /** @@ -257,8 +273,9 @@ public static ExpressionVisitor getMaxModificationIdVisitor() { * * @param obj the additional dependency. */ + @SuppressWarnings("unchecked") public void addDependency(DbObject obj) { - dependencies.add(obj); + ((HashSet) set).add(obj); } /** @@ -277,9 +294,10 @@ void addColumn1(Column column) { * * @param column the additional column. */ + @SuppressWarnings("unchecked") void addColumn2(Column column) { if (table == null || table == column.getTable()) { - columns2.add(column); + ((HashSet) set).add(column); } } @@ -289,8 +307,9 @@ void addColumn2(Column column) { * * @return the set */ + @SuppressWarnings("unchecked") public HashSet getDependencies() { - return dependencies; + return (HashSet) set; } /** @@ -321,6 +340,17 @@ public ColumnResolver getResolver() { return resolver; } + /** + * Get the set of column resolvers. + * This is used for {@link #DECREMENT_QUERY_LEVEL} visitors. + * + * @return the set + */ + @SuppressWarnings("unchecked") + public HashSet getColumnResolvers() { + return (HashSet) set; + } + /** * Update the field maxDataModificationId if this value is higher * than the current value. 
@@ -346,7 +376,7 @@ public long getMaxDataModificationId() { } int getQueryLevel() { - assert type == INDEPENDENT || type == EVALUATABLE; + assert type == INDEPENDENT || type == EVALUATABLE || type == DECREMENT_QUERY_LEVEL; return queryLevel; } diff --git a/h2/src/main/org/h2/expression/ExpressionWithFlags.java b/h2/src/main/org/h2/expression/ExpressionWithFlags.java new file mode 100644 index 0000000000..6100d5d550 --- /dev/null +++ b/h2/src/main/org/h2/expression/ExpressionWithFlags.java @@ -0,0 +1,28 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +/** + * Expression with flags. + */ +public interface ExpressionWithFlags { + + /** + * Set the flags for this expression. + * + * @param flags + * the flags to set + */ + void setFlags(int flags); + + /** + * Returns the flags. + * + * @return the flags + */ + int getFlags(); + +} diff --git a/h2/src/main/org/h2/expression/ExpressionWithVariableParameters.java b/h2/src/main/org/h2/expression/ExpressionWithVariableParameters.java new file mode 100644 index 0000000000..a7c0d54e02 --- /dev/null +++ b/h2/src/main/org/h2/expression/ExpressionWithVariableParameters.java @@ -0,0 +1,33 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.message.DbException; + +/** + * An expression with variable number of parameters. + */ +public interface ExpressionWithVariableParameters { + + /** + * Adds the parameter expression. + * + * @param param + * the expression + */ + void addParameter(Expression param); + + /** + * This method must be called after all the parameters have been set. It + * checks if the parameter count is correct when required by the + * implementation. 
+ * + * @throws DbException + * if the parameter count is incorrect. + */ + void doneWithParameters() throws DbException; + +} diff --git a/h2/src/main/org/h2/expression/FieldReference.java b/h2/src/main/org/h2/expression/FieldReference.java new file mode 100644 index 0000000000..248b937a55 --- /dev/null +++ b/h2/src/main/org/h2/expression/FieldReference.java @@ -0,0 +1,71 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import java.util.Map.Entry; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.util.ParserUtil; +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueRow; + +/** + * Field reference. + */ +public final class FieldReference extends Operation1 { + + private final String fieldName; + + private int ordinal; + + public FieldReference(Expression arg, String fieldName) { + super(arg); + this.fieldName = fieldName; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return ParserUtil.quoteIdentifier(arg.getEnclosedSQL(builder, sqlFlags).append('.'), fieldName, sqlFlags); + } + + @Override + public Value getValue(SessionLocal session) { + Value l = arg.getValue(session); + if (l != ValueNull.INSTANCE) { + return ((ValueRow) l).getList()[ordinal]; + } + return ValueNull.INSTANCE; + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + TypeInfo type = arg.getType(); + if (type.getValueType() != Value.ROW) { + throw DbException.getInvalidExpressionTypeException("ROW", arg); + } + int ordinal = 0; + for (Entry entry : ((ExtTypeInfoRow) type.getExtTypeInfo()).getFields()) { + if (fieldName.equals(entry.getKey())) { + type = entry.getValue(); + 
this.type = type; + this.ordinal = ordinal; + if (arg.isConstant()) { + return TypedValueExpression.get(getValue(session), type); + } + return this; + } + ordinal++; + } + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, fieldName); + } + +} diff --git a/h2/src/main/org/h2/expression/Format.java b/h2/src/main/org/h2/expression/Format.java new file mode 100644 index 0000000000..6ba27eadd5 --- /dev/null +++ b/h2/src/main/org/h2/expression/Format.java @@ -0,0 +1,99 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueJson; + +/** + * A format clause such as FORMAT JSON. + */ +public final class Format extends Operation1 { + + /** + * Supported formats. + */ + public enum FormatEnum { + /** + * JSON. + */ + JSON; + } + + private final FormatEnum format; + + public Format(Expression arg, FormatEnum format) { + super(arg); + this.format = format; + } + + @Override + public Value getValue(SessionLocal session) { + return getValue(arg.getValue(session)); + } + + /** + * Returns the value with applied format. 
+ * + * @param value + * the value + * @return the value with applied format + */ + public Value getValue(Value value) { + switch (value.getValueType()) { + case Value.NULL: + return ValueJson.NULL; + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.CHAR: + case Value.CLOB: + return ValueJson.fromJson(value.getString()); + default: + return value.convertTo(TypeInfo.TYPE_JSON); + } + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + if (arg.isConstant()) { + return ValueExpression.get(getValue(session)); + } + if (arg instanceof Format && format == ((Format) arg).format) { + return arg; + } + type = TypeInfo.TYPE_JSON; + return this; + } + + @Override + public boolean isIdentity() { + return arg.isIdentity(); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return arg.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(" FORMAT ").append(format.name()); + } + + @Override + public int getNullable() { + return arg.getNullable(); + } + + @Override + public String getTableName() { + return arg.getTableName(); + } + + @Override + public String getColumnName(SessionLocal session, int columnIndex) { + return arg.getColumnName(session, columnIndex); + } + +} diff --git a/h2/src/main/org/h2/expression/IntervalOperation.java b/h2/src/main/org/h2/expression/IntervalOperation.java index 1390d2e33a..8182b9c8e3 100644 --- a/h2/src/main/org/h2/expression/IntervalOperation.java +++ b/h2/src/main/org/h2/expression/IntervalOperation.java @@ -1,13 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; import static org.h2.util.DateTimeUtils.NANOS_PER_DAY; import static org.h2.util.DateTimeUtils.NANOS_PER_HOUR; -import static org.h2.util.DateTimeUtils.NANOS_PER_MINUTE; +import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND; import static org.h2.util.DateTimeUtils.absoluteDayFromDateValue; import static org.h2.util.DateTimeUtils.dateAndTimeFromValue; import static org.h2.util.DateTimeUtils.dateTimeToValue; @@ -19,11 +19,10 @@ import org.h2.api.ErrorCode; import org.h2.api.IntervalQualifier; -import org.h2.engine.Session; -import org.h2.expression.function.DateTimeFunctions; +import org.h2.engine.SessionLocal; +import org.h2.expression.function.DateTimeFunction; import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; +import org.h2.util.DateTimeUtils; import org.h2.util.IntervalUtils; import org.h2.value.DataType; import org.h2.value.TypeInfo; @@ -31,13 +30,15 @@ import org.h2.value.ValueDate; import org.h2.value.ValueInterval; import org.h2.value.ValueNull; +import org.h2.value.ValueNumeric; import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; import org.h2.value.ValueTimestampTimeZone; /** * A mathematical operation with intervals. */ -public class IntervalOperation extends Expression { +public class IntervalOperation extends Operation2 { public enum IntervalOpType { /** @@ -50,6 +51,11 @@ public enum IntervalOpType { */ INTERVAL_MINUS_INTERVAL, + /** + * Interval divided by interval (non-standard). + */ + INTERVAL_DIVIDE_INTERVAL, + /** * Date-time plus interval. */ @@ -76,26 +82,52 @@ public enum IntervalOpType { DATETIME_MINUS_DATETIME } + /** + * Number of digits enough to hold + * {@code INTERVAL '999999999999999999' YEAR / INTERVAL '1' MONTH}. + */ + private static final int INTERVAL_YEAR_DIGITS = 20; + + /** + * Number of digits enough to hold + * {@code INTERVAL '999999999999999999' DAY / INTERVAL '0.000000001' SECOND}. 
+ */ + private static final int INTERVAL_DAY_DIGITS = 32; + + private static final TypeInfo INTERVAL_DIVIDE_INTERVAL_YEAR_TYPE = TypeInfo.getTypeInfo(Value.NUMERIC, + INTERVAL_YEAR_DIGITS * 3, INTERVAL_YEAR_DIGITS * 2, null); + + private static final TypeInfo INTERVAL_DIVIDE_INTERVAL_DAY_TYPE = TypeInfo.getTypeInfo(Value.NUMERIC, + INTERVAL_DAY_DIGITS * 3, INTERVAL_DAY_DIGITS * 2, null); + private final IntervalOpType opType; - private Expression left, right; - private TypeInfo type; - private static BigInteger nanosFromValue(Value v) { - long[] a = dateAndTimeFromValue(v); + private TypeInfo forcedType; + + private static BigInteger nanosFromValue(SessionLocal session, Value v) { + long[] a = dateAndTimeFromValue(v, session); return BigInteger.valueOf(absoluteDayFromDateValue(a[0])).multiply(NANOS_PER_DAY_BI) .add(BigInteger.valueOf(a[1])); } + public IntervalOperation(IntervalOpType opType, Expression left, Expression right, TypeInfo forcedType) { + this(opType, left, right); + this.forcedType = forcedType; + } + public IntervalOperation(IntervalOpType opType, Expression left, Expression right) { + super(left, right); this.opType = opType; - this.left = left; - this.right = right; int l = left.getType().getValueType(), r = right.getType().getValueType(); switch (opType) { case INTERVAL_PLUS_INTERVAL: case INTERVAL_MINUS_INTERVAL: type = TypeInfo.getTypeInfo(Value.getHigherOrder(l, r)); break; + case INTERVAL_DIVIDE_INTERVAL: + type = DataType.isYearMonthIntervalType(l) ? 
INTERVAL_DIVIDE_INTERVAL_YEAR_TYPE + : INTERVAL_DIVIDE_INTERVAL_DAY_TYPE; + break; case DATETIME_PLUS_INTERVAL: case DATETIME_MINUS_INTERVAL: case INTERVAL_MULTIPLY_NUMERIC: @@ -103,7 +135,9 @@ public IntervalOperation(IntervalOpType opType, Expression left, Expression righ type = left.getType(); break; case DATETIME_MINUS_DATETIME: - if (l == Value.TIME && r == Value.TIME) { + if (forcedType != null) { + type = forcedType; + } else if ((l == Value.TIME || l == Value.TIME_TZ) && (r == Value.TIME || r == Value.TIME_TZ)) { type = TypeInfo.TYPE_INTERVAL_HOUR_TO_SECOND; } else if (l == Value.DATE && r == Value.DATE) { type = TypeInfo.TYPE_INTERVAL_DAY; @@ -114,10 +148,32 @@ public IntervalOperation(IntervalOpType opType, Expression left, Expression righ } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append('('); - left.getSQL(builder, alwaysQuote).append(' ').append(getOperationToken()).append(' '); - return right.getSQL(builder, alwaysQuote).append(')'); + public boolean needParentheses() { + return forcedType == null; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (forcedType != null) { + getInnerSQL2(builder.append('('), sqlFlags); + getForcedTypeSQL(builder.append(") "), forcedType); + } else { + getInnerSQL2(builder, sqlFlags); + } + return builder; + } + + private void getInnerSQL2(StringBuilder builder, int sqlFlags) { + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(' ').append(getOperationToken()).append(' '); + right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + static StringBuilder getForcedTypeSQL(StringBuilder builder, TypeInfo forcedType) { + int precision = (int) forcedType.getPrecision(); + int scale = forcedType.getScale(); + return IntervalQualifier.valueOf(forcedType.getValueType() - Value.INTERVAL_YEAR).getTypeName(builder, + precision == ValueInterval.DEFAULT_PRECISION ? 
-1 : (int) precision, + scale == ValueInterval.DEFAULT_SCALE ? -1 : scale, true); } private char getOperationToken() { @@ -131,15 +187,16 @@ private char getOperationToken() { return '-'; case INTERVAL_MULTIPLY_NUMERIC: return '*'; + case INTERVAL_DIVIDE_INTERVAL: case INTERVAL_DIVIDE_NUMERIC: return '/'; default: - throw DbException.throwInternalError("opType=" + opType); + throw DbException.getInternalError("opType=" + opType); } } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { Value l = left.getValue(session); Value r = right.getValue(session); if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { @@ -155,9 +212,12 @@ public Value getValue(Session session) { IntervalQualifier.valueOf(Value.getHigherOrder(lType, rType) - Value.INTERVAL_YEAR), opType == IntervalOpType.INTERVAL_PLUS_INTERVAL ? a1.add(a2) : a1.subtract(a2)); } + case INTERVAL_DIVIDE_INTERVAL: + return ValueNumeric.get(IntervalUtils.intervalToAbsolute((ValueInterval) l)) + .divide(ValueNumeric.get(IntervalUtils.intervalToAbsolute((ValueInterval) r)), type); case DATETIME_PLUS_INTERVAL: case DATETIME_MINUS_INTERVAL: - return getDateTimeWithInterval(l, r, lType, rType); + return getDateTimeWithInterval(session, l, r, lType, rType); case INTERVAL_MULTIPLY_NUMERIC: case INTERVAL_DIVIDE_NUMERIC: { BigDecimal a1 = new BigDecimal(IntervalUtils.intervalToAbsolute((ValueInterval) l)); @@ -166,15 +226,53 @@ public Value getValue(Session session) { (opType == IntervalOpType.INTERVAL_MULTIPLY_NUMERIC ? 
a1.multiply(a2) : a1.divide(a2)) .toBigInteger()); } - case DATETIME_MINUS_DATETIME: - if (lType == Value.TIME && rType == Value.TIME) { - long diff = ((ValueTime) l).getNanos() - ((ValueTime) r).getNanos(); + case DATETIME_MINUS_DATETIME: { + Value result; + if ((lType == Value.TIME || lType == Value.TIME_TZ) && (rType == Value.TIME || rType == Value.TIME_TZ)) { + long diff; + if (lType == Value.TIME && rType == Value.TIME) { + diff = ((ValueTime) l).getNanos() - ((ValueTime) r).getNanos(); + } else { + ValueTimeTimeZone left = (ValueTimeTimeZone) l.convertTo(TypeInfo.TYPE_TIME_TZ, session), + right = (ValueTimeTimeZone) r.convertTo(TypeInfo.TYPE_TIME_TZ, session); + diff = left.getNanos() - right.getNanos() + + (right.getTimeZoneOffsetSeconds() - left.getTimeZoneOffsetSeconds()) + * DateTimeUtils.NANOS_PER_SECOND; + } boolean negative = diff < 0; if (negative) { diff = -diff; } - return ValueInterval.from(IntervalQualifier.HOUR_TO_SECOND, negative, diff / NANOS_PER_HOUR, + result = ValueInterval.from(IntervalQualifier.HOUR_TO_SECOND, negative, diff / NANOS_PER_HOUR, diff % NANOS_PER_HOUR); + } else if (forcedType != null && DataType.isYearMonthIntervalType(forcedType.getValueType())) { + long[] dt1 = dateAndTimeFromValue(l, session), dt2 = dateAndTimeFromValue(r, session); + long dateValue1 = lType == Value.TIME || lType == Value.TIME_TZ + ? session.currentTimestamp().getDateValue() + : dt1[0]; + long dateValue2 = rType == Value.TIME || rType == Value.TIME_TZ + ? 
session.currentTimestamp().getDateValue() + : dt2[0]; + long leading = 12L + * (DateTimeUtils.yearFromDateValue(dateValue1) - DateTimeUtils.yearFromDateValue(dateValue2)) + + DateTimeUtils.monthFromDateValue(dateValue1) - DateTimeUtils.monthFromDateValue(dateValue2); + int d1 = DateTimeUtils.dayFromDateValue(dateValue1); + int d2 = DateTimeUtils.dayFromDateValue(dateValue2); + if (leading >= 0) { + if (d1 < d2 || d1 == d2 && dt1[1] < dt2[1]) { + leading--; + } + } else if (d1 > d2 || d1 == d2 && dt1[1] > dt2[1]) { + leading++; + } + boolean negative; + if (leading < 0) { + negative = true; + leading = -leading; + } else { + negative = false; + } + result = ValueInterval.from(IntervalQualifier.MONTH, negative, leading, 0L); } else if (lType == Value.DATE && rType == Value.DATE) { long diff = absoluteDayFromDateValue(((ValueDate) l).getDateValue()) - absoluteDayFromDateValue(((ValueDate) r).getDateValue()); @@ -182,34 +280,39 @@ public Value getValue(Session session) { if (negative) { diff = -diff; } - return ValueInterval.from(IntervalQualifier.DAY, negative, diff, 0L); + result = ValueInterval.from(IntervalQualifier.DAY, negative, diff, 0L); } else { - BigInteger diff = nanosFromValue(l).subtract(nanosFromValue(r)); + BigInteger diff = nanosFromValue(session, l).subtract(nanosFromValue(session, r)); if (lType == Value.TIMESTAMP_TZ || rType == Value.TIMESTAMP_TZ) { - l = l.convertTo(Value.TIMESTAMP_TZ); - r = r.convertTo(Value.TIMESTAMP_TZ); - diff = diff.add(BigInteger.valueOf((((ValueTimestampTimeZone) r).getTimeZoneOffsetMins() - - ((ValueTimestampTimeZone) l).getTimeZoneOffsetMins()) * NANOS_PER_MINUTE)); + l = l.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, session); + r = r.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, session); + diff = diff.add(BigInteger.valueOf((((ValueTimestampTimeZone) r).getTimeZoneOffsetSeconds() + - ((ValueTimestampTimeZone) l).getTimeZoneOffsetSeconds()) * NANOS_PER_SECOND)); } - return 
IntervalUtils.intervalFromAbsolute(IntervalQualifier.DAY_TO_SECOND, diff); + result = IntervalUtils.intervalFromAbsolute(IntervalQualifier.DAY_TO_SECOND, diff); + } + if (forcedType != null) { + result = result.castTo(forcedType, session); } + return result; } - throw DbException.throwInternalError("type=" + opType); + } + throw DbException.getInternalError("type=" + opType); } - private Value getDateTimeWithInterval(Value l, Value r, int lType, int rType) { + private Value getDateTimeWithInterval(SessionLocal session, Value l, Value r, int lType, int rType) { switch (lType) { - case Value.TIME: { + case Value.TIME: if (DataType.isYearMonthIntervalType(rType)) { - throw DbException.throwInternalError("type=" + rType); + throw DbException.getInternalError("type=" + rType); } - BigInteger a1 = BigInteger.valueOf(((ValueTime) l).getNanos()); - BigInteger a2 = IntervalUtils.intervalToAbsolute((ValueInterval) r); - BigInteger n = opType == IntervalOpType.DATETIME_PLUS_INTERVAL ? a1.add(a2) : a1.subtract(a2); - if (n.signum() < 0 || n.compareTo(NANOS_PER_DAY_BI) >= 0) { - throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, n.toString()); + return ValueTime.fromNanos(getTimeWithInterval(r, ((ValueTime) l).getNanos())); + case Value.TIME_TZ: { + if (DataType.isYearMonthIntervalType(rType)) { + throw DbException.getInternalError("type=" + rType); } - return ValueTime.fromNanos(n.longValue()); + ValueTimeTimeZone t = (ValueTimeTimeZone) l; + return ValueTimeTimeZone.fromNanos(getTimeWithInterval(r, t.getNanos()), t.getTimeZoneOffsetSeconds()); } case Value.DATE: case Value.TIMESTAMP: @@ -219,7 +322,7 @@ private Value getDateTimeWithInterval(Value l, Value r, int lType, int rType) { if (opType == IntervalOpType.DATETIME_MINUS_INTERVAL) { m = -m; } - return DateTimeFunctions.dateadd("MONTH", m, l); + return DateTimeFunction.dateadd(session, DateTimeFunction.MONTH, m, l); } else { BigInteger a2 = IntervalUtils.intervalToAbsolute((ValueInterval) r); if (lType == 
Value.DATE) { @@ -228,7 +331,7 @@ private Value getDateTimeWithInterval(Value l, Value r, int lType, int rType) { BigInteger n = opType == IntervalOpType.DATETIME_PLUS_INTERVAL ? a1.add(a2) : a1.subtract(a2); return ValueDate.fromDateValue(dateValueFromAbsoluteDay(n.longValue())); } else { - long[] a = dateAndTimeFromValue(l); + long[] a = dateAndTimeFromValue(l, session); long absoluteDay = absoluteDayFromDateValue(a[0]); long timeNanos = a[1]; BigInteger[] dr = a2.divideAndRemainder(NANOS_PER_DAY_BI); @@ -246,23 +349,26 @@ private Value getDateTimeWithInterval(Value l, Value r, int lType, int rType) { timeNanos += NANOS_PER_DAY; absoluteDay--; } - return dateTimeToValue(l, dateValueFromAbsoluteDay(absoluteDay), timeNanos, false); + return dateTimeToValue(l, dateValueFromAbsoluteDay(absoluteDay), timeNanos); } } } - throw DbException.throwInternalError("type=" + opType); + throw DbException.getInternalError("type=" + opType); } - @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - left.mapColumns(resolver, level, state); - if (right != null) { - right.mapColumns(resolver, level, state); + private long getTimeWithInterval(Value r, long nanos) { + BigInteger a1 = BigInteger.valueOf(nanos); + BigInteger a2 = IntervalUtils.intervalToAbsolute((ValueInterval) r); + BigInteger n = opType == IntervalOpType.DATETIME_PLUS_INTERVAL ? 
a1.add(a2) : a1.subtract(a2); + if (n.signum() < 0 || n.compareTo(NANOS_PER_DAY_BI) >= 0) { + throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, n.toString()); } + nanos = n.longValue(); + return nanos; } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { left = left.optimize(session); right = right.optimize(session); if (left.isConstant() && right.isConstant()) { @@ -271,48 +377,4 @@ public Expression optimize(Session session) { return this; } - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - right.setEvaluatable(tableFilter, b); - } - - @Override - public TypeInfo getType() { - return type; - } - - @Override - public void updateAggregate(Session session, int stage) { - left.updateAggregate(session, stage); - right.updateAggregate(session, stage); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return left.isEverything(visitor) && right.isEverything(visitor); - } - - @Override - public int getCost() { - return left.getCost() + 1 + right.getCost(); - } - - @Override - public int getSubexpressionCount() { - return 2; - } - - @Override - public Expression getSubexpression(int index) { - switch (index) { - case 0: - return left; - case 1: - return right; - default: - throw new IndexOutOfBoundsException(); - } - } - } diff --git a/h2/src/main/org/h2/expression/Operation0.java b/h2/src/main/org/h2/expression/Operation0.java new file mode 100644 index 0000000000..23349d23a1 --- /dev/null +++ b/h2/src/main/org/h2/expression/Operation0.java @@ -0,0 +1,40 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; + +/** + * Operation without subexpressions. + */ +public abstract class Operation0 extends Expression { + + protected Operation0() { + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + // Nothing to do + } + + @Override + public Expression optimize(SessionLocal session) { + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + // Nothing to do + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + // Nothing to do + } + +} diff --git a/h2/src/main/org/h2/expression/Operation1.java b/h2/src/main/org/h2/expression/Operation1.java new file mode 100644 index 0000000000..a4ff48cca5 --- /dev/null +++ b/h2/src/main/org/h2/expression/Operation1.java @@ -0,0 +1,75 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; + +/** + * Operation with one argument. + */ +public abstract class Operation1 extends Expression { + + /** + * The argument of the operation. + */ + protected Expression arg; + + /** + * The type of the result. 
+ */ + protected TypeInfo type; + + protected Operation1(Expression arg) { + this.arg = arg; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + arg.mapColumns(resolver, level, state); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + arg.setEvaluatable(tableFilter, value); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + arg.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return arg.isEverything(visitor); + } + + @Override + public int getCost() { + return arg.getCost() + 1; + } + + @Override + public int getSubexpressionCount() { + return 1; + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return arg; + } + throw new IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/Operation1_2.java b/h2/src/main/org/h2/expression/Operation1_2.java new file mode 100644 index 0000000000..78bed3190a --- /dev/null +++ b/h2/src/main/org/h2/expression/Operation1_2.java @@ -0,0 +1,97 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; + +/** + * Operation with one or two arguments. + */ +public abstract class Operation1_2 extends Expression { + + /** + * The left part of the operation (the first argument). + */ + protected Expression left; + + /** + * The right part of the operation (the second argument). + */ + protected Expression right; + + /** + * The type of the result. 
+ */ + protected TypeInfo type; + + protected Operation1_2(Expression left, Expression right) { + this.left = left; + this.right = right; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + if (right != null) { + right.mapColumns(resolver, level, state); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + left.setEvaluatable(tableFilter, value); + if (right != null) { + right.setEvaluatable(tableFilter, value); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + if (right != null) { + right.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && (right == null || right.isEverything(visitor)); + } + + @Override + public int getCost() { + int cost = left.getCost() + 1; + if (right != null) { + cost += right.getCost(); + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return right != null ? 2 : 1; + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return left; + } + if (index == 1 && right != null) { + return right; + } + throw new IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/Operation2.java b/h2/src/main/org/h2/expression/Operation2.java new file mode 100644 index 0000000000..d729157712 --- /dev/null +++ b/h2/src/main/org/h2/expression/Operation2.java @@ -0,0 +1,88 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; + +/** + * Operation with two arguments. + */ +public abstract class Operation2 extends Expression { + + /** + * The left part of the operation (the first argument). + */ + protected Expression left; + + /** + * The right part of the operation (the second argument). + */ + protected Expression right; + + /** + * The type of the result. + */ + protected TypeInfo type; + + protected Operation2(Expression left, Expression right) { + this.left = left; + this.right = right; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + right.mapColumns(resolver, level, state); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + left.setEvaluatable(tableFilter, value); + right.setEvaluatable(tableFilter, value); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + right.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && right.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost() + right.getCost() + 1; + } + + @Override + public int getSubexpressionCount() { + return 2; + } + + @Override + public Expression getSubexpression(int index) { + switch (index) { + case 0: + return left; + case 1: + return right; + default: + throw new IndexOutOfBoundsException(); + } + } + +} diff --git a/h2/src/main/org/h2/expression/OperationN.java b/h2/src/main/org/h2/expression/OperationN.java new file mode 100644 index 0000000000..ff964ea697 --- /dev/null +++ b/h2/src/main/org/h2/expression/OperationN.java @@ -0,0 
+1,132 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import java.util.Arrays; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; + +/** + * Operation with many arguments. + */ +public abstract class OperationN extends Expression implements ExpressionWithVariableParameters { + + /** + * The array of arguments. + */ + protected Expression[] args; + + /** + * The number of arguments. + */ + protected int argsCount; + + /** + * The type of the result. + */ + protected TypeInfo type; + + protected OperationN(Expression[] args) { + this.args = args; + } + + @Override + public void addParameter(Expression param) { + int capacity = args.length; + if (argsCount >= capacity) { + args = Arrays.copyOf(args, capacity * 2); + } + args[argsCount++] = param; + } + + @Override + public void doneWithParameters() throws DbException { + if (args.length != argsCount) { + args = Arrays.copyOf(args, argsCount); + } + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + for (Expression e : args) { + e.mapColumns(resolver, level, state); + } + } + + /** + * Optimizes arguments. 
+ * + * @param session + * the session + * @param allConst + * whether operation is deterministic + * @return whether operation is deterministic and all arguments are + * constants + */ + protected boolean optimizeArguments(SessionLocal session, boolean allConst) { + for (int i = 0, l = args.length; i < l; i++) { + Expression e = args[i].optimize(session); + args[i] = e; + if (allConst && !e.isConstant()) { + allConst = false; + } + } + return allConst; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + for (Expression e : args) { + e.setEvaluatable(tableFilter, value); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + for (Expression e : args) { + e.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + for (Expression e : args) { + if (!e.isEverything(visitor)) { + return false; + } + } + return true; + } + + @Override + public int getCost() { + int cost = args.length + 1; + for (Expression e : args) { + cost += e.getCost(); + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return args.length; + } + + @Override + public Expression getSubexpression(int index) { + return args[index]; + } + +} diff --git a/h2/src/main/org/h2/expression/Parameter.java b/h2/src/main/org/h2/expression/Parameter.java index 69f6d9b348..5c30d6facc 100644 --- a/h2/src/main/org/h2/expression/Parameter.java +++ b/h2/src/main/org/h2/expression/Parameter.java @@ -1,27 +1,24 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; import org.h2.api.ErrorCode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.condition.Comparison; import org.h2.message.DbException; import org.h2.table.Column; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueBoolean; import org.h2.value.ValueNull; -import org.h2.value.ValueString; +import org.h2.value.ValueVarchar; /** * A parameter of a prepared statement. */ -public class Parameter extends Expression implements ParameterInterface { +public final class Parameter extends Operation0 implements ParameterInterface { private Value value; private Column column; @@ -32,7 +29,7 @@ public Parameter(int index) { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { return builder.append('?').append(index + 1); } @@ -57,7 +54,7 @@ public Value getParamValue() { } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { return getParamValue(); } @@ -72,22 +69,6 @@ public TypeInfo getType() { return TypeInfo.TYPE_UNKNOWN; } - @Override - public int getValueType() { - if (value != null) { - return value.getValueType(); - } - if (column != null) { - return column.getType().getValueType(); - } - return Value.UNKNOWN; - } - - @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - // can't map - } - @Override public void checkSet() { if (value == null) { @@ -96,78 +77,27 @@ public void checkSet() { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { if (session.getDatabase().getMode().treatEmptyStringsAsNull) { - if (value instanceof ValueString && value.getString().isEmpty()) { + if (value instanceof ValueVarchar && 
value.getString().isEmpty()) { value = ValueNull.INSTANCE; } } return this; } - @Override - public boolean isConstant() { - return false; - } - @Override public boolean isValueSet() { return value != null; } - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - // not bound - } - - @Override - public int getScale() { - if (value != null) { - return value.getType().getScale(); - } - if (column != null) { - return column.getType().getScale(); - } - return 0; - } - - @Override - public long getPrecision() { - if (value != null) { - return value.getType().getPrecision(); - } - if (column != null) { - return column.getType().getPrecision(); - } - return 0; - } - - @Override - public void updateAggregate(Session session, int stage) { - // nothing to do - } - @Override public boolean isEverything(ExpressionVisitor visitor) { switch (visitor.getType()) { - case ExpressionVisitor.EVALUATABLE: - // the parameter _will_be_ evaluatable at execute time - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - // it is checked independently if the value is the same as the last - // time - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: - case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.GET_COLUMNS1: - case ExpressionVisitor.GET_COLUMNS2: - return true; case ExpressionVisitor.INDEPENDENT: return value != null; default: - throw DbException.throwInternalError("type="+visitor.getType()); + return true; } } @@ -177,9 +107,8 @@ public int getCost() { } @Override - public Expression getNotIfPossible(Session session) { - return new Comparison(session, Comparison.EQUAL, this, - ValueExpression.get(ValueBoolean.FALSE)); + public Expression getNotIfPossible(SessionLocal session) { + return new Comparison(Comparison.EQUAL, this, ValueExpression.FALSE, false); } public void setColumn(Column 
column) { diff --git a/h2/src/main/org/h2/expression/ParameterInterface.java b/h2/src/main/org/h2/expression/ParameterInterface.java index f833066c13..2f8405213d 100644 --- a/h2/src/main/org/h2/expression/ParameterInterface.java +++ b/h2/src/main/org/h2/expression/ParameterInterface.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; import org.h2.message.DbException; +import org.h2.value.TypeInfo; import org.h2.value.Value; /** @@ -43,26 +44,12 @@ public interface ParameterInterface { boolean isValueSet(); /** - * Get the expected data type of the parameter if no value is set, or the + * Returns the expected data type if no value is set, or the * data type of the value if one is set. * * @return the data type */ - int getValueType(); - - /** - * Get the expected precision of this parameter. - * - * @return the expected precision - */ - long getPrecision(); - - /** - * Get the expected scale of this parameter. - * - * @return the expected scale - */ - int getScale(); + TypeInfo getType(); /** * Check if this column is nullable. diff --git a/h2/src/main/org/h2/expression/ParameterRemote.java b/h2/src/main/org/h2/expression/ParameterRemote.java index e3258dd076..fe6a46b9e5 100644 --- a/h2/src/main/org/h2/expression/ParameterRemote.java +++ b/h2/src/main/org/h2/expression/ParameterRemote.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; @@ -11,7 +11,9 @@ import org.h2.api.ErrorCode; import org.h2.message.DbException; import org.h2.value.Transfer; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueLob; /** * A client side (remote) parameter. @@ -20,9 +22,7 @@ public class ParameterRemote implements ParameterInterface { private Value value; private final int index; - private int dataType = Value.UNKNOWN; - private long precision; - private int scale; + private TypeInfo type = TypeInfo.TYPE_UNKNOWN; private int nullable = ResultSetMetaData.columnNullableUnknown; public ParameterRemote(int index) { @@ -31,8 +31,8 @@ public ParameterRemote(int index) { @Override public void setValue(Value newValue, boolean closeOld) { - if (closeOld && value != null) { - value.remove(); + if (closeOld && value instanceof ValueLob) { + ((ValueLob) value).remove(); } value = newValue; } @@ -55,18 +55,8 @@ public boolean isValueSet() { } @Override - public int getValueType() { - return value == null ? dataType : value.getValueType(); - } - - @Override - public long getPrecision() { - return value == null ? precision : value.getType().getPrecision(); - } - - @Override - public int getScale() { - return value == null ? scale : value.getType().getScale(); + public TypeInfo getType() { + return value == null ? type : value.getType(); } @Override @@ -75,14 +65,13 @@ public int getNullable() { } /** - * Write the parameter meta data from the transfer object. + * Read the parameter meta data from the transfer object. 
* * @param transfer the transfer object + * @throws IOException on failure */ public void readMetaData(Transfer transfer) throws IOException { - dataType = transfer.readInt(); - precision = transfer.readLong(); - scale = transfer.readInt(); + type = transfer.readTypeInfo(); nullable = transfer.readInt(); } @@ -91,13 +80,10 @@ public void readMetaData(Transfer transfer) throws IOException { * * @param transfer the transfer object * @param p the parameter + * @throws IOException on failure */ - public static void writeMetaData(Transfer transfer, ParameterInterface p) - throws IOException { - transfer.writeInt(p.getValueType()); - transfer.writeLong(p.getPrecision()); - transfer.writeInt(p.getScale()); - transfer.writeInt(p.getNullable()); + public static void writeMetaData(Transfer transfer, ParameterInterface p) throws IOException { + transfer.writeTypeInfo(p.getType()).writeInt(p.getNullable()); } } diff --git a/h2/src/main/org/h2/expression/Rownum.java b/h2/src/main/org/h2/expression/Rownum.java index 07457f4979..0b7db71504 100644 --- a/h2/src/main/org/h2/expression/Rownum.java +++ b/h2/src/main/org/h2/expression/Rownum.java @@ -1,71 +1,51 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueLong; +import org.h2.value.ValueBigint; /** * Represents the ROWNUM function. 
*/ -public class Rownum extends Expression { +public final class Rownum extends Operation0 { private final Prepared prepared; + private boolean singleRow; + public Rownum(Prepared prepared) { if (prepared == null) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } this.prepared = prepared; } @Override - public Value getValue(Session session) { - return ValueLong.get(prepared.getCurrentRowNumber()); + public Value getValue(SessionLocal session) { + return ValueBigint.get(prepared.getCurrentRowNumber()); } @Override public TypeInfo getType() { - return TypeInfo.TYPE_LONG; + return TypeInfo.TYPE_BIGINT; } @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - // nothing to do - } - - @Override - public Expression optimize(Session session) { - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - // nothing to do - } - - @Override - public String getSQL(boolean alwaysQuote) { - return "ROWNUM()"; - } - - @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { return builder.append("ROWNUM()"); } @Override - public void updateAggregate(Session session, int stage) { - // nothing to do + public Expression optimize(SessionLocal session) { + return singleRow ? 
ValueExpression.get(ValueBigint.get(1L)) : this; } @Override @@ -75,18 +55,15 @@ public boolean isEverything(ExpressionVisitor visitor) { case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: case ExpressionVisitor.DETERMINISTIC: case ExpressionVisitor.INDEPENDENT: - return false; case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - case ExpressionVisitor.GET_COLUMNS1: - case ExpressionVisitor.GET_COLUMNS2: - // if everything else is the same, the rownum is the same - return true; + return false; + case ExpressionVisitor.DECREMENT_QUERY_LEVEL: + if (visitor.getQueryLevel() > 0) { + singleRow = true; + } + //$FALL-THROUGH$ default: - throw DbException.throwInternalError("type="+visitor.getType()); + return true; } } diff --git a/h2/src/main/org/h2/expression/SearchedCase.java b/h2/src/main/org/h2/expression/SearchedCase.java new file mode 100644 index 0000000000..05ba3454a8 --- /dev/null +++ b/h2/src/main/org/h2/expression/SearchedCase.java @@ -0,0 +1,95 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A searched case. 
+ */ +public final class SearchedCase extends OperationN { + + public SearchedCase() { + super(new Expression[4]); + } + + public SearchedCase(Expression[] args) { + super(args); + } + + @Override + public Value getValue(SessionLocal session) { + int len = args.length - 1; + for (int i = 0; i < len; i += 2) { + if (args[i].getBooleanValue(session)) { + return args[i + 1].getValue(session).convertTo(type, session); + } + } + if ((len & 1) == 0) { + return args[len].getValue(session).convertTo(type, session); + } + return ValueNull.INSTANCE; + } + + @Override + public Expression optimize(SessionLocal session) { + TypeInfo typeInfo = TypeInfo.TYPE_UNKNOWN; + int len = args.length - 1; + boolean allConst = true; + for (int i = 0; i < len; i += 2) { + Expression condition = args[i].optimize(session); + Expression result = args[i + 1].optimize(session); + if (allConst) { + if (condition.isConstant()) { + if (condition.getBooleanValue(session)) { + return result; + } + } else { + allConst = false; + } + } + args[i] = condition; + args[i + 1] = result; + typeInfo = SimpleCase.combineTypes(typeInfo, result); + } + if ((len & 1) == 0) { + Expression result = args[len].optimize(session); + if (allConst) { + return result; + } + args[len] = result; + typeInfo = SimpleCase.combineTypes(typeInfo, result); + } else if (allConst) { + return ValueExpression.NULL; + } + if (typeInfo.getValueType() == Value.UNKNOWN) { + typeInfo = TypeInfo.TYPE_VARCHAR; + } + type = typeInfo; + return this; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append("CASE"); + int len = args.length - 1; + for (int i = 0; i < len; i += 2) { + builder.append(" WHEN "); + args[i].getUnenclosedSQL(builder, sqlFlags); + builder.append(" THEN "); + args[i + 1].getUnenclosedSQL(builder, sqlFlags); + } + if ((len & 1) == 0) { + builder.append(" ELSE "); + args[len].getUnenclosedSQL(builder, sqlFlags); + } + return builder.append(" END"); + } + +} diff 
--git a/h2/src/main/org/h2/expression/SequenceValue.java b/h2/src/main/org/h2/expression/SequenceValue.java index d02b220f69..96a4410d4e 100644 --- a/h2/src/main/org/h2/expression/SequenceValue.java +++ b/h2/src/main/org/h2/expression/SequenceValue.java @@ -1,79 +1,73 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.engine.Session; -import org.h2.message.DbException; +import org.h2.command.Prepared; +import org.h2.engine.SessionLocal; import org.h2.schema.Sequence; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueLong; /** * Wraps a sequence when used in a statement. */ -public class SequenceValue extends Expression { +public final class SequenceValue extends Operation0 { private final Sequence sequence; - public SequenceValue(Sequence sequence) { - this.sequence = sequence; - } + private final boolean current; - @Override - public Value getValue(Session session) { - ValueLong value = ValueLong.get(sequence.getNext(session)); - session.setLastIdentity(value); - return value; - } + private final Prepared prepared; - @Override - public TypeInfo getType() { - return TypeInfo.TYPE_LONG; - } - - @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - // nothing to do + /** + * Creates new instance of NEXT VALUE FOR expression. 
+ * + * @param sequence + * the sequence + * @param prepared + * the owner command, or {@code null} + */ + public SequenceValue(Sequence sequence, Prepared prepared) { + this.sequence = sequence; + current = false; + this.prepared = prepared; } - @Override - public Expression optimize(Session session) { - return this; + /** + * Creates new instance of CURRENT VALUE FOR expression. + * + * @param sequence + * the sequence + */ + public SequenceValue(Sequence sequence) { + this.sequence = sequence; + current = true; + prepared = null; } @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - // nothing to do + public Value getValue(SessionLocal session) { + return current ? session.getCurrentValueFor(sequence) : session.getNextValueFor(sequence, prepared); } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append("(NEXT VALUE FOR "); - return sequence.getSQL(builder, alwaysQuote).append(')'); + public TypeInfo getType() { + return sequence.getDataType(); } @Override - public void updateAggregate(Session session, int stage) { - // nothing to do + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append(current ? 
"CURRENT" : "NEXT").append(" VALUE FOR "); + return sequence.getSQL(builder, sqlFlags); } @Override public boolean isEverything(ExpressionVisitor visitor) { switch (visitor.getType()) { - case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.GET_COLUMNS1: - case ExpressionVisitor.GET_COLUMNS2: - return true; case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.READONLY: case ExpressionVisitor.INDEPENDENT: case ExpressionVisitor.QUERY_COMPARABLE: return false; @@ -83,8 +77,10 @@ public boolean isEverything(ExpressionVisitor visitor) { case ExpressionVisitor.GET_DEPENDENCIES: visitor.addDependency(sequence); return true; + case ExpressionVisitor.READONLY: + return current; default: - throw DbException.throwInternalError("type="+visitor.getType()); + return true; } } @@ -93,9 +89,4 @@ public int getCost() { return 1; } - @Override - public boolean isGeneratedKey() { - return true; - } - } diff --git a/h2/src/main/org/h2/expression/SimpleCase.java b/h2/src/main/org/h2/expression/SimpleCase.java new file mode 100644 index 0000000000..1fc46fa57e --- /dev/null +++ b/h2/src/main/org/h2/expression/SimpleCase.java @@ -0,0 +1,273 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A simple case. 
+ */ +public final class SimpleCase extends Expression { + + public static final class SimpleWhen { + + Expression[] operands; + + Expression result; + + SimpleWhen next; + + public SimpleWhen(Expression operand, Expression result) { + this(new Expression[] { operand }, result); + } + + public SimpleWhen(Expression[] operands, Expression result) { + this.operands = operands; + this.result = result; + } + + public void setWhen(SimpleWhen next) { + this.next = next; + } + + } + + private Expression operand; + + private SimpleWhen when; + + private Expression elseResult; + + private TypeInfo type; + + public SimpleCase(Expression operand, SimpleWhen when, Expression elseResult) { + this.operand = operand; + this.when = when; + this.elseResult = elseResult; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = operand.getValue(session); + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + if (e.getWhenValue(session, v)) { + return when.result.getValue(session).convertTo(type, session); + } + } + } + if (elseResult != null) { + return elseResult.getValue(session).convertTo(type, session); + } + return ValueNull.INSTANCE; + } + + @Override + public Expression optimize(SessionLocal session) { + TypeInfo typeInfo = TypeInfo.TYPE_UNKNOWN; + operand = operand.optimize(session); + boolean allConst = operand.isConstant(); + Value v = null; + if (allConst) { + v = operand.getValue(session); + } + TypeInfo operandType = operand.getType(); + for (SimpleWhen when = this.when; when != null; when = when.next) { + Expression[] operands = when.operands; + for (int i = 0; i < operands.length; i++) { + Expression e = operands[i].optimize(session); + if (!e.isWhenConditionOperand()) { + TypeInfo.checkComparable(operandType, e.getType()); + } + if (allConst) { + if (e.isConstant()) { + if (e.getWhenValue(session, v)) { + return when.result.optimize(session); + } + } else { + allConst = false; + } + } + 
operands[i] = e; + } + when.result = when.result.optimize(session); + typeInfo = combineTypes(typeInfo, when.result); + } + if (elseResult != null) { + elseResult = elseResult.optimize(session); + if (allConst) { + return elseResult; + } + typeInfo = combineTypes(typeInfo, elseResult); + } else if (allConst) { + return ValueExpression.NULL; + } + if (typeInfo.getValueType() == Value.UNKNOWN) { + typeInfo = TypeInfo.TYPE_VARCHAR; + } + type = typeInfo; + return this; + } + + static TypeInfo combineTypes(TypeInfo typeInfo, Expression e) { + if (!e.isNullConstant()) { + TypeInfo type = e.getType(); + int valueType = type.getValueType(); + if (valueType != Value.UNKNOWN && valueType != Value.NULL) { + typeInfo = TypeInfo.getHigherType(typeInfo, type); + } + } + return typeInfo; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + operand.getUnenclosedSQL(builder.append("CASE "), sqlFlags); + for (SimpleWhen when = this.when; when != null; when = when.next) { + builder.append(" WHEN"); + Expression[] operands = when.operands; + for (int i = 0, len = operands.length; i < len; i++) { + if (i > 0) { + builder.append(','); + } + operands[i].getWhenSQL(builder, sqlFlags); + } + when.result.getUnenclosedSQL(builder.append(" THEN "), sqlFlags); + } + if (elseResult != null) { + elseResult.getUnenclosedSQL(builder.append(" ELSE "), sqlFlags); + } + return builder.append(" END"); + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + operand.mapColumns(resolver, level, state); + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + e.mapColumns(resolver, level, state); + } + when.result.mapColumns(resolver, level, state); + } + if (elseResult != null) { + elseResult.mapColumns(resolver, level, state); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, 
boolean value) { + operand.setEvaluatable(tableFilter, value); + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + e.setEvaluatable(tableFilter, value); + } + when.result.setEvaluatable(tableFilter, value); + } + if (elseResult != null) { + elseResult.setEvaluatable(tableFilter, value); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + operand.updateAggregate(session, stage); + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + e.updateAggregate(session, stage); + } + when.result.updateAggregate(session, stage); + } + if (elseResult != null) { + elseResult.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + if (!operand.isEverything(visitor)) { + return false; + } + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + if (!e.isEverything(visitor)) { + return false; + } + } + if (!when.result.isEverything(visitor)) { + return false; + } + } + if (elseResult != null && !elseResult.isEverything(visitor)) { + return false; + } + return true; + } + + @Override + public int getCost() { + int cost = 1, resultCost = 0; + cost += operand.getCost(); + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + cost += e.getCost(); + } + resultCost = Math.max(resultCost, when.result.getCost()); + } + if (elseResult != null) { + resultCost = Math.max(resultCost, elseResult.getCost()); + } + return cost + resultCost; + } + + @Override + public int getSubexpressionCount() { + int count = 1; + for (SimpleWhen when = this.when; when != null; when = when.next) { + count += when.operands.length + 1; + } + if (elseResult != null) { + count++; + } + return count; + } + + @Override + public Expression getSubexpression(int index) { + if (index >= 0) { + if (index == 
0) { + return operand; + } + int ptr = 1; + for (SimpleWhen when = this.when; when != null; when = when.next) { + Expression[] operands = when.operands; + int count = operands.length; + int offset = index - ptr; + if (offset < count) { + return operands[offset]; + } + ptr += count; + if (index == ptr++) { + return when.result; + } + } + if (elseResult != null && index == ptr) { + return elseResult; + } + } + throw new IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/Subquery.java b/h2/src/main/org/h2/expression/Subquery.java index 35909e0a92..236a538b25 100644 --- a/h2/src/main/org/h2/expression/Subquery.java +++ b/h2/src/main/org/h2/expression/Subquery.java @@ -1,18 +1,22 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; + import org.h2.api.ErrorCode; -import org.h2.command.dml.Query; -import org.h2.engine.Session; +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.ResultInterface; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; +import org.h2.value.ExtTypeInfoRow; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueNull; @@ -22,29 +26,29 @@ * A query returning a single value. * Subqueries are used inside other statements. 
*/ -public class Subquery extends Expression { +public final class Subquery extends Expression { private final Query query; + private Expression expression; + private Value nullValue; + + private HashSet outerResolvers = new HashSet<>(); + public Subquery(Query query) { this.query = query; } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { query.setSession(session); try (ResultInterface result = query.query(2)) { Value v; if (!result.next()) { - v = ValueNull.INSTANCE; + return nullValue; } else { - Value[] values = result.currentRow(); - if (result.getVisibleColumnCount() == 1) { - v = values[0]; - } else { - v = ValueRow.get(values); - } + v = readRow(result); if (result.hasNext()) { throw DbException.get(ErrorCode.SCALAR_SUBQUERY_CONTAINS_MORE_THAN_ONE_ROW); } @@ -53,54 +57,96 @@ public Value getValue(Session session) { } } + /** + * Evaluates and returns all rows of the subquery. + * + * @param session + * the session + * @return values in all rows + */ + public ArrayList getAllRows(SessionLocal session) { + ArrayList list = new ArrayList<>(); + query.setSession(session); + try (ResultInterface result = query.query(Integer.MAX_VALUE)) { + while (result.next()) { + list.add(readRow(result)); + } + } + return list; + } + + private Value readRow(ResultInterface result) { + Value[] values = result.currentRow(); + int visible = result.getVisibleColumnCount(); + return visible == 1 ? values[0] + : ValueRow.get(getType(), visible == values.length ? 
values : Arrays.copyOf(values, visible)); + } + @Override public TypeInfo getType() { - return getExpression().getType(); + return expression.getType(); } @Override public void mapColumns(ColumnResolver resolver, int level, int state) { + outerResolvers.add(resolver); query.mapColumns(resolver, level + 1); } @Override - public Expression optimize(Session session) { - session.optimizeQueryExpression(query); + public Expression optimize(SessionLocal session) { + query.prepare(); + if (query.isConstantQuery()) { + setType(); + return ValueExpression.get(getValue(session)); + } + if (outerResolvers != null && session.getDatabase().getSettings().optimizeSimpleSingleRowSubqueries) { + Expression e = query.getIfSingleRow(); + if (e != null && e.isEverything(ExpressionVisitor.getDecrementQueryLevelVisitor(outerResolvers, 0))) { + e.isEverything(ExpressionVisitor.getDecrementQueryLevelVisitor(outerResolvers, 1)); + return e.optimize(session); + } + } + outerResolvers = null; + setType(); return this; } + private void setType() { + ArrayList expressions = query.getExpressions(); + int columnCount = query.getColumnCount(); + if (columnCount == 1) { + expression = expressions.get(0); + nullValue = ValueNull.INSTANCE; + } else { + Expression[] list = new Expression[columnCount]; + Value[] nulls = new Value[columnCount]; + for (int i = 0; i < columnCount; i++) { + list[i] = expressions.get(i); + nulls[i] = ValueNull.INSTANCE; + } + ExpressionList expressionList = new ExpressionList(list, false); + expressionList.initializeType(); + expression = expressionList; + nullValue = ValueRow.get(new ExtTypeInfoRow(list), nulls); + } + } + @Override public void setEvaluatable(TableFilter tableFilter, boolean b) { query.setEvaluatable(tableFilter, b); } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - return builder.append('(').append(query.getPlanSQL(alwaysQuote)).append(')'); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int 
sqlFlags) { + return builder.append('(').append(query.getPlanSQL(sqlFlags)).append(')'); } @Override - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { query.updateAggregate(session, stage); } - private Expression getExpression() { - if (expression == null) { - ArrayList expressions = query.getExpressions(); - int columnCount = query.getColumnCount(); - if (columnCount == 1) { - expression = expressions.get(0); - } else { - Expression[] list = new Expression[columnCount]; - for (int i = 0; i < columnCount; i++) { - list[i] = expressions.get(i); - } - expression = new ExpressionList(list, false); - } - } - return expression; - } - @Override public boolean isEverything(ExpressionVisitor visitor) { return query.isEverything(visitor); @@ -116,7 +162,8 @@ public int getCost() { } @Override - public Expression[] getExpressionColumns(Session session) { - return getExpression().getExpressionColumns(session); + public boolean isConstant() { + return query.isConstantQuery(); } + } diff --git a/h2/src/main/org/h2/expression/TimeZoneOperation.java b/h2/src/main/org/h2/expression/TimeZoneOperation.java new file mode 100644 index 0000000000..3c7de63b63 --- /dev/null +++ b/h2/src/main/org/h2/expression/TimeZoneOperation.java @@ -0,0 +1,146 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; +import org.h2.util.TimeZoneProvider; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInterval; +import org.h2.value.ValueNull; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + * A time zone specification (AT { TIME ZONE | LOCAL }). + */ +public final class TimeZoneOperation extends Operation1_2 { + + public TimeZoneOperation(Expression left, Expression right) { + super(left, right); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(" AT "); + if (right != null) { + right.getSQL(builder.append("TIME ZONE "), sqlFlags, AUTO_PARENTHESES); + } else { + builder.append("LOCAL"); + } + return builder; + } + + @Override + public Value getValue(SessionLocal session) { + Value a = left.getValue(session).convertTo(type, session); + int valueType = a.getValueType(); + if ((valueType == Value.TIMESTAMP_TZ || valueType == Value.TIME_TZ) && right != null) { + Value b = right.getValue(session); + if (b != ValueNull.INSTANCE) { + if (valueType == Value.TIMESTAMP_TZ) { + ValueTimestampTimeZone v = (ValueTimestampTimeZone) a; + long dateValue = v.getDateValue(); + long timeNanos = v.getTimeNanos(); + int offsetSeconds = v.getTimeZoneOffsetSeconds(); + int newOffset = parseTimeZone(b, dateValue, timeNanos, offsetSeconds, true); + if (offsetSeconds != newOffset) { + a = DateTimeUtils.timestampTimeZoneAtOffset(dateValue, timeNanos, offsetSeconds, newOffset); + } + } else { + ValueTimeTimeZone v = (ValueTimeTimeZone) a; + long timeNanos = v.getNanos(); + int offsetSeconds = v.getTimeZoneOffsetSeconds(); + int newOffset = parseTimeZone(b, 
DateTimeUtils.EPOCH_DATE_VALUE, timeNanos, offsetSeconds, false); + if (offsetSeconds != newOffset) { + timeNanos += (newOffset - offsetSeconds) * DateTimeUtils.NANOS_PER_SECOND; + a = ValueTimeTimeZone.fromNanos(DateTimeUtils.normalizeNanosOfDay(timeNanos), newOffset); + } + } + } else { + a = ValueNull.INSTANCE; + } + } + return a; + } + + private static int parseTimeZone(Value b, long dateValue, long timeNanos, int offsetSeconds, + boolean allowTimeZoneName) { + if (DataType.isCharacterStringType(b.getValueType())) { + TimeZoneProvider timeZone; + try { + timeZone = TimeZoneProvider.ofId(b.getString()); + } catch (RuntimeException ex) { + throw DbException.getInvalidValueException("time zone", b.getTraceSQL()); + } + if (!allowTimeZoneName && !timeZone.hasFixedOffset()) { + throw DbException.getInvalidValueException("time zone", b.getTraceSQL()); + } + return timeZone.getTimeZoneOffsetUTC(DateTimeUtils.getEpochSeconds(dateValue, timeNanos, offsetSeconds)); + } + return parseInterval(b); + } + + /** + * Parses a daytime interval as time zone offset. 
+ * + * @param interval the interval + * @return the time zone offset in seconds + */ + public static int parseInterval(Value interval) { + ValueInterval i = (ValueInterval) interval.convertTo(TypeInfo.TYPE_INTERVAL_HOUR_TO_SECOND); + long h = i.getLeading(), seconds = i.getRemaining(); + if (h > 18 || h == 18 && seconds != 0 || seconds % DateTimeUtils.NANOS_PER_SECOND != 0) { + throw DbException.getInvalidValueException("time zone", i.getTraceSQL()); + } + int newOffset = (int) (h * 3_600 + seconds / DateTimeUtils.NANOS_PER_SECOND); + if (i.isNegative()) { + newOffset = -newOffset; + } + return newOffset; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + TypeInfo type = left.getType(); + int valueType = Value.TIMESTAMP_TZ, scale = ValueTimestamp.MAXIMUM_SCALE; + switch (type.getValueType()) { + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + scale = type.getScale(); + break; + case Value.TIME: + case Value.TIME_TZ: + valueType = Value.TIME_TZ; + scale = type.getScale(); + break; + default: + StringBuilder builder = left.getSQL(new StringBuilder(), TRACE_SQL_FLAGS, AUTO_PARENTHESES); + int offset = builder.length(); + builder.append(" AT "); + if (right != null) { + right.getSQL(builder.append("TIME ZONE "), TRACE_SQL_FLAGS, AUTO_PARENTHESES); + } else { + builder.append("LOCAL"); + } + throw DbException.getSyntaxError(builder.toString(), offset, "time, timestamp"); + } + this.type = TypeInfo.getTypeInfo(valueType, -1, scale, null); + if (left.isConstant() && (right == null || right.isConstant())) { + return ValueExpression.get(getValue(session)); + } + return this; + } + +} diff --git a/h2/src/main/org/h2/expression/TypedValueExpression.java b/h2/src/main/org/h2/expression/TypedValueExpression.java new file mode 100644 index 0000000000..dd16296665 --- /dev/null +++ b/h2/src/main/org/h2/expression/TypedValueExpression.java @@ -0,0 +1,103 @@ +/* + 
* Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import java.util.Objects; + +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * An expression representing a constant value with a type cast. + */ +public class TypedValueExpression extends ValueExpression { + + /** + * The expression represents the SQL UNKNOWN value. + */ + public static final TypedValueExpression UNKNOWN = new TypedValueExpression(ValueNull.INSTANCE, + TypeInfo.TYPE_BOOLEAN); + + /** + * Create a new expression with the given value and type. + * + * @param value + * the value + * @param type + * the value type + * @return the expression + */ + public static ValueExpression get(Value value, TypeInfo type) { + return getImpl(value, type, true); + } + + /** + * Create a new typed value expression with the given value and type if + * value is {@code NULL}, or a plain value expression otherwise. 
+ * + * @param value + * the value + * @param type + * the value type + * @return the expression + */ + public static ValueExpression getTypedIfNull(Value value, TypeInfo type) { + return getImpl(value, type, false); + } + + private static ValueExpression getImpl(Value value, TypeInfo type, boolean preserveStrictType) { + if (value == ValueNull.INSTANCE) { + switch (type.getValueType()) { + case Value.NULL: + return ValueExpression.NULL; + case Value.BOOLEAN: + return UNKNOWN; + } + return new TypedValueExpression(value, type); + } + if (preserveStrictType) { + DataType dt = DataType.getDataType(type.getValueType()); + TypeInfo vt = value.getType(); + if (dt.supportsPrecision && type.getPrecision() != vt.getPrecision() + || dt.supportsScale && type.getScale() != vt.getScale() + || !Objects.equals(type.getExtTypeInfo(), vt.getExtTypeInfo())) { + return new TypedValueExpression(value, type); + } + } + return ValueExpression.get(value); + } + + private final TypeInfo type; + + private TypedValueExpression(Value value, TypeInfo type) { + super(value); + this.type = type; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (this == UNKNOWN) { + builder.append("UNKNOWN"); + } else { + value.getSQL(builder.append("CAST("), sqlFlags | NO_CASTS).append(" AS "); + type.getSQL(builder, sqlFlags).append(')'); + } + return builder; + } + + @Override + public boolean isNullConstant() { + return value == ValueNull.INSTANCE; + } + +} diff --git a/h2/src/main/org/h2/expression/UnaryOperation.java b/h2/src/main/org/h2/expression/UnaryOperation.java index 459251c6e4..6860d7ebdc 100644 --- a/h2/src/main/org/h2/expression/UnaryOperation.java +++ b/h2/src/main/org/h2/expression/UnaryOperation.java @@ -1,13 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.engine.Session; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; +import org.h2.engine.SessionLocal; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueNull; @@ -15,42 +13,38 @@ /** * Unary operation. Only negation operation is currently supported. */ -public class UnaryOperation extends Expression { - - private Expression arg; - private TypeInfo type; +public class UnaryOperation extends Operation1 { public UnaryOperation(Expression arg) { - this.arg = arg; + super(arg); } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - // don't remove the space, otherwise it might end up some thing like - // --1 which is a line remark - builder.append("(- "); - return arg.getSQL(builder, alwaysQuote).append(')'); + public boolean needParentheses() { + return true; } @Override - public Value getValue(Session session) { - Value a = arg.getValue(session).convertTo(type, session.getDatabase().getMode(), null); - return a == ValueNull.INSTANCE ? a : a.negate(); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + // don't remove the space, otherwise it might end up some thing like + // --1 which is a line remark + return arg.getSQL(builder.append("- "), sqlFlags, AUTO_PARENTHESES); } @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - arg.mapColumns(resolver, level, state); + public Value getValue(SessionLocal session) { + Value a = arg.getValue(session).convertTo(type, session); + return a == ValueNull.INSTANCE ? 
a : a.negate(); } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { arg = arg.optimize(session); type = arg.getType(); if (type.getValueType() == Value.UNKNOWN) { - type = TypeInfo.TYPE_DECIMAL_DEFAULT; + type = TypeInfo.TYPE_NUMERIC_FLOATING_POINT; } else if (type.getValueType() == Value.ENUM) { - type = TypeInfo.TYPE_INT; + type = TypeInfo.TYPE_INTEGER; } if (arg.isConstant()) { return ValueExpression.get(getValue(session)); @@ -58,42 +52,4 @@ public Expression optimize(Session session) { return this; } - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - arg.setEvaluatable(tableFilter, b); - } - - @Override - public TypeInfo getType() { - return type; - } - - @Override - public void updateAggregate(Session session, int stage) { - arg.updateAggregate(session, stage); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return arg.isEverything(visitor); - } - - @Override - public int getCost() { - return arg.getCost() + 1; - } - - @Override - public int getSubexpressionCount() { - return 1; - } - - @Override - public Expression getSubexpression(int index) { - if (index == 0) { - return arg; - } - throw new IndexOutOfBoundsException(); - } - } diff --git a/h2/src/main/org/h2/expression/ValueExpression.java b/h2/src/main/org/h2/expression/ValueExpression.java index ec7d6a2097..d0515e76aa 100644 --- a/h2/src/main/org/h2/expression/ValueExpression.java +++ b/h2/src/main/org/h2/expression/ValueExpression.java @@ -1,60 +1,53 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.condition.Comparison; import org.h2.index.IndexCondition; -import org.h2.message.DbException; -import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueBoolean; -import org.h2.value.ValueCollectionBase; import org.h2.value.ValueNull; /** * An expression representing a constant value. */ -public class ValueExpression extends Expression { +public class ValueExpression extends Operation0 { + /** * The expression represents ValueNull.INSTANCE. */ - private static final Object NULL = new ValueExpression(ValueNull.INSTANCE); + public static final ValueExpression NULL = new ValueExpression(ValueNull.INSTANCE); /** * This special expression represents the default value. It is used for * UPDATE statements of the form SET COLUMN = DEFAULT. The value is * ValueNull.INSTANCE, but should never be accessed. */ - private static final Object DEFAULT = new ValueExpression(ValueNull.INSTANCE); - - private final Value value; + public static final ValueExpression DEFAULT = new ValueExpression(ValueNull.INSTANCE); - private ValueExpression(Value value) { - this.value = value; - } + /** + * The expression represents ValueBoolean.TRUE. + */ + public static final ValueExpression TRUE = new ValueExpression(ValueBoolean.TRUE); /** - * Get the NULL expression. - * - * @return the NULL expression + * The expression represents ValueBoolean.FALSE. */ - public static ValueExpression getNull() { - return (ValueExpression) NULL; - } + public static final ValueExpression FALSE = new ValueExpression(ValueBoolean.FALSE); /** - * Get the DEFAULT expression. - * - * @return the DEFAULT expression + * The value. 
*/ - public static ValueExpression getDefault() { - return (ValueExpression) DEFAULT; + final Value value; + + ValueExpression(Value value) { + this.value = value; } /** @@ -65,45 +58,60 @@ public static ValueExpression getDefault() { */ public static ValueExpression get(Value value) { if (value == ValueNull.INSTANCE) { - return getNull(); + return NULL; + } + if (value.getValueType() == Value.BOOLEAN) { + return getBoolean(value.getBoolean()); } return new ValueExpression(value); } - @Override - public Value getValue(Session session) { - return value; + /** + * Create a new expression with the given boolean value. + * + * @param value the boolean value + * @return the expression + */ + public static ValueExpression getBoolean(Value value) { + if (value == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + return getBoolean(value.getBoolean()); } - @Override - public TypeInfo getType() { - return value.getType(); + /** + * Create a new expression with the given boolean value. + * + * @param value the boolean value + * @return the expression + */ + public static ValueExpression getBoolean(boolean value) { + return value ? 
TRUE : FALSE; } @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (value.getValueType() == Value.BOOLEAN) { - boolean v = ((ValueBoolean) value).getBoolean(); - if (!v) { - filter.addIndexCondition(IndexCondition.get(Comparison.FALSE, null, this)); - } - } + public Value getValue(SessionLocal session) { + return value; } @Override - public Expression getNotIfPossible(Session session) { - return new Comparison(session, Comparison.EQUAL, this, - ValueExpression.get(ValueBoolean.FALSE)); + public TypeInfo getType() { + return value.getType(); } @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - // nothing to do + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (value.getValueType() == Value.BOOLEAN && !value.getBoolean()) { + filter.addIndexCondition(IndexCondition.get(Comparison.FALSE, null, this)); + } } @Override - public Expression optimize(Session session) { - return this; + public Expression getNotIfPossible(SessionLocal session) { + if (value == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + return getBoolean(!value.getBoolean()); } @Override @@ -112,48 +120,28 @@ public boolean isConstant() { } @Override - public boolean isValueSet() { - return true; + public boolean isNullConstant() { + return this == NULL; } @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - // nothing to do + public boolean isValueSet() { + return true; } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { if (this == DEFAULT) { builder.append("DEFAULT"); } else { - value.getSQL(builder); + value.getSQL(builder, sqlFlags); } return builder; } - @Override - public void updateAggregate(Session session, int stage) { - // nothing to do - } - @Override public boolean isEverything(ExpressionVisitor visitor) { - switch 
(visitor.getType()) { - case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: - case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.INDEPENDENT: - case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.GET_COLUMNS1: - case ExpressionVisitor.GET_COLUMNS2: - return true; - default: - throw DbException.throwInternalError("type=" + visitor.getType()); - } + return true; } @Override @@ -161,12 +149,4 @@ public int getCost() { return 0; } - @Override - public Expression[] getExpressionColumns(Session session) { - int valueType = getType().getValueType(); - if (valueType == Value.ARRAY || valueType == Value.ROW) { - return getExpressionColumns(session, (ValueCollectionBase) getValue(session)); - } - return super.getExpressionColumns(session); - } } diff --git a/h2/src/main/org/h2/expression/Variable.java b/h2/src/main/org/h2/expression/Variable.java index 5f0f20fbc7..b1d8da2823 100644 --- a/h2/src/main/org/h2/expression/Variable.java +++ b/h2/src/main/org/h2/expression/Variable.java @@ -1,27 +1,24 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.command.Parser; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; +import org.h2.engine.SessionLocal; +import org.h2.util.ParserUtil; import org.h2.value.TypeInfo; import org.h2.value.Value; /** * A user-defined variable, for example: @ID. 
*/ -public class Variable extends Expression { +public final class Variable extends Operation0 { private final String name; private Value lastValue; - public Variable(Session session, String name) { + public Variable(SessionLocal session, String name) { this.name = name; lastValue = session.getVariable(name); } @@ -32,9 +29,8 @@ public int getCost() { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append('@'); - return Parser.quoteIdentifier(builder, name, alwaysQuote); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return ParserUtil.quoteIdentifier(builder.append('@'), name, sqlFlags); } @Override @@ -43,7 +39,7 @@ public TypeInfo getType() { } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { lastValue = session.getVariable(name); return lastValue; } @@ -51,47 +47,13 @@ public Value getValue(Session session) { @Override public boolean isEverything(ExpressionVisitor visitor) { switch (visitor.getType()) { - case ExpressionVisitor.EVALUATABLE: - // the value will be evaluated at execute time - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - // it is checked independently if the value is the same as the last - // time - case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.INDEPENDENT: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.GET_COLUMNS1: - case ExpressionVisitor.GET_COLUMNS2: - return true; case ExpressionVisitor.DETERMINISTIC: return false; default: - throw DbException.throwInternalError("type="+visitor.getType()); + return true; } } - @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - // nothing to do - } - - @Override - public Expression optimize(Session session) { - return this; - } - - @Override - public void 
setEvaluatable(TableFilter tableFilter, boolean value) { - // nothing to do - } - - @Override - public void updateAggregate(Session session, int stage) { - // nothing to do - } - public String getName() { return name; } diff --git a/h2/src/main/org/h2/expression/Wildcard.java b/h2/src/main/org/h2/expression/Wildcard.java index fe425d7a97..17d8cc9997 100644 --- a/h2/src/main/org/h2/expression/Wildcard.java +++ b/h2/src/main/org/h2/expression/Wildcard.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; @@ -9,7 +9,7 @@ import java.util.HashMap; import org.h2.api.ErrorCode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.table.Column; import org.h2.table.ColumnResolver; @@ -23,7 +23,8 @@ * This object is only used temporarily during the parsing phase, and later * replaced by column expressions. 
*/ -public class Wildcard extends Expression { +public final class Wildcard extends Expression { + private final String schema; private final String table; @@ -55,7 +56,7 @@ public HashMap mapExceptColumns() { if (column == null) { throw ec.getColumnException(ErrorCode.COLUMN_NOT_FOUND_1); } - if (exceptTableColumns.put(column, ec) != null) { + if (exceptTableColumns.putIfAbsent(column, ec) != null) { throw ec.getColumnException(ErrorCode.DUPLICATE_COLUMN_NAME_1); } } @@ -63,13 +64,13 @@ public HashMap mapExceptColumns() { } @Override - public Value getValue(Session session) { - throw DbException.throwInternalError(toString()); + public Value getValue(SessionLocal session) { + throw DbException.getInternalError(toString()); } @Override public TypeInfo getType() { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } @Override @@ -82,13 +83,13 @@ public void mapColumns(ColumnResolver resolver, int level, int state) { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { throw DbException.get(ErrorCode.SYNTAX_ERROR_1, table); } @Override public void setEvaluatable(TableFilter tableFilter, boolean b) { - DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } @Override @@ -102,22 +103,20 @@ public String getSchemaName() { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { if (table != null) { StringUtils.quoteIdentifier(builder, table).append('.'); } builder.append('*'); if (exceptColumns != null) { - builder.append(" EXCEPT ("); - writeExpressions(builder, exceptColumns, alwaysQuote); - builder.append(')'); + writeExpressions(builder.append(" EXCEPT ("), exceptColumns, sqlFlags).append(')'); } return builder; } @Override - public void updateAggregate(Session session, int stage) { - 
DbException.throwInternalError(toString()); + public void updateAggregate(SessionLocal session, int stage) { + throw DbException.getInternalError(toString()); } @Override @@ -125,12 +124,12 @@ public boolean isEverything(ExpressionVisitor visitor) { if (visitor.getType() == ExpressionVisitor.QUERY_COMPARABLE) { return true; } - throw DbException.throwInternalError(Integer.toString(visitor.getType())); + throw DbException.getInternalError(Integer.toString(visitor.getType())); } @Override public int getCost() { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/expression/aggregate/AbstractAggregate.java b/h2/src/main/org/h2/expression/aggregate/AbstractAggregate.java index c857eab44b..09dbf84f8c 100644 --- a/h2/src/main/org/h2/expression/aggregate/AbstractAggregate.java +++ b/h2/src/main/org/h2/expression/aggregate/AbstractAggregate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression.aggregate; @@ -9,10 +9,9 @@ import java.util.HashMap; import java.util.Iterator; -import org.h2.command.dml.Select; -import org.h2.command.dml.SelectGroups; -import org.h2.command.dml.SelectOrderBy; -import org.h2.engine.Session; +import org.h2.command.query.Select; +import org.h2.command.query.SelectGroups; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.analysis.DataAnalysisOperation; import org.h2.expression.analysis.WindowFrame; @@ -88,12 +87,12 @@ public void mapColumnsAnalysis(ColumnResolver resolver, int level, int innerStat } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { for (int i = 0; i < args.length; i++) { args[i] = args[i].optimize(session); } if (filterCondition != null) { - filterCondition = filterCondition.optimize(session); + filterCondition = filterCondition.optimizeCondition(session); } return super.optimize(session); } @@ -110,8 +109,8 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - protected void getOrderedResultLoop(Session session, HashMap result, ArrayList ordered, - int rowIdColumn) { + protected void getOrderedResultLoop(SessionLocal session, HashMap result, + ArrayList ordered, int rowIdColumn) { WindowFrame frame = over.getWindowFrame(); /* * With RANGE (default) or GROUPS units and EXCLUDE GROUP or EXCLUDE NO @@ -156,7 +155,7 @@ protected void getOrderedResultLoop(Session session, HashMap res updateFromExpressions(session, aggregateData, iter.next()); } Value r = getAggregatedValue(session, aggregateData); - i = processGroup(session, result, r, ordered, rowIdColumn, i, size, aggregateData, grouped); + i = processGroup(result, r, ordered, rowIdColumn, i, size, grouped); } } @@ -185,8 +184,8 @@ private static boolean checkVariableBounds(WindowFrame frame, ArrayList return false; } - private void aggregateFastPartition(Session session, 
HashMap result, ArrayList ordered, - int rowIdColumn, boolean grouped) { + private void aggregateFastPartition(SessionLocal session, HashMap result, + ArrayList ordered, int rowIdColumn, boolean grouped) { Object aggregateData = createAggregateData(); int size = ordered.size(); int lastIncludedRow = -1; @@ -203,11 +202,11 @@ private void aggregateFastPartition(Session session, HashMap res } else if (r == null) { r = getAggregatedValue(session, aggregateData); } - i = processGroup(session, result, r, ordered, rowIdColumn, i, size, aggregateData, grouped); + i = processGroup(result, r, ordered, rowIdColumn, i, size, grouped); } } - private void aggregateFastPartitionInReverse(Session session, HashMap result, + private void aggregateFastPartitionInReverse(SessionLocal session, HashMap result, ArrayList ordered, int rowIdColumn, boolean grouped) { Object aggregateData = createAggregateData(); int firstIncludedRow = ordered.size(); @@ -232,8 +231,8 @@ private void aggregateFastPartitionInReverse(Session session, HashMap result, Value r, ArrayList ordered, - int rowIdColumn, int i, int size, Object aggregateData, boolean grouped) { + private int processGroup(HashMap result, Value r, ArrayList ordered, + int rowIdColumn, int i, int size, boolean grouped) { Value[] firstRowInGroup = ordered.get(i), currentRowInGroup = firstRowInGroup; do { result.put(currentRowInGroup[rowIdColumn].getInt(), r); @@ -242,8 +241,8 @@ private int processGroup(Session session, HashMap result, Value return i; } - private void aggregateWholePartition(Session session, HashMap result, ArrayList ordered, - int rowIdColumn) { + private void aggregateWholePartition(SessionLocal session, HashMap result, + ArrayList ordered, int rowIdColumn) { // Aggregate values from the whole partition Object aggregateData = createAggregateData(); for (Value[] row : ordered) { @@ -266,23 +265,22 @@ private void aggregateWholePartition(Session session, HashMap re * @param array * values of expressions */ - protected 
abstract void updateFromExpressions(Session session, Object aggregateData, Value[] array); + protected abstract void updateFromExpressions(SessionLocal session, Object aggregateData, Value[] array); @Override - protected void updateAggregate(Session session, SelectGroups groupData, int groupRowId) { - ArrayList orderBy; + protected void updateAggregate(SessionLocal session, SelectGroups groupData, int groupRowId) { if (filterCondition == null || filterCondition.getBooleanValue(session)) { if (over != null) { - if ((orderBy = over.getOrderBy()) != null) { - updateOrderedAggregate(session, groupData, groupRowId, orderBy); + if (over.isOrdered()) { + updateOrderedAggregate(session, groupData, groupRowId, over.getOrderBy()); } else { updateAggregate(session, getWindowData(session, groupData, false)); } } else { updateAggregate(session, getGroupData(groupData, false)); } - } else if (over != null && (orderBy = over.getOrderBy()) != null) { - updateOrderedAggregate(session, groupData, groupRowId, orderBy); + } else if (over != null && over.isOrdered()) { + updateOrderedAggregate(session, groupData, groupRowId, over.getOrderBy()); } } @@ -294,10 +292,10 @@ protected void updateAggregate(Session session, SelectGroups groupData, int grou * @param aggregateData * aggregate data */ - protected abstract void updateAggregate(Session session, Object aggregateData); + protected abstract void updateAggregate(SessionLocal session, Object aggregateData); @Override - protected void updateGroupAggregates(Session session, int stage) { + protected void updateGroupAggregates(SessionLocal session, int stage) { if (filterCondition != null) { filterCondition.updateAggregate(session, stage); } @@ -305,12 +303,22 @@ protected void updateGroupAggregates(Session session, int stage) { } @Override - protected StringBuilder appendTailConditions(StringBuilder builder, boolean alwaysQuote) { + protected StringBuilder appendTailConditions(StringBuilder builder, int sqlFlags, boolean forceOrderBy) { 
if (filterCondition != null) { builder.append(" FILTER (WHERE "); - filterCondition.getSQL(builder, alwaysQuote).append(')'); + filterCondition.getUnenclosedSQL(builder, sqlFlags).append(')'); } - return super.appendTailConditions(builder, alwaysQuote); + return super.appendTailConditions(builder, sqlFlags, forceOrderBy); + } + + @Override + public int getSubexpressionCount() { + return args.length; + } + + @Override + public Expression getSubexpression(int index) { + return args[index]; } } diff --git a/h2/src/main/org/h2/expression/aggregate/Aggregate.java b/h2/src/main/org/h2/expression/aggregate/Aggregate.java index a1e4e9ec43..ac8082c354 100644 --- a/h2/src/main/org/h2/expression/aggregate/Aggregate.java +++ b/h2/src/main/org/h2/expression/aggregate/Aggregate.java @@ -1,29 +1,36 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression.aggregate; +import java.io.ByteArrayOutputStream; import java.math.BigDecimal; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.Map.Entry; import java.util.TreeMap; import org.h2.api.ErrorCode; -import org.h2.command.dml.Select; -import org.h2.command.dml.SelectOrderBy; +import org.h2.command.query.QueryOrderBy; +import org.h2.command.query.Select; +import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Mode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; -import org.h2.expression.Subquery; +import org.h2.expression.ExpressionWithFlags; +import org.h2.expression.ValueExpression; +import org.h2.expression.aggregate.AggregateDataCollecting.NullCollectionMode; import org.h2.expression.analysis.Window; +import org.h2.expression.function.BitFunction; +import org.h2.expression.function.JsonConstructorFunction; import org.h2.index.Cursor; import org.h2.index.Index; import org.h2.message.DbException; @@ -34,31 +41,51 @@ import org.h2.table.ColumnResolver; import org.h2.table.Table; import org.h2.table.TableFilter; +import org.h2.util.StringUtils; +import org.h2.util.json.JsonConstructorUtils; import org.h2.value.CompareMode; import org.h2.value.DataType; +import org.h2.value.ExtTypeInfoRow; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; import org.h2.value.ValueBoolean; import org.h2.value.ValueDouble; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; +import org.h2.value.ValueInteger; +import org.h2.value.ValueInterval; +import org.h2.value.ValueJson; import org.h2.value.ValueNull; +import org.h2.value.ValueNumeric; 
import org.h2.value.ValueRow; -import org.h2.value.ValueString; +import org.h2.value.ValueVarchar; /** * Implements the integrated aggregate functions, such as COUNT, MAX, SUM. */ -public class Aggregate extends AbstractAggregate { +public class Aggregate extends AbstractAggregate implements ExpressionWithFlags { - private static final HashMap AGGREGATES = new HashMap<>(64); + /** + * The additional result precision in decimal digits for a SUM aggregate function. + */ + private static final int ADDITIONAL_SUM_PRECISION = 10; + + /** + * The additional precision and scale in decimal digits for an AVG aggregate function. + */ + private static final int ADDITIONAL_AVG_SCALE = 10; + + private static final HashMap AGGREGATES = new HashMap<>(128); private final AggregateType aggregateType; - private ArrayList orderByList; + private ArrayList orderByList; private SortOrder orderBySort; + private Object extraArguments; + + private int flags; + /** * Create a new aggregate object. * @@ -74,7 +101,7 @@ public class Aggregate extends AbstractAggregate { public Aggregate(AggregateType aggregateType, Expression[] args, Select select, boolean distinct) { super(select, args, distinct); if (distinct && aggregateType == AggregateType.COUNT_ALL) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } this.aggregateType = aggregateType; } @@ -109,10 +136,28 @@ public Aggregate(AggregateType aggregateType, Expression[] args, Select select, addAggregate("EVERY", AggregateType.EVERY); // PostgreSQL compatibility addAggregate("BOOL_AND", AggregateType.EVERY); - addAggregate("SELECTIVITY", AggregateType.SELECTIVITY); addAggregate("HISTOGRAM", AggregateType.HISTOGRAM); - addAggregate("BIT_OR", AggregateType.BIT_OR); - addAggregate("BIT_AND", AggregateType.BIT_AND); + addAggregate("BIT_AND_AGG", AggregateType.BIT_AND_AGG); + addAggregate("BIT_AND", AggregateType.BIT_AND_AGG); + addAggregate("BIT_OR_AGG", AggregateType.BIT_OR_AGG); + addAggregate("BIT_OR", 
AggregateType.BIT_OR_AGG); + addAggregate("BIT_XOR_AGG", AggregateType.BIT_XOR_AGG); + addAggregate("BIT_NAND_AGG", AggregateType.BIT_NAND_AGG); + addAggregate("BIT_NOR_AGG", AggregateType.BIT_NOR_AGG); + addAggregate("BIT_XNOR_AGG", AggregateType.BIT_XNOR_AGG); + + addAggregate("COVAR_POP", AggregateType.COVAR_POP); + addAggregate("COVAR_SAMP", AggregateType.COVAR_SAMP); + addAggregate("CORR", AggregateType.CORR); + addAggregate("REGR_SLOPE", AggregateType.REGR_SLOPE); + addAggregate("REGR_INTERCEPT", AggregateType.REGR_INTERCEPT); + addAggregate("REGR_COUNT", AggregateType.REGR_COUNT); + addAggregate("REGR_R2", AggregateType.REGR_R2); + addAggregate("REGR_AVGX", AggregateType.REGR_AVGX); + addAggregate("REGR_AVGY", AggregateType.REGR_AVGY); + addAggregate("REGR_SXX", AggregateType.REGR_SXX); + addAggregate("REGR_SYY", AggregateType.REGR_SYY); + addAggregate("REGR_SXY", AggregateType.REGR_SXY); addAggregate("RANK", AggregateType.RANK); addAggregate("DENSE_RANK", AggregateType.DENSE_RANK); @@ -128,6 +173,9 @@ public Aggregate(AggregateType aggregateType, Expression[] args, Select select, // Oracle compatibility addAggregate("STATS_MODE", AggregateType.MODE); addAggregate("ENVELOPE", AggregateType.ENVELOPE); + + addAggregate("JSON_OBJECTAGG", AggregateType.JSON_OBJECTAGG); + addAggregate("JSON_ARRAYAGG", AggregateType.JSON_ARRAYAGG); } private static void addAggregate(String name, AggregateType type) { @@ -153,7 +201,7 @@ public static AggregateType getAggregateType(String name) { * @param orderByList * the order by list */ - public void setOrderByList(ArrayList orderByList) { + public void setOrderByList(ArrayList orderByList) { this.orderByList = orderByList; } @@ -166,42 +214,86 @@ public AggregateType getAggregateType() { return aggregateType; } + /** + * Sets the additional arguments. 
+ * + * @param extraArguments the additional arguments + */ + public void setExtraArguments(Object extraArguments) { + this.extraArguments = extraArguments; + } + + /** + * Returns the additional arguments. + * + * @return the additional arguments + */ + public Object getExtraArguments() { + return extraArguments; + } + + @Override + public void setFlags(int flags) { + this.flags = flags; + } + + @Override + public int getFlags() { + return flags; + } + private void sortWithOrderBy(Value[] array) { final SortOrder sortOrder = orderBySort; - if (sortOrder != null) { - Arrays.sort(array, new Comparator() { - @Override - public int compare(Value v1, Value v2) { - return sortOrder.compare(((ValueArray) v1).getList(), ((ValueArray) v2).getList()); - } - }); - } else { - Arrays.sort(array, select.getSession().getDatabase().getCompareMode()); - } + Arrays.sort(array, + sortOrder != null + ? (v1, v2) -> sortOrder.compare(((ValueRow) v1).getList(), ((ValueRow) v2).getList()) + : select.getSession().getDatabase().getCompareMode()); } @Override - protected void updateAggregate(Session session, Object aggregateData) { + protected void updateAggregate(SessionLocal session, Object aggregateData) { AggregateData data = (AggregateData) aggregateData; Value v = args.length == 0 ? 
null : args[0].getValue(session); updateData(session, data, v, null); } - private void updateData(Session session, AggregateData data, Value v, Value[] remembered) { + private void updateData(SessionLocal session, AggregateData data, Value v, Value[] remembered) { switch (aggregateType) { - case LISTAGG: - if (v != ValueNull.INSTANCE) { - v = updateCollecting(session, v.convertTo(Value.STRING), remembered); + case COVAR_POP: + case COVAR_SAMP: + case CORR: + case REGR_SLOPE: + case REGR_INTERCEPT: + case REGR_R2: + case REGR_SXY: { + Value x; + if (v == ValueNull.INSTANCE || (x = getSecondValue(session, remembered)) == ValueNull.INSTANCE) { + return; } - if (args.length >= 2) { - ((AggregateDataCollecting) data).setSharedArgument( - remembered != null ? remembered[1] : args[1].getValue(session)); + ((AggregateDataBinarySet) data).add(session, v, x); + return; + } + case REGR_COUNT: + case REGR_AVGY: + case REGR_SYY: + if (v == ValueNull.INSTANCE || getSecondValue(session, remembered) == ValueNull.INSTANCE) { + return; } break; - case ARRAY_AGG: - if (v != ValueNull.INSTANCE) { - v = updateCollecting(session, v, remembered); + case REGR_AVGX: + case REGR_SXX: + if (v == ValueNull.INSTANCE || (v = getSecondValue(session, remembered)) == ValueNull.INSTANCE) { + return; + } + break; + case LISTAGG: + if (v == ValueNull.INSTANCE) { + return; } + v = updateCollecting(session, v.convertTo(TypeInfo.TYPE_VARCHAR), remembered); + break; + case ARRAY_AGG: + v = updateCollecting(session, v, remembered); break; case RANK: case DENSE_RANK: @@ -215,7 +307,7 @@ private void updateData(Session session, AggregateData data, Value v, Value[] re ((AggregateDataCollecting) data).setSharedArgument(ValueRow.get(a)); a = new Value[count]; for (int i = 0; i < count; i++) { - a[i] = remembered != null ? remembered[count + i] :orderByList.get(i).expression.getValue(session); + a[i] = remembered != null ? 
remembered[count + i] : orderByList.get(i).expression.getValue(session); } v = ValueRow.get(a); break; @@ -228,39 +320,55 @@ private void updateData(Session session, AggregateData data, Value v, Value[] re case MODE: v = remembered != null ? remembered[0] : orderByList.get(0).expression.getValue(session); break; + case JSON_ARRAYAGG: + v = updateCollecting(session, v, remembered); + break; + case JSON_OBJECTAGG: { + Value key = v; + Value value = getSecondValue(session, remembered); + if (key == ValueNull.INSTANCE) { + throw DbException.getInvalidValueException("JSON_OBJECTAGG key", "NULL"); + } + v = ValueRow.get(new Value[] { key, value }); + break; + } default: // Use argument as is } - data.add(session.getDatabase(), v); + data.add(session, v); + } + + private Value getSecondValue(SessionLocal session, Value[] remembered) { + return remembered != null ? remembered[1] : args[1].getValue(session); } @Override - protected void updateGroupAggregates(Session session, int stage) { + protected void updateGroupAggregates(SessionLocal session, int stage) { super.updateGroupAggregates(session, stage); for (Expression arg : args) { arg.updateAggregate(session, stage); } if (orderByList != null) { - for (SelectOrderBy orderBy : orderByList) { + for (QueryOrderBy orderBy : orderByList) { orderBy.expression.updateAggregate(session, stage); } } } - private Value updateCollecting(Session session, Value v, Value[] remembered) { + private Value updateCollecting(SessionLocal session, Value v, Value[] remembered) { if (orderByList != null) { int size = orderByList.size(); - Value[] array = new Value[1 + size]; - array[0] = v; + Value[] row = new Value[1 + size]; + row[0] = v; if (remembered == null) { for (int i = 0; i < size; i++) { - SelectOrderBy o = orderByList.get(i); - array[i + 1] = o.expression.getValue(session); + QueryOrderBy o = orderByList.get(i); + row[i + 1] = o.expression.getValue(session); } } else { - System.arraycopy(remembered, 1, array, 1, size); + 
System.arraycopy(remembered, 1, row, 1, size); } - v = ValueArray.get(array); + v = ValueRow.get(row); } return v; } @@ -278,13 +386,13 @@ protected int getNumExpressions() { } @Override - protected void rememberExpressions(Session session, Value[] array) { + protected void rememberExpressions(SessionLocal session, Value[] array) { int offset = 0; for (Expression arg : args) { array[offset++] = arg.getValue(session); } if (orderByList != null) { - for (SelectOrderBy o : orderByList) { + for (QueryOrderBy o : orderByList) { array[offset++] = o.expression.getValue(session); } } @@ -294,8 +402,8 @@ protected void rememberExpressions(Session session, Value[] array) { } @Override - protected void updateFromExpressions(Session session, Object aggregateData, Value[] array) { - if (filterCondition == null || array[getNumExpressions() - 1].getBoolean()) { + protected void updateFromExpressions(SessionLocal session, Object aggregateData, Value[] array) { + if (filterCondition == null || array[getNumExpressions() - 1].isTrue()) { AggregateData data = (AggregateData) aggregateData; Value v = args.length == 0 ? 
null : array[0]; updateData(session, data, v, array); @@ -304,20 +412,100 @@ protected void updateFromExpressions(Session session, Object aggregateData, Valu @Override protected Object createAggregateData() { - return AggregateData.create(aggregateType, distinct, type.getValueType()); + switch (aggregateType) { + case COUNT_ALL: + case REGR_COUNT: + return new AggregateDataCount(true); + case COUNT: + if (!distinct) { + return new AggregateDataCount(false); + } + break; + case RANK: + case DENSE_RANK: + case PERCENT_RANK: + case CUME_DIST: + case PERCENTILE_CONT: + case PERCENTILE_DISC: + case MEDIAN: + break; + case SUM: + case BIT_XOR_AGG: + case BIT_XNOR_AGG: + if (distinct) { + break; + } + //$FALL-THROUGH$ + case MIN: + case MAX: + case BIT_AND_AGG: + case BIT_OR_AGG: + case BIT_NAND_AGG: + case BIT_NOR_AGG: + case ANY: + case EVERY: + return new AggregateDataDefault(aggregateType, type); + case AVG: + if (distinct) { + break; + } + //$FALL-THROUGH$ + case REGR_AVGX: + case REGR_AVGY: + return new AggregateDataAvg(type); + case STDDEV_POP: + case STDDEV_SAMP: + case VAR_POP: + case VAR_SAMP: + if (distinct) { + break; + } + //$FALL-THROUGH$ + case REGR_SXX: + case REGR_SYY: + return new AggregateDataStdVar(aggregateType); + case HISTOGRAM: + return new AggregateDataDistinctWithCounts(false, Constants.SELECTIVITY_DISTINCT_COUNT); + case COVAR_POP: + case COVAR_SAMP: + case REGR_SXY: + return new AggregateDataCovar(aggregateType); + case CORR: + case REGR_SLOPE: + case REGR_INTERCEPT: + case REGR_R2: + return new AggregateDataCorr(aggregateType); + case LISTAGG: // NULL values are excluded by Aggregate + case ARRAY_AGG: + return new AggregateDataCollecting(distinct, orderByList != null, NullCollectionMode.USED_OR_IMPOSSIBLE); + case MODE: + return new AggregateDataDistinctWithCounts(true, Integer.MAX_VALUE); + case ENVELOPE: + return new AggregateDataEnvelope(); + case JSON_ARRAYAGG: + return new AggregateDataCollecting(distinct, orderByList != null, + (flags & 
JsonConstructorUtils.JSON_ABSENT_ON_NULL) != 0 ? NullCollectionMode.EXCLUDED + : NullCollectionMode.USED_OR_IMPOSSIBLE); + case JSON_OBJECTAGG: + // ROW(key, value) are collected, so NULL values can't be passed + return new AggregateDataCollecting(distinct, false, NullCollectionMode.USED_OR_IMPOSSIBLE); + default: + throw DbException.getInternalError("type=" + aggregateType); + } + return new AggregateDataCollecting(distinct, false, NullCollectionMode.IGNORED); } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { return select.isQuickAggregateQuery() ? getValueQuick(session) : super.getValue(session); } - private Value getValueQuick(Session session) { + private Value getValueQuick(SessionLocal session) { switch (aggregateType) { case COUNT: case COUNT_ALL: Table table = select.getTopTableFilter().getTable(); - return ValueLong.get(table.getRowCount(session)); + return ValueBigint.get(table.getRowCount(session)); case MIN: case MAX: { boolean first = aggregateType == AggregateType.MIN; @@ -356,12 +544,12 @@ private Value getValueQuick(Session session) { case ENVELOPE: return ((MVSpatialIndex) AggregateDataEnvelope.getGeometryColumnIndex(args[0])).getBounds(session); default: - throw DbException.throwInternalError("type=" + aggregateType); + throw DbException.getInternalError("type=" + aggregateType); } } @Override - public Value getAggregatedValue(Session session, Object aggregateData) { + public Value getAggregatedValue(SessionLocal session, Object aggregateData) { AggregateData data = (AggregateData) aggregateData; if (data == null) { data = (AggregateData) createAggregateData(); @@ -369,11 +557,29 @@ public Value getAggregatedValue(Session session, Object aggregateData) { switch (aggregateType) { case COUNT: if (distinct) { - return ValueLong.get(((AggregateDataCollecting) data).getCount()); + return ValueBigint.get(((AggregateDataCollecting) data).getCount()); } break; case SUM: + case BIT_XOR_AGG: + case 
BIT_XNOR_AGG: + if (distinct) { + AggregateDataCollecting c = ((AggregateDataCollecting) data); + if (c.getCount() == 0) { + return ValueNull.INSTANCE; + } + return collect(session, c, new AggregateDataDefault(aggregateType, type)); + } + break; case AVG: + if (distinct) { + AggregateDataCollecting c = ((AggregateDataCollecting) data); + if (c.getCount() == 0) { + return ValueNull.INSTANCE; + } + return collect(session, c, new AggregateDataAvg(type)); + } + break; case STDDEV_POP: case STDDEV_SAMP: case VAR_POP: @@ -383,13 +589,7 @@ public Value getAggregatedValue(Session session, Object aggregateData) { if (c.getCount() == 0) { return ValueNull.INSTANCE; } - AggregateDataDefault d = new AggregateDataDefault(aggregateType, type.getValueType()); - Database db = session.getDatabase(); - int dataType = type.getValueType(); - for (Value v : c) { - d.add(db, v); - } - return d.getValue(db, dataType); + return collect(session, c, new AggregateDataStdVar(aggregateType)); } break; case HISTOGRAM: @@ -406,10 +606,10 @@ public Value getAggregatedValue(Session session, Object aggregateData) { } if (orderByList != null) { for (int i = 0; i < array.length; i++) { - array[i] = ((ValueArray) array[i]).getList()[0]; + array[i] = ((ValueRow) array[i]).getList()[0]; } } - return ValueArray.get(array); + return ValueArray.get((TypeInfo) type.getExtTypeInfo(), array, session); } case RANK: case DENSE_RANK: @@ -429,7 +629,7 @@ public Value getAggregatedValue(Session session, Object aggregateData) { } BigDecimal arg = v.getBigDecimal(); if (arg.signum() >= 0 && arg.compareTo(BigDecimal.ONE) <= 0) { - return Percentile.getValue(session.getDatabase(), array, type.getValueType(), orderByList, arg, + return Percentile.getValue(session, array, type.getValueType(), orderByList, arg, aggregateType == AggregateType.PERCENTILE_CONT); } else { throw DbException.getInvalidValueException(aggregateType == AggregateType.PERCENTILE_CONT ? 
@@ -441,25 +641,74 @@ public Value getAggregatedValue(Session session, Object aggregateData) { if (array == null) { return ValueNull.INSTANCE; } - return Percentile.getValue(session.getDatabase(), array, type.getValueType(), orderByList, Percentile.HALF, - true); + return Percentile.getValue(session, array, type.getValueType(), orderByList, Percentile.HALF, true); } case MODE: return getMode(session, data); + case JSON_ARRAYAGG: { + Value[] array = ((AggregateDataCollecting) data).getArray(); + if (array == null) { + return ValueNull.INSTANCE; + } + if (orderByList != null) { + sortWithOrderBy(array); + } + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + baos.write('['); + for (Value v : array) { + if (orderByList != null) { + v = ((ValueRow) v).getList()[0]; + } + JsonConstructorUtils.jsonArrayAppend(baos, v != ValueNull.INSTANCE ? v : ValueJson.NULL, flags); + } + baos.write(']'); + return ValueJson.getInternal(baos.toByteArray()); + } + case JSON_OBJECTAGG: { + Value[] array = ((AggregateDataCollecting) data).getArray(); + if (array == null) { + return ValueNull.INSTANCE; + } + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + baos.write('{'); + for (Value v : array) { + Value[] row = ((ValueRow) v).getList(); + String key = row[0].getString(); + if (key == null) { + throw DbException.getInvalidValueException("JSON_OBJECTAGG key", "NULL"); + } + Value value = row[1]; + if (value == ValueNull.INSTANCE) { + if ((flags & JsonConstructorUtils.JSON_ABSENT_ON_NULL) != 0) { + continue; + } + value = ValueJson.NULL; + } + JsonConstructorUtils.jsonObjectAppend(baos, key, value); + } + return JsonConstructorUtils.jsonObjectFinish(baos, flags); + } default: // Avoid compiler warning } - return data.getValue(session.getDatabase(), type.getValueType()); + return data.getValue(session); } - private Value getHypotheticalSet(Session session, AggregateData data) { + private static Value collect(SessionLocal session, AggregateDataCollecting c, AggregateData 
d) { + for (Value v : c) { + d.add(session, v); + } + return d.getValue(session); + } + + private Value getHypotheticalSet(SessionLocal session, AggregateData data) { AggregateDataCollecting collectingData = (AggregateDataCollecting) data; Value arg = collectingData.getSharedArgument(); if (arg == null) { switch (aggregateType) { case RANK: case DENSE_RANK: - return ValueInt.get(1); + return ValueInteger.get(1); case PERCENT_RANK: return ValueDouble.ZERO; case CUME_DIST: @@ -468,7 +717,7 @@ private Value getHypotheticalSet(Session session, AggregateData data) { throw DbException.getUnsupportedException("aggregateType=" + aggregateType); } } - collectingData.add(session.getDatabase(), arg); + collectingData.add(session, arg); Value[] array = collectingData.getArray(); Comparator sort = orderBySort.getRowValueComparator(); Arrays.sort(array, sort); @@ -494,13 +743,13 @@ private Value getRank(Value[] ordered, Value arg, Comparator sort) { int nm = number - 1; v = nm == 0 ? ValueDouble.ZERO : ValueDouble.get((double) nm / (size - 1)); } else { - v = ValueLong.get(number); + v = ValueBigint.get(number); } if (sort.compare(row, arg) == 0) { return v; } } - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } private static Value getCumeDist(Value[] ordered, Value arg, Comparator sort) { @@ -519,63 +768,103 @@ private static Value getCumeDist(Value[] ordered, Value arg, Comparator s } start = end; } - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } - private Value getListagg(Session session, AggregateData data) { + private Value getListagg(SessionLocal session, AggregateData data) { AggregateDataCollecting collectingData = (AggregateDataCollecting) data; Value[] array = collectingData.getArray(); if (array == null) { return ValueNull.INSTANCE; } + if (array.length == 1) { + Value v = array[0]; + if (orderByList != null) { + v = ((ValueRow) v).getList()[0]; + } + return v.convertTo(Value.VARCHAR, session); + } 
if (orderByList != null || distinct) { sortWithOrderBy(array); } - StringBuilder builder = new StringBuilder(); - String sep = args.length < 2 ? "," : collectingData.getSharedArgument().getString(); - for (int i = 0, length = array.length; i < length; i++) { - Value val = array[i]; - String s; - if (orderByList != null) { - s = ((ValueArray) val).getList()[0].getString(); - } else { - s = val.getString(); + ListaggArguments arguments = (ListaggArguments) extraArguments; + String separator = arguments.getEffectiveSeparator(); + return ValueVarchar + .get((arguments.getOnOverflowTruncate() + ? getListaggTruncate(array, separator, arguments.getEffectiveFilter(), + arguments.isWithoutCount()) + : getListaggError(array, separator)).toString(), session); + } + + private StringBuilder getListaggError(Value[] array, String separator) { + StringBuilder builder = new StringBuilder(getListaggItem(array[0])); + for (int i = 1, count = array.length; i < count; i++) { + builder.append(separator).append(getListaggItem(array[i])); + if (builder.length() > Constants.MAX_STRING_LENGTH) { + throw DbException.getValueTooLongException("CHARACTER VARYING", builder.substring(0, 81), -1L); } - if (sep != null && i > 0) { - builder.append(sep); + } + return builder; + } + + private StringBuilder getListaggTruncate(Value[] array, String separator, String filter, + boolean withoutCount) { + int count = array.length; + String[] strings = new String[count]; + String s = getListaggItem(array[0]); + strings[0] = s; + StringBuilder builder = new StringBuilder(s); + loop: for (int i = 1; i < count; i++) { + builder.append(separator).append(strings[i] = s = getListaggItem(array[i])); + int length = builder.length(); + if (length > Constants.MAX_STRING_LENGTH) { + for (; i > 0; i--) { + length -= strings[i].length(); + builder.setLength(length); + builder.append(filter); + if (!withoutCount) { + builder.append('(').append(count - i).append(')'); + } + if (builder.length() <= 
Constants.MAX_STRING_LENGTH) { + break loop; + } + length -= separator.length(); + } + builder.setLength(0); + builder.append(filter).append('(').append(count).append(')'); + break; } - builder.append(s); } - return ValueString.get(builder.toString()); + return builder; + } + + private String getListaggItem(Value v) { + if (orderByList != null) { + v = ((ValueRow) v).getList()[0]; + } + return v.getString(); } - private Value getHistogram(Session session, AggregateData data) { + private Value getHistogram(SessionLocal session, AggregateData data) { TreeMap distinctValues = ((AggregateDataDistinctWithCounts) data).getValues(); + TypeInfo rowType = (TypeInfo) type.getExtTypeInfo(); if (distinctValues == null) { - return ValueArray.getEmpty(); + return ValueArray.get(rowType, Value.EMPTY_VALUES, session); } - ValueArray[] values = new ValueArray[distinctValues.size()]; + ValueRow[] values = new ValueRow[distinctValues.size()]; int i = 0; for (Entry entry : distinctValues.entrySet()) { LongDataCounter d = entry.getValue(); - values[i] = ValueArray.get(new Value[] { entry.getKey(), ValueLong.get(distinct ? 
1L : d.count) }); + values[i] = ValueRow.get(rowType, new Value[] { entry.getKey(), ValueBigint.get(d.count) }); i++; } Database db = session.getDatabase(); - final Mode mode = db.getMode(); - final CompareMode compareMode = db.getCompareMode(); - Arrays.sort(values, new Comparator() { - @Override - public int compare(ValueArray v1, ValueArray v2) { - Value a1 = v1.getList()[0]; - Value a2 = v2.getList()[0]; - return a1.compareTo(a2, mode, compareMode); - } - }); - return ValueArray.get(values); + CompareMode compareMode = db.getCompareMode(); + Arrays.sort(values, (v1, v2) -> v1.getList()[0].compareTo(v2.getList()[0], session, compareMode)); + return ValueArray.get(rowType, values, session); } - private Value getMode(Session session, AggregateData data) { + private Value getMode(SessionLocal session, AggregateData data) { Value v = ValueNull.INSTANCE; TreeMap distinctValues = ((AggregateDataDistinctWithCounts) data).getValues(); if (distinctValues == null) { @@ -591,7 +880,7 @@ private Value getMode(Session session, AggregateData data) { count = c; } else if (c == count) { Value v2 = entry.getKey(); - int cmp = session.getDatabase().compareTypeSafe(v, v2); + int cmp = session.compareTypeSafe(v, v2); if (desc) { if (cmp >= 0) { continue; @@ -611,13 +900,13 @@ private Value getMode(Session session, AggregateData data) { } } } - return v.convertTo(type.getValueType()); + return v; } @Override public void mapColumnsAnalysis(ColumnResolver resolver, int level, int innerState) { if (orderByList != null) { - for (SelectOrderBy o : orderByList) { + for (QueryOrderBy o : orderByList) { o.expression.mapColumns(resolver, level, innerState); } } @@ -625,63 +914,107 @@ public void mapColumnsAnalysis(ColumnResolver resolver, int level, int innerStat } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { super.optimize(session); if (args.length == 1) { type = args[0].getType(); } if (orderByList != null) { - for 
(SelectOrderBy o : orderByList) { - o.expression = o.expression.optimize(session); - } int offset; switch (aggregateType) { case ARRAY_AGG: case LISTAGG: + case JSON_ARRAYAGG: offset = 1; break; default: offset = 0; } - orderBySort = createOrder(session, orderByList, offset); + for (Iterator i = orderByList.iterator(); i.hasNext();) { + QueryOrderBy o = i.next(); + Expression e = o.expression.optimize(session); + if (offset != 0 && e.isConstant()) { + i.remove(); + } else { + o.expression = e; + } + } + if (orderByList.isEmpty()) { + orderByList = null; + } else { + orderBySort = createOrder(session, orderByList, offset); + } } switch (aggregateType) { case LISTAGG: - type = TypeInfo.TYPE_STRING; + type = TypeInfo.TYPE_VARCHAR; break; - case COUNT_ALL: case COUNT: - type = TypeInfo.TYPE_LONG; + if (args[0].isConstant()) { + if (args[0].getValue(session) == ValueNull.INSTANCE) { + return ValueExpression.get(ValueBigint.get(0L)); + } + if (!distinct) { + Aggregate aggregate = new Aggregate(AggregateType.COUNT_ALL, new Expression[0], select, false); + aggregate.setFilterCondition(filterCondition); + aggregate.setOverCondition(over); + return aggregate.optimize(session); + } + } + //$FALL-THROUGH$ + case COUNT_ALL: + case REGR_COUNT: + type = TypeInfo.TYPE_BIGINT; break; - case SELECTIVITY: - type = TypeInfo.TYPE_INT; + case HISTOGRAM: { + LinkedHashMap fields = new LinkedHashMap<>(4); + fields.put("VALUE", type); + fields.put("COUNT", TypeInfo.TYPE_BIGINT); + type = TypeInfo.getTypeInfo(Value.ARRAY, -1, 0, + TypeInfo.getTypeInfo(Value.ROW, -1, -1, new ExtTypeInfoRow(fields))); break; - case HISTOGRAM: - type = TypeInfo.TYPE_ARRAY; - break; - case SUM: { - int dataType = type.getValueType(); - if (dataType == Value.BOOLEAN) { - // example: sum(id > 3) (count the rows) - type = TypeInfo.TYPE_LONG; - } else if (!DataType.supportsAdd(dataType)) { - throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getSQL(false)); - } else { - type = 
TypeInfo.getTypeInfo(DataType.getAddProofType(dataType)); + } + case SUM: + if ((type = getSumType(type)) == null) { + throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getTraceSQL()); } break; - } case AVG: - if (!DataType.supportsAdd(type.getValueType())) { - throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getSQL(false)); + if ((type = getAvgType(type)) == null) { + throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getTraceSQL()); } break; case MIN: case MAX: break; + case STDDEV_POP: + case STDDEV_SAMP: + case VAR_POP: + case VAR_SAMP: + case COVAR_POP: + case COVAR_SAMP: + case CORR: + case REGR_SLOPE: + case REGR_INTERCEPT: + case REGR_R2: + case REGR_SXX: + case REGR_SYY: + case REGR_SXY: + type = TypeInfo.TYPE_DOUBLE; + break; + case REGR_AVGX: + if ((type = getAvgType(args[1].getType())) == null) { + throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getTraceSQL()); + } + break; + case REGR_AVGY: + if ((type = getAvgType(args[0].getType())) == null) { + throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getTraceSQL()); + } + break; case RANK: case DENSE_RANK: - type = TypeInfo.TYPE_LONG; + type = TypeInfo.TYPE_BIGINT; break; case PERCENT_RANK: case CUME_DIST: @@ -692,14 +1025,15 @@ public Expression optimize(Session session) { //$FALL-THROUGH$ case MEDIAN: switch (type.getValueType()) { - case Value.BYTE: - case Value.SHORT: - case Value.INT: - case Value.LONG: - case Value.DECIMAL: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + case Value.NUMERIC: + case Value.REAL: case Value.DOUBLE: - case Value.FLOAT: - type = TypeInfo.TYPE_DECIMAL_DEFAULT; + case Value.DECFLOAT: + type = TypeInfo.TYPE_NUMERIC_FLOATING_POINT; break; } break; @@ -707,165 +1041,207 @@ public Expression optimize(Session session) { case MODE: type = orderByList.get(0).expression.getType(); break; - case STDDEV_POP: - case STDDEV_SAMP: - case VAR_POP: - case VAR_SAMP: - type = 
TypeInfo.TYPE_DOUBLE; - break; case EVERY: case ANY: type = TypeInfo.TYPE_BOOLEAN; break; - case BIT_AND: - case BIT_OR: - if (!DataType.supportsAdd(type.getValueType())) { - throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getSQL(false)); - } + case BIT_AND_AGG: + case BIT_OR_AGG: + case BIT_XOR_AGG: + case BIT_NAND_AGG: + case BIT_NOR_AGG: + case BIT_XNOR_AGG: + BitFunction.checkArgType(args[0]); break; case ARRAY_AGG: - type = TypeInfo.TYPE_ARRAY; + type = TypeInfo.getTypeInfo(Value.ARRAY, -1, 0, args[0].getType()); break; case ENVELOPE: type = TypeInfo.TYPE_GEOMETRY; break; + case JSON_OBJECTAGG: + case JSON_ARRAYAGG: + type = TypeInfo.TYPE_JSON; + break; default: - DbException.throwInternalError("type=" + aggregateType); + throw DbException.getInternalError("type=" + aggregateType); } return this; } + private static TypeInfo getSumType(TypeInfo type) { + int valueType = type.getValueType(); + switch (valueType) { + case Value.BOOLEAN: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + return TypeInfo.TYPE_BIGINT; + case Value.BIGINT: + return TypeInfo.getTypeInfo(Value.NUMERIC, ValueBigint.DECIMAL_PRECISION + ADDITIONAL_SUM_PRECISION, -1, + null); + case Value.NUMERIC: + return TypeInfo.getTypeInfo(Value.NUMERIC, type.getPrecision() + ADDITIONAL_SUM_PRECISION, + type.getDeclaredScale(), null); + case Value.REAL: + return TypeInfo.TYPE_DOUBLE; + case Value.DOUBLE: + return TypeInfo.getTypeInfo(Value.DECFLOAT, ValueDouble.DECIMAL_PRECISION + ADDITIONAL_SUM_PRECISION, -1, + null); + case Value.DECFLOAT: + return TypeInfo.getTypeInfo(Value.DECFLOAT, type.getPrecision() + ADDITIONAL_SUM_PRECISION, -1, null); + default: + if (DataType.isIntervalType(valueType)) { + return TypeInfo.getTypeInfo(valueType, ValueInterval.MAXIMUM_PRECISION, type.getDeclaredScale(), null); + } + return null; + } + } + + private static TypeInfo getAvgType(TypeInfo type) { + switch (type.getValueType()) { + case Value.TINYINT: + case Value.SMALLINT: + case 
Value.INTEGER: + case Value.REAL: + return TypeInfo.TYPE_DOUBLE; + case Value.BIGINT: + return TypeInfo.getTypeInfo(Value.NUMERIC, ValueBigint.DECIMAL_PRECISION + ADDITIONAL_AVG_SCALE, + ADDITIONAL_AVG_SCALE, null); + case Value.NUMERIC: { + int additionalScale = Math.min(ValueNumeric.MAXIMUM_SCALE - type.getScale(), + Math.min(Constants.MAX_NUMERIC_PRECISION - (int) type.getPrecision(), ADDITIONAL_AVG_SCALE)); + return TypeInfo.getTypeInfo(Value.NUMERIC, type.getPrecision() + additionalScale, + type.getScale() + additionalScale, null); + } + case Value.DOUBLE: + return TypeInfo.getTypeInfo(Value.DECFLOAT, ValueDouble.DECIMAL_PRECISION + ADDITIONAL_AVG_SCALE, -1, // + null); + case Value.DECFLOAT: + return TypeInfo.getTypeInfo(Value.DECFLOAT, type.getPrecision() + ADDITIONAL_AVG_SCALE, -1, null); + case Value.INTERVAL_YEAR: + case Value.INTERVAL_YEAR_TO_MONTH: + return TypeInfo.getTypeInfo(Value.INTERVAL_YEAR_TO_MONTH, type.getDeclaredPrecision(), 0, null); + case Value.INTERVAL_MONTH: + return TypeInfo.getTypeInfo(Value.INTERVAL_MONTH, type.getDeclaredPrecision(), 0, null); + case Value.INTERVAL_DAY: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + return TypeInfo.getTypeInfo(Value.INTERVAL_DAY_TO_SECOND, type.getDeclaredPrecision(), + ValueInterval.MAXIMUM_SCALE, null); + case Value.INTERVAL_HOUR: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + return TypeInfo.getTypeInfo(Value.INTERVAL_HOUR_TO_SECOND, type.getDeclaredPrecision(), + ValueInterval.MAXIMUM_SCALE, null); + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_MINUTE_TO_SECOND: + return TypeInfo.getTypeInfo(Value.INTERVAL_MINUTE_TO_SECOND, type.getDeclaredPrecision(), + ValueInterval.MAXIMUM_SCALE, null); + case Value.INTERVAL_SECOND: + return TypeInfo.getTypeInfo(Value.INTERVAL_SECOND, type.getDeclaredPrecision(), // + ValueInterval.MAXIMUM_SCALE, null); + default: + return null; + } + } + @Override public void 
setEvaluatable(TableFilter tableFilter, boolean b) { if (orderByList != null) { - for (SelectOrderBy o : orderByList) { + for (QueryOrderBy o : orderByList) { o.expression.setEvaluatable(tableFilter, b); } } super.setEvaluatable(tableFilter, b); } - private StringBuilder getSQLArrayAggregate(StringBuilder builder, boolean alwaysQuote) { - builder.append("ARRAY_AGG("); - if (distinct) { - builder.append("DISTINCT "); - } - args[0].getSQL(builder, alwaysQuote); - Window.appendOrderBy(builder, orderByList, alwaysQuote); - builder.append(')'); - return appendTailConditions(builder, alwaysQuote); - } - @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - String text; + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { switch (aggregateType) { case COUNT_ALL: - return appendTailConditions(builder.append("COUNT(*)"), alwaysQuote); - case COUNT: - text = "COUNT"; - break; - case SELECTIVITY: - text = "SELECTIVITY"; - break; - case HISTOGRAM: - text = "HISTOGRAM"; - break; - case SUM: - text = "SUM"; - break; - case MIN: - text = "MIN"; - break; - case MAX: - text = "MAX"; - break; - case AVG: - text = "AVG"; - break; - case STDDEV_POP: - text = "STDDEV_POP"; - break; - case STDDEV_SAMP: - text = "STDDEV_SAMP"; - break; - case VAR_POP: - text = "VAR_POP"; - break; - case VAR_SAMP: - text = "VAR_SAMP"; - break; - case EVERY: - text = "EVERY"; - break; - case ANY: - text = "ANY"; - break; - case BIT_AND: - text = "BIT_AND"; - break; - case BIT_OR: - text = "BIT_OR"; - break; - case RANK: - text = "RANK"; - break; - case DENSE_RANK: - text = "DENSE_RANK"; - break; - case PERCENT_RANK: - text = "PERCENT_RANK"; - break; - case CUME_DIST: - text = "CUME_DIST"; - break; - case PERCENTILE_CONT: - text = "PERCENTILE_CONT"; - break; - case PERCENTILE_DISC: - text = "PERCENTILE_DISC"; - break; - case MEDIAN: - text = "MEDIAN"; - break; + return appendTailConditions(builder.append("COUNT(*)"), sqlFlags, false); case LISTAGG: 
- text = "LISTAGG"; - break; + return getSQLListagg(builder, sqlFlags); case ARRAY_AGG: - return getSQLArrayAggregate(builder, alwaysQuote); - case MODE: - text = "MODE"; - break; - case ENVELOPE: - text = "ENVELOPE"; - break; + return getSQLArrayAggregate(builder, sqlFlags); + case JSON_OBJECTAGG: + return getSQLJsonObjectAggregate(builder, sqlFlags); + case JSON_ARRAYAGG: + return getSQLJsonArrayAggregate(builder, sqlFlags); default: - throw DbException.throwInternalError("type=" + aggregateType); } - builder.append(text); + builder.append(aggregateType.name()); if (distinct) { builder.append("(DISTINCT "); } else { builder.append('('); } - for (int i = 0; i < args.length; i++) { - if (i > 0) { - builder.append(", "); - } - Expression arg = args[i]; - if (arg instanceof Subquery) { - arg.getSQL(builder, alwaysQuote); - } else { - arg.getUnenclosedSQL(builder, alwaysQuote); - } - } - builder.append(')'); + writeExpressions(builder, args, sqlFlags).append(')'); if (orderByList != null) { builder.append(" WITHIN GROUP ("); - Window.appendOrderBy(builder, orderByList, alwaysQuote); + Window.appendOrderBy(builder, orderByList, sqlFlags, false); builder.append(')'); } - return appendTailConditions(builder, alwaysQuote); + return appendTailConditions(builder, sqlFlags, false); + } + + private StringBuilder getSQLArrayAggregate(StringBuilder builder, int sqlFlags) { + builder.append("ARRAY_AGG("); + if (distinct) { + builder.append("DISTINCT "); + } + args[0].getUnenclosedSQL(builder, sqlFlags); + Window.appendOrderBy(builder, orderByList, sqlFlags, false); + builder.append(')'); + return appendTailConditions(builder, sqlFlags, false); + } + + private StringBuilder getSQLListagg(StringBuilder builder, int sqlFlags) { + builder.append("LISTAGG("); + if (distinct) { + builder.append("DISTINCT "); + } + args[0].getUnenclosedSQL(builder, sqlFlags); + ListaggArguments arguments = (ListaggArguments) extraArguments; + String s = arguments.getSeparator(); + if (s != null) { + 
StringUtils.quoteStringSQL(builder.append(", "), s); + } + if (arguments.getOnOverflowTruncate()) { + builder.append(" ON OVERFLOW TRUNCATE "); + s = arguments.getFilter(); + if (s != null) { + StringUtils.quoteStringSQL(builder, s).append(' '); + } + builder.append(arguments.isWithoutCount() ? "WITHOUT" : "WITH").append(" COUNT"); + } + builder.append(')'); + builder.append(" WITHIN GROUP ("); + Window.appendOrderBy(builder, orderByList, sqlFlags, true); + builder.append(')'); + return appendTailConditions(builder, sqlFlags, false); + } + + private StringBuilder getSQLJsonObjectAggregate(StringBuilder builder, int sqlFlags) { + builder.append("JSON_OBJECTAGG("); + args[0].getUnenclosedSQL(builder, sqlFlags).append(": "); + args[1].getUnenclosedSQL(builder, sqlFlags); + JsonConstructorFunction.getJsonFunctionFlagsSQL(builder, flags, false).append(')'); + return appendTailConditions(builder, sqlFlags, false); + } + + private StringBuilder getSQLJsonArrayAggregate(StringBuilder builder, int sqlFlags) { + builder.append("JSON_ARRAYAGG("); + if (distinct) { + builder.append("DISTINCT "); + } + args[0].getUnenclosedSQL(builder, sqlFlags); + JsonConstructorFunction.getJsonFunctionFlagsSQL(builder, flags, true); + Window.appendOrderBy(builder, orderByList, sqlFlags, false); + builder.append(')'); + return appendTailConditions(builder, sqlFlags, false); } private Index getMinMaxColumnIndex() { @@ -893,24 +1269,24 @@ public boolean isEverything(ExpressionVisitor visitor) { if (visitor.getType() == ExpressionVisitor.OPTIMIZABLE_AGGREGATE) { switch (aggregateType) { case COUNT: - if (!distinct && args[0].getNullable() == Column.NOT_NULLABLE) { - return visitor.getTable().canGetRowCount(); + if (distinct || args[0].getNullable() != Column.NOT_NULLABLE) { + return false; } - return false; + //$FALL-THROUGH$ case COUNT_ALL: - return visitor.getTable().canGetRowCount(); + return visitor.getTable().canGetRowCount(select.getSession()); case MIN: case MAX: - Index index = 
getMinMaxColumnIndex(); - return index != null; + return getMinMaxColumnIndex() != null; case PERCENTILE_CONT: case PERCENTILE_DISC: - return args[0].isConstant() && Percentile.getColumnIndex(orderByList.get(0).expression) != null; + return args[0].isConstant() && Percentile.getColumnIndex(select.getSession().getDatabase(), + orderByList.get(0).expression) != null; case MEDIAN: if (distinct) { return false; } - return Percentile.getColumnIndex(args[0]) != null; + return Percentile.getColumnIndex(select.getSession().getDatabase(), args[0]) != null; case ENVELOPE: return AggregateDataEnvelope.getGeometryColumnIndex(args[0]) != null; default: @@ -923,7 +1299,7 @@ public boolean isEverything(ExpressionVisitor visitor) { } } if (orderByList != null) { - for (SelectOrderBy o : orderByList) { + for (QueryOrderBy o : orderByList) { if (!o.expression.isEverything(visitor)) { return false; } @@ -939,7 +1315,7 @@ public int getCost() { cost += arg.getCost(); } if (orderByList != null) { - for (SelectOrderBy o : orderByList) { + for (QueryOrderBy o : orderByList) { cost += o.expression.getCost(); } } @@ -949,4 +1325,21 @@ public int getCost() { return cost; } + /** + * Returns the select statement. + * @return the select statement + */ + public Select getSelect() { + return select; + } + + /** + * Returns if distinct is used. + * + * @return if distinct is used + */ + public boolean isDistinct() { + return distinct; + } + } diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateData.java b/h2/src/main/org/h2/expression/aggregate/AggregateData.java index e7d2ef82a6..97986b4838 100644 --- a/h2/src/main/org/h2/expression/aggregate/AggregateData.java +++ b/h2/src/main/org/h2/expression/aggregate/AggregateData.java @@ -1,13 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.aggregate; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.message.DbException; +import org.h2.engine.SessionLocal; import org.h2.value.Value; /** @@ -15,78 +13,20 @@ */ abstract class AggregateData { - /** - * Create an AggregateData object of the correct sub-type. - * - * @param aggregateType the type of the aggregate operation - * @param distinct if the calculation should be distinct - * @param dataType the data type of the computed result - * @return the aggregate data object of the specified type - */ - static AggregateData create(AggregateType aggregateType, boolean distinct, int dataType) { - switch (aggregateType) { - case COUNT_ALL: - return new AggregateDataCount(true); - case COUNT: - if (!distinct) { - return new AggregateDataCount(false); - } - break; - case LISTAGG: - case ARRAY_AGG: - case RANK: - case DENSE_RANK: - case PERCENT_RANK: - case CUME_DIST: - case PERCENTILE_CONT: - case PERCENTILE_DISC: - case MEDIAN: - break; - case MIN: - case MAX: - case BIT_OR: - case BIT_AND: - case ANY: - case EVERY: - return new AggregateDataDefault(aggregateType, dataType); - case SUM: - case AVG: - case STDDEV_POP: - case STDDEV_SAMP: - case VAR_POP: - case VAR_SAMP: - if (!distinct) { - return new AggregateDataDefault(aggregateType, dataType); - } - break; - case SELECTIVITY: - return new AggregateDataSelectivity(distinct); - case HISTOGRAM: - return new AggregateDataDistinctWithCounts(false, Constants.SELECTIVITY_DISTINCT_COUNT); - case MODE: - return new AggregateDataDistinctWithCounts(true, Integer.MAX_VALUE); - case ENVELOPE: - return new AggregateDataEnvelope(); - default: - throw DbException.throwInternalError("type=" + aggregateType); - } - return new AggregateDataCollecting(distinct); - } - /** * Add a value to this aggregate. 
* - * @param database the database + * @param session the session * @param v the value */ - abstract void add(Database database, Value v); + abstract void add(SessionLocal session, Value v); /** * Get the aggregate result. * - * @param database the database - * @param dataType the datatype of the computed result + * @param session the session * @return the value */ - abstract Value getValue(Database database, int dataType); + abstract Value getValue(SessionLocal session); + } diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataAvg.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataAvg.java new file mode 100644 index 0000000000..283ad625d8 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataAvg.java @@ -0,0 +1,90 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.math.RoundingMode; + +import org.h2.api.IntervalQualifier; +import org.h2.engine.SessionLocal; +import org.h2.util.IntervalUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDecfloat; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInterval; +import org.h2.value.ValueNull; +import org.h2.value.ValueNumeric; + +/** + * Data stored while calculating an AVG aggregate. 
+ */ +final class AggregateDataAvg extends AggregateData { + + private final TypeInfo dataType; + private long count; + private double doubleValue; + private BigDecimal decimalValue; + private BigInteger integerValue; + + /** + * @param dataType + * the data type of the computed result + */ + AggregateDataAvg(TypeInfo dataType) { + this.dataType = dataType; + } + + @Override + void add(SessionLocal session, Value v) { + if (v == ValueNull.INSTANCE) { + return; + } + count++; + switch (dataType.getValueType()) { + case Value.DOUBLE: + doubleValue += v.getDouble(); + break; + case Value.NUMERIC: + case Value.DECFLOAT: { + BigDecimal bd = v.getBigDecimal(); + decimalValue = decimalValue == null ? bd : decimalValue.add(bd); + break; + } + default: { + BigInteger bi = IntervalUtils.intervalToAbsolute((ValueInterval) v); + integerValue = integerValue == null ? bi : integerValue.add(bi); + } + } + } + + @Override + Value getValue(SessionLocal session) { + if (count == 0) { + return ValueNull.INSTANCE; + } + Value v; + int valueType = dataType.getValueType(); + switch (valueType) { + case Value.DOUBLE: + v = ValueDouble.get(doubleValue / count); + break; + case Value.NUMERIC: + v = ValueNumeric + .get(decimalValue.divide(BigDecimal.valueOf(count), dataType.getScale(), RoundingMode.HALF_DOWN)); + break; + case Value.DECFLOAT: + v = ValueDecfloat.divide(decimalValue, BigDecimal.valueOf(count), dataType); + break; + default: + v = IntervalUtils.intervalFromAbsolute(IntervalQualifier.valueOf(valueType - Value.INTERVAL_YEAR), + integerValue.divide(BigInteger.valueOf(count))); + } + return v.castTo(dataType, session); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataBinarySet.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataBinarySet.java new file mode 100644 index 0000000000..fc788db76d --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataBinarySet.java @@ -0,0 +1,24 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.value.Value; + +/** + * Aggregate data of binary set functions. + */ +abstract class AggregateDataBinarySet extends AggregateData { + + abstract void add(SessionLocal session, Value yValue, Value xValue); + + @Override + final void add(SessionLocal session, Value v) { + throw DbException.getInternalError(); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataCollecting.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataCollecting.java index d1ff066242..af1e267fcf 100644 --- a/h2/src/main/org/h2/expression/aggregate/AggregateDataCollecting.java +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataCollecting.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.aggregate; @@ -8,29 +8,60 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.Iterator; import java.util.TreeSet; import org.h2.api.ErrorCode; -import org.h2.engine.Database; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.value.Value; import org.h2.value.ValueNull; +import org.h2.value.ValueRow; /** * Data stored while calculating an aggregate that needs collecting of all * values or a distinct aggregate. * *

          - * NULL values are not collected. {@link #getValue(Database, int)} - * method returns {@code null}. Use {@link #getArray()} for instances of this - * class instead. + * NULL values are not collected. {@link #getValue(SessionLocal)} method + * returns {@code null}. Use {@link #getArray()} for instances of this class + * instead. *

          */ -class AggregateDataCollecting extends AggregateData implements Iterable { +final class AggregateDataCollecting extends AggregateData implements Iterable { + + /** + * NULL values collection mode. + */ + enum NullCollectionMode { + + /** + * Rows with NULL value are completely ignored. + */ + IGNORED, + + /** + * Rows with NULL values are processed causing the result to be not + * NULL, but NULL values aren't collected. + */ + EXCLUDED, + + /** + * Rows with NULL values are aggregated just like rows with any other + * values, should also be used when NULL values aren't passed to + * {@linkplain AggregateDataCollecting}. + */ + USED_OR_IMPOSSIBLE; + + } private final boolean distinct; + private final boolean orderedWithOrder; + + private final NullCollectionMode nullCollectionMode; + Collection values; private Value shared; @@ -38,26 +69,49 @@ class AggregateDataCollecting extends AggregateData implements Iterable { /** * Creates new instance of data for collecting aggregates. * - * @param distinct if distinct is used + * @param distinct + * if distinct is used + * @param orderedWithOrder + * if aggregate is an ordered aggregate with ORDER BY clause + * @param nullCollectionMode + * NULL values collection mode */ - AggregateDataCollecting(boolean distinct) { + AggregateDataCollecting(boolean distinct, boolean orderedWithOrder, NullCollectionMode nullCollectionMode) { this.distinct = distinct; + this.orderedWithOrder = orderedWithOrder; + this.nullCollectionMode = nullCollectionMode; } @Override - void add(Database database, Value v) { - if (v == ValueNull.INSTANCE) { + void add(SessionLocal session, Value v) { + if (nullCollectionMode == NullCollectionMode.IGNORED && isNull(v)) { return; } Collection c = values; if (c == null) { - values = c = distinct ? 
new TreeSet<>(database.getCompareMode()) : new ArrayList(); + if (distinct) { + Comparator comparator = session.getDatabase().getCompareMode(); + if (orderedWithOrder) { + comparator = Comparator.comparing(t -> ((ValueRow) t).getList()[0], comparator); + } + c = new TreeSet<>(comparator); + } else { + c = new ArrayList<>(); + } + values = c; + } + if (nullCollectionMode == NullCollectionMode.EXCLUDED && isNull(v)) { + return; } c.add(v); } + private boolean isNull(Value v) { + return (orderedWithOrder ? ((ValueRow) v).getList()[0] : v) == ValueNull.INSTANCE; + } + @Override - Value getValue(Database database, int dataType) { + Value getValue(SessionLocal session) { return null; } @@ -80,12 +134,12 @@ Value[] getArray() { if (values == null) { return null; } - return values.toArray(new Value[0]); + return values.toArray(Value.EMPTY_VALUES); } @Override public Iterator iterator() { - return values != null ? values.iterator() : Collections.emptyIterator(); + return values != null ? values.iterator() : Collections.emptyIterator(); } /** diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataCorr.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataCorr.java new file mode 100644 index 0000000000..28b6160b6f --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataCorr.java @@ -0,0 +1,96 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; + +/** + * Data stored while calculating a CORR, REG_SLOPE, REG_INTERCEPT, or REGR_R2 + * aggregate. 
+ */ +final class AggregateDataCorr extends AggregateDataBinarySet { + + private final AggregateType aggregateType; + + private long count; + + private double sumY, sumX, sumYX; + + private double m2y, meanY; + + private double m2x, meanX; + + AggregateDataCorr(AggregateType aggregateType) { + this.aggregateType = aggregateType; + } + + @Override + void add(SessionLocal session, Value yValue, Value xValue) { + double y = yValue.getDouble(), x = xValue.getDouble(); + sumY += y; + sumX += x; + sumYX += y * x; + if (++count == 1) { + meanY = y; + meanX = x; + m2x = m2y = 0; + } else { + double delta = y - meanY; + meanY += delta / count; + m2y += delta * (y - meanY); + delta = x - meanX; + meanX += delta / count; + m2x += delta * (x - meanX); + } + } + + @Override + Value getValue(SessionLocal session) { + if (count < 1) { + return ValueNull.INSTANCE; + } + double v; + switch (aggregateType) { + case CORR: + if (m2y == 0 || m2x == 0) { + return ValueNull.INSTANCE; + } + v = (sumYX - sumX * sumY / count) / Math.sqrt(m2y * m2x); + break; + case REGR_SLOPE: + if (m2x == 0) { + return ValueNull.INSTANCE; + } + v = (sumYX - sumX * sumY / count) / m2x; + break; + case REGR_INTERCEPT: + if (m2x == 0) { + return ValueNull.INSTANCE; + } + v = meanY - (sumYX - sumX * sumY / count) / m2x * meanX; + break; + case REGR_R2: { + if (m2x == 0) { + return ValueNull.INSTANCE; + } + if (m2y == 0) { + return ValueDouble.ONE; + } + v = sumYX - sumX * sumY / count; + v = v * v / (m2y * m2x); + break; + } + default: + throw DbException.getInternalError("type=" + aggregateType); + } + return ValueDouble.get(v); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataCount.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataCount.java index f3046edf31..b0841b1551 100644 --- a/h2/src/main/org/h2/expression/aggregate/AggregateDataCount.java +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataCount.java @@ -1,19 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.aggregate; -import org.h2.engine.Database; +import org.h2.engine.SessionLocal; import org.h2.value.Value; -import org.h2.value.ValueLong; +import org.h2.value.ValueBigint; import org.h2.value.ValueNull; /** * Data stored while calculating a COUNT aggregate. */ -class AggregateDataCount extends AggregateData { +final class AggregateDataCount extends AggregateData { private final boolean all; @@ -24,15 +24,15 @@ class AggregateDataCount extends AggregateData { } @Override - void add(Database database, Value v) { + void add(SessionLocal session, Value v) { if (all || v != ValueNull.INSTANCE) { count++; } } @Override - Value getValue(Database database, int dataType) { - return ValueLong.get(count).convertTo(dataType); + Value getValue(SessionLocal session) { + return ValueBigint.get(count); } } diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataCovar.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataCovar.java new file mode 100644 index 0000000000..acd0031054 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataCovar.java @@ -0,0 +1,70 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; + +/** + * Data stored while calculating a COVAR_POP, COVAR_SAMP, or REGR_SXY aggregate. 
+ */ +final class AggregateDataCovar extends AggregateDataBinarySet { + + private final AggregateType aggregateType; + + private long count; + + private double sumY, sumX, sumYX; + + /** + * @param aggregateType + * the type of the aggregate operation + */ + AggregateDataCovar(AggregateType aggregateType) { + this.aggregateType = aggregateType; + } + + @Override + void add(SessionLocal session, Value yValue, Value xValue) { + double y = yValue.getDouble(), x = xValue.getDouble(); + sumY += y; + sumX += x; + sumYX += y * x; + count++; + } + + @Override + Value getValue(SessionLocal session) { + double v; + switch (aggregateType) { + case COVAR_POP: + if (count < 1) { + return ValueNull.INSTANCE; + } + v = (sumYX - sumX * sumY / count) / count; + break; + case COVAR_SAMP: + if (count < 2) { + return ValueNull.INSTANCE; + } + v = (sumYX - sumX * sumY / count) / (count - 1); + break; + case REGR_SXY: + if (count < 1) { + return ValueNull.INSTANCE; + } + v = sumYX - sumX * sumY / count; + break; + default: + throw DbException.getInternalError("type=" + aggregateType); + } + return ValueDouble.get(v); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataDefault.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataDefault.java index 870d84678a..0ff71f2270 100644 --- a/h2/src/main/org/h2/expression/aggregate/AggregateDataDefault.java +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataDefault.java @@ -1,92 +1,62 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression.aggregate; -import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.function.BitFunction; import org.h2.message.DbException; -import org.h2.value.DataType; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueBoolean; -import org.h2.value.ValueDouble; -import org.h2.value.ValueLong; import org.h2.value.ValueNull; /** * Data stored while calculating an aggregate. */ -class AggregateDataDefault extends AggregateData { +final class AggregateDataDefault extends AggregateData { private final AggregateType aggregateType; - private final int dataType; - private long count; + private final TypeInfo dataType; private Value value; - private double m2, mean; /** * @param aggregateType the type of the aggregate operation * @param dataType the data type of the computed result */ - AggregateDataDefault(AggregateType aggregateType, int dataType) { + AggregateDataDefault(AggregateType aggregateType, TypeInfo dataType) { this.aggregateType = aggregateType; this.dataType = dataType; } @Override - void add(Database database, Value v) { + void add(SessionLocal session, Value v) { if (v == ValueNull.INSTANCE) { return; } - count++; switch (aggregateType) { case SUM: if (value == null) { - value = v.convertTo(dataType); - } else { - v = v.convertTo(value.getValueType()); - value = value.add(v); - } - break; - case AVG: - if (value == null) { - value = v.convertTo(DataType.getAddProofType(dataType)); + value = v.convertTo(dataType.getValueType()); } else { v = v.convertTo(value.getValueType()); value = value.add(v); } break; case MIN: - if (value == null || database.compare(v, value) < 0) { + if (value == null || session.compare(v, value) < 0) { value = v; } break; case MAX: - if (value == null || database.compare(v, value) > 0) { + if (value == null || session.compare(v, value) > 0) { value = v; } break; - case STDDEV_POP: - case STDDEV_SAMP: - case VAR_POP: - 
case VAR_SAMP: { - // Using Welford's method, see also - // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance - // http://www.johndcook.com/standard_deviation.html - double x = v.getDouble(); - if (count == 1) { - mean = x; - m2 = 0; - } else { - double delta = x - mean; - mean += delta / count; - m2 += delta * (x - mean); - } - break; - } case EVERY: - v = v.convertTo(Value.BOOLEAN); + v = v.convertToBoolean(); if (value == null) { value = v; } else { @@ -94,92 +64,56 @@ void add(Database database, Value v) { } break; case ANY: - v = v.convertTo(Value.BOOLEAN); + v = v.convertToBoolean(); if (value == null) { value = v; } else { value = ValueBoolean.get(value.getBoolean() || v.getBoolean()); } break; - case BIT_AND: + case BIT_AND_AGG: + case BIT_NAND_AGG: if (value == null) { - value = v.convertTo(dataType); + value = v; } else { - value = ValueLong.get(value.getLong() & v.getLong()).convertTo(dataType); + value = BitFunction.getBitwise(BitFunction.BITAND, dataType, value, v); } break; - case BIT_OR: + case BIT_OR_AGG: + case BIT_NOR_AGG: if (value == null) { - value = v.convertTo(dataType); + value = v; } else { - value = ValueLong.get(value.getLong() | v.getLong()).convertTo(dataType); - } - break; - default: - DbException.throwInternalError("type=" + aggregateType); - } - } - - @Override - Value getValue(Database database, int dataType) { - Value v = null; - switch (aggregateType) { - case SUM: - case MIN: - case MAX: - case BIT_OR: - case BIT_AND: - case ANY: - case EVERY: - v = value; - break; - case AVG: - if (value != null) { - v = divide(value, count); + value = BitFunction.getBitwise(BitFunction.BITOR, dataType, value, v); } break; - case STDDEV_POP: { - if (count < 1) { - return ValueNull.INSTANCE; - } - v = ValueDouble.get(Math.sqrt(m2 / count)); - break; - } - case STDDEV_SAMP: { - if (count < 2) { - return ValueNull.INSTANCE; - } - v = ValueDouble.get(Math.sqrt(m2 / (count - 1))); - break; - } - case VAR_POP: { - if (count < 1) { - 
return ValueNull.INSTANCE; - } - v = ValueDouble.get(m2 / count); - break; - } - case VAR_SAMP: { - if (count < 2) { - return ValueNull.INSTANCE; + case BIT_XOR_AGG: + case BIT_XNOR_AGG: + if (value == null) { + value = v; + } else { + value = BitFunction.getBitwise(BitFunction.BITXOR, dataType, value, v); } - v = ValueDouble.get(m2 / (count - 1)); break; - } default: - DbException.throwInternalError("type=" + aggregateType); + throw DbException.getInternalError("type=" + aggregateType); } - return v == null ? ValueNull.INSTANCE : v.convertTo(dataType); } - private static Value divide(Value a, long by) { - if (by == 0) { + @SuppressWarnings("incomplete-switch") + @Override + Value getValue(SessionLocal session) { + Value v = value; + if (v == null) { return ValueNull.INSTANCE; } - int type = Value.getHigherOrder(a.getValueType(), Value.LONG); - Value b = ValueLong.get(by).convertTo(type); - a = a.convertTo(type).divide(b); - return a; + switch (aggregateType) { + case BIT_NAND_AGG: + case BIT_NOR_AGG: + case BIT_XNOR_AGG: + v = BitFunction.getBitwise(BitFunction.BITNOT, dataType, v, null); + } + return v.convertTo(dataType); } } diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataDistinctWithCounts.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataDistinctWithCounts.java index 1384549ef0..60bd31ef3f 100644 --- a/h2/src/main/org/h2/expression/aggregate/AggregateDataDistinctWithCounts.java +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataDistinctWithCounts.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression.aggregate; import java.util.TreeMap; -import org.h2.engine.Database; +import org.h2.engine.SessionLocal; import org.h2.value.Value; import org.h2.value.ValueNull; @@ -14,7 +14,7 @@ * Data stored while calculating an aggregate that needs distinct values with * their counts. */ -class AggregateDataDistinctWithCounts extends AggregateData { +final class AggregateDataDistinctWithCounts extends AggregateData { private final boolean ignoreNulls; @@ -37,12 +37,12 @@ class AggregateDataDistinctWithCounts extends AggregateData { } @Override - void add(Database database, Value v) { + void add(SessionLocal session, Value v) { if (ignoreNulls && v == ValueNull.INSTANCE) { return; } if (values == null) { - values = new TreeMap<>(database.getCompareMode()); + values = new TreeMap<>(session.getDatabase().getCompareMode()); } LongDataCounter a = values.get(v); if (a == null) { @@ -56,7 +56,7 @@ void add(Database database, Value v) { } @Override - Value getValue(Database database, int dataType) { + Value getValue(SessionLocal session) { return null; } diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataEnvelope.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataEnvelope.java index 4f5ef2fdf4..a2215249d7 100644 --- a/h2/src/main/org/h2/expression/aggregate/AggregateDataEnvelope.java +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataEnvelope.java @@ -1,13 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression.aggregate; import java.util.ArrayList; -import org.h2.engine.Database; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.index.Index; @@ -22,7 +22,7 @@ /** * Data stored while calculating an aggregate. */ -class AggregateDataEnvelope extends AggregateData { +final class AggregateDataEnvelope extends AggregateData { private double[] envelope; @@ -57,15 +57,15 @@ static Index getGeometryColumnIndex(Expression on) { } @Override - void add(Database database, Value v) { + void add(SessionLocal session, Value v) { if (v == ValueNull.INSTANCE) { return; } - envelope = GeometryUtils.union(envelope, ((ValueGeometry) v.convertTo(Value.GEOMETRY)).getEnvelopeNoCopy()); + envelope = GeometryUtils.union(envelope, v.convertToGeometry(null).getEnvelopeNoCopy()); } @Override - Value getValue(Database database, int dataType) { + Value getValue(SessionLocal session) { return ValueGeometry.fromEnvelope(envelope); } diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataSelectivity.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataSelectivity.java deleted file mode 100644 index e0a21e17bc..0000000000 --- a/h2/src/main/org/h2/expression/aggregate/AggregateDataSelectivity.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression.aggregate; - -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.util.IntIntHashMap; -import org.h2.value.Value; -import org.h2.value.ValueInt; - -/** - * Data stored while calculating a SELECTIVITY aggregate. 
- */ -class AggregateDataSelectivity extends AggregateData { - - private final boolean distinct; - - private long count; - private IntIntHashMap distinctHashes; - private double m2; - - /** - * Creates new instance of data for SELECTIVITY aggregate. - * - * @param distinct if distinct is used - */ - AggregateDataSelectivity(boolean distinct) { - this.distinct = distinct; - } - - @Override - void add(Database database, Value v) { - count++; - if (distinctHashes == null) { - distinctHashes = new IntIntHashMap(); - } - int size = distinctHashes.size(); - if (size > Constants.SELECTIVITY_DISTINCT_COUNT) { - distinctHashes = new IntIntHashMap(); - m2 += size; - } - int hash = v.hashCode(); - // the value -1 is not supported - distinctHashes.put(hash, 1); - } - - @Override - Value getValue(Database database, int dataType) { - if (distinct) { - count = 0; - } - Value v = null; - int s = 0; - if (count == 0) { - s = 0; - } else { - m2 += distinctHashes.size(); - m2 = 100 * m2 / count; - s = (int) m2; - s = s <= 0 ? 1 : s > 100 ? 100 : s; - } - v = ValueInt.get(s); - return v.convertTo(dataType); - } - -} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataStdVar.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataStdVar.java new file mode 100644 index 0000000000..2c64503025 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataStdVar.java @@ -0,0 +1,90 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; + +/** + * Data stored while calculating a STDDEV_POP, STDDEV_SAMP, VAR_SAMP, VAR_POP, + * REGR_SXX, or REGR_SYY aggregate. 
+ */ +final class AggregateDataStdVar extends AggregateData { + + private final AggregateType aggregateType; + + private long count; + + private double m2, mean; + + /** + * @param aggregateType + * the type of the aggregate operation + */ + AggregateDataStdVar(AggregateType aggregateType) { + this.aggregateType = aggregateType; + } + + @Override + void add(SessionLocal session, Value v) { + if (v == ValueNull.INSTANCE) { + return; + } + // Using Welford's method, see also + // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + // https://www.johndcook.com/standard_deviation.html + double x = v.getDouble(); + if (++count == 1) { + mean = x; + m2 = 0; + } else { + double delta = x - mean; + mean += delta / count; + m2 += delta * (x - mean); + } + } + + @Override + Value getValue(SessionLocal session) { + double v; + switch (aggregateType) { + case STDDEV_SAMP: + case VAR_SAMP: + if (count < 2) { + return ValueNull.INSTANCE; + } + v = m2 / (count - 1); + if (aggregateType == AggregateType.STDDEV_SAMP) { + v = Math.sqrt(v); + } + break; + case STDDEV_POP: + case VAR_POP: + if (count < 1) { + return ValueNull.INSTANCE; + } + v = m2 / count; + if (aggregateType == AggregateType.STDDEV_POP) { + v = Math.sqrt(v); + } + break; + case REGR_SXX: + case REGR_SYY: + if (count < 1) { + return ValueNull.INSTANCE; + } + v = m2; + break; + default: + throw DbException.getInternalError("type=" + aggregateType); + } + return ValueDouble.get(v); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateType.java b/h2/src/main/org/h2/expression/aggregate/AggregateType.java index b72e9701a0..23df562bf1 100644 --- a/h2/src/main/org/h2/expression/aggregate/AggregateType.java +++ b/h2/src/main/org/h2/expression/aggregate/AggregateType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.aggregate; @@ -71,25 +71,100 @@ public enum AggregateType { EVERY, /** - * The aggregate type for BOOL_OR(expression). + * The aggregate type for BIT_AND_AGG(expression). */ - BIT_OR, + BIT_AND_AGG, /** - * The aggregate type for BOOL_AND(expression). + * The aggregate type for BIT_OR_AGG(expression). */ - BIT_AND, + BIT_OR_AGG, /** - * The aggregate type for SELECTIVITY(expression). + * The aggregate type for BIT_XOR_AGG(expression). */ - SELECTIVITY, + BIT_XOR_AGG, + + /** + * The aggregate type for BIT_NAND_AGG(expression). + */ + BIT_NAND_AGG, + + /** + * The aggregate type for BIT_NOR_AGG(expression). + */ + BIT_NOR_AGG, + + /** + * The aggregate type for BIT_XNOR_AGG(expression). + */ + BIT_XNOR_AGG, /** * The aggregate type for HISTOGRAM(expression). */ HISTOGRAM, + /** + * The aggregate type for COVAR_POP binary set function. + */ + COVAR_POP, + + /** + * The aggregate type for COVAR_SAMP binary set function. + */ + COVAR_SAMP, + + /** + * The aggregate type for CORR binary set function. + */ + CORR, + + /** + * The aggregate type for REGR_SLOPE binary set function. + */ + REGR_SLOPE, + + /** + * The aggregate type for REGR_INTERCEPT binary set function. + */ + REGR_INTERCEPT, + + /** + * The aggregate type for REGR_COUNT binary set function. + */ + REGR_COUNT, + + /** + * The aggregate type for REGR_R2 binary set function. + */ + REGR_R2, + + /** + * The aggregate type for REGR_AVGX binary set function. + */ + REGR_AVGX, + + /** + * The aggregate type for REGR_AVGY binary set function. + */ + REGR_AVGY, + + /** + * The aggregate type for REGR_SXX binary set function. + */ + REGR_SXX, + + /** + * The aggregate type for REGR_SYY binary set function. + */ + REGR_SYY, + + /** + * The aggregate type for REGR_SXY binary set function. + */ + REGR_SXY, + /** * The type for RANK() hypothetical set function. 
*/ @@ -145,4 +220,14 @@ public enum AggregateType { */ ENVELOPE, + /** + * The aggregate type for JSON_OBJECTAGG(expression: expression). + */ + JSON_OBJECTAGG, + + /** + * The aggregate type for JSON_ARRAYAGG(expression). + */ + JSON_ARRAYAGG, + } diff --git a/h2/src/main/org/h2/expression/aggregate/JavaAggregate.java b/h2/src/main/org/h2/expression/aggregate/JavaAggregate.java index cea82b63d7..d4ce36570c 100644 --- a/h2/src/main/org/h2/expression/aggregate/JavaAggregate.java +++ b/h2/src/main/org/h2/expression/aggregate/JavaAggregate.java @@ -1,26 +1,27 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.aggregate; -import java.sql.Connection; import java.sql.SQLException; import org.h2.api.Aggregate; -import org.h2.command.Parser; -import org.h2.command.dml.Select; -import org.h2.engine.Session; -import org.h2.engine.UserAggregate; +import org.h2.command.query.Select; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; +import org.h2.expression.aggregate.AggregateDataCollecting.NullCollectionMode; +import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; -import org.h2.value.DataType; +import org.h2.schema.UserAggregate; +import org.h2.util.ParserUtil; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueBoolean; import org.h2.value.ValueNull; import org.h2.value.ValueRow; +import org.h2.value.ValueToObjectConverter; /** * This class wraps a user-defined aggregate. 
@@ -30,7 +31,7 @@ public class JavaAggregate extends AbstractAggregate { private final UserAggregate userAggregate; private int[] argTypes; private int dataType; - private Connection userConnection; + private JdbcConnection userConnection; public JavaAggregate(UserAggregate userAggregate, Expression[] args, Select select, boolean distinct) { super(select, args, distinct); @@ -50,11 +51,10 @@ public int getCost() { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - Parser.quoteIdentifier(builder, userAggregate.getName(), alwaysQuote).append('('); - writeExpressions(builder, args, alwaysQuote); - builder.append(')'); - return appendTailConditions(builder, alwaysQuote); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + ParserUtil.quoteIdentifier(builder, userAggregate.getName(), sqlFlags).append('('); + writeExpressions(builder, args, sqlFlags).append(')'); + return appendTailConditions(builder, sqlFlags, false); } @Override @@ -83,7 +83,7 @@ public boolean isEverything(ExpressionVisitor visitor) { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { super.optimize(session); userConnection = session.createConnection(false); int len = args.length; @@ -113,7 +113,7 @@ private Aggregate getInstance() { } @Override - public Value getAggregatedValue(Session session, Object aggregateData) { + public Value getAggregatedValue(SessionLocal session, Object aggregateData) { try { Aggregate agg; if (distinct) { @@ -122,12 +122,13 @@ public Value getAggregatedValue(Session session, Object aggregateData) { if (data != null) { for (Value value : data.values) { if (args.length == 1) { - agg.add(value.getObject()); + agg.add(ValueToObjectConverter.valueToDefaultObject(value, userConnection, false)); } else { Value[] values = ((ValueRow) value).getList(); Object[] argValues = new Object[args.length]; for (int i = 0, len = args.length; i < len; i++) { - 
argValues[i] = values[i].getObject(); + argValues[i] = ValueToObjectConverter.valueToDefaultObject(values[i], userConnection, + false); } agg.add(argValues); } @@ -143,18 +144,18 @@ public Value getAggregatedValue(Session session, Object aggregateData) { if (obj == null) { return ValueNull.INSTANCE; } - return DataType.convertToValue(session, obj, dataType); + return ValueToObjectConverter.objectToValue(session, obj, dataType); } catch (SQLException e) { throw DbException.convert(e); } } @Override - protected void updateAggregate(Session session, Object aggregateData) { + protected void updateAggregate(SessionLocal session, Object aggregateData) { updateData(session, aggregateData, null); } - private void updateData(Session session, Object aggregateData, Value[] remembered) { + private void updateData(SessionLocal session, Object aggregateData, Value[] remembered) { try { if (distinct) { AggregateDataCollecting data = (AggregateDataCollecting) aggregateData; @@ -162,18 +163,16 @@ private void updateData(Session session, Object aggregateData, Value[] remembere Value arg = null; for (int i = 0, len = args.length; i < len; i++) { arg = remembered == null ? args[i].getValue(session) : remembered[i]; - arg = arg.convertTo(argTypes[i]); argValues[i] = arg; } - data.add(session.getDatabase(), args.length == 1 ? arg : ValueRow.get(argValues)); + data.add(session, args.length == 1 ? arg : ValueRow.get(argValues)); } else { Aggregate agg = (Aggregate) aggregateData; Object[] argValues = new Object[args.length]; Object arg = null; for (int i = 0, len = args.length; i < len; i++) { Value v = remembered == null ? args[i].getValue(session) : remembered[i]; - v = v.convertTo(argTypes[i]); - arg = v.getObject(); + arg = ValueToObjectConverter.valueToDefaultObject(v, userConnection, false); argValues[i] = arg; } agg.add(args.length == 1 ? 
arg : argValues); @@ -184,7 +183,7 @@ private void updateData(Session session, Object aggregateData, Value[] remembere } @Override - protected void updateGroupAggregates(Session session, int stage) { + protected void updateGroupAggregates(SessionLocal session, int stage) { super.updateGroupAggregates(session, stage); for (Expression expr : args) { expr.updateAggregate(session, stage); @@ -201,7 +200,7 @@ protected int getNumExpressions() { } @Override - protected void rememberExpressions(Session session, Value[] array) { + protected void rememberExpressions(SessionLocal session, Value[] array) { int length = args.length; for (int i = 0; i < length; i++) { array[i] = args[i].getValue(session); @@ -212,15 +211,15 @@ protected void rememberExpressions(Session session, Value[] array) { } @Override - protected void updateFromExpressions(Session session, Object aggregateData, Value[] array) { - if (filterCondition == null || array[getNumExpressions() - 1].getBoolean()) { + protected void updateFromExpressions(SessionLocal session, Object aggregateData, Value[] array) { + if (filterCondition == null || array[getNumExpressions() - 1].isTrue()) { updateData(session, aggregateData, array); } } @Override protected Object createAggregateData() { - return distinct ? new AggregateDataCollecting(true) : getInstance(); + return distinct ? new AggregateDataCollecting(true, false, NullCollectionMode.IGNORED) : getInstance(); } } diff --git a/h2/src/main/org/h2/expression/aggregate/ListaggArguments.java b/h2/src/main/org/h2/expression/aggregate/ListaggArguments.java new file mode 100644 index 0000000000..ee134f7a8c --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/ListaggArguments.java @@ -0,0 +1,126 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +/** + * Additional arguments of LISTAGG aggregate function. + */ +public final class ListaggArguments { + + private String separator; + + private boolean onOverflowTruncate; + + private String filter; + + private boolean withoutCount; + + /** + * Creates a new instance of additional arguments of LISTAGG aggregate + * function. + */ + public ListaggArguments() { + } + + /** + * Sets the custom LISTAGG separator. + * + * @param separator + * the LISTAGG separator, {@code null} or empty string means no + * separator + */ + public void setSeparator(String separator) { + this.separator = separator != null ? separator : ""; + } + + /** + * Returns the LISTAGG separator. + * + * @return the LISTAGG separator, {@code null} means the default + */ + public String getSeparator() { + return separator; + } + + /** + * Returns the effective LISTAGG separator. + * + * @return the effective LISTAGG separator + */ + public String getEffectiveSeparator() { + return separator != null ? separator : ","; + } + + /** + * Sets the LISTAGG overflow behavior. + * + * @param onOverflowTruncate + * {@code true} for ON OVERFLOW TRUNCATE, {@code false} for ON + * OVERFLOW ERROR + */ + public void setOnOverflowTruncate(boolean onOverflowTruncate) { + this.onOverflowTruncate = onOverflowTruncate; + } + + /** + * Returns the LISTAGG overflow behavior. + * + * @return {@code true} for ON OVERFLOW TRUNCATE, {@code false} for ON + * OVERFLOW ERROR + */ + public boolean getOnOverflowTruncate() { + return onOverflowTruncate; + } + + /** + * Sets the custom LISTAGG truncation filter. + * + * @param filter + * the LISTAGG truncation filter, {@code null} or empty string + * means no truncation filter + */ + public void setFilter(String filter) { + this.filter = filter != null ? filter : ""; + } + + /** + * Returns the LISTAGG truncation filter. 
+ * + * @return the LISTAGG truncation filter, {@code null} means the default + */ + public String getFilter() { + return filter; + } + + /** + * Returns the effective LISTAGG truncation filter. + * + * @return the effective LISTAGG truncation filter + */ + public String getEffectiveFilter() { + return filter != null ? filter : "..."; + } + + /** + * Sets the LISTAGG count indication. + * + * @param withoutCount + * {@code true} for WITHOUT COUNT, {@code false} for WITH COUNT + */ + public void setWithoutCount(boolean withoutCount) { + this.withoutCount = withoutCount; + } + + /** + * Returns the LISTAGG count indication. + * + * @return {@code true} for WITHOUT COUNT, {@code false} for WITH COUNT + */ + public boolean isWithoutCount() { + return withoutCount; + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/LongDataCounter.java b/h2/src/main/org/h2/expression/aggregate/LongDataCounter.java index 369d3591a2..2bd5086f19 100644 --- a/h2/src/main/org/h2/expression/aggregate/LongDataCounter.java +++ b/h2/src/main/org/h2/expression/aggregate/LongDataCounter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.aggregate; diff --git a/h2/src/main/org/h2/expression/aggregate/Percentile.java b/h2/src/main/org/h2/expression/aggregate/Percentile.java index 379e341ca7..39bae3ca73 100644 --- a/h2/src/main/org/h2/expression/aggregate/Percentile.java +++ b/h2/src/main/org/h2/expression/aggregate/Percentile.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.aggregate; @@ -11,19 +11,17 @@ import java.util.Arrays; import org.h2.api.IntervalQualifier; -import org.h2.command.dml.SelectOrderBy; +import org.h2.command.query.QueryOrderBy; import org.h2.engine.Database; -import org.h2.engine.Mode; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.index.Cursor; import org.h2.index.Index; +import org.h2.mode.DefaultNullOrdering; import org.h2.result.SearchRow; import org.h2.result.SortOrder; import org.h2.table.Column; -import org.h2.table.IndexColumn; import org.h2.table.Table; import org.h2.table.TableFilter; import org.h2.util.DateTimeUtils; @@ -31,10 +29,11 @@ import org.h2.value.CompareMode; import org.h2.value.Value; import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; import org.h2.value.ValueInterval; import org.h2.value.ValueNull; +import org.h2.value.ValueNumeric; import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; import org.h2.value.ValueTimestamp; import org.h2.value.ValueTimestampTimeZone; @@ -48,22 +47,20 @@ final class Percentile { */ static final BigDecimal HALF = BigDecimal.valueOf(0.5d); - private static boolean isNullsLast(Index index) { - IndexColumn ic = index.getIndexColumns()[0]; - int sortType = ic.sortType; - return (sortType & SortOrder.NULLS_LAST) != 0 - || (sortType & SortOrder.NULLS_FIRST) == 0 - && (sortType & SortOrder.DESCENDING) != 0 ^ SysProperties.SORT_NULLS_HIGH; + private static boolean isNullsLast(DefaultNullOrdering defaultNullOrdering, Index index) { + return defaultNullOrdering.compareNull(true, index.getIndexColumns()[0].sortType) > 0; } /** * Get the index (if any) for the column specified in the inverse * distribution function. 
* + * @param database the database * @param on the expression (usually a column expression) * @return the index, or null */ - static Index getColumnIndex(Expression on) { + static Index getColumnIndex(Database database, Expression on) { + DefaultNullOrdering defaultNullOrdering = database.getDefaultNullOrdering(); if (on instanceof ExpressionColumn) { ExpressionColumn col = (ExpressionColumn) on; Column column = col.getColumn(); @@ -84,7 +81,8 @@ static Index getColumnIndex(Expression on) { } // Prefer index without nulls last for nullable columns if (result == null || result.getColumns().length > index.getColumns().length - || nullable && isNullsLast(result) && !isNullsLast(index)) { + || nullable && isNullsLast(defaultNullOrdering, result) + && !isNullsLast(defaultNullOrdering, index)) { result = index; } } @@ -98,7 +96,7 @@ static Index getColumnIndex(Expression on) { /** * Get the result from the array of values. * - * @param database the database + * @param session the session * @param array array with values * @param dataType the data type * @param orderByList ORDER BY list @@ -106,9 +104,9 @@ static Index getColumnIndex(Expression on) { * @param interpolate whether value should be interpolated * @return the result */ - static Value getValue(Database database, Value[] array, int dataType, ArrayList orderByList, + static Value getValue(SessionLocal session, Value[] array, int dataType, ArrayList orderByList, BigDecimal percentile, boolean interpolate) { - final CompareMode compareMode = database.getCompareMode(); + final CompareMode compareMode = session.getDatabase().getCompareMode(); Arrays.sort(array, compareMode); int count = array.length; boolean reverseIndex = orderByList != null && (orderByList.get(0).sortType & SortOrder.DESCENDING) != 0; @@ -135,9 +133,9 @@ static Value getValue(Database database, Value[] array, int dataType, ArrayList< } Value v = array[rowIdx1]; if (!interpolate) { - return v.convertTo(dataType); + return v; } - return 
interpolate(v, array[rowIdx2], factor, dataType, database.getMode(), compareMode); + return interpolate(v, array[rowIdx2], factor, dataType, session, compareMode); } /** @@ -151,9 +149,10 @@ static Value getValue(Database database, Value[] array, int dataType, ArrayList< * @param interpolate whether value should be interpolated * @return the result */ - static Value getFromIndex(Session session, Expression expression, int dataType, - ArrayList orderByList, BigDecimal percentile, boolean interpolate) { - Index index = getColumnIndex(expression); + static Value getFromIndex(SessionLocal session, Expression expression, int dataType, + ArrayList orderByList, BigDecimal percentile, boolean interpolate) { + Database db = session.getDatabase(); + Index index = getColumnIndex(db, expression); long count = index.getRowCount(session); if (count == 0) { return ValueNull.INSTANCE; @@ -185,7 +184,7 @@ static Value getFromIndex(Session session, Expression expression, int dataType, } // If no nulls found and if index orders nulls last create a second // cursor to count nulls at the end. 
- if (!hasNulls && isNullsLast(index)) { + if (!hasNulls && isNullsLast(db.getDefaultNullOrdering(), index)) { TableFilter tableFilter = expr.getTableFilter(); SearchRow check = tableFilter.getTable().getTemplateSimpleRow(true); check.setValue(columnId, ValueNull.INSTANCE); @@ -239,54 +238,77 @@ static Value getFromIndex(Session session, Expression expression, int dataType, if (v2 == ValueNull.INSTANCE) { return v; } - Database database = session.getDatabase(); if (reverseIndex) { Value t = v; v = v2; v2 = t; } - return interpolate(v, v2, factor, dataType, database.getMode(), database.getCompareMode()); + return interpolate(v, v2, factor, dataType, session, db.getCompareMode()); } - return v.convertTo(dataType); + return v; } - private static Value interpolate(Value v0, Value v1, BigDecimal factor, int dataType, Mode databaseMode, + private static Value interpolate(Value v0, Value v1, BigDecimal factor, int dataType, SessionLocal session, CompareMode compareMode) { - if (v0.compareTo(v1, databaseMode, compareMode) == 0) { - return v0.convertTo(dataType); + if (v0.compareTo(v1, session, compareMode) == 0) { + return v0; } switch (dataType) { - case Value.BYTE: - case Value.SHORT: - case Value.INT: - return ValueDecimal.get( + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + return ValueNumeric.get( interpolateDecimal(BigDecimal.valueOf(v0.getInt()), BigDecimal.valueOf(v1.getInt()), factor)); - case Value.LONG: - return ValueDecimal.get( + case Value.BIGINT: + return ValueNumeric.get( interpolateDecimal(BigDecimal.valueOf(v0.getLong()), BigDecimal.valueOf(v1.getLong()), factor)); - case Value.DECIMAL: - return ValueDecimal.get(interpolateDecimal(v0.getBigDecimal(), v1.getBigDecimal(), factor)); - case Value.FLOAT: + case Value.NUMERIC: + case Value.DECFLOAT: + return ValueNumeric.get(interpolateDecimal(v0.getBigDecimal(), v1.getBigDecimal(), factor)); + case Value.REAL: case Value.DOUBLE: - return ValueDecimal.get( + return ValueNumeric.get( 
interpolateDecimal( BigDecimal.valueOf(v0.getDouble()), BigDecimal.valueOf(v1.getDouble()), factor)); case Value.TIME: { - ValueTime t0 = (ValueTime) v0.convertTo(Value.TIME), t1 = (ValueTime) v1.convertTo(Value.TIME); + ValueTime t0 = (ValueTime) v0, t1 = (ValueTime) v1; BigDecimal n0 = BigDecimal.valueOf(t0.getNanos()); BigDecimal n1 = BigDecimal.valueOf(t1.getNanos()); return ValueTime.fromNanos(interpolateDecimal(n0, n1, factor).longValue()); } + case Value.TIME_TZ: { + ValueTimeTimeZone t0 = (ValueTimeTimeZone) v0, t1 = (ValueTimeTimeZone) v1; + BigDecimal n0 = BigDecimal.valueOf(t0.getNanos()); + BigDecimal n1 = BigDecimal.valueOf(t1.getNanos()); + BigDecimal offset = BigDecimal.valueOf(t0.getTimeZoneOffsetSeconds()) + .multiply(BigDecimal.ONE.subtract(factor)) + .add(BigDecimal.valueOf(t1.getTimeZoneOffsetSeconds()).multiply(factor)); + int intOffset = offset.intValue(); + BigDecimal intOffsetBD = BigDecimal.valueOf(intOffset); + BigDecimal bd = interpolateDecimal(n0, n1, factor); + if (offset.compareTo(intOffsetBD) != 0) { + bd = bd.add( + offset.subtract(intOffsetBD).multiply(BigDecimal.valueOf(DateTimeUtils.NANOS_PER_SECOND))); + } + long timeNanos = bd.longValue(); + if (timeNanos < 0L) { + timeNanos += DateTimeUtils.NANOS_PER_SECOND; + intOffset++; + } else if (timeNanos >= DateTimeUtils.NANOS_PER_DAY) { + timeNanos -= DateTimeUtils.NANOS_PER_SECOND; + intOffset--; + } + return ValueTimeTimeZone.fromNanos(timeNanos, intOffset); + } case Value.DATE: { - ValueDate d0 = (ValueDate) v0.convertTo(Value.DATE), d1 = (ValueDate) v1.convertTo(Value.DATE); + ValueDate d0 = (ValueDate) v0, d1 = (ValueDate) v1; BigDecimal a0 = BigDecimal.valueOf(DateTimeUtils.absoluteDayFromDateValue(d0.getDateValue())); BigDecimal a1 = BigDecimal.valueOf(DateTimeUtils.absoluteDayFromDateValue(d1.getDateValue())); return ValueDate.fromDateValue( DateTimeUtils.dateValueFromAbsoluteDay(interpolateDecimal(a0, a1, factor).longValue())); } case Value.TIMESTAMP: { - ValueTimestamp ts0 = 
(ValueTimestamp) v0.convertTo(Value.TIMESTAMP), - ts1 = (ValueTimestamp) v1.convertTo(Value.TIMESTAMP); + ValueTimestamp ts0 = (ValueTimestamp) v0, ts1 = (ValueTimestamp) v1; BigDecimal a0 = timestampToDecimal(ts0.getDateValue(), ts0.getTimeNanos()); BigDecimal a1 = timestampToDecimal(ts1.getDateValue(), ts1.getTimeNanos()); BigInteger[] dr = interpolateDecimal(a0, a1, factor).toBigInteger() @@ -301,19 +323,18 @@ private static Value interpolate(Value v0, Value v1, BigDecimal factor, int data DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay), timeNanos); } case Value.TIMESTAMP_TZ: { - ValueTimestampTimeZone ts0 = (ValueTimestampTimeZone) v0.convertTo(Value.TIMESTAMP_TZ), - ts1 = (ValueTimestampTimeZone) v1.convertTo(Value.TIMESTAMP_TZ); + ValueTimestampTimeZone ts0 = (ValueTimestampTimeZone) v0, ts1 = (ValueTimestampTimeZone) v1; BigDecimal a0 = timestampToDecimal(ts0.getDateValue(), ts0.getTimeNanos()); BigDecimal a1 = timestampToDecimal(ts1.getDateValue(), ts1.getTimeNanos()); - BigDecimal offset = BigDecimal.valueOf(ts0.getTimeZoneOffsetMins()) + BigDecimal offset = BigDecimal.valueOf(ts0.getTimeZoneOffsetSeconds()) .multiply(BigDecimal.ONE.subtract(factor)) - .add(BigDecimal.valueOf(ts1.getTimeZoneOffsetMins()).multiply(factor)); - short shortOffset = offset.shortValue(); - BigDecimal shortOffsetBD = BigDecimal.valueOf(shortOffset); + .add(BigDecimal.valueOf(ts1.getTimeZoneOffsetSeconds()).multiply(factor)); + int intOffset = offset.intValue(); + BigDecimal intOffsetBD = BigDecimal.valueOf(intOffset); BigDecimal bd = interpolateDecimal(a0, a1, factor); - if (offset.compareTo(shortOffsetBD) != 0) { + if (offset.compareTo(intOffsetBD) != 0) { bd = bd.add( - offset.subtract(shortOffsetBD).multiply(BigDecimal.valueOf(DateTimeUtils.NANOS_PER_MINUTE))); + offset.subtract(intOffsetBD).multiply(BigDecimal.valueOf(DateTimeUtils.NANOS_PER_SECOND))); } BigInteger[] dr = bd.toBigInteger().divideAndRemainder(IntervalUtils.NANOS_PER_DAY_BI); long absoluteDay = 
dr[0].longValue(); @@ -323,7 +344,7 @@ private static Value interpolate(Value v0, Value v1, BigDecimal factor, int data absoluteDay--; } return ValueTimestampTimeZone.fromDateValueAndNanos(DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay), - timeNanos, shortOffset); + timeNanos, intOffset); } case Value.INTERVAL_YEAR: case Value.INTERVAL_MONTH: @@ -344,7 +365,7 @@ private static Value interpolate(Value v0, Value v1, BigDecimal factor, int data .toBigInteger()); default: // Use the same rules as PERCENTILE_DISC - return (factor.compareTo(HALF) > 0 ? v1 : v0).convertTo(dataType); + return (factor.compareTo(HALF) > 0 ? v1 : v0); } } diff --git a/h2/src/main/org/h2/expression/aggregate/package.html b/h2/src/main/org/h2/expression/aggregate/package.html index cd6e29a811..e20a45ac82 100644 --- a/h2/src/main/org/h2/expression/aggregate/package.html +++ b/h2/src/main/org/h2/expression/aggregate/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/expression/analysis/DataAnalysisOperation.java b/h2/src/main/org/h2/expression/analysis/DataAnalysisOperation.java index c264ee147e..8cb6ebda12 100644 --- a/h2/src/main/org/h2/expression/analysis/DataAnalysisOperation.java +++ b/h2/src/main/org/h2/expression/analysis/DataAnalysisOperation.java @@ -1,19 +1,18 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression.analysis; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import org.h2.api.ErrorCode; -import org.h2.command.dml.Select; -import org.h2.command.dml.SelectGroups; -import org.h2.command.dml.SelectOrderBy; -import org.h2.engine.Session; +import org.h2.command.query.QueryOrderBy; +import org.h2.command.query.Select; +import org.h2.command.query.SelectGroups; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; import org.h2.message.DbException; @@ -21,7 +20,7 @@ import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; import org.h2.value.Value; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; /** * A base class for data analysis operations such as aggregates and window @@ -74,16 +73,16 @@ public abstract class DataAnalysisOperation extends Expression { * index offset * @return the SortOrder */ - protected static SortOrder createOrder(Session session, ArrayList orderBy, int offset) { + protected static SortOrder createOrder(SessionLocal session, ArrayList orderBy, int offset) { int size = orderBy.size(); int[] index = new int[size]; int[] sortType = new int[size]; for (int i = 0; i < size; i++) { - SelectOrderBy o = orderBy.get(i); + QueryOrderBy o = orderBy.get(i); index[i] = i + offset; sortType[i] = o.sortType; } - return new SortOrder(session.getDatabase(), index, sortType, null); + return new SortOrder(session, index, sortType, null); } protected DataAnalysisOperation(Select select) { @@ -121,12 +120,12 @@ protected SortOrder getOverOrderBySort() { public final void mapColumns(ColumnResolver resolver, int level, int state) { if (over != null) { if (state != MAP_INITIAL) { - throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getSQL(false)); + throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getTraceSQL()); } state = MAP_IN_WINDOW; } else { 
if (state == MAP_IN_AGGREGATE) { - throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getSQL(false)); + throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getTraceSQL()); } state = MAP_IN_AGGREGATE; } @@ -150,14 +149,14 @@ protected void mapColumnsAnalysis(ColumnResolver resolver, int level, int innerS } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { if (over != null) { over.optimize(session); - ArrayList orderBy = over.getOrderBy(); + ArrayList orderBy = over.getOrderBy(); if (orderBy != null) { overOrderBySort = createOrder(session, orderBy, getNumExpressions()); } else if (!isAggregate()) { - overOrderBySort = new SortOrder(session.getDatabase(), new int[getNumExpressions()], new int[0], null); + overOrderBySort = new SortOrder(session, new int[getNumExpressions()]); } WindowFrame frame = over.getWindowFrame(); if (frame != null) { @@ -170,9 +169,7 @@ public Expression optimize(Session session) { int n = 0; WindowFrameBound bound = frame.getStarting(); if (bound.isParameterized()) { - if (orderBySize != 1) { - throw getSingleSortKeyException(); - } + checkOrderBy(frame.getUnits(), orderBySize); if (bound.isVariable()) { bound.setExpressionIndex(index); n++; @@ -180,9 +177,7 @@ public Expression optimize(Session session) { } bound = frame.getFollowing(); if (bound != null && bound.isParameterized()) { - if (orderBySize != 1) { - throw getSingleSortKeyException(); - } + checkOrderBy(frame.getUnits(), orderBySize); if (bound.isVariable()) { bound.setExpressionIndex(index + n); n++; @@ -194,9 +189,24 @@ public Expression optimize(Session session) { return this; } - private DbException getSingleSortKeyException() { - String sql = getSQL(false); - return DbException.getSyntaxError(sql, sql.length() - 1, "exactly one sort key is required for RANGE units"); + private void checkOrderBy(WindowFrameUnits units, int orderBySize) { + switch (units) { + case RANGE: + if 
(orderBySize != 1) { + String sql = getTraceSQL(); + throw DbException.getSyntaxError(sql, sql.length() - 1, + "exactly one sort key is required for RANGE units"); + } + break; + case GROUPS: + if (orderBySize < 1) { + String sql = getTraceSQL(); + throw DbException.getSyntaxError(sql, sql.length() - 1, + "a sort key is required for GROUPS units"); + } + break; + default: + } } @Override @@ -207,7 +217,7 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public final void updateAggregate(Session session, int stage) { + public final void updateAggregate(SessionLocal session, int stage) { if (stage == STAGE_RESET) { updateGroupAggregates(session, STAGE_RESET); lastGroupRowId = 0; @@ -251,7 +261,7 @@ public final void updateAggregate(Session session, int stage) { * @param groupRowId * row id of group */ - protected abstract void updateAggregate(Session session, SelectGroups groupData, int groupRowId); + protected abstract void updateAggregate(SessionLocal session, SelectGroups groupData, int groupRowId); /** * Invoked when processing group stage of grouped window queries to update @@ -262,7 +272,7 @@ public final void updateAggregate(Session session, int stage) { * @param stage * select stage */ - protected void updateGroupAggregates(Session session, int stage) { + protected void updateGroupAggregates(SessionLocal session, int stage) { if (over != null) { over.updateAggregate(session, stage); } @@ -292,7 +302,7 @@ private int getNumFrameExpressions() { * @param array * array to store values of expressions */ - protected abstract void rememberExpressions(Session session, Value[] array); + protected abstract void rememberExpressions(SessionLocal session, Value[] array); /** * Get the aggregate data for a window clause. @@ -305,7 +315,7 @@ private int getNumFrameExpressions() { * true if this is for ORDER BY * @return the aggregate data object, specific to each kind of aggregate. 
*/ - protected Object getWindowData(Session session, SelectGroups groupData, boolean forOrderBy) { + protected Object getWindowData(SessionLocal session, SelectGroups groupData, boolean forOrderBy) { Object data; Value key = over.getCurrentKey(session); PartitionData partition = groupData.getWindowExprData(this, key); @@ -358,25 +368,18 @@ public boolean isEverything(ExpressionVisitor visitor) { case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: case ExpressionVisitor.DETERMINISTIC: case ExpressionVisitor.INDEPENDENT: + case ExpressionVisitor.DECREMENT_QUERY_LEVEL: return false; - case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - case ExpressionVisitor.GET_COLUMNS1: - case ExpressionVisitor.GET_COLUMNS2: - return true; default: - throw DbException.throwInternalError("type=" + visitor.getType()); + return true; } } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { SelectGroups groupData = select.getGroupDataIfCurrent(over != null); if (groupData == null) { - throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getSQL(false)); + throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getTraceSQL()); } return over == null ? 
getAggregatedValue(session, getGroupData(groupData, true)) : getWindowResult(session, groupData); @@ -392,21 +395,21 @@ public Value getValue(Session session) { * the group data * @return result of this function */ - private Value getWindowResult(Session session, SelectGroups groupData) { + private Value getWindowResult(SessionLocal session, SelectGroups groupData) { PartitionData partition; Object data; - boolean forOrderBy = over.getOrderBy() != null; + boolean isOrdered = over.isOrdered(); Value key = over.getCurrentKey(session); partition = groupData.getWindowExprData(this, key); if (partition == null) { // Window aggregates with FILTER clause may have no collected values - data = forOrderBy ? new ArrayList<>() : createAggregateData(); + data = isOrdered ? new ArrayList<>() : createAggregateData(); partition = new PartitionData(data); groupData.setWindowExprData(this, key, partition); } else { data = partition.getData(); } - if (forOrderBy || !isAggregate()) { + if (isOrdered || !isAggregate()) { Value result = getOrderedResult(session, groupData, partition, data); if (result == null) { return getAggregatedValue(session, null); @@ -431,7 +434,7 @@ private Value getWindowResult(Session session, SelectGroups groupData) { * the aggregate data * @return aggregated value. */ - protected abstract Value getAggregatedValue(Session session, Object aggregateData); + protected abstract Value getAggregatedValue(SessionLocal session, Object aggregateData); /** * Update a row of an ordered aggregate. @@ -445,8 +448,8 @@ private Value getWindowResult(Session session, SelectGroups groupData) { * @param orderBy * list of order by expressions */ - protected void updateOrderedAggregate(Session session, SelectGroups groupData, int groupRowId, - ArrayList orderBy) { + protected void updateOrderedAggregate(SessionLocal session, SelectGroups groupData, int groupRowId, + ArrayList orderBy) { int ne = getNumExpressions(); int size = orderBy != null ? 
orderBy.size() : 0; int frameSize = getNumFrameExpressions(); @@ -454,7 +457,7 @@ protected void updateOrderedAggregate(Session session, SelectGroups groupData, i rememberExpressions(session, array); for (int i = 0; i < size; i++) { @SuppressWarnings("null") - SelectOrderBy o = orderBy.get(i); + QueryOrderBy o = orderBy.get(i); array[ne++] = o.expression.getValue(session); } if (frameSize > 0) { @@ -468,23 +471,24 @@ protected void updateOrderedAggregate(Session session, SelectGroups groupData, i array[ne++] = bound.getValue().getValue(session); } } - array[ne] = ValueInt.get(groupRowId); + array[ne] = ValueInteger.get(groupRowId); @SuppressWarnings("unchecked") ArrayList data = (ArrayList) getWindowData(session, groupData, true); data.add(array); } - private Value getOrderedResult(Session session, SelectGroups groupData, PartitionData partition, Object data) { + private Value getOrderedResult(SessionLocal session, SelectGroups groupData, PartitionData partition, // + Object data) { HashMap result = partition.getOrderedResult(); if (result == null) { result = new HashMap<>(); @SuppressWarnings("unchecked") ArrayList orderedData = (ArrayList) data; int rowIdColumn = getNumExpressions(); - ArrayList orderBy = over.getOrderBy(); + ArrayList orderBy = over.getOrderBy(); if (orderBy != null) { rowIdColumn += orderBy.size(); - Collections.sort(orderedData, overOrderBySort); + orderedData.sort(overOrderBySort); } rowIdColumn += getNumFrameExpressions(); getOrderedResultLoop(session, result, orderedData, rowIdColumn); @@ -506,7 +510,7 @@ private Value getOrderedResult(Session session, SelectGroups groupData, Partitio * @param rowIdColumn * the index of row id value */ - protected abstract void getOrderedResultLoop(Session session, HashMap result, + protected abstract void getOrderedResultLoop(SessionLocal session, HashMap result, ArrayList ordered, int rowIdColumn); /** @@ -514,14 +518,17 @@ protected abstract void getOrderedResultLoop(Session session, HashMap partitionBy; 
- private ArrayList orderBy; + private ArrayList orderBy; private WindowFrame frame; @@ -39,24 +42,35 @@ public final class Window { * string builder * @param orderBy * ORDER BY clause, or null - * @param alwaysQuote - * quote all identifiers + * @param sqlFlags + * formatting flags + * @param forceOrderBy + * whether synthetic ORDER BY clause should be generated when it + * is missing */ - public static void appendOrderBy(StringBuilder builder, ArrayList orderBy, boolean alwaysQuote) { + public static void appendOrderBy(StringBuilder builder, ArrayList orderBy, int sqlFlags, + boolean forceOrderBy) { if (orderBy != null && !orderBy.isEmpty()) { - if (builder.charAt(builder.length() - 1) != '(') { - builder.append(' '); - } - builder.append("ORDER BY "); + appendOrderByStart(builder); for (int i = 0; i < orderBy.size(); i++) { - SelectOrderBy o = orderBy.get(i); + QueryOrderBy o = orderBy.get(i); if (i > 0) { builder.append(", "); } - o.expression.getSQL(builder, alwaysQuote); + o.expression.getUnenclosedSQL(builder, sqlFlags); SortOrder.typeToString(builder, o.sortType); } + } else if (forceOrderBy) { + appendOrderByStart(builder); + builder.append("NULL"); + } + } + + private static void appendOrderByStart(StringBuilder builder) { + if (builder.charAt(builder.length() - 1) != '(') { + builder.append(' '); } + builder.append("ORDER BY "); } /** @@ -71,7 +85,7 @@ public static void appendOrderBy(StringBuilder builder, ArrayList * @param frame * window frame clause, or null */ - public Window(String parent, ArrayList partitionBy, ArrayList orderBy, + public Window(String parent, ArrayList partitionBy, ArrayList orderBy, WindowFrame frame) { this.parent = parent; this.partitionBy = partitionBy; @@ -96,7 +110,7 @@ public void mapColumns(ColumnResolver resolver, int level) { } } if (orderBy != null) { - for (SelectOrderBy o : orderBy) { + for (QueryOrderBy o : orderBy) { o.expression.mapColumns(resolver, level, Expression.MAP_IN_WINDOW); } } @@ -135,15 +149,32 @@ 
private void resolveWindows(ColumnResolver resolver) { * @param session * the session */ - public void optimize(Session session) { + public void optimize(SessionLocal session) { if (partitionBy != null) { - for (int i = 0; i < partitionBy.size(); i++) { - partitionBy.set(i, partitionBy.get(i).optimize(session)); + for (ListIterator i = partitionBy.listIterator(); i.hasNext();) { + Expression e = i.next().optimize(session); + if (e.isConstant()) { + i.remove(); + } else { + i.set(e); + } + } + if (partitionBy.isEmpty()) { + partitionBy = null; } } if (orderBy != null) { - for (SelectOrderBy o : orderBy) { - o.expression = o.expression.optimize(session); + for (Iterator i = orderBy.iterator(); i.hasNext();) { + QueryOrderBy o = i.next(); + Expression e = o.expression.optimize(session); + if (e.isConstant()) { + i.remove(); + } else { + o.expression = e; + } + } + if (orderBy.isEmpty()) { + orderBy = null; } } if (frame != null) { @@ -168,7 +199,7 @@ public void setEvaluatable(TableFilter tableFilter, boolean value) { } } if (orderBy != null) { - for (SelectOrderBy o : orderBy) { + for (QueryOrderBy o : orderBy) { o.expression.setEvaluatable(tableFilter, value); } } @@ -179,7 +210,7 @@ public void setEvaluatable(TableFilter tableFilter, boolean value) { * * @return ORDER BY clause, or null */ - public ArrayList getOrderBy() { + public ArrayList getOrderBy() { return orderBy; } @@ -192,6 +223,29 @@ public WindowFrame getWindowFrame() { return frame; } + /** + * Returns {@code true} if window ordering clause is specified or ROWS unit + * is used. 
+ * + * @return {@code true} if window ordering clause is specified or ROWS unit + * is used + */ + public boolean isOrdered() { + if (orderBy != null) { + return true; + } + if (frame != null && frame.getUnits() == WindowFrameUnits.ROWS) { + if (frame.getStarting().getType() == WindowFrameBoundType.UNBOUNDED_PRECEDING) { + WindowFrameBound following = frame.getFollowing(); + if (following != null && following.getType() == WindowFrameBoundType.UNBOUNDED_FOLLOWING) { + return false; + } + } + return true; + } + return false; + } + /** * Returns the key for the current group. * @@ -199,7 +253,7 @@ public WindowFrame getWindowFrame() { * session * @return key for the current group, or null */ - public Value getCurrentKey(Session session) { + public Value getCurrentKey(SessionLocal session) { if (partitionBy == null) { return null; } @@ -222,11 +276,15 @@ public Value getCurrentKey(Session session) { * * @param builder * string builder - * @param alwaysQuote quote all identifiers + * @param sqlFlags + * formatting flags + * @param forceOrderBy + * whether synthetic ORDER BY clause should be generated when it + * is missing * @return the specified string builder - * @see Expression#getSQL(StringBuilder, boolean) + * @see Expression#getSQL(StringBuilder, int, int) */ - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags, boolean forceOrderBy) { builder.append("OVER ("); if (partitionBy != null) { builder.append("PARTITION BY "); @@ -234,15 +292,15 @@ public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { if (i > 0) { builder.append(", "); } - partitionBy.get(i).getUnenclosedSQL(builder, alwaysQuote); + partitionBy.get(i).getUnenclosedSQL(builder, sqlFlags); } } - appendOrderBy(builder, orderBy, alwaysQuote); + appendOrderBy(builder, orderBy, sqlFlags, forceOrderBy); if (frame != null) { if (builder.charAt(builder.length() - 1) != '(') { builder.append(' '); } - 
frame.getSQL(builder, alwaysQuote); + frame.getSQL(builder, sqlFlags); } return builder.append(')'); } @@ -254,16 +312,16 @@ public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { * the session * @param stage * select stage - * @see Expression#updateAggregate(Session, int) + * @see Expression#updateAggregate(SessionLocal, int) */ - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { if (partitionBy != null) { for (Expression expr : partitionBy) { expr.updateAggregate(session, stage); } } if (orderBy != null) { - for (SelectOrderBy o : orderBy) { + for (QueryOrderBy o : orderBy) { o.expression.updateAggregate(session, stage); } } @@ -274,7 +332,7 @@ public void updateAggregate(Session session, int stage) { @Override public String toString() { - return getSQL(new StringBuilder(), false).toString(); + return getSQL(new StringBuilder(), HasSQL.TRACE_SQL_FLAGS, false).toString(); } } diff --git a/h2/src/main/org/h2/expression/analysis/WindowFrame.java b/h2/src/main/org/h2/expression/analysis/WindowFrame.java index 7d63eec995..a5b40722d9 100644 --- a/h2/src/main/org/h2/expression/analysis/WindowFrame.java +++ b/h2/src/main/org/h2/expression/analysis/WindowFrame.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression.analysis; @@ -11,7 +11,7 @@ import java.util.NoSuchElementException; import org.h2.api.ErrorCode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.BinaryOperation; import org.h2.expression.BinaryOperation.OpType; import org.h2.expression.Expression; @@ -27,7 +27,7 @@ */ public final class WindowFrame { - private static abstract class Itr implements Iterator { + private abstract static class Itr implements Iterator { final ArrayList orderedRows; @@ -37,11 +37,6 @@ private static abstract class Itr implements Iterator { this.orderedRows = orderedRows; } - @Override - public final void remove() { - throw new UnsupportedOperationException(); - } - } private static class PlainItr extends Itr { @@ -210,7 +205,7 @@ public Value[] next() { * whether iterator should iterate in reverse order * @return iterator */ - public static Iterator iterator(Window over, Session session, ArrayList orderedRows, + public static Iterator iterator(Window over, SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, int currentRow, boolean reverse) { WindowFrame frame = over.getWindowFrame(); if (frame != null) { @@ -241,8 +236,8 @@ public static Iterator iterator(Window over, Session session, ArrayList * if over is not null and its exclusion clause is not EXCLUDE * NO OTHERS */ - public static int getEndIndex(Window over, Session session, ArrayList orderedRows, SortOrder sortOrder, - int currentRow) { + public static int getEndIndex(Window over, SessionLocal session, ArrayList orderedRows, + SortOrder sortOrder, int currentRow) { WindowFrame frame = over.getWindowFrame(); if (frame != null) { return frame.getEndIndex(session, orderedRows, sortOrder, currentRow); @@ -289,10 +284,10 @@ private static int toGroupEnd(ArrayList orderedRows, SortOrder sortOrde return offset; } - private static int getIntOffset(WindowFrameBound bound, Value[] values, Session session) { + private static 
int getIntOffset(WindowFrameBound bound, Value[] values, SessionLocal session) { Value v = bound.isVariable() ? values[bound.getExpressionIndex()] : bound.getValue().getValue(session); - int value = v.getInt(); - if (v == ValueNull.INSTANCE || value < 0) { + int value; + if (v == ValueNull.INSTANCE || (value = v.getInt()) < 0) { throw DbException.get(ErrorCode.INVALID_PRECEDING_OR_FOLLOWING_1, v.getTraceSQL()); } return value; @@ -317,7 +312,7 @@ private static int getIntOffset(WindowFrameBound bound, Value[] values, Session * @return row for comparison operations, or null if result is out of range * and should be treated as UNLIMITED */ - private static Value[] getCompareRow(Session session, ArrayList orderedRows, SortOrder sortOrder, + private static Value[] getCompareRow(SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, int currentRow, WindowFrameBound bound, boolean add) { int sortIndex = sortOrder.getQueryColumnIndexes()[0]; Value[] row = orderedRows.get(currentRow); @@ -329,14 +324,16 @@ private static Value[] getCompareRow(Session session, ArrayList ordered case Value.NULL: newValue = ValueNull.INSTANCE; break; - case Value.BYTE: - case Value.SHORT: - case Value.INT: - case Value.LONG: - case Value.DECIMAL: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + case Value.NUMERIC: + case Value.REAL: case Value.DOUBLE: - case Value.FLOAT: + case Value.DECFLOAT: case Value.TIME: + case Value.TIME_TZ: case Value.DATE: case Value.TIMESTAMP: case Value.TIMESTAMP_TZ: @@ -376,7 +373,7 @@ private static Value[] getCompareRow(Session session, ArrayList ordered return newRow; } - private static Value getValueOffset(WindowFrameBound bound, Value[] values, Session session) { + private static Value getValueOffset(WindowFrameBound bound, Value[] values, SessionLocal session) { Value value = bound.isVariable() ? 
values[bound.getExpressionIndex()] : bound.getValue().getValue(session); if (value == ValueNull.INSTANCE || value.getSignum() < 0) { throw DbException.get(ErrorCode.INVALID_PRECEDING_OR_FOLLOWING_1, value.getTraceSQL()); @@ -457,7 +454,7 @@ public boolean isValid() { /** * Check if bounds of this frame has variable expressions. This method may - * be used only after {@link #optimize(Session)} invocation. + * be used only after {@link #optimize(SessionLocal)} invocation. * * @return if bounds of this frame has variable expressions */ @@ -494,7 +491,7 @@ void mapColumns(ColumnResolver resolver, int level, int state) { * @param session * the session */ - void optimize(Session session) { + void optimize(SessionLocal session) { starting.optimize(session); if (following != null) { following.optimize(session); @@ -508,9 +505,9 @@ void optimize(Session session) { * the session * @param stage * select stage - * @see Expression#updateAggregate(Session, int) + * @see Expression#updateAggregate(SessionLocal, int) */ - void updateAggregate(Session session, int stage) { + void updateAggregate(SessionLocal session, int stage) { starting.updateAggregate(session, stage); if (following != null) { following.updateAggregate(session, stage); @@ -532,7 +529,7 @@ void updateAggregate(Session session, int stage) { * whether iterator should iterate in reverse order * @return iterator */ - public Iterator iterator(Session session, ArrayList orderedRows, SortOrder sortOrder, + public Iterator iterator(SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, int currentRow, boolean reverse) { int startIndex = getIndex(session, orderedRows, sortOrder, currentRow, starting, false); int endIndex = following != null ? 
getIndex(session, orderedRows, sortOrder, currentRow, following, true) @@ -571,7 +568,8 @@ public Iterator iterator(Session session, ArrayList orderedRow * @throws UnsupportedOperationException * if exclusion clause is not EXCLUDE NO OTHERS */ - public int getStartIndex(Session session, ArrayList orderedRows, SortOrder sortOrder, int currentRow) { + public int getStartIndex(SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, // + int currentRow) { if (exclusion != WindowFrameExclusion.EXCLUDE_NO_OTHERS) { throw new UnsupportedOperationException(); } @@ -597,7 +595,8 @@ public int getStartIndex(Session session, ArrayList orderedRows, SortOr * @throws UnsupportedOperationException * if exclusion clause is not EXCLUDE NO OTHERS */ - private int getEndIndex(Session session, ArrayList orderedRows, SortOrder sortOrder, int currentRow) { + private int getEndIndex(SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, // + int currentRow) { if (exclusion != WindowFrameExclusion.EXCLUDE_NO_OTHERS) { throw new UnsupportedOperationException(); } @@ -630,7 +629,7 @@ private int getEndIndex(Session session, ArrayList orderedRows, SortOrd * or be equal to the number of rows if frame is not limited from * that side */ - private int getIndex(Session session, ArrayList orderedRows, SortOrder sortOrder, int currentRow, + private int getIndex(SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, int currentRow, WindowFrameBound bound, boolean forFollowing) { int size = orderedRows.size(); int last = size - 1; @@ -854,20 +853,20 @@ private Iterator complexIterator(ArrayList orderedRows, SortOr * * @param builder * string builder - * @param alwaysQuote + * @param formattingFlags * quote all identifiers * @return the specified string builder - * @see org.h2.expression.Expression#getSQL(StringBuilder, boolean) + * @see org.h2.expression.Expression#getSQL(StringBuilder, int, int) */ - public StringBuilder getSQL(StringBuilder builder, boolean 
alwaysQuote) { + public StringBuilder getSQL(StringBuilder builder, int formattingFlags) { builder.append(units.getSQL()); if (following == null) { builder.append(' '); - starting.getSQL(builder, false, alwaysQuote); + starting.getSQL(builder, false, formattingFlags); } else { builder.append(" BETWEEN "); - starting.getSQL(builder, false, alwaysQuote).append(" AND "); - following.getSQL(builder, true, alwaysQuote); + starting.getSQL(builder, false, formattingFlags).append(" AND "); + following.getSQL(builder, true, formattingFlags); } if (exclusion != WindowFrameExclusion.EXCLUDE_NO_OTHERS) { builder.append(' ').append(exclusion.getSQL()); diff --git a/h2/src/main/org/h2/expression/analysis/WindowFrameBound.java b/h2/src/main/org/h2/expression/analysis/WindowFrameBound.java index f9c3defcc3..ca520458d3 100644 --- a/h2/src/main/org/h2/expression/analysis/WindowFrameBound.java +++ b/h2/src/main/org/h2/expression/analysis/WindowFrameBound.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.analysis; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.table.ColumnResolver; @@ -68,7 +68,7 @@ public boolean isParameterized() { /** * Returns whether bound is defined with a variable. This method may be used - * only after {@link #optimize(Session)} invocation. + * only after {@link #optimize(SessionLocal)} invocation. 
* * @return whether bound is defined with a variable */ @@ -117,7 +117,7 @@ void mapColumns(ColumnResolver resolver, int level, int state) { * @param session * the session */ - void optimize(Session session) { + void optimize(SessionLocal session) { if (value != null) { value = value.optimize(session); if (!value.isConstant()) { @@ -133,9 +133,9 @@ void optimize(Session session) { * the session * @param stage * select stage - * @see Expression#updateAggregate(Session, int) + * @see Expression#updateAggregate(SessionLocal, int) */ - void updateAggregate(Session session, int stage) { + void updateAggregate(SessionLocal session, int stage) { if (value != null) { value.updateAggregate(session, stage); } @@ -149,14 +149,14 @@ void updateAggregate(Session session, int stage) { * @param following * if false return SQL for starting clause, if true return SQL * for following clause - * @param alwaysQuote - * quote all identifiers + * @param sqlFlags + * formatting flags * @return the specified string builder - * @see Expression#getSQL(StringBuilder, boolean) + * @see Expression#getSQL(StringBuilder, int, int) */ - public StringBuilder getSQL(StringBuilder builder, boolean following, boolean alwaysQuote) { + public StringBuilder getSQL(StringBuilder builder, boolean following, int sqlFlags) { if (type == WindowFrameBoundType.PRECEDING || type == WindowFrameBoundType.FOLLOWING) { - value.getSQL(builder, alwaysQuote).append(' '); + value.getUnenclosedSQL(builder, sqlFlags).append(' '); } return builder.append(type.getSQL()); } diff --git a/h2/src/main/org/h2/expression/analysis/WindowFrameBoundType.java b/h2/src/main/org/h2/expression/analysis/WindowFrameBoundType.java index 9f35c7fa92..27b2e3a274 100644 --- a/h2/src/main/org/h2/expression/analysis/WindowFrameBoundType.java +++ b/h2/src/main/org/h2/expression/analysis/WindowFrameBoundType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.analysis; @@ -23,7 +23,7 @@ public enum WindowFrameBoundType { /** * CURRENT_ROW clause. */ - CURRENT_ROW("CURRENT_ROW"), + CURRENT_ROW("CURRENT ROW"), /** * FOLLOWING clause. @@ -45,7 +45,7 @@ private WindowFrameBoundType(String sql) { * Returns SQL representation. * * @return SQL representation. - * @see org.h2.expression.Expression#getSQL(boolean) + * @see org.h2.expression.Expression#getSQL(int) */ public String getSQL() { return sql; diff --git a/h2/src/main/org/h2/expression/analysis/WindowFrameExclusion.java b/h2/src/main/org/h2/expression/analysis/WindowFrameExclusion.java index 85143edbaa..e587732c50 100644 --- a/h2/src/main/org/h2/expression/analysis/WindowFrameExclusion.java +++ b/h2/src/main/org/h2/expression/analysis/WindowFrameExclusion.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.analysis; @@ -53,7 +53,7 @@ public boolean isGroupOrNoOthers() { * Returns SQL representation. * * @return SQL representation. 
- * @see org.h2.expression.Expression#getSQL(boolean) + * @see org.h2.expression.Expression#getSQL(int) */ public String getSQL() { return sql; diff --git a/h2/src/main/org/h2/expression/analysis/WindowFrameUnits.java b/h2/src/main/org/h2/expression/analysis/WindowFrameUnits.java index 67ab5a6998..081438ea90 100644 --- a/h2/src/main/org/h2/expression/analysis/WindowFrameUnits.java +++ b/h2/src/main/org/h2/expression/analysis/WindowFrameUnits.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.analysis; @@ -31,7 +31,7 @@ public enum WindowFrameUnits { * Returns SQL representation. * * @return SQL representation. - * @see org.h2.expression.Expression#getSQL(boolean) + * @see org.h2.expression.Expression#getSQL(int) */ public String getSQL() { return name(); diff --git a/h2/src/main/org/h2/expression/analysis/WindowFunction.java b/h2/src/main/org/h2/expression/analysis/WindowFunction.java index c07ff0442c..c3ddc40e63 100644 --- a/h2/src/main/org/h2/expression/analysis/WindowFunction.java +++ b/h2/src/main/org/h2/expression/analysis/WindowFunction.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression.analysis; @@ -9,17 +9,18 @@ import java.util.HashMap; import java.util.Iterator; -import org.h2.command.dml.Select; -import org.h2.command.dml.SelectGroups; -import org.h2.engine.Session; +import org.h2.command.query.Select; +import org.h2.command.query.SelectGroups; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; import org.h2.message.DbException; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueBigint; import org.h2.value.ValueDouble; -import org.h2.value.ValueLong; import org.h2.value.ValueNull; /** @@ -148,12 +149,12 @@ public boolean isAggregate() { } @Override - protected void updateAggregate(Session session, SelectGroups groupData, int groupRowId) { + protected void updateAggregate(SessionLocal session, SelectGroups groupData, int groupRowId) { updateOrderedAggregate(session, groupData, groupRowId, over.getOrderBy()); } @Override - protected void updateGroupAggregates(Session session, int stage) { + protected void updateGroupAggregates(SessionLocal session, int stage) { super.updateGroupAggregates(session, stage); if (args != null) { for (Expression expr : args) { @@ -168,7 +169,7 @@ protected int getNumExpressions() { } @Override - protected void rememberExpressions(Session session, Value[] array) { + protected void rememberExpressions(SessionLocal session, Value[] array) { if (args != null) { for (int i = 0, cnt = args.length; i < cnt; i++) { array[i] = args[i].getValue(session); @@ -182,12 +183,12 @@ protected Object createAggregateData() { } @Override - protected void getOrderedResultLoop(Session session, HashMap result, ArrayList ordered, - int rowIdColumn) { + protected void getOrderedResultLoop(SessionLocal session, HashMap result, + ArrayList ordered, int rowIdColumn) { switch (type) { case ROW_NUMBER: for (int i = 0, size = 
ordered.size(); i < size;) { - result.put(ordered.get(i)[rowIdColumn].getInt(), ValueLong.get(++i)); + result.put(ordered.get(i)[rowIdColumn].getInt(), ValueBigint.get(++i)); } break; case RANK: @@ -203,7 +204,7 @@ protected void getOrderedResultLoop(Session session, HashMap res break; case LEAD: case LAG: - getLeadLag(result, ordered, rowIdColumn); + getLeadLag(result, ordered, rowIdColumn, session); break; case FIRST_VALUE: case LAST_VALUE: @@ -214,7 +215,7 @@ protected void getOrderedResultLoop(Session session, HashMap res getRatioToReport(result, ordered, rowIdColumn); break; default: - throw DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } } @@ -237,7 +238,7 @@ private void getRank(HashMap result, ArrayList ordered, int nm = number - 1; v = nm == 0 ? ValueDouble.ZERO : ValueDouble.get((double) nm / (size - 1)); } else { - v = ValueLong.get(number); + v = ValueBigint.get(number); } result.put(row[rowIdColumn].getInt(), v); } @@ -277,14 +278,15 @@ private static void getNtile(HashMap result, ArrayList } else { v = i / (perTile + 1) + 1; } - result.put(orderedData.get(i)[rowIdColumn].getInt(), ValueLong.get(v)); + result.put(orderedData.get(i)[rowIdColumn].getInt(), ValueBigint.get(v)); } } - private void getLeadLag(HashMap result, ArrayList ordered, int rowIdColumn) { + private void getLeadLag(HashMap result, ArrayList ordered, int rowIdColumn, + SessionLocal session) { int size = ordered.size(); int numExpressions = getNumExpressions(); - int dataType = args[0].getType().getValueType(); + TypeInfo dataType = args[0].getType(); for (int i = 0; i < size; i++) { Value[] row = ordered.get(i); int rowId = row[rowIdColumn].getInt(); @@ -336,7 +338,7 @@ private void getLeadLag(HashMap result, ArrayList order } if (v == null) { if (numExpressions >= 3) { - v = row[2].convertTo(dataType); + v = row[2].convertTo(dataType, session); } else { v = ValueNull.INSTANCE; } @@ -345,7 +347,8 @@ private void getLeadLag(HashMap 
result, ArrayList order } } - private void getNth(Session session, HashMap result, ArrayList ordered, int rowIdColumn) { + private void getNth(SessionLocal session, HashMap result, ArrayList ordered, + int rowIdColumn) { int size = ordered.size(); for (int i = 0; i < size; i++) { Value[] row = ordered.get(i); @@ -372,7 +375,7 @@ private void getNth(Session session, HashMap result, ArrayList diff --git a/h2/src/main/org/h2/expression/condition/BetweenPredicate.java b/h2/src/main/org/h2/expression/condition/BetweenPredicate.java new file mode 100644 index 0000000000..b5b7b11f4d --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/BetweenPredicate.java @@ -0,0 +1,207 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * BETWEEN predicate. 
+ */ +public final class BetweenPredicate extends Condition { + + private Expression left; + + private final boolean not; + + private final boolean whenOperand; + + private boolean symmetric; + + private Expression a, b; + + public BetweenPredicate(Expression left, boolean not, boolean whenOperand, boolean symmetric, Expression a, + Expression b) { + this.left = left; + this.not = not; + this.whenOperand = whenOperand; + this.symmetric = symmetric; + this.a = a; + this.b = b; + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append(" NOT"); + } + builder.append(" BETWEEN "); + if (symmetric) { + builder.append("SYMMETRIC "); + } + a.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(" AND "); + return b.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + a = a.optimize(session); + b = b.optimize(session); + TypeInfo leftType = left.getType(); + TypeInfo.checkComparable(leftType, a.getType()); + TypeInfo.checkComparable(leftType, b.getType()); + if (whenOperand) { + return this; + } + Value value = left.isConstant() ? left.getValue(session) : null, + aValue = a.isConstant() ? a.getValue(session) : null, + bValue = b.isConstant() ? 
b.getValue(session) : null; + if (value != null) { + if (value == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + if (aValue != null && bValue != null) { + return ValueExpression.getBoolean(getValue(session, value, aValue, bValue)); + } + } + if (symmetric) { + if (aValue == ValueNull.INSTANCE || bValue == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + } else if (aValue == ValueNull.INSTANCE && bValue == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + if (aValue != null && bValue != null && session.compareWithNull(aValue, bValue, false) == 0) { + return new Comparison(not ? Comparison.NOT_EQUAL : Comparison.EQUAL, left, a, false).optimize(session); + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + Value value = left.getValue(session); + if (value == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return getValue(session, value, a.getValue(session), b.getValue(session)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + if (left == ValueNull.INSTANCE) { + return false; + } + return getValue(session, left, a.getValue(session), b.getValue(session)).isTrue(); + } + + private Value getValue(SessionLocal session, Value value, Value aValue, Value bValue) { + int cmp1 = session.compareWithNull(aValue, value, false); + int cmp2 = session.compareWithNull(value, bValue, false); + if (cmp1 == Integer.MIN_VALUE) { + return symmetric || cmp2 <= 0 ? ValueNull.INSTANCE : ValueBoolean.get(not); + } else if (cmp2 == Integer.MIN_VALUE) { + return symmetric || cmp1 <= 0 ? ValueNull.INSTANCE : ValueBoolean.get(not); + } else { + return ValueBoolean.get(not ^ // + (symmetric ? 
cmp1 <= 0 && cmp2 <= 0 || cmp1 >= 0 && cmp2 >= 0 : cmp1 <= 0 && cmp2 <= 0)); + } + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new BetweenPredicate(left, !not, false, symmetric, a, b); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (!not && !whenOperand && !symmetric) { + Comparison.createIndexConditions(filter, a, left, Comparison.SMALLER_EQUAL); + Comparison.createIndexConditions(filter, left, b, Comparison.SMALLER_EQUAL); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + left.setEvaluatable(tableFilter, value); + a.setEvaluatable(tableFilter, value); + b.setEvaluatable(tableFilter, value); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + a.updateAggregate(session, stage); + b.updateAggregate(session, stage); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + a.mapColumns(resolver, level, state); + b.mapColumns(resolver, level, state); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && a.isEverything(visitor) && b.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost() + a.getCost() + b.getCost() + 1; + } + + @Override + public int getSubexpressionCount() { + return 3; + } + + @Override + public Expression getSubexpression(int index) { + switch (index) { + case 0: + return left; + case 1: + return a; + case 2: + return b; + default: + throw new IndexOutOfBoundsException(); + } + } + +} diff --git a/h2/src/main/org/h2/expression/condition/BooleanTest.java b/h2/src/main/org/h2/expression/condition/BooleanTest.java new file mode 100644 index 
0000000000..47a07743f0 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/BooleanTest.java @@ -0,0 +1,91 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.ArrayList; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.index.IndexCondition; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * Boolean test (IS [NOT] { TRUE | FALSE | UNKNOWN }). + */ +public final class BooleanTest extends SimplePredicate { + + private final Boolean right; + + public BooleanTest(Expression left, boolean not, boolean whenOperand, Boolean right) { + super(left, not, whenOperand); + this.right = right; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + return builder.append(not ? " IS NOT " : " IS ").append(right == null ? "UNKNOWN" : right ? "TRUE" : "FALSE"); + } + + @Override + public Value getValue(SessionLocal session) { + return ValueBoolean.get(getValue(left.getValue(session))); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + return getValue(left); + } + + private boolean getValue(Value left) { + return (left == ValueNull.INSTANCE ? 
right == null : right != null && right == left.getBoolean()) ^ not; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new BooleanTest(left, !not, false, right); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (whenOperand || !filter.getTable().isQueryComparable()) { + return; + } + if (left instanceof ExpressionColumn) { + ExpressionColumn c = (ExpressionColumn) left; + if (c.getType().getValueType() == Value.BOOLEAN && filter == c.getTableFilter()) { + if (not) { + if (right == null && c.getColumn().isNullable()) { + ArrayList list = new ArrayList<>(2); + list.add(ValueExpression.FALSE); + list.add(ValueExpression.TRUE); + filter.addIndexCondition(IndexCondition.getInList(c, list)); + } + } else { + filter.addIndexCondition(IndexCondition.get(Comparison.EQUAL_NULL_SAFE, c, + right == null ? TypedValueExpression.UNKNOWN : ValueExpression.getBoolean(right))); + } + } + } + } + +} diff --git a/h2/src/main/org/h2/expression/condition/CompareLike.java b/h2/src/main/org/h2/expression/condition/CompareLike.java index db5234f685..e62dbaaa24 100644 --- a/h2/src/main/org/h2/expression/condition/CompareLike.java +++ b/h2/src/main/org/h2/expression/condition/CompareLike.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression.condition; @@ -9,10 +9,12 @@ import java.util.regex.PatternSyntaxException; import org.h2.api.ErrorCode; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; +import org.h2.expression.SearchedCase; +import org.h2.expression.TypedValueExpression; import org.h2.expression.ValueExpression; import org.h2.index.IndexCondition; import org.h2.message.DbException; @@ -20,21 +22,50 @@ import org.h2.table.TableFilter; import org.h2.value.CompareMode; import org.h2.value.DataType; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueBoolean; import org.h2.value.ValueNull; -import org.h2.value.ValueString; +import org.h2.value.ValueVarchar; +import org.h2.value.ValueVarcharIgnoreCase; /** * Pattern matching comparison expression: WHERE NAME LIKE ? */ -public class CompareLike extends Condition { +public final class CompareLike extends Condition { + + /** + * The type of comparison. + */ + public enum LikeType { + /** + * LIKE. + */ + LIKE, + + /** + * ILIKE (case-insensitive LIKE). 
+ */ + ILIKE, + + /** + * REGEXP + */ + REGEXP + } private static final int MATCH = 0, ONE = 1, ANY = 2; private final CompareMode compareMode; private final String defaultEscape; + + private final LikeType likeType; private Expression left; + + private final boolean not; + + private final boolean whenOperand; + private Expression right; private Expression escape; @@ -46,7 +77,6 @@ public class CompareLike extends Condition { private int[] patternTypes; private int patternLength; - private final boolean regexp; private Pattern patternRegexp; private boolean ignoreCase; @@ -59,18 +89,19 @@ public class CompareLike extends Condition { /** indicates that we can shortcut the comparison and use contains */ private boolean shortcutToContains; - public CompareLike(Database db, Expression left, Expression right, - Expression escape, boolean regexp) { - this(db.getCompareMode(), db.getSettings().defaultEscape, left, right, - escape, regexp); + public CompareLike(Database db, Expression left, boolean not, boolean whenOperand, Expression right, + Expression escape, LikeType likeType) { + this(db.getCompareMode(), db.getSettings().defaultEscape, left, not, whenOperand, right, escape, likeType); } - public CompareLike(CompareMode compareMode, String defaultEscape, - Expression left, Expression right, Expression escape, boolean regexp) { + public CompareLike(CompareMode compareMode, String defaultEscape, Expression left, boolean not, + boolean whenOperand, Expression right, Expression escape, LikeType likeType) { this.compareMode = compareMode; this.defaultEscape = defaultEscape; - this.regexp = regexp; + this.likeType = likeType; this.left = left; + this.not = not; + this.whenOperand = whenOperand; this.right = right; this.escape = escape; } @@ -80,68 +111,88 @@ private static Character getEscapeChar(String s) { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append('('); - if (regexp) { - left.getSQL(builder, 
alwaysQuote).append(" REGEXP "); - right.getSQL(builder, alwaysQuote); - } else { - left.getSQL(builder, alwaysQuote).append(" LIKE "); - right.getSQL(builder, alwaysQuote); + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append(" NOT"); + } + switch (likeType) { + case LIKE: + case ILIKE: + builder.append(likeType == LikeType.LIKE ? " LIKE " : " ILIKE "); + right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); if (escape != null) { - builder.append(" ESCAPE "); - escape.getSQL(builder, alwaysQuote); + escape.getSQL(builder.append(" ESCAPE "), sqlFlags, AUTO_PARENTHESES); } + break; + case REGEXP: + builder.append(" REGEXP "); + right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + break; + default: + throw DbException.getUnsupportedException(likeType.name()); } - return builder.append(')'); + return builder; } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { left = left.optimize(session); right = right.optimize(session); - if (left.getType().getValueType() == Value.STRING_IGNORECASE) { + if (likeType == LikeType.ILIKE || left.getType().getValueType() == Value.VARCHAR_IGNORECASE) { ignoreCase = true; } + if (escape != null) { + escape = escape.optimize(session); + } + if (whenOperand) { + return this; + } if (left.isValueSet()) { Value l = left.getValue(session); if (l == ValueNull.INSTANCE) { // NULL LIKE something > NULL - return ValueExpression.getNull(); + return TypedValueExpression.UNKNOWN; } } - if (escape != null) { - escape = escape.optimize(session); - } if (right.isValueSet() && (escape == null || escape.isValueSet())) { if (left.isValueSet()) { - return ValueExpression.get(getValue(session)); + return 
ValueExpression.getBoolean(getValue(session)); } Value r = right.getValue(session); if (r == ValueNull.INSTANCE) { // something LIKE NULL > NULL - return ValueExpression.getNull(); + return TypedValueExpression.UNKNOWN; } Value e = escape == null ? null : escape.getValue(session); if (e == ValueNull.INSTANCE) { - return ValueExpression.getNull(); + return TypedValueExpression.UNKNOWN; } String p = r.getString(); initPattern(p, getEscapeChar(e)); if (invalidPattern) { - return ValueExpression.getNull(); + return TypedValueExpression.UNKNOWN; } - if ("%".equals(p)) { - // optimization for X LIKE '%': convert to X IS NOT NULL - return new Comparison(session, - Comparison.IS_NOT_NULL, left, null).optimize(session); + if (likeType != LikeType.REGEXP && "%".equals(p)) { + // optimization for X LIKE '%' + return new SearchedCase(new Expression[] { new NullPredicate(left, true, false), + ValueExpression.getBoolean(!not), TypedValueExpression.UNKNOWN }).optimize(session); } if (isFullMatch()) { // optimization for X LIKE 'Hello': convert to X = 'Hello' - Value value = ValueString.get(patternString); + Value value = ignoreCase ? ValueVarcharIgnoreCase.get(patternString) : ValueVarchar.get(patternString); Expression expr = ValueExpression.get(value); - return new Comparison(session, - Comparison.EQUAL, left, expr).optimize(session); + return new Comparison(not ? 
Comparison.NOT_EQUAL : Comparison.EQUAL, left, expr, false) + .optimize(session); } isInit = true; } @@ -167,15 +218,13 @@ private Character getEscapeChar(Value e) { } @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (regexp) { - return; - } - if (!(left instanceof ExpressionColumn)) { + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (not || whenOperand || likeType == LikeType.REGEXP || !(left instanceof ExpressionColumn)) { return; } ExpressionColumn l = (ExpressionColumn) left; - if (filter != l.getTableFilter()) { + if (filter != l.getTableFilter() || !TypeInfo.haveSameOrdering(l.getType(), + ignoreCase ? TypeInfo.TYPE_VARCHAR_IGNORECASE : TypeInfo.TYPE_VARCHAR)) { return; } // parameters are always evaluatable, but @@ -186,8 +235,7 @@ public void createIndexConditions(Session session, TableFilter filter) { if (!right.isEverything(ExpressionVisitor.INDEPENDENT_VISITOR)) { return; } - if (escape != null && - !escape.isEverything(ExpressionVisitor.INDEPENDENT_VISITOR)) { + if (escape != null && !escape.isEverything(ExpressionVisitor.INDEPENDENT_VISITOR)) { return; } String p = right.getValue(session).getString(); @@ -195,7 +243,7 @@ public void createIndexConditions(Session session, TableFilter filter) { Value e = escape == null ? 
null : escape.getValue(session); if (e == ValueNull.INSTANCE) { // should already be optimized - DbException.throwInternalError(); + throw DbException.getInternalError(); } initPattern(p, getEscapeChar(e)); } @@ -220,7 +268,7 @@ public void createIndexConditions(Session session, TableFilter filter) { String begin = buff.toString(); if (maxMatch == patternLength) { filter.addIndexCondition(IndexCondition.get(Comparison.EQUAL, l, - ValueExpression.get(ValueString.get(begin)))); + ValueExpression.get(ValueVarchar.get(begin)))); } else { // TODO check if this is correct according to Unicode rules // (code points) @@ -228,16 +276,16 @@ public void createIndexConditions(Session session, TableFilter filter) { if (begin.length() > 0) { filter.addIndexCondition(IndexCondition.get( Comparison.BIGGER_EQUAL, l, - ValueExpression.get(ValueString.get(begin)))); + ValueExpression.get(ValueVarchar.get(begin)))); char next = begin.charAt(begin.length() - 1); // search the 'next' unicode character (or at least a character // that is higher) for (int i = 1; i < 2000; i++) { end = begin.substring(0, begin.length() - 1) + (char) (next + i); - if (compareMode.compareString(begin, end, ignoreCase) == -1) { + if (compareMode.compareString(begin, end, ignoreCase) < 0) { filter.addIndexCondition(IndexCondition.get( Comparison.SMALLER, l, - ValueExpression.get(ValueString.get(end)))); + ValueExpression.get(ValueVarchar.get(end)))); break; } } @@ -246,15 +294,26 @@ public void createIndexConditions(Session session, TableFilter filter) { } @Override - public Value getValue(Session session) { - Value l = left.getValue(session); - if (l == ValueNull.INSTANCE) { - return l; + public Value getValue(SessionLocal session) { + return getValue(session, left.getValue(session)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + return getValue(session, left).isTrue(); + } + + private Value 
getValue(SessionLocal session, Value left) { + if (left == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; } if (!isInit) { Value r = right.getValue(session); if (r == ValueNull.INSTANCE) { - return r; + return ValueNull.INSTANCE; } String p = r.getString(); Value e = escape == null ? null : escape.getValue(session); @@ -266,9 +325,9 @@ public Value getValue(Session session) { if (invalidPattern) { return ValueNull.INSTANCE; } - String value = l.getString(); + String value = left.getString(); boolean result; - if (regexp) { + if (likeType == LikeType.REGEXP) { result = patternRegexp.matcher(value).find(); } else if (shortcutToStartsWith) { result = value.regionMatches(ignoreCase, 0, patternString, 0, patternLength - 1); @@ -285,7 +344,7 @@ public Value getValue(Session session) { } else { result = compareAt(value, 0, 0, value.length(), patternChars, patternTypes); } - return ValueBoolean.get(result); + return ValueBoolean.get(not ^ result); } private static boolean containsIgnoreCase(String src, String what) { @@ -339,7 +398,7 @@ private boolean compareAt(String s, int pi, int si, int sLen, } return false; default: - DbException.throwInternalError(Integer.toString(types[pi])); + throw DbException.getInternalError(Integer.toString(types[pi])); } } return si == sLen; @@ -351,6 +410,11 @@ private boolean compare(char[] pattern, String s, int pi, int si) { si, ignoreCase)); } + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + /** * Test if the value matches the pattern. * @@ -361,17 +425,33 @@ private boolean compare(char[] pattern, String s, int pi, int si) { */ public boolean test(String testPattern, String value, char escapeChar) { initPattern(testPattern, escapeChar); + return test(value); + } + + /** + * Test if the value matches the initialized pattern. 
+ * + * @param value the value + * @return true if the value matches + */ + public boolean test(String value) { if (invalidPattern) { return false; } return compareAt(value, 0, 0, value.length(), patternChars, patternTypes); } - private void initPattern(String p, Character escapeChar) { + /** + * Initializes the pattern. + * + * @param p the pattern + * @param escapeChar the escape character + */ + public void initPattern(String p, Character escapeChar) { if (compareMode.getName().equals(CompareMode.OFF) && !ignoreCase) { fastCompare = true; } - if (regexp) { + if (likeType == LikeType.REGEXP) { patternString = p; try { if (ignoreCase) { @@ -483,6 +563,14 @@ private boolean isFullMatch() { return true; } + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new CompareLike(compareMode, defaultEscape, left, !not, false, right, escape, likeType); + } + @Override public void mapColumns(ColumnResolver resolver, int level, int state) { left.mapColumns(resolver, level, state); @@ -502,7 +590,7 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { left.updateAggregate(session, stage); right.updateAggregate(session, stage); if (escape != null) { diff --git a/h2/src/main/org/h2/expression/condition/Comparison.java b/h2/src/main/org/h2/expression/condition/Comparison.java index e3421e666a..666f4063d7 100644 --- a/h2/src/main/org/h2/expression/condition/Comparison.java +++ b/h2/src/main/org/h2/expression/condition/Comparison.java @@ -1,18 +1,17 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression.condition; import java.util.ArrayList; -import org.h2.api.ErrorCode; -import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; import org.h2.expression.Parameter; +import org.h2.expression.TypedValueExpression; import org.h2.expression.ValueExpression; import org.h2.expression.aggregate.Aggregate; import org.h2.expression.aggregate.AggregateType; @@ -21,10 +20,10 @@ import org.h2.table.Column; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; +import org.h2.value.DataType; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueBoolean; -import org.h2.value.ValueGeometry; import org.h2.value.ValueNull; /** @@ -34,14 +33,7 @@ * @author Noel Grandin * @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 */ -public class Comparison extends Condition { - - /** - * This is a flag meaning the comparison is null safe (meaning never returns - * NULL even if one operand is NULL). Only EQUAL and NOT_EQUAL are supported - * currently. - */ - public static final int NULL_SAFE = 16; +public final class Comparison extends Condition { /** * The comparison type meaning = as in ID=1. @@ -49,261 +41,213 @@ public class Comparison extends Condition { public static final int EQUAL = 0; /** - * The comparison type meaning ID IS 1 (ID IS NOT DISTINCT FROM 1). + * The comparison type meaning <> as in ID<>1. */ - public static final int EQUAL_NULL_SAFE = EQUAL | NULL_SAFE; + public static final int NOT_EQUAL = 1; /** - * The comparison type meaning >= as in ID>=1. + * The comparison type meaning < as in ID<1. */ - public static final int BIGGER_EQUAL = 1; + public static final int SMALLER = 2; /** * The comparison type meaning > as in ID>1. 
*/ - public static final int BIGGER = 2; + public static final int BIGGER = 3; /** * The comparison type meaning <= as in ID<=1. */ - public static final int SMALLER_EQUAL = 3; + public static final int SMALLER_EQUAL = 4; /** - * The comparison type meaning < as in ID<1. + * The comparison type meaning >= as in ID>=1. */ - public static final int SMALLER = 4; + public static final int BIGGER_EQUAL = 5; /** - * The comparison type meaning <> as in ID<>1. + * The comparison type meaning ID IS NOT DISTINCT FROM 1. */ - public static final int NOT_EQUAL = 5; + public static final int EQUAL_NULL_SAFE = 6; /** - * The comparison type meaning ID IS NOT 1 (ID IS DISTINCT FROM 1). + * The comparison type meaning ID IS DISTINCT FROM 1. */ - public static final int NOT_EQUAL_NULL_SAFE = NOT_EQUAL | NULL_SAFE; + public static final int NOT_EQUAL_NULL_SAFE = 7; /** - * The comparison type meaning IS NULL as in NAME IS NULL. + * This is a comparison type that is only used for spatial index + * conditions (operator "&&"). */ - public static final int IS_NULL = 6; + public static final int SPATIAL_INTERSECTS = 8; - /** - * The comparison type meaning IS NOT NULL as in NAME IS NOT NULL. - */ - public static final int IS_NOT_NULL = 7; + static final String[] COMPARE_TYPES = { "=", "<>", "<", ">", "<=", ">=", // + "IS NOT DISTINCT FROM", "IS DISTINCT FROM", // + "&&" }; /** * This is a pseudo comparison type that is only used for index conditions. * It means the comparison will always yield FALSE. Example: 1=0. */ - public static final int FALSE = 8; + public static final int FALSE = 9; /** * This is a pseudo comparison type that is only used for index conditions. * It means equals any value of a list. Example: IN(1, 2, 3). */ - public static final int IN_LIST = 9; + public static final int IN_LIST = 10; /** * This is a pseudo comparison type that is only used for index conditions. * It means equals any value of a list. Example: IN(SELECT ...). 
*/ - public static final int IN_QUERY = 10; - - /** - * This is a comparison type that is only used for spatial index - * conditions (operator "&&"). - */ - public static final int SPATIAL_INTERSECTS = 11; + public static final int IN_QUERY = 11; - private final Database database; private int compareType; private Expression left; private Expression right; + private final boolean whenOperand; - public Comparison(Session session, int compareType, Expression left, - Expression right) { - this.database = session.getDatabase(); + public Comparison(int compareType, Expression left, Expression right, boolean whenOperand) { this.left = left; this.right = right; this.compareType = compareType; + this.whenOperand = whenOperand; } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - boolean encloseRight = false; - builder.append('('); - switch (compareType) { - case IS_NULL: - left.getSQL(builder, alwaysQuote).append(" IS NULL"); - break; - case IS_NOT_NULL: - left.getSQL(builder, alwaysQuote).append(" IS NOT NULL"); - break; - case SPATIAL_INTERSECTS: - builder.append("INTERSECTS("); - left.getSQL(builder, alwaysQuote).append(", "); - right.getSQL(builder, alwaysQuote).append(')'); - break; - case EQUAL: - case BIGGER_EQUAL: - case BIGGER: - case SMALLER_EQUAL: - case SMALLER: - case NOT_EQUAL: - if (right instanceof Aggregate && ((Aggregate) right).getAggregateType() == AggregateType.ANY) { - encloseRight = true; - } - //$FALL-THROUGH$ - default: - left.getSQL(builder, alwaysQuote).append(' ').append(getCompareOperator(compareType)).append(' '); - if (encloseRight) { - builder.append('('); - } - right.getSQL(builder, alwaysQuote); - if (encloseRight) { - builder.append(')'); - } - } - return builder.append(')'); + public boolean needParentheses() { + return true; } - /** - * Get the comparison operator string ("=", ">",...). 
- * - * @param compareType the compare type - * @return the string - */ - static String getCompareOperator(int compareType) { - switch (compareType) { - case EQUAL: - return "="; - case EQUAL_NULL_SAFE: - return "IS"; - case BIGGER_EQUAL: - return ">="; - case BIGGER: - return ">"; - case SMALLER_EQUAL: - return "<="; - case SMALLER: - return "<"; - case NOT_EQUAL: - return "<>"; - case NOT_EQUAL_NULL_SAFE: - return "IS NOT"; - case SPATIAL_INTERSECTS: - return "&&"; - default: - throw DbException.throwInternalError("compareType=" + compareType); - } + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + builder.append(' ').append(COMPARE_TYPES[compareType]).append(' '); + return right.getSQL(builder, sqlFlags, + right instanceof Aggregate && ((Aggregate) right).getAggregateType() == AggregateType.ANY + ? 
WITH_PARENTHESES + : AUTO_PARENTHESES); } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { left = left.optimize(session); - if (right != null) { - right = right.optimize(session); - // TODO check row values too - if (right.getType().getValueType() == Value.ARRAY && left.getType().getValueType() != Value.ARRAY) { - throw DbException.get(ErrorCode.COMPARING_ARRAY_TO_SCALAR); - } - if (right instanceof ExpressionColumn) { - if (left.isConstant() || left instanceof Parameter) { - Expression temp = left; - left = right; - right = temp; - compareType = getReversedCompareType(compareType); - } - } - if (left instanceof ExpressionColumn) { - if (right.isConstant()) { - Value r = right.getValue(session); - if (r == ValueNull.INSTANCE) { - if ((compareType & NULL_SAFE) == 0) { - return ValueExpression.getNull(); - } - } - TypeInfo colType = left.getType(), constType = r.getType(); - int constValueType = constType.getValueType(); - if (constValueType != colType.getValueType()) { - TypeInfo resType = Value.getHigherType(colType, constType); - // If not, the column values will need to be promoted - // to constant type, but vise versa, then let's do this here - // once. 
- if (constValueType != resType.getValueType()) { - Column column = ((ExpressionColumn) left).getColumn(); - right = ValueExpression.get(r.convertTo(resType, session.getDatabase().getMode(), column)); + right = right.optimize(session); + check: { + TypeInfo leftType = left.getType(), rightType = right.getType(); + if (session.getMode().numericWithBooleanComparison) { + switch (compareType) { + case EQUAL: + case NOT_EQUAL: + case EQUAL_NULL_SAFE: + case NOT_EQUAL_NULL_SAFE: + int lValueType = leftType.getValueType(); + if (lValueType == Value.BOOLEAN) { + if (DataType.isNumericType(rightType.getValueType())) { + break check; } + } else if (DataType.isNumericType(lValueType) && rightType.getValueType() == Value.BOOLEAN) { + break check; } - } else if (right instanceof Parameter) { - ((Parameter) right).setColumn( - ((ExpressionColumn) left).getColumn()); } } + TypeInfo.checkComparable(leftType, rightType); } - if (compareType == IS_NULL || compareType == IS_NOT_NULL) { - if (left.isConstant()) { - return ValueExpression.get(getValue(session)); - } - } else { - if (left == null || right == null) { - DbException.throwInternalError(left + " " + right); + if (whenOperand) { + return this; + } + if (right instanceof ExpressionColumn) { + if (left.isConstant() || left instanceof Parameter) { + Expression temp = left; + left = right; + right = temp; + compareType = getReversedCompareType(compareType); } - if (left == ValueExpression.getNull() || - right == ValueExpression.getNull()) { - // TODO NULL handling: maybe issue a warning when comparing with - // a NULL constants - if ((compareType & NULL_SAFE) == 0) { - return ValueExpression.getNull(); + } + if (left instanceof ExpressionColumn) { + if (right.isConstant()) { + Value r = right.getValue(session); + if (r == ValueNull.INSTANCE) { + if ((compareType & ~1) != EQUAL_NULL_SAFE) { + return TypedValueExpression.UNKNOWN; + } + } + TypeInfo colType = left.getType(), constType = r.getType(); + int constValueType = 
constType.getValueType(); + if (constValueType != colType.getValueType() || constValueType >= Value.ARRAY) { + TypeInfo resType = TypeInfo.getHigherType(colType, constType); + // If not, the column values will need to be promoted + // to constant type, but vise versa, then let's do this here + // once. + if (constValueType != resType.getValueType() || constValueType >= Value.ARRAY) { + Column column = ((ExpressionColumn) left).getColumn(); + right = ValueExpression.get(r.convertTo(resType, session, column)); + } } + } else if (right instanceof Parameter) { + ((Parameter) right).setColumn(((ExpressionColumn) left).getColumn()); } - if (left.isConstant() && right.isConstant()) { - return ValueExpression.get(getValue(session)); + } + if (left.isConstant() && right.isConstant()) { + return ValueExpression.getBoolean(getValue(session)); + } + if (left.isNullConstant() || right.isNullConstant()) { + // TODO NULL handling: maybe issue a warning when comparing with + // a NULL constants + if ((compareType & ~1) != EQUAL_NULL_SAFE) { + return TypedValueExpression.UNKNOWN; + } else { + Expression e = left.isNullConstant() ? 
right : left; + int type = e.getType().getValueType(); + if (type != Value.UNKNOWN && type != Value.ROW) { + return new NullPredicate(e, compareType == NOT_EQUAL_NULL_SAFE, false); + } } } return this; } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { Value l = left.getValue(session); - if (right == null) { - boolean result; - switch (compareType) { - case IS_NULL: - result = l == ValueNull.INSTANCE; - break; - case IS_NOT_NULL: - result = l != ValueNull.INSTANCE; - break; - default: - throw DbException.throwInternalError("type=" + compareType); - } - return ValueBoolean.get(result); - } // Optimization: do not evaluate right if not necessary - if (l == ValueNull.INSTANCE && (compareType & NULL_SAFE) == 0) { + if (l == ValueNull.INSTANCE && (compareType & ~1) != EQUAL_NULL_SAFE) { return ValueNull.INSTANCE; } - return compare(database, l, right.getValue(session), compareType); + return compare(session, l, right.getValue(session), compareType); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + // Optimization: do not evaluate right if not necessary + if (left == ValueNull.INSTANCE && (compareType & ~1) != EQUAL_NULL_SAFE) { + return false; + } + return compare(session, left, right.getValue(session), compareType).isTrue(); } /** * Compare two values. 
* - * @param database the database + * @param session the session * @param l the first value * @param r the second value * @param compareType the compare type * @return result of comparison, either TRUE, FALSE, or NULL */ - static Value compare(Database database, Value l, Value r, int compareType) { + static Value compare(SessionLocal session, Value l, Value r, int compareType) { Value result; switch (compareType) { case EQUAL: { - int cmp = database.compareWithNull(l, r, true); + int cmp = session.compareWithNull(l, r, true); if (cmp == 0) { result = ValueBoolean.TRUE; } else if (cmp == Integer.MIN_VALUE) { @@ -314,10 +258,10 @@ static Value compare(Database database, Value l, Value r, int compareType) { break; } case EQUAL_NULL_SAFE: - result = ValueBoolean.get(database.areEqual(l, r)); + result = ValueBoolean.get(session.areEqual(l, r)); break; case NOT_EQUAL: { - int cmp = database.compareWithNull(l, r, true); + int cmp = session.compareWithNull(l, r, true); if (cmp == 0) { result = ValueBoolean.FALSE; } else if (cmp == Integer.MIN_VALUE) { @@ -328,10 +272,10 @@ static Value compare(Database database, Value l, Value r, int compareType) { break; } case NOT_EQUAL_NULL_SAFE: - result = ValueBoolean.get(!database.areEqual(l, r)); + result = ValueBoolean.get(!session.areEqual(l, r)); break; case BIGGER_EQUAL: { - int cmp = database.compareWithNull(l, r, false); + int cmp = session.compareWithNull(l, r, false); if (cmp >= 0) { result = ValueBoolean.TRUE; } else if (cmp == Integer.MIN_VALUE) { @@ -342,7 +286,7 @@ static Value compare(Database database, Value l, Value r, int compareType) { break; } case BIGGER: { - int cmp = database.compareWithNull(l, r, false); + int cmp = session.compareWithNull(l, r, false); if (cmp > 0) { result = ValueBoolean.TRUE; } else if (cmp == Integer.MIN_VALUE) { @@ -353,7 +297,7 @@ static Value compare(Database database, Value l, Value r, int compareType) { break; } case SMALLER_EQUAL: { - int cmp = database.compareWithNull(l, r, false); 
+ int cmp = session.compareWithNull(l, r, false); if (cmp == Integer.MIN_VALUE) { result = ValueNull.INSTANCE; } else { @@ -362,7 +306,7 @@ static Value compare(Database database, Value l, Value r, int compareType) { break; } case SMALLER: { - int cmp = database.compareWithNull(l, r, false); + int cmp = session.compareWithNull(l, r, false); if (cmp == Integer.MIN_VALUE) { result = ValueNull.INSTANCE; } else { @@ -374,20 +318,23 @@ static Value compare(Database database, Value l, Value r, int compareType) { if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { result = ValueNull.INSTANCE; } else { - ValueGeometry lg = (ValueGeometry) l.convertTo(Value.GEOMETRY); - ValueGeometry rg = (ValueGeometry) r.convertTo(Value.GEOMETRY); - result = ValueBoolean.get(lg.intersectsBoundingBox(rg)); + result = ValueBoolean.get(l.convertToGeometry(null).intersectsBoundingBox(r.convertToGeometry(null))); } break; } default: - throw DbException.throwInternalError("type=" + compareType); + throw DbException.getInternalError("type=" + compareType); } return result; } - private int getReversedCompareType(int type) { - switch (compareType) { + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + private static int getReversedCompareType(int type) { + switch (type) { case EQUAL: case EQUAL_NULL_SAFE: case NOT_EQUAL: @@ -403,17 +350,17 @@ private int getReversedCompareType(int type) { case SMALLER: return BIGGER; default: - throw DbException.throwInternalError("type=" + compareType); + throw DbException.getInternalError("type=" + type); } } @Override - public Expression getNotIfPossible(Session session) { - if (compareType == SPATIAL_INTERSECTS) { + public Expression getNotIfPossible(SessionLocal session) { + if (compareType == SPATIAL_INTERSECTS || whenOperand) { return null; } int type = getNotCompareType(); - return new Comparison(session, type, left, right); + return new Comparison(type, left, right, false); } private int getNotCompareType() { @@ 
-434,17 +381,19 @@ private int getNotCompareType() { return BIGGER; case SMALLER: return BIGGER_EQUAL; - case IS_NULL: - return IS_NOT_NULL; - case IS_NOT_NULL: - return IS_NULL; default: - throw DbException.throwInternalError("type=" + compareType); + throw DbException.getInternalError("type=" + compareType); } } @Override - public void createIndexConditions(Session session, TableFilter filter) { + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (!whenOperand) { + createIndexConditions(filter, left, right, compareType); + } + } + + static void createIndexConditions(TableFilter filter, Expression left, Expression right, int compareType) { if (!filter.getTable().isQueryComparable()) { return; } @@ -455,16 +404,6 @@ public void createIndexConditions(Session session, TableFilter filter) { l = null; } } - if (right == null) { - if (l != null) { - switch (compareType) { - case IS_NULL: - filter.addIndexCondition( - IndexCondition.get(Comparison.EQUAL_NULL_SAFE, l, ValueExpression.getNull())); - } - } - return; - } ExpressionColumn r = null; if (right instanceof ExpressionColumn) { r = (ExpressionColumn) right; @@ -473,34 +412,21 @@ public void createIndexConditions(Session session, TableFilter filter) { } } // one side must be from the current filter - if (l == null && r == null) { - return; - } - if (l != null && r != null) { + if ((l == null) == (r == null)) { return; } if (l == null) { - ExpressionVisitor visitor = - ExpressionVisitor.getNotFromResolverVisitor(filter); - if (!left.isEverything(visitor)) { + if (!left.isEverything(ExpressionVisitor.getNotFromResolverVisitor(filter))) { return; } - } else if (r == null) { - ExpressionVisitor visitor = - ExpressionVisitor.getNotFromResolverVisitor(filter); - if (!right.isEverything(visitor)) { + } else { // r == null + if (!right.isEverything(ExpressionVisitor.getNotFromResolverVisitor(filter))) { return; } - } else { - // if both sides are part of the same filter, it can't be used for 
- // index lookup - return; } - boolean addIndex; switch (compareType) { case NOT_EQUAL: case NOT_EQUAL_NULL_SAFE: - addIndex = false; break; case EQUAL: case EQUAL_NULL_SAFE: @@ -509,26 +435,21 @@ public void createIndexConditions(Session session, TableFilter filter) { case SMALLER_EQUAL: case SMALLER: case SPATIAL_INTERSECTS: - addIndex = true; - break; - default: - throw DbException.throwInternalError("type=" + compareType); - } - if (addIndex) { if (l != null) { - int rType = right.getType().getValueType(); - if (l.getType().getValueType() == rType || rType != Value.STRING_IGNORECASE) { - filter.addIndexCondition( - IndexCondition.get(compareType, l, right)); + TypeInfo colType = l.getType(); + if (TypeInfo.haveSameOrdering(colType, TypeInfo.getHigherType(colType, right.getType()))) { + filter.addIndexCondition(IndexCondition.get(compareType, l, right)); } - } else if (r != null) { - int lType = left.getType().getValueType(); - if (r.getType().getValueType() == lType || lType != Value.STRING_IGNORECASE) { - int compareRev = getReversedCompareType(compareType); - filter.addIndexCondition( - IndexCondition.get(compareRev, r, left)); + } else { + @SuppressWarnings("null") + TypeInfo colType = r.getType(); + if (TypeInfo.haveSameOrdering(colType, TypeInfo.getHigherType(colType, left.getType()))) { + filter.addIndexCondition(IndexCondition.get(getReversedCompareType(compareType), r, left)); } } + break; + default: + throw DbException.getInternalError("type=" + compareType); } } @@ -541,44 +462,27 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { left.updateAggregate(session, stage); if (right != null) { right.updateAggregate(session, stage); } } - @Override - public void addFilterConditions(TableFilter filter, boolean outerJoin) { - if (compareType == IS_NULL && outerJoin) { - // can not optimize: - // select * 
from test t1 left join test t2 on t1.id = t2.id - // where t2.id is null - // to - // select * from test t1 left join test t2 - // on t1.id = t2.id and t2.id is null - return; - } - super.addFilterConditions(filter, outerJoin); - } - @Override public void mapColumns(ColumnResolver resolver, int level, int state) { left.mapColumns(resolver, level, state); - if (right != null) { - right.mapColumns(resolver, level, state); - } + right.mapColumns(resolver, level, state); } @Override public boolean isEverything(ExpressionVisitor visitor) { - return left.isEverything(visitor) && - (right == null || right.isEverything(visitor)); + return left.isEverything(visitor) && right.isEverything(visitor); } @Override public int getCost() { - return left.getCost() + (right == null ? 0 : right.getCost()) + 1; + return left.getCost() + right.getCost() + 1; } /** @@ -590,10 +494,10 @@ public int getCost() { */ Expression getIfEquals(Expression match) { if (compareType == EQUAL) { - String sql = match.getSQL(true); - if (left.getSQL(true).equals(sql)) { + String sql = match.getSQL(DEFAULT_SQL_FLAGS); + if (left.getSQL(DEFAULT_SQL_FLAGS).equals(sql)) { return right; - } else if (right.getSQL(true).equals(sql)) { + } else if (right.getSQL(DEFAULT_SQL_FLAGS).equals(sql)) { return left; } } @@ -602,62 +506,82 @@ Expression getIfEquals(Expression match) { /** * Get an additional condition if possible. Example: given two conditions - * A=B AND B=C, the new condition A=C is returned. Given the two conditions - * A=1 OR A=2, the new condition A IN(1, 2) is returned. + * A=B AND B=C, the new condition A=C is returned. 
* * @param session the session * @param other the second condition - * @param and true for AND, false for OR - * @return null or the third condition + * @return null or the third condition for indexes */ - Expression getAdditional(Session session, Comparison other, boolean and) { - if (compareType == other.compareType && compareType == EQUAL) { + Expression getAdditionalAnd(SessionLocal session, Comparison other) { + if (compareType == EQUAL && other.compareType == EQUAL && !whenOperand) { boolean lc = left.isConstant(); boolean rc = right.isConstant(); boolean l2c = other.left.isConstant(); boolean r2c = other.right.isConstant(); - String l = left.getSQL(true); - String l2 = other.left.getSQL(true); - String r = right.getSQL(true); - String r2 = other.right.getSQL(true); - if (and) { - // a=b AND a=c - // must not compare constants. example: NOT(B=2 AND B=3) - if (!(rc && r2c) && l.equals(l2)) { - return new Comparison(session, EQUAL, right, other.right); - } else if (!(rc && l2c) && l.equals(r2)) { - return new Comparison(session, EQUAL, right, other.left); - } else if (!(lc && r2c) && r.equals(l2)) { - return new Comparison(session, EQUAL, left, other.right); - } else if (!(lc && l2c) && r.equals(r2)) { - return new Comparison(session, EQUAL, left, other.left); + String l = left.getSQL(DEFAULT_SQL_FLAGS); + String l2 = other.left.getSQL(DEFAULT_SQL_FLAGS); + String r = right.getSQL(DEFAULT_SQL_FLAGS); + String r2 = other.right.getSQL(DEFAULT_SQL_FLAGS); + // a=b AND a=c + // must not compare constants. 
example: NOT(B=2 AND B=3) + if (!(rc && r2c) && l.equals(l2)) { + return new Comparison(EQUAL, right, other.right, false); + } else if (!(rc && l2c) && l.equals(r2)) { + return new Comparison(EQUAL, right, other.left, false); + } else if (!(lc && r2c) && r.equals(l2)) { + return new Comparison(EQUAL, left, other.right, false); + } else if (!(lc && l2c) && r.equals(r2)) { + return new Comparison(EQUAL, left, other.left, false); + } + } + return null; + } + + /** + * Replace the OR condition with IN condition if possible. Example: given + * the two conditions A=1 OR A=2, the new condition A IN(1, 2) is returned. + * + * @param session the session + * @param other the second condition + * @return null or the joined IN condition + */ + Expression optimizeOr(SessionLocal session, Comparison other) { + if (compareType == EQUAL && other.compareType == EQUAL) { + Expression left2 = other.left; + Expression right2 = other.right; + String l2 = left2.getSQL(DEFAULT_SQL_FLAGS); + String r2 = right2.getSQL(DEFAULT_SQL_FLAGS); + if (left.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + String l = left.getSQL(DEFAULT_SQL_FLAGS); + if (l.equals(l2)) { + return getConditionIn(left, right, right2); + } else if (l.equals(r2)) { + return getConditionIn(left, right, left2); } - } else { - // a=b OR a=c - if (rc && r2c && l.equals(l2)) { - return getConditionIn(session, left, right, other.right); - } else if (rc && l2c && l.equals(r2)) { - return getConditionIn(session, left, right, other.left); - } else if (lc && r2c && r.equals(l2)) { - return getConditionIn(session, right, left, other.right); - } else if (lc && l2c && r.equals(r2)) { - return getConditionIn(session, right, left, other.left); + } + if (right.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + String r = right.getSQL(DEFAULT_SQL_FLAGS); + if (r.equals(l2)) { + return getConditionIn(right, left, right2); + } else if (r.equals(r2)) { + return getConditionIn(right, left, left2); } } } return null; } - 
private static ConditionIn getConditionIn(Session session, Expression left, Expression value1, Expression value2) { + private static ConditionIn getConditionIn(Expression left, Expression value1, + Expression value2) { ArrayList right = new ArrayList<>(2); right.add(value1); right.add(value2); - return new ConditionIn(session.getDatabase(), left, right); + return new ConditionIn(left, false, false, right); } @Override public int getSubexpressionCount() { - return compareType == IS_NULL || compareType == IS_NOT_NULL ? 1 : 2; + return 2; } @Override @@ -666,10 +590,7 @@ public Expression getSubexpression(int index) { case 0: return left; case 1: - if (compareType != IS_NULL && compareType != IS_NOT_NULL) { - return right; - } - //$FALL-THROUGH$ + return right; default: throw new IndexOutOfBoundsException(); } diff --git a/h2/src/main/org/h2/expression/condition/Condition.java b/h2/src/main/org/h2/expression/condition/Condition.java index 3c8c9a7d94..ba3d50991a 100644 --- a/h2/src/main/org/h2/expression/condition/Condition.java +++ b/h2/src/main/org/h2/expression/condition/Condition.java @@ -1,18 +1,35 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.condition; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; +import org.h2.expression.function.CastSpecification; import org.h2.value.TypeInfo; +import org.h2.value.Value; /** * Represents a condition returning a boolean value, or NULL. */ abstract class Condition extends Expression { + /** + * Add a cast around the expression (if necessary) so that the type is boolean. 
+ * + * @param session the session + * @param expression the expression + * @return the new expression + */ + static Expression castToBoolean(SessionLocal session, Expression expression) { + if (expression.getType().getValueType() == Value.BOOLEAN) { + return expression; + } + return new CastSpecification(expression, TypeInfo.TYPE_BOOLEAN); + } + @Override public TypeInfo getType() { return TypeInfo.TYPE_BOOLEAN; diff --git a/h2/src/main/org/h2/expression/condition/ConditionAndOr.java b/h2/src/main/org/h2/expression/condition/ConditionAndOr.java index 075a6fac5e..82dc4fbcb3 100644 --- a/h2/src/main/org/h2/expression/condition/ConditionAndOr.java +++ b/h2/src/main/org/h2/expression/condition/ConditionAndOr.java @@ -1,13 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.condition; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; +import org.h2.expression.TypedValueExpression; import org.h2.expression.ValueExpression; import org.h2.message.DbException; import org.h2.table.ColumnResolver; @@ -34,19 +35,32 @@ public class ConditionAndOr extends Condition { private final int andOrType; private Expression left, right; + /** + * Additional condition for index only. 
+ */ + private Expression added; + public ConditionAndOr(int andOrType, Expression left, Expression right) { + if (left == null || right == null) { + throw DbException.getInternalError(left + " " + right); + } this.andOrType = andOrType; this.left = left; this.right = right; - if (left == null || right == null) { - DbException.throwInternalError(left + " " + right); - } + } + + int getAndOrType() { + return this.andOrType; } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append('('); - left.getSQL(builder, alwaysQuote); + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES); switch (andOrType) { case AND: builder.append("\n AND "); @@ -55,21 +69,24 @@ public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { builder.append("\n OR "); break; default: - throw DbException.throwInternalError("andOrType=" + andOrType); + throw DbException.getInternalError("andOrType=" + andOrType); } - return right.getSQL(builder, alwaysQuote).append(')'); + return right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); } @Override - public void createIndexConditions(Session session, TableFilter filter) { + public void createIndexConditions(SessionLocal session, TableFilter filter) { if (andOrType == AND) { left.createIndexConditions(session, filter); right.createIndexConditions(session, filter); + if (added != null) { + added.createIndexConditions(session, filter); + } } } @Override - public Expression getNotIfPossible(Session session) { + public Expression getNotIfPossible(SessionLocal session) { // (NOT (A OR B)): (NOT(A) AND NOT(B)) // (NOT (A AND B)): (NOT(A) OR NOT(B)) Expression l = left.getNotIfPossible(session); @@ -85,49 +102,35 @@ public Expression getNotIfPossible(Session session) { } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal 
session) { Value l = left.getValue(session); Value r; switch (andOrType) { case AND: { - if (l != ValueNull.INSTANCE && !l.getBoolean()) { - return l; - } - r = right.getValue(session); - if (r != ValueNull.INSTANCE && !r.getBoolean()) { - return r; - } - if (l == ValueNull.INSTANCE) { - return l; + if (l.isFalse() || (r = right.getValue(session)).isFalse()) { + return ValueBoolean.FALSE; } - if (r == ValueNull.INSTANCE) { - return r; + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; } return ValueBoolean.TRUE; } case OR: { - if (l.getBoolean()) { - return l; + if (l.isTrue() || (r = right.getValue(session)).isTrue()) { + return ValueBoolean.TRUE; } - r = right.getValue(session); - if (r.getBoolean()) { - return r; - } - if (l == ValueNull.INSTANCE) { - return l; - } - if (r == ValueNull.INSTANCE) { - return r; + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; } return ValueBoolean.FALSE; } default: - throw DbException.throwInternalError("type=" + andOrType); + throw DbException.getInternalError("type=" + andOrType); } } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { // NULL handling: see wikipedia, // http://www-cs-students.stanford.edu/~wlam/compsci/sqlnulls left = left.optimize(session); @@ -138,130 +141,146 @@ public Expression optimize(Session session) { left = right; right = t; } - // this optimization does not work in the following case, - // but NOT is optimized before: - // CREATE TABLE TEST(A INT, B INT); - // INSERT INTO TEST VALUES(1, NULL); - // SELECT * FROM TEST WHERE NOT (B=A AND B=0); // no rows - // SELECT * FROM TEST WHERE NOT (B=A AND B=0 AND A=0); // 1, NULL - if (session.getDatabase().getSettings().optimizeTwoEquals && - andOrType == AND) { + switch (andOrType) { + case AND: + if (!session.getDatabase().getSettings().optimizeTwoEquals) { + break; + } + // this optimization does not work in the following 
case, + // but NOT is optimized before: + // CREATE TABLE TEST(A INT, B INT); + // INSERT INTO TEST VALUES(1, NULL); + // SELECT * FROM TEST WHERE NOT (B=A AND B=0); // no rows + // SELECT * FROM TEST WHERE NOT (B=A AND B=0 AND A=0); // 1, NULL // try to add conditions (A=B AND B=1: add A=1) if (left instanceof Comparison && right instanceof Comparison) { - Comparison compLeft = (Comparison) left; - Comparison compRight = (Comparison) right; - Expression added = compLeft.getAdditional( - session, compRight, true); + // try to add conditions (A=B AND B=1: add A=1) + Expression added = ((Comparison) left).getAdditionalAnd(session, (Comparison) right); if (added != null) { - added = added.optimize(session); - return new ConditionAndOr(AND, this, added); + this.added = added.optimize(session); } } + break; + case OR: + if (!session.getDatabase().getSettings().optimizeOr) { + break; + } + Expression reduced; + if (left instanceof Comparison && right instanceof Comparison) { + reduced = ((Comparison) left).optimizeOr(session, (Comparison) right); + } else if (left instanceof ConditionIn && right instanceof Comparison) { + reduced = ((ConditionIn) left).getAdditional((Comparison) right); + } else if (right instanceof ConditionIn && left instanceof Comparison) { + reduced = ((ConditionIn) right).getAdditional((Comparison) left); + } else if (left instanceof ConditionInConstantSet && right instanceof Comparison) { + reduced = ((ConditionInConstantSet) left).getAdditional(session, (Comparison) right); + } else if (right instanceof ConditionInConstantSet && left instanceof Comparison) { + reduced = ((ConditionInConstantSet) right).getAdditional(session, (Comparison) left); + } else if (left instanceof ConditionAndOr && right instanceof ConditionAndOr) { + reduced = optimizeConditionAndOr((ConditionAndOr)left, (ConditionAndOr)right); + } else { + // TODO optimization: convert .. OR .. 
to UNION if the cost is lower + break; + } + if (reduced != null) { + return reduced.optimize(session); + } + } + Expression e = optimizeIfConstant(session, andOrType, left, right); + if (e == null) { + return optimizeN(this); + } + if (e instanceof ConditionAndOr) { + return optimizeN((ConditionAndOr) e); } + return e; + } - if (andOrType == OR && - session.getDatabase().getSettings().optimizeOr) { - // try to add conditions (A=B AND B=1: add A=1) - if (left instanceof Comparison && - right instanceof Comparison) { - Comparison compLeft = (Comparison) left; - Comparison compRight = (Comparison) right; - Expression added = compLeft.getAdditional( - session, compRight, false); - if (added != null) { - return added.optimize(session); - } - } else if (left instanceof ConditionIn && - right instanceof Comparison) { - Expression added = ((ConditionIn) left). - getAdditional((Comparison) right); - if (added != null) { - return added.optimize(session); - } - } else if (right instanceof ConditionIn && - left instanceof Comparison) { - Expression added = ((ConditionIn) right). - getAdditional((Comparison) left); - if (added != null) { - return added.optimize(session); - } - } else if (left instanceof ConditionInConstantSet && - right instanceof Comparison) { - Expression added = ((ConditionInConstantSet) left). - getAdditional(session, (Comparison) right); - if (added != null) { - return added.optimize(session); - } - } else if (right instanceof ConditionInConstantSet && - left instanceof Comparison) { - Expression added = ((ConditionInConstantSet) right). 
- getAdditional(session, (Comparison) left); - if (added != null) { - return added.optimize(session); - } - } else if (left instanceof ConditionAndOr && - right instanceof ConditionAndOr ){ - ConditionAndOr condAORight = (ConditionAndOr)right; - ConditionAndOr condAORLeft = (ConditionAndOr)left; - Expression reduced = optimizeConditionAndOr(condAORLeft,condAORight); - if(reduced != null){ - return reduced.optimize(session); - } + private static Expression optimizeN(ConditionAndOr condition) { + if (condition.right instanceof ConditionAndOr) { + ConditionAndOr rightCondition = (ConditionAndOr) condition.right; + if (rightCondition.andOrType == condition.andOrType) { + return new ConditionAndOrN(condition.andOrType, condition.left, rightCondition.left, + rightCondition.right); } } - // TODO optimization: convert .. OR .. to UNION if the cost is lower - Value l = left.isConstant() ? left.getValue(session) : null; - Value r = right.isConstant() ? right.getValue(session) : null; - if (l == null && r == null) { - return this; + if (condition.right instanceof ConditionAndOrN) { + ConditionAndOrN rightCondition = (ConditionAndOrN) condition.right; + if (rightCondition.getAndOrType() == condition.andOrType) { + rightCondition.addFirst(condition.left); + return rightCondition; + } + } + return condition; + } + + /** + * Optimize the condition if at least one part is constant. 
+ * + * @param session the session + * @param andOrType the type + * @param left the left part of the condition + * @param right the right part of the condition + * @return the optimized condition, or {@code null} if condition cannot be optimized + */ + static Expression optimizeIfConstant(SessionLocal session, int andOrType, Expression left, Expression right) { + if (!left.isConstant()) { + if (!right.isConstant()) { + return null; + } else { + return optimizeConstant(session, andOrType, right.getValue(session), left); + } } - if (l != null && r != null) { - return ValueExpression.get(getValue(session)); + Value l = left.getValue(session); + if (!right.isConstant()) { + return optimizeConstant(session, andOrType, l, right); } + Value r = right.getValue(session); switch (andOrType) { - case AND: - if (l != null) { - if (l != ValueNull.INSTANCE && !l.getBoolean()) { - return ValueExpression.get(l); - } else if (l.getBoolean()) { - return right; - } - } else if (r != null) { - if (r != ValueNull.INSTANCE && !r.getBoolean()) { - return ValueExpression.get(r); - } else if (r.getBoolean()) { - return left; - } + case AND: { + if (l.isFalse() || r.isFalse()) { + return ValueExpression.FALSE; } - break; - case OR: - if (l != null) { - if (l.getBoolean()) { - return ValueExpression.get(l); - } else if (l != ValueNull.INSTANCE) { - return right; - } - } else if (r != null) { - if (r.getBoolean()) { - return ValueExpression.get(r); - } else if (r != ValueNull.INSTANCE) { - return left; - } + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; } - break; + return ValueExpression.TRUE; + } + case OR: { + if (l.isTrue() || r.isTrue()) { + return ValueExpression.TRUE; + } + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + return ValueExpression.FALSE; + } default: - DbException.throwInternalError("type=" + andOrType); + throw DbException.getInternalError("type=" + andOrType); } - 
return this; + } + + private static Expression optimizeConstant(SessionLocal session, int andOrType, Value l, Expression right) { + if (l != ValueNull.INSTANCE) { + switch (andOrType) { + case AND: + return l.getBoolean() ? castToBoolean(session, right) : ValueExpression.FALSE; + case OR: + return l.getBoolean() ? ValueExpression.TRUE : castToBoolean(session, right); + default: + throw DbException.getInternalError("type=" + andOrType); + } + } + return null; } @Override - public void addFilterConditions(TableFilter filter, boolean outerJoin) { + public void addFilterConditions(TableFilter filter) { if (andOrType == AND) { - left.addFilterConditions(filter, outerJoin); - right.addFilterConditions(filter, outerJoin); + left.addFilterConditions(filter); + right.addFilterConditions(filter); } else { - super.addFilterConditions(filter, outerJoin); + super.addFilterConditions(filter); } } @@ -278,7 +297,7 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { left.updateAggregate(session, stage); right.updateAggregate(session, stage); } @@ -318,30 +337,30 @@ public Expression getSubexpression(int index) { * @param right the second condition * @return null or the third condition */ - private static Expression optimizeConditionAndOr(ConditionAndOr left, ConditionAndOr right) { + static Expression optimizeConditionAndOr(ConditionAndOr left, ConditionAndOr right) { if (left.andOrType != AND || right.andOrType != AND) { return null; } Expression leftLeft = left.getSubexpression(0), leftRight = left.getSubexpression(1); Expression rightLeft = right.getSubexpression(0), rightRight = right.getSubexpression(1); - String leftLeftSQL = leftLeft.getSQL(true), rightLeftSQL = rightLeft.getSQL(true); - Expression combinedExpression; - if (leftLeftSQL.equals(rightLeftSQL)) { - combinedExpression = new ConditionAndOr(OR, leftRight, 
rightRight); - return new ConditionAndOr(AND, leftLeft, combinedExpression); - } - String rightRightSQL = rightRight.getSQL(true); - if (leftLeftSQL.equals(rightRightSQL)) { - combinedExpression = new ConditionAndOr(OR, leftRight, rightLeft); - return new ConditionAndOr(AND, leftLeft, combinedExpression); + String rightLeftSQL = rightLeft.getSQL(DEFAULT_SQL_FLAGS); + String rightRightSQL = rightRight.getSQL(DEFAULT_SQL_FLAGS); + if (leftLeft.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + String leftLeftSQL = leftLeft.getSQL(DEFAULT_SQL_FLAGS); + if (leftLeftSQL.equals(rightLeftSQL)) { + return new ConditionAndOr(AND, leftLeft, new ConditionAndOr(OR, leftRight, rightRight)); + } + if (leftLeftSQL.equals(rightRightSQL)) { + return new ConditionAndOr(AND, leftLeft, new ConditionAndOr(OR, leftRight, rightLeft)); + } } - String leftRightSQL = leftRight.getSQL(true); - if (leftRightSQL.equals(rightLeftSQL)) { - combinedExpression = new ConditionAndOr(OR, leftLeft, rightRight); - return new ConditionAndOr(AND, leftRight, combinedExpression); - } else if (leftRightSQL.equals(rightRightSQL)) { - combinedExpression = new ConditionAndOr(OR, leftLeft, rightLeft); - return new ConditionAndOr(AND, leftRight, combinedExpression); + if (leftRight.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + String leftRightSQL = leftRight.getSQL(DEFAULT_SQL_FLAGS); + if (leftRightSQL.equals(rightLeftSQL)) { + return new ConditionAndOr(AND, leftRight, new ConditionAndOr(OR, leftLeft, rightRight)); + } else if (leftRightSQL.equals(rightRightSQL)) { + return new ConditionAndOr(AND, leftRight, new ConditionAndOr(OR, leftLeft, rightLeft)); + } } return null; } diff --git a/h2/src/main/org/h2/expression/condition/ConditionAndOrN.java b/h2/src/main/org/h2/expression/condition/ConditionAndOrN.java new file mode 100644 index 0000000000..51ed2b1216 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ConditionAndOrN.java @@ -0,0 +1,341 @@ +/* + * Copyright 2004-2022 H2 
Group. Multiple-Licensed under the MPL 2.0, and the + * EPL 1.0 (https://h2database.com/html/license.html). Initial Developer: H2 + * Group + */ +package org.h2.expression.condition; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * An 'and' or 'or' condition as in WHERE ID=1 AND NAME=? with N operands. + * Mostly useful for optimisation and preventing stack overflow where generated + * SQL has tons of conditions. + */ +public class ConditionAndOrN extends Condition { + + private final int andOrType; + /** + * Use an ArrayDeque because we primarily insert at the front. + */ + private final List expressions; + + /** + * Additional conditions for index only. + */ + private List added; + + public ConditionAndOrN(int andOrType, Expression expr1, Expression expr2, Expression expr3) { + this.andOrType = andOrType; + this.expressions = new ArrayList<>(3); + expressions.add(expr1); + expressions.add(expr2); + expressions.add(expr3); + } + + public ConditionAndOrN(int andOrType, List expressions) { + this.andOrType = andOrType; + this.expressions = expressions; + } + + int getAndOrType() { + return andOrType; + } + + /** + * Add the expression at the beginning of the list. 
+ * + * @param e the expression + */ + void addFirst(Expression e) { + expressions.add(0, e); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + Iterator it = expressions.iterator(); + it.next().getSQL(builder, sqlFlags, AUTO_PARENTHESES); + while (it.hasNext()) { + switch (andOrType) { + case ConditionAndOr.AND: + builder.append("\n AND "); + break; + case ConditionAndOr.OR: + builder.append("\n OR "); + break; + default: + throw DbException.getInternalError("andOrType=" + andOrType); + } + it.next().getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + return builder; + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (andOrType == ConditionAndOr.AND) { + for (Expression e : expressions) { + e.createIndexConditions(session, filter); + } + if (added != null) { + for (Expression e : added) { + e.createIndexConditions(session, filter); + } + } + } + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + // (NOT (A OR B)): (NOT(A) AND NOT(B)) + // (NOT (A AND B)): (NOT(A) OR NOT(B)) + final ArrayList newList = new ArrayList<>(expressions.size()); + for (Expression e : expressions) { + Expression l = e.getNotIfPossible(session); + if (l == null) { + l = new ConditionNot(e); + } + newList.add(l); + } + int reversed = andOrType == ConditionAndOr.AND ? ConditionAndOr.OR : ConditionAndOr.AND; + return new ConditionAndOrN(reversed, newList); + } + + @Override + public Value getValue(SessionLocal session) { + boolean hasNull = false; + switch (andOrType) { + case ConditionAndOr.AND: { + for (Expression e : expressions) { + Value v = e.getValue(session); + if (v == ValueNull.INSTANCE) { + hasNull = true; + } else if (!v.getBoolean()) { + return ValueBoolean.FALSE; + } + } + return hasNull ? 
ValueNull.INSTANCE : ValueBoolean.TRUE; + } + case ConditionAndOr.OR: { + for (Expression e : expressions) { + Value v = e.getValue(session); + if (v == ValueNull.INSTANCE) { + hasNull = true; + } else if (v.getBoolean()) { + return ValueBoolean.TRUE; + } + } + return hasNull ? ValueNull.INSTANCE : ValueBoolean.FALSE; + } + default: + throw DbException.getInternalError("type=" + andOrType); + } + } + + private static final Comparator COMPARE_BY_COST = new Comparator() { + @Override + public int compare(Expression lhs, Expression rhs) { + return lhs.getCost() - rhs.getCost(); + } + + }; + + @Override + public Expression optimize(SessionLocal session) { + // NULL handling: see wikipedia, + // http://www-cs-students.stanford.edu/~wlam/compsci/sqlnulls + + // first pass, optimize individual sub-expressions + for (int i = 0; i < expressions.size(); i++ ) { + expressions.set(i, expressions.get(i).optimize(session)); + } + + Collections.sort(expressions, COMPARE_BY_COST); + + // TODO we're only matching pairs so that are next to each other, so in complex expressions + // we will miss opportunities + + // second pass, optimize combinations + optimizeMerge(0); + for (int i = 1; i < expressions.size(); ) { + Expression left = expressions.get(i-1); + Expression right = expressions.get(i); + switch (andOrType) { + case ConditionAndOr.AND: + if (!session.getDatabase().getSettings().optimizeTwoEquals) { + break; + } + // this optimization does not work in the following case, + // but NOT is optimized before: + // CREATE TABLE TEST(A INT, B INT); + // INSERT INTO TEST VALUES(1, NULL); + // SELECT * FROM TEST WHERE NOT (B=A AND B=0); // no rows + // SELECT * FROM TEST WHERE NOT (B=A AND B=0 AND A=0); // 1, + // NULL + // try to add conditions (A=B AND B=1: add A=1) + if (left instanceof Comparison && right instanceof Comparison) { + // try to add conditions (A=B AND B=1: add A=1) + Expression added = ((Comparison) left).getAdditionalAnd(session, (Comparison) right); + if (added != 
null) { + if (this.added == null) { + this.added = new ArrayList<>(); + } + this.added.add(added.optimize(session)); + } + } + break; + case ConditionAndOr.OR: + if (!session.getDatabase().getSettings().optimizeOr) { + break; + } + Expression reduced; + if (left instanceof Comparison && right instanceof Comparison) { + reduced = ((Comparison) left).optimizeOr(session, (Comparison) right); + } else if (left instanceof ConditionIn && right instanceof Comparison) { + reduced = ((ConditionIn) left).getAdditional((Comparison) right); + } else if (right instanceof ConditionIn && left instanceof Comparison) { + reduced = ((ConditionIn) right).getAdditional((Comparison) left); + } else if (left instanceof ConditionInConstantSet && right instanceof Comparison) { + reduced = ((ConditionInConstantSet) left).getAdditional(session, (Comparison) right); + } else if (right instanceof ConditionInConstantSet && left instanceof Comparison) { + reduced = ((ConditionInConstantSet) right).getAdditional(session, (Comparison) left); + } else if (left instanceof ConditionAndOr && right instanceof ConditionAndOr) { + reduced = ConditionAndOr.optimizeConditionAndOr((ConditionAndOr) left, (ConditionAndOr) right); + } else { + // TODO optimization: convert .. OR .. 
to UNION if the cost + // is lower + break; + } + if (reduced != null) { + expressions.remove(i); + expressions.set(i - 1, reduced.optimize(session)); + continue; // because we don't want to increment, we want to compare the new pair exposed + } + } + + Expression e = ConditionAndOr.optimizeIfConstant(session, andOrType, left, right); + if (e != null) { + expressions.remove(i); + expressions.set(i-1, e); + continue; // because we don't want to increment, we want to compare the new pair exposed + } + + if (optimizeMerge(i)) { + continue; + } + + i++; + } + + Collections.sort(expressions, COMPARE_BY_COST); + + if (expressions.size() == 1) { + return Condition.castToBoolean(session, expressions.get(0)); + } + return this; + } + + + private boolean optimizeMerge(int i) { + Expression e = expressions.get(i); + // If we have a ConditionAndOrN as a sub-expression, see if we can merge it + // into this one. + if (e instanceof ConditionAndOrN) { + ConditionAndOrN rightCondition = (ConditionAndOrN) e; + if (this.andOrType == rightCondition.andOrType) { + expressions.remove(i); + expressions.addAll(i, rightCondition.expressions); + return true; + } + } + else if (e instanceof ConditionAndOr) { + ConditionAndOr rightCondition = (ConditionAndOr) e; + if (this.andOrType == rightCondition.getAndOrType()) { + expressions.set(i, rightCondition.getSubexpression(0)); + expressions.add(i+1, rightCondition.getSubexpression(1)); + return true; + } + } + return false; + } + + @Override + public void addFilterConditions(TableFilter filter) { + if (andOrType == ConditionAndOr.AND) { + for (Expression e : expressions) { + e.addFilterConditions(filter); + } + } else { + super.addFilterConditions(filter); + } + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + for (Expression e : expressions) { + e.mapColumns(resolver, level, state); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + for (Expression e : 
expressions) { + e.setEvaluatable(tableFilter, b); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + for (Expression e : expressions) { + e.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + for (Expression e : expressions) { + if (!e.isEverything(visitor)) { + return false; + } + } + return true; + } + + @Override + public int getCost() { + int cost = 0; + for (Expression e : expressions) { + cost += e.getCost(); + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return expressions.size(); + } + + @Override + public Expression getSubexpression(int index) { + return expressions.get(index); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionExists.java b/h2/src/main/org/h2/expression/condition/ConditionExists.java deleted file mode 100644 index 828a3998b5..0000000000 --- a/h2/src/main/org/h2/expression/condition/ConditionExists.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression.condition; - -import org.h2.command.dml.Query; -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.expression.ExpressionVisitor; -import org.h2.result.ResultInterface; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.StringUtils; -import org.h2.value.Value; -import org.h2.value.ValueBoolean; - -/** - * An 'exists' condition as in WHERE EXISTS(SELECT ...) 
- */ -public class ConditionExists extends Condition { - - private final Query query; - - public ConditionExists(Query query) { - this.query = query; - } - - @Override - public Value getValue(Session session) { - query.setSession(session); - ResultInterface result = query.query(1); - session.addTemporaryResult(result); - boolean r = result.hasNext(); - return ValueBoolean.get(r); - } - - @Override - public Expression optimize(Session session) { - session.optimizeQueryExpression(query); - return this; - } - - @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append("EXISTS(\n"); - return StringUtils.indent(builder, query.getPlanSQL(alwaysQuote), 4, false).append(')'); - } - - @Override - public void updateAggregate(Session session, int stage) { - // TODO exists: is it allowed that the subquery contains aggregates? - // probably not - // select id from test group by id having exists (select * from test2 - // where id=count(test.id)) - } - - @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - query.mapColumns(resolver, level + 1); - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - query.setEvaluatable(tableFilter, b); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return query.isEverything(visitor); - } - - @Override - public int getCost() { - return query.getCostAsExpression(); - } - -} diff --git a/h2/src/main/org/h2/expression/condition/ConditionIn.java b/h2/src/main/org/h2/expression/condition/ConditionIn.java index 7df565f986..663f6fc24a 100644 --- a/h2/src/main/org/h2/expression/condition/ConditionIn.java +++ b/h2/src/main/org/h2/expression/condition/ConditionIn.java @@ -1,24 +1,22 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.condition; import java.util.ArrayList; -import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; import org.h2.expression.Parameter; +import org.h2.expression.TypedValueExpression; import org.h2.expression.ValueExpression; -import org.h2.expression.function.Function; -import org.h2.expression.function.TableFunction; import org.h2.index.IndexCondition; -import org.h2.result.ResultInterface; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueBoolean; import org.h2.value.ValueNull; @@ -26,54 +24,64 @@ /** * An 'in' condition with a list of values, as in WHERE NAME IN(...) */ -public class ConditionIn extends Condition { +public final class ConditionIn extends Condition { - private final Database database; private Expression left; + private final boolean not; + private final boolean whenOperand; private final ArrayList valueList; /** * Create a new IN(..) condition. 
* - * @param database the database * @param left the expression before IN + * @param not whether the result should be negated + * @param whenOperand whether this is a when operand * @param values the value list (at least one element) */ - public ConditionIn(Database database, Expression left, - ArrayList values) { - this.database = database; + public ConditionIn(Expression left, boolean not, boolean whenOperand, ArrayList values) { this.left = left; + this.not = not; + this.whenOperand = whenOperand; this.valueList = values; } @Override - public Value getValue(Session session) { - Value l = left.getValue(session); - if (l.containsNull()) { - return ValueNull.INSTANCE; + public Value getValue(SessionLocal session) { + return getValue(session, left.getValue(session)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); } - int size = valueList.size(); - if (size == 1) { - Expression e = valueList.get(0); - if (e instanceof TableFunction) { - return ConditionInParameter.getValue(database, l, e.getValue(session)); - } + return getValue(session, left).isTrue(); + } + + private Value getValue(SessionLocal session, Value left) { + if (left.containsNull()) { + return ValueNull.INSTANCE; } boolean hasNull = false; - for (int i = 0; i < size; i++) { - Expression e = valueList.get(i); + for (Expression e : valueList) { Value r = e.getValue(session); - Value cmp = Comparison.compare(database, l, r, Comparison.EQUAL); + Value cmp = Comparison.compare(session, left, r, Comparison.EQUAL); if (cmp == ValueNull.INSTANCE) { hasNull = true; } else if (cmp == ValueBoolean.TRUE) { - return cmp; + return ValueBoolean.get(!not); } } if (hasNull) { return ValueNull.INSTANCE; } - return ValueBoolean.FALSE; + return ValueBoolean.get(not); + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; } @Override @@ -85,47 +93,19 @@ public void mapColumns(ColumnResolver 
resolver, int level, int state) { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { left = left.optimize(session); - boolean constant = left.isConstant(); - if (constant && left == ValueExpression.getNull()) { - return left; - } - int size = valueList.size(); - if (size == 1) { - Expression right = valueList.get(0); - if (right instanceof TableFunction) { - TableFunction tf = (TableFunction) right; - if (tf.getFunctionType() == Function.UNNEST) { - Expression[] args = tf.getArgs(); - if (args.length == 1) { - Expression arg = args[0]; - if (arg instanceof Parameter) { - return new ConditionInParameter(database, left, (Parameter) arg); - } - } - } - if (tf.isConstant()) { - boolean allValuesNull = true; - ResultInterface ri = right.getValue(session).getResult(); - ArrayList list = new ArrayList<>(ri.getRowCount()); - while (ri.next()) { - Value v = ri.currentRow()[0]; - if (!v.containsNull()) { - allValuesNull = false; - } - list.add(ValueExpression.get(v)); - } - return optimize2(session, constant, true, allValuesNull, list); - } - return this; - } + boolean constant = !whenOperand && left.isConstant(); + if (constant && left.isNullConstant()) { + return TypedValueExpression.UNKNOWN; } boolean allValuesConstant = true; boolean allValuesNull = true; - for (int i = 0; i < size; i++) { + TypeInfo leftType = left.getType(); + for (int i = 0, l = valueList.size(); i < l; i++) { Expression e = valueList.get(i); e = e.optimize(session); + TypeInfo.checkComparable(leftType, e.getType()); if (e.isConstant() && !e.getValue(session).containsNull()) { allValuesNull = false; } @@ -133,21 +113,21 @@ public Expression optimize(Session session) { allValuesConstant = false; } if (left instanceof ExpressionColumn && e instanceof Parameter) { - ((Parameter) e) - .setColumn(((ExpressionColumn) left).getColumn()); + ((Parameter) e).setColumn(((ExpressionColumn) left).getColumn()); } valueList.set(i, e); } return 
optimize2(session, constant, allValuesConstant, allValuesNull, valueList); } - private Expression optimize2(Session session, boolean constant, boolean allValuesConstant, boolean allValuesNull, - ArrayList values) { + private Expression optimize2(SessionLocal session, boolean constant, boolean allValuesConstant, + boolean allValuesNull, ArrayList values) { if (constant && allValuesConstant) { - return ValueExpression.get(getValue(session)); + return ValueExpression.getBoolean(getValue(session)); } if (values.size() == 1) { - return new Comparison(session, Comparison.EQUAL, left, values.get(0)).optimize(session); + return new Comparison(not ? Comparison.NOT_EQUAL : Comparison.EQUAL, left, values.get(0), whenOperand) + .optimize(session); } if (allValuesConstant && !allValuesNull) { int leftType = left.getType().getValueType(); @@ -157,16 +137,22 @@ private Expression optimize2(Session session, boolean constant, boolean allValue if (leftType == Value.ENUM && !(left instanceof ExpressionColumn)) { return this; } - Expression expr = new ConditionInConstantSet(session, left, values); - expr = expr.optimize(session); - return expr; + return new ConditionInConstantSet(session, left, not, whenOperand, values).optimize(session); } return this; } @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (!(left instanceof ExpressionColumn)) { + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new ConditionIn(left, !not, false, valueList); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (not || whenOperand || !(left instanceof ExpressionColumn)) { return; } ExpressionColumn l = (ExpressionColumn) left; @@ -175,8 +161,10 @@ public void createIndexConditions(Session session, TableFilter filter) { } if (session.getDatabase().getSettings().optimizeInList) { ExpressionVisitor visitor = ExpressionVisitor.getNotFromResolverVisitor(filter); 
+ TypeInfo colType = l.getType(); for (Expression e : valueList) { - if (!e.isEverything(visitor)) { + if (!e.isEverything(visitor) + || !TypeInfo.haveSameOrdering(colType, TypeInfo.getHigherType(colType, e.getType()))) { return; } } @@ -193,15 +181,25 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append('('); - left.getSQL(builder, alwaysQuote).append(" IN("); - writeExpressions(builder, valueList, alwaysQuote); - return builder.append("))"); + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append(" NOT"); + } + return writeExpressions(builder.append(" IN("), valueList, sqlFlags).append(')'); } @Override - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { left.updateAggregate(session, stage); for (Expression e : valueList) { e.updateAggregate(session, stage); @@ -242,10 +240,14 @@ public int getCost() { * @return null if the condition was not added, or the new condition */ Expression getAdditional(Comparison other) { - Expression add = other.getIfEquals(left); - if (add != null) { - valueList.add(add); - return this; + if (!not && !whenOperand && left.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + Expression add = other.getIfEquals(left); + if (add != null) { + ArrayList list = new ArrayList<>(valueList.size() + 1); + list.addAll(valueList); + list.add(add); + return new ConditionIn(left, false, false, list); + } } return null; } diff --git a/h2/src/main/org/h2/expression/condition/ConditionInConstantSet.java 
b/h2/src/main/org/h2/expression/condition/ConditionInConstantSet.java index b7b0bf4cea..4174e8bd15 100644 --- a/h2/src/main/org/h2/expression/condition/ConditionInConstantSet.java +++ b/h2/src/main/org/h2/expression/condition/ConditionInConstantSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.condition; @@ -8,14 +8,11 @@ import java.util.ArrayList; import java.util.TreeSet; -import org.h2.engine.Database; -import org.h2.engine.Mode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; import org.h2.index.IndexCondition; -import org.h2.message.DbException; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; import org.h2.value.TypeInfo; @@ -27,9 +24,11 @@ * Used for optimised IN(...) queries where the contents of the IN list are all * constant and of the same type. */ -public class ConditionInConstantSet extends Condition { +public final class ConditionInConstantSet extends Condition { private Expression left; + private final boolean not; + private final boolean whenOperand; private final ArrayList valueList; // HashSet cannot be used here, because we need to compare values of // different type or scale properly. @@ -45,22 +44,31 @@ public class ConditionInConstantSet extends Condition { * the expression before IN. Cannot have {@link Value#UNKNOWN} * data type and {@link Value#ENUM} type is also supported only * for {@link ExpressionColumn}. 
- * @param valueList the value list (at least two elements) + * @param not whether the result should be negated + * @param whenOperand whether this is a when operand + * @param valueList + * the value list (at least two elements); all values must be + * comparable with left value */ - public ConditionInConstantSet(Session session, Expression left, ArrayList valueList) { + ConditionInConstantSet(SessionLocal session, Expression left, boolean not, boolean whenOperand, + ArrayList valueList) { this.left = left; + this.not = not; + this.whenOperand = whenOperand; this.valueList = valueList; - Database database = session.getDatabase(); - this.valueSet = new TreeSet<>(database.getCompareMode()); - type = left.getType(); - Mode mode = database.getMode(); + this.valueSet = new TreeSet<>(session.getDatabase().getCompareMode()); + TypeInfo type = left.getType(); for (Expression expression : valueList) { - add(expression.getValue(session).convertTo(type, mode, null)); + type = TypeInfo.getHigherType(type, expression.getType()); + } + this.type = type; + for (Expression expression : valueList) { + add(expression.getValue(session), session); } } - private void add(Value v) { - if (v.containsNull()) { + private void add(Value v, SessionLocal session) { + if ((v = v.convertTo(type, session)).containsNull()) { hasNull = true; } else { valueSet.add(v); @@ -68,16 +76,32 @@ private void add(Value v) { } @Override - public Value getValue(Session session) { - Value x = left.getValue(session); - if (x.containsNull()) { - return x; + public Value getValue(SessionLocal session) { + return getValue(left.getValue(session), session); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); } - boolean result = valueSet.contains(x); + return getValue(left, session).isTrue(); + } + + private Value getValue(Value left, SessionLocal session) { + if ((left = left.convertTo(type, session)).containsNull()) 
{ + return ValueNull.INSTANCE; + } + boolean result = valueSet.contains(left); if (!result && hasNull) { return ValueNull.INSTANCE; } - return ValueBoolean.get(result); + return ValueBoolean.get(not ^ result); + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; } @Override @@ -86,14 +110,22 @@ public void mapColumns(ColumnResolver resolver, int level, int state) { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { left = left.optimize(session); return this; } @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (!(left instanceof ExpressionColumn)) { + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new ConditionInConstantSet(session, left, !not, false, valueList); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (not || whenOperand || !(left instanceof ExpressionColumn)) { return; } ExpressionColumn l = (ExpressionColumn) left; @@ -101,7 +133,10 @@ public void createIndexConditions(Session session, TableFilter filter) { return; } if (session.getDatabase().getSettings().optimizeInList) { - filter.addIndexCondition(IndexCondition.getInList(l, valueList)); + TypeInfo colType = l.getType(); + if (TypeInfo.haveSameOrdering(colType, TypeInfo.getHigherType(colType, type))) { + filter.addIndexCondition(IndexCondition.getInList(l, valueList)); + } } } @@ -111,39 +146,31 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append('('); - left.getSQL(builder, alwaysQuote).append(" IN("); - writeExpressions(builder, valueList, alwaysQuote); - return builder.append("))"); + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + 
return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); } @Override - public void updateAggregate(Session session, int stage) { + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append(" NOT"); + } + return writeExpressions(builder.append(" IN("), valueList, sqlFlags).append(')'); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { left.updateAggregate(session, stage); } @Override public boolean isEverything(ExpressionVisitor visitor) { - if (!left.isEverything(visitor)) { - return false; - } - switch (visitor.getType()) { - case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: - case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.INDEPENDENT: - case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.GET_COLUMNS1: - case ExpressionVisitor.GET_COLUMNS2: - return true; - default: - throw DbException.throwInternalError("type=" + visitor.getType()); - } + return left.isEverything(visitor); } @Override @@ -159,13 +186,16 @@ public int getCost() { * @param other the second condition * @return null if the condition was not added, or the new condition */ - Expression getAdditional(Session session, Comparison other) { - Expression add = other.getIfEquals(left); - if (add != null) { - if (add.isConstant()) { - valueList.add(add); - add(add.getValue(session).convertTo(type, session.getDatabase().getMode(), null)); - return this; + Expression getAdditional(SessionLocal session, Comparison other) { + if (!not && !whenOperand && left.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + Expression add = other.getIfEquals(left); + if (add != null) { + if (add.isConstant()) { + ArrayList list = new ArrayList<>(valueList.size() + 1); + 
list.addAll(valueList); + list.add(add); + return new ConditionInConstantSet(session, left, false, false, list); + } } } return null; diff --git a/h2/src/main/org/h2/expression/condition/ConditionInParameter.java b/h2/src/main/org/h2/expression/condition/ConditionInParameter.java index 8116320429..6bbf2f82be 100644 --- a/h2/src/main/org/h2/expression/condition/ConditionInParameter.java +++ b/h2/src/main/org/h2/expression/condition/ConditionInParameter.java @@ -1,21 +1,20 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.condition; import java.util.AbstractList; -import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; import org.h2.expression.Parameter; +import org.h2.expression.TypedValueExpression; import org.h2.expression.ValueExpression; import org.h2.index.IndexCondition; -import org.h2.result.ResultInterface; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; import org.h2.value.Value; @@ -26,7 +25,7 @@ /** * A condition with parameter as {@code = ANY(?)}. */ -public class ConditionInParameter extends Condition { +public final class ConditionInParameter extends Condition { private static final class ParameterList extends AbstractList { private final Parameter parameter; @@ -59,73 +58,83 @@ public int size() { } } - private final Database database; - private Expression left; + private boolean not; + + private boolean whenOperand; + private final Parameter parameter; /** * Gets evaluated condition value. * - * @param database database instance. + * @param session the session * @param l left value. 
+ * @param not whether the result should be negated * @param value parameter value. * @return Evaluated condition value. */ - static Value getValue(Database database, Value l, Value value) { + static Value getValue(SessionLocal session, Value l, boolean not, Value value) { boolean hasNull = false; if (value.containsNull()) { hasNull = true; - } else if (value.getValueType() == Value.RESULT_SET) { - for (ResultInterface ri = value.getResult(); ri.next();) { - Value r = ri.currentRow()[0]; - Value cmp = Comparison.compare(database, l, r, Comparison.EQUAL); - if (cmp == ValueNull.INSTANCE) { - hasNull = true; - } else if (cmp == ValueBoolean.TRUE) { - return cmp; - } - } } else { - for (Value r : ((ValueArray) value.convertTo(Value.ARRAY)).getList()) { - Value cmp = Comparison.compare(database, l, r, Comparison.EQUAL); + for (Value r : value.convertToAnyArray(session).getList()) { + Value cmp = Comparison.compare(session, l, r, Comparison.EQUAL); if (cmp == ValueNull.INSTANCE) { hasNull = true; } else if (cmp == ValueBoolean.TRUE) { - return cmp; + return ValueBoolean.get(!not); } } } if (hasNull) { return ValueNull.INSTANCE; } - return ValueBoolean.FALSE; + return ValueBoolean.get(not); } /** * Create a new {@code = ANY(?)} condition. 
* - * @param database - * the database * @param left * the expression before {@code = ANY(?)} + * @param not whether the result should be negated + * @param whenOperand whether this is a when operand * @param parameter * parameter */ - public ConditionInParameter(Database database, Expression left, Parameter parameter) { - this.database = database; + public ConditionInParameter(Expression left, boolean not, boolean whenOperand, Parameter parameter) { this.left = left; + this.not = not; + this.whenOperand = whenOperand; this.parameter = parameter; } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { Value l = left.getValue(session); if (l == ValueNull.INSTANCE) { - return l; + return ValueNull.INSTANCE; + } + return getValue(session, l, not, parameter.getValue(session)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + if (left == ValueNull.INSTANCE) { + return false; } - return getValue(database, l, parameter.getValue(session)); + return getValue(session, left, not, parameter.getValue(session)).isTrue(); + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; } @Override @@ -134,17 +143,25 @@ public void mapColumns(ColumnResolver resolver, int level, int state) { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { left = left.optimize(session); - if (left == ValueExpression.getNull()) { - return left; + if (!whenOperand && left.isNullConstant()) { + return TypedValueExpression.UNKNOWN; } return this; } @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (!(left instanceof ExpressionColumn)) { + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new ConditionInParameter(left, !not, false, parameter); + } + + @Override + public void 
createIndexConditions(SessionLocal session, TableFilter filter) { + if (not || whenOperand || !(left instanceof ExpressionColumn)) { return; } ExpressionColumn l = (ExpressionColumn) left; @@ -160,14 +177,37 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append('('); - left.getSQL(builder, alwaysQuote).append(" = ANY("); - return parameter.getSQL(builder, alwaysQuote).append("))"); + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append("NOT ("); + } + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + parameter.getSQL(builder.append(" = ANY("), sqlFlags, AUTO_PARENTHESES).append(')'); + if (not) { + builder.append(')'); + } + return builder; + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append(" NOT IN(UNNEST("); + parameter.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append("))"); + } else { + builder.append(" = ANY("); + parameter.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(')'); + } + return builder; } @Override - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { left.updateAggregate(session, stage); } diff --git a/h2/src/main/org/h2/expression/condition/ConditionInQuery.java b/h2/src/main/org/h2/expression/condition/ConditionInQuery.java new file mode 100644 index 0000000000..700aea1917 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ConditionInQuery.java @@ -0,0 +1,256 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.index.IndexCondition; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; +import org.h2.value.ValueRow; + +/** + * An IN() condition with a subquery, as in WHERE ID IN(SELECT ...) + */ +public final class ConditionInQuery extends PredicateWithSubquery { + + private Expression left; + private final boolean not; + private final boolean whenOperand; + private final boolean all; + private final int compareType; + + public ConditionInQuery(Expression left, boolean not, boolean whenOperand, Query query, boolean all, + int compareType) { + super(query); + this.left = left; + this.not = not; + this.whenOperand = whenOperand; + /* + * Need to do it now because other methods may be invoked in different + * order. 
+ */ + query.setRandomAccessResult(true); + query.setNeverLazy(true); + query.setDistinctIfPossible(); + this.all = all; + this.compareType = compareType; + } + + @Override + public Value getValue(SessionLocal session) { + return getValue(session, left.getValue(session)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + return getValue(session, left).isTrue(); + } + + private Value getValue(SessionLocal session, Value left) { + query.setSession(session); + LocalResult rows = (LocalResult) query.query(0); + if (!rows.hasNext()) { + return ValueBoolean.get(not ^ all); + } + if ((compareType & ~1) == Comparison.EQUAL_NULL_SAFE) { + return getNullSafeValueSlow(session, rows, left); + } + if (left.containsNull()) { + return ValueNull.INSTANCE; + } + if (all || compareType != Comparison.EQUAL || !session.getDatabase().getSettings().optimizeInSelect) { + return getValueSlow(session, rows, left); + } + int columnCount = query.getColumnCount(); + if (columnCount != 1) { + Value[] leftValue = left.convertToAnyRow().getList(); + if (columnCount == leftValue.length && rows.containsDistinct(leftValue)) { + return ValueBoolean.get(!not); + } + } else { + TypeInfo colType = rows.getColumnType(0); + if (colType.getValueType() == Value.NULL) { + return ValueNull.INSTANCE; + } + if (left.getValueType() == Value.ROW) { + left = ((ValueRow) left).getList()[0]; + } + if (rows.containsDistinct(new Value[] { left })) { + return ValueBoolean.get(!not); + } + } + if (rows.containsNull()) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(not); + } + + private Value getValueSlow(SessionLocal session, ResultInterface rows, Value l) { + // this only returns the correct result if the result has at least one + // row, and if l is not null + boolean simple = l.getValueType() != Value.ROW && query.getColumnCount() == 1; + boolean hasNull = false; + ValueBoolean searched = 
ValueBoolean.get(!all); + while (rows.next()) { + Value[] currentRow = rows.currentRow(); + Value cmp = Comparison.compare(session, l, simple ? currentRow[0] : ValueRow.get(currentRow), + compareType); + if (cmp == ValueNull.INSTANCE) { + hasNull = true; + } else if (cmp == searched) { + return ValueBoolean.get(not == all); + } + } + if (hasNull) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(not ^ all); + } + + private Value getNullSafeValueSlow(SessionLocal session, ResultInterface rows, Value l) { + boolean simple = l.getValueType() != Value.ROW && query.getColumnCount() == 1; + boolean searched = all == (compareType == Comparison.NOT_EQUAL_NULL_SAFE); + while (rows.next()) { + Value[] currentRow = rows.currentRow(); + if (session.areEqual(l, simple ? currentRow[0] : ValueRow.get(currentRow)) == searched) { + return ValueBoolean.get(not == all); + } + } + return ValueBoolean.get(not ^ all); + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new ConditionInQuery(left, !not, false, query, all, compareType); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + super.mapColumns(resolver, level, state); + } + + @Override + public Expression optimize(SessionLocal session) { + super.optimize(session); + left = left.optimize(session); + TypeInfo.checkComparable(left.getType(), query.getRowDataType()); + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + super.setEvaluatable(tableFilter, b); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + boolean outerNot = not && (all || compareType != Comparison.EQUAL); + if 
(outerNot) { + builder.append("NOT ("); + } + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + getWhenSQL(builder, sqlFlags); + if (outerNot) { + builder.append(')'); + } + return builder; + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (all) { + builder.append(Comparison.COMPARE_TYPES[compareType]).append(" ALL"); + } else if (compareType == Comparison.EQUAL) { + if (not) { + builder.append(" NOT"); + } + builder.append(" IN"); + } else { + builder.append(' ').append(Comparison.COMPARE_TYPES[compareType]).append(" ANY"); + } + return super.getUnenclosedSQL(builder, sqlFlags); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + super.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && super.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost() + super.getCost(); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (!session.getDatabase().getSettings().optimizeInList) { + return; + } + if (not || compareType != Comparison.EQUAL) { + return; + } + if (query.getColumnCount() != 1) { + return; + } + if (!(left instanceof ExpressionColumn)) { + return; + } + TypeInfo colType = left.getType(); + TypeInfo queryType = query.getExpressions().get(0).getType(); + if (!TypeInfo.haveSameOrdering(colType, TypeInfo.getHigherType(colType, queryType))) { + return; + } + int leftType = colType.getValueType(); + if (!DataType.hasTotalOrdering(leftType) && leftType != queryType.getValueType()) { + return; + } + ExpressionColumn l = (ExpressionColumn) left; + if (filter != l.getTableFilter()) { + return; + } + ExpressionVisitor visitor = ExpressionVisitor.getNotFromResolverVisitor(filter); + if (!query.isEverything(visitor)) { + return; + } + 
filter.addIndexCondition(IndexCondition.getInQuery(l, query)); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionInSelect.java b/h2/src/main/org/h2/expression/condition/ConditionInSelect.java deleted file mode 100644 index baa6698883..0000000000 --- a/h2/src/main/org/h2/expression/condition/ConditionInSelect.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression.condition; - -import org.h2.api.ErrorCode; -import org.h2.command.dml.Query; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; -import org.h2.expression.ExpressionVisitor; -import org.h2.index.IndexCondition; -import org.h2.message.DbException; -import org.h2.result.LocalResult; -import org.h2.result.ResultInterface; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.StringUtils; -import org.h2.value.TypeInfo; -import org.h2.value.Value; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueNull; -import org.h2.value.ValueRow; - -/** - * An 'in' condition with a subquery, as in WHERE ID IN(SELECT ...) - */ -public class ConditionInSelect extends Condition { - - private final Database database; - private Expression left; - private final Query query; - private final boolean all; - private final int compareType; - - public ConditionInSelect(Database database, Expression left, Query query, - boolean all, int compareType) { - this.database = database; - this.left = left; - this.query = query; - /* - * Need to do it now because other methods may be invoked in different - * order. 
- */ - query.setRandomAccessResult(true); - this.all = all; - this.compareType = compareType; - } - - @Override - public Value getValue(Session session) { - query.setSession(session); - // We need a LocalResult - query.setNeverLazy(true); - query.setDistinctIfPossible(); - LocalResult rows = (LocalResult) query.query(0); - Value l = left.getValue(session); - if (!rows.hasNext()) { - return ValueBoolean.get(all); - } else if (l.containsNull()) { - return ValueNull.INSTANCE; - } - if (!database.getSettings().optimizeInSelect) { - return getValueSlow(rows, l); - } - if (all || (compareType != Comparison.EQUAL && - compareType != Comparison.EQUAL_NULL_SAFE)) { - return getValueSlow(rows, l); - } - int columnCount = query.getColumnCount(); - if (columnCount != 1) { - l = l.convertTo(Value.ROW); - Value[] leftValue = ((ValueRow) l).getList(); - if (columnCount == leftValue.length && rows.containsDistinct(leftValue)) { - return ValueBoolean.TRUE; - } - } else { - TypeInfo colType = rows.getColumnType(0); - if (colType.getValueType() == Value.NULL) { - return ValueBoolean.FALSE; - } - if (l.getValueType() == Value.ROW) { - Value[] leftList = ((ValueRow) l).getList(); - if (leftList.length != 1) { - throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); - } - l = leftList[0]; - } - l = l.convertTo(colType, database.getMode(), null); - if (rows.containsDistinct(new Value[] { l })) { - return ValueBoolean.TRUE; - } - } - if (rows.containsNull()) { - return ValueNull.INSTANCE; - } - return ValueBoolean.FALSE; - } - - private Value getValueSlow(ResultInterface rows, Value l) { - // this only returns the correct result if the result has at least one - // row, and if l is not null - boolean hasNull = false; - if (all) { - while (rows.next()) { - Value cmp = compare(l, rows); - if (cmp == ValueNull.INSTANCE) { - hasNull = true; - } else if (cmp == ValueBoolean.FALSE) { - return cmp; - } - } - } else { - while (rows.next()) { - Value cmp = compare(l, rows); - if (cmp == 
ValueNull.INSTANCE) { - hasNull = true; - } else if (cmp == ValueBoolean.TRUE) { - return cmp; - } - } - } - if (hasNull) { - return ValueNull.INSTANCE; - } - return ValueBoolean.get(all); - } - - private Value compare(Value l, ResultInterface rows) { - Value[] currentRow = rows.currentRow(); - Value r = l.getValueType() != Value.ROW && query.getColumnCount() == 1 ? currentRow[0] - : ValueRow.get(currentRow); - return Comparison.compare(database, l, r, compareType); - } - - @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - left.mapColumns(resolver, level, state); - query.mapColumns(resolver, level + 1); - } - - @Override - public Expression optimize(Session session) { - left = left.optimize(session); - session.optimizeQueryExpression(query); - // Can not optimize: the data may change - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - query.setEvaluatable(tableFilter, b); - } - - @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append('('); - left.getSQL(builder, alwaysQuote).append(' '); - if (all) { - builder.append(Comparison.getCompareOperator(compareType)). - append(" ALL"); - } else { - if (compareType == Comparison.EQUAL) { - builder.append("IN"); - } else { - builder.append(Comparison.getCompareOperator(compareType)). 
- append(" ANY"); - } - } - builder.append("(\n"); - return StringUtils.indent(builder, query.getPlanSQL(alwaysQuote), 4, false).append("))"); - } - - @Override - public void updateAggregate(Session session, int stage) { - left.updateAggregate(session, stage); - query.updateAggregate(session, stage); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return left.isEverything(visitor) && query.isEverything(visitor); - } - - @Override - public int getCost() { - return left.getCost() + query.getCostAsExpression(); - } - - @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (!session.getDatabase().getSettings().optimizeInList) { - return; - } - if (compareType != Comparison.EQUAL) { - return; - } - if (query.getColumnCount() != 1) { - return; - } - if (!(left instanceof ExpressionColumn)) { - return; - } - ExpressionColumn l = (ExpressionColumn) left; - if (filter != l.getTableFilter()) { - return; - } - ExpressionVisitor visitor = ExpressionVisitor.getNotFromResolverVisitor(filter); - if (!query.isEverything(visitor)) { - return; - } - filter.addIndexCondition(IndexCondition.getInQuery(l, query)); - } - -} diff --git a/h2/src/main/org/h2/expression/condition/ConditionLocalAndGlobal.java b/h2/src/main/org/h2/expression/condition/ConditionLocalAndGlobal.java new file mode 100644 index 0000000000..032604b6bb --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ConditionLocalAndGlobal.java @@ -0,0 +1,152 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * A global condition or combination of local and global conditions. May be used + * only as a top-level expression in a WHERE, HAVING, or QUALIFY clause of a + * SELECT. + */ +public class ConditionLocalAndGlobal extends Condition { + + private Expression local, global; + + public ConditionLocalAndGlobal(Expression local, Expression global) { + if (global == null) { + throw DbException.getInternalError(); + } + this.local = local; + this.global = global; + } + + @Override + public boolean needParentheses() { + return local != null || global.needParentheses(); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (local == null) { + return global.getUnenclosedSQL(builder, sqlFlags); + } + local.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + builder.append("\n _LOCAL_AND_GLOBAL_ "); + return global.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (local != null) { + local.createIndexConditions(session, filter); + } + global.createIndexConditions(session, filter); + } + + @Override + public Value getValue(SessionLocal session) { + if (local == null) { + return global.getValue(session); + } + Value l = local.getValue(session), r; + if (l.isFalse() || (r = global.getValue(session)).isFalse()) { + return ValueBoolean.FALSE; + } + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return ValueBoolean.TRUE; + } + + @Override + public Expression optimize(SessionLocal session) { + global = 
global.optimize(session); + if (local != null) { + local = local.optimize(session); + Expression e = ConditionAndOr.optimizeIfConstant(session, ConditionAndOr.AND, local, global); + if (e != null) { + return e; + } + } + return this; + } + + @Override + public void addFilterConditions(TableFilter filter) { + if (local != null) { + local.addFilterConditions(filter); + } + global.addFilterConditions(filter); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + if (local != null) { + local.mapColumns(resolver, level, state); + } + global.mapColumns(resolver, level, state); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + if (local != null) { + local.setEvaluatable(tableFilter, b); + } + global.setEvaluatable(tableFilter, b); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + if (local != null) { + local.updateAggregate(session, stage); + } + global.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return (local == null || local.isEverything(visitor)) && global.isEverything(visitor); + } + + @Override + public int getCost() { + int cost = global.getCost(); + if (local != null) { + cost += local.getCost(); + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return local == null ? 1 : 2; + } + + @Override + public Expression getSubexpression(int index) { + switch (index) { + case 0: + return local != null ? 
local : global; + case 1: + if (local != null) { + return global; + } + //$FALL-THROUGH$ + default: + throw new IndexOutOfBoundsException(); + } + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionNot.java b/h2/src/main/org/h2/expression/condition/ConditionNot.java index 6544ab4ea6..215926c059 100644 --- a/h2/src/main/org/h2/expression/condition/ConditionNot.java +++ b/h2/src/main/org/h2/expression/condition/ConditionNot.java @@ -1,13 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.condition; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; +import org.h2.expression.TypedValueExpression; import org.h2.expression.ValueExpression; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; @@ -26,17 +27,17 @@ public ConditionNot(Expression condition) { } @Override - public Expression getNotIfPossible(Session session) { - return condition; + public Expression getNotIfPossible(SessionLocal session) { + return castToBoolean(session, condition.optimize(session)); } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { Value v = condition.getValue(session); if (v == ValueNull.INSTANCE) { return v; } - return v.convertTo(Value.BOOLEAN).negate(); + return v.convertToBoolean().negate(); } @Override @@ -45,7 +46,7 @@ public void mapColumns(ColumnResolver resolver, int level, int state) { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { Expression e2 = condition.getNotIfPossible(session); if (e2 != null) { return e2.optimize(session); @@ -54,9 +55,9 @@ 
public Expression optimize(Session session) { if (expr.isConstant()) { Value v = expr.getValue(session); if (v == ValueNull.INSTANCE) { - return ValueExpression.getNull(); + return TypedValueExpression.UNKNOWN; } - return ValueExpression.get(v.convertTo(Value.BOOLEAN).negate()); + return ValueExpression.getBoolean(!v.getBoolean()); } condition = expr; return this; @@ -68,28 +69,18 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append("(NOT "); - return condition.getSQL(builder, alwaysQuote).append(')'); + public boolean needParentheses() { + return true; } @Override - public void updateAggregate(Session session, int stage) { - condition.updateAggregate(session, stage); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return condition.getSQL(builder.append("NOT "), sqlFlags, AUTO_PARENTHESES); } @Override - public void addFilterConditions(TableFilter filter, boolean outerJoin) { - if (outerJoin) { - // can not optimize: - // select * from test t1 left join test t2 on t1.id = t2.id where - // not t2.id is not null - // to - // select * from test t1 left join test t2 on t1.id = t2.id and - // t2.id is not null - return; - } - super.addFilterConditions(filter, outerJoin); + public void updateAggregate(SessionLocal session, int stage) { + condition.updateAggregate(session, stage); } @Override diff --git a/h2/src/main/org/h2/expression/condition/ExistsPredicate.java b/h2/src/main/org/h2/expression/condition/ExistsPredicate.java new file mode 100644 index 0000000000..be487b4342 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ExistsPredicate.java @@ -0,0 +1,33 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; + +/** + * Exists predicate as in EXISTS(SELECT ...) + */ +public class ExistsPredicate extends PredicateWithSubquery { + + public ExistsPredicate(Query query) { + super(query); + } + + @Override + public Value getValue(SessionLocal session) { + query.setSession(session); + return ValueBoolean.get(query.exists()); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return super.getUnenclosedSQL(builder.append("EXISTS"), sqlFlags); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/IsJsonPredicate.java b/h2/src/main/org/h2/expression/condition/IsJsonPredicate.java new file mode 100644 index 0000000000..67b56ea0a3 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/IsJsonPredicate.java @@ -0,0 +1,217 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.util.json.JSONBytesSource; +import org.h2.util.json.JSONItemType; +import org.h2.util.json.JSONStringSource; +import org.h2.util.json.JSONValidationTarget; +import org.h2.util.json.JSONValidationTargetWithUniqueKeys; +import org.h2.util.json.JSONValidationTargetWithoutUniqueKeys; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueJson; +import org.h2.value.ValueNull; + +/** + * IS JSON predicate. 
+ */ +public final class IsJsonPredicate extends Condition { + + private Expression left; + private final boolean not; + private final boolean whenOperand; + private final boolean withUniqueKeys; + private final JSONItemType itemType; + + public IsJsonPredicate(Expression left, boolean not, boolean whenOperand, boolean withUniqueKeys, + JSONItemType itemType) { + this.left = left; + this.whenOperand = whenOperand; + this.not = not; + this.withUniqueKeys = withUniqueKeys; + this.itemType = itemType; + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + builder.append(" IS"); + if (not) { + builder.append(" NOT"); + } + builder.append(" JSON"); + switch (itemType) { + case VALUE: + break; + case ARRAY: + builder.append(" ARRAY"); + break; + case OBJECT: + builder.append(" OBJECT"); + break; + case SCALAR: + builder.append(" SCALAR"); + break; + default: + throw DbException.getInternalError("itemType=" + itemType); + } + if (withUniqueKeys) { + builder.append(" WITH UNIQUE KEYS"); + } + return builder; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (!whenOperand && left.isConstant()) { + return ValueExpression.getBoolean(getValue(session)); + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + if (l == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(getValue(l)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + if (left == ValueNull.INSTANCE) { + return false; + } + return getValue(left); + } + + private boolean 
getValue(Value left) { + boolean result; + switch (left.getValueType()) { + case Value.VARBINARY: + case Value.BINARY: + case Value.BLOB: { + byte[] bytes = left.getBytesNoCopy(); + JSONValidationTarget target = withUniqueKeys ? new JSONValidationTargetWithUniqueKeys() + : new JSONValidationTargetWithoutUniqueKeys(); + try { + result = itemType.includes(JSONBytesSource.parse(bytes, target)) ^ not; + } catch (RuntimeException ex) { + result = not; + } + break; + } + case Value.JSON: { + JSONItemType valueItemType = ((ValueJson) left).getItemType(); + if (!itemType.includes(valueItemType)) { + result = not; + break; + } else if (!withUniqueKeys || valueItemType == JSONItemType.SCALAR) { + result = !not; + break; + } + } + //$FALL-THROUGH$ + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.CHAR: + case Value.CLOB: { + String string = left.getString(); + JSONValidationTarget target = withUniqueKeys ? new JSONValidationTargetWithUniqueKeys() + : new JSONValidationTargetWithoutUniqueKeys(); + try { + result = itemType.includes(JSONStringSource.parse(string, target)) ^ not; + } catch (RuntimeException ex) { + result = not; + } + break; + } + default: + result = not; + } + return result; + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new IsJsonPredicate(left, !not, false, withUniqueKeys, itemType); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor); + } + + 
@Override + public int getCost() { + int cost = left.getCost(); + if (left.getType().getValueType() == Value.JSON && (!withUniqueKeys || itemType == JSONItemType.SCALAR)) { + cost++; + } else { + cost += 10; + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return 1; + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return left; + } + throw new IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/NullPredicate.java b/h2/src/main/org/h2/expression/condition/NullPredicate.java new file mode 100644 index 0000000000..46ae3bfcd8 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/NullPredicate.java @@ -0,0 +1,153 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.ArrayList; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionList; +import org.h2.expression.ValueExpression; +import org.h2.index.IndexCondition; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; +import org.h2.value.ValueRow; + +/** + * Null predicate (IS [NOT] NULL). + */ +public final class NullPredicate extends SimplePredicate { + + private boolean optimized; + + public NullPredicate(Expression left, boolean not, boolean whenOperand) { + super(left, not, whenOperand); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + return builder.append(not ? 
" IS NOT NULL" : " IS NULL"); + } + + @Override + public Expression optimize(SessionLocal session) { + if (optimized) { + return this; + } + Expression o = super.optimize(session); + if (o != this) { + return o; + } + optimized = true; + if (!whenOperand && left instanceof ExpressionList) { + ExpressionList list = (ExpressionList) left; + if (!list.isArray()) { + for (int i = 0, count = list.getSubexpressionCount(); i < count; i++) { + if (list.getSubexpression(i).isNullConstant()) { + if (not) { + return ValueExpression.FALSE; + } + ArrayList newList = new ArrayList<>(count - 1); + for (int j = 0; j < i; j++) { + newList.add(list.getSubexpression(j)); + } + for (int j = i + 1; j < count; j++) { + Expression e = list.getSubexpression(j); + if (!e.isNullConstant()) { + newList.add(e); + } + } + left = newList.size() == 1 ? newList.get(0) // + : new ExpressionList(newList.toArray(new Expression[0]), false); + break; + } + } + } + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + return ValueBoolean.get(getValue(left.getValue(session))); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + return getValue(left); + } + + private boolean getValue(Value left) { + if (left.getType().getValueType() == Value.ROW) { + for (Value v : ((ValueRow) left).getList()) { + if (v != ValueNull.INSTANCE ^ not) { + return false; + } + } + return true; + } + return left == ValueNull.INSTANCE ^ not; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + Expression o = optimize(session); + if (o != this) { + return o.getNotIfPossible(session); + } + switch (left.getType().getValueType()) { + case Value.UNKNOWN: + case Value.ROW: + return null; + } + return new NullPredicate(left, !not, false); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { 
+ if (not || whenOperand || !filter.getTable().isQueryComparable()) { + return; + } + if (left instanceof ExpressionColumn) { + createNullIndexCondition(filter, (ExpressionColumn) left); + } else if (left instanceof ExpressionList) { + ExpressionList list = (ExpressionList) left; + if (!list.isArray()) { + for (int i = 0, count = list.getSubexpressionCount(); i < count; i++) { + Expression e = list.getSubexpression(i); + if (e instanceof ExpressionColumn) { + createNullIndexCondition(filter, (ExpressionColumn) e); + } + } + } + } + } + + private static void createNullIndexCondition(TableFilter filter, ExpressionColumn c) { + /* + * Columns with row value data type aren't valid, but perform such check + * to be sure. + */ + if (filter == c.getTableFilter() && c.getType().getValueType() != Value.ROW) { + filter.addIndexCondition(IndexCondition.get(Comparison.EQUAL_NULL_SAFE, c, ValueExpression.NULL)); + } + } + +} diff --git a/h2/src/main/org/h2/expression/condition/PredicateWithSubquery.java b/h2/src/main/org/h2/expression/condition/PredicateWithSubquery.java new file mode 100644 index 0000000000..8065315a72 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/PredicateWithSubquery.java @@ -0,0 +1,66 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.util.StringUtils; + +/** + * Base class for predicates with a subquery. + */ +abstract class PredicateWithSubquery extends Condition { + + /** + * The subquery. 
+ */ + final Query query; + + PredicateWithSubquery(Query query) { + this.query = query; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + query.mapColumns(resolver, level + 1); + } + + @Override + public Expression optimize(SessionLocal session) { + query.prepare(); + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + query.setEvaluatable(tableFilter, value); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return StringUtils.indent(builder.append('('), query.getPlanSQL(sqlFlags), 4, false).append(')'); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + query.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return query.isEverything(visitor); + } + + @Override + public int getCost() { + return query.getCostAsExpression(); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/SimplePredicate.java b/h2/src/main/org/h2/expression/condition/SimplePredicate.java new file mode 100644 index 0000000000..6a23513a85 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/SimplePredicate.java @@ -0,0 +1,98 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.ValueExpression; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; + +/** + * Base class for simple predicates. + */ +public abstract class SimplePredicate extends Condition { + + /** + * The left hand side of the expression. + */ + Expression left; + + /** + * Whether it is a "not" condition (e.g. "is not null"). 
+ */ + final boolean not; + + /** + * Where this is the when operand of the simple case. + */ + final boolean whenOperand; + + SimplePredicate(Expression left, boolean not, boolean whenOperand) { + this.left = left; + this.not = not; + this.whenOperand = whenOperand; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (!whenOperand && left.isConstant()) { + return ValueExpression.getBoolean(getValue(session)); + } + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + } + + @Override + public final boolean needParentheses() { + return true; + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost() + 1; + } + + @Override + public int getSubexpressionCount() { + return 1; + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return left; + } + throw new IndexOutOfBoundsException(); + } + + @Override + public final boolean isWhenConditionOperand() { + return whenOperand; + } + +} diff --git a/h2/src/main/org/h2/expression/condition/TypePredicate.java b/h2/src/main/org/h2/expression/condition/TypePredicate.java new file mode 100644 index 0000000000..74ce12ee23 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/TypePredicate.java @@ -0,0 +1,90 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.Arrays; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * Type predicate (IS [NOT] OF). + */ +public final class TypePredicate extends SimplePredicate { + + private final TypeInfo[] typeList; + private int[] valueTypes; + + public TypePredicate(Expression left, boolean not, boolean whenOperand, TypeInfo[] typeList) { + super(left, not, whenOperand); + this.typeList = typeList; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + builder.append(" IS"); + if (not) { + builder.append(" NOT"); + } + builder.append(" OF ("); + for (int i = 0; i < typeList.length; i++) { + if (i > 0) { + builder.append(", "); + } + typeList[i].getSQL(builder, sqlFlags); + } + return builder.append(')'); + } + + @Override + public Expression optimize(SessionLocal session) { + int count = typeList.length; + valueTypes = new int[count]; + for (int i = 0; i < count; i++) { + valueTypes[i] = typeList[i].getValueType(); + } + Arrays.sort(valueTypes); + return super.optimize(session); + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + if (l == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(Arrays.binarySearch(valueTypes, l.getValueType()) >= 0 ^ not); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + if (left == ValueNull.INSTANCE) { + return false; + } + return Arrays.binarySearch(valueTypes, left.getValueType()) >= 0 ^ not; + } + + 
@Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new TypePredicate(left, !not, false, typeList); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/UniquePredicate.java b/h2/src/main/org/h2/expression/condition/UniquePredicate.java new file mode 100644 index 0000000000..745e242fe9 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/UniquePredicate.java @@ -0,0 +1,102 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.Arrays; + +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.result.LocalResult; +import org.h2.result.ResultTarget; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * Unique predicate as in UNIQUE(SELECT ...) + */ +public class UniquePredicate extends PredicateWithSubquery { + + private static final class Target implements ResultTarget { + + private final int columnCount; + + private final LocalResult result; + + boolean hasDuplicates; + + Target(int columnCount, LocalResult result) { + this.columnCount = columnCount; + this.result = result; + } + + @Override + public void limitsWereApplied() { + // Nothing to do + } + + @Override + public long getRowCount() { + // Not required + return 0L; + } + + @Override + public void addRow(Value... 
values) { + if (hasDuplicates) { + return; + } + for (int i = 0; i < columnCount; i++) { + if (values[i] == ValueNull.INSTANCE) { + return; + } + } + if (values.length != columnCount) { + values = Arrays.copyOf(values, columnCount); + } + long expected = result.getRowCount() + 1; + result.addRow(values); + if (expected != result.getRowCount()) { + hasDuplicates = true; + result.close(); + } + } + } + + public UniquePredicate(Query query) { + super(query); + } + + @Override + public Expression optimize(SessionLocal session) { + super.optimize(session); + if (query.isStandardDistinct()) { + return ValueExpression.TRUE; + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + query.setSession(session); + int columnCount = query.getColumnCount(); + LocalResult result = new LocalResult(session, + query.getExpressions().toArray(new Expression[0]), columnCount, columnCount); + result.setDistinct(); + Target target = new Target(columnCount, result); + query.query(Integer.MAX_VALUE, target); + result.close(); + return ValueBoolean.get(!target.hasDuplicates); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return super.getUnenclosedSQL(builder.append("UNIQUE"), sqlFlags); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/package.html b/h2/src/main/org/h2/expression/condition/package.html index e69324f359..b8c56e2158 100644 --- a/h2/src/main/org/h2/expression/condition/package.html +++ b/h2/src/main/org/h2/expression/condition/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/expression/function/ArrayFunction.java b/h2/src/main/org/h2/expression/function/ArrayFunction.java new file mode 100644 index 0000000000..ff9798d0a4 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/ArrayFunction.java @@ -0,0 +1,176 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.Arrays; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.engine.Mode.ModeEnum; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueCollectionBase; +import org.h2.value.ValueNull; + +/** + * An array function. + */ +public final class ArrayFunction extends FunctionN { + + /** + * TRIM_ARRAY(). + */ + public static final int TRIM_ARRAY = 0; + + /** + * ARRAY_CONTAINS() (non-standard). + */ + public static final int ARRAY_CONTAINS = TRIM_ARRAY + 1; + + /** + * ARRAY_SLICE() (non-standard). + */ + public static final int ARRAY_SLICE = ARRAY_CONTAINS + 1; + + private static final String[] NAMES = { // + "TRIM_ARRAY", "ARRAY_CONTAINS", "ARRAY_SLICE" // + }; + + private final int function; + + public ArrayFunction(Expression arg1, Expression arg2, Expression arg3, int function) { + super(arg3 == null ? 
new Expression[] { arg1, arg2 } : new Expression[] { arg1, arg2, arg3 }); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v1 = args[0].getValue(session), v2 = args[1].getValue(session); + switch (function) { + case TRIM_ARRAY: { + if (v2 == ValueNull.INSTANCE) { + v1 = ValueNull.INSTANCE; + break; + } + int trim = v2.getInt(); + if (trim < 0) { + // This exception should be thrown even when array is null + throw DbException.get(ErrorCode.ARRAY_ELEMENT_ERROR_2, Integer.toString(trim), // + "0..CARDINALITY(array)"); + } + if (v1 == ValueNull.INSTANCE) { + break; + } + final ValueArray array = v1.convertToAnyArray(session); + Value[] elements = array.getList(); + int length = elements.length; + if (trim > length) { + throw DbException.get(ErrorCode.ARRAY_ELEMENT_ERROR_2, Integer.toString(trim), "0.." + length); + } else if (trim == 0) { + v1 = array; + } else { + v1 = ValueArray.get(array.getComponentType(), Arrays.copyOf(elements, length - trim), session); + } + break; + } + case ARRAY_CONTAINS: { + int t = v1.getValueType(); + if (t == Value.ARRAY || t == Value.ROW) { + Value[] list = ((ValueCollectionBase) v1).getList(); + v1 = ValueBoolean.FALSE; + for (Value v : list) { + if (session.areEqual(v, v2)) { + v1 = ValueBoolean.TRUE; + break; + } + } + } else { + v1 = ValueNull.INSTANCE; + } + break; + } + case ARRAY_SLICE: { + Value v3; + if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE + || (v3 = args[2].getValue(session)) == ValueNull.INSTANCE) { + v1 = ValueNull.INSTANCE; + break; + } + ValueArray array = v1.convertToAnyArray(session); + // SQL is 1-based + int index1 = v2.getInt() - 1; + // 1-based and inclusive as postgreSQL (-1+1) + int index2 = v3.getInt(); + // https://www.postgresql.org/docs/current/arrays.html#ARRAYS-ACCESSING + // For historical reasons postgreSQL ignore invalid indexes + final boolean isPG = session.getMode().getEnum() == ModeEnum.PostgreSQL; + if (index1 > index2) { + v1 = isPG 
? ValueArray.get(array.getComponentType(), Value.EMPTY_VALUES, session) : ValueNull.INSTANCE; + break; + } + if (index1 < 0) { + if (isPG) { + index1 = 0; + } else { + v1 = ValueNull.INSTANCE; + break; + } + } + if (index2 > array.getList().length) { + if (isPG) { + index2 = array.getList().length; + } else { + v1 = ValueNull.INSTANCE; + break; + } + } + v1 = ValueArray.get(array.getComponentType(), Arrays.copyOfRange(array.getList(), index1, index2), // + session); + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + switch (function) { + case TRIM_ARRAY: + case ARRAY_SLICE: { + Expression arg = args[0]; + type = arg.getType(); + int t = type.getValueType(); + if (t != Value.ARRAY && t != Value.NULL) { + throw DbException.getInvalidExpressionTypeException(getName() + " array argument", arg); + } + break; + } + case ARRAY_CONTAINS: + type = TypeInfo.TYPE_BOOLEAN; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/BitFunction.java b/h2/src/main/org/h2/expression/function/BitFunction.java new file mode 100644 index 0000000000..7172ff8b66 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/BitFunction.java @@ -0,0 +1,724 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.Arrays; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.aggregate.Aggregate; +import org.h2.expression.aggregate.AggregateType; +import org.h2.message.DbException; +import org.h2.util.Bits; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBinary; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueInteger; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueTinyint; +import org.h2.value.ValueVarbinary; + +/** + * A bitwise function. + */ +public final class BitFunction extends Function1_2 { + + /** + * BITAND() (non-standard). + */ + public static final int BITAND = 0; + + /** + * BITOR() (non-standard). + */ + public static final int BITOR = BITAND + 1; + + /** + * BITXOR() (non-standard). + */ + public static final int BITXOR = BITOR + 1; + + /** + * BITNOT() (non-standard). + */ + public static final int BITNOT = BITXOR + 1; + + /** + * BITNAND() (non-standard). + */ + public static final int BITNAND = BITNOT + 1; + + /** + * BITNOR() (non-standard). + */ + public static final int BITNOR = BITNAND + 1; + + /** + * BITXNOR() (non-standard). + */ + public static final int BITXNOR = BITNOR + 1; + + /** + * BITGET() (non-standard). + */ + public static final int BITGET = BITXNOR + 1; + + /** + * BITCOUNT() (non-standard). + */ + public static final int BITCOUNT = BITGET + 1; + + /** + * LSHIFT() (non-standard). + */ + public static final int LSHIFT = BITCOUNT + 1; + + /** + * RSHIFT() (non-standard). + */ + public static final int RSHIFT = LSHIFT + 1; + + /** + * ULSHIFT() (non-standard). + */ + public static final int ULSHIFT = RSHIFT + 1; + + /** + * URSHIFT() (non-standard). 
+ */ + public static final int URSHIFT = ULSHIFT + 1; + + /** + * ROTATELEFT() (non-standard). + */ + public static final int ROTATELEFT = URSHIFT + 1; + + /** + * ROTATERIGHT() (non-standard). + */ + public static final int ROTATERIGHT = ROTATELEFT + 1; + + private static final String[] NAMES = { // + "BITAND", "BITOR", "BITXOR", "BITNOT", "BITNAND", "BITNOR", "BITXNOR", "BITGET", "BITCOUNT", "LSHIFT", + "RSHIFT", "ULSHIFT", "URSHIFT", "ROTATELEFT", "ROTATERIGHT" // + }; + + private final int function; + + public BitFunction(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + switch (function) { + case BITGET: + return bitGet(v1, v2); + case BITCOUNT: + return bitCount(v1); + case LSHIFT: + return shift(v1, v2.getLong(), false); + case RSHIFT: { + long offset = v2.getLong(); + return shift(v1, offset != Long.MIN_VALUE ? -offset : Long.MAX_VALUE, false); + } + case ULSHIFT: + return shift(v1, v2.getLong(), true); + case URSHIFT: + return shift(v1, -v2.getLong(), true); + case ROTATELEFT: + return rotate(v1, v2.getLong(), false); + case ROTATERIGHT: + return rotate(v1, v2.getLong(), true); + } + return getBitwise(function, type, v1, v2); + } + + private static ValueBoolean bitGet(Value v1, Value v2) { + long offset = v2.getLong(); + boolean b; + if (offset >= 0L) { + switch (v1.getValueType()) { + case Value.BINARY: + case Value.VARBINARY: { + byte[] bytes = v1.getBytesNoCopy(); + int bit = (int) (offset & 0x7); + offset >>>= 3; + b = offset < bytes.length && (bytes[(int) offset] & (1 << bit)) != 0; + break; + } + case Value.TINYINT: + b = offset < 8 && (v1.getByte() & (1 << offset)) != 0; + break; + case Value.SMALLINT: + b = offset < 16 && (v1.getShort() & (1 << offset)) != 0; + break; + case Value.INTEGER: + b = offset < 32 && (v1.getInt() & (1 << offset)) != 0; + break; + case Value.BIGINT: + b = (v1.getLong() & (1L << offset)) 
!= 0; + break; + default: + throw DbException.getInvalidValueException("bit function parameter", v1.getTraceSQL()); + } + } else { + b = false; + } + return ValueBoolean.get(b); + } + + private static ValueBigint bitCount(Value v1) { + long c; + switch (v1.getValueType()) { + case Value.BINARY: + case Value.VARBINARY: { + byte[] bytes = v1.getBytesNoCopy(); + int l = bytes.length; + c = 0L; + int blocks = l >>> 3; + for (int i = 0; i < blocks; i++) { + c += Long.bitCount(Bits.readLong(bytes, i)); + } + for (int i = blocks << 3; i < l; i++) { + c += Integer.bitCount(bytes[i] & 0xff); + } + break; + } + case Value.TINYINT: + c = Integer.bitCount(v1.getByte() & 0xff); + break; + case Value.SMALLINT: + c = Integer.bitCount(v1.getShort() & 0xffff); + break; + case Value.INTEGER: + c = Integer.bitCount(v1.getInt()); + break; + case Value.BIGINT: + c = Long.bitCount(v1.getLong()); + break; + default: + throw DbException.getInvalidValueException("bit function parameter", v1.getTraceSQL()); + } + return ValueBigint.get(c); + } + + private static Value shift(Value v1, long offset, boolean unsigned) { + if (offset == 0L) { + return v1; + } + int vt = v1.getValueType(); + switch (vt) { + case Value.BINARY: + case Value.VARBINARY: { + byte[] bytes = v1.getBytesNoCopy(); + int length = bytes.length; + if (length == 0) { + return v1; + } + byte[] newBytes = new byte[length]; + if (offset > -8L * length && offset < 8L * length) { + if (offset > 0) { + int nBytes = (int) (offset >> 3); + int nBits = ((int) offset) & 0x7; + if (nBits == 0) { + System.arraycopy(bytes, nBytes, newBytes, 0, length - nBytes); + } else { + int nBits2 = 8 - nBits; + int dstIndex = 0, srcIndex = nBytes; + length--; + while (srcIndex < length) { + newBytes[dstIndex++] = (byte) (bytes[srcIndex++] << nBits + | (bytes[srcIndex] & 0xff) >>> nBits2); + } + newBytes[dstIndex] = (byte) (bytes[srcIndex] << nBits); + } + } else { + offset = -offset; + int nBytes = (int) (offset >> 3); + int nBits = ((int) offset) & 
0x7; + if (nBits == 0) { + System.arraycopy(bytes, 0, newBytes, nBytes, length - nBytes); + } else { + int nBits2 = 8 - nBits; + int dstIndex = nBytes, srcIndex = 0; + newBytes[dstIndex++] = (byte) ((bytes[srcIndex] & 0xff) >>> nBits); + while (dstIndex < length) { + newBytes[dstIndex++] = (byte) (bytes[srcIndex++] << nBits2 + | (bytes[srcIndex] & 0xff) >>> nBits); + } + } + } + } + return vt == Value.BINARY ? ValueBinary.getNoCopy(newBytes) : ValueVarbinary.getNoCopy(newBytes); + } + case Value.TINYINT: { + byte v; + if (offset < 8) { + v = v1.getByte(); + if (offset > -8) { + if (offset > 0) { + v <<= (int) offset; + } else if (unsigned) { + v = (byte) ((v & 0xFF) >>> (int) -offset); + } else { + v >>= (int) -offset; + } + } else if (unsigned) { + v = 0; + } else { + v >>= 7; + } + } else { + v = 0; + } + return ValueTinyint.get(v); + } + case Value.SMALLINT: { + short v; + if (offset < 16) { + v = v1.getShort(); + if (offset > -16) { + if (offset > 0) { + v <<= (int) offset; + } else if (unsigned) { + v = (short) ((v & 0xFFFF) >>> (int) -offset); + } else { + v >>= (int) -offset; + } + } else if (unsigned) { + v = 0; + } else { + v >>= 15; + } + } else { + v = 0; + } + return ValueSmallint.get(v); + } + case Value.INTEGER: { + int v; + if (offset < 32) { + v = v1.getInt(); + if (offset > -32) { + if (offset > 0) { + v <<= (int) offset; + } else if (unsigned) { + v >>>= (int) -offset; + } else { + v >>= (int) -offset; + } + } else if (unsigned) { + v = 0; + } else { + v >>= 31; + } + } else { + v = 0; + } + return ValueInteger.get(v); + } + case Value.BIGINT: { + long v; + if (offset < 64) { + v = v1.getLong(); + if (offset > -64) { + if (offset > 0) { + v <<= offset; + } else if (unsigned) { + v >>>= -offset; + } else { + v >>= -offset; + } + } else if (unsigned) { + v = 0; + } else { + v >>= 63; + } + } else { + v = 0; + } + return ValueBigint.get(v); + } + default: + throw DbException.getInvalidValueException("bit function parameter", v1.getTraceSQL()); + } + 
} + + private static Value rotate(Value v1, long offset, boolean right) { + int vt = v1.getValueType(); + switch (vt) { + case Value.BINARY: + case Value.VARBINARY: { + byte[] bytes = v1.getBytesNoCopy(); + int length = bytes.length; + if (length == 0) { + return v1; + } + long bitLength = length << 3L; + offset %= bitLength; + if (right) { + offset = -offset; + } + if (offset == 0L) { + return v1; + } else if (offset < 0) { + offset += bitLength; + } + byte[] newBytes = new byte[length]; + int nBytes = (int) (offset >> 3); + int nBits = ((int) offset) & 0x7; + if (nBits == 0) { + System.arraycopy(bytes, nBytes, newBytes, 0, length - nBytes); + System.arraycopy(bytes, 0, newBytes, length - nBytes, nBytes); + } else { + int nBits2 = 8 - nBits; + for (int dstIndex = 0, srcIndex = nBytes; dstIndex < length;) { + newBytes[dstIndex++] = (byte) (bytes[srcIndex] << nBits + | (bytes[srcIndex = (srcIndex + 1) % length] & 0xFF) >>> nBits2); + } + } + return vt == Value.BINARY ? ValueBinary.getNoCopy(newBytes) : ValueVarbinary.getNoCopy(newBytes); + } + case Value.TINYINT: { + int o = (int) offset; + if (right) { + o = -o; + } + if ((o &= 0x7) == 0) { + return v1; + } + int v = v1.getByte() & 0xFF; + return ValueTinyint.get((byte) ((v << o) | (v >>> 8 - o))); + } + case Value.SMALLINT: { + int o = (int) offset; + if (right) { + o = -o; + } + if ((o &= 0xF) == 0) { + return v1; + } + int v = v1.getShort() & 0xFFFF; + return ValueSmallint.get((short) ((v << o) | (v >>> 16 - o))); + } + case Value.INTEGER: { + int o = (int) offset; + if (right) { + o = -o; + } + if ((o &= 0x1F) == 0) { + return v1; + } + return ValueInteger.get(Integer.rotateLeft(v1.getInt(), o)); + } + case Value.BIGINT: { + int o = (int) offset; + if (right) { + o = -o; + } + if ((o &= 0x3F) == 0) { + return v1; + } + return ValueBigint.get(Long.rotateLeft(v1.getLong(), o)); + } + default: + throw DbException.getInvalidValueException("bit function parameter", v1.getTraceSQL()); + } + } + + /** + * Computes the 
value of bitwise function. + * + * @param function + * one of {@link #BITAND}, {@link #BITOR}, {@link #BITXOR}, + * {@link #BITNOT}, {@link #BITNAND}, {@link #BITNOR}, + * {@link #BITXNOR} + * @param type + * the type of result + * @param v1 + * the value of first argument + * @param v2 + * the value of second argument, or {@code null} + * @return the resulting value + */ + public static Value getBitwise(int function, TypeInfo type, Value v1, Value v2) { + return type.getValueType() < Value.TINYINT ? getBinaryString(function, type, v1, v2) + : getNumeric(function, type, v1, v2); + } + + private static Value getBinaryString(int function, TypeInfo type, Value v1, Value v2) { + byte[] bytes; + if (function == BITNOT) { + bytes = v1.getBytes(); + for (int i = 0, l = bytes.length; i < l; i++) { + bytes[i] = (byte) ~bytes[i]; + } + } else { + byte[] bytes1 = v1.getBytesNoCopy(), bytes2 = v2.getBytesNoCopy(); + int length1 = bytes1.length, length2 = bytes2.length; + int min, max; + if (length1 <= length2) { + min = length1; + max = length2; + } else { + min = length2; + max = length1; + byte[] t = bytes1; + bytes1 = bytes2; + bytes2 = t; + } + int limit = (int) type.getPrecision(); + if (min > limit) { + max = min = limit; + } else if (max > limit) { + max = limit; + } + bytes = new byte[max]; + int i = 0; + switch (function) { + case BITAND: + for (; i < min; i++) { + bytes[i] = (byte) (bytes1[i] & bytes2[i]); + } + break; + case BITOR: + for (; i < min; i++) { + bytes[i] = (byte) (bytes1[i] | bytes2[i]); + } + System.arraycopy(bytes2, i, bytes, i, max - i); + break; + case BITXOR: + for (; i < min; i++) { + bytes[i] = (byte) (bytes1[i] ^ bytes2[i]); + } + System.arraycopy(bytes2, i, bytes, i, max - i); + break; + case BITNAND: + for (; i < min; i++) { + bytes[i] = (byte) ~(bytes1[i] & bytes2[i]); + } + Arrays.fill(bytes, i, max, (byte) -1); + break; + case BITNOR: + for (; i < min; i++) { + bytes[i] = (byte) ~(bytes1[i] | bytes2[i]); + } + for (; i < max; i++) { + 
bytes[i] = (byte) ~bytes2[i]; + } + break; + case BITXNOR: + for (; i < min; i++) { + bytes[i] = (byte) ~(bytes1[i] ^ bytes2[i]); + } + for (; i < max; i++) { + bytes[i] = (byte) ~bytes2[i]; + } + break; + default: + throw DbException.getInternalError("function=" + function); + } + } + return type.getValueType() == Value.BINARY ? ValueBinary.getNoCopy(bytes) : ValueVarbinary.getNoCopy(bytes); + } + + private static Value getNumeric(int function, TypeInfo type, Value v1, Value v2) { + long l1 = v1.getLong(); + switch (function) { + case BITAND: + l1 &= v2.getLong(); + break; + case BITOR: + l1 |= v2.getLong(); + break; + case BITXOR: + l1 ^= v2.getLong(); + break; + case BITNOT: + l1 = ~l1; + break; + case BITNAND: + l1 = ~(l1 & v2.getLong()); + break; + case BITNOR: + l1 = ~(l1 | v2.getLong()); + break; + case BITXNOR: + l1 = ~(l1 ^ v2.getLong()); + break; + default: + throw DbException.getInternalError("function=" + function); + } + switch (type.getValueType()) { + case Value.TINYINT: + return ValueTinyint.get((byte) l1); + case Value.SMALLINT: + return ValueSmallint.get((short) l1); + case Value.INTEGER: + return ValueInteger.get((int) l1); + case Value.BIGINT: + return ValueBigint.get(l1); + default: + throw DbException.getInternalError(); + } + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + switch (function) { + case BITNOT: + return optimizeNot(session); + case BITGET: + type = TypeInfo.TYPE_BOOLEAN; + break; + case BITCOUNT: + type = TypeInfo.TYPE_BIGINT; + break; + case LSHIFT: + case RSHIFT: + case ULSHIFT: + case URSHIFT: + case ROTATELEFT: + case ROTATERIGHT: + type = checkArgType(left); + break; + default: + type = getCommonType(left, right); + break; + } + if (left.isConstant() && (right == null || right.isConstant())) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + private Expression 
optimizeNot(SessionLocal session) { + type = checkArgType(left); + if (left.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } else if (left instanceof BitFunction) { + BitFunction l = (BitFunction) left; + int f = l.function; + switch (f) { + case BITAND: + case BITOR: + case BITXOR: + f += BITNAND - BITAND; + break; + case BITNOT: + return l.left; + case BITNAND: + case BITNOR: + case BITXNOR: + f -= BITNAND - BITAND; + break; + default: + return this; + } + return new BitFunction(l.left, l.right, f).optimize(session); + } else if (left instanceof Aggregate) { + Aggregate l = (Aggregate) left; + AggregateType t; + switch (l.getAggregateType()) { + case BIT_AND_AGG: + t = AggregateType.BIT_NAND_AGG; + break; + case BIT_OR_AGG: + t = AggregateType.BIT_NOR_AGG; + break; + case BIT_XOR_AGG: + t = AggregateType.BIT_XNOR_AGG; + break; + case BIT_NAND_AGG: + t = AggregateType.BIT_AND_AGG; + break; + case BIT_NOR_AGG: + t = AggregateType.BIT_OR_AGG; + break; + case BIT_XNOR_AGG: + t = AggregateType.BIT_XOR_AGG; + break; + default: + return this; + } + return new Aggregate(t, new Expression[] { l.getSubexpression(0) }, l.getSelect(), l.isDistinct()) + .optimize(session); + } + return this; + } + + private static TypeInfo getCommonType(Expression arg1, Expression arg2) { + TypeInfo t1 = checkArgType(arg1), t2 = checkArgType(arg2); + int vt1 = t1.getValueType(), vt2 = t2.getValueType(); + boolean bs = DataType.isBinaryStringType(vt1); + if (bs != DataType.isBinaryStringType(vt2)) { + throw DbException.getInvalidValueException("bit function parameters", + t2.getSQL(t1.getSQL(new StringBuilder(), TRACE_SQL_FLAGS).append(" vs "), TRACE_SQL_FLAGS) + .toString()); + } + if (bs) { + long precision; + if (vt1 == Value.BINARY) { + precision = t1.getDeclaredPrecision(); + if (vt2 == Value.BINARY) { + precision = Math.max(precision, t2.getDeclaredPrecision()); + } + } else { + if (vt2 == Value.BINARY) { + vt1 = Value.BINARY; + precision = 
t2.getDeclaredPrecision(); + } else { + long precision1 = t1.getDeclaredPrecision(), precision2 = t2.getDeclaredPrecision(); + precision = precision1 <= 0L || precision2 <= 0L ? -1L : Math.max(precision1, precision2); + } + } + return TypeInfo.getTypeInfo(vt1, precision, 0, null); + } + return TypeInfo.getTypeInfo(Math.max(vt1, vt2)); + } + + /** + * Checks the type of an argument of bitwise function (one of + * {@link #BITAND}, {@link #BITOR}, {@link #BITXOR}, {@link #BITNOT}, + * {@link #BITNAND}, {@link #BITNOR}, {@link #BITXNOR}). + * + * @param arg + * the argument + * @return the type of the specified argument + * @throws DbException + * if argument type is not supported by bitwise functions + */ + public static TypeInfo checkArgType(Expression arg) { + TypeInfo t = arg.getType(); + switch (t.getValueType()) { + case Value.NULL: + case Value.BINARY: + case Value.VARBINARY: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + return t; + } + throw DbException.getInvalidExpressionTypeException("bit function argument", arg); + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/BuiltinFunctions.java b/h2/src/main/org/h2/expression/function/BuiltinFunctions.java new file mode 100644 index 0000000000..efb1187842 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/BuiltinFunctions.java @@ -0,0 +1,136 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.HashSet; + +import org.h2.engine.Database; +import org.h2.mode.ModeFunction; + +/** + * Maintains the list of built-in functions. 
+ */ +public final class BuiltinFunctions { + + private static final HashSet FUNCTIONS; + + static { + String[] names = { // + // MathFunction + "ABS", "MOD", "FLOOR", "CEIL", "ROUND", "ROUNDMAGIC", "SIGN", "TRUNC", "TRUNCATE", + // MathFunction1 + "SIN", "COS", "TAN", "COT", "SINH", "COSH", "TANH", "ASIN", "ACOS", "ATAN", // + "LOG10", "LN", "EXP", "SQRT", "DEGREES", "RADIANS", + // MathFunction2 + "ATAN2", "LOG", "POWER", + // BitFunction + "BITAND", "BITOR", "BITXOR", "BITNOT", "BITNAND", "BITNOR", "BITXNOR", "BITGET", "BITCOUNT", "LSHIFT", + "RSHIFT", "ULSHIFT", "URSHIFT", "ROTATELEFT", "ROTATERIGHT", + // DateTimeFunction + "EXTRACT", "DATE_TRUNC", "DATEADD", "DATEDIFF", // + "TIMESTAMPADD", "TIMESTAMPDIFF", + // DateTimeFormatFunction + "FORMATDATETIME", "PARSEDATETIME", + // DayMonthNameFunction + "DAYNAME", "MONTHNAME", + // CardinalityExpression + "CARDINALITY", "ARRAY_MAX_CARDINALITY", + // StringFunction + "LOCATE", "INSERT", "REPLACE", "LPAD", "RPAD", "TRANSLATE", + // StringFunction1 + "UPPER", "LOWER", "ASCII", "CHAR", "CHR", "STRINGENCODE", "STRINGDECODE", "STRINGTOUTF8", + "UTF8TOSTRING", "HEXTORAW", "RAWTOHEX", "SPACE", "QUOTE_IDENT", + // StringFunction2 + /* LEFT and RIGHT are keywords */ "REPEAT", + // SubstringFunction + "SUBSTRING", + // ToCharFunction + "TO_CHAR", + // LengthFunction + "CHAR_LENGTH", "CHARACTER_LENGTH", "LENGTH", "OCTET_LENGTH", "BIT_LENGTH", + // TrimFunction + "TRIM", + // RegexpFunction + "REGEXP_LIKE", "REGEXP_REPLACE", "REGEXP_SUBSTR", + // XMLFunction + "XMLATTR", "XMLCDATA", "XMLCOMMENT", "XMLNODE", "XMLSTARTDOC", "XMLTEXT", + // ArrayFunction + "TRIM_ARRAY", "ARRAY_CONTAINS", "ARRAY_SLICE", + // CompressFunction + "COMPRESS", "EXPAND", + // SoundexFunction + "SOUNDEX", "DIFFERENCE", + // JsonConstructorFunction + "JSON_OBJECT", "JSON_ARRAY", + // CryptFunction + "ENCRYPT", "DECRYPT", + // CoalesceFunction + "COALESCE", "GREATEST", "LEAST", + // NullIfFunction + "NULLIF", + // ConcatFunction + "CONCAT", "CONCAT_WS", + 
// HashFunction + "HASH", "ORA_HASH", + // RandFunction + "RAND", "RANDOM", "SECURE_RAND", "RANDOM_UUID", "UUID", + // SessionControlFunction + "ABORT_SESSION", "CANCEL_SESSION", + // SysInfoFunction + "AUTOCOMMIT", "DATABASE_PATH", "H2VERSION", "LOCK_MODE", "LOCK_TIMEOUT", "MEMORY_FREE", "MEMORY_USED", + "READONLY", "SESSION_ID", "TRANSACTION_ID", + // TableInfoFunction + "DISK_SPACE_USED", "ESTIMATED_ENVELOPE", + // FileFunction + "FILE_READ", "FILE_WRITE", + // DataTypeSQLFunction + "DATA_TYPE_SQL", + // DBObjectFunction + "DB_OBJECT_ID", "DB_OBJECT_SQL", + // CSVWriteFunction + "CSVWRITE", + // SetFunction + /* SET is keyword */ + // SignalFunction + "SIGNAL", + // TruncateValueFunction + "TRUNCATE_VALUE", + // CompatibilitySequenceValueFunction + "CURRVAL", "NEXTVAL", + // Constants + "ZERO", "PI", + // ArrayTableFunction + "UNNEST", /* TABLE is a keyword */ "TABLE_DISTINCT", + // CSVReadFunction + "CSVREAD", + // LinkSchemaFunction + "LINK_SCHEMA", + // + }; + HashSet set = new HashSet<>(128); + for (String n : names) { + set.add(n); + } + FUNCTIONS = set; + } + + /** + * Returns whether specified function is a non-keyword built-in function. + * + * @param database + * the database + * @param upperName + * the name of the function in upper case + * @return {@code true} if it is + */ + public static boolean isBuiltinFunction(Database database, String upperName) { + return FUNCTIONS.contains(upperName) || ModeFunction.getFunction(database, upperName) != null; + } + + private BuiltinFunctions() { + } + +} diff --git a/h2/src/main/org/h2/expression/function/CSVWriteFunction.java b/h2/src/main/org/h2/expression/function/CSVWriteFunction.java new file mode 100644 index 0000000000..ce1e379559 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CSVWriteFunction.java @@ -0,0 +1,126 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.sql.Connection; +import java.sql.SQLException; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.tools.Csv; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInteger; + +/** + * A CSVWRITE function. + */ +public final class CSVWriteFunction extends FunctionN { + + public CSVWriteFunction() { + super(new Expression[4]); + } + + @Override + public Value getValue(SessionLocal session) { + session.getUser().checkAdmin(); + Connection conn = session.createConnection(false); + Csv csv = new Csv(); + String options = getValue(session, 2); + String charset = null; + if (options != null && options.indexOf('=') >= 0) { + charset = csv.setOptions(options); + } else { + charset = options; + String fieldSeparatorWrite = getValue(session, 3); + String fieldDelimiter = getValue(session, 4); + String escapeCharacter = getValue(session, 5); + String nullString = getValue(session, 6); + String lineSeparator = getValue(session, 7); + setCsvDelimiterEscape(csv, fieldSeparatorWrite, fieldDelimiter, escapeCharacter); + csv.setNullString(nullString); + if (lineSeparator != null) { + csv.setLineSeparator(lineSeparator); + } + } + try { + return ValueInteger.get(csv.write(conn, args[0].getValue(session).getString(), + args[1].getValue(session).getString(), charset)); + } catch (SQLException e) { + throw DbException.convert(e); + } + } + + private String getValue(SessionLocal session, int index) { + return index < args.length ? args[index].getValue(session).getString() : null; + } + + /** + * Sets delimiter options. 
+ * + * @param csv + * the CSV utility instance + * @param fieldSeparator + * the field separator + * @param fieldDelimiter + * the field delimiter + * @param escapeCharacter + * the escape character + */ + public static void setCsvDelimiterEscape(Csv csv, String fieldSeparator, String fieldDelimiter, + String escapeCharacter) { + if (fieldSeparator != null) { + csv.setFieldSeparatorWrite(fieldSeparator); + if (!fieldSeparator.isEmpty()) { + char fs = fieldSeparator.charAt(0); + csv.setFieldSeparatorRead(fs); + } + } + if (fieldDelimiter != null) { + char fd = fieldDelimiter.isEmpty() ? 0 : fieldDelimiter.charAt(0); + csv.setFieldDelimiter(fd); + } + if (escapeCharacter != null) { + char ec = escapeCharacter.isEmpty() ? 0 : escapeCharacter.charAt(0); + csv.setEscapeCharacter(ec); + } + } + + @Override + public Expression optimize(SessionLocal session) { + optimizeArguments(session, false); + int len = args.length; + if (len < 2 || len > 8) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), "2..8"); + } + type = TypeInfo.TYPE_INTEGER; + return this; + } + + @Override + public String getName() { + return "CSVWRITE"; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + if (!super.isEverything(visitor)) { + return false; + } + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + case ExpressionVisitor.QUERY_COMPARABLE: + case ExpressionVisitor.READONLY: + return false; + default: + return true; + } + } + +} diff --git a/h2/src/main/org/h2/expression/function/CardinalityExpression.java b/h2/src/main/org/h2/expression/function/CardinalityExpression.java new file mode 100644 index 0000000000..f565a809e6 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CardinalityExpression.java @@ -0,0 +1,78 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.MathUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; + +/** + * Cardinality expression. + */ +public final class CardinalityExpression extends Function1 { + + private final boolean max; + + /** + * Creates new instance of cardinality expression. + * + * @param arg + * argument + * @param max + * {@code false} for {@code CARDINALITY}, {@code true} for + * {@code ARRAY_MAX_CARDINALITY} + */ + public CardinalityExpression(Expression arg, boolean max) { + super(arg); + this.max = max; + } + + @Override + public Value getValue(SessionLocal session) { + int result; + if (max) { + TypeInfo t = arg.getType(); + if (t.getValueType() == Value.ARRAY) { + result = MathUtils.convertLongToInt(t.getPrecision()); + } else { + throw DbException.getInvalidValueException("array", arg.getValue(session).getTraceSQL()); + } + } else { + Value v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + if (v.getValueType() != Value.ARRAY) { + throw DbException.getInvalidValueException("array", v.getTraceSQL()); + } + result = ((ValueArray) v).getList().length; + } + return ValueInteger.get(result); + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + type = TypeInfo.TYPE_INTEGER; + if (arg.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return max ? 
"ARRAY_MAX_CARDINALITY" : "CARDINALITY"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CastSpecification.java b/h2/src/main/org/h2/expression/function/CastSpecification.java new file mode 100644 index 0000000000..d0a54bfc0e --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CastSpecification.java @@ -0,0 +1,115 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.schema.Domain; +import org.h2.table.Column; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A cast specification. + */ +public final class CastSpecification extends Function1 { + + private Domain domain; + + public CastSpecification(Expression arg, Column column) { + super(arg); + type = column.getType(); + domain = column.getDomain(); + } + + public CastSpecification(Expression arg, TypeInfo type) { + super(arg); + this.type = type; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = arg.getValue(session).castTo(type, session); + if (domain != null) { + domain.checkConstraints(session, v); + } + return v; + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + if (arg.isConstant()) { + Value v = getValue(session); + if (v == ValueNull.INSTANCE || canOptimizeCast(arg.getType().getValueType(), type.getValueType())) { + return TypedValueExpression.get(v, type); + } + } + return this; + } + + @Override + public boolean isConstant() { + return arg instanceof ValueExpression && canOptimizeCast(arg.getType().getValueType(), type.getValueType()); + } + + private static boolean canOptimizeCast(int src, int dst) { + switch (src) 
{ + case Value.TIME: + switch (dst) { + case Value.TIME_TZ: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + return false; + } + break; + case Value.TIME_TZ: + switch (dst) { + case Value.TIME: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + return false; + } + break; + case Value.DATE: + if (dst == Value.TIMESTAMP_TZ) { + return false; + } + break; + case Value.TIMESTAMP: + switch (dst) { + case Value.TIME_TZ: + case Value.TIMESTAMP_TZ: + return false; + } + break; + case Value.TIMESTAMP_TZ: + switch (dst) { + case Value.TIME: + case Value.DATE: + case Value.TIMESTAMP: + return false; + } + } + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append("CAST("); + arg.getUnenclosedSQL(builder, arg instanceof ValueExpression ? sqlFlags | NO_CASTS : sqlFlags).append(" AS "); + return (domain != null ? domain : type).getSQL(builder, sqlFlags).append(')'); + } + + @Override + public String getName() { + return "CAST"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CoalesceFunction.java b/h2/src/main/org/h2/expression/function/CoalesceFunction.java new file mode 100644 index 0000000000..3d5377feb1 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CoalesceFunction.java @@ -0,0 +1,111 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A COALESCE, GREATEST, or LEAST function. + */ +public final class CoalesceFunction extends FunctionN { + + /** + * COALESCE(). + */ + public static final int COALESCE = 0; + + /** + * GREATEST() (non-standard). 
+ */ + public static final int GREATEST = COALESCE + 1; + + /** + * LEAST() (non-standard). + */ + public static final int LEAST = GREATEST + 1; + + private static final String[] NAMES = { // + "COALESCE", "GREATEST", "LEAST" // + }; + + private final int function; + + public CoalesceFunction(int function) { + this(function, new Expression[4]); + } + + public CoalesceFunction(int function, Expression... args) { + super(args); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = ValueNull.INSTANCE; + switch (function) { + case COALESCE: { + for (int i = 0, l = args.length; i < l; i++) { + Value v2 = args[i].getValue(session); + if (v2 != ValueNull.INSTANCE) { + v = v2.convertTo(type, session); + break; + } + } + break; + } + case GREATEST: + case LEAST: { + for (int i = 0, l = args.length; i < l; i++) { + Value v2 = args[i].getValue(session); + if (v2 != ValueNull.INSTANCE) { + v2 = v2.convertTo(type, session); + if (v == ValueNull.INSTANCE) { + v = v2; + } else { + int comp = session.compareTypeSafe(v, v2); + if (function == GREATEST) { + if (comp < 0) { + v = v2; + } + } else if (comp > 0) { + v = v2; + } + } + } + } + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + type = TypeInfo.getHigherType(args); + if (type.getValueType() <= Value.NULL) { + type = TypeInfo.TYPE_VARCHAR; + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CompatibilitySequenceValueFunction.java b/h2/src/main/org/h2/expression/function/CompatibilitySequenceValueFunction.java new file mode 100644 index 0000000000..2d9fd62f69 --- /dev/null +++ 
b/h2/src/main/org/h2/expression/function/CompatibilitySequenceValueFunction.java @@ -0,0 +1,100 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.command.Parser; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.schema.Sequence; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * NEXTVAL() and CURRVAL() compatibility functions. + */ +public final class CompatibilitySequenceValueFunction extends Function1_2 { + + private final boolean current; + + public CompatibilitySequenceValueFunction(Expression left, Expression right, boolean current) { + super(left, right); + this.current = current; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + String schemaName, sequenceName; + if (v2 == null) { + Parser p = new Parser(session); + String sql = v1.getString(); + Expression expr = p.parseExpression(sql); + if (expr instanceof ExpressionColumn) { + ExpressionColumn seq = (ExpressionColumn) expr; + schemaName = seq.getOriginalTableAliasName(); + if (schemaName == null) { + schemaName = session.getCurrentSchemaName(); + sequenceName = sql; + } else { + sequenceName = seq.getColumnName(session, -1); + } + } else { + throw DbException.getSyntaxError(sql, 1); + } + } else { + schemaName = v1.getString(); + sequenceName = v2.getString(); + } + Database database = session.getDatabase(); + Schema s = database.findSchema(schemaName); + if (s == null) { + schemaName = StringUtils.toUpperEnglish(schemaName); + s = database.getSchema(schemaName); + } + Sequence seq = s.findSequence(sequenceName); + 
if (seq == null) { + sequenceName = StringUtils.toUpperEnglish(sequenceName); + seq = s.getSequence(sequenceName); + } + return (current ? session.getCurrentValueFor(seq) : session.getNextValueFor(seq, null)).convertTo(type); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + type = session.getMode().decimalSequences ? TypeInfo.TYPE_NUMERIC_BIGINT : TypeInfo.TYPE_BIGINT; + return this; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.INDEPENDENT: + case ExpressionVisitor.DETERMINISTIC: + case ExpressionVisitor.QUERY_COMPARABLE: + return false; + case ExpressionVisitor.READONLY: + if (!current) { + return false; + } + } + return super.isEverything(visitor); + } + + @Override + public String getName() { + return current ? "CURRVAL" : "NEXTVAL"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CompressFunction.java b/h2/src/main/org/h2/expression/function/CompressFunction.java new file mode 100644 index 0000000000..348c87297e --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CompressFunction.java @@ -0,0 +1,77 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.tools.CompressTool; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueVarbinary; + +/** + * A COMPRESS or EXPAND function. + */ +public final class CompressFunction extends Function1_2 { + + /** + * COMPRESS() (non-standard). + */ + public static final int COMPRESS = 0; + + /** + * EXPAND() (non-standard). 
+ */ + public static final int EXPAND = COMPRESS + 1; + + private static final String[] NAMES = { // + "COMPRESS", "EXPAND" // + }; + + private final int function; + + public CompressFunction(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + switch (function) { + case COMPRESS: + v1 = ValueVarbinary.getNoCopy( + CompressTool.getInstance().compress(v1.getBytesNoCopy(), v2 != null ? v2.getString() : null)); + break; + case EXPAND: + v1 = ValueVarbinary.getNoCopy(CompressTool.getInstance().expand(v1.getBytesNoCopy())); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + type = TypeInfo.TYPE_VARBINARY; + if (left.isConstant() && (right == null || right.isConstant())) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/ConcatFunction.java b/h2/src/main/org/h2/expression/function/ConcatFunction.java new file mode 100644 index 0000000000..14f5646c97 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/ConcatFunction.java @@ -0,0 +1,118 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * A CONCAT or CONCAT_WS function. + */ +public final class ConcatFunction extends FunctionN { + + /** + * CONCAT() (non-standard). + */ + public static final int CONCAT = 0; + + /** + * CONCAT_WS() (non-standard). + */ + public static final int CONCAT_WS = CONCAT + 1; + + private static final String[] NAMES = { // + "CONCAT", "CONCAT_WS" // + }; + + private final int function; + + public ConcatFunction(int function) { + this(function, new Expression[4]); + } + + public ConcatFunction(int function, Expression... args) { + super(args); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + int i = 0; + String separator = null; + if (function == CONCAT_WS) { + i = 1; + separator = args[0].getValue(session).getString(); + } + StringBuilder builder = new StringBuilder(); + boolean f = false; + for (int l = args.length; i < l; i++) { + Value v = args[i].getValue(session); + if (v != ValueNull.INSTANCE) { + if (separator != null) { + if (f) { + builder.append(separator); + } + f = true; + } + builder.append(v.getString()); + } + } + return ValueVarchar.get(builder.toString(), session); + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + int i = 0; + long extra = 0L; + if (function == CONCAT_WS) { + i = 1; + extra = getPrecision(0); + } + long precision = 0L; + int l = args.length; + boolean f = false; + for (; i < l; i++) { + if (args[i].isNullConstant()) { + continue; + } + precision = DataType.addPrecision(precision, getPrecision(i)); + if (extra != 0L && f) { + precision = 
DataType.addPrecision(precision, extra); + } + f = true; + } + type = TypeInfo.getTypeInfo(Value.VARCHAR, precision, 0, null); + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + private long getPrecision(int i) { + TypeInfo t = args[i].getType(); + int valueType = t.getValueType(); + if (valueType == Value.NULL) { + return 0L; + } else if (DataType.isCharacterStringType(valueType)) { + return t.getPrecision(); + } else { + return Long.MAX_VALUE; + } + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CryptFunction.java b/h2/src/main/org/h2/expression/function/CryptFunction.java new file mode 100644 index 0000000000..47fbb966b6 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CryptFunction.java @@ -0,0 +1,87 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.security.BlockCipher; +import org.h2.security.CipherFactory; +import org.h2.util.MathUtils; +import org.h2.util.Utils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueVarbinary; + +/** + * An ENCRYPT or DECRYPT function. + */ +public final class CryptFunction extends FunctionN { + + /** + * ENCRYPT() (non-standard). + */ + public static final int ENCRYPT = 0; + + /** + * DECRYPT() (non-standard). 
+ */ + public static final int DECRYPT = ENCRYPT + 1; + + private static final String[] NAMES = { // + "ENCRYPT", "DECRYPT" // + }; + + private final int function; + + public CryptFunction(Expression arg1, Expression arg2, Expression arg3, int function) { + super(new Expression[] { arg1, arg2, arg3 }); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + BlockCipher cipher = CipherFactory.getBlockCipher(v1.getString()); + cipher.setKey(getPaddedArrayCopy(v2.getBytesNoCopy(), cipher.getKeyLength())); + byte[] newData = getPaddedArrayCopy(v3.getBytesNoCopy(), BlockCipher.ALIGN); + switch (function) { + case ENCRYPT: + cipher.encrypt(newData, 0, newData.length); + break; + case DECRYPT: + cipher.decrypt(newData, 0, newData.length); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return ValueVarbinary.getNoCopy(newData); + } + + private static byte[] getPaddedArrayCopy(byte[] data, int blockSize) { + return Utils.copyBytes(data, MathUtils.roundUpInt(data.length, blockSize)); + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + TypeInfo t = args[2].getType(); + type = DataType.isBinaryStringType(t.getValueType()) + ? TypeInfo.getTypeInfo(Value.VARBINARY, t.getPrecision(), 0, null) + : TypeInfo.TYPE_VARBINARY; + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CurrentDateTimeValueFunction.java b/h2/src/main/org/h2/expression/function/CurrentDateTimeValueFunction.java new file mode 100644 index 0000000000..de11882bc9 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CurrentDateTimeValueFunction.java @@ -0,0 +1,112 @@ +/* + * Copyright 2004-2022 H2 Group. 
/**
 * Current datetime value function: CURRENT_DATE, CURRENT_TIME, LOCALTIME,
 * CURRENT_TIMESTAMP, or LOCALTIMESTAMP.
 */
public final class CurrentDateTimeValueFunction extends Operation0 implements NamedExpression {

    /**
     * The function "CURRENT_DATE"
     */
    public static final int CURRENT_DATE = 0;

    /**
     * The function "CURRENT_TIME"
     */
    public static final int CURRENT_TIME = 1;

    /**
     * The function "LOCALTIME"
     */
    public static final int LOCALTIME = 2;

    /**
     * The function "CURRENT_TIMESTAMP"
     */
    public static final int CURRENT_TIMESTAMP = 3;

    /**
     * The function "LOCALTIMESTAMP"
     */
    public static final int LOCALTIMESTAMP = 4;

    // Result value types, indexed by function id. Note that CURRENT_TIME and
    // CURRENT_TIMESTAMP map to the time-zone-aware types (TIME_TZ,
    // TIMESTAMP_TZ), while LOCALTIME and LOCALTIMESTAMP map to the local ones.
    private static final int[] TYPES = { Value.DATE, Value.TIME_TZ, Value.TIME, Value.TIMESTAMP_TZ, Value.TIMESTAMP };

    private static final String[] NAMES = { "CURRENT_DATE", "CURRENT_TIME", "LOCALTIME", "CURRENT_TIMESTAMP",
            "LOCALTIMESTAMP" };

    /**
     * Get the name for this function id.
     *
     * @param function the function id
     * @return the name
     */
    public static String getName(int function) {
        return NAMES[function];
    }

    // scale keeps its negative "not specified" marker, see constructor
    private final int function, scale;

    private final TypeInfo type;

    /**
     * Creates a new current datetime value function.
     *
     * @param function one of the function ids above
     * @param scale the fractional seconds precision, or a negative value when
     *            no scale was specified
     */
    public CurrentDateTimeValueFunction(int function, int scale) {
        this.function = function;
        // The field is assigned BEFORE the default is substituted below, so a
        // negative value survives as a marker that lets getUnenclosedSQL()
        // omit the parenthesized scale.
        this.scale = scale;
        if (scale < 0) {
            // Only the result type gets the default scale. TIMESTAMP function
            // ids are ordered after the TIME ids, hence the >= comparison.
            scale = function >= CURRENT_TIMESTAMP ? ValueTimestamp.DEFAULT_SCALE : ValueTime.DEFAULT_SCALE;
        }
        type = TypeInfo.getTypeInfo(TYPES[function], 0L, scale, null);
    }

    @Override
    public Value getValue(SessionLocal session) {
        // derive the result from the session timestamp by casting it to this
        // function's result type (which also applies the scale)
        return session.currentTimestamp().castTo(type, session);
    }

    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        builder.append(getName());
        // a negative scale means the scale was not specified, so it is omitted
        if (scale >= 0) {
            builder.append('(').append(scale).append(')');
        }
        return builder;
    }

    @Override
    public boolean isEverything(ExpressionVisitor visitor) {
        switch (visitor.getType()) {
        case ExpressionVisitor.DETERMINISTIC:
            // the current date/time changes between invocations
            return false;
        }
        return true;
    }

    @Override
    public TypeInfo getType() {
        return type;
    }

    @Override
    public int getCost() {
        return 1;
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
+ */ +public final class CurrentGeneralValueSpecification extends Operation0 implements NamedExpression { + + /** + * The "CURRENT_CATALOG" general value specification. + */ + public static final int CURRENT_CATALOG = 0; + + /** + * The "CURRENT_PATH" general value specification. + */ + public static final int CURRENT_PATH = CURRENT_CATALOG + 1; + + /** + * The function "CURRENT_ROLE" general value specification. + */ + public static final int CURRENT_ROLE = CURRENT_PATH + 1; + + /** + * The function "CURRENT_SCHEMA" general value specification. + */ + public static final int CURRENT_SCHEMA = CURRENT_ROLE + 1; + + /** + * The function "CURRENT_USER" general value specification. + */ + public static final int CURRENT_USER = CURRENT_SCHEMA + 1; + + /** + * The function "SESSION_USER" general value specification. + */ + public static final int SESSION_USER = CURRENT_USER + 1; + + /** + * The function "SYSTEM_USER" general value specification. + */ + public static final int SYSTEM_USER = SESSION_USER + 1; + + private static final String[] NAMES = { "CURRENT_CATALOG", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA", + "CURRENT_USER", "SESSION_USER", "SYSTEM_USER" }; + + private final int specification; + + public CurrentGeneralValueSpecification(int specification) { + this.specification = specification; + } + + @Override + public Value getValue(SessionLocal session) { + String s; + switch (specification) { + case CURRENT_CATALOG: + s = session.getDatabase().getShortName(); + break; + case CURRENT_PATH: { + String[] searchPath = session.getSchemaSearchPath(); + if (searchPath != null) { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < searchPath.length; i++) { + if (i > 0) { + builder.append(','); + } + ParserUtil.quoteIdentifier(builder, searchPath[i], HasSQL.DEFAULT_SQL_FLAGS); + } + s = builder.toString(); + } else { + s = ""; + } + break; + } + case CURRENT_ROLE: { + Database db = session.getDatabase(); + s = db.getPublicRole().getName(); + if 
(db.getSettings().databaseToLower) { + s = StringUtils.toLowerEnglish(s); + } + break; + } + case CURRENT_SCHEMA: + s = session.getCurrentSchemaName(); + break; + case CURRENT_USER: + case SESSION_USER: + case SYSTEM_USER: + s = session.getUser().getName(); + if (session.getDatabase().getSettings().databaseToLower) { + s = StringUtils.toLowerEnglish(s); + } + break; + default: + throw DbException.getInternalError("specification=" + specification); + } + return s != null ? ValueVarchar.get(s, session) : ValueNull.INSTANCE; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return builder.append(getName()); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return true; + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_VARCHAR; + } + + @Override + public int getCost() { + return 1; + } + + @Override + public String getName() { + return NAMES[specification]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/DBObjectFunction.java b/h2/src/main/org/h2/expression/function/DBObjectFunction.java new file mode 100644 index 0000000000..55441dc51e --- /dev/null +++ b/h2/src/main/org/h2/expression/function/DBObjectFunction.java @@ -0,0 +1,144 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
/**
 * DB_OBJECT_ID() and DB_OBJECT_SQL() functions.
 */
public final class DBObjectFunction extends FunctionN {

    /**
     * DB_OBJECT_ID() (non-standard).
     */
    public static final int DB_OBJECT_ID = 0;

    /**
     * DB_OBJECT_SQL() (non-standard).
     */
    public static final int DB_OBJECT_SQL = DB_OBJECT_ID + 1;

    private static final String[] NAMES = { //
            "DB_OBJECT_ID", "DB_OBJECT_SQL" //
    };

    // selects between DB_OBJECT_ID and DB_OBJECT_SQL behavior
    private final int function;

    /**
     * Creates a new DB_OBJECT_ID() or DB_OBJECT_SQL() function.
     *
     * @param objectType the object type argument
     * @param arg1 the schema name (three-argument form) or the object name
     *            (two-argument form)
     * @param arg2 the object name in the three-argument form, or {@code null}
     * @param function the function id, {@link #DB_OBJECT_ID} or
     *            {@link #DB_OBJECT_SQL}
     */
    public DBObjectFunction(Expression objectType, Expression arg1, Expression arg2, int function) {
        super(arg2 == null ? new Expression[] { objectType, arg1, } : new Expression[] { objectType, arg1, arg2 });
        this.function = function;
    }

    @Override
    public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) {
        // metadata access is restricted to administrators
        session.getUser().checkAdmin();
        String objectType = v1.getString();
        DbObject object;
        if (v3 != null) {
            // three-argument form: (objectType, schemaName, objectName)
            // resolves schema-scoped objects
            Schema schema = session.getDatabase().findSchema(v2.getString());
            if (schema == null) {
                return ValueNull.INSTANCE;
            }
            String objectName = v3.getString();
            switch (objectType) {
            case "CONSTANT":
                object = schema.findConstant(objectName);
                break;
            case "CONSTRAINT":
                object = schema.findConstraint(session, objectName);
                break;
            case "DOMAIN":
                object = schema.findDomain(objectName);
                break;
            case "INDEX":
                object = schema.findIndex(session, objectName);
                break;
            case "ROUTINE":
                object = schema.findFunctionOrAggregate(objectName);
                break;
            case "SEQUENCE":
                object = schema.findSequence(objectName);
                break;
            case "SYNONYM":
                object = schema.getSynonym(objectName);
                break;
            case "TABLE":
                object = schema.findTableOrView(session, objectName);
                break;
            case "TRIGGER":
                object = schema.findTrigger(objectName);
                break;
            default:
                // unknown schema-scoped object type
                return ValueNull.INSTANCE;
            }
        } else {
            // two-argument form: (objectType, objectName) resolves
            // database-scoped objects
            String objectName = v2.getString();
            Database database = session.getDatabase();
            switch (objectType) {
            case "ROLE":
                object = database.findRole(objectName);
                break;
            case "SETTING":
                object = database.findSetting(objectName);
                break;
            case "SCHEMA":
                object = database.findSchema(objectName);
                break;
            case "USER":
                object = database.findUser(objectName);
                break;
            default:
                // unknown database-scoped object type
                return ValueNull.INSTANCE;
            }
        }
        if (object == null) {
            // object of the requested type does not exist: return NULL
            return ValueNull.INSTANCE;
        }
        switch (function) {
        case DB_OBJECT_ID:
            return ValueInteger.get(object.getId());
        case DB_OBJECT_SQL:
            String sql = object.getCreateSQLForMeta();
            return sql != null ? ValueVarchar.get(sql, session) : ValueNull.INSTANCE;
        default:
            throw DbException.getInternalError("function=" + function);
        }
    }

    @Override
    public Expression optimize(SessionLocal session) {
        optimizeArguments(session, false);
        // DB_OBJECT_ID yields the numeric object id, DB_OBJECT_SQL its SQL
        type = function == DB_OBJECT_ID ? TypeInfo.TYPE_INTEGER : TypeInfo.TYPE_VARCHAR;
        return this;
    }

    @Override
    public boolean isEverything(ExpressionVisitor visitor) {
        switch (visitor.getType()) {
        case ExpressionVisitor.DETERMINISTIC:
            // the result depends on the database's current meta objects
            return false;
        }
        return super.isEverything(visitor);
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
+ */ +public final class DataTypeSQLFunction extends FunctionN { + + public DataTypeSQLFunction(Expression objectSchema, Expression objectName, Expression objectType, + Expression typeIdentifier) { + super(new Expression[] { objectSchema, objectName, objectType, typeIdentifier }); + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + Schema schema = session.getDatabase().findSchema(v1.getString()); + if (schema == null) { + return ValueNull.INSTANCE; + } + String objectName = v2.getString(); + String objectType = v3.getString(); + String typeIdentifier = args[3].getValue(session).getString(); + if (typeIdentifier == null) { + return ValueNull.INSTANCE; + } + TypeInfo t; + switch (objectType) { + case "CONSTANT": { + Constant constant = schema.findConstant(objectName); + if (constant == null || !typeIdentifier.equals("TYPE")) { + return ValueNull.INSTANCE; + } + t = constant.getValue().getType(); + break; + } + case "DOMAIN": { + Domain domain = schema.findDomain(objectName); + if (domain == null || !typeIdentifier.equals("TYPE")) { + return ValueNull.INSTANCE; + } + t = domain.getDataType(); + break; + } + case "ROUTINE": { + int idx = objectName.lastIndexOf('_'); + if (idx < 0) { + return ValueNull.INSTANCE; + } + FunctionAlias function = schema.findFunction(objectName.substring(0, idx)); + if (function == null) { + return ValueNull.INSTANCE; + } + int ordinal; + try { + ordinal = Integer.parseInt(objectName.substring(idx + 1)); + } catch (NumberFormatException e) { + return ValueNull.INSTANCE; + } + JavaMethod[] methods; + try { + methods = function.getJavaMethods(); + } catch (DbException e) { + return ValueNull.INSTANCE; + } + if (ordinal < 1 || ordinal > methods.length) { + return ValueNull.INSTANCE; + } + FunctionAlias.JavaMethod method = methods[ordinal - 1]; + if (typeIdentifier.equals("RESULT")) { + t = method.getDataType(); + } else { + try { + ordinal = Integer.parseInt(typeIdentifier); + } catch 
(NumberFormatException e) { + return ValueNull.INSTANCE; + } + if (ordinal < 1) { + return ValueNull.INSTANCE; + } + if (!method.hasConnectionParam()) { + ordinal--; + } + Class[] columnList = method.getColumnClasses(); + if (ordinal >= columnList.length) { + return ValueNull.INSTANCE; + } + t = ValueToObjectConverter2.classToType(columnList[ordinal]); + } + break; + } + case "TABLE": { + Table table = schema.findTableOrView(session, objectName); + if (table == null) { + return ValueNull.INSTANCE; + } + int ordinal; + try { + ordinal = Integer.parseInt(typeIdentifier); + } catch (NumberFormatException e) { + return ValueNull.INSTANCE; + } + Column[] columns = table.getColumns(); + if (ordinal < 1 || ordinal > columns.length) { + return ValueNull.INSTANCE; + } + t = columns[ordinal - 1].getType(); + break; + } + default: + return ValueNull.INSTANCE; + } + return ValueVarchar.get(t.getSQL(DEFAULT_SQL_FLAGS)); + } + + @Override + public Expression optimize(SessionLocal session) { + optimizeArguments(session, false); + type = TypeInfo.TYPE_VARCHAR; + return this; + } + + @Override + public String getName() { + return "DATA_TYPE_SQL"; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return true; + } + +} diff --git a/h2/src/main/org/h2/expression/function/DateTimeFormatFunction.java b/h2/src/main/org/h2/expression/function/DateTimeFormatFunction.java new file mode 100644 index 0000000000..e426807e91 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/DateTimeFormatFunction.java @@ -0,0 +1,313 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; +import java.time.temporal.TemporalQueries; +import java.util.LinkedHashMap; +import java.util.Locale; +import java.util.Objects; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.JSR310Utils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarchar; + +/** + * A date-time format function. + */ +public final class DateTimeFormatFunction extends FunctionN { + + private static final class CacheKey { + + private final String format; + + private final String locale; + + private final String timeZone; + + CacheKey(String format, String locale, String timeZone) { + this.format = format; + this.locale = locale; + this.timeZone = timeZone; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + format.hashCode(); + result = prime * result + ((locale == null) ? 0 : locale.hashCode()); + result = prime * result + ((timeZone == null) ? 
0 : timeZone.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (!(obj instanceof CacheKey)) { + return false; + } + CacheKey other = (CacheKey) obj; + return format.equals(other.format) && Objects.equals(locale, other.locale) + && Objects.equals(timeZone, other.timeZone); + } + + } + + private static final class CacheValue { + + final DateTimeFormatter formatter; + + final ZoneId zoneId; + + CacheValue(DateTimeFormatter formatter, ZoneId zoneId) { + this.formatter = formatter; + this.zoneId = zoneId; + } + + } + + /** + * FORMATDATETIME() (non-standard). + */ + public static final int FORMATDATETIME = 0; + + /** + * PARSEDATETIME() (non-standard). + */ + public static final int PARSEDATETIME = FORMATDATETIME + 1; + + private static final String[] NAMES = { // + "FORMATDATETIME", "PARSEDATETIME" // + }; + + private static final LinkedHashMap CACHE = new LinkedHashMap() { + + private static final long serialVersionUID = 1L; + + @Override + protected boolean removeEldestEntry(java.util.Map.Entry eldest) { + return size() > 100; + } + + }; + + private final int function; + + public DateTimeFormatFunction(int function) { + super(new Expression[4]); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + String format = v2.getString(), locale, tz; + if (v3 != null) { + locale = v3.getString(); + tz = args.length > 3 ? args[3].getValue(session).getString() : null; + } else { + tz = locale = null; + } + switch (function) { + case FORMATDATETIME: + v1 = ValueVarchar.get(formatDateTime(session, v1, format, locale, tz)); + break; + case PARSEDATETIME: + v1 = parseDateTime(session, v1.getString(), format, locale, tz); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + /** + * Formats a date using a format string. 
+ * + * @param session + * the session + * @param date + * the date to format + * @param format + * the format string + * @param locale + * the locale + * @param timeZone + * the time zone + * @return the formatted date + */ + public static String formatDateTime(SessionLocal session, Value date, String format, String locale, + String timeZone) { + CacheValue formatAndZone = getDateFormat(format, locale, timeZone); + ZoneId zoneId = formatAndZone.zoneId; + TemporalAccessor value; + if (date instanceof ValueTimestampTimeZone) { + OffsetDateTime dateTime = JSR310Utils.valueToOffsetDateTime(date, session); + ZoneId zoneToSet; + if (zoneId != null) { + zoneToSet = zoneId; + } else { + ZoneOffset offset = dateTime.getOffset(); + zoneToSet = ZoneId.ofOffset(offset.getTotalSeconds() == 0 ? "UTC" : "GMT", offset); + } + value = dateTime.atZoneSameInstant(zoneToSet); + } else { + LocalDateTime dateTime = JSR310Utils.valueToLocalDateTime(date, session); + value = dateTime.atZone(zoneId != null ? zoneId : ZoneId.of(session.currentTimeZone().getId())); + } + return formatAndZone.formatter.format(value); + } + + /** + * Parses a date using a format string. 
+ * + * @param session + * the session + * @param date + * the date to parse + * @param format + * the parsing format + * @param locale + * the locale + * @param timeZone + * the time zone + * @return the parsed date + */ + public static ValueTimestampTimeZone parseDateTime(SessionLocal session, String date, String format, String locale, + String timeZone) { + CacheValue formatAndZone = getDateFormat(format, locale, timeZone); + try { + ValueTimestampTimeZone result; + TemporalAccessor parsed = formatAndZone.formatter.parse(date); + ZoneId parsedZoneId = parsed.query(TemporalQueries.zoneId()); + if (parsed.isSupported(ChronoField.OFFSET_SECONDS)) { + result = JSR310Utils.offsetDateTimeToValue(OffsetDateTime.from(parsed)); + } else { + if (parsed.isSupported(ChronoField.INSTANT_SECONDS)) { + Instant instant = Instant.from(parsed); + if (parsedZoneId == null) { + parsedZoneId = formatAndZone.zoneId; + } + if (parsedZoneId != null) { + result = JSR310Utils.zonedDateTimeToValue(instant.atZone(parsedZoneId)); + } else { + result = JSR310Utils.offsetDateTimeToValue(instant.atOffset(ZoneOffset.ofTotalSeconds( // + session.currentTimeZone().getTimeZoneOffsetUTC(instant.getEpochSecond())))); + } + } else { + LocalDate localDate = parsed.query(TemporalQueries.localDate()); + LocalTime localTime = parsed.query(TemporalQueries.localTime()); + if (parsedZoneId == null) { + parsedZoneId = formatAndZone.zoneId; + } + if (localDate != null) { + LocalDateTime localDateTime = localTime != null ? LocalDateTime.of(localDate, localTime) + : localDate.atStartOfDay(); + result = parsedZoneId != null + ? JSR310Utils.zonedDateTimeToValue(localDateTime.atZone(parsedZoneId)) + : (ValueTimestampTimeZone) JSR310Utils.localDateTimeToValue(localDateTime) + .convertTo(Value.TIMESTAMP_TZ, session); + } else { + result = parsedZoneId != null + ? 
JSR310Utils.zonedDateTimeToValue( + JSR310Utils.valueToInstant(session.currentTimestamp(), session) + .atZone(parsedZoneId).with(localTime)) + : (ValueTimestampTimeZone) ValueTime.fromNanos(localTime.toNanoOfDay()) + .convertTo(Value.TIMESTAMP_TZ, session); + } + } + } + return result; + } catch (RuntimeException e) { + throw DbException.get(ErrorCode.PARSE_ERROR_1, e, date); + } + } + + private static CacheValue getDateFormat(String format, String locale, String timeZone) { + Exception ex = null; + if (format.length() <= 100) { + try { + CacheValue value; + CacheKey key = new CacheKey(format, locale, timeZone); + synchronized (CACHE) { + value = CACHE.get(key); + if (value == null) { + DateTimeFormatter df; + if (locale == null) { + df = DateTimeFormatter.ofPattern(format); + } else { + df = DateTimeFormatter.ofPattern(format, new Locale(locale)); + } + ZoneId zoneId; + if (timeZone != null) { + zoneId = getZoneId(timeZone); + df.withZone(zoneId); + } else { + zoneId = null; + } + value = new CacheValue(df, zoneId); + CACHE.put(key, value); + } + } + return value; + } catch (Exception e) { + ex = e; + } + } + throw DbException.get(ErrorCode.PARSE_ERROR_1, ex, format + '/' + locale); + } + + private static ZoneId getZoneId(String timeZone) { + try { + return ZoneId.of(timeZone, ZoneId.SHORT_IDS); + } catch (RuntimeException e) { + throw DbException.getInvalidValueException("TIME ZONE", timeZone); + } + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + switch (function) { + case FORMATDATETIME: + type = TypeInfo.TYPE_VARCHAR; + break; + case PARSEDATETIME: + type = TypeInfo.TYPE_TIMESTAMP_TZ; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git 
a/h2/src/main/org/h2/expression/function/DateTimeFunction.java b/h2/src/main/org/h2/expression/function/DateTimeFunction.java new file mode 100644 index 0000000000..9f9c2add21 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/DateTimeFunction.java @@ -0,0 +1,1037 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import static org.h2.util.DateTimeUtils.MILLIS_PER_DAY; +import static org.h2.util.DateTimeUtils.NANOS_PER_DAY; +import static org.h2.util.DateTimeUtils.NANOS_PER_HOUR; +import static org.h2.util.DateTimeUtils.NANOS_PER_MINUTE; +import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.time.temporal.WeekFields; +import java.util.Locale; + +import org.h2.api.IntervalQualifier; +import org.h2.engine.Mode.ModeEnum; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; +import org.h2.util.IntervalUtils; +import org.h2.util.MathUtils; +import org.h2.util.StringUtils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueDate; +import org.h2.value.ValueInteger; +import org.h2.value.ValueInterval; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + * A date-time function. + */ +public final class DateTimeFunction extends Function1_2 { + + /** + * EXTRACT(). + */ + public static final int EXTRACT = 0; + + /** + * DATE_TRUNC() (non-standard). + */ + public static final int DATE_TRUNC = EXTRACT + 1; + + /** + * DATEADD() (non-standard). 
+ */ + public static final int DATEADD = DATE_TRUNC + 1; + + /** + * DATEDIFF() (non-standard). + */ + public static final int DATEDIFF = DATEADD + 1; + + private static final String[] NAMES = { // + "EXTRACT", "DATE_TRUNC", "DATEADD", "DATEDIFF" // + }; + + // Standard fields + + /** + * Year. + */ + public static final int YEAR = 0; + + /** + * Month. + */ + public static final int MONTH = YEAR + 1; + + /** + * Day of month. + */ + public static final int DAY = MONTH + 1; + + /** + * Hour. + */ + public static final int HOUR = DAY + 1; + + /** + * Minute. + */ + public static final int MINUTE = HOUR + 1; + + /** + * Second. + */ + public static final int SECOND = MINUTE + 1; + + /** + * Time zone hour. + */ + public static final int TIMEZONE_HOUR = SECOND + 1; + + /** + * Time zone minute. + */ + public static final int TIMEZONE_MINUTE = TIMEZONE_HOUR + 1; + + // Additional fields + + /** + * Time zone second. + */ + public static final int TIMEZONE_SECOND = TIMEZONE_MINUTE + 1; + + /** + * Millennium. + */ + public static final int MILLENNIUM = TIMEZONE_SECOND + 1; + + /** + * Century. + */ + public static final int CENTURY = MILLENNIUM + 1; + + /** + * Decade. + */ + public static final int DECADE = CENTURY + 1; + + /** + * Quarter. + */ + public static final int QUARTER = DECADE + 1; + + /** + * Millisecond. + */ + public static final int MILLISECOND = QUARTER + 1; + + /** + * Microsecond. + */ + public static final int MICROSECOND = MILLISECOND + 1; + + /** + * Nanosecond. + */ + public static final int NANOSECOND = MICROSECOND + 1; + + /** + * Day of year. + */ + public static final int DAY_OF_YEAR = NANOSECOND + 1; + + /** + * ISO day of week. + */ + public static final int ISO_DAY_OF_WEEK = DAY_OF_YEAR + 1; + + /** + * ISO week. + */ + public static final int ISO_WEEK = ISO_DAY_OF_WEEK + 1; + + /** + * ISO week-based year. + */ + public static final int ISO_WEEK_YEAR = ISO_WEEK + 1; + + /** + * Day of week (locale-specific). 
+ */ + public static final int DAY_OF_WEEK = ISO_WEEK_YEAR + 1; + + /** + * Week (locale-specific). + */ + public static final int WEEK = DAY_OF_WEEK + 1; + + /** + * Week-based year (locale-specific). + */ + public static final int WEEK_YEAR = WEEK + 1; + + /** + * Epoch. + */ + public static final int EPOCH = WEEK_YEAR + 1; + + /** + * Day of week (locale-specific) for PostgreSQL compatibility. + */ + public static final int DOW = EPOCH + 1; + + private static final int FIELDS_COUNT = DOW + 1; + + private static final String[] FIELD_NAMES = { // + "YEAR", "MONTH", "DAY", // + "HOUR", "MINUTE", "SECOND", // + "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TIMEZONE_SECOND", // + "MILLENNIUM", "CENTURY", "DECADE", // + "QUARTER", // + "MILLISECOND", "MICROSECOND", "NANOSECOND", // + "DAY_OF_YEAR", // + "ISO_DAY_OF_WEEK", "ISO_WEEK", "ISO_WEEK_YEAR", // + "DAY_OF_WEEK", "WEEK", "WEEK_YEAR", // + "EPOCH", "DOW", // + }; + + private static final BigDecimal BD_SECONDS_PER_DAY = new BigDecimal(DateTimeUtils.SECONDS_PER_DAY); + + private static final BigInteger BI_SECONDS_PER_DAY = BigInteger.valueOf(DateTimeUtils.SECONDS_PER_DAY); + + private static final BigDecimal BD_NANOS_PER_SECOND = new BigDecimal(NANOS_PER_SECOND); + + /** + * Local definitions of day-of-week, week-of-month, and week-of-year. + */ + private static volatile WeekFields WEEK_FIELDS; + + /** + * Get date-time field for the specified name. 
+ * + * @param name + * the name + * @return the date-time field + * @throws DbException + * on unknown field name + */ + public static int getField(String name) { + switch (StringUtils.toUpperEnglish(name)) { + case "YEAR": + case "YY": + case "YYYY": + case "SQL_TSI_YEAR": + return YEAR; + case "MONTH": + case "M": + case "MM": + case "SQL_TSI_MONTH": + return MONTH; + case "DAY": + case "D": + case "DD": + case "SQL_TSI_DAY": + return DAY; + case "HOUR": + case "HH": + case "SQL_TSI_HOUR": + return HOUR; + case "MINUTE": + case "MI": + case "N": + case "SQL_TSI_MINUTE": + return MINUTE; + case "SECOND": + case "S": + case "SS": + case "SQL_TSI_SECOND": + return SECOND; + case "TIMEZONE_HOUR": + return TIMEZONE_HOUR; + case "TIMEZONE_MINUTE": + return TIMEZONE_MINUTE; + case "TIMEZONE_SECOND": + return TIMEZONE_SECOND; + case "MILLENNIUM": + return MILLENNIUM; + case "CENTURY": + return CENTURY; + case "DECADE": + return DECADE; + case "QUARTER": + return QUARTER; + case "MILLISECOND": + case "MILLISECONDS": + case "MS": + return MILLISECOND; + case "MICROSECOND": + case "MICROSECONDS": + case "MCS": + return MICROSECOND; + case "NANOSECOND": + case "NS": + return NANOSECOND; + case "DAY_OF_YEAR": + case "DAYOFYEAR": + case "DY": + case "DOY": + return DAY_OF_YEAR; + case "ISO_DAY_OF_WEEK": + case "ISODOW": + return ISO_DAY_OF_WEEK; + case "ISO_WEEK": + return ISO_WEEK; + case "ISO_WEEK_YEAR": + case "ISO_YEAR": + case "ISOYEAR": + return ISO_WEEK_YEAR; + case "DAY_OF_WEEK": + case "DAYOFWEEK": + return DAY_OF_WEEK; + case "WEEK": + case "WK": + case "WW": + case "SQL_TSI_WEEK": + return WEEK; + case "WEEK_YEAR": + return WEEK_YEAR; + case "EPOCH": + return EPOCH; + case "DOW": + return DOW; + default: + throw DbException.getInvalidValueException("date-time field", name); + } + } + + /** + * Get the name of the specified date-time field. 
+ * + * @param field + * the date-time field + * @return the name of the specified field + */ + public static String getFieldName(int field) { + if (field < 0 || field >= FIELDS_COUNT) { + throw DbException.getUnsupportedException("datetime field " + field); + } + return FIELD_NAMES[field]; + } + + private final int function, field; + + public DateTimeFunction(int function, int field, Expression arg1, Expression arg2) { + super(arg1, arg2); + this.function = function; + this.field = field; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + switch (function) { + case EXTRACT: + v1 = field == EPOCH ? extractEpoch(session, v1) : ValueInteger.get(extractInteger(session, v1, field)); + break; + case DATE_TRUNC: + v1 = truncateDate(session, field, v1); + break; + case DATEADD: + v1 = dateadd(session, field, v1.getLong(), v2); + break; + case DATEDIFF: + v1 = ValueBigint.get(datediff(session, field, v1, v2)); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + /** + * Get the specified field of a date, however with years normalized to + * positive or negative, and month starting with 1. + * + * @param session + * the session + * @param date + * the date value + * @param field + * the field type + * @return the value + */ + private static int extractInteger(SessionLocal session, Value date, int field) { + return date instanceof ValueInterval ? 
extractInterval(date, field) : extractDateTime(session, date, field); + } + + private static int extractInterval(Value date, int field) { + ValueInterval interval = (ValueInterval) date; + IntervalQualifier qualifier = interval.getQualifier(); + boolean negative = interval.isNegative(); + long leading = interval.getLeading(), remaining = interval.getRemaining(); + long v; + switch (field) { + case YEAR: + v = IntervalUtils.yearsFromInterval(qualifier, negative, leading, remaining); + break; + case MONTH: + v = IntervalUtils.monthsFromInterval(qualifier, negative, leading, remaining); + break; + case DAY: + case DAY_OF_YEAR: + v = IntervalUtils.daysFromInterval(qualifier, negative, leading, remaining); + break; + case HOUR: + v = IntervalUtils.hoursFromInterval(qualifier, negative, leading, remaining); + break; + case MINUTE: + v = IntervalUtils.minutesFromInterval(qualifier, negative, leading, remaining); + break; + case SECOND: + v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) / NANOS_PER_SECOND; + break; + case MILLISECOND: + v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) / 1_000_000 % 1_000; + break; + case MICROSECOND: + v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) / 1_000 % 1_000_000; + break; + case NANOSECOND: + v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) % NANOS_PER_SECOND; + break; + default: + throw DbException.getUnsupportedException("getDatePart(" + date + ", " + field + ')'); + } + return (int) v; + } + + static int extractDateTime(SessionLocal session, Value date, int field) { + long[] a = DateTimeUtils.dateAndTimeFromValue(date, session); + long dateValue = a[0]; + long timeNanos = a[1]; + switch (field) { + case YEAR: + return DateTimeUtils.yearFromDateValue(dateValue); + case MONTH: + return DateTimeUtils.monthFromDateValue(dateValue); + case DAY: + return DateTimeUtils.dayFromDateValue(dateValue); + case HOUR: + return (int) 
(timeNanos / NANOS_PER_HOUR % 24); + case MINUTE: + return (int) (timeNanos / NANOS_PER_MINUTE % 60); + case SECOND: + return (int) (timeNanos / NANOS_PER_SECOND % 60); + case MILLISECOND: + return (int) (timeNanos / 1_000_000 % 1_000); + case MICROSECOND: + return (int) (timeNanos / 1_000 % 1_000_000); + case NANOSECOND: + return (int) (timeNanos % NANOS_PER_SECOND); + case MILLENNIUM: + return millennium(DateTimeUtils.yearFromDateValue(dateValue)); + case CENTURY: + return century(DateTimeUtils.yearFromDateValue(dateValue)); + case DECADE: + return decade(DateTimeUtils.yearFromDateValue(dateValue)); + case DAY_OF_YEAR: + return DateTimeUtils.getDayOfYear(dateValue); + case DOW: + if (session.getMode().getEnum() == ModeEnum.PostgreSQL) { + return DateTimeUtils.getSundayDayOfWeek(dateValue) - 1; + } + //$FALL-THROUGH$ + case DAY_OF_WEEK: + return getLocalDayOfWeek(dateValue); + case WEEK: + return getLocalWeekOfYear(dateValue); + case WEEK_YEAR: { + WeekFields wf = getWeekFields(); + return DateTimeUtils.getWeekYear(dateValue, wf.getFirstDayOfWeek().getValue(), + wf.getMinimalDaysInFirstWeek()); + } + case QUARTER: + return (DateTimeUtils.monthFromDateValue(dateValue) - 1) / 3 + 1; + case ISO_WEEK_YEAR: + return DateTimeUtils.getIsoWeekYear(dateValue); + case ISO_WEEK: + return DateTimeUtils.getIsoWeekOfYear(dateValue); + case ISO_DAY_OF_WEEK: + return DateTimeUtils.getIsoDayOfWeek(dateValue); + case TIMEZONE_HOUR: + case TIMEZONE_MINUTE: + case TIMEZONE_SECOND: { + int offsetSeconds; + if (date instanceof ValueTimestampTimeZone) { + offsetSeconds = ((ValueTimestampTimeZone) date).getTimeZoneOffsetSeconds(); + } else if (date instanceof ValueTimeTimeZone) { + offsetSeconds = ((ValueTimeTimeZone) date).getTimeZoneOffsetSeconds(); + } else { + offsetSeconds = session.currentTimeZone().getTimeZoneOffsetLocal(dateValue, timeNanos); + } + if (field == TIMEZONE_HOUR) { + return offsetSeconds / 3_600; + } else if (field == TIMEZONE_MINUTE) { + return offsetSeconds % 3_600 
/ 60; + } else { + return offsetSeconds % 60; + } + } + default: + throw DbException.getUnsupportedException("EXTRACT(" + getFieldName(field) + " FROM " + date + ')'); + } + } + + /** + * Truncate the given date-time value to the specified field. + * + * @param session + * the session + * @param field + * the date-time field + * @param value + * the date-time value + * @return date the truncated value + */ + private static Value truncateDate(SessionLocal session, int field, Value value) { + long[] fieldDateAndTime = DateTimeUtils.dateAndTimeFromValue(value, session); + long dateValue = fieldDateAndTime[0]; + long timeNanos = fieldDateAndTime[1]; + switch (field) { + case MICROSECOND: + timeNanos = timeNanos / 1_000L * 1_000L; + break; + case MILLISECOND: + timeNanos = timeNanos / 1_000_000L * 1_000_000L; + break; + case SECOND: + timeNanos = timeNanos / NANOS_PER_SECOND * NANOS_PER_SECOND; + break; + case MINUTE: + timeNanos = timeNanos / NANOS_PER_MINUTE * NANOS_PER_MINUTE; + break; + case HOUR: + timeNanos = timeNanos / NANOS_PER_HOUR * NANOS_PER_HOUR; + break; + case DAY: + timeNanos = 0L; + break; + case ISO_WEEK: + dateValue = truncateToWeek(dateValue, 1); + timeNanos = 0L; + break; + case WEEK: + dateValue = truncateToWeek(dateValue, getWeekFields().getFirstDayOfWeek().getValue()); + timeNanos = 0L; + break; + case ISO_WEEK_YEAR: + dateValue = truncateToWeekYear(dateValue, 1, 4); + timeNanos = 0L; + break; + case WEEK_YEAR: { + WeekFields weekFields = getWeekFields(); + dateValue = truncateToWeekYear(dateValue, weekFields.getFirstDayOfWeek().getValue(), + weekFields.getMinimalDaysInFirstWeek()); + break; + } + case MONTH: + dateValue = dateValue & (-1L << DateTimeUtils.SHIFT_MONTH) | 1L; + timeNanos = 0L; + break; + case QUARTER: + dateValue = DateTimeUtils.dateValue(DateTimeUtils.yearFromDateValue(dateValue), + ((DateTimeUtils.monthFromDateValue(dateValue) - 1) / 3) * 3 + 1, 1); + timeNanos = 0L; + break; + case YEAR: + dateValue = dateValue & (-1L << 
DateTimeUtils.SHIFT_YEAR) | (1L << DateTimeUtils.SHIFT_MONTH | 1L); + timeNanos = 0L; + break; + case DECADE: { + int year = DateTimeUtils.yearFromDateValue(dateValue); + if (year >= 0) { + year = year / 10 * 10; + } else { + year = (year - 9) / 10 * 10; + } + dateValue = DateTimeUtils.dateValue(year, 1, 1); + timeNanos = 0L; + break; + } + case CENTURY: { + int year = DateTimeUtils.yearFromDateValue(dateValue); + if (year > 0) { + year = (year - 1) / 100 * 100 + 1; + } else { + year = year / 100 * 100 - 99; + } + dateValue = DateTimeUtils.dateValue(year, 1, 1); + timeNanos = 0L; + break; + } + case MILLENNIUM: { + int year = DateTimeUtils.yearFromDateValue(dateValue); + if (year > 0) { + year = (year - 1) / 1000 * 1000 + 1; + } else { + year = year / 1000 * 1000 - 999; + } + dateValue = DateTimeUtils.dateValue(year, 1, 1); + timeNanos = 0L; + break; + } + default: + throw DbException.getUnsupportedException("DATE_TRUNC " + getFieldName(field)); + } + Value result = DateTimeUtils.dateTimeToValue(value, dateValue, timeNanos); + if (session.getMode().getEnum() == ModeEnum.PostgreSQL && result.getValueType() == Value.DATE) { + result = result.convertTo(Value.TIMESTAMP_TZ, session); + } + return result; + } + + private static long truncateToWeek(long dateValue, int firstDayOfWeek) { + long absoluteDay = DateTimeUtils.absoluteDayFromDateValue(dateValue); + int dayOfWeek = DateTimeUtils.getDayOfWeekFromAbsolute(absoluteDay, firstDayOfWeek); + if (dayOfWeek != 1) { + dateValue = DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay - dayOfWeek + 1); + } + return dateValue; + } + + private static long truncateToWeekYear(long dateValue, int firstDayOfWeek, int minimalDaysInFirstWeek) { + long abs = DateTimeUtils.absoluteDayFromDateValue(dateValue); + int year = DateTimeUtils.yearFromDateValue(dateValue); + long base = DateTimeUtils.getWeekYearAbsoluteStart(year, firstDayOfWeek, minimalDaysInFirstWeek); + if (abs < base) { + base = DateTimeUtils.getWeekYearAbsoluteStart(year - 
1, firstDayOfWeek, minimalDaysInFirstWeek); + } else if (DateTimeUtils.monthFromDateValue(dateValue) == 12 + && 24 + minimalDaysInFirstWeek < DateTimeUtils.dayFromDateValue(dateValue)) { + long next = DateTimeUtils.getWeekYearAbsoluteStart(year + 1, firstDayOfWeek, minimalDaysInFirstWeek); + if (abs >= next) { + base = next; + } + } + return DateTimeUtils.dateValueFromAbsoluteDay(base); + } + + /** + * DATEADD function. + * + * @param session + * the session + * @param field + * the date-time field + * @param count + * count to add + * @param v + * value to add to + * @return result + */ + public static Value dateadd(SessionLocal session, int field, long count, Value v) { + if (field != MILLISECOND && field != MICROSECOND && field != NANOSECOND + && (count > Integer.MAX_VALUE || count < Integer.MIN_VALUE)) { + throw DbException.getInvalidValueException("DATEADD count", count); + } + long[] a = DateTimeUtils.dateAndTimeFromValue(v, session); + long dateValue = a[0]; + long timeNanos = a[1]; + int type = v.getValueType(); + switch (field) { + case MILLENNIUM: + return addYearsMonths(field, true, count * 1_000, v, type, dateValue, timeNanos); + case CENTURY: + return addYearsMonths(field, true, count * 100, v, type, dateValue, timeNanos); + case DECADE: + return addYearsMonths(field, true, count * 10, v, type, dateValue, timeNanos); + case YEAR: + return addYearsMonths(field, true, count, v, type, dateValue, timeNanos); + case QUARTER: + return addYearsMonths(field, false, count *= 3, v, type, dateValue, timeNanos); + case MONTH: + return addYearsMonths(field, false, count, v, type, dateValue, timeNanos); + case WEEK: + case ISO_WEEK: + count *= 7; + //$FALL-THROUGH$ + case DAY_OF_WEEK: + case DOW: + case ISO_DAY_OF_WEEK: + case DAY: + case DAY_OF_YEAR: + if (type == Value.TIME || type == Value.TIME_TZ) { + throw DbException.getInvalidValueException("DATEADD time part", getFieldName(field)); + } + dateValue = DateTimeUtils + 
.dateValueFromAbsoluteDay(DateTimeUtils.absoluteDayFromDateValue(dateValue) + count); + return DateTimeUtils.dateTimeToValue(v, dateValue, timeNanos); + case HOUR: + count *= NANOS_PER_HOUR; + break; + case MINUTE: + count *= NANOS_PER_MINUTE; + break; + case SECOND: + case EPOCH: + count *= NANOS_PER_SECOND; + break; + case MILLISECOND: + count *= 1_000_000; + break; + case MICROSECOND: + count *= 1_000; + break; + case NANOSECOND: + break; + case TIMEZONE_HOUR: + return addToTimeZone(field, count * 3_600, v, type, dateValue, timeNanos); + case TIMEZONE_MINUTE: + return addToTimeZone(field, count * 60, v, type, dateValue, timeNanos); + case TIMEZONE_SECOND: + return addToTimeZone(field, count, v, type, dateValue, timeNanos); + default: + throw DbException.getUnsupportedException("DATEADD " + getFieldName(field)); + } + timeNanos += count; + if (timeNanos >= NANOS_PER_DAY || timeNanos < 0) { + long d; + if (timeNanos >= NANOS_PER_DAY) { + d = timeNanos / NANOS_PER_DAY; + } else { + d = (timeNanos - NANOS_PER_DAY + 1) / NANOS_PER_DAY; + } + dateValue = DateTimeUtils.dateValueFromAbsoluteDay(DateTimeUtils.absoluteDayFromDateValue(dateValue) + d); + timeNanos -= d * NANOS_PER_DAY; + } + if (type == Value.DATE) { + return ValueTimestamp.fromDateValueAndNanos(dateValue, timeNanos); + } + return DateTimeUtils.dateTimeToValue(v, dateValue, timeNanos); + } + + private static Value addYearsMonths(int field, boolean years, long count, Value v, int type, long dateValue, + long timeNanos) { + if (type == Value.TIME || type == Value.TIME_TZ) { + throw DbException.getInvalidValueException("DATEADD time part", getFieldName(field)); + } + long year = DateTimeUtils.yearFromDateValue(dateValue); + long month = DateTimeUtils.monthFromDateValue(dateValue); + if (years) { + year += count; + } else { + month += count; + } + return DateTimeUtils.dateTimeToValue(v, + DateTimeUtils.dateValueFromDenormalizedDate(year, month, DateTimeUtils.dayFromDateValue(dateValue)), + timeNanos); + } + + 
private static Value addToTimeZone(int field, long count, Value v, int type, long dateValue, long timeNanos) { + if (type == Value.TIMESTAMP_TZ) { + return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos, + MathUtils.convertLongToInt(count + ((ValueTimestampTimeZone) v).getTimeZoneOffsetSeconds())); + } else if (type == Value.TIME_TZ) { + return ValueTimeTimeZone.fromNanos(timeNanos, + MathUtils.convertLongToInt(count + ((ValueTimeTimeZone) v).getTimeZoneOffsetSeconds())); + } else { + throw DbException.getUnsupportedException("DATEADD " + getFieldName(field)); + } + } + + /** + * Calculate the number of crossed unit boundaries between two timestamps. + * This method is supported for MS SQL Server compatibility. + * + *
          +     * DATEDIFF(YEAR, '2004-12-31', '2005-01-01') = 1
          +     * 
          + * + * @param session + * the session + * @param field + * the date-time field + * @param v1 + * the first date-time value + * @param v2 + * the second date-time value + * @return the number of crossed boundaries + */ + private static long datediff(SessionLocal session, int field, Value v1, Value v2) { + long[] a1 = DateTimeUtils.dateAndTimeFromValue(v1, session); + long dateValue1 = a1[0]; + long absolute1 = DateTimeUtils.absoluteDayFromDateValue(dateValue1); + long[] a2 = DateTimeUtils.dateAndTimeFromValue(v2, session); + long dateValue2 = a2[0]; + long absolute2 = DateTimeUtils.absoluteDayFromDateValue(dateValue2); + switch (field) { + case NANOSECOND: + case MICROSECOND: + case MILLISECOND: + case SECOND: + case EPOCH: + case MINUTE: + case HOUR: + long timeNanos1 = a1[1]; + long timeNanos2 = a2[1]; + switch (field) { + case NANOSECOND: + return (absolute2 - absolute1) * NANOS_PER_DAY + (timeNanos2 - timeNanos1); + case MICROSECOND: + return (absolute2 - absolute1) * (MILLIS_PER_DAY * 1_000) + (timeNanos2 / 1_000 - timeNanos1 / 1_000); + case MILLISECOND: + return (absolute2 - absolute1) * MILLIS_PER_DAY + (timeNanos2 / 1_000_000 - timeNanos1 / 1_000_000); + case SECOND: + case EPOCH: + return (absolute2 - absolute1) * 86_400 + + (timeNanos2 / NANOS_PER_SECOND - timeNanos1 / NANOS_PER_SECOND); + case MINUTE: + return (absolute2 - absolute1) * 1_440 + + (timeNanos2 / NANOS_PER_MINUTE - timeNanos1 / NANOS_PER_MINUTE); + case HOUR: + return (absolute2 - absolute1) * 24 + (timeNanos2 / NANOS_PER_HOUR - timeNanos1 / NANOS_PER_HOUR); + } + // Fake fall-through + // $FALL-THROUGH$ + case DAY: + case DAY_OF_YEAR: + case DAY_OF_WEEK: + case DOW: + case ISO_DAY_OF_WEEK: + return absolute2 - absolute1; + case WEEK: + return weekdiff(absolute1, absolute2, getWeekFields().getFirstDayOfWeek().getValue()); + case ISO_WEEK: + return weekdiff(absolute1, absolute2, 1); + case MONTH: + return (DateTimeUtils.yearFromDateValue(dateValue2) - 
DateTimeUtils.yearFromDateValue(dateValue1)) * 12 + + DateTimeUtils.monthFromDateValue(dateValue2) - DateTimeUtils.monthFromDateValue(dateValue1); + case QUARTER: + return (DateTimeUtils.yearFromDateValue(dateValue2) - DateTimeUtils.yearFromDateValue(dateValue1)) * 4 + + (DateTimeUtils.monthFromDateValue(dateValue2) - 1) / 3 + - (DateTimeUtils.monthFromDateValue(dateValue1) - 1) / 3; + case MILLENNIUM: + return millennium(DateTimeUtils.yearFromDateValue(dateValue2)) + - millennium(DateTimeUtils.yearFromDateValue(dateValue1)); + case CENTURY: + return century(DateTimeUtils.yearFromDateValue(dateValue2)) + - century(DateTimeUtils.yearFromDateValue(dateValue1)); + case DECADE: + return decade(DateTimeUtils.yearFromDateValue(dateValue2)) + - decade(DateTimeUtils.yearFromDateValue(dateValue1)); + case YEAR: + return DateTimeUtils.yearFromDateValue(dateValue2) - DateTimeUtils.yearFromDateValue(dateValue1); + case TIMEZONE_HOUR: + case TIMEZONE_MINUTE: + case TIMEZONE_SECOND: { + int offsetSeconds1; + if (v1 instanceof ValueTimestampTimeZone) { + offsetSeconds1 = ((ValueTimestampTimeZone) v1).getTimeZoneOffsetSeconds(); + } else if (v1 instanceof ValueTimeTimeZone) { + offsetSeconds1 = ((ValueTimeTimeZone) v1).getTimeZoneOffsetSeconds(); + } else { + offsetSeconds1 = session.currentTimeZone().getTimeZoneOffsetLocal(dateValue1, a1[1]); + } + int offsetSeconds2; + if (v2 instanceof ValueTimestampTimeZone) { + offsetSeconds2 = ((ValueTimestampTimeZone) v2).getTimeZoneOffsetSeconds(); + } else if (v2 instanceof ValueTimeTimeZone) { + offsetSeconds2 = ((ValueTimeTimeZone) v2).getTimeZoneOffsetSeconds(); + } else { + offsetSeconds2 = session.currentTimeZone().getTimeZoneOffsetLocal(dateValue2, a2[1]); + } + if (field == TIMEZONE_HOUR) { + return (offsetSeconds2 / 3_600) - (offsetSeconds1 / 3_600); + } else if (field == TIMEZONE_MINUTE) { + return (offsetSeconds2 / 60) - (offsetSeconds1 / 60); + } else { + return offsetSeconds2 - offsetSeconds1; + } + } + default: + throw 
DbException.getUnsupportedException("DATEDIFF " + getFieldName(field)); + } + } + + private static long weekdiff(long absolute1, long absolute2, int firstDayOfWeek) { + absolute1 += 4 - firstDayOfWeek; + long r1 = absolute1 / 7; + if (absolute1 < 0 && (r1 * 7 != absolute1)) { + r1--; + } + absolute2 += 4 - firstDayOfWeek; + long r2 = absolute2 / 7; + if (absolute2 < 0 && (r2 * 7 != absolute2)) { + r2--; + } + return r2 - r1; + } + + private static int millennium(int year) { + return year > 0 ? (year + 999) / 1_000 : year / 1_000; + } + + private static int century(int year) { + return year > 0 ? (year + 99) / 100 : year / 100; + } + + private static int decade(int year) { + return year >= 0 ? year / 10 : (year - 9) / 10; + } + + private static int getLocalDayOfWeek(long dateValue) { + return DateTimeUtils.getDayOfWeek(dateValue, getWeekFields().getFirstDayOfWeek().getValue()); + } + + private static int getLocalWeekOfYear(long dateValue) { + WeekFields weekFields = getWeekFields(); + return DateTimeUtils.getWeekOfYear(dateValue, weekFields.getFirstDayOfWeek().getValue(), + weekFields.getMinimalDaysInFirstWeek()); + } + + private static WeekFields getWeekFields() { + WeekFields weekFields = WEEK_FIELDS; + if (weekFields == null) { + WEEK_FIELDS = weekFields = WeekFields.of(Locale.getDefault()); + } + return weekFields; + } + + private static ValueNumeric extractEpoch(SessionLocal session, Value value) { + ValueNumeric result; + if (value instanceof ValueInterval) { + ValueInterval interval = (ValueInterval) value; + if (interval.getQualifier().isYearMonth()) { + interval = (ValueInterval) interval.convertTo(TypeInfo.TYPE_INTERVAL_YEAR_TO_MONTH); + long leading = interval.getLeading(); + long remaining = interval.getRemaining(); + BigInteger bi = BigInteger.valueOf(leading).multiply(BigInteger.valueOf(31557600)) + .add(BigInteger.valueOf(remaining * 2592000)); + if (interval.isNegative()) { + bi = bi.negate(); + } + return ValueNumeric.get(bi); + } else { + return 
ValueNumeric + .get(new BigDecimal(IntervalUtils.intervalToAbsolute(interval)).divide(BD_NANOS_PER_SECOND)); + } + } + long[] a = DateTimeUtils.dateAndTimeFromValue(value, session); + long dateValue = a[0]; + long timeNanos = a[1]; + if (value instanceof ValueTime) { + result = ValueNumeric.get(BigDecimal.valueOf(timeNanos).divide(BD_NANOS_PER_SECOND)); + } else if (value instanceof ValueDate) { + result = ValueNumeric.get(BigInteger.valueOf(DateTimeUtils.absoluteDayFromDateValue(dateValue)) // + .multiply(BI_SECONDS_PER_DAY)); + } else { + BigDecimal bd = BigDecimal.valueOf(timeNanos).divide(BD_NANOS_PER_SECOND) + .add(BigDecimal.valueOf(DateTimeUtils.absoluteDayFromDateValue(dateValue)) // + .multiply(BD_SECONDS_PER_DAY)); + if (value instanceof ValueTimestampTimeZone) { + result = ValueNumeric.get( + bd.subtract(BigDecimal.valueOf(((ValueTimestampTimeZone) value).getTimeZoneOffsetSeconds()))); + } else if (value instanceof ValueTimeTimeZone) { + result = ValueNumeric + .get(bd.subtract(BigDecimal.valueOf(((ValueTimeTimeZone) value).getTimeZoneOffsetSeconds()))); + } else { + result = ValueNumeric.get(bd); + } + } + return result; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + switch (function) { + case EXTRACT: + type = field == EPOCH ? 
TypeInfo.getTypeInfo(Value.NUMERIC, + ValueBigint.DECIMAL_PRECISION + ValueTimestamp.MAXIMUM_SCALE, ValueTimestamp.MAXIMUM_SCALE, null) + : TypeInfo.TYPE_INTEGER; + break; + case DATE_TRUNC: { + type = left.getType(); + int valueType = type.getValueType(); + // TODO set scale when possible + if (!DataType.isDateTimeType(valueType)) { + throw DbException.getInvalidExpressionTypeException("DATE_TRUNC datetime argument", left); + } else if (session.getMode().getEnum() == ModeEnum.PostgreSQL && valueType == Value.DATE) { + type = TypeInfo.TYPE_TIMESTAMP_TZ; + } + break; + } + case DATEADD: { + int valueType = right.getType().getValueType(); + if (valueType == Value.DATE) { + switch (field) { + case HOUR: + case MINUTE: + case SECOND: + case MILLISECOND: + case MICROSECOND: + case NANOSECOND: + case EPOCH: + valueType = Value.TIMESTAMP; + } + } + type = TypeInfo.getTypeInfo(valueType); + break; + } + case DATEDIFF: + type = TypeInfo.TYPE_BIGINT; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (left.isConstant() && (right == null || right.isConstant())) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append(getName()).append('(').append(getFieldName(field)); + switch (function) { + case EXTRACT: + left.getUnenclosedSQL(builder.append(" FROM "), sqlFlags); + break; + case DATE_TRUNC: + left.getUnenclosedSQL(builder.append(", "), sqlFlags); + break; + case DATEADD: + case DATEDIFF: + left.getUnenclosedSQL(builder.append(", "), sqlFlags).append(", "); + right.getUnenclosedSQL(builder, sqlFlags); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return builder.append(')'); + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/DateTimeFunctions.java 
b/h2/src/main/org/h2/expression/function/DateTimeFunctions.java deleted file mode 100644 index 26920e6ba7..0000000000 --- a/h2/src/main/org/h2/expression/function/DateTimeFunctions.java +++ /dev/null @@ -1,817 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression.function; - -import static org.h2.expression.function.Function.CENTURY; -import static org.h2.expression.function.Function.DAY_OF_MONTH; -import static org.h2.expression.function.Function.DAY_OF_WEEK; -import static org.h2.expression.function.Function.DAY_OF_YEAR; -import static org.h2.expression.function.Function.DECADE; -import static org.h2.expression.function.Function.DOW; -import static org.h2.expression.function.Function.EPOCH; -import static org.h2.expression.function.Function.HOUR; -import static org.h2.expression.function.Function.ISO_DAY_OF_WEEK; -import static org.h2.expression.function.Function.ISO_WEEK; -import static org.h2.expression.function.Function.ISO_YEAR; -import static org.h2.expression.function.Function.MICROSECOND; -import static org.h2.expression.function.Function.MILLENNIUM; -import static org.h2.expression.function.Function.MILLISECOND; -import static org.h2.expression.function.Function.MINUTE; -import static org.h2.expression.function.Function.MONTH; -import static org.h2.expression.function.Function.NANOSECOND; -import static org.h2.expression.function.Function.QUARTER; -import static org.h2.expression.function.Function.SECOND; -import static org.h2.expression.function.Function.TIMEZONE_HOUR; -import static org.h2.expression.function.Function.TIMEZONE_MINUTE; -import static org.h2.expression.function.Function.WEEK; -import static org.h2.expression.function.Function.YEAR; -import static org.h2.util.DateTimeUtils.MILLIS_PER_DAY; -import static org.h2.util.DateTimeUtils.NANOS_PER_DAY; -import static 
org.h2.util.DateTimeUtils.NANOS_PER_HOUR; -import static org.h2.util.DateTimeUtils.NANOS_PER_MINUTE; -import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND; - -import java.math.BigDecimal; -import java.text.DateFormatSymbols; -import java.text.SimpleDateFormat; -import java.util.GregorianCalendar; -import java.util.HashMap; -import java.util.Locale; -import java.util.TimeZone; - -import org.h2.api.ErrorCode; -import org.h2.api.IntervalQualifier; -import org.h2.engine.Mode; -import org.h2.engine.Mode.ModeEnum; -import org.h2.message.DbException; -import org.h2.util.DateTimeUtils; -import org.h2.util.IntervalUtils; -import org.h2.util.StringUtils; -import org.h2.value.Value; -import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; -import org.h2.value.ValueInt; -import org.h2.value.ValueInterval; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; -import org.h2.value.ValueTimestampTimeZone; - -/** - * Date and time functions. - */ -public final class DateTimeFunctions { - private static final HashMap DATE_PART = new HashMap<>(128); - - /** - * English names of months and week days. 
- */ - private static volatile String[][] MONTHS_AND_WEEKS; - - static { - // DATE_PART - DATE_PART.put("SQL_TSI_YEAR", YEAR); - DATE_PART.put("YEAR", YEAR); - DATE_PART.put("YYYY", YEAR); - DATE_PART.put("YY", YEAR); - DATE_PART.put("ISO_YEAR", ISO_YEAR); - DATE_PART.put("ISOYEAR", ISO_YEAR); - DATE_PART.put("SQL_TSI_MONTH", MONTH); - DATE_PART.put("MONTH", MONTH); - DATE_PART.put("MM", MONTH); - DATE_PART.put("M", MONTH); - DATE_PART.put("QUARTER", QUARTER); - DATE_PART.put("SQL_TSI_WEEK", WEEK); - DATE_PART.put("WW", WEEK); - DATE_PART.put("WK", WEEK); - DATE_PART.put("WEEK", WEEK); - DATE_PART.put("ISO_WEEK", ISO_WEEK); - DATE_PART.put("DAY", DAY_OF_MONTH); - DATE_PART.put("DD", DAY_OF_MONTH); - DATE_PART.put("D", DAY_OF_MONTH); - DATE_PART.put("SQL_TSI_DAY", DAY_OF_MONTH); - DATE_PART.put("DAY_OF_WEEK", DAY_OF_WEEK); - DATE_PART.put("DAYOFWEEK", DAY_OF_WEEK); - DATE_PART.put("DOW", DOW); - DATE_PART.put("ISO_DAY_OF_WEEK", ISO_DAY_OF_WEEK); - DATE_PART.put("ISODOW", ISO_DAY_OF_WEEK); - DATE_PART.put("DAYOFYEAR", DAY_OF_YEAR); - DATE_PART.put("DAY_OF_YEAR", DAY_OF_YEAR); - DATE_PART.put("DY", DAY_OF_YEAR); - DATE_PART.put("DOY", DAY_OF_YEAR); - DATE_PART.put("SQL_TSI_HOUR", HOUR); - DATE_PART.put("HOUR", HOUR); - DATE_PART.put("HH", HOUR); - DATE_PART.put("SQL_TSI_MINUTE", MINUTE); - DATE_PART.put("MINUTE", MINUTE); - DATE_PART.put("MI", MINUTE); - DATE_PART.put("N", MINUTE); - DATE_PART.put("SQL_TSI_SECOND", SECOND); - DATE_PART.put("SECOND", SECOND); - DATE_PART.put("SS", SECOND); - DATE_PART.put("S", SECOND); - DATE_PART.put("MILLISECOND", MILLISECOND); - DATE_PART.put("MILLISECONDS", MILLISECOND); - DATE_PART.put("MS", MILLISECOND); - DATE_PART.put("EPOCH", EPOCH); - DATE_PART.put("MICROSECOND", MICROSECOND); - DATE_PART.put("MICROSECONDS", MICROSECOND); - DATE_PART.put("MCS", MICROSECOND); - DATE_PART.put("NANOSECOND", NANOSECOND); - DATE_PART.put("NS", NANOSECOND); - DATE_PART.put("TIMEZONE_HOUR", TIMEZONE_HOUR); - DATE_PART.put("TIMEZONE_MINUTE", 
TIMEZONE_MINUTE); - DATE_PART.put("DECADE", DECADE); - DATE_PART.put("CENTURY", CENTURY); - DATE_PART.put("MILLENNIUM", MILLENNIUM); - } - - /** - * DATEADD function. - * - * @param part - * name of date-time part - * @param count - * count to add - * @param v - * value to add to - * @return result - */ - public static Value dateadd(String part, long count, Value v) { - int field = getDatePart(part); - if (field != MILLISECOND && field != MICROSECOND && field != NANOSECOND - && (count > Integer.MAX_VALUE || count < Integer.MIN_VALUE)) { - throw DbException.getInvalidValueException("DATEADD count", count); - } - boolean withDate = !(v instanceof ValueTime); - boolean withTime = !(v instanceof ValueDate); - boolean forceTimestamp = false; - long[] a = DateTimeUtils.dateAndTimeFromValue(v); - long dateValue = a[0]; - long timeNanos = a[1]; - switch (field) { - case QUARTER: - count *= 3; - //$FALL-THROUGH$ - case YEAR: - case MONTH: { - if (!withDate) { - throw DbException.getInvalidValueException("DATEADD time part", part); - } - long year = DateTimeUtils.yearFromDateValue(dateValue); - long month = DateTimeUtils.monthFromDateValue(dateValue); - int day = DateTimeUtils.dayFromDateValue(dateValue); - if (field == YEAR) { - year += count; - } else { - month += count; - } - dateValue = DateTimeUtils.dateValueFromDenormalizedDate(year, month, day); - return DateTimeUtils.dateTimeToValue(v, dateValue, timeNanos, forceTimestamp); - } - case WEEK: - case ISO_WEEK: - count *= 7; - //$FALL-THROUGH$ - case DAY_OF_WEEK: - case DOW: - case ISO_DAY_OF_WEEK: - case DAY_OF_MONTH: - case DAY_OF_YEAR: - if (!withDate) { - throw DbException.getInvalidValueException("DATEADD time part", part); - } - dateValue = DateTimeUtils - .dateValueFromAbsoluteDay(DateTimeUtils.absoluteDayFromDateValue(dateValue) + count); - return DateTimeUtils.dateTimeToValue(v, dateValue, timeNanos, forceTimestamp); - case HOUR: - count *= NANOS_PER_HOUR; - break; - case MINUTE: - count *= NANOS_PER_MINUTE; - 
break; - case SECOND: - case EPOCH: - count *= NANOS_PER_SECOND; - break; - case MILLISECOND: - count *= 1_000_000; - break; - case MICROSECOND: - count *= 1_000; - break; - case NANOSECOND: - break; - case TIMEZONE_HOUR: - count *= 60; - //$FALL-THROUGH$ - case TIMEZONE_MINUTE: { - if (!(v instanceof ValueTimestampTimeZone)) { - throw DbException.getUnsupportedException("DATEADD " + part); - } - count += ((ValueTimestampTimeZone) v).getTimeZoneOffsetMins(); - return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos, (short) count); - } - default: - throw DbException.getUnsupportedException("DATEADD " + part); - } - if (!withTime) { - // Treat date as timestamp at the start of this date - forceTimestamp = true; - } - timeNanos += count; - if (timeNanos >= NANOS_PER_DAY || timeNanos < 0) { - long d; - if (timeNanos >= NANOS_PER_DAY) { - d = timeNanos / NANOS_PER_DAY; - } else { - d = (timeNanos - NANOS_PER_DAY + 1) / NANOS_PER_DAY; - } - timeNanos -= d * NANOS_PER_DAY; - return DateTimeUtils.dateTimeToValue(v, - DateTimeUtils.dateValueFromAbsoluteDay(DateTimeUtils.absoluteDayFromDateValue(dateValue) + d), - timeNanos, forceTimestamp); - } - return DateTimeUtils.dateTimeToValue(v, dateValue, timeNanos, forceTimestamp); - } - - /** - * Calculate the number of crossed unit boundaries between two timestamps. This - * method is supported for MS SQL Server compatibility. - * - *
          -     * DATEDIFF(YEAR, '2004-12-31', '2005-01-01') = 1
          -     * 
          - * - * @param part - * the part - * @param v1 - * the first date-time value - * @param v2 - * the second date-time value - * @return the number of crossed boundaries - */ - public static long datediff(String part, Value v1, Value v2) { - int field = getDatePart(part); - long[] a1 = DateTimeUtils.dateAndTimeFromValue(v1); - long dateValue1 = a1[0]; - long absolute1 = DateTimeUtils.absoluteDayFromDateValue(dateValue1); - long[] a2 = DateTimeUtils.dateAndTimeFromValue(v2); - long dateValue2 = a2[0]; - long absolute2 = DateTimeUtils.absoluteDayFromDateValue(dateValue2); - switch (field) { - case NANOSECOND: - case MICROSECOND: - case MILLISECOND: - case SECOND: - case EPOCH: - case MINUTE: - case HOUR: - long timeNanos1 = a1[1]; - long timeNanos2 = a2[1]; - switch (field) { - case NANOSECOND: - return (absolute2 - absolute1) * NANOS_PER_DAY + (timeNanos2 - timeNanos1); - case MICROSECOND: - return (absolute2 - absolute1) * (MILLIS_PER_DAY * 1_000) - + (timeNanos2 / 1_000 - timeNanos1 / 1_000); - case MILLISECOND: - return (absolute2 - absolute1) * MILLIS_PER_DAY - + (timeNanos2 / 1_000_000 - timeNanos1 / 1_000_000); - case SECOND: - case EPOCH: - return (absolute2 - absolute1) * 86_400 - + (timeNanos2 / NANOS_PER_SECOND - timeNanos1 / NANOS_PER_SECOND); - case MINUTE: - return (absolute2 - absolute1) * 1_440 - + (timeNanos2 / NANOS_PER_MINUTE - timeNanos1 / NANOS_PER_MINUTE); - case HOUR: - return (absolute2 - absolute1) * 24 + (timeNanos2 / NANOS_PER_HOUR - timeNanos1 / NANOS_PER_HOUR); - } - // Fake fall-through - // $FALL-THROUGH$ - case DAY_OF_MONTH: - case DAY_OF_YEAR: - case DAY_OF_WEEK: - case DOW: - case ISO_DAY_OF_WEEK: - return absolute2 - absolute1; - case WEEK: - return weekdiff(absolute1, absolute2, 0); - case ISO_WEEK: - return weekdiff(absolute1, absolute2, 1); - case MONTH: - return (DateTimeUtils.yearFromDateValue(dateValue2) - DateTimeUtils.yearFromDateValue(dateValue1)) * 12 - + DateTimeUtils.monthFromDateValue(dateValue2) - 
DateTimeUtils.monthFromDateValue(dateValue1); - case QUARTER: - return (DateTimeUtils.yearFromDateValue(dateValue2) - DateTimeUtils.yearFromDateValue(dateValue1)) * 4 - + (DateTimeUtils.monthFromDateValue(dateValue2) - 1) / 3 - - (DateTimeUtils.monthFromDateValue(dateValue1) - 1) / 3; - case YEAR: - return DateTimeUtils.yearFromDateValue(dateValue2) - DateTimeUtils.yearFromDateValue(dateValue1); - case TIMEZONE_HOUR: - case TIMEZONE_MINUTE: { - int offsetMinutes1; - if (v1 instanceof ValueTimestampTimeZone) { - offsetMinutes1 = ((ValueTimestampTimeZone) v1).getTimeZoneOffsetMins(); - } else { - offsetMinutes1 = DateTimeUtils.getTimeZoneOffsetMillis(null, dateValue1, a1[1]); - } - int offsetMinutes2; - if (v2 instanceof ValueTimestampTimeZone) { - offsetMinutes2 = ((ValueTimestampTimeZone) v2).getTimeZoneOffsetMins(); - } else { - offsetMinutes2 = DateTimeUtils.getTimeZoneOffsetMillis(null, dateValue2, a2[1]); - } - if (field == TIMEZONE_HOUR) { - return (offsetMinutes2 / 60) - (offsetMinutes1 / 60); - } else { - return offsetMinutes2 - offsetMinutes1; - } - } - default: - throw DbException.getUnsupportedException("DATEDIFF " + part); - } - } - - /** - * Extracts specified field from the specified date-time value. - * - * @param part - * the date part - * @param value - * the date-time value - * @param mode - * the database mode - * @return extracted field - */ - public static Value extract(String part, Value value, Mode mode) { - Value result; - int field = getDatePart(part); - if (field != EPOCH) { - result = ValueInt.get(getIntDatePart(value, field, mode)); - } else { - // Case where we retrieve the EPOCH time. 
- if (value instanceof ValueInterval) { - ValueInterval interval = (ValueInterval) value; - BigDecimal bd; - if (interval.getQualifier().isYearMonth()) { - interval = (ValueInterval) interval.convertTo(Value.INTERVAL_YEAR_TO_MONTH); - long leading = interval.getLeading(); - long remaining = interval.getRemaining(); - bd = BigDecimal.valueOf(leading).multiply(BigDecimal.valueOf(31557600)) - .add(BigDecimal.valueOf(remaining * 2592000)); - if (interval.isNegative()) { - bd = bd.negate(); - } - } else { - bd = new BigDecimal(IntervalUtils.intervalToAbsolute(interval)) - .divide(BigDecimal.valueOf(NANOS_PER_SECOND)); - } - return ValueDecimal.get(bd); - } - // First we retrieve the dateValue and his time in nanoseconds. - long[] a = DateTimeUtils.dateAndTimeFromValue(value); - long dateValue = a[0]; - long timeNanos = a[1]; - // We compute the time in nanoseconds and the total number of days. - BigDecimal timeNanosBigDecimal = new BigDecimal(timeNanos); - BigDecimal numberOfDays = new BigDecimal(DateTimeUtils.absoluteDayFromDateValue(dateValue)); - BigDecimal nanosSeconds = new BigDecimal(NANOS_PER_SECOND); - BigDecimal secondsPerDay = new BigDecimal(DateTimeUtils.SECONDS_PER_DAY); - - // Case where the value is of type time e.g. '10:00:00' - if (value instanceof ValueTime) { - - // In order to retrieve the EPOCH time we only have to convert the time - // in nanoseconds (previously retrieved) in seconds. - result = ValueDecimal.get(timeNanosBigDecimal.divide(nanosSeconds)); - - } else if (value instanceof ValueDate) { - - // Case where the value is of type date '2000:01:01', we have to retrieve the - // total number of days and multiply it by the number of seconds in a day. - result = ValueDecimal.get(numberOfDays.multiply(secondsPerDay)); - - } else if (value instanceof ValueTimestampTimeZone) { - - // Case where the value is a of type ValueTimestampTimeZone - // ('2000:01:01 10:00:00+05'). 
- // We retrieve the time zone offset in minutes - ValueTimestampTimeZone v = (ValueTimestampTimeZone) value; - BigDecimal timeZoneOffsetSeconds = new BigDecimal(v.getTimeZoneOffsetMins() * 60); - // Sum the time in nanoseconds and the total number of days in seconds - // and adding the timeZone offset in seconds. - result = ValueDecimal.get(timeNanosBigDecimal.divide(nanosSeconds) - .add(numberOfDays.multiply(secondsPerDay)).subtract(timeZoneOffsetSeconds)); - - } else { - - // By default, we have the date and the time ('2000:01:01 10:00:00') if no type - // is given. - // We just have to sum the time in nanoseconds and the total number of days in - // seconds. - result = ValueDecimal - .get(timeNanosBigDecimal.divide(nanosSeconds).add(numberOfDays.multiply(secondsPerDay))); - } - } - return result; - } - - /** - * Truncate the given date to the unit specified - * - * @param datePartStr the time unit (e.g. 'DAY', 'HOUR', etc.) - * @param valueDate the date - * @return date truncated to 'day' - */ - public static Value truncateDate(String datePartStr, Value valueDate) { - - int timeUnit = getDatePart(datePartStr); - - // Retrieve the dateValue and the time in nanoseconds of the date. - long[] fieldDateAndTime = DateTimeUtils.dateAndTimeFromValue(valueDate); - long dateValue = fieldDateAndTime[0]; - long timeNanosRetrieved = fieldDateAndTime[1]; - - // Variable used to the time in nanoseconds of the date truncated. - long timeNanos; - - // Compute the number of time unit in the date, for example, the - // number of time unit 'HOUR' in '15:14:13' is '15'. Then convert the - // result to nanoseconds. 
- switch (timeUnit) { - - case MICROSECOND: - - long nanoInMicroSecond = 1_000L; - long microseconds = timeNanosRetrieved / nanoInMicroSecond; - timeNanos = microseconds * nanoInMicroSecond; - break; - - case MILLISECOND: - - long nanoInMilliSecond = 1_000_000L; - long milliseconds = timeNanosRetrieved / nanoInMilliSecond; - timeNanos = milliseconds * nanoInMilliSecond; - break; - - case SECOND: - - long seconds = timeNanosRetrieved / NANOS_PER_SECOND; - timeNanos = seconds * NANOS_PER_SECOND; - break; - - case MINUTE: - - long minutes = timeNanosRetrieved / NANOS_PER_MINUTE; - timeNanos = minutes * NANOS_PER_MINUTE; - break; - - case HOUR: - - long hours = timeNanosRetrieved / NANOS_PER_HOUR; - timeNanos = hours * NANOS_PER_HOUR; - break; - - case DAY_OF_MONTH: - - timeNanos = 0L; - break; - - case WEEK: - - long absoluteDay = DateTimeUtils.absoluteDayFromDateValue(dateValue); - int dayOfWeek = DateTimeUtils.getDayOfWeekFromAbsolute(absoluteDay, 1); - if (dayOfWeek != 1) { - dateValue = DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay - dayOfWeek + 1); - } - timeNanos = 0L; - break; - - case MONTH: { - - long year = DateTimeUtils.yearFromDateValue(dateValue); - int month = DateTimeUtils.monthFromDateValue(dateValue); - dateValue = DateTimeUtils.dateValue(year, month, 1); - timeNanos = 0L; - break; - - } - case QUARTER: { - - long year = DateTimeUtils.yearFromDateValue(dateValue); - int month = DateTimeUtils.monthFromDateValue(dateValue); - month = ((month - 1) / 3) * 3 + 1; - dateValue = DateTimeUtils.dateValue(year, month, 1); - timeNanos = 0L; - break; - - } - case YEAR: { - - long year = DateTimeUtils.yearFromDateValue(dateValue); - dateValue = DateTimeUtils.dateValue(year, 1, 1); - timeNanos = 0L; - break; - - } - case DECADE: { - - long year = DateTimeUtils.yearFromDateValue(dateValue); - year = (year / 10) * 10; - dateValue = DateTimeUtils.dateValue(year, 1, 1); - timeNanos = 0L; - break; - - } - case CENTURY: { - - long year = 
DateTimeUtils.yearFromDateValue(dateValue); - year = ((year - 1) / 100) * 100 + 1; - dateValue = DateTimeUtils.dateValue(year, 1, 1); - timeNanos = 0L; - break; - - } - case MILLENNIUM: { - - long year = DateTimeUtils.yearFromDateValue(dateValue); - year = ((year - 1) / 1000) * 1000 + 1; - dateValue = DateTimeUtils.dateValue(year, 1, 1); - timeNanos = 0L; - break; - - } - default: - - // Return an exception in the timeUnit is not recognized - throw DbException.getUnsupportedException(datePartStr); - - } - - Value result; - - if (valueDate instanceof ValueTimestampTimeZone) { - - // Case we create a timestamp with timezone with the dateValue and - // timeNanos computed. - ValueTimestampTimeZone vTmp = (ValueTimestampTimeZone) valueDate; - result = ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos, vTmp.getTimeZoneOffsetMins()); - - } else { - - // By default, we create a timestamp with the dateValue and - // timeNanos computed. - result = ValueTimestamp.fromDateValueAndNanos(dateValue, timeNanos); - - } - - return result; - } - - /** - * Formats a date using a format string. 
- * - * @param date - * the date to format - * @param format - * the format string - * @param locale - * the locale - * @param timeZone - * the timezone - * @return the formatted date - */ - public static String formatDateTime(java.util.Date date, String format, String locale, String timeZone) { - SimpleDateFormat dateFormat = getDateFormat(format, locale, timeZone); - synchronized (dateFormat) { - return dateFormat.format(date); - } - } - - private static SimpleDateFormat getDateFormat(String format, String locale, String timeZone) { - try { - // currently, a new instance is create for each call - // however, could cache the last few instances - SimpleDateFormat df; - if (locale == null) { - df = new SimpleDateFormat(format); - } else { - Locale l = new Locale(locale); - df = new SimpleDateFormat(format, l); - } - if (timeZone != null) { - df.setTimeZone(TimeZone.getTimeZone(timeZone)); - } - return df; - } catch (Exception e) { - throw DbException.get(ErrorCode.PARSE_ERROR_1, e, format + "/" + locale + "/" + timeZone); - } - } - - /** - * Get date part function number from part name. - * - * @param part - * name of the part - * @return function number - */ - public static int getDatePart(String part) { - Integer p = DATE_PART.get(StringUtils.toUpperEnglish(part)); - if (p == null) { - throw DbException.getInvalidValueException("date part", part); - } - return p; - } - - /** - * Get the specified field of a date, however with years normalized to positive - * or negative, and month starting with 1. 
- * - * @param date - * the date value - * @param field - * the field type, see {@link Function} for constants - * @param mode - * the database mode - * @return the value - */ - public static int getIntDatePart(Value date, int field, Mode mode) { - if (date instanceof ValueInterval) { - ValueInterval interval = (ValueInterval) date; - IntervalQualifier qualifier = interval.getQualifier(); - boolean negative = interval.isNegative(); - long leading = interval.getLeading(), remaining = interval.getRemaining(); - long v; - switch (field) { - case YEAR: - v = IntervalUtils.yearsFromInterval(qualifier, negative, leading, remaining); - break; - case MONTH: - v = IntervalUtils.monthsFromInterval(qualifier, negative, leading, remaining); - break; - case DAY_OF_MONTH: - case DAY_OF_YEAR: - v = IntervalUtils.daysFromInterval(qualifier, negative, leading, remaining); - break; - case HOUR: - v = IntervalUtils.hoursFromInterval(qualifier, negative, leading, remaining); - break; - case MINUTE: - v = IntervalUtils.minutesFromInterval(qualifier, negative, leading, remaining); - break; - case SECOND: - v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) / NANOS_PER_SECOND; - break; - case MILLISECOND: - v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) / 1_000_000 % 1_000; - break; - case MICROSECOND: - v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) / 1_000 % 1_000_000; - break; - case NANOSECOND: - v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) % NANOS_PER_SECOND; - break; - default: - throw DbException.getUnsupportedException("getDatePart(" + date + ", " + field + ')'); - } - return (int) v; - } else { - long[] a = DateTimeUtils.dateAndTimeFromValue(date); - long dateValue = a[0]; - long timeNanos = a[1]; - switch (field) { - case YEAR: - return DateTimeUtils.yearFromDateValue(dateValue); - case MONTH: - return DateTimeUtils.monthFromDateValue(dateValue); - case 
DAY_OF_MONTH: - return DateTimeUtils.dayFromDateValue(dateValue); - case HOUR: - return (int) (timeNanos / NANOS_PER_HOUR % 24); - case MINUTE: - return (int) (timeNanos / NANOS_PER_MINUTE % 60); - case SECOND: - return (int) (timeNanos / NANOS_PER_SECOND % 60); - case MILLISECOND: - return (int) (timeNanos / 1_000_000 % 1_000); - case MICROSECOND: - return (int) (timeNanos / 1_000 % 1_000_000); - case NANOSECOND: - return (int) (timeNanos % NANOS_PER_SECOND); - case DAY_OF_YEAR: - return DateTimeUtils.getDayOfYear(dateValue); - case DAY_OF_WEEK: - return DateTimeUtils.getSundayDayOfWeek(dateValue); - case DOW: { - int dow = DateTimeUtils.getSundayDayOfWeek(dateValue); - if (mode.getEnum() == ModeEnum.PostgreSQL) { - dow--; - } - return dow; - } - case WEEK: - GregorianCalendar gc = DateTimeUtils.getCalendar(); - return DateTimeUtils.getWeekOfYear(dateValue, gc.getFirstDayOfWeek() - 1, - gc.getMinimalDaysInFirstWeek()); - case QUARTER: - return (DateTimeUtils.monthFromDateValue(dateValue) - 1) / 3 + 1; - case ISO_YEAR: - return DateTimeUtils.getIsoWeekYear(dateValue); - case ISO_WEEK: - return DateTimeUtils.getIsoWeekOfYear(dateValue); - case ISO_DAY_OF_WEEK: - return DateTimeUtils.getIsoDayOfWeek(dateValue); - case TIMEZONE_HOUR: - case TIMEZONE_MINUTE: { - int offsetMinutes; - if (date instanceof ValueTimestampTimeZone) { - offsetMinutes = ((ValueTimestampTimeZone) date).getTimeZoneOffsetMins(); - } else { - offsetMinutes = DateTimeUtils.getTimeZoneOffsetMillis(null, dateValue, timeNanos); - } - if (field == TIMEZONE_HOUR) { - return offsetMinutes / 60; - } - return offsetMinutes % 60; - } - } - } - throw DbException.getUnsupportedException("getDatePart(" + date + ", " + field + ')'); - } - - /** - * Return names of month or weeks. 
- * - * @param field - * 0 for months, 1 for weekdays - * @return names of month or weeks - */ - public static String[] getMonthsAndWeeks(int field) { - String[][] result = MONTHS_AND_WEEKS; - if (result == null) { - result = new String[2][]; - DateFormatSymbols dfs = DateFormatSymbols.getInstance(Locale.ENGLISH); - result[0] = dfs.getMonths(); - result[1] = dfs.getWeekdays(); - MONTHS_AND_WEEKS = result; - } - return result[field]; - } - - /** - * Check if a given string is a valid date part string. - * - * @param part - * the string - * @return true if it is - */ - public static boolean isDatePart(String part) { - return DATE_PART.containsKey(StringUtils.toUpperEnglish(part)); - } - - /** - * Parses a date using a format string. - * - * @param date - * the date to parse - * @param format - * the parsing format - * @param locale - * the locale - * @param timeZone - * the timeZone - * @return the parsed date - */ - public static java.util.Date parseDateTime(String date, String format, String locale, String timeZone) { - SimpleDateFormat dateFormat = getDateFormat(format, locale, timeZone); - try { - synchronized (dateFormat) { - return dateFormat.parse(date); - } - } catch (Exception e) { - // ParseException - throw DbException.get(ErrorCode.PARSE_ERROR_1, e, date); - } - } - - private static long weekdiff(long absolute1, long absolute2, int firstDayOfWeek) { - absolute1 += 4 - firstDayOfWeek; - long r1 = absolute1 / 7; - if (absolute1 < 0 && (r1 * 7 != absolute1)) { - r1--; - } - absolute2 += 4 - firstDayOfWeek; - long r2 = absolute2 / 7; - if (absolute2 < 0 && (r2 * 7 != absolute2)) { - r2--; - } - return r2 - r1; - } - - private DateTimeFunctions() { - } -} diff --git a/h2/src/main/org/h2/expression/function/DayMonthNameFunction.java b/h2/src/main/org/h2/expression/function/DayMonthNameFunction.java new file mode 100644 index 0000000000..a6d521a1e7 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/DayMonthNameFunction.java @@ -0,0 +1,107 @@ +/* + * 
Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.text.DateFormatSymbols; +import java.util.Locale; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * A DAYNAME() or MONTHNAME() function. + */ +public final class DayMonthNameFunction extends Function1 { + + /** + * DAYNAME() (non-standard). + */ + public static final int DAYNAME = 0; + + /** + * MONTHNAME() (non-standard). + */ + public static final int MONTHNAME = DAYNAME + 1; + + private static final String[] NAMES = { // + "DAYNAME", "MONTHNAME" // + }; + + /** + * English names of months and week days. + */ + private static volatile String[][] MONTHS_AND_WEEKS; + + private final int function; + + public DayMonthNameFunction(Expression arg, int function) { + super(arg); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + long dateValue = DateTimeUtils.dateAndTimeFromValue(v, session)[0]; + String result; + switch (function) { + case DAYNAME: + result = getMonthsAndWeeks(1)[DateTimeUtils.getDayOfWeek(dateValue, 0)]; + break; + case MONTHNAME: + result = getMonthsAndWeeks(0)[DateTimeUtils.monthFromDateValue(dateValue) - 1]; + break; + default: + throw DbException.getInternalError("function=" + function); + } + return ValueVarchar.get(result, session); + } + + /** + * Return names of month or weeks. 
+ * + * @param field + * 0 for months, 1 for weekdays + * @return names of month or weeks + */ + private static String[] getMonthsAndWeeks(int field) { + String[][] result = MONTHS_AND_WEEKS; + if (result == null) { + result = new String[2][]; + DateFormatSymbols dfs = DateFormatSymbols.getInstance(Locale.ENGLISH); + result[0] = dfs.getMonths(); + result[1] = dfs.getWeekdays(); + MONTHS_AND_WEEKS = result; + } + return result[field]; + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + type = TypeInfo.getTypeInfo(Value.VARCHAR, 20, 0, null); + if (arg.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/FileFunction.java b/h2/src/main/org/h2/expression/function/FileFunction.java new file mode 100644 index 0000000000..123582d851 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/FileFunction.java @@ -0,0 +1,145 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.Reader; +import java.nio.file.Files; +import java.nio.file.Paths; + +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.store.fs.FileUtils; +import org.h2.util.IOUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueLob; +import org.h2.value.ValueNull; + +/** + * A FILE_READ or FILE_WRITE function. 
+ */ +public final class FileFunction extends Function1_2 { + + /** + * FILE_READ() (non-standard). + */ + public static final int FILE_READ = 0; + + /** + * FILE_WRITE() (non-standard). + */ + public static final int FILE_WRITE = FILE_READ + 1; + + private static final String[] NAMES = { // + "FILE_READ", "FILE_WRITE" // + }; + + private final int function; + + public FileFunction(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + session.getUser().checkAdmin(); + Value v1 = left.getValue(session); + if (v1 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + switch (function) { + case FILE_READ: { + String fileName = v1.getString(); + Database database = session.getDatabase(); + try { + long fileLength = FileUtils.size(fileName); + ValueLob lob; + try (InputStream in = FileUtils.newInputStream(fileName)) { + if (right == null) { + lob = database.getLobStorage().createBlob(in, fileLength); + } else { + Value v2 = right.getValue(session); + Reader reader = v2 == ValueNull.INSTANCE ? 
new InputStreamReader(in) + : new InputStreamReader(in, v2.getString()); + lob = database.getLobStorage().createClob(reader, fileLength); + } + } + v1 = session.addTemporaryLob(lob); + } catch (IOException e) { + throw DbException.convertIOException(e, fileName); + } + break; + } + case FILE_WRITE: { + Value v2 = right.getValue(session); + if (v2 == ValueNull.INSTANCE) { + v1 = ValueNull.INSTANCE; + } else { + String fileName = v2.getString(); + try (OutputStream fileOutputStream = Files.newOutputStream(Paths.get(fileName)); + InputStream in = v1.getInputStream()) { + v1 = ValueBigint.get(IOUtils.copy(in, fileOutputStream)); + } catch (IOException e) { + throw DbException.convertIOException(e, fileName); + } + } + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + switch (function) { + case FILE_READ: + type = right == null ? 
TypeInfo.getTypeInfo(Value.BLOB, Integer.MAX_VALUE, 0, null) + : TypeInfo.getTypeInfo(Value.CLOB, Integer.MAX_VALUE, 0, null); + break; + case FILE_WRITE: + type = TypeInfo.TYPE_BIGINT; + break; + default: + throw DbException.getInternalError("function=" + function); + } + return this; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + case ExpressionVisitor.QUERY_COMPARABLE: + return false; + case ExpressionVisitor.READONLY: + if (function == FILE_WRITE) { + return false; + } + } + return super.isEverything(visitor); + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/Function.java b/h2/src/main/org/h2/expression/function/Function.java deleted file mode 100644 index a93a6f3987..0000000000 --- a/h2/src/main/org/h2/expression/function/Function.java +++ /dev/null @@ -1,2830 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.expression.function; - -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.Reader; -import java.nio.charset.StandardCharsets; -import java.security.MessageDigest; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.regex.PatternSyntaxException; - -import org.h2.api.ErrorCode; -import org.h2.command.Command; -import org.h2.command.Parser; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.engine.Mode; -import org.h2.engine.Mode.ModeEnum; -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; -import org.h2.expression.ExpressionVisitor; -import org.h2.expression.SequenceValue; -import org.h2.expression.ValueExpression; -import org.h2.expression.Variable; -import org.h2.index.Index; -import org.h2.message.DbException; -import org.h2.mode.FunctionsMSSQLServer; -import org.h2.mode.FunctionsMySQL; -import org.h2.mvstore.db.MVSpatialIndex; -import org.h2.schema.Schema; -import org.h2.schema.Sequence; -import org.h2.security.BlockCipher; -import org.h2.security.CipherFactory; -import org.h2.store.fs.FileUtils; -import org.h2.table.Column; -import org.h2.table.ColumnResolver; -import org.h2.table.LinkSchema; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.tools.CompressTool; -import org.h2.tools.Csv; -import org.h2.util.Bits; -import org.h2.util.DateTimeUtils; -import org.h2.util.IOUtils; -import org.h2.util.JdbcUtils; -import org.h2.util.MathUtils; -import org.h2.util.StringUtils; -import org.h2.util.Utils; -import org.h2.value.DataType; -import org.h2.value.TypeInfo; -import org.h2.value.Value; -import 
org.h2.value.ValueArray; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueBytes; -import org.h2.value.ValueCollectionBase; -import org.h2.value.ValueDate; -import org.h2.value.ValueDouble; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; -import org.h2.value.ValueString; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; -import org.h2.value.ValueTimestampTimeZone; -import org.h2.value.ValueUuid; - -/** - * This class implements most built-in functions of this database. - */ -public class Function extends Expression implements FunctionCall { - public static final int ABS = 0, ACOS = 1, ASIN = 2, ATAN = 3, ATAN2 = 4, - BITAND = 5, BITOR = 6, BITXOR = 7, CEILING = 8, COS = 9, COT = 10, - DEGREES = 11, EXP = 12, FLOOR = 13, LOG = 14, LOG10 = 15, MOD = 16, - PI = 17, POWER = 18, RADIANS = 19, RAND = 20, ROUND = 21, - ROUNDMAGIC = 22, SIGN = 23, SIN = 24, SQRT = 25, TAN = 26, - TRUNCATE = 27, SECURE_RAND = 28, HASH = 29, ENCRYPT = 30, - DECRYPT = 31, COMPRESS = 32, EXPAND = 33, ZERO = 34, - RANDOM_UUID = 35, COSH = 36, SINH = 37, TANH = 38, LN = 39, - BITGET = 40, ORA_HASH = 41; - - public static final int ASCII = 50, BIT_LENGTH = 51, CHAR = 52, - CHAR_LENGTH = 53, CONCAT = 54, DIFFERENCE = 55, HEXTORAW = 56, - INSERT = 57, INSTR = 58, LCASE = 59, LEFT = 60, LENGTH = 61, - LOCATE = 62, LTRIM = 63, OCTET_LENGTH = 64, RAWTOHEX = 65, - REPEAT = 66, REPLACE = 67, RIGHT = 68, RTRIM = 69, SOUNDEX = 70, - SPACE = 71, SUBSTR = 72, SUBSTRING = 73, UCASE = 74, LOWER = 75, - UPPER = 76, POSITION = 77, TRIM = 78, STRINGENCODE = 79, - STRINGDECODE = 80, STRINGTOUTF8 = 81, UTF8TOSTRING = 82, - XMLATTR = 83, XMLNODE = 84, XMLCOMMENT = 85, XMLCDATA = 86, - XMLSTARTDOC = 87, XMLTEXT = 88, REGEXP_REPLACE = 89, RPAD = 90, - LPAD = 91, CONCAT_WS = 92, TO_CHAR = 93, TRANSLATE = 94, /* 95 */ - TO_DATE = 96, TO_TIMESTAMP = 97, ADD_MONTHS = 98, TO_TIMESTAMP_TZ = 99; - - public 
static final int CURRENT_DATE = 100, CURRENT_TIME = 101, LOCALTIME = 102, - CURRENT_TIMESTAMP = 103, LOCALTIMESTAMP = 104, - DATE_ADD = 105, DATE_DIFF = 106, DAY_NAME = 107, DAY_OF_MONTH = 108, - DAY_OF_WEEK = 109, DAY_OF_YEAR = 110, HOUR = 111, MINUTE = 112, - MONTH = 113, MONTH_NAME = 114, QUARTER = 115, - SECOND = 116, WEEK = 117, YEAR = 118, EXTRACT = 119, - FORMATDATETIME = 120, PARSEDATETIME = 121, ISO_YEAR = 122, - ISO_WEEK = 123, ISO_DAY_OF_WEEK = 124, DATE_TRUNC = 125; - - /** - * Pseudo functions for DATEADD, DATEDIFF, and EXTRACT. - */ - public static final int MILLISECOND = 126, EPOCH = 127, MICROSECOND = 128, NANOSECOND = 129, - TIMEZONE_HOUR = 130, TIMEZONE_MINUTE = 131, DECADE = 132, CENTURY = 133, - MILLENNIUM = 134, DOW = 135; - - public static final int DATABASE = 150, USER = 151, CURRENT_USER = 152, - IDENTITY = 153, SCOPE_IDENTITY = 154, AUTOCOMMIT = 155, - READONLY = 156, DATABASE_PATH = 157, LOCK_TIMEOUT = 158, - DISK_SPACE_USED = 159, SIGNAL = 160, ESTIMATED_ENVELOPE = 161; - - private static final Pattern SIGNAL_PATTERN = Pattern.compile("[0-9A-Z]{5}"); - - public static final int IFNULL = 200, CASEWHEN = 201, CONVERT = 202, - CAST = 203, COALESCE = 204, NULLIF = 205, CASE = 206, - NEXTVAL = 207, CURRVAL = 208, ARRAY_GET = 209, CSVREAD = 210, - CSVWRITE = 211, MEMORY_FREE = 212, MEMORY_USED = 213, - LOCK_MODE = 214, SCHEMA = 215, SESSION_ID = 216, - ARRAY_LENGTH = 217, LINK_SCHEMA = 218, GREATEST = 219, LEAST = 220, - CANCEL_SESSION = 221, SET = 222, TABLE = 223, TABLE_DISTINCT = 224, - FILE_READ = 225, TRANSACTION_ID = 226, TRUNCATE_VALUE = 227, - NVL2 = 228, DECODE = 229, ARRAY_CONTAINS = 230, FILE_WRITE = 232, - UNNEST = 233, ARRAY_CONCAT = 234, ARRAY_APPEND = 235, ARRAY_SLICE = 236; - - public static final int REGEXP_LIKE = 240; - - /** - * Used in MySQL-style INSERT ... ON DUPLICATE KEY UPDATE ... 
VALUES - */ - public static final int VALUES = 250; - - /** - * This is called H2VERSION() and not VERSION(), because we return a fake - * value for VERSION() when running under the PostgreSQL ODBC driver. - */ - public static final int H2VERSION = 231; - - /** - * The flags for TRIM(LEADING ...) function. - */ - public static final int TRIM_LEADING = 1; - - /** - * The flags for TRIM(TRAILING ...) function. - */ - public static final int TRIM_TRAILING = 2; - - protected static final int VAR_ARGS = -1; - - private static final HashMap FUNCTIONS = new HashMap<>(256); - private static final char[] SOUNDEX_INDEX = new char[128]; - - protected Expression[] args; - - protected final FunctionInfo info; - private ArrayList varArgs; - private int flags; - protected TypeInfo type; - - private final Database database; - - static { - // SOUNDEX_INDEX - String index = "7AEIOUY8HW1BFPV2CGJKQSXZ3DT4L5MN6R"; - char number = 0; - for (int i = 0, length = index.length(); i < length; i++) { - char c = index.charAt(i); - if (c < '9') { - number = c; - } else { - SOUNDEX_INDEX[c] = number; - SOUNDEX_INDEX[Character.toLowerCase(c)] = number; - } - } - - // FUNCTIONS - addFunction("ABS", ABS, 1, Value.NULL); - addFunction("ACOS", ACOS, 1, Value.DOUBLE); - addFunction("ASIN", ASIN, 1, Value.DOUBLE); - addFunction("ATAN", ATAN, 1, Value.DOUBLE); - addFunction("ATAN2", ATAN2, 2, Value.DOUBLE); - addFunction("BITAND", BITAND, 2, Value.LONG); - addFunction("BITGET", BITGET, 2, Value.BOOLEAN); - addFunction("BITOR", BITOR, 2, Value.LONG); - addFunction("BITXOR", BITXOR, 2, Value.LONG); - addFunction("CEILING", CEILING, 1, Value.DOUBLE); - addFunction("CEIL", CEILING, 1, Value.DOUBLE); - addFunction("COS", COS, 1, Value.DOUBLE); - addFunction("COSH", COSH, 1, Value.DOUBLE); - addFunction("COT", COT, 1, Value.DOUBLE); - addFunction("DEGREES", DEGREES, 1, Value.DOUBLE); - addFunction("EXP", EXP, 1, Value.DOUBLE); - addFunction("FLOOR", FLOOR, 1, Value.DOUBLE); - addFunction("LOG", LOG, 1, 
Value.DOUBLE); - addFunction("LN", LN, 1, Value.DOUBLE); - addFunction("LOG10", LOG10, 1, Value.DOUBLE); - addFunction("MOD", MOD, 2, Value.LONG); - addFunction("PI", PI, 0, Value.DOUBLE); - addFunction("POWER", POWER, 2, Value.DOUBLE); - addFunction("RADIANS", RADIANS, 1, Value.DOUBLE); - // RAND without argument: get the next value - // RAND with one argument: seed the random generator - addFunctionNotDeterministic("RAND", RAND, VAR_ARGS, Value.DOUBLE); - addFunctionNotDeterministic("RANDOM", RAND, VAR_ARGS, Value.DOUBLE); - addFunction("ROUND", ROUND, VAR_ARGS, Value.DOUBLE); - addFunction("ROUNDMAGIC", ROUNDMAGIC, 1, Value.DOUBLE); - addFunction("SIGN", SIGN, 1, Value.INT); - addFunction("SIN", SIN, 1, Value.DOUBLE); - addFunction("SINH", SINH, 1, Value.DOUBLE); - addFunction("SQRT", SQRT, 1, Value.DOUBLE); - addFunction("TAN", TAN, 1, Value.DOUBLE); - addFunction("TANH", TANH, 1, Value.DOUBLE); - addFunction("TRUNCATE", TRUNCATE, VAR_ARGS, Value.NULL); - // same as TRUNCATE - addFunction("TRUNC", TRUNCATE, VAR_ARGS, Value.NULL); - addFunction("HASH", HASH, VAR_ARGS, Value.BYTES); - addFunction("ENCRYPT", ENCRYPT, 3, Value.BYTES); - addFunction("DECRYPT", DECRYPT, 3, Value.BYTES); - addFunctionNotDeterministic("SECURE_RAND", SECURE_RAND, 1, Value.BYTES); - addFunction("COMPRESS", COMPRESS, VAR_ARGS, Value.BYTES); - addFunction("EXPAND", EXPAND, 1, Value.BYTES); - addFunction("ZERO", ZERO, 0, Value.INT); - addFunctionNotDeterministic("RANDOM_UUID", RANDOM_UUID, 0, Value.UUID); - addFunctionNotDeterministic("SYS_GUID", RANDOM_UUID, 0, Value.UUID); - addFunctionNotDeterministic("UUID", RANDOM_UUID, 0, Value.UUID); - addFunction("ORA_HASH", ORA_HASH, VAR_ARGS, Value.LONG); - // string - addFunction("ASCII", ASCII, 1, Value.INT); - addFunction("BIT_LENGTH", BIT_LENGTH, 1, Value.LONG); - addFunction("CHAR", CHAR, 1, Value.STRING); - addFunction("CHR", CHAR, 1, Value.STRING); - addFunction("CHAR_LENGTH", CHAR_LENGTH, 1, Value.INT); - // same as CHAR_LENGTH - 
addFunction("CHARACTER_LENGTH", CHAR_LENGTH, 1, Value.INT); - addFunctionWithNull("CONCAT", CONCAT, VAR_ARGS, Value.STRING); - addFunctionWithNull("CONCAT_WS", CONCAT_WS, VAR_ARGS, Value.STRING); - addFunction("DIFFERENCE", DIFFERENCE, 2, Value.INT); - addFunction("HEXTORAW", HEXTORAW, 1, Value.STRING); - addFunctionWithNull("INSERT", INSERT, 4, Value.STRING); - addFunction("LCASE", LCASE, 1, Value.STRING); - addFunction("LEFT", LEFT, 2, Value.STRING); - addFunction("LENGTH", LENGTH, 1, Value.LONG); - // 2 or 3 arguments - addFunction("LOCATE", LOCATE, VAR_ARGS, Value.INT); - // same as LOCATE with 2 arguments - addFunction("POSITION", LOCATE, 2, Value.INT); - addFunction("INSTR", INSTR, VAR_ARGS, Value.INT); - addFunction("LTRIM", LTRIM, VAR_ARGS, Value.STRING); - addFunction("OCTET_LENGTH", OCTET_LENGTH, 1, Value.LONG); - addFunction("RAWTOHEX", RAWTOHEX, 1, Value.STRING); - addFunction("REPEAT", REPEAT, 2, Value.STRING); - addFunctionWithNull("REPLACE", REPLACE, VAR_ARGS, Value.STRING); - addFunction("RIGHT", RIGHT, 2, Value.STRING); - addFunction("RTRIM", RTRIM, VAR_ARGS, Value.STRING); - addFunction("SOUNDEX", SOUNDEX, 1, Value.STRING); - addFunction("SPACE", SPACE, 1, Value.STRING); - addFunction("SUBSTR", SUBSTR, VAR_ARGS, Value.STRING); - addFunction("SUBSTRING", SUBSTRING, VAR_ARGS, Value.STRING); - addFunction("UCASE", UCASE, 1, Value.STRING); - addFunction("LOWER", LOWER, 1, Value.STRING); - addFunction("UPPER", UPPER, 1, Value.STRING); - addFunction("POSITION", POSITION, 2, Value.INT); - addFunction("TRIM", TRIM, VAR_ARGS, Value.STRING); - addFunction("STRINGENCODE", STRINGENCODE, 1, Value.STRING); - addFunction("STRINGDECODE", STRINGDECODE, 1, Value.STRING); - addFunction("STRINGTOUTF8", STRINGTOUTF8, 1, Value.BYTES); - addFunction("UTF8TOSTRING", UTF8TOSTRING, 1, Value.STRING); - addFunction("XMLATTR", XMLATTR, 2, Value.STRING); - addFunctionWithNull("XMLNODE", XMLNODE, VAR_ARGS, Value.STRING); - addFunction("XMLCOMMENT", XMLCOMMENT, 1, Value.STRING); 
- addFunction("XMLCDATA", XMLCDATA, 1, Value.STRING); - addFunction("XMLSTARTDOC", XMLSTARTDOC, 0, Value.STRING); - addFunction("XMLTEXT", XMLTEXT, VAR_ARGS, Value.STRING); - addFunction("REGEXP_REPLACE", REGEXP_REPLACE, VAR_ARGS, Value.STRING); - addFunction("RPAD", RPAD, VAR_ARGS, Value.STRING); - addFunction("LPAD", LPAD, VAR_ARGS, Value.STRING); - addFunction("TO_CHAR", TO_CHAR, VAR_ARGS, Value.STRING); - addFunction("TRANSLATE", TRANSLATE, 3, Value.STRING); - addFunction("REGEXP_LIKE", REGEXP_LIKE, VAR_ARGS, Value.BOOLEAN); - - // date - addFunctionNotDeterministic("CURRENT_DATE", CURRENT_DATE, 0, Value.DATE, false); - addFunctionNotDeterministic("CURDATE", CURRENT_DATE, 0, Value.DATE); - addFunctionNotDeterministic("SYSDATE", CURRENT_DATE, 0, Value.DATE, false); - addFunctionNotDeterministic("TODAY", CURRENT_DATE, 0, Value.DATE, false); - - addFunctionNotDeterministic("CURRENT_TIME", CURRENT_TIME, VAR_ARGS, Value.TIME); - - addFunctionNotDeterministic("LOCALTIME", LOCALTIME, VAR_ARGS, Value.TIME, false); - addFunctionNotDeterministic("SYSTIME", LOCALTIME, 0, Value.TIME, false); - addFunctionNotDeterministic("CURTIME", LOCALTIME, VAR_ARGS, Value.TIME); - - addFunctionNotDeterministic("CURRENT_TIMESTAMP", CURRENT_TIMESTAMP, VAR_ARGS, Value.TIMESTAMP_TZ, false); - addFunctionNotDeterministic("SYSTIMESTAMP", CURRENT_TIMESTAMP, VAR_ARGS, Value.TIMESTAMP_TZ, false); - - addFunctionNotDeterministic("LOCALTIMESTAMP", LOCALTIMESTAMP, VAR_ARGS, Value.TIMESTAMP, false); - addFunctionNotDeterministic("NOW", LOCALTIMESTAMP, VAR_ARGS, Value.TIMESTAMP); - - addFunction("TO_DATE", TO_DATE, VAR_ARGS, Value.TIMESTAMP); - addFunction("TO_TIMESTAMP", TO_TIMESTAMP, VAR_ARGS, Value.TIMESTAMP); - addFunction("ADD_MONTHS", ADD_MONTHS, 2, Value.TIMESTAMP); - addFunction("TO_TIMESTAMP_TZ", TO_TIMESTAMP_TZ, VAR_ARGS, Value.TIMESTAMP_TZ); - addFunction("DATEADD", DATE_ADD, - 3, Value.TIMESTAMP); - addFunction("TIMESTAMPADD", DATE_ADD, - 3, Value.TIMESTAMP); - addFunction("DATEDIFF", 
DATE_DIFF, - 3, Value.LONG); - addFunction("TIMESTAMPDIFF", DATE_DIFF, - 3, Value.LONG); - addFunction("DAYNAME", DAY_NAME, - 1, Value.STRING); - addFunction("DAYNAME", DAY_NAME, - 1, Value.STRING); - addFunction("DAY", DAY_OF_MONTH, - 1, Value.INT); - addFunction("DAY_OF_MONTH", DAY_OF_MONTH, - 1, Value.INT); - addFunction("DAY_OF_WEEK", DAY_OF_WEEK, - 1, Value.INT); - addFunction("DAY_OF_YEAR", DAY_OF_YEAR, - 1, Value.INT); - addFunction("DAYOFMONTH", DAY_OF_MONTH, - 1, Value.INT); - addFunction("DAYOFWEEK", DAY_OF_WEEK, - 1, Value.INT); - addFunction("DAYOFYEAR", DAY_OF_YEAR, - 1, Value.INT); - addFunction("HOUR", HOUR, - 1, Value.INT); - addFunction("MINUTE", MINUTE, - 1, Value.INT); - addFunction("MONTH", MONTH, - 1, Value.INT); - addFunction("MONTHNAME", MONTH_NAME, - 1, Value.STRING); - addFunction("QUARTER", QUARTER, - 1, Value.INT); - addFunction("SECOND", SECOND, - 1, Value.INT); - addFunction("WEEK", WEEK, - 1, Value.INT); - addFunction("YEAR", YEAR, - 1, Value.INT); - addFunction("EXTRACT", EXTRACT, - 2, Value.INT); - addFunctionWithNull("FORMATDATETIME", FORMATDATETIME, - VAR_ARGS, Value.STRING); - addFunctionWithNull("PARSEDATETIME", PARSEDATETIME, - VAR_ARGS, Value.TIMESTAMP); - addFunction("ISO_YEAR", ISO_YEAR, - 1, Value.INT); - addFunction("ISO_WEEK", ISO_WEEK, - 1, Value.INT); - addFunction("ISO_DAY_OF_WEEK", ISO_DAY_OF_WEEK, - 1, Value.INT); - addFunction("DATE_TRUNC", DATE_TRUNC, 2, Value.NULL); - // system - addFunctionNotDeterministic("DATABASE", DATABASE, - 0, Value.STRING); - addFunctionNotDeterministic("USER", USER, - 0, Value.STRING); - addFunctionNotDeterministic("CURRENT_USER", CURRENT_USER, - 0, Value.STRING); - addFunctionNotDeterministic("IDENTITY", IDENTITY, - 0, Value.LONG); - addFunctionNotDeterministic("SCOPE_IDENTITY", SCOPE_IDENTITY, - 0, Value.LONG); - addFunctionNotDeterministic("IDENTITY_VAL_LOCAL", IDENTITY, - 0, Value.LONG); - addFunctionNotDeterministic("LAST_INSERT_ID", IDENTITY, - 0, Value.LONG); - 
addFunctionNotDeterministic("LASTVAL", IDENTITY, - 0, Value.LONG); - addFunctionNotDeterministic("AUTOCOMMIT", AUTOCOMMIT, - 0, Value.BOOLEAN); - addFunctionNotDeterministic("READONLY", READONLY, - 0, Value.BOOLEAN); - addFunction("DATABASE_PATH", DATABASE_PATH, - 0, Value.STRING); - addFunctionNotDeterministic("LOCK_TIMEOUT", LOCK_TIMEOUT, - 0, Value.INT); - addFunctionWithNull("IFNULL", IFNULL, - 2, Value.NULL); - addFunctionWithNull("ISNULL", IFNULL, - 2, Value.NULL); - addFunctionWithNull("CASEWHEN", CASEWHEN, - 3, Value.NULL); - addFunctionWithNull("CONVERT", CONVERT, - 1, Value.NULL); - addFunctionWithNull("CAST", CAST, - 1, Value.NULL); - addFunctionWithNull("TRUNCATE_VALUE", TRUNCATE_VALUE, - 3, Value.NULL); - addFunctionWithNull("COALESCE", COALESCE, - VAR_ARGS, Value.NULL); - addFunctionWithNull("NVL", COALESCE, - VAR_ARGS, Value.NULL); - addFunctionWithNull("NVL2", NVL2, - 3, Value.NULL); - addFunctionWithNull("NULLIF", NULLIF, - 2, Value.NULL); - addFunctionWithNull("CASE", CASE, - VAR_ARGS, Value.NULL); - addFunctionNotDeterministic("NEXTVAL", NEXTVAL, - VAR_ARGS, Value.LONG); - addFunctionNotDeterministic("CURRVAL", CURRVAL, - VAR_ARGS, Value.LONG); - addFunction("ARRAY_GET", ARRAY_GET, - 2, Value.NULL); - addFunctionWithNull("ARRAY_CONTAINS", ARRAY_CONTAINS, 2, Value.BOOLEAN); - addFunction("ARRAY_CAT", ARRAY_CONCAT, 2, Value.ARRAY); - addFunction("ARRAY_APPEND", ARRAY_APPEND, 2, Value.ARRAY); - addFunction("ARRAY_SLICE", ARRAY_SLICE, 3, Value.ARRAY); - addFunction("CSVREAD", CSVREAD, - VAR_ARGS, Value.RESULT_SET, false, false, false, true); - addFunction("CSVWRITE", CSVWRITE, - VAR_ARGS, Value.INT, false, false, true, true); - addFunctionNotDeterministic("MEMORY_FREE", MEMORY_FREE, - 0, Value.INT); - addFunctionNotDeterministic("MEMORY_USED", MEMORY_USED, - 0, Value.INT); - addFunctionNotDeterministic("LOCK_MODE", LOCK_MODE, - 0, Value.INT); - addFunctionNotDeterministic("SCHEMA", SCHEMA, - 0, Value.STRING); - 
addFunctionNotDeterministic("SESSION_ID", SESSION_ID, - 0, Value.INT); - addFunction("ARRAY_LENGTH", ARRAY_LENGTH, - 1, Value.INT); - addFunctionNotDeterministic("LINK_SCHEMA", LINK_SCHEMA, - 6, Value.RESULT_SET); - addFunctionWithNull("LEAST", LEAST, - VAR_ARGS, Value.NULL); - addFunctionWithNull("GREATEST", GREATEST, - VAR_ARGS, Value.NULL); - addFunctionNotDeterministic("CANCEL_SESSION", CANCEL_SESSION, - 1, Value.BOOLEAN); - addFunction("SET", SET, - 2, Value.NULL, false, false, true, true); - addFunction("FILE_READ", FILE_READ, - VAR_ARGS, Value.NULL, false, false, true, true); - addFunction("FILE_WRITE", FILE_WRITE, - 2, Value.LONG, false, false, true, true); - addFunctionNotDeterministic("TRANSACTION_ID", TRANSACTION_ID, - 0, Value.STRING); - addFunctionWithNull("DECODE", DECODE, - VAR_ARGS, Value.NULL); - addFunctionNotDeterministic("DISK_SPACE_USED", DISK_SPACE_USED, - 1, Value.LONG); - addFunctionWithNull("SIGNAL", SIGNAL, 2, Value.NULL); - addFunctionNotDeterministic("ESTIMATED_ENVELOPE", ESTIMATED_ENVELOPE, 2, Value.LONG); - addFunction("H2VERSION", H2VERSION, 0, Value.STRING); - - // TableFunction - addFunctionWithNull("TABLE", TABLE, VAR_ARGS, Value.RESULT_SET); - addFunctionWithNull("TABLE_DISTINCT", TABLE_DISTINCT, VAR_ARGS, Value.RESULT_SET); - addFunctionWithNull("UNNEST", UNNEST, VAR_ARGS, Value.RESULT_SET); - - // ON DUPLICATE KEY VALUES function - addFunction("VALUES", VALUES, 1, Value.NULL, false, true, false, true); - } - - /** - * Creates a new instance of function. 
- * - * @param database database - * @param info function information - */ - public Function(Database database, FunctionInfo info) { - this.database = database; - this.info = info; - if (info.parameterCount == VAR_ARGS) { - varArgs = Utils.newSmallArrayList(); - } else { - args = new Expression[info.parameterCount]; - } - } - - private static void addFunction(String name, int type, int parameterCount, - int returnDataType, boolean nullIfParameterIsNull, boolean deterministic, - boolean bufferResultSetToLocalTemp, boolean requireParentheses) { - FUNCTIONS.put(name, new FunctionInfo(name, type, parameterCount, returnDataType, nullIfParameterIsNull, - deterministic, bufferResultSetToLocalTemp, requireParentheses)); - } - - private static void addFunctionNotDeterministic(String name, int type, - int parameterCount, int returnDataType) { - addFunctionNotDeterministic(name, type, parameterCount, returnDataType, true); - } - - private static void addFunctionNotDeterministic(String name, int type, - int parameterCount, int returnDataType, boolean requireParentheses) { - addFunction(name, type, parameterCount, returnDataType, true, false, true, requireParentheses); - } - - private static void addFunction(String name, int type, int parameterCount, - int returnDataType) { - addFunction(name, type, parameterCount, returnDataType, true, true, true, true); - } - - private static void addFunctionWithNull(String name, int type, - int parameterCount, int returnDataType) { - addFunction(name, type, parameterCount, returnDataType, false, true, true, true); - } - - /** - * Get an instance of the given function for this database. - * If no function with this name is found, null is returned. 
- * - * @param database the database - * @param name the function name - * @return the function object or null - */ - public static Function getFunction(Database database, String name) { - if (!database.getSettings().databaseToUpper) { - // if not yet converted to uppercase, do it now - name = StringUtils.toUpperEnglish(name); - } - FunctionInfo info = FUNCTIONS.get(name); - if (info == null) { - switch (database.getMode().getEnum()) { - case MSSQLServer: - return FunctionsMSSQLServer.getFunction(database, name); - case MySQL: - return FunctionsMySQL.getFunction(database, name); - default: - return null; - } - } - switch (info.type) { - case TABLE: - case TABLE_DISTINCT: - case UNNEST: - return new TableFunction(database, info, Long.MAX_VALUE); - default: - return new Function(database, info); - } - } - - /** - * Returns function information for the specified function name. - * - * @param upperName the function name in upper case - * @return the function information or {@code null} - */ - public static FunctionInfo getFunctionInfo(String upperName) { - return FUNCTIONS.get(upperName); - } - - /** - * Set the parameter expression at the given index. - * - * @param index the index (0, 1,...) - * @param param the expression - */ - public void setParameter(int index, Expression param) { - if (varArgs != null) { - varArgs.add(param); - } else { - if (index >= args.length) { - throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, - info.name, Integer.toString(args.length)); - } - args[index] = param; - } - } - - /** - * Set the flags for this function. - * - * @param flags the flags to set - */ - public void setFlags(int flags) { - this.flags = flags; - } - - /** - * Returns the flags. 
- * - * @return the flags - */ - public int getFlags() { - return flags; - } - - @Override - public Value getValue(Session session) { - return getValueWithArgs(session, args); - } - - private Value getSimpleValue(Session session, Value v0, Expression[] args, - Value[] values) { - Value result; - switch (info.type) { - case ABS: - result = v0.getSignum() >= 0 ? v0 : v0.negate(); - break; - case ACOS: - result = ValueDouble.get(Math.acos(v0.getDouble())); - break; - case ASIN: - result = ValueDouble.get(Math.asin(v0.getDouble())); - break; - case ATAN: - result = ValueDouble.get(Math.atan(v0.getDouble())); - break; - case CEILING: - result = ValueDouble.get(Math.ceil(v0.getDouble())); - break; - case COS: - result = ValueDouble.get(Math.cos(v0.getDouble())); - break; - case COSH: - result = ValueDouble.get(Math.cosh(v0.getDouble())); - break; - case COT: { - double d = Math.tan(v0.getDouble()); - if (d == 0.0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL(false)); - } - result = ValueDouble.get(1. 
/ d); - break; - } - case DEGREES: - result = ValueDouble.get(Math.toDegrees(v0.getDouble())); - break; - case EXP: - result = ValueDouble.get(Math.exp(v0.getDouble())); - break; - case FLOOR: - result = ValueDouble.get(Math.floor(v0.getDouble())); - break; - case LN: - result = ValueDouble.get(Math.log(v0.getDouble())); - break; - case LOG: - if (database.getMode().logIsLogBase10) { - result = ValueDouble.get(Math.log10(v0.getDouble())); - } else { - result = ValueDouble.get(Math.log(v0.getDouble())); - } - break; - case LOG10: - result = ValueDouble.get(Math.log10(v0.getDouble())); - break; - case PI: - result = ValueDouble.get(Math.PI); - break; - case RADIANS: - result = ValueDouble.get(Math.toRadians(v0.getDouble())); - break; - case RAND: { - if (v0 != null) { - session.getRandom().setSeed(v0.getInt()); - } - result = ValueDouble.get(session.getRandom().nextDouble()); - break; - } - case ROUNDMAGIC: - result = ValueDouble.get(roundMagic(v0.getDouble())); - break; - case SIGN: - result = ValueInt.get(v0.getSignum()); - break; - case SIN: - result = ValueDouble.get(Math.sin(v0.getDouble())); - break; - case SINH: - result = ValueDouble.get(Math.sinh(v0.getDouble())); - break; - case SQRT: - result = ValueDouble.get(Math.sqrt(v0.getDouble())); - break; - case TAN: - result = ValueDouble.get(Math.tan(v0.getDouble())); - break; - case TANH: - result = ValueDouble.get(Math.tanh(v0.getDouble())); - break; - case SECURE_RAND: - result = ValueBytes.getNoCopy( - MathUtils.secureRandomBytes(v0.getInt())); - break; - case EXPAND: - result = ValueBytes.getNoCopy( - CompressTool.getInstance().expand(v0.getBytesNoCopy())); - break; - case ZERO: - result = ValueInt.get(0); - break; - case RANDOM_UUID: - result = ValueUuid.getNewRandom(); - break; - // string - case ASCII: { - String s = v0.getString(); - if (s.isEmpty()) { - result = ValueNull.INSTANCE; - } else { - result = ValueInt.get(s.charAt(0)); - } - break; - } - case BIT_LENGTH: - result = ValueLong.get(16 * 
length(v0)); - break; - case CHAR: - result = ValueString.get(String.valueOf((char) v0.getInt()), - database.getMode().treatEmptyStringsAsNull); - break; - case CHAR_LENGTH: - case LENGTH: - result = ValueLong.get(length(v0)); - break; - case OCTET_LENGTH: - result = ValueLong.get(2 * length(v0)); - break; - case CONCAT_WS: - case CONCAT: { - result = ValueNull.INSTANCE; - int start = 0; - String separator = ""; - if (info.type == CONCAT_WS) { - start = 1; - separator = getNullOrValue(session, args, values, 0).getString(); - } - for (int i = start; i < args.length; i++) { - Value v = getNullOrValue(session, args, values, i); - if (v == ValueNull.INSTANCE) { - continue; - } - if (result == ValueNull.INSTANCE) { - result = v; - } else { - String tmp = v.getString(); - if (!StringUtils.isNullOrEmpty(separator) - && !StringUtils.isNullOrEmpty(tmp)) { - tmp = separator + tmp; - } - result = ValueString.get(result.getString() + tmp, - database.getMode().treatEmptyStringsAsNull); - } - } - if (info.type == CONCAT_WS) { - if (separator != null && result == ValueNull.INSTANCE) { - result = ValueString.get("", - database.getMode().treatEmptyStringsAsNull); - } - } - break; - } - case HEXTORAW: - result = ValueString.get(hexToRaw(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case LOWER: - case LCASE: - // TODO this is locale specific, need to document or provide a way - // to set the locale - result = ValueString.get(v0.getString().toLowerCase(), - database.getMode().treatEmptyStringsAsNull); - break; - case RAWTOHEX: - result = ValueString.get(rawToHex(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case SOUNDEX: - result = ValueString.get(getSoundex(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case SPACE: { - int len = Math.max(0, v0.getInt()); - char[] chars = new char[len]; - for (int i = len - 1; i >= 0; i--) { - chars[i] = ' '; - } - result = ValueString.get(new String(chars), - 
database.getMode().treatEmptyStringsAsNull); - break; - } - case UPPER: - case UCASE: - // TODO this is locale specific, need to document or provide a way - // to set the locale - result = ValueString.get(v0.getString().toUpperCase(), - database.getMode().treatEmptyStringsAsNull); - break; - case STRINGENCODE: - result = ValueString.get(StringUtils.javaEncode(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case STRINGDECODE: - result = ValueString.get(StringUtils.javaDecode(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case STRINGTOUTF8: - result = ValueBytes.getNoCopy(v0.getString(). - getBytes(StandardCharsets.UTF_8)); - break; - case UTF8TOSTRING: - result = ValueString.get(new String(v0.getBytesNoCopy(), - StandardCharsets.UTF_8), - database.getMode().treatEmptyStringsAsNull); - break; - case XMLCOMMENT: - result = ValueString.get(StringUtils.xmlComment(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case XMLCDATA: - result = ValueString.get(StringUtils.xmlCData(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case XMLSTARTDOC: - result = ValueString.get(StringUtils.xmlStartDoc(), - database.getMode().treatEmptyStringsAsNull); - break; - case CURRENT_DATE: { - result = (database.getMode().dateTimeValueWithinTransaction ? session.getTransactionStart() - : session.getCurrentCommandStart()).convertTo(Value.DATE); - break; - } - case CURRENT_TIME: - case LOCALTIME: { - ValueTime vt = (ValueTime) (database.getMode().dateTimeValueWithinTransaction - ? session.getTransactionStart() - : session.getCurrentCommandStart()).convertTo(Value.TIME); - result = vt.convertScale(false, v0 == null ? 0 : v0.getInt()); - break; - } - case CURRENT_TIMESTAMP: { - ValueTimestampTimeZone vt = database.getMode().dateTimeValueWithinTransaction - ? session.getTransactionStart() - : session.getCurrentCommandStart(); - result = vt.convertScale(false, v0 == null ? 
6 : v0.getInt()); - break; - } - case LOCALTIMESTAMP: { - Value vt = (database.getMode().dateTimeValueWithinTransaction ? session.getTransactionStart() - : session.getCurrentCommandStart()).convertTo(Value.TIMESTAMP); - result = vt.convertScale(false, v0 == null ? 6 : v0.getInt()); - break; - } - case DAY_NAME: { - int dayOfWeek = DateTimeUtils.getSundayDayOfWeek(DateTimeUtils.dateAndTimeFromValue(v0)[0]); - result = ValueString.get(DateTimeFunctions.getMonthsAndWeeks(1)[dayOfWeek], - database.getMode().treatEmptyStringsAsNull); - break; - } - case DAY_OF_MONTH: - case DAY_OF_WEEK: - case DAY_OF_YEAR: - case HOUR: - case MINUTE: - case MONTH: - case QUARTER: - case ISO_YEAR: - case ISO_WEEK: - case ISO_DAY_OF_WEEK: - case SECOND: - case WEEK: - case YEAR: - result = ValueInt.get(DateTimeFunctions.getIntDatePart(v0, info.type, database.getMode())); - break; - case MONTH_NAME: { - int month = DateTimeUtils.monthFromDateValue(DateTimeUtils.dateAndTimeFromValue(v0)[0]); - result = ValueString.get(DateTimeFunctions.getMonthsAndWeeks(0)[month - 1], - database.getMode().treatEmptyStringsAsNull); - break; - } - case DATABASE: - result = ValueString.get(database.getShortName(), - database.getMode().treatEmptyStringsAsNull); - break; - case USER: - case CURRENT_USER: - result = ValueString.get(session.getUser().getName(), - database.getMode().treatEmptyStringsAsNull); - break; - case IDENTITY: - result = session.getLastIdentity(); - break; - case SCOPE_IDENTITY: - result = session.getLastScopeIdentity(); - break; - case AUTOCOMMIT: - result = ValueBoolean.get(session.getAutoCommit()); - break; - case READONLY: - result = ValueBoolean.get(database.isReadOnly()); - break; - case DATABASE_PATH: { - String path = database.getDatabasePath(); - result = path == null ? 
- (Value) ValueNull.INSTANCE : ValueString.get(path, - database.getMode().treatEmptyStringsAsNull); - break; - } - case LOCK_TIMEOUT: - result = ValueInt.get(session.getLockTimeout()); - break; - case DISK_SPACE_USED: - result = ValueLong.get(getDiskSpaceUsed(session, v0)); - break; - case ESTIMATED_ENVELOPE: - result = getEstimatedEnvelope(session, v0, values[1]); - break; - case CAST: - case CONVERT: { - Mode mode = database.getMode(); - TypeInfo type = this.type; - v0 = v0.convertTo(type, mode, null); - v0 = v0.convertScale(mode.convertOnlyToSmallerScale, type.getScale()); - v0 = v0.convertPrecision(type.getPrecision(), false); - result = v0; - break; - } - case MEMORY_FREE: - session.getUser().checkAdmin(); - result = ValueInt.get(Utils.getMemoryFree()); - break; - case MEMORY_USED: - session.getUser().checkAdmin(); - result = ValueInt.get(Utils.getMemoryUsed()); - break; - case LOCK_MODE: - result = ValueInt.get(database.getLockMode()); - break; - case SCHEMA: - result = ValueString.get(session.getCurrentSchemaName(), - database.getMode().treatEmptyStringsAsNull); - break; - case SESSION_ID: - result = ValueInt.get(session.getId()); - break; - case IFNULL: { - result = v0; - if (v0 == ValueNull.INSTANCE) { - result = getNullOrValue(session, args, values, 1); - } - result = result.convertTo(type, database.getMode(), null); - break; - } - case CASEWHEN: { - Value v; - if (!v0.getBoolean()) { - v = getNullOrValue(session, args, values, 2); - } else { - v = getNullOrValue(session, args, values, 1); - } - result = v.convertTo(type, database.getMode(), null); - break; - } - case DECODE: { - int index = -1; - for (int i = 1, len = args.length - 1; i < len; i += 2) { - if (database.areEqual(v0, - getNullOrValue(session, args, values, i))) { - index = i + 1; - break; - } - } - if (index < 0 && args.length % 2 == 0) { - index = args.length - 1; - } - Value v = index < 0 ? 
ValueNull.INSTANCE : - getNullOrValue(session, args, values, index); - result = v.convertTo(type, database.getMode(), null); - break; - } - case NVL2: { - Value v; - if (v0 == ValueNull.INSTANCE) { - v = getNullOrValue(session, args, values, 2); - } else { - v = getNullOrValue(session, args, values, 1); - } - result = v.convertTo(type, database.getMode(), null); - break; - } - case COALESCE: { - result = v0; - for (int i = 0; i < args.length; i++) { - Value v = getNullOrValue(session, args, values, i); - if (v != ValueNull.INSTANCE) { - result = v.convertTo(type, database.getMode(), null); - break; - } - } - break; - } - case GREATEST: - case LEAST: { - result = ValueNull.INSTANCE; - for (int i = 0; i < args.length; i++) { - Value v = getNullOrValue(session, args, values, i); - if (v != ValueNull.INSTANCE) { - v = v.convertTo(type, database.getMode(), null); - if (result == ValueNull.INSTANCE) { - result = v; - } else { - int comp = database.compareTypeSafe(result, v); - if (info.type == GREATEST && comp < 0) { - result = v; - } else if (info.type == LEAST && comp > 0) { - result = v; - } - } - } - } - break; - } - case CASE: { - Expression then = null; - if (v0 == null) { - // Searched CASE expression - // (null, when, then) - // (null, when, then, else) - // (null, when, then, when, then) - // (null, when, then, when, then, else) - for (int i = 1, len = args.length - 1; i < len; i += 2) { - Value when = args[i].getValue(session); - if (when.getBoolean()) { - then = args[i + 1]; - break; - } - } - } else { - // Simple CASE expression - // (expr, when, then) - // (expr, when, then, else) - // (expr, when, then, when, then) - // (expr, when, then, when, then, else) - if (v0 != ValueNull.INSTANCE) { - for (int i = 1, len = args.length - 1; i < len; i += 2) { - Value when = args[i].getValue(session); - if (database.areEqual(v0, when)) { - then = args[i + 1]; - break; - } - } - } - } - if (then == null && args.length % 2 == 0) { - // then = elsePart - then = 
args[args.length - 1]; - } - Value v = then == null ? ValueNull.INSTANCE : then.getValue(session); - result = v.convertTo(type, database.getMode(), null); - break; - } - case ARRAY_GET: { - Value[] list = getArray(v0); - if (list != null) { - Value v1 = getNullOrValue(session, args, values, 1); - int element = v1.getInt(); - if (element < 1 || element > list.length) { - result = ValueNull.INSTANCE; - } else { - result = list[element - 1]; - } - } else { - result = ValueNull.INSTANCE; - } - break; - } - case ARRAY_LENGTH: { - Value[] list = getArray(v0); - if (list != null) { - result = ValueInt.get(list.length); - } else { - result = ValueNull.INSTANCE; - } - break; - } - case ARRAY_CONTAINS: { - result = ValueBoolean.FALSE; - Value[] list = getArray(v0); - if (list != null) { - Value v1 = getNullOrValue(session, args, values, 1); - for (Value v : list) { - if (database.areEqual(v, v1)) { - result = ValueBoolean.TRUE; - break; - } - } - } else { - result = ValueNull.INSTANCE; - } - break; - } - case CANCEL_SESSION: { - result = ValueBoolean.get(cancelStatement(session, v0.getInt())); - break; - } - case TRANSACTION_ID: { - result = session.getTransactionId(); - break; - } - default: - result = null; - } - return result; - } - - private static Value[] getArray(Value v0) { - int t = v0.getValueType(); - Value[] list; - if (t == Value.ARRAY || t == Value.ROW) { - list = ((ValueCollectionBase) v0).getList(); - } else { - list = null; - } - return list; - } - - private static boolean cancelStatement(Session session, int targetSessionId) { - session.getUser().checkAdmin(); - Session[] sessions = session.getDatabase().getSessions(false); - for (Session s : sessions) { - if (s.getId() == targetSessionId) { - Command c = s.getCurrentCommand(); - if (c == null) { - return false; - } - c.cancel(); - return true; - } - } - return false; - } - - private static long getDiskSpaceUsed(Session session, Value tableName) { - return getTable(session, tableName).getDiskSpaceUsed(); - } 
- - private static Value getEstimatedEnvelope(Session session, Value tableName, Value columnName) { - Table table = getTable(session, tableName); - Column column = table.getColumn(columnName.getString()); - ArrayList indexes = table.getIndexes(); - if (indexes != null) { - for (int i = 1, size = indexes.size(); i < size; i++) { - Index index = indexes.get(i); - if (index instanceof MVSpatialIndex && index.isFirstColumn(column)) { - return ((MVSpatialIndex) index).getEstimatedBounds(session); - } - } - } - return ValueNull.INSTANCE; - } - - private static Table getTable(Session session, Value tableName) { - return new Parser(session).parseTableName(tableName.getString()); - } - - /** - * Get value transformed by expression, or null if i is out of range or - * the input value is null. - * - * @param session database session - * @param args expressions - * @param values array of input values - * @param i index of value of transform - * @return value or null - */ - protected static Value getNullOrValue(Session session, Expression[] args, - Value[] values, int i) { - if (i >= args.length) { - return null; - } - Value v = values[i]; - if (v == null) { - Expression e = args[i]; - if (e == null) { - return null; - } - v = values[i] = e.getValue(session); - } - return v; - } - - /** - * Return the resulting value for the given expression arguments. 
- * - * @param session the session - * @param args argument expressions - * @return the result - */ - protected Value getValueWithArgs(Session session, Expression[] args) { - Value[] values = new Value[args.length]; - if (info.nullIfParameterIsNull) { - for (int i = 0; i < args.length; i++) { - Expression e = args[i]; - Value v = e.getValue(session); - if (v == ValueNull.INSTANCE) { - return ValueNull.INSTANCE; - } - values[i] = v; - } - } - Value v0 = getNullOrValue(session, args, values, 0); - Value resultSimple = getSimpleValue(session, v0, args, values); - if (resultSimple != null) { - return resultSimple; - } - Value v1 = getNullOrValue(session, args, values, 1); - Value v2 = getNullOrValue(session, args, values, 2); - Value v3 = getNullOrValue(session, args, values, 3); - Value v4 = getNullOrValue(session, args, values, 4); - Value v5 = getNullOrValue(session, args, values, 5); - Value result; - switch (info.type) { - case ATAN2: - result = ValueDouble.get( - Math.atan2(v0.getDouble(), v1.getDouble())); - break; - case BITAND: - result = ValueLong.get(v0.getLong() & v1.getLong()); - break; - case BITGET: - result = ValueBoolean.get((v0.getLong() & (1L << v1.getInt())) != 0); - break; - case BITOR: - result = ValueLong.get(v0.getLong() | v1.getLong()); - break; - case BITXOR: - result = ValueLong.get(v0.getLong() ^ v1.getLong()); - break; - case MOD: { - long x = v1.getLong(); - if (x == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL(false)); - } - result = ValueLong.get(v0.getLong() % x); - break; - } - case POWER: - result = ValueDouble.get(Math.pow( - v0.getDouble(), v1.getDouble())); - break; - case ROUND: { - double f = v1 == null ? 1. : Math.pow(10., v1.getDouble()); - - double middleResult = v0.getDouble() * f; - - int oneWithSymbol = middleResult > 0 ? 
1 : -1; - result = ValueDouble.get(Math.round(Math.abs(middleResult)) / f * oneWithSymbol); - break; - } - case TRUNCATE: { - if (v0.getValueType() == Value.TIMESTAMP) { - result = ValueTimestamp.fromDateValueAndNanos(((ValueTimestamp) v0).getDateValue(), 0); - } else if (v0.getValueType() == Value.DATE) { - result = ValueTimestamp.fromDateValueAndNanos(((ValueDate) v0).getDateValue(), 0); - } else if (v0.getValueType() == Value.TIMESTAMP_TZ) { - ValueTimestampTimeZone ts = (ValueTimestampTimeZone) v0; - result = ValueTimestampTimeZone.fromDateValueAndNanos(ts.getDateValue(), 0, - ts.getTimeZoneOffsetMins()); - } else if (v0.getValueType() == Value.STRING) { - ValueTimestamp ts = ValueTimestamp.parse(v0.getString(), session.getDatabase().getMode()); - result = ValueTimestamp.fromDateValueAndNanos(ts.getDateValue(), 0); - } else { - double d = v0.getDouble(); - int p = v1 == null ? 0 : v1.getInt(); - double f = Math.pow(10., p); - double g = d * f; - result = ValueDouble.get(((d < 0) ? Math.ceil(g) : Math.floor(g)) / f); - } - break; - } - case HASH: - result = getHash(v0.getString(), v1, v2 == null ? 1 : v2.getInt()); - break; - case ENCRYPT: - result = ValueBytes.getNoCopy(encrypt(v0.getString(), - v1.getBytesNoCopy(), v2.getBytesNoCopy())); - break; - case DECRYPT: - result = ValueBytes.getNoCopy(decrypt(v0.getString(), - v1.getBytesNoCopy(), v2.getBytesNoCopy())); - break; - case COMPRESS: { - String algorithm = null; - if (v1 != null) { - algorithm = v1.getString(); - } - result = ValueBytes.getNoCopy(CompressTool.getInstance(). - compress(v0.getBytesNoCopy(), algorithm)); - break; - } - case ORA_HASH: - result = oraHash(v0, - v1 == null ? 0xffff_ffffL : v1.getLong(), - v2 == null ? 
0L : v2.getLong()); - break; - case DIFFERENCE: - result = ValueInt.get(getDifference( - v0.getString(), v1.getString())); - break; - case INSERT: { - if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE) { - result = v1; - } else { - result = ValueString.get(insert(v0.getString(), - v1.getInt(), v2.getInt(), v3.getString()), - database.getMode().treatEmptyStringsAsNull); - } - break; - } - case LEFT: - result = ValueString.get(left(v0.getString(), v1.getInt()), - database.getMode().treatEmptyStringsAsNull); - break; - case LOCATE: { - int start = v2 == null ? 0 : v2.getInt(); - result = ValueInt.get(locate(v0.getString(), v1.getString(), start)); - break; - } - case INSTR: { - int start = v2 == null ? 0 : v2.getInt(); - result = ValueInt.get(locate(v1.getString(), v0.getString(), start)); - break; - } - case REPEAT: { - int count = Math.max(0, v1.getInt()); - result = ValueString.get(repeat(v0.getString(), count), - database.getMode().treatEmptyStringsAsNull); - break; - } - case REPLACE: { - if (v0 == ValueNull.INSTANCE || v1 == ValueNull.INSTANCE - || v2 == ValueNull.INSTANCE && database.getMode().getEnum() != Mode.ModeEnum.Oracle) { - result = ValueNull.INSTANCE; - } else { - String s0 = v0.getString(); - String s1 = v1.getString(); - String s2 = (v2 == null) ? "" : v2.getString(); - if (s2 == null) { - s2 = ""; - } - result = ValueString.get(StringUtils.replaceAll(s0, s1, s2), - database.getMode().treatEmptyStringsAsNull); - } - break; - } - case RIGHT: - result = ValueString.get(right(v0.getString(), v1.getInt()), - database.getMode().treatEmptyStringsAsNull); - break; - case LTRIM: - result = ValueString.get(StringUtils.trim(v0.getString(), - true, false, v1 == null ? " " : v1.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case TRIM: - result = ValueString.get(StringUtils.trim(v0.getString(), - (flags & TRIM_LEADING) != 0, (flags & TRIM_TRAILING) != 0, v1 == null ? 
" " : v1.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case RTRIM: - result = ValueString.get(StringUtils.trim(v0.getString(), - false, true, v1 == null ? " " : v1.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case SUBSTR: - case SUBSTRING: { - String s = v0.getString(); - int offset = v1.getInt(); - if (offset < 0) { - offset = s.length() + offset + 1; - } - int length = v2 == null ? s.length() : v2.getInt(); - result = ValueString.get(substring(s, offset, length), - database.getMode().treatEmptyStringsAsNull); - break; - } - case POSITION: - result = ValueInt.get(locate(v0.getString(), v1.getString(), 0)); - break; - case XMLATTR: - result = ValueString.get( - StringUtils.xmlAttr(v0.getString(), v1.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case XMLNODE: { - String attr = v1 == null ? - null : v1 == ValueNull.INSTANCE ? null : v1.getString(); - String content = v2 == null ? - null : v2 == ValueNull.INSTANCE ? null : v2.getString(); - boolean indent = v3 == null ? - true : v3.getBoolean(); - result = ValueString.get(StringUtils.xmlNode( - v0.getString(), attr, content, indent), - database.getMode().treatEmptyStringsAsNull); - break; - } - case REGEXP_REPLACE: { - String input = v0.getString(); - String regexp = v1.getString(); - String replacement = v2.getString(); - String regexpMode = v3 != null ? v3.getString() : null; - result = regexpReplace(input, regexp, replacement, regexpMode); - break; - } - case RPAD: - result = ValueString.get(StringUtils.pad(v0.getString(), - v1.getInt(), v2 == null ? null : v2.getString(), true), - database.getMode().treatEmptyStringsAsNull); - break; - case LPAD: - result = ValueString.get(StringUtils.pad(v0.getString(), - v1.getInt(), v2 == null ? 
null : v2.getString(), false), - database.getMode().treatEmptyStringsAsNull); - break; - case TO_CHAR: - switch (v0.getValueType()){ - case Value.TIME: - case Value.DATE: - case Value.TIMESTAMP: - case Value.TIMESTAMP_TZ: - result = ValueString.get(ToChar.toCharDateTime(v0, - v1 == null ? null : v1.getString(), - v2 == null ? null : v2.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case Value.SHORT: - case Value.INT: - case Value.LONG: - case Value.DECIMAL: - case Value.DOUBLE: - case Value.FLOAT: - result = ValueString.get(ToChar.toChar(v0.getBigDecimal(), - v1 == null ? null : v1.getString(), - v2 == null ? null : v2.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - default: - result = ValueString.get(v0.getString(), - database.getMode().treatEmptyStringsAsNull); - } - break; - case TO_DATE: - result = ToDateParser.toDate(session, v0.getString(), v1 == null ? null : v1.getString()); - break; - case TO_TIMESTAMP: - result = ToDateParser.toTimestamp(session, v0.getString(), v1 == null ? null : v1.getString()); - break; - case ADD_MONTHS: - result = DateTimeFunctions.dateadd("MONTH", v1.getInt(), v0); - break; - case TO_TIMESTAMP_TZ: - result = ToDateParser.toTimestampTz(session, v0.getString(), v1 == null ? 
null : v1.getString()); - break; - case TRANSLATE: { - String matching = v1.getString(); - String replacement = v2.getString(); - result = ValueString.get( - translate(v0.getString(), matching, replacement), - database.getMode().treatEmptyStringsAsNull); - break; - } - case H2VERSION: - result = ValueString.get(Constants.getVersion(), - database.getMode().treatEmptyStringsAsNull); - break; - case DATE_ADD: - result = DateTimeFunctions.dateadd(v0.getString(), v1.getLong(), v2); - break; - case DATE_DIFF: - result = ValueLong.get(DateTimeFunctions.datediff(v0.getString(), v1, v2)); - break; - case DATE_TRUNC: - result = DateTimeFunctions.truncateDate(v0.getString(), v1); - break; - case EXTRACT: - result = DateTimeFunctions.extract(v0.getString(), v1, database.getMode()); - break; - case FORMATDATETIME: { - if (v0 == ValueNull.INSTANCE || v1 == ValueNull.INSTANCE) { - result = ValueNull.INSTANCE; - } else { - String locale = v2 == null ? - null : v2 == ValueNull.INSTANCE ? null : v2.getString(); - String tz = v3 == null ? - null : v3 == ValueNull.INSTANCE ? null : v3.getString(); - if (v0 instanceof ValueTimestampTimeZone) { - tz = DateTimeUtils.timeZoneNameFromOffsetMins( - ((ValueTimestampTimeZone) v0).getTimeZoneOffsetMins()); - } - result = ValueString.get(DateTimeFunctions.formatDateTime( - v0.getTimestamp(), v1.getString(), locale, tz), - database.getMode().treatEmptyStringsAsNull); - } - break; - } - case PARSEDATETIME: { - if (v0 == ValueNull.INSTANCE || v1 == ValueNull.INSTANCE) { - result = ValueNull.INSTANCE; - } else { - String locale = v2 == null ? - null : v2 == ValueNull.INSTANCE ? null : v2.getString(); - String tz = v3 == null ? - null : v3 == ValueNull.INSTANCE ? null : v3.getString(); - java.util.Date d = DateTimeFunctions.parseDateTime( - v0.getString(), v1.getString(), locale, tz); - result = ValueTimestamp.fromMillis(d.getTime()); - } - break; - } - case NULLIF: - result = database.areEqual(v0, v1) ? 
ValueNull.INSTANCE : v0; - break; - // system - case NEXTVAL: { - Sequence sequence = getSequence(session, v0, v1); - SequenceValue value = new SequenceValue(sequence); - result = value.getValue(session); - break; - } - case CURRVAL: { - Sequence sequence = getSequence(session, v0, v1); - result = ValueLong.get(sequence.getCurrentValue()); - break; - } - case CSVREAD: { - String fileName = v0.getString(); - String columnList = v1 == null ? null : v1.getString(); - Csv csv = new Csv(); - String options = v2 == null ? null : v2.getString(); - String charset = null; - if (options != null && options.indexOf('=') >= 0) { - charset = csv.setOptions(options); - } else { - charset = options; - String fieldSeparatorRead = v3 == null ? null : v3.getString(); - String fieldDelimiter = v4 == null ? null : v4.getString(); - String escapeCharacter = v5 == null ? null : v5.getString(); - Value v6 = getNullOrValue(session, args, values, 6); - String nullString = v6 == null ? null : v6.getString(); - setCsvDelimiterEscape(csv, fieldSeparatorRead, fieldDelimiter, - escapeCharacter); - csv.setNullString(nullString); - } - char fieldSeparator = csv.getFieldSeparatorRead(); - String[] columns = StringUtils.arraySplit(columnList, - fieldSeparator, true); - try { - result = ValueResultSet.get(session, csv.read(fileName, columns, charset), Integer.MAX_VALUE); - } catch (SQLException e) { - throw DbException.convert(e); - } - break; - } - case ARRAY_CONCAT: { - final ValueArray array = (ValueArray) v0.convertTo(Value.ARRAY); - final ValueArray array2 = (ValueArray) v1.convertTo(Value.ARRAY); - if (!array.getComponentType().equals(array2.getComponentType())) - throw DbException.get(ErrorCode.GENERAL_ERROR_1, "Expected component type " + array.getComponentType() - + " but got " + array2.getComponentType()); - final Value[] res = Arrays.copyOf(array.getList(), array.getList().length + array2.getList().length); - System.arraycopy(array2.getList(), 0, res, array.getList().length, 
array2.getList().length); - result = ValueArray.get(array.getComponentType(), res); - break; - } - case ARRAY_APPEND: { - final ValueArray array = (ValueArray) v0.convertTo(Value.ARRAY); - if (v1 != ValueNull.INSTANCE && array.getComponentType() != Object.class - && !array.getComponentType().isInstance(v1.getObject())) - throw DbException.get(ErrorCode.GENERAL_ERROR_1, - "Expected component type " + array.getComponentType() + " but got " + v1.getClass()); - final Value[] res = Arrays.copyOf(array.getList(), array.getList().length + 1); - res[array.getList().length] = v1; - result = ValueArray.get(array.getComponentType(), res); - break; - } - case ARRAY_SLICE: { - result = null; - final ValueArray array = (ValueArray) v0.convertTo(Value.ARRAY); - // SQL is 1-based - int index1 = v1.getInt() - 1; - // 1-based and inclusive as postgreSQL (-1+1) - int index2 = v2.getInt(); - // https://www.postgresql.org/docs/current/arrays.html#ARRAYS-ACCESSING - // For historical reasons postgreSQL ignore invalid indexes - final boolean isPG = database.getMode().getEnum() == ModeEnum.PostgreSQL; - if (index1 > index2) { - if (isPG) - result = ValueArray.get(array.getComponentType(), new Value[0]); - else - result = ValueNull.INSTANCE; - } else { - if (index1 < 0) { - if (isPG) - index1 = 0; - else - result = ValueNull.INSTANCE; - } - if (index2 > array.getList().length) { - if (isPG) - index2 = array.getList().length; - else - result = ValueNull.INSTANCE; - } - } - if (result == null) - result = ValueArray.get(array.getComponentType(), Arrays.copyOfRange(array.getList(), index1, index2)); - break; - } - case LINK_SCHEMA: { - session.getUser().checkAdmin(); - Connection conn = session.createConnection(false); - ResultSet rs = LinkSchema.linkSchema(conn, v0.getString(), - v1.getString(), v2.getString(), v3.getString(), - v4.getString(), v5.getString()); - result = ValueResultSet.get(session, rs, Integer.MAX_VALUE); - break; - } - case CSVWRITE: { - session.getUser().checkAdmin(); - 
Connection conn = session.createConnection(false); - Csv csv = new Csv(); - String options = v2 == null ? null : v2.getString(); - String charset = null; - if (options != null && options.indexOf('=') >= 0) { - charset = csv.setOptions(options); - } else { - charset = options; - String fieldSeparatorWrite = v3 == null ? null : v3.getString(); - String fieldDelimiter = v4 == null ? null : v4.getString(); - String escapeCharacter = v5 == null ? null : v5.getString(); - Value v6 = getNullOrValue(session, args, values, 6); - String nullString = v6 == null ? null : v6.getString(); - Value v7 = getNullOrValue(session, args, values, 7); - String lineSeparator = v7 == null ? null : v7.getString(); - setCsvDelimiterEscape(csv, fieldSeparatorWrite, fieldDelimiter, - escapeCharacter); - csv.setNullString(nullString); - if (lineSeparator != null) { - csv.setLineSeparator(lineSeparator); - } - } - try { - int rows = csv.write(conn, v0.getString(), v1.getString(), - charset); - result = ValueInt.get(rows); - } catch (SQLException e) { - throw DbException.convert(e); - } - break; - } - case SET: { - Variable var = (Variable) args[0]; - session.setVariable(var.getName(), v1); - result = v1; - break; - } - case FILE_READ: { - session.getUser().checkAdmin(); - String fileName = v0.getString(); - boolean blob = args.length == 1; - try { - long fileLength = FileUtils.size(fileName); - final InputStream in = FileUtils.newInputStream(fileName); - try { - if (blob) { - result = database.getLobStorage().createBlob(in, fileLength); - } else { - Reader reader; - if (v1 == ValueNull.INSTANCE) { - reader = new InputStreamReader(in); - } else { - reader = new InputStreamReader(in, v1.getString()); - } - result = database.getLobStorage().createClob(reader, fileLength); - } - } finally { - IOUtils.closeSilently(in); - } - session.addTemporaryLob(result); - } catch (IOException e) { - throw DbException.convertIOException(e, fileName); - } - break; - } - case FILE_WRITE: { - 
session.getUser().checkAdmin(); - result = ValueNull.INSTANCE; - String fileName = v1.getString(); - try { - FileOutputStream fileOutputStream = new FileOutputStream(fileName); - try (InputStream in = v0.getInputStream()) { - result = ValueLong.get(IOUtils.copyAndClose(in, - fileOutputStream)); - } - } catch (IOException e) { - throw DbException.convertIOException(e, fileName); - } - break; - } - case TRUNCATE_VALUE: { - result = v0.convertPrecision(v1.getLong(), v2.getBoolean()); - break; - } - case XMLTEXT: - if (v1 == null) { - result = ValueString.get(StringUtils.xmlText( - v0.getString()), - database.getMode().treatEmptyStringsAsNull); - } else { - result = ValueString.get(StringUtils.xmlText( - v0.getString(), v1.getBoolean()), - database.getMode().treatEmptyStringsAsNull); - } - break; - case REGEXP_LIKE: { - String regexp = v1.getString(); - String regexpMode = v2 != null ? v2.getString() : null; - int flags = makeRegexpFlags(regexpMode, false); - try { - result = ValueBoolean.get(Pattern.compile(regexp, flags) - .matcher(v0.getString()).find()); - } catch (PatternSyntaxException e) { - throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, regexp); - } - break; - } - case VALUES: { - Expression a0 = args[0]; - StringBuilder builder = new StringBuilder(); - Parser.quoteIdentifier(builder, a0.getSchemaName(), true).append('.'); - Parser.quoteIdentifier(builder, a0.getTableName(), true).append('.'); - Parser.quoteIdentifier(builder, a0.getColumnName(), true); - result = session.getVariable(builder.toString()); - break; - } - case SIGNAL: { - String sqlState = v0.getString(); - if (sqlState.startsWith("00") || !SIGNAL_PATTERN.matcher(sqlState).matches()) { - throw DbException.getInvalidValueException("SQLSTATE", sqlState); - } - String msgText = v1.getString(); - throw DbException.fromUser(sqlState, msgText); - } - default: - throw DbException.throwInternalError("type=" + info.type); - } - return result; - } - - private Sequence getSequence(Session session, 
Value v0, Value v1) { - String schemaName, sequenceName; - if (v1 == null) { - Parser p = new Parser(session); - String sql = v0.getString(); - Expression expr = p.parseExpression(sql); - if (expr instanceof ExpressionColumn) { - ExpressionColumn seq = (ExpressionColumn) expr; - schemaName = seq.getOriginalTableAliasName(); - if (schemaName == null) { - schemaName = session.getCurrentSchemaName(); - sequenceName = sql; - } else { - sequenceName = seq.getColumnName(); - } - } else { - throw DbException.getSyntaxError(sql, 1); - } - } else { - schemaName = v0.getString(); - sequenceName = v1.getString(); - } - Schema s = database.findSchema(schemaName); - if (s == null) { - schemaName = StringUtils.toUpperEnglish(schemaName); - s = database.getSchema(schemaName); - } - Sequence seq = s.findSequence(sequenceName); - if (seq == null) { - sequenceName = StringUtils.toUpperEnglish(sequenceName); - seq = s.getSequence(sequenceName); - } - return seq; - } - - private static long length(Value v) { - switch (v.getValueType()) { - case Value.BLOB: - case Value.CLOB: - case Value.BYTES: - case Value.JAVA_OBJECT: - return v.getType().getPrecision(); - default: - return v.getString().length(); - } - } - - private static byte[] getPaddedArrayCopy(byte[] data, int blockSize) { - int size = MathUtils.roundUpInt(data.length, blockSize); - return Utils.copyBytes(data, size); - } - - private static byte[] decrypt(String algorithm, byte[] key, byte[] data) { - BlockCipher cipher = CipherFactory.getBlockCipher(algorithm); - byte[] newKey = getPaddedArrayCopy(key, cipher.getKeyLength()); - cipher.setKey(newKey); - byte[] newData = getPaddedArrayCopy(data, BlockCipher.ALIGN); - cipher.decrypt(newData, 0, newData.length); - return newData; - } - - private static byte[] encrypt(String algorithm, byte[] key, byte[] data) { - BlockCipher cipher = CipherFactory.getBlockCipher(algorithm); - byte[] newKey = getPaddedArrayCopy(key, cipher.getKeyLength()); - cipher.setKey(newKey); - byte[] newData 
= getPaddedArrayCopy(data, BlockCipher.ALIGN); - cipher.encrypt(newData, 0, newData.length); - return newData; - } - - private static Value getHash(String algorithm, Value value, int iterations) { - if (!"SHA256".equalsIgnoreCase(algorithm)) { - throw DbException.getInvalidValueException("algorithm", algorithm); - } - if (iterations <= 0) { - throw DbException.getInvalidValueException("iterations", iterations); - } - MessageDigest md = hashImpl(value, "SHA-256"); - if (md == null) { - return ValueNull.INSTANCE; - } - byte[] b = md.digest(); - for (int i = 1; i < iterations; i++) { - b = md.digest(b); - } - return ValueBytes.getNoCopy(b); - } - - private static String substring(String s, int start, int length) { - int len = s.length(); - start--; - if (start < 0) { - start = 0; - } - if (length < 0) { - length = 0; - } - start = (start > len) ? len : start; - if (start + length > len) { - length = len - start; - } - return s.substring(start, start + length); - } - - private static String repeat(String s, int count) { - StringBuilder buff = new StringBuilder(s.length() * count); - while (count-- > 0) { - buff.append(s); - } - return buff.toString(); - } - - private static String rawToHex(String s) { - int length = s.length(); - StringBuilder buff = new StringBuilder(4 * length); - for (int i = 0; i < length; i++) { - String hex = Integer.toHexString(s.charAt(i) & 0xffff); - for (int j = hex.length(); j < 4; j++) { - buff.append('0'); - } - buff.append(hex); - } - return buff.toString(); - } - - private static int locate(String search, String s, int start) { - if (start < 0) { - int i = s.length() + start; - return s.lastIndexOf(search, i) + 1; - } - int i = (start == 0) ? 
0 : start - 1; - return s.indexOf(search, i) + 1; - } - - private static String right(String s, int count) { - if (count < 0) { - count = 0; - } else if (count > s.length()) { - count = s.length(); - } - return s.substring(s.length() - count); - } - - private static String left(String s, int count) { - if (count < 0) { - count = 0; - } else if (count > s.length()) { - count = s.length(); - } - return s.substring(0, count); - } - - private static String insert(String s1, int start, int length, String s2) { - if (s1 == null) { - return s2; - } - if (s2 == null) { - return s1; - } - int len1 = s1.length(); - int len2 = s2.length(); - start--; - if (start < 0 || length <= 0 || len2 == 0 || start > len1) { - return s1; - } - if (start + length > len1) { - length = len1 - start; - } - return s1.substring(0, start) + s2 + s1.substring(start + length); - } - - private static String hexToRaw(String s) { - // TODO function hextoraw compatibility with oracle - int len = s.length(); - if (len % 4 != 0) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s); - } - StringBuilder buff = new StringBuilder(len / 4); - for (int i = 0; i < len; i += 4) { - try { - char raw = (char) Integer.parseInt(s.substring(i, i + 4), 16); - buff.append(raw); - } catch (NumberFormatException e) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s); - } - } - return buff.toString(); - } - - private static int getDifference(String s1, String s2) { - // TODO function difference: compatibility with SQL Server and HSQLDB - s1 = getSoundex(s1); - s2 = getSoundex(s2); - int e = 0; - for (int i = 0; i < 4; i++) { - if (s1.charAt(i) == s2.charAt(i)) { - e++; - } - } - return e; - } - - private static String translate(String original, String findChars, - String replaceChars) { - if (StringUtils.isNullOrEmpty(original) || - StringUtils.isNullOrEmpty(findChars)) { - return original; - } - // if it stays null, then no replacements have been made - StringBuilder buff = null; - // if shorter 
than findChars, then characters are removed - // (if null, we don't access replaceChars at all) - int replaceSize = replaceChars == null ? 0 : replaceChars.length(); - for (int i = 0, size = original.length(); i < size; i++) { - char ch = original.charAt(i); - int index = findChars.indexOf(ch); - if (index >= 0) { - if (buff == null) { - buff = new StringBuilder(size); - if (i > 0) { - buff.append(original, 0, i); - } - } - if (index < replaceSize) { - ch = replaceChars.charAt(index); - } - } - if (buff != null) { - buff.append(ch); - } - } - return buff == null ? original : buff.toString(); - } - - private static double roundMagic(double d) { - if ((d < 0.000_000_000_000_1) && (d > -0.000_000_000_000_1)) { - return 0.0; - } - if ((d > 1_000_000_000_000d) || (d < -1_000_000_000_000d)) { - return d; - } - StringBuilder s = new StringBuilder(); - s.append(d); - if (s.toString().indexOf('E') >= 0) { - return d; - } - int len = s.length(); - if (len < 16) { - return d; - } - if (s.toString().indexOf('.') > len - 3) { - return d; - } - s.delete(len - 2, len); - len -= 2; - char c1 = s.charAt(len - 2); - char c2 = s.charAt(len - 3); - char c3 = s.charAt(len - 4); - if ((c1 == '0') && (c2 == '0') && (c3 == '0')) { - s.setCharAt(len - 1, '0'); - } else if ((c1 == '9') && (c2 == '9') && (c3 == '9')) { - s.setCharAt(len - 1, '9'); - s.append('9'); - s.append('9'); - s.append('9'); - } - return Double.parseDouble(s.toString()); - } - - private static String getSoundex(String s) { - int len = s.length(); - char[] chars = { '0', '0', '0', '0' }; - char lastDigit = '0'; - for (int i = 0, j = 0; i < len && j < 4; i++) { - char c = s.charAt(i); - char newDigit = c > SOUNDEX_INDEX.length ? 
- 0 : SOUNDEX_INDEX[c]; - if (newDigit != 0) { - if (j == 0) { - chars[j++] = c; - lastDigit = newDigit; - } else if (newDigit <= '6') { - if (newDigit != lastDigit) { - chars[j++] = newDigit; - lastDigit = newDigit; - } - } else if (newDigit == '7') { - lastDigit = newDigit; - } - } - } - return new String(chars); - } - - private static Value oraHash(Value value, long bucket, long seed) { - if ((bucket & 0xffff_ffff_0000_0000L) != 0L) { - throw DbException.getInvalidValueException("bucket", bucket); - } - if ((seed & 0xffff_ffff_0000_0000L) != 0L) { - throw DbException.getInvalidValueException("seed", seed); - } - MessageDigest md = hashImpl(value, "SHA-1"); - if (md == null) { - return ValueNull.INSTANCE; - } - if (seed != 0L) { - byte[] b = new byte[4]; - Bits.writeInt(b, 0, (int) seed); - md.update(b); - } - long hc = Bits.readLong(md.digest(), 0); - // Strip sign and use modulo operation to get value from 0 to bucket inclusive - return ValueLong.get((hc & Long.MAX_VALUE) % (bucket + 1)); - } - - private static MessageDigest hashImpl(Value value, String algorithm) { - MessageDigest md; - switch (value.getValueType()) { - case Value.NULL: - return null; - case Value.STRING: - case Value.STRING_FIXED: - case Value.STRING_IGNORECASE: - try { - md = MessageDigest.getInstance(algorithm); - md.update(value.getString().getBytes(StandardCharsets.UTF_8)); - } catch (Exception ex) { - throw DbException.convert(ex); - } - break; - case Value.BLOB: - case Value.CLOB: - try { - md = MessageDigest.getInstance(algorithm); - byte[] buf = new byte[4096]; - try (InputStream is = value.getInputStream()) { - for (int r; (r = is.read(buf)) > 0; ) { - md.update(buf, 0, r); - } - } - } catch (Exception ex) { - throw DbException.convert(ex); - } - break; - default: - try { - md = MessageDigest.getInstance(algorithm); - md.update(value.getBytesNoCopy()); - } catch (Exception ex) { - throw DbException.convert(ex); - } - } - return md; - } - - private Value regexpReplace(String input, 
String regexp, String replacement, String regexpMode) { - Mode mode = database.getMode(); - if (mode.regexpReplaceBackslashReferences) { - if ((replacement.indexOf('\\') >= 0) || (replacement.indexOf('$') >= 0)) { - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < replacement.length(); i++) { - char c = replacement.charAt(i); - if (c == '$') { - sb.append('\\'); - } else if (c == '\\' && ++i < replacement.length()) { - c = replacement.charAt(i); - sb.append(c >= '0' && c <= '9' ? '$' : '\\'); - } - sb.append(c); - } - replacement = sb.toString(); - } - } - boolean isInPostgreSqlMode = Mode.ModeEnum.PostgreSQL.equals(mode.getEnum()); - int flags = makeRegexpFlags(regexpMode, isInPostgreSqlMode); - try { - Matcher matcher = Pattern.compile(regexp, flags).matcher(input); - return ValueString.get(isInPostgreSqlMode && (regexpMode == null || regexpMode.indexOf('g') < 0) ? - matcher.replaceFirst(replacement) : matcher.replaceAll(replacement), - mode.treatEmptyStringsAsNull); - } catch (PatternSyntaxException e) { - throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, regexp); - } catch (StringIndexOutOfBoundsException | IllegalArgumentException e) { - throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, replacement); - } - } - - private static int makeRegexpFlags(String stringFlags, boolean ignoreGlobalFlag) { - int flags = Pattern.UNICODE_CASE; - if (stringFlags != null) { - for (int i = 0; i < stringFlags.length(); ++i) { - switch (stringFlags.charAt(i)) { - case 'i': - flags |= Pattern.CASE_INSENSITIVE; - break; - case 'c': - flags &= ~Pattern.CASE_INSENSITIVE; - break; - case 'n': - flags |= Pattern.DOTALL; - break; - case 'm': - flags |= Pattern.MULTILINE; - break; - case 'g': - if (ignoreGlobalFlag) { - break; - } - //$FALL-THROUGH$ - default: - throw DbException.get(ErrorCode.INVALID_VALUE_2, stringFlags); - } - } - } - return flags; - } - - @Override - public TypeInfo getType() { - return type; - } - - @Override - public int getValueType() { - 
return type.getValueType(); - } - - @Override - public void mapColumns(ColumnResolver resolver, int level, int state) { - for (Expression e : args) { - if (e != null) { - e.mapColumns(resolver, level, state); - } - } - } - - /** - * Check if the parameter count is correct. - * - * @param len the number of parameters set - * @throws DbException if the parameter count is incorrect - */ - protected void checkParameterCount(int len) { - int min = 0, max = Integer.MAX_VALUE; - switch (info.type) { - case COALESCE: - case CSVREAD: - case LEAST: - case GREATEST: - min = 1; - break; - case CURRENT_TIME: - case LOCALTIME: - case CURRENT_TIMESTAMP: - case LOCALTIMESTAMP: - case RAND: - max = 1; - break; - case COMPRESS: - case LTRIM: - case RTRIM: - case TRIM: - case FILE_READ: - case ROUND: - case XMLTEXT: - case TRUNCATE: - case TO_TIMESTAMP: - case TO_TIMESTAMP_TZ: - min = 1; - max = 2; - break; - case DATE_TRUNC: - min = 2; - max = 2; - break; - case TO_CHAR: - case TO_DATE: - min = 1; - max = 3; - break; - case ORA_HASH: - min = 1; - max = 3; - break; - case HASH: - case REPLACE: - case LOCATE: - case INSTR: - case SUBSTR: - case SUBSTRING: - case LPAD: - case RPAD: - min = 2; - max = 3; - break; - case CONCAT: - case CONCAT_WS: - case CSVWRITE: - min = 2; - break; - case XMLNODE: - min = 1; - max = 4; - break; - case FORMATDATETIME: - case PARSEDATETIME: - min = 2; - max = 4; - break; - case CURRVAL: - case NEXTVAL: - min = 1; - max = 2; - break; - case DECODE: - case CASE: - min = 3; - break; - case REGEXP_REPLACE: - min = 3; - max = 4; - break; - case REGEXP_LIKE: - min = 2; - max = 3; - break; - default: - DbException.throwInternalError("type=" + info.type); - } - boolean ok = (len >= min) && (len <= max); - if (!ok) { - throw DbException.get( - ErrorCode.INVALID_PARAMETER_COUNT_2, - info.name, min + ".." + max); - } - } - - /** - * This method is called after all the parameters have been set. - * It checks if the parameter count is correct. 
- * - * @throws DbException if the parameter count is incorrect. - */ - public void doneWithParameters() { - if (info.parameterCount == VAR_ARGS) { - checkParameterCount(varArgs.size()); - args = varArgs.toArray(new Expression[0]); - varArgs = null; - } else { - int len = args.length; - if (len > 0 && args[len - 1] == null) { - throw DbException.get( - ErrorCode.INVALID_PARAMETER_COUNT_2, - info.name, Integer.toString(len)); - } - } - } - - public void setDataType(Column col) { - TypeInfo type = col.getType(); - this.type = type; - } - - @Override - public Expression optimize(Session session) { - boolean allConst = info.deterministic; - for (int i = 0; i < args.length; i++) { - Expression e = args[i]; - if (e == null) { - continue; - } - e = e.optimize(session); - args[i] = e; - if (!e.isConstant()) { - allConst = false; - } - } - TypeInfo typeInfo; - Expression p0 = args.length < 1 ? null : args[0]; - switch (info.type) { - case DATE_ADD: { - typeInfo = TypeInfo.TYPE_TIMESTAMP; - if (p0.isConstant()) { - Expression p2 = args[2]; - switch (p2.getType().getValueType()) { - case Value.TIME: - typeInfo = TypeInfo.TYPE_TIME; - break; - case Value.DATE: { - int field = DateTimeFunctions.getDatePart(p0.getValue(session).getString()); - switch (field) { - case HOUR: - case MINUTE: - case SECOND: - case EPOCH: - case MILLISECOND: - case MICROSECOND: - case NANOSECOND: - // TIMESTAMP result - break; - default: - type = TypeInfo.TYPE_DATE; - } - break; - } - case Value.TIMESTAMP_TZ: - type = TypeInfo.TYPE_TIMESTAMP_TZ; - } - } - break; - } - case EXTRACT: { - if (p0.isConstant() && DateTimeFunctions.getDatePart(p0.getValue(session).getString()) == Function.EPOCH) { - typeInfo = TypeInfo.getTypeInfo(Value.DECIMAL, ValueLong.PRECISION + ValueTimestamp.MAXIMUM_SCALE, - ValueTimestamp.MAXIMUM_SCALE, null); - } else { - typeInfo = TypeInfo.TYPE_INT; - } - break; - } - case DATE_TRUNC: - typeInfo = args[1].getType(); - // TODO set scale when possible - if (typeInfo.getValueType() 
!= Value.TIMESTAMP_TZ) { - typeInfo = TypeInfo.TYPE_TIMESTAMP; - } - break; - case IFNULL: - case NULLIF: - case COALESCE: - case LEAST: - case GREATEST: { - typeInfo = TypeInfo.TYPE_UNKNOWN; - for (Expression e : args) { - if (e != ValueExpression.getNull()) { - TypeInfo type = e.getType(); - int valueType = type.getValueType(); - if (valueType != Value.UNKNOWN && valueType != Value.NULL) { - typeInfo = Value.getHigherType(typeInfo, type); - } - } - } - if (typeInfo.getValueType() == Value.UNKNOWN) { - typeInfo = TypeInfo.TYPE_STRING; - } - break; - } - case CASE: - case DECODE: { - typeInfo = TypeInfo.TYPE_UNKNOWN; - // (expr, when, then) - // (expr, when, then, else) - // (expr, when, then, when, then) - // (expr, when, then, when, then, else) - for (int i = 2, len = args.length; i < len; i += 2) { - Expression then = args[i]; - if (then != ValueExpression.getNull()) { - TypeInfo type = then.getType(); - int valueType = type.getValueType(); - if (valueType != Value.UNKNOWN && valueType != Value.NULL) { - typeInfo = Value.getHigherType(typeInfo, type); - } - } - } - if (args.length % 2 == 0) { - Expression elsePart = args[args.length - 1]; - if (elsePart != ValueExpression.getNull()) { - TypeInfo type = elsePart.getType(); - int valueType = type.getValueType(); - if (valueType != Value.UNKNOWN && valueType != Value.NULL) { - typeInfo = Value.getHigherType(typeInfo, type); - } - } - } - if (typeInfo.getValueType() == Value.UNKNOWN) { - typeInfo = TypeInfo.TYPE_STRING; - } - break; - } - case CASEWHEN: - typeInfo = Value.getHigherType(args[1].getType(), args[2].getType()); - break; - case NVL2: { - TypeInfo t1 = args[1].getType(), t2 = args[2].getType(); - switch (t1.getValueType()) { - case Value.STRING: - case Value.CLOB: - case Value.STRING_FIXED: - case Value.STRING_IGNORECASE: - typeInfo = TypeInfo.getTypeInfo(t1.getValueType(), -1, 0, null); - break; - default: - typeInfo = Value.getHigherType(t1, t2); - break; - } - break; - } - case CAST: - case CONVERT: - 
case TRUNCATE_VALUE: - if (type != null) { - // data type, precision and scale is already set - typeInfo = type; - } else { - typeInfo = TypeInfo.TYPE_UNKNOWN; - } - break; - case TRUNCATE: - switch (p0.getType().getValueType()) { - case Value.STRING: - case Value.DATE: - case Value.TIMESTAMP: - typeInfo = TypeInfo.getTypeInfo(Value.TIMESTAMP, -1, 0, null); - break; - case Value.TIMESTAMP_TZ: - typeInfo = TypeInfo.getTypeInfo(Value.TIMESTAMP_TZ, -1, 0, null); - break; - default: - typeInfo = TypeInfo.TYPE_DOUBLE; - } - break; - case ABS: - case FLOOR: - case ROUND: { - TypeInfo type = p0.getType(); - typeInfo = type; - if (typeInfo.getValueType() == Value.NULL) { - typeInfo = TypeInfo.TYPE_INT; - } - break; - } - case SET: - typeInfo = args[1].getType(); - if (!(p0 instanceof Variable)) { - throw DbException.get( - ErrorCode.CAN_ONLY_ASSIGN_TO_VARIABLE_1, p0.getSQL(false)); - } - break; - case FILE_READ: { - if (args.length == 1) { - typeInfo = TypeInfo.getTypeInfo(Value.BLOB, Integer.MAX_VALUE, 0, null); - } else { - typeInfo = TypeInfo.getTypeInfo(Value.CLOB, Integer.MAX_VALUE, 0, null); - } - break; - } - case SUBSTRING: - case SUBSTR: { - long p = args[0].getType().getPrecision(); - if (args[1].isConstant()) { - // if only two arguments are used, - // subtract offset from first argument length - p -= args[1].getValue(session).getLong() - 1; - } - if (args.length == 3 && args[2].isConstant()) { - // if the third argument is constant it is at most this value - p = Math.min(p, args[2].getValue(session).getLong()); - } - p = Math.max(0, p); - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, p, 0, null); - break; - } - case ENCRYPT: - case DECRYPT: - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, args[2].getType().getPrecision(), 0, null); - break; - case COMPRESS: - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, args[0].getType().getPrecision(), 0, null); - break; - case CHAR: - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, 1, 0, null); - 
break; - case CONCAT: { - long p = 0; - for (Expression e : args) { - TypeInfo type = e.getType(); - p += type.getPrecision(); - if (p < 0) { - p = Long.MAX_VALUE; - } - } - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, p, 0, null); - break; - } - case HEXTORAW: - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, (args[0].getType().getPrecision() + 3) / 4, 0, null); - break; - case LCASE: - case LTRIM: - case RIGHT: - case RTRIM: - case UCASE: - case LOWER: - case UPPER: - case TRIM: - case STRINGDECODE: - case UTF8TOSTRING: - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, args[0].getType().getPrecision(), 0, null); - break; - case RAWTOHEX: - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, args[0].getType().getPrecision() * 4, 0, null); - break; - case SOUNDEX: - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, 4, 0, null); - break; - case DAY_NAME: - case MONTH_NAME: - // day and month names may be long in some languages - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, 20, 0, null); - break; - default: - typeInfo = TypeInfo.getTypeInfo(info.returnDataType, -1, -1, null); - } - type = typeInfo; - if (allConst) { - Value v = getValue(session); - if (info.type == CAST || info.type == CONVERT) { - if (v == ValueNull.INSTANCE) { - return this; - } - DataType dt = DataType.getDataType(type.getValueType()); - TypeInfo vt = v.getType(); - if (dt.supportsPrecision && type.getPrecision() != vt.getPrecision() - || dt.supportsScale && type.getScale() != vt.getScale()) { - return this; - } - } - return ValueExpression.get(v); - } - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - for (Expression e : args) { - if (e != null) { - e.setEvaluatable(tableFilter, b); - } - } - } - - @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - builder.append(info.name); - if (info.type == CASE) { - if (args[0] != null) { - builder.append(' '); - args[0].getSQL(builder, 
alwaysQuote); - } - for (int i = 1, len = args.length - 1; i < len; i += 2) { - builder.append(" WHEN "); - args[i].getSQL(builder, alwaysQuote); - builder.append(" THEN "); - args[i + 1].getSQL(builder, alwaysQuote); - } - if (args.length % 2 == 0) { - builder.append(" ELSE "); - args[args.length - 1].getSQL(builder, alwaysQuote); - } - return builder.append(" END"); - } - boolean addParentheses = args.length > 0 || info.requireParentheses; - if (addParentheses) { - builder.append('('); - } - switch (info.type) { - case TRIM: { - switch (flags) { - case TRIM_LEADING: - builder.append("LEADING "); - break; - case TRIM_TRAILING: - builder.append("TRAILING "); - break; - } - if (args.length > 1) { - args[1].getSQL(builder, alwaysQuote).append(" FROM "); - } - args[0].getSQL(builder, alwaysQuote); - break; - } - case CAST: { - args[0].getSQL(builder, alwaysQuote).append(" AS ").append(new Column(null, type).getCreateSQL()); - break; - } - case CONVERT: { - if (database.getMode().swapConvertFunctionParameters) { - builder.append(new Column(null, type).getCreateSQL()).append(','); - args[0].getSQL(builder, alwaysQuote); - } else { - args[0].getSQL(builder, alwaysQuote).append(',').append(new Column(null, type).getCreateSQL()); - } - break; - } - case EXTRACT: { - ValueString v = (ValueString) ((ValueExpression) args[0]).getValue(null); - builder.append(v.getString()).append(" FROM "); - args[1].getSQL(builder, alwaysQuote); - break; - } - default: - writeExpressions(builder, args, alwaysQuote); - } - if (addParentheses) { - builder.append(')'); - } - return builder; - } - - @Override - public void updateAggregate(Session session, int stage) { - for (Expression e : args) { - if (e != null) { - e.updateAggregate(session, stage); - } - } - } - - public int getFunctionType() { - return info.type; - } - - @Override - public String getName() { - return info.name; - } - - @Override - public ValueResultSet getValueForColumnList(Session session, - Expression[] argList) { - 
switch (info.type) { - case CSVREAD: { - String fileName = argList[0].getValue(session).getString(); - if (fileName == null) { - throw DbException.get(ErrorCode.PARAMETER_NOT_SET_1, "fileName"); - } - String columnList = argList.length < 2 ? - null : argList[1].getValue(session).getString(); - Csv csv = new Csv(); - String options = argList.length < 3 ? - null : argList[2].getValue(session).getString(); - String charset = null; - if (options != null && options.indexOf('=') >= 0) { - charset = csv.setOptions(options); - } else { - charset = options; - String fieldSeparatorRead = argList.length < 4 ? - null : argList[3].getValue(session).getString(); - String fieldDelimiter = argList.length < 5 ? - null : argList[4].getValue(session).getString(); - String escapeCharacter = argList.length < 6 ? - null : argList[5].getValue(session).getString(); - setCsvDelimiterEscape(csv, fieldSeparatorRead, fieldDelimiter, - escapeCharacter); - } - char fieldSeparator = csv.getFieldSeparatorRead(); - String[] columns = StringUtils.arraySplit(columnList, fieldSeparator, true); - ResultSet rs = null; - ValueResultSet x; - try { - rs = csv.read(fileName, columns, charset); - x = ValueResultSet.get(session, rs, 0); - } catch (SQLException e) { - throw DbException.convert(e); - } finally { - csv.close(); - JdbcUtils.closeSilently(rs); - } - return x; - } - default: - break; - } - return (ValueResultSet) getValueWithArgs(session, argList); - } - - private static void setCsvDelimiterEscape(Csv csv, String fieldSeparator, - String fieldDelimiter, String escapeCharacter) { - if (fieldSeparator != null) { - csv.setFieldSeparatorWrite(fieldSeparator); - if (!fieldSeparator.isEmpty()) { - char fs = fieldSeparator.charAt(0); - csv.setFieldSeparatorRead(fs); - } - } - if (fieldDelimiter != null) { - char fd = fieldDelimiter.isEmpty() ? 0 : fieldDelimiter.charAt(0); - csv.setFieldDelimiter(fd); - } - if (escapeCharacter != null) { - char ec = escapeCharacter.isEmpty() ? 
0 : escapeCharacter.charAt(0); - csv.setEscapeCharacter(ec); - } - } - - @Override - public Expression[] getArgs() { - return args; - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - for (Expression e : args) { - if (e != null && !e.isEverything(visitor)) { - return false; - } - } - switch (visitor.getType()) { - case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.READONLY: - return info.deterministic; - case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.INDEPENDENT: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - case ExpressionVisitor.GET_COLUMNS1: - case ExpressionVisitor.GET_COLUMNS2: - return true; - default: - throw DbException.throwInternalError("type=" + visitor.getType()); - } - } - - @Override - public int getCost() { - int cost = 3; - for (Expression e : args) { - if (e != null) { - cost += e.getCost(); - } - } - return cost; - } - - @Override - public boolean isDeterministic() { - return info.deterministic; - } - - @Override - public boolean isBufferResultSetToLocalTemp() { - return info.bufferResultSetToLocalTemp; - } - - @Override - public boolean isGeneratedKey() { - return info.type == NEXTVAL; - } - - @Override - public int getSubexpressionCount() { - return args.length; - } - - @Override - public Expression getSubexpression(int index) { - return args[index]; - } - -} diff --git a/h2/src/main/org/h2/expression/function/Function0_1.java b/h2/src/main/org/h2/expression/function/Function0_1.java new file mode 100644 index 0000000000..a255c6984b --- /dev/null +++ b/h2/src/main/org/h2/expression/function/Function0_1.java @@ -0,0 +1,96 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; + +/** + * Function with one optional argument. + */ +public abstract class Function0_1 extends Expression implements NamedExpression { + + /** + * The argument of the operation. + */ + protected Expression arg; + + /** + * The type of the result. + */ + protected TypeInfo type; + + protected Function0_1(Expression arg) { + this.arg = arg; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + if (arg != null) { + arg.mapColumns(resolver, level, state); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + if (arg != null) { + arg.setEvaluatable(tableFilter, value); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + if (arg != null) { + arg.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return arg == null || arg.isEverything(visitor); + } + + @Override + public int getCost() { + int cost = 1; + if (arg != null) { + cost += arg.getCost(); + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return arg != null ? 
1 : 0; + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0 && arg != null) { + return arg; + } + throw new IndexOutOfBoundsException(); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append(getName()).append('('); + if (arg != null) { + arg.getUnenclosedSQL(builder, sqlFlags); + } + return builder.append(')'); + } + +} diff --git a/h2/src/main/org/h2/expression/function/Function1.java b/h2/src/main/org/h2/expression/function/Function1.java new file mode 100644 index 0000000000..190113a876 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/Function1.java @@ -0,0 +1,25 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.expression.Expression; +import org.h2.expression.Operation1; + +/** + * Function with one argument. + */ +public abstract class Function1 extends Operation1 implements NamedExpression { + + protected Function1(Expression arg) { + super(arg); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return arg.getUnenclosedSQL(builder.append(getName()).append('('), sqlFlags).append(')'); + } + +} diff --git a/h2/src/main/org/h2/expression/function/Function1_2.java b/h2/src/main/org/h2/expression/function/Function1_2.java new file mode 100644 index 0000000000..75b0d0ec51 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/Function1_2.java @@ -0,0 +1,66 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.Operation1_2; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Function with two arguments. + */ +public abstract class Function1_2 extends Operation1_2 implements NamedExpression { + + protected Function1_2(Expression left, Expression right) { + super(left, right); + } + + @Override + public Value getValue(SessionLocal session) { + Value v1 = left.getValue(session); + if (v1 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + Value v2; + if (right != null) { + v2 = right.getValue(session); + if (v2 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + } else { + v2 = null; + } + return getValue(session, v1, v2); + } + + /** + * Returns the value of this function. + * + * @param session + * the session + * @param v1 + * the value of first argument + * @param v2 + * the value of second argument, or {@code null} + * @return the resulting value + */ + protected Value getValue(SessionLocal session, Value v1, Value v2) { + throw DbException.getInternalError(); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + left.getUnenclosedSQL(builder.append(getName()).append('('), sqlFlags); + if (right != null) { + right.getUnenclosedSQL(builder.append(", "), sqlFlags); + } + return builder.append(')'); + } + +} diff --git a/h2/src/main/org/h2/expression/function/Function2.java b/h2/src/main/org/h2/expression/function/Function2.java new file mode 100644 index 0000000000..cfb340f7b6 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/Function2.java @@ -0,0 +1,58 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.Operation2; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Function with two arguments. + */ +public abstract class Function2 extends Operation2 implements NamedExpression { + + protected Function2(Expression left, Expression right) { + super(left, right); + } + + @Override + public Value getValue(SessionLocal session) { + Value v1 = left.getValue(session); + if (v1 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + Value v2 = right.getValue(session); + if (v2 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return getValue(session, v1, v2); + } + + /** + * Returns the value of this function. + * + * @param session + * the session + * @param v1 + * the value of first argument + * @param v2 + * the value of second argument + * @return the resulting value + */ + protected Value getValue(SessionLocal session, Value v1, Value v2) { + throw DbException.getInternalError(); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + left.getUnenclosedSQL(builder.append(getName()).append('('), sqlFlags).append(", "); + return right.getUnenclosedSQL(builder, sqlFlags).append(')'); + } + +} diff --git a/h2/src/main/org/h2/expression/function/FunctionCall.java b/h2/src/main/org/h2/expression/function/FunctionCall.java deleted file mode 100644 index 7a5bc2fe2b..0000000000 --- a/h2/src/main/org/h2/expression/function/FunctionCall.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.expression.function; - -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.value.ValueResultSet; - -/** - * This interface is used by the built-in functions, - * as well as the user-defined functions. - */ -public interface FunctionCall { - - /** - * Get the name of the function. - * - * @return the name - */ - String getName(); - - /** - * Get an empty result set with the column names set. - * - * @param session the session - * @param nullArgs the argument list (some arguments may be null) - * @return the empty result set - */ - ValueResultSet getValueForColumnList(Session session, Expression[] nullArgs); - - /** - * Get the data type. - * - * @return the data type - */ - int getValueType(); - - /** - * Optimize the function if possible. - * - * @param session the session - * @return the optimized expression - */ - Expression optimize(Session session); - - /** - * Get the function arguments. - * - * @return argument list - */ - Expression[] getArgs(); - - /** - * Get the SQL snippet of the function (including arguments). - * - * @param alwaysQuote quote all identifiers - * @return the SQL snippet. - */ - String getSQL(boolean alwaysQuote); - - /** - * Whether the function always returns the same result for the same - * parameters. - * - * @return true if it does - */ - boolean isDeterministic(); - - /** - * Should the return value ResultSet be buffered in a local temporary file? - * - * @return true if it should be. - */ - boolean isBufferResultSetToLocalTemp(); - -} diff --git a/h2/src/main/org/h2/expression/function/FunctionN.java b/h2/src/main/org/h2/expression/function/FunctionN.java new file mode 100644 index 0000000000..079191a15e --- /dev/null +++ b/h2/src/main/org/h2/expression/function/FunctionN.java @@ -0,0 +1,77 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.OperationN; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Function with many arguments. + */ +public abstract class FunctionN extends OperationN implements NamedExpression { + + protected FunctionN(Expression[] args) { + super(args); + } + + @Override + public Value getValue(SessionLocal session) { + Value v1, v2, v3; + int count = args.length; + if (count >= 1) { + v1 = args[0].getValue(session); + if (v1 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + if (count >= 2) { + v2 = args[1].getValue(session); + if (v2 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + if (count >= 3) { + v3 = args[2].getValue(session); + if (v3 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + } else { + v3 = null; + } + } else { + v3 = v2 = null; + } + } else { + v3 = v2 = v1 = null; + } + return getValue(session, v1, v2, v3); + } + + /** + * Returns the value of this function. 
+ * + * @param session + * the session + * @param v1 + * the value of first argument, or {@code null} + * @param v2 + * the value of second argument, or {@code null} + * @param v3 + * the value of third argument, or {@code null} + * @return the resulting value + */ + protected Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + throw DbException.getInternalError(); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return writeExpressions(builder.append(getName()).append('('), args, sqlFlags).append(')'); + } + +} diff --git a/h2/src/main/org/h2/expression/function/HashFunction.java b/h2/src/main/org/h2/expression/function/HashFunction.java new file mode 100644 index 0000000000..5ea0057992 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/HashFunction.java @@ -0,0 +1,193 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.security.SHA3; +import org.h2.util.Bits; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarbinary; + +/** + * A HASH or ORA_HASH function. + */ +public final class HashFunction extends FunctionN { + + /** + * HASH() (non-standard). + */ + public static final int HASH = 0; + + /** + * ORA_HASH() (non-standard). 
+ */ + public static final int ORA_HASH = HASH + 1; + + private static final String[] NAMES = { // + "HASH", "ORA_HASH" // + }; + + private final int function; + + public HashFunction(Expression arg, int function) { + super(new Expression[] { arg }); + this.function = function; + } + + public HashFunction(Expression arg1, Expression arg2, Expression arg3, int function) { + super(arg3 == null ? new Expression[] { arg1, arg2 } : new Expression[] { arg1, arg2, arg3 }); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + switch (function) { + case HASH: + v1 = getHash(v1.getString(), v2, v3 == null ? 1 : v3.getInt()); + break; + case ORA_HASH: + v1 = oraHash(v1, v2 == null ? 0xffff_ffffL : v2.getLong(), v3 == null ? 0L : v3.getLong()); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + private static Value getHash(String algorithm, Value value, int iterations) { + if (iterations <= 0) { + throw DbException.getInvalidValueException("iterations", iterations); + } + MessageDigest md; + switch (StringUtils.toUpperEnglish(algorithm)) { + case "MD5": + case "SHA-1": + case "SHA-224": + case "SHA-256": + case "SHA-384": + case "SHA-512": + md = hashImpl(value, algorithm); + break; + case "SHA256": + md = hashImpl(value, "SHA-256"); + break; + case "SHA3-224": + md = hashImpl(value, SHA3.getSha3_224()); + break; + case "SHA3-256": + md = hashImpl(value, SHA3.getSha3_256()); + break; + case "SHA3-384": + md = hashImpl(value, SHA3.getSha3_384()); + break; + case "SHA3-512": + md = hashImpl(value, SHA3.getSha3_512()); + break; + default: + throw DbException.getInvalidValueException("algorithm", algorithm); + } + byte[] b = md.digest(); + for (int i = 1; i < iterations; i++) { + b = md.digest(b); + } + return ValueVarbinary.getNoCopy(b); + } + + private static Value oraHash(Value value, long bucket, long seed) { + if ((bucket & 0xffff_ffff_0000_0000L) 
!= 0L) { + throw DbException.getInvalidValueException("bucket", bucket); + } + if ((seed & 0xffff_ffff_0000_0000L) != 0L) { + throw DbException.getInvalidValueException("seed", seed); + } + MessageDigest md = hashImpl(value, "SHA-1"); + if (md == null) { + return ValueNull.INSTANCE; + } + if (seed != 0L) { + byte[] b = new byte[4]; + Bits.writeInt(b, 0, (int) seed); + md.update(b); + } + long hc = Bits.readLong(md.digest(), 0); + // Strip sign and use modulo operation to get value from 0 to bucket + // inclusive + return ValueBigint.get((hc & Long.MAX_VALUE) % (bucket + 1)); + } + + private static MessageDigest hashImpl(Value value, String algorithm) { + MessageDigest md; + try { + md = MessageDigest.getInstance(algorithm); + } catch (Exception ex) { + throw DbException.convert(ex); + } + return hashImpl(value, md); + } + + private static MessageDigest hashImpl(Value value, MessageDigest md) { + try { + switch (value.getValueType()) { + case Value.VARCHAR: + case Value.CHAR: + case Value.VARCHAR_IGNORECASE: + md.update(value.getString().getBytes(StandardCharsets.UTF_8)); + break; + case Value.BLOB: + case Value.CLOB: { + byte[] buf = new byte[4096]; + try (InputStream is = value.getInputStream()) { + for (int r; (r = is.read(buf)) > 0;) { + md.update(buf, 0, r); + } + } + break; + } + default: + md.update(value.getBytesNoCopy()); + } + return md; + } catch (Exception ex) { + throw DbException.convert(ex); + } + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + switch (function) { + case HASH: + type = TypeInfo.TYPE_VARBINARY; + break; + case ORA_HASH: + type = TypeInfo.TYPE_BIGINT; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git 
a/h2/src/main/org/h2/expression/function/JavaFunction.java b/h2/src/main/org/h2/expression/function/JavaFunction.java index 13d32c7dd5..afc617cbdd 100644 --- a/h2/src/main/org/h2/expression/function/JavaFunction.java +++ b/h2/src/main/org/h2/expression/function/JavaFunction.java @@ -1,29 +1,26 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression.function; -import org.h2.command.Parser; -import org.h2.engine.Constants; -import org.h2.engine.FunctionAlias; -import org.h2.engine.Session; +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.schema.FunctionAlias; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueCollectionBase; -import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; /** * This class wraps a user-defined function. 
*/ -public class JavaFunction extends Expression implements FunctionCall { +public final class JavaFunction extends Expression implements NamedExpression { private final FunctionAlias functionAlias; private final FunctionAlias.JavaMethod javaMethod; @@ -32,21 +29,19 @@ public class JavaFunction extends Expression implements FunctionCall { public JavaFunction(FunctionAlias functionAlias, Expression[] args) { this.functionAlias = functionAlias; this.javaMethod = functionAlias.findJavaMethod(args); + if (javaMethod.getDataType() == null) { + throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, getName()); + } this.args = args; } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { return javaMethod.getValue(session, args, false); } @Override public TypeInfo getType() { - return TypeInfo.getTypeInfo(javaMethod.getDataType()); - } - - @Override - public int getValueType() { return javaMethod.getDataType(); } @@ -58,8 +53,8 @@ public void mapColumns(ColumnResolver resolver, int level, int state) { } @Override - public Expression optimize(Session session) { - boolean allConst = isDeterministic(); + public Expression optimize(SessionLocal session) { + boolean allConst = functionAlias.isDeterministic(); for (int i = 0, len = args.length; i < len; i++) { Expression e = args[i].optimize(session); args[i] = e; @@ -81,19 +76,12 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - // TODO always append the schema once FUNCTIONS_IN_SCHEMA is enabled - if (functionAlias.getDatabase().getSettings().functionsInSchema || - functionAlias.getSchema().getId() != Constants.MAIN_SCHEMA_ID) { - Parser.quoteIdentifier(builder, functionAlias.getSchema().getName(), alwaysQuote).append('.'); - } - Parser.quoteIdentifier(builder, functionAlias.getName(), alwaysQuote).append('('); - writeExpressions(builder, this.args, alwaysQuote); - return 
builder.append(')'); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return writeExpressions(functionAlias.getSQL(builder, sqlFlags).append('('), args, sqlFlags).append(')'); } @Override - public void updateAggregate(Session session, int stage) { + public void updateAggregate(SessionLocal session, int stage) { for (Expression e : args) { if (e != null) { e.updateAggregate(session, stage); @@ -106,23 +94,13 @@ public String getName() { return functionAlias.getName(); } - @Override - public ValueResultSet getValueForColumnList(Session session, - Expression[] argList) { - Value v = javaMethod.getValue(session, argList, true); - return v == ValueNull.INSTANCE ? null : (ValueResultSet) v; - } - - @Override - public Expression[] getArgs() { - return args; - } - @Override public boolean isEverything(ExpressionVisitor visitor) { switch (visitor.getType()) { case ExpressionVisitor.DETERMINISTIC: - if (!isDeterministic()) { + case ExpressionVisitor.READONLY: + case ExpressionVisitor.QUERY_COMPARABLE: + if (!functionAlias.isDeterministic()) { return false; } // only if all parameters are deterministic as well @@ -149,29 +127,6 @@ public int getCost() { return cost; } - @Override - public boolean isDeterministic() { - return functionAlias.isDeterministic(); - } - - @Override - public Expression[] getExpressionColumns(Session session) { - switch (getValueType()) { - case Value.RESULT_SET: - ValueResultSet rs = getValueForColumnList(session, getArgs()); - return getExpressionColumns(session, rs.getResult()); - case Value.ARRAY: - case Value.ROW: - return getExpressionColumns(session, (ValueCollectionBase) getValue(session)); - } - return super.getExpressionColumns(session); - } - - @Override - public boolean isBufferResultSetToLocalTemp() { - return functionAlias.isBufferResultSetToLocalTemp(); - } - @Override public int getSubexpressionCount() { return args.length; diff --git a/h2/src/main/org/h2/expression/function/JsonConstructorFunction.java 
b/h2/src/main/org/h2/expression/function/JsonConstructorFunction.java new file mode 100644 index 0000000000..87ab74037c --- /dev/null +++ b/h2/src/main/org/h2/expression/function/JsonConstructorFunction.java @@ -0,0 +1,171 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.io.ByteArrayOutputStream; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionWithFlags; +import org.h2.expression.Format; +import org.h2.expression.OperationN; +import org.h2.expression.Subquery; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.json.JsonConstructorUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueJson; +import org.h2.value.ValueNull; + +/** + * JSON constructor function. + */ +public final class JsonConstructorFunction extends OperationN implements ExpressionWithFlags, NamedExpression { + + private final boolean array; + + private int flags; + + /** + * Creates a new instance of JSON constructor function. + * + * @param array + * {@code false} for {@code JSON_OBJECT}, {@code true} for + * {@code JSON_ARRAY}. + */ + public JsonConstructorFunction(boolean array) { + super(new Expression[4]); + this.array = array; + } + + @Override + public void setFlags(int flags) { + this.flags = flags; + } + + @Override + public int getFlags() { + return flags; + } + + @Override + public Value getValue(SessionLocal session) { + return array ? 
jsonArray(session, args) : jsonObject(session, args); + } + + private Value jsonObject(SessionLocal session, Expression[] args) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + baos.write('{'); + for (int i = 0, l = args.length; i < l;) { + String name = args[i++].getValue(session).getString(); + if (name == null) { + throw DbException.getInvalidValueException("JSON_OBJECT key", "NULL"); + } + Value value = args[i++].getValue(session); + if (value == ValueNull.INSTANCE) { + if ((flags & JsonConstructorUtils.JSON_ABSENT_ON_NULL) != 0) { + continue; + } else { + value = ValueJson.NULL; + } + } + JsonConstructorUtils.jsonObjectAppend(baos, name, value); + } + return JsonConstructorUtils.jsonObjectFinish(baos, flags); + } + + private Value jsonArray(SessionLocal session, Expression[] args) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + baos.write('['); + int l = args.length; + evaluate: { + if (l == 1) { + Expression arg0 = args[0]; + if (arg0 instanceof Subquery) { + Subquery q = (Subquery) arg0; + for (Value value : q.getAllRows(session)) { + JsonConstructorUtils.jsonArrayAppend(baos, value, flags); + } + break evaluate; + } else if (arg0 instanceof Format) { + Format format = (Format) arg0; + arg0 = format.getSubexpression(0); + if (arg0 instanceof Subquery) { + Subquery q = (Subquery) arg0; + for (Value value : q.getAllRows(session)) { + JsonConstructorUtils.jsonArrayAppend(baos, format.getValue(value), flags); + } + break evaluate; + } + } + } + for (int i = 0; i < l;) { + JsonConstructorUtils.jsonArrayAppend(baos, args[i++].getValue(session), flags); + } + } + baos.write(']'); + return ValueJson.getInternal(baos.toByteArray()); + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + type = TypeInfo.TYPE_JSON; + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public StringBuilder 
getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append(getName()).append('('); + if (array) { + writeExpressions(builder, args, sqlFlags); + } else { + for (int i = 0, l = args.length; i < l;) { + if (i > 0) { + builder.append(", "); + } + args[i++].getUnenclosedSQL(builder, sqlFlags).append(": "); + args[i++].getUnenclosedSQL(builder, sqlFlags); + } + } + return getJsonFunctionFlagsSQL(builder, flags, array).append(')'); + } + + /** + * Appends flags of a JSON function to the specified string builder. + * + * @param builder + * string builder to append to + * @param flags + * flags to append + * @param forArray + * whether the function is an array function + * @return the specified string builder + */ + public static StringBuilder getJsonFunctionFlagsSQL(StringBuilder builder, int flags, boolean forArray) { + if ((flags & JsonConstructorUtils.JSON_ABSENT_ON_NULL) != 0) { + if (!forArray) { + builder.append(" ABSENT ON NULL"); + } + } else if (forArray) { + builder.append(" NULL ON NULL"); + } + if (!forArray && (flags & JsonConstructorUtils.JSON_WITH_UNIQUE_KEYS) != 0) { + builder.append(" WITH UNIQUE KEYS"); + } + return builder; + } + + @Override + public String getName() { + return array ? "JSON_ARRAY" : "JSON_OBJECT"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/LengthFunction.java b/h2/src/main/org/h2/expression/function/LengthFunction.java new file mode 100644 index 0000000000..199837ddbb --- /dev/null +++ b/h2/src/main/org/h2/expression/function/LengthFunction.java @@ -0,0 +1,86 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; + +/** + * CHAR_LENGTH(), or OCTET_LENGTH() function. + */ +public final class LengthFunction extends Function1 { + + /** + * CHAR_LENGTH(). + */ + public static final int CHAR_LENGTH = 0; + + /** + * OCTET_LENGTH(). + */ + public static final int OCTET_LENGTH = CHAR_LENGTH + 1; + + /** + * BIT_LENGTH() (non-standard). + */ + public static final int BIT_LENGTH = OCTET_LENGTH + 1; + + private static final String[] NAMES = { // + "CHAR_LENGTH", "OCTET_LENGTH", "BIT_LENGTH" // + }; + + private final int function; + + public LengthFunction(Expression arg, int function) { + super(arg); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + long l; + switch (function) { + case CHAR_LENGTH: + l = v.charLength(); + break; + case OCTET_LENGTH: + l = v.octetLength(); + break; + case BIT_LENGTH: + l = v.octetLength() * 8; + break; + default: + throw DbException.getInternalError("function=" + function); + } + return ValueBigint.get(l); + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + type = TypeInfo.TYPE_BIGINT; + if (arg.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/MathFunction.java b/h2/src/main/org/h2/expression/function/MathFunction.java new file mode 100644 index 0000000000..cfae2b4a9e --- /dev/null +++ 
b/h2/src/main/org/h2/expression/function/MathFunction.java @@ -0,0 +1,394 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.math.BigDecimal; +import java.math.RoundingMode; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueDecfloat; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; + +/** + * A math function. + */ +public final class MathFunction extends Function1_2 { + + /** + * ABS(). + */ + public static final int ABS = 0; + + /** + * MOD(). + */ + public static final int MOD = ABS + 1; + + /** + * FLOOR(). + */ + public static final int FLOOR = MOD + 1; + + /** + * CEIL() or CEILING(). 
+ */ + public static final int CEIL = FLOOR + 1; + + /** + * ROUND() (non-standard) + */ + public static final int ROUND = CEIL + 1; + + /** + * ROUNDMAGIC() (non-standard) + */ + public static final int ROUNDMAGIC = ROUND + 1; + + /** + * SIGN() (non-standard) + */ + public static final int SIGN = ROUNDMAGIC + 1; + + /** + * TRUNC() (non-standard) + */ + public static final int TRUNC = SIGN + 1; + + private static final String[] NAMES = { // + "ABS", "MOD", "FLOOR", "CEIL", "ROUND", "ROUNDMAGIC", "SIGN", "TRUNC" // + }; + + private final int function; + + private TypeInfo commonType; + + public MathFunction(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + switch (function) { + case ABS: + if (v1.getSignum() < 0) { + v1 = v1.negate(); + } + break; + case MOD: + v1 = v1.convertTo(commonType, session).modulus(v2.convertTo(commonType, session)).convertTo(type, session); + break; + case FLOOR: + v1 = round(v1, v2, RoundingMode.FLOOR); + break; + case CEIL: + v1 = round(v1, v2, RoundingMode.CEILING); + break; + case ROUND: + v1 = round(v1, v2, RoundingMode.HALF_UP); + break; + case ROUNDMAGIC: + v1 = ValueDouble.get(roundMagic(v1.getDouble())); + break; + case SIGN: + v1 = ValueInteger.get(v1.getSignum()); + break; + case TRUNC: + v1 = round(v1, v2, RoundingMode.DOWN); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + @SuppressWarnings("incomplete-switch") + private Value round(Value v1, Value v2, RoundingMode roundingMode) { + int scale = v2 != null ? 
v2.getInt() : 0; + int t = type.getValueType(); + c: switch (t) { + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: { + if (scale < 0) { + long original = v1.getLong(); + long scaled = BigDecimal.valueOf(original).setScale(scale, roundingMode).longValue(); + if (original != scaled) { + v1 = ValueBigint.get(scaled).convertTo(type); + } + } + break; + } + case Value.NUMERIC: { + int targetScale = type.getScale(); + BigDecimal bd = v1.getBigDecimal(); + if (scale < targetScale) { + bd = bd.setScale(scale, roundingMode); + } + v1 = ValueNumeric.get(bd.setScale(targetScale, roundingMode)); + break; + } + case Value.REAL: + case Value.DOUBLE: { + l: if (scale == 0) { + double d; + switch (roundingMode) { + case DOWN: + d = v1.getDouble(); + d = d < 0 ? Math.ceil(d) : Math.floor(d); + break; + case CEILING: + d = Math.ceil(v1.getDouble()); + break; + case FLOOR: + d = Math.floor(v1.getDouble()); + break; + default: + break l; + } + v1 = t == Value.REAL ? ValueReal.get((float) d) : ValueDouble.get(d); + break c; + } + BigDecimal bd = v1.getBigDecimal().setScale(scale, roundingMode); + v1 = t == Value.REAL ? 
ValueReal.get(bd.floatValue()) : ValueDouble.get(bd.doubleValue()); + break; + } + case Value.DECFLOAT: + v1 = ValueDecfloat.get(v1.getBigDecimal().setScale(scale, roundingMode)); + } + return v1; + } + + private static double roundMagic(double d) { + if ((d < 0.000_000_000_000_1) && (d > -0.000_000_000_000_1)) { + return 0.0; + } + if ((d > 1_000_000_000_000d) || (d < -1_000_000_000_000d)) { + return d; + } + StringBuilder s = new StringBuilder(); + s.append(d); + if (s.toString().indexOf('E') >= 0) { + return d; + } + int len = s.length(); + if (len < 16) { + return d; + } + if (s.toString().indexOf('.') > len - 3) { + return d; + } + s.delete(len - 2, len); + len -= 2; + char c1 = s.charAt(len - 2); + char c2 = s.charAt(len - 3); + char c3 = s.charAt(len - 4); + if ((c1 == '0') && (c2 == '0') && (c3 == '0')) { + s.setCharAt(len - 1, '0'); + } else if ((c1 == '9') && (c2 == '9') && (c3 == '9')) { + s.setCharAt(len - 1, '9'); + s.append('9'); + s.append('9'); + s.append('9'); + } + return Double.parseDouble(s.toString()); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + switch (function) { + case ABS: + type = left.getType(); + if (type.getValueType() == Value.NULL) { + type = TypeInfo.TYPE_NUMERIC_FLOATING_POINT; + } + break; + case FLOOR: + case CEIL: { + Expression e = optimizeRound(0, true, false, true); + if (e != null) { + return e; + } + break; + } + case MOD: + TypeInfo divisorType = right.getType(); + commonType = TypeInfo.getHigherType(left.getType(), divisorType); + int valueType = commonType.getValueType(); + if (valueType == Value.NULL) { + commonType = TypeInfo.TYPE_BIGINT; + } else if (!DataType.isNumericType(valueType)) { + throw DbException.getInvalidExpressionTypeException("MOD argument", + DataType.isNumericType(left.getType().getValueType()) ? right : left); + } + type = DataType.isNumericType(divisorType.getValueType()) ? 
divisorType : commonType; + break; + case ROUND: { + Expression e = optimizeRoundWithScale(session, true); + if (e != null) { + return e; + } + break; + } + case ROUNDMAGIC: + type = TypeInfo.TYPE_DOUBLE; + break; + case SIGN: + type = TypeInfo.TYPE_INTEGER; + break; + case TRUNC: + switch (left.getType().getValueType()) { + case Value.VARCHAR: + left = new CastSpecification(left, TypeInfo.getTypeInfo(Value.TIMESTAMP, -1L, 0, null)) + .optimize(session); + //$FALL-THROUGH$ + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + if (right != null) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, "TRUNC", "1"); + } + return new DateTimeFunction(DateTimeFunction.DATE_TRUNC, DateTimeFunction.DAY, left, null) + .optimize(session); + case Value.DATE: + if (right != null) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, "TRUNC", "1"); + } + return new CastSpecification(left, TypeInfo.getTypeInfo(Value.TIMESTAMP, -1L, 0, null)) + .optimize(session); + default: { + Expression e = optimizeRoundWithScale(session, false); + if (e != null) { + return e; + } + } + } + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (left.isConstant() && (right == null || right.isConstant())) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + private Expression optimizeRoundWithScale(SessionLocal session, boolean possibleRoundUp) { + int scale; + boolean scaleIsKnown = false, scaleIsNull = false; + if (right != null) { + if (right.isConstant()) { + Value scaleValue = right.getValue(session); + scaleIsKnown = true; + if (scaleValue != ValueNull.INSTANCE) { + scale = scaleValue.getInt(); + } else { + scale = -1; + scaleIsNull = true; + } + } else { + scale = -1; + } + } else { + scale = 0; + scaleIsKnown = true; + } + return optimizeRound(scale, scaleIsKnown, scaleIsNull, possibleRoundUp); + } + + /** + * Optimizes rounding and truncation functions. 
+ * + * @param scale + * the scale, if known + * @param scaleIsKnown + * whether scale is known + * @param scaleIsNull + * whether scale is {@code NULL} + * @param possibleRoundUp + * {@code true} if result of rounding can have larger precision + * than precision of argument, {@code false} otherwise + * @return the optimized expression or {@code null} if this function should + * be used + */ + private Expression optimizeRound(int scale, boolean scaleIsKnown, boolean scaleIsNull, boolean possibleRoundUp) { + TypeInfo leftType = left.getType(); + switch (leftType.getValueType()) { + case Value.NULL: + type = TypeInfo.TYPE_NUMERIC_SCALE_0; + break; + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + if (scaleIsKnown && scale >= 0) { + return left; + } + type = leftType; + break; + case Value.REAL: + case Value.DOUBLE: + case Value.DECFLOAT: + type = leftType; + break; + case Value.NUMERIC: { + long precision; + int originalScale = leftType.getScale(); + if (scaleIsKnown) { + if (originalScale <= scale) { + return left; + } else { + if (scale < 0) { + scale = 0; + } else if (scale > ValueNumeric.MAXIMUM_SCALE) { + scale = ValueNumeric.MAXIMUM_SCALE; + } + precision = leftType.getPrecision() - originalScale + scale; + if (possibleRoundUp) { + precision++; + } + } + } else { + precision = leftType.getPrecision(); + if (possibleRoundUp) { + precision++; + } + scale = originalScale; + } + type = TypeInfo.getTypeInfo(Value.NUMERIC, precision, scale, null); + break; + } + default: + throw DbException.getInvalidExpressionTypeException(getName() + " argument", left); + } + if (scaleIsNull) { + return TypedValueExpression.get(ValueNull.INSTANCE, type); + } + return null; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/MathFunction1.java b/h2/src/main/org/h2/expression/function/MathFunction1.java new file mode 100644 index 0000000000..416b093165 --- 
/dev/null +++ b/h2/src/main/org/h2/expression/function/MathFunction1.java @@ -0,0 +1,212 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; + +/** + * A math function with one argument and DOUBLE PRECISION result. + */ +public final class MathFunction1 extends Function1 { + + // Trigonometric functions + + /** + * SIN(). + */ + public static final int SIN = 0; + + /** + * COS(). + */ + public static final int COS = SIN + 1; + + /** + * TAN(). + */ + public static final int TAN = COS + 1; + + /** + * COT() (non-standard). + */ + public static final int COT = TAN + 1; + + /** + * SINH(). + */ + public static final int SINH = COT + 1; + + /** + * COSH(). + */ + public static final int COSH = SINH + 1; + + /** + * TANH(). + */ + public static final int TANH = COSH + 1; + + /** + * ASIN(). + */ + public static final int ASIN = TANH + 1; + + /** + * ACOS(). + */ + public static final int ACOS = ASIN + 1; + + /** + * ATAN(). + */ + public static final int ATAN = ACOS + 1; + + // Logarithm functions + + /** + * LOG10(). + */ + public static final int LOG10 = ATAN + 1; + + /** + * LN(). + */ + public static final int LN = LOG10 + 1; + + // Exponential function + + /** + * EXP(). + */ + public static final int EXP = LN + 1; + + // Square root + + /** + * SQRT(). + */ + public static final int SQRT = EXP + 1; + + // Other non-standard + + /** + * DEGREES() (non-standard). + */ + public static final int DEGREES = SQRT + 1; + + /** + * RADIANS() (non-standard). 
+ */ + public static final int RADIANS = DEGREES + 1; + + private static final String[] NAMES = { // + "SIN", "COS", "TAN", "COT", "SINH", "COSH", "TANH", "ASIN", "ACOS", "ATAN", // + "LOG10", "LN", "EXP", "SQRT", "DEGREES", "RADIANS" // + }; + + private final int function; + + public MathFunction1(Expression arg, int function) { + super(arg); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + double d = v.getDouble(); + switch (function) { + case SIN: + d = Math.sin(d); + break; + case COS: + d = Math.cos(d); + break; + case TAN: + d = Math.tan(d); + break; + case COT: + d = Math.tan(d); + if (d == 0.0) { + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); + } + d = 1d / d; + break; + case SINH: + d = Math.sinh(d); + break; + case COSH: + d = Math.cosh(d); + break; + case TANH: + d = Math.tanh(d); + break; + case ASIN: + d = Math.asin(d); + break; + case ACOS: + d = Math.acos(d); + break; + case ATAN: + d = Math.atan(d); + break; + case LOG10: + if (d <= 0) { + throw DbException.getInvalidValueException("LOG10() argument", d); + } + d = Math.log10(d); + break; + case LN: + if (d <= 0) { + throw DbException.getInvalidValueException("LN() argument", d); + } + d = Math.log(d); + break; + case EXP: + d = Math.exp(d); + break; + case SQRT: + d = Math.sqrt(d); + break; + case DEGREES: + d = Math.toDegrees(d); + break; + case RADIANS: + d = Math.toRadians(d); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return ValueDouble.get(d); + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + type = TypeInfo.TYPE_DOUBLE; + if (arg.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git 
a/h2/src/main/org/h2/expression/function/MathFunction2.java b/h2/src/main/org/h2/expression/function/MathFunction2.java new file mode 100644 index 0000000000..52dff56652 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/MathFunction2.java @@ -0,0 +1,100 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDouble; + +/** + * A math function with two arguments and DOUBLE PRECISION result. + */ +public final class MathFunction2 extends Function2 { + + /** + * ATAN2() (non-standard). + */ + public static final int ATAN2 = 0; + + /** + * LOG(). + */ + public static final int LOG = ATAN2 + 1; + + /** + * POWER(). 
+ */ + public static final int POWER = LOG + 1; + + private static final String[] NAMES = { // + "ATAN2", "LOG", "POWER" // + }; + + private final int function; + + public MathFunction2(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + double d1 = v1.getDouble(), d2 = v2.getDouble(); + switch (function) { + case ATAN2: + d1 = Math.atan2(d1, d2); + break; + case LOG: { + if (session.getMode().swapLogFunctionParameters) { + double t = d2; + d2 = d1; + d1 = t; + } + if (d2 <= 0) { + throw DbException.getInvalidValueException("LOG() argument", d2); + } + if (d1 <= 0 || d1 == 1) { + throw DbException.getInvalidValueException("LOG() base", d1); + } + if (d1 == Math.E) { + d1 = Math.log(d2); + } else if (d1 == 10d) { + d1 = Math.log10(d2); + } else { + d1 = Math.log(d2) / Math.log(d1); + } + break; + } + case POWER: + d1 = Math.pow(d1, d2); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return ValueDouble.get(d1); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + type = TypeInfo.TYPE_DOUBLE; + if (left.isConstant() && right.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/NamedExpression.java b/h2/src/main/org/h2/expression/function/NamedExpression.java new file mode 100644 index 0000000000..021c87ec13 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/NamedExpression.java @@ -0,0 +1,20 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +/** + * A function-like expression with a name. + */ +public interface NamedExpression { + + /** + * Get the name. + * + * @return the name in uppercase + */ + String getName(); + +} diff --git a/h2/src/main/org/h2/expression/function/NullIfFunction.java b/h2/src/main/org/h2/expression/function/NullIfFunction.java new file mode 100644 index 0000000000..b4b32d67be --- /dev/null +++ b/h2/src/main/org/h2/expression/function/NullIfFunction.java @@ -0,0 +1,50 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A NULLIF function. + */ +public final class NullIfFunction extends Function2 { + + public NullIfFunction(Expression arg1, Expression arg2) { + super(arg1, arg2); + } + + @Override + public Value getValue(SessionLocal session) { + Value v = left.getValue(session); + if (session.compareWithNull(v, right.getValue(session), true) == 0) { + v = ValueNull.INSTANCE; + } + return v; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + type = left.getType(); + TypeInfo.checkComparable(type, right.getType()); + if (left.isConstant() && right.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return "NULLIF"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/RandFunction.java b/h2/src/main/org/h2/expression/function/RandFunction.java new file mode 100644 index 0000000000..9b4c3afd08 --- /dev/null +++ 
b/h2/src/main/org/h2/expression/function/RandFunction.java @@ -0,0 +1,124 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.Random; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.util.MathUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; +import org.h2.value.ValueUuid; +import org.h2.value.ValueVarbinary; + +/** + * A RAND, SECURE_RAND, or RANDOM_UUID function. + */ +public final class RandFunction extends Function0_1 { + + /** + * RAND() (non-standard). + */ + public static final int RAND = 0; + + /** + * SECURE_RAND() (non-standard). + */ + public static final int SECURE_RAND = RAND + 1; + + /** + * RANDOM_UUID() (non-standard). 
+ */ + public static final int RANDOM_UUID = SECURE_RAND + 1; + + private static final String[] NAMES = { // + "RAND", "SECURE_RAND", "RANDOM_UUID" // + }; + + private final int function; + + public RandFunction(Expression arg, int function) { + super(arg); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v; + if (arg != null) { + v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + } else { + v = null; + } + switch (function) { + case RAND: { + Random random = session.getRandom(); + if (v != null) { + random.setSeed(v.getInt()); + } + v = ValueDouble.get(random.nextDouble()); + break; + } + case SECURE_RAND: + v = ValueVarbinary.getNoCopy(MathUtils.secureRandomBytes(v.getInt())); + break; + case RANDOM_UUID: + v = ValueUuid.getNewRandom(); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v; + } + + @Override + public Expression optimize(SessionLocal session) { + if (arg != null) { + arg = arg.optimize(session); + } + switch (function) { + case RAND: + type = TypeInfo.TYPE_DOUBLE; + break; + case SECURE_RAND: { + Value v; + type = arg.isConstant() && (v = arg.getValue(session)) != ValueNull.INSTANCE + ? 
TypeInfo.getTypeInfo(Value.VARBINARY, Math.max(v.getInt(), 1), 0, null) + : TypeInfo.TYPE_VARBINARY; + break; + } + case RANDOM_UUID: + type = TypeInfo.TYPE_UUID; + break; + default: + throw DbException.getInternalError("function=" + function); + } + return this; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return super.isEverything(visitor); + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/RegexpFunction.java b/h2/src/main/org/h2/expression/function/RegexpFunction.java new file mode 100644 index 0000000000..a3c1928ab0 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/RegexpFunction.java @@ -0,0 +1,270 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; + +import org.h2.api.ErrorCode; +import org.h2.engine.Mode; +import org.h2.engine.Mode.ModeEnum; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * A regular expression function. + */ +public final class RegexpFunction extends FunctionN { + + /** + * REGEXP_LIKE() (non-standard). + */ + public static final int REGEXP_LIKE = 0; + + /** + * REGEXP_REPLACE() (non-standard). + */ + public static final int REGEXP_REPLACE = REGEXP_LIKE + 1; + + /** + * REGEXP_SUBSTR() (non-standard). 
+ */ + public static final int REGEXP_SUBSTR = REGEXP_REPLACE + 1; + + private static final String[] NAMES = { // + "REGEXP_LIKE", "REGEXP_REPLACE", "REGEXP_SUBSTR" // + }; + + private final int function; + + public RegexpFunction(int function) { + super(new Expression[function == REGEXP_LIKE ? 3 : 6]); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v1 = args[0].getValue(session); + Value v2 = args[1].getValue(session); + int length = args.length; + switch (function) { + case REGEXP_LIKE: { + Value v3 = length >= 3 ? args[2].getValue(session) : null; + if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE || v3 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + String regexp = v2.getString(); + String regexpMode = v3 != null ? v3.getString() : null; + int flags = makeRegexpFlags(regexpMode, false); + try { + v1 = ValueBoolean.get(Pattern.compile(regexp, flags).matcher(v1.getString()).find()); + } catch (PatternSyntaxException e) { + throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, regexp); + } + break; + } + case REGEXP_REPLACE: { + String input = v1.getString(); + if (session.getMode().getEnum() == ModeEnum.Oracle) { + String replacement = args[2].getValue(session).getString(); + int position = length >= 4 ? args[3].getValue(session).getInt() : 1; + int occurrence = length >= 5 ? args[4].getValue(session).getInt() : 0; + String regexpMode = length >= 6 ? args[5].getValue(session).getString() : null; + if (input == null) { + v1 = ValueNull.INSTANCE; + } else { + String regexp = v2.getString(); + v1 = regexpReplace(session, input, regexp != null ? regexp : "", + replacement != null ? replacement : "", position, occurrence, regexpMode); + } + } else { + if (length > 4) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), "3..4"); + } + Value v3 = args[2].getValue(session); + Value v4 = length == 4 ? 
args[3].getValue(session) : null; + if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE || v3 == ValueNull.INSTANCE + || v4 == ValueNull.INSTANCE) { + v1 = ValueNull.INSTANCE; + } else { + v1 = regexpReplace(session, input, v2.getString(), v3.getString(), 1, 0, + v4 != null ? v4.getString() : null); + } + } + break; + } + case REGEXP_SUBSTR: { + Value v3 = length >= 3 ? args[2].getValue(session) : null; + Value v4 = length >= 4 ? args[3].getValue(session) : null; + Value v5 = length >= 5 ? args[4].getValue(session) : null; + Value v6 = length >= 6 ? args[5].getValue(session) : null; + v1 = regexpSubstr(v1, v2, v3, v4, v5, v6, session); + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + private static Value regexpReplace(SessionLocal session, String input, String regexp, String replacement, + int position, int occurrence, String regexpMode) { + Mode mode = session.getMode(); + if (mode.regexpReplaceBackslashReferences) { + if ((replacement.indexOf('\\') >= 0) || (replacement.indexOf('$') >= 0)) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < replacement.length(); i++) { + char c = replacement.charAt(i); + if (c == '$') { + sb.append('\\'); + } else if (c == '\\' && ++i < replacement.length()) { + c = replacement.charAt(i); + sb.append(c >= '0' && c <= '9' ? 
'$' : '\\'); + } + sb.append(c); + } + replacement = sb.toString(); + } + } + boolean isInPostgreSqlMode = mode.getEnum() == ModeEnum.PostgreSQL; + int flags = makeRegexpFlags(regexpMode, isInPostgreSqlMode); + if (isInPostgreSqlMode && (regexpMode == null || regexpMode.isEmpty() || !regexpMode.contains("g"))) { + occurrence = 1; + } + try { + Matcher matcher = Pattern.compile(regexp, flags).matcher(input).region(position - 1, input.length()); + if (occurrence == 0) { + return ValueVarchar.get(matcher.replaceAll(replacement), session); + } else { + StringBuffer sb = new StringBuffer(); + int index = 1; + while (matcher.find()) { + if (index == occurrence) { + matcher.appendReplacement(sb, replacement); + break; + } + index++; + } + matcher.appendTail(sb); + return ValueVarchar.get(sb.toString(), session); + } + } catch (PatternSyntaxException e) { + throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, regexp); + } catch (StringIndexOutOfBoundsException | IllegalArgumentException e) { + throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, replacement); + } + } + + private static Value regexpSubstr(Value inputString, Value regexpArg, Value positionArg, Value occurrenceArg, + Value regexpModeArg, Value subexpressionArg, SessionLocal session) { + if (inputString == ValueNull.INSTANCE || regexpArg == ValueNull.INSTANCE || positionArg == ValueNull.INSTANCE + || occurrenceArg == ValueNull.INSTANCE || subexpressionArg == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + String regexp = regexpArg.getString(); + + int position = positionArg != null ? positionArg.getInt() - 1 : 0; + int requestedOccurrence = occurrenceArg != null ? occurrenceArg.getInt() : 1; + String regexpMode = regexpModeArg != null ? regexpModeArg.getString() : null; + int subexpression = subexpressionArg != null ? 
subexpressionArg.getInt() : 0; + int flags = makeRegexpFlags(regexpMode, false); + try { + Matcher m = Pattern.compile(regexp, flags).matcher(inputString.getString()); + + boolean found = m.find(position); + for (int occurrence = 1; occurrence < requestedOccurrence && found; occurrence++) { + found = m.find(); + } + + if (!found) { + return ValueNull.INSTANCE; + } else { + return ValueVarchar.get(m.group(subexpression), session); + } + } catch (PatternSyntaxException e) { + throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, regexp); + } catch (IndexOutOfBoundsException e) { + return ValueNull.INSTANCE; + } + } + + private static int makeRegexpFlags(String stringFlags, boolean ignoreGlobalFlag) { + int flags = Pattern.UNICODE_CASE; + if (stringFlags != null) { + for (int i = 0; i < stringFlags.length(); ++i) { + switch (stringFlags.charAt(i)) { + case 'i': + flags |= Pattern.CASE_INSENSITIVE; + break; + case 'c': + flags &= ~Pattern.CASE_INSENSITIVE; + break; + case 'n': + flags |= Pattern.DOTALL; + break; + case 'm': + flags |= Pattern.MULTILINE; + break; + case 'g': + if (ignoreGlobalFlag) { + break; + } + //$FALL-THROUGH$ + default: + throw DbException.get(ErrorCode.INVALID_VALUE_2, stringFlags); + } + } + } + return flags; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + int min, max; + switch (function) { + case REGEXP_LIKE: + min = 2; + max = 3; + type = TypeInfo.TYPE_BOOLEAN; + break; + case REGEXP_REPLACE: + min = 3; + max = 6; + type = TypeInfo.TYPE_VARCHAR; + break; + case REGEXP_SUBSTR: + min = 2; + max = 6; + type = TypeInfo.TYPE_VARCHAR; + break; + default: + throw DbException.getInternalError("function=" + function); + } + int len = args.length; + if (len < min || len > max) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), min + ".." 
+ max); + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/SessionControlFunction.java b/h2/src/main/org/h2/expression/function/SessionControlFunction.java new file mode 100644 index 0000000000..c8d3024ff1 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SessionControlFunction.java @@ -0,0 +1,99 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.command.Command; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * An ABORT_SESSION() or CANCEL_SESSION() function. + */ +public final class SessionControlFunction extends Function1 { + + /** + * ABORT_SESSION(). + */ + public static final int ABORT_SESSION = 0; + + /** + * CANCEL_SESSION(). 
+ */ + public static final int CANCEL_SESSION = ABORT_SESSION + 1; + + private static final String[] NAMES = { // + "ABORT_SESSION", "CANCEL_SESSION" // + }; + + private final int function; + + public SessionControlFunction(Expression arg, int function) { + super(arg); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + int targetSessionId = v.getInt(); + session.getUser().checkAdmin(); + loop: for (SessionLocal s : session.getDatabase().getSessions(false)) { + if (s.getId() == targetSessionId) { + Command c = s.getCurrentCommand(); + switch (function) { + case ABORT_SESSION: + if (c != null) { + c.cancel(); + } + s.close(); + return ValueBoolean.TRUE; + case CANCEL_SESSION: + if (c != null) { + c.cancel(); + return ValueBoolean.TRUE; + } + break loop; + default: + throw DbException.getInternalError("function=" + function); + } + } + } + return ValueBoolean.FALSE; + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + type = TypeInfo.TYPE_BOOLEAN; + return this; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + case ExpressionVisitor.READONLY: + case ExpressionVisitor.QUERY_COMPARABLE: + return false; + } + return super.isEverything(visitor); + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/SetFunction.java b/h2/src/main/org/h2/expression/function/SetFunction.java new file mode 100644 index 0000000000..6b85efccee --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SetFunction.java @@ -0,0 +1,64 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Variable; +import org.h2.message.DbException; +import org.h2.value.Value; + +/** + * A SET function. + */ +public final class SetFunction extends Function2 { + + public SetFunction(Expression arg1, Expression arg2) { + super(arg1, arg2); + } + + @Override + public Value getValue(SessionLocal session) { + Variable var = (Variable) left; + Value v = right.getValue(session); + session.setVariable(var.getName(), v); + return v; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + type = right.getType(); + if (!(left instanceof Variable)) { + throw DbException.get(ErrorCode.CAN_ONLY_ASSIGN_TO_VARIABLE_1, left.getTraceSQL()); + } + return this; + } + + @Override + public String getName() { + return "SET"; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + if (!super.isEverything(visitor)) { + return false; + } + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + case ExpressionVisitor.QUERY_COMPARABLE: + case ExpressionVisitor.READONLY: + return false; + default: + return true; + } + } + +} diff --git a/h2/src/main/org/h2/expression/function/SignalFunction.java b/h2/src/main/org/h2/expression/function/SignalFunction.java new file mode 100644 index 0000000000..b8f42d2563 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SignalFunction.java @@ -0,0 +1,49 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.regex.Pattern; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * A SIGNAL function. + */ +public final class SignalFunction extends Function2 { + + private static final Pattern SIGNAL_PATTERN = Pattern.compile("[0-9A-Z]{5}"); + + public SignalFunction(Expression arg1, Expression arg2) { + super(arg1, arg2); + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + String sqlState = v1.getString(); + if (sqlState.startsWith("00") || !SIGNAL_PATTERN.matcher(sqlState).matches()) { + throw DbException.getInvalidValueException("SQLSTATE", sqlState); + } + throw DbException.fromUser(sqlState, v2.getString()); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + type = TypeInfo.TYPE_NULL; + return this; + } + + @Override + public String getName() { + return "SIGNAL"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/SoundexFunction.java b/h2/src/main/org/h2/expression/function/SoundexFunction.java new file mode 100644 index 0000000000..b7165c341f --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SoundexFunction.java @@ -0,0 +1,128 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.nio.charset.StandardCharsets; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueVarchar; + +/** + * A SOUNDEX or DIFFERENCE function. 
+ */ +public final class SoundexFunction extends Function1_2 { + + /** + * SOUNDEX() (non-standard). + */ + public static final int SOUNDEX = 0; + + /** + * DIFFERENCE() (non-standard). + */ + public static final int DIFFERENCE = SOUNDEX + 1; + + private static final String[] NAMES = { // + "SOUNDEX", "DIFFERENCE" // + }; + + private static final byte[] SOUNDEX_INDEX = // + "71237128722455712623718272\000\000\000\000\000\00071237128722455712623718272" + .getBytes(StandardCharsets.ISO_8859_1); + + private final int function; + + public SoundexFunction(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + switch (function) { + case SOUNDEX: + v1 = ValueVarchar.get(new String(getSoundex(v1.getString()), StandardCharsets.ISO_8859_1), session); + break; + case DIFFERENCE: { + v1 = ValueInteger.get(getDifference(v1.getString(), v2.getString())); + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + private static int getDifference(String s1, String s2) { + // TODO function difference: compatibility with SQL Server and HSQLDB + byte[] b1 = getSoundex(s1), b2 = getSoundex(s2); + int e = 0; + for (int i = 0; i < 4; i++) { + if (b1[i] == b2[i]) { + e++; + } + } + return e; + } + + private static byte[] getSoundex(String s) { + byte[] chars = { '0', '0', '0', '0' }; + byte lastDigit = '0'; + for (int i = 0, j = 0, l = s.length(); i < l && j < 4; i++) { + char c = s.charAt(i); + if (c >= 'A' && c <= 'z') { + byte newDigit = SOUNDEX_INDEX[c - 'A']; + if (newDigit != 0) { + if (j == 0) { + chars[j++] = (byte) c; + lastDigit = newDigit; + } else if (newDigit <= '6') { + if (newDigit != lastDigit) { + chars[j++] = lastDigit = newDigit; + } + } else if (newDigit == '7') { + lastDigit = newDigit; + } + } + } + } + return chars; + } + + @Override + public Expression optimize(SessionLocal session) 
{ + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + switch (function) { + case SOUNDEX: + type = TypeInfo.getTypeInfo(Value.VARCHAR, 4, 0, null); + break; + case DIFFERENCE: + type = TypeInfo.TYPE_INTEGER; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (left.isConstant() && (right == null || right.isConstant())) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/StringFunction.java b/h2/src/main/org/h2/expression/function/StringFunction.java new file mode 100644 index 0000000000..d34cfada92 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/StringFunction.java @@ -0,0 +1,244 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.Mode.ModeEnum; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * An string function with multiple arguments. + */ +public final class StringFunction extends FunctionN { + + /** + * LOCATE() (non-standard). + */ + public static final int LOCATE = 0; + + /** + * INSERT() (non-standard). + */ + public static final int INSERT = LOCATE + 1; + + /** + * REPLACE() (non-standard). + */ + public static final int REPLACE = INSERT + 1; + + /** + * LPAD() (non-standard). + */ + public static final int LPAD = REPLACE + 1; + + /** + * RPAD() (non-standard). 
+ */ + public static final int RPAD = LPAD + 1; + + /** + * TRANSLATE() (non-standard). + */ + public static final int TRANSLATE = RPAD + 1; + + private static final String[] NAMES = { // + "LOCATE", "INSERT", "REPLACE", "LPAD", "RPAD", "TRANSLATE" // + }; + + private final int function; + + public StringFunction(Expression arg1, Expression arg2, Expression arg3, int function) { + super(arg3 == null ? new Expression[] { arg1, arg2 } : new Expression[] { arg1, arg2, arg3 }); + this.function = function; + } + + public StringFunction(Expression arg1, Expression arg2, Expression arg3, Expression arg4, int function) { + super(new Expression[] { arg1, arg2, arg3, arg4 }); + this.function = function; + } + + public StringFunction(Expression[] args, int function) { + super(args); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v1 = args[0].getValue(session), v2 = args[1].getValue(session); + switch (function) { + case LOCATE: { + if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + Value v3 = args.length >= 3 ? args[2].getValue(session) : null; + if (v3 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + v1 = ValueInteger.get(locate(v1.getString(), v2.getString(), v3 == null ? 1 : v3.getInt())); + break; + } + case INSERT: { + Value v3 = args[2].getValue(session), v4 = args[3].getValue(session); + if (v2 != ValueNull.INSTANCE && v3 != ValueNull.INSTANCE) { + String s = insert(v1.getString(), v2.getInt(), v3.getInt(), v4.getString()); + v1 = s != null ? 
ValueVarchar.get(s, session) : ValueNull.INSTANCE; + } + break; + } + case REPLACE: { + if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + String after; + if (args.length >= 3) { + Value v3 = args[2].getValue(session); + if (v3 == ValueNull.INSTANCE && session.getMode().getEnum() != ModeEnum.Oracle) { + return ValueNull.INSTANCE; + } + after = v3.getString(); + if (after == null) { + after = ""; + } + } else { + after = ""; + } + v1 = ValueVarchar.get(StringUtils.replaceAll(v1.getString(), v2.getString(), after), session); + break; + } + case LPAD: + case RPAD: + if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + String padding; + if (args.length >= 3) { + Value v3 = args[2].getValue(session); + if (v3 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + padding = v3.getString(); + } else { + padding = null; + } + v1 = ValueVarchar.get(StringUtils.pad(v1.getString(), v2.getInt(), padding, function == RPAD), session); + break; + case TRANSLATE: { + if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + Value v3 = args[2].getValue(session); + if (v3 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + String matching = v2.getString(); + String replacement = v3.getString(); + if (session.getMode().getEnum() == ModeEnum.DB2) { + String t = matching; + matching = replacement; + replacement = t; + } + v1 = ValueVarchar.get(translate(v1.getString(), matching, replacement), session); + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + private static int locate(String search, String s, int start) { + if (start < 0) { + return s.lastIndexOf(search, s.length() + start) + 1; + } + return s.indexOf(search, start == 0 ? 
0 : start - 1) + 1; + } + + private static String insert(String s1, int start, int length, String s2) { + if (s1 == null) { + return s2; + } + if (s2 == null) { + return s1; + } + int len1 = s1.length(); + int len2 = s2.length(); + start--; + if (start < 0 || length <= 0 || len2 == 0 || start > len1) { + return s1; + } + if (start + length > len1) { + length = len1 - start; + } + return s1.substring(0, start) + s2 + s1.substring(start + length); + } + + private static String translate(String original, String findChars, String replaceChars) { + if (StringUtils.isNullOrEmpty(original) || StringUtils.isNullOrEmpty(findChars)) { + return original; + } + // if it stays null, then no replacements have been made + StringBuilder builder = null; + // if shorter than findChars, then characters are removed + // (if null, we don't access replaceChars at all) + int replaceSize = replaceChars == null ? 0 : replaceChars.length(); + for (int i = 0, size = original.length(); i < size; i++) { + char ch = original.charAt(i); + int index = findChars.indexOf(ch); + if (index >= 0) { + if (builder == null) { + builder = new StringBuilder(size); + if (i > 0) { + builder.append(original, 0, i); + } + } + if (index < replaceSize) { + ch = replaceChars.charAt(index); + } + } + if (builder != null) { + builder.append(ch); + } + } + return builder == null ? 
original : builder.toString(); + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + switch (function) { + case LOCATE: + type = TypeInfo.TYPE_INTEGER; + break; + case INSERT: + case REPLACE: + case LPAD: + case RPAD: + case TRANSLATE: + type = TypeInfo.TYPE_VARCHAR; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/StringFunction1.java b/h2/src/main/org/h2/expression/function/StringFunction1.java new file mode 100644 index 0000000000..9b24996541 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/StringFunction1.java @@ -0,0 +1,283 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +import org.h2.api.ErrorCode; +import org.h2.engine.Mode; +import org.h2.engine.Mode.ModeEnum; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; + +/** + * A string function with one argument. + */ +public final class StringFunction1 extends Function1 { + + // Fold functions + + /** + * UPPER(). + */ + public static final int UPPER = 0; + + /** + * LOWER(). 
+ */ + public static final int LOWER = UPPER + 1; + + // Various non-standard functions + + /** + * ASCII() (non-standard). + */ + public static final int ASCII = LOWER + 1; + + /** + * CHAR() (non-standard). + */ + public static final int CHAR = ASCII + 1; + + /** + * STRINGENCODE() (non-standard). + */ + public static final int STRINGENCODE = CHAR + 1; + + /** + * STRINGDECODE() (non-standard). + */ + public static final int STRINGDECODE = STRINGENCODE + 1; + + /** + * STRINGTOUTF8() (non-standard). + */ + public static final int STRINGTOUTF8 = STRINGDECODE + 1; + + /** + * UTF8TOSTRING() (non-standard). + */ + public static final int UTF8TOSTRING = STRINGTOUTF8 + 1; + + /** + * HEXTORAW() (non-standard). + */ + public static final int HEXTORAW = UTF8TOSTRING + 1; + + /** + * RAWTOHEX() (non-standard). + */ + public static final int RAWTOHEX = HEXTORAW + 1; + + /** + * SPACE() (non-standard). + */ + public static final int SPACE = RAWTOHEX + 1; + + /** + * QUOTE_IDENT() (non-standard). 
+ */ + public static final int QUOTE_IDENT = SPACE + 1; + + private static final String[] NAMES = { // + "UPPER", "LOWER", "ASCII", "CHAR", "STRINGENCODE", "STRINGDECODE", "STRINGTOUTF8", "UTF8TOSTRING", + "HEXTORAW", "RAWTOHEX", "SPACE", "QUOTE_IDENT" // + }; + + private final int function; + + public StringFunction1(Expression arg, int function) { + super(arg); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + switch (function) { + case UPPER: + // TODO this is locale specific, need to document or provide a way + // to set the locale + v = ValueVarchar.get(v.getString().toUpperCase(), session); + break; + case LOWER: + // TODO this is locale specific, need to document or provide a way + // to set the locale + v = ValueVarchar.get(v.getString().toLowerCase(), session); + break; + case ASCII: { + String s = v.getString(); + v = s.isEmpty() ? ValueNull.INSTANCE : ValueInteger.get(s.charAt(0)); + break; + } + case CHAR: + v = ValueVarchar.get(String.valueOf((char) v.getInt()), session); + break; + case STRINGENCODE: + v = ValueVarchar.get(StringUtils.javaEncode(v.getString()), session); + break; + case STRINGDECODE: + v = ValueVarchar.get(StringUtils.javaDecode(v.getString()), session); + break; + case STRINGTOUTF8: + v = ValueVarbinary.getNoCopy(v.getString().getBytes(StandardCharsets.UTF_8)); + break; + case UTF8TOSTRING: + v = ValueVarchar.get(new String(v.getBytesNoCopy(), StandardCharsets.UTF_8), session); + break; + case HEXTORAW: + v = hexToRaw(v.getString(), session); + break; + case RAWTOHEX: + v = ValueVarchar.get(rawToHex(v, session.getMode()), session); + break; + case SPACE: { + byte[] chars = new byte[Math.max(0, v.getInt())]; + Arrays.fill(chars, (byte) ' '); + v = ValueVarchar.get(new String(chars, StandardCharsets.ISO_8859_1), session); + break; + } + case QUOTE_IDENT: + v = 
ValueVarchar.get(StringUtils.quoteIdentifier(v.getString()), session); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v; + } + + private static Value hexToRaw(String s, SessionLocal session) { + if (session.getMode().getEnum() == ModeEnum.Oracle) { + return ValueVarbinary.get(StringUtils.convertHexToBytes(s)); + } + int len = s.length(); + if (len % 4 != 0) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s); + } + StringBuilder builder = new StringBuilder(len / 4); + for (int i = 0; i < len; i += 4) { + try { + builder.append((char) Integer.parseInt(s.substring(i, i + 4), 16)); + } catch (NumberFormatException e) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s); + } + } + return ValueVarchar.get(builder.toString(), session); + } + + private static String rawToHex(Value v, Mode mode) { + if (DataType.isBinaryStringOrSpecialBinaryType(v.getValueType())) { + return StringUtils.convertBytesToHex(v.getBytesNoCopy()); + } + String s = v.getString(); + if (mode.getEnum() == ModeEnum.Oracle) { + return StringUtils.convertBytesToHex(s.getBytes(StandardCharsets.UTF_8)); + } + int length = s.length(); + StringBuilder buff = new StringBuilder(4 * length); + for (int i = 0; i < length; i++) { + String hex = Integer.toHexString(s.charAt(i) & 0xffff); + for (int j = hex.length(); j < 4; j++) { + buff.append('0'); + } + buff.append(hex); + } + return buff.toString(); + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + switch (function) { + /* + * UPPER and LOWER may return string of different length for some + * characters. 
+ */ + case UPPER: + case LOWER: + case STRINGENCODE: + case SPACE: + case QUOTE_IDENT: + type = TypeInfo.TYPE_VARCHAR; + break; + case ASCII: + type = TypeInfo.TYPE_INTEGER; + break; + case CHAR: + type = TypeInfo.getTypeInfo(Value.VARCHAR, 1L, 0, null); + break; + case STRINGDECODE: { + TypeInfo t = arg.getType(); + type = DataType.isCharacterStringType(t.getValueType()) + ? TypeInfo.getTypeInfo(Value.VARCHAR, t.getPrecision(), 0, null) + : TypeInfo.TYPE_VARCHAR; + break; + } + case STRINGTOUTF8: + type = TypeInfo.TYPE_VARBINARY; + break; + case UTF8TOSTRING: { + TypeInfo t = arg.getType(); + type = DataType.isBinaryStringType(t.getValueType()) + ? TypeInfo.getTypeInfo(Value.VARCHAR, t.getPrecision(), 0, null) + : TypeInfo.TYPE_VARCHAR; + break; + } + case HEXTORAW: { + TypeInfo t = arg.getType(); + if (session.getMode().getEnum() == ModeEnum.Oracle) { + if (DataType.isCharacterStringType(t.getValueType())) { + type = TypeInfo.getTypeInfo(Value.VARBINARY, t.getPrecision() / 2, 0, null); + } else { + type = TypeInfo.TYPE_VARBINARY; + } + } else { + if (DataType.isCharacterStringType(t.getValueType())) { + type = TypeInfo.getTypeInfo(Value.VARCHAR, t.getPrecision() / 4, 0, null); + } else { + type = TypeInfo.TYPE_VARCHAR; + } + } + break; + } + case RAWTOHEX: { + TypeInfo t = arg.getType(); + long precision = t.getPrecision(); + int mul = DataType.isBinaryStringOrSpecialBinaryType(t.getValueType()) ? 2 + : session.getMode().getEnum() == ModeEnum.Oracle ? 6 : 4; + type = TypeInfo.getTypeInfo(Value.VARCHAR, + precision <= Long.MAX_VALUE / mul ? 
precision * mul : Long.MAX_VALUE, 0, null); + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + if (arg.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/StringFunction2.java b/h2/src/main/org/h2/expression/function/StringFunction2.java new file mode 100644 index 0000000000..6b7395cb02 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/StringFunction2.java @@ -0,0 +1,108 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueVarchar; + +/** + * A string function with two arguments. + */ +public final class StringFunction2 extends Function2 { + + /** + * LEFT() (non-standard). + */ + public static final int LEFT = 0; + + /** + * RIGHT() (non-standard). + */ + public static final int RIGHT = LEFT + 1; + + /** + * REPEAT() (non-standard). 
+ */ + public static final int REPEAT = RIGHT + 1; + + private static final String[] NAMES = { // + "LEFT", "RIGHT", "REPEAT" // + }; + + private final int function; + + public StringFunction2(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + String s = v1.getString(); + int count = v2.getInt(); + if (count <= 0) { + return ValueVarchar.get("", session); + } + int length = s.length(); + switch (function) { + case LEFT: + if (count > length) { + count = length; + } + s = s.substring(0, count); + break; + case RIGHT: + if (count > length) { + count = length; + } + s = s.substring(length - count); + break; + case REPEAT: { + StringBuilder builder = new StringBuilder(length * count); + while (count-- > 0) { + builder.append(s); + } + s = builder.toString(); + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return ValueVarchar.get(s, session); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + switch (function) { + case LEFT: + case RIGHT: + type = TypeInfo.getTypeInfo(Value.VARCHAR, left.getType().getPrecision(), 0, null); + break; + case REPEAT: + type = TypeInfo.TYPE_VARCHAR; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (left.isConstant() && right.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/SubstringFunction.java b/h2/src/main/org/h2/expression/function/SubstringFunction.java new file mode 100644 index 0000000000..b93e464e54 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SubstringFunction.java @@ -0,0 +1,126 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.Arrays; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; + +/** + * A SUBSTRING function. + */ +public final class SubstringFunction extends FunctionN { + + public SubstringFunction() { + super(new Expression[3]); + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + if (type.getValueType() == Value.VARBINARY) { + byte[] s = v1.getBytesNoCopy(); + int sl = s.length; + int start = v2.getInt(); + // These compatibility conditions violate the Standard + if (start == 0) { + start = 1; + } else if (start < 0) { + start = sl + start + 1; + } + int end = v3 == null ? Math.max(sl + 1, start) : start + v3.getInt(); + // SQL Standard requires "data exception - substring error" when + // end < start but H2 does not throw it for compatibility + start = Math.max(start, 1); + end = Math.min(end, sl + 1); + if (start > sl || end <= start) { + return ValueVarbinary.EMPTY; + } + start--; + end--; + if (start == 0 && end == s.length) { + return v1.convertTo(TypeInfo.TYPE_VARBINARY); + } + return ValueVarbinary.getNoCopy(Arrays.copyOfRange(s, start, end)); + } else { + String s = v1.getString(); + int sl = s.length(); + int start = v2.getInt(); + // These compatibility conditions violate the Standard + if (start == 0) { + start = 1; + } else if (start < 0) { + start = sl + start + 1; + } + int end = v3 == null ? 
Math.max(sl + 1, start) : start + v3.getInt(); + // SQL Standard requires "data exception - substring error" when + // end < start but H2 does not throw it for compatibility + start = Math.max(start, 1); + end = Math.min(end, sl + 1); + if (start > sl || end <= start) { + return session.getMode().treatEmptyStringsAsNull ? ValueNull.INSTANCE : ValueVarchar.EMPTY; + } + return ValueVarchar.get(s.substring(start - 1, end - 1), null); + } + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + int len = args.length; + if (len < 2 || len > 3) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), "2..3"); + } + TypeInfo argType = args[0].getType(); + long p = argType.getPrecision(); + Expression arg = args[1]; + Value v; + if (arg.isConstant() && (v = arg.getValue(session)) != ValueNull.INSTANCE) { + // if only two arguments are used, + // subtract offset from first argument length + p -= v.getLong() - 1; + } + if (args.length == 3) { + arg = args[2]; + if (arg.isConstant() && (v = arg.getValue(session)) != ValueNull.INSTANCE) { + // if the third argument is constant it is at most this value + p = Math.min(p, v.getLong()); + } + } + p = Math.max(0, p); + type = TypeInfo.getTypeInfo( + DataType.isBinaryStringType(argType.getValueType()) ? 
Value.VARBINARY : Value.VARCHAR, p, 0, null); + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + args[0].getUnenclosedSQL(builder.append(getName()).append('('), sqlFlags); + args[1].getUnenclosedSQL(builder.append(" FROM "), sqlFlags); + if (args.length > 2) { + args[2].getUnenclosedSQL(builder.append(" FOR "), sqlFlags); + } + return builder.append(')'); + } + + @Override + public String getName() { + return "SUBSTRING"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/SysInfoFunction.java b/h2/src/main/org/h2/expression/function/SysInfoFunction.java new file mode 100644 index 0000000000..dd02010060 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SysInfoFunction.java @@ -0,0 +1,176 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.Constants; +import org.h2.engine.SessionLocal; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Operation0; +import org.h2.message.DbException; +import org.h2.util.Utils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * Database or session information function. + */ +public final class SysInfoFunction extends Operation0 implements NamedExpression { + + /** + * AUTOCOMMIT(). + */ + public static final int AUTOCOMMIT = 0; + + /** + * DATABASE_PATH(). + */ + public static final int DATABASE_PATH = AUTOCOMMIT + 1; + + /** + * H2VERSION(). + */ + public static final int H2VERSION = DATABASE_PATH + 1; + + /** + * LOCK_MODE(). 
+ */ + public static final int LOCK_MODE = H2VERSION + 1; + + /** + * LOCK_TIMEOUT(). + */ + public static final int LOCK_TIMEOUT = LOCK_MODE + 1; + + /** + * MEMORY_FREE(). + */ + public static final int MEMORY_FREE = LOCK_TIMEOUT + 1; + + /** + * MEMORY_USED(). + */ + public static final int MEMORY_USED = MEMORY_FREE + 1; + + /** + * READONLY(). + */ + public static final int READONLY = MEMORY_USED + 1; + + /** + * SESSION_ID(). + */ + public static final int SESSION_ID = READONLY + 1; + + /** + * TRANSACTION_ID(). + */ + public static final int TRANSACTION_ID = SESSION_ID + 1; + + private static final int[] TYPES = { Value.BOOLEAN, Value.VARCHAR, Value.VARCHAR, Value.INTEGER, Value.INTEGER, + Value.BIGINT, Value.BIGINT, Value.BOOLEAN, Value.INTEGER, Value.VARCHAR }; + + private static final String[] NAMES = { "AUTOCOMMIT", "DATABASE_PATH", "H2VERSION", "LOCK_MODE", "LOCK_TIMEOUT", + "MEMORY_FREE", "MEMORY_USED", "READONLY", "SESSION_ID", "TRANSACTION_ID" }; + + /** + * Get the name for this function id. + * + * @param function + * the function id + * @return the name + */ + public static String getName(int function) { + return NAMES[function]; + } + + private final int function; + + private final TypeInfo type; + + public SysInfoFunction(int function) { + this.function = function; + type = TypeInfo.getTypeInfo(TYPES[function]); + } + + @Override + public Value getValue(SessionLocal session) { + Value result; + switch (function) { + case AUTOCOMMIT: + result = ValueBoolean.get(session.getAutoCommit()); + break; + case DATABASE_PATH: { + String path = session.getDatabase().getDatabasePath(); + result = path != null ? 
ValueVarchar.get(path, session) : ValueNull.INSTANCE; + break; + } + case H2VERSION: + result = ValueVarchar.get(Constants.VERSION, session); + break; + case LOCK_MODE: + result = ValueInteger.get(session.getDatabase().getLockMode()); + break; + case LOCK_TIMEOUT: + result = ValueInteger.get(session.getLockTimeout()); + break; + case MEMORY_FREE: + session.getUser().checkAdmin(); + result = ValueBigint.get(Utils.getMemoryFree()); + break; + case MEMORY_USED: + session.getUser().checkAdmin(); + result = ValueBigint.get(Utils.getMemoryUsed()); + break; + case READONLY: + result = ValueBoolean.get(session.getDatabase().isReadOnly()); + break; + case SESSION_ID: + result = ValueInteger.get(session.getId()); + break; + case TRANSACTION_ID: + result = session.getTransactionId(); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return result; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return builder.append(getName()).append("()"); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return true; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public int getCost() { + return 1; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/TableInfoFunction.java b/h2/src/main/org/h2/expression/function/TableInfoFunction.java new file mode 100644 index 0000000000..c447033f88 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/TableInfoFunction.java @@ -0,0 +1,111 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.ArrayList; + +import org.h2.command.Parser; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.mvstore.db.MVSpatialIndex; +import org.h2.table.Column; +import org.h2.table.Table; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; + +/** + * A table information function. + */ +public final class TableInfoFunction extends Function1_2 { + + /** + * DISK_SPACE_USED() (non-standard). + */ + public static final int DISK_SPACE_USED = 0; + + /** + * ESTIMATED_ENVELOPE(). + */ + public static final int ESTIMATED_ENVELOPE = DISK_SPACE_USED + 1; + + private static final String[] NAMES = { // + "DISK_SPACE_USED", "ESTIMATED_ENVELOPE" // + }; + + private final int function; + + public TableInfoFunction(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + Table table = new Parser(session).parseTableName(v1.getString()); + l: switch (function) { + case DISK_SPACE_USED: + v1 = ValueBigint.get(table.getDiskSpaceUsed()); + break; + case ESTIMATED_ENVELOPE: { + Column column = table.getColumn(v2.getString()); + ArrayList indexes = table.getIndexes(); + if (indexes != null) { + for (int i = 1, size = indexes.size(); i < size; i++) { + Index index = indexes.get(i); + if (index instanceof MVSpatialIndex && index.isFirstColumn(column)) { + v1 = ((MVSpatialIndex) index).getEstimatedBounds(session); + break l; + } + } + } + v1 = ValueNull.INSTANCE; + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + left = 
left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + switch (function) { + case DISK_SPACE_USED: + type = TypeInfo.TYPE_BIGINT; + break; + case ESTIMATED_ENVELOPE: + type = TypeInfo.TYPE_GEOMETRY; + break; + default: + throw DbException.getInternalError("function=" + function); + } + return this; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return super.isEverything(visitor); + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/ToChar.java b/h2/src/main/org/h2/expression/function/ToCharFunction.java similarity index 84% rename from h2/src/main/org/h2/expression/function/ToChar.java rename to h2/src/main/org/h2/expression/function/ToCharFunction.java index dc5e18f58b..9eb178060c 100644 --- a/h2/src/main/org/h2/expression/function/ToChar.java +++ b/h2/src/main/org/h2/expression/function/ToCharFunction.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: Daniel Gredler */ package org.h2.expression.function; @@ -14,24 +14,31 @@ import java.util.Arrays; import java.util.Currency; import java.util.Locale; -import java.util.TimeZone; import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; import org.h2.message.DbException; import org.h2.util.DateTimeUtils; import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarchar; /** * Emulates Oracle's TO_CHAR function. */ -public class ToChar { +public final class ToCharFunction extends FunctionN { /** * The beginning of the Julian calendar. */ - static final int JULIAN_EPOCH = -2_440_588; + public static final int JULIAN_EPOCH = -2_440_588; private static final int[] ROMAN_VALUES = { 1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1 }; @@ -42,22 +49,22 @@ public class ToChar { /** * The month field. */ - static final int MONTHS = 0; + public static final int MONTHS = 0; /** * The month field (short form). */ - static final int SHORT_MONTHS = 1; + public static final int SHORT_MONTHS = 1; /** * The weekday field. */ - static final int WEEKDAYS = 2; + public static final int WEEKDAYS = 2; /** * The weekday field (short form). */ - static final int SHORT_WEEKDAYS = 3; + public static final int SHORT_WEEKDAYS = 3; /** * The AM / PM field. @@ -66,17 +73,14 @@ public class ToChar { private static volatile String[][] NAMES; - private ToChar() { - // utility class - } - /** * Emulates Oracle's TO_CHAR(number) function. * - *

          - * - * - * + *
          InputOutputClosest {@link DecimalFormat} Equivalent
          + * + * + * + * * * * @@ -233,8 +237,11 @@ public static String toChar(BigDecimal number, String format, // go ahead and do that first int separator = findDecimalSeparator(format); int formatScale = calculateScale(format, separator); - if (formatScale < number.scale()) { + int numberScale = number.scale(); + if (formatScale < numberScale) { number = number.setScale(formatScale, RoundingMode.HALF_UP); + } else if (numberScale < 0) { + number = number.setScale(0); } // any 9s to the left of the decimal separator but to the right of a @@ -276,15 +283,15 @@ public static String toChar(BigDecimal number, String format, output.insert(0, localGrouping); } } else if (c == 'C' || c == 'c') { - Currency currency = Currency.getInstance(Locale.getDefault()); + Currency currency = getCurrency(); output.insert(0, currency.getCurrencyCode()); maxLength += 6; } else if (c == 'L' || c == 'l' || c == 'U' || c == 'u') { - Currency currency = Currency.getInstance(Locale.getDefault()); + Currency currency = getCurrency(); output.insert(0, currency.getSymbol()); maxLength += 9; } else if (c == '$') { - Currency currency = Currency.getInstance(Locale.getDefault()); + Currency currency = getCurrency(); String cs = currency.getSymbol(); output.insert(0, cs); } else { @@ -357,6 +364,11 @@ public static String toChar(BigDecimal number, String format, return output.toString(); } + private static Currency getCurrency() { + Locale locale = Locale.getDefault(); + return Currency.getInstance(locale.getCountry().length() == 2 ? 
locale : Locale.US); + } + private static String zeroesAfterDecimalSeparator(BigDecimal number) { final String numberStr = number.toPlainString(); final int idx = numberStr.indexOf('.'); @@ -487,7 +499,7 @@ private static String toHex(BigDecimal number, String format) { * @param names the field * @return the names */ - static String[] getDateNames(int names) { + public static String[] getDateNames(int names) { String[][] result = NAMES; if (result == null) { result = new String[5][]; @@ -509,9 +521,18 @@ static String[] getDateNames(int names) { return result[names]; } + /** + * Used for testing. + */ + public static void clearNames() { + NAMES = null; + } + /** * Returns time zone display name or ID for the specified date-time value. * + * @param session + * the session * @param value * value * @param tzd @@ -520,25 +541,31 @@ static String[] getDateNames(int names) { * region) * @return time zone display name or ID */ - private static String getTimeZone(Value value, boolean tzd) { - if (!(value instanceof ValueTimestampTimeZone)) { - TimeZone tz = TimeZone.getDefault(); + private static String getTimeZone(SessionLocal session, Value value, boolean tzd) { + if (value instanceof ValueTimestampTimeZone) { + return DateTimeUtils.timeZoneNameFromOffsetSeconds(((ValueTimestampTimeZone) value) + .getTimeZoneOffsetSeconds()); + } else if (value instanceof ValueTimeTimeZone) { + return DateTimeUtils.timeZoneNameFromOffsetSeconds(((ValueTimeTimeZone) value) + .getTimeZoneOffsetSeconds()); + } else { + TimeZoneProvider tz = session.currentTimeZone(); if (tzd) { - boolean daylight = tz.inDaylightTime(value.getTimestamp()); - return tz.getDisplayName(daylight, TimeZone.SHORT); + ValueTimestamp v = (ValueTimestamp) value.convertTo(TypeInfo.TYPE_TIMESTAMP, session); + return tz.getShortId(tz.getEpochSecondsFromLocal(v.getDateValue(), v.getTimeNanos())); } - return tz.getID(); + return tz.getId(); } - return DateTimeUtils.timeZoneNameFromOffsetMins(((ValueTimestampTimeZone) 
value).getTimeZoneOffsetMins()); } /** * Emulates Oracle's TO_CHAR(datetime) function. * - *

          TO_CHAR(number) function
          InputOutputClosest {@link DecimalFormat} Equivalent
          ,Grouping separator.,
          - * - * - * + *
          InputOutputClosest {@link SimpleDateFormat} Equivalent
          + * + * + * + * * * * @@ -664,22 +691,25 @@ private static String getTimeZone(Value value, boolean tzd) { * See also TO_CHAR(datetime) and datetime format models * in the Oracle documentation. * + * @param session the session * @param value the date-time value to format * @param format the format pattern to use (if any) * @param nlsParam the NLS parameter (if any) + * * @return the formatted timestamp */ - public static String toCharDateTime(Value value, String format, @SuppressWarnings("unused") String nlsParam) { - long[] a = DateTimeUtils.dateAndTimeFromValue(value); + public static String toCharDateTime(SessionLocal session, Value value, String format, + @SuppressWarnings("unused") String nlsParam) { + long[] a = DateTimeUtils.dateAndTimeFromValue(value, session); long dateValue = a[0]; long timeNanos = a[1]; int year = DateTimeUtils.yearFromDateValue(dateValue); int monthOfYear = DateTimeUtils.monthFromDateValue(dateValue); int dayOfMonth = DateTimeUtils.dayFromDateValue(dateValue); int posYear = Math.abs(year); - long second = timeNanos / 1_000_000_000; + int second = (int) (timeNanos / 1_000_000_000); int nanos = (int) (timeNanos - second * 1_000_000_000); - int minute = (int) (second / 60); + int minute = second / 60; second -= minute * 60; int hour = minute / 60; minute -= hour * 60; @@ -727,19 +757,14 @@ public static String toCharDateTime(Value value, String format, @SuppressWarning StringUtils.appendZeroPadded(output, 4, posYear); i += 2; } else if (containsAt(format, i, "DS") != null) { - StringUtils.appendZeroPadded(output, 2, monthOfYear); - output.append('/'); - StringUtils.appendZeroPadded(output, 2, dayOfMonth); - output.append('/'); + StringUtils.appendTwoDigits(output, monthOfYear).append('/'); + StringUtils.appendTwoDigits(output, dayOfMonth).append('/'); StringUtils.appendZeroPadded(output, 4, posYear); i += 2; } else if (containsAt(format, i, "TS") != null) { output.append(h12).append(':'); - StringUtils.appendZeroPadded(output, 2, 
minute); - output.append(':'); - StringUtils.appendZeroPadded(output, 2, second); - output.append(' '); - output.append(getDateNames(AM_PM)[isAM ? 0 : 1]); + StringUtils.appendTwoDigits(output, minute).append(':'); + StringUtils.appendTwoDigits(output, second).append(' ').append(getDateNames(AM_PM)[isAM ? 0 : 1]); i += 2; // Day @@ -748,7 +773,7 @@ public static String toCharDateTime(Value value, String format, @SuppressWarning output.append(DateTimeUtils.getDayOfYear(dateValue)); i += 3; } else if (containsAt(format, i, "DD") != null) { - StringUtils.appendZeroPadded(output, 2, dayOfMonth); + StringUtils.appendTwoDigits(output, dayOfMonth); i += 2; } else if ((cap = containsAt(format, i, "DY")) != null) { String day = getDateNames(SHORT_WEEKDAYS)[DateTimeUtils.getSundayDayOfWeek(dateValue)]; @@ -771,19 +796,19 @@ public static String toCharDateTime(Value value, String format, @SuppressWarning // Hours } else if (containsAt(format, i, "HH24") != null) { - StringUtils.appendZeroPadded(output, 2, hour); + StringUtils.appendTwoDigits(output, hour); i += 4; } else if (containsAt(format, i, "HH12") != null) { - StringUtils.appendZeroPadded(output, 2, h12); + StringUtils.appendTwoDigits(output, h12); i += 4; } else if (containsAt(format, i, "HH") != null) { - StringUtils.appendZeroPadded(output, 2, h12); + StringUtils.appendTwoDigits(output, h12); i += 2; // Minutes } else if (containsAt(format, i, "MI") != null) { - StringUtils.appendZeroPadded(output, 2, minute); + StringUtils.appendTwoDigits(output, minute); i += 2; // Seconds @@ -793,7 +818,7 @@ public static String toCharDateTime(Value value, String format, @SuppressWarning output.append(seconds); i += 5; } else if (containsAt(format, i, "SS") != null) { - StringUtils.appendZeroPadded(output, 2, second); + StringUtils.appendTwoDigits(output, second); i += 2; // Fractional seconds @@ -811,20 +836,31 @@ public static String toCharDateTime(Value value, String format, @SuppressWarning // Time zone } else if 
(containsAt(format, i, "TZR") != null) { - output.append(getTimeZone(value, false)); + output.append(getTimeZone(session, value, false)); i += 3; } else if (containsAt(format, i, "TZD") != null) { - output.append(getTimeZone(value, true)); + output.append(getTimeZone(session, value, true)); + i += 3; + } else if (containsAt(format, i, "TZH") != null) { + int hours = DateTimeFunction.extractDateTime(session, value, DateTimeFunction.TIMEZONE_HOUR); + output.append( hours < 0 ? '-' : '+'); + StringUtils.appendTwoDigits(output, Math.abs(hours)); i += 3; - // Week + } else if (containsAt(format, i, "TZM") != null) { + StringUtils.appendTwoDigits(output, + Math.abs(DateTimeFunction.extractDateTime(session, value, DateTimeFunction.TIMEZONE_MINUTE))); + i += 3; - } else if (containsAt(format, i, "IW", "WW") != null) { - output.append(DateTimeUtils.getWeekOfYear(dateValue, 0, 1)); + // Week + } else if (containsAt(format, i, "WW") != null) { + StringUtils.appendTwoDigits(output, (DateTimeUtils.getDayOfYear(dateValue) - 1) / 7 + 1); + i += 2; + } else if (containsAt(format, i, "IW") != null) { + StringUtils.appendTwoDigits(output, DateTimeUtils.getIsoWeekOfYear(dateValue)); i += 2; } else if (containsAt(format, i, "W") != null) { - int w = 1 + dayOfMonth / 7; - output.append(w); + output.append((dayOfMonth - 1) / 7 + 1); i += 1; // Year @@ -852,10 +888,10 @@ public static String toCharDateTime(Value value, String format, @SuppressWarning StringUtils.appendZeroPadded(output, 3, Math.abs(DateTimeUtils.getIsoWeekYear(dateValue)) % 1000); i += 3; } else if (containsAt(format, i, "YY", "RR") != null) { - StringUtils.appendZeroPadded(output, 2, posYear % 100); + StringUtils.appendTwoDigits(output, posYear % 100); i += 2; } else if (containsAt(format, i, "IY") != null) { - StringUtils.appendZeroPadded(output, 2, Math.abs(DateTimeUtils.getIsoWeekYear(dateValue)) % 100); + StringUtils.appendTwoDigits(output, Math.abs(DateTimeUtils.getIsoWeekYear(dateValue)) % 100); i += 2; } else if 
(containsAt(format, i, "Y") != null) { output.append(posYear % 10); @@ -878,7 +914,7 @@ public static String toCharDateTime(Value value, String format, @SuppressWarning output.append(cap.apply(month)); i += 3; } else if (containsAt(format, i, "MM") != null) { - StringUtils.appendZeroPadded(output, 2, monthOfYear); + StringUtils.appendTwoDigits(output, monthOfYear); i += 2; } else if ((cap = containsAt(format, i, "RM")) != null) { output.append(cap.apply(toRomanNumeral(monthOfYear))); @@ -1042,4 +1078,50 @@ public String apply(String s) { } } } + + public ToCharFunction(Expression arg1, Expression arg2, Expression arg3) { + super(arg2 == null ? new Expression[] { arg1 } + : arg3 == null ? new Expression[] { arg1, arg2 } : new Expression[] { arg1, arg2, arg3 }); + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + switch (v1.getValueType()) { + case Value.TIME: + case Value.DATE: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + v1 = ValueVarchar.get(toCharDateTime(session, v1, v2 == null ? null : v2.getString(), + v3 == null ? null : v3.getString()), session); + break; + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + case Value.NUMERIC: + case Value.DOUBLE: + case Value.REAL: + v1 = ValueVarchar.get(toChar(v1.getBigDecimal(), v2 == null ? null : v2.getString(), + v3 == null ? 
null : v3.getString()), session); + break; + default: + v1 = ValueVarchar.get(v1.getString(), session); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + type = TypeInfo.TYPE_VARCHAR; + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return "TO_CHAR"; + } + } diff --git a/h2/src/main/org/h2/expression/function/TrimFunction.java b/h2/src/main/org/h2/expression/function/TrimFunction.java new file mode 100644 index 0000000000..21f56a6d31 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/TrimFunction.java @@ -0,0 +1,86 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueVarchar; + +/** + * A TRIM function. + */ +public final class TrimFunction extends Function1_2 { + + /** + * The LEADING flag. + */ + public static final int LEADING = 1; + + /** + * The TRAILING flag. + */ + public static final int TRAILING = 2; + + private int flags; + + public TrimFunction(Expression from, Expression space, int flags) { + super(from, space); + this.flags = flags; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + return ValueVarchar.get(StringUtils.trim(v1.getString(), (flags & LEADING) != 0, (flags & TRAILING) != 0, + v2 != null ? 
v2.getString() : " "), session); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + type = TypeInfo.getTypeInfo(Value.VARCHAR, left.getType().getPrecision(), 0, null); + if (left.isConstant() && (right == null || right.isConstant())) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append(getName()).append('('); + boolean needFrom = false; + switch (flags) { + case LEADING: + builder.append("LEADING "); + needFrom = true; + break; + case TRAILING: + builder.append("TRAILING "); + needFrom = true; + break; + } + if (right != null) { + right.getUnenclosedSQL(builder, sqlFlags); + needFrom = true; + } + if (needFrom) { + builder.append(" FROM "); + } + return left.getUnenclosedSQL(builder, sqlFlags).append(')'); + } + + @Override + public String getName() { + return "TRIM"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/TruncateValueFunction.java b/h2/src/main/org/h2/expression/function/TruncateValueFunction.java new file mode 100644 index 0000000000..4bbedf930d --- /dev/null +++ b/h2/src/main/org/h2/expression/function/TruncateValueFunction.java @@ -0,0 +1,105 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.math.BigDecimal; +import java.math.MathContext; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.MathUtils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDecfloat; +import org.h2.value.ValueNumeric; + +/** + * A TRUNCATE_VALUE function. + */ +public final class TruncateValueFunction extends FunctionN { + + public TruncateValueFunction(Expression arg1, Expression arg2, Expression arg3) { + super(new Expression[] { arg1, arg2, arg3 }); + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + long precision = v2.getLong(); + boolean force = v3.getBoolean(); + if (precision <= 0) { + throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Long.toString(precision), "1", + "" + Integer.MAX_VALUE); + } + TypeInfo t = v1.getType(); + int valueType = t.getValueType(); + if (DataType.getDataType(valueType).supportsPrecision) { + if (precision < t.getPrecision()) { + switch (valueType) { + case Value.NUMERIC: { + BigDecimal bd = v1.getBigDecimal().round(new MathContext(MathUtils.convertLongToInt(precision))); + if (bd.scale() < 0) { + bd = bd.setScale(0); + } + return ValueNumeric.get(bd); + } + case Value.DECFLOAT: + return ValueDecfloat + .get(v1.getBigDecimal().round(new MathContext(MathUtils.convertLongToInt(precision)))); + default: + return v1.castTo(TypeInfo.getTypeInfo(valueType, precision, t.getScale(), t.getExtTypeInfo()), + session); + } + } + } else if (force) { + BigDecimal bd; + switch (valueType) { + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + bd = BigDecimal.valueOf(v1.getInt()); + break; + case Value.BIGINT: + bd = BigDecimal.valueOf(v1.getLong()); + break; + case Value.REAL: + 
case Value.DOUBLE: + bd = v1.getBigDecimal(); + break; + default: + return v1; + } + bd = bd.round(new MathContext(MathUtils.convertLongToInt(precision))); + if (valueType == Value.DECFLOAT) { + return ValueDecfloat.get(bd); + } + if (bd.scale() < 0) { + bd = bd.setScale(0); + } + return ValueNumeric.get(bd).convertTo(valueType); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + type = args[0].getType(); + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return "TRUNCATE_VALUE"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/XMLFunction.java b/h2/src/main/org/h2/expression/function/XMLFunction.java new file mode 100644 index 0000000000..fb4491b40f --- /dev/null +++ b/h2/src/main/org/h2/expression/function/XMLFunction.java @@ -0,0 +1,161 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * An XML function. + */ +public final class XMLFunction extends FunctionN { + + /** + * XMLATTR() (non-standard). + */ + public static final int XMLATTR = 0; + + /** + * XMLCDATA() (non-standard). + */ + public static final int XMLCDATA = XMLATTR + 1; + + /** + * XMLCOMMENT() (non-standard). + */ + public static final int XMLCOMMENT = XMLCDATA + 1; + + /** + * XMLNODE() (non-standard). 
+ */ + public static final int XMLNODE = XMLCOMMENT + 1; + + /** + * XMLSTARTDOC() (non-standard). + */ + public static final int XMLSTARTDOC = XMLNODE + 1; + + /** + * XMLTEXT() (non-standard). + */ + public static final int XMLTEXT = XMLSTARTDOC + 1; + + private static final String[] NAMES = { // + "XMLATTR", "XMLCDATA", "XMLCOMMENT", "XMLNODE", "XMLSTARTDOC", "XMLTEXT" // + }; + + private final int function; + + public XMLFunction(int function) { + super(new Expression[4]); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + switch (function) { + case XMLNODE: + return xmlNode(session); + case XMLSTARTDOC: + return ValueVarchar.get(StringUtils.xmlStartDoc(), session); + default: + return super.getValue(session); + } + } + + private Value xmlNode(SessionLocal session) { + Value v1 = args[0].getValue(session); + if (v1 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + int length = args.length; + String attr = length >= 2 ? args[1].getValue(session).getString() : null; + String content = length >= 3 ? 
args[2].getValue(session).getString() : null; + boolean indent; + if (length >= 4) { + Value v4 = args[3].getValue(session); + if (v4 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + indent = v4.getBoolean(); + } else { + indent = true; + } + return ValueVarchar.get(StringUtils.xmlNode(v1.getString(), attr, content, indent), session); + } + + @Override + protected Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + switch (function) { + case XMLATTR: + v1 = ValueVarchar.get(StringUtils.xmlAttr(v1.getString(), v2.getString()), session); + break; + case XMLCDATA: + v1 = ValueVarchar.get(StringUtils.xmlCData(v1.getString()), session); + break; + case XMLCOMMENT: + v1 = ValueVarchar.get(StringUtils.xmlComment(v1.getString()), session); + break; + case XMLTEXT: + v1 = ValueVarchar.get(StringUtils.xmlText(v1.getString(), v2 != null && v2.getBoolean()), session); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + int min, max; + switch (function) { + case XMLATTR: + max = min = 2; + break; + case XMLNODE: + min = 1; + max = 4; + break; + case XMLCDATA: + case XMLCOMMENT: + max = min = 1; + break; + case XMLSTARTDOC: + max = min = 0; + break; + case XMLTEXT: + min = 1; + max = 2; + break; + default: + throw DbException.getInternalError("function=" + function); + } + int len = args.length; + if (len < min || len > max) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), min + ".." 
+ max); + } + type = TypeInfo.TYPE_VARCHAR; + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/package.html b/h2/src/main/org/h2/expression/function/package.html index fbc7aec26d..934f342526 100644 --- a/h2/src/main/org/h2/expression/function/package.html +++ b/h2/src/main/org/h2/expression/function/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/expression/function/TableFunction.java b/h2/src/main/org/h2/expression/function/table/ArrayTableFunction.java similarity index 54% rename from h2/src/main/org/h2/expression/function/TableFunction.java rename to h2/src/main/org/h2/expression/function/table/ArrayTableFunction.java index 5ceac804b9..eb5b5c7fa4 100644 --- a/h2/src/main/org/h2/expression/function/TableFunction.java +++ b/h2/src/main/org/h2/expression/function/table/ArrayTableFunction.java @@ -1,74 +1,95 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ -package org.h2.expression.function; +package org.h2.expression.function.table; import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.message.DbException; import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; import org.h2.table.Column; import org.h2.value.Value; import org.h2.value.ValueCollectionBase; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; /** - * Implementation of the functions TABLE(..), TABLE_DISTINCT(..), and - * UNNEST(..). + * A table value function. */ -public class TableFunction extends Function { - private final long rowCount; +public final class ArrayTableFunction extends TableFunction { + + /** + * UNNEST(). + */ + public static final int UNNEST = 0; + + /** + * TABLE() (non-standard). + */ + public static final int TABLE = UNNEST + 1; + + /** + * TABLE_DISTINCT() (non-standard). 
+ */ + public static final int TABLE_DISTINCT = TABLE + 1; + private Column[] columns; - TableFunction(Database database, FunctionInfo info, long rowCount) { - super(database, info); - this.rowCount = rowCount; + private static final String[] NAMES = { // + "UNNEST", "TABLE", "TABLE_DISTINCT" // + }; + + private final int function; + + public ArrayTableFunction(int function) { + super(new Expression[1]); + this.function = function; } @Override - public Value getValue(Session session) { + public ResultInterface getValue(SessionLocal session) { return getTable(session, false); } @Override - protected void checkParameterCount(int len) { - if (len < 1) { + public void optimize(SessionLocal session) { + super.optimize(session); + if (args.length < 1) { throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), ">0"); } } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - if (info.type == UNNEST) { - super.getSQL(builder, alwaysQuote); + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if (function == UNNEST) { + super.getSQL(builder, sqlFlags); if (args.length < columns.length) { builder.append(" WITH ORDINALITY"); } - return builder; - } - builder.append(getName()).append('('); - for (int i = 0; i < args.length; i++) { - if (i > 0) { - builder.append(", "); + } else { + builder.append(getName()).append('('); + for (int i = 0; i < args.length; i++) { + if (i > 0) { + builder.append(", "); + } + builder.append(columns[i].getCreateSQL()).append('='); + args[i].getUnenclosedSQL(builder, sqlFlags); } - builder.append(columns[i].getCreateSQL()).append('='); - args[i].getSQL(builder, alwaysQuote); + builder.append(')'); } - return builder.append(')'); + return builder; } @Override - public ValueResultSet getValueForColumnList(Session session, - Expression[] nullArgs) { + public ResultInterface getValueTemplate(SessionLocal session) { return getTable(session, true); } @@ -76,7 +97,7 @@ public void 
setColumns(ArrayList columns) { this.columns = columns.toArray(new Column[0]); } - private ValueResultSet getTable(Session session, boolean onlyColumnList) { + private ResultInterface getTable(SessionLocal session, boolean onlyColumnList) { int totalColumns = columns.length; Expression[] header = new Expression[totalColumns]; Database db = session.getDatabase(); @@ -85,13 +106,13 @@ private ValueResultSet getTable(Session session, boolean onlyColumnList) { ExpressionColumn col = new ExpressionColumn(db, c); header[i] = col; } - LocalResult result = db.getResultFactory().create(session, header, totalColumns); - if (!onlyColumnList && info.type == TABLE_DISTINCT) { + LocalResult result = new LocalResult(session, header, totalColumns, totalColumns); + if (!onlyColumnList && function == TABLE_DISTINCT) { result.setDistinct(); } if (!onlyColumnList) { int len = totalColumns; - boolean unnest = info.type == UNNEST, addNumber = false; + boolean unnest = function == UNNEST, addNumber = false; if (unnest) { len = args.length; if (len < totalColumns) { @@ -103,11 +124,11 @@ private ValueResultSet getTable(Session session, boolean onlyColumnList) { for (int i = 0; i < len; i++) { Value v = args[i].getValue(session); if (v == ValueNull.INSTANCE) { - list[i] = new Value[0]; + list[i] = Value.EMPTY_VALUES; } else { int type = v.getValueType(); if (type != Value.ARRAY && type != Value.ROW) { - v = v.convertTo(Value.ARRAY); + v = v.convertToAnyArray(session); } Value[] l = ((ValueCollectionBase) v).getList(); list[i] = l; @@ -125,39 +146,33 @@ private ValueResultSet getTable(Session session, boolean onlyColumnList) { Column c = columns[j]; v = l[row]; if (!unnest) { - v = c.convert(v).convertPrecision(c.getType().getPrecision(), false) - .convertScale(true, c.getType().getScale()); + v = v.convertForAssignTo(c.getType(), session, c); } } r[j] = v; } if (addNumber) { - r[len] = ValueInt.get(row + 1); + r[len] = ValueInteger.get(row + 1); } result.addRow(r); } } result.done(); - 
return ValueResultSet.get(result, Integer.MAX_VALUE); - } - - public long getRowCount() { - return rowCount; + return result; } @Override - public Expression[] getExpressionColumns(Session session) { - return getExpressionColumns(session, getValueForColumnList(session, null).getResult()); + public String getName() { + return NAMES[function]; } @Override - public boolean isConstant() { - for (Expression e : args) { - if (!e.isConstant()) { - return false; - } - } + public boolean isDeterministic() { return true; } + public int getFunctionType() { + return function; + } + } diff --git a/h2/src/main/org/h2/expression/function/table/CSVReadFunction.java b/h2/src/main/org/h2/expression/function/table/CSVReadFunction.java new file mode 100644 index 0000000000..f03ad1c8b2 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/table/CSVReadFunction.java @@ -0,0 +1,119 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function.table; + +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.function.CSVWriteFunction; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.schema.FunctionAlias.JavaMethod; +import org.h2.tools.Csv; +import org.h2.util.StringUtils; + +/** + * A CSVREAD function. 
+ */ +public final class CSVReadFunction extends TableFunction { + + public CSVReadFunction() { + super(new Expression[4]); + } + + @Override + public ResultInterface getValue(SessionLocal session) { + session.getUser().checkAdmin(); + String fileName = getValue(session, 0); + String columnList = getValue(session, 1); + Csv csv = new Csv(); + String options = getValue(session, 2); + String charset = null; + if (options != null && options.indexOf('=') >= 0) { + charset = csv.setOptions(options); + } else { + charset = options; + String fieldSeparatorRead = getValue(session, 3); + String fieldDelimiter = getValue(session, 4); + String escapeCharacter = getValue(session, 5); + String nullString = getValue(session, 6); + CSVWriteFunction.setCsvDelimiterEscape(csv, fieldSeparatorRead, fieldDelimiter, escapeCharacter); + csv.setNullString(nullString); + } + char fieldSeparator = csv.getFieldSeparatorRead(); + String[] columns = StringUtils.arraySplit(columnList, fieldSeparator, true); + try { + // TODO create result directly + return JavaMethod.resultSetToResult(session, csv.read(fileName, columns, charset), Integer.MAX_VALUE); + } catch (SQLException e) { + throw DbException.convert(e); + } + } + + private String getValue(SessionLocal session, int index) { + return getValue(session, args, index); + } + + @Override + public void optimize(SessionLocal session) { + super.optimize(session); + int len = args.length; + if (len < 1 || len > 7) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), "1..7"); + } + } + + @Override + public ResultInterface getValueTemplate(SessionLocal session) { + session.getUser().checkAdmin(); + String fileName = getValue(session, args, 0); + if (fileName == null) { + throw DbException.get(ErrorCode.PARAMETER_NOT_SET_1, "fileName"); + } + String columnList = getValue(session, args, 1); + Csv csv = new Csv(); + String options = getValue(session, args, 2); + String charset = null; + if (options != null && options.indexOf('=') 
>= 0) { + charset = csv.setOptions(options); + } else { + charset = options; + String fieldSeparatorRead = getValue(session, args, 3); + String fieldDelimiter = getValue(session, args, 4); + String escapeCharacter = getValue(session, args, 5); + CSVWriteFunction.setCsvDelimiterEscape(csv, fieldSeparatorRead, fieldDelimiter, escapeCharacter); + } + char fieldSeparator = csv.getFieldSeparatorRead(); + String[] columns = StringUtils.arraySplit(columnList, fieldSeparator, true); + ResultInterface result; + try (ResultSet rs = csv.read(fileName, columns, charset)) { + result = JavaMethod.resultSetToResult(session, rs, 0); + } catch (SQLException e) { + throw DbException.convert(e); + } finally { + csv.close(); + } + return result; + } + + private static String getValue(SessionLocal session, Expression[] args, int index) { + return index < args.length ? args[index].getValue(session).getString() : null; + } + + @Override + public String getName() { + return "CSVREAD"; + } + + @Override + public boolean isDeterministic() { + return false; + } + +} diff --git a/h2/src/main/org/h2/expression/function/table/JavaTableFunction.java b/h2/src/main/org/h2/expression/function/table/JavaTableFunction.java new file mode 100644 index 0000000000..dc74497c2f --- /dev/null +++ b/h2/src/main/org/h2/expression/function/table/JavaTableFunction.java @@ -0,0 +1,63 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function.table; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.schema.FunctionAlias; + +/** + * This class wraps a user-defined function. 
+ */ +public final class JavaTableFunction extends TableFunction { + + private final FunctionAlias functionAlias; + private final FunctionAlias.JavaMethod javaMethod; + + public JavaTableFunction(FunctionAlias functionAlias, Expression[] args) { + super(args); + this.functionAlias = functionAlias; + this.javaMethod = functionAlias.findJavaMethod(args); + if (javaMethod.getDataType() != null) { + throw DbException.get(ErrorCode.FUNCTION_MUST_RETURN_RESULT_SET_1, getName()); + } + } + + @Override + public ResultInterface getValue(SessionLocal session) { + return javaMethod.getTableValue(session, args, false); + } + + @Override + public ResultInterface getValueTemplate(SessionLocal session) { + return javaMethod.getTableValue(session, args, true); + } + + @Override + public void optimize(SessionLocal session) { + super.optimize(session); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return Expression.writeExpressions(functionAlias.getSQL(builder, sqlFlags).append('('), args, sqlFlags) + .append(')'); + } + + @Override + public String getName() { + return functionAlias.getName(); + } + + @Override + public boolean isDeterministic() { + return functionAlias.isDeterministic(); + } + +} diff --git a/h2/src/main/org/h2/expression/function/table/LinkSchemaFunction.java b/h2/src/main/org/h2/expression/function/table/LinkSchemaFunction.java new file mode 100644 index 0000000000..2a17b973ef --- /dev/null +++ b/h2/src/main/org/h2/expression/function/table/LinkSchemaFunction.java @@ -0,0 +1,125 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function.table; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.result.SimpleResult; +import org.h2.util.JdbcUtils; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.ValueVarchar; + +/** + * A LINK_SCHEMA function. + */ +public final class LinkSchemaFunction extends TableFunction { + + public LinkSchemaFunction() { + super(new Expression[6]); + } + + @Override + public ResultInterface getValue(SessionLocal session) { + session.getUser().checkAdmin(); + String targetSchema = getValue(session, 0); + String driver = getValue(session, 1); + String url = getValue(session, 2); + String user = getValue(session, 3); + String password = getValue(session, 4); + String sourceSchema = getValue(session, 5); + if (targetSchema == null || driver == null || url == null || user == null || password == null + || sourceSchema == null) { + return getValueTemplate(session); + } + Connection conn = session.createConnection(false); + Connection c2 = null; + Statement stat = null; + ResultSet rs = null; + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + try { + c2 = JdbcUtils.getConnection(driver, url, user, password); + stat = conn.createStatement(); + stat.execute(StringUtils.quoteIdentifier(new StringBuilder("CREATE SCHEMA IF NOT EXISTS "), targetSchema) + .toString()); + // Workaround for PostgreSQL to avoid index names + if (url.startsWith("jdbc:postgresql:")) { + rs = c2.getMetaData().getTables(null, sourceSchema, null, + new String[] { "TABLE", "LINKED TABLE", "VIEW", "EXTERNAL" }); + } else { + rs = c2.getMetaData().getTables(null, sourceSchema, null, null); + } + while 
(rs.next()) { + String table = rs.getString("TABLE_NAME"); + StringBuilder buff = new StringBuilder(); + buff.append("DROP TABLE IF EXISTS "); + StringUtils.quoteIdentifier(buff, targetSchema).append('.'); + StringUtils.quoteIdentifier(buff, table); + stat.execute(buff.toString()); + buff.setLength(0); + buff.append("CREATE LINKED TABLE "); + StringUtils.quoteIdentifier(buff, targetSchema).append('.'); + StringUtils.quoteIdentifier(buff, table).append('('); + StringUtils.quoteStringSQL(buff, driver).append(", "); + StringUtils.quoteStringSQL(buff, url).append(", "); + StringUtils.quoteStringSQL(buff, user).append(", "); + StringUtils.quoteStringSQL(buff, password).append(", "); + StringUtils.quoteStringSQL(buff, sourceSchema).append(", "); + StringUtils.quoteStringSQL(buff, table).append(')'); + stat.execute(buff.toString()); + result.addRow(ValueVarchar.get(table, session)); + } + } catch (SQLException e) { + result.close(); + throw DbException.convert(e); + } finally { + JdbcUtils.closeSilently(rs); + JdbcUtils.closeSilently(c2); + JdbcUtils.closeSilently(stat); + } + return result; + } + + private String getValue(SessionLocal session, int index) { + return args[index].getValue(session).getString(); + } + + @Override + public void optimize(SessionLocal session) { + super.optimize(session); + int len = args.length; + if (len != 6) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), "6"); + } + } + + @Override + public ResultInterface getValueTemplate(SessionLocal session) { + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + return result; + } + + @Override + public String getName() { + return "LINK_SCHEMA"; + } + + @Override + public boolean isDeterministic() { + return false; + } + +} diff --git a/h2/src/main/org/h2/expression/function/table/TableFunction.java b/h2/src/main/org/h2/expression/function/table/TableFunction.java new file mode 100644 index 0000000000..729421f883 --- /dev/null 
+++ b/h2/src/main/org/h2/expression/function/table/TableFunction.java @@ -0,0 +1,90 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function.table; + +import java.util.Arrays; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionWithVariableParameters; +import org.h2.expression.function.NamedExpression; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.util.HasSQL; + +/** + * A table value function. + */ +public abstract class TableFunction implements HasSQL, NamedExpression, ExpressionWithVariableParameters { + + protected Expression[] args; + + private int argsCount; + + protected TableFunction(Expression[] args) { + this.args = args; + } + + @Override + public void addParameter(Expression param) { + int capacity = args.length; + if (argsCount >= capacity) { + args = Arrays.copyOf(args, capacity * 2); + } + args[argsCount++] = param; + } + + @Override + public void doneWithParameters() throws DbException { + if (args.length != argsCount) { + args = Arrays.copyOf(args, argsCount); + } + } + + /** + * Get a result with. + * + * @param session + * the session + * @return the result + */ + public abstract ResultInterface getValue(SessionLocal session); + + /** + * Get an empty result with the column names set. + * + * @param session + * the session + * @return the empty result + */ + public abstract ResultInterface getValueTemplate(SessionLocal session); + + /** + * Try to optimize this table function + * + * @param session + * the session + */ + public void optimize(SessionLocal session) { + for (int i = 0, l = args.length; i < l; i++) { + args[i] = args[i].optimize(session); + } + } + + /** + * Whether the function always returns the same result for the same + * parameters. 
+ * + * @return true if it does + */ + public abstract boolean isDeterministic(); + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return Expression.writeExpressions(builder.append(getName()).append('('), args, sqlFlags).append(')'); + } + +} diff --git a/h2/src/tools/org/h2/build/doclet/package.html b/h2/src/main/org/h2/expression/function/table/package.html similarity index 70% rename from h2/src/tools/org/h2/build/doclet/package.html rename to h2/src/main/org/h2/expression/function/table/package.html index 00906cbe16..8dd9d74c78 100644 --- a/h2/src/tools/org/h2/build/doclet/package.html +++ b/h2/src/main/org/h2/expression/function/table/package.html @@ -1,7 +1,7 @@ @@ -9,6 +9,6 @@ Javadoc package documentation

          -A Javadoc doclet to build nicer and smaller API Javadoc HTML files. +Table value functions.

          \ No newline at end of file diff --git a/h2/src/main/org/h2/expression/package.html b/h2/src/main/org/h2/expression/package.html index 07e2d68cfa..7bf9c9620d 100644 --- a/h2/src/main/org/h2/expression/package.html +++ b/h2/src/main/org/h2/expression/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/fulltext/FullText.java b/h2/src/main/org/h2/fulltext/FullText.java index a46c90adf2..8d7dd71b34 100644 --- a/h2/src/main/org/h2/fulltext/FullText.java +++ b/h2/src/main/org/h2/fulltext/FullText.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.fulltext; @@ -26,7 +26,7 @@ import org.h2.api.Trigger; import org.h2.command.Parser; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ValueExpression; @@ -75,7 +75,7 @@ public class FullText { private static final String SELECT_MAP_BY_WORD_ID = "SELECT ROWID FROM " + SCHEMA + ".MAP WHERE WORDID=?"; private static final String SELECT_ROW_BY_ID = - "SELECT KEY, INDEXID FROM " + SCHEMA + ".ROWS WHERE ID=?"; + "SELECT `KEY`, INDEXID FROM " + SCHEMA + ".ROWS WHERE ID=?"; /** * The column name of the result set returned by the search method. 
@@ -103,38 +103,34 @@ public class FullText { * * * @param conn the connection + * @throws SQLException on failure */ public static void init(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("CREATE SCHEMA IF NOT EXISTS " + SCHEMA); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + - ".INDEXES(ID INT AUTO_INCREMENT PRIMARY KEY, " + + ".INDEXES(ID INT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, " + "SCHEMA VARCHAR, `TABLE` VARCHAR, COLUMNS VARCHAR, " + "UNIQUE(SCHEMA, `TABLE`))"); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + - ".WORDS(ID INT AUTO_INCREMENT PRIMARY KEY, " + + ".WORDS(ID INT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, " + "NAME VARCHAR, UNIQUE(NAME))"); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + - ".ROWS(ID IDENTITY, HASH INT, INDEXID INT, " + - "KEY VARCHAR, UNIQUE(HASH, INDEXID, KEY))"); + ".ROWS(ID BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, HASH INT, INDEXID INT, " + + "`KEY` VARCHAR, UNIQUE(HASH, INDEXID, `KEY`))"); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + ".MAP(ROWID INT, WORDID INT, PRIMARY KEY(WORDID, ROWID))"); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + ".IGNORELIST(LIST VARCHAR)"); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + - ".SETTINGS(KEY VARCHAR PRIMARY KEY, VALUE VARCHAR)"); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_CREATE_INDEX FOR \"" + - FullText.class.getName() + ".createIndex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_DROP_INDEX FOR \"" + - FullText.class.getName() + ".dropIndex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_SEARCH FOR \"" + - FullText.class.getName() + ".search\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_SEARCH_DATA FOR \"" + - FullText.class.getName() + ".searchData\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_REINDEX FOR \"" + - FullText.class.getName() + ".reindex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_DROP_ALL FOR \"" + - FullText.class.getName() + 
".dropAll\""); + ".SETTINGS(`KEY` VARCHAR PRIMARY KEY, `VALUE` VARCHAR)"); + String className = FullText.class.getName(); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_CREATE_INDEX FOR '" + className + ".createIndex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_DROP_INDEX FOR '" + className + ".dropIndex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_SEARCH FOR '" + className + ".search'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_SEARCH_DATA FOR '" + className + ".searchData'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_REINDEX FOR '" + className + ".reindex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_DROP_ALL FOR '" + className + ".dropAll'"); FullTextSettings setting = FullTextSettings.getInstance(conn); ResultSet rs = stat.executeQuery("SELECT * FROM " + SCHEMA + ".IGNORELIST"); @@ -170,6 +166,7 @@ public static void init(Connection conn) throws SQLException { * @param schema the schema name of the table (case sensitive) * @param table the table name (case sensitive) * @param columnList the column list (null for all columns) + * @throws SQLException on failure */ public static void createIndex(Connection conn, String schema, String table, String columnList) throws SQLException { @@ -189,6 +186,7 @@ public static void createIndex(Connection conn, String schema, * usually not needed, as the index is kept up-to-date automatically. 
* * @param conn the connection + * @throws SQLException on failure */ public static void reindex(Connection conn) throws SQLException { init(conn); @@ -215,6 +213,7 @@ public static void reindex(Connection conn) throws SQLException { * @param conn the connection * @param schema the schema name of the table (case sensitive) * @param table the table name (case sensitive) + * @throws SQLException on failure */ public static void dropIndex(Connection conn, String schema, String table) throws SQLException { @@ -257,6 +256,7 @@ public static void dropIndex(Connection conn, String schema, String table) * Drops all full text indexes from the database. * * @param conn the connection + * @throws SQLException on failure */ public static void dropAll(Connection conn) throws SQLException { init(conn); @@ -284,6 +284,7 @@ public static void dropAll(Connection conn) throws SQLException { * @param limit the maximum number of rows or 0 for no limit * @param offset the offset or 0 for no offset * @return the result set + * @throws SQLException on failure */ public static ResultSet search(Connection conn, String text, int limit, int offset) throws SQLException { @@ -315,6 +316,7 @@ public static ResultSet search(Connection conn, String text, int limit, * @param limit the maximum number of rows or 0 for no limit * @param offset the offset or 0 for no offset * @return the result set + * @throws SQLException on failure */ public static ResultSet searchData(Connection conn, String text, int limit, int offset) throws SQLException { @@ -333,6 +335,7 @@ public static ResultSet searchData(Connection conn, String text, int limit, * * @param conn the connection * @param commaSeparatedList the list + * @throws SQLException on failure */ public static void setIgnoreList(Connection conn, String commaSeparatedList) throws SQLException { @@ -358,6 +361,7 @@ public static void setIgnoreList(Connection conn, String commaSeparatedList) * * @param conn the connection * @param whitespaceChars the list 
of characters + * @throws SQLException on failure */ public static void setWhitespaceChars(Connection conn, String whitespaceChars) throws SQLException { @@ -382,6 +386,7 @@ public static void setWhitespaceChars(Connection conn, * @param data the object * @param type the SQL type * @return the string + * @throws SQLException on failure */ protected static String asString(Object data, int type) throws SQLException { if (data == null) { @@ -445,8 +450,8 @@ protected static SimpleResultSet createResultSet(boolean data) { if (data) { result.addColumn(FullText.FIELD_SCHEMA, Types.VARCHAR, 0, 0); result.addColumn(FullText.FIELD_TABLE, Types.VARCHAR, 0, 0); - result.addColumn(FullText.FIELD_COLUMNS, Types.ARRAY, 0, 0); - result.addColumn(FullText.FIELD_KEYS, Types.ARRAY, 0, 0); + result.addColumn(FullText.FIELD_COLUMNS, Types.ARRAY, "VARCHAR ARRAY", 0, 0); + result.addColumn(FullText.FIELD_KEYS, Types.ARRAY, "VARCHAR ARRAY", 0, 0); } else { result.addColumn(FullText.FIELD_QUERY, Types.VARCHAR, 0, 0); } @@ -461,17 +466,17 @@ protected static SimpleResultSet createResultSet(boolean data) { * @param key the primary key condition as a string * @return an array containing the column name list and the data list */ - protected static Object[][] parseKey(Connection conn, String key) { + protected static String[][] parseKey(Connection conn, String key) { ArrayList columns = Utils.newSmallArrayList(); ArrayList data = Utils.newSmallArrayList(); JdbcConnection c = (JdbcConnection) conn; - Session session = (Session) c.getSession(); + SessionLocal session = (SessionLocal) c.getSession(); Parser p = new Parser(session); Expression expr = p.parseExpression(key); - addColumnData(columns, data, expr); - Object[] col = columns.toArray(); - Object[] dat = data.toArray(); - Object[][] columnData = { col, dat }; + addColumnData(session, columns, data, expr); + String[] col = columns.toArray(new String[0]); + String[] dat = data.toArray(new String[0]); + String[][] columnData = { col, dat }; 
return columnData; } @@ -482,6 +487,7 @@ protected static Object[][] parseKey(Connection conn, String key) { * @param data the object * @param type the SQL type * @return the SQL String + * @throws SQLException on failure */ protected static String quoteSQL(Object data, int type) throws SQLException { if (data == null) { @@ -511,7 +517,7 @@ protected static String quoteSQL(Object data, int type) throws SQLException { case Types.LONGVARBINARY: case Types.BINARY: if (data instanceof UUID) { - return "'" + data.toString() + "'"; + return "'" + data + "'"; } byte[] bytes = (byte[]) data; StringBuilder builder = new StringBuilder(bytes.length * 2 + 2).append('\''); @@ -538,11 +544,13 @@ protected static String quoteSQL(Object data, int type) throws SQLException { * * @param conn the database connection * @param prefix the prefix + * @throws SQLException on failure */ protected static void removeAllTriggers(Connection conn, String prefix) throws SQLException { Statement stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM INFORMATION_SCHEMA.TRIGGERS"); + ResultSet rs = stat.executeQuery( + "SELECT DISTINCT TRIGGER_SCHEMA, TRIGGER_NAME FROM INFORMATION_SCHEMA.TRIGGERS"); Statement stat2 = conn.createStatement(); while (rs.next()) { String schema = rs.getString("TRIGGER_SCHEMA"); @@ -561,6 +569,7 @@ protected static void removeAllTriggers(Connection conn, String prefix) * @param index the column indices (will be modified) * @param keys the key list * @param columns the column list + * @throws SQLException on failure */ protected static void setColumns(int[] index, ArrayList keys, ArrayList columns) throws SQLException { @@ -590,6 +599,7 @@ protected static void setColumns(int[] index, ArrayList keys, * @param offset the offset * @param data whether the raw data should be returned * @return the result set + * @throws SQLException on failure */ protected static ResultSet search(Connection conn, String text, int limit, int offset, boolean data) 
throws SQLException { @@ -645,7 +655,7 @@ protected static ResultSet search(Connection conn, String text, int limit, int indexId = rs.getInt(2); IndexInfo index = setting.getIndexInfo(indexId); if (data) { - Object[][] columnData = parseKey(conn, key); + String[][] columnData = parseKey(conn, key); result.addRow( index.schema, index.table, @@ -667,16 +677,16 @@ protected static ResultSet search(Connection conn, String text, int limit, return result; } - private static void addColumnData(ArrayList columns, - ArrayList data, Expression expr) { + private static void addColumnData(SessionLocal session, ArrayList columns, ArrayList data, + Expression expr) { if (expr instanceof ConditionAndOr) { ConditionAndOr and = (ConditionAndOr) expr; - addColumnData(columns, data, and.getSubexpression(0)); - addColumnData(columns, data, and.getSubexpression(1)); + addColumnData(session, columns, data, and.getSubexpression(0)); + addColumnData(session, columns, data, and.getSubexpression(1)); } else { Comparison comp = (Comparison) expr; ExpressionColumn ec = (ExpressionColumn) comp.getSubexpression(0); - String columnName = ec.getColumnName(); + String columnName = ec.getColumnName(session, -1); columns.add(columnName); if (expr.getSubexpressionCount() == 1) { data.add(null); @@ -747,6 +757,7 @@ protected static void addWords(FullTextSettings setting, * @param conn the database connection * @param schema the schema name * @param table the table name + * @throws SQLException on failure */ private static void createTrigger(Connection conn, String schema, String table) throws SQLException { @@ -760,7 +771,6 @@ private static void createOrDropTrigger(Connection conn, + StringUtils.quoteIdentifier(TRIGGER_PREFIX + table); stat.execute("DROP TRIGGER IF EXISTS " + trigger); if (create) { - boolean multiThread = FullTextTrigger.isMultiThread(conn); StringBuilder buff = new StringBuilder( "CREATE TRIGGER IF NOT EXISTS "); // unless multithread, trigger needs to be called on rollback as 
well, @@ -768,9 +778,6 @@ private static void createOrDropTrigger(Connection conn, // (not the user connection) buff.append(trigger). append(" AFTER INSERT, UPDATE, DELETE"); - if(!multiThread) { - buff.append(", ROLLBACK"); - } buff.append(" ON "); StringUtils.quoteIdentifier(buff, schema). append('.'); @@ -789,6 +796,7 @@ private static void createOrDropTrigger(Connection conn, * @param conn the database connection * @param schema the schema name * @param table the table name + * @throws SQLException on failure */ private static void indexExistingRows(Connection conn, String schema, String table) throws SQLException { @@ -863,8 +871,6 @@ public static final class FullTextTrigger implements Trigger { private FullTextSettings setting; private IndexInfo index; private int[] columnTypes; - private final PreparedStatement[] prepStatements = new PreparedStatement[SQL.length]; - private boolean useOwnConnection; private static final int INSERT_WORD = 0; private static final int INSERT_ROW = 1; @@ -873,17 +879,18 @@ public static final class FullTextTrigger implements Trigger { private static final int DELETE_MAP = 4; private static final int SELECT_ROW = 5; - private static final String SQL[] = { + private static final String[] SQL = { "MERGE INTO " + SCHEMA + ".WORDS(NAME) KEY(NAME) VALUES(?)", - "INSERT INTO " + SCHEMA + ".ROWS(HASH, INDEXID, KEY) VALUES(?, ?, ?)", + "INSERT INTO " + SCHEMA + ".ROWS(HASH, INDEXID, `KEY`) VALUES(?, ?, ?)", "INSERT INTO " + SCHEMA + ".MAP(ROWID, WORDID) VALUES(?, ?)", - "DELETE FROM " + SCHEMA + ".ROWS WHERE HASH=? AND INDEXID=? AND KEY=?", + "DELETE FROM " + SCHEMA + ".ROWS WHERE HASH=? AND INDEXID=? AND `KEY`=?", "DELETE FROM " + SCHEMA + ".MAP WHERE ROWID=? AND WORDID=?", - "SELECT ID FROM " + SCHEMA + ".ROWS WHERE HASH=? AND INDEXID=? AND KEY=?" + "SELECT ID FROM " + SCHEMA + ".ROWS WHERE HASH=? AND INDEXID=? AND `KEY`=?" 
}; /** * INTERNAL + * @see Trigger#init(Connection, String, String, String, boolean, int) */ @Override public void init(Connection conn, String schemaName, String triggerName, @@ -947,34 +954,11 @@ public void init(Connection conn, String schemaName, String triggerName, index.indexColumns = new int[indexList.size()]; setColumns(index.indexColumns, indexList, columnList); setting.addIndexInfo(index); - - useOwnConnection = isMultiThread(conn); - if(!useOwnConnection) { - for (int i = 0; i < SQL.length; i++) { - prepStatements[i] = conn.prepareStatement(SQL[i], - Statement.RETURN_GENERATED_KEYS); - } - } - } - - /** - * Check whether the database is in multi-threaded mode. - * - * @param conn the connection - * @return true if the multi-threaded mode is used - */ - static boolean isMultiThread(Connection conn) - throws SQLException { - try (Statement stat = conn.createStatement()) { - ResultSet rs = stat.executeQuery( - "SELECT value FROM information_schema.settings" + - " WHERE name = 'MULTI_THREADED'"); - return rs.next() && !"0".equals(rs.getString(1)); - } } /** * INTERNAL + * @see Trigger#fire(Connection, Object[], Object[]) */ @Override public void fire(Connection conn, Object[] oldRow, Object[] newRow) @@ -1017,8 +1001,9 @@ public void remove() { * * @param conn to use * @param row the row + * @throws SQLException on failure */ - protected void insert(Connection conn, Object[] row) throws SQLException { + private void insert(Connection conn, Object[] row) throws SQLException { PreparedStatement prepInsertRow = null; PreparedStatement prepInsertMap = null; try { @@ -1041,10 +1026,8 @@ protected void insert(Connection conn, Object[] row) throws SQLException { prepInsertMap.execute(); } } finally { - if (useOwnConnection) { - IOUtils.closeSilently(prepInsertRow); - IOUtils.closeSilently(prepInsertMap); - } + IOUtils.closeSilently(prepInsertRow); + IOUtils.closeSilently(prepInsertMap); } } @@ -1053,8 +1036,9 @@ protected void insert(Connection conn, Object[] row) 
throws SQLException { * * @param conn to use * @param row the row + * @throws SQLException on failure */ - protected void delete(Connection conn, Object[] row) throws SQLException { + private void delete(Connection conn, Object[] row) throws SQLException { PreparedStatement prepSelectRow = null; PreparedStatement prepDeleteMap = null; PreparedStatement prepDeleteRow = null; @@ -1082,11 +1066,9 @@ protected void delete(Connection conn, Object[] row) throws SQLException { prepDeleteRow.executeUpdate(); } } finally { - if (useOwnConnection) { - IOUtils.closeSilently(prepSelectRow); - IOUtils.closeSilently(prepDeleteMap); - IOUtils.closeSilently(prepDeleteRow); - } + IOUtils.closeSilently(prepSelectRow); + IOUtils.closeSilently(prepDeleteMap); + IOUtils.closeSilently(prepDeleteRow); } } @@ -1134,9 +1116,7 @@ private int[] getWordIds(Connection conn, Object[] row) throws SQLException { Arrays.sort(wordIds); return wordIds; } finally { - if (useOwnConnection) { - IOUtils.closeSilently(prepInsertWord); - } + IOUtils.closeSilently(prepInsertWord); } } @@ -1159,10 +1139,8 @@ private String getKey(Object[] row) throws SQLException { return builder.toString(); } - private PreparedStatement getStatement(Connection conn, int index) throws SQLException { - return useOwnConnection ? - conn.prepareStatement(SQL[index], Statement.RETURN_GENERATED_KEYS) - : prepStatements[index]; + private static PreparedStatement getStatement(Connection conn, int index) throws SQLException { + return conn.prepareStatement(SQL[index], Statement.RETURN_GENERATED_KEYS); } } diff --git a/h2/src/main/org/h2/fulltext/FullTextLucene.java b/h2/src/main/org/h2/fulltext/FullTextLucene.java index 0dfeb9661c..802563cff3 100644 --- a/h2/src/main/org/h2/fulltext/FullTextLucene.java +++ b/h2/src/main/org/h2/fulltext/FullTextLucene.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.fulltext; @@ -35,16 +35,14 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.store.RAMDirectory; -import org.h2.api.ErrorCode; import org.h2.api.Trigger; import org.h2.command.Parser; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.ExpressionColumn; import org.h2.jdbc.JdbcConnection; -import org.h2.message.DbException; import org.h2.store.fs.FileUtils; import org.h2.tools.SimpleResultSet; import org.h2.util.StringUtils; @@ -76,17 +74,6 @@ public class FullTextLucene extends FullText { */ private static final String IN_MEMORY_PREFIX = "mem:"; - private static final java.lang.reflect.Field TOTAL_HITS; - - static { - try { - TOTAL_HITS = TopDocs.class.getField("totalHits"); - } catch (ReflectiveOperationException e) { - throw DbException.get(ErrorCode.GENERAL_ERROR_1, e, - "Field org.apache.lucene.search.TopDocs.totalHits is not found"); - } - } - /** * Initializes full text search functionality for this database. 
This adds * the following Java functions to the database: @@ -108,6 +95,7 @@ public class FullTextLucene extends FullText { * * * @param conn the connection + * @throws SQLException on failure */ public static void init(Connection conn) throws SQLException { try (Statement stat = conn.createStatement()) { @@ -115,18 +103,13 @@ public static void init(Connection conn) throws SQLException { stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + ".INDEXES(SCHEMA VARCHAR, `TABLE` VARCHAR, " + "COLUMNS VARCHAR, PRIMARY KEY(SCHEMA, `TABLE`))"); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_CREATE_INDEX FOR \"" + - FullTextLucene.class.getName() + ".createIndex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_DROP_INDEX FOR \"" + - FullTextLucene.class.getName() + ".dropIndex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_SEARCH FOR \"" + - FullTextLucene.class.getName() + ".search\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_SEARCH_DATA FOR \"" + - FullTextLucene.class.getName() + ".searchData\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_REINDEX FOR \"" + - FullTextLucene.class.getName() + ".reindex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_DROP_ALL FOR \"" + - FullTextLucene.class.getName() + ".dropAll\""); + String className = FullTextLucene.class.getName(); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_CREATE_INDEX FOR '" + className + ".createIndex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_DROP_INDEX FOR '" + className + ".dropIndex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_SEARCH FOR '" + className + ".search'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_SEARCH_DATA FOR '" + className + ".searchData'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_REINDEX FOR '" + className + ".reindex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_DROP_ALL FOR '" + className + ".dropAll'"); } } @@ -138,6 +121,7 @@ public static void init(Connection conn) throws SQLException { * @param schema the schema name of the table 
(case sensitive) * @param table the table name (case sensitive) * @param columnList the column list (null for all columns) + * @throws SQLException on failure */ public static void createIndex(Connection conn, String schema, String table, String columnList) throws SQLException { @@ -159,6 +143,7 @@ public static void createIndex(Connection conn, String schema, * @param conn the connection * @param schema the schema name of the table (case sensitive) * @param table the table name (case sensitive) + * @throws SQLException on failure */ public static void dropIndex(Connection conn, String schema, String table) throws SQLException { @@ -179,6 +164,7 @@ public static void dropIndex(Connection conn, String schema, String table) * usually not needed, as the index is kept up-to-date automatically. * * @param conn the connection + * @throws SQLException on failure */ public static void reindex(Connection conn) throws SQLException { init(conn); @@ -198,6 +184,7 @@ public static void reindex(Connection conn) throws SQLException { * Drops all full text indexes from the database. 
* * @param conn the connection + * @throws SQLException on failure */ public static void dropAll(Connection conn) throws SQLException { Statement stat = conn.createStatement(); @@ -220,6 +207,7 @@ public static void dropAll(Connection conn) throws SQLException { * @param limit the maximum number of rows or 0 for no limit * @param offset the offset or 0 for no offset * @return the result set + * @throws SQLException on failure */ public static ResultSet search(Connection conn, String text, int limit, int offset) throws SQLException { @@ -245,6 +233,7 @@ public static ResultSet search(Connection conn, String text, int limit, * @param limit the maximum number of rows or 0 for no limit * @param offset the offset or 0 for no offset * @return the result set + * @throws SQLException on failure */ public static ResultSet searchData(Connection conn, String text, int limit, int offset) throws SQLException { @@ -267,6 +256,7 @@ protected static SQLException convertException(Exception e) { * @param conn the database connection * @param schema the schema name * @param table the table name + * @throws SQLException on failure */ private static void createTrigger(Connection conn, String schema, String table) throws SQLException { @@ -301,6 +291,7 @@ private static void createOrDropTrigger(Connection conn, * * @param conn the connection * @return the index access wrapper + * @throws SQLException on failure */ protected static IndexAccess getIndexAccess(Connection conn) throws SQLException { @@ -310,12 +301,12 @@ protected static IndexAccess getIndexAccess(Connection conn) while (access == null) { try { Directory indexDir = path.startsWith(IN_MEMORY_PREFIX) ? 
- new RAMDirectory() : FSDirectory.open(Paths.get(path)); + new ByteBuffersDirectory() : FSDirectory.open(Paths.get(path)); Analyzer analyzer = new StandardAnalyzer(); IndexWriterConfig conf = new IndexWriterConfig(analyzer); conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND); IndexWriter writer = new IndexWriter(indexDir, conf); - //see http://wiki.apache.org/lucene-java/NearRealtimeSearch + //see https://cwiki.apache.org/confluence/display/lucene/NearRealtimeSearch access = new IndexAccess(writer); } catch (IndexFormatTooOldException e) { reindex(conn); @@ -335,6 +326,7 @@ protected static IndexAccess getIndexAccess(Connection conn) * * @param conn the database connection * @return the path + * @throws SQLException on failure */ protected static String getIndexPath(Connection conn) throws SQLException { Statement stat = conn.createStatement(); @@ -359,6 +351,7 @@ protected static String getIndexPath(Connection conn) throws SQLException { * @param conn the database connection * @param schema the schema name * @param table the table name + * @throws SQLException on failure */ private static void indexExistingRows(Connection conn, String schema, String table) throws SQLException { @@ -391,6 +384,7 @@ private static void removeIndexFiles(Connection conn) throws SQLException { * set. * * @param indexPath the index path + * @throws SQLException on failure */ protected static void removeIndexAccess(String indexPath) throws SQLException { @@ -415,6 +409,7 @@ protected static void removeIndexAccess(String indexPath) * @param offset the offset * @param data whether the raw data should be returned * @return the result set + * @throws SQLException on failure */ protected static ResultSet search(Connection conn, String text, int limit, int offset, boolean data) throws SQLException { @@ -442,14 +437,13 @@ protected static ResultSet search(Connection conn, String text, // will trigger writing results to disk. int maxResults = (limit == 0 ? 
100 : limit) + offset; TopDocs docs = searcher.search(query, maxResults); + long totalHits = docs.totalHits.value; if (limit == 0) { - // TopDocs.totalHits is long now - // (https://issues.apache.org/jira/browse/LUCENE-7872) - // but in this context it's safe to cast - limit = (int) TOTAL_HITS.getLong(docs); + // in this context it's safe to cast + limit = (int) totalHits; } for (int i = 0, len = docs.scoreDocs.length; i < limit - && i + offset < docs.totalHits + && i + offset < totalHits && i + offset < len; i++) { ScoreDoc sd = docs.scoreDocs[i + offset]; Document doc = searcher.doc(sd.doc); @@ -458,15 +452,15 @@ protected static ResultSet search(Connection conn, String text, if (data) { int idx = q.indexOf(" WHERE "); JdbcConnection c = (JdbcConnection) conn; - Session session = (Session) c.getSession(); + SessionLocal session = (SessionLocal) c.getSession(); Parser p = new Parser(session); String tab = q.substring(0, idx); ExpressionColumn expr = (ExpressionColumn) p .parseExpression(tab); String schemaName = expr.getOriginalTableAliasName(); - String tableName = expr.getColumnName(); + String tableName = expr.getColumnName(session, -1); q = q.substring(idx + " WHERE ".length()); - Object[][] columnData = parseKey(conn, q); + String[][] columnData = parseKey(conn, q); result.addRow(schemaName, tableName, columnData[0], columnData[1], score); } else { @@ -506,6 +500,7 @@ public FullTextTrigger() { /** * INTERNAL + * @see Trigger#init(Connection, String, String, String, boolean, int) */ @Override public void init(Connection conn, String schemaName, String triggerName, @@ -569,6 +564,7 @@ public void init(Connection conn, String schemaName, String triggerName, /** * INTERNAL + * @see Trigger#fire(Connection, Object[], Object[]) */ @Override public void fire(Connection conn, Object[] oldRow, Object[] newRow) @@ -598,16 +594,9 @@ public void close() throws SQLException { removeIndexAccess(indexPath); } - /** - * INTERNAL - */ - @Override - public void remove() { - 
// ignore - } - /** * Commit all changes to the Lucene index. + * @throws SQLException on failure */ void commitIndex() throws SQLException { try { @@ -622,8 +611,9 @@ void commitIndex() throws SQLException { * * @param row the row * @param commitIndex whether to commit the changes to the Lucene index + * @throws SQLException on failure */ - protected void insert(Object[] row, boolean commitIndex) throws SQLException { + void insert(Object[] row, boolean commitIndex) throws SQLException { String query = getQuery(row); Document doc = new Document(); doc.add(new Field(LUCENE_FIELD_QUERY, query, DOC_ID_FIELD_TYPE)); @@ -666,8 +656,9 @@ protected void insert(Object[] row, boolean commitIndex) throws SQLException { * * @param row the row * @param commitIndex whether to commit the changes to the Lucene index + * @throws SQLException on failure */ - protected void delete(Object[] row, boolean commitIndex) throws SQLException { + private void delete(Object[] row, boolean commitIndex) throws SQLException { String query = getQuery(row); try { Term term = new Term(LUCENE_FIELD_QUERY, query); @@ -727,6 +718,7 @@ private static final class IndexAccess { * Start using the searcher. * * @return the searcher + * @throws IOException on failure */ synchronized IndexSearcher getSearcher() throws IOException { if (!searcher.getIndexReader().tryIncRef()) { @@ -744,6 +736,7 @@ private void initializeSearcher() throws IOException { * Stop using the searcher. * * @param searcher the searcher + * @throws IOException on failure */ synchronized void returnSearcher(IndexSearcher searcher) throws IOException { searcher.getIndexReader().decRef(); @@ -751,6 +744,7 @@ synchronized void returnSearcher(IndexSearcher searcher) throws IOException { /** * Commit the changes. + * @throws IOException on failure */ public synchronized void commit() throws IOException { writer.commit(); @@ -760,6 +754,7 @@ public synchronized void commit() throws IOException { /** * Close the index. 
+ * @throws IOException on failure */ public synchronized void close() throws IOException { searcher = null; diff --git a/h2/src/main/org/h2/fulltext/FullTextSettings.java b/h2/src/main/org/h2/fulltext/FullTextSettings.java index 4d80a0d7a5..7cdfc2841c 100644 --- a/h2/src/main/org/h2/fulltext/FullTextSettings.java +++ b/h2/src/main/org/h2/fulltext/FullTextSettings.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.fulltext; @@ -12,11 +12,10 @@ import java.sql.Statement; import java.util.HashMap; import java.util.HashSet; -import java.util.Map; -import java.util.Set; +import java.util.WeakHashMap; import java.util.concurrent.ConcurrentHashMap; -import org.h2.util.SoftHashMap; +import org.h2.util.SoftValuesHashMap; /** * The global settings of a full text search. @@ -26,7 +25,7 @@ final class FullTextSettings { /** * The settings of open indexes. */ - private static final Map SETTINGS = new HashMap<>(); + private static final HashMap SETTINGS = new HashMap<>(); /** * Whether this instance has been initialized. @@ -36,12 +35,12 @@ final class FullTextSettings { /** * The set of words not to index (stop words). */ - private final Set ignoreList = new HashSet<>(); + private final HashSet ignoreList = new HashSet<>(); /** * The set of words / terms. */ - private final Map words = new HashMap<>(); + private final HashMap words = new HashMap<>(); /** * The set of indexes in this database. @@ -51,9 +50,7 @@ final class FullTextSettings { /** * The prepared statement cache. */ - private final SoftHashMap> cache = - new SoftHashMap<>(); + private final WeakHashMap> cache = new WeakHashMap<>(); /** * The whitespace characters. 
@@ -116,9 +113,7 @@ public Integer getWordId(String word) { */ public void addWord(String word, Integer id) { synchronized (words) { - if(!words.containsKey(word)) { - words.put(word, id); - } + words.putIfAbsent(word, id); } } @@ -128,7 +123,7 @@ public void addWord(String word, Integer id) { * @param indexId the index id * @return the index info */ - protected IndexInfo getIndexInfo(int indexId) { + IndexInfo getIndexInfo(int indexId) { return indexes.get(indexId); } @@ -137,7 +132,7 @@ protected IndexInfo getIndexInfo(int indexId) { * * @param index the index */ - protected void addIndexInfo(IndexInfo index) { + void addIndexInfo(IndexInfo index) { indexes.put(index.id, index); } @@ -148,7 +143,7 @@ protected void addIndexInfo(IndexInfo index) { * @param word the word to convert and check * @return the uppercase version of the word or null */ - protected String convertWord(String word) { + String convertWord(String word) { word = normalizeWord(word); synchronized (ignoreList) { if (ignoreList.contains(word)) { @@ -163,8 +158,9 @@ protected String convertWord(String word) { * * @param conn the connection * @return the settings + * @throws SQLException on failure */ - protected static FullTextSettings getInstance(Connection conn) + static FullTextSettings getInstance(Connection conn) throws SQLException { String path = getIndexPath(conn); FullTextSettings setting; @@ -187,7 +183,7 @@ protected static FullTextSettings getInstance(Connection conn) private static String getIndexPath(Connection conn) throws SQLException { Statement stat = conn.createStatement(); ResultSet rs = stat.executeQuery( - "CALL IFNULL(DATABASE_PATH(), 'MEM:' || DATABASE())"); + "CALL COALESCE(DATABASE_PATH(), 'MEM:' || DATABASE())"); rs.next(); String path = rs.getString(1); if ("MEM:UNNAMED".equals(path)) { @@ -205,12 +201,13 @@ private static String getIndexPath(Connection conn) throws SQLException { * @param conn the connection * @param sql the statement * @return the prepared statement + 
* @throws SQLException on failure */ - protected synchronized PreparedStatement prepare(Connection conn, String sql) + synchronized PreparedStatement prepare(Connection conn, String sql) throws SQLException { - SoftHashMap c = cache.get(conn); + SoftValuesHashMap c = cache.get(conn); if (c == null) { - c = new SoftHashMap<>(); + c = new SoftValuesHashMap<>(); cache.put(conn, c); } PreparedStatement prep = c.get(sql); diff --git a/h2/src/main/org/h2/fulltext/IndexInfo.java b/h2/src/main/org/h2/fulltext/IndexInfo.java index e07bb1d2f5..22c5498afc 100644 --- a/h2/src/main/org/h2/fulltext/IndexInfo.java +++ b/h2/src/main/org/h2/fulltext/IndexInfo.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.fulltext; diff --git a/h2/src/main/org/h2/fulltext/package.html b/h2/src/main/org/h2/fulltext/package.html index c86c055e0e..d3c046257f 100644 --- a/h2/src/main/org/h2/fulltext/package.html +++ b/h2/src/main/org/h2/fulltext/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/index/BaseIndex.java b/h2/src/main/org/h2/index/BaseIndex.java deleted file mode 100644 index 5f050305e7..0000000000 --- a/h2/src/main/org/h2/index/BaseIndex.java +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.ArrayList; -import org.h2.api.ErrorCode; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Constants; -import org.h2.engine.DbObject; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.message.Trace; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.schema.SchemaObjectBase; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.util.StringUtils; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * Most index implementations extend the base index. - */ -public abstract class BaseIndex extends SchemaObjectBase implements Index { - - protected IndexColumn[] indexColumns; - protected Column[] columns; - protected int[] columnIds; - protected final Table table; - protected final IndexType indexType; - - /** - * Initialize the base index. - * - * @param newTable the table - * @param id the object id - * @param name the index name - * @param newIndexColumns the columns that are indexed or null if this is - * not yet known - * @param newIndexType the index type - */ - protected BaseIndex(Table newTable, int id, String name, - IndexColumn[] newIndexColumns, IndexType newIndexType) { - super(newTable.getSchema(), id, name, Trace.INDEX); - this.indexType = newIndexType; - this.table = newTable; - if (newIndexColumns != null) { - this.indexColumns = newIndexColumns; - columns = new Column[newIndexColumns.length]; - int len = columns.length; - columnIds = new int[len]; - for (int i = 0; i < len; i++) { - Column col = newIndexColumns[i].column; - columns[i] = col; - columnIds[i] = col.getColumnId(); - } - } - } - - /** - * Check that the index columns are not CLOB or BLOB. 
- * - * @param columns the columns - */ - protected static void checkIndexColumnTypes(IndexColumn[] columns) { - for (IndexColumn c : columns) { - if (DataType.isLargeObject(c.column.getType().getValueType())) { - throw DbException.getUnsupportedException( - "Index on BLOB or CLOB column: " + c.column.getCreateSQL()); - } - } - } - - @Override - public String getDropSQL() { - return null; - } - - /** - * Create a duplicate key exception with a message that contains the index - * name. - * - * @param key the key values - * @return the exception - */ - protected DbException getDuplicateKeyException(String key) { - StringBuilder builder = new StringBuilder(); - getSQL(builder, false).append(" ON "); - table.getSQL(builder, false).append('('); - builder.append(getColumnListSQL(false)); - builder.append(')'); - if (key != null) { - builder.append(" VALUES ").append(key); - } - DbException e = DbException.get(ErrorCode.DUPLICATE_KEY_1, builder.toString()); - e.setSource(this); - return e; - } - - @Override - public String getPlanSQL() { - return getSQL(false); - } - - @Override - public void removeChildrenAndResources(Session session) { - table.removeIndex(this); - remove(session); - database.removeMeta(session, getId()); - } - - @Override - public boolean canFindNext() { - return false; - } - - @Override - public boolean isFindUsingFullTableScan() { - return false; - } - - @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - return find(filter.getSession(), first, last); - } - - /** - * Find a row or a list of rows that is larger and create a cursor to - * iterate over the result. The base implementation doesn't support this - * feature. 
- * - * @param session the session - * @param higherThan the lower limit (excluding) - * @param last the last row, or null for no limit - * @return the cursor - * @throws DbException always - */ - @Override - public Cursor findNext(Session session, SearchRow higherThan, SearchRow last) { - throw DbException.throwInternalError(toString()); - } - - /** - * Calculate the cost for the given mask as if this index was a typical - * b-tree range index. This is the estimated cost required to search one - * row, and then iterate over the given number of rows. - * - * @param masks the IndexCondition search masks, one for each column in the - * table - * @param rowCount the number of rows in the index - * @param filters all joined table filters - * @param filter the current table filter index - * @param sortOrder the sort order - * @param isScanIndex whether this is a "table scan" index - * @param allColumnsSet the set of all columns - * @return the estimated cost - */ - protected final long getCostRangeIndex(int[] masks, long rowCount, - TableFilter[] filters, int filter, SortOrder sortOrder, - boolean isScanIndex, AllColumnsForPlan allColumnsSet) { - rowCount += Constants.COST_ROW_OFFSET; - int totalSelectivity = 0; - long rowsCost = rowCount; - if (masks != null) { - int i = 0, len = columns.length; - boolean tryAdditional = false; - while (i < len) { - Column column = columns[i++]; - int index = column.getColumnId(); - int mask = masks[index]; - if ((mask & IndexCondition.EQUALITY) == IndexCondition.EQUALITY) { - if (i == len && getIndexType().isUnique()) { - rowsCost = 3; - break; - } - totalSelectivity = 100 - ((100 - totalSelectivity) * - (100 - column.getSelectivity()) / 100); - long distinctRows = rowCount * totalSelectivity / 100; - if (distinctRows <= 0) { - distinctRows = 1; - } - rowsCost = 2 + Math.max(rowCount / distinctRows, 1); - } else if ((mask & IndexCondition.RANGE) == IndexCondition.RANGE) { - rowsCost = 2 + rowsCost / 4; - tryAdditional = true; - break; 
- } else if ((mask & IndexCondition.START) == IndexCondition.START) { - rowsCost = 2 + rowsCost / 3; - tryAdditional = true; - break; - } else if ((mask & IndexCondition.END) == IndexCondition.END) { - rowsCost = rowsCost / 3; - tryAdditional = true; - break; - } else { - if (mask == 0) { - // Adjust counter of used columns (i) - i--; - } - break; - } - } - // Some additional columns can still be used - if (tryAdditional) { - while (i < len && masks[columns[i].getColumnId()] != 0) { - i++; - rowsCost--; - } - } - // Increase cost of indexes with additional unused columns - rowsCost += len - i; - } - // If the ORDER BY clause matches the ordering of this index, - // it will be cheaper than another index, so adjust the cost - // accordingly. - long sortingCost = 0; - if (sortOrder != null) { - sortingCost = 100 + rowCount / 10; - } - if (sortOrder != null && !isScanIndex) { - boolean sortOrderMatches = true; - int coveringCount = 0; - int[] sortTypes = sortOrder.getSortTypes(); - TableFilter tableFilter = filters == null ? null : filters[filter]; - for (int i = 0, len = sortTypes.length; i < len; i++) { - if (i >= indexColumns.length) { - // We can still use this index if we are sorting by more - // than it's columns, it's just that the coveringCount - // is lower than with an index that contains - // more of the order by columns. - break; - } - Column col = sortOrder.getColumn(i, tableFilter); - if (col == null) { - sortOrderMatches = false; - break; - } - IndexColumn indexCol = indexColumns[i]; - if (!col.equals(indexCol.column)) { - sortOrderMatches = false; - break; - } - int sortType = sortTypes[i]; - if (sortType != indexCol.sortType) { - sortOrderMatches = false; - break; - } - coveringCount++; - } - if (sortOrderMatches) { - // "coveringCount" makes sure that when we have two - // or more covering indexes, we choose the one - // that covers more. 
- sortingCost = 100 - coveringCount; - } - } - // If we have two indexes with the same cost, and one of the indexes can - // satisfy the query without needing to read from the primary table - // (scan index), make that one slightly lower cost. - boolean needsToReadFromScanIndex = true; - if (!isScanIndex && allColumnsSet != null) { - boolean foundAllColumnsWeNeed = true; - ArrayList foundCols = allColumnsSet.get(getTable()); - if (foundCols != null) - { - for (Column c : foundCols) { - boolean found = false; - for (Column c2 : columns) { - if (c == c2) { - found = true; - break; - } - } - if (!found) { - foundAllColumnsWeNeed = false; - break; - } - } - } - if (foundAllColumnsWeNeed) { - needsToReadFromScanIndex = false; - } - } - long rc; - if (isScanIndex) { - rc = rowsCost + sortingCost + 20; - } else if (needsToReadFromScanIndex) { - rc = rowsCost + rowsCost + sortingCost + 20; - } else { - // The (20-x) calculation makes sure that when we pick a covering - // index, we pick the covering index that has the smallest number of - // columns (the more columns we have in index - the higher cost). - // This is faster because a smaller index will fit into fewer data - // blocks. - rc = rowsCost + sortingCost + columns.length; - } - return rc; - } - - @Override - public int compareRows(SearchRow rowData, SearchRow compare) { - if (rowData == compare) { - return 0; - } - for (int i = 0, len = indexColumns.length; i < len; i++) { - int index = columnIds[i]; - Value v1 = rowData.getValue(index); - Value v2 = compare.getValue(index); - if (v1 == null || v2 == null) { - // can't compare further - return 0; - } - int c = compareValues(v1, v2, indexColumns[i].sortType); - if (c != 0) { - return c; - } - } - return 0; - } - - /** - * Check if this row may have duplicates with the same indexed values in the - * current compatibility mode. Duplicates with {@code NULL} values are - * allowed in some modes. 
- * - * @param searchRow - * the row to check - * @return {@code true} if specified row may have duplicates, - * {@code false otherwise} - */ - protected boolean mayHaveNullDuplicates(SearchRow searchRow) { - switch (database.getMode().uniqueIndexNullsHandling) { - case ALLOW_DUPLICATES_WITH_ANY_NULL: - for (int index : columnIds) { - if (searchRow.getValue(index) == ValueNull.INSTANCE) { - return true; - } - } - return false; - case ALLOW_DUPLICATES_WITH_ALL_NULLS: - for (int index : columnIds) { - if (searchRow.getValue(index) != ValueNull.INSTANCE) { - return false; - } - } - return true; - default: - return false; - } - } - - /** - * Compare the positions of two rows. - * - * @param rowData the first row - * @param compare the second row - * @return 0 if both rows are equal, -1 if the first row is smaller, - * otherwise 1 - */ - int compareKeys(SearchRow rowData, SearchRow compare) { - long k1 = rowData.getKey(); - long k2 = compare.getKey(); - if (k1 == k2) { - return 0; - } - return k1 > k2 ? 1 : -1; - } - - private int compareValues(Value a, Value b, int sortType) { - if (a == b) { - return 0; - } - boolean aNull = a == ValueNull.INSTANCE; - boolean bNull = b == ValueNull.INSTANCE; - if (aNull || bNull) { - return SortOrder.compareNull(aNull, sortType); - } - int comp = table.compareValues(a, b); - if ((sortType & SortOrder.DESCENDING) != 0) { - comp = -comp; - } - return comp; - } - - @Override - public int getColumnIndex(Column col) { - for (int i = 0, len = columns.length; i < len; i++) { - if (columns[i].equals(col)) { - return i; - } - } - return -1; - } - - @Override - public boolean isFirstColumn(Column column) { - return column.equals(columns[0]); - } - - /** - * Get the list of columns as a string. 
- * - * @param alwaysQuote quote all identifiers - * @return the list of columns - */ - private String getColumnListSQL(boolean alwaysQuote) { - return IndexColumn.writeColumns(new StringBuilder(), indexColumns, alwaysQuote).toString(); - } - - @Override - public String getCreateSQLForCopy(Table targetTable, String quotedName) { - StringBuilder buff = new StringBuilder("CREATE "); - buff.append(indexType.getSQL()); - buff.append(' '); - if (table.isHidden()) { - buff.append("IF NOT EXISTS "); - } - buff.append(quotedName); - buff.append(" ON "); - targetTable.getSQL(buff, true); - if (comment != null) { - buff.append(" COMMENT "); - StringUtils.quoteStringSQL(buff, comment); - } - buff.append('(').append(getColumnListSQL(true)).append(')'); - return buff.toString(); - } - - @Override - public String getCreateSQL() { - return getCreateSQLForCopy(table, getSQL(true)); - } - - @Override - public IndexColumn[] getIndexColumns() { - return indexColumns; - } - - @Override - public Column[] getColumns() { - return columns; - } - - @Override - public IndexType getIndexType() { - return indexType; - } - - @Override - public int getType() { - return DbObject.INDEX; - } - - @Override - public Table getTable() { - return table; - } - - @Override - public Row getRow(Session session, long key) { - throw DbException.getUnsupportedException(toString()); - } - - @Override - public boolean isHidden() { - return table.isHidden(); - } - - @Override - public boolean isRowIdIndex() { - return false; - } - - @Override - public boolean canScan() { - return true; - } - - @Override - public void setSortedInsertMode(boolean sortedInsertMode) { - // ignore - } - - @Override - public IndexLookupBatch createLookupBatch(TableFilter[] filters, int filter) { - // Lookup batching is not supported. 
- return null; - } - - @Override - public void update(Session session, Row oldRow, Row newRow) { - remove(session, oldRow); - add(session, newRow); - } -} diff --git a/h2/src/main/org/h2/index/Cursor.java b/h2/src/main/org/h2/index/Cursor.java index 0228912730..a8e768ae2c 100644 --- a/h2/src/main/org/h2/index/Cursor.java +++ b/h2/src/main/org/h2/index/Cursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; diff --git a/h2/src/main/org/h2/index/DualCursor.java b/h2/src/main/org/h2/index/DualCursor.java new file mode 100644 index 0000000000..e49a8bc1fe --- /dev/null +++ b/h2/src/main/org/h2/index/DualCursor.java @@ -0,0 +1,48 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.index; + +import org.h2.message.DbException; +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.value.Value; + +/** + * The cursor implementation for the DUAL index. 
+ */ +class DualCursor implements Cursor { + + private Row currentRow; + + DualCursor() { + } + + @Override + public Row get() { + return currentRow; + } + + @Override + public SearchRow getSearchRow() { + return currentRow; + } + + @Override + public boolean next() { + if (currentRow == null) { + currentRow = Row.get(Value.EMPTY_VALUES, 1); + return true; + } else { + return false; + } + } + + @Override + public boolean previous() { + throw DbException.getInternalError(toString()); + } + +} diff --git a/h2/src/main/org/h2/index/DualIndex.java b/h2/src/main/org/h2/index/DualIndex.java new file mode 100644 index 0000000000..74539c41b5 --- /dev/null +++ b/h2/src/main/org/h2/index/DualIndex.java @@ -0,0 +1,58 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.index; + +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.result.SortOrder; +import org.h2.table.DualTable; +import org.h2.table.IndexColumn; +import org.h2.table.TableFilter; +import org.h2.value.Value; + +/** + * An index for the DUAL table. 
+ */ +public class DualIndex extends VirtualTableIndex { + + public DualIndex(DualTable table) { + super(table, "DUAL_INDEX", new IndexColumn[0]); + } + + @Override + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + return new DualCursor(); + } + + @Override + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { + return 1d; + } + + @Override + public String getCreateSQL() { + return null; + } + + @Override + public boolean canGetFirstOrLast() { + return true; + } + + @Override + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + return new SingleRowCursor(Row.get(Value.EMPTY_VALUES, 1)); + } + + @Override + public String getPlanSQL() { + return "dual index"; + } + +} diff --git a/h2/src/main/org/h2/index/FunctionCursor.java b/h2/src/main/org/h2/index/FunctionCursor.java deleted file mode 100644 index 6f9472d237..0000000000 --- a/h2/src/main/org/h2/index/FunctionCursor.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.result.ResultInterface; -import org.h2.result.SearchRow; - -/** - * A cursor for a function that returns a result. 
- */ -public class FunctionCursor extends AbstractFunctionCursor { - - private final ResultInterface result; - - FunctionCursor(FunctionIndex index, SearchRow first, SearchRow last, Session session, ResultInterface result) { - super(index, first, last, session); - this.result = result; - } - - @Override - boolean nextImpl() { - row = null; - if (result != null && result.next()) { - values = result.currentRow(); - } else { - values = null; - } - return values != null; - } - -} diff --git a/h2/src/main/org/h2/index/FunctionCursorResultSet.java b/h2/src/main/org/h2/index/FunctionCursorResultSet.java deleted file mode 100644 index e0bdad6977..0000000000 --- a/h2/src/main/org/h2/index/FunctionCursorResultSet.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.result.ResultInterface; -import org.h2.result.SearchRow; -import org.h2.value.Value; - -import java.util.Arrays; - -/** - * A cursor for a function that returns a JDBC result set. 
- */ -public class FunctionCursorResultSet extends AbstractFunctionCursor { - - private final ResultInterface result; - - FunctionCursorResultSet(FunctionIndex index, SearchRow first, SearchRow last, Session session, - ResultInterface result) { - super(index, first, last, session); - this.result = result; - } - - @Override - boolean nextImpl() { - row = null; - if (result != null && result.next()) { - int columnCount = result.getVisibleColumnCount(); - Value[] currentRow = result.currentRow(); - values = Arrays.copyOf(currentRow, columnCount); - } else { - values = null; - } - return values != null; - } - -} \ No newline at end of file diff --git a/h2/src/main/org/h2/index/FunctionIndex.java b/h2/src/main/org/h2/index/FunctionIndex.java deleted file mode 100644 index 263cafb16c..0000000000 --- a/h2/src/main/org/h2/index/FunctionIndex.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.FunctionTable; -import org.h2.table.IndexColumn; -import org.h2.table.TableFilter; - -/** - * An index for a function that returns a result set. Search in this index - * performs scan over all rows and should be avoided. 
- */ -public class FunctionIndex extends BaseIndex { - - private final FunctionTable functionTable; - - public FunctionIndex(FunctionTable functionTable, IndexColumn[] columns) { - super(functionTable, 0, null, columns, IndexType.createNonUnique(true)); - this.functionTable = functionTable; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public void add(Session session, Row row) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public void remove(Session session, Row row) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public boolean isFindUsingFullTableScan() { - return true; - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - if (functionTable.isBufferResultSetToLocalTemp()) { - return new FunctionCursor(this, first, last, session, functionTable.getResult(session)); - } - return new FunctionCursorResultSet(this, first, last, session, - functionTable.getResultSet(session)); - } - - @Override - public double getCost(Session session, int[] masks, - TableFilter[] filters, int filter, SortOrder sortOrder, - AllColumnsForPlan allColumnsSet) { - if (masks != null) { - throw DbException.getUnsupportedException("ALIAS"); - } - long expectedRows; - if (functionTable.canGetRowCount()) { - expectedRows = functionTable.getRowCountApproximation(); - } else { - expectedRows = database.getSettings().estimatedFunctionTableRows; - } - return expectedRows * 10; - } - - @Override - public void remove(Session session) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public void truncate(Session session) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public boolean needRebuild() { - return false; - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - 
public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public long getRowCount(Session session) { - return functionTable.getRowCount(session); - } - - @Override - public long getRowCountApproximation() { - return functionTable.getRowCountApproximation(); - } - - @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public String getPlanSQL() { - return "function"; - } - - @Override - public boolean canScan() { - return false; - } - -} diff --git a/h2/src/main/org/h2/index/HashIndex.java b/h2/src/main/org/h2/index/HashIndex.java deleted file mode 100644 index 1f33874029..0000000000 --- a/h2/src/main/org/h2/index/HashIndex.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Map; -import java.util.TreeMap; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Mode.UniqueIndexNullsHandling; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.PageStoreTable; -import org.h2.table.TableFilter; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * An unique index based on an in-memory hash map. - */ -public class HashIndex extends BaseIndex { - - /** - * The index of the indexed column. 
- */ - private final int indexColumn; - private final boolean totalOrdering; - private final PageStoreTable tableData; - private Map rows; - private final ArrayList nullRows = new ArrayList<>(); - - public HashIndex(PageStoreTable table, int id, String indexName, IndexColumn[] columns, IndexType indexType) { - super(table, id, indexName, columns, indexType); - Column column = columns[0].column; - indexColumn = column.getColumnId(); - totalOrdering = DataType.hasTotalOrdering(column.getType().getValueType()); - this.tableData = table; - reset(); - } - - private void reset() { - rows = totalOrdering ? new HashMap() : new TreeMap(database.getCompareMode()); - } - - @Override - public void truncate(Session session) { - reset(); - } - - @Override - public void add(Session session, Row row) { - Value key = row.getValue(indexColumn); - if (key != ValueNull.INSTANCE - || database.getMode().uniqueIndexNullsHandling == UniqueIndexNullsHandling.FORBID_ANY_DUPLICATES) { - Object old = rows.get(key); - if (old != null) { - // TODO index duplicate key for hash indexes: is this allowed? 
- throw getDuplicateKeyException(key.toString()); - } - rows.put(key, row.getKey()); - } else { - nullRows.add(row.getKey()); - } - } - - @Override - public void remove(Session session, Row row) { - Value key = row.getValue(indexColumn); - if (key != ValueNull.INSTANCE - || database.getMode().uniqueIndexNullsHandling == UniqueIndexNullsHandling.FORBID_ANY_DUPLICATES) { - rows.remove(key); - } else { - nullRows.remove(row.getKey()); - } - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - if (first == null || last == null) { - // TODO hash index: should additionally check if values are the same - throw DbException.throwInternalError(first + " " + last); - } - Value v = first.getValue(indexColumn); - if (v == ValueNull.INSTANCE - && database.getMode().uniqueIndexNullsHandling != UniqueIndexNullsHandling.FORBID_ANY_DUPLICATES) { - return new NonUniqueHashCursor(session, tableData, nullRows); - } - /* - * Sometimes the incoming search is a similar, but not the same type - * e.g. the search value is INT, but the index column is LONG. In which - * case we need to convert, otherwise the HashMap will not find the - * result. 
- */ - v = v.convertTo(tableData.getColumn(indexColumn).getType(), database.getMode(), null); - Row result; - Long pos = rows.get(v); - if (pos == null) { - result = null; - } else { - result = tableData.getRow(session, pos.intValue()); - } - return new SingleRowCursor(result); - } - - @Override - public long getRowCount(Session session) { - return getRowCountApproximation(); - } - - @Override - public long getRowCountApproximation() { - return rows.size() + nullRows.size(); - } - - @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public void remove(Session session) { - // nothing to do - } - - @Override - public double getCost(Session session, int[] masks, - TableFilter[] filters, int filter, SortOrder sortOrder, - AllColumnsForPlan allColumnsSet) { - for (Column column : columns) { - int index = column.getColumnId(); - int mask = masks[index]; - if ((mask & IndexCondition.EQUALITY) != IndexCondition.EQUALITY) { - return Long.MAX_VALUE; - } - } - return 2; - } - - @Override - public void checkRename() { - // ok - } - - @Override - public boolean needRebuild() { - return true; - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("HASH"); - } - - @Override - public boolean canScan() { - return false; - } - -} diff --git a/h2/src/main/org/h2/index/Index.java b/h2/src/main/org/h2/index/Index.java index 4a282f3a00..b0104db1b3 100644 --- a/h2/src/main/org/h2/index/Index.java +++ b/h2/src/main/org/h2/index/Index.java @@ -1,13 +1,22 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; +import java.util.ArrayList; +import java.util.Arrays; + +import org.h2.api.ErrorCode; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.Constants; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.message.Trace; import org.h2.result.Row; +import org.h2.result.RowFactory; import org.h2.result.SearchRow; import org.h2.result.SortOrder; import org.h2.schema.SchemaObject; @@ -15,25 +24,185 @@ import org.h2.table.IndexColumn; import org.h2.table.Table; import org.h2.table.TableFilter; +import org.h2.util.StringUtils; +import org.h2.value.CompareMode; +import org.h2.value.DataType; +import org.h2.value.Value; +import org.h2.value.ValueNull; /** * An index. Indexes are used to speed up searching data. */ -public interface Index extends SchemaObject { +public abstract class Index extends SchemaObject { + + /** + * Check that the index columns are not CLOB or BLOB. + * + * @param columns the columns + */ + protected static void checkIndexColumnTypes(IndexColumn[] columns) { + for (IndexColumn c : columns) { + if (!DataType.isIndexable(c.column.getType())) { + throw DbException.getUnsupportedException("Index on column: " + c.column.getCreateSQL()); + } + } + } + + /** + * Columns of this index. + */ + protected IndexColumn[] indexColumns; + + /** + * Table columns used in this index. + */ + protected Column[] columns; + + /** + * Identities of table columns. + */ + protected int[] columnIds; + + /** + * Count of unique columns. Unique columns, if any, are always first columns + * in the lists. + */ + protected final int uniqueColumnColumn; + + /** + * The table. + */ + protected final Table table; + + /** + * The index type. 
+ */ + protected final IndexType indexType; + + private final RowFactory rowFactory; + + private final RowFactory uniqueRowFactory; + + /** + * Initialize the index. + * + * @param newTable the table + * @param id the object id + * @param name the index name + * @param newIndexColumns the columns that are indexed or null if this is + * not yet known + * @param uniqueColumnCount count of unique columns + * @param newIndexType the index type + */ + protected Index(Table newTable, int id, String name, IndexColumn[] newIndexColumns, int uniqueColumnCount, + IndexType newIndexType) { + super(newTable.getSchema(), id, name, Trace.INDEX); + this.uniqueColumnColumn = uniqueColumnCount; + this.indexType = newIndexType; + this.table = newTable; + if (newIndexColumns != null) { + this.indexColumns = newIndexColumns; + columns = new Column[newIndexColumns.length]; + int len = columns.length; + columnIds = new int[len]; + for (int i = 0; i < len; i++) { + Column col = newIndexColumns[i].column; + columns[i] = col; + columnIds[i] = col.getColumnId(); + } + } + RowFactory databaseRowFactory = database.getRowFactory(); + CompareMode compareMode = database.getCompareMode(); + Column[] tableColumns = table.getColumns(); + rowFactory = databaseRowFactory.createRowFactory(database, compareMode, database, tableColumns, + newIndexType.isScan() ? 
null : newIndexColumns, true); + RowFactory uniqueRowFactory; + if (uniqueColumnCount > 0) { + if (newIndexColumns == null || uniqueColumnCount == newIndexColumns.length) { + uniqueRowFactory = rowFactory; + } else { + uniqueRowFactory = databaseRowFactory.createRowFactory(database, compareMode, database, tableColumns, + Arrays.copyOf(newIndexColumns, uniqueColumnCount), true); + } + } else { + uniqueRowFactory = null; + } + this.uniqueRowFactory = uniqueRowFactory; + } + + @Override + public final int getType() { + return DbObject.INDEX; + } + + @Override + public void removeChildrenAndResources(SessionLocal session) { + table.removeIndex(this); + remove(session); + database.removeMeta(session, getId()); + } + + @Override + public final boolean isHidden() { + return table.isHidden(); + } + + @Override + public String getCreateSQLForCopy(Table targetTable, String quotedName) { + StringBuilder builder = new StringBuilder("CREATE "); + builder.append(indexType.getSQL()); + builder.append(' '); + if (table.isHidden()) { + builder.append("IF NOT EXISTS "); + } + builder.append(quotedName); + builder.append(" ON "); + targetTable.getSQL(builder, DEFAULT_SQL_FLAGS); + if (comment != null) { + builder.append(" COMMENT "); + StringUtils.quoteStringSQL(builder, comment); + } + return getColumnListSQL(builder, DEFAULT_SQL_FLAGS).toString(); + } + + /** + * Get the list of columns as a string. 
+ * + * @param sqlFlags formatting flags + * @return the list of columns + */ + private StringBuilder getColumnListSQL(StringBuilder builder, int sqlFlags) { + builder.append('('); + int length = indexColumns.length; + if (uniqueColumnColumn > 0 && uniqueColumnColumn < length) { + IndexColumn.writeColumns(builder, indexColumns, 0, uniqueColumnColumn, sqlFlags).append(") INCLUDE("); + IndexColumn.writeColumns(builder, indexColumns, uniqueColumnColumn, length, sqlFlags); + } else { + IndexColumn.writeColumns(builder, indexColumns, 0, length, sqlFlags); + } + return builder.append(')'); + } + + @Override + public String getCreateSQL() { + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS)); + } /** * Get the message to show in a EXPLAIN statement. * * @return the plan */ - String getPlanSQL(); + public String getPlanSQL() { + return getSQL(TRACE_SQL_FLAGS | ADD_PLAN_INFORMATION); + } /** * Close this index. * * @param session the session used to write data */ - void close(Session session); + public abstract void close(SessionLocal session); /** * Add a row to the index. @@ -41,7 +210,7 @@ public interface Index extends SchemaObject { * @param session the session to use * @param row the row to add */ - void add(Session session, Row row); + public abstract void add(SessionLocal session, Row row); /** * Remove a row from the index. @@ -49,7 +218,7 @@ public interface Index extends SchemaObject { * @param session the session * @param row the row */ - void remove(Session session, Row row); + public abstract void remove(SessionLocal session, Row row); /** * Update index after row change. 
@@ -58,7 +227,10 @@ public interface Index extends SchemaObject { * @param oldRow row before the update * @param newRow row after the update */ - void update(Session session, Row oldRow, Row newRow); + public void update(SessionLocal session, Row oldRow, Row newRow) { + remove(session, oldRow); + add(session, newRow); + } /** * Returns {@code true} if {@code find()} implementation performs scan over all @@ -67,7 +239,9 @@ public interface Index extends SchemaObject { * @return {@code true} if {@code find()} implementation performs scan over all * index, {@code false} if {@code find()} performs the fast lookup */ - boolean isFindUsingFullTableScan(); + public boolean isFindUsingFullTableScan() { + return false; + } /** * Find a row or a list of rows and create a cursor to iterate over the @@ -78,19 +252,7 @@ public interface Index extends SchemaObject { * @param last the last row, or null for no limit * @return the cursor to iterate over the results */ - Cursor find(Session session, SearchRow first, SearchRow last); - - /** - * Find a row or a list of rows and create a cursor to iterate over the - * result. - * - * @param filter the table filter (which possibly knows about additional - * conditions) - * @param first the first row, or null for no limit - * @param last the last row, or null for no limit - * @return the cursor to iterate over the results - */ - Cursor find(TableFilter filter, SearchRow first, SearchRow last); + public abstract Cursor find(SessionLocal session, SearchRow first, SearchRow last); /** * Estimate the cost to search for rows given the search mask. 
@@ -106,7 +268,7 @@ public interface Index extends SchemaObject { * @param allColumnsSet the set of all columns * @return the estimated cost */ - double getCost(Session session, int[] masks, TableFilter[] filters, int filter, + public abstract double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet); /** @@ -114,14 +276,14 @@ public interface Index extends SchemaObject { * * @param session the session */ - void remove(Session session); + public abstract void remove(SessionLocal session); /** * Remove all rows from the index. * * @param session the session */ - void truncate(Session session); + public abstract void truncate(SessionLocal session); /** * Check if the index can directly look up the lowest or highest value of a @@ -129,14 +291,18 @@ public interface Index extends SchemaObject { * * @return true if it can */ - boolean canGetFirstOrLast(); + public boolean canGetFirstOrLast() { + return false; + } /** * Check if the index can get the next higher value. * * @return true if it can */ - boolean canFindNext(); + public boolean canFindNext() { + return false; + } /** * Find a row or a list of rows that is larger and create a cursor to @@ -147,7 +313,9 @@ public interface Index extends SchemaObject { * @param last the last row, or null for no limit * @return the cursor */ - Cursor findNext(Session session, SearchRow higherThan, SearchRow last); + public Cursor findNext(SessionLocal session, SearchRow higherThan, SearchRow last) { + throw DbException.getInternalError(toString()); + } /** * Find the first (or last) value of this index. 
The cursor returned is @@ -158,7 +326,9 @@ public interface Index extends SchemaObject { * value should be returned * @return a cursor (never null) */ - Cursor findFirstOrLast(Session session, boolean first); + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + throw DbException.getInternalError(toString()); + } /** * Check if the index needs to be rebuilt. @@ -166,7 +336,7 @@ public interface Index extends SchemaObject { * * @return true if a rebuild is required. */ - boolean needRebuild(); + public abstract boolean needRebuild(); /** * Get the row count of this table, for the given session. @@ -174,21 +344,24 @@ public interface Index extends SchemaObject { * @param session the session * @return the row count */ - long getRowCount(Session session); + public abstract long getRowCount(SessionLocal session); /** * Get the approximated row count for this table. * + * @param session the session * @return the approximated row count */ - long getRowCountApproximation(); + public abstract long getRowCountApproximation(SessionLocal session); /** * Get the used disk space for this index. * * @return the estimated number of bytes */ - long getDiskSpaceUsed(); + public long getDiskSpaceUsed() { + return 0L; + } /** * Compare two rows. 
@@ -198,7 +371,40 @@ public interface Index extends SchemaObject { * @return 0 if both rows are equal, -1 if the first row is smaller, * otherwise 1 */ - int compareRows(SearchRow rowData, SearchRow compare); + public final int compareRows(SearchRow rowData, SearchRow compare) { + if (rowData == compare) { + return 0; + } + for (int i = 0, len = indexColumns.length; i < len; i++) { + int index = columnIds[i]; + Value v1 = rowData.getValue(index); + Value v2 = compare.getValue(index); + if (v1 == null || v2 == null) { + // can't compare further + return 0; + } + int c = compareValues(v1, v2, indexColumns[i].sortType); + if (c != 0) { + return c; + } + } + return 0; + } + + private int compareValues(Value a, Value b, int sortType) { + if (a == b) { + return 0; + } + boolean aNull = a == ValueNull.INSTANCE; + if (aNull || b == ValueNull.INSTANCE) { + return table.getDatabase().getDefaultNullOrdering().compareNull(aNull, sortType); + } + int comp = table.compareValues(database, a, b); + if ((sortType & SortOrder.DESCENDING) != 0) { + comp = -comp; + } + return comp; + } /** * Get the index of a column in the list of index columns @@ -206,7 +412,14 @@ public interface Index extends SchemaObject { * @param col the column * @return the index (0 meaning first column) */ - int getColumnIndex(Column col); + public int getColumnIndex(Column col) { + for (int i = 0, len = columns.length; i < len; i++) { + if (columns[i].equals(col)) { + return i; + } + } + return -1; + } /** * Check if the given column is the first for this index @@ -214,35 +427,56 @@ public interface Index extends SchemaObject { * @param column the column * @return true if the given columns is the first */ - boolean isFirstColumn(Column column); + public boolean isFirstColumn(Column column) { + return column.equals(columns[0]); + } /** * Get the indexed columns as index columns (with ordering information). 
* * @return the index columns */ - IndexColumn[] getIndexColumns(); + public final IndexColumn[] getIndexColumns() { + return indexColumns; + } /** * Get the indexed columns. * * @return the columns */ - Column[] getColumns(); + public final Column[] getColumns() { + return columns; + } + + /** + * Returns count of unique columns. Unique columns, if any, are always first + * columns in the lists. Unique indexes may have additional indexed + * non-unique columns. + * + * @return count of unique columns, or 0 if index isn't unique + */ + public final int getUniqueColumnCount() { + return uniqueColumnColumn; + } /** * Get the index type. * * @return the index type */ - IndexType getIndexType(); + public final IndexType getIndexType() { + return indexType; + } /** * Get the table on which this index is based. * * @return the table */ - Table getTable(); + public Table getTable() { + return table; + } /** * Get the row with the given key. @@ -251,39 +485,259 @@ public interface Index extends SchemaObject { * @param key the unique key * @return the row */ - Row getRow(Session session, long key); + public Row getRow(SessionLocal session, long key) { + throw DbException.getUnsupportedException(toString()); + } /** * Does this index support lookup by row id? * * @return true if it does */ - boolean isRowIdIndex(); + public boolean isRowIdIndex() { + return false; + } /** * Can this index iterate over all rows? * * @return true if it can */ - boolean canScan(); + public boolean canScan() { + return true; + } /** - * Enable or disable the 'sorted insert' optimizations (rows are inserted in - * ascending or descending order) if applicable for this index - * implementation. + * Create a duplicate key exception with a message that contains the index + * name. 
* - * @param sortedInsertMode the new value + * @param key the key values + * @return the exception */ - void setSortedInsertMode(boolean sortedInsertMode); + public DbException getDuplicateKeyException(String key) { + StringBuilder builder = new StringBuilder(); + getSQL(builder, TRACE_SQL_FLAGS).append(" ON "); + table.getSQL(builder, TRACE_SQL_FLAGS); + getColumnListSQL(builder, TRACE_SQL_FLAGS); + if (key != null) { + builder.append(" VALUES ").append(key); + } + DbException e = DbException.get(ErrorCode.DUPLICATE_KEY_1, builder.toString()); + e.setSource(this); + return e; + } /** - * Creates new lookup batch. Note that returned {@link IndexLookupBatch} - * instance can be used multiple times. + * Get "PRIMARY KEY ON <table> [(column)]". * - * @param filters the table filters - * @param filter the filter index (0, 1,...) - * @return created batch or {@code null} if batched lookup is not supported - * by this index. + * @param mainIndexColumn the column index + * @return the message */ - IndexLookupBatch createLookupBatch(TableFilter[] filters, int filter); + protected StringBuilder getDuplicatePrimaryKeyMessage(int mainIndexColumn) { + StringBuilder builder = new StringBuilder("PRIMARY KEY ON "); + table.getSQL(builder, TRACE_SQL_FLAGS); + if (mainIndexColumn >= 0 && mainIndexColumn < indexColumns.length) { + builder.append('('); + indexColumns[mainIndexColumn].getSQL(builder, TRACE_SQL_FLAGS).append(')'); + } + return builder; + } + + /** + * Calculate the cost for the given mask as if this index was a typical + * b-tree range index. This is the estimated cost required to search one + * row, and then iterate over the given number of rows. 
+ * + * @param masks the IndexCondition search masks, one for each column in the + * table + * @param rowCount the number of rows in the index + * @param filters all joined table filters + * @param filter the current table filter index + * @param sortOrder the sort order + * @param isScanIndex whether this is a "table scan" index + * @param allColumnsSet the set of all columns + * @return the estimated cost + */ + protected final long getCostRangeIndex(int[] masks, long rowCount, TableFilter[] filters, int filter, + SortOrder sortOrder, boolean isScanIndex, AllColumnsForPlan allColumnsSet) { + rowCount += Constants.COST_ROW_OFFSET; + int totalSelectivity = 0; + long rowsCost = rowCount; + if (masks != null) { + int i = 0, len = columns.length; + boolean tryAdditional = false; + while (i < len) { + Column column = columns[i++]; + int index = column.getColumnId(); + int mask = masks[index]; + if ((mask & IndexCondition.EQUALITY) == IndexCondition.EQUALITY) { + if (i > 0 && i == uniqueColumnColumn) { + rowsCost = 3; + break; + } + totalSelectivity = 100 - ((100 - totalSelectivity) * + (100 - column.getSelectivity()) / 100); + long distinctRows = rowCount * totalSelectivity / 100; + if (distinctRows <= 0) { + distinctRows = 1; + } + rowsCost = 2 + Math.max(rowCount / distinctRows, 1); + } else if ((mask & IndexCondition.RANGE) == IndexCondition.RANGE) { + rowsCost = 2 + rowsCost / 4; + tryAdditional = true; + break; + } else if ((mask & IndexCondition.START) == IndexCondition.START) { + rowsCost = 2 + rowsCost / 3; + tryAdditional = true; + break; + } else if ((mask & IndexCondition.END) == IndexCondition.END) { + rowsCost = rowsCost / 3; + tryAdditional = true; + break; + } else { + if (mask == 0) { + // Adjust counter of used columns (i) + i--; + } + break; + } + } + // Some additional columns can still be used + if (tryAdditional) { + while (i < len && masks[columns[i].getColumnId()] != 0) { + i++; + rowsCost--; + } + } + // Increase cost of indexes with additional 
unused columns + rowsCost += len - i; + } + // If the ORDER BY clause matches the ordering of this index, + // it will be cheaper than another index, so adjust the cost + // accordingly. + long sortingCost = 0; + if (sortOrder != null) { + sortingCost = 100 + rowCount / 10; + } + if (sortOrder != null && !isScanIndex) { + boolean sortOrderMatches = true; + int coveringCount = 0; + int[] sortTypes = sortOrder.getSortTypesWithNullOrdering(); + TableFilter tableFilter = filters == null ? null : filters[filter]; + for (int i = 0, len = sortTypes.length; i < len; i++) { + if (i >= indexColumns.length) { + // We can still use this index if we are sorting by more + // than it's columns, it's just that the coveringCount + // is lower than with an index that contains + // more of the order by columns. + break; + } + Column col = sortOrder.getColumn(i, tableFilter); + if (col == null) { + sortOrderMatches = false; + break; + } + IndexColumn indexCol = indexColumns[i]; + if (!col.equals(indexCol.column)) { + sortOrderMatches = false; + break; + } + int sortType = sortTypes[i]; + if (sortType != indexCol.sortType) { + sortOrderMatches = false; + break; + } + coveringCount++; + } + if (sortOrderMatches) { + // "coveringCount" makes sure that when we have two + // or more covering indexes, we choose the one + // that covers more. + sortingCost = 100 - coveringCount; + } + } + // If we have two indexes with the same cost, and one of the indexes can + // satisfy the query without needing to read from the primary table + // (scan index), make that one slightly lower cost. 
+ boolean needsToReadFromScanIndex; + if (!isScanIndex && allColumnsSet != null) { + needsToReadFromScanIndex = false; + ArrayList foundCols = allColumnsSet.get(getTable()); + if (foundCols != null) { + int main = table.getMainIndexColumn(); + loop: for (Column c : foundCols) { + int id = c.getColumnId(); + if (id == SearchRow.ROWID_INDEX || id == main) { + continue; + } + for (Column c2 : columns) { + if (c == c2) { + continue loop; + } + } + needsToReadFromScanIndex = true; + break; + } + } + } else { + needsToReadFromScanIndex = true; + } + long rc; + if (isScanIndex) { + rc = rowsCost + sortingCost + 20; + } else if (needsToReadFromScanIndex) { + rc = rowsCost + rowsCost + sortingCost + 20; + } else { + // The (20-x) calculation makes sure that when we pick a covering + // index, we pick the covering index that has the smallest number of + // columns (the more columns we have in index - the higher cost). + // This is faster because a smaller index will fit into fewer data + // blocks. + rc = rowsCost + sortingCost + columns.length; + } + return rc; + } + + + /** + * Check if this row may have duplicates with the same indexed values in the + * current compatibility mode. Duplicates with {@code NULL} values are + * allowed in some modes. 
+ * + * @param searchRow + * the row to check + * @return {@code true} if specified row may have duplicates, + * {@code false otherwise} + */ + public final boolean mayHaveNullDuplicates(SearchRow searchRow) { + switch (database.getMode().uniqueIndexNullsHandling) { + case ALLOW_DUPLICATES_WITH_ANY_NULL: + for (int i = 0; i < uniqueColumnColumn; i++) { + int index = columnIds[i]; + if (searchRow.getValue(index) == ValueNull.INSTANCE) { + return true; + } + } + return false; + case ALLOW_DUPLICATES_WITH_ALL_NULLS: + for (int i = 0; i < uniqueColumnColumn; i++) { + int index = columnIds[i]; + if (searchRow.getValue(index) != ValueNull.INSTANCE) { + return false; + } + } + return true; + default: + return false; + } + } + + public RowFactory getRowFactory() { + return rowFactory; + } + + public RowFactory getUniqueRowFactory() { + return uniqueRowFactory; + } + } diff --git a/h2/src/main/org/h2/index/IndexCondition.java b/h2/src/main/org/h2/index/IndexCondition.java index 147c73b0e5..d4b32d0590 100644 --- a/h2/src/main/org/h2/index/IndexCondition.java +++ b/h2/src/main/org/h2/index/IndexCondition.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; @@ -9,8 +9,9 @@ import java.util.Arrays; import java.util.List; import java.util.TreeSet; -import org.h2.command.dml.Query; -import org.h2.engine.Session; + +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; @@ -134,7 +135,7 @@ public static IndexCondition getInQuery(ExpressionColumn column, Query query) { * @param session the session * @return the value */ - public Value getCurrentValue(Session session) { + public Value getCurrentValue(SessionLocal session) { return expression.getValue(session); } @@ -145,11 +146,11 @@ public Value getCurrentValue(Session session) { * @param session the session * @return the value list */ - public Value[] getCurrentValueList(Session session) { + public Value[] getCurrentValueList(SessionLocal session) { TreeSet valueSet = new TreeSet<>(session.getDatabase().getCompareMode()); for (Expression e : expressionList) { Value v = e.getValue(session); - v = column.convert(v); + v = column.convert(session, v); valueSet.add(v); } Value[] array = valueSet.toArray(new Value[valueSet.size()]); @@ -170,21 +171,24 @@ public ResultInterface getCurrentResult() { /** * Get the SQL snippet of this comparison. * - * @param alwaysQuote quote all identifiers + * @param sqlFlags formatting flags * @return the SQL snippet */ - public String getSQL(boolean alwaysQuote) { + public String getSQL(int sqlFlags) { if (compareType == Comparison.FALSE) { return "FALSE"; } StringBuilder builder = new StringBuilder(); - column.getSQL(builder, alwaysQuote); + column.getSQL(builder, sqlFlags); switch (compareType) { case Comparison.EQUAL: builder.append(" = "); break; case Comparison.EQUAL_NULL_SAFE: - builder.append(" IS "); + builder.append(expression.isNullConstant() + || column.getType().getValueType() == Value.BOOLEAN && expression.isConstant() // + ? 
" IS " + : " IS NOT DISTINCT FROM "); break; case Comparison.BIGGER_EQUAL: builder.append(" >= "); @@ -199,23 +203,21 @@ public String getSQL(boolean alwaysQuote) { builder.append(" < "); break; case Comparison.IN_LIST: - builder.append(" IN("); - Expression.writeExpressions(builder, expressionList, alwaysQuote); - builder.append(')'); + Expression.writeExpressions(builder.append(" IN("), expressionList, sqlFlags).append(')'); break; case Comparison.IN_QUERY: builder.append(" IN("); - builder.append(expressionQuery.getPlanSQL(alwaysQuote)); + builder.append(expressionQuery.getPlanSQL(sqlFlags)); builder.append(')'); break; case Comparison.SPATIAL_INTERSECTS: builder.append(" && "); break; default: - DbException.throwInternalError("type=" + compareType); + throw DbException.getInternalError("type=" + compareType); } if (expression != null) { - expression.getSQL(builder, alwaysQuote); + expression.getSQL(builder, sqlFlags, Expression.AUTO_PARENTHESES); } return builder.toString(); } @@ -258,7 +260,7 @@ public int getMask(ArrayList indexConditions) { case Comparison.SPATIAL_INTERSECTS: return SPATIAL_INTERSECTS; default: - throw DbException.throwInternalError("type=" + compareType); + throw DbException.getInternalError("type=" + compareType); } } diff --git a/h2/src/main/org/h2/index/IndexCursor.java b/h2/src/main/org/h2/index/IndexCursor.java index cebadead27..2fe8d6fd73 100644 --- a/h2/src/main/org/h2/index/IndexCursor.java +++ b/h2/src/main/org/h2/index/IndexCursor.java @@ -1,13 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; import java.util.ArrayList; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.condition.Comparison; import org.h2.message.DbException; import org.h2.result.ResultInterface; @@ -17,7 +17,6 @@ import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.Table; -import org.h2.table.TableFilter; import org.h2.value.Value; import org.h2.value.ValueGeometry; import org.h2.value.ValueNull; @@ -32,7 +31,7 @@ */ public class IndexCursor implements Cursor { - private final TableFilter tableFilter; + private SessionLocal session; private Index index; private Table table; private IndexColumn[] indexColumns; @@ -45,8 +44,7 @@ public class IndexCursor implements Cursor { private Value[] inList; private ResultInterface inResult; - public IndexCursor(TableFilter filter) { - this.tableFilter = filter; + public IndexCursor() { } public void setIndex(Index index) { @@ -71,7 +69,8 @@ public void setIndex(Index index) { * @param s Session. * @param indexConditions Index conditions. 
*/ - public void prepare(Session s, ArrayList indexConditions) { + public void prepare(SessionLocal s, ArrayList indexConditions) { + session = s; alwaysFalse = false; start = end = null; inList = null; @@ -150,17 +149,16 @@ public void prepare(Session s, ArrayList indexConditions) { * @param s the session * @param indexConditions the index conditions */ - public void find(Session s, ArrayList indexConditions) { + public void find(SessionLocal s, ArrayList indexConditions) { prepare(s, indexConditions); if (inColumn != null) { return; } if (!alwaysFalse) { if (intersects != null && index instanceof SpatialIndex) { - cursor = ((SpatialIndex) index).findByGeometry(tableFilter, - start, end, intersects); + cursor = ((SpatialIndex) index).findByGeometry(session, start, end, intersects); } else if (index != null) { - cursor = index.find(tableFilter, start, end); + cursor = index.find(session, start, end); } } } @@ -193,13 +191,11 @@ private SearchRow getSpatialSearchRow(SearchRow row, int columnId, Value v) { // if an object needs to overlap with both a and b, // then it needs to overlap with the union of a and b // (not the intersection) - ValueGeometry vg = (ValueGeometry) row.getValue(columnId). - convertTo(Value.GEOMETRY); - v = ((ValueGeometry) v.convertTo(Value.GEOMETRY)). - getEnvelopeUnion(vg); + ValueGeometry vg = row.getValue(columnId).convertToGeometry(null); + v = v.convertToGeometry(null).getEnvelopeUnion(vg); } if (columnId == SearchRow.ROWID_INDEX) { - row.setKey(v.getLong()); + row.setKey(v == ValueNull.INSTANCE ? Long.MIN_VALUE : v.getLong()); } else { row.setValue(columnId, v); } @@ -213,7 +209,7 @@ private SearchRow getSearchRow(SearchRow row, int columnId, Value v, boolean max v = getMax(row.getValue(columnId), v, max); } if (columnId == SearchRow.ROWID_INDEX) { - row.setKey(v.getLong()); + row.setKey(v == ValueNull.INSTANCE ? 
Long.MIN_VALUE : v.getLong()); } else { row.setValue(columnId, v); } @@ -232,7 +228,7 @@ private Value getMax(Value a, Value b, boolean bigger) { } else if (b == ValueNull.INSTANCE) { return a; } - int comp = table.getDatabase().compare(a, b); + int comp = session.compare(a, b); if (comp == 0) { return a; } @@ -316,15 +312,15 @@ private void nextCursor() { } private void find(Value v) { - v = inColumn.convert(v); + v = inColumn.convert(session, v); int id = inColumn.getColumnId(); start.setValue(id, v); - cursor = index.find(tableFilter, start, start); + cursor = index.find(session, start, start); } @Override public boolean previous() { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/IndexLookupBatch.java b/h2/src/main/org/h2/index/IndexLookupBatch.java deleted file mode 100644 index 6f57359666..0000000000 --- a/h2/src/main/org/h2/index/IndexLookupBatch.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.List; -import java.util.concurrent.Future; -import org.h2.result.SearchRow; - -/** - * Support for asynchronous batched lookups in indexes. The flow is the - * following: H2 engine will be calling - * {@link #addSearchRows(SearchRow, SearchRow)} until method - * {@link #isBatchFull()} will return {@code true} or there are no more search - * rows to add. Then method {@link #find()} will be called to execute batched - * lookup. Note that a single instance of {@link IndexLookupBatch} can be reused - * for multiple sequential batched lookups, moreover it can be reused for - * multiple queries for the same prepared statement. 
- * - * @see Index#createLookupBatch(org.h2.table.TableFilter[], int) - * @author Sergi Vladykin - */ -public interface IndexLookupBatch { - /** - * Add search row pair to the batch. - * - * @param first the first row, or null for no limit - * @param last the last row, or null for no limit - * @return {@code false} if this search row pair is known to produce no - * results and thus the given row pair was not added - * @see Index#find(org.h2.table.TableFilter, SearchRow, SearchRow) - */ - boolean addSearchRows(SearchRow first, SearchRow last); - - /** - * Check if this batch is full. - * - * @return {@code true} If batch is full, will not accept any - * more rows and {@link #find()} can be executed. - */ - boolean isBatchFull(); - - /** - * Execute batched lookup and return future cursor for each provided search - * row pair. Note that this method must return exactly the same number of - * future cursors in result list as number of - * {@link #addSearchRows(SearchRow, SearchRow)} calls has been done before - * {@link #find()} call exactly in the same order. - * - * @return List of future cursors for collected search rows. - */ - List> find(); - - /** - * Get plan for EXPLAIN. - * - * @return plan - */ - String getPlanSQL(); - - /** - * Reset this batch to clear state. This method will be called before and - * after each query execution. - * - * @param beforeQuery if it is being called before query execution - */ - void reset(boolean beforeQuery); -} diff --git a/h2/src/main/org/h2/index/IndexType.java b/h2/src/main/org/h2/index/IndexType.java index e16d11b09a..6949b61585 100644 --- a/h2/src/main/org/h2/index/IndexType.java +++ b/h2/src/main/org/h2/index/IndexType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; @@ -10,7 +10,7 @@ */ public class IndexType { - private boolean primaryKey, persistent, unique, hash, scan, spatial, affinity; + private boolean primaryKey, persistent, unique, hash, scan, spatial; private boolean belongsToConstraint; /** @@ -71,17 +71,6 @@ public static IndexType createNonUnique(boolean persistent, boolean hash, return type; } - /** - * Create an affinity index. - * - * @return the index type - */ - public static IndexType createAffinity() { - IndexType type = new IndexType(); - type.affinity = true; - return type; - } - /** * Create a scan pseudo-index. * @@ -159,15 +148,6 @@ public boolean isUnique() { return unique; } - /** - * Does this index represent an affinity key? - * - * @return true if it does - */ - public boolean isAffinity() { - return affinity; - } - /** * Get the SQL snippet to create such an index. * diff --git a/h2/src/main/org/h2/index/LinkedCursor.java b/h2/src/main/org/h2/index/LinkedCursor.java index b97453bb3b..75fb1e3b82 100644 --- a/h2/src/main/org/h2/index/LinkedCursor.java +++ b/h2/src/main/org/h2/index/LinkedCursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; @@ -8,14 +8,12 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; -import org.h2.table.Column; import org.h2.table.TableLink; -import org.h2.value.DataType; -import org.h2.value.Value; +import org.h2.value.ValueToObjectConverter2; /** * The cursor implementation for the linked index. 
@@ -25,11 +23,11 @@ public class LinkedCursor implements Cursor { private final TableLink tableLink; private final PreparedStatement prep; private final String sql; - private final Session session; + private final SessionLocal session; private final ResultSet rs; private Row current; - LinkedCursor(TableLink tableLink, ResultSet rs, Session session, + LinkedCursor(TableLink tableLink, ResultSet rs, SessionLocal session, String sql, PreparedStatement prep) { this.session = session; this.tableLink = tableLink; @@ -63,16 +61,15 @@ public boolean next() { } current = tableLink.getTemplateRow(); for (int i = 0; i < current.getColumnCount(); i++) { - Column col = tableLink.getColumn(i); - Value v = DataType.readValue(session, rs, i + 1, col.getType().getValueType()); - current.setValue(i, v); + current.setValue(i, ValueToObjectConverter2.readValue(session, rs, i + 1, + tableLink.getColumn(i).getType().getValueType())); } return true; } @Override public boolean previous() { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/LinkedIndex.java b/h2/src/main/org/h2/index/LinkedIndex.java index e2c2b22ed3..b5b9a00914 100644 --- a/h2/src/main/org/h2/index/LinkedIndex.java +++ b/h2/src/main/org/h2/index/LinkedIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; @@ -9,9 +9,9 @@ import java.sql.ResultSet; import java.util.ArrayList; -import org.h2.command.dml.AllColumnsForPlan; +import org.h2.command.query.AllColumnsForPlan; import org.h2.engine.Constants; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; @@ -29,17 +29,16 @@ * A linked index is a index for a linked (remote) table. * It is backed by an index on the remote table which is accessed over JDBC. */ -public class LinkedIndex extends BaseIndex { +public class LinkedIndex extends Index { private final TableLink link; private final String targetTableName; private long rowCount; - private final boolean quoteAllIdentifiers = false; + private final int sqlFlags = QUOTE_ONLY_WHEN_REQUIRED; - public LinkedIndex(TableLink table, int id, IndexColumn[] columns, - IndexType indexType) { - super(table, id, null, columns, indexType); + public LinkedIndex(TableLink table, int id, IndexColumn[] columns, int uniqueColumnCount, IndexType indexType) { + super(table, id, null, columns, uniqueColumnCount, indexType); link = table; targetTableName = link.getQualifiedTable(); } @@ -50,7 +49,7 @@ public String getCreateSQL() { } @Override - public void close(Session session) { + public void close(SessionLocal session) { // nothing to do } @@ -59,7 +58,7 @@ private static boolean isNull(Value v) { } @Override - public void add(Session session, Row row) { + public void add(SessionLocal session, Row row) { ArrayList params = Utils.newSmallArrayList(); StringBuilder buff = new StringBuilder("INSERT INTO "); buff.append(targetTableName).append(" VALUES("); @@ -80,7 +79,7 @@ public void add(Session session, Row row) { buff.append(')'); String sql = buff.toString(); try { - link.execute(sql, params, true); + link.execute(sql, params, true, session); rowCount++; } catch (Exception e) { throw TableLink.wrapException(sql, e); @@ -88,7 
+87,7 @@ public void add(Session session, Row row) { } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { ArrayList params = Utils.newSmallArrayList(); StringBuilder builder = new StringBuilder("SELECT * FROM ").append(targetTableName).append(" T"); boolean f = false; @@ -98,7 +97,7 @@ public Cursor find(Session session, SearchRow first, SearchRow last) { builder.append(f ? " AND " : " WHERE "); f = true; Column col = table.getColumn(i); - col.getSQL(builder, quoteAllIdentifiers); + col.getSQL(builder, sqlFlags); if (v == ValueNull.INSTANCE) { builder.append(" IS NULL"); } else { @@ -114,7 +113,7 @@ public Cursor find(Session session, SearchRow first, SearchRow last) { builder.append(f ? " AND " : " WHERE "); f = true; Column col = table.getColumn(i); - col.getSQL(builder, quoteAllIdentifiers); + col.getSQL(builder, sqlFlags); if (v == ValueNull.INSTANCE) { builder.append(" IS NULL"); } else { @@ -126,7 +125,7 @@ public Cursor find(Session session, SearchRow first, SearchRow last) { } String sql = builder.toString(); try { - PreparedStatement prep = link.execute(sql, params, false); + PreparedStatement prep = link.execute(sql, params, false, session); ResultSet rs = prep.getResultSet(); return new LinkedCursor(link, rs, session, sql, prep); } catch (Exception e) { @@ -136,7 +135,7 @@ public Cursor find(Session session, SearchRow first, SearchRow last) { private void addParameter(StringBuilder builder, Column col) { TypeInfo type = col.getType(); - if (type.getValueType() == Value.STRING_FIXED && link.isOracle()) { + if (type.getValueType() == Value.CHAR && link.isOracle()) { // workaround for Oracle // create table test(id int primary key, name char(15)); // insert into test values(1, 'Hello') @@ -148,7 +147,7 @@ private void addParameter(StringBuilder builder, Column col) { } @Override - public double getCost(Session session, int[] masks, + public double 
getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { return 100 + getCostRangeIndex(masks, rowCount + @@ -156,12 +155,12 @@ public double getCost(Session session, int[] masks, } @Override - public void remove(Session session) { + public void remove(SessionLocal session) { // nothing to do } @Override - public void truncate(Session session) { + public void truncate(SessionLocal session) { // nothing to do } @@ -176,19 +175,7 @@ public boolean needRebuild() { } @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - // TODO optimization: could get the first or last value (in any case; - // maybe not optimized) - throw DbException.getUnsupportedException("LINKED"); - } - - @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { ArrayList params = Utils.newSmallArrayList(); StringBuilder builder = new StringBuilder("DELETE FROM ").append(targetTableName).append(" WHERE "); for (int i = 0; i < row.getColumnCount(); i++) { @@ -196,7 +183,7 @@ public void remove(Session session, Row row) { builder.append("AND "); } Column col = table.getColumn(i); - col.getSQL(builder, quoteAllIdentifiers); + col.getSQL(builder, sqlFlags); Value v = row.getValue(i); if (isNull(v)) { builder.append(" IS NULL "); @@ -209,7 +196,7 @@ public void remove(Session session, Row row) { } String sql = builder.toString(); try { - PreparedStatement prep = link.execute(sql, params, false); + PreparedStatement prep = link.execute(sql, params, false, session); int count = prep.executeUpdate(); link.reusePreparedStatement(prep, sql); rowCount -= count; @@ -224,15 +211,16 @@ public void remove(Session session, Row row) { * * @param oldRow the old data * @param newRow the new data + * @param session the session */ - public void update(Row oldRow, Row newRow) { + public void 
update(Row oldRow, Row newRow, SessionLocal session) { ArrayList params = Utils.newSmallArrayList(); StringBuilder builder = new StringBuilder("UPDATE ").append(targetTableName).append(" SET "); for (int i = 0; i < newRow.getColumnCount(); i++) { if (i > 0) { builder.append(", "); } - table.getColumn(i).getSQL(builder, quoteAllIdentifiers).append('='); + table.getColumn(i).getSQL(builder, sqlFlags).append('='); Value v = newRow.getValue(i); if (v == null) { builder.append("DEFAULT"); @@ -247,7 +235,7 @@ public void update(Row oldRow, Row newRow) { if (i > 0) { builder.append(" AND "); } - col.getSQL(builder, quoteAllIdentifiers); + col.getSQL(builder, sqlFlags); Value v = oldRow.getValue(i); if (isNull(v)) { builder.append(" IS NULL"); @@ -259,24 +247,20 @@ public void update(Row oldRow, Row newRow) { } String sql = builder.toString(); try { - link.execute(sql, params, true); + link.execute(sql, params, true, session); } catch (Exception e) { throw TableLink.wrapException(sql, e); } } @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return rowCount; } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return rowCount; } - @Override - public long getDiskSpaceUsed() { - return 0; - } } diff --git a/h2/src/main/org/h2/index/MetaCursor.java b/h2/src/main/org/h2/index/MetaCursor.java index 8b7047b95c..8932d016ca 100644 --- a/h2/src/main/org/h2/index/MetaCursor.java +++ b/h2/src/main/org/h2/index/MetaCursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; @@ -42,7 +42,7 @@ public boolean next() { @Override public boolean previous() { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/MetaIndex.java b/h2/src/main/org/h2/index/MetaIndex.java index fbc73b2f95..86ee869899 100644 --- a/h2/src/main/org/h2/index/MetaIndex.java +++ b/h2/src/main/org/h2/index/MetaIndex.java @@ -1,13 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; import java.util.ArrayList; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; + +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; @@ -20,40 +21,40 @@ /** * The index implementation for meta data tables. 
*/ -public class MetaIndex extends BaseIndex { +public class MetaIndex extends Index { private final MetaTable meta; private final boolean scan; public MetaIndex(MetaTable meta, IndexColumn[] columns, boolean scan) { - super(meta, 0, null, columns, IndexType.createNonUnique(true)); + super(meta, 0, null, columns, 0, IndexType.createNonUnique(true)); this.meta = meta; this.scan = scan; } @Override - public void close(Session session) { + public void close(SessionLocal session) { // nothing to do } @Override - public void add(Session session, Row row) { + public void add(SessionLocal session, Row row) { throw DbException.getUnsupportedException("META"); } @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { throw DbException.getUnsupportedException("META"); } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { ArrayList rows = meta.generateRows(session, first, last); return new MetaCursor(rows); } @Override - public double getCost(Session session, int[] masks, + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { if (scan) { @@ -64,12 +65,12 @@ public double getCost(Session session, int[] masks, } @Override - public void truncate(Session session) { + public void truncate(SessionLocal session) { throw DbException.getUnsupportedException("META"); } @Override - public void remove(Session session) { + public void remove(SessionLocal session) { throw DbException.getUnsupportedException("META"); } @@ -106,22 +107,12 @@ public String getCreateSQL() { } @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("META"); - } - - @Override - public long getRowCount(Session session) { + 
public long getRowCount(SessionLocal session) { return MetaTable.ROW_COUNT_APPROXIMATION; } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return MetaTable.ROW_COUNT_APPROXIMATION; } diff --git a/h2/src/main/org/h2/index/NonUniqueHashCursor.java b/h2/src/main/org/h2/index/NonUniqueHashCursor.java deleted file mode 100644 index d3ca04affa..0000000000 --- a/h2/src/main/org/h2/index/NonUniqueHashCursor.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.ArrayList; -import org.h2.engine.Session; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.table.PageStoreTable; - -/** - * Cursor implementation for non-unique hash index - * - * @author Sergi Vladykin - */ -public class NonUniqueHashCursor implements Cursor { - - private final Session session; - private final ArrayList positions; - private final PageStoreTable tableData; - - private int index = -1; - - public NonUniqueHashCursor(Session session, PageStoreTable tableData, - ArrayList positions) { - this.session = session; - this.tableData = tableData; - this.positions = positions; - } - - @Override - public Row get() { - if (index < 0 || index >= positions.size()) { - return null; - } - return tableData.getRow(session, positions.get(index)); - } - - @Override - public SearchRow getSearchRow() { - return get(); - } - - @Override - public boolean next() { - return positions != null && ++index < positions.size(); - } - - @Override - public boolean previous() { - return positions != null && --index >= 0; - } - -} diff --git a/h2/src/main/org/h2/index/NonUniqueHashIndex.java b/h2/src/main/org/h2/index/NonUniqueHashIndex.java deleted file mode 100644 index 919c9f4c39..0000000000 --- a/h2/src/main/org/h2/index/NonUniqueHashIndex.java 
+++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Map; -import java.util.TreeMap; - -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.PageStoreTable; -import org.h2.table.TableFilter; -import org.h2.util.Utils; -import org.h2.value.DataType; -import org.h2.value.Value; - -/** - * A non-unique index based on an in-memory hash map. - * - * @author Sergi Vladykin - */ -public class NonUniqueHashIndex extends BaseIndex { - - /** - * The index of the indexed column. - */ - private final int indexColumn; - private final boolean totalOrdering; - private Map> rows; - private final PageStoreTable tableData; - private long rowCount; - - public NonUniqueHashIndex(PageStoreTable table, int id, String indexName, - IndexColumn[] columns, IndexType indexType) { - super(table, id, indexName, columns, indexType); - Column column = columns[0].column; - indexColumn = column.getColumnId(); - totalOrdering = DataType.hasTotalOrdering(column.getType().getValueType()); - tableData = table; - reset(); - } - - private void reset() { - rows = totalOrdering ? 
new HashMap>() - : new TreeMap>(database.getCompareMode()); - rowCount = 0; - } - - @Override - public void truncate(Session session) { - reset(); - } - - @Override - public void add(Session session, Row row) { - Value key = row.getValue(indexColumn); - ArrayList positions = rows.get(key); - if (positions == null) { - positions = Utils.newSmallArrayList(); - rows.put(key, positions); - } - positions.add(row.getKey()); - rowCount++; - } - - @Override - public void remove(Session session, Row row) { - if (rowCount == 1) { - // last row in table - reset(); - } else { - Value key = row.getValue(indexColumn); - ArrayList positions = rows.get(key); - if (positions.size() == 1) { - // last row with such key - rows.remove(key); - } else { - positions.remove(row.getKey()); - } - rowCount--; - } - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - if (first == null || last == null) { - throw DbException.throwInternalError(first + " " + last); - } - if (first != last) { - if (compareKeys(first, last) != 0) { - throw DbException.throwInternalError(); - } - } - Value v = first.getValue(indexColumn); - /* - * Sometimes the incoming search is a similar, but not the same type - * e.g. the search value is INT, but the index column is LONG. In which - * case we need to convert, otherwise the HashMap will not find the - * result. 
- */ - v = v.convertTo(tableData.getColumn(indexColumn).getType(), database.getMode(), null); - ArrayList positions = rows.get(v); - return new NonUniqueHashCursor(session, tableData, positions); - } - - @Override - public long getRowCount(Session session) { - return rowCount; - } - - @Override - public long getRowCountApproximation() { - return rowCount; - } - - @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public void remove(Session session) { - // nothing to do - } - - @Override - public double getCost(Session session, int[] masks, - TableFilter[] filters, int filter, SortOrder sortOrder, - AllColumnsForPlan allColumnsSet) { - for (Column column : columns) { - int index = column.getColumnId(); - int mask = masks[index]; - if ((mask & IndexCondition.EQUALITY) != IndexCondition.EQUALITY) { - return Long.MAX_VALUE; - } - } - return 2; - } - - @Override - public void checkRename() { - // ok - } - - @Override - public boolean needRebuild() { - return true; - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("HASH"); - } - - @Override - public boolean canScan() { - return false; - } - -} diff --git a/h2/src/main/org/h2/index/PageBtree.java b/h2/src/main/org/h2/index/PageBtree.java deleted file mode 100644 index cff4ba67b8..0000000000 --- a/h2/src/main/org/h2/index/PageBtree.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.result.SearchRow; -import org.h2.store.Data; -import org.h2.store.Page; - -/** - * A page that contains index data. 
- */ -public abstract class PageBtree extends Page { - - /** - * This is a root page. - */ - static final int ROOT = 0; - - /** - * Indicator that the row count is not known. - */ - static final int UNKNOWN_ROWCOUNT = -1; - - /** - * The index. - */ - protected final PageBtreeIndex index; - - /** - * The page number of the parent. - */ - protected int parentPageId; - - /** - * The data page. - */ - protected final Data data; - - /** - * The row offsets. - */ - protected int[] offsets; - - /** - * The number of entries. - */ - protected int entryCount; - - /** - * The index data - */ - protected SearchRow[] rows; - - /** - * The start of the data area. - */ - protected int start; - - /** - * If only the position of the row is stored in the page - */ - protected boolean onlyPosition; - - /** - * Whether the data page is up-to-date. - */ - protected boolean written; - - /** - * The estimated memory used by this object. - */ - private final int memoryEstimated; - - PageBtree(PageBtreeIndex index, int pageId, Data data) { - this.index = index; - this.data = data; - setPos(pageId); - memoryEstimated = index.getMemoryPerPage(); - } - - /** - * Get the real row count. If required, this will read all child pages. - * - * @return the row count - */ - abstract int getRowCount(); - - /** - * Set the stored row count. This will write the page. - * - * @param rowCount the stored row count - */ - abstract void setRowCountStored(int rowCount); - - /** - * Find an entry. 
- * - * @param compare the row - * @param bigger if looking for a larger row - * @param add if the row should be added (check for duplicate keys) - * @param compareKeys compare the row keys as well - * @return the index of the found row - */ - int find(SearchRow compare, boolean bigger, boolean add, boolean compareKeys) { - if (compare == null) { - return 0; - } - int l = 0, r = entryCount; - int comp = 1; - while (l < r) { - int i = (l + r) >>> 1; - SearchRow row = getRow(i); - comp = index.compareRows(row, compare); - if (comp == 0) { - if (add && index.indexType.isUnique()) { - if (!index.mayHaveNullDuplicates(compare)) { - throw index.getDuplicateKeyException(compare.toString()); - } - } - if (compareKeys) { - comp = index.compareKeys(row, compare); - if (comp == 0) { - return i; - } - } - } - if (comp > 0 || (!bigger && comp == 0)) { - r = i; - } else { - l = i + 1; - } - } - return l; - } - - /** - * Add a row if possible. If it is possible this method returns -1, - * otherwise the split point. It is always possible to add one row. - * - * @param row the row to add - * @return the split point of this page, or -1 if no split is required - */ - abstract int addRowTry(SearchRow row); - - /** - * Find the first row. - * - * @param cursor the cursor - * @param first the row to find - * @param bigger if the row should be bigger - */ - abstract void find(PageBtreeCursor cursor, SearchRow first, boolean bigger); - - /** - * Find the last row. - * - * @param cursor the cursor - */ - abstract void last(PageBtreeCursor cursor); - - /** - * Get the row at this position. 
- * - * @param at the index - * @return the row - */ - SearchRow getRow(int at) { - SearchRow row = rows[at]; - if (row == null) { - row = index.readRow(data, offsets[at], onlyPosition, true); - memoryChange(); - rows[at] = row; - } else if (!index.hasData(row)) { - row = index.readRow(row.getKey()); - memoryChange(); - rows[at] = row; - } - return row; - } - - /** - * The memory usage of this page was changed. Propagate the change if - * needed. - */ - protected void memoryChange() { - // nothing to do - } - - /** - * Split the index page at the given point. - * - * @param splitPoint the index where to split - * @return the new page that contains about half the entries - */ - abstract PageBtree split(int splitPoint); - - /** - * Change the page id. - * - * @param id the new page id - */ - void setPageId(int id) { - changeCount = index.getPageStore().getChangeCount(); - written = false; - index.getPageStore().removeFromCache(getPos()); - setPos(id); - index.getPageStore().logUndo(this, null); - remapChildren(); - } - - /** - * Get the first child leaf page of a page. - * - * @return the page - */ - abstract PageBtreeLeaf getFirstLeaf(); - - /** - * Get the first child leaf page of a page. - * - * @return the page - */ - abstract PageBtreeLeaf getLastLeaf(); - - /** - * Change the parent page id. - * - * @param id the new parent page id - */ - void setParentPageId(int id) { - index.getPageStore().logUndo(this, data); - changeCount = index.getPageStore().getChangeCount(); - written = false; - parentPageId = id; - } - - /** - * Update the parent id of all children. - */ - abstract void remapChildren(); - - /** - * Remove a row. - * - * @param row the row to remove - * @return null if the last row didn't change, - * the deleted row if the page is now empty, - * otherwise the new last row of this page - */ - abstract SearchRow remove(SearchRow row); - - /** - * Free this page and all child pages. 
- */ - abstract void freeRecursive(); - - /** - * Ensure all rows are read in memory. - */ - protected void readAllRows() { - for (int i = 0; i < entryCount; i++) { - SearchRow row = rows[i]; - if (row == null) { - row = index.readRow(data, offsets[i], onlyPosition, false); - rows[i] = row; - } - } - } - - /** - * Get the estimated memory size. - * - * @return number of double words (4 bytes) - */ - @Override - public int getMemory() { - // need to always return the same value for the same object (otherwise - // the cache size would change after adding and then removing the same - // page from the cache) but index.getMemoryPerPage() can adopt according - // to how much memory a row needs on average - return memoryEstimated; - } - - @Override - public boolean canRemove() { - return changeCount < index.getPageStore().getChangeCount(); - } - -} diff --git a/h2/src/main/org/h2/index/PageBtreeCursor.java b/h2/src/main/org/h2/index/PageBtreeCursor.java deleted file mode 100644 index 192166a677..0000000000 --- a/h2/src/main/org/h2/index/PageBtreeCursor.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.result.Row; -import org.h2.result.SearchRow; - -/** - * The cursor implementation for the page b-tree index. - */ -public class PageBtreeCursor implements Cursor { - - private final Session session; - private final PageBtreeIndex index; - private final SearchRow last; - private PageBtreeLeaf current; - private int i; - private SearchRow currentSearchRow; - private Row currentRow; - - PageBtreeCursor(Session session, PageBtreeIndex index, SearchRow last) { - this.session = session; - this.index = index; - this.last = last; - } - - /** - * Set the position of the current row. 
- * - * @param current the leaf page - * @param i the index within the page - */ - void setCurrent(PageBtreeLeaf current, int i) { - this.current = current; - this.i = i; - } - - @Override - public Row get() { - if (currentRow == null && currentSearchRow != null) { - currentRow = index.getRow(session, currentSearchRow.getKey()); - } - return currentRow; - } - - @Override - public SearchRow getSearchRow() { - return currentSearchRow; - } - - @Override - public boolean next() { - if (current == null) { - return false; - } - if (i >= current.getEntryCount()) { - current.nextPage(this); - if (current == null) { - return false; - } - } - currentSearchRow = current.getRow(i); - currentRow = null; - if (last != null && index.compareRows(currentSearchRow, last) > 0) { - currentSearchRow = null; - return false; - } - i++; - return true; - } - - @Override - public boolean previous() { - if (current == null) { - return false; - } - if (i < 0) { - current.previousPage(this); - if (current == null) { - return false; - } - } - currentSearchRow = current.getRow(i); - currentRow = null; - i--; - return true; - } - -} diff --git a/h2/src/main/org/h2/index/PageBtreeIndex.java b/h2/src/main/org/h2/index/PageBtreeIndex.java deleted file mode 100644 index 67a750e50d..0000000000 --- a/h2/src/main/org/h2/index/PageBtreeIndex.java +++ /dev/null @@ -1,494 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.api.ErrorCode; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.PageStoreTable; -import org.h2.table.TableFilter; -import org.h2.util.MathUtils; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * This is the most common type of index, a b tree index. - * Only the data of the indexed columns are stored in the index. - */ -public class PageBtreeIndex extends PageIndex { - - private static int memoryChangeRequired; - - private final PageStore store; - private final PageStoreTable tableData; - private final boolean needRebuild; - private long rowCount; - private int memoryPerPage; - private int memoryCount; - - public PageBtreeIndex(PageStoreTable table, int id, String indexName, - IndexColumn[] columns, - IndexType indexType, boolean create, Session session) { - super(table, id, indexName, columns, indexType); - if (!database.isStarting() && create) { - checkIndexColumnTypes(columns); - } - // int test; - // trace.setLevel(TraceSystem.DEBUG); - tableData = table; - if (!database.isPersistent() || id < 0) { - throw DbException.throwInternalError(indexName); - } - this.store = database.getPageStore(); - store.addIndex(this); - if (create) { - // new index - rootPageId = store.allocatePage(); - // TODO currently the head position is stored in the log - // it should not for new tables, otherwise redo of other operations - // must ensure this page is not used for other things - store.addMeta(this, session); - PageBtreeLeaf root = PageBtreeLeaf.create(this, rootPageId, PageBtree.ROOT); - 
store.logUndo(root, null); - store.update(root); - } else { - rootPageId = store.getRootPageId(id); - PageBtree root = getPage(rootPageId); - rowCount = root.getRowCount(); - } - this.needRebuild = create || (rowCount == 0 && store.isRecoveryRunning()); - if (trace.isDebugEnabled()) { - trace.debug("opened {0} rows: {1}", getName() , rowCount); - } - memoryPerPage = (Constants.MEMORY_PAGE_BTREE + store.getPageSize()) >> 2; - } - - @Override - public void add(Session session, Row row) { - if (trace.isDebugEnabled()) { - trace.debug("{0} add {1}", getName(), row); - } - // safe memory - SearchRow newRow = getSearchRow(row); - try { - addRow(newRow); - } finally { - store.incrementChangeCount(); - } - } - - private void addRow(SearchRow newRow) { - while (true) { - PageBtree root = getPage(rootPageId); - int splitPoint = root.addRowTry(newRow); - if (splitPoint == -1) { - break; - } - if (trace.isDebugEnabled()) { - trace.debug("split {0}", splitPoint); - } - SearchRow pivot = root.getRow(splitPoint - 1); - store.logUndo(root, root.data); - PageBtree page1 = root; - PageBtree page2 = root.split(splitPoint); - store.logUndo(page2, null); - int id = store.allocatePage(); - page1.setPageId(id); - page1.setParentPageId(rootPageId); - page2.setParentPageId(rootPageId); - PageBtreeNode newRoot = PageBtreeNode.create( - this, rootPageId, PageBtree.ROOT); - store.logUndo(newRoot, null); - newRoot.init(page1, pivot, page2); - store.update(page1); - store.update(page2); - store.update(newRoot); - root = newRoot; - } - invalidateRowCount(); - rowCount++; - } - - /** - * Create a search row for this row. - * - * @param row the row - * @return the search row - */ - private SearchRow getSearchRow(Row row) { - SearchRow r = table.getTemplateSimpleRow(columns.length == 1); - r.setKey(row); - for (Column c : columns) { - int idx = c.getColumnId(); - r.setValue(idx, row.getValue(idx)); - } - return r; - } - - /** - * Read the given page. 
- * - * @param id the page id - * @return the page - */ - PageBtree getPage(int id) { - Page p = store.getPage(id); - if (p == null) { - PageBtreeLeaf empty = PageBtreeLeaf.create(this, id, PageBtree.ROOT); - // could have been created before, but never committed - store.logUndo(empty, null); - store.update(empty); - return empty; - } else if (!(p instanceof PageBtree)) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, String.valueOf(p)); - } - return (PageBtree) p; - } - - @Override - public boolean canGetFirstOrLast() { - return true; - } - - @Override - public Cursor findNext(Session session, SearchRow first, SearchRow last) { - return find(session, first, true, last); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return find(session, first, false, last); - } - - private Cursor find(Session session, SearchRow first, boolean bigger, - SearchRow last) { - if (store == null) { - throw DbException.get(ErrorCode.OBJECT_CLOSED); - } - PageBtree root = getPage(rootPageId); - PageBtreeCursor cursor = new PageBtreeCursor(session, this, last); - root.find(cursor, first, bigger); - return cursor; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - if (first) { - // TODO optimization: this loops through NULL elements - Cursor cursor = find(session, null, false, null); - while (cursor.next()) { - SearchRow row = cursor.getSearchRow(); - Value v = row.getValue(columnIds[0]); - if (v != ValueNull.INSTANCE) { - return cursor; - } - } - return cursor; - } - PageBtree root = getPage(rootPageId); - PageBtreeCursor cursor = new PageBtreeCursor(session, this, null); - root.last(cursor); - cursor.previous(); - // TODO optimization: this loops through NULL elements - do { - SearchRow row = cursor.getSearchRow(); - if (row == null) { - break; - } - Value v = row.getValue(columnIds[0]); - if (v != ValueNull.INSTANCE) { - return cursor; - } - } while (cursor.previous()); - return cursor; - } - - 
@Override - public double getCost(Session session, int[] masks, - TableFilter[] filters, int filter, SortOrder sortOrder, - AllColumnsForPlan allColumnsSet) { - return 10 * getCostRangeIndex(masks, tableData.getRowCount(session), - filters, filter, sortOrder, false, allColumnsSet); - } - - @Override - public boolean needRebuild() { - return needRebuild; - } - - @Override - public void remove(Session session, Row row) { - if (trace.isDebugEnabled()) { - trace.debug("{0} remove {1}", getName(), row); - } - // TODO invalidate row count - // setChanged(session); - if (rowCount == 1) { - removeAllRows(); - } else { - try { - PageBtree root = getPage(rootPageId); - root.remove(row); - invalidateRowCount(); - rowCount--; - } finally { - store.incrementChangeCount(); - } - } - } - - @Override - public void remove(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("remove"); - } - removeAllRows(); - store.free(rootPageId); - store.removeMeta(this, session); - } - - @Override - public void truncate(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("truncate"); - } - removeAllRows(); - if (tableData.getContainsLargeObject()) { - database.getLobStorage().removeAllForTable(table.getId()); - } - tableData.setRowCount(0); - } - - private void removeAllRows() { - try { - PageBtree root = getPage(rootPageId); - root.freeRecursive(); - root = PageBtreeLeaf.create(this, rootPageId, PageBtree.ROOT); - store.removeFromCache(rootPageId); - store.update(root); - rowCount = 0; - } finally { - store.incrementChangeCount(); - } - } - - @Override - public void checkRename() { - // ok - } - - /** - * Get a row from the main index. 
- * - * @param session the session - * @param key the row key - * @return the row - */ - @Override - public Row getRow(Session session, long key) { - return tableData.getRow(session, key); - } - - PageStore getPageStore() { - return store; - } - - @Override - public long getRowCountApproximation() { - return tableData.getRowCountApproximation(); - } - - @Override - public long getDiskSpaceUsed() { - return tableData.getDiskSpaceUsed(); - } - - @Override - public long getRowCount(Session session) { - return rowCount; - } - - @Override - public void close(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("close"); - } - // can not close the index because it might get used afterwards, - // for example after running recovery - try { - writeRowCount(); - } finally { - store.incrementChangeCount(); - } - } - - /** - * Read a row from the data page at the given offset. - * - * @param data the data - * @param offset the offset - * @param onlyPosition whether only the position of the row is stored - * @param needData whether the row data is required - * @return the row - */ - SearchRow readRow(Data data, int offset, boolean onlyPosition, - boolean needData) { - synchronized (data) { - data.setPos(offset); - long key = data.readVarLong(); - if (onlyPosition) { - if (needData) { - return tableData.getRow(null, key); - } - SearchRow row = table.getTemplateSimpleRow(true); - row.setKey(key); - return row; - } - SearchRow row = table.getTemplateSimpleRow(columns.length == 1); - row.setKey(key); - for (Column col : columns) { - int idx = col.getColumnId(); - row.setValue(idx, data.readValue()); - } - return row; - } - } - - /** - * Get the complete row from the data index. - * - * @param key the key - * @return the row - */ - SearchRow readRow(long key) { - return tableData.getRow(null, key); - } - - /** - * Write a row to the data page at the given offset. 
- * - * @param data the data - * @param offset the offset - * @param onlyPosition whether only the position of the row is stored - * @param row the row to write - */ - void writeRow(Data data, int offset, SearchRow row, boolean onlyPosition) { - data.setPos(offset); - data.writeVarLong(row.getKey()); - if (!onlyPosition) { - for (Column col : columns) { - int idx = col.getColumnId(); - data.writeValue(row.getValue(idx)); - } - } - } - - /** - * Get the size of a row (only the part that is stored in the index). - * - * @param dummy a dummy data page to calculate the size - * @param row the row - * @param onlyPosition whether only the position of the row is stored - * @return the number of bytes - */ - int getRowSize(Data dummy, SearchRow row, boolean onlyPosition) { - int rowsize = Data.getVarLongLen(row.getKey()); - if (!onlyPosition) { - for (Column col : columns) { - Value v = row.getValue(col.getColumnId()); - rowsize += dummy.getValueLen(v); - } - } - return rowsize; - } - - @Override - public boolean canFindNext() { - return true; - } - - /** - * The root page has changed. - * - * @param session the session - * @param newPos the new position - */ - void setRootPageId(Session session, int newPos) { - store.removeMeta(this, session); - this.rootPageId = newPos; - store.addMeta(this, session); - store.addIndex(this); - } - - private void invalidateRowCount() { - PageBtree root = getPage(rootPageId); - root.setRowCountStored(PageData.UNKNOWN_ROWCOUNT); - } - - @Override - public void writeRowCount() { - if (SysProperties.MODIFY_ON_WRITE && rootPageId == 0) { - // currently creating the index - return; - } - PageBtree root = getPage(rootPageId); - root.setRowCountStored(MathUtils.convertLongToInt(rowCount)); - } - - /** - * Check whether the given row contains data. 
- * - * @param row the row - * @return true if it contains data - */ - boolean hasData(SearchRow row) { - return row.getValue(columns[0].getColumnId()) != null; - } - - int getMemoryPerPage() { - return memoryPerPage; - } - - /** - * The memory usage of a page was changed. The new value is used to adopt - * the average estimated memory size of a page. - * - * @param x the new memory size - */ - void memoryChange(int x) { - if (memoryCount < Constants.MEMORY_FACTOR) { - memoryPerPage += (x - memoryPerPage) / ++memoryCount; - } else { - memoryPerPage += (x > memoryPerPage ? 1 : -1) + - ((x - memoryPerPage) / Constants.MEMORY_FACTOR); - } - } - - /** - * Check if calculating the memory is required. - * - * @return true if it is - */ - static boolean isMemoryChangeRequired() { - if (memoryChangeRequired-- <= 0) { - memoryChangeRequired = 10; - return true; - } - return false; - } - -} diff --git a/h2/src/main/org/h2/index/PageBtreeLeaf.java b/h2/src/main/org/h2/index/PageBtreeLeaf.java deleted file mode 100644 index 99355bfb03..0000000000 --- a/h2/src/main/org/h2/index/PageBtreeLeaf.java +++ /dev/null @@ -1,402 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.Arrays; -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.SearchRow; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; - -/** - * A b-tree leaf page that contains index data. Format: - *
            - *
          • page type: byte
          • - *
          • checksum: short
          • - *
          • parent page id (0 for root): int
          • - *
          • index id: varInt
          • - *
          • entry count: short
          • - *
          • list of offsets: short
          • - *
          • data (key: varLong, value,...)
          • - *
          - */ -public class PageBtreeLeaf extends PageBtree { - - private static final int OFFSET_LENGTH = 2; - - private final boolean optimizeUpdate; - private boolean writtenData; - - private PageBtreeLeaf(PageBtreeIndex index, int pageId, Data data) { - super(index, pageId, data); - this.optimizeUpdate = index.getDatabase().getSettings().optimizeUpdate; - } - - /** - * Read a b-tree leaf page. - * - * @param index the index - * @param data the data - * @param pageId the page id - * @return the page - */ - public static Page read(PageBtreeIndex index, Data data, int pageId) { - PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, data); - p.read(); - return p; - } - - /** - * Create a new page. - * - * @param index the index - * @param pageId the page id - * @param parentPageId the parent - * @return the page - */ - static PageBtreeLeaf create(PageBtreeIndex index, int pageId, - int parentPageId) { - PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, index.getPageStore() - .createData()); - index.getPageStore().logUndo(p, null); - p.rows = SearchRow.EMPTY_ARRAY; - p.parentPageId = parentPageId; - p.writeHead(); - p.start = p.data.length(); - return p; - } - - private void read() { - data.reset(); - int type = data.readByte(); - data.readShortInt(); - this.parentPageId = data.readInt(); - onlyPosition = (type & Page.FLAG_LAST) == 0; - int indexId = data.readVarInt(); - if (indexId != index.getId()) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "page:" + getPos() + " expected index:" + index.getId() + - "got:" + indexId); - } - entryCount = data.readShortInt(); - offsets = new int[entryCount]; - rows = new SearchRow[entryCount]; - for (int i = 0; i < entryCount; i++) { - offsets[i] = data.readShortInt(); - } - start = data.length(); - written = true; - writtenData = true; - } - - @Override - int addRowTry(SearchRow row) { - int x = addRow(row, true); - memoryChange(); - return x; - } - - private int addRow(SearchRow row, boolean tryOnly) { - int rowLength 
= index.getRowSize(data, row, onlyPosition); - int pageSize = index.getPageStore().getPageSize(); - int last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; - if (last - rowLength < start + OFFSET_LENGTH) { - if (tryOnly && entryCount > 1) { - int x = find(row, false, true, true); - if (entryCount < 5) { - // required, otherwise the index doesn't work correctly - return entryCount / 2; - } - // split near the insertion point to better fill pages - // split in half would be: - // return entryCount / 2; - int third = entryCount / 3; - return x < third ? third : x >= 2 * third ? 2 * third : x; - } - readAllRows(); - writtenData = false; - onlyPosition = true; - // change the offsets (now storing only positions) - int o = pageSize; - for (int i = 0; i < entryCount; i++) { - o -= index.getRowSize(data, getRow(i), true); - offsets[i] = o; - } - last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; - rowLength = index.getRowSize(data, row, true); - if (last - rowLength < start + OFFSET_LENGTH) { - throw DbException.throwInternalError(); - } - } - index.getPageStore().logUndo(this, data); - if (!optimizeUpdate) { - readAllRows(); - } - changeCount = index.getPageStore().getChangeCount(); - written = false; - int x; - if (entryCount == 0) { - x = 0; - } else { - x = find(row, false, true, true); - } - start += OFFSET_LENGTH; - int offset = (x == 0 ? 
pageSize : offsets[x - 1]) - rowLength; - if (optimizeUpdate && writtenData) { - if (entryCount > 0) { - byte[] d = data.getBytes(); - int dataStart = offsets[entryCount - 1]; - System.arraycopy(d, dataStart, d, dataStart - rowLength, - offset - dataStart + rowLength); - } - index.writeRow(data, offset, row, onlyPosition); - } - offsets = insert(offsets, entryCount, x, offset); - add(offsets, x + 1, entryCount + 1, -rowLength); - rows = insert(rows, entryCount, x, row); - entryCount++; - index.getPageStore().update(this); - return -1; - } - - private void removeRow(int at) { - if (!optimizeUpdate) { - readAllRows(); - } - index.getPageStore().logUndo(this, data); - entryCount--; - written = false; - changeCount = index.getPageStore().getChangeCount(); - if (entryCount <= 0) { - DbException.throwInternalError(Integer.toString(entryCount)); - } - int startNext = at > 0 ? offsets[at - 1] : index.getPageStore().getPageSize(); - int rowLength = startNext - offsets[at]; - start -= OFFSET_LENGTH; - - if (optimizeUpdate) { - if (writtenData) { - byte[] d = data.getBytes(); - int dataStart = offsets[entryCount]; - System.arraycopy(d, dataStart, d, - dataStart + rowLength, offsets[at] - dataStart); - Arrays.fill(d, dataStart, dataStart + rowLength, (byte) 0); - } - } - - offsets = remove(offsets, entryCount + 1, at); - add(offsets, at, entryCount, rowLength); - rows = remove(rows, entryCount + 1, at); - } - - int getEntryCount() { - return entryCount; - } - - @Override - PageBtree split(int splitPoint) { - int newPageId = index.getPageStore().allocatePage(); - PageBtreeLeaf p2 = PageBtreeLeaf.create(index, newPageId, parentPageId); - while (splitPoint < entryCount) { - p2.addRow(getRow(splitPoint), false); - removeRow(splitPoint); - } - memoryChange(); - p2.memoryChange(); - return p2; - } - - @Override - PageBtreeLeaf getFirstLeaf() { - return this; - } - - @Override - PageBtreeLeaf getLastLeaf() { - return this; - } - - @Override - SearchRow remove(SearchRow row) { - int 
at = find(row, false, false, true); - SearchRow delete = getRow(at); - if (index.compareRows(row, delete) != 0 || delete.getKey() != row.getKey()) { - throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, - index.getSQL(new StringBuilder(), false).append(": ").append(row).toString()); - } - index.getPageStore().logUndo(this, data); - if (entryCount == 1) { - // the page is now empty - return row; - } - removeRow(at); - memoryChange(); - index.getPageStore().update(this); - if (at == entryCount) { - // the last row changed - return getRow(at - 1); - } - // the last row didn't change - return null; - } - - @Override - void freeRecursive() { - index.getPageStore().logUndo(this, data); - index.getPageStore().free(getPos()); - } - - @Override - int getRowCount() { - return entryCount; - } - - @Override - void setRowCountStored(int rowCount) { - // ignore - } - - @Override - public void write() { - writeData(); - index.getPageStore().writePage(getPos(), data); - } - - private void writeHead() { - data.reset(); - data.writeByte((byte) (Page.TYPE_BTREE_LEAF | - (onlyPosition ? 
0 : Page.FLAG_LAST))); - data.writeShortInt(0); - data.writeInt(parentPageId); - data.writeVarInt(index.getId()); - data.writeShortInt(entryCount); - } - - private void writeData() { - if (written) { - return; - } - if (!optimizeUpdate) { - readAllRows(); - } - writeHead(); - for (int i = 0; i < entryCount; i++) { - data.writeShortInt(offsets[i]); - } - if (!writtenData || !optimizeUpdate) { - for (int i = 0; i < entryCount; i++) { - index.writeRow(data, offsets[i], rows[i], onlyPosition); - } - writtenData = true; - } - written = true; - memoryChange(); - } - - @Override - void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) { - int i = find(first, bigger, false, false); - if (i > entryCount) { - if (parentPageId == PageBtree.ROOT) { - return; - } - PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId); - next.find(cursor, first, bigger); - return; - } - cursor.setCurrent(this, i); - } - - @Override - void last(PageBtreeCursor cursor) { - cursor.setCurrent(this, entryCount - 1); - } - - @Override - void remapChildren() { - // nothing to do - } - - /** - * Set the cursor to the first row of the next page. - * - * @param cursor the cursor - */ - void nextPage(PageBtreeCursor cursor) { - if (parentPageId == PageBtree.ROOT) { - cursor.setCurrent(null, 0); - return; - } - PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId); - next.nextPage(cursor, getPos()); - } - - /** - * Set the cursor to the last row of the previous page. 
- * - * @param cursor the cursor - */ - void previousPage(PageBtreeCursor cursor) { - if (parentPageId == PageBtree.ROOT) { - cursor.setCurrent(null, 0); - return; - } - PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId); - next.previousPage(cursor, getPos()); - } - - @Override - public String toString() { - return "page[" + getPos() + "] b-tree leaf table:" + - index.getId() + " entries:" + entryCount; - } - - @Override - public void moveTo(Session session, int newPos) { - PageStore store = index.getPageStore(); - readAllRows(); - PageBtreeLeaf p2 = PageBtreeLeaf.create(index, newPos, parentPageId); - store.logUndo(this, data); - store.logUndo(p2, null); - p2.rows = rows; - p2.entryCount = entryCount; - p2.offsets = offsets; - p2.onlyPosition = onlyPosition; - p2.parentPageId = parentPageId; - p2.start = start; - store.update(p2); - if (parentPageId == ROOT) { - index.setRootPageId(session, newPos); - } else { - PageBtreeNode p = (PageBtreeNode) store.getPage(parentPageId); - p.moveChild(getPos(), newPos); - } - store.free(getPos()); - } - - @Override - protected void memoryChange() { - if (!PageBtreeIndex.isMemoryChangeRequired()) { - return; - } - int memory = Constants.MEMORY_PAGE_BTREE + index.getPageStore().getPageSize(); - if (rows != null) { - memory += getEntryCount() * (4 + Constants.MEMORY_POINTER); - for (int i = 0; i < entryCount; i++) { - SearchRow r = rows[i]; - if (r != null) { - memory += r.getMemory(); - } - } - } - index.memoryChange(memory >> 2); - } - -} diff --git a/h2/src/main/org/h2/index/PageBtreeNode.java b/h2/src/main/org/h2/index/PageBtreeNode.java deleted file mode 100644 index a21ef80b7e..0000000000 --- a/h2/src/main/org/h2/index/PageBtreeNode.java +++ /dev/null @@ -1,609 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.api.DatabaseEventListener; -import org.h2.api.ErrorCode; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.SearchRow; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; -import org.h2.util.Utils; - -/** - * A b-tree node page that contains index data. Format: - *
            - *
          • page type: byte
          • - *
          • checksum: short
          • - *
          • parent page id (0 for root): int
          • - *
          • index id: varInt
          • - *
          • count of all children (-1 if not known): int
          • - *
          • entry count: short
          • - *
          • rightmost child page id: int
          • - *
          • entries (child page id: int, offset: short)
          • - *
          - * The row contains the largest key of the respective child, - * meaning row[0] contains the largest key of child[0]. - */ -public class PageBtreeNode extends PageBtree { - - private static final int CHILD_OFFSET_PAIR_LENGTH = 6; - private static final int MAX_KEY_LENGTH = 10; - - private final boolean pageStoreInternalCount; - - /** - * The page ids of the children. - */ - private int[] childPageIds; - - private int rowCountStored = UNKNOWN_ROWCOUNT; - - private int rowCount = UNKNOWN_ROWCOUNT; - - private PageBtreeNode(PageBtreeIndex index, int pageId, Data data) { - super(index, pageId, data); - this.pageStoreInternalCount = index.getDatabase(). - getSettings().pageStoreInternalCount; - } - - /** - * Read a b-tree node page. - * - * @param index the index - * @param data the data - * @param pageId the page id - * @return the page - */ - public static Page read(PageBtreeIndex index, Data data, int pageId) { - PageBtreeNode p = new PageBtreeNode(index, pageId, data); - p.read(); - return p; - } - - /** - * Create a new b-tree node page. 
- * - * @param index the index - * @param pageId the page id - * @param parentPageId the parent page id - * @return the page - */ - static PageBtreeNode create(PageBtreeIndex index, int pageId, - int parentPageId) { - PageBtreeNode p = new PageBtreeNode(index, pageId, index.getPageStore() - .createData()); - index.getPageStore().logUndo(p, null); - p.parentPageId = parentPageId; - p.writeHead(); - // 4 bytes for the rightmost child page id - p.start = p.data.length() + 4; - p.rows = SearchRow.EMPTY_ARRAY; - if (p.pageStoreInternalCount) { - p.rowCount = 0; - } - return p; - } - - private void read() { - data.reset(); - int type = data.readByte(); - data.readShortInt(); - this.parentPageId = data.readInt(); - onlyPosition = (type & Page.FLAG_LAST) == 0; - int indexId = data.readVarInt(); - if (indexId != index.getId()) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "page:" + getPos() + " expected index:" + index.getId() + - "got:" + indexId); - } - rowCount = rowCountStored = data.readInt(); - entryCount = data.readShortInt(); - childPageIds = new int[entryCount + 1]; - childPageIds[entryCount] = data.readInt(); - rows = entryCount == 0 ? SearchRow.EMPTY_ARRAY : new SearchRow[entryCount]; - offsets = Utils.newIntArray(entryCount); - for (int i = 0; i < entryCount; i++) { - childPageIds[i] = data.readInt(); - offsets[i] = data.readShortInt(); - } - check(); - start = data.length(); - written = true; - } - - /** - * Add a row. If it is possible this method returns -1, otherwise - * the split point. It is always possible to add two rows. 
- * - * @param row the now to add - * @return the split point of this page, or -1 if no split is required - */ - private int addChildTry(SearchRow row) { - if (entryCount < 4) { - return -1; - } - int startData; - if (onlyPosition) { - // if we only store the position, we may at most store as many - // entries as there is space for keys, because the current data area - // might get larger when _removing_ a child (if the new key needs - // more space) - and removing a child can't split this page - startData = entryCount + 1 * MAX_KEY_LENGTH; - } else { - int rowLength = index.getRowSize(data, row, onlyPosition); - int pageSize = index.getPageStore().getPageSize(); - int last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; - startData = last - rowLength; - } - if (startData < start + CHILD_OFFSET_PAIR_LENGTH) { - return entryCount / 2; - } - return -1; - } - - /** - * Add a child at the given position. - * - * @param x the position - * @param childPageId the child - * @param row the row smaller than the first row of the child and its - * children - */ - private void addChild(int x, int childPageId, SearchRow row) { - int rowLength = index.getRowSize(data, row, onlyPosition); - int pageSize = index.getPageStore().getPageSize(); - int last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; - if (last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) { - readAllRows(); - onlyPosition = true; - // change the offsets (now storing only positions) - int o = pageSize; - for (int i = 0; i < entryCount; i++) { - o -= index.getRowSize(data, getRow(i), true); - offsets[i] = o; - } - last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; - rowLength = index.getRowSize(data, row, true); - if (last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) { - throw DbException.throwInternalError(); - } - } - int offset = last - rowLength; - if (entryCount > 0) { - if (x < entryCount) { - offset = (x == 0 ? 
pageSize : offsets[x - 1]) - rowLength; - } - } - rows = insert(rows, entryCount, x, row); - offsets = insert(offsets, entryCount, x, offset); - add(offsets, x + 1, entryCount + 1, -rowLength); - childPageIds = insert(childPageIds, entryCount + 1, x + 1, childPageId); - start += CHILD_OFFSET_PAIR_LENGTH; - if (pageStoreInternalCount) { - if (rowCount != UNKNOWN_ROWCOUNT) { - rowCount += offset; - } - } - entryCount++; - written = false; - changeCount = index.getPageStore().getChangeCount(); - } - - @Override - int addRowTry(SearchRow row) { - while (true) { - int x = find(row, false, true, true); - PageBtree page = index.getPage(childPageIds[x]); - int splitPoint = page.addRowTry(row); - if (splitPoint == -1) { - break; - } - SearchRow pivot = page.getRow(splitPoint - 1); - index.getPageStore().logUndo(this, data); - int splitPoint2 = addChildTry(pivot); - if (splitPoint2 != -1) { - return splitPoint2; - } - PageBtree page2 = page.split(splitPoint); - readAllRows(); - addChild(x, page2.getPos(), pivot); - index.getPageStore().update(page); - index.getPageStore().update(page2); - index.getPageStore().update(this); - } - updateRowCount(1); - written = false; - changeCount = index.getPageStore().getChangeCount(); - return -1; - } - - private void updateRowCount(int offset) { - if (rowCount != UNKNOWN_ROWCOUNT) { - rowCount += offset; - } - if (rowCountStored != UNKNOWN_ROWCOUNT) { - rowCountStored = UNKNOWN_ROWCOUNT; - index.getPageStore().logUndo(this, data); - if (written) { - writeHead(); - } - index.getPageStore().update(this); - } - } - - @Override - PageBtree split(int splitPoint) { - int newPageId = index.getPageStore().allocatePage(); - PageBtreeNode p2 = PageBtreeNode.create(index, newPageId, parentPageId); - index.getPageStore().logUndo(this, data); - if (onlyPosition) { - // TODO optimize: maybe not required - p2.onlyPosition = true; - } - int firstChild = childPageIds[splitPoint]; - readAllRows(); - while (splitPoint < entryCount) { - 
p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], getRow(splitPoint)); - removeChild(splitPoint); - } - int lastChild = childPageIds[splitPoint - 1]; - removeChild(splitPoint - 1); - childPageIds[splitPoint - 1] = lastChild; - if (p2.childPageIds == null) { - p2.childPageIds = new int[1]; - } - p2.childPageIds[0] = firstChild; - p2.remapChildren(); - return p2; - } - - @Override - protected void remapChildren() { - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageBtree p = index.getPage(child); - p.setParentPageId(getPos()); - index.getPageStore().update(p); - } - } - - /** - * Initialize the page. - * - * @param page1 the first child page - * @param pivot the pivot key - * @param page2 the last child page - */ - void init(PageBtree page1, SearchRow pivot, PageBtree page2) { - entryCount = 0; - childPageIds = new int[] { page1.getPos() }; - rows = SearchRow.EMPTY_ARRAY; - offsets = Utils.EMPTY_INT_ARRAY; - addChild(0, page2.getPos(), pivot); - if (pageStoreInternalCount) { - rowCount = page1.getRowCount() + page2.getRowCount(); - } - check(); - } - - @Override - void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) { - int i = find(first, bigger, false, false); - if (i > entryCount) { - if (parentPageId == PageBtree.ROOT) { - return; - } - PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId); - next.find(cursor, first, bigger); - return; - } - PageBtree page = index.getPage(childPageIds[i]); - page.find(cursor, first, bigger); - } - - @Override - void last(PageBtreeCursor cursor) { - int child = childPageIds[entryCount]; - index.getPage(child).last(cursor); - } - - @Override - PageBtreeLeaf getFirstLeaf() { - int child = childPageIds[0]; - return index.getPage(child).getFirstLeaf(); - } - - @Override - PageBtreeLeaf getLastLeaf() { - int child = childPageIds[entryCount]; - return index.getPage(child).getLastLeaf(); - } - - @Override - SearchRow remove(SearchRow row) { - int at = find(row, false, false, 
true); - // merge is not implemented to allow concurrent usage - // TODO maybe implement merge - PageBtree page = index.getPage(childPageIds[at]); - SearchRow last = page.remove(row); - index.getPageStore().logUndo(this, data); - updateRowCount(-1); - written = false; - changeCount = index.getPageStore().getChangeCount(); - if (last == null) { - // the last row didn't change - nothing to do - return null; - } else if (last == row) { - // this child is now empty - index.getPageStore().free(page.getPos()); - if (entryCount < 1) { - // no more children - this page is empty as well - return row; - } - if (at == entryCount) { - // removing the last child - last = getRow(at - 1); - } else { - last = null; - } - removeChild(at); - index.getPageStore().update(this); - return last; - } - // the last row is in the last child - if (at == entryCount) { - return last; - } - int child = childPageIds[at]; - removeChild(at); - // TODO this can mean only the position is now stored - // should split at the next possible moment - addChild(at, child, last); - // remove and add swapped two children, fix that - int temp = childPageIds[at]; - childPageIds[at] = childPageIds[at + 1]; - childPageIds[at + 1] = temp; - index.getPageStore().update(this); - return null; - } - - @Override - int getRowCount() { - if (rowCount == UNKNOWN_ROWCOUNT) { - int count = 0; - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageBtree page = index.getPage(child); - count += page.getRowCount(); - index.getDatabase().setProgress( - DatabaseEventListener.STATE_SCAN_FILE, - index.getName(), count, Integer.MAX_VALUE); - } - rowCount = count; - } - return rowCount; - } - - @Override - void setRowCountStored(int rowCount) { - if (rowCount < 0 && pageStoreInternalCount) { - return; - } - this.rowCount = rowCount; - if (rowCountStored != rowCount) { - rowCountStored = rowCount; - index.getPageStore().logUndo(this, data); - if (written) { - changeCount = 
index.getPageStore().getChangeCount(); - writeHead(); - } - index.getPageStore().update(this); - } - } - - private void check() { - if (SysProperties.CHECK) { - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - if (child == 0) { - DbException.throwInternalError(); - } - } - } - } - - @Override - public void write() { - check(); - writeData(); - index.getPageStore().writePage(getPos(), data); - } - - private void writeHead() { - data.reset(); - data.writeByte((byte) (Page.TYPE_BTREE_NODE | - (onlyPosition ? 0 : Page.FLAG_LAST))); - data.writeShortInt(0); - data.writeInt(parentPageId); - data.writeVarInt(index.getId()); - data.writeInt(rowCountStored); - data.writeShortInt(entryCount); - } - - private void writeData() { - if (written) { - return; - } - readAllRows(); - writeHead(); - data.writeInt(childPageIds[entryCount]); - for (int i = 0; i < entryCount; i++) { - data.writeInt(childPageIds[i]); - data.writeShortInt(offsets[i]); - } - for (int i = 0; i < entryCount; i++) { - index.writeRow(data, offsets[i], rows[i], onlyPosition); - } - written = true; - } - - @Override - void freeRecursive() { - index.getPageStore().logUndo(this, data); - index.getPageStore().free(getPos()); - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - index.getPage(child).freeRecursive(); - } - } - - private void removeChild(int i) { - readAllRows(); - entryCount--; - if (pageStoreInternalCount) { - updateRowCount(-index.getPage(childPageIds[i]).getRowCount()); - } - written = false; - changeCount = index.getPageStore().getChangeCount(); - if (entryCount < 0) { - DbException.throwInternalError(Integer.toString(entryCount)); - } - if (entryCount > i) { - int startNext = i > 0 ? 
offsets[i - 1] : index.getPageStore().getPageSize(); - int rowLength = startNext - offsets[i]; - add(offsets, i, entryCount + 1, rowLength); - } - rows = remove(rows, entryCount + 1, i); - offsets = remove(offsets, entryCount + 1, i); - childPageIds = remove(childPageIds, entryCount + 2, i); - start -= CHILD_OFFSET_PAIR_LENGTH; - } - - /** - * Set the cursor to the first row of the next page. - * - * @param cursor the cursor - * @param pageId id of the next page - */ - void nextPage(PageBtreeCursor cursor, int pageId) { - int i; - // TODO maybe keep the index in the child page (transiently) - for (i = 0; i < entryCount + 1; i++) { - if (childPageIds[i] == pageId) { - i++; - break; - } - } - if (i > entryCount) { - if (parentPageId == PageBtree.ROOT) { - cursor.setCurrent(null, 0); - return; - } - PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId); - next.nextPage(cursor, getPos()); - return; - } - PageBtree page = index.getPage(childPageIds[i]); - PageBtreeLeaf leaf = page.getFirstLeaf(); - cursor.setCurrent(leaf, 0); - } - - /** - * Set the cursor to the last row of the previous page. 
- * - * @param cursor the cursor - * @param pageId id of the previous page - */ - void previousPage(PageBtreeCursor cursor, int pageId) { - int i; - // TODO maybe keep the index in the child page (transiently) - for (i = entryCount; i >= 0; i--) { - if (childPageIds[i] == pageId) { - i--; - break; - } - } - if (i < 0) { - if (parentPageId == PageBtree.ROOT) { - cursor.setCurrent(null, 0); - return; - } - PageBtreeNode previous = (PageBtreeNode) index.getPage(parentPageId); - previous.previousPage(cursor, getPos()); - return; - } - PageBtree page = index.getPage(childPageIds[i]); - PageBtreeLeaf leaf = page.getLastLeaf(); - cursor.setCurrent(leaf, leaf.entryCount - 1); - } - - - @Override - public String toString() { - return "page[" + getPos() + "] b-tree node table:" + - index.getId() + " entries:" + entryCount; - } - - @Override - public void moveTo(Session session, int newPos) { - PageStore store = index.getPageStore(); - store.logUndo(this, data); - PageBtreeNode p2 = PageBtreeNode.create(index, newPos, parentPageId); - readAllRows(); - p2.rowCountStored = rowCountStored; - p2.rowCount = rowCount; - p2.childPageIds = childPageIds; - p2.rows = rows; - p2.entryCount = entryCount; - p2.offsets = offsets; - p2.onlyPosition = onlyPosition; - p2.parentPageId = parentPageId; - p2.start = start; - store.update(p2); - if (parentPageId == ROOT) { - index.setRootPageId(session, newPos); - } else { - Page p = store.getPage(parentPageId); - if (!(p instanceof PageBtreeNode)) { - throw DbException.throwInternalError(); - } - PageBtreeNode n = (PageBtreeNode) p; - n.moveChild(getPos(), newPos); - } - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageBtree p = index.getPage(child); - p.setParentPageId(newPos); - store.update(p); - } - store.free(getPos()); - } - - /** - * One of the children has moved to a new page. 
- * - * @param oldPos the old position - * @param newPos the new position - */ - void moveChild(int oldPos, int newPos) { - for (int i = 0; i < entryCount + 1; i++) { - if (childPageIds[i] == oldPos) { - index.getPageStore().logUndo(this, data); - written = false; - changeCount = index.getPageStore().getChangeCount(); - childPageIds[i] = newPos; - index.getPageStore().update(this); - return; - } - } - throw DbException.throwInternalError(oldPos + " " + newPos); - } - -} \ No newline at end of file diff --git a/h2/src/main/org/h2/index/PageData.java b/h2/src/main/org/h2/index/PageData.java deleted file mode 100644 index dc39b0640c..0000000000 --- a/h2/src/main/org/h2/index/PageData.java +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.result.Row; -import org.h2.store.Data; -import org.h2.store.Page; - -/** - * A page that contains data rows. - */ -abstract class PageData extends Page { - - /** - * The position of the parent page id. - */ - static final int START_PARENT = 3; - - /** - * This is a root page. - */ - static final int ROOT = 0; - - /** - * Indicator that the row count is not known. - */ - static final int UNKNOWN_ROWCOUNT = -1; - - /** - * The index. - */ - protected final PageDataIndex index; - - /** - * The page number of the parent. - */ - protected int parentPageId; - - /** - * The data page. - */ - protected final Data data; - - /** - * The number of entries. - */ - protected int entryCount; - - /** - * The row keys. - */ - protected long[] keys; - - /** - * Whether the data page is up-to-date. - */ - protected boolean written; - - /** - * The estimated heap memory used by this object, in number of double words - * (4 bytes each). 
- */ - private final int memoryEstimated; - - PageData(PageDataIndex index, int pageId, Data data) { - this.index = index; - this.data = data; - setPos(pageId); - memoryEstimated = index.getMemoryPerPage(); - } - - /** - * Get the real row count. If required, this will read all child pages. - * - * @return the row count - */ - abstract int getRowCount(); - - /** - * Set the stored row count. This will write the page. - * - * @param rowCount the stored row count - */ - abstract void setRowCountStored(int rowCount); - - /** - * Get the used disk space for this index. - * - * @return the estimated number of bytes - */ - abstract long getDiskSpaceUsed(); - - /** - * Find an entry by key. - * - * @param key the key (may not exist) - * @return the matching or next index - */ - int find(long key) { - int l = 0, r = entryCount; - while (l < r) { - int i = (l + r) >>> 1; - long k = keys[i]; - if (k == key) { - return i; - } else if (k > key) { - r = i; - } else { - l = i + 1; - } - } - return l; - } - - /** - * Add a row if possible. If it is possible this method returns -1, - * otherwise the split point. It is always possible to add one row. - * - * @param row the now to add - * @return the split point of this page, or -1 if no split is required - */ - abstract int addRowTry(Row row); - - /** - * Get a cursor. - * - * @param session the session - * @param minKey the smallest key - * @param maxKey the largest key - * @return the cursor - */ - abstract Cursor find(Session session, long minKey, long maxKey); - - /** - * Get the key at this position. - * - * @param at the index - * @return the key - */ - long getKey(int at) { - return keys[at]; - } - - /** - * Split the index page at the given point. - * - * @param splitPoint the index where to split - * @return the new page that contains about half the entries - */ - abstract PageData split(int splitPoint); - - /** - * Change the page id. 
- * - * @param id the new page id - */ - void setPageId(int id) { - int old = getPos(); - index.getPageStore().removeFromCache(getPos()); - setPos(id); - index.getPageStore().logUndo(this, null); - remapChildren(old); - } - - /** - * Get the last key of a page. - * - * @return the last key - */ - abstract long getLastKey(); - - /** - * Get the first child leaf page of a page. - * - * @return the page - */ - abstract PageDataLeaf getFirstLeaf(); - - /** - * Change the parent page id. - * - * @param id the new parent page id - */ - void setParentPageId(int id) { - index.getPageStore().logUndo(this, data); - parentPageId = id; - if (written) { - changeCount = index.getPageStore().getChangeCount(); - data.setInt(START_PARENT, parentPageId); - } - } - - /** - * Update the parent id of all children. - * - * @param old the previous position - */ - abstract void remapChildren(int old); - - /** - * Remove a row. - * - * @param key the key of the row to remove - * @return true if this page is now empty - */ - abstract boolean remove(long key); - - /** - * Free this page and all child pages. - */ - abstract void freeRecursive(); - - /** - * Get the row for the given key. - * - * @param key the key - * @return the row - */ - abstract Row getRowWithKey(long key); - - /** - * Get the estimated heap memory size. 
- * - * @return number of double words (4 bytes each) - */ - @Override - public int getMemory() { - // need to always return the same value for the same object (otherwise - // the cache size would change after adding and then removing the same - // page from the cache) but index.getMemoryPerPage() can adopt according - // to how much memory a row needs on average - return memoryEstimated; - } - - int getParentPageId() { - return parentPageId; - } - - @Override - public boolean canRemove() { - return changeCount < index.getPageStore().getChangeCount(); - } - -} diff --git a/h2/src/main/org/h2/index/PageDataCursor.java b/h2/src/main/org/h2/index/PageDataCursor.java deleted file mode 100644 index 812c093981..0000000000 --- a/h2/src/main/org/h2/index/PageDataCursor.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; - -/** - * The cursor implementation for the page scan index. 
- */ -class PageDataCursor implements Cursor { - - private PageDataLeaf current; - private int idx; - private final long maxKey; - private Row row; - - PageDataCursor(PageDataLeaf current, int idx, long maxKey) { - this.current = current; - this.idx = idx; - this.maxKey = maxKey; - } - - @Override - public Row get() { - return row; - } - - @Override - public SearchRow getSearchRow() { - return get(); - } - - @Override - public boolean next() { - nextRow(); - return checkMax(); - } - - private boolean checkMax() { - if (row != null) { - if (maxKey != Long.MAX_VALUE) { - long x = current.index.getKey(row, Long.MAX_VALUE, Long.MAX_VALUE); - if (x > maxKey) { - row = null; - return false; - } - } - return true; - } - return false; - } - - private void nextRow() { - if (idx >= current.getEntryCount()) { - current = current.getNextPage(); - idx = 0; - if (current == null) { - row = null; - return; - } - } - row = current.getRowAt(idx); - idx++; - } - - @Override - public boolean previous() { - throw DbException.throwInternalError(toString()); - } - -} diff --git a/h2/src/main/org/h2/index/PageDataIndex.java b/h2/src/main/org/h2/index/PageDataIndex.java deleted file mode 100644 index eff330ba7d..0000000000 --- a/h2/src/main/org/h2/index/PageDataIndex.java +++ /dev/null @@ -1,510 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.api.ErrorCode; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.store.Page; -import org.h2.store.PageStore; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.PageStoreTable; -import org.h2.table.TableFilter; -import org.h2.util.MathUtils; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * The scan index allows to access a row by key. It can be used to iterate over - * all rows of a table. Each regular table has one such object, even if no - * primary key or indexes are defined. - */ -public class PageDataIndex extends PageIndex { - - private final PageStore store; - private final PageStoreTable tableData; - private long lastKey; - private long rowCount; - private int mainIndexColumn = -1; - private DbException fastDuplicateKeyException; - - /** - * The estimated heap memory per page, in number of double words (4 bytes - * each). 
- */ - private int memoryPerPage; - private int memoryCount; - - public PageDataIndex(PageStoreTable table, int id, IndexColumn[] columns, - IndexType indexType, boolean create, Session session) { - super(table, id, table.getName() + "_DATA", columns, indexType); - - // trace = database.getTrace(Trace.PAGE_STORE + "_di"); - // trace.setLevel(TraceSystem.DEBUG); - tableData = table; - this.store = database.getPageStore(); - store.addIndex(this); - if (!database.isPersistent()) { - throw DbException.throwInternalError(table.getName()); - } - if (create) { - rootPageId = store.allocatePage(); - store.addMeta(this, session); - PageDataLeaf root = PageDataLeaf.create(this, rootPageId, PageData.ROOT); - store.update(root); - } else { - rootPageId = store.getRootPageId(id); - PageData root = getPage(rootPageId, 0); - lastKey = root.getLastKey(); - rowCount = root.getRowCount(); - } - if (trace.isDebugEnabled()) { - trace.debug("{0} opened rows: {1}", this, rowCount); - } - table.setRowCount(rowCount); - memoryPerPage = (Constants.MEMORY_PAGE_DATA + store.getPageSize()) >> 2; - } - - @Override - public DbException getDuplicateKeyException(String key) { - if (fastDuplicateKeyException == null) { - fastDuplicateKeyException = super.getDuplicateKeyException(null); - } - return fastDuplicateKeyException; - } - - @Override - public void add(Session session, Row row) { - boolean retry = false; - if (mainIndexColumn != -1) { - row.setKey(row.getValue(mainIndexColumn).getLong()); - } else { - if (row.getKey() == 0) { - row.setKey((int) ++lastKey); - retry = true; - } - } - if (tableData.getContainsLargeObject()) { - for (int i = 0, len = row.getColumnCount(); i < len; i++) { - Value v = row.getValue(i); - Value v2 = v.copy(database, getId()); - if (v2.isLinkedToTable()) { - session.removeAtCommitStop(v2); - } - if (v != v2) { - row.setValue(i, v2); - } - } - } - // when using auto-generated values, it's possible that multiple - // tries are required (specially if there was 
originally a primary key) - if (trace.isDebugEnabled()) { - trace.debug("{0} add {1}", getName(), row); - } - long add = 0; - while (true) { - try { - addTry(session, row); - break; - } catch (DbException e) { - if (e != fastDuplicateKeyException) { - throw e; - } - if (!retry) { - throw getNewDuplicateKeyException(); - } - if (add == 0) { - // in the first re-try add a small random number, - // to avoid collisions after a re-start - row.setKey((long) (row.getKey() + Math.random() * 10_000)); - } else { - row.setKey(row.getKey() + add); - } - add++; - } finally { - store.incrementChangeCount(); - } - } - lastKey = Math.max(lastKey, row.getKey()); - } - - public DbException getNewDuplicateKeyException() { - StringBuilder builder = new StringBuilder("PRIMARY KEY ON "); - table.getSQL(builder, false); - if (mainIndexColumn >= 0 && mainIndexColumn < indexColumns.length) { - builder.append('('); - indexColumns[mainIndexColumn].getSQL(builder, false).append(')'); - } - DbException e = DbException.get(ErrorCode.DUPLICATE_KEY_1, builder.toString()); - e.setSource(this); - return e; - } - - private void addTry(Session session, Row row) { - while (true) { - PageData root = getPage(rootPageId, 0); - int splitPoint = root.addRowTry(row); - if (splitPoint == -1) { - break; - } - if (trace.isDebugEnabled()) { - trace.debug("{0} split", this); - } - long pivot = splitPoint == 0 ? 
row.getKey() : root.getKey(splitPoint - 1); - PageData page1 = root; - PageData page2 = root.split(splitPoint); - int id = store.allocatePage(); - page1.setPageId(id); - page1.setParentPageId(rootPageId); - page2.setParentPageId(rootPageId); - PageDataNode newRoot = PageDataNode.create(this, rootPageId, PageData.ROOT); - newRoot.init(page1, pivot, page2); - store.update(page1); - store.update(page2); - store.update(newRoot); - root = newRoot; - } - row.setDeleted(false); - invalidateRowCount(); - rowCount++; - store.logAddOrRemoveRow(session, tableData.getId(), row, true); - } - - /** - * Read an overflow page. - * - * @param id the page id - * @return the page - */ - PageDataOverflow getPageOverflow(int id) { - Page p = store.getPage(id); - if (p instanceof PageDataOverflow) { - return (PageDataOverflow) p; - } - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - p == null ? "null" : p.toString()); - } - - /** - * Read the given page. - * - * @param id the page id - * @param parent the parent, or -1 if unknown - * @return the page - */ - PageData getPage(int id, int parent) { - Page pd = store.getPage(id); - if (pd == null) { - PageDataLeaf empty = PageDataLeaf.create(this, id, parent); - // could have been created before, but never committed - store.logUndo(empty, null); - store.update(empty); - return empty; - } else if (!(pd instanceof PageData)) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, String.valueOf(pd)); - } - PageData p = (PageData) pd; - if (parent != -1) { - if (p.getParentPageId() != parent) { - throw DbException.throwInternalError(p + - " parent " + p.getParentPageId() + " expected " + parent); - } - } - return p; - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - /** - * Get the key from the row. 
- * - * @param row the row - * @param ifEmpty the value to use if the row is empty - * @param ifNull the value to use if the column is NULL - * @return the key - */ - long getKey(SearchRow row, long ifEmpty, long ifNull) { - if (row == null) { - return ifEmpty; - } - Value v = row.getValue(mainIndexColumn); - if (v == null) { - return row.getKey(); - } else if (v == ValueNull.INSTANCE) { - return ifNull; - } - return v.getLong(); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - long from = first == null ? Long.MIN_VALUE : first.getKey(); - long to = last == null ? Long.MAX_VALUE : last.getKey(); - PageData root = getPage(rootPageId, 0); - return root.find(session, from, to); - - } - - /** - * Search for a specific row or a set of rows. - * - * @param session the session - * @param first the key of the first row - * @param last the key of the last row - * @return the cursor - */ - Cursor find(Session session, long first, long last) { - PageData root = getPage(rootPageId, 0); - return root.find(session, first, last); - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.throwInternalError(toString()); - } - - long getLastKey() { - PageData root = getPage(rootPageId, 0); - return root.getLastKey(); - } - - @Override - public double getCost(Session session, int[] masks, - TableFilter[] filters, int filter, SortOrder sortOrder, - AllColumnsForPlan allColumnsSet) { - // The +200 is so that indexes that can return the same data, but have less - // columns, will take precedence. This all works out easier in the MVStore case, - // because MVStore uses the same cost calculation code for the ScanIndex (i.e. - // the MVPrimaryIndex) and all other indices. 
- return 10 * (tableData.getRowCountApproximation() + - Constants.COST_ROW_OFFSET) + 200; - } - - @Override - public boolean needRebuild() { - return false; - } - - @Override - public void remove(Session session, Row row) { - if (tableData.getContainsLargeObject()) { - for (int i = 0, len = row.getColumnCount(); i < len; i++) { - Value v = row.getValue(i); - if (v.isLinkedToTable()) { - session.removeAtCommit(v); - } - } - } - if (trace.isDebugEnabled()) { - trace.debug("{0} remove {1}", getName(), row); - } - if (rowCount == 1) { - removeAllRows(); - } else { - try { - long key = row.getKey(); - PageData root = getPage(rootPageId, 0); - root.remove(key); - invalidateRowCount(); - rowCount--; - } finally { - store.incrementChangeCount(); - } - } - store.logAddOrRemoveRow(session, tableData.getId(), row, false); - } - - @Override - public void remove(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("{0} remove", this); - } - removeAllRows(); - store.free(rootPageId); - store.removeMeta(this, session); - } - - @Override - public void truncate(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("{0} truncate", this); - } - store.logTruncate(session, tableData.getId()); - removeAllRows(); - if (tableData.getContainsLargeObject() && tableData.isPersistData()) { - // unfortunately, the data is gone on rollback - session.commit(false); - database.getLobStorage().removeAllForTable(table.getId()); - } - tableData.setRowCount(0); - } - - private void removeAllRows() { - try { - PageData root = getPage(rootPageId, 0); - root.freeRecursive(); - root = PageDataLeaf.create(this, rootPageId, PageData.ROOT); - store.removeFromCache(rootPageId); - store.update(root); - rowCount = 0; - lastKey = 0; - } finally { - store.incrementChangeCount(); - } - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("PAGE"); - } - - @Override - public Row getRow(Session session, long key) { - return getRowWithKey(key); - } - - /** - 
* Get the row with the given key. - * - * @param key the key - * @return the row - */ - public Row getRowWithKey(long key) { - PageData root = getPage(rootPageId, 0); - return root.getRowWithKey(key); - } - - PageStore getPageStore() { - return store; - } - - @Override - public long getRowCountApproximation() { - return rowCount; - } - - @Override - public long getRowCount(Session session) { - return rowCount; - } - - @Override - public long getDiskSpaceUsed() { - PageData root = getPage(rootPageId, 0); - return root.getDiskSpaceUsed(); - } - - @Override - public String getCreateSQL() { - return null; - } - - @Override - public int getColumnIndex(Column col) { - // can not use this index - use the PageDelegateIndex instead - return -1; - } - - @Override - public boolean isFirstColumn(Column column) { - return false; - } - - @Override - public void close(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("{0} close", this); - } - // can not close the index because it might get used afterwards, - // for example after running recovery - writeRowCount(); - } - - /** - * The root page has changed. 
- * - * @param session the session - * @param newPos the new position - */ - void setRootPageId(Session session, int newPos) { - store.removeMeta(this, session); - this.rootPageId = newPos; - store.addMeta(this, session); - store.addIndex(this); - } - - public void setMainIndexColumn(int mainIndexColumn) { - this.mainIndexColumn = mainIndexColumn; - } - - public int getMainIndexColumn() { - return mainIndexColumn; - } - - @Override - public String toString() { - return getName(); - } - - private void invalidateRowCount() { - PageData root = getPage(rootPageId, 0); - root.setRowCountStored(PageData.UNKNOWN_ROWCOUNT); - } - - @Override - public void writeRowCount() { - if (SysProperties.MODIFY_ON_WRITE && rootPageId == 0) { - // currently creating the index - return; - } - try { - PageData root = getPage(rootPageId, 0); - root.setRowCountStored(MathUtils.convertLongToInt(rowCount)); - } finally { - store.incrementChangeCount(); - } - } - - @Override - public String getPlanSQL() { - return table.getSQL(new StringBuilder(), false).append(".tableScan").toString(); - } - - int getMemoryPerPage() { - return memoryPerPage; - } - - /** - * The memory usage of a page was changed. The new value is used to adopt - * the average estimated memory size of a page. - * - * @param x the new memory size - */ - void memoryChange(int x) { - if (memoryCount < Constants.MEMORY_FACTOR) { - memoryPerPage += (x - memoryPerPage) / ++memoryCount; - } else { - memoryPerPage += (x > memoryPerPage ? 1 : -1) + - ((x - memoryPerPage) / Constants.MEMORY_FACTOR); - } - } - - @Override - public boolean isRowIdIndex() { - return true; - } - -} diff --git a/h2/src/main/org/h2/index/PageDataLeaf.java b/h2/src/main/org/h2/index/PageDataLeaf.java deleted file mode 100644 index 2d9bfb98c0..0000000000 --- a/h2/src/main/org/h2/index/PageDataLeaf.java +++ /dev/null @@ -1,625 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.lang.ref.SoftReference; -import java.util.Arrays; -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; -import org.h2.value.Value; - -/** - * A leaf page that contains data of one or multiple rows. Format: - *
            - *
          • page type: byte (0)
          • - *
          • checksum: short (1-2)
          • - *
          • parent page id (0 for root): int (3-6)
          • - *
          • table id: varInt
          • - *
          • column count: varInt
          • - *
          • entry count: short
          • - *
          • with overflow: the first overflow page id: int
          • - *
          • list of key / offset pairs (key: varLong, offset: shortInt)
          • - *
          • data
          • - *
          - */ -public class PageDataLeaf extends PageData { - - private final boolean optimizeUpdate; - - /** - * The row offsets. - */ - private int[] offsets; - - /** - * The rows. - */ - private Row[] rows; - - /** - * For pages with overflow: the soft reference to the row - */ - private SoftReference rowRef; - - /** - * The page id of the first overflow page (0 if no overflow). - */ - private int firstOverflowPageId; - - /** - * The start of the data area. - */ - private int start; - - /** - * The size of the row in bytes for large rows. - */ - private int overflowRowSize; - - private int columnCount; - - private int memoryData; - - private boolean writtenData; - - private PageDataLeaf(PageDataIndex index, int pageId, Data data) { - super(index, pageId, data); - this.optimizeUpdate = index.getDatabase().getSettings().optimizeUpdate; - } - - /** - * Create a new page. - * - * @param index the index - * @param pageId the page id - * @param parentPageId the parent - * @return the page - */ - static PageDataLeaf create(PageDataIndex index, int pageId, int parentPageId) { - PageDataLeaf p = new PageDataLeaf(index, pageId, index.getPageStore() - .createData()); - index.getPageStore().logUndo(p, null); - p.rows = Row.EMPTY_ARRAY; - p.parentPageId = parentPageId; - p.columnCount = index.getTable().getColumns().length; - p.writeHead(); - p.start = p.data.length(); - return p; - } - - /** - * Read a data leaf page. 
- * - * @param index the index - * @param data the data - * @param pageId the page id - * @return the page - */ - public static Page read(PageDataIndex index, Data data, int pageId) { - PageDataLeaf p = new PageDataLeaf(index, pageId, data); - p.read(); - return p; - } - - private void read() { - data.reset(); - int type = data.readByte(); - data.readShortInt(); - this.parentPageId = data.readInt(); - int tableId = data.readVarInt(); - if (tableId != index.getId()) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "page:" + getPos() + " expected table:" + index.getId() + - " got:" + tableId + " type:" + type); - } - columnCount = data.readVarInt(); - entryCount = data.readShortInt(); - offsets = new int[entryCount]; - keys = new long[entryCount]; - rows = new Row[entryCount]; - if (type == Page.TYPE_DATA_LEAF) { - if (entryCount != 1) { - DbException.throwInternalError("entries: " + entryCount); - } - firstOverflowPageId = data.readInt(); - } - for (int i = 0; i < entryCount; i++) { - keys[i] = data.readVarLong(); - offsets[i] = data.readShortInt(); - } - start = data.length(); - written = true; - writtenData = true; - } - - private int getRowLength(Row row) { - int size = 0; - for (int i = 0; i < columnCount; i++) { - size += data.getValueLen(row.getValue(i)); - } - return size; - } - - private int findInsertionPoint(long key) { - int x = find(key); - if (x < entryCount && keys[x] == key) { - throw index.getDuplicateKeyException(String.valueOf(key)); - } - return x; - } - - @Override - int addRowTry(Row row) { - index.getPageStore().logUndo(this, data); - int rowLength = getRowLength(row); - int pageSize = index.getPageStore().getPageSize(); - int last = entryCount == 0 ? 
pageSize : offsets[entryCount - 1]; - int keyOffsetPairLen = 2 + Data.getVarLongLen(row.getKey()); - if (entryCount > 0 && last - rowLength < start + keyOffsetPairLen) { - int x = findInsertionPoint(row.getKey()); - if (entryCount > 1) { - if (entryCount < 5) { - // required, otherwise the index doesn't work correctly - return entryCount / 2; - } - if (index.isSortedInsertMode()) { - return x < 2 ? 1 : x > entryCount - 1 ? entryCount - 1 : x; - } - // split near the insertion point to better fill pages - // split in half would be: - // return entryCount / 2; - int third = entryCount / 3; - return x < third ? third : x >= 2 * third ? 2 * third : x; - } - return x; - } - index.getPageStore().logUndo(this, data); - int x; - if (entryCount == 0) { - x = 0; - } else { - if (!optimizeUpdate) { - readAllRows(); - } - x = findInsertionPoint(row.getKey()); - } - written = false; - changeCount = index.getPageStore().getChangeCount(); - last = x == 0 ? pageSize : offsets[x - 1]; - int offset = last - rowLength; - start += keyOffsetPairLen; - offsets = insert(offsets, entryCount, x, offset); - add(offsets, x + 1, entryCount + 1, -rowLength); - keys = insert(keys, entryCount, x, row.getKey()); - rows = insert(rows, entryCount, x, row); - entryCount++; - index.getPageStore().update(this); - if (optimizeUpdate) { - if (writtenData && offset >= start) { - byte[] d = data.getBytes(); - int dataStart = offsets[entryCount - 1] + rowLength; - int dataEnd = offsets[x]; - System.arraycopy(d, dataStart, d, dataStart - rowLength, - dataEnd - dataStart + rowLength); - data.setPos(dataEnd); - for (int j = 0; j < columnCount; j++) { - data.writeValue(row.getValue(j)); - } - } - } - if (offset < start) { - writtenData = false; - if (entryCount > 1) { - DbException.throwInternalError(Integer.toString(entryCount)); - } - // need to write the overflow page id - start += 4; - int remaining = rowLength - (pageSize - start); - // fix offset - offset = start; - offsets[x] = offset; - int previous = 
getPos(); - int dataOffset = pageSize; - int page = index.getPageStore().allocatePage(); - firstOverflowPageId = page; - this.overflowRowSize = pageSize + rowLength; - writeData(); - // free up the space used by the row - Row r = rows[0]; - rowRef = new SoftReference<>(r); - rows[0] = null; - Data all = index.getPageStore().createData(); - all.checkCapacity(data.length()); - all.write(data.getBytes(), 0, data.length()); - data.truncate(index.getPageStore().getPageSize()); - do { - int type, size, next; - if (remaining <= pageSize - PageDataOverflow.START_LAST) { - type = Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST; - size = remaining; - next = 0; - } else { - type = Page.TYPE_DATA_OVERFLOW; - size = pageSize - PageDataOverflow.START_MORE; - next = index.getPageStore().allocatePage(); - } - PageDataOverflow overflow = PageDataOverflow.create(index.getPageStore(), - page, type, previous, next, all, dataOffset, size); - index.getPageStore().update(overflow); - dataOffset += size; - remaining -= size; - previous = page; - page = next; - } while (remaining > 0); - } - if (rowRef == null) { - memoryChange(true, row); - } else { - memoryChange(true, null); - } - return -1; - } - - private void removeRow(int i) { - index.getPageStore().logUndo(this, data); - written = false; - changeCount = index.getPageStore().getChangeCount(); - if (!optimizeUpdate) { - readAllRows(); - } - Row r = getRowAt(i); - if (r != null) { - memoryChange(false, r); - } - entryCount--; - if (entryCount < 0) { - DbException.throwInternalError(Integer.toString(entryCount)); - } - if (firstOverflowPageId != 0) { - start -= 4; - freeOverflow(); - firstOverflowPageId = 0; - overflowRowSize = 0; - rowRef = null; - } - int keyOffsetPairLen = 2 + Data.getVarLongLen(keys[i]); - int startNext = i > 0 ? 
offsets[i - 1] : index.getPageStore().getPageSize(); - int rowLength = startNext - offsets[i]; - if (optimizeUpdate) { - if (writtenData) { - byte[] d = data.getBytes(); - int dataStart = offsets[entryCount]; - System.arraycopy(d, dataStart, d, dataStart + rowLength, - offsets[i] - dataStart); - Arrays.fill(d, dataStart, dataStart + rowLength, (byte) 0); - } - } else { - int clearStart = offsets[entryCount]; - Arrays.fill(data.getBytes(), clearStart, clearStart + rowLength, (byte) 0); - } - start -= keyOffsetPairLen; - offsets = remove(offsets, entryCount + 1, i); - add(offsets, i, entryCount, rowLength); - keys = remove(keys, entryCount + 1, i); - rows = remove(rows, entryCount + 1, i); - } - - @Override - Cursor find(Session session, long minKey, long maxKey) { - int x = find(minKey); - return new PageDataCursor(this, x, maxKey); - } - - /** - * Get the row at the given index. - * - * @param at the index - * @return the row - */ - Row getRowAt(int at) { - Row r = rows[at]; - if (r == null) { - if (firstOverflowPageId == 0) { - r = readRow(data, offsets[at], columnCount); - } else { - if (rowRef != null) { - r = rowRef.get(); - if (r != null) { - return r; - } - } - PageStore store = index.getPageStore(); - Data buff = store.createData(); - int pageSize = store.getPageSize(); - int offset = offsets[at]; - buff.write(data.getBytes(), offset, pageSize - offset); - int next = firstOverflowPageId; - do { - PageDataOverflow page = index.getPageOverflow(next); - next = page.readInto(buff); - } while (next != 0); - overflowRowSize = pageSize + buff.length(); - r = readRow(buff, 0, columnCount); - } - r.setKey(keys[at]); - if (firstOverflowPageId != 0) { - rowRef = new SoftReference<>(r); - } else { - rows[at] = r; - memoryChange(true, r); - } - } - return r; - } - - int getEntryCount() { - return entryCount; - } - - @Override - PageData split(int splitPoint) { - int newPageId = index.getPageStore().allocatePage(); - PageDataLeaf p2 = PageDataLeaf.create(index, newPageId, 
parentPageId); - while (splitPoint < entryCount) { - int split = p2.addRowTry(getRowAt(splitPoint)); - if (split != -1) { - DbException.throwInternalError("split " + split); - } - removeRow(splitPoint); - } - return p2; - } - - @Override - long getLastKey() { - // TODO re-use keys, but remove this mechanism - if (entryCount == 0) { - return 0; - } - return getRowAt(entryCount - 1).getKey(); - } - - PageDataLeaf getNextPage() { - if (parentPageId == PageData.ROOT) { - return null; - } - PageDataNode next = (PageDataNode) index.getPage(parentPageId, -1); - return next.getNextPage(keys[entryCount - 1]); - } - - @Override - PageDataLeaf getFirstLeaf() { - return this; - } - - @Override - protected void remapChildren(int old) { - if (firstOverflowPageId == 0) { - return; - } - PageDataOverflow overflow = index.getPageOverflow(firstOverflowPageId); - overflow.setParentPageId(getPos()); - index.getPageStore().update(overflow); - } - - @Override - boolean remove(long key) { - int i = find(key); - if (keys == null || keys[i] != key) { - throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, - index.getSQL(new StringBuilder(), false).append(": ").append(key).append(' ') - .append(keys == null ? 
-1 : keys[i]).toString()); - } - index.getPageStore().logUndo(this, data); - if (entryCount == 1) { - freeRecursive(); - return true; - } - removeRow(i); - index.getPageStore().update(this); - return false; - } - - @Override - void freeRecursive() { - index.getPageStore().logUndo(this, data); - index.getPageStore().free(getPos()); - freeOverflow(); - } - - private void freeOverflow() { - if (firstOverflowPageId != 0) { - int next = firstOverflowPageId; - do { - PageDataOverflow page = index.getPageOverflow(next); - page.free(); - next = page.getNextOverflow(); - } while (next != 0); - } - } - - @Override - Row getRowWithKey(long key) { - int at = find(key); - return getRowAt(at); - } - - @Override - int getRowCount() { - return entryCount; - } - - @Override - void setRowCountStored(int rowCount) { - // ignore - } - - @Override - long getDiskSpaceUsed() { - return index.getPageStore().getPageSize(); - } - - @Override - public void write() { - writeData(); - index.getPageStore().writePage(getPos(), data); - data.truncate(index.getPageStore().getPageSize()); - } - - private void readAllRows() { - for (int i = 0; i < entryCount; i++) { - getRowAt(i); - } - } - - private void writeHead() { - data.reset(); - int type; - if (firstOverflowPageId == 0) { - type = Page.TYPE_DATA_LEAF | Page.FLAG_LAST; - } else { - type = Page.TYPE_DATA_LEAF; - } - data.writeByte((byte) type); - data.writeShortInt(0); - assert data.length() == START_PARENT; - data.writeInt(parentPageId); - data.writeVarInt(index.getId()); - data.writeVarInt(columnCount); - data.writeShortInt(entryCount); - } - - private void writeData() { - if (written) { - return; - } - if (!optimizeUpdate) { - readAllRows(); - } - writeHead(); - if (firstOverflowPageId != 0) { - data.writeInt(firstOverflowPageId); - data.checkCapacity(overflowRowSize); - } - for (int i = 0; i < entryCount; i++) { - data.writeVarLong(keys[i]); - data.writeShortInt(offsets[i]); - } - if (!writtenData || !optimizeUpdate) { - for (int i = 0; i 
< entryCount; i++) { - data.setPos(offsets[i]); - Row r = getRowAt(i); - for (int j = 0; j < columnCount; j++) { - data.writeValue(r.getValue(j)); - } - } - writtenData = true; - } - written = true; - } - - @Override - public String toString() { - return "page[" + getPos() + "] data leaf table:" + - index.getId() + " " + index.getTable().getName() + - " entries:" + entryCount + " parent:" + parentPageId + - (firstOverflowPageId == 0 ? "" : " overflow:" + firstOverflowPageId) + - " keys:" + Arrays.toString(keys) + " offsets:" + Arrays.toString(offsets); - } - - @Override - public void moveTo(Session session, int newPos) { - PageStore store = index.getPageStore(); - // load the pages into the cache, to ensure old pages - // are written - if (parentPageId != ROOT) { - store.getPage(parentPageId); - } - store.logUndo(this, data); - PageDataLeaf p2 = PageDataLeaf.create(index, newPos, parentPageId); - readAllRows(); - p2.keys = keys; - p2.overflowRowSize = overflowRowSize; - p2.firstOverflowPageId = firstOverflowPageId; - p2.rowRef = rowRef; - p2.rows = rows; - if (firstOverflowPageId != 0) { - p2.rows[0] = getRowAt(0); - } - p2.entryCount = entryCount; - p2.offsets = offsets; - p2.start = start; - p2.remapChildren(getPos()); - p2.writeData(); - p2.data.truncate(index.getPageStore().getPageSize()); - store.update(p2); - if (parentPageId == ROOT) { - index.setRootPageId(session, newPos); - } else { - PageDataNode p = (PageDataNode) store.getPage(parentPageId); - p.moveChild(getPos(), newPos); - } - store.free(getPos()); - } - - /** - * Set the overflow page id. 
- * - * @param old the old overflow page id - * @param overflow the new overflow page id - */ - void setOverflow(int old, int overflow) { - if (old != firstOverflowPageId) { - DbException.throwInternalError("move " + this + " " + firstOverflowPageId); - } - index.getPageStore().logUndo(this, data); - firstOverflowPageId = overflow; - if (written) { - changeCount = index.getPageStore().getChangeCount(); - writeHead(); - data.writeInt(firstOverflowPageId); - } - index.getPageStore().update(this); - } - - private void memoryChange(boolean add, Row r) { - int diff = r == null ? 0 : 4 + 8 + Constants.MEMORY_POINTER + r.getMemory(); - memoryData += add ? diff : -diff; - index.memoryChange((Constants.MEMORY_PAGE_DATA + - memoryData + index.getPageStore().getPageSize()) >> 2); - } - - @Override - public boolean isStream() { - return firstOverflowPageId > 0; - } - - /** - * Read a row from the data page at the given position. - * - * @param data the data page - * @param pos the position to read from - * @param columnCount the number of columns - * @return the row - */ - private Row readRow(Data data, int pos, int columnCount) { - Value[] values = new Value[columnCount]; - synchronized (data) { - data.setPos(pos); - for (int i = 0; i < columnCount; i++) { - values[i] = data.readValue(); - } - } - return index.getDatabase().createRow(values, Row.MEMORY_CALCULATE); - } - -} diff --git a/h2/src/main/org/h2/index/PageDataNode.java b/h2/src/main/org/h2/index/PageDataNode.java deleted file mode 100644 index 247ce3b5b5..0000000000 --- a/h2/src/main/org/h2/index/PageDataNode.java +++ /dev/null @@ -1,454 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.Arrays; -import org.h2.api.DatabaseEventListener; -import org.h2.api.ErrorCode; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; -import org.h2.util.Utils; - -/** - * A leaf page that contains data of one or multiple rows. Format: - *
            - *
          • page type: byte (0)
          • - *
          • checksum: short (1-2)
          • - *
          • parent page id (0 for root): int (3-6)
          • - *
          • table id: varInt
          • - *
          • count of all children (-1 if not known): int
          • - *
          • entry count: short
          • - *
          • rightmost child page id: int
          • - *
          • entries (child page id: int, key: varLong)
          • - *
          - * The key is the largest key of the respective child, meaning key[0] is the - * largest key of child[0]. - */ -public class PageDataNode extends PageData { - - /** - * The page ids of the children. - */ - private int[] childPageIds; - - private int rowCountStored = UNKNOWN_ROWCOUNT; - - private int rowCount = UNKNOWN_ROWCOUNT; - - /** - * The number of bytes used in the page - */ - private int length; - - private PageDataNode(PageDataIndex index, int pageId, Data data) { - super(index, pageId, data); - } - - /** - * Create a new page. - * - * @param index the index - * @param pageId the page id - * @param parentPageId the parent - * @return the page - */ - static PageDataNode create(PageDataIndex index, int pageId, int parentPageId) { - PageDataNode p = new PageDataNode(index, pageId, - index.getPageStore().createData()); - index.getPageStore().logUndo(p, null); - p.parentPageId = parentPageId; - p.writeHead(); - // 4 bytes for the rightmost child page id - p.length = p.data.length() + 4; - return p; - } - - /** - * Read a data node page. 
- * - * @param index the index - * @param data the data - * @param pageId the page id - * @return the page - */ - public static Page read(PageDataIndex index, Data data, int pageId) { - PageDataNode p = new PageDataNode(index, pageId, data); - p.read(); - return p; - } - - private void read() { - data.reset(); - data.readByte(); - data.readShortInt(); - this.parentPageId = data.readInt(); - int indexId = data.readVarInt(); - if (indexId != index.getId()) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "page:" + getPos() + " expected index:" + index.getId() + - "got:" + indexId); - } - rowCount = rowCountStored = data.readInt(); - entryCount = data.readShortInt(); - childPageIds = new int[entryCount + 1]; - childPageIds[entryCount] = data.readInt(); - keys = Utils.newLongArray(entryCount); - for (int i = 0; i < entryCount; i++) { - childPageIds[i] = data.readInt(); - keys[i] = data.readVarLong(); - } - length = data.length(); - check(); - written = true; - } - - private void addChild(int x, int childPageId, long key) { - index.getPageStore().logUndo(this, data); - written = false; - changeCount = index.getPageStore().getChangeCount(); - childPageIds = insert(childPageIds, entryCount + 1, x + 1, childPageId); - keys = insert(keys, entryCount, x, key); - entryCount++; - length += 4 + Data.getVarLongLen(key); - } - - @Override - int addRowTry(Row row) { - index.getPageStore().logUndo(this, data); - int keyOffsetPairLen = 4 + Data.getVarLongLen(row.getKey()); - while (true) { - int x = find(row.getKey()); - PageData page = index.getPage(childPageIds[x], getPos()); - int splitPoint = page.addRowTry(row); - if (splitPoint == -1) { - break; - } - if (length + keyOffsetPairLen > index.getPageStore().getPageSize()) { - return entryCount / 2; - } - long pivot = splitPoint == 0 ? 
row.getKey() : page.getKey(splitPoint - 1); - PageData page2 = page.split(splitPoint); - index.getPageStore().update(page); - index.getPageStore().update(page2); - addChild(x, page2.getPos(), pivot); - index.getPageStore().update(this); - } - updateRowCount(1); - return -1; - } - - private void updateRowCount(int offset) { - if (rowCount != UNKNOWN_ROWCOUNT) { - rowCount += offset; - } - if (rowCountStored != UNKNOWN_ROWCOUNT) { - rowCountStored = UNKNOWN_ROWCOUNT; - index.getPageStore().logUndo(this, data); - if (written) { - writeHead(); - } - index.getPageStore().update(this); - } - } - - @Override - Cursor find(Session session, long minKey, long maxKey) { - int x = find(minKey); - int child = childPageIds[x]; - return index.getPage(child, getPos()).find(session, minKey, maxKey); - } - - @Override - PageData split(int splitPoint) { - int newPageId = index.getPageStore().allocatePage(); - PageDataNode p2 = PageDataNode.create(index, newPageId, parentPageId); - int firstChild = childPageIds[splitPoint]; - while (splitPoint < entryCount) { - p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], keys[splitPoint]); - removeChild(splitPoint); - } - int lastChild = childPageIds[splitPoint - 1]; - removeChild(splitPoint - 1); - childPageIds[splitPoint - 1] = lastChild; - p2.childPageIds[0] = firstChild; - p2.remapChildren(getPos()); - return p2; - } - - @Override - protected void remapChildren(int old) { - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageData p = index.getPage(child, old); - p.setParentPageId(getPos()); - index.getPageStore().update(p); - } - } - - /** - * Initialize the page. 
- * - * @param page1 the first child page - * @param pivot the pivot key - * @param page2 the last child page - */ - void init(PageData page1, long pivot, PageData page2) { - entryCount = 1; - childPageIds = new int[] { page1.getPos(), page2.getPos() }; - keys = new long[] { pivot }; - length += 4 + Data.getVarLongLen(pivot); - check(); - } - - @Override - long getLastKey() { - return index.getPage(childPageIds[entryCount], getPos()).getLastKey(); - } - - /** - * Get the next leaf page. - * - * @param key the last key of the current page - * @return the next leaf page - */ - PageDataLeaf getNextPage(long key) { - int i = find(key) + 1; - if (i > entryCount) { - if (parentPageId == PageData.ROOT) { - return null; - } - PageDataNode next = (PageDataNode) index.getPage(parentPageId, -1); - return next.getNextPage(key); - } - PageData page = index.getPage(childPageIds[i], getPos()); - return page.getFirstLeaf(); - } - - @Override - PageDataLeaf getFirstLeaf() { - int child = childPageIds[0]; - return index.getPage(child, getPos()).getFirstLeaf(); - } - - @Override - boolean remove(long key) { - int at = find(key); - // merge is not implemented to allow concurrent usage - // TODO maybe implement merge - PageData page = index.getPage(childPageIds[at], getPos()); - boolean empty = page.remove(key); - index.getPageStore().logUndo(this, data); - updateRowCount(-1); - if (!empty) { - // the first row didn't change - nothing to do - return false; - } - // this child is now empty - index.getPageStore().free(page.getPos()); - if (entryCount < 1) { - // no more children - this page is empty as well - return true; - } - removeChild(at); - index.getPageStore().update(this); - return false; - } - - @Override - void freeRecursive() { - index.getPageStore().logUndo(this, data); - index.getPageStore().free(getPos()); - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - index.getPage(child, getPos()).freeRecursive(); - } - } - - @Override - Row 
getRowWithKey(long key) { - int at = find(key); - PageData page = index.getPage(childPageIds[at], getPos()); - return page.getRowWithKey(key); - } - - @Override - int getRowCount() { - if (rowCount == UNKNOWN_ROWCOUNT) { - int count = 0; - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageData page = index.getPage(child, getPos()); - if (getPos() == page.getPos()) { - throw DbException.throwInternalError("Page is its own child: " + getPos()); - } - count += page.getRowCount(); - index.getDatabase().setProgress(DatabaseEventListener.STATE_SCAN_FILE, - index.getTable() + "." + index.getName(), count, Integer.MAX_VALUE); - } - rowCount = count; - } - return rowCount; - } - - @Override - long getDiskSpaceUsed() { - long count = 0; - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageData page = index.getPage(child, getPos()); - if (getPos() == page.getPos()) { - throw DbException.throwInternalError("Page is its own child: " + getPos()); - } - count += page.getDiskSpaceUsed(); - index.getDatabase().setProgress(DatabaseEventListener.STATE_SCAN_FILE, - index.getTable() + "." 
+ index.getName(), - (int) (count >> 16), Integer.MAX_VALUE); - } - return count; - } - - @Override - void setRowCountStored(int rowCount) { - this.rowCount = rowCount; - if (rowCountStored != rowCount) { - rowCountStored = rowCount; - index.getPageStore().logUndo(this, data); - if (written) { - changeCount = index.getPageStore().getChangeCount(); - writeHead(); - } - index.getPageStore().update(this); - } - } - - private void check() { - if (SysProperties.CHECK) { - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - if (child == 0) { - DbException.throwInternalError(); - } - } - } - } - - @Override - public void write() { - writeData(); - index.getPageStore().writePage(getPos(), data); - } - - private void writeHead() { - data.reset(); - data.writeByte((byte) Page.TYPE_DATA_NODE); - data.writeShortInt(0); - assert data.length() == START_PARENT; - data.writeInt(parentPageId); - data.writeVarInt(index.getId()); - data.writeInt(rowCountStored); - data.writeShortInt(entryCount); - } - - private void writeData() { - if (written) { - return; - } - check(); - writeHead(); - data.writeInt(childPageIds[entryCount]); - for (int i = 0; i < entryCount; i++) { - data.writeInt(childPageIds[i]); - data.writeVarLong(keys[i]); - } - if (length != data.length()) { - DbException.throwInternalError("expected pos: " + length + - " got: " + data.length()); - } - written = true; - } - - private void removeChild(int i) { - index.getPageStore().logUndo(this, data); - written = false; - changeCount = index.getPageStore().getChangeCount(); - int removedKeyIndex = i < entryCount ? 
i : i - 1; - entryCount--; - length -= 4 + Data.getVarLongLen(keys[removedKeyIndex]); - if (entryCount < 0) { - DbException.throwInternalError(Integer.toString(entryCount)); - } - keys = remove(keys, entryCount + 1, removedKeyIndex); - childPageIds = remove(childPageIds, entryCount + 2, i); - } - - @Override - public String toString() { - return "page[" + getPos() + "] data node table:" + index.getId() + - " entries:" + entryCount + " " + Arrays.toString(childPageIds); - } - - @Override - public void moveTo(Session session, int newPos) { - PageStore store = index.getPageStore(); - // load the pages into the cache, to ensure old pages - // are written - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - store.getPage(child); - } - if (parentPageId != ROOT) { - store.getPage(parentPageId); - } - store.logUndo(this, data); - PageDataNode p2 = PageDataNode.create(index, newPos, parentPageId); - p2.rowCountStored = rowCountStored; - p2.rowCount = rowCount; - p2.childPageIds = childPageIds; - p2.keys = keys; - p2.entryCount = entryCount; - p2.length = length; - store.update(p2); - if (parentPageId == ROOT) { - index.setRootPageId(session, newPos); - } else { - PageDataNode p = (PageDataNode) store.getPage(parentPageId); - p.moveChild(getPos(), newPos); - } - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageData p = (PageData) store.getPage(child); - p.setParentPageId(newPos); - store.update(p); - } - store.free(getPos()); - } - - /** - * One of the children has moved to another page. 
- * - * @param oldPos the old position - * @param newPos the new position - */ - void moveChild(int oldPos, int newPos) { - for (int i = 0; i < entryCount + 1; i++) { - if (childPageIds[i] == oldPos) { - index.getPageStore().logUndo(this, data); - written = false; - changeCount = index.getPageStore().getChangeCount(); - childPageIds[i] = newPos; - index.getPageStore().update(this); - return; - } - } - throw DbException.throwInternalError(oldPos + " " + newPos); - } - -} diff --git a/h2/src/main/org/h2/index/PageDataOverflow.java b/h2/src/main/org/h2/index/PageDataOverflow.java deleted file mode 100644 index f384efdd27..0000000000 --- a/h2/src/main/org/h2/index/PageDataOverflow.java +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; - -/** - * Overflow data for a leaf page. Format: - *
            - *
          • page type: byte (0)
          • - *
          • checksum: short (1-2)
          • - *
          • parent page id (0 for root): int (3-6)
          • - *
          • more data: next overflow page id: int (7-10)
          • - *
          • last remaining size: short (7-8)
          • - *
          • data (11-/9-)
          • - *
          - */ -public class PageDataOverflow extends Page { - - /** - * The start of the data in the last overflow page. - */ - static final int START_LAST = 9; - - /** - * The start of the data in a overflow page that is not the last one. - */ - static final int START_MORE = 11; - - private static final int START_NEXT_OVERFLOW = 7; - - /** - * The page store. - */ - private final PageStore store; - - /** - * The page type. - */ - private int type; - - /** - * The parent page (overflow or leaf). - */ - private int parentPageId; - - /** - * The next overflow page, or 0. - */ - private int nextPage; - - private final Data data; - - private int start; - private int size; - - /** - * Create an object from the given data page. - * - * @param store the page store - * @param pageId the page id - * @param data the data page - */ - private PageDataOverflow(PageStore store, int pageId, Data data) { - this.store = store; - setPos(pageId); - this.data = data; - } - - /** - * Read an overflow page. - * - * @param store the page store - * @param data the data - * @param pageId the page id - * @return the page - */ - public static Page read(PageStore store, Data data, int pageId) { - PageDataOverflow p = new PageDataOverflow(store, pageId, data); - p.read(); - return p; - } - - /** - * Create a new overflow page. 
- * - * @param store the page store - * @param page the page id - * @param type the page type - * @param parentPageId the parent page id - * @param next the next page or 0 - * @param all the data - * @param offset the offset within the data - * @param size the number of bytes - * @return the page - */ - static PageDataOverflow create(PageStore store, int page, - int type, int parentPageId, int next, - Data all, int offset, int size) { - Data data = store.createData(); - PageDataOverflow p = new PageDataOverflow(store, page, data); - store.logUndo(p, null); - data.writeByte((byte) type); - data.writeShortInt(0); - data.writeInt(parentPageId); - if (type == Page.TYPE_DATA_OVERFLOW) { - data.writeInt(next); - } else { - data.writeShortInt(size); - } - p.start = data.length(); - data.write(all.getBytes(), offset, size); - p.type = type; - p.parentPageId = parentPageId; - p.nextPage = next; - p.size = size; - return p; - } - - /** - * Read the page. - */ - private void read() { - data.reset(); - type = data.readByte(); - data.readShortInt(); - parentPageId = data.readInt(); - if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) { - size = data.readShortInt(); - nextPage = 0; - } else if (type == Page.TYPE_DATA_OVERFLOW) { - nextPage = data.readInt(); - size = store.getPageSize() - data.length(); - } else { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "page:" + - getPos() + " type:" + type); - } - start = data.length(); - } - - /** - * Read the data into a target buffer. 
- * - * @param target the target data page - * @return the next page, or 0 if no next page - */ - int readInto(Data target) { - target.checkCapacity(size); - if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) { - target.write(data.getBytes(), START_LAST, size); - return 0; - } - target.write(data.getBytes(), START_MORE, size); - return nextPage; - } - - int getNextOverflow() { - return nextPage; - } - - private void writeHead() { - data.writeByte((byte) type); - data.writeShortInt(0); - data.writeInt(parentPageId); - } - - @Override - public void write() { - writeData(); - store.writePage(getPos(), data); - } - - - private void writeData() { - data.reset(); - writeHead(); - if (type == Page.TYPE_DATA_OVERFLOW) { - data.writeInt(nextPage); - } else { - data.writeShortInt(size); - } - } - - - @Override - public String toString() { - return "page[" + getPos() + "] data leaf overflow parent:" + - parentPageId + " next:" + nextPage; - } - - /** - * Get the estimated memory size. - * - * @return number of double words (4 bytes) - */ - @Override - public int getMemory() { - return (Constants.MEMORY_PAGE_DATA_OVERFLOW + store.getPageSize()) >> 2; - } - - void setParentPageId(int parent) { - store.logUndo(this, data); - this.parentPageId = parent; - } - - @Override - public void moveTo(Session session, int newPos) { - // load the pages into the cache, to ensure old pages - // are written - Page parent = store.getPage(parentPageId); - if (parent == null) { - throw DbException.throwInternalError(); - } - PageDataOverflow next = null; - if (nextPage != 0) { - next = (PageDataOverflow) store.getPage(nextPage); - } - store.logUndo(this, data); - PageDataOverflow p2 = PageDataOverflow.create(store, newPos, type, - parentPageId, nextPage, data, start, size); - store.update(p2); - if (next != null) { - next.setParentPageId(newPos); - store.update(next); - } - if (parent instanceof PageDataOverflow) { - PageDataOverflow p1 = (PageDataOverflow) parent; - p1.setNext(getPos(), 
newPos); - } else { - PageDataLeaf p1 = (PageDataLeaf) parent; - p1.setOverflow(getPos(), newPos); - } - store.update(parent); - store.free(getPos()); - } - - private void setNext(int old, int nextPage) { - if (old != this.nextPage) { - DbException.throwInternalError("move " + this + " " + nextPage); - } - store.logUndo(this, data); - this.nextPage = nextPage; - data.setInt(START_NEXT_OVERFLOW, nextPage); - } - - /** - * Free this page. - */ - void free() { - store.logUndo(this, data); - store.free(getPos()); - } - - @Override - public boolean canRemove() { - return true; - } - - @Override - public boolean isStream() { - return true; - } - -} diff --git a/h2/src/main/org/h2/index/PageDelegateIndex.java b/h2/src/main/org/h2/index/PageDelegateIndex.java deleted file mode 100644 index 1c57a47d08..0000000000 --- a/h2/src/main/org/h2/index/PageDelegateIndex.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.store.PageStore; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.PageStoreTable; -import org.h2.table.TableFilter; - -/** - * An index that delegates indexing to the page data index. 
- */ -public class PageDelegateIndex extends PageIndex { - - private final PageDataIndex mainIndex; - - public PageDelegateIndex(PageStoreTable table, int id, String name, - IndexType indexType, PageDataIndex mainIndex, boolean create, - Session session) { - super(table, id, name, - IndexColumn.wrap(new Column[] { table.getColumn(mainIndex.getMainIndexColumn()) }), - indexType); - this.mainIndex = mainIndex; - if (!database.isPersistent() || id < 0) { - throw DbException.throwInternalError(name); - } - PageStore store = database.getPageStore(); - store.addIndex(this); - if (create) { - store.addMeta(this, session); - } - } - - @Override - public void add(Session session, Row row) { - // nothing to do - } - - @Override - public boolean canFindNext() { - return false; - } - - @Override - public boolean canGetFirstOrLast() { - return true; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - long min = mainIndex.getKey(first, Long.MIN_VALUE, Long.MIN_VALUE); - // ifNull is MIN_VALUE as well, because the column is never NULL - // so avoid returning all rows (returning one row is OK) - long max = mainIndex.getKey(last, Long.MAX_VALUE, Long.MIN_VALUE); - return mainIndex.find(session, min, max); - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - Cursor cursor; - if (first) { - cursor = mainIndex.find(session, Long.MIN_VALUE, Long.MAX_VALUE); - } else { - long x = mainIndex.getLastKey(); - cursor = mainIndex.find(session, x, x); - } - cursor.next(); - return cursor; - } - - @Override - public Cursor findNext(Session session, SearchRow higherThan, SearchRow last) { - throw DbException.throwInternalError(toString()); - } - - @Override - public int getColumnIndex(Column col) { - if (col.getColumnId() == mainIndex.getMainIndexColumn()) { - return 0; - } - return -1; - } - - @Override - public boolean isFirstColumn(Column 
column) { - return getColumnIndex(column) == 0; - } - - @Override - public double getCost(Session session, int[] masks, - TableFilter[] filters, int filter, SortOrder sortOrder, - AllColumnsForPlan allColumnsSet) { - return 10 * getCostRangeIndex(masks, mainIndex.getRowCount(session), - filters, filter, sortOrder, false, allColumnsSet); - } - - @Override - public boolean needRebuild() { - return false; - } - - @Override - public void remove(Session session, Row row) { - // nothing to do - } - - @Override - public void update(Session session, Row oldRow, Row newRow) { - // nothing to do - } - - @Override - public void remove(Session session) { - mainIndex.setMainIndexColumn(-1); - session.getDatabase().getPageStore().removeMeta(this, session); - } - - @Override - public void truncate(Session session) { - // nothing to do - } - - @Override - public void checkRename() { - // ok - } - - @Override - public long getRowCount(Session session) { - return mainIndex.getRowCount(session); - } - - @Override - public long getRowCountApproximation() { - return mainIndex.getRowCountApproximation(); - } - - @Override - public long getDiskSpaceUsed() { - return mainIndex.getDiskSpaceUsed(); - } - - @Override - public void writeRowCount() { - // ignore - } - -} diff --git a/h2/src/main/org/h2/index/PageIndex.java b/h2/src/main/org/h2/index/PageIndex.java deleted file mode 100644 index 327f2df759..0000000000 --- a/h2/src/main/org/h2/index/PageIndex.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.table.IndexColumn; -import org.h2.table.Table; - -/** - * A page store index. - */ -public abstract class PageIndex extends BaseIndex { - - /** - * The root page of this index. - */ - protected int rootPageId; - - private boolean sortedInsertMode; - - /** - * Initialize the page store index. 
- * - * @param newTable the table - * @param id the object id - * @param name the index name - * @param newIndexColumns the columns that are indexed or null if this is - * not yet known - * @param newIndexType the index type - */ - protected PageIndex(Table newTable, int id, String name, IndexColumn[] newIndexColumns, IndexType newIndexType) { - super(newTable, id, name, newIndexColumns, newIndexType); - } - - /** - * Get the root page of this index. - * - * @return the root page id - */ - public int getRootPageId() { - return rootPageId; - } - - /** - * Write back the row count if it has changed. - */ - public abstract void writeRowCount(); - - @Override - public void setSortedInsertMode(boolean sortedInsertMode) { - this.sortedInsertMode = sortedInsertMode; - } - - boolean isSortedInsertMode() { - return sortedInsertMode; - } - -} diff --git a/h2/src/main/org/h2/index/RangeCursor.java b/h2/src/main/org/h2/index/RangeCursor.java index ce3e8159f2..e51e1d0406 100644 --- a/h2/src/main/org/h2/index/RangeCursor.java +++ b/h2/src/main/org/h2/index/RangeCursor.java @@ -1,34 +1,27 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; -import org.h2.engine.Session; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.value.Value; -import org.h2.value.ValueLong; +import org.h2.value.ValueBigint; /** * The cursor implementation for the range index. 
*/ class RangeCursor implements Cursor { - private final Session session; private boolean beforeFirst; private long current; private Row currentRow; private final long start, end, step; - RangeCursor(Session session, long start, long end) { - this(session, start, end, 1); - } - - RangeCursor(Session session, long start, long end, long step) { - this.session = session; + RangeCursor(long start, long end, long step) { this.start = start; this.end = end; this.step = step; @@ -53,13 +46,13 @@ public boolean next() { } else { current += step; } - currentRow = session.createRow(new Value[]{ValueLong.get(current)}, 1); + currentRow = Row.get(new Value[]{ValueBigint.get(current)}, 1); return step > 0 ? current <= end : current >= end; } @Override public boolean previous() { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/RangeIndex.java b/h2/src/main/org/h2/index/RangeIndex.java index 939e1ca422..30f3bab70b 100644 --- a/h2/src/main/org/h2/index/RangeIndex.java +++ b/h2/src/main/org/h2/index/RangeIndex.java @@ -1,12 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; +import org.h2.api.ErrorCode; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; @@ -14,41 +15,30 @@ import org.h2.table.IndexColumn; import org.h2.table.RangeTable; import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBigint; /** * An index for the SYSTEM_RANGE table. 
* This index can only scan through all rows, search is not supported. */ -public class RangeIndex extends BaseIndex { +public class RangeIndex extends VirtualTableIndex { private final RangeTable rangeTable; public RangeIndex(RangeTable table, IndexColumn[] columns) { - super(table, 0, "RANGE_INDEX", columns, - IndexType.createNonUnique(true)); + super(table, "RANGE_INDEX", columns); this.rangeTable = table; } @Override - public void close(Session session) { - // nothing to do - } - - @Override - public void add(Session session, Row row) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public void remove(Session session, Row row) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { long min = rangeTable.getMin(session); long max = rangeTable.getMax(session); long step = rangeTable.getStep(session); + if (step == 0L) { + throw DbException.get(ErrorCode.STEP_SIZE_MUST_NOT_BE_ZERO); + } if (first != null) { try { long v = first.getValue(0).getLong(); @@ -77,14 +67,14 @@ public Cursor find(Session session, SearchRow first, SearchRow last) { // error when converting the value - ignore } } - return new RangeCursor(session, min, max, step); + return new RangeCursor(min, max, step); } @Override - public double getCost(Session session, int[] masks, + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { - return 1; + return 1d; } @Override @@ -92,49 +82,26 @@ public String getCreateSQL() { return null; } - @Override - public void remove(Session session) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public void truncate(Session session) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public boolean 
needRebuild() { - return false; - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - @Override public boolean canGetFirstOrLast() { return true; } @Override - public Cursor findFirstOrLast(Session session, boolean first) { - long pos = first ? rangeTable.getMin(session) : rangeTable.getMax(session); - return new RangeCursor(session, pos, pos); - } - - @Override - public long getRowCount(Session session) { - return rangeTable.getRowCount(session); + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + long min = rangeTable.getMin(session); + long max = rangeTable.getMax(session); + long step = rangeTable.getStep(session); + if (step == 0L) { + throw DbException.get(ErrorCode.STEP_SIZE_MUST_NOT_BE_ZERO); + } + return new SingleRowCursor((step > 0 ? min <= max : min >= max) + ? Row.get(new Value[]{ ValueBigint.get(first ^ min >= max ? min : max) }, 1) : null); } @Override - public long getRowCountApproximation() { - return rangeTable.getRowCountApproximation(); + public String getPlanSQL() { + return "range index"; } - @Override - public long getDiskSpaceUsed() { - return 0; - } } diff --git a/h2/src/main/org/h2/index/ScanCursor.java b/h2/src/main/org/h2/index/ScanCursor.java deleted file mode 100644 index 2ead41e14b..0000000000 --- a/h2/src/main/org/h2/index/ScanCursor.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; - -/** - * The cursor implementation for the scan index. 
- */ -public class ScanCursor implements Cursor { - private final ScanIndex scan; - private Row row; - - ScanCursor(ScanIndex scan) { - this.scan = scan; - row = null; - } - - @Override - public Row get() { - return row; - } - - @Override - public SearchRow getSearchRow() { - return row; - } - - @Override - public boolean next() { - row = scan.getNextRow(row); - return row != null; - } - - @Override - public boolean previous() { - throw DbException.throwInternalError(toString()); - } - -} diff --git a/h2/src/main/org/h2/index/ScanIndex.java b/h2/src/main/org/h2/index/ScanIndex.java deleted file mode 100644 index 09a3eaddd1..0000000000 --- a/h2/src/main/org/h2/index/ScanIndex.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.ArrayList; - -import org.h2.api.ErrorCode; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.PageStoreTable; -import org.h2.table.TableFilter; -import org.h2.util.Utils; - -/** - * The scan index is not really an 'index' in the strict sense, because it can - * not be used for direct lookup. It can only be used to iterate over all rows - * of a table. Each regular table has one such object, even if no primary key or - * indexes are defined. 
- */ -public class ScanIndex extends BaseIndex { - private long firstFree = -1; - private ArrayList rows = Utils.newSmallArrayList(); - private final PageStoreTable tableData; - private long rowCount; - - public ScanIndex(PageStoreTable table, int id, IndexColumn[] columns, - IndexType indexType) { - super(table, id, table.getName() + "_DATA", columns, indexType); - tableData = table; - } - - @Override - public void remove(Session session) { - truncate(session); - } - - @Override - public void truncate(Session session) { - rows = Utils.newSmallArrayList(); - firstFree = -1; - if (tableData.getContainsLargeObject() && tableData.isPersistData()) { - database.getLobStorage().removeAllForTable(table.getId()); - } - tableData.setRowCount(0); - rowCount = 0; - } - - @Override - public String getCreateSQL() { - return null; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public Row getRow(Session session, long key) { - return rows.get((int) key); - } - - @Override - public void add(Session session, Row row) { - // in-memory - if (firstFree == -1) { - int key = rows.size(); - row.setKey(key); - rows.add(row); - } else { - long key = firstFree; - Row free = rows.get((int) key); - firstFree = free.getKey(); - row.setKey(key); - rows.set((int) key, row); - } - row.setDeleted(false); - rowCount++; - } - - @Override - public void remove(Session session, Row row) { - // in-memory - if (rowCount == 1) { - rows = Utils.newSmallArrayList(); - firstFree = -1; - } else { - Row free = session.createRow(null, 1); - free.setKey(firstFree); - long key = row.getKey(); - if (rows.size() <= key) { - throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, - rows.size() + ": " + key); - } - rows.set((int) key, free); - firstFree = key; - } - rowCount--; - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return new ScanCursor(this); - } - - @Override - public double getCost(Session session, int[] 
masks, - TableFilter[] filters, int filter, SortOrder sortOrder, - AllColumnsForPlan allColumnsSet) { - return tableData.getRowCountApproximation() + Constants.COST_ROW_OFFSET; - } - - @Override - public long getRowCount(Session session) { - return rowCount; - } - - /** - * Get the next row that is stored after this row. - * - * @param row the current row or null to start the scan - * @return the next row or null if there are no more rows - */ - Row getNextRow(Row row) { - long key; - if (row == null) { - key = -1; - } else { - key = row.getKey(); - } - while (true) { - key++; - if (key >= rows.size()) { - return null; - } - row = rows.get((int) key); - if (!row.isEmpty()) { - return row; - } - } - } - - @Override - public int getColumnIndex(Column col) { - // the scan index cannot use any columns - return -1; - } - - @Override - public boolean isFirstColumn(Column column) { - return false; - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("SCAN"); - } - - @Override - public boolean needRebuild() { - return false; - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("SCAN"); - } - - @Override - public long getRowCountApproximation() { - return rowCount; - } - - @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public String getPlanSQL() { - return table.getSQL(new StringBuilder(), false).append(".tableScan").toString(); - } - -} diff --git a/h2/src/main/org/h2/index/SingleRowCursor.java b/h2/src/main/org/h2/index/SingleRowCursor.java index 8de04db619..1ef602b207 100644 --- a/h2/src/main/org/h2/index/SingleRowCursor.java +++ b/h2/src/main/org/h2/index/SingleRowCursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; @@ -47,7 +47,7 @@ public boolean next() { @Override public boolean previous() { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/SpatialIndex.java b/h2/src/main/org/h2/index/SpatialIndex.java index 4f4c3caa1b..1494d36cbe 100644 --- a/h2/src/main/org/h2/index/SpatialIndex.java +++ b/h2/src/main/org/h2/index/SpatialIndex.java @@ -1,32 +1,30 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; +import org.h2.engine.SessionLocal; import org.h2.result.SearchRow; -import org.h2.table.TableFilter; /** * A spatial index. Spatial indexes are used to speed up searching * spatial/geometric data. */ -public interface SpatialIndex extends Index { +public interface SpatialIndex { /** * Find a row or a list of rows and create a cursor to iterate over the * result. 
* - * @param filter the table filter (which possibly knows about additional - * conditions) + * @param session the session * @param first the lower bound * @param last the upper bound * @param intersection the geometry which values should intersect with, or * null for anything * @return the cursor to iterate over the results */ - Cursor findByGeometry(TableFilter filter, SearchRow first, SearchRow last, - SearchRow intersection); + Cursor findByGeometry(SessionLocal session, SearchRow first, SearchRow last, SearchRow intersection); } diff --git a/h2/src/main/org/h2/index/SpatialTreeIndex.java b/h2/src/main/org/h2/index/SpatialTreeIndex.java deleted file mode 100644 index 58b9769118..0000000000 --- a/h2/src/main/org/h2/index/SpatialTreeIndex.java +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import static org.h2.util.geometry.GeometryUtils.MAX_X; -import static org.h2.util.geometry.GeometryUtils.MAX_Y; -import static org.h2.util.geometry.GeometryUtils.MIN_X; -import static org.h2.util.geometry.GeometryUtils.MIN_Y; - -import java.util.Iterator; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.mvstore.MVStore; -import org.h2.mvstore.db.MVTableEngine; -import org.h2.mvstore.rtree.MVRTreeMap; -import org.h2.mvstore.rtree.SpatialKey; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.value.Value; -import org.h2.value.ValueGeometry; -import org.h2.value.ValueNull; - -/** - * This is an index based on a MVR-TreeMap. 
- * - * @author Thomas Mueller - * @author Noel Grandin - * @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 - */ -public class SpatialTreeIndex extends BaseIndex implements SpatialIndex { - - private static final String MAP_PREFIX = "RTREE_"; - - private final MVRTreeMap treeMap; - private final MVStore store; - - private boolean closed; - private boolean needRebuild; - - /** - * Constructor. - * - * @param table the table instance - * @param id the index id - * @param indexName the index name - * @param columns the indexed columns (only one geometry column allowed) - * @param persistent whether the index should be persisted - * @param indexType the index type (only spatial index) - * @param create whether to create a new index - * @param session the session. - */ - public SpatialTreeIndex(Table table, int id, String indexName, - IndexColumn[] columns, IndexType indexType, boolean persistent, - boolean create, Session session) { - super(table, id, indexName, columns, indexType); - if (indexType.isUnique()) { - throw DbException.getUnsupportedException("not unique"); - } - if (!persistent && !create) { - throw DbException.getUnsupportedException( - "Non persistent index called with create==false"); - } - if (columns.length > 1) { - throw DbException.getUnsupportedException( - "can only do one column"); - } - if ((columns[0].sortType & SortOrder.DESCENDING) != 0) { - throw DbException.getUnsupportedException( - "cannot do descending"); - } - if ((columns[0].sortType & SortOrder.NULLS_FIRST) != 0) { - throw DbException.getUnsupportedException( - "cannot do nulls first"); - } - if ((columns[0].sortType & SortOrder.NULLS_LAST) != 0) { - throw DbException.getUnsupportedException( - "cannot do nulls last"); - } - this.needRebuild = create; - if (!database.isStarting()) { - if (columns[0].column.getType().getValueType() != Value.GEOMETRY) { - throw DbException.getUnsupportedException( - "spatial index on non-geometry column, " + - 
columns[0].column.getCreateSQL()); - } - } - if (!persistent) { - // Index in memory - store = MVStore.open(null); - treeMap = store.openMap("spatialIndex", - new MVRTreeMap.Builder()); - } else { - if (id < 0) { - throw DbException.getUnsupportedException( - "Persistent index with id<0"); - } - MVTableEngine.init(session.getDatabase()); - store = session.getDatabase().getStore().getMvStore(); - // Called after CREATE SPATIAL INDEX or - // by PageStore.addMeta - treeMap = store.openMap(MAP_PREFIX + getId(), - new MVRTreeMap.Builder()); - if (treeMap.isEmpty()) { - needRebuild = true; - } - } - } - - @Override - public void close(Session session) { - store.close(); - closed = true; - } - - @Override - public void add(Session session, Row row) { - if (closed) { - throw DbException.throwInternalError(); - } - treeMap.add(getKey(row), row.getKey()); - } - - private SpatialKey getKey(SearchRow row) { - if (row == null) { - return null; - } - Value v = row.getValue(columnIds[0]); - double[] env; - if (v == ValueNull.INSTANCE || - (env = ((ValueGeometry) v.convertTo(Value.GEOMETRY)).getEnvelopeNoCopy()) == null) { - return new SpatialKey(row.getKey()); - } - return new SpatialKey(row.getKey(), - (float) env[MIN_X], (float) env[MAX_X], (float) env[MIN_Y], (float) env[MAX_Y]); - } - - @Override - public void remove(Session session, Row row) { - if (closed) { - throw DbException.throwInternalError(); - } - if (!treeMap.remove(getKey(row), row.getKey())) { - throw DbException.throwInternalError("row not found"); - } - } - - @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - return find(filter.getSession()); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return find(session); - } - - private Cursor find(Session session) { - return new SpatialCursor(treeMap.keySet().iterator(), table, session); - } - - @Override - public Cursor findByGeometry(TableFilter filter, SearchRow first, - SearchRow last, 
SearchRow intersection) { - if (intersection == null) { - return find(filter.getSession(), first, last); - } - return new SpatialCursor( - treeMap.findIntersectingKeys(getKey(intersection)), table, - filter.getSession()); - } - - /** - * Compute spatial index cost - * @param masks Search mask - * @param columns Table columns - * @return Index cost hint - */ - public static long getCostRangeIndex(int[] masks, Column[] columns) { - // Never use spatial tree index without spatial filter - if (columns.length == 0) { - return Long.MAX_VALUE; - } - for (Column column : columns) { - int index = column.getColumnId(); - int mask = masks[index]; - if ((mask & IndexCondition.SPATIAL_INTERSECTS) != IndexCondition.SPATIAL_INTERSECTS) { - return Long.MAX_VALUE; - } - } - return 2; - } - - @Override - public double getCost(Session session, int[] masks, - TableFilter[] filters, int filter, SortOrder sortOrder, - AllColumnsForPlan allColumnsSet) { - return getCostRangeIndex(masks, columns); - } - - - @Override - public void remove(Session session) { - if (!treeMap.isClosed()) { - store.removeMap(treeMap); - } - } - - @Override - public void truncate(Session session) { - treeMap.clear(); - } - - @Override - public void checkRename() { - // nothing to do - } - - @Override - public boolean needRebuild() { - return needRebuild; - } - - @Override - public boolean canGetFirstOrLast() { - return true; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - if (closed) { - throw DbException.throwInternalError(toString()); - } - if (!first) { - throw DbException.throwInternalError( - "Spatial Index can only be fetch by ascending order"); - } - return find(session); - } - - @Override - public long getRowCount(Session session) { - return treeMap.sizeAsLong(); - } - - @Override - public long getRowCountApproximation() { - return treeMap.sizeAsLong(); - } - - @Override - public long getDiskSpaceUsed() { - // TODO estimate disk space usage - return 0; - } - - /** - 
* A cursor to iterate over spatial keys. - */ - private static final class SpatialCursor implements Cursor { - - private final Iterator it; - private SpatialKey current; - private final Table table; - private final Session session; - - public SpatialCursor(Iterator it, Table table, Session session) { - this.it = it; - this.table = table; - this.session = session; - } - - @Override - public Row get() { - return table.getRow(session, current.getId()); - } - - @Override - public SearchRow getSearchRow() { - return get(); - } - - @Override - public boolean next() { - if (!it.hasNext()) { - return false; - } - current = it.next(); - return true; - } - - @Override - public boolean previous() { - return false; - } - - } - -} - diff --git a/h2/src/main/org/h2/index/TreeCursor.java b/h2/src/main/org/h2/index/TreeCursor.java deleted file mode 100644 index 2124248a98..0000000000 --- a/h2/src/main/org/h2/index/TreeCursor.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.result.Row; -import org.h2.result.SearchRow; - -/** - * The cursor implementation for a tree index. - */ -public class TreeCursor implements Cursor { - private final TreeIndex tree; - private TreeNode node; - private boolean beforeFirst; - private final SearchRow first, last; - - TreeCursor(TreeIndex tree, TreeNode node, SearchRow first, SearchRow last) { - this.tree = tree; - this.node = node; - this.first = first; - this.last = last; - beforeFirst = true; - } - - @Override - public Row get() { - return node == null ? 
null : node.row; - } - - @Override - public SearchRow getSearchRow() { - return get(); - } - - @Override - public boolean next() { - if (beforeFirst) { - beforeFirst = false; - if (node == null) { - return false; - } - if (first != null && tree.compareRows(node.row, first) < 0) { - node = next(node); - } - } else { - node = next(node); - } - if (node != null && last != null) { - if (tree.compareRows(node.row, last) > 0) { - node = null; - } - } - return node != null; - } - - @Override - public boolean previous() { - node = previous(node); - return node != null; - } - - /** - * Get the next node if there is one. - * - * @param x the node - * @return the next node or null - */ - private static TreeNode next(TreeNode x) { - if (x == null) { - return null; - } - TreeNode r = x.right; - if (r != null) { - x = r; - TreeNode l = x.left; - while (l != null) { - x = l; - l = x.left; - } - return x; - } - TreeNode ch = x; - x = x.parent; - while (x != null && ch == x.right) { - ch = x; - x = x.parent; - } - return x; - } - - - /** - * Get the previous node if there is one. - * - * @param x the node - * @return the previous node or null - */ - private static TreeNode previous(TreeNode x) { - if (x == null) { - return null; - } - TreeNode l = x.left; - if (l != null) { - x = l; - TreeNode r = x.right; - while (r != null) { - x = r; - r = x.right; - } - return x; - } - TreeNode ch = x; - x = x.parent; - while (x != null && ch == x.left) { - ch = x; - x = x.parent; - } - return x; - } - -} diff --git a/h2/src/main/org/h2/index/TreeIndex.java b/h2/src/main/org/h2/index/TreeIndex.java deleted file mode 100644 index f4d0cefcb2..0000000000 --- a/h2/src/main/org/h2/index/TreeIndex.java +++ /dev/null @@ -1,411 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.IndexColumn; -import org.h2.table.PageStoreTable; -import org.h2.table.TableFilter; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * The tree index is an in-memory index based on a binary AVL trees. - */ -public class TreeIndex extends BaseIndex { - - private TreeNode root; - private final PageStoreTable tableData; - private long rowCount; - private boolean closed; - - public TreeIndex(PageStoreTable table, int id, String indexName, - IndexColumn[] columns, IndexType indexType) { - super(table, id, indexName, columns, indexType); - tableData = table; - if (!database.isStarting()) { - checkIndexColumnTypes(columns); - } - } - - @Override - public void close(Session session) { - root = null; - closed = true; - } - - @Override - public void add(Session session, Row row) { - if (closed) { - throw DbException.throwInternalError(); - } - TreeNode i = new TreeNode(row); - TreeNode n = root, x = n; - boolean isLeft = true; - while (true) { - if (n == null) { - if (x == null) { - root = i; - rowCount++; - return; - } - set(x, isLeft, i); - break; - } - Row r = n.row; - int compare = compareRows(row, r); - if (compare == 0) { - if (indexType.isUnique()) { - if (!mayHaveNullDuplicates(row)) { - throw getDuplicateKeyException(row.toString()); - } - } - compare = compareKeys(row, r); - } - isLeft = compare < 0; - x = n; - n = child(x, isLeft); - } - balance(x, isLeft); - rowCount++; - } - - private void balance(TreeNode x, boolean isLeft) { - while (true) { - int sign = isLeft ? 
1 : -1; - switch (x.balance * sign) { - case 1: - x.balance = 0; - return; - case 0: - x.balance = -sign; - break; - case -1: - TreeNode l = child(x, isLeft); - if (l.balance == -sign) { - replace(x, l); - set(x, isLeft, child(l, !isLeft)); - set(l, !isLeft, x); - x.balance = 0; - l.balance = 0; - } else { - TreeNode r = child(l, !isLeft); - replace(x, r); - set(l, !isLeft, child(r, isLeft)); - set(r, isLeft, l); - set(x, isLeft, child(r, !isLeft)); - set(r, !isLeft, x); - int rb = r.balance; - x.balance = (rb == -sign) ? sign : 0; - l.balance = (rb == sign) ? -sign : 0; - r.balance = 0; - } - return; - default: - DbException.throwInternalError("b:" + x.balance * sign); - } - if (x == root) { - return; - } - isLeft = x.isFromLeft(); - x = x.parent; - } - } - - private static TreeNode child(TreeNode x, boolean isLeft) { - return isLeft ? x.left : x.right; - } - - private void replace(TreeNode x, TreeNode n) { - if (x == root) { - root = n; - if (n != null) { - n.parent = null; - } - } else { - set(x.parent, x.isFromLeft(), n); - } - } - - private static void set(TreeNode parent, boolean left, TreeNode n) { - if (left) { - parent.left = n; - } else { - parent.right = n; - } - if (n != null) { - n.parent = parent; - } - } - - @Override - public void remove(Session session, Row row) { - if (closed) { - throw DbException.throwInternalError(); - } - TreeNode x = findFirstNode(row, true); - if (x == null) { - throw DbException.throwInternalError("not found!"); - } - TreeNode n; - if (x.left == null) { - n = x.right; - } else if (x.right == null) { - n = x.left; - } else { - TreeNode d = x; - x = x.left; - for (TreeNode temp = x; (temp = temp.right) != null;) { - x = temp; - } - // x will be replaced with n later - n = x.left; - // swap d and x - int b = x.balance; - x.balance = d.balance; - d.balance = b; - - // set x.parent - TreeNode xp = x.parent; - TreeNode dp = d.parent; - if (d == root) { - root = x; - } - x.parent = dp; - if (dp != null) { - if (dp.right == d) { - 
dp.right = x; - } else { - dp.left = x; - } - } - // TODO index / tree: link d.r = x(p?).r directly - if (xp == d) { - d.parent = x; - if (d.left == x) { - x.left = d; - x.right = d.right; - } else { - x.right = d; - x.left = d.left; - } - } else { - d.parent = xp; - xp.right = d; - x.right = d.right; - x.left = d.left; - } - - if (x.right == null) { - DbException.throwInternalError("tree corrupted"); - } - x.right.parent = x; - x.left.parent = x; - // set d.left, d.right - d.left = n; - if (n != null) { - n.parent = d; - } - d.right = null; - x = d; - } - rowCount--; - - boolean isLeft = x.isFromLeft(); - replace(x, n); - n = x.parent; - while (n != null) { - x = n; - int sign = isLeft ? 1 : -1; - switch (x.balance * sign) { - case -1: - x.balance = 0; - break; - case 0: - x.balance = sign; - return; - case 1: - TreeNode r = child(x, !isLeft); - int b = r.balance; - if (b * sign >= 0) { - replace(x, r); - set(x, !isLeft, child(r, isLeft)); - set(r, isLeft, x); - if (b == 0) { - x.balance = sign; - r.balance = -sign; - return; - } - x.balance = 0; - r.balance = 0; - x = r; - } else { - TreeNode l = child(r, isLeft); - replace(x, l); - b = l.balance; - set(r, isLeft, child(l, !isLeft)); - set(l, !isLeft, r); - set(x, !isLeft, child(l, isLeft)); - set(l, isLeft, x); - x.balance = (b == sign) ? -sign : 0; - r.balance = (b == -sign) ? 
sign : 0; - l.balance = 0; - x = l; - } - break; - default: - DbException.throwInternalError("b: " + x.balance * sign); - } - isLeft = x.isFromLeft(); - n = x.parent; - } - } - - private TreeNode findFirstNode(SearchRow row, boolean withKey) { - TreeNode x = root, result = x; - while (x != null) { - result = x; - int compare = compareRows(x.row, row); - if (compare == 0 && withKey) { - compare = compareKeys(x.row, row); - } - if (compare == 0) { - if (withKey) { - return x; - } - x = x.left; - } else if (compare > 0) { - x = x.left; - } else { - x = x.right; - } - } - return result; - } - - @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - return find(first, last); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return find(first, last); - } - - private Cursor find(SearchRow first, SearchRow last) { - if (first == null) { - TreeNode x = root, n; - while (x != null) { - n = x.left; - if (n == null) { - break; - } - x = n; - } - return new TreeCursor(this, x, null, last); - } - TreeNode x = findFirstNode(first, false); - return new TreeCursor(this, x, first, last); - } - - @Override - public double getCost(Session session, int[] masks, TableFilter[] filters, int filter, - SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { - return getCostRangeIndex(masks, tableData.getRowCountApproximation(), - filters, filter, sortOrder, false, allColumnsSet); - } - - @Override - public void remove(Session session) { - truncate(session); - } - - @Override - public void truncate(Session session) { - root = null; - rowCount = 0; - } - - @Override - public void checkRename() { - // nothing to do - } - - @Override - public boolean needRebuild() { - return true; - } - - @Override - public boolean canGetFirstOrLast() { - return true; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - if (closed) { - throw DbException.throwInternalError(toString()); - } - if (first) { 
- // TODO optimization: this loops through NULL - Cursor cursor = find(session, null, null); - while (cursor.next()) { - SearchRow row = cursor.getSearchRow(); - Value v = row.getValue(columnIds[0]); - if (v != ValueNull.INSTANCE) { - return cursor; - } - } - return cursor; - } - TreeNode x = root, n; - while (x != null) { - n = x.right; - if (n == null) { - break; - } - x = n; - } - TreeCursor cursor = new TreeCursor(this, x, null, null); - if (x == null) { - return cursor; - } - // TODO optimization: this loops through NULL elements - do { - SearchRow row = cursor.getSearchRow(); - if (row == null) { - break; - } - Value v = row.getValue(columnIds[0]); - if (v != ValueNull.INSTANCE) { - return cursor; - } - } while (cursor.previous()); - return cursor; - } - - @Override - public long getRowCount(Session session) { - return rowCount; - } - - @Override - public long getRowCountApproximation() { - return rowCount; - } - - @Override - public long getDiskSpaceUsed() { - return 0; - } - -} diff --git a/h2/src/main/org/h2/index/TreeNode.java b/h2/src/main/org/h2/index/TreeNode.java deleted file mode 100644 index 447188acee..0000000000 --- a/h2/src/main/org/h2/index/TreeNode.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.result.Row; - -/** - * Represents a index node of a tree index. - */ -class TreeNode { - - /** - * The balance. For more information, see the AVL tree documentation. - */ - int balance; - - /** - * The left child node or null. - */ - TreeNode left; - - /** - * The right child node or null. - */ - TreeNode right; - - /** - * The parent node or null if this is the root node. - */ - TreeNode parent; - - /** - * The row. - */ - final Row row; - - TreeNode(Row row) { - this.row = row; - } - - /** - * Check if this node is the left child of its parent. 
This method returns - * true if this is the root node. - * - * @return true if this node is the root or a left child - */ - boolean isFromLeft() { - return parent == null || parent.left == this; - } - -} diff --git a/h2/src/main/org/h2/index/ViewCursor.java b/h2/src/main/org/h2/index/ViewCursor.java index 7952fcaacb..53ac2a72ab 100644 --- a/h2/src/main/org/h2/index/ViewCursor.java +++ b/h2/src/main/org/h2/index/ViewCursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; @@ -81,7 +81,7 @@ public boolean next() { @Override public boolean previous() { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/ViewIndex.java b/h2/src/main/org/h2/index/ViewIndex.java index f92f22c41c..173fe9a9b8 100644 --- a/h2/src/main/org/h2/index/ViewIndex.java +++ b/h2/src/main/org/h2/index/ViewIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; @@ -10,12 +10,11 @@ import org.h2.api.ErrorCode; import org.h2.command.Parser; -import org.h2.command.Prepared; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.command.dml.Query; -import org.h2.command.dml.SelectUnion; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.command.query.Query; +import org.h2.command.query.SelectUnion; import org.h2.engine.Constants; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Parameter; import org.h2.expression.condition.Comparison; import org.h2.message.DbException; @@ -26,7 +25,6 @@ import org.h2.result.SortOrder; import org.h2.table.Column; import org.h2.table.IndexColumn; -import org.h2.table.JoinBatch; import org.h2.table.TableFilter; import org.h2.table.TableView; import org.h2.util.IntArray; @@ -36,7 +34,7 @@ * This object represents a virtual index for a query. * Actually it only represents a prepared SELECT statement. */ -public class ViewIndex extends BaseIndex implements SpatialIndex { +public class ViewIndex extends Index implements SpatialIndex { private static final long MAX_AGE_NANOS = TimeUnit.MILLISECONDS.toNanos(Constants.VIEW_COST_CACHE_MAX_AGE); @@ -47,7 +45,7 @@ public class ViewIndex extends BaseIndex implements SpatialIndex { private boolean recursive; private final int[] indexMasks; private Query query; - private final Session createSession; + private final SessionLocal createSession; /** * The time in nanoseconds when this index (and its cost) was calculated. 
@@ -64,7 +62,7 @@ public class ViewIndex extends BaseIndex implements SpatialIndex { */ public ViewIndex(TableView view, String querySQL, ArrayList originalParameters, boolean recursive) { - super(view, 0, null, null, IndexType.createNonUnique(false)); + super(view, 0, null, null, 0, IndexType.createNonUnique(false)); this.view = view; this.querySQL = querySQL; this.originalParameters = originalParameters; @@ -89,9 +87,9 @@ public ViewIndex(TableView view, String querySQL, * @param filter current filter * @param sortOrder sort order */ - public ViewIndex(TableView view, ViewIndex index, Session session, + public ViewIndex(TableView view, ViewIndex index, SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder) { - super(view, 0, null, null, IndexType.createNonUnique(false)); + super(view, 0, null, null, 0, IndexType.createNonUnique(false)); this.view = view; this.querySQL = index.querySQL; this.originalParameters = index.originalParameters; @@ -100,25 +98,20 @@ public ViewIndex(TableView view, ViewIndex index, Session session, this.createSession = session; columns = new Column[0]; if (!recursive) { - query = getQuery(session, masks, filters, filter, sortOrder); + query = getQuery(session, masks); } - // we don't need eviction for recursive views since we can't calculate - // their cost if it is a sub-query we don't need eviction as well - // because the whole ViewIndex cache is getting dropped in - // Session.prepareLocal - evaluatedAt = recursive || view.getTopQuery() != null ? 
Long.MAX_VALUE : System.nanoTime(); - } - - @Override - public IndexLookupBatch createLookupBatch(TableFilter[] filters, int filter) { - if (recursive) { - // we do not support batching for recursive queries - return null; + if (recursive || view.getTopQuery() != null) { + evaluatedAt = Long.MAX_VALUE; + } else { + long time = System.nanoTime(); + if (time == Long.MAX_VALUE) { + time++; + } + evaluatedAt = time; } - return JoinBatch.createViewIndexLookupBatch(this); } - public Session getSession() { + public SessionLocal getSession() { return createSession; } @@ -130,52 +123,39 @@ public boolean isExpired() { @Override public String getPlanSQL() { - return query == null ? null : query.getPlanSQL(false); + return query == null ? null : query.getPlanSQL(TRACE_SQL_FLAGS | ADD_PLAN_INFORMATION); } @Override - public void close(Session session) { + public void close(SessionLocal session) { // nothing to do } @Override - public void add(Session session, Row row) { + public void add(SessionLocal session, Row row) { throw DbException.getUnsupportedException("VIEW"); } @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { throw DbException.getUnsupportedException("VIEW"); } @Override - public double getCost(Session session, int[] masks, + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { return recursive ? 
1000 : query.getCost(); } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { return find(session, first, last, null); } @Override - public Cursor findByGeometry(TableFilter filter, SearchRow first, - SearchRow last, SearchRow intersection) { - return find(filter.getSession(), first, last, intersection); - } - - private static Query prepareSubQuery(String sql, Session session, int[] masks, - TableFilter[] filters, int filter, SortOrder sortOrder) { - Prepared p; - session.pushSubQueryInfo(masks, filters, filter, sortOrder); - try { - p = session.prepare(sql, true, true); - } finally { - session.popSubQueryInfo(); - } - return (Query) p; + public Cursor findByGeometry(SessionLocal session, SearchRow first, SearchRow last, SearchRow intersection) { + return find(session, first, last, intersection); } private Cursor findRecursive(SearchRow first, SearchRow last) { @@ -188,7 +168,7 @@ private Cursor findRecursive(SearchRow first, SearchRow last) { if (query == null) { Parser parser = new Parser(createSession); parser.setRightsChecked(true); - parser.setSuppliedParameterList(originalParameters); + parser.setSuppliedParameters(originalParameters); query = (Query) parser.prepare(querySQL); query.setNeverLazy(true); } @@ -241,14 +221,16 @@ private Cursor findRecursive(SearchRow first, SearchRow last) { * @param last the upper bound * @param intersection the intersection */ - public void setupQueryParameters(Session session, SearchRow first, SearchRow last, + public void setupQueryParameters(SessionLocal session, SearchRow first, SearchRow last, SearchRow intersection) { ArrayList paramList = query.getParameters(); if (originalParameters != null) { for (Parameter orig : originalParameters) { - int idx = orig.getIndex(); - Value value = orig.getValue(session); - setParameter(paramList, idx, value); + if (orig != null) { + int idx = orig.getIndex(); + Value value = 
orig.getValue(session); + setParameter(paramList, idx, value); + } } } int len; @@ -279,7 +261,7 @@ public void setupQueryParameters(Session session, SearchRow first, SearchRow las } } - private Cursor find(Session session, SearchRow first, SearchRow last, + private Cursor find(SessionLocal session, SearchRow first, SearchRow last, SearchRow intersection) { if (recursive) { return findRecursive(first, last); @@ -304,9 +286,8 @@ public Query getQuery() { return query; } - private Query getQuery(Session session, int[] masks, - TableFilter[] filters, int filter, SortOrder sortOrder) { - Query q = prepareSubQuery(querySQL, session, masks, filters, filter, sortOrder); + private Query getQuery(SessionLocal session, int[] masks) { + Query q = (Query) session.prepare(querySQL, true, true); if (masks == null) { return q; } @@ -382,26 +363,25 @@ private Query getQuery(Session session, int[] masks, continue; } } - IndexColumn c = new IndexColumn(); - c.column = table.getColumn(i); - indexColumns[indexColumnId] = c; - columnIds[indexColumnId] = c.column.getColumnId(); + Column column = table.getColumn(i); + indexColumns[indexColumnId] = new IndexColumn(column); + columnIds[indexColumnId] = column.getColumnId(); indexColumnId++; } } - String sql = q.getPlanSQL(true); - q = prepareSubQuery(sql, session, masks, filters, filter, sortOrder); + String sql = q.getPlanSQL(DEFAULT_SQL_FLAGS); + q = (Query) session.prepare(sql, true, true); return q; } @Override - public void remove(Session session) { + public void remove(SessionLocal session) { throw DbException.getUnsupportedException("VIEW"); } @Override - public void truncate(Session session) { + public void truncate(SessionLocal session) { throw DbException.getUnsupportedException("VIEW"); } @@ -415,32 +395,17 @@ public boolean needRebuild() { return false; } - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw 
DbException.getUnsupportedException("VIEW"); - } - public void setRecursive(boolean value) { this.recursive = value; } @Override - public long getRowCount(Session session) { - return 0; - } - - @Override - public long getRowCountApproximation() { + public long getRowCount(SessionLocal session) { return 0; } @Override - public long getDiskSpaceUsed() { + public long getRowCountApproximation(SessionLocal session) { return 0; } diff --git a/h2/src/main/org/h2/index/VirtualConstructedTableIndex.java b/h2/src/main/org/h2/index/VirtualConstructedTableIndex.java new file mode 100644 index 0000000000..bde72c8df3 --- /dev/null +++ b/h2/src/main/org/h2/index/VirtualConstructedTableIndex.java @@ -0,0 +1,66 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.index; + +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.result.SearchRow; +import org.h2.result.SortOrder; +import org.h2.table.FunctionTable; +import org.h2.table.IndexColumn; +import org.h2.table.TableFilter; +import org.h2.table.VirtualConstructedTable; + +/** + * An index for a virtual table that returns a result set. Search in this index + * performs scan over all rows and should be avoided. 
+ */ +public class VirtualConstructedTableIndex extends VirtualTableIndex { + + private final VirtualConstructedTable table; + + public VirtualConstructedTableIndex(VirtualConstructedTable table, IndexColumn[] columns) { + super(table, null, columns); + this.table = table; + } + + @Override + public boolean isFindUsingFullTableScan() { + return true; + } + + @Override + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + return new VirtualTableCursor(this, first, last, table.getResult(session)); + } + + @Override + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { + if (masks != null) { + throw DbException.getUnsupportedException("Virtual table"); + } + long expectedRows; + if (table.canGetRowCount(session)) { + expectedRows = table.getRowCountApproximation(session); + } else { + expectedRows = database.getSettings().estimatedFunctionTableRows; + } + return expectedRows * 10; + } + + @Override + public String getPlanSQL() { + return table instanceof FunctionTable ? "function" : "table scan"; + } + + @Override + public boolean canScan() { + return false; + } + +} diff --git a/h2/src/main/org/h2/index/AbstractFunctionCursor.java b/h2/src/main/org/h2/index/VirtualTableCursor.java similarity index 62% rename from h2/src/main/org/h2/index/AbstractFunctionCursor.java rename to h2/src/main/org/h2/index/VirtualTableCursor.java index 2dc18b584b..0831454efb 100644 --- a/h2/src/main/org/h2/index/AbstractFunctionCursor.java +++ b/h2/src/main/org/h2/index/VirtualTableCursor.java @@ -1,29 +1,30 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; -import org.h2.engine.Session; import org.h2.message.DbException; +import org.h2.result.ResultInterface; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.value.Value; /** - * Abstract function cursor. This implementation filters the rows (only returns - * entries that are larger or equal to "first", and smaller than last or equal - * to "last"). + * A cursor for a virtual table. This implementation filters the rows (only + * returns entries that are larger or equal to "first", and smaller than last or + * equal to "last"). */ -abstract class AbstractFunctionCursor implements Cursor { - private final FunctionIndex index; +class VirtualTableCursor implements Cursor { + + private final VirtualTableIndex index; private final SearchRow first; private final SearchRow last; - final Session session; + private final ResultInterface result; Value[] values; @@ -36,14 +37,15 @@ abstract class AbstractFunctionCursor implements Cursor { * first row * @param last * last row - * @param session - * session + * @param result + * the result */ - AbstractFunctionCursor(FunctionIndex index, SearchRow first, SearchRow last, Session session) { + VirtualTableCursor(VirtualTableIndex index, SearchRow first, SearchRow last, + ResultInterface result) { this.index = index; this.first = first; this.last = last; - this.session = session; + this.result = result; } @Override @@ -52,7 +54,7 @@ public Row get() { return null; } if (row == null) { - row = session.createRow(values, 1); + row = Row.get(values, 1); } return row; } @@ -92,11 +94,19 @@ public boolean next() { * * @return true if another row is available */ - abstract boolean nextImpl(); + private boolean nextImpl() { + row = null; + if (result != null && result.next()) { + values = result.currentRow(); + } else { + values = null; + } + return values != null; + } @Override public boolean previous() { - throw DbException.throwInternalError(toString()); + throw 
DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/VirtualTableIndex.java b/h2/src/main/org/h2/index/VirtualTableIndex.java new file mode 100644 index 0000000000..eee94df727 --- /dev/null +++ b/h2/src/main/org/h2/index/VirtualTableIndex.java @@ -0,0 +1,68 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.index; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.result.Row; +import org.h2.table.IndexColumn; +import org.h2.table.VirtualTable; + +/** + * An base class for indexes of virtual tables. + */ +public abstract class VirtualTableIndex extends Index { + + protected VirtualTableIndex(VirtualTable table, String name, IndexColumn[] columns) { + super(table, 0, name, columns, 0, IndexType.createNonUnique(true)); + } + + @Override + public void close(SessionLocal session) { + // nothing to do + } + + @Override + public void add(SessionLocal session, Row row) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public void remove(SessionLocal session, Row row) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public void remove(SessionLocal session) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public void truncate(SessionLocal session) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public boolean needRebuild() { + return false; + } + + @Override + public void checkRename() { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public long getRowCount(SessionLocal session) { + return table.getRowCount(session); + } + + @Override + public long getRowCountApproximation(SessionLocal session) { + return table.getRowCountApproximation(session); + } + +} diff --git 
a/h2/src/main/org/h2/index/package.html b/h2/src/main/org/h2/index/package.html index 719fc84439..40a17031a5 100644 --- a/h2/src/main/org/h2/index/package.html +++ b/h2/src/main/org/h2/index/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/jdbc/JdbcArray.java b/h2/src/main/org/h2/jdbc/JdbcArray.java index 10734c6e94..90c745d051 100644 --- a/h2/src/main/org/h2/jdbc/JdbcArray.java +++ b/h2/src/main/org/h2/jdbc/JdbcArray.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -8,34 +8,37 @@ import java.sql.Array; import java.sql.ResultSet; import java.sql.SQLException; -import java.sql.Types; import java.util.Map; import org.h2.api.ErrorCode; import org.h2.message.DbException; import org.h2.message.TraceObject; import org.h2.result.SimpleResult; +import org.h2.value.DataType; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueArray; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; +import org.h2.value.ValueBigint; +import org.h2.value.ValueToObjectConverter; /** * Represents an ARRAY value. 
*/ -public class JdbcArray extends TraceObject implements Array { +public final class JdbcArray extends TraceObject implements Array { - private Value value; + private ValueArray value; private final JdbcConnection conn; /** * INTERNAL + * @param conn it belongs to + * @param value of + * @param id of the trace object */ public JdbcArray(JdbcConnection conn, Value value, int id) { setTrace(conn.getSession().getTrace(), TraceObject.ARRAY, id); this.conn = conn; - this.value = value.convertTo(Value.ARRAY); + this.value = value.convertToAnyArray(conn); } /** @@ -66,7 +69,7 @@ public Object getArray() throws SQLException { public Object getArray(Map> map) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getArray("+quoteMap(map)+");"); + debugCode("getArray(" + quoteMap(map) + ')'); } JdbcConnection.checkMap(map); checkClosed(); @@ -89,7 +92,7 @@ public Object getArray(Map> map) throws SQLException { public Object getArray(long index, int count) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getArray(" + index + ", " + count + ");"); + debugCode("getArray(" + index + ", " + count + ')'); } checkClosed(); return get(index, count); @@ -113,7 +116,7 @@ public Object getArray(long index, int count, Map> map) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getArray(" + index + ", " + count + ", " + quoteMap(map)+");"); + debugCode("getArray(" + index + ", " + count + ", " + quoteMap(map) + ')'); } checkClosed(); JdbcConnection.checkMap(map); @@ -124,17 +127,16 @@ public Object getArray(long index, int count, Map> map) } /** - * Returns the base type of the array. This database does support mixed type - * arrays and therefore there is no base type. + * Returns the base type of the array. 
* - * @return Types.NULL + * @return the base type or Types.NULL */ @Override public int getBaseType() throws SQLException { try { debugCodeCall("getBaseType"); checkClosed(); - return Types.NULL; + return DataType.convertTypeToSQLType(value.getComponentType()); } catch (Exception e) { throw logAndConvert(e); } @@ -144,14 +146,14 @@ public int getBaseType() throws SQLException { * Returns the base type name of the array. This database does support mixed * type arrays and therefore there is no base type. * - * @return "NULL" + * @return the base type name or "NULL" */ @Override public String getBaseTypeName() throws SQLException { try { debugCodeCall("getBaseTypeName"); checkClosed(); - return "NULL"; + return value.getComponentType().getDeclaredTypeName(); } catch (Exception e) { throw logAndConvert(e); } @@ -186,7 +188,7 @@ public ResultSet getResultSet() throws SQLException { public ResultSet getResultSet(Map> map) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getResultSet("+quoteMap(map)+");"); + debugCode("getResultSet(" + quoteMap(map) + ')'); } checkClosed(); JdbcConnection.checkMap(map); @@ -210,7 +212,7 @@ public ResultSet getResultSet(Map> map) throws SQLException { public ResultSet getResultSet(long index, int count) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getResultSet("+index+", " + count+");"); + debugCode("getResultSet(" + index + ", " + count + ')'); } checkClosed(); return getResultSetImpl(index, count); @@ -236,7 +238,7 @@ public ResultSet getResultSet(long index, int count, Map> map) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getResultSet("+index+", " + count+", " + quoteMap(map)+");"); + debugCode("getResultSet(" + index + ", " + count + ", " + quoteMap(map) + ')'); } checkClosed(); JdbcConnection.checkMap(map); @@ -258,17 +260,14 @@ public void free() { private ResultSet getResultSetImpl(long index, int count) { int id = getNextId(TraceObject.RESULT_SET); SimpleResult rs = new 
SimpleResult(); - rs.addColumn("INDEX", "INDEX", TypeInfo.TYPE_LONG); - // TODO array result set: there are multiple data types possible - rs.addColumn("VALUE", "VALUE", TypeInfo.TYPE_NULL); - if (value != ValueNull.INSTANCE) { - Value[] values = ((ValueArray) value).getList(); - count = checkRange(index, count, values.length); - for (int i = (int) index; i < index + count; i++) { - rs.addRow(ValueLong.get(i), values[i - 1]); - } + rs.addColumn("INDEX", TypeInfo.TYPE_BIGINT); + rs.addColumn("VALUE", value.getComponentType()); + Value[] values = value.getList(); + count = checkRange(index, count, values.length); + for (int i = (int) index; i < index + count; i++) { + rs.addRow(ValueBigint.get(i), values[i - 1]); } - return new JdbcResultSet(conn, null, null, rs, id, false, true, false); + return new JdbcResultSet(conn, null, null, rs, id, true, false, false); } private void checkClosed() { @@ -278,25 +277,22 @@ private void checkClosed() { } } - private Object[] get() { - return (Object[]) value.getObject(); + private Object get() { + return ValueToObjectConverter.valueToDefaultArray(value, conn, true); } - private Object[] get(long index, int count) { - if (value == ValueNull.INSTANCE) { - return null; - } - Value[] values = ((ValueArray) value).getList(); + private Object get(long index, int count) { + Value[] values = value.getList(); count = checkRange(index, count, values.length); Object[] a = new Object[count]; for (int i = 0, j = (int) index - 1; i < count; i++, j++) { - a[i] = values[j].getObject(); + a[i] = ValueToObjectConverter.valueToDefaultObject(values[j], conn, true); } return a; } private static int checkRange(long index, int count, int len) { - if (index < 1 || index > len) { + if (index < 1 || (index != 1 && index > len)) { throw DbException.getInvalidValueException("index (1.." 
+ len + ')', index); } int rem = len - (int) index + 1; diff --git a/h2/src/main/org/h2/jdbc/JdbcBatchUpdateException.java b/h2/src/main/org/h2/jdbc/JdbcBatchUpdateException.java index 0fbd67802c..e8040c8a82 100644 --- a/h2/src/main/org/h2/jdbc/JdbcBatchUpdateException.java +++ b/h2/src/main/org/h2/jdbc/JdbcBatchUpdateException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -13,18 +13,30 @@ /** * Represents a batch update database exception. */ -public class JdbcBatchUpdateException extends BatchUpdateException { +public final class JdbcBatchUpdateException extends BatchUpdateException { private static final long serialVersionUID = 1L; /** * INTERNAL + * @param next exception + * @param updateCounts affected record counts */ JdbcBatchUpdateException(SQLException next, int[] updateCounts) { super(next.getMessage(), next.getSQLState(), next.getErrorCode(), updateCounts); setNextException(next); } + /** + * INTERNAL + * @param next exception + * @param updateCounts affected record counts + */ + JdbcBatchUpdateException(SQLException next, long[] updateCounts) { + super(next.getMessage(), next.getSQLState(), next.getErrorCode(), updateCounts, null); + setNextException(next); + } + /** * INTERNAL */ diff --git a/h2/src/main/org/h2/jdbc/JdbcBlob.java b/h2/src/main/org/h2/jdbc/JdbcBlob.java index ed32317a5e..b6a49b1e38 100644 --- a/h2/src/main/org/h2/jdbc/JdbcBlob.java +++ b/h2/src/main/org/h2/jdbc/JdbcBlob.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -25,10 +25,14 @@ /** * Represents a BLOB value. */ -public class JdbcBlob extends JdbcLob implements Blob { +public final class JdbcBlob extends JdbcLob implements Blob { /** * INTERNAL + * @param conn it belongs to + * @param value of + * @param state of the LOB + * @param id of the trace object */ public JdbcBlob(JdbcConnection conn, Value value, State state, int id) { super(conn, value, state, TraceObject.BLOB, id); @@ -77,7 +81,7 @@ public void truncate(long len) throws SQLException { public byte[] getBytes(long pos, int length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getBytes("+pos+", "+length+");"); + debugCode("getBytes(" + pos + ", " + length + ')'); } checkReadable(); ByteArrayOutputStream out = new ByteArrayOutputStream(); @@ -107,7 +111,7 @@ public int setBytes(long pos, byte[] bytes) throws SQLException { } try { if (isDebugEnabled()) { - debugCode("setBytes("+pos+", "+quoteBytes(bytes)+");"); + debugCode("setBytes(" + pos + ", " + quoteBytes(bytes) + ')'); } checkEditable(); if (pos != 1) { @@ -137,7 +141,7 @@ public int setBytes(long pos, byte[] bytes, int offset, int len) } try { if (isDebugEnabled()) { - debugCode("setBytes(" + pos + ", " + quoteBytes(bytes) + ", " + offset + ", " + len + ");"); + debugCode("setBytes(" + pos + ", " + quoteBytes(bytes) + ", " + offset + ", " + len + ')'); } checkEditable(); if (pos != 1) { @@ -169,7 +173,7 @@ public InputStream getBinaryStream() throws SQLException { public OutputStream setBinaryStream(long pos) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBinaryStream("+pos+");"); + debugCodeCall("setBinaryStream", pos); } checkEditable(); if (pos != 1) { @@ -201,7 +205,7 @@ public void call() { @Override public long position(byte[] pattern, long start) throws SQLException { if (isDebugEnabled()) { - 
debugCode("position("+quoteBytes(pattern)+", "+start+");"); + debugCode("position(" + quoteBytes(pattern) + ", " + start + ')'); } if (Constants.BLOB_SEARCH) { try { @@ -256,7 +260,7 @@ public long position(byte[] pattern, long start) throws SQLException { @Override public long position(Blob blobPattern, long start) throws SQLException { if (isDebugEnabled()) { - debugCode("position(blobPattern, "+start+");"); + debugCode("position(blobPattern, " + start + ')'); } if (Constants.BLOB_SEARCH) { try { @@ -292,7 +296,7 @@ public long position(Blob blobPattern, long start) throws SQLException { public InputStream getBinaryStream(long pos, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getBinaryStream(" + pos + ", " + length + ");"); + debugCode("getBinaryStream(" + pos + ", " + length + ')'); } checkReadable(); if (state == State.NEW) { diff --git a/h2/src/main/org/h2/jdbc/JdbcCallableStatement.java b/h2/src/main/org/h2/jdbc/JdbcCallableStatement.java index 70664ddedb..6541722bbb 100644 --- a/h2/src/main/org/h2/jdbc/JdbcCallableStatement.java +++ b/h2/src/main/org/h2/jdbc/JdbcCallableStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -19,6 +19,7 @@ import java.sql.ResultSetMetaData; import java.sql.RowId; import java.sql.SQLException; +import java.sql.SQLType; import java.sql.SQLXML; import java.sql.Time; import java.sql.Timestamp; @@ -34,20 +35,37 @@ /** * Represents a callable statement. - * + *

          + * Thread safety: the callable statement is not thread-safe. If the same + * callable statement is used by multiple threads access to it must be + * synchronized. The single synchronized block must include assignment of + * parameters, execution of the command and all operations with its result. + *

          + *
          + * synchronized (call) {
          + *     call.setInt(1, 10);
          + *     try (ResultSet rs = call.executeQuery()) {
          + *         while (rs.next) {
          + *             // Do something
          + *         }
          + *     }
          + * }
          + * synchronized (call) {
          + *     call.setInt(1, 15);
          + *     updateCount = call.executeUpdate();
          + * }
          + * 
          * @author Sergi Vladykin * @author Thomas Mueller */ -public class JdbcCallableStatement extends JdbcPreparedStatement implements - CallableStatement, JdbcCallableStatementBackwardsCompat { +public final class JdbcCallableStatement extends JdbcPreparedStatement implements CallableStatement { private BitSet outParameters; private int maxOutParameters; private HashMap namedParameters; - JdbcCallableStatement(JdbcConnection conn, String sql, int id, - int resultSetType, int resultSetConcurrency) { - super(conn, sql, id, resultSetType, resultSetConcurrency, false, false); + JdbcCallableStatement(JdbcConnection conn, String sql, int id, int resultSetType, int resultSetConcurrency) { + super(conn, sql, id, resultSetType, resultSetConcurrency, null); setTrace(session.getTrace(), TraceObject.CALLABLE_STATEMENT, id); } @@ -352,11 +370,16 @@ public byte[] getBytes(int parameterIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDate.class)} instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override public Date getDate(int parameterIndex) throws SQLException { @@ -366,11 +389,16 @@ public Date getDate(int parameterIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalTime.class)} instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override public Time getTime(int parameterIndex) throws SQLException { @@ -380,11 +408,16 @@ public Time getTime(int parameterIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDateTime.class)} instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override public Timestamp getTimestamp(int parameterIndex) throws SQLException { @@ -484,12 +517,17 @@ public Array getArray(int parameterIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDate.class)} instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override public Date getDate(int parameterIndex, Calendar cal) throws SQLException { @@ -500,12 +538,17 @@ public Date getDate(int parameterIndex, Calendar cal) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalTime.class)} instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override public Time getTime(int parameterIndex, Calendar cal) throws SQLException { @@ -516,16 +559,20 @@ public Time getTime(int parameterIndex, Calendar cal) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDateTime.class)} instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override - public Timestamp getTimestamp(int parameterIndex, Calendar cal) - throws SQLException { + public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException { checkRegistered(parameterIndex); return getOpenResultSet().getTimestamp(parameterIndex, cal); } @@ -541,28 +588,37 @@ public URL getURL(String parameterName) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalDateTime.class)} instead. + *

          * * @param parameterName the parameter name * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override - public Timestamp getTimestamp(String parameterName, Calendar cal) - throws SQLException { + public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException { return getTimestamp(getIndexForName(parameterName), cal); } /** * Returns the value of the specified column as a java.sql.Time using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalTime.class)} instead. + *

          * * @param parameterName the parameter name * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override public Time getTime(String parameterName, Calendar cal) throws SQLException { @@ -572,12 +628,17 @@ public Time getTime(String parameterName, Calendar cal) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalDate.class)} instead. + *

          * * @param parameterName the parameter name * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override public Date getDate(String parameterName, Calendar cal) throws SQLException { @@ -670,11 +731,16 @@ public Object getObject(String parameterName) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalDateTime.class)} instead. + *

          * * @param parameterName the parameter name * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override public Timestamp getTimestamp(String parameterName) throws SQLException { @@ -683,11 +749,16 @@ public Timestamp getTimestamp(String parameterName) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalTime.class)} instead. + *

          * * @param parameterName the parameter name * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override public Time getTime(String parameterName) throws SQLException { @@ -696,11 +767,16 @@ public Time getTime(String parameterName) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalDate.class)} instead. + *

          * * @param parameterName the parameter name * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override public Date getDate(String parameterName) throws SQLException { @@ -1014,45 +1090,60 @@ public void setNull(String parameterName, int sqlType) throws SQLException { /** * Sets the timestamp using a specified time zone. The value will be * converted to the local time zone. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

          * * @param parameterName the parameter name * @param x the value * @param cal the calendar * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override - public void setTimestamp(String parameterName, Timestamp x, Calendar cal) - throws SQLException { + public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException { setTimestamp(getIndexForName(parameterName), x, cal); } /** * Sets the time using a specified time zone. The value will be converted to * the local time zone. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with {@link java.time.LocalTime} + * parameter instead. + *

          * * @param parameterName the parameter name * @param x the value * @param cal the calendar * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override - public void setTime(String parameterName, Time x, Calendar cal) - throws SQLException { + public void setTime(String parameterName, Time x, Calendar cal) throws SQLException { setTime(getIndexForName(parameterName), x, cal); } /** * Sets the date using a specified time zone. The value will be converted to * the local time zone. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with {@link java.time.LocalDate} + * parameter instead. + *

          * * @param parameterName the parameter name * @param x the value * @param cal the calendar * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override - public void setDate(String parameterName, Date x, Calendar cal) - throws SQLException { + public void setDate(String parameterName, Date x, Calendar cal) throws SQLException { setDate(getIndexForName(parameterName), x, cal); } @@ -1118,6 +1209,38 @@ public void setObject(String parameterName, Object x, int targetSqlType, setObject(getIndexForName(parameterName), x, targetSqlType, scale); } + /** + * Sets the value of a parameter. The object is converted, if required, to + * the specified data type before sending to the database. + * Objects of unknown classes are serialized (on the client side). + * + * @param parameterName the parameter name + * @param x the value, null is allowed + * @param targetSqlType the type + * @throws SQLException if this object is closed + */ + @Override + public void setObject(String parameterName, Object x, SQLType targetSqlType) throws SQLException { + setObject(getIndexForName(parameterName), x, targetSqlType); + } + + /** + * Sets the value of a parameter. The object is converted, if required, to + * the specified data type before sending to the database. + * Objects of unknown classes are serialized (on the client side). + * + * @param parameterName the parameter name + * @param x the value, null is allowed + * @param targetSqlType the type + * @param scaleOrLength is ignored + * @throws SQLException if this object is closed + */ + @Override + public void setObject(String parameterName, Object x, SQLType targetSqlType, int scaleOrLength) + throws SQLException { + setObject(getIndexForName(parameterName), x, targetSqlType, scaleOrLength); + } + /** * Sets the value of a parameter as an input stream. * This method does not close the stream. 
@@ -1152,23 +1275,34 @@ public void setAsciiStream(String parameterName, /** * Sets the value of a parameter. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

          * * @param parameterName the parameter name * @param x the value * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override - public void setTimestamp(String parameterName, Timestamp x) - throws SQLException { + public void setTimestamp(String parameterName, Timestamp x) throws SQLException { setTimestamp(getIndexForName(parameterName), x); } /** * Sets the time using a specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with {@link java.time.LocalTime} + * parameter instead. + *

          * * @param parameterName the parameter name * @param x the value * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override public void setTime(String parameterName, Time x) throws SQLException { @@ -1177,10 +1311,16 @@ public void setTime(String parameterName, Time x) throws SQLException { /** * Sets the value of a parameter. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with {@link java.time.LocalDate} + * parameter instead. + *

          * * @param parameterName the parameter name * @param x the value * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override public void setDate(String parameterName, Date x) throws SQLException { @@ -1606,10 +1746,14 @@ public void setSQLXML(String parameterName, SQLXML x) } /** - * [Not supported] + * Returns the value of the specified column as a Java object of the + * specified type. * * @param parameterIndex the parameter index (1, 2, ...) * @param type the class of the returned value + * @return the value + * @throws SQLException if the column is not found or if this object is + * closed */ @Override public T getObject(int parameterIndex, Class type) throws SQLException { @@ -1617,10 +1761,14 @@ public T getObject(int parameterIndex, Class type) throws SQLException { } /** - * [Not supported] + * Returns the value of the specified column as a Java object of the + * specified type. * * @param parameterName the parameter name * @param type the class of the returned value + * @return the value + * @throws SQLException if the column is not found or if this object is + * closed */ @Override public T getObject(String parameterName, Class type) throws SQLException { diff --git a/h2/src/main/org/h2/jdbc/JdbcCallableStatementBackwardsCompat.java b/h2/src/main/org/h2/jdbc/JdbcCallableStatementBackwardsCompat.java deleted file mode 100644 index 5ca85d4410..0000000000 --- a/h2/src/main/org/h2/jdbc/JdbcCallableStatementBackwardsCompat.java +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jdbc; - -/** - * Allows us to compile on older platforms, while still implementing the methods - * from the newer JDBC API. 
- */ -public interface JdbcCallableStatementBackwardsCompat { - - // compatibility interface - -} diff --git a/h2/src/main/org/h2/jdbc/JdbcClob.java b/h2/src/main/org/h2/jdbc/JdbcClob.java index 674e4f11c8..d23dbfafc7 100644 --- a/h2/src/main/org/h2/jdbc/JdbcClob.java +++ b/h2/src/main/org/h2/jdbc/JdbcClob.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -25,10 +25,14 @@ /** * Represents a CLOB value. */ -public class JdbcClob extends JdbcLob implements NClob { +public final class JdbcClob extends JdbcLob implements NClob { /** * INTERNAL + * @param conn it belongs to + * @param value of + * @param state of the LOB + * @param id of the trace object */ public JdbcClob(JdbcConnection conn, Value value, State state, int id) { super(conn, value, state, TraceObject.CLOB, id); @@ -108,7 +112,7 @@ public Reader getCharacterStream() throws SQLException { public Writer setCharacterStream(long pos) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setCharacterStream(" + pos + ");"); + debugCodeCall("setCharacterStream", pos); } checkEditable(); if (pos != 1) { @@ -132,7 +136,7 @@ public Writer setCharacterStream(long pos) throws SQLException { public String getSubString(long pos, int length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getSubString(" + pos + ", " + length + ");"); + debugCode("getSubString(" + pos + ", " + length + ')'); } checkReadable(); if (pos < 1) { @@ -161,12 +165,13 @@ public String getSubString(long pos, int length) throws SQLException { * @param pos where to start writing (the first character is at position 1) * @param str the string to add * @return the length of the added text + * @throws SQLException on failure */ 
@Override public int setString(long pos, String str) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setString(" + pos + ", " + quote(str) + ");"); + debugCode("setString(" + pos + ", " + quote(str) + ')'); } checkEditable(); if (pos != 1) { @@ -197,7 +202,7 @@ public int setString(long pos, String str, int offset, int len) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setString(" + pos + ", " + quote(str) + ", " + offset + ", " + len + ");"); + debugCode("setString(" + pos + ", " + quote(str) + ", " + offset + ", " + len + ')'); } checkEditable(); if (pos != 1) { @@ -239,7 +244,7 @@ public long position(Clob clobPattern, long start) throws SQLException { public Reader getCharacterStream(long pos, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getCharacterStream(" + pos + ", " + length + ");"); + debugCode("getCharacterStream(" + pos + ", " + length + ')'); } checkReadable(); if (state == State.NEW) { diff --git a/h2/src/main/org/h2/jdbc/JdbcConnection.java b/h2/src/main/org/h2/jdbc/JdbcConnection.java index 652bfe8a90..9834e7a03f 100644 --- a/h2/src/main/org/h2/jdbc/JdbcConnection.java +++ b/h2/src/main/org/h2/jdbc/JdbcConnection.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, and the - * EPL 1.0 (http://h2database.com/html/license.html). Initial Developer: H2 + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, and the + * EPL 1.0 (https://h2database.com/html/license.html). 
Initial Developer: H2 * Group */ package org.h2.jdbc; @@ -10,7 +10,6 @@ import java.sql.Array; import java.sql.Blob; import java.sql.CallableStatement; -import java.sql.ClientInfoStatus; import java.sql.Clob; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -34,85 +33,43 @@ import java.util.regex.Pattern; import org.h2.api.ErrorCode; +import org.h2.api.JavaObjectSerializer; import org.h2.command.CommandInterface; +import org.h2.engine.CastDataProvider; import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; +import org.h2.engine.IsolationLevel; import org.h2.engine.Mode; -import org.h2.engine.Mode.ModeEnum; -import org.h2.engine.SessionInterface; +import org.h2.engine.Session; +import org.h2.engine.Session.StaticSettings; import org.h2.engine.SessionRemote; import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.message.TraceObject; import org.h2.result.ResultInterface; import org.h2.util.CloseWatcher; -import org.h2.util.JdbcUtils; +import org.h2.util.TimeZoneProvider; import org.h2.value.CompareMode; -import org.h2.value.DataType; import org.h2.value.Value; -import org.h2.value.ValueBytes; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; -import org.h2.value.ValueString; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; /** - *

          * Represents a connection (session) to a database. - *

          *

          * Thread safety: the connection is thread-safe, because access is synchronized. - * However, for compatibility with other databases, a connection should only be - * used in one thread at any time. + * Different statements from the same connection may try to execute their + * commands in parallel, but they will be executed sequentially. If real + * concurrent execution of these commands is needed, different connections + * should be used. *

          */ -public class JdbcConnection extends TraceObject implements Connection, JdbcConnectionBackwardsCompat { - - /** - * Database settings. - */ - public static final class Settings { - - /** - * The database mode. - */ - public final Mode mode; - - /** - * Whether unquoted identifiers are converted to upper case. - */ - public final boolean databaseToUpper; - - /** - * Whether unquoted identifiers are converted to lower case. - */ - public final boolean databaseToLower; - - /** - * Whether all identifiers are case insensitive. - */ - public final boolean caseInsensitiveIdentifiers; - - /** - * Creates new instance of database settings. - * - * @param mode - * the database mode - * @param databaseToUpper - * whether unquoted identifiers are converted to upper case - * @param databaseToLower - * whether unquoted identifiers are converted to lower case - * @param caseInsensitiveIdentifiers - * whether all identifiers are case insensitive - */ - Settings(Mode mode, boolean databaseToUpper, boolean databaseToLower, boolean caseInsensitiveIdentifiers) { - this.mode = mode; - this.databaseToUpper = databaseToUpper; - this.databaseToLower = databaseToLower; - this.caseInsensitiveIdentifiers = caseInsensitiveIdentifiers; - } - - } +public class JdbcConnection extends TraceObject implements Connection, JdbcConnectionBackwardsCompat, + CastDataProvider { private static final String NUM_SERVERS = "numServers"; private static final String PREFIX_SERVER = "server"; @@ -125,10 +82,9 @@ public static final class Settings { // ResultSet.HOLD_CURSORS_OVER_COMMIT private int holdability = 1; - private SessionInterface session; + private Session session; private CommandInterface commit, rollback; private CommandInterface getReadOnly, getGeneratedKeys; - private CommandInterface setLockMode, getLockMode; private CommandInterface setQueryTimeout, getQueryTimeout; private int savepointId; @@ -138,47 +94,41 @@ public static final class Settings { private int queryTimeoutCache = 
-1; private Map clientInfo; - private volatile Settings settings; - private final boolean scopeGeneratedKeys; /** * INTERNAL - */ - public JdbcConnection(String url, Properties info) throws SQLException { - this(new ConnectionInfo(url, info), true); - } - - /** - * INTERNAL - */ - /* * the session closable object does not leak as Eclipse warns - due to the * CloseWatcher. + * @param url of this connection + * @param info of this connection + * @param user of this connection + * @param password for the user + * @param forbidCreation whether database creation is forbidden + * @throws SQLException on failure */ @SuppressWarnings("resource") - public JdbcConnection(ConnectionInfo ci, boolean useBaseDir) + public JdbcConnection(String url, Properties info, String user, Object password, boolean forbidCreation) throws SQLException { try { - if (useBaseDir) { - String baseDir = SysProperties.getBaseDir(); - if (baseDir != null) { - ci.setBaseDir(baseDir); - } + ConnectionInfo ci = new ConnectionInfo(url, info, user, password); + if (forbidCreation) { + ci.setProperty("FORBID_CREATION", "TRUE"); + } + String baseDir = SysProperties.getBaseDir(); + if (baseDir != null) { + ci.setBaseDir(baseDir); } // this will return an embedded or server connection session = new SessionRemote(ci).connectEmbeddedOrServer(false); - trace = session.getTrace(); - int id = getNextId(TraceObject.CONNECTION); - setTrace(trace, TraceObject.CONNECTION, id); + setTrace(session.getTrace(), TraceObject.CONNECTION, getNextId(TraceObject.CONNECTION)); this.user = ci.getUserName(); if (isInfoEnabled()) { trace.infoCode("Connection " + getTraceObjectName() + " = DriverManager.getConnection(" - + quote(ci.getOriginalURL()) + ", " + quote(user) + + quote(ci.getOriginalURL()) + ", " + quote(this.user) + ", \"\");"); } this.url = ci.getURL(); - scopeGeneratedKeys = ci.getProperty("SCOPE_GENERATED_KEYS", false); closeOld(); watcher = CloseWatcher.register(this, session, keepOpenStackTrace); } catch (Exception 
e) { @@ -188,22 +138,19 @@ public JdbcConnection(ConnectionInfo ci, boolean useBaseDir) /** * INTERNAL + * @param clone connection to clone */ public JdbcConnection(JdbcConnection clone) { this.session = clone.session; - trace = session.getTrace(); - int id = getNextId(TraceObject.CONNECTION); - setTrace(trace, TraceObject.CONNECTION, id); + setTrace(session.getTrace(), TraceObject.CONNECTION, getNextId(TraceObject.CONNECTION)); this.user = clone.user; this.url = clone.url; this.catalog = clone.catalog; this.commit = clone.commit; this.getGeneratedKeys = clone.getGeneratedKeys; - this.getLockMode = clone.getLockMode; this.getQueryTimeout = clone.getQueryTimeout; this.getReadOnly = clone.getReadOnly; this.rollback = clone.rollback; - this.scopeGeneratedKeys = clone.scopeGeneratedKeys; this.watcher = null; if (clone.clientInfo != null) { this.clientInfo = new HashMap<>(clone.clientInfo); @@ -212,15 +159,15 @@ public JdbcConnection(JdbcConnection clone) { /** * INTERNAL + * @param session of this connection + * @param user of this connection + * @param url of this connection */ - public JdbcConnection(SessionInterface session, String user, String url) { + public JdbcConnection(Session session, String user, String url) { this.session = session; - trace = session.getTrace(); - int id = getNextId(TraceObject.CONNECTION); - setTrace(trace, TraceObject.CONNECTION, id); + setTrace(session.getTrace(), TraceObject.CONNECTION, getNextId(TraceObject.CONNECTION)); this.user = user; this.url = url; - this.scopeGeneratedKeys = false; this.watcher = null; } @@ -255,13 +202,9 @@ private void closeOld() { public Statement createStatement() throws SQLException { try { int id = getNextId(TraceObject.STATEMENT); - if (isDebugEnabled()) { - debugCodeAssign("Statement", TraceObject.STATEMENT, id, - "createStatement()"); - } + debugCodeAssign("Statement", TraceObject.STATEMENT, id, "createStatement()"); checkClosed(); - return new JdbcStatement(this, id, ResultSet.TYPE_FORWARD_ONLY, - 
Constants.DEFAULT_RESULT_SET_CONCURRENCY, false); + return new JdbcStatement(this, id, ResultSet.TYPE_FORWARD_ONLY, Constants.DEFAULT_RESULT_SET_CONCURRENCY); } catch (Exception e) { throw logAndConvert(e); } @@ -283,13 +226,11 @@ public Statement createStatement(int resultSetType, int id = getNextId(TraceObject.STATEMENT); if (isDebugEnabled()) { debugCodeAssign("Statement", TraceObject.STATEMENT, id, - "createStatement(" + resultSetType + ", " - + resultSetConcurrency + ")"); + "createStatement(" + resultSetType + ", " + resultSetConcurrency + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkClosed(); - return new JdbcStatement(this, id, resultSetType, - resultSetConcurrency, false); + return new JdbcStatement(this, id, resultSetType, resultSetConcurrency); } catch (Exception e) { throw logAndConvert(e); } @@ -316,13 +257,12 @@ public Statement createStatement(int resultSetType, debugCodeAssign("Statement", TraceObject.STATEMENT, id, "createStatement(" + resultSetType + ", " + resultSetConcurrency + ", " - + resultSetHoldability + ")"); + + resultSetHoldability + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkHoldability(resultSetHoldability); checkClosed(); - return new JdbcStatement(this, id, resultSetType, - resultSetConcurrency, false); + return new JdbcStatement(this, id, resultSetType, resultSetConcurrency); } catch (Exception e) { throw logAndConvert(e); } @@ -340,41 +280,13 @@ public PreparedStatement prepareStatement(String sql) throws SQLException { try { int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("PreparedStatement", - TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ")"); - } - checkClosed(); - sql = translateSQL(sql); - return new JdbcPreparedStatement(this, sql, id, - ResultSet.TYPE_FORWARD_ONLY, - Constants.DEFAULT_RESULT_SET_CONCURRENCY, false, false); - } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * 
Prepare a statement that will automatically close when the result set is - * closed. This method is used to retrieve database meta data. - * - * @param sql the SQL statement - * @return the prepared statement - */ - PreparedStatement prepareAutoCloseStatement(String sql) - throws SQLException { - try { - int id = getNextId(TraceObject.PREPARED_STATEMENT); - if (isDebugEnabled()) { - debugCodeAssign("PreparedStatement", - TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ")"); + debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, + "prepareStatement(" + quote(sql) + ')'); } checkClosed(); sql = translateSQL(sql); - return new JdbcPreparedStatement(this, sql, id, - ResultSet.TYPE_FORWARD_ONLY, - Constants.DEFAULT_RESULT_SET_CONCURRENCY, true, false); + return new JdbcPreparedStatement(this, sql, id, ResultSet.TYPE_FORWARD_ONLY, + Constants.DEFAULT_RESULT_SET_CONCURRENCY, null); } catch (Exception e) { throw logAndConvert(e); } @@ -390,10 +302,7 @@ PreparedStatement prepareAutoCloseStatement(String sql) public DatabaseMetaData getMetaData() throws SQLException { try { int id = getNextId(TraceObject.DATABASE_META_DATA); - if (isDebugEnabled()) { - debugCodeAssign("DatabaseMetaData", - TraceObject.DATABASE_META_DATA, id, "getMetaData()"); - } + debugCodeAssign("DatabaseMetaData", TraceObject.DATABASE_META_DATA, id, "getMetaData()"); checkClosed(); return new JdbcDatabaseMetaData(this, trace, id); } catch (Exception e) { @@ -403,8 +312,9 @@ public DatabaseMetaData getMetaData() throws SQLException { /** * INTERNAL + * @return session */ - public SessionInterface getSession() { + public Session getSession() { return session; } @@ -427,7 +337,7 @@ public synchronized void close() throws SQLException { if (executingStatement != null) { try { executingStatement.cancel(); - } catch (NullPointerException e) { + } catch (NullPointerException | SQLException e) { // ignore } } @@ -435,21 +345,15 @@ public synchronized void close() 
throws SQLException { if (!session.isClosed()) { try { if (session.hasPendingTransaction()) { - // roll back unless that would require to - // re-connect (the transaction can't be rolled - // back after re-connecting) - if (!session.isReconnectNeeded(true)) { - try { - rollbackInternal(); - } catch (DbException e) { - // ignore if the connection is broken - // right now - if (e.getErrorCode() != ErrorCode.CONNECTION_BROKEN_1) { - throw e; - } + try { + rollbackInternal(); + } catch (DbException e) { + // ignore if the connection is broken or database shut down + if (e.getErrorCode() != ErrorCode.CONNECTION_BROKEN_1 && + e.getErrorCode() != ErrorCode.DATABASE_IS_CLOSED) { + throw e; } } - session.afterWriting(); } closePreparedCommands(); } finally { @@ -470,8 +374,6 @@ private void closePreparedCommands() { rollback = closeAndSetNull(rollback); getReadOnly = closeAndSetNull(getReadOnly); getGeneratedKeys = closeAndSetNull(getGeneratedKeys); - getLockMode = closeAndSetNull(getLockMode); - setLockMode = closeAndSetNull(setLockMode); getQueryTimeout = closeAndSetNull(getQueryTimeout); setQueryTimeout = closeAndSetNull(setQueryTimeout); } @@ -495,7 +397,7 @@ public synchronized void setAutoCommit(boolean autoCommit) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setAutoCommit(" + autoCommit + ");"); + debugCode("setAutoCommit(" + autoCommit + ')'); } checkClosed(); synchronized (session) { @@ -536,13 +438,13 @@ public synchronized boolean getAutoCommit() throws SQLException { public synchronized void commit() throws SQLException { try { debugCodeCall("commit"); - checkClosedForWrite(); - try { - commit = prepareCommand("COMMIT", commit); - commit.executeUpdate(false); - } finally { - afterWriting(); + checkClosed(); + if (SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT + && getAutoCommit()) { + throw DbException.get(ErrorCode.METHOD_DISABLED_ON_AUTOCOMMIT_TRUE, "commit()"); } + commit = prepareCommand("COMMIT", commit); + commit.executeUpdate(null); } 
catch (Exception e) { throw logAndConvert(e); } @@ -558,12 +460,12 @@ public synchronized void commit() throws SQLException { public synchronized void rollback() throws SQLException { try { debugCodeCall("rollback"); - checkClosedForWrite(); - try { - rollbackInternal(); - } finally { - afterWriting(); + checkClosed(); + if (SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT + && getAutoCommit()) { + throw DbException.get(ErrorCode.METHOD_DISABLED_ON_AUTOCOMMIT_TRUE, "rollback()"); } + rollbackInternal(); } catch (Exception e) { throw logAndConvert(e); } @@ -613,7 +515,7 @@ public String nativeSQL(String sql) throws SQLException { public void setReadOnly(boolean readOnly) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setReadOnly(" + readOnly + ");"); + debugCode("setReadOnly(" + readOnly + ')'); } checkClosed(); } catch (Exception e) { @@ -728,16 +630,13 @@ public PreparedStatement prepareStatement(String sql, int resultSetType, try { int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("PreparedStatement", - TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ", " + resultSetType - + ", " + resultSetConcurrency + ")"); + debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, + "prepareStatement(" + quote(sql) + ", " + resultSetType + ", " + resultSetConcurrency + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkClosed(); sql = translateSQL(sql); - return new JdbcPreparedStatement(this, sql, id, resultSetType, - resultSetConcurrency, false, false); + return new JdbcPreparedStatement(this, sql, id, resultSetType, resultSetConcurrency, null); } catch (Exception e) { throw logAndConvert(e); } @@ -746,53 +645,26 @@ public PreparedStatement prepareStatement(String sql, int resultSetType, /** * Changes the current transaction isolation level. 
Calling this method will * commit an open transaction, even if the new level is the same as the old - * one, except if the level is not supported. Internally, this method calls - * SET LOCK_MODE, which affects all connections. The following isolation - * levels are supported: - *
            - *
          • Connection.TRANSACTION_READ_UNCOMMITTED = SET LOCK_MODE 0: no locking - * (should only be used for testing).
          • - *
          • Connection.TRANSACTION_SERIALIZABLE = SET LOCK_MODE 1: table level - * locking.
          • - *
          • Connection.TRANSACTION_READ_COMMITTED = SET LOCK_MODE 3: table level - * locking, but read locks are released immediately (default).
          • - *
          - * This setting is not persistent. Please note that using - * TRANSACTION_READ_UNCOMMITTED while at the same time using multiple - * connections may result in inconsistent transactions. + * one. * * @param level the new transaction isolation level: * Connection.TRANSACTION_READ_UNCOMMITTED, - * Connection.TRANSACTION_READ_COMMITTED, or + * Connection.TRANSACTION_READ_COMMITTED, + * Connection.TRANSACTION_REPEATABLE_READ, + * 6 (SNAPSHOT), or * Connection.TRANSACTION_SERIALIZABLE * @throws SQLException if the connection is closed or the isolation level - * is not supported + * is not valid */ @Override public void setTransactionIsolation(int level) throws SQLException { try { debugCodeCall("setTransactionIsolation", level); checkClosed(); - int lockMode; - switch (level) { - case Connection.TRANSACTION_READ_UNCOMMITTED: - lockMode = Constants.LOCK_MODE_OFF; - break; - case Connection.TRANSACTION_READ_COMMITTED: - lockMode = Constants.LOCK_MODE_READ_COMMITTED; - break; - case Connection.TRANSACTION_REPEATABLE_READ: - case Connection.TRANSACTION_SERIALIZABLE: - lockMode = Constants.LOCK_MODE_TABLE; - break; - default: - throw DbException.getInvalidValueException("level", level); + if (!getAutoCommit()) { + commit(); } - commit(); - setLockMode = prepareCommand("SET LOCK_MODE ?", setLockMode); - setLockMode.getParameters().get(0).setValue(ValueInt.get(lockMode), - false); - setLockMode.executeUpdate(false); + session.setIsolationLevel(IsolationLevel.fromJdbc(level)); } catch (Exception e) { throw logAndConvert(e); } @@ -801,15 +673,15 @@ public void setTransactionIsolation(int level) throws SQLException { /** * INTERNAL */ - public void setQueryTimeout(int seconds) throws SQLException { + void setQueryTimeout(int seconds) throws SQLException { try { debugCodeCall("setQueryTimeout", seconds); checkClosed(); setQueryTimeout = prepareCommand("SET QUERY_TIMEOUT ?", setQueryTimeout); setQueryTimeout.getParameters().get(0) - .setValue(ValueInt.get(seconds * 1000), 
false); - setQueryTimeout.executeUpdate(false); + .setValue(ValueInteger.get(seconds * 1000), false); + setQueryTimeout.executeUpdate(null); queryTimeoutCache = seconds; } catch (Exception e) { throw logAndConvert(e); @@ -823,12 +695,11 @@ int getQueryTimeout() throws SQLException { try { if (queryTimeoutCache == -1) { checkClosed(); - getQueryTimeout = prepareCommand( - "SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS " - + "WHERE NAME=?", - getQueryTimeout); + getQueryTimeout = prepareCommand(!session.isOldInformationSchema() + ? "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME=?" + : "SELECT `VALUE` FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME=?", getQueryTimeout); getQueryTimeout.getParameters().get(0) - .setValue(ValueString.get("QUERY_TIMEOUT"), false); + .setValue(ValueVarchar.get("QUERY_TIMEOUT"), false); ResultInterface result = getQueryTimeout.executeQuery(0, false); result.next(); int queryTimeout = result.currentRow()[0].getInt(); @@ -849,7 +720,7 @@ int getQueryTimeout() throws SQLException { /** * Returns the current transaction isolation level. * - * @return the isolation level. 
+ * @return the isolation level * @throws SQLException if the connection is closed */ @Override @@ -857,27 +728,7 @@ public int getTransactionIsolation() throws SQLException { try { debugCodeCall("getTransactionIsolation"); checkClosed(); - getLockMode = prepareCommand("CALL LOCK_MODE()", getLockMode); - ResultInterface result = getLockMode.executeQuery(0, false); - result.next(); - int lockMode = result.currentRow()[0].getInt(); - result.close(); - int transactionIsolationLevel; - switch (lockMode) { - case Constants.LOCK_MODE_OFF: - transactionIsolationLevel = Connection.TRANSACTION_READ_UNCOMMITTED; - break; - case Constants.LOCK_MODE_READ_COMMITTED: - transactionIsolationLevel = Connection.TRANSACTION_READ_COMMITTED; - break; - case Constants.LOCK_MODE_TABLE: - case Constants.LOCK_MODE_TABLE_GC: - transactionIsolationLevel = Connection.TRANSACTION_SERIALIZABLE; - break; - default: - throw DbException.throwInternalError("lockMode:" + lockMode); - } - return transactionIsolationLevel; + return session.getIsolationLevel().getJdbc(); } catch (Exception e) { throw logAndConvert(e); } @@ -945,7 +796,7 @@ public Map> getTypeMap() throws SQLException { public void setTypeMap(Map> map) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setTypeMap(" + quoteMap(map) + ");"); + debugCode("setTypeMap(" + quoteMap(map) + ')'); } checkMap(map); } catch (Exception e) { @@ -966,9 +817,8 @@ public CallableStatement prepareCall(String sql) throws SQLException { try { int id = getNextId(TraceObject.CALLABLE_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("CallableStatement", - TraceObject.CALLABLE_STATEMENT, id, - "prepareCall(" + quote(sql) + ")"); + debugCodeAssign("CallableStatement", TraceObject.CALLABLE_STATEMENT, id, + "prepareCall(" + quote(sql) + ')'); } checkClosed(); sql = translateSQL(sql); @@ -997,10 +847,8 @@ public CallableStatement prepareCall(String sql, int resultSetType, try { int id = getNextId(TraceObject.CALLABLE_STATEMENT); if 
(isDebugEnabled()) { - debugCodeAssign("CallableStatement", - TraceObject.CALLABLE_STATEMENT, id, - "prepareCall(" + quote(sql) + ", " + resultSetType - + ", " + resultSetConcurrency + ")"); + debugCodeAssign("CallableStatement", TraceObject.CALLABLE_STATEMENT, id, + "prepareCall(" + quote(sql) + ", " + resultSetType + ", " + resultSetConcurrency + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkClosed(); @@ -1031,11 +879,9 @@ public CallableStatement prepareCall(String sql, int resultSetType, try { int id = getNextId(TraceObject.CALLABLE_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("CallableStatement", - TraceObject.CALLABLE_STATEMENT, id, - "prepareCall(" + quote(sql) + ", " + resultSetType - + ", " + resultSetConcurrency + ", " - + resultSetHoldability + ")"); + debugCodeAssign("CallableStatement", TraceObject.CALLABLE_STATEMENT, id, + "prepareCall(" + quote(sql) + ", " + resultSetType + ", " + resultSetConcurrency + ", " + + resultSetHoldability + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkHoldability(resultSetHoldability); @@ -1057,15 +903,12 @@ public CallableStatement prepareCall(String sql, int resultSetType, public Savepoint setSavepoint() throws SQLException { try { int id = getNextId(TraceObject.SAVEPOINT); - if (isDebugEnabled()) { - debugCodeAssign("Savepoint", TraceObject.SAVEPOINT, id, - "setSavepoint()"); - } + debugCodeAssign("Savepoint", TraceObject.SAVEPOINT, id, "setSavepoint()"); checkClosed(); CommandInterface set = prepareCommand( "SAVEPOINT " + JdbcSavepoint.getName(null, savepointId), Integer.MAX_VALUE); - set.executeUpdate(false); + set.executeUpdate(null); JdbcSavepoint savepoint = new JdbcSavepoint(this, savepointId, null, trace, id); savepointId++; @@ -1086,16 +929,14 @@ public Savepoint setSavepoint(String name) throws SQLException { try { int id = getNextId(TraceObject.SAVEPOINT); if (isDebugEnabled()) { - debugCodeAssign("Savepoint", TraceObject.SAVEPOINT, id, - 
"setSavepoint(" + quote(name) + ")"); + debugCodeAssign("Savepoint", TraceObject.SAVEPOINT, id, "setSavepoint(" + quote(name) + ')'); } checkClosed(); CommandInterface set = prepareCommand( "SAVEPOINT " + JdbcSavepoint.getName(name, 0), Integer.MAX_VALUE); - set.executeUpdate(false); - return new JdbcSavepoint(this, 0, name, trace, - id); + set.executeUpdate(null); + return new JdbcSavepoint(this, 0, name, trace, id); } catch (Exception e) { throw logAndConvert(e); } @@ -1111,14 +952,10 @@ public void rollback(Savepoint savepoint) throws SQLException { try { JdbcSavepoint sp = convertSavepoint(savepoint); if (isDebugEnabled()) { - debugCode("rollback(" + sp.getTraceObjectName() + ");"); - } - checkClosedForWrite(); - try { - sp.rollback(); - } finally { - afterWriting(); + debugCode("rollback(" + sp.getTraceObjectName() + ')'); } + checkClosed(); + sp.rollback(); } catch (Exception e) { throw logAndConvert(e); } @@ -1132,7 +969,7 @@ public void rollback(Savepoint savepoint) throws SQLException { @Override public void releaseSavepoint(Savepoint savepoint) throws SQLException { try { - debugCode("releaseSavepoint(savepoint);"); + debugCode("releaseSavepoint(savepoint)"); checkClosed(); convertSavepoint(savepoint).release(); } catch (Exception e) { @@ -1167,18 +1004,15 @@ public PreparedStatement prepareStatement(String sql, int resultSetType, try { int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("PreparedStatement", - TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ", " + resultSetType - + ", " + resultSetConcurrency + ", " - + resultSetHoldability + ")"); + debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, + "prepareStatement(" + quote(sql) + ", " + resultSetType + ", " + resultSetConcurrency + ", " + + resultSetHoldability + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkHoldability(resultSetHoldability); checkClosed(); sql = translateSQL(sql); - 
return new JdbcPreparedStatement(this, sql, id, resultSetType, - resultSetConcurrency, false, false); + return new JdbcPreparedStatement(this, sql, id, resultSetType, resultSetConcurrency, null); } catch (Exception e) { throw logAndConvert(e); } @@ -1201,17 +1035,13 @@ public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) try { int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("PreparedStatement", - TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ", " - + autoGeneratedKeys + ");"); + debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, + "prepareStatement(" + quote(sql) + ", " + autoGeneratedKeys + ')'); } checkClosed(); sql = translateSQL(sql); - return new JdbcPreparedStatement(this, sql, id, - ResultSet.TYPE_FORWARD_ONLY, - Constants.DEFAULT_RESULT_SET_CONCURRENCY, false, - autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS); + return new JdbcPreparedStatement(this, sql, id, ResultSet.TYPE_FORWARD_ONLY, + Constants.DEFAULT_RESULT_SET_CONCURRENCY, autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS); } catch (Exception e) { throw logAndConvert(e); } @@ -1233,16 +1063,13 @@ public PreparedStatement prepareStatement(String sql, int[] columnIndexes) try { int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("PreparedStatement", - TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ", " - + quoteIntArray(columnIndexes) + ");"); + debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, + "prepareStatement(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ')'); } checkClosed(); sql = translateSQL(sql); - return new JdbcPreparedStatement(this, sql, id, - ResultSet.TYPE_FORWARD_ONLY, - Constants.DEFAULT_RESULT_SET_CONCURRENCY, false, columnIndexes); + return new JdbcPreparedStatement(this, sql, id, ResultSet.TYPE_FORWARD_ONLY, + Constants.DEFAULT_RESULT_SET_CONCURRENCY, 
columnIndexes); } catch (Exception e) { throw logAndConvert(e); } @@ -1264,16 +1091,13 @@ public PreparedStatement prepareStatement(String sql, String[] columnNames) try { int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("PreparedStatement", - TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ", " - + quoteArray(columnNames) + ");"); + debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, + "prepareStatement(" + quote(sql) + ", " + quoteArray(columnNames) + ')'); } checkClosed(); sql = translateSQL(sql); - return new JdbcPreparedStatement(this, sql, id, - ResultSet.TYPE_FORWARD_ONLY, - Constants.DEFAULT_RESULT_SET_CONCURRENCY, false, columnNames); + return new JdbcPreparedStatement(this, sql, id, ResultSet.TYPE_FORWARD_ONLY, + Constants.DEFAULT_RESULT_SET_CONCURRENCY, columnNames); } catch (Exception e) { throw logAndConvert(e); } @@ -1355,7 +1179,7 @@ private static int translateGetEnd(String sql, int i, char c) { return i; } default: - throw DbException.throwInternalError("c=" + c); + throw DbException.getInternalError("c=" + c); } } @@ -1382,12 +1206,13 @@ static String translateSQL(String sql, boolean escapeProcessing) { if (sql == null) { throw DbException.getInvalidValueException("SQL", null); } - if (!escapeProcessing) { - return sql; - } - if (sql.indexOf('{') < 0) { + if (!escapeProcessing || sql.indexOf('{') < 0) { return sql; } + return translateSQLImpl(sql); + } + + private static String translateSQLImpl(String sql) { int len = sql.length(); char[] chars = null; int level = 0; @@ -1537,55 +1362,18 @@ private static void checkHoldability(int resultSetHoldability) { } } - /** - * INTERNAL. Check if this connection is closed. The next operation is a - * read request. - * - * @throws DbException if the connection or session is closed - */ - protected void checkClosed() { - checkClosed(false); - } - - /** - * Check if this connection is closed. 
The next operation may be a write - * request. - * - * @throws DbException if the connection or session is closed - */ - private void checkClosedForWrite() { - checkClosed(true); - } - /** * INTERNAL. Check if this connection is closed. * - * @param write if the next operation is possibly writing * @throws DbException if the connection or session is closed */ - protected void checkClosed(boolean write) { + protected void checkClosed() { if (session == null) { throw DbException.get(ErrorCode.OBJECT_CLOSED); } if (session.isClosed()) { throw DbException.get(ErrorCode.DATABASE_CALLED_AT_SHUTDOWN); } - if (session.isReconnectNeeded(write)) { - trace.debug("reconnect"); - closePreparedCommands(); - session = session.reconnect(write); - trace = session.getTrace(); - } - } - - /** - * INTERNAL. Called after executing a command that could have written - * something. - */ - protected void afterWriting() { - if (session != null) { - session.afterWriting(); - } } String getURL() { @@ -1600,53 +1388,16 @@ String getUser() { private void rollbackInternal() { rollback = prepareCommand("ROLLBACK", rollback); - rollback.executeUpdate(false); - } - - /** - * INTERNAL - */ - public int getPowerOffCount() { - return (session == null || session.isClosed()) ? 
0 - : session.getPowerOffCount(); + rollback.executeUpdate(null); } /** * INTERNAL */ - public void setPowerOffCount(int count) { - if (session != null) { - session.setPowerOffCount(count); - } - } - - /** - * INTERNAL - */ - public void setExecutingStatement(Statement stat) { + void setExecutingStatement(Statement stat) { executingStatement = stat; } - /** - * INTERNAL - */ - boolean scopeGeneratedKeys() { - return scopeGeneratedKeys; - } - - /** - * INTERNAL - */ - JdbcResultSet getGeneratedKeys(JdbcStatement stat, int id) { - getGeneratedKeys = prepareCommand( - "SELECT SCOPE_IDENTITY() " - + "WHERE SCOPE_IDENTITY() IS NOT NULL", - getGeneratedKeys); - ResultInterface result = getGeneratedKeys.executeQuery(0, false); - return new JdbcResultSet(this, stat, getGeneratedKeys, result, - id, false, true, false); - } - /** * Create a new empty Clob object. * @@ -1657,8 +1408,8 @@ public Clob createClob() throws SQLException { try { int id = getNextId(TraceObject.CLOB); debugCodeAssign("Clob", TraceObject.CLOB, id, "createClob()"); - checkClosedForWrite(); - return new JdbcClob(this, ValueString.EMPTY, JdbcLob.State.NEW, id); + checkClosed(); + return new JdbcClob(this, ValueVarchar.EMPTY, JdbcLob.State.NEW, id); } catch (Exception e) { throw logAndConvert(e); } @@ -1674,8 +1425,8 @@ public Blob createBlob() throws SQLException { try { int id = getNextId(TraceObject.BLOB); debugCodeAssign("Blob", TraceObject.BLOB, id, "createClob()"); - checkClosedForWrite(); - return new JdbcBlob(this, ValueBytes.EMPTY, JdbcLob.State.NEW, id); + checkClosed(); + return new JdbcBlob(this, ValueVarbinary.EMPTY, JdbcLob.State.NEW, id); } catch (Exception e) { throw logAndConvert(e); } @@ -1691,8 +1442,8 @@ public NClob createNClob() throws SQLException { try { int id = getNextId(TraceObject.CLOB); debugCodeAssign("NClob", TraceObject.CLOB, id, "createNClob()"); - checkClosedForWrite(); - return new JdbcClob(this, ValueString.EMPTY, JdbcLob.State.NEW, id); + checkClosed(); + return new 
JdbcClob(this, ValueVarchar.EMPTY, JdbcLob.State.NEW, id); } catch (Exception e) { throw logAndConvert(e); } @@ -1708,8 +1459,8 @@ public SQLXML createSQLXML() throws SQLException { try { int id = getNextId(TraceObject.SQLXML); debugCodeAssign("SQLXML", TraceObject.SQLXML, id, "createSQLXML()"); - checkClosedForWrite(); - return new JdbcSQLXML(this, ValueString.EMPTY, JdbcLob.State.NEW, id); + checkClosed(); + return new JdbcSQLXML(this, ValueVarchar.EMPTY, JdbcLob.State.NEW, id); } catch (Exception e) { throw logAndConvert(e); } @@ -1729,8 +1480,7 @@ public Array createArrayOf(String typeName, Object[] elements) int id = getNextId(TraceObject.ARRAY); debugCodeAssign("Array", TraceObject.ARRAY, id, "createArrayOf()"); checkClosed(); - Value value = DataType.convertToValue(session, elements, - Value.ARRAY); + Value value = ValueToObjectConverter.objectToValue(session, elements, Value.ARRAY); return new JdbcArray(this, value, id); } catch (Exception e) { throw logAndConvert(e); @@ -1792,8 +1542,7 @@ public void setClientInfo(String name, String value) throws SQLClientInfoException { try { if (isDebugEnabled()) { - debugCode("setClientInfo(" + quote(name) + ", " + quote(value) - + ");"); + debugCode("setClientInfo(" + quote(name) + ", " + quote(value) + ')'); } checkClosed(); @@ -1807,7 +1556,7 @@ public void setClientInfo(String name, String value) if (isInternalProperty(name)) { throw new SQLClientInfoException( "Property name '" + name + " is used internally by H2.", - Collections. emptyMap()); + Collections.emptyMap()); } Pattern clientInfoNameRegEx = getMode().supportedClientInfoPropertiesRegEx; @@ -1821,7 +1570,7 @@ public void setClientInfo(String name, String value) } else { throw new SQLClientInfoException( "Client info name '" + name + "' not supported.", - Collections. 
emptyMap()); + Collections.emptyMap()); } } catch (Exception e) { throw convertToClientInfoException(logAndConvert(e)); @@ -1854,7 +1603,7 @@ public void setClientInfo(Properties properties) throws SQLClientInfoException { try { if (isDebugEnabled()) { - debugCode("setClientInfo(properties);"); + debugCode("setClientInfo(properties)"); } checkClosed(); if (clientInfo == null) { @@ -1879,9 +1628,7 @@ public void setClientInfo(Properties properties) @Override public Properties getClientInfo() throws SQLException { try { - if (isDebugEnabled()) { - debugCode("getClientInfo();"); - } + debugCodeCall("getClientInfo"); checkClosed(); ArrayList serverList = session.getClusterServers(); Properties p = new Properties(); @@ -1964,17 +1711,14 @@ public boolean isWrapperFor(Class iface) throws SQLException { * end of file is read) * @return the value */ - public Value createClob(Reader x, long length) { + Value createClob(Reader x, long length) { if (x == null) { return ValueNull.INSTANCE; } if (length <= 0) { length = -1; } - Value v = session.getDataHandler().getLobStorage().createClob(x, - length); - session.addTemporaryLob(v); - return v; + return session.addTemporaryLob(session.getDataHandler().getLobStorage().createClob(x, length)); } /** @@ -1985,17 +1729,14 @@ public Value createClob(Reader x, long length) { * end of file is read) * @return the value */ - public Value createBlob(InputStream x, long length) { + Value createBlob(InputStream x, long length) { if (x == null) { return ValueNull.INSTANCE; } if (length <= 0) { length = -1; } - Value v = session.getDataHandler().getLobStorage().createBlob(x, - length); - session.addTemporaryLob(v); - return v; + return session.addTemporaryLob(session.getDataHandler().getLobStorage().createBlob(x, length)); } /** @@ -2025,9 +1766,7 @@ public void setSchema(String schema) throws SQLException { @Override public String getSchema() throws SQLException { try { - if (isDebugEnabled()) { - debugCodeCall("getSchema"); - } + 
debugCodeCall("getSchema"); checkClosed(); return session.getCurrentSchemaName(); } catch (Exception e) { @@ -2084,110 +1823,58 @@ public String toString() { return getTraceObjectName() + ": url=" + url + " user=" + user; } - /** - * Convert an object to the default Java object for the given SQL type. For - * example, LOB objects are converted to java.sql.Clob / java.sql.Blob. - * - * @param v the value - * @return the object - */ - Object convertToDefaultObject(Value v) { - switch (v.getValueType()) { - case Value.CLOB: { - int id = getNextId(TraceObject.CLOB); - return new JdbcClob(this, v, JdbcLob.State.WITH_VALUE, id); - } - case Value.BLOB: { - int id = getNextId(TraceObject.BLOB); - return new JdbcBlob(this, v, JdbcLob.State.WITH_VALUE, id); - } - case Value.JAVA_OBJECT: - if (SysProperties.serializeJavaObject) { - return JdbcUtils.deserialize(v.getBytesNoCopy(), - session.getDataHandler()); - } - break; - case Value.RESULT_SET: { - int id = getNextId(TraceObject.RESULT_SET); - return new JdbcResultSet(this, null, null, ((ValueResultSet) v).getResult(), id, false, true, false); - } - case Value.BYTE: - case Value.SHORT: - if (!SysProperties.OLD_RESULT_SET_GET_OBJECT) { - return v.getInt(); - } - break; - } - return v.getObject(); - } - CompareMode getCompareMode() { return session.getDataHandler().getCompareMode(); } + @Override + public Mode getMode() { + return session.getMode(); + } + /** * INTERNAL + * @return StaticSettings */ - public void setTraceLevel(int level) { - trace.setLevel(level); + public StaticSettings getStaticSettings() { + checkClosed(); + return session.getStaticSettings(); } - Mode getMode() throws SQLException { - return getSettings().mode; + @Override + public ValueTimestampTimeZone currentTimestamp() { + Session session = this.session; + if (session == null) { + throw DbException.get(ErrorCode.OBJECT_CLOSED); + } + return session.currentTimestamp(); } - /** - * INTERNAL - */ - public Settings getSettings() throws SQLException { - 
Settings settings = this.settings; - if (settings == null) { - String modeName = ModeEnum.REGULAR.name(); - boolean databaseToUpper = true, databaseToLower = false, caseInsensitiveIdentifiers = false; - try (PreparedStatement prep = prepareStatement( - "SELECT NAME, VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME IN (?, ?, ?, ?)")) { - prep.setString(1, "MODE"); - prep.setString(2, "DATABASE_TO_UPPER"); - prep.setString(3, "DATABASE_TO_LOWER"); - prep.setString(4, "CASE_INSENSITIVE_IDENTIFIERS"); - ResultSet rs = prep.executeQuery(); - while (rs.next()) { - String value = rs.getString(2); - switch (rs.getString(1)) { - case "MODE": - modeName = value; - break; - case "DATABASE_TO_UPPER": - databaseToUpper = Boolean.valueOf(value); - break; - case "DATABASE_TO_LOWER": - databaseToLower = Boolean.valueOf(value); - break; - case "CASE_INSENSITIVE_IDENTIFIERS": - caseInsensitiveIdentifiers = Boolean.valueOf(value); - } - } - } - Mode mode = Mode.getInstance(modeName); - if (mode == null) { - mode = Mode.getRegular(); - } - if (session instanceof SessionRemote - && ((SessionRemote) session).getClientVersion() < Constants.TCP_PROTOCOL_VERSION_18) { - caseInsensitiveIdentifiers = !databaseToUpper; - } - settings = new Settings(mode, databaseToUpper, databaseToLower, caseInsensitiveIdentifiers); - this.settings = settings; + @Override + public TimeZoneProvider currentTimeZone() { + Session session = this.session; + if (session == null) { + throw DbException.get(ErrorCode.OBJECT_CLOSED); } - return settings; + return session.currentTimeZone(); } - /** - * INTERNAL - */ - public boolean isRegularMode() throws SQLException { - // Clear cached settings if any (required by tests) - settings = null; - return getMode().getEnum() == ModeEnum.REGULAR; + @Override + public JavaObjectSerializer getJavaObjectSerializer() { + Session session = this.session; + if (session == null) { + throw DbException.get(ErrorCode.OBJECT_CLOSED); + } + return session.getJavaObjectSerializer(); + } + 
+ @Override + public boolean zeroBasedEnums() { + Session session = this.session; + if (session == null) { + throw DbException.get(ErrorCode.OBJECT_CLOSED); + } + return session.zeroBasedEnums(); } + } diff --git a/h2/src/main/org/h2/jdbc/JdbcConnectionBackwardsCompat.java b/h2/src/main/org/h2/jdbc/JdbcConnectionBackwardsCompat.java index 78fd53b5ef..ba85d7d0f6 100644 --- a/h2/src/main/org/h2/jdbc/JdbcConnectionBackwardsCompat.java +++ b/h2/src/main/org/h2/jdbc/JdbcConnectionBackwardsCompat.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; diff --git a/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaData.java b/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaData.java index bd5311def4..842f3aeff1 100644 --- a/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaData.java +++ b/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaData.java @@ -1,44 +1,48 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; import java.sql.Connection; import java.sql.DatabaseMetaData; -import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.RowIdLifetime; import java.sql.SQLException; -import java.util.Arrays; import java.util.Map.Entry; import java.util.Properties; import org.h2.engine.Constants; -import org.h2.engine.SessionInterface; -import org.h2.engine.SessionRemote; -import org.h2.engine.SysProperties; +import org.h2.engine.Session; +import org.h2.jdbc.meta.DatabaseMeta; +import org.h2.jdbc.meta.DatabaseMetaLegacy; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.message.TraceObject; +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.ResultInterface; import org.h2.result.SimpleResult; -import org.h2.util.StringUtils; import org.h2.value.TypeInfo; -import org.h2.value.ValueInt; -import org.h2.value.ValueString; +import org.h2.value.ValueInteger; +import org.h2.value.ValueVarchar; /** * Represents the meta data for a database. */ -public class JdbcDatabaseMetaData extends TraceObject implements - DatabaseMetaData, JdbcDatabaseMetaDataBackwardsCompat { +public final class JdbcDatabaseMetaData extends TraceObject + implements DatabaseMetaData, JdbcDatabaseMetaDataBackwardsCompat { private final JdbcConnection conn; + private final DatabaseMeta meta; + JdbcDatabaseMetaData(JdbcConnection conn, Trace trace, int id) { setTrace(trace, TraceObject.DATABASE_META_DATA, id); this.conn = conn; + Session session = conn.getSession(); + meta = session.isOldInformationSchema() ? 
new DatabaseMetaLegacy(session) + : conn.getSession().getDatabaseMeta(); } /** @@ -72,7 +76,7 @@ public int getDriverMinorVersion() { public String getDatabaseProductName() { debugCodeCall("getDatabaseProductName"); // This value must stay like that, see - // http://opensource.atlassian.com/projects/hibernate/browse/HHH-2682 + // https://hibernate.atlassian.net/browse/HHH-2682 return "H2"; } @@ -82,9 +86,13 @@ public String getDatabaseProductName() { * @return the product version */ @Override - public String getDatabaseProductVersion() { - debugCodeCall("getDatabaseProductVersion"); - return Constants.getFullVersion(); + public String getDatabaseProductVersion() throws SQLException { + try { + debugCodeCall("getDatabaseProductVersion"); + return meta.getDatabaseProductVersion(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -107,13 +115,7 @@ public String getDriverName() { @Override public String getDriverVersion() { debugCodeCall("getDriverVersion"); - return Constants.getFullVersion(); - } - - private boolean hasSynonyms() { - SessionInterface si = conn.getSession(); - return !(si instanceof SessionRemote) - || ((SessionRemote) si).getClientVersion() >= Constants.TCP_PROTOCOL_VERSION_17; + return Constants.FULL_VERSION; } /** @@ -134,7 +136,7 @@ private boolean hasSynonyms() { *
        • SQL (String) the create table statement or NULL for systems tables.
        • * * - * @param catalogPattern null (to get all objects) or the catalog name + * @param catalog null (to get all objects) or the catalog name * @param schemaPattern null (to get all objects) or a schema name * (uppercase for unquoted names) * @param tableNamePattern null (to get all objects) or a table name @@ -144,91 +146,14 @@ private boolean hasSynonyms() { * @throws SQLException if the connection is closed */ @Override - public ResultSet getTables(String catalogPattern, String schemaPattern, - String tableNamePattern, String[] types) throws SQLException { + public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) + throws SQLException { try { if (isDebugEnabled()) { - debugCode("getTables(" + quote(catalogPattern) + ", " + - quote(schemaPattern) + ", " + quote(tableNamePattern) + - ", " + quoteArray(types) + ");"); - } - checkClosed(); - int typesLength = types != null ? types.length : 0; - boolean includeSynonyms = hasSynonyms() && (types == null || Arrays.asList(types).contains("SYNONYM")); - - // (1024 - 16) is enough for the most cases - StringBuilder select = new StringBuilder(1008); - if (includeSynonyms) { - select.append("SELECT " - + "TABLE_CAT, " - + "TABLE_SCHEM, " - + "TABLE_NAME, " - + "TABLE_TYPE, " - + "REMARKS, " - + "TYPE_CAT, " - + "TYPE_SCHEM, " - + "TYPE_NAME, " - + "SELF_REFERENCING_COL_NAME, " - + "REF_GENERATION, " - + "SQL " - + "FROM (" - + "SELECT " - + "SYNONYM_CATALOG TABLE_CAT, " - + "SYNONYM_SCHEMA TABLE_SCHEM, " - + "SYNONYM_NAME as TABLE_NAME, " - + "TYPE_NAME AS TABLE_TYPE, " - + "REMARKS, " - + "TYPE_NAME TYPE_CAT, " - + "TYPE_NAME TYPE_SCHEM, " - + "TYPE_NAME AS TYPE_NAME, " - + "TYPE_NAME SELF_REFERENCING_COL_NAME, " - + "TYPE_NAME REF_GENERATION, " - + "NULL AS SQL " - + "FROM INFORMATION_SCHEMA.SYNONYMS " - + "WHERE SYNONYM_CATALOG LIKE ?1 ESCAPE ?4 " - + "AND SYNONYM_SCHEMA LIKE ?2 ESCAPE ?4 " - + "AND SYNONYM_NAME LIKE ?3 ESCAPE ?4 " - + "UNION "); - } - 
select.append("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "TABLE_TYPE, " - + "REMARKS, " - + "TYPE_NAME TYPE_CAT, " - + "TYPE_NAME TYPE_SCHEM, " - + "TYPE_NAME, " - + "TYPE_NAME SELF_REFERENCING_COL_NAME, " - + "TYPE_NAME REF_GENERATION, " - + "SQL " - + "FROM INFORMATION_SCHEMA.TABLES " - + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?4 " - + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?4 " - + "AND TABLE_NAME LIKE ?3 ESCAPE ?4"); - if (typesLength > 0) { - select.append(" AND TABLE_TYPE IN("); - for (int i = 0; i < typesLength; i++) { - if (i > 0) { - select.append(", "); - } - select.append('?').append(i + 5); - } - select.append(')'); + debugCode("getTables(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + quote(tableNamePattern) + + ", " + quoteArray(types) + ')'); } - if (includeSynonyms) { - select.append(')'); - } - PreparedStatement prep = conn.prepareAutoCloseStatement( - select.append(" ORDER BY TABLE_TYPE, TABLE_SCHEM, TABLE_NAME").toString()); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, getSchemaPattern(schemaPattern)); - prep.setString(3, getPattern(tableNamePattern)); - prep.setString(4, "\\"); - for (int i = 0; i < typesLength; i++) { - prep.setString(5 + i, types[i]); - } - return prep.executeQuery(); + return getResultSet(meta.getTables(catalog, schemaPattern, tableNamePattern, types)); } catch (Exception e) { throw logAndConvert(e); } @@ -243,15 +168,15 @@ public ResultSet getTables(String catalogPattern, String schemaPattern, *
        • TABLE_SCHEM (String) table schema
        • *
        • TABLE_NAME (String) table name
        • *
        • COLUMN_NAME (String) column name
        • - *
        • DATA_TYPE (short) data type (see java.sql.Types)
        • + *
        • DATA_TYPE (int) data type (see java.sql.Types)
        • *
        • TYPE_NAME (String) data type name ("INTEGER", "VARCHAR",...)
        • *
        • COLUMN_SIZE (int) precision * (values larger than 2 GB are returned as 2 GB)
        • *
        • BUFFER_LENGTH (int) unused
        • *
        • DECIMAL_DIGITS (int) scale (0 for INTEGER and VARCHAR)
        • - *
        • NUM_PREC_RADIX (int) radix (always 10)
        • + *
        • NUM_PREC_RADIX (int) radix
        • *
        • NULLABLE (int) columnNoNulls or columnNullable
        • - *
        • REMARKS (String) comment (always empty)
        • + *
        • REMARKS (String) comment
        • *
        • COLUMN_DEF (String) default value
        • *
        • SQL_DATA_TYPE (int) unused
        • *
        • SQL_DATETIME_SUB (int) unused
        • @@ -266,7 +191,7 @@ public ResultSet getTables(String catalogPattern, String schemaPattern, *
        • IS_GENERATEDCOLUMN (String) "NO" or "YES"
        • * * - * @param catalogPattern null (to get all objects) or the catalog name + * @param catalog null (to get all objects) or the catalog name * @param schemaPattern null (to get all objects) or a schema name * (uppercase for unquoted names) * @param tableNamePattern null (to get all objects) or a table name @@ -277,128 +202,16 @@ public ResultSet getTables(String catalogPattern, String schemaPattern, * @throws SQLException if the connection is closed */ @Override - public ResultSet getColumns(String catalogPattern, String schemaPattern, - String tableNamePattern, String columnNamePattern) - throws SQLException { + public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getColumns(" + quote(catalogPattern)+", " + debugCode("getColumns(" + quote(catalog)+", " +quote(schemaPattern)+", " +quote(tableNamePattern)+", " - +quote(columnNamePattern)+");"); + +quote(columnNamePattern)+')'); } - checkClosed(); - boolean includeSynonyms = hasSynonyms(); - - StringBuilder select = new StringBuilder(2432); - if (includeSynonyms) { - select.append("SELECT " - + "TABLE_CAT, " - + "TABLE_SCHEM, " - + "TABLE_NAME, " - + "COLUMN_NAME, " - + "DATA_TYPE, " - + "TYPE_NAME, " - + "COLUMN_SIZE, " - + "BUFFER_LENGTH, " - + "DECIMAL_DIGITS, " - + "NUM_PREC_RADIX, " - + "NULLABLE, " - + "REMARKS, " - + "COLUMN_DEF, " - + "SQL_DATA_TYPE, " - + "SQL_DATETIME_SUB, " - + "CHAR_OCTET_LENGTH, " - + "ORDINAL_POSITION, " - + "IS_NULLABLE, " - + "SCOPE_CATALOG, " - + "SCOPE_SCHEMA, " - + "SCOPE_TABLE, " - + "SOURCE_DATA_TYPE, " - + "IS_AUTOINCREMENT, " - + "IS_GENERATEDCOLUMN " - + "FROM (" - + "SELECT " - + "s.SYNONYM_CATALOG TABLE_CAT, " - + "s.SYNONYM_SCHEMA TABLE_SCHEM, " - + "s.SYNONYM_NAME TABLE_NAME, " - + "c.COLUMN_NAME, " - + "c.DATA_TYPE, " - + "c.TYPE_NAME, " - + "c.CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " - + "c.CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " - 
+ "c.NUMERIC_SCALE DECIMAL_DIGITS, " - + "c.NUMERIC_PRECISION_RADIX NUM_PREC_RADIX, " - + "c.NULLABLE, " - + "c.REMARKS, " - + "c.COLUMN_DEFAULT COLUMN_DEF, " - + "c.DATA_TYPE SQL_DATA_TYPE, " - + "ZERO() SQL_DATETIME_SUB, " - + "c.CHARACTER_OCTET_LENGTH CHAR_OCTET_LENGTH, " - + "c.ORDINAL_POSITION, " - + "c.IS_NULLABLE IS_NULLABLE, " - + "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATALOG, " - + "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_SCHEMA, " - + "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_TABLE, " - + "c.SOURCE_DATA_TYPE, " - + "CASE WHEN c.SEQUENCE_NAME IS NULL THEN " - + "CAST(?1 AS VARCHAR) ELSE CAST(?2 AS VARCHAR) END IS_AUTOINCREMENT, " - + "CASE WHEN c.IS_COMPUTED THEN " - + "CAST(?2 AS VARCHAR) ELSE CAST(?1 AS VARCHAR) END IS_GENERATEDCOLUMN " - + "FROM INFORMATION_SCHEMA.COLUMNS c JOIN INFORMATION_SCHEMA.SYNONYMS s ON " - + "s.SYNONYM_FOR = c.TABLE_NAME " - + "AND s.SYNONYM_FOR_SCHEMA = c.TABLE_SCHEMA " - + "WHERE s.SYNONYM_CATALOG LIKE ?3 ESCAPE ?7 " - + "AND s.SYNONYM_SCHEMA LIKE ?4 ESCAPE ?7 " - + "AND s.SYNONYM_NAME LIKE ?5 ESCAPE ?7 " - + "AND c.COLUMN_NAME LIKE ?6 ESCAPE ?7 " - + "UNION "); - } - select.append("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "COLUMN_NAME, " - + "DATA_TYPE, " - + "TYPE_NAME, " - + "CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " - + "CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " - + "NUMERIC_SCALE DECIMAL_DIGITS, " - + "NUMERIC_PRECISION_RADIX NUM_PREC_RADIX, " - + "NULLABLE, " - + "REMARKS, " - + "COLUMN_DEFAULT COLUMN_DEF, " - + "DATA_TYPE SQL_DATA_TYPE, " - + "ZERO() SQL_DATETIME_SUB, " - + "CHARACTER_OCTET_LENGTH CHAR_OCTET_LENGTH, " - + "ORDINAL_POSITION, " - + "IS_NULLABLE IS_NULLABLE, " - + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATALOG, " - + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_SCHEMA, " - + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_TABLE, " - + "SOURCE_DATA_TYPE, " - + "CASE WHEN SEQUENCE_NAME IS NULL THEN " - + "CAST(?1 AS VARCHAR) ELSE CAST(?2 AS VARCHAR) END 
IS_AUTOINCREMENT, " - + "CASE WHEN IS_COMPUTED THEN " - + "CAST(?2 AS VARCHAR) ELSE CAST(?1 AS VARCHAR) END IS_GENERATEDCOLUMN " - + "FROM INFORMATION_SCHEMA.COLUMNS " - + "WHERE TABLE_CATALOG LIKE ?3 ESCAPE ?7 " - + "AND TABLE_SCHEMA LIKE ?4 ESCAPE ?7 " - + "AND TABLE_NAME LIKE ?5 ESCAPE ?7 " - + "AND COLUMN_NAME LIKE ?6 ESCAPE ?7"); - if (includeSynonyms) { - select.append(')'); - } - PreparedStatement prep = conn.prepareAutoCloseStatement( - select.append(" ORDER BY TABLE_SCHEM, TABLE_NAME, ORDINAL_POSITION").toString()); - prep.setString(1, "NO"); - prep.setString(2, "YES"); - prep.setString(3, getCatalogPattern(catalogPattern)); - prep.setString(4, getSchemaPattern(schemaPattern)); - prep.setString(5, getPattern(tableNamePattern)); - prep.setString(6, getPattern(columnNamePattern)); - prep.setString(7, "\\"); - return prep.executeQuery(); + return getResultSet(meta.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -417,71 +230,36 @@ public ResultSet getColumns(String catalogPattern, String schemaPattern, *
        • NON_UNIQUE (boolean) 'true' if non-unique
        • *
        • INDEX_QUALIFIER (String) index catalog
        • *
        • INDEX_NAME (String) index name
        • - *
        • TYPE (short) the index type (always tableIndexOther)
        • + *
        • TYPE (short) the index type (tableIndexOther or tableIndexHash for + * unique indexes on non-nullable columns, tableIndexStatistics for other + * indexes)
        • *
        • ORDINAL_POSITION (short) column index (1, 2, ...)
        • *
        • COLUMN_NAME (String) column name
        • *
        • ASC_OR_DESC (String) ascending or descending (always 'A')
        • - *
        • CARDINALITY (int) numbers of unique values
        • - *
        • PAGES (int) number of pages use (always 0)
        • + *
        • CARDINALITY (long) number of rows or numbers of unique values for + * unique indexes on non-nullable columns
        • + *
        • PAGES (long) number of pages use
        • *
        • FILTER_CONDITION (String) filter condition (always empty)
        • - *
        • SORT_TYPE (int) the sort type bit map: 1=DESCENDING, - * 2=NULLS_FIRST, 4=NULLS_LAST
        • * * - * @param catalogPattern null or the catalog name - * @param schemaPattern null (to get all objects) or a schema name + * @param catalog null or the catalog name + * @param schema null (to get all objects) or a schema name * (uppercase for unquoted names) - * @param tableName table name (must be specified) + * @param table table name (must be specified) * @param unique only unique indexes - * @param approximate is ignored + * @param approximate if true, return fast, but approximate CARDINALITY * @return the list of indexes and columns * @throws SQLException if the connection is closed */ @Override - public ResultSet getIndexInfo(String catalogPattern, String schemaPattern, - String tableName, boolean unique, boolean approximate) + public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getIndexInfo(" + quote(catalogPattern) + ", " + - quote(schemaPattern) + ", " + quote(tableName) + ", " + - unique + ", " + approximate + ");"); - } - String uniqueCondition; - if (unique) { - uniqueCondition = "NON_UNIQUE=FALSE"; - } else { - uniqueCondition = "TRUE"; + debugCode("getIndexInfo(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ", " + unique + + ", " + approximate + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "NON_UNIQUE, " - + "TABLE_CATALOG INDEX_QUALIFIER, " - + "INDEX_NAME, " - + "INDEX_TYPE TYPE, " - + "ORDINAL_POSITION, " - + "COLUMN_NAME, " - + "ASC_OR_DESC, " - // TODO meta data for number of unique values in an index - + "CARDINALITY, " - + "PAGES, " - + "FILTER_CONDITION, " - + "SORT_TYPE " - + "FROM INFORMATION_SCHEMA.INDEXES " - + "WHERE TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND (" + uniqueCondition + ") " - + "AND TABLE_NAME = ? 
" - + "ORDER BY NON_UNIQUE, TYPE, TABLE_SCHEM, INDEX_NAME, ORDINAL_POSITION"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, tableName); - return prep.executeQuery(); + return getResultSet(meta.getIndexInfo(catalog, schema, table, unique, approximate)); } catch (Exception e) { throw logAndConvert(e); } @@ -500,43 +278,20 @@ public ResultSet getIndexInfo(String catalogPattern, String schemaPattern, *
        • PK_NAME (String) the name of the primary key index
        • * * - * @param catalogPattern null or the catalog name - * @param schemaPattern null (to get all objects) or a schema name + * @param catalog null or the catalog name + * @param schema null (to get all objects) or a schema name * (uppercase for unquoted names) - * @param tableName table name (must be specified) + * @param table table name (must be specified) * @return the list of primary key columns * @throws SQLException if the connection is closed */ @Override - public ResultSet getPrimaryKeys(String catalogPattern, - String schemaPattern, String tableName) throws SQLException { + public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getPrimaryKeys(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(tableName)+");"); + debugCode("getPrimaryKeys(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "COLUMN_NAME, " - + "ORDINAL_POSITION KEY_SEQ, " - + "IFNULL(CONSTRAINT_NAME, INDEX_NAME) PK_NAME " - + "FROM INFORMATION_SCHEMA.INDEXES " - + "WHERE TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND TABLE_NAME = ? " - + "AND PRIMARY_KEY = TRUE " - + "ORDER BY COLUMN_NAME"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, tableName); - return prep.executeQuery(); + return getResultSet(meta.getPrimaryKeys(catalog, schema, table)); } catch (Exception e) { throw logAndConvert(e); } @@ -611,50 +366,67 @@ public boolean isReadOnly() throws SQLException { } /** - * Checks if NULL is sorted high (bigger than anything that is not null). 
+ * Checks if NULL values are sorted high (bigger than anything that is not + * null). * - * @return false by default; true if the system property h2.sortNullsHigh is - * set to true + * @return if NULL values are sorted high */ @Override - public boolean nullsAreSortedHigh() { - debugCodeCall("nullsAreSortedHigh"); - return SysProperties.SORT_NULLS_HIGH; + public boolean nullsAreSortedHigh() throws SQLException { + try { + debugCodeCall("nullsAreSortedHigh"); + return meta.defaultNullOrdering() == DefaultNullOrdering.HIGH; + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * Checks if NULL is sorted low (smaller than anything that is not null). + * Checks if NULL values are sorted low (smaller than anything that is not + * null). * - * @return true by default; false if the system property h2.sortNullsHigh is - * set to true + * @return if NULL values are sorted low */ @Override - public boolean nullsAreSortedLow() { - debugCodeCall("nullsAreSortedLow"); - return !SysProperties.SORT_NULLS_HIGH; + public boolean nullsAreSortedLow() throws SQLException { + try { + debugCodeCall("nullsAreSortedLow"); + return meta.defaultNullOrdering() == DefaultNullOrdering.LOW; + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * Checks if NULL is sorted at the beginning (no matter if ASC or DESC is - * used). + * Checks if NULL values are sorted at the beginning (no matter if ASC or + * DESC is used). * - * @return false + * @return if NULL values are sorted at the beginning */ @Override - public boolean nullsAreSortedAtStart() { - debugCodeCall("nullsAreSortedAtStart"); - return false; + public boolean nullsAreSortedAtStart() throws SQLException { + try { + debugCodeCall("nullsAreSortedAtStart"); + return meta.defaultNullOrdering() == DefaultNullOrdering.FIRST; + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * Checks if NULL is sorted at the end (no matter if ASC or DESC is used). 
+ * Checks if NULL values are sorted at the end (no matter if ASC or DESC is + * used). * - * @return false + * @return if NULL values are sorted at the end */ @Override - public boolean nullsAreSortedAtEnd() { - debugCodeCall("nullsAreSortedAtEnd"); - return false; + public boolean nullsAreSortedAtEnd() throws SQLException { + try { + debugCodeCall("nullsAreSortedAtEnd"); + return meta.defaultNullOrdering() == DefaultNullOrdering.LAST; + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -678,16 +450,17 @@ public Connection getConnection() { *
        • PROCEDURE_CAT (String) catalog
        • *
        • PROCEDURE_SCHEM (String) schema
        • *
        • PROCEDURE_NAME (String) name
        • - *
        • NUM_INPUT_PARAMS (int) the number of arguments
        • - *
        • NUM_OUTPUT_PARAMS (int) for future use, always 0
        • - *
        • NUM_RESULT_SETS (int) for future use, always 0
        • + *
        • reserved
        • + *
        • reserved
        • + *
        • reserved
        • *
        • REMARKS (String) description
        • *
        • PROCEDURE_TYPE (short) if this procedure returns a result * (procedureNoResult or procedureReturnsResult)
        • - *
        • SPECIFIC_NAME (String) name
        • + *
        • SPECIFIC_NAME (String) non-ambiguous name to distinguish + * overloads
        • * * - * @param catalogPattern null or the catalog name + * @param catalog null or the catalog name * @param schemaPattern null (to get all objects) or a schema name * (uppercase for unquoted names) * @param procedureNamePattern the procedure name pattern @@ -695,38 +468,16 @@ public Connection getConnection() { * @throws SQLException if the connection is closed */ @Override - public ResultSet getProcedures(String catalogPattern, String schemaPattern, + public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { try { if (isDebugEnabled()) { debugCode("getProcedures(" - +quote(catalogPattern)+", " + +quote(catalog)+", " +quote(schemaPattern)+", " - +quote(procedureNamePattern)+");"); + +quote(procedureNamePattern)+')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "ALIAS_CATALOG PROCEDURE_CAT, " - + "ALIAS_SCHEMA PROCEDURE_SCHEM, " - + "ALIAS_NAME PROCEDURE_NAME, " - + "COLUMN_COUNT NUM_INPUT_PARAMS, " - + "ZERO() NUM_OUTPUT_PARAMS, " - + "ZERO() NUM_RESULT_SETS, " - + "REMARKS, " - + "RETURNS_RESULT PROCEDURE_TYPE, " - + "ALIAS_NAME SPECIFIC_NAME " - + "FROM INFORMATION_SCHEMA.FUNCTION_ALIASES " - + "WHERE ALIAS_CATALOG LIKE ? ESCAPE ? " - + "AND ALIAS_SCHEMA LIKE ? ESCAPE ? " - + "AND ALIAS_NAME LIKE ? ESCAPE ? " - + "ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, NUM_INPUT_PARAMS"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, getPattern(procedureNamePattern)); - prep.setString(6, "\\"); - return prep.executeQuery(); + return getResultSet(meta.getProcedures(catalog, schemaPattern, procedureNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -750,22 +501,23 @@ public ResultSet getProcedures(String catalogPattern, String schemaPattern, *
        • PRECISION (int) precision
        • *
        • LENGTH (int) length
        • *
        • SCALE (short) scale
        • - *
        • RADIX (int) always 10
        • + *
        • RADIX (int)
        • *
        • NULLABLE (short) nullable * (DatabaseMetaData.columnNoNulls for primitive data types, * DatabaseMetaData.columnNullable otherwise)
        • *
        • REMARKS (String) description
        • *
        • COLUMN_DEF (String) always null
        • - *
        • SQL_DATA_TYPE (int) for future use, always 0
        • - *
        • SQL_DATETIME_SUB (int) for future use, always 0
        • - *
        • CHAR_OCTET_LENGTH (int) always null
        • + *
        • SQL_DATA_TYPE (int) for future use
        • + *
        • SQL_DATETIME_SUB (int) for future use
        • + *
        • CHAR_OCTET_LENGTH (int)
        • *
        • ORDINAL_POSITION (int) the parameter index * starting from 1 (0 is the return value)
        • *
        • IS_NULLABLE (String) always "YES"
        • - *
        • SPECIFIC_NAME (String) name
        • + *
        • SPECIFIC_NAME (String) non-ambiguous procedure name to distinguish + * overloads
        • * * - * @param catalogPattern null or the catalog name + * @param catalog null or the catalog name * @param schemaPattern null (to get all objects) or a schema name * (uppercase for unquoted names) * @param procedureNamePattern the procedure name pattern @@ -774,55 +526,16 @@ public ResultSet getProcedures(String catalogPattern, String schemaPattern, * @throws SQLException if the connection is closed */ @Override - public ResultSet getProcedureColumns(String catalogPattern, - String schemaPattern, String procedureNamePattern, + public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getProcedureColumns(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(procedureNamePattern)+", " - +quote(columnNamePattern)+");"); + debugCode("getProcedureColumns(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(procedureNamePattern) + ", " + quote(columnNamePattern) + ')'); } checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "ALIAS_CATALOG PROCEDURE_CAT, " - + "ALIAS_SCHEMA PROCEDURE_SCHEM, " - + "ALIAS_NAME PROCEDURE_NAME, " - + "COLUMN_NAME, " - + "COLUMN_TYPE, " - + "DATA_TYPE, " - + "TYPE_NAME, " - + "PRECISION, " - + "PRECISION LENGTH, " - + "SCALE, " - + "RADIX, " - + "NULLABLE, " - + "REMARKS, " - + "COLUMN_DEFAULT COLUMN_DEF, " - + "ZERO() SQL_DATA_TYPE, " - + "ZERO() SQL_DATETIME_SUB, " - + "ZERO() CHAR_OCTET_LENGTH, " - + "POS ORDINAL_POSITION, " - + "? IS_NULLABLE, " - + "ALIAS_NAME SPECIFIC_NAME " - + "FROM INFORMATION_SCHEMA.FUNCTION_COLUMNS " - + "WHERE ALIAS_CATALOG LIKE ? ESCAPE ? " - + "AND ALIAS_SCHEMA LIKE ? ESCAPE ? " - + "AND ALIAS_NAME LIKE ? ESCAPE ? " - + "AND COLUMN_NAME LIKE ? ESCAPE ? 
" - + "ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, ORDINAL_POSITION"); - prep.setString(1, "YES"); - prep.setString(2, getCatalogPattern(catalogPattern)); - prep.setString(3, "\\"); - prep.setString(4, getSchemaPattern(schemaPattern)); - prep.setString(5, "\\"); - prep.setString(6, getPattern(procedureNamePattern)); - prep.setString(7, "\\"); - prep.setString(8, getPattern(columnNamePattern)); - prep.setString(9, "\\"); - return prep.executeQuery(); + return getResultSet( + meta.getProcedureColumns(catalog, schemaPattern, procedureNamePattern, columnNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -835,7 +548,6 @@ public ResultSet getProcedureColumns(String catalogPattern, *
            *
          1. TABLE_SCHEM (String) schema name
          2. *
          3. TABLE_CATALOG (String) catalog name
          4. - *
          5. IS_DEFAULT (boolean) if this is the default schema
          6. *
          * * @return the schema list @@ -845,15 +557,7 @@ public ResultSet getProcedureColumns(String catalogPattern, public ResultSet getSchemas() throws SQLException { try { debugCodeCall("getSchemas"); - checkClosed(); - PreparedStatement prep = conn - .prepareAutoCloseStatement("SELECT " - + "SCHEMA_NAME TABLE_SCHEM, " - + "CATALOG_NAME TABLE_CATALOG, " - +" IS_DEFAULT " - + "FROM INFORMATION_SCHEMA.SCHEMATA " - + "ORDER BY SCHEMA_NAME"); - return prep.executeQuery(); + return getResultSet(meta.getSchemas()); } catch (Exception e) { throw logAndConvert(e); } @@ -874,11 +578,7 @@ public ResultSet getSchemas() throws SQLException { public ResultSet getCatalogs() throws SQLException { try { debugCodeCall("getCatalogs"); - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement( - "SELECT CATALOG_NAME TABLE_CAT " - + "FROM INFORMATION_SCHEMA.CATALOGS"); - return prep.executeQuery(); + return getResultSet(meta.getCatalogs()); } catch (Exception e) { throw logAndConvert(e); } @@ -898,12 +598,7 @@ public ResultSet getCatalogs() throws SQLException { public ResultSet getTableTypes() throws SQLException { try { debugCodeCall("getTableTypes"); - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TYPE TABLE_TYPE " - + "FROM INFORMATION_SCHEMA.TABLE_TYPES " - + "ORDER BY TABLE_TYPE"); - return prep.executeQuery(); + return getResultSet(meta.getTableTypes()); } catch (Exception e) { throw logAndConvert(e); } @@ -926,8 +621,8 @@ public ResultSet getTableTypes() throws SQLException { * others * * - * @param catalogPattern null (to get all objects) or the catalog name - * @param schemaPattern null (to get all objects) or a schema name + * @param catalog null (to get all objects) or the catalog name + * @param schema null (to get all objects) or a schema name * (uppercase for unquoted names) * @param table a table name (uppercase for unquoted names) * @param columnNamePattern null (to get all objects) or a column name @@ 
-936,41 +631,14 @@ public ResultSet getTableTypes() throws SQLException { * @throws SQLException if the connection is closed */ @Override - public ResultSet getColumnPrivileges(String catalogPattern, - String schemaPattern, String table, String columnNamePattern) + public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getColumnPrivileges(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(table)+", " - +quote(columnNamePattern)+");"); + debugCode("getColumnPrivileges(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ", " + + quote(columnNamePattern) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "COLUMN_NAME, " - + "GRANTOR, " - + "GRANTEE, " - + "PRIVILEGE_TYPE PRIVILEGE, " - + "IS_GRANTABLE " - + "FROM INFORMATION_SCHEMA.COLUMN_PRIVILEGES " - + "WHERE TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND TABLE_NAME = ? " - + "AND COLUMN_NAME LIKE ? ESCAPE ? 
" - + "ORDER BY COLUMN_NAME, PRIVILEGE"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, table); - prep.setString(6, getPattern(columnNamePattern)); - prep.setString(7, "\\"); - return prep.executeQuery(); + return getResultSet(meta.getColumnPrivileges(catalog, schema, table, columnNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -992,7 +660,7 @@ public ResultSet getColumnPrivileges(String catalogPattern, * others * * - * @param catalogPattern null (to get all objects) or the catalog name + * @param catalog null (to get all objects) or the catalog name * @param schemaPattern null (to get all objects) or a schema name * (uppercase for unquoted names) * @param tableNamePattern null (to get all objects) or a table name @@ -1001,36 +669,15 @@ public ResultSet getColumnPrivileges(String catalogPattern, * @throws SQLException if the connection is closed */ @Override - public ResultSet getTablePrivileges(String catalogPattern, - String schemaPattern, String tableNamePattern) throws SQLException { + public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) + throws SQLException { try { if (isDebugEnabled()) { - debugCode("getTablePrivileges(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(tableNamePattern)+");"); + debugCode("getTablePrivileges(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(tableNamePattern) + ')'); } checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "GRANTOR, " - + "GRANTEE, " - + "PRIVILEGE_TYPE PRIVILEGE, " - + "IS_GRANTABLE " - + "FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES " - + "WHERE TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND TABLE_NAME LIKE ? ESCAPE ? 
" - + "ORDER BY TABLE_SCHEM, TABLE_NAME, PRIVILEGE"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, getPattern(tableNamePattern)); - prep.setString(6, "\\"); - return prep.executeQuery(); + return getResultSet(meta.getTablePrivileges(catalog, schemaPattern, tableNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -1052,56 +699,24 @@ public ResultSet getTablePrivileges(String catalogPattern, *
        • PSEUDO_COLUMN (short) (always bestRowNotPseudo)
        • * * - * @param catalogPattern null (to get all objects) or the catalog name - * @param schemaPattern null (to get all objects) or a schema name + * @param catalog null (to get all objects) or the catalog name + * @param schema null (to get all objects) or a schema name * (uppercase for unquoted names) - * @param tableName table name (must be specified) + * @param table table name (must be specified) * @param scope ignored * @param nullable ignored * @return the primary key index * @throws SQLException if the connection is closed */ @Override - public ResultSet getBestRowIdentifier(String catalogPattern, - String schemaPattern, String tableName, int scope, boolean nullable) + public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getBestRowIdentifier(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(tableName)+", " - +scope+", "+nullable+");"); + debugCode("getBestRowIdentifier(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ", " + + scope + ", " + nullable + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "CAST(? AS SMALLINT) SCOPE, " - + "C.COLUMN_NAME, " - + "C.DATA_TYPE, " - + "C.TYPE_NAME, " - + "C.CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " - + "C.CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " - + "CAST(C.NUMERIC_SCALE AS SMALLINT) DECIMAL_DIGITS, " - + "CAST(? AS SMALLINT) PSEUDO_COLUMN " - + "FROM INFORMATION_SCHEMA.INDEXES I, " - +" INFORMATION_SCHEMA.COLUMNS C " - + "WHERE C.TABLE_NAME = I.TABLE_NAME " - + "AND C.COLUMN_NAME = I.COLUMN_NAME " - + "AND C.TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND C.TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND C.TABLE_NAME = ? 
" - + "AND I.PRIMARY_KEY = TRUE " - + "ORDER BY SCOPE"); - // SCOPE - prep.setInt(1, DatabaseMetaData.bestRowSession); - // PSEUDO_COLUMN - prep.setInt(2, DatabaseMetaData.bestRowNotPseudo); - prep.setString(3, getCatalogPattern(catalogPattern)); - prep.setString(4, "\\"); - prep.setString(5, getSchemaPattern(schemaPattern)); - prep.setString(6, "\\"); - prep.setString(7, tableName); - return prep.executeQuery(); + return getResultSet(meta.getBestRowIdentifier(catalog, schema, table, scope, nullable)); } catch (Exception e) { throw logAndConvert(e); } @@ -1125,33 +740,17 @@ public ResultSet getBestRowIdentifier(String catalogPattern, * * @param catalog null (to get all objects) or the catalog name * @param schema null (to get all objects) or a schema name - * @param tableName table name (must be specified) + * @param table table name (must be specified) * @return an empty result set * @throws SQLException if the connection is closed */ @Override - public ResultSet getVersionColumns(String catalog, String schema, - String tableName) throws SQLException { + public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getVersionColumns(" - +quote(catalog)+", " - +quote(schema)+", " - +quote(tableName)+");"); + debugCode("getVersionColumns(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "ZERO() SCOPE, " - + "COLUMN_NAME, " - + "CAST(DATA_TYPE AS INT) DATA_TYPE, " - + "TYPE_NAME, " - + "NUMERIC_PRECISION COLUMN_SIZE, " - + "NUMERIC_PRECISION BUFFER_LENGTH, " - + "NUMERIC_PRECISION DECIMAL_DIGITS, " - + "ZERO() PSEUDO_COLUMN " - + "FROM INFORMATION_SCHEMA.COLUMNS " - + "WHERE FALSE"); - return prep.executeQuery(); + return getResultSet(meta.getVersionColumns(catalog, schema, table)); } catch (Exception e) { throw logAndConvert(e); } @@ -1182,49 +781,19 @@ public ResultSet 
getVersionColumns(String catalog, String schema, * importedKeyNotDeferrable) * * - * @param catalogPattern null (to get all objects) or the catalog name - * @param schemaPattern the schema name of the foreign table - * @param tableName the name of the foreign table + * @param catalog null (to get all objects) or the catalog name + * @param schema the schema name of the foreign table + * @param table the name of the foreign table * @return the result set * @throws SQLException if the connection is closed */ @Override - public ResultSet getImportedKeys(String catalogPattern, - String schemaPattern, String tableName) throws SQLException { + public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getImportedKeys(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(tableName)+");"); + debugCode("getImportedKeys(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "PKTABLE_CATALOG PKTABLE_CAT, " - + "PKTABLE_SCHEMA PKTABLE_SCHEM, " - + "PKTABLE_NAME PKTABLE_NAME, " - + "PKCOLUMN_NAME, " - + "FKTABLE_CATALOG FKTABLE_CAT, " - + "FKTABLE_SCHEMA FKTABLE_SCHEM, " - + "FKTABLE_NAME, " - + "FKCOLUMN_NAME, " - + "ORDINAL_POSITION KEY_SEQ, " - + "UPDATE_RULE, " - + "DELETE_RULE, " - + "FK_NAME, " - + "PK_NAME, " - + "DEFERRABILITY " - + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " - + "WHERE FKTABLE_CATALOG LIKE ? ESCAPE ? " - + "AND FKTABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND FKTABLE_NAME = ? 
" - + "ORDER BY PKTABLE_CAT, PKTABLE_SCHEM, PKTABLE_NAME, FK_NAME, KEY_SEQ"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, tableName); - return prep.executeQuery(); + return getResultSet(meta.getImportedKeys(catalog, schema, table)); } catch (Exception e) { throw logAndConvert(e); } @@ -1255,49 +824,19 @@ public ResultSet getImportedKeys(String catalogPattern, * importedKeyNotDeferrable) * * - * @param catalogPattern null or the catalog name - * @param schemaPattern the schema name of the primary table - * @param tableName the name of the primary table + * @param catalog null or the catalog name + * @param schema the schema name of the primary table + * @param table the name of the primary table * @return the result set * @throws SQLException if the connection is closed */ @Override - public ResultSet getExportedKeys(String catalogPattern, - String schemaPattern, String tableName) throws SQLException { + public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getExportedKeys(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(tableName)+");"); + debugCode("getExportedKeys(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "PKTABLE_CATALOG PKTABLE_CAT, " - + "PKTABLE_SCHEMA PKTABLE_SCHEM, " - + "PKTABLE_NAME PKTABLE_NAME, " - + "PKCOLUMN_NAME, " - + "FKTABLE_CATALOG FKTABLE_CAT, " - + "FKTABLE_SCHEMA FKTABLE_SCHEM, " - + "FKTABLE_NAME, " - + "FKCOLUMN_NAME, " - + "ORDINAL_POSITION KEY_SEQ, " - + "UPDATE_RULE, " - + "DELETE_RULE, " - + "FK_NAME, " - + "PK_NAME, " - + "DEFERRABILITY " - + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " - + "WHERE PKTABLE_CATALOG LIKE ? ESCAPE ? " - + "AND PKTABLE_SCHEMA LIKE ? ESCAPE ? 
" - + "AND PKTABLE_NAME = ? " - + "ORDER BY FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, KEY_SEQ"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, tableName); - return prep.executeQuery(); + return getResultSet(meta.getExportedKeys(catalog, schema, table)); } catch (Exception e) { throw logAndConvert(e); } @@ -1329,66 +868,28 @@ public ResultSet getExportedKeys(String catalogPattern, * importedKeyNotDeferrable) * * - * @param primaryCatalogPattern null or the catalog name - * @param primarySchemaPattern the schema name of the primary table + * @param primaryCatalog null or the catalog name + * @param primarySchema the schema name of the primary table * (optional) * @param primaryTable the name of the primary table (must be specified) - * @param foreignCatalogPattern null or the catalog name - * @param foreignSchemaPattern the schema name of the foreign table + * @param foreignCatalog null or the catalog name + * @param foreignSchema the schema name of the foreign table * (optional) * @param foreignTable the name of the foreign table (must be specified) * @return the result set * @throws SQLException if the connection is closed */ @Override - public ResultSet getCrossReference(String primaryCatalogPattern, - String primarySchemaPattern, String primaryTable, String foreignCatalogPattern, - String foreignSchemaPattern, String foreignTable) throws SQLException { + public ResultSet getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getCrossReference(" - +quote(primaryCatalogPattern)+", " - +quote(primarySchemaPattern)+", " - +quote(primaryTable)+", " - +quote(foreignCatalogPattern)+", " - +quote(foreignSchemaPattern)+", " - +quote(foreignTable)+");"); + 
debugCode("getCrossReference(" + quote(primaryCatalog) + ", " + quote(primarySchema) + ", " + + quote(primaryTable) + ", " + quote(foreignCatalog) + ", " + quote(foreignSchema) + ", " + + quote(foreignTable) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "PKTABLE_CATALOG PKTABLE_CAT, " - + "PKTABLE_SCHEMA PKTABLE_SCHEM, " - + "PKTABLE_NAME PKTABLE_NAME, " - + "PKCOLUMN_NAME, " - + "FKTABLE_CATALOG FKTABLE_CAT, " - + "FKTABLE_SCHEMA FKTABLE_SCHEM, " - + "FKTABLE_NAME, " - + "FKCOLUMN_NAME, " - + "ORDINAL_POSITION KEY_SEQ, " - + "UPDATE_RULE, " - + "DELETE_RULE, " - + "FK_NAME, " - + "PK_NAME, " - + "DEFERRABILITY " - + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " - + "WHERE PKTABLE_CATALOG LIKE ? ESCAPE ? " - + "AND PKTABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND PKTABLE_NAME = ? " - + "AND FKTABLE_CATALOG LIKE ? ESCAPE ? " - + "AND FKTABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND FKTABLE_NAME = ? " - + "ORDER BY FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, KEY_SEQ"); - prep.setString(1, getCatalogPattern(primaryCatalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(primarySchemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, primaryTable); - prep.setString(6, getCatalogPattern(foreignCatalogPattern)); - prep.setString(7, "\\"); - prep.setString(8, getSchemaPattern(foreignSchemaPattern)); - prep.setString(9, "\\"); - prep.setString(10, foreignTable); - return prep.executeQuery(); + return getResultSet(meta.getCrossReference(primaryCatalog, primarySchema, primaryTable, foreignCatalog, + foreignSchema, foreignTable)); } catch (Exception e) { throw logAndConvert(e); } @@ -1424,19 +925,9 @@ public ResultSet getUDTs(String catalog, String schemaPattern, +quote(catalog)+", " +quote(schemaPattern)+", " +quote(typeNamePattern)+", " - +quoteIntArray(types)+");"); + +quoteIntArray(types)+')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "CAST(NULL AS 
VARCHAR) TYPE_CAT, " - + "CAST(NULL AS VARCHAR) TYPE_SCHEM, " - + "CAST(NULL AS VARCHAR) TYPE_NAME, " - + "CAST(NULL AS VARCHAR) CLASS_NAME, " - + "CAST(NULL AS SMALLINT) DATA_TYPE, " - + "CAST(NULL AS VARCHAR) REMARKS, " - + "CAST(NULL AS SMALLINT) BASE_TYPE " - + "FROM DUAL WHERE FALSE"); - return prep.executeQuery(); + return getResultSet(meta.getUDTs(catalog, schemaPattern, typeNamePattern, types)); } catch (Exception e) { throw logAndConvert(e); } @@ -1475,29 +966,7 @@ public ResultSet getUDTs(String catalog, String schemaPattern, public ResultSet getTypeInfo() throws SQLException { try { debugCodeCall("getTypeInfo"); - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TYPE_NAME, " - + "DATA_TYPE, " - + "PRECISION, " - + "PREFIX LITERAL_PREFIX, " - + "SUFFIX LITERAL_SUFFIX, " - + "PARAMS CREATE_PARAMS, " - + "NULLABLE, " - + "CASE_SENSITIVE, " - + "SEARCHABLE, " - + "FALSE UNSIGNED_ATTRIBUTE, " - + "FALSE FIXED_PREC_SCALE, " - + "AUTO_INCREMENT, " - + "TYPE_NAME LOCAL_TYPE_NAME, " - + "MINIMUM_SCALE, " - + "MAXIMUM_SCALE, " - + "DATA_TYPE SQL_DATA_TYPE, " - + "ZERO() SQL_DATETIME_SUB, " - + "RADIX NUM_PREC_RADIX " - + "FROM INFORMATION_SCHEMA.TYPE_INFO " - + "ORDER BY DATA_TYPE, POS"); - return prep.executeQuery(); + return getResultSet(meta.getTypeInfo()); } catch (Exception e) { throw logAndConvert(e); } @@ -1537,60 +1006,23 @@ public String getIdentifierQuoteString() { } /** - * Gets the comma-separated list of all SQL keywords that are not supported as - * table/column/index name, in addition to the SQL:2003 keywords. The list - * returned is: - *
          -     * GROUPS
          -     * IF,ILIKE,INTERSECTS,
          -     * LIMIT,
          -     * MINUS,
          -     * OFFSET,
          -     * QUALIFY,
          -     * REGEXP,_ROWID_,ROWNUM,
          -     * SYSDATE,SYSTIME,SYSTIMESTAMP,
          -     * TODAY,TOP
          -     * 
          - * The complete list of keywords (including SQL:2003 keywords) is: - *
          -     * ALL, AND, ARRAY, AS,
          -     * BETWEEN, BOTH
          -     * CASE, CHECK, CONSTRAINT, CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, CURRENT_USER,
          -     * DISTINCT,
          -     * EXCEPT, EXISTS,
          -     * FALSE, FETCH, FILTER, FOR, FOREIGN, FROM, FULL,
          -     * GROUP, GROUPS
          -     * HAVING,
          -     * IF, ILIKE, IN, INNER, INTERSECT, INTERSECTS, INTERVAL, IS,
          -     * JOIN,
          -     * LEADING, LEFT, LIKE, LIMIT, LOCALTIME, LOCALTIMESTAMP,
          -     * MINUS,
          -     * NATURAL, NOT, NULL,
          -     * OFFSET, ON, OR, ORDER, OVER,
          -     * PARTITION, PRIMARY,
          -     * QUALIFY,
          -     * RANGE, REGEXP, RIGHT, ROW, _ROWID_, ROWNUM, ROWS,
          -     * SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP,
          -     * TABLE, TODAY, TOP, TRAILING, TRUE,
          -     * UNION, UNIQUE,
          -     * VALUES,
          -     * WHERE, WINDOW, WITH
          -     * 
          - * - * @return a list of additional the keywords - */ - @Override - public String getSQLKeywords() { - debugCodeCall("getSQLKeywords"); - return "GROUPS," // - + "IF,ILIKE,INTERSECTS," // - + "LIMIT," // - + "MINUS," // - + "OFFSET," // - + "QUALIFY," // - + "REGEXP,_ROWID_,ROWNUM," // - + "SYSDATE,SYSTIME,SYSTIMESTAMP," // - + "TODAY,TOP"; + * Gets the comma-separated list of all SQL keywords that are not supported + * as unquoted identifiers, in addition to the SQL:2003 reserved words. + *

          + * List of keywords in H2 may depend on compatibility mode and other + * settings. + *

          + * + * @return a list of additional keywords + */ + @Override + public String getSQLKeywords() throws SQLException { + try { + debugCodeCall("getSQLKeywords"); + return meta.getSQLKeywords(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1600,8 +1032,12 @@ public String getSQLKeywords() { */ @Override public String getNumericFunctions() throws SQLException { - debugCodeCall("getNumericFunctions"); - return getFunctions("Functions (Numeric)"); + try { + debugCodeCall("getNumericFunctions"); + return meta.getNumericFunctions(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1611,8 +1047,12 @@ public String getNumericFunctions() throws SQLException { */ @Override public String getStringFunctions() throws SQLException { - debugCodeCall("getStringFunctions"); - return getFunctions("Functions (String)"); + try { + debugCodeCall("getStringFunctions"); + return meta.getStringFunctions(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1622,8 +1062,12 @@ public String getStringFunctions() throws SQLException { */ @Override public String getSystemFunctions() throws SQLException { - debugCodeCall("getSystemFunctions"); - return getFunctions("Functions (System)"); + try { + debugCodeCall("getSystemFunctions"); + return meta.getSystemFunctions(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1633,38 +1077,9 @@ public String getSystemFunctions() throws SQLException { */ @Override public String getTimeDateFunctions() throws SQLException { - debugCodeCall("getTimeDateFunctions"); - return getFunctions("Functions (Time and Date)"); - } - - private String getFunctions(String section) throws SQLException { try { - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT TOPIC " - + "FROM INFORMATION_SCHEMA.HELP WHERE SECTION = ?"); - prep.setString(1, section); - ResultSet rs = prep.executeQuery(); - StringBuilder builder = new StringBuilder(); - while (rs.next()) { - 
String s = rs.getString(1).trim(); - String[] array = StringUtils.arraySplit(s, ',', true); - for (String a : array) { - if (builder.length() != 0) { - builder.append(','); - } - String f = a.trim(); - int spaceIndex = f.indexOf(' '); - if (spaceIndex >= 0) { - // remove 'Function' from 'INSERT Function' - StringUtils.trimSubstring(builder, f, 0, spaceIndex); - } else { - builder.append(f); - } - } - } - rs.close(); - prep.close(); - return builder.toString(); + debugCodeCall("getTimeDateFunctions"); + return meta.getTimeDateFunctions(); } catch (Exception e) { throw logAndConvert(e); } @@ -1678,9 +1093,13 @@ private String getFunctions(String section) throws SQLException { * mode) */ @Override - public String getSearchStringEscape() { - debugCodeCall("getSearchStringEscape"); - return "\\"; + public String getSearchStringEscape() throws SQLException { + try { + debugCodeCall("getSearchStringEscape"); + return meta.getSearchStringEscape(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1697,6 +1116,7 @@ public String getExtraNameCharacters() { /** * Returns whether alter table with add column is supported. + * * @return true */ @Override @@ -1759,7 +1179,7 @@ public boolean supportsConvert() { @Override public boolean supportsConvert(int fromType, int toType) { if (isDebugEnabled()) { - debugCode("supportsConvert("+fromType+", "+fromType+");"); + debugCode("supportsConvert(" + fromType + ", " + toType + ')'); } return true; } @@ -2169,23 +1589,23 @@ public boolean supportsCatalogsInPrivilegeDefinitions() { /** * Returns whether positioned deletes are supported. * - * @return true + * @return false */ @Override public boolean supportsPositionedDelete() { debugCodeCall("supportsPositionedDelete"); - return true; + return false; } /** * Returns whether positioned updates are supported. 
* - * @return true + * @return false */ @Override public boolean supportsPositionedUpdate() { debugCodeCall("supportsPositionedUpdate"); - return true; + return false; } /** @@ -2352,25 +1772,10 @@ public boolean supportsTransactions() { public boolean supportsTransactionIsolationLevel(int level) throws SQLException { debugCodeCall("supportsTransactionIsolationLevel"); switch (level) { - case Connection.TRANSACTION_READ_UNCOMMITTED: { - // Currently the combination of MV_STORE=FALSE, LOCK_MODE=0 and - // MULTI_THREADED=TRUE is not supported. Also see code in - // Database#setLockMode(int) - try (PreparedStatement prep = conn.prepareStatement( - "SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME=?")) { - // TODO skip MV_STORE check for H2 <= 1.4.197 - prep.setString(1, "MV_STORE"); - ResultSet rs = prep.executeQuery(); - if (rs.next() && Boolean.parseBoolean(rs.getString(1))) { - return true; - } - prep.setString(1, "MULTI_THREADED"); - rs = prep.executeQuery(); - return !rs.next() || !rs.getString(1).equals("1"); - } - } + case Connection.TRANSACTION_READ_UNCOMMITTED: case Connection.TRANSACTION_READ_COMMITTED: case Connection.TRANSACTION_REPEATABLE_READ: + case Constants.TRANSACTION_SNAPSHOT: case Connection.TRANSACTION_SERIALIZABLE: return true; default: @@ -2428,7 +1833,7 @@ public boolean dataDefinitionIgnoredInTransactions() { * ResultSet.TYPE_SCROLL_SENSITIVE is not supported. 
* * @param type the result set type - * @return true for all types except ResultSet.TYPE_FORWARD_ONLY + * @return true for all types except ResultSet.TYPE_SCROLL_SENSITIVE */ @Override public boolean supportsResultSetType(int type) { @@ -2447,7 +1852,7 @@ public boolean supportsResultSetType(int type) { @Override public boolean supportsResultSetConcurrency(int type, int concurrency) { if (isDebugEnabled()) { - debugCode("supportsResultSetConcurrency("+type+", "+concurrency+");"); + debugCode("supportsResultSetConcurrency(" + type + ", " + concurrency + ')'); } return type != ResultSet.TYPE_SCROLL_SENSITIVE; } @@ -2600,9 +2005,9 @@ public int getDefaultTransactionIsolation() { * @return true is so, false otherwise */ @Override - public boolean supportsMixedCaseIdentifiers() throws SQLException{ + public boolean supportsMixedCaseIdentifiers() throws SQLException { debugCodeCall("supportsMixedCaseIdentifiers"); - JdbcConnection.Settings settings = conn.getSettings(); + Session.StaticSettings settings = conn.getStaticSettings(); return !settings.databaseToUpper && !settings.databaseToLower && !settings.caseInsensitiveIdentifiers; } @@ -2615,7 +2020,7 @@ public boolean supportsMixedCaseIdentifiers() throws SQLException{ @Override public boolean storesUpperCaseIdentifiers() throws SQLException { debugCodeCall("storesUpperCaseIdentifiers"); - return conn.getSettings().databaseToUpper; + return conn.getStaticSettings().databaseToUpper; } /** @@ -2627,7 +2032,7 @@ public boolean storesUpperCaseIdentifiers() throws SQLException { @Override public boolean storesLowerCaseIdentifiers() throws SQLException { debugCodeCall("storesLowerCaseIdentifiers"); - return conn.getSettings().databaseToLower; + return conn.getStaticSettings().databaseToLower; } /** @@ -2639,7 +2044,7 @@ public boolean storesLowerCaseIdentifiers() throws SQLException { @Override public boolean storesMixedCaseIdentifiers() throws SQLException { debugCodeCall("storesMixedCaseIdentifiers"); - 
JdbcConnection.Settings settings = conn.getSettings(); + Session.StaticSettings settings = conn.getStaticSettings(); return !settings.databaseToUpper && !settings.databaseToLower && settings.caseInsensitiveIdentifiers; } @@ -2652,7 +2057,7 @@ public boolean storesMixedCaseIdentifiers() throws SQLException { @Override public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { debugCodeCall("supportsMixedCaseQuotedIdentifiers"); - return !conn.getSettings().caseInsensitiveIdentifiers; + return !conn.getStaticSettings().caseInsensitiveIdentifiers; } /** @@ -2688,7 +2093,7 @@ public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { @Override public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { debugCodeCall("storesMixedCaseQuotedIdentifiers"); - return conn.getSettings().caseInsensitiveIdentifiers; + return conn.getStaticSettings().caseInsensitiveIdentifiers; } /** @@ -2934,14 +2339,15 @@ public boolean supportsNamedParameters() { } /** - * Does the database support multiple open result sets. + * Does the database support multiple open result sets returned from a + * CallableStatement. 
* - * @return true + * @return false */ @Override public boolean supportsMultipleOpenResults() { debugCodeCall("supportsMultipleOpenResults"); - return true; + return false; } /** @@ -2959,9 +2365,16 @@ public boolean supportsGetGeneratedKeys() { * [Not supported] */ @Override - public ResultSet getSuperTypes(String catalog, String schemaPattern, - String typeNamePattern) throws SQLException { - throw unsupported("superTypes"); + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("getSuperTypes(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(typeNamePattern) + ')'); + } + return getResultSet(meta.getSuperTypes(catalog, schemaPattern, typeNamePattern)); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2982,24 +2395,14 @@ public ResultSet getSuperTypes(String catalog, String schemaPattern, * @return an empty result set */ @Override - public ResultSet getSuperTables(String catalog, String schemaPattern, - String tableNamePattern) throws SQLException { + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) // + throws SQLException { try { if (isDebugEnabled()) { - debugCode("getSuperTables(" - +quote(catalog)+", " - +quote(schemaPattern)+", " - +quote(tableNamePattern)+");"); + debugCode("getSuperTables(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(tableNamePattern) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "CATALOG_NAME TABLE_CAT, " - + "CATALOG_NAME TABLE_SCHEM, " - + "CATALOG_NAME TABLE_NAME, " - + "CATALOG_NAME SUPERTABLE_NAME " - + "FROM INFORMATION_SCHEMA.CATALOGS " - + "WHERE FALSE"); - return prep.executeQuery(); + return getResultSet(meta.getSuperTables(catalog, schemaPattern, tableNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -3009,10 +2412,17 @@ public ResultSet 
getSuperTables(String catalog, String schemaPattern, * [Not supported] */ @Override - public ResultSet getAttributes(String catalog, String schemaPattern, - String typeNamePattern, String attributeNamePattern) - throws SQLException { - throw unsupported("attributes"); + public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("getAttributes(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(typeNamePattern) + ", " + quote(attributeNamePattern) + ')'); + } + return getResultSet(meta.getAttributes(catalog, schemaPattern, typeNamePattern, attributeNamePattern)); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3045,9 +2455,13 @@ public int getResultSetHoldability() { * @return the major version */ @Override - public int getDatabaseMajorVersion() { - debugCodeCall("getDatabaseMajorVersion"); - return Constants.VERSION_MAJOR; + public int getDatabaseMajorVersion() throws SQLException { + try { + debugCodeCall("getDatabaseMajorVersion"); + return meta.getDatabaseMajorVersion(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3056,9 +2470,13 @@ public int getDatabaseMajorVersion() { * @return the minor version */ @Override - public int getDatabaseMinorVersion() { - debugCodeCall("getDatabaseMinorVersion"); - return Constants.VERSION_MINOR; + public int getDatabaseMinorVersion() throws SQLException { + try { + debugCodeCall("getDatabaseMinorVersion"); + return meta.getDatabaseMinorVersion(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3075,23 +2493,23 @@ public int getJDBCMajorVersion() { /** * Gets the minor version of the supported JDBC API. * - * @return the minor version (1) + * @return the minor version (2) */ @Override public int getJDBCMinorVersion() { debugCodeCall("getJDBCMinorVersion"); - return 1; + return 2; } /** * Gets the SQL State type. 
* - * @return DatabaseMetaData.sqlStateSQL99 + * @return {@link DatabaseMetaData#sqlStateSQL} */ @Override public int getSQLStateType() { debugCodeCall("getSQLStateType"); - return DatabaseMetaData.sqlStateSQL99; + return DatabaseMetaData.sqlStateSQL; } /** @@ -3122,22 +2540,6 @@ private void checkClosed() { conn.checkClosed(); } - private static String getPattern(String pattern) { - return pattern == null ? "%" : pattern; - } - - private static String getSchemaPattern(String pattern) { - return pattern == null ? "%" : pattern.isEmpty() ? - Constants.SCHEMA_MAIN : pattern; - } - - private static String getCatalogPattern(String catalogPattern) { - // Workaround for OpenOffice: getColumns is called with "" as the - // catalog - return catalogPattern == null || catalogPattern.isEmpty() ? - "%" : catalogPattern; - } - /** * Get the lifetime of a rowid. * @@ -3156,7 +2558,6 @@ public RowIdLifetime getRowIdLifetime() { *
            *
          1. TABLE_SCHEM (String) schema name
          2. *
          3. TABLE_CATALOG (String) catalog name
          4. - *
          5. IS_DEFAULT (boolean) if this is the default schema
          6. *
          * * @param catalogPattern null (to get all objects) or the catalog name @@ -3170,21 +2571,7 @@ public ResultSet getSchemas(String catalogPattern, String schemaPattern) throws SQLException { try { debugCodeCall("getSchemas(String,String)"); - checkClosed(); - PreparedStatement prep = conn - .prepareAutoCloseStatement("SELECT " - + "SCHEMA_NAME TABLE_SCHEM, " - + "CATALOG_NAME TABLE_CATALOG, " - +" IS_DEFAULT " - + "FROM INFORMATION_SCHEMA.SCHEMATA " - + "WHERE CATALOG_NAME LIKE ? ESCAPE ? " - + "AND SCHEMA_NAME LIKE ? ESCAPE ? " - + "ORDER BY SCHEMA_NAME"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - return prep.executeQuery(); + return getResultSet(meta.getSchemas(catalogPattern, schemaPattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -3218,21 +2605,19 @@ public boolean autoCommitFailureClosesAllResultSets() { public ResultSet getClientInfoProperties() throws SQLException { Properties clientInfo = conn.getClientInfo(); SimpleResult result = new SimpleResult(); - result.addColumn("NAME", "NAME", TypeInfo.TYPE_STRING); - result.addColumn("MAX_LEN", "MAX_LEN", TypeInfo.TYPE_INT); - result.addColumn("DEFAULT_VALUE", "DEFAULT_VALUE", TypeInfo.TYPE_STRING); - result.addColumn("DESCRIPTION", "DESCRIPTION", TypeInfo.TYPE_STRING); + result.addColumn("NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("MAX_LEN", TypeInfo.TYPE_INTEGER); + result.addColumn("DEFAULT_VALUE", TypeInfo.TYPE_VARCHAR); + result.addColumn("DESCRIPTION", TypeInfo.TYPE_VARCHAR); // Non-standard column - result.addColumn("VALUE", "VALUE", TypeInfo.TYPE_STRING); + result.addColumn("VALUE", TypeInfo.TYPE_VARCHAR); for (Entry entry : clientInfo.entrySet()) { - result.addRow(ValueString.get((String) entry.getKey()), ValueInt.get(Integer.MAX_VALUE), - ValueString.EMPTY, ValueString.EMPTY, ValueString.get((String) entry.getValue())); + 
result.addRow(ValueVarchar.get((String) entry.getKey()), ValueInteger.get(Integer.MAX_VALUE), + ValueVarchar.EMPTY, ValueVarchar.EMPTY, ValueVarchar.get((String) entry.getValue())); } int id = getNextId(TraceObject.RESULT_SET); - if (isDebugEnabled()) { - debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "getClientInfoProperties()"); - } - return new JdbcResultSet(conn, null, null, result, id, false, true, false); + debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "getClientInfoProperties()"); + return new JdbcResultSet(conn, null, null, result, id, true, false, false); } /** @@ -3269,23 +2654,42 @@ public boolean isWrapperFor(Class iface) throws SQLException { * [Not supported] Gets the list of function columns. */ @Override - public ResultSet getFunctionColumns(String catalog, String schemaPattern, - String functionNamePattern, String columnNamePattern) - throws SQLException { - throw unsupported("getFunctionColumns"); + public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, + String columnNamePattern) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("getFunctionColumns(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(functionNamePattern) + ", " + quote(columnNamePattern) + ')'); + } + return getResultSet( + meta.getFunctionColumns(catalog, schemaPattern, functionNamePattern, columnNamePattern)); + } catch (Exception e) { + throw logAndConvert(e); + } } /** * [Not supported] Gets the list of functions. 
*/ @Override - public ResultSet getFunctions(String catalog, String schemaPattern, - String functionNamePattern) throws SQLException { - throw unsupported("getFunctions"); + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) + throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("getFunctions(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(functionNamePattern) + ')'); + } + return getResultSet(meta.getFunctions(catalog, schemaPattern, functionNamePattern)); + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * [Not supported] + * Returns whether database always returns generated keys if valid names or + * indexes of columns were specified and command was completed successfully. + * + * @return true */ @Override public boolean generatedKeyAlwaysReturned() { @@ -3293,7 +2697,26 @@ public boolean generatedKeyAlwaysReturned() { } /** - * [Not supported] + * Gets the list of pseudo and invisible columns. The result set is sorted + * by TABLE_SCHEM, TABLE_NAME, and COLUMN_NAME. + * + *
            + *
          1. TABLE_CAT (String) table catalog
          2. + *
          3. TABLE_SCHEM (String) table schema
          4. + *
          5. TABLE_NAME (String) table name
          6. + *
          7. COLUMN_NAME (String) column name
          8. + *
          9. DATA_TYPE (int) data type (see java.sql.Types)
          10. + *
          11. COLUMN_SIZE (int) precision + * (values larger than 2 GB are returned as 2 GB)
          12. + *
          13. DECIMAL_DIGITS (int) scale (0 for INTEGER and VARCHAR)
          14. + *
          15. NUM_PREC_RADIX (int) radix
          16. + *
          17. COLUMN_USAGE (String) he allowed usage for the column, + * see {@link java.sql.PseudoColumnUsage}
          18. + *
          19. REMARKS (String) comment
          20. + *
          21. CHAR_OCTET_LENGTH (int) for char types the + * maximum number of bytes in the column
          22. + *
          23. IS_NULLABLE (String) "NO" or "YES"
          24. + *
          * * @param catalog null (to get all objects) or the catalog name * @param schemaPattern null (to get all objects) or a schema name @@ -3302,11 +2725,20 @@ public boolean generatedKeyAlwaysReturned() { * (uppercase for unquoted names) * @param columnNamePattern null (to get all objects) or a column name * (uppercase for unquoted names) + * @return the list of pseudo and invisible columns */ @Override - public ResultSet getPseudoColumns(String catalog, String schemaPattern, - String tableNamePattern, String columnNamePattern) { - return null; + public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("getPseudoColumns(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(tableNamePattern) + ", " + quote(columnNamePattern) + ')'); + } + return getResultSet(meta.getPseudoColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern)); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3317,4 +2749,8 @@ public String toString() { return getTraceObjectName() + ": " + conn; } + private JdbcResultSet getResultSet(ResultInterface result) { + return new JdbcResultSet(conn, null, null, result, getNextId(TraceObject.RESULT_SET), true, false, false); + } + } diff --git a/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaDataBackwardsCompat.java b/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaDataBackwardsCompat.java index 6652ac883f..9dafb7ab58 100644 --- a/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaDataBackwardsCompat.java +++ b/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaDataBackwardsCompat.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, and the - * EPL 1.0 (http://h2database.com/html/license.html). Initial Developer: H2 + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, and the + * EPL 1.0 (https://h2database.com/html/license.html). 
Initial Developer: H2 * Group */ package org.h2.jdbc; diff --git a/h2/src/main/org/h2/jdbc/JdbcException.java b/h2/src/main/org/h2/jdbc/JdbcException.java index 2bf7998c54..4578f57454 100644 --- a/h2/src/main/org/h2/jdbc/JdbcException.java +++ b/h2/src/main/org/h2/jdbc/JdbcException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -19,6 +19,7 @@ public interface JdbcException { /** * INTERNAL + * @return original message */ String getOriginalMessage(); @@ -34,6 +35,7 @@ public interface JdbcException { /** * INTERNAL + * @param sql to set */ void setSQL(String sql); diff --git a/h2/src/main/org/h2/jdbc/JdbcLob.java b/h2/src/main/org/h2/jdbc/JdbcLob.java index d94625e6b9..6862c1b984 100644 --- a/h2/src/main/org/h2/jdbc/JdbcLob.java +++ b/h2/src/main/org/h2/jdbc/JdbcLob.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; @@ -16,6 +16,7 @@ import org.h2.api.ErrorCode; import org.h2.message.DbException; import org.h2.message.TraceObject; +import org.h2.mvstore.DataUtils; import org.h2.util.IOUtils; import org.h2.util.Task; import org.h2.value.Value; @@ -25,7 +26,7 @@ */ public abstract class JdbcLob extends TraceObject { - final class LobPipedOutputStream extends PipedOutputStream { + static final class LobPipedOutputStream extends PipedOutputStream { private final Task task; LobPipedOutputStream(PipedInputStream snk, Task task) throws IOException { @@ -39,7 +40,7 @@ public void close() throws IOException { try { task.get(); } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } } @@ -116,6 +117,9 @@ void checkEditable() { /** * Check the state of the LOB and throws the exception when check failed * (the LOB must be set completely before read). + * + * @throws SQLException on SQL exception + * @throws IOException on I/O exception */ void checkReadable() throws SQLException, IOException { checkClosed(); @@ -147,6 +151,7 @@ public void free() { * Returns the input stream. * * @return the input stream + * @throws SQLException on failure */ InputStream getBinaryStream() throws SQLException { try { @@ -162,6 +167,7 @@ InputStream getBinaryStream() throws SQLException { * Returns the reader. * * @return the reader + * @throws SQLException on failure */ Reader getCharacterStream() throws SQLException { try { diff --git a/h2/src/main/org/h2/jdbc/JdbcParameterMetaData.java b/h2/src/main/org/h2/jdbc/JdbcParameterMetaData.java index eb02ab7cf6..febbe79dcf 100644 --- a/h2/src/main/org/h2/jdbc/JdbcParameterMetaData.java +++ b/h2/src/main/org/h2/jdbc/JdbcParameterMetaData.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -15,13 +15,14 @@ import org.h2.message.TraceObject; import org.h2.util.MathUtils; import org.h2.value.DataType; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueToObjectConverter; /** * Information about the parameters of a prepared statement. */ -public class JdbcParameterMetaData extends TraceObject implements - ParameterMetaData { +public final class JdbcParameterMetaData extends TraceObject implements ParameterMetaData { private final JdbcPreparedStatement prep; private final int paramCount; @@ -80,12 +81,11 @@ public int getParameterMode(int param) throws SQLException { public int getParameterType(int param) throws SQLException { try { debugCodeCall("getParameterType", param); - ParameterInterface p = getParameter(param); - int type = p.getValueType(); - if (type == Value.UNKNOWN) { - type = Value.STRING; + TypeInfo type = getParameter(param).getType(); + if (type.getValueType() == Value.UNKNOWN) { + type = TypeInfo.TYPE_VARCHAR; } - return DataType.getDataType(type).sqlType; + return DataType.convertTypeToSQLType(type); } catch (Exception e) { throw logAndConvert(e); } @@ -102,8 +102,8 @@ public int getParameterType(int param) throws SQLException { public int getPrecision(int param) throws SQLException { try { debugCodeCall("getPrecision", param); - ParameterInterface p = getParameter(param); - return MathUtils.convertLongToInt(p.getPrecision()); + TypeInfo type = getParameter(param).getType(); + return type.getValueType() == Value.UNKNOWN ? 
0 : MathUtils.convertLongToInt(type.getPrecision()); } catch (Exception e) { throw logAndConvert(e); } @@ -120,8 +120,8 @@ public int getPrecision(int param) throws SQLException { public int getScale(int param) throws SQLException { try { debugCodeCall("getScale", param); - ParameterInterface p = getParameter(param); - return p.getScale(); + TypeInfo type = getParameter(param).getType(); + return type.getValueType() == Value.UNKNOWN ? 0 : type.getScale(); } catch (Exception e) { throw logAndConvert(e); } @@ -173,12 +173,11 @@ public boolean isSigned(int param) throws SQLException { public String getParameterClassName(int param) throws SQLException { try { debugCodeCall("getParameterClassName", param); - ParameterInterface p = getParameter(param); - int type = p.getValueType(); + int type = getParameter(param).getType().getValueType(); if (type == Value.UNKNOWN) { - type = Value.STRING; + type = Value.VARCHAR; } - return DataType.getTypeClassName(type, false); + return ValueToObjectConverter.getDefaultClass(type, true).getName(); } catch (Exception e) { throw logAndConvert(e); } @@ -195,12 +194,11 @@ public String getParameterClassName(int param) throws SQLException { public String getParameterTypeName(int param) throws SQLException { try { debugCodeCall("getParameterTypeName", param); - ParameterInterface p = getParameter(param); - int type = p.getValueType(); - if (type == Value.UNKNOWN) { - type = Value.STRING; + TypeInfo type = getParameter(param).getType(); + if (type.getValueType() == Value.UNKNOWN) { + type = TypeInfo.TYPE_VARCHAR; } - return DataType.getDataType(type).name; + return type.getDeclaredTypeName(); } catch (Exception e) { throw logAndConvert(e); } diff --git a/h2/src/main/org/h2/jdbc/JdbcPreparedStatement.java b/h2/src/main/org/h2/jdbc/JdbcPreparedStatement.java index 1ab8dba2ff..9533d97c3b 100644 --- a/h2/src/main/org/h2/jdbc/JdbcPreparedStatement.java +++ b/h2/src/main/org/h2/jdbc/JdbcPreparedStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 
2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -20,6 +20,7 @@ import java.sql.ResultSetMetaData; import java.sql.RowId; import java.sql.SQLException; +import java.sql.SQLType; import java.sql.SQLXML; import java.sql.Statement; import java.util.ArrayList; @@ -34,46 +35,60 @@ import org.h2.result.MergedResult; import org.h2.result.ResultInterface; import org.h2.result.ResultWithGeneratedKeys; -import org.h2.util.DateTimeUtils; import org.h2.util.IOUtils; +import org.h2.util.LegacyDateTimeUtils; import org.h2.util.Utils; import org.h2.value.DataType; import org.h2.value.Value; +import org.h2.value.ValueBigint; import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueTinyint; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; /** * Represents a prepared statement. + *

          + * Thread safety: the prepared statement is not thread-safe. If the same + * prepared statement is used by multiple threads access to it must be + * synchronized. The single synchronized block must include assignment of + * parameters, execution of the command and all operations with its result. + *

          + *
          + * synchronized (prep) {
          + *     prep.setInt(1, 10);
          + *     try (ResultSet rs = prep.executeQuery()) {
          + *         while (rs.next) {
          + *             // Do something
          + *         }
          + *     }
          + * }
          + * synchronized (prep) {
          + *     prep.setInt(1, 15);
          + *     updateCount = prep.executeUpdate();
          + * }
          + * 
          */ -public class JdbcPreparedStatement extends JdbcStatement implements - PreparedStatement, JdbcPreparedStatementBackwardsCompat { +public class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { protected CommandInterface command; - private final String sqlStatement; private ArrayList batchParameters; private MergedResult batchIdentities; private HashMap cachedColumnLabelMap; private final Object generatedKeysRequest; - JdbcPreparedStatement(JdbcConnection conn, String sql, int id, - int resultSetType, int resultSetConcurrency, - boolean closeWithResultSet, Object generatedKeysRequest) { - super(conn, id, resultSetType, resultSetConcurrency, closeWithResultSet); - this.generatedKeysRequest = conn.scopeGeneratedKeys() ? false : generatedKeysRequest; + JdbcPreparedStatement(JdbcConnection conn, String sql, int id, int resultSetType, int resultSetConcurrency, + Object generatedKeysRequest) { + super(conn, id, resultSetType, resultSetConcurrency); + this.generatedKeysRequest = generatedKeysRequest; setTrace(session.getTrace(), TraceObject.PREPARED_STATEMENT, id); - this.sqlStatement = sql; command = conn.prepareCommand(sql, fetchSize); } @@ -99,9 +114,7 @@ void setCachedColumnLabelMap(HashMap cachedColumnLabelMap) { public ResultSet executeQuery() throws SQLException { try { int id = getNextId(TraceObject.RESULT_SET); - if (isDebugEnabled()) { - debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "executeQuery()"); - } + debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "executeQuery()"); batchIdentities = null; synchronized (session) { checkClosed(); @@ -119,8 +132,8 @@ public ResultSet executeQuery() throws SQLException { setExecutingStatement(null); } } - resultSet = new JdbcResultSet(conn, this, command, result, id, - closedByResultSet, scrollable, updatable, cachedColumnLabelMap); + resultSet = new JdbcResultSet(conn, this, command, result, id, scrollable, updatable, + cachedColumnLabelMap); } return resultSet; } 
catch (Exception e) { @@ -139,22 +152,22 @@ public ResultSet executeQuery() throws SQLException { * throw an exception, the current transaction (if any) is committed after * executing the statement. * - * @return the update count (number of row affected by an insert, update or - * delete, or 0 if no rows or the statement was a create, drop, - * commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returns nothing, or + * {@link #SUCCESS_NO_INFO} if number of rows is too large for + * {@code int} data type) * @throws SQLException if this object is closed or invalid + * @see #executeLargeUpdate() */ @Override public int executeUpdate() throws SQLException { try { debugCodeCall("executeUpdate"); - checkClosedForWrite(); + checkClosed(); batchIdentities = null; - try { - return executeUpdateInternal(); - } finally { - afterWriting(); - } + long updateCount = executeUpdateInternal(); + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } @@ -171,28 +184,24 @@ public int executeUpdate() throws SQLException { * throw an exception, the current transaction (if any) is committed after * executing the statement. 
* - * @return the update count (number of row affected by an insert, update or - * delete, or 0 if no rows or the statement was a create, drop, - * commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returns nothing) * @throws SQLException if this object is closed or invalid */ @Override public long executeLargeUpdate() throws SQLException { try { debugCodeCall("executeLargeUpdate"); - checkClosedForWrite(); + checkClosed(); batchIdentities = null; - try { - return executeUpdateInternal(); - } finally { - afterWriting(); - } + return executeUpdateInternal(); } catch (Exception e) { throw logAndConvert(e); } } - private int executeUpdateInternal() throws SQLException { + private long executeUpdateInternal() { closeOldResultSet(); synchronized (session) { try { @@ -202,8 +211,7 @@ private int executeUpdateInternal() throws SQLException { ResultInterface gk = result.getGeneratedKeys(); if (gk != null) { int id = getNextId(TraceObject.RESULT_SET); - generatedKeys = new JdbcResultSet(conn, this, command, gk, id, - false, true, false); + generatedKeys = new JdbcResultSet(conn, this, command, gk, id, true, false, false); } } finally { setExecutingStatement(null); @@ -225,46 +233,38 @@ private int executeUpdateInternal() throws SQLException { public boolean execute() throws SQLException { try { int id = getNextId(TraceObject.RESULT_SET); - if (isDebugEnabled()) { - debugCodeCall("execute"); - } - checkClosedForWrite(); - try { - boolean returnsResultSet; - synchronized (conn.getSession()) { - closeOldResultSet(); - boolean lazy = false; - try { - setExecutingStatement(command); - if (command.isQuery()) { - returnsResultSet = true; - boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY; - boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE; - ResultInterface result = command.executeQuery(maxRows, 
scrollable); - lazy = result.isLazy(); - resultSet = new JdbcResultSet(conn, this, command, result, - id, closedByResultSet, scrollable, - updatable, cachedColumnLabelMap); - } else { - returnsResultSet = false; - ResultWithGeneratedKeys result = command.executeUpdate(generatedKeysRequest); - updateCount = result.getUpdateCount(); - ResultInterface gk = result.getGeneratedKeys(); - if (gk != null) { - generatedKeys = new JdbcResultSet(conn, this, command, gk, id, - false, true, false); - } - } - } finally { - if (!lazy) { - setExecutingStatement(null); + debugCodeCall("execute"); + checkClosed(); + boolean returnsResultSet; + synchronized (session) { + closeOldResultSet(); + boolean lazy = false; + try { + setExecutingStatement(command); + if (command.isQuery()) { + returnsResultSet = true; + boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY; + boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE; + ResultInterface result = command.executeQuery(maxRows, scrollable); + lazy = result.isLazy(); + resultSet = new JdbcResultSet(conn, this, command, result, id, scrollable, updatable, + cachedColumnLabelMap); + } else { + returnsResultSet = false; + ResultWithGeneratedKeys result = command.executeUpdate(generatedKeysRequest); + updateCount = result.getUpdateCount(); + ResultInterface gk = result.getGeneratedKeys(); + if (gk != null) { + generatedKeys = new JdbcResultSet(conn, this, command, gk, id, true, false, false); } } + } finally { + if (!lazy) { + setExecutingStatement(null); + } } - return returnsResultSet; - } finally { - afterWriting(); } + return returnsResultSet; } catch (Throwable e) { throw logAndConvert(e); } @@ -322,54 +322,6 @@ public void addBatch(String sql) throws SQLException { } } - /** - * Calling this method is not legal on a PreparedStatement. 
- * - * @param sql ignored - * @throws SQLException Unsupported Feature - */ - @Override - public int executeUpdate(String sql) throws SQLException { - try { - debugCodeCall("executeUpdate", sql); - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * Calling this method is not legal on a PreparedStatement. - * - * @param sql ignored - * @throws SQLException Unsupported Feature - */ - @Override - public long executeLargeUpdate(String sql) throws SQLException { - try { - debugCodeCall("executeLargeUpdate", sql); - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * Calling this method is not legal on a PreparedStatement. - * - * @param sql ignored - * @throws SQLException Unsupported Feature - */ - @Override - public boolean execute(String sql) throws SQLException { - try { - debugCodeCall("execute", sql); - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); - } - } - // ============================================================= /** @@ -383,7 +335,7 @@ public boolean execute(String sql) throws SQLException { public void setNull(int parameterIndex, int sqlType) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNull("+parameterIndex+", "+sqlType+");"); + debugCode("setNull(" + parameterIndex + ", " + sqlType + ')'); } setParameter(parameterIndex, ValueNull.INSTANCE); } catch (Exception e) { @@ -402,9 +354,9 @@ public void setNull(int parameterIndex, int sqlType) throws SQLException { public void setInt(int parameterIndex, int x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setInt("+parameterIndex+", "+x+");"); + debugCode("setInt(" + parameterIndex + ", " + x + ')'); } - setParameter(parameterIndex, ValueInt.get(x)); + setParameter(parameterIndex, 
ValueInteger.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -421,10 +373,9 @@ public void setInt(int parameterIndex, int x) throws SQLException { public void setString(int parameterIndex, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setString("+parameterIndex+", "+quote(x)+");"); + debugCode("setString(" + parameterIndex + ", " + quote(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueString.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -438,14 +389,12 @@ public void setString(int parameterIndex, String x) throws SQLException { * @throws SQLException if this object is closed */ @Override - public void setBigDecimal(int parameterIndex, BigDecimal x) - throws SQLException { + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBigDecimal("+parameterIndex+", " + quoteBigDecimal(x) + ");"); + debugCode("setBigDecimal(" + parameterIndex + ", " + quoteBigDecimal(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueDecimal.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? ValueNull.INSTANCE : ValueNumeric.getAnyScale(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -453,20 +402,24 @@ public void setBigDecimal(int parameterIndex, BigDecimal x) /** * Sets the value of a parameter. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with {@link java.time.LocalDate} + * parameter instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setDate(int parameterIndex, java.sql.Date x) - throws SQLException { + public void setDate(int parameterIndex, java.sql.Date x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setDate("+parameterIndex+", " + quoteDate(x) + ");"); + debugCode("setDate(" + parameterIndex + ", " + quoteDate(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueDate.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromDate(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -474,20 +427,24 @@ public void setDate(int parameterIndex, java.sql.Date x) /** * Sets the value of a parameter. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with {@link java.time.LocalTime} + * parameter instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setTime(int parameterIndex, java.sql.Time x) - throws SQLException { + public void setTime(int parameterIndex, java.sql.Time x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setTime("+parameterIndex+", " + quoteTime(x) + ");"); + debugCode("setTime(" + parameterIndex + ", " + quoteTime(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueTime.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTime(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -495,20 +452,25 @@ public void setTime(int parameterIndex, java.sql.Time x) /** * Sets the value of a parameter. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setTimestamp(int parameterIndex, java.sql.Timestamp x) - throws SQLException { + public void setTimestamp(int parameterIndex, java.sql.Timestamp x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setTimestamp("+parameterIndex+", " + quoteTimestamp(x) + ");"); + debugCode("setTimestamp(" + parameterIndex + ", " + quoteTimestamp(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueTimestamp.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTimestamp(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -526,14 +488,12 @@ public void setTimestamp(int parameterIndex, java.sql.Timestamp x) public void setObject(int parameterIndex, Object x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setObject("+parameterIndex+", x);"); + debugCode("setObject(" + parameterIndex + ", x)"); } if (x == null) { - // throw Errors.getInvalidValueException("null", "x"); setParameter(parameterIndex, ValueNull.INSTANCE); } else { - setParameter(parameterIndex, - DataType.convertToValue(session, x, Value.UNKNOWN)); + setParameter(parameterIndex, ValueToObjectConverter.objectToValue(session, x, Value.UNKNOWN)); } } catch (Exception e) { throw logAndConvert(e); @@ -555,15 +515,9 @@ public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setObject("+parameterIndex+", x, "+targetSqlType+");"); - } - int type = DataType.convertSQLTypeToValueType(targetSqlType); - if (x == null) { - setParameter(parameterIndex, ValueNull.INSTANCE); - } else { - Value v = DataType.convertToValue(conn.getSession(), x, type); - setParameter(parameterIndex, v.convertTo(type, conn.getMode())); + 
debugCode("setObject(" + parameterIndex + ", x, " + targetSqlType + ')'); } + setObjectWithType(parameterIndex, x, DataType.convertSQLTypeToValueType(targetSqlType)); } catch (Exception e) { throw logAndConvert(e); } @@ -585,14 +539,72 @@ public void setObject(int parameterIndex, Object x, int targetSqlType, int scale) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setObject("+parameterIndex+", x, "+targetSqlType+", "+scale+");"); + debugCode("setObject(" + parameterIndex + ", x, " + targetSqlType + ", " + scale + ')'); + } + setObjectWithType(parameterIndex, x, DataType.convertSQLTypeToValueType(targetSqlType)); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Sets the value of a parameter. The object is converted, if required, to + * the specified data type before sending to the database. + * Objects of unknown classes are serialized (on the client side). + * + * @param parameterIndex the parameter index (1, 2, ...) + * @param x the value, null is allowed + * @param targetSqlType the SQL type + * @throws SQLException if this object is closed + */ + @Override + public void setObject(int parameterIndex, Object x, SQLType targetSqlType) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("setObject(" + parameterIndex + ", x, " + DataType.sqlTypeToString(targetSqlType) + ')'); } - setObject(parameterIndex, x, targetSqlType); + setObjectWithType(parameterIndex, x, DataType.convertSQLTypeToValueType(targetSqlType)); } catch (Exception e) { throw logAndConvert(e); } } + /** + * Sets the value of a parameter. The object is converted, if required, to + * the specified data type before sending to the database. + * Objects of unknown classes are serialized (on the client side). + * + * @param parameterIndex the parameter index (1, 2, ...) 
+ * @param x the value, null is allowed + * @param targetSqlType the SQL type + * @param scaleOrLength is ignored + * @throws SQLException if this object is closed + */ + @Override + public void setObject(int parameterIndex, Object x, SQLType targetSqlType, int scaleOrLength) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("setObject(" + parameterIndex + ", x, " + DataType.sqlTypeToString(targetSqlType) + ", " + + scaleOrLength + ')'); + } + setObjectWithType(parameterIndex, x, DataType.convertSQLTypeToValueType(targetSqlType)); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private void setObjectWithType(int parameterIndex, Object x, int type) { + if (x == null) { + setParameter(parameterIndex, ValueNull.INSTANCE); + } else { + Value v = ValueToObjectConverter.objectToValue(conn.getSession(), x, type); + if (type != Value.UNKNOWN) { + v = v.convertTo(type, conn); + } + setParameter(parameterIndex, v); + } + } + /** * Sets the value of a parameter. * @@ -604,7 +616,7 @@ public void setObject(int parameterIndex, Object x, int targetSqlType, public void setBoolean(int parameterIndex, boolean x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBoolean("+parameterIndex+", "+x+");"); + debugCode("setBoolean(" + parameterIndex + ", " + x + ')'); } setParameter(parameterIndex, ValueBoolean.get(x)); } catch (Exception e) { @@ -623,9 +635,9 @@ public void setBoolean(int parameterIndex, boolean x) throws SQLException { public void setByte(int parameterIndex, byte x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setByte("+parameterIndex+", "+x+");"); + debugCode("setByte(" + parameterIndex + ", " + x + ')'); } - setParameter(parameterIndex, ValueByte.get(x)); + setParameter(parameterIndex, ValueTinyint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -642,9 +654,9 @@ public void setByte(int parameterIndex, byte x) throws SQLException { public void setShort(int parameterIndex, short x) 
throws SQLException { try { if (isDebugEnabled()) { - debugCode("setShort("+parameterIndex+", (short) "+x+");"); + debugCode("setShort(" + parameterIndex + ", (short) " + x + ')'); } - setParameter(parameterIndex, ValueShort.get(x)); + setParameter(parameterIndex, ValueSmallint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -661,9 +673,9 @@ public void setShort(int parameterIndex, short x) throws SQLException { public void setLong(int parameterIndex, long x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setLong("+parameterIndex+", "+x+"L);"); + debugCode("setLong(" + parameterIndex + ", " + x + "L)"); } - setParameter(parameterIndex, ValueLong.get(x)); + setParameter(parameterIndex, ValueBigint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -680,9 +692,9 @@ public void setLong(int parameterIndex, long x) throws SQLException { public void setFloat(int parameterIndex, float x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setFloat("+parameterIndex+", "+x+"f);"); + debugCode("setFloat(" + parameterIndex + ", " + x + "f)"); } - setParameter(parameterIndex, ValueFloat.get(x)); + setParameter(parameterIndex, ValueReal.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -699,7 +711,7 @@ public void setFloat(int parameterIndex, float x) throws SQLException { public void setDouble(int parameterIndex, double x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setDouble("+parameterIndex+", "+x+"d);"); + debugCode("setDouble(" + parameterIndex + ", " + x + "d)"); } setParameter(parameterIndex, ValueDouble.get(x)); } catch (Exception e) { @@ -718,24 +730,29 @@ public void setRef(int parameterIndex, Ref x) throws SQLException { /** * Sets the date using a specified time zone. The value will be converted to * the local time zone. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with {@link java.time.LocalDate} + * parameter instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @param calendar the calendar * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setDate(int parameterIndex, java.sql.Date x, Calendar calendar) - throws SQLException { + public void setDate(int parameterIndex, java.sql.Date x, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setDate("+parameterIndex+", " + quoteDate(x) + ", calendar);"); + debugCode("setDate(" + parameterIndex + ", " + quoteDate(x) + ", calendar)"); } if (x == null) { setParameter(parameterIndex, ValueNull.INSTANCE); } else { setParameter(parameterIndex, - calendar != null ? DateTimeUtils.convertDate(x, calendar) : ValueDate.get(x)); + LegacyDateTimeUtils.fromDate(conn, calendar != null ? calendar.getTimeZone() : null, x)); } } catch (Exception e) { throw logAndConvert(e); @@ -745,24 +762,29 @@ public void setDate(int parameterIndex, java.sql.Date x, Calendar calendar) /** * Sets the time using a specified time zone. The value will be converted to * the local time zone. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with {@link java.time.LocalTime} + * parameter instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @param calendar the calendar * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setTime(int parameterIndex, java.sql.Time x, Calendar calendar) - throws SQLException { + public void setTime(int parameterIndex, java.sql.Time x, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setTime("+parameterIndex+", " + quoteTime(x) + ", calendar);"); + debugCode("setTime(" + parameterIndex + ", " + quoteTime(x) + ", calendar)"); } if (x == null) { setParameter(parameterIndex, ValueNull.INSTANCE); } else { setParameter(parameterIndex, - calendar != null ? DateTimeUtils.convertTime(x, calendar) : ValueTime.get(x)); + LegacyDateTimeUtils.fromTime(conn, calendar != null ? calendar.getTimeZone() : null, x)); } } catch (Exception e) { throw logAndConvert(e); @@ -772,25 +794,29 @@ public void setTime(int parameterIndex, java.sql.Time x, Calendar calendar) /** * Sets the timestamp using a specified time zone. The value will be * converted to the local time zone. + *

          + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

          * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @param calendar the calendar * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setTimestamp(int parameterIndex, java.sql.Timestamp x, - Calendar calendar) throws SQLException { + public void setTimestamp(int parameterIndex, java.sql.Timestamp x, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setTimestamp(" + parameterIndex + ", " + - quoteTimestamp(x) + ", calendar);"); + debugCode("setTimestamp(" + parameterIndex + ", " + quoteTimestamp(x) + ", calendar)"); } if (x == null) { setParameter(parameterIndex, ValueNull.INSTANCE); } else { setParameter(parameterIndex, - calendar != null ? DateTimeUtils.convertTimestamp(x, calendar) : ValueTimestamp.get(x)); + LegacyDateTimeUtils.fromTimestamp(conn, calendar != null ? calendar.getTimeZone() : null, x)); } } catch (Exception e) { throw logAndConvert(e); @@ -822,7 +848,7 @@ public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNull("+parameterIndex+", "+sqlType+", "+quote(typeName)+");"); + debugCode("setNull(" + parameterIndex + ", " + sqlType + ", " + quote(typeName) + ')'); } setNull(parameterIndex, sqlType); } catch (Exception e) { @@ -841,20 +867,16 @@ public void setNull(int parameterIndex, int sqlType, String typeName) public void setBlob(int parameterIndex, Blob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBlob("+parameterIndex+", x);"); + debugCode("setBlob(" + parameterIndex + ", x)"); } - checkClosedForWrite(); - try { - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createBlob(x.getBinaryStream(), -1); - } - setParameter(parameterIndex, v); - } finally { - afterWriting(); + checkClosed(); + Value v; + if (x == null) { + v = ValueNull.INSTANCE; + } else { + v = 
conn.createBlob(x.getBinaryStream(), -1); } + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -873,15 +895,11 @@ public void setBlob(int parameterIndex, Blob x) throws SQLException { public void setBlob(int parameterIndex, InputStream x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBlob("+parameterIndex+", x);"); - } - checkClosedForWrite(); - try { - Value v = conn.createBlob(x, -1); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setBlob(" + parameterIndex + ", x)"); } + checkClosed(); + Value v = conn.createBlob(x, -1); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -898,20 +916,16 @@ public void setBlob(int parameterIndex, InputStream x) throws SQLException { public void setClob(int parameterIndex, Clob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setClob("+parameterIndex+", x);"); + debugCode("setClob(" + parameterIndex + ", x)"); } - checkClosedForWrite(); - try { - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createClob(x.getCharacterStream(), -1); - } - setParameter(parameterIndex, v); - } finally { - afterWriting(); + checkClosed(); + Value v; + if (x == null) { + v = ValueNull.INSTANCE; + } else { + v = conn.createClob(x.getCharacterStream(), -1); } + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -930,20 +944,16 @@ public void setClob(int parameterIndex, Clob x) throws SQLException { public void setClob(int parameterIndex, Reader x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setClob("+parameterIndex+", x);"); + debugCode("setClob(" + parameterIndex + ", x)"); } - checkClosedForWrite(); - try { - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createClob(x, -1); - } - setParameter(parameterIndex, v); - } finally { - afterWriting(); + checkClosed(); + Value v; + if (x == null) { + v = 
ValueNull.INSTANCE; + } else { + v = conn.createClob(x, -1); } + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -960,14 +970,14 @@ public void setClob(int parameterIndex, Reader x) throws SQLException { public void setArray(int parameterIndex, Array x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setArray("+parameterIndex+", x);"); + debugCode("setArray(" + parameterIndex + ", x)"); } checkClosed(); Value v; if (x == null) { v = ValueNull.INSTANCE; } else { - v = DataType.convertToValue(session, x.getArray(), Value.ARRAY); + v = ValueToObjectConverter.objectToValue(session, x.getArray(), Value.ARRAY); } setParameter(parameterIndex, v); } catch (Exception e) { @@ -986,10 +996,9 @@ public void setArray(int parameterIndex, Array x) throws SQLException { public void setBytes(int parameterIndex, byte[] x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBytes("+parameterIndex+", "+quoteBytes(x)+");"); + debugCode("setBytes(" + parameterIndex + ", " + quoteBytes(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueBytes.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? 
ValueNull.INSTANCE : ValueVarbinary.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1010,15 +1019,11 @@ public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBinaryStream("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createBlob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setBinaryStream(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createBlob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1086,15 +1091,11 @@ public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setAsciiStream("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(IOUtils.getAsciiReader(x), length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setAsciiStream(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createClob(IOUtils.getAsciiReader(x), length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1161,15 +1162,11 @@ public void setCharacterStream(int parameterIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setCharacterStream("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setCharacterStream(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createClob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1200,13 +1197,9 @@ public ResultSetMetaData getMetaData() throws SQLException { return null; } 
int id = getNextId(TraceObject.RESULT_SET_META_DATA); - if (isDebugEnabled()) { - debugCodeAssign("ResultSetMetaData", - TraceObject.RESULT_SET_META_DATA, id, "getMetaData()"); - } + debugCodeAssign("ResultSetMetaData", TraceObject.RESULT_SET_META_DATA, id, "getMetaData()"); String catalog = conn.getCatalog(); - return new JdbcResultSetMetaData( - null, this, result, catalog, session.getTrace(), id); + return new JdbcResultSetMetaData(null, this, result, catalog, session.getTrace(), id); } catch (Exception e) { throw logAndConvert(e); } @@ -1251,6 +1244,7 @@ public void close() throws SQLException { * If one of the batched statements fails, this database will continue. * * @return the array of update counts + * @see #executeLargeBatch() */ @Override public int[] executeBatch() throws SQLException { @@ -1258,285 +1252,117 @@ public int[] executeBatch() throws SQLException { debugCodeCall("executeBatch"); if (batchParameters == null) { // Empty batch is allowed, see JDK-4639504 and other issues - batchParameters = Utils.newSmallArrayList(); + batchParameters = new ArrayList<>(); } batchIdentities = new MergedResult(); int size = batchParameters.size(); int[] result = new int[size]; - boolean error = false; - SQLException next = null; - checkClosedForWrite(); - try { - for (int i = 0; i < size; i++) { - Value[] set = batchParameters.get(i); - ArrayList parameters = - command.getParameters(); - for (int j = 0; j < set.length; j++) { - Value value = set[j]; - ParameterInterface param = parameters.get(j); - param.setValue(value, false); - } - try { - result[i] = executeUpdateInternal(); - // Cannot use own implementation, it returns batch identities - ResultSet rs = super.getGeneratedKeys(); - batchIdentities.add(((JdbcResultSet) rs).result); - } catch (Exception re) { - SQLException e = logAndConvert(re); - if (next == null) { - next = e; - } else { - e.setNextException(next); - next = e; - } - result[i] = Statement.EXECUTE_FAILED; - error = true; - } - } - 
batchParameters = null; - if (error) { - throw new JdbcBatchUpdateException(next, result); - } - return result; - } finally { - afterWriting(); - } - } catch (Exception e) { - throw logAndConvert(e); - } - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - if (batchIdentities != null) { - try { - int id = getNextId(TraceObject.RESULT_SET); - if (isDebugEnabled()) { - debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "getGeneratedKeys()"); - } - checkClosed(); - generatedKeys = new JdbcResultSet(conn, this, null, batchIdentities.getResult(), id, false, true, - false); - } catch (Exception e) { - throw logAndConvert(e); - } - } - return super.getGeneratedKeys(); - } - - /** - * Adds the current settings to the batch. - */ - @Override - public void addBatch() throws SQLException { - try { - debugCodeCall("addBatch"); - checkClosedForWrite(); - try { - ArrayList parameters = - command.getParameters(); - int size = parameters.size(); - Value[] set = new Value[size]; - for (int i = 0; i < size; i++) { - ParameterInterface param = parameters.get(i); - param.checkSet(); - Value value = param.getParamValue(); - set[i] = value; - } - if (batchParameters == null) { - batchParameters = Utils.newSmallArrayList(); - } - batchParameters.add(set); - } finally { - afterWriting(); + SQLException exception = new SQLException(); + checkClosed(); + for (int i = 0; i < size; i++) { + long updateCount = executeBatchElement(batchParameters.get(i), exception); + result[i] = updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } - } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * Calling this method is not legal on a PreparedStatement. 
- * - * @param sql ignored - * @param autoGeneratedKeys ignored - * @throws SQLException Unsupported Feature - */ - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) - throws SQLException { - try { - if (isDebugEnabled()) { - debugCode("executeUpdate("+quote(sql)+", "+autoGeneratedKeys+");"); + batchParameters = null; + exception = exception.getNextException(); + if (exception != null) { + throw new JdbcBatchUpdateException(exception, result); } - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + return result; } catch (Exception e) { throw logAndConvert(e); } } /** - * Calling this method is not legal on a PreparedStatement. + * Executes the batch. + * If one of the batched statements fails, this database will continue. * - * @param sql ignored - * @param autoGeneratedKeys ignored - * @throws SQLException Unsupported Feature + * @return the array of update counts */ @Override - public long executeLargeUpdate(String sql, int autoGeneratedKeys) - throws SQLException { + public long[] executeLargeBatch() throws SQLException { try { - if (isDebugEnabled()) { - debugCode("executeLargeUpdate("+quote(sql)+", "+autoGeneratedKeys+");"); + debugCodeCall("executeLargeBatch"); + if (batchParameters == null) { + // Empty batch is allowed, see JDK-4639504 and other issues + batchParameters = new ArrayList<>(); } - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * Calling this method is not legal on a PreparedStatement. 
- * - * @param sql ignored - * @param columnIndexes ignored - * @throws SQLException Unsupported Feature - */ - @Override - public int executeUpdate(String sql, int[] columnIndexes) - throws SQLException { - try { - if (isDebugEnabled()) { - debugCode("executeUpdate(" + quote(sql) + ", " + - quoteIntArray(columnIndexes) + ");"); + batchIdentities = new MergedResult(); + int size = batchParameters.size(); + long[] result = new long[size]; + SQLException exception = new SQLException(); + checkClosed(); + for (int i = 0; i < size; i++) { + result[i] = executeBatchElement(batchParameters.get(i), exception); } - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * Calling this method is not legal on a PreparedStatement. - * - * @param sql ignored - * @param columnIndexes ignored - * @throws SQLException Unsupported Feature - */ - @Override - public long executeLargeUpdate(String sql, int[] columnIndexes) - throws SQLException { - try { - if (isDebugEnabled()) { - debugCode("executeLargeUpdate(" + quote(sql) + ", " + - quoteIntArray(columnIndexes) + ");"); + batchParameters = null; + exception = exception.getNextException(); + if (exception != null) { + throw new JdbcBatchUpdateException(exception, result); } - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + return result; } catch (Exception e) { throw logAndConvert(e); } } - /** - * Calling this method is not legal on a PreparedStatement. 
- * - * @param sql ignored - * @param columnNames ignored - * @throws SQLException Unsupported Feature - */ - @Override - public int executeUpdate(String sql, String[] columnNames) - throws SQLException { - try { - if (isDebugEnabled()) { - debugCode("executeUpdate(" + quote(sql) + ", " + - quoteArray(columnNames) + ");"); - } - throw DbException.get( - ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); + private long executeBatchElement(Value[] set, SQLException exception) { + ArrayList parameters = command.getParameters(); + for (int i = 0, l = set.length; i < l; i++) { + parameters.get(i).setValue(set[i], false); } - } - - /** - * Calling this method is not legal on a PreparedStatement. - * - * @param sql ignored - * @param columnNames ignored - * @throws SQLException Unsupported Feature - */ - @Override - public long executeLargeUpdate(String sql, String[] columnNames) - throws SQLException { + long updateCount; try { - if (isDebugEnabled()) { - debugCode("executeLargeUpdate(" + quote(sql) + ", " + - quoteArray(columnNames) + ");"); - } - throw DbException.get( - ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + updateCount = executeUpdateInternal(); + // Cannot use own implementation, it returns batch identities + ResultSet rs = super.getGeneratedKeys(); + batchIdentities.add(((JdbcResultSet) rs).result); } catch (Exception e) { - throw logAndConvert(e); + exception.setNextException(logAndConvert(e)); + updateCount = Statement.EXECUTE_FAILED; } + return updateCount; } - /** - * Calling this method is not legal on a PreparedStatement. 
- * - * @param sql ignored - * @param autoGeneratedKeys ignored - * @throws SQLException Unsupported Feature - */ @Override - public boolean execute(String sql, int autoGeneratedKeys) - throws SQLException { - try { - if (isDebugEnabled()) { - debugCode("execute(" + quote(sql) + ", " + autoGeneratedKeys + ");"); + public ResultSet getGeneratedKeys() throws SQLException { + if (batchIdentities != null) { + try { + int id = getNextId(TraceObject.RESULT_SET); + debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "getGeneratedKeys()"); + checkClosed(); + generatedKeys = new JdbcResultSet(conn, this, null, batchIdentities.getResult(), id, true, false, + false); + } catch (Exception e) { + throw logAndConvert(e); } - throw DbException.get( - ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); } + return super.getGeneratedKeys(); } /** - * Calling this method is not legal on a PreparedStatement. - * - * @param sql ignored - * @param columnIndexes ignored - * @throws SQLException Unsupported Feature + * Adds the current settings to the batch. */ @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { + public void addBatch() throws SQLException { try { - if (isDebugEnabled()) { - debugCode("execute(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ");"); + debugCodeCall("addBatch"); + checkClosed(); + ArrayList parameters = + command.getParameters(); + int size = parameters.size(); + Value[] set = new Value[size]; + for (int i = 0; i < size; i++) { + ParameterInterface param = parameters.get(i); + param.checkSet(); + Value value = param.getParamValue(); + set[i] = value; } - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * Calling this method is not legal on a PreparedStatement. 
- * - * @param sql ignored - * @param columnNames ignored - * @throws SQLException Unsupported Feature - */ - @Override - public boolean execute(String sql, String[] columnNames) - throws SQLException { - try { - if (isDebugEnabled()) { - debugCode("execute(" + quote(sql) + ", " + quoteArray(columnNames) + ");"); + if (batchParameters == null) { + batchParameters = Utils.newSmallArrayList(); } - throw DbException.get( - ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + batchParameters.add(set); } catch (Exception e) { throw logAndConvert(e); } @@ -1551,13 +1377,9 @@ public boolean execute(String sql, String[] columnNames) public ParameterMetaData getParameterMetaData() throws SQLException { try { int id = getNextId(TraceObject.PARAMETER_META_DATA); - if (isDebugEnabled()) { - debugCodeAssign("ParameterMetaData", - TraceObject.PARAMETER_META_DATA, id, "getParameterMetaData()"); - } + debugCodeAssign("ParameterMetaData", TraceObject.PARAMETER_META_DATA, id, "getParameterMetaData()"); checkClosed(); - return new JdbcParameterMetaData( - session.getTrace(), this, command, id); + return new JdbcParameterMetaData(session.getTrace(), this, command, id); } catch (Exception e) { throw logAndConvert(e); } @@ -1597,10 +1419,9 @@ public void setRowId(int parameterIndex, RowId x) throws SQLException { public void setNString(int parameterIndex, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNString("+parameterIndex+", "+quote(x)+");"); + debugCode("setNString(" + parameterIndex + ", " + quote(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueString.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? 
ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -1621,16 +1442,11 @@ public void setNCharacterStream(int parameterIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNCharacterStream("+ - parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setNCharacterStream(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createClob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1662,9 +1478,9 @@ public void setNCharacterStream(int parameterIndex, Reader x) public void setNClob(int parameterIndex, NClob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNClob("+parameterIndex+", x);"); + debugCode("setNClob(" + parameterIndex + ", x)"); } - checkClosedForWrite(); + checkClosed(); Value v; if (x == null) { v = ValueNull.INSTANCE; @@ -1690,15 +1506,11 @@ public void setNClob(int parameterIndex, NClob x) throws SQLException { public void setNClob(int parameterIndex, Reader x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNClob("+parameterIndex+", x);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(x, -1); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setNClob(" + parameterIndex + ", x)"); } + checkClosed(); + Value v = conn.createClob(x, -1); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1718,15 +1530,11 @@ public void setClob(int parameterIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setClob("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); 
+ debugCode("setClob(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createClob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1747,15 +1555,11 @@ public void setBlob(int parameterIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBlob("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createBlob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setBlob(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createBlob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1776,15 +1580,11 @@ public void setNClob(int parameterIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNClob("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setNClob(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createClob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1801,9 +1601,9 @@ public void setNClob(int parameterIndex, Reader x, long length) public void setSQLXML(int parameterIndex, SQLXML x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setSQLXML("+parameterIndex+", x);"); + debugCode("setSQLXML(" + parameterIndex + ", x)"); } - checkClosedForWrite(); + checkClosed(); Value v; if (x == null) { v = ValueNull.INSTANCE; @@ -1824,24 +1624,4 @@ public String toString() { return getTraceObjectName() + ": " + command; } - @Override - protected boolean checkClosed(boolean write) { - if (super.checkClosed(write)) { - // if the session was re-connected, re-prepare the statement - ArrayList oldParams = 
command.getParameters(); - command = conn.prepareCommand(sqlStatement, fetchSize); - ArrayList newParams = command.getParameters(); - for (int i = 0, size = oldParams.size(); i < size; i++) { - ParameterInterface old = oldParams.get(i); - Value value = old.getParamValue(); - if (value != null) { - ParameterInterface n = newParams.get(i); - n.setValue(value, false); - } - } - return true; - } - return false; - } - } diff --git a/h2/src/main/org/h2/jdbc/JdbcPreparedStatementBackwardsCompat.java b/h2/src/main/org/h2/jdbc/JdbcPreparedStatementBackwardsCompat.java deleted file mode 100644 index 5be72b49a5..0000000000 --- a/h2/src/main/org/h2/jdbc/JdbcPreparedStatementBackwardsCompat.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jdbc; - -import java.sql.SQLException; - -/** - * Allows us to compile on older platforms, while still implementing the methods - * from the newer JDBC API. - */ -public interface JdbcPreparedStatementBackwardsCompat { - - // compatibility interface - - // JDBC 4.2 (incomplete) - - /** - * Executes a statement (insert, update, delete, create, drop) - * and returns the update count. - * If another result set exists for this statement, this will be closed - * (even if this statement fails). - * - * If auto commit is on, this statement will be committed. - * If the statement is a DDL statement (create, drop, alter) and does not - * throw an exception, the current transaction (if any) is committed after - * executing the statement. 
- * - * @return the update count (number of row affected by an insert, update or - * delete, or 0 if no rows or the statement was a create, drop, - * commit or rollback) - * @throws SQLException if this object is closed or invalid - */ - long executeLargeUpdate() throws SQLException; -} diff --git a/h2/src/main/org/h2/jdbc/JdbcResultSet.java b/h2/src/main/org/h2/jdbc/JdbcResultSet.java index b1a7a0eb81..5984628827 100644 --- a/h2/src/main/org/h2/jdbc/JdbcResultSet.java +++ b/h2/src/main/org/h2/jdbc/JdbcResultSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -8,7 +8,6 @@ import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; -import java.math.BigInteger; import java.net.URL; import java.sql.Array; import java.sql.Blob; @@ -20,6 +19,7 @@ import java.sql.ResultSetMetaData; import java.sql.RowId; import java.sql.SQLException; +import java.sql.SQLType; import java.sql.SQLWarning; import java.sql.SQLXML; import java.sql.Statement; @@ -28,50 +28,45 @@ import java.util.Calendar; import java.util.HashMap; import java.util.Map; -import java.util.UUID; + import org.h2.api.ErrorCode; -import org.h2.api.Interval; -import org.h2.api.TimestampWithTimeZone; import org.h2.command.CommandInterface; -import org.h2.engine.Mode; +import org.h2.engine.Session; import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.message.TraceObject; import org.h2.result.ResultInterface; import org.h2.result.UpdatableRow; -import org.h2.util.DateTimeUtils; import org.h2.util.IOUtils; -import org.h2.util.LocalDateTimeUtils; +import org.h2.util.LegacyDateTimeUtils; import org.h2.util.StringUtils; import org.h2.value.CompareMode; import 
org.h2.value.DataType; import org.h2.value.Value; +import org.h2.value.ValueBigint; import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; -import org.h2.value.ValueInt; -import org.h2.value.ValueInterval; -import org.h2.value.ValueLong; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; -import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueTinyint; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; /** - *

          * Represents a result set. - *

          *

          * Column labels are case-insensitive, quotes are not supported. The first * column has the column index 1. *

          *

          + * Thread safety: the result set is not thread-safe and must not be used by + * multiple threads concurrently. + *

          + *

          * Updatable result sets: Result sets are updatable when the result only * contains columns from one table, and if it contains all columns of a unique * index (primary key or other) of this table. Key columns may not contain NULL @@ -79,11 +74,11 @@ * changes are visible, but not own inserts and deletes. *

          */ -public class JdbcResultSet extends TraceObject implements ResultSet, JdbcResultSetBackwardsCompat { +public final class JdbcResultSet extends TraceObject implements ResultSet { - private final boolean closeStatement; private final boolean scrollable; private final boolean updatable; + private final boolean triggerUpdatable; ResultInterface result; private JdbcConnection conn; private JdbcStatement stat; @@ -92,30 +87,27 @@ public class JdbcResultSet extends TraceObject implements ResultSet, JdbcResultS private Value[] insertRow; private Value[] updateRow; private HashMap columnLabelMap; - private HashMap patchedRows; + private HashMap patchedRows; private JdbcPreparedStatement preparedStatement; private final CommandInterface command; - JdbcResultSet(JdbcConnection conn, JdbcStatement stat, CommandInterface command, - ResultInterface result, int id, boolean closeStatement, - boolean scrollable, boolean updatable) { + public JdbcResultSet(JdbcConnection conn, JdbcStatement stat, CommandInterface command, ResultInterface result, + int id, boolean scrollable, boolean updatable, boolean triggerUpdatable) { setTrace(conn.getSession().getTrace(), TraceObject.RESULT_SET, id); this.conn = conn; this.stat = stat; this.command = command; this.result = result; this.columnCount = result.getVisibleColumnCount(); - this.closeStatement = closeStatement; this.scrollable = scrollable; this.updatable = updatable; + this.triggerUpdatable = triggerUpdatable; } - JdbcResultSet(JdbcConnection conn, JdbcPreparedStatement preparedStatement, - CommandInterface command, ResultInterface result, int id, boolean closeStatement, - boolean scrollable, boolean updatable, + JdbcResultSet(JdbcConnection conn, JdbcPreparedStatement preparedStatement, CommandInterface command, + ResultInterface result, int id, boolean scrollable, boolean updatable, HashMap columnLabelMap) { - this(conn, preparedStatement, command, result, id, closeStatement, scrollable, - updatable); + this(conn, 
preparedStatement, command, result, id, scrollable, updatable, false); this.columnLabelMap = columnLabelMap; this.preparedStatement = preparedStatement; } @@ -145,10 +137,7 @@ public boolean next() throws SQLException { public ResultSetMetaData getMetaData() throws SQLException { try { int id = getNextId(TraceObject.RESULT_SET_META_DATA); - if (isDebugEnabled()) { - debugCodeAssign("ResultSetMetaData", - TraceObject.RESULT_SET_META_DATA, id, "getMetaData()"); - } + debugCodeAssign("ResultSetMetaData", TraceObject.RESULT_SET_META_DATA, id, "getMetaData()"); checkClosed(); String catalog = conn.getCatalog(); return new JdbcResultSetMetaData(this, null, result, catalog, conn.getSession().getTrace(), id); @@ -199,7 +188,7 @@ public int findColumn(String columnLabel) throws SQLException { public void close() throws SQLException { try { debugCodeCall("close"); - closeInternal(); + closeInternal(false); } catch (Exception e) { throw logAndConvert(e); } @@ -207,24 +196,26 @@ public void close() throws SQLException { /** * Close the result set. This method also closes the statement if required. 
+ * @param fromStatement if true - close statement in the end */ - void closeInternal() throws SQLException { + void closeInternal(boolean fromStatement) { if (result != null) { try { if (result.isLazy()) { stat.onLazyResultSetClose(command, preparedStatement == null); } result.close(); - if (closeStatement && stat != null) { - stat.close(); - } } finally { + JdbcStatement s = stat; columnCount = 0; result = null; stat = null; conn = null; insertRow = null; updateRow = null; + if (!fromStatement && s != null) { + s.closeIfCloseOnCompletion(); + } } } } @@ -240,10 +231,6 @@ public Statement getStatement() throws SQLException { try { debugCodeCall("getStatement"); checkClosed(); - if (closeStatement) { - // if the result set was opened by a DatabaseMetaData call - return null; - } return stat; } catch (Exception e) { throw logAndConvert(e); @@ -293,7 +280,7 @@ public void clearWarnings() throws SQLException { public String getString(int columnIndex) throws SQLException { try { debugCodeCall("getString", columnIndex); - return get(columnIndex).getString(); + return get(checkColumnIndex(columnIndex)).getString(); } catch (Exception e) { throw logAndConvert(e); } @@ -311,7 +298,7 @@ public String getString(int columnIndex) throws SQLException { public String getString(String columnLabel) throws SQLException { try { debugCodeCall("getString", columnLabel); - return get(columnLabel).getString(); + return get(getColumnIndex(columnLabel)).getString(); } catch (Exception e) { throw logAndConvert(e); } @@ -329,7 +316,7 @@ public String getString(String columnLabel) throws SQLException { public int getInt(int columnIndex) throws SQLException { try { debugCodeCall("getInt", columnIndex); - return get(columnIndex).getInt(); + return getIntInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -347,12 +334,25 @@ public int getInt(int columnIndex) throws SQLException { public int getInt(String columnLabel) throws SQLException { try { 
debugCodeCall("getInt", columnLabel); - return get(columnLabel).getInt(); + return getIntInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private int getIntInternal(int columnIndex) { + Value v = getInternal(columnIndex); + int result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getInt(); + } else { + wasNull = true; + result = 0; + } + return result; + } + /** * Returns the value of the specified column as a BigDecimal. * @@ -365,7 +365,7 @@ public int getInt(String columnLabel) throws SQLException { public BigDecimal getBigDecimal(int columnIndex) throws SQLException { try { debugCodeCall("getBigDecimal", columnIndex); - return get(columnIndex).getBigDecimal(); + return get(checkColumnIndex(columnIndex)).getBigDecimal(); } catch (Exception e) { throw logAndConvert(e); } @@ -373,17 +373,22 @@ public BigDecimal getBigDecimal(int columnIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDate.class)} instead. + *

          * * @param columnIndex (1,2,...) * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override public Date getDate(int columnIndex) throws SQLException { try { debugCodeCall("getDate", columnIndex); - return get(columnIndex).getDate(); + return LegacyDateTimeUtils.toDate(conn, null, get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -391,17 +396,22 @@ public Date getDate(int columnIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalTime.class)} instead. + *

          * * @param columnIndex (1,2,...) * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override public Time getTime(int columnIndex) throws SQLException { try { debugCodeCall("getTime", columnIndex); - return get(columnIndex).getTime(); + return LegacyDateTimeUtils.toTime(conn, null, get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -409,17 +419,22 @@ public Time getTime(int columnIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDateTime.class)} instead. + *

          * * @param columnIndex (1,2,...) * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { try { debugCodeCall("getTimestamp", columnIndex); - return get(columnIndex).getTimestamp(); + return LegacyDateTimeUtils.toTimestamp(conn, null, get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -437,7 +452,7 @@ public Timestamp getTimestamp(int columnIndex) throws SQLException { public BigDecimal getBigDecimal(String columnLabel) throws SQLException { try { debugCodeCall("getBigDecimal", columnLabel); - return get(columnLabel).getBigDecimal(); + return get(getColumnIndex(columnLabel)).getBigDecimal(); } catch (Exception e) { throw logAndConvert(e); } @@ -445,17 +460,22 @@ public BigDecimal getBigDecimal(String columnLabel) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalDate.class)} instead. + *

          * * @param columnLabel the column label * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override public Date getDate(String columnLabel) throws SQLException { try { debugCodeCall("getDate", columnLabel); - return get(columnLabel).getDate(); + return LegacyDateTimeUtils.toDate(conn, null, get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -463,17 +483,22 @@ public Date getDate(String columnLabel) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalTime.class)} instead. + *

          * * @param columnLabel the column label * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override public Time getTime(String columnLabel) throws SQLException { try { debugCodeCall("getTime", columnLabel); - return get(columnLabel).getTime(); + return LegacyDateTimeUtils.toTime(conn, null, get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -481,17 +506,22 @@ public Time getTime(String columnLabel) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalDateTime.class)} instead. + *

          * * @param columnLabel the column label * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override public Timestamp getTimestamp(String columnLabel) throws SQLException { try { debugCodeCall("getTimestamp", columnLabel); - return get(columnLabel).getTimestamp(); + return LegacyDateTimeUtils.toTimestamp(conn, null, get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -510,8 +540,7 @@ public Timestamp getTimestamp(String columnLabel) throws SQLException { public Object getObject(int columnIndex) throws SQLException { try { debugCodeCall("getObject", columnIndex); - Value v = get(columnIndex); - return conn.convertToDefaultObject(v); + return ValueToObjectConverter.valueToDefaultObject(get(checkColumnIndex(columnIndex)), conn, true); } catch (Exception e) { throw logAndConvert(e); } @@ -530,8 +559,7 @@ public Object getObject(int columnIndex) throws SQLException { public Object getObject(String columnLabel) throws SQLException { try { debugCodeCall("getObject", columnLabel); - Value v = get(columnLabel); - return conn.convertToDefaultObject(v); + return ValueToObjectConverter.valueToDefaultObject(get(getColumnIndex(columnLabel)), conn, true); } catch (Exception e) { throw logAndConvert(e); } @@ -549,7 +577,7 @@ public Object getObject(String columnLabel) throws SQLException { public boolean getBoolean(int columnIndex) throws SQLException { try { debugCodeCall("getBoolean", columnIndex); - return get(columnIndex).getBoolean(); + return getBooleanInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -567,12 +595,25 @@ public boolean getBoolean(int columnIndex) throws SQLException { public boolean getBoolean(String columnLabel) throws SQLException { try { debugCodeCall("getBoolean", columnLabel); - return get(columnLabel).getBoolean(); + return getBooleanInternal(getColumnIndex(columnLabel)); } 
catch (Exception e) { throw logAndConvert(e); } } + private boolean getBooleanInternal(int columnIndex) { + Value v = getInternal(columnIndex); + boolean result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getBoolean(); + } else { + wasNull = true; + result = false; + } + return result; + } + /** * Returns the value of the specified column as a byte. * @@ -585,7 +626,7 @@ public boolean getBoolean(String columnLabel) throws SQLException { public byte getByte(int columnIndex) throws SQLException { try { debugCodeCall("getByte", columnIndex); - return get(columnIndex).getByte(); + return getByteInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -603,12 +644,25 @@ public byte getByte(int columnIndex) throws SQLException { public byte getByte(String columnLabel) throws SQLException { try { debugCodeCall("getByte", columnLabel); - return get(columnLabel).getByte(); + return getByteInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private byte getByteInternal(int columnIndex) { + Value v = getInternal(columnIndex); + byte result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getByte(); + } else { + wasNull = true; + result = 0; + } + return result; + } + /** * Returns the value of the specified column as a short. 
* @@ -621,7 +675,7 @@ public byte getByte(String columnLabel) throws SQLException { public short getShort(int columnIndex) throws SQLException { try { debugCodeCall("getShort", columnIndex); - return get(columnIndex).getShort(); + return getShortInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -639,12 +693,25 @@ public short getShort(int columnIndex) throws SQLException { public short getShort(String columnLabel) throws SQLException { try { debugCodeCall("getShort", columnLabel); - return get(columnLabel).getShort(); + return getShortInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private short getShortInternal(int columnIndex) { + Value v = getInternal(columnIndex); + short result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getShort(); + } else { + wasNull = true; + result = 0; + } + return result; + } + /** * Returns the value of the specified column as a long. * @@ -657,7 +724,7 @@ public short getShort(String columnLabel) throws SQLException { public long getLong(int columnIndex) throws SQLException { try { debugCodeCall("getLong", columnIndex); - return get(columnIndex).getLong(); + return getLongInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -675,12 +742,25 @@ public long getLong(int columnIndex) throws SQLException { public long getLong(String columnLabel) throws SQLException { try { debugCodeCall("getLong", columnLabel); - return get(columnLabel).getLong(); + return getLongInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private long getLongInternal(int columnIndex) { + Value v = getInternal(columnIndex); + long result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getLong(); + } else { + wasNull = true; + result = 0L; + } + return result; + } + /** * Returns the value of the specified column as a float. 
* @@ -693,7 +773,7 @@ public long getLong(String columnLabel) throws SQLException { public float getFloat(int columnIndex) throws SQLException { try { debugCodeCall("getFloat", columnIndex); - return get(columnIndex).getFloat(); + return getFloatInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -711,12 +791,25 @@ public float getFloat(int columnIndex) throws SQLException { public float getFloat(String columnLabel) throws SQLException { try { debugCodeCall("getFloat", columnLabel); - return get(columnLabel).getFloat(); + return getFloatInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private float getFloatInternal(int columnIndex) { + Value v = getInternal(columnIndex); + float result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getFloat(); + } else { + wasNull = true; + result = 0f; + } + return result; + } + /** * Returns the value of the specified column as a double. * @@ -729,7 +822,7 @@ public float getFloat(String columnLabel) throws SQLException { public double getDouble(int columnIndex) throws SQLException { try { debugCodeCall("getDouble", columnIndex); - return get(columnIndex).getDouble(); + return getDoubleInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -747,12 +840,25 @@ public double getDouble(int columnIndex) throws SQLException { public double getDouble(String columnLabel) throws SQLException { try { debugCodeCall("getDouble", columnLabel); - return get(columnLabel).getDouble(); + return getDoubleInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private double getDoubleInternal(int columnIndex) { + Value v = getInternal(columnIndex); + double result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getDouble(); + } else { + wasNull = true; + result = 0d; + } + return result; + } + /** * Returns the value of the specified column as a 
BigDecimal. * @@ -766,18 +872,16 @@ public double getDouble(String columnLabel) throws SQLException { */ @Deprecated @Override - public BigDecimal getBigDecimal(String columnLabel, int scale) - throws SQLException { + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getBigDecimal(" + - StringUtils.quoteJavaString(columnLabel)+", "+scale+");"); + debugCode("getBigDecimal(" + quote(columnLabel) + ", " + scale + ')'); } if (scale < 0) { throw DbException.getInvalidValueException("scale", scale); } - BigDecimal bd = get(columnLabel).getBigDecimal(); - return bd == null ? null : ValueDecimal.setScale(bd, scale); + BigDecimal bd = get(getColumnIndex(columnLabel)).getBigDecimal(); + return bd == null ? null : ValueNumeric.setScale(bd, scale); } catch (Exception e) { throw logAndConvert(e); } @@ -796,17 +900,16 @@ public BigDecimal getBigDecimal(String columnLabel, int scale) */ @Deprecated @Override - public BigDecimal getBigDecimal(int columnIndex, int scale) - throws SQLException { + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getBigDecimal(" + columnIndex + ", " + scale + ");"); + debugCode("getBigDecimal(" + columnIndex + ", " + scale + ')'); } if (scale < 0) { throw DbException.getInvalidValueException("scale", scale); } - BigDecimal bd = get(columnIndex).getBigDecimal(); - return bd == null ? null : ValueDecimal.setScale(bd, scale); + BigDecimal bd = get(checkColumnIndex(columnIndex)).getBigDecimal(); + return bd == null ? null : ValueNumeric.setScale(bd, scale); } catch (Exception e) { throw logAndConvert(e); } @@ -871,12 +974,17 @@ public Ref getRef(String columnLabel) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDate.class)} instead. + *

          * * @param columnIndex (1,2,...) * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override public Date getDate(int columnIndex, Calendar calendar) throws SQLException { @@ -884,7 +992,8 @@ public Date getDate(int columnIndex, Calendar calendar) throws SQLException { if (isDebugEnabled()) { debugCode("getDate(" + columnIndex + ", calendar)"); } - return DateTimeUtils.convertDate(get(columnIndex), calendar); + return LegacyDateTimeUtils.toDate(conn, calendar != null ? calendar.getTimeZone() : null, + get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -893,23 +1002,26 @@ public Date getDate(int columnIndex, Calendar calendar) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalDate.class)} instead. + *

          * * @param columnLabel the column label * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override - public Date getDate(String columnLabel, Calendar calendar) - throws SQLException { + public Date getDate(String columnLabel, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getDate(" + - StringUtils.quoteJavaString(columnLabel) + - ", calendar)"); + debugCode("getDate(" + quote(columnLabel) + ", calendar)"); } - return DateTimeUtils.convertDate(get(columnLabel), calendar); + return LegacyDateTimeUtils.toDate(conn, calendar != null ? calendar.getTimeZone() : null, + get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -918,12 +1030,17 @@ public Date getDate(String columnLabel, Calendar calendar) /** * Returns the value of the specified column as a java.sql.Time using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalTime.class)} instead. + *

          * * @param columnIndex (1,2,...) * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override public Time getTime(int columnIndex, Calendar calendar) throws SQLException { @@ -931,7 +1048,8 @@ public Time getTime(int columnIndex, Calendar calendar) throws SQLException { if (isDebugEnabled()) { debugCode("getTime(" + columnIndex + ", calendar)"); } - return DateTimeUtils.convertTime(get(columnIndex), calendar); + return LegacyDateTimeUtils.toTime(conn, calendar != null ? calendar.getTimeZone() : null, + get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -940,23 +1058,26 @@ public Time getTime(int columnIndex, Calendar calendar) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalTime.class)} instead. + *

          * * @param columnLabel the column label * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override - public Time getTime(String columnLabel, Calendar calendar) - throws SQLException { + public Time getTime(String columnLabel, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getTime(" + - StringUtils.quoteJavaString(columnLabel) + - ", calendar)"); + debugCode("getTime(" + quote(columnLabel) + ", calendar)"); } - return DateTimeUtils.convertTime(get(columnLabel), calendar); + return LegacyDateTimeUtils.toTime(conn, calendar != null ? calendar.getTimeZone() : null, + get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -965,22 +1086,26 @@ public Time getTime(String columnLabel, Calendar calendar) /** * Returns the value of the specified column as a java.sql.Timestamp using a * specified time zone. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDateTime.class)} instead. + *

          * * @param columnIndex (1,2,...) * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override - public Timestamp getTimestamp(int columnIndex, Calendar calendar) - throws SQLException { + public Timestamp getTimestamp(int columnIndex, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { debugCode("getTimestamp(" + columnIndex + ", calendar)"); } - Value value = get(columnIndex); - return DateTimeUtils.convertTimestamp(value, calendar); + return LegacyDateTimeUtils.toTimestamp(conn, calendar != null ? calendar.getTimeZone() : null, + get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -988,24 +1113,26 @@ public Timestamp getTimestamp(int columnIndex, Calendar calendar) /** * Returns the value of the specified column as a java.sql.Timestamp. + *

          + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalDateTime.class)} instead. + *

          * * @param columnLabel the column label * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override - public Timestamp getTimestamp(String columnLabel, Calendar calendar) - throws SQLException { + public Timestamp getTimestamp(String columnLabel, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getTimestamp(" + - StringUtils.quoteJavaString(columnLabel) + - ", calendar)"); + debugCode("getTimestamp(" + quote(columnLabel) + ", calendar)"); } - Value value = get(columnLabel); - return DateTimeUtils.convertTimestamp(value, calendar); + return LegacyDateTimeUtils.toTimestamp(conn, calendar != null ? calendar.getTimeZone() : null, + get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -1024,11 +1151,9 @@ public Blob getBlob(int columnIndex) throws SQLException { try { int id = getNextId(TraceObject.BLOB); if (isDebugEnabled()) { - debugCodeAssign("Blob", TraceObject.BLOB, - id, "getBlob(" + columnIndex + ")"); + debugCodeAssign("Blob", TraceObject.BLOB, id, "getBlob(" + columnIndex + ')'); } - Value v = get(columnIndex); - return v == ValueNull.INSTANCE ? null : new JdbcBlob(conn, v, JdbcLob.State.WITH_VALUE, id); + return getBlob(id, checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -1047,16 +1172,27 @@ public Blob getBlob(String columnLabel) throws SQLException { try { int id = getNextId(TraceObject.BLOB); if (isDebugEnabled()) { - debugCodeAssign("Blob", TraceObject.BLOB, - id, "getBlob(" + quote(columnLabel) + ")"); + debugCodeAssign("Blob", TraceObject.BLOB, id, "getBlob(" + quote(columnLabel) + ')'); } - Value v = get(columnLabel); - return v == ValueNull.INSTANCE ? 
null : new JdbcBlob(conn, v, JdbcLob.State.WITH_VALUE, id); + return getBlob(id, getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private JdbcBlob getBlob(int id, int columnIndex) { + Value v = getInternal(columnIndex); + JdbcBlob result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = new JdbcBlob(conn, v, JdbcLob.State.WITH_VALUE, id); + } else { + wasNull = true; + result = null; + } + return result; + } + /** * Returns the value of the specified column as a byte array. * @@ -1069,7 +1205,7 @@ public Blob getBlob(String columnLabel) throws SQLException { public byte[] getBytes(int columnIndex) throws SQLException { try { debugCodeCall("getBytes", columnIndex); - return get(columnIndex).convertTo(Value.BYTES, conn.getMode()).getBytes(); + return get(checkColumnIndex(columnIndex)).getBytes(); } catch (Exception e) { throw logAndConvert(e); } @@ -1087,7 +1223,7 @@ public byte[] getBytes(int columnIndex) throws SQLException { public byte[] getBytes(String columnLabel) throws SQLException { try { debugCodeCall("getBytes", columnLabel); - return get(columnLabel).convertTo(Value.BYTES, conn.getMode()).getBytes(); + return get(getColumnIndex(columnLabel)).getBytes(); } catch (Exception e) { throw logAndConvert(e); } @@ -1105,7 +1241,7 @@ public byte[] getBytes(String columnLabel) throws SQLException { public InputStream getBinaryStream(int columnIndex) throws SQLException { try { debugCodeCall("getBinaryStream", columnIndex); - return get(columnIndex).getInputStream(); + return get(checkColumnIndex(columnIndex)).getInputStream(); } catch (Exception e) { throw logAndConvert(e); } @@ -1123,7 +1259,7 @@ public InputStream getBinaryStream(int columnIndex) throws SQLException { public InputStream getBinaryStream(String columnLabel) throws SQLException { try { debugCodeCall("getBinaryStream", columnLabel); - return get(columnLabel).getInputStream(); + return get(getColumnIndex(columnLabel)).getInputStream(); } catch (Exception 
e) { throw logAndConvert(e); } @@ -1143,10 +1279,9 @@ public Clob getClob(int columnIndex) throws SQLException { try { int id = getNextId(TraceObject.CLOB); if (isDebugEnabled()) { - debugCodeAssign("Clob", TraceObject.CLOB, id, "getClob(" + columnIndex + ")"); + debugCodeAssign("Clob", TraceObject.CLOB, id, "getClob(" + columnIndex + ')'); } - Value v = get(columnIndex); - return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, JdbcLob.State.WITH_VALUE, id); + return getClob(id, checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -1165,11 +1300,9 @@ public Clob getClob(String columnLabel) throws SQLException { try { int id = getNextId(TraceObject.CLOB); if (isDebugEnabled()) { - debugCodeAssign("Clob", TraceObject.CLOB, id, "getClob(" + - quote(columnLabel) + ")"); + debugCodeAssign("Clob", TraceObject.CLOB, id, "getClob(" + quote(columnLabel) + ')'); } - Value v = get(columnLabel); - return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, JdbcLob.State.WITH_VALUE, id); + return getClob(id, getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } @@ -1188,10 +1321,9 @@ public Array getArray(int columnIndex) throws SQLException { try { int id = getNextId(TraceObject.ARRAY); if (isDebugEnabled()) { - debugCodeAssign("Array", TraceObject.ARRAY, id, "getArray(" + columnIndex + ")"); + debugCodeAssign("Array", TraceObject.ARRAY, id, "getArray(" + columnIndex + ')'); } - Value v = get(columnIndex); - return v == ValueNull.INSTANCE ? 
null : new JdbcArray(conn, v, id); + return getArray(id, checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -1210,16 +1342,27 @@ public Array getArray(String columnLabel) throws SQLException { try { int id = getNextId(TraceObject.ARRAY); if (isDebugEnabled()) { - debugCodeAssign("Array", TraceObject.ARRAY, id, "getArray(" + - quote(columnLabel) + ")"); + debugCodeAssign("Array", TraceObject.ARRAY, id, "getArray(" + quote(columnLabel) + ')'); } - Value v = get(columnLabel); - return v == ValueNull.INSTANCE ? null : new JdbcArray(conn, v, id); + return getArray(id, getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private Array getArray(int id, int columnIndex) { + Value v = getInternal(columnIndex); + JdbcArray result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = new JdbcArray(conn, v, id); + } else { + wasNull = true; + result = null; + } + return result; + } + /** * Returns the value of the specified column as an input stream. * @@ -1232,7 +1375,7 @@ public Array getArray(String columnLabel) throws SQLException { public InputStream getAsciiStream(int columnIndex) throws SQLException { try { debugCodeCall("getAsciiStream", columnIndex); - String s = get(columnIndex).getString(); + String s = get(checkColumnIndex(columnIndex)).getString(); return s == null ? 
null : IOUtils.getInputStreamFromString(s); } catch (Exception e) { throw logAndConvert(e); @@ -1251,7 +1394,7 @@ public InputStream getAsciiStream(int columnIndex) throws SQLException { public InputStream getAsciiStream(String columnLabel) throws SQLException { try { debugCodeCall("getAsciiStream", columnLabel); - String s = get(columnLabel).getString(); + String s = get(getColumnIndex(columnLabel)).getString(); return IOUtils.getInputStreamFromString(s); } catch (Exception e) { throw logAndConvert(e); @@ -1270,7 +1413,7 @@ public InputStream getAsciiStream(String columnLabel) throws SQLException { public Reader getCharacterStream(int columnIndex) throws SQLException { try { debugCodeCall("getCharacterStream", columnIndex); - return get(columnIndex).getReader(); + return get(checkColumnIndex(columnIndex)).getReader(); } catch (Exception e) { throw logAndConvert(e); } @@ -1288,7 +1431,7 @@ public Reader getCharacterStream(int columnIndex) throws SQLException { public Reader getCharacterStream(String columnLabel) throws SQLException { try { debugCodeCall("getCharacterStream", columnLabel); - return get(columnLabel).getReader(); + return get(getColumnIndex(columnLabel)).getReader(); } catch (Exception e) { throw logAndConvert(e); } @@ -1322,7 +1465,7 @@ public URL getURL(String columnLabel) throws SQLException { public void updateNull(int columnIndex) throws SQLException { try { debugCodeCall("updateNull", columnIndex); - update(columnIndex, ValueNull.INSTANCE); + update(checkColumnIndex(columnIndex), ValueNull.INSTANCE); } catch (Exception e) { throw logAndConvert(e); } @@ -1338,7 +1481,7 @@ public void updateNull(int columnIndex) throws SQLException { public void updateNull(String columnLabel) throws SQLException { try { debugCodeCall("updateNull", columnLabel); - update(columnLabel, ValueNull.INSTANCE); + update(getColumnIndex(columnLabel), ValueNull.INSTANCE); } catch (Exception e) { throw logAndConvert(e); } @@ -1355,9 +1498,9 @@ public void updateNull(String 
columnLabel) throws SQLException { public void updateBoolean(int columnIndex, boolean x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBoolean("+columnIndex+", "+x+");"); + debugCode("updateBoolean(" + columnIndex + ", " + x + ')'); } - update(columnIndex, ValueBoolean.get(x)); + update(checkColumnIndex(columnIndex), ValueBoolean.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1371,13 +1514,12 @@ public void updateBoolean(int columnIndex, boolean x) throws SQLException { * @throws SQLException if result set is closed or not updatable */ @Override - public void updateBoolean(String columnLabel, boolean x) - throws SQLException { + public void updateBoolean(String columnLabel, boolean x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBoolean("+quote(columnLabel)+", "+x+");"); + debugCode("updateBoolean(" + quote(columnLabel) + ", " + x + ')'); } - update(columnLabel, ValueBoolean.get(x)); + update(getColumnIndex(columnLabel), ValueBoolean.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1394,9 +1536,9 @@ public void updateBoolean(String columnLabel, boolean x) public void updateByte(int columnIndex, byte x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateByte("+columnIndex+", "+x+");"); + debugCode("updateByte(" + columnIndex + ", " + x + ')'); } - update(columnIndex, ValueByte.get(x)); + update(checkColumnIndex(columnIndex), ValueTinyint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1413,9 +1555,9 @@ public void updateByte(int columnIndex, byte x) throws SQLException { public void updateByte(String columnLabel, byte x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateByte("+columnLabel+", "+x+");"); + debugCode("updateByte(" + quote(columnLabel) + ", " + x + ')'); } - update(columnLabel, ValueByte.get(x)); + update(getColumnIndex(columnLabel), ValueTinyint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1432,9 
+1574,9 @@ public void updateByte(String columnLabel, byte x) throws SQLException { public void updateBytes(int columnIndex, byte[] x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBytes("+columnIndex+", x);"); + debugCode("updateBytes(" + columnIndex + ", x)"); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE : ValueBytes.get(x)); + update(checkColumnIndex(columnIndex), x == null ? ValueNull.INSTANCE : ValueVarbinary.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1451,9 +1593,9 @@ public void updateBytes(int columnIndex, byte[] x) throws SQLException { public void updateBytes(String columnLabel, byte[] x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBytes("+quote(columnLabel)+", x);"); + debugCode("updateBytes(" + quote(columnLabel) + ", x)"); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : ValueBytes.get(x)); + update(getColumnIndex(columnLabel), x == null ? ValueNull.INSTANCE : ValueVarbinary.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1470,9 +1612,9 @@ public void updateBytes(String columnLabel, byte[] x) throws SQLException { public void updateShort(int columnIndex, short x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateShort("+columnIndex+", (short) "+x+");"); + debugCode("updateShort(" + columnIndex + ", (short) " + x + ')'); } - update(columnIndex, ValueShort.get(x)); + update(checkColumnIndex(columnIndex), ValueSmallint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1489,9 +1631,9 @@ public void updateShort(int columnIndex, short x) throws SQLException { public void updateShort(String columnLabel, short x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateShort("+quote(columnLabel)+", (short) "+x+");"); + debugCode("updateShort(" + quote(columnLabel) + ", (short) " + x + ')'); } - update(columnLabel, ValueShort.get(x)); + update(getColumnIndex(columnLabel), ValueSmallint.get(x)); 
} catch (Exception e) { throw logAndConvert(e); } @@ -1508,9 +1650,9 @@ public void updateShort(String columnLabel, short x) throws SQLException { public void updateInt(int columnIndex, int x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateInt("+columnIndex+", "+x+");"); + debugCode("updateInt(" + columnIndex + ", " + x + ')'); } - update(columnIndex, ValueInt.get(x)); + update(checkColumnIndex(columnIndex), ValueInteger.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1527,9 +1669,9 @@ public void updateInt(int columnIndex, int x) throws SQLException { public void updateInt(String columnLabel, int x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateInt("+quote(columnLabel)+", "+x+");"); + debugCode("updateInt(" + quote(columnLabel) + ", " + x + ')'); } - update(columnLabel, ValueInt.get(x)); + update(getColumnIndex(columnLabel), ValueInteger.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1546,9 +1688,9 @@ public void updateInt(String columnLabel, int x) throws SQLException { public void updateLong(int columnIndex, long x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateLong("+columnIndex+", "+x+"L);"); + debugCode("updateLong(" + columnIndex + ", " + x + "L)"); } - update(columnIndex, ValueLong.get(x)); + update(checkColumnIndex(columnIndex), ValueBigint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1565,9 +1707,9 @@ public void updateLong(int columnIndex, long x) throws SQLException { public void updateLong(String columnLabel, long x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateLong("+quote(columnLabel)+", "+x+"L);"); + debugCode("updateLong(" + quote(columnLabel) + ", " + x + "L)"); } - update(columnLabel, ValueLong.get(x)); + update(getColumnIndex(columnLabel), ValueBigint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1584,9 +1726,9 @@ public void updateLong(String columnLabel, long x) throws 
SQLException { public void updateFloat(int columnIndex, float x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateFloat("+columnIndex+", "+x+"f);"); + debugCode("updateFloat(" + columnIndex + ", " + x + "f)"); } - update(columnIndex, ValueFloat.get(x)); + update(checkColumnIndex(columnIndex), ValueReal.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1603,9 +1745,9 @@ public void updateFloat(int columnIndex, float x) throws SQLException { public void updateFloat(String columnLabel, float x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateFloat("+quote(columnLabel)+", "+x+"f);"); + debugCode("updateFloat(" + quote(columnLabel) + ", " + x + "f)"); } - update(columnLabel, ValueFloat.get(x)); + update(getColumnIndex(columnLabel), ValueReal.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1622,9 +1764,9 @@ public void updateFloat(String columnLabel, float x) throws SQLException { public void updateDouble(int columnIndex, double x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateDouble("+columnIndex+", "+x+"d);"); + debugCode("updateDouble(" + columnIndex + ", " + x + "d)"); } - update(columnIndex, ValueDouble.get(x)); + update(checkColumnIndex(columnIndex), ValueDouble.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1641,9 +1783,9 @@ public void updateDouble(int columnIndex, double x) throws SQLException { public void updateDouble(String columnLabel, double x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateDouble("+quote(columnLabel)+", "+x+"d);"); + debugCode("updateDouble(" + quote(columnLabel) + ", " + x + "d)"); } - update(columnLabel, ValueDouble.get(x)); + update(getColumnIndex(columnLabel), ValueDouble.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1657,14 +1799,12 @@ public void updateDouble(String columnLabel, double x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ 
@Override - public void updateBigDecimal(int columnIndex, BigDecimal x) - throws SQLException { + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBigDecimal("+columnIndex+", " + quoteBigDecimal(x) + ");"); + debugCode("updateBigDecimal(" + columnIndex + ", " + quoteBigDecimal(x) + ')'); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE - : ValueDecimal.get(x)); + update(checkColumnIndex(columnIndex), x == null ? ValueNull.INSTANCE : ValueNumeric.getAnyScale(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1678,15 +1818,12 @@ public void updateBigDecimal(int columnIndex, BigDecimal x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBigDecimal(String columnLabel, BigDecimal x) - throws SQLException { + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBigDecimal(" + quote(columnLabel) + ", " + - quoteBigDecimal(x) + ");"); + debugCode("updateBigDecimal(" + quote(columnLabel) + ", " + quoteBigDecimal(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE - : ValueDecimal.get(x)); + update(getColumnIndex(columnLabel), x == null ? ValueNull.INSTANCE : ValueNumeric.getAnyScale(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1703,10 +1840,9 @@ public void updateBigDecimal(String columnLabel, BigDecimal x) public void updateString(int columnIndex, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateString("+columnIndex+", "+quote(x)+");"); + debugCode("updateString(" + columnIndex + ", " + quote(x) + ')'); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE - : ValueString.get(x)); + update(checkColumnIndex(columnIndex), x == null ? 
ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -1723,10 +1859,9 @@ public void updateString(int columnIndex, String x) throws SQLException { public void updateString(String columnLabel, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateString("+quote(columnLabel)+", "+quote(x)+");"); + debugCode("updateString(" + quote(columnLabel) + ", " + quote(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE - : ValueString.get(x)); + update(getColumnIndex(columnLabel), x == null ? ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -1734,18 +1869,25 @@ public void updateString(String columnLabel, String x) throws SQLException { /** * Updates a column in the current or insert row. + *

          + * Usage of this method is discouraged. Use + * {@code updateObject(columnIndex, value)} with {@link java.time.LocalDate} + * parameter instead. + *

          * * @param columnIndex (1,2,...) * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(int, Object) */ @Override public void updateDate(int columnIndex, Date x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateDate("+columnIndex+", x);"); + debugCode("updateDate(" + columnIndex + ", " + quoteDate(x) + ')'); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE : ValueDate.get(x)); + update(checkColumnIndex(columnIndex), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromDate(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1753,18 +1895,25 @@ public void updateDate(int columnIndex, Date x) throws SQLException { /** * Updates a column in the current or insert row. + *

          + * Usage of this method is discouraged. Use + * {@code updateObject(columnLabel, value)} with {@link java.time.LocalDate} + * parameter instead. + *

          * * @param columnLabel the column label * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(String, Object) */ @Override public void updateDate(String columnLabel, Date x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateDate("+quote(columnLabel)+", x);"); + debugCode("updateDate(" + quote(columnLabel) + ", " + quoteDate(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : ValueDate.get(x)); + update(getColumnIndex(columnLabel), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromDate(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1772,18 +1921,25 @@ public void updateDate(String columnLabel, Date x) throws SQLException { /** * Updates a column in the current or insert row. + *

          + * Usage of this method is discouraged. Use + * {@code updateObject(columnIndex, value)} with {@link java.time.LocalTime} + * parameter instead. + *

          * * @param columnIndex (1,2,...) * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(int, Object) */ @Override public void updateTime(int columnIndex, Time x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateTime("+columnIndex+", x);"); + debugCode("updateTime(" + columnIndex + ", " + quoteTime(x) + ')'); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE : ValueTime.get(x)); + update(checkColumnIndex(columnIndex), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTime(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1791,18 +1947,25 @@ public void updateTime(int columnIndex, Time x) throws SQLException { /** * Updates a column in the current or insert row. + *

          + * Usage of this method is discouraged. Use + * {@code updateObject(columnLabel, value)} with {@link java.time.LocalTime} + * parameter instead. + *

          * * @param columnLabel the column label * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(String, Object) */ @Override public void updateTime(String columnLabel, Time x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateTime("+quote(columnLabel)+", x);"); + debugCode("updateTime(" + quote(columnLabel) + ", " + quoteTime(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : ValueTime.get(x)); + update(getColumnIndex(columnLabel), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTime(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1810,20 +1973,25 @@ public void updateTime(String columnLabel, Time x) throws SQLException { /** * Updates a column in the current or insert row. + *

          + * Usage of this method is discouraged. Use + * {@code updateObject(columnIndex, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

          * * @param columnIndex (1,2,...) * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(int, Object) */ @Override - public void updateTimestamp(int columnIndex, Timestamp x) - throws SQLException { + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateTimestamp("+columnIndex+", x);"); + debugCode("updateTimestamp(" + columnIndex + ", " + quoteTimestamp(x) + ')'); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE - : ValueTimestamp.get(x)); + update(checkColumnIndex(columnIndex), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTimestamp(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1831,20 +1999,25 @@ public void updateTimestamp(int columnIndex, Timestamp x) /** * Updates a column in the current or insert row. + *

          + * Usage of this method is discouraged. Use + * {@code updateObject(columnLabel, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

          * * @param columnLabel the column label * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(String, Object) */ @Override - public void updateTimestamp(String columnLabel, Timestamp x) - throws SQLException { + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateTimestamp("+quote(columnLabel)+", x);"); + debugCode("updateTimestamp(" + quote(columnLabel) + ", " + quoteTimestamp(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE - : ValueTimestamp.get(x)); + update(getColumnIndex(columnLabel), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTimestamp(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1859,9 +2032,15 @@ public void updateTimestamp(String columnLabel, Timestamp x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateAsciiStream(int columnIndex, InputStream x, int length) - throws SQLException { - updateAsciiStream(columnIndex, x, (long) length); + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateAsciiStream(" + columnIndex + ", x, " + length + ')'); + } + updateAscii(checkColumnIndex(columnIndex), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1872,9 +2051,15 @@ public void updateAsciiStream(int columnIndex, InputStream x, int length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateAsciiStream(int columnIndex, InputStream x) - throws SQLException { - updateAsciiStream(columnIndex, x, -1); + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateAsciiStream(" + columnIndex + ", x)"); + } + updateAscii(checkColumnIndex(columnIndex), x, -1L); + } 
catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1886,15 +2071,12 @@ public void updateAsciiStream(int columnIndex, InputStream x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateAsciiStream(int columnIndex, InputStream x, long length) - throws SQLException { + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateAsciiStream("+columnIndex+", x, "+length+"L);"); + debugCode("updateAsciiStream(" + columnIndex + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(IOUtils.getAsciiReader(x), length); - update(columnIndex, v); + updateAscii(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -1909,9 +2091,15 @@ public void updateAsciiStream(int columnIndex, InputStream x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateAsciiStream(String columnLabel, InputStream x, int length) - throws SQLException { - updateAsciiStream(columnLabel, x, (long) length); + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateAsciiStream(" + quote(columnLabel) + ", x, " + length + ')'); + } + updateAscii(getColumnIndex(columnLabel), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1922,9 +2110,15 @@ public void updateAsciiStream(String columnLabel, InputStream x, int length) * @throws SQLException if the result set is closed */ @Override - public void updateAsciiStream(String columnLabel, InputStream x) - throws SQLException { - updateAsciiStream(columnLabel, x, -1); + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateAsciiStream(" + quote(columnLabel) + ", x)"); + } + 
updateAscii(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1936,20 +2130,21 @@ public void updateAsciiStream(String columnLabel, InputStream x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateAsciiStream(String columnLabel, InputStream x, long length) - throws SQLException { + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateAsciiStream("+quote(columnLabel)+", x, "+length+"L);"); + debugCode("updateAsciiStream(" + quote(columnLabel) + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(IOUtils.getAsciiReader(x), length); - update(columnLabel, v); + updateAscii(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } } + private void updateAscii(int columnIndex, InputStream x, long length) { + update(columnIndex, conn.createClob(IOUtils.getAsciiReader(x), length)); + } + /** * Updates a column in the current or insert row. 
* @@ -1959,9 +2154,15 @@ public void updateAsciiStream(String columnLabel, InputStream x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(int columnIndex, InputStream x, int length) - throws SQLException { - updateBinaryStream(columnIndex, x, (long) length); + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateBinaryStream(" + columnIndex + ", x, " + length + ')'); + } + updateBlobImpl(checkColumnIndex(columnIndex), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1972,9 +2173,15 @@ public void updateBinaryStream(int columnIndex, InputStream x, int length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(int columnIndex, InputStream x) - throws SQLException { - updateBinaryStream(columnIndex, x, -1); + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateBinaryStream(" + columnIndex + ", x)"); + } + updateBlobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1986,15 +2193,12 @@ public void updateBinaryStream(int columnIndex, InputStream x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(int columnIndex, InputStream x, long length) - throws SQLException { + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBinaryStream("+columnIndex+", x, "+length+"L);"); + debugCode("updateBinaryStream(" + columnIndex + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createBlob(x, length); - update(columnIndex, v); + updateBlobImpl(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { 
throw logAndConvert(e); } @@ -2008,9 +2212,15 @@ public void updateBinaryStream(int columnIndex, InputStream x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(String columnLabel, InputStream x) - throws SQLException { - updateBinaryStream(columnLabel, x, -1); + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateBinaryStream(" + quote(columnLabel) + ", x)"); + } + updateBlobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2022,9 +2232,15 @@ public void updateBinaryStream(String columnLabel, InputStream x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(String columnLabel, InputStream x, int length) - throws SQLException { - updateBinaryStream(columnLabel, x, (long) length); + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateBinaryStream(" + quote(columnLabel) + ", x, " + length + ')'); + } + updateBlobImpl(getColumnIndex(columnLabel), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2036,15 +2252,12 @@ public void updateBinaryStream(String columnLabel, InputStream x, int length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(String columnLabel, InputStream x, - long length) throws SQLException { + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBinaryStream("+quote(columnLabel)+", x, "+length+"L);"); + debugCode("updateBinaryStream(" + quote(columnLabel) + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createBlob(x, length); - update(columnLabel, v); + 
updateBlobImpl(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -2059,15 +2272,12 @@ public void updateBinaryStream(String columnLabel, InputStream x, * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(int columnIndex, Reader x, long length) - throws SQLException { + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateCharacterStream("+columnIndex+", x, "+length+"L);"); + debugCode("updateCharacterStream(" + columnIndex + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(x, length); - update(columnIndex, v); + updateClobImpl(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -2082,9 +2292,15 @@ public void updateCharacterStream(int columnIndex, Reader x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(int columnIndex, Reader x, int length) - throws SQLException { - updateCharacterStream(columnIndex, x, (long) length); + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateCharacterStream(" + columnIndex + ", x, " + length + ')'); + } + updateClobImpl(checkColumnIndex(columnIndex), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2095,9 +2311,15 @@ public void updateCharacterStream(int columnIndex, Reader x, int length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(int columnIndex, Reader x) - throws SQLException { - updateCharacterStream(columnIndex, x, -1); + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateCharacterStream(" + columnIndex + 
", x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2109,9 +2331,15 @@ public void updateCharacterStream(int columnIndex, Reader x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(String columnLabel, Reader x, int length) - throws SQLException { - updateCharacterStream(columnLabel, x, (long) length); + public void updateCharacterStream(String columnLabel, Reader x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateCharacterStream(" + quote(columnLabel) + ", x, " + length + ')'); + } + updateClobImpl(getColumnIndex(columnLabel), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2122,9 +2350,15 @@ public void updateCharacterStream(String columnLabel, Reader x, int length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(String columnLabel, Reader x) - throws SQLException { - updateCharacterStream(columnLabel, x, -1); + public void updateCharacterStream(String columnLabel, Reader x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateCharacterStream(" + quote(columnLabel) + ", x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2136,15 +2370,12 @@ public void updateCharacterStream(String columnLabel, Reader x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(String columnLabel, Reader x, long length) - throws SQLException { + public void updateCharacterStream(String columnLabel, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateCharacterStream("+quote(columnLabel)+", x, "+length+"L);"); + debugCode("updateCharacterStream(" + quote(columnLabel) + ", x, " + length + "L)"); } - 
checkClosed(); - Value v = conn.createClob(x, length); - update(columnLabel, v); + updateClobImpl(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -2159,20 +2390,17 @@ public void updateCharacterStream(String columnLabel, Reader x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateObject(int columnIndex, Object x, int scale) - throws SQLException { + public void updateObject(int columnIndex, Object x, int scale) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateObject("+columnIndex+", x, "+scale+");"); + debugCode("updateObject(" + columnIndex + ", x, " + scale + ')'); } - update(columnIndex, convertToUnknownValue(x)); + update(checkColumnIndex(columnIndex), convertToUnknownValue(x)); } catch (Exception e) { throw logAndConvert(e); } } - - /** * Updates a column in the current or insert row. * @@ -2182,13 +2410,12 @@ public void updateObject(int columnIndex, Object x, int scale) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateObject(String columnLabel, Object x, int scale) - throws SQLException { + public void updateObject(String columnLabel, Object x, int scale) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateObject("+quote(columnLabel)+", x, "+scale+");"); + debugCode("updateObject(" + quote(columnLabel) + ", x, " + scale + ')'); } - update(columnLabel, convertToUnknownValue(x)); + update(getColumnIndex(columnLabel), convertToUnknownValue(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -2205,9 +2432,9 @@ public void updateObject(String columnLabel, Object x, int scale) public void updateObject(int columnIndex, Object x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateObject("+columnIndex+", x);"); + debugCode("updateObject(" + columnIndex + ", x)"); } - update(columnIndex, convertToUnknownValue(x)); + 
update(checkColumnIndex(columnIndex), convertToUnknownValue(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -2224,9 +2451,95 @@ public void updateObject(int columnIndex, Object x) throws SQLException { public void updateObject(String columnLabel, Object x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateObject("+quote(columnLabel)+", x);"); + debugCode("updateObject(" + quote(columnLabel) + ", x)"); + } + update(getColumnIndex(columnLabel), convertToUnknownValue(x)); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Updates a column in the current or insert row. + * + * @param columnIndex (1,2,...) + * @param x the value + * @param targetSqlType the SQL type + * @throws SQLException if the result set is closed or not updatable + */ + @Override + public void updateObject(int columnIndex, Object x, SQLType targetSqlType) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateObject(" + columnIndex + ", x, " + DataType.sqlTypeToString(targetSqlType) + ')'); + } + update(checkColumnIndex(columnIndex), convertToValue(x, targetSqlType)); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Updates a column in the current or insert row. + * + * @param columnIndex (1,2,...) + * @param x the value + * @param targetSqlType the SQL type + * @param scaleOrLength is ignored + * @throws SQLException if the result set is closed or not updatable + */ + @Override + public void updateObject(int columnIndex, Object x, SQLType targetSqlType, int scaleOrLength) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateObject(" + columnIndex + ", x, " + DataType.sqlTypeToString(targetSqlType) + ", " + + scaleOrLength + ')'); + } + update(checkColumnIndex(columnIndex), convertToValue(x, targetSqlType)); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Updates a column in the current or insert row. 
+ * + * @param columnLabel the column label + * @param x the value + * @param targetSqlType the SQL type + * @throws SQLException if the result set is closed or not updatable + */ + @Override + public void updateObject(String columnLabel, Object x, SQLType targetSqlType) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateObject(" + quote(columnLabel) + ", x, " + DataType.sqlTypeToString(targetSqlType) + + ')'); } - update(columnLabel, convertToUnknownValue(x)); + update(getColumnIndex(columnLabel), convertToValue(x, targetSqlType)); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Updates a column in the current or insert row. + * + * @param columnLabel the column label + * @param x the value + * @param targetSqlType the SQL type + * @param scaleOrLength is ignored + * @throws SQLException if the result set is closed or not updatable + */ + @Override + public void updateObject(String columnLabel, Object x, SQLType targetSqlType, int scaleOrLength) + throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateObject(" + quote(columnLabel) + ", x, " + DataType.sqlTypeToString(targetSqlType) + + ", " + scaleOrLength + ')'); + } + update(getColumnIndex(columnLabel), convertToValue(x, targetSqlType)); } catch (Exception e) { throw logAndConvert(e); } @@ -2257,7 +2570,14 @@ public void updateRef(String columnLabel, Ref x) throws SQLException { */ @Override public void updateBlob(int columnIndex, InputStream x) throws SQLException { - updateBlob(columnIndex, x, -1); + try { + if (isDebugEnabled()) { + debugCode("updateBlob(" + columnIndex + ", (InputStream) x)"); + } + updateBlobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2269,15 +2589,12 @@ public void updateBlob(int columnIndex, InputStream x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBlob(int columnIndex, 
InputStream x, long length) - throws SQLException { + public void updateBlob(int columnIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBlob("+columnIndex+", x, " + length + "L);"); + debugCode("updateBlob(" + columnIndex + ", (InputStream) x, " + length + "L)"); } - checkClosed(); - Value v = conn.createBlob(x, length); - update(columnIndex, v); + updateBlobImpl(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -2294,16 +2611,9 @@ public void updateBlob(int columnIndex, InputStream x, long length) public void updateBlob(int columnIndex, Blob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBlob("+columnIndex+", x);"); - } - checkClosed(); - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createBlob(x.getBinaryStream(), -1); + debugCode("updateBlob(" + columnIndex + ", (Blob) x)"); } - update(columnIndex, v); + updateBlobImpl(checkColumnIndex(columnIndex), x, -1L); } catch (Exception e) { throw logAndConvert(e); } @@ -2320,21 +2630,18 @@ public void updateBlob(int columnIndex, Blob x) throws SQLException { public void updateBlob(String columnLabel, Blob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBlob("+quote(columnLabel)+", x);"); + debugCode("updateBlob(" + quote(columnLabel) + ", (Blob) x)"); } - checkClosed(); - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createBlob(x.getBinaryStream(), -1); - } - update(columnLabel, v); + updateBlobImpl(getColumnIndex(columnLabel), x, -1L); } catch (Exception e) { throw logAndConvert(e); } } + private void updateBlobImpl(int columnIndex, Blob x, long length) throws SQLException { + update(columnIndex, x == null ? ValueNull.INSTANCE : conn.createBlob(x.getBinaryStream(), length)); + } + /** * Updates a column in the current or insert row. 
* @@ -2344,7 +2651,14 @@ public void updateBlob(String columnLabel, Blob x) throws SQLException { */ @Override public void updateBlob(String columnLabel, InputStream x) throws SQLException { - updateBlob(columnLabel, x, -1); + try { + if (isDebugEnabled()) { + debugCode("updateBlob(" + quote(columnLabel) + ", (InputStream) x)"); + } + updateBlobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2356,20 +2670,21 @@ public void updateBlob(String columnLabel, InputStream x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBlob(String columnLabel, InputStream x, long length) - throws SQLException { + public void updateBlob(String columnLabel, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBlob("+quote(columnLabel)+", x, " + length + "L);"); + debugCode("updateBlob(" + quote(columnLabel) + ", (InputStream) x, " + length + "L)"); } - checkClosed(); - Value v = conn.createBlob(x, -1); - update(columnLabel, v); + updateBlobImpl(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } } + private void updateBlobImpl(int columnIndex, InputStream x, long length) { + update(columnIndex, conn.createBlob(x, length)); + } + /** * Updates a column in the current or insert row. 
* @@ -2381,16 +2696,9 @@ public void updateBlob(String columnLabel, InputStream x, long length) public void updateClob(int columnIndex, Clob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateClob("+columnIndex+", x);"); + debugCode("updateClob(" + columnIndex + ", (Clob) x)"); } - checkClosed(); - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createClob(x.getCharacterStream(), -1); - } - update(columnIndex, v); + updateClobImpl(checkColumnIndex(columnIndex), x); } catch (Exception e) { throw logAndConvert(e); } @@ -2405,7 +2713,14 @@ public void updateClob(int columnIndex, Clob x) throws SQLException { */ @Override public void updateClob(int columnIndex, Reader x) throws SQLException { - updateClob(columnIndex, x, -1); + try { + if (isDebugEnabled()) { + debugCode("updateClob(" + columnIndex + ", (Reader) x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2417,15 +2732,12 @@ public void updateClob(int columnIndex, Reader x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateClob(int columnIndex, Reader x, long length) - throws SQLException { + public void updateClob(int columnIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateClob("+columnIndex+", x, " + length + "L);"); + debugCode("updateClob(" + columnIndex + ", (Reader) x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(x, length); - update(columnIndex, v); + updateClobImpl(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -2442,16 +2754,9 @@ public void updateClob(int columnIndex, Reader x, long length) public void updateClob(String columnLabel, Clob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateClob("+quote(columnLabel)+", x);"); + debugCode("updateClob(" + 
quote(columnLabel) + ", (Clob) x)"); } - checkClosed(); - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createClob(x.getCharacterStream(), -1); - } - update(columnLabel, v); + updateClobImpl(getColumnIndex(columnLabel), x); } catch (Exception e) { throw logAndConvert(e); } @@ -2466,7 +2771,14 @@ public void updateClob(String columnLabel, Clob x) throws SQLException { */ @Override public void updateClob(String columnLabel, Reader x) throws SQLException { - updateClob(columnLabel, x, -1); + try { + if (isDebugEnabled()) { + debugCode("updateClob(" + quote(columnLabel) + ", (Reader) x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2478,15 +2790,12 @@ public void updateClob(String columnLabel, Reader x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateClob(String columnLabel, Reader x, long length) - throws SQLException { + public void updateClob(String columnLabel, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateClob("+quote(columnLabel)+", x, " + length + "L);"); + debugCode("updateClob(" + quote(columnLabel) + ", (Reader) x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(x, length); - update(columnLabel, v); + updateClobImpl(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -2503,16 +2812,9 @@ public void updateClob(String columnLabel, Reader x, long length) public void updateArray(int columnIndex, Array x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateArray(" + columnIndex + ", x);"); - } - checkClosed(); - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = DataType.convertToValue(stat.session, x.getArray(), Value.ARRAY); + debugCode("updateArray(" + columnIndex + ", x)"); } - update(columnIndex, v); + 
updateArrayImpl(checkColumnIndex(columnIndex), x); } catch (Exception e) { throw logAndConvert(e); } @@ -2529,21 +2831,19 @@ public void updateArray(int columnIndex, Array x) throws SQLException { public void updateArray(String columnLabel, Array x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateArray(" + quote(columnLabel) + ", x);"); - } - checkClosed(); - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = DataType.convertToValue(stat.session, x.getArray(), Value.ARRAY); + debugCode("updateArray(" + quote(columnLabel) + ", x)"); } - update(columnLabel, v); + updateArrayImpl(getColumnIndex(columnLabel), x); } catch (Exception e) { throw logAndConvert(e); } } + private void updateArrayImpl(int columnIndex, Array x) throws SQLException { + update(columnIndex, x == null ? ValueNull.INSTANCE + : ValueToObjectConverter.objectToValue(stat.session, x.getArray(), Value.ARRAY)); + } + /** * [Not supported] Gets the cursor name if it was defined. This feature is * superseded by updateX methods. This method throws a SQLException because @@ -2568,8 +2868,8 @@ public int getRow() throws SQLException { if (result.isAfterLast()) { return 0; } - int rowId = result.getRowId(); - return rowId + 1; + long rowNumber = result.getRowId() + 1; + return rowNumber <= Integer.MAX_VALUE ? (int) rowNumber : Statement.SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } @@ -2769,7 +3069,7 @@ public boolean isLast() throws SQLException { try { debugCodeCall("isLast"); checkClosed(); - int rowId = result.getRowId(); + long rowId = result.getRowId(); return rowId >= 0 && !result.isAfterLast() && !result.hasNext(); } catch (Exception e) { throw logAndConvert(e); @@ -2812,7 +3112,7 @@ public void afterLast() throws SQLException { } catch (Exception e) { throw logAndConvert(e); } -} + } /** * Moves the current position to the first row. 
This is the same as calling @@ -2874,13 +3174,11 @@ public boolean absolute(int rowNumber) throws SQLException { try { debugCodeCall("absolute", rowNumber); checkClosed(); - if (rowNumber < 0) { - rowNumber = result.getRowCount() + rowNumber + 1; - } - if (--rowNumber < result.getRowId()) { + long longRowNumber = rowNumber >= 0 ? rowNumber : result.getRowCount() + rowNumber + 1; + if (--longRowNumber < result.getRowId()) { resetResult(); } - while (result.getRowId() < rowNumber) { + while (result.getRowId() < longRowNumber) { if (!nextRow()) { return false; } @@ -2906,11 +3204,14 @@ public boolean relative(int rowCount) throws SQLException { try { debugCodeCall("relative", rowCount); checkClosed(); + long longRowCount; if (rowCount < 0) { - rowCount = result.getRowId() + rowCount + 1; + longRowCount = result.getRowId() + rowCount + 1; resetResult(); + } else { + longRowCount = rowCount; } - for (int i = 0; i < rowCount; i++) { + while (longRowCount-- > 0) { if (!nextRow()) { return false; } @@ -3058,7 +3359,7 @@ public void updateRow() throws SQLException { UpdatableRow row = getUpdatableRow(); Value[] current = new Value[columnCount]; for (int i = 0; i < updateRow.length; i++) { - current[i] = get(i + 1); + current[i] = getInternal(checkColumnIndex(i + 1)); } row.updateRow(current, updateRow); for (int i = 0; i < updateRow.length; i++) { @@ -3163,17 +3464,20 @@ private int getColumnIndex(String columnLabel) { // column labels have higher priority for (int i = 0; i < columnCount; i++) { String c = StringUtils.toUpperEnglish(result.getAlias(i)); - mapColumn(map, c, i); + // Don't override previous mapping + map.putIfAbsent(c, i); } for (int i = 0; i < columnCount; i++) { String colName = result.getColumnName(i); if (colName != null) { colName = StringUtils.toUpperEnglish(colName); - mapColumn(map, colName, i); + // Don't override previous mapping + map.putIfAbsent(colName, i); String tabName = result.getTableName(i); if (tabName != null) { - colName = 
StringUtils.toUpperEnglish(tabName) + "." + colName; - mapColumn(map, colName, i); + colName = StringUtils.toUpperEnglish(tabName) + '.' + colName; + // Don't override previous mapping + map.putIfAbsent(colName, i); } } } @@ -3214,22 +3518,12 @@ private int getColumnIndex(String columnLabel) { throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, columnLabel); } - private static void mapColumn(HashMap map, String label, - int index) { - // put the index (usually that's the only operation) - Integer old = map.put(label, index); - if (old != null) { - // if there was a clash (which is seldom), - // put the old one back - map.put(label, old); - } - } - - private void checkColumnIndex(int columnIndex) { + private int checkColumnIndex(int columnIndex) { checkClosed(); if (columnIndex < 1 || columnIndex > columnCount) { throw DbException.getInvalidValueException("columnIndex", columnIndex); } + return columnIndex; } /** @@ -3259,6 +3553,12 @@ private void checkOnValidRow() { } } + private Value get(int columnIndex) { + Value value = getInternal(columnIndex); + wasNull = value == ValueNull.INSTANCE; + return value; + } + /** * INTERNAL * @@ -3266,36 +3566,19 @@ private void checkOnValidRow() { * index of a column * @return internal representation of the value in the specified column */ - public Value get(int columnIndex) { - checkColumnIndex(columnIndex); + public Value getInternal(int columnIndex) { checkOnValidRow(); Value[] list; - if (patchedRows == null) { + if (patchedRows == null || (list = patchedRows.get(result.getRowId())) == null) { list = result.currentRow(); - } else { - list = patchedRows.get(result.getRowId()); - if (list == null) { - list = result.currentRow(); - } } - Value value = list[columnIndex - 1]; - wasNull = value == ValueNull.INSTANCE; - return value; - } - - private Value get(String columnLabel) { - int columnIndex = getColumnIndex(columnLabel); - return get(columnIndex); - } - - private void update(String columnLabel, Value v) { - int columnIndex 
= getColumnIndex(columnLabel); - update(columnIndex, v); + return list[columnIndex - 1]; } private void update(int columnIndex, Value v) { - checkUpdatable(); - checkColumnIndex(columnIndex); + if (!triggerUpdatable) { + checkUpdatable(); + } if (insertRow != null) { insertRow[columnIndex - 1] = v; } else { @@ -3307,16 +3590,28 @@ private void update(int columnIndex, Value v) { } private boolean nextRow() { - if (result.isLazy() && stat.isCancelled()) { - throw DbException.get(ErrorCode.STATEMENT_WAS_CANCELED); - } - boolean next = result.next(); + boolean next = result.isLazy() ? nextLazyRow() : result.next(); if (!next && !scrollable) { result.close(); } return next; } + private boolean nextLazyRow() { + Session session; + if (stat.isCancelled() || conn == null || (session = conn.getSession()) == null) { + throw DbException.get(ErrorCode.STATEMENT_WAS_CANCELED); + } + Session oldSession = session.setThreadLocalSession(); + boolean next; + try { + next = result.next(); + } finally { + session.resetThreadLocalSession(oldSession); + } + return next; + } + private void resetResult() { if (!scrollable) { throw DbException.get(ErrorCode.RESULT_SET_NOT_SCROLLABLE); @@ -3409,10 +3704,9 @@ public boolean isClosed() throws SQLException { public void updateNString(int columnIndex, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateNString("+columnIndex+", "+quote(x)+");"); + debugCode("updateNString(" + columnIndex + ", " + quote(x) + ')'); } - update(columnIndex, x == null ? (Value) - ValueNull.INSTANCE : ValueString.get(x)); + update(checkColumnIndex(columnIndex), x == null ? 
ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -3429,10 +3723,9 @@ public void updateNString(int columnIndex, String x) throws SQLException { public void updateNString(String columnLabel, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateNString("+quote(columnLabel)+", "+quote(x)+");"); + debugCode("updateNString(" + quote(columnLabel) + ", " + quote(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : - ValueString.get(x)); + update(getColumnIndex(columnLabel), x == null ? ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -3447,7 +3740,14 @@ public void updateNString(String columnLabel, String x) throws SQLException { */ @Override public void updateNClob(int columnIndex, NClob x) throws SQLException { - updateClob(columnIndex, x); + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + columnIndex + ", (NClob) x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3459,7 +3759,14 @@ public void updateNClob(int columnIndex, NClob x) throws SQLException { */ @Override public void updateNClob(int columnIndex, Reader x) throws SQLException { - updateClob(columnIndex, x, -1); + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + columnIndex + ", (Reader) x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3471,9 +3778,15 @@ public void updateNClob(int columnIndex, Reader x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNClob(int columnIndex, Reader x, long length) - throws SQLException { - updateClob(columnIndex, x, length); + public void updateNClob(int columnIndex, Reader x, long length) throws SQLException { + try { + if (isDebugEnabled()) { + 
debugCode("updateNClob(" + columnIndex + ", (Reader) x, " + length + "L)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3485,7 +3798,14 @@ public void updateNClob(int columnIndex, Reader x, long length) */ @Override public void updateNClob(String columnLabel, Reader x) throws SQLException { - updateClob(columnLabel, x, -1); + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + quote(columnLabel) + ", (Reader) x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3497,9 +3817,15 @@ public void updateNClob(String columnLabel, Reader x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNClob(String columnLabel, Reader x, long length) - throws SQLException { - updateClob(columnLabel, x, length); + public void updateNClob(String columnLabel, Reader x, long length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + quote(columnLabel) + ", (Reader) x, " + length + "L)"); + } + updateClobImpl(getColumnIndex(columnLabel), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3511,7 +3837,18 @@ public void updateNClob(String columnLabel, Reader x, long length) */ @Override public void updateNClob(String columnLabel, NClob x) throws SQLException { - updateClob(columnLabel, x); + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + quote(columnLabel) + ", (NClob) x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private void updateClobImpl(int columnIndex, Clob x) throws SQLException { + update(columnIndex, x == null ? 
ValueNull.INSTANCE : conn.createClob(x.getCharacterStream(), -1)); } /** @@ -3527,10 +3864,9 @@ public NClob getNClob(int columnIndex) throws SQLException { try { int id = getNextId(TraceObject.CLOB); if (isDebugEnabled()) { - debugCodeAssign("NClob", TraceObject.CLOB, id, "getNClob(" + columnIndex + ")"); + debugCodeAssign("NClob", TraceObject.CLOB, id, "getNClob(" + columnIndex + ')'); } - Value v = get(columnIndex); - return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, JdbcLob.State.WITH_VALUE, id); + return getClob(id, checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -3549,15 +3885,27 @@ public NClob getNClob(String columnLabel) throws SQLException { try { int id = getNextId(TraceObject.CLOB); if (isDebugEnabled()) { - debugCodeAssign("NClob", TraceObject.CLOB, id, "getNClob(" + columnLabel + ")"); + debugCodeAssign("NClob", TraceObject.CLOB, id, "getNClob(" + quote(columnLabel) + ')'); } - Value v = get(columnLabel); - return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, JdbcLob.State.WITH_VALUE, id); + return getClob(id, getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private JdbcClob getClob(int id, int columnIndex) { + Value v = getInternal(columnIndex); + JdbcClob result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = new JdbcClob(conn, v, JdbcLob.State.WITH_VALUE, id); + } else { + wasNull = true; + result = null; + } + return result; + } + /** * Returns the value of the specified column as a SQLXML. * @@ -3571,9 +3919,9 @@ public SQLXML getSQLXML(int columnIndex) throws SQLException { try { int id = getNextId(TraceObject.SQLXML); if (isDebugEnabled()) { - debugCodeAssign("SQLXML", TraceObject.SQLXML, id, "getSQLXML(" + columnIndex + ")"); + debugCodeAssign("SQLXML", TraceObject.SQLXML, id, "getSQLXML(" + columnIndex + ')'); } - Value v = get(columnIndex); + Value v = get(checkColumnIndex(columnIndex)); return v == ValueNull.INSTANCE ? 
null : new JdbcSQLXML(conn, v, JdbcLob.State.WITH_VALUE, id); } catch (Exception e) { throw logAndConvert(e); @@ -3593,9 +3941,9 @@ public SQLXML getSQLXML(String columnLabel) throws SQLException { try { int id = getNextId(TraceObject.SQLXML); if (isDebugEnabled()) { - debugCodeAssign("SQLXML", TraceObject.SQLXML, id, "getSQLXML(" + columnLabel + ")"); + debugCodeAssign("SQLXML", TraceObject.SQLXML, id, "getSQLXML(" + quote(columnLabel) + ')'); } - Value v = get(columnLabel); + Value v = get(getColumnIndex(columnLabel)); return v == ValueNull.INSTANCE ? null : new JdbcSQLXML(conn, v, JdbcLob.State.WITH_VALUE, id); } catch (Exception e) { throw logAndConvert(e); @@ -3610,20 +3958,12 @@ public SQLXML getSQLXML(String columnLabel) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateSQLXML(int columnIndex, SQLXML xmlObject) - throws SQLException { + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateSQLXML("+columnIndex+", x);"); - } - checkClosed(); - Value v; - if (xmlObject == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createClob(xmlObject.getCharacterStream(), -1); + debugCode("updateSQLXML(" + columnIndex + ", x)"); } - update(columnIndex, v); + updateSQLXMLImpl(checkColumnIndex(columnIndex), xmlObject); } catch (Exception e) { throw logAndConvert(e); } @@ -3637,25 +3977,22 @@ public void updateSQLXML(int columnIndex, SQLXML xmlObject) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateSQLXML(String columnLabel, SQLXML xmlObject) - throws SQLException { + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateSQLXML("+quote(columnLabel)+", x);"); - } - checkClosed(); - Value v; - if (xmlObject == null) { - v = ValueNull.INSTANCE; - } else { - v = 
conn.createClob(xmlObject.getCharacterStream(), -1); + debugCode("updateSQLXML(" + quote(columnLabel) + ", x)"); } - update(columnLabel, v); + updateSQLXMLImpl(getColumnIndex(columnLabel), xmlObject); } catch (Exception e) { throw logAndConvert(e); } } + private void updateSQLXMLImpl(int columnIndex, SQLXML xmlObject) throws SQLException { + update(columnIndex, + xmlObject == null ? ValueNull.INSTANCE : conn.createClob(xmlObject.getCharacterStream(), -1)); + } + /** * Returns the value of the specified column as a String. * @@ -3668,7 +4005,7 @@ public void updateSQLXML(String columnLabel, SQLXML xmlObject) public String getNString(int columnIndex) throws SQLException { try { debugCodeCall("getNString", columnIndex); - return get(columnIndex).getString(); + return get(checkColumnIndex(columnIndex)).getString(); } catch (Exception e) { throw logAndConvert(e); } @@ -3686,7 +4023,7 @@ public String getNString(int columnIndex) throws SQLException { public String getNString(String columnLabel) throws SQLException { try { debugCodeCall("getNString", columnLabel); - return get(columnLabel).getString(); + return get(getColumnIndex(columnLabel)).getString(); } catch (Exception e) { throw logAndConvert(e); } @@ -3704,7 +4041,7 @@ public String getNString(String columnLabel) throws SQLException { public Reader getNCharacterStream(int columnIndex) throws SQLException { try { debugCodeCall("getNCharacterStream", columnIndex); - return get(columnIndex).getReader(); + return get(checkColumnIndex(columnIndex)).getReader(); } catch (Exception e) { throw logAndConvert(e); } @@ -3722,7 +4059,7 @@ public Reader getNCharacterStream(int columnIndex) throws SQLException { public Reader getNCharacterStream(String columnLabel) throws SQLException { try { debugCodeCall("getNCharacterStream", columnLabel); - return get(columnLabel).getReader(); + return get(getColumnIndex(columnLabel)).getReader(); } catch (Exception e) { throw logAndConvert(e); } @@ -3736,9 +4073,15 @@ public Reader 
getNCharacterStream(String columnLabel) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNCharacterStream(int columnIndex, Reader x) - throws SQLException { - updateNCharacterStream(columnIndex, x, -1); + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateNCharacterStream(" + columnIndex + ", x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3750,15 +4093,12 @@ public void updateNCharacterStream(int columnIndex, Reader x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNCharacterStream(int columnIndex, Reader x, long length) - throws SQLException { + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateNCharacterStream("+columnIndex+", x, "+length+"L);"); + debugCode("updateNCharacterStream(" + columnIndex + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(x, length); - update(columnIndex, v); + updateClobImpl(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -3772,9 +4112,15 @@ public void updateNCharacterStream(int columnIndex, Reader x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNCharacterStream(String columnLabel, Reader x) - throws SQLException { - updateNCharacterStream(columnLabel, x, -1); + public void updateNCharacterStream(String columnLabel, Reader x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateNCharacterStream(" + quote(columnLabel) + ", x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3786,20 +4132,21 @@ public void 
updateNCharacterStream(String columnLabel, Reader x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNCharacterStream(String columnLabel, Reader x, long length) - throws SQLException { + public void updateNCharacterStream(String columnLabel, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateNCharacterStream("+quote(columnLabel)+", x, "+length+"L);"); + debugCode("updateNCharacterStream(" + quote(columnLabel) + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(x, length); - update(columnLabel, v); + updateClobImpl(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } } + private void updateClobImpl(int columnIndex, Reader x, long length) { + update(columnIndex, conn.createClob(x, length)); + } + /** * Return an object of this class if possible. * @@ -3831,8 +4178,7 @@ public boolean isWrapperFor(Class iface) throws SQLException { } /** - * Returns a column value as a Java object. The data is - * de-serialized into a Java object (on the client side). + * Returns a column value as a Java object of the specified type. * * @param columnIndex the column index (1, 2, ...) * @param type the class of the returned value @@ -3847,16 +4193,14 @@ public T getObject(int columnIndex, Class type) throws SQLException { throw DbException.getInvalidValueException("type", type); } debugCodeCall("getObject", columnIndex); - Value value = get(columnIndex); - return extractObjectOfType(type, value); + return ValueToObjectConverter.valueToObject(type, get(checkColumnIndex(columnIndex)), conn); } catch (Exception e) { throw logAndConvert(e); } } /** - * Returns a column value as a Java object. The data is - * de-serialized into a Java object (on the client side). + * Returns a column value as a Java object of the specified type. 
* * @param columnName the column name * @param type the class of the returned value @@ -3869,95 +4213,12 @@ public T getObject(String columnName, Class type) throws SQLException { throw DbException.getInvalidValueException("type", type); } debugCodeCall("getObject", columnName); - Value value = get(columnName); - return extractObjectOfType(type, value); + return ValueToObjectConverter.valueToObject(type, get(getColumnIndex(columnName)), conn); } catch (Exception e) { throw logAndConvert(e); } } - private T extractObjectOfType(Class type, Value value) throws SQLException { - if (value == ValueNull.INSTANCE) { - return null; - } - if (type == BigDecimal.class) { - return type.cast(value.getBigDecimal()); - } else if (type == BigInteger.class) { - return type.cast(value.getBigDecimal().toBigInteger()); - } else if (type == String.class) { - return type.cast(value.getString()); - } else if (type == Boolean.class) { - return type.cast(value.getBoolean()); - } else if (type == Byte.class) { - return type.cast(value.getByte()); - } else if (type == Short.class) { - return type.cast(value.getShort()); - } else if (type == Integer.class) { - return type.cast(value.getInt()); - } else if (type == Long.class) { - return type.cast(value.getLong()); - } else if (type == Float.class) { - return type.cast(value.getFloat()); - } else if (type == Double.class) { - return type.cast(value.getDouble()); - } else if (type == Date.class) { - return type.cast(value.getDate()); - } else if (type == Time.class) { - return type.cast(value.getTime()); - } else if (type == Timestamp.class) { - return type.cast(value.getTimestamp()); - } else if (type == java.util.Date.class) { - return type.cast(new java.util.Date(value.getTimestamp().getTime())); - } else if (type == Calendar.class) { - Calendar calendar = DateTimeUtils.createGregorianCalendar(); - calendar.setTime(value.getTimestamp()); - return type.cast(calendar); - } else if (type == UUID.class) { - return type.cast(value.getObject()); - 
} else if (type == byte[].class) { - return type.cast(value.getBytes()); - } else if (type == java.sql.Array.class) { - int id = getNextId(TraceObject.ARRAY); - return type.cast(new JdbcArray(conn, value, id)); - } else if (type == Blob.class) { - int id = getNextId(TraceObject.BLOB); - return type.cast(new JdbcBlob(conn, value, JdbcLob.State.WITH_VALUE, id)); - } else if (type == Clob.class) { - int id = getNextId(TraceObject.CLOB); - return type.cast(new JdbcClob(conn, value, JdbcLob.State.WITH_VALUE, id)); - } else if (type == SQLXML.class) { - int id = getNextId(TraceObject.SQLXML); - return type.cast(new JdbcSQLXML(conn, value, JdbcLob.State.WITH_VALUE, id)); - } else if (type == TimestampWithTimeZone.class) { - ValueTimestampTimeZone v = (ValueTimestampTimeZone) value.convertTo(Value.TIMESTAMP_TZ); - return type.cast(new TimestampWithTimeZone(v.getDateValue(), v.getTimeNanos(), v.getTimeZoneOffsetMins())); - } else if (type == Interval.class) { - if (!(value instanceof ValueInterval)) { - value = value.convertTo(Value.INTERVAL_DAY_TO_SECOND); - } - ValueInterval v = (ValueInterval) value; - return type.cast(new Interval(v.getQualifier(), false, v.getLeading(), v.getRemaining())); - } else if (DataType.isGeometryClass(type)) { - return type.cast(value.convertTo(Value.GEOMETRY).getObject()); - } else if (type == LocalDateTimeUtils.LOCAL_DATE) { - return type.cast(LocalDateTimeUtils.valueToLocalDate(value)); - } else if (type == LocalDateTimeUtils.LOCAL_TIME) { - return type.cast(LocalDateTimeUtils.valueToLocalTime(value)); - } else if (type == LocalDateTimeUtils.LOCAL_DATE_TIME) { - return type.cast(LocalDateTimeUtils.valueToLocalDateTime(value)); - } else if (type == LocalDateTimeUtils.INSTANT) { - return type.cast(LocalDateTimeUtils.valueToInstant(value)); - } else if (type == LocalDateTimeUtils.OFFSET_DATE_TIME) { - return type.cast(LocalDateTimeUtils.valueToOffsetDateTime(value)); - } else if (type == LocalDateTimeUtils.PERIOD) { - return 
type.cast(LocalDateTimeUtils.valueToPeriod(value)); - } else if (type == LocalDateTimeUtils.DURATION) { - return type.cast(LocalDateTimeUtils.valueToDuration(value)); - } else { - throw unsupported(type.getName()); - } - } - /** * INTERNAL */ @@ -3966,13 +4227,12 @@ public String toString() { return getTraceObjectName() + ": " + result; } - private void patchCurrentRow(Value[] row) throws SQLException { + private void patchCurrentRow(Value[] row) { boolean changed = false; Value[] current = result.currentRow(); - Mode databaseMode = conn.getMode(); CompareMode compareMode = conn.getCompareMode(); for (int i = 0; i < row.length; i++) { - if (row[i].compareTo(current[i], databaseMode, compareMode) != 0) { + if (row[i].compareTo(current[i], conn, compareMode) != 0) { changed = true; break; } @@ -3980,7 +4240,7 @@ private void patchCurrentRow(Value[] row) throws SQLException { if (patchedRows == null) { patchedRows = new HashMap<>(); } - Integer rowId = result.getRowId(); + Long rowId = result.getRowId(); if (!changed) { patchedRows.remove(rowId); } else { @@ -3988,9 +4248,18 @@ private void patchCurrentRow(Value[] row) throws SQLException { } } + private Value convertToValue(Object x, SQLType targetSqlType) { + if (x == null) { + return ValueNull.INSTANCE; + } else { + int type = DataType.convertSQLTypeToValueType(targetSqlType); + Value v = ValueToObjectConverter.objectToValue(conn.getSession(), x, type); + return v.convertTo(type, conn); + } + } + private Value convertToUnknownValue(Object x) { - checkClosed(); - return DataType.convertToValue(conn.getSession(), x, Value.UNKNOWN); + return ValueToObjectConverter.objectToValue(conn.getSession(), x, Value.UNKNOWN); } private void checkUpdatable() { @@ -4000,4 +4269,22 @@ private void checkUpdatable() { } } + /** + * INTERNAL + * + * @return array of column values for the current row + */ + public Value[] getUpdateRow() { + return updateRow; + } + + /** + * INTERNAL + * + * @return result + */ + public ResultInterface 
getResult() { + return result; + } + } diff --git a/h2/src/main/org/h2/jdbc/JdbcResultSetBackwardsCompat.java b/h2/src/main/org/h2/jdbc/JdbcResultSetBackwardsCompat.java deleted file mode 100644 index 0766260a55..0000000000 --- a/h2/src/main/org/h2/jdbc/JdbcResultSetBackwardsCompat.java +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jdbc; - -/** - * Allows us to compile on older platforms, while still implementing the methods - * from the newer JDBC API. - */ -public interface JdbcResultSetBackwardsCompat { - - // compatibility interface - -} diff --git a/h2/src/main/org/h2/jdbc/JdbcResultSetMetaData.java b/h2/src/main/org/h2/jdbc/JdbcResultSetMetaData.java index df8907e256..e3658d6f23 100644 --- a/h2/src/main/org/h2/jdbc/JdbcResultSetMetaData.java +++ b/h2/src/main/org/h2/jdbc/JdbcResultSetMetaData.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -13,12 +13,12 @@ import org.h2.result.ResultInterface; import org.h2.util.MathUtils; import org.h2.value.DataType; +import org.h2.value.ValueToObjectConverter; /** * Represents the meta data for a ResultSet. 
*/ -public class JdbcResultSetMetaData extends TraceObject implements - ResultSetMetaData { +public final class JdbcResultSetMetaData extends TraceObject implements ResultSetMetaData { private final String catalog; private final JdbcResultSet rs; @@ -63,9 +63,7 @@ public int getColumnCount() throws SQLException { @Override public String getColumnLabel(int column) throws SQLException { try { - debugCodeCall("getColumnLabel", column); - checkColumnIndex(column); - return result.getAlias(--column); + return result.getAlias(getColumn("getColumnLabel", column)); } catch (Exception e) { throw logAndConvert(e); } @@ -81,9 +79,7 @@ public String getColumnLabel(int column) throws SQLException { @Override public String getColumnName(int column) throws SQLException { try { - debugCodeCall("getColumnName", column); - checkColumnIndex(column); - return result.getColumnName(--column); + return result.getColumnName(getColumn("getColumnName", column)); } catch (Exception e) { throw logAndConvert(e); } @@ -100,10 +96,7 @@ public String getColumnName(int column) throws SQLException { @Override public int getColumnType(int column) throws SQLException { try { - debugCodeCall("getColumnType", column); - checkColumnIndex(column); - int type = result.getColumnType(--column).getValueType(); - return DataType.convertTypeToSQLType(type); + return DataType.convertTypeToSQLType(result.getColumnType(getColumn("getColumnType", column))); } catch (Exception e) { throw logAndConvert(e); } @@ -119,10 +112,7 @@ public int getColumnType(int column) throws SQLException { @Override public String getColumnTypeName(int column) throws SQLException { try { - debugCodeCall("getColumnTypeName", column); - checkColumnIndex(column); - int type = result.getColumnType(--column).getValueType(); - return DataType.getDataType(type).name; + return result.getColumnType(getColumn("getColumnTypeName", column)).getDeclaredTypeName(); } catch (Exception e) { throw logAndConvert(e); } @@ -138,9 +128,7 @@ public String 
getColumnTypeName(int column) throws SQLException { @Override public String getSchemaName(int column) throws SQLException { try { - debugCodeCall("getSchemaName", column); - checkColumnIndex(column); - String schema = result.getSchemaName(--column); + String schema = result.getSchemaName(getColumn("getSchemaName", column)); return schema == null ? "" : schema; } catch (Exception e) { throw logAndConvert(e); @@ -157,9 +145,7 @@ public String getSchemaName(int column) throws SQLException { @Override public String getTableName(int column) throws SQLException { try { - debugCodeCall("getTableName", column); - checkColumnIndex(column); - String table = result.getTableName(--column); + String table = result.getTableName(getColumn("getTableName", column)); return table == null ? "" : table; } catch (Exception e) { throw logAndConvert(e); @@ -176,8 +162,7 @@ public String getTableName(int column) throws SQLException { @Override public String getCatalogName(int column) throws SQLException { try { - debugCodeCall("getCatalogName", column); - checkColumnIndex(column); + getColumn("getCatalogName", column); return catalog == null ? 
"" : catalog; } catch (Exception e) { throw logAndConvert(e); @@ -194,9 +179,7 @@ public String getCatalogName(int column) throws SQLException { @Override public boolean isAutoIncrement(int column) throws SQLException { try { - debugCodeCall("isAutoIncrement", column); - checkColumnIndex(column); - return result.isAutoIncrement(--column); + return result.isIdentity(getColumn("isAutoIncrement", column)); } catch (Exception e) { throw logAndConvert(e); } @@ -213,8 +196,7 @@ public boolean isAutoIncrement(int column) throws SQLException { @Override public boolean isCaseSensitive(int column) throws SQLException { try { - debugCodeCall("isCaseSensitive", column); - checkColumnIndex(column); + getColumn("isCaseSensitive", column); return true; } catch (Exception e) { throw logAndConvert(e); @@ -232,8 +214,7 @@ public boolean isCaseSensitive(int column) throws SQLException { @Override public boolean isSearchable(int column) throws SQLException { try { - debugCodeCall("isSearchable", column); - checkColumnIndex(column); + getColumn("isSearchable", column); return true; } catch (Exception e) { throw logAndConvert(e); @@ -251,8 +232,7 @@ public boolean isSearchable(int column) throws SQLException { @Override public boolean isCurrency(int column) throws SQLException { try { - debugCodeCall("isCurrency", column); - checkColumnIndex(column); + getColumn("isCurrency", column); return false; } catch (Exception e) { throw logAndConvert(e); @@ -273,9 +253,7 @@ public boolean isCurrency(int column) throws SQLException { @Override public int isNullable(int column) throws SQLException { try { - debugCodeCall("isNullable", column); - checkColumnIndex(column); - return result.getNullable(--column); + return result.getNullable(getColumn("isNullable", column)); } catch (Exception e) { throw logAndConvert(e); } @@ -283,18 +261,16 @@ public int isNullable(int column) throws SQLException { /** * Checks if this column is signed. - * It always returns true. 
+ * Returns true for numeric columns. * * @param column the column index (1,2,...) - * @return true + * @return true for numeric columns * @throws SQLException if the result set is closed or invalid */ @Override public boolean isSigned(int column) throws SQLException { try { - debugCodeCall("isSigned", column); - checkColumnIndex(column); - return true; + return DataType.isNumericType(result.getColumnType(getColumn("isSigned", column)).getValueType()); } catch (Exception e) { throw logAndConvert(e); } @@ -311,8 +287,7 @@ public boolean isSigned(int column) throws SQLException { @Override public boolean isReadOnly(int column) throws SQLException { try { - debugCodeCall("isReadOnly", column); - checkColumnIndex(column); + getColumn("isReadOnly", column); return false; } catch (Exception e) { throw logAndConvert(e); @@ -330,8 +305,7 @@ public boolean isReadOnly(int column) throws SQLException { @Override public boolean isWritable(int column) throws SQLException { try { - debugCodeCall("isWritable", column); - checkColumnIndex(column); + getColumn("isWritable", column); return true; } catch (Exception e) { throw logAndConvert(e); @@ -349,8 +323,7 @@ public boolean isWritable(int column) throws SQLException { @Override public boolean isDefinitelyWritable(int column) throws SQLException { try { - debugCodeCall("isDefinitelyWritable", column); - checkColumnIndex(column); + getColumn("isDefinitelyWritable", column); return false; } catch (Exception e) { throw logAndConvert(e); @@ -368,10 +341,8 @@ public boolean isDefinitelyWritable(int column) throws SQLException { @Override public String getColumnClassName(int column) throws SQLException { try { - debugCodeCall("getColumnClassName", column); - checkColumnIndex(column); - int type = result.getColumnType(--column).getValueType(); - return DataType.getTypeClassName(type, true); + int type = result.getColumnType(getColumn("getColumnClassName", column)).getValueType(); + return ValueToObjectConverter.getDefaultClass(type, 
true).getName(); } catch (Exception e) { throw logAndConvert(e); } @@ -387,10 +358,7 @@ public String getColumnClassName(int column) throws SQLException { @Override public int getPrecision(int column) throws SQLException { try { - debugCodeCall("getPrecision", column); - checkColumnIndex(column); - long prec = result.getColumnType(--column).getPrecision(); - return MathUtils.convertLongToInt(prec); + return MathUtils.convertLongToInt(result.getColumnType(getColumn("getPrecision", column)).getPrecision()); } catch (Exception e) { throw logAndConvert(e); } @@ -406,9 +374,7 @@ public int getPrecision(int column) throws SQLException { @Override public int getScale(int column) throws SQLException { try { - debugCodeCall("getScale", column); - checkColumnIndex(column); - return result.getColumnType(--column).getScale(); + return result.getColumnType(getColumn("getScale", column)).getScale(); } catch (Exception e) { throw logAndConvert(e); } @@ -424,9 +390,7 @@ public int getScale(int column) throws SQLException { @Override public int getColumnDisplaySize(int column) throws SQLException { try { - debugCodeCall("getColumnDisplaySize", column); - checkColumnIndex(column); - return result.getColumnType(--column).getDisplaySize(); + return result.getColumnType(getColumn("getColumnDisplaySize", column)).getDisplaySize(); } catch (Exception e) { throw logAndConvert(e); } @@ -441,11 +405,23 @@ private void checkClosed() { } } - private void checkColumnIndex(int columnIndex) { + /** + * Writes trace information and checks validity of this object and + * parameter. 
+ * + * @param methodName + * the called method name + * @param columnIndex + * 1-based column index + * @return 0-based column index + */ + private int getColumn(String methodName, int columnIndex) { + debugCodeCall(methodName, columnIndex); checkClosed(); if (columnIndex < 1 || columnIndex > columnCount) { throw DbException.getInvalidValueException("columnIndex", columnIndex); } + return columnIndex - 1; } /** diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLDataException.java b/h2/src/main/org/h2/jdbc/JdbcSQLDataException.java index 27065d8935..0016f23f3f 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLDataException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLDataException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,7 @@ /** * Represents a database exception. */ -public class JdbcSQLDataException extends SQLDataException implements JdbcException { +public final class JdbcSQLDataException extends SQLDataException implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLException.java b/h2/src/main/org/h2/jdbc/JdbcSQLException.java index 0f55a66f66..de08d17dde 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,7 @@ /** * Represents a database exception. 
*/ -public class JdbcSQLException extends SQLException implements JdbcException { +public final class JdbcSQLException extends SQLException implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLFeatureNotSupportedException.java b/h2/src/main/org/h2/jdbc/JdbcSQLFeatureNotSupportedException.java index 05c26f15ec..bf9416b842 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLFeatureNotSupportedException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLFeatureNotSupportedException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,8 @@ /** * Represents a database exception. */ -public class JdbcSQLFeatureNotSupportedException extends SQLFeatureNotSupportedException implements JdbcException { +public final class JdbcSQLFeatureNotSupportedException extends SQLFeatureNotSupportedException + implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLIntegrityConstraintViolationException.java b/h2/src/main/org/h2/jdbc/JdbcSQLIntegrityConstraintViolationException.java index 2ae1a6b3de..6ce24217ae 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLIntegrityConstraintViolationException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLIntegrityConstraintViolationException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,7 @@ /** * Represents a database exception. 
*/ -public class JdbcSQLIntegrityConstraintViolationException extends SQLIntegrityConstraintViolationException +public final class JdbcSQLIntegrityConstraintViolationException extends SQLIntegrityConstraintViolationException implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLInvalidAuthorizationSpecException.java b/h2/src/main/org/h2/jdbc/JdbcSQLInvalidAuthorizationSpecException.java index 1c6338b325..d06886c201 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLInvalidAuthorizationSpecException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLInvalidAuthorizationSpecException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,7 @@ /** * Represents a database exception. */ -public class JdbcSQLInvalidAuthorizationSpecException extends SQLInvalidAuthorizationSpecException +public final class JdbcSQLInvalidAuthorizationSpecException extends SQLInvalidAuthorizationSpecException implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientConnectionException.java b/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientConnectionException.java index 8823d096c3..b76dd0d0c3 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientConnectionException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientConnectionException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,7 @@ /** * Represents a database exception. */ -public class JdbcSQLNonTransientConnectionException extends SQLNonTransientConnectionException +public final class JdbcSQLNonTransientConnectionException extends SQLNonTransientConnectionException implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientException.java b/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientException.java index 2ee6e2567d..858a5647af 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,7 @@ /** * Represents a database exception. */ -public class JdbcSQLNonTransientException extends SQLNonTransientException implements JdbcException { +public final class JdbcSQLNonTransientException extends SQLNonTransientException implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLSyntaxErrorException.java b/h2/src/main/org/h2/jdbc/JdbcSQLSyntaxErrorException.java index ca83bf42a9..97bb472f2a 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLSyntaxErrorException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLSyntaxErrorException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,7 @@ /** * Represents a database exception. */ -public class JdbcSQLSyntaxErrorException extends SQLSyntaxErrorException implements JdbcException { +public final class JdbcSQLSyntaxErrorException extends SQLSyntaxErrorException implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLTimeoutException.java b/h2/src/main/org/h2/jdbc/JdbcSQLTimeoutException.java index a85334f48c..7e8ee1a2a9 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLTimeoutException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLTimeoutException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,7 @@ /** * Represents a database exception. */ -public class JdbcSQLTimeoutException extends SQLTimeoutException implements JdbcException { +public final class JdbcSQLTimeoutException extends SQLTimeoutException implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLTransactionRollbackException.java b/h2/src/main/org/h2/jdbc/JdbcSQLTransactionRollbackException.java index 307356f3d2..34e54b36b8 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLTransactionRollbackException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLTransactionRollbackException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,8 @@ /** * Represents a database exception. */ -public class JdbcSQLTransactionRollbackException extends SQLTransactionRollbackException implements JdbcException { +public final class JdbcSQLTransactionRollbackException extends SQLTransactionRollbackException + implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLTransientException.java b/h2/src/main/org/h2/jdbc/JdbcSQLTransientException.java index 5394de0dab..6566d1d9a3 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLTransientException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLTransientException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -14,7 +14,7 @@ /** * Represents a database exception. */ -public class JdbcSQLTransientException extends SQLTransientException implements JdbcException { +public final class JdbcSQLTransientException extends SQLTransientException implements JdbcException { private static final long serialVersionUID = 1L; diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLXML.java b/h2/src/main/org/h2/jdbc/JdbcSQLXML.java index bd99d57411..83a0a6a6b9 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLXML.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLXML.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; @@ -16,14 +16,20 @@ import java.io.Writer; import java.sql.SQLException; import java.sql.SQLXML; +import java.util.HashMap; +import java.util.Map; +import javax.xml.XMLConstants; +import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.SAXParserFactory; import javax.xml.stream.XMLInputFactory; import javax.xml.stream.XMLOutputFactory; import javax.xml.transform.Result; import javax.xml.transform.Source; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerFactory; +import javax.xml.transform.URIResolver; import javax.xml.transform.dom.DOMResult; import javax.xml.transform.dom.DOMSource; import javax.xml.transform.sax.SAXResult; @@ -39,12 +45,27 @@ import org.h2.message.TraceObject; import org.h2.value.Value; import org.w3c.dom.Node; +import org.xml.sax.EntityResolver; import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; +import org.xml.sax.helpers.DefaultHandler; /** * Represents a SQLXML value. 
*/ -public class JdbcSQLXML extends JdbcLob implements SQLXML { +public final class JdbcSQLXML extends JdbcLob implements SQLXML { + + private static final Map secureFeatureMap = new HashMap<>(); + private static final EntityResolver NOOP_ENTITY_RESOLVER = (pubId, sysId) -> new InputSource(new StringReader("")); + private static final URIResolver NOOP_URI_RESOLVER = (href, base) -> new StreamSource(new StringReader("")); + + static { + secureFeatureMap.put(XMLConstants.FEATURE_SECURE_PROCESSING, true); + secureFeatureMap.put("http://apache.org/xml/features/disallow-doctype-decl", true); + secureFeatureMap.put("http://xml.org/sax/features/external-general-entities", false); + secureFeatureMap.put("http://xml.org/sax/features/external-parameter-entities", false); + secureFeatureMap.put("http://apache.org/xml/features/nonvalidating/load-external-dtd", false); + } private DOMResult domResult; @@ -55,6 +76,10 @@ public class JdbcSQLXML extends JdbcLob implements SQLXML { /** * INTERNAL + * @param conn to use + * @param value for this JdbcSQLXML + * @param state of the LOB + * @param id of the trace object */ public JdbcSQLXML(JdbcConnection conn, Value value, State state, int id) { super(conn, value, state, TraceObject.SQLXML, id); @@ -103,19 +128,47 @@ public Reader getCharacterStream() throws SQLException { public T getSource(Class sourceClass) throws SQLException { try { if (isDebugEnabled()) { - debugCodeCall( + debugCode( "getSource(" + (sourceClass != null ? 
sourceClass.getSimpleName() + ".class" : "null") + ')'); } checkReadable(); + // see https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html if (sourceClass == null || sourceClass == DOMSource.class) { DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); - return (T) new DOMSource(dbf.newDocumentBuilder().parse(new InputSource(value.getInputStream()))); + for (Map.Entry entry : secureFeatureMap.entrySet()) { + try { + dbf.setFeature(entry.getKey(), entry.getValue()); + } catch (Exception ignore) {/**/} + } + dbf.setXIncludeAware(false); + dbf.setExpandEntityReferences(false); + dbf.setAttribute(XMLConstants.ACCESS_EXTERNAL_SCHEMA, ""); + DocumentBuilder db = dbf.newDocumentBuilder(); + db.setEntityResolver(NOOP_ENTITY_RESOLVER); + return (T) new DOMSource(db.parse(new InputSource(value.getInputStream()))); } else if (sourceClass == SAXSource.class) { - return (T) new SAXSource(new InputSource(value.getInputStream())); + SAXParserFactory spf = SAXParserFactory.newInstance(); + for (Map.Entry entry : secureFeatureMap.entrySet()) { + try { + spf.setFeature(entry.getKey(), entry.getValue()); + } catch (Exception ignore) {/**/} + } + XMLReader reader = spf.newSAXParser().getXMLReader(); + reader.setEntityResolver(NOOP_ENTITY_RESOLVER); + return (T) new SAXSource(reader, new InputSource(value.getInputStream())); } else if (sourceClass == StAXSource.class) { XMLInputFactory xif = XMLInputFactory.newInstance(); + xif.setProperty(XMLInputFactory.SUPPORT_DTD, false); + xif.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, ""); + xif.setProperty("javax.xml.stream.isSupportingExternalEntities", false); return (T) new StAXSource(xif.createXMLStreamReader(value.getInputStream())); } else if (sourceClass == StreamSource.class) { + TransformerFactory tf = TransformerFactory.newInstance(); + tf.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, ""); + tf.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, ""); + 
tf.setURIResolver(NOOP_URI_RESOLVER); + tf.newTransformer().transform(new StreamSource(value.getInputStream()), + new SAXResult(new DefaultHandler())); return (T) new StreamSource(value.getInputStream()); } throw unsupported(sourceClass.getName()); @@ -164,8 +217,8 @@ public Writer setCharacterStream() throws SQLException { public T setResult(Class resultClass) throws SQLException { try { if (isDebugEnabled()) { - debugCodeCall( - "getSource(" + (resultClass != null ? resultClass.getSimpleName() + ".class" : "null") + ')'); + debugCode( + "setResult(" + (resultClass != null ? resultClass.getSimpleName() + ".class" : "null") + ')'); } checkEditable(); if (resultClass == null || resultClass == DOMResult.class) { diff --git a/h2/src/main/org/h2/jdbc/JdbcSavepoint.java b/h2/src/main/org/h2/jdbc/JdbcSavepoint.java index a7c57e9b2c..f08eabdbde 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSavepoint.java +++ b/h2/src/main/org/h2/jdbc/JdbcSavepoint.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -19,7 +19,7 @@ * rolled back. The tasks that where done before the savepoint are not rolled * back in this case. 
*/ -public class JdbcSavepoint extends TraceObject implements Savepoint { +public final class JdbcSavepoint extends TraceObject implements Savepoint { private static final String SYSTEM_SAVEPOINT_PREFIX = "SYSTEM_SAVEPOINT_"; @@ -65,7 +65,7 @@ void rollback() { checkValid(); conn.prepareCommand( "ROLLBACK TO SAVEPOINT " + getName(name, savepointId), - Integer.MAX_VALUE).executeUpdate(false); + Integer.MAX_VALUE).executeUpdate(null); } private void checkValid() { diff --git a/h2/src/main/org/h2/jdbc/JdbcStatement.java b/h2/src/main/org/h2/jdbc/JdbcStatement.java index 55bb7747fb..80ce508023 100644 --- a/h2/src/main/org/h2/jdbc/JdbcStatement.java +++ b/h2/src/main/org/h2/jdbc/JdbcStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -13,7 +13,7 @@ import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.engine.SessionInterface; +import org.h2.engine.Session; import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.message.TraceObject; @@ -26,33 +26,48 @@ /** * Represents a statement. + *

          + * Thread safety: the statement is not thread-safe. If the same statement is + * used by multiple threads access to it must be synchronized. The single + * synchronized block must include execution of the command and all operations + * with its result. + *

          + *
          + * synchronized (stat) {
          + *     try (ResultSet rs = stat.executeQuery(queryString)) {
          + *         while (rs.next) {
          + *             // Do something
          + *         }
          + *     }
          + * }
          + * synchronized (stat) {
          + *     updateCount = stat.executeUpdate(commandString);
          + * }
          + * 
          */ public class JdbcStatement extends TraceObject implements Statement, JdbcStatementBackwardsCompat { protected JdbcConnection conn; - protected SessionInterface session; + protected Session session; protected JdbcResultSet resultSet; - protected int maxRows; + protected long maxRows; protected int fetchSize = SysProperties.SERVER_RESULT_SET_FETCH_SIZE; - protected int updateCount; + protected long updateCount; protected JdbcResultSet generatedKeys; protected final int resultSetType; protected final int resultSetConcurrency; - protected final boolean closedByResultSet; private volatile CommandInterface executingCommand; - private int lastExecutedCommandType; private ArrayList batchCommands; private boolean escapeProcessing = true; private volatile boolean cancelled; + private boolean closeOnCompletion; - JdbcStatement(JdbcConnection conn, int id, int resultSetType, - int resultSetConcurrency, boolean closeWithResultSet) { + JdbcStatement(JdbcConnection conn, int id, int resultSetType, int resultSetConcurrency) { this.conn = conn; this.session = conn.getSession(); setTrace(session.getTrace(), TraceObject.STATEMENT, id); this.resultSetType = resultSetType; this.resultSetConcurrency = resultSetConcurrency; - this.closedByResultSet = closeWithResultSet; } /** @@ -68,8 +83,7 @@ public ResultSet executeQuery(String sql) throws SQLException { try { int id = getNextId(TraceObject.RESULT_SET); if (isDebugEnabled()) { - debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, - "executeQuery(" + quote(sql) + ")"); + debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "executeQuery(" + quote(sql) + ')'); } synchronized (session) { checkClosed(); @@ -92,8 +106,7 @@ public ResultSet executeQuery(String sql) throws SQLException { if (!lazy) { command.close(); } - resultSet = new JdbcResultSet(conn, this, command, result, id, - closedByResultSet, scrollable, updatable); + resultSet = new JdbcResultSet(conn, this, command, result, id, scrollable, updatable, false); 
} return resultSet; } catch (Exception e) { @@ -103,7 +116,8 @@ public ResultSet executeQuery(String sql) throws SQLException { /** * Executes a statement (insert, update, delete, create, drop) - * and returns the update count. + * and returns the update count. This method is not + * allowed for prepared statements. * If another result set exists for this statement, this will be closed * (even if this statement fails). * @@ -113,17 +127,21 @@ public ResultSet executeQuery(String sql) throws SQLException { * executing the statement. * * @param sql the SQL statement - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or + * {@link #SUCCESS_NO_INFO} if number of rows is too large for the + * {@code int} data type) * @throws SQLException if a database error occurred or a * select statement was executed + * @see #executeLargeUpdate(String) */ @Override - public int executeUpdate(String sql) throws SQLException { + public final int executeUpdate(String sql) throws SQLException { try { debugCodeCall("executeUpdate", sql); - return executeUpdateInternal(sql, false); + long updateCount = executeUpdateInternal(sql, null); + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } @@ -131,7 +149,8 @@ public int executeUpdate(String sql) throws SQLException { /** * Executes a statement (insert, update, delete, create, drop) - * and returns the update count. + * and returns the update count. This method is not + * allowed for prepared statements. * If another result set exists for this statement, this will be closed * (even if this statement fails). 
* @@ -141,53 +160,52 @@ public int executeUpdate(String sql) throws SQLException { * executing the statement. * * @param sql the SQL statement - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing) * @throws SQLException if a database error occurred or a * select statement was executed */ @Override - public long executeLargeUpdate(String sql) throws SQLException { + public final long executeLargeUpdate(String sql) throws SQLException { try { debugCodeCall("executeLargeUpdate", sql); - return executeUpdateInternal(sql, false); + return executeUpdateInternal(sql, null); } catch (Exception e) { throw logAndConvert(e); } } - private int executeUpdateInternal(String sql, Object generatedKeysRequest) throws SQLException { - checkClosedForWrite(); - try { - closeOldResultSet(); - sql = JdbcConnection.translateSQL(sql, escapeProcessing); - CommandInterface command = conn.prepareCommand(sql, fetchSize); - synchronized (session) { - setExecutingStatement(command); - try { - ResultWithGeneratedKeys result = command.executeUpdate( - conn.scopeGeneratedKeys() ? 
false : generatedKeysRequest); - updateCount = result.getUpdateCount(); - ResultInterface gk = result.getGeneratedKeys(); - if (gk != null) { - int id = getNextId(TraceObject.RESULT_SET); - generatedKeys = new JdbcResultSet(conn, this, command, gk, id, - false, true, false); - } - } finally { - setExecutingStatement(null); + private long executeUpdateInternal(String sql, Object generatedKeysRequest) { + if (getClass() != JdbcStatement.class) { + throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + } + checkClosed(); + closeOldResultSet(); + sql = JdbcConnection.translateSQL(sql, escapeProcessing); + CommandInterface command = conn.prepareCommand(sql, fetchSize); + synchronized (session) { + setExecutingStatement(command); + try { + ResultWithGeneratedKeys result = command.executeUpdate(generatedKeysRequest); + updateCount = result.getUpdateCount(); + ResultInterface gk = result.getGeneratedKeys(); + if (gk != null) { + int id = getNextId(TraceObject.RESULT_SET); + generatedKeys = new JdbcResultSet(conn, this, command, gk, id, true, false, false); } + } finally { + setExecutingStatement(null); } - command.close(); - return updateCount; - } finally { - afterWriting(); } + command.close(); + return updateCount; } /** - * Executes an arbitrary statement. If another result set exists for this + * Executes a statement and returns type of its result. This method is not + * allowed for prepared statements. + * If another result set exists for this * statement, this will be closed (even if this statement fails). * * If the statement is a create or drop and does not throw an exception, the @@ -196,10 +214,10 @@ private int executeUpdateInternal(String sql, Object generatedKeysRequest) throw * will be committed. 
* * @param sql the SQL statement to execute - * @return true if a result set is available, false if not + * @return true if result is a result set, false otherwise */ @Override - public boolean execute(String sql) throws SQLException { + public final boolean execute(String sql) throws SQLException { try { debugCodeCall("execute", sql); return executeInternal(sql, false); @@ -208,50 +226,46 @@ public boolean execute(String sql) throws SQLException { } } - private boolean executeInternal(String sql, Object generatedKeysRequest) throws SQLException { + private boolean executeInternal(String sql, Object generatedKeysRequest) { + if (getClass() != JdbcStatement.class) { + throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + } int id = getNextId(TraceObject.RESULT_SET); - checkClosedForWrite(); - try { - closeOldResultSet(); - sql = JdbcConnection.translateSQL(sql, escapeProcessing); - CommandInterface command = conn.prepareCommand(sql, fetchSize); - boolean lazy = false; - boolean returnsResultSet; - synchronized (session) { - setExecutingStatement(command); - try { - if (command.isQuery()) { - returnsResultSet = true; - boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY; - boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE; - ResultInterface result = command.executeQuery(maxRows, scrollable); - lazy = result.isLazy(); - resultSet = new JdbcResultSet(conn, this, command, result, id, - closedByResultSet, scrollable, updatable); - } else { - returnsResultSet = false; - ResultWithGeneratedKeys result = command.executeUpdate( - conn.scopeGeneratedKeys() ? 
false : generatedKeysRequest); - updateCount = result.getUpdateCount(); - ResultInterface gk = result.getGeneratedKeys(); - if (gk != null) { - generatedKeys = new JdbcResultSet(conn, this, command, gk, id, - false, true, false); - } - } - } finally { - if (!lazy) { - setExecutingStatement(null); + checkClosed(); + closeOldResultSet(); + sql = JdbcConnection.translateSQL(sql, escapeProcessing); + CommandInterface command = conn.prepareCommand(sql, fetchSize); + boolean lazy = false; + boolean returnsResultSet; + synchronized (session) { + setExecutingStatement(command); + try { + if (command.isQuery()) { + returnsResultSet = true; + boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY; + boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE; + ResultInterface result = command.executeQuery(maxRows, scrollable); + lazy = result.isLazy(); + resultSet = new JdbcResultSet(conn, this, command, result, id, scrollable, updatable, false); + } else { + returnsResultSet = false; + ResultWithGeneratedKeys result = command.executeUpdate(generatedKeysRequest); + updateCount = result.getUpdateCount(); + ResultInterface gk = result.getGeneratedKeys(); + if (gk != null) { + generatedKeys = new JdbcResultSet(conn, this, command, gk, id, true, false, false); } } + } finally { + if (!lazy) { + setExecutingStatement(null); + } } - if (!lazy) { - command.close(); - } - return returnsResultSet; - } finally { - afterWriting(); } + if (!lazy) { + command.close(); + } + return returnsResultSet; } /** @@ -278,17 +292,20 @@ public ResultSet getResultSet() throws SQLException { /** * Returns the last update count of this statement. * - * @return the update count (number of row affected by an insert, update or - * delete, or 0 if no rows or the statement was a create, drop, - * commit or rollback; -1 if the statement was a select). 
+ * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or -1 if + * statement was a query, or {@link #SUCCESS_NO_INFO} if number of + * rows is too large for the {@code int} data type) * @throws SQLException if this object is closed or invalid + * @see #getLargeUpdateCount() */ @Override - public int getUpdateCount() throws SQLException { + public final int getUpdateCount() throws SQLException { try { debugCodeCall("getUpdateCount"); checkClosed(); - return updateCount; + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } @@ -297,13 +314,14 @@ public int getUpdateCount() throws SQLException { /** * Returns the last update count of this statement. * - * @return the update count (number of row affected by an insert, update or - * delete, or 0 if no rows or the statement was a create, drop, - * commit or rollback; -1 if the statement was a select). 
+ * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or -1 if + * statement was a query) * @throws SQLException if this object is closed or invalid */ @Override - public long getLargeUpdateCount() throws SQLException { + public final long getLargeUpdateCount() throws SQLException { try { debugCodeCall("getLargeUpdateCount"); checkClosed(); @@ -322,17 +340,21 @@ public long getLargeUpdateCount() throws SQLException { public void close() throws SQLException { try { debugCodeCall("close"); - synchronized (session) { - closeOldResultSet(); - if (conn != null) { - conn = null; - } - } + closeInternal(); } catch (Exception e) { throw logAndConvert(e); } } + private void closeInternal() { + synchronized (session) { + closeOldResultSet(); + if (conn != null) { + conn = null; + } + } + } + /** * Returns the connection that created this object. * @@ -436,7 +458,7 @@ public int getMaxRows() throws SQLException { try { debugCodeCall("getMaxRows"); checkClosed(); - return maxRows; + return maxRows <= Integer.MAX_VALUE ? (int) maxRows : 0; } catch (Exception e) { throw logAndConvert(e); } @@ -493,7 +515,7 @@ public void setLargeMaxRows(long maxRows) throws SQLException { if (maxRows < 0) { throw DbException.getInvalidValueException("maxRows", maxRows); } - this.maxRows = maxRows <= Integer.MAX_VALUE ? 
(int) maxRows : 0; + this.maxRows = maxRows; } catch (Exception e) { throw logAndConvert(e); } @@ -622,7 +644,7 @@ public void setMaxFieldSize(int max) throws SQLException { public void setEscapeProcessing(boolean enable) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setEscapeProcessing("+enable+");"); + debugCode("setEscapeProcessing(" + enable + ')'); } checkClosed(); escapeProcessing = enable; @@ -755,46 +777,29 @@ public void clearBatch() throws SQLException { * If one of the batched statements fails, this database will continue. * * @return the array of update counts + * @see #executeLargeBatch() */ @Override public int[] executeBatch() throws SQLException { try { debugCodeCall("executeBatch"); - checkClosedForWrite(); - try { - if (batchCommands == null) { - // TODO batch: check what other database do if no commands - // are set - batchCommands = Utils.newSmallArrayList(); - } - int size = batchCommands.size(); - int[] result = new int[size]; - boolean error = false; - SQLException next = null; - for (int i = 0; i < size; i++) { - String sql = batchCommands.get(i); - try { - result[i] = executeUpdateInternal(sql, false); - } catch (Exception re) { - SQLException e = logAndConvert(re); - if (next == null) { - next = e; - } else { - e.setNextException(next); - next = e; - } - result[i] = Statement.EXECUTE_FAILED; - error = true; - } - } - batchCommands = null; - if (error) { - throw new JdbcBatchUpdateException(next, result); - } - return result; - } finally { - afterWriting(); + checkClosed(); + if (batchCommands == null) { + batchCommands = new ArrayList<>(); + } + int size = batchCommands.size(); + int[] result = new int[size]; + SQLException exception = new SQLException(); + for (int i = 0; i < size; i++) { + long updateCount = executeBatchElement(batchCommands.get(i), exception); + result[i] = updateCount <= Integer.MAX_VALUE ? 
(int) updateCount : SUCCESS_NO_INFO; } + batchCommands = null; + exception = exception.getNextException(); + if (exception != null) { + throw new JdbcBatchUpdateException(exception, result); + } + return result; } catch (Exception e) { throw logAndConvert(e); } @@ -808,56 +813,69 @@ public int[] executeBatch() throws SQLException { */ @Override public long[] executeLargeBatch() throws SQLException { - int[] intResult = executeBatch(); - int count = intResult.length; - long[] longResult = new long[count]; - for (int i = 0; i < count; i++) { - longResult[i] = intResult[i]; + try { + debugCodeCall("executeLargeBatch"); + checkClosed(); + if (batchCommands == null) { + batchCommands = new ArrayList<>(); + } + int size = batchCommands.size(); + long[] result = new long[size]; + SQLException exception = new SQLException(); + for (int i = 0; i < size; i++) { + result[i] = executeBatchElement(batchCommands.get(i), exception); + } + batchCommands = null; + exception = exception.getNextException(); + if (exception != null) { + throw new JdbcBatchUpdateException(exception, result); + } + return result; + } catch (Exception e) { + throw logAndConvert(e); } - return longResult; + } + + private long executeBatchElement(String sql, SQLException exception) { + long updateCount; + try { + updateCount = executeUpdateInternal(sql, null); + } catch (Exception e) { + exception.setNextException(logAndConvert(e)); + updateCount = Statement.EXECUTE_FAILED; + } + return updateCount; } /** - * Return a result set with generated keys from the latest executed command or - * an empty result set if keys were not generated or were not requested with - * {@link Statement#RETURN_GENERATED_KEYS}, column indexes, or column names. + * Return a result set with generated keys from the latest executed command + * or an empty result set if keys were not generated or were not requested + * with {@link Statement#RETURN_GENERATED_KEYS}, column indexes, or column + * names. *

          - * Generated keys are only returned from inserted rows from {@code INSERT}, - * {@code MERGE INTO}, and {@code MERGE INTO ... USING} commands. Generated keys - * are not returned if exact values of generated columns were specified - * explicitly in SQL command. All columns with inserted generated values are - * included in the result if command was executed with - * {@link Statement#RETURN_GENERATED_KEYS} parameter. + * Generated keys are only returned from from {@code INSERT}, + * {@code UPDATE}, {@code MERGE INTO}, and {@code MERGE INTO ... USING} + * commands. *

          *

          - * If SQL command inserts multiple rows with generated keys each such inserted - * row is returned. Batch methods are also supported. When multiple rows are - * returned each row contains only generated values for this row. It's possible - * to insert several rows with generated values in different columns with some - * specific commands, in this special case the returned result set contains all - * used columns, but each row will contain only generated values, columns that - * were not generated for this row will contain {@code null} values. + * If SQL command inserts or updates multiple rows with generated keys each + * such inserted or updated row is returned. Batch methods are also + * supported. *

          *

          - * H2 treats inserted value as generated in the following cases: + * When {@link Statement#RETURN_GENERATED_KEYS} is used H2 chooses columns + * to return automatically. The following columns are chosen: *

          *
            *
          • Columns with sequences including {@code IDENTITY} columns and columns - * with {@code AUTO_INCREMENT} if value was generated automatically (not - * specified in command).
          • - *
          • Columns with other default values that are not evaluated into constant - * expressions (like {@code DEFAULT RANDOM_UUID()}) also only if default value - * was inserted.
          • - *
          • Columns that were set by triggers.
          • - *
          • Columns with values specified in command with invocation of some sequence - * (like {@code INSERT INTO ... VALUES (NEXT VALUE FOR ...)}).
          • + * with {@code AUTO_INCREMENT}. + *
          • Columns with other default values that are not evaluated into + * constant expressions (like {@code DEFAULT RANDOM_UUID()}).
          • + *
          • Columns that are included into the PRIMARY KEY constraint.
          • *
          *

          * Exact required columns for the returning result set may be specified on - * execution of command with names or indexes of columns to limit output or - * reorder columns in result set. Specifying of some column has no effect on - * treatment of inserted values as generated or not. If some value is not - * determined to be generated it will not be returned even on explicit request. + * execution of command with names or indexes of columns. *

          * * @return the possibly empty result set with generated keys @@ -872,12 +890,7 @@ public ResultSet getGeneratedKeys() throws SQLException { } checkClosed(); if (generatedKeys == null) { - if (!conn.scopeGeneratedKeys() && session.isSupportsGeneratedKeys()) { - generatedKeys = new JdbcResultSet(conn, this, null, new SimpleResult(), id, false, true, false); - } else { - // Compatibility mode or an old server, so use SCOPE_IDENTITY() - generatedKeys = conn.getGeneratedKeys(this, id); - } + generatedKeys = new JdbcResultSet(conn, this, null, new SimpleResult(), id, true, false, false); } return generatedKeys; } catch (Exception e) { @@ -938,51 +951,57 @@ public boolean getMoreResults(int current) throws SQLException { } /** - * Executes a statement and returns the update count. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. * * @param sql the SQL statement * @param autoGeneratedKeys * {@link Statement#RETURN_GENERATED_KEYS} if generated keys should * be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if * generated keys should not be available - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or + * {@link #SUCCESS_NO_INFO} if number of rows is too large for the + * {@code int} data type) * @throws SQLException if a database error occurred or a * select statement was executed + * @see #executeLargeUpdate(String, int) */ @Override - public int executeUpdate(String sql, int autoGeneratedKeys) + public final int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { try { if (isDebugEnabled()) { - debugCode("executeUpdate("+quote(sql)+", "+autoGeneratedKeys+");"); 
+ debugCode("executeUpdate(" + quote(sql) + ", " + autoGeneratedKeys + ')'); } - return executeUpdateInternal(sql, autoGeneratedKeys == RETURN_GENERATED_KEYS); + long updateCount = executeUpdateInternal(sql, autoGeneratedKeys == RETURN_GENERATED_KEYS); + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } } /** - * Executes a statement and returns the update count. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. * * @param sql the SQL statement * @param autoGeneratedKeys * {@link Statement#RETURN_GENERATED_KEYS} if generated keys should * be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if * generated keys should not be available - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing) * @throws SQLException if a database error occurred or a * select statement was executed */ @Override - public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + public final long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { try { if (isDebugEnabled()) { - debugCode("executeLargeUpdate("+quote(sql)+", "+autoGeneratedKeys+");"); + debugCode("executeLargeUpdate(" + quote(sql) + ", " + autoGeneratedKeys + ')'); } return executeUpdateInternal(sql, autoGeneratedKeys == RETURN_GENERATED_KEYS); } catch (Exception e) { @@ -991,48 +1010,54 @@ public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLExce } /** - * Executes a statement and returns the update count. + * Executes a statement and returns the update count. 
This method is not + * allowed for prepared statements. * * @param sql the SQL statement * @param columnIndexes * an array of column indexes indicating the columns with generated * keys that should be returned from the inserted row - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or + * {@link #SUCCESS_NO_INFO} if number of rows is too large for the + * {@code int} data type) * @throws SQLException if a database error occurred or a * select statement was executed + * @see #executeLargeUpdate(String, int[]) */ @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + public final int executeUpdate(String sql, int[] columnIndexes) throws SQLException { try { if (isDebugEnabled()) { - debugCode("executeUpdate("+quote(sql)+", "+quoteIntArray(columnIndexes)+");"); + debugCode("executeUpdate(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ')'); } - return executeUpdateInternal(sql, columnIndexes); + long updateCount = executeUpdateInternal(sql, columnIndexes); + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } } /** - * Executes a statement and returns the update count. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. 
* * @param sql the SQL statement * @param columnIndexes * an array of column indexes indicating the columns with generated * keys that should be returned from the inserted row - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing) * @throws SQLException if a database error occurred or a * select statement was executed */ @Override - public long executeLargeUpdate(String sql, int columnIndexes[]) throws SQLException { + public final long executeLargeUpdate(String sql, int columnIndexes[]) throws SQLException { try { if (isDebugEnabled()) { - debugCode("executeLargeUpdate("+quote(sql)+", "+quoteIntArray(columnIndexes)+");"); + debugCode("executeLargeUpdate(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ')'); } return executeUpdateInternal(sql, columnIndexes); } catch (Exception e) { @@ -1041,32 +1066,38 @@ public long executeLargeUpdate(String sql, int columnIndexes[]) throws SQLExcept } /** - * Executes a statement and returns the update count. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. 
* * @param sql the SQL statement * @param columnNames * an array of column names indicating the columns with generated * keys that should be returned from the inserted row - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or + * {@link #SUCCESS_NO_INFO} if number of rows is too large for the + * {@code int} data type) * @throws SQLException if a database error occurred or a * select statement was executed + * @see #executeLargeUpdate(String, String[]) */ @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { + public final int executeUpdate(String sql, String[] columnNames) throws SQLException { try { if (isDebugEnabled()) { - debugCode("executeUpdate("+quote(sql)+", "+quoteArray(columnNames)+");"); + debugCode("executeUpdate(" + quote(sql) + ", " + quoteArray(columnNames) + ')'); } - return executeUpdateInternal(sql, columnNames); + long updateCount = executeUpdateInternal(sql, columnNames); + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } } /** - * Executes a statement and returns the update count. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. 
* * @param sql the SQL statement * @param columnNames @@ -1079,10 +1110,10 @@ public int executeUpdate(String sql, String[] columnNames) throws SQLException { * select statement was executed */ @Override - public long executeLargeUpdate(String sql, String columnNames[]) throws SQLException { + public final long executeLargeUpdate(String sql, String columnNames[]) throws SQLException { try { if (isDebugEnabled()) { - debugCode("executeLargeUpdate("+quote(sql)+", "+quoteArray(columnNames)+");"); + debugCode("executeLargeUpdate(" + quote(sql) + ", " + quoteArray(columnNames) + ')'); } return executeUpdateInternal(sql, columnNames); } catch (Exception e) { @@ -1091,24 +1122,23 @@ public long executeLargeUpdate(String sql, String columnNames[]) throws SQLExcep } /** - * Executes a statement and returns the update count. + * Executes a statement and returns type of its result. This method is not + * allowed for prepared statements. * * @param sql the SQL statement * @param autoGeneratedKeys * {@link Statement#RETURN_GENERATED_KEYS} if generated keys should * be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if * generated keys should not be available - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return true if result is a result set, false otherwise * @throws SQLException if a database error occurred or a * select statement was executed */ @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + public final boolean execute(String sql, int autoGeneratedKeys) throws SQLException { try { if (isDebugEnabled()) { - debugCode("execute("+quote(sql)+", "+autoGeneratedKeys+");"); + debugCode("execute(" + quote(sql) + ", " + autoGeneratedKeys + ')'); } return executeInternal(sql, autoGeneratedKeys == RETURN_GENERATED_KEYS); } catch (Exception e) { @@ -1117,23 +1147,22 @@ public boolean execute(String sql, 
int autoGeneratedKeys) throws SQLException { } /** - * Executes a statement and returns the update count. + * Executes a statement and returns type of its result. This method is not + * allowed for prepared statements. * * @param sql the SQL statement * @param columnIndexes * an array of column indexes indicating the columns with generated * keys that should be returned from the inserted row - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return true if result is a result set, false otherwise * @throws SQLException if a database error occurred or a * select statement was executed */ @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { + public final boolean execute(String sql, int[] columnIndexes) throws SQLException { try { if (isDebugEnabled()) { - debugCode("execute("+quote(sql)+", "+quoteIntArray(columnIndexes)+");"); + debugCode("execute(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ')'); } return executeInternal(sql, columnIndexes); } catch (Exception e) { @@ -1142,23 +1171,22 @@ public boolean execute(String sql, int[] columnIndexes) throws SQLException { } /** - * Executes a statement and returns the update count. + * Executes a statement and returns type of its result. This method is not + * allowed for prepared statements. 
* * @param sql the SQL statement * @param columnNames * an array of column names indicating the columns with generated * keys that should be returned from the inserted row - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return true if result is a result set, false otherwise * @throws SQLException if a database error occurred or a * select statement was executed */ @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { + public final boolean execute(String sql, String[] columnNames) throws SQLException { try { if (isDebugEnabled()) { - debugCode("execute("+quote(sql)+", "+quoteArray(columnNames)+");"); + debugCode("execute(" + quote(sql) + ", " + quoteArray(columnNames) + ')'); } return executeInternal(sql, columnNames); } catch (Exception e) { @@ -1183,89 +1211,79 @@ public int getResultSetHoldability() throws SQLException { } /** - * [Not supported] + * Specifies that this statement will be closed when its dependent result + * set is closed. + * + * @throws SQLException + * if this statement is closed */ @Override - public void closeOnCompletion() { - // not supported + public void closeOnCompletion() throws SQLException { + try { + debugCodeCall("closeOnCompletion"); + checkClosed(); + closeOnCompletion = true; + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * [Not supported] + * Returns whether this statement will be closed when its dependent result + * set is closed. 
+ * + * @return {@code true} if this statement will be closed when its dependent + * result set is closed + * @throws SQLException + * if this statement is closed */ @Override - public boolean isCloseOnCompletion() { - return true; + public boolean isCloseOnCompletion() throws SQLException { + try { + debugCodeCall("isCloseOnCompletion"); + checkClosed(); + return closeOnCompletion; + } catch (Exception e) { + throw logAndConvert(e); + } } - // ============================================================= - - /** - * Check if this connection is closed. - * The next operation is a read request. - * - * @return true if the session was re-connected - * @throws DbException if the connection or session is closed - */ - boolean checkClosed() { - return checkClosed(false); + void closeIfCloseOnCompletion() { + if (closeOnCompletion) { + try { + closeInternal(); + } catch (Exception e) { + // Don't re-throw + logAndConvert(e); + } + } } + // ============================================================= + /** * Check if this connection is closed. - * The next operation may be a write request. * - * @return true if the session was re-connected * @throws DbException if the connection or session is closed */ - boolean checkClosedForWrite() { - return checkClosed(true); - } - - /** - * INTERNAL. - * Check if the statement is closed. - * - * @param write if the next operation is possibly writing - * @return true if a reconnect was required - * @throws DbException if it is closed - */ - protected boolean checkClosed(boolean write) { + void checkClosed() { if (conn == null) { throw DbException.get(ErrorCode.OBJECT_CLOSED); } - conn.checkClosed(write); - SessionInterface s = conn.getSession(); - if (s != session) { - session = s; - trace = session.getTrace(); - return true; - } - return false; - } - - /** - * Called after each write operation. - */ - void afterWriting() { - if (conn != null) { - conn.afterWriting(); - } + conn.checkClosed(); } /** * INTERNAL. 
* Close and old result set if there is still one open. */ - protected void closeOldResultSet() throws SQLException { + protected void closeOldResultSet() { try { - if (!closedByResultSet) { - if (resultSet != null) { - resultSet.closeInternal(); - } - if (generatedKeys != null) { - generatedKeys.closeInternal(); - } + if (resultSet != null) { + resultSet.closeInternal(true); + } + if (generatedKeys != null) { + generatedKeys.closeInternal(true); } } finally { cancelled = false; @@ -1281,12 +1299,11 @@ protected void closeOldResultSet() throws SQLException { * * @param c the command */ - protected void setExecutingStatement(CommandInterface c) { + void setExecutingStatement(CommandInterface c) { if (c == null) { conn.setExecutingStatement(null); } else { conn.setExecutingStatement(this); - lastExecutedCommandType = c.getCommandType(); } executingCommand = c; } @@ -1305,14 +1322,6 @@ void onLazyResultSetClose(CommandInterface command, boolean closeCommand) { } } - /** - * INTERNAL. - * Get the command type of the last executed command. - */ - public int getLastExecutedCommandType() { - return lastExecutedCommandType; - } - /** * Returns whether this statement is closed. * @@ -1377,7 +1386,7 @@ public boolean isPoolable() { @Override public void setPoolable(boolean poolable) { if (isDebugEnabled()) { - debugCode("setPoolable("+poolable+");"); + debugCode("setPoolable(" + poolable + ')'); } } @@ -1388,6 +1397,8 @@ public void setPoolable(boolean poolable) { * if {@code true} identifier will be quoted unconditionally * @return specified identifier quoted if required, explicitly requested, or * if it was already quoted + * @throws NullPointerException + * if identifier is {@code null} * @throws SQLException * if identifier is not a valid identifier */ @@ -1396,32 +1407,56 @@ public String enquoteIdentifier(String identifier, boolean alwaysQuote) throws S if (isSimpleIdentifier(identifier)) { return alwaysQuote ? 
'"' + identifier + '"': identifier; } - int length = identifier.length(); - if (length > 0 && identifier.charAt(0) == '"') { - boolean quoted = true; - for (int i = 1; i < length; i++) { - if (identifier.charAt(i) == '"') { - quoted = !quoted; - } else if (!quoted) { - throw new SQLException(); + try { + int length = identifier.length(); + if (length > 0) { + if (identifier.charAt(0) == '"') { + checkQuotes(identifier, 1, length); + return identifier; + } else if (identifier.startsWith("U&\"") || identifier.startsWith("u&\"")) { + // Check validity of double quotes + checkQuotes(identifier, 3, length); + // Check validity of escape sequences + StringUtils.decodeUnicodeStringSQL(identifier, '\\'); + return identifier; } } - if (quoted) { - throw new SQLException(); + return StringUtils.quoteIdentifier(identifier); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private static void checkQuotes(String identifier, int offset, int length) { + boolean quoted = true; + for (int i = offset; i < length; i++) { + if (identifier.charAt(i) == '"') { + quoted = !quoted; + } else if (!quoted) { + throw DbException.get(ErrorCode.INVALID_NAME_1, identifier); } - return identifier; } - return StringUtils.quoteIdentifier(identifier); + if (quoted) { + throw DbException.get(ErrorCode.INVALID_NAME_1, identifier); + } } /** * @param identifier * identifier to check * @return is specified identifier may be used without quotes + * @throws NullPointerException + * if identifier is {@code null} */ @Override public boolean isSimpleIdentifier(String identifier) throws SQLException { - JdbcConnection.Settings settings = conn.getSettings(); + Session.StaticSettings settings; + try { + checkClosed(); + settings = conn.getStaticSettings(); + } catch (Exception e) { + throw logAndConvert(e); + } return ParserUtil.isSimpleIdentifier(identifier, settings.databaseToUpper, settings.databaseToLower); } diff --git a/h2/src/main/org/h2/jdbc/JdbcStatementBackwardsCompat.java 
b/h2/src/main/org/h2/jdbc/JdbcStatementBackwardsCompat.java index 021bd7fff4..5406337da0 100644 --- a/h2/src/main/org/h2/jdbc/JdbcStatementBackwardsCompat.java +++ b/h2/src/main/org/h2/jdbc/JdbcStatementBackwardsCompat.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -15,107 +15,6 @@ public interface JdbcStatementBackwardsCompat { // compatibility interface - // JDBC 4.2 - - /** - * Returns the last update count of this statement. - * - * @return the update count (number of row affected by an insert, update or - * delete, or 0 if no rows or the statement was a create, drop, - * commit or rollback; -1 if the statement was a select). - * @throws SQLException if this object is closed or invalid - */ - long getLargeUpdateCount() throws SQLException; - - /** - * Gets the maximum number of rows for a ResultSet. - * - * @param max the number of rows where 0 means no limit - * @throws SQLException if this object is closed - */ - void setLargeMaxRows(long max) throws SQLException; - - /** - * Gets the maximum number of rows for a ResultSet. - * - * @return the number of rows where 0 means no limit - * @throws SQLException if this object is closed - */ - long getLargeMaxRows() throws SQLException; - - /** - * Executes the batch. - * If one of the batched statements fails, this database will continue. - * - * @return the array of update counts - */ - long[] executeLargeBatch() throws SQLException; - - /** - * Executes a statement (insert, update, delete, create, drop) - * and returns the update count. - * If another result set exists for this statement, this will be closed - * (even if this statement fails). - * - * If auto commit is on, this statement will be committed. 
- * If the statement is a DDL statement (create, drop, alter) and does not - * throw an exception, the current transaction (if any) is committed after - * executing the statement. - * - * @param sql the SQL statement - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) - * @throws SQLException if a database error occurred or a - * select statement was executed - */ - long executeLargeUpdate(String sql) throws SQLException; - - /** - * Executes a statement and returns the update count. - * This method just calls executeUpdate(String sql) internally. - * The method getGeneratedKeys supports at most one columns and row. - * - * @param sql the SQL statement - * @param autoGeneratedKeys ignored - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) - * @throws SQLException if a database error occurred or a - * select statement was executed - */ - long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException; - - /** - * Executes a statement and returns the update count. - * This method just calls executeUpdate(String sql) internally. - * The method getGeneratedKeys supports at most one columns and row. - * - * @param sql the SQL statement - * @param columnIndexes ignored - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) - * @throws SQLException if a database error occurred or a - * select statement was executed - */ - long executeLargeUpdate(String sql, int columnIndexes[]) throws SQLException; - - /** - * Executes a statement and returns the update count. - * This method just calls executeUpdate(String sql) internally. - * The method getGeneratedKeys supports at most one columns and row. 
- * - * @param sql the SQL statement - * @param columnNames ignored - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) - * @throws SQLException if a database error occurred or a - * select statement was executed - */ - long executeLargeUpdate(String sql, String columnNames[]) throws SQLException; - // JDBC 4.3 (incomplete) /** @@ -126,6 +25,7 @@ public interface JdbcStatementBackwardsCompat { * @param alwaysQuote * if {@code true} identifier will be quoted unconditionally * @return specified identifier quoted if required or explicitly requested + * @throws SQLException on failure */ String enquoteIdentifier(String identifier, boolean alwaysQuote) throws SQLException; @@ -135,6 +35,7 @@ public interface JdbcStatementBackwardsCompat { * @param identifier * identifier to check * @return is specified identifier may be used without quotes + * @throws SQLException on failure */ boolean isSimpleIdentifier(String identifier) throws SQLException; } diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMeta.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMeta.java new file mode 100644 index 0000000000..0b7da247eb --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMeta.java @@ -0,0 +1,395 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.ResultInterface; + +/** + * Database meta information. 
+ */ +public abstract class DatabaseMeta { + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#nullsAreSortedHigh() + * @see java.sql.DatabaseMetaData#nullsAreSortedLow() + * @see java.sql.DatabaseMetaData#nullsAreSortedAtStart() + * @see java.sql.DatabaseMetaData#nullsAreSortedAtEnd() + * @return DefaultNullOrdering + */ + public abstract DefaultNullOrdering defaultNullOrdering(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getDatabaseProductVersion() + * @return product version as String + */ + public abstract String getDatabaseProductVersion(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getSQLKeywords() + * @return list of supported SQL keywords + */ + public abstract String getSQLKeywords(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getNumericFunctions() + * @return list of supported numeric functions + */ + public abstract String getNumericFunctions(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getStringFunctions() + * @return list of supported string functions + */ + public abstract String getStringFunctions(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getSystemFunctions() + * @return list of supported system functions + */ + public abstract String getSystemFunctions(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getTimeDateFunctions() + * @return list of supported time/date functions + */ + public abstract String getTimeDateFunctions(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getSearchStringEscape() + * @return search string escape sequence + */ + public abstract String getSearchStringEscape(); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param procedureNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getProcedures(String, String, String) + */ + public abstract ResultInterface 
getProcedures(String catalog, String schemaPattern, String procedureNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param procedureNamePattern "LIKE" style pattern to filter result + * @param columnNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getProcedureColumns(String, String, + * String, String) + */ + public abstract ResultInterface getProcedureColumns(String catalog, String schemaPattern, + String procedureNamePattern, String columnNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableNamePattern "LIKE" style pattern to filter result + * @param types String[] + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getTables(String, String, String, + * String[]) + */ + public abstract ResultInterface getTables(String catalog, String schemaPattern, String tableNamePattern, + String[] types); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getSchemas() + */ + public abstract ResultInterface getSchemas(); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getCatalogs() + */ + public abstract ResultInterface getCatalogs(); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getTableTypes() + */ + public abstract ResultInterface getTableTypes(); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableNamePattern "LIKE" style pattern to filter result + * @param columnNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getColumns(String, String, String, String) + */ + public abstract ResultInterface getColumns(String catalog, String schemaPattern, String 
tableNamePattern, + String columnNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @param columnNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getColumnPrivileges(String, String, + * String, String) + */ + public abstract ResultInterface getColumnPrivileges(String catalog, String schema, String table, + String columnNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getTablePrivileges(String, String, String) + */ + public abstract ResultInterface getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern); + + /** + * INTERNAL + * @param catalogPattern "LIKE" style pattern to filter result + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableName table of interest + * @param scope of interest + * @param nullable include nullable columns + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getBestRowIdentifier(String, String, + * String, int, boolean) + */ + public abstract ResultInterface getBestRowIdentifier(String catalogPattern, String schemaPattern, String tableName, + int scope, boolean nullable); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getVersionColumns(String, String, String) + */ + public abstract ResultInterface getVersionColumns(String catalog, String schema, String table); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getPrimaryKeys(String, String, String) + */ + public abstract 
ResultInterface getPrimaryKeys(String catalog, String schema, String table); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getImportedKeys(String, String, String) + */ + public abstract ResultInterface getImportedKeys(String catalog, String schema, String table); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getExportedKeys(String, String, String) + */ + public abstract ResultInterface getExportedKeys(String catalog, String schema, String table); + + /** + * INTERNAL + * @param primaryCatalog to inspect + * @param primarySchema to inspect + * @param primaryTable to inspect + * @param foreignCatalog to inspect + * @param foreignSchema to inspect + * @param foreignTable to inspect + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getCrossReference(String, String, String, + * String, String, String) + */ + public abstract ResultInterface getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getTypeInfo() + */ + public abstract ResultInterface getTypeInfo(); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @param unique only + * @param approximate allowed + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getIndexInfo(String, String, String, + * boolean, boolean) + */ + public abstract ResultInterface getIndexInfo(String catalog, String schema, String table, boolean unique, + boolean approximate); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param typeNamePattern 
"LIKE" style pattern to filter result + * @param types int[] + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getUDTs(String, String, String, int[]) + */ + public abstract ResultInterface getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param typeNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getSuperTypes(String, String, String) + */ + public abstract ResultInterface getSuperTypes(String catalog, String schemaPattern, String typeNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getSuperTables(String, String, String) + */ + public abstract ResultInterface getSuperTables(String catalog, String schemaPattern, String tableNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param typeNamePattern "LIKE" style pattern to filter result + * @param attributeNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getAttributes(String, String, String, + * String) + */ + public abstract ResultInterface getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getDatabaseMajorVersion() + */ + public abstract int getDatabaseMajorVersion(); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getDatabaseMinorVersion() + */ + public abstract int getDatabaseMinorVersion(); + + /** + * INTERNAL + * @param catalog to inspect + * 
@param schemaPattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getSchemas(String, String) + */ + public abstract ResultInterface getSchemas(String catalog, String schemaPattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param functionNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getFunctions(String, String, String) + */ + public abstract ResultInterface getFunctions(String catalog, String schemaPattern, String functionNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param functionNamePattern "LIKE" style pattern to filter result + * @param columnNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getFunctionColumns(String, String, String, + * String) + */ + public abstract ResultInterface getFunctionColumns(String catalog, String schemaPattern, // + String functionNamePattern, String columnNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableNamePattern "LIKE" style pattern to filter result + * @param columnNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getPseudoColumns(String, String, String, + * String) + */ + public abstract ResultInterface getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern); + +} diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLegacy.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLegacy.java new file mode 100644 index 0000000000..c33ff10c3c --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLegacy.java @@ -0,0 +1,691 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import java.sql.DatabaseMetaData; +import java.util.ArrayList; +import java.util.Arrays; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.engine.Constants; +import org.h2.engine.Session; +import org.h2.expression.ParameterInterface; +import org.h2.message.DbException; +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.ResultInterface; +import org.h2.util.StringUtils; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * Legacy implementation of database meta information. + */ +public final class DatabaseMetaLegacy extends DatabaseMetaLocalBase { + + private static final Value PERCENT = ValueVarchar.get("%"); + + private static final Value BACKSLASH = ValueVarchar.get("\\"); + + private static final Value YES = ValueVarchar.get("YES"); + + private static final Value NO = ValueVarchar.get("NO"); + + private static final Value SCHEMA_MAIN = ValueVarchar.get(Constants.SCHEMA_MAIN); + + private final Session session; + + public DatabaseMetaLegacy(Session session) { + this.session = session; + } + + @Override + public final DefaultNullOrdering defaultNullOrdering() { + return DefaultNullOrdering.LOW; + } + + @Override + public String getSQLKeywords() { + return "CURRENT_CATALOG," // + + "CURRENT_SCHEMA," // + + "GROUPS," // + + "IF,ILIKE,INTERSECTS," // + + "KEY," // + + "LIMIT," // + + "MINUS," // + + "OFFSET," // + + "QUALIFY," // + + "REGEXP,ROWNUM," // + + "SYSDATE,SYSTIME,SYSTIMESTAMP," // + + "TODAY,TOP,"// + + "_ROWID_"; + } + + @Override + public String getNumericFunctions() { + return getFunctions("Functions (Numeric)"); + } + + @Override + public String getStringFunctions() { + return getFunctions("Functions (String)"); + } + + @Override + public String 
getSystemFunctions() { + return getFunctions("Functions (System)"); + } + + @Override + public String getTimeDateFunctions() { + return getFunctions("Functions (Time and Date)"); + } + + private String getFunctions(String section) { + String sql = "SELECT TOPIC FROM INFORMATION_SCHEMA.HELP WHERE SECTION = ?"; + Value[] args = new Value[] { getString(section) }; + ResultInterface result = executeQuery(sql, args); + StringBuilder builder = new StringBuilder(); + while (result.next()) { + String s = result.currentRow()[0].getString().trim(); + String[] array = StringUtils.arraySplit(s, ',', true); + for (String a : array) { + if (builder.length() != 0) { + builder.append(','); + } + String f = a.trim(); + int spaceIndex = f.indexOf(' '); + if (spaceIndex >= 0) { + // remove 'Function' from 'INSERT Function' + StringUtils.trimSubstring(builder, f, 0, spaceIndex); + } else { + builder.append(f); + } + } + } + return builder.toString(); + } + + @Override + public String getSearchStringEscape() { + return "\\"; + } + + @Override + public ResultInterface getProcedures(String catalog, String schemaPattern, String procedureNamePattern) { + return executeQuery("SELECT " // + + "ALIAS_CATALOG PROCEDURE_CAT, " // + + "ALIAS_SCHEMA PROCEDURE_SCHEM, " // + + "ALIAS_NAME PROCEDURE_NAME, " // + + "COLUMN_COUNT NUM_INPUT_PARAMS, " // + + "ZERO() NUM_OUTPUT_PARAMS, " // + + "ZERO() NUM_RESULT_SETS, " // + + "REMARKS, " // + + "RETURNS_RESULT PROCEDURE_TYPE, " // + + "ALIAS_NAME SPECIFIC_NAME " // + + "FROM INFORMATION_SCHEMA.FUNCTION_ALIASES " // + + "WHERE ALIAS_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND ALIAS_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND ALIAS_NAME LIKE ?3 ESCAPE ?4 " // + + "ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, NUM_INPUT_PARAMS", // + getCatalogPattern(catalog), // + getSchemaPattern(schemaPattern), // + getPattern(procedureNamePattern), // + BACKSLASH); + } + + @Override + public ResultInterface getProcedureColumns(String catalog, String schemaPattern, String 
procedureNamePattern, + String columnNamePattern) { + return executeQuery("SELECT " // + + "ALIAS_CATALOG PROCEDURE_CAT, " // + + "ALIAS_SCHEMA PROCEDURE_SCHEM, " // + + "ALIAS_NAME PROCEDURE_NAME, " // + + "COLUMN_NAME, " // + + "COLUMN_TYPE, " // + + "DATA_TYPE, " // + + "TYPE_NAME, " // + + "PRECISION, " // + + "PRECISION LENGTH, " // + + "SCALE, " // + + "RADIX, " // + + "NULLABLE, " // + + "REMARKS, " // + + "COLUMN_DEFAULT COLUMN_DEF, " // + + "ZERO() SQL_DATA_TYPE, " // + + "ZERO() SQL_DATETIME_SUB, " // + + "ZERO() CHAR_OCTET_LENGTH, " // + + "POS ORDINAL_POSITION, " // + + "?1 IS_NULLABLE, " // + + "ALIAS_NAME SPECIFIC_NAME " // + + "FROM INFORMATION_SCHEMA.FUNCTION_COLUMNS " // + + "WHERE ALIAS_CATALOG LIKE ?2 ESCAPE ?6 " // + + "AND ALIAS_SCHEMA LIKE ?3 ESCAPE ?6 " // + + "AND ALIAS_NAME LIKE ?4 ESCAPE ?6 " // + + "AND COLUMN_NAME LIKE ?5 ESCAPE ?6 " // + + "ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, ORDINAL_POSITION", // + YES, // + getCatalogPattern(catalog), // + getSchemaPattern(schemaPattern), // + getPattern(procedureNamePattern), // + getPattern(columnNamePattern), // + BACKSLASH); + } + + @Override + public ResultInterface getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) { + int typesLength = types != null ? 
types.length : 0; + boolean includeSynonyms = types == null || Arrays.asList(types).contains("SYNONYM"); + // (1024 - 16) is enough for the most cases + StringBuilder select = new StringBuilder(1008); + if (includeSynonyms) { + select.append("SELECT " // + + "TABLE_CAT, " // + + "TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "TABLE_TYPE, " // + + "REMARKS, " // + + "TYPE_CAT, " // + + "TYPE_SCHEM, " // + + "TYPE_NAME, " // + + "SELF_REFERENCING_COL_NAME, " // + + "REF_GENERATION, " // + + "SQL " // + + "FROM (" // + + "SELECT " // + + "SYNONYM_CATALOG TABLE_CAT, " // + + "SYNONYM_SCHEMA TABLE_SCHEM, " // + + "SYNONYM_NAME as TABLE_NAME, " // + + "TYPE_NAME AS TABLE_TYPE, " // + + "REMARKS, " // + + "TYPE_NAME TYPE_CAT, " // + + "TYPE_NAME TYPE_SCHEM, " // + + "TYPE_NAME AS TYPE_NAME, " // + + "TYPE_NAME SELF_REFERENCING_COL_NAME, " // + + "TYPE_NAME REF_GENERATION, " // + + "NULL AS SQL " // + + "FROM INFORMATION_SCHEMA.SYNONYMS " // + + "WHERE SYNONYM_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND SYNONYM_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND SYNONYM_NAME LIKE ?3 ESCAPE ?4 " // + + "UNION "); + } + select.append("SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "TABLE_TYPE, " // + + "REMARKS, " // + + "TYPE_NAME TYPE_CAT, " // + + "TYPE_NAME TYPE_SCHEM, " // + + "TYPE_NAME, " // + + "TYPE_NAME SELF_REFERENCING_COL_NAME, " // + + "TYPE_NAME REF_GENERATION, " // + + "SQL " // + + "FROM INFORMATION_SCHEMA.TABLES " // + + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND TABLE_NAME LIKE ?3 ESCAPE ?4"); + if (typesLength > 0) { + select.append(" AND TABLE_TYPE IN("); + for (int i = 0; i < typesLength; i++) { + if (i > 0) { + select.append(", "); + } + select.append('?').append(i + 5); + } + select.append(')'); + } + if (includeSynonyms) { + select.append(')'); + } + Value[] args = new Value[typesLength + 4]; + args[0] = getCatalogPattern(catalog); + args[1] = 
getSchemaPattern(schemaPattern); + args[2] = getPattern(tableNamePattern); + args[3] = BACKSLASH; + for (int i = 0; i < typesLength; i++) { + args[i + 4] = getString(types[i]); + } + return executeQuery(select.append(" ORDER BY TABLE_TYPE, TABLE_SCHEM, TABLE_NAME").toString(), args); + } + + @Override + public ResultInterface getSchemas() { + return executeQuery("SELECT " // + + "SCHEMA_NAME TABLE_SCHEM, " // + + "CATALOG_NAME TABLE_CATALOG " // + + "FROM INFORMATION_SCHEMA.SCHEMATA " // + + "ORDER BY SCHEMA_NAME"); + } + + @Override + public ResultInterface getCatalogs() { + return executeQuery("SELECT CATALOG_NAME TABLE_CAT " // + + "FROM INFORMATION_SCHEMA.CATALOGS"); + } + + @Override + public ResultInterface getTableTypes() { + return executeQuery("SELECT " // + + "TYPE TABLE_TYPE " // + + "FROM INFORMATION_SCHEMA.TABLE_TYPES " // + + "ORDER BY TABLE_TYPE"); + } + + @Override + public ResultInterface getColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) { + return executeQuery("SELECT " // + + "TABLE_CAT, " // + + "TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "COLUMN_NAME, " // + + "DATA_TYPE, " // + + "TYPE_NAME, " // + + "COLUMN_SIZE, " // + + "BUFFER_LENGTH, " // + + "DECIMAL_DIGITS, " // + + "NUM_PREC_RADIX, " // + + "NULLABLE, " // + + "REMARKS, " // + + "COLUMN_DEF, " // + + "SQL_DATA_TYPE, " // + + "SQL_DATETIME_SUB, " // + + "CHAR_OCTET_LENGTH, " // + + "ORDINAL_POSITION, " // + + "IS_NULLABLE, " // + + "SCOPE_CATALOG, " // + + "SCOPE_SCHEMA, " // + + "SCOPE_TABLE, " // + + "SOURCE_DATA_TYPE, " // + + "IS_AUTOINCREMENT, " // + + "IS_GENERATEDCOLUMN " // + + "FROM (" // + + "SELECT " // + + "s.SYNONYM_CATALOG TABLE_CAT, " // + + "s.SYNONYM_SCHEMA TABLE_SCHEM, " // + + "s.SYNONYM_NAME TABLE_NAME, " // + + "c.COLUMN_NAME, " // + + "c.DATA_TYPE, " // + + "c.TYPE_NAME, " // + + "c.CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " // + + "c.CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " // + + "c.NUMERIC_SCALE DECIMAL_DIGITS, 
" // + + "c.NUMERIC_PRECISION_RADIX NUM_PREC_RADIX, " // + + "c.NULLABLE, " // + + "c.REMARKS, " // + + "c.COLUMN_DEFAULT COLUMN_DEF, " // + + "c.DATA_TYPE SQL_DATA_TYPE, " // + + "ZERO() SQL_DATETIME_SUB, " // + + "c.CHARACTER_OCTET_LENGTH CHAR_OCTET_LENGTH, " // + + "c.ORDINAL_POSITION, " // + + "c.IS_NULLABLE IS_NULLABLE, " // + + "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATALOG, " // + + "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_SCHEMA, " // + + "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_TABLE, " // + + "c.SOURCE_DATA_TYPE, " // + + "CASE WHEN c.SEQUENCE_NAME IS NULL THEN " // + + "CAST(?1 AS VARCHAR) ELSE CAST(?2 AS VARCHAR) END IS_AUTOINCREMENT, " // + + "CASE WHEN c.IS_COMPUTED THEN " // + + "CAST(?2 AS VARCHAR) ELSE CAST(?1 AS VARCHAR) END IS_GENERATEDCOLUMN " // + + "FROM INFORMATION_SCHEMA.COLUMNS c JOIN INFORMATION_SCHEMA.SYNONYMS s ON " // + + "s.SYNONYM_FOR = c.TABLE_NAME " // + + "AND s.SYNONYM_FOR_SCHEMA = c.TABLE_SCHEMA " // + + "WHERE s.SYNONYM_CATALOG LIKE ?3 ESCAPE ?7 " // + + "AND s.SYNONYM_SCHEMA LIKE ?4 ESCAPE ?7 " // + + "AND s.SYNONYM_NAME LIKE ?5 ESCAPE ?7 " // + + "AND c.COLUMN_NAME LIKE ?6 ESCAPE ?7 " // + + "UNION SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "COLUMN_NAME, " // + + "DATA_TYPE, " // + + "TYPE_NAME, " // + + "CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " // + + "CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " // + + "NUMERIC_SCALE DECIMAL_DIGITS, " // + + "NUMERIC_PRECISION_RADIX NUM_PREC_RADIX, " // + + "NULLABLE, " // + + "REMARKS, " // + + "COLUMN_DEFAULT COLUMN_DEF, " // + + "DATA_TYPE SQL_DATA_TYPE, " // + + "ZERO() SQL_DATETIME_SUB, " // + + "CHARACTER_OCTET_LENGTH CHAR_OCTET_LENGTH, " // + + "ORDINAL_POSITION, " // + + "IS_NULLABLE IS_NULLABLE, " // + + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATALOG, " // + + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_SCHEMA, " // + + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_TABLE, " // + + "SOURCE_DATA_TYPE, " // + + "CASE WHEN 
SEQUENCE_NAME IS NULL THEN " // + + "CAST(?1 AS VARCHAR) ELSE CAST(?2 AS VARCHAR) END IS_AUTOINCREMENT, " // + + "CASE WHEN IS_COMPUTED THEN " // + + "CAST(?2 AS VARCHAR) ELSE CAST(?1 AS VARCHAR) END IS_GENERATEDCOLUMN " // + + "FROM INFORMATION_SCHEMA.COLUMNS " // + + "WHERE TABLE_CATALOG LIKE ?3 ESCAPE ?7 " // + + "AND TABLE_SCHEMA LIKE ?4 ESCAPE ?7 " // + + "AND TABLE_NAME LIKE ?5 ESCAPE ?7 " // + + "AND COLUMN_NAME LIKE ?6 ESCAPE ?7) " // + + "ORDER BY TABLE_SCHEM, TABLE_NAME, ORDINAL_POSITION", // + NO, // + YES, // + getCatalogPattern(catalog), // + getSchemaPattern(schemaPattern), // + getPattern(tableNamePattern), // + getPattern(columnNamePattern), // + BACKSLASH); + } + + @Override + public ResultInterface getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) { + return executeQuery("SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "COLUMN_NAME, " // + + "GRANTOR, " // + + "GRANTEE, " // + + "PRIVILEGE_TYPE PRIVILEGE, " // + + "IS_GRANTABLE " // + + "FROM INFORMATION_SCHEMA.COLUMN_PRIVILEGES " // + + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?5 " // + + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?5 " // + + "AND TABLE_NAME = ?3 " // + + "AND COLUMN_NAME LIKE ?4 ESCAPE ?5 " // + + "ORDER BY COLUMN_NAME, PRIVILEGE", // + getCatalogPattern(catalog), // + getSchemaPattern(schema), // + getString(table), // + getPattern(columnNamePattern), // + BACKSLASH); + } + + @Override + public ResultInterface getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) { + return executeQuery("SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "GRANTOR, " // + + "GRANTEE, " // + + "PRIVILEGE_TYPE PRIVILEGE, " // + + "IS_GRANTABLE " // + + "FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES " // + + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND TABLE_NAME LIKE ?3 ESCAPE ?4 
" // + + "ORDER BY TABLE_SCHEM, TABLE_NAME, PRIVILEGE", // + getCatalogPattern(catalog), // + getSchemaPattern(schemaPattern), // + getPattern(tableNamePattern), // + BACKSLASH); + } + + @Override + public ResultInterface getBestRowIdentifier(String catalogPattern, String schemaPattern, String tableName, + int scope, boolean nullable) { + return executeQuery("SELECT " // + + "CAST(?1 AS SMALLINT) SCOPE, " // + + "C.COLUMN_NAME, " // + + "C.DATA_TYPE, " // + + "C.TYPE_NAME, " // + + "C.CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " // + + "C.CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " // + + "CAST(C.NUMERIC_SCALE AS SMALLINT) DECIMAL_DIGITS, " // + + "CAST(?2 AS SMALLINT) PSEUDO_COLUMN " // + + "FROM INFORMATION_SCHEMA.INDEXES I, " // + + "INFORMATION_SCHEMA.COLUMNS C " // + + "WHERE C.TABLE_NAME = I.TABLE_NAME " // + + "AND C.COLUMN_NAME = I.COLUMN_NAME " // + + "AND C.TABLE_CATALOG LIKE ?3 ESCAPE ?6 " // + + "AND C.TABLE_SCHEMA LIKE ?4 ESCAPE ?6 " // + + "AND C.TABLE_NAME = ?5 " // + + "AND I.PRIMARY_KEY = TRUE " // + + "ORDER BY SCOPE", // + // SCOPE + ValueInteger.get(DatabaseMetaData.bestRowSession), // + // PSEUDO_COLUMN + ValueInteger.get(DatabaseMetaData.bestRowNotPseudo), // + getCatalogPattern(catalogPattern), // + getSchemaPattern(schemaPattern), // + getString(tableName), // + BACKSLASH); + } + + @Override + public ResultInterface getPrimaryKeys(String catalog, String schema, String table) { + return executeQuery("SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "COLUMN_NAME, " // + + "ORDINAL_POSITION KEY_SEQ, " // + + "COALESCE(CONSTRAINT_NAME, INDEX_NAME) PK_NAME " // + + "FROM INFORMATION_SCHEMA.INDEXES " // + + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND TABLE_NAME = ?3 " // + + "AND PRIMARY_KEY = TRUE " // + + "ORDER BY COLUMN_NAME", // + getCatalogPattern(catalog), // + getSchemaPattern(schema), // + getString(table), // + BACKSLASH); + } + + 
@Override + public ResultInterface getImportedKeys(String catalog, String schema, String table) { + return executeQuery("SELECT " // + + "PKTABLE_CATALOG PKTABLE_CAT, " // + + "PKTABLE_SCHEMA PKTABLE_SCHEM, " // + + "PKTABLE_NAME PKTABLE_NAME, " // + + "PKCOLUMN_NAME, " // + + "FKTABLE_CATALOG FKTABLE_CAT, " // + + "FKTABLE_SCHEMA FKTABLE_SCHEM, " // + + "FKTABLE_NAME, " // + + "FKCOLUMN_NAME, " // + + "ORDINAL_POSITION KEY_SEQ, " // + + "UPDATE_RULE, " // + + "DELETE_RULE, " // + + "FK_NAME, " // + + "PK_NAME, " // + + "DEFERRABILITY " // + + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " // + + "WHERE FKTABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND FKTABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND FKTABLE_NAME = ?3 " // + + "ORDER BY PKTABLE_CAT, PKTABLE_SCHEM, PKTABLE_NAME, FK_NAME, KEY_SEQ", // + getCatalogPattern(catalog), // + getSchemaPattern(schema), // + getString(table), // + BACKSLASH); + } + + @Override + public ResultInterface getExportedKeys(String catalog, String schema, String table) { + return executeQuery("SELECT " // + + "PKTABLE_CATALOG PKTABLE_CAT, " // + + "PKTABLE_SCHEMA PKTABLE_SCHEM, " // + + "PKTABLE_NAME PKTABLE_NAME, " // + + "PKCOLUMN_NAME, " // + + "FKTABLE_CATALOG FKTABLE_CAT, " // + + "FKTABLE_SCHEMA FKTABLE_SCHEM, " // + + "FKTABLE_NAME, " // + + "FKCOLUMN_NAME, " // + + "ORDINAL_POSITION KEY_SEQ, " // + + "UPDATE_RULE, " // + + "DELETE_RULE, " // + + "FK_NAME, " // + + "PK_NAME, " // + + "DEFERRABILITY " // + + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " // + + "WHERE PKTABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND PKTABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND PKTABLE_NAME = ?3 " // + + "ORDER BY FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, KEY_SEQ", // + getCatalogPattern(catalog), // + getSchemaPattern(schema), // + getString(table), // + BACKSLASH); + } + + @Override + public ResultInterface getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String 
foreignTable) { + return executeQuery("SELECT " // + + "PKTABLE_CATALOG PKTABLE_CAT, " // + + "PKTABLE_SCHEMA PKTABLE_SCHEM, " // + + "PKTABLE_NAME PKTABLE_NAME, " // + + "PKCOLUMN_NAME, " // + + "FKTABLE_CATALOG FKTABLE_CAT, " // + + "FKTABLE_SCHEMA FKTABLE_SCHEM, " // + + "FKTABLE_NAME, " // + + "FKCOLUMN_NAME, " // + + "ORDINAL_POSITION KEY_SEQ, " // + + "UPDATE_RULE, " // + + "DELETE_RULE, " // + + "FK_NAME, " // + + "PK_NAME, " // + + "DEFERRABILITY " // + + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " // + + "WHERE PKTABLE_CATALOG LIKE ?1 ESCAPE ?7 " // + + "AND PKTABLE_SCHEMA LIKE ?2 ESCAPE ?7 " // + + "AND PKTABLE_NAME = ?3 " // + + "AND FKTABLE_CATALOG LIKE ?4 ESCAPE ?7 " // + + "AND FKTABLE_SCHEMA LIKE ?5 ESCAPE ?7 " // + + "AND FKTABLE_NAME = ?6 " // + + "ORDER BY FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, KEY_SEQ", // + getCatalogPattern(primaryCatalog), // + getSchemaPattern(primarySchema), // + getString(primaryTable), // + getCatalogPattern(foreignCatalog), // + getSchemaPattern(foreignSchema), // + getString(foreignTable), // + BACKSLASH); + } + + @Override + public ResultInterface getTypeInfo() { + return executeQuery("SELECT " // + + "TYPE_NAME, " // + + "DATA_TYPE, " // + + "PRECISION, " // + + "PREFIX LITERAL_PREFIX, " // + + "SUFFIX LITERAL_SUFFIX, " // + + "PARAMS CREATE_PARAMS, " // + + "NULLABLE, " // + + "CASE_SENSITIVE, " // + + "SEARCHABLE, " // + + "FALSE UNSIGNED_ATTRIBUTE, " // + + "FALSE FIXED_PREC_SCALE, " // + + "AUTO_INCREMENT, " // + + "TYPE_NAME LOCAL_TYPE_NAME, " // + + "MINIMUM_SCALE, " // + + "MAXIMUM_SCALE, " // + + "DATA_TYPE SQL_DATA_TYPE, " // + + "ZERO() SQL_DATETIME_SUB, " // + + "RADIX NUM_PREC_RADIX " // + + "FROM INFORMATION_SCHEMA.TYPE_INFO " // + + "ORDER BY DATA_TYPE, POS"); + } + + @Override + public ResultInterface getIndexInfo(String catalog, String schema, String table, boolean unique, + boolean approximate) { + String uniqueCondition = unique ? 
"NON_UNIQUE=FALSE" : "TRUE"; + return executeQuery("SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "NON_UNIQUE, " // + + "TABLE_CATALOG INDEX_QUALIFIER, " // + + "INDEX_NAME, " // + + "INDEX_TYPE TYPE, " // + + "ORDINAL_POSITION, " // + + "COLUMN_NAME, " // + + "ASC_OR_DESC, " // + // TODO meta data for number of unique values in an index + + "CARDINALITY, " // + + "PAGES, " // + + "FILTER_CONDITION, " // + + "SORT_TYPE " // + + "FROM INFORMATION_SCHEMA.INDEXES " // + + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND (" + uniqueCondition + ") " // + + "AND TABLE_NAME = ?3 " // + + "ORDER BY NON_UNIQUE, TYPE, TABLE_SCHEM, INDEX_NAME, ORDINAL_POSITION", // + getCatalogPattern(catalog), // + getSchemaPattern(schema), // + getString(table), // + BACKSLASH); + } + + @Override + public ResultInterface getSchemas(String catalog, String schemaPattern) { + return executeQuery("SELECT " // + + "SCHEMA_NAME TABLE_SCHEM, " // + + "CATALOG_NAME TABLE_CATALOG " // + + "FROM INFORMATION_SCHEMA.SCHEMATA " // + + "WHERE CATALOG_NAME LIKE ?1 ESCAPE ?3 " // + + "AND SCHEMA_NAME LIKE ?2 ESCAPE ?3 " // + + "ORDER BY SCHEMA_NAME", // + getCatalogPattern(catalog), // + getSchemaPattern(schemaPattern), // + BACKSLASH); + } + + @Override + public ResultInterface getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) { + return getPseudoColumnsResult(); + } + + private ResultInterface executeQuery(String sql, Value... 
args) { + checkClosed(); + synchronized (session) { + CommandInterface command = session.prepareCommand(sql, Integer.MAX_VALUE); + int l = args.length; + if (l > 0) { + ArrayList parameters = command.getParameters(); + for (int i = 0; i < l; i++) { + parameters.get(i).setValue(args[i], true); + } + } + ResultInterface result = command.executeQuery(0, false); + command.close(); + return result; + } + } + + @Override + void checkClosed() { + if (session.isClosed()) { + throw DbException.get(ErrorCode.DATABASE_CALLED_AT_SHUTDOWN); + } + } + + private Value getString(String string) { + return string != null ? ValueVarchar.get(string, session) : ValueNull.INSTANCE; + } + + private Value getPattern(String pattern) { + return pattern == null ? PERCENT : getString(pattern); + } + + private Value getSchemaPattern(String pattern) { + return pattern == null ? PERCENT : pattern.isEmpty() ? SCHEMA_MAIN : getString(pattern); + } + + private Value getCatalogPattern(String catalogPattern) { + return catalogPattern == null || catalogPattern.isEmpty() ? PERCENT : getString(catalogPattern); + } + +} diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocal.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocal.java new file mode 100644 index 0000000000..fa43376376 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocal.java @@ -0,0 +1,1523 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; + +import org.h2.api.ErrorCode; +import org.h2.command.dml.Help; +import org.h2.constraint.Constraint; +import org.h2.constraint.ConstraintActionType; +import org.h2.constraint.ConstraintReferential; +import org.h2.constraint.ConstraintUnique; +import org.h2.engine.Database; +import org.h2.engine.DbObject; +import org.h2.engine.Mode; +import org.h2.engine.Right; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.expression.condition.CompareLike; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.ResultInterface; +import org.h2.result.SimpleResult; +import org.h2.result.SortOrder; +import org.h2.schema.FunctionAlias; +import org.h2.schema.FunctionAlias.JavaMethod; +import org.h2.schema.Schema; +import org.h2.schema.SchemaObject; +import org.h2.schema.UserDefinedFunction; +import org.h2.table.Column; +import org.h2.table.IndexColumn; +import org.h2.table.Table; +import org.h2.table.TableSynonym; +import org.h2.util.MathUtils; +import org.h2.util.StringUtils; +import org.h2.util.Utils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueToObjectConverter2; +import org.h2.value.ValueVarchar; + +/** + * Local implementation of database meta information. 
+ */ +public final class DatabaseMetaLocal extends DatabaseMetaLocalBase { + + private static final Value YES = ValueVarchar.get("YES"); + + private static final Value NO = ValueVarchar.get("NO"); + + private static final ValueSmallint BEST_ROW_SESSION = ValueSmallint.get((short) DatabaseMetaData.bestRowSession); + + private static final ValueSmallint BEST_ROW_NOT_PSEUDO = ValueSmallint + .get((short) DatabaseMetaData.bestRowNotPseudo); + + private static final ValueInteger COLUMN_NO_NULLS = ValueInteger.get(DatabaseMetaData.columnNoNulls); + + private static final ValueSmallint COLUMN_NO_NULLS_SMALL = ValueSmallint + .get((short) DatabaseMetaData.columnNoNulls); + + private static final ValueInteger COLUMN_NULLABLE = ValueInteger.get(DatabaseMetaData.columnNullable); + + private static final ValueSmallint COLUMN_NULLABLE_UNKNOWN_SMALL = ValueSmallint + .get((short) DatabaseMetaData.columnNullableUnknown); + + private static final ValueSmallint IMPORTED_KEY_CASCADE = ValueSmallint + .get((short) DatabaseMetaData.importedKeyCascade); + + private static final ValueSmallint IMPORTED_KEY_RESTRICT = ValueSmallint + .get((short) DatabaseMetaData.importedKeyRestrict); + + private static final ValueSmallint IMPORTED_KEY_DEFAULT = ValueSmallint + .get((short) DatabaseMetaData.importedKeySetDefault); + + private static final ValueSmallint IMPORTED_KEY_SET_NULL = ValueSmallint + .get((short) DatabaseMetaData.importedKeySetNull); + + private static final ValueSmallint IMPORTED_KEY_NOT_DEFERRABLE = ValueSmallint + .get((short) DatabaseMetaData.importedKeyNotDeferrable); + + private static final ValueSmallint PROCEDURE_COLUMN_IN = ValueSmallint + .get((short) DatabaseMetaData.procedureColumnIn); + + private static final ValueSmallint PROCEDURE_COLUMN_RETURN = ValueSmallint + .get((short) DatabaseMetaData.procedureColumnReturn); + + private static final ValueSmallint PROCEDURE_NO_RESULT = ValueSmallint + .get((short) DatabaseMetaData.procedureNoResult); + + private static final 
ValueSmallint PROCEDURE_RETURNS_RESULT = ValueSmallint + .get((short) DatabaseMetaData.procedureReturnsResult); + + private static final ValueSmallint TABLE_INDEX_HASHED = ValueSmallint.get(DatabaseMetaData.tableIndexHashed); + + private static final ValueSmallint TABLE_INDEX_OTHER = ValueSmallint.get(DatabaseMetaData.tableIndexOther); + + // This list must be ordered + private static final String[] TABLE_TYPES = { "BASE TABLE", "GLOBAL TEMPORARY", "LOCAL TEMPORARY", "SYNONYM", + "VIEW" }; + + private static final ValueSmallint TYPE_NULLABLE = ValueSmallint.get((short) DatabaseMetaData.typeNullable); + + private static final ValueSmallint TYPE_SEARCHABLE = ValueSmallint.get((short) DatabaseMetaData.typeSearchable); + + private static final Value NO_USAGE_RESTRICTIONS = ValueVarchar.get("NO_USAGE_RESTRICTIONS"); + + private final SessionLocal session; + + public DatabaseMetaLocal(SessionLocal session) { + this.session = session; + } + + @Override + public final DefaultNullOrdering defaultNullOrdering() { + return session.getDatabase().getDefaultNullOrdering(); + } + + @Override + public String getSQLKeywords() { + StringBuilder builder = new StringBuilder(103).append( // + "CURRENT_CATALOG," // + + "CURRENT_SCHEMA," // + + "GROUPS," // + + "IF,ILIKE," // + + "KEY,"); + Mode mode = session.getMode(); + if (mode.limit) { + builder.append("LIMIT,"); + } + if (mode.minusIsExcept) { + builder.append("MINUS,"); + } + builder.append( // + "OFFSET," // + + "QUALIFY," // + + "REGEXP,ROWNUM,"); + if (mode.topInSelect || mode.topInDML) { + builder.append("TOP,"); + } + return builder.append("_ROWID_") // + .toString(); + } + + @Override + public String getNumericFunctions() { + return getFunctions("Functions (Numeric)"); + } + + @Override + public String getStringFunctions() { + return getFunctions("Functions (String)"); + } + + @Override + public String getSystemFunctions() { + return getFunctions("Functions (System)"); + } + + @Override + public String getTimeDateFunctions() 
{ + return getFunctions("Functions (Time and Date)"); + } + + private String getFunctions(String section) { + checkClosed(); + StringBuilder builder = new StringBuilder(); + try { + ResultSet rs = Help.getTable(); + while (rs.next()) { + if (rs.getString(1).trim().equals(section)) { + if (builder.length() != 0) { + builder.append(','); + } + String topic = rs.getString(2).trim(); + int spaceIndex = topic.indexOf(' '); + if (spaceIndex >= 0) { + // remove 'Function' from 'INSERT Function' + StringUtils.trimSubstring(builder, topic, 0, spaceIndex); + } else { + builder.append(topic); + } + } + } + } catch (Exception e) { + throw DbException.convert(e); + } + return builder.toString(); + } + + @Override + public String getSearchStringEscape() { + return session.getDatabase().getSettings().defaultEscape; + } + + @Override + public ResultInterface getProcedures(String catalog, String schemaPattern, String procedureNamePattern) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("PROCEDURE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("PROCEDURE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("PROCEDURE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("RESERVED1", TypeInfo.TYPE_NULL); + result.addColumn("RESERVED2", TypeInfo.TYPE_NULL); + result.addColumn("RESERVED3", TypeInfo.TYPE_NULL); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("PROCEDURE_TYPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("SPECIFIC_NAME", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike procedureLike = getLike(procedureNamePattern); + for (Schema s : getSchemasForPattern(schemaPattern)) { + Value schemaValue = getString(s.getName()); + for (UserDefinedFunction userDefinedFunction : s.getAllFunctionsAndAggregates()) { + String procedureName = userDefinedFunction.getName(); + if (procedureLike != null 
&& !procedureLike.test(procedureName)) { + continue; + } + Value procedureNameValue = getString(procedureName); + if (userDefinedFunction instanceof FunctionAlias) { + JavaMethod[] methods; + try { + methods = ((FunctionAlias) userDefinedFunction).getJavaMethods(); + } catch (DbException e) { + continue; + } + for (int i = 0; i < methods.length; i++) { + JavaMethod method = methods[i]; + TypeInfo typeInfo = method.getDataType(); + getProceduresAdd(result, catalogValue, schemaValue, procedureNameValue, + userDefinedFunction.getComment(), + typeInfo == null || typeInfo.getValueType() != Value.NULL ? PROCEDURE_RETURNS_RESULT + : PROCEDURE_NO_RESULT, + getString(procedureName + '_' + (i + 1))); + } + } else { + getProceduresAdd(result, catalogValue, schemaValue, procedureNameValue, + userDefinedFunction.getComment(), PROCEDURE_RETURNS_RESULT, procedureNameValue); + } + } + } + // PROCEDURE_CAT, PROCEDURE_SCHEM, PROCEDURE_NAME, SPECIFIC_ NAME + result.sortRows(new SortOrder(session, new int[] { 1, 2, 8 })); + return result; + } + + private void getProceduresAdd(SimpleResult result, Value catalogValue, Value schemaValue, Value procedureNameValue, + String comment, ValueSmallint procedureType, Value specificNameValue) { + result.addRow( + // PROCEDURE_CAT + catalogValue, + // PROCEDURE_SCHEM + schemaValue, + // PROCEDURE_NAME + procedureNameValue, + // RESERVED1 + ValueNull.INSTANCE, + // RESERVED2 + ValueNull.INSTANCE, + // RESERVED3 + ValueNull.INSTANCE, + // REMARKS + getString(comment), + // PROCEDURE_TYPE + procedureType, + // SPECIFIC_NAME + specificNameValue); + } + + @Override + public ResultInterface getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, + String columnNamePattern) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("PROCEDURE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("PROCEDURE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("PROCEDURE_NAME", TypeInfo.TYPE_VARCHAR); + 
result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_TYPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("PRECISION", TypeInfo.TYPE_INTEGER); + result.addColumn("LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("SCALE", TypeInfo.TYPE_SMALLINT); + result.addColumn("RADIX", TypeInfo.TYPE_SMALLINT); + result.addColumn("NULLABLE", TypeInfo.TYPE_SMALLINT); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_DEF", TypeInfo.TYPE_VARCHAR); + result.addColumn("SQL_DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("SQL_DATETIME_SUB", TypeInfo.TYPE_INTEGER); + result.addColumn("CHAR_OCTET_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER); + result.addColumn("IS_NULLABLE", TypeInfo.TYPE_VARCHAR); + result.addColumn("SPECIFIC_NAME", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike procedureLike = getLike(procedureNamePattern); + for (Schema s : getSchemasForPattern(schemaPattern)) { + Value schemaValue = getString(s.getName()); + for (UserDefinedFunction userDefinedFunction : s.getAllFunctionsAndAggregates()) { + if (!(userDefinedFunction instanceof FunctionAlias)) { + continue; + } + String procedureName = userDefinedFunction.getName(); + if (procedureLike != null && !procedureLike.test(procedureName)) { + continue; + } + Value procedureNameValue = getString(procedureName); + JavaMethod[] methods; + try { + methods = ((FunctionAlias) userDefinedFunction).getJavaMethods(); + } catch (DbException e) { + continue; + } + for (int i = 0, l = methods.length; i < l; i++) { + JavaMethod method = methods[i]; + Value specificNameValue = getString(procedureName + '_' + (i + 1)); + TypeInfo typeInfo = method.getDataType(); + if 
(typeInfo != null && typeInfo.getValueType() != Value.NULL) { + getProcedureColumnAdd(result, catalogValue, schemaValue, procedureNameValue, specificNameValue, + typeInfo, method.getClass().isPrimitive(), 0); + } + Class[] columnList = method.getColumnClasses(); + for (int o = 1, p = method.hasConnectionParam() ? 1 : 0, n = columnList.length; p < n; o++, p++) { + Class clazz = columnList[p]; + getProcedureColumnAdd(result, catalogValue, schemaValue, procedureNameValue, specificNameValue, + ValueToObjectConverter2.classToType(clazz), clazz.isPrimitive(), o); + } + } + } + } + // PROCEDURE_CAT, PROCEDURE_SCHEM, PROCEDURE_NAME, SPECIFIC_NAME, return + // value first + result.sortRows(new SortOrder(session, new int[] { 1, 2, 19 })); + return result; + } + + private void getProcedureColumnAdd(SimpleResult result, Value catalogValue, Value schemaValue, + Value procedureNameValue, Value specificNameValue, TypeInfo type, boolean notNull, int ordinal) { + int valueType = type.getValueType(); + DataType dt = DataType.getDataType(valueType); + ValueInteger precisionValue = ValueInteger.get(MathUtils.convertLongToInt(type.getPrecision())); + result.addRow( + // PROCEDURE_CAT + catalogValue, + // PROCEDURE_SCHEM + schemaValue, + // PROCEDURE_NAME + procedureNameValue, + // COLUMN_NAME + getString(ordinal == 0 ? "RESULT" : "P" + ordinal), + // COLUMN_TYPE + ordinal == 0 ? PROCEDURE_COLUMN_RETURN : PROCEDURE_COLUMN_IN, + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(type)), + // TYPE_NAME + getDataTypeName(type), + // PRECISION + precisionValue, + // LENGTH + precisionValue, + // SCALE + dt.supportsScale // + ? ValueSmallint.get(MathUtils.convertIntToShort(dt.defaultScale)) + : ValueNull.INSTANCE, + // RADIX + getRadix(valueType, true), + // NULLABLE + notNull ? 
COLUMN_NO_NULLS_SMALL : COLUMN_NULLABLE_UNKNOWN_SMALL, + // REMARKS + ValueNull.INSTANCE, + // COLUMN_DEF + ValueNull.INSTANCE, + // SQL_DATA_TYPE + ValueNull.INSTANCE, + // SQL_DATETIME_SUB + ValueNull.INSTANCE, + // CHAR_OCTET_LENGTH + DataType.isBinaryStringType(valueType) || DataType.isCharacterStringType(valueType) ? precisionValue + : ValueNull.INSTANCE, + // ORDINAL_POSITION + ValueInteger.get(ordinal), + // IS_NULLABLE + ValueVarchar.EMPTY, + // SPECIFIC_NAME + specificNameValue); + } + + @Override + public ResultInterface getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) { + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_TYPE", TypeInfo.TYPE_VARCHAR); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("SELF_REFERENCING_COL_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("REF_GENERATION", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + HashSet typesSet; + if (types != null) { + typesSet = new HashSet<>(8); + for (String type : types) { + int idx = Arrays.binarySearch(TABLE_TYPES, type); + if (idx >= 0) { + typesSet.add(TABLE_TYPES[idx]); + } else if (type.equals("TABLE")) { + typesSet.add("BASE TABLE"); + } + } + if (typesSet.isEmpty()) { + return result; + } + } else { + typesSet = null; + } + for (Schema schema : getSchemasForPattern(schemaPattern)) { + Value schemaValue = getString(schema.getName()); + for (SchemaObject object : getTablesForPattern(schema, tableNamePattern)) { + Value tableName = 
getString(object.getName()); + if (object instanceof Table) { + Table t = (Table) object; + if (!t.isHidden()) { + getTablesAdd(result, catalogValue, schemaValue, tableName, t, false, typesSet); + } + } else { + getTablesAdd(result, catalogValue, schemaValue, tableName, ((TableSynonym) object).getSynonymFor(), + true, typesSet); + } + } + } + // TABLE_TYPE, TABLE_CAT, TABLE_SCHEM, TABLE_NAME + result.sortRows(new SortOrder(session, new int[] { 3, 1, 2 })); + return result; + } + + private void getTablesAdd(SimpleResult result, Value catalogValue, Value schemaValue, Value tableName, Table t, + boolean synonym, HashSet typesSet) { + String type = synonym ? "SYNONYM" : t.getSQLTableType(); + if (typesSet != null && !typesSet.contains(type)) { + return; + } + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableName, + // TABLE_TYPE + getString(type), + // REMARKS + getString(t.getComment()), + // TYPE_CAT + ValueNull.INSTANCE, + // TYPE_SCHEM + ValueNull.INSTANCE, + // TYPE_NAME + ValueNull.INSTANCE, + // SELF_REFERENCING_COL_NAME + ValueNull.INSTANCE, + // REF_GENERATION + ValueNull.INSTANCE); + } + + @Override + public ResultInterface getSchemas() { + return getSchemas(null, null); + } + + @Override + public ResultInterface getCatalogs() { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addRow(getString(session.getDatabase().getShortName())); + return result; + } + + @Override + public ResultInterface getTableTypes() { + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_TYPE", TypeInfo.TYPE_VARCHAR); + // Order by TABLE_TYPE + result.addRow(getString("BASE TABLE")); + result.addRow(getString("GLOBAL TEMPORARY")); + result.addRow(getString("LOCAL TEMPORARY")); + result.addRow(getString("SYNONYM")); + result.addRow(getString("VIEW")); + return result; + } + + @Override + public ResultInterface getColumns(String catalog, String 
schemaPattern, String tableNamePattern, + String columnNamePattern) { + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_SIZE", TypeInfo.TYPE_INTEGER); + result.addColumn("BUFFER_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("DECIMAL_DIGITS", TypeInfo.TYPE_INTEGER); + result.addColumn("NUM_PREC_RADIX", TypeInfo.TYPE_INTEGER); + result.addColumn("NULLABLE", TypeInfo.TYPE_INTEGER); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_DEF", TypeInfo.TYPE_VARCHAR); + result.addColumn("SQL_DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("SQL_DATETIME_SUB", TypeInfo.TYPE_INTEGER); + result.addColumn("CHAR_OCTET_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER); + result.addColumn("IS_NULLABLE", TypeInfo.TYPE_VARCHAR); + result.addColumn("SCOPE_CATALOG", TypeInfo.TYPE_VARCHAR); + result.addColumn("SCOPE_SCHEMA", TypeInfo.TYPE_VARCHAR); + result.addColumn("SCOPE_TABLE", TypeInfo.TYPE_VARCHAR); + result.addColumn("SOURCE_DATA_TYPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("IS_AUTOINCREMENT", TypeInfo.TYPE_VARCHAR); + result.addColumn("IS_GENERATEDCOLUMN", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike columnLike = getLike(columnNamePattern); + for (Schema schema : getSchemasForPattern(schemaPattern)) { + Value schemaValue = getString(schema.getName()); + for (SchemaObject object : getTablesForPattern(schema, tableNamePattern)) { + Value tableName = getString(object.getName()); + if (object 
instanceof Table) { + Table t = (Table) object; + if (!t.isHidden()) { + getColumnsAdd(result, catalogValue, schemaValue, tableName, t, columnLike); + } + } else { + TableSynonym s = (TableSynonym) object; + Table t = s.getSynonymFor(); + getColumnsAdd(result, catalogValue, schemaValue, tableName, t, columnLike); + } + } + } + // TABLE_CAT, TABLE_SCHEM, TABLE_NAME, ORDINAL_POSITION + result.sortRows(new SortOrder(session, new int[] { 1, 2, 16 })); + return result; + } + + private void getColumnsAdd(SimpleResult result, Value catalogValue, Value schemaValue, Value tableName, Table t, + CompareLike columnLike) { + int ordinal = 0; + for (Column c : t.getColumns()) { + if (!c.getVisible()) { + continue; + } + ordinal++; + String name = c.getName(); + if (columnLike != null && !columnLike.test(name)) { + continue; + } + TypeInfo type = c.getType(); + ValueInteger precision = ValueInteger.get(MathUtils.convertLongToInt(type.getPrecision())); + boolean nullable = c.isNullable(), isGenerated = c.isGenerated(); + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableName, + // COLUMN_NAME + getString(name), + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(type)), + // TYPE_NAME + getDataTypeName(type), + // COLUMN_SIZE + precision, + // BUFFER_LENGTH + ValueNull.INSTANCE, + // DECIMAL_DIGITS + ValueInteger.get(type.getScale()), + // NUM_PREC_RADIX + getRadix(type.getValueType(), false), + // NULLABLE + nullable ? COLUMN_NULLABLE : COLUMN_NO_NULLS, + // REMARKS + getString(c.getComment()), + // COLUMN_DEF + isGenerated ? ValueNull.INSTANCE : getString(c.getDefaultSQL()), + // SQL_DATA_TYPE (unused) + ValueNull.INSTANCE, + // SQL_DATETIME_SUB (unused) + ValueNull.INSTANCE, + // CHAR_OCTET_LENGTH + precision, + // ORDINAL_POSITION + ValueInteger.get(ordinal), + // IS_NULLABLE + nullable ? 
YES : NO, + // SCOPE_CATALOG + ValueNull.INSTANCE, + // SCOPE_SCHEMA + ValueNull.INSTANCE, + // SCOPE_TABLE + ValueNull.INSTANCE, + // SOURCE_DATA_TYPE + ValueNull.INSTANCE, + // IS_AUTOINCREMENT + c.isIdentity() ? YES : NO, + // IS_GENERATEDCOLUMN + isGenerated ? YES : NO); + } + } + + @Override + public ResultInterface getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("GRANTOR", TypeInfo.TYPE_VARCHAR); + result.addColumn("GRANTEE", TypeInfo.TYPE_VARCHAR); + result.addColumn("PRIVILEGE", TypeInfo.TYPE_VARCHAR); + result.addColumn("IS_GRANTABLE", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike columnLike = getLike(columnNamePattern); + for (Right r : db.getAllRights()) { + DbObject object = r.getGrantedObject(); + if (!(object instanceof Table)) { + continue; + } + Table t = (Table) object; + if (t.isHidden()) { + continue; + } + String tableName = t.getName(); + if (!db.equalsIdentifiers(table, tableName)) { + continue; + } + Schema s = t.getSchema(); + if (!checkSchema(schema, s)) { + continue; + } + addPrivileges(result, catalogValue, s.getName(), tableName, r.getGrantee(), r.getRightMask(), columnLike, + t.getColumns()); + } + // COLUMN_NAME, PRIVILEGE + result.sortRows(new SortOrder(session, new int[] { 3, 6 })); + return result; + } + + @Override + public ResultInterface getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) { + checkClosed(); + SimpleResult result 
= new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("GRANTOR", TypeInfo.TYPE_VARCHAR); + result.addColumn("GRANTEE", TypeInfo.TYPE_VARCHAR); + result.addColumn("PRIVILEGE", TypeInfo.TYPE_VARCHAR); + result.addColumn("IS_GRANTABLE", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike schemaLike = getLike(schemaPattern); + CompareLike tableLike = getLike(tableNamePattern); + for (Right r : db.getAllRights()) { + DbObject object = r.getGrantedObject(); + if (!(object instanceof Table)) { + continue; + } + Table table = (Table) object; + if (table.isHidden()) { + continue; + } + String tableName = table.getName(); + if (tableLike != null && !tableLike.test(tableName)) { + continue; + } + Schema schema = table.getSchema(); + String schemaName = schema.getName(); + if (schemaPattern != null) { + if (schemaPattern.isEmpty()) { + if (schema != db.getMainSchema()) { + continue; + } + } else { + if (!schemaLike.test(schemaName)) { + continue; + } + } + } + addPrivileges(result, catalogValue, schemaName, tableName, r.getGrantee(), r.getRightMask(), null, null); + } + // TABLE_CAT, TABLE_SCHEM, TABLE_NAME, PRIVILEGE + result.sortRows(new SortOrder(session, new int[] { 1, 2, 5 })); + return result; + } + + private void addPrivileges(SimpleResult result, Value catalogValue, String schemaName, String tableName, + DbObject grantee, int rightMask, CompareLike columnLike, Column[] columns) { + Value schemaValue = getString(schemaName); + Value tableValue = getString(tableName); + Value granteeValue = getString(grantee.getName()); + boolean isAdmin = grantee.getType() == DbObject.USER && ((User) grantee).isAdmin(); + if ((rightMask & Right.SELECT) != 0) { + addPrivilege(result, catalogValue, 
schemaValue, tableValue, granteeValue, "SELECT", isAdmin, columnLike, + columns); + } + if ((rightMask & Right.INSERT) != 0) { + addPrivilege(result, catalogValue, schemaValue, tableValue, granteeValue, "INSERT", isAdmin, columnLike, + columns); + } + if ((rightMask & Right.UPDATE) != 0) { + addPrivilege(result, catalogValue, schemaValue, tableValue, granteeValue, "UPDATE", isAdmin, columnLike, + columns); + } + if ((rightMask & Right.DELETE) != 0) { + addPrivilege(result, catalogValue, schemaValue, tableValue, granteeValue, "DELETE", isAdmin, columnLike, + columns); + } + } + + private void addPrivilege(SimpleResult result, Value catalogValue, Value schemaValue, Value tableValue, + Value granteeValue, String right, boolean isAdmin, CompareLike columnLike, Column[] columns) { + if (columns == null) { + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableValue, + // GRANTOR + ValueNull.INSTANCE, + // GRANTEE + granteeValue, + // PRIVILEGE + getString(right), + // IS_GRANTABLE + isAdmin ? YES : NO); + } else { + for (Column column : columns) { + String columnName = column.getName(); + if (columnLike != null && !columnLike.test(columnName)) { + continue; + } + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableValue, + // COLUMN_NAME + getString(columnName), + // GRANTOR + ValueNull.INSTANCE, + // GRANTEE + granteeValue, + // PRIVILEGE + getString(right), + // IS_GRANTABLE + isAdmin ? 
YES : NO); + } + } + } + + @Override + public ResultInterface getBestRowIdentifier(String catalog, String schema, String table, int scope, + boolean nullable) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("SCOPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_SIZE", TypeInfo.TYPE_INTEGER); + result.addColumn("BUFFER_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("DECIMAL_DIGITS", TypeInfo.TYPE_SMALLINT); + result.addColumn("PSEUDO_COLUMN", TypeInfo.TYPE_SMALLINT); + if (!checkCatalogName(catalog)) { + return result; + } + for (Schema s : getSchemas(schema)) { + Table t = s.findTableOrView(session, table); + if (t == null || t.isHidden()) { + continue; + } + ArrayList constraints = t.getConstraints(); + if (constraints == null) { + continue; + } + for (Constraint constraint : constraints) { + if (constraint.getConstraintType() != Constraint.Type.PRIMARY_KEY) { + continue; + } + IndexColumn[] columns = ((ConstraintUnique) constraint).getColumns(); + for (int i = 0, l = columns.length; i < l; i++) { + IndexColumn ic = columns[i]; + Column c = ic.column; + TypeInfo type = c.getType(); + DataType dt = DataType.getDataType(type.getValueType()); + result.addRow( + // SCOPE + BEST_ROW_SESSION, + // COLUMN_NAME + getString(c.getName()), + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(type)), + // TYPE_NAME + getDataTypeName(type), + // COLUMN_SIZE + ValueInteger.get(MathUtils.convertLongToInt(type.getPrecision())), + // BUFFER_LENGTH + ValueNull.INSTANCE, + // DECIMAL_DIGITS + dt.supportsScale ? 
ValueSmallint.get(MathUtils.convertIntToShort(type.getScale())) + : ValueNull.INSTANCE, + // PSEUDO_COLUMN + BEST_ROW_NOT_PSEUDO); + } + } + } + // Order by SCOPE (always the same) + return result; + } + + private Value getDataTypeName(TypeInfo typeInfo) { + return getString(typeInfo.getDeclaredTypeName()); + } + + @Override + public ResultInterface getPrimaryKeys(String catalog, String schema, String table) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("KEY_SEQ", TypeInfo.TYPE_SMALLINT); + result.addColumn("PK_NAME", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + for (Schema s : getSchemas(schema)) { + Table t = s.findTableOrView(session, table); + if (t == null || t.isHidden()) { + continue; + } + ArrayList constraints = t.getConstraints(); + if (constraints == null) { + continue; + } + for (Constraint constraint : constraints) { + if (constraint.getConstraintType() != Constraint.Type.PRIMARY_KEY) { + continue; + } + Value schemaValue = getString(s.getName()); + Value tableValue = getString(t.getName()); + Value pkValue = getString(constraint.getName()); + IndexColumn[] columns = ((ConstraintUnique) constraint).getColumns(); + for (int i = 0, l = columns.length; i < l;) { + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableValue, + // COLUMN_NAME + getString(columns[i].column.getName()), + // KEY_SEQ + ValueSmallint.get((short) ++i), + // PK_NAME + pkValue); + } + } + } + // COLUMN_NAME + result.sortRows(new SortOrder(session, new int[] { 3 
})); + return result; + } + + @Override + public ResultInterface getImportedKeys(String catalog, String schema, String table) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + SimpleResult result = initCrossReferenceResult(); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + for (Schema s : getSchemas(schema)) { + Table t = s.findTableOrView(session, table); + if (t == null || t.isHidden()) { + continue; + } + ArrayList constraints = t.getConstraints(); + if (constraints == null) { + continue; + } + for (Constraint constraint : constraints) { + if (constraint.getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + ConstraintReferential fk = (ConstraintReferential) constraint; + Table fkTable = fk.getTable(); + if (fkTable != t) { + continue; + } + Table pkTable = fk.getRefTable(); + addCrossReferenceResult(result, catalogValue, pkTable.getSchema().getName(), pkTable, + fkTable.getSchema().getName(), fkTable, fk); + } + } + // PKTABLE_CAT, PKTABLE_SCHEM, PKTABLE_NAME, KEY_SEQ + result.sortRows(new SortOrder(session, new int[] { 1, 2, 8 })); + return result; + } + + @Override + public ResultInterface getExportedKeys(String catalog, String schema, String table) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + SimpleResult result = initCrossReferenceResult(); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + for (Schema s : getSchemas(schema)) { + Table t = s.findTableOrView(session, table); + if (t == null || t.isHidden()) { + continue; + } + ArrayList constraints = t.getConstraints(); + if (constraints == null) { + continue; + } + for (Constraint constraint : constraints) { + if (constraint.getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + 
ConstraintReferential fk = (ConstraintReferential) constraint; + Table pkTable = fk.getRefTable(); + if (pkTable != t) { + continue; + } + Table fkTable = fk.getTable(); + addCrossReferenceResult(result, catalogValue, pkTable.getSchema().getName(), pkTable, + fkTable.getSchema().getName(), fkTable, fk); + } + } + // FKTABLE_CAT FKTABLE_SCHEM, FKTABLE_NAME, KEY_SEQ + result.sortRows(new SortOrder(session, new int[] { 5, 6, 8 })); + return result; + } + + @Override + public ResultInterface getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) { + if (primaryTable == null) { + throw DbException.getInvalidValueException("primaryTable", null); + } + if (foreignTable == null) { + throw DbException.getInvalidValueException("foreignTable", null); + } + SimpleResult result = initCrossReferenceResult(); + if (!checkCatalogName(primaryCatalog) || !checkCatalogName(foreignCatalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + for (Schema s : getSchemas(foreignSchema)) { + Table t = s.findTableOrView(session, foreignTable); + if (t == null || t.isHidden()) { + continue; + } + ArrayList constraints = t.getConstraints(); + if (constraints == null) { + continue; + } + for (Constraint constraint : constraints) { + if (constraint.getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + ConstraintReferential fk = (ConstraintReferential) constraint; + Table fkTable = fk.getTable(); + if (fkTable != t) { + continue; + } + Table pkTable = fk.getRefTable(); + if (!db.equalsIdentifiers(pkTable.getName(), primaryTable)) { + continue; + } + Schema pkSchema = pkTable.getSchema(); + if (!checkSchema(primarySchema, pkSchema)) { + continue; + } + addCrossReferenceResult(result, catalogValue, pkSchema.getName(), pkTable, + fkTable.getSchema().getName(), fkTable, fk); + } + } + // FKTABLE_CAT FKTABLE_SCHEM, 
FKTABLE_NAME, KEY_SEQ + result.sortRows(new SortOrder(session, new int[] { 5, 6, 8 })); + return result; + } + + private SimpleResult initCrossReferenceResult() { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("PKTABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("PKTABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("PKTABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("PKCOLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("FKTABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("FKTABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("FKTABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("FKCOLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("KEY_SEQ", TypeInfo.TYPE_SMALLINT); + result.addColumn("UPDATE_RULE", TypeInfo.TYPE_SMALLINT); + result.addColumn("DELETE_RULE", TypeInfo.TYPE_SMALLINT); + result.addColumn("FK_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("PK_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DEFERRABILITY", TypeInfo.TYPE_SMALLINT); + return result; + } + + private void addCrossReferenceResult(SimpleResult result, Value catalog, String pkSchema, Table pkTable, + String fkSchema, Table fkTable, ConstraintReferential fk) { + Value pkSchemaValue = getString(pkSchema); + Value pkTableValue = getString(pkTable.getName()); + Value fkSchemaValue = getString(fkSchema); + Value fkTableValue = getString(fkTable.getName()); + IndexColumn[] pkCols = fk.getRefColumns(); + IndexColumn[] fkCols = fk.getColumns(); + Value update = getRefAction(fk.getUpdateAction()); + Value delete = getRefAction(fk.getDeleteAction()); + Value fkNameValue = getString(fk.getName()); + Value pkNameValue = getString(fk.getReferencedConstraint().getName()); + for (int j = 0, len = fkCols.length; j < len; j++) { + result.addRow( + // PKTABLE_CAT + catalog, + // PKTABLE_SCHEM + pkSchemaValue, + // PKTABLE_NAME + pkTableValue, + // PKCOLUMN_NAME + getString(pkCols[j].column.getName()), + // FKTABLE_CAT + catalog, 
+ // FKTABLE_SCHEM + fkSchemaValue, + // FKTABLE_NAME + fkTableValue, + // FKCOLUMN_NAME + getString(fkCols[j].column.getName()), + // KEY_SEQ + ValueSmallint.get((short) (j + 1)), + // UPDATE_RULE + update, + // DELETE_RULE + delete, + // FK_NAME + fkNameValue, + // PK_NAME + pkNameValue, + // DEFERRABILITY + IMPORTED_KEY_NOT_DEFERRABLE); + } + } + + private static ValueSmallint getRefAction(ConstraintActionType action) { + switch (action) { + case CASCADE: + return IMPORTED_KEY_CASCADE; + case RESTRICT: + return IMPORTED_KEY_RESTRICT; + case SET_DEFAULT: + return IMPORTED_KEY_DEFAULT; + case SET_NULL: + return IMPORTED_KEY_SET_NULL; + default: + throw DbException.getInternalError("action=" + action); + } + } + + @Override + public ResultInterface getTypeInfo() { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("PRECISION", TypeInfo.TYPE_INTEGER); + result.addColumn("LITERAL_PREFIX", TypeInfo.TYPE_VARCHAR); + result.addColumn("LITERAL_SUFFIX", TypeInfo.TYPE_VARCHAR); + result.addColumn("CREATE_PARAMS", TypeInfo.TYPE_VARCHAR); + result.addColumn("NULLABLE", TypeInfo.TYPE_SMALLINT); + result.addColumn("CASE_SENSITIVE", TypeInfo.TYPE_BOOLEAN); + result.addColumn("SEARCHABLE", TypeInfo.TYPE_SMALLINT); + result.addColumn("UNSIGNED_ATTRIBUTE", TypeInfo.TYPE_BOOLEAN); + result.addColumn("FIXED_PREC_SCALE", TypeInfo.TYPE_BOOLEAN); + result.addColumn("AUTO_INCREMENT", TypeInfo.TYPE_BOOLEAN); + result.addColumn("LOCAL_TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("MINIMUM_SCALE", TypeInfo.TYPE_SMALLINT); + result.addColumn("MAXIMUM_SCALE", TypeInfo.TYPE_SMALLINT); + result.addColumn("SQL_DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("SQL_DATETIME_SUB", TypeInfo.TYPE_INTEGER); + result.addColumn("NUM_PREC_RADIX", TypeInfo.TYPE_INTEGER); + for (int i = 1, l = Value.TYPE_COUNT; i < l; i++) { + DataType t = 
DataType.getDataType(i); + Value name = getString(Value.getTypeName(t.type)); + result.addRow( + // TYPE_NAME + name, + // DATA_TYPE + ValueInteger.get(t.sqlType), + // PRECISION + ValueInteger.get(MathUtils.convertLongToInt(t.maxPrecision)), + // LITERAL_PREFIX + getString(t.prefix), + // LITERAL_SUFFIX + getString(t.suffix), + // CREATE_PARAMS + getString(t.params), + // NULLABLE + TYPE_NULLABLE, + // CASE_SENSITIVE + ValueBoolean.get(t.caseSensitive), + // SEARCHABLE + TYPE_SEARCHABLE, + // UNSIGNED_ATTRIBUTE + ValueBoolean.FALSE, + // FIXED_PREC_SCALE + ValueBoolean.get(t.type == Value.NUMERIC), + // AUTO_INCREMENT + ValueBoolean.FALSE, + // LOCAL_TYPE_NAME + name, + // MINIMUM_SCALE + ValueSmallint.get(MathUtils.convertIntToShort(t.minScale)), + // MAXIMUM_SCALE + ValueSmallint.get(MathUtils.convertIntToShort(t.maxScale)), + // SQL_DATA_TYPE (unused) + ValueNull.INSTANCE, + // SQL_DATETIME_SUB (unused) + ValueNull.INSTANCE, + // NUM_PREC_RADIX + getRadix(t.type, false)); + } + // DATA_TYPE, better types first + result.sortRows(new SortOrder(session, new int[] { 1 })); + return result; + } + + private static Value getRadix(int valueType, boolean small) { + if (DataType.isNumericType(valueType)) { + int radix = valueType == Value.NUMERIC || valueType == Value.DECFLOAT ? 10 : 2; + return small ? 
ValueSmallint.get((short) radix) : ValueInteger.get(radix); + } + return ValueNull.INSTANCE; + } + + @Override + public ResultInterface getIndexInfo(String catalog, String schema, String table, boolean unique, + boolean approximate) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("NON_UNIQUE", TypeInfo.TYPE_BOOLEAN); + result.addColumn("INDEX_QUALIFIER", TypeInfo.TYPE_VARCHAR); + result.addColumn("INDEX_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("ORDINAL_POSITION", TypeInfo.TYPE_SMALLINT); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("ASC_OR_DESC", TypeInfo.TYPE_VARCHAR); + result.addColumn("CARDINALITY", TypeInfo.TYPE_BIGINT); + result.addColumn("PAGES", TypeInfo.TYPE_BIGINT); + result.addColumn("FILTER_CONDITION", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + for (Schema s : getSchemas(schema)) { + Table t = s.findTableOrView(session, table); + if (t == null || t.isHidden()) { + continue; + } + getIndexInfo(catalogValue, getString(s.getName()), t, unique, approximate, result, db); + } + // NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION + result.sortRows(new SortOrder(session, new int[] { 3, 6, 5, 7 })); + return result; + } + + private void getIndexInfo(Value catalogValue, Value schemaValue, Table table, boolean unique, boolean approximate, + SimpleResult result, Database db) { + ArrayList indexes = table.getIndexes(); + if (indexes != null) { + for (Index index : indexes) { + if (index.getCreateSQL() == null) { + continue; + } + int uniqueColumnCount = 
index.getUniqueColumnCount(); + if (unique && uniqueColumnCount == 0) { + continue; + } + Value tableValue = getString(table.getName()); + Value indexValue = getString(index.getName()); + IndexColumn[] cols = index.getIndexColumns(); + ValueSmallint type = index.getIndexType().isHash() ? TABLE_INDEX_HASHED : TABLE_INDEX_OTHER; + for (int i = 0, l = cols.length; i < l; i++) { + IndexColumn c = cols[i]; + boolean nonUnique = i >= uniqueColumnCount; + if (unique && nonUnique) { + break; + } + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableValue, + // NON_UNIQUE + ValueBoolean.get(nonUnique), + // INDEX_QUALIFIER + catalogValue, + // INDEX_NAME + indexValue, + // TYPE + type, + // ORDINAL_POSITION + ValueSmallint.get((short) (i + 1)), + // COLUMN_NAME + getString(c.column.getName()), + // ASC_OR_DESC + getString((c.sortType & SortOrder.DESCENDING) != 0 ? "D" : "A"), + // CARDINALITY + ValueBigint.get(approximate // + ? index.getRowCountApproximation(session) + : index.getRowCount(session)), + // PAGES + ValueBigint.get(index.getDiskSpaceUsed() / db.getPageSize()), + // FILTER_CONDITION + ValueNull.INSTANCE); + } + } + } + } + + @Override + public ResultInterface getSchemas(String catalog, String schemaPattern) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_CATALOG", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + CompareLike schemaLike = getLike(schemaPattern); + Collection allSchemas = session.getDatabase().getAllSchemas(); + Value catalogValue = getString(session.getDatabase().getShortName()); + if (schemaLike == null) { + for (Schema s : allSchemas) { + result.addRow(getString(s.getName()), catalogValue); + } + } else { + for (Schema s : allSchemas) { + String name = s.getName(); + if (schemaLike.test(name)) { + result.addRow(getString(s.getName()), catalogValue); + } + } + } + // 
TABLE_CATALOG, TABLE_SCHEM + result.sortRows(new SortOrder(session, new int[] { 0 })); + return result; + } + + @Override + public ResultInterface getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) { + SimpleResult result = getPseudoColumnsResult(); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike columnLike = getLike(columnNamePattern); + for (Schema schema : getSchemasForPattern(schemaPattern)) { + Value schemaValue = getString(schema.getName()); + for (SchemaObject object : getTablesForPattern(schema, tableNamePattern)) { + Value tableName = getString(object.getName()); + if (object instanceof Table) { + Table t = (Table) object; + if (!t.isHidden()) { + getPseudoColumnsAdd(result, catalogValue, schemaValue, tableName, t, columnLike); + } + } else { + TableSynonym s = (TableSynonym) object; + Table t = s.getSynonymFor(); + getPseudoColumnsAdd(result, catalogValue, schemaValue, tableName, t, columnLike); + } + } + } + // TABLE_CAT, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME + result.sortRows(new SortOrder(session, new int[] { 1, 2, 3 })); + return result; + } + + private void getPseudoColumnsAdd(SimpleResult result, Value catalogValue, Value schemaValue, Value tableName, + Table t, CompareLike columnLike) { + Column rowId = t.getRowIdColumn(); + if (rowId != null) { + getPseudoColumnsAdd(result, catalogValue, schemaValue, tableName, columnLike, rowId); + } + for (Column c : t.getColumns()) { + if (!c.getVisible()) { + getPseudoColumnsAdd(result, catalogValue, schemaValue, tableName, columnLike, c); + } + } + } + + private void getPseudoColumnsAdd(SimpleResult result, Value catalogValue, Value schemaValue, Value tableName, + CompareLike columnLike, Column c) { + String name = c.getName(); + if (columnLike != null && !columnLike.test(name)) { + return; + } + TypeInfo type = c.getType(); + ValueInteger 
precision = ValueInteger.get(MathUtils.convertLongToInt(type.getPrecision())); + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableName, + // COLUMN_NAME + getString(name), + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(type)), + // COLUMN_SIZE + precision, + // DECIMAL_DIGITS + ValueInteger.get(type.getScale()), + // NUM_PREC_RADIX + getRadix(type.getValueType(), false), + // COLUMN_USAGE + NO_USAGE_RESTRICTIONS, + // REMARKS + getString(c.getComment()), + // CHAR_OCTET_LENGTH + precision, + // IS_NULLABLE + c.isNullable() ? YES : NO); + } + + @Override + void checkClosed() { + if (session.isClosed()) { + throw DbException.get(ErrorCode.DATABASE_CALLED_AT_SHUTDOWN); + } + } + + Value getString(String string) { + return string != null ? ValueVarchar.get(string, session) : ValueNull.INSTANCE; + } + + private boolean checkCatalogName(String catalog) { + if (catalog != null && !catalog.isEmpty()) { + Database db = session.getDatabase(); + return db.equalsIdentifiers(catalog, db.getShortName()); + } + return true; + } + + private Collection getSchemas(String schema) { + Database db = session.getDatabase(); + if (schema == null) { + return db.getAllSchemas(); + } else if (schema.isEmpty()) { + return Collections.singleton(db.getMainSchema()); + } else { + Schema s = db.findSchema(schema); + if (s != null) { + return Collections.singleton(s); + } + return Collections.emptySet(); + } + } + + private Collection getSchemasForPattern(String schemaPattern) { + Database db = session.getDatabase(); + if (schemaPattern == null) { + return db.getAllSchemas(); + } else if (schemaPattern.isEmpty()) { + return Collections.singleton(db.getMainSchema()); + } else { + ArrayList list = Utils.newSmallArrayList(); + CompareLike like = getLike(schemaPattern); + for (Schema s : db.getAllSchemas()) { + if (like.test(s.getName())) { + list.add(s); + } + } + return list; + } + } + + private Collection 
getTablesForPattern(Schema schema, String tablePattern) { + Collection
          TO_CHAR(datetime) function
          InputOutputClosest {@link SimpleDateFormat} Equivalent
          - / , . ; : "text"Reproduced verbatim.'text'
          tables = schema.getAllTablesAndViews(session); + Collection synonyms = schema.getAllSynonyms(); + if (tablePattern == null) { + if (tables.isEmpty()) { + return synonyms; + } else if (synonyms.isEmpty()) { + return tables; + } + ArrayList list = new ArrayList<>(tables.size() + synonyms.size()); + list.addAll(tables); + list.addAll(synonyms); + return list; + } else if (tables.isEmpty() && synonyms.isEmpty()) { + return Collections.emptySet(); + } else { + ArrayList list = Utils.newSmallArrayList(); + CompareLike like = getLike(tablePattern); + for (Table t : tables) { + if (like.test(t.getName())) { + list.add(t); + } + } + for (TableSynonym t : synonyms) { + if (like.test(t.getName())) { + list.add(t); + } + } + return list; + } + } + + private boolean checkSchema(String schemaName, Schema schema) { + if (schemaName == null) { + return true; + } else if (schemaName.isEmpty()) { + return schema == session.getDatabase().getMainSchema(); + } else { + return session.getDatabase().equalsIdentifiers(schemaName, schema.getName()); + } + } + + private CompareLike getLike(String pattern) { + if (pattern == null) { + return null; + } + CompareLike like = new CompareLike(session.getDatabase().getCompareMode(), "\\", null, false, false, null, // + null, CompareLike.LikeType.LIKE); + like.initPattern(pattern, '\\'); + return like; + } + +} diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocalBase.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocalBase.java new file mode 100644 index 0000000000..70a96e669e --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocalBase.java @@ -0,0 +1,173 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import org.h2.engine.Constants; +import org.h2.result.ResultInterface; +import org.h2.result.SimpleResult; +import org.h2.value.TypeInfo; + +/** + * Base implementation of database meta information. + */ +abstract class DatabaseMetaLocalBase extends DatabaseMeta { + + @Override + public final String getDatabaseProductVersion() { + return Constants.FULL_VERSION; + } + + @Override + public final ResultInterface getVersionColumns(String catalog, String schema, String table) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("SCOPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_SIZE", TypeInfo.TYPE_INTEGER); + result.addColumn("BUFFER_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("DECIMAL_DIGITS", TypeInfo.TYPE_SMALLINT); + result.addColumn("PSEUDO_COLUMN", TypeInfo.TYPE_SMALLINT); + return result; + } + + @Override + public final ResultInterface getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TYPE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("CLASS_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("BASE_TYPE", TypeInfo.TYPE_SMALLINT); + return result; + } + + @Override + public final ResultInterface getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TYPE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_SCHEM", TypeInfo.TYPE_VARCHAR); + 
result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("SUPERTYPE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("SUPERTYPE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("SUPERTYPE_NAME", TypeInfo.TYPE_VARCHAR); + return result; + } + + @Override + public final ResultInterface getSuperTables(String catalog, String schemaPattern, String tableNamePattern) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("SUPERTABLE_NAME", TypeInfo.TYPE_VARCHAR); + return result; + } + + @Override + public final ResultInterface getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TYPE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("ATTR_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("ATTR_TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("ATTR_SIZE", TypeInfo.TYPE_INTEGER); + result.addColumn("DECIMAL_DIGITS", TypeInfo.TYPE_INTEGER); + result.addColumn("NUM_PREC_RADIX", TypeInfo.TYPE_INTEGER); + result.addColumn("NULLABLE", TypeInfo.TYPE_INTEGER); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("ATTR_DEF", TypeInfo.TYPE_VARCHAR); + result.addColumn("SQL_DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("SQL_DATETIME_SUB", TypeInfo.TYPE_INTEGER); + result.addColumn("CHAR_OCTET_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER); + result.addColumn("IS_NULLABLE", TypeInfo.TYPE_VARCHAR); + result.addColumn("SCOPE_CATALOG", TypeInfo.TYPE_VARCHAR); + result.addColumn("SCOPE_SCHEMA", 
TypeInfo.TYPE_VARCHAR); + result.addColumn("SCOPE_TABLE", TypeInfo.TYPE_VARCHAR); + result.addColumn("SOURCE_DATA_TYPE", TypeInfo.TYPE_SMALLINT); + return result; + } + + @Override + public final int getDatabaseMajorVersion() { + return Constants.VERSION_MAJOR; + } + + @Override + public final int getDatabaseMinorVersion() { + return Constants.VERSION_MINOR; + } + + @Override + public final ResultInterface getFunctions(String catalog, String schemaPattern, String functionNamePattern) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("FUNCTION_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("FUNCTION_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("FUNCTION_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("FUNCTION_TYPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("SPECIFIC_NAME", TypeInfo.TYPE_VARCHAR); + return result; + } + + @Override + public final ResultInterface getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, + String columnNamePattern) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("FUNCTION_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("FUNCTION_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("FUNCTION_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_TYPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("PRECISION", TypeInfo.TYPE_INTEGER); + result.addColumn("LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("SCALE", TypeInfo.TYPE_SMALLINT); + result.addColumn("RADIX", TypeInfo.TYPE_SMALLINT); + result.addColumn("NULLABLE", TypeInfo.TYPE_SMALLINT); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("CHAR_OCTET_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("ORDINAL_POSITION", 
TypeInfo.TYPE_INTEGER); + result.addColumn("IS_NULLABLE", TypeInfo.TYPE_VARCHAR); + result.addColumn("SPECIFIC_NAME", TypeInfo.TYPE_VARCHAR); + return result; + } + + final SimpleResult getPseudoColumnsResult() { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("COLUMN_SIZE", TypeInfo.TYPE_INTEGER); + result.addColumn("DECIMAL_DIGITS", TypeInfo.TYPE_INTEGER); + result.addColumn("NUM_PREC_RADIX", TypeInfo.TYPE_INTEGER); + result.addColumn("COLUMN_USAGE", TypeInfo.TYPE_VARCHAR); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("CHAR_OCTET_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("IS_NULLABLE", TypeInfo.TYPE_VARCHAR); + return result; + } + + abstract void checkClosed(); + +} diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMetaRemote.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaRemote.java new file mode 100644 index 0000000000..8c099838ae --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaRemote.java @@ -0,0 +1,383 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import java.io.IOException; +import java.util.ArrayList; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionRemote; +import org.h2.message.DbException; +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.ResultInterface; +import org.h2.result.ResultRemote; +import org.h2.value.Transfer; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * Remote implementation of database meta information. + */ +public class DatabaseMetaRemote extends DatabaseMeta { + + static final int DEFAULT_NULL_ORDERING = 0; + + static final int GET_DATABASE_PRODUCT_VERSION = 1; + + static final int GET_SQL_KEYWORDS = 2; + + static final int GET_NUMERIC_FUNCTIONS = 3; + + static final int GET_STRING_FUNCTIONS = 4; + + static final int GET_SYSTEM_FUNCTIONS = 5; + + static final int GET_TIME_DATE_FUNCTIONS = 6; + + static final int GET_SEARCH_STRING_ESCAPE = 7; + + static final int GET_PROCEDURES_3 = 8; + + static final int GET_PROCEDURE_COLUMNS_4 = 9; + + static final int GET_TABLES_4 = 10; + + static final int GET_SCHEMAS = 11; + + static final int GET_CATALOGS = 12; + + static final int GET_TABLE_TYPES = 13; + + static final int GET_COLUMNS_4 = 14; + + static final int GET_COLUMN_PRIVILEGES_4 = 15; + + static final int GET_TABLE_PRIVILEGES_3 = 16; + + static final int GET_BEST_ROW_IDENTIFIER_5 = 17; + + static final int GET_VERSION_COLUMNS_3 = 18; + + static final int GET_PRIMARY_KEYS_3 = 19; + + static final int GET_IMPORTED_KEYS_3 = 20; + + static final int GET_EXPORTED_KEYS_3 = 21; + + static final int GET_CROSS_REFERENCE_6 = 22; + + static final int GET_TYPE_INFO = 23; + + static final int GET_INDEX_INFO_5 = 24; + + static final int GET_UDTS_4 = 25; + + static final int GET_SUPER_TYPES_3 = 26; + + static final int 
GET_SUPER_TABLES_3 = 27; + + static final int GET_ATTRIBUTES_4 = 28; + + static final int GET_DATABASE_MAJOR_VERSION = 29; + + static final int GET_DATABASE_MINOR_VERSION = 30; + + static final int GET_SCHEMAS_2 = 31; + + static final int GET_FUNCTIONS_3 = 32; + + static final int GET_FUNCTION_COLUMNS_4 = 33; + + static final int GET_PSEUDO_COLUMNS_4 = 34; + + private final SessionRemote session; + + private final ArrayList transferList; + + public DatabaseMetaRemote(SessionRemote session, ArrayList transferList) { + this.session = session; + this.transferList = transferList; + } + + @Override + public DefaultNullOrdering defaultNullOrdering() { + ResultInterface result = executeQuery(DEFAULT_NULL_ORDERING); + result.next(); + return DefaultNullOrdering.valueOf(result.currentRow()[0].getInt()); + } + + @Override + public String getDatabaseProductVersion() { + ResultInterface result = executeQuery(GET_DATABASE_PRODUCT_VERSION); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getSQLKeywords() { + ResultInterface result = executeQuery(GET_SQL_KEYWORDS); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getNumericFunctions() { + ResultInterface result = executeQuery(GET_NUMERIC_FUNCTIONS); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getStringFunctions() { + ResultInterface result = executeQuery(GET_STRING_FUNCTIONS); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getSystemFunctions() { + ResultInterface result = executeQuery(GET_SYSTEM_FUNCTIONS); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getTimeDateFunctions() { + ResultInterface result = executeQuery(GET_TIME_DATE_FUNCTIONS); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getSearchStringEscape() { + ResultInterface result = 
executeQuery(GET_SEARCH_STRING_ESCAPE); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public ResultInterface getProcedures(String catalog, String schemaPattern, String procedureNamePattern) { + return executeQuery(GET_PROCEDURES_3, getString(catalog), getString(schemaPattern), + getString(procedureNamePattern)); + } + + @Override + public ResultInterface getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, + String columnNamePattern) { + return executeQuery(GET_PROCEDURE_COLUMNS_4, getString(catalog), getString(schemaPattern), + getString(procedureNamePattern), getString(columnNamePattern)); + } + + @Override + public ResultInterface getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) { + return executeQuery(GET_TABLES_4, getString(catalog), getString(schemaPattern), getString(tableNamePattern), + getStringArray(types)); + } + + @Override + public ResultInterface getSchemas() { + return executeQuery(GET_SCHEMAS); + } + + @Override + public ResultInterface getCatalogs() { + return executeQuery(GET_CATALOGS); + } + + @Override + public ResultInterface getTableTypes() { + return executeQuery(GET_TABLE_TYPES); + } + + @Override + public ResultInterface getColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) { + return executeQuery(GET_COLUMNS_4, getString(catalog), getString(schemaPattern), getString(tableNamePattern), + getString(columnNamePattern)); + } + + @Override + public ResultInterface getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) { + return executeQuery(GET_COLUMN_PRIVILEGES_4, getString(catalog), getString(schema), getString(table), + getString(columnNamePattern)); + } + + @Override + public ResultInterface getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) { + return executeQuery(GET_TABLE_PRIVILEGES_3, getString(catalog), 
getString(schemaPattern), // + getString(tableNamePattern)); + } + + @Override + public ResultInterface getBestRowIdentifier(String catalog, String schema, String table, int scope, + boolean nullable) { + return executeQuery(GET_BEST_ROW_IDENTIFIER_5, getString(catalog), getString(schema), getString(table), + ValueInteger.get(scope), ValueBoolean.get(nullable)); + } + + @Override + public ResultInterface getVersionColumns(String catalog, String schema, String table) { + return executeQuery(GET_VERSION_COLUMNS_3, getString(catalog), getString(schema), getString(table)); + } + + @Override + public ResultInterface getPrimaryKeys(String catalog, String schema, String table) { + return executeQuery(GET_PRIMARY_KEYS_3, getString(catalog), getString(schema), getString(table)); + } + + @Override + public ResultInterface getImportedKeys(String catalog, String schema, String table) { + return executeQuery(GET_IMPORTED_KEYS_3, getString(catalog), getString(schema), getString(table)); + } + + @Override + public ResultInterface getExportedKeys(String catalog, String schema, String table) { + return executeQuery(GET_EXPORTED_KEYS_3, getString(catalog), getString(schema), getString(table)); + } + + @Override + public ResultInterface getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) { + return executeQuery(GET_CROSS_REFERENCE_6, getString(primaryCatalog), getString(primarySchema), + getString(primaryTable), getString(foreignCatalog), getString(foreignSchema), getString(foreignTable)); + } + + @Override + public ResultInterface getTypeInfo() { + return executeQuery(GET_TYPE_INFO); + } + + @Override + public ResultInterface getIndexInfo(String catalog, String schema, String table, boolean unique, + boolean approximate) { + return executeQuery(GET_INDEX_INFO_5, getString(catalog), getString(schema), // + getString(table), ValueBoolean.get(unique), ValueBoolean.get(approximate)); + } 
+ + @Override + public ResultInterface getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) { + return executeQuery(GET_UDTS_4, getString(catalog), getString(schemaPattern), getString(typeNamePattern), + getIntArray(types)); + } + + @Override + public ResultInterface getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) { + return executeQuery(GET_SUPER_TYPES_3, getString(catalog), getString(schemaPattern), + getString(typeNamePattern)); + } + + @Override + public ResultInterface getSuperTables(String catalog, String schemaPattern, String tableNamePattern) { + return executeQuery(GET_SUPER_TABLES_3, getString(catalog), getString(schemaPattern), + getString(tableNamePattern)); + } + + @Override + public ResultInterface getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern) { + return executeQuery(GET_ATTRIBUTES_4, getString(catalog), getString(schemaPattern), getString(typeNamePattern), + getString(attributeNamePattern)); + } + + @Override + public int getDatabaseMajorVersion() { + ResultInterface result = executeQuery(GET_DATABASE_MAJOR_VERSION); + result.next(); + return result.currentRow()[0].getInt(); + } + + @Override + public int getDatabaseMinorVersion() { + ResultInterface result = executeQuery(GET_DATABASE_MINOR_VERSION); + result.next(); + return result.currentRow()[0].getInt(); + } + + @Override + public ResultInterface getSchemas(String catalog, String schemaPattern) { + return executeQuery(GET_SCHEMAS_2, getString(catalog), getString(schemaPattern)); + } + + @Override + public ResultInterface getFunctions(String catalog, String schemaPattern, String functionNamePattern) { + return executeQuery(GET_FUNCTIONS_3, getString(catalog), getString(schemaPattern), + getString(functionNamePattern)); + } + + @Override + public ResultInterface getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, + String columnNamePattern) { + 
return executeQuery(GET_FUNCTION_COLUMNS_4, getString(catalog), getString(schemaPattern), + getString(functionNamePattern), getString(columnNamePattern)); + } + + @Override + public ResultInterface getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) { + return executeQuery(GET_PSEUDO_COLUMNS_4, getString(catalog), getString(schemaPattern), + getString(tableNamePattern), getString(columnNamePattern)); + } + + private ResultInterface executeQuery(int code, Value... args) { + if (session.isClosed()) { + throw DbException.get(ErrorCode.DATABASE_CALLED_AT_SHUTDOWN); + } + synchronized (session) { + int objectId = session.getNextId(); + for (int i = 0, count = 0; i < transferList.size(); i++) { + Transfer transfer = transferList.get(i); + try { + session.traceOperation("GET_META", objectId); + int len = args.length; + transfer.writeInt(SessionRemote.GET_JDBC_META).writeInt(code).writeInt(len); + for (int j = 0; j < len; j++) { + transfer.writeValue(args[j]); + } + session.done(transfer); + int columnCount = transfer.readInt(); + return new ResultRemote(session, transfer, objectId, columnCount, Integer.MAX_VALUE); + } catch (IOException e) { + session.removeServer(e, i--, ++count); + } + } + return null; + } + } + + private Value getIntArray(int[] array) { + if (array == null) { + return ValueNull.INSTANCE; + } + int cardinality = array.length; + Value[] values = new Value[cardinality]; + for (int i = 0; i < cardinality; i++) { + values[i] = ValueInteger.get(array[i]); + } + return ValueArray.get(TypeInfo.TYPE_INTEGER, values, session); + } + + private Value getStringArray(String[] array) { + if (array == null) { + return ValueNull.INSTANCE; + } + int cardinality = array.length; + Value[] values = new Value[cardinality]; + for (int i = 0; i < cardinality; i++) { + values[i] = getString(array[i]); + } + return ValueArray.get(TypeInfo.TYPE_VARCHAR, values, session); + } + + private Value getString(String string) { + 
return string != null ? ValueVarchar.get(string, session) : ValueNull.INSTANCE; + } + +} diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMetaServer.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaServer.java new file mode 100644 index 0000000000..9559233526 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaServer.java @@ -0,0 +1,198 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import static org.h2.jdbc.meta.DatabaseMetaRemote.DEFAULT_NULL_ORDERING; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_ATTRIBUTES_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_BEST_ROW_IDENTIFIER_5; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_CATALOGS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_COLUMNS_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_COLUMN_PRIVILEGES_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_CROSS_REFERENCE_6; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_DATABASE_MAJOR_VERSION; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_DATABASE_MINOR_VERSION; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_DATABASE_PRODUCT_VERSION; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_EXPORTED_KEYS_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_FUNCTIONS_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_FUNCTION_COLUMNS_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_IMPORTED_KEYS_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_INDEX_INFO_5; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_NUMERIC_FUNCTIONS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_PRIMARY_KEYS_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_PROCEDURES_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_PROCEDURE_COLUMNS_4; +import static 
org.h2.jdbc.meta.DatabaseMetaRemote.GET_PSEUDO_COLUMNS_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SCHEMAS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SCHEMAS_2; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SEARCH_STRING_ESCAPE; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SQL_KEYWORDS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_STRING_FUNCTIONS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SUPER_TABLES_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SUPER_TYPES_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SYSTEM_FUNCTIONS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_TABLES_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_TABLE_PRIVILEGES_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_TABLE_TYPES; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_TIME_DATE_FUNCTIONS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_TYPE_INFO; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_UDTS_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_VERSION_COLUMNS_3; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.result.SimpleResult; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * Server side support of database meta information. + */ +public final class DatabaseMetaServer { + + /** + * Process a database meta data request. 
+ * + * @param session the session + * @param code the operation code + * @param args the arguments + * @return the result + */ + public static ResultInterface process(SessionLocal session, int code, Value[] args) { + DatabaseMeta meta = session.getDatabaseMeta(); + switch (code) { + case DEFAULT_NULL_ORDERING: + return result(meta.defaultNullOrdering().ordinal()); + case GET_DATABASE_PRODUCT_VERSION: + return result(session, meta.getDatabaseProductVersion()); + case GET_SQL_KEYWORDS: + return result(session, meta.getSQLKeywords()); + case GET_NUMERIC_FUNCTIONS: + return result(session, meta.getNumericFunctions()); + case GET_STRING_FUNCTIONS: + return result(session, meta.getStringFunctions()); + case GET_SYSTEM_FUNCTIONS: + return result(session, meta.getSystemFunctions()); + case GET_TIME_DATE_FUNCTIONS: + return result(session, meta.getTimeDateFunctions()); + case GET_SEARCH_STRING_ESCAPE: + return result(session, meta.getSearchStringEscape()); + case GET_PROCEDURES_3: + return meta.getProcedures(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_PROCEDURE_COLUMNS_4: + return meta.getProcedureColumns(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getString()); + case GET_TABLES_4: + return meta.getTables(args[0].getString(), args[1].getString(), args[2].getString(), + toStringArray(args[3])); + case GET_SCHEMAS: + return meta.getSchemas(); + case GET_CATALOGS: + return meta.getCatalogs(); + case GET_TABLE_TYPES: + return meta.getTableTypes(); + case GET_COLUMNS_4: + return meta.getColumns(args[0].getString(), args[1].getString(), args[2].getString(), args[3].getString()); + case GET_COLUMN_PRIVILEGES_4: + return meta.getColumnPrivileges(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getString()); + case GET_TABLE_PRIVILEGES_3: + return meta.getTablePrivileges(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_BEST_ROW_IDENTIFIER_5: + return 
meta.getBestRowIdentifier(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getInt(), args[4].getBoolean()); + case GET_VERSION_COLUMNS_3: + return meta.getVersionColumns(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_PRIMARY_KEYS_3: + return meta.getPrimaryKeys(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_IMPORTED_KEYS_3: + return meta.getImportedKeys(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_EXPORTED_KEYS_3: + return meta.getExportedKeys(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_CROSS_REFERENCE_6: + return meta.getCrossReference(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getString(), args[4].getString(), args[5].getString()); + case GET_TYPE_INFO: + return meta.getTypeInfo(); + case GET_INDEX_INFO_5: + return meta.getIndexInfo(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getBoolean(), args[4].getBoolean()); + case GET_UDTS_4: + return meta.getUDTs(args[0].getString(), args[1].getString(), args[2].getString(), toIntArray(args[3])); + case GET_SUPER_TYPES_3: + return meta.getSuperTypes(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_SUPER_TABLES_3: + return meta.getSuperTables(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_ATTRIBUTES_4: + return meta.getAttributes(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getString()); + case GET_DATABASE_MAJOR_VERSION: + return result(meta.getDatabaseMajorVersion()); + case GET_DATABASE_MINOR_VERSION: + return result(meta.getDatabaseMinorVersion()); + case GET_SCHEMAS_2: + return meta.getSchemas(args[0].getString(), args[1].getString()); + case GET_FUNCTIONS_3: + return meta.getFunctions(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_FUNCTION_COLUMNS_4: + return meta.getFunctionColumns(args[0].getString(), 
args[1].getString(), args[2].getString(), + args[3].getString()); + case GET_PSEUDO_COLUMNS_4: + return meta.getPseudoColumns(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getString()); + default: + throw DbException.getUnsupportedException("META " + code); + } + } + + private static String[] toStringArray(Value value) { + if (value == ValueNull.INSTANCE) { + return null; + } + Value[] list = ((ValueArray) value).getList(); + int l = list.length; + String[] result = new String[l]; + for (int i = 0; i < l; i++) { + result[i] = list[i].getString(); + } + return result; + } + + private static int[] toIntArray(Value value) { + if (value == ValueNull.INSTANCE) { + return null; + } + Value[] list = ((ValueArray) value).getList(); + int l = list.length; + int[] result = new int[l]; + for (int i = 0; i < l; i++) { + result[i] = list[i].getInt(); + } + return result; + } + + private static ResultInterface result(int value) { + return result(ValueInteger.get(value)); + } + + private static ResultInterface result(SessionLocal session, String value) { + return result(ValueVarchar.get(value, session)); + } + + private static ResultInterface result(Value v) { + SimpleResult result = new SimpleResult(); + result.addColumn("RESULT", v.getType()); + result.addRow(v); + return result; + } + + private DatabaseMetaServer() { + } + +} diff --git a/h2/src/main/org/h2/jdbc/meta/package.html b/h2/src/main/org/h2/jdbc/meta/package.html new file mode 100644 index 0000000000..68e717102e --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

          + +Implementation of the JDBC database metadata API (package java.sql). + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/jdbc/package.html b/h2/src/main/org/h2/jdbc/package.html index 0410e879df..ffc7f90f3d 100644 --- a/h2/src/main/org/h2/jdbc/package.html +++ b/h2/src/main/org/h2/jdbc/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/jdbcx/JdbcConnectionPool.java b/h2/src/main/org/h2/jdbcx/JdbcConnectionPool.java index c8f6851b09..0ff22cd22f 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcConnectionPool.java +++ b/h2/src/main/org/h2/jdbcx/JdbcConnectionPool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Christian d'Heureuse, www.source-code.biz * * This class is multi-licensed under LGPL, MPL 2.0, and EPL 1.0. @@ -9,7 +9,7 @@ * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation, either * version 3 of the License, or (at your option) any later version. 
- * See http://www.gnu.org/licenses/lgpl.html + * See https://www.gnu.org/licenses/lgpl-3.0.html * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied @@ -24,7 +24,6 @@ import java.sql.SQLException; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Logger; @@ -64,8 +63,8 @@ * (www.source-code.biz) * @author Thomas Mueller */ -public class JdbcConnectionPool implements DataSource, ConnectionEventListener, - JdbcConnectionPoolBackwardsCompat { +public final class JdbcConnectionPool + implements DataSource, ConnectionEventListener, JdbcConnectionPoolBackwardsCompat { private static final int DEFAULT_TIMEOUT = 30; private static final int DEFAULT_MAX_CONNECTIONS = 10; @@ -191,7 +190,7 @@ public void dispose() { */ @Override public Connection getConnection() throws SQLException { - long max = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeout); + long max = System.nanoTime() + timeout * 1_000_000_000L; int spin = 0; do { if (activeConnections.incrementAndGet() <= maxConnections) { @@ -318,23 +317,33 @@ public void setLogWriter(PrintWriter logWriter) { } /** - * [Not supported] Return an object of this class if possible. + * Return an object of this class if possible. * * @param iface the class + * @return this */ @Override + @SuppressWarnings("unchecked") public T unwrap(Class iface) throws SQLException { - throw DbException.getUnsupportedException("unwrap"); + try { + if (isWrapperFor(iface)) { + return (T) this; + } + throw DbException.getInvalidValueException("iface", iface); + } catch (Exception e) { + throw DbException.toSQLException(e); + } } /** - * [Not supported] Checks if unwrap can return an object of this class. + * Checks if unwrap can return an object of this class. 
* * @param iface the class + * @return whether or not the interface is assignable from this class */ @Override public boolean isWrapperFor(Class iface) throws SQLException { - throw DbException.getUnsupportedException("isWrapperFor"); + return iface != null && iface.isAssignableFrom(getClass()); } /** diff --git a/h2/src/main/org/h2/jdbcx/JdbcConnectionPoolBackwardsCompat.java b/h2/src/main/org/h2/jdbcx/JdbcConnectionPoolBackwardsCompat.java index 9d2fe4ac9c..b901d49301 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcConnectionPoolBackwardsCompat.java +++ b/h2/src/main/org/h2/jdbcx/JdbcConnectionPoolBackwardsCompat.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbcx; diff --git a/h2/src/main/org/h2/jdbcx/JdbcDataSource.java b/h2/src/main/org/h2/jdbcx/JdbcDataSource.java index ed1ac5be34..4c0ab0cfad 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcDataSource.java +++ b/h2/src/main/org/h2/jdbcx/JdbcDataSource.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbcx; @@ -11,7 +11,6 @@ import java.io.Serializable; import java.sql.Connection; import java.sql.SQLException; -import java.util.Properties; import java.util.logging.Logger; import javax.naming.Reference; import javax.naming.Referenceable; @@ -21,7 +20,6 @@ import javax.sql.PooledConnection; import javax.sql.XAConnection; import javax.sql.XADataSource; -import org.h2.Driver; import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; import org.h2.message.TraceObject; @@ -60,9 +58,8 @@ * In this example the user name and password are serialized as * well; this may be a security problem in some cases. */ -public class JdbcDataSource extends TraceObject implements XADataSource, - DataSource, ConnectionPoolDataSource, Serializable, Referenceable, - JdbcDataSourceBackwardsCompat { +public final class JdbcDataSource extends TraceObject implements XADataSource, DataSource, ConnectionPoolDataSource, + Serializable, Referenceable, JdbcDataSourceBackwardsCompat { private static final long serialVersionUID = 1288136338451857771L; @@ -74,10 +71,6 @@ public class JdbcDataSource extends TraceObject implements XADataSource, private String url = ""; private String description; - static { - org.h2.Driver.load(); - } - /** * The public constructor. */ @@ -91,6 +84,8 @@ public JdbcDataSource() { * Called when de-serializing the object. 
* * @param in the input stream + * @throws IOException on failure + * @throws ClassNotFoundException on failure */ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { @@ -157,8 +152,7 @@ public void setLogWriter(PrintWriter out) { @Override public Connection getConnection() throws SQLException { debugCodeCall("getConnection"); - return getJdbcConnection(userName, - StringUtils.cloneCharArray(passwordChars)); + return new JdbcConnection(url, null, userName, StringUtils.cloneCharArray(passwordChars), false); } /** @@ -173,29 +167,9 @@ public Connection getConnection() throws SQLException { public Connection getConnection(String user, String password) throws SQLException { if (isDebugEnabled()) { - debugCode("getConnection("+quote(user)+", \"\");"); - } - return getJdbcConnection(user, convertToCharArray(password)); - } - - private JdbcConnection getJdbcConnection(String user, char[] password) - throws SQLException { - if (isDebugEnabled()) { - debugCode("getJdbcConnection("+quote(user)+", new char[0]);"); - } - Properties info = new Properties(); - info.setProperty("user", user); - info.put("password", password); - Connection conn = Driver.load().connect(url, info); - if (conn == null) { - throw new SQLException("No suitable driver found for " + url, - "08001", 8001); - } else if (!(conn instanceof JdbcConnection)) { - throw new SQLException( - "Connecting with old version is not supported: " + url, - "08001", 8001); + debugCode("getConnection(" + quote(user) + ", \"\")"); } - return (JdbcConnection) conn; + return new JdbcConnection(url, null, user, password, false); } /** @@ -249,7 +223,7 @@ public void setUrl(String url) { */ public void setPassword(String password) { debugCodeCall("setPassword", ""); - this.passwordChars = convertToCharArray(password); + this.passwordChars = password == null ? 
null : password.toCharArray(); } /** @@ -259,15 +233,11 @@ public void setPassword(String password) { */ public void setPasswordChars(char[] password) { if (isDebugEnabled()) { - debugCode("setPasswordChars(new char[0]);"); + debugCode("setPasswordChars(new char[0])"); } this.passwordChars = password; } - private static char[] convertToCharArray(String s) { - return s == null ? null : s.toCharArray(); - } - private static String convertToString(char[] a) { return a == null ? null : new String(a); } @@ -348,9 +318,8 @@ public Reference getReference() { @Override public XAConnection getXAConnection() throws SQLException { debugCodeCall("getXAConnection"); - int id = getNextId(XA_DATA_SOURCE); - return new JdbcXAConnection(factory, id, getJdbcConnection(userName, - StringUtils.cloneCharArray(passwordChars))); + return new JdbcXAConnection(factory, getNextId(XA_DATA_SOURCE), + new JdbcConnection(url, null, userName, StringUtils.cloneCharArray(passwordChars), false)); } /** @@ -365,11 +334,10 @@ public XAConnection getXAConnection() throws SQLException { public XAConnection getXAConnection(String user, String password) throws SQLException { if (isDebugEnabled()) { - debugCode("getXAConnection("+quote(user)+", \"\");"); + debugCode("getXAConnection(" + quote(user) + ", \"\")"); } - int id = getNextId(XA_DATA_SOURCE); - return new JdbcXAConnection(factory, id, getJdbcConnection(user, - convertToCharArray(password))); + return new JdbcXAConnection(factory, getNextId(XA_DATA_SOURCE), + new JdbcConnection(url, null, user, password, false)); } /** @@ -396,7 +364,7 @@ public PooledConnection getPooledConnection() throws SQLException { public PooledConnection getPooledConnection(String user, String password) throws SQLException { if (isDebugEnabled()) { - debugCode("getPooledConnection("+quote(user)+", \"\");"); + debugCode("getPooledConnection(" + quote(user) + ", \"\")"); } return getXAConnection(user, password); } diff --git 
a/h2/src/main/org/h2/jdbcx/JdbcDataSourceBackwardsCompat.java b/h2/src/main/org/h2/jdbcx/JdbcDataSourceBackwardsCompat.java index 1b8e44c188..cf00ae6b82 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcDataSourceBackwardsCompat.java +++ b/h2/src/main/org/h2/jdbcx/JdbcDataSourceBackwardsCompat.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbcx; diff --git a/h2/src/main/org/h2/jdbcx/JdbcDataSourceFactory.java b/h2/src/main/org/h2/jdbcx/JdbcDataSourceFactory.java index 17e637d9dc..07673fff43 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcDataSourceFactory.java +++ b/h2/src/main/org/h2/jdbcx/JdbcDataSourceFactory.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbcx; @@ -21,20 +21,23 @@ * This class is used to create new DataSource objects. * An application should not use this class directly. */ -public class JdbcDataSourceFactory implements ObjectFactory { +public final class JdbcDataSourceFactory implements ObjectFactory { + + private static final TraceSystem traceSystem; - private static TraceSystem cachedTraceSystem; private final Trace trace; static { - org.h2.Driver.load(); + traceSystem = new TraceSystem(SysProperties.CLIENT_TRACE_DIRECTORY + "h2datasource" + + Constants.SUFFIX_TRACE_FILE); + traceSystem.setLevelFile(SysProperties.DATASOURCE_TRACE_LEVEL); } /** * The public constructor to create new factory objects. 
*/ public JdbcDataSourceFactory() { - trace = getTraceSystem().getTrace(Trace.JDBCX); + trace = traceSystem.getTrace(Trace.JDBCX); } /** @@ -74,17 +77,10 @@ public synchronized Object getObjectInstance(Object obj, Name name, /** * INTERNAL + * @return TraceSystem */ public static TraceSystem getTraceSystem() { - synchronized (JdbcDataSourceFactory.class) { - if (cachedTraceSystem == null) { - cachedTraceSystem = new TraceSystem( - SysProperties.CLIENT_TRACE_DIRECTORY + "h2datasource" + - Constants.SUFFIX_TRACE_FILE); - cachedTraceSystem.setLevelFile(SysProperties.DATASOURCE_TRACE_LEVEL); - } - return cachedTraceSystem; - } + return traceSystem; } Trace getTrace() { diff --git a/h2/src/main/org/h2/jdbcx/JdbcXAConnection.java b/h2/src/main/org/h2/jdbcx/JdbcXAConnection.java index bb158345ae..fe7cbe5953 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcXAConnection.java +++ b/h2/src/main/org/h2/jdbcx/JdbcXAConnection.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbcx; @@ -31,7 +31,7 @@ * An application developer usually does not use this interface. * It is used by the transaction manager internally. 
*/ -public class JdbcXAConnection extends TraceObject implements XAConnection, +public final class JdbcXAConnection extends TraceObject implements XAConnection, XAResource { private final JdbcDataSourceFactory factory; @@ -45,10 +45,6 @@ public class JdbcXAConnection extends TraceObject implements XAConnection, private Xid currentTransaction; private boolean prepared; - static { - org.h2.Driver.load(); - } - JdbcXAConnection(JdbcDataSourceFactory factory, int id, JdbcConnection physicalConn) { this.factory = factory; @@ -115,7 +111,7 @@ public Connection getConnection() throws SQLException { */ @Override public void addConnectionEventListener(ConnectionEventListener listener) { - debugCode("addConnectionEventListener(listener);"); + debugCode("addConnectionEventListener(listener)"); listeners.add(listener); } @@ -126,7 +122,7 @@ public void addConnectionEventListener(ConnectionEventListener listener) { */ @Override public void removeConnectionEventListener(ConnectionEventListener listener) { - debugCode("removeConnectionEventListener(listener);"); + debugCode("removeConnectionEventListener(listener)"); listeners.remove(listener); } @@ -134,7 +130,7 @@ public void removeConnectionEventListener(ConnectionEventListener listener) { * INTERNAL */ void closedHandle() { - debugCode("closedHandle();"); + debugCodeCall("closedHandle"); ConnectionEvent event = new ConnectionEvent(this); // go backward so that a listener can remove itself // (otherwise we need to clone the list) @@ -176,7 +172,7 @@ public boolean setTransactionTimeout(int seconds) { */ @Override public boolean isSameRM(XAResource xares) { - debugCode("isSameRM(xares);"); + debugCode("isSameRM(xares)"); return xares == this; } @@ -193,11 +189,10 @@ public Xid[] recover(int flag) throws XAException { debugCodeCall("recover", quoteFlags(flag)); checkOpen(); try (Statement stat = physicalConn.createStatement()) { - ResultSet rs = stat.executeQuery("SELECT * FROM " + - "INFORMATION_SCHEMA.IN_DOUBT ORDER BY 
TRANSACTION"); + ResultSet rs = stat.executeQuery("SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT ORDER BY TRANSACTION_NAME"); ArrayList list = Utils.newSmallArrayList(); while (rs.next()) { - String tid = rs.getString("TRANSACTION"); + String tid = rs.getString("TRANSACTION_NAME"); int id = getNextId(XID); Xid xid = new JdbcXid(factory, id, tid); list.add(xid); @@ -224,7 +219,7 @@ public Xid[] recover(int flag) throws XAException { @Override public int prepare(Xid xid) throws XAException { if (isDebugEnabled()) { - debugCode("prepare("+JdbcXid.toString(xid)+");"); + debugCode("prepare(" + quoteXid(xid) + ')'); } checkOpen(); if (!currentTransaction.equals(xid)) { @@ -232,7 +227,7 @@ public int prepare(Xid xid) throws XAException { } try (Statement stat = physicalConn.createStatement()) { - stat.execute("PREPARE COMMIT " + JdbcXid.toString(xid)); + stat.execute(JdbcXid.toString(new StringBuilder("PREPARE COMMIT \""), xid).append('"').toString()); prepared = true; } catch (SQLException e) { throw convertException(e); @@ -249,7 +244,7 @@ public int prepare(Xid xid) throws XAException { @Override public void forget(Xid xid) { if (isDebugEnabled()) { - debugCode("forget("+JdbcXid.toString(xid)+");"); + debugCode("forget(" + quoteXid(xid) + ')'); } prepared = false; } @@ -262,12 +257,13 @@ public void forget(Xid xid) { @Override public void rollback(Xid xid) throws XAException { if (isDebugEnabled()) { - debugCode("rollback("+JdbcXid.toString(xid)+");"); + debugCode("rollback(" + quoteXid(xid) + ')'); } try { if (prepared) { try (Statement stat = physicalConn.createStatement()) { - stat.execute("ROLLBACK TRANSACTION " + JdbcXid.toString(xid)); + stat.execute(JdbcXid.toString( // + new StringBuilder("ROLLBACK TRANSACTION \""), xid).append('"').toString()); } prepared = false; } else { @@ -289,7 +285,7 @@ public void rollback(Xid xid) throws XAException { @Override public void end(Xid xid, int flags) throws XAException { if (isDebugEnabled()) { - 
debugCode("end("+JdbcXid.toString(xid)+", "+quoteFlags(flags)+");"); + debugCode("end(" + quoteXid(xid) + ", " + quoteFlags(flags) + ')'); } // TODO transaction end: implement this method if (flags == TMSUSPEND) { @@ -310,7 +306,7 @@ public void end(Xid xid, int flags) throws XAException { @Override public void start(Xid xid, int flags) throws XAException { if (isDebugEnabled()) { - debugCode("start("+JdbcXid.toString(xid)+", "+quoteFlags(flags)+");"); + debugCode("start(" + quoteXid(xid) + ", " + quoteFlags(flags) + ')'); } if (flags == TMRESUME) { return; @@ -340,7 +336,7 @@ public void start(Xid xid, int flags) throws XAException { @Override public void commit(Xid xid, boolean onePhase) throws XAException { if (isDebugEnabled()) { - debugCode("commit("+JdbcXid.toString(xid)+", "+onePhase+");"); + debugCode("commit(" + quoteXid(xid) + ", " + onePhase + ')'); } try { @@ -348,7 +344,8 @@ public void commit(Xid xid, boolean onePhase) throws XAException { physicalConn.commit(); } else { try (Statement stat = physicalConn.createStatement()) { - stat.execute("COMMIT TRANSACTION " + JdbcXid.toString(xid)); + stat.execute( + JdbcXid.toString(new StringBuilder("COMMIT TRANSACTION \""), xid).append('"').toString()); prepared = false; } } @@ -393,6 +390,10 @@ private static XAException convertException(SQLException e) { return xa; } + private static String quoteXid(Xid xid) { + return JdbcXid.toString(new StringBuilder(), xid).toString().replace('-', '$'); + } + private static String quoteFlags(int flags) { StringBuilder buff = new StringBuilder(); if ((flags & XAResource.TMENDRSCAN) != 0) { @@ -425,7 +426,7 @@ private static String quoteFlags(int flags) { if (buff.length() == 0) { buff.append("|XAResource.TMNOFLAGS"); } - return buff.toString().substring(1); + return buff.substring(1); } private void checkOpen() throws XAException { @@ -437,7 +438,7 @@ private void checkOpen() throws XAException { /** * A pooled connection. 
*/ - class PooledJdbcConnection extends JdbcConnection { + final class PooledJdbcConnection extends JdbcConnection { private boolean isClosed; @@ -465,11 +466,11 @@ public synchronized boolean isClosed() throws SQLException { } @Override - protected synchronized void checkClosed(boolean write) { + protected synchronized void checkClosed() { if (isClosed) { throw DbException.get(ErrorCode.OBJECT_CLOSED); } - super.checkClosed(write); + super.checkClosed(); } } diff --git a/h2/src/main/org/h2/jdbcx/JdbcXid.java b/h2/src/main/org/h2/jdbcx/JdbcXid.java index 3a5f7657c9..c31cc0f4ff 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcXid.java +++ b/h2/src/main/org/h2/jdbcx/JdbcXid.java @@ -1,25 +1,26 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbcx; -import java.util.StringTokenizer; +import java.util.Base64; import javax.transaction.xa.Xid; import org.h2.api.ErrorCode; import org.h2.message.DbException; import org.h2.message.TraceObject; -import org.h2.util.StringUtils; /** * An object of this class represents a transaction id. 
*/ -public class JdbcXid extends TraceObject implements Xid { +public final class JdbcXid extends TraceObject implements Xid { private static final String PREFIX = "XID"; + private static final Base64.Encoder ENCODER = Base64.getUrlEncoder().withoutPadding(); + private final int formatId; private final byte[] branchQualifier; private final byte[] globalTransactionId; @@ -27,32 +28,29 @@ public class JdbcXid extends TraceObject implements Xid { JdbcXid(JdbcDataSourceFactory factory, int id, String tid) { setTrace(factory.getTrace(), TraceObject.XID, id); try { - StringTokenizer tokenizer = new StringTokenizer(tid, "_"); - String prefix = tokenizer.nextToken(); - if (!PREFIX.equals(prefix)) { - throw DbException.get(ErrorCode.WRONG_XID_FORMAT_1, tid); + String[] splits = tid.split("\\|"); + if (splits.length == 4 && PREFIX.equals(splits[0])) { + formatId = Integer.parseInt(splits[1]); + Base64.Decoder decoder = Base64.getUrlDecoder(); + branchQualifier = decoder.decode(splits[2]); + globalTransactionId = decoder.decode(splits[3]); + return; } - formatId = Integer.parseInt(tokenizer.nextToken()); - branchQualifier = StringUtils.convertHexToBytes(tokenizer.nextToken()); - globalTransactionId = StringUtils.convertHexToBytes(tokenizer.nextToken()); - } catch (RuntimeException e) { - throw DbException.get(ErrorCode.WRONG_XID_FORMAT_1, tid); + } catch (IllegalArgumentException e) { } + throw DbException.get(ErrorCode.WRONG_XID_FORMAT_1, tid); } /** * INTERNAL + * @param builder to put result into + * @param xid to provide string representation for + * @return provided StringBuilder */ - public static String toString(Xid xid) { - StringBuilder builder = new StringBuilder() - .append(PREFIX) - .append('_') - .append(xid.getFormatId()) - .append('_'); - StringUtils.convertBytesToHex(builder, xid.getBranchQualifier()) - .append('_'); - StringUtils.convertBytesToHex(builder, xid.getGlobalTransactionId()); - return builder.toString(); + static StringBuilder 
toString(StringBuilder builder, Xid xid) { + return builder.append(PREFIX).append('|').append(xid.getFormatId()) // + .append('|').append(ENCODER.encodeToString(xid.getBranchQualifier())) // + .append('|').append(ENCODER.encodeToString(xid.getGlobalTransactionId())); } /** diff --git a/h2/src/main/org/h2/jdbcx/package.html b/h2/src/main/org/h2/jdbcx/package.html index 0b70b6c8a1..aae3de2eb6 100644 --- a/h2/src/main/org/h2/jdbcx/package.html +++ b/h2/src/main/org/h2/jdbcx/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/jmx/DatabaseInfo.java b/h2/src/main/org/h2/jmx/DatabaseInfo.java index 5670e377fa..9e14dfdde4 100644 --- a/h2/src/main/org/h2/jmx/DatabaseInfo.java +++ b/h2/src/main/org/h2/jmx/DatabaseInfo.java @@ -1,17 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jmx; import java.lang.management.ManagementFactory; - -import java.sql.Timestamp; import java.util.HashMap; import java.util.Hashtable; import java.util.Map; -import java.util.TreeMap; +import java.util.Map.Entry; import javax.management.JMException; import javax.management.MBeanServer; import javax.management.ObjectName; @@ -19,9 +17,9 @@ import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.store.PageStore; +import org.h2.engine.SessionLocal; import org.h2.table.Table; +import org.h2.util.NetworkConnectionInfo; /** * The MBean implementation. 
@@ -66,6 +64,7 @@ private static ObjectName getObjectName(String name, String path) * * @param connectionInfo connection info * @param database database + * @throws JMException on failure */ public static void registerMBean(ConnectionInfo connectionInfo, Database database) throws JMException { @@ -85,6 +84,7 @@ public static void registerMBean(ConnectionInfo connectionInfo, * Unregisters the MBean for the database if one is registered. * * @param name database name + * @throws JMException on failure */ public static void unregisterMBean(String name) throws Exception { ObjectName mbeanObjectName = MBEANS.remove(name); @@ -109,27 +109,6 @@ public String getMode() { return database.getMode().getName(); } - @Override - public boolean isMultiThreaded() { - return database.isMultiThreaded(); - } - - @Deprecated - @Override - public boolean isMvcc() { - return database.isMVStore(); - } - - @Override - public int getLogMode() { - return database.getLogMode(); - } - - @Override - public void setLogMode(int value) { - database.setLogMode(value); - } - @Override public int getTraceLevel() { return database.getTraceSystem().getLevelFile(); @@ -140,66 +119,37 @@ public void setTraceLevel(int level) { database.getTraceSystem().setLevelFile(level); } - @Override - public long getFileWriteCountTotal() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getWriteCountTotal(); - } - // TODO remove this method when removing the page store - // (the MVStore doesn't support it) - return 0; - } - @Override public long getFileWriteCount() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getWriteCount(); + if (database.isPersistent()) { + return database.getStore().getMvStore().getFileStore().getWriteCount(); } - return database.getStore().getMvStore().getFileStore().getReadCount(); + return 0; } @Override public long getFileReadCount() { - if 
(!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getReadCount(); + if (database.isPersistent()) { + return database.getStore().getMvStore().getFileStore().getReadCount(); } - return database.getStore().getMvStore().getFileStore().getReadCount(); + return 0; } @Override public long getFileSize() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getPageCount() * p.getPageSize() / 1024; + long size = 0; + if (database.isPersistent()) { + size = database.getStore().getMvStore().getFileStore().size(); } - return database.getStore().getMvStore().getFileStore().size(); + return size / 1024; } @Override public int getCacheSizeMax() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getCache().getMaxMemory(); + if (database.isPersistent()) { + return database.getStore().getMvStore().getCacheSize() * 1024; } - return database.getStore().getMvStore().getCacheSize() * 1024; + return 0; } @Override @@ -211,42 +161,45 @@ public void setCacheSizeMax(int kb) { @Override public int getCacheSize() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getCache().getMemory(); + if (database.isPersistent()) { + return database.getStore().getMvStore().getCacheSizeUsed() * 1024; } - return database.getStore().getMvStore().getCacheSizeUsed() * 1024; + return 0; } @Override public String getVersion() { - return Constants.getFullVersion(); + return Constants.FULL_VERSION; } @Override public String listSettings() { - StringBuilder buff = new StringBuilder(); - for (Map.Entry e : - new TreeMap<>( - database.getSettings().getSettings()).entrySet()) { - buff.append(e.getKey()).append(" = ").append(e.getValue()).append('\n'); + StringBuilder builder = new StringBuilder(); + for (Entry e : 
database.getSettings().getSortedSettings()) { + builder.append(e.getKey()).append(" = ").append(e.getValue()).append('\n'); } - return buff.toString(); + return builder.toString(); } @Override public String listSessions() { StringBuilder buff = new StringBuilder(); - for (Session session : database.getSessions(false)) { + for (SessionLocal session : database.getSessions(false)) { buff.append("session id: ").append(session.getId()); buff.append(" user: "). append(session.getUser().getName()). append('\n'); + NetworkConnectionInfo networkConnectionInfo = session.getNetworkConnectionInfo(); + if (networkConnectionInfo != null) { + buff.append("server: ").append(networkConnectionInfo.getServer()).append('\n') // + .append("clientAddr: ").append(networkConnectionInfo.getClient()).append('\n'); + String clientInfo = networkConnectionInfo.getClientInfo(); + if (clientInfo != null) { + buff.append("clientInfo: ").append(clientInfo).append('\n'); + } + } buff.append("connected: "). - append(new Timestamp(session.getSessionStart())). + append(session.getSessionStart().getString()). append('\n'); Command command = session.getCurrentCommand(); if (command != null) { @@ -254,21 +207,18 @@ public String listSessions() { .append(command) .append('\n') .append("started: ") - .append(session.getCurrentCommandStart().getString()) + .append(session.getCommandStartOrEnd().getString()) .append('\n'); } - Table[] t = session.getLocks(); - if (t.length > 0) { - for (Table table : session.getLocks()) { - if (table.isLockedExclusivelyBy(session)) { - buff.append("write lock on "); - } else { - buff.append("read lock on "); - } - buff.append(table.getSchema().getName()). - append('.').append(table.getName()). - append('\n'); + for (Table table : session.getLocks()) { + if (table.isLockedExclusivelyBy(session)) { + buff.append("write lock on "); + } else { + buff.append("read lock on "); } + buff.append(table.getSchema().getName()). + append('.').append(table.getName()). 
+ append('\n'); } buff.append('\n'); } diff --git a/h2/src/main/org/h2/jmx/DatabaseInfoMBean.java b/h2/src/main/org/h2/jmx/DatabaseInfoMBean.java index 7b76348281..15f994d296 100644 --- a/h2/src/main/org/h2/jmx/DatabaseInfoMBean.java +++ b/h2/src/main/org/h2/jmx/DatabaseInfoMBean.java @@ -1,13 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jmx; /** * Information and management operations for the given database. - * @h2.resource * * @author Eric Dong * @author Thomas Mueller @@ -16,7 +15,6 @@ public interface DatabaseInfoMBean { /** * Is the database open in exclusive mode? - * @h2.resource * * @return true if the database is open in exclusive mode, false otherwise */ @@ -24,7 +22,6 @@ public interface DatabaseInfoMBean { /** * Is the database read-only? - * @h2.resource * * @return true if the database is read-only, false otherwise */ @@ -33,55 +30,13 @@ public interface DatabaseInfoMBean { /** * The database compatibility mode (REGULAR if no compatibility mode is * used). - * @h2.resource * * @return the database mode */ String getMode(); - /** - * Is multi-threading enabled? - * @h2.resource - * - * @return true if multi-threading is enabled, false otherwise - */ - boolean isMultiThreaded(); - - /** - * Is MVCC (multi version concurrency) enabled? - * @h2.resource - * - * @return true if MVCC is enabled, false otherwise - */ - @Deprecated - boolean isMvcc(); - - /** - * The transaction log mode (0 disabled, 1 without sync, 2 enabled). - * @h2.resource - * - * @return the transaction log mode - */ - int getLogMode(); - - /** - * Set the transaction log mode. 
- * - * @param value the new log mode - */ - void setLogMode(int value); - - /** - * The number of write operations since the database was created. - * @h2.resource - * - * @return the total write count - */ - long getFileWriteCountTotal(); - /** * The number of write operations since the database was opened. - * @h2.resource * * @return the write count */ @@ -89,7 +44,6 @@ public interface DatabaseInfoMBean { /** * The file read count since the database was opened. - * @h2.resource * * @return the read count */ @@ -97,7 +51,6 @@ public interface DatabaseInfoMBean { /** * The database file size in KB. - * @h2.resource * * @return the number of pages */ @@ -105,7 +58,6 @@ public interface DatabaseInfoMBean { /** * The maximum cache size in KB. - * @h2.resource * * @return the maximum size */ @@ -120,7 +72,6 @@ public interface DatabaseInfoMBean { /** * The current cache size in KB. - * @h2.resource * * @return the current size */ @@ -128,7 +79,6 @@ public interface DatabaseInfoMBean { /** * The database version. - * @h2.resource * * @return the version */ @@ -136,7 +86,6 @@ public interface DatabaseInfoMBean { /** * The trace level (0 disabled, 1 error, 2 info, 3 debug). - * @h2.resource * * @return the level */ @@ -151,7 +100,6 @@ public interface DatabaseInfoMBean { /** * List the database settings. - * @h2.resource * * @return the database settings */ @@ -160,7 +108,6 @@ public interface DatabaseInfoMBean { /** * List sessions, including the queries that are in * progress, and locked tables. - * @h2.resource * * @return information about the sessions */ diff --git a/h2/src/main/org/h2/jmx/DocumentedMBean.java b/h2/src/main/org/h2/jmx/DocumentedMBean.java index 99858b5978..e36fd104ad 100644 --- a/h2/src/main/org/h2/jmx/DocumentedMBean.java +++ b/h2/src/main/org/h2/jmx/DocumentedMBean.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jmx; diff --git a/h2/src/main/org/h2/jmx/package.html b/h2/src/main/org/h2/jmx/package.html index c34b11262f..01ab3555ce 100644 --- a/h2/src/main/org/h2/jmx/package.html +++ b/h2/src/main/org/h2/jmx/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/message/DbException.java b/h2/src/main/org/h2/message/DbException.java index c6887a7877..a2549073df 100644 --- a/h2/src/main/org/h2/message/DbException.java +++ b/h2/src/main/org/h2/message/DbException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.message; @@ -33,9 +33,13 @@ import org.h2.jdbc.JdbcSQLTimeoutException; import org.h2.jdbc.JdbcSQLTransactionRollbackException; import org.h2.jdbc.JdbcSQLTransientException; +import org.h2.util.HasSQL; import org.h2.util.SortedProperties; import org.h2.util.StringUtils; import org.h2.util.Utils; +import org.h2.value.TypeInfo; +import org.h2.value.Typed; +import org.h2.value.Value; /** * This exception wraps a checked exception. @@ -67,8 +71,7 @@ public class DbException extends RuntimeException { static { try { - byte[] messages = Utils.getResource( - "/org/h2/res/_messages_en.prop"); + byte[] messages = Utils.getResource("/org/h2/res/_messages_en.prop"); if (messages != null) { MESSAGES.load(new ByteArrayInputStream(messages)); } @@ -102,11 +105,7 @@ private DbException(SQLException e) { } private static String translate(String key, String... 
params) { - String message = null; - if (MESSAGES != null) { - // Tomcat sets final static fields to null sometimes - message = MESSAGES.getProperty(key); - } + String message = MESSAGES.getProperty(key); if (message == null) { message = "(Message " + key + " not found)"; } @@ -114,7 +113,7 @@ private static String translate(String key, String... params) { for (int i = 0; i < params.length; i++) { String s = params[i]; if (s != null && s.length() > 0) { - params[i] = StringUtils.quoteIdentifier(s); + params[i] = quote(s); } } message = MessageFormat.format(message, (Object[]) params); @@ -122,6 +121,29 @@ private static String translate(String key, String... params) { return message; } + private static String quote(String s) { + int l = s.length(); + StringBuilder builder = new StringBuilder(l + 2).append('"'); + for (int i = 0; i < l;) { + int cp = s.codePointAt(i); + i += Character.charCount(cp); + int t = Character.getType(cp); + if (t == 0 || t >= Character.SPACE_SEPARATOR && t <= Character.SURROGATE && cp != ' ') { + if (cp <= 0xffff) { + StringUtils.appendHex(builder.append('\\'), cp, 2); + } else { + StringUtils.appendHex(builder.append("\\+"), cp, 3); + } + } else { + if (cp == '"' || cp == '\\') { + builder.append((char) cp); + } + builder.appendCodePoint(cp); + } + } + return builder.append('"').toString(); + } + /** * Get the SQLException object. * @@ -274,36 +296,81 @@ public static DbException getUnsupportedException(String message) { * * @param param the name of the parameter * @param value the value passed - * @return the IllegalArgumentException object + * @return the exception */ public static DbException getInvalidValueException(String param, Object value) { return get(INVALID_VALUE_2, value == null ? "null" : value.toString(), param); } /** - * Throw an internal error. This method seems to return an exception object, - * so that it can be used instead of 'return', but in fact it always throws - * the exception. 
+ * Gets a SQL exception meaning the type of expression is invalid or unknown. + * + * @param param the name of the parameter + * @param e the expression + * @return the exception + */ + public static DbException getInvalidExpressionTypeException(String param, Typed e) { + TypeInfo type = e.getType(); + if (type.getValueType() == Value.UNKNOWN) { + return get(UNKNOWN_DATA_TYPE_1, (e instanceof HasSQL ? (HasSQL) e : type).getTraceSQL()); + } + return get(INVALID_VALUE_2, type.getTraceSQL(), param); + } + + /** + * Gets a SQL exception meaning this value is too long. + * + * @param columnOrType + * column with data type or data type name + * @param value + * string representation of value, will be truncated to 80 + * characters + * @param valueLength + * the actual length of value, {@code -1L} if unknown + * @return the exception + */ + public static DbException getValueTooLongException(String columnOrType, String value, long valueLength) { + int length = value.length(); + int m = valueLength >= 0 ? 22 : 0; + StringBuilder builder = length > 80 // + ? new StringBuilder(83 + m).append(value, 0, 80).append("...") + : new StringBuilder(length + m).append(value); + if (valueLength >= 0) { + builder.append(" (").append(valueLength).append(')'); + } + return get(VALUE_TOO_LONG_2, columnOrType, builder.toString()); + } + + /** + * Gets a file version exception. + * + * @param dataFileName the name of the database + * @return the exception + */ + public static DbException getFileVersionError(String dataFileName) { + return DbException.get(FILE_VERSION_ERROR_1, "Old database: " + dataFileName + + " - please convert the database to a SQL script and re-create it."); + } + + /** + * Gets an internal error. 
* * @param s the message * @return the RuntimeException object - * @throws RuntimeException the exception */ - public static RuntimeException throwInternalError(String s) { + public static RuntimeException getInternalError(String s) { RuntimeException e = new RuntimeException(s); DbException.traceThrowable(e); - throw e; + return e; } /** - * Throw an internal error. This method seems to return an exception object, - * so that it can be used instead of 'return', but in fact it always throws - * the exception. + * Gets an internal error. * * @return the RuntimeException object */ - public static RuntimeException throwInternalError() { - return throwInternalError("Unexpected code path"); + public static RuntimeException getInternalError() { + return getInternalError("Unexpected code path"); } /** @@ -345,6 +412,8 @@ public static DbException convert(Throwable e) { throw (Error) e; } return get(GENERAL_ERROR_1, e, e.toString()); + } catch (OutOfMemoryError ignore) { + return OOME; } catch (Throwable ex) { try { DbException dbException = new DbException( @@ -448,6 +517,7 @@ public static SQLException getJdbcSQLException(String message, String sql, Strin case 7: case 21: case 42: + case 54: return new JdbcSQLSyntaxErrorException(message, sql, state, errorCode, cause, stackTrace); case 8: return new JdbcSQLNonTransientConnectionException(message, sql, state, errorCode, cause, stackTrace); @@ -499,6 +569,7 @@ public static SQLException getJdbcSQLException(String message, String sql, Strin case METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT: case ACCESS_DENIED_TO_CLASS_1: case RESULT_SET_READONLY: + case CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1: return new JdbcSQLNonTransientException(message, sql, state, errorCode, cause, stackTrace); case FEATURE_NOT_SUPPORTED_1: return new JdbcSQLFeatureNotSupportedException(message, sql, state, errorCode, cause, stackTrace); @@ -507,7 +578,7 @@ public static SQLException getJdbcSQLException(String message, String sql, Strin case 
LOB_CLOSED_ON_TIMEOUT_1: return new JdbcSQLTimeoutException(message, sql, state, errorCode, cause, stackTrace); case FUNCTION_MUST_RETURN_RESULT_SET_1: - case TRIGGER_SELECT_AND_ROW_BASED_NOT_SUPPORTED: + case INVALID_TRIGGER_FLAGS_1: case SUM_OR_AVG_ON_WRONG_DATATYPE_1: case MUST_GROUP_BY_COLUMN_1: case SECOND_PRIMARY_KEY: @@ -523,7 +594,6 @@ public static SQLException getJdbcSQLException(String message, String sql, Strin case TRIGGER_NOT_FOUND_1: case ERROR_CREATING_TRIGGER_OBJECT_3: case CONSTRAINT_ALREADY_EXISTS_1: - case INVALID_VALUE_SCALE_PRECISION: case SUBQUERY_IS_NOT_SINGLE_COLUMN: case INVALID_USE_OF_AGGREGATE_FUNCTION_1: case CONSTRAINT_NOT_FOUND_1: @@ -554,7 +624,7 @@ public static SQLException getJdbcSQLException(String message, String sql, Strin case CANNOT_TRUNCATE_1: case CANNOT_DROP_2: case VIEW_IS_INVALID_2: - case COMPARING_ARRAY_TO_SCALAR: + case TYPES_ARE_NOT_COMPARABLE_2: case CONSTANT_ALREADY_EXISTS_1: case CONSTANT_NOT_FOUND_1: case LITERALS_ARE_NOT_ALLOWED: @@ -570,11 +640,19 @@ public static SQLException getJdbcSQLException(String message, String sql, Strin case PUBLIC_STATIC_JAVA_METHOD_NOT_FOUND_1: case JAVA_OBJECT_SERIALIZER_CHANGE_WITH_DATA_TABLE: case FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT: + case INVALID_VALUE_PRECISION: + case INVALID_VALUE_SCALE: + case CONSTRAINT_IS_USED_BY_CONSTRAINT_2: + case UNCOMPARABLE_REFERENCED_COLUMN_2: + case GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1: + case GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2: + case COLUMN_ALIAS_IS_NOT_SPECIFIED_1: + case GROUP_BY_NOT_IN_THE_RESULT: return new JdbcSQLSyntaxErrorException(message, sql, state, errorCode, cause, stackTrace); case HEX_STRING_ODD_1: case HEX_STRING_WRONG_1: case INVALID_VALUE_2: - case SEQUENCE_ATTRIBUTES_INVALID: + case SEQUENCE_ATTRIBUTES_INVALID_7: case INVALID_TO_CHAR_FORMAT: case PARAMETER_NOT_SET_1: case PARSE_ERROR_1: @@ -587,7 +665,8 @@ public static SQLException getJdbcSQLException(String message, String sql, Strin 
return new JdbcSQLDataException(message, sql, state, errorCode, cause, stackTrace); case URL_RELATIVE_TO_CWD: case DATABASE_NOT_FOUND_1: - case DATABASE_NOT_FOUND_2: + case DATABASE_NOT_FOUND_WITH_IF_EXISTS_1: + case REMOTE_DATABASE_NOT_FOUND_1: case TRACE_CONNECTION_NOT_CLOSED: case DATABASE_ALREADY_OPEN_1: case FILE_CORRUPTED_1: @@ -614,6 +693,7 @@ public static SQLException getJdbcSQLException(String message, String sql, Strin case DATABASE_IS_IN_EXCLUSIVE_MODE: case INVALID_DATABASE_NAME_1: case AUTHENTICATOR_NOT_AVAILABLE: + case METHOD_DISABLED_ON_AUTOCOMMIT_TRUE: return new JdbcSQLNonTransientConnectionException(message, sql, state, errorCode, cause, stackTrace); case ROW_NOT_FOUND_WHEN_DELETING_1: case CONCURRENT_UPDATE_1: @@ -628,24 +708,6 @@ private static String filterSQL(String sql) { return sql == null || !sql.contains(HIDE_SQL) ? sql : "-"; } - /** - * Convert an exception to an IO exception. - * - * @param e the root cause - * @return the IO exception - */ - public static IOException convertToIOException(Throwable e) { - if (e instanceof IOException) { - return (IOException) e; - } - if (e instanceof JdbcException) { - if (e.getCause() != null) { - e = e.getCause(); - } - } - return new IOException(e.toString(), e); - } - /** * Builds message for an exception. * diff --git a/h2/src/main/org/h2/message/Trace.java b/h2/src/main/org/h2/message/Trace.java index 862c5127fc..066d026cdd 100644 --- a/h2/src/main/org/h2/message/Trace.java +++ b/h2/src/main/org/h2/message/Trace.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.message; @@ -8,14 +8,13 @@ import java.text.MessageFormat; import java.util.ArrayList; -import org.h2.engine.SysProperties; import org.h2.expression.ParameterInterface; import org.h2.util.StringUtils; /** * This class represents a trace module. */ -public class Trace { +public final class Trace { /** * The trace module id for commands. @@ -87,15 +86,10 @@ public class Trace { */ public static final int USER = 13; - /** - * The trace module id for the page store. - */ - public static final int PAGE_STORE = 14; - /** * The trace module id for the JDBCX API */ - public static final int JDBCX = 15; + public static final int JDBCX = 14; /** * Module names by their ids as array indexes. @@ -115,7 +109,6 @@ public class Trace { "table", "trigger", "user", - "pageStore", "JDBCX" }; @@ -131,7 +124,7 @@ public class Trace { Trace(TraceWriter traceWriter, String module) { this.traceWriter = traceWriter; this.module = module; - this.lineSeparator = SysProperties.LINE_SEPARATOR; + this.lineSeparator = System.lineSeparator(); } /** @@ -264,7 +257,7 @@ public static String formatParams(ArrayList parame * @param count the update count * @param time the time it took to run the statement in ms */ - public void infoSQL(String sql, String params, int count, long time) { + public void infoSQL(String sql, String params, long count, long time) { if (!isEnabled(TraceSystem.INFO)) { return; } diff --git a/h2/src/main/org/h2/message/TraceObject.java b/h2/src/main/org/h2/message/TraceObject.java index baa0c53ced..58444781ea 100644 --- a/h2/src/main/org/h2/message/TraceObject.java +++ b/h2/src/main/org/h2/message/TraceObject.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.message; @@ -16,7 +16,7 @@ /** * The base class for objects that can print trace information about themselves. */ -public class TraceObject { +public abstract class TraceObject { /** * The trace type id for callable statements. @@ -130,6 +130,7 @@ protected void setTrace(Trace trace, int type, int id) { /** * INTERNAL + * @return id */ public int getTraceId() { return id; @@ -137,6 +138,7 @@ public int getTraceId() { /** * INTERNAL + * @return object name */ public String getTraceObjectName() { return PREFIX[traceType] + id; @@ -157,7 +159,7 @@ protected static int getNextId(int type) { * * @return true if it is */ - protected boolean isDebugEnabled() { + protected final boolean isDebugEnabled() { return trace.isDebugEnabled(); } @@ -166,7 +168,7 @@ protected boolean isDebugEnabled() { * * @return true if it is */ - protected boolean isInfoEnabled() { + protected final boolean isInfoEnabled() { return trace.isInfoEnabled(); } @@ -179,11 +181,10 @@ protected boolean isInfoEnabled() { * @param newId the trace object id of the created object * @param value the value to assign this new object to */ - protected void debugCodeAssign(String className, int newType, int newId, - String value) { + protected final void debugCodeAssign(String className, int newType, int newId, String value) { if (trace.isDebugEnabled()) { - trace.debugCode(className + " " + PREFIX[newType] + - newId + " = " + getTraceObjectName() + "." + value + ";"); + trace.debugCode(className + ' ' + PREFIX[newType] + newId + " = " + getTraceObjectName() + '.' + value + + ';'); } } @@ -193,9 +194,9 @@ protected void debugCodeAssign(String className, int newType, int newId, * * @param methodName the method name */ - protected void debugCodeCall(String methodName) { + protected final void debugCodeCall(String methodName) { if (trace.isDebugEnabled()) { - trace.debugCode(getTraceObjectName() + "." 
+ methodName + "();"); + trace.debugCode(getTraceObjectName() + '.' + methodName + "();"); } } @@ -207,10 +208,9 @@ protected void debugCodeCall(String methodName) { * @param methodName the method name * @param param one single long parameter */ - protected void debugCodeCall(String methodName, long param) { + protected final void debugCodeCall(String methodName, long param) { if (trace.isDebugEnabled()) { - trace.debugCode(getTraceObjectName() + "." + - methodName + "(" + param + ");"); + trace.debugCode(getTraceObjectName() + '.' + methodName + '(' + param + ");"); } } @@ -222,10 +222,9 @@ protected void debugCodeCall(String methodName, long param) { * @param methodName the method name * @param param one single string parameter */ - protected void debugCodeCall(String methodName, String param) { + protected final void debugCodeCall(String methodName, String param) { if (trace.isDebugEnabled()) { - trace.debugCode(getTraceObjectName() + "." + - methodName + "(" + quote(param) + ");"); + trace.debugCode(getTraceObjectName() + '.' + methodName + '(' + quote(param) + ");"); } } @@ -234,9 +233,9 @@ protected void debugCodeCall(String methodName, String param) { * * @param text the trace text */ - protected void debugCode(String text) { + protected final void debugCode(String text) { if (trace.isDebugEnabled()) { - trace.debugCode(getTraceObjectName() + "." + text); + trace.debugCode(getTraceObjectName() + '.' + text + ';'); } } diff --git a/h2/src/main/org/h2/message/TraceSystem.java b/h2/src/main/org/h2/message/TraceSystem.java index 9890459ab7..96743a26c2 100644 --- a/h2/src/main/org/h2/message/TraceSystem.java +++ b/h2/src/main/org/h2/message/TraceSystem.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.message; @@ -86,7 +86,12 @@ public class TraceSystem implements TraceWriter { private SimpleDateFormat dateFormat; private Writer fileWriter; private PrintWriter printWriter; - private int checkSize; + /** + * Starts at -1 so that we check the file size immediately upon open. This + * Can be important if we open and close the trace file without managing to + * have written CHECK_SIZE_EACH_WRITES bytes each time. + */ + private int checkSize = -1; private boolean closed; private boolean writingErrorLogged; private TraceWriter writer = this; @@ -246,8 +251,8 @@ public void write(int level, String module, String s, Throwable t) { private synchronized void writeFile(String s, Throwable t) { try { - if (checkSize++ >= CHECK_SIZE_EACH_WRITES) { - checkSize = 0; + checkSize = (checkSize + 1) % CHECK_SIZE_EACH_WRITES; + if (checkSize == 0) { closeWriter(); if (maxFileSize > 0 && FileUtils.size(fileName) > maxFileSize) { String old = fileName + ".old"; @@ -264,7 +269,7 @@ private synchronized void writeFile(String s, Throwable t) { JdbcException se = (JdbcException) t; int code = se.getErrorCode(); if (ErrorCode.isCommon(code)) { - printWriter.println(t.toString()); + printWriter.println(t); } else { t.printStackTrace(printWriter); } diff --git a/h2/src/main/org/h2/message/TraceWriter.java b/h2/src/main/org/h2/message/TraceWriter.java index 56e778ccfa..368411e6bc 100644 --- a/h2/src/main/org/h2/message/TraceWriter.java +++ b/h2/src/main/org/h2/message/TraceWriter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.message; diff --git a/h2/src/main/org/h2/message/TraceWriterAdapter.java b/h2/src/main/org/h2/message/TraceWriterAdapter.java index 26f35bc83b..2ec4867155 100644 --- a/h2/src/main/org/h2/message/TraceWriterAdapter.java +++ b/h2/src/main/org/h2/message/TraceWriterAdapter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.message; diff --git a/h2/src/main/org/h2/message/package.html b/h2/src/main/org/h2/message/package.html index b638a7c5e8..ccdcc35a66 100644 --- a/h2/src/main/org/h2/message/package.html +++ b/h2/src/main/org/h2/message/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mode/DefaultNullOrdering.java b/h2/src/main/org/h2/mode/DefaultNullOrdering.java new file mode 100644 index 0000000000..32c4e4a297 --- /dev/null +++ b/h2/src/main/org/h2/mode/DefaultNullOrdering.java @@ -0,0 +1,102 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import static org.h2.result.SortOrder.DESCENDING; +import static org.h2.result.SortOrder.NULLS_FIRST; +import static org.h2.result.SortOrder.NULLS_LAST; + +/** + * Default ordering of NULL values. + */ +public enum DefaultNullOrdering { + + /** + * NULL values are considered as smaller than other values during sorting. + */ + LOW(NULLS_FIRST, NULLS_LAST), + + /** + * NULL values are considered as larger than other values during sorting. + */ + HIGH(NULLS_LAST, NULLS_FIRST), + + /** + * NULL values are sorted before other values, no matter if ascending or + * descending order is used. 
+ */ + FIRST(NULLS_FIRST, NULLS_FIRST), + + /** + * NULL values are sorted after other values, no matter if ascending or + * descending order is used. + */ + LAST(NULLS_LAST, NULLS_LAST); + + private static final DefaultNullOrdering[] VALUES = values(); + + /** + * Returns default ordering of NULL values for the specified ordinal number. + * + * @param ordinal + * ordinal number + * @return default ordering of NULL values for the specified ordinal number + * @see #ordinal() + */ + public static DefaultNullOrdering valueOf(int ordinal) { + return VALUES[ordinal]; + } + + private final int defaultAscNulls, defaultDescNulls; + + private final int nullAsc, nullDesc; + + private DefaultNullOrdering(int defaultAscNulls, int defaultDescNulls) { + this.defaultAscNulls = defaultAscNulls; + this.defaultDescNulls = defaultDescNulls; + nullAsc = defaultAscNulls == NULLS_FIRST ? -1 : 1; + nullDesc = defaultDescNulls == NULLS_FIRST ? -1 : 1; + } + + /** + * Returns a sort type bit mask with {@link org.h2.result.SortOrder#NULLS_FIRST} or + * {@link org.h2.result.SortOrder#NULLS_LAST} explicitly set + * + * @param sortType + * sort type bit mask + * @return bit mask with {@link org.h2.result.SortOrder#NULLS_FIRST} or {@link org.h2.result.SortOrder#NULLS_LAST} + * explicitly set + */ + public int addExplicitNullOrdering(int sortType) { + if ((sortType & (NULLS_FIRST | NULLS_LAST)) == 0) { + sortType |= ((sortType & DESCENDING) == 0 ? defaultAscNulls : defaultDescNulls); + } + return sortType; + } + + /** + * Compare two expressions where one of them is NULL. + * + * @param aNull + * whether the first expression is null + * @param sortType + * the sort bit mask to use + * @return the result of the comparison (-1 meaning the first expression + * should appear before the second, 0 if they are equal) + */ + public int compareNull(boolean aNull, int sortType) { + if ((sortType & NULLS_FIRST) != 0) { + return aNull ? -1 : 1; + } else if ((sortType & NULLS_LAST) != 0) { + return aNull ? 
1 : -1; + } else if ((sortType & DESCENDING) == 0) { + return aNull ? nullAsc : -nullAsc; + } else { + return aNull ? nullDesc : -nullDesc; + } + } + +} diff --git a/h2/src/main/org/h2/expression/function/FunctionInfo.java b/h2/src/main/org/h2/mode/FunctionInfo.java similarity index 68% rename from h2/src/main/org/h2/expression/function/FunctionInfo.java rename to h2/src/main/org/h2/mode/FunctionInfo.java index 093357f78d..ba47964407 100644 --- a/h2/src/main/org/h2/expression/function/FunctionInfo.java +++ b/h2/src/main/org/h2/mode/FunctionInfo.java @@ -1,9 +1,9 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.expression.function; +package org.h2.mode; /** * This class contains information about a built-in function. @@ -33,23 +33,13 @@ public final class FunctionInfo { /** * If the result of the function is NULL if any of the parameters is NULL. */ - final boolean nullIfParameterIsNull; + public final boolean nullIfParameterIsNull; /** * If this function always returns the same value for the same parameters. */ public final boolean deterministic; - /** - * Should the return value ResultSet be buffered in a local temporary file? - */ - final boolean bufferResultSetToLocalTemp; - - /** - * Should the no-arg function require parentheses. - */ - final boolean requireParentheses; - /** * Creates new instance of built-in function information. * @@ -67,22 +57,15 @@ public final class FunctionInfo { * @param deterministic * if this function always returns the same value for the same * parameters - * @param bufferResultSetToLocalTemp - * should the return value ResultSet be buffered in a local - * temporary file? 
- * @param requireParentheses - * should the no-arg function require parentheses */ public FunctionInfo(String name, int type, int parameterCount, int returnDataType, boolean nullIfParameterIsNull, - boolean deterministic, boolean bufferResultSetToLocalTemp, boolean requireParentheses) { + boolean deterministic) { this.name = name; this.type = type; this.parameterCount = parameterCount; this.returnDataType = returnDataType; this.nullIfParameterIsNull = nullIfParameterIsNull; this.deterministic = deterministic; - this.bufferResultSetToLocalTemp = bufferResultSetToLocalTemp; - this.requireParentheses = requireParentheses; } /** @@ -101,8 +84,6 @@ public FunctionInfo(FunctionInfo source, String name) { parameterCount = source.parameterCount; nullIfParameterIsNull = source.nullIfParameterIsNull; deterministic = source.deterministic; - bufferResultSetToLocalTemp = source.bufferResultSetToLocalTemp; - requireParentheses = true; } } diff --git a/h2/src/main/org/h2/mode/FunctionsBase.java b/h2/src/main/org/h2/mode/FunctionsBase.java deleted file mode 100644 index 5e0a98ec16..0000000000 --- a/h2/src/main/org/h2/mode/FunctionsBase.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.mode; - -import java.util.HashMap; - -import org.h2.engine.Database; -import org.h2.expression.function.Function; -import org.h2.expression.function.FunctionInfo; - -/** - * Base class for mode-specific functions. - */ -abstract class FunctionsBase extends Function { - - FunctionsBase(Database database, FunctionInfo info) { - super(database, info); - } - - /** - * Copy a standard function to a mode functions with a different name. 
- * - * @param functions - * mode functions - * @param stdName - * the name of the standard function - * @param newName - * the name of the mode-specific function - */ - static void copyFunction(HashMap functions, String stdName, String newName) { - functions.put(newName, new FunctionInfo(Function.getFunctionInfo(stdName), newName)); - } - -} diff --git a/h2/src/main/org/h2/mode/FunctionsDB2Derby.java b/h2/src/main/org/h2/mode/FunctionsDB2Derby.java new file mode 100644 index 0000000000..bc61364705 --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionsDB2Derby.java @@ -0,0 +1,73 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.value.ExtTypeInfoNumeric; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Functions for {@link org.h2.engine.Mode.ModeEnum#DB2} and + * {@link org.h2.engine.Mode.ModeEnum#Derby} compatibility modes. + */ +public final class FunctionsDB2Derby extends ModeFunction { + + private static final int IDENTITY_VAL_LOCAL = 5001; + + private static final HashMap FUNCTIONS = new HashMap<>(); + + private static final TypeInfo IDENTITY_VAL_LOCAL_TYPE = TypeInfo.getTypeInfo(Value.NUMERIC, 31, 0, + ExtTypeInfoNumeric.DECIMAL); + + static { + FUNCTIONS.put("IDENTITY_VAL_LOCAL", + new FunctionInfo("IDENTITY_VAL_LOCAL", IDENTITY_VAL_LOCAL, 0, Value.BIGINT, true, false)); + } + + /** + * Returns mode-specific function for a given name, or {@code null}. + * + * @param upperName + * the upper-case name of a function + * @return the function with specified name or {@code null} + */ + public static FunctionsDB2Derby getFunction(String upperName) { + FunctionInfo info = FUNCTIONS.get(upperName); + return info != null ? 
new FunctionsDB2Derby(info) : null; + } + + private FunctionsDB2Derby(FunctionInfo info) { + super(info); + } + + @Override + public Value getValue(SessionLocal session) { + switch (info.type) { + case IDENTITY_VAL_LOCAL: + return session.getLastIdentity().convertTo(type); + default: + throw DbException.getInternalError("type=" + info.type); + } + } + + @Override + public Expression optimize(SessionLocal session) { + switch (info.type) { + case IDENTITY_VAL_LOCAL: + type = IDENTITY_VAL_LOCAL_TYPE; + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + return this; + } + +} diff --git a/h2/src/main/org/h2/mode/FunctionsLegacy.java b/h2/src/main/org/h2/mode/FunctionsLegacy.java new file mode 100644 index 0000000000..64df770078 --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionsLegacy.java @@ -0,0 +1,69 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * This class implements some legacy functions not available in Regular mode. + */ +public class FunctionsLegacy extends ModeFunction { + + private static final HashMap FUNCTIONS = new HashMap<>(); + + private static final int IDENTITY = 6001; + + private static final int SCOPE_IDENTITY = IDENTITY + 1; + + static { + FUNCTIONS.put("IDENTITY", new FunctionInfo("IDENTITY", IDENTITY, 0, Value.BIGINT, true, false)); + FUNCTIONS.put("SCOPE_IDENTITY", + new FunctionInfo("SCOPE_IDENTITY", SCOPE_IDENTITY, 0, Value.BIGINT, true, false)); + } + + /** + * Returns mode-specific function for a given name, or {@code null}. 
+ * + * @param upperName + * the upper-case name of a function + * @return the function with specified name or {@code null} + */ + public static FunctionsLegacy getFunction(String upperName) { + FunctionInfo info = FUNCTIONS.get(upperName); + if (info != null) { + return new FunctionsLegacy(info); + } + return null; + } + + private FunctionsLegacy(FunctionInfo info) { + super(info); + } + + @Override + public Value getValue(SessionLocal session) { + switch (info.type) { + case IDENTITY: + case SCOPE_IDENTITY: + return session.getLastIdentity().convertTo(type); + default: + throw DbException.getInternalError("type=" + info.type); + } + } + + @Override + public Expression optimize(SessionLocal session) { + type = TypeInfo.getTypeInfo(info.returnDataType); + return this; + } + +} diff --git a/h2/src/main/org/h2/mode/FunctionsMSSQLServer.java b/h2/src/main/org/h2/mode/FunctionsMSSQLServer.java index effbd0b9f2..92cfca0867 100644 --- a/h2/src/main/org/h2/mode/FunctionsMSSQLServer.java +++ b/h2/src/main/org/h2/mode/FunctionsMSSQLServer.java @@ -1,45 +1,143 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mode; import java.util.HashMap; -import org.h2.engine.Database; -import org.h2.expression.function.Function; -import org.h2.expression.function.FunctionInfo; +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.function.CoalesceFunction; +import org.h2.expression.function.CurrentDateTimeValueFunction; +import org.h2.expression.function.RandFunction; +import org.h2.expression.function.StringFunction; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; /** * Functions for {@link org.h2.engine.Mode.ModeEnum#MSSQLServer} compatibility * mode. */ -public final class FunctionsMSSQLServer extends FunctionsBase { +public final class FunctionsMSSQLServer extends ModeFunction { + private static final HashMap FUNCTIONS = new HashMap<>(); + private static final int CHARINDEX = 4001; + + private static final int GETDATE = CHARINDEX + 1; + + private static final int ISNULL = GETDATE + 1; + + private static final int LEN = ISNULL + 1; + + private static final int NEWID = LEN + 1; + + private static final int SCOPE_IDENTITY = NEWID + 1; + + private static final TypeInfo SCOPE_IDENTITY_TYPE = TypeInfo.getTypeInfo(Value.NUMERIC, 38, 0, null); + static { - copyFunction(FUNCTIONS, "LOCATE", "CHARINDEX"); - copyFunction(FUNCTIONS, "CURRENT_DATE", "GETDATE"); - copyFunction(FUNCTIONS, "LENGTH", "LEN"); - copyFunction(FUNCTIONS, "RANDOM_UUID", "NEWID"); + FUNCTIONS.put("CHARINDEX", new FunctionInfo("CHARINDEX", CHARINDEX, VAR_ARGS, Value.INTEGER, true, true)); + FUNCTIONS.put("GETDATE", new FunctionInfo("GETDATE", GETDATE, 0, Value.TIMESTAMP, false, true)); + FUNCTIONS.put("LEN", new FunctionInfo("LEN", LEN, 1, Value.INTEGER, true, true)); + FUNCTIONS.put("NEWID", new FunctionInfo("NEWID", NEWID, 0, Value.UUID, 
true, false)); + FUNCTIONS.put("ISNULL", new FunctionInfo("ISNULL", ISNULL, 2, Value.NULL, false, true)); + FUNCTIONS.put("SCOPE_IDENTITY", + new FunctionInfo("SCOPE_IDENTITY", SCOPE_IDENTITY, 0, Value.NUMERIC, true, false)); } /** * Returns mode-specific function for a given name, or {@code null}. * - * @param database - * the database * @param upperName * the upper-case name of a function * @return the function with specified name or {@code null} */ - public static Function getFunction(Database database, String upperName) { + public static FunctionsMSSQLServer getFunction(String upperName) { FunctionInfo info = FUNCTIONS.get(upperName); - return info != null ? new Function(database, info) : null; + if (info != null) { + return new FunctionsMSSQLServer(info); + } + return null; } - private FunctionsMSSQLServer(Database database, FunctionInfo info) { - super(database, info); + private FunctionsMSSQLServer(FunctionInfo info) { + super(info); } + + @Override + protected void checkParameterCount(int len) { + int min, max; + switch (info.type) { + case CHARINDEX: + min = 2; + max = 3; + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + if (len < min || len > max) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, min + ".." 
+ max); + } + } + + @Override + public Value getValue(SessionLocal session) { + Value[] values = getArgumentsValues(session, args); + if (values == null) { + return ValueNull.INSTANCE; + } + Value v0 = getNullOrValue(session, args, values, 0); + switch (info.type) { + case LEN: { + long len; + if (v0.getValueType() == Value.CHAR) { + String s = v0.getString(); + int l = s.length(); + while (l > 0 && s.charAt(l - 1) == ' ') { + l--; + } + len = l; + } else { + len = v0.charLength(); + } + return ValueBigint.get(len); + } + case SCOPE_IDENTITY: + return session.getLastIdentity().convertTo(type); + default: + throw DbException.getInternalError("type=" + info.type); + } + } + + @Override + public Expression optimize(SessionLocal session) { + switch (info.type) { + case CHARINDEX: + return new StringFunction(args, StringFunction.LOCATE).optimize(session); + case GETDATE: + return new CurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, 3).optimize(session); + case ISNULL: + return new CoalesceFunction(CoalesceFunction.COALESCE, args).optimize(session); + case NEWID: + return new RandFunction(null, RandFunction.RANDOM_UUID).optimize(session); + case SCOPE_IDENTITY: + type = SCOPE_IDENTITY_TYPE; + break; + default: + type = TypeInfo.getTypeInfo(info.returnDataType); + if (optimizeArguments(session)) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + } + return this; + } + } diff --git a/h2/src/main/org/h2/mode/FunctionsMySQL.java b/h2/src/main/org/h2/mode/FunctionsMySQL.java index af1d0aaa40..480100ee3a 100644 --- a/h2/src/main/org/h2/mode/FunctionsMySQL.java +++ b/h2/src/main/org/h2/mode/FunctionsMySQL.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: Jason Brittain (jason.brittain at gmail.com) */ package org.h2.mode; @@ -11,19 +11,20 @@ import java.util.Locale; import org.h2.api.ErrorCode; -import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ValueExpression; -import org.h2.expression.function.Function; -import org.h2.expression.function.FunctionInfo; import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; import org.h2.util.StringUtils; import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueInt; +import org.h2.value.ValueBigint; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueString; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarchar; /** * This class implements some MySQL-specific functions. @@ -31,19 +32,20 @@ * @author Jason Brittain * @author Thomas Mueller */ -public class FunctionsMySQL extends FunctionsBase { +public final class FunctionsMySQL extends ModeFunction { - private static final int UNIX_TIMESTAMP = 1001, FROM_UNIXTIME = 1002, DATE = 1003; + private static final int UNIX_TIMESTAMP = 1001, FROM_UNIXTIME = 1002, DATE = 1003, LAST_INSERT_ID = 1004; private static final HashMap FUNCTIONS = new HashMap<>(); static { - FUNCTIONS.put("UNIX_TIMESTAMP", new FunctionInfo("UNIX_TIMESTAMP", UNIX_TIMESTAMP, - VAR_ARGS, Value.INT, false, false, false, true)); - FUNCTIONS.put("FROM_UNIXTIME", new FunctionInfo("FROM_UNIXTIME", FROM_UNIXTIME, - VAR_ARGS, Value.STRING, false, true, false, true)); - FUNCTIONS.put("DATE", new FunctionInfo("DATE", DATE, - 1, Value.DATE, false, true, false, true)); + FUNCTIONS.put("UNIX_TIMESTAMP", + new FunctionInfo("UNIX_TIMESTAMP", UNIX_TIMESTAMP, VAR_ARGS, Value.INTEGER, false, false)); + FUNCTIONS.put("FROM_UNIXTIME", + new FunctionInfo("FROM_UNIXTIME", FROM_UNIXTIME, VAR_ARGS, Value.VARCHAR, false, 
true)); + FUNCTIONS.put("DATE", new FunctionInfo("DATE", DATE, 1, Value.DATE, false, true)); + FUNCTIONS.put("LAST_INSERT_ID", + new FunctionInfo("LAST_INSERT_ID", LAST_INSERT_ID, VAR_ARGS, Value.BIGINT, false, false)); } /** @@ -55,7 +57,7 @@ public class FunctionsMySQL extends FunctionsBase { /** * Format replacements for MySQL date formats. * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_date-format + * https://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_date-format */ private static final String[] FORMAT_REPLACE = { "%a", "EEE", @@ -84,32 +86,33 @@ public class FunctionsMySQL extends FunctionsBase { "%%", "%", }; - /** - * Get the seconds since 1970-01-01 00:00:00 UTC. - * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_unix-timestamp - * - * @return the current timestamp in seconds (not milliseconds). - */ - public static int unixTimestamp() { - return (int) (System.currentTimeMillis() / 1000L); - } - /** * Get the seconds since 1970-01-01 00:00:00 UTC of the given timestamp. * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_unix-timestamp + * https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_unix-timestamp * - * @param timestamp the timestamp - * @return the current timestamp in seconds (not milliseconds). 
+ * @param session the session + * @param value the timestamp + * @return the timestamp in seconds since EPOCH */ - public static int unixTimestamp(java.sql.Timestamp timestamp) { - return (int) (timestamp.getTime() / 1000L); + public static int unixTimestamp(SessionLocal session, Value value) { + long seconds; + if (value instanceof ValueTimestampTimeZone) { + ValueTimestampTimeZone t = (ValueTimestampTimeZone) value; + long timeNanos = t.getTimeNanos(); + seconds = DateTimeUtils.absoluteDayFromDateValue(t.getDateValue()) * DateTimeUtils.SECONDS_PER_DAY + + timeNanos / DateTimeUtils.NANOS_PER_SECOND - t.getTimeZoneOffsetSeconds(); + } else { + ValueTimestamp t = (ValueTimestamp) value.convertTo(TypeInfo.TYPE_TIMESTAMP, session); + long timeNanos = t.getTimeNanos(); + seconds = session.currentTimeZone().getEpochSecondsFromLocal(t.getDateValue(), timeNanos); + } + return (int) seconds; } /** * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_from-unixtime + * https://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_from-unixtime * * @param seconds The current timestamp in seconds. * @return a formatted date/time String in the format "yyyy-MM-dd HH:mm:ss". @@ -117,12 +120,12 @@ public static int unixTimestamp(java.sql.Timestamp timestamp) { public static String fromUnixTime(int seconds) { SimpleDateFormat formatter = new SimpleDateFormat(DATE_TIME_FORMAT, Locale.ENGLISH); - return formatter.format(new Date(seconds * 1000L)); + return formatter.format(new Date(seconds * 1_000L)); } /** * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_from-unixtime + * https://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_from-unixtime * * @param seconds The current timestamp in seconds. * @param format The format of the date/time String to return. 
@@ -131,7 +134,7 @@ public static String fromUnixTime(int seconds) { public static String fromUnixTime(int seconds, String format) { format = convertToSimpleDateFormat(format); SimpleDateFormat formatter = new SimpleDateFormat(format, Locale.ENGLISH); - return formatter.format(new Date(seconds * 1000L)); + return formatter.format(new Date(seconds * 1_000L)); } private static String convertToSimpleDateFormat(String format) { @@ -145,19 +148,17 @@ private static String convertToSimpleDateFormat(String format) { /** * Returns mode-specific function for a given name, or {@code null}. * - * @param database - * the database * @param upperName * the upper-case name of a function * @return the function with specified name or {@code null} */ - public static Function getFunction(Database database, String upperName) { + public static FunctionsMySQL getFunction(String upperName) { FunctionInfo info = FUNCTIONS.get(upperName); - return info != null ? new FunctionsMySQL(database, info) : null; + return info != null ? new FunctionsMySQL(info) : null; } - FunctionsMySQL(Database database, FunctionInfo info) { - super(database, info); + FunctionsMySQL(FunctionInfo info) { + super(info); } @Override @@ -166,7 +167,7 @@ protected void checkParameterCount(int len) { switch (info.type) { case UNIX_TIMESTAMP: min = 0; - max = 2; + max = 1; break; case FROM_UNIXTIME: min = 1; @@ -176,9 +177,12 @@ protected void checkParameterCount(int len) { min = 1; max = 1; break; + case LAST_INSERT_ID: + min = 0; + max = 1; + break; default: - DbException.throwInternalError("type=" + info.type); - return; + throw DbException.getInternalError("type=" + info.type); } if (len < min || len > max) { throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, min + ".." 
+ max); @@ -186,59 +190,67 @@ protected void checkParameterCount(int len) { } @Override - public Expression optimize(Session session) { - boolean allConst = info.deterministic; - for (int i = 0; i < args.length; i++) { - Expression e = args[i]; - if (e == null) { - continue; - } - e = e.optimize(session); - args[i] = e; - if (!e.isConstant()) { - allConst = false; - } - } + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session); + type = TypeInfo.getTypeInfo(info.returnDataType); if (allConst) { return ValueExpression.get(getValue(session)); } - type = TypeInfo.getTypeInfo(info.returnDataType); return this; } @Override - protected Value getValueWithArgs(Session session, Expression[] args) { + public Value getValue(SessionLocal session) { Value[] values = new Value[args.length]; Value v0 = getNullOrValue(session, args, values, 0); Value v1 = getNullOrValue(session, args, values, 1); Value result; switch (info.type) { case UNIX_TIMESTAMP: - result = ValueInt.get(v0 == null ? unixTimestamp() : unixTimestamp(v0.getTimestamp())); + result = ValueInteger.get(unixTimestamp(session, v0 == null ? session.currentTimestamp() : v0)); break; case FROM_UNIXTIME: - result = ValueString.get( + result = ValueVarchar.get( v1 == null ? 
fromUnixTime(v0.getInt()) : fromUnixTime(v0.getInt(), v1.getString())); break; case DATE: switch (v0.getValueType()) { + case Value.NULL: case Value.DATE: result = v0; break; default: try { - v0 = v0.convertTo(Value.TIMESTAMP); + v0 = v0.convertTo(TypeInfo.TYPE_TIMESTAMP, session); } catch (DbException ex) { - v0 = ValueNull.INSTANCE; + result = ValueNull.INSTANCE; + break; } //$FALL-THROUGH$ case Value.TIMESTAMP: case Value.TIMESTAMP_TZ: - result = v0.convertTo(Value.DATE); + result = v0.convertToDate(session); + } + break; + case LAST_INSERT_ID: + if (args.length == 0) { + result = session.getLastIdentity(); + if (result == ValueNull.INSTANCE) { + result = ValueBigint.get(0L); + } else { + result = result.convertToBigint(null); + } + } else { + result = v0; + if (result == ValueNull.INSTANCE) { + session.setLastIdentity(ValueNull.INSTANCE); + } else { + session.setLastIdentity(result = result.convertToBigint(null)); + } } break; default: - throw DbException.throwInternalError("type=" + info.type); + throw DbException.getInternalError("type=" + info.type); } return result; } diff --git a/h2/src/main/org/h2/mode/FunctionsOracle.java b/h2/src/main/org/h2/mode/FunctionsOracle.java new file mode 100644 index 0000000000..d950752c6b --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionsOracle.java @@ -0,0 +1,135 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.expression.function.DateTimeFunction; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueUuid; + +/** + * Functions for {@link org.h2.engine.Mode.ModeEnum#Oracle} compatibility mode. + */ +public final class FunctionsOracle extends ModeFunction { + + private static final int ADD_MONTHS = 2001; + + private static final int SYS_GUID = ADD_MONTHS + 1; + + private static final int TO_DATE = SYS_GUID + 1; + + private static final int TO_TIMESTAMP = TO_DATE + 1; + + private static final int TO_TIMESTAMP_TZ = TO_TIMESTAMP + 1; + + private static final HashMap FUNCTIONS = new HashMap<>(); + + static { + FUNCTIONS.put("ADD_MONTHS", + new FunctionInfo("ADD_MONTHS", ADD_MONTHS, 2, Value.TIMESTAMP, true, true)); + FUNCTIONS.put("SYS_GUID", + new FunctionInfo("SYS_GUID", SYS_GUID, 0, Value.VARBINARY, false, false)); + FUNCTIONS.put("TO_DATE", + new FunctionInfo("TO_DATE", TO_DATE, VAR_ARGS, Value.TIMESTAMP, true, true)); + FUNCTIONS.put("TO_TIMESTAMP", + new FunctionInfo("TO_TIMESTAMP", TO_TIMESTAMP, VAR_ARGS, Value.TIMESTAMP, true, true)); + FUNCTIONS.put("TO_TIMESTAMP_TZ", + new FunctionInfo("TO_TIMESTAMP_TZ", TO_TIMESTAMP_TZ, VAR_ARGS, Value.TIMESTAMP_TZ, true, true)); + } + + /** + * Returns mode-specific function for a given name, or {@code null}. + * + * @param upperName + * the upper-case name of a function + * @return the function with specified name or {@code null} + */ + public static FunctionsOracle getFunction(String upperName) { + FunctionInfo info = FUNCTIONS.get(upperName); + return info != null ? 
new FunctionsOracle(info) : null; + } + + private FunctionsOracle(FunctionInfo info) { + super(info); + } + + @Override + protected void checkParameterCount(int len) { + int min = 0, max = Integer.MAX_VALUE; + switch (info.type) { + case TO_TIMESTAMP: + case TO_TIMESTAMP_TZ: + min = 1; + max = 2; + break; + case TO_DATE: + min = 1; + max = 3; + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + if (len < min || len > max) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, min + ".." + max); + } + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session); + switch (info.type) { + case SYS_GUID: + type = TypeInfo.getTypeInfo(Value.VARBINARY, 16, 0, null); + break; + default: + type = TypeInfo.getTypeInfo(info.returnDataType); + } + if (allConst) { + return ValueExpression.get(getValue(session)); + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + Value[] values = getArgumentsValues(session, args); + if (values == null) { + return ValueNull.INSTANCE; + } + Value v0 = getNullOrValue(session, args, values, 0); + Value v1 = getNullOrValue(session, args, values, 1); + Value result; + switch (info.type) { + case ADD_MONTHS: + result = DateTimeFunction.dateadd(session, DateTimeFunction.MONTH, v1.getInt(), v0); + break; + case SYS_GUID: + result = ValueUuid.getNewRandom().convertTo(TypeInfo.TYPE_VARBINARY); + break; + case TO_DATE: + result = ToDateParser.toDate(session, v0.getString(), v1 == null ? null : v1.getString()); + break; + case TO_TIMESTAMP: + result = ToDateParser.toTimestamp(session, v0.getString(), v1 == null ? null : v1.getString()); + break; + case TO_TIMESTAMP_TZ: + result = ToDateParser.toTimestampTz(session, v0.getString(), v1 == null ? 
null : v1.getString()); + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + return result; + } + +} diff --git a/h2/src/main/org/h2/mode/FunctionsPostgreSQL.java b/h2/src/main/org/h2/mode/FunctionsPostgreSQL.java new file mode 100644 index 0000000000..ad2be4d957 --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionsPostgreSQL.java @@ -0,0 +1,377 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; +import java.util.StringJoiner; + +import org.h2.api.ErrorCode; +import org.h2.command.Parser; +import org.h2.engine.Constants; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.expression.function.CurrentGeneralValueSpecification; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.server.pg.PgServer; +import org.h2.table.Column; +import org.h2.table.Table; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * Functions for {@link org.h2.engine.Mode.ModeEnum#PostgreSQL} compatibility + * mode. 
+ */ +public final class FunctionsPostgreSQL extends ModeFunction { + + private static final int CURRENT_DATABASE = 3001; + + private static final int CURRTID2 = CURRENT_DATABASE + 1; + + private static final int FORMAT_TYPE = CURRTID2 + 1; + + private static final int HAS_DATABASE_PRIVILEGE = FORMAT_TYPE + 1; + + private static final int HAS_SCHEMA_PRIVILEGE = HAS_DATABASE_PRIVILEGE + 1; + + private static final int HAS_TABLE_PRIVILEGE = HAS_SCHEMA_PRIVILEGE + 1; + + private static final int LASTVAL = HAS_TABLE_PRIVILEGE + 1; + + private static final int VERSION = LASTVAL + 1; + + private static final int OBJ_DESCRIPTION = VERSION + 1; + + private static final int PG_ENCODING_TO_CHAR = OBJ_DESCRIPTION + 1; + + private static final int PG_GET_EXPR = PG_ENCODING_TO_CHAR + 1; + + private static final int PG_GET_INDEXDEF = PG_GET_EXPR + 1; + + private static final int PG_GET_USERBYID = PG_GET_INDEXDEF + 1; + + private static final int PG_POSTMASTER_START_TIME = PG_GET_USERBYID + 1; + + private static final int PG_RELATION_SIZE = PG_POSTMASTER_START_TIME + 1; + + private static final int PG_TABLE_IS_VISIBLE = PG_RELATION_SIZE + 1; + + private static final int SET_CONFIG = PG_TABLE_IS_VISIBLE + 1; + + private static final int ARRAY_TO_STRING = SET_CONFIG + 1; + + private static final int PG_STAT_GET_NUMSCANS = ARRAY_TO_STRING + 1; + + private static final int TO_DATE = PG_STAT_GET_NUMSCANS + 1; + + private static final int TO_TIMESTAMP = TO_DATE + 1; + + private static final HashMap FUNCTIONS = new HashMap<>(32); + + static { + FUNCTIONS.put("CURRENT_DATABASE", + new FunctionInfo("CURRENT_DATABASE", CURRENT_DATABASE, 0, Value.VARCHAR, true, false)); + FUNCTIONS.put("CURRTID2", new FunctionInfo("CURRTID2", CURRTID2, 2, Value.INTEGER, true, false)); + FUNCTIONS.put("FORMAT_TYPE", new FunctionInfo("FORMAT_TYPE", FORMAT_TYPE, 2, Value.VARCHAR, false, true)); + FUNCTIONS.put("HAS_DATABASE_PRIVILEGE", new FunctionInfo("HAS_DATABASE_PRIVILEGE", HAS_DATABASE_PRIVILEGE, + 
VAR_ARGS, Value.BOOLEAN, true, false)); + FUNCTIONS.put("HAS_SCHEMA_PRIVILEGE", + new FunctionInfo("HAS_SCHEMA_PRIVILEGE", HAS_SCHEMA_PRIVILEGE, VAR_ARGS, Value.BOOLEAN, true, false)); + FUNCTIONS.put("HAS_TABLE_PRIVILEGE", + new FunctionInfo("HAS_TABLE_PRIVILEGE", HAS_TABLE_PRIVILEGE, VAR_ARGS, Value.BOOLEAN, true, false)); + FUNCTIONS.put("LASTVAL", new FunctionInfo("LASTVAL", LASTVAL, 0, Value.BIGINT, true, false)); + FUNCTIONS.put("VERSION", new FunctionInfo("VERSION", VERSION, 0, Value.VARCHAR, true, false)); + FUNCTIONS.put("OBJ_DESCRIPTION", + new FunctionInfo("OBJ_DESCRIPTION", OBJ_DESCRIPTION, VAR_ARGS, Value.VARCHAR, true, false)); + FUNCTIONS.put("PG_ENCODING_TO_CHAR", + new FunctionInfo("PG_ENCODING_TO_CHAR", PG_ENCODING_TO_CHAR, 1, Value.VARCHAR, true, true)); + FUNCTIONS.put("PG_GET_EXPR", // + new FunctionInfo("PG_GET_EXPR", PG_GET_EXPR, VAR_ARGS, Value.VARCHAR, true, true)); + FUNCTIONS.put("PG_GET_INDEXDEF", + new FunctionInfo("PG_GET_INDEXDEF", PG_GET_INDEXDEF, VAR_ARGS, Value.VARCHAR, true, false)); + FUNCTIONS.put("PG_GET_USERBYID", + new FunctionInfo("PG_GET_USERBYID", PG_GET_USERBYID, 1, Value.VARCHAR, true, false)); + FUNCTIONS.put("PG_POSTMASTER_START_TIME", // + new FunctionInfo("PG_POSTMASTER_START_TIME", PG_POSTMASTER_START_TIME, 0, Value.TIMESTAMP_TZ, true, + false)); + FUNCTIONS.put("PG_RELATION_SIZE", + new FunctionInfo("PG_RELATION_SIZE", PG_RELATION_SIZE, VAR_ARGS, Value.BIGINT, true, false)); + FUNCTIONS.put("PG_TABLE_IS_VISIBLE", + new FunctionInfo("PG_TABLE_IS_VISIBLE", PG_TABLE_IS_VISIBLE, 1, Value.BOOLEAN, true, false)); + FUNCTIONS.put("SET_CONFIG", new FunctionInfo("SET_CONFIG", SET_CONFIG, 3, Value.VARCHAR, true, false)); + FUNCTIONS.put("ARRAY_TO_STRING", + new FunctionInfo("ARRAY_TO_STRING", ARRAY_TO_STRING, VAR_ARGS, Value.VARCHAR, false, true)); + FUNCTIONS.put("PG_STAT_GET_NUMSCANS", + new FunctionInfo("PG_STAT_GET_NUMSCANS", PG_STAT_GET_NUMSCANS, 1, Value.INTEGER, true, true)); + FUNCTIONS.put("TO_DATE", new 
FunctionInfo("TO_DATE", TO_DATE, 2, Value.DATE, true, true)); + FUNCTIONS.put("TO_TIMESTAMP", + new FunctionInfo("TO_TIMESTAMP", TO_TIMESTAMP, 2, Value.TIMESTAMP_TZ, true, true)); + + } + + /** + * Returns mode-specific function for a given name, or {@code null}. + * + * @param upperName + * the upper-case name of a function + * @return the function with specified name or {@code null} + */ + public static FunctionsPostgreSQL getFunction(String upperName) { + FunctionInfo info = FUNCTIONS.get(upperName); + if (info != null) { + return new FunctionsPostgreSQL(info); + } + return null; + } + + private FunctionsPostgreSQL(FunctionInfo info) { + super(info); + } + + @Override + protected void checkParameterCount(int len) { + int min, max; + switch (info.type) { + case HAS_DATABASE_PRIVILEGE: + case HAS_SCHEMA_PRIVILEGE: + case HAS_TABLE_PRIVILEGE: + min = 2; + max = 3; + break; + case OBJ_DESCRIPTION: + case PG_RELATION_SIZE: + min = 1; + max = 2; + break; + case PG_GET_INDEXDEF: + if (len != 1 && len != 3) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, "1, 3"); + } + return; + case PG_GET_EXPR: + case ARRAY_TO_STRING: + min = 2; + max = 3; + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + if (len < min || len > max) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, min + ".." 
+ max); + } + } + + @Override + public Expression optimize(SessionLocal session) { + switch (info.type) { + case CURRENT_DATABASE: + return new CurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_CATALOG) + .optimize(session); + default: + boolean allConst = optimizeArguments(session); + type = TypeInfo.getTypeInfo(info.returnDataType); + if (allConst) { + return ValueExpression.get(getValue(session)); + } + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + Value[] values = getArgumentsValues(session, args); + if (values == null) { + return ValueNull.INSTANCE; + } + Value v0 = getNullOrValue(session, args, values, 0); + Value v1 = getNullOrValue(session, args, values, 1); + Value v2 = getNullOrValue(session, args, values, 2); + Value result; + switch (info.type) { + case CURRTID2: + // Not implemented + result = ValueInteger.get(1); + break; + case FORMAT_TYPE: + result = v0 != ValueNull.INSTANCE ? ValueVarchar.get(PgServer.formatType(v0.getInt())) // + : ValueNull.INSTANCE; + break; + case HAS_DATABASE_PRIVILEGE: + case HAS_SCHEMA_PRIVILEGE: + case HAS_TABLE_PRIVILEGE: + case PG_TABLE_IS_VISIBLE: + // Not implemented + result = ValueBoolean.TRUE; + break; + case LASTVAL: + result = session.getLastIdentity(); + if (result == ValueNull.INSTANCE) { + throw DbException.get(ErrorCode.CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1, "lastval()"); + } + result = result.convertToBigint(null); + break; + case VERSION: + result = ValueVarchar + .get("PostgreSQL " + Constants.PG_VERSION + " server protocol using H2 " + Constants.FULL_VERSION); + break; + case OBJ_DESCRIPTION: + // Not implemented + result = ValueNull.INSTANCE; + break; + case PG_ENCODING_TO_CHAR: + result = ValueVarchar.get(encodingToChar(v0.getInt())); + break; + case PG_GET_EXPR: + // Not implemented + result = ValueNull.INSTANCE; + break; + case PG_GET_INDEXDEF: + result = getIndexdef(session, v0.getInt(), v1, v2); + break; + case 
PG_GET_USERBYID: + result = ValueVarchar.get(getUserbyid(session, v0.getInt())); + break; + case PG_POSTMASTER_START_TIME: + result = session.getDatabase().getSystemSession().getSessionStart(); + break; + case PG_RELATION_SIZE: + // Optional second argument is ignored + result = relationSize(session, v0); + break; + case SET_CONFIG: + // Not implemented + result = v1.convertTo(Value.VARCHAR); + break; + case ARRAY_TO_STRING: + if (v0 == ValueNull.INSTANCE || v1 == ValueNull.INSTANCE) { + result = ValueNull.INSTANCE; + break; + } + StringJoiner joiner = new StringJoiner(v1.getString()); + if (v0.getValueType() != Value.ARRAY) { + throw DbException.getInvalidValueException("ARRAY_TO_STRING array", v0); + } + String nullString = null; + if (v2 != null) { + nullString = v2.getString(); + } + for (Value v : ((ValueArray) v0).getList()) { + if (v != ValueNull.INSTANCE) { + joiner.add(v.getString()); + } else if (nullString != null) { + joiner.add(nullString); + } + } + result = ValueVarchar.get(joiner.toString()); + break; + case PG_STAT_GET_NUMSCANS: + // Not implemented + result = ValueInteger.get(0); + break; + case TO_DATE: + result = ToDateParser.toDate(session, v0.getString(), v1.getString()).convertToDate(session); + break; + case TO_TIMESTAMP: + result = ToDateParser.toTimestampTz(session, v0.getString(), v1.getString()); + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + return result; + } + + private static String encodingToChar(int code) { + switch (code) { + case 0: + return "SQL_ASCII"; + case 6: + return "UTF8"; + case 8: + return "LATIN1"; + default: + // This function returns empty string for unknown encodings + return code < 40 ? 
"UTF8" : ""; + } + } + + private static Value getIndexdef(SessionLocal session, int indexId, Value ordinalPosition, Value pretty) { + for (Schema schema : session.getDatabase().getAllSchemasNoMeta()) { + for (Index index : schema.getAllIndexes()) { + if (index.getId() == indexId) { + if (!index.getTable().isHidden()) { + int ordinal; + if (ordinalPosition == null || (ordinal = ordinalPosition.getInt()) == 0) { + return ValueVarchar.get(index.getCreateSQL()); + } + Column[] columns; + if (ordinal >= 1 && ordinal <= (columns = index.getColumns()).length) { + return ValueVarchar.get(columns[ordinal - 1].getName()); + } + } + break; + } + } + } + return ValueNull.INSTANCE; + } + + private static String getUserbyid(SessionLocal session, int uid) { + User u = session.getUser(); + String name; + search: { + if (u.getId() == uid) { + name = u.getName(); + break search; + } else { + if (u.isAdmin()) { + for (RightOwner rightOwner : session.getDatabase().getAllUsersAndRoles()) { + if (rightOwner.getId() == uid) { + name = rightOwner.getName(); + break search; + } + } + } + } + return "unknown (OID=" + uid + ')'; + } + if (session.getDatabase().getSettings().databaseToLower) { + name = StringUtils.toLowerEnglish(name); + } + return name; + } + + private static Value relationSize(SessionLocal session, Value tableOidOrName) { + Table t; + if (tableOidOrName.getValueType() == Value.INTEGER) { + int tid = tableOidOrName.getInt(); + for (Schema schema : session.getDatabase().getAllSchemasNoMeta()) { + for (Table table : schema.getAllTablesAndViews(session)) { + if (tid == table.getId()) { + t = table; + break; + } + } + } + return ValueNull.INSTANCE; + } else { + t = new Parser(session).parseTableName(tableOidOrName.getString()); + } + return ValueBigint.get(t.getDiskSpaceUsed()); + } + +} diff --git a/h2/src/main/org/h2/mode/ModeFunction.java b/h2/src/main/org/h2/mode/ModeFunction.java new file mode 100644 index 0000000000..59f212242e --- /dev/null +++ 
b/h2/src/main/org/h2/mode/ModeFunction.java @@ -0,0 +1,184 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import org.h2.api.ErrorCode; +import org.h2.engine.Database; +import org.h2.engine.Mode.ModeEnum; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.function.FunctionN; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Base class for mode-specific functions. + */ +public abstract class ModeFunction extends FunctionN { + + /** + * Constant for variable number of arguments. + */ + protected static final int VAR_ARGS = -1; + + /** + * The information about this function. + */ + protected final FunctionInfo info; + + /** + * Get an instance of the given function for this database. + * If no function with this name is found, null is returned. 
+ * + * @param database the database + * @param name the upper case function name + * @return the function object or null + */ + public static ModeFunction getFunction(Database database, String name) { + ModeEnum modeEnum = database.getMode().getEnum(); + if (modeEnum != ModeEnum.REGULAR) { + return getCompatibilityModeFunction(name, modeEnum); + } + return null; + } + + private static ModeFunction getCompatibilityModeFunction(String name, ModeEnum modeEnum) { + switch (modeEnum) { + case LEGACY: + return FunctionsLegacy.getFunction(name); + case DB2: + case Derby: + return FunctionsDB2Derby.getFunction(name); + case MSSQLServer: + return FunctionsMSSQLServer.getFunction(name); + case MySQL: + return FunctionsMySQL.getFunction(name); + case Oracle: + return FunctionsOracle.getFunction(name); + case PostgreSQL: + return FunctionsPostgreSQL.getFunction(name); + default: + return null; + } + } + + + /** + * Creates a new instance of function. + * + * @param info function information + */ + ModeFunction(FunctionInfo info) { + super(new Expression[info.parameterCount != VAR_ARGS ? info.parameterCount : 4]); + this.info = info; + } + + /** + * Get value transformed by expression, or null if i is out of range or + * the input value is null. + * + * @param session database session + * @param args expressions + * @param values array of input values + * @param i index of value of transform + * @return value or null + */ + static Value getNullOrValue(SessionLocal session, Expression[] args, + Value[] values, int i) { + if (i >= args.length) { + return null; + } + Value v = values[i]; + if (v == null) { + Expression e = args[i]; + if (e == null) { + return null; + } + v = values[i] = e.getValue(session); + } + return v; + } + + /** + * Gets values of arguments and checks them for NULL values if function + * returns NULL on NULL argument. 
+ * + * @param session + * the session + * @param args + * the arguments + * @return the values, or {@code null} if function should return NULL due to + * NULL argument + */ + final Value[] getArgumentsValues(SessionLocal session, Expression[] args) { + Value[] values = new Value[args.length]; + if (info.nullIfParameterIsNull) { + for (int i = 0, l = args.length; i < l; i++) { + Value v = args[i].getValue(session); + if (v == ValueNull.INSTANCE) { + return null; + } + values[i] = v; + } + } + return values; + } + + /** + * Check if the parameter count is correct. + * + * @param len the number of parameters set + * @throws DbException if the parameter count is incorrect + */ + void checkParameterCount(int len) { + throw DbException.getInternalError("type=" + info.type); + } + + @Override + public void doneWithParameters() { + int count = info.parameterCount; + if (count == VAR_ARGS) { + checkParameterCount(argsCount); + super.doneWithParameters(); + } else if (count != argsCount) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, Integer.toString(argsCount)); + } + } + + /** + * Optimizes arguments. 
+ * + * @param session + * the session + * @return whether all arguments are constants and function is deterministic + */ + final boolean optimizeArguments(SessionLocal session) { + return optimizeArguments(session, info.deterministic); + } + + @Override + public String getName() { + return info.name; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + if (!super.isEverything(visitor)) { + return false; + } + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + case ExpressionVisitor.QUERY_COMPARABLE: + case ExpressionVisitor.READONLY: + return info.deterministic; + default: + return true; + } + } + +} diff --git a/h2/src/main/org/h2/mode/OnDuplicateKeyValues.java b/h2/src/main/org/h2/mode/OnDuplicateKeyValues.java new file mode 100644 index 0000000000..44c245682b --- /dev/null +++ b/h2/src/main/org/h2/mode/OnDuplicateKeyValues.java @@ -0,0 +1,64 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import org.h2.command.dml.Update; +import org.h2.engine.SessionLocal; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Operation0; +import org.h2.message.DbException; +import org.h2.table.Column; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * VALUES(column) function for ON DUPLICATE KEY UPDATE clause. 
+ */ +public final class OnDuplicateKeyValues extends Operation0 { + + private final Column column; + + private final Update update; + + public OnDuplicateKeyValues(Column column, Update update) { + this.column = column; + this.update = update; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = update.getOnDuplicateKeyInsert().getOnDuplicateKeyValue(column.getColumnId()); + if (v == null) { + throw DbException.getUnsupportedException(getTraceSQL()); + } + return v; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return column.getSQL(builder.append("VALUES("), sqlFlags).append(')'); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return true; + } + + @Override + public TypeInfo getType() { + return column.getType(); + } + + @Override + public int getCost() { + return 1; + } + +} diff --git a/h2/src/main/org/h2/mode/PgCatalogSchema.java b/h2/src/main/org/h2/mode/PgCatalogSchema.java new file mode 100644 index 0000000000..e88f20ac54 --- /dev/null +++ b/h2/src/main/org/h2/mode/PgCatalogSchema.java @@ -0,0 +1,59 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; +import java.util.Map; + +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.schema.MetaSchema; +import org.h2.table.Table; + +/** + * {@code pg_catalog} schema. + */ +public final class PgCatalogSchema extends MetaSchema { + + private volatile HashMap tables; + + /** + * Creates new instance of {@code pg_catalog} schema. 
+ * + * @param database + * the database + * @param owner + * the owner of the schema (system user) + */ + public PgCatalogSchema(Database database, User owner) { + super(database, Constants.PG_CATALOG_SCHEMA_ID, database.sysIdentifier(Constants.SCHEMA_PG_CATALOG), owner); + } + + @Override + protected Map getMap(SessionLocal session) { + HashMap map = tables; + if (map == null) { + map = fillMap(); + } + return map; + } + + private synchronized HashMap fillMap() { + HashMap map = tables; + if (map == null) { + map = database.newStringMap(); + for (int type = 0; type < PgCatalogTable.META_TABLE_TYPE_COUNT; type++) { + PgCatalogTable table = new PgCatalogTable(this, Constants.PG_CATALOG_SCHEMA_ID - type, type); + map.put(table.getName(), table); + } + tables = map; + } + return map; + } + +} diff --git a/h2/src/main/org/h2/mode/PgCatalogTable.java b/h2/src/main/org/h2/mode/PgCatalogTable.java new file mode 100644 index 0000000000..161da669a1 --- /dev/null +++ b/h2/src/main/org/h2/mode/PgCatalogTable.java @@ -0,0 +1,721 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; + +import org.h2.constraint.Constraint; +import org.h2.engine.Constants; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.schema.Schema; +import org.h2.schema.TriggerObject; +import org.h2.server.pg.PgServer; +import org.h2.table.Column; +import org.h2.table.MetaTable; +import org.h2.table.Table; +import org.h2.util.StringUtils; +import org.h2.util.Utils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInteger; +import org.h2.value.ValueSmallint; + +/** + * This class is responsible to build the pg_catalog tables. 
+ */ +public final class PgCatalogTable extends MetaTable { + + private static final int PG_AM = 0; + + private static final int PG_ATTRDEF = PG_AM + 1; + + private static final int PG_ATTRIBUTE = PG_ATTRDEF + 1; + + private static final int PG_AUTHID = PG_ATTRIBUTE + 1; + + private static final int PG_CLASS = PG_AUTHID + 1; + + private static final int PG_CONSTRAINT = PG_CLASS + 1; + + private static final int PG_DATABASE = PG_CONSTRAINT + 1; + + private static final int PG_DESCRIPTION = PG_DATABASE + 1; + + private static final int PG_GROUP = PG_DESCRIPTION + 1; + + private static final int PG_INDEX = PG_GROUP + 1; + + private static final int PG_INHERITS = PG_INDEX + 1; + + private static final int PG_NAMESPACE = PG_INHERITS + 1; + + private static final int PG_PROC = PG_NAMESPACE + 1; + + private static final int PG_ROLES = PG_PROC + 1; + + private static final int PG_SETTINGS = PG_ROLES + 1; + + private static final int PG_TABLESPACE = PG_SETTINGS + 1; + + private static final int PG_TRIGGER = PG_TABLESPACE + 1; + + private static final int PG_TYPE = PG_TRIGGER + 1; + + private static final int PG_USER = PG_TYPE + 1; + + /** + * The number of meta table types. Supported meta table types are + * {@code 0..META_TABLE_TYPE_COUNT - 1}. + */ + public static final int META_TABLE_TYPE_COUNT = PG_USER + 1; + + private static final Object[][] PG_EXTRA_TYPES = { + { 18, "char", 1, 0 }, + { 19, "name", 64, 18 }, + { 22, "int2vector", -1, 21 }, + { 24, "regproc", 4, 0 }, + { PgServer.PG_TYPE_INT2_ARRAY, "_int2", -1, PgServer.PG_TYPE_INT2 }, + { PgServer.PG_TYPE_INT4_ARRAY, "_int4", -1, PgServer.PG_TYPE_INT4 }, + { PgServer.PG_TYPE_VARCHAR_ARRAY, "_varchar", -1, PgServer.PG_TYPE_VARCHAR }, + { 2205, "regclass", 4, 0 }, + }; + + /** + * Create a new metadata table. 
+ * + * @param schema + * the schema + * @param id + * the object id + * @param type + * the meta table type + */ + public PgCatalogTable(Schema schema, int id, int type) { + super(schema, id, type); + Column[] cols; + switch (type) { + case PG_AM: + setMetaTableName("PG_AM"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("AMNAME", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_ATTRDEF: + setMetaTableName("PG_ATTRDEF"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("ADSRC", TypeInfo.TYPE_INTEGER), // + column("ADRELID", TypeInfo.TYPE_INTEGER), // + column("ADNUM", TypeInfo.TYPE_INTEGER), // + column("ADBIN", TypeInfo.TYPE_VARCHAR), // pg_node_tree + }; + break; + case PG_ATTRIBUTE: + setMetaTableName("PG_ATTRIBUTE"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("ATTRELID", TypeInfo.TYPE_INTEGER), // + column("ATTNAME", TypeInfo.TYPE_VARCHAR), // + column("ATTTYPID", TypeInfo.TYPE_INTEGER), // + column("ATTLEN", TypeInfo.TYPE_INTEGER), // + column("ATTNUM", TypeInfo.TYPE_INTEGER), // + column("ATTTYPMOD", TypeInfo.TYPE_INTEGER), // + column("ATTNOTNULL", TypeInfo.TYPE_BOOLEAN), // + column("ATTISDROPPED", TypeInfo.TYPE_BOOLEAN), // + column("ATTHASDEF", TypeInfo.TYPE_BOOLEAN), // + }; + break; + case PG_AUTHID: + setMetaTableName("PG_AUTHID"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("ROLNAME", TypeInfo.TYPE_VARCHAR), // + column("ROLSUPER", TypeInfo.TYPE_BOOLEAN), // + column("ROLINHERIT", TypeInfo.TYPE_BOOLEAN), // + column("ROLCREATEROLE", TypeInfo.TYPE_BOOLEAN), // + column("ROLCREATEDB", TypeInfo.TYPE_BOOLEAN), // + column("ROLCATUPDATE", TypeInfo.TYPE_BOOLEAN), // + column("ROLCANLOGIN", TypeInfo.TYPE_BOOLEAN), // + column("ROLCONNLIMIT", TypeInfo.TYPE_BOOLEAN), // + column("ROLPASSWORD", TypeInfo.TYPE_BOOLEAN), // + column("ROLVALIDUNTIL", TypeInfo.TYPE_TIMESTAMP_TZ), // + column("ROLCONFIG", 
TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_VARCHAR)), // + }; + break; + case PG_CLASS: + setMetaTableName("PG_CLASS"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("RELNAME", TypeInfo.TYPE_VARCHAR), // + column("RELNAMESPACE", TypeInfo.TYPE_INTEGER), // + column("RELKIND", TypeInfo.TYPE_CHAR), // + column("RELAM", TypeInfo.TYPE_INTEGER), // + column("RELTUPLES", TypeInfo.TYPE_DOUBLE), // + column("RELTABLESPACE", TypeInfo.TYPE_INTEGER), // + column("RELPAGES", TypeInfo.TYPE_INTEGER), // + column("RELHASINDEX", TypeInfo.TYPE_BOOLEAN), // + column("RELHASRULES", TypeInfo.TYPE_BOOLEAN), // + column("RELHASOIDS", TypeInfo.TYPE_BOOLEAN), // + column("RELCHECKS", TypeInfo.TYPE_SMALLINT), // + column("RELTRIGGERS", TypeInfo.TYPE_INTEGER), // + }; + break; + case PG_CONSTRAINT: + setMetaTableName("PG_CONSTRAINT"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("CONNAME", TypeInfo.TYPE_VARCHAR), // + column("CONTYPE", TypeInfo.TYPE_VARCHAR), // + column("CONRELID", TypeInfo.TYPE_INTEGER), // + column("CONFRELID", TypeInfo.TYPE_INTEGER), // + column("CONKEY", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_SMALLINT)), // + }; + break; + case PG_DATABASE: + setMetaTableName("PG_DATABASE"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("DATNAME", TypeInfo.TYPE_VARCHAR), // + column("ENCODING", TypeInfo.TYPE_INTEGER), // + column("DATLASTSYSOID", TypeInfo.TYPE_INTEGER), // + column("DATALLOWCONN", TypeInfo.TYPE_BOOLEAN), // + column("DATCONFIG", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_VARCHAR)), // + column("DATACL", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_VARCHAR)), // aclitem[] + column("DATDBA", TypeInfo.TYPE_INTEGER), // + column("DATTABLESPACE", TypeInfo.TYPE_INTEGER), // + }; + break; + case PG_DESCRIPTION: + setMetaTableName("PG_DESCRIPTION"); + cols = new Column[] { // + column("OBJOID", TypeInfo.TYPE_INTEGER), // + 
column("OBJSUBID", TypeInfo.TYPE_INTEGER), // + column("CLASSOID", TypeInfo.TYPE_INTEGER), // + column("DESCRIPTION", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_GROUP: + setMetaTableName("PG_GROUP"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("GRONAME", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_INDEX: + setMetaTableName("PG_INDEX"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("INDEXRELID", TypeInfo.TYPE_INTEGER), // + column("INDRELID", TypeInfo.TYPE_INTEGER), // + column("INDISCLUSTERED", TypeInfo.TYPE_BOOLEAN), // + column("INDISUNIQUE", TypeInfo.TYPE_BOOLEAN), // + column("INDISPRIMARY", TypeInfo.TYPE_BOOLEAN), // + column("INDEXPRS", TypeInfo.TYPE_VARCHAR), // + column("INDKEY", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_INTEGER)), // + column("INDPRED", TypeInfo.TYPE_VARCHAR), // pg_node_tree + }; + break; + case PG_INHERITS: + setMetaTableName("PG_INHERITS"); + cols = new Column[] { // + column("INHRELID", TypeInfo.TYPE_INTEGER), // + column("INHPARENT", TypeInfo.TYPE_INTEGER), // + column("INHSEQNO", TypeInfo.TYPE_INTEGER), // + }; + break; + case PG_NAMESPACE: + setMetaTableName("PG_NAMESPACE"); + cols = new Column[] { // + column("ID", TypeInfo.TYPE_INTEGER), // + column("NSPNAME", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_PROC: + setMetaTableName("PG_PROC"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("PRONAME", TypeInfo.TYPE_VARCHAR), // + column("PRORETTYPE", TypeInfo.TYPE_INTEGER), // + column("PROARGTYPES", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_INTEGER)), // + column("PRONAMESPACE", TypeInfo.TYPE_INTEGER), // + }; + break; + case PG_ROLES: + setMetaTableName("PG_ROLES"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("ROLNAME", TypeInfo.TYPE_VARCHAR), // + column("ROLSUPER", TypeInfo.TYPE_CHAR), // + column("ROLCREATEROLE", TypeInfo.TYPE_CHAR), // + 
column("ROLCREATEDB", TypeInfo.TYPE_CHAR), // + }; + break; + case PG_SETTINGS: + setMetaTableName("PG_SETTINGS"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("NAME", TypeInfo.TYPE_VARCHAR), // + column("SETTING", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_TABLESPACE: + setMetaTableName("PG_TABLESPACE"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("SPCNAME", TypeInfo.TYPE_VARCHAR), // + column("SPCLOCATION", TypeInfo.TYPE_VARCHAR), // + column("SPCOWNER", TypeInfo.TYPE_INTEGER), // + column("SPCACL", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_VARCHAR)), // ACLITEM[] + }; + break; + case PG_TRIGGER: + setMetaTableName("PG_TRIGGER"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("TGCONSTRRELID", TypeInfo.TYPE_INTEGER), // + column("TGFOID", TypeInfo.TYPE_INTEGER), // + column("TGARGS", TypeInfo.TYPE_INTEGER), // + column("TGNARGS", TypeInfo.TYPE_INTEGER), // + column("TGDEFERRABLE", TypeInfo.TYPE_BOOLEAN), // + column("TGINITDEFERRED", TypeInfo.TYPE_BOOLEAN), // + column("TGCONSTRNAME", TypeInfo.TYPE_VARCHAR), // + column("TGRELID", TypeInfo.TYPE_INTEGER), // + }; + break; + case PG_TYPE: + setMetaTableName("PG_TYPE"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("TYPNAME", TypeInfo.TYPE_VARCHAR), // + column("TYPNAMESPACE", TypeInfo.TYPE_INTEGER), // + column("TYPLEN", TypeInfo.TYPE_INTEGER), // + column("TYPTYPE", TypeInfo.TYPE_VARCHAR), // + column("TYPDELIM", TypeInfo.TYPE_VARCHAR), // + column("TYPRELID", TypeInfo.TYPE_INTEGER), // + column("TYPELEM", TypeInfo.TYPE_INTEGER), // + column("TYPBASETYPE", TypeInfo.TYPE_INTEGER), // + column("TYPTYPMOD", TypeInfo.TYPE_INTEGER), // + column("TYPNOTNULL", TypeInfo.TYPE_BOOLEAN), // + column("TYPINPUT", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_USER: + setMetaTableName("PG_USER"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + 
column("USENAME", TypeInfo.TYPE_VARCHAR), // + column("USECREATEDB", TypeInfo.TYPE_BOOLEAN), // + column("USESUPER", TypeInfo.TYPE_BOOLEAN), // + }; + break; + default: + throw DbException.getInternalError("type=" + type); + } + setColumns(cols); + indexColumn = -1; + metaIndex = null; + } + + @Override + public ArrayList generateRows(SessionLocal session, SearchRow first, SearchRow last) { + ArrayList rows = Utils.newSmallArrayList(); + String catalog = database.getShortName(); + boolean admin = session.getUser().isAdmin(); + switch (type) { + case PG_AM: { + String[] am = { "btree", "hash" }; + for (int i = 0, l = am.length; i < l; i++) { + add(session, rows, + // OID + ValueInteger.get(i), + // AMNAME + am[i]); + } + break; + } + case PG_ATTRDEF: + break; + case PG_ATTRIBUTE: + for (Schema schema : database.getAllSchemas()) { + for (Table table : schema.getAllTablesAndViews(session)) { + if (!hideTable(table, session)) { + pgAttribute(session, rows, table); + } + } + } + for (Table table: session.getLocalTempTables()) { + if (!hideTable(table, session)) { + pgAttribute(session, rows, table); + } + } + break; + case PG_AUTHID: + break; + case PG_CLASS: + for (Schema schema : database.getAllSchemas()) { + for (Table table : schema.getAllTablesAndViews(session)) { + if (!hideTable(table, session)) { + pgClass(session, rows, table); + } + } + } + for (Table table: session.getLocalTempTables()) { + if (!hideTable(table, session)) { + pgClass(session, rows, table); + } + } + break; + case PG_CONSTRAINT: + pgConstraint(session, rows); + break; + case PG_DATABASE: { + int uid = Integer.MAX_VALUE; + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (rightOwner instanceof User && ((User) rightOwner).isAdmin()) { + int id = rightOwner.getId(); + if (id < uid) { + uid = id; + } + } + } + add(session, rows, + // OID + ValueInteger.get(100_001), + // DATNAME + catalog, + // ENCODING INT, + ValueInteger.get(6), // UTF-8 + // DATLASTSYSOID INT, + 
ValueInteger.get(100_000), + // DATALLOWCONN BOOLEAN, + ValueBoolean.TRUE, + // DATCONFIG ARRAY, -- TEXT[] + null, + // DATACL ARRAY, -- ACLITEM[] + null, + // DATDBA INT, + ValueInteger.get(uid), + // DATTABLESPACE INT + ValueInteger.get(0)); + break; + } + case PG_DESCRIPTION: + add(session, rows, + // OBJOID + ValueInteger.get(0), + // OBJSUBID + ValueInteger.get(0), + // CLASSOID + ValueInteger.get(-1), + // DESCRIPTION + catalog); + break; + case PG_GROUP: + // The next one returns no rows due to MS Access problem opening + // tables with primary key + case PG_INDEX: + case PG_INHERITS: + break; + case PG_NAMESPACE: + for (Schema schema : database.getAllSchemas()) { + add(session, rows, + // ID + ValueInteger.get(schema.getId()), + // NSPNAME + schema.getName()); + } + break; + case PG_PROC: + break; + case PG_ROLES: + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (admin || session.getUser() == rightOwner) { + String r = rightOwner instanceof User && ((User) rightOwner).isAdmin() ? 
"t" : "f"; + add(session, rows, + // OID + ValueInteger.get(rightOwner.getId()), + // ROLNAME + identifier(rightOwner.getName()), + // ROLSUPER + r, + // ROLCREATEROLE + r, + // ROLCREATEDB; + r); + } + } + break; + case PG_SETTINGS: { + String[][] settings = { { "autovacuum", "on" }, { "stats_start_collector", "on" }, + { "stats_row_level", "on" } }; + for (int i = 0, l = settings.length; i < l; i++) { + String[] setting = settings[i]; + add(session, rows, + // OID + ValueInteger.get(i), + // NAME + setting[0], + // SETTING + setting[1]); + } + break; + } + case PG_TABLESPACE: + add(session, rows, + // OID INTEGER + ValueInteger.get(0), + // SPCNAME + "main", + // SPCLOCATION + "?", + // SPCOWNER + ValueInteger.get(0), + // SPCACL + null); + break; + case PG_TRIGGER: + break; + case PG_TYPE: { + HashSet types = new HashSet<>(); + for (int i = 1, l = Value.TYPE_COUNT; i < l; i++) { + DataType t = DataType.getDataType(i); + if (t.type == Value.ARRAY) { + continue; + } + int pgType = PgServer.convertType(TypeInfo.getTypeInfo(t.type)); + if (pgType == PgServer.PG_TYPE_UNKNOWN || !types.add(pgType)) { + continue; + } + add(session, rows, + // OID + ValueInteger.get(pgType), + // TYPNAME + Value.getTypeName(t.type), + // TYPNAMESPACE + ValueInteger.get(Constants.PG_CATALOG_SCHEMA_ID), + // TYPLEN + ValueInteger.get(-1), + // TYPTYPE + "b", + // TYPDELIM + ",", + // TYPRELID + ValueInteger.get(0), + // TYPELEM + ValueInteger.get(0), + // TYPBASETYPE + ValueInteger.get(0), + // TYPTYPMOD + ValueInteger.get(-1), + // TYPNOTNULL + ValueBoolean.FALSE, + // TYPINPUT + null); + } + for (Object[] pgType : PG_EXTRA_TYPES) { + add(session, rows, + // OID + ValueInteger.get((int) pgType[0]), + // TYPNAME + pgType[1], + // TYPNAMESPACE + ValueInteger.get(Constants.PG_CATALOG_SCHEMA_ID), + // TYPLEN + ValueInteger.get((int) pgType[2]), + // TYPTYPE + "b", + // TYPDELIM + ",", + // TYPRELID + ValueInteger.get(0), + // TYPELEM + ValueInteger.get((int) pgType[3]), + // TYPBASETYPE + 
ValueInteger.get(0), + // TYPTYPMOD + ValueInteger.get(-1), + // TYPNOTNULL + ValueBoolean.FALSE, + // TYPINPUT + null); + } + break; + } + case PG_USER: + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (rightOwner instanceof User) { + User u = (User) rightOwner; + if (admin || session.getUser() == u) { + ValueBoolean r = ValueBoolean.get(u.isAdmin()); + add(session, rows, + // OID + ValueInteger.get(u.getId()), + // USENAME + identifier(u.getName()), + // USECREATEDB + r, + // USESUPER; + r); + } + } + } + break; + default: + throw DbException.getInternalError("type=" + type); + } + return rows; + + } + + private void pgAttribute(SessionLocal session, ArrayList rows, Table table) { + Column[] cols = table.getColumns(); + int tableId = table.getId(); + for (int i = 0; i < cols.length;) { + Column column = cols[i++]; + addAttribute(session, rows, tableId * 10_000 + i, tableId, table, column, i); + } + for (Index index : table.getIndexes()) { + if (index.getCreateSQL() == null) { + continue; + } + cols = index.getColumns(); + for (int i = 0; i < cols.length;) { + Column column = cols[i++]; + int indexId = index.getId(); + addAttribute(session, rows, 1_000_000 * indexId + tableId * 10_000 + i, indexId, table, column, + i); + } + } + } + + private void pgClass(SessionLocal session, ArrayList rows, Table table) { + ArrayList triggers = table.getTriggers(); + addClass(session, rows, table.getId(), table.getName(), table.getSchema().getId(), + table.isView() ? "v" : "r", false, triggers != null ? 
triggers.size() : 0); + ArrayList indexes = table.getIndexes(); + if (indexes != null) { + for (Index index : indexes) { + if (index.getCreateSQL() == null) { + continue; + } + addClass(session, rows, index.getId(), index.getName(), index.getSchema().getId(), "i", true, + 0); + } + } + } + + private void pgConstraint(SessionLocal session, ArrayList rows) { + for (Schema schema : database.getAllSchemasNoMeta()) { + for (Constraint constraint : schema.getAllConstraints()) { + Constraint.Type constraintType = constraint.getConstraintType(); + if (constraintType == Constraint.Type.DOMAIN) { + continue; + } + Table table = constraint.getTable(); + if (hideTable(table, session)) { + continue; + } + List conkey = new ArrayList<>(); + for (Column column : constraint.getReferencedColumns(table)) { + conkey.add(ValueSmallint.get((short) (column.getColumnId() + 1))); + } + Table refTable = constraint.getRefTable(); + add(session, + rows, + // OID + ValueInteger.get(constraint.getId()), + // CONNAME + constraint.getName(), + // CONTYPE + StringUtils.toLowerEnglish(constraintType.getSqlName().substring(0, 1)), + // CONRELID + ValueInteger.get(table.getId()), + // CONFRELID + ValueInteger.get(refTable != null && refTable != table + && !hideTable(refTable, session) ? table.getId() : 0), + // CONKEY + ValueArray.get(TypeInfo.TYPE_SMALLINT, conkey.toArray(Value.EMPTY_VALUES), null) + ); + } + } + } + + private void addAttribute(SessionLocal session, ArrayList rows, int id, int relId, Table table, Column column, + int ordinal) { + long precision = column.getType().getPrecision(); + add(session, rows, + // OID + ValueInteger.get(id), + // ATTRELID + ValueInteger.get(relId), + // ATTNAME + column.getName(), + // ATTTYPID + ValueInteger.get(PgServer.convertType(column.getType())), + // ATTLEN + ValueInteger.get(precision > 255 ? 
-1 : (int) precision), + // ATTNUM + ValueInteger.get(ordinal), + // ATTTYPMOD + ValueInteger.get(-1), + // ATTNOTNULL + ValueBoolean.get(!column.isNullable()), + // ATTISDROPPED + ValueBoolean.FALSE, + // ATTHASDEF + ValueBoolean.FALSE); + } + + private void addClass(SessionLocal session, ArrayList rows, int id, String name, int schema, String kind, + boolean index, int triggers) { + add(session, rows, + // OID + ValueInteger.get(id), + // RELNAME + name, + // RELNAMESPACE + ValueInteger.get(schema), + // RELKIND + kind, + // RELAM + ValueInteger.get(0), + // RELTUPLES + ValueDouble.get(0d), + // RELTABLESPACE + ValueInteger.get(0), + // RELPAGES + ValueInteger.get(0), + // RELHASINDEX + ValueBoolean.get(index), + // RELHASRULES + ValueBoolean.FALSE, + // RELHASOIDS + ValueBoolean.FALSE, + // RELCHECKS + ValueSmallint.get((short) 0), + // RELTRIGGERS + ValueInteger.get(triggers)); + } + + @Override + public long getMaxDataModificationId() { + return database.getModificationDataId(); + } + +} diff --git a/h2/src/main/org/h2/mode/Regclass.java b/h2/src/main/org/h2/mode/Regclass.java new file mode 100644 index 0000000000..e3fc92303b --- /dev/null +++ b/h2/src/main/org/h2/mode/Regclass.java @@ -0,0 +1,82 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.Operation1; +import org.h2.expression.ValueExpression; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.table.Table; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; + +/** + * A ::regclass expression. 
+ */ +public final class Regclass extends Operation1 { + + public Regclass(Expression arg) { + super(arg); + } + + @Override + public Value getValue(SessionLocal session) { + Value value = arg.getValue(session); + if (value == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + int valueType = value.getValueType(); + if (valueType >= Value.TINYINT && valueType <= Value.INTEGER) { + return value.convertToInt(null); + } + if (valueType == Value.BIGINT) { + return ValueInteger.get((int) value.getLong()); + } + String name = value.getString(); + for (Schema schema : session.getDatabase().getAllSchemas()) { + Table table = schema.findTableOrView(session, name); + if (table != null && !table.isHidden()) { + return ValueInteger.get(table.getId()); + } + Index index = schema.findIndex(session, name); + if (index != null && index.getCreateSQL() != null) { + return ValueInteger.get(index.getId()); + } + } + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, name); + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_INTEGER; + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + if (arg.isConstant()) { + return ValueExpression.get(getValue(session)); + } + return this; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return arg.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append("::REGCLASS"); + } + + @Override + public int getCost() { + return arg.getCost() + 100; + } + +} diff --git a/h2/src/main/org/h2/expression/function/ToDateParser.java b/h2/src/main/org/h2/mode/ToDateParser.java similarity index 86% rename from h2/src/main/org/h2/expression/function/ToDateParser.java rename to h2/src/main/org/h2/mode/ToDateParser.java index aed8674a6c..b789555175 100644 --- a/h2/src/main/org/h2/expression/function/ToDateParser.java +++ b/h2/src/main/org/h2/mode/ToDateParser.java @@ -1,17 +1,17 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Daniel Gredler */ -package org.h2.expression.function; +package org.h2.mode; import static java.lang.String.format; import java.util.List; -import java.util.TimeZone; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.util.DateTimeUtils; +import org.h2.util.TimeZoneProvider; import org.h2.value.ValueTimestamp; import org.h2.value.ValueTimestampTimeZone; @@ -19,8 +19,9 @@ * Emulates Oracle's TO_DATE function.
          * This class holds and handles the input data form the TO_DATE-method */ -public class ToDateParser { - private final Session session; +public final class ToDateParser { + + private final SessionLocal session; private final String unmodifiedInputStr; private final String unmodifiedFormatStr; @@ -46,7 +47,7 @@ public class ToDateParser { private boolean isAM = true; - private TimeZone timeZone; + private TimeZoneProvider timeZone; private int timeZoneHour, timeZoneMinute; @@ -59,7 +60,7 @@ public class ToDateParser { * @param input the input date with the date-time info * @param format the format of date-time info */ - private ToDateParser(Session session, ConfigParam functionName, String input, String format) { + private ToDateParser(SessionLocal session, ConfigParam functionName, String input, String format) { this.session = session; this.functionName = functionName; inputStr = input.trim(); @@ -75,7 +76,8 @@ private ToDateParser(Session session, ConfigParam functionName, String input, St unmodifiedFormatStr = formatStr; } - private static ToDateParser getTimestampParser(Session session, ConfigParam param, String input, String format) { + private static ToDateParser getTimestampParser(SessionLocal session, ConfigParam param, String input, + String format) { ToDateParser result = new ToDateParser(session, param, input, format); parse(result); return result; @@ -120,17 +122,13 @@ private ValueTimestamp getResultingValue() { private ValueTimestampTimeZone getResultingValueWithTimeZone() { ValueTimestamp ts = getResultingValue(); - long dateValue = ts.getDateValue(); - short offset; + long dateValue = ts.getDateValue(), timeNanos = ts.getTimeNanos(); + int offset; if (timeZoneHMValid) { - offset = (short) (timeZoneHour * 60 + ((timeZoneHour >= 0) ? timeZoneMinute : -timeZoneMinute)); + offset = (timeZoneHour * 60 + ((timeZoneHour >= 0) ? 
timeZoneMinute : -timeZoneMinute)) * 60; } else { - TimeZone timeZone = this.timeZone; - if (timeZone == null) { - timeZone = TimeZone.getDefault(); - } - long millis = DateTimeUtils.convertDateTimeValueToMillis(timeZone, dateValue, nanos / 1_000_000); - offset = (short) (timeZone.getOffset(millis) / 60_000); + offset = (timeZone != null ? timeZone : session.currentTimeZone()) + .getTimeZoneOffsetLocal(dateValue, timeNanos); } return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, ts.getTimeNanos(), offset); } @@ -148,8 +146,7 @@ String getFunctionName() { } private void queryCurrentYearAndMonth() { - long dateValue = (session.getDatabase().getMode().dateTimeValueWithinTransaction - ? session.getTransactionStart() : session.getCurrentCommandStart()).getDateValue(); + long dateValue = session.currentTimestamp().getDateValue(); currentYear = DateTimeUtils.yearFromDateValue(dateValue); currentMonth = DateTimeUtils.monthFromDateValue(dateValue); } @@ -237,7 +234,7 @@ void setHour12(int hour12) { this.hour12 = hour12; } - void setTimeZone(TimeZone timeZone) { + void setTimeZone(TimeZoneProvider timeZone) { timeZoneHMValid = false; this.timeZone = timeZone; } @@ -327,7 +324,7 @@ public String toString() { * @param format the format * @return the timestamp */ - public static ValueTimestamp toTimestamp(Session session, String input, String format) { + public static ValueTimestamp toTimestamp(SessionLocal session, String input, String format) { ToDateParser parser = getTimestampParser(session, ConfigParam.TO_TIMESTAMP, input, format); return parser.getResultingValue(); } @@ -340,7 +337,7 @@ public static ValueTimestamp toTimestamp(Session session, String input, String f * @param format the format * @return the timestamp */ - public static ValueTimestampTimeZone toTimestampTz(Session session, String input, String format) { + public static ValueTimestampTimeZone toTimestampTz(SessionLocal session, String input, String format) { ToDateParser parser = 
getTimestampParser(session, ConfigParam.TO_TIMESTAMP_TZ, input, format); return parser.getResultingValueWithTimeZone(); } @@ -353,7 +350,7 @@ public static ValueTimestampTimeZone toTimestampTz(Session session, String input * @param format the format * @return the date as a timestamp */ - public static ValueTimestamp toDate(Session session, String input, String format) { + public static ValueTimestamp toDate(SessionLocal session, String input, String format) { ToDateParser parser = getTimestampParser(session, ConfigParam.TO_DATE, input, format); return parser.getResultingValue(); } diff --git a/h2/src/main/org/h2/expression/function/ToDateTokenizer.java b/h2/src/main/org/h2/mode/ToDateTokenizer.java similarity index 96% rename from h2/src/main/org/h2/expression/function/ToDateTokenizer.java rename to h2/src/main/org/h2/mode/ToDateTokenizer.java index 7a723521f4..1cf83463e5 100644 --- a/h2/src/main/org/h2/expression/function/ToDateTokenizer.java +++ b/h2/src/main/org/h2/mode/ToDateTokenizer.java @@ -1,26 +1,27 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Daniel Gredler */ -package org.h2.expression.function; +package org.h2.mode; import static java.lang.String.format; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.TimeZone; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.h2.api.ErrorCode; +import org.h2.expression.function.ToCharFunction; import org.h2.message.DbException; +import org.h2.util.TimeZoneProvider; /** * Emulates Oracle's TO_DATE function. This class knows all about the * TO_DATE-format conventions and how to parse the corresponding data. 
*/ -class ToDateTokenizer { +final class ToDateTokenizer { /** * The pattern for a number. @@ -253,14 +254,14 @@ public void parse(ToDateParser params, FormatTokenEnum formatTokenEnum, int dateNr = 0; switch (formatTokenEnum) { case MONTH: - inputFragmentStr = setByName(params, ToChar.MONTHS); + inputFragmentStr = setByName(params, ToCharFunction.MONTHS); break; case Q /* NOT supported yet */: throwException(params, format("token '%s' not supported yet.", formatTokenEnum.name())); break; case MON: - inputFragmentStr = setByName(params, ToChar.SHORT_MONTHS); + inputFragmentStr = setByName(params, ToCharFunction.SHORT_MONTHS); break; case MM: // Note: In Calendar Month go from 0 - 11 @@ -327,16 +328,16 @@ public void parse(ToDateParser params, FormatTokenEnum formatTokenEnum, params.setDay(dateNr); break; case DAY: - inputFragmentStr = setByName(params, ToChar.WEEKDAYS); + inputFragmentStr = setByName(params, ToCharFunction.WEEKDAYS); break; case DY: - inputFragmentStr = setByName(params, ToChar.SHORT_WEEKDAYS); + inputFragmentStr = setByName(params, ToCharFunction.SHORT_WEEKDAYS); break; case J: inputFragmentStr = matchStringOrThrow(PATTERN_NUMBER, params, formatTokenEnum); dateNr = Integer.parseInt(inputFragmentStr); - params.setAbsoluteDay(dateNr + ToChar.JULIAN_EPOCH); + params.setAbsoluteDay(dateNr + ToCharFunction.JULIAN_EPOCH); break; default: throw new IllegalArgumentException(format( @@ -430,7 +431,7 @@ public void parse(ToDateParser params, FormatTokenEnum formatTokenEnum, case TZR: case TZD: String tzName = params.getInputStr(); - params.setTimeZone(TimeZone.getTimeZone(tzName)); + params.setTimeZone(TimeZoneProvider.ofId(tzName)); inputFragmentStr = tzName; break; default: @@ -493,7 +494,7 @@ static String matchStringOrThrow(Pattern p, ToDateParser params, static String setByName(ToDateParser params, int field) { String inputFragmentStr = null; String s = params.getInputStr(); - String[] values = ToChar.getDateNames(field); + String[] values = 
ToCharFunction.getDateNames(field); for (int i = 0; i < values.length; i++) { String dayName = values[i]; if (dayName == null) { @@ -502,12 +503,12 @@ static String setByName(ToDateParser params, int field) { int len = dayName.length(); if (dayName.equalsIgnoreCase(s.substring(0, len))) { switch (field) { - case ToChar.MONTHS: - case ToChar.SHORT_MONTHS: + case ToCharFunction.MONTHS: + case ToCharFunction.SHORT_MONTHS: params.setMonth(i + 1); break; - case ToChar.WEEKDAYS: - case ToChar.SHORT_WEEKDAYS: + case ToCharFunction.WEEKDAYS: + case ToCharFunction.SHORT_WEEKDAYS: // TODO break; default: @@ -710,4 +711,7 @@ boolean parseFormatStrWithToken(ToDateParser params) { } } + private ToDateTokenizer() { + } + } diff --git a/h2/src/main/org/h2/mode/package.html b/h2/src/main/org/h2/mode/package.html index c469b47225..b1194fe11f 100644 --- a/h2/src/main/org/h2/mode/package.html +++ b/h2/src/main/org/h2/mode/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mvstore/Chunk.java b/h2/src/main/org/h2/mvstore/Chunk.java index 56e9dba14a..c6da22f2c0 100644 --- a/h2/src/main/org/h2/mvstore/Chunk.java +++ b/h2/src/main/org/h2/mvstore/Chunk.java @@ -1,22 +1,29 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; -import java.util.HashMap; +import java.util.BitSet; +import java.util.Comparator; +import java.util.Map; + +import org.h2.util.StringUtils; /** * A chunk of data, containing one or multiple pages. *

          - * Chunks are page aligned (each page is usually 4096 bytes). + * Minimum chunk size is usually 4096 bytes, and it grows in those fixed increments (blocks). + * Chunk's length and it's position in the underlying filestore + * are multiples of that increment (block size), + * therefore they both are measured in blocks, instead of bytes. * There are at most 67 million (2^26) chunks, - * each chunk is at most 2 GB large. + * and each chunk is at most 2 GB large. */ -public class Chunk { +public final class Chunk { /** * The maximum chunk id. @@ -35,6 +42,25 @@ public class Chunk { */ static final int FOOTER_LENGTH = 128; + private static final String ATTR_CHUNK = "chunk"; + private static final String ATTR_BLOCK = "block"; + private static final String ATTR_LEN = "len"; + private static final String ATTR_MAP = "map"; + private static final String ATTR_MAX = "max"; + private static final String ATTR_NEXT = "next"; + private static final String ATTR_PAGES = "pages"; + private static final String ATTR_ROOT = "root"; + private static final String ATTR_TIME = "time"; + private static final String ATTR_VERSION = "version"; + private static final String ATTR_LIVE_MAX = "liveMax"; + private static final String ATTR_LIVE_PAGES = "livePages"; + private static final String ATTR_UNUSED = "unused"; + private static final String ATTR_UNUSED_AT_VERSION = "unusedAtVersion"; + private static final String ATTR_PIN_COUNT = "pinCount"; + private static final String ATTR_TOC = "toc"; + private static final String ATTR_OCCUPANCY = "occupancy"; + private static final String ATTR_FLETCHER = "fletcher"; + /** * The chunk id. */ @@ -43,7 +69,7 @@ public class Chunk { /** * The start block number within the file. */ - public long block; + public volatile long block; /** * The length in number of blocks. @@ -53,12 +79,27 @@ public class Chunk { /** * The total number of pages in this chunk. 
*/ - public int pageCount; + int pageCount; + + /** + * The number of pages that are still alive in the latest version of the store. + */ + int pageCountLive; /** - * The number of pages still alive. + * Offset (from the beginning of the chunk) for the table of content. + * Table of content is holding a value of type "long" for each page in the chunk. + * This value consists of map id, page offset, page length and page type. + * Format is the same as page's position id, but with map id replacing chunk id. + * + * @see DataUtils#getTocElement(int, int, int, int) for field format details */ - public int pageCountLive; + int tocPos; + + /** + * Collection of "deleted" flags for all pages in the chunk. + */ + BitSet occupancy; /** * The sum of the max length of all pages. @@ -66,7 +107,7 @@ public class Chunk { public long maxLen; /** - * The sum of the max length of all pages that are in use. + * The sum of the length of all pages that are still alive. */ public long maxLenLive; @@ -74,12 +115,12 @@ public class Chunk { * The garbage collection priority. Priority 0 means it needs to be * collected, a high value means low priority. */ - public int collectPriority; + int collectPriority; /** - * The position of the meta root. + * The position of the root of layout map. */ - public long metaRootPos; + long layoutRootPos; /** * The version stored in this chunk. @@ -98,6 +139,12 @@ public class Chunk { */ public long unused; + /** + * Version of the store at which chunk become unused and therefore can be + * considered "dead" and collected after this version is no longer in use. + */ + long unusedAtVersion; + /** * The last used map id. */ @@ -108,8 +155,58 @@ public class Chunk { */ public long next; + /** + * Number of live pinned pages. 
+ */ + private int pinCount; + + + private Chunk(String s) { + this(DataUtils.parseMap(s), true); + } + + Chunk(Map map) { + this(map, false); + } + + private Chunk(Map map, boolean full) { + this(DataUtils.readHexInt(map, ATTR_CHUNK, 0)); + block = DataUtils.readHexLong(map, ATTR_BLOCK, 0); + version = DataUtils.readHexLong(map, ATTR_VERSION, id); + if (full) { + len = DataUtils.readHexInt(map, ATTR_LEN, 0); + pageCount = DataUtils.readHexInt(map, ATTR_PAGES, 0); + pageCountLive = DataUtils.readHexInt(map, ATTR_LIVE_PAGES, pageCount); + mapId = DataUtils.readHexInt(map, ATTR_MAP, 0); + maxLen = DataUtils.readHexLong(map, ATTR_MAX, 0); + maxLenLive = DataUtils.readHexLong(map, ATTR_LIVE_MAX, maxLen); + layoutRootPos = DataUtils.readHexLong(map, ATTR_ROOT, 0); + time = DataUtils.readHexLong(map, ATTR_TIME, 0); + unused = DataUtils.readHexLong(map, ATTR_UNUSED, 0); + unusedAtVersion = DataUtils.readHexLong(map, ATTR_UNUSED_AT_VERSION, 0); + next = DataUtils.readHexLong(map, ATTR_NEXT, 0); + pinCount = DataUtils.readHexInt(map, ATTR_PIN_COUNT, 0); + tocPos = DataUtils.readHexInt(map, ATTR_TOC, 0); + byte[] bytes = DataUtils.parseHexBytes(map, ATTR_OCCUPANCY); + if (bytes == null) { + occupancy = new BitSet(); + } else { + occupancy = BitSet.valueOf(bytes); + if (pageCount - pageCountLive != occupancy.cardinality()) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, "Inconsistent occupancy info {0} - {1} != {2} {3}", + pageCount, pageCountLive, occupancy.cardinality(), this); + } + } + } + } + Chunk(int id) { this.id = id; + if (id <= 0) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, "Invalid chunk id {0}", id); + } } /** @@ -134,11 +231,11 @@ static Chunk readChunkHeader(ByteBuffer buff, long start) { } } catch (Exception e) { // there could be various reasons - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "File corrupt reading chunk at position {0}", 
start, e); } - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "File corrupt reading chunk at position {0}", start); } @@ -150,13 +247,13 @@ static Chunk readChunkHeader(ByteBuffer buff, long start) { * @param minLength the minimum length */ void writeChunkHeader(WriteBuffer buff, int minLength) { - long pos = buff.position(); + long delimiterPosition = buff.position() + minLength - 1; buff.put(asString().getBytes(StandardCharsets.ISO_8859_1)); - while (buff.position() - pos < minLength - 1) { + while (buff.position() < delimiterPosition) { buff.put((byte) ' '); } - if (minLength != 0 && buff.position() > minLength) { - throw DataUtils.newIllegalStateException( + if (minLength != 0 && buff.position() > delimiterPosition) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Chunk metadata too long"); } @@ -170,7 +267,7 @@ void writeChunkHeader(WriteBuffer buff, int minLength) { * @return the metadata key */ static String getMetaKey(int chunkId) { - return "chunk." + Integer.toHexString(chunkId); + return ATTR_CHUNK + "." 
+ Integer.toHexString(chunkId); } /** @@ -180,22 +277,7 @@ static String getMetaKey(int chunkId) { * @return the block */ public static Chunk fromString(String s) { - HashMap map = DataUtils.parseMap(s); - int id = DataUtils.readHexInt(map, "chunk", 0); - Chunk c = new Chunk(id); - c.block = DataUtils.readHexLong(map, "block", 0); - c.len = DataUtils.readHexInt(map, "len", 0); - c.pageCount = DataUtils.readHexInt(map, "pages", 0); - c.pageCountLive = DataUtils.readHexInt(map, "livePages", c.pageCount); - c.mapId = DataUtils.readHexInt(map, "map", 0); - c.maxLen = DataUtils.readHexLong(map, "max", 0); - c.maxLenLive = DataUtils.readHexLong(map, "liveMax", c.maxLen); - c.metaRootPos = DataUtils.readHexLong(map, "root", 0); - c.time = DataUtils.readHexLong(map, "time", 0); - c.unused = DataUtils.readHexLong(map, "unused", 0); - c.version = DataUtils.readHexLong(map, "version", id); - c.next = DataUtils.readHexLong(map, "next", 0); - return c; + return new Chunk(s); } /** @@ -203,7 +285,8 @@ public static Chunk fromString(String s) { * * @return the fill rate */ - public int getFillRate() { + int getFillRate() { + assert maxLenLive <= maxLen : maxLenLive + " > " + maxLen; if (maxLenLive <= 0) { return 0; } else if (maxLenLive == maxLen) { @@ -229,38 +312,51 @@ public boolean equals(Object o) { */ public String asString() { StringBuilder buff = new StringBuilder(240); - DataUtils.appendMap(buff, "chunk", id); - DataUtils.appendMap(buff, "block", block); - DataUtils.appendMap(buff, "len", len); + DataUtils.appendMap(buff, ATTR_CHUNK, id); + DataUtils.appendMap(buff, ATTR_BLOCK, block); + DataUtils.appendMap(buff, ATTR_LEN, len); if (maxLen != maxLenLive) { - DataUtils.appendMap(buff, "liveMax", maxLenLive); + DataUtils.appendMap(buff, ATTR_LIVE_MAX, maxLenLive); } if (pageCount != pageCountLive) { - DataUtils.appendMap(buff, "livePages", pageCountLive); + DataUtils.appendMap(buff, ATTR_LIVE_PAGES, pageCountLive); } - DataUtils.appendMap(buff, "map", mapId); - 
DataUtils.appendMap(buff, "max", maxLen); + DataUtils.appendMap(buff, ATTR_MAP, mapId); + DataUtils.appendMap(buff, ATTR_MAX, maxLen); if (next != 0) { - DataUtils.appendMap(buff, "next", next); + DataUtils.appendMap(buff, ATTR_NEXT, next); } - DataUtils.appendMap(buff, "pages", pageCount); - DataUtils.appendMap(buff, "root", metaRootPos); - DataUtils.appendMap(buff, "time", time); + DataUtils.appendMap(buff, ATTR_PAGES, pageCount); + DataUtils.appendMap(buff, ATTR_ROOT, layoutRootPos); + DataUtils.appendMap(buff, ATTR_TIME, time); if (unused != 0) { - DataUtils.appendMap(buff, "unused", unused); + DataUtils.appendMap(buff, ATTR_UNUSED, unused); + } + if (unusedAtVersion != 0) { + DataUtils.appendMap(buff, ATTR_UNUSED_AT_VERSION, unusedAtVersion); + } + DataUtils.appendMap(buff, ATTR_VERSION, version); + if (pinCount > 0) { + DataUtils.appendMap(buff, ATTR_PIN_COUNT, pinCount); + } + if (tocPos > 0) { + DataUtils.appendMap(buff, ATTR_TOC, tocPos); + } + if (!occupancy.isEmpty()) { + DataUtils.appendMap(buff, ATTR_OCCUPANCY, + StringUtils.convertBytesToHex(occupancy.toByteArray())); } - DataUtils.appendMap(buff, "version", version); return buff.toString(); } byte[] getFooterBytes() { StringBuilder buff = new StringBuilder(FOOTER_LENGTH); - DataUtils.appendMap(buff, "chunk", id); - DataUtils.appendMap(buff, "block", block); - DataUtils.appendMap(buff, "version", version); + DataUtils.appendMap(buff, ATTR_CHUNK, id); + DataUtils.appendMap(buff, ATTR_BLOCK, block); + DataUtils.appendMap(buff, ATTR_VERSION, version); byte[] bytes = buff.toString().getBytes(StandardCharsets.ISO_8859_1); int checksum = DataUtils.getFletcher32(bytes, 0, bytes.length); - DataUtils.appendMap(buff, "fletcher", checksum); + DataUtils.appendMap(buff, ATTR_FLETCHER, checksum); while (buff.length() < FOOTER_LENGTH - 1) { buff.append(' '); } @@ -268,10 +364,185 @@ byte[] getFooterBytes() { return buff.toString().getBytes(StandardCharsets.ISO_8859_1); } + boolean isSaved() { + return block != 
Long.MAX_VALUE; + } + + boolean isLive() { + return pageCountLive > 0; + } + + boolean isRewritable() { + return isSaved() + && isLive() + && pageCountLive < pageCount // not fully occupied + && isEvacuatable(); + } + + private boolean isEvacuatable() { + return pinCount == 0; + } + + /** + * Read a page of data into a ByteBuffer. + * + * @param fileStore to use + * @param offset of the page data + * @param pos page pos + * @return ByteBuffer containing page data. + */ + ByteBuffer readBufferForPage(FileStore fileStore, int offset, long pos) { + assert isSaved() : this; + while (true) { + long originalBlock = block; + try { + long filePos = originalBlock * MVStore.BLOCK_SIZE; + long maxPos = filePos + (long) len * MVStore.BLOCK_SIZE; + filePos += offset; + if (filePos < 0) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, + "Negative position {0}; p={1}, c={2}", filePos, pos, toString()); + } + + int length = DataUtils.getPageMaxLength(pos); + if (length == DataUtils.PAGE_LARGE) { + // read the first bytes to figure out actual length + length = fileStore.readFully(filePos, 128).getInt(); + // pageNo is deliberately not included into length to preserve compatibility + // TODO: remove this adjustment when page on disk format is re-organized + length += 4; + } + length = (int) Math.min(maxPos - filePos, length); + if (length < 0) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos); + } + + ByteBuffer buff = fileStore.readFully(filePos, length); + + if (originalBlock == block) { + return buff; + } + } catch (MVStoreException ex) { + if (originalBlock == block) { + throw ex; + } + } + } + } + + long[] readToC(FileStore fileStore) { + assert isSaved() : this; + assert tocPos > 0; + while (true) { + long originalBlock = block; + try { + long filePos = originalBlock * MVStore.BLOCK_SIZE + tocPos; + int length = pageCount * 8; + long[] toc = new 
long[pageCount]; + fileStore.readFully(filePos, length).asLongBuffer().get(toc); + if (originalBlock == block) { + return toc; + } + } catch (MVStoreException ex) { + if (originalBlock == block) { + throw ex; + } + } + } + } + + /** + * Modifies internal state to reflect the fact that one more page is stored + * within this chunk. + * @param pageLengthOnDisk + * size of the page + * @param singleWriter + * indicates whether page belongs to append mode capable map + * (single writer map). Such pages are "pinned" to the chunk, + * they can't be evacuated (moved to a different chunk) while + */ + void accountForWrittenPage(int pageLengthOnDisk, boolean singleWriter) { + maxLen += pageLengthOnDisk; + pageCount++; + maxLenLive += pageLengthOnDisk; + pageCountLive++; + if (singleWriter) { + pinCount++; + } + assert pageCount - pageCountLive == occupancy.cardinality() + : pageCount + " - " + pageCountLive + " <> " + occupancy.cardinality() + " : " + occupancy; + } + + /** + * Modifies internal state to reflect the fact that one the pages within + * this chunk was removed from the map. 
+ * + * @param pageNo + * sequential page number within the chunk + * @param pageLength + * on disk of the removed page + * @param pinned + * whether removed page was pinned + * @param now + * is a moment in time (since creation of the store), when + * removal is recorded, and retention period starts + * @param version + * at which page was removed + * @return true if all of the pages, this chunk contains, were already + * removed, and false otherwise + */ + boolean accountForRemovedPage(int pageNo, int pageLength, boolean pinned, long now, long version) { + assert isSaved() : this; + // legacy chunks do not have a table of content, + // therefore pageNo is not valid, skip + if (tocPos > 0) { + assert pageNo >= 0 && pageNo < pageCount : pageNo + " // " + pageCount; + assert !occupancy.get(pageNo) : pageNo + " " + this + " " + occupancy; + assert pageCount - pageCountLive == occupancy.cardinality() + : pageCount + " - " + pageCountLive + " <> " + occupancy.cardinality() + " : " + occupancy; + occupancy.set(pageNo); + } + + maxLenLive -= pageLength; + pageCountLive--; + if (pinned) { + pinCount--; + } + + if (unusedAtVersion < version) { + unusedAtVersion = version; + } + + assert pinCount >= 0 : this; + assert pageCountLive >= 0 : this; + assert pinCount <= pageCountLive : this; + assert maxLenLive >= 0 : this; + assert (pageCountLive == 0) == (maxLenLive == 0) : this; + + if (!isLive()) { + unused = now; + return true; + } + return false; + } + @Override public String toString() { return asString(); } + + public static final class PositionComparator implements Comparator { + public static final Comparator INSTANCE = new PositionComparator(); + + private PositionComparator() {} + + @Override + public int compare(Chunk one, Chunk two) { + return Long.compare(one.block, two.block); + } + } } diff --git a/h2/src/main/org/h2/mvstore/Cursor.java b/h2/src/main/org/h2/mvstore/Cursor.java index c3db5c2e63..d60ca8c29a 100644 --- a/h2/src/main/org/h2/mvstore/Cursor.java +++ 
b/h2/src/main/org/h2/mvstore/Cursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -9,69 +9,82 @@ import java.util.NoSuchElementException; /** - * A cursor to iterate over elements in ascending order. + * A cursor to iterate over elements in ascending or descending order. * * @param the key type * @param the value type */ -public class Cursor implements Iterator { +public final class Cursor implements Iterator { + private final boolean reverse; private final K to; - private CursorPos cursorPos; - private CursorPos keeper; + private CursorPos cursorPos; + private CursorPos keeper; private K current; private K last; private V lastValue; - private Page lastPage; + private Page lastPage; - public Cursor(Page root, K from) { - this(root, from, null); + + public Cursor(RootReference rootReference, K from, K to) { + this(rootReference, from, to, false); } - public Cursor(Page root, K from, K to) { - this.cursorPos = traverseDown(root, from); + /** + * @param rootReference of the tree + * @param from starting key (inclusive), if null start from the first / last key + * @param to ending key (inclusive), if null there is no boundary + * @param reverse true if tree should be iterated in key's descending order + */ + public Cursor(RootReference rootReference, K from, K to, boolean reverse) { + this.lastPage = rootReference.root; + this.cursorPos = traverseDown(lastPage, from, reverse); this.to = to; + this.reverse = reverse; } @Override - @SuppressWarnings("unchecked") public boolean hasNext() { if (cursorPos != null) { + int increment = reverse ? 
-1 : 1; while (current == null) { - Page page = cursorPos.page; + Page page = cursorPos.page; int index = cursorPos.index; - if (index >= (page.isLeaf() ? page.getKeyCount() : page.map.getChildPageCount(page))) { - CursorPos tmp = cursorPos; + if (reverse ? index < 0 : index >= upperBound(page)) { + // traversal of this page is over, going up a level or stop if at the root already + CursorPos tmp = cursorPos; cursorPos = cursorPos.parent; - tmp.parent = keeper; - keeper = tmp; - if(cursorPos == null) - { + if (cursorPos == null) { return false; } + tmp.parent = keeper; + keeper = tmp; } else { + // traverse down to the leaf taking the leftmost path while (!page.isLeaf()) { page = page.getChildPage(index); + index = reverse ? upperBound(page) - 1 : 0; if (keeper == null) { - cursorPos = new CursorPos(page, 0, cursorPos); + cursorPos = new CursorPos<>(page, index, cursorPos); } else { - CursorPos tmp = keeper; + CursorPos tmp = keeper; keeper = keeper.parent; tmp.parent = cursorPos; tmp.page = page; - tmp.index = 0; + tmp.index = index; cursorPos = tmp; } - index = 0; } - K key = (K) page.getKey(index); - if (to != null && page.map.getKeyType().compare(key, to) > 0) { - return false; + if (reverse ? 
index >= 0 : index < page.getKeyCount()) { + K key = page.getKey(index); + if (to != null && Integer.signum(page.map.getKeyType().compare(key, to)) == increment) { + return false; + } + current = last = key; + lastValue = page.getValue(index); + lastPage = page; } - current = last = key; - lastValue = (V) page.getValue(index); - lastPage = page; } - ++cursorPos.index; + cursorPos.index += increment; } } return current != null; @@ -109,7 +122,8 @@ public V getValue() { * * @return the page */ - Page getPage() { + @SuppressWarnings("unused") + Page getPage() { return lastPage; } @@ -126,51 +140,46 @@ public void skip(long n) { } } else if(hasNext()) { assert cursorPos != null; - CursorPos cp = cursorPos; - CursorPos parent; + CursorPos cp = cursorPos; + CursorPos parent; while ((parent = cp.parent) != null) cp = parent; - Page root = cp.page; - @SuppressWarnings("unchecked") - MVMap map = (MVMap) root.map; + Page root = cp.page; + MVMap map = root.map; long index = map.getKeyIndex(next()); - last = map.getKey(index + n); - this.cursorPos = traverseDown(root, last); + last = map.getKey(index + (reverse ? -n : n)); + this.cursorPos = traverseDown(root, last, reverse); } } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removal is not supported"); - } - /** * Fetch the next entry that is equal or larger than the given key, starting - * from the given page. This method retains the stack. + * from the given page. This method returns the path. 
* - * @param p the page to start from - * @param key the key to search, null means search for the first key + * @param key type + * @param value type + * + * @param page to start from as a root + * @param key to search for, null means search for the first available key + * @param reverse true if traversal is in reverse direction, false otherwise + * @return CursorPos representing path from the entry found, + * or from insertion point if not, + * all the way up to to the root page provided */ - private static CursorPos traverseDown(Page p, Object key) { - CursorPos cursorPos = null; - while (!p.isLeaf()) { - int index = 0; - if(key != null) { - index = p.binarySearch(key) + 1; - if (index < 0) { - index = -index; - } + static CursorPos traverseDown(Page page, K key, boolean reverse) { + CursorPos cursorPos = key != null ? CursorPos.traverseDown(page, key) : + reverse ? page.getAppendCursorPos(null) : page.getPrependCursorPos(null); + int index = cursorPos.index; + if (index < 0) { + index = ~index; + if (reverse) { + --index; } - cursorPos = new CursorPos(p, index, cursorPos); - p = p.getChildPage(index); + cursorPos.index = index; } - int index = 0; - if(key != null) { - index = p.binarySearch(key); - if (index < 0) { - index = -index - 1; - } - } - return new CursorPos(p, index, cursorPos); + return cursorPos; + } + + private static int upperBound(Page page) { + return page.isLeaf() ? page.getKeyCount() : page.map.getChildPageCount(page); } } diff --git a/h2/src/main/org/h2/mvstore/CursorPos.java b/h2/src/main/org/h2/mvstore/CursorPos.java index b7e5a5d77d..15334bc9d4 100644 --- a/h2/src/main/org/h2/mvstore/CursorPos.java +++ b/h2/src/main/org/h2/mvstore/CursorPos.java @@ -1,35 +1,89 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore; /** - * A position in a cursor + * A position in a cursor. + * Instance represents a node in the linked list, which traces path + * from a specific (target) key within a leaf node all the way up to te root + * (bottom up path). */ -public class CursorPos { +public final class CursorPos { /** - * The current page. + * The page at the current level. */ - public Page page; + public Page page; /** - * The current index. + * Index of the key (within page above) used to go down to a lower level + * in case of intermediate nodes, or index of the target key for leaf a node. + * In a later case, it could be negative, if the key is not present. */ public int index; /** - * The position in the parent page, if any. + * Next node in the linked list, representing the position within parent level, + * or null, if we are at the root level already. */ - public CursorPos parent; + public CursorPos parent; - public CursorPos(Page page, int index, CursorPos parent) { + + public CursorPos(Page page, int index, CursorPos parent) { this.page = page; this.index = index; this.parent = parent; } + /** + * Searches for a given key and creates a breadcrumb trail through a B-tree + * rooted at a given Page. Resulting path starts at "insertion point" for a + * given key and goes back to the root. + * + * @param key type + * @param value type + * + * @param page root of the tree + * @param key the key to search for + * @return head of the CursorPos chain (insertion point) + */ + static CursorPos traverseDown(Page page, K key) { + CursorPos cursorPos = null; + while (!page.isLeaf()) { + int index = page.binarySearch(key) + 1; + if (index < 0) { + index = -index; + } + cursorPos = new CursorPos<>(page, index, cursorPos); + page = page.getChildPage(index); + } + return new CursorPos<>(page, page.binarySearch(key), cursorPos); + } + + /** + * Calculate the memory used by changes that are not yet stored. 
+ * + * @param version the version + * @return the amount of memory + */ + int processRemovalInfo(long version) { + int unsavedMemory = 0; + for (CursorPos head = this; head != null; head = head.parent) { + unsavedMemory += head.page.removePage(version); + } + return unsavedMemory; + } + + @Override + public String toString() { + return "CursorPos{" + + "page=" + page + + ", index=" + index + + ", parent=" + parent + + '}'; + } } diff --git a/h2/src/main/org/h2/mvstore/DataUtils.java b/h2/src/main/org/h2/mvstore/DataUtils.java index 116e407975..872e7b79e6 100644 --- a/h2/src/main/org/h2/mvstore/DataUtils.java +++ b/h2/src/main/org/h2/mvstore/DataUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -17,6 +17,7 @@ import java.util.Map; import org.h2.engine.Constants; +import org.h2.jdbc.JdbcException; import org.h2.util.StringUtils; /** @@ -107,6 +108,12 @@ public final class DataUtils { */ public static final int ERROR_TRANSACTIONS_DEADLOCK = 105; + /** + * The transaction store can not be initialized because data type + * is not found in type registry. + */ + public static final int ERROR_UNKNOWN_DATA_TYPE = 106; + /** * The type for leaf page. */ @@ -127,6 +134,11 @@ public final class DataUtils { */ public static final int PAGE_COMPRESSED_HIGH = 2 + 4; + /** + * The bit mask for pages with page sequential number. + */ + public static final int PAGE_HAS_PAGE_NO = 8; + /** * The maximum length of a variable size int. */ @@ -154,6 +166,34 @@ public final class DataUtils { */ public static final int PAGE_LARGE = 2 * 1024 * 1024; + // The following are key prefixes used in layout map + + /** + * The prefix for chunks ("chunk."). 
This, plus the chunk id (hex encoded) + * is the key, and the serialized chunk metadata is the value. + */ + public static final String META_CHUNK = "chunk."; + + /** + * The prefix for root positions of maps ("root."). This, plus the map id + * (hex encoded) is the key, and the position (hex encoded) is the value. + */ + public static final String META_ROOT = "root."; + + // The following are key prefixes used in meta map + + /** + * The prefix for names ("name."). This, plus the name of the map, is the + * key, and the map id (hex encoded) is the value. + */ + public static final String META_NAME = "name."; + + /** + * The prefix for maps ("map."). This, plus the map id (hex encoded) is the + * key, and the serialized in the map metadata is the value. + */ + public static final String META_MAP = "map."; + /** * Get the length of the variable size int. * @@ -300,6 +340,16 @@ public static void writeStringData(ByteBuffer buff, } } + /** + * Read a string. + * + * @param buff the source buffer + * @return the value + */ + public static String readString(ByteBuffer buff) { + return readString(buff, readVarInt(buff)); + } + /** * Read a string. 
* @@ -398,7 +448,7 @@ public static void copyExcept(Object src, Object dst, int oldSize, * @param file the file channel * @param pos the absolute position within the file * @param dst the byte buffer - * @throws IllegalStateException if some data could not be read + * @throws MVStoreException if some data could not be read */ public static void readFully(FileChannel file, long pos, ByteBuffer dst) { try { @@ -417,11 +467,11 @@ public static void readFully(FileChannel file, long pos, ByteBuffer dst) { } catch (IOException e2) { size = -1; } - throw newIllegalStateException( + throw newMVStoreException( ERROR_READING_FAILED, - "Reading from {0} failed; file length {1} " + - "read length {2} at {3}", - file, size, dst.remaining(), pos, e); + "Reading from file {0} failed at {1} (length {2}), " + + "read {3}, remaining {4}", + file, pos, size, dst.position(), dst.remaining(), e); } } @@ -440,7 +490,7 @@ public static void writeFully(FileChannel file, long pos, ByteBuffer src) { off += len; } while (src.remaining() > 0); } catch (IOException e) { - throw newIllegalStateException( + throw newMVStoreException( ERROR_WRITING_FAILED, "Writing to {0} failed; length {1} at {2}", file, src.remaining(), pos, e); @@ -491,14 +541,34 @@ public static int getPageChunkId(long pos) { } /** - * Get the maximum length for the given code. - * For the code 31, PAGE_LARGE is returned. + * Get the map id from the chunk's table of content element. + * + * @param tocElement packed table of content element + * @return the map id + */ + public static int getPageMapId(long tocElement) { + return (int) (tocElement >>> 38); + } + + /** + * Get the maximum length for the given page position. * * @param pos the position * @return the maximum length */ public static int getPageMaxLength(long pos) { int code = (int) ((pos >> 1) & 31); + return decodePageLength(code); + } + + /** + * Get the maximum length for the given code. + * For the code 31, PAGE_LARGE is returned. 
+ * + * @param code encoded page length + * @return the maximum length + */ + public static int decodePageLength(int code) { if (code == 31) { return PAGE_LARGE; } @@ -508,11 +578,11 @@ public static int getPageMaxLength(long pos) { /** * Get the offset from the position. * - * @param pos the position + * @param tocElement packed table of content element * @return the offset */ - public static int getPageOffset(long pos) { - return (int) (pos >> 6); + public static int getPageOffset(long tocElement) { + return (int) (tocElement >> 6); } /** @@ -525,6 +595,15 @@ public static int getPageType(long pos) { return ((int) pos) & 1; } + /** + * Determines whether specified file position corresponds to a leaf page + * @param pos the position + * @return true if it is a leaf, false otherwise + */ + public static boolean isLeafPosition(long pos) { + return getPageType(pos) == PAGE_TYPE_LEAF; + } + /** * Find out if page was saved. * @@ -532,12 +611,23 @@ public static int getPageType(long pos) { * @return true if page has been saved */ public static boolean isPageSaved(long pos) { - return pos != 0; + return (pos & ~1L) != 0; + } + + /** + * Find out if page was removed. + * + * @param pos the position + * @return true if page has been removed (no longer accessible from the + * current root of the tree) + */ + static boolean isPageRemoved(long pos) { + return pos == 1L; } /** * Get the position of this page. The following information is encoded in - * the position: the chunk id, the offset, the maximum length, and the type + * the position: the chunk id, the page sequential number, the maximum length, and the type * (node or leaf). 
* * @param chunkId the chunk id @@ -546,8 +636,7 @@ public static boolean isPageSaved(long pos) { * @param type the page type (1 for node, 0 for leaf) * @return the position */ - public static long getPagePos(int chunkId, int offset, - int length, int type) { + public static long getPagePos(int chunkId, int offset, int length, int type) { long pos = (long) chunkId << 38; pos |= (long) offset << 6; pos |= encodeLength(length) << 1; @@ -555,6 +644,36 @@ public static long getPagePos(int chunkId, int offset, return pos; } + /** + * Convert tocElement into pagePos by replacing mapId with chunkId. + * + * @param chunkId the chunk id + * @param tocElement the element + * @return the page position + */ + public static long getPagePos(int chunkId, long tocElement) { + return (tocElement & 0x3FFFFFFFFFL) | ((long) chunkId << 38); + } + + /** + * Create table of content element. The following information is encoded in it: + * the map id, the page offset, the maximum length, and the type + * (node or leaf). + * + * @param mapId the chunk id + * @param offset the offset + * @param length the length + * @param type the page type (1 for node, 0 for leaf) + * @return the position + */ + public static long getTocElement(int mapId, int offset, int length, int type) { + long pos = (long) mapId << 38; + pos |= (long) offset << 6; + pos |= encodeLength(length) << 1; + pos |= type; + return pos; + } + /** * Calculate a check value for the given integer. 
A check value is mean to * verify the data is consistent with a high probability, but not meant to @@ -665,7 +784,7 @@ private static int parseMapValue(StringBuilder buff, String s, int i, int size) c = s.charAt(i++); if (c == '\\') { if (i == size) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); + throw newMVStoreException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); } c = s.charAt(i++); } else if (c == '\"') { @@ -685,7 +804,7 @@ private static int parseMapValue(StringBuilder buff, String s, int i, int size) * * @param s the list * @return the map - * @throws IllegalStateException if parsing failed + * @throws MVStoreException if parsing failed */ public static HashMap parseMap(String s) { HashMap map = new HashMap<>(); @@ -694,7 +813,7 @@ public static HashMap parseMap(String s) { int startKey = i; i = s.indexOf(':', i); if (i < 0) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); + throw newMVStoreException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); } String key = s.substring(startKey, i++); i = parseMapValue(buff, s, i, size); @@ -709,9 +828,9 @@ public static HashMap parseMap(String s) { * * @param bytes encoded map * @return the map without mapping for {@code "fletcher"}, or {@code null} if checksum is wrong - * @throws IllegalStateException if parsing failed + * or parameter do not represent a properly formatted map serialization */ - public static HashMap parseChecksummedMap(byte[] bytes) { + static HashMap parseChecksummedMap(byte[] bytes) { int start = 0, end = bytes.length; while (start < end && bytes[start] <= ' ') { start++; @@ -726,7 +845,8 @@ public static HashMap parseChecksummedMap(byte[] bytes) { int startKey = i; i = s.indexOf(':', i); if (i < 0) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); + // Corrupted map + return null; } if (i - startKey == 8 && s.regionMatches(startKey, "fletcher", 0, 8)) { parseMapValue(buff, s, i + 1, size); @@ -751,7 +871,7 @@ public 
static HashMap parseChecksummedMap(byte[] bytes) { * * @param s the list * @return value of name item, or {@code null} - * @throws IllegalStateException if parsing failed + * @throws MVStoreException if parsing failed */ public static String getMapName(String s) { return getFromMap(s, "name"); @@ -763,7 +883,7 @@ public static String getMapName(String s) { * @param s the list * @param key the name of the key * @return value of the specified item, or {@code null} - * @throws IllegalStateException if parsing failed + * @throws MVStoreException if parsing failed */ public static String getFromMap(String s, String key) { int keyLength = key.length(); @@ -771,7 +891,7 @@ public static String getFromMap(String s, String key) { int startKey = i; i = s.indexOf(':', i); if (i < 0) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); + throw newMVStoreException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); } if (i++ - startKey == keyLength && s.regionMatches(startKey, key, 0, keyLength)) { StringBuilder buff = new StringBuilder(); @@ -787,7 +907,7 @@ public static String getFromMap(String s, String key) { c = s.charAt(i++); if (c == '\\') { if (i++ == size) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); + throw newMVStoreException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); } } else if (c == '\"') { break; @@ -871,16 +991,16 @@ public static IllegalArgumentException newIllegalArgumentException( } /** - * Create a new IllegalStateException. + * Create a new MVStoreException. * * @param errorCode the error code * @param message the message * @param arguments the arguments * @return the exception */ - public static IllegalStateException newIllegalStateException( + public static MVStoreException newMVStoreException( int errorCode, String message, Object... 
arguments) { - return initCause(new IllegalStateException( + return initCause(new MVStoreException(errorCode, formatMessage(errorCode, message, arguments)), arguments); } @@ -924,26 +1044,6 @@ public static String formatMessage(int errorCode, String message, "/" + errorCode + "]"; } - /** - * Get the error code from an exception message. - * - * @param m the message - * @return the error code, or 0 if none - */ - public static int getErrorCode(String m) { - if (m != null && m.endsWith("]")) { - int dash = m.lastIndexOf('/'); - if (dash >= 0) { - try { - return StringUtils.parseUInt31(m, dash + 1, m.length() - 1); - } catch (NumberFormatException e) { - // no error code - } - } - } - return 0; - } - /** * Read a hex long value from a map. * @@ -951,7 +1051,7 @@ public static int getErrorCode(String m) { * @param key the key * @param defaultValue if the value is null * @return the parsed value - * @throws IllegalStateException if parsing fails + * @throws MVStoreException if parsing fails */ public static long readHexLong(Map map, String key, long defaultValue) { Object v = map.get(key); @@ -963,7 +1063,7 @@ public static long readHexLong(Map map, String key, long defaultValue try { return parseHexLong((String) v); } catch (NumberFormatException e) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, + throw newMVStoreException(ERROR_FILE_CORRUPT, "Error parsing the value {0}", v, e); } } @@ -973,7 +1073,7 @@ public static long readHexLong(Map map, String key, long defaultValue * * @param x the string * @return the parsed value - * @throws IllegalStateException if parsing fails + * @throws MVStoreException if parsing fails */ public static long parseHexLong(String x) { try { @@ -985,7 +1085,7 @@ public static long parseHexLong(String x) { } return Long.parseLong(x, 16); } catch (NumberFormatException e) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, + throw newMVStoreException(ERROR_FILE_CORRUPT, "Error parsing the value {0}", x, e); } } @@ -995,7 +1095,7 
@@ public static long parseHexLong(String x) { * * @param x the string * @return the parsed value - * @throws IllegalStateException if parsing fails + * @throws MVStoreException if parsing fails */ public static int parseHexInt(String x) { try { @@ -1003,7 +1103,7 @@ public static int parseHexInt(String x) { // in Java 8, we can use Integer.parseLong(x, 16); return (int) Long.parseLong(x, 16); } catch (NumberFormatException e) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, + throw newMVStoreException(ERROR_FILE_CORRUPT, "Error parsing the value {0}", x, e); } } @@ -1015,9 +1115,9 @@ public static int parseHexInt(String x) { * @param key the key * @param defaultValue if the value is null * @return the parsed value - * @throws IllegalStateException if parsing fails + * @throws MVStoreException if parsing fails */ - public static int readHexInt(Map map, String key, int defaultValue) { + static int readHexInt(Map map, String key, int defaultValue) { Object v = map.get(key); if (v == null) { return defaultValue; @@ -1028,11 +1128,26 @@ public static int readHexInt(Map map, String key, int defaultValue) { // support unsigned hex value return (int) Long.parseLong((String) v, 16); } catch (NumberFormatException e) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, + throw newMVStoreException(ERROR_FILE_CORRUPT, "Error parsing the value {0}", v, e); } } + /** + * Parse the hex-encoded bytes of an entry in the map. + * + * @param map the map + * @param key the key + * @return the byte array, or null if not in the map + */ + static byte[] parseHexBytes(Map map, String key) { + Object v = map.get(key); + if (v == null) { + return null; + } + return StringUtils.convertHexToBytes((String)v); + } + /** * Get the configuration parameter value, or default. 
* @@ -1041,7 +1156,7 @@ public static int readHexInt(Map map, String key, int defaultValue) { * @param defaultValue the default * @return the configured value or default */ - public static int getConfigParam(Map config, String key, int defaultValue) { + static int getConfigParam(Map config, String key, int defaultValue) { Object o = config.get(key); if (o instanceof Number) { return ((Number) o).intValue(); @@ -1055,4 +1170,21 @@ public static int getConfigParam(Map config, String key, int defaultV return defaultValue; } + /** + * Convert an exception to an IO exception. + * + * @param e the root cause + * @return the IO exception + */ + public static IOException convertToIOException(Throwable e) { + if (e instanceof IOException) { + return (IOException) e; + } + if (e instanceof JdbcException) { + if (e.getCause() != null) { + e = e.getCause(); + } + } + return new IOException(e.toString(), e); + } } diff --git a/h2/src/main/org/h2/mvstore/FileStore.java b/h2/src/main/org/h2/mvstore/FileStore.java index cd8b2a7135..dc1142fcac 100644 --- a/h2/src/main/org/h2/mvstore/FileStore.java +++ b/h2/src/main/org/h2/mvstore/FileStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -13,9 +13,8 @@ import java.util.concurrent.atomic.AtomicLong; import org.h2.mvstore.cache.FilePathCache; import org.h2.store.fs.FilePath; -import org.h2.store.fs.FilePathDisk; -import org.h2.store.fs.FilePathEncrypt; -import org.h2.store.fs.FilePathNio; +import org.h2.store.fs.encrypt.FileEncrypt; +import org.h2.store.fs.encrypt.FilePathEncrypt; /** * The default storage mechanism of the MVStore. 
This implementation persists @@ -130,14 +129,6 @@ public void open(String fileName, boolean readOnly, char[] encryptionKey) { } // ensure the Cache file system is registered FilePathCache.INSTANCE.getScheme(); - FilePath p = FilePath.get(fileName); - // if no explicit scheme was specified, NIO is used - if (p instanceof FilePathDisk && - !fileName.startsWith(p.getScheme() + ":")) { - // ensure the NIO file system is registered - FilePathNio.class.getName(); - fileName = "nio:" + fileName; - } this.fileName = fileName; FilePath f = FilePath.get(fileName); FilePath parent = f.getParent(); @@ -154,7 +145,7 @@ public void open(String fileName, boolean readOnly, char[] encryptionKey) { if (encryptionKey != null) { byte[] key = FilePathEncrypt.getPasswordBytes(encryptionKey); encryptedFile = file; - file = new FilePathEncrypt.FileEncrypt(fileName, key, file); + file = new FileEncrypt(fileName, key, file); } try { if (readOnly) { @@ -163,20 +154,20 @@ public void open(String fileName, boolean readOnly, char[] encryptionKey) { fileLock = file.tryLock(); } } catch (OverlappingFileLockException e) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_LOCKED, "The file is locked: {0}", fileName, e); } if (fileLock == null) { try { close(); } catch (Exception ignore) {} - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_LOCKED, "The file is locked: {0}", fileName); } fileSize = file.size(); } catch (IOException e) { try { close(); } catch (Exception ignore) {} - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Could not open file {0}", fileName, e); } @@ -194,7 +185,7 @@ public void close() { file.close(); } } catch (Exception e) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_WRITING_FAILED, "Closing failed for file {0}", fileName, e); } finally { @@ -211,7 
+202,7 @@ public void sync() { try { file.force(true); } catch (IOException e) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_WRITING_FAILED, "Could not sync file {0}", fileName, e); } @@ -242,7 +233,7 @@ public void truncate(long size) { return; } catch (IOException e) { if (++attemptCount == 10) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_WRITING_FAILED, "Could not truncate file {0} to size {1}", fileName, size, e); @@ -342,20 +333,30 @@ public void markUsed(long pos, int length) { * Allocate a number of blocks and mark them as used. * * @param length the number of bytes to allocate + * @param reservedLow start block index of the reserved area (inclusive) + * @param reservedHigh end block index of the reserved area (exclusive), + * special value -1 means beginning of the infinite free area * @return the start position in bytes */ - public long allocate(int length) { - return freeSpace.allocate(length); + long allocate(int length, long reservedLow, long reservedHigh) { + return freeSpace.allocate(length, reservedLow, reservedHigh); } /** * Calculate starting position of the prospective allocation. 
* - * @param length the number of bytes to allocate - * @return the start position in bytes + * @param blocks the number of blocks to allocate + * @param reservedLow start block index of the reserved area (inclusive) + * @param reservedHigh end block index of the reserved area (exclusive), + * special value -1 means beginning of the infinite free area + * @return the starting block index */ - public long predictAllocation(int length) { - return freeSpace.predictAllocation(length); + long predictAllocation(int blocks, long reservedLow, long reservedHigh) { + return freeSpace.predictAllocation(blocks, reservedLow, reservedHigh); + } + + boolean isFragmented() { + return freeSpace.isFragmented(); } /** @@ -372,6 +373,19 @@ public int getFillRate() { return freeSpace.getFillRate(); } + /** + * Calculates a prospective fill rate, which store would have after rewrite + * of sparsely populated chunk(s) and evacuation of still live data into a + * new chunk. + * + * @param vacatedBlocks + * number of blocks vacated + * @return prospective fill rate (0 - 100) + */ + public int getProjectedFillRate(int vacatedBlocks) { + return freeSpace.getProjectedFillRate(vacatedBlocks); + } + long getFirstFree() { return freeSpace.getFirstFree(); } @@ -380,6 +394,20 @@ long getFileLengthInUse() { return freeSpace.getLastFree(); } + /** + * Calculates relative "priority" for chunk to be moved. + * + * @param block where chunk starts + * @return priority, bigger number indicate that chunk need to be moved sooner + */ + int getMovePriority(int block) { + return freeSpace.getMovePriority(block); + } + + long getAfterLastBlock() { + return freeSpace.getAfterLastBlock(); + } + /** * Mark the file as empty. 
*/ diff --git a/h2/src/main/org/h2/mvstore/FreeSpaceBitSet.java b/h2/src/main/org/h2/mvstore/FreeSpaceBitSet.java index 817fa6148c..f302283bec 100644 --- a/h2/src/main/org/h2/mvstore/FreeSpaceBitSet.java +++ b/h2/src/main/org/h2/mvstore/FreeSpaceBitSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -31,6 +31,16 @@ public class FreeSpaceBitSet { */ private final BitSet set = new BitSet(); + /** + * Left-shifting register, which holds outcomes of recent allocations. Only + * allocations done in "reuseSpace" mode are recorded here. For example, + * rightmost bit set to 1 means that last allocation failed to find a hole + * big enough, and next bit set to 0 means that previous allocation request + * have found one. + */ + private int failureFlags; + + /** * Create a new free space map. * @@ -94,32 +104,68 @@ public boolean isFree(long pos, int length) { * @return the start position in bytes */ public long allocate(int length) { - return allocate(length, true); + return allocate(length, 0, 0); } /** - * Calculate starting position of the prospective allocation. + * Allocate a number of blocks and mark them as used. 
* * @param length the number of bytes to allocate + * @param reservedLow start block index of the reserved area (inclusive) + * @param reservedHigh end block index of the reserved area (exclusive), + * special value -1 means beginning of the infinite free area * @return the start position in bytes */ - public long predictAllocation(int length) { - return allocate(length, false); + long allocate(int length, long reservedLow, long reservedHigh) { + return getPos(allocate(getBlockCount(length), (int)reservedLow, (int)reservedHigh, true)); } - private long allocate(int length, boolean allocate) { - int blocks = getBlockCount(length); + /** + * Calculate starting position of the prospective allocation. + * + * @param blocks the number of blocks to allocate + * @param reservedLow start block index of the reserved area (inclusive) + * @param reservedHigh end block index of the reserved area (exclusive), + * special value -1 means beginning of the infinite free area + * @return the starting block index + */ + long predictAllocation(int blocks, long reservedLow, long reservedHigh) { + return allocate(blocks, (int)reservedLow, (int)reservedHigh, false); + } + + boolean isFragmented() { + return Integer.bitCount(failureFlags & 0x0F) > 1; + } + + private int allocate(int blocks, int reservedLow, int reservedHigh, boolean allocate) { + int freeBlocksTotal = 0; for (int i = 0;;) { int start = set.nextClearBit(i); int end = set.nextSetBit(start + 1); - if (end < 0 || end - start >= blocks) { + int freeBlocks = end - start; + if (end < 0 || freeBlocks >= blocks) { + if ((reservedHigh < 0 || start < reservedHigh) && start + blocks > reservedLow) { // overlap detected + if (reservedHigh < 0) { + start = getAfterLastBlock(); + end = -1; + } else { + i = reservedHigh; + continue; + } + } assert set.nextSetBit(start) == -1 || set.nextSetBit(start) >= start + blocks : "Double alloc: " + Integer.toHexString(start) + "/" + Integer.toHexString(blocks) + " " + this; if (allocate) { 
set.set(start, start + blocks); + } else { + failureFlags <<= 1; + if (end < 0 && freeBlocksTotal > 4 * blocks) { + failureFlags |= 1; + } } - return getPos(start); + return start; } + freeBlocksTotal += freeBlocks; i = end; } } @@ -133,8 +179,13 @@ private long allocate(int length, boolean allocate) { public void markUsed(long pos, int length) { int start = getBlock(pos); int blocks = getBlockCount(length); - assert set.nextSetBit(start) == -1 || set.nextSetBit(start) >= start + blocks : - "Double mark: " + Integer.toHexString(start) + "/" + Integer.toHexString(blocks) + " " + this; + // this is not an assert because we get called during file opening + if (set.nextSetBit(start) != -1 && set.nextSetBit(start) < start + blocks ) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, + "Double mark: " + Integer.toHexString(start) + + "/" + Integer.toHexString(blocks) + " " + this); + } set.set(start, start + blocks); } @@ -170,12 +221,37 @@ private int getBlockCount(int length) { * * @return the fill rate (0 - 100) */ - public int getFillRate() { - int cardinality = set.cardinality(); - if (cardinality == 0) { - return 0; - } - return Math.max(1, (int)(100L * cardinality / set.length())); + int getFillRate() { + return getProjectedFillRate(0); + } + + /** + * Calculates a prospective fill rate, which store would have after rewrite + * of sparsely populated chunk(s) and evacuation of still live data into a + * new chunk. 
+ * + * @param vacatedBlocks + * number of blocks vacated as a result of live data evacuation less + * number of blocks in prospective chunk with evacuated live data + * @return prospective fill rate (0 - 100) + */ + int getProjectedFillRate(int vacatedBlocks) { + // it's not bullet-proof against race condition but should be good enough + // to get approximation without holding a store lock + int usedBlocks; + int totalBlocks; + // to prevent infinite loop, which I saw once + int cnt = 3; + do { + if (--cnt == 0) { + return 100; + } + totalBlocks = set.length(); + usedBlocks = set.cardinality(); + } while (totalBlocks != set.length() || usedBlocks > totalBlocks); + usedBlocks -= firstFreeBlock + vacatedBlocks; + totalBlocks -= firstFreeBlock; + return usedBlocks == 0 ? 0 : (int)((100L * usedBlocks + totalBlocks - 1) / totalBlocks); } /** @@ -183,7 +259,7 @@ public int getFillRate() { * * @return the position. */ - public long getFirstFree() { + long getFirstFree() { return getPos(set.nextClearBit(0)); } @@ -192,8 +268,45 @@ public long getFirstFree() { * * @return the position. */ - public long getLastFree() { - return getPos(set.previousSetBit(set.size()-1) + 1); + long getLastFree() { + return getPos(getAfterLastBlock()); + } + + /** + * Get the index of the first block after last occupied one. + * It marks the beginning of the last (infinite) free space. + * + * @return block index + */ + int getAfterLastBlock() { + return set.previousSetBit(set.size() - 1) + 1; + } + + /** + * Calculates relative "priority" for chunk to be moved. 
+ * + * @param block where chunk starts + * @return priority, bigger number indicate that chunk need to be moved sooner + */ + int getMovePriority(int block) { + // The most desirable chunks to move are the ones sitting within + // a relatively short span of occupied blocks which is surrounded + // from both sides by relatively long free spans + int prevEnd = set.previousClearBit(block); + int freeSize; + if (prevEnd < 0) { + prevEnd = firstFreeBlock; + freeSize = 0; + } else { + freeSize = prevEnd - set.previousSetBit(prevEnd); + } + + int nextStart = set.nextClearBit(block); + int nextEnd = set.nextSetBit(nextStart); + if (nextEnd >= 0) { + freeSize += nextEnd - nextStart; + } + return (nextStart - prevEnd - 1) * 1000 / (freeSize + 1); } @Override @@ -235,5 +348,4 @@ public String toString() { buff.append(']'); return buff.toString(); } - } \ No newline at end of file diff --git a/h2/src/main/org/h2/mvstore/MVMap.java b/h2/src/main/org/h2/mvstore/MVMap.java index 391e88cde1..d1de1f181b 100644 --- a/h2/src/main/org/h2/mvstore/MVMap.java +++ b/h2/src/main/org/h2/mvstore/MVMap.java @@ -1,10 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; +import static org.h2.engine.Constants.MEMORY_POINTER; + import java.util.AbstractList; import java.util.AbstractMap; import java.util.AbstractSet; @@ -14,10 +16,12 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; + import org.h2.mvstore.type.DataType; import org.h2.mvstore.type.ObjectDataType; -import org.h2.mvstore.type.StringDataType; +import org.h2.util.MemoryEstimator; /** * A stored map. 
@@ -28,9 +32,7 @@ * @param the key class * @param the value class */ -public class MVMap extends AbstractMap - implements ConcurrentMap -{ +public class MVMap extends AbstractMap implements ConcurrentMap { /** * The store. @@ -40,12 +42,12 @@ public class MVMap extends AbstractMap /** * Reference to the current root page. */ - private final AtomicReference root; + private final AtomicReference> root; private final int id; private final long createVersion; - private final DataType keyType; - private final DataType valueType; + private final DataType keyType; + private final DataType valueType; private final int keysPerPage; private final boolean singleWriter; private final K[] keysBuffer; @@ -61,6 +63,8 @@ public class MVMap extends AbstractMap private volatile boolean closed; private boolean readOnly; private boolean isVolatile; + private final AtomicLong avgKeySize; + private final AtomicLong avgValSize; /** * This designates the "last stored" version for a store which was @@ -68,13 +72,12 @@ public class MVMap extends AbstractMap */ static final long INITIAL_VERSION = -1; - protected MVMap(Map config) { - this((MVStore) config.get("store"), - (DataType) config.get("key"), - (DataType) config.get("val"), + + protected MVMap(Map config, DataType keyType, DataType valueType) { + this((MVStore) config.get("store"), keyType, valueType, DataUtils.readHexInt(config, "id", 0), DataUtils.readHexLong(config, "createVersion", 0), - new AtomicReference(), + new AtomicReference<>(), ((MVStore) config.get("store")).getKeysPerPage(), config.containsKey("singleWriter") && (Boolean) config.get("singleWriter") ); @@ -82,21 +85,20 @@ protected MVMap(Map config) { } // constructor for cloneIt() + @SuppressWarnings("CopyConstructorMissesField") protected MVMap(MVMap source) { this(source.store, source.keyType, source.valueType, source.id, source.createVersion, new AtomicReference<>(source.root.get()), source.keysPerPage, source.singleWriter); } // meta map constructor - MVMap(MVStore 
store) { - this(store, StringDataType.INSTANCE,StringDataType.INSTANCE, 0, 0, new AtomicReference(), - store.getKeysPerPage(), false); + MVMap(MVStore store, int id, DataType keyType, DataType valueType) { + this(store, keyType, valueType, id, 0, new AtomicReference<>(), store.getKeysPerPage(), false); setInitialRoot(createEmptyLeaf(), store.getCurrentVersion()); } - @SuppressWarnings("unchecked") - private MVMap(MVStore store, DataType keyType, DataType valueType, int id, long createVersion, - AtomicReference root, int keysPerPage, boolean singleWriter) { + private MVMap(MVStore store, DataType keyType, DataType valueType, int id, long createVersion, + AtomicReference> root, int keysPerPage, boolean singleWriter) { this.store = store; this.id = id; this.createVersion = createVersion; @@ -104,9 +106,12 @@ private MVMap(MVStore store, DataType keyType, DataType valueType, int id, long this.valueType = valueType; this.root = root; this.keysPerPage = keysPerPage; - this.keysBuffer = singleWriter ? (K[]) new Object[keysPerPage] : null; - this.valuesBuffer = singleWriter ? (V[]) new Object[keysPerPage] : null; + this.keysBuffer = singleWriter ? keyType.createStorage(keysPerPage) : null; + this.valuesBuffer = singleWriter ? valueType.createStorage(keysPerPage) : null; this.singleWriter = singleWriter; + this.avgKeySize = keyType.isMemoryEstimationAllowed() ? new AtomicLong() : null; + this.avgValSize = valueType.isMemoryEstimationAllowed() ? new AtomicLong() : null; + } /** @@ -125,7 +130,7 @@ protected MVMap cloneIt() { * @return the metadata key */ static String getMapRootKey(int mapId) { - return "root." + Integer.toHexString(mapId); + return DataUtils.META_ROOT + Integer.toHexString(mapId); } /** @@ -135,7 +140,7 @@ static String getMapRootKey(int mapId) { * @return the metadata key */ static String getMapKey(int mapId) { - return "map." 
+ Integer.toHexString(mapId); + return DataUtils.META_MAP + Integer.toHexString(mapId); } /** @@ -181,15 +186,14 @@ public final K getKey(long index) { if (index < 0 || index >= sizeAsLong()) { return null; } - Page p = getRootPage(); + Page p = getRootPage(); long offset = 0; while (true) { if (p.isLeaf()) { if (index >= offset + p.getKeyCount()) { return null; } - @SuppressWarnings("unchecked") - K key = (K) p.getKey((int) (index - offset)); + K key = p.getKey((int) (index - offset)); return key; } int i = 0, size = getChildPageCount(p); @@ -250,7 +254,7 @@ public int indexOf(Object key) { * @return the index */ public final long getKeyIndex(K key) { - Page p = getRootPage(); + Page p = getRootPage(); if (p.getTotalCount() == 0) { return -1; } @@ -279,23 +283,26 @@ public final long getKeyIndex(K key) { * @param first whether to retrieve the first key * @return the key, or null if the map is empty */ - @SuppressWarnings("unchecked") private K getFirstLast(boolean first) { - Page p = getRootPage(); + Page p = getRootPage(); + return getFirstLast(p, first); + } + + private K getFirstLast(Page p, boolean first) { if (p.getTotalCount() == 0) { return null; } while (true) { if (p.isLeaf()) { - return (K) p.getKey(first ? 0 : p.getKeyCount() - 1); + return p.getKey(first ? 0 : p.getKeyCount() - 1); } p = p.getChildPage(first ? 0 : getChildPageCount(p) - 1); } } /** - * Get the smallest key that is larger than the given key, or null if no - * such key exists. + * Get the smallest key that is larger than the given key (next key in ascending order), + * or null if no such key exists. * * @param key the key * @return the result @@ -304,6 +311,18 @@ public final K higherKey(K key) { return getMinMax(key, false, true); } + /** + * Get the smallest key that is larger than the given key, for the given + * root page, or null if no such key exists. 
+ * + * @param rootRef the root reference of the map + * @param key to start from + * @return the result + */ + public final K higherKey(RootReference rootRef, K key) { + return getMinMax(rootRef, key, false, true); + } + /** * Get the smallest key that is larger or equal to this key. * @@ -335,6 +354,18 @@ public final K lowerKey(K key) { return getMinMax(key, true, true); } + /** + * Get the largest key that is smaller than the given key, for the given + * root page, or null if no such key exists. + * + * @param rootRef the root page + * @param key the key + * @return the result + */ + public final K lowerKey(RootReference rootRef, K key) { + return getMinMax(rootRef, key, true, true); + } + /** * Get the smallest or largest key using the given bounds. * @@ -344,11 +375,14 @@ public final K lowerKey(K key) { * @return the key, or null if no such key exists */ private K getMinMax(K key, boolean min, boolean excluding) { - return getMinMax(getRootPage(), key, min, excluding); + return getMinMax(flushAndGetRoot(), key, min, excluding); } - @SuppressWarnings("unchecked") - private K getMinMax(Page p, K key, boolean min, boolean excluding) { + private K getMinMax(RootReference rootRef, K key, boolean min, boolean excluding) { + return getMinMax(rootRef.root, key, min, excluding); + } + + private K getMinMax(Page p, K key, boolean min, boolean excluding) { int x = p.binarySearch(key); if (p.isLeaf()) { if (x < 0) { @@ -359,7 +393,7 @@ private K getMinMax(Page p, K key, boolean min, boolean excluding) { if (x < 0 || x >= p.getKeyCount()) { return null; } - return (K) p.getKey(x); + return p.getKey(x); } if (x++ < 0) { x = -x; @@ -384,9 +418,10 @@ private K getMinMax(Page p, K key, boolean min, boolean excluding) { * @return the value, or null if not found * @throws ClassCastException if type of the specified key is not compatible with this map */ + @SuppressWarnings("unchecked") @Override public final V get(Object key) { - return get(getRootPage(), key); + return 
get(getRootPage(), (K) key); } /** @@ -397,9 +432,8 @@ public final V get(Object key) { * @return the value, or null if not found * @throws ClassCastException if type of the specified key is not compatible with this map */ - @SuppressWarnings("unchecked") - public V get(Page p, Object key) { - return (V) Page.get(p, key); + public V get(Page p, K key) { + return Page.get(p, key); } @Override @@ -412,13 +446,49 @@ public final boolean containsKey(Object key) { */ @Override public void clear() { - RootReference rootReference; - Page emptyRootPage = createEmptyLeaf(); + clearIt(); + } + + /** + * Remove all entries and return the root reference. + * + * @return the new root reference + */ + RootReference clearIt() { + Page emptyRootPage = createEmptyLeaf(); int attempt = 0; - do { - rootReference = getRoot(); - } while (!updateRoot(rootReference, emptyRootPage, ++attempt)); - rootReference.root.removeAllRecursive(); + while (true) { + RootReference rootReference = flushAndGetRoot(); + if (rootReference.getTotalCount() == 0) { + return rootReference; + } + boolean locked = rootReference.isLockedByCurrentThread(); + if (!locked) { + if (attempt++ == 0) { + beforeWrite(); + } else if (attempt > 3 || rootReference.isLocked()) { + rootReference = lockRoot(rootReference, attempt); + locked = true; + } + } + Page rootPage = rootReference.root; + long version = rootReference.version; + try { + if (!locked) { + rootReference = rootReference.updateRootPage(emptyRootPage, attempt); + if (rootReference == null) { + continue; + } + } + store.registerUnsavedMemory(rootPage.removeAllRecursive(version)); + rootPage = emptyRootPage; + return rootReference; + } finally { + if(locked) { + unlockRoot(rootPage); + } + } + } } /** @@ -476,12 +546,14 @@ public boolean remove(Object key, Object value) { /** * Check whether the two values are equal. 
* + * @param type of values to compare + * * @param a the first value * @param b the second value * @param datatype to use for comparison * @return true if they are equal */ - static boolean areValuesEqual(DataType datatype, Object a, Object b) { + static boolean areValuesEqual(DataType datatype, X a, X b) { return a == b || a != null && b != null && datatype.compare(a, b) == 0; } @@ -522,7 +594,8 @@ public final V replace(K key, V value) { * @param b the second key * @return -1 if the first key is smaller, 1 if bigger, 0 if equal */ - final int compare(Object a, Object b) { + @SuppressWarnings("unused") + final int compare(K a, K b) { return keyType.compare(a, b); } @@ -531,7 +604,7 @@ final int compare(Object a, Object b) { * * @return the key type */ - public final DataType getKeyType() { + public final DataType getKeyType() { return keyType; } @@ -540,17 +613,21 @@ public final DataType getKeyType() { * * @return the value type */ - public final DataType getValueType() { + public final DataType getValueType() { return valueType; } + boolean isSingleWriter() { + return singleWriter; + } + /** * Read a page. 
* * @param pos the position of the page * @return the page */ - final Page readPage(long pos) { + final Page readPage(long pos) { return store.readPage(this, pos); } @@ -561,13 +638,22 @@ final Page readPage(long pos) { * */ final void setRootPos(long rootPos, long version) { - Page root = readOrCreateRootPage(rootPos); + Page root = readOrCreateRootPage(rootPos); + if (root.map != this) { + // this can only happen on concurrent opening of existing map, + // when second thread picks up some cached page already owned by + // the first map's instantiation (both maps share the same id) + assert id == root.map.id; + // since it is unknown which one will win the race, + // let each map instance to have it's own copy + root = root.copy(this, false); + } setInitialRoot(root, version); setWriteVersion(store.getCurrentVersion()); } - private Page readOrCreateRootPage(long rootPos) { - Page root = rootPos == 0 ? createEmptyLeaf() : readPage(rootPos); + private Page readOrCreateRootPage(long rootPos) { + Page root = rootPos == 0 ? createEmptyLeaf() : readPage(rootPos); return root; } @@ -578,96 +664,79 @@ private Page readOrCreateRootPage(long rootPos) { * @return the iterator */ public final Iterator keyIterator(K from) { - return new Cursor(getRootPage(), from); + return cursor(from, null, false); } /** - * Re-write any pages that belong to one of the chunks in the given set. + * Iterate over a number of keys in reverse order * - * @param set the set of chunk ids + * @param from the first key to return + * @return the iterator */ - final void rewrite(Set set) { - rewrite(getRootPage(), set); + public final Iterator keyIteratorReverse(K from) { + return cursor(from, null, true); } - private int rewrite(Page p, Set set) { - if (p.isLeaf()) { - long pos = p.getPos(); - int chunkId = DataUtils.getPageChunkId(pos); - if (!set.contains(chunkId)) { - return 0; - } - assert p.getKeyCount() > 0; - return rewritePage(p) ? 
0 : 1; - } - int writtenPageCount = 0; - for (int i = 0; i < getChildPageCount(p); i++) { - long childPos = p.getChildPagePos(i); - if (childPos != 0 && DataUtils.getPageType(childPos) == DataUtils.PAGE_TYPE_LEAF) { - // we would need to load the page, and it's a leaf: - // only do that if it's within the set of chunks we are - // interested in - int chunkId = DataUtils.getPageChunkId(childPos); - if (!set.contains(chunkId)) { - continue; - } - } - writtenPageCount += rewrite(p.getChildPage(i), set); - } - if (writtenPageCount == 0) { - long pos = p.getPos(); - int chunkId = DataUtils.getPageChunkId(pos); - if (set.contains(chunkId)) { - // an inner node page that is in one of the chunks, - // but only points to chunks that are not in the set: - // if no child was changed, we need to do that now - // (this is not needed if anyway one of the children - // was changed, as this would have updated this - // page as well) - Page p2 = p; - while (!p2.isLeaf()) { - p2 = p2.getChildPage(0); - } - if (rewritePage(p2)) { - return 0; - } - writtenPageCount++; - } + final boolean rewritePage(long pagePos) { + Page p = readPage(pagePos); + if (p.getKeyCount()==0) { + return true; } - return writtenPageCount; - } - - private boolean rewritePage(Page p) { - @SuppressWarnings("unchecked") - K key = (K) p.getKey(0); - V value = get(key); - if (value != null) { - if (isClosed()) { - return true; - } - replace(key, value, value); + assert p.isSaved(); + K key = p.getKey(0); + if (!isClosed()) { + RewriteDecisionMaker decisionMaker = new RewriteDecisionMaker<>(p.getPos()); + V result = operate(key, null, decisionMaker); + boolean res = decisionMaker.getDecision() != Decision.ABORT; + assert !res || result != null; + return res; } return false; } /** - * Get a cursor to iterate over a number of keys and values. + * Get a cursor to iterate over a number of keys and values in the latest version of this map. 
* * @param from the first key to return * @return the cursor */ public final Cursor cursor(K from) { - return new Cursor<>(getRootPage(), from); + return cursor(from, null, false); + } + + /** + * Get a cursor to iterate over a number of keys and values in the latest version of this map. + * + * @param from the first key to return + * @param to the last key to return + * @param reverse if true, iterate in reverse (descending) order + * @return the cursor + */ + public final Cursor cursor(K from, K to, boolean reverse) { + return cursor(flushAndGetRoot(), from, to, reverse); + } + + /** + * Get a cursor to iterate over a number of keys and values. + * + * @param rootReference of this map's version to iterate over + * @param from the first key to return + * @param to the last key to return + * @param reverse if true, iterate in reverse (descending) order + * @return the cursor + */ + public Cursor cursor(RootReference rootReference, K from, K to, boolean reverse) { + return new Cursor<>(rootReference, from, to, reverse); } @Override public final Set> entrySet() { - final Page root = this.getRootPage(); + final RootReference rootReference = flushAndGetRoot(); return new AbstractSet>() { @Override public Iterator> iterator() { - final Cursor cursor = new Cursor<>(root, null); + final Cursor cursor = cursor(rootReference, null, null, false); return new Iterator>() { @Override @@ -680,12 +749,6 @@ public Entry next() { K k = cursor.next(); return new SimpleImmutableEntry<>(k, cursor.getValue()); } - - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removing is not supported"); - } }; } @@ -706,12 +769,12 @@ public boolean contains(Object o) { @Override public Set keySet() { - final Page root = this.getRootPage(); + final RootReference rootReference = flushAndGetRoot(); return new AbstractSet() { @Override public Iterator iterator() { - return new Cursor(root, null); + return cursor(rootReference, null, null, false); } @Override 
@@ -740,6 +803,10 @@ public final MVStore getStore() { return store; } + protected final boolean isPersistent() { + return store.getFileStore() != null && !isVolatile; + } + /** * Get the map id. Please note the map id may be different after compacting * a store. @@ -755,11 +822,11 @@ public final int getId() { * * @return the root page */ - public final Page getRootPage() { + public final Page getRootPage() { return flushAndGetRoot().root; } - public RootReference getRoot() { + public RootReference getRoot() { return root.get(); } @@ -768,10 +835,10 @@ public RootReference getRoot() { * * @return current root reference */ - public RootReference flushAndGetRoot() { - RootReference rootReference = getRoot(); + public RootReference flushAndGetRoot() { + RootReference rootReference = getRoot(); if (singleWriter && rootReference.getAppendCounter() > 0) { - return flushAppendBuffer(rootReference, false); + return flushAppendBuffer(rootReference, true); } return rootReference; } @@ -782,8 +849,20 @@ public RootReference flushAndGetRoot() { * @param rootPage root page * @param version initial version */ - final void setInitialRoot(Page rootPage, long version) { - root.set(new RootReference(rootPage, version)); + final void setInitialRoot(Page rootPage, long version) { + root.set(new RootReference<>(rootPage, version)); + } + + /** + * Compare and set the root reference. + * + * @param expectedRootReference the old (expected) + * @param updatedRootReference the new + * @return whether updating worked + */ + final boolean compareAndSetRoot(RootReference expectedRootReference, + RootReference updatedRootReference) { + return root.compareAndSet(expectedRootReference, updatedRootReference); } /** @@ -802,11 +881,11 @@ final void rollbackTo(long version) { * Roll the root back to the specified version. 
* * @param version to rollback to + * @return true if rollback was a success, false if there was not enough in-memory history */ - void rollbackRoot(long version) - { - RootReference rootReference = flushAndGetRoot(); - RootReference previous; + boolean rollbackRoot(long version) { + RootReference rootReference = flushAndGetRoot(); + RootReference previous; while (rootReference.version >= version && (previous = rootReference.previous) != null) { if (root.compareAndSet(rootReference, previous)) { rootReference = previous; @@ -814,41 +893,31 @@ void rollbackRoot(long version) } } setWriteVersion(version); + return rootReference.version < version; } /** * Use the new root page from now on. + * + * @param the key class + * @param the value class * @param expectedRootReference expected current root reference * @param newRootPage the new root page * @param attemptUpdateCounter how many attempt (including current) * were made to update root * @return new RootReference or null if update failed */ - protected final boolean updateRoot(RootReference expectedRootReference, Page newRootPage, int attemptUpdateCounter) - { - RootReference currentRoot = flushAndGetRoot(); - return currentRoot == expectedRootReference && - !currentRoot.lockedForUpdate && - root.compareAndSet(currentRoot, - new RootReference(currentRoot, newRootPage, attemptUpdateCounter)); + protected static boolean updateRoot(RootReference expectedRootReference, Page newRootPage, + int attemptUpdateCounter) { + return expectedRootReference.updateRootPage(newRootPage, attemptUpdateCounter) != null; } /** * Forget those old versions that are no longer needed. * @param rootReference to inspect */ - private void removeUnusedOldVersions(RootReference rootReference) { - long oldest = store.getOldestVersionToKeep(); - // We need to keep at least one previous version (if any) here, - // because in order to retain whole history of some version - // we really need last root of the previous version. 
- // Root labeled with version "X" is the LAST known root for that version - // and therefore the FIRST known root for the version "X+1" - for(RootReference rootRef = rootReference; rootRef != null; rootRef = rootRef.previous) { - if (rootRef.version < oldest) { - rootRef.previous = null; - } - } + private void removeUnusedOldVersions(RootReference rootReference) { + rootReference.removeUnusedOldVersions(store.getOldestVersionToKeep()); } public final boolean isReadOnly() { @@ -884,10 +953,11 @@ public final boolean isVolatile() { * or if another thread is concurrently writing */ protected final void beforeWrite() { + assert !getRoot().isLockedByCurrentThread() : getRoot(); if (closed) { int id = getId(); String mapName = store.getMapName(id); - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_CLOSED, "Map {0}({1}) is closed. {2}", mapName, id, store.getPanicException()); } if (readOnly) { @@ -934,22 +1004,15 @@ public boolean isEmpty() { return sizeAsLong() == 0; } - public final long getCreateVersion() { + final long getCreateVersion() { return createVersion; } - /** - * Remove the given page (make the space available). - * - * @param pos the position of the page to remove - * @param memory the number of bytes used for this page - */ - protected final void removePage(long pos, int memory) { - store.removePage(pos, memory); - } - /** * Open an old version for the given map. + * It will restore map at last known state of the version specified. + * (at the point right before the commit() call, which advanced map to the next version) + * Map is opened in read-only mode. 
* * @param version the version * @return the map @@ -963,16 +1026,14 @@ public final MVMap openVersion(long version) { DataUtils.checkArgument(version >= createVersion, "Unknown version {0}; this map was created in version is {1}", version, createVersion); - RootReference rootReference = getRoot(); + RootReference rootReference = flushAndGetRoot(); removeUnusedOldVersions(rootReference); - while (rootReference != null && rootReference.version > version) { - rootReference = rootReference.previous; + RootReference previous; + while ((previous = rootReference.previous) != null && previous.version >= version) { + rootReference = previous; } - - if (rootReference == null) { - // smaller than all in-memory versions - MVMap map = openReadOnly(store.getRootPos(getId(), version), version); - return map; + if (previous == null && version < store.getOldestVersionToKeep()) { + throw DataUtils.newIllegalArgumentException("Unknown version {0}", version); } MVMap m = openReadOnly(rootReference.root, version); assert m.getVersion() <= version : m.getVersion() + " <= " + version; @@ -987,11 +1048,11 @@ public final MVMap openVersion(long version) { * @return the opened map */ final MVMap openReadOnly(long rootPos, long version) { - Page root = readOrCreateRootPage(rootPos); + Page root = readOrCreateRootPage(rootPos); return openReadOnly(root, version); } - private MVMap openReadOnly(Page root, long version) { + private MVMap openReadOnly(Page root, long version) { MVMap m = cloneIt(); m.readOnly = true; m.setInitialRoot(root, version); @@ -1005,14 +1066,7 @@ private MVMap openReadOnly(Page root, long version) { * @return version */ public final long getVersion() { - return getVersion(getRoot()); - } - - private static long getVersion(RootReference rootReference) { - RootReference previous = rootReference.previous; - return previous == null || previous.root != rootReference.root || - previous.appendCounter != rootReference.appendCounter ? 
- rootReference.version : previous.version; + return getRoot().getVersion(); } /** @@ -1022,10 +1076,7 @@ private static long getVersion(RootReference rootReference) { * @return true if has changes */ final boolean hasChangesSince(long version) { - RootReference rootReference = getRoot(); - Page root = rootReference.root; - return !root.isSaved() && rootReference.getTotalCount() > 0 || - getVersion(rootReference) > version; + return getRoot().hasChangesSince(version, isPersistent()); } /** @@ -1036,7 +1087,7 @@ final boolean hasChangesSince(long version) { * @param p the page * @return the number of direct children */ - protected int getChildPageCount(Page p) { + protected int getChildPageCount(Page p) { return p.getRawChildPageCount(); } @@ -1070,22 +1121,38 @@ protected String asString(String name) { return buff.toString(); } - final RootReference setWriteVersion(long writeVersion) { + final RootReference setWriteVersion(long writeVersion) { int attempt = 0; while(true) { - RootReference rootReference = flushAndGetRoot(); + RootReference rootReference = flushAndGetRoot(); if(rootReference.version >= writeVersion) { return rootReference; } else if (isClosed()) { - if (rootReference.version < store.getOldestVersionToKeep()) { + // map was closed a while back and can not possibly be in use by now + // it's time to remove it completely from the store (it was anonymous already) + if (rootReference.getVersion() + 1 < store.getOldestVersionToKeep()) { + store.deregisterMapRoot(id); return null; } - return rootReference; } - RootReference updatedRootReference = new RootReference(rootReference, writeVersion, ++attempt); - if(root.compareAndSet(rootReference, updatedRootReference)) { - removeUnusedOldVersions(updatedRootReference); - return updatedRootReference; + + RootReference lockedRootReference = null; + if (++attempt > 3 || rootReference.isLocked()) { + lockedRootReference = lockRoot(rootReference, attempt); + rootReference = flushAndGetRoot(); + } + + try { + 
rootReference = rootReference.tryUnlockAndUpdateVersion(writeVersion, attempt); + if (rootReference != null) { + lockedRootReference = null; + removeUnusedOldVersions(rootReference); + return rootReference; + } + } finally { + if (lockedRootReference != null) { + unlockRoot(); + } } } } @@ -1095,7 +1162,7 @@ final RootReference setWriteVersion(long writeVersion) { * * @return new page */ - public Page createEmptyLeaf() { + protected Page createEmptyLeaf() { return Page.createEmptyLeaf(this); } @@ -1104,7 +1171,7 @@ public Page createEmptyLeaf() { * * @return new page */ - protected Page createEmptyNode() { + protected Page createEmptyNode() { return Page.createEmptyNode(this); } @@ -1114,12 +1181,6 @@ protected Page createEmptyNode() { * @param sourceMap the source map */ final void copyFrom(MVMap sourceMap) { - // We are going to cheat a little bit in the copy() - // by temporary setting map's root to some arbitrary nodes. - // This will allow for newly created ones to be saved. - // That's why it's important to preserve all chunks - // created in the process, especially if retention time - // is set to a lower value, or even 0. MVStore.TxCounter txCounter = store.registerVersionUsage(); try { beforeWrite(); @@ -1129,8 +1190,8 @@ final void copyFrom(MVMap sourceMap) { } } - private Page copy(Page source, Page parent, int index) { - Page target = source.copy(this); + private void copy(Page source, Page parent, int index) { + Page target = source.copy(this, true); if (parent == null) { setInitialRoot(target, INITIAL_VERSION); } else { @@ -1147,48 +1208,55 @@ private Page copy(Page source, Page parent, int index) { } target.setComplete(); } - store.registerUnsavedPage(target.getMemory()); + store.registerUnsavedMemory(target.getMemory()); if (store.isSaveNeeded()) { store.commit(); } - return target; } /** * If map was used in append mode, this method will ensure that append buffer * is flushed - emptied with all entries inserted into map as a new leaf. 
* @param rootReference current RootReference - * @param lockedForUpdate whether rootReference is pre-locked already and - * should stay locked upon return + * @param fullFlush whether buffer should be completely flushed, + * otherwise just a single empty slot is required * @return potentially updated RootReference */ - private RootReference flushAppendBuffer(RootReference rootReference, boolean lockedForUpdate) { - IntValueHolder unsavedMemoryHolder = new IntValueHolder(); - RootReference lockedRootReference = lockedForUpdate ? rootReference : null; + private RootReference flushAppendBuffer(RootReference rootReference, boolean fullFlush) { + boolean preLocked = rootReference.isLockedByCurrentThread(); + boolean locked = preLocked; + int keysPerPage = store.getKeysPerPage(); try { + IntValueHolder unsavedMemoryHolder = new IntValueHolder(); int attempt = 0; int keyCount; - while ((keyCount = rootReference.getAppendCounter()) > 0) { - if (lockedRootReference == null) { - lockedRootReference = tryLock(rootReference, ++attempt); - rootReference = lockedRootReference == null ? getRoot() : lockedRootReference; - continue; + int availabilityThreshold = fullFlush ? 
0 : keysPerPage - 1; + while ((keyCount = rootReference.getAppendCounter()) > availabilityThreshold) { + if (!locked) { + // instead of just calling lockRoot() we loop here and check if someone else + // already flushed the buffer, then we don't need a lock + rootReference = tryLock(rootReference, ++attempt); + if (rootReference == null) { + rootReference = getRoot(); + continue; + } + locked = true; } - Page rootPage = rootReference.root; - - CursorPos pos = rootPage.getAppendCursorPos(null); + Page rootPage = rootReference.root; + long version = rootReference.version; + CursorPos pos = rootPage.getAppendCursorPos(null); assert pos != null; assert pos.index < 0 : pos.index; int index = -pos.index - 1; assert index == pos.page.getKeyCount() : index + " != " + pos.page.getKeyCount(); - Page p = pos.page; - CursorPos tip = pos; + Page p = pos.page; + CursorPos tip = pos; pos = pos.parent; int remainingBuffer = 0; - Page page = null; - int available = store.getKeysPerPage() - p.getKeyCount(); + Page page = null; + int available = keysPerPage - p.getKeyCount(); if (available > 0) { p = p.copy(); if (keyCount <= available) { @@ -1196,23 +1264,27 @@ private RootReference flushAppendBuffer(RootReference rootReference, boolean loc } else { p.expand(available, keysBuffer, valuesBuffer); keyCount -= available; - if (lockedForUpdate) { - System.arraycopy(keysBuffer, available, keysBuffer, 0, keyCount); - System.arraycopy(valuesBuffer, available, valuesBuffer, 0, keyCount); - remainingBuffer = keyCount; - } else { - Object[] keys = new Object[keyCount]; - Object[] values = new Object[keyCount]; + if (fullFlush) { + K[] keys = p.createKeyStorage(keyCount); + V[] values = p.createValueStorage(keyCount); System.arraycopy(keysBuffer, available, keys, 0, keyCount); - System.arraycopy(valuesBuffer, available, values, 0, keyCount); + if (valuesBuffer != null) { + System.arraycopy(valuesBuffer, available, values, 0, keyCount); + } page = Page.createLeaf(this, keys, values, 0); + } 
else { + System.arraycopy(keysBuffer, available, keysBuffer, 0, keyCount); + if (valuesBuffer != null) { + System.arraycopy(valuesBuffer, available, valuesBuffer, 0, keyCount); + } + remainingBuffer = keyCount; } } } else { tip = tip.parent; page = Page.createLeaf(this, Arrays.copyOf(keysBuffer, keyCount), - Arrays.copyOf(valuesBuffer, keyCount), + valuesBuffer == null ? null : Arrays.copyOf(valuesBuffer, keyCount), 0); } @@ -1220,22 +1292,24 @@ private RootReference flushAppendBuffer(RootReference rootReference, boolean loc if (page != null) { assert page.map == this; assert page.getKeyCount() > 0; - Object key = page.getKey(0); + K key = page.getKey(0); unsavedMemoryHolder.value += page.getMemory(); while (true) { if (pos == null) { if (p.getKeyCount() == 0) { p = page; } else { - Object[] keys = new Object[]{key}; - Page.PageReference[] children = new Page.PageReference[]{ - new Page.PageReference(p), - new Page.PageReference(page)}; + K[] keys = p.createKeyStorage(1); + keys[0] = key; + Page.PageReference[] children = Page.createRefStorage(2); + children[0] = new Page.PageReference<>(p); + children[1] = new Page.PageReference<>(page); + unsavedMemoryHolder.value += p.getMemory(); p = Page.createNode(this, keys, children, p.getTotalCount() + page.getTotalCount(), 0); } break; } - Page c = p; + Page c = p; p = pos.page; index = pos.index; pos = pos.parent; @@ -1244,7 +1318,7 @@ private RootReference flushAppendBuffer(RootReference rootReference, boolean loc p.insertNode(index, key, c); keyCount = p.getKeyCount(); int at = keyCount - (p.isLeaf() ? 
1 : 2); - if (keyCount <= store.getKeysPerPage() && + if (keyCount <= keysPerPage && (p.getMemory() < store.getMaxPageSize() || at <= 0)) { break; } @@ -1254,40 +1328,36 @@ private RootReference flushAppendBuffer(RootReference rootReference, boolean loc } } p = replacePage(pos, p, unsavedMemoryHolder); - - RootReference updatedRootReference = new RootReference(rootReference, p, remainingBuffer, - lockedForUpdate); - if (root.compareAndSet(rootReference, updatedRootReference)) { - lockedRootReference = null; - while (tip != null) { - tip.page.removePage(); - tip = tip.parent; - } - if (store.getFileStore() != null) { - store.registerUnsavedPage(unsavedMemoryHolder.value); + rootReference = rootReference.updatePageAndLockedStatus(p, preLocked || isPersistent(), + remainingBuffer); + if (rootReference != null) { + // should always be the case, except for spurious failure? + locked = preLocked || isPersistent(); + if (isPersistent() && tip != null) { + store.registerUnsavedMemory(unsavedMemoryHolder.value + tip.processRemovalInfo(version)); } - assert lockedForUpdate || updatedRootReference.getAppendCounter() == 0; - return updatedRootReference; + assert rootReference.getAppendCounter() <= availabilityThreshold; + break; } rootReference = getRoot(); } } finally { - if (lockedRootReference != null && !lockedForUpdate) { - assert rootReference.root == lockedRootReference.root; + if (locked && !preLocked) { rootReference = unlockRoot(); } } return rootReference; } - private static Page replacePage(CursorPos path, Page replacement, IntValueHolder unsavedMemoryHolder) { - int unsavedMemory = replacement.getMemory(); + private static Page replacePage(CursorPos path, Page replacement, + IntValueHolder unsavedMemoryHolder) { + int unsavedMemory = replacement.isSaved() ? 
0 : replacement.getMemory(); while (path != null) { - Page parent = path.page; + Page parent = path.page; // condition below should always be true, but older versions (up to 1.4.197) // may create single-childed (with no keys) internal nodes, which we skip here if (parent.getKeyCount() > 0) { - Page child = replacement; + Page child = replacement; replacement = parent.copy(); replacement.setChild(path.index, child); unsavedMemory += replacement.getMemory(); @@ -1307,20 +1377,26 @@ private static Page replacePage(CursorPos path, Page replacement, IntValueHolder * @param value to be appended */ public void append(K key, V value) { - beforeWrite(); - RootReference rootReference = lockRoot(getRoot(), 1); - int appendCounter = rootReference.getAppendCounter(); - try { - if (appendCounter >= keysPerPage) { - rootReference = flushAppendBuffer(rootReference, true); - appendCounter = rootReference.getAppendCounter(); - assert appendCounter < keysPerPage; + if (singleWriter) { + beforeWrite(); + RootReference rootReference = lockRoot(getRoot(), 1); + int appendCounter = rootReference.getAppendCounter(); + try { + if (appendCounter >= keysPerPage) { + rootReference = flushAppendBuffer(rootReference, false); + appendCounter = rootReference.getAppendCounter(); + assert appendCounter < keysPerPage; + } + keysBuffer[appendCounter] = key; + if (valuesBuffer != null) { + valuesBuffer[appendCounter] = value; + } + ++appendCounter; + } finally { + unlockRoot(appendCounter); } - keysBuffer[appendCounter] = key; - valuesBuffer[appendCounter] = value; - ++appendCounter; - } finally { - unlockRoot(rootReference.root, appendCounter); + } else { + put(key, value); } } @@ -1330,24 +1406,31 @@ public void append(K key, V value) { * Non-updating method may be used concurrently, but latest removal may not be visible. 
*/ public void trimLast() { - RootReference rootReference = getRoot(); - int appendCounter = rootReference.getAppendCounter(); - boolean useRegularRemove = appendCounter == 0; - if (!useRegularRemove) { - rootReference = lockRoot(rootReference, 1); - appendCounter = rootReference.getAppendCounter(); - useRegularRemove = appendCounter == 0; + if (singleWriter) { + RootReference rootReference = getRoot(); + int appendCounter = rootReference.getAppendCounter(); + boolean useRegularRemove = appendCounter == 0; if (!useRegularRemove) { - --appendCounter; + rootReference = lockRoot(rootReference, 1); + try { + appendCounter = rootReference.getAppendCounter(); + useRegularRemove = appendCounter == 0; + if (!useRegularRemove) { + --appendCounter; + } + } finally { + unlockRoot(appendCounter); + } } - unlockRoot(rootReference.root, appendCounter); - } - if (useRegularRemove) { - Page lastLeaf = rootReference.root.getAppendCursorPos(null).page; - assert lastLeaf.isLeaf(); - assert lastLeaf.getKeyCount() > 0; - Object key = lastLeaf.getKey(lastLeaf.getKeyCount() - 1); - remove(key); + if (useRegularRemove) { + Page lastLeaf = rootReference.root.getAppendCursorPos(null).page; + assert lastLeaf.isLeaf(); + assert lastLeaf.getKeyCount() > 0; + Object key = lastLeaf.getKey(lastLeaf.getKeyCount() - 1); + remove(key); + } + } else { + remove(lastKey()); } } @@ -1374,13 +1457,13 @@ public interface MapBuilder, K, V> { */ M create(MVStore store, Map config); - DataType getKeyType(); + DataType getKeyType(); - DataType getValueType(); + DataType getValueType(); - void setKeyType(DataType dataType); + void setKeyType(DataType dataType); - void setValueType(DataType dataType); + void setValueType(DataType dataType); } @@ -1392,8 +1475,8 @@ public interface MapBuilder, K, V> { */ public abstract static class BasicBuilder, K, V> implements MapBuilder { - private DataType keyType; - private DataType valueType; + private DataType keyType; + private DataType valueType; /** * Create a new 
builder with the default key and value data types. @@ -1403,23 +1486,25 @@ protected BasicBuilder() { } @Override - public DataType getKeyType() { + public DataType getKeyType() { return keyType; } @Override - public DataType getValueType() { + public DataType getValueType() { return valueType; } + @SuppressWarnings("unchecked") @Override - public void setKeyType(DataType keyType) { - this.keyType = keyType; + public void setKeyType(DataType keyType) { + this.keyType = (DataType)keyType; } + @SuppressWarnings("unchecked") @Override - public void setValueType(DataType valueType) { - this.valueType = valueType; + public void setValueType(DataType valueType) { + this.valueType = (DataType)valueType; } /** @@ -1428,8 +1513,8 @@ public void setValueType(DataType valueType) { * @param keyType the key type * @return this */ - public BasicBuilder keyType(DataType keyType) { - this.keyType = keyType; + public BasicBuilder keyType(DataType keyType) { + setKeyType(keyType); return this; } @@ -1439,8 +1524,8 @@ public BasicBuilder keyType(DataType keyType) { * @param valueType the value type * @return this */ - public BasicBuilder valueType(DataType valueType) { - this.valueType = valueType; + public BasicBuilder valueType(DataType valueType) { + setValueType(valueType); return this; } @@ -1452,8 +1537,8 @@ public M create(MVStore store, Map config) { if (getValueType() == null) { setValueType(new ObjectDataType()); } - DataType keyType = getKeyType(); - DataType valueType = getValueType(); + DataType keyType = getKeyType(); + DataType valueType = getValueType(); config.put("store", store); config.put("key", keyType); config.put("val", valueType); @@ -1481,13 +1566,13 @@ public static class Builder extends BasicBuilder, K, V> { public Builder() {} @Override - public Builder keyType(DataType dataType) { + public Builder keyType(DataType dataType) { setKeyType(dataType); return this; } @Override - public Builder valueType(DataType dataType) { + public Builder valueType(DataType 
dataType) { setValueType(dataType); return this; } @@ -1508,12 +1593,15 @@ protected MVMap create(Map config) { config.put("singleWriter", singleWriter); Object type = config.get("type"); if(type == null || type.equals("rtree")) { - return new MVMap<>(config); + return new MVMap<>(config, getKeyType(), getValueType()); } throw new IllegalArgumentException("Incompatible map type"); } } + /** + * The decision on what to do on an update. + */ public enum Decision { ABORT, REMOVE, PUT, REPEAT } /** @@ -1527,8 +1615,7 @@ public enum Decision { ABORT, REMOVE, PUT, REPEAT } * * @param value type of the map */ - public abstract static class DecisionMaker - { + public abstract static class DecisionMaker { /** * Decision maker for transaction rollback. */ @@ -1604,14 +1691,27 @@ public String toString() { } }; + /** + * Makes a decision about how to proceed with the update. + * + * @param existingValue the old value + * @param providedValue the new value + * @param tip the cursor position + * @return the decision + */ + public Decision decide(V existingValue, V providedValue, CursorPos tip) { + return decide(existingValue, providedValue); + } + /** * Makes a decision about how to proceed with the update. * @param existingValue value currently exists in the map * @param providedValue original input value * @return PUT if a new value need to replace existing one or - * new value to be inserted if there is none + * a new value to be inserted if there is none * REMOVE if existing value should be deleted - * ABORT if update operation should be aborted + * ABORT if update operation should be aborted or repeated later + * REPEAT if update operation should be repeated immediately */ public abstract Decision decide(V existingValue, V providedValue); @@ -1644,44 +1744,51 @@ public void reset() {} * @param decisionMaker command object to make choices during transaction. 
* @return previous value, if mapping for that key existed, or null otherwise */ - @SuppressWarnings("unchecked") public V operate(K key, V value, DecisionMaker decisionMaker) { - beforeWrite(); IntValueHolder unsavedMemoryHolder = new IntValueHolder(); int attempt = 0; while(true) { - RootReference rootReference = flushAndGetRoot(); - RootReference lockedRootReference = null; - if ((++attempt > 3 || rootReference.lockedForUpdate)) { - lockedRootReference = lockRoot(rootReference, attempt); - rootReference = lockedRootReference; + RootReference rootReference = flushAndGetRoot(); + boolean locked = rootReference.isLockedByCurrentThread(); + if (!locked) { + if (attempt++ == 0) { + beforeWrite(); + } + if (attempt > 3 || rootReference.isLocked()) { + rootReference = lockRoot(rootReference, attempt); + locked = true; + } } - Page rootPage = rootReference.root; - CursorPos tip; + Page rootPage = rootReference.root; + long version = rootReference.version; + CursorPos tip; V result; unsavedMemoryHolder.value = 0; try { - CursorPos pos = traverseDown(rootPage, key); - Page p = pos.page; + CursorPos pos = CursorPos.traverseDown(rootPage, key); + if(!locked && rootReference != getRoot()) { + continue; + } + Page p = pos.page; int index = pos.index; tip = pos; pos = pos.parent; - result = index < 0 ? null : (V)p.getValue(index); - Decision decision = decisionMaker.decide(result, value); + result = index < 0 ? null : p.getValue(index); + Decision decision = decisionMaker.decide(result, value, tip); switch (decision) { case REPEAT: decisionMaker.reset(); continue; case ABORT: - if(rootReference != getRoot()) { + if(!locked && rootReference != getRoot()) { decisionMaker.reset(); continue; } return result; case REMOVE: { if (index < 0) { - if(rootReference != getRoot()) { + if(!locked && rootReference != getRoot()) { decisionMaker.reset(); continue; } @@ -1729,19 +1836,19 @@ public V operate(K key, V value, DecisionMaker decisionMaker) { && keyCount > (p.isLeaf() ? 
1 : 2)) { long totalCount = p.getTotalCount(); int at = keyCount >> 1; - Object k = p.getKey(at); - Page split = p.split(at); + K k = p.getKey(at); + Page split = p.split(at); unsavedMemoryHolder.value += p.getMemory() + split.getMemory(); if (pos == null) { - Object[] keys = { k }; - Page.PageReference[] children = { - new Page.PageReference(p), - new Page.PageReference(split) - }; + K[] keys = p.createKeyStorage(1); + keys[0] = k; + Page.PageReference[] children = Page.createRefStorage(2); + children[0] = new Page.PageReference<>(p); + children[1] = new Page.PageReference<>(split); p = Page.createNode(this, keys, children, totalCount, 0); break; } - Page c = p; + Page c = p; p = pos.page; index = pos.index; pos = pos.parent; @@ -1756,33 +1863,26 @@ public V operate(K key, V value, DecisionMaker decisionMaker) { } } rootPage = replacePage(pos, p, unsavedMemoryHolder); - if (lockedRootReference == null) { - if (!updateRoot(rootReference, rootPage, attempt)) { + if (!locked) { + rootReference = rootReference.updateRootPage(rootPage, attempt); + if (rootReference == null) { decisionMaker.reset(); continue; - } else { - notifyWaiters(); } } + store.registerUnsavedMemory(unsavedMemoryHolder.value + tip.processRemovalInfo(version)); + return result; } finally { - if(lockedRootReference != null) { + if(locked) { unlockRoot(rootPage); } } - while (tip != null) { - tip.page.removePage(); - tip = tip.parent; - } - if (store.getFileStore() != null) { - store.registerUnsavedPage(unsavedMemoryHolder.value); - } - return result; } } - private RootReference lockRoot(RootReference rootReference, int attempt) { + private RootReference lockRoot(RootReference rootReference, int attempt) { while(true) { - RootReference lockedRootReference = tryLock(rootReference, attempt++); + RootReference lockedRootReference = tryLock(rootReference, attempt++); if (lockedRootReference != null) { return lockedRootReference; } @@ -1790,15 +1890,20 @@ private RootReference lockRoot(RootReference 
rootReference, int attempt) { } } - private RootReference tryLock(RootReference rootReference, int attempt) { - if (!rootReference.lockedForUpdate) { - RootReference lockedRootReference = new RootReference(rootReference, attempt); - if (root.compareAndSet(rootReference, lockedRootReference)) { - return lockedRootReference; - } + /** + * Try to lock the root. + * + * @param rootReference the old root reference + * @param attempt the number of attempts so far + * @return the new root reference + */ + protected RootReference tryLock(RootReference rootReference, int attempt) { + RootReference lockedRootReference = rootReference.tryLock(attempt); + if (lockedRootReference != null) { + return lockedRootReference; } - - RootReference oldRootReference = rootReference.previous; + assert !rootReference.isLockedByCurrentThread() : rootReference; + RootReference oldRootReference = rootReference.previous; int contention = 1; if (oldRootReference != null) { long updateAttemptCounter = rootReference.updateAttemptCounter - @@ -1832,26 +1937,40 @@ private RootReference tryLock(RootReference rootReference, int attempt) { return null; } - private RootReference unlockRoot() { + /** + * Unlock the root page, the new root being null. + * + * @return the new root reference (never null) + */ + private RootReference unlockRoot() { return unlockRoot(null, -1); } - private RootReference unlockRoot(Page newRootPage) { + /** + * Unlock the root page. 
+ * + * @param newRootPage the new root + * @return the new root reference (never null) + */ + protected RootReference unlockRoot(Page newRootPage) { return unlockRoot(newRootPage, -1); } - private RootReference unlockRoot(Page newRootPage, int appendCounter) { - RootReference updatedRootReference; - boolean success; + private void unlockRoot(int appendCounter) { + unlockRoot(null, appendCounter); + } + + private RootReference unlockRoot(Page newRootPage, int appendCounter) { + RootReference updatedRootReference; do { - RootReference rootReference = getRoot(); - assert rootReference.lockedForUpdate; - updatedRootReference = new RootReference(rootReference, + RootReference rootReference = getRoot(); + assert rootReference.isLockedByCurrentThread(); + updatedRootReference = rootReference.updatePageAndLockedStatus( newRootPage == null ? rootReference.root : newRootPage, - appendCounter == -1 ? rootReference.getAppendCounter() : appendCounter, - false); - success = root.compareAndSet(rootReference, updatedRootReference); - } while(!success); + false, + appendCounter == -1 ? 
rootReference.getAppendCounter() : appendCounter + ); + } while(updatedRootReference == null); notifyWaiters(); return updatedRootReference; @@ -1866,25 +1985,56 @@ private void notifyWaiters() { } } - private static CursorPos traverseDown(Page p, Object key) { - CursorPos pos = null; - while (!p.isLeaf()) { - int index = p.binarySearch(key) + 1; - if (index < 0) { - index = -index; - } - pos = new CursorPos(p, index, pos); - p = p.getChildPage(index); + final boolean isMemoryEstimationAllowed() { + return avgKeySize != null || avgValSize != null; + } + + final int evaluateMemoryForKeys(K[] storage, int count) { + if (avgKeySize == null) { + return calculateMemory(keyType, storage, count); } - return new CursorPos(p, p.binarySearch(key), pos); + return MemoryEstimator.estimateMemory(avgKeySize, keyType, storage, count); + } + + final int evaluateMemoryForValues(V[] storage, int count) { + if (avgValSize == null) { + return calculateMemory(valueType, storage, count); + } + return MemoryEstimator.estimateMemory(avgValSize, valueType, storage, count); + } + + private static int calculateMemory(DataType keyType, T[] storage, int count) { + int mem = count * MEMORY_POINTER; + for (int i = 0; i < count; i++) { + mem += keyType.getMemory(storage[i]); + } + return mem; + } + + final int evaluateMemoryForKey(K key) { + if (avgKeySize == null) { + return keyType.getMemory(key); + } + return MemoryEstimator.estimateMemory(avgKeySize, keyType, key); + } + + final int evaluateMemoryForValue(V value) { + if (avgValSize == null) { + return valueType.getMemory(value); + } + return MemoryEstimator.estimateMemory(avgValSize, valueType, value); + } + + static int samplingPct(AtomicLong stats) { + return MemoryEstimator.samplingPct(stats); } private static final class EqualsDecisionMaker extends DecisionMaker { - private final DataType dataType; - private final V expectedValue; - private Decision decision; + private final DataType dataType; + private final V expectedValue; + private 
Decision decision; - EqualsDecisionMaker(DataType dataType, V expectedValue) { + EqualsDecisionMaker(DataType dataType, V expectedValue) { this.dataType = dataType; this.expectedValue = expectedValue; } @@ -1912,6 +2062,57 @@ public String toString() { } } + private static final class RewriteDecisionMaker extends DecisionMaker { + private final long pagePos; + private Decision decision; + + RewriteDecisionMaker(long pagePos) { + this.pagePos = pagePos; + } + + @Override + public Decision decide(V existingValue, V providedValue, CursorPos tip) { + assert decision == null; + decision = Decision.ABORT; + if(!DataUtils.isLeafPosition(pagePos)) { + while ((tip = tip.parent) != null) { + if (tip.page.getPos() == pagePos) { + decision = decide(existingValue, providedValue); + break; + } + } + } else if (tip.page.getPos() == pagePos) { + decision = decide(existingValue, providedValue); + } + return decision; + } + + @Override + public Decision decide(V existingValue, V providedValue) { + decision = existingValue == null ? Decision.ABORT : Decision.PUT; + return decision; + } + + @Override + public T selectValue(T existingValue, T providedValue) { + return existingValue; + } + + @Override + public void reset() { + decision = null; + } + + Decision getDecision() { + return decision; + } + + @Override + public String toString() { + return "rewrite"; + } + } + private static final class IntValueHolder { int value; diff --git a/h2/src/main/org/h2/mvstore/MVStore.java b/h2/src/main/org/h2/mvstore/MVStore.java index efc46e5cb8..46daadd11e 100644 --- a/h2/src/main/org/h2/mvstore/MVStore.java +++ b/h2/src/main/org/h2/mvstore/MVStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore; @@ -9,8 +9,10 @@ import java.lang.Thread.UncaughtExceptionHandler; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Arrays; +import java.util.BitSet; import java.util.Collections; import java.util.Comparator; import java.util.Deque; @@ -18,23 +20,32 @@ import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; +import java.util.List; import java.util.Map; import java.util.PriorityQueue; import java.util.Queue; import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.PriorityBlockingQueue; +import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Predicate; +import java.util.function.Supplier; import org.h2.compress.CompressDeflate; import org.h2.compress.CompressLZF; import org.h2.compress.Compressor; -import org.h2.engine.Constants; import org.h2.mvstore.cache.CacheLongKeyLIRS; +import org.h2.mvstore.type.StringDataType; import org.h2.util.MathUtils; import org.h2.util.Utils; @@ -122,10 +133,6 @@ to a map (possibly the metadata map) - a map lookup when reading old data; also, this old data map needs to be cleaned up somehow; maybe using an additional timeout -- rollback of removeMap should restore the data - - which has big consequences, as the metadata map - would probably need references to the root 
nodes of all maps - */ /** @@ -133,20 +140,35 @@ to a map (possibly the metadata map) - */ public class MVStore implements AutoCloseable { + // The following are attribute names (keys) in store header map + private static final String HDR_H = "H"; + private static final String HDR_BLOCK_SIZE = "blockSize"; + private static final String HDR_FORMAT = "format"; + private static final String HDR_CREATED = "created"; + private static final String HDR_FORMAT_READ = "formatRead"; + private static final String HDR_CHUNK = "chunk"; + private static final String HDR_BLOCK = "block"; + private static final String HDR_VERSION = "version"; + private static final String HDR_CLEAN = "clean"; + private static final String HDR_FLETCHER = "fletcher"; + + /** + * The key for the entry within "layout" map, which contains id of "meta" map. + * Entry value (hex encoded) is usually equal to 1, unless it's a legacy + * (upgraded) database and id 1 has been taken already by another map. + */ + public static final String META_ID_KEY = "meta.id"; + /** * The block size (physical sector size) of the disk. The store header is * written twice, one copy in each block, to ensure it survives a crash. */ static final int BLOCK_SIZE = 4 * 1024; - private static final int FORMAT_WRITE = 1; - private static final int FORMAT_READ = 1; - - /** - * Used to mark a chunk as free, when it was detected that live bookkeeping - * is incorrect. - */ - private static final int MARKED_FREE = 10_000_000; + private static final int FORMAT_WRITE_MIN = 2; + private static final int FORMAT_WRITE_MAX = 2; + private static final int FORMAT_READ_MIN = 2; + private static final int FORMAT_READ_MAX = 2; /** * Store is open. @@ -170,19 +192,33 @@ public class MVStore implements AutoCloseable { */ private static final int STATE_CLOSED = 3; + private static final int PIPE_LENGTH = 1; + + /** * Lock which governs access to major store operations: store(), close(), ... - * It should used in a non-reentrant fashion. 
* It serves as a replacement for synchronized(this), except it allows for * non-blocking lock attempts. */ private final ReentrantLock storeLock = new ReentrantLock(true); + private final ReentrantLock serializationLock = new ReentrantLock(true); + private final ReentrantLock saveChunkLock = new ReentrantLock(true); /** * Reference to a background thread, which is expected to be running, if any. */ private final AtomicReference backgroundWriterThread = new AtomicReference<>(); + /** + * Single-threaded executor for serialization of the store snapshot into ByteBuffer + */ + private ThreadPoolExecutor serializationExecutor; + + /** + * Single-threaded executor for saving ByteBuffer as a new Chunk + */ + private ThreadPoolExecutor bufferSaveExecutor; + private volatile boolean reuseSpace = true; private volatile int state; @@ -200,49 +236,54 @@ public class MVStore implements AutoCloseable { * It is split in 16 segments. The stack move distance is 2% of the expected * number of entries. */ - final CacheLongKeyLIRS cache; + private final CacheLongKeyLIRS> cache; /** - * The page chunk references cache. The default size is 4 MB, and the - * average size is 2 KB. It is split in 16 segments. The stack move distance - * is 2% of the expected number of entries. + * Cache for chunks "Table of Content" used to translate page's + * sequential number within containing chunk into byte position + * within chunk's image. Cache keyed by chunk id. */ - final CacheLongKeyLIRS cacheChunkRef; + private final CacheLongKeyLIRS chunksToC; /** * The newest chunk. If nothing was stored yet, this field is not set. */ - private Chunk lastChunk; + private volatile Chunk lastChunk; /** * The map of chunks. 
*/ - private final ConcurrentHashMap chunks = - new ConcurrentHashMap<>(); + private final ConcurrentHashMap chunks = new ConcurrentHashMap<>(); + + private final Queue removedPages = new PriorityBlockingQueue<>(); + + private final Deque deadChunks = new ArrayDeque<>(); private long updateCounter = 0; private long updateAttemptCounter = 0; /** - * The map of temporarily freed storage space caused by freed pages. - * It contains the number of freed entries per chunk. + * The layout map. Contains chunks metadata and root locations for all maps. + * This is relatively fast changing part of metadata */ - private final Map freedPageSpace = new HashMap<>(); + private final MVMap layout; /** - * The metadata map. Write access to this map needs to be done under storeLock. + * The metadata map. Holds name -> id and id -> name and id -> metadata + * mapping for all maps. This is relatively slow changing part of metadata */ private final MVMap meta; - private final ConcurrentHashMap> maps = - new ConcurrentHashMap<>(); + private final ConcurrentHashMap> maps = new ConcurrentHashMap<>(); private final HashMap storeHeader = new HashMap<>(); - private WriteBuffer writeBuffer; + private final Queue writeBufferPool = new ArrayBlockingQueue<>(PIPE_LENGTH + 1); private final AtomicInteger lastMapId = new AtomicInteger(); + private int lastChunkId; + private int versionsToKeep = 5; /** @@ -255,14 +296,11 @@ public class MVStore implements AutoCloseable { private Compressor compressorHigh; - private final UncaughtExceptionHandler backgroundExceptionHandler; + private final boolean recoveryMode; - private volatile long currentVersion; + public final UncaughtExceptionHandler backgroundExceptionHandler; - /** - * The version of the last stored chunk, or -1 if nothing was stored so far. - */ - private long lastStoredVersion = INITIAL_VERSION; + private volatile long currentVersion; /** * Oldest store version in use. 
All version beyond this can be safely dropped @@ -307,7 +345,7 @@ public class MVStore implements AutoCloseable { /** * The version of the current store operation (if any). */ - private volatile long currentStoreVersion = -1; + private volatile long currentStoreVersion = INITIAL_VERSION; private volatile boolean metaChanged; @@ -319,32 +357,43 @@ public class MVStore implements AutoCloseable { private final int autoCompactFillRate; private long autoCompactLastFileOpCount; - private volatile IllegalStateException panicException; + private volatile MVStoreException panicException; private long lastTimeAbsolute; - private long lastFreeUnusedChunks; + private long leafCount; + private long nonLeafCount; + /** * Create and open the store. * * @param config the configuration to use - * @throws IllegalStateException if the file is corrupt, or an exception + * @throws MVStoreException if the file is corrupt, or an exception * occurred while opening * @throws IllegalArgumentException if the directory does not exist */ MVStore(Map config) { - this.compressionLevel = DataUtils.getConfigParam(config, "compress", 0); + recoveryMode = config.containsKey("recoveryMode"); + compressionLevel = DataUtils.getConfigParam(config, "compress", 0); String fileName = (String) config.get("fileName"); FileStore fileStore = (FileStore) config.get("fileStore"); - fileStoreIsProvided = fileStore != null; - if(fileStore == null && fileName != null) { - fileStore = new FileStore(); + if (fileStore == null) { + fileStoreIsProvided = false; + if (fileName != null) { + fileStore = new FileStore(); + } + } else { + if (fileName != null) { + throw new IllegalArgumentException("fileName && fileStore"); + } + fileStoreIsProvided = true; } this.fileStore = fileStore; int pgSplitSize = 48; // for "mem:" case it is # of keys CacheLongKeyLIRS.Config cc = null; + CacheLongKeyLIRS.Config cc2 = null; if (this.fileStore != null) { int mb = DataUtils.getConfigParam(config, "cacheSize", 16); if (mb > 0) { @@ 
-355,16 +404,16 @@ public class MVStore implements AutoCloseable { cc.segmentCount = (Integer)o; } } + cc2 = new CacheLongKeyLIRS.Config(); + cc2.maxMemory = 1024L * 1024L; pgSplitSize = 16 * 1024; } if (cc != null) { cache = new CacheLongKeyLIRS<>(cc); - cc.maxMemory /= 4; - cacheChunkRef = new CacheLongKeyLIRS<>(cc); } else { cache = null; - cacheChunkRef = null; } + chunksToC = cc2 == null ? null : new CacheLongKeyLIRS<>(cc2); pgSplitSize = DataUtils.getConfigParam(config, "pageSplitSize", pgSplitSize); // Make sure pages will fit into cache @@ -375,56 +424,52 @@ public class MVStore implements AutoCloseable { keysPerPage = DataUtils.getConfigParam(config, "keysPerPage", 48); backgroundExceptionHandler = (UncaughtExceptionHandler)config.get("backgroundExceptionHandler"); - meta = new MVMap<>(this); + layout = new MVMap<>(this, 0, StringDataType.INSTANCE, StringDataType.INSTANCE); if (this.fileStore != null) { retentionTime = this.fileStore.getDefaultRetentionTime(); // 19 KB memory is about 1 KB storage int kb = Math.max(1, Math.min(19, Utils.scaleForAvailableMemory(64))) * 1024; kb = DataUtils.getConfigParam(config, "autoCommitBufferSize", kb); autoCommitMemory = kb * 1024; - autoCompactFillRate = DataUtils.getConfigParam(config, "autoCompactFillRate", 40); + autoCompactFillRate = DataUtils.getConfigParam(config, "autoCompactFillRate", 90); char[] encryptionKey = (char[]) config.get("encryptionKey"); + // there is no need to lock store here, since it is not opened (or even created) yet, + // just to make some assertions happy, when they ensure single-threaded access + storeLock.lock(); try { - if (!fileStoreIsProvided) { - boolean readOnly = config.containsKey("readOnly"); - this.fileStore.open(fileName, readOnly, encryptionKey); - } - if (this.fileStore.size() == 0) { - creationTime = getTimeAbsolute(); - lastCommitTime = creationTime; - storeHeader.put("H", 2); - storeHeader.put("blockSize", BLOCK_SIZE); - storeHeader.put("format", FORMAT_WRITE); - 
storeHeader.put("created", creationTime); - writeStoreHeader(); - } else { - readStoreHeader(); + saveChunkLock.lock(); + try { + if (!fileStoreIsProvided) { + boolean readOnly = config.containsKey("readOnly"); + this.fileStore.open(fileName, readOnly, encryptionKey); + } + if (this.fileStore.size() == 0) { + creationTime = getTimeAbsolute(); + storeHeader.put(HDR_H, 2); + storeHeader.put(HDR_BLOCK_SIZE, BLOCK_SIZE); + storeHeader.put(HDR_FORMAT, FORMAT_WRITE_MAX); + storeHeader.put(HDR_CREATED, creationTime); + setLastChunk(null); + writeStoreHeader(); + } else { + readStoreHeader(); + } + } finally { + saveChunkLock.unlock(); } - } catch (IllegalStateException e) { + } catch (MVStoreException e) { panic(e); } finally { if (encryptionKey != null) { Arrays.fill(encryptionKey, (char) 0); } + unlockAndCheckPanicCondition(); } lastCommitTime = getTimeSinceCreation(); - Set rootsToRemove = new HashSet<>(); - for (Iterator it = meta.keyIterator("root."); it.hasNext();) { - String key = it.next(); - if (!key.startsWith("root.")) { - break; - } - String mapId = key.substring(key.lastIndexOf('.') + 1); - if(!meta.containsKey("map."+mapId)) { - rootsToRemove.add(key); - } - } - - for (String key : rootsToRemove) { - meta.remove(key); - markMetaChanged(); - } + meta = openMetaMap(); + scrubLayoutMap(); + scrubMetaMap(); // setAutoCommitDelay starts the thread, but only if // the parameter is different from the old value @@ -433,19 +478,117 @@ public class MVStore implements AutoCloseable { } else { autoCommitMemory = 0; autoCompactFillRate = 0; + meta = openMetaMap(); + } + onVersionChange(currentVersion); + } + + private MVMap openMetaMap() { + String metaIdStr = layout.get(META_ID_KEY); + int metaId; + if (metaIdStr == null) { + metaId = lastMapId.incrementAndGet(); + layout.put(META_ID_KEY, Integer.toHexString(metaId)); + } else { + metaId = DataUtils.parseHexInt(metaIdStr); + } + MVMap map = new MVMap<>(this, metaId, StringDataType.INSTANCE, StringDataType.INSTANCE); + 
map.setRootPos(getRootPos(map.getId()), currentVersion - 1); + return map; + } + + private void scrubLayoutMap() { + Set keysToRemove = new HashSet<>(); + + // split meta map off layout map + for (String prefix : new String[]{ DataUtils.META_NAME, DataUtils.META_MAP }) { + for (Iterator it = layout.keyIterator(prefix); it.hasNext(); ) { + String key = it.next(); + if (!key.startsWith(prefix)) { + break; + } + meta.putIfAbsent(key, layout.get(key)); + markMetaChanged(); + keysToRemove.add(key); + } + } + + // remove roots of non-existent maps (leftover after unfinished map removal) + for (Iterator it = layout.keyIterator(DataUtils.META_ROOT); it.hasNext();) { + String key = it.next(); + if (!key.startsWith(DataUtils.META_ROOT)) { + break; + } + String mapIdStr = key.substring(key.lastIndexOf('.') + 1); + if(!meta.containsKey(DataUtils.META_MAP + mapIdStr) && DataUtils.parseHexInt(mapIdStr) != meta.getId()) { + keysToRemove.add(key); + } + } + + for (String key : keysToRemove) { + layout.remove(key); + } + } + + private void scrubMetaMap() { + Set keysToRemove = new HashSet<>(); + + // ensure that there is only one name mapped to each id + // this could be a leftover of an unfinished map rename + for (Iterator it = meta.keyIterator(DataUtils.META_NAME); it.hasNext();) { + String key = it.next(); + if (!key.startsWith(DataUtils.META_NAME)) { + break; + } + String mapName = key.substring(DataUtils.META_NAME.length()); + int mapId = DataUtils.parseHexInt(meta.get(key)); + String realMapName = getMapName(mapId); + if(!mapName.equals(realMapName)) { + keysToRemove.add(key); + } + } + + for (String key : keysToRemove) { + meta.remove(key); + markMetaChanged(); + } + + for (Iterator it = meta.keyIterator(DataUtils.META_MAP); it.hasNext();) { + String key = it.next(); + if (!key.startsWith(DataUtils.META_MAP)) { + break; + } + String mapName = DataUtils.getMapName(meta.get(key)); + String mapIdStr = key.substring(DataUtils.META_MAP.length()); + // ensure that last map id is 
not smaller than max of any existing map ids + int mapId = DataUtils.parseHexInt(mapIdStr); + if (mapId > lastMapId.get()) { + lastMapId.set(mapId); + } + // each map should have a proper name + if(!mapIdStr.equals(meta.get(DataUtils.META_NAME + mapName))) { + meta.put(DataUtils.META_NAME + mapName, mapIdStr); + markMetaChanged(); + } + } + } + + private void unlockAndCheckPanicCondition() { + storeLock.unlock(); + if (getPanicException() != null) { + closeImmediately(); } } - private void panic(IllegalStateException e) { + private void panic(MVStoreException e) { if (isOpen()) { handleException(e); panicException = e; - closeImmediately(); } throw e; } - public IllegalStateException getPanicException() { + public MVStoreException getPanicException() { return panicException; } @@ -462,18 +605,6 @@ public static MVStore open(String fileName) { return new MVStore(config); } - /** - * Find position of the root page for historical version of the map. - * - * @param mapId to find the old version for - * @param version the version - * @return position of the root Page - */ - long getRootPos(int mapId, long version) { - MVMap oldMeta = getMetaMap(version); - return getRootPos(oldMeta, mapId); - } - /** * Open a map with the default settings. The map is automatically create if * it does not yet exist. 
If a map with this name is already open, this map @@ -485,7 +616,7 @@ long getRootPos(int mapId, long version) { * @return the map */ public MVMap openMap(String name) { - return openMap(name, new MVMap.Builder()); + return openMap(name, new MVMap.Builder<>()); } /** @@ -502,22 +633,32 @@ public MVMap openMap(String name) { */ public , K, V> M openMap(String name, MVMap.MapBuilder builder) { int id = getMapId(name); - M map; if (id >= 0) { - map = openMap(id, builder); + @SuppressWarnings("unchecked") + M map = (M) getMap(id); + if(map == null) { + map = openMap(id, builder); + } assert builder.getKeyType() == null || map.getKeyType().getClass().equals(builder.getKeyType().getClass()); - assert builder.getValueType() == null || map.getValueType().getClass().equals(builder.getValueType() - .getClass()); + assert builder.getValueType() == null + || map.getValueType().getClass().equals(builder.getValueType().getClass()); + return map; } else { HashMap c = new HashMap<>(); id = lastMapId.incrementAndGet(); assert getMap(id) == null; c.put("id", id); c.put("createVersion", currentVersion); - map = builder.create(this, c); + M map = builder.create(this, c); String x = Integer.toHexString(id); meta.put(MVMap.getMapKey(id), map.asString(name)); - meta.put("name." 
+ name, x); + String existing = meta.putIfAbsent(DataUtils.META_NAME + name, x); + if (existing != null) { + // looks like map was created concurrently, cleanup and re-start + meta.remove(MVMap.getMapKey(id)); + return openMap(name, builder); + } + long lastStoredVersion = currentVersion - 1; map.setRootPos(0, lastStoredVersion); markMetaChanged(); @SuppressWarnings("unchecked") @@ -525,33 +666,38 @@ public , K, V> M openMap(String name, MVMap.MapBuilder, K, V> M openMap(int id, MVMap.MapBuilder builder) { - storeLock.lock(); - try { - @SuppressWarnings("unchecked") - M map = (M) getMap(id); - if (map == null) { - String configAsString = meta.get(MVMap.getMapKey(id)); - HashMap config; - if (configAsString != null) { - config = new HashMap(DataUtils.parseMap(configAsString)); - } else { - config = new HashMap<>(); - } - config.put("id", id); - map = builder.create(this, config); - long root = getRootPos(meta, id); - map.setRootPos(root, lastStoredVersion); - maps.put(id, map); + /** + * Open an existing map with the given builder. 
+ * + * @param the map type + * @param the key type + * @param the value type + * @param id the map id + * @param builder the map builder + * @return the map + */ + @SuppressWarnings("unchecked") + public , K, V> M openMap(int id, MVMap.MapBuilder builder) { + M map; + while ((map = (M)getMap(id)) == null) { + String configAsString = meta.get(MVMap.getMapKey(id)); + DataUtils.checkArgument(configAsString != null, "Missing map with id {0}", id); + HashMap config = new HashMap<>(DataUtils.parseMap(configAsString)); + config.put("id", id); + map = builder.create(this, config); + long root = getRootPos(id); + long lastStoredVersion = currentVersion - 1; + map.setRootPos(root, lastStoredVersion); + if (maps.putIfAbsent(id, map) == null) { + break; } - return map; - } finally { - storeLock.unlock(); + // looks like map has been concurrently created already, re-start } + return map; } /** @@ -577,31 +723,46 @@ public MVMap getMap(int id) { public Set getMapNames() { HashSet set = new HashSet<>(); checkOpen(); - for (Iterator it = meta.keyIterator("name."); it.hasNext();) { + for (Iterator it = meta.keyIterator(DataUtils.META_NAME); it.hasNext();) { String x = it.next(); - if (!x.startsWith("name.")) { + if (!x.startsWith(DataUtils.META_NAME)) { break; } - String mapName = x.substring("name.".length()); + String mapName = x.substring(DataUtils.META_NAME.length()); set.add(mapName); } return set; } + /** + * Get this store's layout map. This data is for informational purposes only. The + * data is subject to change in future versions. + *

          + * The data in this map should not be modified (changing system data may corrupt the store). + *

          + * The layout map contains the following entries: + *

          +     * chunk.{chunkId} = {chunk metadata}
          +     * root.{mapId} = {root position}
          +     * 
          + * + * @return the metadata map + */ + public MVMap getLayoutMap() { + checkOpen(); + return layout; + } + /** * Get the metadata map. This data is for informational purposes only. The * data is subject to change in future versions. *

          - * The data in this map should not be modified (changing system data may - * corrupt the store). If modifications are needed, they need be - * synchronized on the store. + * The data in this map should not be modified (changing system data may corrupt the store). *

          * The metadata map contains the following entries: *

          -     * chunk.{chunkId} = {chunk metadata}
                * name.{name} = {mapId}
                * map.{mapId} = {map metadata}
          -     * root.{mapId} = {root position}
                * setting.storeVersion = {version}
                * 
          * @@ -612,12 +773,13 @@ public MVMap getMetaMap() { return meta; } - private MVMap getMetaMap(long version) { + private MVMap getLayoutMap(long version) { Chunk c = getChunkForVersion(version); DataUtils.checkArgument(c != null, "Unknown version {0}", version); - c = readChunkHeader(c.block); - MVMap oldMeta = meta.openReadOnly(c.metaRootPos, version); - return oldMeta; + long block = c.block; + c = readChunkHeader(block); + MVMap oldMap = layout.openReadOnly(c.layoutRootPos, version); + return oldMap; } private Chunk getChunkForVersion(long version) { @@ -639,7 +801,7 @@ private Chunk getChunkForVersion(long version) { * @return true if it exists */ public boolean hasMap(String name) { - return meta.containsKey("name." + name); + return meta.containsKey(DataUtils.META_NAME + name); } /** @@ -649,7 +811,7 @@ public boolean hasMap(String name) { * @return true if it exists and has data. */ public boolean hasData(String name) { - return hasMap(name) && getRootPos(meta, getMapId(name)) != 0; + return hasMap(name) && getRootPos(getMapId(name)) != 0; } private void markMetaChanged() { @@ -660,6 +822,7 @@ private void markMetaChanged() { private void readStoreHeader() { Chunk newest = null; + boolean assumeCleanShutdown = true; boolean validStoreHeader = false; // find out which chunk and version are the newest // read the first two blocks @@ -671,53 +834,68 @@ private void readStoreHeader() { try { HashMap m = DataUtils.parseChecksummedMap(buff); if (m == null) { + assumeCleanShutdown = false; continue; } - int blockSize = DataUtils.readHexInt( - m, "blockSize", BLOCK_SIZE); - if (blockSize != BLOCK_SIZE) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_UNSUPPORTED_FORMAT, - "Block size {0} is currently not supported", - blockSize); - } - long version = DataUtils.readHexLong(m, "version", 0); + long version = DataUtils.readHexLong(m, HDR_VERSION, 0); + // if both header blocks do agree on version + // we'll continue on happy path - assume that 
previous shutdown was clean + assumeCleanShutdown = assumeCleanShutdown && (newest == null || version == newest.version); if (newest == null || version > newest.version) { validStoreHeader = true; storeHeader.putAll(m); - creationTime = DataUtils.readHexLong(m, "created", 0); - int chunkId = DataUtils.readHexInt(m, "chunk", 0); - long block = DataUtils.readHexLong(m, "block", 0); - Chunk test = readChunkHeaderAndFooter(block); - if (test != null && test.id == chunkId) { + creationTime = DataUtils.readHexLong(m, HDR_CREATED, 0); + int chunkId = DataUtils.readHexInt(m, HDR_CHUNK, 0); + long block = DataUtils.readHexLong(m, HDR_BLOCK, 2); + Chunk test = readChunkHeaderAndFooter(block, chunkId); + if (test != null) { newest = test; } } - } catch (Exception ignore) {/**/} + } catch (Exception ignore) { + assumeCleanShutdown = false; + } } + if (!validStoreHeader) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "Store header is corrupt: {0}", fileStore); } - long format = DataUtils.readHexLong(storeHeader, "format", 1); - if (format > FORMAT_WRITE && !fileStore.isReadOnly()) { - throw DataUtils.newIllegalStateException( + int blockSize = DataUtils.readHexInt(storeHeader, HDR_BLOCK_SIZE, BLOCK_SIZE); + if (blockSize != BLOCK_SIZE) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_UNSUPPORTED_FORMAT, - "The write format {0} is larger " + - "than the supported format {1}, " + - "and the file was not opened in read-only mode", - format, FORMAT_WRITE); - } - format = DataUtils.readHexLong(storeHeader, "formatRead", format); - if (format > FORMAT_READ) { - throw DataUtils.newIllegalStateException( + "Block size {0} is currently not supported", + blockSize); + } + long format = DataUtils.readHexLong(storeHeader, HDR_FORMAT, 1); + if (!fileStore.isReadOnly()) { + if (format > FORMAT_WRITE_MAX) { + throw getUnsupportedWriteFormatException(format, FORMAT_WRITE_MAX, + "The write format {0} is larger than the 
supported format {1}"); + } else if (format < FORMAT_WRITE_MIN) { + throw getUnsupportedWriteFormatException(format, FORMAT_WRITE_MIN, + "The write format {0} is smaller than the supported format {1}"); + } + } + format = DataUtils.readHexLong(storeHeader, HDR_FORMAT_READ, format); + if (format > FORMAT_READ_MAX) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_UNSUPPORTED_FORMAT, - "The read format {0} is larger " + - "than the supported format {1}", - format, FORMAT_READ); + "The read format {0} is larger than the supported format {1}", + format, FORMAT_READ_MAX); + } else if (format < FORMAT_READ_MIN) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_UNSUPPORTED_FORMAT, + "The read format {0} is smaller than the supported format {1}", + format, FORMAT_READ_MIN); + } + + assumeCleanShutdown = assumeCleanShutdown && newest != null && !recoveryMode; + if (assumeCleanShutdown) { + assumeCleanShutdown = DataUtils.readHexInt(storeHeader, HDR_CLEAN, 0) != 0; } - lastStoredVersion = INITIAL_VERSION; chunks.clear(); long now = System.currentTimeMillis(); // calculate the year (doesn't have to be exact; @@ -733,123 +911,276 @@ private void readStoreHeader() { // the system time was set to the past: // we change the creation time creationTime = now; - storeHeader.put("created", creationTime); + storeHeader.put(HDR_CREATED, creationTime); } - Chunk test = readChunkFooter(fileStore.size()); - if (test != null) { - test = readChunkHeaderAndFooter(test.block); - if (test != null) { - if (newest == null || test.version > newest.version) { + + long fileSize = fileStore.size(); + long blocksInStore = fileSize / BLOCK_SIZE; + + Comparator chunkComparator = (one, two) -> { + int result = Long.compare(two.version, one.version); + if (result == 0) { + // out of two copies of the same chunk we prefer the one + // close to the beginning of file (presumably later version) + result = Long.compare(one.block, two.block); + } + return result; + }; + + Map 
validChunksByLocation = new HashMap<>(); + if (!assumeCleanShutdown) { + Chunk tailChunk = discoverChunk(blocksInStore); + if (tailChunk != null) { + blocksInStore = tailChunk.block; // for a possible full scan later on + if (newest == null || tailChunk.version > newest.version) { + newest = tailChunk; + } + } + + if (newest != null) { + // read the chunk header and footer, + // and follow the chain of next chunks + while (true) { + validChunksByLocation.put(newest.block, newest); + if (newest.next == 0 || newest.next >= blocksInStore) { + // no (valid) next + break; + } + Chunk test = readChunkHeaderAndFooter(newest.next, newest.id + 1); + if (test == null || test.version <= newest.version) { + break; + } + // if shutdown was really clean then chain should be empty + assumeCleanShutdown = false; newest = test; } } } - long blocksInStore = fileStore.size() / BLOCK_SIZE; - // this queue will hold potential candidates for lastChunk to fall back to - Queue lastChunkCandidates = new PriorityQueue<>(Math.max(32, (int)(blocksInStore / 4)), - new Comparator() { - @Override - public int compare(Chunk one, Chunk two) { - int result = Long.compare(two.version, one.version); - if (result == 0) { - // out of two versions of the same chunk we prefer the one - // close to the beginning of file (presumably later version) - result = Long.compare(one.block, two.block); + if (assumeCleanShutdown) { + // quickly check latest 20 chunks referenced in meta table + Queue chunksToVerify = new PriorityQueue<>(20, Collections.reverseOrder(chunkComparator)); + try { + setLastChunk(newest); + // load the chunk metadata: although meta's root page resides in the lastChunk, + // traversing meta map might recursively load another chunk(s) + Cursor cursor = layout.cursor(DataUtils.META_CHUNK); + while (cursor.hasNext() && cursor.next().startsWith(DataUtils.META_CHUNK)) { + Chunk c = Chunk.fromString(cursor.getValue()); + assert c.version <= currentVersion; + // might be there already, due to meta 
traversal + // see readPage() ... getChunkIfFound() + chunks.putIfAbsent(c.id, c); + chunksToVerify.offer(c); + if (chunksToVerify.size() == 20) { + chunksToVerify.poll(); + } + } + Chunk c; + while (assumeCleanShutdown && (c = chunksToVerify.poll()) != null) { + Chunk test = readChunkHeaderAndFooter(c.block, c.id); + assumeCleanShutdown = test != null; + if (assumeCleanShutdown) { + validChunksByLocation.put(test.block, test); + } } - return result; + } catch(MVStoreException ignored) { + assumeCleanShutdown = false; } - }); - Map validChunkCacheByLocation = new HashMap<>(); + } - if (newest != null) { - // read the chunk header and footer, - // and follow the chain of next chunks - while (true) { - validChunkCacheByLocation.put(newest.block, newest); - lastChunkCandidates.add(newest); - if (newest.next == 0 || - newest.next >= blocksInStore) { - // no (valid) next - break; + if (!assumeCleanShutdown) { + boolean quickRecovery = false; + if (!recoveryMode) { + // now we know, that previous shutdown did not go well and file + // is possibly corrupted but there is still hope for a quick + // recovery + + // this collection will hold potential candidates for lastChunk to fall back to, + // in order from the most to least likely + Chunk[] lastChunkCandidates = validChunksByLocation.values().toArray(new Chunk[0]); + Arrays.sort(lastChunkCandidates, chunkComparator); + Map validChunksById = new HashMap<>(); + for (Chunk chunk : lastChunkCandidates) { + validChunksById.put(chunk.id, chunk); } - test = readChunkHeaderAndFooter(newest.next); - if (test == null || test.id <= newest.id) { - break; + quickRecovery = findLastChunkWithCompleteValidChunkSet(lastChunkCandidates, validChunksByLocation, + validChunksById, false); + } + + if (!quickRecovery) { + // scan whole file and try to fetch chunk header and/or footer out of every block + // matching pairs with nothing in-between are considered as valid chunk + long block = blocksInStore; + Chunk tailChunk; + while ((tailChunk 
= discoverChunk(block)) != null) { + block = tailChunk.block; + validChunksByLocation.put(block, tailChunk); + } + + // this collection will hold potential candidates for lastChunk to fall back to, + // in order from the most to least likely + Chunk[] lastChunkCandidates = validChunksByLocation.values().toArray(new Chunk[0]); + Arrays.sort(lastChunkCandidates, chunkComparator); + Map validChunksById = new HashMap<>(); + for (Chunk chunk : lastChunkCandidates) { + validChunksById.put(chunk.id, chunk); } - newest = test; + if (!findLastChunkWithCompleteValidChunkSet(lastChunkCandidates, validChunksByLocation, + validChunksById, true) && lastChunk != null) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, + "File is corrupted - unable to recover a valid set of chunks"); + + } + } + } + + fileStore.clear(); + // build the free space list + for (Chunk c : chunks.values()) { + if (c.isSaved()) { + long start = c.block * BLOCK_SIZE; + int length = c.len * BLOCK_SIZE; + fileStore.markUsed(start, length); } + if (!c.isLive()) { + deadChunks.offer(c); + } + } + assert validateFileLength("on open"); + } + + private MVStoreException getUnsupportedWriteFormatException(long format, int expectedFormat, String s) { + format = DataUtils.readHexLong(storeHeader, HDR_FORMAT_READ, format); + if (format >= FORMAT_READ_MIN && format <= FORMAT_READ_MAX) { + s += ", and the file was not opened in read-only mode"; } + return DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT, s, format, expectedFormat); + } + private boolean findLastChunkWithCompleteValidChunkSet(Chunk[] lastChunkCandidates, + Map validChunksByLocation, + Map validChunksById, + boolean afterFullScan) { // Try candidates for "last chunk" in order from newest to oldest // until suitable is found. Suitable one should have meta map // where all chunk references point to valid locations. 
- boolean verified = false; - while(!verified && setLastChunk(lastChunkCandidates.poll()) != null) { - verified = true; - // load the chunk metadata: although meta's root page resides in the lastChunk, - // traversing meta map might recursively load another chunk(s) - Cursor cursor = meta.cursor("chunk."); - while (cursor.hasNext() && cursor.next().startsWith("chunk.")) { - Chunk c = Chunk.fromString(cursor.getValue()); - assert c.version <= currentVersion; - // might be there already, due to meta traversal - // see readPage() ... getChunkIfFound() - chunks.putIfAbsent(c.id, c); - long block = c.block; - test = validChunkCacheByLocation.get(block); - if (test == null) { - test = readChunkHeaderAndFooter(block); - if (test != null && test.id == c.id) { // chunk is valid - validChunkCacheByLocation.put(block, test); - lastChunkCandidates.offer(test); - continue; + for (Chunk chunk : lastChunkCandidates) { + boolean verified = true; + try { + setLastChunk(chunk); + // load the chunk metadata: although meta's root page resides in the lastChunk, + // traversing meta map might recursively load another chunk(s) + Cursor cursor = layout.cursor(DataUtils.META_CHUNK); + while (cursor.hasNext() && cursor.next().startsWith(DataUtils.META_CHUNK)) { + Chunk c = Chunk.fromString(cursor.getValue()); + assert c.version <= currentVersion; + // might be there already, due to meta traversal + // see readPage() ... getChunkIfFound() + Chunk test = chunks.putIfAbsent(c.id, c); + if (test != null) { + c = test; + } + assert chunks.get(c.id) == c; + if ((test = validChunksByLocation.get(c.block)) == null || test.id != c.id) { + if ((test = validChunksById.get(c.id)) != null) { + // We do not have a valid chunk at that location, + // but there is a copy of same chunk from original + // location. + // Chunk header at original location does not have + // any dynamic (occupancy) metadata, so it can't be + // used here as is, re-point our chunk to original + // location instead. 
+ c.block = test.block; + } else if (c.isLive() && (afterFullScan || readChunkHeaderAndFooter(c.block, c.id) == null)) { + // chunk reference is invalid + // this "last chunk" candidate is not suitable + verified = false; + break; + } + } + if (!c.isLive()) { + // we can just remove entry from meta, referencing to this chunk, + // but store maybe R/O, and it's not properly started yet, + // so lets make this chunk "dead" and taking no space, + // and it will be automatically removed later. + c.block = Long.MAX_VALUE; + c.len = Integer.MAX_VALUE; + if (c.unused == 0) { + c.unused = creationTime; + } + if (c.unusedAtVersion == 0) { + c.unusedAtVersion = INITIAL_VERSION; + } } - } else if (test.id == c.id) { // chunk is valid - // nothing to do, since chunk was already verified - // and registered as potential "last chunk" candidate - continue; } - // chunk reference is invalid - // this "last chunk" candidate is not suitable - // but we continue to process all references - // to find other potential candidates + } catch(Exception ignored) { verified = false; } + if (verified) { + return true; + } } - - fileStore.clear(); - // build the free space list - for (Chunk c : chunks.values()) { - long start = c.block * BLOCK_SIZE; - int length = c.len * BLOCK_SIZE; - fileStore.markUsed(start, length); - } - assert fileStore.getFileLengthInUse() == measureFileLengthInUse() : - fileStore.getFileLengthInUse() + " != " + measureFileLengthInUse(); - setWriteVersion(currentVersion); - if (lastStoredVersion == INITIAL_VERSION) { - lastStoredVersion = currentVersion - 1; - } + return false; } - private Chunk setLastChunk(Chunk last) { + private void setLastChunk(Chunk last) { chunks.clear(); lastChunk = last; - if (last == null) { - // no valid chunk - lastMapId.set(0); - currentVersion = 0; - lastStoredVersion = INITIAL_VERSION; - meta.setRootPos(0, INITIAL_VERSION); - } else { - lastMapId.set(last.mapId); + lastChunkId = 0; + currentVersion = lastChunkVersion(); + long 
layoutRootPos = 0; + int mapId = 0; + if (last != null) { // there is a valid chunk + lastChunkId = last.id; currentVersion = last.version; + layoutRootPos = last.layoutRootPos; + mapId = last.mapId; chunks.put(last.id, last); - lastStoredVersion = currentVersion - 1; - meta.setRootPos(last.metaRootPos, lastStoredVersion); } - return last; + lastMapId.set(mapId); + layout.setRootPos(layoutRootPos, currentVersion - 1); + } + + /** + * Discover a valid chunk, searching file backwards from the given block + * + * @param block to start search from (found chunk footer should be no + * further than block-1) + * @return valid chunk or null if none found + */ + private Chunk discoverChunk(long block) { + long candidateLocation = Long.MAX_VALUE; + Chunk candidate = null; + while (true) { + if (block == candidateLocation) { + return candidate; + } + if (block == 2) { // number of blocks occupied by headers + return null; + } + Chunk test = readChunkFooter(block); + if (test != null) { + // if we encounter chunk footer (with or without corresponding header) + // in the middle of prospective chunk, stop considering it + candidateLocation = Long.MAX_VALUE; + test = readChunkHeaderOptionally(test.block, test.id); + if (test != null) { + // if that footer has a corresponding header, + // consider them as a new candidate for a valid chunk + candidate = test; + candidateLocation = test.block; + } + } + + // if we encounter chunk header without corresponding footer + // (due to incomplete write?) in the middle of prospective + // chunk, stop considering it + if (--block > candidateLocation && readChunkHeaderOptionally(block) != null) { + candidateLocation = Long.MAX_VALUE; + } + } } @@ -857,23 +1188,17 @@ private Chunk setLastChunk(Chunk last) { * Read a chunk header and footer, and verify the stored data is consistent. 
* * @param block the block + * @param expectedId of the chunk * @return the chunk, or null if the header or footer don't match or are not * consistent */ - private Chunk readChunkHeaderAndFooter(long block) { - Chunk header; - try { - header = readChunkHeader(block); - } catch (Exception e) { - // invalid chunk header: ignore, but stop - return null; - } - if (header == null) { - return null; - } - Chunk footer = readChunkFooter((block + header.len) * BLOCK_SIZE); - if (footer == null || footer.id != header.id) { - return null; + private Chunk readChunkHeaderAndFooter(long block, int expectedId) { + Chunk header = readChunkHeaderOptionally(block, expectedId); + if (header != null) { + Chunk footer = readChunkFooter(block + header.len); + if (footer == null || footer.id != expectedId || footer.block != header.block) { + return null; + } } return header; } @@ -881,14 +1206,14 @@ private Chunk readChunkHeaderAndFooter(long block) { /** * Try to read a chunk footer. * - * @param end the end of the chunk + * @param block the index of the next block after the chunk * @return the chunk, or null if not successful */ - private Chunk readChunkFooter(long end) { + private Chunk readChunkFooter(long block) { // the following can fail for various reasons try { // read the chunk footer of the last block of the file - long pos = end - Chunk.FOOTER_LENGTH; + long pos = block * BLOCK_SIZE - Chunk.FOOTER_LENGTH; if(pos < 0) { return null; } @@ -897,11 +1222,7 @@ private Chunk readChunkFooter(long end) { lastBlock.get(buff); HashMap m = DataUtils.parseChecksummedMap(buff); if (m != null) { - int chunk = DataUtils.readHexInt(m, "chunk", 0); - Chunk c = new Chunk(chunk); - c.version = DataUtils.readHexLong(m, "version", 0); - c.block = DataUtils.readHexLong(m, "block", 0); - return c; + return new Chunk(m); } } catch (Exception e) { // ignore @@ -910,16 +1231,17 @@ private Chunk readChunkFooter(long end) { } private void writeStoreHeader() { - StringBuilder buff = new 
StringBuilder(112); + Chunk lastChunk = this.lastChunk; if (lastChunk != null) { - storeHeader.put("block", lastChunk.block); - storeHeader.put("chunk", lastChunk.id); - storeHeader.put("version", lastChunk.version); + storeHeader.put(HDR_BLOCK, lastChunk.block); + storeHeader.put(HDR_CHUNK, lastChunk.id); + storeHeader.put(HDR_VERSION, lastChunk.version); } + StringBuilder buff = new StringBuilder(112); DataUtils.appendMap(buff, storeHeader); byte[] bytes = buff.toString().getBytes(StandardCharsets.ISO_8859_1); int checksum = DataUtils.getFletcher32(bytes, 0, bytes.length); - DataUtils.appendMap(buff, "fletcher", checksum); + DataUtils.appendMap(buff, HDR_FLETCHER, checksum); buff.append('\n'); bytes = buff.toString().getBytes(StandardCharsets.ISO_8859_1); ByteBuffer header = ByteBuffer.allocate(2 * BLOCK_SIZE); @@ -933,7 +1255,7 @@ private void writeStoreHeader() { private void write(long pos, ByteBuffer buffer) { try { fileStore.writeFully(pos, buffer); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { panic(e); } } @@ -943,7 +1265,18 @@ private void write(long pos, ByteBuffer buffer) { */ @Override public void close() { - closeStore(true); + closeStore(true, 0); + } + + /** + * Close the file and the store. Unsaved changes are written to disk first, + * and compaction (up to a specified number of milliseconds) is attempted. 
+ * + * @param allowedCompactionTime the allowed time for compaction (in + * milliseconds) + */ + public void close(int allowedCompactionTime) { + closeStore(true, allowedCompactionTime); } /** @@ -953,13 +1286,13 @@ public void close() { */ public void closeImmediately() { try { - closeStore(false); + closeStore(false, 0); } catch (Throwable e) { handleException(e); } } - private void closeStore(boolean normalShutdown) { + private void closeStore(boolean normalShutdown, int allowedCompactionTime) { // If any other thead have already initiated closure procedure, // isClosed() would wait until closure is done and then we jump out of the loop. // This is a subtle difference between !isClosed() and isOpen(). @@ -974,26 +1307,34 @@ private void closeStore(boolean normalShutdown) { if (normalShutdown && fileStore != null && !fileStore.isReadOnly()) { for (MVMap map : maps.values()) { if (map.isClosed()) { - if (meta.remove(MVMap.getMapRootKey(map.getId())) != null) { - markMetaChanged(); - } + deregisterMapRoot(map.getId()); } } + setRetentionTime(0); commit(); + if (allowedCompactionTime > 0) { + compactFile(allowedCompactionTime); + } else if (allowedCompactionTime < 0) { + doMaintenance(autoCompactFillRate); + } - shrinkFileIfPossible(0); + saveChunkLock.lock(); + try { + shrinkFileIfPossible(0); + storeHeader.put(HDR_CLEAN, 1); + writeStoreHeader(); + sync(); + assert validateFileLength("on close"); + } finally { + saveChunkLock.unlock(); + } } state = STATE_CLOSING; // release memory early - this is important when called // because of out of memory - if (cache != null) { - cache.clear(); - } - if (cacheChunkRef != null) { - cacheChunkRef.clear(); - } + clearCaches(); for (MVMap m : new ArrayList<>(maps.values())) { m.close(); } @@ -1014,64 +1355,16 @@ private void closeStore(boolean normalShutdown) { } } - /** - * Read a page of data into a ByteBuffer. 
- * - * @param pos page pos - * @param expectedMapId expected map id for the page - * @return ByteBuffer containing page data. - */ - ByteBuffer readBufferForPage(long pos, int expectedMapId) { - Chunk c = getChunk(pos); - long filePos = c.block * BLOCK_SIZE; - filePos += DataUtils.getPageOffset(pos); - if (filePos < 0) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "Negative position {0}; p={1}, c={2}", filePos, pos, c.toString()); - } - long maxPos = (c.block + c.len) * BLOCK_SIZE; - - ByteBuffer buff; - int maxLength = DataUtils.getPageMaxLength(pos); - if (maxLength == DataUtils.PAGE_LARGE) { - buff = fileStore.readFully(filePos, 128); - maxLength = buff.getInt(); - // read the first bytes again - } - maxLength = (int) Math.min(maxPos - filePos, maxLength); - int length = maxLength; - if (length < 0) { - throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT, - "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos); - } - buff = fileStore.readFully(filePos, length); - int chunkId = DataUtils.getPageChunkId(pos); - int offset = DataUtils.getPageOffset(pos); - int start = buff.position(); - int remaining = buff.remaining(); - int pageLength = buff.getInt(); - if (pageLength > remaining || pageLength < 4) { - throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT, - "File corrupted in chunk {0}, expected page length 4..{1}, got {2}", chunkId, remaining, - pageLength); - } - buff.limit(start + pageLength); - - short check = buff.getShort(); - int mapId = DataUtils.readVarInt(buff); - if (mapId != expectedMapId) { - throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT, - "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, expectedMapId, mapId); - } - int checkTest = DataUtils.getCheckValue(chunkId) - ^ DataUtils.getCheckValue(offset) - ^ DataUtils.getCheckValue(pageLength); - if (check != (short) checkTest) { - throw 
DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT, - "File corrupted in chunk {0}, expected check value {1}, got {2}", chunkId, checkTest, check); + private static void shutdownExecutor(ThreadPoolExecutor executor) { + if (executor != null) { + executor.shutdown(); + try { + if (executor.awaitTermination(1000, TimeUnit.MILLISECONDS)) { + return; + } + } catch (InterruptedException ignore) {/**/} + executor.shutdownNow(); } - return buff; } /** @@ -1085,15 +1378,15 @@ private Chunk getChunk(long pos) { Chunk c = chunks.get(chunkId); if (c == null) { checkOpen(); - String s = meta.get(Chunk.getMetaKey(chunkId)); + String s = layout.get(Chunk.getMetaKey(chunkId)); if (s == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_CHUNK_NOT_FOUND, "Chunk {0} not found", chunkId); } c = Chunk.fromString(s); - if (c.block == Long.MAX_VALUE) { - throw DataUtils.newIllegalStateException( + if (!c.isSaved()) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "Chunk {0} is invalid", chunkId); } @@ -1105,15 +1398,13 @@ private Chunk getChunk(long pos) { private void setWriteVersion(long version) { for (Iterator> iter = maps.values().iterator(); iter.hasNext(); ) { MVMap map = iter.next(); + assert map != layout && map != meta; if (map.setWriteVersion(version) == null) { - assert map.isClosed(); - assert map.getVersion() < getOldestVersionToKeep(); - meta.remove(MVMap.getMapRootKey(map.getId())); - markMetaChanged(); iter.remove(); } } meta.setWriteVersion(version); + layout.setWriteVersion(version); onVersionChange(version); } @@ -1126,15 +1417,21 @@ private void setWriteVersion(long version) { * @return the new version (incremented if there were changes) */ public long tryCommit() { + return tryCommit(x -> true); + } + + private long tryCommit(Predicate check) { // we need to prevent re-entrance, which may be possible, // because meta map is modified within storeNow() and that // causes 
beforeWrite() call with possibility of going back here if ((!storeLock.isHeldByCurrentThread() || currentStoreVersion < 0) && storeLock.tryLock()) { try { - store(); + if (check.test(this)) { + store(false); + } } finally { - storeLock.unlock(); + unlockAndCheckPanicCondition(); } } return currentVersion; @@ -1157,114 +1454,114 @@ public long tryCommit() { * @return the new version (incremented if there were changes) */ public long commit() { + return commit(x -> true); + } + + private long commit(Predicate check) { // we need to prevent re-entrance, which may be possible, // because meta map is modified within storeNow() and that // causes beforeWrite() call with possibility of going back here if(!storeLock.isHeldByCurrentThread() || currentStoreVersion < 0) { storeLock.lock(); try { - store(); + if (check.test(this)) { + store(true); + } } finally { - storeLock.unlock(); + unlockAndCheckPanicCondition(); } } return currentVersion; } - private void store() { - try { - if (isOpenOrStopping() && hasUnsavedChangesInternal()) { - currentStoreVersion = currentVersion; - if (fileStore == null) { - lastStoredVersion = currentVersion; - //noinspection NonAtomicOperationOnVolatileField - ++currentVersion; - setWriteVersion(currentVersion); - metaChanged = false; - } else { - if (fileStore.isReadOnly()) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_WRITING_FAILED, "This store is read-only"); - } - try { - storeNow(); - } catch (IllegalStateException e) { - panic(e); - } catch (Throwable e) { - panic(DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL, e.toString(), e)); + private void store(boolean syncWrite) { + assert storeLock.isHeldByCurrentThread(); + assert !saveChunkLock.isHeldByCurrentThread(); + if (isOpenOrStopping()) { + if (hasUnsavedChanges()) { + dropUnusedChunks(); + try { + currentStoreVersion = currentVersion; + if (fileStore == null) { + //noinspection NonAtomicOperationOnVolatileField + ++currentVersion; + 
setWriteVersion(currentVersion); + metaChanged = false; + } else { + if (fileStore.isReadOnly()) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_WRITING_FAILED, "This store is read-only"); + } + storeNow(syncWrite, 0, () -> reuseSpace ? 0 : getAfterLastBlock()); } + } finally { + // in any case reset the current store version, + // to allow closing the store + currentStoreVersion = -1; } } - } finally { - // in any case reset the current store version, - // to allow closing the store - currentStoreVersion = -1; } } - private void storeNow() { - assert storeLock.isHeldByCurrentThread(); - long time = getTimeSinceCreation(); - freeUnusedIfNeeded(time); - int currentUnsavedPageCount = unsavedMemory; - long storeVersion = currentStoreVersion; - long version = ++currentVersion; - lastCommitTime = time; - - // the metadata of the last chunk was not stored so far, and needs to be - // set now (it's better not to update right after storing, because that - // would modify the meta map again) - int lastChunkId; - if (lastChunk == null) { - lastChunkId = 0; - } else { - lastChunkId = lastChunk.id; - meta.put(Chunk.getMetaKey(lastChunkId), lastChunk.asString()); - markMetaChanged(); - // never go backward in time - time = Math.max(lastChunk.time, time); + private void storeNow(boolean syncWrite, long reservedLow, Supplier reservedHighSupplier) { + try { + lastCommitTime = getTimeSinceCreation(); + int currentUnsavedPageCount = unsavedMemory; + // it is ok, since that path suppose to be single-threaded under storeLock + //noinspection NonAtomicOperationOnVolatileField + long version = ++currentVersion; + ArrayList> changed = collectChangedMapRoots(version); + + assert storeLock.isHeldByCurrentThread(); + submitOrRun(serializationExecutor, + () -> serializeAndStore(syncWrite, reservedLow, reservedHighSupplier, + changed, lastCommitTime, version), + syncWrite); + + // some pages might have been changed in the meantime (in the newest + // version) + saveNeeded = false; 
+ unsavedMemory = Math.max(0, unsavedMemory - currentUnsavedPageCount); + } catch (MVStoreException e) { + panic(e); + } catch (Throwable e) { + panic(DataUtils.newMVStoreException(DataUtils.ERROR_INTERNAL, "{0}", e.toString(), + e)); } - int newChunkId = lastChunkId; - while (true) { - newChunkId = (newChunkId + 1) & Chunk.MAX_ID; - Chunk old = chunks.get(newChunkId); - if (old == null) { - break; - } - if (old.block == Long.MAX_VALUE) { - IllegalStateException e = DataUtils.newIllegalStateException( - DataUtils.ERROR_INTERNAL, - "Last block {0} not stored, possibly due to out-of-memory", old); - panic(e); + } + + private static void submitOrRun(ThreadPoolExecutor executor, Runnable action, + boolean syncRun) throws ExecutionException { + if (executor != null) { + try { + Future future = executor.submit(action); + if (syncRun || executor.getQueue().size() > PIPE_LENGTH) { + try { + future.get(); + } catch (InterruptedException ignore) {/**/} + } + return; + } catch (RejectedExecutionException ex) { + assert executor.isShutdown(); + shutdownExecutor(executor); } } - Chunk c = new Chunk(newChunkId); - c.pageCount = Integer.MAX_VALUE; - c.pageCountLive = Integer.MAX_VALUE; - c.maxLen = Long.MAX_VALUE; - c.maxLenLive = Long.MAX_VALUE; - c.metaRootPos = Long.MAX_VALUE; - c.block = Long.MAX_VALUE; - c.len = Integer.MAX_VALUE; - c.time = time; - c.version = version; - c.mapId = lastMapId.get(); - c.next = Long.MAX_VALUE; - chunks.put(c.id, c); - ArrayList changed = new ArrayList<>(); + action.run(); + } + + private ArrayList> collectChangedMapRoots(long version) { + long lastStoredVersion = version - 2; + ArrayList> changed = new ArrayList<>(); for (Iterator> iter = maps.values().iterator(); iter.hasNext(); ) { MVMap map = iter.next(); - RootReference rootReference = map.setWriteVersion(version); + RootReference rootReference = map.setWriteVersion(version); if (rootReference == null) { - assert map.isClosed(); - assert map.getVersion() < getOldestVersionToKeep(); - 
meta.remove(MVMap.getMapRootKey(map.getId())); iter.remove(); - } else if (map.getCreateVersion() <= storeVersion && // if map was created after storing started, skip it + } else if (map.getCreateVersion() < version && // if map was created after storing started, skip it !map.isVolatile() && map.hasChangesSince(lastStoredVersion)) { assert rootReference.version <= version : rootReference.version + " > " + version; - Page rootPage = rootReference.root; + Page rootPage = rootReference.root; if (!rootPage.isSaved() || // after deletion previously saved leaf // may pop up as a root, but we still need @@ -1274,34 +1571,140 @@ private void storeNow() { } } } - WriteBuffer buff = getWriteBuffer(); - // need to patch the header later - c.writeChunkHeader(buff, 0); - int headerLength = buff.position(); + RootReference rootReference = meta.setWriteVersion(version); + if (meta.hasChangesSince(lastStoredVersion) || metaChanged) { + assert rootReference != null && rootReference.version <= version + : rootReference == null ? 
"null" : rootReference.version + " > " + version; + Page rootPage = rootReference.root; + if (!rootPage.isSaved() || + // after deletion previously saved leaf + // may pop up as a root, but we still need + // to save new root pos in meta + rootPage.isLeaf()) { + changed.add(rootPage); + } + } + return changed; + } + + private void serializeAndStore(boolean syncRun, long reservedLow, Supplier reservedHighSupplier, + ArrayList> changed, long time, long version) { + serializationLock.lock(); + try { + Chunk c = createChunk(time, version); + chunks.put(c.id, c); + WriteBuffer buff = getWriteBuffer(); + serializeToBuffer(buff, changed, c, reservedLow, reservedHighSupplier); + + submitOrRun(bufferSaveExecutor, () -> storeBuffer(c, buff, changed), syncRun); + + } catch (MVStoreException e) { + panic(e); + } catch (Throwable e) { + panic(DataUtils.newMVStoreException(DataUtils.ERROR_INTERNAL, "{0}", e.toString(), e)); + } finally { + serializationLock.unlock(); + } + } + + private Chunk createChunk(long time, long version) { + int chunkId = lastChunkId; + if (chunkId != 0) { + chunkId &= Chunk.MAX_ID; + Chunk lastChunk = chunks.get(chunkId); + assert lastChunk != null; + assert lastChunk.isSaved(); + assert lastChunk.version + 1 == version : lastChunk.version + " " + version; + // the metadata of the last chunk was not stored so far, and needs to be + // set now (it's better not to update right after storing, because that + // would modify the meta map again) + layout.put(Chunk.getMetaKey(chunkId), lastChunk.asString()); + // never go backward in time + time = Math.max(lastChunk.time, time); + } + int newChunkId; + while (true) { + newChunkId = ++lastChunkId & Chunk.MAX_ID; + Chunk old = chunks.get(newChunkId); + if (old == null) { + break; + } + if (!old.isSaved()) { + MVStoreException e = DataUtils.newMVStoreException( + DataUtils.ERROR_INTERNAL, + "Last block {0} not stored, possibly due to out-of-memory", old); + panic(e); + } + } + Chunk c = new Chunk(newChunkId); 
c.pageCount = 0; c.pageCountLive = 0; c.maxLen = 0; c.maxLenLive = 0; - for (Page p : changed) { + c.layoutRootPos = Long.MAX_VALUE; + c.block = Long.MAX_VALUE; + c.len = Integer.MAX_VALUE; + c.time = time; + c.version = version; + c.next = Long.MAX_VALUE; + c.occupancy = new BitSet(); + return c; + } + + private void serializeToBuffer(WriteBuffer buff, ArrayList> changed, Chunk c, + long reservedLow, Supplier reservedHighSupplier) { + // need to patch the header later + c.writeChunkHeader(buff, 0); + int headerLength = buff.position() + 44; + buff.position(headerLength); + + long version = c.version; + List toc = new ArrayList<>(); + for (Page p : changed) { String key = MVMap.getMapRootKey(p.getMapId()); if (p.getTotalCount() == 0) { - meta.remove(key); + layout.remove(key); } else { - p.writeUnsavedRecursive(c, buff); + p.writeUnsavedRecursive(c, buff, toc); long root = p.getPos(); - meta.put(key, Long.toHexString(root)); + layout.put(key, Long.toHexString(root)); } } - applyFreedSpace(); - RootReference metaRootReference = meta.setWriteVersion(version); - assert metaRootReference != null; - assert metaRootReference.version == version : metaRootReference.version + " != " + version; + + acceptChunkOccupancyChanges(c.time, version); + + RootReference layoutRootReference = layout.setWriteVersion(version); + assert layoutRootReference != null; + assert layoutRootReference.version == version : layoutRootReference.version + " != " + version; metaChanged = false; + + acceptChunkOccupancyChanges(c.time, version); + onVersionChange(version); - Page metaRoot = metaRootReference.root; - metaRoot.writeUnsavedRecursive(c, buff); + Page layoutRoot = layoutRootReference.root; + layoutRoot.writeUnsavedRecursive(c, buff, toc); + c.layoutRootPos = layoutRoot.getPos(); + changed.add(layoutRoot); + // last allocated map id should be captured after the meta map was saved, because + // this will ensure that concurrently created map, which made it into meta before save, + // will have 
it's id reflected in mapid field of currently written chunk + c.mapId = lastMapId.get(); + + c.tocPos = buff.position(); + long[] tocArray = new long[toc.size()]; + int index = 0; + for (long tocElement : toc) { + tocArray[index++] = tocElement; + buff.putLong(tocElement); + if (DataUtils.isLeafPosition(tocElement)) { + ++leafCount; + } else { + ++nonLeafCount; + } + } + chunksToC.put(c.id, tocArray); int chunkLength = buff.position(); // add the store header and round to the next block @@ -1309,328 +1712,97 @@ private void storeNow() { Chunk.FOOTER_LENGTH, BLOCK_SIZE); buff.limit(length); - long filePos = allocateFileSpace(length, !reuseSpace); - c.block = filePos / BLOCK_SIZE; - c.len = length / BLOCK_SIZE; - assert fileStore.getFileLengthInUse() == measureFileLengthInUse() : - fileStore.getFileLengthInUse() + " != " + measureFileLengthInUse() + " " + c; - c.metaRootPos = metaRoot.getPos(); - // calculate and set the likely next position - if (reuseSpace) { - c.next = fileStore.predictAllocation(c.len * BLOCK_SIZE) / BLOCK_SIZE; - } else { - // just after this chunk - c.next = 0; + saveChunkLock.lock(); + try { + Long reservedHigh = reservedHighSupplier.get(); + long filePos = fileStore.allocate(buff.limit(), reservedLow, reservedHigh); + c.len = buff.limit() / BLOCK_SIZE; + c.block = filePos / BLOCK_SIZE; + assert validateFileLength(c.asString()); + // calculate and set the likely next position + if (reservedLow > 0 || reservedHigh == reservedLow) { + c.next = fileStore.predictAllocation(c.len, 0, 0); + } else { + // just after this chunk + c.next = 0; + } + assert c.pageCountLive == c.pageCount : c; + assert c.occupancy.cardinality() == 0 : c; + + buff.position(0); + assert c.pageCountLive == c.pageCount : c; + assert c.occupancy.cardinality() == 0 : c; + c.writeChunkHeader(buff, headerLength); + + buff.position(buff.limit() - Chunk.FOOTER_LENGTH); + buff.put(c.getFooterBytes()); + } finally { + saveChunkLock.unlock(); } - buff.position(0); - 
c.writeChunkHeader(buff, headerLength); + } - buff.position(buff.limit() - Chunk.FOOTER_LENGTH); - buff.put(c.getFooterBytes()); + private void storeBuffer(Chunk c, WriteBuffer buff, ArrayList> changed) { + saveChunkLock.lock(); + try { + buff.position(0); + long filePos = c.block * BLOCK_SIZE; + write(filePos, buff.getBuffer()); + releaseWriteBuffer(buff); + + // end of the used space is not necessarily the end of the file + boolean storeAtEndOfFile = filePos + buff.limit() >= fileStore.size(); + boolean writeStoreHeader = isWriteStoreHeader(c, storeAtEndOfFile); + lastChunk = c; + if (writeStoreHeader) { + writeStoreHeader(); + } + if (!storeAtEndOfFile) { + // may only shrink after the store header was written + shrinkFileIfPossible(1); + } + } catch (MVStoreException e) { + panic(e); + } catch (Throwable e) { + panic(DataUtils.newMVStoreException(DataUtils.ERROR_INTERNAL, "{0}", e.toString(), e)); + } finally { + saveChunkLock.unlock(); + } - buff.position(0); - write(filePos, buff.getBuffer()); - releaseWriteBuffer(buff); + for (Page p : changed) { + p.releaseSavedPages(); + } + } + private boolean isWriteStoreHeader(Chunk c, boolean storeAtEndOfFile) { // whether we need to write the store header boolean writeStoreHeader = false; - // end of the used space is not necessarily the end of the file - boolean storeAtEndOfFile = filePos + length >= fileStore.size(); if (!storeAtEndOfFile) { + Chunk lastChunk = this.lastChunk; if (lastChunk == null) { writeStoreHeader = true; } else if (lastChunk.next != c.block) { // the last prediction did not matched writeStoreHeader = true; } else { - long headerVersion = DataUtils.readHexLong( - storeHeader, "version", 0); + long headerVersion = DataUtils.readHexLong(storeHeader, HDR_VERSION, 0); if (lastChunk.version - headerVersion > 20) { // we write after at least every 20 versions writeStoreHeader = true; } else { - int chunkId = DataUtils.readHexInt(storeHeader, "chunk", 0); - while (true) { - Chunk old = 
chunks.get(chunkId); - if (old == null) { - // one of the chunks in between - // was removed - writeStoreHeader = true; - break; - } - if (chunkId == lastChunk.id) { - break; - } - chunkId++; - } - } - } - } - - lastChunk = c; - if (writeStoreHeader) { - writeStoreHeader(); - } - if (!storeAtEndOfFile) { - // may only shrink after the store header was written - shrinkFileIfPossible(1); - } - for (Page p : changed) { - p.writeEnd(); - } - metaRoot.writeEnd(); - - // some pages might have been changed in the meantime (in the newest - // version) - unsavedMemory = Math.max(0, unsavedMemory - - currentUnsavedPageCount); - - lastStoredVersion = storeVersion; - } - - /** - * Try to free unused chunks. This method doesn't directly write, but can - * change the metadata, and therefore cause a background write. - */ - private void freeUnusedIfNeeded(long time) { - int freeDelay = retentionTime / 5; - if (time - lastFreeUnusedChunks >= freeDelay) { - // set early in case it fails (out of memory or so) - lastFreeUnusedChunks = time; - freeUnusedChunks(true); - } - } - - private void freeUnusedChunks(boolean fast) { - assert storeLock.isHeldByCurrentThread(); - if (lastChunk != null && reuseSpace) { - Set referenced = collectReferencedChunks(fast); - long time = getTimeSinceCreation(); - - for (Iterator iterator = chunks.values().iterator(); iterator.hasNext(); ) { - Chunk c = iterator.next(); - if (c.block != Long.MAX_VALUE && !referenced.contains(c.id)) { - if (canOverwriteChunk(c, time)) { - iterator.remove(); - if (meta.remove(Chunk.getMetaKey(c.id)) != null) { - markMetaChanged(); - } - long start = c.block * BLOCK_SIZE; - int length = c.len * BLOCK_SIZE; - fileStore.free(start, length); - assert fileStore.getFileLengthInUse() == measureFileLengthInUse() : - fileStore.getFileLengthInUse() + " != " + measureFileLengthInUse(); - } else { - if (c.unused == 0) { - c.unused = time; - meta.put(Chunk.getMetaKey(c.id), c.asString()); - markMetaChanged(); - } + for (int chunkId = 
DataUtils.readHexInt(storeHeader, HDR_CHUNK, 0); + !writeStoreHeader && chunkId <= lastChunk.id; ++chunkId) { + // one of the chunks in between + // was removed + writeStoreHeader = !chunks.containsKey(chunkId); } } } - // set it here, to avoid calling it often if it was slow - lastFreeUnusedChunks = getTimeSinceCreation(); - } - } - - /** - * Collect ids for chunks that are in use. - * @param fast if true, simplified version is used, which assumes that recent chunks - * are still in-use and do not scan recent versions of the store. - * Also is this case only oldest available version of the store is scanned. - * @return set of chunk ids in-use, or null if all chunks should be considered in-use - */ - private Set collectReferencedChunks(boolean fast) { - assert lastChunk != null; - final ThreadPoolExecutor executorService = new ThreadPoolExecutor(10, 10, 10L, TimeUnit.SECONDS, - new ArrayBlockingQueue(keysPerPage + 1)); - final AtomicInteger executingThreadCounter = new AtomicInteger(); - try { - ChunkIdsCollector collector = new ChunkIdsCollector(meta.getId()); - long oldestVersionToKeep = getOldestVersionToKeep(); - RootReference rootReference = meta.flushAndGetRoot(); - if (fast) { - RootReference previous; - while (rootReference.version >= oldestVersionToKeep && (previous = rootReference.previous) != null) { - rootReference = previous; - } - inspectVersion(rootReference, collector, executorService, executingThreadCounter, null); - - Page rootPage = rootReference.root; - long pos = rootPage.getPos(); - assert rootPage.isSaved(); - int chunkId = DataUtils.getPageChunkId(pos); - while (++chunkId <= lastChunk.id) { - collector.registerChunk(chunkId); - } - } else { - Set inspectedRoots = new HashSet<>(); - do { - inspectVersion(rootReference, collector, executorService, executingThreadCounter, inspectedRoots); - } while (rootReference.version >= oldestVersionToKeep - && (rootReference = rootReference.previous) != null); - } - return collector.getReferenced(); - } 
finally { - executorService.shutdownNow(); - } - } - - /** - * Scans all map of a particular store version and marks visited chunks as in-use. - * @param rootReference of the meta map of the version - * @param collector to report visited chunks to - * @param executorService to use for parallel processing - * @param executingThreadCounter counter for threads already in use - * @param inspectedRoots set of page positions for map's roots already inspected - * or null if not to be used - */ - private void inspectVersion(RootReference rootReference, ChunkIdsCollector collector, - ThreadPoolExecutor executorService, - AtomicInteger executingThreadCounter, - Set inspectedRoots) { - Page rootPage = rootReference.root; - long pos = rootPage.getPos(); - if (rootPage.isSaved()) { - if (inspectedRoots != null && !inspectedRoots.add(pos)) { - return; - } - collector.setMapId(meta.getId()); - collector.visit(pos, executorService, executingThreadCounter); - } - for (Cursor c = new Cursor<>(rootPage, "root."); c.hasNext(); ) { - String key = c.next(); - if (!key.startsWith("root.")) { - break; - } - pos = DataUtils.parseHexLong(c.getValue()); - if (DataUtils.isPageSaved(pos)) { - if (inspectedRoots == null || inspectedRoots.add(pos)) { - // to allow for something like "root.tmp.123" to be processed - int mapId = DataUtils.parseHexInt(key.substring(key.lastIndexOf('.') + 1)); - collector.setMapId(mapId); - collector.visit(pos, executorService, executingThreadCounter); - } - } - } - } - - final class ChunkIdsCollector { - - /** really a set */ - private final ConcurrentHashMap referencedChunks = new ConcurrentHashMap<>(); - private final ChunkIdsCollector parent; - private int mapId; - - ChunkIdsCollector(int mapId) { - this.parent = null; - this.mapId = mapId; - } - - private ChunkIdsCollector(ChunkIdsCollector parent) { - this.parent = parent; - this.mapId = parent.mapId; - } - - public int getMapId() { - return mapId; - } - - public void setMapId(int mapId) { - this.mapId = 
mapId; - } - - public Set getReferenced() { - return new HashSet<>(referencedChunks.keySet()); - } - - /** - * Visit a page on a chunk and collect ids for it and its children. - * - * @param page the page to visit - * @param executorService the service to use when doing visit in parallel - * @param executingThreadCounter number of threads currently active - */ - public void visit(Page page, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) { - long pos = page.getPos(); - if (DataUtils.isPageSaved(pos)) { - registerChunk(DataUtils.getPageChunkId(pos)); - } - int count = page.map.getChildPageCount(page); - if (count == 0) { - return; - } - ChunkIdsCollector childCollector = DataUtils.isPageSaved(pos) && cacheChunkRef != null ? - new ChunkIdsCollector(this) : this; - for (int i = 0; i < count; i++) { - Page childPage = page.getChildPageIfLoaded(i); - if (childPage != null) { - childCollector.visit(childPage, executorService, executingThreadCounter); - } else { - childCollector.visit(page.getChildPagePos(i), executorService, executingThreadCounter); - } - } - cacheCollectedChunkIds(pos, childCollector); - } - - /** - * Visit a page on a chunk and collect ids for it and its children. - * - * @param pos position of the page to visit - * @param executorService the service to use when doing visit in parallel - * @param executingThreadCounter number of threads currently active - */ - public void visit(long pos, ThreadPoolExecutor executorService, AtomicInteger executingThreadCounter) { - if (!DataUtils.isPageSaved(pos)) { - return; - } - registerChunk(DataUtils.getPageChunkId(pos)); - if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) { - return; - } - int[] chunkIds; - if (cacheChunkRef != null && (chunkIds = cacheChunkRef.get(pos)) != null) { - // there is a cached set of chunk ids for this position - for (int chunkId : chunkIds) { - registerChunk(chunkId); - } - } else { - ChunkIdsCollector childCollector = cacheChunkRef != null ? 
new ChunkIdsCollector(this) : this; - Page page; - if (cache != null && (page = cache.get(pos)) != null) { - // there is a full page in cache, use it - childCollector.visit(page, executorService, executingThreadCounter); - } else { - // page was not cached: read the data - ByteBuffer buff = readBufferForPage(pos, getMapId()); - Page.readChildrenPositions(buff, pos, childCollector, executorService, executingThreadCounter); - } - cacheCollectedChunkIds(pos, childCollector); - } - } - - /** - * Add chunk to list of referenced chunks. - * - * @param chunkId chunk id - */ - void registerChunk(int chunkId) { - if (referencedChunks.put(chunkId, 1) == null && parent != null) { - parent.registerChunk(chunkId); - } } - private void cacheCollectedChunkIds(long pos, ChunkIdsCollector childCollector) { - if (childCollector != this) { - int[] chunkIds = new int[childCollector.referencedChunks.size()]; - int index = 0; - for (Integer chunkId : childCollector.referencedChunks.keySet()) { - chunkIds[index++] = chunkId; - } - cacheChunkRef.put(pos, chunkIds, Constants.MEMORY_ARRAY + 4 * chunkIds.length); - } + if (storeHeader.remove(HDR_CLEAN) != null) { + writeStoreHeader = true; } + return writeStoreHeader; } /** @@ -1640,9 +1812,8 @@ private void cacheCollectedChunkIds(long pos, ChunkIdsCollector childCollector) * @return the buffer */ private WriteBuffer getWriteBuffer() { - WriteBuffer buff; - if (writeBuffer != null) { - buff = writeBuffer; + WriteBuffer buff = writeBufferPool.poll(); + if (buff != null) { buff.clear(); } else { buff = new WriteBuffer(); @@ -1658,20 +1829,16 @@ private WriteBuffer getWriteBuffer() { */ private void releaseWriteBuffer(WriteBuffer buff) { if (buff.capacity() <= 4 * 1024 * 1024) { - writeBuffer = buff; + writeBufferPool.offer(buff); } } - private boolean canOverwriteChunk(Chunk c, long time) { - if (retentionTime >= 0) { - if (c.time + retentionTime > time) { - return false; - } - if (c.unused == 0 || c.unused + retentionTime / 2 > time) { - 
return false; - } - } - return true; + private static boolean canOverwriteChunk(Chunk c, long oldestVersionToKeep) { + return !c.isLive() && c.unusedAtVersion < oldestVersionToKeep; + } + + private boolean isSeasonedChunk(Chunk chunk, long time) { + return retentionTime < 0 || chunk.time + retentionTime <= time; } private long getTimeSinceCreation() { @@ -1694,37 +1861,39 @@ private long getTimeAbsolute() { /** * Apply the freed space to the chunk metadata. The metadata is updated, but * completely free chunks are not removed from the set of chunks, and the - * disk space is not yet marked as free. + * disk space is not yet marked as free. They are queued instead and wait until + * their usage is over. */ - private void applyFreedSpace() { - while (true) { - ArrayList modified = new ArrayList<>(); - synchronized (freedPageSpace) { - for (Chunk f : freedPageSpace.values()) { - Chunk c = chunks.get(f.id); - if (c != null) { // skip if was already removed - c.maxLenLive += f.maxLenLive; - c.pageCountLive += f.pageCountLive; - if (c.pageCountLive < 0 && c.pageCountLive > -MARKED_FREE) { - // can happen after a rollback - c.pageCountLive = 0; - } - if (c.maxLenLive < 0 && c.maxLenLive > -MARKED_FREE) { - // can happen after a rollback - c.maxLenLive = 0; + private void acceptChunkOccupancyChanges(long time, long version) { + assert serializationLock.isHeldByCurrentThread(); + if (lastChunk != null) { + Set modifiedChunks = new HashSet<>(); + while (true) { + RemovedPageInfo rpi; + while ((rpi = removedPages.peek()) != null && rpi.version < version) { + rpi = removedPages.poll(); // could be different from the peeked one + assert rpi != null; // since nobody else retrieves from queue + assert rpi.version < version : rpi + " < " + version; + int chunkId = rpi.getPageChunkId(); + Chunk chunk = chunks.get(chunkId); + assert !isOpen() || chunk != null : chunkId; + if (chunk != null) { + modifiedChunks.add(chunk); + if (chunk.accountForRemovedPage(rpi.getPageNo(), 
rpi.getPageLength(), + rpi.isPinned(), time, rpi.version)) { + deadChunks.offer(chunk); } - modified.add(c); } } - freedPageSpace.clear(); - } - if (modified.isEmpty()) { - break; - } - for (Chunk c : modified) { - meta.put(Chunk.getMetaKey(c.id), c.asString()); + if (modifiedChunks.isEmpty()) { + return; + } + for (Chunk chunk : modifiedChunks) { + int chunkId = chunk.id; + layout.put(Chunk.getMetaKey(chunkId), chunk.asString()); + } + modifiedChunks.clear(); } - markMetaChanged(); } } @@ -1735,6 +1904,7 @@ private void applyFreedSpace() { * @param minPercent the minimum percentage to save */ private void shrinkFileIfPossible(int minPercent) { + assert saveChunkLock.isHeldByCurrentThread(); if (fileStore.isReadOnly()) { return; } @@ -1762,15 +1932,28 @@ private void shrinkFileIfPossible(int minPercent) { * @return the position */ private long getFileLengthInUse() { + assert saveChunkLock.isHeldByCurrentThread(); long result = fileStore.getFileLengthInUse(); assert result == measureFileLengthInUse() : result + " != " + measureFileLengthInUse(); return result; } + /** + * Get the index of the first block after last occupied one. + * It marks the beginning of the last (infinite) free space. 
+ * + * @return block index + */ + private long getAfterLastBlock() { + assert saveChunkLock.isHeldByCurrentThread(); + return fileStore.getAfterLastBlock(); + } + private long measureFileLengthInUse() { + assert saveChunkLock.isHeldByCurrentThread(); long size = 2; for (Chunk c : chunks.values()) { - if (c.len != Integer.MAX_VALUE) { + if (c.isSaved()) { size = Math.max(size, c.block + c.len); } } @@ -1786,6 +1969,7 @@ public boolean hasUnsavedChanges() { if (metaChanged) { return true; } + long lastStoredVersion = currentVersion - 1; for (MVMap m : maps.values()) { if (!m.isClosed()) { if(m.hasChangesSince(lastStoredVersion)) { @@ -1793,14 +1977,7 @@ public boolean hasUnsavedChanges() { } } } - return false; - } - - private boolean hasUnsavedChangesInternal() { - if (meta.hasChangesSince(lastStoredVersion)) { - return true; - } - return hasUnsavedChanges(); + return layout.hasChangesSince(lastStoredVersion) && lastStoredVersion > INITIAL_VERSION; } private Chunk readChunkHeader(long block) { @@ -1809,42 +1986,18 @@ private Chunk readChunkHeader(long block) { return Chunk.readChunkHeader(buff, p); } - /** - * Compact the store by moving all live pages to new chunks. - * - * @return if anything was written - */ - public boolean compactRewriteFully() { - storeLock.lock(); + private Chunk readChunkHeaderOptionally(long block) { try { - checkOpen(); - if (lastChunk == null) { - // nothing to do - return false; - } - for (MVMap m : maps.values()) { - @SuppressWarnings("unchecked") - MVMap map = (MVMap) m; - Cursor cursor = map.cursor(null); - Page lastPage = null; - while (cursor.hasNext()) { - cursor.next(); - Page p = cursor.getPage(); - if (p == lastPage) { - continue; - } - Object k = p.getKey(0); - Object v = p.getValue(0); - map.put(k, v); - lastPage = p; - } - } - commit(); - return true; - } finally { - storeLock.unlock(); + Chunk chunk = readChunkHeader(block); + return chunk.block != block ? 
null : chunk; + } catch (Exception ignore) { + return null; } + } + private Chunk readChunkHeaderOptionally(long block, int expectedId) { + Chunk chunk = readChunkHeaderOptionally(block); + return chunk == null || chunk.id != expectedId ? null : chunk; } /** @@ -1863,132 +2016,241 @@ public void compactMoveChunks() { * @param targetFillRate do nothing if the file store fill rate is higher * than this * @param moveSize the number of bytes to move + * @return true if any chunks were moved as result of this operation, false otherwise */ - public void compactMoveChunks(int targetFillRate, long moveSize) { + boolean compactMoveChunks(int targetFillRate, long moveSize) { + boolean res = false; storeLock.lock(); try { checkOpen(); - if (lastChunk != null && reuseSpace) { - int oldRetentionTime = retentionTime; - boolean oldReuse = reuseSpace; + // because serializationExecutor is a single-threaded one and + // all task submissions to it are done under storeLock, + // it is guaranteed, that upon this dummy task completion + // there are no pending / in-progress task here + submitOrRun(serializationExecutor, () -> {}, true); + serializationLock.lock(); + try { + // similarly, all task submissions to bufferSaveExecutor + // are done under serializationLock, and upon this dummy task completion + // it will be no pending / in-progress task here + submitOrRun(bufferSaveExecutor, () -> {}, true); + saveChunkLock.lock(); try { - retentionTime = -1; - freeUnusedChunks(false); - if (fileStore.getFillRate() <= targetFillRate) { - long start = fileStore.getFirstFree() / BLOCK_SIZE; - ArrayList move = findChunksToMove(start, moveSize); - compactMoveChunks(move); + if (lastChunk != null && reuseSpace && getFillRate() <= targetFillRate) { + res = compactMoveChunks(moveSize); } } finally { - reuseSpace = oldReuse; - retentionTime = oldRetentionTime; + saveChunkLock.unlock(); } + } finally { + serializationLock.unlock(); } + } catch (MVStoreException e) { + panic(e); + } catch (Throwable 
e) { + panic(DataUtils.newMVStoreException( + DataUtils.ERROR_INTERNAL, "{0}", e.toString(), e)); } finally { - storeLock.unlock(); + unlockAndCheckPanicCondition(); } + return res; } - private ArrayList findChunksToMove(long startBlock, long moveSize) { - ArrayList move = new ArrayList<>(); - for (Chunk c : chunks.values()) { - if (c.block > startBlock) { - move.add(c); - } + private boolean compactMoveChunks(long moveSize) { + assert storeLock.isHeldByCurrentThread(); + dropUnusedChunks(); + long start = fileStore.getFirstFree() / BLOCK_SIZE; + Iterable chunksToMove = findChunksToMove(start, moveSize); + if (chunksToMove == null) { + return false; } - // sort by block - Collections.sort(move, new Comparator() { - @Override - public int compare(Chunk o1, Chunk o2) { - return Long.signum(o1.block - o2.block); + compactMoveChunks(chunksToMove); + return true; + } + + private Iterable findChunksToMove(long startBlock, long moveSize) { + long maxBlocksToMove = moveSize / BLOCK_SIZE; + Iterable result = null; + if (maxBlocksToMove > 0) { + PriorityQueue queue = new PriorityQueue<>(chunks.size() / 2 + 1, + (o1, o2) -> { + // instead of selection just closest to beginning of the file, + // pick smaller chunk(s) which sit in between bigger holes + int res = Integer.compare(o2.collectPriority, o1.collectPriority); + if (res != 0) { + return res; + } + return Long.signum(o2.block - o1.block); + }); + long size = 0; + for (Chunk chunk : chunks.values()) { + if (chunk.isSaved() && chunk.block > startBlock) { + chunk.collectPriority = getMovePriority(chunk); + queue.offer(chunk); + size += chunk.len; + while (size > maxBlocksToMove) { + Chunk removed = queue.poll(); + if (removed == null) { + break; + } + size -= removed.len; + } + } } - }); - // find which is the last block to keep - int count = 0; - long size = 0; - for (Chunk c : move) { - long chunkSize = c.len * (long) BLOCK_SIZE; - size += chunkSize; - if (size > moveSize) { - break; + if (!queue.isEmpty()) { + ArrayList 
list = new ArrayList<>(queue); + list.sort(Chunk.PositionComparator.INSTANCE); + result = list; } - count++; } - // move the first block (so the first gap is moved), - // and the one at the end (so the file shrinks) - while (move.size() > count && move.size() > 1) { - move.remove(1); - } - - return move; + return result; } - private void compactMoveChunks(ArrayList move) { - for (Chunk c : move) { - moveChunk(c, true); - } + private int getMovePriority(Chunk chunk) { + return fileStore.getMovePriority((int)chunk.block); + } - // update the metadata (store at the end of the file) - reuseSpace = false; - commit(); - sync(); + private void compactMoveChunks(Iterable move) { + assert storeLock.isHeldByCurrentThread(); + assert serializationLock.isHeldByCurrentThread(); + assert saveChunkLock.isHeldByCurrentThread(); + if (move != null) { + // this will ensure better recognition of the last chunk + // in case of power failure, since we are going to move older chunks + // to the end of the file + writeStoreHeader(); + sync(); - Chunk chunk = this.lastChunk; + Iterator iterator = move.iterator(); + assert iterator.hasNext(); + long leftmostBlock = iterator.next().block; + long originalBlockCount = getAfterLastBlock(); + // we need to ensure that chunks moved within the following loop + // do not overlap with space just released by chunks moved before them, + // hence the need to reserve this area [leftmostBlock, originalBlockCount) + for (Chunk chunk : move) { + moveChunk(chunk, leftmostBlock, originalBlockCount); + } + // update the metadata (hopefully within the file) + store(leftmostBlock, originalBlockCount); + sync(); - // now re-use the empty space - reuseSpace = true; - for (Chunk c : move) { - // ignore if already removed during the previous store operation - if (chunks.containsKey(c.id)) { - moveChunk(c, false); + Chunk chunkToMove = lastChunk; + assert chunkToMove != null; + long postEvacuationBlockCount = getAfterLastBlock(); + + boolean 
chunkToMoveIsAlreadyInside = chunkToMove.block < leftmostBlock; + boolean movedToEOF = !chunkToMoveIsAlreadyInside; + // move all chunks, which previously did not fit before reserved area + // now we can re-use previously reserved area [leftmostBlock, originalBlockCount), + // but need to reserve [originalBlockCount, postEvacuationBlockCount) + for (Chunk c : move) { + if (c.block >= originalBlockCount && + moveChunk(c, originalBlockCount, postEvacuationBlockCount)) { + assert c.block < originalBlockCount; + movedToEOF = true; + } + } + assert postEvacuationBlockCount >= getAfterLastBlock(); + + if (movedToEOF) { + boolean moved = moveChunkInside(chunkToMove, originalBlockCount); + + // store a new chunk with updated metadata (hopefully within a file) + store(originalBlockCount, postEvacuationBlockCount); + sync(); + // if chunkToMove did not fit within originalBlockCount (move is + // false), and since now previously reserved area + // [originalBlockCount, postEvacuationBlockCount) also can be + // used, lets try to move that chunk into this area, closer to + // the beginning of the file + long lastBoundary = moved || chunkToMoveIsAlreadyInside ? 
+ postEvacuationBlockCount : chunkToMove.block; + moved = !moved && moveChunkInside(chunkToMove, lastBoundary); + if (moveChunkInside(lastChunk, lastBoundary) || moved) { + store(lastBoundary, -1); + } } + + shrinkFileIfPossible(0); + sync(); } + } - // update the metadata (within the file) - commit(); - sync(); - if (chunks.containsKey(chunk.id)) { - moveChunk(chunk, false); - commit(); + private void store(long reservedLow, long reservedHigh) { + saveChunkLock.unlock(); + try { + serializationLock.unlock(); + try { + storeNow(true, reservedLow, () -> reservedHigh); + } finally { + serializationLock.lock(); + } + } finally { + saveChunkLock.lock(); } - shrinkFileIfPossible(0); - sync(); } - private void moveChunk(Chunk c, boolean toTheEnd) { + private boolean moveChunkInside(Chunk chunkToMove, long boundary) { + boolean res = chunkToMove.block >= boundary && + fileStore.predictAllocation(chunkToMove.len, boundary, -1) < boundary && + moveChunk(chunkToMove, boundary, -1); + assert !res || chunkToMove.block + chunkToMove.len <= boundary; + return res; + } + + /** + * Move specified chunk into free area of the file. "Reserved" area + * specifies file interval to be avoided, when un-allocated space will be + * chosen for a new chunk's location. 
+ * + * @param chunk to move + * @param reservedAreaLow low boundary of reserved area, inclusive + * @param reservedAreaHigh high boundary of reserved area, exclusive + * @return true if block was moved, false otherwise + */ + private boolean moveChunk(Chunk chunk, long reservedAreaLow, long reservedAreaHigh) { + // ignore if already removed during the previous store operations + // those are possible either as explicit commit calls + // or from meta map updates at the end of this method + if (!chunks.containsKey(chunk.id)) { + return false; + } + long start = chunk.block * BLOCK_SIZE; + int length = chunk.len * BLOCK_SIZE; + long block; WriteBuffer buff = getWriteBuffer(); - long start = c.block * BLOCK_SIZE; - int length = c.len * BLOCK_SIZE; - buff.limit(length); - ByteBuffer readBuff = fileStore.readFully(start, length); - Chunk.readChunkHeader(readBuff, start); - int chunkHeaderLen = readBuff.position(); - buff.position(chunkHeaderLen); - buff.put(readBuff); - long pos = allocateFileSpace(length, toTheEnd); - fileStore.free(start, length); - c.block = pos / BLOCK_SIZE; - c.next = 0; - buff.position(0); - c.writeChunkHeader(buff, chunkHeaderLen); - buff.position(length - Chunk.FOOTER_LENGTH); - buff.put(c.getFooterBytes()); - buff.position(0); - write(pos, buff.getBuffer()); - releaseWriteBuffer(buff); - meta.put(Chunk.getMetaKey(c.id), c.asString()); - markMetaChanged(); - } - - private long allocateFileSpace(int length, boolean atTheEnd) { - long filePos; - if (atTheEnd) { - filePos = getFileLengthInUse(); - fileStore.markUsed(filePos, length); - } else { - filePos = fileStore.allocate(length); + try { + buff.limit(length); + ByteBuffer readBuff = fileStore.readFully(start, length); + Chunk chunkFromFile = Chunk.readChunkHeader(readBuff, start); + int chunkHeaderLen = readBuff.position(); + buff.position(chunkHeaderLen); + buff.put(readBuff); + long pos = fileStore.allocate(length, reservedAreaLow, reservedAreaHigh); + block = pos / BLOCK_SIZE; + // in the 
absence of a reserved area, + // block should always move closer to the beginning of the file + assert reservedAreaHigh > 0 || block <= chunk.block : block + " " + chunk; + buff.position(0); + // can not set chunk's new block/len until it's fully written at new location, + // because concurrent reader can pick it up prematurely, + // also occupancy accounting fields should not leak into header + chunkFromFile.block = block; + chunkFromFile.next = 0; + chunkFromFile.writeChunkHeader(buff, chunkHeaderLen); + buff.position(length - Chunk.FOOTER_LENGTH); + buff.put(chunkFromFile.getFooterBytes()); + buff.position(0); + write(pos, buff.getBuffer()); + } finally { + releaseWriteBuffer(buff); } - return filePos; + fileStore.free(start, length); + chunk.block = block; + chunk.next = 0; + layout.put(Chunk.getMetaKey(chunk.id), chunk.asString()); + return true; } /** @@ -2003,6 +2265,26 @@ public void sync() { } } + /** + * Compact store file, that is, compact blocks that have a low + * fill rate, and move chunks next to each other. This will typically + * shrink the file. Changes are flushed to the file, and old + * chunks are overwritten. + * + * @param maxCompactTime the maximum time in milliseconds to compact + */ + public void compactFile(int maxCompactTime) { + setRetentionTime(0); + long stopAt = System.nanoTime() + maxCompactTime * 1_000_000L; + while (compact(95, 16 * 1024 * 1024)) { + sync(); + compactMoveChunks(95, 16 * 1024 * 1024); + if (System.nanoTime() - stopAt > 0L) { + break; + } + } + } + /** * Try to increase the fill rate by re-writing partially full chunks. Chunks * with a low number of live items are re-written. 
@@ -2022,31 +2304,46 @@ public void sync() { * @return if a chunk was re-written */ public boolean compact(int targetFillRate, int write) { - if (!reuseSpace) { - return false; - } - checkOpen(); - // We can't wait forever for the lock here, - // because if called from the background thread, - // it might go into deadlock with concurrent database closure - // and attempt to stop this thread. - try { - if (!storeLock.isHeldByCurrentThread() && - storeLock.tryLock(10, TimeUnit.MILLISECONDS)) { + if (reuseSpace && lastChunk != null) { + checkOpen(); + if (targetFillRate > 0 && getChunksFillRate() < targetFillRate) { + // We can't wait forever for the lock here, + // because if called from the background thread, + // it might go into deadlock with concurrent database closure + // and attempt to stop this thread. try { - ArrayList old = findOldChunks(targetFillRate, write); - if (old == null || old.isEmpty()) { - return false; + if (storeLock.tryLock(10, TimeUnit.MILLISECONDS)) { + try { + return rewriteChunks(write, 100); + } finally { + storeLock.unlock(); + } } - compactRewrite(old); - return true; - } finally { - storeLock.unlock(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + return false; + } + + private boolean rewriteChunks(int writeLimit, int targetFillRate) { + serializationLock.lock(); + try { + TxCounter txCounter = registerVersionUsage(); + try { + acceptChunkOccupancyChanges(getTimeSinceCreation(), currentVersion); + Iterable old = findOldChunks(writeLimit, targetFillRate); + if (old != null) { + HashSet idSet = createIdSet(old); + return !idSet.isEmpty() && compactRewrite(idSet) > 0; } + } finally { + deregisterVersionUsage(txCounter); } return false; - } catch (InterruptedException e) { - throw new RuntimeException(e); + } finally { + serializationLock.unlock(); } } @@ -2058,167 +2355,293 @@ public boolean compact(int targetFillRate, int write) { * * @return the fill rate, in percent (100 is completely full) */ - 
public int getCurrentFillRate() { + public int getChunksFillRate() { + return getChunksFillRate(true); + } + + public int getRewritableChunksFillRate() { + return getChunksFillRate(false); + } + + private int getChunksFillRate(boolean all) { long maxLengthSum = 1; long maxLengthLiveSum = 1; long time = getTimeSinceCreation(); for (Chunk c : chunks.values()) { - maxLengthSum += c.maxLen; - if (c.time + retentionTime > time) { - // young chunks (we don't optimize those): - // assume if they are fully live - // so that we don't try to optimize yet - // until they get old - maxLengthLiveSum += c.maxLen; - } else { + if (all || isRewritable(c, time)) { + assert c.maxLen >= 0; + maxLengthSum += c.maxLen; maxLengthLiveSum += c.maxLenLive; } } // the fill rate of all chunks combined - if (maxLengthSum <= 0) { - // avoid division by 0 - maxLengthSum = 1; - } int fillRate = (int) (100 * maxLengthLiveSum / maxLengthSum); return fillRate; } - private ArrayList findOldChunks(int targetFillRate, int write) { - if (lastChunk == null) { - // nothing to do - return null; + /** + * Get data chunks count. + * + * @return number of existing chunks in store. + */ + public int getChunkCount() { + return chunks.size(); + } + + /** + * Get data pages count. + * + * @return number of existing pages in store. + */ + public int getPageCount() { + int count = 0; + for (Chunk chunk : chunks.values()) { + count += chunk.pageCount; } - long time = getTimeSinceCreation(); - int fillRate = getCurrentFillRate(); - if (fillRate >= targetFillRate) { - return null; + return count; + } + + /** + * Get live data pages count. + * + * @return number of existing live pages in store. 
+ */ + public int getLivePageCount() { + int count = 0; + for (Chunk chunk : chunks.values()) { + count += chunk.pageCountLive; } + return count; + } - // the 'old' list contains the chunks we want to free up - ArrayList old = new ArrayList<>(); - Chunk last = chunks.get(lastChunk.id); - for (Chunk c : chunks.values()) { + private int getProjectedFillRate(int thresholdChunkFillRate) { + saveChunkLock.lock(); + try { + int vacatedBlocks = 0; + long maxLengthSum = 1; + long maxLengthLiveSum = 1; + long time = getTimeSinceCreation(); + for (Chunk c : chunks.values()) { + assert c.maxLen >= 0; + if (isRewritable(c, time) && c.getFillRate() <= thresholdChunkFillRate) { + assert c.maxLen >= c.maxLenLive; + vacatedBlocks += c.len; + maxLengthSum += c.maxLen; + maxLengthLiveSum += c.maxLenLive; + } + } + int additionalBlocks = (int) (vacatedBlocks * maxLengthLiveSum / maxLengthSum); + int fillRate = fileStore.getProjectedFillRate(vacatedBlocks - additionalBlocks); + return fillRate; + } finally { + saveChunkLock.unlock(); + } + } + + public int getFillRate() { + saveChunkLock.lock(); + try { + return fileStore.getFillRate(); + } finally { + saveChunkLock.unlock(); + } + } + + private Iterable findOldChunks(int writeLimit, int targetFillRate) { + assert lastChunk != null; + long time = getTimeSinceCreation(); + + // the queue will contain chunks we want to free up + // the smaller the collectionPriority, the more desirable this chunk's re-write is + // queue will be ordered in descending order of collectionPriority values, + // so most desirable chunks will stay at the tail + PriorityQueue queue = new PriorityQueue<>(this.chunks.size() / 4 + 1, + (o1, o2) -> { + int comp = Integer.compare(o2.collectPriority, o1.collectPriority); + if (comp == 0) { + comp = Long.compare(o2.maxLenLive, o1.maxLenLive); + } + return comp; + }); + + long totalSize = 0; + long latestVersion = lastChunk.version + 1; + for (Chunk chunk : chunks.values()) { // only look at chunk older than the 
retention time // (it's possible to compact chunks earlier, but right // now we don't do that) - if (c.time + retentionTime <= time) { - long age = last.version - c.version + 1; - c.collectPriority = (int) (c.getFillRate() * 1000 / Math.max(1,age)); - old.add(c); + int fillRate = chunk.getFillRate(); + if (isRewritable(chunk, time) && fillRate <= targetFillRate) { + long age = Math.max(1, latestVersion - chunk.version); + chunk.collectPriority = (int) (fillRate * 1000 / age); + totalSize += chunk.maxLenLive; + queue.offer(chunk); + while (totalSize > writeLimit) { + Chunk removed = queue.poll(); + if (removed == null) { + break; + } + totalSize -= removed.maxLenLive; + } } } - if (old.isEmpty()) { - return null; - } - // sort the list, so the first entry should be collected first - Collections.sort(old, new Comparator() { - @Override - public int compare(Chunk o1, Chunk o2) { - int comp = Integer.compare(o1.collectPriority, o2.collectPriority); - if (comp == 0) { - comp = Long.compare(o1.maxLenLive, o2.maxLenLive); - } - return comp; - } - }); - // find out up to were in the old list we need to move - long written = 0; - int chunkCount = 0; - Chunk move = null; - for (Chunk c : old) { - if (move != null) { - if (c.collectPriority > 0 && written > write) { - break; + return queue.isEmpty() ? 
null : queue; + } + + private boolean isRewritable(Chunk chunk, long time) { + return chunk.isRewritable() && isSeasonedChunk(chunk, time); + } + + private int compactRewrite(Set set) { + assert storeLock.isHeldByCurrentThread(); + assert currentStoreVersion < 0; // we should be able to do tryCommit() -> store() + acceptChunkOccupancyChanges(getTimeSinceCreation(), currentVersion); + int rewrittenPageCount = rewriteChunks(set, false); + acceptChunkOccupancyChanges(getTimeSinceCreation(), currentVersion); + rewrittenPageCount += rewriteChunks(set, true); + return rewrittenPageCount; + } + + private int rewriteChunks(Set set, boolean secondPass) { + int rewrittenPageCount = 0; + for (int chunkId : set) { + Chunk chunk = chunks.get(chunkId); + long[] toc = getToC(chunk); + if (toc != null) { + for (int pageNo = 0; (pageNo = chunk.occupancy.nextClearBit(pageNo)) < chunk.pageCount; ++pageNo) { + long tocElement = toc[pageNo]; + int mapId = DataUtils.getPageMapId(tocElement); + MVMap map = mapId == layout.getId() ? layout : mapId == meta.getId() ? 
meta : getMap(mapId); + if (map != null && !map.isClosed()) { + assert !map.isSingleWriter(); + if (secondPass || DataUtils.isLeafPosition(tocElement)) { + long pagePos = DataUtils.getPagePos(chunkId, tocElement); + serializationLock.unlock(); + try { + if (map.rewritePage(pagePos)) { + ++rewrittenPageCount; + if (map == meta) { + markMetaChanged(); + } + } + } finally { + serializationLock.lock(); + } + } + } } } - written += c.maxLenLive; - chunkCount++; - move = c; - } - if (chunkCount < 1) { - return null; - } - // remove the chunks we want to keep from this list - boolean remove = false; - for (Iterator it = old.iterator(); it.hasNext();) { - Chunk c = it.next(); - if (move == c) { - remove = true; - } else if (remove) { - it.remove(); - } } - return old; + return rewrittenPageCount; } - private void compactRewrite(Iterable old) { + private static HashSet createIdSet(Iterable toCompact) { HashSet set = new HashSet<>(); - for (Chunk c : old) { + for (Chunk c : toCompact) { set.add(c.id); } - for (MVMap m : maps.values()) { - @SuppressWarnings("unchecked") - MVMap map = (MVMap) m; - if (!map.isClosed()) { - map.rewrite(set); - } - } - meta.rewrite(set); - freeUnusedChunks(false); - commit(); + return set; } /** * Read a page. 
* + * @param key type + * @param value type + * * @param map the map * @param pos the page position * @return the page */ - Page readPage(MVMap map, long pos) { - if (!DataUtils.isPageSaved(pos)) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, "Position 0"); + Page readPage(MVMap map, long pos) { + try { + if (!DataUtils.isPageSaved(pos)) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, "Position 0"); + } + Page p = readPageFromCache(pos); + if (p == null) { + Chunk chunk = getChunk(pos); + int pageOffset = DataUtils.getPageOffset(pos); + try { + ByteBuffer buff = chunk.readBufferForPage(fileStore, pageOffset, pos); + p = Page.read(buff, pos, map); + } catch (MVStoreException e) { + throw e; + } catch (Exception e) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "Unable to read the page at position {0}, chunk {1}, offset {2}", + pos, chunk.id, pageOffset, e); + } + cachePage(p); + } + return p; + } catch (MVStoreException e) { + if (recoveryMode) { + return map.createEmptyLeaf(); + } + throw e; } - Page p = cache == null ? null : cache.get(pos); - if (p == null) { - ByteBuffer buff = readBufferForPage(pos, map.getId()); - p = Page.read(buff, pos, map); - cachePage(p); + } + + private long[] getToC(Chunk chunk) { + if (chunk.tocPos == 0) { + // legacy chunk without table of content + return null; + } + long[] toc = chunksToC.get(chunk.id); + if (toc == null) { + toc = chunk.readToC(fileStore); + chunksToC.put(chunk.id, toc, toc.length * 8); } - return p; + assert toc.length == chunk.pageCount : toc.length + " != " + chunk.pageCount; + return toc; + } + + @SuppressWarnings("unchecked") + private Page readPageFromCache(long pos) { + return cache == null ? null : (Page)cache.get(pos); } /** * Remove a page. 
- * - * @param pos the position of the page - * @param memory the memory usage - */ - void removePage(long pos, int memory) { - // we need to keep temporary pages, - // to support reading old versions and rollback - if (!DataUtils.isPageSaved(pos)) { - // the page was not yet stored: - // just using "unsavedMemory -= memory" could result in negative - // values, because in some cases a page is allocated, but never - // stored, so we need to use max - unsavedMemory = Math.max(0, unsavedMemory - memory); - return; - } - - int chunkId = DataUtils.getPageChunkId(pos); - // synchronize, because pages could be freed concurrently - synchronized (freedPageSpace) { - Chunk chunk = freedPageSpace.get(chunkId); - if (chunk == null) { - chunk = new Chunk(chunkId); - freedPageSpace.put(chunkId, chunk); + * @param pos the position of the page + * @param version at which page was removed + * @param pinned whether page is considered pinned + * @param pageNo sequential page number within chunk + */ + void accountForRemovedPage(long pos, long version, boolean pinned, int pageNo) { + assert DataUtils.isPageSaved(pos); + if (pageNo < 0) { + pageNo = calculatePageNo(pos); + } + RemovedPageInfo rpi = new RemovedPageInfo(pos, pinned, version, pageNo); + removedPages.add(rpi); + } + + private int calculatePageNo(long pos) { + int pageNo = -1; + Chunk chunk = getChunk(pos); + long[] toC = getToC(chunk); + if (toC != null) { + int offset = DataUtils.getPageOffset(pos); + int low = 0; + int high = toC.length - 1; + while (low <= high) { + int mid = (low + high) >>> 1; + long midVal = DataUtils.getPageOffset(toC[mid]); + if (midVal < offset) { + low = mid + 1; + } else if (midVal > offset) { + high = mid - 1; + } else { + pageNo = mid; + break; + } } - chunk.maxLenLive -= DataUtils.getPageMaxLength(pos); - chunk.pageCountLive -= 1; } + return pageNo; } Compressor getCompressorFast() { @@ -2321,21 +2744,24 @@ public long getVersionsToKeep() { } /** - * Get the oldest version to retain in 
memory, which is the manually set - * retain version, or the current store version (whatever is older). + * Get the oldest version to retain. + * We keep at least number of previous versions specified by "versionsToKeep" + * configuration parameter (default 5). + * Previously it was used only in case of non-persistent MVStore. + * Now it's honored in all cases (although H2 always sets it to zero). + * Oldest version determination also takes into account calls (de)registerVersionUsage(), + * an will not release the version, while version is still in use. * * @return the version */ - public long getOldestVersionToKeep() { + long getOldestVersionToKeep() { long v = oldestVersionToKeep.get(); - if (fileStore == null) { - v = Math.max(v - versionsToKeep + 1, INITIAL_VERSION); - return v; - } - - long storeVersion = currentStoreVersion; - if (storeVersion != INITIAL_VERSION && storeVersion < v) { - v = storeVersion; + v = Math.max(v - versionsToKeep, INITIAL_VERSION); + if (fileStore != null) { + long storeVersion = lastChunkVersion() - 1; + if (storeVersion != INITIAL_VERSION && storeVersion < v) { + v = storeVersion; + } } return v; } @@ -2350,6 +2776,11 @@ private void setOldestVersionToKeep(long oldestVersionToKeep) { } while (!success); } + private long lastChunkVersion() { + Chunk chunk = lastChunk; + return chunk == null ? INITIAL_VERSION + 1 : chunk.version; + } + /** * Check whether all data can be read from this version. 
This requires that * all chunks referenced by this version are still available (not @@ -2373,27 +2804,23 @@ private boolean isKnownVersion(long version) { } // also, all chunks referenced by this version // need to be available in the file - MVMap oldMeta = getMetaMap(version); - if (oldMeta == null) { - return false; - } + MVMap oldLayoutMap = getLayoutMap(version); try { - for (Iterator it = oldMeta.keyIterator("chunk."); - it.hasNext();) { + for (Iterator it = oldLayoutMap.keyIterator(DataUtils.META_CHUNK); it.hasNext();) { String chunkKey = it.next(); - if (!chunkKey.startsWith("chunk.")) { + if (!chunkKey.startsWith(DataUtils.META_CHUNK)) { break; } - if (!meta.containsKey(chunkKey)) { - String s = oldMeta.get(chunkKey); + if (!layout.containsKey(chunkKey)) { + String s = oldLayoutMap.get(chunkKey); Chunk c2 = Chunk.fromString(s); - Chunk test = readChunkHeaderAndFooter(c2.block); - if (test == null || test.id != c2.id) { + Chunk test = readChunkHeaderAndFooter(c2.block, c2.id); + if (test == null) { return false; } } } - } catch (IllegalStateException e) { + } catch (MVStoreException e) { // the chunk missing where the metadata is stored return false; } @@ -2401,11 +2828,16 @@ private boolean isKnownVersion(long version) { } /** - * Increment the number of unsaved pages. + * Adjust amount of "unsaved memory" meaning amount of RAM occupied by pages + * not saved yet to the file. This is the amount which triggers auto-commit. 
* - * @param memory the memory usage of the page + * @param memory adjustment */ - public void registerUnsavedPage(int memory) { + public void registerUnsavedMemory(int memory) { + // this counter was intentionally left unprotected against race + // condition for performance reasons + // TODO: evaluate performance impact of atomic implementation, + // since updates to unsavedMemory are largely aggregated now unsavedMemory += memory; int newValue = unsavedMemory; if (newValue > autoCommitMemory && autoCommitMemory > 0) { @@ -2413,7 +2845,7 @@ public void registerUnsavedPage(int memory) { } } - public boolean isSaveNeeded() { + boolean isSaveNeeded() { return saveNeeded; } @@ -2423,22 +2855,37 @@ public boolean isSaveNeeded() { * @param map the map */ void beforeWrite(MVMap map) { - if (saveNeeded && fileStore != null && isOpenOrStopping()) { + if (saveNeeded && fileStore != null && isOpenOrStopping() && + // condition below is to prevent potential deadlock, + // because we should never seek storeLock while holding + // map root lock + (storeLock.isHeldByCurrentThread() || !map.getRoot().isLockedByCurrentThread()) && + // to avoid infinite recursion via store() -> dropUnusedChunks() -> layout.remove() + map != layout) { + saveNeeded = false; // check again, because it could have been written by now - if (unsavedMemory > autoCommitMemory && autoCommitMemory > 0) { + if (autoCommitMemory > 0 && needStore()) { // if unsaved memory creation rate is to high, // some back pressure need to be applied // to slow things down and avoid OOME - if (3 * unsavedMemory > 4 * autoCommitMemory) { - commit(); + if (requireStore() && !map.isSingleWriter()) { + commit(MVStore::requireStore); } else { - tryCommit(); + tryCommit(MVStore::needStore); } } } } + private boolean requireStore() { + return 3 * unsavedMemory > 4 * autoCommitMemory; + } + + private boolean needStore() { + return unsavedMemory > autoCommitMemory; + } + /** * Get the store version. 
The store version is usually used to upgrade the * structure of the store after upgrading the application. Initially the @@ -2488,35 +2935,36 @@ public void rollbackTo(long version) { storeLock.lock(); try { checkOpen(); + currentVersion = version; if (version == 0) { // special case: remove all data - for (MVMap m : maps.values()) { - m.close(); - } + layout.setInitialRoot(layout.createEmptyLeaf(), INITIAL_VERSION); meta.setInitialRoot(meta.createEmptyLeaf(), INITIAL_VERSION); - + layout.put(META_ID_KEY, Integer.toHexString(meta.getId())); + deadChunks.clear(); + removedPages.clear(); chunks.clear(); + clearCaches(); if (fileStore != null) { - fileStore.clear(); + saveChunkLock.lock(); + try { + fileStore.clear(); + } finally { + saveChunkLock.unlock(); + } } - maps.clear(); lastChunk = null; - synchronized (freedPageSpace) { - freedPageSpace.clear(); - } versions.clear(); - currentVersion = version; setWriteVersion(version); metaChanged = false; - lastStoredVersion = INITIAL_VERSION; + for (MVMap m : maps.values()) { + m.close(); + } return; } DataUtils.checkArgument( isKnownVersion(version), "Unknown version {0}", version); - for (MVMap m : maps.values()) { - m.rollbackTo(version); - } TxCounter txCounter; while ((txCounter = versions.peekLast()) != null && txCounter.version >= version) { @@ -2524,74 +2972,66 @@ public void rollbackTo(long version) { } currentTxCounter = new TxCounter(version); - meta.rollbackTo(version); - metaChanged = false; - boolean loadFromFile = false; - // find out which chunks to remove, - // and which is the newest chunk to keep - // (the chunk list can have gaps) - ArrayList remove = new ArrayList<>(); - Chunk keep = null; - for (Chunk c : chunks.values()) { - if (c.version > version) { - remove.add(c.id); - } else if (keep == null || keep.id < c.id) { - keep = c; - } + if (!layout.rollbackRoot(version)) { + MVMap layoutMap = getLayoutMap(version); + layout.setInitialRoot(layoutMap.getRootPage(), version); } - if (!remove.isEmpty()) { 
- // remove the youngest first, so we don't create gaps - // (in case we remove many chunks) - Collections.sort(remove, Collections.reverseOrder()); - loadFromFile = true; - for (int id : remove) { - Chunk c = chunks.remove(id); - long start = c.block * BLOCK_SIZE; - int length = c.len * BLOCK_SIZE; - fileStore.free(start, length); - assert fileStore.getFileLengthInUse() == measureFileLengthInUse() : - fileStore.getFileLengthInUse() + " != " + measureFileLengthInUse(); - // overwrite the chunk, - // so it is not be used later on - WriteBuffer buff = getWriteBuffer(); - buff.limit(length); - // buff.clear() does not set the data - Arrays.fill(buff.getBuffer().array(), (byte) 0); - write(start, buff.getBuffer()); - releaseWriteBuffer(buff); - // only really needed if we remove many chunks, when writes are - // re-ordered - but we do it always, because rollback is not - // performance critical - sync(); - } - lastChunk = keep; - writeStoreHeader(); - readStoreHeader(); + if (!meta.rollbackRoot(version)) { + meta.setRootPos(getRootPos(meta.getId()), version - 1); } + metaChanged = false; + for (MVMap m : new ArrayList<>(maps.values())) { int id = m.getId(); if (m.getCreateVersion() >= version) { m.close(); maps.remove(id); } else { - if (loadFromFile) { - m.setRootPos(getRootPos(meta, id), version); - } else { - m.rollbackRoot(version); + if (!m.rollbackRoot(version)) { + m.setRootPos(getRootPos(id), version - 1); } } } - currentVersion = version; - if (lastStoredVersion == INITIAL_VERSION) { - lastStoredVersion = currentVersion - 1; + + deadChunks.clear(); + removedPages.clear(); + clearCaches(); + + serializationLock.lock(); + try { + Chunk keep = getChunkForVersion(version); + if (keep != null) { + saveChunkLock.lock(); + try { + setLastChunk(keep); + storeHeader.put(HDR_CLEAN, 1); + writeStoreHeader(); + readStoreHeader(); + } finally { + saveChunkLock.unlock(); + } + } + } finally { + serializationLock.unlock(); } + onVersionChange(currentVersion); + assert 
!hasUnsavedChanges(); } finally { - storeLock.unlock(); + unlockAndCheckPanicCondition(); + } + } + + private void clearCaches() { + if (cache != null) { + cache.clear(); + } + if (chunksToC != null) { + chunksToC.clear(); } } - private static long getRootPos(MVMap map, int mapId) { - String root = map.get(MVMap.getMapRootKey(mapId)); + private long getRootPos(int mapId) { + String root = layout.get(MVMap.getMapRootKey(mapId)); return root == null ? 0 : DataUtils.parseHexLong(root); } @@ -2605,10 +3045,6 @@ public long getCurrentVersion() { return currentVersion; } - public long getLastStoredVersion() { - return lastStoredVersion; - } - /** * Get the file store. * @@ -2631,7 +3067,7 @@ public Map getStoreHeader() { private void checkOpen() { if (!isOpenOrStopping()) { - throw DataUtils.newIllegalStateException(DataUtils.ERROR_CLOSED, + throw DataUtils.newMVStoreException(DataUtils.ERROR_CLOSED, "This store is closed", panicException); } } @@ -2644,14 +3080,14 @@ private void checkOpen() { */ public void renameMap(MVMap map, String newName) { checkOpen(); - DataUtils.checkArgument(map != meta, + DataUtils.checkArgument(map != layout && map != meta, "Renaming the meta map is not allowed"); int id = map.getId(); String oldName = getMapName(id); if (oldName != null && !oldName.equals(newName)) { String idHexStr = Integer.toHexString(id); // at first create a new name as an "alias" - String existingIdHexStr = meta.putIfAbsent("name." + newName, idHexStr); + String existingIdHexStr = meta.putIfAbsent(DataUtils.META_NAME + newName, idHexStr); // we need to cope with the case of previously unfinished rename DataUtils.checkArgument( existingIdHexStr == null || existingIdHexStr.equals(idHexStr), @@ -2659,59 +3095,51 @@ public void renameMap(MVMap map, String newName) { // switch roles of a new and old names - old one is an alias now meta.put(MVMap.getMapKey(id), map.asString(newName)); // get rid of the old name completely - meta.remove("name." 
+ oldName); + meta.remove(DataUtils.META_NAME + oldName); markMetaChanged(); } } /** - * Remove a map. Please note rolling back this operation does not restore - * the data; if you need this ability, use Map.clear(). - * - * @param map the map to remove - */ - public void removeMap(MVMap map) { - removeMap(map, true); - } - - /** - * Remove a map. + * Remove a map from the current version of the store. * * @param map the map to remove - * @param delayed whether to delay deleting the metadata */ - public void removeMap(MVMap map, boolean delayed) { + public void removeMap(MVMap map) { storeLock.lock(); try { checkOpen(); - DataUtils.checkArgument(map != meta, + DataUtils.checkArgument(layout != meta && map != meta, "Removing the meta map is not allowed"); + RootReference rootReference = map.clearIt(); map.close(); - RootReference rootReference = map.getRoot(); + updateCounter += rootReference.updateCounter; updateAttemptCounter += rootReference.updateAttemptCounter; int id = map.getId(); String name = getMapName(id); - removeMap(name, id, delayed); + if (meta.remove(MVMap.getMapKey(id)) != null) { + markMetaChanged(); + } + if (meta.remove(DataUtils.META_NAME + name) != null) { + markMetaChanged(); + } } finally { storeLock.unlock(); } } - private void removeMap(String name, int id, boolean delayed) { - if (meta.remove(MVMap.getMapKey(id)) != null) { - markMetaChanged(); - } - if (meta.remove("name." + name) != null) { + /** + * Performs final stage of map removal - delete root location info from the layout table. + * Map is supposedly closed and anonymous and has no outstanding usage by now. 
+ * + * @param mapId to deregister + */ + void deregisterMapRoot(int mapId) { + if (layout.remove(MVMap.getMapRootKey(mapId)) != null) { markMetaChanged(); } - if (!delayed) { - if (meta.remove(MVMap.getMapRootKey(id)) != null) { - markMetaChanged(); - } - maps.remove(id); - } } /** @@ -2722,7 +3150,11 @@ private void removeMap(String name, int id, boolean delayed) { public void removeMap(String name) { int id = getMapId(name); if(id > 0) { - removeMap(name, id, false); + MVMap map = getMap(id); + if (map == null) { + map = openMap(name, MVStoreTool.getGenericMapBuilder()); + } + removeMap(map); } } @@ -2733,13 +3165,12 @@ public void removeMap(String name) { * @return the name, or null if not found */ public String getMapName(int id) { - checkOpen(); String m = meta.get(MVMap.getMapKey(id)); return m == null ? null : DataUtils.getMapName(m); } private int getMapId(String name) { - String m = meta.get("name." + name); + String m = meta.get(DataUtils.META_NAME + name); return m == null ? 
-1 : DataUtils.parseHexInt(m); } @@ -2749,7 +3180,7 @@ private int getMapId(String name) { */ void writeInBackground() { try { - if (!isOpenOrStopping()) { + if (!isOpenOrStopping() || isReadOnly()) { return; } @@ -2757,37 +3188,115 @@ void writeInBackground() { // but according to a test it doesn't really help long time = getTimeSinceCreation(); - if (time <= lastCommitTime + autoCommitDelay) { - return; + if (time > lastCommitTime + autoCommitDelay) { + tryCommit(); + if (autoCompactFillRate < 0) { + compact(-getTargetFillRate(), autoCommitMemory); + } } - tryCommit(); - if (autoCompactFillRate > 0) { - // whether there were file read or write operations since - // the last time - boolean fileOps; - long fileOpCount = fileStore.getWriteCount() + fileStore.getReadCount(); - if (autoCompactLastFileOpCount != fileOpCount) { - fileOps = true; - } else { - fileOps = false; + int fillRate = getFillRate(); + if (fileStore.isFragmented() && fillRate < autoCompactFillRate) { + if (storeLock.tryLock(10, TimeUnit.MILLISECONDS)) { + try { + int moveSize = autoCommitMemory; + if (isIdle()) { + moveSize *= 4; + } + compactMoveChunks(101, moveSize); + } finally { + unlockAndCheckPanicCondition(); + } + } + } else if (fillRate >= autoCompactFillRate && lastChunk != null) { + int chunksFillRate = getRewritableChunksFillRate(); + chunksFillRate = isIdle() ? 100 - (100 - chunksFillRate) / 2 : chunksFillRate; + if (chunksFillRate < getTargetFillRate()) { + if (storeLock.tryLock(10, TimeUnit.MILLISECONDS)) { + try { + int writeLimit = autoCommitMemory * fillRate / Math.max(chunksFillRate, 1); + if (!isIdle()) { + writeLimit /= 4; + } + if (rewriteChunks(writeLimit, chunksFillRate)) { + dropUnusedChunks(); + } + } finally { + storeLock.unlock(); + } + } } - // use a lower fill rate if there were any file operations - int targetFillRate = fileOps ? 
autoCompactFillRate / 3 : autoCompactFillRate; - compact(targetFillRate, autoCommitMemory); - autoCompactLastFileOpCount = fileStore.getWriteCount() + fileStore.getReadCount(); } + autoCompactLastFileOpCount = fileStore.getWriteCount() + fileStore.getReadCount(); + } catch (InterruptedException ignore) { } catch (Throwable e) { handleException(e); + if (backgroundExceptionHandler == null) { + throw e; + } + } + } + + private void doMaintenance(int targetFillRate) { + if (autoCompactFillRate > 0 && lastChunk != null && reuseSpace) { + try { + int lastProjectedFillRate = -1; + for (int cnt = 0; cnt < 5; cnt++) { + int fillRate = getFillRate(); + int projectedFillRate = fillRate; + if (fillRate > targetFillRate) { + projectedFillRate = getProjectedFillRate(100); + if (projectedFillRate > targetFillRate || projectedFillRate <= lastProjectedFillRate) { + break; + } + } + lastProjectedFillRate = projectedFillRate; + // We can't wait forever for the lock here, + // because if called from the background thread, + // it might go into deadlock with concurrent database closure + // and attempt to stop this thread. 
+ if (!storeLock.tryLock(10, TimeUnit.MILLISECONDS)) { + break; + } + try { + int writeLimit = autoCommitMemory * targetFillRate / Math.max(projectedFillRate, 1); + if (projectedFillRate < fillRate) { + if ((!rewriteChunks(writeLimit, targetFillRate) || dropUnusedChunks() == 0) && cnt > 0) { + break; + } + } + if (!compactMoveChunks(101, writeLimit)) { + break; + } + } finally { + unlockAndCheckPanicCondition(); + } + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } } } + private int getTargetFillRate() { + int targetRate = autoCompactFillRate; + // use a lower fill rate if there were any file operations since the last time + if (!isIdle()) { + targetRate /= 2; + } + return targetRate; + } + + private boolean isIdle() { + return autoCompactLastFileOpCount == fileStore.getWriteCount() + fileStore.getReadCount(); + } + private void handleException(Throwable ex) { if (backgroundExceptionHandler != null) { try { backgroundExceptionHandler.uncaughtException(Thread.currentThread(), ex); - } catch(Throwable ignore) { - if (ex != ignore) { // OOME may be the same - ex.addSuppressed(ignore); + } catch(Throwable e) { + if (ex != e) { // OOME may be the same + ex.addSuppressed(e); } } } @@ -2804,10 +3313,6 @@ public void setCacheSize(int mb) { cache.setMaxMemory(bytes); cache.clear(); } - if (cacheChunkRef != null) { - cacheChunkRef.setMaxMemory(bytes / 4); - cacheChunkRef.clear(); - } } private boolean isOpen() { @@ -2856,6 +3361,10 @@ private void stopBackgroundThread(boolean waitForIt) { } } } + shutdownExecutor(serializationExecutor); + serializationExecutor = null; + shutdownExecutor(bufferSaveExecutor); + bufferSaveExecutor = null; break; } } @@ -2889,11 +3398,23 @@ public void setAutoCommitDelay(int millis) { fileStore.toString()); if (backgroundWriterThread.compareAndSet(null, t)) { t.start(); + serializationExecutor = createSingleThreadExecutor("H2-serialization"); + bufferSaveExecutor = createSingleThreadExecutor("H2-save"); } } } - 
boolean isBackgroundThread() { + private static ThreadPoolExecutor createSingleThreadExecutor(String threadName) { + return new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>(), + r -> { + Thread thread = new Thread(r, threadName); + thread.setDaemon(true); + return thread; + }); + } + + public boolean isBackgroundThread() { return Thread.currentThread() == backgroundWriterThread.get(); } @@ -2932,7 +3453,7 @@ public int getUnsavedMemory() { * Put the page in the cache. * @param page the page */ - void cachePage(Page page) { + void cachePage(Page page) { if (cache != null) { cache.put(page.getPos(), page, page.getMemory()); } @@ -2971,7 +3492,7 @@ public int getCacheSize() { * * @return the cache */ - public CacheLongKeyLIRS getCache() { + public CacheLongKeyLIRS> getCache() { return cache; } @@ -2984,14 +3505,37 @@ public boolean isReadOnly() { return fileStore != null && fileStore.isReadOnly(); } + public int getCacheHitRatio() { + return getCacheHitRatio(cache); + } + + public int getTocCacheHitRatio() { + return getCacheHitRatio(chunksToC); + } + + private static int getCacheHitRatio(CacheLongKeyLIRS cache) { + if (cache == null) { + return 0; + } + long hits = cache.getHits(); + return (int) (100 * hits / (hits + cache.getMisses() + 1)); + } + + public int getLeafRatio() { + return (int)(leafCount * 100 / Math.max(1, leafCount + nonLeafCount)); + } + public double getUpdateFailureRatio() { long updateCounter = this.updateCounter; long updateAttemptCounter = this.updateAttemptCounter; - RootReference rootReference = meta.getRoot(); + RootReference rootReference = layout.getRoot(); + updateCounter += rootReference.updateCounter; + updateAttemptCounter += rootReference.updateAttemptCounter; + rootReference = meta.getRoot(); updateCounter += rootReference.updateCounter; updateAttemptCounter += rootReference.updateAttemptCounter; for (MVMap map : maps.values()) { - RootReference root = map.getRoot(); + RootReference root = 
map.getRoot(); updateCounter += root.updateCounter; updateAttemptCounter += root.updateAttemptCounter; } @@ -3009,8 +3553,8 @@ public TxCounter registerVersionUsage() { TxCounter txCounter; while(true) { txCounter = currentTxCounter; - if(txCounter.counter.getAndIncrement() >= 0) { - break; + if(txCounter.incrementAndGet() > 0) { + return txCounter; } // The only way for counter to be negative // if it was retrieved right before onVersionChange() @@ -3020,9 +3564,8 @@ public TxCounter registerVersionUsage() { // not to upset accounting and try again with a new // version (currentTxCounter should have changed). assert txCounter != currentTxCounter : txCounter; - txCounter.counter.decrementAndGet(); + txCounter.decrementAndGet(); } - return txCounter; } /** @@ -3035,8 +3578,10 @@ public TxCounter registerVersionUsage() { */ public void deregisterVersionUsage(TxCounter txCounter) { if(txCounter != null) { - if(txCounter.counter.decrementAndGet() <= 0) { - if (!storeLock.isHeldByCurrentThread() && storeLock.tryLock()) { + if(txCounter.decrementAndGet() <= 0) { + if (storeLock.isHeldByCurrentThread()) { + dropUnusedVersions(); + } else if (storeLock.tryLock()) { try { dropUnusedVersions(); } finally { @@ -3048,21 +3593,80 @@ public void deregisterVersionUsage(TxCounter txCounter) { } private void onVersionChange(long version) { - TxCounter txCounter = this.currentTxCounter; - assert txCounter.counter.get() >= 0; + TxCounter txCounter = currentTxCounter; + assert txCounter.get() >= 0; versions.add(txCounter); currentTxCounter = new TxCounter(version); - txCounter.counter.decrementAndGet(); + txCounter.decrementAndGet(); dropUnusedVersions(); } private void dropUnusedVersions() { TxCounter txCounter; while ((txCounter = versions.peek()) != null - && txCounter.counter.get() < 0) { + && txCounter.get() < 0) { versions.poll(); } - setOldestVersionToKeep(txCounter != null ? txCounter.version : currentTxCounter.version); + setOldestVersionToKeep((txCounter != null ? 
txCounter : currentTxCounter).version); + } + + private int dropUnusedChunks() { + assert storeLock.isHeldByCurrentThread(); + int count = 0; + if (!deadChunks.isEmpty()) { + long oldestVersionToKeep = getOldestVersionToKeep(); + long time = getTimeSinceCreation(); + saveChunkLock.lock(); + try { + Chunk chunk; + while ((chunk = deadChunks.poll()) != null && + (isSeasonedChunk(chunk, time) && canOverwriteChunk(chunk, oldestVersionToKeep) || + // if chunk is not ready yet, put it back and exit + // since this deque is unbounded, offerFirst() always return true + !deadChunks.offerFirst(chunk))) { + + if (chunks.remove(chunk.id) != null) { + // purge dead pages from cache + long[] toc = chunksToC.remove(chunk.id); + if (toc != null && cache != null) { + for (long tocElement : toc) { + long pagePos = DataUtils.getPagePos(chunk.id, tocElement); + cache.remove(pagePos); + } + } + + if (layout.remove(Chunk.getMetaKey(chunk.id)) != null) { + markMetaChanged(); + } + if (chunk.isSaved()) { + freeChunkSpace(chunk); + } + ++count; + } + } + } finally { + saveChunkLock.unlock(); + } + } + return count; + } + + private void freeChunkSpace(Chunk chunk) { + long start = chunk.block * BLOCK_SIZE; + int length = chunk.len * BLOCK_SIZE; + freeFileSpace(start, length); + } + + private void freeFileSpace(long start, int length) { + fileStore.free(start, length); + assert validateFileLength(start + ":" + length); + } + + private boolean validateFileLength(String msg) { + assert saveChunkLock.isHeldByCurrentThread(); + assert fileStore.getFileLengthInUse() == measureFileLengthInUse() : + fileStore.getFileLengthInUse() + " != " + measureFileLengthInUse() + " " + msg; + return true; } /** @@ -3071,6 +3675,7 @@ private void dropUnusedVersions() { * which are still operating on this version. 
*/ public static final class TxCounter { + /** * Version of a store, this TxCounter is related to */ @@ -3079,12 +3684,38 @@ public static final class TxCounter { /** * Counter of outstanding operation on this version of a store */ - public final AtomicInteger counter = new AtomicInteger(); + private volatile int counter; + + private static final AtomicIntegerFieldUpdater counterUpdater = + AtomicIntegerFieldUpdater.newUpdater(TxCounter.class, "counter"); + TxCounter(long version) { this.version = version; } + int get() { + return counter; + } + + /** + * Increment and get the counter value. + * + * @return the new value + */ + int incrementAndGet() { + return counterUpdater.incrementAndGet(this); + } + + /** + * Decrement and get the counter values. + * + * @return the new value + */ + int decrementAndGet() { + return counterUpdater.decrementAndGet(this); + } + @Override public String toString() { return "v=" + version + " / cnt=" + counter; @@ -3125,6 +3756,69 @@ public void run() { } } + private static class RemovedPageInfo implements Comparable { + final long version; + final long removedPageInfo; + + RemovedPageInfo(long pagePos, boolean pinned, long version, int pageNo) { + this.removedPageInfo = createRemovedPageInfo(pagePos, pinned, pageNo); + this.version = version; + } + + @Override + public int compareTo(RemovedPageInfo other) { + return Long.compare(version, other.version); + } + + int getPageChunkId() { + return DataUtils.getPageChunkId(removedPageInfo); + } + + int getPageNo() { + return DataUtils.getPageOffset(removedPageInfo); + } + + int getPageLength() { + return DataUtils.getPageMaxLength(removedPageInfo); + } + + /** + * Find out if removed page was pinned (can not be evacuated to a new chunk). 
+ * @return true if page has been pinned + */ + boolean isPinned() { + return (removedPageInfo & 1) == 1; + } + + /** + * Transforms saved page position into removed page info by + * replacing "page offset" with "page sequential number" and + * "page type" bit with "pinned page" flag. + * @param pagePos of the saved page + * @param isPinned whether page belong to a "single writer" map + * @param pageNo 0-based sequential page number within containing chunk + * @return removed page info that contains chunk id, page number, page length and pinned flag + */ + private static long createRemovedPageInfo(long pagePos, boolean isPinned, int pageNo) { + long result = (pagePos & ~((0xFFFFFFFFL << 6) | 1)) | ((pageNo << 6) & 0xFFFFFFFFL); + if (isPinned) { + result |= 1; + } + return result; + } + + @Override + public String toString() { + return "RemovedPageInfo{" + + "version=" + version + + ", chunk=" + getPageChunkId() + + ", pageNo=" + getPageNo() + + ", len=" + getPageLength() + + (isPinned() ? ", pinned" : "") + + '}'; + } + } + /** * A builder for an MVStore. */ @@ -3188,8 +3882,8 @@ public Builder autoCommitBufferSize(int kb) { * this value, then chunks at the end of the file are moved. Compaction * stops if the target fill rate is reached. *

          - * The default value is 40 (40%). The value 0 disables auto-compacting. - *

          + * The default value is 90 (90%). The value 0 disables auto-compacting. + *

          * * @param percent the target fill rate * @return this @@ -3240,6 +3934,25 @@ public Builder readOnly() { return set("readOnly", 1); } + /** + * Set the number of keys per page. + * + * @param keyCount the number of keys + * @return this + */ + public Builder keysPerPage(int keyCount) { + return set("keysPerPage", keyCount); + } + + /** + * Open the file in recovery mode, where some errors may be ignored. + * + * @return this + */ + public Builder recoveryMode() { + return set("recoveryMode", 1); + } + /** * Set the read cache size in MB. The default is 16 MB. * @@ -3354,7 +4067,7 @@ public String toString() { * @param s the string representation * @return the builder */ - @SuppressWarnings({ "unchecked", "rawtypes" }) + @SuppressWarnings({"unchecked", "rawtypes", "unused"}) public static Builder fromString(String s) { // Cast from HashMap to HashMap is safe return new Builder((HashMap) DataUtils.parseMap(s)); diff --git a/h2/src/main/org/h2/mvstore/MVStoreException.java b/h2/src/main/org/h2/mvstore/MVStoreException.java new file mode 100644 index 0000000000..0cd1b95c7b --- /dev/null +++ b/h2/src/main/org/h2/mvstore/MVStoreException.java @@ -0,0 +1,25 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore; + +/** + * Various kinds of MVStore problems, along with associated error code. 
+ */ +public class MVStoreException extends RuntimeException { + + private static final long serialVersionUID = 2847042930249663807L; + + private final int errorCode; + + public MVStoreException(int errorCode, String message) { + super(message); + this.errorCode = errorCode; + } + + public int getErrorCode() { + return errorCode; + } +} diff --git a/h2/src/main/org/h2/mvstore/MVStoreTool.java b/h2/src/main/org/h2/mvstore/MVStoreTool.java index 673a2198d2..ae7f5e4f37 100644 --- a/h2/src/main/org/h2/mvstore/MVStoreTool.java +++ b/h2/src/main/org/h2/mvstore/MVStoreTool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -22,7 +22,8 @@ import org.h2.compress.Compressor; import org.h2.engine.Constants; import org.h2.message.DbException; -import org.h2.mvstore.type.DataType; +import org.h2.mvstore.tx.TransactionStore; +import org.h2.mvstore.type.BasicDataType; import org.h2.mvstore.type.StringDataType; import org.h2.store.fs.FilePath; import org.h2.store.fs.FileUtils; @@ -36,7 +37,8 @@ public class MVStoreTool { /** * Runs this tool. * Options are case sensitive. Supported options are: - *
          + *
          + * * * * @@ -109,20 +111,28 @@ public static void dump(String fileName, Writer writer, boolean details) { } long size = FileUtils.size(fileName); pw.printf("File %s, %d bytes, %d MB\n", fileName, size, size / 1024 / 1024); - FileChannel file = null; int blockSize = MVStore.BLOCK_SIZE; TreeMap mapSizesTotal = new TreeMap<>(); long pageSizeTotal = 0; - try { - file = FilePath.get(fileName).open("r"); + try (FileChannel file = FilePath.get(fileName).open("r")) { long fileSize = file.size(); int len = Long.toHexString(fileSize).length(); ByteBuffer block = ByteBuffer.allocate(4096); long pageCount = 0; - for (long pos = 0; pos < fileSize;) { + for (long pos = 0; pos < fileSize; ) { block.rewind(); - DataUtils.readFully(file, pos, block); + // Bugfix - An MVStoreException that wraps EOFException is + // thrown when partial writes happens in the case of power off + // or file system issues. + // So we should skip the broken block at end of the DB file. + try { + DataUtils.readFully(file, pos, block); + } catch (MVStoreException e) { + pos += blockSize; + pw.printf("ERROR illegal position %d%n", pos); + continue; + } block.rewind(); int headerType = block.get(); if (headerType == 'H') { @@ -137,10 +147,10 @@ public static void dump(String fileName, Writer writer, boolean details) { continue; } block.position(0); - Chunk c = null; + Chunk c; try { c = Chunk.readChunkHeader(block, pos); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { pos += blockSize; continue; } @@ -173,23 +183,24 @@ public static void dump(String fileName, Writer writer, boolean details) { int pageSize = chunk.getInt(); // check value (ignored) chunk.getShort(); + /*int pageNo =*/ DataUtils.readVarInt(chunk); int mapId = DataUtils.readVarInt(chunk); int entries = DataUtils.readVarInt(chunk); int type = chunk.get(); boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0; - boolean node = (type & 1) != 0; + boolean node = (type & DataUtils.PAGE_TYPE_NODE) != 0; if 
(details) { pw.printf( "+%0" + len + - "x %s, map %x, %d entries, %d bytes, maxLen %x%n", + "x %s, map %x, %d entries, %d bytes, maxLen %x%n", p, (node ? "node" : "leaf") + - (compressed ? " compressed" : ""), + (compressed ? " compressed" : ""), mapId, node ? entries + 1 : entries, pageSize, DataUtils.getPageMaxLength(DataUtils.getPagePos(0, 0, pageSize, 0)) - ); + ); } p += pageSize; Integer mapSize = mapSizes.get(mapId); @@ -243,8 +254,8 @@ public static void dump(String fileName, Writer writer, boolean details) { for (int i = 0; i < entries; i++) { long cp = children[i]; pw.printf(" %d children < %s @ " + - "chunk %x +%0" + - len + "x%n", + "chunk %x +%0" + + len + "x%n", counts[i], keys[i], DataUtils.getPageChunkId(cp), @@ -252,7 +263,7 @@ public static void dump(String fileName, Writer writer, boolean details) { } long cp = children[entries]; pw.printf(" %d children >= %s @ chunk %x +%0" + - len + "x%n", + len + "x%n", counts[entries], keys.length >= entries ? null : keys[entries], DataUtils.getPageChunkId(cp), @@ -274,7 +285,7 @@ public static void dump(String fileName, Writer writer, boolean details) { for (int i = 0; i <= entries; i++) { long cp = children[i]; pw.printf(" %d children @ chunk %x +%0" + - len + "x%n", + len + "x%n", counts[i], DataUtils.getPageChunkId(cp), DataUtils.getPageOffset(cp)); @@ -313,15 +324,8 @@ public static void dump(String fileName, Writer writer, boolean details) { } catch (IOException e) { pw.println("ERROR: " + e); e.printStackTrace(pw); - } finally { - if (file != null) { - try { - file.close(); - } catch (IOException e) { - // ignore - } - } } + // ignore pw.flush(); } @@ -344,11 +348,10 @@ public static String info(String fileName, Writer writer) { return "File not found: " + fileName; } long fileLength = FileUtils.size(fileName); - MVStore store = new MVStore.Builder(). - fileName(fileName). - readOnly().open(); - try { - MVMap meta = store.getMetaMap(); + try (MVStore store = new MVStore.Builder(). 
+ fileName(fileName).recoveryMode(). + readOnly().open()) { + MVMap layout = store.getLayoutMap(); Map header = store.getStoreHeader(); long fileCreated = DataUtils.readHexLong(header, "created", 0L); TreeMap chunks = new TreeMap<>(); @@ -356,9 +359,9 @@ public static String info(String fileName, Writer writer) { long maxLength = 0; long maxLengthLive = 0; long maxLengthNotEmpty = 0; - for (Entry e : meta.entrySet()) { + for (Entry e : layout.entrySet()) { String k = e.getKey(); - if (k.startsWith("chunk.")) { + if (k.startsWith(DataUtils.META_CHUNK)) { Chunk c = Chunk.fromString(e.getValue()); chunks.put(c.id, c); chunkLength += c.len * MVStore.BLOCK_SIZE; @@ -401,8 +404,6 @@ c.id, formatTimestamp(created, fileCreated), pw.println("ERROR: " + e); e.printStackTrace(pw); return e.getMessage(); - } finally { - store.close(); } pw.flush(); return null; @@ -508,36 +509,51 @@ public static void compact(String sourceFileName, String targetFileName, boolean */ public static void compact(MVStore source, MVStore target) { int autoCommitDelay = target.getAutoCommitDelay(); - int retentionTime = target.getRetentionTime(); - target.setAutoCommitDelay(0); - target.setRetentionTime(Integer.MAX_VALUE); // disable unused chunks collection - MVMap sourceMeta = source.getMetaMap(); - MVMap targetMeta = target.getMetaMap(); - for (Entry m : sourceMeta.entrySet()) { - String key = m.getKey(); - if (key.startsWith("chunk.")) { - // ignore - } else if (key.startsWith("map.")) { - // ignore - } else if (key.startsWith("name.")) { - // ignore - } else if (key.startsWith("root.")) { - // ignore - } else { - targetMeta.put(key, m.getValue()); + boolean reuseSpace = target.getReuseSpace(); + try { + target.setReuseSpace(false); // disable unused chunks collection + target.setAutoCommitDelay(0); // disable autocommit + MVMap sourceMeta = source.getMetaMap(); + MVMap targetMeta = target.getMetaMap(); + for (Entry m : sourceMeta.entrySet()) { + String key = m.getKey(); + if 
(key.startsWith(DataUtils.META_MAP)) { + // ignore + } else if (key.startsWith(DataUtils.META_NAME)) { + // ignore + } else { + targetMeta.put(key, m.getValue()); + } } + // We are going to cheat a little bit in the copyFrom() by employing "incomplete" pages, + // which would be spared of saving, but save completed pages underneath, + // and those may appear as dead (non-reachable). + // That's why it is important to preserve all chunks + // created in the process, especially if retention time + // is set to a lower value, or even 0. + for (String mapName : source.getMapNames()) { + MVMap.Builder mp = getGenericMapBuilder(); + // This is a hack to preserve chunks occupancy rate accounting. + // It exposes design deficiency flaw in MVStore related to lack of + // map's type metadata. + // TODO: Introduce type metadata which will allow to open any store + // TODO: without prior knowledge of keys / values types and map implementation + // TODO: (MVMap vs MVRTreeMap, regular vs. singleWriter etc.) + if (mapName.startsWith(TransactionStore.UNDO_LOG_NAME_PREFIX)) { + mp.singleWriter(); + } + MVMap sourceMap = source.openMap(mapName, mp); + MVMap targetMap = target.openMap(mapName, mp); + targetMap.copyFrom(sourceMap); + targetMeta.put(MVMap.getMapKey(targetMap.getId()), sourceMeta.get(MVMap.getMapKey(sourceMap.getId()))); + } + // this will end hacky mode of operation with incomplete pages + // end ensure that all pages are saved + target.commit(); + } finally { + target.setAutoCommitDelay(autoCommitDelay); + target.setReuseSpace(reuseSpace); } - for (String mapName : source.getMapNames()) { - MVMap.Builder mp = - new MVMap.Builder<>(). - keyType(new GenericDataType()). 
- valueType(new GenericDataType()); - MVMap sourceMap = source.openMap(mapName, mp); - MVMap targetMap = target.openMap(mapName, mp); - targetMap.copyFrom(sourceMap); - } - target.setRetentionTime(retentionTime); - target.setAutoCommitDelay(autoCommitDelay); } /** @@ -550,7 +566,7 @@ public static void repair(String fileName) { long version = Long.MAX_VALUE; OutputStream ignore = new OutputStream() { @Override - public void write(int b) throws IOException { + public void write(int b) { // ignore } }; @@ -618,10 +634,10 @@ public static long rollback(String fileName, long targetVersion, Writer writer) pos += blockSize; continue; } - Chunk c = null; + Chunk c; try { c = Chunk.readChunkHeader(block, pos); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { pos += blockSize; continue; } @@ -674,38 +690,46 @@ public static long rollback(String fileName, long targetVersion, Writer writer) return newestVersion; } + @SuppressWarnings({"rawtypes","unchecked"}) + static MVMap.Builder getGenericMapBuilder() { + return (MVMap.Builder)new MVMap.Builder(). + keyType(GenericDataType.INSTANCE). + valueType(GenericDataType.INSTANCE); + } + /** * A data type that can read any data that is persisted, and converts it to * a byte array. */ - static class GenericDataType implements DataType { + private static class GenericDataType extends BasicDataType { + static GenericDataType INSTANCE = new GenericDataType(); + + private GenericDataType() {} @Override - public int compare(Object a, Object b) { - throw DataUtils.newUnsupportedOperationException("Can not compare"); + public boolean isMemoryEstimationAllowed() { + return false; } @Override - public int getMemory(Object obj) { - return obj == null ? 0 : ((byte[]) obj).length * 8; + public int getMemory(byte[] obj) { + return obj == null ? 
0 : obj.length * 8; } @Override - public void write(WriteBuffer buff, Object obj) { - if (obj != null) { - buff.put((byte[]) obj); - } + public byte[][] createStorage(int size) { + return new byte[size][]; } @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); + public void write(WriteBuffer buff, byte[] obj) { + if (obj != null) { + buff.put(obj); } } @Override - public Object read(ByteBuffer buff) { + public byte[] read(ByteBuffer buff) { int len = buff.remaining(); if (len == 0) { return null; @@ -714,12 +738,5 @@ public Object read(ByteBuffer buff) { buff.get(data); return data; } - - @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); - } - } } } diff --git a/h2/src/main/org/h2/mvstore/OffHeapStore.java b/h2/src/main/org/h2/mvstore/OffHeapStore.java index 2b38ccdadc..6dc9d8764c 100644 --- a/h2/src/main/org/h2/mvstore/OffHeapStore.java +++ b/h2/src/main/org/h2/mvstore/OffHeapStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore; @@ -33,7 +33,7 @@ public String toString() { public ByteBuffer readFully(long pos, int len) { Entry memEntry = memory.floorEntry(pos); if (memEntry == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Could not read from position {0}", pos); } @@ -54,7 +54,7 @@ public void free(long pos, int length) { if (buff == null) { // nothing was written (just allocated) } else if (buff.remaining() != length) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Partial remove is not supported at position {0}", pos); } @@ -75,7 +75,7 @@ public void writeFully(long pos, ByteBuffer src) { int length = src.remaining(); if (prevPos == pos) { if (prevLength != length) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Could not write to position {0}; " + "partial overwrite is not supported", pos); @@ -87,7 +87,7 @@ public void writeFully(long pos, ByteBuffer src) { return; } if (prevPos + prevLength > pos) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Could not write to position {0}; " + "partial overwrite is not supported", pos); @@ -121,7 +121,7 @@ public void truncate(long size) { } ByteBuffer buff = memory.get(pos); if (buff.capacity() > size) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Could not truncate to {0}; " + "partial truncate is not supported", pos); diff --git a/h2/src/main/org/h2/mvstore/Page.java b/h2/src/main/org/h2/mvstore/Page.java index e93c70d8c3..5ff8b3477b 100644 --- a/h2/src/main/org/h2/mvstore/Page.java +++ b/h2/src/main/org/h2/mvstore/Page.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -10,16 +10,10 @@ import static org.h2.engine.Constants.MEMORY_POINTER; import static org.h2.mvstore.DataUtils.PAGE_TYPE_LEAF; import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; import org.h2.compress.Compressor; -import org.h2.message.DbException; -import org.h2.mvstore.type.DataType; import org.h2.util.Utils; /** @@ -28,28 +22,45 @@ * For b-tree nodes, the key at a given index is larger than the largest key of * the child at the same index. *

          - * File format: - * page length (including length): int + * Serialized format: + * length of a serialized page in bytes (including this field): int * check value: short + * page number (0-based sequential number within a chunk): varInt * map id: varInt * number of keys: varInt * type: byte (0: leaf, 1: node; +2: compressed) + * children of the non-leaf node (1 more than keys) * compressed: bytes saved (varInt) * keys - * leaf: values (one for each key) - * node: children (1 more than keys) + * values of the leaf node (one for each key) */ -public abstract class Page implements Cloneable -{ +public abstract class Page implements Cloneable { + /** * Map this page belongs to */ - public final MVMap map; + public final MVMap map; /** - * Position of this page's saved image within a Chunk or 0 if this page has not been saved yet. + * Position of this page's saved image within a Chunk + * or 0 if this page has not been saved yet + * or 1 if this page has not been saved yet, but already removed + * This "removed" flag is to keep track of pages that concurrently + * changed while they are being stored, in which case the live bookkeeping + * needs to be aware of such cases. + * Field need to be volatile to avoid races between saving thread setting it + * and other thread reading it to access the page. + * On top of this update atomicity is required so removal mark and saved position + * can be set concurrently. + * + * @see DataUtils#getPagePos(int, int, int, int) for field format details */ - private long pos; + private volatile long pos; + + /** + * Sequential 0-based number of the page within containing chunk. + */ + public int pageNo = -1; /** * The last result of a find operation is cached. @@ -69,16 +80,15 @@ public abstract class Page implements Cloneable /** * The keys. */ - private Object[] keys; + private K[] keys; /** - * Whether the page is an in-memory (not stored, or not yet stored) page, - * and it is removed. 
This is to keep track of pages that concurrently - * changed while they are being stored, in which case the live bookkeeping - * needs to be aware of such cases. + * Updater for pos field, which can be updated when page is saved, + * but can be concurrently marked as removed */ - private volatile boolean removedInMemory; - + @SuppressWarnings("rawtypes") + private static final AtomicLongFieldUpdater posUpdater = + AtomicLongFieldUpdater.newUpdater(Page.class, "pos"); /** * The estimated number of bytes used per child entry. */ @@ -109,29 +119,25 @@ public abstract class Page implements Cloneable MEMORY_POINTER + // values MEMORY_ARRAY; // Object[] values - /** - * An empty object array. - */ - private static final Object[] EMPTY_OBJECT_ARRAY = new Object[0]; - /** * Marker value for memory field, meaning that memory accounting is replaced by key count. */ private static final int IN_MEMORY = Integer.MIN_VALUE; + @SuppressWarnings("rawtypes") private static final PageReference[] SINGLE_EMPTY = { PageReference.EMPTY }; - Page(MVMap map) { + Page(MVMap map) { this.map = map; } - Page(MVMap map, Page source) { + Page(MVMap map, Page source) { this(map, source.keys); memory = source.memory; } - Page(MVMap map, Object[] keys) { + Page(MVMap map, K[] keys) { this.map = map; this.keys = keys; } @@ -139,27 +145,37 @@ public abstract class Page implements Cloneable /** * Create a new, empty leaf page. * + * @param key type + * @param value type + * * @param map the map * @return the new page */ - static Page createEmptyLeaf(MVMap map) { - return createLeaf(map, EMPTY_OBJECT_ARRAY, EMPTY_OBJECT_ARRAY, PAGE_LEAF_MEMORY); + static Page createEmptyLeaf(MVMap map) { + return createLeaf(map, map.getKeyType().createStorage(0), + map.getValueType().createStorage(0), PAGE_LEAF_MEMORY); } /** * Create a new, empty internal node page. 
* + * @param key type + * @param value type + * * @param map the map * @return the new page */ - static Page createEmptyNode(MVMap map) { - return createNode(map, EMPTY_OBJECT_ARRAY, SINGLE_EMPTY, 0, + @SuppressWarnings("unchecked") + static Page createEmptyNode(MVMap map) { + return createNode(map, map.getKeyType().createStorage(0), SINGLE_EMPTY, 0, PAGE_NODE_MEMORY + MEMORY_POINTER + PAGE_MEMORY_CHILD); // there is always one child } /** * Create a new non-leaf page. The arrays are not cloned. * + * @param the key class + * @param the value class * @param map the map * @param keys the keys * @param children the child page positions @@ -167,10 +183,10 @@ static Page createEmptyNode(MVMap map) { * @param memory the memory used in bytes * @return the page */ - public static Page createNode(MVMap map, Object[] keys, PageReference[] children, + public static Page createNode(MVMap map, K[] keys, PageReference[] children, long totalCount, int memory) { assert keys != null; - Page page = new NonLeaf(map, keys, children, totalCount); + Page page = new NonLeaf<>(map, keys, children, totalCount); page.initMemoryAccount(memory); return page; } @@ -178,21 +194,24 @@ public static Page createNode(MVMap map, Object[] keys, PageReference[] ch /** * Create a new leaf page. The arrays are not cloned. 
* + * @param key type + * @param value type + * * @param map the map * @param keys the keys * @param values the values * @param memory the memory used in bytes * @return the page */ - public static Page createLeaf(MVMap map, Object[] keys, Object[] values, int memory) { + static Page createLeaf(MVMap map, K[] keys, V[] values, int memory) { assert keys != null; - Page page = new Leaf(map, keys, values); + Page page = new Leaf<>(map, keys, values); page.initMemoryAccount(memory); return page; } private void initMemoryAccount(int memoryCount) { - if(map.store.getFileStore() == null) { + if(!map.isPersistent()) { memory = IN_MEMORY; } else if (memoryCount == 0) { recalculateMemory(); @@ -206,11 +225,14 @@ private void initMemoryAccount(int memoryCount) { * Get the value for the given key, or null if not found. * Search is done in the tree rooted at given page. * + * @param key type + * @param value type + * * @param key the key * @param p the root page * @return the value, or null if not found */ - static Object get(Page p, Object key) { + static V get(Page p, K key) { while (true) { int index = p.binarySearch(key); if (p.isLeaf()) { @@ -225,83 +247,22 @@ static Object get(Page p, Object key) { /** * Read a page. * + * @param key type + * @param value type + * * @param buff ByteBuffer containing serialized page info * @param pos the position * @param map the map * @return the page */ - static Page read(ByteBuffer buff, long pos, MVMap map) { + static Page read(ByteBuffer buff, long pos, MVMap map) { boolean leaf = (DataUtils.getPageType(pos) & 1) == PAGE_TYPE_LEAF; - Page p = leaf ? new Leaf(map) : new NonLeaf(map); + Page p = leaf ? new Leaf<>(map) : new NonLeaf<>(map); p.pos = pos; - int chunkId = DataUtils.getPageChunkId(pos); - p.read(buff, chunkId); + p.read(buff); return p; } - /** - * Read an inner node page from the buffer, but ignore the keys and - * values. 
- * - * @param buff ByteBuffer containing serialized page info - * @param pos the position - * @param collector to report child pages positions to - * @param executorService to use far parallel processing - * @param executingThreadCounter for parallel processing - */ - static void readChildrenPositions(ByteBuffer buff, long pos, - final MVStore.ChunkIdsCollector collector, - final ThreadPoolExecutor executorService, - final AtomicInteger executingThreadCounter) { - int len = DataUtils.readVarInt(buff); - int type = buff.get(); - if ((type & 1) != DataUtils.PAGE_TYPE_NODE) { - throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT, - "Position {0} expected to be a non-leaf", pos); - } - /* - * The logic here is a little awkward. We want to (a) execute reads in parallel, but (b) - * limit the number of threads we create. This is complicated by (a) the algorithm is - * recursive and needs to wait for children before returning up the call-stack, (b) checking - * the size of the thread-pool is not reliable. 
- */ - final List> futures = new ArrayList<>(len + 1); - for (int i = 0; i <= len; i++) { - final long childPagePos = buff.getLong(); - for (;;) { - int counter = executingThreadCounter.get(); - if (counter >= executorService.getMaximumPoolSize()) { - collector.visit(childPagePos, executorService, executingThreadCounter); - break; - } else { - if (executingThreadCounter.compareAndSet(counter, counter + 1)) { - Future f = executorService.submit(new Runnable() { - @Override - public void run() { - try { - collector.visit(childPagePos, executorService, executingThreadCounter); - } finally { - executingThreadCounter.decrementAndGet(); - } - } - }); - futures.add(f); - break; - } - } - } - } - for (Future f : futures) { - try { - f.get(); - } catch (InterruptedException ex) { - throw new RuntimeException(ex); - } catch (ExecutionException ex) { - throw DbException.convert(ex); - } - } - } - /** * Get the id of the page's owner map * @return id @@ -318,9 +279,10 @@ public final int getMapId() { * mid-process without tree integrity violation * * @param map new map to own resulting page + * @param eraseChildrenRefs whether cloned Page should have no child references or keep originals * @return the page */ - abstract Page copy(MVMap map); + abstract Page copy(MVMap map, boolean eraseChildrenRefs); /** * Get the key at the given index. @@ -328,7 +290,7 @@ public final int getMapId() { * @param index the index * @return the key */ - public Object getKey(int index) { + public K getKey(int index) { return keys[index]; } @@ -338,17 +300,7 @@ public Object getKey(int index) { * @param index the index * @return the child page */ - public abstract Page getChildPage(int index); - - /** - * Get the child page at the given index only if is - * already loaded. Does not make any attempt to load - * the page or retrieve it from the cache. 
- * - * @param index the index - * @return the child page, null if it is not loaded - */ - public abstract Page getChildPageIfLoaded(int index); + public abstract Page getChildPage(int index); /** * Get the position of the child. @@ -364,7 +316,7 @@ public Object getKey(int index) { * @param index the index * @return the value */ - public abstract Object getValue(int index); + public abstract V getValue(int index); /** * Get the number of keys in this page. @@ -421,35 +373,18 @@ protected void dump(StringBuilder buff) { * * @return a mutable copy of this page */ - public final Page copy() { - return copy(false); - } - - /** - * Create a copy of this page. - * - * @param countRemoval When {@code true} the current page is removed, - * when {@code false} just copy the page. - * @return a mutable copy of this page - */ - public final Page copy(boolean countRemoval) { - Page newPage = clone(); + public final Page copy() { + Page newPage = clone(); newPage.pos = 0; - // mark the old as deleted - if(countRemoval) { - removePage(); - if(isPersistent()) { - map.store.registerUnsavedPage(newPage.getMemory()); - } - } return newPage; } + @SuppressWarnings("unchecked") @Override - protected final Page clone() { - Page clone; + protected final Page clone() { + Page clone; try { - clone = (Page) super.clone(); + clone = (Page) super.clone(); } catch (CloneNotSupportedException impossible) { throw new RuntimeException(impossible); } @@ -467,30 +402,10 @@ protected final Page clone() { * @param key the key * @return the value or null */ - int binarySearch(Object key) { - int low = 0, high = getKeyCount() - 1; - // the cached index minus one, so that - // for the first time (when cachedCompare is 0), - // the default value is used - int x = cachedCompare - 1; - if (x < 0 || x > high) { - x = high >>> 1; - } - Object[] k = keys; - while (low <= high) { - int compare = map.compare(key, k[x]); - if (compare > 0) { - low = x + 1; - } else if (compare < 0) { - high = x - 1; - } else { - 
cachedCompare = x + 1; - return x; - } - x = (low + high) >>> 1; - } - cachedCompare = low; - return -(low + 1); + int binarySearch(K key) { + int res = map.getKeyType().binarySearch(key, keys, getKeyCount(), cachedCompare); + cachedCompare = res < 0 ? ~res : res + 1; + return res; } /** @@ -499,7 +414,7 @@ int binarySearch(Object key) { * @param at the split index * @return the page with the entries after the split index */ - abstract Page split(int at); + abstract Page split(int at); /** * Split the current keys array into two arrays. @@ -508,10 +423,10 @@ int binarySearch(Object key) { * @param bCount size of the second array/ * @return the second array. */ - final Object[] splitKeys(int aCount, int bCount) { + final K[] splitKeys(int aCount, int bCount) { assert aCount + bCount <= getKeyCount(); - Object[] aKeys = createKeyStorage(aCount); - Object[] bKeys = createKeyStorage(bCount); + K[] aKeys = createKeyStorage(aCount); + K[] bKeys = createKeyStorage(bCount); System.arraycopy(keys, 0, aKeys, 0, aCount); System.arraycopy(keys, getKeyCount() - bCount, bKeys, 0, bCount); keys = aKeys; @@ -526,7 +441,7 @@ final Object[] splitKeys(int aCount, int bCount) { * @param extraKeys to be added * @param extraValues to be added */ - abstract void expand(int extraKeyCount, Object[] extraKeys, Object[] extraValues); + abstract void expand(int extraKeyCount, K[] extraKeys, V[] extraValues); /** * Expand the keys array. 
@@ -534,9 +449,9 @@ final Object[] splitKeys(int aCount, int bCount) { * @param extraKeyCount number of extra key entries to create * @param extraKeys extra key values */ - final void expandKeys(int extraKeyCount, Object[] extraKeys) { + final void expandKeys(int extraKeyCount, K[] extraKeys) { int keyCount = getKeyCount(); - Object[] newKeys = createKeyStorage(keyCount + extraKeyCount); + K[] newKeys = createKeyStorage(keyCount + extraKeyCount); System.arraycopy(keys, 0, newKeys, 0, keyCount); System.arraycopy(extraKeys, 0, newKeys, keyCount, extraKeyCount); keys = newKeys; @@ -563,7 +478,7 @@ final void expandKeys(int extraKeyCount, Object[] extraKeys) { * @param index the index * @param c the new child page */ - public abstract void setChild(int index, Page c); + public abstract void setChild(int index, Page c); /** * Replace the key at an index in this page. @@ -571,16 +486,17 @@ final void expandKeys(int extraKeyCount, Object[] extraKeys) { * @param index the index * @param key the new key */ - public final void setKey(int index, Object key) { + public final void setKey(int index, K key) { keys = keys.clone(); if(isPersistent()) { - Object old = keys[index]; - DataType keyType = map.getKeyType(); - int mem = keyType.getMemory(key); - if (old != null) { - mem -= keyType.getMemory(old); + K old = keys[index]; + if (!map.isMemoryEstimationAllowed() || old == null) { + int mem = map.evaluateMemoryForKey(key); + if (old != null) { + mem -= map.evaluateMemoryForKey(old); + } + addMemory(mem); } - addMemory(mem); } keys[index] = key; } @@ -592,7 +508,7 @@ public final void setKey(int index, Object key) { * @param value the new value * @return the old value */ - public abstract Object setValue(int index, Object value); + public abstract V setValue(int index, V value); /** * Insert a key-value pair into this leaf. 
@@ -601,7 +517,7 @@ public final void setKey(int index, Object key) { * @param key the key * @param value the value */ - public abstract void insertLeaf(int index, Object key, Object value); + public abstract void insertLeaf(int index, K key, V value); /** * Insert a child page into this node. @@ -610,7 +526,7 @@ public final void setKey(int index, Object key) { * @param key the key * @param childPage the child page */ - public abstract void insertNode(int index, Object key, Page childPage); + public abstract void insertNode(int index, K key, Page childPage); /** * Insert a key into the key array @@ -618,17 +534,17 @@ public final void setKey(int index, Object key) { * @param index index to insert at * @param key the key value */ - final void insertKey(int index, Object key) { + final void insertKey(int index, K key) { int keyCount = getKeyCount(); assert index <= keyCount : index + " > " + keyCount; - Object[] newKeys = createKeyStorage(keyCount + 1); + K[] newKeys = createKeyStorage(keyCount + 1); DataUtils.copyWithGap(keys, newKeys, keyCount, index); keys = newKeys; keys[index] = key; if (isPersistent()) { - addMemory(MEMORY_POINTER + map.getKeyType().getMemory(key)); + addMemory(MEMORY_POINTER + map.evaluateMemoryForKey(key)); } } @@ -639,15 +555,16 @@ final void insertKey(int index, Object key) { */ public void remove(int index) { int keyCount = getKeyCount(); - DataType keyType = map.getKeyType(); if (index == keyCount) { --index; } if(isPersistent()) { - Object old = getKey(index); - addMemory(-MEMORY_POINTER - keyType.getMemory(old)); + if (!map.isMemoryEstimationAllowed()) { + K old = getKey(index); + addMemory(-MEMORY_POINTER - map.evaluateMemoryForKey(old)); + } } - Object[] newKeys = createKeyStorage(keyCount - 1); + K[] newKeys = createKeyStorage(keyCount - 1); DataUtils.copyExcept(keys, newKeys, keyCount, index); keys = newKeys; } @@ -655,20 +572,55 @@ public void remove(int index) { /** * Read the page from the buffer. 
* - * @param buff the buffer - * @param chunkId the chunk id + * @param buff the buffer to read from */ - private void read(ByteBuffer buff, int chunkId) { - int pageLength = buff.remaining() + 4; // size of int, since we've read page length already - int len = DataUtils.readVarInt(buff); - keys = createKeyStorage(len); + private void read(ByteBuffer buff) { + int chunkId = DataUtils.getPageChunkId(pos); + int offset = DataUtils.getPageOffset(pos); + + int start = buff.position(); + int pageLength = buff.getInt(); // does not include optional part (pageNo) + int remaining = buff.remaining() + 4; + if (pageLength > remaining || pageLength < 4) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "File corrupted in chunk {0}, expected page length 4..{1}, got {2}", chunkId, remaining, + pageLength); + } + + short check = buff.getShort(); + int checkTest = DataUtils.getCheckValue(chunkId) + ^ DataUtils.getCheckValue(offset) + ^ DataUtils.getCheckValue(pageLength); + if (check != (short) checkTest) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "File corrupted in chunk {0}, expected check value {1}, got {2}", chunkId, checkTest, check); + } + + pageNo = DataUtils.readVarInt(buff); + if (pageNo < 0) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "File corrupted in chunk {0}, got negative page No {1}", chunkId, pageNo); + } + + int mapId = DataUtils.readVarInt(buff); + if (mapId != map.getId()) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, map.getId(), mapId); + } + + int keyCount = DataUtils.readVarInt(buff); + keys = createKeyStorage(keyCount); int type = buff.get(); if(isLeaf() != ((type & 1) == PAGE_TYPE_LEAF)) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "File corrupted in chunk {0}, expected node type {1}, got {2}", chunkId, isLeaf() ? 
"0" : "1" , type); } + + // to restrain hacky GenericDataType, which grabs the whole remainder of the buffer + buff.limit(start + pageLength); + if (!isLeaf()) { readPayLoad(buff); } @@ -683,14 +635,21 @@ private void read(ByteBuffer buff, int chunkId) { } int lenAdd = DataUtils.readVarInt(buff); int compLen = buff.remaining(); - byte[] comp = Utils.newBytes(compLen); - buff.get(comp); + byte[] comp; + int pos = 0; + if (buff.hasArray()) { + comp = buff.array(); + pos = buff.arrayOffset() + buff.position(); + } else { + comp = Utils.newBytes(compLen); + buff.get(comp); + } int l = compLen + lenAdd; buff = ByteBuffer.allocate(l); - compressor.expand(comp, 0, compLen, buff.array(), + compressor.expand(comp, pos, compLen, buff.array(), buff.arrayOffset(), l); } - map.getKeyType().read(buff, keys, len, true); + map.getKeyType().read(buff, keys, keyCount); if (isLeaf()) { readPayLoad(buff); } @@ -709,26 +668,56 @@ public final boolean isSaved() { return DataUtils.isPageSaved(pos); } + public final boolean isRemoved() { + return DataUtils.isPageRemoved(pos); + } + + /** + * Mark this page as removed "in memory". That means that only adjustment of + * "unsaved memory" amount is required. On the other hand, if page was + * persisted, it's removal should be reflected in occupancy of the + * containing chunk. + * + * @return true if it was marked by this call or has been marked already, + * false if page has been saved already. + */ + private boolean markAsRemoved() { + assert getTotalCount() > 0 : this; + long pagePos; + do { + pagePos = pos; + if (DataUtils.isPageSaved(pagePos)) { + return false; + } + assert !DataUtils.isPageRemoved(pagePos); + } while (!posUpdater.compareAndSet(this, 0L, 1L)); + return true; + } + /** * Store the page and update the position. 
* * @param chunk the chunk * @param buff the target buffer + * @param toc prospective table of content * @return the position of the buffer just after the type */ - protected final int write(Chunk chunk, WriteBuffer buff) { + protected final int write(Chunk chunk, WriteBuffer buff, List toc) { + pageNo = toc.size(); + int keyCount = getKeyCount(); int start = buff.position(); - int len = getKeyCount(); - int type = isLeaf() ? PAGE_TYPE_LEAF : DataUtils.PAGE_TYPE_NODE; - buff.putInt(0). - putShort((byte) 0). - putVarInt(map.getId()). - putVarInt(len); + buff.putInt(0) // placeholder for pageLength + .putShort((byte)0) // placeholder for check + .putVarInt(pageNo) + .putVarInt(map.getId()) + .putVarInt(keyCount); int typePos = buff.position(); - buff.put((byte) type); + int type = isLeaf() ? PAGE_TYPE_LEAF : DataUtils.PAGE_TYPE_NODE; + buff.put((byte)type); + int childrenPos = buff.position(); writeChildren(buff, true); int compressStart = buff.position(); - map.getKeyType().write(buff, keys, len, true); + map.getKeyType().write(buff, keys, keyCount); writeValues(buff); MVStore store = map.getStore(); int expLen = buff.position() - compressStart; @@ -738,27 +727,37 @@ protected final int write(Chunk chunk, WriteBuffer buff) { Compressor compressor; int compressType; if (compressionLevel == 1) { - compressor = map.getStore().getCompressorFast(); + compressor = store.getCompressorFast(); compressType = DataUtils.PAGE_COMPRESSED; } else { - compressor = map.getStore().getCompressorHigh(); + compressor = store.getCompressorHigh(); compressType = DataUtils.PAGE_COMPRESSED_HIGH; } - byte[] exp = new byte[expLen]; - buff.position(compressStart).get(exp); byte[] comp = new byte[expLen * 2]; - int compLen = compressor.compress(exp, expLen, comp, 0); - int plus = DataUtils.getVarIntLen(compLen - expLen); + ByteBuffer byteBuffer = buff.getBuffer(); + int pos = 0; + byte[] exp; + if (byteBuffer.hasArray()) { + exp = byteBuffer.array(); + pos = byteBuffer.arrayOffset() + 
compressStart; + } else { + exp = Utils.newBytes(expLen); + buff.position(compressStart).get(exp); + } + int compLen = compressor.compress(exp, pos, expLen, comp, 0); + int plus = DataUtils.getVarIntLen(expLen - compLen); if (compLen + plus < expLen) { - buff.position(typePos). - put((byte) (type + compressType)); - buff.position(compressStart). - putVarInt(expLen - compLen). - put(comp, 0, compLen); + buff.position(typePos) + .put((byte) (type | compressType)); + buff.position(compressStart) + .putVarInt(expLen - compLen) + .put(comp, 0, compLen); } } } int pageLength = buff.position() - start; + long tocElement = DataUtils.getTocElement(getMapId(), start, buff.position() - start, type); + toc.add(tocElement); int chunkId = chunk.id; int check = DataUtils.getCheckValue(chunkId) ^ DataUtils.getCheckValue(start) @@ -766,29 +765,28 @@ protected final int write(Chunk chunk, WriteBuffer buff) { buff.putInt(start, pageLength). putShort(start + 4, (short) check); if (isSaved()) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Page already stored"); } - pos = DataUtils.getPagePos(chunkId, start, pageLength, type); + long pagePos = DataUtils.getPagePos(chunkId, tocElement); + boolean isDeleted = isRemoved(); + while (!posUpdater.compareAndSet(this, isDeleted ? 1L : 0L, pagePos)) { + isDeleted = isRemoved(); + } store.cachePage(this); if (type == DataUtils.PAGE_TYPE_NODE) { // cache again - this will make sure nodes stays in the cache // for a longer time store.cachePage(this); } - int max = DataUtils.getPageMaxLength(pos); - chunk.maxLen += max; - chunk.maxLenLive += max; - chunk.pageCount++; - chunk.pageCountLive++; - if (removedInMemory) { - // if the page was removed _before_ the position was assigned, we - // need to mark it removed here, so the fields are updated - // when the next chunk is stored - map.removePage(pos, memory); - } - diskSpaceUsed = max != DataUtils.PAGE_LARGE ? 
max : pageLength; - return typePos + 1; + int pageLengthEncoded = DataUtils.getPageMaxLength(pos); + boolean singleWriter = map.isSingleWriter(); + chunk.accountForWrittenPage(pageLengthEncoded, singleWriter); + if (isDeleted) { + store.accountForRemovedPage(pagePos, chunk.version + 1, singleWriter, pageNo); + } + diskSpaceUsed = pageLengthEncoded != DataUtils.PAGE_LARGE ? pageLengthEncoded : pageLength; + return childrenPos; } /** @@ -809,29 +807,19 @@ protected final int write(Chunk chunk, WriteBuffer buff) { /** * Store this page and all children that are changed, in reverse order, and * update the position and the children. - * * @param chunk the chunk * @param buff the target buffer + * @param toc prospective table of content */ - abstract void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff); + abstract void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff, List toc); /** * Unlink the children recursively after all data is written. */ - abstract void writeEnd(); + abstract void releaseSavedPages(); public abstract int getRawChildPageCount(); - @Override - public final boolean equals(Object other) { - return other == this || other instanceof Page && isSaved() && ((Page) other).pos == pos; - } - - @Override - public final int hashCode() { - return isSaved() ? 
(int) (pos | (pos >>> 32)) : super.hashCode(); - } - protected final boolean isPersistent() { return memory != IN_MEMORY; } @@ -854,12 +842,12 @@ public long getDiskSpaceUsed() { long r = 0; if (isPersistent()) { r += diskSpaceUsed; - } - if (!isLeaf()) { - for (int i = 0; i < getRawChildPageCount(); i++) { - long pos = getChildPagePos(i); - if (pos != 0) { - r += getChildPage(i).getDiskSpaceUsed(); + if (!isLeaf()) { + for (int i = 0; i < getRawChildPageCount(); i++) { + long pos = getChildPagePos(i); + if (pos != 0) { + r += getChildPage(i).getDiskSpaceUsed(); + } } } } @@ -873,6 +861,7 @@ public long getDiskSpaceUsed() { */ final void addMemory(int mem) { memory += mem; + assert memory >= 0; } /** @@ -889,13 +878,17 @@ final void recalculateMemory() { * @return memory in bytes */ protected int calculateMemory() { +//* + return map.evaluateMemoryForKeys(keys, getKeyCount()); +/*/ int keyCount = getKeyCount(); int mem = keyCount * MEMORY_POINTER; - DataType keyType = map.getKeyType(); + DataType keyType = map.getKeyType(); for (int i = 0; i < keyCount; i++) { - mem += keyType.getMemory(keys[i]); + mem += getMemory(keyType, keys[i]); } return mem; +//*/ } public boolean isComplete() { @@ -908,30 +901,49 @@ public boolean isComplete() { public void setComplete() {} /** - * Remove the page. - */ - public final void removePage() { - if(isPersistent()) { - long p = pos; - if (p == 0) { - removedInMemory = true; + * Make accounting changes (chunk occupancy or "unsaved" RAM), related to + * this page removal. 
+ * + * @param version at which page was removed + * @return amount (negative), by which "unsaved memory" should be adjusted, + * if page is unsaved one, and 0 for page that was already saved, or + * in case of non-persistent map + */ + public final int removePage(long version) { + if(isPersistent() && getTotalCount() > 0) { + MVStore store = map.store; + if (!markAsRemoved()) { // only if it has been saved already + long pagePos = pos; + store.accountForRemovedPage(pagePos, version, map.isSingleWriter(), pageNo); + } else { + return -memory; } - map.removePage(p, memory); } + return 0; } /** - * Update given CursorPos chain to correspond to "append point" in a B-tree rooted at this Page. + * Extend path from a given CursorPos chain to "prepend point" in a B-tree, rooted at this Page. + * + * @param cursorPos presumably pointing to this Page (null if real root), to build upon + * @return new head of the CursorPos chain + */ + public abstract CursorPos getPrependCursorPos(CursorPos cursorPos); + + /** + * Extend path from a given CursorPos chain to "append point" in a B-tree, rooted at this Page. * - * @param cursorPos to update, presumably pointing to this Page + * @param cursorPos presumably pointing to this Page (null if real root), to build upon * @return new head of the CursorPos chain */ - public abstract CursorPos getAppendCursorPos(CursorPos cursorPos); + public abstract CursorPos getAppendCursorPos(CursorPos cursorPos); /** * Remove all page data recursively. + * @param version at which page got removed + * @return adjustment for "unsaved memory" amount */ - public abstract void removeAllRecursive(); + public abstract int removeAllRecursive(long version); /** * Create array for keys storage. 
@@ -939,9 +951,8 @@ public final void removePage() { * @param size number of entries * @return values array */ - private Object[] createKeyStorage(int size) - { - return new Object[size]; + public final K[] createKeyStorage(int size) { + return map.getKeyType().createStorage(size); } /** @@ -950,20 +961,33 @@ private Object[] createKeyStorage(int size) * @param size number of entries * @return values array */ - final Object[] createValueStorage(int size) - { - return new Object[size]; + final V[] createValueStorage(int size) { + return map.getValueType().createStorage(size); + } + + /** + * Create an array of page references. + * + * @param the key class + * @param the value class + * @param size the number of entries + * @return the array + */ + @SuppressWarnings("unchecked") + public static PageReference[] createRefStorage(int size) { + return new PageReference[size]; } /** * A pointer to a page, either in-memory or using a page position. */ - public static final class PageReference { + public static final class PageReference { /** * Singleton object used when arrays of PageReference have not yet been filled. */ - public static final PageReference EMPTY = new PageReference(null, 0, 0); + @SuppressWarnings("rawtypes") + static final PageReference EMPTY = new PageReference<>(null, 0, 0); /** * The position, if known, or 0. @@ -973,14 +997,26 @@ public static final class PageReference { /** * The page, if in memory, or null. */ - private Page page; + private Page page; /** * The descendant count for this child page. */ final long count; - public PageReference(Page page) { + /** + * Get an empty page reference. 
+ * + * @param the key class + * @param the value class + * @return the page reference + */ + @SuppressWarnings("unchecked") + public static PageReference empty() { + return EMPTY; + } + + public PageReference(Page page) { this(page, page.getPos(), page.getTotalCount()); } @@ -989,13 +1025,13 @@ public PageReference(Page page) { assert DataUtils.isPageSaved(pos); } - private PageReference(Page page, long pos, long count) { + private PageReference(Page page, long pos, long count) { this.page = page; this.pos = pos; this.count = count; } - public Page getPage() { + public Page getPage() { return page; } @@ -1006,7 +1042,7 @@ public Page getPage() { */ void clearPageReference() { if (page != null) { - page.writeEnd(); + page.releaseSavedPages(); assert page.isSaved() || !page.isComplete(); if (page.isSaved()) { assert pos == page.getPos(); @@ -1024,7 +1060,7 @@ long getPos() { * Re-acquire position from in-memory page. */ void resetPos() { - Page p = page; + Page p = page; if (p != null && p.isSaved()) { pos = p.getPos(); assert count == p.getTotalCount(); @@ -1033,36 +1069,37 @@ void resetPos() { @Override public String toString() { - return "Cnt:" + count + ", pos:" + DataUtils.getPageChunkId(pos) + - "-" + DataUtils.getPageOffset(pos) + ":" + DataUtils.getPageMaxLength(pos) + - (page == null ? DataUtils.getPageType(pos) == 0 : page.isLeaf() ? " leaf" : " node") + ", " + page; + return "Cnt:" + count + ", pos:" + (pos == 0 ? "0" : DataUtils.getPageChunkId(pos) + + (page == null ? "" : "/" + page.pageNo) + + "-" + DataUtils.getPageOffset(pos) + ":" + DataUtils.getPageMaxLength(pos)) + + ((page == null ? DataUtils.getPageType(pos) == 0 : page.isLeaf()) ? " leaf" : " node") + + ", page:{" + page + "}"; } } - private static class NonLeaf extends Page - { + private static class NonLeaf extends Page { /** * The child page references. */ - private PageReference[] children; + private PageReference[] children; /** * The total entry count of this page and all children. 
*/ private long totalCount; - NonLeaf(MVMap map) { + NonLeaf(MVMap map) { super(map); } - NonLeaf(MVMap map, NonLeaf source, PageReference[] children, long totalCount) { + NonLeaf(MVMap map, NonLeaf source, PageReference[] children, long totalCount) { super(map, source); this.children = children; this.totalCount = totalCount; } - NonLeaf(MVMap map, Object[] keys, PageReference[] children, long totalCount) { + NonLeaf(MVMap map, K[] keys, PageReference[] children, long totalCount) { super(map, keys); this.children = children; this.totalCount = totalCount; @@ -1074,14 +1111,16 @@ public int getNodeType() { } @Override - public Page copy(MVMap map) { - return new IncompleteNonLeaf(map, this); + public Page copy(MVMap map, boolean eraseChildrenRefs) { + return eraseChildrenRefs ? + new IncompleteNonLeaf<>(map, this) : + new NonLeaf<>(map, this, children, totalCount); } @Override - public Page getChildPage(int index) { - PageReference ref = children[index]; - Page page = ref.getPage(); + public Page getChildPage(int index) { + PageReference ref = children[index]; + Page page = ref.getPage(); if(page == null) { page = map.readPage(ref.getPos()); assert ref.getPos() == page.getPos(); @@ -1090,42 +1129,37 @@ public Page getChildPage(int index) { return page; } - @Override - public Page getChildPageIfLoaded(int index) { - return children[index].getPage(); - } - @Override public long getChildPagePos(int index) { return children[index].getPos(); } @Override - public Object getValue(int index) { + public V getValue(int index) { throw new UnsupportedOperationException(); } @Override - public Page split(int at) { + public Page split(int at) { assert !isSaved(); int b = getKeyCount() - at; - Object[] bKeys = splitKeys(at, b - 1); - PageReference[] aChildren = new PageReference[at + 1]; - PageReference[] bChildren = new PageReference[b]; + K[] bKeys = splitKeys(at, b - 1); + PageReference[] aChildren = createRefStorage(at + 1); + PageReference[] bChildren = createRefStorage(b); 
System.arraycopy(children, 0, aChildren, 0, at + 1); System.arraycopy(children, at + 1, bChildren, 0, b); children = aChildren; long t = 0; - for (PageReference x : aChildren) { + for (PageReference x : aChildren) { t += x.count; } totalCount = t; t = 0; - for (PageReference x : bChildren) { + for (PageReference x : bChildren) { t += x.count; } - Page newPage = createNode(map, bKeys, bChildren, t, 0); + Page newPage = createNode(map, bKeys, bChildren, t, 0); if(isPersistent()) { recalculateMemory(); } @@ -1153,7 +1187,7 @@ private long calculateTotalCount() { return check; } - protected void recalculateTotalCount() { + void recalculateTotalCount() { totalCount = calculateTotalCount(); } @@ -1163,35 +1197,35 @@ long getCounts(int index) { } @Override - public void setChild(int index, Page c) { + public void setChild(int index, Page c) { assert c != null; - PageReference child = children[index]; + PageReference child = children[index]; if (c != child.getPage() || c.getPos() != child.getPos()) { totalCount += c.getTotalCount() - child.count; children = children.clone(); - children[index] = new PageReference(c); + children[index] = new PageReference<>(c); } } @Override - public Object setValue(int index, Object value) { + public V setValue(int index, V value) { throw new UnsupportedOperationException(); } @Override - public void insertLeaf(int index, Object key, Object value) { + public void insertLeaf(int index, K key, V value) { throw new UnsupportedOperationException(); } @Override - public void insertNode(int index, Object key, Page childPage) { + public void insertNode(int index, K key, Page childPage) { int childCount = getRawChildPageCount(); insertKey(index, key); - PageReference[] newChildren = new PageReference[childCount + 1]; + PageReference[] newChildren = createRefStorage(childCount + 1); DataUtils.copyWithGap(children, newChildren, childCount, index); children = newChildren; - children[index] = new PageReference(childPage); + children[index] = new 
PageReference<>(childPage); totalCount += childPage.getTotalCount(); if (isPersistent()) { @@ -1204,48 +1238,58 @@ public void remove(int index) { int childCount = getRawChildPageCount(); super.remove(index); if(isPersistent()) { - addMemory(-MEMORY_POINTER - PAGE_MEMORY_CHILD); + if (map.isMemoryEstimationAllowed()) { + addMemory(-getMemory() / childCount); + } else { + addMemory(-MEMORY_POINTER - PAGE_MEMORY_CHILD); + } } totalCount -= children[index].count; - PageReference[] newChildren = new PageReference[childCount - 1]; + PageReference[] newChildren = createRefStorage(childCount - 1); DataUtils.copyExcept(children, newChildren, childCount, index); children = newChildren; } @Override - public void removeAllRecursive() { + public int removeAllRecursive(long version) { + int unsavedMemory = removePage(version); if (isPersistent()) { for (int i = 0, size = map.getChildPageCount(this); i < size; i++) { - PageReference ref = children[i]; - Page page = ref.getPage(); + PageReference ref = children[i]; + Page page = ref.getPage(); if (page != null) { - page.removeAllRecursive(); + unsavedMemory += page.removeAllRecursive(version); } else { - long c = ref.getPos(); - int type = DataUtils.getPageType(c); - if (type == PAGE_TYPE_LEAF) { - int mem = DataUtils.getPageMaxLength(c); - map.removePage(c, mem); + long pagePos = ref.getPos(); + assert DataUtils.isPageSaved(pagePos); + if (DataUtils.isLeafPosition(pagePos)) { + map.store.accountForRemovedPage(pagePos, version, map.isSingleWriter(), -1); } else { - map.readPage(c).removeAllRecursive(); + unsavedMemory += map.readPage(pagePos).removeAllRecursive(version); } } } } - removePage(); + return unsavedMemory; } @Override - public CursorPos getAppendCursorPos(CursorPos cursorPos) { + public CursorPos getPrependCursorPos(CursorPos cursorPos) { + Page childPage = getChildPage(0); + return childPage.getPrependCursorPos(new CursorPos<>(this, 0, cursorPos)); + } + + @Override + public CursorPos getAppendCursorPos(CursorPos 
cursorPos) { int keyCount = getKeyCount(); - Page childPage = getChildPage(keyCount); - return childPage.getAppendCursorPos(new CursorPos(this, keyCount, cursorPos)); + Page childPage = getChildPage(keyCount); + return childPage.getAppendCursorPos(new CursorPos<>(this, keyCount, cursorPos)); } @Override protected void readPayLoad(ByteBuffer buff) { int keyCount = getKeyCount(); - children = new PageReference[keyCount + 1]; + children = createRefStorage(keyCount + 1); long[] p = new long[keyCount + 1]; for (int i = 0; i <= keyCount; i++) { p[i] = buff.getLong(); @@ -1256,7 +1300,9 @@ protected void readPayLoad(ByteBuffer buff) { long position = p[i]; assert position == 0 ? s == 0 : s >= 0; total += s; - children[i] = position == 0 ? PageReference.EMPTY : new PageReference(position, s); + children[i] = position == 0 ? + PageReference.empty() : + new PageReference<>(position, s); } totalCount = total; } @@ -1278,10 +1324,10 @@ protected void writeChildren(WriteBuffer buff, boolean withCounts) { } @Override - void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff) { + void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff, List toc) { if (!isSaved()) { - int patch = write(chunk, buff); - writeChildrenRecursive(chunk, buff); + int patch = write(chunk, buff, toc); + writeChildrenRecursive(chunk, buff, toc); int old = buff.position(); buff.position(patch); writeChildren(buff, false); @@ -1289,20 +1335,20 @@ void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff) { } } - void writeChildrenRecursive(Chunk chunk, WriteBuffer buff) { + void writeChildrenRecursive(Chunk chunk, WriteBuffer buff, List toc) { int len = getRawChildPageCount(); for (int i = 0; i < len; i++) { - PageReference ref = children[i]; - Page p = ref.getPage(); + PageReference ref = children[i]; + Page p = ref.getPage(); if (p != null) { - p.writeUnsavedRecursive(chunk, buff); + p.writeUnsavedRecursive(chunk, buff, toc); ref.resetPos(); } } } @Override - void writeEnd() { + void releaseSavedPages() { 
int len = getRawChildPageCount(); for (int i = 0; i < len; i++) { children[i].clearPageReference(); @@ -1337,27 +1383,27 @@ public void dump(StringBuilder buff) { } - private static class IncompleteNonLeaf extends NonLeaf { + private static class IncompleteNonLeaf extends NonLeaf { private boolean complete; - IncompleteNonLeaf(MVMap map, NonLeaf source) { + IncompleteNonLeaf(MVMap map, NonLeaf source) { super(map, source, constructEmptyPageRefs(source.getRawChildPageCount()), source.getTotalCount()); } - private static PageReference[] constructEmptyPageRefs(int size) { + private static PageReference[] constructEmptyPageRefs(int size) { // replace child pages with empty pages - PageReference[] children = new PageReference[size]; - Arrays.fill(children, PageReference.EMPTY); + PageReference[] children = createRefStorage(size); + Arrays.fill(children, PageReference.empty()); return children; } @Override - void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff) { + void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff, List toc) { if (complete) { - super.writeUnsavedRecursive(chunk, buff); + super.writeUnsavedRecursive(chunk, buff, toc); } else if (!isSaved()) { - writeChildrenRecursive(chunk, buff); + writeChildrenRecursive(chunk, buff, toc); } } @@ -1381,23 +1427,23 @@ public void dump(StringBuilder buff) { } - private static class Leaf extends Page - { + + private static class Leaf extends Page { /** * The storage for values. 
*/ - private Object[] values; + private V[] values; - Leaf(MVMap map) { + Leaf(MVMap map) { super(map); } - private Leaf(MVMap map, Leaf source) { + private Leaf(MVMap map, Leaf source) { super(map, source); this.values = source.values; } - Leaf(MVMap map, Object[] keys, Object[] values) { + Leaf(MVMap map, K[] keys, V[] values) { super(map, keys); this.values = values; } @@ -1408,41 +1454,38 @@ public int getNodeType() { } @Override - public Page copy(MVMap map) { - return new Leaf(map, this); + public Page copy(MVMap map, boolean eraseChildrenRefs) { + return new Leaf<>(map, this); } @Override - public Page getChildPage(int index) { + public Page getChildPage(int index) { throw new UnsupportedOperationException(); } - @Override - public Page getChildPageIfLoaded(int index) { throw new UnsupportedOperationException(); } - @Override public long getChildPagePos(int index) { throw new UnsupportedOperationException(); } @Override - public Object getValue(int index) { - return values[index]; + public V getValue(int index) { + return values == null ? 
null : values[index]; } @Override - public Page split(int at) { + public Page split(int at) { assert !isSaved(); int b = getKeyCount() - at; - Object[] bKeys = splitKeys(at, b); - Object[] bValues = createValueStorage(b); + K[] bKeys = splitKeys(at, b); + V[] bValues = createValueStorage(b); if(values != null) { - Object[] aValues = createValueStorage(at); + V[] aValues = createValueStorage(at); System.arraycopy(values, 0, aValues, 0, at); System.arraycopy(values, at, bValues, 0, b); values = aValues; } - Page newPage = createLeaf(map, bKeys, bValues, 0); + Page newPage = createLeaf(map, bKeys, bValues, 0); if(isPersistent()) { recalculateMemory(); } @@ -1450,11 +1493,11 @@ public Page split(int at) { } @Override - public void expand(int extraKeyCount, Object[] extraKeys, Object[] extraValues) { + public void expand(int extraKeyCount, K[] extraKeys, V[] extraValues) { int keyCount = getKeyCount(); expandKeys(extraKeyCount, extraKeys); if(values != null) { - Object[] newValues = createValueStorage(keyCount + extraKeyCount); + V[] newValues = createValueStorage(keyCount + extraKeyCount); System.arraycopy(values, 0, newValues, 0, keyCount); System.arraycopy(extraValues, 0, newValues, keyCount, extraKeyCount); values = newValues; @@ -1475,46 +1518,47 @@ long getCounts(int index) { } @Override - public void setChild(int index, Page c) { + public void setChild(int index, Page c) { throw new UnsupportedOperationException(); } @Override - public Object setValue(int index, Object value) { - DataType valueType = map.getValueType(); + public V setValue(int index, V value) { values = values.clone(); - Object old = setValueInternal(index, value); + V old = setValueInternal(index, value); if(isPersistent()) { - addMemory(valueType.getMemory(value) - - valueType.getMemory(old)); + if (!map.isMemoryEstimationAllowed()) { + addMemory(map.evaluateMemoryForValue(value) - + map.evaluateMemoryForValue(old)); + } } return old; } - private Object setValueInternal(int index, Object value) 
{ - Object old = values[index]; + private V setValueInternal(int index, V value) { + V old = values[index]; values[index] = value; return old; } @Override - public void insertLeaf(int index, Object key, Object value) { + public void insertLeaf(int index, K key, V value) { int keyCount = getKeyCount(); insertKey(index, key); if(values != null) { - Object[] newValues = createValueStorage(keyCount + 1); + V[] newValues = createValueStorage(keyCount + 1); DataUtils.copyWithGap(values, newValues, keyCount, index); values = newValues; setValueInternal(index, value); if (isPersistent()) { - addMemory(MEMORY_POINTER + map.getValueType().getMemory(value)); + addMemory(MEMORY_POINTER + map.evaluateMemoryForValue(value)); } } } @Override - public void insertNode(int index, Object key, Page childPage) { + public void insertNode(int index, K key, Page childPage) { throw new UnsupportedOperationException(); } @@ -1524,50 +1568,59 @@ public void remove(int index) { super.remove(index); if (values != null) { if(isPersistent()) { - Object old = getValue(index); - addMemory(-MEMORY_POINTER - map.getValueType().getMemory(old)); + if (map.isMemoryEstimationAllowed()) { + addMemory(-getMemory() / keyCount); + } else { + V old = getValue(index); + addMemory(-MEMORY_POINTER - map.evaluateMemoryForValue(old)); + } } - Object[] newValues = createValueStorage(keyCount - 1); + V[] newValues = createValueStorage(keyCount - 1); DataUtils.copyExcept(values, newValues, keyCount, index); values = newValues; } } @Override - public void removeAllRecursive() { - removePage(); + public int removeAllRecursive(long version) { + return removePage(version); + } + + @Override + public CursorPos getPrependCursorPos(CursorPos cursorPos) { + return new CursorPos<>(this, -1, cursorPos); } @Override - public CursorPos getAppendCursorPos(CursorPos cursorPos) { + public CursorPos getAppendCursorPos(CursorPos cursorPos) { int keyCount = getKeyCount(); - return new CursorPos(this, -keyCount - 1, cursorPos); + 
return new CursorPos<>(this, ~keyCount, cursorPos); } @Override protected void readPayLoad(ByteBuffer buff) { int keyCount = getKeyCount(); values = createValueStorage(keyCount); - map.getValueType().read(buff, values, getKeyCount(), false); + map.getValueType().read(buff, values, getKeyCount()); } @Override protected void writeValues(WriteBuffer buff) { - map.getValueType().write(buff, values, getKeyCount(), false); + map.getValueType().write(buff, values, getKeyCount()); } @Override protected void writeChildren(WriteBuffer buff, boolean withCounts) {} @Override - void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff) { + void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff, List toc) { if (!isSaved()) { - write(chunk, buff); + write(chunk, buff, toc); } } @Override - void writeEnd() {} + void releaseSavedPages() {} @Override public int getRawChildPageCount() { @@ -1576,13 +1629,18 @@ public int getRawChildPageCount() { @Override protected int calculateMemory() { +//* + return super.calculateMemory() + PAGE_LEAF_MEMORY + + (values == null ? 0 : map.evaluateMemoryForValues(values, getKeyCount())); +/*/ int keyCount = getKeyCount(); int mem = super.calculateMemory() + PAGE_LEAF_MEMORY + keyCount * MEMORY_POINTER; - DataType valueType = map.getValueType(); + DataType valueType = map.getValueType(); for (int i = 0; i < keyCount; i++) { - mem += valueType.getMemory(values[i]); + mem += getMemory(valueType, values[i]); } return mem; +//*/ } @Override diff --git a/h2/src/main/org/h2/mvstore/RootReference.java b/h2/src/main/org/h2/mvstore/RootReference.java index b106214dc8..dff79839c0 100644 --- a/h2/src/main/org/h2/mvstore/RootReference.java +++ b/h2/src/main/org/h2/mvstore/RootReference.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -13,24 +13,30 @@ * * @author Andrei Tokar */ -public final class RootReference -{ +public final class RootReference { + /** * The root page. */ - public final Page root; + public final Page root; /** * The version used for writing. */ public final long version; /** - * Indicator that map is locked for update. + * Counter of reentrant locks. + */ + private final byte holdCount; + /** + * Lock owner thread id. */ - final boolean lockedForUpdate; + private final long ownerId; /** * Reference to the previous root in the chain. + * That is the last root of the previous version, which had any data changes. + * Versions without any data changes are dropped from the chain, as it built. */ - public volatile RootReference previous; + volatile RootReference previous; /** * Counter for successful root updates. */ @@ -42,55 +48,64 @@ public final class RootReference /** * Size of the occupied part of the append buffer. 
*/ - final byte appendCounter; + private final byte appendCounter; + // This one is used to set root initially and for r/o snapshots - RootReference(Page root, long version) { + RootReference(Page root, long version) { this.root = root; this.version = version; this.previous = null; this.updateCounter = 1; this.updateAttemptCounter = 1; - this.lockedForUpdate = false; + this.holdCount = 0; + this.ownerId = 0; this.appendCounter = 0; } - RootReference(RootReference r, Page root, long updateAttemptCounter) { + private RootReference(RootReference r, Page root, long updateAttemptCounter) { this.root = root; this.version = r.version; this.previous = r.previous; this.updateCounter = r.updateCounter + 1; this.updateAttemptCounter = r.updateAttemptCounter + updateAttemptCounter; - this.lockedForUpdate = false; + this.holdCount = 0; + this.ownerId = 0; this.appendCounter = r.appendCounter; } // This one is used for locking - RootReference(RootReference r, int attempt) { + private RootReference(RootReference r, int attempt) { this.root = r.root; this.version = r.version; this.previous = r.previous; this.updateCounter = r.updateCounter + 1; this.updateAttemptCounter = r.updateAttemptCounter + attempt; - this.lockedForUpdate = true; + assert r.holdCount == 0 || r.ownerId == Thread.currentThread().getId() // + : Thread.currentThread().getId() + " " + r; + this.holdCount = (byte)(r.holdCount + 1); + this.ownerId = Thread.currentThread().getId(); this.appendCounter = r.appendCounter; } // This one is used for unlocking - RootReference(RootReference r, Page root, int appendCounter, boolean lockedForUpdate) { + private RootReference(RootReference r, Page root, boolean keepLocked, int appendCounter) { this.root = root; this.version = r.version; this.previous = r.previous; this.updateCounter = r.updateCounter; this.updateAttemptCounter = r.updateAttemptCounter; - this.lockedForUpdate = lockedForUpdate; + assert r.holdCount > 0 && r.ownerId == Thread.currentThread().getId() // + : 
Thread.currentThread().getId() + " " + r; + this.holdCount = (byte)(r.holdCount - (keepLocked ? 0 : 1)); + this.ownerId = this.holdCount == 0 ? 0 : Thread.currentThread().getId(); this.appendCounter = (byte) appendCounter; } // This one is used for version change - RootReference(RootReference r, long version, int attempt) { - RootReference previous = r; - RootReference tmp; + private RootReference(RootReference r, long version, int attempt) { + RootReference previous = r; + RootReference tmp; while ((tmp = previous.previous) != null && tmp.root == r.root) { previous = tmp; } @@ -99,20 +114,143 @@ public final class RootReference this.previous = previous; this.updateCounter = r.updateCounter + 1; this.updateAttemptCounter = r.updateAttemptCounter + attempt; - this.lockedForUpdate = r.lockedForUpdate; - this.appendCounter = r.appendCounter; + this.holdCount = r.holdCount == 0 ? 0 : (byte)(r.holdCount - 1); + this.ownerId = this.holdCount == 0 ? 0 : r.ownerId; + assert r.appendCounter == 0; + this.appendCounter = 0; + } + + /** + * Try to unlock. + * + * @param newRootPage the new root page + * @param attemptCounter the number of attempts so far + * @return the new, unlocked, root reference, or null if not successful + */ + RootReference updateRootPage(Page newRootPage, long attemptCounter) { + return isFree() ? tryUpdate(new RootReference<>(this, newRootPage, attemptCounter)) : null; + } + + /** + * Try to lock. + * + * @param attemptCounter the number of attempts so far + * @return the new, locked, root reference, or null if not successful + */ + RootReference tryLock(int attemptCounter) { + return canUpdate() ? 
tryUpdate(new RootReference<>(this, attemptCounter)) : null; + } + + /** + * Try to unlock, and if successful update the version + * + * @param version the version + * @param attempt the number of attempts so far + * @return the new, unlocked and updated, root reference, or null if not successful + */ + RootReference tryUnlockAndUpdateVersion(long version, int attempt) { + return canUpdate() ? tryUpdate(new RootReference<>(this, version, attempt)) : null; + } + + /** + * Update the page, possibly keeping it locked. + * + * @param page the page + * @param keepLocked whether to keep it locked + * @param appendCounter number of items in append buffer + * @return the new root reference, or null if not successful + */ + RootReference updatePageAndLockedStatus(Page page, boolean keepLocked, int appendCounter) { + return canUpdate() ? tryUpdate(new RootReference<>(this, page, keepLocked, appendCounter)) : null; + } + + /** + * Removed old versions that are not longer used. + * + * @param oldestVersionToKeep the oldest version that needs to be retained + */ + void removeUnusedOldVersions(long oldestVersionToKeep) { + // We need to keep at least one previous version (if any) here, + // because in order to retain whole history of some version + // we really need last root of the previous version. 
+ // Root labeled with version "X" is the LAST known root for that version + // and therefore the FIRST known root for the version "X+1" + for(RootReference rootRef = this; rootRef != null; rootRef = rootRef.previous) { + if (rootRef.version < oldestVersionToKeep) { + RootReference previous; + assert (previous = rootRef.previous) == null || previous.getAppendCounter() == 0 // + : oldestVersionToKeep + " " + rootRef.previous; + rootRef.previous = null; + } + } + } + + boolean isLocked() { + return holdCount != 0; + } + + private boolean isFree() { + return holdCount == 0; + } + + + private boolean canUpdate() { + return isFree() || ownerId == Thread.currentThread().getId(); + } + + public boolean isLockedByCurrentThread() { + return holdCount != 0 && ownerId == Thread.currentThread().getId(); + } + + private RootReference tryUpdate(RootReference updatedRootReference) { + assert canUpdate(); + return root.map.compareAndSetRoot(this, updatedRootReference) ? updatedRootReference : null; + } + + long getVersion() { + RootReference prev = previous; + return prev == null || prev.root != root || + prev.appendCounter != appendCounter ? + version : prev.getVersion(); + } + + /** + * Does the root have changes since the specified version? + * + * @param version to check against + * @param persistent whether map is backed by persistent storage + * @return true if this root has unsaved changes + */ + boolean hasChangesSince(long version, boolean persistent) { + return persistent && (root.isSaved() ? getAppendCounter() > 0 : getTotalCount() > 0) + || getVersion() > version; } int getAppendCounter() { return appendCounter & 0xff; } + /** + * Whether flushing is needed. 
+ * + * @return true if yes + */ + public boolean needFlush() { + return appendCounter != 0; + } + public long getTotalCount() { return root.getTotalCount() + getAppendCounter(); } @Override public String toString() { - return "RootReference(" + System.identityHashCode(root) + "," + version + "," + lockedForUpdate + ")"; + return "RootReference(" + System.identityHashCode(root) + + ", v=" + version + + ", owner=" + ownerId + (ownerId == Thread.currentThread().getId() ? "(current)" : "") + + ", holdCnt=" + holdCount + + ", keys=" + root.getTotalCount() + + ", append=" + getAppendCounter() + + ")"; } } diff --git a/h2/src/main/org/h2/mvstore/StreamStore.java b/h2/src/main/org/h2/mvstore/StreamStore.java index d94d259f4e..82a3944d83 100644 --- a/h2/src/main/org/h2/mvstore/StreamStore.java +++ b/h2/src/main/org/h2/mvstore/StreamStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore; @@ -433,7 +433,7 @@ public InputStream get(byte[] id) { byte[] getBlock(long key) { byte[] data = map.get(key); if (data == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_BLOCK_NOT_FOUND, "Block {0} not found", key); } @@ -506,7 +506,7 @@ public int read(byte[] b, int off, int len) throws IOException { if (buffer == null) { try { buffer = nextBuffer(); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { String msg = DataUtils.formatMessage( DataUtils.ERROR_BLOCK_NOT_FOUND, "Block not found in id {0}", diff --git a/h2/src/main/org/h2/mvstore/WriteBuffer.java b/h2/src/main/org/h2/mvstore/WriteBuffer.java index b6e511ab29..9dd2be2460 100644 --- a/h2/src/main/org/h2/mvstore/WriteBuffer.java +++ b/h2/src/main/org/h2/mvstore/WriteBuffer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; diff --git a/h2/src/main/org/h2/mvstore/cache/CacheLongKeyLIRS.java b/h2/src/main/org/h2/mvstore/cache/CacheLongKeyLIRS.java index 4aaab5d211..d75127e3a6 100644 --- a/h2/src/main/org/h2/mvstore/cache/CacheLongKeyLIRS.java +++ b/h2/src/main/org/h2/mvstore/cache/CacheLongKeyLIRS.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.cache; @@ -27,7 +27,7 @@ *

          * This class implements an approximation of the LIRS replacement algorithm * invented by Xiaodong Zhang and Song Jiang as described in - * http://www.cse.ohio-state.edu/~zhang/lirs-sigmetrics-02.html with a few + * https://web.cse.ohio-state.edu/~zhang.574/lirs-sigmetrics-02.html with a few * smaller changes: An additional queue for non-resident entries is used, to * prevent unbound memory usage. The maximum size of this queue is at most the * size of the rest of the stack. About 6.25% of the mapped entries are cold. @@ -313,14 +313,7 @@ public long getMaxMemory() { * @return the entry set */ public synchronized Set> entrySet() { - HashMap map = new HashMap<>(); - for (long k : keySet()) { - V value = peek(k); - if (value != null) { - map.put(k, value); - } - } - return map.entrySet(); + return getMap().entrySet(); } /** diff --git a/h2/src/main/org/h2/mvstore/cache/FilePathCache.java b/h2/src/main/org/h2/mvstore/cache/FilePathCache.java index af86a5a9f9..fc04065198 100644 --- a/h2/src/main/org/h2/mvstore/cache/FilePathCache.java +++ b/h2/src/main/org/h2/mvstore/cache/FilePathCache.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.cache; diff --git a/h2/src/main/org/h2/mvstore/cache/package.html b/h2/src/main/org/h2/mvstore/cache/package.html index 18a7ffa218..0821fb4922 100644 --- a/h2/src/main/org/h2/mvstore/cache/package.html +++ b/h2/src/main/org/h2/mvstore/cache/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mvstore/db/LobStorageMap.java b/h2/src/main/org/h2/mvstore/db/LobStorageMap.java new file mode 100644 index 0000000000..16d74229ae --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/LobStorageMap.java @@ -0,0 +1,563 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicLong; +import org.h2.api.ErrorCode; +import org.h2.engine.Database; +import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.StreamStore; +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.tx.TransactionStore; +import org.h2.mvstore.type.BasicDataType; +import org.h2.mvstore.type.ByteArrayDataType; +import org.h2.mvstore.type.LongDataType; +import org.h2.store.CountingReaderInputStream; +import org.h2.store.LobStorageFrontend; +import org.h2.store.LobStorageInterface; +import org.h2.store.RangeInputStream; +import org.h2.util.IOUtils; +import org.h2.util.StringUtils; +import org.h2.value.Value; +import org.h2.value.ValueBlob; +import org.h2.value.ValueClob; +import org.h2.value.ValueLob; +import org.h2.value.ValueNull; +import org.h2.value.lob.LobData; 
+import org.h2.value.lob.LobDataDatabase; + +/** + * This class stores LOB objects in the database, in maps. This is the back-end + * i.e. the server side of the LOB storage. + */ +public final class LobStorageMap implements LobStorageInterface +{ + private static final boolean TRACE = false; + + private final Database database; + final MVStore mvStore; + private final AtomicLong nextLobId = new AtomicLong(0); + + /** + * The lob metadata map. It contains the mapping from the lob id + * (which is a long) to the blob metadata, including stream store id (which is a byte array). + */ + private final MVMap lobMap; + + /** + * The lob metadata map for temporary lobs. It contains the mapping from the lob id + * (which is a long) to the stream store id (which is a byte array). + * + * Key: lobId (long) + * Value: streamStoreId (byte[]) + */ + private final MVMap tempLobMap; + + /** + * The reference map. It is used to remove data from the stream store: if no + * more entries for the given streamStoreId exist, the data is removed from + * the stream store. 
+ */ + private final MVMap refMap; + + private final StreamStore streamStore; + + + /** + * Open map used to store LOB metadata + * @param txStore containing map + * @return MVMap instance + */ + public static MVMap openLobMap(TransactionStore txStore) { + return txStore.openMap("lobMap", LongDataType.INSTANCE, LobStorageMap.BlobMeta.Type.INSTANCE); + } + + /** + * Open map used to store LOB data + * @param txStore containing map + * @return MVMap instance + */ + public static MVMap openLobDataMap(TransactionStore txStore) { + return txStore.openMap("lobData", LongDataType.INSTANCE, ByteArrayDataType.INSTANCE); + } + + public LobStorageMap(Database database) { + this.database = database; + Store s = database.getStore(); + TransactionStore txStore = s.getTransactionStore(); + mvStore = s.getMvStore(); + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + lobMap = openLobMap(txStore); + tempLobMap = txStore.openMap("tempLobMap", LongDataType.INSTANCE, ByteArrayDataType.INSTANCE); + refMap = txStore.openMap("lobRef", BlobReference.Type.INSTANCE, NullValueDataType.INSTANCE); + /* The stream store data map. + * + * Key: stream store block id (long). + * Value: data (byte[]). 
+ */ + MVMap dataMap = openLobDataMap(txStore); + streamStore = new StreamStore(dataMap); + // garbage collection of the last blocks + if (!database.isReadOnly()) { + // don't re-use block ids, except at the very end + Long last = dataMap.lastKey(); + if (last != null) { + streamStore.setNextKey(last + 1); + } + // find the latest lob ID + Long id1 = lobMap.lastKey(); + Long id2 = tempLobMap.lastKey(); // just in case we had unclean shutdown + long next = 1; + if (id1 != null) { + next = id1 + 1; + } + if (id2 != null) { + next = Math.max(next, id2 + 1); + } + nextLobId.set( next ); + } + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public ValueBlob createBlob(InputStream in, long maxLength) { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + if (maxLength != -1 + && maxLength <= database.getMaxLengthInplaceLob()) { + byte[] small = new byte[(int) maxLength]; + int len = IOUtils.readFully(in, small, (int) maxLength); + if (len > maxLength) { + throw new IllegalStateException( + "len > blobLength, " + len + " > " + maxLength); + } + if (len < small.length) { + small = Arrays.copyOf(small, len); + } + return ValueBlob.createSmall(small); + } + if (maxLength != -1) { + in = new RangeInputStream(in, 0L, maxLength); + } + return createBlob(in); + } catch (IllegalStateException e) { + throw DbException.get(ErrorCode.OBJECT_CLOSED, e); + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public ValueClob createClob(Reader reader, long maxLength) { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + // we multiple by 3 here to get the worst-case size in bytes + if (maxLength != -1 + && maxLength * 3 <= database.getMaxLengthInplaceLob()) { + char[] small = new char[(int) maxLength]; + int len = IOUtils.readFully(reader, small, (int) maxLength); + if (len > maxLength) { + throw new 
IllegalStateException( + "len > blobLength, " + len + " > " + maxLength); + } + byte[] utf8 = new String(small, 0, len) + .getBytes(StandardCharsets.UTF_8); + if (utf8.length > database.getMaxLengthInplaceLob()) { + throw new IllegalStateException( + "len > maxinplace, " + utf8.length + " > " + + database.getMaxLengthInplaceLob()); + } + return ValueClob.createSmall(utf8, len); + } + if (maxLength < 0) { + maxLength = Long.MAX_VALUE; + } + CountingReaderInputStream in = new CountingReaderInputStream(reader, maxLength); + ValueBlob blob = createBlob(in); + LobData lobData = blob.getLobData(); + return new ValueClob(lobData, blob.octetLength(), in.getLength()); + } catch (IllegalStateException e) { + throw DbException.get(ErrorCode.OBJECT_CLOSED, e); + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + private ValueBlob createBlob(InputStream in) throws IOException { + byte[] streamStoreId; + try { + streamStoreId = streamStore.put(in); + } catch (Exception e) { + throw DataUtils.convertToIOException(e); + } + long lobId = generateLobId(); + long length = streamStore.length(streamStoreId); + final int tableId = LobStorageFrontend.TABLE_TEMP; + tempLobMap.put(lobId, streamStoreId); + BlobReference key = new BlobReference(streamStoreId, lobId); + refMap.put(key, ValueNull.INSTANCE); + ValueBlob lob = new ValueBlob(new LobDataDatabase(database, tableId, lobId), length); + if (TRACE) { + trace("create " + tableId + "/" + lobId); + } + return lob; + } + + private long generateLobId() { + return nextLobId.getAndIncrement(); + } + + @Override + public boolean isReadOnly() { + return database.isReadOnly(); + } + + @Override + public ValueLob copyLob(ValueLob old, int tableId) { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + final LobDataDatabase lobData = (LobDataDatabase) old.getLobData(); + final int type = old.getValueType(); + final long oldLobId = 
lobData.getLobId(); + long octetLength = old.octetLength(); + // get source lob + final byte[] streamStoreId; + if (isTemporaryLob(lobData.getTableId())) { + streamStoreId = tempLobMap.get(oldLobId); + } else { + BlobMeta value = lobMap.get(oldLobId); + streamStoreId = value.streamStoreId; + } + // create destination lob + final long newLobId = generateLobId(); + if (isTemporaryLob(tableId)) { + tempLobMap.put(newLobId, streamStoreId); + } else { + BlobMeta value = new BlobMeta(streamStoreId, tableId, + type == Value.CLOB ? old.charLength() : octetLength, 0); + lobMap.put(newLobId, value); + } + BlobReference refMapKey = new BlobReference(streamStoreId, newLobId); + refMap.put(refMapKey, ValueNull.INSTANCE); + LobDataDatabase newLobData = new LobDataDatabase(database, tableId, newLobId); + ValueLob lob = type == Value.BLOB ? new ValueBlob(newLobData, octetLength) + : new ValueClob(newLobData, octetLength, old.charLength()); + if (TRACE) { + trace("copy " + lobData.getTableId() + "/" + lobData.getLobId() + + " > " + tableId + "/" + newLobId); + } + return lob; + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public InputStream getInputStream(long lobId, long byteCount) + throws IOException { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + byte[] streamStoreId = tempLobMap.get(lobId); + if (streamStoreId == null) { + BlobMeta value = lobMap.get(lobId); + streamStoreId = value.streamStoreId; + } + if (streamStoreId == null) { + throw DbException.get(ErrorCode.LOB_CLOSED_ON_TIMEOUT_1, "" + lobId); + } + InputStream inputStream = streamStore.get(streamStoreId); + return new LobInputStream(inputStream); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public InputStream getInputStream(long lobId, int tableId, long byteCount) + throws IOException { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + byte[] streamStoreId; + if (isTemporaryLob(tableId)) { + 
streamStoreId = tempLobMap.get(lobId); + } else { + BlobMeta value = lobMap.get(lobId); + streamStoreId = value.streamStoreId; + } + if (streamStoreId == null) { + throw DbException.get(ErrorCode.LOB_CLOSED_ON_TIMEOUT_1, "" + lobId); + } + InputStream inputStream = streamStore.get(streamStoreId); + return new LobInputStream(inputStream); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + private final class LobInputStream extends FilterInputStream { + + public LobInputStream(InputStream in) { + super(in); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + return super.read(b, off, len); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public int read() throws IOException { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + return super.read(); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + } + + @Override + public void removeAllForTable(int tableId) { + if (mvStore.isClosed()) { + return; + } + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + if (isTemporaryLob(tableId)) { + final Iterator iter = tempLobMap.keyIterator(0L); + while (iter.hasNext()) { + long lobId = iter.next(); + removeLob(tableId, lobId); + } + tempLobMap.clear(); + } else { + final ArrayList list = new ArrayList<>(); + // This might not be very efficient, but should only happen + // on DROP TABLE. + // To speed it up, we would need yet another map. 
+ for (Entry e : lobMap.entrySet()) { + BlobMeta value = e.getValue(); + if (value.tableId == tableId) { + list.add(e.getKey()); + } + } + for (long lobId : list) { + removeLob(tableId, lobId); + } + } + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public void removeLob(ValueLob lob) { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + LobDataDatabase lobData = (LobDataDatabase) lob.getLobData(); + int tableId = lobData.getTableId(); + long lobId = lobData.getLobId(); + removeLob(tableId, lobId); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + private void removeLob(int tableId, long lobId) { + if (TRACE) { + trace("remove " + tableId + "/" + lobId); + } + byte[] streamStoreId; + if (isTemporaryLob(tableId)) { + streamStoreId = tempLobMap.remove(lobId); + if (streamStoreId == null) { + // already removed + return; + } + } else { + BlobMeta value = lobMap.remove(lobId); + if (value == null) { + // already removed + return; + } + streamStoreId = value.streamStoreId; + } + BlobReference key = new BlobReference(streamStoreId, lobId); + Value existing = refMap.remove(key); + assert existing != null; + // check if there are more entries for this streamStoreId + key = new BlobReference(streamStoreId, 0L); + BlobReference value = refMap.ceilingKey(key); + boolean hasMoreEntries = false; + if (value != null) { + byte[] s2 = value.streamStoreId; + if (Arrays.equals(streamStoreId, s2)) { + if (TRACE) { + trace(" stream still needed in lob " + value.lobId); + } + hasMoreEntries = true; + } + } + if (!hasMoreEntries) { + if (TRACE) { + trace(" remove stream " + StringUtils.convertBytesToHex(streamStoreId)); + } + streamStore.remove(streamStoreId); + } + } + + private static boolean isTemporaryLob(int tableId) { + return tableId == LobStorageFrontend.TABLE_ID_SESSION_VARIABLE || tableId == LobStorageFrontend.TABLE_TEMP + || tableId == LobStorageFrontend.TABLE_RESULT; + } + + private static void 
trace(String op) { + System.out.println("[" + Thread.currentThread().getName() + "] LOB " + op); + } + + + public static final class BlobReference implements Comparable + { + public final byte[] streamStoreId; + public final long lobId; + + public BlobReference(byte[] streamStoreId, long lobId) { + this.streamStoreId = streamStoreId; + this.lobId = lobId; + } + + @Override + public int compareTo(BlobReference other) { + int res = Integer.compare(streamStoreId.length, other.streamStoreId.length); + if (res == 0) { + for (int i = 0; res == 0 && i < streamStoreId.length; i++) { + res = Byte.compare(streamStoreId[i], other.streamStoreId[i]); + } + if (res == 0) { + res = Long.compare(lobId, other.lobId); + } + } + return res; + } + + public static final class Type extends BasicDataType { + public static final Type INSTANCE = new Type(); + + private Type() {} + + @Override + public int getMemory(BlobReference blobReference) { + return blobReference.streamStoreId.length + 8; + } + + @Override + public int compare(BlobReference one, BlobReference two) { + return one == two ? 0 : one == null ? 1 : two == null ? -1 : one.compareTo(two); + } + + @Override + public void write(WriteBuffer buff, BlobReference blobReference) { + buff.putVarInt(blobReference.streamStoreId.length); + buff.put(blobReference.streamStoreId); + buff.putVarLong(blobReference.lobId); + } + + @Override + public BlobReference read(ByteBuffer buff) { + int len = DataUtils.readVarInt(buff); + byte[] streamStoreId = new byte[len]; + buff.get(streamStoreId); + long blobId = DataUtils.readVarLong(buff); + return new BlobReference(streamStoreId, blobId); + } + + @Override + public BlobReference[] createStorage(int size) { + return new BlobReference[size]; + } + } + } + + public static final class BlobMeta + { + /** + * Stream identifier. It is used as a key in LOB data map. 
+ */ + public final byte[] streamStoreId; + final int tableId; + final long byteCount; + final long hash; + + public BlobMeta(byte[] streamStoreId, int tableId, long byteCount, long hash) { + this.streamStoreId = streamStoreId; + this.tableId = tableId; + this.byteCount = byteCount; + this.hash = hash; + } + + public static final class Type extends BasicDataType { + public static final Type INSTANCE = new Type(); + + private Type() { + } + + @Override + public int getMemory(BlobMeta blobMeta) { + return blobMeta.streamStoreId.length + 20; + } + + @Override + public void write(WriteBuffer buff, BlobMeta blobMeta) { + buff.putVarInt(blobMeta.streamStoreId.length); + buff.put(blobMeta.streamStoreId); + buff.putVarInt(blobMeta.tableId); + buff.putVarLong(blobMeta.byteCount); + buff.putLong(blobMeta.hash); + } + + @Override + public BlobMeta read(ByteBuffer buff) { + int len = DataUtils.readVarInt(buff); + byte[] streamStoreId = new byte[len]; + buff.get(streamStoreId); + int tableId = DataUtils.readVarInt(buff); + long byteCount = DataUtils.readVarLong(buff); + long hash = buff.getLong(); + return new BlobMeta(streamStoreId, tableId, byteCount, hash); + } + + @Override + public BlobMeta[] createStorage(int size) { + return new BlobMeta[size]; + } + } + } +} diff --git a/h2/src/main/org/h2/mvstore/db/MVDelegateIndex.java b/h2/src/main/org/h2/mvstore/db/MVDelegateIndex.java index 4014e08d51..0cceba0c96 100644 --- a/h2/src/main/org/h2/mvstore/db/MVDelegateIndex.java +++ b/h2/src/main/org/h2/mvstore/db/MVDelegateIndex.java @@ -1,61 +1,70 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.db; import java.util.List; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; -import org.h2.index.BaseIndex; + +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; import org.h2.index.Cursor; import org.h2.index.IndexType; import org.h2.message.DbException; +import org.h2.mvstore.MVMap; import org.h2.result.Row; +import org.h2.result.RowFactory; import org.h2.result.SearchRow; import org.h2.result.SortOrder; import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.TableFilter; -import org.h2.value.ValueLong; +import org.h2.value.VersionedValue; /** * An index that delegates indexing to another index. */ -public class MVDelegateIndex extends BaseIndex implements MVIndex { +public class MVDelegateIndex extends MVIndex { private final MVPrimaryIndex mainIndex; - public MVDelegateIndex(MVTable table, int id, String name, - MVPrimaryIndex mainIndex, - IndexType indexType) { - super(table, id, name, - IndexColumn.wrap(new Column[] { table.getColumn(mainIndex.getMainIndexColumn()) }), - indexType); + public MVDelegateIndex(MVTable table, int id, String name, MVPrimaryIndex mainIndex, IndexType indexType) { + super(table, id, name, IndexColumn.wrap(new Column[] { table.getColumn(mainIndex.getMainIndexColumn()) }), + 1, indexType); this.mainIndex = mainIndex; if (id < 0) { - throw DbException.throwInternalError(name); + throw DbException.getInternalError(name); } } + @Override + public RowFactory getRowFactory() { + return mainIndex.getRowFactory(); + } + @Override public void addRowsToBuffer(List rows, String bufferName) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } @Override public void addBufferedRows(List bufferNames) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); + } + + @Override + public MVMap> getMVMap() { + return mainIndex.getMVMap(); } @Override - 
public void add(Session session, Row row) { + public void add(SessionLocal session, Row row) { // nothing to do } @Override - public Row getRow(Session session, long key) { + public Row getRow(SessionLocal session, long key) { return mainIndex.getRow(session, key); } @@ -70,21 +79,17 @@ public boolean canGetFirstOrLast() { } @Override - public void close(Session session) { + public void close(SessionLocal session) { // nothing to do } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - ValueLong min = mainIndex.getKey(first, ValueLong.MIN, ValueLong.MIN); - // ifNull is MIN as well, because the column is never NULL - // so avoid returning all rows (returning one row is OK) - ValueLong max = mainIndex.getKey(last, ValueLong.MAX, ValueLong.MIN); - return mainIndex.find(session, min, max); + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + return mainIndex.find(session, first, last); } @Override - public Cursor findFirstOrLast(Session session, boolean first) { + public Cursor findFirstOrLast(SessionLocal session, boolean first) { return mainIndex.findFirstOrLast(session, first); } @@ -102,10 +107,10 @@ public boolean isFirstColumn(Column column) { } @Override - public double getCost(Session session, int[] masks, + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { - return 10 * getCostRangeIndex(masks, mainIndex.getRowCountApproximation(), + return 10 * getCostRangeIndex(masks, mainIndex.getRowCountApproximation(session), filters, filter, sortOrder, true, allColumnsSet); } @@ -115,43 +120,33 @@ public boolean needRebuild() { } @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { // nothing to do } @Override - public void update(Session session, Row oldRow, Row newRow) { + public void update(SessionLocal session, Row oldRow, Row newRow) { // nothing to 
do } @Override - public void remove(Session session) { + public void remove(SessionLocal session) { mainIndex.setMainIndexColumn(SearchRow.ROWID_INDEX); } @Override - public void truncate(Session session) { + public void truncate(SessionLocal session) { // nothing to do } @Override - public void checkRename() { - // ok - } - - @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return mainIndex.getRowCount(session); } @Override - public long getRowCountApproximation() { - return mainIndex.getRowCountApproximation(); - } - - @Override - public long getDiskSpaceUsed() { - return 0; + public long getRowCountApproximation(SessionLocal session) { + return mainIndex.getRowCountApproximation(session); } } diff --git a/h2/src/main/org/h2/mvstore/db/MVInDoubtTransaction.java b/h2/src/main/org/h2/mvstore/db/MVInDoubtTransaction.java new file mode 100644 index 0000000000..e8e9c01dae --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/MVInDoubtTransaction.java @@ -0,0 +1,47 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import org.h2.mvstore.MVStore; +import org.h2.mvstore.tx.Transaction; +import org.h2.store.InDoubtTransaction; + +/** + * An in-doubt transaction. 
+ */ +final class MVInDoubtTransaction implements InDoubtTransaction { + + private final MVStore store; + private final Transaction transaction; + private int state = InDoubtTransaction.IN_DOUBT; + + MVInDoubtTransaction(MVStore store, Transaction transaction) { + this.store = store; + this.transaction = transaction; + } + + @Override + public void setState(int state) { + if (state == InDoubtTransaction.COMMIT) { + transaction.commit(); + } else { + transaction.rollback(); + } + store.commit(); + this.state = state; + } + + @Override + public int getState() { + return state; + } + + @Override + public String getTransactionName() { + return transaction.getName(); + } + +} diff --git a/h2/src/main/org/h2/mvstore/db/MVIndex.java b/h2/src/main/org/h2/mvstore/db/MVIndex.java index 157f4f7373..a831e6d9c3 100644 --- a/h2/src/main/org/h2/mvstore/db/MVIndex.java +++ b/h2/src/main/org/h2/mvstore/db/MVIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.db; @@ -8,12 +8,22 @@ import java.util.List; import org.h2.index.Index; +import org.h2.index.IndexType; +import org.h2.mvstore.MVMap; import org.h2.result.Row; +import org.h2.table.IndexColumn; +import org.h2.table.Table; +import org.h2.value.VersionedValue; /** * An index that stores the data in an MVStore. */ -public interface MVIndex extends Index { +public abstract class MVIndex extends Index { + + protected MVIndex(Table newTable, int id, String name, IndexColumn[] newIndexColumns, int uniqueColumnCount, + IndexType newIndexType) { + super(newTable, id, name, newIndexColumns, uniqueColumnCount, newIndexType); + } /** * Add the rows to a temporary storage (not to the index yet). 
The rows are @@ -22,7 +32,7 @@ public interface MVIndex extends Index { * @param rows the rows * @param bufferName the name of the temporary storage */ - void addRowsToBuffer(List rows, String bufferName); + public abstract void addRowsToBuffer(List rows, String bufferName); /** * Add all the index data from the buffers to the index. The index will @@ -30,6 +40,8 @@ public interface MVIndex extends Index { * * @param bufferNames the names of the temporary storage */ - void addBufferedRows(List bufferNames); + public abstract void addBufferedRows(List bufferNames); + + public abstract MVMap> getMVMap(); } diff --git a/h2/src/main/org/h2/mvstore/db/MVPlainTempResult.java b/h2/src/main/org/h2/mvstore/db/MVPlainTempResult.java index 4e6dbd1835..e00e19e7ce 100644 --- a/h2/src/main/org/h2/mvstore/db/MVPlainTempResult.java +++ b/h2/src/main/org/h2/mvstore/db/MVPlainTempResult.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.db; @@ -11,7 +11,9 @@ import org.h2.mvstore.Cursor; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVMap.Builder; +import org.h2.mvstore.type.LongDataType; import org.h2.result.ResultExternal; +import org.h2.result.RowFactory.DefaultRowFactory; import org.h2.value.Value; import org.h2.value.ValueRow; @@ -49,7 +51,9 @@ private MVPlainTempResult(MVPlainTempResult parent) { } /** - * Creates a new plain temporary result. + * Creates a new plain temporary result. This result does not sort its rows, + * but it can be used in index-sorted queries and it can preserve additional + * columns for WITH TIES processing. 
* * @param database * database @@ -57,12 +61,17 @@ private MVPlainTempResult(MVPlainTempResult parent) { * column expressions * @param visibleColumnCount * count of visible columns + * @param resultColumnCount + * the number of columns including visible columns and additional + * virtual columns for ORDER BY clause */ - MVPlainTempResult(Database database, Expression[] expressions, int visibleColumnCount) { - super(database, expressions, visibleColumnCount); - ValueDataType valueType = new ValueDataType(database, new int[expressions.length]); - Builder builder = new MVMap.Builder() - .valueType(valueType).singleWriter(); + MVPlainTempResult(Database database, Expression[] expressions, int visibleColumnCount, int resultColumnCount) { + super(database, expressions, visibleColumnCount, resultColumnCount); + ValueDataType valueType = new ValueDataType(database, new int[resultColumnCount]); + valueType.setRowFactory(DefaultRowFactory.INSTANCE.createRowFactory(database, database.getCompareMode(), + database, expressions, null, false)); + Builder builder = new MVMap.Builder().keyType(LongDataType.INSTANCE) + .valueType(valueType).singleWriter(); map = store.openMap("tmp", builder); } @@ -99,11 +108,7 @@ public Value[] next() { return null; } cursor.next(); - Value[] currentRow = cursor.getValue().getList(); - if (hasEnum) { - fixEnum(currentRow); - } - return currentRow; + return cursor.getValue().getList(); } @Override diff --git a/h2/src/main/org/h2/mvstore/db/MVPrimaryIndex.java b/h2/src/main/org/h2/mvstore/db/MVPrimaryIndex.java index cc32853e6b..bf1a576a7f 100644 --- a/h2/src/main/org/h2/mvstore/db/MVPrimaryIndex.java +++ b/h2/src/main/org/h2/mvstore/db/MVPrimaryIndex.java @@ -1,26 +1,27 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.db; -import java.util.AbstractMap; -import java.util.Collections; -import java.util.Iterator; import java.util.List; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicLong; import org.h2.api.ErrorCode; -import org.h2.command.dml.AllColumnsForPlan; +import org.h2.command.query.AllColumnsForPlan; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.index.BaseIndex; +import org.h2.engine.SessionLocal; import org.h2.index.Cursor; import org.h2.index.IndexType; +import org.h2.index.SingleRowCursor; import org.h2.message.DbException; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.tx.Transaction; import org.h2.mvstore.tx.TransactionMap; +import org.h2.mvstore.tx.TransactionMap.TMIterator; +import org.h2.mvstore.type.LongDataType; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.result.SortOrder; @@ -28,39 +29,35 @@ import org.h2.table.IndexColumn; import org.h2.table.TableFilter; import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueLong; +import org.h2.value.ValueLob; import org.h2.value.ValueNull; +import org.h2.value.VersionedValue; /** * A table stored in a MVStore. 
*/ -public class MVPrimaryIndex extends BaseIndex { +public class MVPrimaryIndex extends MVIndex { private final MVTable mvTable; private final String mapName; - private final TransactionMap dataMap; + private final TransactionMap dataMap; private final AtomicLong lastKey = new AtomicLong(); private int mainIndexColumn = SearchRow.ROWID_INDEX; - public MVPrimaryIndex(Database db, MVTable table, int id, - IndexColumn[] columns, IndexType indexType) { - super(table, id, table.getName() + "_DATA", columns, indexType); + public MVPrimaryIndex(Database db, MVTable table, int id, IndexColumn[] columns, IndexType indexType) { + super(table, id, table.getName() + "_DATA", columns, 0, indexType); this.mvTable = table; - int[] sortTypes = new int[columns.length]; - for (int i = 0; i < columns.length; i++) { - sortTypes[i] = SortOrder.ASCENDING; - } - ValueDataType keyType = new ValueDataType(); - ValueDataType valueType = new ValueDataType(db, sortTypes); + RowDataType valueType = table.getRowFactory().getRowDataType(); mapName = "table." + getId(); - assert db.isStarting() || !db.getStore().getMvStore().getMetaMap().containsKey("name." + mapName); Transaction t = mvTable.getTransactionBegin(); - dataMap = t.openMap(mapName, keyType, valueType); + dataMap = t.openMap(mapName, LongDataType.INSTANCE, valueType); dataMap.map.setVolatile(!table.isPersistData() || !indexType.isPersistent()); + if (!db.isStarting()) { + dataMap.clear(); + } t.commit(); - Value k = dataMap.map.lastKey(); // include uncommitted keys as well - lastKey.set(k == null ? 0 : k.getLong()); + Long k = dataMap.map.lastKey(); // include uncommitted keys as well + lastKey.set(k == null ? 
0 : k); } @Override @@ -70,7 +67,7 @@ public String getCreateSQL() { @Override public String getPlanSQL() { - return table.getSQL(new StringBuilder(), false).append(".tableScan").toString(); + return table.getSQL(new StringBuilder(), TRACE_SQL_FLAGS).append(".tableScan").toString(); } public void setMainIndexColumn(int mainIndexColumn) { @@ -82,12 +79,12 @@ public int getMainIndexColumn() { } @Override - public void close(Session session) { + public void close(SessionLocal session) { // ok } @Override - public void add(Session session, Row row) { + public void add(SessionLocal session, Row row) { if (mainIndexColumn == SearchRow.ROWID_INDEX) { if (row.getKey() == 0) { row.setKey(lastKey.incrementAndGet()); @@ -100,39 +97,32 @@ public void add(Session session, Row row) { if (mvTable.getContainsLargeObject()) { for (int i = 0, len = row.getColumnCount(); i < len; i++) { Value v = row.getValue(i); - Value v2 = v.copy(database, getId()); - if (v2.isLinkedToTable()) { - session.removeAtCommitStop(v2); - } - if (v != v2) { - row.setValue(i, v2); + if (v instanceof ValueLob) { + ValueLob lob = ((ValueLob) v).copy(database, getId()); + session.removeAtCommitStop(lob); + if (v != lob) { + row.setValue(i, lob); + } } } } - TransactionMap map = getMap(session); + TransactionMap map = getMap(session); long rowKey = row.getKey(); - Value key = ValueLong.get(rowKey); try { - Value oldValue = map.putIfAbsent(key, ValueArray.get(row.getValueList())); - if (oldValue != null) { - StringBuilder builder = new StringBuilder("PRIMARY KEY ON "); - table.getSQL(builder, false); - if (mainIndexColumn >= 0 && mainIndexColumn < indexColumns.length) { - builder.append('('); - indexColumns[mainIndexColumn].getSQL(builder, false).append(')'); - } + Row old = (Row)map.putIfAbsent(rowKey, row); + if (old != null) { int errorCode = ErrorCode.CONCURRENT_UPDATE_1; - if (map.get(key) != null) { + if (map.getImmediate(rowKey) != null || map.getFromSnapshot(rowKey) != null) { // committed errorCode = 
ErrorCode.DUPLICATE_KEY_1; } - builder.append(' ').append(oldValue); - DbException e = DbException.get(errorCode, builder.toString()); + DbException e = DbException.get(errorCode, + getDuplicatePrimaryKeyMessage(mainIndexColumn).append(' ').append(old).toString()); e.setSource(this); throw e; } - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw mvTable.convertException(e); } // because it's possible to directly update the key using the _rowid_ @@ -144,30 +134,30 @@ public void add(Session session, Row row) { } @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { if (mvTable.getContainsLargeObject()) { for (int i = 0, len = row.getColumnCount(); i < len; i++) { Value v = row.getValue(i); - if (v.isLinkedToTable()) { - session.removeAtCommit(v); + if (v instanceof ValueLob) { + session.removeAtCommit((ValueLob) v); } } } - TransactionMap map = getMap(session); + TransactionMap map = getMap(session); try { - Value old = map.remove(ValueLong.get(row.getKey())); - if (old == null) { + Row existing = (Row)map.remove(row.getKey()); + if (existing == null) { StringBuilder builder = new StringBuilder(); - getSQL(builder, false).append(": ").append(row.getKey()); + getSQL(builder, TRACE_SQL_FLAGS).append(": ").append(row.getKey()); throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, builder.toString()); } - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw mvTable.convertException(e); } } @Override - public void update(Session session, Row oldRow, Row newRow) { + public void update(SessionLocal session, Row oldRow, Row newRow) { if (mainIndexColumn != SearchRow.ROWID_INDEX) { long c = newRow.getValue(mainIndexColumn).getLong(); newRow.setKey(c); @@ -179,30 +169,30 @@ public void update(Session session, Row oldRow, Row newRow) { for (int i = 0, len = oldRow.getColumnCount(); i < len; i++) { Value oldValue = oldRow.getValue(i); Value newValue = 
newRow.getValue(i); - if(oldValue != newValue) { - if (oldValue.isLinkedToTable()) { - session.removeAtCommit(oldValue); - } - Value v2 = newValue.copy(database, getId()); - if (v2.isLinkedToTable()) { - session.removeAtCommitStop(v2); + if (oldValue != newValue) { + if (oldValue instanceof ValueLob) { + session.removeAtCommit((ValueLob) oldValue); } - if (newValue != v2) { - newRow.setValue(i, v2); + if (newValue instanceof ValueLob) { + ValueLob lob = ((ValueLob) newValue).copy(database, getId()); + session.removeAtCommitStop(lob); + if (newValue != lob) { + newRow.setValue(i, lob); + } } } } } - TransactionMap map = getMap(session); + TransactionMap map = getMap(session); try { - Value existing = map.put(ValueLong.get(key), ValueArray.get(newRow.getValueList())); + Row existing = (Row)map.put(key, newRow); if (existing == null) { StringBuilder builder = new StringBuilder(); - getSQL(builder, false).append(": ").append(key); + getSQL(builder, TRACE_SQL_FLAGS).append(": ").append(key); throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, builder.toString()); } - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw mvTable.convertException(e); } @@ -221,76 +211,70 @@ public void update(Session session, Row oldRow, Row newRow) { * @param row to lock * @return row object if it exists */ - Row lockRow(Session session, Row row) { - TransactionMap map = getMap(session); + Row lockRow(SessionLocal session, Row row) { + TransactionMap map = getMap(session); long key = row.getKey(); - ValueArray array = (ValueArray) lockRow(map, key); - return array == null ? 
null : getRow(session, key, array); + return lockRow(map, key); } - private Value lockRow(TransactionMap map, long key) { + private Row lockRow(TransactionMap map, long key) { try { - return map.lock(ValueLong.get(key)); - } catch (IllegalStateException ex) { + return setRowKey((Row) map.lock(key), key); + } catch (MVStoreException ex) { throw mvTable.convertException(ex); } } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - ValueLong min = extractPKFromRow(first, ValueLong.MIN); - ValueLong max = extractPKFromRow(last, ValueLong.MAX); - TransactionMap map = getMap(session); - return new MVStoreCursor(session, map.entryIterator(min, max)); + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + long min = extractPKFromRow(first, Long.MIN_VALUE); + long max = extractPKFromRow(last, Long.MAX_VALUE); + return find(session, min, max); } - private ValueLong extractPKFromRow(SearchRow row, ValueLong defaultValue) { - ValueLong result; + private long extractPKFromRow(SearchRow row, long defaultValue) { + long result; if (row == null) { result = defaultValue; } else if (mainIndexColumn == SearchRow.ROWID_INDEX) { - result = ValueLong.get(row.getKey()); + result = row.getKey(); } else { - ValueLong v = (ValueLong) row.getValue(mainIndexColumn); + Value v = row.getValue(mainIndexColumn); if (v == null) { - result = ValueLong.get(row.getKey()); + result = row.getKey(); + } else if (v == ValueNull.INSTANCE) { + result = 0L; } else { - result = v; + result = v.getLong(); } } return result; } + @Override public MVTable getTable() { return mvTable; } @Override - public Row getRow(Session session, long key) { - TransactionMap map = getMap(session); - Value v = map.get(ValueLong.get(key)); - if (v == null) { - throw DbException.get(ErrorCode.ROW_NOT_FOUND_IN_PRIMARY_INDEX, - getSQL(false), String.valueOf(key)); + public Row getRow(SessionLocal session, long key) { + TransactionMap map = getMap(session); + Row row = (Row) 
map.getFromSnapshot(key); + if (row == null) { + throw DbException.get(ErrorCode.ROW_NOT_FOUND_IN_PRIMARY_INDEX, getTraceSQL(), String.valueOf(key)); } - return getRow(session, key, (ValueArray) v); - } - - private static Row getRow(Session session, long key, ValueArray array) { - Row row = session.createRow(array.getList(), 0); - row.setKey(key); - return row; + return setRowKey(row, key); } @Override - public double getCost(Session session, int[] masks, + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { try { return 10 * getCostRangeIndex(masks, dataMap.sizeAsLongMax(), filters, filter, sortOrder, true, allColumnsSet); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @@ -307,8 +291,8 @@ public boolean isFirstColumn(Column column) { } @Override - public void remove(Session session) { - TransactionMap map = getMap(session); + public void remove(SessionLocal session) { + TransactionMap map = getMap(session); if (!map.isClosed()) { Transaction t = session.getTransaction(); t.removeMap(map); @@ -316,12 +300,11 @@ public void remove(Session session) { } @Override - public void truncate(Session session) { - TransactionMap map = getMap(session); + public void truncate(SessionLocal session) { if (mvTable.getContainsLargeObject()) { database.getLobStorage().removeAllForTable(table.getId()); } - map.clear(); + getMap(session).clear(); } @Override @@ -330,19 +313,10 @@ public boolean canGetFirstOrLast() { } @Override - public Cursor findFirstOrLast(Session session, boolean first) { - TransactionMap map = getMap(session); - ValueLong v = (ValueLong) (first ? 
map.firstKey() : map.lastKey()); - if (v == null) { - return new MVStoreCursor(session, - Collections.> emptyIterator()); - } - Value value = map.get(v); - Entry e = new AbstractMap.SimpleImmutableEntry(v, value); - List> list = Collections.singletonList(e); - MVStoreCursor c = new MVStoreCursor(session, list.iterator()); - c.next(); - return c; + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + TransactionMap map = getMap(session); + Entry entry = first ? map.firstEntry() : map.lastEntry(); + return new SingleRowCursor(entry != null ? setRowKey((Row) entry.getValue(), entry.getKey()) : null); } @Override @@ -351,9 +325,8 @@ public boolean needRebuild() { } @Override - public long getRowCount(Session session) { - TransactionMap map = getMap(session); - return map.sizeAsLong(); + public long getRowCount(SessionLocal session) { + return getMap(session).sizeAsLong(); } /** @@ -366,7 +339,7 @@ public long getRowCountMax() { } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return getRowCountMax(); } @@ -380,42 +353,21 @@ public String getMapName() { } @Override - public void checkRename() { - // ok + public void addRowsToBuffer(List rows, String bufferName) { + throw new UnsupportedOperationException(); } - /** - * Get the key from the row. 
- * - * @param row the row - * @param ifEmpty the value to use if the row is empty - * @param ifNull the value to use if the column is NULL - * @return the key - */ - ValueLong getKey(SearchRow row, ValueLong ifEmpty, ValueLong ifNull) { - if (row == null) { - return ifEmpty; - } - Value v = row.getValue(mainIndexColumn); - if (v == null) { - throw DbException.throwInternalError(row.toString()); - } else if (v == ValueNull.INSTANCE) { - return ifNull; - } - return (ValueLong) v.convertTo(Value.LONG); + @Override + public void addBufferedRows(List bufferNames) { + throw new UnsupportedOperationException(); } - /** - * Search for a specific row or a set of rows. - * - * @param session the session - * @param first the key of the first row - * @param last the key of the last row - * @return the cursor - */ - Cursor find(Session session, ValueLong first, ValueLong last) { - TransactionMap map = getMap(session); - return new MVStoreCursor(session, map.entryIterator(first, last)); + private Cursor find(SessionLocal session, Long first, Long last) { + TransactionMap map = getMap(session); + if (first != null && last != null && first.longValue() == last.longValue()) { + return new SingleRowCursor(setRowKey((Row) map.getFromSnapshot(first), first)); + } + return new MVStoreCursor(map.entryIterator(first, last)); } @Override @@ -429,7 +381,7 @@ public boolean isRowIdIndex() { * @param session the session * @return the map */ - TransactionMap getMap(Session session) { + TransactionMap getMap(SessionLocal session) { if (session == null) { return dataMap; } @@ -437,18 +389,28 @@ TransactionMap getMap(Session session) { return dataMap.getInstance(t); } + @Override + public MVMap> getMVMap() { + return dataMap.map; + } + + private static Row setRowKey(Row row, long key) { + if (row != null && row.getKey() == 0) { + row.setKey(key); + } + return row; + } + /** * A cursor. 
*/ - static class MVStoreCursor implements Cursor { + static final class MVStoreCursor implements Cursor { - private final Session session; - private final Iterator> it; - private Entry current; + private final TMIterator> it; + private Entry current; private Row row; - public MVStoreCursor(Session session, Iterator> it) { - this.session = session; + public MVStoreCursor(TMIterator> it) { this.it = it; } @@ -456,9 +418,10 @@ public MVStoreCursor(Session session, Iterator> it) { public Row get() { if (row == null) { if (current != null) { - ValueArray array = (ValueArray) current.getValue(); - row = session.createRow(array.getList(), 0); - row.setKey(current.getKey().getLong()); + row = (Row)current.getValue(); + if (row.getKey() == 0) { + row.setKey(current.getKey()); + } } } return row; @@ -471,7 +434,7 @@ public SearchRow getSearchRow() { @Override public boolean next() { - current = it.hasNext() ? it.next() : null; + current = it.fetchNext(); row = null; return current != null; } @@ -480,6 +443,5 @@ public boolean next() { public boolean previous() { throw DbException.getUnsupportedException("previous"); } - } } diff --git a/h2/src/main/org/h2/mvstore/db/MVSecondaryIndex.java b/h2/src/main/org/h2/mvstore/db/MVSecondaryIndex.java index f2134db612..0792c6a17c 100644 --- a/h2/src/main/org/h2/mvstore/db/MVSecondaryIndex.java +++ b/h2/src/main/org/h2/mvstore/db/MVSecondaryIndex.java @@ -1,77 +1,69 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.db; -import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Objects; import java.util.PriorityQueue; import java.util.Queue; import org.h2.api.ErrorCode; -import org.h2.command.dml.AllColumnsForPlan; +import org.h2.command.query.AllColumnsForPlan; import org.h2.engine.Database; -import org.h2.engine.Mode; -import org.h2.engine.Session; -import org.h2.index.BaseIndex; +import org.h2.engine.SessionLocal; import org.h2.index.Cursor; import org.h2.index.IndexType; +import org.h2.index.SingleRowCursor; import org.h2.message.DbException; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.tx.Transaction; import org.h2.mvstore.tx.TransactionMap; +import org.h2.mvstore.tx.TransactionMap.TMIterator; +import org.h2.mvstore.type.DataType; import org.h2.result.Row; +import org.h2.result.RowFactory; import org.h2.result.SearchRow; import org.h2.result.SortOrder; -import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.TableFilter; -import org.h2.value.CompareMode; import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueLong; import org.h2.value.ValueNull; +import org.h2.value.VersionedValue; /** - * A table stored in a MVStore. + * An index stored in a MVStore. */ -public final class MVSecondaryIndex extends BaseIndex implements MVIndex { +public final class MVSecondaryIndex extends MVIndex { /** * The multi-value table. 
*/ - final MVTable mvTable; - private final int keyColumns; - private final TransactionMap dataMap; + private final MVTable mvTable; + private final TransactionMap dataMap; public MVSecondaryIndex(Database db, MVTable table, int id, String indexName, - IndexColumn[] columns, IndexType indexType) { - super(table, id, indexName, columns, indexType); + IndexColumn[] columns, int uniqueColumnCount, IndexType indexType) { + super(table, id, indexName, columns, uniqueColumnCount, indexType); this.mvTable = table; if (!database.isStarting()) { checkIndexColumnTypes(columns); } - // always store the row key in the map key, - // even for unique indexes, as some of the index columns could be null - keyColumns = columns.length + 1; String mapName = "index." + getId(); - assert db.isStarting() || !db.getStore().getMvStore().getMetaMap().containsKey("name." + mapName); - int[] sortTypes = new int[keyColumns]; - for (int i = 0; i < columns.length; i++) { - sortTypes[i] = columns[i].sortType; - } - sortTypes[keyColumns - 1] = SortOrder.ASCENDING; - ValueDataType keyType = new ValueDataType(db, sortTypes); - ValueDataType valueType = new ValueDataType(); + RowDataType keyType = getRowFactory().getRowDataType(); Transaction t = mvTable.getTransactionBegin(); - dataMap = t.openMap(mapName, keyType, valueType); + dataMap = t.openMap(mapName, keyType, NullValueDataType.INSTANCE); dataMap.map.setVolatile(!table.isPersistData() || !indexType.isPersistent()); + if (!db.isStarting()) { + dataMap.clear(); + } t.commit(); if (!keyType.equals(dataMap.getKeyType())) { - throw DbException.throwInternalError( + throw DbException.getInternalError( "Incompatible key type, expected " + keyType + " but got " + dataMap.getKeyType() + " for index " + indexName); } @@ -79,18 +71,22 @@ public MVSecondaryIndex(Database db, MVTable table, int id, String indexName, @Override public void addRowsToBuffer(List rows, String bufferName) { - MVMap map = openMap(bufferName); + MVMap map = openMap(bufferName); 
for (Row row : rows) { - ValueArray key = convertToKey(row, null); - map.append(key, ValueNull.INSTANCE); + SearchRow r = getRowFactory().createRow(); + r.copyFrom(row); + map.append(r, ValueNull.INSTANCE); } } private static final class Source { - private final Iterator iterator; - ValueArray currentRowData; - public Source(Iterator iterator) { + private final Iterator iterator; + + SearchRow currentRowData; + + public Source(Iterator iterator) { + assert iterator.hasNext(); this.iterator = iterator; this.currentRowData = iterator.next(); } @@ -103,34 +99,32 @@ public boolean hasNext() { return result; } - public ValueArray next() { + public SearchRow next() { return currentRowData; } - public static final class Comparator implements java.util.Comparator { - private final Mode databaseMode; - private final CompareMode compareMode; + static final class Comparator implements java.util.Comparator { - public Comparator(Mode databaseMode, CompareMode compareMode) { - this.databaseMode = databaseMode; - this.compareMode = compareMode; + private final DataType type; + + public Comparator(DataType type) { + this.type = type; } @Override public int compare(Source one, Source two) { - return one.currentRowData.compareTo(two.currentRowData, databaseMode, compareMode); + return type.compare(one.currentRowData, two.currentRowData); } } } @Override public void addBufferedRows(List bufferNames) { - CompareMode compareMode = database.getCompareMode(); int buffersCount = bufferNames.size(); Queue queue = new PriorityQueue<>(buffersCount, - new Source.Comparator(database.getMode(), compareMode)); + new Source.Comparator(getRowFactory().getRowDataType())); for (String bufferName : bufferNames) { - Iterator iter = openMap(bufferName).keyIterator(null); + Iterator iter = openMap(bufferName).keyIterator(null); if (iter.hasNext()) { queue.offer(new Source(iter)); } @@ -139,14 +133,13 @@ public void addBufferedRows(List bufferNames) { try { while (!queue.isEmpty()) { Source s = 
queue.poll(); - ValueArray rowData = s.next(); - SearchRow row = convertToSearchRow(rowData); + SearchRow row = s.next(); - if (indexType.isUnique() && !mayHaveNullDuplicates(row)) { - checkUnique(dataMap, rowData, Long.MIN_VALUE); + if (uniqueColumnColumn > 0 && !mayHaveNullDuplicates(row)) { + checkUnique(false, dataMap, row, Long.MIN_VALUE); } - dataMap.putCommitted(rowData, ValueNull.INSTANCE); + dataMap.putCommitted(row, ValueNull.INSTANCE); if (s.hasNext()) { queue.offer(s); @@ -160,22 +153,16 @@ public void addBufferedRows(List bufferNames) { } } - private MVMap openMap(String mapName) { - int[] sortTypes = new int[keyColumns]; - for (int i = 0; i < indexColumns.length; i++) { - sortTypes[i] = indexColumns[i].sortType; - } - sortTypes[keyColumns - 1] = SortOrder.ASCENDING; - ValueDataType keyType = new ValueDataType(database, sortTypes); - ValueDataType valueType = new ValueDataType(); - MVMap.Builder builder = - new MVMap.Builder() - .singleWriter() - .keyType(keyType).valueType(valueType); - MVMap map = database.getStore(). 
- getMvStore().openMap(mapName, builder); + private MVMap openMap(String mapName) { + RowDataType keyType = getRowFactory().getRowDataType(); + MVMap.Builder builder = new MVMap.Builder() + .singleWriter() + .keyType(keyType) + .valueType(NullValueDataType.INSTANCE); + MVMap map = database.getStore().getMvStore() + .openMap(mapName, builder); if (!keyType.equals(map.getKeyType())) { - throw DbException.throwInternalError( + throw DbException.getInternalError( "Incompatible key type, expected " + keyType + " but got " + map.getKeyType() + " for map " + mapName); } @@ -183,41 +170,57 @@ private MVMap openMap(String mapName) { } @Override - public void close(Session session) { + public void close(SessionLocal session) { // ok } @Override - public void add(Session session, Row row) { - TransactionMap map = getMap(session); - ValueArray array = convertToKey(row, null); - boolean checkRequired = indexType.isUnique() && !mayHaveNullDuplicates(row); + public void add(SessionLocal session, Row row) { + TransactionMap map = getMap(session); + SearchRow key = convertToKey(row, null); + boolean checkRequired = uniqueColumnColumn > 0 && !mayHaveNullDuplicates(row); if (checkRequired) { - checkUnique(map, array, Long.MIN_VALUE); + boolean repeatableRead = !session.getTransaction().allowNonRepeatableRead(); + checkUnique(repeatableRead, map, row, Long.MIN_VALUE); } try { - map.put(array, ValueNull.INSTANCE); - } catch (IllegalStateException e) { + map.put(key, ValueNull.INSTANCE); + } catch (MVStoreException e) { throw mvTable.convertException(e); } if (checkRequired) { - checkUnique(map, array, row.getKey()); + checkUnique(false, map, row, row.getKey()); } } - private void checkUnique(TransactionMap map, ValueArray row, long newKey) { - Iterator it = map.keyIterator(convertToKey(row, ValueLong.MIN), convertToKey(row, ValueLong.MAX), true); - while (it.hasNext()) { - ValueArray rowData = (ValueArray)it.next(); - Value[] array = rowData.getList(); - Value rowKey = 
array[array.length - 1]; - long rowId = rowKey.getLong(); - if (newKey != rowId) { - if (map.get(rowData) != null) { + private void checkUnique(boolean repeatableRead, TransactionMap map, SearchRow row, + long newKey) { + RowFactory uniqueRowFactory = getUniqueRowFactory(); + SearchRow from = uniqueRowFactory.createRow(); + from.copyFrom(row); + from.setKey(Long.MIN_VALUE); + SearchRow to = uniqueRowFactory.createRow(); + to.copyFrom(row); + to.setKey(Long.MAX_VALUE); + if (repeatableRead) { + // In order to guarantee repeatable reads, snapshot taken at the beginning of the statement or transaction + // need to be checked additionally, because existence of the key should be accounted for, + // even if since then, it was already deleted by another (possibly committed) transaction. + TMIterator it = map.keyIterator(from, to); + for (SearchRow k; (k = it.fetchNext()) != null;) { + if (newKey != k.getKey() && !map.isDeletedByCurrentTransaction(k)) { + throw getDuplicateKeyException(k.toString()); + } + } + } + TMIterator it = map.keyIteratorUncommitted(from, to); + for (SearchRow k; (k = it.fetchNext()) != null;) { + if (newKey != k.getKey()) { + if (map.getImmediate(k) != null) { // committed - throw getDuplicateKeyException(rowKey.toString()); + throw getDuplicateKeyException(k.toString()); } throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, table.getName()); } @@ -225,24 +228,25 @@ private void checkUnique(TransactionMap map, ValueArray row, long } @Override - public void remove(Session session, Row row) { - ValueArray array = convertToKey(row, null); - TransactionMap map = getMap(session); + public void remove(SessionLocal session, Row row) { + SearchRow searchRow = convertToKey(row, null); + TransactionMap map = getMap(session); try { - Value old = map.remove(array); - if (old == null) { + if (map.remove(searchRow) == null) { StringBuilder builder = new StringBuilder(); - getSQL(builder, false).append(": ").append(row.getKey()); + getSQL(builder, 
TRACE_SQL_FLAGS).append(": ").append(row.getKey()); throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, builder.toString()); } - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw mvTable.convertException(e); } } @Override - public void update(Session session, Row oldRow, Row newRow) { - if (!rowsAreEqual(oldRow, newRow)) { + public void update(SessionLocal session, Row oldRow, Row newRow) { + SearchRow searchRowOld = convertToKey(oldRow, null); + SearchRow searchRowNew = convertToKey(newRow, null); + if (!rowsAreEqual(searchRowOld, searchRowNew)) { super.update(session, oldRow, newRow); } } @@ -254,7 +258,7 @@ private boolean rowsAreEqual(SearchRow rowOne, SearchRow rowTwo) { for (int index : columnIds) { Value v1 = rowOne.getValue(index); Value v2 = rowTwo.getValue(index); - if (v1 == null ? v2 != null : !v1.equals(v2)) { + if (!Objects.equals(v1, v2)) { return false; } } @@ -262,58 +266,27 @@ private boolean rowsAreEqual(SearchRow rowOne, SearchRow rowTwo) { } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { return find(session, first, false, last); } - private Cursor find(Session session, SearchRow first, boolean bigger, SearchRow last) { - ValueArray min = convertToKey(first, bigger ? 
ValueLong.MAX : ValueLong.MIN); - ValueArray max = convertToKey(last, ValueLong.MAX); - TransactionMap map = getMap(session); - return new MVStoreCursor(session, map.keyIterator(min, max, false)); - } - - private static ValueArray convertToKey(ValueArray r, ValueLong key) { - Value[] values = r.getList().clone(); - values[values.length - 1] = key; - return ValueArray.get(values); + private Cursor find(SessionLocal session, SearchRow first, boolean bigger, SearchRow last) { + SearchRow min = convertToKey(first, bigger); + SearchRow max = convertToKey(last, Boolean.TRUE); + return new MVStoreCursor(session, getMap(session).keyIterator(min, max), mvTable); } - private ValueArray convertToKey(SearchRow r, ValueLong key) { + private SearchRow convertToKey(SearchRow r, Boolean minMax) { if (r == null) { return null; } - Value[] array = new Value[keyColumns]; - for (int i = 0; i < columns.length; i++) { - Column c = columns[i]; - int idx = c.getColumnId(); - Value v = r.getValue(idx); - if (v != null) { - array[i] = v.convertTo(c.getType(), database.getMode(), null); - } - } - array[keyColumns - 1] = key != null ? key : ValueLong.get(r.getKey()); - return ValueArray.get(array); - } - /** - * Convert array of values to a SearchRow. - * - * @param key the index key - * @return the row - */ - SearchRow convertToSearchRow(ValueArray key) { - Value[] array = key.getList(); - SearchRow searchRow = mvTable.getTemplateRow(); - searchRow.setKey((array[array.length - 1]).getLong()); - Column[] cols = getColumns(); - for (int i = 0; i < array.length - 1; i++) { - Column c = cols[i]; - int idx = c.getColumnId(); - Value v = array[i]; - searchRow.setValue(idx, v); - } - return searchRow; + SearchRow row = getRowFactory().createRow(); + row.copyFrom(r); + if (minMax != null) { + row.setKey(minMax ? 
Long.MAX_VALUE : Long.MIN_VALUE); + } + return row; } @Override @@ -322,20 +295,20 @@ public MVTable getTable() { } @Override - public double getCost(Session session, int[] masks, + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { try { return 10 * getCostRangeIndex(masks, dataMap.sizeAsLongMax(), filters, filter, sortOrder, false, allColumnsSet); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @Override - public void remove(Session session) { - TransactionMap map = getMap(session); + public void remove(SessionLocal session) { + TransactionMap map = getMap(session); if (!map.isClosed()) { Transaction t = session.getTransaction(); t.removeMap(map); @@ -343,8 +316,8 @@ public void remove(Session session) { } @Override - public void truncate(Session session) { - TransactionMap map = getMap(session); + public void truncate(SessionLocal session) { + TransactionMap map = getMap(session); map.clear(); } @@ -354,45 +327,36 @@ public boolean canGetFirstOrLast() { } @Override - public Cursor findFirstOrLast(Session session, boolean first) { - TransactionMap map = getMap(session); - Value key = first ? map.firstKey() : map.lastKey(); - while (true) { - if (key == null) { - return new MVStoreCursor(session, - Collections.emptyIterator()); - } - if (((ValueArray) key).getList()[0] != ValueNull.INSTANCE) { - break; + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + TMIterator iter = getMap(session).keyIterator(null, !first); + for (SearchRow key; (key = iter.fetchNext()) != null;) { + if (key.getValue(columnIds[0]) != ValueNull.INSTANCE) { + return new SingleRowCursor(mvTable.getRow(session, key.getKey())); } - key = first ? 
map.higherKey(key) : map.lowerKey(key); } - MVStoreCursor cursor = new MVStoreCursor(session, - Collections.singletonList(key).iterator()); - cursor.next(); - return cursor; + return new SingleRowCursor(null); } @Override public boolean needRebuild() { try { return dataMap.sizeAsLongMax() == 0; - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @Override - public long getRowCount(Session session) { - TransactionMap map = getMap(session); + public long getRowCount(SessionLocal session) { + TransactionMap map = getMap(session); return map.sizeAsLong(); } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { try { return dataMap.sizeAsLongMax(); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @@ -409,22 +373,17 @@ public boolean canFindNext() { } @Override - public Cursor findNext(Session session, SearchRow higherThan, SearchRow last) { + public Cursor findNext(SessionLocal session, SearchRow higherThan, SearchRow last) { return find(session, higherThan, true, last); } - @Override - public void checkRename() { - // ok - } - /** * Get the map to store the data. * * @param session the session * @return the map */ - private TransactionMap getMap(Session session) { + private TransactionMap getMap(SessionLocal session) { if (session == null) { return dataMap; } @@ -432,27 +391,34 @@ private TransactionMap getMap(Session session) { return dataMap.getInstance(t); } + @Override + public MVMap> getMVMap() { + return dataMap.map; + } + /** * A cursor. 
*/ - final class MVStoreCursor implements Cursor { + static final class MVStoreCursor implements Cursor { - private final Session session; - private final Iterator it; - private ValueArray current; - private Row row; + private final SessionLocal session; + private final TMIterator it; + private final MVTable mvTable; + private SearchRow current; + private Row row; - MVStoreCursor(Session session, Iterator it) { + MVStoreCursor(SessionLocal session, TMIterator it, MVTable mvTable) { this.session = session; this.it = it; + this.mvTable = mvTable; } @Override public Row get() { if (row == null) { - if (current != null) { - Value[] values = current.getList(); - row = mvTable.getRow(session, values[values.length - 1].getLong()); + SearchRow r = getSearchRow(); + if (r != null) { + row = mvTable.getRow(session, r.getKey()); } } return row; @@ -460,12 +426,12 @@ public Row get() { @Override public SearchRow getSearchRow() { - return current == null ? null : convertToSearchRow(current); + return current; } @Override public boolean next() { - current = it.hasNext() ? (ValueArray)it.next() : null; + current = it.fetchNext(); row = null; return current != null; } diff --git a/h2/src/main/org/h2/mvstore/db/MVSortedTempResult.java b/h2/src/main/org/h2/mvstore/db/MVSortedTempResult.java index f37f6702f9..17579c9479 100644 --- a/h2/src/main/org/h2/mvstore/db/MVSortedTempResult.java +++ b/h2/src/main/org/h2/mvstore/db/MVSortedTempResult.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.db; @@ -14,9 +14,14 @@ import org.h2.mvstore.Cursor; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVMap.Builder; +import org.h2.mvstore.type.DataType; +import org.h2.mvstore.type.LongDataType; import org.h2.result.ResultExternal; +import org.h2.result.RowFactory.DefaultRowFactory; import org.h2.result.SortOrder; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueNull; import org.h2.value.ValueRow; /** @@ -56,7 +61,7 @@ class MVSortedTempResult extends MVTempResult { * {@link #contains(Value[])} method is invoked. Only the root result should * have an index if required. */ - private MVMap index; + private MVMap index; /** * Used for DISTINCT ON in presence of ORDER BY. @@ -108,24 +113,26 @@ private MVSortedTempResult(MVSortedTempResult parent) { * indexes of distinct columns for DISTINCT ON results * @param visibleColumnCount * count of visible columns + * @param resultColumnCount + * the number of columns including visible columns and additional + * virtual columns for ORDER BY and DISTINCT ON clauses * @param sort * sort order, or {@code null} if this result does not need any * sorting */ MVSortedTempResult(Database database, Expression[] expressions, boolean distinct, int[] distinctIndexes, - int visibleColumnCount, SortOrder sort) { - super(database, expressions, visibleColumnCount); + int visibleColumnCount, int resultColumnCount, SortOrder sort) { + super(database, expressions, visibleColumnCount, resultColumnCount); this.distinct = distinct; this.distinctIndexes = distinctIndexes; - int length = expressions.length; - int[] sortTypes = new int[length]; + int[] sortTypes = new int[resultColumnCount]; int[] indexes; if (sort != null) { /* * If sorting is specified we need to reorder columns in requested order and set * sort types (ASC, DESC etc) for them properly. 
*/ - indexes = new int[length]; + indexes = new int[resultColumnCount]; int[] colIndex = sort.getQueryColumnIndexes(); int len = colIndex.length; // This set is used to remember columns that are already included @@ -143,7 +150,7 @@ private MVSortedTempResult(MVSortedTempResult parent) { * order (ASC / 0) will be used for them. */ int idx = 0; - for (int i = len; i < length; i++) { + for (int i = len; i < resultColumnCount; i++) { idx = used.nextClearBit(idx); indexes[i] = idx; idx++; @@ -154,7 +161,7 @@ private MVSortedTempResult(MVSortedTempResult parent) { * reordered or have the same order. */ sameOrder: { - for (int i = 0; i < length; i++) { + for (int i = 0; i < resultColumnCount; i++) { if (indexes[i] != i) { // Columns are reordered break sameOrder; @@ -171,17 +178,49 @@ private MVSortedTempResult(MVSortedTempResult parent) { indexes = null; } this.indexes = indexes; - ValueDataType keyType = new ValueDataType(database, sortTypes); - Builder builder = new MVMap.Builder().keyType(keyType); + ValueDataType keyType = new ValueDataType(database, SortOrder.addNullOrdering(database, sortTypes)); + if (indexes != null) { + int l = indexes.length; + TypeInfo[] types = new TypeInfo[l]; + for (int i = 0; i < l; i++) { + types[i] = expressions[indexes[i]].getType(); + } + keyType.setRowFactory(DefaultRowFactory.INSTANCE.createRowFactory(database, database.getCompareMode(), + database, types, null, false)); + } else { + keyType.setRowFactory(DefaultRowFactory.INSTANCE.createRowFactory(database, database.getCompareMode(), + database, expressions, null, false)); + } + Builder builder = new MVMap.Builder().keyType(keyType) + .valueType(LongDataType.INSTANCE); map = store.openMap("tmp", builder); - if (distinct && length != visibleColumnCount || distinctIndexes != null) { - int count = distinctIndexes != null ? 
distinctIndexes.length : visibleColumnCount; + if (distinct && resultColumnCount != visibleColumnCount || distinctIndexes != null) { + int count; + TypeInfo[] types; + if (distinctIndexes != null) { + count = distinctIndexes.length; + types = new TypeInfo[count]; + for (int i = 0; i < count; i++) { + types[i] = expressions[distinctIndexes[i]].getType(); + } + } else { + count = visibleColumnCount; + types = new TypeInfo[count]; + for (int i = 0; i < count; i++) { + types[i] = expressions[i].getType(); + } + } ValueDataType distinctType = new ValueDataType(database, new int[count]); - Builder indexBuilder = new MVMap.Builder().keyType(distinctType); + distinctType.setRowFactory(DefaultRowFactory.INSTANCE.createRowFactory(database, database.getCompareMode(), + database, types, null, false)); + DataType distinctValueType; if (distinctIndexes != null && sort != null) { - indexBuilder.valueType(keyType); - orderedDistinctOnType = keyType; + distinctValueType = orderedDistinctOnType = keyType; + } else { + distinctValueType = NullValueDataType.INSTANCE; } + Builder indexBuilder = new MVMap.Builder().keyType(distinctType) + .valueType(distinctValueType); index = store.openMap("idx", indexBuilder); } } @@ -199,7 +238,7 @@ public int addRow(Value[] values) { } ValueRow distinctRow = ValueRow.get(newValues); if (orderedDistinctOnType == null) { - if (index.putIfAbsent(distinctRow, true) != null) { + if (index.putIfAbsent(distinctRow, ValueNull.INSTANCE) != null) { return rowCount; } } else { @@ -214,9 +253,9 @@ public int addRow(Value[] values) { return rowCount; } } - } else if (expressions.length != visibleColumnCount) { + } else if (visibleColumnCount != resultColumnCount) { ValueRow distinctRow = ValueRow.get(Arrays.copyOf(values, visibleColumnCount)); - if (index.putIfAbsent(distinctRow, true) != null) { + if (index.putIfAbsent(distinctRow, ValueNull.INSTANCE) != null) { return rowCount; } } @@ -243,7 +282,7 @@ public boolean contains(Value[] values) { return 
parent.contains(values); } assert distinct; - if (expressions.length != visibleColumnCount) { + if (visibleColumnCount != resultColumnCount) { return index.containsKey(ValueRow.get(values)); } return map.containsKey(getKey(values)); @@ -319,9 +358,6 @@ public Value[] next() { } // Read the next row current = getValue(cursor.next().getList()); - if (hasEnum) { - fixEnum(current); - } /* * If valueCount is greater than 1 that is possible for non-distinct results the * following invocations of next() will use this.current and this.valueCount. @@ -333,7 +369,7 @@ public Value[] next() { @Override public int removeRow(Value[] values) { assert parent == null && distinct; - if (expressions.length != visibleColumnCount) { + if (visibleColumnCount != resultColumnCount) { throw DbException.getUnsupportedException("removeRow()"); } // If an entry was removed decrement the counter diff --git a/h2/src/main/org/h2/mvstore/db/MVSpatialIndex.java b/h2/src/main/org/h2/mvstore/db/MVSpatialIndex.java index a965882b23..5d07ec7607 100644 --- a/h2/src/main/org/h2/mvstore/db/MVSpatialIndex.java +++ b/h2/src/main/org/h2/mvstore/db/MVSpatialIndex.java @@ -1,10 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.db; +import org.h2.mvstore.rtree.Spatial; import static org.h2.util.geometry.GeometryUtils.MAX_X; import static org.h2.util.geometry.GeometryUtils.MAX_Y; import static org.h2.util.geometry.GeometryUtils.MIN_X; @@ -13,32 +14,32 @@ import java.util.Iterator; import java.util.List; import org.h2.api.ErrorCode; -import org.h2.command.dml.AllColumnsForPlan; +import org.h2.command.query.AllColumnsForPlan; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.index.BaseIndex; +import org.h2.engine.SessionLocal; import org.h2.index.Cursor; +import org.h2.index.IndexCondition; import org.h2.index.IndexType; import org.h2.index.SpatialIndex; -import org.h2.index.SpatialTreeIndex; import org.h2.message.DbException; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.Page; import org.h2.mvstore.rtree.MVRTreeMap; import org.h2.mvstore.rtree.MVRTreeMap.RTreeCursor; -import org.h2.mvstore.rtree.SpatialKey; import org.h2.mvstore.tx.Transaction; import org.h2.mvstore.tx.TransactionMap; import org.h2.mvstore.tx.VersionedValueType; -import org.h2.value.VersionedValue; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.result.SortOrder; +import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.TableFilter; import org.h2.value.Value; import org.h2.value.ValueGeometry; -import org.h2.value.ValueLong; import org.h2.value.ValueNull; +import org.h2.value.VersionedValue; /** * This is an index based on a MVRTreeMap. @@ -47,15 +48,15 @@ * @author Noel Grandin * @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 */ -public class MVSpatialIndex extends BaseIndex implements SpatialIndex, MVIndex { +public class MVSpatialIndex extends MVIndex implements SpatialIndex { /** * The multi-value table. 
*/ final MVTable mvTable; - private final TransactionMap dataMap; - private final MVRTreeMap spatialMap; + private final TransactionMap dataMap; + private final MVRTreeMap> spatialMap; /** * Constructor. @@ -65,12 +66,12 @@ public class MVSpatialIndex extends BaseIndex implements SpatialIndex, MVIndex { * @param id the index id * @param indexName the index name * @param columns the indexed columns (only one geometry column allowed) + * @param uniqueColumnCount count of unique columns (0 or 1) * @param indexType the index type (only spatial index) */ - public MVSpatialIndex( - Database db, MVTable table, int id, String indexName, - IndexColumn[] columns, IndexType indexType) { - super(table, id, indexName, columns, indexType); + public MVSpatialIndex(Database db, MVTable table, int id, String indexName, IndexColumn[] columns, + int uniqueColumnCount, IndexType indexType) { + super(table, id, indexName, columns, uniqueColumnCount, indexType); if (columns.length != 1) { throw DbException.getUnsupportedException( "Can only index one column"); @@ -98,70 +99,69 @@ public MVSpatialIndex( checkIndexColumnTypes(columns); } String mapName = "index." + getId(); - ValueDataType vt = new ValueDataType(db, null); - VersionedValueType valueType = new VersionedValueType(vt); - MVRTreeMap.Builder mapBuilder = - new MVRTreeMap.Builder(). + VersionedValueType valueType = new VersionedValueType<>(NullValueDataType.INSTANCE); + MVRTreeMap.Builder> mapBuilder = + new MVRTreeMap.Builder>(). 
valueType(valueType); spatialMap = db.getStore().getMvStore().openMap(mapName, mapBuilder); Transaction t = mvTable.getTransactionBegin(); - dataMap = t.openMap(spatialMap); + dataMap = t.openMapX(spatialMap); dataMap.map.setVolatile(!table.isPersistData() || !indexType.isPersistent()); t.commit(); } @Override public void addRowsToBuffer(List rows, String bufferName) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } @Override public void addBufferedRows(List bufferNames) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } @Override - public void close(Session session) { + public void close(SessionLocal session) { // ok } @Override - public void add(Session session, Row row) { - TransactionMap map = getMap(session); + public void add(SessionLocal session, Row row) { + TransactionMap map = getMap(session); SpatialKey key = getKey(row); if (key.isNull()) { return; } - if (indexType.isUnique()) { + if (uniqueColumnColumn > 0) { // this will detect committed entries only - RTreeCursor cursor = spatialMap.findContainedKeys(key); - Iterator it = map.wrapIterator(cursor, false); + RTreeCursor> cursor = spatialMap.findContainedKeys(key); + Iterator it = new SpatialKeyIterator(map, cursor, false); while (it.hasNext()) { - SpatialKey k = it.next(); + Spatial k = it.next(); if (k.equalsIgnoringId(key)) { throw getDuplicateKeyException(key.toString()); } } } try { - map.put(key, ValueLong.get(0)); - } catch (IllegalStateException e) { + map.put(key, ValueNull.INSTANCE); + } catch (MVStoreException e) { throw mvTable.convertException(e); } - if (indexType.isUnique()) { + if (uniqueColumnColumn > 0) { // check if there is another (uncommitted) entry - RTreeCursor cursor = spatialMap.findContainedKeys(key); - Iterator it = map.wrapIterator(cursor, true); + RTreeCursor> cursor = spatialMap.findContainedKeys(key); + Iterator it = new SpatialKeyIterator(map, cursor, true); while (it.hasNext()) { - SpatialKey k = 
it.next(); + Spatial k = it.next(); if (k.equalsIgnoringId(key)) { if (map.isSameTransaction(k)) { continue; } map.remove(key); - if (map.get(k) != null) { + if (map.getImmediate(k) != null) { // committed throw getDuplicateKeyException(k.toString()); } @@ -172,54 +172,43 @@ public void add(Session session, Row row) { } @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { SpatialKey key = getKey(row); if (key.isNull()) { return; } - TransactionMap map = getMap(session); + TransactionMap map = getMap(session); try { Value old = map.remove(key); if (old == null) { StringBuilder builder = new StringBuilder(); - getSQL(builder, false).append(": ").append(row.getKey()); + getSQL(builder, TRACE_SQL_FLAGS).append(": ").append(row.getKey()); throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, builder.toString()); } - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw mvTable.convertException(e); } } @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - return find(filter.getSession()); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return find(session); - } - - private Cursor find(Session session) { - Iterator cursor = spatialMap.keyIterator(null); - TransactionMap map = getMap(session); - Iterator it = map.wrapIterator(cursor, false); + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + Iterator cursor = spatialMap.keyIterator(null); + TransactionMap map = getMap(session); + Iterator it = new SpatialKeyIterator(map, cursor, false); return new MVStoreCursor(session, it, mvTable); } @Override - public Cursor findByGeometry(TableFilter filter, SearchRow first, - SearchRow last, SearchRow intersection) { - Session session = filter.getSession(); + public Cursor findByGeometry(SessionLocal session, SearchRow first, SearchRow last, SearchRow intersection) { if (intersection == 
null) { return find(session, first, last); } - Iterator cursor = + Iterator cursor = spatialMap.findIntersectingKeys(getKey(intersection)); - TransactionMap map = getMap(session); - Iterator it = map.wrapIterator(cursor, false); + TransactionMap map = getMap(session); + Iterator it = new SpatialKeyIterator(map, cursor, false); return new MVStoreCursor(session, it, mvTable); } @@ -229,7 +218,7 @@ public Cursor findByGeometry(TableFilter filter, SearchRow first, * @param session the session * @return the minimum bounding box that encloses all keys, or null */ - public Value getBounds(Session session) { + public Value getBounds(SessionLocal session) { FindBoundsCursor cursor = new FindBoundsCursor(spatialMap.getRootPage(), new SpatialKey(0), session, getMap(session), columnIds[0]); while (cursor.hasNext()) { @@ -246,14 +235,14 @@ public Value getBounds(Session session) { * @param session the session * @return the estimated minimum bounding box that encloses all keys, or null */ - public Value getEstimatedBounds(Session session) { - Page p = spatialMap.getRootPage(); + public Value getEstimatedBounds(SessionLocal session) { + Page> p = spatialMap.getRootPage(); int count = p.getKeyCount(); if (count > 0) { - SpatialKey key = (SpatialKey) p.getKey(0); + Spatial key = p.getKey(0); float bminxf = key.min(0), bmaxxf = key.max(0), bminyf = key.min(1), bmaxyf = key.max(1); for (int i = 1; i < count; i++) { - key = (SpatialKey) p.getKey(i); + key = p.getKey(i); float minxf = key.min(0), maxxf = key.max(0), minyf = key.min(1), maxyf = key.max(1); if (minxf < bminxf) { bminxf = minxf; @@ -276,8 +265,7 @@ public Value getEstimatedBounds(Session session) { private SpatialKey getKey(SearchRow row) { Value v = row.getValue(columnIds[0]); double[] env; - if (v == ValueNull.INSTANCE || - (env = ((ValueGeometry) v.convertTo(Value.GEOMETRY)).getEnvelopeNoCopy()) == null) { + if (v == ValueNull.INSTANCE || (env = v.convertToGeometry(null).getEnvelopeNoCopy()) == null) { return new 
SpatialKey(row.getKey()); } return new SpatialKey(row.getKey(), @@ -291,15 +279,36 @@ public MVTable getTable() { } @Override - public double getCost(Session session, int[] masks, TableFilter[] filters, + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { - return SpatialTreeIndex.getCostRangeIndex(masks, columns); + return getCostRangeIndex(masks, columns); + } + + /** + * Compute spatial index cost + * @param masks Search mask + * @param columns Table columns + * @return Index cost hint + */ + public static long getCostRangeIndex(int[] masks, Column[] columns) { + // Never use spatial tree index without spatial filter + if (columns.length == 0) { + return Long.MAX_VALUE; + } + for (Column column : columns) { + int index = column.getColumnId(); + int mask = masks[index]; + if ((mask & IndexCondition.SPATIAL_INTERSECTS) != IndexCondition.SPATIAL_INTERSECTS) { + return Long.MAX_VALUE; + } + } + return 2; } @Override - public void remove(Session session) { - TransactionMap map = getMap(session); + public void remove(SessionLocal session) { + TransactionMap map = getMap(session); if (!map.isClosed()) { Transaction t = session.getTransaction(); t.removeMap(map); @@ -307,45 +316,31 @@ public void remove(Session session) { } @Override - public void truncate(Session session) { - TransactionMap map = getMap(session); + public void truncate(SessionLocal session) { + TransactionMap map = getMap(session); map.clear(); } - @Override - public boolean canGetFirstOrLast() { - return true; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - if (!first) { - throw DbException.throwInternalError( - "Spatial Index can only be fetch in ascending order"); - } - return find(session); - } - @Override public boolean needRebuild() { try { return dataMap.sizeAsLongMax() == 0; - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw 
DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @Override - public long getRowCount(Session session) { - TransactionMap map = getMap(session); + public long getRowCount(SessionLocal session) { + TransactionMap map = getMap(session); return map.sizeAsLong(); } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { try { return dataMap.sizeAsLongMax(); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @@ -356,18 +351,13 @@ public long getDiskSpaceUsed() { return 0; } - @Override - public void checkRename() { - // ok - } - /** * Get the map to store the data. * * @param session the session * @return the map */ - private TransactionMap getMap(Session session) { + private TransactionMap getMap(SessionLocal session) { if (session == null) { return dataMap; } @@ -375,19 +365,25 @@ private TransactionMap getMap(Session session) { return dataMap.getInstance(t); } + @Override + public MVMap> getMVMap() { + return dataMap.map; + } + + /** * A cursor. */ - public static class MVStoreCursor implements Cursor { + private static class MVStoreCursor implements Cursor { - private final Session session; - private final Iterator it; + private final SessionLocal session; + private final Iterator it; private final MVTable mvTable; - private SpatialKey current; + private Spatial current; private SearchRow searchRow; private Row row; - public MVStoreCursor(Session session, Iterator it, MVTable mvTable) { + MVStoreCursor(SessionLocal session, Iterator it, MVTable mvTable) { this.session = session; this.it = it; this.mvTable = mvTable; @@ -415,15 +411,6 @@ public SearchRow getSearchRow() { return searchRow; } - /** - * Returns the current key. - * - * @return the current key - */ - public SpatialKey getKey() { - return current; - } - @Override public boolean next() { current = it.hasNext() ? 
it.next() : null; @@ -439,14 +426,52 @@ public boolean previous() { } + private static class SpatialKeyIterator implements Iterator { + + private final TransactionMap map; + private final Iterator iterator; + private final boolean includeUncommitted; + private Spatial current; + + SpatialKeyIterator(TransactionMap map, + Iterator iterator, boolean includeUncommitted) { + this.map = map; + this.iterator = iterator; + this.includeUncommitted = includeUncommitted; + fetchNext(); + } + + private void fetchNext() { + while (iterator.hasNext()) { + current = iterator.next(); + if (includeUncommitted || map.containsKey(current)) { + return; + } + } + current = null; + } + + @Override + public boolean hasNext() { + return current != null; + } + + @Override + public Spatial next() { + Spatial result = current; + fetchNext(); + return result; + } + } + /** * A cursor for getBounds() method. */ - private final class FindBoundsCursor extends RTreeCursor { + private final class FindBoundsCursor extends RTreeCursor> { - private final Session session; + private final SessionLocal session; - private final TransactionMap map; + private final TransactionMap map; private final int columnId; @@ -456,8 +481,8 @@ private final class FindBoundsCursor extends RTreeCursor { private double bminxd, bmaxxd, bminyd, bmaxyd; - FindBoundsCursor(Page root, SpatialKey filter, Session session, TransactionMap map, - int columnId) { + FindBoundsCursor(Page> root, Spatial filter, SessionLocal session, + TransactionMap map, int columnId) { super(root, filter); this.session = session; this.map = map; @@ -465,7 +490,7 @@ private final class FindBoundsCursor extends RTreeCursor { } @Override - protected boolean check(boolean leaf, SpatialKey key, SpatialKey test) { + protected boolean check(boolean leaf, Spatial key, Spatial test) { float minxf = key.min(0), maxxf = key.max(0), minyf = key.min(1), maxyf = key.max(1); if (leaf) { if (hasBounds) { diff --git a/h2/src/main/org/h2/mvstore/db/MVTable.java 
b/h2/src/main/org/h2/mvstore/db/MVTable.java index dcec7ccf1e..65c611845e 100644 --- a/h2/src/main/org/h2/mvstore/db/MVTable.java +++ b/h2/src/main/org/h2/mvstore/db/MVTable.java @@ -1,46 +1,53 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.db; import java.util.ArrayDeque; import java.util.ArrayList; -import java.util.concurrent.TimeUnit; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; - import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.command.ddl.CreateTableData; +import org.h2.constraint.Constraint; +import org.h2.constraint.ConstraintReferential; import org.h2.engine.Constants; -import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; import org.h2.engine.SysProperties; import org.h2.index.Cursor; import org.h2.index.Index; import org.h2.index.IndexType; import org.h2.message.DbException; import org.h2.message.Trace; +import org.h2.mode.DefaultNullOrdering; import org.h2.mvstore.DataUtils; -import org.h2.mvstore.db.MVTableEngine.Store; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.tx.Transaction; import org.h2.mvstore.tx.TransactionStore; import org.h2.result.Row; import org.h2.result.SearchRow; -import org.h2.schema.SchemaObject; +import org.h2.result.SortOrder; import org.h2.table.Column; import org.h2.table.IndexColumn; -import org.h2.table.RegularTable; +import org.h2.table.Table; +import org.h2.table.TableBase; +import org.h2.table.TableType; import org.h2.util.DebuggingThreadLocal; -import 
org.h2.util.MathUtils; import org.h2.util.Utils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; /** * A table stored in a MVStore. */ -public class MVTable extends RegularTable { +public class MVTable extends TableBase { /** * The table name this thread is waiting to lock. */ @@ -93,7 +100,26 @@ public String getEventText() { } } - private MVPrimaryIndex primaryIndex; + /** + * Whether the table contains a CLOB or BLOB. + */ + private final boolean containsLargeObject; + + /** + * The session (if any) that has exclusively locked this table. + */ + private volatile SessionLocal lockExclusiveSession; + + /** + * The set of sessions (if any) that have a shared lock on the table. Here + * we are using using a ConcurrentHashMap as a set, as there is no + * ConcurrentHashSet. + */ + private final ConcurrentHashMap lockSharedSessions = new ConcurrentHashMap<>(); + + private Column rowIdColumn; + + private final MVPrimaryIndex primaryIndex; private final ArrayList indexes = Utils.newSmallArrayList(); private final AtomicLong lastModificationId = new AtomicLong(); @@ -101,16 +127,25 @@ public String getEventText() { * The queue of sessions waiting to lock the table. It is a FIFO queue to * prevent starvation, since Java's synchronized locking is biased. 
*/ - private final ArrayDeque waitingSessions = new ArrayDeque<>(); + private final ArrayDeque waitingSessions = new ArrayDeque<>(); private final Trace traceLock; private final AtomicInteger changesUntilAnalyze; private int nextAnalyze; - private final MVTableEngine.Store store; + private final Store store; private final TransactionStore transactionStore; - public MVTable(CreateTableData data, MVTableEngine.Store store) { + public MVTable(CreateTableData data, Store store) { super(data); + this.isHidden = data.isHidden; + boolean b = false; + for (Column col : getColumns()) { + if (DataType.isLargeObject(col.getType().getValueType())) { + b = true; + break; + } + } + containsLargeObject = b; nextAnalyze = database.getSettings().analyzeAuto; changesUntilAnalyze = nextAnalyze <= 0 ? null : new AtomicInteger(nextAnalyze); this.store = store; @@ -127,33 +162,22 @@ public String getMapName() { } @Override - public boolean lock(Session session, boolean exclusive, - boolean forceLockEvenInMvcc) { - int lockMode = database.getLockMode(); - if (lockMode == Constants.LOCK_MODE_OFF) { + public boolean lock(SessionLocal session, int lockType) { + if (database.getLockMode() == Constants.LOCK_MODE_OFF) { + session.registerTableAsUpdated(this); return false; } - if (!forceLockEvenInMvcc) { - // MVCC: update, delete, and insert use a shared lock. 
- // Select doesn't lock except when using FOR UPDATE and - // the system property h2.selectForUpdateMvcc - // is not enabled - if (exclusive) { - exclusive = false; - } else { - if (lockExclusiveSession == null) { - return false; - } - } + if (lockType == Table.READ_LOCK && lockExclusiveSession == null) { + return false; } if (lockExclusiveSession == session) { return true; } - if (!exclusive && lockSharedSessions.containsKey(session)) { + if (lockType != Table.EXCLUSIVE_LOCK && lockSharedSessions.containsKey(session)) { return true; } - synchronized (getLockSyncObject()) { - if (!exclusive && lockSharedSessions.containsKey(session)) { + synchronized (this) { + if (lockType != Table.EXCLUSIVE_LOCK && lockSharedSessions.containsKey(session)) { return true; } session.setWaitForLock(this, Thread.currentThread()); @@ -162,7 +186,7 @@ public boolean lock(Session session, boolean exclusive, } waitingSessions.addLast(session); try { - doLock1(session, lockMode, exclusive); + doLock1(session, lockType); } finally { session.setWaitForLock(null, null); if (SysProperties.THREAD_DEADLOCK_DETECTOR) { @@ -174,190 +198,148 @@ public boolean lock(Session session, boolean exclusive, return false; } - /** - * The the object on which to synchronize and wait on. For the - * multi-threaded mode, this is this object, but for non-multi-threaded, it - * is the database, as in this case all operations are synchronized on the - * database object. 
- * - * @return the lock sync object - */ - private Object getLockSyncObject() { - if (database.isMultiThreaded()) { - return this; - } - return database; - } - - private void doLock1(Session session, int lockMode, boolean exclusive) { - traceLock(session, exclusive, TraceLockEvent.TRACE_LOCK_REQUESTING_FOR, NO_EXTRA_INFO); + private void doLock1(SessionLocal session, int lockType) { + traceLock(session, lockType, TraceLockEvent.TRACE_LOCK_REQUESTING_FOR, NO_EXTRA_INFO); // don't get the current time unless necessary - long max = 0; + long max = 0L; boolean checkDeadlock = false; while (true) { // if I'm the next one in the queue - if (waitingSessions.getFirst() == session) { - if (doLock2(session, lockMode, exclusive)) { + if (waitingSessions.getFirst() == session && lockExclusiveSession == null) { + if (doLock2(session, lockType)) { return; } } if (checkDeadlock) { - ArrayList sessions = checkDeadlock(session, null, null); + ArrayList sessions = checkDeadlock(session, null, null); if (sessions != null) { throw DbException.get(ErrorCode.DEADLOCK_1, - getDeadlockDetails(sessions, exclusive)); + getDeadlockDetails(sessions, lockType)); } } else { // check for deadlocks from now on checkDeadlock = true; } long now = System.nanoTime(); - if (max == 0) { + if (max == 0L) { // try at least one more time - max = now + TimeUnit.MILLISECONDS.toNanos(session.getLockTimeout()); - } else if (now >= max) { - traceLock(session, exclusive, - TraceLockEvent.TRACE_LOCK_TIMEOUT_AFTER, NO_EXTRA_INFO+session.getLockTimeout()); + max = Utils.nanoTimePlusMillis(now, session.getLockTimeout()); + } else if (now - max >= 0L) { + traceLock(session, lockType, + TraceLockEvent.TRACE_LOCK_TIMEOUT_AFTER, Integer.toString(session.getLockTimeout())); throw DbException.get(ErrorCode.LOCK_TIMEOUT_1, getName()); } try { - traceLock(session, exclusive, TraceLockEvent.TRACE_LOCK_WAITING_FOR, NO_EXTRA_INFO); - if (database.getLockMode() == Constants.LOCK_MODE_TABLE_GC) { - for (int i = 0; i < 20; i++) 
{ - long free = Runtime.getRuntime().freeMemory(); - System.gc(); - long free2 = Runtime.getRuntime().freeMemory(); - if (free == free2) { - break; - } - } - } + traceLock(session, lockType, TraceLockEvent.TRACE_LOCK_WAITING_FOR, NO_EXTRA_INFO); // don't wait too long so that deadlocks are detected early - long sleep = Math.min(Constants.DEADLOCK_CHECK, - TimeUnit.NANOSECONDS.toMillis(max - now)); + long sleep = Math.min(Constants.DEADLOCK_CHECK, (max - now) / 1_000_000L); if (sleep == 0) { sleep = 1; } - getLockSyncObject().wait(sleep); + wait(sleep); } catch (InterruptedException e) { // ignore } } } - private boolean doLock2(Session session, int lockMode, boolean exclusive) { - if (lockExclusiveSession == null) { - if (exclusive) { - if (lockSharedSessions.isEmpty()) { - traceLock(session, exclusive, TraceLockEvent.TRACE_LOCK_ADDED_FOR, NO_EXTRA_INFO); - session.addLock(this); - lockExclusiveSession = session; - if (SysProperties.THREAD_DEADLOCK_DETECTOR) { - if (EXCLUSIVE_LOCKS.get() == null) { - EXCLUSIVE_LOCKS.set(new ArrayList()); - } - EXCLUSIVE_LOCKS.get().add(getName()); - } - return true; - } else if (lockSharedSessions.size() == 1 && - lockSharedSessions.containsKey(session)) { - traceLock(session, exclusive, TraceLockEvent.TRACE_LOCK_ADD_UPGRADED_FOR, NO_EXTRA_INFO); - lockExclusiveSession = session; - if (SysProperties.THREAD_DEADLOCK_DETECTOR) { - if (EXCLUSIVE_LOCKS.get() == null) { - EXCLUSIVE_LOCKS.set(new ArrayList()); - } - EXCLUSIVE_LOCKS.get().add(getName()); - } - return true; - } + private boolean doLock2(SessionLocal session, int lockType) { + switch (lockType) { + case Table.EXCLUSIVE_LOCK: + int size = lockSharedSessions.size(); + if (size == 0) { + traceLock(session, lockType, TraceLockEvent.TRACE_LOCK_ADDED_FOR, NO_EXTRA_INFO); + session.registerTableAsLocked(this); + } else if (size == 1 && lockSharedSessions.containsKey(session)) { + traceLock(session, lockType, TraceLockEvent.TRACE_LOCK_ADD_UPGRADED_FOR, NO_EXTRA_INFO); } else { - if 
(lockSharedSessions.putIfAbsent(session, session) == null) { - traceLock(session, exclusive, TraceLockEvent.TRACE_LOCK_OK, NO_EXTRA_INFO); - session.addLock(this); - if (SysProperties.THREAD_DEADLOCK_DETECTOR) { - ArrayList list = SHARED_LOCKS.get(); - if (list == null) { - list = new ArrayList<>(); - SHARED_LOCKS.set(list); - } - list.add(getName()); - } + return false; + } + lockExclusiveSession = session; + if (SysProperties.THREAD_DEADLOCK_DETECTOR) { + addLockToDebugList(EXCLUSIVE_LOCKS); + } + break; + case Table.WRITE_LOCK: + if (lockSharedSessions.putIfAbsent(session, session) == null) { + traceLock(session, lockType, TraceLockEvent.TRACE_LOCK_OK, NO_EXTRA_INFO); + session.registerTableAsLocked(this); + if (SysProperties.THREAD_DEADLOCK_DETECTOR) { + addLockToDebugList(SHARED_LOCKS); } - return true; } } - return false; + return true; } - private void traceLock(Session session, boolean exclusive, TraceLockEvent eventEnum, String extraInfo) { + private void addLockToDebugList(DebuggingThreadLocal> locks) { + ArrayList list = locks.get(); + if (list == null) { + list = new ArrayList<>(); + locks.set(list); + } + list.add(getName()); + } + + private void traceLock(SessionLocal session, int lockType, TraceLockEvent eventEnum, String extraInfo) { if (traceLock.isDebugEnabled()) { traceLock.debug("{0} {1} {2} {3} {4}", session.getId(), - exclusive ? 
"exclusive write lock" : "shared read lock", eventEnum.getEventText(), + lockTypeToString(lockType), eventEnum.getEventText(), getName(), extraInfo); } } @Override - public void unlock(Session s) { + public void unlock(SessionLocal s) { if (database != null) { - boolean wasLocked = lockExclusiveSession == s; - traceLock(s, wasLocked, TraceLockEvent.TRACE_LOCK_UNLOCK, NO_EXTRA_INFO); - if (wasLocked) { + int lockType; + if (lockExclusiveSession == s) { + lockType = Table.EXCLUSIVE_LOCK; lockSharedSessions.remove(s); lockExclusiveSession = null; if (SysProperties.THREAD_DEADLOCK_DETECTOR) { - if (EXCLUSIVE_LOCKS.get() != null) { - EXCLUSIVE_LOCKS.get().remove(getName()); + ArrayList exclusiveLocks = EXCLUSIVE_LOCKS.get(); + if (exclusiveLocks != null) { + exclusiveLocks.remove(getName()); } } } else { - wasLocked = lockSharedSessions.remove(s) != null; + lockType = lockSharedSessions.remove(s) != null ? Table.WRITE_LOCK : Table.READ_LOCK; if (SysProperties.THREAD_DEADLOCK_DETECTOR) { - if (SHARED_LOCKS.get() != null) { - SHARED_LOCKS.get().remove(getName()); + ArrayList sharedLocks = SHARED_LOCKS.get(); + if (sharedLocks != null) { + sharedLocks.remove(getName()); } } } - if (wasLocked && !waitingSessions.isEmpty()) { - Object lockSyncObject = getLockSyncObject(); - synchronized (lockSyncObject) { - lockSyncObject.notifyAll(); + traceLock(s, lockType, TraceLockEvent.TRACE_LOCK_UNLOCK, NO_EXTRA_INFO); + if (lockType != Table.READ_LOCK && !waitingSessions.isEmpty()) { + synchronized (this) { + notifyAll(); } } } } @Override - public void close(Session session) { + public void close(SessionLocal session) { // ignore } @Override - public Row getRow(Session session, long key) { + public Row getRow(SessionLocal session, long key) { return primaryIndex.getRow(session, key); } @Override - public Index addIndex(Session session, String indexName, int indexId, - IndexColumn[] cols, IndexType indexType, boolean create, - String indexComment) { - if (indexType.isPrimaryKey()) { - 
for (IndexColumn c : cols) { - Column column = c.column; - if (column.isNullable()) { - throw DbException.get( - ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, - column.getName()); - } - column.setPrimaryKey(true); - } - } + public Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment) { + cols = prepareColumns(database, cols, indexType); boolean isSessionTemporary = isTemporary() && !isGlobalTemporary(); if (!isSessionTemporary) { database.lockMeta(session); } - MVIndex index; + MVIndex index; int mainIndexColumn = primaryIndex.getMainIndexColumn() != SearchRow.ROWID_INDEX ? SearchRow.ROWID_INDEX : getMainIndexColumn(indexType, cols); if (database.isStarting()) { @@ -376,10 +358,10 @@ public Index addIndex(Session session, String indexName, int indexId, indexType); } else if (indexType.isSpatial()) { index = new MVSpatialIndex(session.getDatabase(), this, indexId, - indexName, cols, indexType); + indexName, cols, uniqueColumnCount, indexType); } else { index = new MVSecondaryIndex(session.getDatabase(), this, indexId, - indexName, cols, indexType); + indexName, cols, uniqueColumnCount, indexType); } if (index.needRebuild()) { rebuildIndex(session, index, indexName); @@ -398,10 +380,9 @@ public Index addIndex(Session session, String indexName, int indexId, return index; } - private void rebuildIndex(Session session, MVIndex index, String indexName) { + private void rebuildIndex(SessionLocal session, MVIndex index, String indexName) { try { - if (session.getDatabase().getStore() == null || - index instanceof MVSpatialIndex) { + if (!session.getDatabase().isPersistent() || index instanceof MVSpatialIndex) { // in-memory rebuildIndexBuffered(session, index); } else { @@ -422,11 +403,7 @@ private void rebuildIndex(Session session, MVIndex index, String indexName) { } } - private void rebuildIndexBlockMerge(Session session, MVIndex index) { - if (index 
instanceof MVSpatialIndex) { - // the spatial index doesn't support multi-way merge sort - rebuildIndexBuffered(session, index); - } + private void rebuildIndexBlockMerge(SessionLocal session, MVIndex index) { // Read entries in memory, sort them, write to a new map (in sorted // order); repeat (using a new map for every block of 1 MB) until all // record are read. Merge all maps to the target (using merge sort; @@ -443,14 +420,12 @@ private void rebuildIndexBlockMerge(Session session, MVIndex index) { int bufferSize = database.getMaxMemoryRows() / 2; ArrayList buffer = new ArrayList<>(bufferSize); - String n = getName() + ":" + index.getName(); - int t = MathUtils.convertLongToInt(total); + String n = getName() + ':' + index.getName(); ArrayList bufferNames = Utils.newSmallArrayList(); while (cursor.next()) { Row row = cursor.get(); buffer.add(row); - database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n, - MathUtils.convertLongToInt(i++), t); + database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n, i++, total); if (buffer.size() >= bufferSize) { sortRows(buffer, index); String mapName = store.nextTemporaryMapName(); @@ -471,12 +446,11 @@ private void rebuildIndexBlockMerge(Session session, MVIndex index) { addRowsToIndex(session, buffer, index); } if (remaining != 0) { - DbException.throwInternalError("rowcount remaining=" + remaining + - " " + getName()); + throw DbException.getInternalError("rowcount remaining=" + remaining + ' ' + getName()); } } - private void rebuildIndexBuffered(Session session, Index index) { + private void rebuildIndexBuffered(SessionLocal session, Index index) { Index scan = getScanIndex(session); long remaining = scan.getRowCount(session); long total = remaining; @@ -484,13 +458,11 @@ private void rebuildIndexBuffered(Session session, Index index) { long i = 0; int bufferSize = (int) Math.min(total, database.getMaxMemoryRows()); ArrayList buffer = new ArrayList<>(bufferSize); - String n = getName() + ":" + 
index.getName(); - int t = MathUtils.convertLongToInt(total); + String n = getName() + ':' + index.getName(); while (cursor.next()) { Row row = cursor.get(); buffer.add(row); - database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n, - MathUtils.convertLongToInt(i++), t); + database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n, i++, total); if (buffer.size() >= bufferSize) { addRowsToIndex(session, buffer, index); } @@ -498,13 +470,12 @@ private void rebuildIndexBuffered(Session session, Index index) { } addRowsToIndex(session, buffer, index); if (remaining != 0) { - DbException.throwInternalError("rowcount remaining=" + remaining + - " " + getName()); + throw DbException.getInternalError("rowcount remaining=" + remaining + ' ' + getName()); } } @Override - public void removeRow(Session session, Row row) { + public void removeRow(SessionLocal session, Row row) { syncLastModificationIdWithDatabase(); Transaction t = session.getTransaction(); long savepoint = t.setSavepoint(); @@ -525,8 +496,9 @@ public void removeRow(Session session, Row row) { } @Override - public void truncate(Session session) { + public long truncate(SessionLocal session) { syncLastModificationIdWithDatabase(); + long result = getRowCountApproximation(session); for (int i = indexes.size() - 1; i >= 0; i--) { Index index = indexes.get(i); index.truncate(session); @@ -534,10 +506,11 @@ public void truncate(Session session) { if (changesUntilAnalyze != null) { changesUntilAnalyze.set(nextAnalyze); } + return result; } @Override - public void addRow(Session session, Row row) { + public void addRow(SessionLocal session, Row row) { syncLastModificationIdWithDatabase(); Transaction t = session.getTransaction(); long savepoint = t.setSavepoint(); @@ -557,7 +530,7 @@ public void addRow(Session session, Row row) { } @Override - public void updateRow(Session session, Row oldRow, Row newRow) { + public void updateRow(SessionLocal session, Row oldRow, Row newRow) { 
newRow.setKey(oldRow.getKey()); syncLastModificationIdWithDatabase(); Transaction t = session.getTransaction(); @@ -578,11 +551,15 @@ public void updateRow(Session session, Row oldRow, Row newRow) { } @Override - public Row lockRow(Session session, Row row) { - return primaryIndex.lockRow(session, row); + public Row lockRow(SessionLocal session, Row row) { + Row lockedRow = primaryIndex.lockRow(session, row); + if (lockedRow == null || !row.hasSharedData(lockedRow)) { + syncLastModificationIdWithDatabase(); + } + return lockedRow; } - private void analyzeIfRequired(Session session) { + private void analyzeIfRequired(SessionLocal session) { if (changesUntilAnalyze != null) { if (changesUntilAnalyze.decrementAndGet() == 0) { if (nextAnalyze <= Integer.MAX_VALUE / 2) { @@ -595,12 +572,7 @@ private void analyzeIfRequired(Session session) { } @Override - public Index getScanIndex(Session session) { - return primaryIndex; - } - - @Override - public Index getUniqueIndex() { + public Index getScanIndex(SessionLocal session) { return primaryIndex; } @@ -615,7 +587,7 @@ public long getMaxDataModificationId() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { if (containsLargeObject) { // unfortunately, the data is gone on rollback truncate(session); @@ -636,28 +608,18 @@ public void removeChildrenAndResources(Session session) { } primaryIndex.remove(session); indexes.clear(); - if (SysProperties.CHECK) { - for (SchemaObject obj : database - .getAllSchemaObjects(DbObject.INDEX)) { - Index index = (Index) obj; - if (index.getTable() == this) { - DbException.throwInternalError("index not dropped: " + - index.getName()); - } - } - } close(session); invalidate(); } @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return primaryIndex.getRowCount(session); } @Override - public long getRowCountApproximation() { - return 
primaryIndex.getRowCountApproximation(); + public long getRowCountApproximation(SessionLocal session) { + return primaryIndex.getRowCountApproximation(session); } @Override @@ -676,7 +638,7 @@ Transaction getTransactionBegin() { } @Override - public boolean isMVStore() { + public boolean isRowLockable() { return true; } @@ -707,13 +669,13 @@ private void syncLastModificationIdWithDatabase() { } /** - * Convert the illegal state exception to a database exception. + * Convert the MVStoreException to a database exception. * * @param e the illegal state exception * @return the database exception */ - DbException convertException(IllegalStateException e) { - int errorCode = DataUtils.getErrorCode(e.getMessage()); + DbException convertException(MVStoreException e) { + int errorCode = e.getErrorCode(); if (errorCode == DataUtils.ERROR_TRANSACTION_LOCKED) { throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, e, getName()); @@ -722,6 +684,263 @@ DbException convertException(IllegalStateException e) { throw DbException.get(ErrorCode.DEADLOCK_1, e, getName()); } - return store.convertIllegalStateException(e); + return store.convertMVStoreException(e); + } + + @Override + public int getMainIndexColumn() { + return primaryIndex.getMainIndexColumn(); + } + + + /** + * Appends the specified rows to the specified index. + * + * @param session + * the session + * @param list + * the rows, list is cleared on completion + * @param index + * the index to append to + */ + private static void addRowsToIndex(SessionLocal session, ArrayList list, Index index) { + sortRows(list, index); + for (Row row : list) { + index.add(session, row); + } + list.clear(); + } + + /** + * Formats details of a deadlock. 
+ * + * @param sessions + * the list of sessions + * @param lockType + * the type of lock + * @return formatted details of a deadlock + */ + private static String getDeadlockDetails(ArrayList sessions, int lockType) { + // We add the thread details here to make it easier for customers to + // match up these error messages with their own logs. + StringBuilder builder = new StringBuilder(); + for (SessionLocal s : sessions) { + Table lock = s.getWaitForLock(); + Thread thread = s.getWaitForLockThread(); + builder.append("\nSession ").append(s).append(" on thread ").append(thread.getName()) + .append(" is waiting to lock ").append(lock.toString()) + .append(" (").append(lockTypeToString(lockType)).append(") while locking "); + boolean addComma = false; + for (Table t : s.getLocks()) { + if (addComma) { + builder.append(", "); + } + addComma = true; + builder.append(t.toString()); + if (t instanceof MVTable) { + if (((MVTable) t).lockExclusiveSession == s) { + builder.append(" (exclusive)"); + } else { + builder.append(" (shared)"); + } + } + } + builder.append('.'); + } + return builder.toString(); + } + + private static String lockTypeToString(int lockType) { + return lockType == Table.READ_LOCK ? "shared read" + : lockType == Table.WRITE_LOCK ? "shared write" : "exclusive"; + } + + /** + * Sorts the specified list of rows for a specified index. 
+ * + * @param list + * the list of rows + * @param index + * the index to sort for + */ + private static void sortRows(ArrayList list, final Index index) { + list.sort(index::compareRows); + } + + @Override + public boolean canDrop() { + return true; + } + + @Override + public boolean canGetRowCount(SessionLocal session) { + return true; + } + + @Override + public boolean canTruncate() { + if (getCheckForeignKeyConstraints() && database.getReferentialIntegrity()) { + ArrayList constraints = getConstraints(); + if (constraints != null) { + for (Constraint c : constraints) { + if (c.getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + ConstraintReferential ref = (ConstraintReferential) c; + if (ref.getRefTable() == this) { + return false; + } + } + } + } + return true; + } + + @Override + public ArrayList checkDeadlock(SessionLocal session, SessionLocal clash, Set visited) { + // only one deadlock check at any given time + synchronized (getClass()) { + if (clash == null) { + // verification is started + clash = session; + visited = new HashSet<>(); + } else if (clash == session) { + // we found a cycle where this session is involved + return new ArrayList<>(0); + } else if (visited.contains(session)) { + // we have already checked this session. 
+ // there is a cycle, but the sessions in the cycle need to + // find it out themselves + return null; + } + visited.add(session); + ArrayList error = null; + for (SessionLocal s : lockSharedSessions.keySet()) { + if (s == session) { + // it doesn't matter if we have locked the object already + continue; + } + Table t = s.getWaitForLock(); + if (t != null) { + error = t.checkDeadlock(s, clash, visited); + if (error != null) { + error.add(session); + break; + } + } + } + // take a local copy so we don't see inconsistent data, since we are + // not locked while checking the lockExclusiveSession value + SessionLocal copyOfLockExclusiveSession = lockExclusiveSession; + if (error == null && copyOfLockExclusiveSession != null) { + Table t = copyOfLockExclusiveSession.getWaitForLock(); + if (t != null) { + error = t.checkDeadlock(copyOfLockExclusiveSession, clash, visited); + if (error != null) { + error.add(session); + } + } + } + return error; + } + } + + @Override + public void checkSupportAlter() { + // ok + } + + public boolean getContainsLargeObject() { + return containsLargeObject; + } + + @Override + public Column getRowIdColumn() { + if (rowIdColumn == null) { + rowIdColumn = new Column(Column.ROWID, TypeInfo.TYPE_BIGINT, this, SearchRow.ROWID_INDEX); + rowIdColumn.setRowId(true); + rowIdColumn.setNullable(false); + } + return rowIdColumn; + } + + @Override + public TableType getTableType() { + return TableType.TABLE; + } + + @Override + public boolean isDeterministic() { + return true; + } + + @Override + public boolean isLockedExclusively() { + return lockExclusiveSession != null; + } + + @Override + public boolean isLockedExclusivelyBy(SessionLocal session) { + return lockExclusiveSession == session; + } + + @Override + protected void invalidate() { + super.invalidate(); + /* + * Query cache of a some sleeping session can have references to + * invalidated tables. 
When this table was dropped by another session, + * the field below still points to it and prevents its garbage + * collection, so this field needs to be cleared to prevent a memory + * leak. + */ + lockExclusiveSession = null; + } + + @Override + public String toString() { + return getTraceSQL(); + } + + /** + * Prepares columns of an index. + * + * @param database the database + * @param cols the index columns + * @param indexType the type of an index + * @return the prepared columns with flags set + */ + private static IndexColumn[] prepareColumns(Database database, IndexColumn[] cols, IndexType indexType) { + if (indexType.isPrimaryKey()) { + for (IndexColumn c : cols) { + Column column = c.column; + if (column.isNullable()) { + throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, column.getName()); + } + } + for (IndexColumn c : cols) { + c.column.setPrimaryKey(true); + } + } else if (!indexType.isSpatial()) { + int i = 0, l = cols.length; + while (i < l && (cols[i].sortType & (SortOrder.NULLS_FIRST | SortOrder.NULLS_LAST)) != 0) { + i++; + } + if (i != l) { + cols = cols.clone(); + DefaultNullOrdering defaultNullOrdering = database.getDefaultNullOrdering(); + for (; i < l; i++) { + IndexColumn oldColumn = cols[i]; + int sortTypeOld = oldColumn.sortType; + int sortTypeNew = defaultNullOrdering.addExplicitNullOrdering(sortTypeOld); + if (sortTypeNew != sortTypeOld) { + IndexColumn newColumn = new IndexColumn(oldColumn.columnName, sortTypeNew); + newColumn.column = oldColumn.column; + cols[i] = newColumn; + } + } + } + } + return cols; } } diff --git a/h2/src/main/org/h2/mvstore/db/MVTableEngine.java b/h2/src/main/org/h2/mvstore/db/MVTableEngine.java deleted file mode 100644 index adb65f8725..0000000000 --- a/h2/src/main/org/h2/mvstore/db/MVTableEngine.java +++ /dev/null @@ -1,475 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.mvstore.db; - -import java.io.InputStream; -import java.lang.Thread.UncaughtExceptionHandler; -import java.nio.channels.FileChannel; -import java.util.ArrayList; -import java.util.BitSet; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; - -import org.h2.api.ErrorCode; -import org.h2.api.TableEngine; -import org.h2.command.ddl.CreateTableData; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.mvstore.DataUtils; -import org.h2.mvstore.FileStore; -import org.h2.mvstore.MVStore; -import org.h2.mvstore.MVStoreTool; -import org.h2.mvstore.tx.Transaction; -import org.h2.mvstore.tx.TransactionStore; -import org.h2.store.InDoubtTransaction; -import org.h2.store.fs.FileChannelInputStream; -import org.h2.store.fs.FileUtils; -import org.h2.table.TableBase; -import org.h2.util.StringUtils; -import org.h2.util.Utils; - -/** - * A table engine that internally uses the MVStore. - */ -public class MVTableEngine implements TableEngine { - - /** - * Initialize the MVStore. 
- * - * @param db the database - * @return the store - */ - public static Store init(final Database db) { - Store store = db.getStore(); - if (store != null) { - return store; - } - byte[] key = db.getFileEncryptionKey(); - String dbPath = db.getDatabasePath(); - MVStore.Builder builder = new MVStore.Builder(); - store = new Store(); - boolean encrypted = false; - if (dbPath != null) { - String fileName = dbPath + Constants.SUFFIX_MV_FILE; - MVStoreTool.compactCleanUp(fileName); - builder.fileName(fileName); - builder.pageSplitSize(db.getPageSize()); - if (db.isReadOnly()) { - builder.readOnly(); - } else { - // possibly create the directory - boolean exists = FileUtils.exists(fileName); - if (exists && !FileUtils.canWrite(fileName)) { - // read only - } else { - String dir = FileUtils.getParent(fileName); - FileUtils.createDirectories(dir); - } - } - if (key != null) { - encrypted = true; - builder.encryptionKey(decodePassword(key)); - } - if (db.getSettings().compressData) { - builder.compress(); - // use a larger page split size to improve the compression ratio - builder.pageSplitSize(64 * 1024); - } - builder.backgroundExceptionHandler(new UncaughtExceptionHandler() { - - @Override - public void uncaughtException(Thread t, Throwable e) { - db.setBackgroundException(DbException.convert(e)); - } - - }); - } - store.open(db, builder, encrypted); - db.setStore(store); - return store; - } - - /** - * Convert password from byte[] to char[]. - * - * @param key password as byte[] - * @return password as char[]. - */ - static char[] decodePassword(byte[] key) { - char[] password = new char[key.length / 2]; - for (int i = 0; i < password.length; i++) { - password[i] = (char) (((key[i + i] & 255) << 16) | - ((key[i + i + 1]) & 255)); - } - return password; - } - - @Override - public TableBase createTable(CreateTableData data) { - Database db = data.session.getDatabase(); - Store store = init(db); - return store.createTable(data); - } - - /** - * A store with open tables. 
- */ - public static class Store { - - /** - * The map of open tables. - * Key: the map name, value: the table. - */ - private final ConcurrentHashMap tableMap = - new ConcurrentHashMap<>(); - - /** - * The store. - */ - private MVStore mvStore; - - /** - * The transaction store. - */ - private TransactionStore transactionStore; - - private long statisticsStart; - - private int temporaryMapId; - - private boolean encrypted; - - private String fileName; - - /** - * Open the store for this database. - * - * @param db the database - * @param builder the builder - * @param encrypted whether the store is encrypted - */ - void open(Database db, MVStore.Builder builder, boolean encrypted) { - this.encrypted = encrypted; - try { - this.mvStore = builder.open(); - FileStore fs = mvStore.getFileStore(); - if (fs != null) { - this.fileName = fs.getFileName(); - } - if (!db.getSettings().reuseSpace) { - mvStore.setReuseSpace(false); - } - this.transactionStore = new TransactionStore(mvStore, - new ValueDataType(db, null), db.getLockTimeout()); - } catch (IllegalStateException e) { - throw convertIllegalStateException(e); - } - } - - /** - * Convert the illegal state exception to the correct database - * exception. 
- * - * @param e the illegal state exception - * @return the database exception - */ - DbException convertIllegalStateException(IllegalStateException e) { - int errorCode = DataUtils.getErrorCode(e.getMessage()); - if (errorCode == DataUtils.ERROR_FILE_CORRUPT) { - if (encrypted) { - throw DbException.get( - ErrorCode.FILE_ENCRYPTION_ERROR_1, - e, fileName); - } - } else if (errorCode == DataUtils.ERROR_FILE_LOCKED) { - throw DbException.get( - ErrorCode.DATABASE_ALREADY_OPEN_1, - e, fileName); - } else if (errorCode == DataUtils.ERROR_READING_FAILED) { - throw DbException.get( - ErrorCode.IO_EXCEPTION_1, - e, fileName); - } else if (errorCode == DataUtils.ERROR_INTERNAL) { - throw DbException.get( - ErrorCode.GENERAL_ERROR_1, - e, fileName); - } - throw DbException.get( - ErrorCode.FILE_CORRUPTED_1, - e, fileName); - - } - - public MVStore getMvStore() { - return mvStore; - } - - public TransactionStore getTransactionStore() { - return transactionStore; - } - - /** - * Get MVTable by table name. - * - * @param tableName table name - * @return MVTable - */ - public MVTable getTable(String tableName) { - return tableMap.get(tableName); - } - - /** - * Create a table. - * - * @param data CreateTableData - * @return table created - */ - public MVTable createTable(CreateTableData data) { - MVTable table = new MVTable(data, this); - tableMap.put(table.getMapName(), table); - return table; - } - - /** - * Remove a table. - * - * @param table the table - */ - public void removeTable(MVTable table) { - tableMap.remove(table.getMapName()); - } - - /** - * Store all pending changes. - */ - public void flush() { - FileStore s = mvStore.getFileStore(); - if (s == null || s.isReadOnly()) { - return; - } - if (!mvStore.compact(50, 4 * 1024 * 1024)) { - mvStore.commit(); - } - } - - /** - * Close the store, without persisting changes. 
- */ - public void closeImmediately() { - if (mvStore.isClosed()) { - return; - } - mvStore.closeImmediately(); - } - - /** - * Remove all temporary maps. - * - * @param objectIds the ids of the objects to keep - */ - public void removeTemporaryMaps(BitSet objectIds) { - for (String mapName : mvStore.getMapNames()) { - if (mapName.startsWith("temp.")) { - mvStore.removeMap(mapName); - } else if (mapName.startsWith("table.") || mapName.startsWith("index.")) { - int id = StringUtils.parseUInt31(mapName, mapName.indexOf('.') + 1, mapName.length()); - if (!objectIds.get(id)) { - mvStore.removeMap(mapName); - } - } - } - } - - /** - * Get the name of the next available temporary map. - * - * @return the map name - */ - public synchronized String nextTemporaryMapName() { - return "temp." + temporaryMapId++; - } - - /** - * Prepare a transaction. - * - * @param session the session - * @param transactionName the transaction name (may be null) - */ - public void prepareCommit(Session session, String transactionName) { - Transaction t = session.getTransaction(); - t.setName(transactionName); - t.prepare(); - mvStore.commit(); - } - - public ArrayList getInDoubtTransactions() { - List list = transactionStore.getOpenTransactions(); - ArrayList result = Utils.newSmallArrayList(); - for (Transaction t : list) { - if (t.getStatus() == Transaction.STATUS_PREPARED) { - result.add(new MVInDoubtTransaction(mvStore, t)); - } - } - return result; - } - - /** - * Set the maximum memory to be used by the cache. - * - * @param kb the maximum size in KB - */ - public void setCacheSize(int kb) { - mvStore.setCacheSize(Math.max(1, kb / 1024)); - } - - public InputStream getInputStream() { - FileChannel fc = mvStore.getFileStore().getEncryptedFile(); - if (fc == null) { - fc = mvStore.getFileStore().getFile(); - } - return new FileChannelInputStream(fc, false); - } - - /** - * Force the changes to disk. 
- */ - public void sync() { - flush(); - mvStore.sync(); - } - - /** - * Compact the database file, that is, compact blocks that have a low - * fill rate, and move chunks next to each other. This will typically - * shrink the database file. Changes are flushed to the file, and old - * chunks are overwritten. - * - * @param maxCompactTime the maximum time in milliseconds to compact - */ - public void compactFile(long maxCompactTime) { - mvStore.setRetentionTime(0); - long start = System.nanoTime(); - while (mvStore.compact(95, 16 * 1024 * 1024)) { - mvStore.sync(); - mvStore.compactMoveChunks(95, 16 * 1024 * 1024); - long time = System.nanoTime() - start; - if (time > TimeUnit.MILLISECONDS.toNanos(maxCompactTime)) { - break; - } - } - } - - /** - * Close the store. Pending changes are persisted. Chunks with a low - * fill rate are compacted, but old chunks are kept for some time, so - * most likely the database file will not shrink. - * - * @param compactFully true if storage need to be compacted after closer - */ - public void close(boolean compactFully) { - try { - FileStore fileStore = mvStore.getFileStore(); - if (!mvStore.isClosed() && fileStore != null) { - if (fileStore.isReadOnly()) { - compactFully = false; - } else { - transactionStore.close(); - } - String fileName = fileStore.getFileName(); - mvStore.close(); - if (compactFully && FileUtils.exists(fileName)) { - // the file could have been deleted concurrently, - // so only compact if the file still exists - MVStoreTool.compact(fileName, true); - } - } - } catch (IllegalStateException e) { - int errorCode = DataUtils.getErrorCode(e.getMessage()); - if (errorCode == DataUtils.ERROR_WRITING_FAILED) { - // disk full - ok - } else if (errorCode == DataUtils.ERROR_FILE_CORRUPT) { - // wrong encryption key - ok - } - mvStore.closeImmediately(); - throw DbException.get(ErrorCode.IO_EXCEPTION_1, e, "Closing"); - } - } - - /** - * Start collecting statistics. 
- */ - public void statisticsStart() { - FileStore fs = mvStore.getFileStore(); - statisticsStart = fs == null ? 0 : fs.getReadCount(); - } - - /** - * Stop collecting statistics. - * - * @return the statistics - */ - public Map statisticsEnd() { - HashMap map = new HashMap<>(); - FileStore fs = mvStore.getFileStore(); - int reads = fs == null ? 0 : (int) (fs.getReadCount() - statisticsStart); - map.put("reads", reads); - return map; - } - - } - - /** - * An in-doubt transaction. - */ - private static class MVInDoubtTransaction implements InDoubtTransaction { - - private final MVStore store; - private final Transaction transaction; - private int state = InDoubtTransaction.IN_DOUBT; - - MVInDoubtTransaction(MVStore store, Transaction transaction) { - this.store = store; - this.transaction = transaction; - } - - @Override - public void setState(int state) { - if (state == InDoubtTransaction.COMMIT) { - transaction.commit(); - } else { - transaction.rollback(); - } - store.commit(); - this.state = state; - } - - @Override - public String getState() { - switch (state) { - case IN_DOUBT: - return "IN_DOUBT"; - case COMMIT: - return "COMMIT"; - case ROLLBACK: - return "ROLLBACK"; - default: - throw DbException.throwInternalError("state="+state); - } - } - - @Override - public String getTransactionName() { - return transaction.getName(); - } - - } - -} diff --git a/h2/src/main/org/h2/mvstore/db/MVTempResult.java b/h2/src/main/org/h2/mvstore/db/MVTempResult.java index 06d97e3172..97779cba55 100644 --- a/h2/src/main/org/h2/mvstore/db/MVTempResult.java +++ b/h2/src/main/org/h2/mvstore/db/MVTempResult.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.db; @@ -19,7 +19,6 @@ import org.h2.result.SortOrder; import org.h2.store.fs.FileUtils; import org.h2.util.TempFileDeleter; -import org.h2.value.TypeInfo; import org.h2.value.Value; /** @@ -71,17 +70,23 @@ public void close() throws Exception { * indexes of distinct columns for DISTINCT ON results * @param visibleColumnCount * count of visible columns + * @param resultColumnCount + * the number of columns including visible columns and additional + * virtual columns for ORDER BY and DISTINCT ON clauses * @param sort * sort order, or {@code null} * @return temporary result */ public static ResultExternal of(Database database, Expression[] expressions, boolean distinct, - int[] distinctIndexes, int visibleColumnCount, SortOrder sort) { + int[] distinctIndexes, int visibleColumnCount, int resultColumnCount, SortOrder sort) { return distinct || distinctIndexes != null || sort != null - ? new MVSortedTempResult(database, expressions, distinct, distinctIndexes, visibleColumnCount, sort) - : new MVPlainTempResult(database, expressions, visibleColumnCount); + ? new MVSortedTempResult(database, expressions, distinct, distinctIndexes, visibleColumnCount, + resultColumnCount, sort) + : new MVPlainTempResult(database, expressions, visibleColumnCount, resultColumnCount); } + private final Database database; + /** * MVStore. */ @@ -97,7 +102,10 @@ public static ResultExternal of(Database database, Expression[] expressions, boo */ final int visibleColumnCount; - final boolean hasEnum; + /** + * Total count of columns. + */ + final int resultColumnCount; /** * Count of rows. Used only in a root results, copies always have 0 value. 
@@ -142,10 +150,11 @@ public static ResultExternal of(Database database, Expression[] expressions, boo */ MVTempResult(MVTempResult parent) { this.parent = parent; + this.database = parent.database; this.store = parent.store; this.expressions = parent.expressions; this.visibleColumnCount = parent.visibleColumnCount; - this.hasEnum = parent.hasEnum; + this.resultColumnCount = parent.resultColumnCount; this.tempFileDeleter = null; this.closeable = null; this.fileRef = null; @@ -160,26 +169,22 @@ public static ResultExternal of(Database database, Expression[] expressions, boo * column expressions * @param visibleColumnCount * count of visible columns + * @param resultColumnCount + * total count of columns */ - MVTempResult(Database database, Expression[] expressions, int visibleColumnCount) { + MVTempResult(Database database, Expression[] expressions, int visibleColumnCount, int resultColumnCount) { + this.database = database; try { String fileName = FileUtils.createTempFile("h2tmp", Constants.SUFFIX_TEMP_FILE, true); Builder builder = new MVStore.Builder().fileName(fileName).cacheSize(0).autoCommitDisabled(); byte[] key = database.getFileEncryptionKey(); if (key != null) { - builder.encryptionKey(MVTableEngine.decodePassword(key)); + builder.encryptionKey(Store.decodePassword(key)); } store = builder.open(); this.expressions = expressions; this.visibleColumnCount = visibleColumnCount; - boolean hasEnum = false; - for (Expression e : expressions) { - if (e.getType().getValueType() == Value.ENUM) { - hasEnum = true; - break; - } - } - this.hasEnum = hasEnum; + this.resultColumnCount = resultColumnCount; tempFileDeleter = database.getTempFileDeleter(); closeable = new CloseImpl(store, fileName); fileRef = tempFileDeleter.addFile(closeable, this); @@ -222,18 +227,4 @@ private void delete() { tempFileDeleter.deleteFile(fileRef, closeable); } - /** - * If any value in the rows is a ValueEnum, apply custom type conversion. 
- * - * @param row the array of values (modified in-place if needed) - */ - final void fixEnum(Value[] row) { - for (int i = 0, l = expressions.length; i < l; i++) { - TypeInfo type = expressions[i].getType(); - if (type.getValueType() == Value.ENUM) { - row[i] = type.getExtTypeInfo().cast(row[i]); - } - } - } - } diff --git a/h2/src/main/org/h2/mvstore/db/NullValueDataType.java b/h2/src/main/org/h2/mvstore/db/NullValueDataType.java new file mode 100644 index 0000000000..c9b4ff3035 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/NullValueDataType.java @@ -0,0 +1,73 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.DataType; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Dummy data type used when no value is required. This data type doesn't use + * any disk space and always returns SQL NULL value. + */ +public final class NullValueDataType implements DataType { + + /** + * Dummy data type instance. 
+ */ + public static final NullValueDataType INSTANCE = new NullValueDataType(); + + private NullValueDataType() { + } + + @Override + public int compare(Value a, Value b) { + return 0; + } + + @Override + public int binarySearch(Value key, Object storage, int size, int initialGuess) { + return 0; + } + + @Override + public int getMemory(Value obj) { + return 0; + } + + @Override + public boolean isMemoryEstimationAllowed() { + return true; + } + + @Override + public void write(WriteBuffer buff, Value obj) { + } + + @Override + public void write(WriteBuffer buff, Object storage, int len) { + } + + @Override + public Value read(ByteBuffer buff) { + return ValueNull.INSTANCE; + } + + @Override + public void read(ByteBuffer buff, Object storage, int len) { + Arrays.fill((Value[]) storage, 0, len, ValueNull.INSTANCE); + } + + @Override + public Value[] createStorage(int size) { + return new Value[size]; + } + +} diff --git a/h2/src/main/org/h2/mvstore/db/RowDataType.java b/h2/src/main/org/h2/mvstore/db/RowDataType.java new file mode 100644 index 0000000000..3486203410 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/RowDataType.java @@ -0,0 +1,262 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.h2.engine.CastDataProvider; +import org.h2.engine.Database; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.BasicDataType; +import org.h2.mvstore.type.MetaType; +import org.h2.mvstore.type.StatefulDataType; +import org.h2.result.RowFactory; +import org.h2.result.SearchRow; +import org.h2.store.DataHandler; +import org.h2.value.CompareMode; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * The data type for rows. 
+ * + * @author Andrei Tokar + */ +public final class RowDataType extends BasicDataType implements StatefulDataType { + + private final ValueDataType valueDataType; + private final int[] sortTypes; + private final int[] indexes; + private final int columnCount; + private final boolean storeKeys; + + public RowDataType(CastDataProvider provider, CompareMode compareMode, DataHandler handler, int[] sortTypes, + int[] indexes, int columnCount, boolean storeKeys) { + this.valueDataType = new ValueDataType(provider, compareMode, handler, sortTypes); + this.sortTypes = sortTypes; + this.indexes = indexes; + this.columnCount = columnCount; + this.storeKeys = storeKeys; + assert indexes == null || sortTypes.length == indexes.length; + } + + public int[] getIndexes() { + return indexes; + } + + public RowFactory getRowFactory() { + return valueDataType.getRowFactory(); + } + + public void setRowFactory(RowFactory rowFactory) { + valueDataType.setRowFactory(rowFactory); + } + + public int getColumnCount() { + return columnCount; + } + + public boolean isStoreKeys() { + return storeKeys; + } + + @Override + public SearchRow[] createStorage(int capacity) { + return new SearchRow[capacity]; + } + + @Override + public int compare(SearchRow a, SearchRow b) { + if (a == b) { + return 0; + } + if (indexes == null) { + int len = a.getColumnCount(); + assert len == b.getColumnCount() : len + " != " + b.getColumnCount(); + for (int i = 0; i < len; i++) { + int comp = valueDataType.compareValues(a.getValue(i), b.getValue(i), sortTypes[i]); + if (comp != 0) { + return comp; + } + } + return 0; + } else { + return compareSearchRows(a, b); + } + } + + private int compareSearchRows(SearchRow a, SearchRow b) { + for (int i = 0; i < indexes.length; i++) { + int index = indexes[i]; + Value v1 = a.getValue(index); + Value v2 = b.getValue(index); + if (v1 == null || v2 == null) { + // can't compare further + break; + } + int comp = valueDataType.compareValues(v1, v2, sortTypes[i]); + if (comp != 
0) { + return comp; + } + } + long aKey = a.getKey(); + long bKey = b.getKey(); + return aKey == SearchRow.MATCH_ALL_ROW_KEY || bKey == SearchRow.MATCH_ALL_ROW_KEY ? + 0 : Long.compare(aKey, bKey); + } + + @Override + public int binarySearch(SearchRow key, Object storage, int size, int initialGuess) { + return binarySearch(key, (SearchRow[])storage, size, initialGuess); + } + + public int binarySearch(SearchRow key, SearchRow[] keys, int size, int initialGuess) { + int low = 0; + int high = size - 1; + // the cached index minus one, so that + // for the first time (when cachedCompare is 0), + // the default value is used + int x = initialGuess - 1; + if (x < 0 || x > high) { + x = high >>> 1; + } + while (low <= high) { + int compare = compareSearchRows(key, keys[x]); + if (compare > 0) { + low = x + 1; + } else if (compare < 0) { + high = x - 1; + } else { + return x; + } + x = (low + high) >>> 1; + } + return -(low + 1); + } + + @Override + public int getMemory(SearchRow row) { + return row.getMemory(); + } + + @Override + public SearchRow read(ByteBuffer buff) { + RowFactory rowFactory = valueDataType.getRowFactory(); + SearchRow row = rowFactory.createRow(); + if (storeKeys) { + row.setKey(DataUtils.readVarLong(buff)); + } + TypeInfo[] columnTypes = rowFactory.getColumnTypes(); + if (indexes == null) { + int columnCount = row.getColumnCount(); + for (int i = 0; i < columnCount; i++) { + row.setValue(i, valueDataType.readValue(buff, columnTypes != null ? columnTypes[i] : null)); + } + } else { + for (int i : indexes) { + row.setValue(i, valueDataType.readValue(buff, columnTypes != null ? 
columnTypes[i] : null)); + } + } + return row; + } + + @Override + public void write(WriteBuffer buff, SearchRow row) { + if (storeKeys) { + buff.putVarLong(row.getKey()); + } + if (indexes == null) { + int columnCount = row.getColumnCount(); + for (int i = 0; i < columnCount; i++) { + valueDataType.write(buff, row.getValue(i)); + } + } else { + for (int i : indexes) { + valueDataType.write(buff, row.getValue(i)); + } + } + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } else if (obj == null || obj.getClass() != RowDataType.class) { + return false; + } + RowDataType other = (RowDataType) obj; + return columnCount == other.columnCount + && Arrays.equals(indexes, other.indexes) + && Arrays.equals(sortTypes, other.sortTypes) + && valueDataType.equals(other.valueDataType); + } + + @Override + public int hashCode() { + int res = super.hashCode(); + res = res * 31 + columnCount; + res = res * 31 + Arrays.hashCode(indexes); + res = res * 31 + Arrays.hashCode(sortTypes); + res = res * 31 + valueDataType.hashCode(); + return res; + } + + @Override + public void save(WriteBuffer buff, MetaType metaType) { + buff.putVarInt(columnCount); + writeIntArray(buff, sortTypes); + writeIntArray(buff, indexes); + buff.put(storeKeys ? 
(byte) 1 : (byte) 0); + } + + private static void writeIntArray(WriteBuffer buff, int[] array) { + if(array == null) { + buff.putVarInt(0); + } else { + buff.putVarInt(array.length + 1); + for (int i : array) { + buff.putVarInt(i); + } + } + } + + @Override + public Factory getFactory() { + return FACTORY; + } + + + + private static final Factory FACTORY = new Factory(); + + public static final class Factory implements StatefulDataType.Factory { + + @Override + public RowDataType create(ByteBuffer buff, MetaType metaDataType, Database database) { + int columnCount = DataUtils.readVarInt(buff); + int[] sortTypes = readIntArray(buff); + int[] indexes = readIntArray(buff); + boolean storeKeys = buff.get() != 0; + CompareMode compareMode = database == null ? CompareMode.getInstance(null, 0) : database.getCompareMode(); + RowFactory rowFactory = RowFactory.getDefaultRowFactory().createRowFactory(database, compareMode, database, + sortTypes, indexes, null, columnCount, storeKeys); + return rowFactory.getRowDataType(); + } + + private static int[] readIntArray(ByteBuffer buff) { + int len = DataUtils.readVarInt(buff) - 1; + if(len < 0) { + return null; + } + int[] res = new int[len]; + for (int i = 0; i < res.length; i++) { + res[i] = DataUtils.readVarInt(buff); + } + return res; + } + } +} diff --git a/h2/src/main/org/h2/mvstore/rtree/SpatialKey.java b/h2/src/main/org/h2/mvstore/db/SpatialKey.java similarity index 54% rename from h2/src/main/org/h2/mvstore/rtree/SpatialKey.java rename to h2/src/main/org/h2/mvstore/db/SpatialKey.java index e6c1ca9395..2a9438eb15 100644 --- a/h2/src/main/org/h2/mvstore/rtree/SpatialKey.java +++ b/h2/src/main/org/h2/mvstore/db/SpatialKey.java @@ -1,16 +1,21 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ -package org.h2.mvstore.rtree; +package org.h2.mvstore.db; import java.util.Arrays; +import org.h2.engine.CastDataProvider; +import org.h2.mvstore.rtree.Spatial; +import org.h2.value.CompareMode; +import org.h2.value.TypeInfo; +import org.h2.value.Value; /** * A unique spatial key. */ -public class SpatialKey { +public class SpatialKey extends Value implements Spatial { private final long id; private final float[] minMax; @@ -31,65 +36,44 @@ public SpatialKey(long id, SpatialKey other) { this.minMax = other.minMax.clone(); } - /** - * Get the minimum value for the given dimension. - * - * @param dim the dimension - * @return the value - */ + @Override public float min(int dim) { return minMax[dim + dim]; } - /** - * Set the minimum value for the given dimension. - * - * @param dim the dimension - * @param x the value - */ + @Override public void setMin(int dim, float x) { minMax[dim + dim] = x; } - /** - * Get the maximum value for the given dimension. - * - * @param dim the dimension - * @return the value - */ + @Override public float max(int dim) { return minMax[dim + dim + 1]; } - /** - * Set the maximum value for the given dimension. 
- * - * @param dim the dimension - * @param x the value - */ + @Override public void setMax(int dim, float x) { minMax[dim + dim + 1] = x; } + @Override + public Spatial clone(long id) { + return new SpatialKey(id, this); + } + + @Override public long getId() { return id; } + @Override public boolean isNull() { return minMax.length == 0; } @Override public String toString() { - StringBuilder buff = new StringBuilder(); - buff.append(id).append(": ("); - for (int i = 0; i < minMax.length; i += 2) { - if (i > 0) { - buff.append(", "); - } - buff.append(minMax[i]).append('/').append(minMax[i + 1]); - } - return buff.append(")").toString(); + return getString(); } @Override @@ -111,14 +95,49 @@ public boolean equals(Object other) { return equalsIgnoringId(o); } + @Override + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + throw new UnsupportedOperationException(); +// return 0; + } + /** * Check whether two objects are equals, but do not compare the id fields. 
* * @param o the other key * @return true if the contents are the same */ - public boolean equalsIgnoringId(SpatialKey o) { - return Arrays.equals(minMax, o.minMax); + @Override + public boolean equalsIgnoringId(Spatial o) { + return Arrays.equals(minMax, ((SpatialKey)o).minMax); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + builder.append(id).append(": ("); + for (int i = 0; i < minMax.length; i += 2) { + if (i > 0) { + builder.append(", "); + } + builder.append(minMax[i]).append('/').append(minMax[i + 1]); + } + builder.append(")"); + return builder; + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_GEOMETRY; + } + + @Override + public int getValueType() { + return Value.GEOMETRY; + } + + @Override + public String getString() { + return getTraceSQL(); } } diff --git a/h2/src/main/org/h2/mvstore/db/Store.java b/h2/src/main/org/h2/mvstore/db/Store.java new file mode 100644 index 0000000000..6f5b5befcf --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/Store.java @@ -0,0 +1,396 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.io.InputStream; +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import org.h2.api.ErrorCode; +import org.h2.command.ddl.CreateTableData; +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.FileStore; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.MVStoreTool; +import org.h2.mvstore.tx.Transaction; +import org.h2.mvstore.tx.TransactionStore; +import org.h2.mvstore.type.MetaType; +import org.h2.store.InDoubtTransaction; +import org.h2.store.fs.FileChannelInputStream; +import org.h2.store.fs.FileUtils; +import org.h2.util.StringUtils; +import org.h2.util.Utils; + +/** + * A store with open tables. + */ +public final class Store { + + /** + * Convert password from byte[] to char[]. + * + * @param key password as byte[] + * @return password as char[]. + */ + static char[] decodePassword(byte[] key) { + char[] password = new char[key.length / 2]; + for (int i = 0; i < password.length; i++) { + password[i] = (char) (((key[i + i] & 255) << 16) | ((key[i + i + 1]) & 255)); + } + return password; + } + + /** + * The map of open tables. + * Key: the map name, value: the table. + */ + private final ConcurrentHashMap tableMap = new ConcurrentHashMap<>(); + + /** + * The store. + */ + private final MVStore mvStore; + + /** + * The transaction store. + */ + private final TransactionStore transactionStore; + + private long statisticsStart; + + private int temporaryMapId; + + private final boolean encrypted; + + private final String fileName; + + /** + * Creates the store. 
+ * + * @param db the database + */ + public Store(Database db) { + byte[] key = db.getFileEncryptionKey(); + String dbPath = db.getDatabasePath(); + MVStore.Builder builder = new MVStore.Builder(); + boolean encrypted = false; + if (dbPath != null) { + String fileName = dbPath + Constants.SUFFIX_MV_FILE; + MVStoreTool.compactCleanUp(fileName); + builder.fileName(fileName); + builder.pageSplitSize(db.getPageSize()); + if (db.isReadOnly()) { + builder.readOnly(); + } else { + // possibly create the directory + boolean exists = FileUtils.exists(fileName); + if (exists && !FileUtils.canWrite(fileName)) { + // read only + } else { + String dir = FileUtils.getParent(fileName); + FileUtils.createDirectories(dir); + } + int autoCompactFillRate = db.getSettings().autoCompactFillRate; + if (autoCompactFillRate <= 100) { + builder.autoCompactFillRate(autoCompactFillRate); + } + } + if (key != null) { + encrypted = true; + builder.encryptionKey(decodePassword(key)); + } + if (db.getSettings().compressData) { + builder.compress(); + // use a larger page split size to improve the compression ratio + builder.pageSplitSize(64 * 1024); + } + builder.backgroundExceptionHandler((t, e) -> db.setBackgroundException(DbException.convert(e))); + // always start without background thread first, and if necessary, + // it will be set up later, after db has been fully started, + // otherwise background thread would compete for store lock + // with maps opening procedure + builder.autoCommitDisabled(); + } + this.encrypted = encrypted; + try { + this.mvStore = builder.open(); + FileStore fs = mvStore.getFileStore(); + fileName = fs != null ? 
fs.getFileName() : null; + if (!db.getSettings().reuseSpace) { + mvStore.setReuseSpace(false); + } + mvStore.setVersionsToKeep(0); + this.transactionStore = new TransactionStore(mvStore, + new MetaType<>(db, mvStore.backgroundExceptionHandler), new ValueDataType(db, null), + db.getLockTimeout()); + } catch (MVStoreException e) { + throw convertMVStoreException(e); + } + } + + /** + * Convert a MVStoreException to the similar exception used + * for the table/sql layers. + * + * @param e the illegal state exception + * @return the database exception + */ + DbException convertMVStoreException(MVStoreException e) { + switch (e.getErrorCode()) { + case DataUtils.ERROR_CLOSED: + throw DbException.get(ErrorCode.DATABASE_IS_CLOSED, e, fileName); + case DataUtils.ERROR_FILE_CORRUPT: + if (encrypted) { + throw DbException.get(ErrorCode.FILE_ENCRYPTION_ERROR_1, e, fileName); + } + throw DbException.get(ErrorCode.FILE_CORRUPTED_1, e, fileName); + case DataUtils.ERROR_FILE_LOCKED: + throw DbException.get(ErrorCode.DATABASE_ALREADY_OPEN_1, e, fileName); + case DataUtils.ERROR_READING_FAILED: + case DataUtils.ERROR_WRITING_FAILED: + throw DbException.get(ErrorCode.IO_EXCEPTION_1, e, fileName); + default: + throw DbException.get(ErrorCode.GENERAL_ERROR_1, e, e.getMessage()); + } + } + + public MVStore getMvStore() { + return mvStore; + } + + public TransactionStore getTransactionStore() { + return transactionStore; + } + + /** + * Get MVTable by table name. + * + * @param tableName table name + * @return MVTable + */ + public MVTable getTable(String tableName) { + return tableMap.get(tableName); + } + + /** + * Create a table. + * + * @param data CreateTableData + * @return table created + */ + public MVTable createTable(CreateTableData data) { + try { + MVTable table = new MVTable(data, this); + tableMap.put(table.getMapName(), table); + return table; + } catch (MVStoreException e) { + throw convertMVStoreException(e); + } + } + + /** + * Remove a table. 
+ * + * @param table the table + */ + public void removeTable(MVTable table) { + try { + tableMap.remove(table.getMapName()); + } catch (MVStoreException e) { + throw convertMVStoreException(e); + } + } + + /** + * Store all pending changes. + */ + public void flush() { + FileStore s = mvStore.getFileStore(); + if (s == null || s.isReadOnly()) { + return; + } + if (!mvStore.compact(50, 4 * 1024 * 1024)) { + mvStore.commit(); + } + } + + /** + * Close the store, without persisting changes. + */ + public void closeImmediately() { + if (!mvStore.isClosed()) { + mvStore.closeImmediately(); + } + } + + /** + * Remove all temporary maps. + * + * @param objectIds the ids of the objects to keep + */ + public void removeTemporaryMaps(BitSet objectIds) { + for (String mapName : mvStore.getMapNames()) { + if (mapName.startsWith("temp.")) { + mvStore.removeMap(mapName); + } else if (mapName.startsWith("table.") || mapName.startsWith("index.")) { + int id = StringUtils.parseUInt31(mapName, mapName.indexOf('.') + 1, mapName.length()); + if (!objectIds.get(id)) { + mvStore.removeMap(mapName); + } + } + } + } + + /** + * Get the name of the next available temporary map. + * + * @return the map name + */ + public synchronized String nextTemporaryMapName() { + return "temp." + temporaryMapId++; + } + + /** + * Prepare a transaction. 
+ * + * @param session the session + * @param transactionName the transaction name (may be null) + */ + public void prepareCommit(SessionLocal session, String transactionName) { + Transaction t = session.getTransaction(); + t.setName(transactionName); + t.prepare(); + mvStore.commit(); + } + + public ArrayList getInDoubtTransactions() { + List list = transactionStore.getOpenTransactions(); + ArrayList result = Utils.newSmallArrayList(); + for (Transaction t : list) { + if (t.getStatus() == Transaction.STATUS_PREPARED) { + result.add(new MVInDoubtTransaction(mvStore, t)); + } + } + return result; + } + + /** + * Set the maximum memory to be used by the cache. + * + * @param kb the maximum size in KB + */ + public void setCacheSize(int kb) { + mvStore.setCacheSize(Math.max(1, kb / 1024)); + } + + public InputStream getInputStream() { + FileChannel fc = mvStore.getFileStore().getEncryptedFile(); + if (fc == null) { + fc = mvStore.getFileStore().getFile(); + } + return new FileChannelInputStream(fc, false); + } + + /** + * Force the changes to disk. + */ + public void sync() { + flush(); + mvStore.sync(); + } + + /** + * Compact the database file, that is, compact blocks that have a low + * fill rate, and move chunks next to each other. This will typically + * shrink the database file. Changes are flushed to the file, and old + * chunks are overwritten. + * + * @param maxCompactTime the maximum time in milliseconds to compact + */ + @SuppressWarnings("unused") + public void compactFile(int maxCompactTime) { + mvStore.compactFile(maxCompactTime); + } + + /** + * Close the store. Pending changes are persisted. + * If time is allocated for housekeeping, chunks with a low + * fill rate are compacted, and some chunks are put next to each other. + * If time is unlimited then full compaction is performed, which uses + * different algorithm - opens alternative temp store and writes all live + * data there, then replaces this store with a new one. 
+ * + * @param allowedCompactionTime time (in milliseconds) alloted for file + * compaction activity, 0 means no compaction, + * -1 means unlimited time (full compaction) + */ + public void close(int allowedCompactionTime) { + try { + FileStore fileStore = mvStore.getFileStore(); + if (!mvStore.isClosed() && fileStore != null) { + boolean compactFully = allowedCompactionTime == -1; + if (fileStore.isReadOnly()) { + compactFully = false; + } else { + transactionStore.close(); + } + if (compactFully) { + allowedCompactionTime = 0; + } + + mvStore.close(allowedCompactionTime); + + String fileName = fileStore.getFileName(); + if (compactFully && FileUtils.exists(fileName)) { + // the file could have been deleted concurrently, + // so only compact if the file still exists + MVStoreTool.compact(fileName, true); + } + } + } catch (MVStoreException e) { + int errorCode = e.getErrorCode(); + if (errorCode == DataUtils.ERROR_WRITING_FAILED) { + // disk full - ok + } else if (errorCode == DataUtils.ERROR_FILE_CORRUPT) { + // wrong encryption key - ok + } + mvStore.closeImmediately(); + throw DbException.get(ErrorCode.IO_EXCEPTION_1, e, "Closing"); + } + } + + /** + * Start collecting statistics. + */ + public void statisticsStart() { + FileStore fs = mvStore.getFileStore(); + statisticsStart = fs == null ? 0 : fs.getReadCount(); + } + + /** + * Stop collecting statistics. + * + * @return the statistics + */ + public Map statisticsEnd() { + HashMap map = new HashMap<>(); + FileStore fs = mvStore.getFileStore(); + int reads = fs == null ? 0 : (int) (fs.getReadCount() - statisticsStart); + map.put("reads", reads); + return map; + } + +} diff --git a/h2/src/main/org/h2/mvstore/db/ValueDataType.java b/h2/src/main/org/h2/mvstore/db/ValueDataType.java index 717ac13473..36d4ccbe0f 100644 --- a/h2/src/main/org/h2/mvstore/db/ValueDataType.java +++ b/h2/src/main/org/h2/mvstore/db/ValueDataType.java @@ -1,142 +1,170 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.db; +import static org.h2.mvstore.DataUtils.readString; +import static org.h2.mvstore.DataUtils.readVarInt; +import static org.h2.mvstore.DataUtils.readVarLong; + import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.Iterator; +import java.util.Map.Entry; import org.h2.api.ErrorCode; import org.h2.api.IntervalQualifier; +import org.h2.engine.CastDataProvider; import org.h2.engine.Database; -import org.h2.engine.Mode; import org.h2.message.DbException; +import org.h2.mode.DefaultNullOrdering; import org.h2.mvstore.DataUtils; import org.h2.mvstore.WriteBuffer; -import org.h2.mvstore.rtree.SpatialDataType; -import org.h2.mvstore.rtree.SpatialKey; +import org.h2.mvstore.type.BasicDataType; import org.h2.mvstore.type.DataType; -import org.h2.result.ResultInterface; -import org.h2.result.SimpleResult; +import org.h2.mvstore.type.MetaType; +import org.h2.mvstore.type.StatefulDataType; +import org.h2.result.RowFactory; +import org.h2.result.SearchRow; import org.h2.result.SortOrder; import org.h2.store.DataHandler; -import org.h2.util.JdbcUtils; +import org.h2.util.DateTimeUtils; import org.h2.util.Utils; import org.h2.value.CompareMode; +import org.h2.value.ExtTypeInfoEnum; +import org.h2.value.ExtTypeInfoRow; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBinary; +import org.h2.value.ValueBlob; import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; +import org.h2.value.ValueChar; +import org.h2.value.ValueClob; import org.h2.value.ValueCollectionBase; import 
org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; +import org.h2.value.ValueDecfloat; import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; import org.h2.value.ValueGeometry; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; import org.h2.value.ValueInterval; import org.h2.value.ValueJavaObject; -import org.h2.value.ValueLobDb; -import org.h2.value.ValueLong; +import org.h2.value.ValueJson; import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; import org.h2.value.ValueRow; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueStringFixed; -import org.h2.value.ValueStringIgnoreCase; +import org.h2.value.ValueSmallint; import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; import org.h2.value.ValueTimestamp; import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueTinyint; import org.h2.value.ValueUuid; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; +import org.h2.value.ValueVarcharIgnoreCase; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; +import org.h2.value.lob.LobDataInMemory; /** * A row type. 
*/ -public class ValueDataType implements DataType { +public final class ValueDataType extends BasicDataType implements StatefulDataType { private static final byte NULL = 0; - private static final byte BYTE = 2; - private static final byte SHORT = 3; - private static final byte INT = 4; - private static final byte LONG = 5; - private static final byte DECIMAL = 6; + private static final byte TINYINT = 2; + private static final byte SMALLINT = 3; + private static final byte INTEGER = 4; + private static final byte BIGINT = 5; + private static final byte NUMERIC = 6; private static final byte DOUBLE = 7; - private static final byte FLOAT = 8; + private static final byte REAL = 8; private static final byte TIME = 9; private static final byte DATE = 10; private static final byte TIMESTAMP = 11; - private static final byte BYTES = 12; - private static final byte STRING = 13; - private static final byte STRING_IGNORECASE = 14; + private static final byte VARBINARY = 12; + private static final byte VARCHAR = 13; + private static final byte VARCHAR_IGNORECASE = 14; private static final byte BLOB = 15; private static final byte CLOB = 16; private static final byte ARRAY = 17; - private static final byte RESULT_SET = 18; private static final byte JAVA_OBJECT = 19; private static final byte UUID = 20; - private static final byte STRING_FIXED = 21; + private static final byte CHAR = 21; private static final byte GEOMETRY = 22; - private static final byte TIMESTAMP_TZ = 24; + private static final byte TIMESTAMP_TZ_OLD = 24; private static final byte ENUM = 25; private static final byte INTERVAL = 26; private static final byte ROW = 27; private static final byte INT_0_15 = 32; - private static final byte LONG_0_7 = 48; - private static final byte DECIMAL_0_1 = 56; - private static final byte DECIMAL_SMALL_0 = 58; - private static final byte DECIMAL_SMALL = 59; + private static final byte BIGINT_0_7 = 48; + private static final byte NUMERIC_0_1 = 56; + private static final byte 
NUMERIC_SMALL_0 = 58; + private static final byte NUMERIC_SMALL = 59; private static final byte DOUBLE_0_1 = 60; - private static final byte FLOAT_0_1 = 62; + private static final byte REAL_0_1 = 62; private static final byte BOOLEAN_FALSE = 64; private static final byte BOOLEAN_TRUE = 65; private static final byte INT_NEG = 66; - private static final byte LONG_NEG = 67; - private static final byte STRING_0_31 = 68; - private static final int BYTES_0_31 = 100; - private static final int SPATIAL_KEY_2D = 132; - private static final int CUSTOM_DATA_TYPE = 133; + private static final byte BIGINT_NEG = 67; + private static final byte VARCHAR_0_31 = 68; + private static final int VARBINARY_0_31 = 100; + // 132 was used for SPATIAL_KEY_2D + // 133 was used for CUSTOM_DATA_TYPE + private static final int JSON = 134; + private static final int TIMESTAMP_TZ = 135; + private static final int TIME_TZ = 136; + private static final int BINARY = 137; + private static final int DECFLOAT = 138; final DataHandler handler; + final CastDataProvider provider; final CompareMode compareMode; - protected final Mode mode; final int[] sortTypes; - SpatialDataType spatialType; + private RowFactory rowFactory; public ValueDataType() { - this(CompareMode.getInstance(null, 0), null, null, null); + this(null, CompareMode.getInstance(null, 0), null, null); } public ValueDataType(Database database, int[] sortTypes) { - this(database.getCompareMode(), database.getMode(), database, sortTypes); + this(database, database.getCompareMode(), database, sortTypes); } - private ValueDataType(CompareMode compareMode, Mode mode, DataHandler handler, - int[] sortTypes) { + public ValueDataType(CastDataProvider provider, CompareMode compareMode, DataHandler handler, int[] sortTypes) { + this.provider = provider; this.compareMode = compareMode; - this.mode = mode; this.handler = handler; this.sortTypes = sortTypes; } - private SpatialDataType getSpatialDataType() { - if (spatialType == null) { - spatialType = 
new SpatialDataType(2); - } - return spatialType; + public RowFactory getRowFactory() { + return rowFactory; + } + + public void setRowFactory(RowFactory rowFactory) { + this.rowFactory = rowFactory; + } + + @Override + public Value[] createStorage(int size) { + return new Value[size]; } @Override - public int compare(Object a, Object b) { + public int compare(Value a, Value b) { if (a == b) { return 0; } - if (a instanceof ValueCollectionBase && b instanceof ValueCollectionBase) { + if (a instanceof SearchRow && b instanceof SearchRow) { + return compare((SearchRow)a, (SearchRow)b); + } else if (a instanceof ValueCollectionBase && b instanceof ValueCollectionBase) { Value[] ax = ((ValueCollectionBase) a).getList(); Value[] bx = ((ValueCollectionBase) b).getList(); int al = ax.length; @@ -162,19 +190,69 @@ public int compare(Object a, Object b) { } return 0; } - return compareValues((Value) a, (Value) b, SortOrder.ASCENDING); + return compareValues(a, b, SortOrder.ASCENDING); } - private int compareValues(Value a, Value b, int sortType) { + private int compare(SearchRow a, SearchRow b) { + if (a == b) { + return 0; + } + int[] indexes = rowFactory.getIndexes(); + if (indexes == null) { + int len = a.getColumnCount(); + assert len == b.getColumnCount() : len + " != " + b.getColumnCount(); + for (int i = 0; i < len; i++) { + int comp = compareValues(a.getValue(i), b.getValue(i), sortTypes[i]); + if (comp != 0) { + return comp; + } + } + return 0; + } else { + assert sortTypes.length == indexes.length; + for (int i = 0; i < indexes.length; i++) { + int index = indexes[i]; + Value v1 = a.getValue(index); + Value v2 = b.getValue(index); + if (v1 == null || v2 == null) { + // can't compare further + break; + } + int comp = compareValues(a.getValue(index), b.getValue(index), sortTypes[i]); + if (comp != 0) { + return comp; + } + } + long aKey = a.getKey(); + long bKey = b.getKey(); + return aKey == SearchRow.MATCH_ALL_ROW_KEY || bKey == SearchRow.MATCH_ALL_ROW_KEY ? 
+ 0 : Long.compare(aKey, bKey); + } + } + + /** + * Compares the specified values. + * + * @param a the first value + * @param b the second value + * @param sortType the sorting type + * @return 0 if equal, -1 if first value is smaller for ascending or larger + * for descending sort type, 1 otherwise + */ + public int compareValues(Value a, Value b, int sortType) { if (a == b) { return 0; } boolean aNull = a == ValueNull.INSTANCE; if (aNull || b == ValueNull.INSTANCE) { - return SortOrder.compareNull(aNull, sortType); + /* + * Indexes with nullable values should have explicit null ordering, + * so default should not matter. + */ + return DefaultNullOrdering.LOW.compareNull(aNull, sortType); } - int comp = a.compareTo(b, mode, compareMode); + int comp = a.compareTo(b, provider, compareMode); if ((sortType & SortOrder.DESCENDING) != 0) { comp = -comp; @@ -183,48 +261,17 @@ private int compareValues(Value a, Value b, int sortType) { } @Override - public int getMemory(Object obj) { - if (obj instanceof SpatialKey) { - return getSpatialDataType().getMemory(obj); - } - return getMemory((Value) obj); - } - - private static int getMemory(Value v) { + public int getMemory(Value v) { return v == null ? 
0 : v.getMemory(); } @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); - } + public Value read(ByteBuffer buff) { + return readValue(buff, null); } @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } - } - - @Override - public Object read(ByteBuffer buff) { - return readValue(buff); - } - - @Override - public void write(WriteBuffer buff, Object obj) { - if (obj instanceof SpatialKey) { - buff.put((byte) SPATIAL_KEY_2D); - getSpatialDataType().write(buff, obj); - return; - } - Value x = (Value) obj; - writeValue(buff, x); - } - - private void writeValue(WriteBuffer buff, Value v) { + public void write(WriteBuffer buff, Value v) { if (v == ValueNull.INSTANCE) { buff.put((byte) 0); return; @@ -234,57 +281,49 @@ private void writeValue(WriteBuffer buff, Value v) { case Value.BOOLEAN: buff.put(v.getBoolean() ? BOOLEAN_TRUE : BOOLEAN_FALSE); break; - case Value.BYTE: - buff.put(BYTE).put(v.getByte()); + case Value.TINYINT: + buff.put(TINYINT).put(v.getByte()); break; - case Value.SHORT: - buff.put(SHORT).putShort(v.getShort()); + case Value.SMALLINT: + buff.put(SMALLINT).putShort(v.getShort()); break; case Value.ENUM: - case Value.INT: { + case Value.INTEGER: { int x = v.getInt(); if (x < 0) { buff.put(INT_NEG).putVarInt(-x); } else if (x < 16) { buff.put((byte) (INT_0_15 + x)); } else { - buff.put(type == Value.INT ? INT : ENUM).putVarInt(x); + buff.put(type == Value.INTEGER ? 
INTEGER : ENUM).putVarInt(x); } break; } - case Value.LONG: { - long x = v.getLong(); - if (x < 0) { - buff.put(LONG_NEG).putVarLong(-x); - } else if (x < 8) { - buff.put((byte) (LONG_0_7 + x)); - } else { - buff.put(LONG).putVarLong(x); - } + case Value.BIGINT: + writeLong(buff, v.getLong()); break; - } - case Value.DECIMAL: { + case Value.NUMERIC: { BigDecimal x = v.getBigDecimal(); if (BigDecimal.ZERO.equals(x)) { - buff.put(DECIMAL_0_1); + buff.put(NUMERIC_0_1); } else if (BigDecimal.ONE.equals(x)) { - buff.put((byte) (DECIMAL_0_1 + 1)); + buff.put((byte) (NUMERIC_0_1 + 1)); } else { int scale = x.scale(); BigInteger b = x.unscaledValue(); int bits = b.bitLength(); if (bits <= 63) { if (scale == 0) { - buff.put(DECIMAL_SMALL_0). + buff.put(NUMERIC_SMALL_0). putVarLong(b.longValue()); } else { - buff.put(DECIMAL_SMALL). + buff.put(NUMERIC_SMALL). putVarInt(scale). putVarLong(b.longValue()); } } else { byte[] bytes = b.toByteArray(); - buff.put(DECIMAL). + buff.put(NUMERIC). putVarInt(scale). putVarInt(bytes.length). put(bytes); @@ -292,66 +331,72 @@ private void writeValue(WriteBuffer buff, Value v) { } break; } - case Value.TIME: { - ValueTime t = (ValueTime) v; - long nanos = t.getNanos(); - long millis = nanos / 1000000; - nanos -= millis * 1000000; - buff.put(TIME). - putVarLong(millis). - putVarLong(nanos); + case Value.DECFLOAT: { + ValueDecfloat d = (ValueDecfloat) v; + buff.put((byte) DECFLOAT); + if (d.isFinite()) { + BigDecimal x = d.getBigDecimal(); + byte[] bytes = x.unscaledValue().toByteArray(); + buff.putVarInt(x.scale()). + putVarInt(bytes.length). 
+ put(bytes); + } else { + int c; + if (d == ValueDecfloat.NEGATIVE_INFINITY) { + c = -3; + } else if (d == ValueDecfloat.POSITIVE_INFINITY) { + c = -2; + } else { + c = -1; + } + buff.putVarInt(0).putVarInt(c); + } break; } - case Value.DATE: { - long x = ((ValueDate) v).getDateValue(); - buff.put(DATE).putVarLong(x); + case Value.TIME: + writeTimestampTime(buff.put(TIME), ((ValueTime) v).getNanos()); + break; + case Value.TIME_TZ: { + ValueTimeTimeZone t = (ValueTimeTimeZone) v; + long nanosOfDay = t.getNanos(); + buff.put((byte) TIME_TZ). + putVarInt((int) (nanosOfDay / DateTimeUtils.NANOS_PER_SECOND)). + putVarInt((int) (nanosOfDay % DateTimeUtils.NANOS_PER_SECOND)); + writeTimeZone(buff, t.getTimeZoneOffsetSeconds()); break; } + case Value.DATE: + buff.put(DATE).putVarLong(((ValueDate) v).getDateValue()); + break; case Value.TIMESTAMP: { ValueTimestamp ts = (ValueTimestamp) v; - long dateValue = ts.getDateValue(); - long nanos = ts.getTimeNanos(); - long millis = nanos / 1000000; - nanos -= millis * 1000000; - buff.put(TIMESTAMP). - putVarLong(dateValue). - putVarLong(millis). - putVarLong(nanos); + buff.put(TIMESTAMP).putVarLong(ts.getDateValue()); + writeTimestampTime(buff, ts.getTimeNanos()); break; } case Value.TIMESTAMP_TZ: { ValueTimestampTimeZone ts = (ValueTimestampTimeZone) v; - long dateValue = ts.getDateValue(); - long nanos = ts.getTimeNanos(); - long millis = nanos / 1000000; - nanos -= millis * 1000000; - buff.put(TIMESTAMP_TZ). - putVarLong(dateValue). - putVarLong(millis). - putVarLong(nanos). - putVarInt(ts.getTimeZoneOffsetMins()); + buff.put((byte) TIMESTAMP_TZ).putVarLong(ts.getDateValue()); + writeTimestampTime(buff, ts.getTimeNanos()); + writeTimeZone(buff, ts.getTimeZoneOffsetSeconds()); break; } - case Value.JAVA_OBJECT: { - byte[] b = v.getBytesNoCopy(); - buff.put(JAVA_OBJECT). - putVarInt(b.length). 
- put(b); + case Value.JAVA_OBJECT: + writeBinary(JAVA_OBJECT, buff, v); break; - } - case Value.BYTES: { + case Value.VARBINARY: { byte[] b = v.getBytesNoCopy(); int len = b.length; if (len < 32) { - buff.put((byte) (BYTES_0_31 + len)). - put(b); + buff.put((byte) (VARBINARY_0_31 + len)).put(b); } else { - buff.put(BYTES). - putVarInt(b.length). - put(b); + buff.put(VARBINARY).putVarInt(len).put(b); } break; } + case Value.BINARY: + writeBinary((byte) BINARY, buff, v); + break; case Value.UUID: { ValueUuid uuid = (ValueUuid) v; buff.put(UUID). @@ -359,25 +404,21 @@ private void writeValue(WriteBuffer buff, Value v) { putLong(uuid.getLow()); break; } - case Value.STRING: { + case Value.VARCHAR: { String s = v.getString(); int len = s.length(); if (len < 32) { - buff.put((byte) (STRING_0_31 + len)). - putStringData(s, len); + buff.put((byte) (VARCHAR_0_31 + len)).putStringData(s, len); } else { - buff.put(STRING); - writeString(buff, s); + writeString(buff.put(VARCHAR), s); } break; } - case Value.STRING_IGNORECASE: - buff.put(STRING_IGNORECASE); - writeString(buff, v.getString()); + case Value.VARCHAR_IGNORECASE: + writeString(buff.put(VARCHAR_IGNORECASE), v.getString()); break; - case Value.STRING_FIXED: - buff.put(STRING_FIXED); - writeString(buff, v.getString()); + case Value.CHAR: + writeString(buff.put(CHAR), v.getString()); break; case Value.DOUBLE: { double x = v.getDouble(); @@ -394,78 +435,70 @@ private void writeValue(WriteBuffer buff, Value v) { } break; } - case Value.FLOAT: { + case Value.REAL: { float x = v.getFloat(); if (x == 1.0f) { - buff.put((byte) (FLOAT_0_1 + 1)); + buff.put((byte) (REAL_0_1 + 1)); } else { int f = Float.floatToIntBits(x); - if (f == ValueFloat.ZERO_BITS) { - buff.put(FLOAT_0_1); + if (f == ValueReal.ZERO_BITS) { + buff.put(REAL_0_1); } else { - buff.put(FLOAT). + buff.put(REAL). putVarInt(Integer.reverse(f)); } } break; } - case Value.BLOB: - case Value.CLOB: { - buff.put(type == Value.BLOB ? 
BLOB : CLOB); - ValueLobDb lob = (ValueLobDb) v; - byte[] small = lob.getSmall(); - if (small == null) { + case Value.BLOB: { + buff.put(BLOB); + ValueBlob lob = (ValueBlob) v; + LobData lobData = lob.getLobData(); + if (lobData instanceof LobDataDatabase) { + LobDataDatabase lobDataDatabase = (LobDataDatabase) lobData; buff.putVarInt(-3). - putVarInt(lob.getTableId()). - putVarLong(lob.getLobId()). - putVarLong(lob.getType().getPrecision()); + putVarInt(lobDataDatabase.getTableId()). + putVarLong(lobDataDatabase.getLobId()). + putVarLong(lob.octetLength()); } else { + byte[] small = ((LobDataInMemory) lobData).getSmall(); buff.putVarInt(small.length). put(small); } break; } + case Value.CLOB: { + buff.put(CLOB); + ValueClob lob = (ValueClob) v; + LobData lobData = lob.getLobData(); + if (lobData instanceof LobDataDatabase) { + LobDataDatabase lobDataDatabase = (LobDataDatabase) lobData; + buff.putVarInt(-3). + putVarInt(lobDataDatabase.getTableId()). + putVarLong(lobDataDatabase.getLobId()). + putVarLong(lob.octetLength()). + putVarLong(lob.charLength()); + } else { + byte[] small = ((LobDataInMemory) lobData).getSmall(); + buff.putVarInt(small.length). + put(small). + putVarLong(lob.charLength()); + } + break; + } case Value.ARRAY: case Value.ROW: { Value[] list = ((ValueCollectionBase) v).getList(); buff.put(type == Value.ARRAY ? ARRAY : ROW) .putVarInt(list.length); for (Value x : list) { - writeValue(buff, x); + write(buff, x); } break; } - case Value.RESULT_SET: { - buff.put(RESULT_SET); - ResultInterface result = ((ValueResultSet) v).getResult(); - int columnCount = result.getVisibleColumnCount(); - buff.putVarInt(columnCount); - for (int i = 0; i < columnCount; i++) { - writeString(buff, result.getAlias(i)); - writeString(buff, result.getColumnName(i)); - TypeInfo columnType = result.getColumnType(i); - buff.putVarInt(columnType.getValueType()). - putVarLong(columnType.getPrecision()). 
- putVarInt(columnType.getScale()); - } - while (result.next()) { - buff.put((byte) 1); - Value[] row = result.currentRow(); - for (int i = 0; i < columnCount; i++) { - writeValue(buff, row[i]); - } - } - buff.put((byte) 0); + case Value.GEOMETRY: + writeBinary(GEOMETRY, buff, v); break; - } - case Value.GEOMETRY: { - byte[] b = v.getBytes(); - int len = b.length; - buff.put(GEOMETRY). - putVarInt(len). - put(b); - break; - } case Value.INTERVAL_YEAR: case Value.INTERVAL_MONTH: case Value.INTERVAL_DAY: @@ -500,16 +533,32 @@ private void writeValue(WriteBuffer buff, Value v) { putVarLong(interval.getRemaining()); break; } + case Value.JSON: + writeBinary((byte) JSON, buff, v); + break; default: - if (JdbcUtils.customDataTypesHandler != null) { - byte[] b = v.getBytesNoCopy(); - buff.put((byte)CUSTOM_DATA_TYPE). - putVarInt(type). - putVarInt(b.length). - put(b); - break; - } - DbException.throwInternalError("type=" + v.getValueType()); + throw DbException.getInternalError("type=" + v.getValueType()); + } + } + + private static void writeBinary(byte type, WriteBuffer buff, Value v) { + byte[] b = v.getBytesNoCopy(); + buff.put(type).putVarInt(b.length).put(b); + } + + /** + * Writes a long. 
+ * + * @param buff the target buffer + * @param x the long value + */ + public static void writeLong(WriteBuffer buff, long x) { + if (x < 0) { + buff.put(BIGINT_NEG).putVarLong(-x); + } else if (x < 8) { + buff.put((byte) (BIGINT_0_7 + x)); + } else { + buff.put(BIGINT).putVarLong(x); } } @@ -518,12 +567,32 @@ private static void writeString(WriteBuffer buff, String s) { buff.putVarInt(len).putStringData(s, len); } + private static void writeTimestampTime(WriteBuffer buff, long nanos) { + long millis = nanos / 1_000_000L; + buff.putVarLong(millis).putVarInt((int) (nanos - millis * 1_000_000L)); + } + + private static void writeTimeZone(WriteBuffer buff, int timeZoneOffset) { + // Valid JSR-310 offsets are -64,800..64,800 + // Use 1 byte for common time zones (including +8:45 etc.) + if (timeZoneOffset % 900 == 0) { + // -72..72 + buff.put((byte) (timeZoneOffset / 900)); + } else if (timeZoneOffset > 0) { + buff.put(Byte.MAX_VALUE).putVarInt(timeZoneOffset); + } else { + buff.put(Byte.MIN_VALUE).putVarInt(-timeZoneOffset); + } + } + /** * Read a value. 
* + * @param buff the source buffer + * @param columnType the data type of value, or {@code null} * @return the value */ - private Object readValue(ByteBuffer buff) { + Value readValue(ByteBuffer buff, TypeInfo columnType) { int type = buff.get() & 255; switch (type) { case NULL: @@ -533,76 +602,82 @@ private Object readValue(ByteBuffer buff) { case BOOLEAN_FALSE: return ValueBoolean.FALSE; case INT_NEG: - return ValueInt.get(-readVarInt(buff)); - case ENUM: - case INT: - return ValueInt.get(readVarInt(buff)); - case LONG_NEG: - return ValueLong.get(-readVarLong(buff)); - case LONG: - return ValueLong.get(readVarLong(buff)); - case BYTE: - return ValueByte.get(buff.get()); - case SHORT: - return ValueShort.get(buff.getShort()); - case DECIMAL_0_1: - return ValueDecimal.ZERO; - case DECIMAL_0_1 + 1: - return ValueDecimal.ONE; - case DECIMAL_SMALL_0: - return ValueDecimal.get(BigDecimal.valueOf( - readVarLong(buff))); - case DECIMAL_SMALL: { + return ValueInteger.get(-readVarInt(buff)); + case INTEGER: + return ValueInteger.get(readVarInt(buff)); + case BIGINT_NEG: + return ValueBigint.get(-readVarLong(buff)); + case BIGINT: + return ValueBigint.get(readVarLong(buff)); + case TINYINT: + return ValueTinyint.get(buff.get()); + case SMALLINT: + return ValueSmallint.get(buff.getShort()); + case NUMERIC_0_1: + return ValueNumeric.ZERO; + case NUMERIC_0_1 + 1: + return ValueNumeric.ONE; + case NUMERIC_SMALL_0: + return ValueNumeric.get(BigDecimal.valueOf(readVarLong(buff))); + case NUMERIC_SMALL: { int scale = readVarInt(buff); - return ValueDecimal.get(BigDecimal.valueOf( - readVarLong(buff), scale)); + return ValueNumeric.get(BigDecimal.valueOf(readVarLong(buff), scale)); } - case DECIMAL: { + case NUMERIC: { int scale = readVarInt(buff); - int len = readVarInt(buff); - byte[] buff2 = Utils.newBytes(len); - buff.get(buff2, 0, len); - BigInteger b = new BigInteger(buff2); - return ValueDecimal.get(new BigDecimal(b, scale)); + return ValueNumeric.get(new BigDecimal(new 
BigInteger(readVarBytes(buff)), scale)); + } + case DECFLOAT: { + int scale = readVarInt(buff), len = readVarInt(buff); + switch (len) { + case -3: + return ValueDecfloat.NEGATIVE_INFINITY; + case -2: + return ValueDecfloat.POSITIVE_INFINITY; + case -1: + return ValueDecfloat.NAN; + default: + byte[] b = Utils.newBytes(len); + buff.get(b, 0, len); + return ValueDecfloat.get(new BigDecimal(new BigInteger(b), scale)); + } } - case DATE: { + case DATE: return ValueDate.fromDateValue(readVarLong(buff)); - } - case TIME: { - long nanos = readVarLong(buff) * 1000000 + readVarLong(buff); - return ValueTime.fromNanos(nanos); - } - case TIMESTAMP: { - long dateValue = readVarLong(buff); - long nanos = readVarLong(buff) * 1000000 + readVarLong(buff); - return ValueTimestamp.fromDateValueAndNanos(dateValue, nanos); - } - case TIMESTAMP_TZ: { - long dateValue = readVarLong(buff); - long nanos = readVarLong(buff) * 1000000 + readVarLong(buff); - short tz = (short) readVarInt(buff); - return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, nanos, tz); - } - case BYTES: { - int len = readVarInt(buff); - byte[] b = Utils.newBytes(len); - buff.get(b, 0, len); - return ValueBytes.getNoCopy(b); - } - case JAVA_OBJECT: { - int len = readVarInt(buff); - byte[] b = Utils.newBytes(len); - buff.get(b, 0, len); - return ValueJavaObject.getNoCopy(null, b, handler); - } + case TIME: + return ValueTime.fromNanos(readTimestampTime(buff)); + case TIME_TZ: + return ValueTimeTimeZone.fromNanos(readVarInt(buff) * DateTimeUtils.NANOS_PER_SECOND + readVarInt(buff), + readTimeZone(buff)); + case TIMESTAMP: + return ValueTimestamp.fromDateValueAndNanos(readVarLong(buff), readTimestampTime(buff)); + case TIMESTAMP_TZ_OLD: + return ValueTimestampTimeZone.fromDateValueAndNanos(readVarLong(buff), readTimestampTime(buff), + readVarInt(buff) * 60); + case TIMESTAMP_TZ: + return ValueTimestampTimeZone.fromDateValueAndNanos(readVarLong(buff), readTimestampTime(buff), + readTimeZone(buff)); + case 
VARBINARY: + return ValueVarbinary.getNoCopy(readVarBytes(buff)); + case BINARY: + return ValueBinary.getNoCopy(readVarBytes(buff)); + case JAVA_OBJECT: + return ValueJavaObject.getNoCopy(readVarBytes(buff)); case UUID: return ValueUuid.get(buff.getLong(), buff.getLong()); - case STRING: - return ValueString.get(readString(buff)); - case STRING_IGNORECASE: - return ValueStringIgnoreCase.get(readString(buff)); - case STRING_FIXED: - return ValueStringFixed.get(readString(buff)); + case VARCHAR: + return ValueVarchar.get(readString(buff)); + case VARCHAR_IGNORECASE: + return ValueVarcharIgnoreCase.get(readString(buff)); + case CHAR: + return ValueChar.get(readString(buff)); + case ENUM: { + int ordinal = readVarInt(buff); + if (columnType != null) { + return ((ExtTypeInfoEnum) columnType.getExtTypeInfo()).getValue(ordinal, provider); + } + return ValueInteger.get(ordinal); + } case INTERVAL: { int ordinal = buff.get(); boolean negative = ordinal < 0; @@ -612,118 +687,127 @@ private Object readValue(ByteBuffer buff) { return ValueInterval.from(IntervalQualifier.valueOf(ordinal), negative, readVarLong(buff), ordinal < 5 ? 
0 : readVarLong(buff)); } - case FLOAT_0_1: - return ValueFloat.ZERO; - case FLOAT_0_1 + 1: - return ValueFloat.ONE; + case REAL_0_1: + return ValueReal.ZERO; + case REAL_0_1 + 1: + return ValueReal.ONE; case DOUBLE_0_1: return ValueDouble.ZERO; case DOUBLE_0_1 + 1: return ValueDouble.ONE; case DOUBLE: return ValueDouble.get(Double.longBitsToDouble(Long.reverse(readVarLong(buff)))); - case FLOAT: - return ValueFloat.get(Float.intBitsToFloat(Integer.reverse(readVarInt(buff)))); - case BLOB: + case REAL: + return ValueReal.get(Float.intBitsToFloat(Integer.reverse(readVarInt(buff)))); + case BLOB: { + int smallLen = readVarInt(buff); + if (smallLen >= 0) { + byte[] small = Utils.newBytes(smallLen); + buff.get(small, 0, smallLen); + return ValueBlob.createSmall(small); + } else if (smallLen == -3) { + return new ValueBlob(readLobDataDatabase(buff), readVarLong(buff)); + } else { + throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "lob type: " + smallLen); + } + } case CLOB: { int smallLen = readVarInt(buff); if (smallLen >= 0) { byte[] small = Utils.newBytes(smallLen); buff.get(small, 0, smallLen); - return ValueLobDb.createSmallLob(type == BLOB ? Value.BLOB : Value.CLOB, small); + return ValueClob.createSmall(small, readVarLong(buff)); } else if (smallLen == -3) { - int tableId = readVarInt(buff); - long lobId = readVarLong(buff); - long precision = readVarLong(buff); - return ValueLobDb.create(type == BLOB ? 
Value.BLOB : Value.CLOB, - handler, tableId, lobId, null, precision); + return new ValueClob(readLobDataDatabase(buff), readVarLong(buff), readVarLong(buff)); } else { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "lob type: " + smallLen); + throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "lob type: " + smallLen); } } - case ARRAY: + case ARRAY: { + if (columnType != null) { + TypeInfo elementType = (TypeInfo) columnType.getExtTypeInfo(); + return ValueArray.get(elementType, readArrayElements(buff, elementType), provider); + } + return ValueArray.get(readArrayElements(buff, null), provider); + } case ROW: { int len = readVarInt(buff); Value[] list = new Value[len]; - for (int i = 0; i < len; i++) { - list[i] = (Value) readValue(buff); - } - return type == ARRAY ? ValueArray.get(list) : ValueRow.get(list); - } - case RESULT_SET: { - SimpleResult rs = new SimpleResult(); - int columns = readVarInt(buff); - for (int i = 0; i < columns; i++) { - rs.addColumn(readString(buff), readString(buff), readVarInt(buff), readVarLong(buff), - readVarInt(buff)); - } - while (buff.get() != 0) { - Value[] o = new Value[columns]; - for (int i = 0; i < columns; i++) { - o[i] = (Value) readValue(buff); + if (columnType != null) { + ExtTypeInfoRow extTypeInfoRow = (ExtTypeInfoRow) columnType.getExtTypeInfo(); + Iterator> fields = extTypeInfoRow.getFields().iterator(); + for (int i = 0; i < len; i++) { + list[i] = readValue(buff, fields.next().getValue()); } - rs.addRow(o); + return ValueRow.get(columnType, list); } - return ValueResultSet.get(rs); - } - case GEOMETRY: { - int len = readVarInt(buff); - byte[] b = Utils.newBytes(len); - buff.get(b, 0, len); - return ValueGeometry.get(b); - } - case SPATIAL_KEY_2D: - return getSpatialDataType().read(buff); - case CUSTOM_DATA_TYPE: { - if (JdbcUtils.customDataTypesHandler != null) { - int customType = readVarInt(buff); - int len = readVarInt(buff); - byte[] b = Utils.newBytes(len); - buff.get(b, 0, len); - return 
JdbcUtils.customDataTypesHandler.convert( - ValueBytes.getNoCopy(b), customType); + TypeInfo[] columnTypes = rowFactory.getColumnTypes(); + for (int i = 0; i < len; i++) { + list[i] = readValue(buff, columnTypes[i]); } - throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, - "No CustomDataTypesHandler has been set up"); + return ValueRow.get(list); } + case GEOMETRY: + return ValueGeometry.get(readVarBytes(buff)); + case JSON: + return ValueJson.getInternal(readVarBytes(buff)); default: if (type >= INT_0_15 && type < INT_0_15 + 16) { - return ValueInt.get(type - INT_0_15); - } else if (type >= LONG_0_7 && type < LONG_0_7 + 8) { - return ValueLong.get(type - LONG_0_7); - } else if (type >= BYTES_0_31 && type < BYTES_0_31 + 32) { - int len = type - BYTES_0_31; + int i = type - INT_0_15; + if (columnType != null && columnType.getValueType() == Value.ENUM) { + return ((ExtTypeInfoEnum) columnType.getExtTypeInfo()).getValue(i, provider); + } + return ValueInteger.get(i); + } else if (type >= BIGINT_0_7 && type < BIGINT_0_7 + 8) { + return ValueBigint.get(type - BIGINT_0_7); + } else if (type >= VARBINARY_0_31 && type < VARBINARY_0_31 + 32) { + int len = type - VARBINARY_0_31; byte[] b = Utils.newBytes(len); buff.get(b, 0, len); - return ValueBytes.getNoCopy(b); - } else if (type >= STRING_0_31 && type < STRING_0_31 + 32) { - return ValueString.get(readString(buff, type - STRING_0_31)); + return ValueVarbinary.getNoCopy(b); + } else if (type >= VARCHAR_0_31 && type < VARCHAR_0_31 + 32) { + return ValueVarchar.get(readString(buff, type - VARCHAR_0_31)); } throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "type: " + type); } } - private static int readVarInt(ByteBuffer buff) { - return DataUtils.readVarInt(buff); + private LobDataDatabase readLobDataDatabase(ByteBuffer buff) { + int tableId = readVarInt(buff); + long lobId = readVarLong(buff); + LobDataDatabase lobData = new LobDataDatabase(handler, tableId, lobId); + return lobData; } - private static long 
readVarLong(ByteBuffer buff) { - return DataUtils.readVarLong(buff); + private Value[] readArrayElements(ByteBuffer buff, TypeInfo elementType) { + int len = readVarInt(buff); + Value[] list = new Value[len]; + for (int i = 0; i < len; i++) { + list[i] = readValue(buff, elementType); + } + return list; } - private static String readString(ByteBuffer buff, int len) { - return DataUtils.readString(buff, len); + private static byte[] readVarBytes(ByteBuffer buff) { + int len = readVarInt(buff); + byte[] b = Utils.newBytes(len); + buff.get(b, 0, len); + return b; } - private static String readString(ByteBuffer buff) { - int len = readVarInt(buff); - return DataUtils.readString(buff, len); + private static long readTimestampTime(ByteBuffer buff) { + return readVarLong(buff) * 1_000_000L + readVarInt(buff); } - @Override - public int hashCode() { - return compareMode.hashCode() ^ Arrays.hashCode(sortTypes); + private static int readTimeZone(ByteBuffer buff) { + byte b = buff.get(); + if (b == Byte.MAX_VALUE) { + return readVarInt(buff); + } else if (b == Byte.MIN_VALUE) { + return -readVarInt(buff); + } else { + return b * 900; + } } @Override @@ -737,7 +821,77 @@ public boolean equals(Object obj) { if (!compareMode.equals(v.compareMode)) { return false; } - return Arrays.equals(sortTypes, v.sortTypes); + int[] indexes = rowFactory == null ? null : rowFactory.getIndexes(); + int[] indexes2 = v.rowFactory == null ? null : v.rowFactory.getIndexes(); + return Arrays.equals(sortTypes, v.sortTypes) + && Arrays.equals(indexes, indexes2); + } + + @Override + public int hashCode() { + int[] indexes = rowFactory == null ? null : rowFactory.getIndexes(); + return super.hashCode() ^ Arrays.hashCode(indexes) + ^ compareMode.hashCode() ^ Arrays.hashCode(sortTypes); + } + + @Override + public void save(WriteBuffer buff, MetaType metaType) { + writeIntArray(buff, sortTypes); + int columnCount = rowFactory == null ? 
0 : rowFactory.getColumnCount(); + buff.putVarInt(columnCount); + int[] indexes = rowFactory == null ? null : rowFactory.getIndexes(); + writeIntArray(buff, indexes); + buff.put(rowFactory == null || rowFactory.getRowDataType().isStoreKeys() ? (byte) 1 : (byte) 0); + } + + private static void writeIntArray(WriteBuffer buff, int[] array) { + if(array == null) { + buff.putVarInt(0); + } else { + buff.putVarInt(array.length + 1); + for (int i : array) { + buff.putVarInt(i); + } + } + } + + @Override + public Factory getFactory() { + return FACTORY; + } + + private static final Factory FACTORY = new Factory(); + + public static final class Factory implements StatefulDataType.Factory { + + @Override + public DataType create(ByteBuffer buff, MetaType metaType, Database database) { + int[] sortTypes = readIntArray(buff); + int columnCount = DataUtils.readVarInt(buff); + int[] indexes = readIntArray(buff); + boolean storeKeys = buff.get() != 0; + CompareMode compareMode = database == null ? CompareMode.getInstance(null, 0) : database.getCompareMode(); + if (database == null) { + return new ValueDataType(); + } else if (sortTypes == null) { + return new ValueDataType(database, null); + } + RowFactory rowFactory = RowFactory.getDefaultRowFactory().createRowFactory(database, compareMode, database, + sortTypes, indexes, null, columnCount, storeKeys); + return rowFactory.getRowDataType(); + } + + private static int[] readIntArray(ByteBuffer buff) { + int len = DataUtils.readVarInt(buff) - 1; + if(len < 0) { + return null; + } + int[] res = new int[len]; + for (int i = 0; i < res.length; i++) { + res[i] = DataUtils.readVarInt(buff); + } + return res; + } } } diff --git a/h2/src/main/org/h2/mvstore/db/package.html b/h2/src/main/org/h2/mvstore/db/package.html index cfb522810c..efa1e98076 100644 --- a/h2/src/main/org/h2/mvstore/db/package.html +++ b/h2/src/main/org/h2/mvstore/db/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mvstore/package.html 
b/h2/src/main/org/h2/mvstore/package.html index d7988128dd..9ebeb43f22 100644 --- a/h2/src/main/org/h2/mvstore/package.html +++ b/h2/src/main/org/h2/mvstore/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mvstore/rtree/DefaultSpatial.java b/h2/src/main/org/h2/mvstore/rtree/DefaultSpatial.java new file mode 100644 index 0000000000..e8b7a200f2 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/rtree/DefaultSpatial.java @@ -0,0 +1,75 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.rtree; + +import java.util.Arrays; + +/** + * Class BasicSpatialImpl. + * + * @author Andrei Tokar + */ +final class DefaultSpatial implements Spatial +{ + private final long id; + private final float[] minMax; + + /** + * Create a new key. + * + * @param id the id + * @param minMax min x, max x, min y, max y, and so on + */ + public DefaultSpatial(long id, float... 
minMax) { + this.id = id; + this.minMax = minMax; + } + + private DefaultSpatial(long id, DefaultSpatial other) { + this.id = id; + this.minMax = other.minMax.clone(); + } + + @Override + public float min(int dim) { + return minMax[dim + dim]; + } + + @Override + public void setMin(int dim, float x) { + minMax[dim + dim] = x; + } + + @Override + public float max(int dim) { + return minMax[dim + dim + 1]; + } + + @Override + public void setMax(int dim, float x) { + minMax[dim + dim + 1] = x; + } + + @Override + public Spatial clone(long id) { + return new DefaultSpatial(id, this); + } + + @Override + public long getId() { + return id; + } + + @Override + public boolean isNull() { + return minMax.length == 0; + } + + @Override + public boolean equalsIgnoringId(Spatial o) { + return Arrays.equals(minMax, ((DefaultSpatial)o).minMax); + } +} diff --git a/h2/src/main/org/h2/mvstore/rtree/MVRTreeMap.java b/h2/src/main/org/h2/mvstore/rtree/MVRTreeMap.java index bbf5739408..4b8a7a60c1 100644 --- a/h2/src/main/org/h2/mvstore/rtree/MVRTreeMap.java +++ b/h2/src/main/org/h2/mvstore/rtree/MVRTreeMap.java @@ -1,16 +1,16 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.rtree; import java.util.ArrayList; +import java.util.Collection; import java.util.Iterator; import java.util.Map; import org.h2.mvstore.CursorPos; -import org.h2.mvstore.DataUtils; import org.h2.mvstore.MVMap; import org.h2.mvstore.Page; import org.h2.mvstore.RootReference; @@ -22,19 +22,19 @@ * * @param the value class */ -public final class MVRTreeMap extends MVMap { +public final class MVRTreeMap extends MVMap { /** * The spatial key type. 
*/ - final SpatialDataType keyType; + private final SpatialDataType keyType; private boolean quadraticSplit; - public MVRTreeMap(Map config) { - super(config); - keyType = (SpatialDataType) config.get("key"); - quadraticSplit = Boolean.valueOf(String.valueOf(config.get("quadraticSplit"))); + public MVRTreeMap(Map config, SpatialDataType keyType, DataType valueType) { + super(config, keyType, valueType); + this.keyType = keyType; + quadraticSplit = Boolean.parseBoolean(String.valueOf(config.get("quadraticSplit"))); } private MVRTreeMap(MVRTreeMap source) { @@ -54,14 +54,8 @@ public MVRTreeMap cloneIt() { * @param x the rectangle * @return the iterator */ - public RTreeCursor findIntersectingKeys(SpatialKey x) { - return new RTreeCursor(getRootPage(), x) { - @Override - protected boolean check(boolean leaf, SpatialKey key, - SpatialKey test) { - return keyType.isOverlap(key, test); - } - }; + public RTreeCursor findIntersectingKeys(Spatial x) { + return new IntersectsRTreeCursor<>(getRootPage(), x, keyType); } /** @@ -71,20 +65,11 @@ protected boolean check(boolean leaf, SpatialKey key, * @param x the rectangle * @return the iterator */ - public RTreeCursor findContainedKeys(SpatialKey x) { - return new RTreeCursor(getRootPage(), x) { - @Override - protected boolean check(boolean leaf, SpatialKey key, - SpatialKey test) { - if (leaf) { - return keyType.isInside(key, test); - } - return keyType.isOverlap(key, test); - } - }; + public RTreeCursor findContainedKeys(Spatial x) { + return new ContainsRTreeCursor<>(getRootPage(), x, keyType); } - private boolean contains(Page p, int index, Object key) { + private boolean contains(Page p, int index, Object key) { return keyType.contains(p.getKey(index), key); } @@ -95,9 +80,8 @@ private boolean contains(Page p, int index, Object key) { * @param key the key * @return the value, or null if not found */ - @SuppressWarnings("unchecked") @Override - public V get(Page p, Object key) { + public V get(Page p, Spatial key) { int 
keyCount = p.getKeyCount(); if (!p.isLeaf()) { for (int i = 0; i < keyCount; i++) { @@ -111,7 +95,7 @@ public V get(Page p, Object key) { } else { for (int i = 0; i < keyCount; i++) { if (keyType.equals(p.getKey(i), key)) { - return (V)p.getValue(i); + return p.getValue(i); } } } @@ -126,50 +110,80 @@ public V get(Page p, Object key) { */ @Override public V remove(Object key) { - return operate((SpatialKey) key, null, DecisionMaker.REMOVE); + return operate((Spatial) key, null, DecisionMaker.REMOVE); } @Override - public V operate(SpatialKey key, V value, DecisionMaker decisionMaker) { - beforeWrite(); + public V operate(Spatial key, V value, DecisionMaker decisionMaker) { int attempt = 0; + final Collection> removedPages = isPersistent() ? new ArrayList<>() : null; while(true) { - ++attempt; - RootReference rootReference = flushAndGetRoot(); - Page p = rootReference.root.copy(true); - V result = operate(p, key, value, decisionMaker); + RootReference rootReference = flushAndGetRoot(); + if (attempt++ == 0 && !rootReference.isLockedByCurrentThread()) { + beforeWrite(); + } + Page p = rootReference.root; + if (removedPages != null && p.getTotalCount() > 0) { + removedPages.add(p); + } + p = p.copy(); + V result = operate(p, key, value, decisionMaker, removedPages); if (!p.isLeaf() && p.getTotalCount() == 0) { - p.removePage(); + if (removedPages != null) { + removedPages.add(p); + } p = createEmptyLeaf(); } else if (p.getKeyCount() > store.getKeysPerPage() || p.getMemory() > store.getMaxPageSize() && p.getKeyCount() > 3) { // only possible if this is the root, else we would have // split earlier (this requires pageSplitSize is fixed) long totalCount = p.getTotalCount(); - Page split = split(p); - Object k1 = getBounds(p); - Object k2 = getBounds(split); - Object[] keys = {k1, k2}; - Page.PageReference[] children = { - new Page.PageReference(p), - new Page.PageReference(split), - Page.PageReference.EMPTY - }; + Page split = split(p); + Spatial k1 = getBounds(p); + 
Spatial k2 = getBounds(split); + Spatial[] keys = p.createKeyStorage(2); + keys[0] = k1; + keys[1] = k2; + Page.PageReference[] children = Page.createRefStorage(3); + children[0] = new Page.PageReference<>(p); + children[1] = new Page.PageReference<>(split); + children[2] = Page.PageReference.empty(); p = Page.createNode(this, keys, children, totalCount, 0); - if(store.getFileStore() != null) { - store.registerUnsavedPage(p.getMemory()); + if(isPersistent()) { + store.registerUnsavedMemory(p.getMemory()); } } - if(updateRoot(rootReference, p, attempt)) { - return result; + + if (removedPages == null) { + if (updateRoot(rootReference, p, attempt)) { + return result; + } + } else { + RootReference lockedRootReference = tryLock(rootReference, attempt); + if (lockedRootReference != null) { + try { + long version = lockedRootReference.version; + int unsavedMemory = 0; + for (Page page : removedPages) { + if (!page.isRemoved()) { + unsavedMemory += page.removePage(version); + } + } + store.registerUnsavedMemory(unsavedMemory); + } finally { + unlockRoot(p); + } + return result; + } + removedPages.clear(); } decisionMaker.reset(); } } - @SuppressWarnings("unchecked") - private V operate(Page p, Object key, V value, DecisionMaker decisionMaker) { - V result = null; + private V operate(Page p, Spatial key, V value, DecisionMaker decisionMaker, + Collection> removedPages) { + V result; if (p.isLeaf()) { int index = -1; int keyCount = p.getKeyCount(); @@ -178,11 +192,12 @@ private V operate(Page p, Object key, V value, DecisionMaker decision index = i; } } - result = index < 0 ? null : (V)p.getValue(index); + result = index < 0 ? 
null : p.getValue(index); Decision decision = decisionMaker.decide(result, value); switch (decision) { - case REPEAT: break; - case ABORT: break; + case REPEAT: + case ABORT: + break; case REMOVE: if(index >= 0) { p.remove(index); @@ -201,90 +216,65 @@ private V operate(Page p, Object key, V value, DecisionMaker decision return result; } - // p is a node - if(value == null) - { - for (int i = 0; i < p.getKeyCount(); i++) { - if (contains(p, i, key)) { - Page cOld = p.getChildPage(i); - // this will mark the old page as deleted - // so we need to update the parent in any case - // (otherwise the old page might be deleted again) - Page c = cOld.copy(true); - long oldSize = c.getTotalCount(); - result = operate(c, key, value, decisionMaker); - p.setChild(i, c); - if (oldSize == c.getTotalCount()) { - decisionMaker.reset(); - continue; - } - if (c.getTotalCount() == 0) { - // this child was deleted - p.remove(i); - if (p.getKeyCount() == 0) { - c.removePage(); - } - break; - } - Object oldBounds = p.getKey(i); - if (!keyType.isInside(key, oldBounds)) { - p.setKey(i, getBounds(c)); - } + // p is an internal node + int index = -1; + for (int i = 0; i < p.getKeyCount(); i++) { + if (contains(p, i, key)) { + Page c = p.getChildPage(i); + if(get(c, key) != null) { + index = i; break; } + if(index < 0) { + index = i; + } } - } else { - int index = -1; + } + if (index < 0) { + // a new entry, we don't know where to add yet + float min = Float.MAX_VALUE; for (int i = 0; i < p.getKeyCount(); i++) { - if (contains(p, i, key)) { - Page c = p.getChildPage(i); - if(get(c, key) != null) { - index = i; - break; - } - if(index < 0) { - index = i; - } + Object k = p.getKey(i); + float areaIncrease = keyType.getAreaIncrease(k, key); + if (areaIncrease < min) { + index = i; + min = areaIncrease; } } - if (index < 0) { - // a new entry, we don't know where to add yet - float min = Float.MAX_VALUE; - for (int i = 0; i < p.getKeyCount(); i++) { - Object k = p.getKey(i); - float areaIncrease 
= keyType.getAreaIncrease(k, key); - if (areaIncrease < min) { - index = i; - min = areaIncrease; - } - } + } + Page c = p.getChildPage(index); + if (removedPages != null) { + removedPages.add(c); + } + c = c.copy(); + if (c.getKeyCount() > store.getKeysPerPage() || c.getMemory() > store.getMaxPageSize() + && c.getKeyCount() > 4) { + // split on the way down + Page split = split(c); + p.setKey(index, getBounds(c)); + p.setChild(index, c); + p.insertNode(index, getBounds(split), split); + // now we are not sure where to add + result = operate(p, key, value, decisionMaker, removedPages); + } else { + result = operate(c, key, value, decisionMaker, removedPages); + Spatial bounds = p.getKey(index); + if (!keyType.contains(bounds, key)) { + bounds = keyType.createBoundingBox(bounds); + keyType.increaseBounds(bounds, key); + p.setKey(index, bounds); } - Page c = p.getChildPage(index).copy(true); - if (c.getKeyCount() > store.getKeysPerPage() || c.getMemory() > store.getMaxPageSize() - && c.getKeyCount() > 4) { - // split on the way down - Page split = split(c); - p.setKey(index, getBounds(c)); + if (c.getTotalCount() > 0) { p.setChild(index, c); - p.insertNode(index, getBounds(split), split); - // now we are not sure where to add - result = operate(p, key, value, decisionMaker); } else { - result = operate(c, key, value, decisionMaker); - Object bounds = p.getKey(index); - if (!keyType.contains(bounds, key)) { - bounds = keyType.createBoundingBox(bounds); - keyType.increaseBounds(bounds, key); - p.setKey(index, bounds); - } - p.setChild(index, c); + p.remove(index); } } return result; } - private Object getBounds(Page x) { - Object bounds = keyType.createBoundingBox(x.getKey(0)); + private Spatial getBounds(Page x) { + Spatial bounds = keyType.createBoundingBox(x.getKey(0)); int keyCount = x.getKeyCount(); for (int i = 1; i < keyCount; i++) { keyType.increaseBounds(bounds, x.getKey(i)); @@ -293,7 +283,7 @@ private Object getBounds(Page x) { } @Override - public V 
put(SpatialKey key, V value) { + public V put(Spatial key, V value) { return operate(key, value, DecisionMaker.PUT); } @@ -304,17 +294,17 @@ public V put(SpatialKey key, V value) { * @param key the key * @param value the value */ - public void add(SpatialKey key, V value) { + public void add(Spatial key, V value) { operate(key, value, DecisionMaker.PUT); } - private Page split(Page p) { + private Page split(Page p) { return quadraticSplit ? splitQuadratic(p) : splitLinear(p); } - private Page splitLinear(Page p) { + private Page splitLinear(Page p) { int keyCount = p.getKeyCount(); ArrayList keys = new ArrayList<>(keyCount); for (int i = 0; i < keyCount; i++) { @@ -324,8 +314,8 @@ private Page splitLinear(Page p) { if (extremes == null) { return splitQuadratic(p); } - Page splitA = newPage(p.isLeaf()); - Page splitB = newPage(p.isLeaf()); + Page splitA = newPage(p.isLeaf()); + Page splitB = newPage(p.isLeaf()); move(p, splitA, extremes[0]); if (extremes[1] > extremes[0]) { extremes[1]--; @@ -351,9 +341,9 @@ private Page splitLinear(Page p) { return splitA; } - private Page splitQuadratic(Page p) { - Page splitA = newPage(p.isLeaf()); - Page splitB = newPage(p.isLeaf()); + private Page splitQuadratic(Page p) { + Page splitA = newPage(p.isLeaf()); + Page splitB = newPage(p.isLeaf()); float largest = Float.MIN_VALUE; int ia = 0, ib = 0; int keyCount = p.getKeyCount(); @@ -409,22 +399,21 @@ private Page splitQuadratic(Page p) { return splitA; } - private Page newPage(boolean leaf) { - Page page = leaf ? createEmptyLeaf() : createEmptyNode(); - if(store.getFileStore() != null) - { - store.registerUnsavedPage(page.getMemory()); + private Page newPage(boolean leaf) { + Page page = leaf ? 
createEmptyLeaf() : createEmptyNode(); + if(isPersistent()) { + store.registerUnsavedMemory(page.getMemory()); } return page; } - private static void move(Page source, Page target, int sourceIndex) { - Object k = source.getKey(sourceIndex); + private static void move(Page source, Page target, int sourceIndex) { + Spatial k = source.getKey(sourceIndex); if (source.isLeaf()) { - Object v = source.getValue(sourceIndex); + V v = source.getValue(sourceIndex); target.insertLeaf(0, k, v); } else { - Page c = source.getChildPage(sourceIndex); + Page c = source.getChildPage(sourceIndex); target.insertNode(0, k, c); } source.remove(sourceIndex); @@ -437,16 +426,17 @@ private static void move(Page source, Page target, int sourceIndex) { * @param list the list * @param p the root page */ - public void addNodeKeys(ArrayList list, Page p) { + public void addNodeKeys(ArrayList list, Page p) { if (p != null && !p.isLeaf()) { int keyCount = p.getKeyCount(); for (int i = 0; i < keyCount; i++) { - list.add((SpatialKey) p.getKey(i)); + list.add(p.getKey(i)); addNodeKeys(list, p.getChildPage(i)); } } } + @SuppressWarnings("unused") public boolean isQuadraticSplit() { return quadraticSplit; } @@ -456,22 +446,22 @@ public void setQuadraticSplit(boolean quadraticSplit) { } @Override - protected int getChildPageCount(Page p) { + protected int getChildPageCount(Page p) { return p.getRawChildPageCount() - 1; } /** * A cursor to iterate over a subset of the keys. 
*/ - public static class RTreeCursor implements Iterator { + public abstract static class RTreeCursor implements Iterator { - private final SpatialKey filter; - private CursorPos pos; - private SpatialKey current; - private final Page root; + private final Spatial filter; + private CursorPos pos; + private Spatial current; + private final Page root; private boolean initialized; - protected RTreeCursor(Page root, SpatialKey filter) { + protected RTreeCursor(Page root, Spatial filter) { this.root = root; this.filter = filter; } @@ -480,7 +470,7 @@ protected RTreeCursor(Page root, SpatialKey filter) { public boolean hasNext() { if (!initialized) { // init - pos = new CursorPos(root, 0, null); + pos = new CursorPos<>(root, 0, null); fetchNext(); initialized = true; } @@ -500,30 +490,24 @@ public void skip(long n) { } @Override - public SpatialKey next() { + public Spatial next() { if (!hasNext()) { return null; } - SpatialKey c = current; + Spatial c = current; fetchNext(); return c; } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removing is not supported"); - } - /** * Fetch the next entry if there is one. 
*/ - protected void fetchNext() { + void fetchNext() { while (pos != null) { - Page p = pos.page; + Page p = pos.page; if (p.isLeaf()) { while (pos.index < p.getKeyCount()) { - SpatialKey c = (SpatialKey) p.getKey(pos.index++); + Spatial c = p.getKey(pos.index++); if (filter == null || check(true, c, filter)) { current = c; return; @@ -533,10 +517,10 @@ protected void fetchNext() { boolean found = false; while (pos.index < p.getKeyCount()) { int index = pos.index++; - SpatialKey c = (SpatialKey) p.getKey(index); + Spatial c = p.getKey(index); if (filter == null || check(false, c, filter)) { - Page child = pos.page.getChildPage(index); - pos = new CursorPos(child, 0, pos); + Page child = pos.page.getChildPage(index); + pos = new CursorPos<>(child, 0, pos); found = true; break; } @@ -559,11 +543,38 @@ protected void fetchNext() { * @param test the user-supplied test key * @return true if there is a match */ - @SuppressWarnings("unused") - protected boolean check(boolean leaf, SpatialKey key, SpatialKey test) { - return true; + protected abstract boolean check(boolean leaf, Spatial key, Spatial test); + } + + private static final class IntersectsRTreeCursor extends RTreeCursor { + private final SpatialDataType keyType; + + public IntersectsRTreeCursor(Page root, Spatial filter, SpatialDataType keyType) { + super(root, filter); + this.keyType = keyType; } + @Override + protected boolean check(boolean leaf, Spatial key, + Spatial test) { + return keyType.isOverlap(key, test); + } + } + + private static final class ContainsRTreeCursor extends RTreeCursor { + private final SpatialDataType keyType; + + public ContainsRTreeCursor(Page root, Spatial filter, SpatialDataType keyType) { + super(root, filter); + this.keyType = keyType; + } + + @Override + protected boolean check(boolean leaf, Spatial key, Spatial test) { + return leaf ? 
+ keyType.isInside(key, test) : + keyType.isOverlap(key, test); + } } @Override @@ -576,7 +587,7 @@ public String getType() { * * @param the value type */ - public static class Builder extends MVMap.BasicBuilder, SpatialKey, V> { + public static class Builder extends MVMap.BasicBuilder, Spatial, V> { private int dimensions = 2; @@ -606,14 +617,14 @@ public Builder dimensions(int dimensions) { * @return this */ @Override - public Builder valueType(DataType valueType) { + public Builder valueType(DataType valueType) { setValueType(valueType); return this; } @Override public MVRTreeMap create(Map config) { - return new MVRTreeMap<>(config); + return new MVRTreeMap<>(config, (SpatialDataType)getKeyType(), getValueType()); } } } diff --git a/h2/src/main/org/h2/mvstore/rtree/Spatial.java b/h2/src/main/org/h2/mvstore/rtree/Spatial.java new file mode 100644 index 0000000000..1b9682d354 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/rtree/Spatial.java @@ -0,0 +1,76 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.rtree; + +/** + * Interface Spatial represents boxes in 2+ dimensional space, + * where total ordering is not that straight-forward. + * They can be used as keys for MVRTree. + * + * @author Andrei Tokar + */ +public interface Spatial +{ + /** + * Get the minimum value for the given dimension. + * + * @param dim the dimension + * @return the value + */ + float min(int dim); + + /** + * Set the minimum value for the given dimension. + * + * @param dim the dimension + * @param x the value + */ + void setMin(int dim, float x); + + /** + * Get the maximum value for the given dimension. + * + * @param dim the dimension + * @return the value + */ + float max(int dim); + + /** + * Set the maximum value for the given dimension. 
+ * + * @param dim the dimension + * @param x the value + */ + void setMax(int dim, float x); + + /** + * Creates a copy of this Spatial object with different id. + * + * @param id for the new Spatial object + * @return a clone + */ + Spatial clone(long id); + + /** + * Get id of this Spatial object + * @return id + */ + long getId(); + + /** + * Test whether this object has no value + * @return true if it is NULL, false otherwise + */ + boolean isNull(); + + /** + * Check whether two objects are equals, but do not compare the id fields. + * + * @param o the other key + * @return true if the contents are the same + */ + boolean equalsIgnoringId(Spatial o); +} diff --git a/h2/src/main/org/h2/mvstore/rtree/SpatialDataType.java b/h2/src/main/org/h2/mvstore/rtree/SpatialDataType.java index 392a956c78..6af8a5887e 100644 --- a/h2/src/main/org/h2/mvstore/rtree/SpatialDataType.java +++ b/h2/src/main/org/h2/mvstore/rtree/SpatialDataType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.rtree; @@ -10,14 +10,14 @@ import org.h2.mvstore.DataUtils; import org.h2.mvstore.WriteBuffer; -import org.h2.mvstore.type.DataType; +import org.h2.mvstore.type.BasicDataType; /** * A spatial data type. This class supports up to 31 dimensions. Each dimension * can have a minimum and a maximum value of type float. For each dimension, the * maximum value is only stored when it is not the same as the minimum. */ -public class SpatialDataType implements DataType { +public class SpatialDataType extends BasicDataType { private final int dimensions; @@ -31,8 +31,24 @@ public SpatialDataType(int dimensions) { this.dimensions = dimensions; } + /** + * Creates spatial object with specified parameters. 
+ * + * @param id the ID + * @param minMax min x, max x, min y, max y, and so on + * @return the spatial object + */ + protected Spatial create(long id, float... minMax) { + return new DefaultSpatial(id, minMax); + } + @Override - public int compare(Object a, Object b) { + public Spatial[] createStorage(int size) { + return new Spatial[size]; + } + + @Override + public int compare(Spatial a, Spatial b) { if (a == b) { return 0; } else if (a == null) { @@ -40,8 +56,8 @@ public int compare(Object a, Object b) { } else if (b == null) { return 1; } - long la = ((SpatialKey) a).getId(); - long lb = ((SpatialKey) b).getId(); + long la = a.getId(); + long lb = b.getId(); return Long.compare(la, lb); } @@ -58,33 +74,18 @@ public boolean equals(Object a, Object b) { } else if (a == null || b == null) { return false; } - long la = ((SpatialKey) a).getId(); - long lb = ((SpatialKey) b).getId(); + long la = ((Spatial) a).getId(); + long lb = ((Spatial) b).getId(); return la == lb; } @Override - public int getMemory(Object obj) { + public int getMemory(Spatial obj) { return 40 + dimensions * 4; } @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); - } - } - - @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } - } - - @Override - public void write(WriteBuffer buff, Object obj) { - SpatialKey k = (SpatialKey) obj; + public void write(WriteBuffer buff, Spatial k) { if (k.isNull()) { buff.putVarInt(-1); buff.putVarLong(k.getId()); @@ -107,11 +108,11 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff) { + public Spatial read(ByteBuffer buff) { int flags = DataUtils.readVarInt(buff); if (flags == -1) { long id = DataUtils.readVarLong(buff); - return new SpatialKey(id); + return create(id); } float[] minMax = new float[dimensions * 2]; for (int i = 0; i 
< dimensions; i++) { @@ -126,19 +127,17 @@ public Object read(ByteBuffer buff) { minMax[i + i + 1] = max; } long id = DataUtils.readVarLong(buff); - return new SpatialKey(id, minMax); + return create(id, minMax); } /** * Check whether the two objects overlap. * - * @param objA the first object - * @param objB the second object + * @param a the first object + * @param b the second object * @return true if they overlap */ - public boolean isOverlap(Object objA, Object objB) { - SpatialKey a = (SpatialKey) objA; - SpatialKey b = (SpatialKey) objB; + public boolean isOverlap(Spatial a, Spatial b) { if (a.isNull() || b.isNull()) { return false; } @@ -157,8 +156,8 @@ public boolean isOverlap(Object objA, Object objB) { * @param add the value */ public void increaseBounds(Object bounds, Object add) { - SpatialKey a = (SpatialKey) add; - SpatialKey b = (SpatialKey) bounds; + Spatial a = (Spatial) add; + Spatial b = (Spatial) bounds; if (a.isNull() || b.isNull()) { return; } @@ -182,8 +181,8 @@ public void increaseBounds(Object bounds, Object add) { * @return the area */ public float getAreaIncrease(Object objA, Object objB) { - SpatialKey b = (SpatialKey) objB; - SpatialKey a = (SpatialKey) objA; + Spatial b = (Spatial) objB; + Spatial a = (Spatial) objA; if (a.isNull() || b.isNull()) { return 0; } @@ -212,8 +211,8 @@ public float getAreaIncrease(Object objA, Object objB) { * @return the area */ float getCombinedArea(Object objA, Object objB) { - SpatialKey a = (SpatialKey) objA; - SpatialKey b = (SpatialKey) objB; + Spatial a = (Spatial) objA; + Spatial b = (Spatial) objB; if (a.isNull()) { return getArea(b); } else if (b.isNull()) { @@ -228,7 +227,7 @@ float getCombinedArea(Object objA, Object objB) { return area; } - private float getArea(SpatialKey a) { + private float getArea(Spatial a) { if (a.isNull()) { return 0; } @@ -247,8 +246,8 @@ private float getArea(SpatialKey a) { * @return the area */ public boolean contains(Object objA, Object objB) { - SpatialKey a = 
(SpatialKey) objA; - SpatialKey b = (SpatialKey) objB; + Spatial a = (Spatial) objA; + Spatial b = (Spatial) objB; if (a.isNull() || b.isNull()) { return false; } @@ -269,8 +268,8 @@ public boolean contains(Object objA, Object objB) { * @return true if a is completely inside b */ public boolean isInside(Object objA, Object objB) { - SpatialKey a = (SpatialKey) objA; - SpatialKey b = (SpatialKey) objB; + Spatial a = (Spatial) objA; + Spatial b = (Spatial) objB; if (a.isNull() || b.isNull()) { return false; } @@ -288,12 +287,12 @@ public boolean isInside(Object objA, Object objB) { * @param objA the object * @return the bounding box */ - Object createBoundingBox(Object objA) { - SpatialKey a = (SpatialKey) objA; + Spatial createBoundingBox(Object objA) { + Spatial a = (Spatial) objA; if (a.isNull()) { return a; } - return new SpatialKey(0, a); + return a.clone(0); } /** @@ -309,8 +308,8 @@ public int[] getExtremes(ArrayList list) { if (list.isEmpty()) { return null; } - SpatialKey bounds = (SpatialKey) createBoundingBox(list.get(0)); - SpatialKey boundsInner = (SpatialKey) createBoundingBox(bounds); + Spatial bounds = createBoundingBox(list.get(0)); + Spatial boundsInner = createBoundingBox(bounds); for (int i = 0; i < dimensions; i++) { float t = boundsInner.min(i); boundsInner.setMin(i, boundsInner.max(i)); @@ -342,7 +341,7 @@ public int[] getExtremes(ArrayList list) { int firstIndex = -1, lastIndex = -1; for (int i = 0; i < list.size() && (firstIndex < 0 || lastIndex < 0); i++) { - SpatialKey o = (SpatialKey) list.get(i); + Spatial o = (Spatial) list.get(i); if (firstIndex < 0 && o.max(bestDim) == min) { firstIndex = i; } else if (lastIndex < 0 && o.min(bestDim) == max) { @@ -355,7 +354,7 @@ public int[] getExtremes(ArrayList list) { private static ArrayList getNotNull(ArrayList list) { boolean foundNull = false; for (Object o : list) { - SpatialKey a = (SpatialKey) o; + Spatial a = (Spatial) o; if (a.isNull()) { foundNull = true; break; @@ -366,7 +365,7 @@ 
private static ArrayList getNotNull(ArrayList list) { } ArrayList result = new ArrayList<>(); for (Object o : list) { - SpatialKey a = (SpatialKey) o; + Spatial a = (Spatial) o; if (!a.isNull()) { result.add(a); } @@ -375,8 +374,8 @@ private static ArrayList getNotNull(ArrayList list) { } private void increaseMaxInnerBounds(Object bounds, Object add) { - SpatialKey b = (SpatialKey) bounds; - SpatialKey a = (SpatialKey) add; + Spatial b = (Spatial) bounds; + Spatial a = (Spatial) add; for (int i = 0; i < dimensions; i++) { b.setMin(i, Math.min(b.min(i), a.max(i))); b.setMax(i, Math.max(b.max(i), a.min(i))); diff --git a/h2/src/main/org/h2/mvstore/rtree/package.html b/h2/src/main/org/h2/mvstore/rtree/package.html index 051d9b9e65..240224c617 100644 --- a/h2/src/main/org/h2/mvstore/rtree/package.html +++ b/h2/src/main/org/h2/mvstore/rtree/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mvstore/tx/CommitDecisionMaker.java b/h2/src/main/org/h2/mvstore/tx/CommitDecisionMaker.java index 5a8530e421..f3867b3b86 100644 --- a/h2/src/main/org/h2/mvstore/tx/CommitDecisionMaker.java +++ b/h2/src/main/org/h2/mvstore/tx/CommitDecisionMaker.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.tx; @@ -15,7 +15,7 @@ * * @author Andrei Tokar */ -final class CommitDecisionMaker extends MVMap.DecisionMaker { +final class CommitDecisionMaker extends MVMap.DecisionMaker> { private long undoKey; private MVMap.Decision decision; @@ -25,7 +25,7 @@ void setUndoKey(long undoKey) { } @Override - public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { + public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { assert decision == null; if (existingValue == null || // map entry was treated as already committed, and then @@ -47,10 +47,10 @@ public MVMap.Decision decide(VersionedValue existingValue, VersionedValue provid @SuppressWarnings("unchecked") @Override - public VersionedValue selectValue(VersionedValue existingValue, VersionedValue providedValue) { + public > T selectValue(T existingValue, T providedValue) { assert decision == MVMap.Decision.PUT; assert existingValue != null; - return VersionedValueCommitted.getInstance(existingValue.getCurrentValue()); + return (T) VersionedValueCommitted.getInstance(existingValue.getCurrentValue()); } @Override diff --git a/h2/src/main/org/h2/mvstore/tx/Record.java b/h2/src/main/org/h2/mvstore/tx/Record.java new file mode 100644 index 0000000000..4da15fdb44 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/Record.java @@ -0,0 +1,118 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import java.nio.ByteBuffer; +import org.h2.engine.Constants; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.BasicDataType; +import org.h2.value.VersionedValue; + +/** + * Class Record is a value for undoLog. + * It contains information about a single change of some map. 
+ * + * @author Andrei Tokar + */ +final class Record { + + // -1 is a bogus map id + static final Record COMMIT_MARKER = new Record<>(-1, null, null); + + /** + * Map id for this change is related to + */ + final int mapId; + + /** + * Key of the changed map entry key + */ + final K key; + + /** + * Value of the entry before change. + * It is null if entry did not exist before the change (addition). + */ + final VersionedValue oldValue; + + Record(int mapId, K key, VersionedValue oldValue) { + this.mapId = mapId; + this.key = key; + this.oldValue = oldValue; + } + + @Override + public String toString() { + return "mapId=" + mapId + ", key=" + key + ", value=" + oldValue; + } + + /** + * A data type for undo log values + */ + static final class Type extends BasicDataType> { + private final TransactionStore transactionStore; + + Type(TransactionStore transactionStore) { + this.transactionStore = transactionStore; + } + + @Override + public int getMemory(Record record) { + int result = Constants.MEMORY_OBJECT + 4 + 3 * Constants.MEMORY_POINTER; + if (record.mapId >= 0) { + MVMap> map = transactionStore.getMap(record.mapId); + result += map.getKeyType().getMemory(record.key) + + map.getValueType().getMemory(record.oldValue); + } + return result; + } + + @Override + public int compare(Record aObj, Record bObj) { + throw new UnsupportedOperationException(); + } + + @Override + public void write(WriteBuffer buff, Record record) { + buff.putVarInt(record.mapId); + if (record.mapId >= 0) { + MVMap> map = transactionStore.getMap(record.mapId); + map.getKeyType().write(buff, record.key); + VersionedValue oldValue = record.oldValue; + if (oldValue == null) { + buff.put((byte) 0); + } else { + buff.put((byte) 1); + map.getValueType().write(buff, oldValue); + } + } + } + + @SuppressWarnings("unchecked") + @Override + public Record read(ByteBuffer buff) { + int mapId = DataUtils.readVarInt(buff); + if (mapId < 0) { + return (Record)COMMIT_MARKER; + } + MVMap> map = 
transactionStore.getMap(mapId); + K key = map.getKeyType().read(buff); + VersionedValue oldValue = null; + if (buff.get() == 1) { + oldValue = map.getValueType().read(buff); + } + return new Record<>(mapId, key, oldValue); + } + + @SuppressWarnings("unchecked") + @Override + public Record[] createStorage(int size) { + return new Record[size]; + } + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/RollbackDecisionMaker.java b/h2/src/main/org/h2/mvstore/tx/RollbackDecisionMaker.java index 987b72b6f5..923605ed56 100644 --- a/h2/src/main/org/h2/mvstore/tx/RollbackDecisionMaker.java +++ b/h2/src/main/org/h2/mvstore/tx/RollbackDecisionMaker.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.tx; @@ -13,7 +13,7 @@ * * @author Andrei Tokar */ -final class RollbackDecisionMaker extends MVMap.DecisionMaker { +final class RollbackDecisionMaker extends MVMap.DecisionMaker> { private final TransactionStore store; private final long transactionId; private final long toLogId; @@ -28,25 +28,27 @@ final class RollbackDecisionMaker extends MVMap.DecisionMaker { this.listener = listener; } + @SuppressWarnings({"unchecked","rawtypes"}) @Override - public MVMap.Decision decide(Object[] existingValue, Object[] providedValue) { + public MVMap.Decision decide(Record existingValue, Record providedValue) { assert decision == null; if (existingValue == null) { // normally existingValue will always be there except of db initialization // where some undo log entry was captured on disk but actual map entry was not decision = MVMap.Decision.ABORT; } else { - VersionedValue valueToRestore = (VersionedValue) existingValue[2]; + VersionedValue valueToRestore = existingValue.oldValue; long operationId; if 
(valueToRestore == null || (operationId = valueToRestore.getOperationId()) == 0 || TransactionStore.getTransactionId(operationId) == transactionId && TransactionStore.getLogId(operationId) < toLogId) { - int mapId = (Integer) existingValue[0]; - MVMap map = store.openMap(mapId); + int mapId = existingValue.mapId; + MVMap> map = store.openMap(mapId); if (map != null && !map.isClosed()) { - Object key = existingValue[1]; - VersionedValue previousValue = map.operate(key, valueToRestore, MVMap.DecisionMaker.DEFAULT); + Object key = existingValue.key; + VersionedValue previousValue = map.operate(key, valueToRestore, + MVMap.DecisionMaker.DEFAULT); listener.onRollback(map, key, previousValue, valueToRestore); } } diff --git a/h2/src/main/org/h2/mvstore/tx/Snapshot.java b/h2/src/main/org/h2/mvstore/tx/Snapshot.java new file mode 100644 index 0000000000..224d1ce1ff --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/Snapshot.java @@ -0,0 +1,54 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import java.util.BitSet; + +import org.h2.mvstore.RootReference; + +/** + * Snapshot of the map root and committing transactions. + */ +final class Snapshot { + + /** + * The root reference. + */ + final RootReference root; + + /** + * The committing transactions (see also TransactionStore.committingTransactions). 
+ */ + final BitSet committingTransactions; + + Snapshot(RootReference root, BitSet committingTransactions) { + this.root = root; + this.committingTransactions = committingTransactions; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + committingTransactions.hashCode(); + result = prime * result + root.hashCode(); + return result; + } + + @SuppressWarnings("unchecked") + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof Snapshot)) { + return false; + } + Snapshot other = (Snapshot) obj; + return committingTransactions == other.committingTransactions && root == other.root; + } + +} diff --git a/h2/src/main/org/h2/mvstore/tx/Transaction.java b/h2/src/main/org/h2/mvstore/tx/Transaction.java index 4a1c2a2b30..892bf4ef79 100644 --- a/h2/src/main/org/h2/mvstore/tx/Transaction.java +++ b/h2/src/main/org/h2/mvstore/tx/Transaction.java @@ -1,22 +1,28 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.tx; +import java.util.BitSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; +import org.h2.engine.IsolationLevel; import org.h2.mvstore.DataUtils; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; +import org.h2.mvstore.RootReference; import org.h2.mvstore.type.DataType; import org.h2.value.VersionedValue; -import java.util.Iterator; -import java.util.concurrent.atomic.AtomicLong; /** * A transaction. */ -public class Transaction { +public final class Transaction { /** * The status of a closed transaction (committed or rolled back). 
@@ -126,7 +132,7 @@ public class Transaction { /** * How long to wait for blocking transaction to commit or rollback. */ - final int timeoutMillis; + int timeoutMillis; /** * Identification of the owner of this transaction, @@ -142,7 +148,7 @@ public class Transaction { /** * Map on which this transaction is blocked. */ - private MVMap blockingMap; + private String blockingMapName; /** * Key in blockingMap on which this transaction is blocked. @@ -154,17 +160,33 @@ public class Transaction { */ private volatile boolean notificationRequested; + /** + * RootReferences for undo log snapshots + */ + private RootReference>[] undoLogRootReferences; + + /** + * Map of transactional maps for this transaction + */ + private final Map> transactionMaps = new HashMap<>(); + + /** + * The current isolation level. + */ + final IsolationLevel isolationLevel; + Transaction(TransactionStore store, int transactionId, long sequenceNum, int status, String name, long logId, int timeoutMillis, int ownerId, - TransactionStore.RollbackListener listener) { + IsolationLevel isolationLevel, TransactionStore.RollbackListener listener) { this.store = store; this.transactionId = transactionId; this.sequenceNum = sequenceNum; this.statusAndLogId = new AtomicLong(composeState(status, logId, false)); this.name = name; - this.timeoutMillis = timeoutMillis; + setTimeoutMillis(timeoutMillis); this.ownerId = ownerId; + this.isolationLevel = isolationLevel; this.listener = listener; } @@ -180,6 +202,10 @@ public int getStatus() { return getStatus(statusAndLogId.get()); } + RootReference>[] getUndoLogRootReferences() { + return undoLogRootReferences; + } + /** * Changes transaction status to a specified value * @param status to be set @@ -207,7 +233,8 @@ private long setStatus(int status) { break; case STATUS_ROLLED_BACK: valid = currentStatus == STATUS_OPEN || - currentStatus == STATUS_PREPARED; + currentStatus == STATUS_PREPARED || + currentStatus == STATUS_ROLLING_BACK; break; case STATUS_CLOSED: 
valid = currentStatus == STATUS_COMMITTED || @@ -219,10 +246,10 @@ private long setStatus(int status) { break; } if (!valid) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE, "Transaction was illegally transitioned from {0} to {1}", - STATUS_NAMES[currentStatus], STATUS_NAMES[status]); + getStatusName(currentStatus), getStatusName(status)); } long newState = composeState(status, logId, hasRollback(currentState)); if (statusAndLogId.compareAndSet(currentState, newState)) { @@ -264,20 +291,99 @@ public long setSavepoint() { return getLogId(); } + /** + * Returns whether statement dependencies are currently set. + * + * @return whether statement dependencies are currently set + */ + public boolean hasStatementDependencies() { + return !transactionMaps.isEmpty(); + } + + /** + * Returns the isolation level of this transaction. + * + * @return the isolation level of this transaction + */ + public IsolationLevel getIsolationLevel() { + return isolationLevel; + } + + boolean isReadCommitted() { + return isolationLevel == IsolationLevel.READ_COMMITTED; + } + + /** + * Whether this transaction has isolation level READ_COMMITTED or below. + * @return true if isolation level is READ_COMMITTED or READ_UNCOMMITTED + */ + public boolean allowNonRepeatableRead() { + return isolationLevel.allowNonRepeatableRead(); + } + /** * Mark an entry into a new SQL statement execution within this transaction. 
+ * + * @param maps + * set of maps used by transaction or statement is about to be executed */ - public void markStatementStart() { + @SuppressWarnings({"unchecked","rawtypes"}) + public void markStatementStart(HashSet>> maps) { markStatementEnd(); - txCounter = store.store.registerVersionUsage(); + if (txCounter == null) { + txCounter = store.store.registerVersionUsage(); + } + + if (maps != null && !maps.isEmpty()) { + // The purpose of the following loop is to get a coherent picture + // In order to get such a "snapshot", we wait for a moment of silence, + // when no new transaction were committed / closed. + BitSet committingTransactions; + do { + committingTransactions = store.committingTransactions.get(); + for (MVMap> map : maps) { + TransactionMap txMap = openMapX(map); + txMap.setStatementSnapshot(new Snapshot(map.flushAndGetRoot(), committingTransactions)); + } + if (isReadCommitted()) { + undoLogRootReferences = store.collectUndoLogRootReferences(); + } + } while (committingTransactions != store.committingTransactions.get()); + // Now we have a snapshot, where each map RootReference point to state of the map, + // undoLogRootReferences captures the state of undo logs + // and committingTransactions mask tells us which of seemingly uncommitted changes + // should be considered as committed. + // Subsequent processing uses this snapshot info only. + for (MVMap> map : maps) { + TransactionMap txMap = openMapX(map); + txMap.promoteSnapshot(); + } + } } /** * Mark an exit from SQL statement execution within this transaction. 
*/ public void markStatementEnd() { + if (allowNonRepeatableRead()) { + releaseSnapshot(); + } + for (TransactionMap transactionMap : transactionMaps.values()) { + transactionMap.setStatementSnapshot(null); + } + } + + private void markTransactionEnd() { + if (!allowNonRepeatableRead()) { + releaseSnapshot(); + } + } + + private void releaseSnapshot() { + transactionMaps.clear(); + undoLogRootReferences = null; MVStore.TxCounter counter = txCounter; - if(counter != null) { + if (counter != null) { txCounter = null; store.store.deregisterVersionUsage(counter); } @@ -286,24 +392,22 @@ public void markStatementEnd() { /** * Add a log entry. * - * @param mapId the map id - * @param key the key - * @param oldValue the old value + * @param logRecord to append * * @return key for the newly added undo log entry */ - long log(int mapId, Object key, VersionedValue oldValue) { + long log(Record logRecord) { long currentState = statusAndLogId.getAndIncrement(); long logId = getLogId(currentState); if (logId >= LOG_ID_LIMIT) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_TRANSACTION_TOO_BIG, "Transaction {0} has too many changes", transactionId); } int currentStatus = getStatus(currentState); checkOpen(currentStatus); - long undoKey = store.addUndoLogRecord(transactionId, logId, new Object[]{ mapId, key, oldValue }); + long undoKey = store.addUndoLogRecord(transactionId, logId, logRecord); return undoKey; } @@ -314,7 +418,7 @@ void logUndo() { long currentState = statusAndLogId.decrementAndGet(); long logId = getLogId(currentState); if (logId >= LOG_ID_LIMIT) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_TRANSACTION_CORRUPT, "Transaction {0} has internal error", transactionId); @@ -347,9 +451,10 @@ public TransactionMap openMap(String name) { * @return the transaction map */ public TransactionMap openMap(String name, - DataType keyType, DataType valueType) { - MVMap map 
= store.openMap(name, keyType, valueType); - return openMap(map); + DataType keyType, + DataType valueType) { + MVMap> map = store.openVersionedMap(name, keyType, valueType); + return openMapX(map); } /** @@ -360,9 +465,16 @@ public TransactionMap openMap(String name, * @param map the base map * @return the transactional map */ - public TransactionMap openMap(MVMap map) { + @SuppressWarnings("unchecked") + public TransactionMap openMapX(MVMap> map) { checkNotClosed(); - return new TransactionMap<>(this, map); + int id = map.getId(); + TransactionMap transactionMap = (TransactionMap)transactionMaps.get(id); + if (transactionMap == null) { + transactionMap = new TransactionMap<>(this, map); + transactionMaps.put(id, transactionMap); + } + return transactionMap; } /** @@ -379,12 +491,14 @@ public void prepare() { */ public void commit() { assert store.openTransactions.get().get(transactionId); + markTransactionEnd(); Throwable ex = null; boolean hasChanges = false; + int previousStatus = STATUS_OPEN; try { long state = setStatus(STATUS_COMMITTED); hasChanges = hasChanges(state); - int previousStatus = getStatus(state); + previousStatus = getStatus(state); if (hasChanges) { store.commit(this, previousStatus == STATUS_COMMITTED); } @@ -392,13 +506,15 @@ public void commit() { ex = e; throw e; } finally { - try { - store.endTransaction(this, hasChanges); - } catch (Throwable e) { - if (ex == null) { - throw e; - } else { - ex.addSuppressed(e); + if (isActive(previousStatus)) { + try { + store.endTransaction(this, hasChanges); + } catch (Throwable e) { + if (ex == null) { + throw e; + } else { + ex.addSuppressed(e); + } } } } @@ -417,9 +533,7 @@ public void rollbackToSavepoint(long savepointId) { try { store.rollbackTo(this, logId, savepointId); } finally { - if (notificationRequested) { - notifyAllWaitingTransactions(); - } + notifyAllWaitingTransactions(); long expectedState = composeState(STATUS_ROLLING_BACK, logId, hasRollback(lastState)); long newState = 
composeState(STATUS_OPEN, savepointId, true); do { @@ -428,7 +542,7 @@ public void rollbackToSavepoint(long savepointId) { } // this is moved outside of finally block to avert masking original exception, if any if (!success) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE, "Transaction {0} concurrently modified while rollback to savepoint was in progress", transactionId); @@ -439,19 +553,27 @@ public void rollbackToSavepoint(long savepointId) { * Roll the transaction back. Afterwards, this transaction is closed. */ public void rollback() { + markTransactionEnd(); Throwable ex = null; + int status = STATUS_OPEN; try { long lastState = setStatus(STATUS_ROLLED_BACK); + status = getStatus(lastState); long logId = getLogId(lastState); if (logId > 0) { store.rollbackTo(this, logId, 0); } } catch (Throwable e) { - ex = e; - throw e; + status = getStatus(); + if (isActive(status)) { + ex = e; + throw e; + } } finally { try { - store.endTransaction(this, true); + if (isActive(status)) { + store.endTransaction(this, true); + } } catch (Throwable e) { if (ex == null) { throw e; @@ -462,6 +584,12 @@ public void rollback() { } } + private static boolean isActive(int status) { + return status != STATUS_CLOSED + && status != STATUS_COMMITTED + && status != STATUS_ROLLED_BACK; + } + /** * Get the list of changes, starting with the latest change, up to the * given savepoint (in reverse order than they occurred). The value of @@ -475,6 +603,15 @@ public Iterator getChanges(long savepointId) { return store.getChanges(this, getLogId(), savepointId); } + /** + * Sets the new lock timeout. + * + * @param timeoutMillis the new lock timeout in milliseconds + */ + public void setTimeoutMillis(int timeoutMillis) { + this.timeoutMillis = timeoutMillis > 0 ? 
timeoutMillis : store.timeoutMillis; + } + private long getLogId() { return getLogId(statusAndLogId.get()); } @@ -484,9 +621,9 @@ private long getLogId() { */ private void checkOpen(int status) { if (status != STATUS_OPEN) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE, - "Transaction {0} has status {1}, not OPEN", transactionId, STATUS_NAMES[status]); + "Transaction {0} has status {1}, not OPEN", transactionId, getStatusName(status)); } } @@ -495,7 +632,7 @@ private void checkOpen(int status) { */ private void checkNotClosed() { if (getStatus() == STATUS_CLOSED) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_CLOSED, "Transaction {0} is closed", transactionId); } } @@ -504,15 +641,20 @@ private void checkNotClosed() { * Transition this transaction into a closed state. */ void closeIt() { + transactionMaps.clear(); long lastState = setStatus(STATUS_CLOSED); store.store.deregisterVersionUsage(txCounter); - if((hasChanges(lastState) || hasRollback(lastState)) && notificationRequested) { + if((hasChanges(lastState) || hasRollback(lastState))) { notifyAllWaitingTransactions(); } } - private synchronized void notifyAllWaitingTransactions() { - notifyAll(); + private void notifyAllWaitingTransactions() { + if (notificationRequested) { + synchronized (this) { + notifyAll(); + } + } } /** @@ -520,61 +662,81 @@ private synchronized void notifyAllWaitingTransactions() { * because both of them try to modify the same map entry. 
* * @param toWaitFor transaction to wait for - * @param map containing blocking entry + * @param mapName name of the map containing blocking entry * @param key of the blocking entry * @return true if other transaction was closed and this one can proceed, false if timed out */ - public boolean waitFor(Transaction toWaitFor, MVMap map, Object key) { + public boolean waitFor(Transaction toWaitFor, String mapName, Object key) { blockingTransaction = toWaitFor; - blockingMap = map; + blockingMapName = mapName; blockingKey = key; if (isDeadlocked(toWaitFor)) { - StringBuilder details = new StringBuilder( - String.format("Transaction %d has been chosen as a deadlock victim. Details:%n", transactionId)); - for (Transaction tx = toWaitFor, nextTx; (nextTx = tx.blockingTransaction) != null; tx = nextTx) { - details.append(String.format( - "Transaction %d attempts to update map <%s> entry with key <%s> modified by transaction %s%n", - tx.transactionId, tx.blockingMap.getName(), tx.blockingKey, tx.blockingTransaction)); - if (nextTx == this) { - details.append(String.format( - "Transaction %d attempts to update map <%s> entry with key <%s>" - + " modified by transaction %s%n", - transactionId, blockingMap.getName(), blockingKey, toWaitFor)); - if (isDeadlocked(toWaitFor)) { - throw DataUtils.newIllegalStateException(DataUtils.ERROR_TRANSACTIONS_DEADLOCK, - details.toString()); - } - } - } - } - - try { - return toWaitFor.waitForThisToEnd(timeoutMillis); - } finally { - blockingMap = null; - blockingKey = null; - blockingTransaction = null; + tryThrowDeadLockException(false); } + boolean result = toWaitFor.waitForThisToEnd(timeoutMillis, this); + blockingMapName = null; + blockingKey = null; + blockingTransaction = null; + return result; } private boolean isDeadlocked(Transaction toWaitFor) { + // use transaction sequence No as a tie-breaker + // the youngest transaction should be selected as a victim + Transaction youngest = toWaitFor; + int backstop = 
store.getMaxTransactionId(); for(Transaction tx = toWaitFor, nextTx; - (nextTx = tx.blockingTransaction) != null && tx.getStatus() == Transaction.STATUS_OPEN; - tx = nextTx) { + (nextTx = tx.blockingTransaction) != null && tx.getStatus() == Transaction.STATUS_OPEN && backstop > 0; + tx = nextTx, --backstop) { + + if (nextTx.sequenceNum > youngest.sequenceNum) { + youngest = nextTx; + } + if (nextTx == this) { - return true; + if (youngest == this) { + return true; + } + Transaction btx = youngest.blockingTransaction; + if (btx != null) { + youngest.setStatus(STATUS_ROLLING_BACK); + btx.notifyAllWaitingTransactions(); + return false; + } } } return false; } - private synchronized boolean waitForThisToEnd(int millis) { + private void tryThrowDeadLockException(boolean throwIt) { + BitSet visited = new BitSet(); + StringBuilder details = new StringBuilder( + String.format("Transaction %d has been chosen as a deadlock victim. Details:%n", transactionId)); + for (Transaction tx = this, nextTx; + !visited.get(tx.transactionId) && (nextTx = tx.blockingTransaction) != null; tx = nextTx) { + visited.set(tx.transactionId); + details.append(String.format( + "Transaction %d attempts to update map <%s> entry with key <%s> modified by transaction %s%n", + tx.transactionId, tx.blockingMapName, tx.blockingKey, tx.blockingTransaction)); + if (nextTx == this) { + throwIt = true; + } + } + if (throwIt) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_TRANSACTIONS_DEADLOCK, "{0}", details.toString()); + } + } + + private synchronized boolean waitForThisToEnd(int millis, Transaction waiter) { long until = System.currentTimeMillis() + millis; notificationRequested = true; long state; int status; while((status = getStatus(state = statusAndLogId.get())) != STATUS_CLOSED && status != STATUS_ROLLED_BACK && !hasRollback(state)) { + if (waiter.getStatus() != STATUS_OPEN) { + waiter.tryThrowDeadLockException(true); + } long dur = until - System.currentTimeMillis(); if(dur <= 0) { return 
false; @@ -609,7 +771,7 @@ private String stateToString() { } private static String stateToString(long state) { - return STATUS_NAMES[getStatus(state)] + (hasRollback(state) ? "<" : "") + " " + getLogId(state); + return getStatusName(getStatus(state)) + (hasRollback(state) ? "<" : "") + " " + getLogId(state); } @@ -638,4 +800,8 @@ private static long composeState(int status, long logId, boolean hasRollback) { } return ((long)status << LOG_ID_BITS1) | logId; } + + private static String getStatusName(int status) { + return status >= 0 && status < STATUS_NAMES.length ? STATUS_NAMES[status] : "UNKNOWN_STATUS_" + status; + } } diff --git a/h2/src/main/org/h2/mvstore/tx/TransactionMap.java b/h2/src/main/org/h2/mvstore/tx/TransactionMap.java index ae5b312685..2c5d7f2a63 100644 --- a/h2/src/main/org/h2/mvstore/tx/TransactionMap.java +++ b/h2/src/main/org/h2/mvstore/tx/TransactionMap.java @@ -1,18 +1,10 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.tx; -import org.h2.mvstore.Cursor; -import org.h2.mvstore.DataUtils; -import org.h2.mvstore.MVMap; -import org.h2.mvstore.Page; -import org.h2.mvstore.RootReference; -import org.h2.mvstore.type.DataType; -import org.h2.value.VersionedValue; - import java.util.AbstractMap; import java.util.AbstractSet; import java.util.BitSet; @@ -20,6 +12,17 @@ import java.util.Map; import java.util.NoSuchElementException; import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; + +import org.h2.engine.IsolationLevel; +import org.h2.mvstore.Cursor; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.RootReference; +import org.h2.mvstore.type.DataType; +import org.h2.value.VersionedValue; /** * A map that supports transactions. @@ -34,7 +37,7 @@ * @param the key type * @param the value type */ -public class TransactionMap extends AbstractMap { +public final class TransactionMap extends AbstractMap { /** * The map used for writing (the latest version). @@ -42,16 +45,46 @@ public class TransactionMap extends AbstractMap { * Key: key the key of the data. * Value: { transactionId, oldVersion, value } */ - public final MVMap map; + public final MVMap> map; /** * The transaction which is used for this map. 
*/ private final Transaction transaction; - TransactionMap(Transaction transaction, MVMap map) { + /** + * Snapshot of this map as of beginning of transaction or + * first usage within transaction or + * beginning of the statement, depending on isolation level + */ + private Snapshot> snapshot; + + /** + * Snapshot of this map as of beginning of beginning of the statement + */ + private Snapshot> statementSnapshot; + + /** + * Indicates whether underlying map was modified from within related transaction + */ + private boolean hasChanges; + + private final TxDecisionMaker txDecisionMaker; + private final TxDecisionMaker ifAbsentDecisionMaker; + private final TxDecisionMaker lockDecisionMaker; + + + TransactionMap(Transaction transaction, MVMap> map) { this.transaction = transaction; this.map = map; + this.txDecisionMaker = new TxDecisionMaker<>(map.getId(), transaction); + this.ifAbsentDecisionMaker = new TxDecisionMaker.PutIfAbsentDecisionMaker<>(map.getId(), + transaction, this::getFromSnapshot); + this.lockDecisionMaker = transaction.allowNonRepeatableRead() + ? new TxDecisionMaker.LockDecisionMaker<>(map.getId(), transaction) + : new TxDecisionMaker.RepeatableReadLockDecisionMaker<>(map.getId(), transaction, + map.getValueType(), this::getFromSnapshot); + } /** @@ -60,8 +93,8 @@ public class TransactionMap extends AbstractMap { * @param transaction the transaction * @return the map */ - public TransactionMap getInstance(Transaction transaction) { - return new TransactionMap<>(transaction, map); + public TransactionMap getInstance(Transaction transaction) { + return transaction.openMapX(map); } /** @@ -72,7 +105,7 @@ public TransactionMap getInstance(Transaction transaction) { * @see #sizeAsLong() */ @Override - public final int size() { + public int size() { long size = sizeAsLong(); return size > Integer.MAX_VALUE ? 
Integer.MAX_VALUE : (int) size; } @@ -93,78 +126,67 @@ public long sizeAsLongMax() { * @return the size */ public long sizeAsLong() { - TransactionStore store = transaction.store; - - // The purpose of the following loop is to get a coherent picture - // of a state of three independent volatile / atomic variables, - // which they had at some recent moment in time. - // In order to get such a "snapshot", we wait for a moment of silence, - // when none of the variables concurrently changes it's value. - BitSet committingTransactions; - RootReference mapRootReference; - RootReference[] undoLogRootReferences; - long undoLogSize; + IsolationLevel isolationLevel = transaction.getIsolationLevel(); + if (!isolationLevel.allowNonRepeatableRead() && hasChanges) { + return sizeAsLongRepeatableReadWithChanges(); + } + // getting coherent picture of the map, committing transactions, and undo logs + // either from values stored in transaction (never loops in that case), + // or current values from the transaction store (loops until moment of silence) + Snapshot> snapshot; + RootReference>[] undoLogRootReferences; do { - committingTransactions = store.committingTransactions.get(); - mapRootReference = map.flushAndGetRoot(); - BitSet opentransactions = store.openTransactions.get(); - undoLogRootReferences = new RootReference[opentransactions.length()]; - undoLogSize = 0; - for (int i = opentransactions.nextSetBit(0); i >= 0; i = opentransactions.nextSetBit(i+1)) { - MVMap undoLog = store.undoLogs[i]; - if (undoLog != null) { - RootReference rootReference = undoLog.flushAndGetRoot(); - undoLogRootReferences[i] = rootReference; - undoLogSize += rootReference.getTotalCount(); - } - } - } while(committingTransactions != store.committingTransactions.get() || - mapRootReference != map.getRoot()); - // Now we have a snapshot, where mapRootReference points to state of the map, - // undoLogRootReference captures the state of undo log - // and committingTransactions mask tells us which of 
seemingly uncommitted changes - // should be considered as committed. - // Subsequent processing uses this snapshot info only. - Page mapRootPage = mapRootReference.root; + snapshot = getSnapshot(); + undoLogRootReferences = getTransaction().getUndoLogRootReferences(); + } while (!snapshot.equals(getSnapshot())); + + RootReference> mapRootReference = snapshot.root; long size = mapRootReference.getTotalCount(); + long undoLogsTotalSize = undoLogRootReferences == null ? size + : TransactionStore.calculateUndoLogsTotalSize(undoLogRootReferences); // if we are looking at the map without any uncommitted values - if (undoLogSize == 0) { + if (undoLogsTotalSize == 0) { return size; } + return adjustSize(undoLogRootReferences, mapRootReference, + isolationLevel == IsolationLevel.READ_UNCOMMITTED ? null : snapshot.committingTransactions, + size, undoLogsTotalSize); + } + private long adjustSize(RootReference>[] undoLogRootReferences, + RootReference> mapRootReference, BitSet committingTransactions, long size, + long undoLogsTotalSize) { // Entries describing removals from the map by this transaction and all transactions, // which are committed but not closed yet, // and entries about additions to the map by other uncommitted transactions were counted, // but they should not contribute into total count. 
- if (2 * undoLogSize > size) { + if (2 * undoLogsTotalSize > size) { // the undo log is larger than half of the map - scan the entries of the map directly - Cursor cursor = new Cursor<>(mapRootPage, null); - while(cursor.hasNext()) { + Cursor> cursor = map.cursor(mapRootReference, null, null, false); + while (cursor.hasNext()) { cursor.next(); - VersionedValue currentValue = cursor.getValue(); + VersionedValue currentValue = cursor.getValue(); assert currentValue != null; long operationId = currentValue.getOperationId(); - if (operationId != 0) { // skip committed entries - int txId = TransactionStore.getTransactionId(operationId); - boolean isVisible = txId == transaction.transactionId || - committingTransactions.get(txId); - Object v = isVisible ? currentValue.getCurrentValue() : currentValue.getCommittedValue(); - if (v == null) { - --size; - } + if (operationId != 0 && // skip committed entries + isIrrelevant(operationId, currentValue, committingTransactions)) { + --size; } } } else { + assert undoLogRootReferences != null; // The undo logs are much smaller than the map - scan all undo logs, // and then lookup relevant map entry. - for (RootReference undoLogRootReference : undoLogRootReferences) { + for (RootReference> undoLogRootReference : undoLogRootReferences) { if (undoLogRootReference != null) { - Cursor cursor = new Cursor<>(undoLogRootReference.root, null); + Cursor> cursor = undoLogRootReference.root.map.cursor(undoLogRootReference, + null, null, false); while (cursor.hasNext()) { cursor.next(); - Object op[] = cursor.getValue(); - if ((int) op[0] == map.getId()) { - VersionedValue currentValue = map.get(mapRootPage, op[1]); + Record op = cursor.getValue(); + if (op.mapId == map.getId()) { + @SuppressWarnings("unchecked") + VersionedValue currentValue = map.get(mapRootReference.root, (K)op.key); // If map entry is not there, then we never counted // it, in the first place, so skip it. 
// This is possible when undo entry exists because @@ -175,15 +197,10 @@ public long sizeAsLong() { // only the last undo entry for any given map // key should be considered long operationId = cursor.getKey(); - if (currentValue.getOperationId() == operationId) { - int txId = TransactionStore.getTransactionId(operationId); - boolean isVisible = txId == transaction.transactionId || - committingTransactions.get(txId); - Object v = isVisible ? currentValue.getCurrentValue() - : currentValue.getCommittedValue(); - if (v == null) { - --size; - } + assert operationId != 0; + if (currentValue.getOperationId() == operationId && + isIrrelevant(operationId, currentValue, committingTransactions)) { + --size; } } } @@ -194,6 +211,27 @@ public long sizeAsLong() { return size; } + private boolean isIrrelevant(long operationId, VersionedValue currentValue, BitSet committingTransactions) { + Object v; + if (committingTransactions == null) { + v = currentValue.getCurrentValue(); + } else { + int txId = TransactionStore.getTransactionId(operationId); + v = txId == transaction.transactionId || committingTransactions.get(txId) + ? currentValue.getCurrentValue() : currentValue.getCommittedValue(); + } + return v == null; + } + + private long sizeAsLongRepeatableReadWithChanges() { + long count = 0L; + RepeatableIterator iterator = new RepeatableIterator<>(this, null, null, false, false); + while (iterator.fetchNext() != null) { + count++; + } + return count; + } + /** * Remove an entry. *

          @@ -201,12 +239,13 @@ public long sizeAsLong() { * updated or until a lock timeout. * * @param key the key - * @throws IllegalStateException if a lock timeout occurs + * @throws MVStoreException if a lock timeout occurs * @throws ClassCastException if type of the specified key is not compatible with this map */ + @SuppressWarnings("unchecked") @Override public V remove(Object key) { - return set(key, (V)null); + return set((K)key, (V)null); } /** @@ -218,7 +257,7 @@ public V remove(Object key) { * @param key the key * @param value the new value (not null) * @return the old value - * @throws IllegalStateException if a lock timeout occurs + * @throws MVStoreException if a lock timeout occurs */ @Override public V put(K key, V value) { @@ -235,12 +274,15 @@ public V put(K key, V value) { * @param value the new value (not null) * @return the old value */ - // Do not add @Override, code should be compatible with Java 7 + @Override public V putIfAbsent(K key, V value) { DataUtils.checkArgument(value != null, "The value may not be null"); - TxDecisionMaker decisionMaker = new TxDecisionMaker.PutIfAbsentDecisionMaker(map.getId(), key, value, - transaction); - return set(key, decisionMaker); + ifAbsentDecisionMaker.initialize(key, value); + V result = set(key, ifAbsentDecisionMaker); + if (ifAbsentDecisionMaker.getDecision() == MVMap.Decision.ABORT) { + result = ifAbsentDecisionMaker.getLastValue(); + } + return result; } /** @@ -250,7 +292,9 @@ public V putIfAbsent(K key, V value) { * @param value to be appended */ public void append(K key, V value) { - map.append(key, VersionedValueUncommitted.getInstance(transaction.log(map.getId(), key, null), value, null)); + map.append(key, VersionedValueUncommitted.getInstance( + transaction.log(new Record<>(map.getId(), key, null)), value, null)); + hasChanges = true; } /** @@ -261,11 +305,11 @@ public void append(K key, V value) { * * @param key the key * @return the locked value - * @throws IllegalStateException if a 
lock timeout occurs + * @throws MVStoreException if a lock timeout occurs */ public V lock(K key) { - TxDecisionMaker decisionMaker = new TxDecisionMaker.LockDecisionMaker(map.getId(), key, transaction); - return set(key, decisionMaker); + lockDecisionMaker.initialize(key, null); + return set(key, lockDecisionMaker); } /** @@ -275,53 +319,51 @@ public V lock(K key) { * @param value the value * @return the old value */ + @SuppressWarnings("UnusedReturnValue") public V putCommitted(K key, V value) { DataUtils.checkArgument(value != null, "The value may not be null"); - VersionedValue newValue = VersionedValueCommitted.getInstance(value); - VersionedValue oldValue = map.put(key, newValue); - @SuppressWarnings("unchecked") - V result = (V) (oldValue == null ? null : oldValue.getCurrentValue()); + VersionedValue newValue = VersionedValueCommitted.getInstance(value); + VersionedValue oldValue = map.put(key, newValue); + V result = oldValue == null ? null : oldValue.getCurrentValue(); return result; } - private V set(Object key, V value) { - TxDecisionMaker decisionMaker = new TxDecisionMaker.PutDecisionMaker(map.getId(), key, value, transaction); - return set(key, decisionMaker); + private V set(K key, V value) { + txDecisionMaker.initialize(key, value); + return set(key, txDecisionMaker); } - private V set(Object key, TxDecisionMaker decisionMaker) { - TransactionStore store = transaction.store; + private V set(Object key, TxDecisionMaker decisionMaker) { Transaction blockingTransaction; - long sequenceNumWhenStarted; - VersionedValue result; + VersionedValue result; + String mapName = null; do { - sequenceNumWhenStarted = store.openTransactions.get().getVersion(); assert transaction.getBlockerId() == 0; - // although second parameter (value) is not really used, - // since TxDecisionMaker has it embedded, - // MVRTreeMap has weird traversal logic based on it, - // and any non-null value will do @SuppressWarnings("unchecked") K k = (K) key; - result = map.operate(k, 
VersionedValue.DUMMY, decisionMaker); + // second parameter (value) is not really used, + // since TxDecisionMaker has it embedded + result = map.operate(k, null, decisionMaker); MVMap.Decision decision = decisionMaker.getDecision(); assert decision != null; assert decision != MVMap.Decision.REPEAT; blockingTransaction = decisionMaker.getBlockingTransaction(); if (decision != MVMap.Decision.ABORT || blockingTransaction == null) { - @SuppressWarnings("unchecked") - V res = result == null ? null : (V) result.getCurrentValue(); + hasChanges |= decision != MVMap.Decision.ABORT; + V res = result == null ? null : result.getCurrentValue(); return res; } decisionMaker.reset(); - } while (blockingTransaction.sequenceNum > sequenceNumWhenStarted - || transaction.waitFor(blockingTransaction, map, key)); + if (mapName == null) { + mapName = map.getName(); + } + } while (transaction.waitFor(blockingTransaction, mapName, key)); - throw DataUtils.newIllegalStateException(DataUtils.ERROR_TRANSACTION_LOCKED, + throw DataUtils.newMVStoreException(DataUtils.ERROR_TRANSACTION_LOCKED, "Map entry <{0}> with key <{1}> and value {2} is locked by tx {3} and can not be updated by tx {4}" + " within allocated time interval {5} ms.", - map.getName(), key, result, blockingTransaction.transactionId, transaction.transactionId, + mapName, key, result, blockingTransaction.transactionId, transaction.transactionId, transaction.timeoutMillis); } @@ -370,7 +412,7 @@ public boolean trySet(K key, V value) { // TODO: eliminate exception usage as part of normal control flaw set(key, value); return true; - } catch (IllegalStateException e) { + } catch (MVStoreException e) { return false; } } @@ -382,25 +424,132 @@ public boolean trySet(K key, V value) { * @return the value or null * @throws ClassCastException if type of the specified key is not compatible with this map */ - @Override @SuppressWarnings("unchecked") + @Override public V get(Object key) { - VersionedValue data = map.get(key); + return 
getImmediate((K)key); + } + + /** + * Get the value for the given key, or null if value does not exist in accordance with transactional rules. + * Value is taken from a snapshot, appropriate for an isolation level of the related transaction + * + * @param key the key + * @return the value, or null if not found + */ + public V getFromSnapshot(K key) { + switch (transaction.isolationLevel) { + case READ_UNCOMMITTED: { + Snapshot> snapshot = getStatementSnapshot(); + VersionedValue data = map.get(snapshot.root.root, key); + if (data != null) { + return data.getCurrentValue(); + } + return null; + } + case REPEATABLE_READ: + case SNAPSHOT: + case SERIALIZABLE: + if (transaction.hasChanges()) { + Snapshot> snapshot = getStatementSnapshot(); + VersionedValue data = map.get(snapshot.root.root, key); + if (data != null) { + long id = data.getOperationId(); + if (id != 0L && transaction.transactionId == TransactionStore.getTransactionId(id)) { + return data.getCurrentValue(); + } + } + } + //$FALL-THROUGH$ + case READ_COMMITTED: + default: + Snapshot> snapshot = getSnapshot(); + return getFromSnapshot(snapshot.root, snapshot.committingTransactions, key); + } + } + + private V getFromSnapshot(RootReference> rootRef, BitSet committingTransactions, K key) { + VersionedValue data = map.get(rootRef.root, key); if (data == null) { - // doesn't exist or deleted by a committed transaction + // doesn't exist return null; } long id = data.getOperationId(); - if (id == 0) { - // it is committed - return (V)data.getCurrentValue(); - } - int tx = TransactionStore.getTransactionId(id); - if (tx == transaction.transactionId || transaction.store.committingTransactions.get().get(tx)) { - // added by this transaction or another transaction which is committed by now - return (V) data.getCurrentValue(); - } else { - return (V) data.getCommittedValue(); + if (id != 0) { + int tx = TransactionStore.getTransactionId(id); + if (tx != transaction.transactionId && !committingTransactions.get(tx)) { 
+ // added/modified/removed by uncommitted transaction, change should not be visible + return data.getCommittedValue(); + } + } + // added/modified/removed by this transaction or another transaction which is committed by now + return data.getCurrentValue(); + } + + /** + * Get the value for the given key, or null if not found. + * Operation is performed on a snapshot of the map taken during this call. + * + * @param key the key + * @return the value, or null if not found + */ + public V getImmediate(K key) { + return useSnapshot((rootReference, committedTransactions) -> + getFromSnapshot(rootReference, committedTransactions, key)); + } + + Snapshot> getSnapshot() { + return snapshot == null ? createSnapshot() : snapshot; + } + + Snapshot> getStatementSnapshot() { + return statementSnapshot == null ? createSnapshot() : statementSnapshot; + } + + void setStatementSnapshot(Snapshot> snapshot) { + statementSnapshot = snapshot; + } + + void promoteSnapshot() { + if (snapshot == null) { + snapshot = statementSnapshot; + } + } + + /** + * Create a new snapshot for this map. + * + * @return the snapshot + */ + Snapshot> createSnapshot() { + return useSnapshot(Snapshot::new); + } + + /** + * Gets a coherent picture of committing transactions and root reference, + * passes it to the specified function, and returns its result. + * + * @param type of the result + * + * @param snapshotConsumer + * function to invoke on a snapshot + * @return function's result + */ + R useSnapshot(BiFunction>, BitSet, R> snapshotConsumer) { + // The purpose of the following loop is to get a coherent picture + // of a state of two independent volatile / atomic variables, + // which they had at some recent moment in time. + // In order to get such a "snapshot", we wait for a moment of silence, + // when neither of the variables concurrently changes it's value. 
+ AtomicReference holder = transaction.store.committingTransactions; + BitSet committingTransactions = holder.get(); + while (true) { + BitSet prevCommittingTransactions = committingTransactions; + RootReference> root = map.getRoot(); + committingTransactions = holder.get(); + if (committingTransactions == prevCommittingTransactions) { + return snapshotConsumer.apply(root, committingTransactions); + } } } @@ -411,9 +560,26 @@ public V get(Object key) { * @return true if the map contains an entry for this key * @throws ClassCastException if type of the specified key is not compatible with this map */ + @SuppressWarnings("unchecked") @Override public boolean containsKey(Object key) { - return get(key) != null; + return getImmediate((K)key) != null; + } + + /** + * Check if the row was deleted by this transaction. + * + * @param key the key + * @return {@code true} if it was + */ + public boolean isDeletedByCurrentTransaction(K key) { + VersionedValue data = map.get(key); + if (data != null) { + long id = data.getOperationId(); + return id != 0 && TransactionStore.getTransactionId(id) == transaction.transactionId + && data.getCurrentValue() == null; + } + return false; } /** @@ -424,7 +590,7 @@ public boolean containsKey(Object key) { * @return true if yes */ public boolean isSameTransaction(K key) { - VersionedValue data = map.get(key); + VersionedValue data = map.get(key); if (data == null) { // doesn't exist or deleted by a committed transaction return false; @@ -449,6 +615,7 @@ public boolean isClosed() { public void clear() { // TODO truncate transactionally? map.clear(); + hasChanges = true; } @Override @@ -473,14 +640,31 @@ public boolean contains(Object o) { }; } + /** + * Get the first entry. + * + * @return the first entry, or null if empty + */ + public Entry firstEntry() { + return this.>chooseIterator(null, null, false, true).fetchNext(); + } + /** * Get the first key. 
* * @return the first key, or null if empty */ public K firstKey() { - Iterator it = keyIterator(null); - return it.hasNext() ? it.next() : null; + return this.chooseIterator(null, null, false, false).fetchNext(); + } + + /** + * Get the last entry. + * + * @return the last entry, or null if empty + */ + public Entry lastEntry() { + return this.>chooseIterator(null, null, true, true).fetchNext(); } /** @@ -489,11 +673,18 @@ public K firstKey() { * @return the last key, or null if empty */ public K lastKey() { - K k = map.lastKey(); - while (k != null && get(k) == null) { - k = map.lowerKey(k); - } - return k; + return this.chooseIterator(null, null, true, false).fetchNext(); + } + + /** + * Get the entry with smallest key that is larger than the given key, or null if no + * such key exists. + * + * @param key the key (may not be null) + * @return the result + */ + public Entry higherEntry(K key) { + return higherLowerEntry(key, false); } /** @@ -504,10 +695,18 @@ public K lastKey() { * @return the result */ public K higherKey(K key) { - do { - key = map.higherKey(key); - } while (key != null && get(key) == null); - return key; + return higherLowerKey(key, false); + } + + /** + * Get the entry with smallest key that is larger than or equal to this key, + * or null if no such key exists. + * + * @param key the key (may not be null) + * @return the result + */ + public Entry ceilingEntry(K key) { + return this.>chooseIterator(key, null, false, true).fetchNext(); } /** @@ -518,8 +717,18 @@ public K higherKey(K key) { * @return the result */ public K ceilingKey(K key) { - Iterator it = keyIterator(key); - return it.hasNext() ? it.next() : null; + return this.chooseIterator(key, null, false, false).fetchNext(); + } + + /** + * Get the entry with largest key that is smaller than or equal to this key, + * or null if no such key exists. 
+ * + * @param key the key (may not be null) + * @return the result + */ + public Entry floorEntry(K key) { + return this.>chooseIterator(key, null, true, true).fetchNext(); } /** @@ -530,12 +739,18 @@ public K ceilingKey(K key) { * @return the result */ public K floorKey(K key) { - key = map.floorKey(key); - while (key != null && get(key) == null) { - // Use lowerKey() for the next attempts, otherwise we'll get an infinite loop - key = map.lowerKey(key); - } - return key; + return this.chooseIterator(key, null, true, false).fetchNext(); + } + + /** + * Get the entry with largest key that is smaller than the given key, or null if no + * such key exists. + * + * @param key the key (may not be null) + * @return the result + */ + public Entry lowerEntry(K key) { + return higherLowerEntry(key, true); } /** @@ -546,10 +761,25 @@ public K floorKey(K key) { * @return the result */ public K lowerKey(K key) { - do { - key = map.lowerKey(key); - } while (key != null && get(key) == null); - return key; + return higherLowerKey(key, true); + } + + private Entry higherLowerEntry(K key, boolean lower) { + TMIterator> it = chooseIterator(key, null, lower, true); + Entry result = it.fetchNext(); + if (result != null && map.getKeyType().compare(key, result.getKey()) == 0) { + result = it.fetchNext(); + } + return result; + } + + private K higherLowerKey(K key, boolean lower) { + TMIterator it = chooseIterator(key, null, lower, false); + K result = it.fetchNext(); + if (result != null && map.getKeyType().compare(key, result) == 0) { + result = it.fetchNext(); + } + return result; } /** @@ -559,7 +789,18 @@ public K lowerKey(K key) { * @return the iterator */ public Iterator keyIterator(K from) { - return keyIterator(from, null, false); + return chooseIterator(from, null, false, false); + } + + /** + * Iterate over keys in the specified order. 
+ * + * @param from the first key to return + * @param reverse if true, iterate in reverse (descending) order + * @return the iterator + */ + public TMIterator keyIterator(K from, boolean reverse) { + return chooseIterator(from, null, reverse, false); } /** @@ -567,197 +808,320 @@ public Iterator keyIterator(K from) { * * @param from the first key to return * @param to the last key to return or null if there is no limit - * @param includeUncommitted whether uncommitted entries should be - * included * @return the iterator */ - public Iterator keyIterator(K from, K to, boolean includeUncommitted) { - return new KeyIterator<>(this, from, to, includeUncommitted); + public TMIterator keyIterator(K from, K to) { + return chooseIterator(from, to, false, false); } /** - * Iterate over entries. + * Iterate over keys, including keys from uncommitted entries. * * @param from the first key to return - * @param to the last key to return + * @param to the last key to return or null if there is no limit * @return the iterator */ - public Iterator> entryIterator(final K from, final K to) { - return new EntryIterator<>(this, from, to); + public TMIterator keyIteratorUncommitted(K from, K to) { + return new ValidationIterator<>(this, from, to); } /** - * Iterate over keys. + * Iterate over entries. 
* - * @param iterator the iterator to wrap - * @param includeUncommitted whether uncommitted entries should be - * included + * @param from the first key to return + * @param to the last key to return * @return the iterator */ - public Iterator wrapIterator(final Iterator iterator, - final boolean includeUncommitted) { - // TODO duplicate code for wrapIterator and entryIterator - return new Iterator() { - private K current; - - { - fetchNext(); - } + public TMIterator> entryIterator(final K from, final K to) { + return chooseIterator(from, to, false, true); + } - private void fetchNext() { - while (iterator.hasNext()) { - current = iterator.next(); - if (includeUncommitted) { - return; - } - if (containsKey(current)) { - return; - } + private TMIterator chooseIterator(K from, K to, boolean reverse, boolean forEntries) { + switch (transaction.isolationLevel) { + case READ_UNCOMMITTED: + return new UncommittedIterator<>(this, from, to, reverse, forEntries); + case REPEATABLE_READ: + case SNAPSHOT: + case SERIALIZABLE: + if (hasChanges) { + return new RepeatableIterator<>(this, from, to, reverse, forEntries); } - current = null; - } - - @Override - public boolean hasNext() { - return current != null; - } - - @Override - public K next() { - K result = current; - fetchNext(); - return result; - } - - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removal is not supported"); - } - }; + //$FALL-THROUGH$ + case READ_COMMITTED: + default: + return new CommittedIterator<>(this, from, to, reverse, forEntries); + } } public Transaction getTransaction() { return transaction; } - public DataType getKeyType() { + public DataType getKeyType() { return map.getKeyType(); } + /** + * The iterator for read uncommitted isolation level. This iterator is also + * used for unique indexes. 
+ * + * @param + * the type of keys + * @param + * the type of elements + */ + private static class UncommittedIterator extends TMIterator { + UncommittedIterator(TransactionMap transactionMap, K from, K to, boolean reverse, boolean forEntries) { + super(transactionMap, from, to, transactionMap.createSnapshot(), reverse, forEntries); + } - private static final class KeyIterator extends TMIterator { - - public KeyIterator(TransactionMap transactionMap, K from, K to, boolean includeUncommitted) { - super(transactionMap, from, to, includeUncommitted); + UncommittedIterator(TransactionMap transactionMap, K from, K to, Snapshot> snapshot, + boolean reverse, boolean forEntries) { + super(transactionMap, from, to, snapshot, reverse, forEntries); } @Override - protected K registerCurrent(K key, VersionedValue data) { - return key; + public final X fetchNext() { + while (cursor.hasNext()) { + K key = cursor.next(); + VersionedValue data = cursor.getValue(); + if (data != null) { + Object currentValue = data.getCurrentValue(); + if (currentValue != null || shouldIgnoreRemoval(data)) { + return toElement(key, currentValue); + } + } + } + return null; + } + + boolean shouldIgnoreRemoval(VersionedValue data) { + return false; } } - private static final class EntryIterator extends TMIterator> { - public EntryIterator(TransactionMap transactionMap, K from, K to) { - super(transactionMap, from, to, false); + // This iterator should include all entries applicable for unique index validation, + // committed and otherwise, only excluding keys removed by the current transaction + // or by some other already committed (but not closed yet) transactions + private static final class ValidationIterator extends UncommittedIterator { + ValidationIterator(TransactionMap transactionMap, K from, K to) { + super(transactionMap, from, to, transactionMap.createSnapshot(), false, false); } @Override - @SuppressWarnings("unchecked") - protected Map.Entry registerCurrent(K key, VersionedValue data) { 
- return new AbstractMap.SimpleImmutableEntry<>(key, (V) data.getCurrentValue()); + boolean shouldIgnoreRemoval(VersionedValue data) { + assert data.getCurrentValue() == null; + long id = data.getOperationId(); + if (id != 0) { + int tx = TransactionStore.getTransactionId(id); + return transactionId != tx && !committingTransactions.get(tx); + } + return false; } } - private abstract static class TMIterator implements Iterator { - private final int transactionId; - private final BitSet committingTransactions; - private final Cursor cursor; - private final boolean includeAllUncommitted; - private X current; + /** + * The iterator for read committed isolation level. Can also be used on + * higher levels when the transaction doesn't have own changes. + * + * @param + * the type of keys + * @param + * the type of elements + */ + private static final class CommittedIterator extends TMIterator { + CommittedIterator(TransactionMap transactionMap, K from, K to, boolean reverse, boolean forEntries) { + super(transactionMap, from, to, transactionMap.getSnapshot(), reverse, forEntries); + } - TMIterator(TransactionMap transactionMap, K from, K to, boolean includeAllUncommitted) { - Transaction transaction = transactionMap.getTransaction(); - this.transactionId = transaction.transactionId; - TransactionStore store = transaction.store; - MVMap map = transactionMap.map; - // The purpose of the following loop is to get a coherent picture - // of a state of two independent volatile / atomic variables, - // which they had at some recent moment in time. - // In order to get such a "snapshot", we wait for a moment of silence, - // when neither of the variables concurrently changes it's value. 
- BitSet committingTransactions; - RootReference mapRootReference; - do { - committingTransactions = store.committingTransactions.get(); - mapRootReference = map.flushAndGetRoot(); - } while (committingTransactions != store.committingTransactions.get()); - // Now we have a snapshot, where mapRootReference points to state of the map - // and committingTransactions mask tells us which of seemingly uncommitted changes - // should be considered as committed. - // Subsequent map traversal uses this snapshot info only. - this.cursor = new Cursor<>(mapRootReference.root, from, to); - this.committingTransactions = committingTransactions; + @Override + public X fetchNext() { + while (cursor.hasNext()) { + K key = cursor.next(); + VersionedValue data = cursor.getValue(); + // If value doesn't exist or it was deleted by a committed transaction, + // or if value is a committed one, just return it. + if (data != null) { + long id = data.getOperationId(); + if (id != 0) { + int tx = TransactionStore.getTransactionId(id); + if (tx != transactionId && !committingTransactions.get(tx)) { + // current value comes from another uncommitted transaction + // take committed value instead + Object committedValue = data.getCommittedValue(); + if (committedValue == null) { + continue; + } + return toElement(key, committedValue); + } + } + Object currentValue = data.getCurrentValue(); + if (currentValue != null) { + return toElement(key, currentValue); + } + } + } + return null; + } + } + + /** + * The iterator for repeatable read and serializable isolation levels. 
+ * + * @param + * the type of keys + * @param + * the type of elements + */ + private static final class RepeatableIterator extends TMIterator { + private final DataType keyType; - this.includeAllUncommitted = includeAllUncommitted; - fetchNext(); + private K snapshotKey; + + private Object snapshotValue; + + private final Cursor> uncommittedCursor; + + private K uncommittedKey; + + private V uncommittedValue; + + RepeatableIterator(TransactionMap transactionMap, K from, K to, boolean reverse, boolean forEntries) { + super(transactionMap, from, to, transactionMap.getSnapshot(), reverse, forEntries); + keyType = transactionMap.map.getKeyType(); + Snapshot> snapshot = transactionMap.getStatementSnapshot(); + uncommittedCursor = transactionMap.map.cursor(snapshot.root, from, to, reverse); } - protected abstract X registerCurrent(K key, VersionedValue data); + @Override + public X fetchNext() { + X next = null; + do { + if (snapshotKey == null) { + fetchSnapshot(); + } + if (uncommittedKey == null) { + fetchUncommitted(); + } + if (snapshotKey == null && uncommittedKey == null) { + break; + } + int cmp = snapshotKey == null ? 1 : + uncommittedKey == null ? -1 : + keyType.compare(snapshotKey, uncommittedKey); + if (cmp < 0) { + next = toElement(snapshotKey, snapshotValue); + snapshotKey = null; + break; + } + if (uncommittedValue != null) { + // This entry was added / updated by this transaction, use the new value + next = toElement(uncommittedKey, uncommittedValue); + } + if (cmp == 0) { // This entry was updated / deleted + snapshotKey = null; + } + uncommittedKey = null; + } while (next == null); + return next; + } - private void fetchNext() { + private void fetchSnapshot() { while (cursor.hasNext()) { K key = cursor.next(); - VersionedValue data = cursor.getValue(); - if (!includeAllUncommitted) { - // If value doesn't exist or it was deleted by a committed transaction, - // or if value is a committed one, just return it. 
- if (data != null) { - long id = data.getOperationId(); - if (id != 0) { - int tx = TransactionStore.getTransactionId(id); - if (tx != transactionId && !committingTransactions.get(tx)) { - // current value comes from another uncommitted transaction - // take committed value instead - Object committedValue = data.getCommittedValue(); - data = committedValue == null ? null - : VersionedValueCommitted.getInstance(committedValue); - } + VersionedValue data = cursor.getValue(); + // If value doesn't exist or it was deleted by a committed transaction, + // or if value is a committed one, just return it. + if (data != null) { + Object value = data.getCommittedValue(); + long id = data.getOperationId(); + if (id != 0) { + int tx = TransactionStore.getTransactionId(id); + if (tx == transactionId || committingTransactions.get(tx)) { + // value comes from this transaction or another committed transaction + // take current value instead instead of committed one + value = data.getCurrentValue(); } } + if (value != null) { + snapshotKey = key; + snapshotValue = value; + return; + } } - if (data != null && (data.getCurrentValue() != null || - includeAllUncommitted && transactionId != - TransactionStore.getTransactionId(data.getOperationId()))) { - current = registerCurrent(key, data); - return; + } + } + + private void fetchUncommitted() { + while (uncommittedCursor.hasNext()) { + K key = uncommittedCursor.next(); + VersionedValue data = uncommittedCursor.getValue(); + if (data != null) { + long id = data.getOperationId(); + if (id != 0L && transactionId == TransactionStore.getTransactionId(id)) { + uncommittedKey = key; + uncommittedValue = data.getCurrentValue(); + return; + } } } - current = null; + } + } + + public abstract static class TMIterator implements Iterator { + final int transactionId; + + final BitSet committingTransactions; + + protected final Cursor> cursor; + + private final boolean forEntries; + + X current; + + TMIterator(TransactionMap transactionMap, K 
from, K to, Snapshot> snapshot, + boolean reverse, boolean forEntries) { + Transaction transaction = transactionMap.getTransaction(); + this.transactionId = transaction.transactionId; + this.forEntries = forEntries; + this.cursor = transactionMap.map.cursor(snapshot.root, from, to, reverse); + this.committingTransactions = snapshot.committingTransactions; } + @SuppressWarnings("unchecked") + final X toElement(K key, Object value) { + return (X) (forEntries ? new AbstractMap.SimpleImmutableEntry<>(key, value) : key); + } + + /** + * Fetches a next entry. + * + * This method cannot be used together with {@link #hasNext()} and + * {@link #next()}. + * + * @return the next entry or {@code null} + */ + public abstract X fetchNext(); + @Override public final boolean hasNext() { - return current != null; + return current != null || (current = fetchNext()) != null; } @Override public final X next() { - if(current == null) { - throw new NoSuchElementException(); - } X result = current; - fetchNext(); + if (result == null) { + if ((result = fetchNext()) == null) { + throw new NoSuchElementException(); + } + } else { + current = null; + } return result; } - @Override - public final void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removal is not supported"); - } } } diff --git a/h2/src/main/org/h2/mvstore/tx/TransactionStore.java b/h2/src/main/org/h2/mvstore/tx/TransactionStore.java index 01675f23ac..bd4d43cdd9 100644 --- a/h2/src/main/org/h2/mvstore/tx/TransactionStore.java +++ b/h2/src/main/org/h2/mvstore/tx/TransactionStore.java @@ -1,24 +1,31 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.tx; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.BitSet; import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReferenceArray; +import org.h2.engine.IsolationLevel; import org.h2.mvstore.Cursor; import org.h2.mvstore.DataUtils; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; -import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.RootReference; +import org.h2.mvstore.rtree.MVRTreeMap; +import org.h2.mvstore.rtree.SpatialDataType; import org.h2.mvstore.type.DataType; +import org.h2.mvstore.type.LongDataType; +import org.h2.mvstore.type.MetaType; import org.h2.mvstore.type.ObjectDataType; +import org.h2.mvstore.type.StringDataType; import org.h2.util.StringUtils; import org.h2.value.VersionedValue; @@ -35,7 +42,7 @@ public class TransactionStore { /** * Default blocked transaction timeout */ - private final int timeoutMillis; + final int timeoutMillis; /** * The persisted map of prepared transactions. @@ -43,6 +50,8 @@ public class TransactionStore { */ private final MVMap preparedTransactions; + private final MVMap> typeRegistry; + /** * Undo logs. *

          @@ -54,10 +63,10 @@ public class TransactionStore { * Key: opId, value: [ mapId, key, oldValue ]. */ @SuppressWarnings("unchecked") - final MVMap undoLogs[] = new MVMap[MAX_OPEN_TRANSACTIONS]; - private final MVMap.Builder undoLogBuilder; + final MVMap>[] undoLogs = new MVMap[MAX_OPEN_TRANSACTIONS]; + private final MVMap.Builder> undoLogBuilder; - private final MVMap.Builder mapBuilder; + private final DataType dataType; /** * This BitSet is used as vacancy indicator for transaction slots in transactions[]. @@ -91,8 +100,16 @@ public class TransactionStore { private final AtomicReferenceArray transactions = new AtomicReferenceArray<>(MAX_OPEN_TRANSACTIONS + 1); - private static final String UNDO_LOG_NAME_PREFIX = "undoLog"; - private static final char UNDO_LOG_COMMITTED = '-'; // must come before open in lexicographical order + private static final String TYPE_REGISTRY_NAME = "_"; + + /** + * The prefix for undo log entries. + */ + public static final String UNDO_LOG_NAME_PREFIX = "undoLog"; + + // must come before open in lexicographical order + private static final char UNDO_LOG_COMMITTED = '-'; + private static final char UNDO_LOG_OPEN = '.'; /** @@ -101,22 +118,16 @@ public class TransactionStore { // TODO: introduce constructor parameter instead of a static field, driven by URL parameter private static final int MAX_OPEN_TRANSACTIONS = 65535; - /** * Generate a string used to name undo log map for a specific transaction. - * This name will contain transaction id and reflect the fact - * whether transaction is logically committed or not. - * This information might be used by recovery procedure after unclean shutdown - * (termination before transaction is fully committed). + * This name will contain transaction id. 
* - * @param committed true if transaction is logically committed, false otherwise * @param transactionId of the corresponding transaction * @return undo log name */ - public static String getUndoLogName(boolean committed, int transactionId) { - return UNDO_LOG_NAME_PREFIX + - (committed ? UNDO_LOG_COMMITTED : UNDO_LOG_OPEN) + - (transactionId > 0 ? String.valueOf(transactionId) : ""); + private static String getUndoLogName(int transactionId) { + return transactionId > 0 ? UNDO_LOG_NAME_PREFIX + UNDO_LOG_OPEN + transactionId + : UNDO_LOG_NAME_PREFIX + UNDO_LOG_OPEN; } /** @@ -125,47 +136,71 @@ public static String getUndoLogName(boolean committed, int transactionId) { * @param store the store */ public TransactionStore(MVStore store) { - this(store, new ObjectDataType(), 0); + this(store, new ObjectDataType()); + } + + public TransactionStore(MVStore store, DataType dataType) { + this(store, new MetaType<>(null, store.backgroundExceptionHandler), dataType, 0); } /** * Create a new transaction store. 
- * * @param store the store - * @param dataType the data type for map keys and values + * @param metaDataType the data type for type registry map values + * @param dataType default data type for map keys and values * @param timeoutMillis lock acquisition timeout in milliseconds, 0 means no wait */ - public TransactionStore(MVStore store, DataType dataType, int timeoutMillis) { + public TransactionStore(MVStore store, MetaType metaDataType, DataType dataType, int timeoutMillis) { this.store = store; + this.dataType = dataType; this.timeoutMillis = timeoutMillis; - preparedTransactions = store.openMap("openTransactions", - new MVMap.Builder()); - DataType oldValueType = new VersionedValueType(dataType); - ArrayType undoLogValueType = new ArrayType(new DataType[]{ - new ObjectDataType(), dataType, oldValueType - }); - undoLogBuilder = new MVMap.Builder() + this.typeRegistry = openTypeRegistry(store, metaDataType); + this.preparedTransactions = store.openMap("openTransactions", new MVMap.Builder<>()); + this.undoLogBuilder = createUndoLogBuilder(); + } + + @SuppressWarnings({"unchecked","rawtypes"}) + MVMap.Builder> createUndoLogBuilder() { + return new MVMap.Builder>() .singleWriter() - .valueType(undoLogValueType); - DataType vt = new VersionedValueType(dataType); - mapBuilder = new MVMap.Builder() - .keyType(dataType).valueType(vt); + .keyType(LongDataType.INSTANCE) + .valueType(new Record.Type(this)); + } + + private static MVMap> openTypeRegistry(MVStore store, MetaType metaDataType) { + MVMap.Builder> typeRegistryBuilder = + new MVMap.Builder>() + .keyType(StringDataType.INSTANCE) + .valueType(metaDataType); + return store.openMap(TYPE_REGISTRY_NAME, typeRegistryBuilder); + } + + /** + * Initialize the store without any RollbackListener. + * @see #init(RollbackListener) + */ + public void init() { + init(ROLLBACK_LISTENER_NONE); } /** * Initialize the store. This is needed before a transaction can be opened. 
* If the transaction store is corrupt, this method can throw an exception, * in which case the store can only be used for reading. + * + * @param listener to notify about transaction rollback */ - public void init() { + public void init(RollbackListener listener) { if (!init) { for (String mapName : store.getMapNames()) { if (mapName.startsWith(UNDO_LOG_NAME_PREFIX)) { // Unexpectedly short name may be encountered upon upgrade from older version // where undo log was persisted as a single map, remove it. if (mapName.length() > UNDO_LOG_NAME_PREFIX.length()) { + // make a decision about tx status based on a log name + // to handle upgrade from a previous versions boolean committed = mapName.charAt(UNDO_LOG_NAME_PREFIX.length()) == UNDO_LOG_COMMITTED; - if (store.hasData(mapName) || committed) { + if (store.hasData(mapName)) { int transactionId = StringUtils.parseUInt31(mapName, UNDO_LOG_NAME_PREFIX.length() + 1, mapName.length()); VersionedBitSet openTxBitSet = openTransactions.get(); @@ -180,17 +215,27 @@ public void init() { status = (Integer) data[0]; name = (String) data[1]; } + MVMap> undoLog = store.openMap(mapName, undoLogBuilder); + undoLogs[transactionId] = undoLog; + Long lastUndoKey = undoLog.lastKey(); + assert lastUndoKey != null; + assert getTransactionId(lastUndoKey) == transactionId; + long logId = getLogId(lastUndoKey) + 1; + if (committed) { + // give it a proper name and used marker record instead + store.renameMap(undoLog, getUndoLogName(transactionId)); + markUndoLogAsCommitted(transactionId); + } else { + committed = logId > LOG_ID_MASK; + } if (committed) { status = Transaction.STATUS_COMMITTED; + lastUndoKey = undoLog.lowerKey(lastUndoKey); + assert lastUndoKey == null || getTransactionId(lastUndoKey) == transactionId; + logId = lastUndoKey == null ? 
0 : getLogId(lastUndoKey) + 1; } - MVMap undoLog = store.openMap(mapName, undoLogBuilder); - undoLogs[transactionId] = undoLog; - Long lastUndoKey = undoLog.lastKey(); - assert committed || lastUndoKey != null; - assert committed || getTransactionId(lastUndoKey) == transactionId; - long logId = lastUndoKey == null ? 0 : getLogId(lastUndoKey) + 1; registerTransaction(transactionId, status, name, logId, timeoutMillis, 0, - ROLLBACK_LISTENER_NONE); + IsolationLevel.READ_COMMITTED, listener); continue; } } @@ -205,6 +250,10 @@ public void init() { } } + private void markUndoLogAsCommitted(int transactionId) { + addUndoLogRecord(transactionId, LOG_ID_MASK, Record.COMMIT_MARKER); + } + /** * Commit all transactions that are in the committed state, and * rollback all open transactions. @@ -221,6 +270,10 @@ public void endLeftoverTransactions() { } } + int getMaxTransactionId() { + return maxTransactionId; + } + /** * Set the maximum transaction id, after which ids are re-used. If the old * transaction is still in use when re-using an old id, the new transaction @@ -318,7 +371,7 @@ public synchronized void close() { * @return the transaction */ public Transaction begin() { - return begin(ROLLBACK_LISTENER_NONE, timeoutMillis, 0); + return begin(ROLLBACK_LISTENER_NONE, timeoutMillis, 0, IsolationLevel.READ_COMMITTED); } /** @@ -326,20 +379,19 @@ public Transaction begin() { * @param listener to be notified in case of a rollback * @param timeoutMillis to wait for a blocking transaction * @param ownerId of the owner (Session?) 
to be reported by getBlockerId + * @param isolationLevel of new transaction * @return the transaction */ - public Transaction begin(RollbackListener listener, int timeoutMillis, int ownerId) { - - if(timeoutMillis <= 0) { - timeoutMillis = this.timeoutMillis; - } + public Transaction begin(RollbackListener listener, int timeoutMillis, int ownerId, + IsolationLevel isolationLevel) { Transaction transaction = registerTransaction(0, Transaction.STATUS_OPEN, null, 0, - timeoutMillis, ownerId, listener); + timeoutMillis, ownerId, isolationLevel, listener); return transaction; } private Transaction registerTransaction(int txId, int status, String name, long logId, - int timeoutMillis, int ownerId, RollbackListener listener) { + int timeoutMillis, int ownerId, + IsolationLevel isolationLevel, RollbackListener listener) { int transactionId; long sequenceNo; boolean success; @@ -352,7 +404,7 @@ private Transaction registerTransaction(int txId, int status, String name, long assert !original.get(transactionId); } if (transactionId > maxTransactionId) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_TOO_MANY_OPEN_TRANSACTIONS, "There are {0} open transactions", transactionId - 1); @@ -365,14 +417,14 @@ private Transaction registerTransaction(int txId, int status, String name, long } while(!success); Transaction transaction = new Transaction(this, transactionId, sequenceNo, status, name, logId, - timeoutMillis, ownerId, listener); + timeoutMillis, ownerId, isolationLevel, listener); assert transactions.get(transactionId) == null; transactions.set(transactionId, transaction); if (undoLogs[transactionId] == null) { - String undoName = getUndoLogName(status == Transaction.STATUS_COMMITTED, transactionId); - MVMap undoLog = store.openMap(undoName, undoLogBuilder); + String undoName = getUndoLogName(transactionId); + MVMap> undoLog = store.openMap(undoName, undoLogBuilder); undoLogs[transactionId] = undoLog; } return transaction; 
@@ -397,20 +449,20 @@ void storeTransaction(Transaction t) { * * @param transactionId id of the transaction * @param logId sequential number of the log record within transaction - * @param undoLogRecord Object[mapId, key, previousValue] - * @return undo key + * @param record Record(mapId, key, previousValue) to add + * @return key for the added record */ - long addUndoLogRecord(int transactionId, long logId, Object[] undoLogRecord) { - MVMap undoLog = undoLogs[transactionId]; + long addUndoLogRecord(int transactionId, long logId, Record record) { + MVMap> undoLog = undoLogs[transactionId]; long undoKey = getOperationId(transactionId, logId); if (logId == 0 && !undoLog.isEmpty()) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_TOO_MANY_OPEN_TRANSACTIONS, "An old transaction with the same id " + "is still open: {0}", transactionId); } - undoLog.append(undoKey, undoLogRecord); + undoLog.append(undoKey, record); return undoKey; } @@ -425,12 +477,10 @@ void removeUndoLogRecord(int transactionId) { /** * Remove the given map. * - * @param the key type - * @param the value type * @param map the map */ - void removeMap(TransactionMap map) { - store.removeMap(map.map, false); + void removeMap(TransactionMap map) { + store.removeMap(map.map); } /** @@ -442,39 +492,44 @@ void removeMap(TransactionMap map) { void commit(Transaction t, boolean recovery) { if (!store.isClosed()) { int transactionId = t.transactionId; + // First, mark log as "committed". + // It does not change the way this transaction is treated by others, + // but preserves fact of commit in case of abrupt termination. 
+ MVMap> undoLog = undoLogs[transactionId]; + Cursor> cursor; + if(recovery) { + removeUndoLogRecord(transactionId); + cursor = undoLog.cursor(null); + } else { + cursor = undoLog.cursor(null); + markUndoLogAsCommitted(transactionId); + } + // this is an atomic action that causes all changes // made by this transaction, to be considered as "committed" flipCommittingTransactionsBit(transactionId, true); - CommitDecisionMaker commitDecisionMaker = new CommitDecisionMaker(); + CommitDecisionMaker commitDecisionMaker = new CommitDecisionMaker<>(); try { - MVMap undoLog = undoLogs[transactionId]; - if(!recovery) { - store.renameMap(undoLog, getUndoLogName(true, transactionId)); + while (cursor.hasNext()) { + Long undoKey = cursor.next(); + Record op = cursor.getValue(); + int mapId = op.mapId; + MVMap> map = openMap(mapId); + if (map != null && !map.isClosed()) { // might be null if map was removed later + Object key = op.key; + commitDecisionMaker.setUndoKey(undoKey); + // second parameter (value) is not really + // used by CommitDecisionMaker + map.operate(key, null, commitDecisionMaker); + } } + } finally { try { - Cursor cursor = undoLog.cursor(null); - while (cursor.hasNext()) { - Long undoKey = cursor.next(); - Object[] op = cursor.getValue(); - int mapId = (Integer) op[0]; - MVMap map = openMap(mapId); - if (map != null) { // might be null if map was removed later - Object key = op[1]; - commitDecisionMaker.setUndoKey(undoKey); - // although second parameter (value) is not really - // used by CommitDecisionMaker, MVRTreeMap has weird - // traversal logic based on it, and any non-null - // value will do, to signify update, not removal - map.operate(key, VersionedValue.DUMMY, commitDecisionMaker); - } - } undoLog.clear(); } finally { - store.renameMap(undoLog, getUndoLogName(false, transactionId)); + flipCommittingTransactionsBit(transactionId, false); } - } finally { - flipCommittingTransactionsBit(transactionId, false); } } } @@ -490,48 +545,56 @@ private void 
flipCommittingTransactionsBit(int transactionId, boolean flag) { } while(!success); } + MVMap> openVersionedMap(String name, DataType keyType, DataType valueType) { + VersionedValueType vt = valueType == null ? null : new VersionedValueType<>(valueType); + return openMap(name, keyType, vt); + } + /** * Open the map with the given name. * * @param the key type + * @param the value type * @param name the map name * @param keyType the key type * @param valueType the value type * @return the map */ - MVMap openMap(String name, - DataType keyType, DataType valueType) { - if (keyType == null) { - keyType = new ObjectDataType(); - } - if (valueType == null) { - valueType = new ObjectDataType(); - } - VersionedValueType vt = new VersionedValueType(valueType); - MVMap map; - MVMap.Builder builder = - new MVMap.Builder(). - keyType(keyType).valueType(vt); - map = store.openMap(name, builder); - return map; + public MVMap openMap(String name, DataType keyType, DataType valueType) { + return store.openMap(name, new TxMapBuilder(typeRegistry, dataType) + .keyType(keyType).valueType(valueType)); } /** * Open the map with the given id. * + * @param key type + * @param value type + * * @param mapId the id * @return the map */ - MVMap openMap(int mapId) { - MVMap map = store.getMap(mapId); + MVMap> openMap(int mapId) { + MVMap> map = store.getMap(mapId); if (map == null) { String mapName = store.getMapName(mapId); if (mapName == null) { // the map was removed later on return null; } - map = store.openMap(mapName, mapBuilder); + MVMap.Builder> txMapBuilder = new TxMapBuilder<>(typeRegistry, dataType); + map = store.openMap(mapId, txMapBuilder); + } + return map; + } + + MVMap> getMap(int mapId) { + MVMap> map = store.getMap(mapId); + if (map == null && !init) { + map = openMap(mapId); } + assert map != null : "map with id " + mapId + " is missing" + + (init ? 
"" : " during initialization"); return map; } @@ -565,28 +628,71 @@ void endTransaction(Transaction t, boolean hasChanges) { preparedTransactions.remove(txId); } - if (wasStored || store.getAutoCommitDelay() == 0) { - store.tryCommit(); - } else { - if (isUndoEmpty()) { - // to avoid having to store the transaction log, - // if there is no open transaction, - // and if there have been many changes, store them now - int unsaved = store.getUnsavedMemory(); - int max = store.getAutoCommitMemory(); - // save at 3/4 capacity - if (unsaved * 4 > max * 3) { - store.tryCommit(); + if (store.getFileStore() != null) { + if (wasStored || store.getAutoCommitDelay() == 0) { + store.commit(); + } else { + if (isUndoEmpty()) { + // to avoid having to store the transaction log, + // if there is no open transaction, + // and if there have been many changes, store them now + int unsaved = store.getUnsavedMemory(); + int max = store.getAutoCommitMemory(); + // save at 3/4 capacity + if (unsaved * 4 > max * 3) { + store.tryCommit(); + } } } } } } + /** + * Get the root references (snapshots) for undo-log maps. + * Those snapshots can potentially be used to optimize TransactionMap.size(). 
+ * + * @return the array of root references or null if snapshotting is not possible + */ + RootReference>[] collectUndoLogRootReferences() { + BitSet opentransactions = openTransactions.get(); + @SuppressWarnings("unchecked") + RootReference>[] undoLogRootReferences = new RootReference[opentransactions.length()]; + for (int i = opentransactions.nextSetBit(0); i >= 0; i = opentransactions.nextSetBit(i+1)) { + MVMap> undoLog = undoLogs[i]; + if (undoLog != null) { + RootReference> rootReference = undoLog.getRoot(); + if (rootReference.needFlush()) { + // abort attempt to collect snapshots for all undo logs + // because map's append buffer can't be flushed from a non-owning thread + return null; + } + undoLogRootReferences[i] = rootReference; + } + } + return undoLogRootReferences; + } + + /** + * Calculate the size for undo log entries. + * + * @param undoLogRootReferences the root references + * @return the number of key-value pairs + */ + static long calculateUndoLogsTotalSize(RootReference>[] undoLogRootReferences) { + long undoLogsTotalSize = 0; + for (RootReference> rootReference : undoLogRootReferences) { + if (rootReference != null) { + undoLogsTotalSize += rootReference.getTotalCount(); + } + } + return undoLogsTotalSize; + } + private boolean isUndoEmpty() { BitSet openTrans = openTransactions.get(); for (int i = openTrans.nextSetBit(0); i >= 0; i = openTrans.nextSetBit(i + 1)) { - MVMap undoLog = undoLogs[i]; + MVMap> undoLog = undoLogs[i]; if (undoLog != null && !undoLog.isEmpty()) { return false; } @@ -613,7 +719,7 @@ Transaction getTransaction(int transactionId) { */ void rollbackTo(Transaction t, long maxLogId, long toLogId) { int transactionId = t.getId(); - MVMap undoLog = undoLogs[transactionId]; + MVMap> undoLog = undoLogs[transactionId]; RollbackDecisionMaker decisionMaker = new RollbackDecisionMaker(this, transactionId, toLogId, t.listener); for (long logId = maxLogId - 1; logId >= toLogId; logId--) { Long undoKey = getOperationId(transactionId, 
logId); @@ -634,7 +740,7 @@ void rollbackTo(Transaction t, long maxLogId, long toLogId) { Iterator getChanges(final Transaction t, final long maxLogId, final long toLogId) { - final MVMap undoLog = undoLogs[t.getId()]; + final MVMap> undoLog = undoLogs[t.getId()]; return new Iterator() { private long logId = maxLogId - 1; @@ -644,7 +750,7 @@ private void fetchNext() { int transactionId = t.getId(); while (logId >= toLogId) { Long undoKey = getOperationId(transactionId, logId); - Object[] op = undoLog.get(undoKey); + Record op = undoLog.get(undoKey); logId--; if (op == null) { // partially rolled back: load previous @@ -655,11 +761,12 @@ private void fetchNext() { logId = getLogId(undoKey); continue; } - int mapId = (int)op[0]; - MVMap m = openMap(mapId); + int mapId = op.mapId; + MVMap> m = openMap(mapId); if (m != null) { // could be null if map was removed later on - VersionedValue oldValue = (VersionedValue) op[2]; - current = new Change(m.getName(), op[1], oldValue == null ? null : oldValue.getCurrentValue()); + VersionedValue oldValue = op.oldValue; + current = new Change(m.getName(), op.key, + oldValue == null ? 
null : oldValue.getCurrentValue()); return; } } @@ -684,11 +791,6 @@ public Change next() { return result; } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } @@ -736,105 +838,124 @@ public interface RollbackListener { * @param existingValue value in the map (null if delete is rolled back) * @param restoredValue value to be restored (null if add is rolled back) */ - void onRollback(MVMap map, Object key, - VersionedValue existingValue, VersionedValue restoredValue); + void onRollback(MVMap> map, Object key, + VersionedValue existingValue, VersionedValue restoredValue); } - private static final RollbackListener ROLLBACK_LISTENER_NONE = new RollbackListener() { - @Override - public void onRollback(MVMap map, Object key, - VersionedValue existingValue, VersionedValue restoredValue) { - // do nothing - } - }; + private static final RollbackListener ROLLBACK_LISTENER_NONE = (map, key, existingValue, restoredValue) -> {}; - /** - * A data type that contains an array of objects with the specified data - * types. 
- */ - public static class ArrayType implements DataType { + private static final class TxMapBuilder extends MVMap.Builder { - private final int arrayLength; - private final DataType[] elementTypes; + private final MVMap> typeRegistry; + private final DataType defaultDataType; - ArrayType(DataType[] elementTypes) { - this.arrayLength = elementTypes.length; - this.elementTypes = elementTypes; + TxMapBuilder(MVMap> typeRegistry, DataType defaultDataType) { + this.typeRegistry = typeRegistry; + this.defaultDataType = defaultDataType; } - @Override - public int getMemory(Object obj) { - Object[] array = (Object[]) obj; - int size = 0; - for (int i = 0; i < arrayLength; i++) { - DataType t = elementTypes[i]; - Object o = array[i]; - if (o != null) { - size += t.getMemory(o); - } + private void registerDataType(DataType dataType) { + String key = getDataTypeRegistrationKey(dataType); + DataType registeredDataType = typeRegistry.putIfAbsent(key, dataType); + if(registeredDataType != null) { + // TODO: ensure type consistency } - return size; } + static String getDataTypeRegistrationKey(DataType dataType) { + return Integer.toHexString(Objects.hashCode(dataType)); + } + + @SuppressWarnings("unchecked") @Override - public int compare(Object aObj, Object bObj) { - if (aObj == bObj) { - return 0; + public MVMap create(MVStore store, Map config) { + DataType keyType = getKeyType(); + if (keyType == null) { + String keyTypeKey = (String) config.remove("key"); + if (keyTypeKey != null) { + keyType = (DataType)typeRegistry.get(keyTypeKey); + if (keyType == null) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_UNKNOWN_DATA_TYPE, + "Data type with hash {0} can not be found", keyTypeKey); + } + setKeyType(keyType); + } + } else { + registerDataType(keyType); } - Object[] a = (Object[]) aObj; - Object[] b = (Object[]) bObj; - for (int i = 0; i < arrayLength; i++) { - DataType t = elementTypes[i]; - int comp = t.compare(a[i], b[i]); - if (comp != 0) { - return comp; + + 
DataType valueType = getValueType(); + if (valueType == null) { + String valueTypeKey = (String) config.remove("val"); + if (valueTypeKey != null) { + valueType = (DataType)typeRegistry.get(valueTypeKey); + if (valueType == null) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_UNKNOWN_DATA_TYPE, + "Data type with hash {0} can not be found", valueTypeKey); + } + setValueType(valueType); } + } else { + registerDataType(valueType); } - return 0; - } - @Override - public void read(ByteBuffer buff, Object[] obj, - int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); + if (getKeyType() == null) { + setKeyType(defaultDataType); + registerDataType(getKeyType()); + } + if (getValueType() == null) { + setValueType((DataType) new VersionedValueType(defaultDataType)); + registerDataType(getValueType()); } + + config.put("store", store); + config.put("key", getKeyType()); + config.put("val", getValueType()); + return create(config); } @Override - public void write(WriteBuffer buff, Object[] obj, - int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); + @SuppressWarnings("unchecked") + protected MVMap create(Map config) { + if ("rtree".equals(config.get("type"))) { + MVMap map = (MVMap) new MVRTreeMap<>(config, (SpatialDataType) getKeyType(), + getValueType()); + return map; } + return new TMVMap<>(config, getKeyType(), getValueType()); } - @Override - public void write(WriteBuffer buff, Object obj) { - Object[] array = (Object[]) obj; - for (int i = 0; i < arrayLength; i++) { - DataType t = elementTypes[i]; - Object o = array[i]; - if (o == null) { - buff.put((byte) 0); - } else { - buff.put((byte) 1); - t.write(buff, o); - } + private static final class TMVMap extends MVMap { + private final String type; + + TMVMap(Map config, DataType keyType, DataType valueType) { + super(config, keyType, valueType); + type = (String)config.get("type"); } - } - @Override - public Object read(ByteBuffer buff) { - Object[] array = 
new Object[arrayLength]; - for (int i = 0; i < arrayLength; i++) { - DataType t = elementTypes[i]; - if (buff.get() == 1) { - array[i] = t.read(buff); - } + private TMVMap(MVMap source) { + super(source); + type = source.getType(); + } + + @Override + protected MVMap cloneIt() { + return new TMVMap<>(this); } - return array; - } + @Override + public String getType() { + return type; + } + + @Override + protected String asString(String name) { + StringBuilder buff = new StringBuilder(); + buff.append(super.asString(name)); + DataUtils.appendMap(buff, "key", getDataTypeRegistrationKey(getKeyType())); + DataUtils.appendMap(buff, "val", getDataTypeRegistrationKey(getValueType())); + return buff.toString(); + } + } } } diff --git a/h2/src/main/org/h2/mvstore/tx/TxDecisionMaker.java b/h2/src/main/org/h2/mvstore/tx/TxDecisionMaker.java index 8ecf862259..2ab6535b6d 100644 --- a/h2/src/main/org/h2/mvstore/tx/TxDecisionMaker.java +++ b/h2/src/main/org/h2/mvstore/tx/TxDecisionMaker.java @@ -1,11 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.tx; +import java.util.function.Function; +import org.h2.mvstore.DataUtils; import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVMap.Decision; +import org.h2.mvstore.type.DataType; import org.h2.value.VersionedValue; /** @@ -14,7 +18,7 @@ * * @author Andrei Tokar */ -abstract class TxDecisionMaker extends MVMap.DecisionMaker { +class TxDecisionMaker extends MVMap.DecisionMaker> { /** * Map to decide upon */ @@ -23,12 +27,12 @@ abstract class TxDecisionMaker extends MVMap.DecisionMaker { /** * Key for the map entry to decide upon */ - private final Object key; + protected K key; /** * Value for the map entry */ - final Object value; + private V value; /** * Transaction we are operating within @@ -38,25 +42,32 @@ abstract class TxDecisionMaker extends MVMap.DecisionMaker { /** * Id for the undo log entry created for this modification */ - long undoKey; + private long undoKey; /** - * Id of the last operation, we decided to {@link MVMap.Decision.REPEAT}. + * Id of the last operation, we decided to + * {@link org.h2.mvstore.MVMap.Decision#REPEAT}. 
*/ private long lastOperationId; private Transaction blockingTransaction; private MVMap.Decision decision; + private V lastValue; - TxDecisionMaker(int mapId, Object key, Object value, Transaction transaction) { + TxDecisionMaker(int mapId, Transaction transaction) { this.mapId = mapId; + this.transaction = transaction; + } + + void initialize(K key, V value) { this.key = key; this.value = value; - this.transaction = transaction; + decision = null; + reset(); } @Override - public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { + public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { assert decision == null; long id; int blockingId; @@ -66,20 +77,20 @@ public MVMap.Decision decide(VersionedValue existingValue, VersionedValue provid (id = existingValue.getOperationId()) == 0 || // or it came from the same transaction isThisTransaction(blockingId = TransactionStore.getTransactionId(id))) { - logIt(existingValue); - decision = MVMap.Decision.PUT; + logAndDecideToPut(existingValue, existingValue == null ? null : existingValue.getCommittedValue()); } else if (isCommitted(blockingId)) { // Condition above means that entry belongs to a committing transaction. // We assume that we are looking at the final value for this transaction, // and if it's not the case, then it will fail later, // because a tree root has definitely been changed. - logIt(existingValue.getCurrentValue() == null ? null - : VersionedValueCommitted.getInstance(existingValue.getCurrentValue())); - decision = MVMap.Decision.PUT; + V currentValue = existingValue.getCurrentValue(); + logAndDecideToPut(currentValue == null ? 
null : VersionedValueCommitted.getInstance(currentValue), + currentValue); } else if (getBlockingTransaction() != null) { // this entry comes from a different transaction, and this // transaction is not committed yet // should wait on blockingTransaction that was determined earlier + lastValue = existingValue.getCurrentValue(); decision = MVMap.Decision.ABORT; } else if (isRepeatedOperation(id)) { // There is no transaction with that id, and we've tried it just @@ -89,9 +100,9 @@ public MVMap.Decision decide(VersionedValue existingValue, VersionedValue provid // Now we assume it's a leftover after unclean shutdown (map update // was written but not undo log), and will effectively roll it back // (just assume committed value and overwrite). - Object committedValue = existingValue.getCommittedValue(); - logIt(committedValue == null ? null : VersionedValueCommitted.getInstance(committedValue)); - decision = MVMap.Decision.PUT; + V committedValue = existingValue.getCommittedValue(); + logAndDecideToPut(committedValue == null ? null : VersionedValueCommitted.getInstance(committedValue), + committedValue); } else { // transaction has been committed/rolled back and is closed by now, so // we can retry immediately and either that entry become committed @@ -113,9 +124,52 @@ public final void reset() { } blockingTransaction = null; decision = null; + lastValue = null; + } + + @SuppressWarnings("unchecked") + @Override + // always return value (ignores existingValue) + public > T selectValue(T existingValue, T providedValue) { + return (T) VersionedValueUncommitted.getInstance(undoKey, getNewValue(existingValue), lastValue); } - public final MVMap.Decision getDecision() { + /** + * Get the new value. + * This implementation always return the current value (ignores the parameter). + * + * @param existingValue the parameter value + * @return the current value. 
+ */ + V getNewValue(VersionedValue existingValue) { + return value; + } + + /** + * Create undo log entry and record for future references + * {@link org.h2.mvstore.MVMap.Decision#PUT} decision along with last known + * committed value + * + * @param valueToLog previous value to be logged + * @param lastValue last known committed value + * @return {@link org.h2.mvstore.MVMap.Decision#PUT} + */ + MVMap.Decision logAndDecideToPut(VersionedValue valueToLog, V lastValue) { + undoKey = transaction.log(new Record<>(mapId, key, valueToLog)); + this.lastValue = lastValue; + return setDecision(MVMap.Decision.PUT); + } + + final MVMap.Decision decideToAbort(V lastValue) { + this.lastValue = lastValue; + return setDecision(Decision.ABORT); + } + + final boolean allowNonRepeatableRead() { + return transaction.allowNonRepeatableRead(); + } + + final MVMap.Decision getDecision() { return decision; } @@ -123,12 +177,8 @@ final Transaction getBlockingTransaction() { return blockingTransaction; } - /** - * Create undo log entry - * @param value previous value to be logged - */ - final void logIt(VersionedValue value) { - undoKey = transaction.log(mapId, key, value); + final V getLastValue() { + return lastValue; } /** @@ -153,10 +203,11 @@ final boolean isThisTransaction(int transactionId) { final boolean isCommitted(int transactionId) { Transaction blockingTx; boolean result; + TransactionStore store = transaction.store; do { - blockingTx = transaction.store.getTransaction(transactionId); - result = transaction.store.committingTransactions.get().get(transactionId); - } while (blockingTx != transaction.store.getTransaction(transactionId)); + blockingTx = store.getTransaction(transactionId); + result = store.committingTransactions.get().get(transactionId); + } while (blockingTx != store.getTransaction(transactionId)); if (!result) { blockingTransaction = blockingTx; @@ -169,7 +220,9 @@ final boolean isCommitted(int transactionId) { * This is to prevent an infinite loop in case of 
uncommitted "leftover" entry * (one without a corresponding undo log entry, most likely as a result of unclean shutdown). * - * @param id for the operation we decided to {@link MVMap.Decision.REPEAT} + * @param id + * for the operation we decided to + * {@link org.h2.mvstore.MVMap.Decision#REPEAT} * @return true if the same as last operation id, false otherwise */ final boolean isRepeatedOperation(long id) { @@ -196,59 +249,63 @@ public final String toString() { } - public static class PutDecisionMaker extends TxDecisionMaker - { - PutDecisionMaker(int mapId, Object key, Object value, Transaction transaction) { - super(mapId, key, value, transaction); - } - - @SuppressWarnings("unchecked") - @Override - public final VersionedValue selectValue(VersionedValue existingValue, VersionedValue providedValue) { - return VersionedValueUncommitted.getInstance(undoKey, value, - existingValue == null ? null : existingValue.getCommittedValue()); - } - } + public static final class PutIfAbsentDecisionMaker extends TxDecisionMaker { + private final Function oldValueSupplier; - public static final class PutIfAbsentDecisionMaker extends PutDecisionMaker - { - PutIfAbsentDecisionMaker(int mapId, Object key, Object value, Transaction transaction) { - super(mapId, key, value, transaction); + PutIfAbsentDecisionMaker(int mapId, Transaction transaction, Function oldValueSupplier) { + super(mapId, transaction); + this.oldValueSupplier = oldValueSupplier; } @Override - public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { + public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { assert getDecision() == null; int blockingId; // if map does not have that entry yet if (existingValue == null) { - logIt(null); - return setDecision(MVMap.Decision.PUT); + V snapshotValue = getValueInSnapshot(); + if (snapshotValue != null) { + // value exists in a snapshot but not in current map, therefore + // it was removed and committed 
by another transaction + return decideToAbort(snapshotValue); + } + return logAndDecideToPut(null, null); } else { long id = existingValue.getOperationId(); if (id == 0 // entry is a committed one // or it came from the same transaction || isThisTransaction(blockingId = TransactionStore.getTransactionId(id))) { if(existingValue.getCurrentValue() != null) { - return setDecision(MVMap.Decision.ABORT); + return decideToAbort(existingValue.getCurrentValue()); + } + if (id == 0) { + V snapshotValue = getValueInSnapshot(); + if (snapshotValue != null) { + return decideToAbort(snapshotValue); + } } - logIt(existingValue); - return setDecision(MVMap.Decision.PUT); + return logAndDecideToPut(existingValue, existingValue.getCommittedValue()); } else if (isCommitted(blockingId)) { // entry belongs to a committing transaction // and therefore will be committed soon if(existingValue.getCurrentValue() != null) { - return setDecision(MVMap.Decision.ABORT); + return decideToAbort(existingValue.getCurrentValue()); + } + // even if that commit will result in entry removal + // current operation should fail within repeatable read transaction + // if initial snapshot carries some value + V snapshotValue = getValueInSnapshot(); + if (snapshotValue != null) { + return decideToAbort(snapshotValue); } - logIt(null); - return setDecision(MVMap.Decision.PUT); + return logAndDecideToPut(null, null); } else if (getBlockingTransaction() != null) { // this entry comes from a different transaction, and this // transaction is not committed yet // should wait on blockingTransaction that was determined // earlier and then try again - return setDecision(MVMap.Decision.ABORT); + return decideToAbort(existingValue.getCurrentValue()); } else if (isRepeatedOperation(id)) { // There is no transaction with that id, and we've tried it // just before, but map root has not changed (which must be @@ -258,12 +315,11 @@ public MVMap.Decision decide(VersionedValue existingValue, VersionedValue provid // update 
was written but not undo log), and will // effectively roll it back (just assume committed value and // overwrite). - Object committedValue = existingValue.getCommittedValue(); + V committedValue = existingValue.getCommittedValue(); if (committedValue != null) { - return setDecision(MVMap.Decision.ABORT); + return decideToAbort(committedValue); } - logIt(null); - return setDecision(MVMap.Decision.PUT); + return logAndDecideToPut(null, null); } else { // transaction has been committed/rolled back and is closed // by now, so we can retry immediately and either that entry @@ -272,17 +328,21 @@ public MVMap.Decision decide(VersionedValue existingValue, VersionedValue provid } } } + + private V getValueInSnapshot() { + return allowNonRepeatableRead() ? null : oldValueSupplier.apply(key); + } } - public static final class LockDecisionMaker extends TxDecisionMaker - { - LockDecisionMaker(int mapId, Object key, Transaction transaction) { - super(mapId, key, null, transaction); + public static class LockDecisionMaker extends TxDecisionMaker { + + LockDecisionMaker(int mapId, Transaction transaction) { + super(mapId, transaction); } @Override - public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { + public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { MVMap.Decision decision = super.decide(existingValue, providedValue); if (existingValue == null) { assert decision == MVMap.Decision.PUT; @@ -291,12 +351,33 @@ public MVMap.Decision decide(VersionedValue existingValue, VersionedValue provid return decision; } - @SuppressWarnings("unchecked") @Override - public VersionedValue selectValue(VersionedValue existingValue, VersionedValue providedValue) { - return VersionedValueUncommitted.getInstance(undoKey, - existingValue == null ? null : existingValue.getCurrentValue(), - existingValue == null ? 
null : existingValue.getCommittedValue()); + V getNewValue(VersionedValue existingValue) { + return existingValue == null ? null : existingValue.getCurrentValue(); + } + } + + public static final class RepeatableReadLockDecisionMaker extends LockDecisionMaker { + + private final DataType> valueType; + + private final Function snapshotValueSupplier; + + RepeatableReadLockDecisionMaker(int mapId, Transaction transaction, + DataType> valueType, Function snapshotValueSupplier) { + super(mapId, transaction); + this.valueType = valueType; + this.snapshotValueSupplier = snapshotValueSupplier; + } + + @Override + Decision logAndDecideToPut(VersionedValue valueToLog, V value) { + V snapshotValue = snapshotValueSupplier.apply(key); + if (snapshotValue != null && (valueToLog == null + || valueType.compare(VersionedValueCommitted.getInstance(snapshotValue), valueToLog) != 0)) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_TRANSACTIONS_DEADLOCK, ""); + } + return super.logAndDecideToPut(valueToLog, value); } } } diff --git a/h2/src/main/org/h2/mvstore/tx/VersionedBitSet.java b/h2/src/main/org/h2/mvstore/tx/VersionedBitSet.java index 55c0dc6a8b..e0d8351195 100644 --- a/h2/src/main/org/h2/mvstore/tx/VersionedBitSet.java +++ b/h2/src/main/org/h2/mvstore/tx/VersionedBitSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.tx; diff --git a/h2/src/main/org/h2/mvstore/tx/VersionedValueCommitted.java b/h2/src/main/org/h2/mvstore/tx/VersionedValueCommitted.java index 6cbd9ecd19..3d0df25758 100644 --- a/h2/src/main/org/h2/mvstore/tx/VersionedValueCommitted.java +++ b/h2/src/main/org/h2/mvstore/tx/VersionedValueCommitted.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.tx; @@ -12,33 +12,37 @@ * * @author Andrei Tokar */ -class VersionedValueCommitted extends VersionedValue { +class VersionedValueCommitted extends VersionedValue { /** * The current value. */ - public final Object value; + public final T value; - VersionedValueCommitted(Object value) { + VersionedValueCommitted(T value) { this.value = value; } /** * Either cast to VersionedValue, or wrap in VersionedValueCommitted + * + * @param type of the value to get the VersionedValue for + * * @param value the object to cast/wrap * @return VersionedValue instance */ - static VersionedValue getInstance(Object value) { + @SuppressWarnings("unchecked") + static VersionedValue getInstance(X value) { assert value != null; - return value instanceof VersionedValue ? (VersionedValue) value : new VersionedValueCommitted(value); + return value instanceof VersionedValue ? (VersionedValue)value : new VersionedValueCommitted<>(value); } @Override - public Object getCurrentValue() { + public T getCurrentValue() { return value; } @Override - public Object getCommittedValue() { + public T getCommittedValue() { return value; } diff --git a/h2/src/main/org/h2/mvstore/tx/VersionedValueType.java b/h2/src/main/org/h2/mvstore/tx/VersionedValueType.java index c55391cd10..a088b70c41 100644 --- a/h2/src/main/org/h2/mvstore/tx/VersionedValueType.java +++ b/h2/src/main/org/h2/mvstore/tx/VersionedValueType.java @@ -1,32 +1,42 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.tx; +import java.nio.ByteBuffer; import org.h2.engine.Constants; import org.h2.mvstore.DataUtils; import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.BasicDataType; import org.h2.mvstore.type.DataType; +import org.h2.mvstore.type.MetaType; +import org.h2.mvstore.type.StatefulDataType; import org.h2.value.VersionedValue; -import java.nio.ByteBuffer; /** * The value type for a versioned value. */ -public class VersionedValueType implements DataType { +public class VersionedValueType extends BasicDataType> implements StatefulDataType { - private final DataType valueType; + private final DataType valueType; + private final Factory factory = new Factory<>(); - public VersionedValueType(DataType valueType) { + + public VersionedValueType(DataType valueType) { this.valueType = valueType; } @Override - public int getMemory(Object obj) { - if(obj == null) return 0; - VersionedValue v = (VersionedValue) obj; + @SuppressWarnings("unchecked") + public VersionedValue[] createStorage(int size) { + return new VersionedValue[size]; + } + + @Override + public int getMemory(VersionedValue v) { + if(v == null) return 0; int res = Constants.MEMORY_OBJECT + 8 + 2 * Constants.MEMORY_POINTER + getValMemory(v.getCurrentValue()); if (v.getOperationId() != 0) { @@ -35,61 +45,43 @@ public int getMemory(Object obj) { return res; } - private int getValMemory(Object obj) { + private int getValMemory(T obj) { return obj == null ? 
0 : valueType.getMemory(obj); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj == bObj) { - return 0; - } else if (aObj == null) { - return -1; - } else if (bObj == null) { - return 1; - } - VersionedValue a = (VersionedValue) aObj; - VersionedValue b = (VersionedValue) bObj; - long comp = a.getOperationId() - b.getOperationId(); - if (comp == 0) { - return valueType.compare(a.getCurrentValue(), b.getCurrentValue()); - } - return Long.signum(comp); - } - - @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { + public void read(ByteBuffer buff, Object storage, int len) { if (buff.get() == 0) { // fast path (no op ids or null entries) for (int i = 0; i < len; i++) { - obj[i] = VersionedValueCommitted.getInstance(valueType.read(buff)); + cast(storage)[i] = VersionedValueCommitted.getInstance(valueType.read(buff)); } } else { // slow path (some entries may be null) for (int i = 0; i < len; i++) { - obj[i] = read(buff); + cast(storage)[i] = read(buff); } } } @Override - public Object read(ByteBuffer buff) { + public VersionedValue read(ByteBuffer buff) { long operationId = DataUtils.readVarLong(buff); if (operationId == 0) { return VersionedValueCommitted.getInstance(valueType.read(buff)); } else { byte flags = buff.get(); - Object value = (flags & 1) != 0 ? valueType.read(buff) : null; - Object committedValue = (flags & 2) != 0 ? valueType.read(buff) : null; + T value = (flags & 1) != 0 ? valueType.read(buff) : null; + T committedValue = (flags & 2) != 0 ? 
valueType.read(buff) : null; return VersionedValueUncommitted.getInstance(operationId, value, committedValue); } } @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { + public void write(WriteBuffer buff, Object storage, int len) { boolean fastPath = true; for (int i = 0; i < len; i++) { - VersionedValue v = (VersionedValue) obj[i]; + VersionedValue v = cast(storage)[i]; if (v.getOperationId() != 0 || v.getCurrentValue() == null) { fastPath = false; } @@ -97,7 +89,7 @@ public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { if (fastPath) { buff.put((byte) 0); for (int i = 0; i < len; i++) { - VersionedValue v = (VersionedValue) obj[i]; + VersionedValue v = cast(storage)[i]; valueType.write(buff, v.getCurrentValue()); } } else { @@ -105,20 +97,19 @@ public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { // store op ids, and some entries may be null buff.put((byte) 1); for (int i = 0; i < len; i++) { - write(buff, obj[i]); + write(buff, cast(storage)[i]); } } } @Override - public void write(WriteBuffer buff, Object obj) { - VersionedValue v = (VersionedValue) obj; + public void write(WriteBuffer buff, VersionedValue v) { long operationId = v.getOperationId(); buff.putVarLong(operationId); if (operationId == 0) { valueType.write(buff, v.getCurrentValue()); } else { - Object committedValue = v.getCommittedValue(); + T committedValue = v.getCommittedValue(); int flags = (v.getCurrentValue() == null ? 0 : 1) | (committedValue == null ? 
0 : 2); buff.put((byte) flags); if (v.getCurrentValue() != null) { @@ -129,4 +120,45 @@ public void write(WriteBuffer buff, Object obj) { } } } + + @Override + @SuppressWarnings("unchecked") + public boolean equals(Object obj) { + if (obj == this) { + return true; + } else if (!(obj instanceof VersionedValueType)) { + return false; + } + VersionedValueType other = (VersionedValueType) obj; + return valueType.equals(other.valueType); + } + + @Override + public int hashCode() { + return super.hashCode() ^ valueType.hashCode(); + } + + @Override + public void save(WriteBuffer buff, MetaType metaType) { + metaType.write(buff, valueType); + } + + @Override + public int compare(VersionedValue a, VersionedValue b) { + return valueType.compare(a.getCurrentValue(), b.getCurrentValue()); + } + + @Override + public Factory getFactory() { + return factory; + } + + public static final class Factory implements StatefulDataType.Factory { + @SuppressWarnings("unchecked") + @Override + public DataType create(ByteBuffer buff, MetaType metaType, D database) { + DataType> valueType = (DataType>)metaType.read(buff); + return new VersionedValueType,D>(valueType); + } + } } diff --git a/h2/src/main/org/h2/mvstore/tx/VersionedValueUncommitted.java b/h2/src/main/org/h2/mvstore/tx/VersionedValueUncommitted.java index 340ea98140..dad0b330c3 100644 --- a/h2/src/main/org/h2/mvstore/tx/VersionedValueUncommitted.java +++ b/h2/src/main/org/h2/mvstore/tx/VersionedValueUncommitted.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.tx; @@ -12,11 +12,11 @@ * * @author Andrei Tokar */ -class VersionedValueUncommitted extends VersionedValueCommitted { +class VersionedValueUncommitted extends VersionedValueCommitted { private final long operationId; - private final Object committedValue; + private final T committedValue; - private VersionedValueUncommitted(long operationId, Object value, Object committedValue) { + private VersionedValueUncommitted(long operationId, T value, T committedValue) { super(value); assert operationId != 0; this.operationId = operationId; @@ -26,13 +26,15 @@ private VersionedValueUncommitted(long operationId, Object value, Object committ /** * Create new VersionedValueUncommitted. * + * @param type of the value to get the VersionedValue for + * * @param operationId combined log/transaction id * @param value value before commit * @param committedValue value after commit * @return VersionedValue instance */ - static VersionedValue getInstance(long operationId, Object value, Object committedValue) { - return new VersionedValueUncommitted(operationId, value, committedValue); + static VersionedValue getInstance(long operationId, X value, X committedValue) { + return new VersionedValueUncommitted<>(operationId, value, committedValue); } @Override @@ -46,7 +48,7 @@ public long getOperationId() { } @Override - public Object getCommittedValue() { + public T getCommittedValue() { return committedValue; } diff --git a/h2/src/main/org/h2/mvstore/tx/package.html b/h2/src/main/org/h2/mvstore/tx/package.html index 13e447c90a..08b0f02706 100644 --- a/h2/src/main/org/h2/mvstore/tx/package.html +++ b/h2/src/main/org/h2/mvstore/tx/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mvstore/type/BasicDataType.java b/h2/src/main/org/h2/mvstore/type/BasicDataType.java new file mode 100644 index 0000000000..d9c79e6f08 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/type/BasicDataType.java @@ -0,0 +1,98 @@ +/* + * Copyright 
2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.type; + +import java.nio.ByteBuffer; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; + +/** + * The base class for data type implementations. + * + * @author Andrei Tokar + */ +public abstract class BasicDataType implements DataType { + + @Override + public abstract int getMemory(T obj); + + @Override + public abstract void write(WriteBuffer buff, T obj); + + @Override + public abstract T read(ByteBuffer buff); + + @Override + public int compare(T a, T b) { + throw DataUtils.newUnsupportedOperationException("Can not compare"); + } + + @Override + public boolean isMemoryEstimationAllowed() { + return true; + } + + @Override + public int binarySearch(T key, Object storageObj, int size, int initialGuess) { + T[] storage = cast(storageObj); + int low = 0; + int high = size - 1; + // the cached index minus one, so that + // for the first time (when cachedCompare is 0), + // the default value is used + int x = initialGuess - 1; + if (x < 0 || x > high) { + x = high >>> 1; + } + while (low <= high) { + int compare = compare(key, storage[x]); + if (compare > 0) { + low = x + 1; + } else if (compare < 0) { + high = x - 1; + } else { + return x; + } + x = (low + high) >>> 1; + } + return ~low; + } + + @Override + public void write(WriteBuffer buff, Object storage, int len) { + for (int i = 0; i < len; i++) { + write(buff, cast(storage)[i]); + } + } + + @Override + public void read(ByteBuffer buff, Object storage, int len) { + for (int i = 0; i < len; i++) { + cast(storage)[i] = read(buff); + } + } + + @Override + public int hashCode() { + return getClass().getName().hashCode(); + } + + @Override + public boolean equals(Object obj) { + return obj != null && getClass().equals(obj.getClass()); + } + + /** + * Cast the storage object to an array of type T. 
+ * + * @param storage the storage object + * @return the array + */ + @SuppressWarnings("unchecked") + protected final T[] cast(Object storage) { + return (T[])storage; + } +} diff --git a/h2/src/main/org/h2/mvstore/type/ByteArrayDataType.java b/h2/src/main/org/h2/mvstore/type/ByteArrayDataType.java new file mode 100644 index 0000000000..9fb8546268 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/type/ByteArrayDataType.java @@ -0,0 +1,46 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.type; + +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; +import java.nio.ByteBuffer; + +/** + * Class ByteArrayDataType. + * + * @author Andrei Tokar + */ +public final class ByteArrayDataType extends BasicDataType +{ + public static final ByteArrayDataType INSTANCE = new ByteArrayDataType(); + + private ByteArrayDataType() {} + + @Override + public int getMemory(byte[] data) { + return data.length; + } + + @Override + public void write(WriteBuffer buff, byte[] data) { + buff.putVarInt(data.length); + buff.put(data); + } + + @Override + public byte[] read(ByteBuffer buff) { + int size = DataUtils.readVarInt(buff); + byte[] data = new byte[size]; + buff.get(data); + return data; + } + + @Override + public byte[][] createStorage(int size) { + return new byte[size][]; + } +} diff --git a/h2/src/main/org/h2/mvstore/type/DataType.java b/h2/src/main/org/h2/mvstore/type/DataType.java index f39a5d4ea1..4066cbc057 100644 --- a/h2/src/main/org/h2/mvstore/type/DataType.java +++ b/h2/src/main/org/h2/mvstore/type/DataType.java @@ -1,18 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.type; import java.nio.ByteBuffer; +import java.util.Comparator; import org.h2.mvstore.WriteBuffer; /** * A data type. */ -public interface DataType { +public interface DataType extends Comparator { /** * Compare two keys. @@ -22,15 +23,32 @@ public interface DataType { * @return -1 if the first key is smaller, 1 if larger, and 0 if equal * @throws UnsupportedOperationException if the type is not orderable */ - int compare(Object a, Object b); + @Override + int compare(T a, T b); /** - * Estimate the used memory in bytes. + * Perform binary search for the key within the storage + * @param key to search for + * @param storage to search within (an array of type T) + * @param size number of data items in the storage + * @param initialGuess for key position + * @return index of the key , if found, - index of the insertion point, if not + */ + int binarySearch(T key, Object storage, int size, int initialGuess); + + /** + * Calculates the amount of used memory in bytes. * * @param obj the object * @return the used memory */ - int getMemory(Object obj); + int getMemory(T obj); + + /** + * Whether memory estimation based on previously seen values is allowed/desirable + * @return true if memory estimation is allowed + */ + boolean isMemoryEstimationAllowed(); /** * Write an object. @@ -38,17 +56,16 @@ public interface DataType { * @param buff the target buffer * @param obj the value */ - void write(WriteBuffer buff, Object obj); + void write(WriteBuffer buff, T obj); /** * Write a list of objects. * * @param buff the target buffer - * @param obj the objects + * @param storage the objects * @param len the number of objects to write - * @param key whether the objects are keys */ - void write(WriteBuffer buff, Object[] obj, int len, boolean key); + void write(WriteBuffer buff, Object storage, int len); /** * Read an object. 
@@ -56,17 +73,23 @@ public interface DataType { * @param buff the source buffer * @return the object */ - Object read(ByteBuffer buff); + T read(ByteBuffer buff); /** * Read a list of objects. * * @param buff the target buffer - * @param obj the objects + * @param storage the objects * @param len the number of objects to read - * @param key whether the objects are keys */ - void read(ByteBuffer buff, Object[] obj, int len, boolean key); + void read(ByteBuffer buff, Object storage, int len); + /** + * Create storage object of array type to hold values + * + * @param size number of values to hold + * @return storage object + */ + T[] createStorage(int size); } diff --git a/h2/src/main/org/h2/mvstore/type/LongDataType.java b/h2/src/main/org/h2/mvstore/type/LongDataType.java new file mode 100644 index 0000000000..1fbca0eb7f --- /dev/null +++ b/h2/src/main/org/h2/mvstore/type/LongDataType.java @@ -0,0 +1,83 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.type; + +import java.nio.ByteBuffer; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; + +/** + * Class LongDataType. + *
            + *
          • 8/21/17 6:52 PM initial creation + *
          + * + * @author Andrei Tokar + */ +public class LongDataType extends BasicDataType { + + public static final LongDataType INSTANCE = new LongDataType(); + + private static final Long[] EMPTY_LONG_ARR = new Long[0]; + + private LongDataType() {} + + @Override + public int getMemory(Long obj) { + return 8; + } + + @Override + public void write(WriteBuffer buff, Long data) { + buff.putVarLong(data); + } + + @Override + public Long read(ByteBuffer buff) { + return DataUtils.readVarLong(buff); + } + + @Override + public Long[] createStorage(int size) { + return size == 0 ? EMPTY_LONG_ARR : new Long[size]; + } + + @Override + public int compare(Long one, Long two) { + return Long.compare(one, two); + } + + @Override + public int binarySearch(Long keyObj, Object storageObj, int size, int initialGuess) { + long key = keyObj; + Long[] storage = cast(storageObj); + int low = 0; + int high = size - 1; + // the cached index minus one, so that + // for the first time (when cachedCompare is 0), + // the default value is used + int x = initialGuess - 1; + if (x < 0 || x > high) { + x = high >>> 1; + } + return binarySearch(key, storage, low, high, x); + } + + private static int binarySearch(long key, Long[] storage, int low, int high, int x) { + while (low <= high) { + long midVal = storage[x]; + if (key > midVal) { + low = x + 1; + } else if (key < midVal) { + high = x - 1; + } else { + return x; + } + x = (low + high) >>> 1; + } + return -(low + 1); + } +} diff --git a/h2/src/main/org/h2/mvstore/type/MetaType.java b/h2/src/main/org/h2/mvstore/type/MetaType.java new file mode 100644 index 0000000000..d522ca17c0 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/type/MetaType.java @@ -0,0 +1,108 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mvstore.type; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +import org.h2.engine.Constants; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; + +/** + * Class DBMetaType is a type for values in the type registry map. + * + * @param type of opaque parameter passed as an operational context to Factory.create() + * + * @author Andrei Tokar + */ +public final class MetaType extends BasicDataType> { + + private final D database; + private final Thread.UncaughtExceptionHandler exceptionHandler; + private final Map cache = new HashMap<>(); + + public MetaType(D database, Thread.UncaughtExceptionHandler exceptionHandler) { + this.database = database; + this.exceptionHandler = exceptionHandler; + } + + @Override + public int compare(DataType a, DataType b) { + throw new UnsupportedOperationException(); + } + + @Override + public int getMemory(DataType obj) { + return Constants.MEMORY_OBJECT; + } + + @SuppressWarnings("unchecked") + @Override + public void write(WriteBuffer buff, DataType obj) { + Class clazz = obj.getClass(); + StatefulDataType statefulDataType = null; + if (obj instanceof StatefulDataType) { + statefulDataType = (StatefulDataType) obj; + StatefulDataType.Factory factory = statefulDataType.getFactory(); + if (factory != null) { + clazz = factory.getClass(); + } + } + String className = clazz.getName(); + int len = className.length(); + buff.putVarInt(len) + .putStringData(className, len); + if (statefulDataType != null) { + statefulDataType.save(buff, this); + } + } + + @SuppressWarnings("unchecked") + @Override + public DataType read(ByteBuffer buff) { + int len = DataUtils.readVarInt(buff); + String className = DataUtils.readString(buff, len); + try { + Object o = cache.get(className); + if (o != null) { + if (o instanceof StatefulDataType.Factory) { + return ((StatefulDataType.Factory) o).create(buff, this, database); + } + return (DataType) o; 
+ } + Class clazz = Class.forName(className); + boolean singleton = false; + Object obj; + try { + obj = clazz.getDeclaredField("INSTANCE").get(null); + singleton = true; + } catch (ReflectiveOperationException | NullPointerException e) { + obj = clazz.getDeclaredConstructor().newInstance(); + } + if (obj instanceof StatefulDataType.Factory) { + StatefulDataType.Factory factory = (StatefulDataType.Factory) obj; + cache.put(className, factory); + return factory.create(buff, this, database); + } + if (singleton) { + cache.put(className, obj); + } + return (DataType) obj; + } catch (ReflectiveOperationException | SecurityException | IllegalArgumentException e) { + if (exceptionHandler != null) { + exceptionHandler.uncaughtException(Thread.currentThread(), e); + } + throw new RuntimeException(e); + } + } + + @Override + public DataType[] createStorage(int size) { + return new DataType[size]; + } +} diff --git a/h2/src/main/org/h2/mvstore/type/ObjectDataType.java b/h2/src/main/org/h2/mvstore/type/ObjectDataType.java index afa5e02d63..3b41c930d8 100644 --- a/h2/src/main/org/h2/mvstore/type/ObjectDataType.java +++ b/h2/src/main/org/h2/mvstore/type/ObjectDataType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.type; @@ -25,7 +25,7 @@ * A data type implementation for the most common data types, including * serializable objects. */ -public class ObjectDataType implements DataType { +public class ObjectDataType extends BasicDataType { /** * The type constants are also used as tag values. 
@@ -94,76 +94,101 @@ public class ObjectDataType implements DataType { Float.class, Double.class, BigDecimal.class, String.class, UUID.class, Date.class }; - private static final HashMap, Integer> COMMON_CLASSES_MAP = new HashMap<>(32); + private static class Holder { + private static final HashMap, Integer> COMMON_CLASSES_MAP = new HashMap<>(32); - private AutoDetectDataType last = new StringType(this); + static { + for (int i = 0, size = COMMON_CLASSES.length; i < size; i++) { + COMMON_CLASSES_MAP.put(COMMON_CLASSES[i], i); + } + } - @Override - public int compare(Object a, Object b) { - return last.compare(a, b); + /** + * Get the class id, or null if not found. + * + * @param clazz the class + * @return the class id or null + */ + static Integer getCommonClassId(Class clazz) { + return COMMON_CLASSES_MAP.get(clazz); + } } + @SuppressWarnings("unchecked") + private AutoDetectDataType last = selectDataType(TYPE_NULL); + @Override - public int getMemory(Object obj) { - return last.getMemory(obj); + public Object[] createStorage(int size) { + return new Object[size]; } @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); + public int compare(Object a, Object b) { + int typeId = getTypeId(a); + int typeDiff = typeId - getTypeId(b); + if (typeDiff == 0) { + return newType(typeId).compare(a, b); } + return Integer.signum(typeDiff); } @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } + public int getMemory(Object obj) { + return switchType(obj).getMemory(obj); } @Override public void write(WriteBuffer buff, Object obj) { - last.write(buff, obj); + switchType(obj).write(buff, obj); } - private AutoDetectDataType newType(int typeId) { + @SuppressWarnings("unchecked") + private AutoDetectDataType newType(int typeId) { + if (typeId == last.typeId) { + return last; + } + return 
selectDataType(typeId); + } + + @SuppressWarnings("rawtypes") + private AutoDetectDataType selectDataType(int typeId) { switch (typeId) { case TYPE_NULL: - return new NullType(this); + return NullType.INSTANCE; case TYPE_BOOLEAN: - return new BooleanType(this); + return BooleanType.INSTANCE; case TYPE_BYTE: - return new ByteType(this); + return ByteType.INSTANCE; case TYPE_SHORT: - return new ShortType(this); + return ShortType.INSTANCE; case TYPE_CHAR: - return new CharacterType(this); + return CharacterType.INSTANCE; case TYPE_INT: - return new IntegerType(this); + return IntegerType.INSTANCE; case TYPE_LONG: - return new LongType(this); + return LongType.INSTANCE; case TYPE_FLOAT: - return new FloatType(this); + return FloatType.INSTANCE; case TYPE_DOUBLE: - return new DoubleType(this); + return DoubleType.INSTANCE; case TYPE_BIG_INTEGER: - return new BigIntegerType(this); + return BigIntegerType.INSTANCE; case TYPE_BIG_DECIMAL: - return new BigDecimalType(this); + return BigDecimalType.INSTANCE; case TYPE_STRING: - return new StringType(this); + return StringType.INSTANCE; case TYPE_UUID: - return new UUIDType(this); + return UUIDType.INSTANCE; case TYPE_DATE: - return new DateType(this); + return DateType.INSTANCE; case TYPE_ARRAY: - return new ObjectArrayType(this); + return new ObjectArrayType(); case TYPE_SERIALIZED_OBJECT: return new SerializedObjectType(this); + default: + throw DataUtils.newMVStoreException(DataUtils.ERROR_INTERNAL, + "Unsupported type {0}", typeId); } - throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL, - "Unsupported type {0}", typeId); } @Override @@ -218,13 +243,13 @@ public Object read(ByteBuffer buff) { && tag <= TAG_BYTE_ARRAY_0_15 + 15) { typeId = TYPE_ARRAY; } else { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "Unknown tag {0}", tag); } } } - AutoDetectDataType t = last; + AutoDetectDataType t = last; if (typeId != t.typeId) { last = t = 
newType(typeId); } @@ -272,9 +297,9 @@ private static int getTypeId(Object obj) { * @param obj the object * @return the auto-detected type used */ - AutoDetectDataType switchType(Object obj) { + AutoDetectDataType switchType(Object obj) { int typeId = getTypeId(obj); - AutoDetectDataType l = last; + AutoDetectDataType l = last; if (typeId != l.typeId) { last = l = newType(typeId); } @@ -321,28 +346,6 @@ static boolean isArray(Object obj) { return obj != null && obj.getClass().isArray(); } - /** - * Get the class id, or null if not found. - * - * @param clazz the class - * @return the class id or null - */ - static Integer getCommonClassId(Class clazz) { - HashMap, Integer> map = COMMON_CLASSES_MAP; - if (map.size() == 0) { - // lazy initialization - // synchronized, because the COMMON_CLASSES_MAP is not - synchronized (map) { - if (map.size() == 0) { - for (int i = 0, size = COMMON_CLASSES.length; i < size; i++) { - map.put(COMMON_CLASSES[i], i); - } - } - } - } - return map.get(clazz); - } - /** * Serialize the object to a byte array. * @@ -408,10 +411,19 @@ public static int compareNotNull(byte[] data1, byte[] data2) { /** * The base class for auto-detect data types. */ - abstract static class AutoDetectDataType implements DataType { + abstract static class AutoDetectDataType extends BasicDataType { + + private final ObjectDataType base; - protected final ObjectDataType base; - protected final int typeId; + /** + * The type id. 
+ */ + final int typeId; + + AutoDetectDataType(int typeId) { + this.base = null; + this.typeId = typeId; + } AutoDetectDataType(ObjectDataType base, int typeId) { this.base = base; @@ -419,55 +431,22 @@ abstract static class AutoDetectDataType implements DataType { } @Override - public int getMemory(Object o) { + public int getMemory(T o) { return getType(o).getMemory(o); } @Override - public int compare(Object aObj, Object bObj) { - AutoDetectDataType aType = getType(aObj); - AutoDetectDataType bType = getType(bObj); - int typeDiff = aType.typeId - bType.typeId; - if (typeDiff == 0) { - return aType.compare(aObj, bObj); - } - return Integer.signum(typeDiff); - } - - @Override - public void write(WriteBuffer buff, Object[] obj, - int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } - } - - @Override - public void write(WriteBuffer buff, Object o) { + public void write(WriteBuffer buff, T o) { getType(o).write(buff, o); } - @Override - public void read(ByteBuffer buff, Object[] obj, - int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); - } - } - - @Override - public final Object read(ByteBuffer buff) { - throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL, - "Internal error"); - } - /** * Get the type for the given object. * * @param o the object * @return the type */ - AutoDetectDataType getType(Object o) { + DataType getType(Object o) { return base.switchType(o); } @@ -485,38 +464,42 @@ AutoDetectDataType getType(Object o) { /** * The type for the null value */ - static class NullType extends AutoDetectDataType { + static class NullType extends AutoDetectDataType { - NullType(ObjectDataType base) { - super(base, TYPE_NULL); + /** + * The only instance of this type. 
+ */ + static final NullType INSTANCE = new NullType(); + + private NullType() { + super(TYPE_NULL); + } + + @Override + public Object[] createStorage(int size) { + return null; } @Override public int compare(Object aObj, Object bObj) { - if (aObj == null && bObj == null) { - return 0; - } else if (aObj == null) { - return -1; - } else if (bObj == null) { - return 1; - } - return super.compare(aObj, bObj); + return 0; } @Override public int getMemory(Object obj) { - return obj == null ? 0 : super.getMemory(obj); + return 0; } @Override public void write(WriteBuffer buff, Object obj) { - if (obj != null) { - super.write(buff, obj); - return; - } buff.put((byte) TYPE_NULL); } + @Override + public Object read(ByteBuffer buff) { + return null; + } + @Override public Object read(ByteBuffer buff, int tag) { return null; @@ -527,76 +510,87 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for boolean true and false. */ - static class BooleanType extends AutoDetectDataType { + static class BooleanType extends AutoDetectDataType { - BooleanType(ObjectDataType base) { - super(base, TYPE_BOOLEAN); + /** + * The only instance of this type. + */ + static final BooleanType INSTANCE = new BooleanType(); + + private BooleanType() { + super(TYPE_BOOLEAN); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Boolean && bObj instanceof Boolean) { - Boolean a = (Boolean) aObj; - Boolean b = (Boolean) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Boolean[] createStorage(int size) { + return new Boolean[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Boolean ? 0 : super.getMemory(obj); + public int compare(Boolean a, Boolean b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Boolean)) { - super.write(buff, obj); - return; - } - int tag = ((Boolean) obj) ? 
TAG_BOOLEAN_TRUE : TYPE_BOOLEAN; + public int getMemory(Boolean obj) { + return 0; + } + + @Override + public void write(WriteBuffer buff, Boolean obj) { + int tag = obj ? TAG_BOOLEAN_TRUE : TYPE_BOOLEAN; buff.put((byte) tag); } @Override - public Object read(ByteBuffer buff, int tag) { - return tag == TYPE_BOOLEAN ? Boolean.FALSE : Boolean.TRUE; + public Boolean read(ByteBuffer buff) { + return buff.get() == TAG_BOOLEAN_TRUE ? Boolean.TRUE : Boolean.FALSE; } + @Override + public Boolean read(ByteBuffer buff, int tag) { + return tag == TYPE_BOOLEAN ? Boolean.FALSE : Boolean.TRUE; + } } /** * The type for byte objects. */ - static class ByteType extends AutoDetectDataType { + static class ByteType extends AutoDetectDataType { + + /** + * The only instance of this type. + */ + static final ByteType INSTANCE = new ByteType(); - ByteType(ObjectDataType base) { - super(base, TYPE_BYTE); + private ByteType() { + super(TYPE_BYTE); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Byte && bObj instanceof Byte) { - Byte a = (Byte) aObj; - Byte b = (Byte) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Byte[] createStorage(int size) { + return new Byte[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Byte ? 0 : super.getMemory(obj); + public int compare(Byte a, Byte b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Byte)) { - super.write(buff, obj); - return; - } + public int getMemory(Byte obj) { + return 1; + } + + @Override + public void write(WriteBuffer buff, Byte obj) { buff.put((byte) TYPE_BYTE); - buff.put((Byte) obj); + buff.put(obj); + } + + @Override + public Byte read(ByteBuffer buff) { + return buff.get(); } @Override @@ -609,116 +603,127 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for character objects. 
*/ - static class CharacterType extends AutoDetectDataType { + static class CharacterType extends AutoDetectDataType { - CharacterType(ObjectDataType base) { - super(base, TYPE_CHAR); + /** + * The only instance of this type. + */ + static final CharacterType INSTANCE = new CharacterType(); + + private CharacterType() { + super(TYPE_CHAR); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Character && bObj instanceof Character) { - Character a = (Character) aObj; - Character b = (Character) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Character[] createStorage(int size) { + return new Character[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Character ? 24 : super.getMemory(obj); + public int compare(Character a, Character b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Character)) { - super.write(buff, obj); - return; - } + public int getMemory(Character obj) { + return 24; + } + + @Override + public void write(WriteBuffer buff, Character obj) { buff.put((byte) TYPE_CHAR); - buff.putChar((Character) obj); + buff.putChar(obj); } @Override - public Object read(ByteBuffer buff, int tag) { + public Character read(ByteBuffer buff) { return buff.getChar(); } + @Override + public Character read(ByteBuffer buff, int tag) { + return buff.getChar(); + } } /** * The type for short objects. */ - static class ShortType extends AutoDetectDataType { + static class ShortType extends AutoDetectDataType { + + /** + * The only instance of this type. 
+ */ + static final ShortType INSTANCE = new ShortType(); - ShortType(ObjectDataType base) { - super(base, TYPE_SHORT); + private ShortType() { + super(TYPE_SHORT); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Short && bObj instanceof Short) { - Short a = (Short) aObj; - Short b = (Short) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Short[] createStorage(int size) { + return new Short[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Short ? 24 : super.getMemory(obj); + public int compare(Short a, Short b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Short)) { - super.write(buff, obj); - return; - } + public int getMemory(Short obj) { + return 24; + } + + @Override + public void write(WriteBuffer buff, Short obj) { buff.put((byte) TYPE_SHORT); - buff.putShort((Short) obj); + buff.putShort(obj); } @Override - public Object read(ByteBuffer buff, int tag) { - return buff.getShort(); + public Short read(ByteBuffer buff) { + return read(buff, buff.get()); } + @Override + public Short read(ByteBuffer buff, int tag) { + return buff.getShort(); + } } /** * The type for integer objects. */ - static class IntegerType extends AutoDetectDataType { + static class IntegerType extends AutoDetectDataType { + + /** + * The only instance of this type. + */ + static final IntegerType INSTANCE = new IntegerType(); - IntegerType(ObjectDataType base) { - super(base, TYPE_INT); + private IntegerType() { + super(TYPE_INT); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Integer && bObj instanceof Integer) { - Integer a = (Integer) aObj; - Integer b = (Integer) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Integer[] createStorage(int size) { + return new Integer[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Integer ? 
24 : super.getMemory(obj); + public int compare(Integer a, Integer b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Integer)) { - super.write(buff, obj); - return; - } - int x = (Integer) obj; + public int getMemory(Integer obj) { + return 24; + } + + @Override + public void write(WriteBuffer buff, Integer obj) { + int x = obj; if (x < 0) { // -Integer.MIN_VALUE is smaller than 0 if (-x < 0 || -x > DataUtils.COMPRESSED_VAR_INT_MAX) { @@ -736,7 +741,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public Integer read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public Integer read(ByteBuffer buff, int tag) { switch (tag) { case TYPE_INT: return DataUtils.readVarInt(buff); @@ -747,40 +757,40 @@ public Object read(ByteBuffer buff, int tag) { } return tag - TAG_INTEGER_0_15; } - } /** * The type for long objects. */ - static class LongType extends AutoDetectDataType { + static class LongType extends AutoDetectDataType { - LongType(ObjectDataType base) { - super(base, TYPE_LONG); + /** + * The only instance of this type. + */ + static final LongType INSTANCE = new LongType(); + + private LongType() { + super(TYPE_LONG); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Long && bObj instanceof Long) { - Long a = (Long) aObj; - Long b = (Long) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Long[] createStorage(int size) { + return new Long[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Long ? 
30 : super.getMemory(obj); + public int compare(Long a, Long b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Long)) { - super.write(buff, obj); - return; - } - long x = (Long) obj; + public int getMemory(Long obj) { + return 30; + } + + @Override + public void write(WriteBuffer buff, Long obj) { + long x = obj; if (x < 0) { // -Long.MIN_VALUE is smaller than 0 if (-x < 0 || -x > DataUtils.COMPRESSED_VAR_LONG_MAX) { @@ -802,7 +812,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public Long read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public Long read(ByteBuffer buff, int tag) { switch (tag) { case TYPE_LONG: return DataUtils.readVarLong(buff); @@ -813,40 +828,40 @@ public Object read(ByteBuffer buff, int tag) { } return (long) (tag - TAG_LONG_0_7); } - } /** * The type for float objects. */ - static class FloatType extends AutoDetectDataType { + static class FloatType extends AutoDetectDataType { - FloatType(ObjectDataType base) { - super(base, TYPE_FLOAT); + /** + * The only instance of this type. + */ + static final FloatType INSTANCE = new FloatType(); + + private FloatType() { + super(TYPE_FLOAT); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Float && bObj instanceof Float) { - Float a = (Float) aObj; - Float b = (Float) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Float[] createStorage(int size) { + return new Float[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Float ? 
24 : super.getMemory(obj); + public int compare(Float a, Float b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Float)) { - super.write(buff, obj); - return; - } - float x = (Float) obj; + public int getMemory(Float obj) { + return 24; + } + + @Override + public void write(WriteBuffer buff, Float obj) { + float x = obj; int f = Float.floatToIntBits(x); if (f == ObjectDataType.FLOAT_ZERO_BITS) { buff.put((byte) TAG_FLOAT_0); @@ -863,7 +878,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public Float read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public Float read(ByteBuffer buff, int tag) { switch (tag) { case TAG_FLOAT_0: return 0f; @@ -881,34 +901,35 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for double objects. */ - static class DoubleType extends AutoDetectDataType { + static class DoubleType extends AutoDetectDataType { + + /** + * The only instance of this type. + */ + static final DoubleType INSTANCE = new DoubleType(); - DoubleType(ObjectDataType base) { - super(base, TYPE_DOUBLE); + private DoubleType() { + super(TYPE_DOUBLE); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Double && bObj instanceof Double) { - Double a = (Double) aObj; - Double b = (Double) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Double[] createStorage(int size) { + return new Double[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Double ? 
30 : super.getMemory(obj); + public int compare(Double a, Double b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Double)) { - super.write(buff, obj); - return; - } - double x = (Double) obj; + public int getMemory(Double obj) { + return 30; + } + + @Override + public void write(WriteBuffer buff, Double obj) { + double x = obj; long d = Double.doubleToLongBits(x); if (d == ObjectDataType.DOUBLE_ZERO_BITS) { buff.put((byte) TAG_DOUBLE_0); @@ -927,7 +948,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public Double read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public Double read(ByteBuffer buff, int tag) { switch (tag) { case TAG_DOUBLE_0: return 0d; @@ -939,40 +965,39 @@ public Object read(ByteBuffer buff, int tag) { return Double.longBitsToDouble(Long.reverse(DataUtils .readVarLong(buff))); } - } /** * The type for BigInteger objects. */ - static class BigIntegerType extends AutoDetectDataType { + static class BigIntegerType extends AutoDetectDataType { - BigIntegerType(ObjectDataType base) { - super(base, TYPE_BIG_INTEGER); + /** + * The only instance of this type. + */ + static final BigIntegerType INSTANCE = new BigIntegerType(); + + private BigIntegerType() { + super(TYPE_BIG_INTEGER); } @Override - public int compare(Object aObj, Object bObj) { - if (isBigInteger(aObj) && isBigInteger(bObj)) { - BigInteger a = (BigInteger) aObj; - BigInteger b = (BigInteger) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public BigInteger[] createStorage(int size) { + return new BigInteger[size]; } @Override - public int getMemory(Object obj) { - return isBigInteger(obj) ? 
100 : super.getMemory(obj); + public int compare(BigInteger a, BigInteger b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!isBigInteger(obj)) { - super.write(buff, obj); - return; - } - BigInteger x = (BigInteger) obj; + public int getMemory(BigInteger obj) { + return 100; + } + + @Override + public void write(WriteBuffer buff, BigInteger x) { if (BigInteger.ZERO.equals(x)) { buff.put((byte) TAG_BIG_INTEGER_0); } else if (BigInteger.ONE.equals(x)) { @@ -991,7 +1016,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public BigInteger read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public BigInteger read(ByteBuffer buff, int tag) { switch (tag) { case TAG_BIG_INTEGER_0: return BigInteger.ZERO; @@ -1005,40 +1035,39 @@ public Object read(ByteBuffer buff, int tag) { buff.get(bytes); return new BigInteger(bytes); } - } /** * The type for BigDecimal objects. */ - static class BigDecimalType extends AutoDetectDataType { + static class BigDecimalType extends AutoDetectDataType { - BigDecimalType(ObjectDataType base) { - super(base, TYPE_BIG_DECIMAL); + /** + * The only instance of this type. + */ + static final BigDecimalType INSTANCE = new BigDecimalType(); + + private BigDecimalType() { + super(TYPE_BIG_DECIMAL); } @Override - public int compare(Object aObj, Object bObj) { - if (isBigDecimal(aObj) && isBigDecimal(bObj)) { - BigDecimal a = (BigDecimal) aObj; - BigDecimal b = (BigDecimal) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public BigDecimal[] createStorage(int size) { + return new BigDecimal[size]; } @Override - public int getMemory(Object obj) { - return isBigDecimal(obj) ? 
150 : super.getMemory(obj); + public int compare(BigDecimal a, BigDecimal b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!isBigDecimal(obj)) { - super.write(buff, obj); - return; - } - BigDecimal x = (BigDecimal) obj; + public int getMemory(BigDecimal obj) { + return 150; + } + + @Override + public void write(WriteBuffer buff, BigDecimal x) { if (BigDecimal.ZERO.equals(x)) { buff.put((byte) TAG_BIG_DECIMAL_0); } else if (BigDecimal.ONE.equals(x)) { @@ -1064,7 +1093,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public BigDecimal read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public BigDecimal read(ByteBuffer buff, int tag) { switch (tag) { case TAG_BIG_DECIMAL_0: return BigDecimal.ZERO; @@ -1089,35 +1123,34 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for string objects. */ - static class StringType extends AutoDetectDataType { + static class StringType extends AutoDetectDataType { - StringType(ObjectDataType base) { - super(base, TYPE_STRING); + /** + * The only instance of this type. 
+ */ + static final StringType INSTANCE = new StringType(); + + private StringType() { + super(TYPE_STRING); } @Override - public int getMemory(Object obj) { - if (!(obj instanceof String)) { - return super.getMemory(obj); - } - return 24 + 2 * obj.toString().length(); + public String[] createStorage(int size) { + return new String[size]; } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof String && bObj instanceof String) { - return aObj.toString().compareTo(bObj.toString()); - } - return super.compare(aObj, bObj); + public int getMemory(String obj) { + return 24 + 2 * obj.length(); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof String)) { - super.write(buff, obj); - return; - } - String s = (String) obj; + public int compare(String aObj, String bObj) { + return aObj.compareTo(bObj); + } + + @Override + public void write(WriteBuffer buff, String s) { int len = s.length(); if (len <= 15) { buff.put((byte) (TAG_STRING_0_15 + len)); @@ -1128,7 +1161,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public String read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public String read(ByteBuffer buff, int tag) { int len; if (tag == TYPE_STRING) { len = DataUtils.readVarInt(buff); @@ -1143,41 +1181,46 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for UUID objects. */ - static class UUIDType extends AutoDetectDataType { + static class UUIDType extends AutoDetectDataType { - UUIDType(ObjectDataType base) { - super(base, TYPE_UUID); + /** + * The only instance of this type. + */ + static final UUIDType INSTANCE = new UUIDType(); + + private UUIDType() { + super(TYPE_UUID); } @Override - public int getMemory(Object obj) { - return obj instanceof UUID ? 
40 : super.getMemory(obj); + public UUID[] createStorage(int size) { + return new UUID[size]; } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof UUID && bObj instanceof UUID) { - UUID a = (UUID) aObj; - UUID b = (UUID) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public int getMemory(UUID obj) { + return 40; } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof UUID)) { - super.write(buff, obj); - return; - } + public int compare(UUID a, UUID b) { + return a.compareTo(b); + } + + @Override + public void write(WriteBuffer buff, UUID a) { buff.put((byte) TYPE_UUID); - UUID a = (UUID) obj; buff.putLong(a.getMostSignificantBits()); buff.putLong(a.getLeastSignificantBits()); } @Override - public Object read(ByteBuffer buff, int tag) { + public UUID read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public UUID read(ByteBuffer buff, int tag) { long a = buff.getLong(), b = buff.getLong(); return new UUID(a, b); } @@ -1187,40 +1230,45 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for java.util.Date objects. */ - static class DateType extends AutoDetectDataType { + static class DateType extends AutoDetectDataType { - DateType(ObjectDataType base) { - super(base, TYPE_DATE); + /** + * The only instance of this type. + */ + static final DateType INSTANCE = new DateType(); + + private DateType() { + super(TYPE_DATE); } @Override - public int getMemory(Object obj) { - return isDate(obj) ? 
40 : super.getMemory(obj); + public Date[] createStorage(int size) { + return new Date[size]; } @Override - public int compare(Object aObj, Object bObj) { - if (isDate(aObj) && isDate(bObj)) { - Date a = (Date) aObj; - Date b = (Date) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public int getMemory(Date obj) { + return 40; } @Override - public void write(WriteBuffer buff, Object obj) { - if (!isDate(obj)) { - super.write(buff, obj); - return; - } + public int compare(Date a, Date b) { + return a.compareTo(b); + } + + @Override + public void write(WriteBuffer buff, Date a) { buff.put((byte) TYPE_DATE); - Date a = (Date) obj; buff.putLong(a.getTime()); } @Override - public Object read(ByteBuffer buff, int tag) { + public Date read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public Date read(ByteBuffer buff, int tag) { long a = buff.getLong(); return new Date(a); } @@ -1230,12 +1278,16 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for object arrays. 
*/ - static class ObjectArrayType extends AutoDetectDataType { - + static class ObjectArrayType extends AutoDetectDataType { private final ObjectDataType elementType = new ObjectDataType(); - ObjectArrayType(ObjectDataType base) { - super(base, TYPE_ARRAY); + ObjectArrayType() { + super(TYPE_ARRAY); + } + + @Override + public Object[] createStorage(int size) { + return new Object[size]; } @Override @@ -1279,8 +1331,8 @@ public int compare(Object aObj, Object bObj) { Class type = aObj.getClass().getComponentType(); Class bType = bObj.getClass().getComponentType(); if (type != bType) { - Integer classA = getCommonClassId(type); - Integer classB = getCommonClassId(bType); + Integer classA = Holder.getCommonClassId(type); + Integer classB = Holder.getCommonClassId(bType); if (classA != null) { if (classB != null) { return classA.compareTo(classB); @@ -1350,7 +1402,7 @@ public void write(WriteBuffer buff, Object obj) { return; } Class type = obj.getClass().getComponentType(); - Integer classId = getCommonClassId(type); + Integer classId = Holder.getCommonClassId(type); if (classId != null) { if (type.isPrimitive()) { if (type == byte.class) { @@ -1402,6 +1454,11 @@ public void write(WriteBuffer buff, Object obj) { } } + @Override + public Object read(ByteBuffer buff) { + return read(buff, buff.get()); + } + @Override public Object read(ByteBuffer buff, int tag) { if (tag != TYPE_ARRAY) { @@ -1419,7 +1476,7 @@ public Object read(ByteBuffer buff, int tag) { try { clazz = Class.forName(componentType); } catch (Exception e) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_SERIALIZATION, "Could not get class {0}", componentType, e); } @@ -1430,7 +1487,7 @@ public Object read(ByteBuffer buff, int tag) { try { obj = Array.newInstance(clazz, len); } catch (Exception e) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_SERIALIZATION, "Could not create array of type {0} length 
{1}", clazz, len, e); @@ -1469,7 +1526,7 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for serialized objects. */ - static class SerializedObjectType extends AutoDetectDataType { + static class SerializedObjectType extends AutoDetectDataType { private int averageSize = 10_000; @@ -1477,14 +1534,19 @@ static class SerializedObjectType extends AutoDetectDataType { super(base, TYPE_SERIALIZED_OBJECT); } + @Override + public Object[] createStorage(int size) { + return new Object[size]; + } + @SuppressWarnings("unchecked") @Override public int compare(Object aObj, Object bObj) { if (aObj == bObj) { return 0; } - DataType ta = getType(aObj); - DataType tb = getType(bObj); + DataType ta = getType(aObj); + DataType tb = getType(bObj); if (ta != this || tb != this) { if (ta == tb) { return ta.compare(aObj, bObj); @@ -1510,7 +1572,7 @@ public int compare(Object aObj, Object bObj) { @Override public int getMemory(Object obj) { - DataType t = getType(obj); + DataType t = getType(obj); if (t == this) { return averageSize; } @@ -1519,7 +1581,7 @@ public int getMemory(Object obj) { @Override public void write(WriteBuffer buff, Object obj) { - DataType t = getType(obj); + DataType t = getType(obj); if (t != this) { t.write(buff, obj); return; @@ -1530,11 +1592,16 @@ public void write(WriteBuffer buff, Object obj) { int size = data.length * 2; // adjust the average size // using an exponential moving average - averageSize = (size + 15 * averageSize) / 16; + averageSize = (int) ((size + 15L * averageSize) / 16); buff.put((byte) TYPE_SERIALIZED_OBJECT).putVarInt(data.length) .put(data); } + @Override + public Object read(ByteBuffer buff) { + return read(buff, buff.get()); + } + @Override public Object read(ByteBuffer buff, int tag) { int len = DataUtils.readVarInt(buff); @@ -1542,7 +1609,7 @@ public Object read(ByteBuffer buff, int tag) { int size = data.length * 2; // adjust the average size // using an exponential moving average - averageSize = (size + 15 * 
averageSize) / 16; + averageSize = (int) ((size + 15L * averageSize) / 16); buff.get(data); return deserialize(data); } diff --git a/h2/src/main/org/h2/mvstore/type/StatefulDataType.java b/h2/src/main/org/h2/mvstore/type/StatefulDataType.java new file mode 100644 index 0000000000..9a53c2cdda --- /dev/null +++ b/h2/src/main/org/h2/mvstore/type/StatefulDataType.java @@ -0,0 +1,47 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.type; + +import java.nio.ByteBuffer; + +import org.h2.mvstore.WriteBuffer; + +/** + * A data type that allows to save its state. + * + * @param type of opaque parameter passed as an operational context to Factory.create() + * + * @author Andrei Tokar + */ +public interface StatefulDataType { + + /** + * Save the state. + * + * @param buff the target buffer + * @param metaType the meta type + */ + void save(WriteBuffer buff, MetaType metaType); + + Factory getFactory(); + + /** + * A factory for data types. + * + * @param the database type + */ + interface Factory { + /** + * Reads the data type. + * + * @param buff the buffer the source buffer + * @param metaDataType the type + * @param database the database + * @return the data type + */ + DataType create(ByteBuffer buff, MetaType metaDataType, D database); + } +} diff --git a/h2/src/main/org/h2/mvstore/type/StringDataType.java b/h2/src/main/org/h2/mvstore/type/StringDataType.java index 6af6d106a5..63f907c90e 100644 --- a/h2/src/main/org/h2/mvstore/type/StringDataType.java +++ b/h2/src/main/org/h2/mvstore/type/StringDataType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.type; @@ -12,46 +12,61 @@ /** * A string type. */ -public class StringDataType implements DataType { +public class StringDataType extends BasicDataType { public static final StringDataType INSTANCE = new StringDataType(); + private static final String[] EMPTY_STRING_ARR = new String[0]; + @Override - public int compare(Object a, Object b) { - return a.toString().compareTo(b.toString()); + public String[] createStorage(int size) { + return size == 0 ? EMPTY_STRING_ARR : new String[size]; } @Override - public int getMemory(Object obj) { - return 24 + 2 * obj.toString().length(); + public int compare(String a, String b) { + return a.compareTo(b); } @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); + public int binarySearch(String key, Object storageObj, int size, int initialGuess) { + String[] storage = cast(storageObj); + int low = 0; + int high = size - 1; + // the cached index minus one, so that + // for the first time (when cachedCompare is 0), + // the default value is used + int x = initialGuess - 1; + if (x < 0 || x > high) { + x = high >>> 1; + } + while (low <= high) { + int compare = key.compareTo(storage[x]); + if (compare > 0) { + low = x + 1; + } else if (compare < 0) { + high = x - 1; + } else { + return x; + } + x = (low + high) >>> 1; } + return -(low + 1); } - @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } + public int getMemory(String obj) { + return 24 + 2 * obj.length(); } @Override public String read(ByteBuffer buff) { - int len = DataUtils.readVarInt(buff); - return DataUtils.readString(buff, len); + return DataUtils.readString(buff); } @Override - public void write(WriteBuffer buff, Object obj) { - String s = obj.toString(); + public void write(WriteBuffer buff, String s) { int len = s.length(); 
buff.putVarInt(len).putStringData(s, len); } - } diff --git a/h2/src/main/org/h2/mvstore/type/package.html b/h2/src/main/org/h2/mvstore/type/package.html index d08fcc27c9..110f3d7863 100644 --- a/h2/src/main/org/h2/mvstore/type/package.html +++ b/h2/src/main/org/h2/mvstore/type/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/package.html b/h2/src/main/org/h2/package.html index 3385cd835f..77e208421c 100644 --- a/h2/src/main/org/h2/package.html +++ b/h2/src/main/org/h2/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/res/_messages_cs.prop b/h2/src/main/org/h2/res/_messages_cs.prop index 08f3c03260..f827d3dd88 100644 --- a/h2/src/main/org/h2/res/_messages_cs.prop +++ b/h2/src/main/org/h2/res/_messages_cs.prop @@ -11,6 +11,7 @@ 22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Chyba při převodu dat {0} 22025=Chyba v LIKE escapování: {0} +2202E=#Array element error: {0}, expected {1} 22030=#Value not permitted for column {0}: {1} 22031=#Value not a member of enumerators {0}: {1} 22032=#Empty enums are not allowed @@ -26,23 +27,29 @@ 40001=Detekován deadlock. Probíhající transakce byla vrácena zpět. Podrobnosti: {0} 42000=Chyba syntaxe v SQL příkazu {0} 42001=Chyba syntaxe v SQL příkazu {0}; očekáváno {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=Tabulka {0} již existuje 42S02=Tabulka {0} nenalezena +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=Index {0} již existuje 42S12=Index {0} nenalezen 42S21=Duplicitní název sloupce {0} 42S22=Sloupec {0} nenalezen 42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=Příkaz byl zrušen nebo připojení vypršelo 90000=Funkce {0} musí vracet výsledek 90001=Metoda neumožňuje dotazování. Použijte execute nebo executeQuery namísto executeUpdate 90002=Metoda umožňuje pouze pro dotazování. 
Použijte execute nebo executeUpdate namísto executeQuery 90003=Hexadecimální řetězec s lichým počtem znaků: {0} +90005=#Invalid trigger flags: {0} 90004=Hexadecimální řetězec obsahuje neplatný znak: {0} 90006=#Sequence {0} has run out of numbers 90007=Tento objekt byl již uzavřen 90008=Neplatná hodnota {0} pro parametr {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=#Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=#Invalid TO_CHAR format {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 90012=Parametr {0} není nastaven @@ -84,7 +91,6 @@ 90048=Nepodporovaná verze souboru databáze nebo neplatná hlavička souboru {0} 90049=Chyba šifrování v souboru {0} 90050=Nesprávný formát hesla, musí být: heslo k souboru uživatelské heslo -90051=#Scale(${0}) must not be bigger than precision({1}) 90052=Vnořený dotaz není pouze jediný sloupec dotazu 90053=Skalární vnořený dotaz obsahuje více než jeden řádek 90054=Neplatné použití agregátní funkce {0} @@ -141,7 +147,7 @@ 90107=Nelze odstranit {0}, protože {1} na něm závisí 90108=Nedostatek paměti. 
90109=Pohled {0} je neplatný: {1} -90110=#Comparing ARRAY to scalar value +90110=#Values of types {0} and {1} are not comparable 90111=Chyba přístupu propojené tabulky s SQL příkazem {0}, příčina: {1} 90112=Řádek nebyl nalezen při pokusu o smazání z indexu {0} 90113=Nepodporované nastavení připojení {0} @@ -178,6 +184,17 @@ 90144=#Authenticator not enabled on database {0} 90145=#FOR UPDATE is not allowed in DISTINCT or grouped select 90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Obecná chyba: {0} HY004=Neznámý datový typ: {0} HYC00=Vlastnost není podporována: {0} diff --git a/h2/src/main/org/h2/res/_messages_de.prop b/h2/src/main/org/h2/res/_messages_de.prop index 0b412b90fb..f91951e045 100644 --- a/h2/src/main/org/h2/res/_messages_de.prop +++ b/h2/src/main/org/h2/res/_messages_de.prop @@ -3,14 +3,15 @@ 07001=Ungültige Anzahl Parameter für {0}, erwartet: {1} 08000=Fehler beim Öffnen der Datenbank: {0} 21S02=Anzahl der Felder stimmt nicht überein -22001=Wert zu gross / lang für Feld {0}: {1} -22003=Numerischer Wert ausserhalb des Bereichs: {0} -22004=Numerischer Wert ausserhalb des Bereichs: {0} in Feld {1} +22001=Wert zu groß / lang für Feld 
{0}: {1} +22003=Numerischer Wert außerhalb des Bereichs: {0} +22004=Numerischer Wert außerhalb des Bereichs: {0} in Feld {1} 22007=Kann {0} {1} nicht umwandeln 22012=Division durch 0: {0} -22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} +22013=Ungültige PRECEDING oder FOLLOWING Größe in Window-Funktion: {0} 22018=Datenumwandlungsfehler beim Umwandeln von {0} 22025=Fehler in LIKE ESCAPE: {0} +2202E=Fehlerhaftes Array-Element: {0}, erwartet: {1} 22030=Wert nicht erlaubt für Feld {0}: {1} 22031=Wert nicht Teil der Aufzählung {0}: {1} 22032=Leere Aufzählungen sind nicht erlaubt @@ -22,27 +23,33 @@ 23507=Kein Vorgabewert für Feld {0} 23513=Bedingung verletzt: {0} 23514=Ungültige Bedingung: {0} -28000=Falscher Benutzer Name oder Passwort +28000=Falscher Benutzername oder Passwort 40001=Eine Verklemmung (Deadlock) ist aufgetreten. Die aktuelle Transaktion wurde rückgängig gemacht. Details: {0} 42000=Syntax Fehler in SQL Befehl {0} 42001=Syntax Fehler in SQL Befehl {0}; erwartet {1} +42602=Ungültiger Name {0} +42622=Der Name mit {0} beginnt ist zu lang. Die maximale Länge beträgt {1} 42S01=Tabelle {0} besteht bereits 42S02=Tabelle {0} nicht gefunden +42S03=Tabelle {0} nicht gefunden (mögliche Kandidaten: {1}) +42S04=Tabelle {0} nicht gefunden (diese Datenbank ist leer) 42S11=Index {0} besteht bereits 42S12=Index {0} nicht gefunden 42S21=Doppelter Feldname {0} 42S22=Feld {0} nicht gefunden -42S31=#Identical expressions should be used; expected {0}, found {1} +42S31=Es sollten identische Ausdrücke verwendet werden; erwartet {0}, tatsächlich {1} +54011=Zu viele Felder definiert. Maximale Anzahl von Felder: {0} 57014=Befehl wurde abgebrochen oder das Session-Timeout ist abgelaufen 90000=Funktion {0} muss Zeilen zurückgeben 90001=Methode nicht zulässig für eine Abfrage. Erlaubt sind execute oder executeQuery, nicht jedoch executeUpdate 90002=Methode nur zulässig für eine Abfrage. 
Erlaubt sind execute oder executeUpdate, nicht jedoch executeQuery 90003=Hexadezimal Zahl mit einer ungeraden Anzahl Zeichen: {0} 90004=Hexadezimal Zahl enthält unerlaubtes Zeichen: {0} +90005=Ungültige Triggeroptionen: {0} 90006=Die Sequenz {0} hat keine freien Nummern mehr 90007=Das Objekt wurde bereits geschlossen 90008=Unerlaubter Wert {0} für Parameter {1} -90009=Kann die Sequenz {0} nicht ändern aufgrund falscher Attribute (Start-Wert {1}, Minimal-Wert {2}, Maximal-Wert {3}, Inkrement {4}) +90009=Kann die Sequenz {0} nicht ändern aufgrund falscher Attribute (Basiswert {1}, Start-Wert {2}, Minimal-Wert {3}, Maximal-Wert {4}, Inkrement {5}, Cachegröße {6}) 90010=Ungültiges TO_CHAR Format {0} 90011=Ein implizit relativer Pfad zum Arbeitsverzeichnis ist nicht erlaubt in der Datenbank URL {0}. Bitte absolute Pfade, ~/name, ./name, oder baseDir verwenden. 90012=Parameter {0} wurde nicht gesetzt @@ -56,11 +63,11 @@ 90020=Datenbank wird wahrscheinlich bereits benutzt: {0}. Mögliche Lösungen: alle Verbindungen schliessen; Server Modus verwenden 90021=Diese Kombination von Einstellungen wird nicht unterstützt {0} 90022=Funktion {0} nicht gefunden -90023=Feld {0} darf nicht NULL nicht erlauben +90023=Feld {0} darf nicht nullable sein 90024=Fehler beim Umbenennen der Datei {0} nach {1} 90025=Kann Datei {0} nicht löschen 90026=Serialisierung fehlgeschlagen, Grund: {0} -90027=De-Serialisierung fehlgeschlagen, Grund: {1} +90027=De-Serialisierung fehlgeschlagen, Grund: {0} 90028=Eingabe/Ausgabe Fehler: {0} 90029=Im Moment nicht auf einer veränderbaren Zeile 90030=Datei fehlerhaft beim Lesen des Datensatzes: {0}. 
Mögliche Lösung: Recovery Werkzeug verwenden @@ -80,11 +87,10 @@ 90044=Fehler beim Ausführen des Triggers {0}, Klasse {1}, Grund: {1}; siehe Ursache für Details 90045=Bedingung {0} besteht bereits 90046=URL Format Fehler; erwartet {0}, erhalten {1} -90047=Falsche Version, Treiber Version ist {0}, Server Version ist {1} +90047=Falsche Version, Treiberversion ist {0}, Serverversion ist {1} 90048=Datenbank Datei Version wird nicht unterstützt oder ungültiger Dateikopf in Datei {0} 90049=Verschlüsselungsfehler in Datei {0} -90050=Falsches Passwort Format, benötigt wird: Datei-Passwort Benutzer-Passwort -90051=Skalierung(${0}) darf nicht grösser als Präzision sein({1}) +90050=Falsches Passwortformat, benötigt wird: Datei-Passwort Benutzer-Passwort 90052=Unterabfrage gibt mehr als eine Feld zurück 90053=Skalar-Unterabfrage enthält mehr als eine Zeile 90054=Ungültige Verwendung der Aggregat Funktion {0} @@ -101,7 +107,7 @@ 90065=Savepoint hat einen Namen 90066=Doppeltes Merkmahl {0} 90067=Verbindung ist unterbrochen: {0} -90068=Sortier-Ausdruck {0} muss in diesem Fall im Resultat vorkommen +90068=Sortierausdruck {0} muss in diesem Fall im Resultat vorkommen 90069=Rolle {0} besteht bereits 90070=Rolle {0} nicht gefunden 90071=Benutzer or Rolle {0} nicht gefunden @@ -113,7 +119,7 @@ 90077=Funktions-Alias {0} nicht gefunden 90078=Schema {0} besteht bereits 90079=Schema {0} nicht gefunden -90080=Schema Namen müssen übereinstimmen +90080=Schemanamen müssen übereinstimmen 90081=Feld {0} enthält NULL Werte 90082=Sequenz {0} gehört zu einer Tabelle 90083=Feld wird referenziert durch {0} @@ -126,7 +132,7 @@ 90090=Schema {0} kann nicht gelöscht werden 90091=Rolle {0} kann nicht gelöscht werden 90093=Clustering Fehler - Datenbank läuft bereits im autonomen Modus -90094=Clustering Fehler - Datenbank läuft bereits im Cluster Modus, Serverliste: {0} +90094=Clustering Fehler - Datenbank läuft bereits im Cluster-Modus, Serverliste: {0} 90095=Textformat Fehler: {0} 90096=Nicht genug 
Rechte für Objekt {0} 90097=Die Datenbank ist schreibgeschützt @@ -135,13 +141,13 @@ 90101=Falsches XID Format: {0} 90102=Datenkompressions-Option nicht unterstützt: {0} 90103=Datenkompressions-Algorithmus nicht unterstützt: {0} -90104=Datenkompressions Fehler +90104=Datenkompressions-Fehler 90105=Fehler beim Aufruf eine benutzerdefinierten Funktion: {0} 90106=Kann {0} nicht zurücksetzen per TRUNCATE 90107=Kann {0} nicht löschen weil {1} davon abhängt 90108=Nicht genug Hauptspeicher. 90109=View {0} ist ungültig: {1} -90110=#Comparing ARRAY to scalar value +90110=Werte des Typs {0} und {1} sind nicht vergleichbar 90111=Fehler beim Zugriff auf eine verknüpfte Tabelle mit SQL Befehl {0}, Grund: {1} 90112=Zeile nicht gefunden beim Löschen von Index {0} 90113=Datenbank-Verbindungs Option {0} nicht unterstützt @@ -153,7 +159,7 @@ 90119=Domäne {0} besteht bereits 90120=Domäne {0} nicht gefunden 90121=Die Datenbank wurde bereits geschlossen (um das automatische Schliessen beim Stopp der VM zu deaktivieren, die Datenbank URL mit ";DB_CLOSE_ON_EXIT=FALSE" ergänzen) -90122=#The WITH TIES clause is not allowed without a corresponding ORDER BY clause. +90122=Der WITH TIES Ausdruck ist ohne zugehörigem ORDER BY Ausdruck nicht erlaubt. 
90123=Kann nicht indizierte und nicht indizierte Parameter mischen 90124=Datei nicht gefunden: {0} 90125=Ungültig Klasse, erwartet {0} erhalten {1} @@ -167,17 +173,28 @@ 90133=Kann das Setting {0} nicht ändern wenn die Datenbank bereits geöffnet ist 90134=Der Zugriff auf die Klasse {0} ist nicht erlaubt 90135=Die Datenbank befindet sich im Exclusiv Modus; es können keine zusätzlichen Verbindungen geöffnet werden -90136=#Window not found: {0} +90136=Bereich (Window) nicht gefunden: {0} 90137=Werte können nur einer Variablen zugewiesen werden, nicht an: {0} -90138=Ungültiger Datenbank Name: {0} -90139=Die (public static) Java Funktion wurde nicht gefunden: {0} +90138=Ungültiger Datenbankname: {0} +90139=Die (public static) Java-Funktion wurde nicht gefunden: {0} 90140=Die Resultat-Zeilen können nicht verändert werden. Mögliche Lösung: conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 90141=Serialisierer kann nicht geändert werden wenn eine Daten-Tabelle existiert: {0} -90142=Schrittgrösse darf nicht 0 sein -90143=#Row {1} not found in primary index {0} -90144=#Authenticator not enabled on database {0} -90145=#FOR UPDATE is not allowed in DISTINCT or grouped select -90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90142=Schrittgröße darf nicht 0 sein +90143=Zeile {1} nicht gefunden im Primärschlüssel {0} +90144=Authenticator ist für die Datenbank {0} nicht aktiviert +90145=FOR UPDATE ist in einem DISTINCT oder gruppiertem Select nicht erlaubt +90146=Datenbank {0} nicht gefunden und IFEXISTS=true, daher können wir sie nicht automatisch anlegen +90147=Methode {0} ist nicht erlaubt, wenn sich die Verbindung im auto-commit Modus befindet +90148=Der aktuelle Wert der Sequenz {0} ist in dieser Session noch nicht definiert +90149=Datenbank {0} nicht gefunden. 
Entweder legen Sie sie an oder erlauben das Anlegen einer Datenbank aus der Ferne (nicht empfohlen in sicherheitsrelevanten Umgebungen) +90150=Genauigkeit ({0}) muss zwischen {1} und {2} inklusive liegen +90151=Genauigkeit von Skalierung oder anteiligen Sekunden ({0}) muss zwischen {1} und {2} inklusive liegen +90152=Referentielle Integrität {0} wird von referentieller Integrität {1} genutzt +90153=Spalte {0} bezieht sich auf nicht vergleichbare Spalte {1} +90154=Erzeugte Spalte {0} kann nicht zugewiesen werden +90155=Erzeugte Spalte {0} kann nicht durch eine referentielle Integrität mit dem Ausdruck {1} veränderbar sein +90156=Spalten-Alias ist nicht für den Audruck {0} angegeben +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Allgemeiner Fehler: {0} HY004=Unbekannter Datentyp: {0} HYC00=Dieses Feature wird nicht unterstützt: {0} diff --git a/h2/src/main/org/h2/res/_messages_en.prop b/h2/src/main/org/h2/res/_messages_en.prop index 06f821581c..85844f6d1e 100644 --- a/h2/src/main/org/h2/res/_messages_en.prop +++ b/h2/src/main/org/h2/res/_messages_en.prop @@ -11,6 +11,7 @@ 22013=Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Data conversion error converting {0} 22025=Error in LIKE ESCAPE: {0} +2202E=Array element error: {0}, expected {1} 22030=Value not permitted for column {0}: {1} 22031=Value not a member of enumerators {0}: {1} 22032=Empty enums are not allowed @@ -26,23 +27,29 @@ 40001=Deadlock detected. The current transaction was rolled back. Details: {0} 42000=Syntax error in SQL statement {0} 42001=Syntax error in SQL statement {0}; expected {1} +42602=Invalid name {0} +42622=The name that starts with {0} is too long. 
The maximum length is {1} 42S01=Table {0} already exists 42S02=Table {0} not found +42S03=Table {0} not found (candidates are: {1}) +42S04=Table {0} not found (this database is empty) 42S11=Index {0} already exists 42S12=Index {0} not found 42S21=Duplicate column name {0} 42S22=Column {0} not found 42S31=Identical expressions should be used; expected {0}, found {1} +54011=Too many columns. The maximum count is {0} 57014=Statement was canceled or the session timed out 90000=Function {0} must return a result set 90001=Method is not allowed for a query. Use execute or executeQuery instead of executeUpdate 90002=Method is only allowed for a query. Use execute or executeUpdate instead of executeQuery 90003=Hexadecimal string with odd number of characters: {0} 90004=Hexadecimal string contains non-hex character: {0} +90005=Invalid trigger flags: {0} 90006=Sequence {0} has run out of numbers 90007=The object is already closed 90008=Invalid value {0} for parameter {1} -90009=Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=Invalid TO_CHAR format {0} 90011=A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 
90012=Parameter {0} is not set @@ -84,7 +91,6 @@ 90048=Unsupported database file version or invalid file header in file {0} 90049=Encryption error in file {0} 90050=Wrong password format, must be: file password user password -90051=Scale(${0}) must not be bigger than precision({1}) 90052=Subquery is not a single column query 90053=Scalar subquery contains more than one row 90054=Invalid use of aggregate function {0} @@ -141,7 +147,7 @@ 90107=Cannot drop {0} because {1} depends on it 90108=Out of memory. 90109=View {0} is invalid: {1} -90110=Comparing ARRAY to scalar value +90110=Values of types {0} and {1} are not comparable 90111=Error accessing linked table with SQL statement {0}, cause: {1} 90112=Row not found when trying to delete from index {0} 90113=Unsupported connection setting {0} @@ -178,6 +184,17 @@ 90144=Authenticator not enabled on database {0} 90145=FOR UPDATE is not allowed in DISTINCT or grouped select 90146=Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=Method {0} is not allowed when connection is in auto-commit mode +90148=Current value of sequence {0} is not yet defined in this session +90149=Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=Precision ({0}) must be between {1} and {2} inclusive +90151=Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=Constraint {0} is used by constraint {1} +90153=Column {0} references uncomparable column {1} +90154=Generated column {0} cannot be assigned +90155=Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=Column alias is not specified for expression {0} +90157=Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=General error: {0} HY004=Unknown data type: {0} HYC00=Feature not supported: {0} diff --git a/h2/src/main/org/h2/res/_messages_es.prop b/h2/src/main/org/h2/res/_messages_es.prop index 
1488ab5935..50089a49b0 100644 --- a/h2/src/main/org/h2/res/_messages_es.prop +++ b/h2/src/main/org/h2/res/_messages_es.prop @@ -11,6 +11,7 @@ 22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Conversión de datos fallida, convirtiendo {0} 22025=Error en LIKE ESCAPE: {0} +2202E=#Array element error: {0}, expected {1} 22030=Valor no permitido para la columna {0}: {1} 22031=#Value not a member of enumerators {0}: {1} 22032=#Empty enums are not allowed @@ -26,23 +27,29 @@ 40001=Deadlock - Punto muerto detectado. La transacción actual fue retrotraída (rollback). Detalles: {0} 42000=Error de Sintaxis en sentencia SQL {0} 42001=Error de Sintaxis en sentencia SQL {0}; se esperaba {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=Tabla {0} ya existe 42S02=Tabla {0} no encontrada +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=Indice {0} ya existe 42S12=Indice {0} no encontrado 42S21=Nombre de columna Duplicada {0} 42S22=Columna {0} no encontrada 42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=Ls sentencia fue cancelado ó la sesión expiró por tiempo vencido 90000=Función {0} debe devolver un set de resultados (ResultSet) 90001=Metodo no permitido en un query. Use execute ó executeQuery en lugar de executeUpdate 90002=Metodo permitido unicamente en un query. 
Use execute ó executeUpdate en lugar de executeQuery 90003=Cadena Hexadecimal con cantidad impar de caracteres: {0} 90004=Cadena Hexadecimal contiene caracteres invalidos: {0} +90005=#Invalid trigger flags: {0} 90006=#Sequence {0} has run out of numbers 90007=El objeto ya está cerrado 90008=Valor Invalido {0} para el parametro {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=#Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=#Invalid TO_CHAR format {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 90012=Parametro {0} no está fijado @@ -84,7 +91,6 @@ 90048=Versión del archivo de base de datos no soportada ó encabezado de archivo invalido en archivo {0} 90049=Error de Encriptación en archivo {0} 90050=Formato de password erroneo, debe ser: archivo password Usuario password -90051=#Scale(${0}) must not be bigger than precision({1}) 90052=El Subquery no es un query escalar (debe devolver una sola columna) 90053=El Subquery escalar contiene mas de una fila 90054=Uso Invalido de la función de columna agregada {0} @@ -141,7 +147,7 @@ 90107=Imposible eliminar {0} debido a que {1} depende de él. 90108=Memoria Insuficiente - Out of memory. 
Tamaño: {0} 90109=La Vista {0} es invalida: {1} -90110=#Comparing ARRAY to scalar value +90110=#Values of types {0} and {1} are not comparable 90111=Error accediendo Linked Table con sentencia SQL {0}, causa: {1} 90112=Fila no encontrada mientras se intentaba borrar del indice {0} 90113=Parametro de conexión No soportado {0} @@ -178,6 +184,17 @@ 90144=#Authenticator not enabled on database {0} 90145=#FOR UPDATE is not allowed in DISTINCT or grouped select 90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Error General : {0} HY004=Tipo de dato desconocido : {0} HYC00=Caracteristica no soportada: {0} diff --git a/h2/src/main/org/h2/res/_messages_fr.prop b/h2/src/main/org/h2/res/_messages_fr.prop index 8f04da12de..69671ba7fe 100644 --- a/h2/src/main/org/h2/res/_messages_fr.prop +++ b/h2/src/main/org/h2/res/_messages_fr.prop @@ -11,6 +11,7 @@ 22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Erreur lors de la conversion de données {0} 22025=Erreur dans LIKE ESCAPE: {0} +2202E=#Array element error: {0}, expected {1} 22030=Valeur non permise pour la colonne {0}: {1} 22031=La valeur n''est pas un membre de l''énumération {0}: {1} 
22032=Les enums vides ne sont pas permis @@ -26,23 +27,29 @@ 40001=Deadlock détecté. La transaction courante a été annulée. Détails: {0} 42000=Erreur de syntaxe dans l''instruction SQL {0} 42001=Erreur de syntaxe dans l''instruction SQL {0}; attendu {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=La table {0} existe déjà 42S02=Table {0} non trouvée +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=L''index {0} existe déjà 42S12=Index {0} non trouvé 42S21=Duplication du nom de colonnes {0} 42S22=Colonne {0} non trouvée 42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=L''instruction a été annulée ou la session a expiré 90000=La fonction {0} doit retourner résultat 90001=Methode non autorisée pour une requête. Utilisez execute ou executeQuery à la place d''executeUpdate 90002=Methode est autorisée uniquement pour une requête. Utilisez execute ou executeUpdate à la place d''executeQuery 90003=Chaîne héxadecimale contenant un nombre impair de caractères: {0} 90004=Chaîne héxadecimale contenant un caractère non-héxa: {0} +90005=#Invalid trigger flags: {0} 90006=La séquence {0} a épuisé ses éléments 90007=L''objet est déjà fermé 90008=Valeur invalide {0} pour le paramètre {1} -90009=Impossible de créer ou modifier la séquence {0} car les attributs sont invalides (start value {1}, min value {2}, max value {3}, increment {4}) +90009=Impossible de créer ou modifier la séquence {0} car les attributs sont invalides (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=Format invalide TO_CHAR {0} 90011=Un chemin de fichier implicitement relatif au répertoire de travail actuel n''est pas autorisé dans l''URL de la base de données {0}. Utilisez un chemin absolu, ~ /nom, ./nom ou le paramètre baseDir à la place. 
90012=La paramètre {0} n''est pas initialisé @@ -84,7 +91,6 @@ 90048=Version de fichier de base de données non supportée ou entête de ficher invalide dans le fichier {0} 90049=Erreur de cryptage dans le fichier {0} 90050=Mauvais format de mot de passe, doit être: mot de passe du fichier mot de passe de l''utilisateur -90051=L''échelle(${0}) ne doit pas être plus grande que la précision({1}) 90052=La sous requête n''est pas une requête sur une seule colonne 90053=La sous-requête scalaire contient plus d''une rangée 90054=Utilisation invalide de la fonction agrégée {0} @@ -141,7 +147,7 @@ 90107=Impossible de supprimer {0} car {1} dépend de lui 90108=Mémoire insuffisante. 90109=La vue {0} est invalide: {1} -90110=#Comparing ARRAY to scalar value +90110=#Values of types {0} and {1} are not comparable 90111=Erreur lors de l''accès à la table liée à l''aide de l''instruction SQL {0}, cause: {1} 90112=Ligne non trouvée lors de la tentative de suppression à partir de l''index {0} 90113=Paramétrage de connexion non pris en charge {0} @@ -178,6 +184,17 @@ 90144=#Authenticator not enabled on database {0} 90145=#FOR UPDATE is not allowed in DISTINCT or grouped select 90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index 
{0} in GROUP BY clause is outside valid range 1 - {1} HY000=Erreur générale: {0} HY004=Type de données inconnu: {0} HYC00=Fonctionnalité non supportée: {0} diff --git a/h2/src/main/org/h2/res/_messages_ja.prop b/h2/src/main/org/h2/res/_messages_ja.prop index 2e956cca08..9eab01d8e5 100644 --- a/h2/src/main/org/h2/res/_messages_ja.prop +++ b/h2/src/main/org/h2/res/_messages_ja.prop @@ -11,6 +11,7 @@ 22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=データ変換中にエラーが発生しました {0} 22025=LIKE ESCAPE にエラーがあります: {0} +2202E=#Array element error: {0}, expected {1} 22030=#Value not permitted for column {0}: {1} 22031=#Value not a member of enumerators {0}: {1} 22032=#Empty enums are not allowed @@ -26,23 +27,29 @@ 40001=デッドロックが検出されました。現在のトランザクションはロールバックされました。詳細: {0} 42000=SQLステートメントに文法エラーがあります {0} 42001=SQLステートメントに文法エラーがあります {0}; 期待されるステートメント {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=テーブル {0} はすでに存在します 42S02=テーブル {0} が見つかりません +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=インデックス {0} はすでに存在します 42S12=インデックス {0} が見つかりません 42S21=列名 {0} が重複しています 42S22=列 {0} が見つかりません 42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. 
The maximum count is {0} 57014=ステートメントがキャンセルされたか、セッションがタイムアウトしました 90000=関数 {0} はリザルトセットを返さなければなりません 90001=メソッドはクエリをサポートしていません。executeUpdateのかわりに、excute、またはexecuteQueryを使用してください 90002=メソッドはクエリしかサポートしていません。executeQueryのかわりに、excecute、またはexecuteUpdateを使用してください 90003=文字数が奇数の16進文字列です: {0} 90004=16進文字列に不正な文字が含まれています: {0} +90005=#Invalid trigger flags: {0} 90006=シーケンス {0} を使い果たしました 90007=オブジェクトはすでに閉じられています 90008=パラメータ {1} に対する値 {0} が不正です -90009=無効な属性により、シーケンス {0} の作成または変更ができません。(開始値 {1}, 最小値 {2}, 最大値 {3}, 増分 {4}) +90009=#無効な属性により、シーケンス {0} の作成または変更ができません。(base value {1}, 開始値 {2}, 最小値 {3}, 最大値 {4}, 増分 {5}, cache size {6}) 90010=無効な TO_CHAR フォーマット {0} 90011=暗黙的なカレントディレクトリからの相対ファイルパスをデータベースURL({0})に指定することは許可されていません。代わりに絶対パスか相対パス( ~/name, ./name)あるいは baseDir を指定して下さい. 90012=パラメータ {0} がセットされていません @@ -59,8 +66,8 @@ 90023=列 {0} にはnull値を許すべきてはありません 90024=ファイル名を {0} から {1} に変更中にエラーが発生しました 90025=ファイル {0} を削除できません -90026=直列化に失敗しました -90027=直列化復元に失敗しました +90026=直列化に失敗しました: {0} +90027=直列化復元に失敗しました: {0} 90028=入出力例外: {0} 90029=現在行は更新不可です 90030=レコード {0} を読み込み中にファイルの破損を検出しました。可能な解決策: リカバリツールを使用してください @@ -84,7 +91,6 @@ 90048=ファイル {0} は、未サポートのバージョンか、不正なファイルヘッダを持つデータベースファイルです 90049=ファイル {0} の暗号化エラーです 90050=不正なパスワードフォーマットです。正しくは: ファイルパスワード <空白> ユーザパスワード -90051=スケール(${0}) より大きい精度({1})は指定できません 90052=サブクエリが単一列のクエリではありません 90053=数値サブクエリが複数の行を含んでいます 90054=集約関数 {0} の不正な使用 @@ -141,7 +147,7 @@ 90107={1} が依存しているため、{0} をドロップすることはできません 90108=メモリが不足しています 90109=ビュー {0} は無効です: {1} -90110=#Comparing ARRAY to scalar value +90110=#Values of types {0} and {1} are not comparable 90111=SQLステートメント {0} による結合テーブルアクセスエラー 90112=インデックス {0} から削除を試みましたが、行が見つかりません 90113=未サポートの接続設定 {0} @@ -178,6 +184,17 @@ 90144=#Authenticator not enabled on database {0} 90145=#FOR UPDATE is not allowed in DISTINCT or grouped select 90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in 
this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=一般エラー: {0} HY004=不明なデータ型: {0} HYC00=機能はサポートされていません: {0} diff --git a/h2/src/main/org/h2/res/_messages_pl.prop b/h2/src/main/org/h2/res/_messages_pl.prop index 3807cf7967..44d4eebd9a 100644 --- a/h2/src/main/org/h2/res/_messages_pl.prop +++ b/h2/src/main/org/h2/res/_messages_pl.prop @@ -11,6 +11,7 @@ 22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Błąd konwersji danych {0} 22025=Błąd w LIKE ESCAPE: {0} +2202E=#Array element error: {0}, expected {1} 22030=#Value not permitted for column {0}: {1} 22031=#Value not a member of enumerators {0}: {1} 22032=#Empty enums are not allowed @@ -26,23 +27,29 @@ 40001=Wykryto zakleszczenie. Bieżąca transakcja została wycofana. Szczegóły : {0} 42000=Błąd składniowy w wyrażeniu SQL {0} 42001=Błąd składniowy w wyrażeniu SQL {0}; oczekiwano {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=Tabela {0} już istnieje 42S02=Tabela {0} nie istnieje +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=Indeks {0} już istnieje 42S12=Indeks {0} nie istnieje 42S21=Zduplikowana nazwa kolumny {0} 42S22=Kolumna {0} nie istnieje 42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. 
The maximum count is {0} 57014=Kwerenda została anulowana albo sesja wygasła 90000=Funkcja {0} musi zwrócić dane 90001=Metoda nie jest dozwolona w kwerendzie 90002=Metoda jest dozwolona tylko w kwerendzie 90003=Heksadecymalny string z nieparzystą liczbą znaków: {0} 90004=Heksadecymalny string zawiera niedozwolony znak: {0} +90005=#Invalid trigger flags: {0} 90006=Sekwencja {0} została wyczerpana 90007=Obiekt jest zamknięty 90008=Nieprawidłowa wartość {0} parametru {1} -90009=Nie można utworzyć/zmienić sekwencji {0} ponieważ podane atrybuty są nieprawidłowe (wartość początkowa {1}, wartość minimalna {2}, wartość maksymalna {3}, przyrost {4}) +90009=#Nie można utworzyć/zmienić sekwencji {0} ponieważ podane atrybuty są nieprawidłowe (base value {1}, wartość początkowa {2}, wartość minimalna {3}, wartość maksymalna {4}, przyrost {5}, cache size {6}) 90010=Nieprawidłowy format TO_CHAR {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 90012=Parametr o numerze {0} nie jest ustalony @@ -84,7 +91,6 @@ 90048=Nieprawidłowa wersja pliku bazy danych lub nieprawidłowy nagłówek pliku {0} 90049=Błąd szyfrowania pliku {0} 90050=Zły format hasła, powinno być: plik hasło użytkownik hasło -90051=#Scale(${0}) must not be bigger than precision({1}) 90052=Podzapytanie nie jest zapytaniem opartym o jedna kolumnę 90053=Skalarna pod-kwerenda zawiera więcej niż jeden wiersz 90054=Nieprawidłowe użycie funkcji agregującej {0} @@ -141,7 +147,7 @@ 90107=Nie można skasować {0} ponieważ zależy od {1} 90108=Brak pamięci. 
90109=Widok {0} jest nieprawidłowy -90110=#Comparing ARRAY to scalar value +90110=#Values of types {0} and {1} are not comparable 90111=Błąd dostępu do tabeli skrzyżowań przy pomocy zapytania SQL {0}, błąd: {1} 90112=Rekord nie znaleziony przy probie kasowania z indeksu {0} 90113=Nie wspierana opcja połączenia {0} @@ -178,6 +184,17 @@ 90144=#Authenticator not enabled on database {0} 90145=#FOR UPDATE is not allowed in DISTINCT or grouped select 90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Błąd ogólny: {0} HY004=Nieznany typ danych: {0} HYC00=Cecha nie jest wspierana: {0} diff --git a/h2/src/main/org/h2/res/_messages_pt_br.prop b/h2/src/main/org/h2/res/_messages_pt_br.prop index 02e109ef90..e9383f5128 100644 --- a/h2/src/main/org/h2/res/_messages_pt_br.prop +++ b/h2/src/main/org/h2/res/_messages_pt_br.prop @@ -11,6 +11,7 @@ 22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Erro na conversão de dado, convertendo {0} 22025=Erro em LIKE ESCAPE: {0} +2202E=#Array element error: {0}, expected {1} 22030=#Value not permitted for column {0}: {1} 22031=#Value not a member of enumerators {0}: {1} 22032=#Empty enums are not 
allowed @@ -26,23 +27,29 @@ 40001=#Deadlock detected. The current transaction was rolled back. Details: {0} 42000=Erro de sintax na declaração SQL {0} 42001=Erro de sintax na declaração SQL {0}; esperado {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=Tabela {0} já existe 42S02=Tabela {0} não foi encontrada +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=índice {0} já existe 42S12=índice {0} não foi encontrado 42S21=Nome duplicado da coluna {0} 42S22=Coluna {0} não foi encontrada 42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=#Statement was canceled or the session timed out 90000=Função {0} deve retornar algum resultado 90001=O método não esta hábilitado para consulta. Use o execute ou o executeQuery em vez de executeUpdate 90002=O método é apenas para consulta. Use o execute ou o executeUpdate em vez de executeQuery 90003=Sequência Hexadecimal com número ímpar de caracteres: {0} 90004=Sequência Hexadecimal contêm caracteres inválidos: {0} +90005=#Invalid trigger flags: {0} 90006=#Sequence {0} has run out of numbers 90007=O objeto está fechado 90008=Valor inválido {0} para o parâmetro {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=#Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=#Invalid TO_CHAR format {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 
90012=Parâmetro {0} não esta definido @@ -84,7 +91,6 @@ 90048=Versão do arquivo de base de dados não é suportado, ou o cabeçalho do arquivo é inválido, no arquivo {0} 90049=Erro de encriptação no arquivo {0} 90050=Erro no formato da senha, deveria ser: arquivo de senha senha do usuário -90051=#Scale(${0}) must not be bigger than precision({1}) 90052=A Subquery não é de coluna única 90053=A Subquery contém mais de uma linha 90054=Uso inválido da função {0} agregada @@ -141,7 +147,7 @@ 90107=Não pode apagar {0} por que depende de {1} 90108=#Out of memory. 90109=Vista {0} é inválida: {1} -90110=#Comparing ARRAY to scalar value +90110=#Values of types {0} and {1} are not comparable 90111=Erro ao acessar a tabela lincada com a instrução SQL {0}, causa: {1} 90112=A linha não foi encontrada ao tentar eliminar apartir do índice {0} 90113=Não suporta a definição de conecção {0} @@ -178,6 +184,17 @@ 90144=#Authenticator not enabled on database {0} 90145=#FOR UPDATE is not allowed in DISTINCT or grouped select 90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Erro geral: {0} HY004=Tipo de dados desconhecido: {0} HYC00=Recurso não suportado: {0} diff --git 
a/h2/src/main/org/h2/res/_messages_ru.prop b/h2/src/main/org/h2/res/_messages_ru.prop index df30bbc324..c037c350ff 100644 --- a/h2/src/main/org/h2/res/_messages_ru.prop +++ b/h2/src/main/org/h2/res/_messages_ru.prop @@ -11,6 +11,7 @@ 22013=Недопустимое значение PRECEDING или FOLLOWING в оконной функции: {0} 22018=Ошибка преобразования данных при конвертации {0} 22025=Ошибка в LIKE ESCAPE: {0} +2202E=Недопустимый элемент массива: {0}, ожидался {1} 22030=Недопустимое значение для столбца {0}: {1} 22031=Значение не указано в перечислимом типе {0}: {1} 22032=Пустые перечислимые типы не допускаются @@ -26,23 +27,29 @@ 40001=Обнаружена взаимная блокировка потоков. Текущая транзакция была откачена. Детали: {0} 42000=Синтаксическая ошибка в выражении SQL {0} 42001=Синтаксическая ошибка в выражении SQL {0}; ожидалось {1} +42602=Недопустимое имя {0} +42622=Имя, начинающееся с {0}, слишком длинное. Максимальная длина: {1} 42S01=Таблица {0} уже существует 42S02=Таблица {0} не найдена +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=Индекс {0} уже существует 42S12=Индекс {0} не найден 42S21=Повтор имени столбца {0} 42S22=Столбец {0} не найден 42S31=Должны использоваться идентичные выражения; ожидалось {0}, получено {1} +54011=Слишком много столбцов. Максимальное количество {0} 57014=Запрос был отменен или закончилось время ожидания сессии 90000=Функция {0} должна возвращать набор записей 90001=Метод не разрешен для запросов. Используйте execute или executeQuery вместо executeUpdate 90002=Метод разрешен только для запросов. 
Используйте execute или executeUpdate вместо executeQuery 90003=Шестнадцатиричная строка содержит нечетное количество символов: {0} 90004=Шестнадцатиричная строка содержит нешестнадцатиричные символы: {0} +90005=Недопустимые флаги триггера: {0} 90006=Последовательность {0} вышла за границы (MINVALUE, MAXVALUE) 90007=Объект уже закрыт 90008=Недопустимое значение {0} для параметра {1} -90009=Невозможно создать или изменить последовательность {0} из-за неправильных атрибутов (START/RESTART {1}, MINVALUE {2}, MAXVALUE {3}, INCREMENT {4}) +90009=Невозможно создать или изменить последовательность {0} из-за неправильных атрибутов (базовое значение {1}, начальное значение {2}, минимальное значение {3}, максимальное значение {4}, приращение {5}, кэш {6}) 90010=Неправильный формат TO_CHAR {0} 90011=Путь неявно является относительным для текущего рабочего каталога и не допустим в URL базы данных {0}. Используйте абсолютный путь, ~/name, ./name, или настройку baseDir. 90012=Параметр {0} не установлен @@ -84,7 +91,6 @@ 90048=Неподдерживаемая версия файлов базы данных или некорректный заголовок в файле {0} 90049=Ошибка шифрования в файле {0} 90050=Некорректный формат пароля, должен быть: пароль файла <пробел> пароль пользователя -90051=Количество цифр после разделителя (scale) (${0}) не должно быть больше общего количества цифр (precision) ({1}) 90052=Подзапрос выбирает более одного столбца 90053=Подзапрос выбирает более одной строки 90054=Некорректное использование агрегирующей функции {0} @@ -141,6 +147,6 @@ 90107=Невозможно удалить {0}, пока существует зависимый объект {1} 90108=Ошибка нехватки памяти 90109=Представление {0} содержит ошибки: {1} -90110=Сравнение массива (ARRAY) со скалярным значением +90110=Значения типов данных {0} и {1} не сравнимы друг с другом 90111=Ошибка при обращении к линкованной таблице SQL запросом {0}, причина: {1} 90112=Запись не найдена при удалении из индекса {0} @@ -178,6 +185,17 @@ 90144=Внешняя аутентификация не включена в базе данных {0} 
90145=FOR UPDATE не допускается в запросе с DISTINCT или запросе с группировкой 90146=База данных {0} не найдена и её автоматическое создание запрещено флагом IFEXISTS=true +90147=Нельзя использовать метод {0} при включённом автовыполнении +90148=Текущее значение последовательности {0} ещё не определено в этой сессии +90149=База данных {0} не найдена, создайте её предварительно или разрешите удалённое создание баз данных (не рекомендуется в защищённых системах) +90150=Диапазон или точность ({0}) должны быть в пределах от {1} до {2} включительно +90151=Масштаб или точность долей секунды ({0}) должны быть в пределах от {1} до {2} включительно +90152=Ограничение {0} используется ограничением {1} +90153=Столбец {0} ссылается на столбец {1}, не имеющий допустимой операции сравнения +90154=Нельзя присвоить значение генерируемому столбцу {0} +90155=Генерируемый столбец {0} не может обновляться ссылочным ограничением с пунктом {1} +90156=Имя столбца не указано для выражения {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Внутренняя ошибка: {0} HY004=Неизвестный тип данных: {0} HYC00=Данная функция не поддерживается: {0} diff --git a/h2/src/main/org/h2/res/_messages_sk.prop b/h2/src/main/org/h2/res/_messages_sk.prop index 98cfa648ea..b86a883353 100644 --- a/h2/src/main/org/h2/res/_messages_sk.prop +++ b/h2/src/main/org/h2/res/_messages_sk.prop @@ -11,6 +11,7 @@ 22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Chyba konverzie dát pre {0} 22025=Chyba v LIKE ESCAPE: {0} +2202E=#Array element error: {0}, expected {1} 22030=#Value not permitted for column {0}: {1} 22031=#Value not a member of enumerators {0}: {1} 22032=#Empty enums are not allowed @@ -26,23 +27,29 @@ 40001=Mŕtvy bod (deadlock) detegovaný. Aktuálna transakcia bude odvolaná (rolled back). 
Podrobnosti: {0} 42000=Syntaktická chyba v SQL príkaze {0} 42001=Syntaktická chyba v SQL príkaze {0}; očakávané {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=Tabuľka {0} už existuje 42S02=Tabuľka {0} nenájdená +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=Index {0} už existuje 42S12=Index {0} nenájdený 42S21=Duplicitné meno stĺpca {0} 42S22=Stĺpec {0} nenájdený 42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=Príkaz bol zrušený alebo vypršal časový limit sedenia 90000=Funkcia {0} musí vracať výsledok (result set) 90001=Metóda nie je povolená pre dopyt (query). Použite execute alebo executeQuery namiesto executeUpdate 90002=Metóda je povolená iba pre dopyt (query). Použite execute alebo executeUpdate namiesto executeQuery 90003=Hexadecimálny reťazec s nepárnym počtom znakov: {0} 90004=Hexadecimálny reťazec obsahuje nepovolené znaky pre šestnáskovú sústavu: {0} +90005=#Invalid trigger flags: {0} 90006=#Sequence {0} has run out of numbers 90007=Objekt už je zatvorený 90008=Nesprávna hodnota {0} parametra {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=#Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=#Invalid TO_CHAR format {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 
90012=Parameter {0} nie je nastavený @@ -84,7 +91,6 @@ 90048=Nepodporovaná verzia databázového súboru alebo chybná hlavička súuboru {0} 90049=Chyba šifrovania súboru {0} 90050=Nesprávny formát hesiel, musí byť: súborové heslo používateľské heslo -90051=#Scale(${0}) must not be bigger than precision({1}) 90052=Vnorený dopyt (subquery) nie je dopyt na jeden stĺpec 90053=Skalárny vnorený dopyt (scalar subquery) obsahuje viac ako jeden riadok 90054=Nesprávne použitie agregačnej funkcie {0} @@ -141,7 +147,7 @@ 90107=Nemôžem zmazať {0} lebo {1} zavisí na {0} 90108=Nedostatok pamäte. 90109=Pohľad (view) {0} je nesprávny: {1} -90110=#Comparing ARRAY to scalar value +90110=#Values of types {0} and {1} are not comparable 90111=Chyba prístupu k linkovanej tabuľke SQL príkazom {0}, dôvod: {1} 90112=Riadok nenájdený pri pokuse o vymazanie cez index {0} 90113=Nepodporované nastavenie spojenia {0} @@ -178,6 +184,17 @@ 90144=#Authenticator not enabled on database {0} 90145=#FOR UPDATE is not allowed in DISTINCT or grouped select 90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Všeobecná chyba: {0} HY004=Neznámy dátový typ: {0} HYC00=Vlastnosť nie je 
podporovaná: {0} diff --git a/h2/src/main/org/h2/res/_messages_zh_cn.prop b/h2/src/main/org/h2/res/_messages_zh_cn.prop index 0dffe3b36d..03d1079e61 100644 --- a/h2/src/main/org/h2/res/_messages_zh_cn.prop +++ b/h2/src/main/org/h2/res/_messages_zh_cn.prop @@ -11,6 +11,7 @@ 22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=转换数据{0}期间出现转换错误 22025=LIKE ESCAPE(转义符)存在错误: {0} +2202E=#Array element error: {0}, expected {1} 22030=#Value not permitted for column {0}: {1} 22031=#Value not a member of enumerators {0}: {1} 22032=#Empty enums are not allowed @@ -26,23 +27,29 @@ 40001=检测到死锁.当前事务已回滚.详情: {0} 42000=SQL语法错误 {0} 42001=SQL语法错误 {0}; 预期: {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01= {0}表已存在 42S02=找不到表 {0} +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=索引 {0} 已存在 42S12=找不到索引 {0} 42S21=重复的字段: {0} 42S22=找不到字段 {0} 42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=语句已取消执行或会话已过期 90000={0} 函数必须返回一个结果集 90001=不允许在查询内使用的方法,使用execute 或 executeQuery 代替 executeUpdate 90002=只允许在查询内使用的方法. 使用 execute 或 executeUpdate 代替 executeQuery 90003=十六进制字符串包含奇数个数字字符: {0} 90004=十六进制字符串包含非十六进制字符: {0} +90005=#Invalid trigger flags: {0} 90006=#Sequence {0} has run out of numbers 90007=对象已关闭 90008=被发现非法的数值 {0} 在参数 {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=#Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=#Invalid TO_CHAR format {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 
90012=参数 {0} 的值还没有设置 @@ -84,7 +91,6 @@ 90048=不支持的数据库文件版本或无效的文件头 {0} 90049=文件加密错误 {0} 90050=错误的口令格式, 必须为: 文件 口令 <空格> 用户 口令 -90051=#Scale(${0}) must not be bigger than precision({1}) 90052=子查询非单一字段查询 90053=标量子查询(Scalar subquery)包含多于一行结果 90054=非法使用聚合函数 {0} @@ -141,7 +147,7 @@ 90107=不能删除 {0} ,因为 {1} 依赖着它 90108=内存不足. 90109=视图 {0} 无效: {1} -90110=#Comparing ARRAY to scalar value +90110=#Values of types {0} and {1} are not comparable 90111=SQL语句访问表连接错误 {0}, 原因: {1} 90112=尝试从索引中删除 {0}的时候找不到行 90113=不支持的连接设置 {0} @@ -178,6 +184,17 @@ 90144=#Authenticator not enabled on database {0} 90145=#FOR UPDATE is not allowed in DISTINCT or grouped select 90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=常规错误: {0} HY004=位置数据类型: {0} HYC00=不支持的特性: {0} diff --git a/h2/src/docsrc/help/help.csv b/h2/src/main/org/h2/res/help.csv similarity index 52% rename from h2/src/docsrc/help/help.csv rename to h2/src/main/org/h2/res/help.csv index f85898de64..d783fa770a 100644 --- a/h2/src/docsrc/help/help.csv +++ b/h2/src/main/org/h2/res/help.csv @@ -1,25 +1,23 @@ -# Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, -# and the EPL 1.0 (http://h2database.com/html/license.html). +# Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +# and the EPL 1.0 (https://h2database.com/html/license.html). # Initial Developer: H2 Group "SECTION","TOPIC","SYNTAX","TEXT","EXAMPLE" "Commands (DML)","SELECT"," -SELECT [ TOP term [ PERCENT ] [ WITH TIES ] ] -[ DISTINCT [ ON ( expression [,...] ) ] | ALL ] +SELECT [ DISTINCT @h2@ [ ON ( expression [,...] ) ] | ALL ] selectExpression [,...] [ FROM tableExpression [,...] ] [ WHERE expression ] -[ GROUP BY expression [,...] ] [ HAVING expression ] +[ GROUP BY groupingElement [,...] ] [ HAVING expression ] [ WINDOW { { windowName AS windowSpecification } [,...] } ] -[ QUALIFY expression ] -[ { UNION [ ALL ] | EXCEPT | MINUS | INTERSECT } select ] -[ ORDER BY order [,...] ] -[ LIMIT expression [ OFFSET expression ] [ SAMPLE_SIZE rowCountInt ] ] -[ [ OFFSET expression { ROW | ROWS } ] - [ FETCH { FIRST | NEXT } [ expression [ PERCENT ] ] { ROW | ROWS } - { ONLY | WITH TIES } ] [ SAMPLE_SIZE rowCountInt ] ] -[ FOR UPDATE ] +@h2@ [ QUALIFY expression ] +[ { UNION [ ALL ] | EXCEPT | INTERSECT } query ] +[ ORDER BY selectOrder [,...] ] +[ OFFSET expression { ROW | ROWS } ] +[ FETCH { FIRST | NEXT } [ expression [ PERCENT ] ] { ROW | ROWS } + { ONLY | WITH TIES } ] +@h2@ [ FOR UPDATE ] "," Selects data from a table or multiple tables. @@ -34,7 +32,7 @@ If FROM clause is not specified a single row is constructed. If GROUP BY clause is not specified, but non-window aggregate functions are used or HAVING is specified all rows are grouped together. -4. Aggregate functions are evaluated, SAMPLE_SIZE limits the number of rows read. +4. Aggregate functions are evaluated. 5. HAVING filters rows after grouping and evaluation of aggregate functions. Non-window aggregate functions are allowed in this clause. @@ -50,18 +48,16 @@ ORDER BY clause, if any, is used to determine preserved rows. 
First row is each DISTINCT ON group is preserved. In absence of ORDER BY preserved rows are not determined, database may choose any row from each DISTINCT ON group. -9. UNION, EXCEPT (MINUS), and INTERSECT combine the result of this query with the results of another query. -Multiple set operators (UNION, INTERSECT, MINUS, EXCEPT) are evaluated from left to right. -For compatibility with other databases and future versions of H2 please use parentheses. +9. UNION, EXCEPT, and INTERSECT combine the result of this query with the results of another query. +INTERSECT has higher precedence than UNION and EXCEPT. +Operators with equal precedence are evaluated from left to right. 10. ORDER BY sorts the result by the given column(s) or expression(s). -11. Number of rows in output can be limited either with standard OFFSET / FETCH, -with non-standard LIMIT / OFFSET, or with non-standard TOP clauses. -Different clauses cannot be used together. +11. Number of rows in output can be limited with OFFSET and FETCH clauses. OFFSET specifies how many rows to skip. Please note that queries with high offset values can be slow. -FETCH FIRST/NEXT, LIMIT or TOP limits the number of rows returned by the query (no limit if null or smaller than zero). +FETCH FIRST/NEXT limits the number of rows returned by the query. If PERCENT is specified number of rows is specified as a percent of the total number of rows and should be an integer value between 0 and 100 inclusive. WITH TIES can be used only together with ORDER BY and means that all additional rows that have the same sorting position @@ -72,16 +68,16 @@ This clause can be used to reuse the same definition in multiple functions. If FOR UPDATE is specified, the tables or rows are locked for writing. This clause is not allowed in DISTINCT queries and in queries with non-window aggregates, GROUP BY, or HAVING clauses. -When using default MVStore engine only the selected rows are locked as in an UPDATE statement. 
+Only the selected rows are locked as in an UPDATE statement. Rows from the right side of a left join and from the left side of a right join, including nested joins, aren't locked. Locking behavior for rows that were excluded from result using OFFSET / FETCH / LIMIT / TOP or QUALIFY is undefined, to avoid possible locking of excessive rows try to filter out unneeded rows with the WHERE criteria when possible. -Rows are processed one by one. Committed row is read, tested with WHERE criteria, locked, read again and re-tested, +Rows are processed one by one. Each row is read, tested with WHERE criteria, locked, read again and re-tested, because its value may be changed by concurrent transaction before lock acquisition. -Note that new uncommitted rows from other transactions are not visible due to read committed isolation level -and therefore cannot be locked. -With PageStore engine the whole tables are locked; -to avoid deadlocks with this engine always lock the tables in the same order in all transactions. +Note that new uncommitted rows from other transactions are not visible unless read uncommitted isolation level is used +and therefore cannot be selected and locked. +Modified uncommitted rows from other transactions that satisfy the WHERE criteria cause this SELECT to wait for +commit or rollback of those transactions. "," SELECT * FROM TEST; SELECT * FROM TEST ORDER BY NAME; @@ -98,39 +94,43 @@ SELECT DISTINCT ON(C1) C1, C2 FROM TEST ORDER BY C1; " "Commands (DML)","INSERT"," -INSERT INTO tableName insertColumnsAndSource +INSERT INTO [schemaName.]tableName [ ( columnName [,...] ) ] +{ [ overrideClause ] { insertValues | @h2@ [ DIRECT ] query } } + | DEFAULT VALUES "," Inserts a new row / new rows into a table. When using DIRECT, then the results from the query are directly applied in the target table without any intermediate step. - -When using SORTED, b-tree pages are split at the insertion point. This can improve performance and reduce disk usage. 
"," INSERT INTO TEST VALUES(1, 'Hello') " "Commands (DML)","UPDATE"," -UPDATE tableName [ [ AS ] newTableAlias ] SET setClauseList -[ WHERE expression ] [ ORDER BY order [,...] ] [ LIMIT expression ] +UPDATE [schemaName.]tableName [ [ AS ] newTableAlias ] SET setClauseList +[ WHERE expression ] @c@ [ ORDER BY sortSpecificationList ] +@h2@ FETCH { FIRST | NEXT } [ expression ] { ROW | ROWS } ONLY "," Updates data in a table. ORDER BY is supported for MySQL compatibility, but it is ignored. +If FETCH is specified, at most the specified number of rows are updated (no limit if null or smaller than zero). "," UPDATE TEST SET NAME='Hi' WHERE ID=1; UPDATE PERSON P SET NAME=(SELECT A.NAME FROM ADDRESS A WHERE A.ID=P.ID); " "Commands (DML)","DELETE"," -DELETE [ TOP term ] FROM tableName [ WHERE expression ] [ LIMIT term ] +DELETE FROM [schemaName.]tableName +[ WHERE expression ] +@h2@ FETCH { FIRST | NEXT } [ expression ] { ROW | ROWS } ONLY "," Deletes rows form a table. -If TOP or LIMIT is specified, at most the specified number of rows are deleted (no limit if null or smaller than zero). +If FETCH is specified, at most the specified number of rows are deleted (no limit if null or smaller than zero). "," DELETE FROM TEST WHERE ID=2 " "Commands (DML)","BACKUP"," -BACKUP TO fileNameString +@h2@ BACKUP TO fileNameString "," Backs up the database files to a .zip file. Objects are not locked, but the backup is transactionally consistent because the transaction log is also copied. @@ -149,9 +149,21 @@ If the called function returns an array, then each element in this array is retu CALL 15*25 " +"Commands (DML)","EXECUTE IMMEDIATE"," +EXECUTE IMMEDIATE sqlString +"," +Dynamically prepares and executes the SQL command specified as a string. Query commands may not be used. 
+"," +EXECUTE IMMEDIATE 'ALTER TABLE TEST DROP CONSTRAINT ' || + QUOTE_IDENT((SELECT CONSTRAINT_NAME + FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS + WHERE TABLE_SCHEMA = 'PUBLIC' AND TABLE_NAME = 'TEST' + AND CONSTRAINT_TYPE = 'UNIQUE')); +" + "Commands (DML)","EXPLAIN"," -EXPLAIN { [ PLAN FOR ] | ANALYZE } -{ select | insert | update | delete | merge } +@h2@ EXPLAIN { [ PLAN FOR ] | ANALYZE } +@h2@ { query | insert | update | delete | mergeInto | mergeUsing } "," Shows the execution plan for a statement. When using EXPLAIN ANALYZE, the statement is actually executed, and the query plan @@ -160,10 +172,10 @@ will include the actual row scan count for each table. EXPLAIN SELECT * FROM TEST WHERE ID=1 " -"Commands (DML)","MERGE"," -MERGE INTO tableName [ ( columnName [,...] ) ] -[ KEY ( columnName [,...] ) ] -{ insertValues | select } +"Commands (DML)","MERGE INTO"," +@h2@ MERGE INTO [schemaName.]tableName [ ( columnName [,...] ) ] +@h2@ [ KEY ( columnName [,...] ) ] +@h2@ { insertValues | query } "," Updates existing rows, and insert rows that don't exist. If no key column is specified, the primary key columns are used to find the row. If more than one @@ -173,16 +185,18 @@ MERGE INTO TEST KEY(ID) VALUES(2, 'World') " "Commands (DML)","MERGE USING"," -MERGE INTO targetTableName [ [AS] targetAlias] -USING { ( select ) | sourceTableName }[ [AS] sourceAlias ] +MERGE INTO [schemaName.]targetTableName [ [AS] targetAlias] +USING tableExpression ON expression mergeWhenClause [,...] "," Updates or deletes existing rows, and insert rows that don't exist. The ON clause specifies the matching column expression. -Different rows from a source table may not match with the same target row, -but one source row may be matched with multiple target rows. + +Different rows from a source table may not match with the same target row +(this is not ensured by H2 if target table is an updatable view). +One source row may be matched with multiple target rows. 
If statement doesn't need a source table a DUAL table can be substituted. "," @@ -193,7 +207,7 @@ MERGE INTO TARGET_TABLE AS T USING SOURCE_TABLE AS S WHEN MATCHED AND T.COL2 = 'FINAL' THEN DELETE WHEN NOT MATCHED THEN - INSERT (ID, COL1, COL2) VALUES(S.ID, S.COL1, S.COL2) + INSERT (ID, COL1, COL2) VALUES(S.ID, S.COL1, S.COL2); MERGE INTO TARGET_TABLE AS T USING (SELECT * FROM SOURCE_TABLE) AS S ON T.ID = S.ID WHEN MATCHED AND T.COL2 <> 'FINAL' THEN @@ -201,42 +215,58 @@ MERGE INTO TARGET_TABLE AS T USING (SELECT * FROM SOURCE_TABLE) AS S WHEN MATCHED AND T.COL2 = 'FINAL' THEN DELETE WHEN NOT MATCHED THEN - INSERT VALUES (S.ID, S.COL1, S.COL2) + INSERT VALUES (S.ID, S.COL1, S.COL2); +MERGE INTO TARGET T USING (VALUES (1, 4), (2, 15)) S(ID, V) + ON T.ID = S.ID + WHEN MATCHED THEN UPDATE SET V = S.V + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.V); MERGE INTO TARGET_TABLE USING DUAL ON ID = 1 WHEN NOT MATCHED THEN INSERT VALUES (1, 'Test') - WHEN MATCHED THEN UPDATE SET NAME = 'Test' + WHEN MATCHED THEN UPDATE SET NAME = 'Test'; " "Commands (DML)","RUNSCRIPT"," -RUNSCRIPT FROM fileNameString scriptCompressionEncryption -[ CHARSET charsetString ] +@h2@ RUNSCRIPT FROM fileNameString scriptCompressionEncryption +@h2@ [ CHARSET charsetString ] +@h2@ { [ QUIRKS_MODE ] [ VARIABLE_BINARY ] | FROM_1X } "," Runs a SQL script from a file. The script is a text file containing SQL statements; each statement must end with ';'. This command can be used to restore a database from a backup. The password must be in single quotes; it is case sensitive and can contain spaces. -Instead of a file name, an URL may be used. +Instead of a file name, a URL may be used. To read a stream from the classpath, use the prefix 'classpath:'. -See the Pluggable File System section on the Advanced page. +See the [Pluggable File System](https://h2database.com/html/advanced.html#file_system) section. The compression algorithm must match the one used when creating the script. 
-Instead of a file, an URL may be used. +Instead of a file, a URL may be used. + +If ""QUIRKS_MODE"" is specified, the various compatibility quirks for scripts from older versions of H2 are enabled. +Use this clause when you import script that was generated by H2 1.4.200 or an older version into more recent version. + +If ""VARIABLE_BINARY"" is specified, the ""BINARY"" data type will be parsed as ""VARBINARY"". +Use this clause when you import script that was generated by H2 1.4.200 or an older version into more recent version. + +If ""FROM_1X"" is specified, quirks for scripts exported from H2 1.*.* are enabled. +Use this flag to populate a new database with the data exported from 1.*.* versions of H2. +This flag also enables ""QUIRKS_MODE"" and ""VARIABLE_BINARY"" implicitly. Admin rights are required to execute this command. "," RUNSCRIPT FROM 'backup.sql' RUNSCRIPT FROM 'classpath:/com/acme/test.sql' +RUNSCRIPT FROM 'dump_from_1_4_200.sql' FROM_1X " "Commands (DML)","SCRIPT"," -SCRIPT { [ NODATA ] | [ SIMPLE ] [ COLUMNS ] } -[ NOPASSWORDS ] [ NOSETTINGS ] -[ DROP ] [ BLOCKSIZE blockSizeInt ] -[ TO fileNameString scriptCompressionEncryption +@h2@ SCRIPT { [ NODATA ] | [ SIMPLE ] [ COLUMNS ] } +@h2@ [ NOPASSWORDS ] @h2@ [ NOSETTINGS ] +@h2@ [ DROP ] @h2@ [ BLOCKSIZE blockSizeInt ] +@h2@ [ TO fileNameString scriptCompressionEncryption [ CHARSET charsetString ] ] -[ TABLE tableName [, ...] ] -[ SCHEMA schemaName [, ...] ] +@h2@ [ TABLE [schemaName.]tableName [, ...] ] +@h2@ [ SCHEMA schemaName [, ...] ] "," Creates a SQL script from the database. @@ -271,7 +301,7 @@ SCRIPT NODATA " "Commands (DML)","SHOW"," -SHOW { SCHEMAS | TABLES [ FROM schemaName ] | +@c@ SHOW { SCHEMAS | TABLES [ FROM schemaName ] | COLUMNS FROM tableName [ FROM schemaName ] } "," Lists the schemas, tables, or the columns of a table. @@ -281,7 +311,7 @@ SHOW TABLES "Commands (DML)","Explicit table"," TABLE [schemaName.]tableName -[ ORDER BY order [,...] ] +[ ORDER BY selectOrder [,...] 
] [ OFFSET expression { ROW | ROWS } ] [ FETCH { FIRST | NEXT } [ expression [ PERCENT ] ] { ROW | ROWS } { ONLY | WITH TIES } ] @@ -289,7 +319,7 @@ TABLE [schemaName.]tableName Selects data from a table. This command is an equivalent to SELECT * FROM tableName. -See SELECT command for description of ORDER BY, OFFSET, and FETCH. +See [SELECT](https://h2database.com/html/commands.html#select) command for description of ORDER BY, OFFSET, and FETCH. "," TABLE TEST; TABLE TEST ORDER BY ID FETCH FIRST ROW ONLY; @@ -297,21 +327,21 @@ TABLE TEST ORDER BY ID FETCH FIRST ROW ONLY; "Commands (DML)","Table value"," VALUES rowValueExpression [,...] -[ ORDER BY order [,...] ] +[ ORDER BY selectOrder [,...] ] [ OFFSET expression { ROW | ROWS } ] [ FETCH { FIRST | NEXT } [ expression [ PERCENT ] ] { ROW | ROWS } { ONLY | WITH TIES } ] "," A list of rows that can be used like a table. -See SELECT command for description of ORDER BY, OFFSET, and FETCH. +See [SELECT](https://h2database.com/html/commands.html#select) command for description of ORDER BY, OFFSET, and FETCH. The column list of the resulting table is C1, C2, and so on. "," VALUES (1, 'Hello'), (2, 'World'); " "Commands (DML)","WITH"," -WITH [ RECURSIVE ] { name [( columnName [,...] )] AS ( select ) [,...] } -{ select | insert | update | merge | delete | createTable } +WITH [ RECURSIVE ] { name [( columnName [,...] )] AS ( query ) [,...] } +{ query | @h2@ { insert | update | delete | mergeInto | mergeUsing | createTable } } "," Can be used to create a recursive or non-recursive query (common table expression). For recursive queries the first select has to be a UNION. @@ -337,8 +367,81 @@ WITH cte1 AS ( SELECT sum(FIRST_COLUMN) FROM cte2; " +"Commands (DDL)","ALTER DOMAIN"," +ALTER DOMAIN @h2@ [ IF EXISTS ] [schemaName.]domainName +{ SET DEFAULT expression } + | { DROP DEFAULT } + | @h2@ { SET ON UPDATE expression } + | @h2@ { DROP ON UPDATE } +"," +Changes the default or on update expression of a domain. 
+Schema owner rights are required to execute this command. + +SET DEFAULT changes the default expression of a domain. + +DROP DEFAULT removes the default expression of a domain. +Old expression is copied into domains and columns that use this domain and don't have their own default expression. + +SET ON UPDATE changes the expression that is set on update if value for this domain is not specified in update +statement. + +DROP ON UPDATE removes the expression that is set on update of a column with this domain. +Old expression is copied into domains and columns that use this domain and don't have their own on update expression. + +This command commits an open transaction in this connection. +"," +ALTER DOMAIN D1 SET DEFAULT ''; +ALTER DOMAIN D1 DROP DEFAULT; +ALTER DOMAIN D1 SET ON UPDATE CURRENT_TIMESTAMP; +ALTER DOMAIN D1 DROP ON UPDATE; +" + +"Commands (DDL)","ALTER DOMAIN ADD CONSTRAINT"," +ALTER DOMAIN @h2@ [ IF EXISTS ] [schemaName.]domainName +ADD [ constraintNameDefinition ] +CHECK (condition) @h2@ [ CHECK | NOCHECK ] +"," +Adds a constraint to a domain. +Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. +"," +ALTER DOMAIN D ADD CONSTRAINT D_POSITIVE CHECK (VALUE > 0) +" + +"Commands (DDL)","ALTER DOMAIN DROP CONSTRAINT"," +ALTER DOMAIN @h2@ [ IF EXISTS ] [schemaName.]domainName +DROP CONSTRAINT @h2@ [ IF EXISTS ] [schemaName.]constraintName +"," +Removes a constraint from a domain. +Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. +"," +ALTER DOMAIN D DROP CONSTRAINT D_POSITIVE +" + +"Commands (DDL)","ALTER DOMAIN RENAME"," +@h2@ ALTER DOMAIN [ IF EXISTS ] [schemaName.]domainName RENAME TO newName +"," +Renames a domain. +Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. 
+"," +ALTER DOMAIN TEST RENAME TO MY_TYPE +" + +"Commands (DDL)","ALTER DOMAIN RENAME CONSTRAINT"," +@h2@ ALTER DOMAIN [ IF EXISTS ] [schemaName.]domainName +@h2@ RENAME CONSTRAINT [schemaName.]oldConstraintName +@h2@ TO newConstraintName +"," +Renames a constraint. +This command commits an open transaction in this connection. +"," +ALTER DOMAIN D RENAME CONSTRAINT FOO TO BAR +" + "Commands (DDL)","ALTER INDEX RENAME"," -ALTER INDEX [ IF EXISTS ] indexName RENAME TO newIndexName +@h2@ ALTER INDEX [ IF EXISTS ] [schemaName.]indexName RENAME TO newIndexName "," Renames an index. This command commits an open transaction in this connection. @@ -347,24 +450,20 @@ ALTER INDEX IDXNAME RENAME TO IDX_TEST_NAME " "Commands (DDL)","ALTER SCHEMA RENAME"," -ALTER SCHEMA [ IF EXISTS ] schema RENAME TO newSchemaName +@h2@ ALTER SCHEMA [ IF EXISTS ] schemaName RENAME TO newSchemaName "," Renames a schema. +Schema admin rights are required to execute this command. This command commits an open transaction in this connection. "," ALTER SCHEMA TEST RENAME TO PRODUCTION " "Commands (DDL)","ALTER SEQUENCE"," -ALTER SEQUENCE [ IF EXISTS ] sequenceName -[ RESTART WITH long ] -[ INCREMENT BY long ] -[ MINVALUE long | NOMINVALUE | NO MINVALUE ] -[ MAXVALUE long | NOMAXVALUE | NO MAXVALUE ] -[ CYCLE long | NOCYCLE | NO CYCLE ] -[ CACHE long | NOCACHE | NO CACHE ] +ALTER SEQUENCE @h2@ [ IF EXISTS ] [schemaName.]sequenceName alterSequenceOption [...] "," Changes the parameters of a sequence. +Schema owner rights are required to execute this command. This command does not commit the current transaction; however the new value is used by other transactions immediately, and rolling back this command has no effect. "," @@ -372,19 +471,24 @@ ALTER SEQUENCE SEQ_ID RESTART WITH 1000 " "Commands (DDL)","ALTER TABLE ADD"," -ALTER TABLE [ IF EXISTS ] tableName ADD [ COLUMN ] -{ [ IF NOT EXISTS ] columnName columnDefinition - | ( { columnName columnDefinition | constraint } [,...] 
) } -[ { { BEFORE | AFTER } columnName } | FIRST ] +ALTER TABLE @h2@ [ IF EXISTS ] [schemaName.]tableName ADD [ COLUMN ] +{ @h2@ [ IF NOT EXISTS ] columnName columnDefinition @h2@ [ USING initialValueExpression ] + | @h2@ { ( { columnName columnDefinition | tableConstraintDefinition } [,...] ) } } +@h2@ [ { { BEFORE | AFTER } columnName } | FIRST ] "," Adds a new column to a table. This command commits an open transaction in this connection. + +If USING is specified the provided expression is used to generate initial value of the new column for each row. +The expression may reference existing columns of the table. +Otherwise the DEFAULT expression is used, if any. +If neither USING nor DEFAULT are specified, the NULL is used. "," ALTER TABLE TEST ADD CREATEDATE TIMESTAMP " "Commands (DDL)","ALTER TABLE ADD CONSTRAINT"," -ALTER TABLE [ IF EXISTS ] tableName ADD constraint [ CHECK | NOCHECK ] +ALTER TABLE @h2@ [ IF EXISTS ] tableName ADD tableConstraintDefinition @h2@ [ CHECK | NOCHECK ] "," Adds a constraint to a table. If NOCHECK is specified, existing rows are not checked for consistency (the default is to check consistency for existing rows). @@ -396,8 +500,9 @@ ALTER TABLE TEST ADD CONSTRAINT NAME_UNIQUE UNIQUE(NAME) " "Commands (DDL)","ALTER TABLE RENAME CONSTRAINT"," -ALTER TABLE [ IF EXISTS ] tableName RENAME oldConstraintName -TO newConstraintName +@h2@ ALTER TABLE [ IF EXISTS ] [schemaName.]tableName +@h2@ RENAME CONSTRAINT [schemaName.]oldConstraintName +@h2@ TO newConstraintName "," Renames a constraint. This command commits an open transaction in this connection. 
@@ -406,28 +511,35 @@ ALTER TABLE TEST RENAME CONSTRAINT FOO TO BAR " "Commands (DDL)","ALTER TABLE ALTER COLUMN"," -ALTER TABLE [ IF EXISTS ] tableName ALTER COLUMN columnName -{ { columnDefinition } - | { RENAME TO name } - | { RESTART WITH long } - | { SELECTIVITY int } +ALTER TABLE @h2@ [ IF EXISTS ] [schemaName.]tableName +ALTER COLUMN @h2@ [ IF EXISTS ] columnName +{ @h2@ { columnDefinition } + | @h2@ { RENAME TO name } + | SET GENERATED { ALWAYS | BY DEFAULT } [ alterIdentityColumnOption [...] ] + | alterIdentityColumnOption [...] + | DROP IDENTITY + | @h2@ { SELECTIVITY int } | { SET DEFAULT expression } | { DROP DEFAULT } - | { SET ON UPDATE expression } - | { DROP ON UPDATE } + | DROP EXPRESSION + | @h2@ { SET ON UPDATE expression } + | @h2@ { DROP ON UPDATE } + | @h2@ { SET DEFAULT ON NULL } + | @h2@ { DROP DEFAULT ON NULL } | { SET NOT NULL } - | { DROP NOT NULL } | { SET NULL } - | { SET DATA TYPE dataType } - | { SET { VISIBLE | INVISIBLE } } } + | { DROP NOT NULL } | @c@ { SET NULL } + | { SET DATA TYPE dataTypeOrDomain @h2@ [ USING newValueExpression ] } + | @h2@ { SET { VISIBLE | INVISIBLE } } } "," Changes the data type of a column, rename a column, change the identity value, or change the selectivity. Changing the data type fails if the data can not be converted. -RESTART changes the next value of an auto increment column. -The column must already be an auto increment column. -For RESTART, the same transactional rules as for ALTER SEQUENCE apply. +SET GENERATED ALWAYS, SET GENERATED BY DEFAULT, or identity options convert the column into identity column +(if it wasn't an identity column) and set new values of specified options for its sequence. + +DROP IDENTITY removes identity status of a column. SELECTIVITY sets the selectivity (1-100) for a column. Setting the selectivity to 0 means the default value. 
@@ -435,18 +547,29 @@ Selectivity is used by the cost based optimizer to calculate the estimated cost Selectivity 100 means values are unique, 10 means every distinct value appears 10 times on average. SET DEFAULT changes the default value of a column. +This command doesn't affect generated and identity columns. DROP DEFAULT removes the default value of a column. +DROP EXPRESSION converts generated column into base column. + SET ON UPDATE changes the value that is set on update if value for this column is not specified in update statement. +This command doesn't affect generated and identity columns. DROP ON UPDATE removes the value that is set on update of a column. -SET NOT NULL sets a column to not allow NULL. Rows may not contains NULL in this column. +SET DEFAULT ON NULL makes NULL value work as DEFAULT value in assignments to this column. + +DROP DEFAULT ON NULL makes NULL value work as NULL value in assignments to this column. -DROP NOT NULL and SET NULL set a column to allow NULL. The row may not be part of a primary key. +SET NOT NULL sets a column to not allow NULL. Rows may not contain NULL in this column. -SET DATA TYPE changes the data type of a column. +DROP NOT NULL and SET NULL set a column to allow NULL. +The column may not be part of a primary key and may not be an identity column. + +SET DATA TYPE changes the data type of a column, for each row old value is converted to this data type +unless USING is specified with a custom expression. +USING expression may reference previous value of the modified column by its name and values of other columns. SET INVISIBLE makes the column hidden, i.e. it will not appear in SELECT * results. SET VISIBLE has the reverse effect. @@ -465,30 +588,33 @@ ALTER TABLE TEST ALTER COLUMN NAME SET INVISIBLE; " "Commands (DDL)","ALTER TABLE DROP COLUMN"," -ALTER TABLE [ IF EXISTS ] tableName DROP COLUMN [ IF EXISTS ] -columnName [,...] | ( columnName [,...] 
) +ALTER TABLE @h2@ [ IF EXISTS ] [schemaName.]tableName +DROP [ COLUMN ] @h2@ [ IF EXISTS ] +@h2@ { ( columnName [,...] ) } | columnName @c@ [,...] "," Removes column(s) from a table. This command commits an open transaction in this connection. "," ALTER TABLE TEST DROP COLUMN NAME -ALTER TABLE TEST DROP COLUMN NAME1, NAME2 ALTER TABLE TEST DROP COLUMN (NAME1, NAME2) " "Commands (DDL)","ALTER TABLE DROP CONSTRAINT"," -ALTER TABLE [ IF EXISTS ] tableName DROP -{ CONSTRAINT [ IF EXISTS ] constraintName | PRIMARY KEY } +ALTER TABLE @h2@ [ IF EXISTS ] [schemaName.]tableName DROP +CONSTRAINT @h2@ [ IF EXISTS ] [schemaName.]constraintName [ RESTRICT | CASCADE ] | @c@ { PRIMARY KEY } "," Removes a constraint or a primary key from a table. +If CASCADE is specified, unique or primary key constraint is dropped together with all +referential constraints that reference the specified constraint. This command commits an open transaction in this connection. "," -ALTER TABLE TEST DROP CONSTRAINT UNIQUE_NAME +ALTER TABLE TEST DROP CONSTRAINT UNIQUE_NAME RESTRICT " "Commands (DDL)","ALTER TABLE SET"," -ALTER TABLE [ IF EXISTS ] tableName SET REFERENTIAL_INTEGRITY -{ FALSE | TRUE } [ CHECK | NOCHECK ] +@h2@ ALTER TABLE [ IF EXISTS ] [schemaName.]tableName +SET REFERENTIAL_INTEGRITY +@h2@ { FALSE | TRUE } @h2@ [ CHECK | NOCHECK ] "," Disables or enables referential integrity checking for a table. This command can be used inside a transaction. Enabling referential integrity does not check @@ -502,7 +628,7 @@ ALTER TABLE TEST SET REFERENTIAL_INTEGRITY FALSE " "Commands (DDL)","ALTER TABLE RENAME"," -ALTER TABLE [ IF EXISTS ] tableName RENAME TO newName +@h2@ ALTER TABLE [ IF EXISTS ] [schemaName.]tableName RENAME TO newName "," Renames a table. This command commits an open transaction in this connection. 
@@ -511,7 +637,7 @@ ALTER TABLE TEST RENAME TO MY_DATA " "Commands (DDL)","ALTER USER ADMIN"," -ALTER USER userName ADMIN { TRUE | FALSE } +@h2@ ALTER USER userName ADMIN { TRUE | FALSE } "," Switches the admin flag of a user on or off. @@ -523,7 +649,7 @@ ALTER USER TOM ADMIN TRUE " "Commands (DDL)","ALTER USER RENAME"," -ALTER USER userName RENAME TO newUserName +@h2@ ALTER USER userName RENAME TO newUserName "," Renames a user. After renaming a user, the password becomes invalid and needs to be changed as well. @@ -536,7 +662,7 @@ ALTER USER TOM RENAME TO THOMAS " "Commands (DDL)","ALTER USER SET PASSWORD"," -ALTER USER userName SET { PASSWORD string | SALT bytes HASH bytes } +@h2@ ALTER USER userName SET { PASSWORD string | SALT bytes HASH bytes } "," Changes the password of a user. Only unquoted or uppercase user names are allowed. @@ -550,9 +676,10 @@ ALTER USER SA SET PASSWORD 'rioyxlgt' " "Commands (DDL)","ALTER VIEW RECOMPILE"," -ALTER VIEW [ IF EXISTS ] viewName RECOMPILE +@h2@ ALTER VIEW [ IF EXISTS ] [schemaName.]viewName RECOMPILE "," Recompiles a view after the underlying tables have been changed or created. +Schema owner rights are required to execute this command. This command is used for views created using CREATE FORCE VIEW. This command commits an open transaction in this connection. "," @@ -560,16 +687,17 @@ ALTER VIEW ADDRESS_VIEW RECOMPILE " "Commands (DDL)","ALTER VIEW RENAME"," -ALTER VIEW [ IF EXISTS ] viewName RENAME TO newName +@h2@ ALTER VIEW [ IF EXISTS ] [schemaName.]viewName RENAME TO newName "," Renames a view. +Schema owner rights are required to execute this command. This command commits an open transaction in this connection. "," ALTER VIEW TEST RENAME TO MY_VIEW " "Commands (DDL)","ANALYZE"," -ANALYZE [ TABLE tableName ] [ SAMPLE_SIZE rowCountInt ] +@h2@ ANALYZE [ TABLE [schemaName.]tableName ] [ SAMPLE_SIZE rowCountInt ] "," Updates the selectivity statistics of tables. If no table name is given, all tables are analyzed. 
@@ -585,23 +713,24 @@ This command commits an open transaction in this connection. ANALYZE SAMPLE_SIZE 1000 " -"Commands (DDL)","COMMENT"," -COMMENT ON -{ { COLUMN [ schemaName. ] tableName.columnName } +"Commands (DDL)","COMMENT ON"," +@h2@ COMMENT ON +@h2@ { { COLUMN [schemaName.]tableName.columnName } | { { TABLE | VIEW | CONSTANT | CONSTRAINT | ALIAS | INDEX | ROLE - | SCHEMA | SEQUENCE | TRIGGER | USER | DOMAIN } [ schemaName. ] objectName } } -IS expression + | SCHEMA | SEQUENCE | TRIGGER | USER | DOMAIN } [schemaName.]objectName } } +@h2@ IS expression "," -Sets the comment of a database object. Use NULL to remove the comment. +Sets the comment of a database object. Use NULL or empty string to remove the comment. -Admin rights are required to execute this command. +Admin rights are required to execute this command if object is a USER or ROLE. +Schema owner rights are required to execute this command for all other types of objects. This command commits an open transaction in this connection. "," COMMENT ON TABLE TEST IS 'Table used for testing' " "Commands (DDL)","CREATE AGGREGATE"," -CREATE AGGREGATE [ IF NOT EXISTS ] newAggregateName FOR className +@h2@ CREATE AGGREGATE [ IF NOT EXISTS ] [schemaName.]aggregateName FOR classNameString "," Creates a new user-defined aggregate function. The method name must be the full qualified class name. The class must implement the interface @@ -610,18 +739,17 @@ qualified class name. The class must implement the interface Admin rights are required to execute this command. This command commits an open transaction in this connection. 
"," -CREATE AGGREGATE SIMPLE_MEDIAN FOR ""com.acme.db.Median"" +CREATE AGGREGATE SIMPLE_MEDIAN FOR 'com.acme.db.Median' " "Commands (DDL)","CREATE ALIAS"," -CREATE ALIAS [ IF NOT EXISTS ] newFunctionAliasName [ DETERMINISTIC ] -[ NOBUFFER ] { FOR classAndMethodName | AS sourceCodeString } +@h2@ CREATE ALIAS [ IF NOT EXISTS ] [schemaName.]functionAliasName +@h2@ [ DETERMINISTIC ] +@h2@ { FOR classAndMethodString | AS sourceCodeString } "," Creates a new function alias. If this is a ResultSet returning function, by default the return value is cached in a local temporary file. -NOBUFFER - disables caching of ResultSet return value to temporary file. - DETERMINISTIC - Deterministic functions must always return the same value for the same parameters. The method name must be the full qualified class and method name, @@ -651,38 +779,44 @@ This command commits an open transaction in this connection. If you have the Groovy jar in your classpath, it is also possible to write methods using Groovy. 
"," -CREATE ALIAS MY_SQRT FOR ""java.lang.Math.sqrt""; -CREATE ALIAS GET_SYSTEM_PROPERTY FOR ""java.lang.System.getProperty""; +CREATE ALIAS MY_SQRT FOR 'java.lang.Math.sqrt'; +CREATE ALIAS MY_ROUND FOR 'java.lang.Math.round(double)'; +CREATE ALIAS GET_SYSTEM_PROPERTY FOR 'java.lang.System.getProperty'; CALL GET_SYSTEM_PROPERTY('java.class.path'); CALL GET_SYSTEM_PROPERTY('com.acme.test', 'true'); -CREATE ALIAS REVERSE AS $$ String reverse(String s) { return new StringBuilder(s).reverse().toString(); } $$; +CREATE ALIAS REVERSE AS 'String reverse(String s) { return new StringBuilder(s).reverse().toString(); }'; CALL REVERSE('Test'); -CREATE ALIAS tr AS $$@groovy.transform.CompileStatic +CREATE ALIAS tr AS '@groovy.transform.CompileStatic static String tr(String str, String sourceSet, String replacementSet){ return str.tr(sourceSet, replacementSet); } -$$ +' " "Commands (DDL)","CREATE CONSTANT"," -CREATE CONSTANT [ IF NOT EXISTS ] newConstantName VALUE expression +@h2@ CREATE CONSTANT [ IF NOT EXISTS ] [schemaName.]constantName +VALUE expression "," Creates a new constant. +Schema owner rights are required to execute this command. This command commits an open transaction in this connection. "," CREATE CONSTANT ONE VALUE 1 " "Commands (DDL)","CREATE DOMAIN"," -CREATE DOMAIN [ IF NOT EXISTS ] newDomainName AS dataType -[ DEFAULT expression ] [ [ NOT ] NULL ] [ SELECTIVITY selectivity ] -[ CHECK condition ] -"," -Creates a new data type (domain). The check condition must evaluate to true or -to NULL (to prevent NULL, use ""NOT NULL""). In the condition, the term VALUE refers -to the value being tested. - -Domains are usable within the whole database. They can not be created in a specific schema. +CREATE DOMAIN @h2@ [ IF NOT EXISTS ] [schemaName.]domainName +[ AS ] dataTypeOrDomain +[ DEFAULT expression ] +@h2@ [ ON UPDATE expression ] +@h2@ [ COMMENT expression ] +[ CHECK (condition) ] [...] +"," +Creates a new domain to define a set of permissible values. 
+Schema owner rights are required to execute this command. +Domains can be used as data types. +The domain constraints must evaluate to TRUE or to UNKNOWN. +In the conditions, the term VALUE refers to the value being tested. This command commits an open transaction in this connection. "," @@ -690,30 +824,30 @@ CREATE DOMAIN EMAIL AS VARCHAR(255) CHECK (POSITION('@', VALUE) > 1) " "Commands (DDL)","CREATE INDEX"," -CREATE -{ [ UNIQUE ] [ HASH | SPATIAL] INDEX [ [ IF NOT EXISTS ] newIndexName ] - | PRIMARY KEY [ HASH ] } -ON tableName ( indexColumn [,...] ) +@h2@ CREATE [ UNIQUE | SPATIAL ] INDEX +@h2@ [ [ IF NOT EXISTS ] [schemaName.]indexName ] +@h2@ ON [schemaName.]tableName ( indexColumn [,...] ) +@h2@ [ INCLUDE ( indexColumn [,...] ) ] "," Creates a new index. This command commits an open transaction in this connection. -Hash indexes are meant for in-memory databases and memory tables (CREATE MEMORY TABLE) when PageStore engine is used. -For other tables, or if the index contains multiple columns, the HASH keyword is ignored. -Hash indexes can only test for equality, do not support range queries (similar to a hash table), use more memory, -but can perform lookups faster. -Non-unique keys are supported. +INCLUDE clause may only be specified for UNIQUE indexes. +With this clause additional columns are included into index, but aren't used in unique checks. -Spatial indexes are supported only on Geometry columns. +Spatial indexes are supported only on GEOMETRY columns. +They may contain only one column and are used by the +[spatial overlapping operator](https://h2database.com/html/grammar.html#compare). 
"," CREATE INDEX IDXNAME ON TEST(NAME) " "Commands (DDL)","CREATE LINKED TABLE"," -CREATE [ FORCE ] [ [ GLOBAL | LOCAL ] TEMPORARY ] -LINKED TABLE [ IF NOT EXISTS ] -name ( driverString, urlString, userString, passwordString, -[ originalSchemaString, ] originalTableString ) [ EMIT UPDATES | READONLY ] +@h2@ CREATE [ FORCE ] [ [ GLOBAL | LOCAL ] TEMPORARY ] +@h2@ LINKED TABLE [ IF NOT EXISTS ] +@h2@ [schemaName.]tableName ( driverString, urlString, userString, passwordString, +@h2@ [ originalSchemaString, ] @h2@ originalTableString ) +@h2@ [ EMIT UPDATES | READONLY ] [ FETCH_SIZE sizeInt] [AUTOCOMMIT ON|OFF] "," Creates a table link to an external table. The driver name may be empty if the driver is already loaded. If the schema name is not set, only one table with @@ -728,6 +862,13 @@ work. Linked tables to the same database share one connection. READONLY - is set, the remote table may not be updated. This is enforced by H2. +FETCH_SIZE - the number of rows fetched, a hint with non-negative number of rows to fetch from the external table +at once, may be ignored by the driver of external database. 0 is default and means no hint. +The value is passed to ""java.sql.Statement.setFetchSize()"" method. + +AUTOCOMMIT - is set to ON, the auto-commit mode is enabled. OFF is disabled. +The value is passed to ""java.sql.Connection.setAutoCommit()"" method. + If the connection to the source database is lost, the connection is re-opened (this is a workaround for MySQL that disconnects after 8 hours of inactivity by default). @@ -741,9 +882,9 @@ be the resource name (for example ""java:comp/env/jdbc/Test""). Admin rights are required to execute this command. This command commits an open transaction in this connection. 
"," -CREATE LINKED TABLE LINK('org.h2.Driver', 'jdbc:h2:test2', +CREATE LINKED TABLE LINK('org.h2.Driver', 'jdbc:h2:./test2', 'sa', 'sa', 'TEST'); -CREATE LINKED TABLE LINK('', 'jdbc:h2:test2', 'sa', 'sa', +CREATE LINKED TABLE LINK('', 'jdbc:h2:./test2', 'sa', 'sa', '(SELECT * FROM TEST WHERE ID>0)'); CREATE LINKED TABLE LINK('javax.naming.InitialContext', 'java:comp/env/jdbc/Test', NULL, NULL, @@ -751,7 +892,7 @@ CREATE LINKED TABLE LINK('javax.naming.InitialContext', " "Commands (DDL)","CREATE ROLE"," -CREATE ROLE [ IF NOT EXISTS ] newRoleName +CREATE ROLE @h2@ [ IF NOT EXISTS ] newRoleName "," Creates a new role. This command commits an open transaction in this connection. @@ -760,13 +901,21 @@ CREATE ROLE READONLY " "Commands (DDL)","CREATE SCHEMA"," -CREATE SCHEMA [ IF NOT EXISTS ] name -[ AUTHORIZATION ownerUserName ] -[ WITH tableEngineParamName [,...] ] +CREATE SCHEMA @h2@ [ IF NOT EXISTS ] +{ name [ AUTHORIZATION ownerName ] | [ AUTHORIZATION ownerName ] } +@h2@ [ WITH tableEngineParamName [,...] ] "," -Creates a new schema. If no owner is specified, the current user is used. The -user that executes the command must have admin rights, as well as the owner. -Specifying the owner currently has no effect. +Creates a new schema. +Schema admin rights are required to execute this command. + +If schema name is not specified, the owner name is used as a schema name. +If schema name is specified, but no owner is specified, the current user is used as an owner. + +Schema owners can create, rename, and drop objects in the schema. +Schema owners can drop the schema itself, but cannot rename it. +Some objects may still require admin rights for their creation, +see documentation of their CREATE statements for details. + Optional table engine parameters are used when CREATE TABLE command is run on this schema without having its engine params set. 
@@ -776,31 +925,40 @@ CREATE SCHEMA TEST_SCHEMA AUTHORIZATION SA " "Commands (DDL)","CREATE SEQUENCE"," -CREATE SEQUENCE [ IF NOT EXISTS ] newSequenceName -sequenceOptions +CREATE SEQUENCE @h2@ [ IF NOT EXISTS ] [schemaName.]sequenceName +[ { AS dataType | sequenceOption } [...] ] "," Creates a new sequence. -The data type of a sequence is BIGINT. -Used values are never re-used, even when the transaction is rolled back. +Schema owner rights are required to execute this command. + +The data type of a sequence must be a numeric type, the default is BIGINT. +Sequence can produce only integer values. +For TINYINT the allowed values are between -128 and 127. +For SMALLINT the allowed values are between -32768 and 32767. +For INTEGER the allowed values are between -2147483648 and 2147483647. +For BIGINT the allowed values are between -9223372036854775808 and 9223372036854775807. +For NUMERIC and DECFLOAT the allowed values depend on precision, +but cannot exceed the range of BIGINT data type (from -9223372036854775808 to 9223372036854775807); +the scale of NUMERIC must be 0. +For REAL the allowed values are between -16777216 and 16777216. +For DOUBLE PRECISION the allowed values are between -9007199254740992 and 9007199254740992. -The cache is the number of pre-allocated numbers. -If the system crashes without closing the database, at most this many numbers are lost. -The default cache size is 32. -To disable caching, use the cache size 1 or lower. +Used values are never re-used, even when the transaction is rolled back. This command commits an open transaction in this connection. "," -CREATE SEQUENCE SEQ_ID +CREATE SEQUENCE SEQ_ID; +CREATE SEQUENCE SEQ2 AS INTEGER START WITH 10; " "Commands (DDL)","CREATE TABLE"," -CREATE [ CACHED | MEMORY ] [ TEMP | [ GLOBAL | LOCAL ] TEMPORARY ] -TABLE [ IF NOT EXISTS ] name -[ ( { columnName [columnDefinition] | constraint } [,...] ) ] -[ ENGINE tableEngineName ] -[ WITH tableEngineParamName [,...] 
] -[ NOT PERSISTENT ] [ TRANSACTIONAL ] -[ AS select [ WITH [ NO ] DATA ] ]"," +CREATE @h2@ [ CACHED | MEMORY ] [ @c@ { TEMP } | [ GLOBAL | LOCAL ] TEMPORARY ] +TABLE @h2@ [ IF NOT EXISTS ] [schemaName.]tableName +[ ( { columnName [columnDefinition] | tableConstraintDefinition } [,...] ) ] +@h2@ [ ENGINE tableEngineName ] +@h2@ [ WITH tableEngineParamName [,...] ] +@h2@ [ NOT PERSISTENT ] @h2@ [ TRANSACTIONAL ] +[ AS query [ WITH [ NO ] DATA ] ]"," Creates a new table. Cached tables (the default for regular tables) are persistent, @@ -837,14 +995,16 @@ CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)) " "Commands (DDL)","CREATE TRIGGER"," -CREATE TRIGGER [ IF NOT EXISTS ] newTriggerName +CREATE TRIGGER @h2@ [ IF NOT EXISTS ] [schemaName.]triggerName { BEFORE | AFTER | INSTEAD OF } -{ INSERT | UPDATE | DELETE | SELECT | ROLLBACK } -[,...] ON tableName [ FOR EACH ROW ] -[ QUEUE int ] [ NOWAIT ] -{ CALL triggeredClassName | AS sourceCodeString } +{ INSERT | UPDATE | DELETE | @h2@ { SELECT | ROLLBACK } } +@h2@ [,...] ON [schemaName.]tableName [ FOR EACH { ROW | STATEMENT } ] +@c@ [ QUEUE int ] @h2@ [ NOWAIT ] +@h2@ { CALL triggeredClassNameString | AS sourceCodeString } "," Creates a new trigger. +Admin rights are required to execute this command. + The trigger class must be public and implement ""org.h2.api.Trigger"". Inner classes are not supported. The class must be available in the classpath of the database engine @@ -870,6 +1030,9 @@ ROLLBACK triggers are only required if an operation communicates outside of the INSTEAD OF triggers are implicitly row based and behave like BEFORE triggers. Only the first such trigger is called. Such triggers on views are supported. They can be used to make views updatable. 
+These triggers on INSERT and UPDATE must update the passed new row to values that were actually inserted +by the trigger; they are used for [FINAL TABLE](https://h2database.com/html/grammar.html#data_change_delta_table) +and for retrieval of generated keys. A BEFORE SELECT trigger is fired just before the database engine tries to read from the table. The trigger can be used to update a table on demand. @@ -891,14 +1054,17 @@ The schema name does not need to be specified when creating the trigger. This command commits an open transaction in this connection. "," -CREATE TRIGGER TRIG_INS BEFORE INSERT ON TEST FOR EACH ROW CALL ""MyTrigger""; -CREATE TRIGGER TRIG_SRC BEFORE INSERT ON TEST AS $$org.h2.api.Trigger create() { return new MyTrigger(""constructorParam""); } $$; -CREATE TRIGGER TRIG_JS BEFORE INSERT ON TEST AS $$//javascript\nreturn new Packages.MyTrigger(""constructorParam""); $$; -CREATE TRIGGER TRIG_RUBY BEFORE INSERT ON TEST AS $$#ruby\nJava::MyPackage::MyTrigger.new(""constructorParam"") $$; +CREATE TRIGGER TRIG_INS BEFORE INSERT ON TEST FOR EACH ROW CALL 'MyTrigger'; +CREATE TRIGGER TRIG_SRC BEFORE INSERT ON TEST AS + 'org.h2.api.Trigger create() { return new MyTrigger(""constructorParam""); }'; +CREATE TRIGGER TRIG_JS BEFORE INSERT ON TEST AS '//javascript +return new Packages.MyTrigger(""constructorParam"");'; +CREATE TRIGGER TRIG_RUBY BEFORE INSERT ON TEST AS '#ruby +Java::MyPackage::MyTrigger.new(""constructorParam"")'; " "Commands (DDL)","CREATE USER"," -CREATE USER [ IF NOT EXISTS ] newUserName -{ PASSWORD string | SALT bytes HASH bytes } [ ADMIN ] +@h2@ CREATE USER [ IF NOT EXISTS ] newUserName +@h2@ { PASSWORD string | SALT bytes HASH bytes } @h2@ [ ADMIN ] "," Creates a new user. For compatibility, only unquoted or uppercase user names are allowed. The password must be in single quotes. It is case sensitive and can contain spaces. 
@@ -911,11 +1077,13 @@ CREATE USER GUEST PASSWORD 'abc' " "Commands (DDL)","CREATE VIEW"," -CREATE [ OR REPLACE ] [ FORCE ] VIEW [ IF NOT EXISTS ] newViewName -[ ( columnName [,...] ) ] AS select +CREATE @h2@ [ OR REPLACE ] @h2@ [ FORCE ] +VIEW @h2@ [ IF NOT EXISTS ] [schemaName.]viewName +[ ( columnName [,...] ) ] AS query "," Creates a new view. If the force option is used, then the view is created even if the underlying table(s) don't exist. +Schema owner rights are required to execute this command. If the OR REPLACE clause is used an existing view will be replaced, and any dependent views will not need to be recreated. If dependent views will become @@ -924,36 +1092,35 @@ can be ignored if the FORCE clause is also used. Views are not updatable except when using 'instead of' triggers. -Admin rights are required to execute this command. This command commits an open transaction in this connection. "," CREATE VIEW TEST_VIEW AS SELECT * FROM TEST WHERE ID < 100 " "Commands (DDL)","DROP AGGREGATE"," -DROP AGGREGATE [ IF EXISTS ] aggregateName +@h2@ DROP AGGREGATE [ IF EXISTS ] aggregateName "," Drops an existing user-defined aggregate function. +Schema owner rights are required to execute this command. -Admin rights are required to execute this command. This command commits an open transaction in this connection. "," DROP AGGREGATE SIMPLE_MEDIAN " "Commands (DDL)","DROP ALIAS"," -DROP ALIAS [ IF EXISTS ] existingFunctionAliasName +@h2@ DROP ALIAS [ IF EXISTS ] [schemaName.]aliasName "," Drops an existing function alias. +Schema owner rights are required to execute this command. -Admin rights are required to execute this command. This command commits an open transaction in this connection. 
"," DROP ALIAS MY_SQRT " "Commands (DDL)","DROP ALL OBJECTS"," -DROP ALL OBJECTS [ DELETE FILES ] +@h2@ DROP ALL OBJECTS [ DELETE FILES ] "," Drops all existing views, tables, sequences, schemas, function aliases, roles, user-defined aggregate functions, domains, and users (except the current user). @@ -967,27 +1134,33 @@ DROP ALL OBJECTS " "Commands (DDL)","DROP CONSTANT"," -DROP CONSTANT [ IF EXISTS ] constantName +@h2@ DROP CONSTANT [ IF EXISTS ] [schemaName.]constantName "," Drops a constant. +Schema owner rights are required to execute this command. This command commits an open transaction in this connection. "," DROP CONSTANT ONE " "Commands (DDL)","DROP DOMAIN"," -DROP DOMAIN [ IF EXISTS ] domainName [ RESTRICT | CASCADE ] +DROP DOMAIN @h2@ [ IF EXISTS ] [schemaName.]domainName [ RESTRICT | CASCADE ] "," Drops a data type (domain). -The command will fail if it is referenced by a column (the default). +Schema owner rights are required to execute this command. + +The command will fail if it is referenced by a column or another domain (the default). Column descriptors are replaced with original definition of specified domain if the CASCADE clause is used. +Default and on update expressions are copied into domains and columns that use this domain and don't have own +expressions. Domain constraints are copied into domains that use this domain and to columns (as check constraints) that +use this domain. This command commits an open transaction in this connection. "," DROP DOMAIN EMAIL " "Commands (DDL)","DROP INDEX"," -DROP INDEX [ IF EXISTS ] indexName +@h2@ DROP INDEX [ IF EXISTS ] [schemaName.]indexName "," Drops an index. This command commits an open transaction in this connection. @@ -996,18 +1169,20 @@ DROP INDEX IF EXISTS IDXNAME " "Commands (DDL)","DROP ROLE"," -DROP ROLE [ IF EXISTS ] roleName +DROP ROLE @h2@ [ IF EXISTS ] roleName "," Drops a role. +Admin rights are required to execute this command. 
This command commits an open transaction in this connection. "," DROP ROLE READONLY " "Commands (DDL)","DROP SCHEMA"," -DROP SCHEMA [ IF EXISTS ] schemaName [ RESTRICT | CASCADE ] +DROP SCHEMA @h2@ [ IF EXISTS ] schemaName [ RESTRICT | CASCADE ] "," Drops a schema. +Schema owner rights are required to execute this command. The command will fail if objects in this schema exist and the RESTRICT clause is used (the default). All objects in this schema are dropped as well if the CASCADE clause is used. This command commits an open transaction in this connection. @@ -1016,16 +1191,18 @@ DROP SCHEMA TEST_SCHEMA " "Commands (DDL)","DROP SEQUENCE"," -DROP SEQUENCE [ IF EXISTS ] sequenceName +DROP SEQUENCE @h2@ [ IF EXISTS ] [schemaName.]sequenceName "," Drops a sequence. +Schema owner rights are required to execute this command. This command commits an open transaction in this connection. "," DROP SEQUENCE SEQ_ID " "Commands (DDL)","DROP TABLE"," -DROP TABLE [ IF EXISTS ] tableName [,...] [ RESTRICT | CASCADE ] +DROP TABLE @h2@ [ IF EXISTS ] [schemaName.]tableName @h2@ [,...] +[ RESTRICT | CASCADE ] "," Drops an existing table, or a list of tables. The command will fail if dependent objects exist and the RESTRICT clause is used (the default). @@ -1036,7 +1213,7 @@ DROP TABLE TEST " "Commands (DDL)","DROP TRIGGER"," -DROP TRIGGER [ IF EXISTS ] triggerName +DROP TRIGGER @h2@ [ IF EXISTS ] [schemaName.]triggerName "," Drops an existing trigger. This command commits an open transaction in this connection. @@ -1045,7 +1222,7 @@ DROP TRIGGER TRIG_INS " "Commands (DDL)","DROP USER"," -DROP USER [ IF EXISTS ] userName +@h2@ DROP USER [ IF EXISTS ] userName "," Drops a user. The current user cannot be dropped. For compatibility, only unquoted or uppercase user names are allowed. 
@@ -1057,9 +1234,10 @@ DROP USER TOM " "Commands (DDL)","DROP VIEW"," -DROP VIEW [ IF EXISTS ] viewName [ RESTRICT | CASCADE ] +DROP VIEW @h2@ [ IF EXISTS ] [schemaName.]viewName [ RESTRICT | CASCADE ] "," Drops an existing view. +Schema owner rights are required to execute this command. All dependent views are dropped as well if the CASCADE clause is used (the default). The command will fail if dependent views exist and the RESTRICT clause is used. This command commits an open transaction in this connection. @@ -1068,7 +1246,7 @@ DROP VIEW TEST_VIEW " "Commands (DDL)","TRUNCATE TABLE"," -TRUNCATE TABLE tableName [ [ CONTINUE | RESTART ] IDENTITY ] +TRUNCATE TABLE [schemaName.]tableName [ [ CONTINUE | RESTART ] IDENTITY ] "," Removes all rows from a table. Unlike DELETE FROM without where clause, this command can not be rolled back. @@ -1076,7 +1254,7 @@ This command is faster than DELETE without where clause. Only regular data tables without foreign key constraints can be truncated (except if referential integrity is disabled for this database or for this table). Linked tables can't be truncated. -If RESTART IDENTITY is specified next values for auto-incremented columns are restarted. +If RESTART IDENTITY is specified next values for identity columns are restarted. This command commits an open transaction in this connection. "," @@ -1084,7 +1262,7 @@ TRUNCATE TABLE TEST " "Commands (Other)","CHECKPOINT"," -CHECKPOINT +@h2@ CHECKPOINT "," Flushes the data to disk. @@ -1094,7 +1272,7 @@ CHECKPOINT " "Commands (Other)","CHECKPOINT SYNC"," -CHECKPOINT SYNC +@h2@ CHECKPOINT SYNC "," Flushes the data to disk and forces all system buffers be written to the underlying device. @@ -1113,7 +1291,7 @@ COMMIT " "Commands (Other)","COMMIT TRANSACTION"," -COMMIT TRANSACTION transactionName +@h2@ COMMIT TRANSACTION transactionName "," Sets the resolution of an in-doubt transaction to 'commit'. 
@@ -1124,22 +1302,24 @@ COMMIT TRANSACTION XID_TEST " "Commands (Other)","GRANT RIGHT"," -GRANT { SELECT | INSERT | UPDATE | DELETE | ALL } [,...] ON -{ { SCHEMA schemaName } | { tableName [,...] } } +GRANT { { SELECT | INSERT | UPDATE | DELETE } [,..] | ALL [ PRIVILEGES ] } ON +{ @h2@ { SCHEMA schemaName } | { [ TABLE ] [schemaName.]tableName @h2@ [,...] } } TO { PUBLIC | userName | roleName } "," Grants rights for a table to a user or role. -Admin rights are required to execute this command. +Schema owner rights are required to execute this command. This command commits an open transaction in this connection. "," GRANT SELECT ON TEST TO READONLY " "Commands (Other)","GRANT ALTER ANY SCHEMA"," -GRANT ALTER ANY SCHEMA TO userName +@h2@ GRANT ALTER ANY SCHEMA TO userName "," -Grant schema altering rights to a user. +Grant schema admin rights to a user. + +Schema admin can create, rename, or drop schemas and also has schema owner rights in every schema. Admin rights are required to execute this command. This command commits an open transaction in this connection. @@ -1148,7 +1328,7 @@ GRANT ALTER ANY SCHEMA TO Bob " "Commands (Other)","GRANT ROLE"," -GRANT roleName TO { PUBLIC | userName | roleName } +GRANT { roleName [,...] } TO { PUBLIC | userName | roleName } "," Grants a role to a user or role. @@ -1159,7 +1339,7 @@ GRANT READONLY TO PUBLIC " "Commands (Other)","HELP"," -HELP [ anything [...] ] +@h2@ HELP [ anything [...] ] "," Displays the help pages of SQL commands or keywords. "," @@ -1167,7 +1347,7 @@ HELP SELECT " "Commands (Other)","PREPARE COMMIT"," -PREPARE COMMIT newTransactionName +@h2@ PREPARE COMMIT newTransactionName "," Prepares committing a transaction. This command is part of the 2-phase-commit protocol. @@ -1176,20 +1356,31 @@ PREPARE COMMIT XID_TEST " "Commands (Other)","REVOKE RIGHT"," -REVOKE { SELECT | INSERT | UPDATE | DELETE | ALL } [,...] ON -{ { SCHEMA schemaName } | { tableName [,...] 
} } +REVOKE { { SELECT | INSERT | UPDATE | DELETE } [,..] | ALL [ PRIVILEGES ] } ON +{ @h2@ { SCHEMA schemaName } | { [ TABLE ] [schemaName.]tableName @h2@ [,...] } } FROM { PUBLIC | userName | roleName } "," Removes rights for a table from a user or role. -Admin rights are required to execute this command. +Schema owner rights are required to execute this command. This command commits an open transaction in this connection. "," REVOKE SELECT ON TEST FROM READONLY " +"Commands (Other)","REVOKE ALTER ANY SCHEMA"," +@h2@ REVOKE ALTER ANY SCHEMA FROM userName +"," +Removes schema admin rights from a user. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +REVOKE ALTER ANY SCHEMA FROM Bob +" + "Commands (Other)","REVOKE ROLE"," -REVOKE roleName FROM { PUBLIC | userName | roleName } +REVOKE { roleName [,...] } FROM { PUBLIC | userName | roleName } "," Removes a role from a user or role. @@ -1200,7 +1391,7 @@ REVOKE READONLY FROM TOM " "Commands (Other)","ROLLBACK"," -ROLLBACK [ TO SAVEPOINT savepointName ] +ROLLBACK [ WORK ] [ TO SAVEPOINT savepointName ] "," Rolls back a transaction. If a savepoint name is used, the transaction is only rolled back to the specified savepoint. @@ -1209,7 +1400,7 @@ ROLLBACK " "Commands (Other)","ROLLBACK TRANSACTION"," -ROLLBACK TRANSACTION transactionName +@h2@ ROLLBACK TRANSACTION transactionName "," Sets the resolution of an in-doubt transaction to 'rollback'. @@ -1229,7 +1420,7 @@ SAVEPOINT HALF_DONE " "Commands (Other)","SET @"," -SET @variableName [ = ] expression +@h2@ SET @variableName [ = ] expression "," Updates a user-defined variable. Variables are not persisted and session scoped, that means only visible from within the session in which they are defined.
@@ -1239,7 +1430,7 @@ SET @TOTAL=0 " "Commands (Other)","SET ALLOW_LITERALS"," -SET ALLOW_LITERALS { NONE | ALL | NUMBERS } +@h2@ SET ALLOW_LITERALS { NONE | ALL | NUMBERS } "," This setting can help solve the SQL injection problem. By default, text and number literals are allowed in SQL statements. However, this enables SQL @@ -1255,16 +1446,16 @@ See also CREATE CONSTANT. Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;ALLOW_LITERALS=NONE"" +This setting can be appended to the database URL: ""jdbc:h2:./test;ALLOW_LITERALS=NONE"" "," SET ALLOW_LITERALS NONE " "Commands (Other)","SET AUTOCOMMIT"," -SET AUTOCOMMIT { TRUE | ON | FALSE | OFF } +@h2@ SET AUTOCOMMIT { TRUE | ON | FALSE | OFF } "," Switches auto commit on or off. -This setting can be appended to the database URL: ""jdbc:h2:test;AUTOCOMMIT=OFF"" - +This setting can be appended to the database URL: ""jdbc:h2:./test;AUTOCOMMIT=OFF"" - however this will not work as expected when using a connection pool (the connection pool manager will re-enable autocommit when returning the connection to the pool, so autocommit will only be disabled the first @@ -1274,7 +1465,7 @@ SET AUTOCOMMIT OFF " "Commands (Other)","SET CACHE_SIZE"," -SET CACHE_SIZE int +@h2@ SET CACHE_SIZE int "," Sets the size of the cache in KB (each KB being 1024 bytes) for the current database. The default is 65536 per available GB of RAM, i.e. 64 MB per GB. @@ -1290,13 +1481,13 @@ It has no effect for in-memory databases. Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. This setting is persistent. 
-This setting can be appended to the database URL: ""jdbc:h2:test;CACHE_SIZE=8192"" +This setting can be appended to the database URL: ""jdbc:h2:./test;CACHE_SIZE=8192"" "," SET CACHE_SIZE 8192 " "Commands (Other)","SET CLUSTER"," -SET CLUSTER serverListString +@h2@ SET CLUSTER serverListString "," This command should not be used directly by an application, the statement is executed automatically by the system. The behavior may change in future @@ -1310,53 +1501,32 @@ This command is effective immediately, but does not commit an open transaction. SET CLUSTER '' " -"Commands (Other)","SET BINARY_COLLATION"," -SET BINARY_COLLATION { UNSIGNED | SIGNED } -"," -Sets the collation used for comparing BINARY columns, the default is SIGNED -for version 1.3 and older, and UNSIGNED for version 1.4 and newer. -This command can only be executed if there are no tables defined. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -This setting is persistent. -"," -SET BINARY_COLLATION SIGNED -" - -"Commands (Other)","SET UUID_COLLATION"," -SET UUID_COLLATION { UNSIGNED | SIGNED } +"Commands (Other)","SET BUILTIN_ALIAS_OVERRIDE"," +@h2@ SET BUILTIN_ALIAS_OVERRIDE { TRUE | FALSE } "," -Sets the collation used for comparing UUID columns, the default is SIGNED. -This command can only be executed if there are no tables defined. - -SIGNED means signed comparison between first 64 bits of compared values treated as long values -and if they are equal a signed comparison of the last 64 bits of compared values treated as long values. -See also Java ""UUID.compareTo()"". -UNSIGNED means RFC 4122 compatible unsigned comparison. +Allows the overriding of the builtin system date/time functions +for unit testing purposes. Admin rights are required to execute this command. This command commits an open transaction in this connection. -This setting is persistent. 
"," -SET UUID_COLLATION UNSIGNED +SET BUILTIN_ALIAS_OVERRIDE TRUE " -"Commands (Other)","SET BUILTIN_ALIAS_OVERRIDE"," -SET BUILTIN_ALIAS_OVERRIDE { TRUE | FALSE } +"Commands (Other)","SET CATALOG"," +SET CATALOG { catalogString | @h2@ { catalogName } } "," -Allows the overriding of the builtin system date/time functions -for unit testing purposes. +This command has no effect if the specified name matches the name of the database, otherwise it throws an exception. -Admin rights are required to execute this command. -This command commits an open transaction in this connection. +This command does not commit a transaction. "," -SET BUILTIN_ALIAS_OVERRIDE TRUE +SET CATALOG 'DB' +SET CATALOG DB_NAME " "Commands (Other)","SET COLLATION"," -SET [ DATABASE ] COLLATION -{ OFF | collationName +@h2@ SET [ DATABASE ] COLLATION +@h2@ { OFF | collationName [ STRENGTH { PRIMARY | SECONDARY | TERTIARY | IDENTICAL } ] } "," Sets the collation used for comparing strings. @@ -1376,44 +1546,27 @@ strings according to the binary representation in the given charset. Admin rights are required to execute this command. This command commits an open transaction in this connection. This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;COLLATION='ENGLISH'"" +This setting can be appended to the database URL: ""jdbc:h2:./test;COLLATION='ENGLISH'"" "," SET COLLATION ENGLISH SET COLLATION CHARSET_CP500 " -"Commands (Other)","SET COMPRESS_LOB"," -SET COMPRESS_LOB { NO | LZF | DEFLATE } -"," -This feature is only available for the PageStore storage engine. -For the MVStore engine (the default for H2 version 1.4.x), -append "";COMPRESS=TRUE"" to the database URL instead. - -Sets the compression algorithm for BLOB and CLOB data. Compression is usually -slower, but needs less disk space. LZF is faster but uses more space. - -Admin rights are required to execute this command, as it affects all connections. 
-This command commits an open transaction in this connection. -This setting is persistent. -"," -SET COMPRESS_LOB LZF -" - "Commands (Other)","SET DATABASE_EVENT_LISTENER"," -SET DATABASE_EVENT_LISTENER classNameString +@h2@ SET DATABASE_EVENT_LISTENER classNameString "," Sets the event listener class. An empty string ('') means no listener should be used. This setting is not persistent. Admin rights are required to execute this command, except if it is set when opening the database (in this case it is reset just after opening the database). -This setting can be appended to the database URL: ""jdbc:h2:test;DATABASE_EVENT_LISTENER='sample.MyListener'"" +This setting can be appended to the database URL: ""jdbc:h2:./test;DATABASE_EVENT_LISTENER='sample.MyListener'"" "," SET DATABASE_EVENT_LISTENER 'sample.MyListener' " "Commands (Other)","SET DB_CLOSE_DELAY"," -SET DB_CLOSE_DELAY int +@h2@ SET DB_CLOSE_DELAY int "," Sets the delay for closing a database if all connections are closed. The value -1 means the database is never closed until the close delay is set to some other value or SHUTDOWN is called. @@ -1425,13 +1578,13 @@ If the application exits normally or System.exit is called, the database is clos Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;DB_CLOSE_DELAY=-1"" +This setting can be appended to the database URL: ""jdbc:h2:./test;DB_CLOSE_DELAY=-1"" "," SET DB_CLOSE_DELAY -1 " "Commands (Other)","SET DEFAULT_LOCK_TIMEOUT"," -SET DEFAULT LOCK_TIMEOUT int +@h2@ SET DEFAULT LOCK_TIMEOUT int "," Sets the default lock timeout (in milliseconds) in this database that is used for the new sessions. The default value for this setting is 1000 (one second). @@ -1443,8 +1596,36 @@ This setting is persistent. 
SET DEFAULT_LOCK_TIMEOUT 5000 " +"Commands (Other)","SET DEFAULT_NULL_ORDERING"," +@h2@ SET DEFAULT_NULL_ORDERING { LOW | HIGH | FIRST | LAST } +"," +Changes the default ordering of NULL values. +This setting affects new indexes without explicit NULLS FIRST or NULLS LAST columns, +and ordering clauses of other commands without explicit null ordering. +This setting doesn't affect ordering of NULL values inside ARRAY or ROW values +(""ARRAY[NULL]"" is always considered as smaller than ""ARRAY[1]"" during sorting). + +LOW is the default one, NULL values are considered as smaller than other values during sorting. + +With HIGH default ordering NULL values are considered as larger than other values during sorting. + +With FIRST default ordering NULL values are sorted before other values, +no matter if ascending or descending order is used. + +With LAST default ordering NULL values are sorted after other values, +no matter if ascending or descending order is used. + +This setting is not persistent, but indexes are persisted with explicit NULLS FIRST or NULLS LAST ordering +and aren't affected by changes in this setting. +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting can be appended to the database URL: ""jdbc:h2:./test;DEFAULT_NULL_ORDERING=HIGH"" +"," +SET DEFAULT_NULL_ORDERING HIGH +" + "Commands (Other)","SET DEFAULT_TABLE_TYPE"," -SET DEFAULT_TABLE_TYPE { MEMORY | CACHED } +@h2@ SET DEFAULT_TABLE_TYPE { MEMORY | CACHED } "," Sets the default table storage type that is used when creating new tables. Memory tables are kept fully in the main memory (including indexes), however @@ -1460,7 +1641,7 @@ SET DEFAULT_TABLE_TYPE MEMORY " "Commands (Other)","SET EXCLUSIVE"," -SET EXCLUSIVE { 0 | 1 | 2 } +@h2@ SET EXCLUSIVE { 0 | 1 | 2 } "," Switched the database to exclusive mode (1, 2) and back to normal mode (0).
@@ -1480,7 +1661,7 @@ SET EXCLUSIVE 1 " "Commands (Other)","SET IGNORECASE"," -SET IGNORECASE { TRUE | FALSE } +@h2@ SET IGNORECASE { TRUE | FALSE } "," If IGNORECASE is enabled, text columns in newly created tables will be case-insensitive. Already existing tables are not affected. The effect of @@ -1491,14 +1672,25 @@ String literals and parameters are however still considered case sensitive even Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;IGNORECASE=TRUE"" +This setting can be appended to the database URL: ""jdbc:h2:./test;IGNORECASE=TRUE"" "," SET IGNORECASE TRUE " +"Commands (Other)","SET IGNORE_CATALOGS"," +@c@ SET IGNORE_CATALOGS { TRUE | FALSE } +"," +If IGNORE_CATALOGS is enabled, catalog names in front of schema names will be ignored. This can be used if +multiple catalogs used by the same connections must be simulated. Caveat: if both catalogs contain schemas of the +same name and if those schemas contain objects of the same name, this will lead to errors, when trying to manage, +access or change these objects. +This setting can be appended to the database URL: ""jdbc:h2:./test;IGNORE_CATALOGS=TRUE"" +"," +SET IGNORE_CATALOGS TRUE +" + "Commands (Other)","SET JAVA_OBJECT_SERIALIZER"," -SET JAVA_OBJECT_SERIALIZER -{ null | className } +@h2@ SET JAVA_OBJECT_SERIALIZER { null | className } "," Sets the object used to serialize and deserialize java objects being stored in column of type OTHER. The serializer class must be public and implement ""org.h2.api.JavaObjectSerializer"". @@ -1510,13 +1702,13 @@ This command can only be executed if there are no tables defined. Admin rights are required to execute this command. This command commits an open transaction in this connection. This setting is persistent. 
-This setting can be appended to the database URL: ""jdbc:h2:test;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'"" +This setting can be appended to the database URL: ""jdbc:h2:./test;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'"" "," SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName' " "Commands (Other)","SET LAZY_QUERY_EXECUTION"," -SET LAZY_QUERY_EXECUTION int +@h2@ SET LAZY_QUERY_EXECUTION int "," Sets the lazy query execution mode. The values 0, 1 are supported. @@ -1524,86 +1716,53 @@ If true, then large results are retrieved in chunks. Note that not all queries support this feature, queries which do not are processed normally. -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is not persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;LAZY_QUERY_EXECUTION=1"" +This command does not commit a transaction, and rollback does not affect it. +This setting can be appended to the database URL: ""jdbc:h2:./test;LAZY_QUERY_EXECUTION=1"" "," SET LAZY_QUERY_EXECUTION 1 " -"Commands (Other)","SET LOG"," -SET LOG int -"," -Sets the transaction log mode. The values 0, 1, and 2 are supported, the default is 2. -This setting affects all connections. - -LOG 0 means the transaction log is disabled completely. It is the fastest mode, -but also the most dangerous: if the process is killed while the database is open in this mode, -the data might be lost. It must only be used if this is not a problem, for example when -initially loading a database, or when running tests. - -LOG 1 means the transaction log is enabled, but FileDescriptor.sync is disabled. -This setting is about half as fast as with LOG 0. This setting is useful if no protection -against power failure is required, but the data must be protected against killing the process. 
- -LOG 2 (the default) means the transaction log is enabled, and FileDescriptor.sync is called -for each checkpoint. This setting is about half as fast as LOG 1. Depending on the -file system, this will also protect against power failure in the majority if cases. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is not persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;LOG=0"" -"," -SET LOG 1 -" - "Commands (Other)","SET LOCK_MODE"," -SET LOCK_MODE int +@h2@ SET LOCK_MODE int "," -Sets the lock mode. The values 0, 1, 2, and 3 are supported. The default is 3 -(READ_COMMITTED). This setting affects all connections. +Sets the lock mode. The values 0, 1, 2, and 3 are supported. The default is 3. +This setting affects all connections. -The value 0 means no locking (should only be used for testing; also known as -READ_UNCOMMITTED). Please note that using SET LOCK_MODE 0 while at the same time +The value 0 means no locking (should only be used for testing). +Please note that using SET LOCK_MODE 0 while at the same time using multiple connections may result in inconsistent transactions. -The value 1 means table level locking (also known as SERIALIZABLE). +The value 3 means row-level locking for write operations. -The value 2 means table level locking with garbage collection (if the -application does not close all connections). - -The value 3 means table level locking, but read locks are released immediately -(default; also known as READ_COMMITTED). +The values 1 and 2 have the same effect as 3. Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. This setting is persistent. 
-This setting can be appended to the database URL: ""jdbc:h2:test;LOCK_MODE=3"" +This setting can be appended to the database URL: ""jdbc:h2:./test;LOCK_MODE=0"" "," -SET LOCK_MODE 1 +SET LOCK_MODE 0 " "Commands (Other)","SET LOCK_TIMEOUT"," -SET LOCK_TIMEOUT int +@h2@ SET LOCK_TIMEOUT int "," Sets the lock timeout (in milliseconds) for the current session. The default value for this setting is 1000 (one second). This command does not commit a transaction, and rollback does not affect it. -This setting can be appended to the database URL: ""jdbc:h2:test;LOCK_TIMEOUT=10000"" +This setting can be appended to the database URL: ""jdbc:h2:./test;LOCK_TIMEOUT=10000"" "," SET LOCK_TIMEOUT 1000 " "Commands (Other)","SET MAX_LENGTH_INPLACE_LOB"," -SET MAX_LENGTH_INPLACE_LOB int +@h2@ SET MAX_LENGTH_INPLACE_LOB int "," Sets the maximum size of an in-place LOB object. This is the maximum length of an LOB that is stored with the record itself, -and the default value is 128. +and the default value is 256. Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. @@ -1613,7 +1772,7 @@ SET MAX_LENGTH_INPLACE_LOB 128 " "Commands (Other)","SET MAX_LOG_SIZE"," -SET MAX_LOG_SIZE int +@h2@ SET MAX_LOG_SIZE int "," Sets the maximum size of the transaction log, in megabytes. If the log is larger, and if there is no open transaction, the transaction log is truncated. @@ -1629,7 +1788,7 @@ SET MAX_LOG_SIZE 2 " "Commands (Other)","SET MAX_MEMORY_ROWS"," -SET MAX_MEMORY_ROWS int +@h2@ SET MAX_MEMORY_ROWS int "," The maximum number of rows in a result set that are kept in-memory. If more rows are read, then the rows are buffered to disk. @@ -1644,7 +1803,7 @@ SET MAX_MEMORY_ROWS 1000 " "Commands (Other)","SET MAX_MEMORY_UNDO"," -SET MAX_MEMORY_UNDO int +@h2@ SET MAX_MEMORY_UNDO int "," The maximum number of undo records per a session that are kept in-memory. 
If a transaction is larger, the records are buffered to disk. @@ -1661,7 +1820,7 @@ SET MAX_MEMORY_UNDO 1000 " "Commands (Other)","SET MAX_OPERATION_MEMORY"," -SET MAX_OPERATION_MEMORY int +@h2@ SET MAX_OPERATION_MEMORY int "," Sets the maximum memory used for large operations (delete and insert), in bytes. Operations that use more memory are buffered to disk, slowing down the @@ -1670,44 +1829,40 @@ operation. The default max size is 100000. 0 means no limit. This setting is not persistent. Admin rights are required to execute this command, as it affects all connections. It has no effect for in-memory databases. -This setting can be appended to the database URL: ""jdbc:h2:test;MAX_OPERATION_MEMORY=10000"" +This setting can be appended to the database URL: ""jdbc:h2:./test;MAX_OPERATION_MEMORY=10000"" "," SET MAX_OPERATION_MEMORY 0 " "Commands (Other)","SET MODE"," -SET MODE { REGULAR | DB2 | DERBY | HSQLDB | MSSQLSERVER | MYSQL | ORACLE | POSTGRESQL } +@h2@ SET MODE { REGULAR | STRICT | LEGACY | DB2 | DERBY | HSQLDB | MSSQLSERVER | MYSQL | ORACLE | POSTGRESQL } "," -Changes to another database compatibility mode. For details, see Compatibility -Modes in the feature section. +Changes to another database compatibility mode. For details, see +[Compatibility Modes](https://h2database.com/html/features.html#compatibility_modes). This setting is not persistent. Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. -This setting can be appended to the database URL: ""jdbc:h2:test;MODE=MYSQL"" +This setting can be appended to the database URL: ""jdbc:h2:./test;MODE=MYSQL"" "," SET MODE HSQLDB " -"Commands (Other)","SET MULTI_THREADED"," -SET MULTI_THREADED { 0 | 1 } +"Commands (Other)","SET NON_KEYWORDS"," +@h2@ SET NON_KEYWORDS [ name [,...] ] "," -Enabled (1) or disabled (0) multi-threading inside the database engine. 
-MULTI_THREADED is enabled by default with default MVStore storage engine. -MULTI_THREADED is disabled by default when using PageStore storage engine, enabling this with PageStore is experimental only. - -This is a global setting, which means it is not possible to open multiple databases with different modes at the same time in the same virtual machine. -This setting is not persistent, however the value is kept until the virtual machine exits or it is changed. +Converts the specified tokens from keywords to plain identifiers for the current session. +This setting may break some commands and should be used with caution and only when necessary. +Use [quoted identifiers](https://h2database.com/html/grammar.html#quoted_name) instead of this setting if possible. -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting can be appended to the database URL: ""jdbc:h2:test;MULTI_THREADED=1"" +This command does not commit a transaction, and rollback does not affect it. +This setting can be appended to the database URL: ""jdbc:h2:./test;NON_KEYWORDS=KEY,VALUE"" "," -SET MULTI_THREADED 1 +SET NON_KEYWORDS KEY, VALUE " "Commands (Other)","SET OPTIMIZE_REUSE_RESULTS"," -SET OPTIMIZE_REUSE_RESULTS { 0 | 1 } +@h2@ SET OPTIMIZE_REUSE_RESULTS { 0 | 1 } "," Enabled (1) or disabled (0) the result reuse optimization. If enabled, subqueries and views used as subqueries are only re-run if the data in one of @@ -1715,13 +1870,13 @@ the tables was changed. This option is enabled by default. Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. 
-This setting can be appended to the database URL: ""jdbc:h2:test;OPTIMIZE_REUSE_RESULTS=0"" +This setting can be appended to the database URL: ""jdbc:h2:./test;OPTIMIZE_REUSE_RESULTS=0"" "," SET OPTIMIZE_REUSE_RESULTS 0 " "Commands (Other)","SET PASSWORD"," -SET PASSWORD string +@h2@ SET PASSWORD string "," Changes the password of the current user. The password must be in single quotes. It is case sensitive and can contain spaces. @@ -1732,7 +1887,7 @@ SET PASSWORD 'abcstzri!.5' " "Commands (Other)","SET QUERY_STATISTICS"," -SET QUERY_STATISTICS { TRUE | FALSE } +@h2@ SET QUERY_STATISTICS { TRUE | FALSE } "," Disabled or enables query statistics gathering for the whole database. The statistics are reflected in the INFORMATION_SCHEMA.QUERY_STATISTICS meta-table. @@ -1745,7 +1900,7 @@ SET QUERY_STATISTICS FALSE " "Commands (Other)","SET QUERY_STATISTICS_MAX_ENTRIES"," -SET QUERY_STATISTICS int +@h2@ SET QUERY_STATISTICS int "," Set the maximum number of entries in query statistics meta-table. Default value is 100. @@ -1758,7 +1913,7 @@ SET QUERY_STATISTICS_MAX_ENTRIES 500 " "Commands (Other)","SET QUERY_TIMEOUT"," -SET QUERY_TIMEOUT int +@h2@ SET QUERY_TIMEOUT int "," Set the query timeout of the current session to the given value. The timeout is in milliseconds. All kinds of statements will throw an exception if they take @@ -1770,7 +1925,7 @@ SET QUERY_TIMEOUT 10000 " "Commands (Other)","SET REFERENTIAL_INTEGRITY"," -SET REFERENTIAL_INTEGRITY { TRUE | FALSE } +@h2@ SET REFERENTIAL_INTEGRITY { TRUE | FALSE } "," Disabled or enables referential integrity checking for the whole database. Enabling it does not check existing data. Use ALTER TABLE SET to disable it only @@ -1784,9 +1939,8 @@ SET REFERENTIAL_INTEGRITY FALSE " "Commands (Other)","SET RETENTION_TIME"," -SET RETENTION_TIME int +@h2@ SET RETENTION_TIME int "," -This property is only used when using the MVStore storage engine. How long to retain old, persisted data, in milliseconds. 
The default is 45000 (45 seconds), 0 means overwrite data as early as possible. It is assumed that a file system and hard disk will flush all write buffers within this time. @@ -1798,13 +1952,13 @@ depending on the operating system and hardware. Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;RETENTION_TIME=0"" +This setting can be appended to the database URL: ""jdbc:h2:./test;RETENTION_TIME=0"" "," SET RETENTION_TIME 0 " "Commands (Other)","SET SALT HASH"," -SET SALT bytes HASH bytes +@h2@ SET SALT bytes HASH bytes "," Sets the password salt and hash for the current user. The password must be in single quotes. It is case sensitive and can contain spaces. @@ -1815,46 +1969,80 @@ SET SALT '00' HASH '1122' " "Commands (Other)","SET SCHEMA"," -SET SCHEMA schemaName +SET SCHEMA { schemaString | @h2@ { schemaName } } "," Changes the default schema of the current connection. The default schema is used in statements where no schema is set explicitly. The default schema for new connections is PUBLIC. This command does not commit a transaction, and rollback does not affect it. -This setting can be appended to the database URL: ""jdbc:h2:test;SCHEMA=ABC"" +This setting can be appended to the database URL: ""jdbc:h2:./test;SCHEMA=ABC"" "," +SET SCHEMA 'PUBLIC' SET SCHEMA INFORMATION_SCHEMA " "Commands (Other)","SET SCHEMA_SEARCH_PATH"," -SET SCHEMA_SEARCH_PATH schemaName [,...] +@h2@ SET SCHEMA_SEARCH_PATH schemaName [,...] "," Changes the schema search path of the current connection. The default schema is used in statements where no schema is set explicitly. The default schema for new connections is PUBLIC. This command does not commit a transaction, and rollback does not affect it. 
-This setting can be appended to the database URL: ""jdbc:h2:test;SCHEMA_SEARCH_PATH=ABC,DEF"" +This setting can be appended to the database URL: ""jdbc:h2:./test;SCHEMA_SEARCH_PATH=ABC,DEF"" "," SET SCHEMA_SEARCH_PATH INFORMATION_SCHEMA, PUBLIC " +"Commands (Other)","SET SESSION CHARACTERISTICS"," +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL +{ READ UNCOMMITTED | READ COMMITTED | REPEATABLE READ | SERIALIZABLE } +"," +Changes the transaction isolation level of the current session. +The actual support of isolation levels depends on the database engine. + +This command commits an open transaction in this session. +"," +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE +" + "Commands (Other)","SET THROTTLE"," -SET THROTTLE int +@h2@ SET THROTTLE int "," Sets the throttle for the current connection. The value is the number of milliseconds delay after each 50 ms. The default value is 0 (throttling disabled). This command does not commit a transaction, and rollback does not affect it. -This setting can be appended to the database URL: ""jdbc:h2:test;THROTTLE=50"" +This setting can be appended to the database URL: ""jdbc:h2:./test;THROTTLE=50"" "," SET THROTTLE 200 " +"Commands (Other)","SET TIME ZONE"," +SET TIME ZONE { LOCAL | intervalHourToMinute | @h2@ { intervalHourToSecond | string } } +"," +Sets the current time zone for the session. + +This command does not commit a transaction, and rollback does not affect it. 
+This setting can be appended to the database URL: ""jdbc:h2:./test;TIME ZONE='1:00'"" + +Time zone offset used for [CURRENT_TIME](https://h2database.com/html/functions.html#current_time), +[CURRENT_TIMESTAMP](https://h2database.com/html/functions.html#current_timestamp), +[CURRENT_DATE](https://h2database.com/html/functions.html#current_date), +[LOCALTIME](https://h2database.com/html/functions.html#localtime), +and [LOCALTIMESTAMP](https://h2database.com/html/functions.html#localtimestamp) is adjusted, +so these functions will return new values based on the same UTC timestamp after execution of this command. +"," +SET TIME ZONE LOCAL +SET TIME ZONE '-5:00' +SET TIME ZONE INTERVAL '1:00' HOUR TO MINUTE +SET TIME ZONE 'Europe/London' +" + "Commands (Other)","SET TRACE_LEVEL"," -SET { TRACE_LEVEL_FILE | TRACE_LEVEL_SYSTEM_OUT } int +@h2@ SET { TRACE_LEVEL_FILE | TRACE_LEVEL_SYSTEM_OUT } int "," Sets the trace level for file the file or system out stream. Levels are: 0=off, 1=error, 2=info, 3=debug. The default level is 1 for file and 0 for system out. @@ -1863,13 +2051,13 @@ To use SLF4J, append "";TRACE_LEVEL_FILE=4"" to the database URL when opening th This setting is not persistent. Admin rights are required to execute this command, as it affects all connections. This command does not commit a transaction, and rollback does not affect it. -This setting can be appended to the database URL: ""jdbc:h2:test;TRACE_LEVEL_SYSTEM_OUT=3"" +This setting can be appended to the database URL: ""jdbc:h2:./test;TRACE_LEVEL_SYSTEM_OUT=3"" "," SET TRACE_LEVEL_SYSTEM_OUT 3 " "Commands (Other)","SET TRACE_MAX_FILE_SIZE"," -SET TRACE_MAX_FILE_SIZE int +@h2@ SET TRACE_MAX_FILE_SIZE int "," Sets the maximum trace file size. If the file exceeds the limit, the file is renamed to .old and a new file is created. If another .old file exists, it is @@ -1878,38 +2066,51 @@ deleted. The default max size is 16 MB. This setting is persistent. 
Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. -This setting can be appended to the database URL: ""jdbc:h2:test;TRACE_MAX_FILE_SIZE=3"" +This setting can be appended to the database URL: ""jdbc:h2:./test;TRACE_MAX_FILE_SIZE=3"" "," SET TRACE_MAX_FILE_SIZE 10 " -"Commands (Other)","SET UNDO_LOG"," -SET UNDO_LOG int +"Commands (Other)","SET TRUNCATE_LARGE_LENGTH"," +@h2@ SET TRUNCATE_LARGE_LENGTH { TRUE | FALSE } "," -Enables (1) or disables (0) the per session undo log. The undo log is enabled by -default. When disabled, transactions can not be rolled back. This setting should -only be used for bulk operations that don't need to be atomic. +If ""TRUE"" is specified, the ""CHARACTER"", ""CHARACTER VARYING"", ""VARCHAR_IGNORECASE"", ""BINARY"", +"BINARY_VARYING", "JAVA_OBJECT"" and ""JSON"" data types with too large length will be treated as these data types with +maximum allowed length instead. +By default, or if ""FALSE"" is specified, such definitions throw an exception. +This setting can be used for compatibility with definitions from older versions of H2. -This command commits an open transaction in this connection. +This setting can be appended to the database URL: ""jdbc:h2:./test;TRUNCATE_LARGE_LENGTH=TRUE"" +"," +SET TRUNCATE_LARGE_LENGTH TRUE +" + +"Commands (Other)","SET VARIABLE_BINARY"," +@h2@ SET VARIABLE_BINARY { TRUE | FALSE } +"," +If ""TRUE"" is specified, the ""BINARY"" data type will be parsed as ""VARBINARY"" in the current session. +It can be used for compatibility with older versions of H2. + +This setting can be appended to the database URL: ""jdbc:h2:./test;VARIABLE_BINARY=TRUE"" "," -SET UNDO_LOG 0 +SET VARIABLE_BINARY TRUE " "Commands (Other)","SET WRITE_DELAY"," -SET WRITE_DELAY int +@h2@ SET WRITE_DELAY int "," Set the maximum delay between a commit and flushing the log, in milliseconds. This setting is persistent. The default is 500 ms. 
Admin rights are required to execute this command, as it affects all connections. This command commits an open transaction in this connection. -This setting can be appended to the database URL: ""jdbc:h2:test;WRITE_DELAY=0"" +This setting can be appended to the database URL: ""jdbc:h2:./test;WRITE_DELAY=0"" "," SET WRITE_DELAY 2000 " "Commands (Other)","SHUTDOWN"," -SHUTDOWN [ IMMEDIATELY | COMPACT | DEFRAG ] +@h2@ SHUTDOWN [ IMMEDIATELY | COMPACT | DEFRAG ] "," This statement closes all open connections to the database and closes the database. This command is usually not required, as the database is @@ -1924,7 +2125,7 @@ but only for at most the time defined by the database setting ""h2.maxCompactTim SHUTDOWN IMMEDIATELY closes the database files without any cleanup and without compacting. -SHUTDOWN DEFRAG re-orders the pages when closing the database so that table scans are faster. In case of MVStore it is currently equivalent to COMPACT. +SHUTDOWN DEFRAG is currently equivalent to COMPACT. Admin rights are required to execute this command. "," @@ -1932,16 +2133,35 @@ SHUTDOWN COMPACT " "Literals","Value"," -string | dollarQuotedString | numeric | dateAndTime | boolean | bytes - | interval | array | null +string | @h2@ { dollarQuotedString } | numeric | dateAndTime | boolean | bytes + | interval | array | @h2@ { geometry | json | uuid } | null "," A literal value of any data type, or null. "," 10 " +"Literals","Approximate numeric"," +[ + | - ] { { number [ . number ] } | { . number } } +E [ + | - ] expNumber +"," +An approximate numeric value. +Approximate numeric values have [DECFLOAT](https://h2database.com/html/datatypes.html#decfloat_type) data type. +To define a [DOUBLE PRECISION](https://h2database.com/html/datatypes.html#double_precision_type) value, use +""CAST(X AS DOUBLE PRECISION)"". +To define a [REAL](https://h2database.com/html/datatypes.html#real_type) value, use ""CAST(X AS REAL)"". 
+There are some special REAL, DOUBLE PRECISION, and DECFLOAT values: +to represent positive infinity, use ""CAST('Infinity' AS dataType)""; +for negative infinity, use ""CAST('-Infinity' AS dataType)""; +for ""NaN"" (not a number), use ""CAST('NaN' AS dataType)"". +"," +-1.4e-10 +CAST(1e2 AS REAL) +CAST('NaN' AS DOUBLE PRECISION) +" + "Literals","Array"," -ARRAY '[' [ expression, [,...] ] ']' +ARRAY '[' [ expression [,...] ] ']' "," An array of values. "," @@ -1951,57 +2171,44 @@ ARRAY[] " "Literals","Boolean"," -TRUE | FALSE +TRUE | FALSE | UNKNOWN "," A boolean value. +UNKNOWN is a NULL value with the boolean data type. "," TRUE " "Literals","Bytes"," -X'hex' +X'hex' [ 'hex' [...] ] "," -A binary value. The hex value is not case sensitive. +A binary string value. The hex value is not case sensitive and may contain space characters as separators. +If there are more than one group of quoted hex values, groups must be separated with whitespace. "," +X'' X'01FF' +X'01 bc 2a' +X'01' '02' " "Literals","Date"," -DATE 'yyyy-MM-dd' +DATE '[-]yyyy-MM-dd' "," -A date literal. The limitations are the same as for the Java data type -""java.sql.Date"", but for compatibility with other databases the suggested minimum -and maximum years are 0001 and 9999. +A date literal. "," DATE '2004-12-31' " "Literals","Date and time"," -date | time | timestamp | timestampWithTimeZone +date | time | timeWithTimeZone | timestamp | timestampWithTimeZone "," A literal value of any date-time data type. "," TIMESTAMP '1999-01-31 10:00:00' " -"Literals","Decimal"," -[ + | - ] { { number [ . number ] } | { . number } } -[ E [ + | - ] expNumber [...] ] ] -"," -A decimal number with fixed precision and scale. -Internally, ""java.lang.BigDecimal"" is used. -To ensure the floating point representation is used, use CAST(X AS DOUBLE). 
-There are some special decimal values: to represent positive infinity, use ""POWER(0, -1)""; -for negative infinity, use ""(-POWER(0, -1))""; for -0.0, use ""(-CAST(0 AS DOUBLE))""; -for ""NaN"" (not a number), use ""SQRT(-1)"". -"," -SELECT -1600.05 -SELECT CAST(0 AS DOUBLE) -SELECT -1.4e-10 -" - "Literals","Dollar Quoted String"," -$$anythingExceptTwoDollarSigns$$ +@h2@ $$anythingExceptTwoDollarSigns$$ "," A string starts and ends with two dollar signs. Two dollar signs are not allowed within the text. A whitespace is required before the first set of dollar signs. @@ -2010,8 +2217,20 @@ No escaping is required within the text. $$John's car$$ " +"Literals","Exact numeric"," +[ + | - ] { { number [ . number ] } | { . number } } +"," +An exact numeric value. +Exact numeric values with dot have [NUMERIC](https://h2database.com/html/datatypes.html#numeric_type) data type, values +without dot small enough to fit into [INTEGER](https://h2database.com/html/datatypes.html#integer_type) data type have +this type, larger values small enough to fit into [BIGINT](https://h2database.com/html/datatypes.html#bigint_type) data +type have this type, others also have NUMERIC data type. +"," +-1600.05 +" + "Literals","Hex Number"," -[ + | - ] 0x hex +@h2@ [ + | - ] @h2@ 0x { digit | a-f | A-F } [...] "," A number written in hexadecimal notation. "," @@ -2026,6 +2245,37 @@ The maximum integer number is 2147483647, the minimum is -2147483648. 10 " +"Literals","GEOMETRY"," +@h2@ GEOMETRY { bytes | string } +"," +A binary string or character string with GEOMETRY object. + +A binary string should contain Well-known Binary Representation (WKB) from OGC 06-103r4. +Dimension system marks may be specified either in both OGC WKB or in PostGIS EWKB formats. +Optional SRID from EWKB may be specified. +POINT EMPTY stored with NaN values as specified in OGC 12-128r15 is supported. 
+ +A character string should contain Well-known Text Representation (WKT) from OGC 06-103r4 +with optional SRID from PostGIS EWKT extension. + +"," +GEOMETRY 'GEOMETRYCOLLECTION (POINT (1 2))' +GEOMETRY X'00000000013ff00000000000003ff0000000000000' +" + +"Literals","JSON"," +@h2@ JSON { bytes | string } +"," +A binary or character string with a RFC 8259-compliant JSON text and data format. +JSON text is parsed into internal representation. +Order of object members is preserved as is. +Duplicate object member names are allowed. +"," +JSON '{""id"":10,""name"":""What''s this?""}' +JSON '[1, ' '2]'; +JSON X'7472' '7565' +" + "Literals","Long"," [ + | - ] number "," @@ -2051,23 +2301,50 @@ The maximum length of the number depends on the data type used. " "Literals","Numeric"," -decimal | int | long | hexNumber +exactNumeric | approximateNumeric | int | long | @h2@ { hexNumber } "," -The data type of a numeric value is always the lowest possible for the given value. -If the number contains a dot this is decimal; otherwise it is int, long, or decimal (depending on the value). +The data type of a numeric literal is the one of numeric data types, such as NUMERIC, DECFLOAT, BIGINT, or INTEGER +depending on format and value. + +An explicit CAST can be used to change the data type. "," -SELECT -1600.05 -SELECT CAST(0 AS DOUBLE) -SELECT -1.4e-10 +-1600.05 +CAST(0 AS DOUBLE PRECISION) +-1.4e-10 " "Literals","String"," -'anythingExceptSingleQuote' -"," -A string starts and ends with a single quote. Two single quotes can be used to -create a single quote inside a string. +[N]'anythingExceptSingleQuote' [...] + | U&{'anythingExceptSingleQuote' [...]} [ UESCAPE 'singleCharacter' ] +"," +A character string literal starts and ends with a single quote. +Two single quotes can be used to create a single quote inside a string. 
+Prefix ""N"" means a national character string literal; +H2 does not distinguish regular and national character string literals in any way, this prefix has no effect in H2. + +String literals staring with ""U&"" are Unicode character string literals. +All character string literals in H2 may have Unicode characters, +but Unicode character string literals may contain Unicode escape sequences ""\0000"" or ""\+000000"", +where \ is an escape character, ""0000"" and ""000000"" are Unicode character codes in hexadecimal notation. +Optional ""UESCAPE"" clause may be used to specify another escape character, +with exception for single quote, double quote, plus sign, and hexadecimal digits (0-9, a-f, and A-F). +By default the backslash is used. +Two escape characters can be used to include a single character inside a string. +Two single quotes can be used to create a single quote inside a string. "," 'John''s car' +'A' 'B' 'C' +U&'W\00f6rter ' '\\ \+01f600 /' +U&'|00a1' UESCAPE '|' +" + +"Literals","UUID"," +@h2@ UUID '{ digit | a-f | A-F | - } [...]' +"," +A UUID literal. +Must contain 32 hexadecimal digits. Digits may be separated with - signs. +"," +UUID '12345678-1234-1234-1234-123456789ABC' " "Literals","Time"," @@ -2079,19 +2356,28 @@ and has nanosecond resolution. TIME '23:59:59' " +"Literals","Time with time zone"," +TIME WITH TIME ZONE 'hh:mm:ss[.nnnnnnnnn]{ @h2@ { Z } | { - | + } timeZoneOffsetString}' +"," +A time with time zone literal. A value is between 0:00:00 and 23:59:59.999999999 +and has nanosecond resolution. +"," +TIME WITH TIME ZONE '23:59:59+01' +TIME WITH TIME ZONE '10:15:30.334-03:30' +TIME WITH TIME ZONE '0:00:00Z' +" + "Literals","Timestamp"," -TIMESTAMP [ WITHOUT TIME ZONE ] 'yyyy-MM-dd hh:mm:ss[.nnnnnnnnn]' +TIMESTAMP [ WITHOUT TIME ZONE ] '[-]yyyy-MM-dd hh:mm:ss[.nnnnnnnnn]' "," -A timestamp literal. 
The limitations are the same as for the Java data type -""java.sql.Timestamp"", but for compatibility with other databases the suggested -minimum and maximum years are 0001 and 9999. +A timestamp literal. "," TIMESTAMP '2005-12-31 23:59:59' " "Literals","Timestamp with time zone"," -TIMESTAMP WITH TIME ZONE 'yyyy-MM-dd hh:mm:ss[.nnnnnnnnn] -[Z | { - | + } timeZoneOffsetString | timeZoneNameString ]' +TIMESTAMP WITH TIME ZONE '[-]yyyy-MM-dd hh:mm:ss[.nnnnnnnnn] +[ @h2@ { Z } | { - | + } timeZoneOffsetString | @h2@ { timeZoneNameString } ]' "," A timestamp with time zone literal. If name of time zone is specified it will be converted to time zone offset. @@ -2220,19 +2506,23 @@ INTERVAL '11:12.123' MINUTE TO SECOND "Datetime fields","Datetime field"," yearField | monthField | dayOfMonthField | hourField | minuteField | secondField - | millisecondField | microsecondField | nanosecondField | timezoneHourField | timezoneMinuteField - | dayOfWeekField | isoWeekYearField | isoDayOfWeekField - | weekOfYearField | isoWeekOfYearField - | quarterField | dayOfYearField | epochField + | @h2@ { timezoneSecondField + | millenniumField | centuryField | decadeField + | quarterField + | millisecondField | microsecondField | nanosecondField + | dayOfYearField + | isoDayOfWeekField | isoWeekField | isoWeekYearField + | dayOfWeekField | weekField | weekYearField + | epochField } "," -Fields for EXTRACT, DATEADD, and DATEDIFF functions. +Fields for EXTRACT, DATEADD, DATEDIFF, and DATE_TRUNC functions. "," YEAR " "Datetime fields","Year field"," -YEAR | YYYY | YY | SQL_TSI_YEAR +YEAR | @c@ { YYYY | YY | SQL_TSI_YEAR } "," Year. "," @@ -2240,7 +2530,7 @@ YEAR " "Datetime fields","Month field"," -MONTH | MM | M | SQL_TSI_MONTH +MONTH | @c@ { MM | M | SQL_TSI_MONTH } "," Month (1-12). "," @@ -2248,7 +2538,7 @@ MONTH " "Datetime fields","Day of month field"," -DAY | DD | D | SQL_TSI_DAY +DAY | @c@ { DD | D | SQL_TSI_DAY } "," Day of month (1-31). 
"," @@ -2256,7 +2546,7 @@ DAY " "Datetime fields","Hour field"," -HOUR | HH | SQL_TSI_HOUR +HOUR | @c@ { HH | SQL_TSI_HOUR } "," Hour (0-23). "," @@ -2264,7 +2554,7 @@ HOUR " "Datetime fields","Minute field"," -MINUTE | MI | N | SQL_TSI_MINUTE +MINUTE | @c@ { MI | N | SQL_TSI_MINUTE } "," Minute (0-59). "," @@ -2272,37 +2562,13 @@ MINUTE " "Datetime fields","Second field"," -SECOND | SS | S | SQL_TSI_SECOND +SECOND | @c@ { SS | S | SQL_TSI_SECOND } "," Second (0-59). "," SECOND " -"Datetime fields","Millisecond field"," -MILLISECOND | MS -"," -Millisecond (0-999). -"," -MILLISECOND -" - -"Datetime fields","Microsecond field"," -MICROSECOND | MCS -"," -Microsecond (0-999999). -"," -MICROSECOND -" - -"Datetime fields","Nanosecond field"," -NANOSECOND | NS -"," -Nanosecond (0-999999999). -"," -NANOSECOND -" - "Datetime fields","Timezone hour field"," TIMEZONE_HOUR "," @@ -2319,68 +2585,132 @@ Timezone minute (from -59 to +59). TIMEZONE_MINUTE " -"Datetime fields","Day of week field"," -DAY_OF_WEEK | DAYOFWEEK | DOW -"," -Day of week (1-7). Sunday is 1. -"," -DAY_OF_WEEK -" - -"Datetime fields","ISO week year field"," -ISO_YEAR | ISOYEAR +"Datetime fields","Timezone second field"," +@h2@ TIMEZONE_SECOND "," -Returns the ISO week year from a date/time value. +Timezone second (from -59 to +59). +Local mean time (LMT) used in the past may have offsets with seconds. +Standard time doesn't use such offsets. "," -ISO_YEAR +TIMEZONE_SECOND " -"Datetime fields","ISO day of week field"," -ISO_DAY_OF_WEEK | ISODOW +"Datetime fields","Millennium field"," +@h2@ MILLENNIUM "," -ISO day of week (1-7). Monday is 1. +Century, or one thousand years (2001-01-01 to 3000-12-31). "," -ISO_DAY_OF_WEEK +MILLENNIUM " -"Datetime fields","Week of year field"," -WEEK | WW | W | SQL_TSI_WEEK +"Datetime fields","Century field"," +@h2@ CENTURY "," -Week of year (1-53). -EXTRACT function uses local rules to get number of week in year. -DATEDIFF function uses Sunday as a first day of week. 
+Century, or one hundred years (2001-01-01 to 2100-12-31). "," -WEEK +CENTURY " -"Datetime fields","ISO week of year field"," -ISO_WEEK +"Datetime fields","Decade field"," +@h2@ DECADE "," -ISO week of year (1-53). -ISO definition is used when first week of year should have at least four days -and week is started with Monday. +Decade, or ten years (2020-01-01 to 2029-12-31). "," -ISO_WEEK +DECADE " "Datetime fields","Quarter field"," -QUARTER +@h2@ QUARTER "," Quarter (1-4). "," QUARTER " -"Datetime fields","Day of year field"," -DAYOFYEAR | DAY_OF_YEAR | DOY | DY +"Datetime fields","Millisecond field"," +@h2@ { MILLISECOND } | @c@ { MS } "," -Day of year (1-366). +Millisecond (0-999). "," -DAYOFYEAR +MILLISECOND " -"Datetime fields","Epoch field"," -EPOCH +"Datetime fields","Microsecond field"," +@h2@ { MICROSECOND } | @c@ { MCS } +"," +Microsecond (0-999999). +"," +MICROSECOND +" + +"Datetime fields","Nanosecond field"," +@h2@ { NANOSECOND } | @c@ { NS } +"," +Nanosecond (0-999999999). +"," +NANOSECOND +" + +"Datetime fields","Day of year field"," +@h2@ { DAYOFYEAR | DAY_OF_YEAR } | @c@ { DOY | DY } +"," +Day of year (1-366). +"," +DAYOFYEAR +" + +"Datetime fields","ISO day of week field"," +@h2@ { ISO_DAY_OF_WEEK } | @c@ { ISODOW } +"," +ISO day of week (1-7). Monday is 1. +"," +ISO_DAY_OF_WEEK +" + +"Datetime fields","ISO week field"," +@h2@ ISO_WEEK +"," +ISO week of year (1-53). +ISO definition is used when first week of year should have at least four days +and week is started with Monday. +"," +ISO_WEEK +" + +"Datetime fields","ISO week year field"," +@h2@ { ISO_WEEK_YEAR } | @c@ { ISO_YEAR | ISOYEAR } +"," +Returns the ISO week-based year from a date/time value. +"," +ISO_WEEK_YEAR +" + +"Datetime fields","Day of week field"," +@h2@ { DAY_OF_WEEK | DAYOFWEEK } | @c@ { DOW } +"," +Day of week (1-7), locale-specific. 
+"," +DAY_OF_WEEK +" + +"Datetime fields","Week field"," +@h2@ { WEEK } | @c@ { WW | W | SQL_TSI_WEEK } +"," +Week of year (1-53) using local rules. +"," +WEEK +" + +"Datetime fields","Week year field"," +@h2@ { WEEK_YEAR } +"," +Returns the week-based year (locale-specific) from a date/time value. +"," +WEEK_YEAR +" + +"Datetime fields","Epoch field"," +@h2@ EPOCH "," For TIMESTAMP values number of seconds since 1970-01-01 00:00:00 in local time zone. For TIMESTAMP WITH TIME ZONE values number of seconds since 1970-01-01 00:00:00 in UTC time zone. @@ -2406,28 +2736,87 @@ Value or condition. ID=1 AND NAME='Hi' " -"Other Grammar","Case"," -CASE expression { WHEN expression THEN expression } [...] +"Other Grammar","Array element reference"," +array '[' indexInt ']' +"," +Returns array element at specified index or NULL if array is null or index is null. +"," +A[2] +" + +"Other Grammar","Field reference"," +(expression).fieldName +"," +Returns field value from the row value or NULL if row value is null. +Row value expression must be enclosed in parentheses. +"," +(R).COL1 +" + +"Other Grammar","Array value constructor by query"," +ARRAY (query) +"," +Collects values from the subquery into array. + +The subquery should have exactly one column. +Number of elements in the returned array is the number of rows in the subquery. +NULL values are included into array. +"," +ARRAY(SELECT * FROM SYSTEM_RANGE(1, 10)); +" + +"Other Grammar","Case expression"," +simpleCase | searchedCase +"," +Performs conditional evaluation of expressions. +"," +CASE A WHEN 'a' THEN 1 ELSE 2 END +CASE WHEN V > 10 THEN 1 WHEN V < 0 THEN 2 END +CASE WHEN A IS NULL THEN 'Null' ELSE 'Not null' END +" + +"Other Grammar","Simple case"," +CASE expression +{ WHEN { expression | conditionRightHandSide } [,...] THEN expression } [...] [ ELSE expression ] END "," -Returns the first expression where the value is equal to the test expression. If -no else part is specified, return NULL. 
+Returns then expression from the first when clause where one of its operands was evaluated to ""TRUE""
+for the case expression.
+If there are no such clauses, returns else expression or NULL if it is absent.
+
+Plain expressions are tested for equality with the case expression, ""NULL"" is not equal to ""NULL"".
+Right sides of conditions are evaluated with the case expression on the left side.
 ","
-CASE CNT WHEN 0 THEN 'No' WHEN 1 THEN 'One' ELSE 'Some' END
+CASE CNT WHEN IS NULL THEN 'Null' WHEN 0 THEN 'No' WHEN 1 THEN 'One' WHEN 2, 3 THEN 'Few' ELSE 'Some' END
 "
 
-"Other Grammar","Case When","
-CASE { WHEN expression THEN expression} [...]
+"Other Grammar","Searched case","
+CASE { WHEN expression THEN expression } [...]
 [ ELSE expression ] END
 ","
 Returns the first expression where the condition is true. If no else part is
 specified, return NULL.
 ","
 CASE WHEN CNT<10 THEN 'Low' ELSE 'High' END
+CASE WHEN A IS NULL THEN 'Null' ELSE 'Not null' END
+"
+
+"Other Grammar","Cast specification","
+CAST(value AS dataTypeOrDomain)
+","
+Converts a value to another data type. The following conversion rules are used:
+When converting a number to a boolean, 0 is false and every other value is true.
+When converting a boolean to a number, false is 0 and true is 1.
+When converting a number to a number of another type, the value is checked for overflow.
+When converting a string to binary, UTF-8 encoding is used.
+Note that some data types may need explicitly specified precision to avoid overflow or rounding.
+","
+CAST(NAME AS INT);
+CAST(TIMESTAMP '2010-01-01 10:40:00.123456' AS TIME(6))
 "
 
 "Other Grammar","Cipher","
-AES
+@h2@ AES
 ","
 Only the algorithm AES (""AES-128"") is supported currently.
"," @@ -2435,55 +2824,97 @@ AES " "Other Grammar","Column Definition"," -dataType [ VISIBLE | INVISIBLE ] +dataTypeOrDomain @h2@ [ VISIBLE | INVISIBLE ] [ { DEFAULT expression - | AS computedColumnExpression - | GENERATED {ALWAYS | BY DEFAULT} AS IDENTITY [(sequenceOptions)]} ] -[ ON UPDATE expression ] [ [ NOT ] NULL ] -[ { AUTO_INCREMENT | IDENTITY } [ ( startInt [, incrementInt ] ) ] ] -[ SELECTIVITY selectivity ] [ COMMENT expression ] -[ PRIMARY KEY [ HASH ] | UNIQUE ] [ CHECK condition ] -"," -Default expressions are used if no explicit value was used when adding a row. -The computed column expression is evaluated and assigned whenever the row changes. + | GENERATED ALWAYS AS (generatedColumnExpression) + | GENERATED {ALWAYS | BY DEFAULT} AS IDENTITY [(sequenceOption [...])]} ] +@h2@ [ ON UPDATE expression ] +@h2@ [ DEFAULT ON NULL ] +@h2@ [ SELECTIVITY selectivityInt ] @h2@ [ COMMENT expression ] +[ columnConstraintDefinition ] [...] +"," +The default expression is used if no explicit value was used when adding a row +and when DEFAULT value was specified in an update command. + +A column is either a generated column or a base column. +The generated column has a generated column expression. +The generated column expression is evaluated and assigned whenever the row changes. +This expression may reference base columns of the table, but may not reference other data. +The value of the generated column cannot be set explicitly. +Generated columns may not have DEFAULT or ON UPDATE expressions. + On update column expression is used if row is updated, -at least one column have a new value that is different from its previous value +at least one column has a new value that is different from its previous value and value for this column is not set explicitly in update statement. -Identity, auto-increment, or generated as identity columns are columns with a sequence as the default. 
-The column declared as the identity columns with IDENTITY data type or with IDENTITY () clause
+Identity column is a column generated with a sequence.
+The column declared as the identity column with IDENTITY data type or with IDENTITY () clause
 is implicitly the primary key column of this table.
-AUTO_INCREMENT and GENERATED clauses do not create the primary key constraint.
-GENERATED ALWAYS is accepted by treated in the same way as GENERATED BY DEFAULT.
+GENERATED ALWAYS AS IDENTITY, GENERATED BY DEFAULT AS IDENTITY, and AUTO_INCREMENT clauses
+do not create the primary key constraint automatically.
+GENERATED ALWAYS AS IDENTITY clause indicates that column can only be generated by the sequence,
+its value cannot be set explicitly.
+Identity column has implicit NOT NULL constraint.
+Identity column may not have DEFAULT or ON UPDATE expressions.
+
+DEFAULT ON NULL makes NULL value work as DEFAULT value in assignments to this column.
 
 The invisible column will not be displayed as a result of SELECT * query.
 Otherwise, it works as normal column.
 
-The options PRIMARY KEY, UNIQUE, and CHECK are not supported for ALTER statements.
+Column constraint definitions are not supported for ALTER statements.
+","
+CREATE TABLE TEST(ID INT PRIMARY KEY,
+    NAME VARCHAR(255) DEFAULT '' NOT NULL);
+CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+    QUANTITY INT, PRICE NUMERIC(10, 2),
+    AMOUNT NUMERIC(20, 2) GENERATED ALWAYS AS (QUANTITY * PRICE));
+"
+
+"Other Grammar","Column Constraint Definition","
+[ constraintNameDefinition ]
+NOT NULL | PRIMARY KEY | UNIQUE | referencesSpecification | CHECK (condition)
+","
+NOT NULL disallows NULL value for a column.
+
+PRIMARY KEY and UNIQUE require unique values.
+PRIMARY KEY also disallows NULL values and marks the column as a primary key.
 
-Check constraints can reference columns of the table,
-and they can reference objects that exist while the statement is executed.
-Conditions are only checked when a row is added or modified -in the table where the constraint exists. +Referential constraint requires values that exist in other column (usually in another table). + +Check constraint require a specified condition to return TRUE or UNKNOWN (NULL). +It can reference columns of the table, and can reference objects that exist while the statement is executed. +Conditions are only checked when a row is added or modified in the table where the constraint exists. +"," +NOT NULL +PRIMARY KEY +UNIQUE +REFERENCES T2(ID) +CHECK (VALUE > 0) +" +"Other Grammar","Comment"," +bracketedComment | -- anythingUntilEndOfLine | @c@ // anythingUntilEndOfLine +"," +Comments can be used anywhere in a command and are ignored by the database. +Line comments ""--"" and ""//"" end with a newline. "," -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255) DEFAULT ''); -CREATE TABLE TEST(ID BIGINT IDENTITY); -CREATE TABLE TEST(QUANTITY INT, PRICE DECIMAL, AMOUNT DECIMAL AS QUANTITY*PRICE); +-- comment +/* comment */ " -"Other Grammar","Comments"," --- anythingUntilEndOfLine | // anythingUntilEndOfLine | /* anythingUntilEndComment */ +"Other Grammar","Bracketed comment"," +/* [ [ bracketedComment ] [ anythingUntilCommentStartOrEnd ] [...] ] */ "," -Comments can be used anywhere in a command and are ignored by the database. Line -comments end with a newline. Block comments cannot be nested, but can be -multiple lines long. +Comments can be used anywhere in a command and are ignored by the database. +Bracketed comments ""/* */"" can be nested and can be multiple lines long. "," -// This is a comment +/* comment */ +/* comment /* nested comment */ comment */ " "Other Grammar","Compare"," -<> | <= | >= | = | < | > | != | && +<> | <= | >= | = | < | > | @c@ { != } | @h2@ && "," Comparison operator. The operator != is the same as <>. The operator ""&&"" means overlapping; it can only be used with geometry types. 
@@ -2492,76 +2923,257 @@ The operator ""&&"" means overlapping; it can only be used with geometry types. " "Other Grammar","Condition"," -operand [ conditionRightHandSide ] | NOT condition | EXISTS ( select ) +operand [ conditionRightHandSide ] + | NOT condition + | EXISTS ( query ) + | UNIQUE ( query ) + | @h2@ INTERSECTS (operand, operand) "," Boolean value or condition. + +""NOT"" condition negates the result of subcondition and returns ""TRUE"", ""FALSE"", or ""UNKNOWN"" (""NULL""). + +""EXISTS"" predicate tests whether the result of the specified subquery is not empty and returns ""TRUE"" or ""FALSE"". + +""UNIQUE"" predicate tests absence of duplicate rows in the specified subquery and returns ""TRUE"" or ""FALSE"". +Rows with ""NULL"" value in any column are ignored. + +""INTERSECTS"" checks whether 2D bounding boxes of specified geometries intersect with each other +and returns ""TRUE"" or ""FALSE"". "," -ID<>2 +ID <> 2 +NOT(A OR B) +EXISTS (SELECT NULL FROM TEST T WHERE T.GROUP_ID = P.ID) +UNIQUE (SELECT A, B FROM TEST T WHERE T.CATEGORY = CAT) +INTERSECTS(GEOM1, GEOM2) " "Other Grammar","Condition Right Hand Side"," -compare { { { ALL | ANY | SOME } ( select ) } | operand } - | IS [ NOT ] NULL - | IS [ NOT ] [ DISTINCT FROM ] operand - | BETWEEN operand AND operand - | IN ( { select | expression [,...] } ) - | [ NOT ] [ LIKE | ILIKE ] operand [ ESCAPE string ] - | [ NOT ] REGEXP operand +comparisonRightHandSide + | quantifiedComparisonRightHandSide + | nullPredicateRightHandSide + | distinctPredicateRightHandSide + | quantifiedDistinctPredicateRightHandSide + | booleanTestRightHandSide + | typePredicateRightHandSide + | jsonPredicateRightHandSide + | betweenPredicateRightHandSide + | inPredicateRightHandSide + | likePredicateRightHandSide + | regexpPredicateRightHandSide "," The right hand side of a condition. 
+"," +> 10 +IS NULL +IS NOT NULL +IS NOT DISTINCT FROM B +IS OF (DATE, TIMESTAMP, TIMESTAMP WITH TIME ZONE) +IS JSON OBJECT WITH UNIQUE KEYS +LIKE 'Jo%' +" + +"Other Grammar","Comparison Right Hand Side"," +compare operand +"," +Right side of comparison predicates. +"," +> 10 +" + +"Other Grammar","Quantified Comparison Right Hand Side"," +compare { ALL | ANY | SOME } ( query ) +"," +Right side of quantified comparison predicates. Quantified comparison predicate ALL returns TRUE if specified comparison operation between left size of condition and each row from a subquery returns TRUE, including case when there are no rows. ALL predicate returns FALSE if at least one such comparison returns FALSE. -Otherwise it returns NULL. +Otherwise it returns UNKNOWN. Quantified comparison predicates ANY and SOME return TRUE if specified comparison operation between left size of condition and at least one row from a subquery returns TRUE. ANY and SOME predicates return FALSE if all such comparisons return FALSE. -Otherwise it returns NULL. +Otherwise they return UNKNOWN. + +Note that these predicates have priority over ANY and SOME aggregate functions with subquery on the right side. +Use parentheses around aggregate function. +"," +< ALL(SELECT V FROM TEST) +" + +"Other Grammar","Null Predicate Right Hand Side"," +IS [ NOT ] NULL +"," +Right side of null predicate. + +Check whether the specified value(s) are NULL values. +To test multiple values a row value must be specified. +""IS NULL"" returns ""TRUE"" if and only if all values are ""NULL"" values; otherwise it returns ""FALSE"". +""IS NOT NULL"" returns ""TRUE"" if and only if all values are not ""NULL"" values; otherwise it returns ""FALSE"". +"," +IS NULL +" + +"Other Grammar","Distinct Predicate Right Hand Side"," +IS [ NOT ] [ DISTINCT FROM ] operand +"," +Right side of distinct predicate. + +Distinct predicate is null-safe, meaning NULL is considered the same as NULL, +and the condition never evaluates to UNKNOWN. 
+"," +IS NOT DISTINCT FROM OTHER +" + +"Other Grammar","Quantified Distinct Predicate Right Hand Side"," +@h2@ IS [ NOT ] [ DISTINCT FROM ] { ALL | ANY | SOME } ( query ) +"," +Right side of quantified distinct predicate. + +Quantified distinct predicate is null-safe, meaning NULL is considered the same as NULL, +and the condition never evaluates to UNKNOWN. + +Quantified distinct predicate ALL returns TRUE if specified distinct predicate between +left size of condition and each row from a subquery returns TRUE, including case when there are no rows. +Otherwise it returns FALSE. + +Quantified distinct predicates ANY and SOME return TRUE if specified distinct predicate between +left size of condition and at least one row from a subquery returns TRUE. +Otherwise they return FALSE. + Note that these predicates have priority over ANY and SOME aggregate functions with subquery on the right side. Use parentheses around aggregate function. +"," +IS DISTINCT FROM ALL(SELECT V FROM TEST) +" + +"Other Grammar","Boolean Test Right Hand Side"," +IS [ NOT ] { TRUE | FALSE | UNKNOWN } +"," +Right side of boolean test. + +Checks whether the specified value is (not) ""TRUE"", ""FALSE"", or ""UNKNOWN"" (""NULL"") +and return ""TRUE"" or ""FALSE"". +This test is null-safe. +"," +IS TRUE +" + +"Other Grammar","Type Predicate Right Hand Side"," +IS [ NOT ] OF (dataType [,...]) +"," +Right side of type predicate. + +Checks whether the data type of the specified operand is one of the specified data types. +Some data types have multiple names, these names are considered as equal here. +Domains and their base data types are currently not distinguished from each other. +Precision and scale are also ignored. +If operand is NULL, the result is UNKNOWN. +"," +IS OF (INTEGER, BIGINT) +" + +"Other Grammar","JSON Predicate Right Hand Side"," +IS [ NOT ] JSON [ VALUE | ARRAY | OBJECT | SCALAR ] + [ [ WITH | WITHOUT ] UNIQUE [ KEYS ] ] +"," +Right side of JSON predicate. 
+ +Checks whether value of the specified string, binary data, or a JSON is a valid JSON. +If ""ARRAY"", ""OBJECT"", or ""SCALAR"" is specified, only JSON items of the specified type are considered as valid. +If ""WITH UNIQUE [ KEYS ]"" is specified only JSON with unique keys is considered as valid. +This predicate isn't null-safe, it returns UNKNOWN if operand is NULL. +"," +IS JSON OBJECT WITH UNIQUE KEYS +" + +"Other Grammar","Between Predicate Right Hand Side"," +[ NOT ] BETWEEN [ ASYMMETRIC | SYMMETRIC ] operand AND operand +"," +Right side of between predicate. + +Checks whether the value is within the range inclusive. +""V BETWEEN [ ASYMMETRIC ] A AND B"" is equivalent to ""A <= V AND V <= B"". +""V BETWEEN SYMMETRIC A AND B"" is equivalent to ""A <= V AND V <= B OR A >= V AND V >= B"". +"," +BETWEEN LOW AND HIGH +" + +"Other Grammar","In Predicate Right Hand Side"," +[ NOT ] IN ( { query | expression [,...] } ) +"," +Right side of in predicate. + +Checks presence of value in the specified list of values or in result of the specified query. + +Returns ""TRUE"" if row value on the left side is equal to one of values on the right side, +""FALSE"" if all comparison operations were evaluated to ""FALSE"" or right side has no values, +and ""UNKNOWN"" otherwise. -The conditions ""IS [ NOT ]"" and ""IS [ NOT ] DISTINCT FROM"" are null-safe, meaning -NULL is considered the same as NULL, and the condition never evaluates to NULL. +This operation is logically equivalent to ""OR"" between comparison operations +comparing left side and each value from the right side. +"," +IN (A, B, C) +IN (SELECT V FROM TEST) +" + +"Other Grammar","Like Predicate Right Hand Side"," +[ NOT ] { LIKE | @h2@ { ILIKE } } operand [ ESCAPE string ] +"," +Right side of like predicate. -When comparing with LIKE, the wildcards characters are ""_"" (any one character) -and ""%"" (any characters). The database uses an index when comparing with LIKE -except if the operand starts with a wildcard. 
To search for the characters ""%"" and -""_"", the characters need to be escaped. The default escape character is "" \ "" (backslash). +The wildcards characters are ""_"" (any one character) and ""%"" (any characters). +The database uses an index when comparing with LIKE except if the operand starts with a wildcard. +To search for the characters ""%"" and ""_"", the characters need to be escaped. +The default escape character is "" \ "" (backslash). To select no escape character, use ""ESCAPE ''"" (empty string). At most one escape character is allowed. Each character that follows the escape character in the pattern needs to match exactly. Patterns that end with an escape character are invalid and the expression returns NULL. ILIKE does a case-insensitive compare. +"," +LIKE 'a%' +" + +"Other Grammar","Regexp Predicate Right Hand Side"," +@h2@ { [ NOT ] REGEXP operand } +"," +Right side of Regexp predicate. -When comparing with REGEXP, regular expression matching is used. +Regular expression matching is used. See Java ""Matcher.find"" for details. "," -VALUE > 10 -A IS NOT DISTINCT FROM B -LIKE 'Jo%' +REGEXP '[a-z]' " -"Other Grammar","Constraint"," +"Other Grammar","Table Constraint Definition"," [ constraintNameDefinition ] -{ CHECK expression - | UNIQUE ( columnName [,...] ) +{ PRIMARY KEY @h2@ [ HASH ] ( columnName [,...] ) } + | UNIQUE ( { columnName [,...] | VALUE } ) | referentialConstraint - | PRIMARY KEY [ HASH ] ( columnName [,...] ) } + | CHECK (condition) "," Defines a constraint. -The check condition must evaluate to TRUE, FALSE or NULL. -TRUE and NULL mean the operation is to be permitted, -and FALSE means the operation is to be rejected. -To prevent NULL in a column, use NOT NULL instead of a check constraint. + +PRIMARY KEY and UNIQUE require unique values. +PRIMARY KEY also disallows NULL values and marks the column as a primary key, a table can have only one primary key. 
+UNIQUE constraint supports NULL values and rows with NULL value in any column are considered as unique. +UNIQUE (VALUE) creates a unique constraint on entire row, excluding invisible columns; +but if new columns will be added to the table, they will not be included into this constraint. + +Referential constraint requires values that exist in other column(s) (usually in another table). + +Check constraint requires a specified condition to return TRUE or UNKNOWN (NULL). +It can reference columns of the table, and can reference objects that exist while the statement is executed. +Conditions are only checked when a row is added or modified in the table where the constraint exists. "," PRIMARY KEY(ID, NAME) " "Other Grammar","Constraint Name Definition"," -CONSTRAINT [ IF NOT EXISTS ] newConstraintName +CONSTRAINT @h2@ [ IF NOT EXISTS ] newConstraintName "," Defines a constraint name. "," @@ -2569,7 +3181,7 @@ CONSTRAINT CONST_ID " "Other Grammar","Csv Options"," -charsetString [, fieldSepString [, fieldDelimString [, escString [, nullString]]]]] +@h2@ charsetString [, fieldSepString [, fieldDelimString [, escString [, nullString]]]] | optionString "," Optional parameters for CSVREAD and CSVWRITE. @@ -2609,16 +3221,55 @@ newline and tab characters are written as such. CALL CSVWRITE('test2.csv', 'SELECT * FROM TEST', 'charset=UTF-8 fieldSeparator=|'); " +"Other Grammar","Data Change Delta Table"," +{ OLD | NEW | FINAL } TABLE +( { insert | update | delete | @h2@ { mergeInto } | mergeUsing } ) +"," +Executes the inner data change command and returns old, new, or final rows. + +""OLD"" is not allowed for ""INSERT"" command. It returns old rows. + +""NEW"" and ""FINAL"" are not allowed for ""DELETE"" command. + +""NEW"" returns new rows after evaluation of default expressions, but before execution of triggers. + +""FINAL"" returns new rows after execution of triggers. 
+"," +SELECT ID FROM FINAL TABLE (INSERT INTO TEST (A, B) VALUES (1, 2)) +" + +"Other Grammar","Data Type or Domain"," +dataType | [schemaName.]domainName +"," +A data type or domain name. +"," +INTEGER +MY_DOMAIN +" + "Other Grammar","Data Type"," -intType | booleanType | tinyintType | smallintType | bigintType | identityType - | decimalType | doubleType | realType | dateType | timeType | timestampType - | timestampWithTimeZoneType | binaryType | otherType | varcharType - | varcharIgnorecaseType | charType | blobType | clobType | uuidType - | arrayType | enumType | intervalType +predefinedType | arrayType | rowType "," -A data type definition. +A data type. "," -INT +INTEGER +" + +"Other Grammar","Predefined Type"," +characterType | characterVaryingType | characterLargeObjectType + | binaryType | binaryVaryingType | binaryLargeObjectType + | booleanType + | smallintType | integerType | bigintType + | numericType | realType | doublePrecisionType | decfloatType + | dateType | timeType | timeWithTimeZoneType + | timestampType | timestampWithTimeZoneType + | intervalType + | @h2@ { tinyintType | javaObjectType | enumType + | geometryType | jsonType | uuidType } +"," +A predefined data type. +"," +INTEGER " "Other Grammar","Digit"," @@ -2638,20 +3289,32 @@ ID=1 OR NAME='Hi' " "Other Grammar","Factor"," -term [ { { * | / | % } term } [...] ] +term [ { { * | / | @c@ { % } } term } [...] ] "," A value or a numeric factor. "," ID * 10 " +"Other Grammar","Grouping element"," +expression | (expression [, ...]) | () +"," +A grouping element of GROUP BY clause. +"," +A +(B, C) +() +" + "Other Grammar","Hex"," -{ { digit | a-f | A-F } { digit | a-f | A-F } } [...] +[' ' [...]] { { digit | a-f | A-F } [' ' [...]] { digit | a-f | A-F } [' ' [...]] } [...] "," -The hexadecimal representation of a number or of bytes. Two characters are one -byte. +The hexadecimal representation of a number or of bytes with optional space characters. +Two hexadecimal digit characters are one byte. 
"," cafe +11 22 33 +a b c d " "Other Grammar","Index Column"," @@ -2664,16 +3327,6 @@ the column in the same way. NAME " -"Other Grammar","Insert columns and source"," -{ [ ( columnName [,...] ) ] - { insertValues | [ DIRECT ] [ SORTED ] select | DEFAULT VALUES } } - | { SET { columnName = { DEFAULT | expression } } [,...] } -"," -Names of columns and their values for INSERT statement. -"," -(ID, NAME) VALUES (1, 'Test') -" - "Other Grammar","Insert values"," VALUES { DEFAULT|expression | [ROW] ({DEFAULT|expression} [,...]) }, [,...] "," @@ -2682,6 +3335,28 @@ Values for INSERT statement. VALUES (1, 'Test') " +"Other Grammar","Interval qualifier"," +YEAR [(precisionInt)] [ TO MONTH ] + | MONTH [(precisionInt)] + | DAY [(precisionInt)] [ TO { HOUR | MINUTE | SECOND [(scaleInt)] } ] + | HOUR [(precisionInt)] [ TO { MINUTE | SECOND [(scaleInt)] } ] + | MINUTE [(precisionInt)] [ TO SECOND [(scaleInt)] ] + | SECOND [(precisionInt [, scaleInt])] +"," +An interval qualifier. +"," +DAY TO SECOND +" + +"Other Grammar","Join specification"," +ON expression | USING (columnName [,...]) +"," +Specifies a join condition or column names. +"," +ON B.ID = A.PARENT_ID +USING (ID) +" + "Other Grammar","Merge when clause"," mergeWhenMatchedClause|mergeWhenNotMatchedClause "," @@ -2692,22 +3367,19 @@ WHEN MATCHED THEN DELETE "Other Grammar","Merge when matched clause"," WHEN MATCHED [ AND expression ] THEN -UPDATE SET setClauseList - | DELETE - | {UPDATE SET setClauseList [ WHERE expression ] DELETE [ WHERE expression ]} +UPDATE SET setClauseList | DELETE "," WHEN MATCHED clause for MERGE USING command. - -If both UPDATE and DELETE are specified, DELETE can delete only rows that were updated, -WHERE condition in DELETE clause can be used to specify which updated rows should be deleted. -This condition checks values in updated row. 
"," -WHEN MATCHED THEN UPDATE SET VALUE = S.VALUE +WHEN MATCHED THEN UPDATE SET NAME = S.NAME WHEN MATCHED THEN DELETE " "Other Grammar","Merge when not matched clause"," -WHEN NOT MATCHED [ AND expression ] THEN INSERT insertColumnsAndSource +WHEN NOT MATCHED [ AND expression ] THEN INSERT +[ ( columnName [,...] ) ] +[ overrideClause ] +VALUES ({DEFAULT|expression} [,...]) "," WHEN NOT MATCHED clause for MERGE USING command. "," @@ -2717,7 +3389,19 @@ WHEN NOT MATCHED THEN INSERT (ID, NAME) VALUES (S.ID, S.NAME) "Other Grammar","Name"," { { A-Z|_ } [ { A-Z|_|0-9 } [...] ] } | quotedName "," -Names are not case sensitive. There is no maximum name length. +With default settings unquoted names are converted to upper case. +The maximum name length is 256 characters. + +Identifiers in H2 are case sensitive by default. +Because unquoted names are converted to upper case, they can be written in any case anyway. +When both quoted and unquoted names are used for the same identifier the quoted names must be written in upper case. +Identifiers with lowercase characters can be written only as a quoted name, they aren't accessible with unquoted names. + +If DATABASE_TO_UPPER setting is set to FALSE the unquoted names aren't converted to upper case. + +If DATABASE_TO_LOWER setting is set to TRUE the unquoted names are converted to lower case instead. + +If CASE_INSENSITIVE_IDENTIFIERS setting is set to TRUE all identifiers are case insensitive. "," TEST " @@ -2725,45 +3409,88 @@ TEST "Other Grammar","Operand"," summand [ { || summand } [...] ] "," -A value or a concatenation of values. +Performs the concatenation of character string, binary string, or array values. In the default mode, the result is NULL if either parameter is NULL. +In compatibility modes result of string concatenation with NULL parameter can be different. 
"," 'Hi' || ' Eva' +X'AB' || X'CD' +ARRAY[1, 2] || 3 +1 || ARRAY[2, 3] +ARRAY[1, 2] || ARRAY[3, 4] " -"Other Grammar","Order"," -{ int | expression } [ ASC | DESC ] [ NULLS { FIRST | LAST } ] +"Other Grammar","Override clause"," +OVERRIDING { USER | SYSTEM } VALUE "," -Sorts the result by the given column number, or by an expression. If the -expression is a single parameter, then the value is interpreted as a column -number. Negative column numbers reverse the sort order. +If OVERRIDING USER VALUE is specified, INSERT statement ignores the provided value for identity column +and generates a new one instead. + +If OVERRIDING SYSTEM VALUE is specified, INSERT statement assigns the provided value to identity column. + +If neither clauses are specified, INSERT statement assigns the provided value to +GENERATED BY DEFAULT AS IDENTITY column, +but throws an exception if value is specified for GENERATED ALWAYS AS IDENTITY column. "," -NAME DESC NULLS LAST +OVERRIDING SYSTEM VALUE +OVERRIDING USER VALUE +" + +"Other Grammar","Query"," +select | explicitTable | tableValue +"," +A query, such as SELECT, explicit table, or table value. +"," +SELECT ID FROM TEST; +TABLE TEST; +VALUES (1, 2), (3, 4); " "Other Grammar","Quoted Name"," ""anythingExceptDoubleQuote"" -"," -Quoted names are case sensitive, and can contain spaces. There is no maximum -name length. Two double quotes can be used to create a single double quote -inside an identifier. + | U&""anythingExceptDoubleQuote"" [ UESCAPE 'singleCharacter' ] +"," +Case of characters in quoted names is preserved as is. Such names can contain spaces. +The maximum name length is 256 characters. +Two double quotes can be used to create a single double quote inside an identifier. +With default settings identifiers in H2 are case sensitive. + +Identifiers staring with ""U&"" are Unicode identifiers. 
+All identifiers in H2 may have Unicode characters, +but Unicode identifiers may contain Unicode escape sequences ""\0000"" or ""\+000000"", +where \ is an escape character, ""0000"" and ""000000"" are Unicode character codes in hexadecimal notation. +Optional ""UESCAPE"" clause may be used to specify another escape character, +with exception for single quote, double quote, plus sign, and hexadecimal digits (0-9, a-f, and A-F). +By default the backslash is used. +Two escape characters can be used to include a single character inside an Unicode identifier. +Two double quotes can be used to create a single double quote inside an Unicode identifier. "," ""FirstName"" +U&""\00d6ffnungszeit"" +U&""/00d6ffnungszeit"" UESCAPE '/' " "Other Grammar","Referential Constraint"," -FOREIGN KEY ( columnName [,...] ) +FOREIGN KEY ( columnName [,...] ) referencesSpecification +"," +Defines a referential constraint. +"," +FOREIGN KEY(ID) REFERENCES TEST(ID) +" + +"Other Grammar","References Specification"," REFERENCES [ refTableName ] [ ( refColumnName [,...] ) ] [ ON DELETE referentialAction ] [ ON UPDATE referentialAction ] "," -Defines a referential constraint. +Defines a referential specification of a referential constraint. If the table name is not specified, then the same table is referenced. RESTRICT is the default action. If the referenced columns are not specified, then the primary key columns are used. -The required indexes are automatically created if required. +Referential constraint requires an existing unique or primary key constraint on referenced columns, +this constraint must include all referenced columns in any order and must not include any other columns. Some tables may not be referenced, such as metadata tables. "," -FOREIGN KEY(ID) REFERENCES TEST(ID) +REFERENCES TEST(ID) " "Other Grammar","Referential Action"," @@ -2774,12 +3501,13 @@ RESTRICT is the default action. 
As this database does not support deferred checking, RESTRICT and NO ACTION will both throw an exception if the constraint is violated. The action SET DEFAULT will set the column in the referencing (child) table to the default value, while SET NULL will set it to NULL. "," -FOREIGN KEY(ID) REFERENCES TEST(ID) ON UPDATE CASCADE +CASCADE +SET NULL " "Other Grammar","Script Compression Encryption"," -[ COMPRESSION { DEFLATE | LZF | ZIP | GZIP } ] -[ CIPHER cipher PASSWORD string ] +@h2@ [ COMPRESSION { DEFLATE | LZF | ZIP | GZIP } ] +@h2@ [ CIPHER cipher PASSWORD string ] "," The compression and encryption algorithm to use for script files. When using encryption, only DEFLATE and LZF are supported. @@ -2788,6 +3516,16 @@ LZF is faster but uses more space. COMPRESSION LZF " +"Other Grammar","Select order"," +{ expression | @c@ { int } } [ ASC | DESC ] [ NULLS { FIRST | LAST } ] +"," +Sorts the result by the given column number, or by an expression. If the +expression is a single parameter, then the value is interpreted as a column +number. Negative column numbers reverse the sort order. +"," +NAME DESC NULLS LAST +" + "Other Grammar","Row value expression"," ROW (expression, [,...]) | ( [ expression, expression [,...] ] ) @@ -2805,59 +3543,169 @@ wildcardExpression | expression [ [ AS ] columnAlias ] "," An expression in a SELECT statement. "," -ID AS VALUE +ID AS DOCUMENT_ID " -"Other Grammar","Sequence options"," -[ START WITH long ] -[ INCREMENT BY long ] -[ MINVALUE long | NOMINVALUE | NO MINVALUE ] -[ MAXVALUE long | NOMAXVALUE | NO MAXVALUE ] -[ CYCLE long | NOCYCLE | NO CYCLE ] -[ CACHE long | NOCACHE | NO CACHE ] +"Other Grammar","Sequence value expression"," +{ NEXT | @h2@ { CURRENT } } VALUE FOR [schemaName.]sequenceName "," -Options of a sequence. +The next or current value of a sequence. 
+ +When the next value is requested the sequence is incremented and the current value of the sequence +and the last identity in the current session are updated with the generated value. +The next value of the sequence is generated only once for each processed row. +If this expression is used multiple times with the same sequence it returns the same value within a processed row. +Used values are never re-used, even when the transaction is rolled back. + +Current value may only be requested after generation of the sequence value in the current session. +It returns the latest generated value for the current session. + +If a single command contains next and current value expressions for the same sequence there is no guarantee that +the next value expression will be evaluated before the evaluation of current value expression. "," -START WITH 1 +NEXT VALUE FOR SEQ1 +CURRENT VALUE FOR SCHEMA2.SEQ2 " -"Other Grammar","Set clause list"," -{ { columnName = { DEFAULT | expression } } [,...] } - | { ( columnName [,...] ) = {rowValueExpression|(select)} } +"Other Grammar","Sequence option"," +START WITH long + | @h2@ { RESTART WITH long } + | basicSequenceOption "," -List of SET clauses. +Option of a sequence. + +START WITH is used to set the initial value of the sequence. +If initial value is not defined, MINVALUE for incrementing sequences and MAXVALUE for decrementing sequences is used. + +RESTART is used to immediately restart the sequence with the specified value. "," -NAME = 'Test', VALUE = 2 -(A, B) = (1, 2) -(A, B) = (SELECT X, Y FROM OTHER T2 WHERE T1.ID = T2.ID) +START WITH 10000 +NO CACHE " -"Other Grammar","Summand"," -factor [ { { + | - } factor } [...] ] +"Other Grammar","Alter sequence option"," +@h2@ { START WITH long } + | RESTART [ WITH long ] + | basicSequenceOption "," -A value or a numeric sum. +Option of a sequence. -Please note the text concatenation operator is ""||"". +START WITH is used to change the initial value of the sequence. 
+It does not affect the current value of the sequence, +it only changes the preserved initial value that is used for simple RESTART without a value. + +RESTART is used to restart the sequence from its initial value or with the specified value. "," -ID + 20 +START WITH 10000 +NO CACHE +" + +"Other Grammar","Alter identity column option"," +@h2@ { START WITH long } + | RESTART [ WITH long ] + | SET basicSequenceOption +"," +Option of an identity column. + +START WITH is used to set or change the initial value of the sequence. +START WITH does not affect the current value of the sequence, +it only changes the preserved initial value that is used for simple RESTART without a value. + +RESTART is used to restart the sequence from its initial value or with the specified value. +"," +START WITH 10000 +SET NO CACHE +" + +"Other Grammar","Basic sequence option"," +INCREMENT BY long + | MINVALUE long | NO MINVALUE | @c@ { NOMINVALUE } + | MAXVALUE long | NO MAXVALUE | @c@ { NOMAXVALUE } + | CYCLE | NO CYCLE | @h2@ { EXHAUSTED } | @c@ { NOCYCLE } + | @h2@ { CACHE long } | @h2@ { NO CACHE } | @c@ { NOCACHE } +"," +Basic option of a sequence. + +INCREMENT BY specifies the step of the sequence, may be positive or negative, but may not be zero. +The default is 1. + +MINVALUE and MAXVALUE specify the bounds of the sequence. + +Sequences with CYCLE option start the generation again from +MINVALUE (incrementing sequences) or MAXVALUE (decrementing sequences) instead of exhausting with an error. +Sequences with EXHAUSTED option can't return values until they are restarted. + +The CACHE option sets the number of pre-allocated numbers. +If the system crashes without closing the database, at most this many numbers are lost. +The default cache size is 32 if sequence has enough range of values. +NO CACHE option or the cache size 1 or lower disables the cache. +If CACHE option is specified, it cannot be larger than the total number of values +that sequence can produce within a cycle. 
+"," +MAXVALUE 100000 +CYCLE +NO CACHE +" + +"Other Grammar","Set clause list"," +{ { columnName = { DEFAULT | expression } } + | { ( columnName [,...] ) = { rowValueExpression | (query) } } } [,...] +"," +List of SET clauses. +"," +NAME = 'Test', PRICE = 2 +(A, B) = (1, 2) +(A, B) = (1, 2), C = 3 +(A, B) = (SELECT X, Y FROM OTHER T2 WHERE T1.ID = T2.ID) +" + +"Other Grammar","Sort specification"," +expression [ ASC | DESC ] [ NULLS { FIRST | LAST } ] +"," +Sorts the result by an expression. +"," +X ASC NULLS FIRST +" + +"Other Grammar","Sort specification list"," +sortSpecification [,...] +"," +Sorts the result by expressions. +"," +V +A, B DESC NULLS FIRST +" + +"Other Grammar","Summand"," +factor [ { { + | - } factor } [...] ] +"," +A value or a numeric sum. + +Please note the text concatenation operator is ""||"". +"," +ID + 20 " "Other Grammar","Table Expression"," -{ [ schemaName. ] tableName | ( select ) | valuesExpression } +{ [ schemaName. ] tableName + | ( query ) + | unnest + | table + | dataChangeDeltaTable } [ [ AS ] newTableAlias [ ( columnName [,...] ) ] ] -[ USE INDEX ([ indexName [,...] ]) ] +@h2@ [ USE INDEX ([ indexName [,...] ]) ] [ { { LEFT | RIGHT } [ OUTER ] | [ INNER ] | CROSS | NATURAL } - JOIN tableExpression [ ON expression ] ] + JOIN tableExpression [ joinSpecification ] ] "," -Joins a table. The join expression is not supported for cross and natural joins. +Joins a table. The join specification is not supported for cross and natural joins. A natural join is an inner join, where the condition is automatically on the columns with the same name. "," -TEST AS T LEFT JOIN TEST AS T1 ON T.ID = T1.ID +TEST1 AS T1 LEFT JOIN TEST2 AS T2 ON T1.ID = T2.PARENT_ID " "Other Grammar","Within group specification"," -WITHIN GROUP (ORDER BY {expression [ASC|DESC]} [,...]) +WITHIN GROUP (ORDER BY sortSpecificationList) "," Group specification for ordered set functions. 
"," @@ -2865,7 +3713,8 @@ WITHIN GROUP (ORDER BY ID DESC) " "Other Grammar","Wildcard expression"," -{* | tableAlias.*} [EXCEPT ([tableAlias.]columnName, [,...])] +[[schemaName.]tableAlias.]* +@h2@ [EXCEPT ([[schemaName.]tableAlias.]columnName, [,...])] "," A wildcard expression in a SELECT statement. A wildcard expression represents all visible columns. Some columns can be excluded with optional EXCEPT clause. @@ -2887,7 +3736,7 @@ W1 "Other Grammar","Window specification"," ([existingWindowName] -[PARTITION BY expression [,...]] [ORDER BY order [,...]] +[PARTITION BY expression [,...]] [ORDER BY sortSpecificationList] [windowFrame]) "," A window specification for a window, window function or aggregate. @@ -2972,25 +3821,49 @@ CURRENT ROW " "Other Grammar","Term"," -value +{ value | column | ?[ int ] - | NEXT VALUE FOR sequenceName + | sequenceValueExpression | function | { - | + } term | ( expression ) - | select - | case - | caseWhen - | userDefinedFunctionName + | arrayElementReference + | fieldReference + | query + | caseExpression + | castSpecification + | userDefinedFunctionName } +[ timeZone | intervalQualifier ] "," A value. Parameters can be indexed, for example ""?1"" meaning the first parameter. + +Interval qualifier may only be specified for a compatible value +or for a subtraction operation between two datetime values. +The subtraction operation ignores the leading field precision of the qualifier. "," 'Hello' + +" + +"Other Grammar","Time zone"," +AT { TIME ZONE { intervalHourToMinute | intervalHourToSecond | @h2@ { string } } | LOCAL } +"," +A time zone. Converts the timestamp with or without time zone into timestamp with time zone at specified time zone. +If a day-time interval is specified as a time zone, +it may not have fractional seconds and must be between -18 to 18 hours inclusive. 
+"," +AT LOCAL +AT TIME ZONE '2' +AT TIME ZONE '-6:00' +AT TIME ZONE INTERVAL '10:00' HOUR TO MINUTE +AT TIME ZONE INTERVAL '10:00:00' HOUR TO SECOND +AT TIME ZONE 'UTC' +AT TIME ZONE 'Europe/London' " "Other Grammar","Column"," -[[schemaName.]tableAlias.] { columnName | _ROWID_ } +[[schemaName.]tableAlias.] { columnName | @h2@ { _ROWID_ } } "," A column name with optional table alias and schema. _ROWID_ can be used to access unique row identifier. @@ -2998,101 +3871,321 @@ _ROWID_ can be used to access unique row identifier. ID " -"Data Types","INT Type"," -INT | INTEGER | MEDIUMINT | INT4 | SIGNED +"Data Types","CHARACTER Type"," +{ CHARACTER | CHAR | NATIONAL { CHARACTER | CHAR } | NCHAR } +[ ( lengthInt [CHARACTERS|OCTETS] ) ] "," -Possible values: -2147483648 to 2147483647. +A Unicode String of fixed length. -Mapped to ""java.lang.Integer"". +Length, if any, should be specified in characters, CHARACTERS and OCTETS units have no effect in H2. +The allowed length is from 1 to 1048576 characters. +If length is not specified, 1 character is used by default. + +The whole text is kept in memory when using this data type. +For variable-length strings use [CHARACTER VARYING](https://h2database.com/html/datatypes.html#character_varying_type) +data type instead. +For large text data [CHARACTER LARGE OBJECT](https://h2database.com/html/datatypes.html#character_large_object_type) +should be used; see there for details. + +Too short strings are right-padded with space characters. +Too long strings are truncated by CAST specification and rejected by column assignment. + +Two CHARACTER strings of different length are considered as equal if all additional characters in the longer string +are space characters. + +See also [string](https://h2database.com/html/grammar.html#string) literal grammar. +Mapped to ""java.lang.String"". 
"," -INT +CHARACTER +CHAR(10) +" + +"Data Types","CHARACTER VARYING Type"," +{ { CHARACTER | CHAR } VARYING + | VARCHAR + | { NATIONAL { CHARACTER | CHAR } | NCHAR } VARYING + | @c@ { LONGVARCHAR | VARCHAR2 | NVARCHAR | NVARCHAR2 } + | @h2@ { VARCHAR_CASESENSITIVE } } +[ ( lengthInt [CHARACTERS|OCTETS] ) ] +"," +A Unicode String. +Use two single quotes ('') to create a quote. + +The allowed length is from 1 to 1048576 characters. +The length is a size constraint; only the actual data is persisted. +Length, if any, should be specified in characters, CHARACTERS and OCTETS units have no effect in H2. + +The whole text is loaded into memory when using this data type. +For large text data [CHARACTER LARGE OBJECT](https://h2database.com/html/datatypes.html#character_large_object_type) +should be used; see there for details. + +See also [string](https://h2database.com/html/grammar.html#string) literal grammar. +Mapped to ""java.lang.String"". +"," +CHARACTER VARYING(100) +VARCHAR(255) +" + +"Data Types","CHARACTER LARGE OBJECT Type"," +{ { CHARACTER | CHAR } LARGE OBJECT | CLOB + | { NATIONAL CHARACTER | NCHAR } LARGE OBJECT | NCLOB + | @c@ { TINYTEXT | TEXT | MEDIUMTEXT | LONGTEXT | NTEXT } } +[ ( lengthLong [K|M|G|T|P] [CHARACTERS|OCTETS]) ] +"," +CHARACTER LARGE OBJECT is intended for very large Unicode character string values. +Unlike when using [CHARACTER VARYING](https://h2database.com/html/datatypes.html#character_varying_type), +large CHARACTER LARGE OBJECT values are not kept fully in-memory; instead, they are streamed. +CHARACTER LARGE OBJECT should be used for documents and texts with arbitrary size such as XML or +HTML documents, text files, or memo fields of unlimited size. +Use ""PreparedStatement.setCharacterStream"" to store values. +See also [Large Objects](https://h2database.com/html/advanced.html#large_objects) section. + +CHARACTER VARYING should be used for text with relatively short average size (for example +shorter than 200 characters). 
Short CHARACTER LARGE OBJECT values are stored inline, but there is +an overhead compared to CHARACTER VARYING. + +Length, if any, should be specified in characters, CHARACTERS and OCTETS units have no effect in H2. + +Mapped to ""java.sql.Clob"" (""java.io.Reader"" is also supported). +"," +CHARACTER LARGE OBJECT +CLOB(10K) +" + +"Data Types","VARCHAR_IGNORECASE Type"," +@h2@ VARCHAR_IGNORECASE +[ ( lengthInt [CHARACTERS|OCTETS] ) ] +"," +Same as VARCHAR, but not case sensitive when comparing. +Stored in mixed case. + +The allowed length is from 1 to 1048576 characters. +The length is a size constraint; only the actual data is persisted. +Length, if any, should be specified in characters, CHARACTERS and OCTETS units have no effect in H2. + +The whole text is loaded into memory when using this data type. +For large text data CLOB should be used; see there for details. + +See also [string](https://h2database.com/html/grammar.html#string) literal grammar. +Mapped to ""java.lang.String"". +"," +VARCHAR_IGNORECASE +" + +"Data Types","BINARY Type"," +BINARY [ ( lengthInt ) ] +"," +Represents a binary string (byte array) of fixed predefined length. + +The allowed length is from 1 to 1048576 bytes. +If length is not specified, 1 byte is used by default. + +The whole binary string is kept in memory when using this data type. +For variable-length binary strings use [BINARY VARYING](https://h2database.com/html/datatypes.html#binary_varying_type) +data type instead. +For large binary data [BINARY LARGE OBJECT](https://h2database.com/html/datatypes.html#binary_large_object_type) +should be used; see there for details. + +Too short binary strings are right-padded with zero bytes. +Too long binary strings are truncated by CAST specification and rejected by column assignment. + +Binary strings of different length are considered as not equal to each other. + +See also [bytes](https://h2database.com/html/grammar.html#bytes) literal grammar. +Mapped to byte[]. 
+"," +BINARY +BINARY(1000) +" + +"Data Types","BINARY VARYING Type"," +{ BINARY VARYING | VARBINARY + | @c@ { LONGVARBINARY | RAW | BYTEA } } +[ ( lengthInt ) ] +"," +Represents a byte array. + +The allowed length is from 1 to 1048576 bytes. +The length is a size constraint; only the actual data is persisted. + +The whole binary string is kept in memory when using this data type. +For large binary data [BINARY LARGE OBJECT](https://h2database.com/html/datatypes.html#binary_large_object_type) +should be used; see there for details. + +See also [bytes](https://h2database.com/html/grammar.html#bytes) literal grammar. +Mapped to byte[]. +"," +BINARY VARYING(100) +VARBINARY(1000) +" + +"Data Types","BINARY LARGE OBJECT Type"," +{ BINARY LARGE OBJECT | BLOB + | @c@ { TINYBLOB | MEDIUMBLOB | LONGBLOB | IMAGE } } +[ ( lengthLong [K|M|G|T|P]) ] +"," +BINARY LARGE OBJECT is intended for very large binary values such as files or images. +Unlike when using [BINARY VARYING](https://h2database.com/html/datatypes.html#binary_varying_type), +large objects are not kept fully in-memory; instead, they are streamed. +Use ""PreparedStatement.setBinaryStream"" to store values. +See also [CHARACTER LARGE OBJECT](https://h2database.com/html/datatypes.html#character_large_object_type) +and [Large Objects](https://h2database.com/html/advanced.html#large_objects) section. + +Mapped to ""java.sql.Blob"" (""java.io.InputStream"" is also supported). +"," +BINARY LARGE OBJECT +BLOB(10K) " "Data Types","BOOLEAN Type"," -BOOLEAN | BIT | BOOL +BOOLEAN | @c@ { BIT | BOOL } "," -Possible values: TRUE and FALSE. +Possible values: TRUE, FALSE, and UNKNOWN (NULL). +See also [boolean](https://h2database.com/html/grammar.html#boolean) literal grammar. Mapped to ""java.lang.Boolean"". "," BOOLEAN " "Data Types","TINYINT Type"," -TINYINT +@h2@ TINYINT "," Possible values are: -128 to 127. -Mapped to ""java.lang.Byte"". +See also [integer](https://h2database.com/html/grammar.html#int) literal grammar. 
+ +In JDBC this data type is mapped to ""java.lang.Integer"". +""java.lang.Byte"" is also supported. + +In ""org.h2.api.Aggregate"", ""org.h2.api.AggregateFunction"", and ""org.h2.api.Trigger"" +this data type is mapped to ""java.lang.Byte"". + "," TINYINT " "Data Types","SMALLINT Type"," -SMALLINT | INT2 | YEAR +SMALLINT | @c@ { INT2 } "," Possible values: -32768 to 32767. -Mapped to ""java.lang.Short"". +See also [integer](https://h2database.com/html/grammar.html#int) literal grammar. + +In JDBC this data type is mapped to ""java.lang.Integer"". +""java.lang.Short"" is also supported. + +In ""org.h2.api.Aggregate"", ""org.h2.api.AggregateFunction"", and ""org.h2.api.Trigger"" +this data type is mapped to ""java.lang.Short"". "," SMALLINT " +"Data Types","INTEGER Type"," +INTEGER | INT | @c@ { MEDIUMINT | INT4 | SIGNED } +"," +Possible values: -2147483648 to 2147483647. + +See also [integer](https://h2database.com/html/grammar.html#int) literal grammar. +Mapped to ""java.lang.Integer"". +"," +INTEGER +INT +" + "Data Types","BIGINT Type"," -BIGINT | INT8 +BIGINT | @c@ INT8 "," Possible values: -9223372036854775808 to 9223372036854775807. +See also [long](https://h2database.com/html/grammar.html#long) literal grammar. Mapped to ""java.lang.Long"". "," BIGINT " -"Data Types","IDENTITY Type"," -IDENTITY +"Data Types","NUMERIC Type"," +{ NUMERIC | DECIMAL | DEC } [ ( precisionInt [ , scaleInt ] ) ] "," -Auto-Increment value. Possible values: -9223372036854775808 to -9223372036854775807. Used values are never re-used, even when the transaction is -rolled back. +Data type with fixed decimal precision and scale. +This data type is recommended for storing currency values. -Mapped to ""java.lang.Long"". +If precision is specified, it must be from 1 to 100000. +If scale is specified, it must be from 0 to 100000, 0 is default. + +See also [numeric](https://h2database.com/html/grammar.html#numeric) literal grammar. +Mapped to ""java.math.BigDecimal"". 
"," -IDENTITY +NUMERIC(20, 2) " -"Data Types","DECIMAL Type"," -{ DECIMAL | NUMBER | DEC | NUMERIC } ( precisionInt [ , scaleInt ] ) +"Data Types","REAL Type"," +REAL | FLOAT ( precisionInt ) | @c@ { FLOAT4 } "," -Data type with fixed precision and scale. This data type is recommended for -storing currency values. +A single precision floating point number. +Should not be used to represent currency values, because of rounding problems. +Precision value for FLOAT type name should be from 1 to 24. -Mapped to ""java.math.BigDecimal"". +See also [numeric](https://h2database.com/html/grammar.html#numeric) literal grammar. +Mapped to ""java.lang.Float"". "," -DECIMAL(20, 2) +REAL " -"Data Types","DOUBLE Type"," -{ DOUBLE [ PRECISION ] | FLOAT [ ( precisionInt ) ] | FLOAT8 } +"Data Types","DOUBLE PRECISION Type"," +DOUBLE PRECISION | FLOAT [ ( precisionInt ) ] | @c@ { DOUBLE | FLOAT8 } "," -A floating point number. Should not be used to represent currency values, because -of rounding problems. +A double precision floating point number. +Should not be used to represent currency values, because of rounding problems. If precision value is specified for FLOAT type name, it should be from 25 to 53. +See also [numeric](https://h2database.com/html/grammar.html#numeric) literal grammar. Mapped to ""java.lang.Double"". "," -DOUBLE +DOUBLE PRECISION " -"Data Types","REAL Type"," -{ REAL | FLOAT ( precisionInt ) | FLOAT4 } +"Data Types","DECFLOAT Type"," +DECFLOAT [ ( precisionInt ) ] "," -A single precision floating point number. Should not be used to represent currency -values, because of rounding problems. -Precision value for FLOAT type name should be from 0 to 24. +Decimal floating point number. +This data type is not recommended to represent currency values, because of variable scale. -Mapped to ""java.lang.Float"". +If precision is specified, it must be from 1 to 100000. + +See also [numeric](https://h2database.com/html/grammar.html#numeric) literal grammar. 
+Mapped to ""java.math.BigDecimal"". +There are three special values: 'Infinity', '-Infinity', and 'NaN'. +These special values can't be read or set as ""BigDecimal"" values, +but they can be read or set using ""java.lang.String"", float, or double. "," -REAL +DECFLOAT +DECFLOAT(20) +" + +"Data Types","DATE Type"," +DATE +"," +The date data type. The proleptic Gregorian calendar is used. + +See also [date](https://h2database.com/html/grammar.html#date) literal grammar. + +In JDBC this data type is mapped to ""java.sql.Date"", with the time set to ""00:00:00"" +(or to the next possible time if midnight doesn't exist for the given date and time zone due to a daylight saving change). +""java.time.LocalDate"" is also supported and recommended. + +In ""org.h2.api.Aggregate"", ""org.h2.api.AggregateFunction"", and ""org.h2.api.Trigger"" +this data type is mapped to ""java.time.LocalDate"". + +If your time zone had LMT (local mean time) in the past and you use such old dates +(depends on the time zone, usually 100 or more years ago), +don't use ""java.sql.Date"" to read and write them. + +If you deal with very old dates (before 1582-10-15) note that ""java.sql.Date"" uses a mixed Julian/Gregorian calendar, +""java.util.GregorianCalendar"" can be configured to proleptic Gregorian with +""setGregorianChange(new java.util.Date(Long.MIN_VALUE))"" and used to read or write fields of dates. +"," +DATE " "Data Types","TIME Type"," @@ -3101,39 +4194,69 @@ TIME [ ( precisionInt ) ] [ WITHOUT TIME ZONE ] The time data type. The format is hh:mm:ss[.nnnnnnnnn]. If fractional seconds precision is specified it should be from 0 to 9, 0 is default. -Mapped to ""java.sql.Time"". When converted to a ""java.sql.Date"", the date is set to ""1970-01-01"". -""java.time.LocalTime"" is also supported on Java 8 and later versions. +See also [time](https://h2database.com/html/grammar.html#time) literal grammar. + +In JDBC this data type is mapped to ""java.sql.Time"". 
+""java.time.LocalTime"" is also supported and recommended. + +In ""org.h2.api.Aggregate"", ""org.h2.api.AggregateFunction"", and ""org.h2.api.Trigger"" +this data type is mapped to ""java.time.LocalTime"". + Use ""java.time.LocalTime"" or ""String"" instead of ""java.sql.Time"" when non-zero precision is needed. Cast from higher fractional seconds precision to lower fractional seconds precision performs round half up; -if result of rounding is higher than maximum supported value 23:59:59.999999999 it is saturated to 23:59:59.999999999. +if result of rounding is higher than maximum supported value 23:59:59.999999999 the value is rounded down instead. +The CAST operation to TIMESTAMP and TIMESTAMP WITH TIME ZONE data types uses the +[CURRENT_DATE](https://h2database.com/html/functions.html#current_date) for date fields. "," TIME TIME(9) " -"Data Types","DATE Type"," -DATE +"Data Types","TIME WITH TIME ZONE Type"," +TIME [ ( precisionInt ) ] WITH TIME ZONE "," -The date data type. The format is yyyy-MM-dd. +The time with time zone data type. +If fractional seconds precision is specified it should be from 0 to 9, 0 is default. -Mapped to ""java.sql.Date"", with the time set to ""00:00:00"" -(or to the next possible time if midnight doesn't exist for the given date and timezone due to a daylight saving change). -""java.time.LocalDate"" is also supported on Java 8 and later versions. +See also [time with time zone](https://h2database.com/html/grammar.html#time_with_time_zone) literal grammar. +Mapped to ""java.time.OffsetTime"". +Cast from higher fractional seconds precision to lower fractional seconds precision performs round half up; +if result of rounding is higher than maximum supported value 23:59:59.999999999 the value is rounded down instead. +The CAST operation to TIMESTAMP and TIMESTAMP WITH TIME ZONE data types uses the +[CURRENT_DATE](https://h2database.com/html/functions.html#current_date) for date fields. 
"," -DATE +TIME WITH TIME ZONE +TIME(9) WITH TIME ZONE " "Data Types","TIMESTAMP Type"," -{ TIMESTAMP [ ( precisionInt ) ] [ WITHOUT TIME ZONE ] - | DATETIME [ ( precisionInt ) ] | SMALLDATETIME } +TIMESTAMP [ ( precisionInt ) ] [ WITHOUT TIME ZONE ] + | @c@ { DATETIME [ ( precisionInt ) ] | SMALLDATETIME } "," -The timestamp data type. The format is yyyy-MM-dd hh:mm:ss[.nnnnnnnnn]. -Stored internally as a BCD-encoded date, and nanoseconds since midnight. +The timestamp data type. The proleptic Gregorian calendar is used. If fractional seconds precision is specified it should be from 0 to 9, 6 is default. Fractional seconds precision of SMALLDATETIME is always 0 and cannot be specified. -Mapped to ""java.sql.Timestamp"" (""java.util.Date"" may be used too). -""java.time.LocalDateTime"" is also supported on Java 8 and later versions. +This data type holds the local date and time without time zone information. +It cannot distinguish timestamps near transitions from DST to normal time. +For absolute timestamps use the [TIMESTAMP WITH TIME ZONE](https://h2database.com/html/datatypes.html#timestamp_with_time_zone_type) data type instead. + +See also [timestamp](https://h2database.com/html/grammar.html#timestamp) literal grammar. + +In JDBC this data type is mapped to ""java.sql.Timestamp"" (""java.util.Date"" may be used too). +""java.time.LocalDateTime"" is also supported and recommended. + +In ""org.h2.api.Aggregate"", ""org.h2.api.AggregateFunction"", and ""org.h2.api.Trigger"" +this data type is mapped to ""java.time.LocalDateTime"". + +If your time zone had LMT (local mean time) in the past and you use such old dates +(depends on the time zone, usually 100 or more years ago), +don't use ""java.sql.Timestamp"" and ""java.util.Date"" to read and write them. 
+ +If you deal with very old dates (before 1582-10-15) note that ""java.sql.Timestamp"" and ""java.util.Date"" +use a mixed Julian/Gregorian calendar, ""java.util.GregorianCalendar"" can be configured to proleptic Gregorian with +""setGregorianChange(new java.util.Date(Long.MIN_VALUE))"" and used to read or write fields of timestamps. + Cast from higher fractional seconds precision to lower fractional seconds precision performs round half up. "," TIMESTAMP @@ -3143,12 +4266,12 @@ TIMESTAMP(9) "Data Types","TIMESTAMP WITH TIME ZONE Type"," TIMESTAMP [ ( precisionInt ) ] WITH TIME ZONE "," -The timestamp with time zone data type. -Stored internally as a BCD-encoded date, nanoseconds since midnight, and time zone offset in minutes. +The timestamp with time zone data type. The proleptic Gregorian calendar is used. If fractional seconds precision is specified it should be from 0 to 9, 6 is default. -Mapped to ""org.h2.api.TimestampWithTimeZone"". -""java.time.OffsetDateTime"" and ""java.time.Instant"" are also supported on Java 8 and later versions. +See also [timestamp with time zone](https://h2database.com/html/grammar.html#timestamp_with_time_zone) literal grammar. +Mapped to ""java.time.OffsetDateTime"". +""java.time.ZonedDateTime"" and ""java.time.Instant"" are also supported. Values of this data type are compared by UTC values. It means that ""2010-01-01 10:00:00+01"" is greater than ""2010-01-01 11:00:00+03"". 
@@ -3160,135 +4283,132 @@ TIMESTAMP WITH TIME ZONE TIMESTAMP(9) WITH TIME ZONE " -"Data Types","BINARY Type"," -{ BINARY | VARBINARY | BINARY VARYING - | LONGVARBINARY | RAW | BYTEA } -[ ( precisionInt ) ] +"Data Types","INTERVAL Type"," +intervalYearType | intervalMonthType | intervalDayType + | intervalHourType| intervalMinuteType | intervalSecondType + | intervalYearToMonthType | intervalDayToHourType + | intervalDayToMinuteType | intervalDayToSecondType + | intervalHourToMinuteType | intervalHourToSecondType + | intervalMinuteToSecondType "," -Represents a byte array. For very long arrays, use BLOB. -The maximum size is 2 GB, but the whole object is kept in -memory when using this data type. The precision is a size constraint; -only the actual data is persisted. For large text data BLOB or CLOB -should be used. +Interval data type. +There are two classes of intervals. Year-month intervals can store years and months. +Day-time intervals can store days, hours, minutes, and seconds. +Year-month intervals are comparable only with another year-month intervals. +Day-time intervals are comparable only with another day-time intervals. -Mapped to byte[]. +Mapped to ""org.h2.api.Interval"". "," -BINARY(1000) +INTERVAL DAY TO SECOND " -"Data Types","OTHER Type"," -OTHER +"Data Types","JAVA_OBJECT Type"," +@h2@ { JAVA_OBJECT | OBJECT | OTHER } [ ( lengthInt ) ] "," -This type allows storing serialized Java objects. Internally, a byte array is used. -Serialization and deserialization is done on the client side only. +This type allows storing serialized Java objects. Internally, a byte array with serialized form is used. +The allowed length is from 1 (useful only with custom serializer) to 1048576 bytes. +The length is a size constraint; only the actual data is persisted. + +Serialization and deserialization is done on the client side only with two exclusions described below. Deserialization is only done when ""getObject"" is called. 
Java operations cannot be executed inside the database engine for security reasons. -Use ""PreparedStatement.setObject"" to store values. +Use ""PreparedStatement.setObject"" with ""Types.JAVA_OBJECT"" or ""H2Type.JAVA_OBJECT"" +as a third argument to store values. + +If Java method alias has ""Object"" parameter(s), values are deserialized during invocation of this method +on the server side. + +If a [linked table](https://h2database.com/html/advanced.html#linked_tables) has a column with ""Types.JAVA_OBJECT"" +JDBC data type and its database is not an another H2, Java objects need to be serialized and deserialized during +interaction between H2 and database that owns the table on the server side of H2. + +This data type needs special attention in secure environments. Mapped to ""java.lang.Object"" (or any subclass). "," -OTHER +JAVA_OBJECT +JAVA_OBJECT(10000) " -"Data Types","VARCHAR Type"," -{ VARCHAR | CHARACTER VARYING | LONGVARCHAR | VARCHAR2 | NVARCHAR - | NVARCHAR2 | VARCHAR_CASESENSITIVE} [ ( precisionInt ) ] +"Data Types","ENUM Type"," +@h2@ ENUM (string [, ... ]) "," -A Unicode String. -Use two single quotes ('') to create a quote. - -The maximum precision is ""Integer.MAX_VALUE"". -The precision is a size constraint; only the actual data is persisted. - -The whole text is loaded into memory when using this data type. -For large text data CLOB should be used; see there for details. - +A type with enumerated values. Mapped to ""java.lang.String"". + +Duplicate and empty values are not permitted. +The maximum allowed length of value is 1048576 characters. +The maximum number of values is 65536. 
"," -VARCHAR(255) +ENUM('clubs', 'diamonds', 'hearts', 'spades') " -"Data Types","VARCHAR_IGNORECASE Type"," -VARCHAR_IGNORECASE [ ( precisionInt ) ] +"Data Types","GEOMETRY Type"," +@h2@ GEOMETRY + [({ GEOMETRY | + { POINT + | LINESTRING + | POLYGON + | MULTIPOINT + | MULTILINESTRING + | MULTIPOLYGON + | GEOMETRYCOLLECTION } [Z|M|ZM]} + [, sridInt] )] "," -Same as VARCHAR, but not case sensitive when comparing. -Stored in mixed case. - -The maximum precision is ""Integer.MAX_VALUE"". -The precision is a size constraint; only the actual data is persisted. +A spatial geometry type. +If additional constraints are not specified this type accepts all supported types of geometries. +A constraint with required geometry type and dimension system can be set by specifying name of the type and +dimension system. A whitespace between them is optional. +2D dimension system does not have a name and assumed if only a geometry type name is specified. +POINT means 2D point, POINT Z or POINTZ means 3D point. +GEOMETRY constraint means no restrictions on type or dimension system of geometry. +A constraint with required spatial reference system identifier (SRID) can be set by specifying this identifier. -The whole text is loaded into memory when using this data type. -For large text data CLOB should be used; see there for details. +Mapped to ""org.locationtech.jts.geom.Geometry"" if JTS library is in classpath and to ""java.lang.String"" otherwise. +May be represented in textual format using the WKT (well-known text) or EWKT (extended well-known text) format. +Values are stored internally in EWKB (extended well-known binary) format, the maximum allowed length is 1048576 bytes. +Only a subset of EWKB and EWKT features is supported. +Supported objects are POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, and GEOMETRYCOLLECTION. +Supported dimension systems are 2D (XY), Z (XYZ), M (XYM), and ZM (XYZM). +SRID (spatial reference system identifier) is supported. 
-Mapped to ""java.lang.String"". +Use a quoted string containing a WKT/EWKT formatted string or ""PreparedStatement.setObject()"" to store values, +and ""ResultSet.getObject(..)"" or ""ResultSet.getString(..)"" to retrieve the values. "," -VARCHAR_IGNORECASE +GEOMETRY +GEOMETRY(POINT) +GEOMETRY(POINT Z) +GEOMETRY(POINT Z, 4326) +GEOMETRY(GEOMETRY, 4326) " -"Data Types","CHAR Type"," -{ CHAR | CHARACTER | NCHAR } [ ( precisionInt ) ] +"Data Types","JSON Type"," +@h2@ JSON [(lengthInt)] "," -A Unicode String. -This type is supported for compatibility with other databases and older applications. -The difference to VARCHAR is that trailing spaces are ignored and not persisted. +A RFC 8259-compliant JSON text. -The maximum precision is ""Integer.MAX_VALUE"". -The precision is a size constraint; only the actual data is persisted. +See also [json](https://h2database.com/html/grammar.html#json) literal grammar. +Mapped to ""byte[]"". +The allowed length is from 1 to 1048576 bytes. +The length is a size constraint; only the actual data is persisted. -The whole text is kept in memory when using this data type. -For large text data CLOB should be used; see there for details. +To set a JSON value with ""java.lang.String"" in a PreparedStatement use a ""FORMAT JSON"" data format +(""INSERT INTO TEST(ID, DATA) VALUES (?, ? FORMAT JSON)""). +Without the data format VARCHAR values are converted to a JSON string values. -Mapped to ""java.lang.String"". +Order of object members is preserved as is. +Duplicate object member names are allowed. "," -CHAR(10) +JSON " -"Data Types","BLOB Type"," -{ BLOB | BINARY LARGE OBJECT - | TINYBLOB | MEDIUMBLOB | LONGBLOB | IMAGE | OID } -[ ( precisionInt [K|M|G|T|P]) ] +"Data Types","UUID Type"," +@h2@ UUID "," -Like BINARY, but intended for very large values such as files or images. Unlike -when using BINARY, large objects are not kept fully in-memory. Use -""PreparedStatement.setBinaryStream"" to store values. 
See also CLOB and -Advanced / Large Objects. - -Mapped to ""java.sql.Blob"" (""java.io.InputStream"" is also supported). -"," -BLOB -BLOB(10K) -" - -"Data Types","CLOB Type"," -{ CLOB | CHARACTER LARGE OBJECT - | TINYTEXT | TEXT | MEDIUMTEXT | LONGTEXT | NTEXT | NCLOB } -[ ( precisionInt [K|M|G|T|P] [CHARACTERS|OCTETS]) ] -"," -CLOB is like VARCHAR, but intended for very large values. Unlike when using -VARCHAR, large CLOB objects are not kept fully in-memory; instead, they are streamed. -CLOB should be used for documents and texts with arbitrary size such as XML or -HTML documents, text files, or memo fields of unlimited size. Use -""PreparedStatement.setCharacterStream"" to store values. See also Advanced / Large Objects. - -VARCHAR should be used for text with relatively short average size (for example -shorter than 200 characters). Short CLOB values are stored inline, but there is -an overhead compared to VARCHAR. - -Precision, if any, should be specified in characters, CHARACTERS and OCTETS units have no effect in H2. - -Mapped to ""java.sql.Clob"" (""java.io.Reader"" is also supported). -"," -CLOB -CLOB(10K) -" - -"Data Types","UUID Type"," -UUID -"," -Universally unique identifier. This is a 128 bit value. -To store values, use ""PreparedStatement.setBytes"", -""setString"", or ""setObject(uuid)"" (where ""uuid"" is a ""java.util.UUID""). -""ResultSet.getObject"" will return a ""java.util.UUID"". +Universally unique identifier. This is a 128 bit value. +To store values, use ""PreparedStatement.setBytes"", +""setString"", or ""setObject(uuid)"" (where ""uuid"" is a ""java.util.UUID""). +""ResultSet.getObject"" will return a ""java.util.UUID"". Please note that using an index on randomly generated data will result on poor performance once there are millions of rows in a table. @@ -3301,87 +4421,34 @@ UUID " "Data Types","ARRAY Type"," -ARRAY +baseDataType ARRAY [ '[' maximumCardinalityInt ']' ] "," -An array of values. +A data type for array of values. 
+Base data type specifies the data type of elements. +Array may have NULL elements. +Maximum cardinality, if any, specifies maximum allowed number of elements in the array. +The allowed cardinality is from 0 to 65536 elements. + +See also [array](https://h2database.com/html/grammar.html#array) literal grammar. Mapped to ""java.lang.Object[]"" (arrays of any non-primitive type are also supported). -Use a value list (1, 2) or ""PreparedStatement.setObject(.., new Object[] {..})"" to store values, +Use ""PreparedStatement.setArray(..)"" or ""PreparedStatement.setObject(.., new Object[] {..})"" to store values, and ""ResultSet.getObject(..)"" or ""ResultSet.getArray(..)"" to retrieve the values. "," -ARRAY -" - -"Data Types","ENUM Type"," -{ ENUM (string [, ... ]) } -"," -A type with enumerated values. -Mapped to ""java.lang.Integer"". - -The first provided value is mapped to 0, the -second mapped to 1, and so on. - -Duplicate and empty values are not permitted. -"," - -ENUM('clubs', 'diamonds', 'hearts', 'spades') -" -"Data Types","GEOMETRY Type"," -GEOMETRY - [({ GEOMETRY | - { POINT - | LINESTRING - | POLYGON - | MULTIPOINT - | MULTILINESTRING - | MULTIPOLYGON - | GEOMETRYCOLLECTION } [Z|M|ZM]} - [, sridInt] )] -"," -A spatial geometry type. -If additional constraints are not specified this type accepts all supported types of geometries. -A constraint with required geometry type and dimension system can be set by specifying name of the type and -dimension system. A whitespace between them is optional. -2D dimension system does not have a name and assumed if only a geometry type name is specified. -POINT means 2D point, POINT Z or POINTZ means 3D point. -GEOMETRY constraint means no restrictions on type or dimension system of geometry. -A constraint with required spatial reference system identifier (SRID) can be set by specifying this identifier. - -Mapped to ""org.locationtech.jts.geom.Geometry"" if JTS library is in classpath and to ""java.lang.String"" otherwise. 
-May be represented in textual format using the WKT (well-known text) or EWKT (extended well-known text) format. -Values are stored internally in EWKB (extended well-known binary) format. -Only a subset of EWKB and EWKT features is supported. -Supported objects are POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, and GEOMETRYCOLLECTION. -Supported dimension systems are 2D (XY), Z (XYZ), M (XYM), and ZM (XYZM). -SRID (spatial reference system identifier) is supported. - -Use a quoted string containing a WKT/EWKT formatted string or ""PreparedStatement.setObject()"" to store values, -and ""ResultSet.getObject(..)"" or ""ResultSet.getString(..)"" to retrieve the values. -"," -GEOMETRY -GEOMETRY(POINT) -GEOMETRY(POINT Z) -GEOMETRY(POINT Z, 4326) -GEOMETRY(GEOMETRY, 4326) +BOOLEAN ARRAY +VARCHAR(100) ARRAY +INTEGER ARRAY[10] " -"Data Types","INTERVAL Type"," -intervalYearType | intervalMonthType | intervalDayType - | intervalHourType| intervalMinuteType | intervalSecondType - | intervalYearToMonthType | intervalDayToHourType - | intervalDayToMinuteType | intervalDayToSecondType - | intervalHourToMinuteType | intervalHourToSecondType - | intervalMinuteToSecondType +"Data Types","ROW Type"," +ROW (fieldName dataType [,...]) "," -Interval data type. -There are two classes of intervals. Year-month intervals can store years and months. -Day-time intervals can store days, hours, minutes, and seconds. -Year-month intervals are comparable only with another year-month intervals. -Day-time intervals are comparable only with another day-time intervals. +A row value data type. This data type should not be normally used as data type of a column. -Mapped to ""org.h2.api.Interval"". +See also [row value expression](https://h2database.com/html/grammar.html#row_value_expression) grammar. +Mapped to ""java.sql.ResultSet"". 
"," -INTERVAL DAY TO SECOND +ROW(A INT, B VARCHAR(10)) " "Interval Data Types","INTERVAL YEAR Type"," @@ -3390,8 +4457,9 @@ INTERVAL YEAR [ ( precisionInt ) ] Interval data type. If precision is specified it should be from 1 to 18, 2 is default. +See also [year interval](https://h2database.com/html/grammar.html#interval_year) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Period"" is also supported on Java 8 and later versions. +""java.time.Period"" is also supported. "," INTERVAL YEAR " @@ -3402,8 +4470,9 @@ INTERVAL MONTH [ ( precisionInt ) ] Interval data type. If precision is specified it should be from 1 to 18, 2 is default. +See also [month interval](https://h2database.com/html/grammar.html#interval_month) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Period"" is also supported on Java 8 and later versions. +""java.time.Period"" is also supported. "," INTERVAL MONTH " @@ -3414,8 +4483,9 @@ INTERVAL DAY [ ( precisionInt ) ] Interval data type. If precision is specified it should be from 1 to 18, 2 is default. +See also [day interval](https://h2database.com/html/grammar.html#interval_day) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Duration"" is also supported on Java 8 and later versions. +""java.time.Duration"" is also supported. "," INTERVAL DAY " @@ -3426,8 +4496,9 @@ INTERVAL HOUR [ ( precisionInt ) ] Interval data type. If precision is specified it should be from 1 to 18, 2 is default. +See also [hour interval](https://h2database.com/html/grammar.html#interval_hour) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Duration"" is also supported on Java 8 and later versions. +""java.time.Duration"" is also supported. "," INTERVAL HOUR " @@ -3438,8 +4509,9 @@ INTERVAL MINUTE [ ( precisionInt ) ] Interval data type. If precision is specified it should be from 1 to 18, 2 is default. +See also [minute interval](https://h2database.com/html/grammar.html#interval_minute) literal grammar. 
Mapped to ""org.h2.api.Interval"". -""java.time.Duration"" is also supported on Java 8 and later versions. +""java.time.Duration"" is also supported. "," INTERVAL MINUTE " @@ -3451,8 +4523,9 @@ Interval data type. If precision is specified it should be from 1 to 18, 2 is default. If fractional seconds precision is specified it should be from 0 to 9, 6 is default. +See also [second interval](https://h2database.com/html/grammar.html#interval_second) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Duration"" is also supported on Java 8 and later versions. +""java.time.Duration"" is also supported. "," INTERVAL SECOND " @@ -3463,8 +4536,9 @@ INTERVAL YEAR [ ( precisionInt ) ] TO MONTH Interval data type. If leading field precision is specified it should be from 1 to 18, 2 is default. +See also [year to month interval](https://h2database.com/html/grammar.html#interval_year_to_month) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Period"" is also supported on Java 8 and later versions. +""java.time.Period"" is also supported. "," INTERVAL YEAR TO MONTH " @@ -3475,8 +4549,9 @@ INTERVAL DAY [ ( precisionInt ) ] TO HOUR Interval data type. If leading field precision is specified it should be from 1 to 18, 2 is default. +See also [day to hour interval](https://h2database.com/html/grammar.html#interval_day_to_hour) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Duration"" is also supported on Java 8 and later versions. +""java.time.Duration"" is also supported. "," INTERVAL DAY TO HOUR " @@ -3487,8 +4562,9 @@ INTERVAL DAY [ ( precisionInt ) ] TO MINUTE Interval data type. If leading field precision is specified it should be from 1 to 18, 2 is default. +See also [day to minute interval](https://h2database.com/html/grammar.html#interval_day_to_minute) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Duration"" is also supported on Java 8 and later versions. +""java.time.Duration"" is also supported. 
"," INTERVAL DAY TO MINUTE " @@ -3500,8 +4576,9 @@ Interval data type. If leading field precision is specified it should be from 1 to 18, 2 is default. If fractional seconds precision is specified it should be from 0 to 9, 6 is default. +See also [day to second interval](https://h2database.com/html/grammar.html#interval_day_to_second) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Duration"" is also supported on Java 8 and later versions. +""java.time.Duration"" is also supported. "," INTERVAL DAY TO SECOND " @@ -3512,8 +4589,9 @@ INTERVAL HOUR [ ( precisionInt ) ] TO MINUTE Interval data type. If leading field precision is specified it should be from 1 to 18, 2 is default. +See also [hour to minute interval](https://h2database.com/html/grammar.html#interval_hour_to_minute) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Duration"" is also supported on Java 8 and later versions. +""java.time.Duration"" is also supported. "," INTERVAL HOUR TO MINUTE " @@ -3525,8 +4603,9 @@ Interval data type. If leading field precision is specified it should be from 1 to 18, 2 is default. If fractional seconds precision is specified it should be from 0 to 9, 6 is default. +See also [hour to second interval](https://h2database.com/html/grammar.html#interval_hour_to_second) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Duration"" is also supported on Java 8 and later versions. +""java.time.Duration"" is also supported. "," INTERVAL HOUR TO SECOND " @@ -3538,540 +4617,355 @@ Interval data type. If leading field precision is specified it should be from 1 to 18, 2 is default. If fractional seconds precision is specified it should be from 0 to 9, 6 is default. +See also [minute to second interval](https://h2database.com/html/grammar.html#interval_minute_to_second) literal grammar. Mapped to ""org.h2.api.Interval"". -""java.time.Duration"" is also supported on Java 8 and later versions. +""java.time.Duration"" is also supported. 
"," INTERVAL MINUTE TO SECOND " -"Functions (Aggregate)","AVG"," -AVG ( [ DISTINCT|ALL ] { numeric } ) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","ABS"," +ABS( { numeric | interval } ) "," -The average (mean) value. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +Returns the absolute value of a specified value. The returned value is of the same data type as the parameter. + +Note that TINYINT, SMALLINT, INT, and BIGINT data types cannot represent absolute values +of their minimum negative values, because they have more negative values than positive. +For example, for INT data type allowed values are from -2147483648 to 2147483647. +ABS(-2147483648) should be 2147483648, but this value is not allowed for this data type. +It leads to an exception. +To avoid it cast argument of this function to a higher data type. "," -AVG(X) +ABS(I) +ABS(CAST(I AS BIGINT)) " -"Functions (Aggregate)","BIT_AND"," -BIT_AND(expression) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","ACOS"," +ACOS(numeric) "," -The bitwise AND of all non-null values. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +Calculate the arc cosine. +See also Java ""Math.acos"". +This method returns a double. "," -BIT_AND(ID) +ACOS(D) " -"Functions (Aggregate)","BIT_OR"," -BIT_OR(expression) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","ASIN"," +ASIN(numeric) "," -The bitwise OR of all non-null values. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +Calculate the arc sine. +See also Java ""Math.asin"". +This method returns a double. 
"," -BIT_OR(ID) +ASIN(D) " -"Functions (Aggregate)","EVERY"," -{EVERY|BOOL_AND}(boolean) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","ATAN"," +ATAN(numeric) "," -Returns true if all expressions are true. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +Calculate the arc tangent. +See also Java ""Math.atan"". +This method returns a double. "," -EVERY(ID>10) +ATAN(D) " -"Functions (Aggregate)","ANY"," -{ANY|SOME|BOOL_OR}(boolean) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","COS"," +COS(numeric) "," -Returns true if any expression is true. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. - -Note that if ANY or SOME aggregate function is placed on the right side of comparison operation -and argument of this function is a subquery additional parentheses around aggregate function are required, -otherwise it will be parsed as quantified comparison predicate. +Calculate the trigonometric cosine. +See also Java ""Math.cos"". +This method returns a double. "," -ANY(NAME LIKE 'W%') -A = (ANY((SELECT B FROM T))) +COS(ANGLE) " -"Functions (Aggregate)","COUNT"," -COUNT( { * | { [ DISTINCT|ALL ] expression } } ) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","COSH"," +COSH(numeric) "," -The count of all row, or of the non-null values. -This method returns a long. -If no rows are selected, the result is 0. -Aggregates are only allowed in select statements. +Calculate the hyperbolic cosine. +See also Java ""Math.cosh"". +This method returns a double. "," -COUNT(*) +COSH(X) " -"Functions (Aggregate)","LISTAGG"," -{ LISTAGG ( [ DISTINCT|ALL ] string [, separatorString] [ ON OVERFLOW ERROR ] ) - withinGroupSpecification } -| { GROUP_CONCAT ( [ DISTINCT|ALL ] string - [ ORDER BY { expression [ ASC | DESC ] } [,...] 
] - [ SEPARATOR separatorString ] ) } -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","COT"," +@h2@ COT(numeric) "," -Concatenates strings with a separator. -Separator must be the same for all rows in the same group. -The default separator is a ',' (without space). -This method returns a string. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +Calculate the trigonometric cotangent (""1/TAN(ANGLE)""). +See also Java ""Math.*"" functions. +This method returns a double. "," -LISTAGG(NAME, ', ') WITHIN GROUP (ORDER BY ID) -LISTAGG(ID, ', ') WITHIN GROUP (ORDER BY ID) OVER (ORDER BY ID) +COT(ANGLE) " -"Functions (Aggregate)","ARRAY_AGG"," -ARRAY_AGG ( [ DISTINCT|ALL ] string -[ ORDER BY { expression [ ASC | DESC ] } [,...] ] ) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","SIN"," +SIN(numeric) "," -Aggregate the value into an array. -This method returns an array. -If no rows are selected, the result is NULL. -If ORDER BY is not specified order of values is not determined. -When this aggregate is used with OVER clause that contains ORDER BY subclause -it does not enforce exact order of values. -This aggregate needs additional own ORDER BY clause to make it deterministic. -Aggregates are only allowed in select statements. +Calculate the trigonometric sine. +See also Java ""Math.sin"". +This method returns a double. "," -ARRAY_AGG(NAME ORDER BY ID) -ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID) +SIN(ANGLE) " -"Functions (Aggregate)","MAX"," -MAX(value) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","SINH"," +SINH(numeric) "," -The highest value. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -The returned value is of the same data type as the parameter. +Calculate the hyperbolic sine. +See also Java ""Math.sinh"". +This method returns a double. 
"," -MAX(NAME) +SINH(ANGLE) " -"Functions (Aggregate)","MIN"," -MIN(value) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","TAN"," +TAN(numeric) "," -The lowest value. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -The returned value is of the same data type as the parameter. +Calculate the trigonometric tangent. +See also Java ""Math.tan"". +This method returns a double. "," -MIN(NAME) +TAN(ANGLE) " -"Functions (Aggregate)","SUM"," -SUM( [ DISTINCT|ALL ] { numeric } ) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","TANH"," +TANH(numeric) "," -The sum of all values. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -The data type of the returned value depends on the parameter data type like this: -""BOOLEAN, TINYINT, SMALLINT, INT -> BIGINT, BIGINT -> DECIMAL, REAL -> DOUBLE"" +Calculate the hyperbolic tangent. +See also Java ""Math.tanh"". +This method returns a double. "," -SUM(X) +TANH(X) " -"Functions (Aggregate)","SELECTIVITY"," -SELECTIVITY(value) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","ATAN2"," +@h2@ ATAN2(numeric, numeric) "," -Estimates the selectivity (0-100) of a value. -The value is defined as (100 * distinctCount / rowCount). -The selectivity of 0 rows is 0 (unknown). -Up to 10000 values are kept in memory. -Aggregates are only allowed in select statements. +Calculate the angle when converting the rectangular coordinates to polar coordinates. +See also Java ""Math.atan2"". +This method returns a double. 
"," -SELECT SELECTIVITY(FIRSTNAME), SELECTIVITY(NAME) FROM TEST WHERE ROWNUM()<20000 +ATAN2(X, Y) " -"Functions (Aggregate)","STDDEV_POP"," -STDDEV_POP( [ DISTINCT|ALL ] numeric ) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","BITAND"," +@h2@ BITAND(expression, expression) "," -The population standard deviation. -This method returns a double. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +The bitwise AND operation. +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +For aggregate function see [BIT_AND_AGG](https://h2database.com/html/functions-aggregate.html#bit_and_agg). "," -STDDEV_POP(X) +BITAND(A, B) " -"Functions (Aggregate)","STDDEV_SAMP"," -STDDEV_SAMP( [ DISTINCT|ALL ] numeric ) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","BITOR"," +@h2@ BITOR(expression, expression) "," -The sample standard deviation. -This method returns a double. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +The bitwise OR operation. +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +For aggregate function see [BIT_OR_AGG](https://h2database.com/html/functions-aggregate.html#bit_or_agg). "," -STDDEV(X) +BITOR(A, B) " -"Functions (Aggregate)","VAR_POP"," -VAR_POP( [ DISTINCT|ALL ] numeric ) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] -"," -The population variance (square of the population standard deviation). -This method returns a double. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. 
-"," -VAR_POP(X) -" - -"Functions (Aggregate)","VAR_SAMP"," -VAR_SAMP( [ DISTINCT|ALL ] numeric ) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] -"," -The sample variance (square of the sample standard deviation). -This method returns a double. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -"," -VAR_SAMP(X) -" - -"Functions (Aggregate)","RANK aggregate"," -RANK(value [,...]) -withinGroupSpecification -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] -"," -Returns the rank of the hypothetical row in specified collection of rows. -The rank of a row is the number of rows that precede this row plus 1. -If two or more rows have the same values in ORDER BY columns, these rows get the same rank from the first row with the same values. -It means that gaps in ranks are possible. -"," -SELECT RANK(5) WITHIN GROUP (ORDER BY V) FROM TEST; -" - -"Functions (Aggregate)","DENSE_RANK aggregate"," -DENSE_RANK(value [,...]) -withinGroupSpecification -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] -"," -Returns the dense rank of the hypothetical row in specified collection of rows. -The rank of a row is the number of groups of rows with the same values in ORDER BY columns that precede group with this row plus 1. -If two or more rows have the same values in ORDER BY columns, these rows get the same rank. -Gaps in ranks are not possible. -"," -SELECT DENSE_RANK(5) WITHIN GROUP (ORDER BY V) FROM TEST; -" - -"Functions (Aggregate)","PERCENT_RANK aggregate"," -PERCENT_RANK(value [,...]) -withinGroupSpecification -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] -"," -Returns the relative rank of the hypothetical row in specified collection of rows. -The relative rank is calculated as (RANK - 1) / (NR - 1), -where RANK is a rank of the row and NR is a total number of rows in the collection including hypothetical row. 
+"Functions (Numeric)","BITXOR"," +@h2@ BITXOR(expression, expression) "," -SELECT PERCENT_RANK(5) WITHIN GROUP (ORDER BY V) FROM TEST; -" +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. -"Functions (Aggregate)","CUME_DIST aggregate"," -CUME_DIST(value [,...]) -withinGroupSpecification -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +For aggregate function see [BIT_XOR_AGG](https://h2database.com/html/functions-aggregate.html#bit_xor_agg). "," -Returns the relative rank of the hypothetical row in specified collection of rows. -The relative rank is calculated as NP / NR -where NP is a number of rows that precede the current row or have the same values in ORDER BY columns -and NR is a total number of rows in the collection including hypothetical row. +The bitwise XOR operation. "," -SELECT CUME_DIST(5) WITHIN GROUP (ORDER BY V) FROM TEST; +BITXOR(A, B) " -"Functions (Aggregate)","PERCENTILE_CONT"," -PERCENTILE_CONT(numeric) WITHIN GROUP (ORDER BY value [ASC|DESC]) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","BITNOT"," +@h2@ BITNOT(expression) "," -Return percentile of values from the group with interpolation. -Interpolation is only supported for numeric, date-time, and interval data types. -Argument must be between 0 and 1 inclusive. -Argument must be the same for all rows in the same group. -If argument is NULL, the result is NULL. -NULL values are ignored in the calculation. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +The bitwise NOT operation. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. 
"," -PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY V) +BITNOT(A) " -"Functions (Aggregate)","PERCENTILE_DISC"," -PERCENTILE_DISC(numeric) WITHIN GROUP (ORDER BY value [ASC|DESC]) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] -"," -Return percentile of values from the group. -Interpolation is not performed. -Argument must be between 0 and 1 inclusive. -Argument must be the same for all rows in the same group. -If argument is NULL, the result is NULL. -NULL values are ignored in the calculation. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +"Functions (Numeric)","BITNAND"," +@h2@ BITNAND(expression, expression) "," -PERCENTILE_DISC(0.5) WITHIN GROUP (ORDER BY V) -" +The bitwise NAND operation equivalent to ""BITNOT(BITAND(expression, expression))"". +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. -"Functions (Aggregate)","MEDIAN"," -MEDIAN( [ DISTINCT|ALL ] value ) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] -"," -The value separating the higher half of a values from the lower half. -Returns the middle value or an interpolated value between two middle values if number of values is even. -Interpolation is only supported for numeric, date-time, and interval data types. -NULL values are ignored in the calculation. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +For aggregate function see [BIT_NAND_AGG](https://h2database.com/html/functions-aggregate.html#bit_nand_agg). 
"," -MEDIAN(X) +BITNAND(A, B) " -"Functions (Aggregate)","MODE"," -{ MODE( value ) [ ORDER BY expression [ ASC | DESC ] ] } - | { MODE() WITHIN GROUP (ORDER BY expression [ ASC | DESC ]) } -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"Functions (Numeric)","BITNOR"," +@h2@ BITNOR(expression, expression) "," -Returns the value that occurs with the greatest frequency. -If there are multiple values with the same frequency only one value will be returned. -In this situation value will be chosen based on optional ORDER BY clause -that should specify exactly the same expression as argument of this function. -Use ascending order to get smallest value or descending order to get largest value -from multiple values with the same frequency. -If this clause is not specified the exact chosen value is not determined in this situation. -NULL values are ignored in the calculation. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -"," -MODE(X) -MODE(X ORDER BY X) -MODE() WITHIN GROUP (ORDER BY X) -" +The bitwise NOR operation equivalent to ""BITNOT(BITOR(expression, expression))"". +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. -"Functions (Aggregate)","ENVELOPE"," -ENVELOPE( value ) -[FILTER (WHERE expression)] [OVER windowNameOrSpecification] -"," -Returns the minimum bounding box that encloses all specified GEOMETRY values. -Only 2D coordinate plane is supported. -NULL values are ignored in the calculation. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. +For aggregate function see [BIT_NOR_AGG](https://h2database.com/html/functions-aggregate.html#bit_nor_agg). "," -ENVELOPE(X) +BITNOR(A, B) " -"Functions (Numeric)","ABS"," -ABS( { numeric | interval } ) -"," -Returns the absolute value of a specified value. 
-The returned value is of the same data type as the parameter. - -Note that TINYINT, SMALLINT, INT, and BIGINT data types cannot represent absolute values -of their minimum negative values, because they have more negative values than positive. -For example, for INT data type allowed values are from -2147483648 to 2147483647. -ABS(-2147483648) should be 2147483648, but this value is not allowed for this data type. -It leads to an exception. -To avoid it cast argument of this function to a higher data type. +"Functions (Numeric)","BITXNOR"," +@h2@ BITXNOR(expression, expression) "," -ABS(VALUE) -ABS(CAST(VALUE AS BIGINT)) -" +The bitwise XNOR operation equivalent to ""BITNOT(BITXOR(expression, expression))"". +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. -"Functions (Numeric)","ACOS"," -ACOS(numeric) -"," -Calculate the arc cosine. -See also Java ""Math.acos"". -This method returns a double. +For aggregate function see [BIT_XNOR_AGG](https://h2database.com/html/functions-aggregate.html#bit_xnor_agg). "," -ACOS(D) +BITXNOR(A, B) " -"Functions (Numeric)","ASIN"," -ASIN(numeric) +"Functions (Numeric)","BITGET"," +@h2@ BITGET(expression, long) "," -Calculate the arc sine. -See also Java ""Math.asin"". -This method returns a double. +Returns true if and only if the first argument has a bit set in the +position specified by the second parameter. +The first argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This method returns a boolean. +The second argument is zero-indexed; the least significant bit has position 0. "," -ASIN(D) +BITGET(A, 1) " -"Functions (Numeric)","ATAN"," -ATAN(numeric) +"Functions (Numeric)","BITCOUNT"," +@h2@ BITCOUNT(expression) "," -Calculate the arc tangent. -See also Java ""Math.atan"". -This method returns a double. +Returns count of set bits in the specified value. 
+Value should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This method returns a long. "," -ATAN(D) +BITCOUNT(A) " -"Functions (Numeric)","COS"," -COS(numeric) -"," -Calculate the trigonometric cosine. -See also Java ""Math.cos"". -This method returns a double. +"Functions (Numeric)","LSHIFT"," +@h2@ LSHIFT(expression, long) "," -COS(ANGLE) -" +The bitwise signed left shift operation. +Shifts the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. -"Functions (Numeric)","COSH"," -COSH(numeric) -"," -Calculate the hyperbolic cosine. -See also Java ""Math.cosh"". -This method returns a double. +If number of bits is negative, a signed right shift is performed instead. +For numeric values a sign bit is used for left-padding (with negative offset). +If number of bits is equal to or larger than number of bits in value all bits are pushed out from the value. +For binary string arguments signed and unsigned shifts return the same results. "," -COSH(X) +LSHIFT(A, B) " -"Functions (Numeric)","COT"," -COT(numeric) -"," -Calculate the trigonometric cotangent (""1/TAN(ANGLE)""). -See also Java ""Math.*"" functions. -This method returns a double. +"Functions (Numeric)","RSHIFT"," +@h2@ RSHIFT(expression, long) "," -COT(ANGLE) -" +The bitwise signed right shift operation. +Shifts the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. -"Functions (Numeric)","SIN"," -SIN(numeric) -"," -Calculate the trigonometric sine. -See also Java ""Math.sin"". -This method returns a double. +If number of bits is negative, a signed left shift is performed instead. 
+For numeric values a sign bit is used for left-padding (with positive offset). +If number of bits is equal to or larger than number of bits in value all bits are pushed out from the value. +For binary string arguments signed and unsigned shifts return the same results. "," -SIN(ANGLE) +RSHIFT(A, B) " -"Functions (Numeric)","SINH"," -SINH(numeric) -"," -Calculate the hyperbolic sine. -See also Java ""Math.sinh"". -This method returns a double. +"Functions (Numeric)","ULSHIFT"," +@h2@ ULSHIFT(expression, long) "," -SINH(ANGLE) -" +The bitwise unsigned left shift operation. +Shifts the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. -"Functions (Numeric)","TAN"," -TAN(numeric) -"," -Calculate the trigonometric tangent. -See also Java ""Math.tan"". -This method returns a double. +If number of bits is negative, an unsigned right shift is performed instead. +If number of bits is equal to or larger than number of bits in value all bits are pushed out from the value. +For binary string arguments signed and unsigned shifts return the same results. "," -TAN(ANGLE) +ULSHIFT(A, B) " -"Functions (Numeric)","TANH"," -TANH(numeric) +"Functions (Numeric)","URSHIFT"," +@h2@ URSHIFT(expression, long) "," -Calculate the hyperbolic tangent. -See also Java ""Math.tanh"". -This method returns a double. -"," -TANH(X) -" +The bitwise unsigned right shift operation. +Shifts the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. -"Functions (Numeric)","ATAN2"," -ATAN2(numeric, numeric) -"," -Calculate the angle when converting the rectangular coordinates to polar coordinates. -See also Java ""Math.atan2"". -This method returns a double. 
+If number of bits is negative, an unsigned left shift is performed instead. +If number of bits is equal to or larger than number of bits in value all bits are pushed out from the value. +For binary string arguments signed and unsigned shifts return the same results. "," -ATAN2(X, Y) +URSHIFT(A, B) " -"Functions (Numeric)","BITAND"," -BITAND(long, long) +"Functions (Numeric)","ROTATELEFT"," +@h2@ ROTATELEFT(expression, long) "," -The bitwise AND operation. -This method returns a long. -See also Java operator &. +The bitwise left rotation operation. +Rotates the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. "," -BITAND(A, B) +ROTATELEFT(A, B) " -"Functions (Numeric)","BITGET"," -BITGET(long, int) +"Functions (Numeric)","ROTATERIGHT"," +@h2@ ROTATERIGHT(expression, long) "," -Returns true if and only if the first parameter has a bit set in the -position specified by the second parameter. -This method returns a boolean. -The second parameter is zero-indexed; the least significant bit has position 0. +The bitwise right rotation operation. +Rotates the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. "," -BITGET(A, 1) +ROTATERIGHT(A, B) " -"Functions (Numeric)","BITOR"," -BITOR(long, long) -"," -The bitwise OR operation. -This method returns a long. -See also Java operator |. +"Functions (Numeric)","MOD"," +MOD(dividendNumeric, divisorNumeric) "," -BITOR(A, B) -" +The modulus expression. -"Functions (Numeric)","BITXOR"," -BITXOR(long, long) -"," -The bitwise XOR operation. -This method returns a long. -See also Java operator ^. -"," -BITXOR(A, B) -" +Result has the same type as divisor. +Result is NULL if either of arguments is NULL. 
+If divisor is 0, an exception is raised. +Result has the same sign as dividend or is equal to 0. -"Functions (Numeric)","MOD"," -MOD(long, long) -"," -The modulo operation. -This method returns a long. -See also Java operator %. +Usually arguments should have scale 0, but it isn't required by H2. "," MOD(A, B) " -"Functions (Numeric)","CEILING"," -{ CEILING | CEIL } (numeric) +"Functions (Numeric)","CEIL"," +{ CEIL | CEILING } (numeric) "," -See also Java ""Math.ceil"". -This method returns a double. +Returns the smallest integer value that is greater than or equal to the argument. +This method returns value of the same type as argument, but with scale set to 0 and adjusted precision, if applicable. "," CEIL(A) " "Functions (Numeric)","DEGREES"," -DEGREES(numeric) +@h2@ DEGREES(numeric) "," See also Java ""Math.toDegrees"". This method returns a double. @@ -4091,33 +4985,48 @@ EXP(A) "Functions (Numeric)","FLOOR"," FLOOR(numeric) "," -See also Java ""Math.floor"". -This method returns a double. +Returns the largest integer value that is less than or equal to the argument. +This method returns value of the same type as argument, but with scale set to 0 and adjusted precision, if applicable. "," FLOOR(A) " "Functions (Numeric)","LN"," -{LN|LOG}(numeric) +LN(numeric) "," Calculates the natural (base e) logarithm as a double value. -In the PostgreSQL mode, LOG(x) is base 10. -See also Java ""Math.log"". +Argument must be a positive numeric value. "," LN(A) " +"Functions (Numeric)","LOG"," +LOG({baseNumeric, numeric | @c@{numeric}}) +"," +Calculates the logarithm with specified base as a double value. +Argument and base must be positive numeric values. +Base cannot be equal to 1. + +The default base is e (natural logarithm), in the PostgreSQL mode the default base is base 10. +In MSSQLServer mode the optional base is specified after the argument. 
+ +Single-argument variant of LOG function is deprecated, use [LN](https://h2database.com/html/functions.html#ln) +or [LOG10](https://h2database.com/html/functions.html#log10) instead. +"," +LOG(2, A) +" + "Functions (Numeric)","LOG10"," LOG10(numeric) "," Calculates the base 10 logarithm as a double value. -See also Java ""Math.log10"". +Argument must be a positive numeric value. "," LOG10(A) " "Functions (Numeric)","ORA_HASH"," -ORA_HASH(expression [, bucketLong [, seedLong]]) +@c@ ORA_HASH(expression [, bucketLong [, seedLong]]) "," Computes a hash value. Optional bucket argument determines the maximum returned value. @@ -4130,7 +5039,7 @@ ORA_HASH(A) " "Functions (Numeric)","RADIANS"," -RADIANS(numeric) +@h2@ RADIANS(numeric) "," See also Java ""Math.toRadians"". This method returns a double. @@ -4148,7 +5057,7 @@ SQRT(A) " "Functions (Numeric)","PI"," -PI() +@h2@ PI() "," See also Java ""Math.PI"". This method returns a double. @@ -4166,7 +5075,7 @@ POWER(A, B) " "Functions (Numeric)","RAND"," -{ RAND | RANDOM } ( [ int ] ) +@h2@ { RAND | RANDOM } ( [ int ] ) "," Calling the function without parameter returns the next a pseudo random number. Calling it with an parameter seeds the session's random number generator. @@ -4176,7 +5085,7 @@ RAND() " "Functions (Numeric)","RANDOM_UUID"," -{ RANDOM_UUID | UUID } () +@h2@ { RANDOM_UUID | UUID } () "," Returns a new UUID with 122 pseudo random bits. @@ -4189,16 +5098,16 @@ RANDOM_UUID() " "Functions (Numeric)","ROUND"," -ROUND(numeric [, digitsInt]) +@h2@ ROUND(numeric [, digitsInt]) "," -Rounds to a number of digits, or to the nearest long if the number of digits if not set. -This method returns a numeric (the same type as the input). +Rounds to a number of fractional digits. +This method returns value of the same type as argument, but with adjusted precision and scale, if applicable. 
"," -ROUND(VALUE, 2) +ROUND(N, 2) " "Functions (Numeric)","ROUNDMAGIC"," -ROUNDMAGIC(numeric) +@h2@ ROUNDMAGIC(numeric) "," This function rounds numbers in a good way, but it is slow. It has a special handling for numbers around 0. @@ -4207,11 +5116,11 @@ The value is converted to a String internally, and then the last 4 characters ar '000x' becomes '0000' and '999x' becomes '999999', which is rounded automatically. This method returns a double. "," -ROUNDMAGIC(VALUE/3*3) +ROUNDMAGIC(N/3*3) " "Functions (Numeric)","SECURE_RAND"," -SECURE_RAND(int) +@h2@ SECURE_RAND(int) "," Generates a number of cryptographically secure random numbers. This method returns bytes. @@ -4220,15 +5129,15 @@ CALL SECURE_RAND(16) " "Functions (Numeric)","SIGN"," -SIGN( { numeric | interval } ) +@h2@ SIGN( { numeric | interval } ) "," -Returns -1 if the value is smaller than 0, 0 if zero, and otherwise 1. +Returns -1 if the value is smaller than 0, 0 if zero or NaN, and otherwise 1. "," -SIGN(VALUE) +SIGN(N) " "Functions (Numeric)","ENCRYPT"," -ENCRYPT(algorithmString, keyBytes, dataBytes) +@h2@ ENCRYPT(algorithmString, keyBytes, dataBytes) "," Encrypts data using a key. The supported algorithm is AES. @@ -4239,7 +5148,7 @@ CALL ENCRYPT('AES', '00', STRINGTOUTF8('Test')) " "Functions (Numeric)","DECRYPT"," -DECRYPT(algorithmString, keyBytes, dataBytes) +@h2@ DECRYPT(algorithmString, keyBytes, dataBytes) "," Decrypts data using a key. The supported algorithm is AES. @@ -4251,30 +5160,46 @@ CALL TRIM(CHAR(0) FROM UTF8TOSTRING( " "Functions (Numeric)","HASH"," -HASH(algorithmString, expression [, iterationInt]) +@h2@ HASH(algorithmString, expression [, iterationInt]) "," Calculate the hash value using an algorithm, and repeat this process for a number of iterations. -Currently, the only algorithm supported is SHA256. + +This function supports MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512, SHA3-224, SHA3-256, SHA3-384, and SHA3-512 +algorithms. 
+SHA-224, SHA-384, and SHA-512 may be unavailable in some JREs. + +MD5 and SHA-1 algorithms should not be considered as secure. + +If this function is used to encrypt a password, a random salt should be concatenated with a password and this salt and +result of the function should be stored to prevent a rainbow table attack and number of iterations should be large +enough to slow down a dictionary or a brute force attack. + This method returns bytes. "," -CALL HASH('SHA256', STRINGTOUTF8('Password'), 1000) +CALL HASH('SHA-256', 'Text', 1000) +CALL HASH('SHA3-256', X'0102') " -"Functions (Numeric)","TRUNCATE"," -{ TRUNC | TRUNCATE } ( { {numeric, digitsInt} - | timestamp | timestampWithTimeZone | date | timestampString } ) +"Functions (Numeric)","TRUNC"," +@h2@ { TRUNC | TRUNCATE } ( { {numeric [, digitsInt] } + | @c@ { timestamp | timestampWithTimeZone | date | timestampString } } ) "," -Truncates to a number of digits (to the next value closer to 0). -This method returns a double. -When used with a timestamp, truncates a timestamp to a date (day) value. -When used with a date, truncates a date to a date (day) value less time part. -When used with a timestamp as string, truncates a timestamp to a date (day) value. +When a numeric argument is specified, truncates it to a number of digits (to the next value closer to 0) +and returns value of the same type as argument, but with adjusted precision and scale, if applicable. + +This function with datetime or string argument is deprecated, use +[DATE_TRUNC](https://h2database.com/html/functions.html#date_trunc) instead. +When used with a timestamp, truncates the timestamp to a date (day) value +and returns a timestamp with or without time zone depending on type of the argument. +When used with a date, returns a timestamp at start of this date. +When used with a timestamp as string, truncates the timestamp to a date (day) value +and returns a timestamp without time zone. 
"," -TRUNCATE(VALUE, 2) +TRUNCATE(N, 2) " "Functions (Numeric)","COMPRESS"," -COMPRESS(dataBytes [, algorithmString]) +@h2@ COMPRESS(dataBytes [, algorithmString]) "," Compresses the data using the specified compression algorithm. Supported algorithms are: LZF (faster but lower compression; default), and DEFLATE (higher compression). @@ -4285,7 +5210,7 @@ COMPRESS(STRINGTOUTF8('Test')) " "Functions (Numeric)","EXPAND"," -EXPAND(bytes) +@h2@ EXPAND(bytes) "," Expands data that was compressed using the COMPRESS function. This method returns bytes. @@ -4294,7 +5219,7 @@ UTF8TOSTRING(EXPAND(COMPRESS(STRINGTOUTF8('Test')))) " "Functions (Numeric)","ZERO"," -ZERO() +@h2@ ZERO() "," Returns the value 0. This function can be used even if numeric literals are disabled. "," @@ -4302,7 +5227,7 @@ ZERO() " "Functions (String)","ASCII"," -ASCII(string) +@h2@ ASCII(string) "," Returns the ASCII value of the first character in the string. This method returns an int. @@ -4310,38 +5235,34 @@ This method returns an int. ASCII('Hi') " "Functions (String)","BIT_LENGTH"," -BIT_LENGTH(string) +@h2@ BIT_LENGTH(bytes) "," -Returns the number of bits in a string. +Returns the number of bits in a binary string. This method returns a long. -For BLOB, CLOB, BYTES and JAVA_OBJECT, the precision is used. Each character needs 16 bits. "," BIT_LENGTH(NAME) " -"Functions (String)","LENGTH"," -{ LENGTH | CHAR_LENGTH | CHARACTER_LENGTH } ( string ) +"Functions (String)","CHAR_LENGTH"," +{ CHAR_LENGTH | CHARACTER_LENGTH | @c@ { LENGTH } } ( string ) "," -Returns the number of characters in a string. +Returns the number of characters in a character string. This method returns a long. -For BLOB, CLOB, BYTES and JAVA_OBJECT, the precision is used. "," -LENGTH(NAME) +CHAR_LENGTH(NAME) " "Functions (String)","OCTET_LENGTH"," -OCTET_LENGTH(string) +OCTET_LENGTH(bytes) "," -Returns the number of bytes in a string. +Returns the number of bytes in a binary string. This method returns a long. 
-For BLOB, CLOB, BYTES and JAVA_OBJECT, the precision is used. -Each character needs 2 bytes. "," OCTET_LENGTH(NAME) " "Functions (String)","CHAR"," -{ CHAR | CHR } ( int ) +@h2@ { CHAR | CHR } ( int ) "," Returns the character that represents the ASCII value. This method returns a string. @@ -4350,29 +5271,32 @@ CHAR(65) " "Functions (String)","CONCAT"," -CONCAT(string, string [,...]) +@h2@ CONCAT(string, string [,...]) "," Combines strings. Unlike with the operator ""||"", NULL parameters are ignored, and do not cause the result to become NULL. +If all parameters are NULL the result is an empty string. This method returns a string. "," CONCAT(NAME, '!') " "Functions (String)","CONCAT_WS"," -CONCAT_WS(separatorString, string, string [,...]) +@h2@ CONCAT_WS(separatorString, string, string [,...]) "," Combines strings with separator. -Unlike with the operator ""||"", NULL parameters are ignored, -and do not cause the result to become NULL. +If separator is NULL it is treated like an empty string. +Other NULL parameters are ignored. +Remaining non-NULL parameters, if any, are concatenated with the specified separator. +If there are no remaining parameters the result is an empty string. This method returns a string. "," CONCAT_WS(',', NAME, '!') " "Functions (String)","DIFFERENCE"," -DIFFERENCE(string, string) +@h2@ DIFFERENCE(string, string) "," Returns the difference between the sounds of two strings. The difference is calculated as a number of matched characters @@ -4385,7 +5309,7 @@ DIFFERENCE(T1.NAME, T2.NAME) " "Functions (String)","HEXTORAW"," -HEXTORAW(string) +@h2@ HEXTORAW(string) "," Converts a hex representation of a string to a string. 4 hex characters per string character are used. @@ -4394,29 +5318,17 @@ HEXTORAW(DATA) " "Functions (String)","RAWTOHEX"," -RAWTOHEX(string) +@h2@ RAWTOHEX({string|bytes}) "," -Converts a string to the hex representation. +Converts a string or bytes to the hex representation. 4 hex characters per string character are used. 
This method returns a string. "," RAWTOHEX(DATA) " -"Functions (String)","INSTR"," -INSTR(string, searchString, [, startInt]) -"," -Returns the location of a search string in a string. -If a start position is used, the characters before it are ignored. -If position is negative, the rightmost location is returned. -0 is returned if the search string is not found. -Please note this function is case sensitive, even if the parameters are not. -"," -INSTR(EMAIL,'@') -" - -"Functions (String)","INSERT Function"," -INSERT(originalString, startInt, lengthInt, addString) +"Functions (String)","INSERT Function"," +@h2@ INSERT(originalString, startInt, lengthInt, addString) "," Inserts a additional string into the original string at a specified start position. The length specifies the number of characters that are removed at the start position in the original string. @@ -4426,7 +5338,7 @@ INSERT(NAME, 1, 1, ' ') " "Functions (String)","LOWER"," -{ LOWER | LCASE } ( string ) +{ LOWER | @c@ { LCASE } } ( string ) "," Converts a string to lowercase. "," @@ -4434,7 +5346,7 @@ LOWER(NAME) " "Functions (String)","UPPER"," -{ UPPER | UCASE } ( string ) +{ UPPER | @c@ { UCASE } } ( string ) "," Converts a string to uppercase. "," @@ -4442,7 +5354,7 @@ UPPER(NAME) " "Functions (String)","LEFT"," -LEFT(string, int) +@h2@ LEFT(string, int) "," Returns the leftmost number of characters. "," @@ -4450,7 +5362,7 @@ LEFT(NAME, 3) " "Functions (String)","RIGHT"," -RIGHT(string, int) +@h2@ RIGHT(string, int) "," Returns the rightmost number of characters. "," @@ -4458,26 +5370,21 @@ RIGHT(NAME, 3) " "Functions (String)","LOCATE"," -LOCATE(searchString, string [, startInt]) +@h2@ { LOCATE(searchString, string [, startInt]) } + | @c@ { INSTR(string, searchString, [, startInt]) } + | @c@ { POSITION(searchString, string) } "," Returns the location of a search string in a string. If a start position is used, the characters before it are ignored. 
If position is negative, the rightmost location is returned. 0 is returned if the search string is not found. +Please note this function is case sensitive, even if the parameters are not. "," LOCATE('.', NAME) " -"Functions (String)","POSITION"," -POSITION(searchString, string) -"," -Returns the location of a search string in a string. See also LOCATE. -"," -POSITION('.', NAME) -" - "Functions (String)","LPAD"," -LPAD(string, int[, paddingString]) +@h2@ LPAD(string, int[, paddingString]) "," Left pad the string to the specified length. If the length is shorter than the string, it will be truncated at the end. @@ -4487,7 +5394,7 @@ LPAD(AMOUNT, 10, '*') " "Functions (String)","RPAD"," -RPAD(string, int[, paddingString]) +@h2@ RPAD(string, int[, paddingString]) "," Right pad the string to the specified length. If the length is shorter than the string, it will be truncated. @@ -4497,17 +5404,21 @@ RPAD(TEXT, 10, '-') " "Functions (String)","LTRIM"," -LTRIM(string) +@c@ LTRIM(string) "," Removes all leading spaces from a string. + +This function is deprecated, use [TRIM](https://h2database.com/html/functions.html#trim) instead of it. "," LTRIM(NAME) " "Functions (String)","RTRIM"," -RTRIM(string) +@c@ RTRIM(string) "," Removes all trailing spaces from a string. + +This function is deprecated, use [TRIM](https://h2database.com/html/functions.html#trim) instead of it. "," RTRIM(NAME) " @@ -4522,15 +5433,15 @@ TRIM(BOTH '_' FROM NAME) " "Functions (String)","REGEXP_REPLACE"," -REGEXP_REPLACE(inputString, regexString, replacementString [, flagsString]) +@h2@ REGEXP_REPLACE(inputString, regexString, replacementString [, flagsString]) "," Replaces each substring that matches a regular expression. For details, see the Java ""String.replaceAll()"" method. If any parameter is null (except optional flagsString parameter), the result is null. -Flags values limited to 'i', 'c', 'n', 'm'. Other symbols causes exception. 
-Multiple symbols could be uses in one flagsString parameter (like 'im'). -Later flags overrides first ones, for example 'ic' equivalent to case sensitive matching 'c'. +Flags values are limited to 'i', 'c', 'n', 'm'. Other symbols cause exception. +Multiple symbols could be used in one flagsString parameter (like 'im'). +Later flags override first ones, for example 'ic' is equivalent to case sensitive matching 'c'. 'i' enables case insensitive matching (Pattern.CASE_INSENSITIVE) @@ -4546,15 +5457,15 @@ REGEXP_REPLACE('Hello WWWWorld', 'w+', 'W', 'i') " "Functions (String)","REGEXP_LIKE"," -REGEXP_LIKE(inputString, regexString [, flagsString]) +@h2@ REGEXP_LIKE(inputString, regexString [, flagsString]) "," Matches string to a regular expression. For details, see the Java ""Matcher.find()"" method. If any parameter is null (except optional flagsString parameter), the result is null. -Flags values limited to 'i', 'c', 'n', 'm'. Other symbols causes exception. -Multiple symbols could be uses in one flagsString parameter (like 'im'). -Later flags overrides first ones, for example 'ic' equivalent to case sensitive matching 'c'. +Flags values are limited to 'i', 'c', 'n', 'm'. Other symbols cause exception. +Multiple symbols could be used in one flagsString parameter (like 'im'). +Later flags override first ones, for example 'ic' is equivalent to case sensitive matching 'c'. 'i' enables case insensitive matching (Pattern.CASE_INSENSITIVE) @@ -4568,8 +5479,36 @@ Later flags overrides first ones, for example 'ic' equivalent to case sensitive REGEXP_LIKE('Hello World', '[A-Z ]*', 'i') " +"Functions (String)","REGEXP_SUBSTR"," +@h2@ REGEXP_SUBSTR(inputString, regexString [, positionInt, occurrenceInt, flagsString, groupInt]) +"," +Matches string to a regular expression and returns the matched substring. +For details, see the java.util.regex.Pattern and related functionality. + +The parameter position specifies where in inputString the match should start. 
Occurrence indicates +which occurrence of pattern in inputString to search for. + +Flags values are limited to 'i', 'c', 'n', 'm'. Other symbols cause exception. +Multiple symbols could be used in one flagsString parameter (like 'im'). +Later flags override first ones, for example 'ic' is equivalent to case sensitive matching 'c'. + +'i' enables case insensitive matching (Pattern.CASE_INSENSITIVE) + +'c' disables case insensitive matching (Pattern.CASE_INSENSITIVE) + +'n' allows the period to match the newline character (Pattern.DOTALL) + +'m' enables multiline mode (Pattern.MULTILINE) + +If the pattern has groups, the group parameter can be used to specify which group to return. + +"," +REGEXP_SUBSTR('2020-10-01', '\d{4}') +REGEXP_SUBSTR('2020-10-01', '(\d{4})-(\d{2})-(\d{2})', 1, 1, NULL, 2) +" + "Functions (String)","REPEAT"," -REPEAT(string, int) +@h2@ REPEAT(string, int) "," Returns a string repeated some number of times. "," @@ -4577,7 +5516,7 @@ REPEAT(NAME || ' ', 10) " "Functions (String)","REPLACE"," -REPLACE(string, searchString [, replacementString]) +@h2@ REPLACE(string, searchString [, replacementString]) "," Replaces all occurrences of a search string in a text with another string. If no replacement is specified, the search string is removed from the original string. @@ -4587,7 +5526,7 @@ REPLACE(NAME, ' ') " "Functions (String)","SOUNDEX"," -SOUNDEX(string) +@h2@ SOUNDEX(string) "," Returns a four character code representing the sound of a string. This method returns a string, or null if parameter is null. @@ -4597,7 +5536,7 @@ SOUNDEX(NAME) " "Functions (String)","SPACE"," -SPACE(int) +@h2@ SPACE(int) "," Returns a string consisting of a number of spaces. "," @@ -4605,7 +5544,7 @@ SPACE(80) " "Functions (String)","STRINGDECODE"," -STRINGDECODE(string) +@h2@ STRINGDECODE(string) "," Converts a encoded string using the Java string literal encoding format. Special characters are \b, \t, \n, \f, \r, \"", \\, \, \u. 
@@ -4615,7 +5554,7 @@ CALL STRINGENCODE(STRINGDECODE('Lines 1\nLine 2')) " "Functions (String)","STRINGENCODE"," -STRINGENCODE(string) +@h2@ STRINGENCODE(string) "," Encodes special characters in a string using the Java string literal encoding format. Special characters are \b, \t, \n, \f, \r, \"", \\, \, \u. @@ -4625,7 +5564,7 @@ CALL STRINGENCODE(STRINGDECODE('Lines 1\nLine 2')) " "Functions (String)","STRINGTOUTF8"," -STRINGTOUTF8(string) +@h2@ STRINGTOUTF8(string) "," Encodes a string to a byte array using the UTF8 encoding format. This method returns bytes. @@ -4634,37 +5573,47 @@ CALL UTF8TOSTRING(STRINGTOUTF8('This is a test')) " "Functions (String)","SUBSTRING"," -{ SUBSTRING | SUBSTR } ( string, startInt [, lengthInt ] ) +SUBSTRING ( {string|bytes} FROM startInt [ FOR lengthInt ] ) + | @c@ { { SUBSTRING | SUBSTR } ( {string|bytes}, startInt [, lengthInt ] ) } "," Returns a substring of a string starting at a position. If the start index is negative, then the start index is relative to the end of the string. The length is optional. -Also supported is: ""SUBSTRING(string [FROM start] [FOR length])"". "," -CALL SUBSTR('[Hello]', 2, 5); -CALL SUBSTR('Hello World', -5); +CALL SUBSTRING('[Hello]' FROM 2 FOR 5); +CALL SUBSTRING('hour' FROM 2); " "Functions (String)","UTF8TOSTRING"," -UTF8TOSTRING(bytes) +@h2@ UTF8TOSTRING(bytes) "," Decodes a byte array in the UTF8 format to a string. "," CALL UTF8TOSTRING(STRINGTOUTF8('This is a test')) " +"Functions (String)","QUOTE_IDENT"," +@h2@ QUOTE_IDENT(string) +"," +Quotes the specified identifier. +Identifier is surrounded by double quotes. +If identifier contains double quotes they are repeated twice. +"," +QUOTE_IDENT('Column 1') +" + "Functions (String)","XMLATTR"," -XMLATTR(nameString, valueString) +@h2@ XMLATTR(nameString, valueString) "," Creates an XML attribute element of the form ""name=value"". The value is encoded as XML text. This method returns a string. 
"," -CALL XMLNODE('a', XMLATTR('href', 'http://h2database.com')) +CALL XMLNODE('a', XMLATTR('href', 'https://h2database.com')) " "Functions (String)","XMLNODE"," -XMLNODE(elementString [, attributesString [, contentString [, indentBoolean]]]) +@h2@ XMLNODE(elementString [, attributesString [, contentString [, indentBoolean]]]) "," Create an XML node element. An empty or null attribute string means no attributes are set. @@ -4672,11 +5621,11 @@ An empty or null content string means the node is empty. The content is indented by default if it contains a newline. This method returns a string. "," -CALL XMLNODE('a', XMLATTR('href', 'http://h2database.com'), 'H2') +CALL XMLNODE('a', XMLATTR('href', 'https://h2database.com'), 'H2') " "Functions (String)","XMLCOMMENT"," -XMLCOMMENT(commentString) +@h2@ XMLCOMMENT(commentString) "," Creates an XML comment. Two dashes (""--"") are converted to ""- -"". @@ -4686,7 +5635,7 @@ CALL XMLCOMMENT('Test') " "Functions (String)","XMLCDATA"," -XMLCDATA(valueString) +@h2@ XMLCDATA(valueString) "," Creates an XML CDATA element. If the value contains ""]]>"", an XML text element is created instead. @@ -4696,7 +5645,7 @@ CALL XMLCDATA('data') " "Functions (String)","XMLSTARTDOC"," -XMLSTARTDOC() +@h2@ XMLSTARTDOC() "," Returns the XML declaration. The result is always """". @@ -4705,7 +5654,7 @@ CALL XMLSTARTDOC() " "Functions (String)","XMLTEXT"," -XMLTEXT(valueString [, escapeNewlineBoolean]) +@h2@ XMLTEXT(valueString [, escapeNewlineBoolean]) "," Creates an XML text element. If enabled, newline and linefeed is converted to an XML entity (&#). @@ -4715,7 +5664,7 @@ CALL XMLTEXT('test') " "Functions (String)","TO_CHAR"," -TO_CHAR(value [, formatString[, nlsParamString]]) +@c@ TO_CHAR(value [, formatString[, nlsParamString]]) "," Oracle-compatible TO_CHAR function that can format a timestamp, a number, or text. 
"," @@ -4723,7 +5672,7 @@ CALL TO_CHAR(TIMESTAMP '2010-01-01 00:00:00', 'DD MON, YYYY') " "Functions (String)","TRANSLATE"," -TRANSLATE(value, searchString, replacementString) +@c@ TRANSLATE(value, searchString, replacementString) "," Oracle-compatible TRANSLATE function that replaces a sequence of characters in a string with another set of characters. "," @@ -4731,11 +5680,15 @@ CALL TRANSLATE('Hello world', 'eo', 'EO') " "Functions (Time and Date)","CURRENT_DATE"," -{ CURRENT_DATE | CURDATE() | SYSDATE | TODAY } +CURRENT_DATE | @c@ { CURDATE() | SYSDATE | TODAY } "," Returns the current date. -These methods always return the same value within a transaction (default) + +These functions return the same value within a transaction (default) or within a command depending on database mode. + +[SET TIME ZONE](https://h2database.com/html/commands.html#set_time_zone) command reevaluates the value +for these functions using the same original UTC timestamp of transaction. "," CURRENT_DATE " @@ -4743,14 +5696,17 @@ CURRENT_DATE "Functions (Time and Date)","CURRENT_TIME"," CURRENT_TIME [ (int) ] "," -Returns the current time. -The returned value does not have time zone information, because TIME WITH TIME ZONE data type is not supported in H2. +Returns the current time with time zone. If fractional seconds precision is specified it should be from 0 to 9, 0 is default. The specified value can be used only to limit precision of a result. The actual maximum available precision depends on operating system and JVM and can be 3 (milliseconds) or higher. Higher precision is not available before Java 9. -These methods always return the same value within a transaction (default) + +This function returns the same value within a transaction (default) or within a command depending on database mode. + +[SET TIME ZONE](https://h2database.com/html/commands.html#set_time_zone) command reevaluates the value +for this function using the same original UTC timestamp of transaction. 
"," CURRENT_TIME CURRENT_TIME(9) @@ -4765,45 +5721,63 @@ If fractional seconds precision is specified it should be from 0 to 9, 6 is defa The specified value can be used only to limit precision of a result. The actual maximum available precision depends on operating system and JVM and can be 3 (milliseconds) or higher. Higher precision is not available before Java 9. -This method always returns the same value within a transaction (default) + +This function returns the same value within a transaction (default) or within a command depending on database mode. + +[SET TIME ZONE](https://h2database.com/html/commands.html#set_time_zone) command reevaluates the value +for this function using the same original UTC timestamp of transaction. "," CURRENT_TIMESTAMP CURRENT_TIMESTAMP(9) " "Functions (Time and Date)","LOCALTIME"," -{ LOCALTIME [ (int) ] | CURTIME([ int ]) } +LOCALTIME [ (int) ] | @c@ CURTIME([ int ]) "," -Returns the current time. +Returns the current time without time zone. If fractional seconds precision is specified it should be from 0 to 9, 0 is default. The specified value can be used only to limit precision of a result. The actual maximum available precision depends on operating system and JVM and can be 3 (milliseconds) or higher. Higher precision is not available before Java 9. -These methods always return the same value within a transaction (default) + +These functions return the same value within a transaction (default) or within a command depending on database mode. + +[SET TIME ZONE](https://h2database.com/html/commands.html#set_time_zone) command reevaluates the value +for these functions using the same original UTC timestamp of transaction. "," LOCALTIME LOCALTIME(9) " "Functions (Time and Date)","LOCALTIMESTAMP"," -{ LOCALTIMESTAMP [ (int) ] | NOW( [ int ] ) } +LOCALTIMESTAMP [ (int) ] | @c@ NOW( [ int ] ) "," Returns the current timestamp without time zone. If fractional seconds precision is specified it should be from 0 to 9, 6 is default. 
The specified value can be used only to limit precision of a result. The actual maximum available precision depends on operating system and JVM and can be 3 (milliseconds) or higher. Higher precision is not available before Java 9. -These methods always return the same value within a transaction (default) + +The returned value has date and time without time zone information. +If time zone has DST transitions the returned values are ambiguous during transition from DST to normal time. +For absolute timestamps use the [CURRENT_TIMESTAMP](https://h2database.com/html/functions.html#current_timestamp) +function and [TIMESTAMP WITH TIME ZONE](https://h2database.com/html/datatypes.html#timestamp_with_time_zone_type) +data type. + +These functions return the same value within a transaction (default) or within a command depending on database mode. + +[SET TIME ZONE](https://h2database.com/html/commands.html#set_time_zone) reevaluates the value +for these functions using the same original UTC timestamp of transaction. "," LOCALTIMESTAMP LOCALTIMESTAMP(9) " "Functions (Time and Date)","DATEADD"," -{ DATEADD| TIMESTAMPADD } (datetimeField, addIntLong, dateAndTime) +@h2@ { DATEADD| TIMESTAMPADD } @h2@ (datetimeField, addIntLong, dateAndTime) "," Adds units to a date-time value. The datetimeField indicates the unit. Use negative values to subtract units. @@ -4812,25 +5786,33 @@ microseconds, or nanoseconds otherwise its range is restricted to int. This method returns a value with the same type as specified value if unit is compatible with this value. If specified field is a HOUR, MINUTE, SECOND, MILLISECOND, etc and value is a DATE value DATEADD returns combined TIMESTAMP. Fields DAY, MONTH, YEAR, WEEK, etc are not allowed for TIME values. -Fields TIMEZONE_HOUR and TIMEZONE_MINUTE are only allowed for TIMESTAMP WITH TIME ZONE values. +Fields TIMEZONE_HOUR, TIMEZONE_MINUTE, and TIMEZONE_SECOND are only allowed for TIMESTAMP WITH TIME ZONE values. 
"," DATEADD(MONTH, 1, DATE '2001-01-31') " "Functions (Time and Date)","DATEDIFF"," -{ DATEDIFF | TIMESTAMPDIFF } (datetimeField, aDateAndTime, bDateAndTime) +@h2@ { DATEDIFF | TIMESTAMPDIFF } @h2@ (datetimeField, aDateAndTime, bDateAndTime) "," Returns the number of crossed unit boundaries between two date/time values. This method returns a long. The datetimeField indicates the unit. -Only TIMEZONE_HOUR and TIMEZONE_MINUTE fields use the time zone offset component. +Only TIMEZONE_HOUR, TIMEZONE_MINUTE, and TIMEZONE_SECOND fields use the time zone offset component. With all other fields if date/time values have time zone offset component it is ignored. "," DATEDIFF(YEAR, T1.CREATED, T2.CREATED) " +"Functions (Time and Date)","DATE_TRUNC"," +@h2@ DATE_TRUNC (datetimeField, dateAndTime) +"," +Truncates the specified date-time value to the specified field. +"," +DATE_TRUNC(DAY, TIMESTAMP '2010-01-03 10:40:00'); +" + "Functions (Time and Date)","DAYNAME"," -DAYNAME(dateAndTime) +@h2@ DAYNAME(dateAndTime) "," Returns the name of the day (in English). "," @@ -4838,33 +5820,41 @@ DAYNAME(CREATED) " "Functions (Time and Date)","DAY_OF_MONTH"," -DAY_OF_MONTH(dateAndTime|interval) +@c@ DAY_OF_MONTH({dateAndTime|interval}) "," Returns the day of the month (1-31). + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," DAY_OF_MONTH(CREATED) " "Functions (Time and Date)","DAY_OF_WEEK"," -DAY_OF_WEEK(dateAndTime) +@c@ DAY_OF_WEEK(dateAndTime) "," -Returns the day of the week (1 means Sunday). +Returns the day of the week (1-7), locale-specific. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," DAY_OF_WEEK(CREATED) " "Functions (Time and Date)","ISO_DAY_OF_WEEK"," -ISO_DAY_OF_WEEK(dateAndTime) +@c@ ISO_DAY_OF_WEEK(dateAndTime) "," Returns the ISO day of the week (1 means Monday). 
+ +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," ISO_DAY_OF_WEEK(CREATED) " "Functions (Time and Date)","DAY_OF_YEAR"," -DAY_OF_YEAR(dateAndTime|interval) +@c@ DAY_OF_YEAR({dateAndTime|interval}) "," Returns the day of the year (1-366). + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," DAY_OF_YEAR(CREATED) " @@ -4880,15 +5870,18 @@ EXTRACT(SECOND FROM CURRENT_TIMESTAMP) " "Functions (Time and Date)","FORMATDATETIME"," -FORMATDATETIME ( dateAndTime, formatString +@h2@ FORMATDATETIME ( dateAndTime, formatString [ , localeString [ , timeZoneString ] ] ) "," Formats a date, time or timestamp as a string. The most important format characters are: y year, M month, d day, H hour, m minute, s second. -For details of the format, see ""java.text.SimpleDateFormat"". -timeZoneString may be specified if dateAndTime is a DATE, TIME or TIMESTAMP. -timeZoneString is ignored if dateAndTime is TIMESTAMP WITH TIME ZONE. +For details of the format, see ""java.time.format.DateTimeFormatter"". + +If timeZoneString is specified, it is used in formatted string if formatString has time zone. +If TIMESTAMP WITH TIME ZONE is passed and timeZoneString is specified, +the timestamp is converted to the specified time zone and its UTC value is preserved. + This method returns a string. "," CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', @@ -4896,31 +5889,37 @@ CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', " "Functions (Time and Date)","HOUR"," -HOUR(dateAndTime|interval) +@c@ HOUR({dateAndTime|interval}) "," Returns the hour (0-23) from a date/time value. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," HOUR(CREATED) " "Functions (Time and Date)","MINUTE"," -MINUTE(dateAndTime|interval) +@c@ MINUTE({dateAndTime|interval}) "," Returns the minute (0-59) from a date/time value. 
+ +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," MINUTE(CREATED) " "Functions (Time and Date)","MONTH"," -MONTH(dateAndTime|interval) +@c@ MONTH({dateAndTime|interval}) "," Returns the month (1-12) from a date/time value. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," MONTH(CREATED) " "Functions (Time and Date)","MONTHNAME"," -MONTHNAME(dateAndTime) +@h2@ MONTHNAME(dateAndTime) "," Returns the name of the month (in English). "," @@ -4928,47 +5927,59 @@ MONTHNAME(CREATED) " "Functions (Time and Date)","PARSEDATETIME"," -PARSEDATETIME(string, formatString +@h2@ PARSEDATETIME(string, formatString [, localeString [, timeZoneString]]) "," -Parses a string and returns a timestamp. +Parses a string and returns a TIMESTAMP WITH TIME ZONE value. The most important format characters are: y year, M month, d day, H hour, m minute, s second. -For details of the format, see ""java.text.SimpleDateFormat"". +For details of the format, see ""java.time.format.DateTimeFormatter"". + +If timeZoneString is specified, it is used as default. "," CALL PARSEDATETIME('Sat, 3 Feb 2001 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'en', 'GMT') " "Functions (Time and Date)","QUARTER"," -QUARTER(dateAndTime) +@c@ QUARTER(dateAndTime) "," Returns the quarter (1-4) from a date/time value. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," QUARTER(CREATED) " "Functions (Time and Date)","SECOND"," -SECOND(dateAndTime) +@c@ SECOND(dateAndTime) "," Returns the second (0-59) from a date/time value. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," SECOND(CREATED|interval) " "Functions (Time and Date)","WEEK"," -WEEK(dateAndTime) +@c@ WEEK(dateAndTime) "," Returns the week (1-53) from a date/time value. 
-This method uses the current system locale. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. + +This function uses the current system locale. "," WEEK(CREATED) " "Functions (Time and Date)","ISO_WEEK"," -ISO_WEEK(dateAndTime) +@c@ ISO_WEEK(dateAndTime) "," Returns the ISO week (1-53) from a date/time value. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. + This function uses the ISO definition when first week of year should have at least four days and week is started with Monday. @@ -4977,41 +5988,62 @@ ISO_WEEK(CREATED) " "Functions (Time and Date)","YEAR"," -YEAR(dateAndTime|interval) +@c@ YEAR({dateAndTime|interval}) "," Returns the year from a date/time value. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," YEAR(CREATED) " "Functions (Time and Date)","ISO_YEAR"," -ISO_YEAR(dateAndTime) +@c@ ISO_YEAR(dateAndTime) "," Returns the ISO week year from a date/time value. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," ISO_YEAR(CREATED) " +"Functions (System)","ABORT_SESSION"," +@h2@ ABORT_SESSION(sessionInt) +"," +Cancels the currently executing statement of another session. Closes the session and releases the allocated resources. +Returns true if the session was closed, false if no session with the given id was found. + +If a client was connected while its session was aborted it will see an error. + +Admin rights are required to execute this command. +"," +CALL ABORT_SESSION(3) +" + "Functions (System)","ARRAY_GET"," -ARRAY_GET(arrayExpression, indexExpression) +@c@ ARRAY_GET(arrayExpression, indexExpression) "," Returns element at the specified 1-based index from an array. -Returns NULL if there is no such element or array is NULL. 
+ +This function is deprecated, use +[array element reference](https://www.h2database.com/html/grammar.html#array_element_reference) instead of it. + +Returns NULL if array or index is NULL. "," CALL ARRAY_GET(ARRAY['Hello', 'World'], 2) " -"Functions (System)","ARRAY_LENGTH"," -ARRAY_LENGTH(arrayExpression) +"Functions (System)","CARDINALITY"," +{ CARDINALITY | @c@ { ARRAY_LENGTH } } (arrayExpression) "," Returns the length of an array. Returns NULL if the specified array is NULL. "," -CALL ARRAY_LENGTH(ARRAY['Hello', 'World']) +CALL CARDINALITY(ARRAY['Hello', 'World']) " "Functions (System)","ARRAY_CONTAINS"," -ARRAY_CONTAINS(arrayExpression, value) +@h2@ ARRAY_CONTAINS(arrayExpression, value) "," Returns a boolean TRUE if the array contains the value or FALSE if it does not contain it. Returns NULL if the specified array is NULL. @@ -5020,25 +6052,51 @@ CALL ARRAY_CONTAINS(ARRAY['Hello', 'World'], 'Hello') " "Functions (System)","ARRAY_CAT"," -ARRAY_CAT(arrayExpression, arrayExpression) +@c@ ARRAY_CAT(arrayExpression, arrayExpression) "," Returns the concatenation of two arrays. + +This function is deprecated, use ""||"" instead of it. + Returns NULL if any parameter is NULL. "," CALL ARRAY_CAT(ARRAY[1, 2], ARRAY[3, 4]) " "Functions (System)","ARRAY_APPEND"," -ARRAY_APPEND(arrayExpression, value) +@c@ ARRAY_APPEND(arrayExpression, value) "," Append an element to the end of an array. + +This function is deprecated, use ""||"" instead of it. + Returns NULL if any parameter is NULL. "," CALL ARRAY_APPEND(ARRAY[1, 2], 3) " +"Functions (System)","ARRAY_MAX_CARDINALITY"," +ARRAY_MAX_CARDINALITY(arrayExpression) +"," +Returns the maximum allowed array cardinality (length) of the declared data type of argument. +"," +SELECT ARRAY_MAX_CARDINALITY(COL1) FROM TEST FETCH FIRST ROW ONLY; +" + +"Functions (System)","TRIM_ARRAY"," +TRIM_ARRAY(arrayExpression, int) +"," +Removes the specified number of elements from the end of the array. 
+ +Returns NULL if second parameter is NULL or if first parameter is NULL and second parameter is not negative. +Throws exception if second parameter is negative or larger than number of elements in array. +Otherwise returns the truncated array. +"," +CALL TRIM_ARRAY(ARRAY[1, 2, 3, 4], 1) +" + "Functions (System)","ARRAY_SLICE"," -ARRAY_SLICE(arrayExpression, lowerBoundInt, upperBoundInt) +@h2@ ARRAY_SLICE(arrayExpression, lowerBoundInt, upperBoundInt) "," Returns elements from the array as specified by the lower and upper bound parameters. Both parameters are inclusive and the first element has index 1, i.e. ARRAY_SLICE(a, 2, 2) has only the second element. @@ -5048,7 +6106,7 @@ CALL ARRAY_SLICE(ARRAY[1, 2, 3, 4], 1, 3) " "Functions (System)","AUTOCOMMIT"," -AUTOCOMMIT() +@h2@ AUTOCOMMIT() "," Returns true if auto commit is switched on for this session. "," @@ -5056,10 +6114,9 @@ AUTOCOMMIT() " "Functions (System)","CANCEL_SESSION"," -CANCEL_SESSION(sessionInt) +@h2@ CANCEL_SESSION(sessionInt) "," Cancels the currently executing statement of another session. -The method only works if the multithreaded kernel is enabled (see SET MULTI_THREADED). Returns true if the statement was canceled, false if the session is closed or no statement is currently executing. Admin rights are required to execute this command. @@ -5068,35 +6125,18 @@ CANCEL_SESSION(3) " "Functions (System)","CASEWHEN Function"," -CASEWHEN(boolean, aValue, bValue) -"," -Returns 'a' if the boolean expression is true, otherwise 'b'. -Returns the same data type as the parameter. +@c@ CASEWHEN(boolean, aValue, bValue) "," -CASEWHEN(ID=1, 'A', 'B') -" +Returns 'aValue' if the boolean expression is true, otherwise 'bValue'. -"Functions (System)","CAST"," -CAST(value AS dataType) +This function is deprecated, use [CASE](https://h2database.com/html/grammar.html#searched_case) instead of it. "," -Converts a value to another data type. 
The following conversion rules are used: -When converting a number to a boolean, 0 is false and every other value is true. -When converting a boolean to a number, false is 0 and true is 1. -When converting a number to a number of another type, the value is checked for overflow. -When converting a number to binary, the number of bytes matches the precision. -When converting a string to binary, it is hex encoded (every byte two characters); -a hex string can be converted to a number by first converting it to binary. -If a direct conversion is not possible, the value is first converted to a string. -Note that some data types may need explicitly specified precision to avoid overflow or rounding. -"," -CAST(NAME AS INT); -CAST(65535 AS BINARY); -CAST(CAST('FFFF' AS BINARY) AS INT); -CAST(TIMESTAMP '2010-01-01 10:40:00.123456' AS TIME(6)) +CASEWHEN(ID=1, 'A', 'B') " "Functions (System)","COALESCE"," -{ COALESCE | NVL } (aValue, bValue [,...]) +{ COALESCE | @c@ { NVL } } (aValue, bValue [,...]) + | @c@ IFNULL(aValue, bValue) "," Returns the first value that is not null. "," @@ -5104,69 +6144,32 @@ COALESCE(A, B, C) " "Functions (System)","CONVERT"," -CONVERT(value, dataType) +@c@ CONVERT(value, dataTypeOrDomain) "," Converts a value to another data type. + +This function is deprecated, use [CAST](https://h2database.com/html/grammar.html#cast_specification) instead of it. "," CONVERT(NAME, INT) " "Functions (System)","CURRVAL"," -CURRVAL( [ schemaName, ] sequenceString ) +@c@ CURRVAL( [ schemaNameString, ] sequenceString ) "," -Returns the current (last) value of the sequence, independent of the session. -If the sequence was just created, the method returns (start - interval). +Returns the latest generated value of the sequence for the current session. +Current value may only be requested after generation of the sequence value in the current session. 
+This method exists only for compatibility, when it isn't required use +[CURRENT VALUE FOR sequenceName](https://h2database.com/html/grammar.html#sequence_value_expression) +instead. If the schema name is not set, the current schema is used. -If the schema name is not set, the sequence name is converted to uppercase (for compatibility). +When sequence is not found, the uppercase name is also checked. This method returns a long. "," CURRVAL('TEST_SEQ') " -"Functions (System)","CSVREAD"," -CSVREAD(fileNameString [, columnsString [, csvOptions ] ] ) -"," -Returns the result set of reading the CSV (comma separated values) file. -For each parameter, NULL means the default value should be used. - -If the column names are specified (a list of column names separated with the -fieldSeparator), those are used, otherwise (or if they are set to NULL) the first line of -the file is interpreted as the column names. -In that case, column names that contain no special characters (only letters, '_', -and digits; similar to the rule for Java identifiers) are considered case insensitive. -Other column names are case sensitive, that means you need to use quoted identifiers -(see below). - -The default charset is the default value for this system, and the default field separator -is a comma. Missing unquoted values as well as data that matches nullString is -parsed as NULL. All columns of type VARCHAR. - -The BOM (the byte-order-mark) character 0xfeff at the beginning of the file is ignored. - -This function can be used like a table: ""SELECT * FROM CSVREAD(...)"". - -Instead of a file, an URL may be used, for example -""jar:file:///c:/temp/example.zip!/org/example/nested.csv"". -To read a stream from the classpath, use the prefix ""classpath:"". -To read from HTTP, use the prefix ""http:"" (as in a browser). - -For performance reason, CSVREAD should not be used inside a join. -Instead, import the data first (possibly into a temporary table) and then use the table. 
- -Admin rights are required to execute this command. -"," -CALL CSVREAD('test.csv'); --- Read a file containing the columns ID, NAME with -CALL CSVREAD('test2.csv', 'ID|NAME', 'charset=UTF-8 fieldSeparator=|'); -SELECT * FROM CSVREAD('data/test.csv', null, 'rowSeparator=;'); --- Read a tab-separated file -SELECT * FROM CSVREAD('data/test.tsv', null, 'rowSeparator=' || CHAR(9)); -SELECT ""Last Name"" FROM CSVREAD('address.csv'); -SELECT ""Last Name"" FROM CSVREAD('classpath:/org/acme/data/address.csv'); -" - "Functions (System)","CSVWRITE"," -CSVWRITE ( fileNameString, queryString [, csvOptions [, lineSepString] ] ) +@h2@ CSVWRITE ( fileNameString, queryString [, csvOptions [, lineSepString] ] ) "," Writes a CSV (comma separated values). The file is overwritten if it exists. If only a file name is specified, it will be written to the current working directory. @@ -5188,16 +6191,24 @@ CALL CSVWRITE('data/test2.csv', 'SELECT * FROM TEST', 'charset=UTF-8 fieldSepara CALL CSVWRITE('data/test.tsv', 'SELECT * FROM TEST', 'charset=UTF-8 fieldSeparator=' || CHAR(9)); " -"Functions (System)","DATABASE"," -DATABASE() +"Functions (System)","CURRENT_SCHEMA"," +CURRENT_SCHEMA | @c@ SCHEMA() +"," +Returns the name of the default schema for this session. +"," +CALL CURRENT_SCHEMA +" + +"Functions (System)","CURRENT_CATALOG"," +CURRENT_CATALOG | @c@ DATABASE() "," Returns the name of the database. "," -CALL DATABASE(); +CALL CURRENT_CATALOG " "Functions (System)","DATABASE_PATH"," -DATABASE_PATH() +@h2@ DATABASE_PATH() "," Returns the directory of the database files and the database name, if it is file based. Returns NULL otherwise. @@ -5205,18 +6216,85 @@ Returns NULL otherwise. CALL DATABASE_PATH(); " +"Functions (System)","DATA_TYPE_SQL"," +@h2@ DATA_TYPE_SQL +@h2@ (objectSchemaString, objectNameString, objectTypeString, typeIdentifierString) +"," +Returns SQL representation of data type of the specified +constant, domain, table column, routine result or argument. 
+ +For constants object type is 'CONSTANT' and type identifier is the value of +""INFORMATION_SCHEMA.CONSTANTS.DTD_IDENTIFIER"". + +For domains object type is 'DOMAIN' and type identifier is the value of +""INFORMATION_SCHEMA.DOMAINS.DTD_IDENTIFIER"". + +For columns object type is 'TABLE' and type identifier is the value of +""INFORMATION_SCHEMA.COLUMNS.DTD_IDENTIFIER"". + +For routines object name is the value of ""INFORMATION_SCHEMA.ROUTINES.SPECIFIC_NAME"", +object type is 'ROUTINE', and type identifier is the value of +""INFORMATION_SCHEMA.ROUTINES.DTD_IDENTIFIER"" for data type of the result and the value of +""INFORMATION_SCHEMA.PARAMETERS.DTD_IDENTIFIER"" for data types of arguments. +Aggregate functions aren't supported by this function, because their data type isn't statically known. + +This function returns NULL if any argument is NULL, object type is not valid, or object isn't found. +"," +DATA_TYPE_SQL('PUBLIC', 'C', 'CONSTANT', 'TYPE') +DATA_TYPE_SQL('PUBLIC', 'D', 'DOMAIN', 'TYPE') +DATA_TYPE_SQL('PUBLIC', 'T', 'TABLE', '1') +DATA_TYPE_SQL('PUBLIC', 'R_1', 'ROUTINE', 'RESULT') +DATA_TYPE_SQL('PUBLIC', 'R_1', 'ROUTINE', '1') +COALESCE( + QUOTE_IDENT(DOMAIN_SCHEMA) || '.' || QUOTE_IDENT(DOMAIN_NAME), + DATA_TYPE_SQL(TABLE_SCHEMA, TABLE_NAME, 'TABLE', DTD_IDENTIFIER)) +" + +"Functions (System)","DB_OBJECT_ID"," +@h2@ DB_OBJECT_ID({{'ROLE'|'SETTING'|'SCHEMA'|'USER'}, objectNameString + | {'CONSTANT'|'CONSTRAINT'|'DOMAIN'|'INDEX'|'ROUTINE'|'SEQUENCE' + |'SYNONYM'|'TABLE'|'TRIGGER'}, schemaNameString, objectNameString }) +"," +Returns internal identifier of the specified database object as integer value or NULL if object doesn't exist. + +Admin rights are required to execute this function. 
+"," +CALL DB_OBJECT_ID('ROLE', 'MANAGER'); +CALL DB_OBJECT_ID('TABLE', 'PUBLIC', 'MY_TABLE'); +" + +"Functions (System)","DB_OBJECT_SQL"," +@h2@ DB_OBJECT_SQL({{'ROLE'|'SETTING'|'SCHEMA'|'USER'}, objectNameString + | {'CONSTANT'|'CONSTRAINT'|'DOMAIN'|'INDEX'|'ROUTINE'|'SEQUENCE' + |'SYNONYM'|'TABLE'|'TRIGGER'}, schemaNameString, objectNameString }) +"," +Returns internal SQL definition of the specified database object or NULL if object doesn't exist +or it is a system object without SQL definition. + +This function should not be used to analyze structure of the object by machine code. +Internal SQL representation may contain undocumented non-standard clauses +and may be different in different versions of H2. +Objects are described in the ""INFORMATION_SCHEMA"" in machine-readable way. + +Admin rights are required to execute this function. +"," +CALL DB_OBJECT_SQL('ROLE', 'MANAGER'); +CALL DB_OBJECT_SQL('TABLE', 'PUBLIC', 'MY_TABLE'); +" + "Functions (System)","DECODE"," -DECODE(value, whenValue, thenValue [,...]) +@c@ DECODE(value, whenValue, thenValue [,...]) "," Returns the first matching value. NULL is considered to match NULL. If no match was found, then NULL or the last parameter (if the parameter count is even) is returned. -This function is provided for Oracle compatibility (see there for details). +This function is provided for Oracle compatibility, +use [CASE](https://h2database.com/html/grammar.html#case_expression) instead of it. "," CALL DECODE(RAND()>0.5, 0, 'Red', 1, 'Black'); " "Functions (System)","DISK_SPACE_USED"," -DISK_SPACE_USED(tableNameString) +@h2@ DISK_SPACE_USED(tableNameString) "," Returns the approximate amount of space used by the table specified. Does not currently take into account indexes or LOB's. @@ -5226,7 +6304,7 @@ CALL DISK_SPACE_USED('my_table'); " "Functions (System)","SIGNAL"," -SIGNAL(sqlStateString, messageString) +@h2@ SIGNAL(sqlStateString, messageString) "," Throw an SQLException with the passed SQLState and reason. 
"," @@ -5234,12 +6312,11 @@ CALL SIGNAL('23505', 'Duplicate user ID: ' || user_id); " "Functions (System)","ESTIMATED_ENVELOPE"," -ESTIMATED_ENVELOPE(tableNameString, columnNameString) +@h2@ ESTIMATED_ENVELOPE(tableNameString, columnNameString) "," Returns the estimated minimum bounding box that encloses all specified GEOMETRY values. Only 2D coordinate plane is supported. NULL values are ignored. -This function is only supported by MVStore engine. Column must have a spatial index. This function is fast, but estimation may include uncommitted data (including data from other transactions), may return approximate bounds, or be different with actual value due to other reasons. @@ -5251,7 +6328,7 @@ CALL ESTIMATED_ENVELOPE('MY_TABLE', 'GEOMETRY_COLUMN'); " "Functions (System)","FILE_READ"," -FILE_READ(fileNameString [,encodingString]) +@h2@ FILE_READ(fileNameString [,encodingString]) "," Returns the contents of a file. If only one parameter is supplied, the data are returned as a BLOB. If two parameters are used, the data is returned as a CLOB @@ -5268,7 +6345,7 @@ SELECT FILE_READ('http://localhost:8182/stylesheet.css', NULL) CSS; " "Functions (System)","FILE_WRITE"," -FILE_WRITE(blobValue, fileNameString) +@h2@ FILE_WRITE(blobValue, fileNameString) "," Write the supplied parameter into a file. Return the number of bytes written. @@ -5278,250 +6355,900 @@ SELECT FILE_WRITE('Hello world', '/tmp/hello.txt')) LEN; " "Functions (System)","GREATEST"," -GREATEST(aValue, bValue [,...]) +@h2@ GREATEST(aValue, bValue [,...]) "," Returns the largest value that is not NULL, or NULL if all values are NULL. "," CALL GREATEST(1, 2, 3); " -"Functions (System)","IDENTITY"," -IDENTITY() +"Functions (System)","LEAST"," +@h2@ LEAST(aValue, bValue [,...]) +"," +Returns the smallest value that is not NULL, or NULL if all values are NULL. +"," +CALL LEAST(1, 2, 3); +" + +"Functions (System)","LOCK_MODE"," +@h2@ LOCK_MODE() +"," +Returns the current lock mode. See SET LOCK_MODE. 
+This method returns an int. +"," +CALL LOCK_MODE(); +" + +"Functions (System)","LOCK_TIMEOUT"," +@h2@ LOCK_TIMEOUT() +"," +Returns the lock timeout of the current session (in milliseconds). +"," +LOCK_TIMEOUT() +" + +"Functions (System)","MEMORY_FREE"," +@h2@ MEMORY_FREE() +"," +Returns the free memory in KB (where 1024 bytes is a KB). +This method returns a long. +The garbage is run before returning the value. +Admin rights are required to execute this command. +"," +MEMORY_FREE() +" + +"Functions (System)","MEMORY_USED"," +@h2@ MEMORY_USED() "," -Returns the last inserted identity value for this session. -This value changes whenever a new sequence number was generated, -even within a trigger or Java function. See also SCOPE_IDENTITY. +Returns the used memory in KB (where 1024 bytes is a KB). This method returns a long. +The garbage is run before returning the value. +Admin rights are required to execute this command. "," -CALL IDENTITY(); +MEMORY_USED() " -"Functions (System)","IFNULL"," -IFNULL(aValue, bValue) +"Functions (System)","NEXTVAL"," +@c@ NEXTVAL ( [ schemaNameString, ] sequenceString ) "," -Returns the value of 'a' if it is not null, otherwise 'b'. +Increments the sequence and returns its value. +The current value of the sequence and the last identity in the current session are updated with the generated value. +Used values are never re-used, even when the transaction is rolled back. +This method exists only for compatibility, it's recommended to use the standard +[NEXT VALUE FOR sequenceName](https://h2database.com/html/grammar.html#sequence_value_expression) +instead. +If the schema name is not set, the current schema is used. +When sequence is not found, the uppercase name is also checked. +This method returns a long. 
"," -CALL IFNULL(NULL, ''); +NEXTVAL('TEST_SEQ') " -"Functions (System)","LEAST"," -LEAST(aValue, bValue [,...]) +"Functions (System)","NULLIF"," +NULLIF(aValue, bValue) "," -Returns the smallest value that is not NULL, or NULL if all values are NULL. +Returns NULL if 'a' is equal to 'b', otherwise 'a'. +"," +NULLIF(A, B) +A / NULLIF(B, 0) +" + +"Functions (System)","NVL2"," +@c@ NVL2(testValue, aValue, bValue) +"," +If the test value is null, then 'b' is returned. Otherwise, 'a' is returned. +The data type of the returned value is the data type of 'a' if this is a text type. + +This function is provided for Oracle compatibility, +use [CASE](https://h2database.com/html/grammar.html#case_expression) +or [COALESCE](https://h2database.com/html/functions.html#coalesce) instead of it. +"," +NVL2(X, 'not null', 'null') +" + +"Functions (System)","READONLY"," +@h2@ READONLY() +"," +Returns true if the database is read-only. +"," +READONLY() +" + +"Functions (System)","ROWNUM"," +@h2@ ROWNUM() +"," +Returns the number of the current row. +This method returns a long value. +It is supported for SELECT statements, as well as for DELETE and UPDATE. +The first row has the row number 1, and is calculated before ordering and grouping the result set, +but after evaluating index conditions (even when the index conditions are specified in an outer query). +Use the [ROW_NUMBER() OVER ()](https://h2database.com/html/functions-window.html#row_number) +function to get row numbers after grouping or in specified order. +"," +SELECT ROWNUM(), * FROM TEST; +SELECT ROWNUM(), * FROM (SELECT * FROM TEST ORDER BY NAME); +SELECT ID FROM (SELECT T.*, ROWNUM AS R FROM TEST T) WHERE R BETWEEN 2 AND 3; +" + +"Functions (System)","SESSION_ID"," +@h2@ SESSION_ID() +"," +Returns the unique session id number for the current database connection. +This id stays the same while the connection is open. +This method returns an int. +The database engine may re-use a session id after the connection is closed. 
+"," +CALL SESSION_ID() +" + +"Functions (System)","SET"," +@h2@ SET(@variableName, value) +"," +Updates a variable with the given value. +The new value is returned. +When used in a query, the value is updated in the order the rows are read. +When used in a subquery, not all rows might be read depending on the query plan. +This can be used to implement running totals / cumulative sums. +"," +SELECT X, SET(@I, COALESCE(@I, 0)+X) RUNNING_TOTAL FROM SYSTEM_RANGE(1, 10) +" + +"Functions (System)","TRANSACTION_ID"," +@h2@ TRANSACTION_ID() +"," +Returns the current transaction id for this session. +This method returns NULL if there is no uncommitted change, or if the database is not persisted. +Otherwise a value of the following form is returned: +""logFileId-position-sessionId"". +This method returns a string. +The value is unique across database restarts (values are not re-used). +"," +CALL TRANSACTION_ID() +" + +"Functions (System)","TRUNCATE_VALUE"," +@h2@ TRUNCATE_VALUE(value, precisionInt, forceBoolean) +"," +Truncate a value to the required precision. +If force flag is set to ""FALSE"" fixed precision values are not truncated. +The method returns a value with the same data type as the first parameter. +"," +CALL TRUNCATE_VALUE(X, 10, TRUE); +" + +"Functions (System)","CURRENT_PATH"," +CURRENT_PATH +"," +Returns the comma-separated list of quoted schema names where user-defined functions are searched +when they are referenced without the schema name. +"," +CURRENT_PATH +" + +"Functions (System)","CURRENT_ROLE"," +CURRENT_ROLE +"," +Returns the name of the PUBLIC role. +"," +CURRENT_ROLE +" + +"Functions (System)","CURRENT_USER"," +CURRENT_USER | SESSION_USER | SYSTEM_USER | USER +"," +Returns the name of the current user of this session. +"," +CURRENT_USER +" + +"Functions (System)","H2VERSION"," +@h2@ H2VERSION() +"," +Returns the H2 version as a String. 
+"," +H2VERSION() +" + +"Functions (JSON)","JSON_OBJECT"," +JSON_OBJECT( +[{{[KEY] string VALUE expression} | {string : expression}} [,...] ] +[ { NULL | ABSENT } ON NULL ] +[ { WITH | WITHOUT } UNIQUE KEYS ] +) +"," +Returns a JSON object constructed from the specified properties. +If ABSENT ON NULL is specified properties with NULL value are not included in the object. +If WITH UNIQUE KEYS is specified the constructed object is checked for uniqueness of keys, +nested objects, if any, are checked too. +"," +JSON_OBJECT('id': 100, 'name': 'Joe', 'groups': '[2,5]' FORMAT JSON); +" + +"Functions (JSON)","JSON_ARRAY"," +JSON_ARRAY( +[expression [,...]]|{(query) [FORMAT JSON]} +[ { NULL | ABSENT } ON NULL ] +) +"," +Returns a JSON array constructed from the specified values or from the specified single-column subquery. +If NULL ON NULL is specified NULL values are included in the array. +"," +JSON_ARRAY(10, 15, 20); +JSON_ARRAY(JSON_DATA_A FORMAT JSON, JSON_DATA_B FORMAT JSON); +JSON_ARRAY((SELECT J FROM PROPS) FORMAT JSON); +" + +"Functions (Table)","CSVREAD"," +@h2@ CSVREAD(fileNameString [, columnsString [, csvOptions ] ] ) +"," +Returns the result set of reading the CSV (comma separated values) file. +For each parameter, NULL means the default value should be used. + +If the column names are specified (a list of column names separated with the +fieldSeparator), those are used, otherwise (or if they are set to NULL) the first line of +the file is interpreted as the column names. +In that case, column names that contain no special characters (only letters, '_', +and digits; similar to the rule for Java identifiers) are processed is the same way as unquoted identifiers +and therefore case of characters may be changed. +Other column names are processed as quoted identifiers and case of characters is preserved. +To preserve the case of column names unconditionally use +[caseSensitiveColumnNames](https://h2database.com/html/grammar.html#csv_options) option. 
+ +The default charset is the default value for this system, and the default field separator +is a comma. Missing unquoted values as well as data that matches nullString is +parsed as NULL. All columns are of type VARCHAR. + +The BOM (the byte-order-mark) character 0xfeff at the beginning of the file is ignored. + +This function can be used like a table: ""SELECT * FROM CSVREAD(...)"". + +Instead of a file, a URL may be used, for example +""jar:file:///c:/temp/example.zip!/org/example/nested.csv"". +To read a stream from the classpath, use the prefix ""classpath:"". +To read from HTTP, use the prefix ""http:"" (as in a browser). + +For performance reason, CSVREAD should not be used inside a join. +Instead, import the data first (possibly into a temporary table) and then use the table. + +Admin rights are required to execute this command. +"," +SELECT * FROM CSVREAD('test.csv'); +-- Read a file containing the columns ID, NAME with +SELECT * FROM CSVREAD('test2.csv', 'ID|NAME', 'charset=UTF-8 fieldSeparator=|'); +SELECT * FROM CSVREAD('data/test.csv', null, 'rowSeparator=;'); +-- Read a tab-separated file +SELECT * FROM CSVREAD('data/test.tsv', null, 'rowSeparator=' || CHAR(9)); +SELECT ""Last Name"" FROM CSVREAD('address.csv'); +SELECT ""Last Name"" FROM CSVREAD('classpath:/org/acme/data/address.csv'); +" + +"Functions (Table)","LINK_SCHEMA"," +@h2@ LINK_SCHEMA (targetSchemaString, driverString, urlString, +@h2@ userString, passwordString, sourceSchemaString) +"," +Creates table links for all tables in a schema. +If tables with the same name already exist, they are dropped first. +The target schema is created automatically if it does not yet exist. +The driver name may be empty if the driver is already loaded. +The list of tables linked is returned in the form of a result set. +Admin rights are required to execute this command. 
+"," +SELECT * FROM LINK_SCHEMA('TEST2', '', 'jdbc:h2:./test2', 'sa', 'sa', 'PUBLIC'); +" + +"Functions (Table)","TABLE"," +@h2@ { TABLE | TABLE_DISTINCT } +@h2@ ( { name dataTypeOrDomain = {array|rowValueExpression} } [,...] ) +"," +Returns the result set. TABLE_DISTINCT removes duplicate rows. +"," +SELECT * FROM TABLE(V INT = ARRAY[1, 2]); +SELECT * FROM TABLE(ID INT=(1, 2), NAME VARCHAR=('Hello', 'World')); +" + +"Functions (Table)","UNNEST"," +UNNEST(array, [,...]) [WITH ORDINALITY] +"," +Returns the result set. +Number of columns is equal to number of arguments, +plus one additional column with row number if WITH ORDINALITY is specified. +Number of rows is equal to length of longest specified array. +If multiple arguments are specified and they have different length, cells with missing values will contain null values. +"," +SELECT * FROM UNNEST(ARRAY['a', 'b', 'c']); +" + +"Aggregate Functions (General)","AVG"," +AVG ( [ DISTINCT|ALL ] { numeric | interval } ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The average (mean) value. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +The data type of result is DOUBLE PRECISION for TINYINT, SMALLINT, INTEGER, and REAL arguments, +NUMERIC with additional 10 decimal digits of precision and scale for BIGINT and NUMERIC arguments; +DECFLOAT with additional 10 decimal digits of precision for DOUBLE PRECISION and DECFLOAT arguments; +INTERVAL with the same leading field precision, all additional smaller datetime units in interval qualifier, +and the maximum scale for INTERVAL arguments. +"," +AVG(X) +" + +"Aggregate Functions (General)","MAX"," +MAX(value) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The highest value. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +The returned value is of the same data type as the parameter. 
+"," +MAX(NAME) +" + +"Aggregate Functions (General)","MIN"," +MIN(value) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The lowest value. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +The returned value is of the same data type as the parameter. +"," +MIN(NAME) +" + +"Aggregate Functions (General)","SUM"," +SUM( [ DISTINCT|ALL ] { numeric | interval | @h2@ { boolean } } ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The sum of all values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +The data type of result is BIGINT for BOOLEAN, TINYINT, SMALLINT, and INTEGER arguments; +NUMERIC with additional 10 decimal digits of precision for BIGINT and NUMERIC arguments; +DOUBLE PRECISION for REAL arguments, +DECFLOAT with additional 10 decimal digits of precision for DOUBLE PRECISION and DECFLOAT arguments; +INTERVAL with maximum precision and the same interval qualifier and scale for INTERVAL arguments. +"," +SUM(X) +" + +"Aggregate Functions (General)","EVERY"," +{EVERY| @c@ {BOOL_AND}}(boolean) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +Returns true if all expressions are true. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +EVERY(ID>10) +" + +"Aggregate Functions (General)","ANY"," +{ANY|SOME| @c@ {BOOL_OR}}(boolean) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +Returns true if any expression is true. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +Note that if ANY or SOME aggregate function is placed on the right side of comparison operation or distinct predicate +and argument of this function is a subquery additional parentheses around aggregate function are required, +otherwise it will be parsed as quantified predicate. 
+"," +ANY(NAME LIKE 'W%') +A = (ANY((SELECT B FROM T))) +" + +"Aggregate Functions (General)","COUNT"," +COUNT( { * | { [ DISTINCT|ALL ] expression } } ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The count of all row, or of the non-null values. +This method returns a long. +If no rows are selected, the result is 0. +Aggregates are only allowed in select statements. +"," +COUNT(*) +" + +"Aggregate Functions (General)","STDDEV_POP"," +STDDEV_POP( [ DISTINCT|ALL ] numeric ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The population standard deviation. +This method returns a double. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +STDDEV_POP(X) +" + +"Aggregate Functions (General)","STDDEV_SAMP"," +STDDEV_SAMP( [ DISTINCT|ALL ] numeric ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The sample standard deviation. +This method returns a double. +If less than two rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +STDDEV(X) +" + +"Aggregate Functions (General)","VAR_POP"," +VAR_POP( [ DISTINCT|ALL ] numeric ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The population variance (square of the population standard deviation). +This method returns a double. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +VAR_POP(X) +" + +"Aggregate Functions (General)","VAR_SAMP"," +VAR_SAMP( [ DISTINCT|ALL ] numeric ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The sample variance (square of the sample standard deviation). +This method returns a double. +If less than two rows are selected, the result is NULL. +Aggregates are only allowed in select statements. 
+"," +VAR_SAMP(X) +" + +"Aggregate Functions (General)","BIT_AND_AGG"," +{@h2@{BIT_AND_AGG}|@c@{BIT_AND}}@h2@(expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise AND of all non-null values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +For non-aggregate function see [BITAND](https://h2database.com/html/functions.html#bitand). +"," +BIT_AND_AGG(X) +" + +"Aggregate Functions (General)","BIT_OR_AGG"," +{@h2@{BIT_OR_AGG}|@c@{BIT_OR}}@h2@(expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise OR of all non-null values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +For non-aggregate function see [BITOR](https://h2database.com/html/functions.html#bitor). +"," +BIT_OR_AGG(X) +" + +"Aggregate Functions (General)","BIT_XOR_AGG"," +@h2@ BIT_XOR_AGG( [ DISTINCT|ALL ] expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise XOR of all non-null values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +For non-aggregate function see [BITXOR](https://h2database.com/html/functions.html#bitxor). +"," +BIT_XOR_AGG(X) +" + +"Aggregate Functions (General)","BIT_NAND_AGG"," +@h2@ BIT_NAND_AGG(expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise NAND of all non-null values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +For non-aggregate function see [BITNAND](https://h2database.com/html/functions.html#bitnand). +"," +BIT_NAND_AGG(X) +" + +"Aggregate Functions (General)","BIT_NOR_AGG"," +@h2@ BIT_NOR_AGG(expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise NOR of all non-null values. +If no rows are selected, the result is NULL. 
+Aggregates are only allowed in select statements. + +For non-aggregate function see [BITNOR](https://h2database.com/html/functions.html#bitnor). +"," +BIT_NOR_AGG(X) +" + +"Aggregate Functions (General)","BIT_XNOR_AGG"," +@h2@ BIT_XNOR_AGG( [ DISTINCT|ALL ] expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise XNOR of all non-null values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +For non-aggregate function see [BITXNOR](https://h2database.com/html/functions.html#bitxnor). +"," +BIT_XNOR_AGG(X) +" + +"Aggregate Functions (General)","ENVELOPE"," +@h2@ ENVELOPE( value ) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +Returns the minimum bounding box that encloses all specified GEOMETRY values. +Only 2D coordinate plane is supported. +NULL values are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +ENVELOPE(X) +" + +"Aggregate Functions (Binary Set)","COVAR_POP"," +COVAR_POP(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The population covariance. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +COVAR_POP(Y, X) +" + +"Aggregate Functions (Binary Set)","COVAR_SAMP"," +COVAR_SAMP(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The sample covariance. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If less than two rows are selected, the result is NULL. +Aggregates are only allowed in select statements. 
+"," +COVAR_SAMP(Y, X) +" + +"Aggregate Functions (Binary Set)","CORR"," +CORR(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +Pearson's correlation coefficient. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +CORR(Y, X) +" + +"Aggregate Functions (Binary Set)","REGR_SLOPE"," +REGR_SLOPE(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The slope of the line. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. "," -CALL LEAST(1, 2, 3); +REGR_SLOPE(Y, X) " -"Functions (System)","LOCK_MODE"," -LOCK_MODE() +"Aggregate Functions (Binary Set)","REGR_INTERCEPT"," +REGR_INTERCEPT(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns the current lock mode. See SET LOCK_MODE. -This method returns an int. +The y-intercept of the regression line. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. "," -CALL LOCK_MODE(); +REGR_INTERCEPT(Y, X) " -"Functions (System)","LOCK_TIMEOUT"," -LOCK_TIMEOUT() +"Aggregate Functions (Binary Set)","REGR_COUNT"," +REGR_COUNT(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns the lock timeout of the current session (in milliseconds). +Returns the number of rows in the group. +This method returns a long. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is 0. 
+Aggregates are only allowed in select statements. "," -LOCK_TIMEOUT() +REGR_COUNT(Y, X) " -"Functions (System)","LINK_SCHEMA"," -LINK_SCHEMA(targetSchemaString, driverString, urlString, -userString, passwordString, sourceSchemaString) +"Aggregate Functions (Binary Set)","REGR_R2"," +REGR_R2(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Creates table links for all tables in a schema. -If tables with the same name already exist, they are dropped first. -The target schema is created automatically if it does not yet exist. -The driver name may be empty if the driver is already loaded. -The list of tables linked is returned in the form of a result set. -Admin rights are required to execute this command. +The coefficient of determination. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. "," -CALL LINK_SCHEMA('TEST2', '', 'jdbc:h2:test2', 'sa', 'sa', 'PUBLIC'); +REGR_R2(Y, X) " -"Functions (System)","MEMORY_FREE"," -MEMORY_FREE() +"Aggregate Functions (Binary Set)","REGR_AVGX"," +REGR_AVGX(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns the free memory in KB (where 1024 bytes is a KB). -This method returns an int. -The garbage is run before returning the value. -Admin rights are required to execute this command. +The average (mean) value of dependent expression. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +For details about the data type see [AVG](https://h2database.com/html/functions-aggregate.html#avg). +Aggregates are only allowed in select statements. 
"," -MEMORY_FREE() +REGR_AVGX(Y, X) " -"Functions (System)","MEMORY_USED"," -MEMORY_USED() +"Aggregate Functions (Binary Set)","REGR_AVGY"," +REGR_AVGY(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns the used memory in KB (where 1024 bytes is a KB). -This method returns an int. -The garbage is run before returning the value. -Admin rights are required to execute this command. +The average (mean) value of independent expression. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +For details about the data type see [AVG](https://h2database.com/html/functions-aggregate.html#avg). +Aggregates are only allowed in select statements. "," -MEMORY_USED() +REGR_AVGY(Y, X) " -"Functions (System)","NEXTVAL"," -NEXTVAL ( [ schemaName, ] sequenceString ) +"Aggregate Functions (Binary Set)","REGR_SXX"," +REGR_SXX(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns the next value of the sequence. -Used values are never re-used, even when the transaction is rolled back. -If the schema name is not set, the current schema is used, and the sequence name is converted to uppercase (for compatibility). -This method returns a long. +The the sum of squares of independent expression. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. "," -NEXTVAL('TEST_SEQ') +REGR_SXX(Y, X) " -"Functions (System)","NULLIF"," -NULLIF(aValue, bValue) +"Aggregate Functions (Binary Set)","REGR_SYY"," +REGR_SYY(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns NULL if 'a' is equals to 'b', otherwise 'a'. +The the sum of squares of dependent expression. +Rows in which either argument is NULL are ignored in the calculation. 
+If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. "," -NULLIF(A, B) +REGR_SYY(Y, X) " -"Functions (System)","NVL2"," -NVL2(testValue, aValue, bValue) +"Aggregate Functions (Binary Set)","REGR_SXY"," +REGR_SXY(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -If the test value is null, then 'b' is returned. Otherwise, 'a' is returned. -The data type of the returned value is the data type of 'a' if this is a text type. +The the sum of products independent expression times dependent expression. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. "," -NVL2(X, 'not null', 'null') +REGR_SXY(Y, X) " -"Functions (System)","READONLY"," -READONLY() +"Aggregate Functions (Ordered)","LISTAGG"," +LISTAGG ( [ DISTINCT|ALL ] string [, separatorString] +[ ON OVERFLOW { ERROR + | TRUNCATE [ filterString ] { WITH | WITHOUT } COUNT } ] ) +withinGroupSpecification +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns true if the database is read-only. +Concatenates strings with a separator. +The default separator is a ',' (without space). +This method returns a string. +NULL values are ignored in the calculation, COALESCE can be used to replace them. +If no rows are selected, the result is NULL. + +If ""ON OVERFLOW TRUNCATE"" is specified, values that don't fit into returned string are truncated +and replaced with filter string placeholder ('...' by default) and count of truncated elements in parentheses. +If ""WITHOUT COUNT"" is specified, count of truncated elements is not appended. + +Aggregates are only allowed in select statements. 
"," -READONLY() +LISTAGG(NAME, ', ') WITHIN GROUP (ORDER BY ID) +LISTAGG(COALESCE(NAME, 'null'), ', ') WITHIN GROUP (ORDER BY ID) +LISTAGG(ID, ', ') WITHIN GROUP (ORDER BY ID) OVER (ORDER BY ID) +LISTAGG(ID, ';' ON OVERFLOW TRUNCATE 'etc' WITHOUT COUNT) WITHIN GROUP (ORDER BY ID) " -"Functions (System)","ROWNUM"," -ROWNUM() +"Aggregate Functions (Ordered)","ARRAY_AGG"," +ARRAY_AGG ( @h2@ [ DISTINCT|ALL ] value +[ ORDER BY sortSpecificationList ] ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns the number of the current row. -This method returns a long value. -It is supported for SELECT statements, as well as for DELETE and UPDATE. -The first row has the row number 1, and is calculated before ordering and grouping the result set, -but after evaluating index conditions (even when the index conditions are specified in an outer query). -Use the ROW_NUMBER() OVER () function to get row numbers after grouping or in specified order. +Aggregate the value into an array. +This method returns an array. +NULL values are included in the array, FILTER clause can be used to exclude them. +If no rows are selected, the result is NULL. +If ORDER BY is not specified order of values is not determined. +When this aggregate is used with OVER clause that contains ORDER BY subclause +it does not enforce exact order of values. +This aggregate needs additional own ORDER BY clause to make it deterministic. +Aggregates are only allowed in select statements. 
"," -SELECT ROWNUM(), * FROM TEST; -SELECT ROWNUM(), * FROM (SELECT * FROM TEST ORDER BY NAME); -SELECT ID FROM (SELECT T.*, ROWNUM AS R FROM TEST T) WHERE R BETWEEN 2 AND 3; +ARRAY_AGG(NAME ORDER BY ID) +ARRAY_AGG(NAME ORDER BY ID) FILTER (WHERE NAME IS NOT NULL) +ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID) " -"Functions (System)","SCHEMA"," -SCHEMA() +"Aggregate Functions (Hypothetical Set)","RANK aggregate"," +RANK(value [,...]) +withinGroupSpecification +[FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] "," -Returns the name of the default schema for this session. +Returns the rank of the hypothetical row in specified collection of rows. +The rank of a row is the number of rows that precede this row plus 1. +If two or more rows have the same values in ORDER BY columns, these rows get the same rank from the first row with the same values. +It means that gaps in ranks are possible. + +See [RANK](https://h2database.com/html/functions-window.html#rank) for a window function with the same name. "," -CALL SCHEMA() +SELECT RANK(5) WITHIN GROUP (ORDER BY V) FROM TEST; " -"Functions (System)","SCOPE_IDENTITY"," -SCOPE_IDENTITY() +"Aggregate Functions (Hypothetical Set)","DENSE_RANK aggregate"," +DENSE_RANK(value [,...]) +withinGroupSpecification +[FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] "," -Returns the last inserted identity value for this session for the current scope -(the current statement). -Changes within triggers and Java functions are ignored. See also IDENTITY(). -This method returns a long. +Returns the dense rank of the hypothetical row in specified collection of rows. +The rank of a row is the number of groups of rows with the same values in ORDER BY columns that precede group with this row plus 1. +If two or more rows have the same values in ORDER BY columns, these rows get the same rank. +Gaps in ranks are not possible. 
+ +See [DENSE_RANK](https://h2database.com/html/functions-window.html#dense_rank) for a window function with the same name. "," -CALL SCOPE_IDENTITY(); +SELECT DENSE_RANK(5) WITHIN GROUP (ORDER BY V) FROM TEST; " -"Functions (System)","SESSION_ID"," -SESSION_ID() +"Aggregate Functions (Hypothetical Set)","PERCENT_RANK aggregate"," +PERCENT_RANK(value [,...]) +withinGroupSpecification +[FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] "," -Returns the unique session id number for the current database connection. -This id stays the same while the connection is open. -This method returns an int. -The database engine may re-use a session id after the connection is closed. +Returns the relative rank of the hypothetical row in specified collection of rows. +The relative rank is calculated as (RANK - 1) / (NR - 1), +where RANK is a rank of the row and NR is a total number of rows in the collection including hypothetical row. + +See [PERCENT_RANK](https://h2database.com/html/functions-window.html#percent_rank) for a window function with the same name. "," -CALL SESSION_ID() +SELECT PERCENT_RANK(5) WITHIN GROUP (ORDER BY V) FROM TEST; " -"Functions (System)","SET"," -SET(@variableName, value) +"Aggregate Functions (Hypothetical Set)","CUME_DIST aggregate"," +CUME_DIST(value [,...]) +withinGroupSpecification +[FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] "," -Updates a variable with the given value. -The new value is returned. -When used in a query, the value is updated in the order the rows are read. -When used in a subquery, not all rows might be read depending on the query plan. -This can be used to implement running totals / cumulative sums. +Returns the relative rank of the hypothetical row in specified collection of rows. 
+The relative rank is calculated as NP / NR +where NP is a number of rows that precede the current row or have the same values in ORDER BY columns +and NR is a total number of rows in the collection including hypothetical row. + +See [CUME_DIST](https://h2database.com/html/functions-window.html#cume_dist) for a window function with the same name. "," -SELECT X, SET(@I, IFNULL(@I, 0)+X) RUNNING_TOTAL FROM SYSTEM_RANGE(1, 10) +SELECT CUME_DIST(5) WITHIN GROUP (ORDER BY V) FROM TEST; " -"Functions (System)","TABLE"," -{ TABLE | TABLE_DISTINCT } -( { name dataType = array|rowValueExpression } [,...] ) +"Aggregate Functions (Inverse Distribution)","PERCENTILE_CONT"," +PERCENTILE_CONT(numeric) WITHIN GROUP (ORDER BY sortSpecification) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns the result set. TABLE_DISTINCT removes duplicate rows. +Return percentile of values from the group with interpolation. +Interpolation is only supported for numeric, date-time, and interval data types. +Argument must be between 0 and 1 inclusive. +Argument must be the same for all rows in the same group. +If argument is NULL, the result is NULL. +NULL values are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. "," -SELECT * FROM TABLE(VALUE INT = ARRAY[1, 2]); -SELECT * FROM TABLE(ID INT=(1, 2), NAME VARCHAR=('Hello', 'World')); +PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY V) " -"Functions (System)","TRANSACTION_ID"," -TRANSACTION_ID() +"Aggregate Functions (Inverse Distribution)","PERCENTILE_DISC"," +PERCENTILE_DISC(numeric) WITHIN GROUP (ORDER BY sortSpecification) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns the current transaction id for this session. -This method returns NULL if there is no uncommitted change, or if the database is not persisted. -Otherwise a value of the following form is returned: -""logFileId-position-sessionId"". 
-This method returns a string. -The value is unique across database restarts (values are not re-used). +Return percentile of values from the group. +Interpolation is not performed. +Argument must be between 0 and 1 inclusive. +Argument must be the same for all rows in the same group. +If argument is NULL, the result is NULL. +NULL values are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. "," -CALL TRANSACTION_ID() +PERCENTILE_DISC(0.5) WITHIN GROUP (ORDER BY V) " -"Functions (System)","TRUNCATE_VALUE"," -TRUNCATE_VALUE(value, precisionInt, forceBoolean) +"Aggregate Functions (Inverse Distribution)","MEDIAN"," +@h2@ MEDIAN( [ DISTINCT|ALL ] value ) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] "," -Truncate a value to the required precision. -The precision of the returned value may be a bit larger than requested, -because fixed precision values are not truncated (unlike the numeric TRUNCATE method). -Unlike CAST, the truncating a decimal value may lose precision if the force flag is set to true. -The method returns a value with the same data type as the first parameter. +The value separating the higher half of a values from the lower half. +Returns the middle value or an interpolated value between two middle values if number of values is even. +Interpolation is only supported for numeric, date-time, and interval data types. +NULL values are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. "," -CALL TRUNCATE_VALUE(X, 10, TRUE); +MEDIAN(X) " -"Functions (System)","UNNEST"," -UNNEST(array, [,...]) [WITH ORDINALITY] +"Aggregate Functions (Inverse Distribution)","MODE"," +@h2@ { MODE() WITHIN GROUP (ORDER BY sortSpecification) } + | @c@ { MODE( value [ ORDER BY sortSpecification ] ) } +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] "," -Returns the result set. 
-Number of columns is equal to number of arguments, -plus one additional column with row number if WITH ORDINALITY is specified. -Number of rows is equal to length of longest specified array. -If multiple arguments are specified and they have different length, cells with missing values will contain null values. +Returns the value that occurs with the greatest frequency. +If there are multiple values with the same frequency only one value will be returned. +In this situation value will be chosen based on optional ORDER BY clause +that should specify exactly the same expression as argument of this function. +Use ascending order to get smallest value or descending order to get largest value +from multiple values with the same frequency. +If this clause is not specified the exact chosen value is not determined in this situation. +NULL values are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. "," -SELECT * FROM UNNEST(ARRAY['a', 'b', 'c']); +MODE() WITHIN GROUP (ORDER BY X) " -"Functions (System)","USER"," -{ USER | CURRENT_USER } () +"Aggregate Functions (JSON)","JSON_OBJECTAGG"," +JSON_OBJECTAGG( +{[KEY] string VALUE value} | {string : value} +[ { NULL | ABSENT } ON NULL ] +[ { WITH | WITHOUT } UNIQUE KEYS ] +) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns the name of the current user of this session. +Aggregates the keys with values into a JSON object. +If ABSENT ON NULL is specified properties with NULL value are not included in the object. +If WITH UNIQUE KEYS is specified the constructed object is checked for uniqueness of keys, +nested objects, if any, are checked too. +If no values are selected, the result is SQL NULL value. 
"," -CURRENT_USER() +JSON_OBJECTAGG(NAME: VAL); +JSON_OBJECTAGG(KEY NAME VALUE VAL); " -"Functions (System)","H2VERSION"," -H2VERSION() +"Aggregate Functions (JSON)","JSON_ARRAYAGG"," +JSON_ARRAYAGG( @h2@ [ DISTINCT|ALL ] expression +[ ORDER BY sortSpecificationList ] +[ { NULL | ABSENT } ON NULL ] ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] "," -Returns the H2 version as a String. +Aggregates the values into a JSON array. +If NULL ON NULL is specified NULL values are included in the array. +If no values are selected, the result is SQL NULL value. "," -H2VERSION() +JSON_ARRAYAGG(NUMBER) " -"Functions (Window)","ROW_NUMBER"," +"Window Functions (Row Number)","ROW_NUMBER"," ROW_NUMBER() OVER windowNameOrSpecification "," Returns the number of the current row starting with 1. @@ -5534,7 +7261,7 @@ SELECT ROW_NUMBER() OVER (ORDER BY ID), * FROM TEST; SELECT ROW_NUMBER() OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; " -"Functions (Window)","RANK"," +"Window Functions (Rank)","RANK"," RANK() OVER windowNameOrSpecification "," Returns the rank of the current row. @@ -5545,12 +7272,14 @@ This function requires window order clause. Window frame clause is not allowed for this function. Window functions in H2 may require a lot of memory for large queries. + +See [RANK aggregate](https://h2database.com/html/functions-aggregate.html#rank_aggregate) for a hypothetical set function with the same name. "," SELECT RANK() OVER (ORDER BY ID), * FROM TEST; SELECT RANK() OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; " -"Functions (Window)","DENSE_RANK"," +"Window Functions (Rank)","DENSE_RANK"," DENSE_RANK() OVER windowNameOrSpecification "," Returns the dense rank of the current row. @@ -5561,12 +7290,14 @@ This function requires window order clause. Window frame clause is not allowed for this function. Window functions in H2 may require a lot of memory for large queries. 
+ +See [DENSE_RANK aggregate](https://h2database.com/html/functions-aggregate.html#dense_rank_aggregate) for a hypothetical set function with the same name. "," SELECT DENSE_RANK() OVER (ORDER BY ID), * FROM TEST; SELECT DENSE_RANK() OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; " -"Functions (Window)","PERCENT_RANK"," +"Window Functions (Rank)","PERCENT_RANK"," PERCENT_RANK() OVER windowNameOrSpecification "," Returns the relative rank of the current row. @@ -5576,12 +7307,14 @@ Note that result is always 0 if window order clause is not specified. Window frame clause is not allowed for this function. Window functions in H2 may require a lot of memory for large queries. + +See [PERCENT_RANK aggregate](https://h2database.com/html/functions-aggregate.html#percent_rank_aggregate) for a hypothetical set function with the same name. "," SELECT PERCENT_RANK() OVER (ORDER BY ID), * FROM TEST; SELECT PERCENT_RANK() OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; " -"Functions (Window)","CUME_DIST"," +"Window Functions (Rank)","CUME_DIST"," CUME_DIST() OVER windowNameOrSpecification "," Returns the relative rank of the current row. @@ -5592,29 +7325,14 @@ Note that result is always 1 if window order clause is not specified. Window frame clause is not allowed for this function. Window functions in H2 may require a lot of memory for large queries. + +See [CUME_DIST aggregate](https://h2database.com/html/functions-aggregate.html#cume_dist_aggregate) for a hypothetical set function with the same name. "," SELECT CUME_DIST() OVER (ORDER BY ID), * FROM TEST; SELECT CUME_DIST() OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; " -"Functions (Window)","NTILE"," -NTILE(long) OVER windowNameOrSpecification -"," -Distributes the rows into a specified number of groups. -Number of groups should be a positive long value. -NTILE returns the 1-based number of the group to which the current row belongs. 
-First groups will have more rows if number of rows is not divisible by number of groups. -For example, if 5 rows are distributed into 2 groups this function returns 1 for the first 3 row and 2 for the last 2 rows. -This function requires window order clause. -Window frame clause is not allowed for this function. - -Window functions in H2 may require a lot of memory for large queries. -"," -SELECT NTILE(10) OVER (ORDER BY ID), * FROM TEST; -SELECT NTILE(5) OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; -" - -"Functions (Window)","LEAD"," +"Window Functions (Lead or Lag)","LEAD"," LEAD(value [, offsetInt [, defaultValue]]) [{RESPECT|IGNORE} NULLS] OVER windowNameOrSpecification "," @@ -5635,7 +7353,7 @@ SELECT LEAD(X, 2, 0) IGNORE NULLS OVER ( ), * FROM TEST; " -"Functions (Window)","LAG"," +"Window Functions (Lead or Lag)","LAG"," LAG(value [, offsetInt [, defaultValue]]) [{RESPECT|IGNORE} NULLS] OVER windowNameOrSpecification "," @@ -5656,7 +7374,7 @@ SELECT LAG(X, 2, 0) IGNORE NULLS OVER ( ), * FROM TEST; " -"Functions (Window)","FIRST_VALUE"," +"Window Functions (Nth Value)","FIRST_VALUE"," FIRST_VALUE(value) [{RESPECT|IGNORE} NULLS] OVER windowNameOrSpecification "," @@ -5669,7 +7387,7 @@ SELECT FIRST_VALUE(X) OVER (ORDER BY ID), * FROM TEST; SELECT FIRST_VALUE(X) IGNORE NULLS OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; " -"Functions (Window)","LAST_VALUE"," +"Window Functions (Nth Value)","LAST_VALUE"," LAST_VALUE(value) [{RESPECT|IGNORE} NULLS] OVER windowNameOrSpecification "," @@ -5688,7 +7406,7 @@ SELECT LAST_VALUE(X) IGNORE NULLS OVER ( ), * FROM TEST; " -"Functions (Window)","NTH_VALUE"," +"Window Functions (Nth Value)","NTH_VALUE"," NTH_VALUE(value, nInt) [FROM {FIRST|LAST}] [{RESPECT|IGNORE} NULLS] OVER windowNameOrSpecification "," @@ -5709,9 +7427,26 @@ SELECT NTH_VALUE(X) IGNORE NULLS OVER ( ), * FROM TEST; " -"Functions (Window)","RATIO_TO_REPORT"," -RATIO_TO_REPORT(value) -OVER windowNameOrSpecification +"Window Functions 
(Other)","NTILE"," +NTILE(long) OVER windowNameOrSpecification +"," +Distributes the rows into a specified number of groups. +Number of groups should be a positive long value. +NTILE returns the 1-based number of the group to which the current row belongs. +First groups will have more rows if number of rows is not divisible by number of groups. +For example, if 5 rows are distributed into 2 groups this function returns 1 for the first 3 row and 2 for the last 2 rows. +This function requires window order clause. +Window frame clause is not allowed for this function. + +Window functions in H2 may require a lot of memory for large queries. +"," +SELECT NTILE(10) OVER (ORDER BY ID), * FROM TEST; +SELECT NTILE(5) OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; +" + +"Window Functions (Other)","RATIO_TO_REPORT"," +@h2@ RATIO_TO_REPORT(value) +@h2@ OVER windowNameOrSpecification "," Returns the ratio of a value to the sum of all values. If argument is NULL or sum of all values is 0, then the value of function is NULL. @@ -5728,10 +7463,10 @@ INFORMATION_SCHEMA To get the list of system tables, execute the statement SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'INFORMATION_SCHEMA' "," - " + "System Tables","Range Table"," -SYSTEM_RANGE(start, end) +@h2@ SYSTEM_RANGE(start, end [, step]) "," Contains all values from start to end (this is a dynamic table). "," diff --git a/h2/src/main/org/h2/res/javadoc.properties b/h2/src/main/org/h2/res/javadoc.properties index 0dce60c8fe..fabf642fa4 100644 --- a/h2/src/main/org/h2/res/javadoc.properties +++ b/h2/src/main/org/h2/res/javadoc.properties @@ -4,38 +4,34 @@ org.h2.jmx.DatabaseInfoMBean.getCacheSizeMax=The maximum cache size in KB. org.h2.jmx.DatabaseInfoMBean.getFileReadCount=The file read count since the database was opened. org.h2.jmx.DatabaseInfoMBean.getFileSize=The database file size in KB. org.h2.jmx.DatabaseInfoMBean.getFileWriteCount=The number of write operations since the database was opened. 
-org.h2.jmx.DatabaseInfoMBean.getFileWriteCountTotal=The number of write operations since the database was created. -org.h2.jmx.DatabaseInfoMBean.getLogMode=The transaction log mode (0 disabled, 1 without sync, 2 enabled). org.h2.jmx.DatabaseInfoMBean.getMode=The database compatibility mode (REGULAR if no compatibility mode is\n used). org.h2.jmx.DatabaseInfoMBean.getTraceLevel=The trace level (0 disabled, 1 error, 2 info, 3 debug). org.h2.jmx.DatabaseInfoMBean.getVersion=The database version. org.h2.jmx.DatabaseInfoMBean.isExclusive=Is the database open in exclusive mode? -org.h2.jmx.DatabaseInfoMBean.isMultiThreaded=Is multi-threading enabled? -org.h2.jmx.DatabaseInfoMBean.isMvcc=Is MVCC (multi version concurrency) enabled? org.h2.jmx.DatabaseInfoMBean.isReadOnly=Is the database read-only? org.h2.jmx.DatabaseInfoMBean.listSessions=List sessions, including the queries that are in\n progress, and locked tables. org.h2.jmx.DatabaseInfoMBean.listSettings=List the database settings. -org.h2.tools.Backup=Creates a backup of a database.\nThis tool copies all database files. The database must be closed before using\n this tool. To create a backup while the database is in use, run the BACKUP\n SQL statement. In an emergency, for example if the application is not\n responding, creating a backup using the Backup tool is possible by using the\n quiet mode. However, if the database is changed while the backup is running\n in quiet mode, the backup could be corrupt. -org.h2.tools.Backup.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] 
Print the list of options\n[-file ] The target file name (default\: backup.zip)\n[-dir ] The source directory (default\: .)\n[-db ] Source database; not required if there is only one\n[-quiet] Do not print progress information -org.h2.tools.ChangeFileEncryption=Allows changing the database file encryption password or algorithm.\nThis tool can not be used to change a password of a user.\n The database must be closed before using this tool. -org.h2.tools.ChangeFileEncryption.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-cipher type] The encryption type (AES)\n[-dir ] The database directory (default\: .)\n[-db ] Database name (all databases if not set)\n[-decrypt ] The decryption password (if not set\: not yet encrypted)\n[-encrypt ] The encryption password (if not set\: do not encrypt)\n[-quiet] Do not print progress information +org.h2.tools.Backup=Creates a backup of a database.\n\n This tool copies all database files. The database must be closed before using\n this tool. To create a backup while the database is in use, run the BACKUP\n SQL statement. In an emergency, for example if the application is not\n responding, creating a backup using the Backup tool is possible by using the\n quiet mode. However, if the database is changed while the backup is running\n in quiet mode, the backup could be corrupt. +org.h2.tools.Backup.main=Options are case sensitive.\nSupported options are\:[-help] or [-?]Print the list of options\n[-file ] The target file name (default\: backup.zip)\n[-dir ] The source directory (default\: .)\n[-db ] Source database; not required if there is only one\n[-quiet] Do not print progress information +org.h2.tools.ChangeFileEncryption=Allows changing the database file encryption password or algorithm.\n\n This tool can not be used to change a password of a user.\n The database must be closed before using this tool. 
+org.h2.tools.ChangeFileEncryption.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-cipher type] The encryption type (AES)\n[-dir ] The database directory (default\: .)\n[-db ] Database name (all databases if not set)\n[-decrypt ] The decryption password (if not set\: not yet encrypted)\n[-encrypt ] The encryption password (if not set\: do not encrypt)\n[-quiet] Do not print progress information org.h2.tools.Console=Starts the H2 Console (web-) server, as well as the TCP and PG server. -org.h2.tools.Console.main=When running without options, -tcp, -web, -browser and -pg are started.\nOptions are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-url] Start a browser and connect to this URL\n[-driver] Used together with -url\: the driver\n[-user] Used together with -url\: the user name\n[-password] Used together with -url\: the password\n[-web] Start the web server with the H2 Console\n[-tool] Start the icon or window that allows to start a browser\n[-browser] Start a browser connecting to the web server\n[-tcp] Start the TCP server\n[-pg] Start the PG server\nFor each Server, additional options are available;\n for details, see the Server tool.\nIf a service can not be started, the program\n terminates with an exit code of 1. -org.h2.tools.ConvertTraceFile=Converts a .trace.db file to a SQL script and Java source code.\nSQL statement statistics are listed as well. -org.h2.tools.ConvertTraceFile.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-traceFile ] The trace file name (default\: test.trace.db)\n[-script ] The script file name (default\: test.sql)\n[-javaClass ] The Java directory and class file name (default\: Test) -org.h2.tools.CreateCluster=Creates a cluster from a stand-alone database.\nCopies a database to another location if required. -org.h2.tools.CreateCluster.main=Options are case sensitive. 
Supported options are\:\n[-help] or [-?] Print the list of options\n[-urlSource ""] The database URL of the source database (jdbc\:h2\:...)\n[-urlTarget ""] The database URL of the target database (jdbc\:h2\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-serverList ] The comma separated list of host names or IP addresses -org.h2.tools.DeleteDbFiles=Deletes all files belonging to a database.\nThe database must be closed before calling this tool. -org.h2.tools.DeleteDbFiles.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-dir ] The directory (default\: .)\n[-db ] The database name\n[-quiet] Do not print progress information +org.h2.tools.Console.main=When running without options, -tcp, -web, -browser and -pg are started.\n\n Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-url] Start a browser and connect to this URL\n[-driver] Used together with -url\: the driver\n[-user] Used together with -url\: the user name\n[-password] Used together with -url\: the password\n[-web] Start the web server with the H2 Console\n[-tool] Start the icon or window that allows to start a browser\n[-browser] Start a browser connecting to the web server\n[-tcp] Start the TCP server\n[-pg] Start the PG server\nFor each Server, additional options are available;\n for details, see the Server tool.\n If a service can not be started, the program\n terminates with an exit code of 1. +org.h2.tools.ConvertTraceFile=Converts a .trace.db file to a SQL script and Java source code.\n\n SQL statement statistics are listed as well. 
+org.h2.tools.ConvertTraceFile.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-traceFile ] The trace file name (default\: test.trace.db)\n[-script ] The script file name (default\: test.sql)\n[-javaClass ] The Java directory and class file name (default\: Test) +org.h2.tools.CreateCluster=Creates a cluster from a stand-alone database.\n\n Copies a database to another location if required. +org.h2.tools.CreateCluster.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-urlSource ""] The database URL of the source database (jdbc\:h2\:...)\n[-urlTarget ""] The database URL of the target database (jdbc\:h2\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-serverList ] The comma separated list of host names or IP addresses +org.h2.tools.DeleteDbFiles=Deletes all files belonging to a database.\n\n The database must be closed before calling this tool. +org.h2.tools.DeleteDbFiles.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-dir ] The directory (default\: .)\n[-db ] The database name\n[-quiet] Do not print progress information org.h2.tools.Recover=Helps recovering a corrupted database. -org.h2.tools.Recover.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-dir ] The directory (default\: .)\n[-db ] The database name (all databases if not set)\n[-trace] Print additional trace information\n[-transactionLog] Print the transaction log\nEncrypted databases need to be decrypted first. +org.h2.tools.Recover.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-dir ] The directory (default\: .)\n[-db ] The database name (all databases if not set)\n[-trace] Print additional trace information\n[-transactionLog] Print the transaction log\nEncrypted databases need to be decrypted first. 
org.h2.tools.Restore=Restores a H2 database by extracting the database files from a .zip file. -org.h2.tools.Restore.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-file ] The source file name (default\: backup.zip)\n[-dir ] The target directory (default\: .)\n[-db ] The target database name (as stored if not set)\n[-quiet] Do not print progress information +org.h2.tools.Restore.main=Options are case sensitive. Supported options\nSupported options[-help] or [-?]Print the list of options\n[-file ] The source file name (default\: backup.zip)\n[-dir ] The target directory (default\: .)\n[-db ] The target database name (as stored if not set)\n[-quiet] Do not print progress information org.h2.tools.RunScript=Runs a SQL script against a database. -org.h2.tools.RunScript.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-url ""] The database URL (jdbc\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-script ] The script file to run (default\: backup.sql)\n[-driver ] The JDBC driver class to use (not required in most cases)\n[-showResults] Show the statements and the results of queries\n[-checkResults] Check if the query results match the expected results\n[-continueOnError] Continue even if the script contains errors\n[-options ...] RUNSCRIPT options (embedded H2; -*Results not supported) +org.h2.tools.RunScript.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-url ""] The database URL (jdbc\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-script ] The script file to run (default\: backup.sql)\n[-driver ] The JDBC driver class to use (not required in most cases)\n[-showResults] Show the statements and the results of queries\n[-checkResults] Check if the query results match the expected results\n[-continueOnError] Continue even if the script contains errors\n[-options ...] 
RUNSCRIPT options (embedded H2; -*Results not supported) org.h2.tools.Script=Creates a SQL script file by extracting the schema and data of a database. -org.h2.tools.Script.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-url ""] The database URL (jdbc\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-script ] The target script file name (default\: backup.sql)\n[-options ...] A list of options (only for embedded H2, see SCRIPT)\n[-quiet] Do not print progress information +org.h2.tools.Script.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-url ""] The database URL (jdbc\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-script ] The target script file name (default\: backup.sql)\n[-options ...] A list of options (only for embedded H2, see SCRIPT)\n[-quiet] Do not print progress information org.h2.tools.Server=Starts the H2 Console (web-) server, TCP, and PG server. -org.h2.tools.Server.main=When running without options, -tcp, -web, -browser and -pg are started.\nOptions are case sensitive. Supported options are\:\n[-help] or [-?] 
Print the list of options\n[-web] Start the web server with the H2 Console\n[-webAllowOthers] Allow other computers to connect - see below\n[-webDaemon] Use a daemon thread\n[-webPort ] The port (default\: 8082)\n[-webSSL] Use encrypted (HTTPS) connections\n[-webAdminPassword] Password of DB Console administrator\n[-browser] Start a browser connecting to the web server\n[-tcp] Start the TCP server\n[-tcpAllowOthers] Allow other computers to connect - see below\n[-tcpDaemon] Use a daemon thread\n[-tcpPort ] The port (default\: 9092)\n[-tcpSSL] Use encrypted (SSL) connections\n[-tcpPassword ] The password for shutting down a TCP server\n[-tcpShutdown ""] Stop the TCP server; example\: tcp\://localhost\n[-tcpShutdownForce] Do not wait until all connections are closed\n[-pg] Start the PG server\n[-pgAllowOthers] Allow other computers to connect - see below\n[-pgDaemon] Use a daemon thread\n[-pgPort ] The port (default\: 5435)\n[-properties ""] Server properties (default\: ~, disable\: null)\n[-baseDir ] The base directory for H2 databases (all servers)\n[-ifExists] Only existing databases may be opened (all servers)\n[-ifNotExists] Databases are created when accessed\n[-trace] Print additional trace information (all servers)\n[-key ] Allows to map a database name to another (all servers)\nThe options -xAllowOthers are potentially risky.\nFor details, see Advanced Topics / Protection against Remote Access. 
+org.h2.tools.Server.main=When running without options, -tcp, -web, -browser and -pg are started.\n\n Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-web] Start the web server with the H2 Console\n[-webAllowOthers] Allow other computers to connect - see below\n[-webDaemon] Use a daemon thread\n[-webPort ] The port (default\: 8082)\n[-webSSL] Use encrypted (HTTPS) connections\n[-webAdminPassword] Password of DB Console administrator\n[-browser] Start a browser connecting to the web server\n[-tcp] Start the TCP server\n[-tcpAllowOthers] Allow other computers to connect - see below\n[-tcpDaemon] Use a daemon thread\n[-tcpPort ] The port (default\: 9092)\n[-tcpSSL] Use encrypted (SSL) connections\n[-tcpPassword ] The password for shutting down a TCP server\n[-tcpShutdown ""] Stop the TCP server; example\: tcp\://localhost\n[-tcpShutdownForce] Do not wait until all connections are closed\n[-pg] Start the PG server\n[-pgAllowOthers] Allow other computers to connect - see below\n[-pgDaemon] Use a daemon thread\n[-pgPort ] The port (default\: 5435)\n[-properties ""] Server properties (default\: ~, disable\: null)\n[-baseDir ] The base directory for H2 databases (all servers)\n[-ifExists] Only existing databases may be opened (all servers)\n[-ifNotExists] Databases are created when accessed\n[-trace] Print additional trace information (all servers)\n[-key ] Allows to map a database name to another (all servers)\nThe options -xAllowOthers are potentially risky.\n\n For details, see Advanced Topics / Protection against Remote Access. org.h2.tools.Shell=Interactive command line tool to access a database using JDBC. -org.h2.tools.Shell.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] 
Print the list of options\n[-url ""] The database URL (jdbc\:h2\:...)\n[-user ] The user name\n[-password ] The password\n[-driver ] The JDBC driver class to use (not required in most cases)\n[-sql ""] Execute the SQL statements and exit\n[-properties ""] Load the server properties from this directory\nIf special characters don't work as expected, you may need to use\n -Dfile.encoding\=UTF-8 (Mac OS X) or CP850 (Windows). +org.h2.tools.Shell.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-url ""] The database URL (jdbc\:h2\:...)\n[-user ] The user name\n[-password ] The password\n[-driver ] The JDBC driver class to use (not required in most cases)\n[-sql ""] Execute the SQL statements and exit\n[-properties ""] Load the server properties from this directory\nIf special characters don't work as expected, you may need to use\n -Dfile.encoding\=UTF-8 (Mac OS X) or CP850 (Windows). diff --git a/h2/src/main/org/h2/result/DefaultRow.java b/h2/src/main/org/h2/result/DefaultRow.java new file mode 100644 index 0000000000..a9fe6c4063 --- /dev/null +++ b/h2/src/main/org/h2/result/DefaultRow.java @@ -0,0 +1,116 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import org.h2.engine.Constants; +import org.h2.value.Value; +import org.h2.value.ValueBigint; + +/** + * The default implementation of a row in a table. + */ +public class DefaultRow extends Row { + + /** + * The constant that means "memory usage is unknown and needs to be calculated first". + */ + public static final int MEMORY_CALCULATE = -1; + + /** + * The values of the row (one entry per column). 
+ */ + protected final Value[] data; + + private int memory; + + DefaultRow(int columnCount) { + this.data = new Value[columnCount]; + this.memory = MEMORY_CALCULATE; + } + + public DefaultRow(Value[] data) { + this.data = data; + this.memory = MEMORY_CALCULATE; + } + + public DefaultRow(Value[] data, int memory) { + this.data = data; + this.memory = memory; + } + + @Override + public Value getValue(int i) { + return i == ROWID_INDEX ? ValueBigint.get(key) : data[i]; + } + + @Override + public void setValue(int i, Value v) { + if (i == ROWID_INDEX) { + key = v.getLong(); + } else { + data[i] = v; + } + } + + @Override + public int getColumnCount() { + return data.length; + } + + @Override + public int getMemory() { + if (memory != MEMORY_CALCULATE) { + return memory; + } + return memory = calculateMemory(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder("( /* key:").append(key).append(" */ "); + for (int i = 0, length = data.length; i < length; i++) { + if (i > 0) { + builder.append(", "); + } + Value v = data[i]; + builder.append(v == null ? "null" : v.getTraceSQL()); + } + return builder.append(')').toString(); + } + + /** + * Calculate the estimated memory used for this row, in bytes. 
+ * + * @return the memory + */ + protected int calculateMemory() { + int m = Constants.MEMORY_ROW + Constants.MEMORY_ARRAY + data.length * Constants.MEMORY_POINTER; + for (Value v : data) { + if (v != null) { + m += v.getMemory(); + } + } + return m; + } + + @Override + public Value[] getValueList() { + return data; + } + + @Override + public boolean hasSharedData(Row other) { + return other instanceof DefaultRow && data == ((DefaultRow) other).data; + } + + @Override + public void copyFrom(SearchRow source) { + setKey(source.getKey()); + for (int i = 0; i < getColumnCount(); i++) { + setValue(i, source.getValue(i)); + } + } +} diff --git a/h2/src/main/org/h2/result/FetchedResult.java b/h2/src/main/org/h2/result/FetchedResult.java new file mode 100644 index 0000000000..6882ede34c --- /dev/null +++ b/h2/src/main/org/h2/result/FetchedResult.java @@ -0,0 +1,69 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import org.h2.engine.Session; +import org.h2.value.Value; + +/** + * Abstract fetched result. 
+ */ +public abstract class FetchedResult implements ResultInterface { + + long rowId = -1; + + Value[] currentRow; + + Value[] nextRow; + + boolean afterLast; + + FetchedResult() { + } + + @Override + public final Value[] currentRow() { + return currentRow; + } + + @Override + public final boolean next() { + if (hasNext()) { + rowId++; + currentRow = nextRow; + nextRow = null; + return true; + } + if (!afterLast) { + rowId++; + currentRow = null; + afterLast = true; + } + return false; + } + + @Override + public final boolean isAfterLast() { + return afterLast; + } + + @Override + public final long getRowId() { + return rowId; + } + + @Override + public final boolean needToClose() { + return true; + } + + @Override + public final ResultInterface createShallowCopy(Session targetSession) { + // The operation is not supported on fetched result. + return null; + } + +} diff --git a/h2/src/main/org/h2/result/LazyResult.java b/h2/src/main/org/h2/result/LazyResult.java index e4c01dcea2..66c6187343 100644 --- a/h2/src/main/org/h2/result/LazyResult.java +++ b/h2/src/main/org/h2/result/LazyResult.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.result; -import org.h2.engine.SessionInterface; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.value.TypeInfo; @@ -16,21 +16,19 @@ * * @author Sergi Vladykin */ -public abstract class LazyResult implements ResultInterface { +public abstract class LazyResult extends FetchedResult { + private final SessionLocal session; private final Expression[] expressions; - private int rowId = -1; - private Value[] currentRow; - private Value[] nextRow; private boolean closed; - private boolean afterLast; - private int limit; + private long limit; - public LazyResult(Expression[] expressions) { + public LazyResult(SessionLocal session, Expression[] expressions) { + this.session = session; this.expressions = expressions; } - public void setLimit(int limit) { + public void setLimit(long limit) { this.limit = limit; } @@ -42,35 +40,14 @@ public boolean isLazy() { @Override public void reset() { if (closed) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } - rowId = -1; + rowId = -1L; afterLast = false; currentRow = null; nextRow = null; } - @Override - public Value[] currentRow() { - return currentRow; - } - - @Override - public boolean next() { - if (hasNext()) { - rowId++; - currentRow = nextRow; - nextRow = null; - return true; - } - if (!afterLast) { - rowId++; - currentRow = null; - afterLast = true; - } - return false; - } - /** * Go to the next row and skip it. 
* @@ -120,25 +97,10 @@ protected boolean skipNextRow() { } @Override - public boolean isAfterLast() { - return afterLast; - } - - @Override - public int getRowId() { - return rowId; - } - - @Override - public int getRowCount() { + public long getRowCount() { throw DbException.getUnsupportedException("Row count is unknown for lazy result."); } - @Override - public boolean needToClose() { - return true; - } - @Override public boolean isClosed() { return closed; @@ -151,7 +113,7 @@ public void close() { @Override public String getAlias(int i) { - return expressions[i].getAlias(); + return expressions[i].getAlias(session, i); } @Override @@ -166,7 +128,7 @@ public String getTableName(int i) { @Override public String getColumnName(int i) { - return expressions[i].getColumnName(); + return expressions[i].getColumnName(session, i); } @Override @@ -175,8 +137,8 @@ public TypeInfo getColumnType(int i) { } @Override - public boolean isAutoIncrement(int i) { - return expressions[i].isAutoIncrement(); + public boolean isIdentity(int i) { + return expressions[i].isIdentity(); } @Override @@ -195,10 +157,4 @@ public int getFetchSize() { return 1; } - @Override - public ResultInterface createShallowCopy(SessionInterface targetSession) { - // Copying is impossible with lazy result. - return null; - } - } diff --git a/h2/src/main/org/h2/result/LocalResult.java b/h2/src/main/org/h2/result/LocalResult.java index 948063b012..fa630ed495 100644 --- a/h2/src/main/org/h2/result/LocalResult.java +++ b/h2/src/main/org/h2/result/LocalResult.java @@ -1,20 +1,135 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.result; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.TreeMap; + +import org.h2.engine.Database; +import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.SysProperties; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.message.DbException; +import org.h2.mvstore.db.MVTempResult; +import org.h2.table.Column; +import org.h2.table.Table; +import org.h2.util.Utils; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueLob; +import org.h2.value.ValueRow; /** * A local result set contains all row data of a result set. - * The object is generated by {@link LocalResultFactory}, + * This is the object generated by engine, * and it is also used directly by the ResultSet class in the embedded mode. - * The memory usage and other policies are defined by implementation. + * If the result does not fit in memory, it is written to a temporary file. */ -public interface LocalResult extends ResultInterface, ResultTarget { +public class LocalResult implements ResultInterface, ResultTarget { + + /** + * Constructs a new local result object for the specified table. + * + * @param session + * the session + * @param table + * the table + * @return the local result + */ + public static LocalResult forTable(SessionLocal session, Table table) { + Column[] columns = table.getColumns(); + int degree = columns.length; + Expression[] expressions = new Expression[degree + 1]; + Database database = session.getDatabase(); + for (int i = 0; i < degree; i++) { + expressions[i] = new ExpressionColumn(database, columns[i]); + } + Column rowIdColumn = table.getRowIdColumn(); + expressions[degree] = rowIdColumn != null ? 
new ExpressionColumn(database, rowIdColumn) + : new ExpressionColumn(database, null, table.getName()); + return new LocalResult(session, expressions, degree, degree + 1); + } + + private int maxMemoryRows; + private final SessionLocal session; + private int visibleColumnCount; + private int resultColumnCount; + private Expression[] expressions; + private boolean forDataChangeDeltaTable; + private long rowId, rowCount; + private ArrayList rows; + private SortOrder sort; + // HashSet cannot be used here, because we need to compare values of + // different type or scale properly. + private TreeMap distinctRows; + private Value[] currentRow; + private long offset; + private long limit = -1; + private boolean fetchPercent; + private SortOrder withTiesSortOrder; + private boolean limitsWereApplied; + private ResultExternal external; + private boolean distinct; + private int[] distinctIndexes; + private boolean closed; + private boolean containsLobs; + private Boolean containsNull; + + /** + * Construct a local result object. + */ + public LocalResult() { + this(null); + } + + private LocalResult(SessionLocal session) { + this.session = session; + } + + /** + * Construct a local result object. 
+ * + * @param session + * the session + * @param expressions + * the expression array + * @param visibleColumnCount + * the number of visible columns + * @param resultColumnCount + * the number of columns including visible columns and additional + * virtual columns for ORDER BY and DISTINCT ON clauses + */ + public LocalResult(SessionLocal session, Expression[] expressions, int visibleColumnCount, int resultColumnCount) { + this.session = session; + if (session == null) { + this.maxMemoryRows = Integer.MAX_VALUE; + } else { + Database db = session.getDatabase(); + if (db.isPersistent() && !db.isReadOnly()) { + this.maxMemoryRows = session.getDatabase().getMaxMemoryRows(); + } else { + this.maxMemoryRows = Integer.MAX_VALUE; + } + } + rows = Utils.newSmallArrayList(); + this.visibleColumnCount = visibleColumnCount; + this.resultColumnCount = resultColumnCount; + rowId = -1; + this.expressions = expressions; + } + + @Override + public boolean isLazy() { + return false; + } + /** * Redefine count of maximum rows holds in memory for the result. * @@ -22,7 +137,58 @@ public interface LocalResult extends ResultInterface, ResultTarget { * * @see SysProperties#MAX_MEMORY_ROWS */ - public void setMaxMemoryRows(int maxValue); + public void setMaxMemoryRows(int maxValue) { + this.maxMemoryRows = maxValue; + } + + /** + * Sets value collection mode for data change delta tables. + */ + public void setForDataChangeDeltaTable() { + forDataChangeDeltaTable = true; + } + + /** + * Create a shallow copy of the result set. The data and a temporary table + * (if there is any) is not copied. 
+ * + * @param targetSession the session of the copy + * @return the copy if possible, or null if copying is not possible + */ + @Override + public LocalResult createShallowCopy(Session targetSession) { + if (external == null && (rows == null || rows.size() < rowCount)) { + return null; + } + if (containsLobs) { + return null; + } + ResultExternal e2 = null; + if (external != null) { + e2 = external.createShallowCopy(); + if (e2 == null) { + return null; + } + } + LocalResult copy = new LocalResult((SessionLocal) targetSession); + copy.maxMemoryRows = this.maxMemoryRows; + copy.visibleColumnCount = this.visibleColumnCount; + copy.resultColumnCount = this.resultColumnCount; + copy.expressions = this.expressions; + copy.rowId = -1; + copy.rowCount = this.rowCount; + copy.rows = this.rows; + copy.sort = this.sort; + copy.distinctRows = this.distinctRows; + copy.distinct = distinct; + copy.distinctIndexes = distinctIndexes; + copy.currentRow = null; + copy.offset = 0; + copy.limit = -1; + copy.external = e2; + copy.containsNull = containsNull; + return copy; + } /** * Sets sort order to be used by this result. When rows are presorted by the @@ -30,19 +196,36 @@ public interface LocalResult extends ResultInterface, ResultTarget { * * @param sort the sort order */ - public void setSortOrder(SortOrder sort); + public void setSortOrder(SortOrder sort) { + this.sort = sort; + } /** * Remove duplicate rows. */ - public void setDistinct(); + public void setDistinct() { + assert distinctIndexes == null; + distinct = true; + distinctRows = new TreeMap<>(session.getDatabase().getCompareMode()); + } /** * Remove rows with duplicates in columns with specified indexes. 
* * @param distinctIndexes distinct indexes */ - public void setDistinct(int[] distinctIndexes); + public void setDistinct(int[] distinctIndexes) { + assert !distinct; + this.distinctIndexes = distinctIndexes; + distinctRows = new TreeMap<>(session.getDatabase().getCompareMode()); + } + + /** + * @return whether this result is a distinct result + */ + private boolean isAnyDistinct() { + return distinct || distinctIndexes != null; + } /** * Check if this result set contains the given row. @@ -50,7 +233,21 @@ public interface LocalResult extends ResultInterface, ResultTarget { * @param values the row * @return true if the row exists */ - boolean containsDistinct(Value[] values); + public boolean containsDistinct(Value[] values) { + assert values.length == visibleColumnCount; + if (external != null) { + return external.contains(values); + } + if (distinctRows == null) { + distinctRows = new TreeMap<>(session.getDatabase().getCompareMode()); + for (Value[] row : rows) { + ValueRow array = getDistinctRow(row); + distinctRows.put(array, array.getList()); + } + } + ValueRow array = ValueRow.get(values); + return distinctRows.get(array) != null; + } /** * Check if this result set contains a NULL value. This method may reset @@ -58,31 +255,346 @@ public interface LocalResult extends ResultInterface, ResultTarget { * * @return true if there is a NULL value */ - boolean containsNull(); + public boolean containsNull() { + Boolean r = containsNull; + if (r == null) { + r = false; + reset(); + loop: while (next()) { + Value[] row = currentRow; + for (int i = 0; i < visibleColumnCount; i++) { + if (row[i].containsNull()) { + r = true; + break loop; + } + } + } + reset(); + containsNull = r; + } + return r; + } /** * Remove the row from the result set if it exists. 
* * @param values the row */ - public void removeDistinct(Value[] values); + public void removeDistinct(Value[] values) { + if (!distinct) { + throw DbException.getInternalError(); + } + assert values.length == visibleColumnCount; + if (distinctRows != null) { + distinctRows.remove(ValueRow.get(values)); + rowCount = distinctRows.size(); + } else { + rowCount = external.removeRow(values); + } + } + + @Override + public void reset() { + rowId = -1; + currentRow = null; + if (external != null) { + external.reset(); + } + } + + /** + * Retrieve the current row + * @return row + */ + public Row currentRowForTable() { + int degree = visibleColumnCount; + Value[] currentRow = this.currentRow; + Row row = session.getDatabase().getRowFactory() + .createRow(Arrays.copyOf(currentRow, degree), SearchRow.MEMORY_CALCULATE); + row.setKey(currentRow[degree].getLong()); + return row; + } + + @Override + public Value[] currentRow() { + return currentRow; + } + + @Override + public boolean next() { + if (!closed && rowId < rowCount) { + rowId++; + if (rowId < rowCount) { + if (external != null) { + currentRow = external.next(); + } else { + currentRow = rows.get((int) rowId); + } + return true; + } + currentRow = null; + } + return false; + } + + @Override + public long getRowId() { + return rowId; + } + + @Override + public boolean isAfterLast() { + return rowId >= rowCount; + } + + private void cloneLobs(Value[] values) { + for (int i = 0; i < values.length; i++) { + Value v = values[i]; + if (v instanceof ValueLob) { + if (forDataChangeDeltaTable) { + containsLobs = true; + } else { + ValueLob v2 = ((ValueLob) v).copyToResult(); + if (v2 != v) { + containsLobs = true; + values[i] = session.addTemporaryLob(v2); + } + } + } + } + } + + private ValueRow getDistinctRow(Value[] values) { + if (distinctIndexes != null) { + int cnt = distinctIndexes.length; + Value[] newValues = new Value[cnt]; + for (int i = 0; i < cnt; i++) { + newValues[i] = values[distinctIndexes[i]]; + } + values = 
newValues; + } else if (values.length > visibleColumnCount) { + values = Arrays.copyOf(values, visibleColumnCount); + } + return ValueRow.get(values); + } + + private void createExternalResult() { + external = MVTempResult.of(session.getDatabase(), expressions, distinct, distinctIndexes, visibleColumnCount, + resultColumnCount, sort); + } + + /** + * Add a row for a table. + * + * @param row the row to add + */ + public void addRowForTable(Row row) { + int degree = visibleColumnCount; + Value[] values = new Value[degree + 1]; + for (int i = 0; i < degree; i++) { + values[i] = row.getValue(i); + } + values[degree] = ValueBigint.get(row.getKey()); + addRowInternal(values); + } + + /** + * Add a row to this object. + * + * @param values the row to add + */ + @Override + public void addRow(Value... values) { + assert values.length == resultColumnCount; + cloneLobs(values); + addRowInternal(values); + } + + private void addRowInternal(Value... values) { + if (isAnyDistinct()) { + if (distinctRows != null) { + ValueRow distinctRow = getDistinctRow(values); + Value[] previous = distinctRows.get(distinctRow); + if (previous == null || sort != null && sort.compare(previous, values) > 0) { + distinctRows.put(distinctRow, values); + } + rowCount = distinctRows.size(); + if (rowCount > maxMemoryRows) { + createExternalResult(); + rowCount = external.addRows(distinctRows.values()); + distinctRows = null; + } + } else { + rowCount = external.addRow(values); + } + } else { + rows.add(values); + rowCount++; + if (rows.size() > maxMemoryRows) { + addRowsToDisk(); + } + } + } + + private void addRowsToDisk() { + if (external == null) { + createExternalResult(); + } + rowCount = external.addRows(rows); + rows.clear(); + } + + @Override + public int getVisibleColumnCount() { + return visibleColumnCount; + } /** * This method is called after all rows have been added. 
*/ - public void done(); + public void done() { + if (external != null) { + addRowsToDisk(); + } else { + if (isAnyDistinct()) { + rows = new ArrayList<>(distinctRows.values()); + } + if (sort != null && limit != 0 && !limitsWereApplied) { + boolean withLimit = limit > 0 && withTiesSortOrder == null; + if (offset > 0 || withLimit) { + int endExclusive = rows.size(); + if (offset < endExclusive) { + int fromInclusive = (int) offset; + if (withLimit && limit < endExclusive - fromInclusive) { + endExclusive = fromInclusive + (int) limit; + } + sort.sort(rows, fromInclusive, endExclusive); + } + } else { + sort.sort(rows); + } + } + } + applyOffsetAndLimit(); + reset(); + } + + private void applyOffsetAndLimit() { + if (limitsWereApplied) { + return; + } + long offset = Math.max(this.offset, 0); + long limit = this.limit; + if (offset == 0 && limit < 0 && !fetchPercent || rowCount == 0) { + return; + } + if (fetchPercent) { + if (limit < 0 || limit > 100) { + throw DbException.getInvalidValueException("FETCH PERCENT", limit); + } + // Oracle rounds percent up, do the same for now + limit = (limit * rowCount + 99) / 100; + } + boolean clearAll = offset >= rowCount || limit == 0; + if (!clearAll) { + long remaining = rowCount - offset; + limit = limit < 0 ? 
remaining : Math.min(remaining, limit); + if (offset == 0 && remaining <= limit) { + return; + } + } else { + limit = 0; + } + distinctRows = null; + rowCount = limit; + if (external == null) { + if (clearAll) { + rows.clear(); + return; + } + int to = (int) (offset + limit); + if (withTiesSortOrder != null) { + Value[] expected = rows.get(to - 1); + while (to < rows.size() && withTiesSortOrder.compare(expected, rows.get(to)) == 0) { + to++; + rowCount++; + } + } + if (offset != 0 || to != rows.size()) { + // avoid copying the whole array for each row + rows = new ArrayList<>(rows.subList((int) offset, to)); + } + } else { + if (clearAll) { + external.close(); + external = null; + return; + } + trimExternal(offset, limit); + } + } + + private void trimExternal(long offset, long limit) { + ResultExternal temp = external; + external = null; + temp.reset(); + while (--offset >= 0) { + temp.next(); + } + Value[] row = null; + while (--limit >= 0) { + row = temp.next(); + rows.add(row); + if (rows.size() > maxMemoryRows) { + addRowsToDisk(); + } + } + if (withTiesSortOrder != null && row != null) { + Value[] expected = row; + while ((row = temp.next()) != null && withTiesSortOrder.compare(expected, row) == 0) { + rows.add(row); + rowCount++; + if (rows.size() > maxMemoryRows) { + addRowsToDisk(); + } + } + } + if (external != null) { + addRowsToDisk(); + } + temp.close(); + } + + @Override + public long getRowCount() { + return rowCount; + } + + @Override + public void limitsWereApplied() { + this.limitsWereApplied = true; + } + + @Override + public boolean hasNext() { + return !closed && rowId < rowCount - 1; + } /** * Set the number of rows that this result will return at the maximum. 
* * @param limit the limit (-1 means no limit, 0 means no rows) */ - public void setLimit(int limit); + public void setLimit(long limit) { + this.limit = limit; + } /** * @param fetchPercent whether limit expression specifies percentage of rows */ - public void setFetchPercent(boolean fetchPercent); + public void setFetchPercent(boolean fetchPercent) { + this.fetchPercent = fetchPercent; + } /** * Enables inclusion of tied rows to result and sets the sort order for tied @@ -92,12 +604,93 @@ public interface LocalResult extends ResultInterface, ResultTarget { * * @param withTiesSortOrder the sort order for tied rows */ - public void setWithTies(SortOrder withTiesSortOrder); + public void setWithTies(SortOrder withTiesSortOrder) { + assert sort == null || sort == withTiesSortOrder; + this.withTiesSortOrder = withTiesSortOrder; + } + + @Override + public boolean needToClose() { + return external != null; + } + + @Override + public void close() { + if (external != null) { + external.close(); + external = null; + closed = true; + } + } + + @Override + public String getAlias(int i) { + return expressions[i].getAlias(session, i); + } + + @Override + public String getTableName(int i) { + return expressions[i].getTableName(); + } + + @Override + public String getSchemaName(int i) { + return expressions[i].getSchemaName(); + } + + @Override + public String getColumnName(int i) { + return expressions[i].getColumnName(session, i); + } + + @Override + public TypeInfo getColumnType(int i) { + return expressions[i].getType(); + } + + @Override + public int getNullable(int i) { + return expressions[i].getNullable(); + } + + @Override + public boolean isIdentity(int i) { + return expressions[i].isIdentity(); + } /** * Set the offset of the first row to return. 
* * @param offset the offset */ - public void setOffset(int offset); + public void setOffset(long offset) { + this.offset = offset; + } + + @Override + public String toString() { + return super.toString() + " columns: " + visibleColumnCount + + " rows: " + rowCount + " pos: " + rowId; + } + + /** + * Check if this result set is closed. + * + * @return true if it is + */ + @Override + public boolean isClosed() { + return closed; + } + + @Override + public int getFetchSize() { + return 0; + } + + @Override + public void setFetchSize(int fetchSize) { + // ignore + } + } diff --git a/h2/src/main/org/h2/result/LocalResultFactory.java b/h2/src/main/org/h2/result/LocalResultFactory.java deleted file mode 100644 index c50833e1c0..0000000000 --- a/h2/src/main/org/h2/result/LocalResultFactory.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.result; - -import org.h2.engine.Session; -import org.h2.expression.Expression; - -/** - * Creates local result. - */ -public abstract class LocalResultFactory { - /** - * Default implementation of local result factory. - */ - public static final LocalResultFactory DEFAULT = new DefaultLocalResultFactory(); - - /** - * Create a local result object. - * - * @param session the session - * @param expressions the expression array - * @param visibleColumnCount the number of visible columns - * @return object to collect local result. - */ - public abstract LocalResult create(Session session, Expression[] expressions, int visibleColumnCount); - - /** - * Create a local result object. - * @return object to collect local result. - */ - public abstract LocalResult create(); - - /** - * Default implementation of local result factory. 
- */ - private static final class DefaultLocalResultFactory extends LocalResultFactory { - /** - * - */ - DefaultLocalResultFactory() { - //No-op. - } - - @Override - public LocalResult create(Session session, Expression[] expressions, int visibleColumnCount) { - return new LocalResultImpl(session, expressions, visibleColumnCount); - } - - @Override - public LocalResult create() { - return new LocalResultImpl(); - } - } -} diff --git a/h2/src/main/org/h2/result/LocalResultImpl.java b/h2/src/main/org/h2/result/LocalResultImpl.java deleted file mode 100644 index 7da54e1664..0000000000 --- a/h2/src/main/org/h2/result/LocalResultImpl.java +++ /dev/null @@ -1,588 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.result; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.TreeMap; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.SessionInterface; -import org.h2.expression.Expression; -import org.h2.message.DbException; -import org.h2.mvstore.db.MVTempResult; -import org.h2.util.Utils; -import org.h2.value.TypeInfo; -import org.h2.value.Value; -import org.h2.value.ValueRow; - -/** - * A local result set contains all row data of a result set. - * This is the object generated by engine, - * and it is also used directly by the ResultSet class in the embedded mode. - * If the result does not fit in memory, it is written to a temporary file. - */ -public class LocalResultImpl implements LocalResult { - - private int maxMemoryRows; - private Session session; - private int visibleColumnCount; - private Expression[] expressions; - private int rowId, rowCount; - private ArrayList rows; - private SortOrder sort; - // HashSet cannot be used here, because we need to compare values of - // different type or scale properly. 
- private TreeMap distinctRows; - private Value[] currentRow; - private int offset; - private int limit = -1; - private boolean fetchPercent; - private SortOrder withTiesSortOrder; - private boolean limitsWereApplied; - private ResultExternal external; - private boolean distinct; - private int[] distinctIndexes; - private boolean closed; - private boolean containsLobs; - private Boolean containsNull; - - /** - * Construct a local result object. - */ - public LocalResultImpl() { - // nothing to do - } - - /** - * Construct a local result object. - * - * @param session the session - * @param expressions the expression array - * @param visibleColumnCount the number of visible columns - */ - public LocalResultImpl(Session session, Expression[] expressions, - int visibleColumnCount) { - this.session = session; - if (session == null) { - this.maxMemoryRows = Integer.MAX_VALUE; - } else { - Database db = session.getDatabase(); - if (db.isPersistent() && !db.isReadOnly()) { - this.maxMemoryRows = session.getDatabase().getMaxMemoryRows(); - } else { - this.maxMemoryRows = Integer.MAX_VALUE; - } - } - rows = Utils.newSmallArrayList(); - this.visibleColumnCount = visibleColumnCount; - rowId = -1; - this.expressions = expressions; - } - - @Override - public boolean isLazy() { - return false; - } - - @Override - public void setMaxMemoryRows(int maxValue) { - this.maxMemoryRows = maxValue; - } - - /** - * Create a shallow copy of the result set. The data and a temporary table - * (if there is any) is not copied. 
- * - * @param targetSession the session of the copy - * @return the copy if possible, or null if copying is not possible - */ - @Override - public LocalResultImpl createShallowCopy(SessionInterface targetSession) { - if (external == null && (rows == null || rows.size() < rowCount)) { - return null; - } - if (containsLobs) { - return null; - } - ResultExternal e2 = null; - if (external != null) { - e2 = external.createShallowCopy(); - if (e2 == null) { - return null; - } - } - LocalResultImpl copy = new LocalResultImpl(); - copy.maxMemoryRows = this.maxMemoryRows; - copy.session = (Session) targetSession; - copy.visibleColumnCount = this.visibleColumnCount; - copy.expressions = this.expressions; - copy.rowId = -1; - copy.rowCount = this.rowCount; - copy.rows = this.rows; - copy.sort = this.sort; - copy.distinctRows = this.distinctRows; - copy.distinct = distinct; - copy.distinctIndexes = distinctIndexes; - copy.currentRow = null; - copy.offset = 0; - copy.limit = -1; - copy.external = e2; - copy.containsNull = containsNull; - return copy; - } - - @Override - public void setSortOrder(SortOrder sort) { - this.sort = sort; - } - - /** - * Remove duplicate rows. - */ - @Override - public void setDistinct() { - assert distinctIndexes == null; - distinct = true; - distinctRows = new TreeMap<>(session.getDatabase().getCompareMode()); - } - - /** - * Remove rows with duplicates in columns with specified indexes. - * - * @param distinctIndexes distinct indexes - */ - @Override - public void setDistinct(int[] distinctIndexes) { - assert !distinct; - this.distinctIndexes = distinctIndexes; - distinctRows = new TreeMap<>(session.getDatabase().getCompareMode()); - } - - /** - * @return whether this result is a distinct result - */ - private boolean isAnyDistinct() { - return distinct || distinctIndexes != null; - } - - /** - * Remove the row from the result set if it exists. 
- * - * @param values the row - */ - @Override - public void removeDistinct(Value[] values) { - if (!distinct) { - DbException.throwInternalError(); - } - assert values.length == visibleColumnCount; - if (distinctRows != null) { - ValueRow array = ValueRow.get(values); - distinctRows.remove(array); - rowCount = distinctRows.size(); - } else { - rowCount = external.removeRow(values); - } - } - - /** - * Check if this result set contains the given row. - * - * @param values the row - * @return true if the row exists - */ - @Override - public boolean containsDistinct(Value[] values) { - assert values.length == visibleColumnCount; - if (external != null) { - return external.contains(values); - } - if (distinctRows == null) { - distinctRows = new TreeMap<>(session.getDatabase().getCompareMode()); - for (Value[] row : rows) { - ValueRow array = getDistinctRow(row); - distinctRows.put(array, array.getList()); - } - } - ValueRow array = ValueRow.get(values); - return distinctRows.get(array) != null; - } - - @Override - public boolean containsNull() { - Boolean r = containsNull; - if (r == null) { - r = false; - reset(); - loop: while (next()) { - Value[] row = currentRow; - for (int i = 0; i < visibleColumnCount; i++) { - if (row[i].containsNull()) { - r = true; - break loop; - } - } - } - reset(); - containsNull = r; - } - return r; - } - - @Override - public void reset() { - rowId = -1; - currentRow = null; - if (external != null) { - external.reset(); - } - } - - @Override - public Value[] currentRow() { - return currentRow; - } - - @Override - public boolean next() { - if (!closed && rowId < rowCount) { - rowId++; - if (rowId < rowCount) { - if (external != null) { - currentRow = external.next(); - } else { - currentRow = rows.get(rowId); - } - return true; - } - currentRow = null; - } - return false; - } - - @Override - public int getRowId() { - return rowId; - } - - @Override - public boolean isAfterLast() { - return rowId >= rowCount; - } - - private void 
cloneLobs(Value[] values) { - for (int i = 0; i < values.length; i++) { - Value v = values[i]; - Value v2 = v.copyToResult(); - if (v2 != v) { - containsLobs = true; - session.addTemporaryLob(v2); - values[i] = v2; - } - } - } - - private ValueRow getDistinctRow(Value[] values) { - if (distinctIndexes != null) { - int cnt = distinctIndexes.length; - Value[] newValues = new Value[cnt]; - for (int i = 0; i < cnt; i++) { - newValues[i] = values[distinctIndexes[i]]; - } - values = newValues; - } else if (values.length > visibleColumnCount) { - values = Arrays.copyOf(values, visibleColumnCount); - } - return ValueRow.get(values); - } - - private void createExternalResult() { - external = MVTempResult.of(session.getDatabase(), expressions, distinct, distinctIndexes, visibleColumnCount, - sort); - } - - /** - * Add a row to this object. - * - * @param values the row to add - */ - @Override - public void addRow(Value[] values) { - cloneLobs(values); - if (isAnyDistinct()) { - if (distinctRows != null) { - ValueRow array = getDistinctRow(values); - Value[] previous = distinctRows.get(array); - if (previous == null || sort != null && sort.compare(previous, values) > 0) { - distinctRows.put(array, values); - } - rowCount = distinctRows.size(); - if (rowCount > maxMemoryRows) { - createExternalResult(); - rowCount = external.addRows(distinctRows.values()); - distinctRows = null; - } - } else { - rowCount = external.addRow(values); - } - } else { - rows.add(values); - rowCount++; - if (rows.size() > maxMemoryRows) { - addRowsToDisk(); - } - } - } - - private void addRowsToDisk() { - if (external == null) { - createExternalResult(); - } - rowCount = external.addRows(rows); - rows.clear(); - } - - @Override - public int getVisibleColumnCount() { - return visibleColumnCount; - } - - /** - * This method is called after all rows have been added. 
- */ - @Override - public void done() { - if (external != null) { - addRowsToDisk(); - } else { - if (isAnyDistinct()) { - rows = new ArrayList<>(distinctRows.values()); - } - if (sort != null && limit != 0 && !limitsWereApplied) { - boolean withLimit = limit > 0 && withTiesSortOrder == null; - if (offset > 0 || withLimit) { - sort.sort(rows, offset, withLimit ? limit : rows.size()); - } else { - sort.sort(rows); - } - } - } - applyOffsetAndLimit(); - reset(); - } - - private void applyOffsetAndLimit() { - if (limitsWereApplied) { - return; - } - int offset = Math.max(this.offset, 0); - int limit = this.limit; - if (offset == 0 && limit < 0 && !fetchPercent || rowCount == 0) { - return; - } - if (fetchPercent) { - if (limit < 0 || limit > 100) { - throw DbException.getInvalidValueException("FETCH PERCENT", limit); - } - // Oracle rounds percent up, do the same for now - limit = (int) (((long) limit * rowCount + 99) / 100); - } - boolean clearAll = offset >= rowCount || limit == 0; - if (!clearAll) { - int remaining = rowCount - offset; - limit = limit < 0 ? 
remaining : Math.min(remaining, limit); - if (offset == 0 && remaining <= limit) { - return; - } - } else { - limit = 0; - } - distinctRows = null; - rowCount = limit; - if (external == null) { - if (clearAll) { - rows.clear(); - return; - } - int to = offset + limit; - if (withTiesSortOrder != null) { - Value[] expected = rows.get(to - 1); - while (to < rows.size() && withTiesSortOrder.compare(expected, rows.get(to)) == 0) { - to++; - rowCount++; - } - } - if (offset != 0 || to != rows.size()) { - // avoid copying the whole array for each row - rows = new ArrayList<>(rows.subList(offset, to)); - } - } else { - if (clearAll) { - external.close(); - external = null; - return; - } - trimExternal(offset, limit); - } - } - - private void trimExternal(int offset, int limit) { - ResultExternal temp = external; - external = null; - temp.reset(); - while (--offset >= 0) { - temp.next(); - } - Value[] row = null; - while (--limit >= 0) { - row = temp.next(); - rows.add(row); - if (rows.size() > maxMemoryRows) { - addRowsToDisk(); - } - } - if (withTiesSortOrder != null && row != null) { - Value[] expected = row; - while ((row = temp.next()) != null && withTiesSortOrder.compare(expected, row) == 0) { - rows.add(row); - rowCount++; - if (rows.size() > maxMemoryRows) { - addRowsToDisk(); - } - } - } - if (external != null) { - addRowsToDisk(); - } - temp.close(); - } - - @Override - public int getRowCount() { - return rowCount; - } - - @Override - public void limitsWereApplied() { - this.limitsWereApplied = true; - } - - @Override - public boolean hasNext() { - return !closed && rowId < rowCount - 1; - } - - /** - * Set the number of rows that this result will return at the maximum. 
- * - * @param limit the limit (-1 means no limit, 0 means no rows) - */ - @Override - public void setLimit(int limit) { - this.limit = limit; - } - - /** - * @param fetchPercent whether limit expression specifies percentage of rows - */ - @Override - public void setFetchPercent(boolean fetchPercent) { - this.fetchPercent = fetchPercent; - } - - @Override - public void setWithTies(SortOrder withTiesSortOrder) { - assert sort == null || sort == withTiesSortOrder; - this.withTiesSortOrder = withTiesSortOrder; - } - - @Override - public boolean needToClose() { - return external != null; - } - - @Override - public void close() { - if (external != null) { - external.close(); - external = null; - closed = true; - } - } - - @Override - public String getAlias(int i) { - return expressions[i].getAlias(); - } - - @Override - public String getTableName(int i) { - return expressions[i].getTableName(); - } - - @Override - public String getSchemaName(int i) { - return expressions[i].getSchemaName(); - } - - @Override - public String getColumnName(int i) { - return expressions[i].getColumnName(); - } - - @Override - public TypeInfo getColumnType(int i) { - return expressions[i].getType(); - } - - @Override - public int getNullable(int i) { - return expressions[i].getNullable(); - } - - @Override - public boolean isAutoIncrement(int i) { - return expressions[i].isAutoIncrement(); - } - - /** - * Set the offset of the first row to return. - * - * @param offset the offset - */ - @Override - public void setOffset(int offset) { - this.offset = offset; - } - - @Override - public String toString() { - return super.toString() + " columns: " + visibleColumnCount + - " rows: " + rowCount + " pos: " + rowId; - } - - /** - * Check if this result set is closed. 
- * - * @return true if it is - */ - @Override - public boolean isClosed() { - return closed; - } - - @Override - public int getFetchSize() { - return 0; - } - - @Override - public void setFetchSize(int fetchSize) { - // ignore - } - -} diff --git a/h2/src/main/org/h2/result/MergedResult.java b/h2/src/main/org/h2/result/MergedResult.java index 8761b154b9..57545821e5 100644 --- a/h2/src/main/org/h2/result/MergedResult.java +++ b/h2/src/main/org/h2/result/MergedResult.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; diff --git a/h2/src/main/org/h2/result/ResultColumn.java b/h2/src/main/org/h2/result/ResultColumn.java index 907be99077..f8cc1a51f5 100644 --- a/h2/src/main/org/h2/result/ResultColumn.java +++ b/h2/src/main/org/h2/result/ResultColumn.java @@ -1,12 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; import java.io.IOException; +import org.h2.engine.Constants; import org.h2.value.Transfer; import org.h2.value.TypeInfo; @@ -41,9 +42,9 @@ public class ResultColumn { final TypeInfo columnType; /** - * True if this is an autoincrement column. + * True if this is an identity column. */ - final boolean autoIncrement; + final boolean identity; /** * True if this column is nullable. 
@@ -60,12 +61,11 @@ public class ResultColumn { schemaName = in.readString(); tableName = in.readString(); columnName = in.readString(); - int valueType = in.readInt(); - long precision = in.readLong(); - int scale = in.readInt(); - int displaySize = in.readInt(); - columnType = new TypeInfo(valueType, precision, scale, displaySize, null); - autoIncrement = in.readBoolean(); + columnType = in.readTypeInfo(); + if (in.getVersion() < Constants.TCP_PROTOCOL_VERSION_20) { + in.readInt(); + } + identity = in.readBoolean(); nullable = in.readInt(); } @@ -75,6 +75,7 @@ public class ResultColumn { * @param out the object to where to write the data * @param result the result * @param i the column index + * @throws IOException on failure */ public static void writeColumn(Transfer out, ResultInterface result, int i) throws IOException { @@ -83,11 +84,11 @@ public static void writeColumn(Transfer out, ResultInterface result, int i) out.writeString(result.getTableName(i)); out.writeString(result.getColumnName(i)); TypeInfo type = result.getColumnType(i); - out.writeInt(type.getValueType()); - out.writeLong(type.getPrecision()); - out.writeInt(type.getScale()); - out.writeInt(type.getDisplaySize()); - out.writeBoolean(result.isAutoIncrement(i)); + out.writeTypeInfo(type); + if (out.getVersion() < Constants.TCP_PROTOCOL_VERSION_20) { + out.writeInt(type.getDisplaySize()); + } + out.writeBoolean(result.isIdentity(i)); out.writeInt(result.getNullable(i)); } diff --git a/h2/src/main/org/h2/result/ResultExternal.java b/h2/src/main/org/h2/result/ResultExternal.java index be57ebf8b3..c61b5a176b 100644 --- a/h2/src/main/org/h2/result/ResultExternal.java +++ b/h2/src/main/org/h2/result/ResultExternal.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.result; diff --git a/h2/src/main/org/h2/result/ResultInterface.java b/h2/src/main/org/h2/result/ResultInterface.java index a71355fc3e..c9ac258198 100644 --- a/h2/src/main/org/h2/result/ResultInterface.java +++ b/h2/src/main/org/h2/result/ResultInterface.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; -import org.h2.engine.SessionInterface; +import org.h2.engine.Session; import org.h2.value.TypeInfo; import org.h2.value.Value; @@ -41,7 +41,7 @@ public interface ResultInterface extends AutoCloseable { * * @return the row id */ - int getRowId(); + long getRowId(); /** * Check if the current position is after last row. @@ -63,7 +63,7 @@ public interface ResultInterface extends AutoCloseable { * * @return the number of rows */ - int getRowCount(); + long getRowCount(); /** * Check if this result has more rows to fetch. @@ -127,12 +127,12 @@ public interface ResultInterface extends AutoCloseable { TypeInfo getColumnType(int i); /** - * Check if this is an auto-increment column. + * Check if this is an identity column. * * @param i the column number (starting with 0) - * @return true for auto-increment columns + * @return true for identity columns */ - boolean isAutoIncrement(int i); + boolean isIdentity(int i); /** * Check if this column is nullable. 
@@ -177,6 +177,6 @@ public interface ResultInterface extends AutoCloseable { * @param targetSession the session of the copy * @return the copy if possible, or null if copying is not possible */ - ResultInterface createShallowCopy(SessionInterface targetSession); + ResultInterface createShallowCopy(Session targetSession); } diff --git a/h2/src/main/org/h2/result/ResultRemote.java b/h2/src/main/org/h2/result/ResultRemote.java index 9ec2a7061c..e3e5a532e6 100644 --- a/h2/src/main/org/h2/result/ResultRemote.java +++ b/h2/src/main/org/h2/result/ResultRemote.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; @@ -8,7 +8,7 @@ import java.io.IOException; import java.util.ArrayList; -import org.h2.engine.SessionInterface; +import org.h2.api.ErrorCode; import org.h2.engine.SessionRemote; import org.h2.engine.SysProperties; import org.h2.message.DbException; @@ -22,16 +22,15 @@ * In many cases, the complete data is kept on the client side, * but for large results only a subset is in-memory. 
*/ -public class ResultRemote implements ResultInterface { +public final class ResultRemote extends FetchedResult { private int fetchSize; private SessionRemote session; private Transfer transfer; private int id; private final ResultColumn[] columns; - private Value[] currentRow; - private final int rowCount; - private int rowId, rowOffset; + private long rowCount; + private long rowOffset; private ArrayList result; private final Trace trace; @@ -42,19 +41,32 @@ public ResultRemote(SessionRemote session, Transfer transfer, int id, this.transfer = transfer; this.id = id; this.columns = new ResultColumn[columnCount]; - rowCount = transfer.readInt(); + rowCount = transfer.readRowCount(); for (int i = 0; i < columnCount; i++) { columns[i] = new ResultColumn(transfer); } rowId = -1; - result = new ArrayList<>(Math.min(fetchSize, rowCount)); this.fetchSize = fetchSize; - fetchRows(false); + if (rowCount >= 0) { + fetchSize = (int) Math.min(rowCount, fetchSize); + result = new ArrayList<>(fetchSize); + } else { + result = new ArrayList<>(); + } + synchronized (session) { + try { + if (fetchRows(fetchSize)) { + rowCount = result.size(); + } + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } + } } @Override public boolean isLazy() { - return false; + return rowCount < 0L; } @Override @@ -83,8 +95,8 @@ public TypeInfo getColumnType(int i) { } @Override - public boolean isAutoIncrement(int i) { - return columns[i].autoIncrement; + public boolean isIdentity(int i) { + return columns[i].identity; } @Override @@ -94,8 +106,13 @@ public int getNullable(int i) { @Override public void reset() { + if (rowCount < 0L || rowOffset > 0L) { + throw DbException.get(ErrorCode.RESULT_SET_NOT_SCROLLABLE); + } rowId = -1; currentRow = null; + nextRow = null; + afterLast = false; if (session == null) { return; } @@ -110,51 +127,38 @@ public void reset() { } } - @Override - public Value[] currentRow() { - return currentRow; - } - - @Override - public boolean next() 
{ - if (rowId < rowCount) { - rowId++; - remapIfOld(); - if (rowId < rowCount) { - if (rowId - rowOffset >= result.size()) { - fetchRows(true); - } - currentRow = result.get(rowId - rowOffset); - return true; - } - currentRow = null; - } - return false; - } - - @Override - public int getRowId() { - return rowId; - } - - @Override - public boolean isAfterLast() { - return rowId >= rowCount; - } - @Override public int getVisibleColumnCount() { return columns.length; } @Override - public int getRowCount() { + public long getRowCount() { + if (rowCount < 0L) { + throw DbException.getUnsupportedException("Row count is unknown for lazy result."); + } return rowCount; } @Override public boolean hasNext() { - return rowId < rowCount - 1; + if (afterLast) { + return false; + } + if (nextRow == null) { + if (rowCount < 0L || rowId < rowCount - 1) { + long nextRowId = rowId + 1; + if (session != null) { + remapIfOld(); + if (nextRowId - rowOffset >= result.size()) { + fetchAdditionalRows(); + } + } + int index = (int) (nextRowId - rowOffset); + nextRow = index < result.size() ? result.get(index) : null; + } + } + return nextRow != null; } private void sendClose() { @@ -182,9 +186,6 @@ public void close() { } private void remapIfOld() { - if (session == null) { - return; - } try { if (id <= session.getCurrentId() - SysProperties.SERVER_CACHED_OBJECTS / 2) { // object is too old - we need to map it to a new id @@ -201,44 +202,58 @@ private void remapIfOld() { } } - private void fetchRows(boolean sendFetch) { + private void fetchAdditionalRows() { synchronized (session) { session.checkClosed(); try { rowOffset += result.size(); result.clear(); - int fetch = Math.min(fetchSize, rowCount - rowOffset); - if (sendFetch) { - session.traceOperation("RESULT_FETCH_ROWS", id); - transfer.writeInt(SessionRemote.RESULT_FETCH_ROWS). 
- writeInt(id).writeInt(fetch); - session.done(transfer); - } - for (int r = 0; r < fetch; r++) { - boolean row = transfer.readBoolean(); - if (!row) { - break; - } - int len = columns.length; - Value[] values = new Value[len]; - for (int i = 0; i < len; i++) { - Value v = transfer.readValue(); - values[i] = v; - } - result.add(values); - } - if (rowOffset + result.size() >= rowCount) { - sendClose(); + int fetch = fetchSize; + if (rowCount >= 0) { + fetch = (int) Math.min(fetch, rowCount - rowOffset); + } else if (fetch == Integer.MAX_VALUE) { + fetch = SysProperties.SERVER_RESULT_SET_FETCH_SIZE; } + session.traceOperation("RESULT_FETCH_ROWS", id); + transfer.writeInt(SessionRemote.RESULT_FETCH_ROWS).writeInt(id).writeInt(fetch); + session.done(transfer); + fetchRows(fetch); } catch (IOException e) { throw DbException.convertIOException(e, null); } } } + private boolean fetchRows(int fetch) throws IOException { + int len = columns.length; + for (int r = 0; r < fetch; r++) { + switch (transfer.readByte()) { + case 1: { + Value[] values = new Value[len]; + for (int i = 0; i < len; i++) { + values[i] = transfer.readValue(columns[i].columnType); + } + result.add(values); + break; + } + case 0: + sendClose(); + return true; + case -1: + throw SessionRemote.readException(transfer); + default: + throw DbException.getInternalError(); + } + } + if (rowCount >= 0L && rowOffset + result.size() >= rowCount) { + sendClose(); + } + return false; + } + @Override public String toString() { - return "columns: " + columns.length + " rows: " + rowCount + " pos: " + rowId; + return "columns: " + columns.length + (rowCount < 0L ? 
" lazy" : " rows: " + rowCount) + " pos: " + rowId; } @Override @@ -251,17 +266,6 @@ public void setFetchSize(int fetchSize) { this.fetchSize = fetchSize; } - @Override - public boolean needToClose() { - return true; - } - - @Override - public ResultInterface createShallowCopy(SessionInterface targetSession) { - // The operation is not supported on remote result. - return null; - } - @Override public boolean isClosed() { return result == null; diff --git a/h2/src/main/org/h2/result/ResultTarget.java b/h2/src/main/org/h2/result/ResultTarget.java index 523437a0df..cca53de6cd 100644 --- a/h2/src/main/org/h2/result/ResultTarget.java +++ b/h2/src/main/org/h2/result/ResultTarget.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; @@ -17,14 +17,14 @@ public interface ResultTarget { * * @param values the values */ - void addRow(Value[] values); + void addRow(Value... values); /** * Get the number of rows. * * @return the number of rows */ - int getRowCount(); + long getRowCount(); /** * A hint that sorting, offset and limit may be ignored by this result diff --git a/h2/src/main/org/h2/result/ResultWithGeneratedKeys.java b/h2/src/main/org/h2/result/ResultWithGeneratedKeys.java index 01c9fdd881..62a8427285 100644 --- a/h2/src/main/org/h2/result/ResultWithGeneratedKeys.java +++ b/h2/src/main/org/h2/result/ResultWithGeneratedKeys.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.result; @@ -23,7 +23,7 @@ public static final class WithKeys extends ResultWithGeneratedKeys { * @param generatedKeys * generated keys */ - public WithKeys(int updateCount, ResultInterface generatedKeys) { + public WithKeys(long updateCount, ResultInterface generatedKeys) { super(updateCount); this.generatedKeys = generatedKeys; } @@ -41,13 +41,13 @@ public ResultInterface getGeneratedKeys() { * update count * @return the result. */ - public static ResultWithGeneratedKeys of(int updateCount) { + public static ResultWithGeneratedKeys of(long updateCount) { return new ResultWithGeneratedKeys(updateCount); } - private final int updateCount; + private final long updateCount; - ResultWithGeneratedKeys(int updateCount) { + ResultWithGeneratedKeys(long updateCount) { this.updateCount = updateCount; } @@ -65,7 +65,7 @@ public ResultInterface getGeneratedKeys() { * * @return update count */ - public int getUpdateCount() { + public long getUpdateCount() { return updateCount; } diff --git a/h2/src/main/org/h2/result/ResultWithPaddedStrings.java b/h2/src/main/org/h2/result/ResultWithPaddedStrings.java index b68946b37b..d195f91504 100644 --- a/h2/src/main/org/h2/result/ResultWithPaddedStrings.java +++ b/h2/src/main/org/h2/result/ResultWithPaddedStrings.java @@ -1,16 +1,16 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; import java.util.Arrays; -import org.h2.engine.SessionInterface; +import org.h2.engine.Session; import org.h2.util.MathUtils; import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueString; +import org.h2.value.ValueVarchar; /** * Result with padded fixed length strings. 
@@ -30,7 +30,7 @@ public class ResultWithPaddedStrings implements ResultInterface { public static ResultInterface get(ResultInterface source) { int count = source.getVisibleColumnCount(); for (int i = 0; i < count; i++) { - if (source.getColumnType(i).getValueType() == Value.STRING_FIXED) { + if (source.getColumnType(i).getValueType() == Value.CHAR) { return new ResultWithPaddedStrings(source); } } @@ -58,7 +58,7 @@ public Value[] currentRow() { Value[] row = Arrays.copyOf(source.currentRow(), count); for (int i = 0; i < count; i++) { TypeInfo type = source.getColumnType(i); - if (type.getValueType() == Value.STRING_FIXED) { + if (type.getValueType() == Value.CHAR) { long precision = type.getPrecision(); if (precision == Integer.MAX_VALUE) { // CHAR is CHAR(1) @@ -71,7 +71,7 @@ public Value[] currentRow() { * no difference between ValueStringFixed and ValueString * for JDBC layer anyway. */ - row[i] = ValueString.get(rightPadWithSpaces(s, MathUtils.convertLongToInt(precision))); + row[i] = ValueVarchar.get(rightPadWithSpaces(s, MathUtils.convertLongToInt(precision))); } } } @@ -95,7 +95,7 @@ public boolean next() { } @Override - public int getRowId() { + public long getRowId() { return source.getRowId(); } @@ -110,7 +110,7 @@ public int getVisibleColumnCount() { } @Override - public int getRowCount() { + public long getRowCount() { return source.getRowCount(); } @@ -155,8 +155,8 @@ public TypeInfo getColumnType(int i) { } @Override - public boolean isAutoIncrement(int i) { - return source.isAutoIncrement(i); + public boolean isIdentity(int i) { + return source.isIdentity(i); } @Override @@ -185,7 +185,7 @@ public boolean isClosed() { } @Override - public ResultInterface createShallowCopy(SessionInterface targetSession) { + public ResultInterface createShallowCopy(Session targetSession) { ResultInterface copy = source.createShallowCopy(targetSession); return copy != null ? 
new ResultWithPaddedStrings(copy) : null; } diff --git a/h2/src/main/org/h2/result/Row.java b/h2/src/main/org/h2/result/Row.java index 14f406286f..29dbc80417 100644 --- a/h2/src/main/org/h2/result/Row.java +++ b/h2/src/main/org/h2/result/Row.java @@ -1,56 +1,62 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; -import org.h2.store.Data; +import java.util.Arrays; + import org.h2.value.Value; /** * Represents a row in a table. */ -public interface Row extends SearchRow { - - int MEMORY_CALCULATE = -1; - Row[] EMPTY_ARRAY = {}; - - /** - * Get the number of bytes required for the data. - * - * @param dummy the template buffer - * @return the number of bytes - */ - int getByteCount(Data dummy); +public abstract class Row extends SearchRow { /** - * Check if this is an empty row. + * Creates a new row. * - * @return {@code true} if the row is empty + * @param data values of columns, or null + * @param memory used memory + * @return the allocated row */ - boolean isEmpty(); + public static Row get(Value[] data, int memory) { + return new DefaultRow(data, memory); + } /** - * Mark the row as deleted. + * Creates a new row with the specified key. * - * @param deleted deleted flag + * @param data values of columns, or null + * @param memory used memory + * @param key the key + * @return the allocated row */ - void setDeleted(boolean deleted); + public static Row get(Value[] data, int memory, long key) { + Row r = new DefaultRow(data, memory); + r.setKey(key); + return r; + } /** - * Check if the row is deleted. + * Get values. * - * @return {@code true} if the row is deleted + * @return values */ - boolean isDeleted(); + public abstract Value[] getValueList(); /** - * Get values. 
+ * Check whether values of this row are equal to values of other row. * - * @return values + * @param other + * the other row + * @return {@code true} if values are equal, + * {@code false} otherwise */ - Value[] getValueList(); + public boolean hasSameValues(Row other) { + return Arrays.equals(getValueList(), other.getValueList()); + } /** * Check whether this row and the specified row share the same underlying @@ -64,6 +70,8 @@ public interface Row extends SearchRow { * @return {@code true} if rows share the same underlying data, * {@code false} otherwise or when unknown */ - boolean hasSharedData(Row other); + public boolean hasSharedData(Row other) { + return false; + } } diff --git a/h2/src/main/org/h2/result/RowFactory.java b/h2/src/main/org/h2/result/RowFactory.java index b65e4738cf..0a257fd7c1 100644 --- a/h2/src/main/org/h2/result/RowFactory.java +++ b/h2/src/main/org/h2/result/RowFactory.java @@ -1,39 +1,207 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; +import org.h2.engine.CastDataProvider; +import org.h2.mvstore.db.RowDataType; +import org.h2.store.DataHandler; +import org.h2.table.IndexColumn; +import org.h2.value.CompareMode; +import org.h2.value.TypeInfo; +import org.h2.value.Typed; import org.h2.value.Value; /** * Creates rows. * * @author Sergi Vladykin + * @author Andrei Tokar */ public abstract class RowFactory { + + private static final class Holder { + static final RowFactory EFFECTIVE = DefaultRowFactory.INSTANCE; + } + + public static DefaultRowFactory getDefaultRowFactory() { + return DefaultRowFactory.INSTANCE; + } + + public static RowFactory getRowFactory() { + return Holder.EFFECTIVE; + } + /** - * Default implementation of row factory. 
+ * Create a new row factory. + * + * @param provider the cast provider + * @param compareMode the compare mode + * @param handler the data handler + * @param columns the list of columns + * @param indexColumns the list of index columns + * @param storeKeys whether row keys are stored + * @return the (possibly new) row factory */ - public static final RowFactory DEFAULT = new DefaultRowFactory(); + public RowFactory createRowFactory(CastDataProvider provider, CompareMode compareMode, DataHandler handler, + Typed[] columns, IndexColumn[] indexColumns, boolean storeKeys) { + return this; + } /** - * Create new row. + * Create a new row. * * @param data the values - * @param memory whether the row is in memory + * @param memory the estimated memory usage in bytes * @return the created row */ public abstract Row createRow(Value[] data, int memory); + /** + * Create new row. + * + * @return the created row + */ + public abstract SearchRow createRow(); + + public abstract RowDataType getRowDataType(); + + public abstract int[] getIndexes(); + + public abstract TypeInfo[] getColumnTypes(); + + public abstract int getColumnCount(); + + public abstract boolean getStoreKeys(); + + /** * Default implementation of row factory. 
*/ - static final class DefaultRowFactory extends RowFactory { + public static final class DefaultRowFactory extends RowFactory { + private final RowDataType dataType; + private final int columnCount; + private final int[] indexes; + private TypeInfo[] columnTypes; + private final int[] map; + + public static final DefaultRowFactory INSTANCE = new DefaultRowFactory(); + + DefaultRowFactory() { + this(new RowDataType(null, CompareMode.getInstance(null, 0), null, null, null, 0, true), 0, null, null); + } + + private DefaultRowFactory(RowDataType dataType, int columnCount, int[] indexes, TypeInfo[] columnTypes) { + this.dataType = dataType; + this.columnCount = columnCount; + this.indexes = indexes; + if (indexes == null) { + map = null; + } else { + map = new int[columnCount]; + for (int i = 0, l = indexes.length; i < l;) { + map[indexes[i]] = ++i; + } + } + this.columnTypes = columnTypes; + } + + @Override + public RowFactory createRowFactory(CastDataProvider provider, CompareMode compareMode, DataHandler handler, + Typed[] columns, IndexColumn[] indexColumns, boolean storeKeys) { + int[] indexes = null; + int[] sortTypes = null; + TypeInfo[] columnTypes = null; + int columnCount = 0; + if (columns != null) { + columnCount = columns.length; + if (indexColumns == null) { + sortTypes = new int[columnCount]; + for (int i = 0; i < columnCount; i++) { + sortTypes[i] = SortOrder.ASCENDING; + } + } else { + int len = indexColumns.length; + indexes = new int[len]; + sortTypes = new int[len]; + for (int i = 0; i < len; i++) { + IndexColumn indexColumn = indexColumns[i]; + indexes[i] = indexColumn.column.getColumnId(); + sortTypes[i] = indexColumn.sortType; + } + } + columnTypes = new TypeInfo[columnCount]; + for (int i = 0; i < columnCount; i++) { + columnTypes[i] = columns[i].getType(); + } + } + return createRowFactory(provider, compareMode, handler, sortTypes, indexes, columnTypes, columnCount, + storeKeys); + } + + /** + * Create a new row factory. 
+ * + * @param provider the cast provider + * @param compareMode the compare mode + * @param handler the data handler + * @param sortTypes the sort types + * @param indexes the list of indexed columns + * @param columnTypes the list of column data type information + * @param columnCount the number of columns + * @param storeKeys whether row keys are stored + * @return the (possibly new) row factory + */ + public RowFactory createRowFactory(CastDataProvider provider, CompareMode compareMode, DataHandler handler, + int[] sortTypes, int[] indexes, TypeInfo[] columnTypes, int columnCount, boolean storeKeys) { + RowDataType rowDataType = new RowDataType(provider, compareMode, handler, sortTypes, indexes, columnCount, + storeKeys); + RowFactory rowFactory = new DefaultRowFactory(rowDataType, columnCount, indexes, columnTypes); + rowDataType.setRowFactory(rowFactory); + return rowFactory; + } + @Override public Row createRow(Value[] data, int memory) { - return new RowImpl(data, memory); + return new DefaultRow(data, memory); + } + + @Override + public SearchRow createRow() { + if (indexes == null) { + return new DefaultRow(columnCount); + } else if (indexes.length == 1) { + return new SimpleRowValue(columnCount, indexes[0]); + } else { + return new Sparse(columnCount, indexes.length, map); + } + } + + @Override + public RowDataType getRowDataType() { + return dataType; + } + + @Override + public int[] getIndexes() { + return indexes; + } + + @Override + public TypeInfo[] getColumnTypes() { + return columnTypes; + } + + @Override + public int getColumnCount() { + return columnCount; + } + + @Override + public boolean getStoreKeys() { + return dataType.isStoreKeys(); } } } diff --git a/h2/src/main/org/h2/result/RowImpl.java b/h2/src/main/org/h2/result/RowImpl.java deleted file mode 100644 index eddfe2d253..0000000000 --- a/h2/src/main/org/h2/result/RowImpl.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.result; - -import org.h2.engine.Constants; -import org.h2.store.Data; -import org.h2.value.Value; -import org.h2.value.ValueLong; - -/** - * Default row implementation. - */ -public class RowImpl implements Row { - private long key; - private final Value[] data; - private int memory; - private boolean deleted; - - public RowImpl(Value[] data, int memory) { - this.data = data; - this.memory = memory; - } - - @Override - public void setKey(SearchRow row) { - setKey(row.getKey()); - } - - @Override - public long getKey() { - return key; - } - - @Override - public void setKey(long key) { - this.key = key; - } - - @Override - public Value getValue(int i) { - return i == SearchRow.ROWID_INDEX ? ValueLong.get(key) : data[i]; - } - - /** - * Get the number of bytes required for the data. - * - * @param dummy the template buffer - * @return the number of bytes - */ - @Override - public int getByteCount(Data dummy) { - int size = 0; - for (Value v : data) { - size += dummy.getValueLen(v); - } - return size; - } - - @Override - public void setValue(int i, Value v) { - if (i == SearchRow.ROWID_INDEX) { - this.key = v.getLong(); - } else { - data[i] = v; - } - } - - @Override - public boolean isEmpty() { - return data == null; - } - - @Override - public int getColumnCount() { - return data.length; - } - - @Override - public int getMemory() { - if (memory != MEMORY_CALCULATE) { - return memory; - } - int m = Constants.MEMORY_ROW; - if (data != null) { - int len = data.length; - m += Constants.MEMORY_OBJECT + len * Constants.MEMORY_POINTER; - for (Value v : data) { - if (v != null) { - m += v.getMemory(); - } - } - } - this.memory = m; - return m; - } - - @Override - public String toString() { - return toString(key, deleted, data); - } - - /** - * Convert a row to a string. 
- * - * @param key the key - * @param isDeleted whether the row is deleted - * @param data the row data - * @return the string representation - */ - static String toString(long key, boolean isDeleted, Value[] data) { - StringBuilder builder = new StringBuilder("( /* key:").append(key); - if (isDeleted) { - builder.append(" deleted"); - } - builder.append(" */ "); - if (data != null) { - for (int i = 0, length = data.length; i < length; i++) { - if (i > 0) { - builder.append(", "); - } - Value v = data[i]; - builder.append(v == null ? "null" : v.getTraceSQL()); - } - } - return builder.append(')').toString(); - } - - @Override - public void setDeleted(boolean deleted) { - this.deleted = deleted; - } - - @Override - public boolean isDeleted() { - return deleted; - } - - @Override - public Value[] getValueList() { - return data; - } - - @Override - public boolean hasSharedData(Row other) { - if (other.getClass() == RowImpl.class) { - RowImpl o = (RowImpl) other; - return data == o.data; - } - return false; - } -} diff --git a/h2/src/main/org/h2/result/RowList.java b/h2/src/main/org/h2/result/RowList.java deleted file mode 100644 index 71e339036f..0000000000 --- a/h2/src/main/org/h2/result/RowList.java +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.result; - -import java.util.ArrayList; - -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.store.Data; -import org.h2.store.FileStore; -import org.h2.util.Utils; -import org.h2.value.DataType; -import org.h2.value.Value; - -/** - * A list of rows. If the list grows too large, it is buffered to disk - * automatically. 
- */ -public class RowList implements AutoCloseable { - - private final Session session; - private final ArrayList list = Utils.newSmallArrayList(); - private int size; - private int index, listIndex; - private FileStore file; - private Data rowBuff; - private ArrayList lobs; - private final int maxMemory; - private int memory; - private boolean written; - - /** - * Construct a new row list for this session. - * - * @param session the session - */ - public RowList(Session session) { - this.session = session; - if (session.getDatabase().isPersistent()) { - maxMemory = session.getDatabase().getMaxOperationMemory(); - } else { - maxMemory = 0; - } - } - - private void writeRow(Data buff, Row r) { - buff.checkCapacity(2 + Data.LENGTH_INT * 3 + Data.LENGTH_LONG); - buff.writeByte((byte) 1); - buff.writeInt(r.getMemory()); - int columnCount = r.getColumnCount(); - buff.writeInt(columnCount); - buff.writeLong(r.getKey()); - buff.writeByte(r.isDeleted() ? (byte) 1 : (byte) 0); - for (int i = 0; i < columnCount; i++) { - Value v = r.getValue(i); - buff.checkCapacity(1); - if (v == null) { - buff.writeByte((byte) 0); - } else { - buff.writeByte((byte) 1); - if (DataType.isLargeObject(v.getValueType())) { - // need to keep a reference to temporary lobs, - // otherwise the temp file is deleted - if (v.getSmall() == null && v.getTableId() == 0) { - if (lobs == null) { - lobs = Utils.newSmallArrayList(); - } - // need to create a copy, otherwise, - // if stored multiple times, it may be renamed - // and then not found - v = v.copyToTemp(); - lobs.add(v); - } - } - buff.checkCapacity(buff.getValueLen(v)); - buff.writeValue(v); - } - } - } - - private void writeAllRows() { - if (file == null) { - Database db = session.getDatabase(); - String fileName = db.createTempFile(); - file = db.openFile(fileName, "rw", false); - file.setCheckedWriting(false); - file.seek(FileStore.HEADER_LENGTH); - rowBuff = Data.create(db, Constants.DEFAULT_PAGE_SIZE, true); - 
file.seek(FileStore.HEADER_LENGTH); - } - Data buff = rowBuff; - initBuffer(buff); - for (int i = 0, size = list.size(); i < size; i++) { - if (i > 0 && buff.length() > Constants.IO_BUFFER_SIZE) { - flushBuffer(buff); - initBuffer(buff); - } - Row r = list.get(i); - writeRow(buff, r); - } - flushBuffer(buff); - list.clear(); - memory = 0; - } - - private static void initBuffer(Data buff) { - buff.reset(); - buff.writeInt(0); - } - - private void flushBuffer(Data buff) { - buff.checkCapacity(1); - buff.writeByte((byte) 0); - buff.fillAligned(); - buff.setInt(0, buff.length() / Constants.FILE_BLOCK_SIZE); - file.write(buff.getBytes(), 0, buff.length()); - } - - /** - * Add a row to the list. - * - * @param r the row to add - */ - public void add(Row r) { - list.add(r); - memory += r.getMemory() + Constants.MEMORY_POINTER; - if (maxMemory > 0 && memory > maxMemory) { - writeAllRows(); - } - size++; - } - - /** - * Remove all rows from the list. - */ - public void reset() { - index = 0; - if (file != null) { - listIndex = 0; - if (!written) { - writeAllRows(); - written = true; - } - list.clear(); - file.seek(FileStore.HEADER_LENGTH); - } - } - - /** - * Check if there are more rows in this list. 
- * - * @return true it there are more rows - */ - public boolean hasNext() { - return index < size; - } - - private Row readRow(Data buff) { - if (buff.readByte() == 0) { - return null; - } - int mem = buff.readInt(); - int columnCount = buff.readInt(); - long key = buff.readLong(); - boolean deleted = buff.readByte() != 0; - Value[] values = new Value[columnCount]; - for (int i = 0; i < columnCount; i++) { - Value v; - if (buff.readByte() == 0) { - v = null; - } else { - v = buff.readValue(); - if (v.isLinkedToTable()) { - // the table id is 0 if it was linked when writing - // a temporary entry - if (v.getTableId() == 0) { - session.removeAtCommit(v); - } - } - } - values[i] = v; - } - Row row = session.createRow(values, mem); - row.setKey(key); - row.setDeleted(deleted); - return row; - } - - /** - * Get the next row from the list. - * - * @return the next row - */ - public Row next() { - Row r; - if (file == null) { - r = list.get(index++); - } else { - if (listIndex >= list.size()) { - list.clear(); - listIndex = 0; - Data buff = rowBuff; - buff.reset(); - int min = Constants.FILE_BLOCK_SIZE; - file.readFully(buff.getBytes(), 0, min); - int len = buff.readInt() * Constants.FILE_BLOCK_SIZE; - buff.checkCapacity(len); - if (len - min > 0) { - file.readFully(buff.getBytes(), min, len - min); - } - while (true) { - r = readRow(buff); - if (r == null) { - break; - } - list.add(r); - } - } - index++; - r = list.get(listIndex++); - } - return r; - } - - /** - * Get the number of rows in this list. - * - * @return the number of rows - */ - public int size() { - return size; - } - - /** - * Close the result list and delete the temporary file. 
- */ - @Override - public void close() { - if (file != null) { - file.closeAndDeleteSilently(); - file = null; - rowBuff = null; - } - } - -} diff --git a/h2/src/main/org/h2/result/SearchRow.java b/h2/src/main/org/h2/result/SearchRow.java index 004bb0d147..80babceb2a 100644 --- a/h2/src/main/org/h2/result/SearchRow.java +++ b/h2/src/main/org/h2/result/SearchRow.java @@ -1,33 +1,58 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; +import org.h2.engine.CastDataProvider; +import org.h2.value.CompareMode; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueNull; /** - * The interface for rows stored in a table, and for partial rows stored in the + * The base class for rows stored in a table, and for partial rows stored in the * index. */ -public interface SearchRow { +public abstract class SearchRow extends Value { + /** * Index of a virtual "_ROWID_" column within a row or a table */ - int ROWID_INDEX = -1; + public static final int ROWID_INDEX = -1; + + /** + * If the key is this value, then the key is considered equal to all other + * keys, when comparing. + */ + public static long MATCH_ALL_ROW_KEY = Long.MIN_VALUE + 1; + + /** + * The constant that means "memory usage is unknown and needs to be calculated first". + */ + public static final int MEMORY_CALCULATE = -1; /** - * An empty array of SearchRow objects. + * The row key. */ - SearchRow[] EMPTY_ARRAY = {}; + protected long key; /** * Get the column count. 
* * @return the column count */ - int getColumnCount(); + public abstract int getColumnCount(); + + /** + * Determine if specified column contains NULL + * @param index column index + * @return true if NULL + */ + public boolean isNull(int index) { + return getValue(index) == ValueNull.INSTANCE; + } /** * Get the value for the column @@ -35,7 +60,7 @@ public interface SearchRow { * @param index the column number (starting with 0) * @return the value */ - Value getValue(int index); + public abstract Value getValue(int index); /** * Set the value for given column @@ -43,34 +68,79 @@ public interface SearchRow { * @param index the column number (starting with 0) * @param v the new value */ - void setValue(int index, Value v); - - /** - * Set the position to match another row. - * - * @param old the other row. - */ - void setKey(SearchRow old); + public abstract void setValue(int index, Value v); /** * Set the unique key of the row. * * @param key the key */ - void setKey(long key); + public void setKey(long key) { + this.key = key; + } /** * Get the unique key of the row. * * @return the key */ - long getKey(); + public long getKey() { + return key; + } /** * Get the estimated memory used for this row, in bytes. * * @return the memory */ - int getMemory(); + @Override + public abstract int getMemory(); + + /** + * Copy all relevant values from the source to this row. 
+ * @param source source of column values + */ + public abstract void copyFrom(SearchRow source); + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_ROW_EMPTY; + } + + @Override + public int getValueType() { + return Value.ROW; + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + builder.append("ROW ("); + for (int index = 0, count = getColumnCount(); index < count; index++) { + if (index != 0) { + builder.append(", "); + } + getValue(index).getSQL(builder, sqlFlags); + } + return builder.append(')'); + } + + @Override + public String getString() { + return getTraceSQL(); + } + + @Override + public int hashCode() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean equals(Object other) { + throw new UnsupportedOperationException(); + } + @Override + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + throw new UnsupportedOperationException(); + } } diff --git a/h2/src/main/org/h2/result/SimpleResult.java b/h2/src/main/org/h2/result/SimpleResult.java index a2c7a91c25..c47a315d61 100644 --- a/h2/src/main/org/h2/result/SimpleResult.java +++ b/h2/src/main/org/h2/result/SimpleResult.java @@ -1,14 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; import java.sql.ResultSetMetaData; import java.util.ArrayList; +import java.util.Comparator; -import org.h2.engine.SessionInterface; +import org.h2.engine.Session; import org.h2.util.Utils; import org.h2.value.TypeInfo; import org.h2.value.Value; @@ -16,10 +17,10 @@ /** * Simple in-memory result. 
*/ -public class SimpleResult implements ResultInterface { +public class SimpleResult implements ResultInterface, ResultTarget { /** - * Column info for the simple result. + * Column info for the simple result. */ static final class Column { /** Column alias. */ @@ -31,15 +32,6 @@ static final class Column { /** Column type. */ final TypeInfo columnType; - Column(String alias, String columnName, int columnType, long columnPrecision, int columnScale) { - if (alias == null || columnName == null) { - throw new NullPointerException(); - } - this.alias = alias; - this.columnName = columnName; - this.columnType = TypeInfo.getTypeInfo(columnType, columnPrecision, columnScale, null); - } - Column(String alias, String columnName, TypeInfo columnType) { if (alias == null || columnName == null) { throw new NullPointerException(); @@ -82,42 +74,80 @@ public String toString() { private final ArrayList rows; + private final String schemaName, tableName; + private int rowId; /** * Creates new instance of simple result. */ public SimpleResult() { + this("", ""); + } + + /** + * Creates new instance of simple result. + * + * @param schemaName + * the name of the schema + * @param tableName + * the name of the table + */ + public SimpleResult(String schemaName, String tableName) { this.columns = Utils.newSmallArrayList(); this.rows = new ArrayList<>(); + this.schemaName = schemaName; + this.tableName = tableName; this.rowId = -1; } - private SimpleResult(ArrayList columns, ArrayList rows) { + private SimpleResult(ArrayList columns, ArrayList rows, String schemaName, String tableName) { this.columns = columns; this.rows = rows; + this.schemaName = schemaName; + this.tableName = tableName; this.rowId = -1; } /** * Add column to the result. * - * @param alias Column's alias. - * @param columnName Column's name. - * @param columnType Column's value type. - * @param columnPrecision Column's precision. - * @param columnScale Column's scale. + * @param alias + * Column's alias. 
+ * @param columnName + * Column's name. + * @param columnType + * Column's value type. + * @param columnPrecision + * Column's precision. + * @param columnScale + * Column's scale. */ public void addColumn(String alias, String columnName, int columnType, long columnPrecision, int columnScale) { - addColumn(new Column(alias, columnName, columnType, columnPrecision, columnScale)); + addColumn(alias, columnName, TypeInfo.getTypeInfo(columnType, columnPrecision, columnScale, null)); + } + + /** + * Add column to the result. + * + * @param columnName + * Column's name. + * @param columnType + * Column's type. + */ + public void addColumn(String columnName, TypeInfo columnType) { + addColumn(new Column(columnName, columnName, columnType)); } /** * Add column to the result. * - * @param alias Column's alias. - * @param columnName Column's name. - * @param columnType Column's type. + * @param alias + * Column's alias. + * @param columnName + * Column's name. + * @param columnType + * Column's type. */ public void addColumn(String alias, String columnName, TypeInfo columnType) { addColumn(new Column(alias, columnName, columnType)); @@ -126,18 +156,15 @@ public void addColumn(String alias, String columnName, TypeInfo columnType) { /** * Add column to the result. * - * @param column Column info. + * @param column + * Column info. */ void addColumn(Column column) { assert rows.isEmpty(); columns.add(column); } - /** - * Add row to result. - * - * @param values Row's values. - */ + @Override public void addRow(Value... 
values) { assert values.length == columns.size(); rows.add(values); @@ -163,7 +190,7 @@ public boolean next() { } @Override - public int getRowId() { + public long getRowId() { return rowId; } @@ -178,7 +205,7 @@ public int getVisibleColumnCount() { } @Override - public int getRowCount() { + public long getRowCount() { return rows.size(); } @@ -204,12 +231,12 @@ public String getAlias(int i) { @Override public String getSchemaName(int i) { - return ""; + return schemaName; } @Override public String getTableName(int i) { - return ""; + return tableName; } @Override @@ -223,7 +250,7 @@ public TypeInfo getColumnType(int i) { } @Override - public boolean isAutoIncrement(int i) { + public boolean isIdentity(int i) { return false; } @@ -253,8 +280,23 @@ public boolean isClosed() { } @Override - public ResultInterface createShallowCopy(SessionInterface targetSession) { - return new SimpleResult(columns, rows); + public SimpleResult createShallowCopy(Session targetSession) { + return new SimpleResult(columns, rows, schemaName, tableName); + } + + @Override + public void limitsWereApplied() { + // Nothing to do + } + + /** + * Sort rows in the list. + * + * @param comparator + * the comparator + */ + public void sortRows(Comparator comparator) { + rows.sort(comparator); } } diff --git a/h2/src/main/org/h2/result/SimpleRow.java b/h2/src/main/org/h2/result/SimpleRow.java deleted file mode 100644 index 2362278de5..0000000000 --- a/h2/src/main/org/h2/result/SimpleRow.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.result; - -import org.h2.engine.Constants; -import org.h2.value.Value; - -/** - * Represents a simple row without state. 
- */ -public class SimpleRow implements SearchRow { - - private long key; - private final Value[] data; - private int memory; - - public SimpleRow(Value[] data) { - this.data = data; - } - - @Override - public int getColumnCount() { - return data.length; - } - - @Override - public long getKey() { - return key; - } - - @Override - public void setKey(long key) { - this.key = key; - } - - @Override - public void setKey(SearchRow row) { - key = row.getKey(); - } - - @Override - public void setValue(int i, Value v) { - data[i] = v; - } - - @Override - public Value getValue(int i) { - return data[i]; - } - - @Override - public String toString() { - return RowImpl.toString(key, false, data); - } - - @Override - public int getMemory() { - if (memory == 0) { - int len = data.length; - memory = Constants.MEMORY_OBJECT + len * Constants.MEMORY_POINTER; - for (Value v : data) { - if (v != null) { - memory += v.getMemory(); - } - } - } - return memory; - } - -} diff --git a/h2/src/main/org/h2/result/SimpleRowValue.java b/h2/src/main/org/h2/result/SimpleRowValue.java index 90d33cb068..84181cde70 100644 --- a/h2/src/main/org/h2/result/SimpleRowValue.java +++ b/h2/src/main/org/h2/result/SimpleRowValue.java @@ -1,19 +1,20 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; import org.h2.engine.Constants; import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; /** * A simple row that contains data for only one column. 
*/ -public class SimpleRowValue implements SearchRow { +public class SimpleRowValue extends SearchRow { - private long key; private int index; private final int virtualColumnCount; private Value data; @@ -22,9 +23,9 @@ public SimpleRowValue(int columnCount) { this.virtualColumnCount = columnCount; } - @Override - public void setKey(SearchRow row) { - key = row.getKey(); + public SimpleRowValue(int columnCount, int index) { + this.virtualColumnCount = columnCount; + this.index = index; } @Override @@ -32,23 +33,19 @@ public int getColumnCount() { return virtualColumnCount; } - @Override - public long getKey() { - return key; - } - - @Override - public void setKey(long key) { - this.key = key; - } - @Override public Value getValue(int idx) { + if (idx == ROWID_INDEX) { + return ValueBigint.get(getKey()); + } return idx == index ? data : null; } @Override public void setValue(int idx, Value v) { + if (idx == ROWID_INDEX) { + setKey(v.getLong()); + } index = idx; data = v; } @@ -61,7 +58,17 @@ public String toString() { @Override public int getMemory() { - return Constants.MEMORY_OBJECT + (data == null ? 0 : data.getMemory()); + return Constants.MEMORY_ROW + (data == null ? 0 : data.getMemory()); + } + + @Override + public boolean isNull(int index) { + return index != this.index || data == null || data == ValueNull.INSTANCE; } + @Override + public void copyFrom(SearchRow source) { + setKey(source.getKey()); + setValue(index, source.getValue(index)); + } } diff --git a/h2/src/main/org/h2/result/SortOrder.java b/h2/src/main/org/h2/result/SortOrder.java index f92e7fb23a..65b9782468 100644 --- a/h2/src/main/org/h2/result/SortOrder.java +++ b/h2/src/main/org/h2/result/SortOrder.java @@ -1,15 +1,20 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.result; -import org.h2.command.dml.SelectOrderBy; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; + +import org.h2.command.query.QueryOrderBy; import org.h2.engine.Database; -import org.h2.engine.SysProperties; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; +import org.h2.mode.DefaultNullOrdering; import org.h2.table.Column; import org.h2.table.TableFilter; import org.h2.util.Utils; @@ -17,14 +22,10 @@ import org.h2.value.ValueNull; import org.h2.value.ValueRow; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; - /** * A sort order represents an ORDER BY clause in a query. */ -public class SortOrder implements Comparator { +public final class SortOrder implements Comparator { /** * This bit mask means the values should be sorted in ascending order. @@ -48,34 +49,7 @@ public class SortOrder implements Comparator { */ public static final int NULLS_LAST = 4; - /** - * The default comparison result for NULL, either 1 or -1. - */ - private static final int DEFAULT_NULL_SORT; - - /** - * The default NULLs sort order bit for ASC indexes. - */ - private static final int DEFAULT_ASC_NULLS; - - /** - * The default NULLs sort order bit for DESC indexes. - */ - private static final int DEFAULT_DESC_NULLS; - - static { - if (SysProperties.SORT_NULLS_HIGH) { - DEFAULT_NULL_SORT = 1; - DEFAULT_ASC_NULLS = NULLS_LAST; - DEFAULT_DESC_NULLS = NULLS_FIRST; - } else { // default - DEFAULT_NULL_SORT = -1; - DEFAULT_ASC_NULLS = NULLS_FIRST; - DEFAULT_DESC_NULLS = NULLS_LAST; - } - } - - private final Database database; + private final SessionLocal session; /** * The column indexes of the order by expressions within the query. @@ -90,19 +64,29 @@ public class SortOrder implements Comparator { /** * The order list. 
*/ - private final ArrayList orderList; + private final ArrayList orderList; + + /** + * Construct a new sort order object with default sort directions. + * + * @param session the session + * @param queryColumnIndexes the column index list + */ + public SortOrder(SessionLocal session, int[] queryColumnIndexes) { + this (session, queryColumnIndexes, new int[queryColumnIndexes.length], null); + } /** * Construct a new sort order object. * - * @param database the database + * @param session the session * @param queryColumnIndexes the column index list * @param sortType the sort order bit masks * @param orderList the original query order list (if this is a query) */ - public SortOrder(Database database, int[] queryColumnIndexes, - int[] sortType, ArrayList orderList) { - this.database = database; + public SortOrder(SessionLocal session, int[] queryColumnIndexes, int[] sortType, + ArrayList orderList) { + this.session = session; this.queryColumnIndexes = queryColumnIndexes; this.sortTypes = sortType; this.orderList = orderList; @@ -112,13 +96,13 @@ public SortOrder(Database database, int[] queryColumnIndexes, * Create the SQL snippet that describes this sort order. * This is the SQL snippet that usually appears after the ORDER BY clause. 
* + * @param builder string builder to append to * @param list the expression list * @param visible the number of columns in the select list - * @param alwaysQuote quote all identifiers - * @return the SQL snippet + * @param sqlFlags formatting flags + * @return the specified string builder */ - public String getSQL(Expression[] list, int visible, boolean alwaysQuote) { - StringBuilder builder = new StringBuilder(); + public StringBuilder getSQL(StringBuilder builder, Expression[] list, int visible, int sqlFlags) { int i = 0; for (int idx : queryColumnIndexes) { if (i > 0) { @@ -127,12 +111,11 @@ public String getSQL(Expression[] list, int visible, boolean alwaysQuote) { if (idx < visible) { builder.append(idx + 1); } else { - builder.append('='); - list[idx].getUnenclosedSQL(builder, alwaysQuote); + list[idx].getUnenclosedSQL(builder, sqlFlags); } typeToString(builder, sortTypes[i++]); } - return builder.toString(); + return builder; } /** @@ -151,26 +134,6 @@ public static void typeToString(StringBuilder builder, int type) { } } - /** - * Compare two expressions where one of them is NULL. - * - * @param aNull whether the first expression is null - * @param sortType the sort bit mask to use - * @return the result of the comparison (-1 meaning the first expression - * should appear before the second, 0 if they are equal) - */ - public static int compareNull(boolean aNull, int sortType) { - if ((sortType & NULLS_FIRST) != 0) { - return aNull ? -1 : 1; - } else if ((sortType & NULLS_LAST) != 0) { - return aNull ? 1 : -1; - } else { - // see also JdbcDatabaseMetaData.nullsAreSorted* - int comp = aNull ? DEFAULT_NULL_SORT : -DEFAULT_NULL_SORT; - return (sortType & DESCENDING) == 0 ? comp : -comp; - } - } - /** * Compare two expression lists. 
* @@ -190,9 +153,9 @@ public int compare(Value[] a, Value[] b) { if (aNull == bNull) { continue; } - return compareNull(aNull, type); + return session.getDatabase().getDefaultNullOrdering().compareNull(aNull, type); } - int comp = database.compare(ao, bo); + int comp = session.compare(ao, bo); if (comp != 0) { return (type & DESCENDING) == 0 ? comp : -comp; } @@ -206,34 +169,24 @@ public int compare(Value[] a, Value[] b) { * @param rows the list of rows */ public void sort(ArrayList rows) { - Collections.sort(rows, this); + rows.sort(this); } /** * Sort a list of rows using offset and limit. * * @param rows the list of rows - * @param offset the offset - * @param limit the limit + * @param fromInclusive the start index, inclusive + * @param toExclusive the end index, exclusive */ - public void sort(ArrayList rows, int offset, int limit) { - int rowsSize = rows.size(); - if (rows.isEmpty() || offset >= rowsSize || limit == 0) { - return; - } - if (offset < 0) { - offset = 0; - } - if (offset + limit > rowsSize) { - limit = rowsSize - offset; - } - if (limit == 1 && offset == 0) { + public void sort(ArrayList rows, int fromInclusive, int toExclusive) { + if (toExclusive == 1 && fromInclusive == 0) { rows.set(0, Collections.min(rows, this)); return; } Value[][] arr = rows.toArray(new Value[0][]); - Utils.sortTopN(arr, offset, limit, this); - for (int i = 0, end = Math.min(offset + limit, rowsSize); i < end; i++) { + Utils.sortTopN(arr, fromInclusive, toExclusive, this); + for (int i = fromInclusive; i < toExclusive; i++) { rows.set(i, arr[i]); } } @@ -265,7 +218,7 @@ public Column getColumn(int index, TableFilter filter) { if (orderList == null) { return null; } - SelectOrderBy order = orderList.get(index); + QueryOrderBy order = orderList.get(index); Expression expr = order.expression; if (expr == null) { return null; @@ -294,45 +247,50 @@ public int[] getSortTypes() { } /** - * Returns sort order bit masks with {@link #NULLS_FIRST} or {@link #NULLS_LAST} - * 
explicitly set, depending on {@link SysProperties#SORT_NULLS_HIGH}. + * Returns the original query order list. * - * @return bit masks with either {@link #NULLS_FIRST} or {@link #NULLS_LAST} explicitly set. + * @return the original query order list */ - public int[] getSortTypesWithNullPosition() { - final int[] sortTypes = this.sortTypes.clone(); - for (int i=0, length = sortTypes.length; i getOrderList() { + return orderList; } /** - * Returns comparator for row values. + * Returns sort order bit masks with {@link SortOrder#NULLS_FIRST} or + * {@link SortOrder#NULLS_LAST} explicitly set. * - * @return comparator for row values. + * @return bit masks with either {@link SortOrder#NULLS_FIRST} or {@link SortOrder#NULLS_LAST} + * explicitly set. */ - public Comparator getRowValueComparator() { - return new Comparator() { - @Override - public int compare(Value o1, Value o2) { - return SortOrder.this.compare(((ValueRow) o1).getList(), ((ValueRow) o2).getList()); - } - }; + public int[] getSortTypesWithNullOrdering() { + return addNullOrdering(session.getDatabase(), sortTypes.clone()); } /** - * Returns a sort type bit mask with {@link #NULLS_FIRST} or {@link #NULLS_LAST} - * explicitly set, depending on {@link SysProperties#SORT_NULLS_HIGH}. + * Add explicit {@link SortOrder#NULLS_FIRST} or {@link SortOrder#NULLS_LAST} where they + * aren't already specified. * - * @param sortType sort type bit mask - * @return bit mask with either {@link #NULLS_FIRST} or {@link #NULLS_LAST} explicitly set. + * @param database + * the database + * @param sortTypes + * bit masks + * @return the specified array with possibly modified bit masks */ - public static int addExplicitNullPosition(int sortType) { - if ((sortType & (NULLS_FIRST | NULLS_LAST)) == 0) { - return sortType | ((sortType & DESCENDING) == 0 ? 
DEFAULT_ASC_NULLS : DEFAULT_DESC_NULLS); - } else { - return sortType; + public static int[] addNullOrdering(Database database, int[] sortTypes) { + DefaultNullOrdering defaultNullOrdering = database.getDefaultNullOrdering(); + for (int i = 0, length = sortTypes.length; i < length; i++) { + sortTypes[i] = defaultNullOrdering.addExplicitNullOrdering(sortTypes[i]); } + return sortTypes; } + + /** + * Returns comparator for row values. + * + * @return comparator for row values. + */ + public Comparator getRowValueComparator() { + return (o1, o2) -> compare(((ValueRow) o1).getList(), ((ValueRow) o2).getList()); + } + } diff --git a/h2/src/main/org/h2/result/Sparse.java b/h2/src/main/org/h2/result/Sparse.java new file mode 100644 index 0000000000..828cd05197 --- /dev/null +++ b/h2/src/main/org/h2/result/Sparse.java @@ -0,0 +1,64 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import org.h2.value.Value; +import org.h2.value.ValueBigint; + +/** + * Class Sparse. + *
            + *
          • 11/16/19 7:35 PM initial creation + *
          + * + * @author Andrei Tokar + */ +public final class Sparse extends DefaultRow { + private final int columnCount; + private final int[] map; + + Sparse(int columnCount, int capacity, int[] map) { + super(new Value[capacity]); + this.columnCount = columnCount; + this.map = map; + } + + @Override + public int getColumnCount() { + return columnCount; + } + + @Override + public Value getValue(int i) { + if (i == ROWID_INDEX) { + return ValueBigint.get(getKey()); + } + int index = map[i]; + return index > 0 ? super.getValue(index - 1) : null; + } + + @Override + public void setValue(int i, Value v) { + if (i == ROWID_INDEX) { + setKey(v.getLong()); + } + int index = map[i]; + if (index > 0) { + super.setValue(index - 1, v); + } + } + + @Override + public void copyFrom(SearchRow source) { + setKey(source.getKey()); + for (int i = 0; i < map.length; i++) { + int index = map[i]; + if (index > 0) { + super.setValue(index - 1, source.getValue(i)); + } + } + } +} diff --git a/h2/src/main/org/h2/result/UpdatableRow.java b/h2/src/main/org/h2/result/UpdatableRow.java index 77707a4039..fb3e7077de 100644 --- a/h2/src/main/org/h2/result/UpdatableRow.java +++ b/h2/src/main/org/h2/result/UpdatableRow.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.result; @@ -12,13 +12,18 @@ import java.util.ArrayList; import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.engine.Session; +import org.h2.engine.SessionRemote; import org.h2.jdbc.JdbcConnection; +import org.h2.jdbc.JdbcResultSet; import org.h2.message.DbException; +import org.h2.util.JdbcUtils; import org.h2.util.StringUtils; import org.h2.util.Utils; -import org.h2.value.DataType; import org.h2.value.Value; import org.h2.value.ValueNull; +import org.h2.value.ValueToObjectConverter; /** * This class is used for updatable result sets. An updatable row provides @@ -40,12 +45,16 @@ public class UpdatableRow { * * @param conn the database connection * @param result the result + * @throws SQLException on failure */ public UpdatableRow(JdbcConnection conn, ResultInterface result) throws SQLException { this.conn = conn; this.result = result; columnCount = result.getVisibleColumnCount(); + if (columnCount == 0) { + return; + } for (int i = 0; i < columnCount; i++) { String t = result.getTableName(i); String s = result.getSchemaName(i); @@ -63,18 +72,20 @@ public UpdatableRow(JdbcConnection conn, ResultInterface result) return; } } + String type = "BASE TABLE"; + Session session = conn.getSession(); + if (session instanceof SessionRemote + && ((SessionRemote) session).getClientVersion() <= Constants.TCP_PROTOCOL_VERSION_19) { + type = "TABLE"; + } final DatabaseMetaData meta = conn.getMetaData(); ResultSet rs = meta.getTables(null, StringUtils.escapeMetaDataPattern(schemaName), StringUtils.escapeMetaDataPattern(tableName), - new String[] { "TABLE" }); + new String[] { type }); if (!rs.next()) { return; } - if (rs.getString("SQL") == null) { - // system table - return; - } String table = rs.getString("TABLE_NAME"); // if the table name in the database meta data is lower case, // but the table in the result set meta data is not, then the column @@ -178,8 +189,7 @@ private void 
appendKeyCondition(StringBuilder builder) { } } - private void setKey(PreparedStatement prep, int start, Value[] current) - throws SQLException { + private void setKey(PreparedStatement prep, int start, Value[] current) throws SQLException { for (int i = 0, size = key.size(); i < size; i++) { String col = key.get(i); int idx = getColumnIndex(col); @@ -189,7 +199,7 @@ private void setKey(PreparedStatement prep, int start, Value[] current) // as multiple such rows could exist throw DbException.get(ErrorCode.NO_DATA_AVAILABLE); } - v.set(prep, start + i); + JdbcUtils.set(prep, start + i, v, conn); } } @@ -217,6 +227,7 @@ private void appendTableName(StringBuilder builder) { * * @param row the values that contain the key * @return the row + * @throws SQLException on failure */ public Value[] readRow(Value[] row) throws SQLException { StringBuilder builder = new StringBuilder("SELECT "); @@ -226,14 +237,13 @@ public Value[] readRow(Value[] row) throws SQLException { appendKeyCondition(builder); PreparedStatement prep = conn.prepareStatement(builder.toString()); setKey(prep, 1, row); - ResultSet rs = prep.executeQuery(); + JdbcResultSet rs = (JdbcResultSet) prep.executeQuery(); if (!rs.next()) { throw DbException.get(ErrorCode.NO_DATA_AVAILABLE); } Value[] newRow = new Value[columnCount]; for (int i = 0; i < columnCount; i++) { - int type = result.getColumnType(i).getValueType(); - newRow[i] = DataType.readValue(conn.getSession(), rs, i + 1, type); + newRow[i] = ValueToObjectConverter.readValue(conn.getSession(), rs, i + 1); } return newRow; } @@ -280,7 +290,7 @@ public void updateRow(Value[] current, Value[] updateRow) throws SQLException { if (v == null) { v = current[i]; } - v.set(prep, j++); + JdbcUtils.set(prep, j++, v, conn); } setKey(prep, j, current); int count = prep.executeUpdate(); @@ -318,7 +328,7 @@ public void insertRow(Value[] row) throws SQLException { for (int i = 0, j = 0; i < columnCount; i++) { Value v = row[i]; if (v != null) { - v.set(prep, j++ + 
1); + JdbcUtils.set(prep, j++ + 1, v, conn); } } int count = prep.executeUpdate(); diff --git a/h2/src/main/org/h2/result/package.html b/h2/src/main/org/h2/result/package.html index 927aa555df..0629958272 100644 --- a/h2/src/main/org/h2/result/package.html +++ b/h2/src/main/org/h2/result/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/schema/Constant.java b/h2/src/main/org/h2/schema/Constant.java index cd34a87eee..bcf523ab79 100644 --- a/h2/src/main/org/h2/schema/Constant.java +++ b/h2/src/main/org/h2/schema/Constant.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.schema; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.ValueExpression; import org.h2.message.DbException; import org.h2.message.Trace; @@ -17,7 +17,7 @@ * A user-defined constant as created by the SQL statement * CREATE CONSTANT */ -public class Constant extends SchemaObjectBase { +public final class Constant extends SchemaObject { private Value value; private ValueExpression expression; @@ -28,19 +28,14 @@ public Constant(Schema schema, int id, String name) { @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); - } - - @Override - public String getDropSQL() { - return null; + throw DbException.getInternalError(toString()); } @Override public String getCreateSQL() { StringBuilder builder = new StringBuilder("CREATE CONSTANT "); - getSQL(builder, true).append(" VALUE "); - return value.getSQL(builder).toString(); + getSQL(builder, DEFAULT_SQL_FLAGS).append(" VALUE "); + return value.getSQL(builder, DEFAULT_SQL_FLAGS).toString(); } @Override @@ -49,16 
+44,11 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { database.removeMeta(session, getId()); invalidate(); } - @Override - public void checkRename() { - // ok - } - public void setValue(Value value) { this.value = value; expression = ValueExpression.get(value); diff --git a/h2/src/main/org/h2/schema/Domain.java b/h2/src/main/org/h2/schema/Domain.java new file mode 100644 index 0000000000..1003a2105a --- /dev/null +++ b/h2/src/main/org/h2/schema/Domain.java @@ -0,0 +1,224 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.schema; + +import java.util.ArrayList; +import org.h2.constraint.Constraint; +import org.h2.constraint.ConstraintDomain; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.message.Trace; +import org.h2.table.ColumnTemplate; +import org.h2.table.Table; +import org.h2.util.Utils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Represents a domain. + */ +public final class Domain extends SchemaObject implements ColumnTemplate { + + private TypeInfo type; + + /** + * Parent domain. 
+ */ + private Domain domain; + + private Expression defaultExpression; + + private Expression onUpdateExpression; + + private ArrayList constraints; + + public Domain(Schema schema, int id, String name) { + super(schema, id, name, Trace.SCHEMA); + } + + @Override + public String getCreateSQLForCopy(Table table, String quotedName) { + throw DbException.getInternalError(toString()); + } + + @Override + public String getDropSQL() { + StringBuilder builder = new StringBuilder("DROP DOMAIN IF EXISTS "); + return getSQL(builder, DEFAULT_SQL_FLAGS).toString(); + } + + @Override + public String getCreateSQL() { + StringBuilder builder = getSQL(new StringBuilder("CREATE DOMAIN "), DEFAULT_SQL_FLAGS).append(" AS "); + if (domain != null) { + domain.getSQL(builder, DEFAULT_SQL_FLAGS); + } else { + type.getSQL(builder, DEFAULT_SQL_FLAGS); + } + if (defaultExpression != null) { + defaultExpression.getUnenclosedSQL(builder.append(" DEFAULT "), DEFAULT_SQL_FLAGS); + } + if (onUpdateExpression != null) { + onUpdateExpression.getUnenclosedSQL(builder.append(" ON UPDATE "), DEFAULT_SQL_FLAGS); + } + return builder.toString(); + } + + public void setDataType(TypeInfo type) { + this.type = type; + } + + public TypeInfo getDataType() { + return type; + } + + @Override + public void setDomain(Domain domain) { + this.domain = domain; + } + + @Override + public Domain getDomain() { + return domain; + } + + @Override + public void setDefaultExpression(SessionLocal session, Expression defaultExpression) { + // also to test that no column names are used + if (defaultExpression != null) { + defaultExpression = defaultExpression.optimize(session); + if (defaultExpression.isConstant()) { + defaultExpression = ValueExpression.get(defaultExpression.getValue(session)); + } + } + this.defaultExpression = defaultExpression; + } + + @Override + public Expression getDefaultExpression() { + return defaultExpression; + } + + @Override + public Expression getEffectiveDefaultExpression() { + return 
defaultExpression != null ? defaultExpression + : domain != null ? domain.getEffectiveDefaultExpression() : null; + } + + @Override + public String getDefaultSQL() { + return defaultExpression == null ? null + : defaultExpression.getUnenclosedSQL(new StringBuilder(), DEFAULT_SQL_FLAGS).toString(); + } + + @Override + public void setOnUpdateExpression(SessionLocal session, Expression onUpdateExpression) { + // also to test that no column names are used + if (onUpdateExpression != null) { + onUpdateExpression = onUpdateExpression.optimize(session); + if (onUpdateExpression.isConstant()) { + onUpdateExpression = ValueExpression.get(onUpdateExpression.getValue(session)); + } + } + this.onUpdateExpression = onUpdateExpression; + } + + @Override + public Expression getOnUpdateExpression() { + return onUpdateExpression; + } + + @Override + public Expression getEffectiveOnUpdateExpression() { + return onUpdateExpression != null ? onUpdateExpression + : domain != null ? domain.getEffectiveOnUpdateExpression() : null; + } + + @Override + public String getOnUpdateSQL() { + return onUpdateExpression == null ? null + : onUpdateExpression.getUnenclosedSQL(new StringBuilder(), DEFAULT_SQL_FLAGS).toString(); + } + + @Override + public void prepareExpressions(SessionLocal session) { + if (defaultExpression != null) { + defaultExpression = defaultExpression.optimize(session); + } + if (onUpdateExpression != null) { + onUpdateExpression = onUpdateExpression.optimize(session); + } + if (domain != null) { + domain.prepareExpressions(session); + } + } + + /** + * Add a constraint to the domain. + * + * @param constraint the constraint to add + */ + public void addConstraint(ConstraintDomain constraint) { + if (constraints == null) { + constraints = Utils.newSmallArrayList(); + } + if (!constraints.contains(constraint)) { + constraints.add(constraint); + } + } + + public ArrayList getConstraints() { + return constraints; + } + + /** + * Remove the given constraint from the list. 
+ * + * @param constraint the constraint to remove + */ + public void removeConstraint(Constraint constraint) { + if (constraints != null) { + constraints.remove(constraint); + } + } + + @Override + public int getType() { + return DbObject.DOMAIN; + } + + @Override + public void removeChildrenAndResources(SessionLocal session) { + if (constraints != null && !constraints.isEmpty()) { + for (ConstraintDomain constraint : constraints.toArray(new ConstraintDomain[0])) { + database.removeSchemaObject(session, constraint); + } + constraints = null; + } + database.removeMeta(session, getId()); + } + + /** + * Check the specified value. + * + * @param session the session + * @param value the value + */ + public void checkConstraints(SessionLocal session, Value value) { + if (constraints != null) { + for (ConstraintDomain constraint : constraints) { + constraint.check(session, value); + } + } + if (domain != null) { + domain.checkConstraints(session, value); + } + } + +} diff --git a/h2/src/main/org/h2/engine/FunctionAlias.java b/h2/src/main/org/h2/schema/FunctionAlias.java similarity index 65% rename from h2/src/main/org/h2/engine/FunctionAlias.java rename to h2/src/main/org/h2/schema/FunctionAlias.java index 328d5a8a1e..47caf1ecf9 100644 --- a/h2/src/main/org/h2/engine/FunctionAlias.java +++ b/h2/src/main/org/h2/schema/FunctionAlias.java @@ -1,34 +1,45 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ -package org.h2.engine; +package org.h2.schema; import java.lang.reflect.Array; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import org.h2.Driver; import org.h2.api.ErrorCode; -import org.h2.command.Parser; +import org.h2.engine.Constants; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; +import org.h2.expression.Alias; import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; import org.h2.message.Trace; -import org.h2.schema.Schema; -import org.h2.schema.SchemaObjectBase; -import org.h2.table.Table; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.table.Column; import org.h2.util.JdbcUtils; import org.h2.util.SourceCompiler; import org.h2.util.StringUtils; +import org.h2.util.Utils; import org.h2.value.DataType; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueArray; import org.h2.value.ValueNull; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueToObjectConverter2; /** * Represents a user-defined function, or alias. 
@@ -36,14 +47,12 @@ * @author Thomas Mueller * @author Gary Tong */ -public class FunctionAlias extends SchemaObjectBase { +public final class FunctionAlias extends UserDefinedFunction { - private String className; private String methodName; private String source; private JavaMethod[] javaMethods; private boolean deterministic; - private boolean bufferResultSetToLocalTemp = true; private FunctionAlias(Schema schema, int id, String name) { super(schema, id, name, Trace.FUNCTION); @@ -57,12 +66,11 @@ private FunctionAlias(Schema schema, int id, String name) { * @param name the name * @param javaClassMethod the class and method name * @param force create the object even if the class or method does not exist - * @param bufferResultSetToLocalTemp whether the result should be buffered * @return the database object */ public static FunctionAlias newInstance( Schema schema, int id, String name, String javaClassMethod, - boolean force, boolean bufferResultSetToLocalTemp) { + boolean force) { FunctionAlias alias = new FunctionAlias(schema, id, name); int paren = javaClassMethod.indexOf('('); int lastDot = javaClassMethod.lastIndexOf('.', paren < 0 ? 
@@ -72,7 +80,6 @@ public static FunctionAlias newInstance( } alias.className = javaClassMethod.substring(0, lastDot); alias.methodName = javaClassMethod.substring(lastDot + 1); - alias.bufferResultSetToLocalTemp = bufferResultSetToLocalTemp; alias.init(force); return alias; } @@ -85,15 +92,12 @@ public static FunctionAlias newInstance( * @param name the name * @param source the source code * @param force create the object even if the class or method does not exist - * @param bufferResultSetToLocalTemp whether the result should be buffered * @return the database object */ public static FunctionAlias newInstanceFromSource( - Schema schema, int id, String name, String source, boolean force, - boolean bufferResultSetToLocalTemp) { + Schema schema, int id, String name, String source, boolean force) { FunctionAlias alias = new FunctionAlias(schema, id, name); alias.source = source; - alias.bufferResultSetToLocalTemp = bufferResultSetToLocalTemp; alias.init(force); return alias; } @@ -195,43 +199,24 @@ private static String getMethodSignature(Method m) { return buff.append(')').toString(); } - @Override - public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); - } - @Override public String getDropSQL() { - return "DROP ALIAS IF EXISTS " + getSQL(true); - } - - @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - // TODO can remove this method once FUNCTIONS_IN_SCHEMA is enabled - if (database.getSettings().functionsInSchema || getSchema().getId() != Constants.MAIN_SCHEMA_ID) { - return super.getSQL(builder, alwaysQuote); - } - return Parser.quoteIdentifier(builder, getName(), alwaysQuote); + return getSQL(new StringBuilder("DROP ALIAS IF EXISTS "), DEFAULT_SQL_FLAGS).toString(); } @Override public String getCreateSQL() { - StringBuilder buff = new StringBuilder("CREATE FORCE ALIAS "); - buff.append(getSQL(true)); + StringBuilder builder = new StringBuilder("CREATE FORCE 
ALIAS "); + getSQL(builder, DEFAULT_SQL_FLAGS); if (deterministic) { - buff.append(" DETERMINISTIC"); - } - if (!bufferResultSetToLocalTemp) { - buff.append(" NOBUFFER"); + builder.append(" DETERMINISTIC"); } if (source != null) { - buff.append(" AS "); - StringUtils.quoteStringSQL(buff, source); + StringUtils.quoteStringSQL(builder.append(" AS "), source); } else { - buff.append(" FOR "); - Parser.quoteIdentifier(buff, className + "." + methodName, true); + StringUtils.quoteStringSQL(builder.append(" FOR "), className + '.' + methodName); } - return buff.toString(); + return builder.toString(); } @Override @@ -240,7 +225,7 @@ public int getType() { } @Override - public synchronized void removeChildrenAndResources(Session session) { + public synchronized void removeChildrenAndResources(SessionLocal session) { database.removeMeta(session, getId()); className = null; methodName = null; @@ -248,11 +233,6 @@ public synchronized void removeChildrenAndResources(Session session) { invalidate(); } - @Override - public void checkRename() { - throw DbException.getUnsupportedException("RENAME"); - } - /** * Find the Java method that matches the arguments. * @@ -274,10 +254,6 @@ public JavaMethod findJavaMethod(Expression[] args) { className + ", parameter count: " + parameterCount + ")"); } - public String getJavaClassName() { - return this.className; - } - public String getJavaMethodName() { return this.methodName; } @@ -304,15 +280,6 @@ public String getSource() { return source; } - /** - * Should the return value ResultSet be buffered in a local temporary file? - * - * @return true if yes - */ - public boolean isBufferResultSetToLocalTemp() { - return bufferResultSetToLocalTemp; - } - /** * There may be multiple Java methods that match a function name. * Each method must have a different number of parameters however. 
@@ -321,7 +288,7 @@ public boolean isBufferResultSetToLocalTemp() { public static class JavaMethod implements Comparable { private final int id; private final Method method; - private final int dataType; + private final TypeInfo dataType; private boolean hasConnectionParam; private boolean varArgs; private Class varArgClass; @@ -347,7 +314,8 @@ public static class JavaMethod implements Comparable { } } Class returnClass = method.getReturnType(); - dataType = DataType.getTypeFromClass(returnClass); + dataType = ResultSet.class.isAssignableFrom(returnClass) ? null + : ValueToObjectConverter2.classToType(returnClass); } @Override @@ -373,13 +341,95 @@ public boolean hasConnectionParam() { * list * @return the value */ - public Value getValue(Session session, Expression[] args, - boolean columnList) { + public Value getValue(SessionLocal session, Expression[] args, boolean columnList) { + Object returnValue = execute(session, args, columnList); + if (Value.class.isAssignableFrom(method.getReturnType())) { + return (Value) returnValue; + } + return ValueToObjectConverter.objectToValue(session, returnValue, dataType.getValueType()) + .convertTo(dataType, session); + } + + /** + * Call the table user-defined function and return the value. + * + * @param session the session + * @param args the argument list + * @param columnList true if the function should only return the column + * list + * @return the value + */ + public ResultInterface getTableValue(SessionLocal session, Expression[] args, boolean columnList) { + Object o = execute(session, args, columnList); + if (o == null) { + throw DbException.get(ErrorCode.FUNCTION_MUST_RETURN_RESULT_SET_1, method.getName()); + } + if (ResultInterface.class.isAssignableFrom(method.getReturnType())) { + return (ResultInterface) o; + } + return resultSetToResult(session, (ResultSet) o, columnList ? 0 : Integer.MAX_VALUE); + } + + /** + * Create a result for the given result set. 
+ * + * @param session the session + * @param rs the result set + * @param maxrows the maximum number of rows to read (0 to just read the + * meta data) + * @return the value + */ + public static ResultInterface resultSetToResult(SessionLocal session, ResultSet rs, int maxrows) { + try { + ResultSetMetaData meta = rs.getMetaData(); + int columnCount = meta.getColumnCount(); + Expression[] columns = new Expression[columnCount]; + for (int i = 0; i < columnCount; i++) { + String alias = meta.getColumnLabel(i + 1); + String name = meta.getColumnName(i + 1); + String columnTypeName = meta.getColumnTypeName(i + 1); + int columnType = DataType.convertSQLTypeToValueType(meta.getColumnType(i + 1), columnTypeName); + int precision = meta.getPrecision(i + 1); + int scale = meta.getScale(i + 1); + TypeInfo typeInfo; + if (columnType == Value.ARRAY && columnTypeName.endsWith(" ARRAY")) { + typeInfo = TypeInfo + .getTypeInfo(Value.ARRAY, -1L, 0, + TypeInfo.getTypeInfo(DataType.getTypeByName( + columnTypeName.substring(0, columnTypeName.length() - 6), + session.getMode()).type)); + } else { + typeInfo = TypeInfo.getTypeInfo(columnType, precision, scale, null); + } + Expression e = new ExpressionColumn(session.getDatabase(), new Column(name, typeInfo)); + if (!alias.equals(name)) { + e = new Alias(e, alias, false); + } + columns[i] = e; + } + LocalResult result = new LocalResult(session, columns, columnCount, columnCount); + for (int i = 0; i < maxrows && rs.next(); i++) { + Value[] list = new Value[columnCount]; + for (int j = 0; j < columnCount; j++) { + list[j] = ValueToObjectConverter.objectToValue(session, rs.getObject(j + 1), + columns[j].getType().getValueType()); + } + result.addRow(list); + } + result.done(); + return result; + } catch (SQLException e) { + throw DbException.convert(e); + } + } + + private Object execute(SessionLocal session, Expression[] args, boolean columnList) { Class[] paramClasses = method.getParameterTypes(); Object[] params = new 
Object[paramClasses.length]; int p = 0; + JdbcConnection conn = session.createConnection(columnList); if (hasConnectionParam && params.length > 0) { - params[p++] = session.createConnection(columnList); + params[p++] = conn; } // allocate array for varArgs parameters @@ -400,43 +450,29 @@ public Value getValue(Session session, Expression[] args, } else { paramClass = paramClasses[p]; } - int type = DataType.getTypeFromClass(paramClass); Value v = args[a].getValue(session); Object o; if (Value.class.isAssignableFrom(paramClass)) { o = v; - } else if (v.getValueType() == Value.ARRAY && - paramClass.isArray() && - paramClass.getComponentType() != Object.class) { - Value[] array = ((ValueArray) v).getList(); - Object[] objArray = (Object[]) Array.newInstance( - paramClass.getComponentType(), array.length); - int componentType = DataType.getTypeFromClass( - paramClass.getComponentType()); - Mode mode = session.getDatabase().getMode(); - for (int i = 0; i < objArray.length; i++) { - objArray[i] = array[i].convertTo(componentType, mode).getObject(); - } - o = objArray; } else { - v = v.convertTo(type, session.getDatabase().getMode()); - o = v.getObject(); - } - if (o == null) { - if (paramClass.isPrimitive()) { - if (columnList) { - // If the column list is requested, the parameters - // may be null. Need to set to default value, - // otherwise the function can't be called at all. - o = DataType.getDefaultForPrimitiveType(paramClass); + boolean primitive = paramClass.isPrimitive(); + if (v == ValueNull.INSTANCE) { + if (primitive) { + if (columnList) { + // If the column list is requested, the parameters + // may be null. Need to set to default value, + // otherwise the function can't be called at all. 
+ o = DataType.getDefaultForPrimitiveType(paramClass); + } else { + // NULL for a java primitive: return NULL + return null; + } } else { - // NULL for a java primitive: return NULL - return ValueNull.INSTANCE; + o = null; } - } - } else { - if (!paramClass.isAssignableFrom(o.getClass()) && !paramClass.isPrimitive()) { - o = DataType.convertTo(session.createConnection(false), v, paramClass); + } else { + o = ValueToObjectConverter.valueToObject( + (Class) (primitive ? Utils.getNonPrimitiveClass(paramClass) : paramClass), v, conn); } } if (currentIsVarArg) { @@ -446,7 +482,7 @@ public Value getValue(Session session, Expression[] args, } } boolean old = session.getAutoCommit(); - Value identity = session.getLastScopeIdentity(); + Value identity = session.getLastIdentity(); boolean defaultConnection = session.getDatabase(). getSettings().defaultConnection; try { @@ -454,12 +490,11 @@ public Value getValue(Session session, Expression[] args, Object returnValue; try { if (defaultConnection) { - Driver.setDefaultConnection( - session.createConnection(columnList)); + Driver.setDefaultConnection(session.createConnection(columnList)); } returnValue = method.invoke(null, params); if (returnValue == null) { - return ValueNull.INSTANCE; + return null; } } catch (InvocationTargetException e) { StringBuilder builder = new StringBuilder(method.getName()).append('('); @@ -474,13 +509,9 @@ public Value getValue(Session session, Expression[] args, } catch (Exception e) { throw DbException.convert(e); } - if (Value.class.isAssignableFrom(method.getReturnType())) { - return (Value) returnValue; - } - Value ret = DataType.convertToValue(session, returnValue, dataType); - return ret.convertTo(dataType); + return returnValue; } finally { - session.setLastScopeIdentity(identity); + session.setLastIdentity(identity); session.setAutoCommit(old); if (defaultConnection) { Driver.setDefaultConnection(null); @@ -492,7 +523,14 @@ public Class[] getColumnClasses() { return 
method.getParameterTypes(); } - public int getDataType() { + /** + * Returns data type information for regular functions or {@code null} + * for table value functions. + * + * @return data type information for regular functions or {@code null} + * for table value functions + */ + public TypeInfo getDataType() { return dataType; } diff --git a/h2/src/main/org/h2/schema/InformationSchema.java b/h2/src/main/org/h2/schema/InformationSchema.java new file mode 100644 index 0000000000..a958166363 --- /dev/null +++ b/h2/src/main/org/h2/schema/InformationSchema.java @@ -0,0 +1,77 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.schema; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.table.InformationSchemaTable; +import org.h2.table.InformationSchemaTableLegacy; +import org.h2.table.Table; + +/** + * Information schema. + */ +public final class InformationSchema extends MetaSchema { + + private volatile HashMap newTables; + + private volatile HashMap oldTables; + + /** + * Creates new instance of information schema. + * + * @param database + * the database + * @param owner + * the owner of the schema (system user) + */ + public InformationSchema(Database database, User owner) { + super(database, Constants.INFORMATION_SCHEMA_ID, database.sysIdentifier("INFORMATION_SCHEMA"), owner); + } + + @Override + protected Map getMap(SessionLocal session) { + if (session == null) { + return Collections.emptyMap(); + } + boolean old = session.isOldInformationSchema(); + HashMap map = old ? oldTables : newTables; + if (map == null) { + map = fillMap(old); + } + return map; + } + + private synchronized HashMap fillMap(boolean old) { + HashMap map = old ? 
oldTables : newTables; + if (map == null) { + map = database.newStringMap(64); + if (old) { + for (int type = 0; type < InformationSchemaTableLegacy.META_TABLE_TYPE_COUNT; type++) { + InformationSchemaTableLegacy table = new InformationSchemaTableLegacy(this, + Constants.INFORMATION_SCHEMA_ID - type, type); + map.put(table.getName(), table); + } + oldTables = map; + } else { + for (int type = 0; type < InformationSchemaTable.META_TABLE_TYPE_COUNT; type++) { + InformationSchemaTable table = new InformationSchemaTable(this, + Constants.INFORMATION_SCHEMA_ID - type, type); + map.put(table.getName(), table); + } + newTables = map; + } + } + return map; + } + +} diff --git a/h2/src/main/org/h2/schema/MetaSchema.java b/h2/src/main/org/h2/schema/MetaSchema.java new file mode 100644 index 0000000000..867421ddc1 --- /dev/null +++ b/h2/src/main/org/h2/schema/MetaSchema.java @@ -0,0 +1,97 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.schema; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Map; + +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.table.Table; + +/** + * Meta data schema. + */ +public abstract class MetaSchema extends Schema { + + /** + * Creates a new instance of meta data schema. 
+ * + * @param database + * the database + * @param id + * the object id + * @param schemaName + * the schema name + * @param owner + * the owner of the schema + */ + public MetaSchema(Database database, int id, String schemaName, User owner) { + super(database, id, schemaName, owner, true); + } + + @Override + public Table findTableOrView(SessionLocal session, String name) { + Map map = getMap(session); + Table table = map.get(name); + if (table != null) { + return table; + } + return super.findTableOrView(session, name); + } + + @Override + public Collection

          Command line options
          [-dump <fileName>]Dump the contends of the file
          [-info <fileName>]
          getAllTablesAndViews(SessionLocal session) { + Collection
          userTables = super.getAllTablesAndViews(session); + if (session == null) { + return userTables; + } + Collection
          systemTables = getMap(session).values(); + if (userTables.isEmpty()) { + return systemTables; + } + ArrayList
          list = new ArrayList<>(systemTables.size() + userTables.size()); + list.addAll(systemTables); + list.addAll(userTables); + return list; + } + + @Override + public Table getTableOrView(SessionLocal session, String name) { + Map map = getMap(session); + Table table = map.get(name); + if (table != null) { + return table; + } + return super.getTableOrView(session, name); + } + + @Override + public Table getTableOrViewByName(SessionLocal session, String name) { + Map map = getMap(session); + Table table = map.get(name); + if (table != null) { + return table; + } + return super.getTableOrViewByName(session, name); + } + + /** + * Returns map of tables in this schema. + * + * @param session the session + * @return map of tables in this schema + */ + protected abstract Map getMap(SessionLocal session); + + @Override + public boolean isEmpty() { + return false; + } + +} diff --git a/h2/src/main/org/h2/schema/Schema.java b/h2/src/main/org/h2/schema/Schema.java index eccfdebbaa..9002a5c8a9 100644 --- a/h2/src/main/org/h2/schema/Schema.java +++ b/h2/src/main/org/h2/schema/Schema.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.schema; @@ -17,42 +17,39 @@ import org.h2.constraint.Constraint; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.DbObjectBase; import org.h2.engine.DbSettings; -import org.h2.engine.FunctionAlias; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; import org.h2.engine.SysProperties; -import org.h2.engine.User; import org.h2.index.Index; import org.h2.message.DbException; import org.h2.message.Trace; -import org.h2.mvstore.db.MVTableEngine; -import org.h2.table.PageStoreTable; +import org.h2.table.MetaTable; import org.h2.table.Table; import org.h2.table.TableLink; import org.h2.table.TableSynonym; -import org.h2.util.StringUtils; import org.h2.util.Utils; /** * A schema as created by the SQL statement * CREATE SCHEMA */ -public class Schema extends DbObjectBase { +public class Schema extends DbObject { - private User owner; + private RightOwner owner; private final boolean system; private ArrayList tableEngineParams; private final ConcurrentHashMap tablesAndViews; + private final ConcurrentHashMap domains; private final ConcurrentHashMap synonyms; private final ConcurrentHashMap indexes; private final ConcurrentHashMap sequences; private final ConcurrentHashMap triggers; private final ConcurrentHashMap constraints; private final ConcurrentHashMap constants; - private final ConcurrentHashMap functions; + private final ConcurrentHashMap functionsAndAggregates; /** * The set of returned unique names that are not yet stored. 
It is used to @@ -71,17 +68,17 @@ public class Schema extends DbObjectBase { * @param system if this is a system schema (such a schema can not be * dropped) */ - public Schema(Database database, int id, String schemaName, User owner, - boolean system) { + public Schema(Database database, int id, String schemaName, RightOwner owner, boolean system) { super(database, id, schemaName, Trace.SCHEMA); tablesAndViews = database.newConcurrentStringMap(); + domains = database.newConcurrentStringMap(); synonyms = database.newConcurrentStringMap(); indexes = database.newConcurrentStringMap(); sequences = database.newConcurrentStringMap(); triggers = database.newConcurrentStringMap(); constraints = database.newConcurrentStringMap(); constants = database.newConcurrentStringMap(); - functions = database.newConcurrentStringMap(); + functionsAndAggregates = database.newConcurrentStringMap(); this.owner = owner; this.system = system; } @@ -97,12 +94,7 @@ public boolean canDrop() { @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); - } - - @Override - public String getDropSQL() { - return null; + throw DbException.getInternalError(toString()); } @Override @@ -111,8 +103,8 @@ public String getCreateSQL() { return null; } StringBuilder builder = new StringBuilder("CREATE SCHEMA IF NOT EXISTS "); - getSQL(builder, true).append(" AUTHORIZATION "); - owner.getSQL(builder, true); + getSQL(builder, DEFAULT_SQL_FLAGS).append(" AUTHORIZATION "); + owner.getSQL(builder, DEFAULT_SQL_FLAGS); return builder.toString(); } @@ -127,8 +119,9 @@ public int getType() { * @return {@code true} if this schema is empty, {@code false} otherwise */ public boolean isEmpty() { - return tablesAndViews.isEmpty() && synonyms.isEmpty() && indexes.isEmpty() && sequences.isEmpty() - && triggers.isEmpty() && constraints.isEmpty() && constants.isEmpty() && functions.isEmpty(); + return tablesAndViews.isEmpty() && domains.isEmpty() && 
synonyms.isEmpty() && indexes.isEmpty() + && sequences.isEmpty() && triggers.isEmpty() && constraints.isEmpty() && constants.isEmpty() + && functionsAndAggregates.isEmpty(); } @Override @@ -144,51 +137,39 @@ public ArrayList getChildren() { } @Override - public void removeChildrenAndResources(Session session) { - while (triggers != null && triggers.size() > 0) { - TriggerObject obj = (TriggerObject) triggers.values().toArray()[0]; - database.removeSchemaObject(session, obj); - } - while (constraints != null && constraints.size() > 0) { - Constraint obj = (Constraint) constraints.values().toArray()[0]; - database.removeSchemaObject(session, obj); - } + public void removeChildrenAndResources(SessionLocal session) { + removeChildrenFromMap(session, triggers); + removeChildrenFromMap(session, constraints); // There can be dependencies between tables e.g. using computed columns, // so we might need to loop over them multiple times. - boolean runLoopAgain = false; - do { - runLoopAgain = false; - if (tablesAndViews != null) { - // Loop over a copy because the map is modified underneath us. - for (Table obj : new ArrayList<>(tablesAndViews.values())) { - // Check for null because multiple tables might be deleted - // in one go underneath us. - if (obj.getName() != null) { - if (database.getDependentTable(obj, obj) == null) { - database.removeSchemaObject(session, obj); - } else { - runLoopAgain = true; - } + boolean modified = true; + while (!tablesAndViews.isEmpty()) { + boolean newModified = false; + for (Table obj : tablesAndViews.values()) { + if (obj.getName() != null) { + // Database.removeSchemaObject() removes the object from + // the map too, but it is safe for ConcurrentHashMap. 
+ Table dependentTable = database.getDependentTable(obj, obj); + if (dependentTable == null) { + database.removeSchemaObject(session, obj); + newModified = true; + } else if (dependentTable.getSchema() != this) { + throw DbException.get(ErrorCode.CANNOT_DROP_2, // + obj.getTraceSQL(), dependentTable.getTraceSQL()); + } else if (!modified) { + dependentTable.removeColumnExpressionsDependencies(session); + dependentTable.setModified(); + database.updateMeta(session, dependentTable); } } } - } while (runLoopAgain); - while (indexes != null && indexes.size() > 0) { - Index obj = (Index) indexes.values().toArray()[0]; - database.removeSchemaObject(session, obj); - } - while (sequences != null && sequences.size() > 0) { - Sequence obj = (Sequence) sequences.values().toArray()[0]; - database.removeSchemaObject(session, obj); - } - while (constants != null && constants.size() > 0) { - Constant obj = (Constant) constants.values().toArray()[0]; - database.removeSchemaObject(session, obj); - } - while (functions != null && functions.size() > 0) { - FunctionAlias obj = (FunctionAlias) functions.values().toArray()[0]; - database.removeSchemaObject(session, obj); + modified = newModified; } + removeChildrenFromMap(session, domains); + removeChildrenFromMap(session, indexes); + removeChildrenFromMap(session, sequences); + removeChildrenFromMap(session, constants); + removeChildrenFromMap(session, functionsAndAggregates); for (Right right : database.getAllRights()) { if (right.getGrantedObject() == this) { database.removeDatabaseObject(session, right); @@ -199,9 +180,21 @@ public void removeChildrenAndResources(Session session) { invalidate(); } - @Override - public void checkRename() { - // ok + private void removeChildrenFromMap(SessionLocal session, ConcurrentHashMap map) { + if (!map.isEmpty()) { + for (SchemaObject obj : map.values()) { + /* + * Referential constraints are dropped when unique or PK + * constraint is dropped, but iterator may return already + * removed objects 
in some cases. + */ + if (obj.isValid()) { + // Database.removeSchemaObject() removes the object from + // the map too, but it is safe for ConcurrentHashMap. + database.removeSchemaObject(session, obj); + } + } + } } /** @@ -209,7 +202,7 @@ public void checkRename() { * * @return the owner */ - public User getOwner() { + public RightOwner getOwner() { return owner; } @@ -237,6 +230,9 @@ private Map getMap(int type) { case DbObject.TABLE_OR_VIEW: result = tablesAndViews; break; + case DbObject.DOMAIN: + result = domains; + break; case DbObject.SYNONYM: result = synonyms; break; @@ -256,10 +252,11 @@ private Map getMap(int type) { result = constants; break; case DbObject.FUNCTION_ALIAS: - result = functions; + case DbObject.AGGREGATE: + result = functionsAndAggregates; break; default: - throw DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } return (Map) result; } @@ -273,14 +270,13 @@ private Map getMap(int type) { */ public void add(SchemaObject obj) { if (obj.getSchema() != this) { - DbException.throwInternalError("wrong schema"); + throw DbException.getInternalError("wrong schema"); } String name = obj.getName(); Map map = getMap(obj.getType()); - if (SysProperties.CHECK && map.get(name) != null) { - DbException.throwInternalError("object already exists: " + name); + if (map.putIfAbsent(name, obj) != null) { + throw DbException.getInternalError("object already exists: " + name); } - map.put(name, obj); freeUniqueName(name); } @@ -294,11 +290,11 @@ public void rename(SchemaObject obj, String newName) { int type = obj.getType(); Map map = getMap(type); if (SysProperties.CHECK) { - if (!map.containsKey(obj.getName())) { - DbException.throwInternalError("not found: " + obj.getName()); + if (!map.containsKey(obj.getName()) && !(obj instanceof MetaTable)) { + throw DbException.getInternalError("not found: " + obj.getName()); } if (obj.getName().equals(newName) || map.containsKey(newName)) { - 
DbException.throwInternalError("object already exists: " + newName); + throw DbException.getInternalError("object already exists: " + newName); } } obj.checkRename(); @@ -318,7 +314,7 @@ public void rename(SchemaObject obj, String newName) { * @param name the object name * @return the object or null */ - public Table findTableOrView(Session session, String name) { + public Table findTableOrView(SessionLocal session, String name) { Table table = tablesAndViews.get(name); if (table == null && session != null) { table = session.findLocalTempTable(name); @@ -336,7 +332,7 @@ public Table findTableOrView(Session session, String name) { * @param name the object name * @return the object or null */ - public Table resolveTableOrView(Session session, String name) { + public Table resolveTableOrView(SessionLocal session, String name) { Table table = findTableOrView(session, name); if (table == null) { TableSynonym synonym = synonyms.get(name); @@ -358,6 +354,16 @@ public TableSynonym getSynonym(String name) { return synonyms.get(name); } + /** + * Get the domain if it exists, or null if not. + * + * @param name the name of the domain + * @return the domain or null + */ + public Domain findDomain(String name) { + return domains.get(name); + } + /** * Try to find an index with this name. This method returns null if * no object with this name exists. 
@@ -366,7 +372,7 @@ public TableSynonym getSynonym(String name) { * @param name the object name * @return the object or null */ - public Index findIndex(Session session, String name) { + public Index findIndex(SessionLocal session, String name) { Index index = indexes.get(name); if (index == null) { index = session.findLocalTempTableIndex(name); @@ -404,7 +410,7 @@ public Sequence findSequence(String sequenceName) { * @param name the object name * @return the object or null */ - public Constraint findConstraint(Session session, String name) { + public Constraint findConstraint(SessionLocal session, String name) { Constraint constraint = constraints.get(name); if (constraint == null) { constraint = session.findLocalTempTableConstraint(name); @@ -431,7 +437,46 @@ public Constant findConstant(String constantName) { * @return the object or null */ public FunctionAlias findFunction(String functionAlias) { - return functions.get(functionAlias); + UserDefinedFunction userDefinedFunction = findFunctionOrAggregate(functionAlias); + return userDefinedFunction instanceof FunctionAlias ? (FunctionAlias) userDefinedFunction : null; + } + + /** + * Get the user defined aggregate function if it exists. This method returns + * null if no object with this name exists. + * + * @param name the name of the user defined aggregate function + * @return the aggregate function or null + */ + public UserAggregate findAggregate(String name) { + UserDefinedFunction userDefinedFunction = findFunctionOrAggregate(name); + return userDefinedFunction instanceof UserAggregate ? (UserAggregate) userDefinedFunction : null; + } + + /** + * Try to find a user defined function or aggregate function with the + * specified name. This method returns null if no object with this name + * exists. 
+ * + * @param name + * the object name + * @return the object or null + */ + public UserDefinedFunction findFunctionOrAggregate(String name) { + return functionsAndAggregates.get(name); + } + + /** + * Reserve a unique object name. + * + * @param name the object name + */ + public void reserveUniqueName(String name) { + if (name != null) { + synchronized (temporaryUniqueNames) { + temporaryUniqueNames.add(name); + } + } } /** @@ -447,30 +492,26 @@ public void freeUniqueName(String name) { } } - private String getUniqueName(DbObject obj, - Map map, String prefix) { - String hash = StringUtils.toUpperEnglish(Integer.toHexString(obj.getName().hashCode())); - String name = null; + private String getUniqueName(DbObject obj, Map map, String prefix) { + StringBuilder nameBuilder = new StringBuilder(prefix); + String hash = Integer.toHexString(obj.getName().hashCode()); synchronized (temporaryUniqueNames) { - for (int i = 1, len = hash.length(); i < len; i++) { - name = prefix + hash.substring(0, i); - if (!map.containsKey(name) && !temporaryUniqueNames.contains(name)) { - break; + for (int i = 0, len = hash.length(); i < len; i++) { + char c = hash.charAt(i); + String name = nameBuilder.append(c >= 'a' ? 
(char) (c - 0x20) : c).toString(); + if (!map.containsKey(name) && temporaryUniqueNames.add(name)) { + return name; } - name = null; } - if (name == null) { - prefix = prefix + hash + "_"; - for (int i = 0;; i++) { - name = prefix + i; - if (!map.containsKey(name) && !temporaryUniqueNames.contains(name)) { - break; - } + int nameLength = nameBuilder.append('_').length(); + for (int i = 0;; i++) { + String name = nameBuilder.append(i).toString(); + if (!map.containsKey(name) && temporaryUniqueNames.add(name)) { + return name; } + nameBuilder.setLength(nameLength); } - temporaryUniqueNames.add(name); } - return name; } /** @@ -480,7 +521,7 @@ private String getUniqueName(DbObject obj, * @param table the constraint table * @return the unique name */ - public String getUniqueConstraintName(Session session, Table table) { + public String getUniqueConstraintName(SessionLocal session, Table table) { Map tableConstraints; if (table.isTemporary() && !table.isGlobalTemporary()) { tableConstraints = session.getLocalTempTableConstraints(); @@ -490,6 +531,17 @@ public String getUniqueConstraintName(Session session, Table table) { return getUniqueName(table, tableConstraints, "CONSTRAINT_"); } + /** + * Create a unique constraint name. + * + * @param session the session + * @param domain the constraint domain + * @return the unique name + */ + public String getUniqueDomainConstraintName(SessionLocal session, Domain domain) { + return getUniqueName(domain, constraints, "CONSTRAINT_"); + } + /** * Create a unique index name. 
* @@ -498,7 +550,7 @@ public String getUniqueConstraintName(Session session, Table table) { * @param prefix the index name prefix * @return the unique name */ - public String getUniqueIndexName(Session session, Table table, String prefix) { + public String getUniqueIndexName(SessionLocal session, Table table, String prefix) { Map tableIndexes; if (table.isTemporary() && !table.isGlobalTemporary()) { tableIndexes = session.getLocalTempTableIndexes(); @@ -517,7 +569,7 @@ public String getUniqueIndexName(Session session, Table table, String prefix) { * @return the table or view * @throws DbException if no such object exists */ - public Table getTableOrView(Session session, String name) { + public Table getTableOrView(SessionLocal session, String name) { Table table = tablesAndViews.get(name); if (table == null) { if (session != null) { @@ -530,6 +582,21 @@ public Table getTableOrView(Session session, String name) { return table; } + /** + * Get the domain with the given name. + * + * @param name the domain name + * @return the domain + * @throws DbException if no such object exists + */ + public Domain getDomain(String name) { + Domain domain = domains.get(name); + if (domain == null) { + throw DbException.get(ErrorCode.DOMAIN_NOT_FOUND_1, name); + } + return domain; + } + /** * Get the index with the given name. 
* @@ -604,13 +671,14 @@ public ArrayList getAll(ArrayList addTo) { addTo = Utils.newSmallArrayList(); } addTo.addAll(tablesAndViews.values()); + addTo.addAll(domains.values()); addTo.addAll(synonyms.values()); addTo.addAll(sequences.values()); addTo.addAll(indexes.values()); addTo.addAll(triggers.values()); addTo.addAll(constraints.values()); addTo.addAll(constants.values()); - addTo.addAll(functions.values()); + addTo.addAll(functionsAndAggregates.values()); return addTo; } @@ -620,49 +688,63 @@ public ArrayList getAll(ArrayList addTo) { * @param type * the object type * @param addTo - * list to add objects to, or {@code null} to allocate a new - * list - * @return the specified list with added objects, or a new (possibly empty) list - * with objects of the given type + * list to add objects to */ - public ArrayList getAll(int type, ArrayList addTo) { - Collection values = getMap(type).values(); - if (addTo != null) { - addTo.addAll(values); - } else { - addTo = new ArrayList<>(values); - } - return addTo; + public void getAll(int type, ArrayList addTo) { + addTo.addAll(getMap(type).values()); + } + + public Collection getAllDomains() { + return domains.values(); + } + + public Collection getAllConstraints() { + return constraints.values(); + } + + public Collection getAllConstants() { + return constants.values(); + } + + public Collection getAllSequences() { + return sequences.values(); + } + + public Collection getAllTriggers() { + return triggers.values(); } /** * Get all tables and views. * + * @param session the session, {@code null} to exclude meta tables * @return a (possible empty) list of all objects */ - public ArrayList
          getAllTablesAndViews() { - synchronized (database) { - return new ArrayList<>(tablesAndViews.values()); - } + public Collection
          getAllTablesAndViews(SessionLocal session) { + return tablesAndViews.values(); } + public Collection getAllIndexes() { + return indexes.values(); + } - public ArrayList getAllSynonyms() { - synchronized (database) { - return new ArrayList<>(synonyms.values()); - } + public Collection getAllSynonyms() { + return synonyms.values(); + } + + public Collection getAllFunctionsAndAggregates() { + return functionsAndAggregates.values(); } /** * Get the table with the given name, if any. * + * @param session the session * @param name the table name * @return the table or null if not found */ - public Table getTableOrViewByName(String name) { - synchronized (database) { - return tablesAndViews.get(name); - } + public Table getTableOrViewByName(SessionLocal session, String name) { + return tablesAndViews.get(name); } /** @@ -674,7 +756,7 @@ public void remove(SchemaObject obj) { String objName = obj.getName(); Map map = getMap(obj.getType()); if (map.remove(objName) == null) { - DbException.throwInternalError("not found: " + objName); + throw DbException.getInternalError("not found: " + objName); } freeUniqueName(objName); } @@ -691,21 +773,19 @@ public Table createTable(CreateTableData data) { database.lockMeta(data.session); } data.schema = this; - if (data.tableEngine == null) { + String tableEngine = data.tableEngine; + if (tableEngine == null) { DbSettings s = database.getSettings(); - if (s.defaultTableEngine != null) { - data.tableEngine = s.defaultTableEngine; - } else if (s.mvStore) { - data.tableEngine = MVTableEngine.class.getName(); + tableEngine = s.defaultTableEngine; + if (tableEngine == null) { + return database.getStore().createTable(data); } + data.tableEngine = tableEngine; } - if (data.tableEngine != null) { - if (data.tableEngineParams == null) { - data.tableEngineParams = this.tableEngineParams; - } - return database.getTableEngine(data.tableEngine).createTable(data); + if (data.tableEngineParams == null) { + data.tableEngineParams = 
this.tableEngineParams; } - return new PageStoreTable(data); + return database.getTableEngine(tableEngine).createTable(data); } } diff --git a/h2/src/main/org/h2/schema/SchemaObject.java b/h2/src/main/org/h2/schema/SchemaObject.java index 5e32fcf9fa..f777d038cf 100644 --- a/h2/src/main/org/h2/schema/SchemaObject.java +++ b/h2/src/main/org/h2/schema/SchemaObject.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.schema; @@ -10,14 +10,42 @@ /** * Any database object that is stored in a schema. */ -public interface SchemaObject extends DbObject { +public abstract class SchemaObject extends DbObject { + + private final Schema schema; + + /** + * Initialize some attributes of this object. + * + * @param newSchema the schema + * @param id the object id + * @param name the name + * @param traceModuleId the trace module id + */ + protected SchemaObject(Schema newSchema, int id, String name, int traceModuleId) { + super(newSchema.getDatabase(), id, name, traceModuleId); + this.schema = newSchema; + } /** * Get the schema in which this object is defined * * @return the schema */ - Schema getSchema(); + public final Schema getSchema() { + return schema; + } + + @Override + public String getSQL(int sqlFlags) { + return getSQL(new StringBuilder(), sqlFlags).toString(); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + schema.getSQL(builder, sqlFlags).append('.'); + return super.getSQL(builder, sqlFlags); + } /** * Check whether this is a hidden object that doesn't appear in the meta @@ -25,6 +53,8 @@ public interface SchemaObject extends DbObject { * * @return true if it is hidden */ - boolean isHidden(); + public boolean isHidden() { + return false; + } 
} diff --git a/h2/src/main/org/h2/schema/SchemaObjectBase.java b/h2/src/main/org/h2/schema/SchemaObjectBase.java deleted file mode 100644 index 9800ba2544..0000000000 --- a/h2/src/main/org/h2/schema/SchemaObjectBase.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.schema; - -import org.h2.engine.DbObjectBase; - -/** - * The base class for classes implementing SchemaObject. - */ -public abstract class SchemaObjectBase extends DbObjectBase implements - SchemaObject { - - private final Schema schema; - - /** - * Initialize some attributes of this object. - * - * @param newSchema the schema - * @param id the object id - * @param name the name - * @param traceModuleId the trace module id - */ - protected SchemaObjectBase(Schema newSchema, int id, String name, - int traceModuleId) { - super(newSchema.getDatabase(), id, name, traceModuleId); - this.schema = newSchema; - } - - @Override - public Schema getSchema() { - return schema; - } - - @Override - public String getSQL(boolean alwaysQuote) { - return getSQL(new StringBuilder(), alwaysQuote).toString(); - } - - @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - schema.getSQL(builder, alwaysQuote).append('.'); - return super.getSQL(builder, alwaysQuote); - } - - @Override - public boolean isHidden() { - return false; - } - -} diff --git a/h2/src/main/org/h2/schema/Sequence.java b/h2/src/main/org/h2/schema/Sequence.java index 6162066b97..f21b918132 100644 --- a/h2/src/main/org/h2/schema/Sequence.java +++ b/h2/src/main/org/h2/schema/Sequence.java @@ -1,151 +1,266 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.schema; import org.h2.api.ErrorCode; +import org.h2.command.ddl.SequenceOptions; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.table.Table; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; /** * A sequence is created using the statement * CREATE SEQUENCE */ -public class Sequence extends SchemaObjectBase { +public final class Sequence extends SchemaObject { + + /** + * CYCLE clause and sequence state. + */ + public enum Cycle { + + /** + * Sequence is cycled. + */ + CYCLE, + + /** + * Sequence is not cycled and isn't exhausted yet. + */ + NO_CYCLE, + + /** + * Sequence is not cycled and was already exhausted. + */ + EXHAUSTED; + + /** + * Return whether sequence is cycled. + * + * @return {@code true} if sequence is cycled, {@code false} if sequence + * is not cycled + */ + public boolean isCycle() { + return this == CYCLE; + } + + } /** * The default cache size for sequences. */ public static final int DEFAULT_CACHE_SIZE = 32; - private long value; - private long valueWithMargin; + private long baseValue; + private long margin; + + private TypeInfo dataType; + private long increment; private long cacheSize; + private long startValue; private long minValue; private long maxValue; - private boolean cycle; + private Cycle cycle; private boolean belongsToTable; private boolean writeWithMargin; - /** - * Creates a new sequence for an auto-increment column. 
- * - * @param schema the schema - * @param id the object id - * @param name the sequence name - * @param startValue the first value to return - * @param increment the increment count - */ - public Sequence(Schema schema, int id, String name, long startValue, - long increment) { - this(schema, id, name, startValue, increment, null, null, null, false, - true); - } - /** * Creates a new sequence. * - * @param schema the schema - * @param id the object id - * @param name the sequence name - * @param startValue the first value to return - * @param increment the increment count - * @param cacheSize the number of entries to pre-fetch - * @param minValue the minimum value - * @param maxValue the maximum value - * @param cycle whether to jump back to the min value if needed - * @param belongsToTable whether this sequence belongs to a table (for - * auto-increment columns) + * @param session + * the session + * @param schema + * the schema + * @param id + * the object id + * @param name + * the sequence name + * @param options + * the sequence options + * @param belongsToTable + * whether this sequence belongs to a table (for generated + * columns) */ - public Sequence(Schema schema, int id, String name, Long startValue, - Long increment, Long cacheSize, Long minValue, Long maxValue, - boolean cycle, boolean belongsToTable) { + public Sequence(SessionLocal session, Schema schema, int id, String name, SequenceOptions options, + boolean belongsToTable) { super(schema, id, name, Trace.SEQUENCE); - this.increment = increment != null ? - increment : 1; - this.minValue = minValue != null ? - minValue : getDefaultMinValue(startValue, this.increment); - this.maxValue = maxValue != null ? - maxValue : getDefaultMaxValue(startValue, this.increment); - this.value = startValue != null ? - startValue : getDefaultStartValue(this.increment); - this.valueWithMargin = value; - this.cacheSize = cacheSize != null ? 
- Math.max(1, cacheSize) : DEFAULT_CACHE_SIZE; + dataType = options.getDataType(); + if (dataType == null) { + options.setDataType(dataType = session.getMode().decimalSequences ? TypeInfo.TYPE_NUMERIC_BIGINT + : TypeInfo.TYPE_BIGINT); + } + long bounds[] = options.getBounds(); + Long t = options.getIncrement(session); + long increment = t != null ? t : 1; + Long start = options.getStartValue(session); + Long min = options.getMinValue(null, session); + Long max = options.getMaxValue(null, session); + long minValue = min != null ? min : getDefaultMinValue(start, increment, bounds); + long maxValue = max != null ? max : getDefaultMaxValue(start, increment, bounds); + long startValue = start != null ? start : increment >= 0 ? minValue : maxValue; + Long restart = options.getRestartValue(session, startValue); + long baseValue = restart != null ? restart : startValue; + t = options.getCacheSize(session); + long cacheSize; + boolean mayAdjustCacheSize; + if (t != null) { + cacheSize = t; + mayAdjustCacheSize = false; + } else { + cacheSize = DEFAULT_CACHE_SIZE; + mayAdjustCacheSize = true; + } + cacheSize = checkOptions(baseValue, startValue, minValue, maxValue, increment, cacheSize, mayAdjustCacheSize); + Cycle cycle = options.getCycle(); + if (cycle == null) { + cycle = Cycle.NO_CYCLE; + } else if (cycle == Cycle.EXHAUSTED) { + baseValue = startValue; + } + this.margin = this.baseValue = baseValue; + this.increment = increment; + this.cacheSize = cacheSize; + this.startValue = startValue; + this.minValue = minValue; + this.maxValue = maxValue; this.cycle = cycle; this.belongsToTable = belongsToTable; - if (!isValid(this.value, this.minValue, this.maxValue, this.increment)) { - throw DbException.get(ErrorCode.SEQUENCE_ATTRIBUTES_INVALID, name, - Long.toString(this.value), Long.toString(this.minValue), - Long.toString(this.maxValue), - Long.toString(this.increment)); - } } /** - * Allows the start value, increment, min value and max value to be updated - * atomically, 
including atomic validation. Useful because setting these - * attributes one after the other could otherwise result in an invalid - * sequence state (e.g. min value > max value, start value < min value, - * etc). - * - * @param startValue the new start value (null if no change) - * @param minValue the new min value (null if no change) - * @param maxValue the new max value (null if no change) - * @param increment the new increment (null if no change) + * Allows the base value, start value, min value, max value, increment and + * cache size to be updated atomically, including atomic validation. Useful + * because setting these attributes one after the other could otherwise + * result in an invalid sequence state (e.g. min value > max value, start + * value < min value, etc). + * @param baseValue + * the base value ({@code null} if restart is not requested) + * @param startValue + * the new start value ({@code null} if no change) + * @param minValue + * the new min value ({@code null} if no change) + * @param maxValue + * the new max value ({@code null} if no change) + * @param increment + * the new increment ({@code null} if no change) + * @param cycle + * the new cycle value, or {@code null} if no change + * @param cacheSize + * the new cache size ({@code null} if no change) */ - public synchronized void modify(Long startValue, Long minValue, - Long maxValue, Long increment) { - if (startValue == null) { - startValue = this.value; - } - if (minValue == null) { - minValue = this.minValue; - } - if (maxValue == null) { - maxValue = this.maxValue; - } - if (increment == null) { - increment = this.increment; + public synchronized void modify(Long baseValue, Long startValue, Long minValue, Long maxValue, Long increment, + Cycle cycle, Long cacheSize) { + long baseValueAsLong = baseValue != null ? baseValue : this.baseValue; + long startValueAsLong = startValue != null ? startValue : this.startValue; + long minValueAsLong = minValue != null ? 
minValue : this.minValue; + long maxValueAsLong = maxValue != null ? maxValue : this.maxValue; + long incrementAsLong = increment != null ? increment : this.increment; + long cacheSizeAsLong; + boolean mayAdjustCacheSize; + if (cacheSize != null) { + cacheSizeAsLong = cacheSize; + mayAdjustCacheSize = false; + } else { + cacheSizeAsLong = this.cacheSize; + mayAdjustCacheSize = true; } - if (!isValid(startValue, minValue, maxValue, increment)) { - throw DbException.get(ErrorCode.SEQUENCE_ATTRIBUTES_INVALID, - getName(), String.valueOf(startValue), - String.valueOf(minValue), - String.valueOf(maxValue), - String.valueOf(increment)); + cacheSizeAsLong = checkOptions(baseValueAsLong, startValueAsLong, minValueAsLong, maxValueAsLong, + incrementAsLong, cacheSizeAsLong, mayAdjustCacheSize); + if (cycle == null) { + cycle = this.cycle; + if (cycle == Cycle.EXHAUSTED && baseValue != null) { + cycle = Cycle.NO_CYCLE; + } + } else if (cycle == Cycle.EXHAUSTED) { + baseValueAsLong = startValueAsLong; } - this.value = startValue; - this.valueWithMargin = startValue; - this.minValue = minValue; - this.maxValue = maxValue; - this.increment = increment; + this.margin = this.baseValue = baseValueAsLong; + this.startValue = startValueAsLong; + this.minValue = minValueAsLong; + this.maxValue = maxValueAsLong; + this.increment = incrementAsLong; + this.cacheSize = cacheSizeAsLong; + this.cycle = cycle; } /** - * Validates the specified prospective start value, min value, max value and - * increment relative to each other, since each of their respective - * validities are contingent on the values of the other parameters. + * Validates the specified prospective base value, start value, min value, + * max value, increment, and cache size relative to each other, since each + * of their respective validities are contingent on the values of the other + * parameters. 
* - * @param value the prospective start value - * @param minValue the prospective min value - * @param maxValue the prospective max value - * @param increment the prospective increment + * @param baseValue + * the prospective base value + * @param startValue + * the prospective start value + * @param minValue + * the prospective min value + * @param maxValue + * the prospective max value + * @param increment + * the prospective increment + * @param cacheSize + * the prospective cache size + * @param mayAdjustCacheSize + * whether cache size may be adjusted, cache size 0 is adjusted + * unconditionally to 1 + * @return the prospective or adjusted cache size */ - private static boolean isValid(long value, long minValue, long maxValue, long increment) { - return minValue <= value && - maxValue >= value && - maxValue > minValue && - increment != 0 && - // Math.abs(increment) <= maxValue - minValue - // Can use Long.compareUnsigned() on Java 8 - Math.abs(increment) + Long.MIN_VALUE <= maxValue - minValue + Long.MIN_VALUE; + private long checkOptions(long baseValue, long startValue, long minValue, long maxValue, long increment, + long cacheSize, boolean mayAdjustCacheSize) { + if (minValue <= baseValue && baseValue <= maxValue // + && minValue <= startValue && startValue <= maxValue // + && minValue < maxValue && increment != 0L) { + long range = maxValue - minValue; + if (Long.compareUnsigned(Math.abs(increment), range) <= 0 && cacheSize >= 0L) { + if (cacheSize <= 1L) { + return 1L; + } + long maxCacheSize = getMaxCacheSize(range, increment); + if (cacheSize <= maxCacheSize) { + return cacheSize; + } + if (mayAdjustCacheSize) { + return maxCacheSize; + } + } + } + throw DbException.get(ErrorCode.SEQUENCE_ATTRIBUTES_INVALID_7, getName(), Long.toString(baseValue), + Long.toString(startValue), Long.toString(minValue), Long.toString(maxValue), Long.toString(increment), + Long.toString(cacheSize)); + } + + private static long getMaxCacheSize(long range, long increment) { + 
if (increment > 0L) { + if (range < 0) { + range = Long.MAX_VALUE; + } else { + range += increment; + if (range < 0) { + range = Long.MAX_VALUE; + } + } + } else { + range = -range; + if (range > 0) { + range = Long.MIN_VALUE; + } else { + range += increment; + if (range >= 0) { + range = Long.MIN_VALUE; + } + } + } + return range / increment; } /** @@ -153,10 +268,11 @@ private static boolean isValid(long value, long minValue, long maxValue, long in * * @param startValue the start value of the sequence. * @param increment the increment of the sequence value. + * @param bounds min and max bounds of data type of the sequence * @return min value. */ - public static long getDefaultMinValue(Long startValue, long increment) { - long v = increment >= 0 ? 1 : Long.MIN_VALUE; + public static long getDefaultMinValue(Long startValue, long increment, long[] bounds) { + long v = increment >= 0 ? 1 : bounds[0]; if (startValue != null && increment >= 0 && startValue < v) { v = startValue; } @@ -168,28 +284,51 @@ public static long getDefaultMinValue(Long startValue, long increment) { * * @param startValue the start value of the sequence. * @param increment the increment of the sequence value. + * @param bounds min and max bounds of data type of the sequence * @return min value. */ - public static long getDefaultMaxValue(Long startValue, long increment) { - long v = increment >= 0 ? Long.MAX_VALUE : -1; + public static long getDefaultMaxValue(Long startValue, long increment, long[] bounds) { + long v = increment >= 0 ? bounds[1] : -1; if (startValue != null && increment < 0 && startValue > v) { v = startValue; } return v; } - private long getDefaultStartValue(long increment) { - return increment >= 0 ? 
minValue : maxValue; - } - public boolean getBelongsToTable() { return belongsToTable; } + public TypeInfo getDataType() { + return dataType; + } + + public int getEffectivePrecision() { + TypeInfo dataType = this.dataType; + switch (dataType.getValueType()) { + case Value.NUMERIC: { + int p = (int) dataType.getPrecision(); + int s = dataType.getScale(); + if (p - s > ValueBigint.DECIMAL_PRECISION) { + return ValueBigint.DECIMAL_PRECISION + s; + } + return p; + } + case Value.DECFLOAT: + return Math.min((int) dataType.getPrecision(), ValueBigint.DECIMAL_PRECISION); + default: + return (int) dataType.getPrecision(); + } + } + public long getIncrement() { return increment; } + public long getStartValue() { + return startValue; + } + public long getMinValue() { return minValue; } @@ -198,94 +337,173 @@ public long getMaxValue() { return maxValue; } - public boolean getCycle() { + public Cycle getCycle() { return cycle; } - public void setCycle(boolean cycle) { - this.cycle = cycle; - } - @Override public String getDropSQL() { if (getBelongsToTable()) { return null; } StringBuilder builder = new StringBuilder("DROP SEQUENCE IF EXISTS "); - return getSQL(builder, true).toString(); + return getSQL(builder, DEFAULT_SQL_FLAGS).toString(); } @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } @Override - public synchronized String getCreateSQL() { - long v = writeWithMargin ? valueWithMargin : value; - StringBuilder buff = new StringBuilder("CREATE SEQUENCE "); - getSQL(buff, true).append(" START WITH ").append(v); + public String getCreateSQL() { + StringBuilder builder = getSQL(new StringBuilder("CREATE SEQUENCE "), DEFAULT_SQL_FLAGS); + if (dataType.getValueType() != Value.BIGINT) { + dataType.getSQL(builder.append(" AS "), DEFAULT_SQL_FLAGS); + } + builder.append(' '); + synchronized (this) { + getSequenceOptionsSQL(builder, writeWithMargin ? 
margin : baseValue); + } + if (belongsToTable) { + builder.append(" BELONGS_TO_TABLE"); + } + return builder.toString(); + } + + /** + * Append the options part of the SQL statement to create the sequence. + * + * @param builder the builder + * @return the builder + */ + public synchronized StringBuilder getSequenceOptionsSQL(StringBuilder builder) { + return getSequenceOptionsSQL(builder, baseValue); + } + + private StringBuilder getSequenceOptionsSQL(StringBuilder builder, long value) { + builder.append("START WITH ").append(startValue); + if (value != startValue && cycle != Cycle.EXHAUSTED) { + builder.append(" RESTART WITH ").append(value); + } if (increment != 1) { - buff.append(" INCREMENT BY ").append(increment); + builder.append(" INCREMENT BY ").append(increment); } - if (minValue != getDefaultMinValue(v, increment)) { - buff.append(" MINVALUE ").append(minValue); + long[] bounds = SequenceOptions.getBounds(dataType); + if (minValue != getDefaultMinValue(value, increment, bounds)) { + builder.append(" MINVALUE ").append(minValue); } - if (maxValue != getDefaultMaxValue(v, increment)) { - buff.append(" MAXVALUE ").append(maxValue); + if (maxValue != getDefaultMaxValue(value, increment, bounds)) { + builder.append(" MAXVALUE ").append(maxValue); } - if (cycle) { - buff.append(" CYCLE"); + if (cycle == Cycle.CYCLE) { + builder.append(" CYCLE"); + } else if (cycle == Cycle.EXHAUSTED) { + builder.append(" EXHAUSTED"); } if (cacheSize != DEFAULT_CACHE_SIZE) { - buff.append(" CACHE ").append(cacheSize); - } - if (belongsToTable) { - buff.append(" BELONGS_TO_TABLE"); + if (cacheSize == 1) { + builder.append(" NO CACHE"); + } else if (cacheSize > DEFAULT_CACHE_SIZE // + || cacheSize != getMaxCacheSize(maxValue - minValue, increment)) { + builder.append(" CACHE ").append(cacheSize); + } } - return buff.toString(); + return builder; } /** - * Get the next value for this sequence. + * Get the next value for this sequence. 
Should not be called directly, use + * {@link SessionLocal#getNextValueFor(Sequence, org.h2.command.Prepared)} instead. * * @param session the session * @return the next value */ - public long getNext(Session session) { - boolean needsFlush = false; + public Value getNext(SessionLocal session) { long result; + boolean needsFlush; synchronized (this) { - if ((increment > 0 && value >= valueWithMargin) || - (increment < 0 && value <= valueWithMargin)) { - valueWithMargin += increment * cacheSize; - needsFlush = true; + if (cycle == Cycle.EXHAUSTED) { + throw DbException.get(ErrorCode.SEQUENCE_EXHAUSTED, getName()); } - if ((increment > 0 && value > maxValue) || - (increment < 0 && value < minValue)) { - if (cycle) { - value = increment > 0 ? minValue : maxValue; - valueWithMargin = value + (increment * cacheSize); - needsFlush = true; - } else { - throw DbException.get(ErrorCode.SEQUENCE_EXHAUSTED, getName()); - } - } - result = value; - value += increment; + result = baseValue; + long newBase = result + increment; + needsFlush = increment > 0 ? increment(result, newBase) : decrement(result, newBase); } if (needsFlush) { flush(session); } - return result; + return ValueBigint.get(result).castTo(dataType, session); + } + + private boolean increment(long oldBase, long newBase) { + boolean needsFlush = false; + /* + * If old base is not negative and new base is negative there is an + * overflow. + */ + if (newBase > maxValue || (~oldBase & newBase) < 0) { + newBase = minValue; + needsFlush = true; + if (cycle == Cycle.CYCLE) { + margin = newBase + increment * (cacheSize - 1); + } else { + margin = newBase; + cycle = Cycle.EXHAUSTED; + } + } else if (newBase > margin) { + long newMargin = newBase + increment * (cacheSize - 1); + if (newMargin > maxValue || (~newBase & newMargin) < 0) { + /* + * Don't cache values near the end of the sequence for + * simplicity. 
+ */ + newMargin = newBase; + } + margin = newMargin; + needsFlush = true; + } + baseValue = newBase; + return needsFlush; + } + + private boolean decrement(long oldBase, long newBase) { + boolean needsFlush = false; + /* + * If old base is negative and new base is not negative there is an + * overflow. + */ + if (newBase < minValue || (oldBase & ~newBase) < 0) { + newBase = maxValue; + needsFlush = true; + if (cycle == Cycle.CYCLE) { + margin = newBase + increment * (cacheSize - 1); + } else { + margin = newBase; + cycle = Cycle.EXHAUSTED; + } + } else if (newBase < margin) { + long newMargin = newBase + increment * (cacheSize - 1); + if (newMargin < minValue || (newBase & ~newMargin) < 0) { + /* + * Don't cache values near the end of the sequence for + * simplicity. + */ + newMargin = newBase; + } + margin = newMargin; + needsFlush = true; + } + baseValue = newBase; + return needsFlush; } /** * Flush the current value to disk. */ public void flushWithoutMargin() { - if (valueWithMargin != value) { - valueWithMargin = value; + if (margin != baseValue) { + margin = baseValue; flush(null); } } @@ -295,7 +513,7 @@ public void flushWithoutMargin() { * * @param session the session */ - public void flush(Session session) { + public void flush(SessionLocal session) { if (isTemporary()) { return; } @@ -303,19 +521,19 @@ public void flush(Session session) { // This session may not lock the sys table (except if it has already // locked it) because it must be committed immediately, otherwise // other threads can not access the sys table. - Session sysSession = database.getSystemSession(); - synchronized (database.isMultiThreaded() ? sysSession : database) { + SessionLocal sysSession = database.getSystemSession(); + synchronized (sysSession) { flushInternal(sysSession); sysSession.commit(false); } } else { - synchronized (database.isMultiThreaded() ? 
session : database) { + synchronized (session) { flushInternal(session); } } } - private void flushInternal(Session session) { + private void flushInternal(SessionLocal session) { final boolean metaWasLocked = database.lockMeta(session); // just for this case, use the value with the margin try { @@ -342,28 +560,24 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { database.removeMeta(session, getId()); invalidate(); } - @Override - public void checkRename() { - // nothing to do + public synchronized long getBaseValue() { + // Use synchronized because baseValue is not volatile + return baseValue; } public synchronized long getCurrentValue() { - return value - increment; + return baseValue - increment; } public void setBelongsToTable(boolean b) { this.belongsToTable = b; } - public void setCacheSize(long cacheSize) { - this.cacheSize = Math.max(1, cacheSize); - } - public long getCacheSize() { return cacheSize; } diff --git a/h2/src/main/org/h2/schema/TriggerObject.java b/h2/src/main/org/h2/schema/TriggerObject.java index dd3ede3bac..fbf2b462ea 100644 --- a/h2/src/main/org/h2/schema/TriggerObject.java +++ b/h2/src/main/org/h2/schema/TriggerObject.java @@ -1,36 +1,41 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.schema; import java.lang.reflect.Method; import java.sql.Connection; +import java.sql.ResultSet; import java.sql.SQLException; import java.util.Arrays; import org.h2.api.ErrorCode; import org.h2.api.Trigger; -import org.h2.command.Parser; import org.h2.engine.Constants; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.jdbc.JdbcConnection; +import org.h2.jdbc.JdbcResultSet; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.result.Row; +import org.h2.result.SimpleResult; +import org.h2.table.Column; import org.h2.table.Table; +import org.h2.tools.TriggerAdapter; import org.h2.util.JdbcUtils; import org.h2.util.SourceCompiler; import org.h2.util.StringUtils; -import org.h2.value.DataType; import org.h2.value.Value; +import org.h2.value.ValueToObjectConverter; /** *A trigger is created using the statement * CREATE TRIGGER */ -public class TriggerObject extends SchemaObjectBase { +public final class TriggerObject extends SchemaObject { /** * The default queue size. 
@@ -60,6 +65,10 @@ public void setBefore(boolean before) { this.before = before; } + public boolean isInsteadOf() { + return insteadOf; + } + public void setInsteadOf(boolean insteadOf) { this.insteadOf = insteadOf; } @@ -69,7 +78,7 @@ private synchronized void load() { return; } try { - Session sysSession = database.getSystemSession(); + SessionLocal sysSession = database.getSystemSession(); Connection c2 = sysSession.createConnection(false); Object obj; if (triggerClassName != null) { @@ -154,7 +163,7 @@ private void setTriggerAction(String triggerClassName, String source, boolean fo * @param type the trigger type * @param beforeAction if this method is called before applying the changes */ - public void fire(Session session, int type, boolean beforeAction) { + public void fire(SessionLocal session, int type, boolean beforeAction) { if (rowBased || before != beforeAction || (typeMask & type) == 0) { return; } @@ -164,33 +173,31 @@ public void fire(Session session, int type, boolean beforeAction) { if (type != Trigger.SELECT) { old = session.setCommitOrRollbackDisabled(true); } - Value identity = session.getLastScopeIdentity(); + Value identity = session.getLastIdentity(); try { - triggerCallback.fire(c2, null, null); - } catch (Throwable e) { - throw DbException.get(ErrorCode.ERROR_EXECUTING_TRIGGER_3, e, getName(), - triggerClassName != null ? 
triggerClassName : "..source..", e.toString()); - } finally { - if (session.getLastTriggerIdentity() != null) { - session.setLastScopeIdentity(session.getLastTriggerIdentity()); - session.setLastTriggerIdentity(null); + if (triggerCallback instanceof TriggerAdapter) { + ((TriggerAdapter) triggerCallback).fire(c2, (ResultSet) null, (ResultSet) null); } else { - session.setLastScopeIdentity(identity); + triggerCallback.fire(c2, null, null); } + } catch (Throwable e) { + throw getErrorExecutingTrigger(e); + } finally { + session.setLastIdentity(identity); if (type != Trigger.SELECT) { session.setCommitOrRollbackDisabled(old); } } } - private static Object[] convertToObjectList(Row row) { + private static Object[] convertToObjectList(Row row, JdbcConnection conn) { if (row == null) { return null; } int len = row.getColumnCount(); Object[] list = new Object[len]; for (int i = 0; i < len; i++) { - list[i] = row.getValue(i).getObject(); + list[i] = ValueToObjectConverter.valueToDefaultObject(row.getValue(i), conn, false); } return list; } @@ -210,7 +217,7 @@ private static Object[] convertToObjectList(Row row) { * @param rollback when the operation occurred within a rollback * @return true if no further action is required (for 'instead of' triggers) */ - public boolean fireRow(Session session, Table table, Row oldRow, Row newRow, + public boolean fireRow(SessionLocal session, Table table, Row oldRow, Row newRow, boolean beforeAction, boolean rollback) { if (!rowBased || before != beforeAction) { return false; @@ -219,8 +226,6 @@ public boolean fireRow(Session session, Table table, Row oldRow, Row newRow, return false; } load(); - Object[] oldList; - Object[] newList; boolean fire = false; if ((typeMask & Trigger.INSERT) != 0) { if (oldRow == null && newRow != null) { @@ -240,28 +245,56 @@ public boolean fireRow(Session session, Table table, Row oldRow, Row newRow, if (!fire) { return false; } - oldList = convertToObjectList(oldRow); - newList = 
convertToObjectList(newRow); - Object[] newListBackup; - if (before && newList != null) { - newListBackup = Arrays.copyOf(newList, newList.length); - } else { - newListBackup = null; - } - Connection c2 = session.createConnection(false); + JdbcConnection c2 = session.createConnection(false); boolean old = session.getAutoCommit(); boolean oldDisabled = session.setCommitOrRollbackDisabled(true); - Value identity = session.getLastScopeIdentity(); + Value identity = session.getLastIdentity(); try { session.setAutoCommit(false); - triggerCallback.fire(c2, oldList, newList); - if (newListBackup != null) { - for (int i = 0; i < newList.length; i++) { - Object o = newList[i]; - if (o != newListBackup[i]) { - Value v = DataType.convertToValue(session, o, Value.UNKNOWN); - session.getGeneratedKeys().add(table.getColumn(i)); - newRow.setValue(i, v); + if (triggerCallback instanceof TriggerAdapter) { + JdbcResultSet oldResultSet = oldRow != null ? createResultSet(c2, table, oldRow, false) : null; + JdbcResultSet newResultSet = newRow != null ? createResultSet(c2, table, newRow, before) : null; + try { + ((TriggerAdapter) triggerCallback).fire(c2, oldResultSet, newResultSet); + } catch (Throwable e) { + throw getErrorExecutingTrigger(e); + } + if (newResultSet != null) { + Value[] updatedList = newResultSet.getUpdateRow(); + if (updatedList != null) { + boolean modified = false; + for (int i = 0, l = updatedList.length; i < l; i++) { + Value v = updatedList[i]; + if (v != null) { + modified = true; + newRow.setValue(i, v); + } + } + if (modified) { + table.convertUpdateRow(session, newRow, true); + } + } + } + } else { + Object[] oldList = convertToObjectList(oldRow, c2); + Object[] newList = convertToObjectList(newRow, c2); + Object[] newListBackup = before && newList != null ? 
Arrays.copyOf(newList, newList.length) : null; + try { + triggerCallback.fire(c2, oldList, newList); + } catch (Throwable e) { + throw getErrorExecutingTrigger(e); + } + if (newListBackup != null) { + boolean modified = false; + for (int i = 0; i < newList.length; i++) { + Object o = newList[i]; + if (o != newListBackup[i]) { + modified = true; + newRow.setValue(i, ValueToObjectConverter.objectToValue(session, o, Value.UNKNOWN)); + } + } + if (modified) { + table.convertUpdateRow(session, newRow, true); } } } @@ -272,18 +305,50 @@ public boolean fireRow(Session session, Table table, Row oldRow, Row newRow, throw DbException.convert(e); } } finally { - if (session.getLastTriggerIdentity() != null) { - session.setLastScopeIdentity(session.getLastTriggerIdentity()); - session.setLastTriggerIdentity(null); - } else { - session.setLastScopeIdentity(identity); - } + session.setLastIdentity(identity); session.setCommitOrRollbackDisabled(oldDisabled); session.setAutoCommit(old); } return insteadOf; } + private static JdbcResultSet createResultSet(JdbcConnection conn, Table table, Row row, boolean updatable) + throws SQLException { + SimpleResult result = new SimpleResult(table.getSchema().getName(), table.getName()); + for (Column c : table.getColumns()) { + result.addColumn(c.getName(), c.getType()); + } + /* + * Old implementation works with and without next() invocation, so add + * the row twice for compatibility. + */ + result.addRow(row.getValueList()); + result.addRow(row.getValueList()); + JdbcResultSet resultSet = new JdbcResultSet(conn, null, null, result, -1, false, false, updatable); + resultSet.next(); + return resultSet; + } + + private DbException getErrorExecutingTrigger(Throwable e) { + if (e instanceof DbException) { + return (DbException) e; + } + if (e instanceof SQLException) { + return DbException.convert(e); + } + return DbException.get(ErrorCode.ERROR_EXECUTING_TRIGGER_3, e, getName(), + triggerClassName != null ? 
triggerClassName : "..source..", e.toString()); + } + + /** + * Returns the trigger type. + * + * @return the trigger type + */ + public int getTypeMask() { + return typeMask; + } + /** * Set the trigger type. * @@ -297,6 +362,10 @@ public void setRowBased(boolean rowBased) { this.rowBased = rowBased; } + public boolean isRowBased() { + return rowBased; + } + public void setQueueSize(int size) { this.queueSize = size; } @@ -317,9 +386,8 @@ public void setOnRollback(boolean onRollback) { this.onRollback = onRollback; } - @Override - public String getDropSQL() { - return null; + public boolean isOnRollback() { + return onRollback; } @Override @@ -334,7 +402,7 @@ public String getCreateSQLForCopy(Table targetTable, String quotedName) { builder.append(" AFTER "); } getTypeNameList(builder).append(" ON "); - targetTable.getSQL(builder, true); + targetTable.getSQL(builder, DEFAULT_SQL_FLAGS); if (rowBased) { builder.append(" FOR EACH ROW"); } @@ -344,11 +412,9 @@ public String getCreateSQLForCopy(Table targetTable, String quotedName) { builder.append(" QUEUE ").append(queueSize); } if (triggerClassName != null) { - builder.append(" CALL "); - Parser.quoteIdentifier(builder, triggerClassName, true); + StringUtils.quoteStringSQL(builder.append(" CALL "), triggerClassName); } else { - builder.append(" AS "); - StringUtils.quoteStringSQL(builder, triggerSource); + StringUtils.quoteStringSQL(builder.append(" AS "), triggerSource); } return builder.toString(); } @@ -397,7 +463,7 @@ public StringBuilder getTypeNameList(StringBuilder builder) { @Override public String getCreateSQL() { - return getCreateSQLForCopy(table, getSQL(true)); + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS)); } @Override @@ -406,7 +472,7 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { table.removeTrigger(this); database.removeMeta(session, getId()); if (triggerCallback != null) 
{ @@ -423,11 +489,6 @@ public void removeChildrenAndResources(Session session) { invalidate(); } - @Override - public void checkRename() { - // nothing to do - } - /** * Get the table of this trigger. * @@ -461,6 +522,7 @@ public String getTriggerSource() { /** * Close the trigger. + * @throws SQLException on failure */ public void close() throws SQLException { if (triggerCallback != null) { diff --git a/h2/src/main/org/h2/engine/UserAggregate.java b/h2/src/main/org/h2/schema/UserAggregate.java similarity index 71% rename from h2/src/main/org/h2/engine/UserAggregate.java rename to h2/src/main/org/h2/schema/UserAggregate.java index 66138fff8f..45ee8b42df 100644 --- a/h2/src/main/org/h2/engine/UserAggregate.java +++ b/h2/src/main/org/h2/schema/UserAggregate.java @@ -1,32 +1,34 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.engine; +package org.h2.schema; import java.sql.Connection; import java.sql.SQLException; + import org.h2.api.Aggregate; import org.h2.api.AggregateFunction; -import org.h2.command.Parser; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.message.Trace; -import org.h2.table.Table; import org.h2.util.JdbcUtils; +import org.h2.util.StringUtils; import org.h2.value.DataType; +import org.h2.value.TypeInfo; /** * Represents a user-defined aggregate function. 
*/ -public class UserAggregate extends DbObjectBase { +public final class UserAggregate extends UserDefinedFunction { - private String className; private Class javaClass; - public UserAggregate(Database db, int id, String name, String className, + public UserAggregate(Schema schema, int id, String name, String className, boolean force) { - super(db, id, name, Trace.FUNCTION); + super(schema, id, name, Trace.FUNCTION); this.className = className; if (!force) { getInstance(); @@ -52,23 +54,17 @@ public Aggregate getInstance() { } } - @Override - public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); - } - @Override public String getDropSQL() { StringBuilder builder = new StringBuilder("DROP AGGREGATE IF EXISTS "); - return getSQL(builder, true).toString(); + return getSQL(builder, DEFAULT_SQL_FLAGS).toString(); } @Override public String getCreateSQL() { StringBuilder builder = new StringBuilder("CREATE FORCE AGGREGATE "); - getSQL(builder, true).append(" FOR "); - Parser.quoteIdentifier(builder, className, true); - return builder.toString(); + getSQL(builder, DEFAULT_SQL_FLAGS).append(" FOR "); + return StringUtils.quoteStringSQL(builder, className).toString(); } @Override @@ -77,22 +73,13 @@ public int getType() { } @Override - public synchronized void removeChildrenAndResources(Session session) { + public synchronized void removeChildrenAndResources(SessionLocal session) { database.removeMeta(session, getId()); className = null; javaClass = null; invalidate(); } - @Override - public void checkRename() { - throw DbException.getUnsupportedException("AGGREGATE"); - } - - public String getJavaClassName() { - return this.className; - } - /** * Wrap {@link AggregateFunction} in order to behave as * {@link org.h2.api.Aggregate} @@ -113,7 +100,7 @@ public void init(Connection conn) throws SQLException { public int getInternalType(int[] inputTypes) throws SQLException { int[] sqlTypes = new 
int[inputTypes.length]; for (int i = 0; i < inputTypes.length; i++) { - sqlTypes[i] = DataType.convertTypeToSQLType(inputTypes[i]); + sqlTypes[i] = DataType.convertTypeToSQLType(TypeInfo.getTypeInfo(inputTypes[i])); } return DataType.convertSQLTypeToValueType(aggregateFunction.getType(sqlTypes)); } diff --git a/h2/src/main/org/h2/schema/UserDefinedFunction.java b/h2/src/main/org/h2/schema/UserDefinedFunction.java new file mode 100644 index 0000000000..7a3c6c8954 --- /dev/null +++ b/h2/src/main/org/h2/schema/UserDefinedFunction.java @@ -0,0 +1,36 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.schema; + +import org.h2.message.DbException; +import org.h2.table.Table; + +/** + * User-defined Java function or aggregate function. + */ +public abstract class UserDefinedFunction extends SchemaObject { + + String className; + + UserDefinedFunction(Schema newSchema, int id, String name, int traceModuleId) { + super(newSchema, id, name, traceModuleId); + } + + @Override + public final String getCreateSQLForCopy(Table table, String quotedName) { + throw DbException.getInternalError(toString()); + } + + @Override + public final void checkRename() { + throw DbException.getUnsupportedException("RENAME"); + } + + public final String getJavaClassName() { + return className; + } + +} diff --git a/h2/src/main/org/h2/schema/package.html b/h2/src/main/org/h2/schema/package.html index 6383ade969..815a65a659 100644 --- a/h2/src/main/org/h2/schema/package.html +++ b/h2/src/main/org/h2/schema/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/security/AES.java b/h2/src/main/org/h2/security/AES.java index b233f3fbb5..24a73257f8 100644 --- a/h2/src/main/org/h2/security/AES.java +++ b/h2/src/main/org/h2/security/AES.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.security; diff --git a/h2/src/main/org/h2/security/BlockCipher.java b/h2/src/main/org/h2/security/BlockCipher.java index 43c2656f81..6e4cca4fab 100644 --- a/h2/src/main/org/h2/security/BlockCipher.java +++ b/h2/src/main/org/h2/security/BlockCipher.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.security; diff --git a/h2/src/main/org/h2/security/CipherFactory.java b/h2/src/main/org/h2/security/CipherFactory.java index af7bbb2250..0477e9afa7 100644 --- a/h2/src/main/org/h2/security/CipherFactory.java +++ b/h2/src/main/org/h2/security/CipherFactory.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.security; @@ -38,6 +38,7 @@ import org.h2.api.ErrorCode; import org.h2.engine.SysProperties; import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; import org.h2.store.fs.FileUtils; import org.h2.util.IOUtils; import org.h2.util.StringUtils; @@ -103,10 +104,10 @@ public static BlockCipher getBlockCipher(String algorithm) { * @param address the address to connect to * @param port the port * @return the socket + * @throws IOException on failure */ public static Socket createSocket(InetAddress address, int port) throws IOException { - Socket socket = null; setKeystore(); SSLSocketFactory f = (SSLSocketFactory) SSLSocketFactory.getDefault(); SSLSocket secureSocket = (SSLSocket) f.createSocket(); @@ -120,8 +121,7 @@ public static Socket createSocket(InetAddress address, int port) secureSocket.getSupportedCipherSuites()); secureSocket.setEnabledCipherSuites(list); } - socket = secureSocket; - return socket; + return secureSocket; } /** @@ -137,6 +137,7 @@ public static Socket createSocket(InetAddress address, int port) * @param bindAddress the address to bind to, or null to bind to all * addresses * @return the server socket + * @throws IOException on failure */ public static ServerSocket createServerSocket(int port, InetAddress bindAddress) throws IOException { @@ -260,7 +261,7 @@ private static byte[] getKeyStoreBytes(KeyStore store, String password) try { store.store(bout, password.toCharArray()); } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } return bout.toByteArray(); } @@ -270,6 +271,7 @@ private static byte[] getKeyStoreBytes(KeyStore store, String password) * * @param password the keystore password * @return the keystore + * @throws IOException on failure */ public static KeyStore getKeyStore(String password) throws IOException { try { @@ -277,7 +279,7 @@ public static KeyStore getKeyStore(String password) throws IOException { // if you 
have a keystore file. // This code is (hopefully) more Java version independent // than using keystores directly. See also: - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4887561 + // https://bugs.openjdk.java.net/browse/JDK-4887561 // (1.4.2 cannot read keystore written with 1.4.1) // --- generated code start --- @@ -350,7 +352,7 @@ public static KeyStore getKeyStore(String password) throws IOException { // --- generated code end --- return store; } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } @@ -375,7 +377,7 @@ private static void setKeystore() throws IOException { out.write(data); out.close(); } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } String absolutePath = FileUtils.toRealPath(fileName); diff --git a/h2/src/main/org/h2/security/Fog.java b/h2/src/main/org/h2/security/Fog.java index 6d81b207ab..ab5d61fc1b 100644 --- a/h2/src/main/org/h2/security/Fog.java +++ b/h2/src/main/org/h2/security/Fog.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.security; diff --git a/h2/src/main/org/h2/security/SHA256.java b/h2/src/main/org/h2/security/SHA256.java index 1496a4b4e0..1b372893c4 100644 --- a/h2/src/main/org/h2/security/SHA256.java +++ b/h2/src/main/org/h2/security/SHA256.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.security; diff --git a/h2/src/main/org/h2/security/SHA3.java b/h2/src/main/org/h2/security/SHA3.java new file mode 100644 index 0000000000..cc22b7bde5 --- /dev/null +++ b/h2/src/main/org/h2/security/SHA3.java @@ -0,0 +1,289 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.security; + +import java.security.MessageDigest; +import java.util.Arrays; + +import org.h2.util.Bits; + +/** + * SHA-3 message digest family. + */ +public final class SHA3 extends MessageDigest { + + private static final long[] ROUND_CONSTANTS; + + static { + long[] rc = new long[24]; + byte l = 1; + for (int i = 0; i < 24; i++) { + rc[i] = 0; + for (int j = 0; j < 7; j++) { + byte t = l; + l = (byte) (t < 0 ? t << 1 ^ 0x71 : t << 1); + if ((t & 1) != 0) { + rc[i] ^= 1L << (1 << j) - 1; + } + } + } + ROUND_CONSTANTS = rc; + } + + /** + * Returns a new instance of SHA3-224 message digest. + * + * @return SHA3-224 message digest + */ + public static SHA3 getSha3_224() { + return new SHA3("SHA3-224", 28); + } + + /** + * Returns a new instance of SHA3-256 message digest. + * + * @return SHA3-256 message digest + */ + public static SHA3 getSha3_256() { + return new SHA3("SHA3-256", 32); + } + + /** + * Returns a new instance of SHA3-384 message digest. + * + * @return SHA3-384 message digest + */ + public static SHA3 getSha3_384() { + return new SHA3("SHA3-384", 48); + } + + /** + * Returns a new instance of SHA3-512 message digest. 
+ * + * @return SHA3-512 message digest + */ + public static SHA3 getSha3_512() { + return new SHA3("SHA3-512", 64); + } + + private final int digestLength; + + private final int rate; + + private long state00, state01, state02, state03, state04, state05, state06, state07, state08, state09, // + state10, state11, state12, state13, state14, state15, state16, state17, state18, state19, // + state20, state21, state22, state23, state24; + + private final byte[] buf; + + private int bufcnt; + + private SHA3(String algorithm, int digestLength) { + super(algorithm); + this.digestLength = digestLength; + buf = new byte[this.rate = 200 - digestLength * 2]; + } + + @Override + protected byte[] engineDigest() { + buf[bufcnt] = 0b110; + Arrays.fill(buf, bufcnt + 1, rate, (byte) 0); + buf[rate - 1] |= 0x80; + absorbQueue(); + byte[] r = new byte[digestLength]; + switch (digestLength) { + case 64: + Bits.writeLongLE(r, 56, state07); + Bits.writeLongLE(r, 48, state06); + //$FALL-THROUGH$ + case 48: + Bits.writeLongLE(r, 40, state05); + Bits.writeLongLE(r, 32, state04); + //$FALL-THROUGH$ + case 32: + Bits.writeLongLE(r, 24, state03); + break; + case 28: + Bits.writeIntLE(r, 24, (int) state03); + } + Bits.writeLongLE(r, 16, state02); + Bits.writeLongLE(r, 8, state01); + Bits.writeLongLE(r, 0, state00); + engineReset(); + return r; + } + + @Override + protected int engineGetDigestLength() { + return digestLength; + } + + @Override + protected void engineReset() { + state24 = state23 = state22 = state21 = state20 // + = state19 = state18 = state17 = state16 = state15 // + = state14 = state13 = state12 = state11 = state10 // + = state09 = state08 = state07 = state06 = state05 // + = state04 = state03 = state02 = state01 = state00 = 0L; + Arrays.fill(buf, (byte) 0); + bufcnt = 0; + } + + @Override + protected void engineUpdate(byte input) { + buf[bufcnt++] = input; + if (bufcnt == rate) { + absorbQueue(); + } + } + + @Override + protected void engineUpdate(byte[] input, int offset, 
int len) { + while (len > 0) { + if (bufcnt == 0 && len >= rate) { + do { + absorb(input, offset); + offset += rate; + len -= rate; + } while (len >= rate); + } else { + int partialBlock = Math.min(len, rate - bufcnt); + System.arraycopy(input, offset, buf, bufcnt, partialBlock); + bufcnt += partialBlock; + offset += partialBlock; + len -= partialBlock; + if (bufcnt == rate) { + absorbQueue(); + } + } + } + } + + private void absorbQueue() { + absorb(buf, 0); + bufcnt = 0; + } + + private void absorb(byte[] data, int offset) { + /* + * There is no need to copy 25 state* fields into local variables, + * because so large number of local variables only hurts performance. + */ + switch (digestLength) { + case 28: + state17 ^= Bits.readLongLE(data, offset + 136); + //$FALL-THROUGH$ + case 32: + state13 ^= Bits.readLongLE(data, offset + 104); + state14 ^= Bits.readLongLE(data, offset + 112); + state15 ^= Bits.readLongLE(data, offset + 120); + state16 ^= Bits.readLongLE(data, offset + 128); + //$FALL-THROUGH$ + case 48: + state09 ^= Bits.readLongLE(data, offset + 72); + state10 ^= Bits.readLongLE(data, offset + 80); + state11 ^= Bits.readLongLE(data, offset + 88); + state12 ^= Bits.readLongLE(data, offset + 96); + } + state00 ^= Bits.readLongLE(data, offset); + state01 ^= Bits.readLongLE(data, offset + 8); + state02 ^= Bits.readLongLE(data, offset + 16); + state03 ^= Bits.readLongLE(data, offset + 24); + state04 ^= Bits.readLongLE(data, offset + 32); + state05 ^= Bits.readLongLE(data, offset + 40); + state06 ^= Bits.readLongLE(data, offset + 48); + state07 ^= Bits.readLongLE(data, offset + 56); + state08 ^= Bits.readLongLE(data, offset + 64); + for (int i = 0; i < 24; i++) { + long c0 = state00 ^ state05 ^ state10 ^ state15 ^ state20; + long c1 = state01 ^ state06 ^ state11 ^ state16 ^ state21; + long c2 = state02 ^ state07 ^ state12 ^ state17 ^ state22; + long c3 = state03 ^ state08 ^ state13 ^ state18 ^ state23; + long c4 = state04 ^ state09 ^ state14 ^ state19 ^ 
state24; + long dX = (c1 << 1 | c1 >>> 63) ^ c4; + state00 ^= dX; + state05 ^= dX; + state10 ^= dX; + state15 ^= dX; + state20 ^= dX; + dX = (c2 << 1 | c2 >>> 63) ^ c0; + state01 ^= dX; + state06 ^= dX; + state11 ^= dX; + state16 ^= dX; + state21 ^= dX; + dX = (c3 << 1 | c3 >>> 63) ^ c1; + state02 ^= dX; + state07 ^= dX; + state12 ^= dX; + state17 ^= dX; + state22 ^= dX; + dX = (c4 << 1 | c4 >>> 63) ^ c2; + state03 ^= dX; + state08 ^= dX; + state13 ^= dX; + state18 ^= dX; + state23 ^= dX; + dX = (c0 << 1 | c0 >>> 63) ^ c3; + state04 ^= dX; + state09 ^= dX; + state14 ^= dX; + state19 ^= dX; + state24 ^= dX; + long s00 = state00; + long s01 = state06 << 44 | state06 >>> 20; + long s02 = state12 << 43 | state12 >>> 21; + long s03 = state18 << 21 | state18 >>> 43; + long s04 = state24 << 14 | state24 >>> 50; + long s05 = state03 << 28 | state03 >>> 36; + long s06 = state09 << 20 | state09 >>> 44; + long s07 = state10 << 3 | state10 >>> 61; + long s08 = state16 << 45 | state16 >>> 19; + long s09 = state22 << 61 | state22 >>> 3; + long s10 = state01 << 1 | state01 >>> 63; + long s11 = state07 << 6 | state07 >>> 58; + long s12 = state13 << 25 | state13 >>> 39; + long s13 = state19 << 8 | state19 >>> 56; + long s14 = state20 << 18 | state20 >>> 46; + long s15 = state04 << 27 | state04 >>> 37; + long s16 = state05 << 36 | state05 >>> 28; + long s17 = state11 << 10 | state11 >>> 54; + long s18 = state17 << 15 | state17 >>> 49; + long s19 = state23 << 56 | state23 >>> 8; + long s20 = state02 << 62 | state02 >>> 2; + long s21 = state08 << 55 | state08 >>> 9; + long s22 = state14 << 39 | state14 >>> 25; + long s23 = state15 << 41 | state15 >>> 23; + long s24 = state21 << 2 | state21 >>> 62; + state00 = s00 ^ ~s01 & s02 ^ ROUND_CONSTANTS[i]; + state01 = s01 ^ ~s02 & s03; + state02 = s02 ^ ~s03 & s04; + state03 = s03 ^ ~s04 & s00; + state04 = s04 ^ ~s00 & s01; + state05 = s05 ^ ~s06 & s07; + state06 = s06 ^ ~s07 & s08; + state07 = s07 ^ ~s08 & s09; + state08 = s08 ^ ~s09 & s05; + 
state09 = s09 ^ ~s05 & s06; + state10 = s10 ^ ~s11 & s12; + state11 = s11 ^ ~s12 & s13; + state12 = s12 ^ ~s13 & s14; + state13 = s13 ^ ~s14 & s10; + state14 = s14 ^ ~s10 & s11; + state15 = s15 ^ ~s16 & s17; + state16 = s16 ^ ~s17 & s18; + state17 = s17 ^ ~s18 & s19; + state18 = s18 ^ ~s19 & s15; + state19 = s19 ^ ~s15 & s16; + state20 = s20 ^ ~s21 & s22; + state21 = s21 ^ ~s22 & s23; + state22 = s22 ^ ~s23 & s24; + state23 = s23 ^ ~s24 & s20; + state24 = s24 ^ ~s20 & s21; + } + } + +} diff --git a/h2/src/main/org/h2/security/SecureFileStore.java b/h2/src/main/org/h2/security/SecureFileStore.java index 050d4751e6..2e70aaa14a 100644 --- a/h2/src/main/org/h2/security/SecureFileStore.java +++ b/h2/src/main/org/h2/security/SecureFileStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.security; @@ -71,7 +71,7 @@ public void write(byte[] b, int off, int len) { } @Override - protected void readFullyDirect(byte[] b, int off, int len) { + public void readFullyDirect(byte[] b, int off, int len) { super.readFully(b, off, len); pos += len; } diff --git a/h2/src/main/org/h2/security/XTEA.java b/h2/src/main/org/h2/security/XTEA.java index 2436b8f6ae..01f2192bf5 100644 --- a/h2/src/main/org/h2/security/XTEA.java +++ b/h2/src/main/org/h2/security/XTEA.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.security; @@ -47,7 +47,7 @@ public void setKey(byte[] b) { @Override public void encrypt(byte[] bytes, int off, int len) { if (len % ALIGN != 0) { - DbException.throwInternalError("unaligned len " + len); + throw DbException.getInternalError("unaligned len " + len); } for (int i = off; i < off + len; i += 8) { encryptBlock(bytes, bytes, i); @@ -57,7 +57,7 @@ public void encrypt(byte[] bytes, int off, int len) { @Override public void decrypt(byte[] bytes, int off, int len) { if (len % ALIGN != 0) { - DbException.throwInternalError("unaligned len " + len); + throw DbException.getInternalError("unaligned len " + len); } for (int i = off; i < off + len; i += 8) { decryptBlock(bytes, bytes, i); diff --git a/h2/src/main/org/h2/security/auth/AuthConfigException.java b/h2/src/main/org/h2/security/auth/AuthConfigException.java index 9137f6e970..6135f6d590 100644 --- a/h2/src/main/org/h2/security/auth/AuthConfigException.java +++ b/h2/src/main/org/h2/security/auth/AuthConfigException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/AuthenticationException.java b/h2/src/main/org/h2/security/auth/AuthenticationException.java index f64057bbbd..df054b2b56 100644 --- a/h2/src/main/org/h2/security/auth/AuthenticationException.java +++ b/h2/src/main/org/h2/security/auth/AuthenticationException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/AuthenticationInfo.java b/h2/src/main/org/h2/security/auth/AuthenticationInfo.java index ecff484b89..ab9ecfd9cf 100644 --- a/h2/src/main/org/h2/security/auth/AuthenticationInfo.java +++ b/h2/src/main/org/h2/security/auth/AuthenticationInfo.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/Authenticator.java b/h2/src/main/org/h2/security/auth/Authenticator.java index 76bfdb35d8..c5ea0b1b73 100644 --- a/h2/src/main/org/h2/security/auth/Authenticator.java +++ b/h2/src/main/org/h2/security/auth/Authenticator.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; @@ -20,6 +20,7 @@ public interface Authenticator { * @param database target database instance. 
* @return valid database user or null if user doesn't exists in the * database + * @throws AuthenticationException on failure */ User authenticate(AuthenticationInfo authenticationInfo, Database database) throws AuthenticationException; diff --git a/h2/src/main/org/h2/security/auth/AuthenticatorFactory.java b/h2/src/main/org/h2/security/auth/AuthenticatorFactory.java index 3059dcf9b4..c099ac5a1d 100644 --- a/h2/src/main/org/h2/security/auth/AuthenticatorFactory.java +++ b/h2/src/main/org/h2/security/auth/AuthenticatorFactory.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/ConfigProperties.java b/h2/src/main/org/h2/security/auth/ConfigProperties.java index cd8d9163d3..0dc19bf20d 100644 --- a/h2/src/main/org/h2/security/auth/ConfigProperties.java +++ b/h2/src/main/org/h2/security/auth/ConfigProperties.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; @@ -8,7 +8,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.HashMap; -import java.util.Map; import org.h2.util.Utils; @@ -17,7 +16,7 @@ */ public class ConfigProperties { - private Map properties; + private HashMap properties; public ConfigProperties() { properties = new HashMap<>(); @@ -29,9 +28,9 @@ public ConfigProperties(PropertyConfig... 
configProperties) { public ConfigProperties(Collection configProperties) { properties = new HashMap<>(); - if (properties != null) { + if (configProperties != null) { for (PropertyConfig currentProperty : configProperties) { - if (properties.put(currentProperty.getName(), currentProperty.getValue()) != null) { + if (properties.putIfAbsent(currentProperty.getName(), currentProperty.getValue()) != null) { throw new AuthConfigException("duplicate property " + currentProperty.getName()); } } diff --git a/h2/src/main/org/h2/security/auth/Configurable.java b/h2/src/main/org/h2/security/auth/Configurable.java index 605f844294..56191e1b65 100644 --- a/h2/src/main/org/h2/security/auth/Configurable.java +++ b/h2/src/main/org/h2/security/auth/Configurable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/DefaultAuthenticator.java b/h2/src/main/org/h2/security/auth/DefaultAuthenticator.java index 22586149bc..052270ef17 100644 --- a/h2/src/main/org/h2/security/auth/DefaultAuthenticator.java +++ b/h2/src/main/org/h2/security/auth/DefaultAuthenticator.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: Alessandro Ventura */ package org.h2.security.auth; @@ -246,6 +246,10 @@ private void defaultConfiguration() { * Configure the authenticator from a configuration file * * @param configUrl URL of configuration file + * @throws AuthenticationException on failure + * @throws SAXException on failure + * @throws IOException on failure + * @throws ParserConfigurationException on failure */ public void configureFromUrl(URL configUrl) throws AuthenticationException, SAXException, IOException, ParserConfigurationException { @@ -256,7 +260,7 @@ public void configureFromUrl(URL configUrl) throws AuthenticationException, private void configureFrom(H2AuthConfig config) throws AuthenticationException { allowUserRegistration = config.isAllowUserRegistration(); createMissingRoles = config.isCreateMissingRoles(); - Map newRealms = new HashMap<>(); + HashMap newRealms = new HashMap<>(); for (RealmConfig currentRealmConfig : config.getRealms()) { String currentRealmName = currentRealmConfig.getName(); if (currentRealmName == null) { @@ -271,7 +275,7 @@ private void configureFrom(H2AuthConfig config) throws AuthenticationException { throw new AuthenticationException("invalid validator class fo realm " + currentRealmName, e); } currentValidator.configure(new ConfigProperties(currentRealmConfig.getProperties())); - if (newRealms.put(currentRealmConfig.getName().toUpperCase(), currentValidator) != null) { + if (newRealms.putIfAbsent(currentRealmConfig.getName().toUpperCase(), currentValidator) != null) { throw new AuthenticationException("Duplicate realm " + currentRealmConfig.getName()); } } diff --git a/h2/src/main/org/h2/security/auth/H2AuthConfig.java b/h2/src/main/org/h2/security/auth/H2AuthConfig.java index 4d78f99e01..9fe168883d 100644 --- a/h2/src/main/org/h2/security/auth/H2AuthConfig.java +++ b/h2/src/main/org/h2/security/auth/H2AuthConfig.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/H2AuthConfigXml.java b/h2/src/main/org/h2/security/auth/H2AuthConfigXml.java index 30e2803f2f..b1f6888d59 100644 --- a/h2/src/main/org/h2/security/auth/H2AuthConfigXml.java +++ b/h2/src/main/org/h2/security/auth/H2AuthConfigXml.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/HasConfigProperties.java b/h2/src/main/org/h2/security/auth/HasConfigProperties.java index ad108ef7e1..93856bffc0 100644 --- a/h2/src/main/org/h2/security/auth/HasConfigProperties.java +++ b/h2/src/main/org/h2/security/auth/HasConfigProperties.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/PropertyConfig.java b/h2/src/main/org/h2/security/auth/PropertyConfig.java index 9db75c88e9..2f049cf492 100644 --- a/h2/src/main/org/h2/security/auth/PropertyConfig.java +++ b/h2/src/main/org/h2/security/auth/PropertyConfig.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/RealmConfig.java b/h2/src/main/org/h2/security/auth/RealmConfig.java index 2797f7526f..f020fca229 100644 --- a/h2/src/main/org/h2/security/auth/RealmConfig.java +++ b/h2/src/main/org/h2/security/auth/RealmConfig.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/UserToRolesMapperConfig.java b/h2/src/main/org/h2/security/auth/UserToRolesMapperConfig.java index f52782ca14..16df852a16 100644 --- a/h2/src/main/org/h2/security/auth/UserToRolesMapperConfig.java +++ b/h2/src/main/org/h2/security/auth/UserToRolesMapperConfig.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth; diff --git a/h2/src/main/org/h2/security/auth/impl/AssignRealmNameRole.java b/h2/src/main/org/h2/security/auth/impl/AssignRealmNameRole.java index 4444e73fb4..825ce3928c 100644 --- a/h2/src/main/org/h2/security/auth/impl/AssignRealmNameRole.java +++ b/h2/src/main/org/h2/security/auth/impl/AssignRealmNameRole.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth.impl; diff --git a/h2/src/main/org/h2/security/auth/impl/JaasCredentialsValidator.java b/h2/src/main/org/h2/security/auth/impl/JaasCredentialsValidator.java index 1db55b6520..9b43a30f2b 100644 --- a/h2/src/main/org/h2/security/auth/impl/JaasCredentialsValidator.java +++ b/h2/src/main/org/h2/security/auth/impl/JaasCredentialsValidator.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth.impl; @@ -52,7 +52,7 @@ public void configure(ConfigProperties configProperties) { appName=configProperties.getStringValue("appName",appName); } - class AuthenticationInfoCallbackHandler implements CallbackHandler { + static class AuthenticationInfoCallbackHandler implements CallbackHandler { AuthenticationInfo authenticationInfo; diff --git a/h2/src/main/org/h2/security/auth/impl/LdapCredentialsValidator.java b/h2/src/main/org/h2/security/auth/impl/LdapCredentialsValidator.java index 5f00830e1e..e1e85c8222 100644 --- a/h2/src/main/org/h2/security/auth/impl/LdapCredentialsValidator.java +++ b/h2/src/main/org/h2/security/auth/impl/LdapCredentialsValidator.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: Alessandro Ventura */ package org.h2.security.auth.impl; diff --git a/h2/src/main/org/h2/security/auth/impl/StaticRolesMapper.java b/h2/src/main/org/h2/security/auth/impl/StaticRolesMapper.java index 203badbf36..adbed395ac 100644 --- a/h2/src/main/org/h2/security/auth/impl/StaticRolesMapper.java +++ b/h2/src/main/org/h2/security/auth/impl/StaticRolesMapper.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.security.auth.impl; diff --git a/h2/src/main/org/h2/security/auth/impl/StaticUserCredentialsValidator.java b/h2/src/main/org/h2/security/auth/impl/StaticUserCredentialsValidator.java index ada21b393f..edee8de558 100644 --- a/h2/src/main/org/h2/security/auth/impl/StaticUserCredentialsValidator.java +++ b/h2/src/main/org/h2/security/auth/impl/StaticUserCredentialsValidator.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: Alessandro Ventura */ package org.h2.security.auth.impl; diff --git a/h2/src/main/org/h2/security/auth/impl/package.html b/h2/src/main/org/h2/security/auth/impl/package.html index 739c3fdc1d..429db14800 100644 --- a/h2/src/main/org/h2/security/auth/impl/package.html +++ b/h2/src/main/org/h2/security/auth/impl/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/security/auth/package.html b/h2/src/main/org/h2/security/auth/package.html index 739c3fdc1d..429db14800 100644 --- a/h2/src/main/org/h2/security/auth/package.html +++ b/h2/src/main/org/h2/security/auth/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/security/package.html b/h2/src/main/org/h2/security/package.html index b77ddaafcb..44e27d75a6 100644 --- a/h2/src/main/org/h2/security/package.html +++ b/h2/src/main/org/h2/security/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/Service.java b/h2/src/main/org/h2/server/Service.java index a583eed9da..dfcd8b0ceb 100644 --- a/h2/src/main/org/h2/server/Service.java +++ b/h2/src/main/org/h2/server/Service.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server; @@ -19,6 +19,7 @@ public interface Service { * Initialize the service from command line options. * * @param args the command line options + * @throws Exception on failure */ void init(String... args) throws Exception; @@ -32,6 +33,7 @@ public interface Service { /** * Start the service. This usually means create the server socket. * This method must not block. 
+ * @throws SQLException on failure */ void start() throws SQLException; diff --git a/h2/src/main/org/h2/server/ShutdownHandler.java b/h2/src/main/org/h2/server/ShutdownHandler.java index a1cce8944f..49b24d3dbc 100644 --- a/h2/src/main/org/h2/server/ShutdownHandler.java +++ b/h2/src/main/org/h2/server/ShutdownHandler.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server; diff --git a/h2/src/main/org/h2/server/TcpServer.java b/h2/src/main/org/h2/server/TcpServer.java index 4175197bc1..fe90ba41ba 100644 --- a/h2/src/main/org/h2/server/TcpServer.java +++ b/h2/src/main/org/h2/server/TcpServer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server; @@ -9,26 +9,24 @@ import java.net.ServerSocket; import java.net.Socket; import java.net.UnknownHostException; -import java.sql.Connection; -import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; -import java.util.Properties; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import org.h2.Driver; import org.h2.api.ErrorCode; import org.h2.engine.Constants; +import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; -import org.h2.util.JdbcUtils; +import org.h2.util.MathUtils; import org.h2.util.NetUtils; import org.h2.util.StringUtils; import org.h2.util.Tool; +import org.h2.util.Utils10; /** * The TCP server implements the native H2 database server protocol. @@ -64,7 +62,7 @@ public class TcpServer implements Service { private boolean allowOthers; private boolean isDaemon; private boolean ifExists = true; - private Connection managementDb; + private JdbcConnection managementDb; private PreparedStatement managementDbAdd; private PreparedStatement managementDbRemove; private String managementPassword = ""; @@ -84,22 +82,21 @@ public static String getManagementDbName(int port) { } private void initManagementDb() throws SQLException { - Properties prop = new Properties(); - prop.setProperty("user", ""); - prop.setProperty("password", managementPassword); + if (managementPassword.isEmpty()) { + managementPassword = StringUtils.convertBytesToHex(MathUtils.secureRandomBytes(32)); + } // avoid using the driver manager - Connection conn = Driver.load().connect("jdbc:h2:" + - getManagementDbName(port), prop); + JdbcConnection conn = new JdbcConnection("jdbc:h2:" + getManagementDbName(port), null, "", managementPassword, + false); managementDb = conn; try (Statement stat = conn.createStatement()) { - stat.execute("CREATE ALIAS IF NOT EXISTS 
STOP_SERVER FOR \"" + - TcpServer.class.getName() + ".stopServer\""); + stat.execute("CREATE ALIAS IF NOT EXISTS STOP_SERVER FOR '" + TcpServer.class.getName() + ".stopServer'"); stat.execute("CREATE TABLE IF NOT EXISTS SESSIONS" + - "(ID INT PRIMARY KEY, URL VARCHAR, USER VARCHAR, " + - "CONNECTED TIMESTAMP)"); + "(ID INT PRIMARY KEY, URL VARCHAR, `USER` VARCHAR, " + + "CONNECTED TIMESTAMP(9) WITH TIME ZONE)"); managementDbAdd = conn.prepareStatement( - "INSERT INTO SESSIONS VALUES(?, ?, ?, NOW())"); + "INSERT INTO SESSIONS VALUES(?, ?, ?, CURRENT_TIMESTAMP(9))"); managementDbRemove = conn.prepareStatement( "DELETE FROM SESSIONS WHERE ID=?"); } @@ -191,7 +188,6 @@ public void init(String... args) { ifExists = false; } } - org.h2.Driver.load(); } @Override @@ -204,6 +200,16 @@ public int getPort() { return port; } + /** + * Returns whether a secure protocol is used. + * + * @return {@code true} if SSL socket is used, {@code false} if plain socket + * is used + */ + public boolean getSSL() { + return ssl; + } + /** * Check if this socket may connect to this server. Remote connections are * not allowed if the flag allowOthers is set. 
@@ -246,9 +252,11 @@ public void listen() { try { while (!stop) { Socket s = serverSocket.accept(); - TcpServerThread c = new TcpServerThread(s, this, nextThreadId++); + Utils10.setTcpQuickack(s, true); + int id = nextThreadId++; + TcpServerThread c = new TcpServerThread(s, this, id); running.add(c); - Thread thread = new Thread(c, threadName + " thread"); + Thread thread = new Thread(c, threadName + " thread-" + id); thread.setDaemon(isDaemon); c.setThread(thread); thread.start(); @@ -424,6 +432,7 @@ boolean getIfExists() { * @param force if the server should be stopped immediately * @param all whether all TCP servers that are running in the JVM should be * stopped + * @throws SQLException on failure */ public static synchronized void shutdown(String url, String password, boolean force, boolean all) throws SQLException { @@ -437,17 +446,9 @@ public static synchronized void shutdown(String url, String password, } } String db = getManagementDbName(port); - try { - org.h2.Driver.load(); - } catch (Throwable e) { - throw DbException.convert(e); - } for (int i = 0; i < 2; i++) { - Connection conn = null; - PreparedStatement prep = null; - try { - conn = DriverManager.getConnection("jdbc:h2:" + url + "/" + db, "", password); - prep = conn.prepareStatement("CALL STOP_SERVER(?, ?, ?)"); + try (JdbcConnection conn = new JdbcConnection("jdbc:h2:" + url + '/' + db, null, "", password, true)) { + PreparedStatement prep = conn.prepareStatement("CALL STOP_SERVER(?, ?, ?)"); prep.setInt(1, all ? 0 : port); prep.setString(2, password); prep.setInt(3, force ? 
SHUTDOWN_FORCE : SHUTDOWN_NORMAL); @@ -467,9 +468,6 @@ public static synchronized void shutdown(String url, String password, if (i == 1) { throw e; } - } finally { - JdbcUtils.closeSilently(prep); - JdbcUtils.closeSilently(conn); } } } catch (Exception e) { diff --git a/h2/src/main/org/h2/server/TcpServerThread.java b/h2/src/main/org/h2/server/TcpServerThread.java index 401c7e7b48..82c210f441 100644 --- a/h2/src/main/org/h2/server/TcpServerThread.java +++ b/h2/src/main/org/h2/server/TcpServerThread.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server; @@ -23,24 +23,28 @@ import org.h2.engine.Engine; import org.h2.engine.GeneratedKeysMode; import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.SessionRemote; import org.h2.engine.SysProperties; import org.h2.expression.Parameter; import org.h2.expression.ParameterInterface; import org.h2.expression.ParameterRemote; import org.h2.jdbc.JdbcException; +import org.h2.jdbc.meta.DatabaseMetaServer; import org.h2.message.DbException; import org.h2.result.ResultColumn; import org.h2.result.ResultInterface; import org.h2.result.ResultWithGeneratedKeys; import org.h2.store.LobStorageInterface; import org.h2.util.IOUtils; +import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.SmallLRUCache; import org.h2.util.SmallMap; -import org.h2.value.DataType; +import org.h2.util.TimeZoneProvider; import org.h2.value.Transfer; import org.h2.value.Value; -import org.h2.value.ValueLobDb; +import org.h2.value.ValueLob; /** * One server thread is opened per client connection. 
@@ -49,7 +53,7 @@ public class TcpServerThread implements Runnable { protected final Transfer transfer; private final TcpServer server; - private Session session; + private SessionLocal session; private boolean stop; private Thread thread; private Command commit; @@ -62,6 +66,7 @@ public class TcpServerThread implements Runnable { private final int threadId; private int clientVersion; private String sessionId; + private long lastRemoteSettingsId; TcpServerThread(Socket socket, TcpServer server, int id) { this.server = server; @@ -147,34 +152,45 @@ public void run() { ci.setBaseDir(baseDir); } if (server.getIfExists()) { - ci.setProperty("IFEXISTS", "TRUE"); + ci.setProperty("FORBID_CREATION", "TRUE"); } transfer.writeInt(SessionRemote.STATUS_OK); transfer.writeInt(clientVersion); transfer.flush(); - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_13) { - if (ci.getFilePasswordHash() != null) { - ci.setFileEncryptionKey(transfer.readBytes()); - } + if (ci.getFilePasswordHash() != null) { + ci.setFileEncryptionKey(transfer.readBytes()); + } + ci.setNetworkConnectionInfo(new NetworkConnectionInfo( + NetUtils.ipToShortForm(new StringBuilder(server.getSSL() ? 
"ssl://" : "tcp://"), + socket.getLocalAddress().getAddress(), true) // + .append(':').append(socket.getLocalPort()).toString(), // + socket.getInetAddress().getAddress(), socket.getPort(), + new StringBuilder().append('P').append(clientVersion).toString())); + if (clientVersion < Constants.TCP_PROTOCOL_VERSION_20) { + // For DatabaseMetaData + ci.setProperty("OLD_INFORMATION_SCHEMA", "TRUE"); + // For H2 Console + ci.setProperty("NON_KEYWORDS", "VALUE"); } - session = Engine.getInstance().createSession(ci); + session = Engine.createSession(ci); transfer.setSession(session); server.addConnection(threadId, originalURL, ci.getUserName()); trace("Connected"); + lastRemoteSettingsId = session.getDatabase().getRemoteSettingsId(); } catch (OutOfMemoryError e) { // catch this separately otherwise such errors will never hit the console server.traceError(e); - sendError(e); + sendError(e, true); stop = true; } catch (Throwable e) { - sendError(e); + sendError(e,true); stop = true; } while (!stop) { try { process(); } catch (Throwable e) { - sendError(e); + sendError(e, true); } } trace("Disconnect"); @@ -221,7 +237,7 @@ void close() { } } - private void sendError(Throwable t) { + private void sendError(Throwable t, boolean withStatus) { try { SQLException e = DbException.convert(t).getSQLException(); StringWriter writer = new StringWriter(); @@ -237,7 +253,10 @@ private void sendError(Throwable t) { message = e.getMessage(); sql = null; } - transfer.writeInt(SessionRemote.STATUS_ERROR). + if (withStatus) { + transfer.writeInt(SessionRemote.STATUS_ERROR); + } + transfer. writeString(e.getSQLState()).writeString(message). 
writeString(sql).writeInt(e.getErrorCode()).writeString(trace).flush(); } catch (Exception e2) { @@ -254,16 +273,15 @@ private void setParameters(Command command) throws IOException { ArrayList params = command.getParameters(); for (int i = 0; i < len; i++) { Parameter p = (Parameter) params.get(i); - p.setValue(transfer.readValue()); + p.setValue(transfer.readValue(null)); } } private void process() throws IOException { int operation = transfer.readInt(); switch (operation) { - case SessionRemote.SESSION_PREPARE_READ_PARAMS: - case SessionRemote.SESSION_PREPARE_READ_PARAMS2: - case SessionRemote.SESSION_PREPARE: { + case SessionRemote.SESSION_PREPARE: + case SessionRemote.SESSION_PREPARE_READ_PARAMS2: { int id = transfer.readInt(); String sql = transfer.readString(); int old = session.getModificationId(); @@ -275,7 +293,7 @@ private void process() throws IOException { transfer.writeInt(getState(old)).writeBoolean(isQuery). writeBoolean(readonly); - if (operation == SessionRemote.SESSION_PREPARE_READ_PARAMS2) { + if (operation != SessionRemote.SESSION_PREPARE) { transfer.writeInt(command.getCommandType()); } @@ -303,7 +321,7 @@ private void process() throws IOException { commit = session.prepareLocal("COMMIT"); } int old = session.getModificationId(); - commit.executeUpdate(false); + commit.executeUpdate(null); transfer.writeInt(getState(old)).flush(); break; } @@ -315,7 +333,7 @@ private void process() throws IOException { cache.addObject(objectId, result); int columnCount = result.getVisibleColumnCount(); transfer.writeInt(SessionRemote.STATUS_OK). 
- writeInt(columnCount).writeInt(0); + writeInt(columnCount).writeRowCount(0L); for (int i = 0; i < columnCount; i++) { ResultColumn.writeColumn(transfer, result, i); } @@ -325,7 +343,7 @@ private void process() throws IOException { case SessionRemote.COMMAND_EXECUTE_QUERY: { int id = transfer.readInt(); int objectId = transfer.readInt(); - int maxRows = transfer.readInt(); + long maxRows = transfer.readRowCount(); int fetchSize = transfer.readInt(); Command command = (Command) cache.getObject(id, false); setParameters(command); @@ -338,15 +356,12 @@ private void process() throws IOException { int columnCount = result.getVisibleColumnCount(); int state = getState(old); transfer.writeInt(state).writeInt(columnCount); - int rowCount = result.getRowCount(); - transfer.writeInt(rowCount); + long rowCount = result.isLazy() ? -1L : result.getRowCount(); + transfer.writeRowCount(rowCount); for (int i = 0; i < columnCount; i++) { ResultColumn.writeColumn(transfer, result, i); } - int fetch = Math.min(rowCount, fetchSize); - for (int i = 0; i < fetch; i++) { - sendRow(result); - } + sendRows(result, rowCount >= 0L ? 
Math.min(rowCount, fetchSize) : fetchSize); transfer.flush(); break; } @@ -354,43 +369,38 @@ private void process() throws IOException { int id = transfer.readInt(); Command command = (Command) cache.getObject(id, false); setParameters(command); - boolean supportsGeneratedKeys = clientVersion >= Constants.TCP_PROTOCOL_VERSION_17; - boolean writeGeneratedKeys = supportsGeneratedKeys; + boolean writeGeneratedKeys = true; Object generatedKeysRequest; - if (supportsGeneratedKeys) { - int mode = transfer.readInt(); - switch (mode) { - case GeneratedKeysMode.NONE: - generatedKeysRequest = false; - writeGeneratedKeys = false; - break; - case GeneratedKeysMode.AUTO: - generatedKeysRequest = true; - break; - case GeneratedKeysMode.COLUMN_NUMBERS: { - int len = transfer.readInt(); - int[] keys = new int[len]; - for (int i = 0; i < len; i++) { - keys[i] = transfer.readInt(); - } - generatedKeysRequest = keys; - break; - } - case GeneratedKeysMode.COLUMN_NAMES: { - int len = transfer.readInt(); - String[] keys = new String[len]; - for (int i = 0; i < len; i++) { - keys[i] = transfer.readString(); - } - generatedKeysRequest = keys; - break; + int mode = transfer.readInt(); + switch (mode) { + case GeneratedKeysMode.NONE: + generatedKeysRequest = false; + writeGeneratedKeys = false; + break; + case GeneratedKeysMode.AUTO: + generatedKeysRequest = true; + break; + case GeneratedKeysMode.COLUMN_NUMBERS: { + int len = transfer.readInt(); + int[] keys = new int[len]; + for (int i = 0; i < len; i++) { + keys[i] = transfer.readInt(); } - default: - throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, - "Unsupported generated keys' mode " + mode); + generatedKeysRequest = keys; + break; + } + case GeneratedKeysMode.COLUMN_NAMES: { + int len = transfer.readInt(); + String[] keys = new String[len]; + for (int i = 0; i < len; i++) { + keys[i] = transfer.readString(); } - } else { - generatedKeysRequest = false; + generatedKeysRequest = keys; + break; + } + default: + throw 
DbException.get(ErrorCode.CONNECTION_BROKEN_1, + "Unsupported generated keys' mode " + mode); } int old = session.getModificationId(); ResultWithGeneratedKeys result; @@ -404,20 +414,19 @@ private void process() throws IOException { } else { status = getState(old); } - transfer.writeInt(status).writeInt(result.getUpdateCount()). - writeBoolean(session.getAutoCommit()); + transfer.writeInt(status); + transfer.writeRowCount(result.getUpdateCount()); + transfer.writeBoolean(session.getAutoCommit()); if (writeGeneratedKeys) { ResultInterface generatedKeys = result.getGeneratedKeys(); int columnCount = generatedKeys.getVisibleColumnCount(); transfer.writeInt(columnCount); - int rowCount = generatedKeys.getRowCount(); - transfer.writeInt(rowCount); + long rowCount = generatedKeys.getRowCount(); + transfer.writeRowCount(rowCount); for (int i = 0; i < columnCount; i++) { ResultColumn.writeColumn(transfer, generatedKeys, i); } - for (int i = 0; i < rowCount; i++) { - sendRow(generatedKeys); - } + sendRows(generatedKeys, rowCount); generatedKeys.close(); } transfer.flush(); @@ -437,9 +446,7 @@ private void process() throws IOException { int count = transfer.readInt(); ResultInterface result = (ResultInterface) cache.getObject(id, false); transfer.writeInt(SessionRemote.STATUS_OK); - for (int i = 0; i < count; i++) { - sendRow(result); - } + sendRows(result, count); transfer.flush(); break; } @@ -468,11 +475,12 @@ private void process() throws IOException { } case SessionRemote.SESSION_SET_ID: { sessionId = transfer.readString(); - transfer.writeInt(SessionRemote.STATUS_OK); - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_15) { - transfer.writeBoolean(session.getAutoCommit()); + if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_20) { + session.setTimeZone(TimeZoneProvider.ofId(transfer.readString())); } - transfer.flush(); + transfer.writeInt(SessionRemote.STATUS_OK) + .writeBoolean(session.getAutoCommit()) + .flush(); break; } case 
SessionRemote.SESSION_SET_AUTOCOMMIT: { @@ -488,40 +496,15 @@ private void process() throws IOException { } case SessionRemote.LOB_READ: { long lobId = transfer.readLong(); - byte[] hmac; - CachedInputStream in; - boolean verifyMac; - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_11) { - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_12) { - hmac = transfer.readBytes(); - verifyMac = true; - } else { - hmac = null; - verifyMac = false; - } - in = lobs.get(lobId); - if (in == null && verifyMac) { - in = new CachedInputStream(null); - lobs.put(lobId, in); - } - } else { - verifyMac = false; - hmac = null; - in = lobs.get(lobId); - } + byte[] hmac = transfer.readBytes(); long offset = transfer.readLong(); int length = transfer.readInt(); - if (verifyMac) { - transfer.verifyLobMac(hmac, lobId); - } - if (in == null) { - throw DbException.get(ErrorCode.OBJECT_CLOSED); - } - if (in.getPos() != offset) { + transfer.verifyLobMac(hmac, lobId); + CachedInputStream in = lobs.get(lobId); + if (in == null || in.getPos() != offset) { LobStorageInterface lobStorage = session.getDataHandler().getLobStorage(); // only the lob id is used - ValueLobDb lob = ValueLobDb.create(Value.BLOB, null, -1, lobId, hmac, -1); - InputStream lobIn = lobStorage.getInputStream(lob, hmac, -1); + InputStream lobIn = lobStorage.getInputStream(lobId, -1); in = new CachedInputStream(lobIn); lobs.put(lobId, in); lobIn.skip(offset); @@ -536,6 +519,30 @@ private void process() throws IOException { transfer.flush(); break; } + case SessionRemote.GET_JDBC_META: { + int code = transfer.readInt(); + int length = transfer.readInt(); + Value[] args = new Value[length]; + for (int i = 0; i < length; i++) { + args[i] = transfer.readValue(null); + } + int old = session.getModificationId(); + ResultInterface result; + synchronized (session) { + result = DatabaseMetaServer.process(session, code, args); + } + int columnCount = result.getVisibleColumnCount(); + int state = getState(old); + 
transfer.writeInt(state).writeInt(columnCount); + long rowCount = result.getRowCount(); + transfer.writeRowCount(rowCount); + for (int i = 0; i < columnCount; i++) { + ResultColumn.writeColumn(transfer, result, i); + } + sendRows(result, rowCount); + transfer.flush(); + break; + } default: trace("Unknown operation: " + operation); close(); @@ -547,38 +554,52 @@ private int getState(int oldModificationId) { return SessionRemote.STATUS_CLOSED; } if (session.getModificationId() == oldModificationId) { - return SessionRemote.STATUS_OK; + long remoteSettingsId = session.getDatabase().getRemoteSettingsId(); + if (lastRemoteSettingsId == remoteSettingsId) { + return SessionRemote.STATUS_OK; + } + lastRemoteSettingsId = remoteSettingsId; } return SessionRemote.STATUS_OK_STATE_CHANGED; } - private void sendRow(ResultInterface result) throws IOException { - if (result.next()) { - transfer.writeBoolean(true); - Value[] v = result.currentRow(); - for (int i = 0; i < result.getVisibleColumnCount(); i++) { - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_12) { - transfer.writeValue(v[i]); + private void sendRows(ResultInterface result, long count) throws IOException { + int columnCount = result.getVisibleColumnCount(); + boolean lazy = result.isLazy(); + Session oldSession = lazy ? 
session.setThreadLocalSession() : null; + try { + while (count-- > 0L) { + boolean hasNext; + try { + hasNext = result.next(); + } catch (Exception e) { + transfer.writeByte((byte) -1); + sendError(e, false); + break; + } + if (hasNext) { + transfer.writeByte((byte) 1); + Value[] values = result.currentRow(); + for (int i = 0; i < columnCount; i++) { + Value v = values[i]; + if (lazy && v instanceof ValueLob) { + ValueLob v2 = ((ValueLob) v).copyToResult(); + if (v2 != v) { + v = session.addTemporaryLob(v2); + } + } + transfer.writeValue(v); + } } else { - writeValue(v[i]); + transfer.writeByte((byte) 0); + break; } } - } else { - transfer.writeBoolean(false); - } - } - - private void writeValue(Value v) throws IOException { - if (DataType.isLargeObject(v.getValueType())) { - if (v instanceof ValueLobDb) { - ValueLobDb lob = (ValueLobDb) v; - if (lob.isStored()) { - long id = lob.getLobId(); - lobs.put(id, new CachedInputStream(null)); - } + } finally { + if (lazy) { + session.resetThreadLocalSession(oldSession); } } - transfer.writeValue(v); } void setThread(Thread thread) { diff --git a/h2/src/main/org/h2/server/package.html b/h2/src/main/org/h2/server/package.html index 16ccaa6557..05dde64b0c 100644 --- a/h2/src/main/org/h2/server/package.html +++ b/h2/src/main/org/h2/server/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/pg/PgServer.java b/h2/src/main/org/h2/server/pg/PgServer.java index 2f8812a156..94a59dd41d 100644 --- a/h2/src/main/org/h2/server/pg/PgServer.java +++ b/h2/src/main/org/h2/server/pg/PgServer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server.pg; @@ -9,29 +9,25 @@ import java.net.ServerSocket; import java.net.Socket; import java.net.UnknownHostException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.sql.Types; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import org.h2.api.ErrorCode; -import org.h2.engine.Constants; import org.h2.message.DbException; import org.h2.server.Service; import org.h2.util.NetUtils; import org.h2.util.Tool; +import org.h2.util.Utils10; +import org.h2.value.TypeInfo; +import org.h2.value.Value; /** * This class implements a subset of the PostgreSQL protocol as described here: - * http://developer.postgresql.org/pgdocs/postgres/protocol.html + * https://www.postgresql.org/docs/devel/protocol.html * The PostgreSQL catalog is described here: - * http://www.postgresql.org/docs/7.4/static/catalogs.html + * https://www.postgresql.org/docs/7.4/catalogs.html * * @author Thomas Mueller * @author Sergi Vladykin 2009-07-03 (convertType) @@ -56,14 +52,17 @@ public class PgServer implements Service { public static final int PG_TYPE_INT2 = 21; public static final int PG_TYPE_INT4 = 23; public static final int PG_TYPE_TEXT = 25; - public static final int PG_TYPE_OID = 26; public static final int PG_TYPE_FLOAT4 = 700; public static final int PG_TYPE_FLOAT8 = 701; public static final int PG_TYPE_UNKNOWN = 705; - public static final int PG_TYPE_TEXTARRAY = 1009; + public static final int PG_TYPE_INT2_ARRAY = 1005; + public static final int PG_TYPE_INT4_ARRAY = 1007; + public static final int PG_TYPE_VARCHAR_ARRAY = 1015; public static final int PG_TYPE_DATE = 1082; public static final int PG_TYPE_TIME = 1083; - public static final int PG_TYPE_TIMESTAMP_NO_TMZONE = 1114; + public static final int PG_TYPE_TIMETZ = 1266; 
+ public static final int PG_TYPE_TIMESTAMP = 1114; + public static final int PG_TYPE_TIMESTAMPTZ = 1184; public static final int PG_TYPE_NUMERIC = 1700; private final HashSet typeSet = new HashSet<>(); @@ -107,7 +106,6 @@ public void init(String... args) { keyDatabase = args[++i]; } } - org.h2.Driver.load(); // int testing; // trace = true; } @@ -194,10 +192,12 @@ public void listen() { trace("Connection not allowed"); s.close(); } else { + Utils10.setTcpQuickack(s, true); PgServerThread c = new PgServerThread(s, this); running.add(c); - c.setProcessId(pid.incrementAndGet()); - Thread thread = new Thread(c, threadName+" thread"); + int id = pid.incrementAndGet(); + c.setProcessId(id); + Thread thread = new Thread(c, threadName + " thread-" + id); thread.setDaemon(isDaemon); c.setThread(thread); thread.start(); @@ -296,210 +296,84 @@ boolean getIfExists() { } /** - * The Java implementation of the PostgreSQL function pg_get_indexdef. The - * method is used to get CREATE INDEX command for an index, or the column - * definition of one column in the index. + * Returns the name of the given type. * - * @param conn the connection - * @param indexId the index id - * @param ordinalPosition the ordinal position (null if the SQL statement - * should be returned) - * @param pretty this flag is ignored - * @return the SQL statement or the column name - */ - @SuppressWarnings("unused") - public static String getIndexColumn(Connection conn, int indexId, - Integer ordinalPosition, Boolean pretty) throws SQLException { - if (ordinalPosition == null || ordinalPosition == 0) { - PreparedStatement prep = conn.prepareStatement( - "select sql from information_schema.indexes where id=?"); - prep.setInt(1, indexId); - ResultSet rs = prep.executeQuery(); - if (rs.next()) { - return rs.getString(1); - } - return ""; - } - PreparedStatement prep = conn.prepareStatement( - "select column_name from information_schema.indexes " + - "where id=? 
and ordinal_position=?"); - prep.setInt(1, indexId); - prep.setInt(2, ordinalPosition); - ResultSet rs = prep.executeQuery(); - if (rs.next()) { - return rs.getString(1); - } - return ""; - } - - /** - * Get the name of the current schema. - * This method is called by the database. - * - * @param conn the connection - * @return the schema name - */ - public static String getCurrentSchema(Connection conn) throws SQLException { - ResultSet rs = conn.createStatement().executeQuery("call schema()"); - rs.next(); - return rs.getString(1); - } - - /** - * Get the OID of an object. This method is called by the database. - * - * @param conn the connection - * @param tableName the table name - * @return the oid - */ - public static int getOid(Connection conn, String tableName) - throws SQLException { - if (tableName.startsWith("\"") && tableName.endsWith("\"")) { - tableName = tableName.substring(1, tableName.length() - 1); - } - PreparedStatement prep = conn.prepareStatement( - "select oid from pg_class where relName = ?"); - prep.setString(1, tableName); - ResultSet rs = prep.executeQuery(); - if (!rs.next()) { - return 0; - } - return rs.getInt(1); - } - - /** - * Get the name of this encoding code. - * This method is called by the database. - * - * @param code the encoding code - * @return the encoding name - */ - public static String getEncodingName(int code) { - switch (code) { - case 0: - return "SQL_ASCII"; - case 6: - return "UTF8"; - case 8: - return "LATIN1"; - default: - return code < 40 ? "UTF8" : ""; - } - } - - /** - * Get the version. This method must return PostgreSQL to keep some clients - * happy. This method is called by the database. - * - * @return the server name and version - */ - public static String getVersion() { - return "PostgreSQL " + Constants.PG_VERSION + " server protocol using H2 " + - Constants.getFullVersion(); - } - - /** - * Get the current system time. - * This method is called by the database. 
- * - * @return the current system time - */ - public static Timestamp getStartTime() { - return new Timestamp(System.currentTimeMillis()); - } - - /** - * Get the user name for this id. - * This method is called by the database. - * - * @param conn the connection - * @param id the user id - * @return the user name - */ - public static String getUserById(Connection conn, int id) throws SQLException { - PreparedStatement prep = conn.prepareStatement( - "SELECT NAME FROM INFORMATION_SCHEMA.USERS WHERE ID=?"); - prep.setInt(1, id); - ResultSet rs = prep.executeQuery(); - if (rs.next()) { - return rs.getString(1); - } - return null; - } - - /** - * Check if the this session has the given database privilege. - * This method is called by the database. - * - * @param id the session id - * @param privilege the privilege to check - * @return true - */ - @SuppressWarnings("unused") - public static boolean hasDatabasePrivilege(int id, String privilege) { - return true; - } - - /** - * Check if the current session has access to this table. - * This method is called by the database. - * - * @param table the table name - * @param privilege the privilege to check - * @return true - */ - @SuppressWarnings("unused") - public static boolean hasTablePrivilege(String table, String privilege) { - return true; - } - - /** - * Get the current transaction id. - * This method is called by the database. - * - * @param table the table name - * @param id the id - * @return 1 - */ - @SuppressWarnings("unused") - public static int getCurrentTid(String table, String id) { - return 1; - } - - /** - * A fake wrapper around pg_get_expr(expr_text, relation_oid), in PostgreSQL - * it "decompiles the internal form of an expression, assuming that any vars - * in it refer to the relation indicated by the second parameter". 
- * - * @param exprText the expression text - * @param relationOid the relation object id - * @return always null - */ - @SuppressWarnings("unused") - public static String getPgExpr(String exprText, int relationOid) { - return null; - } - - /** - * Check if the current session has access to this table. - * This method is called by the database. - * - * @param conn the connection * @param pgType the PostgreSQL type oid - * @param typeMod the type modifier (typically -1) * @return the name of the given type */ - public static String formatType(Connection conn, int pgType, int typeMod) - throws SQLException { - PreparedStatement prep = conn.prepareStatement( - "select typname from pg_catalog.pg_type where oid = ? and typtypmod = ?"); - prep.setInt(1, pgType); - prep.setInt(2, typeMod); - ResultSet rs = prep.executeQuery(); - if (rs.next()) { - return rs.getString(1); + public static String formatType(int pgType) { + int valueType; + switch (pgType) { + case 0: + return "-"; + case PG_TYPE_BOOL: + valueType = Value.BOOLEAN; + break; + case PG_TYPE_BYTEA: + valueType = Value.VARBINARY; + break; + case 18: + return "char"; + case 19: + return "name"; + case PG_TYPE_INT8: + valueType = Value.BIGINT; + break; + case PG_TYPE_INT2: + valueType = Value.SMALLINT; + break; + case 22: + return "int2vector"; + case PG_TYPE_INT4: + valueType = Value.INTEGER; + break; + case 24: + return "regproc"; + case PG_TYPE_TEXT: + valueType = Value.CLOB; + break; + case PG_TYPE_FLOAT4: + valueType = Value.REAL; + break; + case PG_TYPE_FLOAT8: + valueType = Value.DOUBLE; + break; + case PG_TYPE_INT2_ARRAY: + return "smallint[]"; + case PG_TYPE_INT4_ARRAY: + return "integer[]"; + case PG_TYPE_VARCHAR_ARRAY: + return "character varying[]"; + case PG_TYPE_BPCHAR: + valueType = Value.CHAR; + break; + case PG_TYPE_VARCHAR: + valueType = Value.VARCHAR; + break; + case PG_TYPE_DATE: + valueType = Value.DATE; + break; + case PG_TYPE_TIME: + valueType = Value.TIME; + break; + case PG_TYPE_TIMETZ: + 
valueType = Value.TIME_TZ; + break; + case PG_TYPE_TIMESTAMP: + valueType = Value.TIMESTAMP; + break; + case PG_TYPE_TIMESTAMPTZ: + valueType = Value.TIMESTAMP_TZ; + break; + case PG_TYPE_NUMERIC: + valueType = Value.NUMERIC; + break; + case 2205: + return "regclass"; + default: + return "???"; } - return null; + return Value.getTypeName(valueType); } /** @@ -508,40 +382,56 @@ public static String formatType(Connection conn, int pgType, int typeMod) * @param type the SQL type * @return the PostgreSQL type */ - public static int convertType(final int type) { - switch (type) { - case Types.BOOLEAN: + public static int convertType(TypeInfo type) { + switch (type.getValueType()) { + case Value.BOOLEAN: return PG_TYPE_BOOL; - case Types.VARCHAR: + case Value.VARCHAR: return PG_TYPE_VARCHAR; - case Types.CLOB: + case Value.NULL: + case Value.CLOB: return PG_TYPE_TEXT; - case Types.CHAR: + case Value.CHAR: return PG_TYPE_BPCHAR; - case Types.SMALLINT: + case Value.SMALLINT: return PG_TYPE_INT2; - case Types.INTEGER: + case Value.INTEGER: return PG_TYPE_INT4; - case Types.BIGINT: + case Value.BIGINT: return PG_TYPE_INT8; - case Types.DECIMAL: + case Value.NUMERIC: + case Value.DECFLOAT: return PG_TYPE_NUMERIC; - case Types.REAL: + case Value.REAL: return PG_TYPE_FLOAT4; - case Types.DOUBLE: + case Value.DOUBLE: return PG_TYPE_FLOAT8; - case Types.TIME: + case Value.TIME: return PG_TYPE_TIME; - case Types.DATE: + case Value.TIME_TZ: + return PG_TYPE_TIMETZ; + case Value.DATE: return PG_TYPE_DATE; - case Types.TIMESTAMP: - return PG_TYPE_TIMESTAMP_NO_TMZONE; - case Types.VARBINARY: + case Value.TIMESTAMP: + return PG_TYPE_TIMESTAMP; + case Value.TIMESTAMP_TZ: + return PG_TYPE_TIMESTAMPTZ; + case Value.BINARY: + case Value.VARBINARY: return PG_TYPE_BYTEA; - case Types.BLOB: - return PG_TYPE_OID; - case Types.ARRAY: - return PG_TYPE_TEXTARRAY; + case Value.ARRAY: { + type = (TypeInfo) type.getExtTypeInfo(); + switch (type.getValueType()) { + case Value.SMALLINT: + return 
PG_TYPE_INT2_ARRAY; + case Value.INTEGER: + return PG_TYPE_INT4_ARRAY; + case Value.VARCHAR: + return PG_TYPE_VARCHAR_ARRAY; + default: + return PG_TYPE_VARCHAR_ARRAY; + } + } default: return PG_TYPE_UNKNOWN; } diff --git a/h2/src/main/org/h2/server/pg/PgServerThread.java b/h2/src/main/org/h2/server/pg/PgServerThread.java index 36488fdc9d..aba652af6a 100644 --- a/h2/src/main/org/h2/server/pg/PgServerThread.java +++ b/h2/src/main/org/h2/server/pg/PgServerThread.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.pg; @@ -12,61 +12,99 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; -import java.io.InputStreamReader; import java.io.OutputStream; -import java.io.Reader; import java.io.StringReader; +import java.math.BigDecimal; +import java.math.BigInteger; import java.net.Socket; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -import java.sql.Connection; -import java.sql.ParameterMetaData; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Types; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Properties; +import java.util.regex.Pattern; + +import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.Engine; +import org.h2.engine.SessionLocal; import org.h2.engine.SysProperties; -import org.h2.jdbc.JdbcConnection; -import org.h2.jdbc.JdbcPreparedStatement; -import 
org.h2.jdbc.JdbcResultSet; -import org.h2.jdbc.JdbcStatement; +import org.h2.expression.ParameterInterface; import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.schema.Schema; +import org.h2.table.Column; +import org.h2.table.Table; import org.h2.util.DateTimeUtils; -import org.h2.util.JdbcUtils; import org.h2.util.MathUtils; +import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.ScriptReader; import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; import org.h2.util.Utils; +import org.h2.util.Utils10; import org.h2.value.CaseInsensitiveMap; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; import org.h2.value.ValueDate; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; +import org.h2.value.ValueReal; +import org.h2.value.ValueSmallint; import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; /** * One server thread is opened for each client. 
*/ -public class PgServerThread implements Runnable { +public final class PgServerThread implements Runnable { + private static final boolean INTEGER_DATE_TYPES = false; + private static final Pattern SHOULD_QUOTE = Pattern.compile(".*[\",\\\\{}].*"); + + private static String pgTimeZone(String value) { + if (value.startsWith("GMT+")) { + return convertTimeZone(value, "GMT-"); + } else if (value.startsWith("GMT-")) { + return convertTimeZone(value, "GMT+"); + } else if (value.startsWith("UTC+")) { + return convertTimeZone(value, "UTC-"); + } else if (value.startsWith("UTC-")) { + return convertTimeZone(value, "UTC+"); + } else { + return value; + } + } + + private static String convertTimeZone(String value, String prefix) { + int length = value.length(); + return new StringBuilder(length).append(prefix).append(value, 4, length).toString(); + } + private final PgServer server; private Socket socket; - private Connection conn; + private SessionLocal session; private boolean stop; private DataInputStream dataInRaw; private DataInputStream dataIn; private OutputStream out; private int messageType; - private ByteArrayOutputStream outBuffer; + private ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); private DataOutputStream dataOut; private Thread thread; private boolean initDone; @@ -74,9 +112,10 @@ public class PgServerThread implements Runnable { private String databaseName; private int processId; private final int secret; - private JdbcStatement activeRequest; + private CommandInterface activeRequest; private String clientEncoding = SysProperties.PG_DEFAULT_CLIENT_ENCODING; private String dateStyle = "ISO, MDY"; + private TimeZoneProvider timeZone = DateTimeUtils.getTimeZone(); private final HashMap prepared = new CaseInsensitiveMap<>(); private final HashMap portals = @@ -118,7 +157,7 @@ private String readString() throws IOException { } buff.write(x); } - return new String(buff.toByteArray(), getEncoding()); + return 
Utils10.byteArrayOutputStreamToString(buff, getEncoding()); } private int readInt() throws IOException { @@ -184,18 +223,36 @@ private void process() throws IOException { break; } String value = readString(); - if ("user".equals(param)) { + switch (param) { + case "user": this.userName = value; - } else if ("database".equals(param)) { + break; + case "database": this.databaseName = server.checkKeyAndGetDatabaseName(value); - } else if ("client_encoding".equals(param)) { + break; + case "client_encoding": + // node-postgres will send "'utf-8'" + int length = value.length(); + if (length >= 2 && value.charAt(0) == '\'' + && value.charAt(length - 1) == '\'') { + value = value.substring(1, length - 1); + } // UTF8 clientEncoding = value; - } else if ("DateStyle".equals(param)) { + break; + case "DateStyle": if (value.indexOf(',') < 0) { value += ", MDY"; } dateStyle = value; + break; + case "TimeZone": + try { + timeZone = TimeZoneProvider.ofId(pgTimeZone(value)); + } catch (Exception e) { + server.trace("Unknown TimeZone: " + value); + } + break; } // extra_float_digits 2 // geqo on (Genetic Query Optimization) @@ -211,10 +268,10 @@ private void process() throws IOException { try { Properties info = new Properties(); info.put("MODE", "PostgreSQL"); - info.put("USER", userName); - info.put("PASSWORD", password); + info.put("DATABASE_TO_LOWER", "TRUE"); + info.put("DEFAULT_NULL_ORDERING", "HIGH"); String url = "jdbc:h2:" + databaseName; - ConnectionInfo ci = new ConnectionInfo(url, info); + ConnectionInfo ci = new ConnectionInfo(url, info, userName, password); String baseDir = server.getBaseDir(); if (baseDir == null) { baseDir = SysProperties.getBaseDir(); @@ -223,12 +280,14 @@ private void process() throws IOException { ci.setBaseDir(baseDir); } if (server.getIfExists()) { - ci.setProperty("IFEXISTS", "TRUE"); + ci.setProperty("FORBID_CREATION", "TRUE"); } - conn = new JdbcConnection(ci, false); - // can not do this because when called inside - // 
DriverManager.getConnection, a deadlock occurs - // conn = DriverManager.getConnection(url, userName, password); + ci.setNetworkConnectionInfo(new NetworkConnectionInfo( // + NetUtils.ipToShortForm(new StringBuilder("pg://"), // + socket.getLocalAddress().getAddress(), true) // + .append(':').append(socket.getLocalPort()).toString(), // + socket.getInetAddress().getAddress(), socket.getPort(), null)); + session = Engine.createSession(ci); initDb(); sendAuthenticationOk(); } catch (Exception e) { @@ -251,16 +310,17 @@ private void process() throws IOException { } } try { - p.prep = (JdbcPreparedStatement) conn.prepareStatement(p.sql); - ParameterMetaData meta = p.prep.getParameterMetaData(); - p.paramType = new int[meta.getParameterCount()]; - for (int i = 0; i < p.paramType.length; i++) { + p.prep = session.prepareLocal(p.sql); + ArrayList parameters = p.prep.getParameters(); + int count = parameters.size(); + p.paramType = new int[count]; + for (int i = 0; i < count; i++) { int type; if (i < paramTypesCount && paramTypes[i] != 0) { type = paramTypes[i]; server.checkType(type); } else { - type = PgServer.convertType(meta.getParameterType(i + 1)); + type = PgServer.convertType(parameters.get(i).getType()); } p.paramType[i] = type; } @@ -290,8 +350,9 @@ private void process() throws IOException { } int paramCount = readShort(); try { + ArrayList parameters = prep.prep.getParameters(); for (int i = 0; i < paramCount; i++) { - setParameter(prep.prep, prep.paramType[i], i, formatCodes); + setParameter(parameters, prep.paramType[i], i, formatCodes); } } catch (Exception e) { sendErrorResponse(e); @@ -312,10 +373,13 @@ private void process() throws IOException { if (type == 'S') { Prepared p = prepared.remove(name); if (p != null) { - JdbcUtils.closeSilently(p.prep); + p.close(); } } else if (type == 'P') { - portals.remove(name); + Portal p = portals.remove(name); + if (p != null) { + p.prep.closeResult(); + } } else { server.trace("expected S or P, got " + type); 
sendErrorResponse("expected S or P"); @@ -334,8 +398,8 @@ private void process() throws IOException { sendErrorResponse("Prepared not found: " + name); } else { try { - sendParameterDescription(p.prep.getParameterMetaData(), p.paramType); - sendRowDescription(p.prep.getMetaData()); + sendParameterDescription(p.prep.getParameters(), p.paramType); + sendRowDescription(p.prep.getMetaData(), null); } catch (Exception e) { sendErrorResponse(e); } @@ -345,10 +409,9 @@ private void process() throws IOException { if (p == null) { sendErrorResponse("Portal not found: " + name); } else { - PreparedStatement prep = p.prep.prep; + CommandInterface prep = p.prep.prep; try { - ResultSetMetaData meta = prep.getMetaData(); - sendRowDescription(meta); + sendRowDescription(prep.getMetaData(), p.resultColumnFormat); } catch (Exception e) { sendErrorResponse(e); } @@ -367,34 +430,19 @@ private void process() throws IOException { sendErrorResponse("Portal not found: " + name); break; } - int maxRows = readShort(); + int maxRows = readInt(); Prepared prepared = p.prep; - JdbcPreparedStatement prep = prepared.prep; + CommandInterface prep = prepared.prep; server.trace(prepared.sql); try { - prep.setMaxRows(maxRows); setActiveRequest(prep); - boolean result = prep.execute(); - if (result) { - try { - ResultSet rs = prep.getResultSet(); - // the meta-data is sent in the prior 'Describe' - while (rs.next()) { - sendDataRow(rs, p.resultColumnFormat); - } - sendCommandComplete(prep, 0); - } catch (Exception e) { - sendErrorResponse(e); - } + if (prep.isQuery()) { + executeQuery(prepared, prep, p.resultColumnFormat, maxRows); } else { - sendCommandComplete(prep, prep.getUpdateCount()); + sendCommandComplete(prep, prep.executeUpdate(null).getUpdateCount()); } } catch (Exception e) { - if (prep.isCancelled()) { - sendCancelQueryResponse(); - } else { - sendErrorResponse(e); - } + sendErrorOrCancelResponse(e); } finally { setActiveRequest(null); } @@ -408,43 +456,31 @@ private void process() 
throws IOException { case 'Q': { server.trace("Query"); String query = readString(); + @SuppressWarnings("resource") ScriptReader reader = new ScriptReader(new StringReader(query)); while (true) { - JdbcStatement stat = null; - try { - String s = reader.readStatement(); - if (s == null) { - break; - } - s = getSQL(s); - stat = (JdbcStatement) conn.createStatement(); - setActiveRequest(stat); - boolean result = stat.execute(s); - if (result) { - ResultSet rs = stat.getResultSet(); - ResultSetMetaData meta = rs.getMetaData(); - try { - sendRowDescription(meta); - while (rs.next()) { - sendDataRow(rs, null); + String s = reader.readStatement(); + if (s == null) { + break; + } + s = getSQL(s); + try (CommandInterface command = session.prepareLocal(s)) { + setActiveRequest(command); + if (command.isQuery()) { + try (ResultInterface result = command.executeQuery(0, false)) { + sendRowDescription(result, null); + while (result.next()) { + sendDataRow(result, null); } - sendCommandComplete(stat, 0); - } catch (Exception e) { - sendErrorResponse(e); - break; + sendCommandComplete(command, 0); } } else { - sendCommandComplete(stat, stat.getUpdateCount()); - } - } catch (SQLException e) { - if (stat != null && stat.isCancelled()) { - sendCancelQueryResponse(); - } else { - sendErrorResponse(e); + sendCommandComplete(command, command.executeUpdate(null).getUpdateCount()); } + } catch (Exception e) { + sendErrorOrCancelResponse(e); break; } finally { - JdbcUtils.closeSilently(stat); setActiveRequest(null); } } @@ -462,6 +498,36 @@ private void process() throws IOException { } } + private void executeQuery(Prepared prepared, CommandInterface prep, int[] resultColumnFormat, int maxRows) + throws Exception { + ResultInterface result = prepared.result; + if (result == null) { + result = prep.executeQuery(0L, false); + } + try { + // the meta-data is sent in the prior 'Describe' + if (maxRows == 0) { + while (result.next()) { + sendDataRow(result, resultColumnFormat); + } + } else { 
+ for (; maxRows > 0 && result.next(); maxRows--) { + sendDataRow(result, resultColumnFormat); + } + if (result.hasNext()) { + prepared.result = result; + sendCommandSuspended(); + return; + } + } + prepared.closeResult(); + sendCommandComplete(prep, 0); + } catch (Exception e) { + prepared.closeResult(); + throw e; + } + } + private String getSQL(String s) { String lower = StringUtils.toLowerEnglish(s); if (lower.startsWith("show max_identifier_length")) { @@ -476,21 +542,20 @@ private String getSQL(String s) { return s; } - private void sendCommandComplete(JdbcStatement stat, int updateCount) - throws IOException { + private void sendCommandComplete(CommandInterface command, long updateCount) throws IOException { startMessage('C'); - switch (stat.getLastExecutedCommandType()) { + switch (command.getCommandType()) { case CommandInterface.INSERT: writeStringPart("INSERT 0 "); - writeString(Integer.toString(updateCount)); + writeString(Long.toString(updateCount)); break; case CommandInterface.UPDATE: writeStringPart("UPDATE "); - writeString(Integer.toString(updateCount)); + writeString(Long.toString(updateCount)); break; case CommandInterface.DELETE: writeStringPart("DELETE "); - writeString(Integer.toString(updateCount)); + writeString(Long.toString(updateCount)); break; case CommandInterface.SELECT: case CommandInterface.CALL: @@ -500,42 +565,36 @@ private void sendCommandComplete(JdbcStatement stat, int updateCount) writeString("BEGIN"); break; default: - server.trace("check CommandComplete tag for command " + stat); + server.trace("check CommandComplete tag for command " + command); writeStringPart("UPDATE "); - writeString(Integer.toString(updateCount)); + writeString(Long.toString(updateCount)); } sendMessage(); } - private void sendDataRow(ResultSet rs, int[] formatCodes) throws IOException, SQLException { - ResultSetMetaData metaData = rs.getMetaData(); - int columns = metaData.getColumnCount(); + private void sendCommandSuspended() throws IOException { + 
startMessage('s'); + sendMessage(); + } + + private void sendDataRow(ResultInterface result, int[] formatCodes) throws IOException { + int columns = result.getVisibleColumnCount(); startMessage('D'); writeShort(columns); - for (int i = 1; i <= columns; i++) { - int pgType = PgServer.convertType(metaData.getColumnType(i)); - boolean text = formatAsText(pgType); - if (formatCodes != null) { - if (formatCodes.length == 0) { - text = true; - } else if (formatCodes.length == 1) { - text = formatCodes[0] == 0; - } else if (i - 1 < formatCodes.length) { - text = formatCodes[i - 1] == 0; - } - } - writeDataColumn(rs, i, pgType, text); + Value[] row = result.currentRow(); + for (int i = 0; i < columns; i++) { + int pgType = PgServer.convertType(result.getColumnType(i)); + boolean text = formatAsText(pgType, formatCodes, i); + writeDataColumn(row[i], pgType, text); } sendMessage(); } private static long toPostgreDays(long dateValue) { - return DateTimeUtils.prolepticGregorianAbsoluteDayFromDateValue(dateValue) - 10_957; + return DateTimeUtils.absoluteDayFromDateValue(dateValue) - 10_957; } - private void writeDataColumn(ResultSet rs, int column, int pgType, boolean text) - throws IOException { - Value v = ((JdbcResultSet) rs).get(column); + private void writeDataColumn(Value v, int pgType, boolean text) throws IOException { if (v == ValueNull.INSTANCE) { writeInt(-1); return; @@ -547,6 +606,62 @@ private void writeDataColumn(ResultSet rs, int column, int pgType, boolean text) writeInt(1); dataOut.writeByte(v.getBoolean() ? 
't' : 'f'); break; + case PgServer.PG_TYPE_BYTEA: { + byte[] bytes = v.getBytesNoCopy(); + int length = bytes.length; + int cnt = length; + for (int i = 0; i < length; i++) { + byte b = bytes[i]; + if (b < 32 || b > 126) { + cnt += 3; + } else if (b == 92) { + cnt++; + } + } + byte[] data = new byte[cnt]; + for (int i = 0, j = 0; i < length; i++) { + byte b = bytes[i]; + if (b < 32 || b > 126) { + data[j++] = '\\'; + data[j++] = (byte) (((b >>> 6) & 3) + '0'); + data[j++] = (byte) (((b >>> 3) & 7) + '0'); + data[j++] = (byte) ((b & 7) + '0'); + } else if (b == 92) { + data[j++] = '\\'; + data[j++] = '\\'; + } else { + data[j++] = b; + } + } + writeInt(data.length); + write(data); + break; + } + case PgServer.PG_TYPE_INT2_ARRAY: + case PgServer.PG_TYPE_INT4_ARRAY: + case PgServer.PG_TYPE_VARCHAR_ARRAY: + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + baos.write('{'); + Value[] values = ((ValueArray) v).getList(); + Charset encoding = getEncoding(); + for (int i = 0; i < values.length; i++) { + if (i > 0) { + baos.write(','); + } + String s = values[i].getString(); + if (SHOULD_QUOTE.matcher(s).matches()) { + List ss = new ArrayList<>(); + for (String s0 : s.split("\\\\")) { + ss.add(s0.replace("\"", "\\\"")); + } + s = "\"" + String.join("\\\\", ss) + "\""; + } + baos.write(s.getBytes(encoding)); + } + baos.write('}'); + writeInt(baos.size()); + write(baos); + break; default: byte[] data = v.getString().getBytes(getEncoding()); writeInt(data.length); @@ -555,6 +670,10 @@ private void writeDataColumn(ResultSet rs, int column, int pgType, boolean text) } else { // binary switch (pgType) { + case PgServer.PG_TYPE_BOOL: + writeInt(1); + dataOut.writeByte(v.getBoolean() ? 
1 : 0); + break; case PgServer.PG_TYPE_INT2: writeInt(2); writeShort(v.getShort()); @@ -575,45 +694,45 @@ private void writeDataColumn(ResultSet rs, int column, int pgType, boolean text) writeInt(8); dataOut.writeDouble(v.getDouble()); break; + case PgServer.PG_TYPE_NUMERIC: + writeNumericBinary(v.getBigDecimal()); + break; case PgServer.PG_TYPE_BYTEA: { byte[] data = v.getBytesNoCopy(); writeInt(data.length); write(data); break; } - case PgServer.PG_TYPE_DATE: { - ValueDate d = (ValueDate) v.convertTo(Value.DATE); + case PgServer.PG_TYPE_DATE: writeInt(4); - writeInt((int) (toPostgreDays(d.getDateValue()))); + writeInt((int) (toPostgreDays(((ValueDate) v).getDateValue()))); break; - } - case PgServer.PG_TYPE_TIME: { - ValueTime t = (ValueTime) v.convertTo(Value.TIME); - writeInt(8); + case PgServer.PG_TYPE_TIME: + writeTimeBinary(((ValueTime) v).getNanos(), 8); + break; + case PgServer.PG_TYPE_TIMETZ: { + ValueTimeTimeZone t = (ValueTimeTimeZone) v; long m = t.getNanos(); - if (INTEGER_DATE_TYPES) { - // long format - m /= 1_000; - } else { - // double format - m = Double.doubleToLongBits(m * 0.000_000_001); - } - dataOut.writeLong(m); + writeTimeBinary(m, 12); + dataOut.writeInt(-t.getTimeZoneOffsetSeconds()); break; } - case PgServer.PG_TYPE_TIMESTAMP_NO_TMZONE: { - ValueTimestamp t = (ValueTimestamp) v.convertTo(Value.TIMESTAMP); - writeInt(8); + case PgServer.PG_TYPE_TIMESTAMP: { + ValueTimestamp t = (ValueTimestamp) v; long m = toPostgreDays(t.getDateValue()) * 86_400; long nanos = t.getTimeNanos(); - if (INTEGER_DATE_TYPES) { - // long format - m = m * 1_000_000 + nanos / 1_000; - } else { - // double format - m = Double.doubleToLongBits(m + nanos * 0.000_000_001); + writeTimestampBinary(m, nanos); + break; + } + case PgServer.PG_TYPE_TIMESTAMPTZ: { + ValueTimestampTimeZone t = (ValueTimestampTimeZone) v; + long m = toPostgreDays(t.getDateValue()) * 86_400; + long nanos = t.getTimeNanos() - t.getTimeZoneOffsetSeconds() * 1_000_000_000L; + if (nanos < 0L) { + 
m--; + nanos += DateTimeUtils.NANOS_PER_DAY; } - dataOut.writeLong(m); + writeTimestampBinary(m, nanos); break; } default: throw new IllegalStateException("output binary format is undefined"); @@ -621,6 +740,92 @@ private void writeDataColumn(ResultSet rs, int column, int pgType, boolean text) } } + private static final int[] POWERS10 = {1, 10, 100, 1000, 10000}; + private static final int MAX_GROUP_SCALE = 4; + private static final int MAX_GROUP_SIZE = POWERS10[4]; + + private static int divide(BigInteger[] unscaled, int divisor) { + BigInteger[] bi = unscaled[0].divideAndRemainder(BigInteger.valueOf(divisor)); + unscaled[0] = bi[0]; + return bi[1].intValue(); + } + + // https://www.npgsql.org/dev/types.html + // https://github.com/npgsql/npgsql/blob/8a479081f707784b5040747b23102c3d6371b9d3/ + // src/Npgsql/TypeHandlers/NumericHandlers/NumericHandler.cs#L166 + private void writeNumericBinary(BigDecimal value) throws IOException { + int weight = 0; + List groups = new ArrayList<>(); + int scale = value.scale(); + int signum = value.signum(); + if (signum != 0) { + BigInteger[] unscaled = {null}; + if (scale < 0) { + unscaled[0] = value.setScale(0).unscaledValue(); + scale = 0; + } else { + unscaled[0] = value.unscaledValue(); + } + if (signum < 0) { + unscaled[0] = unscaled[0].negate(); + } + weight = -scale / MAX_GROUP_SCALE - 1; + int remainder = 0; + int scaleChunk = scale % MAX_GROUP_SCALE; + if (scaleChunk > 0) { + remainder = divide(unscaled, POWERS10[scaleChunk]) * POWERS10[MAX_GROUP_SCALE - scaleChunk]; + if (remainder != 0) { + weight--; + } + } + if (remainder == 0) { + while ((remainder = divide(unscaled, MAX_GROUP_SIZE)) == 0) { + weight++; + } + } + groups.add(remainder); + while (unscaled[0].signum() != 0) { + groups.add(divide(unscaled, MAX_GROUP_SIZE)); + } + } + int groupCount = groups.size(); + if (groupCount + weight > Short.MAX_VALUE || scale > Short.MAX_VALUE) { + throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, value.toString()); 
+ } + writeInt(8 + groupCount * 2); + writeShort(groupCount); + writeShort(groupCount + weight); + writeShort(signum < 0 ? 16384 : 0); + writeShort(scale); + for (int i = groupCount - 1; i >= 0; i--) { + writeShort(groups.get(i)); + } + } + + private void writeTimeBinary(long m, int numBytes) throws IOException { + writeInt(numBytes); + if (INTEGER_DATE_TYPES) { + // long format + m /= 1_000; + } else { + // double format + m = Double.doubleToLongBits(m * 0.000_000_001); + } + dataOut.writeLong(m); + } + + private void writeTimestampBinary(long m, long nanos) throws IOException { + writeInt(8); + if (INTEGER_DATE_TYPES) { + // long format + m = m * 1_000_000 + nanos / 1_000; + } else { + // double format + m = Double.doubleToLongBits(m + nanos * 0.000_000_001); + } + dataOut.writeLong(m); + } + private Charset getEncoding() { if ("UNICODE".equals(clientEncoding)) { return StandardCharsets.UTF_8; @@ -628,13 +833,18 @@ private Charset getEncoding() { return Charset.forName(clientEncoding); } - private void setParameter(PreparedStatement prep, - int pgType, int i, int[] formatCodes) throws SQLException, IOException { - boolean text = (i >= formatCodes.length) || (formatCodes[i] == 0); - int col = i + 1; + private void setParameter(ArrayList parameters, int pgType, int i, int[] formatCodes) + throws IOException { + boolean text = true; + if (formatCodes.length == 1) { + text = formatCodes[0] == 0; + } else if (i < formatCodes.length) { + text = formatCodes[i] == 0; + } int paramLen = readInt(); + Value value; if (paramLen == -1) { - prep.setNull(col, Types.NULL); + value = ValueNull.INSTANCE; } else if (text) { // plain text byte[] data = Utils.newBytes(paramLen); @@ -661,42 +871,43 @@ private void setParameter(PreparedStatement prep, break; } } - prep.setString(col, str); + value = ValueVarchar.get(str, session); } else { // binary switch (pgType) { case PgServer.PG_TYPE_INT2: checkParamLength(2, paramLen); - prep.setShort(col, readShort()); + value = 
ValueSmallint.get(readShort()); break; case PgServer.PG_TYPE_INT4: checkParamLength(4, paramLen); - prep.setInt(col, readInt()); + value = ValueInteger.get(readInt()); break; case PgServer.PG_TYPE_INT8: checkParamLength(8, paramLen); - prep.setLong(col, dataIn.readLong()); + value = ValueBigint.get(dataIn.readLong()); break; case PgServer.PG_TYPE_FLOAT4: checkParamLength(4, paramLen); - prep.setFloat(col, dataIn.readFloat()); + value = ValueReal.get(dataIn.readFloat()); break; case PgServer.PG_TYPE_FLOAT8: checkParamLength(8, paramLen); - prep.setDouble(col, dataIn.readDouble()); + value = ValueDouble.get(dataIn.readDouble()); break; case PgServer.PG_TYPE_BYTEA: byte[] d1 = Utils.newBytes(paramLen); readFully(d1); - prep.setBytes(col, d1); + value = ValueVarbinary.getNoCopy(d1); break; default: server.trace("Binary format for type: "+pgType+" is unsupported"); byte[] d2 = Utils.newBytes(paramLen); readFully(d2); - prep.setString(col, new String(d2, getEncoding())); + value = ValueVarchar.get(new String(d2, getEncoding()), session); } } + parameters.get(i).setValue(value, true); } private static void checkParamLength(int expected, int got) { @@ -705,6 +916,14 @@ private static void checkParamLength(int expected, int got) { } } + private void sendErrorOrCancelResponse(Exception e) throws IOException { + if (e instanceof DbException && ((DbException) e).getErrorCode() == ErrorCode.STATEMENT_WAS_CANCELED) { + sendCancelQueryResponse(); + } else { + sendErrorResponse(e); + } + } + private void sendErrorResponse(Exception re) throws IOException { SQLException e = DbException.toSQLException(re); server.traceError(e); @@ -734,9 +953,9 @@ private void sendCancelQueryResponse() throws IOException { sendMessage(); } - private void sendParameterDescription(ParameterMetaData meta, - int[] paramTypes) throws Exception { - int count = meta.getParameterCount(); + private void sendParameterDescription(ArrayList parameters, int[] paramTypes) + throws Exception { + int count = 
parameters.size(); startMessage('t'); writeShort(count); for (int i = 0; i < count; i++) { @@ -757,18 +976,32 @@ private void sendNoData() throws IOException { sendMessage(); } - private void sendRowDescription(ResultSetMetaData meta) throws IOException, SQLException { - if (meta == null) { + private void sendRowDescription(ResultInterface result, int[] formatCodes) throws IOException { + if (result == null) { sendNoData(); } else { - int columns = meta.getColumnCount(); + int columns = result.getVisibleColumnCount(); + int[] oids = new int[columns]; + int[] attnums = new int[columns]; int[] types = new int[columns]; int[] precision = new int[columns]; String[] names = new String[columns]; + Database database = session.getDatabase(); for (int i = 0; i < columns; i++) { - String name = meta.getColumnName(i + 1); + String name = result.getColumnName(i); + Schema schema = database.findSchema(result.getSchemaName(i)); + if (schema != null) { + Table table = schema.findTableOrView(session, result.getTableName(i)); + if (table != null) { + oids[i] = table.getId(); + Column column = table.findColumn(name); + if (column != null) { + attnums[i] = column.getColumnId() + 1; + } + } + } names[i] = name; - int type = meta.getColumnType(i + 1); + TypeInfo type = result.getColumnType(i); int pgType = PgServer.convertType(type); // the ODBC client needs the column pg_catalog.pg_index // to be of type 'int2vector' @@ -777,8 +1010,8 @@ private void sendRowDescription(ResultSetMetaData meta) throws IOException, SQLE // meta.getTableName(i + 1))) { // type = PgServer.PG_TYPE_INT2VECTOR; // } - precision[i] = meta.getColumnDisplaySize(i + 1); - if (type != Types.NULL) { + precision[i] = type.getDisplaySize(); + if (type.getValueType() != Value.NULL) { server.checkType(pgType); } types[i] = pgType; @@ -788,9 +1021,9 @@ private void sendRowDescription(ResultSetMetaData meta) throws IOException, SQLE for (int i = 0; i < columns; i++) { writeString(StringUtils.toLowerEnglish(names[i])); // 
object ID - writeInt(0); + writeInt(oids[i]); // attribute number of the column - writeShort(0); + writeShort(attnums[i]); // data type writeInt(types[i]); // pg_type.typlen @@ -798,7 +1031,7 @@ private void sendRowDescription(ResultSetMetaData meta) throws IOException, SQLE // pg_attribute.atttypmod writeInt(-1); // the format type: text = 0, binary = 1 - writeShort(formatAsText(types[i]) ? 0 : 1); + writeShort(formatAsText(types[i], formatCodes, i) ? 0 : 1); } sendMessage(); } @@ -807,16 +1040,21 @@ private void sendRowDescription(ResultSetMetaData meta) throws IOException, SQLE /** * Check whether the given type should be formatted as text. * - * @return true for binary + * @param pgType data type + * @param formatCodes format codes, or {@code null} + * @param column 0-based column number + * @return true for text */ - private static boolean formatAsText(int pgType) { - switch (pgType) { - // TODO: add more types to send as binary once compatibility is - // confirmed - case PgServer.PG_TYPE_BYTEA: - return false; + private static boolean formatAsText(int pgType, int[] formatCodes, int column) { + boolean text = true; + if (formatCodes != null && formatCodes.length > 0) { + if (formatCodes.length == 1) { + text = formatCodes[0] == 0; + } else if (column < formatCodes.length) { + text = formatCodes[column] == 0; + } } - return true; + return text; } private static int getTypeSize(int pgType, int precision) { @@ -858,60 +1096,19 @@ private void sendCloseComplete() throws IOException { sendMessage(); } - private void initDb() throws SQLException { - Statement stat = null; - try { - synchronized (server) { - // better would be: set the database to exclusive mode - boolean tableFound; - try (ResultSet rs = conn.getMetaData().getTables(null, "PG_CATALOG", "PG_VERSION", null)) { - tableFound = rs.next(); - } - stat = conn.createStatement(); - if (!tableFound) { - installPgCatalog(stat); - } - try (ResultSet rs = stat.executeQuery("select * from pg_catalog.pg_version")) 
{ - if (!rs.next() || rs.getInt(1) < 2) { - // installation incomplete, or old version - installPgCatalog(stat); - } else { - // version 2 or newer: check the read version - int versionRead = rs.getInt(2); - if (versionRead > 2) { - throw DbException.throwInternalError("Incompatible PG_VERSION"); - } - } - } - } - stat.execute("set search_path = PUBLIC, pg_catalog"); - HashSet typeSet = server.getTypeSet(); - if (typeSet.isEmpty()) { - try (ResultSet rs = stat.executeQuery("select oid from pg_catalog.pg_type")) { - while (rs.next()) { - typeSet.add(rs.getInt(1)); - } - } - } - } finally { - JdbcUtils.closeSilently(stat); + private void initDb() { + session.setTimeZone(timeZone); + try (CommandInterface command = session.prepareLocal("set search_path = public, pg_catalog")) { + command.executeUpdate(null); } - } - - private static void installPgCatalog(Statement stat) throws SQLException { - try (Reader r = new InputStreamReader(new ByteArrayInputStream(Utils - .getResource("/org/h2/server/pg/pg_catalog.sql")))) { - ScriptReader reader = new ScriptReader(r); - while (true) { - String sql = reader.readStatement(); - if (sql == null) { - break; + HashSet typeSet = server.getTypeSet(); + if (typeSet.isEmpty()) { + try (CommandInterface command = session.prepareLocal("select oid from pg_catalog.pg_type"); + ResultInterface result = command.executeQuery(0, false)) { + while (result.next()) { + typeSet.add(result.currentRow()[0].getInt()); } - stat.execute(sql); } - reader.close(); - } catch (IOException e) { - throw DbException.convertIOException(e, "Can not read pg_catalog resource"); } } @@ -919,9 +1116,16 @@ private static void installPgCatalog(Statement stat) throws SQLException { * Close this connection. 
*/ void close() { + for (Prepared prep : prepared.values()) { + prep.close(); + } try { stop = true; - JdbcUtils.closeSilently(conn); + try { + session.close(); + } catch (Exception e) { + // Ignore + } if (socket != null) { socket.close(); } @@ -929,7 +1133,7 @@ void close() { } catch (Exception e) { server.traceError(e); } - conn = null; + session = null; socket = null; server.remove(this); } @@ -946,35 +1150,22 @@ private void sendAuthenticationOk() throws IOException { sendMessage(); sendParameterStatus("client_encoding", clientEncoding); sendParameterStatus("DateStyle", dateStyle); - sendParameterStatus("integer_datetimes", "off"); sendParameterStatus("is_superuser", "off"); sendParameterStatus("server_encoding", "SQL_ASCII"); sendParameterStatus("server_version", Constants.PG_VERSION); sendParameterStatus("session_authorization", userName); sendParameterStatus("standard_conforming_strings", "off"); - // TODO PostgreSQL TimeZone - sendParameterStatus("TimeZone", "CET"); - sendParameterStatus("integer_datetimes", INTEGER_DATE_TYPES ? "on" : "off"); + sendParameterStatus("TimeZone", pgTimeZone(timeZone.getId())); + // Don't inline, see https://bugs.eclipse.org/bugs/show_bug.cgi?id=569498 + String value = INTEGER_DATE_TYPES ? "on" : "off"; + sendParameterStatus("integer_datetimes", value); sendBackendKeyData(); sendReadyForQuery(); } private void sendReadyForQuery() throws IOException { startMessage('Z'); - char c; - try { - if (conn.getAutoCommit()) { - // idle - c = 'I'; - } else { - // in a transaction block - c = 'T'; - } - } catch (SQLException e) { - // failed transaction block - c = 'E'; - } - write((byte) c); + write((byte) (session.getAutoCommit() ? 
/* idle */ 'I' : /* in a transaction block */ 'T')); sendMessage(); } @@ -1006,24 +1197,30 @@ private void write(byte[] data) throws IOException { dataOut.write(data); } + private void write(ByteArrayOutputStream baos) throws IOException { + baos.writeTo(dataOut); + } + private void write(int b) throws IOException { dataOut.write(b); } private void startMessage(int newMessageType) { this.messageType = newMessageType; - outBuffer = new ByteArrayOutputStream(); + if (outBuffer.size() <= 65_536) { + outBuffer.reset(); + } else { + outBuffer = new ByteArrayOutputStream(); + } dataOut = new DataOutputStream(outBuffer); } private void sendMessage() throws IOException { dataOut.flush(); - byte[] buff = outBuffer.toByteArray(); - int len = buff.length; dataOut = new DataOutputStream(out); - dataOut.write(messageType); - dataOut.writeInt(len + 4); - dataOut.write(buff); + write(messageType); + writeInt(outBuffer.size() + 4); + write(outBuffer); dataOut.flush(); } @@ -1051,7 +1248,7 @@ int getProcessId() { return this.processId; } - private synchronized void setActiveRequest(JdbcStatement statement) { + private synchronized void setActiveRequest(CommandInterface statement) { activeRequest = statement; } @@ -1060,12 +1257,8 @@ private synchronized void setActiveRequest(JdbcStatement statement) { */ private synchronized void cancelRequest() { if (activeRequest != null) { - try { - activeRequest.cancel(); - activeRequest = null; - } catch (SQLException e) { - throw DbException.convert(e); - } + activeRequest.cancel(); + activeRequest = null; } } @@ -1087,12 +1280,40 @@ static class Prepared { /** * The prepared statement. */ - JdbcPreparedStatement prep; + CommandInterface prep; + + /** + * The current result (for suspended portal). + */ + ResultInterface result; /** * The list of parameter types (if set). */ int[] paramType; + + /** + * Closes prepared statement and result, if any. 
+ */ + void close() { + try { + closeResult(); + prep.close(); + } catch (Exception e) { + // Ignore + } + } + + /** + * Closes the result, if any. + */ + void closeResult() { + ResultInterface result = this.result; + if (result != null) { + this.result = null; + result.close(); + } + } } /** diff --git a/h2/src/main/org/h2/server/pg/package.html b/h2/src/main/org/h2/server/pg/package.html index 764df7cf2f..0a3346d9f6 100644 --- a/h2/src/main/org/h2/server/pg/package.html +++ b/h2/src/main/org/h2/server/pg/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/pg/pg_catalog.sql b/h2/src/main/org/h2/server/pg/pg_catalog.sql deleted file mode 100644 index 8942f1b6d2..0000000000 --- a/h2/src/main/org/h2/server/pg/pg_catalog.sql +++ /dev/null @@ -1,379 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -; -drop schema if exists pg_catalog cascade; -create schema pg_catalog; - -drop alias if exists pg_convertType; -create alias pg_convertType deterministic for "org.h2.server.pg.PgServer.convertType"; - -drop alias if exists pg_get_oid; -create alias pg_get_oid deterministic for "org.h2.server.pg.PgServer.getOid"; - -create table pg_catalog.pg_version as select 2 as version, 2 as version_read; -grant select on pg_catalog.pg_version to PUBLIC; - -create view pg_catalog.pg_roles -- (oid, rolname, rolcreaterole, rolcreatedb) -as -select - id oid, - cast(name as varchar_ignorecase) rolname, - case when admin then 't' else 'f' end as rolcreaterole, - case when admin then 't' else 'f' end as rolcreatedb -from INFORMATION_SCHEMA.users; -grant select on pg_catalog.pg_roles to PUBLIC; - -create view pg_catalog.pg_namespace -- (oid, nspname) -as -select - id oid, - cast(schema_name as varchar_ignorecase) nspname -from INFORMATION_SCHEMA.schemata; -grant select on pg_catalog.pg_namespace to PUBLIC; - -create table pg_catalog.pg_type( - oid 
int primary key, - typname varchar_ignorecase, - typnamespace int, - typlen int, - typtype varchar, - typbasetype int, - typtypmod int, - typnotnull boolean, - typinput varchar -); -grant select on pg_catalog.pg_type to PUBLIC; - -insert into pg_catalog.pg_type -select - pg_convertType(data_type) oid, - cast(type_name as varchar_ignorecase) typname, - (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog') typnamespace, - -1 typlen, - 'c' typtype, - 0 typbasetype, - -1 typtypmod, - false typnotnull, - null typinput -from INFORMATION_SCHEMA.type_info -where pos = 0 - and pg_convertType(data_type) <> 705; -- not unknown - -merge into pg_catalog.pg_type values( - 19, - 'name', - (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog'), - -1, - 'c', - 0, - -1, - false, - null -); -merge into pg_catalog.pg_type values( - 0, - 'null', - (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog'), - -1, - 'c', - 0, - -1, - false, - null -); -merge into pg_catalog.pg_type values( - 22, - 'int2vector', - (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog'), - -1, - 'c', - 0, - -1, - false, - null -); -merge into pg_catalog.pg_type values( - 2205, - 'regproc', - (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog'), - 4, - 'b', - 0, - -1, - false, - null -); - -drop domain if exists regproc cascade; -create domain regproc as varchar_ignorecase; - -create view pg_catalog.pg_class -- (oid, relname, relnamespace, relkind, relam, reltuples, reltablespace, relpages, relhasindex, relhasrules, relhasoids, relchecks, reltriggers) -as -select - id oid, - cast(table_name as varchar_ignorecase) relname, - (select id from INFORMATION_SCHEMA.schemata where schema_name = table_schema) relnamespace, - case table_type when 'TABLE' then 'r' else 'v' end relkind, - 0 relam, - cast(0 as float) reltuples, - 0 reltablespace, - 0 relpages, - false relhasindex, - false relhasrules, - false relhasoids, - cast(0 as smallint) 
relchecks, - (select count(*) from INFORMATION_SCHEMA.triggers t where t.table_schema = table_schema and t.table_name = table_name) reltriggers -from INFORMATION_SCHEMA.tables -union all -select - id oid, - cast(index_name as varchar_ignorecase) relname, - (select id from INFORMATION_SCHEMA.schemata where schema_name = table_schema) relnamespace, - 'i' relkind, - 0 relam, - cast(0 as float) reltuples, - 0 reltablespace, - 0 relpages, - true relhasindex, - false relhasrules, - false relhasoids, - cast(0 as smallint) relchecks, - 0 reltriggers -from INFORMATION_SCHEMA.indexes; -grant select on pg_catalog.pg_class to PUBLIC; - -create table pg_catalog.pg_proc( - oid int, - proname varchar_ignorecase, - prorettype int, - pronamespace int -); -grant select on pg_catalog.pg_proc to PUBLIC; - -create table pg_catalog.pg_trigger( - oid int, - tgconstrrelid int, - tgfoid int, - tgargs int, - tgnargs int, - tgdeferrable boolean, - tginitdeferred boolean, - tgconstrname varchar_ignorecase, - tgrelid int -); -grant select on pg_catalog.pg_trigger to PUBLIC; - -create view pg_catalog.pg_attrdef -- (oid, adsrc, adrelid, adnum) -as -select - id oid, - 0 adsrc, - 0 adrelid, - 0 adnum, - null adbin -from INFORMATION_SCHEMA.tables where 1=0; -grant select on pg_catalog.pg_attrdef to PUBLIC; - -create view pg_catalog.pg_attribute -- (oid, attrelid, attname, atttypid, attlen, attnum, atttypmod, attnotnull, attisdropped, atthasdef) -as -select - t.id*10000 + c.ordinal_position oid, - t.id attrelid, - c.column_name attname, - pg_convertType(data_type) atttypid, - case when numeric_precision > 255 then -1 else numeric_precision end attlen, - c.ordinal_position attnum, - -1 atttypmod, - case c.is_nullable when 'YES' then false else true end attnotnull, - false attisdropped, - false atthasdef -from INFORMATION_SCHEMA.tables t, INFORMATION_SCHEMA.columns c -where t.table_name = c.table_name -and t.table_schema = c.table_schema -union all -select - 1000000 + t.id*10000 + c.ordinal_position 
oid, - i.id attrelid, - c.column_name attname, - pg_convertType(data_type) atttypid, - case when numeric_precision > 255 then -1 else numeric_precision end attlen, - c.ordinal_position attnum, - -1 atttypmod, - case c.is_nullable when 'YES' then false else true end attnotnull, - false attisdropped, - false atthasdef -from INFORMATION_SCHEMA.tables t, INFORMATION_SCHEMA.indexes i, INFORMATION_SCHEMA.columns c -where t.table_name = i.table_name -and t.table_schema = i.table_schema -and t.table_name = c.table_name -and t.table_schema = c.table_schema; -grant select on pg_catalog.pg_attribute to PUBLIC; - -create view pg_catalog.pg_index -- (oid, indexrelid, indrelid, indisclustered, indisunique, indisprimary, indexprs, indkey, indpred) -as -select - i.id oid, - i.id indexrelid, - t.id indrelid, - false indisclustered, - not non_unique indisunique, - primary_key indisprimary, - cast('' as varchar_ignorecase) indexprs, - cast(1 as array) indkey, - null indpred -from INFORMATION_SCHEMA.indexes i, INFORMATION_SCHEMA.tables t -where i.table_schema = t.table_schema -and i.table_name = t.table_name -and i.ordinal_position = 1 --- workaround for MS Access problem opening tables with primary key -and 1=0; -grant select on pg_catalog.pg_index to PUBLIC; - -drop alias if exists pg_get_indexdef; -create alias pg_get_indexdef for "org.h2.server.pg.PgServer.getIndexColumn"; - -drop alias if exists pg_catalog.pg_get_indexdef; -create alias pg_catalog.pg_get_indexdef for "org.h2.server.pg.PgServer.getIndexColumn"; - -drop alias if exists pg_catalog.pg_get_expr; -create alias pg_catalog.pg_get_expr for "org.h2.server.pg.PgServer.getPgExpr"; - -drop alias if exists pg_catalog.format_type; -create alias pg_catalog.format_type for "org.h2.server.pg.PgServer.formatType"; - -drop alias if exists version; -create alias version for "org.h2.server.pg.PgServer.getVersion"; - -drop alias if exists current_schema; -create alias current_schema for "org.h2.server.pg.PgServer.getCurrentSchema"; - 
-drop alias if exists pg_encoding_to_char; -create alias pg_encoding_to_char for "org.h2.server.pg.PgServer.getEncodingName"; - -drop alias if exists pg_postmaster_start_time; -create alias pg_postmaster_start_time for "org.h2.server.pg.PgServer.getStartTime"; - -drop alias if exists pg_get_userbyid; -create alias pg_get_userbyid for "org.h2.server.pg.PgServer.getUserById"; - -drop alias if exists has_database_privilege; -create alias has_database_privilege for "org.h2.server.pg.PgServer.hasDatabasePrivilege"; - -drop alias if exists has_table_privilege; -create alias has_table_privilege for "org.h2.server.pg.PgServer.hasTablePrivilege"; - -drop alias if exists currtid2; -create alias currtid2 for "org.h2.server.pg.PgServer.getCurrentTid"; - -create table pg_catalog.pg_database( - oid int, - datname varchar_ignorecase, - encoding int, - datlastsysoid int, - datallowconn boolean, - datconfig array, -- text[] - datacl array, -- aclitem[] - datdba int, - dattablespace int -); -grant select on pg_catalog.pg_database to PUBLIC; - -insert into pg_catalog.pg_database values( - 0, -- oid - 'postgres', -- datname - 6, -- encoding, UTF8 - 100000, -- datlastsysoid - true, -- datallowconn - null, -- datconfig - null, -- datacl - select min(id) from INFORMATION_SCHEMA.users where admin=true, -- datdba - 0 -- dattablespace -); - -create table pg_catalog.pg_tablespace( - oid int, - spcname varchar_ignorecase, - spclocation varchar_ignorecase, - spcowner int, - spcacl array -- aclitem[] -); -grant select on pg_catalog.pg_tablespace to PUBLIC; - -insert into pg_catalog.pg_tablespace values( - 0, - 'main', -- spcname - '?', -- spclocation - 0, -- spcowner, - null -- spcacl -); - -create table pg_catalog.pg_settings( - oid int, - name varchar_ignorecase, - setting varchar_ignorecase -); -grant select on pg_catalog.pg_settings to PUBLIC; - -insert into pg_catalog.pg_settings values -(0, 'autovacuum', 'on'), -(1, 'stats_start_collector', 'on'), -(2, 'stats_row_level', 'on'); - -create 
view pg_catalog.pg_user -- oid, usename, usecreatedb, usesuper -as -select - id oid, - cast(name as varchar_ignorecase) usename, - true usecreatedb, - true usesuper -from INFORMATION_SCHEMA.users; -grant select on pg_catalog.pg_user to PUBLIC; - -create table pg_catalog.pg_authid( - oid int, - rolname varchar_ignorecase, - rolsuper boolean, - rolinherit boolean, - rolcreaterole boolean, - rolcreatedb boolean, - rolcatupdate boolean, - rolcanlogin boolean, - rolconnlimit boolean, - rolpassword boolean, - rolvaliduntil timestamp, -- timestamptz - rolconfig array -- text[] -); -grant select on pg_catalog.pg_authid to PUBLIC; - -create table pg_catalog.pg_am(oid int, amname varchar_ignorecase); -grant select on pg_catalog.pg_am to PUBLIC; -insert into pg_catalog.pg_am values(0, 'btree'); -insert into pg_catalog.pg_am values(1, 'hash'); - -create table pg_catalog.pg_description -- (objoid, objsubid, classoid, description) -as -select - oid objoid, - 0 objsubid, - -1 classoid, - cast(datname as varchar_ignorecase) description -from pg_catalog.pg_database; -grant select on pg_catalog.pg_description to PUBLIC; - -create table pg_catalog.pg_group -- oid, groname -as -select - 0 oid, - cast('' as varchar_ignorecase) groname -from pg_catalog.pg_database where 1=0; -grant select on pg_catalog.pg_group to PUBLIC; - -create table pg_catalog.pg_inherits( - inhrelid int, - inhparent int, - inhseqno int -); -grant select on pg_catalog.pg_inherits to PUBLIC; diff --git a/h2/src/main/org/h2/server/web/ConnectionInfo.java b/h2/src/main/org/h2/server/web/ConnectionInfo.java index 5edb718c10..2b6fcdb9ab 100644 --- a/h2/src/main/org/h2/server/web/ConnectionInfo.java +++ b/h2/src/main/org/h2/server/web/ConnectionInfo.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.web; @@ -60,7 +60,7 @@ String getString() { @Override public int compareTo(ConnectionInfo o) { - return -Integer.compare(lastAccess, o.lastAccess); + return Integer.compare(o.lastAccess, lastAccess); } } diff --git a/h2/src/main/org/h2/server/web/DbStarter.java b/h2/src/main/org/h2/server/web/DbStarter.java index 73a2479f91..3cbb46515b 100644 --- a/h2/src/main/org/h2/server/web/DbStarter.java +++ b/h2/src/main/org/h2/server/web/DbStarter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.web; diff --git a/h2/src/main/org/h2/server/web/JakartaDbStarter.java b/h2/src/main/org/h2/server/web/JakartaDbStarter.java new file mode 100644 index 0000000000..1547672b97 --- /dev/null +++ b/h2/src/main/org/h2/server/web/JakartaDbStarter.java @@ -0,0 +1,93 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.server.web; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; + +import jakarta.servlet.ServletContext; +import jakarta.servlet.ServletContextEvent; +import jakarta.servlet.ServletContextListener; + +import org.h2.tools.Server; +import org.h2.util.StringUtils; + +/** + * This class can be used to start the H2 TCP server (or other H2 servers, for + * example the PG server) inside a Jakarta web application container such as + * Tomcat or Jetty. It can also open a database connection. 
+ */ +public class JakartaDbStarter implements ServletContextListener { + + private Connection conn; + private Server server; + + @Override + public void contextInitialized(ServletContextEvent servletContextEvent) { + try { + org.h2.Driver.load(); + + // This will get the setting from a context-param in web.xml if + // defined: + ServletContext servletContext = servletContextEvent.getServletContext(); + String url = getParameter(servletContext, "db.url", "jdbc:h2:~/test"); + String user = getParameter(servletContext, "db.user", "sa"); + String password = getParameter(servletContext, "db.password", "sa"); + + // Start the server if configured to do so + String serverParams = getParameter(servletContext, "db.tcpServer", null); + if (serverParams != null) { + String[] params = StringUtils.arraySplit(serverParams, ' ', true); + server = Server.createTcpServer(params); + server.start(); + } + + // To access the database in server mode, use the database URL: + // jdbc:h2:tcp://localhost/~/test + conn = DriverManager.getConnection(url, user, password); + servletContext.setAttribute("connection", conn); + } catch (Exception e) { + e.printStackTrace(); + } + } + + private static String getParameter(ServletContext servletContext, + String key, String defaultValue) { + String value = servletContext.getInitParameter(key); + return value == null ? defaultValue : value; + } + + /** + * Get the connection. 
+ * + * @return the connection + */ + public Connection getConnection() { + return conn; + } + + @Override + public void contextDestroyed(ServletContextEvent servletContextEvent) { + try { + Statement stat = conn.createStatement(); + stat.execute("SHUTDOWN"); + stat.close(); + } catch (Exception e) { + e.printStackTrace(); + } + try { + conn.close(); + } catch (Exception e) { + e.printStackTrace(); + } + if (server != null) { + server.stop(); + server = null; + } + } + +} diff --git a/h2/src/main/org/h2/server/web/JakartaWebServlet.java b/h2/src/main/org/h2/server/web/JakartaWebServlet.java new file mode 100644 index 0000000000..260266e0e1 --- /dev/null +++ b/h2/src/main/org/h2/server/web/JakartaWebServlet.java @@ -0,0 +1,169 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.server.web; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.Properties; + +import jakarta.servlet.ServletConfig; +import jakarta.servlet.ServletOutputStream; +import jakarta.servlet.http.HttpServlet; +import jakarta.servlet.http.HttpServletRequest; +import jakarta.servlet.http.HttpServletResponse; + +import org.h2.util.NetworkConnectionInfo; + +/** + * This servlet lets the H2 Console be used in a Jakarta servlet container + * such as Tomcat or Jetty. 
+ */ +public class JakartaWebServlet extends HttpServlet { + + private static final long serialVersionUID = 1L; + private transient WebServer server; + + @Override + public void init() { + ServletConfig config = getServletConfig(); + Enumeration en = config.getInitParameterNames(); + ArrayList list = new ArrayList<>(); + while (en.hasMoreElements()) { + String name = en.nextElement().toString(); + String value = config.getInitParameter(name); + if (!name.startsWith("-")) { + name = "-" + name; + } + list.add(name); + if (value.length() > 0) { + list.add(value); + } + } + String[] args = list.toArray(new String[0]); + server = new WebServer(); + server.setAllowChunked(false); + server.init(args); + } + + @Override + public void destroy() { + server.stop(); + } + + private boolean allow(HttpServletRequest req) { + if (server.getAllowOthers()) { + return true; + } + String addr = req.getRemoteAddr(); + try { + InetAddress address = InetAddress.getByName(addr); + return address.isLoopbackAddress(); + } catch (UnknownHostException | NoClassDefFoundError e) { + // Google App Engine does not allow java.net.InetAddress + return false; + } + + } + + private String getAllowedFile(HttpServletRequest req, String requestedFile) { + if (!allow(req)) { + return "notAllowed.jsp"; + } + if (requestedFile.length() == 0) { + return "index.do"; + } + return requestedFile; + } + + @Override + public void doGet(HttpServletRequest req, HttpServletResponse resp) + throws IOException { + req.setCharacterEncoding("utf-8"); + String file = req.getPathInfo(); + if (file == null) { + resp.sendRedirect(req.getRequestURI() + "/"); + return; + } else if (file.startsWith("/")) { + file = file.substring(1); + } + file = getAllowedFile(req, file); + + // extract the request attributes + Properties attributes = new Properties(); + Enumeration en = req.getAttributeNames(); + while (en.hasMoreElements()) { + String name = en.nextElement().toString(); + String value = req.getAttribute(name).toString(); 
+ attributes.put(name, value); + } + en = req.getParameterNames(); + while (en.hasMoreElements()) { + String name = en.nextElement().toString(); + String value = req.getParameter(name); + attributes.put(name, value); + } + + WebSession session = null; + String sessionId = attributes.getProperty("jsessionid"); + if (sessionId != null) { + session = server.getSession(sessionId); + } + WebApp app = new WebApp(server); + app.setSession(session, attributes); + String ifModifiedSince = req.getHeader("if-modified-since"); + + String scheme = req.getScheme(); + StringBuilder builder = new StringBuilder(scheme).append("://").append(req.getServerName()); + int serverPort = req.getServerPort(); + if (!(serverPort == 80 && scheme.equals("http") || serverPort == 443 && scheme.equals("https"))) { + builder.append(':').append(serverPort); + } + String path = builder.append(req.getContextPath()).toString(); + file = app.processRequest(file, new NetworkConnectionInfo(path, req.getRemoteAddr(), req.getRemotePort())); + session = app.getSession(); + + String mimeType = app.getMimeType(); + boolean cache = app.getCache(); + + if (cache && server.getStartDateTime().equals(ifModifiedSince)) { + resp.setStatus(HttpServletResponse.SC_NOT_MODIFIED); + return; + } + byte[] bytes = server.getFile(file); + if (bytes == null) { + resp.sendError(HttpServletResponse.SC_NOT_FOUND); + bytes = ("File not found: " + file).getBytes(StandardCharsets.UTF_8); + } else { + if (session != null && file.endsWith(".jsp")) { + String page = new String(bytes, StandardCharsets.UTF_8); + page = PageParser.parse(page, session.map); + bytes = page.getBytes(StandardCharsets.UTF_8); + } + resp.setContentType(mimeType); + if (!cache) { + resp.setHeader("Cache-Control", "no-cache"); + } else { + resp.setHeader("Cache-Control", "max-age=10"); + resp.setHeader("Last-Modified", server.getStartDateTime()); + } + } + if (bytes != null) { + ServletOutputStream out = resp.getOutputStream(); + out.write(bytes); + } + } + + 
@Override + public void doPost(HttpServletRequest req, HttpServletResponse resp) + throws IOException { + doGet(req, resp); + } + +} diff --git a/h2/src/main/org/h2/server/web/PageParser.java b/h2/src/main/org/h2/server/web/PageParser.java index d228948a2b..78f8036d99 100644 --- a/h2/src/main/org/h2/server/web/PageParser.java +++ b/h2/src/main/org/h2/server/web/PageParser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.web; @@ -246,61 +246,61 @@ private static String escapeHtml(String s, boolean convertBreakAndSpace) { return " "; } } - StringBuilder buff = new StringBuilder(length); + StringBuilder builder = new StringBuilder(length); boolean convertSpace = true; - for (int i = 0; i < length; i++) { - char c = s.charAt(i); - if (c == ' ' || c == '\t') { + for (int i = 0; i < length;) { + int cp = s.codePointAt(i); + if (cp == ' ' || cp == '\t') { // convert tabs into spaces - for (int j = 0; j < (c == ' ' ? 1 : TAB_WIDTH); j++) { + for (int j = 0; j < (cp == ' ' ? 1 : TAB_WIDTH); j++) { if (convertSpace && convertBreakAndSpace) { - buff.append(" "); + builder.append(" "); } else { - buff.append(' '); + builder.append(' '); convertSpace = true; } } - continue; - } - convertSpace = false; - switch (c) { - case '$': - // so that ${ } in the text is interpreted correctly - buff.append("$"); - break; - case '<': - buff.append("<"); - break; - case '>': - buff.append(">"); - break; - case '&': - buff.append("&"); - break; - case '"': - buff.append("""); - break; - case '\'': - buff.append("'"); - break; - case '\n': - if (convertBreakAndSpace) { - buff.append("
          "); - convertSpace = true; - } else { - buff.append(c); - } - break; - default: - if (c >= 128) { - buff.append("&#").append((int) c).append(';'); - } else { - buff.append(c); + } else { + convertSpace = false; + switch (cp) { + case '$': + // so that ${ } in the text is interpreted correctly + builder.append("$"); + break; + case '<': + builder.append("<"); + break; + case '>': + builder.append(">"); + break; + case '&': + builder.append("&"); + break; + case '"': + builder.append("""); + break; + case '\'': + builder.append("'"); + break; + case '\n': + if (convertBreakAndSpace) { + builder.append("
          "); + convertSpace = true; + } else { + builder.append(cp); + } + break; + default: + if (cp >= 128) { + builder.append("&#").append(cp).append(';'); + } else { + builder.append((char) cp); + } } - break; } + i += Character.charCount(cp); } - return buff.toString(); + return builder.toString(); } /** diff --git a/h2/src/main/org/h2/server/web/WebApp.java b/h2/src/main/org/h2/server/web/WebApp.java index 3d534011d1..945403679c 100644 --- a/h2/src/main/org/h2/server/web/WebApp.java +++ b/h2/src/main/org/h2/server/web/WebApp.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.web; @@ -10,8 +10,6 @@ import java.io.PrintWriter; import java.io.StringReader; import java.io.StringWriter; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.math.BigDecimal; import java.nio.charset.StandardCharsets; import java.sql.Connection; @@ -40,6 +38,7 @@ import org.h2.bnf.context.DbContents; import org.h2.bnf.context.DbSchema; import org.h2.bnf.context.DbTableOrView; +import org.h2.command.Parser; import org.h2.engine.Constants; import org.h2.engine.SysProperties; import org.h2.jdbc.JdbcException; @@ -56,12 +55,16 @@ import org.h2.tools.Script; import org.h2.tools.SimpleResultSet; import org.h2.util.JdbcUtils; +import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.Profiler; import org.h2.util.ScriptReader; import org.h2.util.SortedProperties; import org.h2.util.StringUtils; import org.h2.util.Tool; import org.h2.util.Utils; +import org.h2.util.Utils10; +import org.h2.value.DataType; /** * For each connection to a session, an object of this class is created. 
@@ -69,6 +72,9 @@ */ public class WebApp { + private static final Comparator SYSTEM_SCHEMA_COMPARATOR = Comparator + .comparing(DbTableOrView::getName, String.CASE_INSENSITIVE_ORDER); + /** * The web server. */ @@ -125,10 +131,10 @@ void setSession(WebSession session, Properties attributes) { * Process an HTTP request. * * @param file the file that was requested - * @param hostAddr the host address + * @param networkConnectionInfo the network connection information * @return the name of the file to return to the client */ - String processRequest(String file, String hostAddr) { + String processRequest(String file, NetworkConnectionInfo networkConnectionInfo) { int index = file.lastIndexOf('.'); String suffix; if (index >= 0) { @@ -151,7 +157,8 @@ String processRequest(String file, String hostAddr) { cache = false; mimeType = "text/html"; if (session == null) { - session = server.createNewSession(hostAddr); + session = server.createNewSession( + NetUtils.ipToShortForm(null, networkConnectionInfo.getClientAddr(), false).toString()); if (!"notAllowed.jsp".equals(file)) { file = "index.do"; } @@ -166,13 +173,13 @@ String processRequest(String file, String hostAddr) { trace("mimeType=" + mimeType); trace(file); if (file.endsWith(".do")) { - file = process(file); + file = process(file, networkConnectionInfo); } else if (file.endsWith(".jsp")) { switch (file) { case "admin.jsp": case "tools.jsp": if (!checkAdmin(file)) { - file = process("adminLogin.do"); + file = process("adminLogin.do", networkConnectionInfo); } } } @@ -211,12 +218,12 @@ private static String getComboBox(String[][] elements, String selected) { return buff.toString(); } - private String process(String file) { + private String process(String file, NetworkConnectionInfo networkConnectionInfo) { trace("process " + file); while (file.endsWith(".do")) { switch (file) { case "login.do": - file = login(); + file = login(networkConnectionInfo); break; case "index.do": file = index(); @@ -231,7 +238,7 @@ private 
String process(String file) { file = settingSave(); break; case "test.do": - file = test(); + file = test(networkConnectionInfo); break; case "query.do": file = query(); @@ -375,7 +382,7 @@ private String autoCompleteList() { if (query.endsWith("\n") || tQuery.endsWith(";")) { list.add(0, "1#(Newline)#\n"); } - result = StringUtils.join(new StringBuilder(), list, "|").toString(); + result = String.join("|", list); } session.put("autoCompleteList", result); } catch (Throwable e) { @@ -387,6 +394,7 @@ private String autoCompleteList() { private String admin() { session.put("port", Integer.toString(server.getPort())); session.put("allowOthers", Boolean.toString(server.getAllowOthers())); + session.put("webExternalNames", server.getExternalNames()); session.put("ssl", String.valueOf(server.getSSL())); session.put("sessions", server.getSessions()); return "admin.jsp"; @@ -401,6 +409,9 @@ private String adminSave() { boolean allowOthers = Utils.parseBoolean((String) attributes.get("allowOthers"), false, false); prop.setProperty("webAllowOthers", String.valueOf(allowOthers)); server.setAllowOthers(allowOthers); + String externalNames = (String) attributes.get("webExternalNames"); + prop.setProperty("webExternalNames", externalNames); + server.setExternalNames(externalNames); boolean ssl = Utils.parseBoolean((String) attributes.get("ssl"), false, false); prop.setProperty("webSSL", String.valueOf(ssl)); server.setSSL(ssl); @@ -441,7 +452,7 @@ private String tools() { } else if ("CreateCluster".equals(toolName)) { tool = new CreateCluster(); } else { - throw DbException.throwInternalError(toolName); + throw DbException.getInternalError(toolName); } ByteArrayOutputStream outBuff = new ByteArrayOutputStream(); PrintStream out = new PrintStream(outBuff, false, "UTF-8"); @@ -449,7 +460,7 @@ private String tools() { try { tool.runTool(argList); out.flush(); - String o = new String(outBuff.toByteArray(), StandardCharsets.UTF_8); + String o = 
Utils10.byteArrayOutputStreamToString(outBuff, StandardCharsets.UTF_8); String result = PageParser.escapeHtml(o); session.put("toolResult", result); } catch (Exception e) { @@ -527,25 +538,24 @@ private String getHistory() { return "query.jsp"; } - private static int addColumns(boolean mainSchema, DbTableOrView table, - StringBuilder buff, int treeIndex, boolean showColumnTypes, - StringBuilder columnsBuffer) { + private static int addColumns(boolean mainSchema, DbTableOrView table, StringBuilder builder, int treeIndex, + boolean showColumnTypes, StringBuilder columnsBuilder) { DbColumn[] columns = table.getColumns(); for (int i = 0; columns != null && i < columns.length; i++) { DbColumn column = columns[i]; - if (columnsBuffer.length() > 0) { - columnsBuffer.append(' '); + if (columnsBuilder.length() > 0) { + columnsBuilder.append(' '); } - columnsBuffer.append(column.getName()); + columnsBuilder.append(column.getName()); String col = escapeIdentifier(column.getName()); String level = mainSchema ? 
", 1, 1" : ", 2, 2"; - buff.append("setNode(").append(treeIndex).append(level) + builder.append("setNode(").append(treeIndex).append(level) .append(", 'column', '") .append(PageParser.escapeJavaScript(column.getName())) .append("', 'javascript:ins(\\'").append(col).append("\\')');\n"); treeIndex++; if (mainSchema && showColumnTypes) { - buff.append("setNode(").append(treeIndex) + builder.append("setNode(").append(treeIndex) .append(", 2, 2, 'type', '") .append(PageParser.escapeJavaScript(column.getDataType())) .append("', null);\n"); @@ -647,8 +657,8 @@ private static int addIndexes(boolean mainSchema, DatabaseMetaData meta, return treeIndex; } - private int addTablesAndViews(DbSchema schema, boolean mainSchema, - StringBuilder buff, int treeIndex) throws SQLException { + private int addTablesAndViews(DbSchema schema, boolean mainSchema, StringBuilder builder, int treeIndex) + throws SQLException { if (schema == null) { return treeIndex; } @@ -662,80 +672,89 @@ private int addTablesAndViews(DbSchema schema, boolean mainSchema, if (tables == null) { return treeIndex; } - boolean isOracle = schema.getContents().isOracle(); + DbContents contents = schema.getContents(); + boolean isOracle = contents.isOracle(); boolean notManyTables = tables.length < SysProperties.CONSOLE_MAX_TABLES_LIST_INDEXES; - for (DbTableOrView table : tables) { - if (table.isView()) { - continue; - } - int tableId = treeIndex; - String tab = table.getQuotedName(); - if (!mainSchema) { - tab = schema.quotedName + "." 
+ tab; - } - tab = escapeIdentifier(tab); - buff.append("setNode(").append(treeIndex).append(indentation) - .append(" 'table', '") - .append(PageParser.escapeJavaScript(table.getName())) - .append("', 'javascript:ins(\\'").append(tab).append("\\',true)');\n"); - treeIndex++; - if (mainSchema || showColumns) { - StringBuilder columnsBuffer = new StringBuilder(); - treeIndex = addColumns(mainSchema, table, buff, treeIndex, - notManyTables, columnsBuffer); - if (!isOracle && notManyTables) { - treeIndex = addIndexes(mainSchema, meta, table.getName(), - schema.name, buff, treeIndex); + try (PreparedStatement prep = showColumns ? prepareViewDefinitionQuery(conn, contents) : null) { + if (prep != null) { + prep.setString(1, schema.name); + } + if (schema.isSystem) { + Arrays.sort(tables, SYSTEM_SCHEMA_COMPARATOR); + for (DbTableOrView table : tables) { + treeIndex = addTableOrView(schema, mainSchema, builder, treeIndex, meta, false, indentation, + isOracle, notManyTables, table, table.isView(), prep, indentNode); + } + } else { + for (DbTableOrView table : tables) { + if (table.isView()) { + continue; + } + treeIndex = addTableOrView(schema, mainSchema, builder, treeIndex, meta, showColumns, indentation, + isOracle, notManyTables, table, false, null, indentNode); + } + for (DbTableOrView table : tables) { + if (!table.isView()) { + continue; + } + treeIndex = addTableOrView(schema, mainSchema, builder, treeIndex, meta, showColumns, indentation, + isOracle, notManyTables, table, true, prep, indentNode); } - buff.append("addTable('") - .append(PageParser.escapeJavaScript(table.getName())).append("', '") - .append(PageParser.escapeJavaScript(columnsBuffer.toString())).append("', ") - .append(tableId).append(");\n"); } } - tables = schema.getTables(); - for (DbTableOrView view : tables) { - if (!view.isView()) { - continue; - } - int tableId = treeIndex; - String tab = view.getQuotedName(); - if (!mainSchema) { - tab = view.getSchema().quotedName + "." 
+ tab; + return treeIndex; + } + + private static PreparedStatement prepareViewDefinitionQuery(Connection conn, DbContents contents) { + if (contents.mayHaveStandardViews()) { + try { + return conn.prepareStatement("SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS" + + " WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?"); + } catch (SQLException e) { + contents.setMayHaveStandardViews(false); } - tab = escapeIdentifier(tab); - buff.append("setNode(").append(treeIndex).append(indentation) - .append(" 'view', '") - .append(PageParser.escapeJavaScript(view.getName())) - .append("', 'javascript:ins(\\'").append(tab).append("\\',true)');\n"); - treeIndex++; - if (mainSchema) { - StringBuilder columnsBuffer = new StringBuilder(); - treeIndex = addColumns(mainSchema, view, buff, - treeIndex, notManyTables, columnsBuffer); - if (schema.getContents().isH2()) { - - try (PreparedStatement prep = conn.prepareStatement("SELECT * FROM " + - "INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=?")) { - prep.setString(1, view.getName()); - ResultSet rs = prep.executeQuery(); + } + return null; + } + + private static int addTableOrView(DbSchema schema, boolean mainSchema, StringBuilder builder, int treeIndex, + DatabaseMetaData meta, boolean showColumns, String indentation, boolean isOracle, boolean notManyTables, + DbTableOrView table, boolean isView, PreparedStatement prep, String indentNode) throws SQLException { + int tableId = treeIndex; + String tab = table.getQuotedName(); + if (!mainSchema) { + tab = schema.quotedName + '.' + tab; + } + tab = escapeIdentifier(tab); + builder.append("setNode(").append(treeIndex).append(indentation) + .append(" '").append(isView ? 
"view" : "table").append("', '") + .append(PageParser.escapeJavaScript(table.getName())) + .append("', 'javascript:ins(\\'").append(tab).append("\\',true)');\n"); + treeIndex++; + if (showColumns) { + StringBuilder columnsBuilder = new StringBuilder(); + treeIndex = addColumns(mainSchema, table, builder, treeIndex, notManyTables, columnsBuilder); + if (isView) { + if (prep != null) { + prep.setString(2, table.getName()); + try (ResultSet rs = prep.executeQuery()) { if (rs.next()) { - String sql = rs.getString("SQL"); - buff.append("setNode(").append(treeIndex) - .append(indentNode) - .append(" 'type', '") - .append(PageParser.escapeJavaScript(sql)) - .append("', null);\n"); - treeIndex++; + String sql = rs.getString(1); + if (sql != null) { + builder.append("setNode(").append(treeIndex).append(indentNode).append(" 'type', '") + .append(PageParser.escapeJavaScript(sql)).append("', null);\n"); + treeIndex++; + } } - rs.close(); } } - buff.append("addTable('") - .append(PageParser.escapeJavaScript(view.getName())).append("', '") - .append(PageParser.escapeJavaScript(columnsBuffer.toString())).append("', ") - .append(tableId).append(");\n"); + } else if (!isOracle && notManyTables) { + treeIndex = addIndexes(mainSchema, meta, table.getName(), schema.name, builder, treeIndex); } + builder.append("addTable('") + .append(PageParser.escapeJavaScript(table.getName())).append("', '") + .append(PageParser.escapeJavaScript(columnsBuilder.toString())).append("', ") + .append(tableId).append(");\n"); } return treeIndex; } @@ -771,17 +790,23 @@ private String tables() { } if (isH2) { try (Statement stat = conn.createStatement()) { - ResultSet rs = stat.executeQuery("SELECT * FROM " + - "INFORMATION_SCHEMA.SEQUENCES ORDER BY SEQUENCE_NAME"); + ResultSet rs; + try { + rs = stat.executeQuery("SELECT SEQUENCE_NAME, BASE_VALUE, INCREMENT FROM " + + "INFORMATION_SCHEMA.SEQUENCES ORDER BY SEQUENCE_NAME"); + } catch (SQLException e) { + rs = stat.executeQuery("SELECT SEQUENCE_NAME, 
CURRENT_VALUE, INCREMENT FROM " + + "INFORMATION_SCHEMA.SEQUENCES ORDER BY SEQUENCE_NAME"); + } for (int i = 0; rs.next(); i++) { if (i == 0) { buff.append("setNode(").append(treeIndex) .append(", 0, 1, 'sequences', '${text.tree.sequences}', null);\n"); treeIndex++; } - String name = rs.getString("SEQUENCE_NAME"); - String current = rs.getString("CURRENT_VALUE"); - String increment = rs.getString("INCREMENT"); + String name = rs.getString(1); + String currentBase = rs.getString(2); + String increment = rs.getString(3); buff.append("setNode(").append(treeIndex) .append(", 1, 1, 'sequence', '") .append(PageParser.escapeJavaScript(name)) @@ -789,7 +814,7 @@ private String tables() { treeIndex++; buff.append("setNode(").append(treeIndex) .append(", 2, 2, 'type', '${text.tree.current}: ") - .append(PageParser.escapeJavaScript(current)) + .append(PageParser.escapeJavaScript(currentBase)) .append("', null);\n"); treeIndex++; if (!"1".equals(increment)) { @@ -801,16 +826,20 @@ private String tables() { } } rs.close(); - rs = stat.executeQuery("SELECT * FROM " + - "INFORMATION_SCHEMA.USERS ORDER BY NAME"); + try { + rs = stat.executeQuery( + "SELECT USER_NAME, IS_ADMIN FROM INFORMATION_SCHEMA.USERS ORDER BY USER_NAME"); + } catch (SQLException e) { + rs = stat.executeQuery("SELECT NAME, ADMIN FROM INFORMATION_SCHEMA.USERS ORDER BY NAME"); + } for (int i = 0; rs.next(); i++) { if (i == 0) { buff.append("setNode(").append(treeIndex) .append(", 0, 1, 'users', '${text.tree.users}', null);\n"); treeIndex++; } - String name = rs.getString("NAME"); - String admin = rs.getString("ADMIN"); + String name = rs.getString(1); + String admin = rs.getString(2); buff.append("setNode(").append(treeIndex) .append(", 1, 1, 'user', '") .append(PageParser.escapeJavaScript(name)) @@ -862,7 +891,7 @@ private String getStackTrace(int id, Throwable e, boolean isH2) { error += " " + se.getSQLState() + "/" + se.getErrorCode(); if (isH2) { int code = se.getErrorCode(); - error += " (${text.a.help})"; 
} @@ -903,7 +932,7 @@ private static String linkToSource(String s) { String file = element.substring(open + 1, colon); String lineNumber = element.substring(colon + 1, element.length()); String fullFileName = packageName.replace('.', '/') + "/" + file; - result.append("" + s + ""; } - private String test() { + private String test(NetworkConnectionInfo networkConnectionInfo) { String driver = attributes.getProperty("driver", ""); String url = attributes.getProperty("url", ""); String user = attributes.getProperty("user", ""); @@ -940,7 +969,7 @@ private String test() { prof.startCollecting(); Connection conn; try { - conn = server.getConnection(driver, url, user, password, null); + conn = server.getConnection(driver, url, user, password, null, networkConnectionInfo); } finally { prof.stopCollecting(); profOpen = prof.getTop(3); @@ -991,7 +1020,7 @@ private String getLoginError(Exception e, boolean isH2) { return getStackTrace(0, e, isH2); } - private String login() { + private String login(NetworkConnectionInfo networkConnectionInfo) { String driver = attributes.getProperty("driver", ""); String url = attributes.getProperty("url", ""); String user = attributes.getProperty("user", ""); @@ -1001,7 +1030,8 @@ private String login() { session.put("maxrows", "1000"); boolean isH2 = url.startsWith("jdbc:h2:"); try { - Connection conn = server.getConnection(driver, url, user, password, (String) session.get("key")); + Connection conn = server.getConnection(driver, url, user, password, (String) session.get("key"), + networkConnectionInfo); session.setConnection(conn); session.put("url", url); session.put("user", user); @@ -1073,10 +1103,6 @@ public String next() { query(conn, s, i - 1, list.size() - 2, b); return b.toString(); } - @Override - public void remove() { - throw new UnsupportedOperationException(); - } }); return "result.jsp"; } @@ -1151,157 +1177,6 @@ private String editResult() { return "result.jsp"; } - private ResultSet getMetaResultSet(Connection conn, String 
sql) - throws SQLException { - DatabaseMetaData meta = conn.getMetaData(); - if (isBuiltIn(sql, "@best_row_identifier")) { - String[] p = split(sql); - int scale = p[4] == null ? 0 : Integer.parseInt(p[4]); - boolean nullable = Boolean.parseBoolean(p[5]); - return meta.getBestRowIdentifier(p[1], p[2], p[3], scale, nullable); - } else if (isBuiltIn(sql, "@catalogs")) { - return meta.getCatalogs(); - } else if (isBuiltIn(sql, "@columns")) { - String[] p = split(sql); - return meta.getColumns(p[1], p[2], p[3], p[4]); - } else if (isBuiltIn(sql, "@column_privileges")) { - String[] p = split(sql); - return meta.getColumnPrivileges(p[1], p[2], p[3], p[4]); - } else if (isBuiltIn(sql, "@cross_references")) { - String[] p = split(sql); - return meta.getCrossReference(p[1], p[2], p[3], p[4], p[5], p[6]); - } else if (isBuiltIn(sql, "@exported_keys")) { - String[] p = split(sql); - return meta.getExportedKeys(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@imported_keys")) { - String[] p = split(sql); - return meta.getImportedKeys(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@index_info")) { - String[] p = split(sql); - boolean unique = Boolean.parseBoolean(p[4]); - boolean approx = Boolean.parseBoolean(p[5]); - return meta.getIndexInfo(p[1], p[2], p[3], unique, approx); - } else if (isBuiltIn(sql, "@primary_keys")) { - String[] p = split(sql); - return meta.getPrimaryKeys(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@procedures")) { - String[] p = split(sql); - return meta.getProcedures(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@procedure_columns")) { - String[] p = split(sql); - return meta.getProcedureColumns(p[1], p[2], p[3], p[4]); - } else if (isBuiltIn(sql, "@schemas")) { - return meta.getSchemas(); - } else if (isBuiltIn(sql, "@tables")) { - String[] p = split(sql); - String[] types = p[4] == null ? 
null : StringUtils.arraySplit(p[4], ',', false); - return meta.getTables(p[1], p[2], p[3], types); - } else if (isBuiltIn(sql, "@table_privileges")) { - String[] p = split(sql); - return meta.getTablePrivileges(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@table_types")) { - return meta.getTableTypes(); - } else if (isBuiltIn(sql, "@type_info")) { - return meta.getTypeInfo(); - } else if (isBuiltIn(sql, "@udts")) { - String[] p = split(sql); - int[] types; - if (p[4] == null) { - types = null; - } else { - String[] t = StringUtils.arraySplit(p[4], ',', false); - types = new int[t.length]; - for (int i = 0; i < t.length; i++) { - types[i] = Integer.parseInt(t[i]); - } - } - return meta.getUDTs(p[1], p[2], p[3], types); - } else if (isBuiltIn(sql, "@version_columns")) { - String[] p = split(sql); - return meta.getVersionColumns(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@memory")) { - SimpleResultSet rs = new SimpleResultSet(); - rs.addColumn("Type", Types.VARCHAR, 0, 0); - rs.addColumn("KB", Types.VARCHAR, 0, 0); - rs.addRow("Used Memory", Integer.toString(Utils.getMemoryUsed())); - rs.addRow("Free Memory", Integer.toString(Utils.getMemoryFree())); - return rs; - } else if (isBuiltIn(sql, "@info")) { - SimpleResultSet rs = new SimpleResultSet(); - rs.addColumn("KEY", Types.VARCHAR, 0, 0); - rs.addColumn("VALUE", Types.VARCHAR, 0, 0); - rs.addRow("conn.getCatalog", conn.getCatalog()); - rs.addRow("conn.getAutoCommit", Boolean.toString(conn.getAutoCommit())); - rs.addRow("conn.getTransactionIsolation", Integer.toString(conn.getTransactionIsolation())); - rs.addRow("conn.getWarnings", String.valueOf(conn.getWarnings())); - String map; - try { - map = String.valueOf(conn.getTypeMap()); - } catch (SQLException e) { - map = e.toString(); - } - rs.addRow("conn.getTypeMap", map); - rs.addRow("conn.isReadOnly", Boolean.toString(conn.isReadOnly())); - rs.addRow("conn.getHoldability", Integer.toString(conn.getHoldability())); - addDatabaseMetaData(rs, meta); - return 
rs; - } else if (isBuiltIn(sql, "@attributes")) { - String[] p = split(sql); - return meta.getAttributes(p[1], p[2], p[3], p[4]); - } else if (isBuiltIn(sql, "@super_tables")) { - String[] p = split(sql); - return meta.getSuperTables(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@super_types")) { - String[] p = split(sql); - return meta.getSuperTypes(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@prof_stop")) { - if (profiler != null) { - profiler.stopCollecting(); - SimpleResultSet rs = new SimpleResultSet(); - rs.addColumn("Top Stack Trace(s)", Types.VARCHAR, 0, 0); - rs.addRow(profiler.getTop(3)); - profiler = null; - return rs; - } - } - return null; - } - - private static void addDatabaseMetaData(SimpleResultSet rs, - DatabaseMetaData meta) { - Method[] methods = DatabaseMetaData.class.getDeclaredMethods(); - Arrays.sort(methods, new Comparator() { - @Override - public int compare(Method o1, Method o2) { - return o1.toString().compareTo(o2.toString()); - } - }); - for (Method m : methods) { - if (m.getParameterTypes().length == 0) { - try { - Object o = m.invoke(meta); - rs.addRow("meta." + m.getName(), String.valueOf(o)); - } catch (InvocationTargetException e) { - rs.addRow("meta." + m.getName(), e.getTargetException().toString()); - } catch (Exception e) { - rs.addRow("meta." + m.getName(), e.toString()); - } - } - } - } - - private static String[] split(String s) { - String[] list = new String[10]; - String[] t = StringUtils.arraySplit(s, ' ', true); - System.arraycopy(t, 0, list, 0, t.length); - for (int i = 0; i < list.length; i++) { - if ("null".equals(list[i])) { - list[i] = null; - } - } - return list; - } - private int getMaxrows() { String r = (String) session.get("maxrows"); return r == null ? 
0 : Integer.parseInt(r); @@ -1333,16 +1208,16 @@ private String getResult(Connection conn, int id, String sql, ResultSet rs; long time = System.currentTimeMillis(); boolean metadata = false; - int generatedKeys = Statement.NO_GENERATED_KEYS; + Object generatedKeys = null; boolean edit = false; boolean list = false; - if (isBuiltIn(sql, "@autocommit_true")) { + if (JdbcUtils.isBuiltIn(sql, "@autocommit_true")) { conn.setAutoCommit(true); return "${text.result.autoCommitOn}"; - } else if (isBuiltIn(sql, "@autocommit_false")) { + } else if (JdbcUtils.isBuiltIn(sql, "@autocommit_false")) { conn.setAutoCommit(false); return "${text.result.autoCommitOff}"; - } else if (isBuiltIn(sql, "@cancel")) { + } else if (JdbcUtils.isBuiltIn(sql, "@cancel")) { stat = session.executingStatement; if (stat != null) { stat.cancel(); @@ -1351,53 +1226,67 @@ private String getResult(Connection conn, int id, String sql, buff.append("${text.result.noRunningStatement}"); } return buff.toString(); - } else if (isBuiltIn(sql, "@edit")) { + } else if (JdbcUtils.isBuiltIn(sql, "@edit")) { edit = true; sql = StringUtils.trimSubstring(sql, "@edit".length()); session.put("resultSetSQL", sql); } - if (isBuiltIn(sql, "@list")) { + if (JdbcUtils.isBuiltIn(sql, "@list")) { list = true; sql = StringUtils.trimSubstring(sql, "@list".length()); } - if (isBuiltIn(sql, "@meta")) { + if (JdbcUtils.isBuiltIn(sql, "@meta")) { metadata = true; sql = StringUtils.trimSubstring(sql, "@meta".length()); } - if (isBuiltIn(sql, "@generated")) { - generatedKeys = Statement.RETURN_GENERATED_KEYS; - sql = StringUtils.trimSubstring(sql, "@generated".length()); - } else if (isBuiltIn(sql, "@history")) { + if (JdbcUtils.isBuiltIn(sql, "@generated")) { + generatedKeys = true; + int offset = "@generated".length(); + int length = sql.length(); + for (; offset < length; offset++) { + char c = sql.charAt(offset); + if (c == '(') { + Parser p = new Parser(); + generatedKeys = p.parseColumnList(sql, offset); + offset = 
p.getLastParseIndex(); + break; + } + if (!Character.isWhitespace(c)) { + break; + } + } + sql = StringUtils.trimSubstring(sql, offset); + } else if (JdbcUtils.isBuiltIn(sql, "@history")) { buff.append(getCommandHistoryString()); return buff.toString(); - } else if (isBuiltIn(sql, "@loop")) { + } else if (JdbcUtils.isBuiltIn(sql, "@loop")) { sql = StringUtils.trimSubstring(sql, "@loop".length()); int idx = sql.indexOf(' '); int count = Integer.decode(sql.substring(0, idx)); sql = StringUtils.trimSubstring(sql, idx); return executeLoop(conn, count, sql); - } else if (isBuiltIn(sql, "@maxrows")) { + } else if (JdbcUtils.isBuiltIn(sql, "@maxrows")) { int maxrows = (int) Double.parseDouble(StringUtils.trimSubstring(sql, "@maxrows".length())); session.put("maxrows", Integer.toString(maxrows)); return "${text.result.maxrowsSet}"; - } else if (isBuiltIn(sql, "@parameter_meta")) { + } else if (JdbcUtils.isBuiltIn(sql, "@parameter_meta")) { sql = StringUtils.trimSubstring(sql, "@parameter_meta".length()); PreparedStatement prep = conn.prepareStatement(sql); buff.append(getParameterResultSet(prep.getParameterMetaData())); return buff.toString(); - } else if (isBuiltIn(sql, "@password_hash")) { + } else if (JdbcUtils.isBuiltIn(sql, "@password_hash")) { sql = StringUtils.trimSubstring(sql, "@password_hash".length()); - String[] p = split(sql); + String[] p = JdbcUtils.split(sql); return StringUtils.convertBytesToHex( SHA256.getKeyPasswordHash(p[0], p[1].toCharArray())); - } else if (isBuiltIn(sql, "@prof_start")) { + } else if (JdbcUtils.isBuiltIn(sql, "@prof_start")) { if (profiler != null) { profiler.stopCollecting(); } profiler = new Profiler(); profiler.startCollecting(); return "Ok"; - } else if (isBuiltIn(sql, "@sleep")) { + } else if (JdbcUtils.isBuiltIn(sql, "@sleep")) { String s = StringUtils.trimSubstring(sql, "@sleep".length()); int sleep = 1; if (s.length() > 0) { @@ -1405,7 +1294,7 @@ private String getResult(Connection conn, int id, String sql, } 
Thread.sleep(sleep * 1000); return "Ok"; - } else if (isBuiltIn(sql, "@transaction_isolation")) { + } else if (JdbcUtils.isBuiltIn(sql, "@transaction_isolation")) { String s = StringUtils.trimSubstring(sql, "@transaction_isolation".length()); if (s.length() > 0) { int level = Integer.parseInt(s); @@ -1420,11 +1309,23 @@ private String getResult(Connection conn, int id, String sql, .append(": read_committed
          "); buff.append(Connection.TRANSACTION_REPEATABLE_READ) .append(": repeatable_read
          "); + buff.append(Constants.TRANSACTION_SNAPSHOT) + .append(": snapshot
          "); buff.append(Connection.TRANSACTION_SERIALIZABLE) .append(": serializable"); } if (sql.startsWith("@")) { - rs = getMetaResultSet(conn, sql); + rs = JdbcUtils.getMetaResultSet(conn, sql); + if (rs == null && JdbcUtils.isBuiltIn(sql, "@prof_stop")) { + if (profiler != null) { + profiler.stopCollecting(); + SimpleResultSet simple = new SimpleResultSet(); + simple.addColumn("Top Stack Trace(s)", Types.VARCHAR, 0, 0); + simple.addRow(profiler.getTop(3)); + rs = simple; + profiler = null; + } + } if (rs == null) { buff.append("?: ").append(sql); return buff.toString(); @@ -1433,15 +1334,30 @@ private String getResult(Connection conn, int id, String sql, int maxrows = getMaxrows(); stat.setMaxRows(maxrows); session.executingStatement = stat; - boolean isResultSet = stat.execute(sql, generatedKeys); + boolean isResultSet; + if (generatedKeys == null) { + isResultSet = stat.execute(sql); + } else if (generatedKeys instanceof Boolean) { + isResultSet = stat.execute(sql, + ((Boolean) generatedKeys) ? Statement.RETURN_GENERATED_KEYS : Statement.NO_GENERATED_KEYS); + } else if (generatedKeys instanceof String[]) { + isResultSet = stat.execute(sql, (String[]) generatedKeys); + } else { + isResultSet = stat.execute(sql, (int[]) generatedKeys); + } session.addCommand(sql); - if (generatedKeys == Statement.RETURN_GENERATED_KEYS) { + if (generatedKeys != null) { rs = null; rs = stat.getGeneratedKeys(); } else { if (!isResultSet) { - buff.append("${text.result.updateCount}: ") - .append(stat.getUpdateCount()); + long updateCount; + try { + updateCount = stat.getLargeUpdateCount(); + } catch (UnsupportedOperationException e) { + updateCount = stat.getUpdateCount(); + } + buff.append("${text.result.updateCount}: ").append(updateCount); time = System.currentTimeMillis() - time; buff.append("
          (").append(time).append(" ms)"); stat.close(); @@ -1469,11 +1385,6 @@ private String getResult(Connection conn, int id, String sql, } } - private static boolean isBuiltIn(String sql, String builtIn) { - int len = builtIn.length(); - return sql.length() >= len && sql.regionMatches(true, 0, builtIn, 0, len); - } - private String executeLoop(Connection conn, int count, String sql) throws SQLException { ArrayList params = new ArrayList<>(); @@ -1483,7 +1394,7 @@ private String executeLoop(Connection conn, int count, String sql) if (idx < 0) { break; } - if (isBuiltIn(sql.substring(idx), "?/*rnd*/")) { + if (JdbcUtils.isBuiltIn(sql.substring(idx), "?/*rnd*/")) { params.add(1); sql = sql.substring(0, idx) + "?" + sql.substring(idx + "/*rnd*/".length() + 1); } else { @@ -1494,7 +1405,7 @@ private String executeLoop(Connection conn, int count, String sql) boolean prepared; Random random = new Random(1); long time = System.currentTimeMillis(); - if (isBuiltIn(sql, "@statement")) { + if (JdbcUtils.isBuiltIn(sql, "@statement")) { sql = StringUtils.trimSubstring(sql, "@statement".length()); prepared = false; Statement stat = conn.createStatement(); @@ -1617,9 +1528,9 @@ private String getResultSet(String sql, ResultSet rs, boolean metadata, "id=\"mainForm\" target=\"h2result\">" + "" + "" + - "
          "); + "
          "); } else { - buff.append("
          "); + buff.append("
          "); } if (metadata) { SimpleResultSet r = new SimpleResultSet(); @@ -1815,21 +1726,23 @@ private String settingSave() { return "index.do"; } - private static String escapeData(ResultSet rs, int columnIndex) - throws SQLException { + private static String escapeData(ResultSet rs, int columnIndex) throws SQLException { + if (DataType.isBinaryColumn(rs.getMetaData(), columnIndex)) { + byte[] d = rs.getBytes(columnIndex); + if (d == null) { + return "null"; + } else if (d.length > 50_000) { + return "
          =+
          " + StringUtils.convertBytesToHex(d, 3) + "... (" + + d.length + " ${text.result.bytes})"; + } + return StringUtils.convertBytesToHex(d); + } String d = rs.getString(columnIndex); if (d == null) { return "null"; } else if (d.length() > 100_000) { - String s; - if (isBinary(rs.getMetaData().getColumnType(columnIndex))) { - s = PageParser.escapeHtml(d.substring(0, 6)) + - "... (" + (d.length() / 2) + " ${text.result.bytes})"; - } else { - s = PageParser.escapeHtml(d.substring(0, 100)) + - "... (" + d.length() + " ${text.result.characters})"; - } - return "
          =+
          " + s; + return "
          =+
          " + PageParser.escapeHtml(d.substring(0, 100)) + "... (" + + d.length() + " ${text.result.characters})"; } else if (d.equals("null") || d.startsWith("= ") || d.startsWith("=+")) { return "
          =
          " + PageParser.escapeHtml(d); } else if (d.equals("")) { @@ -1839,19 +1752,6 @@ private static String escapeData(ResultSet rs, int columnIndex) return PageParser.escapeHtml(d); } - private static boolean isBinary(int sqlType) { - switch (sqlType) { - case Types.BINARY: - case Types.BLOB: - case Types.JAVA_OBJECT: - case Types.LONGVARBINARY: - case Types.OTHER: - case Types.VARBINARY: - return true; - } - return false; - } - private void unescapeData(String x, ResultSet rs, int columnIndex) throws SQLException { if (x.equals("null")) { @@ -1880,6 +1780,10 @@ private void unescapeData(String x, ResultSet rs, int columnIndex) x = x.substring(2); } ResultSetMetaData meta = rs.getMetaData(); + if (DataType.isBinaryColumn(meta, columnIndex)) { + rs.updateBytes(columnIndex, StringUtils.convertHexToBytes(x)); + return; + } int type = meta.getColumnType(columnIndex); if (session.getContents().isH2()) { rs.updateString(columnIndex, x); diff --git a/h2/src/main/org/h2/server/web/WebServer.java b/h2/src/main/org/h2/server/web/WebServer.java index f3330b932d..73d17644da 100644 --- a/h2/src/main/org/h2/server/web/WebServer.java +++ b/h2/src/main/org/h2/server/web/WebServer.java @@ -1,20 +1,24 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server.web; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.ServerSocket; import java.net.Socket; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.sql.Connection; import java.sql.SQLException; -import java.text.SimpleDateFormat; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -33,10 +37,10 @@ import org.h2.server.Service; import org.h2.server.ShutdownHandler; import org.h2.store.fs.FileUtils; -import org.h2.util.DateTimeUtils; import org.h2.util.JdbcUtils; import org.h2.util.MathUtils; import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.SortedProperties; import org.h2.util.StringUtils; import org.h2.util.Tool; @@ -54,6 +58,7 @@ public class WebServer implements Service { { "en", "English" }, { "es", "Espa\u00f1ol" }, { "fr", "Fran\u00e7ais" }, + { "hi", "Hindi \u0939\u093f\u0902\u0926\u0940" }, { "hu", "Magyar"}, { "ko", "\ud55c\uad6d\uc5b4"}, { "in", "Indonesia"}, @@ -106,13 +111,15 @@ public class WebServer implements Service { "jdbc:sqlserver://localhost;DatabaseName=test|sa", "Generic PostgreSQL|org.postgresql.Driver|" + "jdbc:postgresql:test|" , - "Generic MySQL|com.mysql.jdbc.Driver|" + + "Generic MySQL|com.mysql.cj.jdbc.Driver|" + "jdbc:mysql://localhost:3306/test|" , + "Generic MariaDB|org.mariadb.jdbc.Driver|" + + "jdbc:mariadb://localhost:3306/test|" , "Generic HSQLDB|org.hsqldb.jdbcDriver|" + "jdbc:hsqldb:test;hsqldb.default_table_type=cached|sa" , - "Generic Derby (Server)|org.apache.derby.jdbc.ClientDriver|" + + "Generic Derby (Server)|org.apache.derby.client.ClientAutoloadedDriver|" + "jdbc:derby://localhost:1527/test;create=true|sa", - "Generic Derby 
(Embedded)|org.apache.derby.jdbc.EmbeddedDriver|" + + "Generic Derby (Embedded)|org.apache.derby.iapi.jdbc.AutoloadedDriver|" + "jdbc:derby:test;create=true|sa", "Generic H2 (Server)|org.h2.Driver|" + "jdbc:h2:tcp://localhost/~/test|sa", @@ -152,6 +159,7 @@ public class WebServer implements Service { // private URLClassLoader urlClassLoader; private int port; private boolean allowOthers; + private String externalNames; private boolean isDaemon; private final Set running = Collections.synchronizedSet(new HashSet()); @@ -164,6 +172,7 @@ public class WebServer implements Service { private final HashSet languages = new HashSet<>(); private String startDateTime; private ServerSocket serverSocket; + private String host; private String url; private ShutdownHandler shutdownHandler; private Thread listenerThread; @@ -182,6 +191,7 @@ public class WebServer implements Service { * * @param file the file name * @return the data + * @throws IOException on failure */ byte[] getFile(String file) throws IOException { trace("getFile <" + file + ">"); @@ -260,10 +270,8 @@ WebSession createNewSession(String hostAddr) { String getStartDateTime() { if (startDateTime == null) { - SimpleDateFormat format = new SimpleDateFormat( - "EEE, d MMM yyyy HH:mm:ss z", new Locale("en", "")); - format.setTimeZone(DateTimeUtils.UTC); - startDateTime = format.format(System.currentTimeMillis()); + startDateTime = DateTimeFormatter.ofPattern("EEE, d MMM yyyy HH:mm:ss z", Locale.ENGLISH) + .format(ZonedDateTime.now(ZoneId.of("UTC"))); } return startDateTime; } @@ -313,6 +321,7 @@ public void init(String... 
args) { "webSSL", false); allowOthers = SortedProperties.getBooleanProperty(prop, "webAllowOthers", false); + setExternalNames(SortedProperties.getStringProperty(prop, "webExternalNames", null)); setAdminPassword(SortedProperties.getStringProperty(prop, "webAdminPassword", null)); commandHistoryString = prop.getProperty(COMMAND_HISTORY); for (int i = 0; args != null && i < args.length; i++) { @@ -323,6 +332,8 @@ public void init(String... args) { ssl = true; } else if (Tool.isOption(a, "-webAllowOthers")) { allowOthers = true; + } else if (Tool.isOption(a, "-webExternalNames")) { + setExternalNames(args[++i]); } else if (Tool.isOption(a, "-webDaemon")) { isDaemon = true; } else if (Tool.isOption(a, "-baseDir")) { @@ -369,11 +380,22 @@ public String getURL() { return url; } + /** + * @return host name + */ + public String getHost() { + if (host == null) { + updateURL(); + } + return host; + } + private void updateURL() { try { + host = StringUtils.toLowerEnglish(NetUtils.getLocalAddress()); StringBuilder builder = new StringBuilder(ssl ? "https" : "http").append("://") - .append(NetUtils.getLocalAddress()).append(':').append(port); - if (key != null) { + .append(host).append(':').append(port); + if (key != null && serverSocket != null) { builder.append("?key=").append(key); } url = builder.toString(); @@ -545,6 +567,14 @@ public boolean getAllowOthers() { return allowOthers; } + void setExternalNames(String externalNames) { + this.externalNames = externalNames != null ? 
StringUtils.toLowerEnglish(externalNames) : null; + } + + String getExternalNames() { + return externalNames; + } + void setSSL(boolean b) { ssl = b; } @@ -725,6 +755,9 @@ synchronized void saveProperties(Properties prop) { Integer.toString(SortedProperties.getIntProperty(old, "webPort", port))); prop.setProperty("webAllowOthers", Boolean.toString(SortedProperties.getBooleanProperty(old, "webAllowOthers", allowOthers))); + if (externalNames != null) { + prop.setProperty("webExternalNames", externalNames); + } prop.setProperty("webSSL", Boolean.toString(SortedProperties.getBooleanProperty(old, "webSSL", ssl))); if (adminPassword != null) { @@ -761,25 +794,18 @@ synchronized void saveProperties(Properties prop) { * @param user the user name * @param password the password * @param userKey the key of privileged user + * @param networkConnectionInfo the network connection information * @return the database connection + * @throws SQLException on failure */ Connection getConnection(String driver, String databaseUrl, String user, - String password, String userKey) throws SQLException { + String password, String userKey, NetworkConnectionInfo networkConnectionInfo) throws SQLException { driver = driver.trim(); databaseUrl = databaseUrl.trim(); - Properties p = new Properties(); - p.setProperty("user", user.trim()); // do not trim the password, otherwise an // encrypted H2 database with empty user password doesn't work - p.setProperty("password", password); - if (databaseUrl.startsWith("jdbc:h2:")) { - if (!allowSecureCreation || key == null || !key.equals(userKey)) { - if (ifExists) { - databaseUrl += ";IFEXISTS=TRUE"; - } - } - } - return JdbcUtils.getConnection(driver, databaseUrl, p); + return JdbcUtils.getConnection(driver, databaseUrl, user.trim(), password, networkConnectionInfo, + ifExists && (!allowSecureCreation || key == null || !key.equals(userKey))); } /** @@ -800,6 +826,7 @@ public void setShutdownHandler(ShutdownHandler shutdownHandler) { * * @param conn the 
connection * @return the URL of the web site to access this connection + * @throws SQLException on failure */ public String addSession(Connection conn) throws SQLException { WebSession session = createNewSession("local"); @@ -816,7 +843,7 @@ public String addSession(Connection conn) throws SQLException { */ private class TranslateThread extends Thread { - private final File file = new File("translation.properties"); + private final Path file = Paths.get("translation.properties"); private final Map translation; private volatile boolean stopNow; @@ -825,7 +852,7 @@ private class TranslateThread extends Thread { } public String getFileName() { - return file.getAbsolutePath(); + return file.toAbsolutePath().toString(); } public void stopNow() { @@ -842,12 +869,12 @@ public void run() { while (!stopNow) { try { SortedProperties sp = new SortedProperties(); - if (file.exists()) { - InputStream in = FileUtils.newInputStream(file.getName()); + if (Files.exists(file)) { + InputStream in = Files.newInputStream(file); sp.load(in); translation.putAll(sp); } else { - OutputStream out = FileUtils.newOutputStream(file.getName(), false); + OutputStream out = Files.newOutputStream(file); sp.putAll(translation); sp.store(out, "Translation"); } @@ -914,7 +941,7 @@ void setAdminPassword(String password) { /** * Check the admin password. * - * @param the password to test + * @param password the password to test * @return true if admin password not configure, or admin password correct */ boolean checkAdminPassword(String password) { diff --git a/h2/src/main/org/h2/server/web/WebServlet.java b/h2/src/main/org/h2/server/web/WebServlet.java index 6e787edf2e..752cf6bbc6 100644 --- a/h2/src/main/org/h2/server/web/WebServlet.java +++ b/h2/src/main/org/h2/server/web/WebServlet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.web; @@ -19,6 +19,8 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; +import org.h2.util.NetworkConnectionInfo; + /** * This servlet lets the H2 Console be used in a standard servlet container * such as Tomcat or Jetty. @@ -117,8 +119,14 @@ public void doGet(HttpServletRequest req, HttpServletResponse resp) app.setSession(session, attributes); String ifModifiedSince = req.getHeader("if-modified-since"); - String hostAddr = req.getRemoteAddr(); - file = app.processRequest(file, hostAddr); + String scheme = req.getScheme(); + StringBuilder builder = new StringBuilder(scheme).append("://").append(req.getServerName()); + int serverPort = req.getServerPort(); + if (!(serverPort == 80 && scheme.equals("http") || serverPort == 443 && scheme.equals("https"))) { + builder.append(':').append(serverPort); + } + String path = builder.append(req.getContextPath()).toString(); + file = app.processRequest(file, new NetworkConnectionInfo(path, req.getRemoteAddr(), req.getRemotePort())); session = app.getSession(); String mimeType = app.getMimeType(); diff --git a/h2/src/main/org/h2/server/web/WebSession.java b/h2/src/main/org/h2/server/web/WebSession.java index 1baeb66454..bda717d1a0 100644 --- a/h2/src/main/org/h2/server/web/WebSession.java +++ b/h2/src/main/org/h2/server/web/WebSession.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server.web; diff --git a/h2/src/main/org/h2/server/web/WebThread.java b/h2/src/main/org/h2/server/web/WebThread.java index a81f4e38e7..2c6a7fd6b5 100644 --- a/h2/src/main/org/h2/server/web/WebThread.java +++ b/h2/src/main/org/h2/server/web/WebThread.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.web; @@ -9,6 +9,7 @@ import java.io.BufferedOutputStream; import java.io.IOException; import java.io.InputStream; +import java.io.InterruptedIOException; import java.io.OutputStream; import java.net.Socket; import java.net.UnknownHostException; @@ -22,6 +23,7 @@ import org.h2.message.DbException; import org.h2.util.IOUtils; import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.StringUtils; import org.h2.util.Utils; @@ -31,10 +33,16 @@ */ class WebThread extends WebApp implements Runnable { + private static final byte[] RN = { '\r', '\n' }; + + private static final byte[] RNRN = { '\r', '\n', '\r', '\n' }; + protected OutputStream output; protected final Socket socket; private final Thread thread; private InputStream input; + private String host; + private int dataLength; private String ifModifiedSince; WebThread(Socket socket, WebServer server) { @@ -54,6 +62,7 @@ void start() { * Wait until the thread is stopped. 
* * @param millis the maximum number of milliseconds to wait + * @throws InterruptedException if interrupted */ void join(int millis) throws InterruptedException { thread.join(millis); @@ -110,108 +119,159 @@ public void run() { @SuppressWarnings("unchecked") private boolean process() throws IOException { - boolean keepAlive = false; String head = readHeaderLine(); - if (head.startsWith("GET ") || head.startsWith("POST ")) { - int begin = head.indexOf('/'), end = head.lastIndexOf(' '); - String file; - if (begin < 0 || end < begin) { - file = ""; - } else { - file = StringUtils.trimSubstring(head, begin + 1, end); - } - trace(head + ": " + file); - file = getAllowedFile(file); - attributes = new Properties(); - int paramIndex = file.indexOf('?'); - session = null; - String key = null; - if (paramIndex >= 0) { - String attrib = file.substring(paramIndex + 1); - parseAttributes(attrib); - String sessionId = attributes.getProperty("jsessionid"); - key = attributes.getProperty("key"); - file = file.substring(0, paramIndex); - session = server.getSession(sessionId); - } - keepAlive = parseHeader(); - String hostAddr = socket.getInetAddress().getHostAddress(); - file = processRequest(file, hostAddr); - if (file.length() == 0) { - // asynchronous request - return true; + boolean get = head.startsWith("GET "); + if ((!get && !head.startsWith("POST ")) || !head.endsWith(" HTTP/1.1")) { + writeSimple("HTTP/1.1 400 Bad Request", "Bad request"); + return false; + } + String file = StringUtils.trimSubstring(head, get ? 
4 : 5, head.length() - 9); + if (file.isEmpty() || file.charAt(0) != '/') { + writeSimple("HTTP/1.1 400 Bad Request", "Bad request"); + return false; + } + attributes = new Properties(); + boolean keepAlive = parseHeader(); + if (!checkHost(host)) { + return false; + } + file = file.substring(1); + trace(head + ": " + file); + file = getAllowedFile(file); + int paramIndex = file.indexOf('?'); + session = null; + String key = null; + if (paramIndex >= 0) { + String attrib = file.substring(paramIndex + 1); + parseAttributes(attrib); + String sessionId = attributes.getProperty("jsessionid"); + key = attributes.getProperty("key"); + file = file.substring(0, paramIndex); + session = server.getSession(sessionId); + } + parseBodyAttributes(); + file = processRequest(file, + new NetworkConnectionInfo( + NetUtils.ipToShortForm(new StringBuilder(server.getSSL() ? "https://" : "http://"), + socket.getLocalAddress().getAddress(), true) // + .append(':').append(socket.getLocalPort()).toString(), // + socket.getInetAddress().getAddress(), socket.getPort(), null)); + if (file.length() == 0) { + // asynchronous request + return true; + } + String message; + if (cache && ifModifiedSince != null && ifModifiedSince.equals(server.getStartDateTime())) { + writeSimple("HTTP/1.1 304 Not Modified", (byte[]) null); + return keepAlive; + } + byte[] bytes = server.getFile(file); + if (bytes == null) { + writeSimple("HTTP/1.1 404 Not Found", "File not found: " + file); + return keepAlive; + } + if (session != null && file.endsWith(".jsp")) { + if (key != null) { + session.put("key", key); } - String message; - byte[] bytes; - if (cache && ifModifiedSince != null && - ifModifiedSince.equals(server.getStartDateTime())) { - bytes = null; - message = "HTTP/1.1 304 Not Modified\r\n"; - } else { - bytes = server.getFile(file); - if (bytes == null) { - message = "HTTP/1.1 404 Not Found\r\n"; - bytes = ("File not found: " + file).getBytes(StandardCharsets.UTF_8); - message += "Content-Length: " + 
bytes.length + "\r\n"; - } else { - if (session != null && file.endsWith(".jsp")) { - if (key != null) { - session.put("key", key); - } - String page = new String(bytes, StandardCharsets.UTF_8); - if (SysProperties.CONSOLE_STREAM) { - Iterator it = (Iterator) session.map.remove("chunks"); - if (it != null) { - message = "HTTP/1.1 200 OK\r\n"; - message += "Content-Type: " + mimeType + "\r\n"; - message += "Cache-Control: no-cache\r\n"; - message += "Transfer-Encoding: chunked\r\n"; - message += "\r\n"; - trace(message); - output.write(message.getBytes()); - while (it.hasNext()) { - String s = it.next(); - s = PageParser.parse(s, session.map); - bytes = s.getBytes(StandardCharsets.UTF_8); - if (bytes.length == 0) { - continue; - } - output.write(Integer.toHexString(bytes.length).getBytes()); - output.write("\r\n".getBytes()); - output.write(bytes); - output.write("\r\n".getBytes()); - output.flush(); - } - output.write("0\r\n\r\n".getBytes()); - output.flush(); - return keepAlive; - } - } - page = PageParser.parse(page, session.map); - bytes = page.getBytes(StandardCharsets.UTF_8); - } + String page = new String(bytes, StandardCharsets.UTF_8); + if (SysProperties.CONSOLE_STREAM) { + Iterator it = (Iterator) session.map.remove("chunks"); + if (it != null) { message = "HTTP/1.1 200 OK\r\n"; message += "Content-Type: " + mimeType + "\r\n"; - if (!cache) { - message += "Cache-Control: no-cache\r\n"; - } else { - message += "Cache-Control: max-age=10\r\n"; - message += "Last-Modified: " + server.getStartDateTime() + "\r\n"; + message += "Cache-Control: no-cache\r\n"; + message += "Transfer-Encoding: chunked\r\n"; + message += "\r\n"; + trace(message); + output.write(message.getBytes(StandardCharsets.ISO_8859_1)); + while (it.hasNext()) { + String s = it.next(); + s = PageParser.parse(s, session.map); + bytes = s.getBytes(StandardCharsets.UTF_8); + if (bytes.length == 0) { + continue; + } + 
output.write(Integer.toHexString(bytes.length).getBytes(StandardCharsets.ISO_8859_1)); + output.write(RN); + output.write(bytes); + output.write(RN); + output.flush(); } - message += "Content-Length: " + bytes.length + "\r\n"; + output.write('0'); + output.write(RNRN); + output.flush(); + return keepAlive; } } - message += "\r\n"; - trace(message); - output.write(message.getBytes()); - if (bytes != null) { - output.write(bytes); - } - output.flush(); + page = PageParser.parse(page, session.map); + bytes = page.getBytes(StandardCharsets.UTF_8); + } + message = "HTTP/1.1 200 OK\r\n"; + message += "Content-Type: " + mimeType + "\r\n"; + if (!cache) { + message += "Cache-Control: no-cache\r\n"; + } else { + message += "Cache-Control: max-age=10\r\n"; + message += "Last-Modified: " + server.getStartDateTime() + "\r\n"; } + message += "Content-Length: " + bytes.length + "\r\n"; + message += "\r\n"; + trace(message); + output.write(message.getBytes(StandardCharsets.ISO_8859_1)); + output.write(bytes); + output.flush(); return keepAlive; } + private void writeSimple(String status, String text) throws IOException { + writeSimple(status, text != null ? 
text.getBytes(StandardCharsets.UTF_8) : null); + } + + private void writeSimple(String status, byte[] bytes) throws IOException { + trace(status); + output.write(status.getBytes(StandardCharsets.ISO_8859_1)); + if (bytes != null) { + output.write(RN); + String contentLength = "Content-Length: " + bytes.length; + trace(contentLength); + output.write(contentLength.getBytes(StandardCharsets.ISO_8859_1)); + output.write(RNRN); + output.write(bytes); + } else { + output.write(RNRN); + } + output.flush(); + } + + private boolean checkHost(String host) throws IOException { + if (host == null) { + writeSimple("HTTP/1.1 400 Bad Request", "Bad request"); + return false; + } + int index = host.indexOf(':'); + if (index >= 0) { + host = host.substring(0, index); + } + if (host.isEmpty()) { + return false; + } + host = StringUtils.toLowerEnglish(host); + if (host.equals(server.getHost()) || host.equals("localhost") || host.equals("127.0.0.1")) { + return true; + } + String externalNames = server.getExternalNames(); + if (externalNames != null && !externalNames.isEmpty()) { + for (String s : externalNames.split(",")) { + if (host.equals(s.trim())) { + return true; + } + } + } + writeSimple("HTTP/1.1 404 Not Found", "Host " + host + " not found"); + return false; + } + private String readHeaderLine() throws IOException { StringBuilder buff = new StringBuilder(); while (true) { @@ -230,6 +290,17 @@ private String readHeaderLine() throws IOException { } } + private void parseBodyAttributes() throws IOException { + if (dataLength > 0) { + byte[] bytes = Utils.newBytes(dataLength); + for (int pos = 0; pos < dataLength;) { + pos += input.read(bytes, pos, dataLength - pos); + } + String s = new String(bytes, StandardCharsets.UTF_8); + parseAttributes(s); + } + } + private void parseAttributes(String s) { trace("data=" + s); while (s != null) { @@ -258,16 +329,15 @@ private boolean parseHeader() throws IOException { boolean keepAlive = false; trace("parseHeader"); int len = 0; + host = 
null; ifModifiedSince = null; boolean multipart = false; - while (true) { - String line = readHeaderLine(); - if (line == null) { - break; - } + for (String line; (line = readHeaderLine()) != null;) { trace(" " + line); String lower = StringUtils.toLowerEnglish(line); - if (lower.startsWith("if-modified-since")) { + if (lower.startsWith("host")) { + host = getHeaderLineValue(line); + } else if (lower.startsWith("if-modified-since")) { ifModifiedSince = getHeaderLineValue(line); } else if (lower.startsWith("connection")) { String conn = getHeaderLineValue(line); @@ -286,7 +356,7 @@ private boolean parseHeader() throws IOException { boolean isWebKit = lower.contains("webkit/"); if (isWebKit && session != null) { // workaround for what seems to be a WebKit bug: - // http://code.google.com/p/chromium/issues/detail?id=6402 + // https://bugs.chromium.org/p/chromium/issues/detail?id=6402 session.put("frame-border", "1"); session.put("frameset-border", "2"); } @@ -322,15 +392,11 @@ private boolean parseHeader() throws IOException { break; } } + dataLength = 0; if (multipart) { // not supported - } else if (session != null && len > 0) { - byte[] bytes = Utils.newBytes(len); - for (int pos = 0; pos < len;) { - pos += input.read(bytes, pos, len - pos); - } - String s = new String(bytes); - parseAttributes(s); + } else if (len > 0) { + dataLength = len; } return keepAlive; } diff --git a/h2/src/main/org/h2/server/web/package.html b/h2/src/main/org/h2/server/web/package.html index c55cec4e58..4eab3b2de8 100644 --- a/h2/src/main/org/h2/server/web/package.html +++ b/h2/src/main/org/h2/server/web/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/_text_cs.prop b/h2/src/main/org/h2/server/web/res/_text_cs.prop index 2126edefac..4e082236b1 100644 --- a/h2/src/main/org/h2/server/web/res/_text_cs.prop +++ b/h2/src/main/org/h2/server/web/res/_text_cs.prop @@ -25,6 +25,7 @@ adminLoginCancel=Zrušit adminLoginOk=OK adminLogout=Odhlásit adminOthers=Povolit 
připojení z jiných počítačů +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Číslo portu adminPortWeb=Číslo portu webového serveru adminRestart=Změny se projeví po restartu serveru. diff --git a/h2/src/main/org/h2/server/web/res/_text_de.prop b/h2/src/main/org/h2/server/web/res/_text_de.prop index 53cfa6f07e..846bcbd3ff 100644 --- a/h2/src/main/org/h2/server/web/res/_text_de.prop +++ b/h2/src/main/org/h2/server/web/res/_text_de.prop @@ -25,6 +25,7 @@ adminLoginCancel=Abbrechen adminLoginOk=OK adminLogout=Beenden adminOthers=Verbindungen von anderen Computern erlauben +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Admin Port adminPortWeb=Web-Server Port adminRestart=Änderungen werden nach einem Neustart des Servers aktiv. @@ -98,9 +99,9 @@ toolbar.autoComplete=Auto-Complete toolbar.autoComplete.full=Alles toolbar.autoComplete.normal=Normal toolbar.autoComplete.off=Aus -toolbar.autoSelect=#Auto select +toolbar.autoSelect=Automatische Auswahl toolbar.autoSelect.off=Aus -toolbar.autoSelect.on=#On +toolbar.autoSelect.on=An toolbar.cancelStatement=Laufenden Befehl abbrechen toolbar.clear=Leeren toolbar.commit=Commit (Abschliessen/Speichern) diff --git a/h2/src/main/org/h2/server/web/res/_text_en.prop b/h2/src/main/org/h2/server/web/res/_text_en.prop index 792bbb2859..b6f0fb8a0c 100644 --- a/h2/src/main/org/h2/server/web/res/_text_en.prop +++ b/h2/src/main/org/h2/server/web/res/_text_en.prop @@ -1,7 +1,7 @@ .translator=Thomas Mueller a.help=Help a.language=English -a.lynxNotSupported=Sorry, Lynx not supported yet +a.lynxNotSupported=Sorry, Lynx is not supported yet a.password=Password a.remoteConnectionsDisabled=Sorry, remote connections ('webAllowOthers') are disabled on this server. 
a.title=H2 Console @@ -25,6 +25,7 @@ adminLoginCancel=Cancel adminLoginOk=OK adminLogout=Logout adminOthers=Allow connections from other computers +adminWebExternalNames=External names or addresses of this server (comma-separated) adminPort=Port number adminPortWeb=Web server port number adminRestart=Changes take effect after restarting the server. diff --git a/h2/src/main/org/h2/server/web/res/_text_es.prop b/h2/src/main/org/h2/server/web/res/_text_es.prop index 8f1e1c576e..8e41b66ce5 100644 --- a/h2/src/main/org/h2/server/web/res/_text_es.prop +++ b/h2/src/main/org/h2/server/web/res/_text_es.prop @@ -25,6 +25,7 @@ adminLoginCancel=Cancelar adminLoginOk=Aceptar adminLogout=Desconectar adminOthers=Permitir conexiones desde otros ordenadores +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Puerto adminPortWeb=Puerto del servidor Web adminRestart=Los cambios tendrán efecto al reiniciar el servidor. diff --git a/h2/src/main/org/h2/server/web/res/_text_fr.prop b/h2/src/main/org/h2/server/web/res/_text_fr.prop index 8380c479c8..792f72ecf8 100644 --- a/h2/src/main/org/h2/server/web/res/_text_fr.prop +++ b/h2/src/main/org/h2/server/web/res/_text_fr.prop @@ -25,6 +25,7 @@ adminLoginCancel=Annuler adminLoginOk=OK adminLogout=Déconnexion adminOthers=Autoriser les connexions d'ordinateurs distants +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Numéro de port adminPortWeb=Numéro de port du serveur Web adminRestart=Modifications effectuées après redémarrage du serveur. 
diff --git a/h2/src/main/org/h2/server/web/res/_text_hi.prop b/h2/src/main/org/h2/server/web/res/_text_hi.prop new file mode 100644 index 0000000000..a7d8a05293 --- /dev/null +++ b/h2/src/main/org/h2/server/web/res/_text_hi.prop @@ -0,0 +1,164 @@ +.translator=vikash verma +a.help=सहायता +a.language=Hindi(हिंदी) +a.lynxNotSupported=क्षमा करें, लिंक्स(Lynx) अभी तक समर्थित नहीं है +a.password=पासवर्ड +a.remoteConnectionsDisabled=क्षमा करें, इस सर्वर पर दूरस्थ कनेक्शन ('webAllowOthers') अक्षम हैं। +a.title=एच 2 कंसोल +a.tools=उपकरण +a.user=प्रयोक्ता नाम +admin.executing=निष्पादित +admin.ip=आईपी (IP) +admin.lastAccess=अंतिम पहुंच +admin.lastQuery=अंतिम प्रश्न(query) +admin.no=नहीं +admin.notConnected=जुड़े नहीं हैं +admin.url=यूआरएल (URL) +admin.yes=हाँ +adminAllow=ग्राहकों को अनुमति है +adminConnection=कनेक्शन सुरक्षा +adminHttp=अनएन्क्रिप्टेड HTTP कनेक्शन का उपयोग करें +adminHttps=एन्क्रिप्टेड एसएसएल (HTTPS) कनेक्शन का उपयोग करें +adminLocal=केवल स्थानीय कनेक्शन की अनुमति दें +adminLogin=प्रशासन लॉगिन करें +adminLoginCancel=रद्द करना +adminLoginOk=ठीक +adminLogout=लोग आउट +adminOthers=अन्य कंप्यूटर से कनेक्शन की अनुमति दें +adminWebExternalNames=#External names or addresses of this server (comma-separated) +adminPort=पोर्ट नंबर +adminPortWeb=वेब सर्वर पोर्ट नंबर +adminRestart=सर्वर को पुनरारंभ करने के बाद परिवर्तन प्रभावी होते हैं। +adminSave=रक्षित करें +adminSessions=सक्रिय सत्र +adminShutdown=बंद करना +adminTitle=एच 2 कंसोल प्राथमिकताएं +adminTranslateHelp=अनुवाद या H2 कंसोल के अनुवाद में सुधार। +adminTranslateStart=अनुवाद करना +helpAction=कर्म +helpAddAnotherRow=एक और पंक्ति जोड़ें +helpAddDrivers=डेटाबेस ड्राइवर्स जोड़ना +helpAddDriversText=अतिरिक्त डेटाबेस ड्राइवरों को पर्यावरण चर (environment variables) H2DRIVERS या CLASSPATH में ड्राइवर के जार फ़ाइल स्थान को जोड़कर पंजीकृत किया जा सकता है। उदाहरण (विंडोज़) : डेटाबेस ड्राइवर लाइब्रेरी को जोड़ने के लिए C : / Programs / hsqldb / lib / hsqldb.jar, C_: / प्रोग्राम / hsqldb (lib / hsqldb.jar) पर्यावरण चर H2DRIVERS 
सेट करें। +helpAddRow=एक नई पंक्ति जोड़ें +helpCommandHistory=कमांड इतिहास दिखाता है +helpCreateTable=एक नई तालिका बनाएँ +helpDeleteRow=एक पंक्ति निकालें +helpDisconnect=डेटाबेस से डिस्कनेक्ट करता है +helpDisplayThis=यह सहायता पृष्ठ प्रदर्शित करें +helpDropTable=यदि मौजूद है तो तालिका हटाएं +helpExecuteCurrent=वर्तमान SQL कथन निष्पादित करता है +helpExecuteSelected=पाठ चयन द्वारा परिभाषित SQL कथन निष्पादित करता है +helpIcon=चिह्न +helpImportantCommands=महत्वपूर्ण आदेश +helpOperations=संचालन +helpQuery=तालिका को क्वेरी करें +helpSampleSQL=नमूना एसक्यूएल स्क्रिप्ट +helpStatements=एसक्यूएल बयान +helpUpdate=एक पंक्ति में डेटा बदलें +helpWithColumnsIdName=आईडी और NAME कॉलम के साथ +key.alt=Alt +key.ctrl=Ctrl +key.enter=Enter +key.shift=Shift +key.space=Space +login.connect=जुडिये +login.driverClass=चालक वर्ग (Driver Class) +login.driverNotFound=डेटाबेस ड्राइवर नहीं मिला
          ड्राइवरों को जोड़ने के लिए सहायता में देखें +login.goAdmin=पसंद +login.jdbcUrl=JDBC URL +login.language=भाषा +login.login=लॉग इन करें +login.remove=हटाये +login.save=रक्षित करें +login.savedSetting=सहेजे गए सेटिंग्स +login.settingName=सेटिंग्स का नाम +login.testConnection=परीक्षण कनेक्शन +login.testSuccessful=सफल परीक्षण +login.welcome=एच 2 कंसोल +result.1row=1 पंक्ति +result.autoCommitOff=ऑटो कमिट बंद +result.autoCommitOn=ऑटो कमिट चालू +result.bytes=बाइट्स +result.characters=वर्ण +result.maxrowsSet=अधिकतम पंक्ति संख्या सेट है +result.noRows=कोई पंक्तियाँ नहीं +result.noRunningStatement=वर्तमान में कोई स्टेटमेंट नहीं चल रहा है +result.rows=पंक्तियां +result.statementWasCanceled=बयान रद्द कर दिया गया +result.updateCount=अद्यतन गणना +resultEdit.action=कर्म +resultEdit.add=जोड़ना +resultEdit.cancel=रद्द करना +resultEdit.delete=हटाये +resultEdit.edit=संपादित करें +resultEdit.editResult=संपादित करें +resultEdit.save=रक्षित करें +toolbar.all=सब +toolbar.autoCommit=ऑटो कमिट +toolbar.autoComplete=ऑटो पूर्ण +toolbar.autoComplete.full=पूर्ण +toolbar.autoComplete.normal=सामान्य +toolbar.autoComplete.off=बंद +toolbar.autoSelect=स्वतः चयन +toolbar.autoSelect.off=बंद +toolbar.autoSelect.on=पर +toolbar.cancelStatement=वर्तमान कथन को रद्द करें +toolbar.clear=स्पष्ट +toolbar.commit=कमिट +toolbar.disconnect=डिस्कनेक्ट +toolbar.history=कमान का इतिहास +toolbar.maxRows=अधिकतम पंक्तियाँ +toolbar.refresh=ताज़ा करना +toolbar.rollback=रोलबैक +toolbar.run=रन +toolbar.runSelected=चयनित चलाएं +toolbar.sqlStatement=एसक्यूएल बयान +tools.backup=बैकअप +tools.backup.help=एक डेटाबेस का बैकअप बनाता है। +tools.changeFileEncryption=ChangeFileEncryption +tools.changeFileEncryption.help=डेटाबेस फ़ाइल एन्क्रिप्शन पासवर्ड और एल्गोरिथ्म को बदलने देता है। +tools.cipher=सिफर (एईएस या एक्सटीईए) +tools.commandLine=कमांड लाइन +tools.convertTraceFile=ConvertTraceFile +tools.convertTraceFile.help=एक जावा अनुप्रयोग और SQL स्क्रिप्ट के लिए एक .trace.db फ़ाइल में कनवर्ट करता है। 
+tools.createCluster=CreateCluster +tools.createCluster.help=एक स्टैंडअलोन डेटाबेस से एक क्लस्टर बनाता है। +tools.databaseName=डेटाबेस नाम +tools.decryptionPassword=डिक्रिप्शन पासवर्ड +tools.deleteDbFiles=DeleteDbFiles +tools.deleteDbFiles.help=डेटाबेस से संबंधित सभी फ़ाइलों को हटाता है। +tools.directory=निर्देशिका +tools.encryptionPassword=एन्क्रिप्शन पासवर्ड +tools.javaDirectoryClassName=जावा निर्देशिका और वर्ग का नाम +tools.recover=वसूली +tools.recover.help=एक दूषित डेटाबेस को पुनर्प्राप्त करने में मदद करता है। +tools.restore=पुनर्स्थापित +tools.restore.help=डेटाबेस बैकअप पुनर्स्थापित करता है। +tools.result=परिणाम +tools.run=रन +tools.runScript=RunScript +tools.runScript.help=SQL स्क्रिप्ट चलाता है। +tools.script=लिपि +tools.script.help=बैकअप या माइग्रेशन के लिए डेटाबेस को SQL स्क्रिप्ट में बदलने की अनुमति देता है। +tools.scriptFileName=स्क्रिप्ट फ़ाइल नाम +tools.serverList=सर्वर सूची +tools.sourceDatabaseName=स्रोत डेटाबेस का नाम +tools.sourceDatabaseURL=स्रोत डेटाबेस URL +tools.sourceDirectory=स्रोत निर्देशिका +tools.sourceFileName=स्रोत फ़ाइल नाम +tools.sourceScriptFileName=स्रोत स्क्रिप्ट फ़ाइल नाम +tools.targetDatabaseName=लक्ष्य डेटाबेस नाम +tools.targetDatabaseURL=लक्ष्य डेटाबेस URL +tools.targetDirectory=लक्ष्य निर्देशिका +tools.targetFileName=लक्ष्य फ़ाइल नाम +tools.targetScriptFileName=लक्ष्य स्क्रिप्ट फ़ाइल नाम +tools.traceFileName=ट्रेस फ़ाइल नाम +tree.admin=व्यवस्थापक +tree.current=वर्तमान मूल्य +tree.hashed=टुकड़ों में बांटा(Hashed) +tree.increment=वृद्धि +tree.indexes=इंडेक्स +tree.nonUnique=गैर अद्वितीय +tree.sequences=दृश्यों +tree.unique=अद्वितीय +tree.users=उपयोगकर्ता diff --git a/h2/src/main/org/h2/server/web/res/_text_hu.prop b/h2/src/main/org/h2/server/web/res/_text_hu.prop index 56aeddfbcc..1406ed0e2b 100644 --- a/h2/src/main/org/h2/server/web/res/_text_hu.prop +++ b/h2/src/main/org/h2/server/web/res/_text_hu.prop @@ -25,6 +25,7 @@ adminLoginCancel=Mégse adminLoginOk=OK adminLogout=Kilépés adminOthers=Más számítógépekről kezdeményezett 
kapcsolatok engedélyezése +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=#Port number adminPortWeb=Webkiszolgáló portszáma adminRestart=A változtatások a kiszolgáló újraindítása után lépnek érvénybe diff --git a/h2/src/main/org/h2/server/web/res/_text_in.prop b/h2/src/main/org/h2/server/web/res/_text_in.prop index 8a569cb42e..e954ac7a4d 100644 --- a/h2/src/main/org/h2/server/web/res/_text_in.prop +++ b/h2/src/main/org/h2/server/web/res/_text_in.prop @@ -25,6 +25,7 @@ adminLoginCancel=Batal adminLoginOk=OK adminLogout=Keluar adminOthers=Ijinkan koneksi dari komputer lain +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Nomor port adminPortWeb=Nomor port web server adminRestart=Perubahan akan efektif setelah server di-restart. diff --git a/h2/src/main/org/h2/server/web/res/_text_it.prop b/h2/src/main/org/h2/server/web/res/_text_it.prop index ac32ed9406..73fa39f5e5 100644 --- a/h2/src/main/org/h2/server/web/res/_text_it.prop +++ b/h2/src/main/org/h2/server/web/res/_text_it.prop @@ -25,6 +25,7 @@ adminLoginCancel=Annulla adminLoginOk=OK adminLogout=Disconnessione adminOthers=Abilita connessioni da altri computers +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Numero di porta adminPortWeb=Numero di porta del server Web adminRestart=Le modifiche saranno effettive dopo il riavvio del server. 
diff --git a/h2/src/main/org/h2/server/web/res/_text_ja.prop b/h2/src/main/org/h2/server/web/res/_text_ja.prop index e16f17ae4b..f998bfda46 100644 --- a/h2/src/main/org/h2/server/web/res/_text_ja.prop +++ b/h2/src/main/org/h2/server/web/res/_text_ja.prop @@ -25,6 +25,7 @@ adminLoginCancel=キャンセル adminLoginOk=OK adminLogout=ログアウト adminOthers=他のコンピュータからの接続を許可 +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=ポート番号 adminPortWeb=Webサーバポート番号 adminRestart=変更はサーバの再起動後に有効になります。 diff --git a/h2/src/main/org/h2/server/web/res/_text_ko.prop b/h2/src/main/org/h2/server/web/res/_text_ko.prop index 780072c65d..cfa58eb3bf 100644 --- a/h2/src/main/org/h2/server/web/res/_text_ko.prop +++ b/h2/src/main/org/h2/server/web/res/_text_ko.prop @@ -25,6 +25,7 @@ adminLoginCancel=취소 adminLoginOk=확인 adminLogout=로그아웃 adminOthers=다른 컴퓨터에서의 연결 허가 +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=포트 번호 adminPortWeb=웹 서버 포트 번호 adminRestart=변경 사항은 서버 재시작 후 반영됩니다. 
diff --git a/h2/src/main/org/h2/server/web/res/_text_nl.prop b/h2/src/main/org/h2/server/web/res/_text_nl.prop index ccea089d33..5c04618251 100644 --- a/h2/src/main/org/h2/server/web/res/_text_nl.prop +++ b/h2/src/main/org/h2/server/web/res/_text_nl.prop @@ -25,6 +25,7 @@ adminLoginCancel=Annuleren adminLoginOk=OK adminLogout=Uitloggen adminOthers=Sta verbindingen vanaf andere computers toe +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Poortnummer adminPortWeb=Webserver poortnummer adminRestart=Wijzigingen worden doorgevoerd na herstarten server diff --git a/h2/src/main/org/h2/server/web/res/_text_pl.prop b/h2/src/main/org/h2/server/web/res/_text_pl.prop index 0c10899fef..b13069bc0c 100644 --- a/h2/src/main/org/h2/server/web/res/_text_pl.prop +++ b/h2/src/main/org/h2/server/web/res/_text_pl.prop @@ -25,6 +25,7 @@ adminLoginCancel=Anuluj adminLoginOk=OK adminLogout=Wyloguj adminOthers=Pozwalaj na połączenia zdalne +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Numer portu adminPortWeb=Numer portu serwera Web adminRestart=Zmiany będą widoczne po zrestartowaniu serwera. diff --git a/h2/src/main/org/h2/server/web/res/_text_pt_br.prop b/h2/src/main/org/h2/server/web/res/_text_pt_br.prop index ed98fc282f..56516c98c8 100644 --- a/h2/src/main/org/h2/server/web/res/_text_pt_br.prop +++ b/h2/src/main/org/h2/server/web/res/_text_pt_br.prop @@ -25,6 +25,7 @@ adminLoginCancel=Cancelar adminLoginOk=Confirmar adminLogout=Sair adminOthers=Permitir conexões de outros computadores na rede +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Número da porta adminPortWeb=Número da porta do servidor adminRestart=As alterações serão aplicadas depois de reiniciar o servidor. 
@@ -92,7 +93,7 @@ resultEdit.delete=Apagar resultEdit.edit=Alterar resultEdit.editResult=Alterar resultEdit.save=Salvar -toolbar.all=Todas +toolbar.all=Todos toolbar.autoCommit=Auto commit toolbar.autoComplete=Auto complete toolbar.autoComplete.full=Total @@ -110,10 +111,10 @@ toolbar.maxRows=Número máximo de linhas toolbar.refresh=Atualizar toolbar.rollback=Rollback toolbar.run=Executar comando -toolbar.runSelected=#Run Selected +toolbar.runSelected=Executar selecionado toolbar.sqlStatement=Comando SQL tools.backup=#Backup -tools.backup.help=#Creates a backup of a database. +tools.backup.help=Cria um backup de um banco de dados. tools.changeFileEncryption=#ChangeFileEncryption tools.changeFileEncryption.help=#Allows changing the database file encryption password and algorithm. tools.cipher=#Cipher (AES or XTEA) @@ -132,7 +133,7 @@ tools.javaDirectoryClassName=#Java directory and class name tools.recover=#Recover tools.recover.help=#Helps recovering a corrupted database. tools.restore=#Restore -tools.restore.help=#Restores a database backup. +tools.restore.help=Restaura um backup de banco de dados. 
tools.result=#Result tools.run=Executar comando tools.runScript=#RunScript @@ -149,8 +150,8 @@ tools.sourceScriptFileName=#Source script file name tools.targetDatabaseName=#Target database name tools.targetDatabaseURL=#Target database URL tools.targetDirectory=#Target directory -tools.targetFileName=#Target file name -tools.targetScriptFileName=#Target script file name +tools.targetFileName=Nome do arquivo de destino +tools.targetScriptFileName=Nome do arquivo de script de destino tools.traceFileName=#Trace file name tree.admin=Administrador tree.current=Valor corrente diff --git a/h2/src/main/org/h2/server/web/res/_text_pt_pt.prop b/h2/src/main/org/h2/server/web/res/_text_pt_pt.prop index 205084a6ac..3323f3b3a1 100644 --- a/h2/src/main/org/h2/server/web/res/_text_pt_pt.prop +++ b/h2/src/main/org/h2/server/web/res/_text_pt_pt.prop @@ -25,6 +25,7 @@ adminLoginCancel=Cancelar adminLoginOk=Confirmar adminLogout=Sair adminOthers=Permitir conexões a partir de outro computador na rede +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Número do porto adminPortWeb=Número do porto do servidor adminRestart=As alterações apenas serão aplicadas após reiniciar o servidor. diff --git a/h2/src/main/org/h2/server/web/res/_text_ru.prop b/h2/src/main/org/h2/server/web/res/_text_ru.prop index 8b1a32cbe0..4f23c8aa0d 100644 --- a/h2/src/main/org/h2/server/web/res/_text_ru.prop +++ b/h2/src/main/org/h2/server/web/res/_text_ru.prop @@ -1,21 +1,21 @@ .translator=Vlad Alexahin a.help=Помощь a.language=Русский -a.lynxNotSupported=Извините, Lynx пока что не поддерживается +a.lynxNotSupported=Извините, Lynx пока не поддерживается a.password=Пароль -a.remoteConnectionsDisabled=Извините, удаленные подключения ('webAllowOthers') запрещены на этом сервере. +a.remoteConnectionsDisabled=Извините, удалённые подключения ('webAllowOthers') запрещены на этом сервере. 
a.title=H2 Console a.tools=Инструменты a.user=Имя пользователя admin.executing=Выполняется admin.ip=IP -admin.lastAccess=Последний Вход -admin.lastQuery=Последний Запрос +admin.lastAccess=Последний доступ +admin.lastQuery=Последний запрос admin.no=нет admin.notConnected=нет соединения admin.url=URL admin.yes=да -adminAllow=Разрешенные клиенты +adminAllow=Разрешённые клиенты adminConnection=Безопасность подключения adminHttp=Используйте незашифрованные HTTP-соединения adminHttps=Используйте SSL (HTTPS) соединения @@ -25,6 +25,7 @@ adminLoginCancel=Отменить adminLoginOk=OK adminLogout=Выход adminOthers=Разрешить удаленные подключения +adminWebExternalNames=Внешние имена или адреса этого сервера (через запятую) adminPort=Номер порта adminPortWeb=Порт web-сервера adminRestart=Изменения вступят в силу после перезагрузки сервера. @@ -81,7 +82,7 @@ result.bytes=байт result.characters=символов result.maxrowsSet=Установлено максимальное количество строк result.noRows=нет строк -result.noRunningStatement=Сейчас нету выполняемых запросов +result.noRunningStatement=Сейчас нет выполняемых запросов result.rows=строки result.statementWasCanceled=Запрос был отменен result.updateCount=Обновить количество @@ -103,12 +104,12 @@ toolbar.autoSelect.off=Выключено toolbar.autoSelect.on=Включено toolbar.cancelStatement=Отменить текущий запрос toolbar.clear=Очистить -toolbar.commit=Выполнить +toolbar.commit=Зафиксировать транзакцию toolbar.disconnect=Отсоединиться toolbar.history=История команд toolbar.maxRows=Максимальное количество строк toolbar.refresh=Обновить -toolbar.rollback=Вернуть назад +toolbar.rollback=Откатить транзакцию toolbar.run=Выполнить toolbar.runSelected=Выполнить выделенное toolbar.sqlStatement=SQL-запрос @@ -155,9 +156,9 @@ tools.traceFileName=Имя trace-файла tree.admin=Администратор tree.current=Текущее значение tree.hashed=Hashed -tree.increment=Увеличить +tree.increment=Приращение tree.indexes=Индексы tree.nonUnique=Неуникальное -tree.sequences=Последовательность 
+tree.sequences=Последовательности tree.unique=Уникальное tree.users=Пользователи diff --git a/h2/src/main/org/h2/server/web/res/_text_sk.prop b/h2/src/main/org/h2/server/web/res/_text_sk.prop index 2d9c227666..a4f11dba77 100644 --- a/h2/src/main/org/h2/server/web/res/_text_sk.prop +++ b/h2/src/main/org/h2/server/web/res/_text_sk.prop @@ -25,6 +25,7 @@ adminLoginCancel=Zrušiť adminLoginOk=OK adminLogout=Odhlásiť adminOthers=Povoliť pripojenia z iných počítačov +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Číslo portu adminPortWeb=Číslo portu Web servera adminRestart=Zmeny sa vykonajú po reštarte servera diff --git a/h2/src/main/org/h2/server/web/res/_text_tr.prop b/h2/src/main/org/h2/server/web/res/_text_tr.prop index deac77695c..80aed9ffbc 100644 --- a/h2/src/main/org/h2/server/web/res/_text_tr.prop +++ b/h2/src/main/org/h2/server/web/res/_text_tr.prop @@ -25,6 +25,7 @@ adminLoginCancel=İptal et adminLoginOk=Tamam adminLogout=Bitir adminOthers=Başka bilgisayarlardan, veri tabanına bağlanma izni ver +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Port adminPortWeb=Web-Server Port adminRestart=Değişiklikler veri tabanı hizmetçisinin yeniden başlatılmasıyla etkinlik kazanacak. diff --git a/h2/src/main/org/h2/server/web/res/_text_uk.prop b/h2/src/main/org/h2/server/web/res/_text_uk.prop index 8b32ea913a..3c71e5d54c 100644 --- a/h2/src/main/org/h2/server/web/res/_text_uk.prop +++ b/h2/src/main/org/h2/server/web/res/_text_uk.prop @@ -25,6 +25,7 @@ adminLoginCancel=Відмінити adminLoginOk=OK adminLogout=Завершення сеансу adminOthers=Дозволити під'єднання з інших копм'ютерів +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Номер порта adminPortWeb=Номер порта веб сервера adminRestart=Зміни вступлять в силу після перезавантаження сервера. 
diff --git a/h2/src/main/org/h2/server/web/res/_text_zh_cn.prop b/h2/src/main/org/h2/server/web/res/_text_zh_cn.prop index aac9fffdf9..5dabdcd54d 100644 --- a/h2/src/main/org/h2/server/web/res/_text_zh_cn.prop +++ b/h2/src/main/org/h2/server/web/res/_text_zh_cn.prop @@ -25,6 +25,7 @@ adminLoginCancel=取消 adminLoginOk=确认 adminLogout=注销 adminOthers=允许来自其他远程计算机的连接 +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=端口号 adminPortWeb=Web 服务器端口号 adminRestart=更新配置将在重启服务器后生效. diff --git a/h2/src/main/org/h2/server/web/res/_text_zh_tw.prop b/h2/src/main/org/h2/server/web/res/_text_zh_tw.prop index cd3f35eb38..6e726c8271 100644 --- a/h2/src/main/org/h2/server/web/res/_text_zh_tw.prop +++ b/h2/src/main/org/h2/server/web/res/_text_zh_tw.prop @@ -25,6 +25,7 @@ adminLoginCancel=取消 adminLoginOk=確定 adminLogout=登出 adminOthers=允許來自其他電腦的連接 +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=通訊埠 adminPortWeb=Web 伺服器的通訊埠 adminRestart=伺服器重新啟動後修改才會生效. diff --git a/h2/src/main/org/h2/server/web/res/admin.jsp b/h2/src/main/org/h2/server/web/res/admin.jsp index 97cf11151e..f9b3ae2337 100644 --- a/h2/src/main/org/h2/server/web/res/admin.jsp +++ b/h2/src/main/org/h2/server/web/res/admin.jsp @@ -1,7 +1,7 @@ @@ -39,6 +39,10 @@ Initial Developer: H2 Group ${text.adminOthers}

          +

          + ${text.adminWebExternalNames}:
          + +

          ${text.adminConnection}

          diff --git a/h2/src/main/org/h2/server/web/res/adminLogin.jsp b/h2/src/main/org/h2/server/web/res/adminLogin.jsp index 01f1f9ecef..4f13e87478 100644 --- a/h2/src/main/org/h2/server/web/res/adminLogin.jsp +++ b/h2/src/main/org/h2/server/web/res/adminLogin.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/error.jsp b/h2/src/main/org/h2/server/web/res/error.jsp index d7cda64e4d..f0f26fe6b5 100644 --- a/h2/src/main/org/h2/server/web/res/error.jsp +++ b/h2/src/main/org/h2/server/web/res/error.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/frame.jsp b/h2/src/main/org/h2/server/web/res/frame.jsp index 3bf168464a..224b6a3f60 100644 --- a/h2/src/main/org/h2/server/web/res/frame.jsp +++ b/h2/src/main/org/h2/server/web/res/frame.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/header.jsp b/h2/src/main/org/h2/server/web/res/header.jsp index af39b78be6..5edb39866b 100644 --- a/h2/src/main/org/h2/server/web/res/header.jsp +++ b/h2/src/main/org/h2/server/web/res/header.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/help.jsp b/h2/src/main/org/h2/server/web/res/help.jsp index 720910fa35..c5d9421bc7 100644 --- a/h2/src/main/org/h2/server/web/res/help.jsp +++ b/h2/src/main/org/h2/server/web/res/help.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/helpTranslate.jsp b/h2/src/main/org/h2/server/web/res/helpTranslate.jsp index 631f62516b..2df2f6b0af 100644 --- a/h2/src/main/org/h2/server/web/res/helpTranslate.jsp +++ b/h2/src/main/org/h2/server/web/res/helpTranslate.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/index.jsp b/h2/src/main/org/h2/server/web/res/index.jsp index b674806d1e..d4577b3cd6 100644 --- a/h2/src/main/org/h2/server/web/res/index.jsp +++ b/h2/src/main/org/h2/server/web/res/index.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/login.jsp b/h2/src/main/org/h2/server/web/res/login.jsp index a8adb9e38f..ab9483f83b 100644 --- 
a/h2/src/main/org/h2/server/web/res/login.jsp +++ b/h2/src/main/org/h2/server/web/res/login.jsp @@ -1,7 +1,7 @@ @@ -97,12 +97,11 @@ Initial Developer: H2 Group Absolute locations like jdbc:h2:/data/db/test are supported. In embedded mode, the database runs in the same process as the application. Only one process may access a database at any time. - Databases are automatically created if they don't exist. - Warning: if no path is used (for example jdbc:h2:test), - then the database is stored in the current working directory - (the directory where the application was started). - URLs of the form jdbc:h2:data/test are relative to - the current working directory. It is recommended to use locations relative to ~ + Databases are automatically created if they don't exist +
          if you have a permission. + URLs of the form jdbc:h2:./data/test are relative to + the current working directory (the directory where the application was started). + It is recommended to use locations relative to ~ or absolute locations.

          @@ -124,7 +123,7 @@ Initial Developer: H2 Group

          - For more information, see Database URL Overview. + For more information, see Database URL Overview.

          ${error}

          diff --git a/h2/src/main/org/h2/server/web/res/notAllowed.jsp b/h2/src/main/org/h2/server/web/res/notAllowed.jsp index c8bab9a5ee..bb4b34fb33 100644 --- a/h2/src/main/org/h2/server/web/res/notAllowed.jsp +++ b/h2/src/main/org/h2/server/web/res/notAllowed.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/query.jsp b/h2/src/main/org/h2/server/web/res/query.jsp index 3918130595..a177c03448 100644 --- a/h2/src/main/org/h2/server/web/res/query.jsp +++ b/h2/src/main/org/h2/server/web/res/query.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/result.jsp b/h2/src/main/org/h2/server/web/res/result.jsp index 84b8755ae4..72a4ace4b3 100644 --- a/h2/src/main/org/h2/server/web/res/result.jsp +++ b/h2/src/main/org/h2/server/web/res/result.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/stylesheet.css b/h2/src/main/org/h2/server/web/res/stylesheet.css index a3647665f5..8d217a060e 100644 --- a/h2/src/main/org/h2/server/web/res/stylesheet.css +++ b/h2/src/main/org/h2/server/web/res/stylesheet.css @@ -1,7 +1,7 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group */ td, input, select, textarea, body, code, pre { @@ -94,6 +94,10 @@ ul { margin: 10px; } +table.resultSet { + white-space: pre; +} + .toolbar { background-color: #ece9d8; } diff --git a/h2/src/main/org/h2/server/web/res/table.js b/h2/src/main/org/h2/server/web/res/table.js index 8031d74faa..841b3dad9b 100644 --- a/h2/src/main/org/h2/server/web/res/table.js +++ b/h2/src/main/org/h2/server/web/res/table.js @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * * Initial Developer: H2 Group */ @@ -142,7 +142,7 @@ function editKeyDown(row, object, event) { function getInnerText(el) { if (typeof el == "string") return el; - if (typeof el == "undefined") { return el }; + if (typeof el == "undefined") return el; if (el.innerText) { // not needed but it is faster return el.innerText; @@ -175,7 +175,6 @@ function resortTable(link) { span = link.childNodes[ci]; } } - var spantext = getInnerText(span); var td = link.parentNode; var column = td.cellIndex; var table = getParent(td,'TABLE'); diff --git a/h2/src/main/org/h2/server/web/res/tables.jsp b/h2/src/main/org/h2/server/web/res/tables.jsp index 4e17ee98f6..229c0219d5 100644 --- a/h2/src/main/org/h2/server/web/res/tables.jsp +++ b/h2/src/main/org/h2/server/web/res/tables.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/tools.jsp b/h2/src/main/org/h2/server/web/res/tools.jsp index 856ae17950..110378c8d2 100644 --- a/h2/src/main/org/h2/server/web/res/tools.jsp +++ b/h2/src/main/org/h2/server/web/res/tools.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/tree.js b/h2/src/main/org/h2/server/web/res/tree.js index 7e7c4f3670..e4de5f3928 100644 --- a/h2/src/main/org/h2/server/web/res/tree.js +++ b/h2/src/main/org/h2/server/web/res/tree.js @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* * Initial Developer: H2 Group */ diff --git a/h2/src/main/org/h2/store/CountingReaderInputStream.java b/h2/src/main/org/h2/store/CountingReaderInputStream.java index b059d9c5b9..23f4e66389 100644 --- a/h2/src/main/org/h2/store/CountingReaderInputStream.java +++ b/h2/src/main/org/h2/store/CountingReaderInputStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; @@ -35,7 +35,7 @@ public class CountingReaderInputStream extends InputStream { private long length; private long remaining; - CountingReaderInputStream(Reader reader, long maxLength) { + public CountingReaderInputStream(Reader reader, long maxLength) { this.reader = reader; this.remaining = maxLength; } diff --git a/h2/src/main/org/h2/store/Data.java b/h2/src/main/org/h2/store/Data.java index 7388c15dd4..76136b935e 100644 --- a/h2/src/main/org/h2/store/Data.java +++ b/h2/src/main/org/h2/store/Data.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group * * The variable size number format code is a port from SQLite, @@ -11,51 +11,11 @@ import java.io.IOException; import java.io.OutputStream; import java.io.Reader; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.sql.Timestamp; -import java.util.Arrays; -import org.h2.api.ErrorCode; -import org.h2.api.IntervalQualifier; import org.h2.engine.Constants; -import org.h2.message.DbException; -import org.h2.result.ResultInterface; -import org.h2.result.SimpleResult; import org.h2.util.Bits; -import org.h2.util.DateTimeUtils; -import org.h2.util.JdbcUtils; import org.h2.util.MathUtils; import org.h2.util.Utils; -import org.h2.value.TypeInfo; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; -import org.h2.value.ValueCollectionBase; -import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; -import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; -import org.h2.value.ValueGeometry; -import org.h2.value.ValueInt; -import org.h2.value.ValueInterval; -import org.h2.value.ValueJavaObject; -import org.h2.value.ValueLob; -import org.h2.value.ValueLobDb; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; -import org.h2.value.ValueRow; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueStringFixed; -import org.h2.value.ValueStringIgnoreCase; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; -import org.h2.value.ValueTimestampTimeZone; -import org.h2.value.ValueUuid; /** * This class represents a byte buffer that contains persistent data of a page. @@ -66,62 +26,6 @@ */ public class Data { - /** - * The length of an integer value. - */ - public static final int LENGTH_INT = 4; - - /** - * The length of a long value. 
- */ - public static final int LENGTH_LONG = 8; - - private static final byte NULL = 0; - private static final byte BYTE = 2; - private static final byte SHORT = 3; - private static final byte INT = 4; - private static final byte LONG = 5; - private static final byte DECIMAL = 6; - private static final byte DOUBLE = 7; - private static final byte FLOAT = 8; - private static final byte TIME = 9; - private static final byte DATE = 10; - private static final byte TIMESTAMP = 11; - private static final byte BYTES = 12; - private static final byte STRING = 13; - private static final byte STRING_IGNORECASE = 14; - private static final byte BLOB = 15; - private static final byte CLOB = 16; - private static final byte ARRAY = 17; - private static final byte RESULT_SET = 18; - private static final byte JAVA_OBJECT = 19; - private static final byte UUID = 20; - private static final byte STRING_FIXED = 21; - private static final byte GEOMETRY = 22; - private static final byte TIMESTAMP_TZ = 24; - private static final byte ENUM = 25; - private static final byte INTERVAL = 26; - private static final byte ROW = 27; - private static final byte INT_0_15 = 32; - private static final byte LONG_0_7 = 48; - private static final byte DECIMAL_0_1 = 56; - private static final byte DECIMAL_SMALL_0 = 58; - private static final byte DECIMAL_SMALL = 59; - private static final byte DOUBLE_0_1 = 60; - private static final byte FLOAT_0_1 = 62; - private static final byte BOOLEAN_FALSE = 64; - private static final byte BOOLEAN_TRUE = 65; - private static final byte INT_NEG = 66; - private static final byte LONG_NEG = 67; - private static final byte STRING_0_31 = 68; - private static final int BYTES_0_31 = 100; - private static final int LOCAL_TIME = 132; - private static final int LOCAL_DATE = 133; - private static final int LOCAL_TIMESTAMP = 134; - private static final int CUSTOM_DATA_TYPE = 135; - - private static final long MILLIS_PER_MINUTE = 1000 * 60; - /** * The data itself. 
*/ @@ -132,28 +36,8 @@ public class Data { */ private int pos; - /** - * The data handler responsible for lob objects. - */ - private final DataHandler handler; - - private final boolean storeLocalTime; - - private Data(DataHandler handler, byte[] data, boolean storeLocalTime) { - this.handler = handler; + private Data(byte[] data) { this.data = data; - this.storeLocalTime = storeLocalTime; - } - - /** - * Update an integer at the given position. - * The current position is not change. - * - * @param pos the position - * @param x the value - */ - public void setInt(int pos, int x) { - Bits.writeInt(data, pos, x); } /** @@ -179,123 +63,6 @@ public int readInt() { return x; } - /** - * Get the length of a String. This includes the bytes required to encode - * the length. - * - * @param s the string - * @return the number of bytes required - */ - public static int getStringLen(String s) { - int len = s.length(); - return getStringWithoutLengthLen(s, len) + getVarIntLen(len); - } - - /** - * Calculate the length of String, excluding the bytes required to encode - * the length. - *

          - * For performance reasons the internal representation of a String is - * similar to UTF-8, but not exactly UTF-8. - * - * @param s the string - * @param len the length of the string - * @return the number of bytes required - */ - private static int getStringWithoutLengthLen(String s, int len) { - int plus = 0; - for (int i = 0; i < len; i++) { - char c = s.charAt(i); - if (c >= 0x800) { - plus += 2; - } else if (c >= 0x80) { - plus++; - } - } - return len + plus; - } - - /** - * Read a String value. - * The current position is incremented. - * - * @return the value - */ - public String readString() { - int len = readVarInt(); - return readString(len); - } - - /** - * Read a String from the byte array. - *

          - * For performance reasons the internal representation of a String is - * similar to UTF-8, but not exactly UTF-8. - * - * @param len the length of the resulting string - * @return the String - */ - private String readString(int len) { - byte[] buff = data; - int p = pos; - char[] chars = new char[len]; - for (int i = 0; i < len; i++) { - int x = buff[p++] & 0xff; - if (x < 0x80) { - chars[i] = (char) x; - } else if (x >= 0xe0) { - chars[i] = (char) (((x & 0xf) << 12) + - ((buff[p++] & 0x3f) << 6) + - (buff[p++] & 0x3f)); - } else { - chars[i] = (char) (((x & 0x1f) << 6) + - (buff[p++] & 0x3f)); - } - } - pos = p; - return new String(chars); - } - - /** - * Write a String. - * The current position is incremented. - * - * @param s the value - */ - public void writeString(String s) { - int len = s.length(); - writeVarInt(len); - writeStringWithoutLength(s, len); - } - - /** - * Write a String. - *

          - * For performance reasons the internal representation of a String is - * similar to UTF-8, but not exactly UTF-8. - * - * @param s the string - * @param len the number of characters to write - */ - private void writeStringWithoutLength(String s, int len) { - int p = pos; - byte[] buff = data; - for (int i = 0; i < len; i++) { - int c = s.charAt(i); - if (c < 0x80) { - buff[p++] = (byte) c; - } else if (c >= 0x800) { - buff[p++] = (byte) (0xe0 | (c >> 12)); - buff[p++] = (byte) (((c >> 6) & 0x3f)); - buff[p++] = (byte) (c & 0x3f); - } else { - buff[p++] = (byte) (0xc0 | (c >> 6)); - buff[p++] = (byte) (c & 0x3f); - } - } - pos = p; - } - private void writeStringWithoutLength(char[] chars, int len) { int p = pos; byte[] buff = data; @@ -316,33 +83,13 @@ private void writeStringWithoutLength(char[] chars, int len) { } /** - * Create a new buffer for the given handler. The - * handler will decide what type of buffer is created. + * Create a new buffer. * - * @param handler the data handler * @param capacity the initial capacity of the buffer - * @param storeLocalTime - * store DATE, TIME, and TIMESTAMP values with local time storage - * format * @return the buffer */ - public static Data create(DataHandler handler, int capacity, boolean storeLocalTime) { - return new Data(handler, new byte[capacity], storeLocalTime); - } - - /** - * Create a new buffer using the given data for the given handler. The - * handler will decide what type of buffer is created. 
- * - * @param handler the data handler - * @param buff the data - * @param storeLocalTime - * store DATE, TIME, and TIMESTAMP values with local time storage - * format - * @return the buffer - */ - public static Data create(DataHandler handler, byte[] buff, boolean storeLocalTime) { - return new Data(handler, buff, storeLocalTime); + public static Data create(int capacity) { + return new Data(new byte[capacity]); } /** @@ -396,845 +143,6 @@ public void read(byte[] buff, int off, int len) { pos += len; } - /** - * Append one single byte. - * - * @param x the value - */ - public void writeByte(byte x) { - data[pos++] = x; - } - - /** - * Read one single byte. - * - * @return the value - */ - public byte readByte() { - return data[pos++]; - } - - /** - * Read a long value. This method reads two int values and combines them. - * - * @return the long value - */ - public long readLong() { - long x = Bits.readLong(data, pos); - pos += 8; - return x; - } - - /** - * Append a long value. This method writes two int values. - * - * @param x the value - */ - public void writeLong(long x) { - Bits.writeLong(data, pos, x); - pos += 8; - } - - /** - * Append a value. - * - * @param v the value - */ - public void writeValue(Value v) { - int start = pos; - if (v == ValueNull.INSTANCE) { - data[pos++] = NULL; - return; - } - int type = v.getValueType(); - switch (type) { - case Value.BOOLEAN: - writeByte(v.getBoolean() ? BOOLEAN_TRUE : BOOLEAN_FALSE); - break; - case Value.BYTE: - writeByte(BYTE); - writeByte(v.getByte()); - break; - case Value.SHORT: - writeByte(SHORT); - writeShortInt(v.getShort()); - break; - case Value.ENUM: - case Value.INT: { - int x = v.getInt(); - if (x < 0) { - writeByte(INT_NEG); - writeVarInt(-x); - } else if (x < 16) { - writeByte((byte) (INT_0_15 + x)); - } else { - writeByte(type == Value.INT ? 
INT : ENUM); - writeVarInt(x); - } - break; - } - case Value.LONG: { - long x = v.getLong(); - if (x < 0) { - writeByte(LONG_NEG); - writeVarLong(-x); - } else if (x < 8) { - writeByte((byte) (LONG_0_7 + x)); - } else { - writeByte(LONG); - writeVarLong(x); - } - break; - } - case Value.DECIMAL: { - BigDecimal x = v.getBigDecimal(); - if (BigDecimal.ZERO.equals(x)) { - writeByte(DECIMAL_0_1); - } else if (BigDecimal.ONE.equals(x)) { - writeByte((byte) (DECIMAL_0_1 + 1)); - } else { - int scale = x.scale(); - BigInteger b = x.unscaledValue(); - int bits = b.bitLength(); - if (bits <= 63) { - if (scale == 0) { - writeByte(DECIMAL_SMALL_0); - writeVarLong(b.longValue()); - } else { - writeByte(DECIMAL_SMALL); - writeVarInt(scale); - writeVarLong(b.longValue()); - } - } else { - writeByte(DECIMAL); - writeVarInt(scale); - byte[] bytes = b.toByteArray(); - writeVarInt(bytes.length); - write(bytes, 0, bytes.length); - } - } - break; - } - case Value.TIME: - if (storeLocalTime) { - writeByte((byte) LOCAL_TIME); - ValueTime t = (ValueTime) v; - long nanos = t.getNanos(); - long millis = nanos / 1_000_000; - nanos -= millis * 1_000_000; - writeVarLong(millis); - writeVarLong(nanos); - } else { - writeByte(TIME); - writeVarLong(DateTimeUtils.getTimeLocalWithoutDst(v.getTime())); - } - break; - case Value.DATE: { - if (storeLocalTime) { - writeByte((byte) LOCAL_DATE); - long x = ((ValueDate) v).getDateValue(); - writeVarLong(x); - } else { - writeByte(DATE); - long x = DateTimeUtils.getTimeLocalWithoutDst(v.getDate()); - writeVarLong(x / MILLIS_PER_MINUTE); - } - break; - } - case Value.TIMESTAMP: { - if (storeLocalTime) { - writeByte((byte) LOCAL_TIMESTAMP); - ValueTimestamp ts = (ValueTimestamp) v; - long dateValue = ts.getDateValue(); - writeVarLong(dateValue); - long nanos = ts.getTimeNanos(); - long millis = nanos / 1_000_000; - nanos -= millis * 1_000_000; - writeVarLong(millis); - writeVarLong(nanos); - } else { - Timestamp ts = v.getTimestamp(); - 
writeByte(TIMESTAMP); - writeVarLong(DateTimeUtils.getTimeLocalWithoutDst(ts)); - writeVarInt(ts.getNanos() % 1_000_000); - } - break; - } - case Value.TIMESTAMP_TZ: { - ValueTimestampTimeZone ts = (ValueTimestampTimeZone) v; - writeByte(TIMESTAMP_TZ); - writeVarLong(ts.getDateValue()); - writeVarLong(ts.getTimeNanos()); - writeVarInt(ts.getTimeZoneOffsetMins()); - break; - } - case Value.GEOMETRY: - // fall though - case Value.JAVA_OBJECT: { - writeByte(type == Value.GEOMETRY ? GEOMETRY : JAVA_OBJECT); - byte[] b = v.getBytesNoCopy(); - int len = b.length; - writeVarInt(len); - write(b, 0, len); - break; - } - case Value.BYTES: { - byte[] b = v.getBytesNoCopy(); - int len = b.length; - if (len < 32) { - writeByte((byte) (BYTES_0_31 + len)); - write(b, 0, len); - } else { - writeByte(BYTES); - writeVarInt(len); - write(b, 0, len); - } - break; - } - case Value.UUID: { - writeByte(UUID); - ValueUuid uuid = (ValueUuid) v; - writeLong(uuid.getHigh()); - writeLong(uuid.getLow()); - break; - } - case Value.STRING: { - String s = v.getString(); - int len = s.length(); - if (len < 32) { - writeByte((byte) (STRING_0_31 + len)); - writeStringWithoutLength(s, len); - } else { - writeByte(STRING); - writeString(s); - } - break; - } - case Value.STRING_IGNORECASE: - writeByte(STRING_IGNORECASE); - writeString(v.getString()); - break; - case Value.STRING_FIXED: - writeByte(STRING_FIXED); - writeString(v.getString()); - break; - case Value.DOUBLE: { - double x = v.getDouble(); - if (x == 1.0d) { - writeByte((byte) (DOUBLE_0_1 + 1)); - } else { - long d = Double.doubleToLongBits(x); - if (d == ValueDouble.ZERO_BITS) { - writeByte(DOUBLE_0_1); - } else { - writeByte(DOUBLE); - writeVarLong(Long.reverse(d)); - } - } - break; - } - case Value.FLOAT: { - float x = v.getFloat(); - if (x == 1.0f) { - writeByte((byte) (FLOAT_0_1 + 1)); - } else { - int f = Float.floatToIntBits(x); - if (f == ValueFloat.ZERO_BITS) { - writeByte(FLOAT_0_1); - } else { - writeByte(FLOAT); - 
writeVarInt(Integer.reverse(f)); - } - } - break; - } - case Value.BLOB: - case Value.CLOB: { - writeByte(type == Value.BLOB ? BLOB : CLOB); - if (v instanceof ValueLob) { - ValueLob lob = (ValueLob) v; - byte[] small = lob.getSmall(); - if (small == null) { - int t = -1; - if (!lob.isLinkedToTable()) { - t = -2; - } - writeVarInt(t); - writeVarInt(lob.getTableId()); - writeVarInt(lob.getObjectId()); - writeVarLong(lob.getType().getPrecision()); - writeByte((byte) (lob.isCompressed() ? 1 : 0)); - if (t == -2) { - writeString(lob.getFileName()); - } - } else { - writeVarInt(small.length); - write(small, 0, small.length); - } - } else { - ValueLobDb lob = (ValueLobDb) v; - byte[] small = lob.getSmall(); - if (small == null) { - writeVarInt(-3); - writeVarInt(lob.getTableId()); - writeVarLong(lob.getLobId()); - writeVarLong(lob.getType().getPrecision()); - } else { - writeVarInt(small.length); - write(small, 0, small.length); - } - } - break; - } - case Value.ARRAY: - case Value.ROW: { - writeByte(type == Value.ARRAY ? 
ARRAY : ROW); - Value[] list = ((ValueCollectionBase) v).getList(); - writeVarInt(list.length); - for (Value x : list) { - writeValue(x); - } - break; - } - case Value.RESULT_SET: { - writeByte(RESULT_SET); - ResultInterface result = ((ValueResultSet) v).getResult(); - result.reset(); - int columnCount = result.getVisibleColumnCount(); - writeVarInt(columnCount); - for (int i = 0; i < columnCount; i++) { - writeString(result.getAlias(i)); - writeString(result.getColumnName(i)); - TypeInfo columnType = result.getColumnType(i); - writeVarInt(columnType.getValueType()); - writeVarLong(columnType.getPrecision()); - writeVarInt(columnType.getScale()); - } - while (result.next()) { - writeByte((byte) 1); - Value[] row = result.currentRow(); - for (int i = 0; i < columnCount; i++) { - writeValue(row[i]); - } - } - writeByte((byte) 0); - break; - } - case Value.INTERVAL_YEAR: - case Value.INTERVAL_MONTH: - case Value.INTERVAL_DAY: - case Value.INTERVAL_HOUR: - case Value.INTERVAL_MINUTE: { - ValueInterval interval = (ValueInterval) v; - int ordinal = type - Value.INTERVAL_YEAR; - if (interval.isNegative()) { - ordinal = ~ordinal; - } - writeByte(INTERVAL); - writeByte((byte) ordinal); - writeVarLong(interval.getLeading()); - break; - } - case Value.INTERVAL_SECOND: - case Value.INTERVAL_YEAR_TO_MONTH: - case Value.INTERVAL_DAY_TO_HOUR: - case Value.INTERVAL_DAY_TO_MINUTE: - case Value.INTERVAL_DAY_TO_SECOND: - case Value.INTERVAL_HOUR_TO_MINUTE: - case Value.INTERVAL_HOUR_TO_SECOND: - case Value.INTERVAL_MINUTE_TO_SECOND: { - ValueInterval interval = (ValueInterval) v; - int ordinal = type - Value.INTERVAL_YEAR; - if (interval.isNegative()) { - ordinal = ~ordinal; - } - writeByte(INTERVAL); - writeByte((byte) ordinal); - writeVarLong(interval.getLeading()); - writeVarLong(interval.getRemaining()); - break; - } - default: - if (JdbcUtils.customDataTypesHandler != null) { - byte[] b = v.getBytesNoCopy(); - writeByte((byte) CUSTOM_DATA_TYPE); - writeVarInt(type); - 
writeVarInt(b.length); - write(b, 0, b.length); - break; - } - DbException.throwInternalError("type=" + v.getValueType()); - } - assert pos - start == getValueLen(v) - : "value size error: got " + (pos - start) + " expected " + getValueLen(v); - } - - /** - * Read a value. - * - * @return the value - */ - public Value readValue() { - int type = data[pos++] & 255; - switch (type) { - case NULL: - return ValueNull.INSTANCE; - case BOOLEAN_TRUE: - return ValueBoolean.TRUE; - case BOOLEAN_FALSE: - return ValueBoolean.FALSE; - case INT_NEG: - return ValueInt.get(-readVarInt()); - case ENUM: - case INT: - return ValueInt.get(readVarInt()); - case LONG_NEG: - return ValueLong.get(-readVarLong()); - case Value.LONG: - return ValueLong.get(readVarLong()); - case BYTE: - return ValueByte.get(readByte()); - case SHORT: - return ValueShort.get(readShortInt()); - case DECIMAL_0_1: - return (ValueDecimal) ValueDecimal.ZERO; - case DECIMAL_0_1 + 1: - return (ValueDecimal) ValueDecimal.ONE; - case DECIMAL_SMALL_0: - return ValueDecimal.get(BigDecimal.valueOf(readVarLong())); - case DECIMAL_SMALL: { - int scale = readVarInt(); - return ValueDecimal.get(BigDecimal.valueOf(readVarLong(), scale)); - } - case DECIMAL: { - int scale = readVarInt(); - int len = readVarInt(); - byte[] buff = Utils.newBytes(len); - read(buff, 0, len); - BigInteger b = new BigInteger(buff); - return ValueDecimal.get(new BigDecimal(b, scale)); - } - case LOCAL_DATE: { - return ValueDate.fromDateValue(readVarLong()); - } - case DATE: { - long x = readVarLong() * MILLIS_PER_MINUTE; - return ValueDate.fromMillis(DateTimeUtils.getTimeUTCWithoutDst(x)); - } - case LOCAL_TIME: { - long nanos = readVarLong() * 1_000_000 + readVarLong(); - return ValueTime.fromNanos(nanos); - } - case TIME: - // need to normalize the year, month and day - return ValueTime.fromMillis( - DateTimeUtils.getTimeUTCWithoutDst(readVarLong())); - case LOCAL_TIMESTAMP: { - long dateValue = readVarLong(); - long nanos = readVarLong() * 
1_000_000 + readVarLong(); - return ValueTimestamp.fromDateValueAndNanos(dateValue, nanos); - } - case TIMESTAMP: { - return ValueTimestamp.fromMillisNanos( - DateTimeUtils.getTimeUTCWithoutDst(readVarLong()), - readVarInt() % 1_000_000); - } - case TIMESTAMP_TZ: { - long dateValue = readVarLong(); - long nanos = readVarLong(); - short tz = (short) readVarInt(); - return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, nanos, tz); - } - case BYTES: { - int len = readVarInt(); - byte[] b = Utils.newBytes(len); - read(b, 0, len); - return ValueBytes.getNoCopy(b); - } - case GEOMETRY: { - int len = readVarInt(); - byte[] b = Utils.newBytes(len); - read(b, 0, len); - return ValueGeometry.get(b); - } - case JAVA_OBJECT: { - int len = readVarInt(); - byte[] b = Utils.newBytes(len); - read(b, 0, len); - return ValueJavaObject.getNoCopy(null, b, handler); - } - case UUID: - return ValueUuid.get(readLong(), readLong()); - case STRING: - return ValueString.get(readString()); - case STRING_IGNORECASE: - return ValueStringIgnoreCase.get(readString()); - case STRING_FIXED: - return ValueStringFixed.get(readString()); - case FLOAT_0_1: - return ValueFloat.ZERO; - case FLOAT_0_1 + 1: - return ValueFloat.ONE; - case DOUBLE_0_1: - return ValueDouble.ZERO; - case DOUBLE_0_1 + 1: - return ValueDouble.ONE; - case DOUBLE: - return ValueDouble.get(Double.longBitsToDouble(Long.reverse(readVarLong()))); - case FLOAT: - return ValueFloat.get(Float.intBitsToFloat(Integer.reverse(readVarInt()))); - case BLOB: - case CLOB: { - int smallLen = readVarInt(); - if (smallLen >= 0) { - byte[] small = Utils.newBytes(smallLen); - read(small, 0, smallLen); - return ValueLobDb.createSmallLob(type == BLOB ? Value.BLOB : Value.CLOB, small); - } else if (smallLen == -3) { - int tableId = readVarInt(); - long lobId = readVarLong(); - long precision = readVarLong(); - return ValueLobDb.create(type == BLOB ? 
Value.BLOB : Value.CLOB, handler, tableId, - lobId, null, precision); - } else { - int tableId = readVarInt(); - int objectId = readVarInt(); - long precision = 0; - boolean compression = false; - // -1: regular; -2: regular, but not linked (in this case: - // including file name) - if (smallLen == -1 || smallLen == -2) { - precision = readVarLong(); - compression = readByte() == 1; - } - if (smallLen == -2) { - String filename = readString(); - return ValueLob.openUnlinked(type == BLOB ? Value.BLOB : Value.CLOB, handler, tableId, - objectId, precision, compression, filename); - } - return ValueLob.openLinked(type == BLOB ? Value.BLOB : Value.CLOB, handler, tableId, - objectId, precision, compression); - } - } - case ARRAY: - case ROW: // Special storage type for ValueRow - { - int len = readVarInt(); - Value[] list = new Value[len]; - for (int i = 0; i < len; i++) { - list[i] = readValue(); - } - return type == ARRAY ? ValueArray.get(list) : ValueRow.get(list); - } - case RESULT_SET: { - SimpleResult rs = new SimpleResult(); - int columns = readVarInt(); - for (int i = 0; i < columns; i++) { - rs.addColumn(readString(), readString(), readVarInt(), readVarLong(), readVarInt()); - } - while (readByte() != 0) { - Value[] o = new Value[columns]; - for (int i = 0; i < columns; i++) { - o[i] = readValue(); - } - rs.addRow(o); - } - return ValueResultSet.get(rs); - } - case INTERVAL: { - int ordinal = readByte(); - boolean negative = ordinal < 0; - if (negative) { - ordinal = ~ordinal; - } - return ValueInterval.from(IntervalQualifier.valueOf(ordinal), negative, readVarLong(), - ordinal < 5 ? 
0 : readVarLong()); - } - case CUSTOM_DATA_TYPE: { - if (JdbcUtils.customDataTypesHandler != null) { - int customType = readVarInt(); - int len = readVarInt(); - byte[] b = Utils.newBytes(len); - read(b, 0, len); - return JdbcUtils.customDataTypesHandler.convert( - ValueBytes.getNoCopy(b), customType); - } - throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, - "No CustomDataTypesHandler has been set up"); - } - default: - if (type >= INT_0_15 && type < INT_0_15 + 16) { - return ValueInt.get(type - INT_0_15); - } else if (type >= LONG_0_7 && type < LONG_0_7 + 8) { - return ValueLong.get(type - LONG_0_7); - } else if (type >= BYTES_0_31 && type < BYTES_0_31 + 32) { - int len = type - BYTES_0_31; - byte[] b = Utils.newBytes(len); - read(b, 0, len); - return ValueBytes.getNoCopy(b); - } else if (type >= STRING_0_31 && type < STRING_0_31 + 32) { - return ValueString.get(readString(type - STRING_0_31)); - } - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "type: " + type); - } - } - - /** - * Calculate the number of bytes required to encode the given value. - * - * @param v the value - * @return the number of bytes required to store this value - */ - public int getValueLen(Value v) { - return getValueLen(v, storeLocalTime); - } - - /** - * Calculate the number of bytes required to encode the given value. 
- * - * @param v the value - * @param storeLocalTime - * calculate size of DATE, TIME, and TIMESTAMP values with local - * time storage format - * @return the number of bytes required to store this value - */ - public static int getValueLen(Value v, boolean storeLocalTime) { - if (v == ValueNull.INSTANCE) { - return 1; - } - switch (v.getValueType()) { - case Value.BOOLEAN: - return 1; - case Value.BYTE: - return 2; - case Value.SHORT: - return 3; - case Value.ENUM: - case Value.INT: { - int x = v.getInt(); - if (x < 0) { - return 1 + getVarIntLen(-x); - } else if (x < 16) { - return 1; - } else { - return 1 + getVarIntLen(x); - } - } - case Value.LONG: { - long x = v.getLong(); - if (x < 0) { - return 1 + getVarLongLen(-x); - } else if (x < 8) { - return 1; - } else { - return 1 + getVarLongLen(x); - } - } - case Value.DOUBLE: { - double x = v.getDouble(); - if (x == 1.0d) { - return 1; - } - long d = Double.doubleToLongBits(x); - if (d == ValueDouble.ZERO_BITS) { - return 1; - } - return 1 + getVarLongLen(Long.reverse(d)); - } - case Value.FLOAT: { - float x = v.getFloat(); - if (x == 1.0f) { - return 1; - } - int f = Float.floatToIntBits(x); - if (f == ValueFloat.ZERO_BITS) { - return 1; - } - return 1 + getVarIntLen(Integer.reverse(f)); - } - case Value.STRING: { - String s = v.getString(); - int len = s.length(); - if (len < 32) { - return 1 + getStringWithoutLengthLen(s, len); - } - return 1 + getStringLen(s); - } - case Value.STRING_IGNORECASE: - case Value.STRING_FIXED: - return 1 + getStringLen(v.getString()); - case Value.DECIMAL: { - BigDecimal x = v.getBigDecimal(); - if (BigDecimal.ZERO.equals(x)) { - return 1; - } else if (BigDecimal.ONE.equals(x)) { - return 1; - } - int scale = x.scale(); - BigInteger b = x.unscaledValue(); - int bits = b.bitLength(); - if (bits <= 63) { - if (scale == 0) { - return 1 + getVarLongLen(b.longValue()); - } - return 1 + getVarIntLen(scale) + getVarLongLen(b.longValue()); - } - byte[] bytes = b.toByteArray(); - return 1 
+ getVarIntLen(scale) + getVarIntLen(bytes.length) + bytes.length; - } - case Value.TIME: - if (storeLocalTime) { - long nanos = ((ValueTime) v).getNanos(); - long millis = nanos / 1_000_000; - nanos -= millis * 1_000_000; - return 1 + getVarLongLen(millis) + getVarLongLen(nanos); - } - return 1 + getVarLongLen(DateTimeUtils.getTimeLocalWithoutDst(v.getTime())); - case Value.DATE: { - if (storeLocalTime) { - long dateValue = ((ValueDate) v).getDateValue(); - return 1 + getVarLongLen(dateValue); - } - long x = DateTimeUtils.getTimeLocalWithoutDst(v.getDate()); - return 1 + getVarLongLen(x / MILLIS_PER_MINUTE); - } - case Value.TIMESTAMP: { - if (storeLocalTime) { - ValueTimestamp ts = (ValueTimestamp) v; - long dateValue = ts.getDateValue(); - long nanos = ts.getTimeNanos(); - long millis = nanos / 1_000_000; - nanos -= millis * 1_000_000; - return 1 + getVarLongLen(dateValue) + getVarLongLen(millis) + - getVarLongLen(nanos); - } - Timestamp ts = v.getTimestamp(); - return 1 + getVarLongLen(DateTimeUtils.getTimeLocalWithoutDst(ts)) + - getVarIntLen(ts.getNanos() % 1_000_000); - } - case Value.TIMESTAMP_TZ: { - ValueTimestampTimeZone ts = (ValueTimestampTimeZone) v; - long dateValue = ts.getDateValue(); - long nanos = ts.getTimeNanos(); - short tz = ts.getTimeZoneOffsetMins(); - return 1 + getVarLongLen(dateValue) + getVarLongLen(nanos) + - getVarIntLen(tz); - } - case Value.GEOMETRY: - case Value.JAVA_OBJECT: { - byte[] b = v.getBytesNoCopy(); - return 1 + getVarIntLen(b.length) + b.length; - } - case Value.BYTES: { - byte[] b = v.getBytesNoCopy(); - int len = b.length; - if (len < 32) { - return 1 + b.length; - } - return 1 + getVarIntLen(b.length) + b.length; - } - case Value.UUID: - return 1 + LENGTH_LONG + LENGTH_LONG; - case Value.BLOB: - case Value.CLOB: { - int len = 1; - if (v instanceof ValueLob) { - ValueLob lob = (ValueLob) v; - byte[] small = lob.getSmall(); - if (small == null) { - int t = -1; - if (!lob.isLinkedToTable()) { - t = -2; - } - len += 
getVarIntLen(t); - len += getVarIntLen(lob.getTableId()); - len += getVarIntLen(lob.getObjectId()); - len += getVarLongLen(lob.getType().getPrecision()); - len += 1; - if (t == -2) { - len += getStringLen(lob.getFileName()); - } - } else { - len += getVarIntLen(small.length); - len += small.length; - } - } else { - ValueLobDb lob = (ValueLobDb) v; - byte[] small = lob.getSmall(); - if (small == null) { - len += getVarIntLen(-3); - len += getVarIntLen(lob.getTableId()); - len += getVarLongLen(lob.getLobId()); - len += getVarLongLen(lob.getType().getPrecision()); - } else { - len += getVarIntLen(small.length); - len += small.length; - } - } - return len; - } - case Value.ARRAY: - case Value.ROW: { - Value[] list = ((ValueCollectionBase) v).getList(); - int len = 1 + getVarIntLen(list.length); - for (Value x : list) { - len += getValueLen(x, storeLocalTime); - } - return len; - } - case Value.RESULT_SET: { - int len = 1; - ResultInterface result = ((ValueResultSet) v).getResult(); - int columnCount = result.getVisibleColumnCount(); - len += getVarIntLen(columnCount); - for (int i = 0; i < columnCount; i++) { - len += getStringLen(result.getAlias(i)); - len += getStringLen(result.getColumnName(i)); - TypeInfo columnType = result.getColumnType(i); - len += getVarIntLen(columnType.getValueType()); - len += getVarLongLen(columnType.getPrecision()); - len += getVarIntLen(columnType.getScale()); - } - while (result.next()) { - len++; - Value[] row = result.currentRow(); - for (int i = 0; i < columnCount; i++) { - Value val = row[i]; - len += getValueLen(val, storeLocalTime); - } - } - len++; - return len; - } - case Value.INTERVAL_YEAR: - case Value.INTERVAL_MONTH: - case Value.INTERVAL_DAY: - case Value.INTERVAL_HOUR: - case Value.INTERVAL_MINUTE: { - ValueInterval interval = (ValueInterval) v; - return 2 + getVarLongLen(interval.getLeading()); - } - case Value.INTERVAL_SECOND: - case Value.INTERVAL_YEAR_TO_MONTH: - case Value.INTERVAL_DAY_TO_HOUR: - case 
Value.INTERVAL_DAY_TO_MINUTE: - case Value.INTERVAL_DAY_TO_SECOND: - case Value.INTERVAL_HOUR_TO_MINUTE: - case Value.INTERVAL_HOUR_TO_SECOND: - case Value.INTERVAL_MINUTE_TO_SECOND: { - ValueInterval interval = (ValueInterval) v; - return 2 + getVarLongLen(interval.getLeading()) + getVarLongLen(interval.getRemaining()); - } - default: - if (JdbcUtils.customDataTypesHandler != null) { - byte[] b = v.getBytesNoCopy(); - return 1 + getVarIntLen(v.getValueType()) - + getVarIntLen(b.length) + b.length; - } - throw DbException.throwInternalError("type=" + v.getValueType()); - } - } - /** * Set the current read / write position. * @@ -1245,160 +153,12 @@ public void setPos(int pos) { } /** - * Write a short integer at the current position. - * The current position is incremented. - * - * @param x the value - */ - public void writeShortInt(int x) { - byte[] buff = data; - buff[pos++] = (byte) (x >> 8); - buff[pos++] = (byte) x; - } - - /** - * Read an short integer at the current position. - * The current position is incremented. - * - * @return the value - */ - public short readShortInt() { - byte[] buff = data; - return (short) (((buff[pos++] & 0xff) << 8) + (buff[pos++] & 0xff)); - } - - /** - * Shrink the array to this size. - * - * @param size the new size - */ - public void truncate(int size) { - if (pos > size) { - byte[] buff = Arrays.copyOf(data, size); - this.pos = size; - data = buff; - } - } - - /** - * The number of bytes required for a variable size int. - * - * @param x the value - * @return the len - */ - private static int getVarIntLen(int x) { - if ((x & (-1 << 7)) == 0) { - return 1; - } else if ((x & (-1 << 14)) == 0) { - return 2; - } else if ((x & (-1 << 21)) == 0) { - return 3; - } else if ((x & (-1 << 28)) == 0) { - return 4; - } - return 5; - } - - /** - * Write a variable size int. 
- * - * @param x the value - */ - public void writeVarInt(int x) { - while ((x & ~0x7f) != 0) { - data[pos++] = (byte) (x | 0x80); - x >>>= 7; - } - data[pos++] = (byte) x; - } - - /** - * Read a variable size int. - * - * @return the value - */ - public int readVarInt() { - int b = data[pos]; - if (b >= 0) { - pos++; - return b; - } - // a separate function so that this one can be inlined - return readVarIntRest(b); - } - - private int readVarIntRest(int b) { - int x = b & 0x7f; - b = data[pos + 1]; - if (b >= 0) { - pos += 2; - return x | (b << 7); - } - x |= (b & 0x7f) << 7; - b = data[pos + 2]; - if (b >= 0) { - pos += 3; - return x | (b << 14); - } - x |= (b & 0x7f) << 14; - b = data[pos + 3]; - if (b >= 0) { - pos += 4; - return x | b << 21; - } - x |= ((b & 0x7f) << 21) | (data[pos + 4] << 28); - pos += 5; - return x; - } - - /** - * The number of bytes required for a variable size long. - * - * @param x the value - * @return the len - */ - public static int getVarLongLen(long x) { - int i = 1; - while (true) { - x >>>= 7; - if (x == 0) { - return i; - } - i++; - } - } - - /** - * Write a variable size long. - * - * @param x the value - */ - public void writeVarLong(long x) { - while ((x & ~0x7f) != 0) { - data[pos++] = (byte) (x | 0x80); - x >>>= 7; - } - data[pos++] = (byte) x; - } - - /** - * Read a variable size long. + * Read one single byte. 
* * @return the value */ - public long readVarLong() { - long x = data[pos++]; - if (x >= 0) { - return x; - } - x &= 0x7f; - for (int s = 7;; s += 7) { - long b = data[pos++]; - x |= (b & 0x7f) << s; - if (b >= 0) { - return x; - } - } + public byte readByte() { + return data[pos++]; } /** @@ -1438,11 +198,12 @@ public void fillAligned() { * * @param source the reader * @param target the output stream + * @throws IOException on failure */ public static void copyString(Reader source, OutputStream target) throws IOException { char[] buff = new char[Constants.IO_BUFFER_SIZE]; - Data d = new Data(null, new byte[3 * Constants.IO_BUFFER_SIZE], false); + Data d = new Data(new byte[3 * Constants.IO_BUFFER_SIZE]); while (true) { int l = source.read(buff); if (l < 0) { @@ -1454,8 +215,4 @@ public static void copyString(Reader source, OutputStream target) } } - public DataHandler getHandler() { - return handler; - } - } diff --git a/h2/src/main/org/h2/store/DataHandler.java b/h2/src/main/org/h2/store/DataHandler.java index ce8a3ea1a1..6c115d42ac 100644 --- a/h2/src/main/org/h2/store/DataHandler.java +++ b/h2/src/main/org/h2/store/DataHandler.java @@ -1,11 +1,10 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; -import org.h2.api.JavaObjectSerializer; import org.h2.message.DbException; import org.h2.util.SmallLRUCache; import org.h2.util.TempFileDeleter; @@ -56,14 +55,6 @@ public interface DataHandler { */ int getMaxLengthInplaceLob(); - /** - * Get the compression algorithm used for large objects. - * - * @param type the data type (CLOB or BLOB) - * @return the compression algorithm, or null - */ - String getLobCompressionAlgorithm(int type); - /** * Get the temp file deleter mechanism. 
* @@ -103,17 +94,7 @@ public interface DataHandler { * @param length the number of bytes to read * @return the number of bytes read */ - int readLob(long lobId, byte[] hmac, long offset, byte[] buff, int off, - int length); - - /** - * Return the serializer to be used for java objects being stored in - * column of type OTHER. - * - * @return the serializer to be used for java objects being stored in - * column of type OTHER - */ - JavaObjectSerializer getJavaObjectSerializer(); + int readLob(long lobId, byte[] hmac, long offset, byte[] buff, int off, int length); /** * Return compare mode. diff --git a/h2/src/main/org/h2/store/DataReader.java b/h2/src/main/org/h2/store/DataReader.java index c2393c7443..8c552f0461 100644 --- a/h2/src/main/org/h2/store/DataReader.java +++ b/h2/src/main/org/h2/store/DataReader.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; @@ -9,7 +9,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.Reader; -import org.h2.util.IOUtils; /** * This class is backed by an input stream and supports reading values and @@ -32,6 +31,7 @@ public DataReader(InputStream in) { * Read a byte. * * @return the byte + * @throws IOException on failure */ public byte readByte() throws IOException { int x = in.read(); @@ -45,6 +45,7 @@ public byte readByte() throws IOException { * Read a variable size integer. * * @return the value + * @throws IOException on failure */ public int readVarInt() throws IOException { int b = readByte(); @@ -69,76 +70,6 @@ public int readVarInt() throws IOException { return x | ((b & 0x7f) << 21) | (readByte() << 28); } - /** - * Read a variable size long. 
- * - * @return the value - */ - public long readVarLong() throws IOException { - long x = readByte(); - if (x >= 0) { - return x; - } - x &= 0x7f; - for (int s = 7;; s += 7) { - long b = readByte(); - x |= (b & 0x7f) << s; - if (b >= 0) { - return x; - } - } - } - - /** - * Read an integer. - * - * @return the value - */ - // public int readInt() throws IOException { - // return (read() << 24) + ((read() & 0xff) << 16) + - // ((read() & 0xff) << 8) + (read() & 0xff); - //} - - /** - * Read a long. - * - * @return the value - */ - // public long readLong() throws IOException { - // return ((long) (readInt()) << 32) + (readInt() & 0xffffffffL); - // } - - /** - * Read a number of bytes. - * - * @param buff the target buffer - * @param len the number of bytes to read - */ - public void readFully(byte[] buff, int len) throws IOException { - int got = IOUtils.readFully(in, buff, len); - if (got < len) { - throw new FastEOFException(); - } - } - - /** - * Read a string from the stream. - * - * @return the string - */ - public String readString() throws IOException { - int len = readVarInt(); - return readString(len); - } - - private String readString(int len) throws IOException { - char[] chars = new char[len]; - for (int i = 0; i < len; i++) { - chars[i] = readChar(); - } - return new String(chars); - } - /** * Read one character from the input stream. * diff --git a/h2/src/main/org/h2/store/FileLister.java b/h2/src/main/org/h2/store/FileLister.java index 62ea02cb41..2fc6f5a420 100644 --- a/h2/src/main/org/h2/store/FileLister.java +++ b/h2/src/main/org/h2/store/FileLister.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.store; @@ -90,16 +90,7 @@ public static ArrayList getDatabaseFiles(String dir, String db, String start = db == null ? null : (FileUtils.toRealPath(dir + "/" + db) + "."); for (String f : FileUtils.newDirectoryStream(dir)) { boolean ok = false; - if (f.endsWith(Constants.SUFFIX_LOBS_DIRECTORY)) { - if (start == null || f.startsWith(start)) { - files.addAll(getDatabaseFiles(f, null, all)); - ok = true; - } - } else if (f.endsWith(Constants.SUFFIX_LOB_FILE)) { - ok = true; - } else if (f.endsWith(Constants.SUFFIX_PAGE_FILE)) { - ok = true; - } else if (f.endsWith(Constants.SUFFIX_MV_FILE)) { + if (f.endsWith(Constants.SUFFIX_MV_FILE)) { ok = true; } else if (all) { if (f.endsWith(Constants.SUFFIX_LOCK_FILE)) { diff --git a/h2/src/main/org/h2/store/FileLock.java b/h2/src/main/org/h2/store/FileLock.java index 5b7f121ead..fbe3539b82 100644 --- a/h2/src/main/org/h2/store/FileLock.java +++ b/h2/src/main/org/h2/store/FileLock.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.store; @@ -13,6 +13,9 @@ import java.net.ServerSocket; import java.net.Socket; import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Paths; import java.util.Properties; import org.h2.Driver; import org.h2.api.ErrorCode; @@ -38,7 +41,6 @@ public class FileLock implements Runnable { private static final String MAGIC = "FileLock"; private static final String FILE = "file"; private static final String SOCKET = "socket"; - private static final String SERIALIZED = "serialized"; private static final int RANDOM_BYTES = 16; private static final int SLEEP_GAP = 25; private static final int TIME_GRANULARITY = 2000; @@ -101,7 +103,7 @@ public FileLock(TraceSystem traceSystem, String fileName, int sleep) { public synchronized void lock(FileLockMethod fileLockMethod) { checkServer(); if (locked) { - DbException.throwInternalError("already locked"); + throw DbException.getInternalError("already locked"); } switch (fileLockMethod) { case FILE: @@ -110,9 +112,6 @@ public synchronized void lock(FileLockMethod fileLockMethod) { case SOCKET: lockSocket(); break; - case SERIALIZED: - lockSerialized(); - break; case FS: case NO: break; @@ -187,7 +186,7 @@ public Properties save() { try (OutputStream out = FileUtils.newOutputStream(fileName, false)) { properties.store(out, MAGIC); } - lastWrite = FileUtils.lastModified(fileName); + lastWrite = aggressiveLastModified(fileName); if (trace.isDebugEnabled()) { trace.debug("save " + properties); } @@ -197,6 +196,28 @@ public Properties save() { } } + /** + * Aggressively read last modified time, to work-around remote filesystems. + * + * @param fileName file name to check + * @return last modified date/time in milliseconds UTC + */ + private static long aggressiveLastModified(String fileName) { + /* + * Some remote filesystem, e.g. SMB on Windows, can cache metadata for + * 5-10 seconds. 
To work around that, do a one-byte read from the + * underlying file, which has the effect of invalidating the metadata + * cache. + */ + try { + try (FileChannel f = FileChannel.open(Paths.get(fileName), FileUtils.RWS, FileUtils.NO_ATTRIBUTES);) { + ByteBuffer b = ByteBuffer.wrap(new byte[1]); + f.read(b); + } + } catch (IOException ignoreEx) {} + return FileUtils.lastModified(fileName); + } + private void checkServer() { Properties prop = load(); String server = prop.getProperty("server"); @@ -257,7 +278,7 @@ public Properties load() { private void waitUntilOld() { for (int i = 0; i < 2 * TIME_GRANULARITY / SLEEP_GAP; i++) { - long last = FileUtils.lastModified(fileName); + long last = aggressiveLastModified(fileName); long dist = System.currentTimeMillis() - last; if (dist < -TIME_GRANULARITY) { // lock file modified in the future - @@ -287,26 +308,6 @@ private void setUniqueId() { properties.setProperty("id", uniqueId); } - private void lockSerialized() { - method = SERIALIZED; - FileUtils.createDirectories(FileUtils.getParent(fileName)); - if (FileUtils.createFile(fileName)) { - properties = new SortedProperties(); - properties.setProperty("method", String.valueOf(method)); - setUniqueId(); - save(); - } else { - while (true) { - try { - properties = load(); - } catch (DbException e) { - // ignore - } - return; - } - } - } - private void lockFile() { method = FILE; properties = new SortedProperties(); @@ -354,7 +355,7 @@ private void lockSocket() { FileUtils.createDirectories(FileUtils.getParent(fileName)); if (!FileUtils.createFile(fileName)) { waitUntilOld(); - long read = FileUtils.lastModified(fileName); + long read = aggressiveLastModified(fileName); Properties p2 = load(); String m2 = p2.getProperty("method", SOCKET); if (m2.equals(FILE)) { @@ -388,7 +389,7 @@ private void lockSocket() { throw getExceptionFatal("IOException", null); } } - if (read != FileUtils.lastModified(fileName)) { + if (read != aggressiveLastModified(fileName)) { throw 
getExceptionFatal("Concurrent update", null); } FileUtils.delete(fileName); @@ -461,13 +462,10 @@ public static FileLockMethod getFileLockMethod(String method) { return FileLockMethod.NO; } else if (method.equalsIgnoreCase("SOCKET")) { return FileLockMethod.SOCKET; - } else if (method.equalsIgnoreCase("SERIALIZED")) { - return FileLockMethod.SERIALIZED; } else if (method.equalsIgnoreCase("FS")) { return FileLockMethod.FS; } else { - throw DbException.get( - ErrorCode.UNSUPPORTED_LOCK_METHOD_1, method); + throw DbException.get(ErrorCode.UNSUPPORTED_LOCK_METHOD_1, method); } } @@ -482,7 +480,7 @@ public void run() { // trace.debug("watchdog check"); try { if (!FileUtils.exists(fileName) || - FileUtils.lastModified(fileName) != lastWrite) { + aggressiveLastModified(fileName) != lastWrite) { save(); } Thread.sleep(sleep); diff --git a/h2/src/main/org/h2/store/FileLockMethod.java b/h2/src/main/org/h2/store/FileLockMethod.java index 094cf40750..c225f4a64b 100644 --- a/h2/src/main/org/h2/store/FileLockMethod.java +++ b/h2/src/main/org/h2/store/FileLockMethod.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; @@ -22,12 +22,6 @@ public enum FileLockMethod { */ SOCKET, - /** - * This locking method means multiple writers are allowed, and they - * synchronize themselves. - */ - SERIALIZED, - /** * Use the file system to lock the file; don't use a separate lock file. */ diff --git a/h2/src/main/org/h2/store/FileStore.java b/h2/src/main/org/h2/store/FileStore.java index e89696e122..adfd343173 100644 --- a/h2/src/main/org/h2/store/FileStore.java +++ b/h2/src/main/org/h2/store/FileStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; @@ -261,7 +261,7 @@ public void closeAndDeleteSilently() { * @param off the offset * @param len the number of bytes to read */ - protected void readFullyDirect(byte[] b, int off, int len) { + public void readFullyDirect(byte[] b, int off, int len) { readFully(b, off, len); } @@ -274,8 +274,7 @@ protected void readFullyDirect(byte[] b, int off, int len) { */ public void readFully(byte[] b, int off, int len) { if (len < 0 || len % Constants.FILE_BLOCK_SIZE != 0) { - DbException.throwInternalError( - "unaligned read " + name + " len " + len); + throw DbException.getInternalError("unaligned read " + name + " len " + len); } checkPowerOff(); try { @@ -293,8 +292,7 @@ public void readFully(byte[] b, int off, int len) { */ public void seek(long pos) { if (pos % Constants.FILE_BLOCK_SIZE != 0) { - DbException.throwInternalError( - "unaligned seek " + name + " pos " + pos); + throw DbException.getInternalError("unaligned seek " + name + " pos " + pos); } try { if (pos != filePos) { @@ -326,8 +324,7 @@ protected void writeDirect(byte[] b, int off, int len) { */ public void write(byte[] b, int off, int len) { if (len < 0 || len % Constants.FILE_BLOCK_SIZE != 0) { - DbException.throwInternalError( - "unaligned write " + name + " len " + len); + throw DbException.getInternalError("unaligned write " + name + " len " + len); } checkWritingAllowed(); checkPowerOff(); @@ -348,8 +345,7 @@ public void write(byte[] b, int off, int len) { */ public void setLength(long newLength) { if (newLength % Constants.FILE_BLOCK_SIZE != 0) { - DbException.throwInternalError( - "unaligned setLength " + name + " pos " + newLength); + throw DbException.getInternalError("unaligned setLength " + name + " pos " + 
newLength); } checkPowerOff(); checkWritingAllowed(); @@ -380,16 +376,14 @@ public long length() { try { len = file.size(); if (len != fileLength) { - DbException.throwInternalError( - "file " + name + " length " + len + " expected " + fileLength); + throw DbException.getInternalError("file " + name + " length " + len + " expected " + fileLength); } if (len % Constants.FILE_BLOCK_SIZE != 0) { long newLength = len + Constants.FILE_BLOCK_SIZE - (len % Constants.FILE_BLOCK_SIZE); file.truncate(newLength); fileLength = newLength; - DbException.throwInternalError( - "unaligned file length " + name + " len " + len); + throw DbException.getInternalError("unaligned file length " + name + " len " + len); } } catch (IOException e) { throw DbException.convertIOException(e, name); @@ -407,7 +401,7 @@ public long getFilePointer() { if (ASSERT) { try { if (file.position() != filePos) { - DbException.throwInternalError(file.position() + " " + filePos); + throw DbException.getInternalError(file.position() + " " + filePos); } } catch (IOException e) { throw DbException.convertIOException(e, name); @@ -448,6 +442,7 @@ public void stopAutoDelete() { /** * Close the file. The file may later be re-opened using openFile. + * @throws IOException on failure */ public void closeFile() throws IOException { file.close(); @@ -470,6 +465,7 @@ private void closeFileSilently() { /** * Re-open the file. The file pointer will be reset to the previous * location. + * @throws IOException on failure */ public void openFile() throws IOException { if (file == null) { diff --git a/h2/src/main/org/h2/store/FileStoreInputStream.java b/h2/src/main/org/h2/store/FileStoreInputStream.java index 8466b37682..87b9fdb70a 100644 --- a/h2/src/main/org/h2/store/FileStoreInputStream.java +++ b/h2/src/main/org/h2/store/FileStoreInputStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; @@ -24,8 +24,7 @@ public class FileStoreInputStream extends InputStream { private boolean endOfFile; private final boolean alwaysClose; - public FileStoreInputStream(FileStore store, DataHandler handler, - boolean compression, boolean alwaysClose) { + public FileStoreInputStream(FileStore store, boolean compression, boolean alwaysClose) { this.store = store; this.alwaysClose = alwaysClose; if (compression) { @@ -33,7 +32,7 @@ public FileStoreInputStream(FileStore store, DataHandler handler, } else { compress = null; } - page = Data.create(handler, Constants.FILE_BLOCK_SIZE, true); + page = Data.create(Constants.FILE_BLOCK_SIZE); try { if (store.length() <= FileStore.HEADER_LENGTH) { close(); @@ -104,7 +103,7 @@ private void fillBuffer() throws IOException { page.checkCapacity(remainingInBuffer); // get the length to read if (compress != null) { - page.checkCapacity(Data.LENGTH_INT); + page.checkCapacity(Integer.BYTES); page.readInt(); } page.setPos(page.length() + remainingInBuffer); diff --git a/h2/src/main/org/h2/store/FileStoreOutputStream.java b/h2/src/main/org/h2/store/FileStoreOutputStream.java index 87291cf5bf..a414443f17 100644 --- a/h2/src/main/org/h2/store/FileStoreOutputStream.java +++ b/h2/src/main/org/h2/store/FileStoreOutputStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.store; @@ -21,8 +21,7 @@ public class FileStoreOutputStream extends OutputStream { private final CompressTool compress; private final byte[] buffer = { 0 }; - public FileStoreOutputStream(FileStore store, DataHandler handler, - String compressionAlgorithm) { + public FileStoreOutputStream(FileStore store, String compressionAlgorithm) { this.store = store; if (compressionAlgorithm != null) { this.compress = CompressTool.getInstance(); @@ -31,7 +30,7 @@ public FileStoreOutputStream(FileStore store, DataHandler handler, this.compress = null; this.compressionAlgorithm = null; } - page = Data.create(handler, Constants.FILE_BLOCK_SIZE, true); + page = Data.create(Constants.FILE_BLOCK_SIZE); } @Override @@ -57,12 +56,12 @@ public void write(byte[] buff, int off, int len) { int uncompressed = len; buff = compress.compress(buff, compressionAlgorithm); len = buff.length; - page.checkCapacity(2 * Data.LENGTH_INT + len); + page.checkCapacity(2 * Integer.BYTES + len); page.writeInt(len); page.writeInt(uncompressed); page.write(buff, off, len); } else { - page.checkCapacity(Data.LENGTH_INT + len); + page.checkCapacity(Integer.BYTES + len); page.writeInt(len); page.write(buff, off, len); } diff --git a/h2/src/main/org/h2/store/InDoubtTransaction.java b/h2/src/main/org/h2/store/InDoubtTransaction.java index 25fd06da5a..33a1292a0d 100644 --- a/h2/src/main/org/h2/store/InDoubtTransaction.java +++ b/h2/src/main/org/h2/store/InDoubtTransaction.java @@ -1,10 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; +import org.h2.message.DbException; + /** * Represents an in-doubt transaction (a transaction in the prepare phase). 
*/ @@ -34,12 +36,31 @@ public interface InDoubtTransaction { */ void setState(int state); + /** + * Get the state of this transaction. + * + * @return the transaction state + */ + int getState(); + /** * Get the state of this transaction as a text. * * @return the transaction state text */ - String getState(); + default String getStateDescription() { + int state = getState(); + switch (state) { + case 0: + return "IN_DOUBT"; + case 1: + return "COMMIT"; + case 2: + return "ROLLBACK"; + default: + throw DbException.getInternalError("state=" + state); + } + } /** * Get the name of the transaction. @@ -47,5 +68,4 @@ public interface InDoubtTransaction { * @return the transaction name */ String getTransactionName(); - } diff --git a/h2/src/main/org/h2/store/LobStorageBackend.java b/h2/src/main/org/h2/store/LobStorageBackend.java deleted file mode 100644 index 489bfb7e5e..0000000000 --- a/h2/src/main/org/h2/store/LobStorageBackend.java +++ /dev/null @@ -1,780 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store; - -import java.io.IOException; -import java.io.InputStream; -import java.io.Reader; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import org.h2.api.ErrorCode; -import org.h2.engine.Database; -import org.h2.jdbc.JdbcConnection; -import org.h2.message.DbException; -import org.h2.tools.CompressTool; -import org.h2.util.IOUtils; -import org.h2.util.MathUtils; -import org.h2.util.Utils; -import org.h2.value.Value; -import org.h2.value.ValueLobDb; - -/** - * This class stores LOB objects in the database, in tables. This is the - * back-end i.e. the server side of the LOB storage. - *

          - * Using the system session - *

          - * Why do we use the system session to store the data? Some LOB operations can - * take a very long time. If we did them on a normal session, we would be - * locking the LOB tables for long periods of time, which is extremely - * detrimental to the rest of the system. Perhaps when we shift to the MVStore - * engine, we can revisit this design decision (using the StreamStore, that is, - * no connection at all). - *

          - * Locking - *

          - * Normally, the locking order in H2 is: first lock the Session object, then - * lock the Database object. However, in the case of the LOB data, we are using - * the system session to store the data. If we locked the normal way, we see - * deadlocks caused by the following pattern: - * - *

          - *  Thread 1:
          - *     locks normal session
          - *     locks database
          - *     waiting to lock system session
          - *  Thread 2:
          - *      locks system session
          - *      waiting to lock database.
          - * 
          - * - * So, in this class alone, we do two things: we have our very own dedicated - * session, the LOB session, and we take the locks in this order: first the - * Database object, and then the LOB session. Since we own the LOB session, - * no-one else can lock on it, and we are safe. - */ -public class LobStorageBackend implements LobStorageInterface { - - /** - * The name of the lob data table. If this table exists, then lob storage is - * used. - */ - public static final String LOB_DATA_TABLE = "LOB_DATA"; - - private static final String LOB_SCHEMA = "INFORMATION_SCHEMA"; - private static final String LOBS = LOB_SCHEMA + ".LOBS"; - private static final String LOB_MAP = LOB_SCHEMA + ".LOB_MAP"; - private static final String LOB_DATA = LOB_SCHEMA + "." + LOB_DATA_TABLE; - - /** - * The size of the chunks we use when storing LOBs inside the database file. - */ - private static final int BLOCK_LENGTH = 20_000; - - /** - * The size of cache for lob block hashes. Each entry needs 2 longs (16 - * bytes), therefore, the size 4096 means 64 KB. 
- */ - private static final int HASH_CACHE_SIZE = 4 * 1024; - - JdbcConnection conn; - final Database database; - - private final HashMap prepared = new HashMap<>(); - private long nextBlock; - private final CompressTool compress = CompressTool.getInstance(); - private long[] hashBlocks; - - private boolean init; - - public LobStorageBackend(Database database) { - this.database = database; - } - - @Override - public void init() { - if (init) { - return; - } - synchronized (database) { - // have to check this again or we might miss an update on another - // thread - if (init) { - return; - } - init = true; - conn = database.getLobConnectionForRegularUse(); - JdbcConnection initConn = database.getLobConnectionForInit(); - try { - Statement stat = initConn.createStatement(); - // stat.execute("SET UNDO_LOG 0"); - // stat.execute("SET REDO_LOG_BINARY 0"); - boolean create = true; - PreparedStatement prep = initConn.prepareStatement( - "SELECT ZERO() FROM INFORMATION_SCHEMA.COLUMNS WHERE " + - "TABLE_SCHEMA=? AND TABLE_NAME=? AND COLUMN_NAME=?"); - prep.setString(1, "INFORMATION_SCHEMA"); - prep.setString(2, "LOB_MAP"); - prep.setString(3, "POS"); - ResultSet rs; - rs = prep.executeQuery(); - if (rs.next()) { - prep = initConn.prepareStatement( - "SELECT ZERO() FROM INFORMATION_SCHEMA.TABLES WHERE " + - "TABLE_SCHEMA=? 
AND TABLE_NAME=?"); - prep.setString(1, "INFORMATION_SCHEMA"); - prep.setString(2, "LOB_DATA"); - rs = prep.executeQuery(); - if (rs.next()) { - create = false; - } - } - if (create) { - stat.execute("CREATE CACHED TABLE IF NOT EXISTS " + LOBS + - "(ID BIGINT PRIMARY KEY, BYTE_COUNT BIGINT, `TABLE` INT) HIDDEN"); - stat.execute("CREATE INDEX IF NOT EXISTS " + - "INFORMATION_SCHEMA.INDEX_LOB_TABLE ON " + - LOBS + "(`TABLE`)"); - stat.execute("CREATE CACHED TABLE IF NOT EXISTS " + LOB_MAP + - "(LOB BIGINT, SEQ INT, POS BIGINT, HASH INT, " + - "BLOCK BIGINT, PRIMARY KEY(LOB, SEQ)) HIDDEN"); - stat.execute("ALTER TABLE " + LOB_MAP + - " RENAME TO " + LOB_MAP + " HIDDEN"); - stat.execute("ALTER TABLE " + LOB_MAP + - " ADD IF NOT EXISTS POS BIGINT BEFORE HASH"); - // TODO the column name OFFSET was used in version 1.3.156, - // so this can be remove in a later version - stat.execute("ALTER TABLE " + LOB_MAP + - " DROP COLUMN IF EXISTS \"OFFSET\""); - stat.execute("CREATE INDEX IF NOT EXISTS " + - "INFORMATION_SCHEMA.INDEX_LOB_MAP_DATA_LOB ON " + - LOB_MAP + "(BLOCK, LOB)"); - stat.execute("CREATE CACHED TABLE IF NOT EXISTS " + - LOB_DATA + - "(BLOCK BIGINT PRIMARY KEY, COMPRESSED INT, DATA BINARY) HIDDEN"); - } - rs = stat.executeQuery("SELECT MAX(BLOCK) FROM " + LOB_DATA); - rs.next(); - nextBlock = rs.getLong(1) + 1; - stat.close(); - } catch (SQLException e) { - throw DbException.convert(e); - } - } - } - - private long getNextLobId() throws SQLException { - String sql = "SELECT MAX(LOB) FROM " + LOB_MAP; - PreparedStatement prep = prepare(sql); - ResultSet rs = prep.executeQuery(); - rs.next(); - long x = rs.getLong(1) + 1; - reuse(sql, prep); - sql = "SELECT MAX(ID) FROM " + LOBS; - prep = prepare(sql); - rs = prep.executeQuery(); - rs.next(); - x = Math.max(x, rs.getLong(1) + 1); - reuse(sql, prep); - return x; - } - - @Override - public void removeAllForTable(int tableId) { - init(); - try { - String sql = "SELECT ID FROM " + LOBS + " WHERE `TABLE` = ?"; - 
PreparedStatement prep = prepare(sql); - prep.setInt(1, tableId); - ResultSet rs = prep.executeQuery(); - while (rs.next()) { - removeLob(rs.getLong(1)); - } - reuse(sql, prep); - } catch (SQLException e) { - throw DbException.convert(e); - } - if (tableId == LobStorageFrontend.TABLE_ID_SESSION_VARIABLE) { - removeAllForTable(LobStorageFrontend.TABLE_TEMP); - removeAllForTable(LobStorageFrontend.TABLE_RESULT); - } - } - - /** - * Read a block of data from the given LOB. - * - * @param block the block number - * @return the block (expanded if stored compressed) - */ - byte[] readBlock(long block) throws SQLException { - // see locking discussion at the top - assertNotHolds(conn.getSession()); - synchronized (database) { - synchronized (conn.getSession()) { - String sql = "SELECT COMPRESSED, DATA FROM " + - LOB_DATA + " WHERE BLOCK = ?"; - PreparedStatement prep = prepare(sql); - prep.setLong(1, block); - ResultSet rs = prep.executeQuery(); - if (!rs.next()) { - throw DbException.getJdbcSQLException(ErrorCode.IO_EXCEPTION_1, - "Missing lob entry, block: " + block); - } - int compressed = rs.getInt(1); - byte[] buffer = rs.getBytes(2); - if (compressed != 0) { - buffer = compress.expand(buffer); - } - reuse(sql, prep); - return buffer; - } - } - } - - /** - * Create a prepared statement, or re-use an existing one. - * - * @param sql the SQL statement - * @return the prepared statement - */ - PreparedStatement prepare(String sql) throws SQLException { - assert Thread.holdsLock(database); - PreparedStatement prep = prepared.remove(sql); - if (prep == null) { - prep = conn.prepareStatement(sql); - } - return prep; - } - - /** - * Allow to re-use the prepared statement. 
- * - * @param sql the SQL statement - * @param prep the prepared statement - */ - void reuse(String sql, PreparedStatement prep) { - assert Thread.holdsLock(database); - prepared.put(sql, prep); - } - - @Override - public void removeLob(ValueLobDb lob) { - removeLob(lob.getLobId()); - } - - private void removeLob(long lobId) { - try { - // see locking discussion at the top - assertNotHolds(conn.getSession()); - synchronized (database) { - synchronized (conn.getSession()) { - String sql = "SELECT BLOCK, HASH FROM " + LOB_MAP + " D WHERE D.LOB = ? " + - "AND NOT EXISTS(SELECT 1 FROM " + LOB_MAP + " O " + - "WHERE O.BLOCK = D.BLOCK AND O.LOB <> ?)"; - PreparedStatement prep = prepare(sql); - prep.setLong(1, lobId); - prep.setLong(2, lobId); - ResultSet rs = prep.executeQuery(); - ArrayList blocks = Utils.newSmallArrayList(); - while (rs.next()) { - blocks.add(rs.getLong(1)); - int hash = rs.getInt(2); - setHashCacheBlock(hash, -1); - } - reuse(sql, prep); - - sql = "DELETE FROM " + LOB_MAP + " WHERE LOB = ?"; - prep = prepare(sql); - prep.setLong(1, lobId); - prep.execute(); - reuse(sql, prep); - - sql = "DELETE FROM " + LOB_DATA + " WHERE BLOCK = ?"; - prep = prepare(sql); - for (long block : blocks) { - prep.setLong(1, block); - prep.execute(); - } - reuse(sql, prep); - - sql = "DELETE FROM " + LOBS + " WHERE ID = ?"; - prep = prepare(sql); - prep.setLong(1, lobId); - prep.execute(); - reuse(sql, prep); - } - } - } catch (SQLException e) { - throw DbException.convert(e); - } - } - - @Override - public InputStream getInputStream(ValueLobDb lob, byte[] hmac, - long byteCount) throws IOException { - try { - init(); - assertNotHolds(conn.getSession()); - // see locking discussion at the top - synchronized (database) { - synchronized (conn.getSession()) { - long lobId = lob.getLobId(); - return new LobInputStream(lobId, byteCount); - } - } - } catch (SQLException e) { - throw DbException.convertToIOException(e); - } - } - - private ValueLobDb addLob(InputStream in, long 
maxLength, int type, - CountingReaderInputStream countingReaderForClob) { - try { - byte[] buff = new byte[BLOCK_LENGTH]; - if (maxLength < 0) { - maxLength = Long.MAX_VALUE; - } - long length = 0; - long lobId = -1; - int maxLengthInPlaceLob = database.getMaxLengthInplaceLob(); - String compressAlgorithm = database.getLobCompressionAlgorithm(type); - try { - byte[] small = null; - for (int seq = 0; maxLength > 0; seq++) { - int len = (int) Math.min(BLOCK_LENGTH, maxLength); - len = IOUtils.readFully(in, buff, len); - if (len <= 0) { - break; - } - maxLength -= len; - // if we had a short read, trim the buffer - byte[] b; - if (len != buff.length) { - b = Arrays.copyOf(buff, len); - } else { - b = buff; - } - if (seq == 0 && b.length < BLOCK_LENGTH && - b.length <= maxLengthInPlaceLob) { - small = b; - break; - } - assertNotHolds(conn.getSession()); - // see locking discussion at the top - synchronized (database) { - synchronized (conn.getSession()) { - if (seq == 0) { - lobId = getNextLobId(); - } - storeBlock(lobId, seq, length, b, compressAlgorithm); - } - } - length += len; - } - if (lobId == -1 && small == null) { - // zero length - small = new byte[0]; - } - if (small != null) { - // For a BLOB, precision is length in bytes. - // For a CLOB, precision is length in chars - long precision = countingReaderForClob == null ? - small.length : countingReaderForClob.getLength(); - return ValueLobDb.createSmallLob(type, small, precision); - } - // For a BLOB, precision is length in bytes. - // For a CLOB, precision is length in chars - long precision = countingReaderForClob == null ? 
- length : countingReaderForClob.getLength(); - return registerLob(type, lobId, - LobStorageFrontend.TABLE_TEMP, length, precision); - } catch (IOException e) { - if (lobId != -1) { - removeLob(lobId); - } - throw DbException.convertIOException(e, null); - } - } catch (SQLException e) { - throw DbException.convert(e); - } - } - - private ValueLobDb registerLob(int type, long lobId, int tableId, - long byteCount, long precision) throws SQLException { - assertNotHolds(conn.getSession()); - // see locking discussion at the top - synchronized (database) { - synchronized (conn.getSession()) { - String sql = "INSERT INTO " + LOBS + - "(ID, BYTE_COUNT, `TABLE`) VALUES(?, ?, ?)"; - PreparedStatement prep = prepare(sql); - prep.setLong(1, lobId); - prep.setLong(2, byteCount); - prep.setInt(3, tableId); - prep.execute(); - reuse(sql, prep); - return ValueLobDb.create(type, - database, tableId, lobId, null, precision); - } - } - } - - @Override - public boolean isReadOnly() { - return database.isReadOnly(); - } - - @Override - public ValueLobDb copyLob(ValueLobDb old, int tableId, long length) { - int type = old.getValueType(); - long oldLobId = old.getLobId(); - assertNotHolds(conn.getSession()); - // see locking discussion at the top - synchronized (database) { - synchronized (conn.getSession()) { - try { - init(); - ValueLobDb v = null; - if (!old.isRecoveryReference()) { - long lobId = getNextLobId(); - String sql = "INSERT INTO " + LOB_MAP + - "(LOB, SEQ, POS, HASH, BLOCK) " + - "SELECT ?, SEQ, POS, HASH, BLOCK FROM " + - LOB_MAP + " WHERE LOB = ?"; - PreparedStatement prep = prepare(sql); - prep.setLong(1, lobId); - prep.setLong(2, oldLobId); - prep.executeUpdate(); - reuse(sql, prep); - - sql = "INSERT INTO " + LOBS + - "(ID, BYTE_COUNT, `TABLE`) " + - "SELECT ?, BYTE_COUNT, ? 
FROM " + LOBS + - " WHERE ID = ?"; - prep = prepare(sql); - prep.setLong(1, lobId); - prep.setLong(2, tableId); - prep.setLong(3, oldLobId); - prep.executeUpdate(); - reuse(sql, prep); - - v = ValueLobDb.create(type, database, tableId, lobId, null, length); - } else { - // Recovery process, no need to copy LOB using normal - // infrastructure - v = ValueLobDb.create(type, database, tableId, oldLobId, null, length); - } - return v; - } catch (SQLException e) { - throw DbException.convert(e); - } - } - } - } - - private long getHashCacheBlock(int hash) { - if (HASH_CACHE_SIZE > 0) { - initHashCache(); - int index = hash & (HASH_CACHE_SIZE - 1); - long oldHash = hashBlocks[index]; - if (oldHash == hash) { - return hashBlocks[index + HASH_CACHE_SIZE]; - } - } - return -1; - } - - private void setHashCacheBlock(int hash, long block) { - if (HASH_CACHE_SIZE > 0) { - initHashCache(); - int index = hash & (HASH_CACHE_SIZE - 1); - hashBlocks[index] = hash; - hashBlocks[index + HASH_CACHE_SIZE] = block; - } - } - - private void initHashCache() { - if (hashBlocks == null) { - hashBlocks = new long[HASH_CACHE_SIZE * 2]; - } - } - - /** - * Store a block in the LOB storage. 
- * - * @param lobId the lob id - * @param seq the sequence number - * @param pos the position within the lob - * @param b the data - * @param compressAlgorithm the compression algorithm (may be null) - */ - void storeBlock(long lobId, int seq, long pos, byte[] b, - String compressAlgorithm) throws SQLException { - long block; - boolean blockExists = false; - if (compressAlgorithm != null) { - b = compress.compress(b, compressAlgorithm); - } - int hash = Arrays.hashCode(b); - assertHoldsLock(conn.getSession()); - assertHoldsLock(database); - block = getHashCacheBlock(hash); - if (block != -1) { - String sql = "SELECT COMPRESSED, DATA FROM " + LOB_DATA + - " WHERE BLOCK = ?"; - PreparedStatement prep = prepare(sql); - prep.setLong(1, block); - ResultSet rs = prep.executeQuery(); - if (rs.next()) { - boolean compressed = rs.getInt(1) != 0; - byte[] compare = rs.getBytes(2); - if (compressed == (compressAlgorithm != null) && Arrays.equals(b, compare)) { - blockExists = true; - } - } - reuse(sql, prep); - } - if (!blockExists) { - block = nextBlock++; - setHashCacheBlock(hash, block); - String sql = "INSERT INTO " + LOB_DATA + - "(BLOCK, COMPRESSED, DATA) VALUES(?, ?, ?)"; - PreparedStatement prep = prepare(sql); - prep.setLong(1, block); - prep.setInt(2, compressAlgorithm == null ? 0 : 1); - prep.setBytes(3, b); - prep.execute(); - reuse(sql, prep); - } - String sql = "INSERT INTO " + LOB_MAP + - "(LOB, SEQ, POS, HASH, BLOCK) VALUES(?, ?, ?, ?, ?)"; - PreparedStatement prep = prepare(sql); - prep.setLong(1, lobId); - prep.setInt(2, seq); - prep.setLong(3, pos); - prep.setLong(4, hash); - prep.setLong(5, block); - prep.execute(); - reuse(sql, prep); - } - - @Override - public Value createBlob(InputStream in, long maxLength) { - init(); - return addLob(in, maxLength, Value.BLOB, null); - } - - @Override - public Value createClob(Reader reader, long maxLength) { - init(); - long max = maxLength == -1 ? 
Long.MAX_VALUE : maxLength; - CountingReaderInputStream in = new CountingReaderInputStream(reader, max); - return addLob(in, Long.MAX_VALUE, Value.CLOB, in); - } - - private static void assertNotHolds(Object lock) { - if (Thread.holdsLock(lock)) { - throw DbException.throwInternalError(lock.toString()); - } - } - - /** - * Check whether this thread has synchronized on this object. - * - * @param lock the object - */ - static void assertHoldsLock(Object lock) { - if (!Thread.holdsLock(lock)) { - throw DbException.throwInternalError(lock.toString()); - } - } - - /** - * An input stream that reads from a LOB. - */ - public class LobInputStream extends InputStream { - - /** - * Data from the LOB_MAP table. We cache this to prevent other updates - * to the table that contains the LOB column from changing the data - * under us. - */ - private final long[] lobMapBlocks; - - /** - * index into the lobMapBlocks array. - */ - private int lobMapIndex; - - /** - * The remaining bytes in the lob. - */ - private long remainingBytes; - - /** - * The temporary buffer. - */ - private byte[] buffer; - - /** - * The position within the buffer. 
- */ - private int bufferPos; - - - public LobInputStream(long lobId, long byteCount) throws SQLException { - - // we have to take the lock on the session - // before the lock on the database to prevent ABBA deadlocks - assertHoldsLock(conn.getSession()); - assertHoldsLock(database); - - if (byteCount == -1) { - String sql = "SELECT BYTE_COUNT FROM " + LOBS + " WHERE ID = ?"; - PreparedStatement prep = prepare(sql); - prep.setLong(1, lobId); - ResultSet rs = prep.executeQuery(); - if (!rs.next()) { - throw DbException.getJdbcSQLException(ErrorCode.IO_EXCEPTION_1, - "Missing lob entry: " + lobId); - } - byteCount = rs.getLong(1); - reuse(sql, prep); - } - this.remainingBytes = byteCount; - - String sql = "SELECT COUNT(*) FROM " + LOB_MAP + " WHERE LOB = ?"; - PreparedStatement prep = prepare(sql); - prep.setLong(1, lobId); - ResultSet rs = prep.executeQuery(); - rs.next(); - int lobMapCount = rs.getInt(1); - if (lobMapCount == 0) { - throw DbException.getJdbcSQLException(ErrorCode.IO_EXCEPTION_1, - "Missing lob entry: " + lobId); - } - reuse(sql, prep); - - this.lobMapBlocks = new long[lobMapCount]; - - sql = "SELECT BLOCK FROM " + LOB_MAP + " WHERE LOB = ? 
ORDER BY SEQ"; - prep = prepare(sql); - prep.setLong(1, lobId); - rs = prep.executeQuery(); - int i = 0; - while (rs.next()) { - this.lobMapBlocks[i] = rs.getLong(1); - i++; - } - reuse(sql, prep); - } - - @Override - public int read() throws IOException { - fillBuffer(); - if (remainingBytes <= 0) { - return -1; - } - remainingBytes--; - return buffer[bufferPos++] & 255; - } - - @Override - public long skip(long n) throws IOException { - if (n <= 0) { - return 0; - } - long remaining = n; - remaining -= skipSmall(remaining); - if (remaining > BLOCK_LENGTH) { - while (remaining > BLOCK_LENGTH) { - remaining -= BLOCK_LENGTH; - remainingBytes -= BLOCK_LENGTH; - lobMapIndex++; - } - bufferPos = 0; - buffer = null; - } - fillBuffer(); - remaining -= skipSmall(remaining); - remaining -= super.skip(remaining); - return n - remaining; - } - - private int skipSmall(long n) { - if (buffer != null && bufferPos < buffer.length) { - int x = MathUtils.convertLongToInt(Math.min(n, buffer.length - bufferPos)); - bufferPos += x; - remainingBytes -= x; - return x; - } - return 0; - } - - @Override - public int available() throws IOException { - return MathUtils.convertLongToInt(remainingBytes); - } - - @Override - public int read(byte[] buff) throws IOException { - return readFully(buff, 0, buff.length); - } - - @Override - public int read(byte[] buff, int off, int length) throws IOException { - return readFully(buff, off, length); - } - - private int readFully(byte[] buff, int off, int length) throws IOException { - if (length == 0) { - return 0; - } - int read = 0; - while (length > 0) { - fillBuffer(); - if (remainingBytes <= 0) { - break; - } - int len = (int) Math.min(length, remainingBytes); - len = Math.min(len, buffer.length - bufferPos); - System.arraycopy(buffer, bufferPos, buff, off, len); - bufferPos += len; - read += len; - remainingBytes -= len; - off += len; - length -= len; - } - return read == 0 ? 
-1 : read; - } - - private void fillBuffer() throws IOException { - if (buffer != null && bufferPos < buffer.length) { - return; - } - if (remainingBytes <= 0) { - return; - } -if (lobMapIndex >= lobMapBlocks.length) { - System.out.println("halt!"); -} - try { - buffer = readBlock(lobMapBlocks[lobMapIndex]); - lobMapIndex++; - bufferPos = 0; - } catch (SQLException e) { - throw DbException.convertToIOException(e); - } - } - - } - -} diff --git a/h2/src/main/org/h2/store/LobStorageFrontend.java b/h2/src/main/org/h2/store/LobStorageFrontend.java index 84950a96b7..5c57acef4a 100644 --- a/h2/src/main/org/h2/store/LobStorageFrontend.java +++ b/h2/src/main/org/h2/store/LobStorageFrontend.java @@ -1,16 +1,17 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; -import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; import java.io.Reader; -import org.h2.value.Value; -import org.h2.value.ValueLobDb; +import org.h2.engine.SessionRemote; +import org.h2.value.ValueBlob; +import org.h2.value.ValueClob; +import org.h2.value.ValueLob; /** * This factory creates in-memory objects and temporary files. It is used on the @@ -33,33 +34,29 @@ public class LobStorageFrontend implements LobStorageInterface { */ public static final int TABLE_RESULT = -3; - private final DataHandler handler; + private final SessionRemote sessionRemote; - public LobStorageFrontend(DataHandler handler) { - this.handler = handler; + public LobStorageFrontend(SessionRemote handler) { + this.sessionRemote = handler; } @Override - public void removeLob(ValueLobDb lob) { + public void removeLob(ValueLob lob) { // not stored in the database } - /** - * Get the input stream for the given lob. 
- * - * @param lob the lob - * @param hmac the message authentication code (for remote input streams) - * @param byteCount the number of bytes to read, or -1 if not known - * @return the stream - */ @Override - public InputStream getInputStream(ValueLobDb lob, byte[] hmac, + public InputStream getInputStream(long lobId, long byteCount) throws IOException { - if (byteCount < 0) { - byteCount = Long.MAX_VALUE; - } - return new BufferedInputStream(new LobStorageRemoteInputStream( - handler, lob, hmac, byteCount)); + // this method is only implemented on the server side of a TCP connection + throw new IllegalStateException(); + } + + @Override + public InputStream getInputStream(long lobId, int tableId, long byteCount) throws IOException { + // this method is only implemented on the server side of a TCP + // connection + throw new IllegalStateException(); } @Override @@ -68,7 +65,7 @@ public boolean isReadOnly() { } @Override - public ValueLobDb copyLob(ValueLobDb old, int tableId, long length) { + public ValueLob copyLob(ValueLob old, int tableId) { throw new UnsupportedOperationException(); } @@ -78,11 +75,11 @@ public void removeAllForTable(int tableId) { } @Override - public Value createBlob(InputStream in, long maxLength) { + public ValueBlob createBlob(InputStream in, long maxLength) { // need to use a temp file, because the input stream could come from // the same database, which would create a weird situation (trying // to read a block while writing something) - return ValueLobDb.createTempBlob(in, maxLength, handler); + return ValueBlob.createTempBlob(in, maxLength, sessionRemote); } /** @@ -93,16 +90,10 @@ public Value createBlob(InputStream in, long maxLength) { * @return the LOB */ @Override - public Value createClob(Reader reader, long maxLength) { + public ValueClob createClob(Reader reader, long maxLength) { // need to use a temp file, because the input stream could come from // the same database, which would create a weird situation (trying // to read a 
block while writing something) - return ValueLobDb.createTempClob(reader, maxLength, handler); + return ValueClob.createTempClob(reader, maxLength, sessionRemote); } - - @Override - public void init() { - // nothing to do - } - } diff --git a/h2/src/main/org/h2/store/LobStorageInterface.java b/h2/src/main/org/h2/store/LobStorageInterface.java index 7b9a1a7a06..b750c5a83b 100644 --- a/h2/src/main/org/h2/store/LobStorageInterface.java +++ b/h2/src/main/org/h2/store/LobStorageInterface.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; @@ -8,8 +8,10 @@ import java.io.IOException; import java.io.InputStream; import java.io.Reader; -import org.h2.value.Value; -import org.h2.value.ValueLobDb; + +import org.h2.value.ValueBlob; +import org.h2.value.ValueClob; +import org.h2.value.ValueLob; /** * A mechanism to store and retrieve lob data. @@ -23,7 +25,7 @@ public interface LobStorageInterface { * @param maxLength the maximum length (-1 if not known) * @return the LOB */ - Value createClob(Reader reader, long maxLength); + ValueClob createClob(Reader reader, long maxLength); /** * Create a BLOB object. @@ -32,35 +34,44 @@ public interface LobStorageInterface { * @param maxLength the maximum length (-1 if not known) * @return the LOB */ - Value createBlob(InputStream in, long maxLength); + ValueBlob createBlob(InputStream in, long maxLength); /** * Copy a lob. * * @param old the old lob * @param tableId the new table id - * @param length the length * @return the new lob */ - ValueLobDb copyLob(ValueLobDb old, int tableId, long length); + ValueLob copyLob(ValueLob old, int tableId); /** - * Get the input stream for the given lob. 
+ * Get the input stream for the given lob, only called on server side of a TCP connection. * - * @param lob the lob id - * @param hmac the message authentication code (for remote input streams) + * @param lobId the lob id * @param byteCount the number of bytes to read, or -1 if not known * @return the stream + * @throws IOException on failure */ - InputStream getInputStream(ValueLobDb lob, byte[] hmac, long byteCount) - throws IOException; + InputStream getInputStream(long lobId, long byteCount) throws IOException; + + /** + * Get the input stream for the given lob + * + * @param lobId the lob id + * @param tableId the able id + * @param byteCount the number of bytes to read, or -1 if not known + * @return the stream + * @throws IOException on failure + */ + InputStream getInputStream(long lobId, int tableId, long byteCount) throws IOException; /** * Delete a LOB (from the database, if it is stored there). * * @param lob the lob */ - void removeLob(ValueLobDb lob); + void removeLob(ValueLob lob); /** * Remove all LOBs for this table. @@ -69,16 +80,10 @@ InputStream getInputStream(ValueLobDb lob, byte[] hmac, long byteCount) */ void removeAllForTable(int tableId); - /** - * Initialize the lob storage. - */ - void init(); - /** * Whether the storage is read-only * * @return true if yes */ boolean isReadOnly(); - } diff --git a/h2/src/main/org/h2/store/LobStorageMap.java b/h2/src/main/org/h2/store/LobStorageMap.java deleted file mode 100644 index 96abeb11a2..0000000000 --- a/h2/src/main/org/h2/store/LobStorageMap.java +++ /dev/null @@ -1,359 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.store; - -import java.io.IOException; -import java.io.InputStream; -import java.io.Reader; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Map.Entry; - -import org.h2.api.ErrorCode; -import org.h2.engine.Database; -import org.h2.message.DbException; -import org.h2.mvstore.MVMap; -import org.h2.mvstore.MVStore; -import org.h2.mvstore.StreamStore; -import org.h2.mvstore.db.MVTableEngine.Store; -import org.h2.util.IOUtils; -import org.h2.util.StringUtils; -import org.h2.value.Value; -import org.h2.value.ValueLobDb; - -/** - * This class stores LOB objects in the database, in maps. This is the back-end - * i.e. the server side of the LOB storage. - */ -public class LobStorageMap implements LobStorageInterface { - - private static final boolean TRACE = false; - - private final Database database; - - private boolean init; - - private final Object nextLobIdSync = new Object(); - private long nextLobId; - - /** - * The lob metadata map. It contains the mapping from the lob id - * (which is a long) to the stream store id (which is a byte array). - * - * Key: lobId (long) - * Value: { streamStoreId (byte[]), tableId (int), - * byteCount (long), hash (long) }. - */ - private MVMap lobMap; - - /** - * The reference map. It is used to remove data from the stream store: if no - * more entries for the given streamStoreId exist, the data is removed from - * the stream store. - * - * Key: { streamStoreId (byte[]), lobId (long) }. - * Value: true (boolean). 
- */ - private MVMap refMap; - - private StreamStore streamStore; - - public LobStorageMap(Database database) { - this.database = database; - } - - @Override - public void init() { - if (init) { - return; - } - init = true; - Store s = database.getStore(); - MVStore mvStore; - if (s == null) { - // in-memory database - mvStore = MVStore.open(null); - } else { - mvStore = s.getMvStore(); - } - lobMap = mvStore.openMap("lobMap"); - refMap = mvStore.openMap("lobRef"); - - /* The stream store data map. - * - * Key: stream store block id (long). - * Value: data (byte[]). - */ - MVMap dataMap = mvStore.openMap("lobData"); - streamStore = new StreamStore(dataMap); - // garbage collection of the last blocks - if (database.isReadOnly()) { - return; - } - if (dataMap.isEmpty()) { - return; - } - // search for the last block - // (in theory, only the latest lob can have unreferenced blocks, - // but the latest lob could be a copy of another one, and - // we don't know that, so we iterate over all lobs) - long lastUsedKey = -1; - for (Entry e : lobMap.entrySet()) { - long lobId = e.getKey(); - Object[] v = e.getValue(); - byte[] id = (byte[]) v[0]; - long max = streamStore.getMaxBlockKey(id); - // a lob may not have a referenced blocks if data is kept inline - if (max != -1 && max > lastUsedKey) { - lastUsedKey = max; - if (TRACE) { - trace("lob " + lobId + " lastUsedKey=" + lastUsedKey); - } - } - } - if (TRACE) { - trace("lastUsedKey=" + lastUsedKey); - } - // delete all blocks that are newer - while (true) { - Long last = dataMap.lastKey(); - if (last == null || last <= lastUsedKey) { - break; - } - if (TRACE) { - trace("gc " + last); - } - dataMap.remove(last); - } - // don't re-use block ids, except at the very end - Long last = dataMap.lastKey(); - if (last != null) { - streamStore.setNextKey(last + 1); - } - } - - @Override - public Value createBlob(InputStream in, long maxLength) { - init(); - int type = Value.BLOB; - try { - if (maxLength != -1 - && maxLength <= 
database.getMaxLengthInplaceLob()) { - byte[] small = new byte[(int) maxLength]; - int len = IOUtils.readFully(in, small, (int) maxLength); - if (len > maxLength) { - throw new IllegalStateException( - "len > blobLength, " + len + " > " + maxLength); - } - if (len < small.length) { - small = Arrays.copyOf(small, len); - } - return ValueLobDb.createSmallLob(type, small); - } - if (maxLength != -1) { - in = new RangeInputStream(in, 0L, maxLength); - } - return createLob(in, type); - } catch (IllegalStateException e) { - throw DbException.get(ErrorCode.OBJECT_CLOSED, e); - } catch (IOException e) { - throw DbException.convertIOException(e, null); - } - } - - @Override - public Value createClob(Reader reader, long maxLength) { - init(); - int type = Value.CLOB; - try { - // we multiple by 3 here to get the worst-case size in bytes - if (maxLength != -1 - && maxLength * 3 <= database.getMaxLengthInplaceLob()) { - char[] small = new char[(int) maxLength]; - int len = IOUtils.readFully(reader, small, (int) maxLength); - if (len > maxLength) { - throw new IllegalStateException( - "len > blobLength, " + len + " > " + maxLength); - } - byte[] utf8 = new String(small, 0, len) - .getBytes(StandardCharsets.UTF_8); - if (utf8.length > database.getMaxLengthInplaceLob()) { - throw new IllegalStateException( - "len > maxinplace, " + utf8.length + " > " - + database.getMaxLengthInplaceLob()); - } - return ValueLobDb.createSmallLob(type, utf8); - } - if (maxLength < 0) { - maxLength = Long.MAX_VALUE; - } - CountingReaderInputStream in = new CountingReaderInputStream(reader, - maxLength); - ValueLobDb lob = createLob(in, type); - // the length is not correct - lob = ValueLobDb.create(type, database, lob.getTableId(), - lob.getLobId(), null, in.getLength()); - return lob; - } catch (IllegalStateException e) { - throw DbException.get(ErrorCode.OBJECT_CLOSED, e); - } catch (IOException e) { - throw DbException.convertIOException(e, null); - } - } - - private ValueLobDb 
createLob(InputStream in, int type) throws IOException { - byte[] streamStoreId; - try { - streamStoreId = streamStore.put(in); - } catch (Exception e) { - throw DbException.convertToIOException(e); - } - long lobId = generateLobId(); - long length = streamStore.length(streamStoreId); - int tableId = LobStorageFrontend.TABLE_TEMP; - Object[] value = { streamStoreId, tableId, length, 0 }; - lobMap.put(lobId, value); - Object[] key = { streamStoreId, lobId }; - refMap.put(key, Boolean.TRUE); - ValueLobDb lob = ValueLobDb.create( - type, database, tableId, lobId, null, length); - if (TRACE) { - trace("create " + tableId + "/" + lobId); - } - return lob; - } - - private long generateLobId() { - synchronized (nextLobIdSync) { - if (nextLobId == 0) { - Long id = lobMap.lastKey(); - nextLobId = id == null ? 1 : id + 1; - } - return nextLobId++; - } - } - - @Override - public boolean isReadOnly() { - return database.isReadOnly(); - } - - @Override - public ValueLobDb copyLob(ValueLobDb old, int tableId, long length) { - init(); - int type = old.getValueType(); - long oldLobId = old.getLobId(); - long oldLength = old.getType().getPrecision(); - if (oldLength != length) { - throw DbException.throwInternalError("Length is different"); - } - Object[] value = lobMap.get(oldLobId); - value = value.clone(); - byte[] streamStoreId = (byte[]) value[0]; - long lobId = generateLobId(); - value[1] = tableId; - lobMap.put(lobId, value); - Object[] key = { streamStoreId, lobId }; - refMap.put(key, Boolean.TRUE); - ValueLobDb lob = ValueLobDb.create( - type, database, tableId, lobId, null, length); - if (TRACE) { - trace("copy " + old.getTableId() + "/" + old.getLobId() + - " > " + tableId + "/" + lobId); - } - return lob; - } - - @Override - public InputStream getInputStream(ValueLobDb lob, byte[] hmac, long byteCount) - throws IOException { - init(); - Object[] value = lobMap.get(lob.getLobId()); - if (value == null) { - if (lob.getTableId() == LobStorageFrontend.TABLE_RESULT || - 
lob.getTableId() == LobStorageFrontend.TABLE_ID_SESSION_VARIABLE) { - throw DbException.get( - ErrorCode.LOB_CLOSED_ON_TIMEOUT_1, lob.getLobId() + "/" + lob.getTableId()); - } - throw DbException.throwInternalError("Lob not found: " + - lob.getLobId() + "/" + lob.getTableId()); - } - byte[] streamStoreId = (byte[]) value[0]; - return streamStore.get(streamStoreId); - } - - @Override - public void removeAllForTable(int tableId) { - init(); - if (database.getStore().getMvStore().isClosed()) { - return; - } - // this might not be very efficient - - // to speed it up, we would need yet another map - ArrayList list = new ArrayList<>(); - for (Entry e : lobMap.entrySet()) { - Object[] value = e.getValue(); - int t = (Integer) value[1]; - if (t == tableId) { - list.add(e.getKey()); - } - } - for (long lobId : list) { - removeLob(tableId, lobId); - } - if (tableId == LobStorageFrontend.TABLE_ID_SESSION_VARIABLE) { - removeAllForTable(LobStorageFrontend.TABLE_TEMP); - removeAllForTable(LobStorageFrontend.TABLE_RESULT); - } - } - - @Override - public void removeLob(ValueLobDb lob) { - init(); - int tableId = lob.getTableId(); - long lobId = lob.getLobId(); - removeLob(tableId, lobId); - } - - private void removeLob(int tableId, long lobId) { - if (TRACE) { - trace("remove " + tableId + "/" + lobId); - } - Object[] value = lobMap.remove(lobId); - if (value == null) { - // already removed - return; - } - byte[] streamStoreId = (byte[]) value[0]; - Object[] key = {streamStoreId, lobId }; - refMap.remove(key); - // check if there are more entries for this streamStoreId - key = new Object[] {streamStoreId, 0L }; - value = refMap.ceilingKey(key); - boolean hasMoreEntries = false; - if (value != null) { - byte[] s2 = (byte[]) value[0]; - if (Arrays.equals(streamStoreId, s2)) { - if (TRACE) { - trace(" stream still needed in lob " + value[1]); - } - hasMoreEntries = true; - } - } - if (!hasMoreEntries) { - if (TRACE) { - trace(" remove stream " + 
StringUtils.convertBytesToHex(streamStoreId)); - } - streamStore.remove(streamStoreId); - } - } - - private static void trace(String op) { - System.out.println("[" + Thread.currentThread().getName() + "] LOB " + op); - } - -} diff --git a/h2/src/main/org/h2/store/LobStorageRemoteInputStream.java b/h2/src/main/org/h2/store/LobStorageRemoteInputStream.java index d1a3c054a1..06e1d86adf 100644 --- a/h2/src/main/org/h2/store/LobStorageRemoteInputStream.java +++ b/h2/src/main/org/h2/store/LobStorageRemoteInputStream.java @@ -1,30 +1,28 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, and the + * EPL 1.0 (https://h2database.com/html/license.html). Initial Developer: H2 + * Group */ package org.h2.store; import java.io.IOException; import java.io.InputStream; - +import org.h2.engine.SessionRemote; import org.h2.message.DbException; -import org.h2.value.ValueLobDb; +import org.h2.mvstore.DataUtils; /** - * An input stream that reads from a remote LOB. + * An input stream used by the client side of a tcp connection to fetch LOB data + * on demand from the server. */ -class LobStorageRemoteInputStream extends InputStream { +public class LobStorageRemoteInputStream extends InputStream { - /** - * The data handler. - */ - private final DataHandler handler; + private final SessionRemote sessionRemote; /** * The lob id. */ - private final long lob; + private final long lobId; private final byte[] hmac; @@ -33,17 +31,10 @@ class LobStorageRemoteInputStream extends InputStream { */ private long pos; - /** - * The remaining bytes in the lob. 
- */ - private long remainingBytes; - - public LobStorageRemoteInputStream(DataHandler handler, ValueLobDb lob, - byte[] hmac, long byteCount) { - this.handler = handler; - this.lob = lob.getLobId(); + public LobStorageRemoteInputStream(SessionRemote handler, long lobId, byte[] hmac) { + this.sessionRemote = handler; + this.lobId = lobId; this.hmac = hmac; - remainingBytes = byteCount; } @Override @@ -60,31 +51,20 @@ public int read(byte[] buff) throws IOException { @Override public int read(byte[] buff, int off, int length) throws IOException { + assert(length >= 0); if (length == 0) { return 0; } - length = (int) Math.min(length, remainingBytes); - if (length == 0) { - return -1; - } try { - length = handler.readLob(lob, hmac, pos, buff, off, length); + length = sessionRemote.readLob(lobId, hmac, pos, buff, off, length); } catch (DbException e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } if (length == 0) { return -1; } - remainingBytes -= length; pos += length; return length; } - @Override - public long skip(long n) { - remainingBytes -= n; - pos += n; - return n; - } - -} \ No newline at end of file +} diff --git a/h2/src/main/org/h2/store/Page.java b/h2/src/main/org/h2/store/Page.java deleted file mode 100644 index 8ce683908d..0000000000 --- a/h2/src/main/org/h2/store/Page.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store; - -import java.lang.reflect.Array; -import org.h2.engine.Session; -import org.h2.util.CacheObject; - -/** - * A page. Format: - *
          • 0-3: parent page id (0 for root) - *
          • 4-4: page type - *
          • page-type specific data - *
          - */ -public abstract class Page extends CacheObject { - - /** - * This is the last page of a chain. - */ - public static final int FLAG_LAST = 16; - - /** - * An empty page. - */ - public static final int TYPE_EMPTY = 0; - - /** - * A data leaf page (without overflow: + FLAG_LAST). - */ - public static final int TYPE_DATA_LEAF = 1; - - /** - * A data node page (never has overflow pages). - */ - public static final int TYPE_DATA_NODE = 2; - - /** - * A data overflow page (the last page: + FLAG_LAST). - */ - public static final int TYPE_DATA_OVERFLOW = 3; - - /** - * A b-tree leaf page (without overflow: + FLAG_LAST). - */ - public static final int TYPE_BTREE_LEAF = 4; - - /** - * A b-tree node page (never has overflow pages). - */ - public static final int TYPE_BTREE_NODE = 5; - - /** - * A page containing a list of free pages (the last page: + FLAG_LAST). - */ - public static final int TYPE_FREE_LIST = 6; - - /** - * A stream trunk page. - */ - public static final int TYPE_STREAM_TRUNK = 7; - - /** - * A stream data page. - */ - public static final int TYPE_STREAM_DATA = 8; - - private static final int COPY_THRESHOLD = 4; - - /** - * When this page was changed the last time. - */ - protected long changeCount; - - /** - * Copy the data to a new location, change the parent to point to the new - * location, and free up the current page. - * - * @param session the session - * @param newPos the new position - */ - public abstract void moveTo(Session session, int newPos); - - /** - * Write the page. - */ - public abstract void write(); - - /** - * Insert a value in an array. A new array is created if required. 
- * - * @param old the old array - * @param oldSize the old size - * @param pos the position - * @param x the value to insert - * @return the (new) array - */ - @SuppressWarnings("unchecked") - public static T[] insert(T[] old, int oldSize, int pos, T x) { - T[] result; - if (old.length > oldSize) { - result = old; - } else { - // according to a test, this is as fast as "new Row[..]" - result = (T[]) Array.newInstance( - old.getClass().getComponentType(), oldSize + 1 + COPY_THRESHOLD); - if (pos > 0) { - System.arraycopy(old, 0, result, 0, pos); - } - } - if (oldSize - pos > 0) { - System.arraycopy(old, pos, result, pos + 1, oldSize - pos); - } - result[pos] = x; - return result; - } - - /** - * Delete a value in an array. A new array is created if required. - * - * @param old the old array - * @param oldSize the old size - * @param pos the position - * @return the (new) array - */ - @SuppressWarnings("unchecked") - public - static T[] remove(T[] old, int oldSize, int pos) { - T[] result; - if (old.length - oldSize < COPY_THRESHOLD) { - result = old; - } else { - // according to a test, this is as fast as "new Row[..]" - result = (T[]) Array.newInstance( - old.getClass().getComponentType(), oldSize - 1); - System.arraycopy(old, 0, result, 0, Math.min(oldSize - 1, pos)); - } - if (pos < oldSize) { - System.arraycopy(old, pos + 1, result, pos, oldSize - pos - 1); - } - return result; - } - - /** - * Insert a value in an array. A new array is created if required. 
- * - * @param old the old array - * @param oldSize the old size - * @param pos the position - * @param x the value to insert - * @return the (new) array - */ - protected static long[] insert(long[] old, int oldSize, int pos, long x) { - long[] result; - if (old != null && old.length > oldSize) { - result = old; - } else { - result = new long[oldSize + 1 + COPY_THRESHOLD]; - if (pos > 0) { - System.arraycopy(old, 0, result, 0, pos); - } - } - if (old != null && oldSize - pos > 0) { - System.arraycopy(old, pos, result, pos + 1, oldSize - pos); - } - result[pos] = x; - return result; - } - - /** - * Delete a value in an array. A new array is created if required. - * - * @param old the old array - * @param oldSize the old size - * @param pos the position - * @return the (new) array - */ - protected static long[] remove(long[] old, int oldSize, int pos) { - long[] result; - if (old.length - oldSize < COPY_THRESHOLD) { - result = old; - } else { - result = new long[oldSize - 1]; - System.arraycopy(old, 0, result, 0, pos); - } - System.arraycopy(old, pos + 1, result, pos, oldSize - pos - 1); - return result; - } - - /** - * Insert a value in an array. A new array is created if required. - * - * @param old the old array - * @param oldSize the old size - * @param pos the position - * @param x the value to insert - * @return the (new) array - */ - protected static int[] insert(int[] old, int oldSize, int pos, int x) { - int[] result; - if (old != null && old.length > oldSize) { - result = old; - } else { - result = new int[oldSize + 1 + COPY_THRESHOLD]; - if (pos > 0 && old != null) { - System.arraycopy(old, 0, result, 0, pos); - } - } - if (old != null && oldSize - pos > 0) { - System.arraycopy(old, pos, result, pos + 1, oldSize - pos); - } - result[pos] = x; - return result; - } - - /** - * Delete a value in an array. A new array is created if required. 
- * - * @param old the old array - * @param oldSize the old size - * @param pos the position - * @return the (new) array - */ - protected static int[] remove(int[] old, int oldSize, int pos) { - int[] result; - if (old.length - oldSize < COPY_THRESHOLD) { - result = old; - } else { - result = new int[oldSize - 1]; - System.arraycopy(old, 0, result, 0, Math.min(oldSize - 1, pos)); - } - if (pos < oldSize) { - System.arraycopy(old, pos + 1, result, pos, oldSize - pos - 1); - } - return result; - } - - /** - * Add a value to a subset of the array. - * - * @param array the array - * @param from the index of the first element (including) - * @param to the index of the last element (excluding) - * @param x the value to add - */ - protected static void add(int[] array, int from, int to, int x) { - for (int i = from; i < to; i++) { - array[i] += x; - } - } - - /** - * If this page can be moved. Transaction log and free-list pages can not. - * - * @return true if moving is allowed - */ - public boolean canMove() { - return true; - } - -} diff --git a/h2/src/main/org/h2/store/PageFreeList.java b/h2/src/main/org/h2/store/PageFreeList.java deleted file mode 100644 index 42bc394235..0000000000 --- a/h2/src/main/org/h2/store/PageFreeList.java +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store; - -import java.util.BitSet; - -import org.h2.engine.Session; - -/** - * The list of free pages of a page store. The format of a free list trunk page - * is: - *
            - *
          • page type: byte (0)
          • - *
          • checksum: short (1-2)
          • - *
          • data (3-)
          • - *
          - */ -public class PageFreeList extends Page { - - private static final int DATA_START = 3; - - private final PageStore store; - private final BitSet used; - private final int pageCount; - private boolean full; - private Data data; - - private PageFreeList(PageStore store, int pageId, int pageCount, BitSet used) { - // kept in cache, and array list in page store - setPos(pageId); - this.store = store; - this.pageCount = pageCount; - this.used = used; - } - - /** - * Read a free-list page. - * - * @param store the page store - * @param data the data - * @param pageId the page id - * @return the page - */ - static PageFreeList read(PageStore store, Data data, int pageId) { - data.reset(); - data.readByte(); - data.readShortInt(); - int length = store.getPageSize() - DATA_START; - byte[] b = new byte[length]; - data.read(b, 0, b.length); - PageFreeList p = new PageFreeList(store, pageId, length * 8, BitSet.valueOf(b)); - p.data = data; - p.full = false; - return p; - } - - /** - * Create a new free-list page. - * - * @param store the page store - * @param pageId the page id - * @return the page - */ - static PageFreeList create(PageStore store, int pageId) { - int pageCount = (store.getPageSize() - DATA_START) * 8; - BitSet used = new BitSet(pageCount); - used.set(0); - return new PageFreeList(store, pageId, pageCount, used); - } - - /** - * Allocate a page from the free list. 
- * - * @param exclude the exclude list or null - * @param first the first page to look for - * @return the page, or -1 if all pages are used - */ - int allocate(BitSet exclude, int first) { - if (full) { - return -1; - } - // TODO cache last result - int start = Math.max(0, first - getPos()); - while (true) { - int free = used.nextClearBit(start); - if (free >= pageCount) { - if (start == 0) { - full = true; - } - return -1; - } - if (exclude != null && exclude.get(free + getPos())) { - start = exclude.nextClearBit(free + getPos()) - getPos(); - if (start >= pageCount) { - return -1; - } - } else { - // set the bit first, because logUndo can - // allocate other pages, and we must not - // return the same page twice - used.set(free); - store.logUndo(this, data); - store.update(this); - return free + getPos(); - } - } - } - - /** - * Get the first free page starting at the given offset. - * - * @param first the page number to start the search - * @return the page number, or -1 - */ - int getFirstFree(int first) { - if (full) { - return -1; - } - int start = Math.max(0, first - getPos()); - int free = used.nextClearBit(start); - if (free >= pageCount) { - return -1; - } - return free + getPos(); - } - - int getLastUsed() { - int last = used.length() - 1; - return last <= 0 ? -1 : last + getPos(); - } - - /** - * Mark a page as used. - * - * @param pageId the page id - */ - void allocate(int pageId) { - int idx = pageId - getPos(); - if (idx >= 0 && !used.get(idx)) { - // set the bit first, because logUndo can - // allocate other pages, and we must not - // return the same page twice - used.set(idx); - store.logUndo(this, data); - store.update(this); - } - } - - /** - * Add a page to the free list. 
- * - * @param pageId the page id to add - */ - void free(int pageId) { - full = false; - store.logUndo(this, data); - used.clear(pageId - getPos()); - store.update(this); - } - - @Override - public void write() { - data = store.createData(); - data.writeByte((byte) Page.TYPE_FREE_LIST); - data.writeShortInt(0); - int cnt = pageCount >>> 3; - byte[] b = used.toByteArray(); - int l = Math.min(b.length, cnt); - data.write(b, 0, l); - for (int i = cnt - l; i > 0; i--) { - data.writeByte((byte) 0); - } - store.writePage(getPos(), data); - } - - /** - * Get the number of pages that can fit in a free list. - * - * @param pageSize the page size - * @return the number of pages - */ - public static int getPagesAddressed(int pageSize) { - return (pageSize - DATA_START) * 8; - } - - /** - * Get the estimated memory size. - * - * @return number of double words (4 bytes) - */ - @Override - public int getMemory() { - return store.getPageSize() >> 2; - } - - /** - * Check if a page is already in use. - * - * @param pageId the page to check - * @return true if it is in use - */ - boolean isUsed(int pageId) { - return used.get(pageId - getPos()); - } - - @Override - public void moveTo(Session session, int newPos) { - // the old data does not need to be copied, as free-list pages - // at the end of the file are not required - store.free(getPos(), false); - } - - @Override - public String toString() { - return "page [" + getPos() + "] freeList" + (full ? "full" : ""); - } - - @Override - public boolean canRemove() { - return true; - } - - @Override - public boolean canMove() { - return false; - } - -} diff --git a/h2/src/main/org/h2/store/PageInputStream.java b/h2/src/main/org/h2/store/PageInputStream.java deleted file mode 100644 index 277793e6c0..0000000000 --- a/h2/src/main/org/h2/store/PageInputStream.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.store; - -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.util.BitSet; - -import org.h2.message.DbException; -import org.h2.message.Trace; - -/** - * An input stream that reads from a page store. - */ -public class PageInputStream extends InputStream { - - private final PageStore store; - private final Trace trace; - private final int firstTrunkPage; - private final PageStreamTrunk.Iterator trunkIterator; - private int dataPage; - private PageStreamTrunk trunk; - private int trunkIndex; - private PageStreamData data; - private int dataPos; - private boolean endOfFile; - private int remaining; - private final byte[] buffer = { 0 }; - private int logKey; - - PageInputStream(PageStore store, int logKey, int firstTrunkPage, int dataPage) { - this.store = store; - this.trace = store.getTrace(); - // minus one because we increment before comparing - this.logKey = logKey - 1; - this.firstTrunkPage = firstTrunkPage; - trunkIterator = new PageStreamTrunk.Iterator(store, firstTrunkPage); - this.dataPage = dataPage; - } - - @Override - public int read() throws IOException { - int len = read(buffer); - return len < 0 ? -1 : (buffer[0] & 255); - } - - @Override - public int read(byte[] b) throws IOException { - return read(b, 0, b.length); - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - if (len == 0) { - return 0; - } - int read = 0; - while (len > 0) { - int r = readBlock(b, off, len); - if (r < 0) { - break; - } - read += r; - off += r; - len -= r; - } - return read == 0 ? 
-1 : read; - } - - private int readBlock(byte[] buff, int off, int len) throws IOException { - try { - fillBuffer(); - if (endOfFile) { - return -1; - } - int l = Math.min(remaining, len); - data.read(dataPos, buff, off, l); - remaining -= l; - dataPos += l; - return l; - } catch (DbException e) { - throw new EOFException(); - } - } - - private void fillBuffer() { - if (remaining > 0 || endOfFile) { - return; - } - int next; - while (true) { - if (trunk == null) { - trunk = trunkIterator.next(); - trunkIndex = 0; - logKey++; - if (trunk == null || trunk.getLogKey() != logKey) { - endOfFile = true; - return; - } - } - if (trunk != null) { - next = trunk.getPageData(trunkIndex++); - if (next == -1) { - trunk = null; - } else if (dataPage == -1 || dataPage == next) { - break; - } - } - } - if (trace.isDebugEnabled()) { - trace.debug("pageIn.readPage " + next); - } - dataPage = -1; - data = null; - Page p = store.getPage(next); - if (p instanceof PageStreamData) { - data = (PageStreamData) p; - } - if (data == null || data.getLogKey() != logKey) { - endOfFile = true; - return; - } - dataPos = PageStreamData.getReadStart(); - remaining = store.getPageSize() - dataPos; - } - - /** - * Set all pages as 'allocated' in the page store. 
- * - * @return the bit set - */ - BitSet allocateAllPages() { - BitSet pages = new BitSet(); - int key = logKey; - PageStreamTrunk.Iterator it = new PageStreamTrunk.Iterator( - store, firstTrunkPage); - while (true) { - PageStreamTrunk t = it.next(); - key++; - if (it.canDelete()) { - store.allocatePage(it.getCurrentPageId()); - } - if (t == null || t.getLogKey() != key) { - break; - } - pages.set(t.getPos()); - for (int i = 0;; i++) { - int n = t.getPageData(i); - if (n == -1) { - break; - } - pages.set(n); - store.allocatePage(n); - } - } - return pages; - } - - int getDataPage() { - return data.getPos(); - } - - @Override - public void close() { - // nothing to do - } - -} diff --git a/h2/src/main/org/h2/store/PageLog.java b/h2/src/main/org/h2/store/PageLog.java deleted file mode 100644 index 53ad1dea58..0000000000 --- a/h2/src/main/org/h2/store/PageLog.java +++ /dev/null @@ -1,895 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.BitSet; -import java.util.HashMap; - -import org.h2.api.ErrorCode; -import org.h2.compress.CompressLZF; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.message.Trace; -import org.h2.result.Row; -import org.h2.result.RowFactory; -import org.h2.util.IntArray; -import org.h2.util.IntIntHashMap; -import org.h2.util.Utils; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * Transaction log mechanism. The stream contains a list of records. The data - * format for a record is: - *
            - *
          • type (0: no-op, 1: undo, 2: commit, ...)
          • - *
          • data
          • - *
          - * The transaction log is split into sections. - * A checkpoint starts a new section. - */ -public class PageLog { - - /** - * No operation. - */ - public static final int NOOP = 0; - - /** - * An undo log entry. Format: page id: varInt, size, page. Size 0 means - * uncompressed, size 1 means empty page, otherwise the size is the number - * of compressed bytes. - */ - public static final int UNDO = 1; - - /** - * A commit entry of a session. - * Format: session id: varInt. - */ - public static final int COMMIT = 2; - - /** - * A prepare commit entry for a session. - * Format: session id: varInt, transaction name: string. - */ - public static final int PREPARE_COMMIT = 3; - - /** - * Roll back a prepared transaction. - * Format: session id: varInt. - */ - public static final int ROLLBACK = 4; - - /** - * Add a record to a table. - * Format: session id: varInt, table id: varInt, row. - */ - public static final int ADD = 5; - - /** - * Remove a record from a table. - * Format: session id: varInt, table id: varInt, row. - */ - public static final int REMOVE = 6; - - /** - * Truncate a table. - * Format: session id: varInt, table id: varInt. - */ - public static final int TRUNCATE = 7; - - /** - * Perform a checkpoint. The log section id is incremented. - * Format: - - */ - public static final int CHECKPOINT = 8; - - /** - * Free a log page. - * Format: count: varInt, page ids: varInt - */ - public static final int FREE_LOG = 9; - - /** - * The recovery stage to undo changes (re-apply the backup). - */ - static final int RECOVERY_STAGE_UNDO = 0; - - /** - * The recovery stage to allocate pages used by the transaction log. - */ - static final int RECOVERY_STAGE_ALLOCATE = 1; - - /** - * The recovery stage to redo operations. 
- */ - static final int RECOVERY_STAGE_REDO = 2; - - private static final boolean COMPRESS_UNDO = true; - - private final PageStore store; - private final Trace trace; - - private Data writeBuffer; - private PageOutputStream pageOut; - private int firstTrunkPage; - private int firstDataPage; - private final Data dataBuffer; - private int logKey; - private int logSectionId, logPos; - private int firstSectionId; - - private final CompressLZF compress; - private final byte[] compressBuffer; - - /** - * If the bit is set, the given page was written to the current log section. - * The undo entry of these pages doesn't need to be written again. - */ - private BitSet undo = new BitSet(); - - /** - * The undo entry of those pages was written in any log section. - * These pages may not be used in the transaction log. - */ - private final BitSet undoAll = new BitSet(); - - /** - * The map of section ids (key) and data page where the section starts - * (value). - */ - private final IntIntHashMap logSectionPageMap = new IntIntHashMap(); - - /** - * The session state map. - * Only used during recovery. - */ - private HashMap sessionStates = new HashMap<>(); - - /** - * The map of pages used by the transaction log. - * Only used during recovery. - */ - private BitSet usedLogPages; - - /** - * This flag is set while freeing up pages. - */ - private boolean freeing; - - PageLog(PageStore store) { - this.store = store; - dataBuffer = store.createData(); - trace = store.getTrace(); - compress = new CompressLZF(); - compressBuffer = new byte[store.getPageSize() * 2]; - } - - /** - * Open the log for writing. For an existing database, the recovery - * must be run first. 
- * - * @param newFirstTrunkPage the first trunk page - * @param atEnd whether only pages at the end of the file should be used - */ - void openForWriting(int newFirstTrunkPage, boolean atEnd) { - trace.debug("log openForWriting firstPage: " + newFirstTrunkPage); - this.firstTrunkPage = newFirstTrunkPage; - logKey++; - pageOut = new PageOutputStream(store, - newFirstTrunkPage, undoAll, logKey, atEnd); - pageOut.reserve(1); - // pageBuffer = new BufferedOutputStream(pageOut, 8 * 1024); - store.setLogFirstPage(logKey, newFirstTrunkPage, - pageOut.getCurrentDataPageId()); - writeBuffer = store.createData(); - } - - /** - * Free up all pages allocated by the log. - */ - void free() { - if (trace.isDebugEnabled()) { - trace.debug("log free"); - } - int currentDataPage = 0; - if (pageOut != null) { - currentDataPage = pageOut.getCurrentDataPageId(); - pageOut.freeReserved(); - } - try { - freeing = true; - int first = 0; - int loopDetect = 1024, loopCount = 0; - PageStreamTrunk.Iterator it = new PageStreamTrunk.Iterator( - store, firstTrunkPage); - while (firstTrunkPage != 0 && firstTrunkPage < store.getPageCount()) { - PageStreamTrunk t = it.next(); - if (t == null) { - if (it.canDelete()) { - store.free(firstTrunkPage, false); - } - break; - } - if (loopCount++ >= loopDetect) { - first = t.getPos(); - loopCount = 0; - loopDetect *= 2; - } else if (first != 0 && first == t.getPos()) { - throw DbException.throwInternalError( - "endless loop at " + t); - } - t.free(currentDataPage); - firstTrunkPage = t.getNextTrunk(); - } - } finally { - freeing = false; - } - } - - /** - * Open the log for reading. 
- * - * @param newLogKey the first expected log key - * @param newFirstTrunkPage the first trunk page - * @param newFirstDataPage the index of the first data page - */ - void openForReading(int newLogKey, int newFirstTrunkPage, - int newFirstDataPage) { - this.logKey = newLogKey; - this.firstTrunkPage = newFirstTrunkPage; - this.firstDataPage = newFirstDataPage; - } - - /** - * Run one recovery stage. There are three recovery stages: 0: only the undo - * steps are run (restoring the state before the last checkpoint). 1: the - * pages that are used by the transaction log are allocated. 2: the - * committed operations are re-applied. - * - * @param stage the recovery stage - * @return whether the transaction log was empty - */ - boolean recover(int stage) { - if (trace.isDebugEnabled()) { - trace.debug("log recover stage: " + stage); - } - if (stage == RECOVERY_STAGE_ALLOCATE) { - PageInputStream in = new PageInputStream(store, - logKey, firstTrunkPage, firstDataPage); - usedLogPages = in.allocateAllPages(); - in.close(); - return true; - } - PageInputStream pageIn = new PageInputStream(store, - logKey, firstTrunkPage, firstDataPage); - DataReader in = new DataReader(pageIn); - int logId = 0; - Data data = store.createData(); - boolean isEmpty = true; - try { - int pos = 0; - while (true) { - int x = in.readByte(); - if (x < 0) { - break; - } - pos++; - isEmpty = false; - if (x == UNDO) { - int pageId = in.readVarInt(); - int size = in.readVarInt(); - if (size == 0) { - in.readFully(data.getBytes(), store.getPageSize()); - } else if (size == 1) { - // empty - Arrays.fill(data.getBytes(), 0, store.getPageSize(), (byte) 0); - } else { - in.readFully(compressBuffer, size); - try { - compress.expand(compressBuffer, 0, size, - data.getBytes(), 0, store.getPageSize()); - } catch (ArrayIndexOutOfBoundsException e) { - DbException.convertToIOException(e); - } - } - if (stage == RECOVERY_STAGE_UNDO) { - if (!undo.get(pageId)) { - if (trace.isDebugEnabled()) { - 
trace.debug("log undo {0}", pageId); - } - store.writePage(pageId, data); - undo.set(pageId); - undoAll.set(pageId); - } else { - if (trace.isDebugEnabled()) { - trace.debug("log undo skip {0}", pageId); - } - } - } - } else if (x == ADD) { - int sessionId = in.readVarInt(); - int tableId = in.readVarInt(); - Row row = readRow(store.getDatabase().getRowFactory(), in, data); - if (stage == RECOVERY_STAGE_UNDO) { - store.allocateIfIndexRoot(pos, tableId, row); - } else if (stage == RECOVERY_STAGE_REDO) { - if (isSessionCommitted(sessionId, logId, pos)) { - if (trace.isDebugEnabled()) { - trace.debug("log redo + table: " + tableId + - " s: " + sessionId + " " + row); - } - store.redo(tableId, row, true); - } else { - if (trace.isDebugEnabled()) { - trace.debug("log ignore s: " + sessionId + - " + table: " + tableId + " " + row); - } - } - } - } else if (x == REMOVE) { - int sessionId = in.readVarInt(); - int tableId = in.readVarInt(); - long key = in.readVarLong(); - if (stage == RECOVERY_STAGE_REDO) { - if (isSessionCommitted(sessionId, logId, pos)) { - if (trace.isDebugEnabled()) { - trace.debug("log redo - table: " + tableId + - " s:" + sessionId + " key: " + key); - } - store.redoDelete(tableId, key); - } else { - if (trace.isDebugEnabled()) { - trace.debug("log ignore s: " + sessionId + - " - table: " + tableId + " " + key); - } - } - } - } else if (x == TRUNCATE) { - int sessionId = in.readVarInt(); - int tableId = in.readVarInt(); - if (stage == RECOVERY_STAGE_REDO) { - if (isSessionCommitted(sessionId, logId, pos)) { - if (trace.isDebugEnabled()) { - trace.debug("log redo truncate table: " + tableId); - } - store.redoTruncate(tableId); - } else { - if (trace.isDebugEnabled()) { - trace.debug("log ignore s: "+ sessionId + - " truncate table: " + tableId); - } - } - } - } else if (x == PREPARE_COMMIT) { - int sessionId = in.readVarInt(); - String transaction = in.readString(); - if (trace.isDebugEnabled()) { - trace.debug("log prepare commit " + sessionId + " " 
+ - transaction + " pos: " + pos); - } - if (stage == RECOVERY_STAGE_UNDO) { - int page = pageIn.getDataPage(); - setPrepareCommit(sessionId, page, transaction); - } - } else if (x == ROLLBACK) { - int sessionId = in.readVarInt(); - if (trace.isDebugEnabled()) { - trace.debug("log rollback " + sessionId + " pos: " + pos); - } - // ignore - this entry is just informational - } else if (x == COMMIT) { - int sessionId = in.readVarInt(); - if (trace.isDebugEnabled()) { - trace.debug("log commit " + sessionId + " pos: " + pos); - } - if (stage == RECOVERY_STAGE_UNDO) { - setLastCommitForSession(sessionId, logId, pos); - } - } else if (x == NOOP) { - // nothing to do - } else if (x == CHECKPOINT) { - logId++; - } else if (x == FREE_LOG) { - int count = in.readVarInt(); - for (int i = 0; i < count; i++) { - int pageId = in.readVarInt(); - if (stage == RECOVERY_STAGE_REDO) { - if (!usedLogPages.get(pageId)) { - store.free(pageId, false); - } - } - } - } else { - if (trace.isDebugEnabled()) { - trace.debug("log end"); - break; - } - } - } - } catch (DbException e) { - if (e.getErrorCode() == ErrorCode.FILE_CORRUPTED_1) { - trace.debug("log recovery stopped"); - } else { - throw e; - } - } catch (IOException e) { - trace.debug("log recovery completed"); - } - undo = new BitSet(); - if (stage == RECOVERY_STAGE_REDO) { - usedLogPages = null; - } - return isEmpty; - } - - /** - * This method is called when a 'prepare commit' log entry is read when - * opening the database. 
- * - * @param sessionId the session id - * @param pageId the data page with the prepare entry - * @param transaction the transaction name, or null to rollback - */ - private void setPrepareCommit(int sessionId, int pageId, String transaction) { - SessionState state = getOrAddSessionState(sessionId); - PageStoreInDoubtTransaction doubt; - if (transaction == null) { - doubt = null; - } else { - doubt = new PageStoreInDoubtTransaction(store, sessionId, pageId, - transaction); - } - state.inDoubtTransaction = doubt; - } - - /** - * Read a row from an input stream. - * - * @param rowFactory the row factory - * @param in the input stream - * @param data a temporary buffer - * @return the row - */ - public static Row readRow(RowFactory rowFactory, DataReader in, Data data) throws IOException { - long key = in.readVarLong(); - int len = in.readVarInt(); - data.reset(); - data.checkCapacity(len); - in.readFully(data.getBytes(), len); - int columnCount = data.readVarInt(); - Value[] values = new Value[columnCount]; - for (int i = 0; i < columnCount; i++) { - values[i] = data.readValue(); - } - Row row = rowFactory.createRow(values, Row.MEMORY_CALCULATE); - row.setKey(key); - return row; - } - - /** - * Check if the undo entry was already written for the given page. - * - * @param pageId the page - * @return true if it was written - */ - boolean getUndo(int pageId) { - return undo.get(pageId); - } - - /** - * Add an undo entry to the log. The page data is only written once until - * the next checkpoint. 
- * - * @param pageId the page id - * @param page the old page data - */ - void addUndo(int pageId, Data page) { - if (undo.get(pageId) || freeing) { - return; - } - if (trace.isDebugEnabled()) { - trace.debug("log undo " + pageId); - } - if (page == null) { - DbException.throwInternalError("Undo entry not written"); - } - undo.set(pageId); - undoAll.set(pageId); - Data buffer = getBuffer(); - buffer.writeByte((byte) UNDO); - buffer.writeVarInt(pageId); - if (page.getBytes()[0] == 0) { - buffer.writeVarInt(1); - } else { - int pageSize = store.getPageSize(); - if (COMPRESS_UNDO) { - int size = compress.compress(page.getBytes(), - pageSize, compressBuffer, 0); - if (size < pageSize) { - buffer.writeVarInt(size); - buffer.checkCapacity(size); - buffer.write(compressBuffer, 0, size); - } else { - buffer.writeVarInt(0); - buffer.checkCapacity(pageSize); - buffer.write(page.getBytes(), 0, pageSize); - } - } else { - buffer.writeVarInt(0); - buffer.checkCapacity(pageSize); - buffer.write(page.getBytes(), 0, pageSize); - } - } - write(buffer); - } - - private void freeLogPages(IntArray pages) { - if (trace.isDebugEnabled()) { - trace.debug("log frees " + pages.get(0) + ".." + - pages.get(pages.size() - 1)); - } - Data buffer = getBuffer(); - buffer.writeByte((byte) FREE_LOG); - int size = pages.size(); - buffer.writeVarInt(size); - for (int i = 0; i < size; i++) { - buffer.writeVarInt(pages.get(i)); - } - write(buffer); - } - - private void write(Data data) { - pageOut.write(data.getBytes(), 0, data.length()); - data.reset(); - } - - /** - * Mark a transaction as committed. 
- * - * @param sessionId the session - */ - void commit(int sessionId) { - if (trace.isDebugEnabled()) { - trace.debug("log commit s: " + sessionId); - } - if (store.getDatabase().getPageStore() == null) { - // database already closed - return; - } - Data buffer = getBuffer(); - buffer.writeByte((byte) COMMIT); - buffer.writeVarInt(sessionId); - write(buffer); - if (store.getDatabase().getFlushOnEachCommit()) { - flush(); - } - } - - /** - * Prepare a transaction. - * - * @param session the session - * @param transaction the name of the transaction - */ - void prepareCommit(Session session, String transaction) { - if (trace.isDebugEnabled()) { - trace.debug("log prepare commit s: " + session.getId() + ", " + transaction); - } - if (store.getDatabase().getPageStore() == null) { - // database already closed - return; - } - // store it on a separate log page - int pageSize = store.getPageSize(); - pageOut.flush(); - pageOut.fillPage(); - Data buffer = getBuffer(); - buffer.writeByte((byte) PREPARE_COMMIT); - buffer.writeVarInt(session.getId()); - buffer.writeString(transaction); - if (buffer.length() >= PageStreamData.getCapacity(pageSize)) { - throw DbException.getInvalidValueException( - "transaction name (too long)", transaction); - } - write(buffer); - // store it on a separate log page - flushOut(); - pageOut.fillPage(); - if (store.getDatabase().getFlushOnEachCommit()) { - flush(); - } - } - - /** - * A record is added to a table, or removed from a table. - * - * @param session the session - * @param tableId the table id - * @param row the row to add - * @param add true if the row is added, false if it is removed - */ - void logAddOrRemoveRow(Session session, int tableId, Row row, boolean add) { - if (trace.isDebugEnabled()) { - trace.debug("log " + (add ? 
"+" : "-") + - " s: " + session.getId() + " table: " + tableId + " row: " + row); - } - session.addLogPos(logSectionId, logPos); - logPos++; - Data data = dataBuffer; - data.reset(); - int columns = row.getColumnCount(); - data.writeVarInt(columns); - data.checkCapacity(row.getByteCount(data)); - if (session.isRedoLogBinaryEnabled()) { - for (int i = 0; i < columns; i++) { - data.writeValue(row.getValue(i)); - } - } else { - for (int i = 0; i < columns; i++) { - Value v = row.getValue(i); - if (v.getValueType() == Value.BYTES) { - data.writeValue(ValueNull.INSTANCE); - } else { - data.writeValue(v); - } - } - } - Data buffer = getBuffer(); - buffer.writeByte((byte) (add ? ADD : REMOVE)); - buffer.writeVarInt(session.getId()); - buffer.writeVarInt(tableId); - buffer.writeVarLong(row.getKey()); - if (add) { - buffer.writeVarInt(data.length()); - buffer.checkCapacity(data.length()); - buffer.write(data.getBytes(), 0, data.length()); - } - write(buffer); - } - - /** - * A table is truncated. - * - * @param session the session - * @param tableId the table id - */ - void logTruncate(Session session, int tableId) { - if (trace.isDebugEnabled()) { - trace.debug("log truncate s: " + session.getId() + " table: " + tableId); - } - session.addLogPos(logSectionId, logPos); - logPos++; - Data buffer = getBuffer(); - buffer.writeByte((byte) TRUNCATE); - buffer.writeVarInt(session.getId()); - buffer.writeVarInt(tableId); - write(buffer); - } - - /** - * Flush the transaction log. - */ - void flush() { - if (pageOut != null) { - flushOut(); - } - } - - /** - * Switch to a new log section. 
- */ - void checkpoint() { - Data buffer = getBuffer(); - buffer.writeByte((byte) CHECKPOINT); - write(buffer); - undo = new BitSet(); - logSectionId++; - logPos = 0; - pageOut.flush(); - pageOut.fillPage(); - int currentDataPage = pageOut.getCurrentDataPageId(); - logSectionPageMap.put(logSectionId, currentDataPage); - } - - int getLogSectionId() { - return logSectionId; - } - - int getLogFirstSectionId() { - return firstSectionId; - } - - int getLogPos() { - return logPos; - } - - /** - * Remove all pages until the given log (excluding). - * - * @param firstUncommittedSection the first log section to keep - */ - void removeUntil(int firstUncommittedSection) { - if (firstUncommittedSection == 0) { - return; - } - int firstDataPageToKeep = logSectionPageMap.get(firstUncommittedSection); - firstTrunkPage = removeUntil(firstTrunkPage, firstDataPageToKeep); - store.setLogFirstPage(logKey, firstTrunkPage, firstDataPageToKeep); - while (firstSectionId < firstUncommittedSection) { - if (firstSectionId > 0) { - // there is no entry for log 0 - logSectionPageMap.remove(firstSectionId); - } - firstSectionId++; - } - } - - /** - * Remove all pages until the given data page. 
- * - * @param trunkPage the first trunk page - * @param firstDataPageToKeep the first data page to keep - * @return the trunk page of the data page to keep - */ - private int removeUntil(int trunkPage, int firstDataPageToKeep) { - trace.debug("log.removeUntil " + trunkPage + " " + firstDataPageToKeep); - int last = trunkPage; - while (true) { - Page p = store.getPage(trunkPage); - PageStreamTrunk t = (PageStreamTrunk) p; - if (t == null) { - throw DbException.throwInternalError( - "log.removeUntil not found: " + firstDataPageToKeep + " last " + last); - } - logKey = t.getLogKey(); - last = t.getPos(); - if (t.contains(firstDataPageToKeep)) { - return last; - } - trunkPage = t.getNextTrunk(); - IntArray list = new IntArray(); - list.add(t.getPos()); - for (int i = 0;; i++) { - int next = t.getPageData(i); - if (next == -1) { - break; - } - list.add(next); - } - freeLogPages(list); - pageOut.free(t); - } - } - - /** - * Close without further writing. - */ - void close() { - trace.debug("log close"); - if (pageOut != null) { - pageOut.close(); - pageOut = null; - } - writeBuffer = null; - } - - /** - * Check if the session committed after than the given position. - * - * @param sessionId the session id - * @param logId the log id - * @param pos the position in the log - * @return true if it is committed - */ - private boolean isSessionCommitted(int sessionId, int logId, int pos) { - SessionState state = sessionStates.get(sessionId); - if (state == null) { - return false; - } - return state.isCommitted(logId, pos); - } - - /** - * Set the last commit record for a session. - * - * @param sessionId the session id - * @param logId the log id - * @param pos the position in the log - */ - private void setLastCommitForSession(int sessionId, int logId, int pos) { - SessionState state = getOrAddSessionState(sessionId); - state.lastCommitLog = logId; - state.lastCommitPos = pos; - state.inDoubtTransaction = null; - } - - /** - * Get the session state for this session. 
A new object is created if there - * is no session state yet. - * - * @param sessionId the session id - * @return the session state object - */ - private SessionState getOrAddSessionState(int sessionId) { - Integer key = sessionId; - SessionState state = sessionStates.get(key); - if (state == null) { - state = new SessionState(); - sessionStates.put(key, state); - state.sessionId = sessionId; - } - return state; - } - - long getSize() { - return pageOut == null ? 0 : pageOut.getSize(); - } - - ArrayList getInDoubtTransactions() { - ArrayList list = Utils.newSmallArrayList(); - for (SessionState state : sessionStates.values()) { - PageStoreInDoubtTransaction in = state.inDoubtTransaction; - if (in != null) { - list.add(in); - } - } - return list; - } - - /** - * Set the state of an in-doubt transaction. - * - * @param sessionId the session - * @param pageId the page where the commit was prepared - * @param commit whether the transaction should be committed - */ - void setInDoubtTransactionState(int sessionId, int pageId, boolean commit) { - PageStreamData d = (PageStreamData) store.getPage(pageId); - d.initWrite(); - Data buff = store.createData(); - buff.writeByte((byte) (commit ? COMMIT : ROLLBACK)); - buff.writeVarInt(sessionId); - byte[] bytes = buff.getBytes(); - d.write(bytes, 0, bytes.length); - bytes = new byte[d.getRemaining()]; - d.write(bytes, 0, bytes.length); - d.write(); - } - - /** - * Called after the recovery has been completed. - */ - void recoverEnd() { - sessionStates = new HashMap<>(); - } - - private void flushOut() { - pageOut.flush(); - } - - private Data getBuffer() { - if (writeBuffer.length() == 0) { - return writeBuffer; - } - return store.createData(); - } - - - /** - * Get the smallest possible page id used. This is the trunk page if only - * appending at the end of the file, or 0. - * - * @return the smallest possible page. - */ - int getMinPageId() { - return pageOut == null ? 
0 : pageOut.getMinPageId(); - } - -} diff --git a/h2/src/main/org/h2/store/PageOutputStream.java b/h2/src/main/org/h2/store/PageOutputStream.java deleted file mode 100644 index 7b8b35bc15..0000000000 --- a/h2/src/main/org/h2/store/PageOutputStream.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store; - -import java.util.BitSet; - -import org.h2.message.DbException; -import org.h2.message.Trace; -import org.h2.util.IntArray; - -/** - * An output stream that writes into a page store. - */ -public class PageOutputStream { - - private PageStore store; - private final Trace trace; - private final BitSet exclude; - private final boolean atEnd; - private final int minPageId; - - private int trunkPageId; - private int trunkNext; - private IntArray reservedPages = new IntArray(); - private PageStreamTrunk trunk; - private int trunkIndex; - private PageStreamData data; - private int reserved; - private boolean needFlush; - private boolean writing; - private int pageCount; - private int logKey; - - /** - * Create a new page output stream. - * - * @param store the page store - * @param trunkPage the first trunk page (already allocated) - * @param exclude the pages not to use - * @param logKey the log key of the first trunk page - * @param atEnd whether only pages at the end of the file should be used - */ - public PageOutputStream(PageStore store, int trunkPage, BitSet exclude, - int logKey, boolean atEnd) { - this.trace = store.getTrace(); - this.store = store; - this.trunkPageId = trunkPage; - this.exclude = exclude; - // minus one, because we increment before creating a trunk page - this.logKey = logKey - 1; - this.atEnd = atEnd; - minPageId = atEnd ? trunkPage : 0; - } - - /** - * Allocate the required pages so that no pages need to be allocated while - * writing. 
- * - * @param minBuffer the number of bytes to allocate - */ - void reserve(int minBuffer) { - if (reserved < minBuffer) { - int pageSize = store.getPageSize(); - int capacityPerPage = PageStreamData.getCapacity(pageSize); - int pages = PageStreamTrunk.getPagesAddressed(pageSize); - int pagesToAllocate = 0, totalCapacity = 0; - do { - // allocate x data pages plus one trunk page - pagesToAllocate += pages + 1; - totalCapacity += pages * capacityPerPage; - } while (totalCapacity < minBuffer); - int firstPageToUse = atEnd ? trunkPageId : 0; - store.allocatePages(reservedPages, pagesToAllocate, exclude, firstPageToUse); - reserved += totalCapacity; - if (data == null) { - initNextData(); - } - } - } - - private void initNextData() { - int nextData = trunk == null ? -1 : trunk.getPageData(trunkIndex++); - if (nextData == -1) { - int parent = trunkPageId; - if (trunkNext != 0) { - trunkPageId = trunkNext; - } - int len = PageStreamTrunk.getPagesAddressed(store.getPageSize()); - int[] pageIds = new int[len]; - for (int i = 0; i < len; i++) { - pageIds[i] = reservedPages.get(i); - } - trunkNext = reservedPages.get(len); - logKey++; - trunk = PageStreamTrunk.create(store, parent, trunkPageId, - trunkNext, logKey, pageIds); - trunkIndex = 0; - pageCount++; - trunk.write(); - reservedPages.removeRange(0, len + 1); - nextData = trunk.getPageData(trunkIndex++); - } - data = PageStreamData.create(store, nextData, trunk.getPos(), logKey); - pageCount++; - data.initWrite(); - } - - /** - * Write the data. 
- * - * @param b the buffer - * @param off the offset - * @param len the length - */ - public void write(byte[] b, int off, int len) { - if (len <= 0) { - return; - } - if (writing) { - DbException.throwInternalError("writing while still writing"); - } - try { - reserve(len); - writing = true; - while (len > 0) { - int l = data.write(b, off, len); - if (l < len) { - storePage(); - initNextData(); - } - reserved -= l; - off += l; - len -= l; - } - needFlush = true; - } finally { - writing = false; - } - } - - private void storePage() { - if (trace.isDebugEnabled()) { - trace.debug("pageOut.storePage " + data); - } - data.write(); - } - - /** - * Write all data. - */ - public void flush() { - if (needFlush) { - storePage(); - needFlush = false; - } - } - - /** - * Close the stream. - */ - public void close() { - store = null; - } - - int getCurrentDataPageId() { - return data.getPos(); - } - - /** - * Fill the data page with zeros and write it. - * This is required for a checkpoint. - */ - void fillPage() { - if (trace.isDebugEnabled()) { - trace.debug("pageOut.storePage fill " + data.getPos()); - } - reserve(data.getRemaining() + 1); - reserved -= data.getRemaining(); - data.write(); - initNextData(); - } - - long getSize() { - return pageCount * store.getPageSize(); - } - - /** - * Remove a trunk page from the stream. - * - * @param t the trunk page - */ - void free(PageStreamTrunk t) { - pageCount -= t.free(0); - } - - /** - * Free up all reserved pages. - */ - void freeReserved() { - if (reservedPages.size() > 0) { - int[] array = new int[reservedPages.size()]; - reservedPages.toArray(array); - reservedPages = new IntArray(); - reserved = 0; - for (int p : array) { - store.free(p, false); - } - } - } - - /** - * Get the smallest possible page id used. This is the trunk page if only - * appending at the end of the file, or 0. - * - * @return the smallest possible page. 
- */ - int getMinPageId() { - return minPageId; - } - -} diff --git a/h2/src/main/org/h2/store/PageStore.java b/h2/src/main/org/h2/store/PageStore.java deleted file mode 100644 index d2c566ef06..0000000000 --- a/h2/src/main/org/h2/store/PageStore.java +++ /dev/null @@ -1,2037 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store; - -import java.io.IOException; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.BitSet; -import java.util.Collections; -import java.util.HashMap; -import java.util.concurrent.TimeUnit; -import java.util.zip.CRC32; - -import org.h2.api.ErrorCode; -import org.h2.command.CommandInterface; -import org.h2.command.ddl.CreateTableData; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.index.Cursor; -import org.h2.index.Index; -import org.h2.index.IndexType; -import org.h2.index.PageBtreeIndex; -import org.h2.index.PageBtreeLeaf; -import org.h2.index.PageBtreeNode; -import org.h2.index.PageDataIndex; -import org.h2.index.PageDataLeaf; -import org.h2.index.PageDataNode; -import org.h2.index.PageDataOverflow; -import org.h2.index.PageDelegateIndex; -import org.h2.index.PageIndex; -import org.h2.message.DbException; -import org.h2.message.Trace; -import org.h2.result.Row; -import org.h2.schema.Schema; -import org.h2.store.fs.FileUtils; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.PageStoreTable; -import org.h2.table.Table; -import org.h2.table.TableType; -import org.h2.util.Cache; -import org.h2.util.CacheLRU; -import org.h2.util.CacheObject; -import org.h2.util.CacheWriter; -import org.h2.util.IntArray; -import org.h2.util.IntIntHashMap; -import org.h2.util.StringUtils; -import org.h2.value.CompareMode; -import org.h2.value.Value; -import 
org.h2.value.ValueInt; -import org.h2.value.ValueString; - -/** - * This class represents a file that is organized as a number of pages. Page 0 - * contains a static file header, and pages 1 and 2 both contain the variable - * file header (page 2 is a copy of page 1 and is only read if the checksum of - * page 1 is invalid). The format of page 0 is: - *
            - *
          • 0-47: file header (3 time "-- H2 0.5/B -- \n")
          • - *
          • 48-51: page size in bytes (512 - 32768, must be a power of 2)
          • - *
          • 52: write version (read-only if larger than 1)
          • - *
          • 53: read version (opening fails if larger than 1)
          • - *
          - * The format of page 1 and 2 is: - *
            - *
          • CRC32 of the remaining data: int (0-3)
          • - *
          • write counter (incremented on each write): long (4-11)
          • - *
          • log trunk key: int (12-15)
          • - *
          • log trunk page (0 for none): int (16-19)
          • - *
          • log data page (0 for none): int (20-23)
          • - *
          - * Page 3 contains the first free list page. - * Page 4 contains the meta table root page. - */ -public class PageStore implements CacheWriter { - - // TODO test running out of disk space (using a special file system) - // TODO unused pages should be freed once in a while - // TODO node row counts are incorrect (it's not splitting row counts) - // TODO after opening the database, delay writing until required - // TODO optimization: try to avoid allocating a byte array per page - // TODO optimization: check if calling Data.getValueLen slows things down - // TODO order pages so that searching for a key only seeks forward - // TODO optimization: update: only log the key and changed values - // TODO index creation: use less space (ordered, split at insertion point) - // TODO detect circles in linked lists - // (input stream, free list, extend pages...) - // at runtime and recovery - // TODO remove trace or use isDebugEnabled - // TODO recover tool: support syntax to delete a row with a key - // TODO don't store default values (store a special value) - // TODO check for file size (exception if not exact size expected) - // TODO online backup using bsdiff - - /** - * The smallest possible page size. - */ - public static final int PAGE_SIZE_MIN = 64; - - /** - * The biggest possible page size. - */ - public static final int PAGE_SIZE_MAX = 32768; - - /** - * This log mode means the transaction log is not used. - */ - public static final int LOG_MODE_OFF = 0; - - /** - * This log mode means the transaction log is used and FileDescriptor.sync() - * is called for each checkpoint. This is the default level. 
- */ - public static final int LOG_MODE_SYNC = 2; - private static final int PAGE_ID_FREE_LIST_ROOT = 3; - private static final int PAGE_ID_META_ROOT = 4; - private static final int MIN_PAGE_COUNT = 5; - private static final int INCREMENT_KB = 1024; - private static final int INCREMENT_PERCENT_MIN = 35; - private static final int READ_VERSION = 3; - private static final int WRITE_VERSION = 3; - private static final int META_TYPE_DATA_INDEX = 0; - private static final int META_TYPE_BTREE_INDEX = 1; - private static final int META_TABLE_ID = -1; - private static final int COMPACT_BLOCK_SIZE = 1536; - private final Database database; - private final Trace trace; - private final String fileName; - private FileStore file; - private String accessMode; - private int pageSize = Constants.DEFAULT_PAGE_SIZE; - private int pageSizeShift; - private long writeCountBase, writeCount, readCount; - private int logKey, logFirstTrunkPage, logFirstDataPage; - private final Cache cache; - private int freeListPagesPerList; - private boolean recoveryRunning; - private boolean ignoreBigLog; - - /** - * The index to the first free-list page that potentially has free space. - */ - private int firstFreeListIndex; - - /** - * The file size in bytes. - */ - private long fileLength; - - /** - * Number of pages (including free pages). - */ - private int pageCount; - - private PageLog log; - private Schema metaSchema; - private PageStoreTable metaTable; - private PageDataIndex metaIndex; - private final IntIntHashMap metaRootPageId = new IntIntHashMap(); - private final HashMap metaObjects = new HashMap<>(); - private HashMap tempObjects; - - /** - * The map of reserved pages, to ensure index head pages - * are not used for regular data during recovery. The key is the page id, - * and the value the latest transaction position where this page is used. 
- */ - private HashMap reservedPages; - private boolean isNew; - private long maxLogSize = Constants.DEFAULT_MAX_LOG_SIZE; - private final Session pageStoreSession; - - /** - * Each free page is marked with a set bit. - */ - private final BitSet freed = new BitSet(); - private final ArrayList freeLists = new ArrayList<>(); - - private boolean recordPageReads; - private ArrayList recordedPagesList; - private IntIntHashMap recordedPagesIndex; - - /** - * The change count is something like a "micro-transaction-id". - * It is used to ensure that changed pages are not written to the file - * before the current operation is not finished. This is only a problem - * when using a very small cache size. The value starts at 1 so that - * pages with change count 0 can be evicted from the cache. - */ - private long changeCount = 1; - - private Data emptyPage; - private long logSizeBase; - private HashMap statistics; - private int logMode = LOG_MODE_SYNC; - private boolean lockFile; - private boolean readMode; - private int backupLevel; - - /** - * Create a new page store object. - * - * @param database the database - * @param fileName the file name - * @param accessMode the access mode - * @param cacheSizeDefault the default cache size - */ - public PageStore(Database database, String fileName, String accessMode, - int cacheSizeDefault) { - this.fileName = fileName; - this.accessMode = accessMode; - this.database = database; - trace = database.getTrace(Trace.PAGE_STORE); - // if (fileName.endsWith("X.h2.db")) - // trace.setLevel(TraceSystem.DEBUG); - String cacheType = database.getCacheType(); - this.cache = CacheLRU.getCache(this, cacheType, cacheSizeDefault); - pageStoreSession = new Session(database, null, 0); - } - - /** - * Start collecting statistics. - */ - public void statisticsStart() { - statistics = new HashMap<>(); - } - - /** - * Stop collecting statistics. 
- * - * @return the statistics - */ - public HashMap statisticsEnd() { - HashMap result = statistics; - statistics = null; - return result; - } - - private void statisticsIncrement(String key) { - if (statistics != null) { - Integer old = statistics.get(key); - statistics.put(key, old == null ? 1 : old + 1); - } - } - - /** - * Copy the next page to the output stream. - * - * @param pageId the page to copy - * @param out the output stream - * @return the new position, or -1 if there is no more data to copy - */ - public synchronized int copyDirect(int pageId, OutputStream out) - throws IOException { - byte[] buffer = new byte[pageSize]; - if (pageId >= pageCount) { - return -1; - } - file.seek((long) pageId << pageSizeShift); - file.readFullyDirect(buffer, 0, pageSize); - readCount++; - out.write(buffer, 0, pageSize); - return pageId + 1; - } - - /** - * Open the file and read the header. - */ - public synchronized void open() { - try { - metaRootPageId.put(META_TABLE_ID, PAGE_ID_META_ROOT); - if (FileUtils.exists(fileName)) { - long length = FileUtils.size(fileName); - if (length < MIN_PAGE_COUNT * PAGE_SIZE_MIN) { - if (database.isReadOnly()) { - throw DbException.get( - ErrorCode.FILE_CORRUPTED_1, fileName + " length: " + length); - } - // the database was not fully created - openNew(); - } else { - openExisting(); - } - } else { - openNew(); - } - } catch (DbException e) { - close(); - throw e; - } - } - - private void openNew() { - setPageSize(pageSize); - freeListPagesPerList = PageFreeList.getPagesAddressed(pageSize); - file = database.openFile(fileName, accessMode, false); - lockFile(); - recoveryRunning = true; - writeStaticHeader(); - writeVariableHeader(); - log = new PageLog(this); - increaseFileSize(MIN_PAGE_COUNT); - openMetaIndex(); - logFirstTrunkPage = allocatePage(); - log.openForWriting(logFirstTrunkPage, false); - isNew = true; - recoveryRunning = false; - increaseFileSize(); - } - - private void lockFile() { - if (lockFile) { - if 
(!file.tryLock()) { - throw DbException.get( - ErrorCode.DATABASE_ALREADY_OPEN_1, fileName); - } - } - } - - private void openExisting() { - try { - file = database.openFile(fileName, accessMode, true); - } catch (DbException e) { - if (e.getErrorCode() == ErrorCode.IO_EXCEPTION_2) { - if (e.getMessage().contains("locked")) { - // in Windows, you can't open a locked file - // (in other operating systems, you can) - // the exact error message is: - // "The process cannot access the file because - // another process has locked a portion of the file" - throw DbException.get( - ErrorCode.DATABASE_ALREADY_OPEN_1, e, fileName); - } - } - throw e; - } - lockFile(); - readStaticHeader(); - freeListPagesPerList = PageFreeList.getPagesAddressed(pageSize); - fileLength = file.length(); - pageCount = (int) (fileLength / pageSize); - if (pageCount < MIN_PAGE_COUNT) { - if (database.isReadOnly()) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - fileName + " pageCount: " + pageCount); - } - file.releaseLock(); - file.close(); - FileUtils.delete(fileName); - openNew(); - return; - } - readVariableHeader(); - log = new PageLog(this); - log.openForReading(logKey, logFirstTrunkPage, logFirstDataPage); - boolean isEmpty = recover(); - if (!database.isReadOnly()) { - readMode = true; - if (!isEmpty || !SysProperties.MODIFY_ON_WRITE || tempObjects != null) { - openForWriting(); - removeOldTempIndexes(); - } - } - } - - private void openForWriting() { - if (!readMode || database.isReadOnly()) { - return; - } - readMode = false; - recoveryRunning = true; - log.free(); - logFirstTrunkPage = allocatePage(); - log.openForWriting(logFirstTrunkPage, false); - recoveryRunning = false; - freed.set(0, pageCount, true); - checkpoint(); - } - - private void removeOldTempIndexes() { - if (tempObjects != null) { - metaObjects.putAll(tempObjects); - for (PageIndex index: tempObjects.values()) { - if (index.getTable().isTemporary()) { - index.truncate(pageStoreSession); - 
index.remove(pageStoreSession); - } - } - pageStoreSession.commit(true); - tempObjects = null; - } - metaObjects.clear(); - metaObjects.put(-1, metaIndex); - } - - private void writeIndexRowCounts() { - for (PageIndex index: metaObjects.values()) { - index.writeRowCount(); - } - } - - private void writeBack() { - ArrayList list = cache.getAllChanged(); - Collections.sort(list); - for (CacheObject cacheObject : list) { - writeBack(cacheObject); - } - } - - /** - * Flush all pending changes to disk, and switch the new transaction log. - */ - public synchronized void checkpoint() { - trace.debug("checkpoint"); - if (log == null || readMode || database.isReadOnly() || backupLevel > 0) { - // the file was never fully opened, or is read-only, - // or checkpoint is currently disabled - return; - } - database.checkPowerOff(); - writeIndexRowCounts(); - - log.checkpoint(); - writeBack(); - - int firstUncommittedSection = getFirstUncommittedSection(); - - log.removeUntil(firstUncommittedSection); - - // write back the free list - writeBack(); - - // ensure the free list is backed up again - log.checkpoint(); - - if (trace.isDebugEnabled()) { - trace.debug("writeFree"); - } - byte[] test = new byte[16]; - byte[] empty = new byte[pageSize]; - for (int i = PAGE_ID_FREE_LIST_ROOT; i < pageCount; i++) { - if (isUsed(i)) { - freed.clear(i); - } else if (!freed.get(i)) { - if (trace.isDebugEnabled()) { - trace.debug("free " + i); - } - file.seek((long) i << pageSizeShift); - file.readFully(test, 0, 16); - if (test[0] != 0) { - file.seek((long) i << pageSizeShift); - file.write(empty, 0, pageSize); - writeCount++; - } - freed.set(i); - } - } - } - - /** - * Shrink the file so there are no empty pages at the end. 
- * - * @param compactMode 0 if no compacting should happen, otherwise - * TransactionCommand.SHUTDOWN_COMPACT or TransactionCommand.SHUTDOWN_DEFRAG - */ - public synchronized void compact(int compactMode) { - if (!database.getSettings().pageStoreTrim) { - return; - } - if (SysProperties.MODIFY_ON_WRITE && readMode && - compactMode == 0) { - return; - } - openForWriting(); - // find the last used page - int lastUsed = -1; - for (int i = getFreeListId(pageCount); i >= 0; i--) { - lastUsed = getFreeList(i).getLastUsed(); - if (lastUsed != -1) { - break; - } - } - // open a new log at the very end - // (to be truncated later) - writeBack(); - log.free(); - recoveryRunning = true; - try { - logFirstTrunkPage = lastUsed + 1; - allocatePage(logFirstTrunkPage); - log.openForWriting(logFirstTrunkPage, true); - // ensure the free list is backed up again - log.checkpoint(); - } finally { - recoveryRunning = false; - } - long start = System.nanoTime(); - boolean isCompactFully = compactMode == - CommandInterface.SHUTDOWN_COMPACT; - boolean isDefrag = compactMode == - CommandInterface.SHUTDOWN_DEFRAG; - - if (database.getSettings().defragAlways) { - isCompactFully = isDefrag = true; - } - - int maxCompactTime = database.getSettings().maxCompactTime; - int maxMove = database.getSettings().maxCompactCount; - - if (isCompactFully || isDefrag) { - maxCompactTime = Integer.MAX_VALUE; - maxMove = Integer.MAX_VALUE; - } - int blockSize = isCompactFully ? 
COMPACT_BLOCK_SIZE : 1; - int firstFree = MIN_PAGE_COUNT; - for (int x = lastUsed, j = 0; x > MIN_PAGE_COUNT && - j < maxMove; x -= blockSize) { - for (int full = x - blockSize + 1; full <= x; full++) { - if (full > MIN_PAGE_COUNT && isUsed(full)) { - synchronized (this) { - firstFree = getFirstFree(firstFree); - if (firstFree == -1 || firstFree >= full) { - j = maxMove; - break; - } - if (compact(full, firstFree)) { - j++; - long now = System.nanoTime(); - if (now > start + TimeUnit.MILLISECONDS.toNanos(maxCompactTime)) { - j = maxMove; - break; - } - } - } - } - } - } - if (isDefrag) { - log.checkpoint(); - writeBack(); - cache.clear(); - ArrayList
          tables = database.getAllTablesAndViews(false); - recordedPagesList = new ArrayList<>(); - recordedPagesIndex = new IntIntHashMap(); - recordPageReads = true; - Session sysSession = database.getSystemSession(); - for (Table table : tables) { - if (!table.isTemporary() && TableType.TABLE == table.getTableType()) { - Index scanIndex = table.getScanIndex(sysSession); - Cursor cursor = scanIndex.find(sysSession, null, null); - while (cursor.next()) { - cursor.get(); - } - for (Index index : table.getIndexes()) { - if (index != scanIndex && index.canScan()) { - cursor = index.find(sysSession, null, null); - while (cursor.next()) { - // the data is already read - } - } - } - } - } - recordPageReads = false; - int target = MIN_PAGE_COUNT - 1; - int temp = 0; - for (int i = 0, size = recordedPagesList.size(); i < size; i++) { - log.checkpoint(); - writeBack(); - int source = recordedPagesList.get(i); - Page pageSource = getPage(source); - if (!pageSource.canMove()) { - continue; - } - while (true) { - Page pageTarget = getPage(++target); - if (pageTarget == null || pageTarget.canMove()) { - break; - } - } - if (target == source) { - continue; - } - temp = getFirstFree(temp); - if (temp == -1) { - DbException.throwInternalError("no free page for defrag"); - } - cache.clear(); - swap(source, target, temp); - int index = recordedPagesIndex.get(target); - if (index != IntIntHashMap.NOT_FOUND) { - recordedPagesList.set(index, source); - recordedPagesIndex.put(source, index); - } - recordedPagesList.set(i, target); - recordedPagesIndex.put(target, i); - } - recordedPagesList = null; - recordedPagesIndex = null; - } - // TODO can most likely be simplified - checkpoint(); - log.checkpoint(); - writeIndexRowCounts(); - log.checkpoint(); - writeBack(); - commit(pageStoreSession); - writeBack(); - log.checkpoint(); - - log.free(); - // truncate the log - recoveryRunning = true; - try { - setLogFirstPage(++logKey, 0, 0); - } finally { - recoveryRunning = false; - } - 
writeBack(); - for (int i = getFreeListId(pageCount); i >= 0; i--) { - lastUsed = getFreeList(i).getLastUsed(); - if (lastUsed != -1) { - break; - } - } - int newPageCount = lastUsed + 1; - if (newPageCount < pageCount) { - freed.set(newPageCount, pageCount, false); - } - pageCount = newPageCount; - // the easiest way to remove superfluous entries - freeLists.clear(); - trace.debug("pageCount: " + pageCount); - long newLength = (long) pageCount << pageSizeShift; - if (file.length() != newLength) { - file.setLength(newLength); - writeCount++; - } - } - - private int getFirstFree(int start) { - int free = -1; - for (int id = getFreeListId(start); start < pageCount; id++) { - free = getFreeList(id).getFirstFree(start); - if (free != -1) { - break; - } - } - return free; - } - - private void swap(int a, int b, int free) { - if (a < MIN_PAGE_COUNT || b < MIN_PAGE_COUNT) { - System.out.println(isUsed(a) + " " + isUsed(b)); - DbException.throwInternalError("can't swap " + a + " and " + b); - } - Page f = (Page) cache.get(free); - if (f != null) { - DbException.throwInternalError("not free: " + f); - } - if (trace.isDebugEnabled()) { - trace.debug("swap " + a + " and " + b + " via " + free); - } - Page pageA = null; - if (isUsed(a)) { - pageA = getPage(a); - if (pageA != null) { - pageA.moveTo(pageStoreSession, free); - } - free(a); - } - if (free != b) { - if (isUsed(b)) { - Page pageB = getPage(b); - if (pageB != null) { - pageB.moveTo(pageStoreSession, a); - } - free(b); - } - if (pageA != null) { - f = getPage(free); - if (f != null) { - f.moveTo(pageStoreSession, b); - } - free(free); - } - } - } - - private boolean compact(int full, int free) { - if (full < MIN_PAGE_COUNT || free == -1 || free >= full || !isUsed(full)) { - return false; - } - Page f = (Page) cache.get(free); - if (f != null) { - DbException.throwInternalError("not free: " + f); - } - Page p = getPage(full); - if (p == null) { - freePage(full); - } else if (p instanceof PageStreamData || p instanceof 
PageStreamTrunk) { - if (p.getPos() < log.getMinPageId()) { - // an old transaction log page - // probably a leftover from a crash - freePage(full); - } - } else { - if (trace.isDebugEnabled()) { - trace.debug("move " + p.getPos() + " to " + free); - } - try { - p.moveTo(pageStoreSession, free); - } finally { - if (++changeCount < 0) { - throw DbException.throwInternalError( - "changeCount has wrapped"); - } - } - } - return true; - } - - /** - * Read a page from the store. - * - * @param pageId the page id - * @return the page - */ - public synchronized Page getPage(int pageId) { - Page p = (Page) cache.get(pageId); - if (p != null) { - return p; - } - - Data data = createData(); - readPage(pageId, data); - int type = data.readByte(); - if (type == Page.TYPE_EMPTY) { - return null; - } - data.readShortInt(); - data.readInt(); - if (!checksumTest(data.getBytes(), pageId, pageSize)) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "wrong checksum"); - } - switch (type & ~Page.FLAG_LAST) { - case Page.TYPE_FREE_LIST: - p = PageFreeList.read(this, data, pageId); - break; - case Page.TYPE_DATA_LEAF: { - int indexId = data.readVarInt(); - PageIndex idx = metaObjects.get(indexId); - if (idx == null) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "index not found " + indexId); - } - if (!(idx instanceof PageDataIndex)) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "not a data index " + indexId + " " + idx); - } - PageDataIndex index = (PageDataIndex) idx; - if (statistics != null) { - statisticsIncrement(index.getTable().getName() + "." 
+ - index.getName() + " read"); - } - p = PageDataLeaf.read(index, data, pageId); - break; - } - case Page.TYPE_DATA_NODE: { - int indexId = data.readVarInt(); - PageIndex idx = metaObjects.get(indexId); - if (idx == null) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "index not found " + indexId); - } - if (!(idx instanceof PageDataIndex)) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "not a data index " + indexId + " " + idx); - } - PageDataIndex index = (PageDataIndex) idx; - if (statistics != null) { - statisticsIncrement(index.getTable().getName() + "." + - index.getName() + " read"); - } - p = PageDataNode.read(index, data, pageId); - break; - } - case Page.TYPE_DATA_OVERFLOW: { - p = PageDataOverflow.read(this, data, pageId); - if (statistics != null) { - statisticsIncrement("overflow read"); - } - break; - } - case Page.TYPE_BTREE_LEAF: { - int indexId = data.readVarInt(); - PageIndex idx = metaObjects.get(indexId); - if (idx == null) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "index not found " + indexId); - } - if (!(idx instanceof PageBtreeIndex)) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "not a btree index " + indexId + " " + idx); - } - PageBtreeIndex index = (PageBtreeIndex) idx; - if (statistics != null) { - statisticsIncrement(index.getTable().getName() + "." + - index.getName() + " read"); - } - p = PageBtreeLeaf.read(index, data, pageId); - break; - } - case Page.TYPE_BTREE_NODE: { - int indexId = data.readVarInt(); - PageIndex idx = metaObjects.get(indexId); - if (idx == null) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "index not found " + indexId); - } - if (!(idx instanceof PageBtreeIndex)) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "not a btree index " + indexId + " " + idx); - } - PageBtreeIndex index = (PageBtreeIndex) idx; - if (statistics != null) { - statisticsIncrement(index.getTable().getName() + - "." 
+ index.getName() + " read"); - } - p = PageBtreeNode.read(index, data, pageId); - break; - } - case Page.TYPE_STREAM_TRUNK: - p = PageStreamTrunk.read(this, data, pageId); - break; - case Page.TYPE_STREAM_DATA: - p = PageStreamData.read(this, data, pageId); - break; - default: - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "page=" + pageId + " type=" + type); - } - cache.put(p); - return p; - } - - private int getFirstUncommittedSection() { - trace.debug("getFirstUncommittedSection"); - Session[] sessions = database.getSessions(true); - int firstUncommittedSection = log.getLogSectionId(); - for (Session session : sessions) { - int firstUncommitted = session.getFirstUncommittedLog(); - if (firstUncommitted != Session.LOG_WRITTEN) { - if (firstUncommitted < firstUncommittedSection) { - firstUncommittedSection = firstUncommitted; - } - } - } - return firstUncommittedSection; - } - - private void readStaticHeader() { - file.seek(FileStore.HEADER_LENGTH); - Data page = Data.create(database, - new byte[PAGE_SIZE_MIN - FileStore.HEADER_LENGTH], false); - file.readFully(page.getBytes(), 0, - PAGE_SIZE_MIN - FileStore.HEADER_LENGTH); - readCount++; - setPageSize(page.readInt()); - int writeVersion = page.readByte(); - int readVersion = page.readByte(); - if (readVersion > READ_VERSION) { - throw DbException.get( - ErrorCode.FILE_VERSION_ERROR_1, fileName); - } - if (writeVersion > WRITE_VERSION) { - close(); - database.setReadOnly(true); - accessMode = "r"; - file = database.openFile(fileName, accessMode, true); - } - } - - private void readVariableHeader() { - Data page = createData(); - for (int i = 1;; i++) { - if (i == 3) { - throw DbException.get( - ErrorCode.FILE_CORRUPTED_1, fileName); - } - page.reset(); - readPage(i, page); - CRC32 crc = new CRC32(); - crc.update(page.getBytes(), 4, pageSize - 4); - int expected = (int) crc.getValue(); - int got = page.readInt(); - if (expected == got) { - writeCountBase = page.readLong(); - logKey = page.readInt(); - 
logFirstTrunkPage = page.readInt(); - logFirstDataPage = page.readInt(); - break; - } - } - } - - /** - * Set the page size. The size must be a power of two. This method must be - * called before opening. - * - * @param size the page size - */ - public void setPageSize(int size) { - if (size < PAGE_SIZE_MIN || size > PAGE_SIZE_MAX) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - fileName + " pageSize: " + size); - } - boolean good = false; - int shift = 0; - for (int i = 1; i <= size;) { - if (size == i) { - good = true; - break; - } - shift++; - i += i; - } - if (!good) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, fileName); - } - pageSize = size; - emptyPage = createData(); - pageSizeShift = shift; - } - - private void writeStaticHeader() { - Data page = Data.create(database, new byte[pageSize - FileStore.HEADER_LENGTH], false); - page.writeInt(pageSize); - page.writeByte((byte) WRITE_VERSION); - page.writeByte((byte) READ_VERSION); - file.seek(FileStore.HEADER_LENGTH); - file.write(page.getBytes(), 0, pageSize - FileStore.HEADER_LENGTH); - writeCount++; - } - - /** - * Set the trunk page and data page id of the log. 
- * - * @param logKey the log key of the trunk page - * @param trunkPageId the trunk page id - * @param dataPageId the data page id - */ - void setLogFirstPage(int logKey, int trunkPageId, int dataPageId) { - if (trace.isDebugEnabled()) { - trace.debug("setLogFirstPage key: " + logKey + - " trunk: "+ trunkPageId +" data: " + dataPageId); - } - this.logKey = logKey; - this.logFirstTrunkPage = trunkPageId; - this.logFirstDataPage = dataPageId; - writeVariableHeader(); - } - - private void writeVariableHeader() { - trace.debug("writeVariableHeader"); - if (logMode == LOG_MODE_SYNC) { - file.sync(); - } - Data page = createData(); - page.writeInt(0); - page.writeLong(getWriteCountTotal()); - page.writeInt(logKey); - page.writeInt(logFirstTrunkPage); - page.writeInt(logFirstDataPage); - CRC32 crc = new CRC32(); - crc.update(page.getBytes(), 4, pageSize - 4); - page.setInt(0, (int) crc.getValue()); - file.seek(pageSize); - file.write(page.getBytes(), 0, pageSize); - file.seek(pageSize + pageSize); - file.write(page.getBytes(), 0, pageSize); - // don't increment the write counter, because it was just written - } - - /** - * Close the file without further writing. - */ - public synchronized void close() { - trace.debug("close"); - if (log != null) { - log.close(); - log = null; - } - if (file != null) { - try { - file.releaseLock(); - file.close(); - } finally { - file = null; - } - } - } - - @Override - public synchronized void flushLog() { - if (file != null) { - log.flush(); - } - } - - /** - * Flush the transaction log and sync the file. - */ - public synchronized void sync() { - if (file != null) { - log.flush(); - file.sync(); - } - } - - @Override - public Trace getTrace() { - return trace; - } - - @Override - public synchronized void writeBack(CacheObject obj) { - Page record = (Page) obj; - if (trace.isDebugEnabled()) { - trace.debug("writeBack " + record); - } - record.write(); - record.setChanged(false); - } - - /** - * Write an undo log entry if required. 
- * - * @param page the page - * @param old the old data (if known) or null - */ - public synchronized void logUndo(Page page, Data old) { - if (logMode == LOG_MODE_OFF) { - return; - } - checkOpen(); - database.checkWritingAllowed(); - if (!recoveryRunning) { - int pos = page.getPos(); - if (!log.getUndo(pos)) { - if (old == null) { - old = readPage(pos); - } - openForWriting(); - log.addUndo(pos, old); - } - } - } - - /** - * Update a page. - * - * @param page the page - */ - public synchronized void update(Page page) { - if (trace.isDebugEnabled()) { - if (!page.isChanged()) { - trace.debug("updateRecord " + page.toString()); - } - } - checkOpen(); - database.checkWritingAllowed(); - page.setChanged(true); - int pos = page.getPos(); - if (SysProperties.CHECK && !recoveryRunning) { - // ensure the undo entry is already written - if (logMode != LOG_MODE_OFF) { - log.addUndo(pos, null); - } - } - allocatePage(pos); - cache.update(pos, page); - } - - private int getFreeListId(int pageId) { - return (pageId - PAGE_ID_FREE_LIST_ROOT) / freeListPagesPerList; - } - - private PageFreeList getFreeListForPage(int pageId) { - return getFreeList(getFreeListId(pageId)); - } - - private PageFreeList getFreeList(int i) { - PageFreeList list = null; - if (i < freeLists.size()) { - list = freeLists.get(i); - if (list != null) { - return list; - } - } - int p = PAGE_ID_FREE_LIST_ROOT + i * freeListPagesPerList; - while (p >= pageCount) { - increaseFileSize(); - } - if (p < pageCount) { - list = (PageFreeList) getPage(p); - } - if (list == null) { - list = PageFreeList.create(this, p); - cache.put(list); - } - while (freeLists.size() <= i) { - freeLists.add(null); - } - freeLists.set(i, list); - return list; - } - - private void freePage(int pageId) { - int index = getFreeListId(pageId); - PageFreeList list = getFreeList(index); - firstFreeListIndex = Math.min(index, firstFreeListIndex); - list.free(pageId); - } - - /** - * Set the bit of an already allocated page. 
- * - * @param pageId the page to allocate - */ - void allocatePage(int pageId) { - PageFreeList list = getFreeListForPage(pageId); - list.allocate(pageId); - } - - private boolean isUsed(int pageId) { - return getFreeListForPage(pageId).isUsed(pageId); - } - - /** - * Allocate a number of pages. - * - * @param list the list where to add the allocated pages - * @param pagesToAllocate the number of pages to allocate - * @param exclude the exclude list - * @param after all allocated pages are higher than this page - */ - void allocatePages(IntArray list, int pagesToAllocate, BitSet exclude, - int after) { - list.ensureCapacity(list.size() + pagesToAllocate); - for (int i = 0; i < pagesToAllocate; i++) { - int page = allocatePage(exclude, after); - after = page; - list.add(page); - } - } - - /** - * Allocate a page. - * - * @return the page id - */ - public synchronized int allocatePage() { - openForWriting(); - int pos = allocatePage(null, 0); - if (!recoveryRunning) { - if (logMode != LOG_MODE_OFF) { - log.addUndo(pos, emptyPage); - } - } - return pos; - } - - private int allocatePage(BitSet exclude, int first) { - int page; - for (int i = firstFreeListIndex;; i++) { - PageFreeList list = getFreeList(i); - page = list.allocate(exclude, first); - if (page >= 0) { - firstFreeListIndex = i; - break; - } - } - while (page >= pageCount) { - increaseFileSize(); - } - if (trace.isDebugEnabled()) { - // trace.debug("allocatePage " + pos); - } - return page; - } - - private void increaseFileSize() { - int increment = INCREMENT_KB * 1024 / pageSize; - int percent = pageCount * INCREMENT_PERCENT_MIN / 100; - if (increment < percent) { - increment = (1 + (percent / increment)) * increment; - } - int max = database.getSettings().pageStoreMaxGrowth; - if (max < increment) { - increment = max; - } - increaseFileSize(increment); - } - - private void increaseFileSize(int increment) { - for (int i = pageCount; i < pageCount + increment; i++) { - freed.set(i); - } - pageCount += 
increment; - long newLength = (long) pageCount << pageSizeShift; - file.setLength(newLength); - writeCount++; - fileLength = newLength; - } - - /** - * Add a page to the free list. The undo log entry must have been written. - * - * @param pageId the page id - */ - public synchronized void free(int pageId) { - free(pageId, true); - } - - /** - * Add a page to the free list. - * - * @param pageId the page id - * @param undo if the undo record must have been written - */ - void free(int pageId, boolean undo) { - if (trace.isDebugEnabled()) { - // trace.debug("free " + pageId + " " + undo); - } - cache.remove(pageId); - if (SysProperties.CHECK && !recoveryRunning && undo) { - // ensure the undo entry is already written - if (logMode != LOG_MODE_OFF) { - log.addUndo(pageId, null); - } - } - freePage(pageId); - if (recoveryRunning) { - writePage(pageId, createData()); - if (reservedPages != null && reservedPages.containsKey(pageId)) { - // re-allocate the page if it is used later on again - int latestPos = reservedPages.get(pageId); - if (latestPos > log.getLogPos()) { - allocatePage(pageId); - } - } - } - } - - /** - * Add a page to the free list. The page is not used, therefore doesn't need - * to be overwritten. - * - * @param pageId the page id - */ - void freeUnused(int pageId) { - if (trace.isDebugEnabled()) { - trace.debug("freeUnused " + pageId); - } - cache.remove(pageId); - freePage(pageId); - freed.set(pageId); - } - - /** - * Create a data object. - * - * @return the data page. - */ - public Data createData() { - return Data.create(database, new byte[pageSize], false); - } - - /** - * Read a page. - * - * @param pos the page id - * @return the page - */ - public synchronized Data readPage(int pos) { - Data page = createData(); - readPage(pos, page); - return page; - } - - /** - * Read a page. 
- * - * @param pos the page id - * @param page the page - */ - void readPage(int pos, Data page) { - if (recordPageReads) { - if (pos >= MIN_PAGE_COUNT && - recordedPagesIndex.get(pos) == IntIntHashMap.NOT_FOUND) { - recordedPagesIndex.put(pos, recordedPagesList.size()); - recordedPagesList.add(pos); - } - } - if (pos < 0 || pos >= pageCount) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, pos + - " of " + pageCount); - } - file.seek((long) pos << pageSizeShift); - file.readFully(page.getBytes(), 0, pageSize); - readCount++; - } - - /** - * Get the page size. - * - * @return the page size - */ - public int getPageSize() { - return pageSize; - } - - /** - * Get the number of pages (including free pages). - * - * @return the page count - */ - public int getPageCount() { - return pageCount; - } - - /** - * Write a page. - * - * @param pageId the page id - * @param data the data - */ - public synchronized void writePage(int pageId, Data data) { - if (pageId <= 0) { - DbException.throwInternalError("write to page " + pageId); - } - byte[] bytes = data.getBytes(); - if (SysProperties.CHECK) { - boolean shouldBeFreeList = (pageId - PAGE_ID_FREE_LIST_ROOT) % - freeListPagesPerList == 0; - boolean isFreeList = bytes[0] == Page.TYPE_FREE_LIST; - if (bytes[0] != 0 && shouldBeFreeList != isFreeList) { - throw DbException.throwInternalError(); - } - } - checksumSet(bytes, pageId); - file.seek((long) pageId << pageSizeShift); - file.write(bytes, 0, pageSize); - writeCount++; - } - - /** - * Remove a page from the cache. - * - * @param pageId the page id - */ - public synchronized void removeFromCache(int pageId) { - cache.remove(pageId); - } - - Database getDatabase() { - return database; - } - - /** - * Run recovery. 
- * - * @return whether the transaction log was empty - */ - private boolean recover() { - trace.debug("log recover"); - recoveryRunning = true; - boolean isEmpty = true; - isEmpty &= log.recover(PageLog.RECOVERY_STAGE_UNDO); - if (reservedPages != null) { - for (int r : reservedPages.keySet()) { - if (trace.isDebugEnabled()) { - trace.debug("reserve " + r); - } - allocatePage(r); - } - } - isEmpty &= log.recover(PageLog.RECOVERY_STAGE_ALLOCATE); - openMetaIndex(); - readMetaData(); - isEmpty &= log.recover(PageLog.RECOVERY_STAGE_REDO); - boolean setReadOnly = false; - if (!database.isReadOnly()) { - if (log.getInDoubtTransactions().isEmpty()) { - log.recoverEnd(); - int firstUncommittedSection = getFirstUncommittedSection(); - log.removeUntil(firstUncommittedSection); - } else { - setReadOnly = true; - } - } - PageDataIndex systemTable = (PageDataIndex) metaObjects.get(0); - isNew = systemTable == null; - for (PageIndex index : metaObjects.values()) { - if (index.getTable().isTemporary()) { - // temporary indexes are removed after opening - if (tempObjects == null) { - tempObjects = new HashMap<>(); - } - tempObjects.put(index.getId(), index); - } else { - index.close(pageStoreSession); - } - } - - allocatePage(PAGE_ID_META_ROOT); - writeIndexRowCounts(); - recoveryRunning = false; - reservedPages = null; - - writeBack(); - // clear the cache because it contains pages with closed indexes - cache.clear(); - freeLists.clear(); - - metaObjects.clear(); - metaObjects.put(-1, metaIndex); - - if (setReadOnly) { - database.setReadOnly(true); - } - trace.debug("log recover done"); - return isEmpty; - } - - /** - * A record is added to a table, or removed from a table. 
- * - * @param session the session - * @param tableId the table id - * @param row the row to add - * @param add true if the row is added, false if it is removed - */ - public synchronized void logAddOrRemoveRow(Session session, int tableId, - Row row, boolean add) { - if (logMode != LOG_MODE_OFF) { - if (!recoveryRunning) { - log.logAddOrRemoveRow(session, tableId, row, add); - } - } - } - - /** - * Mark a committed transaction. - * - * @param session the session - */ - public synchronized void commit(Session session) { - checkOpen(); - openForWriting(); - log.commit(session.getId()); - long size = log.getSize(); - if (size - logSizeBase > maxLogSize / 2) { - int firstSection = log.getLogFirstSectionId(); - checkpoint(); - int newSection = log.getLogSectionId(); - if (newSection - firstSection <= 2) { - // one section is always kept, and checkpoint - // advances two sections each time it is called - return; - } - long newSize = log.getSize(); - if (newSize < size || size < maxLogSize) { - ignoreBigLog = false; - return; - } - if (!ignoreBigLog) { - ignoreBigLog = true; - trace.error(null, - "Transaction log could not be truncated; size: " + - (newSize / 1024 / 1024) + " MB"); - } - logSizeBase = log.getSize(); - } - } - - /** - * Prepare a transaction. - * - * @param session the session - * @param transaction the name of the transaction - */ - public synchronized void prepareCommit(Session session, String transaction) { - log.prepareCommit(session, transaction); - } - - /** - * Check whether this is a new database. - * - * @return true if it is - */ - public boolean isNew() { - return isNew; - } - - /** - * Reserve the page if this is a index root page entry. 
- * - * @param logPos the redo log position - * @param tableId the table id - * @param row the row - */ - void allocateIfIndexRoot(int logPos, int tableId, Row row) { - if (tableId == META_TABLE_ID) { - int rootPageId = row.getValue(3).getInt(); - if (reservedPages == null) { - reservedPages = new HashMap<>(); - } - reservedPages.put(rootPageId, logPos); - } - } - - /** - * Redo a delete in a table. - * - * @param tableId the object id of the table - * @param key the key of the row to delete - */ - void redoDelete(int tableId, long key) { - Index index = metaObjects.get(tableId); - PageDataIndex scan = (PageDataIndex) index; - Row row = scan.getRowWithKey(key); - if (row == null || row.getKey() != key) { - trace.error(null, "Entry not found: " + key + - " found instead: " + row + " - ignoring"); - return; - } - redo(tableId, row, false); - } - - /** - * Redo a change in a table. - * - * @param tableId the object id of the table - * @param row the row - * @param add true if the record is added, false if deleted - */ - void redo(int tableId, Row row, boolean add) { - if (tableId == META_TABLE_ID) { - if (add) { - addMeta(row, pageStoreSession, true); - } else { - removeMeta(row); - } - } - Index index = metaObjects.get(tableId); - if (index == null) { - throw DbException.throwInternalError( - "Table not found: " + tableId + " " + row + " " + add); - } - Table table = index.getTable(); - if (add) { - table.addRow(pageStoreSession, row); - } else { - table.removeRow(pageStoreSession, row); - } - } - - /** - * Redo a truncate. 
- * - * @param tableId the object id of the table - */ - void redoTruncate(int tableId) { - Index index = metaObjects.get(tableId); - Table table = index.getTable(); - table.truncate(pageStoreSession); - } - - private void openMetaIndex() { - CreateTableData data = new CreateTableData(); - ArrayList cols = data.columns; - cols.add(new Column("ID", Value.INT)); - cols.add(new Column("TYPE", Value.INT)); - cols.add(new Column("PARENT", Value.INT)); - cols.add(new Column("HEAD", Value.INT)); - cols.add(new Column("OPTIONS", Value.STRING)); - cols.add(new Column("COLUMNS", Value.STRING)); - metaSchema = new Schema(database, 0, "", null, true); - data.schema = metaSchema; - data.tableName = "PAGE_INDEX"; - data.id = META_TABLE_ID; - data.temporary = false; - data.persistData = true; - data.persistIndexes = true; - data.create = false; - data.session = pageStoreSession; - metaTable = new PageStoreTable(data); - metaIndex = (PageDataIndex) metaTable.getScanIndex( - pageStoreSession); - metaObjects.clear(); - metaObjects.put(-1, metaIndex); - } - - private void readMetaData() { - Cursor cursor = metaIndex.find(pageStoreSession, null, null); - // first, create all tables - while (cursor.next()) { - Row row = cursor.get(); - int type = row.getValue(1).getInt(); - if (type == META_TYPE_DATA_INDEX) { - addMeta(row, pageStoreSession, false); - } - } - // now create all secondary indexes - // otherwise the table might not be created yet - cursor = metaIndex.find(pageStoreSession, null, null); - while (cursor.next()) { - Row row = cursor.get(); - int type = row.getValue(1).getInt(); - if (type != META_TYPE_DATA_INDEX) { - addMeta(row, pageStoreSession, false); - } - } - } - - private void removeMeta(Row row) { - int id = row.getValue(0).getInt(); - PageIndex index = metaObjects.get(id); - index.getTable().removeIndex(index); - if (index instanceof PageBtreeIndex || index instanceof PageDelegateIndex) { - if (index.isTemporary()) { - 
pageStoreSession.removeLocalTempTableIndex(index); - } else { - index.getSchema().remove(index); - } - } - index.remove(pageStoreSession); - metaObjects.remove(id); - } - - private void addMeta(Row row, Session session, boolean redo) { - int id = row.getValue(0).getInt(); - int type = row.getValue(1).getInt(); - int parent = row.getValue(2).getInt(); - int rootPageId = row.getValue(3).getInt(); - String[] options = StringUtils.arraySplit( - row.getValue(4).getString(), ',', false); - String columnList = row.getValue(5).getString(); - String[] columns = StringUtils.arraySplit(columnList, ',', false); - Index meta; - if (trace.isDebugEnabled()) { - trace.debug("addMeta id="+ id +" type=" + type + - " root=" + rootPageId + " parent=" + parent + " columns=" + columnList); - } - if (redo && rootPageId != 0) { - // ensure the page is empty, but not used by regular data - writePage(rootPageId, createData()); - allocatePage(rootPageId); - } - metaRootPageId.put(id, rootPageId); - if (type == META_TYPE_DATA_INDEX) { - CreateTableData data = new CreateTableData(); - if (columns == null) { - throw DbException.throwInternalError(row.toString()); - } - for (int i = 0, len = columns.length; i < len; i++) { - Column col = new Column("C" + i, Value.INT); - data.columns.add(col); - } - data.schema = metaSchema; - data.tableName = "T" + id; - data.id = id; - data.temporary = options[2].equals("temp"); - data.persistData = true; - data.persistIndexes = true; - data.create = false; - data.session = session; - PageStoreTable table = new PageStoreTable(data); - boolean binaryUnsigned = SysProperties.SORT_BINARY_UNSIGNED; - if (options.length > 3) { - binaryUnsigned = Boolean.parseBoolean(options[3]); - } - boolean uuidUnsigned = SysProperties.SORT_UUID_UNSIGNED; - if (options.length > 4) { - uuidUnsigned = Boolean.parseBoolean(options[4]); - } - CompareMode mode = CompareMode.getInstance( - options[0], Integer.parseInt(options[1]), binaryUnsigned, uuidUnsigned); - 
table.setCompareMode(mode); - meta = table.getScanIndex(session); - } else { - Index p = metaObjects.get(parent); - if (p == null) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "Table not found:" + parent + " for " + row + " meta:" + metaObjects); - } - PageStoreTable table = (PageStoreTable) p.getTable(); - Column[] tableCols = table.getColumns(); - int len = columns.length; - IndexColumn[] cols = new IndexColumn[len]; - for (int i = 0; i < len; i++) { - String c = columns[i]; - IndexColumn ic = new IndexColumn(); - int idx = c.indexOf('/'); - if (idx >= 0) { - String s = c.substring(idx + 1); - ic.sortType = Integer.parseInt(s); - c = c.substring(0, idx); - } - ic.column = tableCols[Integer.parseInt(c)]; - cols[i] = ic; - } - IndexType indexType; - if (options[3].equals("d")) { - indexType = IndexType.createPrimaryKey(true, false); - Column[] tableColumns = table.getColumns(); - for (IndexColumn indexColumn : cols) { - tableColumns[indexColumn.column.getColumnId()].setNullable(false); - } - } else { - indexType = IndexType.createNonUnique(true); - } - meta = table.addIndex(session, "I" + id, id, cols, indexType, false, null); - } - metaObjects.put(id, (PageIndex) meta); - } - - /** - * Add an index to the in-memory index map. - * - * @param index the index - */ - public synchronized void addIndex(PageIndex index) { - metaObjects.put(index.getId(), index); - } - - /** - * Add the meta data of an index. - * - * @param index the index to add - * @param session the session - */ - public void addMeta(PageIndex index, Session session) { - Table table = index.getTable(); - if (SysProperties.CHECK) { - if (!table.isTemporary()) { - // to prevent ABBA locking problems, we need to always take - // the Database lock before we take the PageStore lock - synchronized (database) { - synchronized (this) { - database.verifyMetaLocked(session); - } - } - } - } - synchronized (this) { - int type = index instanceof PageDataIndex ? 
- META_TYPE_DATA_INDEX : META_TYPE_BTREE_INDEX; - IndexColumn[] columns = index.getIndexColumns(); - StringBuilder builder = new StringBuilder(); - for (int i = 0, length = columns.length; i < length; i++) { - if (i > 0) { - builder.append(','); - } - IndexColumn col = columns[i]; - int id = col.column.getColumnId(); - builder.append(id); - int sortType = col.sortType; - if (sortType != 0) { - builder.append('/').append(sortType); - } - } - String columnList = builder.toString(); - CompareMode mode = table.getCompareMode(); - StringBuilder options = new StringBuilder().append(mode.getName()).append(',').append(mode.getStrength()) - .append(','); - if (table.isTemporary()) { - options.append("temp"); - } - options.append(','); - if (index instanceof PageDelegateIndex) { - options.append('d'); - } - options.append(',').append(mode.isBinaryUnsigned()).append(',').append(mode.isUuidUnsigned()); - Row row = metaTable.getTemplateRow(); - row.setValue(0, ValueInt.get(index.getId())); - row.setValue(1, ValueInt.get(type)); - row.setValue(2, ValueInt.get(table.getId())); - row.setValue(3, ValueInt.get(index.getRootPageId())); - row.setValue(4, ValueString.get(options.toString())); - row.setValue(5, ValueString.get(columnList)); - row.setKey(index.getId() + 1); - metaIndex.add(session, row); - } - } - - /** - * Remove the meta data of an index. 
- * - * @param index the index to remove - * @param session the session - */ - public void removeMeta(Index index, Session session) { - if (SysProperties.CHECK) { - if (!index.getTable().isTemporary()) { - // to prevent ABBA locking problems, we need to always take - // the Database lock before we take the PageStore lock - synchronized (database) { - synchronized (this) { - database.verifyMetaLocked(session); - } - } - } - } - synchronized (this) { - if (!recoveryRunning) { - removeMetaIndex(index, session); - metaObjects.remove(index.getId()); - } - } - } - - private void removeMetaIndex(Index index, Session session) { - int key = index.getId() + 1; - Row row = metaIndex.getRow(session, key); - if (row.getKey() != key) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "key: " + key + " index: " + index + - " table: " + index.getTable() + " row: " + row); - } - metaIndex.remove(session, row); - } - - /** - * Set the maximum transaction log size in megabytes. - * - * @param maxSize the new maximum log size - */ - public void setMaxLogSize(long maxSize) { - this.maxLogSize = maxSize; - } - - /** - * Commit or rollback a prepared transaction after opening a database with - * in-doubt transactions. - * - * @param sessionId the session id - * @param pageId the page where the transaction was prepared - * @param commit if the transaction should be committed - */ - public synchronized void setInDoubtTransactionState(int sessionId, - int pageId, boolean commit) { - boolean old = database.isReadOnly(); - try { - database.setReadOnly(false); - log.setInDoubtTransactionState(sessionId, pageId, commit); - } finally { - database.setReadOnly(old); - } - } - - /** - * Get the list of in-doubt transaction. - * - * @return the list - */ - public ArrayList getInDoubtTransactions() { - return log.getInDoubtTransactions(); - } - - /** - * Check whether the recovery process is currently running. 
- * - * @return true if it is - */ - public boolean isRecoveryRunning() { - return recoveryRunning; - } - - private void checkOpen() { - if (file == null) { - throw DbException.get(ErrorCode.DATABASE_IS_CLOSED); - } - } - - /** - * Get the file write count since the database was created. - * - * @return the write count - */ - public long getWriteCountTotal() { - return writeCount + writeCountBase; - } - - /** - * Get the file write count since the database was opened. - * - * @return the write count - */ - public long getWriteCount() { - return writeCount; - } - - /** - * Get the file read count since the database was opened. - * - * @return the read count - */ - public long getReadCount() { - return readCount; - } - - /** - * A table is truncated. - * - * @param session the session - * @param tableId the table id - */ - public synchronized void logTruncate(Session session, int tableId) { - if (!recoveryRunning) { - openForWriting(); - log.logTruncate(session, tableId); - } - } - - /** - * Get the root page of an index. 
- * - * @param indexId the index id - * @return the root page - */ - public int getRootPageId(int indexId) { - return metaRootPageId.get(indexId); - } - - public Cache getCache() { - return cache; - } - - private void checksumSet(byte[] d, int pageId) { - int ps = pageSize; - int type = d[0]; - if (type == Page.TYPE_EMPTY) { - return; - } - int s1 = 255 + (type & 255), s2 = 255 + s1; - s2 += s1 += d[6] & 255; - s2 += s1 += d[(ps >> 1) - 1] & 255; - s2 += s1 += d[ps >> 1] & 255; - s2 += s1 += d[ps - 2] & 255; - s2 += s1 += d[ps - 1] & 255; - d[1] = (byte) (((s1 & 255) + (s1 >> 8)) ^ pageId); - d[2] = (byte) (((s2 & 255) + (s2 >> 8)) ^ (pageId >> 8)); - } - - /** - * Check if the stored checksum is correct - * @param d the data - * @param pageId the page id - * @param pageSize the page size - * @return true if it is correct - */ - public static boolean checksumTest(byte[] d, int pageId, int pageSize) { - int s1 = 255 + (d[0] & 255), s2 = 255 + s1; - s2 += s1 += d[6] & 255; - s2 += s1 += d[(pageSize >> 1) - 1] & 255; - s2 += s1 += d[pageSize >> 1] & 255; - s2 += s1 += d[pageSize - 2] & 255; - s2 += s1 += d[pageSize - 1] & 255; - return d[1] == (byte) (((s1 & 255) + (s1 >> 8)) ^ pageId) && d[2] == (byte) (((s2 & 255) + (s2 >> 8)) ^ (pageId - >> 8)); - } - - /** - * Increment the change count. To be done after the operation has finished. - */ - public void incrementChangeCount() { - if (++changeCount < 0) { - throw DbException.throwInternalError("changeCount has wrapped"); - } - } - - /** - * Get the current change count. 
The first value is 1 - * - * @return the change count - */ - public long getChangeCount() { - return changeCount; - } - - public void setLogMode(int logMode) { - this.logMode = logMode; - } - - public int getLogMode() { - return logMode; - } - - public void setLockFile(boolean lockFile) { - this.lockFile = lockFile; - } - - public BitSet getObjectIds() { - BitSet f = new BitSet(); - Cursor cursor = metaIndex.find(pageStoreSession, null, null); - while (cursor.next()) { - Row row = cursor.get(); - int id = row.getValue(0).getInt(); - if (id > 0) { - f.set(id); - } - } - return f; - } - - public Session getPageStoreSession() { - return pageStoreSession; - } - - public synchronized void setBackup(boolean start) { - backupLevel += start ? 1 : -1; - } - - public synchronized void setMaxCacheMemory(int size) { - cache.setMaxMemory(size); - } - -} diff --git a/h2/src/main/org/h2/store/PageStoreInDoubtTransaction.java b/h2/src/main/org/h2/store/PageStoreInDoubtTransaction.java deleted file mode 100644 index 4aa08da7c6..0000000000 --- a/h2/src/main/org/h2/store/PageStoreInDoubtTransaction.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store; - -import org.h2.message.DbException; - -/** - * Represents an in-doubt transaction (a transaction in the prepare phase). - */ -public class PageStoreInDoubtTransaction implements InDoubtTransaction { - - private final PageStore store; - private final int sessionId; - private final int pos; - private final String transactionName; - private int state; - - /** - * Create a new in-doubt transaction info object. 
- * - * @param store the page store - * @param sessionId the session id - * @param pos the position - * @param transaction the transaction name - */ - public PageStoreInDoubtTransaction(PageStore store, int sessionId, int pos, - String transaction) { - this.store = store; - this.sessionId = sessionId; - this.pos = pos; - this.transactionName = transaction; - this.state = IN_DOUBT; - } - - @Override - public void setState(int state) { - switch (state) { - case COMMIT: - store.setInDoubtTransactionState(sessionId, pos, true); - break; - case ROLLBACK: - store.setInDoubtTransactionState(sessionId, pos, false); - break; - default: - DbException.throwInternalError("state="+state); - } - this.state = state; - } - - @Override - public String getState() { - switch (state) { - case IN_DOUBT: - return "IN_DOUBT"; - case COMMIT: - return "COMMIT"; - case ROLLBACK: - return "ROLLBACK"; - default: - throw DbException.throwInternalError("state="+state); - } - } - - @Override - public String getTransactionName() { - return transactionName; - } - -} diff --git a/h2/src/main/org/h2/store/PageStreamData.java b/h2/src/main/org/h2/store/PageStreamData.java deleted file mode 100644 index d49fe521d2..0000000000 --- a/h2/src/main/org/h2/store/PageStreamData.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store; - -import org.h2.engine.Session; - -/** - * A data page of a stream. The format is: - *
            - *
          • page type: byte (0)
          • - *
          • checksum: short (1-2)
          • - *
          • the trunk page id: int (3-6)
          • - *
          • log key: int (7-10)
          • - *
          • data (11-)
          • - *
          - */ -public class PageStreamData extends Page { - - private static final int DATA_START = 11; - - private final PageStore store; - private int trunk; - private int logKey; - private Data data; - private int remaining; - - private PageStreamData(PageStore store, int pageId, int trunk, int logKey) { - setPos(pageId); - this.store = store; - this.trunk = trunk; - this.logKey = logKey; - } - - /** - * Read a stream data page. - * - * @param store the page store - * @param data the data - * @param pageId the page id - * @return the page - */ - static PageStreamData read(PageStore store, Data data, int pageId) { - PageStreamData p = new PageStreamData(store, pageId, 0, 0); - p.data = data; - p.read(); - return p; - } - - /** - * Create a new stream trunk page. - * - * @param store the page store - * @param pageId the page id - * @param trunk the trunk page - * @param logKey the log key - * @return the page - */ - static PageStreamData create(PageStore store, int pageId, int trunk, - int logKey) { - return new PageStreamData(store, pageId, trunk, logKey); - } - - /** - * Read the page from the disk. - */ - private void read() { - data.reset(); - data.readByte(); - data.readShortInt(); - trunk = data.readInt(); - logKey = data.readInt(); - } - - /** - * Write the header data. - */ - void initWrite() { - data = store.createData(); - data.writeByte((byte) Page.TYPE_STREAM_DATA); - data.writeShortInt(0); - data.writeInt(trunk); - data.writeInt(logKey); - remaining = store.getPageSize() - data.length(); - } - - /** - * Write the data to the buffer. 
- * - * @param buff the source data - * @param offset the offset in the source buffer - * @param len the number of bytes to write - * @return the number of bytes written - */ - int write(byte[] buff, int offset, int len) { - int max = Math.min(remaining, len); - data.write(buff, offset, max); - remaining -= max; - return max; - } - - @Override - public void write() { - store.writePage(getPos(), data); - } - - /** - * Get the number of bytes that fit in a page. - * - * @param pageSize the page size - * @return the number of bytes - */ - static int getCapacity(int pageSize) { - return pageSize - DATA_START; - } - - /** - * Read the next bytes from the buffer. - * - * @param startPos the position in the data page - * @param buff the target buffer - * @param off the offset in the target buffer - * @param len the number of bytes to read - */ - void read(int startPos, byte[] buff, int off, int len) { - System.arraycopy(data.getBytes(), startPos, buff, off, len); - } - - /** - * Get the number of remaining data bytes of this page. - * - * @return the remaining byte count - */ - int getRemaining() { - return remaining; - } - - /** - * Get the estimated memory size. 
- * - * @return number of double words (4 bytes) - */ - @Override - public int getMemory() { - return store.getPageSize() >> 2; - } - - @Override - public void moveTo(Session session, int newPos) { - // not required - } - - int getLogKey() { - return logKey; - } - - @Override - public String toString() { - return "[" + getPos() + "] stream data key:" + logKey + - " pos:" + data.length() + " remaining:" + remaining; - } - - @Override - public boolean canRemove() { - return true; - } - - public static int getReadStart() { - return DATA_START; - } - - @Override - public boolean canMove() { - return false; - } - -} \ No newline at end of file diff --git a/h2/src/main/org/h2/store/PageStreamTrunk.java b/h2/src/main/org/h2/store/PageStreamTrunk.java deleted file mode 100644 index a32770a3e3..0000000000 --- a/h2/src/main/org/h2/store/PageStreamTrunk.java +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store; - -import org.h2.api.ErrorCode; -import org.h2.engine.Session; -import org.h2.message.DbException; - -/** - * A trunk page of a stream. It contains the page numbers of the stream, and the - * page number of the next trunk. The format is: - *
            - *
          • page type: byte (0)
          • - *
          • checksum: short (1-2)
          • - *
          • previous trunk page, or 0 if none: int (3-6)
          • - *
          • log key: int (7-10)
          • - *
          • next trunk page: int (11-14)
          • - *
          • number of pages: short (15-16)
          • - *
          • page ids (17-)
          • - *
          - */ -public class PageStreamTrunk extends Page { - - private static final int DATA_START = 17; - - /** - * The previous stream trunk. - */ - int parent; - - /** - * The next stream trunk. - */ - int nextTrunk; - - private final PageStore store; - private int logKey; - private int[] pageIds; - private int pageCount; - private Data data; - - private PageStreamTrunk(PageStore store, int parent, int pageId, int next, - int logKey, int[] pageIds) { - setPos(pageId); - this.parent = parent; - this.store = store; - this.nextTrunk = next; - this.logKey = logKey; - this.pageCount = pageIds.length; - this.pageIds = pageIds; - } - - private PageStreamTrunk(PageStore store, Data data, int pageId) { - setPos(pageId); - this.data = data; - this.store = store; - } - - /** - * Read a stream trunk page. - * - * @param store the page store - * @param data the data - * @param pageId the page id - * @return the page - */ - static PageStreamTrunk read(PageStore store, Data data, int pageId) { - PageStreamTrunk p = new PageStreamTrunk(store, data, pageId); - p.read(); - return p; - } - - /** - * Create a new stream trunk page. - * - * @param store the page store - * @param parent the parent page - * @param pageId the page id - * @param next the next trunk page - * @param logKey the log key - * @param pageIds the stream data page ids - * @return the page - */ - static PageStreamTrunk create(PageStore store, int parent, int pageId, - int next, int logKey, int[] pageIds) { - return new PageStreamTrunk(store, parent, pageId, next, logKey, pageIds); - } - - /** - * Read the page from the disk. - */ - private void read() { - data.reset(); - data.readByte(); - data.readShortInt(); - parent = data.readInt(); - logKey = data.readInt(); - nextTrunk = data.readInt(); - pageCount = data.readShortInt(); - pageIds = new int[pageCount]; - for (int i = 0; i < pageCount; i++) { - pageIds[i] = data.readInt(); - } - } - - /** - * Get the data page id at the given position. 
- * - * @param index the index (0, 1, ...) - * @return the value, or -1 if the index is too large - */ - int getPageData(int index) { - if (index >= pageIds.length) { - return -1; - } - return pageIds[index]; - } - - @Override - public void write() { - data = store.createData(); - data.writeByte((byte) Page.TYPE_STREAM_TRUNK); - data.writeShortInt(0); - data.writeInt(parent); - data.writeInt(logKey); - data.writeInt(nextTrunk); - data.writeShortInt(pageCount); - for (int i = 0; i < pageCount; i++) { - data.writeInt(pageIds[i]); - } - store.writePage(getPos(), data); - } - - /** - * Get the number of pages that can be addressed in a stream trunk page. - * - * @param pageSize the page size - * @return the number of pages - */ - static int getPagesAddressed(int pageSize) { - return (pageSize - DATA_START) / 4; - } - - /** - * Check if the given data page is in this trunk page. - * - * @param dataPageId the page id - * @return true if it is - */ - boolean contains(int dataPageId) { - for (int i = 0; i < pageCount; i++) { - if (pageIds[i] == dataPageId) { - return true; - } - } - return false; - } - - /** - * Free this page and all data pages. Pages after the last used data page - * (if within this list) are empty and therefore not just freed, but marked - * as not used. - * - * @param lastUsedPage the last used data page - * @return the number of pages freed - */ - int free(int lastUsedPage) { - store.free(getPos(), false); - int freed = 1; - boolean notUsed = false; - for (int i = 0; i < pageCount; i++) { - int page = pageIds[i]; - if (notUsed) { - store.freeUnused(page); - } else { - store.free(page, false); - } - freed++; - if (page == lastUsedPage) { - notUsed = true; - } - } - return freed; - } - - /** - * Get the estimated memory size. 
- * - * @return number of double words (4 bytes) - */ - @Override - public int getMemory() { - return store.getPageSize() >> 2; - } - - @Override - public void moveTo(Session session, int newPos) { - // not required - } - - int getLogKey() { - return logKey; - } - - public int getNextTrunk() { - return nextTrunk; - } - - /** - * An iterator over page stream trunk pages. - */ - static class Iterator { - - private final PageStore store; - private int first; - private int next; - private int previous; - private boolean canDelete; - private int current; - - Iterator(PageStore store, int first) { - this.store = store; - this.next = first; - } - - int getCurrentPageId() { - return current; - } - - /** - * Get the next trunk page or null if no next trunk page. - * - * @return the next trunk page or null - */ - PageStreamTrunk next() { - canDelete = false; - if (first == 0) { - first = next; - } else if (first == next) { - return null; - } - if (next == 0 || next >= store.getPageCount()) { - return null; - } - Page p; - current = next; - try { - p = store.getPage(next); - } catch (DbException e) { - if (e.getErrorCode() == ErrorCode.FILE_CORRUPTED_1) { - // wrong checksum means end of stream - return null; - } - throw e; - } - if (p == null || p instanceof PageStreamTrunk || - p instanceof PageStreamData) { - canDelete = true; - } - if (!(p instanceof PageStreamTrunk)) { - return null; - } - PageStreamTrunk t = (PageStreamTrunk) p; - if (previous > 0 && t.parent != previous) { - return null; - } - previous = next; - next = t.nextTrunk; - return t; - } - - /** - * Check if the current page can be deleted. It can if it's empty, a - * stream trunk, or a stream data page. 
- * - * @return true if it can be deleted - */ - boolean canDelete() { - return canDelete; - } - - } - - @Override - public boolean canRemove() { - return true; - } - - @Override - public String toString() { - return "page[" + getPos() + "] stream trunk key:" + logKey + - " next:" + nextTrunk; - } - - @Override - public boolean canMove() { - return false; - } - -} diff --git a/h2/src/main/org/h2/store/RangeInputStream.java b/h2/src/main/org/h2/store/RangeInputStream.java index 61de660146..d4401d4428 100644 --- a/h2/src/main/org/h2/store/RangeInputStream.java +++ b/h2/src/main/org/h2/store/RangeInputStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; diff --git a/h2/src/main/org/h2/store/RangeReader.java b/h2/src/main/org/h2/store/RangeReader.java index dda6e2975e..d0a6e0dc41 100644 --- a/h2/src/main/org/h2/store/RangeReader.java +++ b/h2/src/main/org/h2/store/RangeReader.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; diff --git a/h2/src/main/org/h2/store/RecoverTester.java b/h2/src/main/org/h2/store/RecoverTester.java index 793e0e1d1b..3c4c94e9de 100644 --- a/h2/src/main/org/h2/store/RecoverTester.java +++ b/h2/src/main/org/h2/store/RecoverTester.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store; @@ -10,17 +10,16 @@ import java.io.PrintWriter; import java.sql.SQLException; import java.util.HashSet; -import java.util.Properties; import org.h2.api.ErrorCode; import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; -import org.h2.store.fs.FilePathRec; import org.h2.store.fs.FileUtils; import org.h2.store.fs.Recorder; +import org.h2.store.fs.rec.FilePathRec; import org.h2.tools.Recover; import org.h2.util.IOUtils; import org.h2.util.StringUtils; @@ -32,7 +31,7 @@ */ public class RecoverTester implements Recorder { - private static RecoverTester instance; + private static final RecoverTester instance = new RecoverTester(); private String testDatabase = "memFS:reopen"; private int writeCount = Utils.getProperty("h2.recoverTestOffset", 0); @@ -49,18 +48,10 @@ public class RecoverTester implements Recorder { * @param recoverTest the value of the recover test parameter */ public static synchronized void init(String recoverTest) { - RecoverTester tester = RecoverTester.getInstance(); if (StringUtils.isNumber(recoverTest)) { - tester.setTestEvery(Integer.parseInt(recoverTest)); + instance.setTestEvery(Integer.parseInt(recoverTest)); } - FilePathRec.setRecorder(tester); - } - - public static synchronized RecoverTester getInstance() { - if (instance == null) { - instance = new RecoverTester(); - } - return instance; + FilePathRec.setRecorder(instance); } @Override @@ -68,8 +59,7 @@ public void log(int op, String fileName, byte[] data, long x) { if (op != Recorder.WRITE && op != Recorder.TRUNCATE) { return; } - if (!fileName.endsWith(Constants.SUFFIX_PAGE_FILE) && - !fileName.endsWith(Constants.SUFFIX_MV_FILE)) { + if (!fileName.endsWith(Constants.SUFFIX_MV_FILE)) { return; } 
writeCount++; @@ -102,23 +92,14 @@ public void log(int op, String fileName, byte[] data, long x) { private synchronized void testDatabase(String fileName, PrintWriter out) { out.println("+ write #" + writeCount + " verify #" + verifyCount); try { - IOUtils.copyFiles(fileName, testDatabase + Constants.SUFFIX_PAGE_FILE); - String mvFileName = fileName.substring(0, fileName.length() - - Constants.SUFFIX_PAGE_FILE.length()) + - Constants.SUFFIX_MV_FILE; - if (FileUtils.exists(mvFileName)) { - IOUtils.copyFiles(mvFileName, testDatabase + Constants.SUFFIX_MV_FILE); - } + IOUtils.copyFiles(fileName, testDatabase + Constants.SUFFIX_MV_FILE); verifyCount++; // avoid using the Engine class to avoid deadlocks - Properties p = new Properties(); - p.setProperty("user", ""); - p.setProperty("password", ""); ConnectionInfo ci = new ConnectionInfo("jdbc:h2:" + testDatabase + - ";FILE_LOCK=NO;TRACE_LEVEL_FILE=0", p); + ";FILE_LOCK=NO;TRACE_LEVEL_FILE=0", null, "", ""); Database database = new Database(ci, null); // close the database - Session sysSession = database.getSystemSession(); + SessionLocal sysSession = database.getSystemSession(); sysSession.prepare("script to '" + testDatabase + ".sql'").query(0); sysSession.prepare("shutdown immediately").update(); database.removeSession(null); @@ -154,11 +135,10 @@ private synchronized void testDatabase(String fileName, PrintWriter out) { } testDatabase += "X"; try { - IOUtils.copyFiles(fileName, testDatabase + Constants.SUFFIX_PAGE_FILE); + IOUtils.copyFiles(fileName, testDatabase + Constants.SUFFIX_MV_FILE); // avoid using the Engine class to avoid deadlocks - Properties p = new Properties(); ConnectionInfo ci = new ConnectionInfo("jdbc:h2:" + - testDatabase + ";FILE_LOCK=NO", p); + testDatabase + ";FILE_LOCK=NO", null, null, null); Database database = new Database(ci, null); // close the database database.removeSession(null); diff --git a/h2/src/main/org/h2/store/SessionState.java b/h2/src/main/org/h2/store/SessionState.java deleted 
file mode 100644 index 55f0b63fc6..0000000000 --- a/h2/src/main/org/h2/store/SessionState.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store; - - -/** - * The session state contains information about when was the last commit of a - * session. It is only used during recovery. - */ -class SessionState { - - /** - * The session id - */ - public int sessionId; - - /** - * The last log id where a commit for this session is found. - */ - public int lastCommitLog; - - /** - * The position where a commit for this session is found. - */ - public int lastCommitPos; - - /** - * The in-doubt transaction if there is one. - */ - public PageStoreInDoubtTransaction inDoubtTransaction; - - /** - * Check if this session state is already committed at this point. - * - * @param logId the log id - * @param pos the position in the log - * @return true if it is committed - */ - public boolean isCommitted(int logId, int pos) { - if (logId != lastCommitLog) { - return lastCommitLog > logId; - } - return lastCommitPos >= pos; - } - - @Override - public String toString() { - return "sessionId:" + sessionId + " log:" + lastCommitLog + - " pos:" + lastCommitPos + " inDoubt:" + inDoubtTransaction; - } -} diff --git a/h2/src/main/org/h2/store/WriterThread.java b/h2/src/main/org/h2/store/WriterThread.java deleted file mode 100644 index 29b7791394..0000000000 --- a/h2/src/main/org/h2/store/WriterThread.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.store; - -import java.lang.ref.WeakReference; -import java.security.AccessControlException; -import org.h2.Driver; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.message.Trace; -import org.h2.message.TraceSystem; - -/** - * The writer thread is responsible to flush the transaction log - * from time to time. - */ -public class WriterThread implements Runnable { - - /** - * The reference to the database. - * - * Thread objects are not garbage collected - * until they returned from the run() method - * (even if they where never started) - * so if the connection was not closed, - * the database object cannot get reclaimed - * by the garbage collector if we use a hard reference. - */ - private volatile WeakReference databaseRef; - - private int writeDelay; - private Thread thread; - private volatile boolean stop; - - private WriterThread(Database database, int writeDelay) { - this.databaseRef = new WeakReference<>(database); - this.writeDelay = writeDelay; - } - - /** - * Change the write delay - * - * @param writeDelay the new write delay - */ - public void setWriteDelay(int writeDelay) { - this.writeDelay = writeDelay; - } - - /** - * Create and start a new writer thread for the given database. If the - * thread can't be created, this method returns null. 
- * - * @param database the database - * @param writeDelay the delay - * @return the writer thread object or null - */ - public static WriterThread create(Database database, int writeDelay) { - try { - WriterThread writer = new WriterThread(database, writeDelay); - writer.thread = new Thread(writer, "H2 Log Writer " + database.getShortName()); - Driver.setThreadContextClassLoader(writer.thread); - writer.thread.setDaemon(true); - return writer; - } catch (AccessControlException e) { - // // Google App Engine does not allow threads - return null; - } - } - - @Override - public void run() { - while (!stop) { - Database database = databaseRef.get(); - if (database == null) { - break; - } - int wait = writeDelay; - try { - if (database.isFileLockSerialized()) { - wait = Constants.MIN_WRITE_DELAY; - database.checkpointIfRequired(); - } else { - database.flush(); - } - } catch (Exception e) { - TraceSystem traceSystem = database.getTraceSystem(); - if (traceSystem != null) { - traceSystem.getTrace(Trace.DATABASE).error(e, "flush"); - } - } - - // wait 0 mean wait forever, which is not what we want - wait = Math.max(wait, Constants.MIN_WRITE_DELAY); - synchronized (this) { - while (!stop && wait > 0) { - // wait 100 ms at a time - int w = Math.min(wait, 100); - try { - wait(w); - } catch (InterruptedException e) { - // ignore - } - wait -= w; - } - } - } - databaseRef = null; - } - - /** - * Stop the thread. This method is called when closing the database. - */ - public void stopThread() { - stop = true; - synchronized (this) { - notify(); - } - // can't do thread.join(), because this thread might be holding - // a lock that the writer thread is waiting for - } - - /** - * Start the thread. 
This method is called after opening the database - * (to avoid deadlocks) - */ - public void startThread() { - thread.start(); - thread = null; - } - -} diff --git a/h2/src/main/org/h2/store/fs/FakeFileChannel.java b/h2/src/main/org/h2/store/fs/FakeFileChannel.java index 7a7b2abbf9..62793ce317 100644 --- a/h2/src/main/org/h2/store/fs/FakeFileChannel.java +++ b/h2/src/main/org/h2/store/fs/FakeFileChannel.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store.fs; diff --git a/h2/src/main/org/h2/store/fs/FileBase.java b/h2/src/main/org/h2/store/fs/FileBase.java index f804716631..b8bf353535 100644 --- a/h2/src/main/org/h2/store/fs/FileBase.java +++ b/h2/src/main/org/h2/store/fs/FileBase.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.store.fs; @@ -18,21 +18,6 @@ */ public abstract class FileBase extends FileChannel { - @Override - public abstract long size() throws IOException; - - @Override - public abstract long position() throws IOException; - - @Override - public abstract FileChannel position(long newPosition) throws IOException; - - @Override - public abstract int read(ByteBuffer dst) throws IOException; - - @Override - public abstract int write(ByteBuffer src) throws IOException; - @Override public synchronized int read(ByteBuffer dst, long position) throws IOException { @@ -53,9 +38,6 @@ public synchronized int write(ByteBuffer src, long position) return len; } - @Override - public abstract FileChannel truncate(long size) throws IOException; - @Override public void force(boolean metaData) throws IOException { // ignore diff --git a/h2/src/main/org/h2/store/fs/FileBaseDefault.java b/h2/src/main/org/h2/store/fs/FileBaseDefault.java new file mode 100644 index 0000000000..38a0bded77 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/FileBaseDefault.java @@ -0,0 +1,68 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; + +/** + * Default implementation of the slow operations that need synchronization because they + * involve the file position. 
+ */ +public abstract class FileBaseDefault extends FileBase { + + private long position = 0; + + @Override + public final synchronized long position() throws IOException { + return position; + } + + @Override + public final synchronized FileChannel position(long newPosition) throws IOException { + if (newPosition < 0) { + throw new IllegalArgumentException(); + } + position = newPosition; + return this; + } + + @Override + public final synchronized int read(ByteBuffer dst) throws IOException { + int read = read(dst, position); + if (read > 0) { + position += read; + } + return read; + } + + @Override + public final synchronized int write(ByteBuffer src) throws IOException { + int written = write(src, position); + if (written > 0) { + position += written; + } + return written; + } + + @Override + public final synchronized FileChannel truncate(long newLength) throws IOException { + implTruncate(newLength); + if (newLength < position) { + position = newLength; + } + return this; + } + + /** + * The truncate implementation. + * + * @param size the new size + * @throws IOException on failure + */ + protected abstract void implTruncate(long size) throws IOException; +} diff --git a/h2/src/main/org/h2/store/fs/FileChannelInputStream.java b/h2/src/main/org/h2/store/fs/FileChannelInputStream.java index f249fa50ca..5677a385e2 100644 --- a/h2/src/main/org/h2/store/fs/FileChannelInputStream.java +++ b/h2/src/main/org/h2/store/fs/FileChannelInputStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.store.fs; diff --git a/h2/src/main/org/h2/store/fs/FileChannelOutputStream.java b/h2/src/main/org/h2/store/fs/FileChannelOutputStream.java deleted file mode 100644 index 2186e92a0f..0000000000 --- a/h2/src/main/org/h2/store/fs/FileChannelOutputStream.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store.fs; - -import java.io.IOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; - -/** - * Allows to write to a file channel like an output stream. - */ -public class FileChannelOutputStream extends OutputStream { - - private final FileChannel channel; - private final byte[] buffer = { 0 }; - - /** - * Create a new file object output stream from the file channel. - * - * @param channel the file channel - * @param append true for append mode, false for truncate and overwrite - */ - public FileChannelOutputStream(FileChannel channel, boolean append) - throws IOException { - this.channel = channel; - if (append) { - channel.position(channel.size()); - } else { - channel.position(0); - channel.truncate(0); - } - } - - @Override - public void write(int b) throws IOException { - buffer[0] = (byte) b; - FileUtils.writeFully(channel, ByteBuffer.wrap(buffer)); - } - - @Override - public void write(byte[] b) throws IOException { - FileUtils.writeFully(channel, ByteBuffer.wrap(b)); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - FileUtils.writeFully(channel, ByteBuffer.wrap(b, off, len)); - } - - @Override - public void close() throws IOException { - channel.close(); - } - -} diff --git a/h2/src/main/org/h2/store/fs/FilePath.java b/h2/src/main/org/h2/store/fs/FilePath.java index 5dc99f22c9..1225165163 100644 --- a/h2/src/main/org/h2/store/fs/FilePath.java +++ 
b/h2/src/main/org/h2/store/fs/FilePath.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store.fs; @@ -8,9 +8,11 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.channels.Channels; import java.nio.channels.FileChannel; import java.util.List; import java.util.concurrent.ConcurrentHashMap; +import org.h2.store.fs.disk.FilePathDisk; import org.h2.util.MathUtils; /** @@ -21,9 +23,9 @@ */ public abstract class FilePath { - private static FilePath defaultProvider; + private static final FilePath defaultProvider; - private static ConcurrentHashMap providers; + private static final ConcurrentHashMap providers; /** * The prefix for temporary files. @@ -35,7 +37,39 @@ public abstract class FilePath { * The complete path (which may be absolute or relative, depending on the * file system). 
*/ - protected String name; + public String name; + + static { + FilePath def = null; + ConcurrentHashMap map = new ConcurrentHashMap<>(); + for (String c : new String[] { + "org.h2.store.fs.disk.FilePathDisk", + "org.h2.store.fs.mem.FilePathMem", + "org.h2.store.fs.mem.FilePathMemLZF", + "org.h2.store.fs.niomem.FilePathNioMem", + "org.h2.store.fs.niomem.FilePathNioMemLZF", + "org.h2.store.fs.split.FilePathSplit", + "org.h2.store.fs.niomapped.FilePathNioMapped", + "org.h2.store.fs.async.FilePathAsync", + "org.h2.store.fs.zip.FilePathZip", + "org.h2.store.fs.retry.FilePathRetryOnInterrupt" + }) { + try { + FilePath p = (FilePath) Class.forName(c).getDeclaredConstructor().newInstance(); + map.put(p.getScheme(), p); + if (p.getClass() == FilePathDisk.class) { + map.put("nio", p); + } + if (def == null) { + def = p; + } + } catch (Exception e) { + // ignore - the files may be excluded in purpose + } + } + defaultProvider = def; + providers = map; + } /** * Get the file path object for the given path. 
@@ -47,7 +81,6 @@ public abstract class FilePath { public static FilePath get(String path) { path = path.replace('\\', '/'); int index = path.indexOf(':'); - registerDefaultProviders(); if (index < 2) { // use the default provider if no prefix or // only a single character (drive name) @@ -62,43 +95,12 @@ public static FilePath get(String path) { return p.getPath(path); } - private static void registerDefaultProviders() { - if (providers == null || defaultProvider == null) { - ConcurrentHashMap map = new ConcurrentHashMap<>(); - for (String c : new String[] { - "org.h2.store.fs.FilePathDisk", - "org.h2.store.fs.FilePathMem", - "org.h2.store.fs.FilePathMemLZF", - "org.h2.store.fs.FilePathNioMem", - "org.h2.store.fs.FilePathNioMemLZF", - "org.h2.store.fs.FilePathSplit", - "org.h2.store.fs.FilePathNio", - "org.h2.store.fs.FilePathNioMapped", - "org.h2.store.fs.FilePathAsync", - "org.h2.store.fs.FilePathZip", - "org.h2.store.fs.FilePathRetryOnInterrupt" - }) { - try { - FilePath p = (FilePath) Class.forName(c).getDeclaredConstructor().newInstance(); - map.put(p.getScheme(), p); - if (defaultProvider == null) { - defaultProvider = p; - } - } catch (Exception e) { - // ignore - the files may be excluded in purpose - } - } - providers = map; - } - } - /** * Register a file provider. 
* * @param provider the file provider */ public static void register(FilePath provider) { - registerDefaultProviders(); providers.put(provider.getScheme(), provider); } @@ -108,7 +110,6 @@ public static void register(FilePath provider) { * @param provider the file provider */ public static void unregister(FilePath provider) { - registerDefaultProviders(); providers.remove(provider.getScheme()); } @@ -220,7 +221,28 @@ public String getName() { * @return the output stream * @throws IOException If an I/O error occurs */ - public abstract OutputStream newOutputStream(boolean append) throws IOException; + public OutputStream newOutputStream(boolean append) throws IOException { + return newFileChannelOutputStream(open("rw"), append); + } + + /** + * Create a new output stream from the channel. + * + * @param channel the file channel + * @param append true for append mode, false for truncate and overwrite + * @return the output stream + * @throws IOException on I/O exception + */ + public static final OutputStream newFileChannelOutputStream(FileChannel channel, boolean append) + throws IOException { + if (append) { + channel.position(channel.size()); + } else { + channel.position(0); + channel.truncate(0); + } + return Channels.newOutputStream(channel); + } /** * Open a random access file object. @@ -237,7 +259,9 @@ public String getName() { * @return the input stream * @throws IOException If an I/O error occurs */ - public abstract InputStream newInputStream() throws IOException; + public InputStream newInputStream() throws IOException { + return Channels.newInputStream(open("r")); + } /** * Disable the ability to write. 
@@ -252,6 +276,7 @@ public String getName() { * @param suffix the suffix * @param inTempDir if the file should be stored in the temporary directory * @return the name of the created file + * @throws IOException on failure */ @SuppressWarnings("unused") public FilePath createTempFile(String suffix, boolean inTempDir) throws IOException { @@ -273,7 +298,7 @@ public FilePath createTempFile(String suffix, boolean inTempDir) throws IOExcept * @param newRandom if the random part of the filename should change * @return the file name part */ - protected static synchronized String getNextTempFileNamePart( + private static synchronized String getNextTempFileNamePart( boolean newRandom) { if (newRandom || tempRandom == null) { tempRandom = MathUtils.randomInt(Integer.MAX_VALUE) + "."; diff --git a/h2/src/main/org/h2/store/fs/FilePathAsync.java b/h2/src/main/org/h2/store/fs/FilePathAsync.java deleted file mode 100644 index 9ba9ccfa69..0000000000 --- a/h2/src/main/org/h2/store/fs/FilePathAsync.java +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store.fs; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.AsynchronousFileChannel; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.nio.channels.NonWritableChannelException; -import java.nio.file.OpenOption; -import java.nio.file.Paths; -import java.nio.file.StandardOpenOption; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; - -/** - * This file system stores files on disk and uses - * java.nio.channels.AsynchronousFileChannel to access the files. - */ -public class FilePathAsync extends FilePathWrapper { - - private static final boolean AVAILABLE; - - /* - * Android has NIO2 only since API 26. 
- */ - static { - boolean a = false; - try { - AsynchronousFileChannel.class.getName(); - a = true; - } catch (Throwable e) { - // Nothing to do - } - AVAILABLE = a; - } - - /** - * Creates new instance of FilePathAsync. - */ - public FilePathAsync() { - if (!AVAILABLE) { - throw new UnsupportedOperationException("NIO2 is not available"); - } - } - - @Override - public FileChannel open(String mode) throws IOException { - return new FileAsync(name.substring(getScheme().length() + 1), mode); - } - - @Override - public String getScheme() { - return "async"; - } - -} - -/** - * File which uses NIO2 AsynchronousFileChannel. - */ -class FileAsync extends FileBase { - - private static final OpenOption[] R = { StandardOpenOption.READ }; - - private static final OpenOption[] W = { StandardOpenOption.READ, StandardOpenOption.WRITE, - StandardOpenOption.CREATE }; - - private static final OpenOption[] RWS = { StandardOpenOption.READ, StandardOpenOption.WRITE, - StandardOpenOption.CREATE, StandardOpenOption.SYNC }; - - private static final OpenOption[] RWD = { StandardOpenOption.READ, StandardOpenOption.WRITE, - StandardOpenOption.CREATE, StandardOpenOption.DSYNC }; - - private final String name; - - private final AsynchronousFileChannel channel; - - private long position; - - private static T complete(Future future) throws IOException { - boolean interrupted = false; - for (;;) { - try { - T result = future.get(); - if (interrupted) { - Thread.currentThread().interrupt(); - } - return result; - } catch (InterruptedException e) { - interrupted = true; - } catch (ExecutionException e) { - throw new IOException(e.getCause()); - } - } - } - - FileAsync(String fileName, String mode) throws IOException { - this.name = fileName; - OpenOption[] options; - switch (mode) { - case "r": - options = R; - break; - case "rw": - options = W; - break; - case "rws": - options = RWS; - break; - case "rwd": - options = RWD; - break; - default: - throw new IllegalArgumentException(mode); - } - 
channel = AsynchronousFileChannel.open(Paths.get(fileName), options); - } - - @Override - public void implCloseChannel() throws IOException { - channel.close(); - } - - @Override - public long position() throws IOException { - return position; - } - - @Override - public long size() throws IOException { - return channel.size(); - } - - @Override - public int read(ByteBuffer dst) throws IOException { - int read = complete(channel.read(dst, position)); - if (read > 0) { - position += read; - } - return read; - } - - @Override - public FileChannel position(long pos) throws IOException { - if (pos < 0) { - throw new IllegalArgumentException(); - } - position = pos; - return this; - } - - @Override - public int read(ByteBuffer dst, long position) throws IOException { - return complete(channel.read(dst, position)); - } - - @Override - public int write(ByteBuffer src, long position) throws IOException { - try { - return complete(channel.write(src, position)); - } catch (NonWritableChannelException e) { - throw new IOException("read only"); - } - } - - @Override - public FileChannel truncate(long newLength) throws IOException { - channel.truncate(newLength); - if (newLength < position) { - position = newLength; - } - return this; - } - - @Override - public void force(boolean metaData) throws IOException { - channel.force(metaData); - } - - @Override - public int write(ByteBuffer src) throws IOException { - int written; - try { - written = complete(channel.write(src, position)); - position += written; - } catch (NonWritableChannelException e) { - throw new IOException("read only"); - } - return written; - } - - @Override - public synchronized FileLock tryLock(long position, long size, boolean shared) throws IOException { - return channel.tryLock(position, size, shared); - } - - @Override - public String toString() { - return "async:" + name; - } - -} diff --git a/h2/src/main/org/h2/store/fs/FilePathDisk.java b/h2/src/main/org/h2/store/fs/FilePathDisk.java deleted file mode 
100644 index 9ee45195e6..0000000000 --- a/h2/src/main/org/h2/store/fs/FilePathDisk.java +++ /dev/null @@ -1,502 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store.fs; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.RandomAccessFile; -import java.net.URL; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.nio.channels.NonWritableChannelException; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.List; - -import org.h2.api.ErrorCode; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.util.IOUtils; - -/** - * This file system stores files on disk. - * This is the most common file system. - */ -public class FilePathDisk extends FilePath { - - private static final String CLASSPATH_PREFIX = "classpath:"; - - @Override - public FilePathDisk getPath(String path) { - FilePathDisk p = new FilePathDisk(); - p.name = translateFileName(path); - return p; - } - - @Override - public long size() { - if (name.startsWith(CLASSPATH_PREFIX)) { - try { - String fileName = name.substring(CLASSPATH_PREFIX.length()); - // Force absolute resolution in Class.getResource - if (!fileName.startsWith("/")) { - fileName = "/" + fileName; - } - URL resource = this.getClass().getResource(fileName); - if (resource != null) { - return Files.size(Paths.get(resource.toURI())); - } else { - return 0; - } - } catch (Exception e) { - return 0; - } - } - return new File(name).length(); - } - - /** - * Translate the file name to the native format. This will replace '\' with - * '/' and expand the home directory ('~'). 
- * - * @param fileName the file name - * @return the native file name - */ - protected static String translateFileName(String fileName) { - fileName = fileName.replace('\\', '/'); - if (fileName.startsWith("file:")) { - fileName = fileName.substring("file:".length()); - } - return expandUserHomeDirectory(fileName); - } - - /** - * Expand '~' to the user home directory. It is only be expanded if the '~' - * stands alone, or is followed by '/' or '\'. - * - * @param fileName the file name - * @return the native file name - */ - public static String expandUserHomeDirectory(String fileName) { - if (fileName.startsWith("~") && (fileName.length() == 1 || - fileName.startsWith("~/"))) { - String userDir = SysProperties.USER_HOME; - fileName = userDir + fileName.substring(1); - } - return fileName; - } - - @Override - public void moveTo(FilePath newName, boolean atomicReplace) { - File oldFile = new File(name); - File newFile = new File(newName.name); - if (oldFile.getAbsolutePath().equals(newFile.getAbsolutePath())) { - return; - } - if (!oldFile.exists()) { - throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, - name + " (not found)", - newName.name); - } - // Java 7: use java.nio.file.Files.move(Path source, Path target, - // CopyOption... options) - // with CopyOptions "REPLACE_EXISTING" and "ATOMIC_MOVE". 
- if (atomicReplace) { - boolean ok = oldFile.renameTo(newFile); - if (ok) { - return; - } - throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName.name); - } - if (newFile.exists()) { - throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName + " (exists)"); - } - for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) { - IOUtils.trace("rename", name + " >" + newName, null); - boolean ok = oldFile.renameTo(newFile); - if (ok) { - return; - } - wait(i); - } - throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName.name); - } - - private static void wait(int i) { - if (i == 8) { - System.gc(); - } - try { - // sleep at most 256 ms - long sleep = Math.min(256, i * i); - Thread.sleep(sleep); - } catch (InterruptedException e) { - // ignore - } - } - - @Override - public boolean createFile() { - File file = new File(name); - for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) { - try { - return file.createNewFile(); - } catch (IOException e) { - // 'access denied' is really a concurrent access problem - wait(i); - } - } - return false; - } - - @Override - public boolean exists() { - return new File(name).exists(); - } - - @Override - public void delete() { - File file = new File(name); - for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) { - IOUtils.trace("delete", name, null); - boolean ok = file.delete(); - if (ok || !file.exists()) { - return; - } - wait(i); - } - throw DbException.get(ErrorCode.FILE_DELETE_FAILED_1, name); - } - - @Override - public List newDirectoryStream() { - ArrayList list = new ArrayList<>(); - File f = new File(name); - try { - String[] files = f.list(); - if (files != null) { - String base = f.getCanonicalPath(); - if (!base.endsWith(SysProperties.FILE_SEPARATOR)) { - base += SysProperties.FILE_SEPARATOR; - } - list.ensureCapacity(files.length); - for (String file : files) { - list.add(getPath(base + file)); - } - } - return list; - } catch (IOException e) { - throw DbException.convertIOException(e, 
name); - } - } - - @Override - public boolean canWrite() { - return canWriteInternal(new File(name)); - } - - @Override - public boolean setReadOnly() { - File f = new File(name); - return f.setReadOnly(); - } - - @Override - public FilePathDisk toRealPath() { - try { - String fileName = new File(name).getCanonicalPath(); - return getPath(fileName); - } catch (IOException e) { - throw DbException.convertIOException(e, name); - } - } - - @Override - public FilePath getParent() { - String p = new File(name).getParent(); - return p == null ? null : getPath(p); - } - - @Override - public boolean isDirectory() { - return new File(name).isDirectory(); - } - - @Override - public boolean isAbsolute() { - return new File(name).isAbsolute(); - } - - @Override - public long lastModified() { - return new File(name).lastModified(); - } - - private static boolean canWriteInternal(File file) { - try { - if (!file.canWrite()) { - return false; - } - } catch (Exception e) { - // workaround for GAE which throws a - // java.security.AccessControlException - return false; - } - // File.canWrite() does not respect windows user permissions, - // so we must try to open it using the mode "rw". 
- // See also http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4420020 - RandomAccessFile r = null; - try { - r = new RandomAccessFile(file, "rw"); - return true; - } catch (FileNotFoundException e) { - return false; - } finally { - if (r != null) { - try { - r.close(); - } catch (IOException e) { - // ignore - } - } - } - } - - @Override - public void createDirectory() { - File dir = new File(name); - for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) { - if (dir.exists()) { - if (dir.isDirectory()) { - return; - } - throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, - name + " (a file with this name already exists)"); - } else if (dir.mkdir()) { - return; - } - wait(i); - } - throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, name); - } - - @Override - public OutputStream newOutputStream(boolean append) throws IOException { - try { - File file = new File(name); - File parent = file.getParentFile(); - if (parent != null) { - FileUtils.createDirectories(parent.getAbsolutePath()); - } - FileOutputStream out = new FileOutputStream(name, append); - IOUtils.trace("openFileOutputStream", name, out); - return out; - } catch (IOException e) { - freeMemoryAndFinalize(); - return new FileOutputStream(name); - } - } - - @Override - public InputStream newInputStream() throws IOException { - if (name.matches("[a-zA-Z]{2,19}:.*")) { - // if the ':' is in position 1, a windows file access is assumed: - // C:.. or D:, and if the ':' is not at the beginning, assume its a - // file name with a colon - if (name.startsWith(CLASSPATH_PREFIX)) { - String fileName = name.substring(CLASSPATH_PREFIX.length()); - // Force absolute resolution in Class.getResourceAsStream - if (!fileName.startsWith("/")) { - fileName = "/" + fileName; - } - InputStream in = getClass().getResourceAsStream(fileName); - if (in == null) { - // ClassLoader.getResourceAsStream doesn't need leading "/" - in = Thread.currentThread().getContextClassLoader(). 
- getResourceAsStream(fileName.substring(1)); - } - if (in == null) { - throw new FileNotFoundException("resource " + fileName); - } - return in; - } - // otherwise an URL is assumed - URL url = new URL(name); - return url.openStream(); - } - FileInputStream in = new FileInputStream(name); - IOUtils.trace("openFileInputStream", name, in); - return in; - } - - /** - * Call the garbage collection and run finalization. This close all files - * that were not closed, and are no longer referenced. - */ - static void freeMemoryAndFinalize() { - IOUtils.trace("freeMemoryAndFinalize", null, null); - Runtime rt = Runtime.getRuntime(); - long mem = rt.freeMemory(); - for (int i = 0; i < 16; i++) { - rt.gc(); - long now = rt.freeMemory(); - rt.runFinalization(); - if (now == mem) { - break; - } - mem = now; - } - } - - @Override - public FileChannel open(String mode) throws IOException { - FileDisk f; - try { - f = new FileDisk(name, mode); - IOUtils.trace("open", name, f); - } catch (IOException e) { - freeMemoryAndFinalize(); - try { - f = new FileDisk(name, mode); - } catch (IOException e2) { - throw e; - } - } - return f; - } - - @Override - public String getScheme() { - return "file"; - } - - @Override - public FilePath createTempFile(String suffix, boolean inTempDir) throws IOException { - String fileName = name + "."; - String prefix = new File(fileName).getName(); - File dir; - if (inTempDir) { - dir = new File(System.getProperty("java.io.tmpdir", ".")); - } else { - dir = new File(fileName).getAbsoluteFile().getParentFile(); - } - FileUtils.createDirectories(dir.getAbsolutePath()); - while (true) { - File f = new File(dir, prefix + getNextTempFileNamePart(false) + suffix); - if (f.exists() || !f.createNewFile()) { - // in theory, the random number could collide - getNextTempFileNamePart(true); - continue; - } - return get(f.getCanonicalPath()); - } - } - -} - -/** - * Uses java.io.RandomAccessFile to access a file. 
- */ -class FileDisk extends FileBase { - - private final RandomAccessFile file; - private final String name; - private final boolean readOnly; - - FileDisk(String fileName, String mode) throws FileNotFoundException { - this.file = new RandomAccessFile(fileName, mode); - this.name = fileName; - this.readOnly = mode.equals("r"); - } - - @Override - public void force(boolean metaData) throws IOException { - String m = SysProperties.SYNC_METHOD; - if ("".equals(m)) { - // do nothing - } else if ("sync".equals(m)) { - file.getFD().sync(); - } else if ("force".equals(m)) { - file.getChannel().force(true); - } else if ("forceFalse".equals(m)) { - file.getChannel().force(false); - } else { - file.getFD().sync(); - } - } - - @Override - public FileChannel truncate(long newLength) throws IOException { - // compatibility with JDK FileChannel#truncate - if (readOnly) { - throw new NonWritableChannelException(); - } - /* - * RandomAccessFile.setLength() does not always work here since Java 9 for - * unknown reason so use FileChannel.truncate(). 
- */ - file.getChannel().truncate(newLength); - return this; - } - - @Override - public synchronized FileLock tryLock(long position, long size, - boolean shared) throws IOException { - return file.getChannel().tryLock(position, size, shared); - } - - @Override - public void implCloseChannel() throws IOException { - file.close(); - } - - @Override - public long position() throws IOException { - return file.getFilePointer(); - } - - @Override - public long size() throws IOException { - return file.length(); - } - - @Override - public int read(ByteBuffer dst) throws IOException { - int len = file.read(dst.array(), dst.arrayOffset() + dst.position(), - dst.remaining()); - if (len > 0) { - dst.position(dst.position() + len); - } - return len; - } - - @Override - public FileChannel position(long pos) throws IOException { - file.seek(pos); - return this; - } - - @Override - public int write(ByteBuffer src) throws IOException { - int len = src.remaining(); - file.write(src.array(), src.arrayOffset() + src.position(), len); - src.position(src.position() + len); - return len; - } - - @Override - public String toString() { - return name; - } - -} diff --git a/h2/src/main/org/h2/store/fs/FilePathEncrypt.java b/h2/src/main/org/h2/store/fs/FilePathEncrypt.java deleted file mode 100644 index f1959d679b..0000000000 --- a/h2/src/main/org/h2/store/fs/FilePathEncrypt.java +++ /dev/null @@ -1,529 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.store.fs; - -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; - -import org.h2.security.AES; -import org.h2.security.BlockCipher; -import org.h2.security.SHA256; -import org.h2.util.MathUtils; - -/** - * An encrypted file. - */ -public class FilePathEncrypt extends FilePathWrapper { - - private static final String SCHEME = "encrypt"; - - /** - * Register this file system. - */ - public static void register() { - FilePath.register(new FilePathEncrypt()); - } - - @Override - public FileChannel open(String mode) throws IOException { - String[] parsed = parse(name); - FileChannel file = FileUtils.open(parsed[1], mode); - byte[] passwordBytes = parsed[0].getBytes(StandardCharsets.UTF_8); - return new FileEncrypt(name, passwordBytes, file); - } - - @Override - public String getScheme() { - return SCHEME; - } - - @Override - protected String getPrefix() { - String[] parsed = parse(name); - return getScheme() + ":" + parsed[0] + ":"; - } - - @Override - public FilePath unwrap(String fileName) { - return FilePath.get(parse(fileName)[1]); - } - - @Override - public long size() { - long size = getBase().size() - FileEncrypt.HEADER_LENGTH; - size = Math.max(0, size); - if ((size & FileEncrypt.BLOCK_SIZE_MASK) != 0) { - size -= FileEncrypt.BLOCK_SIZE; - } - return size; - } - - @Override - public OutputStream newOutputStream(boolean append) throws IOException { - return new FileChannelOutputStream(open("rw"), append); - } - - @Override - public InputStream newInputStream() throws IOException { - return new FileChannelInputStream(open("r"), true); - } - - /** - * Split the file name into algorithm, password, and base file name. 
- * - * @param fileName the file name - * @return an array with algorithm, password, and base file name - */ - private String[] parse(String fileName) { - if (!fileName.startsWith(getScheme())) { - throw new IllegalArgumentException(fileName + - " doesn't start with " + getScheme()); - } - fileName = fileName.substring(getScheme().length() + 1); - int idx = fileName.indexOf(':'); - String password; - if (idx < 0) { - throw new IllegalArgumentException(fileName + - " doesn't contain encryption algorithm and password"); - } - password = fileName.substring(0, idx); - fileName = fileName.substring(idx + 1); - return new String[] { password, fileName }; - } - - /** - * Convert a char array to a byte array, in UTF-16 format. The char array is - * not cleared after use (this must be done by the caller). - * - * @param passwordChars the password characters - * @return the byte array - */ - public static byte[] getPasswordBytes(char[] passwordChars) { - // using UTF-16 - int len = passwordChars.length; - byte[] password = new byte[len * 2]; - for (int i = 0; i < len; i++) { - char c = passwordChars[i]; - password[i + i] = (byte) (c >>> 8); - password[i + i + 1] = (byte) c; - } - return password; - } - - /** - * An encrypted file with a read cache. - */ - public static class FileEncrypt extends FileBase { - - /** - * The block size. - */ - static final int BLOCK_SIZE = 4096; - - /** - * The block size bit mask. - */ - static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1; - - /** - * The length of the file header. Using a smaller header is possible, - * but would mean reads and writes are not aligned to the block size. - */ - static final int HEADER_LENGTH = BLOCK_SIZE; - - private static final byte[] HEADER = "H2encrypt\n".getBytes(); - private static final int SALT_POS = HEADER.length; - - /** - * The length of the salt, in bytes. - */ - private static final int SALT_LENGTH = 8; - - /** - * The number of iterations. 
It is relatively low; a higher value would - * slow down opening files on Android too much. - */ - private static final int HASH_ITERATIONS = 10; - - private final FileChannel base; - - /** - * The current position within the file, from a user perspective. - */ - private long pos; - - /** - * The current file size, from a user perspective. - */ - private long size; - - private final String name; - - private XTS xts; - - private byte[] encryptionKey; - - public FileEncrypt(String name, byte[] encryptionKey, FileChannel base) { - // don't do any read or write operations here, because they could - // fail if the file is locked, and we want to give the caller a - // chance to lock the file first - this.name = name; - this.base = base; - this.encryptionKey = encryptionKey; - } - - private void init() throws IOException { - if (xts != null) { - return; - } - this.size = base.size() - HEADER_LENGTH; - boolean newFile = size < 0; - byte[] salt; - if (newFile) { - byte[] header = Arrays.copyOf(HEADER, BLOCK_SIZE); - salt = MathUtils.secureRandomBytes(SALT_LENGTH); - System.arraycopy(salt, 0, header, SALT_POS, salt.length); - writeFully(base, 0, ByteBuffer.wrap(header)); - size = 0; - } else { - salt = new byte[SALT_LENGTH]; - readFully(base, SALT_POS, ByteBuffer.wrap(salt)); - if ((size & BLOCK_SIZE_MASK) != 0) { - size -= BLOCK_SIZE; - } - } - AES cipher = new AES(); - cipher.setKey(SHA256.getPBKDF2( - encryptionKey, salt, HASH_ITERATIONS, 16)); - encryptionKey = null; - xts = new XTS(cipher); - } - - @Override - protected void implCloseChannel() throws IOException { - base.close(); - } - - @Override - public FileChannel position(long newPosition) throws IOException { - this.pos = newPosition; - return this; - } - - @Override - public long position() throws IOException { - return pos; - } - - @Override - public int read(ByteBuffer dst) throws IOException { - int len = read(dst, pos); - if (len > 0) { - pos += len; - } - return len; - } - - @Override - public int 
read(ByteBuffer dst, long position) throws IOException { - int len = dst.remaining(); - if (len == 0) { - return 0; - } - init(); - len = (int) Math.min(len, size - position); - if (position >= size) { - return -1; - } else if (position < 0) { - throw new IllegalArgumentException("pos: " + position); - } - if ((position & BLOCK_SIZE_MASK) != 0 || - (len & BLOCK_SIZE_MASK) != 0) { - // either the position or the len is unaligned: - // read aligned, and then truncate - long p = position / BLOCK_SIZE * BLOCK_SIZE; - int offset = (int) (position - p); - int l = (len + offset + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE; - ByteBuffer temp = ByteBuffer.allocate(l); - readInternal(temp, p, l); - temp.flip(); - temp.limit(offset + len); - temp.position(offset); - dst.put(temp); - return len; - } - readInternal(dst, position, len); - return len; - } - - private void readInternal(ByteBuffer dst, long position, int len) - throws IOException { - int x = dst.position(); - readFully(base, position + HEADER_LENGTH, dst); - long block = position / BLOCK_SIZE; - while (len > 0) { - xts.decrypt(block++, BLOCK_SIZE, dst.array(), dst.arrayOffset() + x); - x += BLOCK_SIZE; - len -= BLOCK_SIZE; - } - } - - private static void readFully(FileChannel file, long pos, ByteBuffer dst) - throws IOException { - do { - int len = file.read(dst, pos); - if (len < 0) { - throw new EOFException(); - } - pos += len; - } while (dst.remaining() > 0); - } - - @Override - public int write(ByteBuffer src, long position) throws IOException { - init(); - int len = src.remaining(); - if ((position & BLOCK_SIZE_MASK) != 0 || - (len & BLOCK_SIZE_MASK) != 0) { - // either the position or the len is unaligned: - // read aligned, and then truncate - long p = position / BLOCK_SIZE * BLOCK_SIZE; - int offset = (int) (position - p); - int l = (len + offset + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE; - ByteBuffer temp = ByteBuffer.allocate(l); - int available = (int) (size - p + BLOCK_SIZE - 1) / BLOCK_SIZE * 
BLOCK_SIZE; - int readLen = Math.min(l, available); - if (readLen > 0) { - readInternal(temp, p, readLen); - temp.rewind(); - } - temp.limit(offset + len); - temp.position(offset); - temp.put(src); - temp.limit(l); - temp.rewind(); - writeInternal(temp, p, l); - long p2 = position + len; - size = Math.max(size, p2); - int plus = (int) (size & BLOCK_SIZE_MASK); - if (plus > 0) { - temp = ByteBuffer.allocate(plus); - writeFully(base, p + HEADER_LENGTH + l, temp); - } - return len; - } - writeInternal(src, position, len); - long p2 = position + len; - size = Math.max(size, p2); - return len; - } - - private void writeInternal(ByteBuffer src, long position, int len) - throws IOException { - ByteBuffer crypt = ByteBuffer.allocate(len); - crypt.put(src); - crypt.flip(); - long block = position / BLOCK_SIZE; - int x = 0, l = len; - while (l > 0) { - xts.encrypt(block++, BLOCK_SIZE, crypt.array(), crypt.arrayOffset() + x); - x += BLOCK_SIZE; - l -= BLOCK_SIZE; - } - writeFully(base, position + HEADER_LENGTH, crypt); - } - - private static void writeFully(FileChannel file, long pos, - ByteBuffer src) throws IOException { - int off = 0; - do { - int len = file.write(src, pos + off); - off += len; - } while (src.remaining() > 0); - } - - @Override - public int write(ByteBuffer src) throws IOException { - int len = write(src, pos); - if (len > 0) { - pos += len; - } - return len; - } - - @Override - public long size() throws IOException { - init(); - return size; - } - - @Override - public FileChannel truncate(long newSize) throws IOException { - init(); - if (newSize > size) { - return this; - } - if (newSize < 0) { - throw new IllegalArgumentException("newSize: " + newSize); - } - int offset = (int) (newSize & BLOCK_SIZE_MASK); - if (offset > 0) { - base.truncate(newSize + HEADER_LENGTH + BLOCK_SIZE); - } else { - base.truncate(newSize + HEADER_LENGTH); - } - this.size = newSize; - pos = Math.min(pos, size); - return this; - } - - @Override - public void force(boolean 
metaData) throws IOException { - base.force(metaData); - } - - @Override - public FileLock tryLock(long position, long size, boolean shared) - throws IOException { - return base.tryLock(position, size, shared); - } - - @Override - public String toString() { - return name; - } - - } - - /** - * An XTS implementation as described in - * IEEE P1619 (Standard Architecture for Encrypted Shared Storage Media). - * See also - * http://axelkenzo.ru/downloads/1619-2007-NIST-Submission.pdf - */ - static class XTS { - - /** - * Galois field feedback. - */ - private static final int GF_128_FEEDBACK = 0x87; - - /** - * The AES encryption block size. - */ - private static final int CIPHER_BLOCK_SIZE = 16; - - private final BlockCipher cipher; - - XTS(BlockCipher cipher) { - this.cipher = cipher; - } - - /** - * Encrypt the data. - * - * @param id the (sector) id - * @param len the number of bytes - * @param data the data - * @param offset the offset within the data - */ - void encrypt(long id, int len, byte[] data, int offset) { - byte[] tweak = initTweak(id); - int i = 0; - for (; i + CIPHER_BLOCK_SIZE <= len; i += CIPHER_BLOCK_SIZE) { - if (i > 0) { - updateTweak(tweak); - } - xorTweak(data, i + offset, tweak); - cipher.encrypt(data, i + offset, CIPHER_BLOCK_SIZE); - xorTweak(data, i + offset, tweak); - } - if (i < len) { - updateTweak(tweak); - swap(data, i + offset, i - CIPHER_BLOCK_SIZE + offset, len - i); - xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweak); - cipher.encrypt(data, i - CIPHER_BLOCK_SIZE + offset, CIPHER_BLOCK_SIZE); - xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweak); - } - } - - /** - * Decrypt the data. 
- * - * @param id the (sector) id - * @param len the number of bytes - * @param data the data - * @param offset the offset within the data - */ - void decrypt(long id, int len, byte[] data, int offset) { - byte[] tweak = initTweak(id), tweakEnd = tweak; - int i = 0; - for (; i + CIPHER_BLOCK_SIZE <= len; i += CIPHER_BLOCK_SIZE) { - if (i > 0) { - updateTweak(tweak); - if (i + CIPHER_BLOCK_SIZE + CIPHER_BLOCK_SIZE > len && - i + CIPHER_BLOCK_SIZE < len) { - tweakEnd = tweak.clone(); - updateTweak(tweak); - } - } - xorTweak(data, i + offset, tweak); - cipher.decrypt(data, i + offset, CIPHER_BLOCK_SIZE); - xorTweak(data, i + offset, tweak); - } - if (i < len) { - swap(data, i, i - CIPHER_BLOCK_SIZE + offset, len - i + offset); - xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweakEnd); - cipher.decrypt(data, i - CIPHER_BLOCK_SIZE + offset, CIPHER_BLOCK_SIZE); - xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweakEnd); - } - } - - private byte[] initTweak(long id) { - byte[] tweak = new byte[CIPHER_BLOCK_SIZE]; - for (int j = 0; j < CIPHER_BLOCK_SIZE; j++, id >>>= 8) { - tweak[j] = (byte) (id & 0xff); - } - cipher.encrypt(tweak, 0, CIPHER_BLOCK_SIZE); - return tweak; - } - - private static void xorTweak(byte[] data, int pos, byte[] tweak) { - for (int i = 0; i < CIPHER_BLOCK_SIZE; i++) { - data[pos + i] ^= tweak[i]; - } - } - - private static void updateTweak(byte[] tweak) { - byte ci = 0, co = 0; - for (int i = 0; i < CIPHER_BLOCK_SIZE; i++) { - co = (byte) ((tweak[i] >> 7) & 1); - tweak[i] = (byte) (((tweak[i] << 1) + ci) & 255); - ci = co; - } - if (co != 0) { - tweak[0] ^= GF_128_FEEDBACK; - } - } - - private static void swap(byte[] data, int source, int target, int len) { - for (int i = 0; i < len; i++) { - byte temp = data[source + i]; - data[source + i] = data[target + i]; - data[target + i] = temp; - } - } - - } - -} diff --git a/h2/src/main/org/h2/store/fs/FilePathMem.java b/h2/src/main/org/h2/store/fs/FilePathMem.java deleted file mode 100644 index 
f5b7b1281f..0000000000 --- a/h2/src/main/org/h2/store/fs/FilePathMem.java +++ /dev/null @@ -1,803 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store.fs; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.nio.channels.NonWritableChannelException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.concurrent.atomic.AtomicReference; -import org.h2.api.ErrorCode; -import org.h2.compress.CompressLZF; -import org.h2.message.DbException; -import org.h2.util.MathUtils; - -/** - * This file system keeps files fully in memory. There is an option to compress - * file blocks to save memory. 
- */ -public class FilePathMem extends FilePath { - - private static final TreeMap MEMORY_FILES = - new TreeMap<>(); - private static final FileMemData DIRECTORY = new FileMemData("", false); - - @Override - public FilePathMem getPath(String path) { - FilePathMem p = new FilePathMem(); - p.name = getCanonicalPath(path); - return p; - } - - @Override - public long size() { - return getMemoryFile().length(); - } - - @Override - public void moveTo(FilePath newName, boolean atomicReplace) { - synchronized (MEMORY_FILES) { - if (!atomicReplace && !newName.name.equals(name) && - MEMORY_FILES.containsKey(newName.name)) { - throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName + " (exists)"); - } - FileMemData f = getMemoryFile(); - f.setName(newName.name); - MEMORY_FILES.remove(name); - MEMORY_FILES.put(newName.name, f); - } - } - - @Override - public boolean createFile() { - synchronized (MEMORY_FILES) { - if (exists()) { - return false; - } - getMemoryFile(); - } - return true; - } - - @Override - public boolean exists() { - if (isRoot()) { - return true; - } - synchronized (MEMORY_FILES) { - return MEMORY_FILES.get(name) != null; - } - } - - @Override - public void delete() { - if (isRoot()) { - return; - } - synchronized (MEMORY_FILES) { - FileMemData old = MEMORY_FILES.remove(name); - if (old != null) { - old.truncate(0); - } - } - } - - @Override - public List newDirectoryStream() { - ArrayList list = new ArrayList<>(); - synchronized (MEMORY_FILES) { - for (String n : MEMORY_FILES.tailMap(name).keySet()) { - if (n.startsWith(name)) { - if (!n.equals(name) && n.indexOf('/', name.length() + 1) < 0) { - list.add(getPath(n)); - } - } else { - break; - } - } - return list; - } - } - - @Override - public boolean setReadOnly() { - return getMemoryFile().setReadOnly(); - } - - @Override - public boolean canWrite() { - return getMemoryFile().canWrite(); - } - - @Override - public FilePathMem getParent() { - int idx = name.lastIndexOf('/'); - return idx < 0 ? 
null : getPath(name.substring(0, idx)); - } - - @Override - public boolean isDirectory() { - if (isRoot()) { - return true; - } - synchronized (MEMORY_FILES) { - FileMemData d = MEMORY_FILES.get(name); - return d == DIRECTORY; - } - } - - @Override - public boolean isAbsolute() { - // TODO relative files are not supported - return true; - } - - @Override - public FilePathMem toRealPath() { - return this; - } - - @Override - public long lastModified() { - return getMemoryFile().getLastModified(); - } - - @Override - public void createDirectory() { - if (exists()) { - throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, - name + " (a file with this name already exists)"); - } - synchronized (MEMORY_FILES) { - MEMORY_FILES.put(name, DIRECTORY); - } - } - - @Override - public OutputStream newOutputStream(boolean append) throws IOException { - FileMemData obj = getMemoryFile(); - FileMem m = new FileMem(obj, false); - return new FileChannelOutputStream(m, append); - } - - @Override - public InputStream newInputStream() { - FileMemData obj = getMemoryFile(); - FileMem m = new FileMem(obj, true); - return new FileChannelInputStream(m, true); - } - - @Override - public FileChannel open(String mode) { - FileMemData obj = getMemoryFile(); - return new FileMem(obj, "r".equals(mode)); - } - - private FileMemData getMemoryFile() { - synchronized (MEMORY_FILES) { - FileMemData m = MEMORY_FILES.get(name); - if (m == DIRECTORY) { - throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, - name + " (a directory with this name already exists)"); - } - if (m == null) { - m = new FileMemData(name, compressed()); - MEMORY_FILES.put(name, m); - } - return m; - } - } - - private boolean isRoot() { - return name.equals(getScheme() + ":"); - } - - /** - * Get the canonical path for this file name. 
- * - * @param fileName the file name - * @return the canonical path - */ - protected static String getCanonicalPath(String fileName) { - fileName = fileName.replace('\\', '/'); - int idx = fileName.indexOf(':') + 1; - if (fileName.length() > idx && fileName.charAt(idx) != '/') { - fileName = fileName.substring(0, idx) + "/" + fileName.substring(idx); - } - return fileName; - } - - @Override - public String getScheme() { - return "memFS"; - } - - /** - * Whether the file should be compressed. - * - * @return if it should be compressed. - */ - boolean compressed() { - return false; - } - -} - -/** - * A memory file system that compresses blocks to conserve memory. - */ -class FilePathMemLZF extends FilePathMem { - - @Override - public FilePathMem getPath(String path) { - FilePathMemLZF p = new FilePathMemLZF(); - p.name = getCanonicalPath(path); - return p; - } - - @Override - boolean compressed() { - return true; - } - - @Override - public String getScheme() { - return "memLZF"; - } - -} - -/** - * This class represents an in-memory file. - */ -class FileMem extends FileBase { - - /** - * The file data. 
- */ - FileMemData data; - - private final boolean readOnly; - private long pos; - - FileMem(FileMemData data, boolean readOnly) { - this.data = data; - this.readOnly = readOnly; - } - - @Override - public long size() { - return data.length(); - } - - @Override - public FileChannel truncate(long newLength) throws IOException { - // compatibility with JDK FileChannel#truncate - if (readOnly) { - throw new NonWritableChannelException(); - } - if (data == null) { - throw new ClosedChannelException(); - } - if (newLength < size()) { - data.touch(readOnly); - pos = Math.min(pos, newLength); - data.truncate(newLength); - } - return this; - } - - @Override - public FileChannel position(long newPos) { - this.pos = newPos; - return this; - } - - @Override - public int write(ByteBuffer src, long position) throws IOException { - if (data == null) { - throw new ClosedChannelException(); - } - int len = src.remaining(); - if (len == 0) { - return 0; - } - data.touch(readOnly); - data.readWrite(position, src.array(), - src.arrayOffset() + src.position(), len, true); - src.position(src.position() + len); - return len; - } - - @Override - public int write(ByteBuffer src) throws IOException { - if (data == null) { - throw new ClosedChannelException(); - } - int len = src.remaining(); - if (len == 0) { - return 0; - } - data.touch(readOnly); - pos = data.readWrite(pos, src.array(), - src.arrayOffset() + src.position(), len, true); - src.position(src.position() + len); - return len; - } - - @Override - public int read(ByteBuffer dst, long position) throws IOException { - if (data == null) { - throw new ClosedChannelException(); - } - int len = dst.remaining(); - if (len == 0) { - return 0; - } - long newPos = data.readWrite(position, dst.array(), - dst.arrayOffset() + dst.position(), len, false); - len = (int) (newPos - position); - if (len <= 0) { - return -1; - } - dst.position(dst.position() + len); - return len; - } - - @Override - public int read(ByteBuffer dst) throws 
IOException { - if (data == null) { - throw new ClosedChannelException(); - } - int len = dst.remaining(); - if (len == 0) { - return 0; - } - long newPos = data.readWrite(pos, dst.array(), - dst.arrayOffset() + dst.position(), len, false); - len = (int) (newPos - pos); - if (len <= 0) { - return -1; - } - dst.position(dst.position() + len); - pos = newPos; - return len; - } - - @Override - public long position() { - return pos; - } - - @Override - public void implCloseChannel() throws IOException { - pos = 0; - data = null; - } - - @Override - public void force(boolean metaData) throws IOException { - // do nothing - } - - @Override - public synchronized FileLock tryLock(long position, long size, - boolean shared) throws IOException { - if (data == null) { - throw new ClosedChannelException(); - } - if (shared) { - if (!data.lockShared()) { - return null; - } - } else { - if (!data.lockExclusive()) { - return null; - } - } - - return new FileLock(FakeFileChannel.INSTANCE, position, size, shared) { - - @Override - public boolean isValid() { - return true; - } - - @Override - public void release() throws IOException { - data.unlock(); - } - }; - } - - @Override - public String toString() { - return data == null ? "" : data.getName(); - } - -} - -/** - * This class contains the data of an in-memory random access file. - * Data compression using the LZF algorithm is supported as well. 
- */ -class FileMemData { - - private static final int CACHE_SIZE = 8; - private static final int BLOCK_SIZE_SHIFT = 10; - private static final int BLOCK_SIZE = 1 << BLOCK_SIZE_SHIFT; - private static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1; - private static final CompressLZF LZF = new CompressLZF(); - private static final byte[] BUFFER = new byte[BLOCK_SIZE * 2]; - private static final byte[] COMPRESSED_EMPTY_BLOCK; - - private static final Cache COMPRESS_LATER = - new Cache<>(CACHE_SIZE); - - private String name; - private final int id; - private final boolean compress; - private long length; - private AtomicReference[] data; - private long lastModified; - private boolean isReadOnly; - private boolean isLockedExclusive; - private int sharedLockCount; - - static { - byte[] n = new byte[BLOCK_SIZE]; - int len = LZF.compress(n, BLOCK_SIZE, BUFFER, 0); - COMPRESSED_EMPTY_BLOCK = Arrays.copyOf(BUFFER, len); - } - - @SuppressWarnings("unchecked") - FileMemData(String name, boolean compress) { - this.name = name; - this.id = name.hashCode(); - this.compress = compress; - this.data = new AtomicReference[0]; - lastModified = System.currentTimeMillis(); - } - - /** - * Get the page if it exists. - * - * @param page the page id - * @return the byte array, or null - */ - byte[] getPage(int page) { - AtomicReference[] b = data; - if (page >= b.length) { - return null; - } - return b[page].get(); - } - - /** - * Set the page data. - * - * @param page the page id - * @param oldData the old data - * @param newData the new data - * @param force whether the data should be overwritten even if the old data - * doesn't match - */ - void setPage(int page, byte[] oldData, byte[] newData, boolean force) { - AtomicReference[] b = data; - if (page >= b.length) { - return; - } - if (force) { - b[page].set(newData); - } else { - b[page].compareAndSet(oldData, newData); - } - } - - int getId() { - return id; - } - - /** - * Lock the file in exclusive mode if possible. 
- * - * @return if locking was successful - */ - synchronized boolean lockExclusive() { - if (sharedLockCount > 0 || isLockedExclusive) { - return false; - } - isLockedExclusive = true; - return true; - } - - /** - * Lock the file in shared mode if possible. - * - * @return if locking was successful - */ - synchronized boolean lockShared() { - if (isLockedExclusive) { - return false; - } - sharedLockCount++; - return true; - } - - /** - * Unlock the file. - */ - synchronized void unlock() throws IOException { - if (isLockedExclusive) { - isLockedExclusive = false; - } else if (sharedLockCount > 0) { - sharedLockCount--; - } else { - throw new IOException("not locked"); - } - } - - /** - * This small cache compresses the data if an element leaves the cache. - */ - static class Cache extends LinkedHashMap { - - private static final long serialVersionUID = 1L; - private final int size; - - Cache(int size) { - super(size, (float) 0.75, true); - this.size = size; - } - - @Override - public synchronized V put(K key, V value) { - return super.put(key, value); - } - - @Override - protected boolean removeEldestEntry(Map.Entry eldest) { - if (size() < size) { - return false; - } - CompressItem c = (CompressItem) eldest.getKey(); - c.file.compress(c.page); - return true; - } - } - - /** - * Points to a block of bytes that needs to be compressed. - */ - static class CompressItem { - - /** - * The file. - */ - FileMemData file; - - /** - * The page to compress. 
- */ - int page; - - @Override - public int hashCode() { - return page ^ file.getId(); - } - - @Override - public boolean equals(Object o) { - if (o instanceof CompressItem) { - CompressItem c = (CompressItem) o; - return c.page == page && c.file == file; - } - return false; - } - - } - - private void compressLater(int page) { - CompressItem c = new CompressItem(); - c.file = this; - c.page = page; - synchronized (LZF) { - COMPRESS_LATER.put(c, c); - } - } - - private byte[] expand(int page) { - byte[] d = getPage(page); - if (d.length == BLOCK_SIZE) { - return d; - } - byte[] out = new byte[BLOCK_SIZE]; - if (d != COMPRESSED_EMPTY_BLOCK) { - synchronized (LZF) { - LZF.expand(d, 0, d.length, out, 0, BLOCK_SIZE); - } - } - setPage(page, d, out, false); - return out; - } - - /** - * Compress the data in a byte array. - * - * @param page which page to compress - */ - void compress(int page) { - byte[] old = getPage(page); - if (old == null || old.length != BLOCK_SIZE) { - // not yet initialized or already compressed - return; - } - synchronized (LZF) { - int len = LZF.compress(old, BLOCK_SIZE, BUFFER, 0); - if (len <= BLOCK_SIZE) { - byte[] d = Arrays.copyOf(BUFFER, len); - // maybe data was changed in the meantime - setPage(page, old, d, false); - } - } - } - - /** - * Update the last modified time. - * - * @param openReadOnly if the file was opened in read-only mode - */ - void touch(boolean openReadOnly) throws IOException { - if (isReadOnly || openReadOnly) { - throw new IOException("Read only"); - } - lastModified = System.currentTimeMillis(); - } - - /** - * Get the file length. - * - * @return the length - */ - long length() { - return length; - } - - /** - * Truncate the file. 
- * - * @param newLength the new length - */ - void truncate(long newLength) { - changeLength(newLength); - long end = MathUtils.roundUpLong(newLength, BLOCK_SIZE); - if (end != newLength) { - int lastPage = (int) (newLength >>> BLOCK_SIZE_SHIFT); - byte[] d = expand(lastPage); - byte[] d2 = Arrays.copyOf(d, d.length); - for (int i = (int) (newLength & BLOCK_SIZE_MASK); i < BLOCK_SIZE; i++) { - d2[i] = 0; - } - setPage(lastPage, d, d2, true); - if (compress) { - compressLater(lastPage); - } - } - } - - private void changeLength(long len) { - length = len; - len = MathUtils.roundUpLong(len, BLOCK_SIZE); - int blocks = (int) (len >>> BLOCK_SIZE_SHIFT); - if (blocks != data.length) { - AtomicReference[] n = Arrays.copyOf(data, blocks); - for (int i = data.length; i < blocks; i++) { - n[i] = new AtomicReference<>(COMPRESSED_EMPTY_BLOCK); - } - data = n; - } - } - - /** - * Read or write. - * - * @param pos the position - * @param b the byte array - * @param off the offset within the byte array - * @param len the number of bytes - * @param write true for writing - * @return the new position - */ - long readWrite(long pos, byte[] b, int off, int len, boolean write) { - long end = pos + len; - if (end > length) { - if (write) { - changeLength(end); - } else { - len = (int) (length - pos); - } - } - while (len > 0) { - int l = (int) Math.min(len, BLOCK_SIZE - (pos & BLOCK_SIZE_MASK)); - int page = (int) (pos >>> BLOCK_SIZE_SHIFT); - byte[] block = expand(page); - int blockOffset = (int) (pos & BLOCK_SIZE_MASK); - if (write) { - byte[] p2 = Arrays.copyOf(block, block.length); - System.arraycopy(b, off, p2, blockOffset, l); - setPage(page, block, p2, true); - } else { - System.arraycopy(block, blockOffset, b, off, l); - } - if (compress) { - compressLater(page); - } - off += l; - pos += l; - len -= l; - } - return pos; - } - - /** - * Set the file name. 
- * - * @param name the name - */ - void setName(String name) { - this.name = name; - } - - /** - * Get the file name - * - * @return the name - */ - String getName() { - return name; - } - - /** - * Get the last modified time. - * - * @return the time - */ - long getLastModified() { - return lastModified; - } - - /** - * Check whether writing is allowed. - * - * @return true if it is - */ - boolean canWrite() { - return !isReadOnly; - } - - /** - * Set the read-only flag. - * - * @return true - */ - boolean setReadOnly() { - isReadOnly = true; - return true; - } - -} - - diff --git a/h2/src/main/org/h2/store/fs/FilePathNio.java b/h2/src/main/org/h2/store/fs/FilePathNio.java deleted file mode 100644 index 331960215b..0000000000 --- a/h2/src/main/org/h2/store/fs/FilePathNio.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store.fs; - -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.nio.channels.NonWritableChannelException; - -/** - * This file system stores files on disk and uses java.nio to access the files. - * This class uses FileChannel. - */ -public class FilePathNio extends FilePathWrapper { - - @Override - public FileChannel open(String mode) throws IOException { - return new FileNio(name.substring(getScheme().length() + 1), mode); - } - - @Override - public String getScheme() { - return "nio"; - } - -} - -/** - * File which uses NIO FileChannel. 
- */ -class FileNio extends FileBase { - - private final String name; - private final FileChannel channel; - - FileNio(String fileName, String mode) throws IOException { - this.name = fileName; - channel = new RandomAccessFile(fileName, mode).getChannel(); - } - - @Override - public void implCloseChannel() throws IOException { - channel.close(); - } - - @Override - public long position() throws IOException { - return channel.position(); - } - - @Override - public long size() throws IOException { - return channel.size(); - } - - @Override - public int read(ByteBuffer dst) throws IOException { - return channel.read(dst); - } - - @Override - public FileChannel position(long pos) throws IOException { - channel.position(pos); - return this; - } - - @Override - public int read(ByteBuffer dst, long position) throws IOException { - return channel.read(dst, position); - } - - @Override - public int write(ByteBuffer src, long position) throws IOException { - return channel.write(src, position); - } - - @Override - public FileChannel truncate(long newLength) throws IOException { - long size = channel.size(); - if (newLength < size) { - long pos = channel.position(); - channel.truncate(newLength); - long newPos = channel.position(); - if (pos < newLength) { - // position should stay - // in theory, this should not be needed - if (newPos != pos) { - channel.position(pos); - } - } else if (newPos > newLength) { - // looks like a bug in this FileChannel implementation, as - // the documentation says the position needs to be changed - channel.position(newLength); - } - } - return this; - } - - @Override - public void force(boolean metaData) throws IOException { - channel.force(metaData); - } - - @Override - public int write(ByteBuffer src) throws IOException { - try { - return channel.write(src); - } catch (NonWritableChannelException e) { - throw new IOException("read only"); - } - } - - @Override - public synchronized FileLock tryLock(long position, long size, - boolean shared) 
throws IOException { - return channel.tryLock(position, size, shared); - } - - @Override - public String toString() { - return "nio:" + name; - } - -} diff --git a/h2/src/main/org/h2/store/fs/FilePathNioMem.java b/h2/src/main/org/h2/store/fs/FilePathNioMem.java deleted file mode 100644 index 1334b936a9..0000000000 --- a/h2/src/main/org/h2/store/fs/FilePathNioMem.java +++ /dev/null @@ -1,814 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store.fs; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.nio.channels.NonWritableChannelException; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.h2.api.ErrorCode; -import org.h2.compress.CompressLZF; -import org.h2.message.DbException; -import org.h2.util.MathUtils; - -/** - * This file system keeps files fully in memory. There is an option to compress - * file blocks to save memory. - */ -public class FilePathNioMem extends FilePath { - - private static final TreeMap MEMORY_FILES = - new TreeMap<>(); - - /** - * The percentage of uncompressed (cached) entries. 
- */ - float compressLaterCachePercent = 1; - - @Override - public FilePathNioMem getPath(String path) { - FilePathNioMem p = new FilePathNioMem(); - p.name = getCanonicalPath(path); - return p; - } - - @Override - public long size() { - return getMemoryFile().length(); - } - - @Override - public void moveTo(FilePath newName, boolean atomicReplace) { - synchronized (MEMORY_FILES) { - if (!atomicReplace && !name.equals(newName.name) && - MEMORY_FILES.containsKey(newName.name)) { - throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName + " (exists)"); - } - FileNioMemData f = getMemoryFile(); - f.setName(newName.name); - MEMORY_FILES.remove(name); - MEMORY_FILES.put(newName.name, f); - } - } - - @Override - public boolean createFile() { - synchronized (MEMORY_FILES) { - if (exists()) { - return false; - } - getMemoryFile(); - } - return true; - } - - @Override - public boolean exists() { - if (isRoot()) { - return true; - } - synchronized (MEMORY_FILES) { - return MEMORY_FILES.get(name) != null; - } - } - - @Override - public void delete() { - if (isRoot()) { - return; - } - synchronized (MEMORY_FILES) { - MEMORY_FILES.remove(name); - } - } - - @Override - public List newDirectoryStream() { - ArrayList list = new ArrayList<>(); - synchronized (MEMORY_FILES) { - for (String n : MEMORY_FILES.tailMap(name).keySet()) { - if (n.startsWith(name)) { - list.add(getPath(n)); - } else { - break; - } - } - return list; - } - } - - @Override - public boolean setReadOnly() { - return getMemoryFile().setReadOnly(); - } - - @Override - public boolean canWrite() { - return getMemoryFile().canWrite(); - } - - @Override - public FilePathNioMem getParent() { - int idx = name.lastIndexOf('/'); - return idx < 0 ? 
null : getPath(name.substring(0, idx)); - } - - @Override - public boolean isDirectory() { - if (isRoot()) { - return true; - } - // TODO in memory file system currently - // does not really support directories - synchronized (MEMORY_FILES) { - return MEMORY_FILES.get(name) == null; - } - } - - @Override - public boolean isAbsolute() { - // TODO relative files are not supported - return true; - } - - @Override - public FilePathNioMem toRealPath() { - return this; - } - - @Override - public long lastModified() { - return getMemoryFile().getLastModified(); - } - - @Override - public void createDirectory() { - if (exists() && isDirectory()) { - throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, - name + " (a file with this name already exists)"); - } - // TODO directories are not really supported - } - - @Override - public OutputStream newOutputStream(boolean append) throws IOException { - FileNioMemData obj = getMemoryFile(); - FileNioMem m = new FileNioMem(obj, false); - return new FileChannelOutputStream(m, append); - } - - @Override - public InputStream newInputStream() { - FileNioMemData obj = getMemoryFile(); - FileNioMem m = new FileNioMem(obj, true); - return new FileChannelInputStream(m, true); - } - - @Override - public FileChannel open(String mode) { - FileNioMemData obj = getMemoryFile(); - return new FileNioMem(obj, "r".equals(mode)); - } - - private FileNioMemData getMemoryFile() { - synchronized (MEMORY_FILES) { - FileNioMemData m = MEMORY_FILES.get(name); - if (m == null) { - m = new FileNioMemData(name, compressed(), compressLaterCachePercent); - MEMORY_FILES.put(name, m); - } - return m; - } - } - - protected boolean isRoot() { - return name.equals(getScheme() + ":"); - } - - /** - * Get the canonical path of a file (with backslashes replaced with forward - * slashes). 
- * - * @param fileName the file name - * @return the canonical path - */ - protected static String getCanonicalPath(String fileName) { - fileName = fileName.replace('\\', '/'); - int idx = fileName.lastIndexOf(':') + 1; - if (fileName.length() > idx && fileName.charAt(idx) != '/') { - fileName = fileName.substring(0, idx) + "/" + fileName.substring(idx); - } - return fileName; - } - - @Override - public String getScheme() { - return "nioMemFS"; - } - - /** - * Whether the file should be compressed. - * - * @return true if it should be compressed. - */ - boolean compressed() { - return false; - } - -} - -/** - * A memory file system that compresses blocks to conserve memory. - */ -class FilePathNioMemLZF extends FilePathNioMem { - - @Override - boolean compressed() { - return true; - } - - @Override - public FilePathNioMem getPath(String path) { - if (!path.startsWith(getScheme())) { - throw new IllegalArgumentException(path + - " doesn't start with " + getScheme()); - } - int idx1 = path.indexOf(':'); - int idx2 = path.lastIndexOf(':'); - final FilePathNioMemLZF p = new FilePathNioMemLZF(); - if (idx1 != -1 && idx1 != idx2) { - p.compressLaterCachePercent = Float.parseFloat(path.substring(idx1 + 1, idx2)); - } - p.name = getCanonicalPath(path); - return p; - } - - @Override - protected boolean isRoot() { - return name.lastIndexOf(':') == name.length() - 1; - } - - @Override - public String getScheme() { - return "nioMemLZF"; - } - -} - -/** - * This class represents an in-memory file. - */ -class FileNioMem extends FileBase { - - /** - * The file data. 
- */ - FileNioMemData data; - - private final boolean readOnly; - private long pos; - - FileNioMem(FileNioMemData data, boolean readOnly) { - this.data = data; - this.readOnly = readOnly; - } - - @Override - public long size() { - return data.length(); - } - - @Override - public FileChannel truncate(long newLength) throws IOException { - // compatibility with JDK FileChannel#truncate - if (readOnly) { - throw new NonWritableChannelException(); - } - if (data == null) { - throw new ClosedChannelException(); - } - if (newLength < size()) { - data.touch(readOnly); - pos = Math.min(pos, newLength); - data.truncate(newLength); - } - return this; - } - - @Override - public FileChannel position(long newPos) { - this.pos = (int) newPos; - return this; - } - - @Override - public int write(ByteBuffer src) throws IOException { - if (data == null) { - throw new ClosedChannelException(); - } - int len = src.remaining(); - if (len == 0) { - return 0; - } - data.touch(readOnly); - // offset is 0 because we start writing from src.position() - pos = data.readWrite(pos, src, 0, len, true); - src.position(src.position() + len); - return len; - } - - @Override - public int read(ByteBuffer dst) throws IOException { - if (data == null) { - throw new ClosedChannelException(); - } - int len = dst.remaining(); - if (len == 0) { - return 0; - } - long newPos = data.readWrite(pos, dst, dst.position(), len, false); - len = (int) (newPos - pos); - if (len <= 0) { - return -1; - } - dst.position(dst.position() + len); - pos = newPos; - return len; - } - - @Override - public int read(ByteBuffer dst, long position) throws IOException { - if (data == null) { - throw new ClosedChannelException(); - } - int len = dst.remaining(); - if (len == 0) { - return 0; - } - long newPos; - newPos = data.readWrite(position, dst, dst.position(), len, false); - len = (int) (newPos - position); - if (len <= 0) { - return -1; - } - dst.position(dst.position() + len); - return len; - } - - @Override - public long 
position() { - return pos; - } - - @Override - public void implCloseChannel() throws IOException { - pos = 0; - data = null; - } - - @Override - public void force(boolean metaData) throws IOException { - // do nothing - } - - @Override - public synchronized FileLock tryLock(long position, long size, - boolean shared) throws IOException { - if (data == null) { - throw new ClosedChannelException(); - } - if (shared) { - if (!data.lockShared()) { - return null; - } - } else { - if (!data.lockExclusive()) { - return null; - } - } - - return new FileLock(FakeFileChannel.INSTANCE, position, size, shared) { - - @Override - public boolean isValid() { - return true; - } - - @Override - public void release() throws IOException { - data.unlock(); - } - }; - } - - @Override - public String toString() { - return data == null ? "" : data.getName(); - } - -} - -/** - * This class contains the data of an in-memory random access file. - * Data compression using the LZF algorithm is supported as well. - */ -class FileNioMemData { - - private static final int CACHE_MIN_SIZE = 8; - private static final int BLOCK_SIZE_SHIFT = 16; - - private static final int BLOCK_SIZE = 1 << BLOCK_SIZE_SHIFT; - private static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1; - private static final ByteBuffer COMPRESSED_EMPTY_BLOCK; - - private static final ThreadLocal LZF_THREAD_LOCAL = - new ThreadLocal() { - @Override - protected CompressLZF initialValue() { - return new CompressLZF(); - } - }; - /** the output buffer when compressing */ - private static final ThreadLocal COMPRESS_OUT_BUF_THREAD_LOCAL = - new ThreadLocal() { - @Override - protected byte[] initialValue() { - return new byte[BLOCK_SIZE * 2]; - } - }; - - /** - * The hash code of the name. 
- */ - final int nameHashCode; - - private final CompressLaterCache compressLaterCache = - new CompressLaterCache<>(CACHE_MIN_SIZE); - - private String name; - private final boolean compress; - private final float compressLaterCachePercent; - private long length; - private AtomicReference[] buffers; - private long lastModified; - private boolean isReadOnly; - private boolean isLockedExclusive; - private int sharedLockCount; - private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock(); - - static { - final byte[] n = new byte[BLOCK_SIZE]; - final byte[] output = new byte[BLOCK_SIZE * 2]; - int len = new CompressLZF().compress(n, BLOCK_SIZE, output, 0); - COMPRESSED_EMPTY_BLOCK = ByteBuffer.allocateDirect(len); - COMPRESSED_EMPTY_BLOCK.put(output, 0, len); - } - - @SuppressWarnings("unchecked") - FileNioMemData(String name, boolean compress, float compressLaterCachePercent) { - this.name = name; - this.nameHashCode = name.hashCode(); - this.compress = compress; - this.compressLaterCachePercent = compressLaterCachePercent; - buffers = new AtomicReference[0]; - lastModified = System.currentTimeMillis(); - } - - /** - * Lock the file in exclusive mode if possible. - * - * @return if locking was successful - */ - synchronized boolean lockExclusive() { - if (sharedLockCount > 0 || isLockedExclusive) { - return false; - } - isLockedExclusive = true; - return true; - } - - /** - * Lock the file in shared mode if possible. - * - * @return if locking was successful - */ - synchronized boolean lockShared() { - if (isLockedExclusive) { - return false; - } - sharedLockCount++; - return true; - } - - /** - * Unlock the file. - */ - synchronized void unlock() { - if (isLockedExclusive) { - isLockedExclusive = false; - } else { - sharedLockCount = Math.max(0, sharedLockCount - 1); - } - } - - /** - * This small cache compresses the data if an element leaves the cache. 
- */ - static class CompressLaterCache extends LinkedHashMap { - - private static final long serialVersionUID = 1L; - private int size; - - CompressLaterCache(int size) { - super(size, (float) 0.75, true); - this.size = size; - } - - @Override - public synchronized V put(K key, V value) { - return super.put(key, value); - } - - @Override - protected boolean removeEldestEntry(Map.Entry eldest) { - if (size() < size) { - return false; - } - CompressItem c = (CompressItem) eldest.getKey(); - c.data.compressPage(c.page); - return true; - } - - public void setCacheSize(int size) { - this.size = size; - } - } - - /** - * Represents a compressed item. - */ - static class CompressItem { - - /** - * The file data. - */ - public final FileNioMemData data; - - /** - * The page to compress. - */ - public final int page; - - public CompressItem(FileNioMemData data, int page) { - this.data = data; - this.page = page; - } - - @Override - public int hashCode() { - return page ^ data.nameHashCode; - } - - @Override - public boolean equals(Object o) { - if (o instanceof CompressItem) { - CompressItem c = (CompressItem) o; - return c.data == data && c.page == page; - } - return false; - } - - } - - private void addToCompressLaterCache(int page) { - CompressItem c = new CompressItem(this, page); - compressLaterCache.put(c, c); - } - - private ByteBuffer expandPage(int page) { - final ByteBuffer d = buffers[page].get(); - if (d.capacity() == BLOCK_SIZE) { - // already expanded, or not compressed - return d; - } - synchronized (d) { - if (d.capacity() == BLOCK_SIZE) { - return d; - } - ByteBuffer out = ByteBuffer.allocateDirect(BLOCK_SIZE); - if (d != COMPRESSED_EMPTY_BLOCK) { - d.position(0); - CompressLZF.expand(d, out); - } - buffers[page].compareAndSet(d, out); - return out; - } - } - - /** - * Compress the data in a byte array. 
- * - * @param page which page to compress - */ - void compressPage(int page) { - final ByteBuffer d = buffers[page].get(); - synchronized (d) { - if (d.capacity() != BLOCK_SIZE) { - // already compressed - return; - } - final byte[] compressOutputBuffer = COMPRESS_OUT_BUF_THREAD_LOCAL.get(); - int len = LZF_THREAD_LOCAL.get().compress(d, 0, compressOutputBuffer, 0); - ByteBuffer out = ByteBuffer.allocateDirect(len); - out.put(compressOutputBuffer, 0, len); - buffers[page].compareAndSet(d, out); - } - } - - /** - * Update the last modified time. - * - * @param openReadOnly if the file was opened in read-only mode - */ - void touch(boolean openReadOnly) throws IOException { - if (isReadOnly || openReadOnly) { - throw new IOException("Read only"); - } - lastModified = System.currentTimeMillis(); - } - - /** - * Get the file length. - * - * @return the length - */ - long length() { - return length; - } - - /** - * Truncate the file. - * - * @param newLength the new length - */ - void truncate(long newLength) { - rwLock.writeLock().lock(); - try { - changeLength(newLength); - long end = MathUtils.roundUpLong(newLength, BLOCK_SIZE); - if (end != newLength) { - int lastPage = (int) (newLength >>> BLOCK_SIZE_SHIFT); - ByteBuffer d = expandPage(lastPage); - for (int i = (int) (newLength & BLOCK_SIZE_MASK); i < BLOCK_SIZE; i++) { - d.put(i, (byte) 0); - } - if (compress) { - addToCompressLaterCache(lastPage); - } - } - } finally { - rwLock.writeLock().unlock(); - } - } - - @SuppressWarnings("unchecked") - private void changeLength(long len) { - length = len; - len = MathUtils.roundUpLong(len, BLOCK_SIZE); - int blocks = (int) (len >>> BLOCK_SIZE_SHIFT); - if (blocks != buffers.length) { - final AtomicReference[] newBuffers = new AtomicReference[blocks]; - System.arraycopy(buffers, 0, newBuffers, 0, - Math.min(buffers.length, newBuffers.length)); - for (int i = buffers.length; i < blocks; i++) { - newBuffers[i] = new AtomicReference<>(COMPRESSED_EMPTY_BLOCK); - } - buffers = 
newBuffers; - } - compressLaterCache.setCacheSize(Math.max(CACHE_MIN_SIZE, (int) (blocks * - compressLaterCachePercent / 100))); - } - - /** - * Read or write. - * - * @param pos the position - * @param b the byte array - * @param off the offset within the byte array - * @param len the number of bytes - * @param write true for writing - * @return the new position - */ - long readWrite(long pos, ByteBuffer b, int off, int len, boolean write) { - final java.util.concurrent.locks.Lock lock = write ? rwLock.writeLock() - : rwLock.readLock(); - lock.lock(); - try { - - long end = pos + len; - if (end > length) { - if (write) { - changeLength(end); - } else { - len = (int) (length - pos); - } - } - while (len > 0) { - final int l = (int) Math.min(len, BLOCK_SIZE - (pos & BLOCK_SIZE_MASK)); - final int page = (int) (pos >>> BLOCK_SIZE_SHIFT); - final ByteBuffer block = expandPage(page); - int blockOffset = (int) (pos & BLOCK_SIZE_MASK); - if (write) { - final ByteBuffer srcTmp = b.slice(); - final ByteBuffer dstTmp = block.duplicate(); - srcTmp.position(off); - srcTmp.limit(off + l); - dstTmp.position(blockOffset); - dstTmp.put(srcTmp); - } else { - // duplicate, so this can be done concurrently - final ByteBuffer tmp = block.duplicate(); - tmp.position(blockOffset); - tmp.limit(l + blockOffset); - int oldPosition = b.position(); - b.position(off); - b.put(tmp); - // restore old position - b.position(oldPosition); - } - if (compress) { - addToCompressLaterCache(page); - } - off += l; - pos += l; - len -= l; - } - return pos; - } finally { - lock.unlock(); - } - } - - /** - * Set the file name. - * - * @param name the name - */ - void setName(String name) { - this.name = name; - } - - /** - * Get the file name - * - * @return the name - */ - String getName() { - return name; - } - - /** - * Get the last modified time. - * - * @return the time - */ - long getLastModified() { - return lastModified; - } - - /** - * Check whether writing is allowed. 
- * - * @return true if it is - */ - boolean canWrite() { - return !isReadOnly; - } - - /** - * Set the read-only flag. - * - * @return true - */ - boolean setReadOnly() { - isReadOnly = true; - return true; - } - -} - - diff --git a/h2/src/main/org/h2/store/fs/FilePathSplit.java b/h2/src/main/org/h2/store/fs/FilePathSplit.java deleted file mode 100644 index 79b7af2fd8..0000000000 --- a/h2/src/main/org/h2/store/fs/FilePathSplit.java +++ /dev/null @@ -1,449 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.store.fs; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.SequenceInputStream; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.util.ArrayList; -import java.util.List; - -import org.h2.engine.SysProperties; -import org.h2.message.DbException; - -/** - * A file system that may split files into multiple smaller files. - * (required for a FAT32 because it only support files up to 2 GB). 
- */ -public class FilePathSplit extends FilePathWrapper { - - private static final String PART_SUFFIX = ".part"; - - @Override - protected String getPrefix() { - return getScheme() + ":" + parse(name)[0] + ":"; - } - - @Override - public FilePath unwrap(String fileName) { - return FilePath.get(parse(fileName)[1]); - } - - @Override - public boolean setReadOnly() { - boolean result = false; - for (int i = 0;; i++) { - FilePath f = getBase(i); - if (f.exists()) { - result = f.setReadOnly(); - } else { - break; - } - } - return result; - } - - @Override - public void delete() { - for (int i = 0;; i++) { - FilePath f = getBase(i); - if (f.exists()) { - f.delete(); - } else { - break; - } - } - } - - @Override - public long lastModified() { - long lastModified = 0; - for (int i = 0;; i++) { - FilePath f = getBase(i); - if (f.exists()) { - long l = f.lastModified(); - lastModified = Math.max(lastModified, l); - } else { - break; - } - } - return lastModified; - } - - @Override - public long size() { - long length = 0; - for (int i = 0;; i++) { - FilePath f = getBase(i); - if (f.exists()) { - length += f.size(); - } else { - break; - } - } - return length; - } - - @Override - public ArrayList newDirectoryStream() { - List list = getBase().newDirectoryStream(); - ArrayList newList = new ArrayList<>(); - for (FilePath f : list) { - if (!f.getName().endsWith(PART_SUFFIX)) { - newList.add(wrap(f)); - } - } - return newList; - } - - @Override - public InputStream newInputStream() throws IOException { - InputStream input = getBase().newInputStream(); - for (int i = 1;; i++) { - FilePath f = getBase(i); - if (f.exists()) { - InputStream i2 = f.newInputStream(); - input = new SequenceInputStream(input, i2); - } else { - break; - } - } - return input; - } - - @Override - public FileChannel open(String mode) throws IOException { - ArrayList list = new ArrayList<>(); - list.add(getBase().open(mode)); - for (int i = 1;; i++) { - FilePath f = getBase(i); - if (f.exists()) { - 
list.add(f.open(mode)); - } else { - break; - } - } - FileChannel[] array = list.toArray(new FileChannel[0]); - long maxLength = array[0].size(); - long length = maxLength; - if (array.length == 1) { - long defaultMaxLength = getDefaultMaxLength(); - if (maxLength < defaultMaxLength) { - maxLength = defaultMaxLength; - } - } else { - if (maxLength == 0) { - closeAndThrow(0, array, array[0], maxLength); - } - for (int i = 1; i < array.length - 1; i++) { - FileChannel c = array[i]; - long l = c.size(); - length += l; - if (l != maxLength) { - closeAndThrow(i, array, c, maxLength); - } - } - FileChannel c = array[array.length - 1]; - long l = c.size(); - length += l; - if (l > maxLength) { - closeAndThrow(array.length - 1, array, c, maxLength); - } - } - return new FileSplit(this, mode, array, length, maxLength); - } - - private long getDefaultMaxLength() { - return 1L << Integer.decode(parse(name)[0]); - } - - private void closeAndThrow(int id, FileChannel[] array, FileChannel o, - long maxLength) throws IOException { - String message = "Expected file length: " + maxLength + " got: " + - o.size() + " for " + getName(id); - for (FileChannel f : array) { - f.close(); - } - throw new IOException(message); - } - - @Override - public OutputStream newOutputStream(boolean append) throws IOException { - return new FileChannelOutputStream(open("rw"), append); - } - - @Override - public void moveTo(FilePath path, boolean atomicReplace) { - FilePathSplit newName = (FilePathSplit) path; - for (int i = 0;; i++) { - FilePath o = getBase(i); - if (o.exists()) { - o.moveTo(newName.getBase(i), atomicReplace); - } else if (newName.getBase(i).exists()) { - newName.getBase(i).delete(); - } else { - break; - } - } - } - - /** - * Split the file name into size and base file name. 
- * - * @param fileName the file name - * @return an array with size and file name - */ - private String[] parse(String fileName) { - if (!fileName.startsWith(getScheme())) { - DbException.throwInternalError(fileName + " doesn't start with " + getScheme()); - } - fileName = fileName.substring(getScheme().length() + 1); - String size; - if (fileName.length() > 0 && Character.isDigit(fileName.charAt(0))) { - int idx = fileName.indexOf(':'); - size = fileName.substring(0, idx); - try { - fileName = fileName.substring(idx + 1); - } catch (NumberFormatException e) { - // ignore - } - } else { - size = Long.toString(SysProperties.SPLIT_FILE_SIZE_SHIFT); - } - return new String[] { size, fileName }; - } - - /** - * Get the file name of a part file. - * - * @param id the part id - * @return the file name including the part id - */ - FilePath getBase(int id) { - return FilePath.get(getName(id)); - } - - private String getName(int id) { - return id > 0 ? getBase().name + "." + id + PART_SUFFIX : getBase().name; - } - - @Override - public String getScheme() { - return "split"; - } - -} - -/** - * A file that may be split into multiple smaller files. 
- */ -class FileSplit extends FileBase { - - private final FilePathSplit file; - private final String mode; - private final long maxLength; - private FileChannel[] list; - private long filePointer; - private long length; - - FileSplit(FilePathSplit file, String mode, FileChannel[] list, long length, - long maxLength) { - this.file = file; - this.mode = mode; - this.list = list; - this.length = length; - this.maxLength = maxLength; - } - - @Override - public void implCloseChannel() throws IOException { - for (FileChannel c : list) { - c.close(); - } - } - - @Override - public long position() { - return filePointer; - } - - @Override - public long size() { - return length; - } - - @Override - public synchronized int read(ByteBuffer dst, long position) - throws IOException { - int len = dst.remaining(); - if (len == 0) { - return 0; - } - len = (int) Math.min(len, length - position); - if (len <= 0) { - return -1; - } - long offset = position % maxLength; - len = (int) Math.min(len, maxLength - offset); - FileChannel channel = getFileChannel(position); - return channel.read(dst, offset); - } - - @Override - public int read(ByteBuffer dst) throws IOException { - int len = dst.remaining(); - if (len == 0) { - return 0; - } - len = (int) Math.min(len, length - filePointer); - if (len <= 0) { - return -1; - } - long offset = filePointer % maxLength; - len = (int) Math.min(len, maxLength - offset); - FileChannel channel = getFileChannel(filePointer); - channel.position(offset); - len = channel.read(dst); - filePointer += len; - return len; - } - - @Override - public FileChannel position(long pos) { - filePointer = pos; - return this; - } - - private FileChannel getFileChannel(long position) throws IOException { - int id = (int) (position / maxLength); - while (id >= list.length) { - int i = list.length; - FileChannel[] newList = new FileChannel[i + 1]; - System.arraycopy(list, 0, newList, 0, i); - FilePath f = file.getBase(i); - newList[i] = f.open(mode); - list = newList; 
- } - return list[id]; - } - - @Override - public FileChannel truncate(long newLength) throws IOException { - if (newLength >= length) { - return this; - } - filePointer = Math.min(filePointer, newLength); - int newFileCount = 1 + (int) (newLength / maxLength); - if (newFileCount < list.length) { - // delete some of the files - FileChannel[] newList = new FileChannel[newFileCount]; - // delete backwards, so that truncating is somewhat transactional - for (int i = list.length - 1; i >= newFileCount; i--) { - // verify the file is writable - list[i].truncate(0); - list[i].close(); - try { - file.getBase(i).delete(); - } catch (DbException e) { - throw DbException.convertToIOException(e); - } - } - System.arraycopy(list, 0, newList, 0, newList.length); - list = newList; - } - long size = newLength - maxLength * (newFileCount - 1); - list[list.length - 1].truncate(size); - this.length = newLength; - return this; - } - - @Override - public void force(boolean metaData) throws IOException { - for (FileChannel c : list) { - c.force(metaData); - } - } - - @Override - public int write(ByteBuffer src, long position) throws IOException { - if (position >= length && position > maxLength) { - // may need to extend and create files - long oldFilePointer = position; - long x = length - (length % maxLength) + maxLength; - for (; x < position; x += maxLength) { - if (x > length) { - // expand the file size - position(x - 1); - write(ByteBuffer.wrap(new byte[1])); - } - position = oldFilePointer; - } - } - long offset = position % maxLength; - int len = src.remaining(); - FileChannel channel = getFileChannel(position); - int l = (int) Math.min(len, maxLength - offset); - if (l == len) { - l = channel.write(src, offset); - } else { - int oldLimit = src.limit(); - src.limit(src.position() + l); - l = channel.write(src, offset); - src.limit(oldLimit); - } - length = Math.max(length, position + l); - return l; - } - - @Override - public int write(ByteBuffer src) throws IOException { - if 
(filePointer >= length && filePointer > maxLength) { - // may need to extend and create files - long oldFilePointer = filePointer; - long x = length - (length % maxLength) + maxLength; - for (; x < filePointer; x += maxLength) { - if (x > length) { - // expand the file size - position(x - 1); - write(ByteBuffer.wrap(new byte[1])); - } - filePointer = oldFilePointer; - } - } - long offset = filePointer % maxLength; - int len = src.remaining(); - FileChannel channel = getFileChannel(filePointer); - channel.position(offset); - int l = (int) Math.min(len, maxLength - offset); - if (l == len) { - l = channel.write(src); - } else { - int oldLimit = src.limit(); - src.limit(src.position() + l); - l = channel.write(src); - src.limit(oldLimit); - } - filePointer += l; - length = Math.max(length, filePointer); - return l; - } - - @Override - public synchronized FileLock tryLock(long position, long size, - boolean shared) throws IOException { - return list[0].tryLock(position, size, shared); - } - - @Override - public String toString() { - return file.toString(); - } - -} diff --git a/h2/src/main/org/h2/store/fs/FilePathWrapper.java b/h2/src/main/org/h2/store/fs/FilePathWrapper.java index c28aa3c242..f3b14c00b8 100644 --- a/h2/src/main/org/h2/store/fs/FilePathWrapper.java +++ b/h2/src/main/org/h2/store/fs/FilePathWrapper.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store.fs; diff --git a/h2/src/main/org/h2/store/fs/FileUtils.java b/h2/src/main/org/h2/store/fs/FileUtils.java index 6a809a6d06..276114d780 100644 --- a/h2/src/main/org/h2/store/fs/FileUtils.java +++ b/h2/src/main/org/h2/store/fs/FileUtils.java @@ -1,19 +1,30 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store.fs; +import java.io.BufferedReader; import java.io.EOFException; import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.io.InputStreamReader; import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.nio.charset.Charset; +import java.nio.file.OpenOption; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.FileAttribute; import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; import java.util.List; +import java.util.Set; + +import org.h2.engine.Constants; /** * This utility class contains utility functions that use the file system @@ -21,6 +32,37 @@ */ public class FileUtils { + /** + * {@link StandardOpenOption#READ}. + */ + public static final Set R = Collections.singleton(StandardOpenOption.READ); + + /** + * {@link StandardOpenOption#READ}, {@link StandardOpenOption#WRITE}, and + * {@link StandardOpenOption#CREATE}. + */ + public static final Set RW = Collections + .unmodifiableSet(EnumSet.of(StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE)); + + /** + * {@link StandardOpenOption#READ}, {@link StandardOpenOption#WRITE}, + * {@link StandardOpenOption#CREATE}, and {@link StandardOpenOption#SYNC}. + */ + public static final Set RWS = Collections.unmodifiableSet(EnumSet.of(StandardOpenOption.READ, + StandardOpenOption.WRITE, StandardOpenOption.CREATE, StandardOpenOption.SYNC)); + + /** + * {@link StandardOpenOption#READ}, {@link StandardOpenOption#WRITE}, + * {@link StandardOpenOption#CREATE}, and {@link StandardOpenOption#DSYNC}. 
+ */ + public static final Set RWD = Collections.unmodifiableSet(EnumSet.of(StandardOpenOption.READ, + StandardOpenOption.WRITE, StandardOpenOption.CREATE, StandardOpenOption.DSYNC)); + + /** + * No file attributes. + */ + public static final FileAttribute[] NO_ATTRIBUTES = new FileAttribute[0]; + /** * Checks if a file exists. * This method is similar to Java 7 java.nio.file.Path.exists. @@ -102,7 +144,7 @@ public static boolean isAbsolute(String fileName) { return FilePath.get(fileName).isAbsolute() // Allows Windows to recognize "/path" as absolute. // Makes the same configuration work on all platforms. - || fileName.startsWith(File.pathSeparator) + || fileName.startsWith(File.separator) // Just in case of non-normalized path on Windows || fileName.startsWith("/"); } @@ -205,6 +247,7 @@ public static boolean isDirectory(String fileName) { * @param fileName the file name * @param mode the access mode. Supported are r, rw, rws, rwd * @return the file object + * @throws IOException on failure */ public static FileChannel open(String fileName, String mode) throws IOException { @@ -214,28 +257,42 @@ public static FileChannel open(String fileName, String mode) /** * Create an input stream to read from the file. * This method is similar to Java 7 - * java.nio.file.Path.newInputStream. + * java.nio.file.Files.newInputStream(). * * @param fileName the file name * @return the input stream + * @throws IOException on failure */ - public static InputStream newInputStream(String fileName) - throws IOException { + public static InputStream newInputStream(String fileName) throws IOException { return FilePath.get(fileName).newInputStream(); } + /** + * Create a buffered reader to read from the file. + * This method is similar to + * java.nio.file.Files.newBufferedReader(). 
+ * + * @param fileName the file name + * @param charset the charset + * @return the buffered reader + * @throws IOException on failure + */ + public static BufferedReader newBufferedReader(String fileName, Charset charset) throws IOException { + return new BufferedReader(new InputStreamReader(newInputStream(fileName), charset), Constants.IO_BUFFER_SIZE); + } + /** * Create an output stream to write into the file. - * This method is similar to Java 7 - * java.nio.file.Path.newOutputStream. + * This method is similar to + * java.nio.file.Files.newOutputStream(). * * @param fileName the file name * @param append if true, the file will grow, if false, the file will be * truncated first * @return the output stream + * @throws IOException on failure */ - public static OutputStream newOutputStream(String fileName, boolean append) - throws IOException { + public static OutputStream newOutputStream(String fileName, boolean append) throws IOException { return FilePath.get(fileName).newOutputStream(append); } @@ -340,6 +397,7 @@ public static boolean tryDelete(String path) { * @param suffix the suffix * @param inTempDir if the file should be stored in the temporary directory * @return the name of the created file + * @throws IOException on failure */ public static String createTempFile(String prefix, String suffix, boolean inTempDir) throws IOException { @@ -352,6 +410,7 @@ public static String createTempFile(String prefix, String suffix, * * @param channel the file channel * @param dst the byte buffer + * @throws IOException on failure */ public static void readFully(FileChannel channel, ByteBuffer dst) throws IOException { @@ -368,6 +427,7 @@ public static void readFully(FileChannel channel, ByteBuffer dst) * * @param channel the file channel * @param src the byte buffer + * @throws IOException on failure */ public static void writeFully(FileChannel channel, ByteBuffer src) throws IOException { @@ -376,4 +436,31 @@ public static void writeFully(FileChannel channel, 
ByteBuffer src) } while (src.remaining() > 0); } + /** + * Convert the string representation to a set. + * + * @param mode the mode as a string + * @return the set + */ + public static Set modeToOptions(String mode) { + Set options; + switch (mode) { + case "r": + options = R; + break; + case "rw": + options = RW; + break; + case "rws": + options = RWS; + break; + case "rwd": + options = RWD; + break; + default: + throw new IllegalArgumentException(mode); + } + return options; + } + } diff --git a/h2/src/main/org/h2/store/fs/Recorder.java b/h2/src/main/org/h2/store/fs/Recorder.java index 70b913574b..829be53177 100644 --- a/h2/src/main/org/h2/store/fs/Recorder.java +++ b/h2/src/main/org/h2/store/fs/Recorder.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.store.fs; diff --git a/h2/src/main/org/h2/store/fs/async/FileAsync.java b/h2/src/main/org/h2/store/fs/async/FileAsync.java new file mode 100644 index 0000000000..427d41542c --- /dev/null +++ b/h2/src/main/org/h2/store/fs/async/FileAsync.java @@ -0,0 +1,89 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.async; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousFileChannel; +import java.nio.channels.FileLock; +import java.nio.file.Paths; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import org.h2.store.fs.FileBaseDefault; +import org.h2.store.fs.FileUtils; + +/** + * File which uses NIO2 AsynchronousFileChannel. 
+ */ +class FileAsync extends FileBaseDefault { + + private final String name; + private final AsynchronousFileChannel channel; + + private static T complete(Future future) throws IOException { + boolean interrupted = false; + for (;;) { + try { + T result = future.get(); + if (interrupted) { + Thread.currentThread().interrupt(); + } + return result; + } catch (InterruptedException e) { + interrupted = true; + } catch (ExecutionException e) { + throw new IOException(e.getCause()); + } + } + } + + FileAsync(String fileName, String mode) throws IOException { + this.name = fileName; + channel = AsynchronousFileChannel.open(Paths.get(fileName), FileUtils.modeToOptions(mode), null, + FileUtils.NO_ATTRIBUTES); + } + + @Override + public void implCloseChannel() throws IOException { + channel.close(); + } + + @Override + public long size() throws IOException { + return channel.size(); + } + + @Override + public int read(ByteBuffer dst, long position) throws IOException { + return complete(channel.read(dst, position)); + } + + @Override + public int write(ByteBuffer src, long position) throws IOException { + return complete(channel.write(src, position)); + } + + @Override + protected void implTruncate(long newLength) throws IOException { + channel.truncate(newLength); + } + + @Override + public void force(boolean metaData) throws IOException { + channel.force(metaData); + } + + @Override + public FileLock tryLock(long position, long size, boolean shared) throws IOException { + return channel.tryLock(position, size, shared); + } + + @Override + public String toString() { + return "async:" + name; + } + +} \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/async/FilePathAsync.java b/h2/src/main/org/h2/store/fs/async/FilePathAsync.java new file mode 100644 index 0000000000..b853fe884f --- /dev/null +++ b/h2/src/main/org/h2/store/fs/async/FilePathAsync.java @@ -0,0 +1,28 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.async; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import org.h2.store.fs.FilePathWrapper; + +/** + * This file system stores files on disk and uses + * java.nio.channels.AsynchronousFileChannel to access the files. + */ +public class FilePathAsync extends FilePathWrapper { + + @Override + public FileChannel open(String mode) throws IOException { + return new FileAsync(name.substring(getScheme().length() + 1), mode); + } + + @Override + public String getScheme() { + return "async"; + } + +} diff --git a/h2/src/main/org/h2/store/fs/async/package.html b/h2/src/main/org/h2/store/fs/async/package.html new file mode 100644 index 0000000000..b4736bf6fd --- /dev/null +++ b/h2/src/main/org/h2/store/fs/async/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

          + +This file system stores files on disk and uses java.nio.channels.AsynchronousFileChannel to access the files. + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/disk/FilePathDisk.java b/h2/src/main/org/h2/store/fs/disk/FilePathDisk.java new file mode 100644 index 0000000000..ba3395f694 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/disk/FilePathDisk.java @@ -0,0 +1,445 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.disk; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URL; +import java.nio.channels.FileChannel; +import java.nio.file.AtomicMoveNotSupportedException; +import java.nio.file.CopyOption; +import java.nio.file.DirectoryNotEmptyException; +import java.nio.file.FileAlreadyExistsException; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.DosFileAttributeView; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.stream.Stream; + +import org.h2.api.ErrorCode; +import org.h2.engine.SysProperties; +import org.h2.message.DbException; +import org.h2.store.fs.FilePath; +import org.h2.store.fs.FileUtils; +import org.h2.util.IOUtils; + +/** + * This file system stores files on disk. + * This is the most common file system. 
+ */ +public class FilePathDisk extends FilePath { + + private static final String CLASSPATH_PREFIX = "classpath:"; + + @Override + public FilePathDisk getPath(String path) { + FilePathDisk p = new FilePathDisk(); + p.name = translateFileName(path); + return p; + } + + @Override + public long size() { + if (name.startsWith(CLASSPATH_PREFIX)) { + try { + String fileName = name.substring(CLASSPATH_PREFIX.length()); + // Force absolute resolution in Class.getResource + if (!fileName.startsWith("/")) { + fileName = "/" + fileName; + } + URL resource = this.getClass().getResource(fileName); + if (resource != null) { + return Files.size(Paths.get(resource.toURI())); + } else { + return 0; + } + } catch (Exception e) { + return 0; + } + } + try { + return Files.size(Paths.get(name)); + } catch (IOException e) { + return 0L; + } + } + + /** + * Translate the file name to the native format. This will replace '\' with + * '/' and expand the home directory ('~'). + * + * @param fileName the file name + * @return the native file name + */ + protected static String translateFileName(String fileName) { + fileName = fileName.replace('\\', '/'); + if (fileName.startsWith("file:")) { + fileName = fileName.substring(5); + } else if (fileName.startsWith("nio:")) { + fileName = fileName.substring(4); + } + return expandUserHomeDirectory(fileName); + } + + /** + * Expand '~' to the user home directory. It is only be expanded if the '~' + * stands alone, or is followed by '/' or '\'. 
+ * + * @param fileName the file name + * @return the native file name + */ + public static String expandUserHomeDirectory(String fileName) { + if (fileName.startsWith("~") && (fileName.length() == 1 || + fileName.startsWith("~/"))) { + String userDir = SysProperties.USER_HOME; + fileName = userDir + fileName.substring(1); + } + return fileName; + } + + @Override + public void moveTo(FilePath newName, boolean atomicReplace) { + Path oldFile = Paths.get(name); + Path newFile = Paths.get(newName.name); + if (!Files.exists(oldFile)) { + throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name + " (not found)", newName.name); + } + if (atomicReplace) { + try { + Files.move(oldFile, newFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE); + return; + } catch (AtomicMoveNotSupportedException ex) { + // Ignore + } catch (IOException ex) { + throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, ex, name, newName.name); + } + } + CopyOption[] copyOptions = atomicReplace ? 
new CopyOption[] { StandardCopyOption.REPLACE_EXISTING } + : new CopyOption[0]; + IOException cause; + try { + Files.move(oldFile, newFile, copyOptions); + } catch (FileAlreadyExistsException ex) { + throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName + " (exists)"); + } catch (IOException ex) { + cause = ex; + for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) { + IOUtils.trace("rename", name + " >" + newName, null); + try { + Files.move(oldFile, newFile, copyOptions); + return; + } catch (FileAlreadyExistsException ex2) { + throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName + " (exists)"); + } catch (IOException ex2) { + cause = ex; + } + wait(i); + } + throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, cause, name, newName.name); + } + } + + private static void wait(int i) { + if (i == 8) { + System.gc(); + } + try { + // sleep at most 256 ms + long sleep = Math.min(256, i * i); + Thread.sleep(sleep); + } catch (InterruptedException e) { + // ignore + } + } + + @Override + public boolean createFile() { + Path file = Paths.get(name); + for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) { + try { + Files.createFile(file); + return true; + } catch (FileAlreadyExistsException e) { + return false; + } catch (IOException e) { + // 'access denied' is really a concurrent access problem + wait(i); + } + } + return false; + } + + @Override + public boolean exists() { + return Files.exists(Paths.get(name)); + } + + @Override + public void delete() { + Path file = Paths.get(name); + IOException cause = null; + for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) { + IOUtils.trace("delete", name, null); + try { + Files.deleteIfExists(file); + return; + } catch (DirectoryNotEmptyException e) { + throw DbException.get(ErrorCode.FILE_DELETE_FAILED_1, e, name); + } catch (IOException e) { + cause = e; + } + wait(i); + } + throw DbException.get(ErrorCode.FILE_DELETE_FAILED_1, cause, name); + } + + @Override + public List 
newDirectoryStream() { + try (Stream files = Files.list(Paths.get(name).toRealPath())) { + return files.collect(ArrayList::new, (t, u) -> t.add(getPath(u.toString())), ArrayList::addAll); + } catch (NoSuchFileException e) { + return Collections.emptyList(); + } catch (IOException e) { + throw DbException.convertIOException(e, name); + } + } + + @Override + public boolean canWrite() { + try { + return Files.isWritable(Paths.get(name)); + } catch (Exception e) { + // Catch security exceptions + return false; + } + } + + @Override + public boolean setReadOnly() { + Path f = Paths.get(name); + try { + FileStore fileStore = Files.getFileStore(f); + /* + * Need to check PosixFileAttributeView first because + * DosFileAttributeView is also supported by recent Java versions on + * non-Windows file systems, but it doesn't affect real access + * permissions. + */ + if (fileStore.supportsFileAttributeView(PosixFileAttributeView.class)) { + HashSet permissions = new HashSet<>(); + for (PosixFilePermission p : Files.getPosixFilePermissions(f)) { + switch (p) { + case OWNER_WRITE: + case GROUP_WRITE: + case OTHERS_WRITE: + break; + default: + permissions.add(p); + } + } + Files.setPosixFilePermissions(f, permissions); + } else if (fileStore.supportsFileAttributeView(DosFileAttributeView.class)) { + Files.setAttribute(f, "dos:readonly", true); + } else { + return false; + } + return true; + } catch (IOException e) { + return false; + } + } + + @Override + public FilePathDisk toRealPath() { + Path path = Paths.get(name); + try { + return getPath(path.toRealPath().toString()); + } catch (IOException e) { + /* + * File does not exist or isn't accessible, try to get the real path + * of parent directory. 
+ */ + return getPath(toRealPath(path.toAbsolutePath().normalize()).toString()); + } + } + + private static Path toRealPath(Path path) { + Path parent = path.getParent(); + if (parent == null) { + return path; + } + try { + parent = parent.toRealPath(); + } catch (IOException e) { + parent = toRealPath(parent); + } + return parent.resolve(path.getFileName()); + } + + @Override + public FilePath getParent() { + Path p = Paths.get(name).getParent(); + return p == null ? null : getPath(p.toString()); + } + + @Override + public boolean isDirectory() { + return Files.isDirectory(Paths.get(name)); + } + + @Override + public boolean isAbsolute() { + return Paths.get(name).isAbsolute(); + } + + @Override + public long lastModified() { + try { + return Files.getLastModifiedTime(Paths.get(name)).toMillis(); + } catch (IOException e) { + return 0L; + } + } + + @Override + public void createDirectory() { + Path dir = Paths.get(name); + try { + Files.createDirectory(dir); + } catch (FileAlreadyExistsException e) { + throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, name + " (a file with this name already exists)"); + } catch (IOException e) { + IOException cause = e; + for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) { + if (Files.isDirectory(dir)) { + return; + } + try { + Files.createDirectory(dir); + } catch (FileAlreadyExistsException ex) { + throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, + name + " (a file with this name already exists)"); + } catch (IOException ex) { + cause = ex; + } + wait(i); + } + throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, cause, name); + } + } + + @Override + public OutputStream newOutputStream(boolean append) throws IOException { + Path file = Paths.get(name); + OpenOption[] options = append // + ? 
new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.APPEND } + : new OpenOption[0]; + try { + Path parent = file.getParent(); + if (parent != null) { + Files.createDirectories(parent); + } + OutputStream out = Files.newOutputStream(file, options); + IOUtils.trace("openFileOutputStream", name, out); + return out; + } catch (IOException e) { + freeMemoryAndFinalize(); + return Files.newOutputStream(file, options); + } + } + + @Override + public InputStream newInputStream() throws IOException { + if (name.matches("[a-zA-Z]{2,19}:.*")) { + // if the ':' is in position 1, a windows file access is assumed: + // C:.. or D:, and if the ':' is not at the beginning, assume its a + // file name with a colon + if (name.startsWith(CLASSPATH_PREFIX)) { + String fileName = name.substring(CLASSPATH_PREFIX.length()); + // Force absolute resolution in Class.getResourceAsStream + if (!fileName.startsWith("/")) { + fileName = "/" + fileName; + } + InputStream in = getClass().getResourceAsStream(fileName); + if (in == null) { + // ClassLoader.getResourceAsStream doesn't need leading "/" + in = Thread.currentThread().getContextClassLoader(). + getResourceAsStream(fileName.substring(1)); + } + if (in == null) { + throw new FileNotFoundException("resource " + fileName); + } + return in; + } + // otherwise a URL is assumed + URL url = new URL(name); + return url.openStream(); + } + InputStream in = Files.newInputStream(Paths.get(name)); + IOUtils.trace("openFileInputStream", name, in); + return in; + } + + /** + * Call the garbage collection and run finalization. This close all files + * that were not closed, and are no longer referenced. 
+ */ + static void freeMemoryAndFinalize() { + IOUtils.trace("freeMemoryAndFinalize", null, null); + Runtime rt = Runtime.getRuntime(); + long mem = rt.freeMemory(); + for (int i = 0; i < 16; i++) { + rt.gc(); + long now = rt.freeMemory(); + rt.runFinalization(); + if (now == mem) { + break; + } + mem = now; + } + } + + @Override + public FileChannel open(String mode) throws IOException { + FileChannel f = FileChannel.open(Paths.get(name), FileUtils.modeToOptions(mode), FileUtils.NO_ATTRIBUTES); + IOUtils.trace("open", name, f); + return f; + } + + @Override + public String getScheme() { + return "file"; + } + + @Override + public FilePath createTempFile(String suffix, boolean inTempDir) throws IOException { + Path file = Paths.get(name + '.').toAbsolutePath(); + String prefix = file.getFileName().toString(); + if (inTempDir) { + Files.createDirectories(Paths.get(System.getProperty("java.io.tmpdir", "."))); + file = Files.createTempFile(prefix, suffix); + } else { + Path dir = file.getParent(); + Files.createDirectories(dir); + file = Files.createTempFile(dir, prefix, suffix); + } + return get(file.toString()); + } + +} diff --git a/h2/src/main/org/h2/store/fs/disk/package.html b/h2/src/main/org/h2/store/fs/disk/package.html new file mode 100644 index 0000000000..7156f31e1f --- /dev/null +++ b/h2/src/main/org/h2/store/fs/disk/package.html @@ -0,0 +1,16 @@ + + + + +Javadoc package documentation +

          + +This file system stores files on disk. +
          +This is the most common file system. + +

\ No newline at end of file
diff --git a/h2/src/main/org/h2/store/fs/encrypt/FileEncrypt.java b/h2/src/main/org/h2/store/fs/encrypt/FileEncrypt.java
new file mode 100644
index 0000000000..38bc227b04
--- /dev/null
+++ b/h2/src/main/org/h2/store/fs/encrypt/FileEncrypt.java
@@ -0,0 +1,261 @@
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.store.fs.encrypt;

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.h2.security.AES;
import org.h2.security.SHA256;
import org.h2.store.fs.FileBaseDefault;
import org.h2.util.MathUtils;

/**
 * An encrypted file with a read cache.
 */
public class FileEncrypt extends FileBaseDefault {

    /**
     * The block size.
     */
    public static final int BLOCK_SIZE = 4096;

    /**
     * The block size bit mask.
     */
    static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1;

    /**
     * The length of the file header. Using a smaller header is possible,
     * but would mean reads and writes are not aligned to the block size.
     */
    static final int HEADER_LENGTH = BLOCK_SIZE;

    private static final byte[] HEADER = "H2encrypt\n".getBytes(StandardCharsets.ISO_8859_1);
    private static final int SALT_POS = HEADER.length;

    /**
     * The length of the salt, in bytes.
     */
    private static final int SALT_LENGTH = 8;

    /**
     * The number of iterations. It is relatively low; a higher value would
     * slow down opening files on Android too much.
     */
    private static final int HASH_ITERATIONS = 10;

    // the unencrypted underlying channel; all real I/O goes through it
    private final FileChannel base;

    /**
     * The current file size, from a user perspective.
     */
    private volatile long size;

    private final String name;

    // lazily created cipher state; volatile for the double-checked
    // initialization in init()/createXTS()
    private volatile XTS xts;

    // cleared (set to null) after key derivation in createXTS()
    private byte[] encryptionKey;

    public FileEncrypt(String name, byte[] encryptionKey, FileChannel base) {
        // don't do any read or write operations here, because they could
        // fail if the file is locked, and we want to give the caller a
        // chance to lock the file first
        this.name = name;
        this.base = base;
        this.encryptionKey = encryptionKey;
    }

    private XTS init() throws IOException {
        // Keep this method small to allow inlining
        XTS xts = this.xts;
        if (xts == null) {
            xts = createXTS();
        }
        return xts;
    }

    // Slow path of init(): reads or writes the file header, derives the key,
    // and publishes the XTS instance. Synchronized so only one thread does it.
    private synchronized XTS createXTS() throws IOException {
        XTS xts = this.xts;
        if (xts != null) {
            return xts;
        }
        this.size = base.size() - HEADER_LENGTH;
        boolean newFile = size < 0;
        byte[] salt;
        if (newFile) {
            byte[] header = Arrays.copyOf(HEADER, BLOCK_SIZE);
            salt = MathUtils.secureRandomBytes(SALT_LENGTH);
            System.arraycopy(salt, 0, header, SALT_POS, salt.length);
            writeFully(base, 0, ByteBuffer.wrap(header));
            size = 0;
        } else {
            salt = new byte[SALT_LENGTH];
            readFully(base, SALT_POS, ByteBuffer.wrap(salt));
            // an unaligned base length encodes a logical size that is not a
            // multiple of BLOCK_SIZE: the last full block is padding written
            // by write(), so subtract it (see the 'plus' handling in write())
            if ((size & BLOCK_SIZE_MASK) != 0) {
                size -= BLOCK_SIZE;
            }
        }
        AES cipher = new AES();
        cipher.setKey(SHA256.getPBKDF2(encryptionKey, salt, HASH_ITERATIONS, 16));
        encryptionKey = null;
        return this.xts = new XTS(cipher);
    }

    @Override
    protected void implCloseChannel() throws IOException {
        base.close();
    }

    @Override
    public int read(ByteBuffer dst, long position) throws IOException {
        int len = dst.remaining();
        if (len == 0) {
            return 0;
        }
        XTS xts = init();
        len = (int) Math.min(len, size - position);
        if (position >= size) {
            return -1;
        } else if (position < 0) {
            throw new IllegalArgumentException("pos: " + position);
        }
        if ((position & BLOCK_SIZE_MASK) != 0 || (len & BLOCK_SIZE_MASK) != 0) {
            // either the position or the len is unaligned:
            // read aligned, and then truncate
            long p = position / BLOCK_SIZE * BLOCK_SIZE;
            int offset = (int) (position - p);
            int l = (len + offset + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
            ByteBuffer temp = ByteBuffer.allocate(l);
            readInternal(temp, p, l, xts);
            temp.flip().limit(offset + len).position(offset);
            dst.put(temp);
            return len;
        }
        readInternal(dst, position, len, xts);
        return len;
    }

    // Read 'len' block-aligned bytes at 'position' and decrypt them in place
    // in the destination buffer's backing array.
    private void readInternal(ByteBuffer dst, long position, int len, XTS xts) throws IOException {
        int x = dst.position();
        readFully(base, position + HEADER_LENGTH, dst);
        long block = position / BLOCK_SIZE;
        while (len > 0) {
            xts.decrypt(block++, BLOCK_SIZE, dst.array(), dst.arrayOffset() + x);
            x += BLOCK_SIZE;
            len -= BLOCK_SIZE;
        }
    }

    private static void readFully(FileChannel file, long pos, ByteBuffer dst) throws IOException {
        do {
            int len = file.read(dst, pos);
            if (len < 0) {
                throw new EOFException();
            }
            pos += len;
        } while (dst.remaining() > 0);
    }

    @Override
    public int write(ByteBuffer src, long position) throws IOException {
        XTS xts = init();
        int len = src.remaining();
        if ((position & BLOCK_SIZE_MASK) != 0 || (len & BLOCK_SIZE_MASK) != 0) {
            // either the position or the len is unaligned:
            // read aligned, and then truncate
            long p = position / BLOCK_SIZE * BLOCK_SIZE;
            int offset = (int) (position - p);
            int l = (len + offset + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
            ByteBuffer temp = ByteBuffer.allocate(l);
            int available = (int) (size - p + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
            int readLen = Math.min(l, available);
            if (readLen > 0) {
                // read-modify-write: preserve existing data around the span
                readInternal(temp, p, readLen, xts);
                temp.rewind();
            }
            temp.limit(offset + len).position(offset);
            temp.put(src).limit(l).rewind();
            writeInternal(temp, p, l, xts);
            long p2 = position + len;
            size = Math.max(size, p2);
            int plus = (int) (size & BLOCK_SIZE_MASK);
            if (plus > 0) {
                // append 'plus' zero bytes so the base file length encodes
                // the unaligned logical size (decoded in createXTS())
                temp = ByteBuffer.allocate(plus);
                writeFully(base, p + HEADER_LENGTH + l, temp);
            }
            return len;
        }
        writeInternal(src, position, len, xts);
        long p2 = position + len;
        size = Math.max(size, p2);
        return len;
    }

    // Encrypt a copy of 'src' block by block and write it at 'position'.
    private void writeInternal(ByteBuffer src, long position, int len, XTS xts) throws IOException {
        ByteBuffer crypt = ByteBuffer.allocate(len).put(src);
        crypt.flip();
        long block = position / BLOCK_SIZE;
        int x = 0, l = len;
        while (l > 0) {
            xts.encrypt(block++, BLOCK_SIZE, crypt.array(), crypt.arrayOffset() + x);
            x += BLOCK_SIZE;
            l -= BLOCK_SIZE;
        }
        writeFully(base, position + HEADER_LENGTH, crypt);
    }

    private static void writeFully(FileChannel file, long pos, ByteBuffer src) throws IOException {
        do {
            pos += file.write(src, pos);
        } while (src.remaining() > 0);
    }

    @Override
    public long size() throws IOException {
        init();
        return size;
    }

    @Override
    protected void implTruncate(long newSize) throws IOException {
        init();
        if (newSize > size) {
            return;
        }
        if (newSize < 0) {
            throw new IllegalArgumentException("newSize: " + newSize);
        }
        int offset = (int) (newSize & BLOCK_SIZE_MASK);
        if (offset > 0) {
            // keep one extra block so the unaligned size stays encoded
            base.truncate(newSize + HEADER_LENGTH + BLOCK_SIZE);
        } else {
            base.truncate(newSize + HEADER_LENGTH);
        }
        this.size = newSize;
    }

    @Override
    public void force(boolean metaData) throws IOException {
        base.force(metaData);
    }

    @Override
    public FileLock tryLock(long position, long size, boolean shared) throws IOException {
        // locking is delegated to the unencrypted base channel
        return base.tryLock(position, size, shared);
    }

    @Override
    public String toString() {
        return name;
    }

}
diff --git a/h2/src/main/org/h2/store/fs/encrypt/FilePathEncrypt.java b/h2/src/main/org/h2/store/fs/encrypt/FilePathEncrypt.java
new file mode 100644
index 0000000000..40dffc5821
--- /dev/null
+++ b/h2/src/main/org/h2/store/fs/encrypt/FilePathEncrypt.java
@@ -0,0 +1,118 @@
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group + */ +package org.h2.store.fs.encrypt; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.channels.Channels; +import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; +import org.h2.store.fs.FilePath; +import org.h2.store.fs.FilePathWrapper; +import org.h2.store.fs.FileUtils; + +/** + * An encrypted file. + */ +public class FilePathEncrypt extends FilePathWrapper { + + private static final String SCHEME = "encrypt"; + + /** + * Register this file system. + */ + public static void register() { + FilePath.register(new FilePathEncrypt()); + } + + @Override + public FileChannel open(String mode) throws IOException { + String[] parsed = parse(name); + FileChannel file = FileUtils.open(parsed[1], mode); + byte[] passwordBytes = parsed[0].getBytes(StandardCharsets.UTF_8); + return new FileEncrypt(name, passwordBytes, file); + } + + @Override + public String getScheme() { + return SCHEME; + } + + @Override + protected String getPrefix() { + String[] parsed = parse(name); + return getScheme() + ":" + parsed[0] + ":"; + } + + @Override + public FilePath unwrap(String fileName) { + return FilePath.get(parse(fileName)[1]); + } + + @Override + public long size() { + long size = getBase().size() - FileEncrypt.HEADER_LENGTH; + size = Math.max(0, size); + if ((size & FileEncrypt.BLOCK_SIZE_MASK) != 0) { + size -= FileEncrypt.BLOCK_SIZE; + } + return size; + } + + @Override + public OutputStream newOutputStream(boolean append) throws IOException { + return newFileChannelOutputStream(open("rw"), append); + } + + @Override + public InputStream newInputStream() throws IOException { + return Channels.newInputStream(open("r")); + } + + /** + * Split the file name into algorithm, password, and base file name. 
+ * + * @param fileName the file name + * @return an array with algorithm, password, and base file name + */ + private String[] parse(String fileName) { + if (!fileName.startsWith(getScheme())) { + throw new IllegalArgumentException(fileName + + " doesn't start with " + getScheme()); + } + fileName = fileName.substring(getScheme().length() + 1); + int idx = fileName.indexOf(':'); + String password; + if (idx < 0) { + throw new IllegalArgumentException(fileName + + " doesn't contain encryption algorithm and password"); + } + password = fileName.substring(0, idx); + fileName = fileName.substring(idx + 1); + return new String[] { password, fileName }; + } + + /** + * Convert a char array to a byte array, in UTF-16 format. The char array is + * not cleared after use (this must be done by the caller). + * + * @param passwordChars the password characters + * @return the byte array + */ + public static byte[] getPasswordBytes(char[] passwordChars) { + // using UTF-16 + int len = passwordChars.length; + byte[] password = new byte[len * 2]; + for (int i = 0; i < len; i++) { + char c = passwordChars[i]; + password[i + i] = (byte) (c >>> 8); + password[i + i + 1] = (byte) c; + } + return password; + } + +} diff --git a/h2/src/main/org/h2/store/fs/encrypt/XTS.java b/h2/src/main/org/h2/store/fs/encrypt/XTS.java new file mode 100644 index 0000000000..570ec3f8b7 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/encrypt/XTS.java @@ -0,0 +1,129 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.encrypt; + +import org.h2.security.BlockCipher; + +/** + * An XTS implementation as described in + * IEEE P1619 (Standard Architecture for Encrypted Shared Storage Media). + * See also + * http://axelkenzo.ru/downloads/1619-2007-NIST-Submission.pdf + */ +class XTS { + + /** + * Galois field feedback. 
+ */ + private static final int GF_128_FEEDBACK = 0x87; + + /** + * The AES encryption block size. + */ + private static final int CIPHER_BLOCK_SIZE = 16; + + private final BlockCipher cipher; + + XTS(BlockCipher cipher) { + this.cipher = cipher; + } + + /** + * Encrypt the data. + * + * @param id the (sector) id + * @param len the number of bytes + * @param data the data + * @param offset the offset within the data + */ + void encrypt(long id, int len, byte[] data, int offset) { + byte[] tweak = initTweak(id); + int i = 0; + for (; i + CIPHER_BLOCK_SIZE <= len; i += CIPHER_BLOCK_SIZE) { + if (i > 0) { + updateTweak(tweak); + } + xorTweak(data, i + offset, tweak); + cipher.encrypt(data, i + offset, CIPHER_BLOCK_SIZE); + xorTweak(data, i + offset, tweak); + } + if (i < len) { + updateTweak(tweak); + swap(data, i + offset, i - CIPHER_BLOCK_SIZE + offset, len - i); + xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweak); + cipher.encrypt(data, i - CIPHER_BLOCK_SIZE + offset, CIPHER_BLOCK_SIZE); + xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweak); + } + } + + /** + * Decrypt the data. 
+ * + * @param id the (sector) id + * @param len the number of bytes + * @param data the data + * @param offset the offset within the data + */ + void decrypt(long id, int len, byte[] data, int offset) { + byte[] tweak = initTweak(id), tweakEnd = tweak; + int i = 0; + for (; i + CIPHER_BLOCK_SIZE <= len; i += CIPHER_BLOCK_SIZE) { + if (i > 0) { + updateTweak(tweak); + if (i + CIPHER_BLOCK_SIZE + CIPHER_BLOCK_SIZE > len && + i + CIPHER_BLOCK_SIZE < len) { + tweakEnd = tweak.clone(); + updateTweak(tweak); + } + } + xorTweak(data, i + offset, tweak); + cipher.decrypt(data, i + offset, CIPHER_BLOCK_SIZE); + xorTweak(data, i + offset, tweak); + } + if (i < len) { + swap(data, i, i - CIPHER_BLOCK_SIZE + offset, len - i + offset); + xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweakEnd); + cipher.decrypt(data, i - CIPHER_BLOCK_SIZE + offset, CIPHER_BLOCK_SIZE); + xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweakEnd); + } + } + + private byte[] initTweak(long id) { + byte[] tweak = new byte[CIPHER_BLOCK_SIZE]; + for (int j = 0; j < CIPHER_BLOCK_SIZE; j++, id >>>= 8) { + tweak[j] = (byte) (id & 0xff); + } + cipher.encrypt(tweak, 0, CIPHER_BLOCK_SIZE); + return tweak; + } + + private static void xorTweak(byte[] data, int pos, byte[] tweak) { + for (int i = 0; i < CIPHER_BLOCK_SIZE; i++) { + data[pos + i] ^= tweak[i]; + } + } + + private static void updateTweak(byte[] tweak) { + byte ci = 0, co = 0; + for (int i = 0; i < CIPHER_BLOCK_SIZE; i++) { + co = (byte) ((tweak[i] >> 7) & 1); + tweak[i] = (byte) (((tweak[i] << 1) + ci) & 255); + ci = co; + } + if (co != 0) { + tweak[0] ^= GF_128_FEEDBACK; + } + } + + private static void swap(byte[] data, int source, int target, int len) { + for (int i = 0; i < len; i++) { + byte temp = data[source + i]; + data[source + i] = data[target + i]; + data[target + i] = temp; + } + } + +} \ No newline at end of file diff --git a/h2/src/main/org/h2/upgrade/package.html b/h2/src/main/org/h2/store/fs/encrypt/package.html similarity index 63% 
rename from h2/src/main/org/h2/upgrade/package.html rename to h2/src/main/org/h2/store/fs/encrypt/package.html index c444579c8e..84d70fcc39 100644 --- a/h2/src/main/org/h2/upgrade/package.html +++ b/h2/src/main/org/h2/store/fs/encrypt/package.html @@ -1,14 +1,14 @@ Javadoc package documentation - +

          -Implementation of the database upgrade mechanism. +An encrypted file system abstraction. - \ No newline at end of file +

\ No newline at end of file
diff --git a/h2/src/main/org/h2/store/fs/mem/FileMem.java b/h2/src/main/org/h2/store/fs/mem/FileMem.java
new file mode 100644
index 0000000000..ecf21aed4e
--- /dev/null
+++ b/h2/src/main/org/h2/store/fs/mem/FileMem.java
@@ -0,0 +1,137 @@
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.store.fs.mem;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileLock;
import java.nio.channels.NonWritableChannelException;
import org.h2.store.fs.FakeFileChannel;
import org.h2.store.fs.FileBaseDefault;

/**
 * This class represents an in-memory file.
 */
class FileMem extends FileBaseDefault {

    /**
     * The file data.
     */
    final FileMemData data;

    // true when the channel was opened read-only
    private final boolean readOnly;
    private volatile boolean closed;

    FileMem(FileMemData data, boolean readOnly) {
        this.data = data;
        this.readOnly = readOnly;
    }

    @Override
    public long size() {
        return data.length();
    }

    @Override
    protected void implTruncate(long newLength) throws IOException {
        // compatibility with JDK FileChannel#truncate
        if (readOnly) {
            throw new NonWritableChannelException();
        }
        if (closed) {
            throw new ClosedChannelException();
        }
        // growing via truncate is a no-op, as with FileChannel
        if (newLength < size()) {
            data.touch(readOnly);
            data.truncate(newLength);
        }
    }

    @Override
    public int write(ByteBuffer src, long position) throws IOException {
        if (closed) {
            throw new ClosedChannelException();
        }
        if (readOnly) {
            throw new NonWritableChannelException();
        }
        int len = src.remaining();
        if (len == 0) {
            return 0;
        }
        data.touch(readOnly);
        data.readWrite(position, src.array(),
                src.arrayOffset() + src.position(), len, true);
        src.position(src.position() + len);
        return len;
    }

    @Override
    public int read(ByteBuffer dst, long position) throws IOException {
        if (closed) {
            throw new ClosedChannelException();
        }
        int len = dst.remaining();
        if (len == 0) {
            return 0;
        }
        long newPos = data.readWrite(position, dst.array(),
                dst.arrayOffset() + dst.position(), len, false);
        len = (int) (newPos - position);
        if (len <= 0) {
            // reading at or past the end of the file
            return -1;
        }
        dst.position(dst.position() + len);
        return len;
    }

    @Override
    public void implCloseChannel() throws IOException {
        closed = true;
    }

    @Override
    public void force(boolean metaData) throws IOException {
        // do nothing
    }

    @Override
    public FileLock tryLock(long position, long size,
            boolean shared) throws IOException {
        if (closed) {
            throw new ClosedChannelException();
        }
        if (shared) {
            if (!data.lockShared()) {
                return null;
            }
        } else {
            if (!data.lockExclusive()) {
                return null;
            }
        }

        // FileLock requires a FileChannel; FakeFileChannel stands in, since
        // the lock state actually lives in FileMemData
        return new FileLock(FakeFileChannel.INSTANCE, position, size, shared) {

            @Override
            public boolean isValid() {
                return true;
            }

            @Override
            public void release() throws IOException {
                data.unlock();
            }
        };
    }

    @Override
    public String toString() {
        return closed ? "" : data.getName();
    }

}
\ No newline at end of file
diff --git a/h2/src/main/org/h2/store/fs/mem/FileMemData.java b/h2/src/main/org/h2/store/fs/mem/FileMemData.java
new file mode 100644
index 0000000000..3d15676f2c
--- /dev/null
+++ b/h2/src/main/org/h2/store/fs/mem/FileMemData.java
@@ -0,0 +1,385 @@
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.store.fs.mem;

import java.io.IOException;
import java.nio.channels.NonWritableChannelException;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import org.h2.compress.CompressLZF;
import org.h2.util.MathUtils;

/**
 * This class contains the data of an in-memory random access file.
 * Data compression using the LZF algorithm is supported as well.
 */
class FileMemData {

    private static final int CACHE_SIZE = 8;
    private static final int BLOCK_SIZE_SHIFT = 10;
    private static final int BLOCK_SIZE = 1 << BLOCK_SIZE_SHIFT;
    private static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1;
    private static final CompressLZF LZF = new CompressLZF();
    // shared scratch buffer; only accessed while synchronized on LZF
    private static final byte[] BUFFER = new byte[BLOCK_SIZE * 2];
    private static final byte[] COMPRESSED_EMPTY_BLOCK;

    private static final Cache<CompressItem, CompressItem> COMPRESS_LATER =
            new Cache<>(CACHE_SIZE);

    private String name;
    private final int id;
    private final boolean compress;
    private volatile long length;
    // one atomic reference per block; pages are replaced copy-on-write
    private AtomicReference<byte[]>[] data;
    private long lastModified;
    private boolean isReadOnly;
    private boolean isLockedExclusive;
    private int sharedLockCount;

    static {
        byte[] n = new byte[BLOCK_SIZE];
        int len = LZF.compress(n, 0, BLOCK_SIZE, BUFFER, 0);
        COMPRESSED_EMPTY_BLOCK = Arrays.copyOf(BUFFER, len);
    }

    @SuppressWarnings("unchecked")
    FileMemData(String name, boolean compress) {
        this.name = name;
        this.id = name.hashCode();
        this.compress = compress;
        this.data = new AtomicReference[0];
        lastModified = System.currentTimeMillis();
    }

    /**
     * Get the page if it exists.
     *
     * @param page the page id
     * @return the byte array, or null
     */
    private byte[] getPage(int page) {
        AtomicReference<byte[]>[] b = data;
        if (page >= b.length) {
            return null;
        }
        return b[page].get();
    }

    /**
     * Set the page data.
     *
     * @param page the page id
     * @param oldData the old data
     * @param newData the new data
     * @param force whether the data should be overwritten even if the old data
     *            doesn't match
     */
    private void setPage(int page, byte[] oldData, byte[] newData, boolean force) {
        AtomicReference<byte[]>[] b = data;
        if (page >= b.length) {
            // file was truncated concurrently; the page no longer exists
            return;
        }
        if (force) {
            b[page].set(newData);
        } else {
            b[page].compareAndSet(oldData, newData);
        }
    }

    int getId() {
        return id;
    }

    /**
     * Lock the file in exclusive mode if possible.
     *
     * @return if locking was successful
     */
    synchronized boolean lockExclusive() {
        if (sharedLockCount > 0 || isLockedExclusive) {
            return false;
        }
        isLockedExclusive = true;
        return true;
    }

    /**
     * Lock the file in shared mode if possible.
     *
     * @return if locking was successful
     */
    synchronized boolean lockShared() {
        if (isLockedExclusive) {
            return false;
        }
        sharedLockCount++;
        return true;
    }

    /**
     * Unlock the file.
     */
    synchronized void unlock() throws IOException {
        if (isLockedExclusive) {
            isLockedExclusive = false;
        } else if (sharedLockCount > 0) {
            sharedLockCount--;
        } else {
            throw new IOException("not locked");
        }
    }

    /**
     * This small cache compresses the data if an element leaves the cache.
     */
    static class Cache<K, V> extends LinkedHashMap<K, V> {

        private static final long serialVersionUID = 1L;
        private final int size;

        Cache(int size) {
            // access-order LinkedHashMap, so eldest = least recently used
            super(size, (float) 0.75, true);
            this.size = size;
        }

        @Override
        public synchronized V put(K key, V value) {
            return super.put(key, value);
        }

        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            if (size() < size) {
                return false;
            }
            // compress the evicted page before it is dropped from the cache
            CompressItem c = (CompressItem) eldest.getKey();
            c.file.compress(c.page);
            return true;
        }
    }

    /**
     * Points to a block of bytes that needs to be compressed.
     */
    static class CompressItem {

        /**
         * The file.
         */
        FileMemData file;

        /**
         * The page to compress.
         */
        int page;

        @Override
        public int hashCode() {
            return page ^ file.getId();
        }

        @Override
        public boolean equals(Object o) {
            if (o instanceof CompressItem) {
                CompressItem c = (CompressItem) o;
                return c.page == page && c.file == file;
            }
            return false;
        }

    }

    // Schedule a page for deferred compression (it is compressed when it
    // falls out of the small LRU cache).
    private void compressLater(int page) {
        CompressItem c = new CompressItem();
        c.file = this;
        c.page = page;
        synchronized (LZF) {
            COMPRESS_LATER.put(c, c);
        }
    }

    // Return the page as an uncompressed BLOCK_SIZE array, expanding a
    // compressed page if necessary. NOTE(review): assumes 'page' is in range
    // (getPage must not return null here) — callers only pass in-range pages.
    private byte[] expand(int page) {
        byte[] d = getPage(page);
        if (d.length == BLOCK_SIZE) {
            return d;
        }
        byte[] out = new byte[BLOCK_SIZE];
        if (d != COMPRESSED_EMPTY_BLOCK) {
            synchronized (LZF) {
                LZF.expand(d, 0, d.length, out, 0, BLOCK_SIZE);
            }
        }
        setPage(page, d, out, false);
        return out;
    }

    /**
     * Compress the data in a byte array.
     *
     * @param page which page to compress
     */
    void compress(int page) {
        byte[] old = getPage(page);
        if (old == null || old.length != BLOCK_SIZE) {
            // not yet initialized or already compressed
            return;
        }
        synchronized (LZF) {
            int len = LZF.compress(old, 0, BLOCK_SIZE, BUFFER, 0);
            if (len <= BLOCK_SIZE) {
                byte[] d = Arrays.copyOf(BUFFER, len);
                // maybe data was changed in the meantime
                setPage(page, old, d, false);
            }
        }
    }

    /**
     * Update the last modified time.
     *
     * @param openReadOnly if the file was opened in read-only mode
     */
    void touch(boolean openReadOnly) {
        if (isReadOnly || openReadOnly) {
            throw new NonWritableChannelException();
        }
        lastModified = System.currentTimeMillis();
    }

    /**
     * Get the file length.
     *
     * @return the length
     */
    long length() {
        return length;
    }

    /**
     * Truncate the file.
     *
     * @param newLength the new length
     */
    void truncate(long newLength) {
        changeLength(newLength);
        long end = MathUtils.roundUpLong(newLength, BLOCK_SIZE);
        if (end != newLength) {
            // zero out the tail of the (kept) last partial block
            int lastPage = (int) (newLength >>> BLOCK_SIZE_SHIFT);
            byte[] d = expand(lastPage);
            byte[] d2 = Arrays.copyOf(d, d.length);
            for (int i = (int) (newLength & BLOCK_SIZE_MASK); i < BLOCK_SIZE; i++) {
                d2[i] = 0;
            }
            setPage(lastPage, d, d2, true);
            if (compress) {
                compressLater(lastPage);
            }
        }
    }

    // Resize the page array to cover 'len' bytes; new pages start out as the
    // shared compressed-empty-block singleton.
    private void changeLength(long len) {
        length = len;
        len = MathUtils.roundUpLong(len, BLOCK_SIZE);
        int blocks = (int) (len >>> BLOCK_SIZE_SHIFT);
        if (blocks != data.length) {
            AtomicReference<byte[]>[] n = Arrays.copyOf(data, blocks);
            for (int i = data.length; i < blocks; i++) {
                n[i] = new AtomicReference<>(COMPRESSED_EMPTY_BLOCK);
            }
            data = n;
        }
    }

    /**
     * Read or write.
     *
     * @param pos the position
     * @param b the byte array
     * @param off the offset within the byte array
     * @param len the number of bytes
     * @param write true for writing
     * @return the new position
     */
    long readWrite(long pos, byte[] b, int off, int len, boolean write) {
        long end = pos + len;
        if (end > length) {
            if (write) {
                changeLength(end);
            } else {
                // reads are clipped at the end of the file
                len = (int) (length - pos);
            }
        }
        while (len > 0) {
            int l = (int) Math.min(len, BLOCK_SIZE - (pos & BLOCK_SIZE_MASK));
            int page = (int) (pos >>> BLOCK_SIZE_SHIFT);
            byte[] block = expand(page);
            int blockOffset = (int) (pos & BLOCK_SIZE_MASK);
            if (write) {
                // copy-on-write so concurrent readers see a consistent page
                byte[] p2 = Arrays.copyOf(block, block.length);
                System.arraycopy(b, off, p2, blockOffset, l);
                setPage(page, block, p2, true);
            } else {
                System.arraycopy(block, blockOffset, b, off, l);
            }
            if (compress) {
                compressLater(page);
            }
            off += l;
            pos += l;
            len -= l;
        }
        return pos;
    }

    /**
     * Set the file name.
     *
     * @param name the name
     */
    void setName(String name) {
        this.name = name;
    }

    /**
     * Get the file name
     *
     * @return the name
     */
    String getName() {
        return name;
    }

    /**
     * Get the last modified time.
     *
     * @return the time
     */
    long getLastModified() {
        return lastModified;
    }

    /**
     * Check whether writing is allowed.
     *
     * @return true if it is
     */
    boolean canWrite() {
        return !isReadOnly;
    }

    /**
     * Set the read-only flag.
     *
     * @return true
     */
    boolean setReadOnly() {
        isReadOnly = true;
        return true;
    }

}
\ No newline at end of file
diff --git a/h2/src/main/org/h2/store/fs/mem/FilePathMem.java b/h2/src/main/org/h2/store/fs/mem/FilePathMem.java
new file mode 100644
index 0000000000..502f321f14
--- /dev/null
+++ b/h2/src/main/org/h2/store/fs/mem/FilePathMem.java
@@ -0,0 +1,213 @@
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.store.fs.mem;

import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import org.h2.api.ErrorCode;
import org.h2.message.DbException;
import org.h2.store.fs.FilePath;

/**
 * This file system keeps files fully in memory. There is an option to compress
 * file blocks to save memory.
+ */ +public class FilePathMem extends FilePath { + + private static final TreeMap MEMORY_FILES = + new TreeMap<>(); + private static final FileMemData DIRECTORY = new FileMemData("", false); + + @Override + public FilePathMem getPath(String path) { + FilePathMem p = new FilePathMem(); + p.name = getCanonicalPath(path); + return p; + } + + @Override + public long size() { + return getMemoryFile().length(); + } + + @Override + public void moveTo(FilePath newName, boolean atomicReplace) { + synchronized (MEMORY_FILES) { + if (!atomicReplace && !newName.name.equals(name) && + MEMORY_FILES.containsKey(newName.name)) { + throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName + " (exists)"); + } + FileMemData f = getMemoryFile(); + f.setName(newName.name); + MEMORY_FILES.remove(name); + MEMORY_FILES.put(newName.name, f); + } + } + + @Override + public boolean createFile() { + synchronized (MEMORY_FILES) { + if (exists()) { + return false; + } + getMemoryFile(); + } + return true; + } + + @Override + public boolean exists() { + if (isRoot()) { + return true; + } + synchronized (MEMORY_FILES) { + return MEMORY_FILES.get(name) != null; + } + } + + @Override + public void delete() { + if (isRoot()) { + return; + } + synchronized (MEMORY_FILES) { + FileMemData old = MEMORY_FILES.remove(name); + if (old != null) { + old.truncate(0); + } + } + } + + @Override + public List newDirectoryStream() { + ArrayList list = new ArrayList<>(); + synchronized (MEMORY_FILES) { + for (String n : MEMORY_FILES.tailMap(name).keySet()) { + if (n.startsWith(name)) { + if (!n.equals(name) && n.indexOf('/', name.length() + 1) < 0) { + list.add(getPath(n)); + } + } else { + break; + } + } + return list; + } + } + + @Override + public boolean setReadOnly() { + return getMemoryFile().setReadOnly(); + } + + @Override + public boolean canWrite() { + return getMemoryFile().canWrite(); + } + + @Override + public FilePathMem getParent() { + int idx = name.lastIndexOf('/'); + return idx < 0 ? 
null : getPath(name.substring(0, idx)); + } + + @Override + public boolean isDirectory() { + if (isRoot()) { + return true; + } + synchronized (MEMORY_FILES) { + FileMemData d = MEMORY_FILES.get(name); + return d == DIRECTORY; + } + } + + @Override + public boolean isAbsolute() { + // TODO relative files are not supported + return true; + } + + @Override + public FilePathMem toRealPath() { + return this; + } + + @Override + public long lastModified() { + return getMemoryFile().getLastModified(); + } + + @Override + public void createDirectory() { + if (exists()) { + throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, + name + " (a file with this name already exists)"); + } + synchronized (MEMORY_FILES) { + MEMORY_FILES.put(name, DIRECTORY); + } + } + + @Override + public FileChannel open(String mode) { + FileMemData obj = getMemoryFile(); + return new FileMem(obj, "r".equals(mode)); + } + + private FileMemData getMemoryFile() { + synchronized (MEMORY_FILES) { + FileMemData m = MEMORY_FILES.get(name); + if (m == DIRECTORY) { + throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, + name + " (a directory with this name already exists)"); + } + if (m == null) { + m = new FileMemData(name, compressed()); + MEMORY_FILES.put(name, m); + } + return m; + } + } + + private boolean isRoot() { + return name.equals(getScheme() + ":"); + } + + /** + * Get the canonical path for this file name. + * + * @param fileName the file name + * @return the canonical path + */ + protected static String getCanonicalPath(String fileName) { + fileName = fileName.replace('\\', '/'); + int idx = fileName.indexOf(':') + 1; + if (fileName.length() > idx && fileName.charAt(idx) != '/') { + fileName = fileName.substring(0, idx) + "/" + fileName.substring(idx); + } + return fileName; + } + + @Override + public String getScheme() { + return "memFS"; + } + + /** + * Whether the file should be compressed. + * + * @return if it should be compressed. 
+ */ + boolean compressed() { + return false; + } + +} + + diff --git a/h2/src/main/org/h2/store/fs/mem/FilePathMemLZF.java b/h2/src/main/org/h2/store/fs/mem/FilePathMemLZF.java new file mode 100644 index 0000000000..19c7abae56 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/mem/FilePathMemLZF.java @@ -0,0 +1,30 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.mem; + +/** + * A memory file system that compresses blocks to conserve memory. + */ +public class FilePathMemLZF extends FilePathMem { + + @Override + public FilePathMem getPath(String path) { + FilePathMemLZF p = new FilePathMemLZF(); + p.name = getCanonicalPath(path); + return p; + } + + @Override + boolean compressed() { + return true; + } + + @Override + public String getScheme() { + return "memLZF"; + } + +} \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/mem/package.html b/h2/src/main/org/h2/store/fs/mem/package.html new file mode 100644 index 0000000000..3858793bf6 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/mem/package.html @@ -0,0 +1,15 @@ + + + + +Javadoc package documentation +

          + +This file system keeps files fully in memory. +There is an option to compress file blocks to save memory. + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/FilePathNioMapped.java b/h2/src/main/org/h2/store/fs/niomapped/FileNioMapped.java similarity index 65% rename from h2/src/main/org/h2/store/fs/FilePathNioMapped.java rename to h2/src/main/org/h2/store/fs/niomapped/FileNioMapped.java index 9f8ecfa728..2ea73ddc09 100644 --- a/h2/src/main/org/h2/store/fs/FilePathNioMapped.java +++ b/h2/src/main/org/h2/store/fs/niomapped/FileNioMapped.java @@ -1,13 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.store.fs; +package org.h2.store.fs.niomapped; import java.io.EOFException; import java.io.IOException; -import java.io.RandomAccessFile; import java.lang.ref.WeakReference; import java.nio.BufferUnderflowException; import java.nio.ByteBuffer; @@ -15,48 +14,25 @@ import java.nio.channels.FileChannel; import java.nio.channels.FileLock; import java.nio.channels.NonWritableChannelException; -import java.util.concurrent.TimeUnit; - +import java.nio.file.Paths; import org.h2.engine.SysProperties; +import org.h2.store.fs.FileBaseDefault; +import org.h2.store.fs.FileUtils; import org.h2.util.MemoryUnmapper; -/** - * This file system stores files on disk and uses java.nio to access the files. - * This class used memory mapped files. - */ -public class FilePathNioMapped extends FilePathNio { - - @Override - public FileChannel open(String mode) throws IOException { - return new FileNioMapped(name.substring(getScheme().length() + 1), mode); - } - - @Override - public String getScheme() { - return "nioMapped"; - } - -} - /** * Uses memory mapped files. * The file size is limited to 2 GB. 
*/ -class FileNioMapped extends FileBase { +class FileNioMapped extends FileBaseDefault { - private static final long GC_TIMEOUT_MS = 10_000; + private static final int GC_TIMEOUT_MS = 10_000; private final String name; private final MapMode mode; - private RandomAccessFile file; + private FileChannel channel; private MappedByteBuffer mapped; private long fileLength; - /** - * The position within the file. Can't use the position of the mapped buffer - * because it doesn't support seeking past the end of the file. - */ - private int pos; - FileNioMapped(String fileName, String mode) throws IOException { if ("r".equals(mode)) { this.mode = MapMode.READ_ONLY; @@ -64,7 +40,7 @@ class FileNioMapped extends FileBase { this.mode = MapMode.READ_WRITE; } this.name = fileName; - file = new RandomAccessFile(fileName, mode); + channel = FileChannel.open(Paths.get(fileName), FileUtils.modeToOptions(mode), FileUtils.NO_ATTRIBUTES); reMap(); } @@ -76,7 +52,7 @@ private void unMap() throws IOException { mapped.force(); // need to dispose old direct buffer, see bug - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4724038 + // https://bugs.openjdk.java.net/browse/JDK-4724038 if (SysProperties.NIO_CLEANER_HACK) { if (MemoryUnmapper.unmap(mapped)) { @@ -86,11 +62,10 @@ private void unMap() throws IOException { } WeakReference bufferWeakRef = new WeakReference<>(mapped); mapped = null; - long start = System.nanoTime(); + long stopAt = System.nanoTime() + GC_TIMEOUT_MS * 1_000_000L; while (bufferWeakRef.get() != null) { - if (System.nanoTime() - start > TimeUnit.MILLISECONDS.toNanos(GC_TIMEOUT_MS)) { - throw new IOException("Timeout (" + GC_TIMEOUT_MS - + " ms) reached while trying to GC mapped buffer"); + if (System.nanoTime() - stopAt > 0L) { + throw new IOException("Timeout (" + GC_TIMEOUT_MS + " ms) reached while trying to GC mapped buffer"); } System.gc(); Thread.yield(); @@ -102,15 +77,13 @@ private void unMap() throws IOException { * was created. 
*/ private void reMap() throws IOException { - int oldPos = 0; if (mapped != null) { - oldPos = pos; unMap(); } - fileLength = file.length(); + fileLength = channel.size(); checkFileSizeLimit(fileLength); // maps new MappedByteBuffer; the old one is disposed during GC - mapped = file.getChannel().map(mode, 0, fileLength); + mapped = channel.map(mode, 0, fileLength); int limit = mapped.limit(); int capacity = mapped.capacity(); if (limit < fileLength || capacity < fileLength) { @@ -120,7 +93,6 @@ private void reMap() throws IOException { if (SysProperties.NIO_LOAD_MAPPED) { mapped.load(); } - this.pos = Math.min(oldPos, (int) fileLength); } private static void checkFileSizeLimit(long length) throws IOException { @@ -132,18 +104,13 @@ private static void checkFileSizeLimit(long length) throws IOException { @Override public void implCloseChannel() throws IOException { - if (file != null) { + if (channel != null) { unMap(); - file.close(); - file = null; + channel.close(); + channel = null; } } - @Override - public long position() { - return pos; - } - @Override public String toString() { return "nioMapped:" + name; @@ -155,7 +122,8 @@ public synchronized long size() throws IOException { } @Override - public synchronized int read(ByteBuffer dst) throws IOException { + public synchronized int read(ByteBuffer dst, long pos) throws IOException { + checkFileSizeLimit(pos); try { int len = dst.remaining(); if (len == 0) { @@ -165,7 +133,7 @@ public synchronized int read(ByteBuffer dst) throws IOException { if (len <= 0) { return -1; } - mapped.position(pos); + mapped.position((int)pos); mapped.get(dst.array(), dst.arrayOffset() + dst.position(), len); dst.position(dst.position() + len); pos += len; @@ -178,14 +146,7 @@ public synchronized int read(ByteBuffer dst) throws IOException { } @Override - public FileChannel position(long pos) throws IOException { - checkFileSizeLimit(pos); - this.pos = (int) pos; - return this; - } - - @Override - public synchronized FileChannel 
truncate(long newLength) throws IOException { + protected void implTruncate(long newLength) throws IOException { // compatibility with JDK FileChannel#truncate if (mode == MapMode.READ_ONLY) { throw new NonWritableChannelException(); @@ -193,16 +154,22 @@ public synchronized FileChannel truncate(long newLength) throws IOException { if (newLength < size()) { setFileLength(newLength); } - return this; } public synchronized void setFileLength(long newLength) throws IOException { + if (mode == MapMode.READ_ONLY) { + throw new NonWritableChannelException(); + } checkFileSizeLimit(newLength); - int oldPos = pos; unMap(); for (int i = 0;; i++) { try { - file.setLength(newLength); + long length = channel.size(); + if (length >= newLength) { + channel.truncate(newLength); + } else { + channel.write(ByteBuffer.wrap(new byte[1]), newLength - 1); + } break; } catch (IOException e) { if (i > 16 || !e.toString().contains("user-mapped section open")) { @@ -212,32 +179,31 @@ public synchronized void setFileLength(long newLength) throws IOException { System.gc(); } reMap(); - pos = (int) Math.min(newLength, oldPos); } @Override public void force(boolean metaData) throws IOException { mapped.force(); - file.getFD().sync(); + channel.force(metaData); } @Override - public synchronized int write(ByteBuffer src) throws IOException { + public synchronized int write(ByteBuffer src, long position) throws IOException { + checkFileSizeLimit(position); int len = src.remaining(); // check if need to expand file - if (mapped.capacity() < pos + len) { - setFileLength(pos + len); + if (mapped.capacity() < position + len) { + setFileLength(position + len); } - mapped.position(pos); + mapped.position((int)position); mapped.put(src); - pos += len; return len; } @Override public synchronized FileLock tryLock(long position, long size, boolean shared) throws IOException { - return file.getChannel().tryLock(position, size, shared); + return channel.tryLock(position, size, shared); } -} +} \ No newline 
at end of file diff --git a/h2/src/main/org/h2/store/fs/niomapped/FilePathNioMapped.java b/h2/src/main/org/h2/store/fs/niomapped/FilePathNioMapped.java new file mode 100644 index 0000000000..2479478f90 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/niomapped/FilePathNioMapped.java @@ -0,0 +1,28 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.niomapped; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import org.h2.store.fs.FilePathWrapper; + +/** + * This file system stores files on disk and uses java.nio to access the files. + * This class used memory mapped files. + */ +public class FilePathNioMapped extends FilePathWrapper { + + @Override + public FileChannel open(String mode) throws IOException { + return new FileNioMapped(name.substring(getScheme().length() + 1), mode); + } + + @Override + public String getScheme() { + return "nioMapped"; + } + +} diff --git a/h2/src/main/org/h2/store/fs/niomapped/package.html b/h2/src/main/org/h2/store/fs/niomapped/package.html new file mode 100644 index 0000000000..ef22adf716 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/niomapped/package.html @@ -0,0 +1,15 @@ + + + + +Javadoc package documentation +

          + +This file system stores files on disk and uses java.nio to access the files. +This class used memory mapped files. + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/niomem/FileNioMem.java b/h2/src/main/org/h2/store/fs/niomem/FileNioMem.java new file mode 100644 index 0000000000..5bc4ad22e6 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/niomem/FileNioMem.java @@ -0,0 +1,131 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.niomem; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.FileLock; +import java.nio.channels.NonWritableChannelException; +import org.h2.store.fs.FakeFileChannel; +import org.h2.store.fs.FileBaseDefault; + +/** + * This class represents an in-memory file. + */ +class FileNioMem extends FileBaseDefault { + + /** + * The file data. + */ + final FileNioMemData data; + + private final boolean readOnly; + private volatile boolean closed; + + FileNioMem(FileNioMemData data, boolean readOnly) { + this.data = data; + this.readOnly = readOnly; + } + + @Override + public long size() { + return data.length(); + } + + @Override + protected void implTruncate(long newLength) throws IOException { + // compatibility with JDK FileChannel#truncate + if (readOnly) { + throw new NonWritableChannelException(); + } + if (closed) { + throw new ClosedChannelException(); + } + if (newLength < size()) { + data.touch(readOnly); + data.truncate(newLength); + } + } + + @Override + public int write(ByteBuffer src, long position) throws IOException { + if (closed) { + throw new ClosedChannelException(); + } + data.touch(readOnly); + // offset is 0 because we start writing from src.position() + long newPosition = data.readWrite(position, src, 0, src.remaining(), true); + int len = (int)(newPosition - position); + src.position(src.position() + len); + return len; + } + + @Override + public int read(ByteBuffer dst, long 
position) throws IOException { + if (closed) { + throw new ClosedChannelException(); + } + int len = dst.remaining(); + if (len == 0) { + return 0; + } + long newPos; + newPos = data.readWrite(position, dst, dst.position(), len, false); + len = (int) (newPos - position); + if (len <= 0) { + return -1; + } + dst.position(dst.position() + len); + return len; + } + + @Override + public void implCloseChannel() throws IOException { + closed = true; + } + + @Override + public void force(boolean metaData) throws IOException { + // do nothing + } + + @Override + public FileLock tryLock(long position, long size, + boolean shared) throws IOException { + if (closed) { + throw new ClosedChannelException(); + } + if (shared) { + if (!data.lockShared()) { + return null; + } + } else { + if (!data.lockExclusive()) { + return null; + } + } + + return new FileLock(FakeFileChannel.INSTANCE, position, size, shared) { + + @Override + public boolean isValid() { + return true; + } + + @Override + public void release() throws IOException { + data.unlock(); + } + }; + } + + @Override + public String toString() { + return closed ? "" : data.getName(); + } + +} \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/niomem/FileNioMemData.java b/h2/src/main/org/h2/store/fs/niomem/FileNioMemData.java new file mode 100644 index 0000000000..e98f7d81cd --- /dev/null +++ b/h2/src/main/org/h2/store/fs/niomem/FileNioMemData.java @@ -0,0 +1,394 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.store.fs.niomem; + +import java.nio.ByteBuffer; +import java.nio.channels.NonWritableChannelException; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.h2.compress.CompressLZF; +import org.h2.util.MathUtils; + +/** + * This class contains the data of an in-memory random access file. + * Data compression using the LZF algorithm is supported as well. + */ +class FileNioMemData { + + private static final int CACHE_MIN_SIZE = 8; + private static final int BLOCK_SIZE_SHIFT = 16; + + private static final int BLOCK_SIZE = 1 << BLOCK_SIZE_SHIFT; + private static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1; + private static final ByteBuffer COMPRESSED_EMPTY_BLOCK; + + private static final ThreadLocal LZF_THREAD_LOCAL = ThreadLocal.withInitial(CompressLZF::new); + + /** the output buffer when compressing */ + private static final ThreadLocal COMPRESS_OUT_BUF_THREAD_LOCAL = ThreadLocal + .withInitial(() -> new byte[BLOCK_SIZE * 2]); + + /** + * The hash code of the name. 
+ */ + final int nameHashCode; + + private final CompressLaterCache compressLaterCache = + new CompressLaterCache<>(CACHE_MIN_SIZE); + + private String name; + private final boolean compress; + private final float compressLaterCachePercent; + private volatile long length; + private AtomicReference[] buffers; + private long lastModified; + private boolean isReadOnly; + private boolean isLockedExclusive; + private int sharedLockCount; + private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock(); + + static { + final byte[] n = new byte[BLOCK_SIZE]; + final byte[] output = new byte[BLOCK_SIZE * 2]; + int len = new CompressLZF().compress(n, 0, BLOCK_SIZE, output, 0); + COMPRESSED_EMPTY_BLOCK = ByteBuffer.allocateDirect(len); + COMPRESSED_EMPTY_BLOCK.put(output, 0, len); + } + + @SuppressWarnings("unchecked") + FileNioMemData(String name, boolean compress, float compressLaterCachePercent) { + this.name = name; + this.nameHashCode = name.hashCode(); + this.compress = compress; + this.compressLaterCachePercent = compressLaterCachePercent; + buffers = new AtomicReference[0]; + lastModified = System.currentTimeMillis(); + } + + /** + * Lock the file in exclusive mode if possible. + * + * @return if locking was successful + */ + synchronized boolean lockExclusive() { + if (sharedLockCount > 0 || isLockedExclusive) { + return false; + } + isLockedExclusive = true; + return true; + } + + /** + * Lock the file in shared mode if possible. + * + * @return if locking was successful + */ + synchronized boolean lockShared() { + if (isLockedExclusive) { + return false; + } + sharedLockCount++; + return true; + } + + /** + * Unlock the file. + */ + synchronized void unlock() { + if (isLockedExclusive) { + isLockedExclusive = false; + } else { + sharedLockCount = Math.max(0, sharedLockCount - 1); + } + } + + /** + * This small cache compresses the data if an element leaves the cache. 
+ */ + static class CompressLaterCache extends LinkedHashMap { + + private static final long serialVersionUID = 1L; + private int size; + + CompressLaterCache(int size) { + super(size, (float) 0.75, true); + this.size = size; + } + + @Override + public synchronized V put(K key, V value) { + return super.put(key, value); + } + + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + if (size() < size) { + return false; + } + CompressItem c = (CompressItem) eldest.getKey(); + c.data.compressPage(c.page); + return true; + } + + public void setCacheSize(int size) { + this.size = size; + } + } + + /** + * Represents a compressed item. + */ + static class CompressItem { + + /** + * The file data. + */ + public final FileNioMemData data; + + /** + * The page to compress. + */ + public final int page; + + public CompressItem(FileNioMemData data, int page) { + this.data = data; + this.page = page; + } + + @Override + public int hashCode() { + return page ^ data.nameHashCode; + } + + @Override + public boolean equals(Object o) { + if (o instanceof CompressItem) { + CompressItem c = (CompressItem) o; + return c.data == data && c.page == page; + } + return false; + } + + } + + private void addToCompressLaterCache(int page) { + CompressItem c = new CompressItem(this, page); + compressLaterCache.put(c, c); + } + + private ByteBuffer expandPage(int page) { + final ByteBuffer d = buffers[page].get(); + if (d.capacity() == BLOCK_SIZE) { + // already expanded, or not compressed + return d; + } + synchronized (d) { + if (d.capacity() == BLOCK_SIZE) { + return d; + } + ByteBuffer out = ByteBuffer.allocateDirect(BLOCK_SIZE); + if (d != COMPRESSED_EMPTY_BLOCK) { + d.position(0); + CompressLZF.expand(d, out); + } + buffers[page].compareAndSet(d, out); + return out; + } + } + + /** + * Compress the data in a byte array. 
+ * + * @param page which page to compress + */ + void compressPage(int page) { + final ByteBuffer d = buffers[page].get(); + synchronized (d) { + if (d.capacity() != BLOCK_SIZE) { + // already compressed + return; + } + final byte[] compressOutputBuffer = COMPRESS_OUT_BUF_THREAD_LOCAL.get(); + int len = LZF_THREAD_LOCAL.get().compress(d, 0, compressOutputBuffer, 0); + ByteBuffer out = ByteBuffer.allocateDirect(len); + out.put(compressOutputBuffer, 0, len); + buffers[page].compareAndSet(d, out); + } + } + + /** + * Update the last modified time. + * + * @param openReadOnly if the file was opened in read-only mode + */ + void touch(boolean openReadOnly) { + if (isReadOnly || openReadOnly) { + throw new NonWritableChannelException(); + } + lastModified = System.currentTimeMillis(); + } + + /** + * Get the file length. + * + * @return the length + */ + long length() { + return length; + } + + /** + * Truncate the file. + * + * @param newLength the new length + */ + void truncate(long newLength) { + rwLock.writeLock().lock(); + try { + changeLength(newLength); + long end = MathUtils.roundUpLong(newLength, BLOCK_SIZE); + if (end != newLength) { + int lastPage = (int) (newLength >>> BLOCK_SIZE_SHIFT); + ByteBuffer d = expandPage(lastPage); + for (int i = (int) (newLength & BLOCK_SIZE_MASK); i < BLOCK_SIZE; i++) { + d.put(i, (byte) 0); + } + if (compress) { + addToCompressLaterCache(lastPage); + } + } + } finally { + rwLock.writeLock().unlock(); + } + } + + @SuppressWarnings("unchecked") + private void changeLength(long len) { + length = len; + len = MathUtils.roundUpLong(len, BLOCK_SIZE); + int blocks = (int) (len >>> BLOCK_SIZE_SHIFT); + if (blocks != buffers.length) { + final AtomicReference[] newBuffers = new AtomicReference[blocks]; + System.arraycopy(buffers, 0, newBuffers, 0, + Math.min(buffers.length, newBuffers.length)); + for (int i = buffers.length; i < blocks; i++) { + newBuffers[i] = new AtomicReference<>(COMPRESSED_EMPTY_BLOCK); + } + buffers = newBuffers; + 
} + compressLaterCache.setCacheSize(Math.max(CACHE_MIN_SIZE, (int) (blocks * + compressLaterCachePercent / 100))); + } + + /** + * Read or write. + * + * @param pos the position + * @param b the byte array + * @param off the offset within the byte array + * @param len the number of bytes + * @param write true for writing + * @return the new position + */ + long readWrite(long pos, ByteBuffer b, int off, int len, boolean write) { + final java.util.concurrent.locks.Lock lock = write ? rwLock.writeLock() + : rwLock.readLock(); + lock.lock(); + try { + + long end = pos + len; + if (end > length) { + if (write) { + changeLength(end); + } else { + len = (int) (length - pos); + } + } + while (len > 0) { + final int l = (int) Math.min(len, BLOCK_SIZE - (pos & BLOCK_SIZE_MASK)); + final int page = (int) (pos >>> BLOCK_SIZE_SHIFT); + final ByteBuffer block = expandPage(page); + int blockOffset = (int) (pos & BLOCK_SIZE_MASK); + if (write) { + final ByteBuffer srcTmp = b.slice(); + final ByteBuffer dstTmp = block.duplicate(); + srcTmp.position(off); + srcTmp.limit(off + l); + dstTmp.position(blockOffset); + dstTmp.put(srcTmp); + } else { + // duplicate, so this can be done concurrently + final ByteBuffer tmp = block.duplicate(); + tmp.position(blockOffset); + tmp.limit(l + blockOffset); + int oldPosition = b.position(); + b.position(off); + b.put(tmp); + // restore old position + b.position(oldPosition); + } + if (compress) { + addToCompressLaterCache(page); + } + off += l; + pos += l; + len -= l; + } + return pos; + } finally { + lock.unlock(); + } + } + + /** + * Set the file name. + * + * @param name the name + */ + void setName(String name) { + this.name = name; + } + + /** + * Get the file name + * + * @return the name + */ + String getName() { + return name; + } + + /** + * Get the last modified time. + * + * @return the time + */ + long getLastModified() { + return lastModified; + } + + /** + * Check whether writing is allowed. 
+ * + * @return true if it is + */ + boolean canWrite() { + return !isReadOnly; + } + + /** + * Set the read-only flag. + * + * @return true + */ + boolean setReadOnly() { + isReadOnly = true; + return true; + } + +} \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/niomem/FilePathNioMem.java b/h2/src/main/org/h2/store/fs/niomem/FilePathNioMem.java new file mode 100644 index 0000000000..ed23c6fb9f --- /dev/null +++ b/h2/src/main/org/h2/store/fs/niomem/FilePathNioMem.java @@ -0,0 +1,208 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.niomem; + +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.List; +import java.util.TreeMap; +import org.h2.api.ErrorCode; +import org.h2.message.DbException; +import org.h2.store.fs.FilePath; + +/** + * This file system keeps files fully in off-java-heap memory. There is an option to compress + * file blocks to save memory. + */ +public class FilePathNioMem extends FilePath { + + private static final TreeMap MEMORY_FILES = + new TreeMap<>(); + + /** + * The percentage of uncompressed (cached) entries. 
+ */ + float compressLaterCachePercent = 1; + + @Override + public FilePathNioMem getPath(String path) { + FilePathNioMem p = new FilePathNioMem(); + p.name = getCanonicalPath(path); + return p; + } + + @Override + public long size() { + return getMemoryFile().length(); + } + + @Override + public void moveTo(FilePath newName, boolean atomicReplace) { + synchronized (MEMORY_FILES) { + if (!atomicReplace && !name.equals(newName.name) && + MEMORY_FILES.containsKey(newName.name)) { + throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName + " (exists)"); + } + FileNioMemData f = getMemoryFile(); + f.setName(newName.name); + MEMORY_FILES.remove(name); + MEMORY_FILES.put(newName.name, f); + } + } + + @Override + public boolean createFile() { + synchronized (MEMORY_FILES) { + if (exists()) { + return false; + } + getMemoryFile(); + } + return true; + } + + @Override + public boolean exists() { + if (isRoot()) { + return true; + } + synchronized (MEMORY_FILES) { + return MEMORY_FILES.get(name) != null; + } + } + + @Override + public void delete() { + if (isRoot()) { + return; + } + synchronized (MEMORY_FILES) { + MEMORY_FILES.remove(name); + } + } + + @Override + public List newDirectoryStream() { + ArrayList list = new ArrayList<>(); + synchronized (MEMORY_FILES) { + for (String n : MEMORY_FILES.tailMap(name).keySet()) { + if (n.startsWith(name)) { + list.add(getPath(n)); + } else { + break; + } + } + return list; + } + } + + @Override + public boolean setReadOnly() { + return getMemoryFile().setReadOnly(); + } + + @Override + public boolean canWrite() { + return getMemoryFile().canWrite(); + } + + @Override + public FilePathNioMem getParent() { + int idx = name.lastIndexOf('/'); + return idx < 0 ? 
null : getPath(name.substring(0, idx)); + } + + @Override + public boolean isDirectory() { + if (isRoot()) { + return true; + } + // TODO in memory file system currently + // does not really support directories + synchronized (MEMORY_FILES) { + return MEMORY_FILES.get(name) == null; + } + } + + @Override + public boolean isAbsolute() { + // TODO relative files are not supported + return true; + } + + @Override + public FilePathNioMem toRealPath() { + return this; + } + + @Override + public long lastModified() { + return getMemoryFile().getLastModified(); + } + + @Override + public void createDirectory() { + if (exists() && isDirectory()) { + throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, + name + " (a file with this name already exists)"); + } + // TODO directories are not really supported + } + + @Override + public FileChannel open(String mode) { + FileNioMemData obj = getMemoryFile(); + return new FileNioMem(obj, "r".equals(mode)); + } + + private FileNioMemData getMemoryFile() { + synchronized (MEMORY_FILES) { + FileNioMemData m = MEMORY_FILES.get(name); + if (m == null) { + m = new FileNioMemData(name, compressed(), compressLaterCachePercent); + MEMORY_FILES.put(name, m); + } + return m; + } + } + + protected boolean isRoot() { + return name.equals(getScheme() + ":"); + } + + /** + * Get the canonical path of a file (with backslashes replaced with forward + * slashes). + * + * @param fileName the file name + * @return the canonical path + */ + protected static String getCanonicalPath(String fileName) { + fileName = fileName.replace('\\', '/'); + int idx = fileName.lastIndexOf(':') + 1; + if (fileName.length() > idx && fileName.charAt(idx) != '/') { + fileName = fileName.substring(0, idx) + "/" + fileName.substring(idx); + } + return fileName; + } + + @Override + public String getScheme() { + return "nioMemFS"; + } + + /** + * Whether the file should be compressed. + * + * @return true if it should be compressed. 
+ */ + boolean compressed() { + return false; + } + +} + + diff --git a/h2/src/main/org/h2/store/fs/niomem/FilePathNioMemLZF.java b/h2/src/main/org/h2/store/fs/niomem/FilePathNioMemLZF.java new file mode 100644 index 0000000000..7ef048fd9f --- /dev/null +++ b/h2/src/main/org/h2/store/fs/niomem/FilePathNioMemLZF.java @@ -0,0 +1,44 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.niomem; + +/** + * A memory file system that compresses blocks to conserve memory. + */ +public class FilePathNioMemLZF extends FilePathNioMem { + + @Override + boolean compressed() { + return true; + } + + @Override + public FilePathNioMem getPath(String path) { + if (!path.startsWith(getScheme())) { + throw new IllegalArgumentException(path + + " doesn't start with " + getScheme()); + } + int idx1 = path.indexOf(':'); + int idx2 = path.lastIndexOf(':'); + final FilePathNioMemLZF p = new FilePathNioMemLZF(); + if (idx1 != -1 && idx1 != idx2) { + p.compressLaterCachePercent = Float.parseFloat(path.substring(idx1 + 1, idx2)); + } + p.name = getCanonicalPath(path); + return p; + } + + @Override + protected boolean isRoot() { + return name.lastIndexOf(':') == name.length() - 1; + } + + @Override + public String getScheme() { + return "nioMemLZF"; + } + +} \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/niomem/package.html b/h2/src/main/org/h2/store/fs/niomem/package.html new file mode 100644 index 0000000000..6197af1edc --- /dev/null +++ b/h2/src/main/org/h2/store/fs/niomem/package.html @@ -0,0 +1,15 @@ + + + + +Javadoc package documentation +

          + +This file system keeps files fully in off-java-heap memory. +There is an option to compress file blocks to save memory. + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/package.html b/h2/src/main/org/h2/store/fs/package.html index ac244829a5..1797c0eb3f 100644 --- a/h2/src/main/org/h2/store/fs/package.html +++ b/h2/src/main/org/h2/store/fs/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/store/fs/rec/FilePathRec.java b/h2/src/main/org/h2/store/fs/rec/FilePathRec.java new file mode 100644 index 0000000000..14c63bbfb0 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/rec/FilePathRec.java @@ -0,0 +1,119 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.rec; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.channels.FileChannel; +import org.h2.store.fs.FilePath; +import org.h2.store.fs.FilePathWrapper; +import org.h2.store.fs.Recorder; + +/** + * A file system that records all write operations and can re-play them. + */ +public class FilePathRec extends FilePathWrapper { + + private static final FilePathRec INSTANCE = new FilePathRec(); + + private static Recorder recorder; + + private boolean trace; + + /** + * Register the file system. + */ + public static void register() { + FilePath.register(INSTANCE); + } + + /** + * Set the recorder class. 
+ * + * @param recorder the recorder + */ + public static void setRecorder(Recorder recorder) { + FilePathRec.recorder = recorder; + } + + @Override + public boolean createFile() { + log(Recorder.CREATE_NEW_FILE, name); + return super.createFile(); + } + + @Override + public FilePath createTempFile(String suffix, boolean inTempDir) throws IOException { + log(Recorder.CREATE_TEMP_FILE, unwrap(name) + ":" + suffix + ":" + inTempDir); + return super.createTempFile(suffix, inTempDir); + } + + @Override + public void delete() { + log(Recorder.DELETE, name); + super.delete(); + } + + @Override + public FileChannel open(String mode) throws IOException { + return new FileRec(this, super.open(mode), name); + } + + @Override + public OutputStream newOutputStream(boolean append) throws IOException { + log(Recorder.OPEN_OUTPUT_STREAM, name); + return super.newOutputStream(append); + } + + @Override + public void moveTo(FilePath newPath, boolean atomicReplace) { + log(Recorder.RENAME, unwrap(name) + ":" + unwrap(newPath.name)); + super.moveTo(newPath, atomicReplace); + } + + public boolean isTrace() { + return trace; + } + + public void setTrace(boolean trace) { + this.trace = trace; + } + + /** + * Log the operation. + * + * @param op the operation + * @param fileName the file name(s) + */ + void log(int op, String fileName) { + log(op, fileName, null, 0); + } + + /** + * Log the operation. + * + * @param op the operation + * @param fileName the file name + * @param data the data or null + * @param x the value or 0 + */ + void log(int op, String fileName, byte[] data, long x) { + if (recorder != null) { + recorder.log(op, fileName, data, x); + } + } + + /** + * Get the prefix for this file system. 
+ * + * @return the prefix + */ + @Override + public String getScheme() { + return "rec"; + } + +} diff --git a/h2/src/main/org/h2/store/fs/FilePathRec.java b/h2/src/main/org/h2/store/fs/rec/FileRec.java similarity index 51% rename from h2/src/main/org/h2/store/fs/FilePathRec.java rename to h2/src/main/org/h2/store/fs/rec/FileRec.java index 369e4af253..3bee02e3ab 100644 --- a/h2/src/main/org/h2/store/fs/FilePathRec.java +++ b/h2/src/main/org/h2/store/fs/rec/FileRec.java @@ -1,122 +1,17 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.store.fs; +package org.h2.store.fs.rec; import java.io.IOException; -import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.channels.FileLock; import java.util.Arrays; - -/** - * A file system that records all write operations and can re-play them. - */ -public class FilePathRec extends FilePathWrapper { - - private static final FilePathRec INSTANCE = new FilePathRec(); - - private static Recorder recorder; - - private boolean trace; - - /** - * Register the file system. - */ - public static void register() { - FilePath.register(INSTANCE); - } - - /** - * Set the recorder class. 
- * - * @param recorder the recorder - */ - public static void setRecorder(Recorder recorder) { - FilePathRec.recorder = recorder; - } - - @Override - public boolean createFile() { - log(Recorder.CREATE_NEW_FILE, name); - return super.createFile(); - } - - @Override - public FilePath createTempFile(String suffix, boolean inTempDir) throws IOException { - log(Recorder.CREATE_TEMP_FILE, unwrap(name) + ":" + suffix + ":" + inTempDir); - return super.createTempFile(suffix, inTempDir); - } - - @Override - public void delete() { - log(Recorder.DELETE, name); - super.delete(); - } - - @Override - public FileChannel open(String mode) throws IOException { - return new FileRec(this, super.open(mode), name); - } - - @Override - public OutputStream newOutputStream(boolean append) throws IOException { - log(Recorder.OPEN_OUTPUT_STREAM, name); - return super.newOutputStream(append); - } - - @Override - public void moveTo(FilePath newPath, boolean atomicReplace) { - log(Recorder.RENAME, unwrap(name) + ":" + unwrap(newPath.name)); - super.moveTo(newPath, atomicReplace); - } - - public boolean isTrace() { - return trace; - } - - public void setTrace(boolean trace) { - this.trace = trace; - } - - /** - * Log the operation. - * - * @param op the operation - * @param fileName the file name(s) - */ - void log(int op, String fileName) { - log(op, fileName, null, 0); - } - - /** - * Log the operation. - * - * @param op the operation - * @param fileName the file name - * @param data the data or null - * @param x the value or 0 - */ - void log(int op, String fileName, byte[] data, long x) { - if (recorder != null) { - recorder.log(op, fileName, data, x); - } - } - - /** - * Get the prefix for this file system. - * - * @return the prefix - */ - @Override - public String getScheme() { - return "rec"; - } - -} +import org.h2.store.fs.FileBase; +import org.h2.store.fs.Recorder; /** * A file object that records all write operations and can re-play them. 
@@ -213,4 +108,4 @@ public String toString() { return name; } -} +} \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/rec/package.html b/h2/src/main/org/h2/store/fs/rec/package.html new file mode 100644 index 0000000000..23ddc8dcca --- /dev/null +++ b/h2/src/main/org/h2/store/fs/rec/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

          + +A file system that records all write operations and can re-play them. + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/retry/FilePathRetryOnInterrupt.java b/h2/src/main/org/h2/store/fs/retry/FilePathRetryOnInterrupt.java new file mode 100644 index 0000000000..1279be117f --- /dev/null +++ b/h2/src/main/org/h2/store/fs/retry/FilePathRetryOnInterrupt.java @@ -0,0 +1,35 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.retry; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import org.h2.store.fs.FilePathWrapper; + +/** + * A file system that re-opens and re-tries the operation if the file was + * closed, because a thread was interrupted. This will clear the interrupt flag. + * It is mainly useful for applications that call Thread.interrupt by mistake. + */ +public class FilePathRetryOnInterrupt extends FilePathWrapper { + + /** + * The prefix. + */ + static final String SCHEME = "retry"; + + @Override + public FileChannel open(String mode) throws IOException { + return new FileRetryOnInterrupt(name.substring(getScheme().length() + 1), mode); + } + + @Override + public String getScheme() { + return SCHEME; + } + +} + diff --git a/h2/src/main/org/h2/store/fs/FilePathRetryOnInterrupt.java b/h2/src/main/org/h2/store/fs/retry/FileRetryOnInterrupt.java similarity index 87% rename from h2/src/main/org/h2/store/fs/FilePathRetryOnInterrupt.java rename to h2/src/main/org/h2/store/fs/retry/FileRetryOnInterrupt.java index 348bd72e04..e3508b2efb 100644 --- a/h2/src/main/org/h2/store/fs/FilePathRetryOnInterrupt.java +++ b/h2/src/main/org/h2/store/fs/retry/FileRetryOnInterrupt.java @@ -1,9 +1,9 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.store.fs; +package org.h2.store.fs.retry; import java.io.IOException; import java.nio.ByteBuffer; @@ -11,30 +11,8 @@ import java.nio.channels.ClosedChannelException; import java.nio.channels.FileChannel; import java.nio.channels.FileLock; - -/** - * A file system that re-opens and re-tries the operation if the file was - * closed, because a thread was interrupted. This will clear the interrupt flag. - * It is mainly useful for applications that call Thread.interrupt by mistake. - */ -public class FilePathRetryOnInterrupt extends FilePathWrapper { - - /** - * The prefix. - */ - static final String SCHEME = "retry"; - - @Override - public FileChannel open(String mode) throws IOException { - return new FileRetryOnInterrupt(name.substring(getScheme().length() + 1), mode); - } - - @Override - public String getScheme() { - return SCHEME; - } - -} +import org.h2.store.fs.FileBase; +import org.h2.store.fs.FileUtils; /** * A file object that re-opens and re-tries the operation if the file was @@ -253,5 +231,4 @@ public String toString() { return FilePathRetryOnInterrupt.SCHEME + ":" + fileName; } -} - +} \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/retry/package.html b/h2/src/main/org/h2/store/fs/retry/package.html new file mode 100644 index 0000000000..6908e6a5f5 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/retry/package.html @@ -0,0 +1,16 @@ + + + + +Javadoc package documentation +

          + +A file system that re-opens and re-tries the operation if the file was closed, because a thread was interrupted. +This will clear the interrupt flag. +It is mainly useful for applications that call Thread.interrupt by mistake. + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/split/FilePathSplit.java b/h2/src/main/org/h2/store/fs/split/FilePathSplit.java new file mode 100644 index 0000000000..7f3abb4573 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/split/FilePathSplit.java @@ -0,0 +1,242 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.split; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.SequenceInputStream; +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.List; + +import org.h2.engine.SysProperties; +import org.h2.message.DbException; +import org.h2.store.fs.FilePath; +import org.h2.store.fs.FilePathWrapper; + +/** + * A file system that may split files into multiple smaller files. + * (required for a FAT32 because it only support files up to 2 GB). 
+ */ +public class FilePathSplit extends FilePathWrapper { + + private static final String PART_SUFFIX = ".part"; + + @Override + protected String getPrefix() { + return getScheme() + ":" + parse(name)[0] + ":"; + } + + @Override + public FilePath unwrap(String fileName) { + return FilePath.get(parse(fileName)[1]); + } + + @Override + public boolean setReadOnly() { + boolean result = false; + for (int i = 0;; i++) { + FilePath f = getBase(i); + if (f.exists()) { + result = f.setReadOnly(); + } else { + break; + } + } + return result; + } + + @Override + public void delete() { + for (int i = 0;; i++) { + FilePath f = getBase(i); + if (f.exists()) { + f.delete(); + } else { + break; + } + } + } + + @Override + public long lastModified() { + long lastModified = 0; + for (int i = 0;; i++) { + FilePath f = getBase(i); + if (f.exists()) { + long l = f.lastModified(); + lastModified = Math.max(lastModified, l); + } else { + break; + } + } + return lastModified; + } + + @Override + public long size() { + long length = 0; + for (int i = 0;; i++) { + FilePath f = getBase(i); + if (f.exists()) { + length += f.size(); + } else { + break; + } + } + return length; + } + + @Override + public ArrayList newDirectoryStream() { + List list = getBase().newDirectoryStream(); + ArrayList newList = new ArrayList<>(); + for (FilePath f : list) { + if (!f.getName().endsWith(PART_SUFFIX)) { + newList.add(wrap(f)); + } + } + return newList; + } + + @Override + public InputStream newInputStream() throws IOException { + InputStream input = getBase().newInputStream(); + for (int i = 1;; i++) { + FilePath f = getBase(i); + if (f.exists()) { + InputStream i2 = f.newInputStream(); + input = new SequenceInputStream(input, i2); + } else { + break; + } + } + return input; + } + + @Override + public FileChannel open(String mode) throws IOException { + ArrayList list = new ArrayList<>(); + list.add(getBase().open(mode)); + for (int i = 1;; i++) { + FilePath f = getBase(i); + if (f.exists()) { + 
list.add(f.open(mode)); + } else { + break; + } + } + FileChannel[] array = list.toArray(new FileChannel[0]); + long maxLength = array[0].size(); + long length = maxLength; + if (array.length == 1) { + long defaultMaxLength = getDefaultMaxLength(); + if (maxLength < defaultMaxLength) { + maxLength = defaultMaxLength; + } + } else { + if (maxLength == 0) { + closeAndThrow(0, array, array[0], maxLength); + } + for (int i = 1; i < array.length - 1; i++) { + FileChannel c = array[i]; + long l = c.size(); + length += l; + if (l != maxLength) { + closeAndThrow(i, array, c, maxLength); + } + } + FileChannel c = array[array.length - 1]; + long l = c.size(); + length += l; + if (l > maxLength) { + closeAndThrow(array.length - 1, array, c, maxLength); + } + } + return new FileSplit(this, mode, array, length, maxLength); + } + + private long getDefaultMaxLength() { + return 1L << Integer.decode(parse(name)[0]); + } + + private void closeAndThrow(int id, FileChannel[] array, FileChannel o, + long maxLength) throws IOException { + String message = "Expected file length: " + maxLength + " got: " + + o.size() + " for " + getName(id); + for (FileChannel f : array) { + f.close(); + } + throw new IOException(message); + } + + @Override + public OutputStream newOutputStream(boolean append) throws IOException { + return newFileChannelOutputStream(open("rw"), append); + } + + @Override + public void moveTo(FilePath path, boolean atomicReplace) { + FilePathSplit newName = (FilePathSplit) path; + for (int i = 0;; i++) { + FilePath o = getBase(i); + if (o.exists()) { + o.moveTo(newName.getBase(i), atomicReplace); + } else if (newName.getBase(i).exists()) { + newName.getBase(i).delete(); + } else { + break; + } + } + } + + /** + * Split the file name into size and base file name. 
+ * + * @param fileName the file name + * @return an array with size and file name + */ + private String[] parse(String fileName) { + if (!fileName.startsWith(getScheme())) { + throw DbException.getInternalError(fileName + " doesn't start with " + getScheme()); + } + fileName = fileName.substring(getScheme().length() + 1); + String size; + if (fileName.length() > 0 && Character.isDigit(fileName.charAt(0))) { + int idx = fileName.indexOf(':'); + size = fileName.substring(0, idx); + try { + fileName = fileName.substring(idx + 1); + } catch (NumberFormatException e) { + // ignore + } + } else { + size = Long.toString(SysProperties.SPLIT_FILE_SIZE_SHIFT); + } + return new String[] { size, fileName }; + } + + /** + * Get the file name of a part file. + * + * @param id the part id + * @return the file name including the part id + */ + FilePath getBase(int id) { + return FilePath.get(getName(id)); + } + + private String getName(int id) { + return id > 0 ? getBase().name + "." + id + PART_SUFFIX : getBase().name; + } + + @Override + public String getScheme() { + return "split"; + } + +} diff --git a/h2/src/main/org/h2/store/fs/split/FileSplit.java b/h2/src/main/org/h2/store/fs/split/FileSplit.java new file mode 100644 index 0000000000..2cb8e212b9 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/split/FileSplit.java @@ -0,0 +1,156 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.split; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.channels.FileLock; +import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; +import org.h2.store.fs.FileBaseDefault; +import org.h2.store.fs.FilePath; + +/** + * A file that may be split into multiple smaller files. 
+ */ +class FileSplit extends FileBaseDefault { + + private final FilePathSplit filePath; + private final String mode; + private final long maxLength; + private FileChannel[] list; + private volatile long length; + + FileSplit(FilePathSplit file, String mode, FileChannel[] list, long length, + long maxLength) { + this.filePath = file; + this.mode = mode; + this.list = list; + this.length = length; + this.maxLength = maxLength; + } + + @Override + public synchronized void implCloseChannel() throws IOException { + for (FileChannel c : list) { + c.close(); + } + } + + @Override + public long size() { + return length; + } + + @Override + public synchronized int read(ByteBuffer dst, long position) + throws IOException { + int len = dst.remaining(); + if (len == 0) { + return 0; + } + len = (int) Math.min(len, length - position); + if (len <= 0) { + return -1; + } + long offset = position % maxLength; + len = (int) Math.min(len, maxLength - offset); + FileChannel channel = getFileChannel(position); + return channel.read(dst, offset); + } + + private FileChannel getFileChannel(long position) throws IOException { + int id = (int) (position / maxLength); + while (id >= list.length) { + int i = list.length; + FileChannel[] newList = new FileChannel[i + 1]; + System.arraycopy(list, 0, newList, 0, i); + FilePath f = filePath.getBase(i); + newList[i] = f.open(mode); + list = newList; + } + return list[id]; + } + + @Override + protected void implTruncate(long newLength) throws IOException { + if (newLength >= length) { + return; + } + int newFileCount = 1 + (int) (newLength / maxLength); + if (newFileCount < list.length) { + // delete some of the files + FileChannel[] newList = new FileChannel[newFileCount]; + // delete backwards, so that truncating is somewhat transactional + for (int i = list.length - 1; i >= newFileCount; i--) { + // verify the file is writable + list[i].truncate(0); + list[i].close(); + try { + filePath.getBase(i).delete(); + } catch (DbException e) { + 
throw DataUtils.convertToIOException(e); + } + } + System.arraycopy(list, 0, newList, 0, newList.length); + list = newList; + } + long size = newLength - maxLength * (newFileCount - 1); + list[list.length - 1].truncate(size); + this.length = newLength; + } + + @Override + public synchronized void force(boolean metaData) throws IOException { + for (FileChannel c : list) { + c.force(metaData); + } + } + + @Override + public synchronized int write(ByteBuffer src, long position) throws IOException { + if (position >= length && position > maxLength) { + // may need to extend and create files + long oldFilePointer = position; + long x = length - (length % maxLength) + maxLength; + for (; x < position; x += maxLength) { + if (x > length) { + // expand the file size + position(x - 1); + write(ByteBuffer.wrap(new byte[1])); + } + position = oldFilePointer; + } + } + long offset = position % maxLength; + int len = src.remaining(); + FileChannel channel = getFileChannel(position); + int l = (int) Math.min(len, maxLength - offset); + if (l == len) { + l = channel.write(src, offset); + } else { + int oldLimit = src.limit(); + src.limit(src.position() + l); + l = channel.write(src, offset); + src.limit(oldLimit); + } + length = Math.max(length, position + l); + return l; + } + + @Override + public synchronized FileLock tryLock(long position, long size, + boolean shared) throws IOException { + return list[0].tryLock(position, size, shared); + } + + @Override + public String toString() { + return filePath.toString(); + } + +} \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/split/package.html b/h2/src/main/org/h2/store/fs/split/package.html new file mode 100644 index 0000000000..ef8d718c3c --- /dev/null +++ b/h2/src/main/org/h2/store/fs/split/package.html @@ -0,0 +1,15 @@ + + + + +Javadoc package documentation +

          + +A file system that may split files into multiple smaller files +(required for a FAT32 because it only support files up to 2 GB). + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/FilePathZip.java b/h2/src/main/org/h2/store/fs/zip/FilePathZip.java similarity index 64% rename from h2/src/main/org/h2/store/fs/FilePathZip.java rename to h2/src/main/org/h2/store/fs/zip/FilePathZip.java index 0a323702e2..7262fd5e49 100644 --- a/h2/src/main/org/h2/store/fs/FilePathZip.java +++ b/h2/src/main/org/h2/store/fs/zip/FilePathZip.java @@ -1,23 +1,21 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.store.fs; +package org.h2.store.fs.zip; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; -import java.nio.ByteBuffer; import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; import java.util.ArrayList; import java.util.Enumeration; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; import org.h2.message.DbException; -import org.h2.util.IOUtils; +import org.h2.store.fs.FilePath; +import org.h2.store.fs.disk.FilePathDisk; /** * This is a read-only file system that allows @@ -171,11 +169,6 @@ public ArrayList newDirectoryStream() { } } - @Override - public InputStream newInputStream() throws IOException { - return new FileChannelInputStream(open("r"), true); - } - @Override public FileChannel open(String mode) throws IOException { ZipFile file = openZipFile(); @@ -247,132 +240,3 @@ public String getScheme() { } } - -/** - * The file is read from a stream. When reading from start to end, the same - * input stream is re-used, however when reading from end to start, a new input - * stream is opened for each request. 
- */ -class FileZip extends FileBase { - - private static final byte[] SKIP_BUFFER = new byte[1024]; - - private final ZipFile file; - private final ZipEntry entry; - private long pos; - private InputStream in; - private long inPos; - private final long length; - private boolean skipUsingRead; - - FileZip(ZipFile file, ZipEntry entry) { - this.file = file; - this.entry = entry; - length = entry.getSize(); - } - - @Override - public long position() { - return pos; - } - - @Override - public long size() { - return length; - } - - @Override - public int read(ByteBuffer dst) throws IOException { - seek(); - int len = in.read(dst.array(), dst.arrayOffset() + dst.position(), - dst.remaining()); - if (len > 0) { - dst.position(dst.position() + len); - pos += len; - inPos += len; - } - return len; - } - - private void seek() throws IOException { - if (inPos > pos) { - if (in != null) { - in.close(); - } - in = null; - } - if (in == null) { - in = file.getInputStream(entry); - inPos = 0; - } - if (inPos < pos) { - long skip = pos - inPos; - if (!skipUsingRead) { - try { - IOUtils.skipFully(in, skip); - } catch (NullPointerException e) { - // workaround for Android - skipUsingRead = true; - } - } - if (skipUsingRead) { - while (skip > 0) { - int s = (int) Math.min(SKIP_BUFFER.length, skip); - s = in.read(SKIP_BUFFER, 0, s); - skip -= s; - } - } - inPos = pos; - } - } - - @Override - public FileChannel position(long newPos) { - this.pos = newPos; - return this; - } - - @Override - public FileChannel truncate(long newLength) throws IOException { - throw new IOException("File is read-only"); - } - - @Override - public void force(boolean metaData) throws IOException { - // nothing to do - } - - @Override - public int write(ByteBuffer src) throws IOException { - throw new IOException("File is read-only"); - } - - @Override - public synchronized FileLock tryLock(long position, long size, - boolean shared) throws IOException { - if (shared) { - return new 
FileLock(FakeFileChannel.INSTANCE, position, size, shared) { - - @Override - public boolean isValid() { - return true; - } - - @Override - public void release() throws IOException { - // ignore - }}; - } - return null; - } - - @Override - protected void implCloseChannel() throws IOException { - if (in != null) { - in.close(); - in = null; - } - file.close(); - } - -} diff --git a/h2/src/main/org/h2/store/fs/zip/FileZip.java b/h2/src/main/org/h2/store/fs/zip/FileZip.java new file mode 100644 index 0000000000..1488cfc5ee --- /dev/null +++ b/h2/src/main/org/h2/store/fs/zip/FileZip.java @@ -0,0 +1,147 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.store.fs.zip; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.channels.FileLock; +import java.nio.channels.NonWritableChannelException; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; +import org.h2.store.fs.FakeFileChannel; +import org.h2.store.fs.FileBase; +import org.h2.util.IOUtils; + +/** + * The file is read from a stream. When reading from start to end, the same + * input stream is re-used, however when reading from end to start, a new input + * stream is opened for each request. 
+ */ +class FileZip extends FileBase { + + private static final byte[] SKIP_BUFFER = new byte[1024]; + + private final ZipFile file; + private final ZipEntry entry; + private long pos; + private InputStream in; + private long inPos; + private final long length; + private boolean skipUsingRead; + + FileZip(ZipFile file, ZipEntry entry) { + this.file = file; + this.entry = entry; + length = entry.getSize(); + } + + @Override + public long position() { + return pos; + } + + @Override + public long size() { + return length; + } + + @Override + public int read(ByteBuffer dst) throws IOException { + seek(); + int len = in.read(dst.array(), dst.arrayOffset() + dst.position(), + dst.remaining()); + if (len > 0) { + dst.position(dst.position() + len); + pos += len; + inPos += len; + } + return len; + } + + private void seek() throws IOException { + if (inPos > pos) { + if (in != null) { + in.close(); + } + in = null; + } + if (in == null) { + in = file.getInputStream(entry); + inPos = 0; + } + if (inPos < pos) { + long skip = pos - inPos; + if (!skipUsingRead) { + try { + IOUtils.skipFully(in, skip); + } catch (NullPointerException e) { + // workaround for Android + skipUsingRead = true; + } + } + if (skipUsingRead) { + while (skip > 0) { + int s = (int) Math.min(SKIP_BUFFER.length, skip); + s = in.read(SKIP_BUFFER, 0, s); + skip -= s; + } + } + inPos = pos; + } + } + + @Override + public FileChannel position(long newPos) { + this.pos = newPos; + return this; + } + + @Override + public FileChannel truncate(long newLength) throws IOException { + throw new IOException("File is read-only"); + } + + @Override + public void force(boolean metaData) throws IOException { + // nothing to do + } + + @Override + public int write(ByteBuffer src) throws IOException { + throw new NonWritableChannelException(); + } + + @Override + public synchronized FileLock tryLock(long position, long size, + boolean shared) throws IOException { + if (shared) { + return new 
FileLock(FakeFileChannel.INSTANCE, position, size, shared) { + + @Override + public boolean isValid() { + return true; + } + + @Override + public void release() throws IOException { + // ignore + }}; + } + return null; + } + + @Override + protected void implCloseChannel() throws IOException { + if (in != null) { + in.close(); + in = null; + } + file.close(); + } + +} \ No newline at end of file diff --git a/h2/src/main/org/h2/store/fs/zip/package.html b/h2/src/main/org/h2/store/fs/zip/package.html new file mode 100644 index 0000000000..d314bc5695 --- /dev/null +++ b/h2/src/main/org/h2/store/fs/zip/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

          + +A zip-file base file system abstraction. + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/store/package.html b/h2/src/main/org/h2/store/package.html index 24bffe3b33..157780ff65 100644 --- a/h2/src/main/org/h2/store/package.html +++ b/h2/src/main/org/h2/store/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/table/Column.java b/h2/src/main/org/h2/table/Column.java index cf02c275ab..1dd1d56e57 100644 --- a/h2/src/main/org/h2/table/Column.java +++ b/h2/src/main/org/h2/table/Column.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; @@ -11,35 +11,30 @@ import org.h2.api.ErrorCode; import org.h2.command.Parser; import org.h2.command.ddl.SequenceOptions; +import org.h2.engine.CastDataProvider; import org.h2.engine.Constants; -import org.h2.engine.Domain; -import org.h2.engine.Mode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; -import org.h2.expression.SequenceValue; import org.h2.expression.ValueExpression; -import org.h2.expression.condition.ConditionAndOr; import org.h2.message.DbException; import org.h2.result.Row; +import org.h2.schema.Domain; import org.h2.schema.Schema; import org.h2.schema.Sequence; -import org.h2.util.MathUtils; +import org.h2.util.HasSQL; +import org.h2.util.ParserUtil; import org.h2.util.StringUtils; -import org.h2.value.DataType; import org.h2.value.TypeInfo; +import org.h2.value.Typed; import org.h2.value.Value; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; import org.h2.value.ValueNull; -import org.h2.value.ValueString; -import org.h2.value.ValueTime; import org.h2.value.ValueUuid; /** * This class represents a column in a table. 
*/ -public class Column { +public final class Column implements HasSQL, Typed, ColumnTemplate { /** * The name of the rowid pseudo column. @@ -64,23 +59,19 @@ public class Column { public static final int NULLABLE_UNKNOWN = ResultSetMetaData.columnNullableUnknown; - private final TypeInfo type; + private TypeInfo type; private Table table; private String name; private int columnId; private boolean nullable = true; private Expression defaultExpression; private Expression onUpdateExpression; - private Expression checkConstraint; - private String checkConstraintSQL; - private String originalSQL; - private SequenceOptions autoIncrementOptions; - private boolean convertNullToDefault; + private SequenceOptions identityOptions; + private boolean defaultOnNull; private Sequence sequence; - private boolean isComputed; - private TableFilter computeTableFilter; + private boolean isGeneratedAlways; + private GeneratedColumnResolver generatedTableFilter; private int selectivity; - private SingleColumnResolver resolver; private String comment; private boolean primaryKey; private boolean visible = true; @@ -94,16 +85,16 @@ public class Column { * string builder * @param columns * columns - * @param alwaysQuote - * quote all identifiers + * @param sqlFlags + * formatting flags * @return the specified string builder */ - public static StringBuilder writeColumns(StringBuilder builder, Column[] columns, boolean alwaysQuote) { + public static StringBuilder writeColumns(StringBuilder builder, Column[] columns, int sqlFlags) { for (int i = 0, l = columns.length; i < l; i++) { if (i > 0) { builder.append(", "); } - columns[i].getSQL(builder, alwaysQuote); + columns[i].getSQL(builder, sqlFlags); } return builder; } @@ -119,28 +110,31 @@ public static StringBuilder writeColumns(StringBuilder builder, Column[] columns * separator * @param suffix * additional SQL to append after each column - * @param alwaysQuote - * quote all identifiers + * @param sqlFlags + * formatting flags * @return 
the specified string builder */ public static StringBuilder writeColumns(StringBuilder builder, Column[] columns, String separator, - String suffix, boolean alwaysQuote) { + String suffix, int sqlFlags) { for (int i = 0, l = columns.length; i < l; i++) { if (i > 0) { builder.append(separator); } - columns[i].getSQL(builder, alwaysQuote).append(suffix); + columns[i].getSQL(builder, sqlFlags).append(suffix); } return builder; } - public Column(String name, int valueType) { - this(name, TypeInfo.getTypeInfo(valueType)); + public Column(String name, TypeInfo type) { + this.name = name; + this.type = type; } - public Column(String name, TypeInfo type) { + public Column(String name, TypeInfo type, Table table, int columnId) { this.name = name; this.type = type; + this.table = table; + this.columnId = columnId; } @Override @@ -176,64 +170,60 @@ public Column getClone() { } /** - * Convert a value to this column's type. - * - * @param v the value - * @return the value - */ - public Value convert(Value v) { - return convert(v, null); - } - - /** - * Convert a value to this column's type using the given {@link Mode}. - *

          - * Use this method in case the conversion is Mode-dependent. + * Convert a value to this column's type without precision and scale checks. * + * @param provider the cast information provider * @param v the value - * @param mode the database {@link Mode} to use * @return the value */ - public Value convert(Value v, Mode mode) { + public Value convert(CastDataProvider provider, Value v) { try { - return v.convertTo(type, mode, this); + return v.convertTo(type, provider, this); } catch (DbException e) { if (e.getErrorCode() == ErrorCode.DATA_CONVERSION_ERROR_1) { - String target = (table == null ? "" : table.getName() + ": ") + - getCreateSQL(); - throw DbException.get( - ErrorCode.DATA_CONVERSION_ERROR_1, e, - v.getTraceSQL() + " (" + target + ")"); + e = getDataConversionError(v, e); } throw e; } } - boolean getComputed() { - return isComputed; + /** + * Returns whether this column is an identity column. + * + * @return whether this column is an identity column + */ + public boolean isIdentity() { + return sequence != null || identityOptions != null; } /** - * Compute the value of this computed column. + * Returns whether this column is a generated column. * - * @param session the session - * @param row the row - * @return the value + * @return whether this column is a generated column + */ + public boolean isGenerated() { + return isGeneratedAlways && defaultExpression != null; + } + + /** + * Returns whether this column is a generated column or always generated + * identity column. 
+ * + * @return whether this column is a generated column or always generated + * identity column */ - synchronized Value computeValue(Session session, Row row) { - computeTableFilter.setSession(session); - computeTableFilter.set(row); - return defaultExpression.getValue(session); + public boolean isGeneratedAlways() { + return isGeneratedAlways; } /** - * Set the default value in the form of a computed expression of other + * Set the default value in the form of a generated expression of other * columns. * * @param expression the computed expression */ - public void setComputedExpression(Expression expression) { - this.isComputed = true; + public void setGeneratedExpression(Expression expression) { + this.isGeneratedAlways = true; this.defaultExpression = expression; } @@ -252,14 +242,8 @@ public Table getTable() { return table; } - /** - * Set the default expression. - * - * @param session the session - * @param defaultExpression the default expression - */ - public void setDefaultExpression(Session session, - Expression defaultExpression) { + @Override + public void setDefaultExpression(SessionLocal session, Expression defaultExpression) { // also to test that no column names are used if (defaultExpression != null) { defaultExpression = defaultExpression.optimize(session); @@ -269,15 +253,11 @@ public void setDefaultExpression(Session session, } } this.defaultExpression = defaultExpression; + this.isGeneratedAlways = false; } - /** - * Set the on update expression. 
- * - * @param session the session - * @param onUpdateExpression the on update expression - */ - public void setOnUpdateExpression(Session session, Expression onUpdateExpression) { + @Override + public void setOnUpdateExpression(SessionLocal session, Expression onUpdateExpression) { // also to test that no column names are used if (onUpdateExpression != null) { onUpdateExpression = onUpdateExpression.optimize(session); @@ -292,32 +272,32 @@ public int getColumnId() { return columnId; } - /** - * Get the SQL representation of the column. - * - * @param alwaysQuote whether to always quote the name - * @return the SQL representation - */ - public String getSQL(boolean alwaysQuote) { - return rowId ? name : Parser.quoteIdentifier(name, alwaysQuote); + @Override + public String getSQL(int sqlFlags) { + return rowId ? name : Parser.quoteIdentifier(name, sqlFlags); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return rowId ? builder.append(name) : ParserUtil.quoteIdentifier(builder, name, sqlFlags); } /** - * Appends the column name to the specified builder. - * The name is quoted, unless if this is a row id column. + * Appends the table name and column name to the specified builder. * * @param builder the string builder - * @param alwaysQuote quote all identifiers + * @param sqlFlags formatting flags * @return the specified string builder */ - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - return rowId ? 
builder.append(name) : Parser.quoteIdentifier(builder, name, alwaysQuote); + public StringBuilder getSQLWithTable(StringBuilder builder, int sqlFlags) { + return getSQL(table.getSQL(builder, sqlFlags).append('.'), sqlFlags); } public String getName() { return name; } + @Override public TypeInfo getType() { return type; } @@ -334,10 +314,12 @@ public void setVisible(boolean b) { visible = b; } + @Override public Domain getDomain() { return domain; } + @Override public void setDomain(Domain domain) { this.domain = domain; } @@ -363,115 +345,100 @@ public void setRowId(boolean rowId) { /** * Validate the value, convert it if required, and update the sequence value * if required. If the value is null, the default value (NULL if no default - * is set) is returned. Check constraints are validated as well. + * is set) is returned. Domain constraints are validated as well. * * @param session the session * @param value the value or null + * @param row the row * @return the new or converted value */ - public Value validateConvertUpdateSequence(Session session, Value value) { - // take a local copy of defaultExpression to avoid holding the lock - // while calling getValue - final Expression localDefaultExpression; - synchronized (this) { - localDefaultExpression = defaultExpression; - } - Mode mode = session.getDatabase().getMode(); - if (value == null) { - if (localDefaultExpression == null) { - value = ValueNull.INSTANCE; - } else { - value = convert(localDefaultExpression.getValue(session), mode); - if (!localDefaultExpression.isConstant()) { - session.getGeneratedKeys().add(this); - } - if (primaryKey) { - session.setLastIdentity(value); - } - } - } - if (value == ValueNull.INSTANCE) { - if (convertNullToDefault) { - value = convert(localDefaultExpression.getValue(session), mode); - if (!localDefaultExpression.isConstant()) { - session.getGeneratedKeys().add(this); + Value validateConvertUpdateSequence(SessionLocal session, Value value, Row row) { + check: { + if (value 
== null) { + if (sequence != null) { + value = session.getNextValueFor(sequence, null); + break check; } + value = getDefaultOrGenerated(session, row); } if (value == ValueNull.INSTANCE && !nullable) { - if (mode.convertInsertNullToZero) { - int t = type.getValueType(); - DataType dt = DataType.getDataType(t); - if (dt.decimal) { - value = ValueInt.get(0).convertTo(t); - } else if (dt.type == Value.TIMESTAMP) { - value = session.getCurrentCommandStart().convertTo(Value.TIMESTAMP); - } else if (dt.type == Value.TIMESTAMP_TZ) { - value = session.getCurrentCommandStart(); - } else if (dt.type == Value.TIME) { - value = ValueTime.fromNanos(0); - } else if (dt.type == Value.DATE) { - value = session.getCurrentCommandStart().convertTo(Value.DATE); - } else { - value = ValueString.get("").convertTo(t); - } - } else { - throw DbException.get(ErrorCode.NULL_NOT_ALLOWED, name); - } + throw DbException.get(ErrorCode.NULL_NOT_ALLOWED, name); } } - if (checkConstraint != null) { - resolver.setValue(value); - Value v; - synchronized (this) { - v = checkConstraint.getValue(session); - } - // Both TRUE and NULL are ok - if (v != ValueNull.INSTANCE && !v.getBoolean()) { - throw DbException.get(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, checkConstraint.getSQL(false)); + try { + value = value.convertForAssignTo(type, session, name); + } catch (DbException e) { + if (e.getErrorCode() == ErrorCode.DATA_CONVERSION_ERROR_1) { + e = getDataConversionError(value, e); } + throw e; } - value = value.convertScale(mode.convertOnlyToSmallerScale, type.getScale()); - long precision = type.getPrecision(); - if (precision > 0) { - if (!value.checkPrecision(precision)) { - String s = value.getTraceSQL(); - if (s.length() > 127) { - s = s.substring(0, 128) + "..."; + if (domain != null) { + domain.checkConstraints(session, value); + } + if (sequence != null && session.getMode().updateSequenceOnManualIdentityInsertion) { + updateSequenceIfRequired(session, value.getLong()); + } + return value; + } + + 
private Value getDefaultOrGenerated(SessionLocal session, Row row) { + Value value; + Expression localDefaultExpression = getEffectiveDefaultExpression(); + if (localDefaultExpression == null) { + value = ValueNull.INSTANCE; + } else { + if (isGeneratedAlways) { + synchronized (this) { + generatedTableFilter.set(row); + try { + value = localDefaultExpression.getValue(session); + } finally { + generatedTableFilter.set(null); + } } - throw DbException.get(ErrorCode.VALUE_TOO_LONG_2, getCreateSQL(), - s + " (" + value.getType().getPrecision() + ')'); + } else { + value = localDefaultExpression.getValue(session); } } - if (value != ValueNull.INSTANCE && DataType.isExtInfoType(type.getValueType()) - && type.getExtTypeInfo() != null) { - value = type.getExtTypeInfo().cast(value); - } - updateSequenceIfRequired(session, value); return value; } - private void updateSequenceIfRequired(Session session, Value value) { - if (sequence != null) { - long current = sequence.getCurrentValue(); - long inc = sequence.getIncrement(); - long now = value.getLong(); - boolean update = false; - if (inc > 0 && now > current) { - update = true; - } else if (inc < 0 && now < current) { - update = true; + private DbException getDataConversionError(Value value, DbException cause) { + StringBuilder builder = new StringBuilder().append(value.getTraceSQL()).append(" ("); + if (table != null) { + builder.append(table.getName()).append(": "); + } + builder.append(getCreateSQL()).append(')'); + return DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, cause, builder.toString()); + } + + private void updateSequenceIfRequired(SessionLocal session, long value) { + if (sequence.getCycle() == Sequence.Cycle.EXHAUSTED) { + return; + } + long current = sequence.getCurrentValue(); + long inc = sequence.getIncrement(); + if (inc > 0) { + if (value < current) { + return; } - if (update) { - sequence.modify(now + inc, null, null, null); - session.setLastIdentity(ValueLong.get(now)); - sequence.flush(session); 
+ } else if (value > current) { + return; + } + try { + sequence.modify(value + inc, null, null, null, null, null, null); + } catch (DbException ex) { + if (ex.getErrorCode() == ErrorCode.SEQUENCE_ATTRIBUTES_INVALID_7) { + return; } + throw ex; } + sequence.flush(session); } /** - * Convert the auto-increment flag to a sequence that is linked with this - * table. + * Initialize the sequence for this column. * * @param session the session * @param schema the schema where the sequence should be generated @@ -479,155 +446,190 @@ private void updateSequenceIfRequired(Session session, Value value) { * @param temporary true if the sequence is temporary and does not need to * be stored */ - public void convertAutoIncrementToSequence(Session session, Schema schema, - int id, boolean temporary) { - if (autoIncrementOptions == null) { - DbException.throwInternalError(); - } - if ("IDENTITY".equals(originalSQL)) { - originalSQL = "BIGINT"; - } else if ("SERIAL".equals(originalSQL)) { - originalSQL = "INT"; + public void initializeSequence(SessionLocal session, Schema schema, int id, boolean temporary) { + if (identityOptions == null) { + throw DbException.getInternalError(); } String sequenceName; do { - ValueUuid uuid = ValueUuid.getNewRandom(); - String s = uuid.getString(); - s = StringUtils.toUpperEnglish(s.replace('-', '_')); - sequenceName = "SYSTEM_SEQUENCE_" + s; + sequenceName = "SYSTEM_SEQUENCE_" + + StringUtils.toUpperEnglish(ValueUuid.getNewRandom().getString().replace('-', '_')); } while (schema.findSequence(sequenceName) != null); - Sequence seq = new Sequence(schema, id, sequenceName, autoIncrementOptions.getStartValue(session), - autoIncrementOptions.getIncrement(session), autoIncrementOptions.getCacheSize(session), - autoIncrementOptions.getMinValue(null, session), autoIncrementOptions.getMaxValue(null, session), - Boolean.TRUE.equals(autoIncrementOptions.getCycle()), true); + identityOptions.setDataType(type); + Sequence seq = new Sequence(session, schema, 
id, sequenceName, identityOptions, true); seq.setTemporary(temporary); session.getDatabase().addSchemaObject(session, seq); - setAutoIncrementOptions(null); - SequenceValue seqValue = new SequenceValue(seq); - setDefaultExpression(session, seqValue); - setSequence(seq); + // This method also ensures NOT NULL + setSequence(seq, isGeneratedAlways); } - /** - * Prepare all expressions of this column. - * - * @param session the session - */ - public void prepareExpression(Session session) { - if (defaultExpression != null || onUpdateExpression != null) { - computeTableFilter = new TableFilter(session, table, null, false, null, 0, null); - if (defaultExpression != null) { - defaultExpression.mapColumns(computeTableFilter, 0, Expression.MAP_INITIAL); - defaultExpression = defaultExpression.optimize(session); - } - if (onUpdateExpression != null) { - onUpdateExpression.mapColumns(computeTableFilter, 0, Expression.MAP_INITIAL); - onUpdateExpression = onUpdateExpression.optimize(session); + @Override + public void prepareExpressions(SessionLocal session) { + if (defaultExpression != null) { + if (isGeneratedAlways) { + generatedTableFilter = new GeneratedColumnResolver(table); + defaultExpression.mapColumns(generatedTableFilter, 0, Expression.MAP_INITIAL); } + defaultExpression = defaultExpression.optimize(session); + } + if (onUpdateExpression != null) { + onUpdateExpression = onUpdateExpression.optimize(session); + } + if (domain != null) { + domain.prepareExpressions(session); } } public String getCreateSQLWithoutName() { - return getCreateSQL(false); + return getCreateSQL(new StringBuilder(), false); } public String getCreateSQL() { - return getCreateSQL(true); + return getCreateSQL(false); } - private String getCreateSQL(boolean includeName) { - StringBuilder buff = new StringBuilder(); - if (includeName && name != null) { - Parser.quoteIdentifier(buff, name, true).append(' '); + /** + * Get this columns part of CREATE TABLE SQL statement. 
+ * + * @param forMeta whether this is for the metadata table + * @return the SQL statement + */ + public String getCreateSQL(boolean forMeta) { + StringBuilder builder = new StringBuilder(); + if (name != null) { + ParserUtil.quoteIdentifier(builder, name, DEFAULT_SQL_FLAGS).append(' '); } - if (originalSQL != null) { - buff.append(originalSQL); + return getCreateSQL(builder, forMeta); + } + + private String getCreateSQL(StringBuilder builder, boolean forMeta) { + if (domain != null) { + domain.getSQL(builder, DEFAULT_SQL_FLAGS); } else { - type.getSQL(buff); + type.getSQL(builder, DEFAULT_SQL_FLAGS); } - if (!visible) { - buff.append(" INVISIBLE "); + builder.append(" INVISIBLE "); } - - if (defaultExpression != null) { - if (isComputed) { - buff.append(" AS "); - defaultExpression.getSQL(buff, true); - } else if (defaultExpression != null) { - buff.append(" DEFAULT "); - defaultExpression.getSQL(buff, true); + if (sequence != null) { + builder.append(" GENERATED ").append(isGeneratedAlways ? 
"ALWAYS" : "BY DEFAULT").append(" AS IDENTITY"); + if (!forMeta) { + sequence.getSequenceOptionsSQL(builder.append('(')).append(')'); + } + } else if (defaultExpression != null) { + if (isGeneratedAlways) { + defaultExpression.getEnclosedSQL(builder.append(" GENERATED ALWAYS AS "), DEFAULT_SQL_FLAGS); + } else { + defaultExpression.getUnenclosedSQL(builder.append(" DEFAULT "), DEFAULT_SQL_FLAGS); } } if (onUpdateExpression != null) { - buff.append(" ON UPDATE "); - onUpdateExpression.getSQL(buff, true); - } - if (!nullable) { - buff.append(" NOT NULL"); - } else if (domain != null && !domain.getColumn().isNullable()) { - buff.append(" NULL"); + onUpdateExpression.getUnenclosedSQL(builder.append(" ON UPDATE "), DEFAULT_SQL_FLAGS); } - if (convertNullToDefault) { - buff.append(" NULL_TO_DEFAULT"); + if (defaultOnNull) { + builder.append(" DEFAULT ON NULL"); } - if (sequence != null) { - buff.append(" SEQUENCE "); - sequence.getSQL(buff, true); + if (forMeta && sequence != null) { + sequence.getSQL(builder.append(" SEQUENCE "), DEFAULT_SQL_FLAGS); } if (selectivity != 0) { - buff.append(" SELECTIVITY ").append(selectivity); + builder.append(" SELECTIVITY ").append(selectivity); } if (comment != null) { - buff.append(" COMMENT "); - StringUtils.quoteStringSQL(buff, comment); + StringUtils.quoteStringSQL(builder.append(" COMMENT "), comment); } - if (checkConstraint != null) { - buff.append(" CHECK ").append(checkConstraintSQL); + if (!nullable) { + builder.append(" NOT NULL"); } - return buff.toString(); + return builder.toString(); } public boolean isNullable() { return nullable; } - public void setOriginalSQL(String original) { - originalSQL = original; - } - - public String getOriginalSQL() { - return originalSQL; - } - + @Override public Expression getDefaultExpression() { return defaultExpression; } + @Override + public Expression getEffectiveDefaultExpression() { + /* + * Identity columns may not have a default expression and may not use an + * expression from 
domain. + * + * Generated columns always have an own expression. + */ + if (sequence != null) { + return null; + } + return defaultExpression != null ? defaultExpression + : domain != null ? domain.getEffectiveDefaultExpression() : null; + } + + @Override public Expression getOnUpdateExpression() { return onUpdateExpression; } - public boolean isAutoIncrement() { - return autoIncrementOptions != null; + @Override + public Expression getEffectiveOnUpdateExpression() { + /* + * Identity and generated columns may not have an on update expression + * and may not use an expression from domain. + */ + if (sequence != null || isGeneratedAlways) { + return null; + } + return onUpdateExpression != null ? onUpdateExpression + : domain != null ? domain.getEffectiveOnUpdateExpression() : null; } /** - * Set the autoincrement flag and related options of this column. + * Whether the column has any identity options. * - * @param sequenceOptions - * sequence options, or {@code null} to reset the flag + * @return true if yes */ - public void setAutoIncrementOptions(SequenceOptions sequenceOptions) { - this.autoIncrementOptions = sequenceOptions; - this.nullable = false; - if (sequenceOptions != null) { - convertNullToDefault = true; - } + public boolean hasIdentityOptions() { + return identityOptions != null; + } + + /** + * Set the identity options of this column. 
+ * + * @param identityOptions + * identity column options + * @param generatedAlways + * whether value should be always generated + */ + public void setIdentityOptions(SequenceOptions identityOptions, boolean generatedAlways) { + this.identityOptions = identityOptions; + this.isGeneratedAlways = generatedAlways; + removeNonIdentityProperties(); } - public void setConvertNullToDefault(boolean convert) { - this.convertNullToDefault = convert; + private void removeNonIdentityProperties() { + nullable = false; + onUpdateExpression = defaultExpression = null; + } + + /** + * Returns identity column options, or {@code null} if sequence was already + * created or this column is not an identity column. + * + * @return identity column options, or {@code null} + */ + public SequenceOptions getIdentityOptions() { + return identityOptions; + } + + public void setDefaultOnNull(boolean defaultOnNull) { + this.defaultOnNull = defaultOnNull; + } + + public boolean isDefaultOnNull() { + return defaultOnNull; } /** @@ -640,8 +642,22 @@ public void rename(String newName) { this.name = newName; } - public void setSequence(Sequence sequence) { + /** + * Set the sequence to generate the value. + * + * @param sequence the sequence + * @param generatedAlways whether the value of the sequence is always used + */ + public void setSequence(Sequence sequence, boolean generatedAlways) { this.sequence = sequence; + this.isGeneratedAlways = generatedAlways; + this.identityOptions = null; + if (sequence != null) { + removeNonIdentityProperties(); + if (sequence.getDatabase().getMode().identityColumnsHaveDefaultOnNull) { + defaultOnNull = true; + } + } } public Sequence getSequence() { @@ -668,102 +684,20 @@ public void setSelectivity(int selectivity) { this.selectivity = selectivity; } - /** - * Add a check constraint expression to this column. An existing check - * constraint is added using AND. 
- * - * @param session the session - * @param expr the (additional) constraint - */ - public void addCheckConstraint(Session session, Expression expr) { - if (expr == null) { - return; - } - if (resolver == null) { - resolver = new SingleColumnResolver(this); - } - synchronized (this) { - String oldName = name; - if (name == null) { - name = "VALUE"; - } - expr.mapColumns(resolver, 0, Expression.MAP_INITIAL); - name = oldName; - } - expr = expr.optimize(session); - resolver.setValue(ValueNull.INSTANCE); - // check if the column is mapped - synchronized (this) { - expr.getValue(session); - } - if (checkConstraint == null) { - checkConstraint = expr; - } else if (!expr.getSQL(true).equals(checkConstraintSQL)) { - checkConstraint = new ConditionAndOr(ConditionAndOr.AND, checkConstraint, expr); - } - checkConstraintSQL = getCheckConstraintSQL(session, name); - } - - /** - * Remove the check constraint if there is one. - */ - public void removeCheckConstraint() { - checkConstraint = null; - checkConstraintSQL = null; - } - - /** - * Get the check constraint expression for this column if set. - * - * @param session the session - * @param asColumnName the column name to use - * @return the constraint expression - */ - public Expression getCheckConstraint(Session session, String asColumnName) { - if (checkConstraint == null) { - return null; - } - Parser parser = new Parser(session); - String sql; - synchronized (this) { - String oldName = name; - name = asColumnName; - sql = checkConstraint.getSQL(true); - name = oldName; - } - return parser.parseExpression(sql); - } - - String getDefaultSQL() { - return defaultExpression == null ? null : defaultExpression.getSQL(true); - } - - String getOnUpdateSQL() { - return onUpdateExpression == null ? 
null : onUpdateExpression.getSQL(true); - } - - int getPrecisionAsInt() { - return MathUtils.convertLongToInt(type.getPrecision()); - } - - DataType getDataType() { - return DataType.getDataType(type.getValueType()); + @Override + public String getDefaultSQL() { + return defaultExpression == null ? null + : defaultExpression.getUnenclosedSQL(new StringBuilder(), DEFAULT_SQL_FLAGS).toString(); } - /** - * Get the check constraint SQL snippet. - * - * @param session the session - * @param asColumnName the column name to use - * @return the SQL snippet - */ - String getCheckConstraintSQL(Session session, String asColumnName) { - Expression constraint = getCheckConstraint(session, asColumnName); - return constraint == null ? "" : constraint.getSQL(true); + @Override + public String getOnUpdateSQL() { + return onUpdateExpression == null ? null + : onUpdateExpression.getUnenclosedSQL(new StringBuilder(), DEFAULT_SQL_FLAGS).toString(); } public void setComment(String comment) { - this.comment = comment; + this.comment = comment != null && !comment.isEmpty() ? 
comment : null; } public String getComment() { @@ -788,10 +722,12 @@ boolean isEverything(ExpressionVisitor visitor) { visitor.getDependencies().add(sequence); } } - if (defaultExpression != null && !defaultExpression.isEverything(visitor)) { + Expression e = getEffectiveDefaultExpression(); + if (e != null && !e.isEverything(visitor)) { return false; } - if (checkConstraint != null && !checkConstraint.isEverything(visitor)) { + e = getEffectiveOnUpdateExpression(); + if (e != null && !e.isEverything(visitor)) { return false; } return true; @@ -814,45 +750,44 @@ public String toString() { * @return true if the new column is compatible */ public boolean isWideningConversion(Column newColumn) { - if (type != newColumn.type) { + TypeInfo newType = newColumn.type; + int valueType = type.getValueType(); + if (valueType != newType.getValueType()) { return false; } - if (type.getPrecision() > newColumn.type.getPrecision()) { + long precision = type.getPrecision(); + long newPrecision = newType.getPrecision(); + if (precision > newPrecision + || precision < newPrecision && (valueType == Value.CHAR || valueType == Value.BINARY)) { return false; } - if (type.getScale() != newColumn.type.getScale()) { + if (type.getScale() != newType.getScale()) { return false; } - if (nullable && !newColumn.nullable) { + if (!Objects.equals(type.getExtTypeInfo(), newType.getExtTypeInfo())) { return false; } - if (convertNullToDefault != newColumn.convertNullToDefault) { + if (nullable && !newColumn.nullable) { return false; } if (primaryKey != newColumn.primaryKey) { return false; } - if (autoIncrementOptions != null || newColumn.autoIncrementOptions != null) { - return false; - } - if (checkConstraint != null || newColumn.checkConstraint != null) { + if (identityOptions != null || newColumn.identityOptions != null) { return false; } - if (convertNullToDefault || newColumn.convertNullToDefault) { + if (domain != newColumn.domain) { return false; } if (defaultExpression != null || 
newColumn.defaultExpression != null) { return false; } - if (isComputed || newColumn.isComputed) { + if (isGeneratedAlways || newColumn.isGeneratedAlways) { return false; } if (onUpdateExpression != null || newColumn.onUpdateExpression != null) { return false; } - if (!Objects.equals(type.getExtTypeInfo(), newColumn.type.getExtTypeInfo())) { - return false; - } return true; } @@ -862,21 +797,20 @@ public boolean isWideningConversion(Column newColumn) { * @param source the source column */ public void copy(Column source) { - checkConstraint = source.checkConstraint; - checkConstraintSQL = source.checkConstraintSQL; name = source.name; + type = source.type; + domain = source.domain; // table is not set // columnId is not set nullable = source.nullable; defaultExpression = source.defaultExpression; onUpdateExpression = source.onUpdateExpression; - originalSQL = source.originalSQL; - // autoIncrement, start, increment is not set - convertNullToDefault = source.convertNullToDefault; + // identityOptions field is not set + defaultOnNull = source.defaultOnNull; sequence = source.sequence; comment = source.comment; - computeTableFilter = source.computeTableFilter; - isComputed = source.isComputed; + generatedTableFilter = source.generatedTableFilter; + isGeneratedAlways = source.isGeneratedAlways; selectivity = source.selectivity; primaryKey = source.primaryKey; visible = source.visible; diff --git a/h2/src/main/org/h2/table/ColumnResolver.java b/h2/src/main/org/h2/table/ColumnResolver.java index 3959b2e274..6942d21b20 100644 --- a/h2/src/main/org/h2/table/ColumnResolver.java +++ b/h2/src/main/org/h2/table/ColumnResolver.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.table; -import org.h2.command.dml.Select; +import org.h2.command.query.Select; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.value.Value; @@ -21,7 +21,9 @@ public interface ColumnResolver { * * @return the table alias */ - String getTableAlias(); + default String getTableAlias() { + return null; + } /** * Get the column list. @@ -31,33 +33,61 @@ public interface ColumnResolver { Column[] getColumns(); /** - * Get derived column name, or {@code null}. + * Get the column with the specified name. + * + * @param name + * the column name, must be a derived name if this column + * resolver has a derived column list + * @return the column with the specified name, or {@code null} + */ + Column findColumn(String name); + + /** + * Get the name of the specified column. * * @param column column - * @return derived column name, or {@code null} + * @return column name + */ + default String getColumnName(Column column) { + return column.getName(); + } + + /** + * Returns whether this column resolver has a derived column list. + * + * @return {@code true} if this column resolver has a derived column list, + * {@code false} otherwise */ - String getDerivedColumnName(Column column); + default boolean hasDerivedColumnList() { + return false; + } /** * Get the list of system columns, if any. * * @return the system columns or null */ - Column[] getSystemColumns(); + default Column[] getSystemColumns() { + return null; + } /** * Get the row id pseudo column, if there is one. * * @return the row id column or null */ - Column getRowIdColumn(); + default Column getRowIdColumn() { + return null; + } /** - * Get the schema name. + * Get the schema name or null. * - * @return the schema name + * @return the schema name or null */ - String getSchemaName(); + default String getSchemaName() { + return null; + } /** * Get the value for the given column. 
@@ -72,14 +102,18 @@ public interface ColumnResolver { * * @return the table filter */ - TableFilter getTableFilter(); + default TableFilter getTableFilter() { + return null; + } /** * Get the select statement. * * @return the select statement */ - Select getSelect(); + default Select getSelect() { + return null; + } /** * Get the expression that represents this column. @@ -88,6 +122,8 @@ public interface ColumnResolver { * @param column the column * @return the optimized expression */ - Expression optimize(ExpressionColumn expressionColumn, Column column); + default Expression optimize(ExpressionColumn expressionColumn, Column column) { + return expressionColumn; + } } diff --git a/h2/src/main/org/h2/table/ColumnTemplate.java b/h2/src/main/org/h2/table/ColumnTemplate.java new file mode 100644 index 0000000000..44459cac22 --- /dev/null +++ b/h2/src/main/org/h2/table/ColumnTemplate.java @@ -0,0 +1,61 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.table; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.schema.Domain; + +/** + * Column or domain. + */ +public interface ColumnTemplate { + + Domain getDomain(); + + void setDomain(Domain domain); + + /** + * Set the default expression. + * + * @param session + * the session + * @param defaultExpression + * the default expression + */ + void setDefaultExpression(SessionLocal session, Expression defaultExpression); + + Expression getDefaultExpression(); + + Expression getEffectiveDefaultExpression(); + + String getDefaultSQL(); + + /** + * Set the on update expression. 
+ * + * @param session + * the session + * @param onUpdateExpression + * the on update expression + */ + void setOnUpdateExpression(SessionLocal session, Expression onUpdateExpression); + + Expression getOnUpdateExpression(); + + Expression getEffectiveOnUpdateExpression(); + + String getOnUpdateSQL(); + + /** + * Prepare all expressions of this column or domain. + * + * @param session + * the session + */ + void prepareExpressions(SessionLocal session); + +} diff --git a/h2/src/main/org/h2/table/DataChangeDeltaTable.java b/h2/src/main/org/h2/table/DataChangeDeltaTable.java new file mode 100644 index 0000000000..e9046a3130 --- /dev/null +++ b/h2/src/main/org/h2/table/DataChangeDeltaTable.java @@ -0,0 +1,134 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.table; + +import org.h2.command.dml.DataChangeStatement; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; +import org.h2.result.Row; +import org.h2.schema.Schema; + +/** + * A data change delta table. + */ +public class DataChangeDeltaTable extends VirtualConstructedTable { + + /** + * Result option. + */ + public enum ResultOption { + + /** + * OLD row. + */ + OLD, + + /** + * NEW row with evaluated default expressions, but before triggers. + */ + NEW, + + /** + * FINAL rows after triggers. + */ + FINAL; + + } + + /** + * Collects final row for INSERT operations. 
+ * + * @param session + * the session + * @param table + * the table + * @param deltaChangeCollector + * target result + * @param deltaChangeCollectionMode + * collection mode + * @param newRow + * the inserted row + */ + public static void collectInsertedFinalRow(SessionLocal session, Table table, ResultTarget deltaChangeCollector, + ResultOption deltaChangeCollectionMode, Row newRow) { + if (session.getMode().takeInsertedIdentity) { + Column column = table.getIdentityColumn(); + if (column != null) { + session.setLastIdentity(newRow.getValue(column.getColumnId())); + } + } + if (deltaChangeCollectionMode == ResultOption.FINAL) { + deltaChangeCollector.addRow(newRow.getValueList()); + } + } + + private final DataChangeStatement statement; + + private final ResultOption resultOption; + + private final Expression[] expressions; + + public DataChangeDeltaTable(Schema schema, SessionLocal session, DataChangeStatement statement, + ResultOption resultOption) { + super(schema, 0, statement.getStatementName()); + this.statement = statement; + this.resultOption = resultOption; + Table table = statement.getTable(); + Column[] tableColumns = table.getColumns(); + int columnCount = tableColumns.length; + Column[] c = new Column[columnCount]; + for (int i = 0; i < columnCount; i++) { + c[i] = tableColumns[i].getClone(); + } + setColumns(c); + Expression[] expressions = new Expression[columnCount]; + String tableName = getName(); + for (int i = 0; i < columnCount; i++) { + expressions[i] = new ExpressionColumn(database, null, tableName, c[i].getName()); + } + this.expressions = expressions; + } + + @Override + public boolean canGetRowCount(SessionLocal session) { + return false; + } + + @Override + public long getRowCount(SessionLocal session) { + return Long.MAX_VALUE; + } + + @Override + public long getRowCountApproximation(SessionLocal session) { + return Long.MAX_VALUE; + } + + @Override + public ResultInterface getResult(SessionLocal session) { + statement.prepare(); + 
int columnCount = expressions.length; + LocalResult result = new LocalResult(session, expressions, columnCount, columnCount); + result.setForDataChangeDeltaTable(); + statement.update(result, resultOption); + return result; + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return builder.append(resultOption.name()).append(" TABLE (").append(statement.getSQL()).append(')'); + } + + @Override + public boolean isDeterministic() { + return false; + } + +} diff --git a/h2/src/main/org/h2/table/DualTable.java b/h2/src/main/org/h2/table/DualTable.java new file mode 100644 index 0000000000..5f9b5ed189 --- /dev/null +++ b/h2/src/main/org/h2/table/DualTable.java @@ -0,0 +1,74 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.table; + +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.index.DualIndex; +import org.h2.index.Index; + +/** + * The DUAL table for selects without a FROM clause. + */ +public class DualTable extends VirtualTable { + + /** + * The name of the range table. + */ + public static final String NAME = "DUAL"; + + /** + * Create a new range with the given start and end expressions. 
+ * + * @param database + * the database + */ + public DualTable(Database database) { + super(database.getMainSchema(), 0, NAME); + setColumns(new Column[0]); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return builder.append(NAME); + } + + @Override + public boolean canGetRowCount(SessionLocal session) { + return true; + } + + @Override + public long getRowCount(SessionLocal session) { + return 1L; + } + + @Override + public TableType getTableType() { + return TableType.SYSTEM_TABLE; + } + + @Override + public Index getScanIndex(SessionLocal session) { + return new DualIndex(this); + } + + @Override + public long getMaxDataModificationId() { + return 0L; + } + + @Override + public long getRowCountApproximation(SessionLocal session) { + return 1L; + } + + @Override + public boolean isDeterministic() { + return true; + } + +} diff --git a/h2/src/main/org/h2/table/FunctionTable.java b/h2/src/main/org/h2/table/FunctionTable.java index c3a17ec5b6..61ea951735 100644 --- a/h2/src/main/org/h2/table/FunctionTable.java +++ b/h2/src/main/org/h2/table/FunctionTable.java @@ -1,69 +1,28 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.table; -import java.util.ArrayList; -import org.h2.api.ErrorCode; -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.expression.function.FunctionCall; -import org.h2.expression.function.TableFunction; -import org.h2.index.FunctionIndex; -import org.h2.index.Index; -import org.h2.index.IndexType; -import org.h2.message.DbException; +import org.h2.engine.SessionLocal; +import org.h2.expression.function.table.TableFunction; import org.h2.result.ResultInterface; -import org.h2.result.Row; import org.h2.schema.Schema; -import org.h2.value.Value; -import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; /** * A table backed by a system or user-defined function that returns a result * set. */ -public class FunctionTable extends Table { +public class FunctionTable extends VirtualConstructedTable { - private final FunctionCall function; - private final long rowCount; - private Expression functionExpr; - private ResultInterface cachedResult; - private Value cachedValue; + private final TableFunction function; - public FunctionTable(Schema schema, Session session, - Expression functionExpr, FunctionCall function) { - super(schema, 0, function.getName(), false, true); - this.functionExpr = functionExpr; + public FunctionTable(Schema schema, SessionLocal session, TableFunction function) { + super(schema, 0, function.getName()); this.function = function; - if (function instanceof TableFunction) { - rowCount = ((TableFunction) function).getRowCount(); - } else { - rowCount = Long.MAX_VALUE; - } function.optimize(session); - int type = function.getValueType(); - if (type != Value.RESULT_SET) { - throw DbException.get( - ErrorCode.FUNCTION_MUST_RETURN_RESULT_SET_1, function.getName()); - } - Expression[] args = function.getArgs(); - int numParams = args.length; - Expression[] columnListArgs = new Expression[numParams]; - for (int i = 0; i < numParams; i++) { - args[i] = 
args[i].optimize(session); - columnListArgs[i] = args[i]; - } - ValueResultSet template = function.getValueForColumnList( - session, columnListArgs); - if (template == null) { - throw DbException.get( - ErrorCode.FUNCTION_MUST_RETURN_RESULT_SET_1, function.getName()); - } - ResultInterface result = template.getResult(); + ResultInterface result = function.getValueTemplate(session); int columnCount = result.getVisibleColumnCount(); Column[] cols = new Column[columnCount]; for (int i = 0; i < columnCount; i++) { @@ -73,176 +32,33 @@ public FunctionTable(Schema schema, Session session, } @Override - public boolean lock(Session session, boolean exclusive, boolean forceLockEvenInMvcc) { - // nothing to do + public boolean canGetRowCount(SessionLocal session) { return false; } @Override - public void close(Session session) { - // nothing to do - } - - @Override - public void unlock(Session s) { - // nothing to do - } - - @Override - public boolean isLockedExclusively() { - return false; - } - - @Override - public Index addIndex(Session session, String indexName, int indexId, - IndexColumn[] cols, IndexType indexType, boolean create, - String indexComment) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public void removeRow(Session session, Row row) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public void truncate(Session session) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public boolean canDrop() { - throw DbException.throwInternalError(toString()); - } - - @Override - public void addRow(Session session, Row row) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public void checkSupportAlter() { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public TableType getTableType() { - return null; - } - - @Override - public Index getScanIndex(Session session) { - return new FunctionIndex(this, IndexColumn.wrap(columns)); - } - - 
@Override - public ArrayList getIndexes() { - return null; - } - - @Override - public boolean canGetRowCount() { - return rowCount != Long.MAX_VALUE; - } - - @Override - public long getRowCount(Session session) { - return rowCount; - } - - @Override - public String getCreateSQL() { - return null; - } - - @Override - public String getDropSQL() { - return null; - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("ALIAS"); - } - - /** - * Read the result from the function. This method buffers the result in a - * temporary file. - * - * @param session the session - * @return the result - */ - public ResultInterface getResult(Session session) { - ValueResultSet v = getValueResultSet(session); - if (v == null) { - return null; - } - if (cachedResult != null && cachedValue == v) { - cachedResult.reset(); - return cachedResult; - } - ResultInterface result = v.getResult(); - if (function.isDeterministic()) { - cachedResult = result; - cachedValue = v; - } - return result; - } - - /** - * Read the result set from the function. This method doesn't cache. - * - * @param session the session - * @return the result - */ - public ResultInterface getResultSet(Session session) { - ValueResultSet v = getValueResultSet(session); - return v == null ? 
null : v.getResult(); - } - - private ValueResultSet getValueResultSet(Session session) { - functionExpr = functionExpr.optimize(session); - Value v = functionExpr.getValue(session); - if (v == ValueNull.INSTANCE) { - return null; - } - return (ValueResultSet) v; - } - - public boolean isBufferResultSetToLocalTemp() { - return function.isBufferResultSetToLocalTemp(); - } - - @Override - public long getMaxDataModificationId() { - // TODO optimization: table-as-a-function currently doesn't know the - // last modified date + public long getRowCount(SessionLocal session) { return Long.MAX_VALUE; } @Override - public Index getUniqueIndex() { - return null; - } - - @Override - public String getSQL(boolean alwaysQuote) { - return function.getSQL(alwaysQuote); + public long getRowCountApproximation(SessionLocal session) { + return Long.MAX_VALUE; } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - return builder.append(function.getSQL(alwaysQuote)); + public ResultInterface getResult(SessionLocal session) { + return function.getValue(session); } @Override - public long getRowCountApproximation() { - return rowCount; + public String getSQL(int sqlFlags) { + return function.getSQL(sqlFlags); } @Override - public long getDiskSpaceUsed() { - return 0; + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return builder.append(function.getSQL(sqlFlags)); } @Override @@ -250,9 +66,4 @@ public boolean isDeterministic() { return function.isDeterministic(); } - @Override - public boolean canReference() { - return false; - } - } diff --git a/h2/src/main/org/h2/table/GeneratedColumnResolver.java b/h2/src/main/org/h2/table/GeneratedColumnResolver.java new file mode 100644 index 0000000000..a7883de6e4 --- /dev/null +++ b/h2/src/main/org/h2/table/GeneratedColumnResolver.java @@ -0,0 +1,101 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.table; + +import java.util.HashMap; + +import org.h2.result.Row; +import org.h2.value.Value; +import org.h2.value.ValueBigint; + +/** + * Column resolver for generated columns. + */ +class GeneratedColumnResolver implements ColumnResolver { + + private final Table table; + + private Column[] columns; + + private HashMap columnMap; + + private Row current; + + /** + * Column resolver for generated columns. + * + * @param table + * the table + */ + GeneratedColumnResolver(Table table) { + this.table = table; + } + + /** + * Set the current row. + * + * @param current + * the current row + */ + void set(Row current) { + this.current = current; + } + + @Override + public Column[] getColumns() { + Column[] columns = this.columns; + if (columns == null) { + this.columns = columns = createColumns(); + } + return columns; + } + + private Column[] createColumns() { + Column[] allColumns = table.getColumns(); + int totalCount = allColumns.length, baseCount = totalCount; + for (int i = 0; i < totalCount; i++) { + if (allColumns[i].isGenerated()) { + baseCount--; + } + } + Column[] baseColumns = new Column[baseCount]; + for (int i = 0, j = 0; i < totalCount; i++) { + Column c = allColumns[i]; + if (!c.isGenerated()) { + baseColumns[j++] = c; + } + } + return baseColumns; + } + + @Override + public Column findColumn(String name) { + HashMap columnMap = this.columnMap; + if (columnMap == null) { + columnMap = table.getDatabase().newStringMap(); + for (Column c : getColumns()) { + columnMap.put(c.getName(), c); + } + this.columnMap = columnMap; + } + return columnMap.get(name); + } + + @Override + public Value getValue(Column column) { + int columnId = column.getColumnId(); + if (columnId == -1) { + return ValueBigint.get(current.getKey()); + } + return current.getValue(columnId); + } + + @Override + public Column getRowIdColumn() { + return table.getRowIdColumn(); + } + +} diff --git a/h2/src/main/org/h2/table/IndexColumn.java 
b/h2/src/main/org/h2/table/IndexColumn.java index a3e5e9074a..16cfbf8b45 100644 --- a/h2/src/main/org/h2/table/IndexColumn.java +++ b/h2/src/main/org/h2/table/IndexColumn.java @@ -1,11 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; import org.h2.result.SortOrder; +import org.h2.util.HasSQL; +import org.h2.util.ParserUtil; /** * This represents a column item of an index. This is required because some @@ -13,10 +15,15 @@ */ public class IndexColumn { + /** + * Do not append ordering. + */ + public static final int SQL_NO_ORDER = 0x8000_0000; + /** * The column name. */ - public String columnName; + public final String columnName; /** * The column, or null if not set. @@ -36,15 +43,36 @@ public class IndexColumn { * string builder * @param columns * index columns - * @param alwaysQuote quote all identifiers + * @param sqlFlags + * formatting flags * @return the specified string builder */ - public static StringBuilder writeColumns(StringBuilder builder, IndexColumn[] columns, boolean alwaysQuote) { - for (int i = 0, l = columns.length; i < l; i++) { - if (i > 0) { + public static StringBuilder writeColumns(StringBuilder builder, IndexColumn[] columns, int sqlFlags) { + return writeColumns(builder, columns, 0, columns.length, sqlFlags); + } + + /** + * Appends the specified columns to the specified builder. 
+ * + * @param builder + * string builder + * @param startOffset + * start offset, inclusive + * @param endOffset + * end offset, exclusive + * @param columns + * index columns + * @param sqlFlags + * formatting flags + * @return the specified string builder + */ + public static StringBuilder writeColumns(StringBuilder builder, IndexColumn[] columns, int startOffset, + int endOffset, int sqlFlags) { + for (int i = startOffset; i < endOffset; i++) { + if (i > startOffset) { builder.append(", "); } - columns[i].getSQL(builder, alwaysQuote); + columns[i].getSQL(builder, sqlFlags); } return builder; } @@ -60,31 +88,73 @@ public static StringBuilder writeColumns(StringBuilder builder, IndexColumn[] co * separator * @param suffix * additional SQL to append after each column - * @param alwaysQuote quote all identifiers + * @param sqlFlags + * formatting flags * @return the specified string builder */ public static StringBuilder writeColumns(StringBuilder builder, IndexColumn[] columns, String separator, - String suffix, boolean alwaysQuote) { + String suffix, int sqlFlags) { for (int i = 0, l = columns.length; i < l; i++) { if (i > 0) { builder.append(separator); } - columns[i].getSQL(builder, alwaysQuote).append(suffix); + columns[i].getSQL(builder, sqlFlags).append(suffix); } return builder; } + /** + * Creates a new instance with the specified name. + * + * @param columnName + * the column name + */ + public IndexColumn(String columnName) { + this.columnName = columnName; + } + + /** + * Creates a new instance with the specified name. + * + * @param columnName + * the column name + * @param sortType + * the sort type + */ + public IndexColumn(String columnName, int sortType) { + this.columnName = columnName; + this.sortType = sortType; + } + + /** + * Creates a new instance with the specified column. 
+ * + * @param column + * the column + */ + public IndexColumn(Column column) { + columnName = null; + this.column = column; + } + /** * Appends the SQL snippet for this index column to the specified string builder. * * @param builder * string builder - * @param alwaysQuote - * quote all identifiers + * @param sqlFlags + * formatting flags * @return the specified string builder */ - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { - SortOrder.typeToString(column.getSQL(builder, alwaysQuote), sortType); + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if (column != null) { + column.getSQL(builder, sqlFlags); + } else { + ParserUtil.quoteIdentifier(builder, columnName, sqlFlags); + } + if ((sqlFlags & SQL_NO_ORDER) == 0) { + SortOrder.typeToString(builder, sortType); + } return builder; } @@ -98,8 +168,7 @@ public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { public static IndexColumn[] wrap(Column[] columns) { IndexColumn[] list = new IndexColumn[columns.length]; for (int i = 0; i < list.length; i++) { - list[i] = new IndexColumn(); - list[i].column = columns[i]; + list[i] = new IndexColumn(columns[i]); } return list; } @@ -118,6 +187,6 @@ public static void mapColumns(IndexColumn[] indexColumns, Table table) { @Override public String toString() { - return getSQL(new StringBuilder("IndexColumn "), false).toString(); + return getSQL(new StringBuilder("IndexColumn "), HasSQL.TRACE_SQL_FLAGS).toString(); } } diff --git a/h2/src/main/org/h2/table/IndexHints.java b/h2/src/main/org/h2/table/IndexHints.java index 4467da7d9b..30a3e1b025 100644 --- a/h2/src/main/org/h2/table/IndexHints.java +++ b/h2/src/main/org/h2/table/IndexHints.java @@ -1,15 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; -import org.h2.index.Index; - import java.util.LinkedHashSet; import java.util.Set; +import org.h2.index.Index; + /** * Contains the hints for which index to use for a specific table. Currently * allows a list of "use indexes" to be specified. diff --git a/h2/src/main/org/h2/table/InformationSchemaTable.java b/h2/src/main/org/h2/table/InformationSchemaTable.java new file mode 100644 index 0000000000..f7957bbb63 --- /dev/null +++ b/h2/src/main/org/h2/table/InformationSchemaTable.java @@ -0,0 +1,3480 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.table; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; + +import org.h2.api.IntervalQualifier; +import org.h2.api.Trigger; +import org.h2.command.Command; +import org.h2.command.Parser; +import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.constraint.ConstraintCheck; +import org.h2.constraint.ConstraintDomain; +import org.h2.constraint.ConstraintReferential; +import org.h2.constraint.ConstraintUnique; +import org.h2.engine.Constants; +import org.h2.engine.DbObject; +import org.h2.engine.QueryStatisticsData; +import org.h2.engine.Right; +import org.h2.engine.RightOwner; +import org.h2.engine.Role; +import org.h2.engine.SessionLocal; +import org.h2.engine.SessionLocal.State; +import org.h2.engine.Setting; +import org.h2.engine.User; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.ValueExpression; +import org.h2.index.Index; +import org.h2.index.MetaIndex; +import org.h2.message.DbException; +import 
org.h2.mvstore.FileStore; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.db.Store; +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.result.SortOrder; +import org.h2.schema.Constant; +import org.h2.schema.Domain; +import org.h2.schema.FunctionAlias; +import org.h2.schema.Schema; +import org.h2.schema.Sequence; +import org.h2.schema.TriggerObject; +import org.h2.schema.UserDefinedFunction; +import org.h2.schema.FunctionAlias.JavaMethod; +import org.h2.store.InDoubtTransaction; +import org.h2.util.DateTimeUtils; +import org.h2.util.MathUtils; +import org.h2.util.NetworkConnectionInfo; +import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; +import org.h2.util.Utils; +import org.h2.util.geometry.EWKTUtils; +import org.h2.value.CompareMode; +import org.h2.value.DataType; +import org.h2.value.ExtTypeInfoEnum; +import org.h2.value.ExtTypeInfoGeometry; +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueToObjectConverter2; +import org.h2.value.ValueVarchar; + +/** + * This class is responsible to build the INFORMATION_SCHEMA tables. 
+ */ +public final class InformationSchemaTable extends MetaTable { + + private static final String CHARACTER_SET_NAME = "Unicode"; + + // Standard table + + private static final int INFORMATION_SCHEMA_CATALOG_NAME = 0; + + // Standard views + + private static final int CHECK_CONSTRAINTS = INFORMATION_SCHEMA_CATALOG_NAME + 1; + + private static final int COLLATIONS = CHECK_CONSTRAINTS + 1; + + private static final int COLUMNS = COLLATIONS + 1; + + private static final int COLUMN_PRIVILEGES = COLUMNS + 1; + + private static final int CONSTRAINT_COLUMN_USAGE = COLUMN_PRIVILEGES + 1; + + private static final int DOMAINS = CONSTRAINT_COLUMN_USAGE + 1; + + private static final int DOMAIN_CONSTRAINTS = DOMAINS + 1; + + private static final int ELEMENT_TYPES = DOMAIN_CONSTRAINTS + 1; + + private static final int FIELDS = ELEMENT_TYPES + 1; + + private static final int KEY_COLUMN_USAGE = FIELDS + 1; + + private static final int PARAMETERS = KEY_COLUMN_USAGE + 1; + + private static final int REFERENTIAL_CONSTRAINTS = PARAMETERS + 1; + + private static final int ROUTINES = REFERENTIAL_CONSTRAINTS + 1; + + private static final int SCHEMATA = ROUTINES + 1; + + private static final int SEQUENCES = SCHEMATA + 1; + + private static final int TABLES = SEQUENCES + 1; + + private static final int TABLE_CONSTRAINTS = TABLES + 1; + + private static final int TABLE_PRIVILEGES = TABLE_CONSTRAINTS + 1; + + private static final int TRIGGERS = TABLE_PRIVILEGES + 1; + + private static final int VIEWS = TRIGGERS + 1; + + // Extensions + + private static final int CONSTANTS = VIEWS + 1; + + private static final int ENUM_VALUES = CONSTANTS + 1; + + private static final int INDEXES = ENUM_VALUES + 1; + + private static final int INDEX_COLUMNS = INDEXES + 1; + + private static final int IN_DOUBT = INDEX_COLUMNS + 1; + + private static final int LOCKS = IN_DOUBT + 1; + + private static final int QUERY_STATISTICS = LOCKS + 1; + + private static final int RIGHTS = QUERY_STATISTICS + 1; + + private 
static final int ROLES = RIGHTS + 1; + + private static final int SESSIONS = ROLES + 1; + + private static final int SESSION_STATE = SESSIONS + 1; + + private static final int SETTINGS = SESSION_STATE + 1; + + private static final int SYNONYMS = SETTINGS + 1; + + private static final int USERS = SYNONYMS + 1; + + /** + * The number of meta table types. Supported meta table types are + * {@code 0..META_TABLE_TYPE_COUNT - 1}. + */ + public static final int META_TABLE_TYPE_COUNT = USERS + 1; + + private final boolean isView; + + /** + * Create a new metadata table. + * + * @param schema the schema + * @param id the object id + * @param type the meta table type + */ + public InformationSchemaTable(Schema schema, int id, int type) { + super(schema, id, type); + Column[] cols; + String indexColumnName = null; + boolean isView = true; + switch (type) { + // Standard table + case INFORMATION_SCHEMA_CATALOG_NAME: + setMetaTableName("INFORMATION_SCHEMA_CATALOG_NAME"); + isView = false; + cols = new Column[] { + column("CATALOG_NAME"), // + }; + break; + // Standard views + case CHECK_CONSTRAINTS: + setMetaTableName("CHECK_CONSTRAINTS"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + column("CHECK_CLAUSE"), // + }; + indexColumnName = "CONSTRAINT_NAME"; + break; + case COLLATIONS: + setMetaTableName("COLLATIONS"); + cols = new Column[] { + column("COLLATION_CATALOG"), // + column("COLLATION_SCHEMA"), // + column("COLLATION_NAME"), // + column("PAD_ATTRIBUTE"), // + // extensions + column("LANGUAGE_TAG"), // + }; + break; + case COLUMNS: + setMetaTableName("COLUMNS"); + cols = new Column[] { + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("COLUMN_NAME"), // + column("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER), // + column("COLUMN_DEFAULT"), // + column("IS_NULLABLE"), // + column("DATA_TYPE"), // + column("CHARACTER_MAXIMUM_LENGTH", 
TypeInfo.TYPE_BIGINT), // + column("CHARACTER_OCTET_LENGTH", TypeInfo.TYPE_BIGINT), // + column("NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION_RADIX", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("DATETIME_PRECISION", TypeInfo.TYPE_INTEGER), // + column("INTERVAL_TYPE"), // + column("INTERVAL_PRECISION", TypeInfo.TYPE_INTEGER), // + column("CHARACTER_SET_CATALOG"), // + column("CHARACTER_SET_SCHEMA"), // + column("CHARACTER_SET_NAME"), // + column("COLLATION_CATALOG"), // + column("COLLATION_SCHEMA"), // + column("COLLATION_NAME"), // + column("DOMAIN_CATALOG"), // + column("DOMAIN_SCHEMA"), // + column("DOMAIN_NAME"), // + column("MAXIMUM_CARDINALITY", TypeInfo.TYPE_INTEGER), // + column("DTD_IDENTIFIER"), // + column("IS_IDENTITY"), // + column("IDENTITY_GENERATION"), // + column("IDENTITY_START", TypeInfo.TYPE_BIGINT), // + column("IDENTITY_INCREMENT", TypeInfo.TYPE_BIGINT), // + column("IDENTITY_MAXIMUM", TypeInfo.TYPE_BIGINT), // + column("IDENTITY_MINIMUM", TypeInfo.TYPE_BIGINT), // + column("IDENTITY_CYCLE"), // + column("IS_GENERATED"), // + column("GENERATION_EXPRESSION"), // + column("DECLARED_DATA_TYPE"), // + column("DECLARED_NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("DECLARED_NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + // extensions + column("GEOMETRY_TYPE"), // + column("GEOMETRY_SRID", TypeInfo.TYPE_INTEGER), // + column("IDENTITY_BASE", TypeInfo.TYPE_BIGINT), // + column("IDENTITY_CACHE", TypeInfo.TYPE_BIGINT), // + column("COLUMN_ON_UPDATE"), // + column("IS_VISIBLE", TypeInfo.TYPE_BOOLEAN), // + column("DEFAULT_ON_NULL", TypeInfo.TYPE_BOOLEAN), // + column("SELECTIVITY", TypeInfo.TYPE_INTEGER), // + column("REMARKS"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case COLUMN_PRIVILEGES: + setMetaTableName("COLUMN_PRIVILEGES"); + cols = new Column[] { + column("GRANTOR"), // + column("GRANTEE"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), 
// + column("TABLE_NAME"), // + column("COLUMN_NAME"), // + column("PRIVILEGE_TYPE"), // + column("IS_GRANTABLE"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case CONSTRAINT_COLUMN_USAGE: + setMetaTableName("CONSTRAINT_COLUMN_USAGE"); + cols = new Column[] { + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("COLUMN_NAME"), // + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case DOMAINS: + setMetaTableName("DOMAINS"); + cols = new Column[] { + column("DOMAIN_CATALOG"), // + column("DOMAIN_SCHEMA"), // + column("DOMAIN_NAME"), // + column("DATA_TYPE"), // + column("CHARACTER_MAXIMUM_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_OCTET_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_SET_CATALOG"), // + column("CHARACTER_SET_SCHEMA"), // + column("CHARACTER_SET_NAME"), // + column("COLLATION_CATALOG"), // + column("COLLATION_SCHEMA"), // + column("COLLATION_NAME"), // + column("NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION_RADIX", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("DATETIME_PRECISION", TypeInfo.TYPE_INTEGER), // + column("INTERVAL_TYPE"), // + column("INTERVAL_PRECISION", TypeInfo.TYPE_INTEGER), // + column("DOMAIN_DEFAULT"), // + column("MAXIMUM_CARDINALITY", TypeInfo.TYPE_INTEGER), // + column("DTD_IDENTIFIER"), // + column("DECLARED_DATA_TYPE"), // + column("DECLARED_NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("DECLARED_NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + // extensions + column("GEOMETRY_TYPE"), // + column("GEOMETRY_SRID", TypeInfo.TYPE_INTEGER), // + column("DOMAIN_ON_UPDATE"), // + column("PARENT_DOMAIN_CATALOG"), // + column("PARENT_DOMAIN_SCHEMA"), // + column("PARENT_DOMAIN_NAME"), // + column("REMARKS"), // + }; + indexColumnName = "DOMAIN_NAME"; + break; + case DOMAIN_CONSTRAINTS: + 
setMetaTableName("DOMAIN_CONSTRAINTS"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + column("DOMAIN_CATALOG"), // + column("DOMAIN_SCHEMA"), // + column("DOMAIN_NAME"), // + column("IS_DEFERRABLE"), // + column("INITIALLY_DEFERRED"), // + // extensions + column("REMARKS"), // + }; + indexColumnName = "DOMAIN_NAME"; + break; + case ELEMENT_TYPES: + setMetaTableName("ELEMENT_TYPES"); + cols = new Column[] { + column("OBJECT_CATALOG"), // + column("OBJECT_SCHEMA"), // + column("OBJECT_NAME"), // + column("OBJECT_TYPE"), // + column("COLLECTION_TYPE_IDENTIFIER"), // + column("DATA_TYPE"), // + column("CHARACTER_MAXIMUM_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_OCTET_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_SET_CATALOG"), // + column("CHARACTER_SET_SCHEMA"), // + column("CHARACTER_SET_NAME"), // + column("COLLATION_CATALOG"), // + column("COLLATION_SCHEMA"), // + column("COLLATION_NAME"), // + column("NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION_RADIX", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("DATETIME_PRECISION", TypeInfo.TYPE_INTEGER), // + column("INTERVAL_TYPE"), // + column("INTERVAL_PRECISION", TypeInfo.TYPE_INTEGER), // + column("MAXIMUM_CARDINALITY", TypeInfo.TYPE_INTEGER), // + column("DTD_IDENTIFIER"), // + column("DECLARED_DATA_TYPE"), // + column("DECLARED_NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("DECLARED_NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + // extensions + column("GEOMETRY_TYPE"), // + column("GEOMETRY_SRID", TypeInfo.TYPE_INTEGER), // + }; + break; + case FIELDS: + setMetaTableName("FIELDS"); + cols = new Column[] { + column("OBJECT_CATALOG"), // + column("OBJECT_SCHEMA"), // + column("OBJECT_NAME"), // + column("OBJECT_TYPE"), // + column("ROW_IDENTIFIER"), // + column("FIELD_NAME"), // + column("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER), // + 
column("DATA_TYPE"), // + column("CHARACTER_MAXIMUM_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_OCTET_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_SET_CATALOG"), // + column("CHARACTER_SET_SCHEMA"), // + column("CHARACTER_SET_NAME"), // + column("COLLATION_CATALOG"), // + column("COLLATION_SCHEMA"), // + column("COLLATION_NAME"), // + column("NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION_RADIX", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("DATETIME_PRECISION", TypeInfo.TYPE_INTEGER), // + column("INTERVAL_TYPE"), // + column("INTERVAL_PRECISION", TypeInfo.TYPE_INTEGER), // + column("MAXIMUM_CARDINALITY", TypeInfo.TYPE_INTEGER), // + column("DTD_IDENTIFIER"), // + column("DECLARED_DATA_TYPE"), // + column("DECLARED_NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("DECLARED_NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + // extensions + column("GEOMETRY_TYPE"), // + column("GEOMETRY_SRID", TypeInfo.TYPE_INTEGER), // + }; + break; + case KEY_COLUMN_USAGE: + setMetaTableName("KEY_COLUMN_USAGE"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("COLUMN_NAME"), // + column("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER), // + column("POSITION_IN_UNIQUE_CONSTRAINT", TypeInfo.TYPE_INTEGER), // + }; + indexColumnName = "TABLE_NAME"; + break; + case PARAMETERS: + setMetaTableName("PARAMETERS"); + cols = new Column[] { + column("SPECIFIC_CATALOG"), // + column("SPECIFIC_SCHEMA"), // + column("SPECIFIC_NAME"), // + column("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER), // + column("PARAMETER_MODE"), // + column("IS_RESULT"), // + column("AS_LOCATOR"), // + column("PARAMETER_NAME"), // + column("DATA_TYPE"), // + column("CHARACTER_MAXIMUM_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_OCTET_LENGTH", 
TypeInfo.TYPE_BIGINT), // + column("CHARACTER_SET_CATALOG"), // + column("CHARACTER_SET_SCHEMA"), // + column("CHARACTER_SET_NAME"), // + column("COLLATION_CATALOG"), // + column("COLLATION_SCHEMA"), // + column("COLLATION_NAME"), // + column("NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION_RADIX", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("DATETIME_PRECISION", TypeInfo.TYPE_INTEGER), // + column("INTERVAL_TYPE"), // + column("INTERVAL_PRECISION", TypeInfo.TYPE_INTEGER), // + column("MAXIMUM_CARDINALITY", TypeInfo.TYPE_INTEGER), // + column("DTD_IDENTIFIER"), // + column("DECLARED_DATA_TYPE"), // + column("DECLARED_NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("DECLARED_NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("PARAMETER_DEFAULT"), // + // extensions + column("GEOMETRY_TYPE"), // + column("GEOMETRY_SRID", TypeInfo.TYPE_INTEGER), // + }; + break; + case REFERENTIAL_CONSTRAINTS: + setMetaTableName("REFERENTIAL_CONSTRAINTS"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + column("UNIQUE_CONSTRAINT_CATALOG"), // + column("UNIQUE_CONSTRAINT_SCHEMA"), // + column("UNIQUE_CONSTRAINT_NAME"), // + column("MATCH_OPTION"), // + column("UPDATE_RULE"), // + column("DELETE_RULE"), // + }; + indexColumnName = "CONSTRAINT_NAME"; + break; + case ROUTINES: + setMetaTableName("ROUTINES"); + cols = new Column[] { + column("SPECIFIC_CATALOG"), // + column("SPECIFIC_SCHEMA"), // + column("SPECIFIC_NAME"), // + column("ROUTINE_CATALOG"), // + column("ROUTINE_SCHEMA"), // + column("ROUTINE_NAME"), // + column("ROUTINE_TYPE"), // + column("DATA_TYPE"), // + column("CHARACTER_MAXIMUM_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_OCTET_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_SET_CATALOG"), // + column("CHARACTER_SET_SCHEMA"), // + column("CHARACTER_SET_NAME"), // + column("COLLATION_CATALOG"), // 
+ column("COLLATION_SCHEMA"), // + column("COLLATION_NAME"), // + column("NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION_RADIX", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("DATETIME_PRECISION", TypeInfo.TYPE_INTEGER), // + column("INTERVAL_TYPE"), // + column("INTERVAL_PRECISION", TypeInfo.TYPE_INTEGER), // + column("MAXIMUM_CARDINALITY", TypeInfo.TYPE_INTEGER), // + column("DTD_IDENTIFIER"), // + column("ROUTINE_BODY"), // + column("ROUTINE_DEFINITION"), // + column("EXTERNAL_NAME"), // + column("EXTERNAL_LANGUAGE"), // + column("PARAMETER_STYLE"), // + column("IS_DETERMINISTIC"), // + column("DECLARED_DATA_TYPE"), // + column("DECLARED_NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("DECLARED_NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + // extensions + column("GEOMETRY_TYPE"), // + column("GEOMETRY_SRID", TypeInfo.TYPE_INTEGER), // + column("REMARKS"), // + }; + break; + case SCHEMATA: + setMetaTableName("SCHEMATA"); + cols = new Column[] { + column("CATALOG_NAME"), // + column("SCHEMA_NAME"), // + column("SCHEMA_OWNER"), // + column("DEFAULT_CHARACTER_SET_CATALOG"), // + column("DEFAULT_CHARACTER_SET_SCHEMA"), // + column("DEFAULT_CHARACTER_SET_NAME"), // + column("SQL_PATH"), // + // extensions + column("DEFAULT_COLLATION_NAME"), // // MySQL + column("REMARKS"), // + }; + break; + case SEQUENCES: + setMetaTableName("SEQUENCES"); + cols = new Column[] { + column("SEQUENCE_CATALOG"), // + column("SEQUENCE_SCHEMA"), // + column("SEQUENCE_NAME"), // + column("DATA_TYPE"), // + column("NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION_RADIX", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("START_VALUE", TypeInfo.TYPE_BIGINT), // + column("MINIMUM_VALUE", TypeInfo.TYPE_BIGINT), // + column("MAXIMUM_VALUE", TypeInfo.TYPE_BIGINT), // + column("INCREMENT", TypeInfo.TYPE_BIGINT), // + column("CYCLE_OPTION"), // + 
column("DECLARED_DATA_TYPE"), // + column("DECLARED_NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("DECLARED_NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + // extensions + column("BASE_VALUE", TypeInfo.TYPE_BIGINT), // + column("CACHE", TypeInfo.TYPE_BIGINT), // + column("REMARKS"), // + }; + indexColumnName = "SEQUENCE_NAME"; + break; + case TABLES: + setMetaTableName("TABLES"); + cols = new Column[] { + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("TABLE_TYPE"), // + column("IS_INSERTABLE_INTO"), // + column("COMMIT_ACTION"), // + // extensions + column("STORAGE_TYPE"), // + column("REMARKS"), // + column("LAST_MODIFICATION", TypeInfo.TYPE_BIGINT), // + column("TABLE_CLASS"), // + column("ROW_COUNT_ESTIMATE", TypeInfo.TYPE_BIGINT), // + }; + indexColumnName = "TABLE_NAME"; + break; + case TABLE_CONSTRAINTS: + setMetaTableName("TABLE_CONSTRAINTS"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + column("CONSTRAINT_TYPE"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("IS_DEFERRABLE"), // + column("INITIALLY_DEFERRED"), // + column("ENFORCED"), // + // extensions + column("INDEX_CATALOG"), // + column("INDEX_SCHEMA"), // + column("INDEX_NAME"), // + column("REMARKS"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case TABLE_PRIVILEGES: + setMetaTableName("TABLE_PRIVILEGES"); + cols = new Column[] { + column("GRANTOR"), // + column("GRANTEE"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("PRIVILEGE_TYPE"), // + column("IS_GRANTABLE"), // + column("WITH_HIERARCHY"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case TRIGGERS: + setMetaTableName("TRIGGERS"); + cols = new Column[] { + column("TRIGGER_CATALOG"), // + column("TRIGGER_SCHEMA"), // + column("TRIGGER_NAME"), // + column("EVENT_MANIPULATION"), // + 
column("EVENT_OBJECT_CATALOG"), // + column("EVENT_OBJECT_SCHEMA"), // + column("EVENT_OBJECT_TABLE"), // + column("ACTION_ORIENTATION"), // + column("ACTION_TIMING"), // + // extensions + column("IS_ROLLBACK", TypeInfo.TYPE_BOOLEAN), // + column("JAVA_CLASS"), // + column("QUEUE_SIZE", TypeInfo.TYPE_INTEGER), // + column("NO_WAIT", TypeInfo.TYPE_BOOLEAN), // + column("REMARKS"), // + }; + indexColumnName = "EVENT_OBJECT_TABLE"; + break; + case VIEWS: + setMetaTableName("VIEWS"); + cols = new Column[] { + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("VIEW_DEFINITION"), // + column("CHECK_OPTION"), // + column("IS_UPDATABLE"), // + column("INSERTABLE_INTO"), // + column("IS_TRIGGER_UPDATABLE"), // + column("IS_TRIGGER_DELETABLE"), // + column("IS_TRIGGER_INSERTABLE_INTO"), // + // extensions + column("STATUS"), // + column("REMARKS"), // + }; + indexColumnName = "TABLE_NAME"; + break; + // Extensions + case CONSTANTS: + setMetaTableName("CONSTANTS"); + isView = false; + cols = new Column[] { + column("CONSTANT_CATALOG"), // + column("CONSTANT_SCHEMA"), // + column("CONSTANT_NAME"), // + column("VALUE_DEFINITION"), // + column("DATA_TYPE"), // + column("CHARACTER_MAXIMUM_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_OCTET_LENGTH", TypeInfo.TYPE_BIGINT), // + column("CHARACTER_SET_CATALOG"), // + column("CHARACTER_SET_SCHEMA"), // + column("CHARACTER_SET_NAME"), // + column("COLLATION_CATALOG"), // + column("COLLATION_SCHEMA"), // + column("COLLATION_NAME"), // + column("NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION_RADIX", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("DATETIME_PRECISION", TypeInfo.TYPE_INTEGER), // + column("INTERVAL_TYPE"), // + column("INTERVAL_PRECISION", TypeInfo.TYPE_INTEGER), // + column("MAXIMUM_CARDINALITY", TypeInfo.TYPE_INTEGER), // + column("DTD_IDENTIFIER"), // + column("DECLARED_DATA_TYPE"), // + 
column("DECLARED_NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("DECLARED_NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("GEOMETRY_TYPE"), // + column("GEOMETRY_SRID", TypeInfo.TYPE_INTEGER), // + column("REMARKS"), // + }; + indexColumnName = "CONSTANT_NAME"; + break; + case ENUM_VALUES: + setMetaTableName("ENUM_VALUES"); + isView = false; + cols = new Column[] { + column("OBJECT_CATALOG"), // + column("OBJECT_SCHEMA"), // + column("OBJECT_NAME"), // + column("OBJECT_TYPE"), // + column("ENUM_IDENTIFIER"), // + column("VALUE_NAME"), // + column("VALUE_ORDINAL"), // + }; + break; + case INDEXES: + setMetaTableName("INDEXES"); + isView = false; + cols = new Column[] { + column("INDEX_CATALOG"), // + column("INDEX_SCHEMA"), // + column("INDEX_NAME"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("INDEX_TYPE_NAME"), // + column("IS_GENERATED", TypeInfo.TYPE_BOOLEAN), // + column("REMARKS"), // + column("INDEX_CLASS"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case INDEX_COLUMNS: + setMetaTableName("INDEX_COLUMNS"); + isView = false; + cols = new Column[] { + column("INDEX_CATALOG"), // + column("INDEX_SCHEMA"), // + column("INDEX_NAME"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("COLUMN_NAME"), // + column("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER), // + column("ORDERING_SPECIFICATION"), // + column("NULL_ORDERING"), // + column("IS_UNIQUE", TypeInfo.TYPE_BOOLEAN), // + }; + indexColumnName = "TABLE_NAME"; + break; + case IN_DOUBT: + setMetaTableName("IN_DOUBT"); + isView = false; + cols = new Column[] { + column("TRANSACTION_NAME"), // + column("TRANSACTION_STATE"), // + }; + break; + case LOCKS: + setMetaTableName("LOCKS"); + isView = false; + cols = new Column[] { + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("SESSION_ID", TypeInfo.TYPE_INTEGER), // + column("LOCK_TYPE"), // + }; + break; + case QUERY_STATISTICS: + 
setMetaTableName("QUERY_STATISTICS"); + isView = false; + cols = new Column[] { + column("SQL_STATEMENT"), // + column("EXECUTION_COUNT", TypeInfo.TYPE_INTEGER), // + column("MIN_EXECUTION_TIME", TypeInfo.TYPE_DOUBLE), // + column("MAX_EXECUTION_TIME", TypeInfo.TYPE_DOUBLE), // + column("CUMULATIVE_EXECUTION_TIME", TypeInfo.TYPE_DOUBLE), // + column("AVERAGE_EXECUTION_TIME", TypeInfo.TYPE_DOUBLE), // + column("STD_DEV_EXECUTION_TIME", TypeInfo.TYPE_DOUBLE), // + column("MIN_ROW_COUNT", TypeInfo.TYPE_BIGINT), // + column("MAX_ROW_COUNT", TypeInfo.TYPE_BIGINT), // + column("CUMULATIVE_ROW_COUNT", TypeInfo.TYPE_BIGINT), // + column("AVERAGE_ROW_COUNT", TypeInfo.TYPE_DOUBLE), // + column("STD_DEV_ROW_COUNT", TypeInfo.TYPE_DOUBLE), // + }; + break; + case RIGHTS: + setMetaTableName("RIGHTS"); + isView = false; + cols = new Column[] { + column("GRANTEE"), // + column("GRANTEETYPE"), // + column("GRANTEDROLE"), // + column("RIGHTS"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case ROLES: + setMetaTableName("ROLES"); + isView = false; + cols = new Column[] { + column("ROLE_NAME"), // + column("REMARKS"), // + }; + break; + case SESSIONS: + setMetaTableName("SESSIONS"); + isView = false; + cols = new Column[] { + column("SESSION_ID", TypeInfo.TYPE_INTEGER), // + column("USER_NAME"), // + column("SERVER"), // + column("CLIENT_ADDR"), // + column("CLIENT_INFO"), // + column("SESSION_START", TypeInfo.TYPE_TIMESTAMP_TZ), // + column("ISOLATION_LEVEL"), // + column("EXECUTING_STATEMENT"), // + column("EXECUTING_STATEMENT_START", TypeInfo.TYPE_TIMESTAMP_TZ), // + column("CONTAINS_UNCOMMITTED", TypeInfo.TYPE_BOOLEAN), // + column("SESSION_STATE"), // + column("BLOCKER_ID", TypeInfo.TYPE_INTEGER), // + column("SLEEP_SINCE", TypeInfo.TYPE_TIMESTAMP_TZ), // + }; + break; + case SESSION_STATE: + setMetaTableName("SESSION_STATE"); + isView = false; + cols = new Column[] { + column("STATE_KEY"), // + 
column("STATE_COMMAND"), // + }; + break; + case SETTINGS: + setMetaTableName("SETTINGS"); + isView = false; + cols = new Column[] { + column("SETTING_NAME"), // + column("SETTING_VALUE"), // + }; + break; + case SYNONYMS: + setMetaTableName("SYNONYMS"); + isView = false; + cols = new Column[] { + column("SYNONYM_CATALOG"), // + column("SYNONYM_SCHEMA"), // + column("SYNONYM_NAME"), // + column("SYNONYM_FOR"), // + column("SYNONYM_FOR_SCHEMA"), // + column("TYPE_NAME"), // + column("STATUS"), // + column("REMARKS"), // + }; + indexColumnName = "SYNONYM_NAME"; + break; + case USERS: + setMetaTableName("USERS"); + isView = false; + cols = new Column[] { + column("USER_NAME"), // + column("IS_ADMIN", TypeInfo.TYPE_BOOLEAN), + column("REMARKS"), // + }; + break; + default: + throw DbException.getInternalError("type=" + type); + } + setColumns(cols); + + if (indexColumnName == null) { + indexColumn = -1; + metaIndex = null; + } else { + indexColumn = getColumn(database.sysIdentifier(indexColumnName)).getColumnId(); + IndexColumn[] indexCols = IndexColumn.wrap(new Column[] { cols[indexColumn] }); + metaIndex = new MetaIndex(this, indexCols, false); + } + this.isView = isView; + } + + @Override + public ArrayList generateRows(SessionLocal session, SearchRow first, SearchRow last) { + Value indexFrom = null, indexTo = null; + if (indexColumn >= 0) { + if (first != null) { + indexFrom = first.getValue(indexColumn); + } + if (last != null) { + indexTo = last.getValue(indexColumn); + } + } + ArrayList rows = Utils.newSmallArrayList(); + String catalog = database.getShortName(); + switch (type) { + // Standard table + case INFORMATION_SCHEMA_CATALOG_NAME: + informationSchemaCatalogName(session, rows, catalog); + break; + // Standard views + case CHECK_CONSTRAINTS: + checkConstraints(session, indexFrom, indexTo, rows, catalog); + break; + case COLLATIONS: + collations(session, rows, catalog); + break; + case COLUMNS: + columns(session, indexFrom, indexTo, rows, catalog); + 
break; + case COLUMN_PRIVILEGES: + columnPrivileges(session, indexFrom, indexTo, rows, catalog); + break; + case CONSTRAINT_COLUMN_USAGE: + constraintColumnUsage(session, indexFrom, indexTo, rows, catalog); + break; + case DOMAINS: + domains(session, indexFrom, indexTo, rows, catalog); + break; + case DOMAIN_CONSTRAINTS: + domainConstraints(session, indexFrom, indexTo, rows, catalog); + break; + case ELEMENT_TYPES: + elementTypesFields(session, rows, catalog, ELEMENT_TYPES); + break; + case FIELDS: + elementTypesFields(session, rows, catalog, FIELDS); + break; + case KEY_COLUMN_USAGE: + keyColumnUsage(session, indexFrom, indexTo, rows, catalog); + break; + case PARAMETERS: + parameters(session, rows, catalog); + break; + case REFERENTIAL_CONSTRAINTS: + referentialConstraints(session, indexFrom, indexTo, rows, catalog); + break; + case ROUTINES: + routines(session, rows, catalog); + break; + case SCHEMATA: + schemata(session, rows, catalog); + break; + case SEQUENCES: + sequences(session, indexFrom, indexTo, rows, catalog); + break; + case TABLES: + tables(session, indexFrom, indexTo, rows, catalog); + break; + case TABLE_CONSTRAINTS: + tableConstraints(session, indexFrom, indexTo, rows, catalog); + break; + case TABLE_PRIVILEGES: + tablePrivileges(session, indexFrom, indexTo, rows, catalog); + break; + case TRIGGERS: + triggers(session, indexFrom, indexTo, rows, catalog); + break; + case VIEWS: + views(session, indexFrom, indexTo, rows, catalog); + break; + // Extensions + case CONSTANTS: + constants(session, indexFrom, indexTo, rows, catalog); + break; + case ENUM_VALUES: + elementTypesFields(session, rows, catalog, ENUM_VALUES); + break; + case INDEXES: + indexes(session, indexFrom, indexTo, rows, catalog, false); + break; + case INDEX_COLUMNS: + indexes(session, indexFrom, indexTo, rows, catalog, true); + break; + case IN_DOUBT: + inDoubt(session, rows); + break; + case LOCKS: + locks(session, rows); + break; + case QUERY_STATISTICS: + queryStatistics(session, 
rows); + break; + case RIGHTS: + rights(session, indexFrom, indexTo, rows); + break; + case ROLES: + roles(session, rows); + break; + case SESSIONS: + sessions(session, rows); + break; + case SESSION_STATE: + sessionState(session, rows); + break; + case SETTINGS: + settings(session, rows); + break; + case SYNONYMS: + synonyms(session, rows, catalog); + break; + case USERS: + users(session, rows); + break; + default: + throw DbException.getInternalError("type=" + type); + } + return rows; + } + + private void informationSchemaCatalogName(SessionLocal session, ArrayList rows, String catalog) { + add(session, rows, + // CATALOG_NAME + catalog); + } + + private void checkConstraints(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, + String catalog) { + for (Schema schema : database.getAllSchemas()) { + for (Constraint constraint : schema.getAllConstraints()) { + Type constraintType = constraint.getConstraintType(); + if (constraintType == Constraint.Type.CHECK) { + ConstraintCheck check = (ConstraintCheck) constraint; + Table table = check.getTable(); + if (hideTable(table, session)) { + continue; + } + } else if (constraintType != Constraint.Type.DOMAIN) { + continue; + } + String constraintName = constraint.getName(); + if (!checkIndex(session, constraintName, indexFrom, indexTo)) { + continue; + } + checkConstraints(session, rows, catalog, constraint, constraintName); + } + } + } + + private void checkConstraints(SessionLocal session, ArrayList rows, String catalog, Constraint constraint, + String constraintName) { + add(session, rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraintName, + // CHECK_CLAUSE + constraint.getExpression().getSQL(DEFAULT_SQL_FLAGS, Expression.WITHOUT_PARENTHESES) + ); + } + + private void collations(SessionLocal session, ArrayList rows, String catalog) { + String mainSchemaName = database.getMainSchema().getName(); + collations(session, 
rows, catalog, mainSchemaName, "OFF", null); + for (Locale l : CompareMode.getCollationLocales(false)) { + collations(session, rows, catalog, mainSchemaName, CompareMode.getName(l), l.toLanguageTag()); + } + } + + private void collations(SessionLocal session, ArrayList rows, String catalog, String mainSchemaName, + String name, String languageTag) { + if ("und".equals(languageTag)) { + languageTag = null; + } + add(session, rows, + // COLLATION_CATALOG + catalog, + // COLLATION_SCHEMA + mainSchemaName, + // COLLATION_NAME + name, + // PAD_ATTRIBUTE + "NO PAD", + // extensions + // LANGUAGE_TAG + languageTag + ); + } + + private void columns(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, String catalog) { + String mainSchemaName = database.getMainSchema().getName(); + String collation = database.getCompareMode().getName(); + if (indexFrom != null && indexFrom.equals(indexTo)) { + String tableName = indexFrom.getString(); + if (tableName == null) { + return; + } + for (Schema schema : database.getAllSchemas()) { + Table table = schema.getTableOrViewByName(session, tableName); + if (table != null) { + columns(session, rows, catalog, mainSchemaName, collation, table, table.getName()); + } + } + Table table = session.findLocalTempTable(tableName); + if (table != null) { + columns(session, rows, catalog, mainSchemaName, collation, table, table.getName()); + } + } else { + for (Schema schema : database.getAllSchemas()) { + for (Table table : schema.getAllTablesAndViews(session)) { + String tableName = table.getName(); + if (checkIndex(session, tableName, indexFrom, indexTo)) { + columns(session, rows, catalog, mainSchemaName, collation, table, tableName); + } + } + } + for (Table table : session.getLocalTempTables()) { + String tableName = table.getName(); + if (checkIndex(session, tableName, indexFrom, indexTo)) { + columns(session, rows, catalog, mainSchemaName, collation, table, tableName); + } + } + } + } + + private void columns(SessionLocal 
session, ArrayList rows, String catalog, String mainSchemaName, + String collation, Table table, String tableName) { + if (hideTable(table, session)) { + return; + } + Column[] cols = table.getColumns(); + for (int i = 0, l = cols.length; i < l;) { + columns(session, rows, catalog, mainSchemaName, collation, table, tableName, cols[i], ++i); + } + } + + private void columns(SessionLocal session, ArrayList rows, String catalog, String mainSchemaName, + String collation, Table table, String tableName, Column c, int ordinalPosition) { + TypeInfo typeInfo = c.getType(); + DataTypeInformation dt = DataTypeInformation.valueOf(typeInfo); + String characterSetCatalog, characterSetSchema, characterSetName, collationName; + if (dt.hasCharsetAndCollation) { + characterSetCatalog = catalog; + characterSetSchema = mainSchemaName; + characterSetName = CHARACTER_SET_NAME; + collationName = collation; + } else { + characterSetCatalog = characterSetSchema = characterSetName = collationName = null; + } + Domain domain = c.getDomain(); + String domainCatalog = null, domainSchema = null, domainName = null; + if (domain != null) { + domainCatalog = catalog; + domainSchema = domain.getSchema().getName(); + domainName = domain.getName(); + } + String columnDefault, isGenerated, generationExpression; + String isIdentity, identityGeneration, identityCycle; + Value identityStart, identityIncrement, identityMaximum, identityMinimum, identityBase, identityCache; + Sequence sequence = c.getSequence(); + if (sequence != null) { + columnDefault = null; + isGenerated = "NEVER"; + generationExpression = null; + isIdentity = "YES"; + identityGeneration = c.isGeneratedAlways() ? 
"ALWAYS" : "BY DEFAULT"; + identityStart = ValueBigint.get(sequence.getStartValue()); + identityIncrement = ValueBigint.get(sequence.getIncrement()); + identityMaximum = ValueBigint.get(sequence.getMaxValue()); + identityMinimum = ValueBigint.get(sequence.getMinValue()); + Sequence.Cycle cycle = sequence.getCycle(); + identityCycle = cycle.isCycle() ? "YES" : "NO"; + identityBase = cycle != Sequence.Cycle.EXHAUSTED ? ValueBigint.get(sequence.getBaseValue()) : null; + identityCache = ValueBigint.get(sequence.getCacheSize()); + } else { + if (c.isGenerated()) { + columnDefault = null; + isGenerated = "ALWAYS"; + generationExpression = c.getDefaultSQL(); + } else { + columnDefault = c.getDefaultSQL(); + isGenerated = "NEVER"; + generationExpression = null; + } + isIdentity = "NO"; + identityGeneration = identityCycle = null; + identityStart = identityIncrement = identityMaximum = identityMinimum = identityBase = identityCache + = null; + } + add(session, rows, + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // COLUMN_NAME + c.getName(), + // ORDINAL_POSITION + ValueInteger.get(ordinalPosition), + // COLUMN_DEFAULT + columnDefault, + // IS_NULLABLE + c.isNullable() ? 
"YES" : "NO", + // DATA_TYPE + identifier(dt.dataType), + // CHARACTER_MAXIMUM_LENGTH + dt.characterPrecision, + // CHARACTER_OCTET_LENGTH + dt.characterPrecision, + // NUMERIC_PRECISION + dt.numericPrecision, + // NUMERIC_PRECISION_RADIX + dt.numericPrecisionRadix, + // NUMERIC_SCALE + dt.numericScale, + // DATETIME_PRECISION + dt.datetimePrecision, + // INTERVAL_TYPE + dt.intervalType, + // INTERVAL_PRECISION + dt.intervalPrecision, + // CHARACTER_SET_CATALOG + characterSetCatalog, + // CHARACTER_SET_SCHEMA + characterSetSchema, + // CHARACTER_SET_NAME + characterSetName, + // COLLATION_CATALOG + characterSetCatalog, + // COLLATION_SCHEMA + characterSetSchema, + // COLLATION_NAME + collationName, + // DOMAIN_CATALOG + domainCatalog, + // DOMAIN_SCHEMA + domainSchema, + // DOMAIN_NAME + domainName, + // MAXIMUM_CARDINALITY + dt.maximumCardinality, + // DTD_IDENTIFIER + Integer.toString(ordinalPosition), + // IS_IDENTITY + isIdentity, + // IDENTITY_GENERATION + identityGeneration, + // IDENTITY_START + identityStart, + // IDENTITY_INCREMENT + identityIncrement, + // IDENTITY_MAXIMUM + identityMaximum, + // IDENTITY_MINIMUM + identityMinimum, + // IDENTITY_CYCLE + identityCycle, + // IS_GENERATED + isGenerated, + // GENERATION_EXPRESSION + generationExpression, + // DECLARED_DATA_TYPE + dt.declaredDataType, + // DECLARED_NUMERIC_PRECISION + dt.declaredNumericPrecision, + // DECLARED_NUMERIC_SCALE + dt.declaredNumericScale, + // extensions + // GEOMETRY_TYPE + dt.geometryType, + // GEOMETRY_SRID + dt.geometrySrid, + // IDENTITY_BASE + identityBase, + // IDENTITY_CACHE + identityCache, + // COLUMN_ON_UPDATE + c.getOnUpdateSQL(), + // IS_VISIBLE + ValueBoolean.get(c.getVisible()), + // DEFAULT_ON_NULL + ValueBoolean.get(c.isDefaultOnNull()), + // SELECTIVITY + ValueInteger.get(c.getSelectivity()), + // REMARKS + c.getComment() + ); + } + + private void columnPrivileges(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, + String catalog) { + for 
(Right r : database.getAllRights()) { + DbObject object = r.getGrantedObject(); + if (!(object instanceof Table)) { + continue; + } + Table table = (Table) object; + if (hideTable(table, session)) { + continue; + } + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + DbObject grantee = r.getGrantee(); + int mask = r.getRightMask(); + for (Column column : table.getColumns()) { + addPrivileges(session, rows, grantee, catalog, table, column.getName(), mask); + } + } + } + + private void constraintColumnUsage(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, + String catalog) { + for (Schema schema : database.getAllSchemas()) { + for (Constraint constraint : schema.getAllConstraints()) { + constraintColumnUsage(session, indexFrom, indexTo, rows, catalog, constraint); + } + } + } + + private void constraintColumnUsage(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, + String catalog, Constraint constraint) { + switch (constraint.getConstraintType()) { + case CHECK: + case DOMAIN: { + HashSet columns = new HashSet<>(); + constraint.getExpression().isEverything(ExpressionVisitor.getColumnsVisitor(columns, null)); + for (Column column : columns) { + Table table = column.getTable(); + if (checkIndex(session, table.getName(), indexFrom, indexTo) && !hideTable(table, session)) { + addConstraintColumnUsage(session, rows, catalog, constraint, column); + } + } + break; + } + case REFERENTIAL: { + Table table = constraint.getRefTable(); + if (checkIndex(session, table.getName(), indexFrom, indexTo) && !hideTable(table, session)) { + for (Column column : constraint.getReferencedColumns(table)) { + addConstraintColumnUsage(session, rows, catalog, constraint, column); + } + } + } + //$FALL-THROUGH$ + case PRIMARY_KEY: + case UNIQUE: { + Table table = constraint.getTable(); + if (checkIndex(session, table.getName(), indexFrom, indexTo) && !hideTable(table, session)) { + for 
(Column column : constraint.getReferencedColumns(table)) { + addConstraintColumnUsage(session, rows, catalog, constraint, column); + } + } + } + } + } + + private void domains(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, String catalog) { + String mainSchemaName = database.getMainSchema().getName(); + String collation = database.getCompareMode().getName(); + for (Schema schema : database.getAllSchemas()) { + for (Domain domain : schema.getAllDomains()) { + String domainName = domain.getName(); + if (!checkIndex(session, domainName, indexFrom, indexTo)) { + continue; + } + domains(session, rows, catalog, mainSchemaName, collation, domain, domainName); + } + } + } + + private void domains(SessionLocal session, ArrayList rows, String catalog, String mainSchemaName, + String collation, Domain domain, String domainName) { + Domain parentDomain = domain.getDomain(); + TypeInfo typeInfo = domain.getDataType(); + DataTypeInformation dt = DataTypeInformation.valueOf(typeInfo); + String characterSetCatalog, characterSetSchema, characterSetName, collationName; + if (dt.hasCharsetAndCollation) { + characterSetCatalog = catalog; + characterSetSchema = mainSchemaName; + characterSetName = CHARACTER_SET_NAME; + collationName = collation; + } else { + characterSetCatalog = characterSetSchema = characterSetName = collationName = null; + } + add(session, rows, + // DOMAIN_CATALOG + catalog, + // DOMAIN_SCHEMA + domain.getSchema().getName(), + // DOMAIN_NAME + domainName, + // DATA_TYPE + dt.dataType, + // CHARACTER_MAXIMUM_LENGTH + dt.characterPrecision, + // CHARACTER_OCTET_LENGTH + dt.characterPrecision, + // CHARACTER_SET_CATALOG + characterSetCatalog, + // CHARACTER_SET_SCHEMA + characterSetSchema, + // CHARACTER_SET_NAME + characterSetName, + // COLLATION_CATALOG + characterSetCatalog, + // COLLATION_SCHEMA + characterSetSchema, + // COLLATION_NAME + collationName, + // NUMERIC_PRECISION + dt.numericPrecision, + // NUMERIC_PRECISION_RADIX + 
dt.numericPrecisionRadix, + // NUMERIC_SCALE + dt.numericScale, + // DATETIME_PRECISION + dt.datetimePrecision, + // INTERVAL_TYPE + dt.intervalType, + // INTERVAL_PRECISION + dt.intervalPrecision, + // DOMAIN_DEFAULT + domain.getDefaultSQL(), + // MAXIMUM_CARDINALITY + dt.maximumCardinality, + // DTD_IDENTIFIER + "TYPE", + // DECLARED_DATA_TYPE + dt.declaredDataType, + // DECLARED_NUMERIC_PRECISION INT + dt.declaredNumericPrecision, + // DECLARED_NUMERIC_SCALE INT + dt.declaredNumericScale, + // extensions + // GEOMETRY_TYPE + dt.geometryType, + // GEOMETRY_SRID INT + dt.geometrySrid, + // DOMAIN_ON_UPDATE + domain.getOnUpdateSQL(), + // PARENT_DOMAIN_CATALOG + parentDomain != null ? catalog : null, + // PARENT_DOMAIN_SCHEMA + parentDomain != null ? parentDomain.getSchema().getName() : null, + // PARENT_DOMAIN_NAME + parentDomain != null ? parentDomain.getName() : null, + // REMARKS + domain.getComment() + ); + } + + private void domainConstraints(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, + String catalog) { + for (Schema schema : database.getAllSchemas()) { + for (Constraint constraint : schema.getAllConstraints()) { + if (constraint.getConstraintType() != Constraint.Type.DOMAIN) { + continue; + } + ConstraintDomain domainConstraint = (ConstraintDomain) constraint; + Domain domain = domainConstraint.getDomain(); + String domainName = domain.getName(); + if (!checkIndex(session, domainName, indexFrom, indexTo)) { + continue; + } + domainConstraints(session, rows, catalog, domainConstraint, domain, domainName); + } + } + } + + private void domainConstraints(SessionLocal session, ArrayList rows, String catalog, + ConstraintDomain constraint, Domain domain, String domainName) { + add(session, rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraint.getName(), + // DOMAIN_CATALOG + catalog, + // DOMAIN_SCHEMA + domain.getSchema().getName(), + // DOMAIN_NAME + 
domainName, + // IS_DEFERRABLE + "NO", + // INITIALLY_DEFERRED + "NO", + // extensions + // REMARKS + constraint.getComment() + ); + } + + private void elementTypesFields(SessionLocal session, ArrayList rows, String catalog, int type) { + String mainSchemaName = database.getMainSchema().getName(); + String collation = database.getCompareMode().getName(); + for (Schema schema : database.getAllSchemas()) { + String schemaName = schema.getName(); + for (Table table : schema.getAllTablesAndViews(session)) { + elementTypesFieldsForTable(session, rows, catalog, type, mainSchemaName, collation, schemaName, + table); + } + for (Domain domain : schema.getAllDomains()) { + elementTypesFieldsRow(session, rows, catalog, type, mainSchemaName, collation, schemaName, + domain.getName(), "DOMAIN", "TYPE", domain.getDataType()); + } + for (UserDefinedFunction userDefinedFunction : schema.getAllFunctionsAndAggregates()) { + if (userDefinedFunction instanceof FunctionAlias) { + String name = userDefinedFunction.getName(); + JavaMethod[] methods; + try { + methods = ((FunctionAlias) userDefinedFunction).getJavaMethods(); + } catch (DbException e) { + continue; + } + for (int i = 0; i < methods.length; i++) { + FunctionAlias.JavaMethod method = methods[i]; + TypeInfo typeInfo = method.getDataType(); + String specificName = name + '_' + (i + 1); + if (typeInfo != null && typeInfo.getValueType() != Value.NULL) { + elementTypesFieldsRow(session, rows, catalog, type, mainSchemaName, collation, schemaName, + specificName, "ROUTINE", "RESULT", typeInfo); + } + Class[] columnList = method.getColumnClasses(); + for (int o = 1, p = method.hasConnectionParam() ? 
1 + : 0, n = columnList.length; p < n; o++, p++) { + elementTypesFieldsRow(session, rows, catalog, type, mainSchemaName, collation, schemaName, + specificName, "ROUTINE", Integer.toString(o), + ValueToObjectConverter2.classToType(columnList[p])); + } + } + } + } + for (Constant constant : schema.getAllConstants()) { + elementTypesFieldsRow(session, rows, catalog, type, mainSchemaName, collation, schemaName, + constant.getName(), "CONSTANT", "TYPE", constant.getValue().getType()); + } + } + for (Table table : session.getLocalTempTables()) { + elementTypesFieldsForTable(session, rows, catalog, type, mainSchemaName, collation, + table.getSchema().getName(), + table); + } + } + + private void elementTypesFieldsForTable(SessionLocal session, ArrayList rows, String catalog, int type, + String mainSchemaName, String collation, String schemaName, Table table) { + if (hideTable(table, session)) { + return; + } + String tableName = table.getName(); + Column[] cols = table.getColumns(); + for (int i = 0; i < cols.length; i++) { + elementTypesFieldsRow(session, rows, catalog, type, mainSchemaName, collation, schemaName, + tableName, "TABLE", Integer.toString(i + 1), cols[i].getType()); + } + } + + private void elementTypesFieldsRow(SessionLocal session, ArrayList rows, String catalog, int type, + String mainSchemaName, String collation, String objectSchema, String objectName, String objectType, + String identifier, TypeInfo typeInfo) { + switch (typeInfo.getValueType()) { + case Value.ENUM: + if (type == ENUM_VALUES) { + enumValues(session, rows, catalog, objectSchema, objectName, objectType, identifier, typeInfo); + } + break; + case Value.ARRAY: { + typeInfo = (TypeInfo) typeInfo.getExtTypeInfo(); + String dtdIdentifier = identifier + '_'; + if (type == ELEMENT_TYPES) { + elementTypes(session, rows, catalog, mainSchemaName, collation, objectSchema, objectName, + objectType, identifier, dtdIdentifier, typeInfo); + } + elementTypesFieldsRow(session, rows, catalog, type, 
mainSchemaName, collation, objectSchema, + objectName, objectType, dtdIdentifier, typeInfo); + break; + } + case Value.ROW: { + ExtTypeInfoRow ext = (ExtTypeInfoRow) typeInfo.getExtTypeInfo(); + int ordinalPosition = 0; + for (Map.Entry entry : ext.getFields()) { + typeInfo = entry.getValue(); + String fieldName = entry.getKey(); + String dtdIdentifier = identifier + '_' + ++ordinalPosition; + if (type == FIELDS) { + fields(session, rows, catalog, mainSchemaName, collation, objectSchema, objectName, + objectType, identifier, fieldName, ordinalPosition, dtdIdentifier, typeInfo); + } + elementTypesFieldsRow(session, rows, catalog, type, mainSchemaName, collation, objectSchema, + objectName, objectType, dtdIdentifier, typeInfo); + } + } + } + } + + private void elementTypes(SessionLocal session, ArrayList rows, String catalog, String mainSchemaName, + String collation, String objectSchema, String objectName, String objectType, String collectionIdentifier, + String dtdIdentifier, TypeInfo typeInfo) { + DataTypeInformation dt = DataTypeInformation.valueOf(typeInfo); + String characterSetCatalog, characterSetSchema, characterSetName, collationName; + if (dt.hasCharsetAndCollation) { + characterSetCatalog = catalog; + characterSetSchema = mainSchemaName; + characterSetName = CHARACTER_SET_NAME; + collationName = collation; + } else { + characterSetCatalog = characterSetSchema = characterSetName = collationName = null; + } + add(session, rows, + // OBJECT_CATALOG + catalog, + // OBJECT_SCHEMA + objectSchema, + // OBJECT_NAME + objectName, + // OBJECT_TYPE + objectType, + // COLLECTION_TYPE_IDENTIFIER + collectionIdentifier, + // DATA_TYPE + dt.dataType, + // CHARACTER_MAXIMUM_LENGTH + dt.characterPrecision, + // CHARACTER_OCTET_LENGTH + dt.characterPrecision, + // CHARACTER_SET_CATALOG + characterSetCatalog, + // CHARACTER_SET_SCHEMA + characterSetSchema, + // CHARACTER_SET_NAME + characterSetName, + // COLLATION_CATALOG + characterSetCatalog, + // COLLATION_SCHEMA + 
characterSetSchema, + // COLLATION_NAME + collationName, + // NUMERIC_PRECISION + dt.numericPrecision, + // NUMERIC_PRECISION_RADIX + dt.numericPrecisionRadix, + // NUMERIC_SCALE + dt.numericScale, + // DATETIME_PRECISION + dt.datetimePrecision, + // INTERVAL_TYPE + dt.intervalType, + // INTERVAL_PRECISION + dt.intervalPrecision, + // MAXIMUM_CARDINALITY + dt.maximumCardinality, + // DTD_IDENTIFIER + dtdIdentifier, + // DECLARED_DATA_TYPE + dt.declaredDataType, + // DECLARED_NUMERIC_PRECISION INT + dt.declaredNumericPrecision, + // DECLARED_NUMERIC_SCALE INT + dt.declaredNumericScale, + // extensions + // GEOMETRY_TYPE + dt.geometryType, + // GEOMETRY_SRID INT + dt.geometrySrid + ); + } + + private void fields(SessionLocal session, ArrayList rows, String catalog, String mainSchemaName, + String collation, String objectSchema, String objectName, String objectType, String rowIdentifier, + String fieldName, int ordinalPosition, String dtdIdentifier, TypeInfo typeInfo) { + DataTypeInformation dt = DataTypeInformation.valueOf(typeInfo); + String characterSetCatalog, characterSetSchema, characterSetName, collationName; + if (dt.hasCharsetAndCollation) { + characterSetCatalog = catalog; + characterSetSchema = mainSchemaName; + characterSetName = CHARACTER_SET_NAME; + collationName = collation; + } else { + characterSetCatalog = characterSetSchema = characterSetName = collationName = null; + } + add(session, rows, + // OBJECT_CATALOG + catalog, + // OBJECT_SCHEMA + objectSchema, + // OBJECT_NAME + objectName, + // OBJECT_TYPE + objectType, + // ROW_IDENTIFIER + rowIdentifier, + // FIELD_NAME + fieldName, + // ORDINAL_POSITION + ValueInteger.get(ordinalPosition), + // DATA_TYPE + dt.dataType, + // CHARACTER_MAXIMUM_LENGTH + dt.characterPrecision, + // CHARACTER_OCTET_LENGTH + dt.characterPrecision, + // CHARACTER_SET_CATALOG + characterSetCatalog, + // CHARACTER_SET_SCHEMA + characterSetSchema, + // CHARACTER_SET_NAME + characterSetName, + // COLLATION_CATALOG + 
characterSetCatalog, + // COLLATION_SCHEMA + characterSetSchema, + // COLLATION_NAME + collationName, + // NUMERIC_PRECISION + dt.numericPrecision, + // NUMERIC_PRECISION_RADIX + dt.numericPrecisionRadix, + // NUMERIC_SCALE + dt.numericScale, + // DATETIME_PRECISION + dt.datetimePrecision, + // INTERVAL_TYPE + dt.intervalType, + // INTERVAL_PRECISION + dt.intervalPrecision, + // MAXIMUM_CARDINALITY + dt.maximumCardinality, + // DTD_IDENTIFIER + dtdIdentifier, + // DECLARED_DATA_TYPE + dt.declaredDataType, + // DECLARED_NUMERIC_PRECISION INT + dt.declaredNumericPrecision, + // DECLARED_NUMERIC_SCALE INT + dt.declaredNumericScale, + // extensions + // GEOMETRY_TYPE + dt.geometryType, + // GEOMETRY_SRID INT + dt.geometrySrid + ); + } + + private void keyColumnUsage(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, + String catalog) { + for (Schema schema : database.getAllSchemas()) { + for (Constraint constraint : schema.getAllConstraints()) { + Constraint.Type constraintType = constraint.getConstraintType(); + IndexColumn[] indexColumns = null; + if (constraintType == Constraint.Type.UNIQUE || constraintType == Constraint.Type.PRIMARY_KEY) { + indexColumns = ((ConstraintUnique) constraint).getColumns(); + } else if (constraintType == Constraint.Type.REFERENTIAL) { + indexColumns = ((ConstraintReferential) constraint).getColumns(); + } + if (indexColumns == null) { + continue; + } + Table table = constraint.getTable(); + if (hideTable(table, session)) { + continue; + } + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + keyColumnUsage(session, rows, catalog, constraint, constraintType, indexColumns, table, tableName); + } + } + } + + private void keyColumnUsage(SessionLocal session, ArrayList rows, String catalog, Constraint constraint, + Constraint.Type constraintType, IndexColumn[] indexColumns, Table table, String tableName) { + ConstraintUnique referenced; + if (constraintType == 
Constraint.Type.REFERENTIAL) { + referenced = ((ConstraintReferential) constraint).getReferencedConstraint(); + } else { + referenced = null; + } + for (int i = 0; i < indexColumns.length; i++) { + IndexColumn indexColumn = indexColumns[i]; + ValueInteger ordinalPosition = ValueInteger.get(i + 1); + ValueInteger positionInUniqueConstraint = null; + if (referenced != null) { + Column c = ((ConstraintReferential) constraint).getRefColumns()[i].column; + IndexColumn[] refColumns = referenced.getColumns(); + for (int j = 0; j < refColumns.length; j++) { + if (refColumns[j].column.equals(c)) { + positionInUniqueConstraint = ValueInteger.get(j + 1); + break; + } + } + } + add(session, rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraint.getName(), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // COLUMN_NAME + indexColumn.columnName, + // ORDINAL_POSITION + ordinalPosition, + // POSITION_IN_UNIQUE_CONSTRAINT + positionInUniqueConstraint + ); + } + } + + private void parameters(SessionLocal session, ArrayList rows, String catalog) { + String mainSchemaName = database.getMainSchema().getName(); + String collation = database.getCompareMode().getName(); + for (Schema schema : database.getAllSchemas()) { + for (UserDefinedFunction userDefinedFunction : schema.getAllFunctionsAndAggregates()) { + if (userDefinedFunction instanceof FunctionAlias) { + JavaMethod[] methods; + try { + methods = ((FunctionAlias) userDefinedFunction).getJavaMethods(); + } catch (DbException e) { + continue; + } + for (int i = 0; i < methods.length; i++) { + FunctionAlias.JavaMethod method = methods[i]; + Class[] columnList = method.getColumnClasses(); + for (int o = 1, p = method.hasConnectionParam() ? 
1 + : 0, n = columnList.length; p < n; o++, p++) { + parameters(session, rows, catalog, mainSchemaName, collation, schema.getName(), + userDefinedFunction.getName() + '_' + (i + 1), + ValueToObjectConverter2.classToType(columnList[p]), o); + } + } + } + } + } + } + + private void parameters(SessionLocal session, ArrayList rows, String catalog, String mainSchemaName, + String collation, String schema, String specificName, TypeInfo typeInfo, int pos) { + DataTypeInformation dt = DataTypeInformation.valueOf(typeInfo); + String characterSetCatalog, characterSetSchema, characterSetName, collationName; + if (dt.hasCharsetAndCollation) { + characterSetCatalog = catalog; + characterSetSchema = mainSchemaName; + characterSetName = CHARACTER_SET_NAME; + collationName = collation; + } else { + characterSetCatalog = characterSetSchema = characterSetName = collationName = null; + } + add(session, rows, + // SPECIFIC_CATALOG + catalog, + // SPECIFIC_SCHEMA + schema, + // SPECIFIC_NAME + specificName, + // ORDINAL_POSITION + ValueInteger.get(pos), + // PARAMETER_MODE + "IN", + // IS_RESULT + "NO", + // AS_LOCATOR + DataType.isLargeObject(typeInfo.getValueType()) ? 
"YES" : "NO", + // PARAMETER_NAME + "P" + pos, + // DATA_TYPE + identifier(dt.dataType), + // CHARACTER_MAXIMUM_LENGTH + dt.characterPrecision, + // CHARACTER_OCTET_LENGTH + dt.characterPrecision, + // CHARACTER_SET_CATALOG + characterSetCatalog, + // CHARACTER_SET_SCHEMA + characterSetSchema, + // CHARACTER_SET_NAME + characterSetName, + // COLLATION_CATALOG + characterSetCatalog, + // COLLATION_SCHEMA + characterSetSchema, + // COLLATION_NAME + collationName, + // NUMERIC_PRECISION + dt.numericPrecision, + // NUMERIC_PRECISION_RADIX + dt.numericPrecisionRadix, + // NUMERIC_SCALE + dt.numericScale, + // DATETIME_PRECISION + dt.datetimePrecision, + // INTERVAL_TYPE + dt.intervalType, + // INTERVAL_PRECISION + dt.intervalPrecision, + // MAXIMUM_CARDINALITY + dt.maximumCardinality, + // DTD_IDENTIFIER + Integer.toString(pos), + // DECLARED_DATA_TYPE + dt.declaredDataType, + // DECLARED_NUMERIC_PRECISION INT + dt.declaredNumericPrecision, + // DECLARED_NUMERIC_SCALE INT + dt.declaredNumericScale, + // PARAMETER_DEFAULT + null, + // extensions + // GEOMETRY_TYPE + dt.geometryType, + // GEOMETRY_SRID INT + dt.geometrySrid + ); + } + + private void referentialConstraints(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, + String catalog) { + for (Schema schema : database.getAllSchemas()) { + for (Constraint constraint : schema.getAllConstraints()) { + if (constraint.getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + if (hideTable(constraint.getTable(), session)) { + continue; + } + String constraintName = constraint.getName(); + if (!checkIndex(session, constraintName, indexFrom, indexTo)) { + continue; + } + referentialConstraints(session, rows, catalog, (ConstraintReferential) constraint, constraintName); + } + } + } + + private void referentialConstraints(SessionLocal session, ArrayList rows, String catalog, + ConstraintReferential constraint, String constraintName) { + ConstraintUnique unique = 
constraint.getReferencedConstraint(); + add(session, rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraintName, + // UNIQUE_CONSTRAINT_CATALOG + catalog, + // UNIQUE_CONSTRAINT_SCHEMA + unique.getSchema().getName(), + // UNIQUE_CONSTRAINT_NAME + unique.getName(), + // MATCH_OPTION + "NONE", + // UPDATE_RULE + constraint.getUpdateAction().getSqlName(), + // DELETE_RULE + constraint.getDeleteAction().getSqlName() + ); + } + + private void routines(SessionLocal session, ArrayList rows, String catalog) { + boolean admin = session.getUser().isAdmin(); + String mainSchemaName = database.getMainSchema().getName(); + String collation = database.getCompareMode().getName(); + for (Schema schema : database.getAllSchemas()) { + String schemaName = schema.getName(); + for (UserDefinedFunction userDefinedFunction : schema.getAllFunctionsAndAggregates()) { + String name = userDefinedFunction.getName(); + if (userDefinedFunction instanceof FunctionAlias) { + FunctionAlias alias = (FunctionAlias) userDefinedFunction; + JavaMethod[] methods; + try { + methods = alias.getJavaMethods(); + } catch (DbException e) { + continue; + } + for (int i = 0; i < methods.length; i++) { + FunctionAlias.JavaMethod method = methods[i]; + TypeInfo typeInfo = method.getDataType(); + String routineType; + if (typeInfo != null && typeInfo.getValueType() == Value.NULL) { + routineType = "PROCEDURE"; + typeInfo = null; + } else { + routineType = "FUNCTION"; + } + routines(session, rows, catalog, mainSchemaName, collation, schemaName, name, + name + '_' + (i + 1), routineType, admin ? alias.getSource() : null, + alias.getJavaClassName() + '.' 
+ alias.getJavaMethodName(), typeInfo, + alias.isDeterministic(), alias.getComment()); + } + } else { + routines(session, rows, catalog, mainSchemaName, collation, schemaName, name, name, "AGGREGATE", + null, userDefinedFunction.getJavaClassName(), TypeInfo.TYPE_NULL, false, + userDefinedFunction.getComment()); + } + } + } + } + + private void routines(SessionLocal session, ArrayList rows, String catalog, String mainSchemaName, // + String collation, String schema, String name, String specificName, String routineType, String definition, + String externalName, TypeInfo typeInfo, boolean deterministic, String remarks) { + DataTypeInformation dt = typeInfo != null ? DataTypeInformation.valueOf(typeInfo) : DataTypeInformation.NULL; + String characterSetCatalog, characterSetSchema, characterSetName, collationName; + if (dt.hasCharsetAndCollation) { + characterSetCatalog = catalog; + characterSetSchema = mainSchemaName; + characterSetName = CHARACTER_SET_NAME; + collationName = collation; + } else { + characterSetCatalog = characterSetSchema = characterSetName = collationName = null; + } + add(session, rows, + // SPECIFIC_CATALOG + catalog, + // SPECIFIC_SCHEMA + schema, + // SPECIFIC_NAME + specificName, + // ROUTINE_CATALOG + catalog, + // ROUTINE_SCHEMA + schema, + // ROUTINE_NAME + name, + // ROUTINE_TYPE + routineType, + // DATA_TYPE + identifier(dt.dataType), + // CHARACTER_MAXIMUM_LENGTH + dt.characterPrecision, + // CHARACTER_OCTET_LENGTH + dt.characterPrecision, + // CHARACTER_SET_CATALOG + characterSetCatalog, + // CHARACTER_SET_SCHEMA + characterSetSchema, + // CHARACTER_SET_NAME + characterSetName, + // COLLATION_CATALOG + characterSetCatalog, + // COLLATION_SCHEMA + characterSetSchema, + // COLLATION_NAME + collationName, + // NUMERIC_PRECISION + dt.numericPrecision, + // NUMERIC_PRECISION_RADIX + dt.numericPrecisionRadix, + // NUMERIC_SCALE + dt.numericScale, + // DATETIME_PRECISION + dt.datetimePrecision, + // INTERVAL_TYPE + dt.intervalType, + // 
INTERVAL_PRECISION + dt.intervalPrecision, + // MAXIMUM_CARDINALITY + dt.maximumCardinality, + // DTD_IDENTIFIER + "RESULT", + // ROUTINE_BODY + "EXTERNAL", + // ROUTINE_DEFINITION + definition, + // EXTERNAL_NAME + externalName, + // EXTERNAL_LANGUAGE + "JAVA", + // PARAMETER_STYLE + "GENERAL", + // IS_DETERMINISTIC + deterministic ? "YES" : "NO", + // DECLARED_DATA_TYPE + dt.declaredDataType, + // DECLARED_NUMERIC_PRECISION INT + dt.declaredNumericPrecision, + // DECLARED_NUMERIC_SCALE INT + dt.declaredNumericScale, + // extensions + // GEOMETRY_TYPE + dt.geometryType, + // GEOMETRY_SRID INT + dt.geometrySrid, + // REMARKS + remarks + ); + } + + private void schemata(SessionLocal session, ArrayList rows, String catalog) { + String mainSchemaName = database.getMainSchema().getName(); + String collation = database.getCompareMode().getName(); + for (Schema schema : database.getAllSchemas()) { + add(session, rows, + // CATALOG_NAME + catalog, + // SCHEMA_NAME + schema.getName(), + // SCHEMA_OWNER + identifier(schema.getOwner().getName()), + // DEFAULT_CHARACTER_SET_CATALOG + catalog, + // DEFAULT_CHARACTER_SET_SCHEMA + mainSchemaName, + // DEFAULT_CHARACTER_SET_NAME + CHARACTER_SET_NAME, + // SQL_PATH + null, + // extensions + // DEFAULT_COLLATION_NAME + collation, + // REMARKS + schema.getComment() + ); + } + } + + private void sequences(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, String catalog) { + for (Schema schema : database.getAllSchemas()) { + for (Sequence sequence : schema.getAllSequences()) { + if (sequence.getBelongsToTable()) { + continue; + } + String sequenceName = sequence.getName(); + if (!checkIndex(session, sequenceName, indexFrom, indexTo)) { + continue; + } + sequences(session, rows, catalog, sequence, sequenceName); + } + } + } + + private void sequences(SessionLocal session, ArrayList rows, String catalog, Sequence sequence, + String sequenceName) { + DataTypeInformation dt = 
DataTypeInformation.valueOf(sequence.getDataType()); + Sequence.Cycle cycle = sequence.getCycle(); + add(session, rows, + // SEQUENCE_CATALOG + catalog, + // SEQUENCE_SCHEMA + sequence.getSchema().getName(), + // SEQUENCE_NAME + sequenceName, + // DATA_TYPE + dt.dataType, + // NUMERIC_PRECISION + ValueInteger.get(sequence.getEffectivePrecision()), + // NUMERIC_PRECISION_RADIX + dt.numericPrecisionRadix, + // NUMERIC_SCALE + dt.numericScale, + // START_VALUE + ValueBigint.get(sequence.getStartValue()), + // MINIMUM_VALUE + ValueBigint.get(sequence.getMinValue()), + // MAXIMUM_VALUE + ValueBigint.get(sequence.getMaxValue()), + // INCREMENT + ValueBigint.get(sequence.getIncrement()), + // CYCLE_OPTION + cycle.isCycle() ? "YES" : "NO", + // DECLARED_DATA_TYPE + dt.declaredDataType, + // DECLARED_NUMERIC_PRECISION + dt.declaredNumericPrecision, + // DECLARED_NUMERIC_SCALE + dt.declaredNumericScale, + // extensions + // BASE_VALUE + cycle != Sequence.Cycle.EXHAUSTED ? ValueBigint.get(sequence.getBaseValue()) : null, + // CACHE + ValueBigint.get(sequence.getCacheSize()), + // REMARKS + sequence.getComment() + ); + } + + private void tables(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, String catalog) { + for (Schema schema : database.getAllSchemas()) { + for (Table table : schema.getAllTablesAndViews(session)) { + String tableName = table.getName(); + if (checkIndex(session, tableName, indexFrom, indexTo)) { + tables(session, rows, catalog, table, tableName); + } + } + } + for (Table table : session.getLocalTempTables()) { + String tableName = table.getName(); + if (checkIndex(session, tableName, indexFrom, indexTo)) { + tables(session, rows, catalog, table, tableName); + } + } + } + + private void tables(SessionLocal session, ArrayList rows, String catalog, Table table, + String tableName) { + if (hideTable(table, session)) { + return; + } + String commitAction, storageType; + if (table.isTemporary()) { + commitAction = table.getOnCommitTruncate() 
? "DELETE" : table.getOnCommitDrop() ? "DROP" : "PRESERVE"; + storageType = table.isGlobalTemporary() ? "GLOBAL TEMPORARY" : "LOCAL TEMPORARY"; + } else { + commitAction = null; + switch (table.getTableType()) { + case TABLE_LINK: + storageType = "TABLE LINK"; + break; + case EXTERNAL_TABLE_ENGINE: + storageType = "EXTERNAL"; + break; + default: + storageType = table.isPersistIndexes() ? "CACHED" : "MEMORY"; + break; + } + } + long lastModification = table.getMaxDataModificationId(); + add(session, rows, + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // TABLE_TYPE + table.getSQLTableType(), + // IS_INSERTABLE_INTO" + table.isInsertable() ? "YES" : "NO", + // COMMIT_ACTION + commitAction, + // extensions + // STORAGE_TYPE + storageType, + // REMARKS + table.getComment(), + // LAST_MODIFICATION + lastModification != Long.MAX_VALUE ? ValueBigint.get(lastModification) : null, + // TABLE_CLASS + table.getClass().getName(), + // ROW_COUNT_ESTIMATE + ValueBigint.get(table.getRowCountApproximation(session)) + ); + } + + private void tableConstraints(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, + String catalog) { + for (Schema schema : database.getAllSchemas()) { + for (Constraint constraint : schema.getAllConstraints()) { + Constraint.Type constraintType = constraint.getConstraintType(); + if (constraintType == Constraint.Type.DOMAIN) { + continue; + } + Table table = constraint.getTable(); + if (hideTable(table, session)) { + continue; + } + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + tableConstraints(session, rows, catalog, constraint, constraintType, table, tableName); + } + } + } + + private void tableConstraints(SessionLocal session, ArrayList rows, String catalog, Constraint constraint, + Constraint.Type constraintType, Table table, String tableName) { + Index index = constraint.getIndex(); + boolean enforced; 
+ if (constraintType != Constraint.Type.REFERENTIAL) { + enforced = true; + } else { + enforced = database.getReferentialIntegrity() && table.getCheckForeignKeyConstraints() + && ((ConstraintReferential) constraint).getRefTable().getCheckForeignKeyConstraints(); + } + add(session, rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraint.getName(), + // CONSTRAINT_TYPE + constraintType.getSqlName(), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // IS_DEFERRABLE + "NO", + // INITIALLY_DEFERRED + "NO", + // ENFORCED + enforced ? "YES" : "NO", + // extensions + // INDEX_CATALOG + index != null ? catalog : null, + // INDEX_SCHEMA + index != null ? index.getSchema().getName() : null, + // INDEX_NAME + index != null ? index.getName() : null, + // REMARKS + constraint.getComment() + ); + } + + private void tablePrivileges(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, // + String catalog) { + for (Right r : database.getAllRights()) { + DbObject object = r.getGrantedObject(); + if (!(object instanceof Table)) { + continue; + } + Table table = (Table) object; + if (hideTable(table, session)) { + continue; + } + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + addPrivileges(session, rows, r.getGrantee(), catalog, table, null, r.getRightMask()); + } + } + + private void triggers(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, String catalog) { + for (Schema schema : database.getAllSchemas()) { + for (TriggerObject trigger : schema.getAllTriggers()) { + Table table = trigger.getTable(); + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + int typeMask = trigger.getTypeMask(); + if ((typeMask & Trigger.INSERT) != 0) { + triggers(session, rows, catalog, trigger, 
"INSERT", table, tableName); + } + if ((typeMask & Trigger.UPDATE) != 0) { + triggers(session, rows, catalog, trigger, "UPDATE", table, tableName); + } + if ((typeMask & Trigger.DELETE) != 0) { + triggers(session, rows, catalog, trigger, "DELETE", table, tableName); + } + if ((typeMask & Trigger.SELECT) != 0) { + triggers(session, rows, catalog, trigger, "SELECT", table, tableName); + } + } + } + } + + private void triggers(SessionLocal session, ArrayList rows, String catalog, TriggerObject trigger, + String eventManipulation, Table table, String tableName) { + add(session, rows, + // TRIGGER_CATALOG + catalog, + // TRIGGER_SCHEMA + trigger.getSchema().getName(), + // TRIGGER_NAME + trigger.getName(), + // EVENT_MANIPULATION + eventManipulation, + // EVENT_OBJECT_CATALOG + catalog, + // EVENT_OBJECT_SCHEMA + table.getSchema().getName(), + // EVENT_OBJECT_TABLE + tableName, + // ACTION_ORIENTATION + trigger.isRowBased() ? "ROW" : "STATEMENT", + // ACTION_TIMING + trigger.isInsteadOf() ? "INSTEAD OF" : trigger.isBefore() ? 
"BEFORE" : "AFTER", + // extensions + // IS_ROLLBACK + ValueBoolean.get(trigger.isOnRollback()), + // JAVA_CLASS + trigger.getTriggerClassName(), + // QUEUE_SIZE + ValueInteger.get(trigger.getQueueSize()), + // NO_WAIT + ValueBoolean.get(trigger.isNoWait()), + // REMARKS + trigger.getComment() + ); + } + + private void views(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, String catalog) { + for (Schema schema : database.getAllSchemas()) { + for (Table table : schema.getAllTablesAndViews(session)) { + if (table.isView()) { + String tableName = table.getName(); + if (checkIndex(session, tableName, indexFrom, indexTo)) { + views(session, rows, catalog, table, tableName); + } + } + } + } + for (Table table : session.getLocalTempTables()) { + if (table.isView()) { + String tableName = table.getName(); + if (checkIndex(session, tableName, indexFrom, indexTo)) { + views(session, rows, catalog, table, tableName); + } + } + } + } + + private void views(SessionLocal session, ArrayList rows, String catalog, Table table, String tableName) { + String viewDefinition, status = "VALID"; + if (table instanceof TableView) { + TableView view = (TableView) table; + viewDefinition = view.getQuery(); + if (view.isInvalid()) { + status = "INVALID"; + } + } else { + viewDefinition = null; + } + int mask = 0; + ArrayList triggers = table.getTriggers(); + if (triggers != null) { + for (TriggerObject trigger : triggers) { + if (trigger.isInsteadOf()) { + mask |= trigger.getTypeMask(); + } + } + } + add(session, rows, + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // VIEW_DEFINITION + viewDefinition, + // CHECK_OPTION + "NONE", + // IS_UPDATABLE + "NO", + // INSERTABLE_INTO + "NO", + // IS_TRIGGER_UPDATABLE + (mask & Trigger.UPDATE) != 0 ? "YES" : "NO", + // IS_TRIGGER_DELETABLE + (mask & Trigger.DELETE) != 0 ? "YES" : "NO", + // IS_TRIGGER_INSERTABLE_INTO + (mask & Trigger.INSERT) != 0 ? 
"YES" : "NO", + // extensions + // STATUS + status, + // REMARKS + table.getComment() + ); + } + + private void constants(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, String catalog) { + String mainSchemaName = database.getMainSchema().getName(); + String collation = database.getCompareMode().getName(); + for (Schema schema : database.getAllSchemas()) { + for (Constant constant : schema.getAllConstants()) { + String constantName = constant.getName(); + if (!checkIndex(session, constantName, indexFrom, indexTo)) { + continue; + } + constants(session, rows, catalog, mainSchemaName, collation, constant, constantName); + } + } + } + + private void constants(SessionLocal session, ArrayList rows, String catalog, String mainSchemaName, + String collation, Constant constant, String constantName) { + ValueExpression expr = constant.getValue(); + TypeInfo typeInfo = expr.getType(); + DataTypeInformation dt = DataTypeInformation.valueOf(typeInfo); + String characterSetCatalog, characterSetSchema, characterSetName, collationName; + if (dt.hasCharsetAndCollation) { + characterSetCatalog = catalog; + characterSetSchema = mainSchemaName; + characterSetName = CHARACTER_SET_NAME; + collationName = collation; + } else { + characterSetCatalog = characterSetSchema = characterSetName = collationName = null; + } + add(session, rows, + // CONSTANT_CATALOG + catalog, + // CONSTANT_SCHEMA + constant.getSchema().getName(), + // CONSTANT_NAME + constantName, + // VALUE_DEFINITION + expr.getSQL(DEFAULT_SQL_FLAGS), + // DATA_TYPE + dt.dataType, + // CHARACTER_MAXIMUM_LENGTH + dt.characterPrecision, + // CHARACTER_OCTET_LENGTH + dt.characterPrecision, + // CHARACTER_SET_CATALOG + characterSetCatalog, + // CHARACTER_SET_SCHEMA + characterSetSchema, + // CHARACTER_SET_NAME + characterSetName, + // COLLATION_CATALOG + characterSetCatalog, + // COLLATION_SCHEMA + characterSetSchema, + // COLLATION_NAME + collationName, + // NUMERIC_PRECISION + dt.numericPrecision, + // 
NUMERIC_PRECISION_RADIX + dt.numericPrecisionRadix, + // NUMERIC_SCALE + dt.numericScale, + // DATETIME_PRECISION + dt.datetimePrecision, + // INTERVAL_TYPE + dt.intervalType, + // INTERVAL_PRECISION + dt.intervalPrecision, + // MAXIMUM_CARDINALITY + dt.maximumCardinality, + // DTD_IDENTIFIER + "TYPE", + // DECLARED_DATA_TYPE + dt.declaredDataType, + // DECLARED_NUMERIC_PRECISION INT + dt.declaredNumericPrecision, + // DECLARED_NUMERIC_SCALE INT + dt.declaredNumericScale, + // GEOMETRY_TYPE + dt.geometryType, + // GEOMETRY_SRID INT + dt.geometrySrid, + // REMARKS + constant.getComment() + ); + } + + private void enumValues(SessionLocal session, ArrayList rows, String catalog, String objectSchema, + String objectName, String objectType, String enumIdentifier, TypeInfo typeInfo) { + ExtTypeInfoEnum ext = (ExtTypeInfoEnum) typeInfo.getExtTypeInfo(); + if (ext == null) { + return; + } + for (int i = 0, ordinal = session.zeroBasedEnums() ? 0 : 1, l = ext.getCount(); i < l; i++, ordinal++) { + add(session, rows, + // OBJECT_CATALOG + catalog, + // OBJECT_SCHEMA + objectSchema, + // OBJECT_NAME + objectName, + // OBJECT_TYPE + objectType, + // ENUM_IDENTIFIER + enumIdentifier, + // VALUE_NAME + ext.getEnumerator(i), + // VALUE_ORDINAL + ValueInteger.get(ordinal) + ); + } + } + + private void indexes(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows, String catalog, + boolean columns) { + if (indexFrom != null && indexFrom.equals(indexTo)) { + String tableName = indexFrom.getString(); + if (tableName == null) { + return; + } + for (Schema schema : database.getAllSchemas()) { + Table table = schema.getTableOrViewByName(session, tableName); + if (table != null) { + indexes(session, rows, catalog, columns, table, table.getName()); + } + } + Table table = session.findLocalTempTable(tableName); + if (table != null) { + indexes(session, rows, catalog, columns, table, table.getName()); + } + } else { + for (Schema schema : database.getAllSchemas()) { + for 
(Table table : schema.getAllTablesAndViews(session)) { + String tableName = table.getName(); + if (checkIndex(session, tableName, indexFrom, indexTo)) { + indexes(session, rows, catalog, columns, table, tableName); + } + } + } + for (Table table : session.getLocalTempTables()) { + String tableName = table.getName(); + if (checkIndex(session, tableName, indexFrom, indexTo)) { + indexes(session, rows, catalog, columns, table, tableName); + } + } + } + } + + private void indexes(SessionLocal session, ArrayList rows, String catalog, boolean columns, Table table, + String tableName) { + if (hideTable(table, session)) { + return; + } + ArrayList indexes = table.getIndexes(); + if (indexes == null) { + return; + } + for (Index index : indexes) { + if (index.getCreateSQL() == null) { + continue; + } + if (columns) { + indexColumns(session, rows, catalog, table, tableName, index); + } else { + indexes(session, rows, catalog, table, tableName, index); + } + } + } + + private void indexes(SessionLocal session, ArrayList rows, String catalog, Table table, String tableName, + Index index) { + add(session, rows, + // INDEX_CATALOG + catalog, + // INDEX_SCHEMA + index.getSchema().getName(), + // INDEX_NAME + index.getName(), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // INDEX_TYPE_NAME + index.getIndexType().getSQL(), + // IS_GENERATED + ValueBoolean.get(index.getIndexType().getBelongsToConstraint()), + // REMARKS + index.getComment(), + // INDEX_CLASS + index.getClass().getName() + ); + } + + private void indexColumns(SessionLocal session, ArrayList rows, String catalog, Table table, + String tableName, Index index) { + IndexColumn[] cols = index.getIndexColumns(); + int uniqueColumnCount = index.getUniqueColumnCount(); + for (int i = 0, l = cols.length; i < l;) { + IndexColumn idxCol = cols[i]; + int sortType = idxCol.sortType; + add(session, rows, + // INDEX_CATALOG + catalog, + // INDEX_SCHEMA + 
index.getSchema().getName(), + // INDEX_NAME + index.getName(), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // COLUMN_NAME + idxCol.column.getName(), + // ORDINAL_POSITION + ValueInteger.get(++i), + // ORDERING_SPECIFICATION + (sortType & SortOrder.DESCENDING) == 0 ? "ASC" : "DESC", + // NULL_ORDERING + (sortType & SortOrder.NULLS_FIRST) != 0 ? "FIRST" + : (sortType & SortOrder.NULLS_LAST) != 0 ? "LAST" : null, + // IS_UNIQUE + ValueBoolean.get(i <= uniqueColumnCount) + ); + } + } + + private void inDoubt(SessionLocal session, ArrayList rows) { + if (session.getUser().isAdmin()) { + ArrayList prepared = database.getInDoubtTransactions(); + if (prepared != null) { + for (InDoubtTransaction prep : prepared) { + add(session, rows, + // TRANSACTION_NAME + prep.getTransactionName(), + // TRANSACTION_STATE + prep.getStateDescription() + ); + } + } + } + } + + private void locks(SessionLocal session, ArrayList rows) { + if (session.getUser().isAdmin()) { + for (SessionLocal s : database.getSessions(false)) { + locks(session, rows, s); + } + } else { + locks(session, rows, session); + } + } + + private void locks(SessionLocal session, ArrayList rows, SessionLocal sessionWithLocks) { + for (Table table : sessionWithLocks.getLocks()) { + add(session, rows, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + table.getName(), + // SESSION_ID + ValueInteger.get(sessionWithLocks.getId()), + // LOCK_TYPE + table.isLockedExclusivelyBy(sessionWithLocks) ? 
"WRITE" : "READ" + ); + } + } + + private void queryStatistics(SessionLocal session, ArrayList rows) { + QueryStatisticsData control = database.getQueryStatisticsData(); + if (control != null) { + for (QueryStatisticsData.QueryEntry entry : control.getQueries()) { + add(session, rows, + // SQL_STATEMENT + entry.sqlStatement, + // EXECUTION_COUNT + ValueInteger.get(entry.count), + // MIN_EXECUTION_TIME + ValueDouble.get(entry.executionTimeMinNanos / 1_000_000d), + // MAX_EXECUTION_TIME + ValueDouble.get(entry.executionTimeMaxNanos / 1_000_000d), + // CUMULATIVE_EXECUTION_TIME + ValueDouble.get(entry.executionTimeCumulativeNanos / 1_000_000d), + // AVERAGE_EXECUTION_TIME + ValueDouble.get(entry.executionTimeMeanNanos / 1_000_000d), + // STD_DEV_EXECUTION_TIME + ValueDouble.get(entry.getExecutionTimeStandardDeviation() / 1_000_000d), + // MIN_ROW_COUNT + ValueBigint.get(entry.rowCountMin), + // MAX_ROW_COUNT + ValueBigint.get(entry.rowCountMax), + // CUMULATIVE_ROW_COUNT + ValueBigint.get(entry.rowCountCumulative), + // AVERAGE_ROW_COUNT + ValueDouble.get(entry.rowCountMean), + // STD_DEV_ROW_COUNT + ValueDouble.get(entry.getRowCountStandardDeviation()) + ); + } + } + } + + private void rights(SessionLocal session, Value indexFrom, Value indexTo, ArrayList rows) { + if (!session.getUser().isAdmin()) { + return; + } + for (Right r : database.getAllRights()) { + Role role = r.getGrantedRole(); + DbObject grantee = r.getGrantee(); + String rightType = grantee.getType() == DbObject.USER ? "USER" : "ROLE"; + if (role == null) { + DbObject object = r.getGrantedObject(); + Schema schema = null; + Table table = null; + if (object != null) { + if (object instanceof Schema) { + schema = (Schema) object; + } else if (object instanceof Table) { + table = (Table) object; + schema = table.getSchema(); + } + } + String tableName = (table != null) ? table.getName() : ""; + String schemaName = (schema != null) ? 
schema.getName() : ""; + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + add(session, rows, + // GRANTEE + identifier(grantee.getName()), + // GRANTEETYPE + rightType, + // GRANTEDROLE + null, + // RIGHTS + r.getRights(), + // TABLE_SCHEMA + schemaName, + // TABLE_NAME + tableName + ); + } else { + add(session, rows, + // GRANTEE + identifier(grantee.getName()), + // GRANTEETYPE + rightType, + // GRANTEDROLE + identifier(role.getName()), + // RIGHTS + null, + // TABLE_SCHEMA + null, + // TABLE_NAME + null + ); + } + } + } + + private void roles(SessionLocal session, ArrayList rows) { + boolean admin = session.getUser().isAdmin(); + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (rightOwner instanceof Role) { + Role r = (Role) rightOwner; + if (admin || session.getUser().isRoleGranted(r)) { + add(session, rows, + // ROLE_NAME + identifier(r.getName()), + // REMARKS + r.getComment() + ); + } + } + } + } + + private void sessions(SessionLocal session, ArrayList rows) { + if (session.getUser().isAdmin()) { + for (SessionLocal s : database.getSessions(false)) { + sessions(session, rows, s); + } + } else { + sessions(session, rows, session); + } + } + + private void sessions(SessionLocal session, ArrayList rows, SessionLocal s) { + NetworkConnectionInfo networkConnectionInfo = s.getNetworkConnectionInfo(); + Command command = s.getCurrentCommand(); + int blockingSessionId = s.getBlockingSessionId(); + add(session, rows, + // SESSION_ID + ValueInteger.get(s.getId()), + // USER_NAME + s.getUser().getName(), + // SERVER + networkConnectionInfo == null ? null : networkConnectionInfo.getServer(), + // CLIENT_ADDR + networkConnectionInfo == null ? null : networkConnectionInfo.getClient(), + // CLIENT_INFO + networkConnectionInfo == null ? 
null : networkConnectionInfo.getClientInfo(), + // SESSION_START + s.getSessionStart(), + // ISOLATION_LEVEL + session.getIsolationLevel().getSQL(), + // EXECUTING_STATEMENT + command == null ? null : command.toString(), + // EXECUTING_STATEMENT_START + command == null ? null : s.getCommandStartOrEnd(), + // CONTAINS_UNCOMMITTED + ValueBoolean.get(s.hasPendingTransaction()), + // SESSION_STATE + String.valueOf(s.getState()), + // BLOCKER_ID + blockingSessionId == 0 ? null : ValueInteger.get(blockingSessionId), + // SLEEP_SINCE + s.getState() == State.SLEEP ? s.getCommandStartOrEnd() : null + ); + } + + private void sessionState(SessionLocal session, ArrayList rows) { + for (String name : session.getVariableNames()) { + Value v = session.getVariable(name); + StringBuilder builder = new StringBuilder().append("SET @").append(name).append(' '); + v.getSQL(builder, DEFAULT_SQL_FLAGS); + add(session, rows, + // STATE_KEY + "@" + name, + // STATE_COMMAND + builder.toString() + ); + } + for (Table table : session.getLocalTempTables()) { + add(session, rows, + // STATE_KEY + "TABLE " + table.getName(), + // STATE_COMMAND + table.getCreateSQL() + ); + } + String[] path = session.getSchemaSearchPath(); + if (path != null && path.length > 0) { + StringBuilder builder = new StringBuilder("SET SCHEMA_SEARCH_PATH "); + for (int i = 0, l = path.length; i < l; i++) { + if (i > 0) { + builder.append(", "); + } + StringUtils.quoteIdentifier(builder, path[i]); + } + add(session, rows, + // STATE_KEY + "SCHEMA_SEARCH_PATH", + // STATE_COMMAND + builder.toString() + ); + } + String schema = session.getCurrentSchemaName(); + if (schema != null) { + add(session, rows, + // STATE_KEY + "SCHEMA", + // STATE_COMMAND + StringUtils.quoteIdentifier(new StringBuilder("SET SCHEMA "), schema).toString() + ); + } + TimeZoneProvider currentTimeZone = session.currentTimeZone(); + if (!currentTimeZone.equals(DateTimeUtils.getTimeZone())) { + add(session, rows, + // STATE_KEY + "TIME ZONE", + // 
STATE_COMMAND + StringUtils.quoteStringSQL(new StringBuilder("SET TIME ZONE "), currentTimeZone.getId()) + .toString() + ); + } + } + + private void settings(SessionLocal session, ArrayList rows) { + for (Setting s : database.getAllSettings()) { + String value = s.getStringValue(); + if (value == null) { + value = Integer.toString(s.getIntValue()); + } + add(session, rows, identifier(s.getName()), value); + } + add(session, rows, "info.BUILD_ID", "" + Constants.BUILD_ID); + add(session, rows, "info.VERSION_MAJOR", "" + Constants.VERSION_MAJOR); + add(session, rows, "info.VERSION_MINOR", "" + Constants.VERSION_MINOR); + add(session, rows, "info.VERSION", Constants.FULL_VERSION); + if (session.getUser().isAdmin()) { + String[] settings = { + "java.runtime.version", "java.vm.name", + "java.vendor", "os.name", "os.arch", "os.version", + "sun.os.patch.level", "file.separator", + "path.separator", "line.separator", "user.country", + "user.language", "user.variant", "file.encoding" }; + for (String s : settings) { + add(session, rows, "property." + s, Utils.getProperty(s, "")); + } + } + add(session, rows, "DEFAULT_NULL_ORDERING", database.getDefaultNullOrdering().name()); + add(session, rows, "EXCLUSIVE", database.getExclusiveSession() == null ? "FALSE" : "TRUE"); + add(session, rows, "MODE", database.getMode().getName()); + add(session, rows, "QUERY_TIMEOUT", Integer.toString(session.getQueryTimeout())); + add(session, rows, "TIME ZONE", session.currentTimeZone().getId()); + add(session, rows, "TRUNCATE_LARGE_LENGTH", session.isTruncateLargeLength() ? "TRUE" : "FALSE"); + add(session, rows, "VARIABLE_BINARY", session.isVariableBinary() ? "TRUE" : "FALSE"); + add(session, rows, "OLD_INFORMATION_SCHEMA", session.isOldInformationSchema() ? 
"TRUE" : "FALSE"); + BitSet nonKeywords = session.getNonKeywords(); + if (nonKeywords != null) { + add(session, rows, "NON_KEYWORDS", Parser.formatNonKeywords(nonKeywords)); + } + add(session, rows, "RETENTION_TIME", Integer.toString(database.getRetentionTime())); + // database settings + for (Map.Entry entry : database.getSettings().getSortedSettings()) { + add(session, rows, entry.getKey(), entry.getValue()); + } + Store store = database.getStore(); + MVStore mvStore = store.getMvStore(); + FileStore fs = mvStore.getFileStore(); + if (fs != null) { + add(session, rows, + "info.FILE_WRITE", Long.toString(fs.getWriteCount())); + add(session, rows, + "info.FILE_WRITE_BYTES", Long.toString(fs.getWriteBytes())); + add(session, rows, + "info.FILE_READ", Long.toString(fs.getReadCount())); + add(session, rows, + "info.FILE_READ_BYTES", Long.toString(fs.getReadBytes())); + add(session, rows, + "info.UPDATE_FAILURE_PERCENT", + String.format(Locale.ENGLISH, "%.2f%%", 100 * mvStore.getUpdateFailureRatio())); + add(session, rows, + "info.FILL_RATE", Integer.toString(mvStore.getFillRate())); + add(session, rows, + "info.CHUNKS_FILL_RATE", Integer.toString(mvStore.getChunksFillRate())); + add(session, rows, + "info.CHUNKS_FILL_RATE_RW", Integer.toString(mvStore.getRewritableChunksFillRate())); + try { + add(session, rows, + "info.FILE_SIZE", Long.toString(fs.getFile().size())); + } catch (IOException ignore) {/**/} + add(session, rows, + "info.CHUNK_COUNT", Long.toString(mvStore.getChunkCount())); + add(session, rows, + "info.PAGE_COUNT", Long.toString(mvStore.getPageCount())); + add(session, rows, + "info.PAGE_COUNT_LIVE", Long.toString(mvStore.getLivePageCount())); + add(session, rows, + "info.PAGE_SIZE", Integer.toString(mvStore.getPageSplitSize())); + add(session, rows, + "info.CACHE_MAX_SIZE", Integer.toString(mvStore.getCacheSize())); + add(session, rows, + "info.CACHE_SIZE", Integer.toString(mvStore.getCacheSizeUsed())); + add(session, rows, + "info.CACHE_HIT_RATIO", 
Integer.toString(mvStore.getCacheHitRatio())); + add(session, rows, "info.TOC_CACHE_HIT_RATIO", + Integer.toString(mvStore.getTocCacheHitRatio())); + add(session, rows, + "info.LEAF_RATIO", Integer.toString(mvStore.getLeafRatio())); + } + } + + private void synonyms(SessionLocal session, ArrayList rows, String catalog) { + for (TableSynonym synonym : database.getAllSynonyms()) { + add(session, rows, + // SYNONYM_CATALOG + catalog, + // SYNONYM_SCHEMA + synonym.getSchema().getName(), + // SYNONYM_NAME + synonym.getName(), + // SYNONYM_FOR + synonym.getSynonymForName(), + // SYNONYM_FOR_SCHEMA + synonym.getSynonymForSchema().getName(), + // TYPE NAME + "SYNONYM", + // STATUS + "VALID", + // REMARKS + synonym.getComment() + ); + } + } + + private void users(SessionLocal session, ArrayList rows) { + User currentUser = session.getUser(); + if (currentUser.isAdmin()) { + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (rightOwner instanceof User) { + users(session, rows, (User) rightOwner); + } + } + } else { + users(session, rows, currentUser); + } + } + + private void users(SessionLocal session, ArrayList rows, User user) { + add(session, rows, + // USER_NAME + identifier(user.getName()), + // IS_ADMIN + ValueBoolean.get(user.isAdmin()), + // REMARKS + user.getComment() + ); + } + + private void addConstraintColumnUsage(SessionLocal session, ArrayList rows, String catalog, + Constraint constraint, Column column) { + Table table = column.getTable(); + add(session, rows, + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + table.getName(), + // COLUMN_NAME + column.getName(), + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraint.getName() + ); + } + + private void addPrivileges(SessionLocal session, ArrayList rows, DbObject grantee, String catalog, // + Table table, String column, int rightMask) { + if ((rightMask & Right.SELECT) != 0) 
{ + addPrivilege(session, rows, grantee, catalog, table, column, "SELECT"); + } + if ((rightMask & Right.INSERT) != 0) { + addPrivilege(session, rows, grantee, catalog, table, column, "INSERT"); + } + if ((rightMask & Right.UPDATE) != 0) { + addPrivilege(session, rows, grantee, catalog, table, column, "UPDATE"); + } + if ((rightMask & Right.DELETE) != 0) { + addPrivilege(session, rows, grantee, catalog, table, column, "DELETE"); + } + } + + private void addPrivilege(SessionLocal session, ArrayList rows, DbObject grantee, String catalog, Table table, + String column, String right) { + String isGrantable = "NO"; + if (grantee.getType() == DbObject.USER) { + User user = (User) grantee; + if (user.isAdmin()) { + // the right is grantable if the grantee is an admin + isGrantable = "YES"; + } + } + if (column == null) { + add(session, rows, + // GRANTOR + null, + // GRANTEE + identifier(grantee.getName()), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + table.getName(), + // PRIVILEGE_TYPE + right, + // IS_GRANTABLE + isGrantable, + // WITH_HIERARCHY + "NO" + ); + } else { + add(session, rows, + // GRANTOR + null, + // GRANTEE + identifier(grantee.getName()), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + table.getName(), + // COLUMN_NAME + column, + // PRIVILEGE_TYPE + right, + // IS_GRANTABLE + isGrantable + ); + } + } + + @Override + public long getMaxDataModificationId() { + switch (type) { + case SETTINGS: + case SEQUENCES: + case IN_DOUBT: + case SESSIONS: + case LOCKS: + case SESSION_STATE: + return Long.MAX_VALUE; + } + return database.getModificationDataId(); + } + + @Override + public boolean isView() { + return isView; + } + + @Override + public long getRowCount(SessionLocal session) { + return getRowCount(session, false); + } + + @Override + public long getRowCountApproximation(SessionLocal session) { + return getRowCount(session, true); + } + + private long 
getRowCount(SessionLocal session, boolean approximation) { + switch (type) { + case INFORMATION_SCHEMA_CATALOG_NAME: + return 1L; + case COLLATIONS: { + Locale[] locales = CompareMode.getCollationLocales(approximation); + if (locales != null) { + return locales.length + 1; + } + break; + } + case SCHEMATA: + return session.getDatabase().getAllSchemas().size(); + case IN_DOUBT: + if (session.getUser().isAdmin()) { + ArrayList inDoubt = session.getDatabase().getInDoubtTransactions(); + if (inDoubt != null) { + return inDoubt.size(); + } + } + return 0L; + case ROLES: + if (session.getUser().isAdmin()) { + long count = 0L; + for (RightOwner rightOwner : session.getDatabase().getAllUsersAndRoles()) { + if (rightOwner instanceof Role) { + count++; + } + } + return count; + } + break; + case SESSIONS: + if (session.getUser().isAdmin()) { + return session.getDatabase().getSessionCount(); + } else { + return 1L; + } + case USERS: + if (session.getUser().isAdmin()) { + long count = 0L; + for (RightOwner rightOwner : session.getDatabase().getAllUsersAndRoles()) { + if (rightOwner instanceof User) { + count++; + } + } + return count; + } else { + return 1L; + } + } + if (approximation) { + return ROW_COUNT_APPROXIMATION; + } + throw DbException.getInternalError(toString()); + } + + @Override + public boolean canGetRowCount(SessionLocal session) { + switch (type) { + case INFORMATION_SCHEMA_CATALOG_NAME: + case COLLATIONS: + case SCHEMATA: + case IN_DOUBT: + case SESSIONS: + case USERS: + return true; + case ROLES: + if (session.getUser().isAdmin()) { + return true; + } + break; + } + return false; + } + + /** + * Data type information. + */ + static final class DataTypeInformation { + + static final DataTypeInformation NULL = new DataTypeInformation(null, null, null, null, null, null, null, null, + null, false, null, null, null, null, null); + + /** + * DATA_TYPE. + */ + final String dataType; + + /** + * CHARACTER_MAXIMUM_LENGTH and CHARACTER_OCTET_LENGTH. 
+ */ + final Value characterPrecision; + + /** + * NUMERIC_PRECISION. + */ + final Value numericPrecision; + + /** + * NUMERIC_PRECISION_RADIX. + */ + final Value numericPrecisionRadix; + + /** + * NUMERIC_SCALE. + */ + final Value numericScale; + + /** + * DATETIME_PRECISION. + */ + final Value datetimePrecision; + + /** + * INTERVAL_PRECISION. + */ + final Value intervalPrecision; + + /** + * INTERVAL_TYPE. + */ + final Value intervalType; + + /** + * MAXIMUM_CARDINALITY. + */ + final Value maximumCardinality; + + final boolean hasCharsetAndCollation; + + /** + * DECLARED_DATA_TYPE. + */ + final String declaredDataType; + + /** + * DECLARED_NUMERIC_PRECISION. + */ + final Value declaredNumericPrecision; + + /** + * DECLARED_NUMERIC_SCALE. + */ + final Value declaredNumericScale; + + /** + * GEOMETRY_TYPE. + */ + final String geometryType; + + /** + * GEOMETRY_SRID. + */ + final Value geometrySrid; + + static DataTypeInformation valueOf(TypeInfo typeInfo) { + int type = typeInfo.getValueType(); + String dataType = Value.getTypeName(type); + ValueBigint characterPrecision = null; + ValueInteger numericPrecision = null, numericScale = null, numericPrecisionRadix = null, + datetimePrecision = null, intervalPrecision = null, maximumCardinality = null; + String intervalType = null; + boolean hasCharsetAndCollation = false; + String declaredDataType = null; + ValueInteger declaredNumericPrecision = null, declaredNumericScale = null; + String geometryType = null; + ValueInteger geometrySrid = null; + switch (type) { + case Value.CHAR: + case Value.VARCHAR: + case Value.CLOB: + case Value.VARCHAR_IGNORECASE: + hasCharsetAndCollation = true; + //$FALL-THROUGH$ + case Value.BINARY: + case Value.VARBINARY: + case Value.BLOB: + case Value.JAVA_OBJECT: + case Value.JSON: + characterPrecision = ValueBigint.get(typeInfo.getPrecision()); + break; + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + numericPrecision = 
ValueInteger.get(MathUtils.convertLongToInt(typeInfo.getPrecision())); + numericScale = ValueInteger.get(0); + numericPrecisionRadix = ValueInteger.get(2); + declaredDataType = dataType; + break; + case Value.NUMERIC: { + numericPrecision = ValueInteger.get(MathUtils.convertLongToInt(typeInfo.getPrecision())); + numericScale = ValueInteger.get(typeInfo.getScale()); + numericPrecisionRadix = ValueInteger.get(10); + declaredDataType = typeInfo.getExtTypeInfo() != null ? "DECIMAL" : "NUMERIC"; + if (typeInfo.getDeclaredPrecision() >= 0L) { + declaredNumericPrecision = numericPrecision; + } + if (typeInfo.getDeclaredScale() >= 0) { + declaredNumericScale = numericScale; + } + break; + } + case Value.REAL: + case Value.DOUBLE: { + numericPrecision = ValueInteger.get(MathUtils.convertLongToInt(typeInfo.getPrecision())); + numericPrecisionRadix = ValueInteger.get(2); + long declaredPrecision = typeInfo.getDeclaredPrecision(); + if (declaredPrecision >= 0) { + declaredDataType = "FLOAT"; + if (declaredPrecision > 0) { + declaredNumericPrecision = ValueInteger.get((int) declaredPrecision); + } + } else { + declaredDataType = dataType; + } + break; + } + case Value.DECFLOAT: + numericPrecision = ValueInteger.get(MathUtils.convertLongToInt(typeInfo.getPrecision())); + numericPrecisionRadix = ValueInteger.get(10); + declaredDataType = dataType; + if (typeInfo.getDeclaredPrecision() >= 0L) { + declaredNumericPrecision = numericPrecision; + } + break; + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + intervalType = IntervalQualifier.valueOf(type - Value.INTERVAL_YEAR).toString(); + dataType = 
"INTERVAL"; + intervalPrecision = ValueInteger.get(MathUtils.convertLongToInt(typeInfo.getPrecision())); + //$FALL-THROUGH$ + case Value.DATE: + case Value.TIME: + case Value.TIME_TZ: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + datetimePrecision = ValueInteger.get(typeInfo.getScale()); + break; + case Value.GEOMETRY: { + ExtTypeInfoGeometry extTypeInfo = (ExtTypeInfoGeometry) typeInfo.getExtTypeInfo(); + if (extTypeInfo != null) { + int typeCode = extTypeInfo.getType(); + if (typeCode != 0) { + geometryType = EWKTUtils.formatGeometryTypeAndDimensionSystem(new StringBuilder(), typeCode) + .toString(); + } + Integer srid = extTypeInfo.getSrid(); + if (srid != null) { + geometrySrid = ValueInteger.get(srid); + } + } + break; + } + case Value.ARRAY: + maximumCardinality = ValueInteger.get(MathUtils.convertLongToInt(typeInfo.getPrecision())); + } + return new DataTypeInformation(dataType, characterPrecision, numericPrecision, numericPrecisionRadix, + numericScale, datetimePrecision, intervalPrecision, + intervalType != null ? 
ValueVarchar.get(intervalType) : ValueNull.INSTANCE, maximumCardinality, + hasCharsetAndCollation, declaredDataType, declaredNumericPrecision, declaredNumericScale, + geometryType, geometrySrid); + } + + private DataTypeInformation(String dataType, Value characterPrecision, Value numericPrecision, + Value numericPrecisionRadix, Value numericScale, Value datetimePrecision, Value intervalPrecision, + Value intervalType, Value maximumCardinality, boolean hasCharsetAndCollation, String declaredDataType, + Value declaredNumericPrecision, Value declaredNumericScale, String geometryType, Value geometrySrid) { + this.dataType = dataType; + this.characterPrecision = characterPrecision; + this.numericPrecision = numericPrecision; + this.numericPrecisionRadix = numericPrecisionRadix; + this.numericScale = numericScale; + this.datetimePrecision = datetimePrecision; + this.intervalPrecision = intervalPrecision; + this.intervalType = intervalType; + this.maximumCardinality = maximumCardinality; + this.hasCharsetAndCollation = hasCharsetAndCollation; + this.declaredDataType = declaredDataType; + this.declaredNumericPrecision = declaredNumericPrecision; + this.declaredNumericScale = declaredNumericScale; + this.geometryType = geometryType; + this.geometrySrid = geometrySrid; + } + + } + +} diff --git a/h2/src/main/org/h2/table/InformationSchemaTableLegacy.java b/h2/src/main/org/h2/table/InformationSchemaTableLegacy.java new file mode 100644 index 0000000000..e55ec11929 --- /dev/null +++ b/h2/src/main/org/h2/table/InformationSchemaTableLegacy.java @@ -0,0 +1,2519 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.table; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.Types; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; + +import org.h2.command.Command; +import org.h2.command.Parser; +import org.h2.command.dml.Help; +import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.constraint.ConstraintActionType; +import org.h2.constraint.ConstraintCheck; +import org.h2.constraint.ConstraintDomain; +import org.h2.constraint.ConstraintReferential; +import org.h2.constraint.ConstraintUnique; +import org.h2.engine.Constants; +import org.h2.engine.DbObject; +import org.h2.engine.QueryStatisticsData; +import org.h2.engine.Right; +import org.h2.engine.RightOwner; +import org.h2.engine.Role; +import org.h2.engine.SessionLocal; +import org.h2.engine.SessionLocal.State; +import org.h2.engine.Setting; +import org.h2.engine.User; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.ValueExpression; +import org.h2.index.Index; +import org.h2.index.MetaIndex; +import org.h2.message.DbException; +import org.h2.mvstore.FileStore; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.db.Store; +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.result.SortOrder; +import org.h2.schema.Constant; +import org.h2.schema.Domain; +import org.h2.schema.FunctionAlias; +import org.h2.schema.FunctionAlias.JavaMethod; +import org.h2.schema.Schema; +import org.h2.schema.SchemaObject; +import org.h2.schema.Sequence; +import org.h2.schema.TriggerObject; +import org.h2.schema.UserDefinedFunction; +import org.h2.store.InDoubtTransaction; +import org.h2.tools.Csv; +import org.h2.util.DateTimeUtils; +import 
org.h2.util.HasSQL; +import org.h2.util.MathUtils; +import org.h2.util.NetworkConnectionInfo; +import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; +import org.h2.util.Utils; +import org.h2.value.CompareMode; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInteger; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueToObjectConverter2; + +/** + * This class is responsible to build the legacy variant of INFORMATION_SCHEMA + * tables. + */ +public final class InformationSchemaTableLegacy extends MetaTable { + + private static final String CHARACTER_SET_NAME = "Unicode"; + + private static final int TABLES = 0; + private static final int COLUMNS = TABLES + 1; + private static final int INDEXES = COLUMNS + 1; + private static final int TABLE_TYPES = INDEXES + 1; + private static final int TYPE_INFO = TABLE_TYPES + 1; + private static final int CATALOGS = TYPE_INFO + 1; + private static final int SETTINGS = CATALOGS + 1; + private static final int HELP = SETTINGS + 1; + private static final int SEQUENCES = HELP + 1; + private static final int USERS = SEQUENCES + 1; + private static final int ROLES = USERS + 1; + private static final int RIGHTS = ROLES + 1; + private static final int FUNCTION_ALIASES = RIGHTS + 1; + private static final int SCHEMATA = FUNCTION_ALIASES + 1; + private static final int TABLE_PRIVILEGES = SCHEMATA + 1; + private static final int COLUMN_PRIVILEGES = TABLE_PRIVILEGES + 1; + private static final int COLLATIONS = COLUMN_PRIVILEGES + 1; + private static final int VIEWS = COLLATIONS + 1; + private static final int IN_DOUBT = VIEWS + 1; + private static final int CROSS_REFERENCES = IN_DOUBT + 1; + private static final int FUNCTION_COLUMNS = CROSS_REFERENCES + 1; + private static final int CONSTRAINTS = FUNCTION_COLUMNS + 1; + private static final int 
CONSTANTS = CONSTRAINTS + 1; + private static final int DOMAINS = CONSTANTS + 1; + private static final int TRIGGERS = DOMAINS + 1; + private static final int SESSIONS = TRIGGERS + 1; + private static final int LOCKS = SESSIONS + 1; + private static final int SESSION_STATE = LOCKS + 1; + private static final int QUERY_STATISTICS = SESSION_STATE + 1; + private static final int SYNONYMS = QUERY_STATISTICS + 1; + private static final int TABLE_CONSTRAINTS = SYNONYMS + 1; + private static final int DOMAIN_CONSTRAINTS = TABLE_CONSTRAINTS + 1; + private static final int KEY_COLUMN_USAGE = DOMAIN_CONSTRAINTS + 1; + private static final int REFERENTIAL_CONSTRAINTS = KEY_COLUMN_USAGE + 1; + private static final int CHECK_CONSTRAINTS = REFERENTIAL_CONSTRAINTS + 1; + private static final int CONSTRAINT_COLUMN_USAGE = CHECK_CONSTRAINTS + 1; + + /** + * The number of meta table types. Supported meta table types are + * {@code 0..META_TABLE_TYPE_COUNT - 1}. + */ + public static final int META_TABLE_TYPE_COUNT = CONSTRAINT_COLUMN_USAGE + 1; + + /** + * Create a new metadata table. 
+ * + * @param schema the schema + * @param id the object id + * @param type the meta table type + */ + public InformationSchemaTableLegacy(Schema schema, int id, int type) { + super(schema, id, type); + Column[] cols; + String indexColumnName = null; + switch (type) { + case TABLES: + setMetaTableName("TABLES"); + cols = new Column[] { + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("TABLE_TYPE"), // + // extensions + column("STORAGE_TYPE"), // + column("SQL"), // + column("REMARKS"), // + column("LAST_MODIFICATION", TypeInfo.TYPE_BIGINT), // + column("ID", TypeInfo.TYPE_INTEGER), // + column("TYPE_NAME"), // + column("TABLE_CLASS"), // + column("ROW_COUNT_ESTIMATE", TypeInfo.TYPE_BIGINT), // + }; + indexColumnName = "TABLE_NAME"; + break; + case COLUMNS: + setMetaTableName("COLUMNS"); + cols = new Column[] { + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("COLUMN_NAME"), // + column("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER), // + column("COLUMN_DEFAULT"), // + column("IS_NULLABLE"), // + column("DATA_TYPE", TypeInfo.TYPE_INTEGER), // + column("CHARACTER_MAXIMUM_LENGTH", TypeInfo.TYPE_INTEGER), // + column("CHARACTER_OCTET_LENGTH", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION_RADIX", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("DATETIME_PRECISION", TypeInfo.TYPE_INTEGER), // + column("INTERVAL_TYPE"), // + column("INTERVAL_PRECISION", TypeInfo.TYPE_INTEGER), // + column("CHARACTER_SET_NAME"), // + column("COLLATION_NAME"), // + column("DOMAIN_CATALOG"), // + column("DOMAIN_SCHEMA"), // + column("DOMAIN_NAME"), // + column("IS_GENERATED"), // + column("GENERATION_EXPRESSION"), // + // extensions + column("TYPE_NAME"), // + column("NULLABLE", TypeInfo.TYPE_INTEGER), // + column("IS_COMPUTED", TypeInfo.TYPE_BOOLEAN), // + column("SELECTIVITY", 
TypeInfo.TYPE_INTEGER), // + column("SEQUENCE_NAME"), // + column("REMARKS"), // + column("SOURCE_DATA_TYPE", TypeInfo.TYPE_SMALLINT), // + column("COLUMN_TYPE"), // + column("COLUMN_ON_UPDATE"), // + column("IS_VISIBLE"), // + // compatibility + column("CHECK_CONSTRAINT"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case INDEXES: + setMetaTableName("INDEXES"); + cols = new Column[] { + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("NON_UNIQUE", TypeInfo.TYPE_BOOLEAN), // + column("INDEX_NAME"), // + column("ORDINAL_POSITION", TypeInfo.TYPE_SMALLINT), // + column("COLUMN_NAME"), // + column("CARDINALITY", TypeInfo.TYPE_INTEGER), // + column("PRIMARY_KEY", TypeInfo.TYPE_BOOLEAN), // + column("INDEX_TYPE_NAME"), // + column("IS_GENERATED", TypeInfo.TYPE_BOOLEAN), // + column("INDEX_TYPE", TypeInfo.TYPE_SMALLINT), // + column("ASC_OR_DESC"), // + column("PAGES", TypeInfo.TYPE_INTEGER), // + column("FILTER_CONDITION"), // + column("REMARKS"), // + column("SQL"), // + column("ID", TypeInfo.TYPE_INTEGER), // + column("SORT_TYPE", TypeInfo.TYPE_INTEGER), // + column("CONSTRAINT_NAME"), // + column("INDEX_CLASS"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case TABLE_TYPES: + setMetaTableName("TABLE_TYPES"); + cols = new Column[] { + column("TYPE"), // + }; + break; + case TYPE_INFO: + setMetaTableName("TYPE_INFO"); + cols = new Column[] { + column("TYPE_NAME"), // + column("DATA_TYPE", TypeInfo.TYPE_INTEGER), // + column("PRECISION", TypeInfo.TYPE_INTEGER), // + column("PREFIX"), // + column("SUFFIX"), // + column("PARAMS"), // + column("AUTO_INCREMENT", TypeInfo.TYPE_BOOLEAN), // + column("MINIMUM_SCALE", TypeInfo.TYPE_SMALLINT), // + column("MAXIMUM_SCALE", TypeInfo.TYPE_SMALLINT), // + column("RADIX", TypeInfo.TYPE_INTEGER), // + column("POS", TypeInfo.TYPE_INTEGER), // + column("CASE_SENSITIVE", TypeInfo.TYPE_BOOLEAN), // + column("NULLABLE", TypeInfo.TYPE_SMALLINT), // + column("SEARCHABLE", 
TypeInfo.TYPE_SMALLINT), // + }; + break; + case CATALOGS: + setMetaTableName("CATALOGS"); + cols = new Column[] { + column("CATALOG_NAME"), // + }; + break; + case SETTINGS: + setMetaTableName("SETTINGS"); + cols = new Column[] { + column("NAME"), // + column("VALUE"), // + }; + break; + case HELP: + setMetaTableName("HELP"); + cols = new Column[] { + column("ID", TypeInfo.TYPE_INTEGER), // + column("SECTION"), // + column("TOPIC"), // + column("SYNTAX"), // + column("TEXT"), // + }; + break; + case SEQUENCES: + setMetaTableName("SEQUENCES"); + cols = new Column[] { + column("SEQUENCE_CATALOG"), // + column("SEQUENCE_SCHEMA"), // + column("SEQUENCE_NAME"), // + column("DATA_TYPE"), // + column("NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_PRECISION_RADIX", TypeInfo.TYPE_INTEGER), // + column("NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("START_VALUE", TypeInfo.TYPE_BIGINT), // + column("MINIMUM_VALUE", TypeInfo.TYPE_BIGINT), // + column("MAXIMUM_VALUE", TypeInfo.TYPE_BIGINT), // + column("INCREMENT", TypeInfo.TYPE_BIGINT), // + column("CYCLE_OPTION"), // + column("DECLARED_DATA_TYPE"), // + column("DECLARED_NUMERIC_PRECISION", TypeInfo.TYPE_INTEGER), // + column("DECLARED_NUMERIC_SCALE", TypeInfo.TYPE_INTEGER), // + column("CURRENT_VALUE", TypeInfo.TYPE_BIGINT), // + column("IS_GENERATED", TypeInfo.TYPE_BOOLEAN), // + column("REMARKS"), // + column("CACHE", TypeInfo.TYPE_BIGINT), // + column("ID", TypeInfo.TYPE_INTEGER), // + // compatibility + column("MIN_VALUE", TypeInfo.TYPE_BIGINT), // + column("MAX_VALUE", TypeInfo.TYPE_BIGINT), // + column("IS_CYCLE", TypeInfo.TYPE_BOOLEAN), // + }; + break; + case USERS: + setMetaTableName("USERS"); + cols = new Column[] { + column("NAME"), // + column("ADMIN"), // + column("REMARKS"), // + column("ID", TypeInfo.TYPE_INTEGER), // + }; + break; + case ROLES: + setMetaTableName("ROLES"); + cols = new Column[] { + column("NAME"), // + column("REMARKS"), // + column("ID", TypeInfo.TYPE_INTEGER), // + 
}; + break; + case RIGHTS: + setMetaTableName("RIGHTS"); + cols = new Column[] { + column("GRANTEE"), // + column("GRANTEETYPE"), // + column("GRANTEDROLE"), // + column("RIGHTS"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("ID", TypeInfo.TYPE_INTEGER), // + }; + indexColumnName = "TABLE_NAME"; + break; + case FUNCTION_ALIASES: + setMetaTableName("FUNCTION_ALIASES"); + cols = new Column[] { + column("ALIAS_CATALOG"), // + column("ALIAS_SCHEMA"), // + column("ALIAS_NAME"), // + column("JAVA_CLASS"), // + column("JAVA_METHOD"), // + column("DATA_TYPE", TypeInfo.TYPE_INTEGER), // + column("TYPE_NAME"), // + column("COLUMN_COUNT", TypeInfo.TYPE_INTEGER), // + column("RETURNS_RESULT", TypeInfo.TYPE_SMALLINT), // + column("REMARKS"), // + column("ID", TypeInfo.TYPE_INTEGER), // + column("SOURCE"), // + }; + break; + case FUNCTION_COLUMNS: + setMetaTableName("FUNCTION_COLUMNS"); + cols = new Column[] { + column("ALIAS_CATALOG"), // + column("ALIAS_SCHEMA"), // + column("ALIAS_NAME"), // + column("JAVA_CLASS"), // + column("JAVA_METHOD"), // + column("COLUMN_COUNT", TypeInfo.TYPE_INTEGER), // + column("POS", TypeInfo.TYPE_INTEGER), // + column("COLUMN_NAME"), // + column("DATA_TYPE", TypeInfo.TYPE_INTEGER), // + column("TYPE_NAME"), // + column("PRECISION", TypeInfo.TYPE_INTEGER), // + column("SCALE", TypeInfo.TYPE_SMALLINT), // + column("RADIX", TypeInfo.TYPE_SMALLINT), // + column("NULLABLE", TypeInfo.TYPE_SMALLINT), // + column("COLUMN_TYPE", TypeInfo.TYPE_SMALLINT), // + column("REMARKS"), // + column("COLUMN_DEFAULT"), // + }; + break; + case SCHEMATA: + setMetaTableName("SCHEMATA"); + cols = new Column[] { + column("CATALOG_NAME"), // + column("SCHEMA_NAME"), // + column("SCHEMA_OWNER"), // + column("DEFAULT_CHARACTER_SET_NAME"), // + column("DEFAULT_COLLATION_NAME"), // + column("IS_DEFAULT", TypeInfo.TYPE_BOOLEAN), // + column("REMARKS"), // + column("ID", TypeInfo.TYPE_INTEGER), // + }; + break; + case TABLE_PRIVILEGES: + 
setMetaTableName("TABLE_PRIVILEGES"); + cols = new Column[] { + column("GRANTOR"), // + column("GRANTEE"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("PRIVILEGE_TYPE"), // + column("IS_GRANTABLE"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case COLUMN_PRIVILEGES: + setMetaTableName("COLUMN_PRIVILEGES"); + cols = new Column[] { + column("GRANTOR"), // + column("GRANTEE"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("COLUMN_NAME"), // + column("PRIVILEGE_TYPE"), // + column("IS_GRANTABLE"), // + }; + indexColumnName = "TABLE_NAME"; + break; + case COLLATIONS: + setMetaTableName("COLLATIONS"); + cols = new Column[] { + column("NAME"), // + column("KEY"), // + }; + break; + case VIEWS: + setMetaTableName("VIEWS"); + cols = new Column[] { + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("VIEW_DEFINITION"), // + column("CHECK_OPTION"), // + column("IS_UPDATABLE"), // + column("STATUS"), // + column("REMARKS"), // + column("ID", TypeInfo.TYPE_INTEGER), // + }; + indexColumnName = "TABLE_NAME"; + break; + case IN_DOUBT: + setMetaTableName("IN_DOUBT"); + cols = new Column[] { + column("TRANSACTION"), // + column("STATE"), // + }; + break; + case CROSS_REFERENCES: + setMetaTableName("CROSS_REFERENCES"); + cols = new Column[] { + column("PKTABLE_CATALOG"), // + column("PKTABLE_SCHEMA"), // + column("PKTABLE_NAME"), // + column("PKCOLUMN_NAME"), // + column("FKTABLE_CATALOG"), // + column("FKTABLE_SCHEMA"), // + column("FKTABLE_NAME"), // + column("FKCOLUMN_NAME"), // + column("ORDINAL_POSITION", TypeInfo.TYPE_SMALLINT), // + column("UPDATE_RULE", TypeInfo.TYPE_SMALLINT), // + column("DELETE_RULE", TypeInfo.TYPE_SMALLINT), // + column("FK_NAME"), // + column("PK_NAME"), // + column("DEFERRABILITY", TypeInfo.TYPE_SMALLINT), // + }; + indexColumnName = "PKTABLE_NAME"; + break; + case CONSTRAINTS: + 
setMetaTableName("CONSTRAINTS"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + column("CONSTRAINT_TYPE"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("UNIQUE_INDEX_NAME"), // + column("CHECK_EXPRESSION"), // + column("COLUMN_LIST"), // + column("REMARKS"), // + column("SQL"), // + column("ID", TypeInfo.TYPE_INTEGER), // + }; + indexColumnName = "TABLE_NAME"; + break; + case CONSTANTS: + setMetaTableName("CONSTANTS"); + cols = new Column[] { + column("CONSTANT_CATALOG"), // + column("CONSTANT_SCHEMA"), // + column("CONSTANT_NAME"), // + column("DATA_TYPE", TypeInfo.TYPE_INTEGER), // + column("REMARKS"), // + column("SQL"), // + column("ID", TypeInfo.TYPE_INTEGER), // + }; + break; + case DOMAINS: + setMetaTableName("DOMAINS"); + cols = new Column[] { + column("DOMAIN_CATALOG"), // + column("DOMAIN_SCHEMA"), // + column("DOMAIN_NAME"), // + column("DOMAIN_DEFAULT"), // + column("DOMAIN_ON_UPDATE"), // + column("DATA_TYPE", TypeInfo.TYPE_INTEGER), // + column("PRECISION", TypeInfo.TYPE_INTEGER), // + column("SCALE", TypeInfo.TYPE_INTEGER), // + column("TYPE_NAME"), // + column("PARENT_DOMAIN_CATALOG"), // + column("PARENT_DOMAIN_SCHEMA"), // + column("PARENT_DOMAIN_NAME"), // + column("SELECTIVITY", TypeInfo.TYPE_INTEGER), // + column("REMARKS"), // + column("SQL"), // + column("ID", TypeInfo.TYPE_INTEGER), // + // compatibility + column("COLUMN_DEFAULT"), // + column("IS_NULLABLE"), // + column("CHECK_CONSTRAINT"), // + }; + break; + case TRIGGERS: + setMetaTableName("TRIGGERS"); + cols = new Column[] { + column("TRIGGER_CATALOG"), // + column("TRIGGER_SCHEMA"), // + column("TRIGGER_NAME"), // + column("TRIGGER_TYPE"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("BEFORE", TypeInfo.TYPE_BOOLEAN), // + column("JAVA_CLASS"), // + column("QUEUE_SIZE", TypeInfo.TYPE_INTEGER), // + 
column("NO_WAIT", TypeInfo.TYPE_BOOLEAN), // + column("REMARKS"), // + column("SQL"), // + column("ID", TypeInfo.TYPE_INTEGER), // + }; + break; + case SESSIONS: { + setMetaTableName("SESSIONS"); + cols = new Column[] { + column("ID", TypeInfo.TYPE_INTEGER), // + column("USER_NAME"), // + column("SERVER"), // + column("CLIENT_ADDR"), // + column("CLIENT_INFO"), // + column("SESSION_START", TypeInfo.TYPE_TIMESTAMP_TZ), // + column("ISOLATION_LEVEL"), // + column("STATEMENT"), // + column("STATEMENT_START", TypeInfo.TYPE_TIMESTAMP_TZ), // + column("CONTAINS_UNCOMMITTED", TypeInfo.TYPE_BOOLEAN), // + column("STATE"), // + column("BLOCKER_ID", TypeInfo.TYPE_INTEGER), // + column("SLEEP_SINCE", TypeInfo.TYPE_TIMESTAMP_TZ), // + }; + break; + } + case LOCKS: { + setMetaTableName("LOCKS"); + cols = new Column[] { + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("SESSION_ID", TypeInfo.TYPE_INTEGER), // + column("LOCK_TYPE"), // + }; + break; + } + case SESSION_STATE: { + setMetaTableName("SESSION_STATE"); + cols = new Column[] { + column("KEY"), // + column("SQL"), // + }; + break; + } + case QUERY_STATISTICS: { + setMetaTableName("QUERY_STATISTICS"); + cols = new Column[] { + column("SQL_STATEMENT"), // + column("EXECUTION_COUNT", TypeInfo.TYPE_INTEGER), // + column("MIN_EXECUTION_TIME", TypeInfo.TYPE_DOUBLE), // + column("MAX_EXECUTION_TIME", TypeInfo.TYPE_DOUBLE), // + column("CUMULATIVE_EXECUTION_TIME", TypeInfo.TYPE_DOUBLE), // + column("AVERAGE_EXECUTION_TIME", TypeInfo.TYPE_DOUBLE), // + column("STD_DEV_EXECUTION_TIME", TypeInfo.TYPE_DOUBLE), // + column("MIN_ROW_COUNT", TypeInfo.TYPE_BIGINT), // + column("MAX_ROW_COUNT", TypeInfo.TYPE_BIGINT), // + column("CUMULATIVE_ROW_COUNT", TypeInfo.TYPE_BIGINT), // + column("AVERAGE_ROW_COUNT", TypeInfo.TYPE_DOUBLE), // + column("STD_DEV_ROW_COUNT", TypeInfo.TYPE_DOUBLE), // + }; + break; + } + case SYNONYMS: { + setMetaTableName("SYNONYMS"); + cols = new Column[] { + column("SYNONYM_CATALOG"), // + 
column("SYNONYM_SCHEMA"), // + column("SYNONYM_NAME"), // + column("SYNONYM_FOR"), // + column("SYNONYM_FOR_SCHEMA"), // + column("TYPE_NAME"), // + column("STATUS"), // + column("REMARKS"), // + column("ID", TypeInfo.TYPE_INTEGER), // + }; + indexColumnName = "SYNONYM_NAME"; + break; + } + case TABLE_CONSTRAINTS: { + setMetaTableName("TABLE_CONSTRAINTS"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + column("CONSTRAINT_TYPE"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("IS_DEFERRABLE"), // + column("INITIALLY_DEFERRED"), // + column("REMARKS"), // + column("SQL"), // + column("ID", TypeInfo.TYPE_INTEGER), // + }; + indexColumnName = "TABLE_NAME"; + break; + } + case DOMAIN_CONSTRAINTS: { + setMetaTableName("DOMAIN_CONSTRAINTS"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + column("DOMAIN_CATALOG"), // + column("DOMAIN_SCHEMA"), // + column("DOMAIN_NAME"), // + column("IS_DEFERRABLE"), // + column("INITIALLY_DEFERRED"), // + column("REMARKS"), // + column("SQL"), // + column("ID", TypeInfo.TYPE_INTEGER), // + }; + break; + } + case KEY_COLUMN_USAGE: { + setMetaTableName("KEY_COLUMN_USAGE"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("COLUMN_NAME"), // + column("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER), // + column("POSITION_IN_UNIQUE_CONSTRAINT", TypeInfo.TYPE_INTEGER), // + column("INDEX_CATALOG"), // + column("INDEX_SCHEMA"), // + column("INDEX_NAME"), // + }; + indexColumnName = "TABLE_NAME"; + break; + } + case REFERENTIAL_CONSTRAINTS: { + setMetaTableName("REFERENTIAL_CONSTRAINTS"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), 
// + column("CONSTRAINT_NAME"), // + column("UNIQUE_CONSTRAINT_CATALOG"), // + column("UNIQUE_CONSTRAINT_SCHEMA"), // + column("UNIQUE_CONSTRAINT_NAME"), // + column("MATCH_OPTION"), // + column("UPDATE_RULE"), // + column("DELETE_RULE"), // + }; + break; + } + case CHECK_CONSTRAINTS: { + setMetaTableName("CHECK_CONSTRAINTS"); + cols = new Column[] { + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + column("CHECK_CLAUSE"), // + }; + break; + } + case CONSTRAINT_COLUMN_USAGE: { + setMetaTableName("CONSTRAINT_COLUMN_USAGE"); + cols = new Column[] { + column("TABLE_CATALOG"), // + column("TABLE_SCHEMA"), // + column("TABLE_NAME"), // + column("COLUMN_NAME"), // + column("CONSTRAINT_CATALOG"), // + column("CONSTRAINT_SCHEMA"), // + column("CONSTRAINT_NAME"), // + }; + indexColumnName = "TABLE_NAME"; + break; + } + default: + throw DbException.getInternalError("type=" + type); + } + setColumns(cols); + + if (indexColumnName == null) { + indexColumn = -1; + metaIndex = null; + } else { + indexColumn = getColumn(database.sysIdentifier(indexColumnName)).getColumnId(); + IndexColumn[] indexCols = IndexColumn.wrap( + new Column[] { cols[indexColumn] }); + metaIndex = new MetaIndex(this, indexCols, false); + } + } + + private static String replaceNullWithEmpty(String s) { + return s == null ? 
"" : s; + } + + @Override + public ArrayList generateRows(SessionLocal session, SearchRow first, SearchRow last) { + Value indexFrom = null, indexTo = null; + + if (indexColumn >= 0) { + if (first != null) { + indexFrom = first.getValue(indexColumn); + } + if (last != null) { + indexTo = last.getValue(indexColumn); + } + } + + ArrayList rows = Utils.newSmallArrayList(); + String catalog = database.getShortName(); + boolean admin = session.getUser().isAdmin(); + switch (type) { + case TABLES: { + for (Table table : getAllTables(session)) { + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + if (hideTable(table, session)) { + continue; + } + String storageType; + if (table.isTemporary()) { + if (table.isGlobalTemporary()) { + storageType = "GLOBAL TEMPORARY"; + } else { + storageType = "LOCAL TEMPORARY"; + } + } else { + storageType = table.isPersistIndexes() ? + "CACHED" : "MEMORY"; + } + String sql = table.getCreateSQL(); + if (!admin) { + if (sql != null && sql.contains(DbException.HIDE_SQL)) { + // hide the password of linked tables + sql = "-"; + } + } + add(session, + rows, + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // TABLE_TYPE + table.getTableType().toString(), + // STORAGE_TYPE + storageType, + // SQL + sql, + // REMARKS + replaceNullWithEmpty(table.getComment()), + // LAST_MODIFICATION + ValueBigint.get(table.getMaxDataModificationId()), + // ID + ValueInteger.get(table.getId()), + // TYPE_NAME + null, + // TABLE_CLASS + table.getClass().getName(), + // ROW_COUNT_ESTIMATE + ValueBigint.get(table.getRowCountApproximation(session)) + ); + } + break; + } + case COLUMNS: { + // reduce the number of tables to scan - makes some metadata queries + // 10x faster + final ArrayList

          tablesToList; + if (indexFrom != null && indexFrom.equals(indexTo)) { + String tableName = indexFrom.getString(); + if (tableName == null) { + break; + } + tablesToList = getTablesByName(session, tableName); + } else { + tablesToList = getAllTables(session); + } + for (Table table : tablesToList) { + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + if (hideTable(table, session)) { + continue; + } + Column[] cols = table.getColumns(); + String collation = database.getCompareMode().getName(); + for (int j = 0; j < cols.length; j++) { + Column c = cols[j]; + Domain domain = c.getDomain(); + TypeInfo typeInfo = c.getType(); + ValueInteger precision = ValueInteger.get(MathUtils.convertLongToInt(typeInfo.getPrecision())); + ValueInteger scale = ValueInteger.get(typeInfo.getScale()); + Sequence sequence = c.getSequence(); + boolean hasDateTimePrecision; + int type = typeInfo.getValueType(); + switch (type) { + case Value.TIME: + case Value.TIME_TZ: + case Value.DATE: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + hasDateTimePrecision = true; + break; + default: + hasDateTimePrecision = false; + } + boolean isGenerated = c.isGenerated(); + boolean isInterval = DataType.isIntervalType(type); + String createSQLWithoutName = c.getCreateSQLWithoutName(); + add(session, + rows, + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // COLUMN_NAME + c.getName(), + // ORDINAL_POSITION + ValueInteger.get(j + 1), + // COLUMN_DEFAULT + isGenerated ? null : c.getDefaultSQL(), + // IS_NULLABLE + c.isNullable() ? 
"YES" : "NO", + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(typeInfo)), + // CHARACTER_MAXIMUM_LENGTH + precision, + // CHARACTER_OCTET_LENGTH + precision, + // NUMERIC_PRECISION + precision, + // NUMERIC_PRECISION_RADIX + ValueInteger.get(10), + // NUMERIC_SCALE + scale, + // DATETIME_PRECISION + hasDateTimePrecision ? scale : null, + // INTERVAL_TYPE + isInterval ? createSQLWithoutName.substring(9) : null, + // INTERVAL_PRECISION + isInterval ? precision : null, + // CHARACTER_SET_NAME + CHARACTER_SET_NAME, + // COLLATION_NAME + collation, + // DOMAIN_CATALOG + domain != null ? catalog : null, + // DOMAIN_SCHEMA + domain != null ? domain.getSchema().getName() : null, + // DOMAIN_NAME + domain != null ? domain.getName() : null, + // IS_GENERATED + isGenerated ? "ALWAYS" : "NEVER", + // GENERATION_EXPRESSION + isGenerated ? c.getDefaultSQL() : null, + // TYPE_NAME + identifier(isInterval ? "INTERVAL" : typeInfo.getDeclaredTypeName()), + // NULLABLE + ValueInteger.get(c.isNullable() + ? DatabaseMetaData.columnNullable : DatabaseMetaData.columnNoNulls), + // IS_COMPUTED + ValueBoolean.get(isGenerated), + // SELECTIVITY + ValueInteger.get(c.getSelectivity()), + // SEQUENCE_NAME + sequence == null ? null : sequence.getName(), + // REMARKS + replaceNullWithEmpty(c.getComment()), + // SOURCE_DATA_TYPE + // SMALLINT + null, + // COLUMN_TYPE + createSQLWithoutName, + // COLUMN_ON_UPDATE + c.getOnUpdateSQL(), + // IS_VISIBLE + ValueBoolean.get(c.getVisible()), + // CHECK_CONSTRAINT + null + ); + } + } + break; + } + case INDEXES: { + // reduce the number of tables to scan - makes some metadata queries + // 10x faster + final ArrayList
          tablesToList; + if (indexFrom != null && indexFrom.equals(indexTo)) { + String tableName = indexFrom.getString(); + if (tableName == null) { + break; + } + tablesToList = getTablesByName(session, tableName); + } else { + tablesToList = getAllTables(session); + } + for (Table table : tablesToList) { + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + if (hideTable(table, session)) { + continue; + } + ArrayList indexes = table.getIndexes(); + ArrayList constraints = table.getConstraints(); + for (int j = 0; indexes != null && j < indexes.size(); j++) { + Index index = indexes.get(j); + if (index.getCreateSQL() == null) { + continue; + } + String constraintName = null; + for (int k = 0; constraints != null && k < constraints.size(); k++) { + Constraint constraint = constraints.get(k); + if (constraint.usesIndex(index)) { + if (index.getIndexType().isPrimaryKey()) { + if (constraint.getConstraintType() == Constraint.Type.PRIMARY_KEY) { + constraintName = constraint.getName(); + } + } else { + constraintName = constraint.getName(); + } + } + } + IndexColumn[] cols = index.getIndexColumns(); + int uniqueColumnCount = index.getUniqueColumnCount(); + String indexClass = index.getClass().getName(); + for (int k = 0; k < cols.length; k++) { + IndexColumn idxCol = cols[k]; + Column column = idxCol.column; + add(session, + rows, + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // NON_UNIQUE + ValueBoolean.get(k >= uniqueColumnCount), + // INDEX_NAME + index.getName(), + // ORDINAL_POSITION + ValueSmallint.get((short) (k + 1)), + // COLUMN_NAME + column.getName(), + // CARDINALITY + ValueInteger.get(0), + // PRIMARY_KEY + ValueBoolean.get(index.getIndexType().isPrimaryKey()), + // INDEX_TYPE_NAME + index.getIndexType().getSQL(), + // IS_GENERATED + ValueBoolean.get(index.getIndexType().getBelongsToConstraint()), + // INDEX_TYPE + 
ValueSmallint.get(DatabaseMetaData.tableIndexOther), + // ASC_OR_DESC + (idxCol.sortType & SortOrder.DESCENDING) != 0 ? "D" : "A", + // PAGES + ValueInteger.get(0), + // FILTER_CONDITION + "", + // REMARKS + replaceNullWithEmpty(index.getComment()), + // SQL + index.getCreateSQL(), + // ID + ValueInteger.get(index.getId()), + // SORT_TYPE + ValueInteger.get(idxCol.sortType), + // CONSTRAINT_NAME + constraintName, + // INDEX_CLASS + indexClass + ); + } + } + } + break; + } + case TABLE_TYPES: { + add(session, rows, TableType.TABLE.toString()); + add(session, rows, TableType.TABLE_LINK.toString()); + add(session, rows, TableType.SYSTEM_TABLE.toString()); + add(session, rows, TableType.VIEW.toString()); + add(session, rows, TableType.EXTERNAL_TABLE_ENGINE.toString()); + break; + } + case TYPE_INFO: { + for (int i = 1, l = Value.TYPE_COUNT; i < l; i++) { + DataType t = DataType.getDataType(i); + add(session, + rows, + // TYPE_NAME + Value.getTypeName(t.type), + // DATA_TYPE + ValueInteger.get(t.sqlType), + // PRECISION + ValueInteger.get(MathUtils.convertLongToInt(t.maxPrecision)), + // PREFIX + t.prefix, + // SUFFIX + t.suffix, + // PARAMS + t.params, + // AUTO_INCREMENT + ValueBoolean.FALSE, + // MINIMUM_SCALE + ValueSmallint.get(MathUtils.convertIntToShort(t.minScale)), + // MAXIMUM_SCALE + ValueSmallint.get(MathUtils.convertIntToShort(t.maxScale)), + // RADIX + DataType.isNumericType(i) ? 
ValueInteger.get(10) : null, + // POS + ValueInteger.get(t.type), + // CASE_SENSITIVE + ValueBoolean.get(t.caseSensitive), + // NULLABLE + ValueSmallint.get((short) DatabaseMetaData.typeNullable), + // SEARCHABLE + ValueSmallint.get((short) DatabaseMetaData.typeSearchable) + ); + } + break; + } + case CATALOGS: { + add(session, rows, catalog); + break; + } + case SETTINGS: { + for (Setting s : database.getAllSettings()) { + String value = s.getStringValue(); + if (value == null) { + value = Integer.toString(s.getIntValue()); + } + add(session, + rows, + identifier(s.getName()), value + ); + } + add(session, rows, "info.BUILD_ID", "" + Constants.BUILD_ID); + add(session, rows, "info.VERSION_MAJOR", "" + Constants.VERSION_MAJOR); + add(session, rows, "info.VERSION_MINOR", "" + Constants.VERSION_MINOR); + add(session, rows, "info.VERSION", Constants.FULL_VERSION); + if (admin) { + String[] settings = { + "java.runtime.version", "java.vm.name", + "java.vendor", "os.name", "os.arch", "os.version", + "sun.os.patch.level", "file.separator", + "path.separator", "line.separator", "user.country", + "user.language", "user.variant", "file.encoding" }; + for (String s : settings) { + add(session, rows, "property." + s, Utils.getProperty(s, "")); + } + } + add(session, rows, "DEFAULT_NULL_ORDERING", database.getDefaultNullOrdering().name()); + add(session, rows, "EXCLUSIVE", database.getExclusiveSession() == null ? + "FALSE" : "TRUE"); + add(session, rows, "MODE", database.getMode().getName()); + add(session, rows, "QUERY_TIMEOUT", Integer.toString(session.getQueryTimeout())); + add(session, rows, "TIME ZONE", session.currentTimeZone().getId()); + add(session, rows, "TRUNCATE_LARGE_LENGTH", session.isTruncateLargeLength() ? "TRUE" : "FALSE"); + add(session, rows, "VARIABLE_BINARY", session.isVariableBinary() ? "TRUE" : "FALSE"); + add(session, rows, "OLD_INFORMATION_SCHEMA", session.isOldInformationSchema() ? 
"TRUE" : "FALSE"); + BitSet nonKeywords = session.getNonKeywords(); + if (nonKeywords != null) { + add(session, rows, "NON_KEYWORDS", Parser.formatNonKeywords(nonKeywords)); + } + add(session, rows, "RETENTION_TIME", Integer.toString(database.getRetentionTime())); + // database settings + for (Map.Entry entry : database.getSettings().getSortedSettings()) { + add(session, rows, entry.getKey(), entry.getValue()); + } + Store store = database.getStore(); + MVStore mvStore = store.getMvStore(); + FileStore fs = mvStore.getFileStore(); + if (fs != null) { + add(session, rows, + "info.FILE_WRITE", Long.toString(fs.getWriteCount())); + add(session, rows, + "info.FILE_WRITE_BYTES", Long.toString(fs.getWriteBytes())); + add(session, rows, + "info.FILE_READ", Long.toString(fs.getReadCount())); + add(session, rows, + "info.FILE_READ_BYTES", Long.toString(fs.getReadBytes())); + add(session, rows, + "info.UPDATE_FAILURE_PERCENT", + String.format(Locale.ENGLISH, "%.2f%%", 100 * mvStore.getUpdateFailureRatio())); + add(session, rows, + "info.FILL_RATE", Integer.toString(mvStore.getFillRate())); + add(session, rows, + "info.CHUNKS_FILL_RATE", Integer.toString(mvStore.getChunksFillRate())); + add(session, rows, + "info.CHUNKS_FILL_RATE_RW", Integer.toString(mvStore.getRewritableChunksFillRate())); + try { + add(session, rows, + "info.FILE_SIZE", Long.toString(fs.getFile().size())); + } catch (IOException ignore) {/**/} + add(session, rows, + "info.CHUNK_COUNT", Long.toString(mvStore.getChunkCount())); + add(session, rows, + "info.PAGE_COUNT", Long.toString(mvStore.getPageCount())); + add(session, rows, + "info.PAGE_COUNT_LIVE", Long.toString(mvStore.getLivePageCount())); + add(session, rows, + "info.PAGE_SIZE", Integer.toString(mvStore.getPageSplitSize())); + add(session, rows, + "info.CACHE_MAX_SIZE", Integer.toString(mvStore.getCacheSize())); + add(session, rows, + "info.CACHE_SIZE", Integer.toString(mvStore.getCacheSizeUsed())); + add(session, rows, + "info.CACHE_HIT_RATIO", 
Integer.toString(mvStore.getCacheHitRatio())); + add(session, rows, "info.TOC_CACHE_HIT_RATIO", + Integer.toString(mvStore.getTocCacheHitRatio())); + add(session, rows, + "info.LEAF_RATIO", Integer.toString(mvStore.getLeafRatio())); + } + break; + } + case HELP: { + String resource = "/org/h2/res/help.csv"; + try { + final byte[] data = Utils.getResource(resource); + final Reader reader = new InputStreamReader( + new ByteArrayInputStream(data)); + final Csv csv = new Csv(); + csv.setLineCommentCharacter('#'); + final ResultSet rs = csv.read(reader, null); + final int columnCount = rs.getMetaData().getColumnCount() - 1; + final String[] values = new String[5]; + for (int i = 0; rs.next(); i++) { + for (int j = 0; j < columnCount; j++) { + String s = rs.getString(1 + j); + switch (j) { + case 2: // SYNTAX column + // Strip out the special annotations we use to help build + // the railroad/BNF diagrams + s = Help.stripAnnotationsFromSyntax(s); + break; + case 3: // TEXT column + s = Help.processHelpText(s); + } + values[j] = s.trim(); + } + add(session, + rows, + // ID + ValueInteger.get(i), + // SECTION + values[0], + // TOPIC + values[1], + // SYNTAX + values[2], + // TEXT + values[3] + ); + } + } catch (Exception e) { + throw DbException.convert(e); + } + break; + } + case SEQUENCES: { + for (SchemaObject obj : getAllSchemaObjects(DbObject.SEQUENCE)) { + Sequence s = (Sequence) obj; + TypeInfo dataType = s.getDataType(); + String dataTypeName = Value.getTypeName(dataType.getValueType()); + ValueInteger declaredScale = ValueInteger.get(dataType.getScale()); + add(session, + rows, + // SEQUENCE_CATALOG + catalog, + // SEQUENCE_SCHEMA + s.getSchema().getName(), + // SEQUENCE_NAME + s.getName(), + // DATA_TYPE + dataTypeName, + // NUMERIC_PRECISION + ValueInteger.get(s.getEffectivePrecision()), + // NUMERIC_PRECISION_RADIX + ValueInteger.get(10), + // NUMERIC_SCALE + declaredScale, + // START_VALUE + ValueBigint.get(s.getStartValue()), + // MINIMUM_VALUE + 
ValueBigint.get(s.getMinValue()), + // MAXIMUM_VALUE + ValueBigint.get(s.getMaxValue()), + // INCREMENT + ValueBigint.get(s.getIncrement()), + // CYCLE_OPTION + s.getCycle().isCycle() ? "YES" : "NO", + // DECLARED_DATA_TYPE + dataTypeName, + // DECLARED_NUMERIC_PRECISION + ValueInteger.get((int) dataType.getPrecision()), + // DECLARED_NUMERIC_SCALE + declaredScale, + // CURRENT_VALUE + ValueBigint.get(s.getCurrentValue()), + // IS_GENERATED + ValueBoolean.get(s.getBelongsToTable()), + // REMARKS + replaceNullWithEmpty(s.getComment()), + // CACHE + ValueBigint.get(s.getCacheSize()), + // ID + ValueInteger.get(s.getId()), + // MIN_VALUE + ValueBigint.get(s.getMinValue()), + // MAX_VALUE + ValueBigint.get(s.getMaxValue()), + // IS_CYCLE + ValueBoolean.get(s.getCycle().isCycle()) + ); + } + break; + } + case USERS: { + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (rightOwner instanceof User) { + User u = (User) rightOwner; + if (admin || session.getUser() == u) { + add(session, + rows, + // NAME + identifier(u.getName()), + // ADMIN + String.valueOf(u.isAdmin()), + // REMARKS + replaceNullWithEmpty(u.getComment()), + // ID + ValueInteger.get(u.getId()) + ); + } + } + } + break; + } + case ROLES: { + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (rightOwner instanceof Role) { + Role r = (Role) rightOwner; + if (admin || session.getUser().isRoleGranted(r)) { + add(session, + rows, + // NAME + identifier(r.getName()), + // REMARKS + replaceNullWithEmpty(r.getComment()), + // ID + ValueInteger.get(r.getId()) + ); + } + } + } + break; + } + case RIGHTS: { + if (admin) { + for (Right r : database.getAllRights()) { + Role role = r.getGrantedRole(); + DbObject grantee = r.getGrantee(); + String rightType = grantee.getType() == DbObject.USER ? 
"USER" : "ROLE"; + if (role == null) { + DbObject object = r.getGrantedObject(); + Schema schema = null; + Table table = null; + if (object != null) { + if (object instanceof Schema) { + schema = (Schema) object; + } else if (object instanceof Table) { + table = (Table) object; + schema = table.getSchema(); + } + } + String tableName = (table != null) ? table.getName() : ""; + String schemaName = (schema != null) ? schema.getName() : ""; + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + add(session, + rows, + // GRANTEE + identifier(grantee.getName()), + // GRANTEETYPE + rightType, + // GRANTEDROLE + "", + // RIGHTS + r.getRights(), + // TABLE_SCHEMA + schemaName, + // TABLE_NAME + tableName, + // ID + ValueInteger.get(r.getId()) + ); + } else { + add(session, + rows, + // GRANTEE + identifier(grantee.getName()), + // GRANTEETYPE + rightType, + // GRANTEDROLE + identifier(role.getName()), + // RIGHTS + "", + // TABLE_SCHEMA + "", + // TABLE_NAME + "", + // ID + ValueInteger.get(r.getId()) + ); + } + } + } + break; + } + case FUNCTION_ALIASES: + for (Schema schema : database.getAllSchemas()) { + for (UserDefinedFunction userDefinedFunction : schema.getAllFunctionsAndAggregates()) { + if (userDefinedFunction instanceof FunctionAlias) { + FunctionAlias alias = (FunctionAlias) userDefinedFunction; + JavaMethod[] methods; + try { + methods = alias.getJavaMethods(); + } catch (DbException e) { + continue; + } + for (FunctionAlias.JavaMethod method : methods) { + TypeInfo typeInfo = method.getDataType(); + if (typeInfo == null) { + typeInfo = TypeInfo.TYPE_NULL; + } + add(session, + rows, + // ALIAS_CATALOG + catalog, + // ALIAS_SCHEMA + alias.getSchema().getName(), + // ALIAS_NAME + alias.getName(), + // JAVA_CLASS + alias.getJavaClassName(), + // JAVA_METHOD + alias.getJavaMethodName(), + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(typeInfo)), + // TYPE_NAME + typeInfo.getDeclaredTypeName(), + // COLUMN_COUNT + 
ValueInteger.get(method.getParameterCount()), + // RETURNS_RESULT + ValueSmallint.get(typeInfo.getValueType() == Value.NULL + ? (short) DatabaseMetaData.procedureNoResult + : (short) DatabaseMetaData.procedureReturnsResult), + // REMARKS + replaceNullWithEmpty(alias.getComment()), + // ID + ValueInteger.get(alias.getId()), + // SOURCE + alias.getSource() + // when adding more columns, see also below + ); + } + } else { + add(session, + rows, + // ALIAS_CATALOG + catalog, + // ALIAS_SCHEMA + database.getMainSchema().getName(), + // ALIAS_NAME + userDefinedFunction.getName(), + // JAVA_CLASS + userDefinedFunction.getJavaClassName(), + // JAVA_METHOD + "", + // DATA_TYPE + ValueInteger.get(Types.NULL), + // TYPE_NAME + "NULL", + // COLUMN_COUNT + ValueInteger.get(1), + // RETURNS_RESULT + ValueSmallint.get((short) DatabaseMetaData.procedureReturnsResult), + // REMARKS + replaceNullWithEmpty(userDefinedFunction.getComment()), + // ID + ValueInteger.get(userDefinedFunction.getId()), + // SOURCE + "" + // when adding more columns, see also below + ); + } + } + } + break; + case FUNCTION_COLUMNS: + for (Schema schema : database.getAllSchemas()) { + for (UserDefinedFunction userDefinedFunction : schema.getAllFunctionsAndAggregates()) { + if (userDefinedFunction instanceof FunctionAlias) { + FunctionAlias alias = (FunctionAlias) userDefinedFunction; + JavaMethod[] methods; + try { + methods = alias.getJavaMethods(); + } catch (DbException e) { + continue; + } + for (FunctionAlias.JavaMethod method : methods) { + // Add return column index 0 + TypeInfo typeInfo = method.getDataType(); + if (typeInfo != null && typeInfo.getValueType() != Value.NULL) { + DataType dt = DataType.getDataType(typeInfo.getValueType()); + add(session, + rows, + // ALIAS_CATALOG + catalog, + // ALIAS_SCHEMA + alias.getSchema().getName(), + // ALIAS_NAME + alias.getName(), + // JAVA_CLASS + alias.getJavaClassName(), + // JAVA_METHOD + alias.getJavaMethodName(), + // COLUMN_COUNT + 
ValueInteger.get(method.getParameterCount()), + // POS + ValueInteger.get(0), + // COLUMN_NAME + "P0", + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(typeInfo)), + // TYPE_NAME + typeInfo.getDeclaredTypeName(), + // PRECISION + ValueInteger.get(MathUtils.convertLongToInt(dt.defaultPrecision)), + // SCALE + ValueSmallint.get(MathUtils.convertIntToShort(dt.defaultScale)), + // RADIX + ValueSmallint.get((short) 10), + // NULLABLE + ValueSmallint.get((short) DatabaseMetaData.columnNullableUnknown), + // COLUMN_TYPE + ValueSmallint.get((short) DatabaseMetaData.procedureColumnReturn), + // REMARKS + "", + // COLUMN_DEFAULT + null + ); + } + Class[] columnList = method.getColumnClasses(); + for (int k = 0; k < columnList.length; k++) { + if (method.hasConnectionParam() && k == 0) { + continue; + } + Class clazz = columnList[k]; + TypeInfo columnTypeInfo = ValueToObjectConverter2.classToType(clazz); + DataType dt = DataType.getDataType(columnTypeInfo.getValueType()); + add(session, + rows, + // ALIAS_CATALOG + catalog, + // ALIAS_SCHEMA + alias.getSchema().getName(), + // ALIAS_NAME + alias.getName(), + // JAVA_CLASS + alias.getJavaClassName(), + // JAVA_METHOD + alias.getJavaMethodName(), + // COLUMN_COUNT + ValueInteger.get(method.getParameterCount()), + // POS + ValueInteger.get(k + (method.hasConnectionParam() ? 0 : 1)), + // COLUMN_NAME + "P" + (k + 1), + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(columnTypeInfo)), + // TYPE_NAME + columnTypeInfo.getDeclaredTypeName(), + // PRECISION + ValueInteger.get(MathUtils.convertLongToInt(dt.defaultPrecision)), + // SCALE + ValueSmallint.get(MathUtils.convertIntToShort(dt.defaultScale)), + // RADIX + ValueSmallint.get((short) 10), + // NULLABLE + ValueSmallint.get(clazz.isPrimitive() + ? 
(short) DatabaseMetaData.columnNoNulls + : (short) DatabaseMetaData.columnNullable), + // COLUMN_TYPE + ValueSmallint.get((short) DatabaseMetaData.procedureColumnIn), + // REMARKS + "", + // COLUMN_DEFAULT + null + ); + } + } + } + } + } + break; + case SCHEMATA: { + String collation = database.getCompareMode().getName(); + for (Schema schema : database.getAllSchemas()) { + add(session, + rows, + // CATALOG_NAME + catalog, + // SCHEMA_NAME + schema.getName(), + // SCHEMA_OWNER + identifier(schema.getOwner().getName()), + // DEFAULT_CHARACTER_SET_NAME + CHARACTER_SET_NAME, + // DEFAULT_COLLATION_NAME + collation, + // IS_DEFAULT + ValueBoolean.get(schema.getId() == Constants.MAIN_SCHEMA_ID), + // REMARKS + replaceNullWithEmpty(schema.getComment()), + // ID + ValueInteger.get(schema.getId()) + ); + } + break; + } + case TABLE_PRIVILEGES: { + for (Right r : database.getAllRights()) { + DbObject object = r.getGrantedObject(); + if (!(object instanceof Table)) { + continue; + } + Table table = (Table) object; + if (hideTable(table, session)) { + continue; + } + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + addPrivileges(session, rows, r.getGrantee(), catalog, table, null, r.getRightMask()); + } + break; + } + case COLUMN_PRIVILEGES: { + for (Right r : database.getAllRights()) { + DbObject object = r.getGrantedObject(); + if (!(object instanceof Table)) { + continue; + } + Table table = (Table) object; + if (hideTable(table, session)) { + continue; + } + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + DbObject grantee = r.getGrantee(); + int mask = r.getRightMask(); + for (Column column : table.getColumns()) { + addPrivileges(session, rows, grantee, catalog, table, column.getName(), mask); + } + } + break; + } + case COLLATIONS: { + for (Locale l : CompareMode.getCollationLocales(false)) { + add(session, + rows, + // NAME + 
CompareMode.getName(l), // KEY + l.toString() + ); + } + break; + } + case VIEWS: { + for (Table table : getAllTables(session)) { + if (table.getTableType() != TableType.VIEW) { + continue; + } + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + TableView view = (TableView) table; + add(session, + rows, + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // VIEW_DEFINITION + table.getCreateSQL(), + // CHECK_OPTION + "NONE", + // IS_UPDATABLE + "NO", + // STATUS + view.isInvalid() ? "INVALID" : "VALID", + // REMARKS + replaceNullWithEmpty(view.getComment()), + // ID + ValueInteger.get(view.getId()) + ); + } + break; + } + case IN_DOUBT: { + ArrayList prepared = database.getInDoubtTransactions(); + if (prepared != null && admin) { + for (InDoubtTransaction prep : prepared) { + add(session, + rows, + // TRANSACTION + prep.getTransactionName(), // STATE + prep.getStateDescription() + ); + } + } + break; + } + case CROSS_REFERENCES: { + for (SchemaObject obj : getAllSchemaObjects( + DbObject.CONSTRAINT)) { + Constraint constraint = (Constraint) obj; + if (constraint.getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + ConstraintReferential ref = (ConstraintReferential) constraint; + IndexColumn[] cols = ref.getColumns(); + IndexColumn[] refCols = ref.getRefColumns(); + Table tab = ref.getTable(); + Table refTab = ref.getRefTable(); + String tableName = refTab.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + ValueSmallint update = ValueSmallint.get(getRefAction(ref.getUpdateAction())); + ValueSmallint delete = ValueSmallint.get(getRefAction(ref.getDeleteAction())); + for (int j = 0; j < cols.length; j++) { + add(session, + rows, + // PKTABLE_CATALOG + catalog, + // PKTABLE_SCHEMA + refTab.getSchema().getName(), + // PKTABLE_NAME + refTab.getName(), + // PKCOLUMN_NAME + 
refCols[j].column.getName(), + // FKTABLE_CATALOG + catalog, + // FKTABLE_SCHEMA + tab.getSchema().getName(), + // FKTABLE_NAME + tab.getName(), + // FKCOLUMN_NAME + cols[j].column.getName(), + // ORDINAL_POSITION + ValueSmallint.get((short) (j + 1)), + // UPDATE_RULE + update, + // DELETE_RULE + delete, + // FK_NAME + ref.getName(), + // PK_NAME + ref.getReferencedConstraint().getName(), + // DEFERRABILITY + ValueSmallint.get((short) DatabaseMetaData.importedKeyNotDeferrable) + ); + } + } + break; + } + case CONSTRAINTS: { + for (SchemaObject obj : getAllSchemaObjects( + DbObject.CONSTRAINT)) { + Constraint constraint = (Constraint) obj; + Constraint.Type constraintType = constraint.getConstraintType(); + String checkExpression = null; + IndexColumn[] indexColumns = null; + Table table = constraint.getTable(); + if (hideTable(table, session)) { + continue; + } + Index index = constraint.getIndex(); + String uniqueIndexName = null; + if (index != null) { + uniqueIndexName = index.getName(); + } + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + if (constraintType == Constraint.Type.CHECK) { + checkExpression = ((ConstraintCheck) constraint).getExpression().getSQL(HasSQL.DEFAULT_SQL_FLAGS); + } else if (constraintType == Constraint.Type.UNIQUE || + constraintType == Constraint.Type.PRIMARY_KEY) { + indexColumns = ((ConstraintUnique) constraint).getColumns(); + } else if (constraintType == Constraint.Type.REFERENTIAL) { + indexColumns = ((ConstraintReferential) constraint).getColumns(); + } + String columnList = null; + if (indexColumns != null) { + StringBuilder builder = new StringBuilder(); + for (int i = 0, length = indexColumns.length; i < length; i++) { + if (i > 0) { + builder.append(','); + } + builder.append(indexColumns[i].column.getName()); + } + columnList = builder.toString(); + } + add(session, + rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + 
constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraint.getName(), + // CONSTRAINT_TYPE + constraintType == Constraint.Type.PRIMARY_KEY ? + constraintType.getSqlName() : constraintType.name(), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // UNIQUE_INDEX_NAME + uniqueIndexName, + // CHECK_EXPRESSION + checkExpression, + // COLUMN_LIST + columnList, + // REMARKS + replaceNullWithEmpty(constraint.getComment()), + // SQL + constraint.getCreateSQL(), + // ID + ValueInteger.get(constraint.getId()) + ); + } + break; + } + case CONSTANTS: { + for (SchemaObject obj : getAllSchemaObjects( + DbObject.CONSTANT)) { + Constant constant = (Constant) obj; + ValueExpression expr = constant.getValue(); + add(session, + rows, + // CONSTANT_CATALOG + catalog, + // CONSTANT_SCHEMA + constant.getSchema().getName(), + // CONSTANT_NAME + constant.getName(), + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(expr.getType())), + // REMARKS + replaceNullWithEmpty(constant.getComment()), + // SQL + expr.getSQL(DEFAULT_SQL_FLAGS), + // ID + ValueInteger.get(constant.getId()) + ); + } + break; + } + case DOMAINS: { + for (SchemaObject obj : getAllSchemaObjects(DbObject.DOMAIN)) { + Domain domain = (Domain) obj; + Domain parentDomain = domain.getDomain(); + TypeInfo typeInfo = domain.getDataType(); + add(session, + rows, + // DOMAIN_CATALOG + catalog, + // DOMAIN_SCHEMA + domain.getSchema().getName(), + // DOMAIN_NAME + domain.getName(), + // DOMAIN_DEFAULT + domain.getDefaultSQL(), + // DOMAIN_ON_UPDATE + domain.getOnUpdateSQL(), + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(typeInfo)), + // PRECISION + ValueInteger.get(MathUtils.convertLongToInt(typeInfo.getPrecision())), + // SCALE + ValueInteger.get(typeInfo.getScale()), + // TYPE_NAME + typeInfo.getDeclaredTypeName(), + // PARENT_DOMAIN_CATALOG + parentDomain != null ? catalog : null, + // PARENT_DOMAIN_SCHEMA + parentDomain != null ? 
parentDomain.getSchema().getName() : null, + // PARENT_DOMAIN_NAME + parentDomain != null ? parentDomain.getName() : null, + // SELECTIVITY INT + ValueInteger.get(Constants.SELECTIVITY_DEFAULT), + // REMARKS + replaceNullWithEmpty(domain.getComment()), + // SQL + domain.getCreateSQL(), + // ID + ValueInteger.get(domain.getId()), + // COLUMN_DEFAULT + domain.getDefaultSQL(), + // IS_NULLABLE + "YES", + // CHECK_CONSTRAINT + null + ); + } + break; + } + case TRIGGERS: { + for (SchemaObject obj : getAllSchemaObjects( + DbObject.TRIGGER)) { + TriggerObject trigger = (TriggerObject) obj; + Table table = trigger.getTable(); + add(session, + rows, + // TRIGGER_CATALOG + catalog, + // TRIGGER_SCHEMA + trigger.getSchema().getName(), + // TRIGGER_NAME + trigger.getName(), + // TRIGGER_TYPE + trigger.getTypeNameList(new StringBuilder()).toString(), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + table.getName(), + // BEFORE + ValueBoolean.get(trigger.isBefore()), + // JAVA_CLASS + trigger.getTriggerClassName(), + // QUEUE_SIZE + ValueInteger.get(trigger.getQueueSize()), + // NO_WAIT + ValueBoolean.get(trigger.isNoWait()), + // REMARKS + replaceNullWithEmpty(trigger.getComment()), + // SQL + trigger.getCreateSQL(), + // ID + ValueInteger.get(trigger.getId()) + ); + } + break; + } + case SESSIONS: { + for (SessionLocal s : database.getSessions(false)) { + if (admin || s == session) { + NetworkConnectionInfo networkConnectionInfo = s.getNetworkConnectionInfo(); + Command command = s.getCurrentCommand(); + int blockingSessionId = s.getBlockingSessionId(); + add(session, + rows, + // ID + ValueInteger.get(s.getId()), + // USER_NAME + s.getUser().getName(), + // SERVER + networkConnectionInfo == null ? null : networkConnectionInfo.getServer(), + // CLIENT_ADDR + networkConnectionInfo == null ? null : networkConnectionInfo.getClient(), + // CLIENT_INFO + networkConnectionInfo == null ? 
null : networkConnectionInfo.getClientInfo(), + // SESSION_START + s.getSessionStart(), + // ISOLATION_LEVEL + session.getIsolationLevel().getSQL(), + // STATEMENT + command == null ? null : command.toString(), + // STATEMENT_START + command == null ? null : s.getCommandStartOrEnd(), + // CONTAINS_UNCOMMITTED + ValueBoolean.get(s.hasPendingTransaction()), + // STATE + String.valueOf(s.getState()), + // BLOCKER_ID + blockingSessionId == 0 ? null : ValueInteger.get(blockingSessionId), + // SLEEP_SINCE + s.getState() == State.SLEEP ? s.getCommandStartOrEnd() : null + ); + } + } + break; + } + case LOCKS: { + for (SessionLocal s : database.getSessions(false)) { + if (admin || s == session) { + for (Table table : s.getLocks()) { + add(session, + rows, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + table.getName(), + // SESSION_ID + ValueInteger.get(s.getId()), + // LOCK_TYPE + table.isLockedExclusivelyBy(s) ? "WRITE" : "READ" + ); + } + } + } + break; + } + case SESSION_STATE: { + for (String name : session.getVariableNames()) { + Value v = session.getVariable(name); + StringBuilder builder = new StringBuilder().append("SET @").append(name).append(' '); + v.getSQL(builder, DEFAULT_SQL_FLAGS); + add(session, + rows, + // KEY + "@" + name, + // SQL + builder.toString() + ); + } + for (Table table : session.getLocalTempTables()) { + add(session, + rows, + // KEY + "TABLE " + table.getName(), + // SQL + table.getCreateSQL() + ); + } + String[] path = session.getSchemaSearchPath(); + if (path != null && path.length > 0) { + StringBuilder builder = new StringBuilder("SET SCHEMA_SEARCH_PATH "); + for (int i = 0, l = path.length; i < l; i++) { + if (i > 0) { + builder.append(", "); + } + StringUtils.quoteIdentifier(builder, path[i]); + } + add(session, + rows, + // KEY + "SCHEMA_SEARCH_PATH", + // SQL + builder.toString() + ); + } + String schema = session.getCurrentSchemaName(); + if (schema != null) { + add(session, + rows, + // KEY + "SCHEMA", + // SQL + 
StringUtils.quoteIdentifier(new StringBuilder("SET SCHEMA "), schema).toString() + ); + } + TimeZoneProvider currentTimeZone = session.currentTimeZone(); + if (!currentTimeZone.equals(DateTimeUtils.getTimeZone())) { + add(session, + rows, + // KEY + "TIME ZONE", + // SQL + StringUtils.quoteStringSQL(new StringBuilder("SET TIME ZONE "), currentTimeZone.getId()) + .toString() + ); + } + break; + } + case QUERY_STATISTICS: { + QueryStatisticsData control = database.getQueryStatisticsData(); + if (control != null) { + for (QueryStatisticsData.QueryEntry entry : control.getQueries()) { + add(session, + rows, + // SQL_STATEMENT + entry.sqlStatement, + // EXECUTION_COUNT + ValueInteger.get(entry.count), + // MIN_EXECUTION_TIME + ValueDouble.get(entry.executionTimeMinNanos / 1_000_000d), + // MAX_EXECUTION_TIME + ValueDouble.get(entry.executionTimeMaxNanos / 1_000_000d), + // CUMULATIVE_EXECUTION_TIME + ValueDouble.get(entry.executionTimeCumulativeNanos / 1_000_000d), + // AVERAGE_EXECUTION_TIME + ValueDouble.get(entry.executionTimeMeanNanos / 1_000_000d), + // STD_DEV_EXECUTION_TIME + ValueDouble.get(entry.getExecutionTimeStandardDeviation() / 1_000_000d), + // MIN_ROW_COUNT + ValueBigint.get(entry.rowCountMin), + // MAX_ROW_COUNT + ValueBigint.get(entry.rowCountMax), + // CUMULATIVE_ROW_COUNT + ValueBigint.get(entry.rowCountCumulative), + // AVERAGE_ROW_COUNT + ValueDouble.get(entry.rowCountMean), + // STD_DEV_ROW_COUNT + ValueDouble.get(entry.getRowCountStandardDeviation()) + ); + } + } + break; + } + case SYNONYMS: { + for (TableSynonym synonym : database.getAllSynonyms()) { + add(session, + rows, + // SYNONYM_CATALOG + catalog, + // SYNONYM_SCHEMA + synonym.getSchema().getName(), + // SYNONYM_NAME + synonym.getName(), + // SYNONYM_FOR + synonym.getSynonymForName(), + // SYNONYM_FOR_SCHEMA + synonym.getSynonymForSchema().getName(), + // TYPE NAME + "SYNONYM", + // STATUS + "VALID", + // REMARKS + replaceNullWithEmpty(synonym.getComment()), + // ID + 
ValueInteger.get(synonym.getId()) + ); + } + break; + } + case TABLE_CONSTRAINTS: { + for (SchemaObject obj : getAllSchemaObjects(DbObject.CONSTRAINT)) { + Constraint constraint = (Constraint) obj; + Constraint.Type constraintType = constraint.getConstraintType(); + if (constraintType == Constraint.Type.DOMAIN) { + continue; + } + Table table = constraint.getTable(); + if (hideTable(table, session)) { + continue; + } + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + add(session, + rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraint.getName(), + // CONSTRAINT_TYPE + constraintType.getSqlName(), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // IS_DEFERRABLE + "NO", + // INITIALLY_DEFERRED + "NO", + // REMARKS + replaceNullWithEmpty(constraint.getComment()), + // SQL + constraint.getCreateSQL(), + // ID + ValueInteger.get(constraint.getId()) + ); + } + break; + } + case DOMAIN_CONSTRAINTS: { + for (SchemaObject obj : getAllSchemaObjects(DbObject.CONSTRAINT)) { + if (((Constraint) obj).getConstraintType() != Constraint.Type.DOMAIN) { + continue; + } + ConstraintDomain constraint = (ConstraintDomain) obj; + Domain domain = constraint.getDomain(); + add(session, + rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraint.getName(), + // DOMAIN_CATALOG + catalog, + // DOMAIN_SCHEMA + domain.getSchema().getName(), + // DOMAIN_NAME + domain.getName(), + // IS_DEFERRABLE + "NO", + // INITIALLY_DEFERRED + "NO", + // REMARKS + replaceNullWithEmpty(constraint.getComment()), + // SQL + constraint.getCreateSQL(), + // ID + ValueInteger.get(constraint.getId()) + ); + } + break; + } + case KEY_COLUMN_USAGE: { + for (SchemaObject obj : getAllSchemaObjects(DbObject.CONSTRAINT)) { + Constraint 
constraint = (Constraint) obj; + Constraint.Type constraintType = constraint.getConstraintType(); + IndexColumn[] indexColumns = null; + if (constraintType == Constraint.Type.UNIQUE || constraintType == Constraint.Type.PRIMARY_KEY) { + indexColumns = ((ConstraintUnique) constraint).getColumns(); + } else if (constraintType == Constraint.Type.REFERENTIAL) { + indexColumns = ((ConstraintReferential) constraint).getColumns(); + } + if (indexColumns == null) { + continue; + } + Table table = constraint.getTable(); + if (hideTable(table, session)) { + continue; + } + String tableName = table.getName(); + if (!checkIndex(session, tableName, indexFrom, indexTo)) { + continue; + } + ConstraintUnique referenced; + if (constraintType == Constraint.Type.REFERENTIAL) { + referenced = ((ConstraintReferential) constraint).getReferencedConstraint(); + } else { + referenced = null; + } + Index index = constraint.getIndex(); + for (int i = 0; i < indexColumns.length; i++) { + IndexColumn indexColumn = indexColumns[i]; + ValueInteger ordinalPosition = ValueInteger.get(i + 1); + ValueInteger positionInUniqueConstraint = null; + if (referenced != null) { + Column c = ((ConstraintReferential) constraint).getRefColumns()[i].column; + IndexColumn[] refColumns = referenced.getColumns(); + for (int j = 0; j < refColumns.length; j++) { + if (refColumns[j].column.equals(c)) { + positionInUniqueConstraint = ValueInteger.get(j + 1); + break; + } + } + } + add(session, + rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraint.getName(), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + tableName, + // COLUMN_NAME + indexColumn.columnName, + // ORDINAL_POSITION + ordinalPosition, + // POSITION_IN_UNIQUE_CONSTRAINT + positionInUniqueConstraint, + // INDEX_CATALOG + index != null ? catalog : null, + // INDEX_SCHEMA + index != null ? 
index.getSchema().getName() : null, + // INDEX_NAME + index != null ? index.getName() : null + ); + } + } + break; + } + case REFERENTIAL_CONSTRAINTS: { + for (SchemaObject obj : getAllSchemaObjects(DbObject.CONSTRAINT)) { + if (((Constraint) obj).getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + ConstraintReferential constraint = (ConstraintReferential) obj; + Table table = constraint.getTable(); + if (hideTable(table, session)) { + continue; + } + ConstraintUnique unique = constraint.getReferencedConstraint(); + add(session, + rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // CONSTRAINT_NAME + constraint.getName(), + // UNIQUE_CONSTRAINT_CATALOG + catalog, + // UNIQUE_CONSTRAINT_SCHEMA + unique.getSchema().getName(), + // UNIQUE_CONSTRAINT_NAME + unique.getName(), + // MATCH_OPTION + "NONE", + // UPDATE_RULE + constraint.getUpdateAction().getSqlName(), + // DELETE_RULE + constraint.getDeleteAction().getSqlName() + ); + } + break; + } + case CHECK_CONSTRAINTS: { + for (SchemaObject obj : getAllSchemaObjects(DbObject.CONSTRAINT)) { + Constraint constraint = (Constraint) obj; + Type constraintType = constraint.getConstraintType(); + if (constraintType == Constraint.Type.CHECK) { + ConstraintCheck check = (ConstraintCheck) obj; + Table table = check.getTable(); + if (hideTable(table, session)) { + continue; + } + } else if (constraintType != Constraint.Type.DOMAIN) { + continue; + } + add(session, + rows, + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + obj.getSchema().getName(), + // CONSTRAINT_NAME + obj.getName(), + // CHECK_CLAUSE + constraint.getExpression().getSQL(DEFAULT_SQL_FLAGS, Expression.WITHOUT_PARENTHESES) + ); + } + break; + } + case CONSTRAINT_COLUMN_USAGE: { + for (SchemaObject obj : getAllSchemaObjects(DbObject.CONSTRAINT)) { + Constraint constraint = (Constraint) obj; + switch (constraint.getConstraintType()) { + case CHECK: + case DOMAIN: { + HashSet columns = 
new HashSet<>(); + constraint.getExpression().isEverything(ExpressionVisitor.getColumnsVisitor(columns, null)); + for (Column column: columns) { + Table table = column.getTable(); + if (checkIndex(session, table.getName(), indexFrom, indexTo) && !hideTable(table, session)) { + addConstraintColumnUsage(session, rows, catalog, constraint, column); + } + } + break; + } + case REFERENTIAL: { + Table table = constraint.getRefTable(); + if (checkIndex(session, table.getName(), indexFrom, indexTo) && !hideTable(table, session)) { + for (Column column : constraint.getReferencedColumns(table)) { + addConstraintColumnUsage(session, rows, catalog, constraint, column); + } + } + } + //$FALL-THROUGH$ + case PRIMARY_KEY: + case UNIQUE: { + Table table = constraint.getTable(); + if (checkIndex(session, table.getName(), indexFrom, indexTo) && !hideTable(table, session)) { + for (Column column : constraint.getReferencedColumns(table)) { + addConstraintColumnUsage(session, rows, catalog, constraint, column); + } + } + } + } + } + break; + } + default: + throw DbException.getInternalError("type=" + type); + } + return rows; + } + + private static short getRefAction(ConstraintActionType action) { + switch (action) { + case CASCADE: + return DatabaseMetaData.importedKeyCascade; + case RESTRICT: + return DatabaseMetaData.importedKeyRestrict; + case SET_DEFAULT: + return DatabaseMetaData.importedKeySetDefault; + case SET_NULL: + return DatabaseMetaData.importedKeySetNull; + default: + throw DbException.getInternalError("action="+action); + } + } + + private void addConstraintColumnUsage(SessionLocal session, ArrayList rows, String catalog, + Constraint constraint, Column column) { + Table table = column.getTable(); + add(session, + rows, + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + table.getName(), + // COLUMN_NAME + column.getName(), + // CONSTRAINT_CATALOG + catalog, + // CONSTRAINT_SCHEMA + constraint.getSchema().getName(), + // 
CONSTRAINT_NAME + constraint.getName() + ); + } + + private void addPrivileges(SessionLocal session, ArrayList rows, DbObject grantee, + String catalog, Table table, String column, int rightMask) { + if ((rightMask & Right.SELECT) != 0) { + addPrivilege(session, rows, grantee, catalog, table, column, "SELECT"); + } + if ((rightMask & Right.INSERT) != 0) { + addPrivilege(session, rows, grantee, catalog, table, column, "INSERT"); + } + if ((rightMask & Right.UPDATE) != 0) { + addPrivilege(session, rows, grantee, catalog, table, column, "UPDATE"); + } + if ((rightMask & Right.DELETE) != 0) { + addPrivilege(session, rows, grantee, catalog, table, column, "DELETE"); + } + } + + private void addPrivilege(SessionLocal session, ArrayList rows, DbObject grantee, + String catalog, Table table, String column, String right) { + String isGrantable = "NO"; + if (grantee.getType() == DbObject.USER) { + User user = (User) grantee; + if (user.isAdmin()) { + // the right is grantable if the grantee is an admin + isGrantable = "YES"; + } + } + if (column == null) { + add(session, + rows, + // GRANTOR + null, + // GRANTEE + identifier(grantee.getName()), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + table.getName(), + // PRIVILEGE_TYPE + right, + // IS_GRANTABLE + isGrantable + ); + } else { + add(session, + rows, + // GRANTOR + null, + // GRANTEE + identifier(grantee.getName()), + // TABLE_CATALOG + catalog, + // TABLE_SCHEMA + table.getSchema().getName(), + // TABLE_NAME + table.getName(), + // COLUMN_NAME + column, + // PRIVILEGE_TYPE + right, + // IS_GRANTABLE + isGrantable + ); + } + } + + private ArrayList getAllSchemaObjects(int type) { + ArrayList list = new ArrayList<>(); + for (Schema schema : database.getAllSchemas()) { + schema.getAll(type, list); + } + return list; + } + + /** + * Get all tables of this database, including local temporary tables for the + * session. 
+ * + * @param session the session + * @return the array of tables + */ + private ArrayList
          getAllTables(SessionLocal session) { + ArrayList
          tables = new ArrayList<>(); + for (Schema schema : database.getAllSchemas()) { + tables.addAll(schema.getAllTablesAndViews(session)); + } + tables.addAll(session.getLocalTempTables()); + return tables; + } + + private ArrayList
          getTablesByName(SessionLocal session, String tableName) { + // we expect that at most one table matches, at least in most cases + ArrayList
          tables = new ArrayList<>(1); + for (Schema schema : database.getAllSchemas()) { + Table table = schema.getTableOrViewByName(session, tableName); + if (table != null) { + tables.add(table); + } + } + Table table = session.findLocalTempTable(tableName); + if (table != null) { + tables.add(table); + } + return tables; + } + + @Override + public long getMaxDataModificationId() { + switch (type) { + case SETTINGS: + case SEQUENCES: + case IN_DOUBT: + case SESSIONS: + case LOCKS: + case SESSION_STATE: + return Long.MAX_VALUE; + } + return database.getModificationDataId(); + } + +} diff --git a/h2/src/main/org/h2/table/JoinBatch.java b/h2/src/main/org/h2/table/JoinBatch.java deleted file mode 100644 index 546f907b5a..0000000000 --- a/h2/src/main/org/h2/table/JoinBatch.java +++ /dev/null @@ -1,1128 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.table; - -import java.util.AbstractList; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.Future; - -import org.h2.command.dml.Query; -import org.h2.command.dml.Select; -import org.h2.command.dml.SelectUnion; -import org.h2.index.BaseIndex; -import org.h2.index.Cursor; -import org.h2.index.IndexCursor; -import org.h2.index.IndexLookupBatch; -import org.h2.index.ViewCursor; -import org.h2.index.ViewIndex; -import org.h2.message.DbException; -import org.h2.result.ResultInterface; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.util.DoneFuture; -import org.h2.util.LazyFuture; -import org.h2.util.Utils; -import org.h2.value.Value; -import org.h2.value.ValueLong; - -/** - * Support for asynchronous batched index lookups on joins. 
- * - * @see BaseIndex#createLookupBatch(org.h2.table.TableFilter[], int) - * @see IndexLookupBatch - * @author Sergi Vladykin - */ -public final class JoinBatch { - - /** - * An empty cursor. - */ - static final Cursor EMPTY_CURSOR = new Cursor() { - @Override - public boolean previous() { - return false; - } - - @Override - public boolean next() { - return false; - } - - @Override - public SearchRow getSearchRow() { - return null; - } - - @Override - public Row get() { - return null; - } - - @Override - public String toString() { - return "EMPTY_CURSOR"; - } - }; - - /** - * An empty future cursor. - */ - static final Future EMPTY_FUTURE_CURSOR = new DoneFuture<>(EMPTY_CURSOR); - - /** - * The top cursor. - */ - Future viewTopFutureCursor; - - /** - * The top filter. - */ - JoinFilter top; - - /** - * The filters. - */ - final JoinFilter[] filters; - - /** - * Whether this is a batched subquery. - */ - boolean batchedSubQuery; - - private boolean started; - - private JoinRow current; - private boolean found; - - /** - * This filter joined after this batched join and can be used normally. - */ - private final TableFilter additionalFilter; - - /** - * @param filtersCount number of filters participating in this batched join - * @param additionalFilter table filter after this batched join. - */ - public JoinBatch(int filtersCount, TableFilter additionalFilter) { - if (filtersCount > 32) { - // This is because we store state in a 64 bit field, 2 bits per - // joined table. - throw DbException.getUnsupportedException( - "Too many tables in join (at most 32 supported)."); - } - filters = new JoinFilter[filtersCount]; - this.additionalFilter = additionalFilter; - } - - /** - * Get the lookup batch for the given table filter. - * - * @param joinFilterId joined table filter id - * @return lookup batch - */ - public IndexLookupBatch getLookupBatch(int joinFilterId) { - return filters[joinFilterId].lookupBatch; - } - - /** - * Reset state of this batch. 
- * - * @param beforeQuery {@code true} if reset was called before the query run, - * {@code false} if after - */ - public void reset(boolean beforeQuery) { - current = null; - started = false; - found = false; - for (JoinFilter jf : filters) { - jf.reset(beforeQuery); - } - if (beforeQuery && additionalFilter != null) { - additionalFilter.reset(); - } - } - - /** - * Register the table filter and lookup batch. - * - * @param filter table filter - * @param lookupBatch lookup batch - */ - public void register(TableFilter filter, IndexLookupBatch lookupBatch) { - assert filter != null; - top = new JoinFilter(lookupBatch, filter, top); - filters[top.id] = top; - } - - /** - * Get the value for the given column. - * - * @param filterId table filter id - * @param column the column - * @return column value for current row - */ - public Value getValue(int filterId, Column column) { - if (current == null) { - return null; - } - Object x = current.row(filterId); - assert x != null; - Row row = current.isRow(filterId) ? 
(Row) x : ((Cursor) x).get(); - int columnId = column.getColumnId(); - if (columnId == -1) { - return ValueLong.get(row.getKey()); - } - Value value = row.getValue(column.getColumnId()); - if (value == null) { - throw DbException.throwInternalError("value is null: " + column + " " + row); - } - return value; - } - - private void start() { - // initialize current row - current = new JoinRow(new Object[filters.length]); - // initialize top cursor - Cursor cursor; - if (batchedSubQuery) { - assert viewTopFutureCursor != null; - cursor = get(viewTopFutureCursor); - } else { - // setup usual index cursor - TableFilter f = top.filter; - IndexCursor indexCursor = f.getIndexCursor(); - indexCursor.find(f.getSession(), f.getIndexConditions()); - cursor = indexCursor; - } - current.updateRow(top.id, cursor, JoinRow.S_NULL, JoinRow.S_CURSOR); - // we need fake first row because batchedNext always will move to the - // next row - JoinRow fake = new JoinRow(null); - fake.next = current; - current = fake; - } - - /** - * Get next row from the join batch. - * - * @return true if there is a next row - */ - public boolean next() { - if (!started) { - start(); - started = true; - } - if (additionalFilter == null) { - if (batchedNext()) { - assert current.isComplete(); - return true; - } - return false; - } - while (true) { - if (!found) { - if (!batchedNext()) { - return false; - } - assert current.isComplete(); - found = true; - additionalFilter.reset(); - } - // we call furtherFilter in usual way outside of this batch because - // it is more effective - if (additionalFilter.next()) { - return true; - } - found = false; - } - } - - private static Cursor get(Future f) { - Cursor c; - try { - c = f.get(); - } catch (Exception e) { - throw DbException.convert(e); - } - return c == null ? 
EMPTY_CURSOR : c; - } - - private boolean batchedNext() { - if (current == null) { - // after last - return false; - } - // go next - current = current.next; - if (current == null) { - return false; - } - current.prev = null; - - final int lastJfId = filters.length - 1; - - int jfId = lastJfId; - while (current.row(jfId) == null) { - // lookup for the first non fetched filter for the current row - jfId--; - } - - while (true) { - fetchCurrent(jfId); - - if (!current.isDropped()) { - // if current was not dropped then it must be fetched - // successfully - if (jfId == lastJfId) { - // the whole join row is ready to be returned - return true; - } - JoinFilter join = filters[jfId + 1]; - if (join.isBatchFull()) { - // get future cursors for join and go right to fetch them - current = join.find(current); - } - if (current.row(join.id) != null) { - // either find called or outer join with null-row - jfId = join.id; - continue; - } - } - // we have to go down and fetch next cursors for jfId if it is - // possible - if (current.next == null) { - // either dropped or null-row - if (current.isDropped()) { - current = current.prev; - if (current == null) { - return false; - } - } - assert !current.isDropped(); - assert jfId != lastJfId; - - jfId = 0; - while (current.row(jfId) != null) { - jfId++; - } - // force find on half filled batch (there must be either - // searchRows or Cursor.EMPTY set for null-rows) - current = filters[jfId].find(current); - } else { - // here we don't care if the current was dropped - current = current.next; - assert !current.isRow(jfId); - while (current.row(jfId) == null) { - assert jfId != top.id; - // need to go left and fetch more search rows - jfId--; - assert !current.isRow(jfId); - } - } - } - } - - @SuppressWarnings("unchecked") - private void fetchCurrent(final int jfId) { - assert current.prev == null || current.prev.isRow(jfId) : "prev must be already fetched"; - assert jfId == 0 || current.isRow(jfId - 1) : "left must be already 
fetched"; - - assert !current.isRow(jfId) : "double fetching"; - - Object x = current.row(jfId); - assert x != null : "x null"; - - // in case of outer join we don't have any future around empty cursor - boolean newCursor = x == EMPTY_CURSOR; - - if (newCursor) { - if (jfId == 0) { - // the top cursor is new and empty, then the whole select will - // not produce any rows - current.drop(); - return; - } - } else if (current.isFuture(jfId)) { - // get cursor from a future - x = get((Future) x); - current.updateRow(jfId, x, JoinRow.S_FUTURE, JoinRow.S_CURSOR); - newCursor = true; - } - - final JoinFilter jf = filters[jfId]; - Cursor c = (Cursor) x; - assert c != null; - JoinFilter join = jf.join; - - while (true) { - if (c == null || !c.next()) { - if (newCursor && jf.isOuterJoin()) { - // replace cursor with null-row - current.updateRow(jfId, jf.getNullRow(), JoinRow.S_CURSOR, JoinRow.S_ROW); - c = null; - newCursor = false; - } else { - // cursor is done, drop it - current.drop(); - return; - } - } - if (!jf.isOk(c == null)) { - // try another row from the cursor - continue; - } - boolean joinEmpty = false; - if (join != null && !join.collectSearchRows()) { - if (join.isOuterJoin()) { - joinEmpty = true; - } else { - // join will fail, try next row in the cursor - continue; - } - } - if (c != null) { - current = current.copyBehind(jfId); - // update jf, set current row from cursor - current.updateRow(jfId, c.get(), JoinRow.S_CURSOR, JoinRow.S_ROW); - } - if (joinEmpty) { - // update jf.join, set an empty cursor - current.updateRow(join.id, EMPTY_CURSOR, JoinRow.S_NULL, JoinRow.S_CURSOR); - } - return; - } - } - - /** - * @return Adapter to allow joining to this batch in sub-queries and views. - */ - private IndexLookupBatch viewIndexLookupBatch(ViewIndex viewIndex) { - return new ViewIndexLookupBatch(viewIndex); - } - - /** - * Create index lookup batch for a view index. 
- * - * @param viewIndex view index - * @return index lookup batch or {@code null} if batching is not supported - * for this query - */ - public static IndexLookupBatch createViewIndexLookupBatch(ViewIndex viewIndex) { - Query query = viewIndex.getQuery(); - if (query.isUnion()) { - ViewIndexLookupBatchUnion unionBatch = new ViewIndexLookupBatchUnion(viewIndex); - return unionBatch.initialize() ? unionBatch : null; - } - JoinBatch jb = ((Select) query).getJoinBatch(); - if (jb == null || jb.getLookupBatch(0) == null) { - // our sub-query is not batched or is top batched sub-query - return null; - } - assert !jb.batchedSubQuery; - jb.batchedSubQuery = true; - return jb.viewIndexLookupBatch(viewIndex); - } - - /** - * Create fake index lookup batch for non-batched table filter. - * - * @param filter the table filter - * @return fake index lookup batch - */ - public static IndexLookupBatch createFakeIndexLookupBatch(TableFilter filter) { - return new FakeLookupBatch(filter); - } - - @Override - public String toString() { - return "JoinBatch->\n" + "prev->" + (current == null ? null : current.prev) + - "\n" + "curr->" + current + - "\n" + "next->" + (current == null ? null : current.next); - } - - /** - * Table filter participating in batched join. 
- */ - private static final class JoinFilter { - final IndexLookupBatch lookupBatch; - final int id; - final JoinFilter join; - final TableFilter filter; - - JoinFilter(IndexLookupBatch lookupBatch, TableFilter filter, JoinFilter join) { - this.filter = filter; - this.id = filter.getJoinFilterId(); - this.join = join; - this.lookupBatch = lookupBatch; - assert lookupBatch != null || id == 0; - } - - void reset(boolean beforeQuery) { - if (lookupBatch != null) { - lookupBatch.reset(beforeQuery); - } - } - - Row getNullRow() { - return filter.getTable().getNullRow(); - } - - boolean isOuterJoin() { - return filter.isJoinOuter(); - } - - boolean isBatchFull() { - return lookupBatch.isBatchFull(); - } - - boolean isOk(boolean ignoreJoinCondition) { - boolean filterOk = filter.isOk(filter.getFilterCondition()); - boolean joinOk = filter.isOk(filter.getJoinCondition()); - - return filterOk && (ignoreJoinCondition || joinOk); - } - - boolean collectSearchRows() { - assert !isBatchFull(); - IndexCursor c = filter.getIndexCursor(); - c.prepare(filter.getSession(), filter.getIndexConditions()); - if (c.isAlwaysFalse()) { - return false; - } - return lookupBatch.addSearchRows(c.getStart(), c.getEnd()); - } - - List> find() { - return lookupBatch.find(); - } - - JoinRow find(JoinRow current) { - assert current != null; - - // lookupBatch is allowed to be empty when we have some null-rows - // and forced find call - List> result = lookupBatch.find(); - - // go backwards and assign futures - for (int i = result.size(); i > 0;) { - assert current.isRow(id - 1); - if (current.row(id) == EMPTY_CURSOR) { - // outer join support - skip row with existing empty cursor - current = current.prev; - continue; - } - assert current.row(id) == null; - Future future = result.get(--i); - if (future == null) { - current.updateRow(id, EMPTY_CURSOR, JoinRow.S_NULL, JoinRow.S_CURSOR); - } else { - current.updateRow(id, future, JoinRow.S_NULL, JoinRow.S_FUTURE); - } - if (current.prev == null || i 
== 0) { - break; - } - current = current.prev; - } - - // handle empty cursors (because of outer joins) at the beginning - while (current.prev != null && current.prev.row(id) == EMPTY_CURSOR) { - current = current.prev; - } - assert current.prev == null || current.prev.isRow(id); - assert current.row(id) != null; - assert !current.isRow(id); - - // the last updated row - return current; - } - - @Override - public String toString() { - return "JoinFilter->" + filter; - } - } - - /** - * Linked row in batched join. - */ - private static final class JoinRow { - private static final long S_NULL = 0; - private static final long S_FUTURE = 1; - private static final long S_CURSOR = 2; - private static final long S_ROW = 3; - - private static final long S_MASK = 3; - - JoinRow prev; - JoinRow next; - - /** - * May contain one of the following: - *
            - *
          • {@code null}: means that we need to get future cursor - * for this row
          • - *
          • {@link Future}: means that we need to get a new {@link Cursor} - * from the {@link Future}
          • - *
          • {@link Cursor}: means that we need to fetch {@link Row}s from the - * {@link Cursor}
          • - *
          • {@link Row}: the {@link Row} is already fetched and is ready to - * be used
          • - *
          - */ - private Object[] row; - private long state; - - /** - * @param row Row. - */ - JoinRow(Object[] row) { - this.row = row; - } - - /** - * @param joinFilterId Join filter id. - * @return Row state. - */ - private long getState(int joinFilterId) { - return (state >>> (joinFilterId << 1)) & S_MASK; - } - - /** - * Allows to do a state transition in the following order: - * 0. Slot contains {@code null} ({@link #S_NULL}). - * 1. Slot contains {@link Future} ({@link #S_FUTURE}). - * 2. Slot contains {@link Cursor} ({@link #S_CURSOR}). - * 3. Slot contains {@link Row} ({@link #S_ROW}). - * - * @param joinFilterId {@link JoinRow} filter id. - * @param i Increment by this number of moves. - */ - private void incrementState(int joinFilterId, long i) { - assert i > 0 : i; - state += i << (joinFilterId << 1); - } - - void updateRow(int joinFilterId, Object x, long oldState, long newState) { - assert getState(joinFilterId) == oldState : "old state: " + getState(joinFilterId); - row[joinFilterId] = x; - incrementState(joinFilterId, newState - oldState); - assert getState(joinFilterId) == newState : "new state: " + getState(joinFilterId); - } - - Object row(int joinFilterId) { - return row[joinFilterId]; - } - - boolean isRow(int joinFilterId) { - return getState(joinFilterId) == S_ROW; - } - - boolean isFuture(int joinFilterId) { - return getState(joinFilterId) == S_FUTURE; - } - - private boolean isCursor(int joinFilterId) { - return getState(joinFilterId) == S_CURSOR; - } - - boolean isComplete() { - return isRow(row.length - 1); - } - - boolean isDropped() { - return row == null; - } - - void drop() { - if (prev != null) { - prev.next = next; - } - if (next != null) { - next.prev = prev; - } - row = null; - } - - /** - * Copy this JoinRow behind itself in linked list of all in progress - * rows. - * - * @param jfId The last fetched filter id. - * @return The copy. 
- */ - JoinRow copyBehind(int jfId) { - assert isCursor(jfId); - assert jfId + 1 == row.length || row[jfId + 1] == null; - - Object[] r = new Object[row.length]; - if (jfId != 0) { - System.arraycopy(row, 0, r, 0, jfId); - } - JoinRow copy = new JoinRow(r); - copy.state = state; - - if (prev != null) { - copy.prev = prev; - prev.next = copy; - } - prev = copy; - copy.next = this; - - return copy; - } - - @Override - public String toString() { - return "JoinRow->" + Arrays.toString(row); - } - } - - /** - * Fake Lookup batch for indexes which do not support batching but have to - * participate in batched joins. - */ - private static final class FakeLookupBatch implements IndexLookupBatch { - private final TableFilter filter; - - private SearchRow first; - private SearchRow last; - - private boolean full; - - private final List> result = new SingletonList<>(); - - FakeLookupBatch(TableFilter filter) { - this.filter = filter; - } - - @Override - public String getPlanSQL() { - return "fake"; - } - - @Override - public void reset(boolean beforeQuery) { - full = false; - first = last = null; - result.set(0, null); - } - - @Override - public boolean addSearchRows(SearchRow first, SearchRow last) { - assert !full; - this.first = first; - this.last = last; - full = true; - return true; - } - - @Override - public boolean isBatchFull() { - return full; - } - - @Override - public List> find() { - if (!full) { - return Collections.emptyList(); - } - Cursor c = filter.getIndex().find(filter, first, last); - result.set(0, new DoneFuture<>(c)); - full = false; - first = last = null; - return result; - } - } - - /** - * Simple singleton list. - * @param Element type. 
- */ - static final class SingletonList extends AbstractList { - private E element; - - @Override - public E get(int index) { - assert index == 0; - return element; - } - - @Override - public E set(int index, E element) { - assert index == 0; - this.element = element; - return null; - } - - @Override - public int size() { - return 1; - } - } - - /** - * Base class for SELECT and SELECT UNION view index lookup batches. - * @param Runner type. - */ - private abstract static class ViewIndexLookupBatchBase - implements IndexLookupBatch { - protected final ViewIndex viewIndex; - private final ArrayList> result = Utils.newSmallArrayList(); - private int resultSize; - private boolean findCalled; - - protected ViewIndexLookupBatchBase(ViewIndex viewIndex) { - this.viewIndex = viewIndex; - } - - @Override - public String getPlanSQL() { - return "view"; - } - - protected abstract boolean collectSearchRows(R r); - - protected abstract R newQueryRunner(); - - protected abstract void startQueryRunners(int resultSize); - - protected final boolean resetAfterFind() { - if (!findCalled) { - return false; - } - findCalled = false; - // method find was called, we need to reset futures to initial state - // for reuse - for (int i = 0; i < resultSize; i++) { - queryRunner(i).reset(); - } - resultSize = 0; - return true; - } - - @SuppressWarnings("unchecked") - protected R queryRunner(int i) { - return (R) result.get(i); - } - - @Override - public final boolean addSearchRows(SearchRow first, SearchRow last) { - resetAfterFind(); - viewIndex.setupQueryParameters(viewIndex.getSession(), first, last, null); - R r; - if (resultSize < result.size()) { - // get reused runner - r = queryRunner(resultSize); - } else { - // create new runner - result.add(r = newQueryRunner()); - } - r.first = first; - r.last = last; - if (!collectSearchRows(r)) { - r.clear(); - return false; - } - resultSize++; - return true; - } - - @Override - public void reset(boolean beforeQuery) { - if (resultSize != 0 && 
!resetAfterFind()) { - // find was not called, need to just clear runners - for (int i = 0; i < resultSize; i++) { - queryRunner(i).clear(); - } - resultSize = 0; - } - } - - @Override - public final List> find() { - if (resultSize == 0) { - return Collections.emptyList(); - } - findCalled = true; - startQueryRunners(resultSize); - return resultSize == result.size() ? result : result.subList(0, resultSize); - } - } - - /** - * Lazy query runner base for subqueries and views. - */ - private abstract static class QueryRunnerBase extends LazyFuture { - protected final ViewIndex viewIndex; - protected SearchRow first; - protected SearchRow last; - private boolean isLazyResult; - - QueryRunnerBase(ViewIndex viewIndex) { - this.viewIndex = viewIndex; - } - - protected void clear() { - first = last = null; - } - - @Override - public final boolean reset() { - if (isLazyResult) { - resetViewTopFutureCursorAfterQuery(); - } - if (super.reset()) { - return true; - } - // this query runner was never executed, need to clear manually - clear(); - return false; - } - - protected final ViewCursor newCursor(ResultInterface localResult) { - isLazyResult = localResult.isLazy(); - ViewCursor cursor = new ViewCursor(viewIndex, localResult, first, last); - clear(); - return cursor; - } - - protected abstract void resetViewTopFutureCursorAfterQuery(); - } - - /** - * View index lookup batch for a simple SELECT. 
- */ - private final class ViewIndexLookupBatch extends ViewIndexLookupBatchBase { - ViewIndexLookupBatch(ViewIndex viewIndex) { - super(viewIndex); - } - - @Override - protected QueryRunner newQueryRunner() { - return new QueryRunner(viewIndex); - } - - @Override - protected boolean collectSearchRows(QueryRunner r) { - return top.collectSearchRows(); - } - - @Override - public boolean isBatchFull() { - return top.isBatchFull(); - } - - @Override - protected void startQueryRunners(int resultSize) { - // we do batched find only for top table filter and then lazily run - // the ViewIndex query for each received top future cursor - List> topFutureCursors = top.find(); - if (topFutureCursors.size() != resultSize) { - throw DbException - .throwInternalError("Unexpected result size: " + - topFutureCursors.size() + ", expected :" + - resultSize); - } - for (int i = 0; i < resultSize; i++) { - QueryRunner r = queryRunner(i); - r.topFutureCursor = topFutureCursors.get(i); - } - } - } - - /** - * Query runner for SELECT. 
- */ - private final class QueryRunner extends QueryRunnerBase { - Future topFutureCursor; - - QueryRunner(ViewIndex viewIndex) { - super(viewIndex); - } - - @Override - protected void clear() { - super.clear(); - topFutureCursor = null; - } - - @Override - protected Cursor run() throws Exception { - if (topFutureCursor == null) { - // if the top cursor is empty then the whole query will produce - // empty result - return EMPTY_CURSOR; - } - viewIndex.setupQueryParameters(viewIndex.getSession(), first, last, null); - JoinBatch.this.viewTopFutureCursor = topFutureCursor; - ResultInterface localResult; - boolean lazy = false; - try { - localResult = viewIndex.getQuery().query(0); - lazy = localResult.isLazy(); - } finally { - if (!lazy) { - resetViewTopFutureCursorAfterQuery(); - } - } - return newCursor(localResult); - } - - @Override - protected void resetViewTopFutureCursorAfterQuery() { - JoinBatch.this.viewTopFutureCursor = null; - } - } - - /** - * View index lookup batch for UNION queries. 
- */ - private static final class ViewIndexLookupBatchUnion - extends ViewIndexLookupBatchBase { - ArrayList filters; - ArrayList joinBatches; - private boolean onlyBatchedQueries = true; - - protected ViewIndexLookupBatchUnion(ViewIndex viewIndex) { - super(viewIndex); - } - - boolean initialize() { - return collectJoinBatches(viewIndex.getQuery()) && joinBatches != null; - } - - private boolean collectJoinBatches(Query query) { - if (query.isUnion()) { - SelectUnion union = (SelectUnion) query; - return collectJoinBatches(union.getLeft()) && - collectJoinBatches(union.getRight()); - } - Select select = (Select) query; - JoinBatch jb = select.getJoinBatch(); - if (jb == null) { - onlyBatchedQueries = false; - } else { - if (jb.getLookupBatch(0) == null) { - // we are top sub-query - return false; - } - assert !jb.batchedSubQuery; - jb.batchedSubQuery = true; - if (joinBatches == null) { - joinBatches = Utils.newSmallArrayList(); - filters = Utils.newSmallArrayList(); - } - filters.add(jb.filters[0]); - joinBatches.add(jb); - } - return true; - } - - @Override - public boolean isBatchFull() { - // if at least one is full - for (JoinFilter filter : filters) { - if (filter.isBatchFull()) { - return true; - } - } - return false; - } - - @Override - protected boolean collectSearchRows(QueryRunnerUnion r) { - boolean collected = false; - for (int i = 0; i < filters.size(); i++) { - if (filters.get(i).collectSearchRows()) { - collected = true; - } else { - r.topFutureCursors[i] = EMPTY_FUTURE_CURSOR; - } - } - return collected || !onlyBatchedQueries; - } - - @Override - protected QueryRunnerUnion newQueryRunner() { - return new QueryRunnerUnion(this); - } - - @Override - protected void startQueryRunners(int resultSize) { - for (int f = 0; f < filters.size(); f++) { - List> topFutureCursors = filters.get(f).find(); - int r = 0, c = 0; - for (; r < resultSize; r++) { - Future[] cs = queryRunner(r).topFutureCursors; - if (cs[f] == null) { - cs[f] = 
topFutureCursors.get(c++); - } - } - assert r == resultSize; - assert c == topFutureCursors.size(); - } - } - } - - /** - * Query runner for UNION. - */ - private static class QueryRunnerUnion extends QueryRunnerBase { - final Future[] topFutureCursors; - private final ViewIndexLookupBatchUnion batchUnion; - - @SuppressWarnings("unchecked") - QueryRunnerUnion(ViewIndexLookupBatchUnion batchUnion) { - super(batchUnion.viewIndex); - this.batchUnion = batchUnion; - topFutureCursors = new Future[batchUnion.filters.size()]; - } - - @Override - protected void clear() { - super.clear(); - for (int i = 0; i < topFutureCursors.length; i++) { - topFutureCursors[i] = null; - } - } - - @Override - protected Cursor run() throws Exception { - viewIndex.setupQueryParameters(viewIndex.getSession(), first, last, null); - ArrayList joinBatches = batchUnion.joinBatches; - for (int i = 0, size = joinBatches.size(); i < size; i++) { - assert topFutureCursors[i] != null; - joinBatches.get(i).viewTopFutureCursor = topFutureCursors[i]; - } - ResultInterface localResult; - boolean lazy = false; - try { - localResult = viewIndex.getQuery().query(0); - lazy = localResult.isLazy(); - } finally { - if (!lazy) { - resetViewTopFutureCursorAfterQuery(); - } - } - return newCursor(localResult); - } - - @Override - protected void resetViewTopFutureCursorAfterQuery() { - ArrayList joinBatches = batchUnion.joinBatches; - if (joinBatches == null) { - return; - } - for (JoinBatch joinBatch : joinBatches) { - joinBatch.viewTopFutureCursor = null; - } - } - } -} - diff --git a/h2/src/main/org/h2/table/LinkSchema.java b/h2/src/main/org/h2/table/LinkSchema.java deleted file mode 100644 index 544f23d009..0000000000 --- a/h2/src/main/org/h2/table/LinkSchema.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.table; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Types; -import org.h2.message.DbException; -import org.h2.tools.SimpleResultSet; -import org.h2.util.JdbcUtils; -import org.h2.util.StringUtils; - -/** - * A utility class to create table links for a whole schema. - */ -public class LinkSchema { - - private LinkSchema() { - // utility class - } - - /** - * Link all tables of a schema to the database. - * - * @param conn the connection to the database where the links are to be - * created - * @param targetSchema the schema name where the objects should be created - * @param driver the driver class name of the linked database - * @param url the database URL of the linked database - * @param user the user name - * @param password the password - * @param sourceSchema the schema where the existing tables are - * @return a result set with the created tables - */ - public static ResultSet linkSchema(Connection conn, String targetSchema, - String driver, String url, String user, String password, - String sourceSchema) { - Connection c2 = null; - Statement stat = null; - ResultSet rs = null; - SimpleResultSet result = new SimpleResultSet(); - result.setAutoClose(false); - result.addColumn("TABLE_NAME", Types.VARCHAR, Integer.MAX_VALUE, 0); - try { - c2 = JdbcUtils.getConnection(driver, url, user, password); - stat = conn.createStatement(); - stat.execute(StringUtils.quoteIdentifier(new StringBuilder("CREATE SCHEMA IF NOT EXISTS "), targetSchema) - .toString()); - //Workaround for PostgreSQL to avoid index names - if (url.startsWith("jdbc:postgresql:")) { - rs = c2.getMetaData().getTables(null, sourceSchema, null, - new String[] { "TABLE", "LINKED TABLE", "VIEW", "EXTERNAL" }); - } else { - rs = c2.getMetaData().getTables(null, sourceSchema, null, null); - } - while (rs.next()) { - String table = rs.getString("TABLE_NAME"); - 
StringBuilder buff = new StringBuilder(); - buff.append("DROP TABLE IF EXISTS "); - StringUtils.quoteIdentifier(buff, targetSchema). - append('.'); - StringUtils.quoteIdentifier(buff, table); - stat.execute(buff.toString()); - buff.setLength(0); - buff.append("CREATE LINKED TABLE "); - StringUtils.quoteIdentifier(buff, targetSchema). - append('.'); - StringUtils.quoteIdentifier(buff, table). - append('('); - StringUtils.quoteStringSQL(buff, driver).append(", "); - StringUtils.quoteStringSQL(buff, url).append(", "); - StringUtils.quoteStringSQL(buff, user).append(", "); - StringUtils.quoteStringSQL(buff, password).append(", "); - StringUtils.quoteStringSQL(buff, sourceSchema).append(", "); - StringUtils.quoteStringSQL(buff, table).append(')'); - stat.execute(buff.toString()); - result.addRow(table); - } - } catch (SQLException e) { - throw DbException.convert(e); - } finally { - JdbcUtils.closeSilently(rs); - JdbcUtils.closeSilently(c2); - JdbcUtils.closeSilently(stat); - } - return result; - } -} diff --git a/h2/src/main/org/h2/table/MetaTable.java b/h2/src/main/org/h2/table/MetaTable.java index bf86ccc3d8..19f10fd30f 100644 --- a/h2/src/main/org/h2/table/MetaTable.java +++ b/h2/src/main/org/h2/table/MetaTable.java @@ -1,127 +1,51 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.table; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.Reader; -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.Types; -import java.text.Collator; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.Locale; -import org.h2.command.Command; -import org.h2.constraint.Constraint; -import org.h2.constraint.ConstraintActionType; -import org.h2.constraint.ConstraintCheck; -import org.h2.constraint.ConstraintReferential; -import org.h2.constraint.ConstraintUnique; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.engine.DbObject; -import org.h2.engine.Domain; -import org.h2.engine.FunctionAlias; -import org.h2.engine.FunctionAlias.JavaMethod; -import org.h2.engine.QueryStatisticsData; -import org.h2.engine.Right; -import org.h2.engine.Role; -import org.h2.engine.Session; -import org.h2.engine.Setting; -import org.h2.engine.User; -import org.h2.engine.UserAggregate; -import org.h2.expression.ValueExpression; +import org.h2.engine.SessionLocal; import org.h2.index.Index; import org.h2.index.IndexType; import org.h2.index.MetaIndex; import org.h2.message.DbException; -import org.h2.mvstore.FileStore; -import org.h2.mvstore.MVStore; -import org.h2.mvstore.db.MVTableEngine.Store; import org.h2.result.Row; import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.schema.Constant; import org.h2.schema.Schema; -import org.h2.schema.SchemaObject; -import org.h2.schema.Sequence; -import org.h2.schema.TriggerObject; -import org.h2.store.InDoubtTransaction; -import org.h2.store.PageStore; -import org.h2.tools.Csv; -import org.h2.util.DateTimeUtils; -import org.h2.util.MathUtils; import org.h2.util.StringUtils; -import org.h2.util.Utils; -import org.h2.value.CompareMode; -import org.h2.value.DataType; +import org.h2.value.TypeInfo; 
import org.h2.value.Value; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueDouble; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; import org.h2.value.ValueNull; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueStringIgnoreCase; +import org.h2.value.ValueVarchar; +import org.h2.value.ValueVarcharIgnoreCase; /** * This class is responsible to build the database meta data pseudo tables. */ -public class MetaTable extends Table { +public abstract class MetaTable extends Table { /** * The approximate number of rows of a meta table. */ public static final long ROW_COUNT_APPROXIMATION = 1000; - private static final String CHARACTER_SET_NAME = "Unicode"; + /** + * The table type. + */ + protected final int type; - private static final int TABLES = 0; - private static final int COLUMNS = 1; - private static final int INDEXES = 2; - private static final int TABLE_TYPES = 3; - private static final int TYPE_INFO = 4; - private static final int CATALOGS = 5; - private static final int SETTINGS = 6; - private static final int HELP = 7; - private static final int SEQUENCES = 8; - private static final int USERS = 9; - private static final int ROLES = 10; - private static final int RIGHTS = 11; - private static final int FUNCTION_ALIASES = 12; - private static final int SCHEMATA = 13; - private static final int TABLE_PRIVILEGES = 14; - private static final int COLUMN_PRIVILEGES = 15; - private static final int COLLATIONS = 16; - private static final int VIEWS = 17; - private static final int IN_DOUBT = 18; - private static final int CROSS_REFERENCES = 19; - private static final int CONSTRAINTS = 20; - private static final int FUNCTION_COLUMNS = 21; - private static final int CONSTANTS = 22; - private static final int DOMAINS = 23; - private static final int TRIGGERS = 24; - private static final int SESSIONS = 25; - private static final int LOCKS = 26; - private static final int SESSION_STATE = 27; - private 
static final int QUERY_STATISTICS = 28; - private static final int SYNONYMS = 29; - private static final int TABLE_CONSTRAINTS = 30; - private static final int KEY_COLUMN_USAGE = 31; - private static final int REFERENTIAL_CONSTRAINTS = 32; - private static final int META_TABLE_TYPE_COUNT = REFERENTIAL_CONSTRAINTS + 1; + /** + * The indexed column. + */ + protected int indexColumn; - private final int type; - private final int indexColumn; - private final MetaIndex metaIndex; + /** + * The index for this table. + */ + protected MetaIndex metaIndex; /** * Create a new metadata table. @@ -130,615 +54,103 @@ public class MetaTable extends Table { * @param id the object id * @param type the meta table type */ - public MetaTable(Schema schema, int id, int type) { + protected MetaTable(Schema schema, int id, int type) { // tableName will be set later super(schema, id, null, true, true); this.type = type; - Column[] cols; - String indexColumnName = null; - switch (type) { - case TABLES: - setMetaTableName("TABLES"); - cols = createColumns( - "TABLE_CATALOG", - "TABLE_SCHEMA", - "TABLE_NAME", - "TABLE_TYPE", - // extensions - "STORAGE_TYPE", - "SQL", - "REMARKS", - "LAST_MODIFICATION BIGINT", - "ID INT", - "TYPE_NAME", - "TABLE_CLASS", - "ROW_COUNT_ESTIMATE BIGINT" - ); - indexColumnName = "TABLE_NAME"; - break; - case COLUMNS: - setMetaTableName("COLUMNS"); - cols = createColumns( - "TABLE_CATALOG", - "TABLE_SCHEMA", - "TABLE_NAME", - "COLUMN_NAME", - "ORDINAL_POSITION INT", - "DOMAIN_CATALOG", - "DOMAIN_SCHEMA", - "DOMAIN_NAME", - "COLUMN_DEFAULT", - "IS_NULLABLE", - "DATA_TYPE INT", - "CHARACTER_MAXIMUM_LENGTH INT", - "CHARACTER_OCTET_LENGTH INT", - "NUMERIC_PRECISION INT", - "NUMERIC_PRECISION_RADIX INT", - "NUMERIC_SCALE INT", - "DATETIME_PRECISION INT", - "INTERVAL_TYPE", - "INTERVAL_PRECISION INT", - "CHARACTER_SET_NAME", - "COLLATION_NAME", - // extensions - "TYPE_NAME", - "NULLABLE INT", - "IS_COMPUTED BIT", - "SELECTIVITY INT", - "CHECK_CONSTRAINT", - 
"SEQUENCE_NAME", - "REMARKS", - "SOURCE_DATA_TYPE SMALLINT", - "COLUMN_TYPE", - "COLUMN_ON_UPDATE", - "IS_VISIBLE" - ); - indexColumnName = "TABLE_NAME"; - break; - case INDEXES: - setMetaTableName("INDEXES"); - cols = createColumns( - "TABLE_CATALOG", - "TABLE_SCHEMA", - "TABLE_NAME", - "NON_UNIQUE BIT", - "INDEX_NAME", - "ORDINAL_POSITION SMALLINT", - "COLUMN_NAME", - "CARDINALITY INT", - "PRIMARY_KEY BIT", - "INDEX_TYPE_NAME", - "IS_GENERATED BIT", - "INDEX_TYPE SMALLINT", - "ASC_OR_DESC", - "PAGES INT", - "FILTER_CONDITION", - "REMARKS", - "SQL", - "ID INT", - "SORT_TYPE INT", - "CONSTRAINT_NAME", - "INDEX_CLASS", - "AFFINITY BIT" - ); - indexColumnName = "TABLE_NAME"; - break; - case TABLE_TYPES: - setMetaTableName("TABLE_TYPES"); - cols = createColumns("TYPE"); - break; - case TYPE_INFO: - setMetaTableName("TYPE_INFO"); - cols = createColumns( - "TYPE_NAME", - "DATA_TYPE INT", - "PRECISION INT", - "PREFIX", - "SUFFIX", - "PARAMS", - "AUTO_INCREMENT BIT", - "MINIMUM_SCALE SMALLINT", - "MAXIMUM_SCALE SMALLINT", - "RADIX INT", - "POS INT", - "CASE_SENSITIVE BIT", - "NULLABLE SMALLINT", - "SEARCHABLE SMALLINT" - ); - break; - case CATALOGS: - setMetaTableName("CATALOGS"); - cols = createColumns("CATALOG_NAME"); - break; - case SETTINGS: - setMetaTableName("SETTINGS"); - cols = createColumns("NAME", "VALUE"); - break; - case HELP: - setMetaTableName("HELP"); - cols = createColumns( - "ID INT", - "SECTION", - "TOPIC", - "SYNTAX", - "TEXT" - ); - break; - case SEQUENCES: - setMetaTableName("SEQUENCES"); - cols = createColumns( - "SEQUENCE_CATALOG", - "SEQUENCE_SCHEMA", - "SEQUENCE_NAME", - "CURRENT_VALUE BIGINT", - "INCREMENT BIGINT", - "IS_GENERATED BIT", - "REMARKS", - "CACHE BIGINT", - "MIN_VALUE BIGINT", - "MAX_VALUE BIGINT", - "IS_CYCLE BIT", - "ID INT" - ); - break; - case USERS: - setMetaTableName("USERS"); - cols = createColumns( - "NAME", - "ADMIN", - "REMARKS", - "ID INT" - ); - break; - case ROLES: - setMetaTableName("ROLES"); - cols = createColumns( - 
"NAME", - "REMARKS", - "ID INT" - ); - break; - case RIGHTS: - setMetaTableName("RIGHTS"); - cols = createColumns( - "GRANTEE", - "GRANTEETYPE", - "GRANTEDROLE", - "RIGHTS", - "TABLE_SCHEMA", - "TABLE_NAME", - "ID INT" - ); - indexColumnName = "TABLE_NAME"; - break; - case FUNCTION_ALIASES: - setMetaTableName("FUNCTION_ALIASES"); - cols = createColumns( - "ALIAS_CATALOG", - "ALIAS_SCHEMA", - "ALIAS_NAME", - "JAVA_CLASS", - "JAVA_METHOD", - "DATA_TYPE INT", - "TYPE_NAME", - "COLUMN_COUNT INT", - "RETURNS_RESULT SMALLINT", - "REMARKS", - "ID INT", - "SOURCE" - ); - break; - case FUNCTION_COLUMNS: - setMetaTableName("FUNCTION_COLUMNS"); - cols = createColumns( - "ALIAS_CATALOG", - "ALIAS_SCHEMA", - "ALIAS_NAME", - "JAVA_CLASS", - "JAVA_METHOD", - "COLUMN_COUNT INT", - "POS INT", - "COLUMN_NAME", - "DATA_TYPE INT", - "TYPE_NAME", - "PRECISION INT", - "SCALE SMALLINT", - "RADIX SMALLINT", - "NULLABLE SMALLINT", - "COLUMN_TYPE SMALLINT", - "REMARKS", - "COLUMN_DEFAULT" - ); - break; - case SCHEMATA: - setMetaTableName("SCHEMATA"); - cols = createColumns( - "CATALOG_NAME", - "SCHEMA_NAME", - "SCHEMA_OWNER", - "DEFAULT_CHARACTER_SET_NAME", - "DEFAULT_COLLATION_NAME", - "IS_DEFAULT BIT", - "REMARKS", - "ID INT" - ); - break; - case TABLE_PRIVILEGES: - setMetaTableName("TABLE_PRIVILEGES"); - cols = createColumns( - "GRANTOR", - "GRANTEE", - "TABLE_CATALOG", - "TABLE_SCHEMA", - "TABLE_NAME", - "PRIVILEGE_TYPE", - "IS_GRANTABLE" - ); - indexColumnName = "TABLE_NAME"; - break; - case COLUMN_PRIVILEGES: - setMetaTableName("COLUMN_PRIVILEGES"); - cols = createColumns( - "GRANTOR", - "GRANTEE", - "TABLE_CATALOG", - "TABLE_SCHEMA", - "TABLE_NAME", - "COLUMN_NAME", - "PRIVILEGE_TYPE", - "IS_GRANTABLE" - ); - indexColumnName = "TABLE_NAME"; - break; - case COLLATIONS: - setMetaTableName("COLLATIONS"); - cols = createColumns( - "NAME", - "KEY" - ); - break; - case VIEWS: - setMetaTableName("VIEWS"); - cols = createColumns( - "TABLE_CATALOG", - "TABLE_SCHEMA", - "TABLE_NAME", - 
"VIEW_DEFINITION", - "CHECK_OPTION", - "IS_UPDATABLE", - "STATUS", - "REMARKS", - "ID INT" - ); - indexColumnName = "TABLE_NAME"; - break; - case IN_DOUBT: - setMetaTableName("IN_DOUBT"); - cols = createColumns( - "TRANSACTION", - "STATE" - ); - break; - case CROSS_REFERENCES: - setMetaTableName("CROSS_REFERENCES"); - cols = createColumns( - "PKTABLE_CATALOG", - "PKTABLE_SCHEMA", - "PKTABLE_NAME", - "PKCOLUMN_NAME", - "FKTABLE_CATALOG", - "FKTABLE_SCHEMA", - "FKTABLE_NAME", - "FKCOLUMN_NAME", - "ORDINAL_POSITION SMALLINT", - "UPDATE_RULE SMALLINT", - "DELETE_RULE SMALLINT", - "FK_NAME", - "PK_NAME", - "DEFERRABILITY SMALLINT" - ); - indexColumnName = "PKTABLE_NAME"; - break; - case CONSTRAINTS: - setMetaTableName("CONSTRAINTS"); - cols = createColumns( - "CONSTRAINT_CATALOG", - "CONSTRAINT_SCHEMA", - "CONSTRAINT_NAME", - "CONSTRAINT_TYPE", - "TABLE_CATALOG", - "TABLE_SCHEMA", - "TABLE_NAME", - "UNIQUE_INDEX_NAME", - "CHECK_EXPRESSION", - "COLUMN_LIST", - "REMARKS", - "SQL", - "ID INT" - ); - indexColumnName = "TABLE_NAME"; - break; - case CONSTANTS: - setMetaTableName("CONSTANTS"); - cols = createColumns( - "CONSTANT_CATALOG", - "CONSTANT_SCHEMA", - "CONSTANT_NAME", - "DATA_TYPE INT", - "REMARKS", - "SQL", - "ID INT" - ); - break; - case DOMAINS: - setMetaTableName("DOMAINS"); - cols = createColumns( - "DOMAIN_CATALOG", - "DOMAIN_SCHEMA", - "DOMAIN_NAME", - "COLUMN_DEFAULT", - "IS_NULLABLE", - "DATA_TYPE INT", - "PRECISION INT", - "SCALE INT", - "TYPE_NAME", - "SELECTIVITY INT", - "CHECK_CONSTRAINT", - "REMARKS", - "SQL", - "ID INT" - ); - break; - case TRIGGERS: - setMetaTableName("TRIGGERS"); - cols = createColumns( - "TRIGGER_CATALOG", - "TRIGGER_SCHEMA", - "TRIGGER_NAME", - "TRIGGER_TYPE", - "TABLE_CATALOG", - "TABLE_SCHEMA", - "TABLE_NAME", - "BEFORE BIT", - "JAVA_CLASS", - "QUEUE_SIZE INT", - "NO_WAIT BIT", - "REMARKS", - "SQL", - "ID INT" - ); - break; - case SESSIONS: { - setMetaTableName("SESSIONS"); - cols = createColumns( - "ID INT", - "USER_NAME", - 
"SESSION_START TIMESTAMP WITH TIME ZONE", - "STATEMENT", - "STATEMENT_START TIMESTAMP WITH TIME ZONE", - "CONTAINS_UNCOMMITTED BIT", - "STATE", - "BLOCKER_ID INT" - ); - break; - } - case LOCKS: { - setMetaTableName("LOCKS"); - cols = createColumns( - "TABLE_SCHEMA", - "TABLE_NAME", - "SESSION_ID INT", - "LOCK_TYPE" - ); - break; - } - case SESSION_STATE: { - setMetaTableName("SESSION_STATE"); - cols = createColumns( - "KEY", - "SQL" - ); - break; - } - case QUERY_STATISTICS: { - setMetaTableName("QUERY_STATISTICS"); - cols = createColumns( - "SQL_STATEMENT", - "EXECUTION_COUNT INT", - "MIN_EXECUTION_TIME DOUBLE", - "MAX_EXECUTION_TIME DOUBLE", - "CUMULATIVE_EXECUTION_TIME DOUBLE", - "AVERAGE_EXECUTION_TIME DOUBLE", - "STD_DEV_EXECUTION_TIME DOUBLE", - "MIN_ROW_COUNT INT", - "MAX_ROW_COUNT INT", - "CUMULATIVE_ROW_COUNT LONG", - "AVERAGE_ROW_COUNT DOUBLE", - "STD_DEV_ROW_COUNT DOUBLE" - ); - break; - } - case SYNONYMS: { - setMetaTableName("SYNONYMS"); - cols = createColumns( - "SYNONYM_CATALOG", - "SYNONYM_SCHEMA", - "SYNONYM_NAME", - "SYNONYM_FOR", - "SYNONYM_FOR_SCHEMA", - "TYPE_NAME", - "STATUS", - "REMARKS", - "ID INT" - ); - indexColumnName = "SYNONYM_NAME"; - break; - } - case TABLE_CONSTRAINTS: { - setMetaTableName("TABLE_CONSTRAINTS"); - cols = createColumns( - "CONSTRAINT_CATALOG", - "CONSTRAINT_SCHEMA", - "CONSTRAINT_NAME", - "CONSTRAINT_TYPE", - "TABLE_CATALOG", - "TABLE_SCHEMA", - "TABLE_NAME", - "IS_DEFERRABLE", - "INITIALLY_DEFERRED" - ); - indexColumnName = "TABLE_NAME"; - break; - } - case KEY_COLUMN_USAGE: { - setMetaTableName("KEY_COLUMN_USAGE"); - cols = createColumns( - "CONSTRAINT_CATALOG", - "CONSTRAINT_SCHEMA", - "CONSTRAINT_NAME", - "TABLE_CATALOG", - "TABLE_SCHEMA", - "TABLE_NAME", - "COLUMN_NAME", - "ORDINAL_POSITION INT", - "POSITION_IN_UNIQUE_CONSTRAINT INT" - ); - indexColumnName = "TABLE_NAME"; - break; - } - case REFERENTIAL_CONSTRAINTS: { - setMetaTableName("REFERENTIAL_CONSTRAINTS"); - cols = createColumns( - "CONSTRAINT_CATALOG", - 
"CONSTRAINT_SCHEMA", - "CONSTRAINT_NAME", - "UNIQUE_CONSTRAINT_CATALOG", - "UNIQUE_CONSTRAINT_SCHEMA", - "UNIQUE_CONSTRAINT_NAME", - "MATCH_OPTION", - "UPDATE_RULE", - "DELETE_RULE" - ); - break; - } - default: - throw DbException.throwInternalError("type="+type); - } - setColumns(cols); - - if (indexColumnName == null) { - indexColumn = -1; - metaIndex = null; - } else { - indexColumn = getColumn(database.sysIdentifier(indexColumnName)).getColumnId(); - IndexColumn[] indexCols = IndexColumn.wrap( - new Column[] { cols[indexColumn] }); - metaIndex = new MetaIndex(this, indexCols, false); - } } - private void setMetaTableName(String upperName) { + protected final void setMetaTableName(String upperName) { setObjectName(database.sysIdentifier(upperName)); } - private Column[] createColumns(String... names) { - Column[] cols = new Column[names.length]; - for (int i = 0; i < names.length; i++) { - String nameType = names[i]; - int idx = nameType.indexOf(' '); - int dataType; - String name; - if (idx < 0) { - dataType = database.getMode().lowerCaseIdentifiers ? - Value.STRING_IGNORECASE : Value.STRING; - name = nameType; - } else { - dataType = DataType.getTypeByName(nameType.substring(idx + 1), database.getMode()).type; - name = nameType.substring(0, idx); - } - cols[i] = new Column(database.sysIdentifier(name), dataType); - } - return cols; + /** + * Creates a column with the specified name and character string data type. + * + * @param name + * the uppercase column name + * @return the column + */ + final Column column(String name) { + return new Column(database.sysIdentifier(name), + database.getSettings().caseInsensitiveIdentifiers ? TypeInfo.TYPE_VARCHAR_IGNORECASE + : TypeInfo.TYPE_VARCHAR); } - @Override - public String getDropSQL() { - return null; + /** + * Creates a column with the specified name and data type. 
+ * + * @param name + * the uppercase column name + * @param type + * the data type + * @return the column + */ + protected final Column column(String name, TypeInfo type) { + return new Column(database.sysIdentifier(name), type); } @Override - public String getCreateSQL() { + public final String getCreateSQL() { return null; } @Override - public Index addIndex(Session session, String indexName, int indexId, - IndexColumn[] cols, IndexType indexType, boolean create, - String indexComment) { + public final Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment) { throw DbException.getUnsupportedException("META"); } - @Override - public boolean lock(Session session, boolean exclusive, boolean forceLockEvenInMvcc) { - // nothing to do - return false; - } - - @Override - public boolean isLockedExclusively() { - return false; - } - - private String identifier(String s) { - if (database.getMode().lowerCaseIdentifiers) { + /** + * If needed, convert the identifier to lower case. + * + * @param s the identifier to convert + * @return the converted identifier + */ + protected final String identifier(String s) { + if (database.getSettings().databaseToLower) { s = s == null ? null : StringUtils.toLowerEnglish(s); } return s; } - private ArrayList
          getAllTables(Session session) { - ArrayList
          tables = database.getAllTablesAndViews(true); - ArrayList
          tempTables = session.getLocalTempTables(); - tables.addAll(tempTables); - return tables; - } - - private ArrayList
          getTablesByName(Session session, String tableName) { - ArrayList
          tables = database.getTableOrViewByName(tableName); - for (Table temp : session.getLocalTempTables()) { - if (temp.getName().equals(tableName)) { - tables.add(temp); - } - } - return tables; - } - - private boolean checkIndex(Session session, String value, Value indexFrom, - Value indexTo) { + /** + * Checks index conditions. + * + * @param session the session + * @param value the value + * @param indexFrom the lower bound of value, or {@code null} + * @param indexTo the higher bound of value, or {@code null} + * @return whether row should be included into result + */ + protected final boolean checkIndex(SessionLocal session, String value, Value indexFrom, Value indexTo) { if (value == null || (indexFrom == null && indexTo == null)) { return true; } - Database db = session.getDatabase(); Value v; - if (database.getMode().lowerCaseIdentifiers) { - v = ValueStringIgnoreCase.get(value); + if (database.getSettings().caseInsensitiveIdentifiers) { + v = ValueVarcharIgnoreCase.get(value); } else { - v = ValueString.get(value); + v = ValueVarchar.get(value); } - if (indexFrom != null && db.compare(v, indexFrom) < 0) { + if (indexFrom != null && session.compare(v, indexFrom) < 0) { return false; } - if (indexTo != null && db.compare(v, indexTo) > 0) { + if (indexTo != null && session.compare(v, indexTo) > 0) { return false; } return true; } - private static String replaceNullWithEmpty(String s) { - return s == null ? "" : s; - } - - private boolean hideTable(Table table, Session session) { + /** + * Check whether to hide the table. Tables are never hidden in the system + * session. 
+ * + * @param table the table + * @param session the session + * @return whether the table is hidden + */ + protected final boolean hideTable(Table table, SessionLocal session) { return table.isHidden() && session != database.getSystemSession(); } @@ -751,1585 +163,92 @@ private boolean hideTable(Table table, Session session) { * @param last the last row to return * @return the generated rows */ - public ArrayList generateRows(Session session, SearchRow first, - SearchRow last) { - Value indexFrom = null, indexTo = null; - - if (indexColumn >= 0) { - if (first != null) { - indexFrom = first.getValue(indexColumn); - } - if (last != null) { - indexTo = last.getValue(indexColumn); - } - } - - ArrayList rows = Utils.newSmallArrayList(); - String catalog = database.getShortName(); - boolean admin = session.getUser().isAdmin(); - switch (type) { - case TABLES: { - for (Table table : getAllTables(session)) { - String tableName = table.getName(); - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - if (hideTable(table, session)) { - continue; - } - String storageType; - if (table.isTemporary()) { - if (table.isGlobalTemporary()) { - storageType = "GLOBAL TEMPORARY"; - } else { - storageType = "LOCAL TEMPORARY"; - } - } else { - storageType = table.isPersistIndexes() ? 
- "CACHED" : "MEMORY"; - } - String sql = table.getCreateSQL(); - if (!admin) { - if (sql != null && sql.contains(DbException.HIDE_SQL)) { - // hide the password of linked tables - sql = "-"; - } - } - add(rows, - // TABLE_CATALOG - catalog, - // TABLE_SCHEMA - table.getSchema().getName(), - // TABLE_NAME - tableName, - // TABLE_TYPE - table.getTableType().toString(), - // STORAGE_TYPE - storageType, - // SQL - sql, - // REMARKS - replaceNullWithEmpty(table.getComment()), - // LAST_MODIFICATION - ValueLong.get(table.getMaxDataModificationId()), - // ID - ValueInt.get(table.getId()), - // TYPE_NAME - null, - // TABLE_CLASS - table.getClass().getName(), - // ROW_COUNT_ESTIMATE - ValueLong.get(table.getRowCountApproximation()) - ); - } - break; - } - case COLUMNS: { - // reduce the number of tables to scan - makes some metadata queries - // 10x faster - final ArrayList
          tablesToList; - if (indexFrom != null && indexFrom.equals(indexTo)) { - String tableName = indexFrom.getString(); - if (tableName == null) { - break; - } - tablesToList = getTablesByName(session, tableName); - } else { - tablesToList = getAllTables(session); - } - for (Table table : tablesToList) { - String tableName = table.getName(); - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - if (hideTable(table, session)) { - continue; - } - Column[] cols = table.getColumns(); - String collation = database.getCompareMode().getName(); - for (int j = 0; j < cols.length; j++) { - Column c = cols[j]; - Domain domain = c.getDomain(); - DataType dataType = c.getDataType(); - ValueInt precision = ValueInt.get(c.getPrecisionAsInt()); - ValueInt scale = ValueInt.get(c.getType().getScale()); - Sequence sequence = c.getSequence(); - boolean hasDateTimePrecision; - int type = dataType.type; - switch (type) { - case Value.TIME: - case Value.DATE: - case Value.TIMESTAMP: - case Value.TIMESTAMP_TZ: - case Value.INTERVAL_SECOND: - case Value.INTERVAL_DAY_TO_SECOND: - case Value.INTERVAL_HOUR_TO_SECOND: - case Value.INTERVAL_MINUTE_TO_SECOND: - hasDateTimePrecision = true; - break; - default: - hasDateTimePrecision = false; - } - boolean isInterval = DataType.isIntervalType(type); - String createSQLWithoutName = c.getCreateSQLWithoutName(); - add(rows, - // TABLE_CATALOG - catalog, - // TABLE_SCHEMA - table.getSchema().getName(), - // TABLE_NAME - tableName, - // COLUMN_NAME - c.getName(), - // ORDINAL_POSITION - ValueInt.get(j + 1), - // DOMAIN_CATALOG - domain != null ? catalog : null, - // DOMAIN_SCHEMA - domain != null ? database.getMainSchema().getName() : null, - // DOMAIN_NAME - domain != null ? domain.getName() : null, - // COLUMN_DEFAULT - c.getDefaultSQL(), - // IS_NULLABLE - c.isNullable() ? 
"YES" : "NO", - // DATA_TYPE - ValueInt.get(dataType.sqlType), - // CHARACTER_MAXIMUM_LENGTH - precision, - // CHARACTER_OCTET_LENGTH - precision, - // NUMERIC_PRECISION - precision, - // NUMERIC_PRECISION_RADIX - ValueInt.get(10), - // NUMERIC_SCALE - scale, - // DATETIME_PRECISION - hasDateTimePrecision ? scale : null, - // INTERVAL_TYPE - isInterval ? createSQLWithoutName.substring(9) : null, - // INTERVAL_PRECISION - isInterval ? precision : null, - // CHARACTER_SET_NAME - CHARACTER_SET_NAME, - // COLLATION_NAME - collation, - // TYPE_NAME - identifier(isInterval ? "INTERVAL" : dataType.name), - // NULLABLE - ValueInt.get(c.isNullable() - ? DatabaseMetaData.columnNullable : DatabaseMetaData.columnNoNulls), - // IS_COMPUTED - ValueBoolean.get(c.getComputed()), - // SELECTIVITY - ValueInt.get(c.getSelectivity()), - // CHECK_CONSTRAINT - c.getCheckConstraintSQL(session, c.getName()), - // SEQUENCE_NAME - sequence == null ? null : sequence.getName(), - // REMARKS - replaceNullWithEmpty(c.getComment()), - // SOURCE_DATA_TYPE - // SMALLINT - null, - // COLUMN_TYPE - createSQLWithoutName, - // COLUMN_ON_UPDATE - c.getOnUpdateSQL(), - // IS_VISIBLE - ValueBoolean.get(c.getVisible()) - ); - } - } - break; - } - case INDEXES: { - // reduce the number of tables to scan - makes some metadata queries - // 10x faster - final ArrayList
          tablesToList; - if (indexFrom != null && indexFrom.equals(indexTo)) { - String tableName = indexFrom.getString(); - if (tableName == null) { - break; - } - tablesToList = getTablesByName(session, tableName); - } else { - tablesToList = getAllTables(session); - } - for (Table table : tablesToList) { - String tableName = table.getName(); - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - if (hideTable(table, session)) { - continue; - } - ArrayList indexes = table.getIndexes(); - ArrayList constraints = table.getConstraints(); - for (int j = 0; indexes != null && j < indexes.size(); j++) { - Index index = indexes.get(j); - if (index.getCreateSQL() == null) { - continue; - } - String constraintName = null; - for (int k = 0; constraints != null && k < constraints.size(); k++) { - Constraint constraint = constraints.get(k); - if (constraint.usesIndex(index)) { - if (index.getIndexType().isPrimaryKey()) { - if (constraint.getConstraintType() == Constraint.Type.PRIMARY_KEY) { - constraintName = constraint.getName(); - } - } else { - constraintName = constraint.getName(); - } - } - } - IndexColumn[] cols = index.getIndexColumns(); - String indexClass = index.getClass().getName(); - for (int k = 0; k < cols.length; k++) { - IndexColumn idxCol = cols[k]; - Column column = idxCol.column; - add(rows, - // TABLE_CATALOG - catalog, - // TABLE_SCHEMA - table.getSchema().getName(), - // TABLE_NAME - tableName, - // NON_UNIQUE - ValueBoolean.get(!index.getIndexType().isUnique()), - // INDEX_NAME - index.getName(), - // ORDINAL_POSITION - ValueShort.get((short) (k + 1)), - // COLUMN_NAME - column.getName(), - // CARDINALITY - ValueInt.get(0), - // PRIMARY_KEY - ValueBoolean.get(index.getIndexType().isPrimaryKey()), - // INDEX_TYPE_NAME - index.getIndexType().getSQL(), - // IS_GENERATED - ValueBoolean.get(index.getIndexType().getBelongsToConstraint()), - // INDEX_TYPE - ValueShort.get(DatabaseMetaData.tableIndexOther), - // ASC_OR_DESC - 
(idxCol.sortType & SortOrder.DESCENDING) != 0 ? "D" : "A", - // PAGES - ValueInt.get(0), - // FILTER_CONDITION - "", - // REMARKS - replaceNullWithEmpty(index.getComment()), - // SQL - index.getCreateSQL(), - // ID - ValueInt.get(index.getId()), - // SORT_TYPE - ValueInt.get(idxCol.sortType), - // CONSTRAINT_NAME - constraintName, - // INDEX_CLASS - indexClass, - // AFFINITY - ValueBoolean.get(index.getIndexType().isAffinity()) - ); - } - } - } - break; - } - case TABLE_TYPES: { - add(rows, TableType.TABLE.toString()); - add(rows, TableType.TABLE_LINK.toString()); - add(rows, TableType.SYSTEM_TABLE.toString()); - add(rows, TableType.VIEW.toString()); - add(rows, TableType.EXTERNAL_TABLE_ENGINE.toString()); - break; - } - case TYPE_INFO: { - for (DataType t : DataType.getTypes()) { - if (t.hidden || t.sqlType == Value.NULL) { - continue; - } - add(rows, - // TYPE_NAME - t.name, - // DATA_TYPE - ValueInt.get(t.sqlType), - // PRECISION - ValueInt.get(MathUtils.convertLongToInt(t.maxPrecision)), - // PREFIX - t.prefix, - // SUFFIX - t.suffix, - // PARAMS - t.params, - // AUTO_INCREMENT - ValueBoolean.get(t.autoIncrement), - // MINIMUM_SCALE - ValueShort.get((short) t.minScale), - // MAXIMUM_SCALE - ValueShort.get((short) t.maxScale), - // RADIX - t.decimal ? 
ValueInt.get(10) : null, - // POS - ValueInt.get(t.sqlTypePos), - // CASE_SENSITIVE - ValueBoolean.get(t.caseSensitive), - // NULLABLE - ValueShort.get((short) DatabaseMetaData.typeNullable), - // SEARCHABLE - ValueShort.get((short) DatabaseMetaData.typeSearchable) - ); - } - break; - } - case CATALOGS: { - add(rows, catalog); - break; - } - case SETTINGS: { - for (Setting s : database.getAllSettings()) { - String value = s.getStringValue(); - if (value == null) { - value = Integer.toString(s.getIntValue()); - } - add(rows, - identifier(s.getName()), - value - ); - } - add(rows, "info.BUILD_ID", "" + Constants.BUILD_ID); - add(rows, "info.VERSION_MAJOR", "" + Constants.VERSION_MAJOR); - add(rows, "info.VERSION_MINOR", "" + Constants.VERSION_MINOR); - add(rows, "info.VERSION", Constants.getFullVersion()); - if (admin) { - String[] settings = { - "java.runtime.version", "java.vm.name", - "java.vendor", "os.name", "os.arch", "os.version", - "sun.os.patch.level", "file.separator", - "path.separator", "line.separator", "user.country", - "user.language", "user.variant", "file.encoding" }; - for (String s : settings) { - add(rows, "property." + s, Utils.getProperty(s, "")); - } - } - add(rows, "EXCLUSIVE", database.getExclusiveSession() == null ? - "FALSE" : "TRUE"); - add(rows, "MODE", database.getMode().getName()); - add(rows, "MULTI_THREADED", database.isMultiThreaded() ? 
"1" : "0"); - add(rows, "QUERY_TIMEOUT", Integer.toString(session.getQueryTimeout())); - add(rows, "RETENTION_TIME", Integer.toString(database.getRetentionTime())); - add(rows, "LOG", Integer.toString(database.getLogMode())); - // database settings - HashMap s = database.getSettings().getSettings(); - ArrayList settingNames = new ArrayList<>(s.size()); - settingNames.addAll(s.keySet()); - Collections.sort(settingNames); - for (String k : settingNames) { - add(rows, k, s.get(k)); - } - if (database.isPersistent()) { - PageStore pageStore = database.getPageStore(); - if (pageStore != null) { - add(rows, "info.FILE_WRITE_TOTAL", - Long.toString(pageStore.getWriteCountTotal())); - add(rows, "info.FILE_WRITE", - Long.toString(pageStore.getWriteCount())); - add(rows, "info.FILE_READ", - Long.toString(pageStore.getReadCount())); - add(rows, "info.PAGE_COUNT", - Integer.toString(pageStore.getPageCount())); - add(rows, "info.PAGE_SIZE", - Integer.toString(pageStore.getPageSize())); - add(rows, "info.CACHE_MAX_SIZE", - Integer.toString(pageStore.getCache().getMaxMemory())); - add(rows, "info.CACHE_SIZE", - Integer.toString(pageStore.getCache().getMemory())); - } - Store store = database.getStore(); - if (store != null) { - MVStore mvStore = store.getMvStore(); - FileStore fs = mvStore.getFileStore(); - add(rows, "info.FILE_WRITE", - Long.toString(fs.getWriteCount())); - add(rows, "info.FILE_READ", - Long.toString(fs.getReadCount())); - add(rows, "info.UPDATE_FAILURE_PERCENT", - String.format(Locale.ENGLISH, "%.2f%%", 100 * mvStore.getUpdateFailureRatio())); - long size; - try { - size = fs.getFile().size(); - } catch (IOException e) { - throw DbException.convertIOException(e, "Can not get size"); - } - int pageSize = 4 * 1024; - long pageCount = size / pageSize; - add(rows, "info.PAGE_COUNT", - Long.toString(pageCount)); - add(rows, "info.PAGE_SIZE", - Integer.toString(mvStore.getPageSplitSize())); - add(rows, "info.CACHE_MAX_SIZE", - 
Integer.toString(mvStore.getCacheSize())); - add(rows, "info.CACHE_SIZE", - Integer.toString(mvStore.getCacheSizeUsed())); - } - } - break; - } - case HELP: { - String resource = "/org/h2/res/help.csv"; - try { - byte[] data = Utils.getResource(resource); - Reader reader = new InputStreamReader( - new ByteArrayInputStream(data)); - Csv csv = new Csv(); - csv.setLineCommentCharacter('#'); - ResultSet rs = csv.read(reader, null); - for (int i = 0; rs.next(); i++) { - add(rows, - // ID - ValueInt.get(i), - // SECTION - rs.getString(1).trim(), - // TOPIC - rs.getString(2).trim(), - // SYNTAX - rs.getString(3).trim(), - // TEXT - rs.getString(4).trim() - ); - } - } catch (Exception e) { - throw DbException.convert(e); - } - break; - } - case SEQUENCES: { - for (SchemaObject obj : database.getAllSchemaObjects( - DbObject.SEQUENCE)) { - Sequence s = (Sequence) obj; - add(rows, - // SEQUENCE_CATALOG - catalog, - // SEQUENCE_SCHEMA - s.getSchema().getName(), - // SEQUENCE_NAME - s.getName(), - // CURRENT_VALUE - ValueLong.get(s.getCurrentValue()), - // INCREMENT - ValueLong.get(s.getIncrement()), - // IS_GENERATED - ValueBoolean.get(s.getBelongsToTable()), - // REMARKS - replaceNullWithEmpty(s.getComment()), - // CACHE - ValueLong.get(s.getCacheSize()), - // MIN_VALUE - ValueLong.get(s.getMinValue()), - // MAX_VALUE - ValueLong.get(s.getMaxValue()), - // IS_CYCLE - ValueBoolean.get(s.getCycle()), - // ID - ValueInt.get(s.getId()) - ); - } - break; - } - case USERS: { - for (User u : database.getAllUsers()) { - if (admin || session.getUser() == u) { - add(rows, - // NAME - identifier(u.getName()), - // ADMIN - String.valueOf(u.isAdmin()), - // REMARKS - replaceNullWithEmpty(u.getComment()), - // ID - ValueInt.get(u.getId()) - ); - } - } - break; - } - case ROLES: { - for (Role r : database.getAllRoles()) { - if (admin || session.getUser().isRoleGranted(r)) { - add(rows, - // NAME - identifier(r.getName()), - // REMARKS - replaceNullWithEmpty(r.getComment()), - // ID - 
ValueInt.get(r.getId()) - ); - } - } - break; - } - case RIGHTS: { - if (admin) { - for (Right r : database.getAllRights()) { - Role role = r.getGrantedRole(); - DbObject grantee = r.getGrantee(); - String rightType = grantee.getType() == DbObject.USER ? "USER" : "ROLE"; - if (role == null) { - DbObject object = r.getGrantedObject(); - Schema schema = null; - Table table = null; - if (object != null) { - if (object instanceof Schema) { - schema = (Schema) object; - } else if (object instanceof Table) { - table = (Table) object; - schema = table.getSchema(); - } - } - String tableName = (table != null) ? table.getName() : ""; - String schemaName = (schema != null) ? schema.getName() : ""; - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - add(rows, - // GRANTEE - identifier(grantee.getName()), - // GRANTEETYPE - rightType, - // GRANTEDROLE - "", - // RIGHTS - r.getRights(), - // TABLE_SCHEMA - schemaName, - // TABLE_NAME - tableName, - // ID - ValueInt.get(r.getId()) - ); - } else { - add(rows, - // GRANTEE - identifier(grantee.getName()), - // GRANTEETYPE - rightType, - // GRANTEDROLE - identifier(role.getName()), - // RIGHTS - "", - // TABLE_SCHEMA - "", - // TABLE_NAME - "", - // ID - ValueInt.get(r.getId()) - ); - } - } - } - break; - } - case FUNCTION_ALIASES: { - for (SchemaObject aliasAsSchemaObject : - database.getAllSchemaObjects(DbObject.FUNCTION_ALIAS)) { - FunctionAlias alias = (FunctionAlias) aliasAsSchemaObject; - JavaMethod[] methods; - try { - methods = alias.getJavaMethods(); - } catch (DbException e) { - methods = new JavaMethod[0]; - } - for (FunctionAlias.JavaMethod method : methods) { - add(rows, - // ALIAS_CATALOG - catalog, - // ALIAS_SCHEMA - alias.getSchema().getName(), - // ALIAS_NAME - alias.getName(), - // JAVA_CLASS - alias.getJavaClassName(), - // JAVA_METHOD - alias.getJavaMethodName(), - // DATA_TYPE - ValueInt.get(DataType.convertTypeToSQLType(method.getDataType())), - // TYPE_NAME - 
DataType.getDataType(method.getDataType()).name, - // COLUMN_COUNT - ValueInt.get(method.getParameterCount()), - // RETURNS_RESULT - ValueShort.get(method.getDataType() == Value.NULL - ? (short) DatabaseMetaData.procedureNoResult - : (short) DatabaseMetaData.procedureReturnsResult), - // REMARKS - replaceNullWithEmpty(alias.getComment()), - // ID - ValueInt.get(alias.getId()), - // SOURCE - alias.getSource() - // when adding more columns, see also below - ); - } - } - for (UserAggregate agg : database.getAllAggregates()) { - add(rows, - // ALIAS_CATALOG - catalog, - // ALIAS_SCHEMA - database.getMainSchema().getName(), - // ALIAS_NAME - agg.getName(), - // JAVA_CLASS - agg.getJavaClassName(), - // JAVA_METHOD - "", - // DATA_TYPE - ValueInt.get(Types.NULL), - // TYPE_NAME - DataType.getDataType(Value.NULL).name, - // COLUMN_COUNT - ValueInt.get(1), - // RETURNS_RESULT - ValueShort.get((short) DatabaseMetaData.procedureReturnsResult), - // REMARKS - replaceNullWithEmpty(agg.getComment()), - // ID - ValueInt.get(agg.getId()), - // SOURCE - "" - // when adding more columns, see also below - ); - } - break; - } - case FUNCTION_COLUMNS: { - for (SchemaObject aliasAsSchemaObject : - database.getAllSchemaObjects(DbObject.FUNCTION_ALIAS)) { - FunctionAlias alias = (FunctionAlias) aliasAsSchemaObject; - JavaMethod[] methods; - try { - methods = alias.getJavaMethods(); - } catch (DbException e) { - methods = new JavaMethod[0]; - } - for (FunctionAlias.JavaMethod method : methods) { - // Add return column index 0 - if (method.getDataType() != Value.NULL) { - DataType dt = DataType.getDataType(method.getDataType()); - add(rows, - // ALIAS_CATALOG - catalog, - // ALIAS_SCHEMA - alias.getSchema().getName(), - // ALIAS_NAME - alias.getName(), - // JAVA_CLASS - alias.getJavaClassName(), - // JAVA_METHOD - alias.getJavaMethodName(), - // COLUMN_COUNT - ValueInt.get(method.getParameterCount()), - // POS - ValueInt.get(0), - // COLUMN_NAME - "P0", - // DATA_TYPE - 
ValueInt.get(DataType.convertTypeToSQLType(method.getDataType())), - // TYPE_NAME - dt.name, - // PRECISION - ValueInt.get(MathUtils.convertLongToInt(dt.defaultPrecision)), - // SCALE - ValueShort.get((short) dt.defaultScale), - // RADIX - ValueShort.get((short) 10), - // NULLABLE - ValueShort.get((short) DatabaseMetaData.columnNullableUnknown), - // COLUMN_TYPE - ValueShort.get((short) DatabaseMetaData.procedureColumnReturn), - // REMARKS - "", - // COLUMN_DEFAULT - null - ); - } - Class[] columnList = method.getColumnClasses(); - for (int k = 0; k < columnList.length; k++) { - if (method.hasConnectionParam() && k == 0) { - continue; - } - Class clazz = columnList[k]; - int dataType = DataType.getTypeFromClass(clazz); - DataType dt = DataType.getDataType(dataType); - add(rows, - // ALIAS_CATALOG - catalog, - // ALIAS_SCHEMA - alias.getSchema().getName(), - // ALIAS_NAME - alias.getName(), - // JAVA_CLASS - alias.getJavaClassName(), - // JAVA_METHOD - alias.getJavaMethodName(), - // COLUMN_COUNT - ValueInt.get(method.getParameterCount()), - // POS - ValueInt.get(k + (method.hasConnectionParam() ? 0 : 1)), - // COLUMN_NAME - "P" + (k + 1), - // DATA_TYPE - ValueInt.get(DataType.convertTypeToSQLType(dt.type)), - // TYPE_NAME - dt.name, - // PRECISION - ValueInt.get(MathUtils.convertLongToInt(dt.defaultPrecision)), - // SCALE - ValueShort.get((short) dt.defaultScale), - // RADIX - ValueShort.get((short) 10), - // NULLABLE - ValueShort.get(clazz.isPrimitive() - ? 
(short) DatabaseMetaData.columnNoNulls - : (short) DatabaseMetaData.columnNullable), - // COLUMN_TYPE - ValueShort.get((short) DatabaseMetaData.procedureColumnIn), - // REMARKS - "", - // COLUMN_DEFAULT - null - ); - } - } - } - break; - } - case SCHEMATA: { - String collation = database.getCompareMode().getName(); - for (Schema schema : database.getAllSchemas()) { - add(rows, - // CATALOG_NAME - catalog, - // SCHEMA_NAME - schema.getName(), - // SCHEMA_OWNER - identifier(schema.getOwner().getName()), - // DEFAULT_CHARACTER_SET_NAME - CHARACTER_SET_NAME, - // DEFAULT_COLLATION_NAME - collation, - // IS_DEFAULT - ValueBoolean.get(schema.getId() == Constants.MAIN_SCHEMA_ID), - // REMARKS - replaceNullWithEmpty(schema.getComment()), - // ID - ValueInt.get(schema.getId()) - ); - } - break; - } - case TABLE_PRIVILEGES: { - for (Right r : database.getAllRights()) { - DbObject object = r.getGrantedObject(); - if (!(object instanceof Table)) { - continue; - } - Table table = (Table) object; - if (hideTable(table, session)) { - continue; - } - String tableName = table.getName(); - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - addPrivileges(rows, r.getGrantee(), catalog, table, null, - r.getRightMask()); - } - break; - } - case COLUMN_PRIVILEGES: { - for (Right r : database.getAllRights()) { - DbObject object = r.getGrantedObject(); - if (!(object instanceof Table)) { - continue; - } - Table table = (Table) object; - if (hideTable(table, session)) { - continue; - } - String tableName = table.getName(); - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - DbObject grantee = r.getGrantee(); - int mask = r.getRightMask(); - for (Column column : table.getColumns()) { - addPrivileges(rows, grantee, catalog, table, - column.getName(), mask); - } - } - break; - } - case COLLATIONS: { - for (Locale l : Collator.getAvailableLocales()) { - add(rows, - // NAME - CompareMode.getName(l), - // KEY - l.toString() - ); - } - break; - 
} - case VIEWS: { - for (Table table : getAllTables(session)) { - if (table.getTableType() != TableType.VIEW) { - continue; - } - String tableName = table.getName(); - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - TableView view = (TableView) table; - add(rows, - // TABLE_CATALOG - catalog, - // TABLE_SCHEMA - table.getSchema().getName(), - // TABLE_NAME - tableName, - // VIEW_DEFINITION - table.getCreateSQL(), - // CHECK_OPTION - "NONE", - // IS_UPDATABLE - "NO", - // STATUS - view.isInvalid() ? "INVALID" : "VALID", - // REMARKS - replaceNullWithEmpty(view.getComment()), - // ID - ValueInt.get(view.getId()) - ); - } - break; - } - case IN_DOUBT: { - ArrayList prepared = database.getInDoubtTransactions(); - if (prepared != null && admin) { - for (InDoubtTransaction prep : prepared) { - add(rows, - // TRANSACTION - prep.getTransactionName(), - // STATE - prep.getState() - ); - } - } - break; - } - case CROSS_REFERENCES: { - for (SchemaObject obj : database.getAllSchemaObjects( - DbObject.CONSTRAINT)) { - Constraint constraint = (Constraint) obj; - if (constraint.getConstraintType() != Constraint.Type.REFERENTIAL) { - continue; - } - ConstraintReferential ref = (ConstraintReferential) constraint; - IndexColumn[] cols = ref.getColumns(); - IndexColumn[] refCols = ref.getRefColumns(); - Table tab = ref.getTable(); - Table refTab = ref.getRefTable(); - String tableName = refTab.getName(); - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - ValueShort update = ValueShort.get(getRefAction(ref.getUpdateAction())); - ValueShort delete = ValueShort.get(getRefAction(ref.getDeleteAction())); - for (int j = 0; j < cols.length; j++) { - add(rows, - // PKTABLE_CATALOG - catalog, - // PKTABLE_SCHEMA - refTab.getSchema().getName(), - // PKTABLE_NAME - refTab.getName(), - // PKCOLUMN_NAME - refCols[j].column.getName(), - // FKTABLE_CATALOG - catalog, - // FKTABLE_SCHEMA - tab.getSchema().getName(), - // FKTABLE_NAME - 
tab.getName(), - // FKCOLUMN_NAME - cols[j].column.getName(), - // ORDINAL_POSITION - ValueShort.get((short) (j + 1)), - // UPDATE_RULE - update, - // DELETE_RULE - delete, - // FK_NAME - ref.getName(), - // PK_NAME - ref.getUniqueIndex().getName(), - // DEFERRABILITY - ValueShort.get((short) DatabaseMetaData.importedKeyNotDeferrable) - ); - } - } - break; - } - case CONSTRAINTS: { - for (SchemaObject obj : database.getAllSchemaObjects( - DbObject.CONSTRAINT)) { - Constraint constraint = (Constraint) obj; - Constraint.Type constraintType = constraint.getConstraintType(); - String checkExpression = null; - IndexColumn[] indexColumns = null; - Table table = constraint.getTable(); - if (hideTable(table, session)) { - continue; - } - Index index = constraint.getUniqueIndex(); - String uniqueIndexName = null; - if (index != null) { - uniqueIndexName = index.getName(); - } - String tableName = table.getName(); - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - if (constraintType == Constraint.Type.CHECK) { - checkExpression = ((ConstraintCheck) constraint).getExpression().getSQL(true); - } else if (constraintType == Constraint.Type.UNIQUE || - constraintType == Constraint.Type.PRIMARY_KEY) { - indexColumns = ((ConstraintUnique) constraint).getColumns(); - } else if (constraintType == Constraint.Type.REFERENTIAL) { - indexColumns = ((ConstraintReferential) constraint).getColumns(); - } - String columnList = null; - if (indexColumns != null) { - StringBuilder builder = new StringBuilder(); - for (int i = 0, length = indexColumns.length; i < length; i++) { - if (i > 0) { - builder.append(','); - } - builder.append(indexColumns[i].column.getName()); - } - columnList = builder.toString(); - } - add(rows, - // CONSTRAINT_CATALOG - catalog, - // CONSTRAINT_SCHEMA - constraint.getSchema().getName(), - // CONSTRAINT_NAME - constraint.getName(), - // CONSTRAINT_TYPE - constraintType == Constraint.Type.PRIMARY_KEY ? 
- constraintType.getSqlName() : constraintType.name(), - // TABLE_CATALOG - catalog, - // TABLE_SCHEMA - table.getSchema().getName(), - // TABLE_NAME - tableName, - // UNIQUE_INDEX_NAME - uniqueIndexName, - // CHECK_EXPRESSION - checkExpression, - // COLUMN_LIST - columnList, - // REMARKS - replaceNullWithEmpty(constraint.getComment()), - // SQL - constraint.getCreateSQL(), - // ID - ValueInt.get(constraint.getId()) - ); - } - break; - } - case CONSTANTS: { - for (SchemaObject obj : database.getAllSchemaObjects( - DbObject.CONSTANT)) { - Constant constant = (Constant) obj; - ValueExpression expr = constant.getValue(); - add(rows, - // CONSTANT_CATALOG - catalog, - // CONSTANT_SCHEMA - constant.getSchema().getName(), - // CONSTANT_NAME - constant.getName(), - // DATA_TYPE - ValueInt.get(DataType.convertTypeToSQLType(expr.getType().getValueType())), - // REMARKS - replaceNullWithEmpty(constant.getComment()), - // SQL - expr.getSQL(true), - // ID - ValueInt.get(constant.getId()) - ); - } - break; - } - case DOMAINS: { - for (Domain dt : database.getAllDomains()) { - Column col = dt.getColumn(); - add(rows, - // DOMAIN_CATALOG - catalog, - // DOMAIN_SCHEMA - database.getMainSchema().getName(), - // DOMAIN_NAME - dt.getName(), - // COLUMN_DEFAULT - col.getDefaultSQL(), - // IS_NULLABLE - col.isNullable() ? 
"YES" : "NO", - // DATA_TYPE - ValueInt.get(col.getDataType().sqlType), - // PRECISION - ValueInt.get(col.getPrecisionAsInt()), - // SCALE - ValueInt.get(col.getType().getScale()), - // TYPE_NAME - col.getDataType().name, - // SELECTIVITY INT - ValueInt.get(col.getSelectivity()), - // CHECK_CONSTRAINT - col.getCheckConstraintSQL(session, "VALUE"), - // REMARKS - replaceNullWithEmpty(dt.getComment()), - // SQL - dt.getCreateSQL(), - // ID - ValueInt.get(dt.getId()) - ); - } - break; - } - case TRIGGERS: { - for (SchemaObject obj : database.getAllSchemaObjects( - DbObject.TRIGGER)) { - TriggerObject trigger = (TriggerObject) obj; - Table table = trigger.getTable(); - add(rows, - // TRIGGER_CATALOG - catalog, - // TRIGGER_SCHEMA - trigger.getSchema().getName(), - // TRIGGER_NAME - trigger.getName(), - // TRIGGER_TYPE - trigger.getTypeNameList(new StringBuilder()).toString(), - // TABLE_CATALOG - catalog, - // TABLE_SCHEMA - table.getSchema().getName(), - // TABLE_NAME - table.getName(), - // BEFORE - ValueBoolean.get(trigger.isBefore()), - // JAVA_CLASS - trigger.getTriggerClassName(), - // QUEUE_SIZE - ValueInt.get(trigger.getQueueSize()), - // NO_WAIT - ValueBoolean.get(trigger.isNoWait()), - // REMARKS - replaceNullWithEmpty(trigger.getComment()), - // SQL - trigger.getCreateSQL(), - // ID - ValueInt.get(trigger.getId()) - ); - } - break; - } - case SESSIONS: { - for (Session s : database.getSessions(false)) { - if (admin || s == session) { - Command command = s.getCurrentCommand(); - int blockingSessionId = s.getBlockingSessionId(); - add(rows, - // ID - ValueInt.get(s.getId()), - // USER_NAME - s.getUser().getName(), - // SESSION_START - DateTimeUtils.timestampTimeZoneFromMillis(s.getSessionStart()), - // STATEMENT - command == null ? null : command.toString(), - // STATEMENT_START - command == null ? 
null : s.getCurrentCommandStart(), - // CONTAINS_UNCOMMITTED - ValueBoolean.get(s.containsUncommitted()), - // STATE - String.valueOf(s.getState()), - // BLOCKER_ID - blockingSessionId == 0 ? null : ValueInt.get(blockingSessionId) - ); - } - } - break; - } - case LOCKS: { - for (Session s : database.getSessions(false)) { - if (admin || s == session) { - for (Table table : s.getLocks()) { - add(rows, - // TABLE_SCHEMA - table.getSchema().getName(), - // TABLE_NAME - table.getName(), - // SESSION_ID - ValueInt.get(s.getId()), - // LOCK_TYPE - table.isLockedExclusivelyBy(s) ? "WRITE" : "READ" - ); - } - } - } - break; - } - case SESSION_STATE: { - for (String name : session.getVariableNames()) { - Value v = session.getVariable(name); - StringBuilder builder = new StringBuilder().append("SET @").append(name).append(' '); - v.getSQL(builder); - add(rows, - // KEY - "@" + name, - builder.toString() - ); - } - for (Table table : session.getLocalTempTables()) { - add(rows, - // KEY - "TABLE " + table.getName(), - // SQL - table.getCreateSQL() - ); - } - String[] path = session.getSchemaSearchPath(); - if (path != null && path.length > 0) { - StringBuilder builder = new StringBuilder("SET SCHEMA_SEARCH_PATH "); - for (int i = 0, l = path.length; i < l; i++) { - if (i > 0) { - builder.append(", "); - } - StringUtils.quoteIdentifier(builder, path[i]); - } - add(rows, - // KEY - "SCHEMA_SEARCH_PATH", - // SQL - builder.toString() - ); - } - String schema = session.getCurrentSchemaName(); - if (schema != null) { - add(rows, - // KEY - "SCHEMA", - // SQL - StringUtils.quoteIdentifier(new StringBuilder("SET SCHEMA "), schema).toString() - ); - } - break; - } - case QUERY_STATISTICS: { - QueryStatisticsData control = database.getQueryStatisticsData(); - if (control != null) { - for (QueryStatisticsData.QueryEntry entry : control.getQueries()) { - add(rows, - // SQL_STATEMENT - entry.sqlStatement, - // EXECUTION_COUNT - ValueInt.get(entry.count), - // MIN_EXECUTION_TIME - 
ValueDouble.get(entry.executionTimeMinNanos / 1_000_000d), - // MAX_EXECUTION_TIME - ValueDouble.get(entry.executionTimeMaxNanos / 1_000_000d), - // CUMULATIVE_EXECUTION_TIME - ValueDouble.get(entry.executionTimeCumulativeNanos / 1_000_000d), - // AVERAGE_EXECUTION_TIME - ValueDouble.get(entry.executionTimeMeanNanos / 1_000_000d), - // STD_DEV_EXECUTION_TIME - ValueDouble.get(entry.getExecutionTimeStandardDeviation() / 1_000_000d), - // MIN_ROW_COUNT - ValueInt.get(entry.rowCountMin), - // MAX_ROW_COUNT - ValueInt.get(entry.rowCountMax), - // CUMULATIVE_ROW_COUNT - ValueLong.get(entry.rowCountCumulative), - // AVERAGE_ROW_COUNT - ValueDouble.get(entry.rowCountMean), - // STD_DEV_ROW_COUNT - ValueDouble.get(entry.getRowCountStandardDeviation()) - ); - } - } - break; - } - case SYNONYMS: { - for (TableSynonym synonym : database.getAllSynonyms()) { - add(rows, - // SYNONYM_CATALOG - catalog, - // SYNONYM_SCHEMA - synonym.getSchema().getName(), - // SYNONYM_NAME - synonym.getName(), - // SYNONYM_FOR - synonym.getSynonymForName(), - // SYNONYM_FOR_SCHEMA - synonym.getSynonymForSchema().getName(), - // TYPE NAME - "SYNONYM", - // STATUS - "VALID", - // REMARKS - replaceNullWithEmpty(synonym.getComment()), - // ID - ValueInt.get(synonym.getId()) - ); - } - break; - } - case TABLE_CONSTRAINTS: { - for (SchemaObject obj : database.getAllSchemaObjects(DbObject.CONSTRAINT)) { - Constraint constraint = (Constraint) obj; - Constraint.Type constraintType = constraint.getConstraintType(); - Table table = constraint.getTable(); - if (hideTable(table, session)) { - continue; - } - String tableName = table.getName(); - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - add(rows, - // CONSTRAINT_CATALOG - catalog, - // CONSTRAINT_SCHEMA - constraint.getSchema().getName(), - // CONSTRAINT_NAME - constraint.getName(), - // CONSTRAINT_TYPE - constraintType.getSqlName(), - // TABLE_CATALOG - catalog, - // TABLE_SCHEMA - table.getSchema().getName(), - // 
TABLE_NAME - tableName, - // IS_DEFERRABLE - "NO", - // INITIALLY_DEFERRED - "NO" - ); - } - break; - } - case KEY_COLUMN_USAGE: { - for (SchemaObject obj : database.getAllSchemaObjects(DbObject.CONSTRAINT)) { - Constraint constraint = (Constraint) obj; - Constraint.Type constraintType = constraint.getConstraintType(); - IndexColumn[] indexColumns = null; - Table table = constraint.getTable(); - if (hideTable(table, session)) { - continue; - } - String tableName = table.getName(); - if (!checkIndex(session, tableName, indexFrom, indexTo)) { - continue; - } - if (constraintType == Constraint.Type.UNIQUE || - constraintType == Constraint.Type.PRIMARY_KEY) { - indexColumns = ((ConstraintUnique) constraint).getColumns(); - } else if (constraintType == Constraint.Type.REFERENTIAL) { - indexColumns = ((ConstraintReferential) constraint).getColumns(); - } - if (indexColumns == null) { - continue; - } - ConstraintUnique referenced; - if (constraintType == Constraint.Type.REFERENTIAL) { - referenced = lookupUniqueForReferential((ConstraintReferential) constraint); - } else { - referenced = null; - } - for (int i = 0; i < indexColumns.length; i++) { - IndexColumn indexColumn = indexColumns[i]; - ValueInt ordinalPosition = ValueInt.get(i + 1); - ValueInt positionInUniqueConstraint; - if (constraintType == Constraint.Type.REFERENTIAL) { - positionInUniqueConstraint = ordinalPosition; - if (referenced != null) { - Column c = ((ConstraintReferential) constraint).getRefColumns()[i].column; - IndexColumn[] refColumns = referenced.getColumns(); - for (int j = 0; j < refColumns.length; j++) { - if (refColumns[j].column.equals(c)) { - positionInUniqueConstraint = ValueInt.get(j + 1); - break; - } - } - } - } else { - positionInUniqueConstraint = null; - } - add(rows, - // CONSTRAINT_CATALOG - catalog, - // CONSTRAINT_SCHEMA - constraint.getSchema().getName(), - // CONSTRAINT_NAME - constraint.getName(), - // TABLE_CATALOG - catalog, - // TABLE_SCHEMA - table.getSchema().getName(), - 
// TABLE_NAME - tableName, - // COLUMN_NAME - indexColumn.columnName, - // ORDINAL_POSITION - ordinalPosition, - // POSITION_IN_UNIQUE_CONSTRAINT - positionInUniqueConstraint - ); - } - } - break; - } - case REFERENTIAL_CONSTRAINTS: { - for (SchemaObject obj : database.getAllSchemaObjects(DbObject.CONSTRAINT)) { - if (((Constraint) obj).getConstraintType() != Constraint.Type.REFERENTIAL) { - continue; - } - ConstraintReferential constraint = (ConstraintReferential) obj; - Table table = constraint.getTable(); - if (hideTable(table, session)) { - continue; - } - // Should be referenced unique constraint, but H2 uses indexes instead. - // So try to find matching unique constraint first and there is no such - // constraint use index name to return something. - SchemaObject unique = lookupUniqueForReferential(constraint); - if (unique == null) { - unique = constraint.getUniqueIndex(); - } - add(rows, - // CONSTRAINT_CATALOG - catalog, - // CONSTRAINT_SCHEMA - constraint.getSchema().getName(), - // CONSTRAINT_NAME - constraint.getName(), - // UNIQUE_CONSTRAINT_CATALOG - catalog, - // UNIQUE_CONSTRAINT_SCHEMA - unique.getSchema().getName(), - // UNIQUE_CONSTRAINT_NAME - unique.getName(), - // MATCH_OPTION - "NONE", - // UPDATE_RULE - constraint.getUpdateAction().getSqlName(), - // DELETE_RULE - constraint.getDeleteAction().getSqlName() - ); - } - break; - } - default: - DbException.throwInternalError("type="+type); - } - return rows; - } - - private static short getRefAction(ConstraintActionType action) { - switch (action) { - case CASCADE: - return DatabaseMetaData.importedKeyCascade; - case RESTRICT: - return DatabaseMetaData.importedKeyRestrict; - case SET_DEFAULT: - return DatabaseMetaData.importedKeySetDefault; - case SET_NULL: - return DatabaseMetaData.importedKeySetNull; - default: - throw DbException.throwInternalError("action="+action); - } - } - - private static ConstraintUnique lookupUniqueForReferential(ConstraintReferential referential) { - Table table = 
referential.getRefTable(); - for (Constraint c : table.getConstraints()) { - if (c.getConstraintType() == Constraint.Type.UNIQUE) { - ConstraintUnique unique = (ConstraintUnique) c; - if (unique.getReferencedColumns(table).equals(referential.getReferencedColumns(table))) { - return unique; - } - } - } - return null; - } + public abstract ArrayList generateRows(SessionLocal session, SearchRow first, SearchRow last); @Override - public void removeRow(Session session, Row row) { - throw DbException.getUnsupportedException("META"); + public boolean isInsertable() { + return false; } @Override - public void addRow(Session session, Row row) { + public final void removeRow(SessionLocal session, Row row) { throw DbException.getUnsupportedException("META"); } @Override - public void removeChildrenAndResources(Session session) { + public final void addRow(SessionLocal session, Row row) { throw DbException.getUnsupportedException("META"); } @Override - public void close(Session session) { - // nothing to do + public final void removeChildrenAndResources(SessionLocal session) { + throw DbException.getUnsupportedException("META"); } @Override - public void unlock(Session s) { + public final void close(SessionLocal session) { // nothing to do } - private void addPrivileges(ArrayList rows, DbObject grantee, - String catalog, Table table, String column, int rightMask) { - if ((rightMask & Right.SELECT) != 0) { - addPrivilege(rows, grantee, catalog, table, column, "SELECT"); - } - if ((rightMask & Right.INSERT) != 0) { - addPrivilege(rows, grantee, catalog, table, column, "INSERT"); - } - if ((rightMask & Right.UPDATE) != 0) { - addPrivilege(rows, grantee, catalog, table, column, "UPDATE"); - } - if ((rightMask & Right.DELETE) != 0) { - addPrivilege(rows, grantee, catalog, table, column, "DELETE"); - } - } - - private void addPrivilege(ArrayList rows, DbObject grantee, - String catalog, Table table, String column, String right) { - String isGrantable = "NO"; - if (grantee.getType() 
== DbObject.USER) { - User user = (User) grantee; - if (user.isAdmin()) { - // the right is grantable if the grantee is an admin - isGrantable = "YES"; - } - } - if (column == null) { - add(rows, - // GRANTOR - null, - // GRANTEE - identifier(grantee.getName()), - // TABLE_CATALOG - catalog, - // TABLE_SCHEMA - table.getSchema().getName(), - // TABLE_NAME - table.getName(), - // PRIVILEGE_TYPE - right, - // IS_GRANTABLE - isGrantable - ); - } else { - add(rows, - // GRANTOR - null, - // GRANTEE - identifier(grantee.getName()), - // TABLE_CATALOG - catalog, - // TABLE_SCHEMA - table.getSchema().getName(), - // TABLE_NAME - table.getName(), - // COLUMN_NAME - column, - // PRIVILEGE_TYPE - right, - // IS_GRANTABLE - isGrantable - ); - } - } - - private void add(ArrayList rows, Object... stringsOrValues) { + /** + * Add a row to a list. + * + * @param session the session + * @param rows the original row list + * @param stringsOrValues the values, or strings + */ + protected final void add(SessionLocal session, ArrayList rows, Object... stringsOrValues) { Value[] values = new Value[stringsOrValues.length]; for (int i = 0; i < stringsOrValues.length; i++) { Object s = stringsOrValues[i]; - Value v = s == null ? ValueNull.INSTANCE : s instanceof String ? ValueString.get((String) s) : (Value) s; - values[i] = columns[i].convert(v); + Value v = s == null ? ValueNull.INSTANCE : s instanceof String ? 
ValueVarchar.get((String) s) : (Value) s; + values[i] = columns[i].convert(session, v); } - Row row = database.createRow(values, 1); - row.setKey(rows.size()); - rows.add(row); + rows.add(Row.get(values, 1, rows.size())); } @Override - public void checkRename() { + public final void checkRename() { throw DbException.getUnsupportedException("META"); } @Override - public void checkSupportAlter() { + public final void checkSupportAlter() { throw DbException.getUnsupportedException("META"); } @Override - public void truncate(Session session) { + public final long truncate(SessionLocal session) { throw DbException.getUnsupportedException("META"); } @Override - public long getRowCount(Session session) { - throw DbException.throwInternalError(toString()); + public long getRowCount(SessionLocal session) { + throw DbException.getInternalError(toString()); } @Override - public boolean canGetRowCount() { + public boolean canGetRowCount(SessionLocal session) { return false; } @Override - public boolean canDrop() { + public final boolean canDrop() { return false; } @Override - public TableType getTableType() { + public final TableType getTableType() { return TableType.SYSTEM_TABLE; } @Override - public Index getScanIndex(Session session) { + public final Index getScanIndex(SessionLocal session) { return new MetaIndex(this, IndexColumn.wrap(columns), true); } @Override - public ArrayList getIndexes() { + public final ArrayList getIndexes() { ArrayList list = new ArrayList<>(2); if (metaIndex == null) { return list; @@ -2341,50 +260,17 @@ public ArrayList getIndexes() { } @Override - public long getMaxDataModificationId() { - switch (type) { - case SETTINGS: - case IN_DOUBT: - case SESSIONS: - case LOCKS: - case SESSION_STATE: - return Long.MAX_VALUE; - } - return database.getModificationDataId(); - } - - @Override - public Index getUniqueIndex() { - return null; - } - - /** - * Get the number of meta table types. Supported meta table - * types are 0 .. this value - 1. 
- * - * @return the number of meta table types - */ - public static int getMetaTableTypeCount() { - return META_TABLE_TYPE_COUNT; - } - - @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return ROW_COUNT_APPROXIMATION; } @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public boolean isDeterministic() { + public final boolean isDeterministic() { return true; } @Override - public boolean canReference() { + public final boolean canReference() { return false; } diff --git a/h2/src/main/org/h2/table/PageStoreTable.java b/h2/src/main/org/h2/table/PageStoreTable.java deleted file mode 100644 index 45f14be654..0000000000 --- a/h2/src/main/org/h2/table/PageStoreTable.java +++ /dev/null @@ -1,534 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.table; - -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.concurrent.TimeUnit; -import org.h2.api.DatabaseEventListener; -import org.h2.api.ErrorCode; -import org.h2.command.ddl.CreateTableData; -import org.h2.engine.Constants; -import org.h2.engine.DbObject; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.index.Cursor; -import org.h2.index.HashIndex; -import org.h2.index.Index; -import org.h2.index.IndexType; -import org.h2.index.NonUniqueHashIndex; -import org.h2.index.PageBtreeIndex; -import org.h2.index.PageDataIndex; -import org.h2.index.PageDelegateIndex; -import org.h2.index.ScanIndex; -import org.h2.index.SpatialTreeIndex; -import org.h2.index.TreeIndex; -import org.h2.message.DbException; -import org.h2.message.Trace; -import org.h2.result.Row; -import org.h2.schema.SchemaObject; -import org.h2.util.MathUtils; -import org.h2.util.Utils; -import org.h2.value.CompareMode; - -/** - * A table store in a PageStore. 
- */ -public class PageStoreTable extends RegularTable { - - private Index scanIndex; - private long rowCount; - - /** - * The queue of sessions waiting to lock the table. It is a FIFO queue to - * prevent starvation, since Java's synchronized locking is biased. - */ - private final ArrayDeque waitingSessions = new ArrayDeque<>(); - private final Trace traceLock; - private final ArrayList indexes = Utils.newSmallArrayList(); - private long lastModificationId; - private final PageDataIndex mainIndex; - private int changesSinceAnalyze; - private int nextAnalyze; - - public PageStoreTable(CreateTableData data) { - super(data); - nextAnalyze = database.getSettings().analyzeAuto; - if (data.persistData && database.isPersistent()) { - mainIndex = new PageDataIndex(this, data.id, - IndexColumn.wrap(getColumns()), - IndexType.createScan(data.persistData), - data.create, data.session); - scanIndex = mainIndex; - } else { - mainIndex = null; - scanIndex = new ScanIndex(this, data.id, - IndexColumn.wrap(getColumns()), IndexType.createScan(data.persistData)); - } - indexes.add(scanIndex); - traceLock = database.getTrace(Trace.LOCK); - } - - @Override - public void close(Session session) { - for (Index index : indexes) { - index.close(session); - } - } - - @Override - public Row getRow(Session session, long key) { - return scanIndex.getRow(session, key); - } - - @Override - public void addRow(Session session, Row row) { - lastModificationId = database.getNextModificationDataId(); - int i = 0; - try { - for (int size = indexes.size(); i < size; i++) { - Index index = indexes.get(i); - index.add(session, row); - checkRowCount(session, index, 1); - } - rowCount++; - } catch (Throwable e) { - try { - while (--i >= 0) { - Index index = indexes.get(i); - index.remove(session, row); - checkRowCount(session, index, 0); - } - } catch (DbException e2) { - // this could happen, for example on failure in the storage - // but if that is not the case it means there is something wrong - // 
with the database - trace.error(e2, "could not undo operation"); - throw e2; - } - throw DbException.convert(e); - } - analyzeIfRequired(session); - } - - private void checkRowCount(Session session, Index index, int offset) { - if (SysProperties.CHECK) { - if (!(index instanceof PageDelegateIndex)) { - long rc = index.getRowCount(session); - if (rc != rowCount + offset) { - DbException.throwInternalError( - "rowCount expected " + (rowCount + offset) + - " got " + rc + " " + getName() + "." + index.getName()); - } - } - } - } - - @Override - public Index getScanIndex(Session session) { - return indexes.get(0); - } - - @Override - public Index getUniqueIndex() { - for (Index idx : indexes) { - if (idx.getIndexType().isUnique()) { - return idx; - } - } - return null; - } - - @Override - public ArrayList getIndexes() { - return indexes; - } - - @Override - public Index addIndex(Session session, String indexName, int indexId, - IndexColumn[] cols, IndexType indexType, boolean create, - String indexComment) { - if (indexType.isPrimaryKey()) { - for (IndexColumn c : cols) { - Column column = c.column; - if (column.isNullable()) { - throw DbException.get( - ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, column.getName()); - } - column.setPrimaryKey(true); - } - } - boolean isSessionTemporary = isTemporary() && !isGlobalTemporary(); - if (!isSessionTemporary) { - database.lockMeta(session); - } - Index index; - if (isPersistIndexes() && indexType.isPersistent()) { - int mainIndexColumn; - if (database.isStarting() && - database.getPageStore().getRootPageId(indexId) != 0) { - mainIndexColumn = -1; - } else if (!database.isStarting() && mainIndex.getRowCount(session) != 0 - || mainIndex.getMainIndexColumn() != -1) { - mainIndexColumn = -1; - } else { - mainIndexColumn = getMainIndexColumn(indexType, cols); - } - if (mainIndexColumn != -1) { - mainIndex.setMainIndexColumn(mainIndexColumn); - index = new PageDelegateIndex(this, indexId, indexName, - indexType, mainIndex, create, 
session); - } else if (indexType.isSpatial()) { - index = new SpatialTreeIndex(this, indexId, indexName, cols, - indexType, true, create, session); - } else { - index = new PageBtreeIndex(this, indexId, indexName, cols, - indexType, create, session); - } - } else { - if (indexType.isHash()) { - if (cols.length != 1) { - throw DbException.getUnsupportedException( - "hash indexes may index only one column"); - } - if (indexType.isUnique()) { - index = new HashIndex(this, indexId, indexName, cols, - indexType); - } else { - index = new NonUniqueHashIndex(this, indexId, indexName, - cols, indexType); - } - } else if (indexType.isSpatial()) { - index = new SpatialTreeIndex(this, indexId, indexName, cols, - indexType, false, true, session); - } else { - index = new TreeIndex(this, indexId, indexName, cols, indexType); - } - } - if (index.needRebuild() && rowCount > 0) { - try { - Index scan = getScanIndex(session); - long remaining = scan.getRowCount(session); - long total = remaining; - Cursor cursor = scan.find(session, null, null); - long i = 0; - int bufferSize = (int) Math.min(rowCount, database.getMaxMemoryRows()); - ArrayList buffer = new ArrayList<>(bufferSize); - String n = getName() + ":" + index.getName(); - int t = MathUtils.convertLongToInt(total); - while (cursor.next()) { - database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n, - MathUtils.convertLongToInt(i++), t); - Row row = cursor.get(); - buffer.add(row); - if (buffer.size() >= bufferSize) { - addRowsToIndex(session, buffer, index); - } - remaining--; - } - addRowsToIndex(session, buffer, index); - if (remaining != 0) { - DbException.throwInternalError("rowcount remaining=" + - remaining + " " + getName()); - } - } catch (DbException e) { - getSchema().freeUniqueName(indexName); - try { - index.remove(session); - } catch (DbException e2) { - // this could happen, for example on failure in the storage - // but if that is not the case it means - // there is something wrong with the database - 
trace.error(e2, "could not remove index"); - throw e2; - } - throw e; - } - } - index.setTemporary(isTemporary()); - if (index.getCreateSQL() != null) { - index.setComment(indexComment); - if (isSessionTemporary) { - session.addLocalTempTableIndex(index); - } else { - database.addSchemaObject(session, index); - } - } - indexes.add(index); - setModified(); - return index; - } - - @Override - public long getRowCount(Session session) { - return rowCount; - } - - @Override - public void removeRow(Session session, Row row) { - lastModificationId = database.getNextModificationDataId(); - int i = indexes.size() - 1; - try { - for (; i >= 0; i--) { - Index index = indexes.get(i); - index.remove(session, row); - checkRowCount(session, index, -1); - } - rowCount--; - } catch (Throwable e) { - try { - while (++i < indexes.size()) { - Index index = indexes.get(i); - index.add(session, row); - checkRowCount(session, index, 0); - } - } catch (DbException e2) { - // this could happen, for example on failure in the storage - // but if that is not the case it means there is something wrong - // with the database - trace.error(e2, "could not undo operation"); - throw e2; - } - throw DbException.convert(e); - } - analyzeIfRequired(session); - } - - @Override - public void truncate(Session session) { - lastModificationId = database.getNextModificationDataId(); - for (int i = indexes.size() - 1; i >= 0; i--) { - Index index = indexes.get(i); - index.truncate(session); - } - rowCount = 0; - changesSinceAnalyze = 0; - } - - private void analyzeIfRequired(Session session) { - if (nextAnalyze == 0 || nextAnalyze > changesSinceAnalyze++) { - return; - } - changesSinceAnalyze = 0; - int n = 2 * nextAnalyze; - if (n > 0) { - nextAnalyze = n; - } - session.markTableForAnalyze(this); - } - - @Override - public boolean lock(Session session, boolean exclusive, - boolean forceLockEvenInMvcc) { - int lockMode = database.getLockMode(); - if (lockMode == Constants.LOCK_MODE_OFF) { - return 
lockExclusiveSession != null; - } - if (lockExclusiveSession == session) { - return true; - } - if (!exclusive && lockSharedSessions.containsKey(session)) { - return true; - } - synchronized (database) { - if (!exclusive && lockSharedSessions.contains(session)) { - return true; - } - session.setWaitForLock(this, Thread.currentThread()); - waitingSessions.addLast(session); - try { - doLock1(session, lockMode, exclusive); - } finally { - session.setWaitForLock(null, null); - waitingSessions.remove(session); - } - } - return false; - } - - private void doLock1(Session session, int lockMode, boolean exclusive) { - traceLock(session, exclusive, "requesting for"); - // don't get the current time unless necessary - long max = 0; - boolean checkDeadlock = false; - while (true) { - // if I'm the next one in the queue - if (waitingSessions.getFirst() == session) { - if (doLock2(session, lockMode, exclusive)) { - return; - } - } - if (checkDeadlock) { - ArrayList sessions = checkDeadlock(session, null, null); - if (sessions != null) { - throw DbException.get(ErrorCode.DEADLOCK_1, - getDeadlockDetails(sessions, exclusive)); - } - } else { - // check for deadlocks from now on - checkDeadlock = true; - } - long now = System.nanoTime(); - if (max == 0) { - // try at least one more time - max = now + TimeUnit.MILLISECONDS.toNanos(session.getLockTimeout()); - } else if (now >= max) { - traceLock(session, exclusive, "timeout after " + session.getLockTimeout()); - throw DbException.get(ErrorCode.LOCK_TIMEOUT_1, getName()); - } - try { - traceLock(session, exclusive, "waiting for"); - if (database.getLockMode() == Constants.LOCK_MODE_TABLE_GC) { - for (int i = 0; i < 20; i++) { - long free = Runtime.getRuntime().freeMemory(); - System.gc(); - long free2 = Runtime.getRuntime().freeMemory(); - if (free == free2) { - break; - } - } - } - // don't wait too long so that deadlocks are detected early - long sleep = Math.min(Constants.DEADLOCK_CHECK, - TimeUnit.NANOSECONDS.toMillis(max - 
now)); - if (sleep == 0) { - sleep = 1; - } - database.wait(sleep); - } catch (InterruptedException e) { - // ignore - } - } - } - - private boolean doLock2(Session session, int lockMode, boolean exclusive) { - if (exclusive) { - if (lockExclusiveSession == null) { - if (lockSharedSessions.isEmpty()) { - traceLock(session, exclusive, "added for"); - session.addLock(this); - lockExclusiveSession = session; - return true; - } else if (lockSharedSessions.size() == 1 && - lockSharedSessions.containsKey(session)) { - traceLock(session, exclusive, "add (upgraded) for "); - lockExclusiveSession = session; - return true; - } - } - } else { - if (lockExclusiveSession == null) { - if (lockMode == Constants.LOCK_MODE_READ_COMMITTED) { - if (!database.isMultiThreaded()) { - // READ_COMMITTED: a read lock is acquired, - // but released immediately after the operation - // is complete. - // When allowing only one thread, no lock is - // required. - // Row level locks work like read committed. - return true; - } - } - if (!lockSharedSessions.containsKey(session)) { - traceLock(session, exclusive, "ok"); - session.addLock(this); - lockSharedSessions.put(session, session); - } - return true; - } - } - return false; - } - - private void traceLock(Session session, boolean exclusive, String s) { - if (traceLock.isDebugEnabled()) { - traceLock.debug("{0} {1} {2} {3}", session.getId(), - exclusive ? "exclusive write lock" : "shared read lock", s, getName()); - } - } - - @Override - public void unlock(Session s) { - if (database != null) { - traceLock(s, lockExclusiveSession == s, "unlock"); - if (lockExclusiveSession == s) { - lockSharedSessions.remove(s); - lockExclusiveSession = null; - } - synchronized (database) { - if (!lockSharedSessions.isEmpty()) { - lockSharedSessions.remove(s); - } - if (!waitingSessions.isEmpty()) { - database.notifyAll(); - } - } - } - } - - /** - * Set the row count of this table. 
- * - * @param count the row count - */ - public void setRowCount(long count) { - this.rowCount = count; - } - - @Override - public void removeChildrenAndResources(Session session) { - if (containsLargeObject) { - // unfortunately, the data is gone on rollback - truncate(session); - database.getLobStorage().removeAllForTable(getId()); - database.lockMeta(session); - } - super.removeChildrenAndResources(session); - // go backwards because database.removeIndex will call table.removeIndex - while (indexes.size() > 1) { - Index index = indexes.get(1); - if (index.getName() != null) { - database.removeSchemaObject(session, index); - } - // needed for session temporary indexes - indexes.remove(index); - } - if (SysProperties.CHECK) { - for (SchemaObject obj : database.getAllSchemaObjects(DbObject.INDEX)) { - Index index = (Index) obj; - if (index.getTable() == this) { - DbException.throwInternalError("index not dropped: " + index.getName()); - } - } - } - scanIndex.remove(session); - database.removeMeta(session, getId()); - scanIndex = null; - lockExclusiveSession = null; - lockSharedSessions.clear(); - invalidate(); - } - - @Override - public long getMaxDataModificationId() { - return lastModificationId; - } - - @Override - public long getRowCountApproximation() { - return scanIndex.getRowCountApproximation(); - } - - @Override - public long getDiskSpaceUsed() { - return scanIndex.getDiskSpaceUsed(); - } - - public void setCompareMode(CompareMode compareMode) { - this.compareMode = compareMode; - } - -} diff --git a/h2/src/main/org/h2/table/Plan.java b/h2/src/main/org/h2/table/Plan.java index 254a99bd42..635aa2aea1 100644 --- a/h2/src/main/org/h2/table/Plan.java +++ b/h2/src/main/org/h2/table/Plan.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; @@ -9,12 +9,11 @@ import java.util.Arrays; import java.util.HashMap; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; import org.h2.message.Trace; -import org.h2.table.TableFilter.TableFilterVisitor; /** * A possible query execution plan. The time required to execute a query depends @@ -44,13 +43,10 @@ public Plan(TableFilter[] filters, int count, Expression condition) { } for (int i = 0; i < count; i++) { TableFilter f = filters[i]; - f.visit(new TableFilterVisitor() { - @Override - public void accept(TableFilter f) { - all.add(f); - if (f.getJoinCondition() != null) { - allCond.add(f.getJoinCondition()); - } + f.visit(f1 -> { + all.add(f1); + if (f1.getJoinCondition() != null) { + allCond.add(f1.getJoinCondition()); } }); } @@ -84,12 +80,11 @@ public void removeUnusableIndexConditions() { for (int i = 0; i < allFilters.length; i++) { TableFilter f = allFilters[i]; setEvaluatable(f, true); - if (i < allFilters.length - 1 || - f.getSession().getDatabase().getSettings().earlyFilter) { + if (i < allFilters.length - 1) { // the last table doesn't need the optimization, // otherwise the expression is calculated twice unnecessarily // (not that bad but not optimal) - f.optimizeFullCondition(false); + f.optimizeFullCondition(); } f.removeUnusableIndexConditions(); } @@ -105,7 +100,7 @@ public void removeUnusableIndexConditions() { * @param allColumnsSet calculates all columns on-demand * @return the cost */ - public double calculateCost(Session session, AllColumnsForPlan allColumnsSet) { + public double calculateCost(SessionLocal session, AllColumnsForPlan allColumnsSet) { Trace t = session.getTrace(); if (t.isDebugEnabled()) { 
t.debug("Plan : calculate cost for plan {0}", Arrays.toString(allFilters)); diff --git a/h2/src/main/org/h2/table/PlanItem.java b/h2/src/main/org/h2/table/PlanItem.java index 60543db701..5d834eef65 100644 --- a/h2/src/main/org/h2/table/PlanItem.java +++ b/h2/src/main/org/h2/table/PlanItem.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; diff --git a/h2/src/main/org/h2/table/RangeTable.java b/h2/src/main/org/h2/table/RangeTable.java index c643f41928..774e42974e 100644 --- a/h2/src/main/org/h2/table/RangeTable.java +++ b/h2/src/main/org/h2/table/RangeTable.java @@ -1,27 +1,25 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; import java.util.ArrayList; import org.h2.api.ErrorCode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.index.Index; -import org.h2.index.IndexType; import org.h2.index.RangeIndex; import org.h2.message.DbException; -import org.h2.result.Row; import org.h2.schema.Schema; -import org.h2.value.Value; +import org.h2.value.TypeInfo; /** * The table SYSTEM_RANGE is a virtual table that generates incrementing numbers * with a given start end point. */ -public class RangeTable extends Table { +public class RangeTable extends VirtualTable { /** * The name of the range table. 
@@ -36,112 +34,47 @@ public class RangeTable extends Table { private Expression min, max, step; private boolean optimized; + private final RangeIndex index; + /** * Create a new range with the given start and end expressions. * * @param schema the schema (always the main schema) * @param min the start expression * @param max the end expression - * @param noColumns whether this table has no columns */ - public RangeTable(Schema schema, Expression min, Expression max, - boolean noColumns) { - super(schema, 0, NAME, true, true); - Column[] cols = noColumns ? new Column[0] : new Column[] { new Column( - "X", Value.LONG) }; + public RangeTable(Schema schema, Expression min, Expression max) { + super(schema, 0, NAME); this.min = min; this.max = max; - setColumns(cols); + Column[] columns = new Column[] { new Column("X", TypeInfo.TYPE_BIGINT) }; + setColumns(columns); + index = new RangeIndex(this, IndexColumn.wrap(columns)); } - public RangeTable(Schema schema, Expression min, Expression max, - Expression step, boolean noColumns) { - this(schema, min, max, noColumns); + public RangeTable(Schema schema, Expression min, Expression max, Expression step) { + this(schema, min, max); this.step = step; } @Override - public String getDropSQL() { - return null; - } - - @Override - public String getCreateSQL() { - return null; - } - - @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { builder.append(NAME).append('('); - min.getSQL(builder, alwaysQuote).append(", "); - max.getSQL(builder, alwaysQuote); + min.getUnenclosedSQL(builder, sqlFlags).append(", "); + max.getUnenclosedSQL(builder, sqlFlags); if (step != null) { - builder.append(", "); - step.getSQL(builder, alwaysQuote); + step.getUnenclosedSQL(builder.append(", "), sqlFlags); } return builder.append(')'); } @Override - public boolean lock(Session session, boolean exclusive, boolean forceLockEvenInMvcc) { - // nothing to 
do - return false; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public void unlock(Session s) { - // nothing to do - } - - @Override - public boolean isLockedExclusively() { - return false; - } - - @Override - public Index addIndex(Session session, String indexName, - int indexId, IndexColumn[] cols, IndexType indexType, - boolean create, String indexComment) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public void removeRow(Session session, Row row) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public void addRow(Session session, Row row) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public void checkSupportAlter() { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public boolean canGetRowCount() { + public boolean canGetRowCount(SessionLocal session) { return true; } @Override - public boolean canDrop() { - return false; - } - - @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { long step = getStep(session); if (step == 0L) { throw DbException.get(ErrorCode.STEP_SIZE_MUST_NOT_BE_ZERO); @@ -163,11 +96,18 @@ public TableType getTableType() { } @Override - public Index getScanIndex(Session session) { - if (getStep(session) == 0) { - throw DbException.get(ErrorCode.STEP_SIZE_MUST_NOT_BE_ZERO); - } - return new RangeIndex(this, IndexColumn.wrap(columns)); + public Index getScanIndex(SessionLocal session) { + return index; + } + + @Override + public ArrayList getIndexes() { + ArrayList list = new ArrayList<>(2); + // Scan index (ignored by MIN/MAX optimization) + list.add(index); + // Normal index + list.add(index); + return list; } /** @@ -176,7 +116,7 @@ public Index getScanIndex(Session session) { * @param session 
the session * @return the start value */ - public long getMin(Session session) { + public long getMin(SessionLocal session) { optimize(session); return min.getValue(session).getLong(); } @@ -187,7 +127,7 @@ public long getMin(Session session) { * @param session the session * @return the end value */ - public long getMax(Session session) { + public long getMax(SessionLocal session) { optimize(session); return max.getValue(session).getLong(); } @@ -198,7 +138,7 @@ public long getMax(Session session) { * @param session the session * @return the increment (1 by default) */ - public long getStep(Session session) { + public long getStep(SessionLocal session) { optimize(session); if (step == null) { return 1; @@ -206,7 +146,7 @@ public long getStep(Session session) { return step.getValue(session).getLong(); } - private void optimize(Session s) { + private void optimize(SessionLocal s) { if (!optimized) { min = min.optimize(s); max = max.optimize(s); @@ -217,44 +157,19 @@ private void optimize(Session s) { } } - @Override - public ArrayList getIndexes() { - return null; - } - - @Override - public void truncate(Session session) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - @Override public long getMaxDataModificationId() { return 0; } @Override - public Index getUniqueIndex() { - return null; - } - - @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return 100; } - @Override - public long getDiskSpaceUsed() { - return 0; - } - @Override public boolean isDeterministic() { return true; } - @Override - public boolean canReference() { - return false; - } - } diff --git a/h2/src/main/org/h2/table/RegularTable.java b/h2/src/main/org/h2/table/RegularTable.java deleted file mode 100644 index b223dfd75a..0000000000 --- a/h2/src/main/org/h2/table/RegularTable.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.table; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashSet; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; - -import org.h2.command.ddl.CreateTableData; -import org.h2.constraint.Constraint; -import org.h2.constraint.ConstraintReferential; -import org.h2.engine.Session; -import org.h2.index.Index; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.value.DataType; -import org.h2.value.Value; - -/** - * Most tables are an instance of this class. For this table, the data is stored - * in the database. The actual data is not kept here, instead it is kept in the - * indexes. There is at least one index, the scan index. - */ -public abstract class RegularTable extends TableBase { - - /** - * Appends the specified rows to the specified index. - * - * @param session - * the session - * @param list - * the rows, list is cleared on completion - * @param index - * the index to append to - */ - protected static void addRowsToIndex(Session session, ArrayList list, Index index) { - sortRows(list, index); - for (Row row : list) { - index.add(session, row); - } - list.clear(); - } - - /** - * Formats details of a deadlock. - * - * @param sessions - * the list of sessions - * @param exclusive - * true if waiting for exclusive lock, false otherwise - * @return formatted details of a deadlock - */ - protected static String getDeadlockDetails(ArrayList sessions, boolean exclusive) { - // We add the thread details here to make it easier for customers to - // match up these error messages with their own logs. 
- StringBuilder builder = new StringBuilder(); - for (Session s : sessions) { - Table lock = s.getWaitForLock(); - Thread thread = s.getWaitForLockThread(); - builder.append("\nSession ").append(s.toString()).append(" on thread ").append(thread.getName()) - .append(" is waiting to lock ").append(lock.toString()) - .append(exclusive ? " (exclusive)" : " (shared)").append(" while locking "); - Table[] locks = s.getLocks(); - for (int i = 0, length = locks.length; i < length; i++) { - Table t = locks[i]; - if (i > 0) { - builder.append(", "); - } - builder.append(t.toString()); - if (t instanceof RegularTable) { - if (((RegularTable) t).lockExclusiveSession == s) { - builder.append(" (exclusive)"); - } else { - builder.append(" (shared)"); - } - } - } - builder.append('.'); - } - return builder.toString(); - } - - /** - * Sorts the specified list of rows for a specified index. - * - * @param list - * the list of rows - * @param index - * the index to sort for - */ - protected static void sortRows(ArrayList list, final Index index) { - Collections.sort(list, new Comparator() { - @Override - public int compare(SearchRow r1, SearchRow r2) { - return index.compareRows(r1, r2); - } - }); - } - - /** - * Whether the table contains a CLOB or BLOB. - */ - protected final boolean containsLargeObject; - - /** - * The session (if any) that has exclusively locked this table. - */ - protected volatile Session lockExclusiveSession; - - /** - * The set of sessions (if any) that have a shared lock on the table. Here - * we are using using a ConcurrentHashMap as a set, as there is no - * ConcurrentHashSet. 
- */ - protected final ConcurrentHashMap lockSharedSessions = new ConcurrentHashMap<>(); - - private Column rowIdColumn; - - protected RegularTable(CreateTableData data) { - super(data); - this.isHidden = data.isHidden; - boolean b = false; - for (Column col : getColumns()) { - if (DataType.isLargeObject(col.getType().getValueType())) { - b = true; - break; - } - } - containsLargeObject = b; - } - - @Override - public boolean canDrop() { - return true; - } - - @Override - public boolean canGetRowCount() { - return true; - } - - @Override - public boolean canTruncate() { - if (getCheckForeignKeyConstraints() && database.getReferentialIntegrity()) { - ArrayList constraints = getConstraints(); - if (constraints != null) { - for (Constraint c : constraints) { - if (c.getConstraintType() != Constraint.Type.REFERENTIAL) { - continue; - } - ConstraintReferential ref = (ConstraintReferential) c; - if (ref.getRefTable() == this) { - return false; - } - } - } - } - return true; - } - - @Override - public ArrayList checkDeadlock(Session session, Session clash, Set visited) { - // only one deadlock check at any given time - synchronized (getClass()) { - if (clash == null) { - // verification is started - clash = session; - visited = new HashSet<>(); - } else if (clash == session) { - // we found a cycle where this session is involved - return new ArrayList<>(0); - } else if (visited.contains(session)) { - // we have already checked this session. 
- // there is a cycle, but the sessions in the cycle need to - // find it out themselves - return null; - } - visited.add(session); - ArrayList error = null; - for (Session s : lockSharedSessions.keySet()) { - if (s == session) { - // it doesn't matter if we have locked the object already - continue; - } - Table t = s.getWaitForLock(); - if (t != null) { - error = t.checkDeadlock(s, clash, visited); - if (error != null) { - error.add(session); - break; - } - } - } - // take a local copy so we don't see inconsistent data, since we are - // not locked while checking the lockExclusiveSession value - Session copyOfLockExclusiveSession = lockExclusiveSession; - if (error == null && copyOfLockExclusiveSession != null) { - Table t = copyOfLockExclusiveSession.getWaitForLock(); - if (t != null) { - error = t.checkDeadlock(copyOfLockExclusiveSession, clash, visited); - if (error != null) { - error.add(session); - } - } - } - return error; - } - } - - @Override - public void checkRename() { - // ok - } - - @Override - public void checkSupportAlter() { - // ok - } - - public boolean getContainsLargeObject() { - return containsLargeObject; - } - - @Override - public Column getRowIdColumn() { - if (rowIdColumn == null) { - rowIdColumn = new Column(Column.ROWID, Value.LONG); - rowIdColumn.setTable(this, SearchRow.ROWID_INDEX); - rowIdColumn.setRowId(true); - } - return rowIdColumn; - } - - @Override - public TableType getTableType() { - return TableType.TABLE; - } - - @Override - public boolean isDeterministic() { - return true; - } - - @Override - public boolean isLockedExclusively() { - return lockExclusiveSession != null; - } - - @Override - public boolean isLockedExclusivelyBy(Session session) { - return lockExclusiveSession == session; - } - - @Override - public String toString() { - return getSQL(false); - } - -} diff --git a/h2/src/main/org/h2/table/SingleColumnResolver.java b/h2/src/main/org/h2/table/SingleColumnResolver.java deleted file mode 100644 index 
ab4d7d4f9a..0000000000 --- a/h2/src/main/org/h2/table/SingleColumnResolver.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.table; - -import org.h2.command.dml.Select; -import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; -import org.h2.value.Value; - -/** - * The single column resolver is like a table with exactly one row. - * It is used to parse a simple one-column check constraint. - */ -public class SingleColumnResolver implements ColumnResolver { - - private final Column column; - private Value value; - - SingleColumnResolver(Column column) { - this.column = column; - } - - @Override - public String getTableAlias() { - return null; - } - - void setValue(Value value) { - this.value = value; - } - - @Override - public Value getValue(Column col) { - return value; - } - - @Override - public Column[] getColumns() { - return new Column[] { column }; - } - - @Override - public String getDerivedColumnName(Column column) { - return null; - } - - @Override - public String getSchemaName() { - return null; - } - - @Override - public TableFilter getTableFilter() { - return null; - } - - @Override - public Select getSelect() { - return null; - } - - @Override - public Column[] getSystemColumns() { - return null; - } - - @Override - public Column getRowIdColumn() { - return null; - } - - @Override - public Expression optimize(ExpressionColumn expressionColumn, Column col) { - return expressionColumn; - } - -} diff --git a/h2/src/main/org/h2/table/SubQueryInfo.java b/h2/src/main/org/h2/table/SubQueryInfo.java deleted file mode 100644 index 3ec5640665..0000000000 --- a/h2/src/main/org/h2/table/SubQueryInfo.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ - -package org.h2.table; - -import org.h2.result.SortOrder; - -/** - * Information about current sub-query being prepared. - * - * @author Sergi Vladykin - */ -public class SubQueryInfo { - - private final int[] masks; - private final TableFilter[] filters; - private final int filter; - private final SortOrder sortOrder; - private final SubQueryInfo upper; - - /** - * @param upper upper level sub-query if any - * @param masks index conditions masks - * @param filters table filters - * @param filter current filter - * @param sortOrder sort order - */ - public SubQueryInfo(SubQueryInfo upper, int[] masks, TableFilter[] filters, int filter, - SortOrder sortOrder) { - this.upper = upper; - this.masks = masks; - this.filters = filters; - this.filter = filter; - this.sortOrder = sortOrder; - } - - public SubQueryInfo getUpper() { - return upper; - } - - public int[] getMasks() { - return masks; - } - - public TableFilter[] getFilters() { - return filters; - } - - public int getFilter() { - return filter; - } - - public SortOrder getSortOrder() { - return sortOrder; - } -} diff --git a/h2/src/main/org/h2/table/Table.java b/h2/src/main/org/h2/table/Table.java index 395d91ef8c..c2b5b14fbc 100644 --- a/h2/src/main/org/h2/table/Table.java +++ b/h2/src/main/org/h2/table/Table.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.table; @@ -14,27 +14,28 @@ import org.h2.api.ErrorCode; import org.h2.command.Prepared; -import org.h2.command.dml.AllColumnsForPlan; +import org.h2.command.query.AllColumnsForPlan; import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.engine.CastDataProvider; import org.h2.engine.Constants; import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; -import org.h2.expression.Expression; +import org.h2.engine.SessionLocal; import org.h2.expression.ExpressionVisitor; import org.h2.index.Index; import org.h2.index.IndexType; import org.h2.message.DbException; import org.h2.message.Trace; +import org.h2.result.DefaultRow; +import org.h2.result.LocalResult; import org.h2.result.Row; -import org.h2.result.RowList; +import org.h2.result.RowFactory; import org.h2.result.SearchRow; -import org.h2.result.SimpleRow; import org.h2.result.SimpleRowValue; import org.h2.result.SortOrder; import org.h2.schema.Schema; -import org.h2.schema.SchemaObjectBase; +import org.h2.schema.SchemaObject; import org.h2.schema.Sequence; import org.h2.schema.TriggerObject; import org.h2.util.Utils; @@ -46,7 +47,7 @@ * This is the base class for most tables. * A table contains a list of columns and a list of rows. */ -public abstract class Table extends SchemaObjectBase { +public abstract class Table extends SchemaObject { /** * The table type that means this table is a regular persistent table. @@ -58,6 +59,21 @@ public abstract class Table extends SchemaObjectBase { */ public static final int TYPE_MEMORY = 1; + /** + * Read lock. + */ + public static final int READ_LOCK = 0; + + /** + * Write lock. + */ + public static final int WRITE_LOCK = 1; + + /** + * Exclusive lock. + */ + public static final int EXCLUSIVE_LOCK = 2; + /** * The columns of this table. 
*/ @@ -89,10 +105,10 @@ public abstract class Table extends SchemaObjectBase { private boolean checkForeignKeyConstraints = true; private boolean onCommitDrop, onCommitTruncate; private volatile Row nullRow; + private RowFactory rowFactory = RowFactory.getRowFactory(); private boolean tableExpression; - public Table(Schema schema, int id, String name, boolean persistIndexes, - boolean persistData) { + protected Table(Schema schema, int id, String name, boolean persistIndexes, boolean persistData) { super(schema, id, name, Trace.TABLE); columnMap = schema.getDatabase().newStringMap(); this.persistIndexes = persistIndexes; @@ -119,26 +135,28 @@ public boolean isView() { * This method waits until the lock is granted. * * @param session the session - * @param exclusive true for write locks, false for read locks - * @param forceLockEvenInMvcc lock even in the MVCC mode + * @param lockType the type of lock * @return true if the table was already exclusively locked by this session. * @throws DbException if a lock timeout occurred */ - public abstract boolean lock(Session session, boolean exclusive, boolean forceLockEvenInMvcc); + public boolean lock(SessionLocal session, int lockType) { + return false; + } /** * Close the table object and flush changes. * * @param session the session */ - public abstract void close(Session session); + public abstract void close(SessionLocal session); /** * Release the lock for this session. 
* * @param s the session */ - public abstract void unlock(Session s); + public void unlock(SessionLocal s) { + } /** * Create an index for this table @@ -147,14 +165,14 @@ public boolean isView() { * @param indexName the name of the index * @param indexId the id * @param cols the index columns + * @param uniqueColumnCount the count of unique columns * @param indexType the index type * @param create whether this is a new index * @param indexComment the comment * @return the index */ - public abstract Index addIndex(Session session, String indexName, - int indexId, IndexColumn[] cols, IndexType indexType, - boolean create, String indexComment); + public abstract Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment); /** * Get the given row. @@ -164,17 +182,26 @@ public abstract Index addIndex(Session session, String indexName, * @return the row */ @SuppressWarnings("unused") - public Row getRow(Session session, long key) { + public Row getRow(SessionLocal session, long key) { return null; } + /** + * Returns whether this table is insertable. + * + * @return whether this table is insertable + */ + public boolean isInsertable() { + return true; + } + /** * Remove a row from the table and all indexes. * * @param session the session * @param row the row */ - public abstract void removeRow(Session session, Row row); + public abstract void removeRow(SessionLocal session, Row row); /** * Locks row, preventing any updated to it, except from the session specified. 
@@ -183,7 +210,7 @@ public Row getRow(Session session, long key) { * @param row to lock * @return locked row, or null if row does not exist anymore */ - public Row lockRow(Session session, Row row) { + public Row lockRow(SessionLocal session, Row row) { throw DbException.getUnsupportedException("lockRow()"); } @@ -191,8 +218,9 @@ public Row lockRow(Session session, Row row) { * Remove all rows from the table and indexes. * * @param session the session + * @return number of removed rows, possibly including uncommitted rows */ - public abstract void truncate(Session session); + public abstract long truncate(SessionLocal session); /** * Add a row to the table and all indexes. @@ -201,7 +229,7 @@ public Row lockRow(Session session, Row row) { * @param row the row * @throws DbException if a constraint was violated */ - public abstract void addRow(Session session, Row row); + public abstract void addRow(SessionLocal session, Row row); /** * Update a row to the table and all indexes. @@ -211,7 +239,7 @@ public Row lockRow(Session session, Row row) { * @param newRow the row with updated values (_rowid_ suppose to be the same) * @throws DbException if a constraint was violated */ - public void updateRow(Session session, Row oldRow, Row newRow) { + public void updateRow(SessionLocal session, Row oldRow, Row newRow) { newRow.setKey(oldRow.getKey()); removeRow(session, oldRow); addRow(session, newRow); @@ -231,13 +259,28 @@ public void updateRow(Session session, Row oldRow, Row newRow) { */ public abstract TableType getTableType(); + /** + * Return SQL table type for INFORMATION_SCHEMA. + * + * @return SQL table type for INFORMATION_SCHEMA + */ + public String getSQLTableType() { + if (isView()) { + return "VIEW"; + } + if (isTemporary()) { + return isGlobalTemporary() ? "GLOBAL TEMPORARY" : "LOCAL TEMPORARY"; + } + return "BASE TABLE"; + } + /** * Get the scan index to iterate through all rows. 
* * @param session the session * @return the index */ - public abstract Index getScanIndex(Session session); + public abstract Index getScanIndex(SessionLocal session); /** * Get the scan index for this table. @@ -251,19 +294,12 @@ public void updateRow(Session session, Row oldRow, Row newRow) { * @return the scan index */ @SuppressWarnings("unused") - public Index getScanIndex(Session session, int[] masks, + public Index getScanIndex(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { return getScanIndex(session); } - /** - * Get any unique index for this table if one exists. - * - * @return a unique index - */ - public abstract Index getUniqueIndex(); - /** * Get all indexes for this table. * @@ -294,7 +330,9 @@ public Index getIndex(String indexName) { * * @return true if it is. */ - public abstract boolean isLockedExclusively(); + public boolean isLockedExclusively() { + return false; + } /** * Get the last data modification id. @@ -313,9 +351,10 @@ public Index getIndex(String indexName) { /** * Check if the row count can be retrieved quickly. * + * @param session the session * @return true if it can */ - public abstract boolean canGetRowCount(); + public abstract boolean canGetRowCount(SessionLocal session); /** * Check if this table can be referenced. @@ -339,16 +378,19 @@ public boolean canReference() { * @param session the session * @return the row count */ - public abstract long getRowCount(Session session); + public abstract long getRowCount(SessionLocal session); /** * Get the approximated row count for this table. * + * @param session the session * @return the approximated row count */ - public abstract long getRowCountApproximation(); + public abstract long getRowCountApproximation(SessionLocal session); - public abstract long getDiskSpaceUsed(); + public long getDiskSpaceUsed() { + return 0L; + } /** * Get the row id column if this table has one. 
@@ -361,7 +403,7 @@ public Column getRowIdColumn() { @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(toString()); + throw DbException.getInternalError(toString()); } /** @@ -431,6 +473,9 @@ public ArrayList getChildren() { } protected void setColumns(Column[] columns) { + if (columns.length > Constants.MAX_COLUMNS) { + throw DbException.get(ErrorCode.TOO_MANY_COLUMNS_1, "" + Constants.MAX_COLUMNS); + } this.columns = columns; if (columnMap.size() > 0) { columnMap.clear(); @@ -439,16 +484,16 @@ protected void setColumns(Column[] columns) { Column col = columns[i]; int dataType = col.getType().getValueType(); if (dataType == Value.UNKNOWN) { - throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, col.getSQL(false)); + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, col.getTraceSQL()); } col.setTable(this, i); String columnName = col.getName(); - if (columnMap.get(columnName) != null) { - throw DbException.get( - ErrorCode.DUPLICATE_COLUMN_NAME_1, columnName); + if (columnMap.putIfAbsent(columnName, col) != null) { + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, columnName); } - columnMap.put(columnName, col); } + rowFactory = database.getRowFactory().createRowFactory(database, database.getCompareMode(), database, columns, + null, false); } /** @@ -479,7 +524,7 @@ public void renameColumn(Column column, String newName) { * @return true if it is */ @SuppressWarnings("unused") - public boolean isLockedExclusivelyBy(Session session) { + public boolean isLockedExclusivelyBy(SessionLocal session) { return false; } @@ -491,16 +536,16 @@ public boolean isLockedExclusivelyBy(Session session) { * @param rows a list of row pairs of the form old row, new row, old row, * new row,... 
*/ - public void updateRows(Prepared prepared, Session session, RowList rows) { + public void updateRows(Prepared prepared, SessionLocal session, LocalResult rows) { // in case we need to undo the update - Session.Savepoint rollback = session.setSavepoint(); + SessionLocal.Savepoint rollback = session.setSavepoint(); // remove the old rows int rowScanCount = 0; - for (rows.reset(); rows.hasNext();) { + while (rows.next()) { if ((++rowScanCount & 127) == 0) { prepared.checkCanceled(); } - Row o = rows.next(); + Row o = rows.currentRowForTable(); rows.next(); try { removeRow(session, o); @@ -508,31 +553,26 @@ public void updateRows(Prepared prepared, Session session, RowList rows) { if (e.getErrorCode() == ErrorCode.CONCURRENT_UPDATE_1 || e.getErrorCode() == ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1) { session.rollbackTo(rollback); - session.startStatementWithinTransaction(); - rollback = session.setSavepoint(); } throw e; } - session.log(this, UndoLogRecord.DELETE, o); } // add the new rows - for (rows.reset(); rows.hasNext();) { + rows.reset(); + while (rows.next()) { if ((++rowScanCount & 127) == 0) { prepared.checkCanceled(); } rows.next(); - Row n = rows.next(); + Row n = rows.currentRowForTable(); try { addRow(session, n); } catch (DbException e) { if (e.getErrorCode() == ErrorCode.CONCURRENT_UPDATE_1) { session.rollbackTo(rollback); - session.startStatementWithinTransaction(); - rollback = session.setSavepoint(); } throw e; } - session.log(this, UndoLogRecord.INSERT, n); } } @@ -541,7 +581,7 @@ public CopyOnWriteArrayList getDependentViews() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { while (!dependentViews.isEmpty()) { TableView view = dependentViews.get(0); dependentViews.remove(0); @@ -587,7 +627,7 @@ public void removeChildrenAndResources(Session session) { * @throws DbException if the column is referenced by multi-column * constraints or indexes */ - public void 
dropMultipleColumnsConstraintsAndIndexes(Session session, + public void dropMultipleColumnsConstraintsAndIndexes(SessionLocal session, ArrayList columnsToDrop) { HashSet constraintsToDrop = new HashSet<>(); if (constraints != null) { @@ -600,7 +640,7 @@ public void dropMultipleColumnsConstraintsAndIndexes(Session session, if (columns.size() == 1) { constraintsToDrop.add(constraint); } else { - throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, constraint.getSQL(false)); + throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, constraint.getTraceSQL()); } } } @@ -619,13 +659,15 @@ public void dropMultipleColumnsConstraintsAndIndexes(Session session, if (index.getColumns().length == 1) { indexesToDrop.add(index); } else { - throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, index.getSQL(false)); + throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, index.getTraceSQL()); } } } } for (Constraint c : constraintsToDrop) { - session.getDatabase().removeSchemaObject(session, c); + if (c.isValid()) { + session.getDatabase().removeSchemaObject(session, c); + } } for (Index i : indexesToDrop) { // the index may already have been dropped when dropping the @@ -636,19 +678,23 @@ public void dropMultipleColumnsConstraintsAndIndexes(Session session, } } + public RowFactory getRowFactory() { + return rowFactory; + } + /** - * Create a new row for a table. + * Create a new row for this table. * - * @param data the values. - * @param memory whether the row is in memory. - * @return the created row. 
+ * @param data the values + * @param memory the estimated memory usage in bytes + * @return the created row */ public Row createRow(Value[] data, int memory) { - return database.createRow(data, memory); + return rowFactory.createRow(data, memory); } public Row getTemplateRow() { - return createRow(new Value[columns.length], Row.MEMORY_CALCULATE); + return createRow(new Value[getColumns().length], DefaultRow.MEMORY_CALCULATE); } /** @@ -661,17 +707,17 @@ public SearchRow getTemplateSimpleRow(boolean singleColumn) { if (singleColumn) { return new SimpleRowValue(columns.length); } - return new SimpleRow(new Value[columns.length]); + return new DefaultRow(new Value[columns.length]); } - Row getNullRow() { + public Row getNullRow() { Row row = nullRow; if (row == null) { // Here can be concurrently produced more than one row, but it must // be ok. Value[] values = new Value[columns.length]; Arrays.fill(values, ValueNull.INSTANCE); - nullRow = row = database.createRow(values, 1); + nullRow = row = createRow(values, 1); } return row; } @@ -710,6 +756,32 @@ public Column getColumn(String columnName) { return column; } + /** + * Get the column with the given name. + * + * @param columnName the column name + * @param ifExists if (@code true) return {@code null} if column does not exist + * @return the column + * @throws DbException if the column was not found + */ + public Column getColumn(String columnName, boolean ifExists) { + Column column = columnMap.get(columnName); + if (column == null && !ifExists) { + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, columnName); + } + return column; + } + + /** + * Get the column with the given name if it exists. + * + * @param columnName the column name, or {@code null} + * @return the column + */ + public Column findColumn(String columnName) { + return columnMap.get(columnName); + } + /** * Does the column with the given name exist? 
* @@ -720,6 +792,20 @@ public boolean doesColumnExist(String columnName) { return columnMap.containsKey(columnName); } + /** + * Returns first identity column, or {@code null}. + * + * @return first identity column, or {@code null} + */ + public Column getIdentityColumn() { + for (Column column : columns) { + if (column.isIdentity()) { + return column; + } + } + return null; + } + /** * Get the best plan for the given search mask. * @@ -732,7 +818,7 @@ public boolean doesColumnExist(String columnName) { * @param allColumnsSet the set of all columns * @return the plan item */ - public PlanItem getBestPlanItem(Session session, int[] masks, + public PlanItem getBestPlanItem(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { PlanItem item = new PlanItem(); @@ -804,28 +890,99 @@ public Index getPrimaryKey() { } /** - * Validate all values in this row, convert the values if required, and - * update the sequence values if required. This call will also set the - * default values if required and set the computed column if there are any. + * Prepares the specified row for INSERT operation. + * + * Identity, default, and generated values are evaluated, all values are + * converted to target data types and validated. Base value of identity + * column is updated when required by compatibility mode. 
* * @param session the session + * @param overridingSystem + * {@link Boolean#TRUE} for {@code OVERRIDING SYSTEM VALUES}, + * {@link Boolean#FALSE} for {@code OVERRIDING USER VALUES}, + * {@code null} if override clause is not specified * @param row the row */ - public void validateConvertUpdateSequence(Session session, Row row) { - for (int i = 0; i < columns.length; i++) { + public void convertInsertRow(SessionLocal session, Row row, Boolean overridingSystem) { + int length = columns.length, generated = 0; + for (int i = 0; i < length; i++) { Value value = row.getValue(i); Column column = columns[i]; - Value v2; - if (column.getComputed()) { - // force updating the value + if (value == ValueNull.INSTANCE && column.isDefaultOnNull()) { value = null; - v2 = column.computeValue(session, row); } - v2 = column.validateConvertUpdateSequence(session, value); + if (column.isIdentity()) { + if (overridingSystem != null) { + if (!overridingSystem) { + value = null; + } + } else if (value != null && column.isGeneratedAlways()) { + throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1, + column.getSQLWithTable(new StringBuilder(), TRACE_SQL_FLAGS).toString()); + } + } else if (column.isGeneratedAlways()) { + if (value != null) { + throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1, + column.getSQLWithTable(new StringBuilder(), TRACE_SQL_FLAGS).toString()); + } + generated++; + continue; + } + Value v2 = column.validateConvertUpdateSequence(session, value, row); + if (v2 != value) { + row.setValue(i, v2); + } + } + if (generated > 0) { + for (int i = 0; i < length; i++) { + Value value = row.getValue(i); + if (value == null) { + row.setValue(i, columns[i].validateConvertUpdateSequence(session, null, row)); + } + } + } + } + + /** + * Prepares the specified row for UPDATE operation. + * + * Default and generated values are evaluated, all values are converted to + * target data types and validated. 
Base value of identity column is updated + * when required by compatibility mode. + * + * @param session the session + * @param row the row + * @param fromTrigger {@code true} if row was modified by INSERT or UPDATE trigger + */ + public void convertUpdateRow(SessionLocal session, Row row, boolean fromTrigger) { + int length = columns.length, generated = 0; + for (int i = 0; i < length; i++) { + Value value = row.getValue(i); + Column column = columns[i]; + if (column.isGenerated()) { + if (value != null) { + if (!fromTrigger) { + throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1, + column.getSQLWithTable(new StringBuilder(), TRACE_SQL_FLAGS).toString()); + } + row.setValue(i, null); + } + generated++; + continue; + } + Value v2 = column.validateConvertUpdateSequence(session, value, row); if (v2 != value) { row.setValue(i, v2); } } + if (generated > 0) { + for (int i = 0; i < length; i++) { + Value value = row.getValue(i); + if (value == null) { + row.setValue(i, columns[i].validateConvertUpdateSequence(session, null, row)); + } + } + } } private static void remove(ArrayList list, DbObject obj) { @@ -963,7 +1120,7 @@ private static ArrayList add(ArrayList list, T obj) { * @param type the trigger type * @param beforeAction whether 'before' triggers should be called */ - public void fire(Session session, int type, boolean beforeAction) { + public void fire(SessionLocal session, int type, boolean beforeAction) { if (triggers != null) { for (TriggerObject trigger : triggers) { trigger.fire(session, type, beforeAction); @@ -1006,13 +1163,13 @@ public boolean fireRow() { * @param newRow the new data or null for a delete * @return true if no further action is required (for 'instead of' triggers) */ - public boolean fireBeforeRow(Session session, Row oldRow, Row newRow) { + public boolean fireBeforeRow(SessionLocal session, Row oldRow, Row newRow) { boolean done = fireRow(session, oldRow, newRow, true, false); fireConstraints(session, oldRow, newRow, 
true); return done; } - private void fireConstraints(Session session, Row oldRow, Row newRow, + private void fireConstraints(SessionLocal session, Row oldRow, Row newRow, boolean before) { if (constraints != null) { for (Constraint constraint : constraints) { @@ -1031,7 +1188,7 @@ private void fireConstraints(Session session, Row oldRow, Row newRow, * @param newRow the new data or null for a delete * @param rollback when the operation occurred within a rollback */ - public void fireAfterRow(Session session, Row oldRow, Row newRow, + public void fireAfterRow(SessionLocal session, Row oldRow, Row newRow, boolean rollback) { fireRow(session, oldRow, newRow, false, rollback); if (!rollback) { @@ -1039,7 +1196,7 @@ public void fireAfterRow(Session session, Row oldRow, Row newRow, } } - private boolean fireRow(Session session, Row oldRow, Row newRow, + private boolean fireRow(SessionLocal session, Row oldRow, Row newRow, boolean beforeAction, boolean rollback) { if (triggers != null) { for (TriggerObject trigger : triggers) { @@ -1073,12 +1230,13 @@ public boolean canTruncate() { * @param checkExisting true if existing rows must be checked during this * call */ - public void setCheckForeignKeyConstraints(Session session, boolean enabled, - boolean checkExisting) { + public void setCheckForeignKeyConstraints(SessionLocal session, boolean enabled, boolean checkExisting) { if (enabled && checkExisting) { if (constraints != null) { for (Constraint c : constraints) { - c.checkExistingData(session); + if (c.getConstraintType() == Type.REFERENTIAL) { + c.checkExistingData(session); + } } } } @@ -1100,7 +1258,7 @@ public boolean getCheckForeignKeyConstraints() { * @param needGetFirstOrLast if the returned index must be able * to do {@link Index#canGetFirstOrLast()} * @param needFindNext if the returned index must be able to do - * {@link Index#findNext(Session, SearchRow, SearchRow)} + * {@link Index#findNext(SessionLocal, SearchRow, SearchRow)} * @return the index or null */ 
public Index getIndexForColumn(Column column, @@ -1151,7 +1309,7 @@ public void setOnCommitTruncate(boolean onCommitTruncate) { * @param session the session * @param index the index that is no longer required */ - public void removeIndexOrTransferOwnership(Session session, Index index) { + public void removeIndexOrTransferOwnership(SessionLocal session, Index index) { boolean stillNeeded = false; if (constraints != null) { for (Constraint cons : constraints) { @@ -1167,6 +1325,19 @@ public void removeIndexOrTransferOwnership(Session session, Index index) { } } + /** + * Removes dependencies of column expressions, used for tables with circular + * dependencies. + * + * @param session the session + */ + public void removeColumnExpressionsDependencies(SessionLocal session) { + for (Column column : columns) { + column.setDefaultExpression(session, null); + column.setOnUpdateExpression(session, null); + } + } + /** * Check if a deadlock occurred. This method is called recursively. There is * a circle if the session to be tested has already being visited. If this @@ -1184,8 +1355,8 @@ public void removeIndexOrTransferOwnership(Session session, Index index) { * null */ @SuppressWarnings("unused") - public ArrayList checkDeadlock(Session session, Session clash, - Set visited) { + public ArrayList checkDeadlock(SessionLocal session, SessionLocal clash, + Set visited) { return null; } @@ -1201,13 +1372,14 @@ public boolean isPersistData() { * Compare two values with the current comparison mode. The values may be of * different type. 
* + * @param provider the cast information provider * @param a the first value * @param b the second value * @return 0 if both values are equal, -1 if the first value is smaller, and * 1 otherwise */ - public int compareValues(Value a, Value b) { - return a.compareTo(b, database.getMode(), compareMode); + public int compareValues(CastDataProvider provider, Value a, Value b) { + return a.compareTo(b, provider, compareMode); } public CompareMode getCompareMode() { @@ -1223,38 +1395,6 @@ public void checkWritingAllowed() { database.checkWritingAllowed(); } - private static Value getGeneratedValue(Session session, Column column, Expression expression) { - Value v; - if (expression == null) { - v = column.validateConvertUpdateSequence(session, null); - } else { - v = expression.getValue(session); - } - return column.convert(v); - } - - /** - * Get or generate a default value for the given column. - * - * @param session the session - * @param column the column - * @return the value - */ - public Value getDefaultValue(Session session, Column column) { - return getGeneratedValue(session, column, column.getDefaultExpression()); - } - - /** - * Generates on update value for the given column. - * - * @param session the session - * @param column the column - * @return the value - */ - public Value getOnUpdateValue(Session session, Column column) { - return getGeneratedValue(session, column, column.getOnUpdateExpression()); - } - @Override public boolean isHidden() { return isHidden; @@ -1264,7 +1404,11 @@ public void setHidden(boolean hidden) { this.isHidden = hidden; } - public boolean isMVStore() { + /** + * Views, function tables, links, etc. do not support locks + * @return true if table supports row-level locks + */ + public boolean isRowLockable() { return false; } @@ -1275,4 +1419,23 @@ public void setTableExpression(boolean tableExpression) { public boolean isTableExpression() { return tableExpression; } + + /** + * Return list of triggers. 
+ * + * @return list of triggers + */ + public ArrayList getTriggers() { + return triggers; + } + + /** + * Returns ID of main index column, or {@link SearchRow#ROWID_INDEX}. + * + * @return ID of main index column, or {@link SearchRow#ROWID_INDEX} + */ + public int getMainIndexColumn() { + return SearchRow.ROWID_INDEX; + } + } diff --git a/h2/src/main/org/h2/table/TableBase.java b/h2/src/main/org/h2/table/TableBase.java index c746779b68..a2858ba570 100644 --- a/h2/src/main/org/h2/table/TableBase.java +++ b/h2/src/main/org/h2/table/TableBase.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; @@ -9,9 +9,7 @@ import java.util.List; import org.h2.command.ddl.CreateTableData; import org.h2.engine.Database; -import org.h2.engine.DbSettings; import org.h2.index.IndexType; -import org.h2.mvstore.db.MVTableEngine; import org.h2.result.SearchRow; import org.h2.result.SortOrder; import org.h2.util.StringUtils; @@ -47,14 +45,14 @@ public static int getMainIndexColumn(IndexType indexType, IndexColumn[] cols) { return SearchRow.ROWID_INDEX; } IndexColumn first = cols[0]; - if (first.sortType != SortOrder.ASCENDING) { + if ((first.sortType & SortOrder.DESCENDING) != 0) { return SearchRow.ROWID_INDEX; } switch (first.column.getType().getValueType()) { - case Value.BYTE: - case Value.SHORT: - case Value.INT: - case Value.LONG: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: return first.column.getColumnId(); default: return SearchRow.ROWID_INDEX; @@ -78,12 +76,21 @@ public TableBase(CreateTableData data) { @Override public String getDropSQL() { StringBuilder builder = new StringBuilder("DROP TABLE IF EXISTS "); - getSQL(builder, true).append(" 
CASCADE"); + getSQL(builder, DEFAULT_SQL_FLAGS).append(" CASCADE"); return builder.toString(); } + @Override + public String getCreateSQLForMeta() { + return getCreateSQL(true); + } + @Override public String getCreateSQL() { + return getCreateSQL(false); + } + + private String getCreateSQL(boolean forMeta) { Database db = getDatabase(); if (db == null) { // closed @@ -106,7 +113,7 @@ public String getCreateSQL() { if (isHidden) { buff.append("IF NOT EXISTS "); } - getSQL(buff, true); + getSQL(buff, DEFAULT_SQL_FLAGS); if (comment != null) { buff.append(" COMMENT "); StringUtils.quoteStringSQL(buff, comment); @@ -116,15 +123,11 @@ public String getCreateSQL() { if (i > 0) { buff.append(",\n "); } - buff.append(columns[i].getCreateSQL()); + buff.append(columns[i].getCreateSQL(forMeta)); } buff.append("\n)"); if (tableEngine != null) { - DbSettings s = db.getSettings(); - String d = s.defaultTableEngine; - if (d == null && s.mvStore) { - d = MVTableEngine.class.getName(); - } + String d = db.getSettings().defaultTableEngine; if (d == null || !tableEngine.endsWith(d)) { buff.append("\nENGINE "); StringUtils.quoteIdentifier(buff, tableEngine); diff --git a/h2/src/main/org/h2/table/TableFilter.java b/h2/src/main/org/h2/table/TableFilter.java index 5f9faf7b4d..990467a718 100644 --- a/h2/src/main/org/h2/table/TableFilter.java +++ b/h2/src/main/org/h2/table/TableFilter.java @@ -1,38 +1,43 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.table; import java.util.ArrayList; +import java.util.Comparator; import java.util.HashMap; import java.util.LinkedHashMap; +import java.util.Map.Entry; import org.h2.api.ErrorCode; -import org.h2.command.Parser; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.command.dml.Select; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.command.query.Select; +import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; import org.h2.expression.condition.Comparison; import org.h2.expression.condition.ConditionAndOr; import org.h2.index.Index; import org.h2.index.IndexCondition; import org.h2.index.IndexCursor; -import org.h2.index.IndexLookupBatch; -import org.h2.index.ViewIndex; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.result.SortOrder; +import org.h2.util.HasSQL; +import org.h2.util.ParserUtil; import org.h2.util.StringUtils; import org.h2.util.Utils; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueLong; +import org.h2.value.ValueBigint; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueTinyint; /** * A table filter represents a table that is used in a query. There is one such @@ -41,15 +46,25 @@ */ public class TableFilter implements ColumnResolver { - private static final int BEFORE_FIRST = 0, FOUND = 1, AFTER_LAST = 2, - NULL_ROW = 3; + private static final int BEFORE_FIRST = 0, FOUND = 1, AFTER_LAST = 2, NULL_ROW = 3; + + /** + * Comparator that uses order in FROM clause as a sort key. + */ + public static final Comparator ORDER_IN_FROM_COMPARATOR = + Comparator.comparing(TableFilter::getOrderInFrom); + + /** + * A visitor that sets joinOuterIndirect to true. 
+ */ + private static final TableFilterVisitor JOI_VISITOR = f -> f.joinOuterIndirect = true; /** * Whether this is a direct or indirect (nested) outer join */ protected boolean joinOuterIndirect; - private Session session; + private SessionLocal session; private final Table table; private final Select select; @@ -60,12 +75,6 @@ public class TableFilter implements ColumnResolver { private int scanCount; private boolean evaluatable; - /** - * Batched join support. - */ - private JoinBatch joinBatch; - private int joinFilterId = -1; - /** * Indicates that this filter is used in the plan. */ @@ -81,11 +90,6 @@ public class TableFilter implements ColumnResolver { */ private final ArrayList indexConditions = Utils.newSmallArrayList(); - /** - * Whether new window conditions should not be accepted. - */ - private boolean doneWithIndexConditions; - /** * Additional conditions that can't be used for index lookup, but for row * filter for this table (ID=ID, NAME LIKE '%X%') @@ -116,12 +120,23 @@ public class TableFilter implements ColumnResolver { */ private TableFilter nestedJoin; - private ArrayList naturalJoinColumns; + /** + * Map of common join columns, used for NATURAL joins and USING clause of + * other joins. This map preserves original order of the columns. + */ + private LinkedHashMap commonJoinColumns; + + private TableFilter commonJoinColumnsFilter; + private ArrayList commonJoinColumnsToExclude; private boolean foundOne; private Expression fullCondition; private final int hashCode; private final int orderInFrom; + /** + * Map of derived column names. This map preserves original order of the + * columns. 
+ */ private LinkedHashMap derivedColumnMap; /** @@ -135,15 +150,15 @@ public class TableFilter implements ColumnResolver { * @param orderInFrom original order number (index) of this table filter in * @param indexHints the index hints to be used by the query planner */ - public TableFilter(Session session, Table table, String alias, + public TableFilter(SessionLocal session, Table table, String alias, boolean rightsChecked, Select select, int orderInFrom, IndexHints indexHints) { this.session = session; this.table = table; this.alias = alias; this.select = select; - this.cursor = new IndexCursor(this); + this.cursor = new IndexCursor(); if (!rightsChecked) { - session.getUser().checkRight(table, Right.SELECT); + session.getUser().checkTableRight(table, Right.SELECT); } hashCode = session.nextObjectId(); this.orderInFrom = orderInFrom; @@ -177,13 +192,11 @@ public Table getTable() { * Lock the table. This will also lock joined tables. * * @param s the session - * @param exclusive true if an exclusive lock is required - * @param forceLockEvenInMvcc lock even in the MVCC mode */ - public void lock(Session s, boolean exclusive, boolean forceLockEvenInMvcc) { - table.lock(s, exclusive, forceLockEvenInMvcc); + public void lock(SessionLocal s) { + table.lock(s, Table.READ_LOCK); if (join != null) { - join.lock(s, exclusive, forceLockEvenInMvcc); + join.lock(s); } } @@ -197,7 +210,7 @@ public void lock(Session s, boolean exclusive, boolean forceLockEvenInMvcc) { * @param allColumnsSet the set of all columns * @return the best plan item */ - public PlanItem getBestPlanItem(Session s, TableFilter[] filters, int filter, + public PlanItem getBestPlanItem(SessionLocal s, TableFilter[] filters, int filter, AllColumnsForPlan allColumnsSet) { PlanItem item1 = null; SortOrder sortOrder = null; @@ -321,21 +334,21 @@ public void prepare() { } if (nestedJoin != null) { if (nestedJoin == this) { - DbException.throwInternalError("self join"); + throw DbException.getInternalError("self 
join"); } nestedJoin.prepare(); } if (join != null) { if (join == this) { - DbException.throwInternalError("self join"); + throw DbException.getInternalError("self join"); } join.prepare(); } if (filterCondition != null) { - filterCondition = filterCondition.optimize(session); + filterCondition = filterCondition.optimizeCondition(session); } if (joinCondition != null) { - joinCondition = joinCondition.optimize(session); + joinCondition = joinCondition.optimizeCondition(session); } } @@ -344,7 +357,7 @@ public void prepare() { * * @param s the session */ - public void startQuery(Session s) { + public void startQuery(SessionLocal s) { this.session = s; scanCount = 0; if (nestedJoin != null) { @@ -359,11 +372,6 @@ public void startQuery(Session s) { * Reset to the current position. */ public void reset() { - if (joinBatch != null && joinFilterId == 0) { - // reset join batch only on top table filter - joinBatch.reset(true); - return; - } if (nestedJoin != null) { nestedJoin.reset(); } @@ -374,101 +382,12 @@ public void reset() { foundOne = false; } - private boolean isAlwaysTopTableFilter(int filter) { - if (filter != 0) { - return false; - } - // check if we are at the top table filters all the way up - SubQueryInfo info = session.getSubQueryInfo(); - while (true) { - if (info == null) { - return true; - } - if (info.getFilter() != 0) { - return false; - } - info = info.getUpper(); - } - } - - /** - * Attempt to initialize batched join. - * - * @param jb join batch if it is already created - * @param filters the table filters - * @param filter the filter index (0, 1,...) 
- * @return join batch if query runs over index which supports batched - * lookups, {@code null} otherwise - */ - public JoinBatch prepareJoinBatch(JoinBatch jb, TableFilter[] filters, int filter) { - assert filters[filter] == this; - joinBatch = null; - joinFilterId = -1; - if (getTable().isView()) { - session.pushSubQueryInfo(masks, filters, filter, select.getSortOrder()); - try { - ((ViewIndex) index).getQuery().prepareJoinBatch(); - } finally { - session.popSubQueryInfo(); - } - } - // For globally top table filter we don't need to create lookup batch, - // because currently it will not be used (this will be shown in - // ViewIndex.getPlanSQL()). Probably later on it will make sense to - // create it to better support X IN (...) conditions, but this needs to - // be implemented separately. If isAlwaysTopTableFilter is false then we - // either not a top table filter or top table filter in a sub-query, - // which in turn is not top in outer query, thus we need to enable - // batching here to allow outer query run batched join against this - // sub-query. - IndexLookupBatch lookupBatch = null; - if (jb == null && select != null && !isAlwaysTopTableFilter(filter)) { - lookupBatch = index.createLookupBatch(filters, filter); - if (lookupBatch != null) { - jb = new JoinBatch(filter + 1, join); - } - } - if (jb != null) { - if (nestedJoin != null) { - throw DbException.throwInternalError(); - } - joinBatch = jb; - joinFilterId = filter; - if (lookupBatch == null && !isAlwaysTopTableFilter(filter)) { - // createLookupBatch will be called at most once because jb can - // be created only if lookupBatch is already not null from the - // call above. 
- lookupBatch = index.createLookupBatch(filters, filter); - if (lookupBatch == null) { - // the index does not support lookup batching, need to fake - // it because we are not top - lookupBatch = JoinBatch.createFakeIndexLookupBatch(this); - } - } - jb.register(this, lookupBatch); - } - return jb; - } - - public int getJoinFilterId() { - return joinFilterId; - } - - public JoinBatch getJoinBatch() { - return joinBatch; - } - /** * Check if there are more rows to read. * * @return true if there are */ public boolean next() { - if (joinBatch != null) { - // will happen only on topTableFilter since joinBatch.next() does - // not call join.next() - return joinBatch.next(); - } if (state == AFTER_LAST) { return false; } else if (state == BEFORE_FIRST) { @@ -555,6 +474,10 @@ public boolean next() { return false; } + public boolean isNullRow() { + return state == NULL_ROW; + } + /** * Set the state of this and all nested tables to the NULL row. */ @@ -563,12 +486,7 @@ protected void setNullRow() { current = table.getNullRow(); currentSearchRow = current; if (nestedJoin != null) { - nestedJoin.visit(new TableFilterVisitor() { - @Override - public void accept(TableFilter f) { - f.setNullRow(); - } - }); + nestedJoin.visit(TableFilter::setNullRow); } } @@ -629,16 +547,7 @@ public String getTableAlias() { * @param condition the index condition */ public void addIndexCondition(IndexCondition condition) { - if (!doneWithIndexConditions) { - indexConditions.add(condition); - } - } - - /** - * Used to reject all additional index conditions. 
- */ - public void doneWithIndexConditions() { - this.doneWithIndexConditions = true; + indexConditions.add(condition); } /** @@ -683,7 +592,7 @@ public void addJoin(TableFilter filter, boolean outer, Expression on) { join = filter; filter.joinOuter = outer; if (outer) { - filter.visit(new JOIVisitor()); + filter.visit(JOI_VISITOR); } if (on != null) { filter.mapAndAddFilter(on); @@ -723,10 +632,12 @@ public void mapAndAddFilter(Expression on) { */ public void createIndexConditions() { if (joinCondition != null) { - joinCondition = joinCondition.optimize(session); - joinCondition.createIndexConditions(session, this); - if (nestedJoin != null) { - joinCondition.createIndexConditions(session, nestedJoin); + joinCondition = joinCondition.optimizeCondition(session); + if (joinCondition != null) { + joinCondition.createIndexConditions(session, this); + if (nestedJoin != null) { + joinCondition.createIndexConditions(session, nestedJoin); + } } } if (join != null) { @@ -766,10 +677,10 @@ public boolean isJoinOuterIndirect() { * * @param builder string builder to append to * @param isJoin if this is a joined table - * @param alwaysQuote quote all identifiers + * @param sqlFlags formatting flags * @return the specified builder */ - public StringBuilder getPlanSQL(StringBuilder builder, boolean isJoin, boolean alwaysQuote) { + public StringBuilder getPlanSQL(StringBuilder builder, boolean isJoin, int sqlFlags) { if (isJoin) { if (joinOuter) { builder.append("LEFT OUTER JOIN "); @@ -781,7 +692,7 @@ public StringBuilder getPlanSQL(StringBuilder builder, boolean isJoin, boolean a StringBuilder buffNested = new StringBuilder(); TableFilter n = nestedJoin; do { - n.getPlanSQL(buffNested, n != nestedJoin, alwaysQuote).append('\n'); + n.getPlanSQL(buffNested, n != nestedJoin, sqlFlags).append('\n'); n = n.getJoin(); } while (n != null); String nested = buffNested.toString(); @@ -800,23 +711,23 @@ public StringBuilder getPlanSQL(StringBuilder builder, boolean isJoin, boolean a // 
otherwise the nesting is unclear builder.append("1=1"); } else { - joinCondition.getUnenclosedSQL(builder, alwaysQuote); + joinCondition.getUnenclosedSQL(builder, sqlFlags); } } return builder; } - if (table.isView() && ((TableView) table).isRecursive()) { - table.getSchema().getSQL(builder, alwaysQuote).append('.'); - Parser.quoteIdentifier(builder, table.getName(), alwaysQuote); + if (table instanceof TableView && ((TableView) table).isRecursive()) { + table.getSchema().getSQL(builder, sqlFlags).append('.'); + ParserUtil.quoteIdentifier(builder, table.getName(), sqlFlags); } else { - table.getSQL(builder, alwaysQuote); + table.getSQL(builder, sqlFlags); } - if (table.isView() && ((TableView) table).isInvalid()) { + if (table instanceof TableView && ((TableView) table).isInvalid()) { throw DbException.get(ErrorCode.VIEW_IS_INVALID_2, table.getName(), "not compiled"); } if (alias != null) { builder.append(' '); - Parser.quoteIdentifier(builder, alias, alwaysQuote); + ParserUtil.quoteIdentifier(builder, alias, sqlFlags); if (derivedColumnMap != null) { builder.append('('); boolean f = false; @@ -825,7 +736,7 @@ public StringBuilder getPlanSQL(StringBuilder builder, boolean isJoin, boolean a builder.append(", "); } f = true; - Parser.quoteIdentifier(builder, name, alwaysQuote); + ParserUtil.quoteIdentifier(builder, name, sqlFlags); } builder.append(')'); } @@ -839,37 +750,24 @@ public StringBuilder getPlanSQL(StringBuilder builder, boolean isJoin, boolean a } else { first = false; } - Parser.quoteIdentifier(builder, index, alwaysQuote); + ParserUtil.quoteIdentifier(builder, index, sqlFlags); } builder.append(")"); } - if (index != null) { + if (index != null && (sqlFlags & HasSQL.ADD_PLAN_INFORMATION) != 0) { builder.append('\n'); - StringBuilder planBuilder = new StringBuilder(); - if (joinBatch != null) { - IndexLookupBatch lookupBatch = joinBatch.getLookupBatch(joinFilterId); - if (lookupBatch == null) { - if (joinFilterId != 0) { - throw 
DbException.throwInternalError(Integer.toString(joinFilterId)); - } - } else { - planBuilder.append("batched:").append(lookupBatch.getPlanSQL()).append(' '); - } - } - planBuilder.append(index.getPlanSQL()); + StringBuilder planBuilder = new StringBuilder().append("/* ").append(index.getPlanSQL()); if (!indexConditions.isEmpty()) { planBuilder.append(": "); for (int i = 0, size = indexConditions.size(); i < size; i++) { if (i > 0) { planBuilder.append("\n AND "); } - planBuilder.append(indexConditions.get(i).getSQL(false)); + planBuilder.append(indexConditions.get(i).getSQL( + HasSQL.TRACE_SQL_FLAGS | HasSQL.ADD_PLAN_INFORMATION)); } } - String plan = StringUtils.quoteRemarkSQL(planBuilder.toString()); - planBuilder.setLength(0); - planBuilder.append("/* ").append(plan); - if (plan.indexOf('\n') >= 0) { + if (planBuilder.indexOf("\n", 3) >= 0) { planBuilder.append('\n'); } StringUtils.indent(builder, planBuilder.append(" */").toString(), 4, false); @@ -881,17 +779,20 @@ public StringBuilder getPlanSQL(StringBuilder builder, boolean isJoin, boolean a // unclear builder.append("1=1"); } else { - joinCondition.getUnenclosedSQL(builder, alwaysQuote); + joinCondition.getUnenclosedSQL(builder, sqlFlags); } } - if (filterCondition != null) { - builder.append('\n'); - String condition = StringUtils.unEnclose(filterCondition.getSQL(false)); - condition = "/* WHERE " + StringUtils.quoteRemarkSQL(condition) + "\n*/"; - StringUtils.indent(builder, condition, 4, false); - } - if (scanCount > 0) { - builder.append("\n /* scanCount: ").append(scanCount).append(" */"); + if ((sqlFlags & HasSQL.ADD_PLAN_INFORMATION) != 0) { + if (filterCondition != null) { + builder.append('\n'); + String condition = filterCondition.getSQL(HasSQL.TRACE_SQL_FLAGS | HasSQL.ADD_PLAN_INFORMATION, + Expression.WITHOUT_PARENTHESES); + condition = "/* WHERE " + condition + "\n*/"; + StringUtils.indent(builder, condition, 4, false); + } + if (scanCount > 0) { + builder.append("\n /* scanCount: 
").append(scanCount).append(" */"); + } } return builder; } @@ -903,7 +804,7 @@ void removeUnusableIndexConditions() { // the indexConditions list may be modified here for (int i = 0; i < indexConditions.size(); i++) { IndexCondition cond = indexConditions.get(i); - if (!cond.isEvaluatable()) { + if (cond.getMask(indexConditions) == 0 || !cond.isEvaluatable()) { indexConditions.remove(i--); } } @@ -934,15 +835,6 @@ public boolean isUsed() { return used; } - /** - * Set the session of this table filter. - * - * @param session the new session - */ - void setSession(Session session) { - this.session = session; - } - /** * Remove the joined table */ @@ -982,17 +874,15 @@ public void setFullCondition(Expression condition) { /** * Optimize the full condition. This will add the full condition to the * filter condition. - * - * @param fromOuterJoin if this method was called from an outer joined table */ - void optimizeFullCondition(boolean fromOuterJoin) { - if (fullCondition != null) { - fullCondition.addFilterConditions(this, fromOuterJoin || joinOuter); + void optimizeFullCondition() { + if (!joinOuter && fullCondition != null) { + fullCondition.addFilterConditions(this); if (nestedJoin != null) { - nestedJoin.optimizeFullCondition(fromOuterJoin || joinOuter); + nestedJoin.optimizeFullCondition(); } if (join != null) { - join.optimizeFullCondition(fromOuterJoin || joinOuter); + join.optimizeFullCondition(); } } } @@ -1031,7 +921,10 @@ public void setEvaluatable(boolean evaluatable) { @Override public String getSchemaName() { - return table.getSchema().getName(); + if (alias == null && !(table instanceof VirtualTable)) { + return table.getSchema().getName(); + } + return null; } @Override @@ -1040,9 +933,59 @@ public Column[] getColumns() { } @Override - public String getDerivedColumnName(Column column) { + public Column findColumn(String name) { + HashMap map = derivedColumnMap; + if (map != null) { + Database db = session.getDatabase(); + for (Entry entry : 
derivedColumnMap.entrySet()) { + if (db.equalsIdentifiers(entry.getValue(), name)) { + return entry.getKey(); + } + } + return null; + } + return table.findColumn(name); + } + + @Override + public String getColumnName(Column column) { + HashMap map = derivedColumnMap; + return map != null ? map.get(column) : column.getName(); + } + + @Override + public boolean hasDerivedColumnList() { + return derivedColumnMap != null; + } + + /** + * Get the column with the given name. + * + * @param columnName + * the column name + * @param ifExists + * if (@code true) return {@code null} if column does not exist + * @return the column + * @throws DbException + * if the column was not found and {@code ifExists} is + * {@code false} + */ + public Column getColumn(String columnName, boolean ifExists) { HashMap map = derivedColumnMap; - return map != null ? map.get(column) : null; + if (map != null) { + Database database = session.getDatabase(); + for (Entry entry : map.entrySet()) { + if (database.equalsIdentifiers(columnName, entry.getValue())) { + return entry.getKey(); + } + } + if (ifExists) { + return null; + } else { + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, columnName); + } + } + return table.getColumn(columnName, ifExists); } /** @@ -1057,13 +1000,10 @@ public Column[] getSystemColumns() { if (!session.getDatabase().getMode().systemColumns) { return null; } - Column[] sys = new Column[3]; - sys[0] = new Column("oid", Value.INT); - sys[0].setTable(table, 0); - sys[1] = new Column("ctid", Value.STRING); - sys[1].setTable(table, 0); - sys[2] = new Column("CTID", Value.STRING); - sys[2].setTable(table, 0); + Column[] sys = { // + new Column("oid", TypeInfo.TYPE_INTEGER, table, 0), // + new Column("ctid", TypeInfo.TYPE_VARCHAR, table, 0) // + }; return sys; } @@ -1074,21 +1014,21 @@ public Column getRowIdColumn() { @Override public Value getValue(Column column) { - if (joinBatch != null) { - return joinBatch.getValue(joinFilterId, column); - } if (currentSearchRow == 
null) { return null; } int columnId = column.getColumnId(); if (columnId == -1) { - return ValueLong.get(currentSearchRow.getKey()); + return ValueBigint.get(currentSearchRow.getKey()); } if (current == null) { Value v = currentSearchRow.getValue(columnId); if (v != null) { return v; } + if (columnId == column.getTable().getMainIndexColumn()) { + return getDelegatedValue(column); + } current = cursor.get(); if (current == null) { return ValueNull.INSTANCE; @@ -1097,6 +1037,22 @@ public Value getValue(Column column) { return current.getValue(columnId); } + private Value getDelegatedValue(Column column) { + long key = currentSearchRow.getKey(); + switch (column.getType().getValueType()) { + case Value.TINYINT: + return ValueTinyint.get((byte) key); + case Value.SMALLINT: + return ValueSmallint.get((short) key); + case Value.INTEGER: + return ValueInteger.get((int) key); + case Value.BIGINT: + return ValueBigint.get(key); + default: + throw DbException.getInternalError(); + } + } + @Override public TableFilter getTableFilter() { return this; @@ -1130,36 +1086,72 @@ public void setDerivedColumns(ArrayList derivedColumnNames) { this.derivedColumnMap = map; } - @Override - public Expression optimize(ExpressionColumn expressionColumn, Column column) { - return expressionColumn; - } - @Override public String toString() { return alias != null ? alias : table.toString(); } /** - * Add a column to the natural join key column list. + * Add a column to the common join column list for a left table filter. 
+ * + * @param leftColumn + * the column on the left side + * @param replacementColumn + * the column to use instead, may be the same as column on the + * left side + * @param replacementFilter + * the table filter for replacement columns + */ + public void addCommonJoinColumns(Column leftColumn, Column replacementColumn, TableFilter replacementFilter) { + if (commonJoinColumns == null) { + commonJoinColumns = new LinkedHashMap<>(); + commonJoinColumnsFilter = replacementFilter; + } else { + assert commonJoinColumnsFilter == replacementFilter; + } + commonJoinColumns.put(leftColumn, replacementColumn); + } + + /** + * Add an excluded column to the common join column list. * - * @param c the column to add + * @param columnToExclude + * the column to exclude */ - public void addNaturalJoinColumn(Column c) { - if (naturalJoinColumns == null) { - naturalJoinColumns = Utils.newSmallArrayList(); + public void addCommonJoinColumnToExclude(Column columnToExclude) { + if (commonJoinColumnsToExclude == null) { + commonJoinColumnsToExclude = Utils.newSmallArrayList(); } - naturalJoinColumns.add(c); + commonJoinColumnsToExclude.add(columnToExclude); + } + + /** + * Returns common join columns map. + * + * @return common join columns map, or {@code null} + */ + public LinkedHashMap getCommonJoinColumns() { + return commonJoinColumns; } /** - * Check if the given column is a natural join column. + * Returns common join columns table filter. * - * @param c the column to check - * @return true if this is a joined natural join column + * @return common join columns table filter, or {@code null} */ - public boolean isNaturalJoinColumn(Column c) { - return naturalJoinColumns != null && naturalJoinColumns.contains(c); + public TableFilter getCommonJoinColumnsFilter() { + return commonJoinColumnsFilter; + } + + /** + * Check if the given column is an excluded common join column. 
+ * + * @param c + * the column to check + * @return true if this is an excluded common join column + */ + public boolean isCommonJoinColumnToExclude(Column c) { + return commonJoinColumnsToExclude != null && commonJoinColumnsToExclude.contains(c); } @Override @@ -1182,17 +1174,6 @@ public boolean hasInComparisons() { return false; } - /** - * Add the current row to the array, if there is a current row. - * - * @param rows the rows to lock - */ - public void lockRowAdd(ArrayList rows) { - if (state == FOUND) { - rows.add(get()); - } - } - public TableFilter getNestedJoin() { return nestedJoin; } @@ -1218,7 +1199,7 @@ public boolean isEvaluatable() { return evaluatable; } - public Session getSession() { + public SessionLocal getSession() { return session; } @@ -1226,6 +1207,17 @@ public IndexHints getIndexHints() { return indexHints; } + /** + * Returns whether this is a table filter with implicit DUAL table for a + * SELECT without a FROM clause. + * + * @return whether this is a table filter with implicit DUAL table + */ + public boolean isNoFromClauseFilter() { + return table instanceof DualTable && join == null && nestedJoin == null + && joinCondition == null && filterCondition == null; + } + /** * A visitor for table filters. */ @@ -1255,17 +1247,4 @@ public void accept(TableFilter f) { } } - /** - * A visitor that sets joinOuterIndirect to true. - */ - private static final class JOIVisitor implements TableFilterVisitor { - JOIVisitor() { - } - - @Override - public void accept(TableFilter f) { - f.joinOuterIndirect = true; - } - } - } diff --git a/h2/src/main/org/h2/table/TableLink.java b/h2/src/main/org/h2/table/TableLink.java index 7cb103b730..ca34042e66 100644 --- a/h2/src/main/org/h2/table/TableLink.java +++ b/h2/src/main/org/h2/table/TableLink.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; @@ -19,15 +19,18 @@ import org.h2.api.ErrorCode; import org.h2.command.Prepared; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; +import org.h2.engine.SessionLocal; import org.h2.index.Index; import org.h2.index.IndexType; import org.h2.index.LinkedIndex; +import org.h2.jdbc.JdbcConnection; +import org.h2.jdbc.JdbcResultSet; import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; import org.h2.result.Row; -import org.h2.result.RowList; import org.h2.schema.Schema; +import org.h2.util.JdbcUtils; import org.h2.util.StringUtils; import org.h2.util.Utils; import org.h2.value.DataType; @@ -61,7 +64,9 @@ public class TableLink extends Table { private boolean supportsMixedCaseIdentifiers; private boolean globalTemporary; private boolean readOnly; - private boolean targetsMySql; + private final boolean targetsMySql; + private int fetchSize = 0; + private boolean autocommit =true; public TableLink(Schema schema, int id, String name, String driver, String url, String user, String password, String originalSchema, @@ -83,8 +88,7 @@ public TableLink(Schema schema, int id, String name, String driver, } Column[] cols = { }; setColumns(cols); - linkedIndex = new LinkedIndex(this, id, IndexColumn.wrap(cols), - IndexType.createNonUnique(false)); + linkedIndex = new LinkedIndex(this, id, IndexColumn.wrap(cols), 0, IndexType.createNonUnique(false)); indexes.add(linkedIndex); } } @@ -94,6 +98,7 @@ private void connect() { for (int retry = 0;; retry++) { try { conn = database.getLinkConnection(driver, url, user, password); + conn.setAutoCommit(autocommit); synchronized (conn) { try { readMetaData(); @@ -120,62 +125,74 @@ private void readMetaData() throws SQLException { storesMixedCase = meta.storesMixedCaseIdentifiers(); storesMixedCaseQuoted = 
meta.storesMixedCaseQuotedIdentifiers(); supportsMixedCaseIdentifiers = meta.supportsMixedCaseIdentifiers(); - ResultSet rs = meta.getTables(null, originalSchema, originalTable, null); - if (rs.next() && rs.next()) { - throw DbException.get(ErrorCode.SCHEMA_NAME_MUST_MATCH, originalTable); - } - rs.close(); - rs = meta.getColumns(null, originalSchema, originalTable, null); - int i = 0; ArrayList columnList = Utils.newSmallArrayList(); HashMap columnMap = new HashMap<>(); - String catalog = null, schema = null; - while (rs.next()) { - String thisCatalog = rs.getString("TABLE_CAT"); - if (catalog == null) { - catalog = thisCatalog; - } - String thisSchema = rs.getString("TABLE_SCHEM"); - if (schema == null) { - schema = thisSchema; + String schema = null; + boolean isQuery = originalTable.startsWith("("); + if (!isQuery) { + try (ResultSet rs = meta.getTables(null, originalSchema, originalTable, null)) { + if (rs.next() && rs.next()) { + throw DbException.get(ErrorCode.SCHEMA_NAME_MUST_MATCH, originalTable); + } } - if (!Objects.equals(catalog, thisCatalog) || - !Objects.equals(schema, thisSchema)) { - // if the table exists in multiple schemas or tables, - // use the alternative solution - columnMap.clear(); - columnList.clear(); - break; + try (ResultSet rs = meta.getColumns(null, originalSchema, originalTable, null)) { + int i = 0; + String catalog = null; + while (rs.next()) { + String thisCatalog = rs.getString("TABLE_CAT"); + if (catalog == null) { + catalog = thisCatalog; + } + String thisSchema = rs.getString("TABLE_SCHEM"); + if (schema == null) { + schema = thisSchema; + } + if (!Objects.equals(catalog, thisCatalog) || + !Objects.equals(schema, thisSchema)) { + // if the table exists in multiple schemas or tables, + // use the alternative solution + columnMap.clear(); + columnList.clear(); + break; + } + String n = rs.getString("COLUMN_NAME"); + n = convertColumnName(n); + int sqlType = rs.getInt("DATA_TYPE"); + String sqlTypeName = 
rs.getString("TYPE_NAME"); + long precision = rs.getInt("COLUMN_SIZE"); + precision = convertPrecision(sqlType, precision); + int scale = rs.getInt("DECIMAL_DIGITS"); + scale = convertScale(sqlType, scale); + int type = DataType.convertSQLTypeToValueType(sqlType, sqlTypeName); + Column col = new Column(n, TypeInfo.getTypeInfo(type, precision, scale, null), this, i++); + columnList.add(col); + columnMap.put(n, col); + } } - String n = rs.getString("COLUMN_NAME"); - n = convertColumnName(n); - int sqlType = rs.getInt("DATA_TYPE"); - String sqlTypeName = rs.getString("TYPE_NAME"); - long precision = rs.getInt("COLUMN_SIZE"); - precision = convertPrecision(sqlType, precision); - int scale = rs.getInt("DECIMAL_DIGITS"); - scale = convertScale(sqlType, scale); - int type = DataType.convertSQLTypeToValueType(sqlType, sqlTypeName); - Column col = new Column(n, TypeInfo.getTypeInfo(type, precision, scale, null)); - col.setTable(this, i++); - columnList.add(col); - columnMap.put(n, col); } - rs.close(); if (originalTable.indexOf('.') < 0 && !StringUtils.isNullOrEmpty(schema)) { - qualifiedTableName = schema + "." + originalTable; + qualifiedTableName = schema + '.' 
+ originalTable; } else { qualifiedTableName = originalTable; } // check if the table is accessible - try (Statement stat = conn.getConnection().createStatement()) { - rs = stat.executeQuery("SELECT * FROM " + - qualifiedTableName + " T WHERE 1=0"); - if (columnList.isEmpty()) { + try (Statement stat = conn.getConnection().createStatement(); + ResultSet rs = stat.executeQuery("SELECT * FROM " + qualifiedTableName + " T WHERE 1=0")) { + if (rs instanceof JdbcResultSet) { + ResultInterface result = ((JdbcResultSet) rs).getResult(); + columnList.clear(); + columnMap.clear(); + for (int i = 0, l = result.getVisibleColumnCount(); i < l;) { + String n = result.getColumnName(i); + Column col = new Column(n, result.getColumnType(i), this, ++i); + columnList.add(col); + columnMap.put(n, col); + } + } else if (columnList.isEmpty()) { // alternative solution ResultSetMetaData rsMeta = rs.getMetaData(); - for (i = 0; i < rsMeta.getColumnCount();) { + for (int i = 0, l = rsMeta.getColumnCount(); i < l;) { String n = rsMeta.getColumnName(i + 1); n = convertColumnName(n); int sqlType = rsMeta.getColumnType(i + 1); @@ -184,97 +201,105 @@ private void readMetaData() throws SQLException { int scale = rsMeta.getScale(i + 1); scale = convertScale(sqlType, scale); int type = DataType.getValueTypeFromResultSet(rsMeta, i + 1); - Column col = new Column(n, TypeInfo.getTypeInfo(type, precision, scale, null)); - col.setTable(this, i++); + Column col = new Column(n, TypeInfo.getTypeInfo(type, precision, scale, null), this, i++); columnList.add(col); columnMap.put(n, col); } } - rs.close(); } catch (Exception e) { throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, e, - originalTable + "(" + e.toString() + ")"); + originalTable + '(' + e + ')'); } Column[] cols = columnList.toArray(new Column[0]); setColumns(cols); int id = getId(); - linkedIndex = new LinkedIndex(this, id, IndexColumn.wrap(cols), - IndexType.createNonUnique(false)); + linkedIndex = new LinkedIndex(this, id, 
IndexColumn.wrap(cols), 0, IndexType.createNonUnique(false)); indexes.add(linkedIndex); - try { - rs = meta.getPrimaryKeys(null, originalSchema, originalTable); + if (!isQuery) { + readIndexes(meta, columnMap); + } + } + + private void readIndexes(DatabaseMetaData meta, HashMap columnMap) { + String pkName = null; + try (ResultSet rs = meta.getPrimaryKeys(null, originalSchema, originalTable)) { + if (rs.next()) { + pkName = readPrimaryKey(rs, columnMap); + } } catch (Exception e) { // Some ODBC bridge drivers don't support it: // some combinations of "DataDirect SequeLink(R) for JDBC" - // http://www.datadirect.com/index.ssp - rs = null; + // https://www.progress.com/odbc/sequelink } - String pkName = ""; - ArrayList list; - if (rs != null && rs.next()) { - // the problem is, the rows are not sorted by KEY_SEQ - list = Utils.newSmallArrayList(); - do { - int idx = rs.getInt("KEY_SEQ"); - if (pkName == null) { - pkName = rs.getString("PK_NAME"); - } - while (list.size() < idx) { - list.add(null); - } - String col = rs.getString("COLUMN_NAME"); - col = convertColumnName(col); - Column column = columnMap.get(col); - if (idx == 0) { - // workaround for a bug in the SQLite JDBC driver - list.add(column); - } else { - list.set(idx - 1, column); - } - } while (rs.next()); - addIndex(list, IndexType.createPrimaryKey(false, false)); - rs.close(); - } - try { - rs = meta.getIndexInfo(null, originalSchema, originalTable, false, true); + try (ResultSet rs = meta.getIndexInfo(null, originalSchema, originalTable, false, true)) { + readIndexes(rs, columnMap, pkName); } catch (Exception e) { // Oracle throws an exception if the table is not found or is a // SYNONYM - rs = null; } + } + + private String readPrimaryKey(ResultSet rs, HashMap columnMap) throws SQLException { + String pkName = null; + // the problem is, the rows are not sorted by KEY_SEQ + ArrayList list = Utils.newSmallArrayList(); + do { + int idx = rs.getInt("KEY_SEQ"); + if (StringUtils.isNullOrEmpty(pkName)) { + 
pkName = rs.getString("PK_NAME"); + } + while (list.size() < idx) { + list.add(null); + } + String col = rs.getString("COLUMN_NAME"); + col = convertColumnName(col); + Column column = columnMap.get(col); + if (idx == 0) { + // workaround for a bug in the SQLite JDBC driver + list.add(column); + } else { + list.set(idx - 1, column); + } + } while (rs.next()); + addIndex(list, list.size(), IndexType.createPrimaryKey(false, false)); + return pkName; + } + + private void readIndexes(ResultSet rs, HashMap columnMap, String pkName) throws SQLException { String indexName = null; - list = Utils.newSmallArrayList(); + ArrayList list = Utils.newSmallArrayList(); + int uniqueColumnCount = 0; IndexType indexType = null; - if (rs != null) { - while (rs.next()) { - if (rs.getShort("TYPE") == DatabaseMetaData.tableIndexStatistic) { - // ignore index statistics - continue; - } - String newIndex = rs.getString("INDEX_NAME"); - if (pkName.equals(newIndex)) { - continue; - } - if (indexName != null && !indexName.equals(newIndex)) { - addIndex(list, indexType); - indexName = null; - } - if (indexName == null) { - indexName = newIndex; - list.clear(); - } - boolean unique = !rs.getBoolean("NON_UNIQUE"); - indexType = unique ? 
IndexType.createUnique(false, false) : - IndexType.createNonUnique(false); - String col = rs.getString("COLUMN_NAME"); - col = convertColumnName(col); - Column column = columnMap.get(col); - list.add(column); + while (rs.next()) { + if (rs.getShort("TYPE") == DatabaseMetaData.tableIndexStatistic) { + // ignore index statistics + continue; } - rs.close(); + String newIndex = rs.getString("INDEX_NAME"); + if (pkName != null && pkName.equals(newIndex)) { + continue; + } + if (indexName != null && !indexName.equals(newIndex)) { + addIndex(list, uniqueColumnCount, indexType); + uniqueColumnCount = 0; + indexName = null; + } + if (indexName == null) { + indexName = newIndex; + list.clear(); + } + if (!rs.getBoolean("NON_UNIQUE")) { + uniqueColumnCount++; + } + indexType = uniqueColumnCount > 0 ? IndexType.createUnique(false, false) : + IndexType.createNonUnique(false); + String col = rs.getString("COLUMN_NAME"); + col = convertColumnName(col); + Column column = columnMap.get(col); + list.add(column); } if (indexName != null) { - addIndex(list, indexType); + addIndex(list, uniqueColumnCount, indexType); } } @@ -333,7 +358,7 @@ private String convertColumnName(String columnName) { return columnName; } - private void addIndex(List list, IndexType indexType) { + private void addIndex(List list, int uniqueColumnCount, IndexType indexType) { // bind the index to the leading recognized columns in the index // (null columns might come from a function-based index) int firstNull = list.indexOf(null); @@ -347,14 +372,14 @@ private void addIndex(List list, IndexType indexType) { list = list.subList(0, firstNull); } Column[] cols = list.toArray(new Column[0]); - Index index = new LinkedIndex(this, 0, IndexColumn.wrap(cols), indexType); + Index index = new LinkedIndex(this, 0, IndexColumn.wrap(cols), uniqueColumnCount, indexType); indexes.add(index); } @Override public String getDropSQL() { StringBuilder builder = new StringBuilder("DROP TABLE IF EXISTS "); - return getSQL(builder, 
true).toString(); + return getSQL(builder, DEFAULT_SQL_FLAGS).toString(); } @Override @@ -369,7 +394,7 @@ public String getCreateSQL() { buff.append("TEMPORARY "); } buff.append("LINKED TABLE "); - getSQL(buff, true); + getSQL(buff, DEFAULT_SQL_FLAGS); if (comment != null) { buff.append(" COMMENT "); StringUtils.quoteStringSQL(buff, comment); @@ -386,31 +411,30 @@ public String getCreateSQL() { if (readOnly) { buff.append(" READONLY"); } + if (fetchSize != 0) { + buff.append(" FETCH_SIZE ").append(fetchSize); + } + if(!autocommit) { + buff.append(" AUTOCOMMIT OFF"); + } buff.append(" /*").append(DbException.HIDE_SQL).append("*/"); return buff.toString(); } @Override - public Index addIndex(Session session, String indexName, int indexId, - IndexColumn[] cols, IndexType indexType, boolean create, - String indexComment) { + public Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment) { throw DbException.getUnsupportedException("LINK"); } @Override - public boolean lock(Session session, boolean exclusive, boolean forceLockEvenInMvcc) { - // nothing to do - return false; - } - - @Override - public boolean isLockedExclusively() { - return false; + public Index getScanIndex(SessionLocal session) { + return linkedIndex; } @Override - public Index getScanIndex(Session session) { - return linkedIndex; + public boolean isInsertable() { + return !readOnly; } private void checkReadOnly() { @@ -420,19 +444,19 @@ private void checkReadOnly() { } @Override - public void removeRow(Session session, Row row) { + public void removeRow(SessionLocal session, Row row) { checkReadOnly(); getScanIndex(session).remove(session, row); } @Override - public void addRow(Session session, Row row) { + public void addRow(SessionLocal session, Row row) { checkReadOnly(); getScanIndex(session).add(session, row); } @Override - public void close(Session session) { + public void 
close(SessionLocal session) { if (conn != null) { try { conn.close(false); @@ -443,11 +467,11 @@ public void close(Session session) { } @Override - public synchronized long getRowCount(Session session) { + public synchronized long getRowCount(SessionLocal session) { //The foo alias is used to support the PostgreSQL syntax String sql = "SELECT COUNT(*) FROM " + qualifiedTableName + " as foo"; try { - PreparedStatement prep = execute(sql, null, false); + PreparedStatement prep = execute(sql, null, false, session); ResultSet rs = prep.getResultSet(); rs.next(); long count = rs.getLong(1); @@ -483,10 +507,11 @@ public String getQualifiedTable() { * @param sql the SQL statement * @param params the parameters or null * @param reusePrepared if the prepared statement can be re-used immediately + * @param session the session * @return the prepared statement, or null if it is re-used */ - public PreparedStatement execute(String sql, ArrayList params, - boolean reusePrepared) { + public PreparedStatement execute(String sql, ArrayList params, boolean reusePrepared, // + SessionLocal session) { if (conn == null) { throw connectException; } @@ -496,6 +521,9 @@ public PreparedStatement execute(String sql, ArrayList params, PreparedStatement prep = preparedMap.remove(sql); if (prep == null) { prep = conn.getConnection().prepareStatement(sql); + if (fetchSize != 0) { + prep.setFetchSize(fetchSize); + } } if (trace.isDebugEnabled()) { StringBuilder builder = new StringBuilder(getName()).append(":\n").append(sql); @@ -507,7 +535,7 @@ public PreparedStatement execute(String sql, ArrayList params, builder.append(", "); } builder.append(++i).append(": "); - v.getSQL(builder); + v.getSQL(builder, DEFAULT_SQL_FLAGS); } builder.append('}'); } @@ -515,9 +543,10 @@ public PreparedStatement execute(String sql, ArrayList params, trace.debug(builder.toString()); } if (params != null) { + JdbcConnection ownConnection = session.createConnection(false); for (int i = 0, size = params.size(); i < 
size; i++) { Value v = params.get(i); - v.set(prep, i + 1); + JdbcUtils.set(prep, i + 1, v, ownConnection); } } prep.execute(); @@ -537,28 +566,18 @@ public PreparedStatement execute(String sql, ArrayList params, } } - @Override - public void unlock(Session s) { - // nothing to do - } - - @Override - public void checkRename() { - // ok - } - @Override public void checkSupportAlter() { throw DbException.getUnsupportedException("LINK"); } @Override - public void truncate(Session session) { + public long truncate(SessionLocal session) { throw DbException.getUnsupportedException("LINK"); } @Override - public boolean canGetRowCount() { + public boolean canGetRowCount(SessionLocal session) { return true; } @@ -573,7 +592,7 @@ public TableType getTableType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { super.removeChildrenAndResources(session); close(session); database.removeMeta(session, getId()); @@ -604,33 +623,17 @@ public long getMaxDataModificationId() { } @Override - public Index getUniqueIndex() { - for (Index idx : indexes) { - if (idx.getIndexType().isUnique()) { - return idx; - } - } - return null; - } - - @Override - public void updateRows(Prepared prepared, Session session, RowList rows) { - boolean deleteInsert; + public void updateRows(Prepared prepared, SessionLocal session, LocalResult rows) { checkReadOnly(); if (emitUpdates) { - for (rows.reset(); rows.hasNext();) { + while (rows.next()) { prepared.checkCanceled(); - Row oldRow = rows.next(); - Row newRow = rows.next(); - linkedIndex.update(oldRow, newRow); - session.log(this, UndoLogRecord.DELETE, oldRow); - session.log(this, UndoLogRecord.INSERT, newRow); + Row oldRow = rows.currentRowForTable(); + rows.next(); + Row newRow = rows.currentRowForTable(); + linkedIndex.update(oldRow, newRow, session); } - deleteInsert = false; } else { - deleteInsert = true; - } - if (deleteInsert) { super.updateRows(prepared, 
session, rows); } } @@ -644,15 +647,10 @@ public void setReadOnly(boolean readOnly) { } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return ROW_COUNT_APPROXIMATION; } - @Override - public long getDiskSpaceUsed() { - return 0; - } - /** * Add this prepared statement to the list of cached statements. * @@ -679,21 +677,23 @@ public void checkWritingAllowed() { // only the target database can verify this } - /** - * Convert the values if required. Default values are not set (kept as - * null). - * - * @param session the session - * @param row the row - */ @Override - public void validateConvertUpdateSequence(Session session, Row row) { + public void convertInsertRow(SessionLocal session, Row row, Boolean overridingSystem) { + convertRow(session, row); + } + + @Override + public void convertUpdateRow(SessionLocal session, Row row, boolean fromTrigger) { + convertRow(session, row); + } + + private void convertRow(SessionLocal session, Row row) { for (int i = 0; i < columns.length; i++) { Value value = row.getValue(i); if (value != null) { // null means use the default value Column column = columns[i]; - Value v2 = column.validateConvertUpdateSequence(session, value); + Value v2 = column.validateConvertUpdateSequence(session, value, row); if (v2 != value) { row.setValue(i, v2); } @@ -702,16 +702,39 @@ public void validateConvertUpdateSequence(Session session, Row row) { } /** - * Get or generate a default value for the given column. Default values are - * not set (kept as null). 
+ * Specify the number of rows fetched by the linked table command * - * @param session the session - * @param column the column - * @return the value + * @param fetchSize to set */ - @Override - public Value getDefaultValue(Session session, Column column) { - return null; + public void setFetchSize(int fetchSize) { + this.fetchSize = fetchSize; + } + + /** + * Specify if the autocommit mode is activated or not + * + * @param mode to set + */ + public void setAutoCommit(boolean mode) { + this.autocommit= mode; + } + + /** + * The autocommit mode + * @return true if autocommit is on + */ + public boolean getAutocommit(){ + return autocommit; + } + + /** + * The number of rows to fetch + * default is 0 + * + * @return number of rows to fetch + */ + public int getFetchSize() { + return fetchSize; } } diff --git a/h2/src/main/org/h2/table/TableLinkConnection.java b/h2/src/main/org/h2/table/TableLinkConnection.java index 8158757110..2286e7de48 100644 --- a/h2/src/main/org/h2/table/TableLinkConnection.java +++ b/h2/src/main/org/h2/table/TableLinkConnection.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; @@ -37,6 +37,7 @@ public class TableLinkConnection { * How many times the connection is used. 
*/ private int useCounter; + private boolean autocommit =true; private TableLinkConnection( HashMap map, @@ -142,4 +143,21 @@ void close(boolean force) { } } + /** + * Specify if the autocommit mode is activated or not + * + * @param mode to set + */ + public void setAutoCommit(boolean mode) { + this.autocommit= mode; + } + + /** + * The autocommit mode + * @return true if autocommit is on + */ + public boolean getAutocommit(){ + return autocommit; + } + } diff --git a/h2/src/main/org/h2/table/TableSynonym.java b/h2/src/main/org/h2/table/TableSynonym.java index ec9b204535..cf35d038b7 100644 --- a/h2/src/main/org/h2/table/TableSynonym.java +++ b/h2/src/main/org/h2/table/TableSynonym.java @@ -1,23 +1,23 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; -import org.h2.command.Parser; import org.h2.command.ddl.CreateSynonymData; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.schema.Schema; -import org.h2.schema.SchemaObjectBase; +import org.h2.schema.SchemaObject; +import org.h2.util.ParserUtil; /** * Synonym for an existing table or view. All DML requests are forwarded to the backing table. * Adding indices to a synonym or altering the table is not supported. 
*/ -public class TableSynonym extends SchemaObjectBase { +public class TableSynonym extends SchemaObject { private CreateSynonymData data; @@ -61,7 +61,7 @@ public String getCreateSQLForCopy(Table table, String quotedName) { public void rename(String newName) { throw DbException.getUnsupportedException("SYNONYM"); } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { synonymFor.removeSynonym(this); database.removeMeta(session, getId()); } @@ -69,16 +69,15 @@ public void removeChildrenAndResources(Session session) { @Override public String getCreateSQL() { StringBuilder builder = new StringBuilder("CREATE SYNONYM "); - getSQL(builder, true).append(" FOR "); - Parser.quoteIdentifier(builder, data.synonymForSchema.getName(), true).append('.'); - Parser.quoteIdentifier(builder, data.synonymFor, true); + getSQL(builder, DEFAULT_SQL_FLAGS).append(" FOR "); + ParserUtil.quoteIdentifier(builder, data.synonymForSchema.getName(), DEFAULT_SQL_FLAGS).append('.'); + ParserUtil.quoteIdentifier(builder, data.synonymFor, DEFAULT_SQL_FLAGS); return builder.toString(); } @Override public String getDropSQL() { - StringBuilder builder = new StringBuilder("DROP SYNONYM "); - return getSQL(builder, true).toString(); + return getSQL(new StringBuilder("DROP SYNONYM "), DEFAULT_SQL_FLAGS).toString(); } @Override diff --git a/h2/src/main/org/h2/table/TableType.java b/h2/src/main/org/h2/table/TableType.java index 644c0c0420..0e406bd2ee 100644 --- a/h2/src/main/org/h2/table/TableType.java +++ b/h2/src/main/org/h2/table/TableType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.table; diff --git a/h2/src/main/org/h2/table/TableValueConstructorTable.java b/h2/src/main/org/h2/table/TableValueConstructorTable.java new file mode 100644 index 0000000000..c532e44082 --- /dev/null +++ b/h2/src/main/org/h2/table/TableValueConstructorTable.java @@ -0,0 +1,70 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.table; + +import java.util.ArrayList; + +import org.h2.command.query.TableValueConstructor; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.result.ResultInterface; +import org.h2.result.SimpleResult; +import org.h2.schema.Schema; + +/** + * A table for table value constructor. + */ +public class TableValueConstructorTable extends VirtualConstructedTable { + + private final ArrayList> rows; + + public TableValueConstructorTable(Schema schema, SessionLocal session, Column[] columns, + ArrayList> rows) { + super(schema, 0, "VALUES"); + setColumns(columns); + this.rows = rows; + } + + @Override + public boolean canGetRowCount(SessionLocal session) { + return true; + } + + @Override + public long getRowCount(SessionLocal session) { + return rows.size(); + } + + @Override + public long getRowCountApproximation(SessionLocal session) { + return rows.size(); + } + + @Override + public ResultInterface getResult(SessionLocal session) { + SimpleResult simple = new SimpleResult(); + int columnCount = columns.length; + for (int i = 0; i < columnCount; i++) { + Column column = columns[i]; + simple.addColumn(column.getName(), column.getType()); + } + TableValueConstructor.getVisibleResult(session, simple, columns, rows); + return simple; + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + builder.append('('); + TableValueConstructor.getValuesSQL(builder, sqlFlags, rows); + return 
builder.append(')'); + } + + @Override + public boolean isDeterministic() { + return true; + } + +} diff --git a/h2/src/main/org/h2/table/TableView.java b/h2/src/main/org/h2/table/TableView.java index 911e079516..eba1b12fa6 100644 --- a/h2/src/main/org/h2/table/TableView.java +++ b/h2/src/main/org/h2/table/TableView.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.table; @@ -14,15 +14,13 @@ import org.h2.api.ErrorCode; import org.h2.command.Prepared; import org.h2.command.ddl.CreateTableData; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.command.dml.Query; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.command.query.Query; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.User; -import org.h2.expression.Alias; import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; import org.h2.expression.Parameter; import org.h2.index.Index; @@ -33,7 +31,6 @@ import org.h2.result.Row; import org.h2.result.SortOrder; import org.h2.schema.Schema; -import org.h2.util.ColumnNamer; import org.h2.util.StringUtils; import org.h2.util.Utils; import org.h2.value.TypeInfo; @@ -64,7 +61,7 @@ public class TableView extends Table { private boolean isTableExpression; public TableView(Schema schema, int id, String name, String querySQL, - ArrayList params, Column[] columnTemplates, Session session, + ArrayList params, Column[] columnTemplates, SessionLocal session, boolean allowRecursive, boolean literalsChecked, boolean isTableExpression, boolean isTemporary) { super(schema, id, name, false, true); 
setTemporary(isTemporary); @@ -82,15 +79,12 @@ public TableView(Schema schema, int id, String name, String querySQL, * @param force if errors should be ignored * @param literalsChecked if literals have been checked */ - public void replace(String querySQL, Column[] newColumnTemplates, Session session, + public void replace(String querySQL, Column[] newColumnTemplates, SessionLocal session, boolean recursive, boolean force, boolean literalsChecked) { String oldQuerySQL = this.querySQL; Column[] oldColumnTemplates = this.columnTemplates; boolean oldRecursive = this.allowRecursive; - init(querySQL, null, - newColumnTemplates == null ? this.columnTemplates - : newColumnTemplates, - session, recursive, literalsChecked, isTableExpression); + init(querySQL, null, newColumnTemplates, session, recursive, literalsChecked, isTableExpression); DbException e = recompile(session, force, true); if (e != null) { init(oldQuerySQL, null, oldColumnTemplates, session, oldRecursive, @@ -101,7 +95,7 @@ public void replace(String querySQL, Column[] newColumnTemplates, Session sessi } private synchronized void init(String querySQL, ArrayList params, - Column[] columnTemplates, Session session, boolean allowRecursive, boolean literalsChecked, + Column[] columnTemplates, SessionLocal session, boolean allowRecursive, boolean literalsChecked, boolean isTableExpression) { this.querySQL = querySQL; this.columnTemplates = columnTemplates; @@ -112,13 +106,13 @@ private synchronized void init(String querySQL, ArrayList params, initColumnsAndTables(session, literalsChecked); } - private Query compileViewQuery(Session session, String sql, boolean literalsChecked, String viewName) { + private Query compileViewQuery(SessionLocal session, String sql, boolean literalsChecked) { Prepared p; - session.setParsingCreateView(true, viewName); + session.setParsingCreateView(true); try { p = session.prepare(sql, false, literalsChecked); } finally { - session.setParsingCreateView(false, viewName); + 
session.setParsingCreateView(false); } if (!(p instanceof Query)) { throw DbException.getSyntaxError(sql, 0); @@ -140,10 +134,10 @@ private Query compileViewQuery(Session session, String sql, boolean literalsChec * @return the exception if re-compiling this or any dependent view failed * (only when force is disabled) */ - public synchronized DbException recompile(Session session, boolean force, + public synchronized DbException recompile(SessionLocal session, boolean force, boolean clearIndexCache) { try { - compileViewQuery(session, querySQL, false, getName()); + compileViewQuery(session, querySQL, false); } catch (DbException e) { if (!force) { return e; @@ -163,16 +157,15 @@ public synchronized DbException recompile(Session session, boolean force, return force ? null : createException; } - private void initColumnsAndTables(Session session, boolean literalsChecked) { + private void initColumnsAndTables(SessionLocal session, boolean literalsChecked) { Column[] cols; removeCurrentViewFromOtherTables(); setTableExpression(isTableExpression); try { - Query compiledQuery = compileViewQuery(session, querySQL, literalsChecked, getName()); - this.querySQL = compiledQuery.getPlanSQL(true); + Query compiledQuery = compileViewQuery(session, querySQL, literalsChecked); + this.querySQL = compiledQuery.getPlanSQL(DEFAULT_SQL_FLAGS); tables = new ArrayList<>(compiledQuery.getTables()); ArrayList expressions = compiledQuery.getExpressions(); - ColumnNamer columnNamer = new ColumnNamer(session); final int count = compiledQuery.getColumnCount(); ArrayList list = new ArrayList<>(count); for (int i = 0; i < count; i++) { @@ -184,37 +177,20 @@ private void initColumnsAndTables(Session session, boolean literalsChecked) { type = columnTemplates[i].getType(); } if (name == null) { - name = expr.getAlias(); + name = expr.getColumnNameForView(session, i); } - name = columnNamer.getColumnName(expr, i, name); if (type.getValueType() == Value.UNKNOWN) { type = expr.getType(); } - Column col 
= new Column(name, type); - col.setTable(this, i); - // Fetch check constraint from view column source - ExpressionColumn fromColumn = null; - if (expr instanceof ExpressionColumn) { - fromColumn = (ExpressionColumn) expr; - } else if (expr instanceof Alias) { - Expression aliasExpr = expr.getNonAliasExpression(); - if (aliasExpr instanceof ExpressionColumn) { - fromColumn = (ExpressionColumn) aliasExpr; - } - } - if (fromColumn != null) { - Expression checkExpression = fromColumn.getColumn() - .getCheckConstraint(session, name); - if (checkExpression != null) { - col.addCheckConstraint(session, checkExpression); - } - } - list.add(col); + list.add(new Column(name, type, this, i)); } cols = list.toArray(new Column[0]); createException = null; viewQuery = compiledQuery; } catch (DbException e) { + if (e.getErrorCode() == ErrorCode.COLUMN_ALIAS_IS_NOT_SPECIFIED_1) { + throw e; + } e.addSQL(getCreateSQL()); createException = e; // If it can't be compiled, then it's a 'zero column table' @@ -257,7 +233,7 @@ public boolean isInvalid() { } @Override - public PlanItem getBestPlanItem(Session session, int[] masks, + public PlanItem getBestPlanItem(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { final CacheKey cacheKey = new CacheKey(masks, this); @@ -296,7 +272,7 @@ public Query getTopQuery() { @Override public String getDropSQL() { - return "DROP VIEW IF EXISTS " + getSQL(true) + " CASCADE"; + return getSQL(new StringBuilder("DROP VIEW IF EXISTS "), DEFAULT_SQL_FLAGS).append(" CASCADE").toString(); } @Override @@ -318,7 +294,7 @@ public String getCreateSQL() { * @return the SQL statement */ public String getCreateSQL(boolean orReplace, boolean force) { - return getCreateSQL(orReplace, force, getSQL(true)); + return getCreateSQL(orReplace, force, getSQL(DEFAULT_SQL_FLAGS)); } private String getCreateSQL(boolean orReplace, boolean force, String quotedName) { @@ -340,56 +316,39 @@ private String 
getCreateSQL(boolean orReplace, boolean force, String quotedName) } if (columns != null && columns.length > 0) { builder.append('('); - Column.writeColumns(builder, columns, true); + Column.writeColumns(builder, columns, DEFAULT_SQL_FLAGS); builder.append(')'); } else if (columnTemplates != null) { builder.append('('); - Column.writeColumns(builder, columnTemplates, true); + Column.writeColumns(builder, columnTemplates, DEFAULT_SQL_FLAGS); builder.append(')'); } return builder.append(" AS\n").append(querySQL).toString(); } @Override - public void checkRename() { - // ok - } - - @Override - public boolean lock(Session session, boolean exclusive, boolean forceLockEvenInMvcc) { - // exclusive lock means: the view will be dropped - return false; - } - - @Override - public void close(Session session) { + public void close(SessionLocal session) { // nothing to do } @Override - public void unlock(Session s) { - // nothing to do + public Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment) { + throw DbException.getUnsupportedException("VIEW"); } @Override - public boolean isLockedExclusively() { + public boolean isInsertable() { return false; } @Override - public Index addIndex(Session session, String indexName, int indexId, - IndexColumn[] cols, IndexType indexType, boolean create, - String indexComment) { + public void removeRow(SessionLocal session, Row row) { throw DbException.getUnsupportedException("VIEW"); } @Override - public void removeRow(Session session, Row row) { - throw DbException.getUnsupportedException("VIEW"); - } - - @Override - public void addRow(Session session, Row row) { + public void addRow(SessionLocal session, Row row) { throw DbException.getUnsupportedException("VIEW"); } @@ -399,17 +358,17 @@ public void checkSupportAlter() { } @Override - public void truncate(Session session) { + public long truncate(SessionLocal session) { 
throw DbException.getUnsupportedException("VIEW"); } @Override - public long getRowCount(Session session) { - throw DbException.throwInternalError(toString()); + public long getRowCount(SessionLocal session) { + throw DbException.getInternalError(toString()); } @Override - public boolean canGetRowCount() { + public boolean canGetRowCount(SessionLocal session) { // TODO view: could get the row count, but not that easy return false; } @@ -425,7 +384,7 @@ public TableType getTableType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { removeCurrentViewFromOtherTables(); super.removeChildrenAndResources(session); database.removeMeta(session, getId()); @@ -441,18 +400,18 @@ public void removeChildrenAndResources(Session session) { * @param database the database */ public static void clearIndexCaches(Database database) { - for (Session s : database.getSessions(true)) { + for (SessionLocal s : database.getSessions(true)) { s.clearViewIndexCache(); } } @Override - public StringBuilder getSQL(StringBuilder builder, boolean alwaysQuote) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { if (isTemporary() && querySQL != null) { builder.append("(\n"); return StringUtils.indent(builder, querySQL, 4, true).append(')'); } - return super.getSQL(builder, alwaysQuote); + return super.getSQL(builder, sqlFlags); } public String getQuery() { @@ -460,18 +419,17 @@ public String getQuery() { } @Override - public Index getScanIndex(Session session) { + public Index getScanIndex(SessionLocal session) { return getBestPlanItem(session, null, null, -1, null, null).getIndex(); } @Override - public Index getScanIndex(Session session, int[] masks, + public Index getScanIndex(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { if (createException != null) { String msg = createException.getMessage(); - throw 
DbException.get(ErrorCode.VIEW_IS_INVALID_2, - createException, getSQL(false), msg); + throw DbException.get(ErrorCode.VIEW_IS_INVALID_2, createException, getTraceSQL(), msg); } PlanItem item = getBestPlanItem(session, masks, filters, filter, sortOrder, allColumnsSet); return item.getIndex(); @@ -506,11 +464,6 @@ public long getMaxDataModificationId() { return maxDataModificationId; } - @Override - public Index getUniqueIndex() { - return null; - } - private void removeCurrentViewFromOtherTables() { if (tables != null) { for (Table t : tables) { @@ -540,16 +493,17 @@ public User getOwner() { * @param session the session * @param owner the owner of the query * @param name the view name + * @param columnTemplates column templates, or {@code null} * @param query the query * @param topQuery the top level query * @return the view table */ - public static TableView createTempView(Session session, User owner, - String name, Query query, Query topQuery) { + public static TableView createTempView(SessionLocal session, User owner, + String name, Column[] columnTemplates, Query query, Query topQuery) { Schema mainSchema = session.getDatabase().getMainSchema(); - String querySQL = query.getPlanSQL(true); + String querySQL = query.getPlanSQL(DEFAULT_SQL_FLAGS); TableView v = new TableView(mainSchema, 0, name, - querySQL, query.getParameters(), null /* column templates */, session, + querySQL, query.getParameters(), columnTemplates, session, false/* allow recursive */, true /* literals have already been checked when parsing original query */, false /* is table expression */, true/*temporary*/); if (v.createException != null) { @@ -566,15 +520,10 @@ private void setTopQuery(Query topQuery) { } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return ROW_COUNT_APPROXIMATION; } - @Override - public long getDiskSpaceUsed() { - return 0; - } - /** * Get the index of the first parameter. 
* @@ -592,7 +541,9 @@ public int getParameterOffset(ArrayList additionalParameters) { private static int getMaxParameterIndex(ArrayList parameters) { int result = -1; for (Parameter p : parameters) { - result = Math.max(result, p.getIndex()); + if (p != null) { + result = Math.max(result, p.getIndex()); + } } return result; } @@ -689,7 +640,11 @@ private boolean isRecursiveQueryExceptionDetected(DbException exception) { if (exception == null) { return false; } - if (exception.getErrorCode() != ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1) { + int errorCode = exception.getErrorCode(); + if (errorCode != ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 && + errorCode != ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 && + errorCode != ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2 + ) { return false; } return exception.getMessage().contains("\"" + this.getName() + "\""); @@ -716,7 +671,7 @@ public List
          getTables() { * @return the view */ public static TableView createTableViewMaybeRecursive(Schema schema, int id, String name, String querySQL, - ArrayList parameters, Column[] columnTemplates, Session session, + ArrayList parameters, Column[] columnTemplates, SessionLocal session, boolean literalsChecked, boolean isTableExpression, boolean isTemporary, Database db) { @@ -724,7 +679,7 @@ public static TableView createTableViewMaybeRecursive(Schema schema, int id, Str schema, Arrays.asList(columnTemplates), db); List columnTemplateList; - String[] querySQLOutput = {null}; + String[] querySQLOutput = new String[1]; ArrayList columnNames = new ArrayList<>(); for (Column columnTemplate: columnTemplates) { columnNames.add(columnTemplate.getName()); @@ -752,7 +707,7 @@ public static TableView createTableViewMaybeRecursive(Schema schema, int id, Str if (!view.isRecursiveQueryDetected()) { if (!isTemporary) { db.addSchemaObject(session, view); - view.lock(session, true, true); + view.lock(session, Table.EXCLUSIVE_LOCK); session.getDatabase().removeSchemaObject(session, view); // during database startup - this method does not normally get called - and it @@ -789,15 +744,15 @@ public static List createQueryColumnTemplateList(String[] cols, theQuery.prepare(); // String array of length 1 is to receive extra 'output' field in addition to // return value - querySQLOutput[0] = StringUtils.cache(theQuery.getPlanSQL(true)); - ColumnNamer columnNamer = new ColumnNamer(theQuery.getSession()); + querySQLOutput[0] = StringUtils.cache(theQuery.getPlanSQL(ADD_PLAN_INFORMATION)); + SessionLocal session = theQuery.getSession(); ArrayList withExpressions = theQuery.getExpressions(); for (int i = 0; i < withExpressions.size(); ++i) { Expression columnExp = withExpressions.get(i); // use the passed in column name if supplied, otherwise use alias // (if found) otherwise use column name derived from column // expression - String columnName = columnNamer.getColumnName(columnExp, i, 
cols); + String columnName = cols != null && cols.length > i ? cols[i] : columnExp.getColumnNameForView(session, i); columnTemplateList.add(new Column(columnName, columnExp.getType())); } @@ -815,7 +770,7 @@ public static List createQueryColumnTemplateList(String[] cols, * @param db the database * @return the table */ - public static Table createShadowTableForRecursiveTableExpression(boolean isTemporary, Session targetSession, + public static Table createShadowTableForRecursiveTableExpression(boolean isTemporary, SessionLocal targetSession, String cteViewName, Schema schema, List columns, Database db) { // create table data object @@ -826,7 +781,6 @@ public static Table createShadowTableForRecursiveTableExpression(boolean isTempo recursiveTableData.temporary = isTemporary; recursiveTableData.persistData = true; recursiveTableData.persistIndexes = !isTemporary; - recursiveTableData.create = true; recursiveTableData.session = targetSession; // this gets a meta table lock that is not released @@ -851,11 +805,11 @@ public static Table createShadowTableForRecursiveTableExpression(boolean isTempo * @param targetSession the session * @param recursiveTable the table */ - public static void destroyShadowTableForRecursiveExpression(boolean isTemporary, Session targetSession, + public static void destroyShadowTableForRecursiveExpression(boolean isTemporary, SessionLocal targetSession, Table recursiveTable) { if (recursiveTable != null) { if (!isTemporary) { - recursiveTable.lock(targetSession, true, true); + recursiveTable.lock(targetSession, Table.EXCLUSIVE_LOCK); targetSession.getDatabase().removeSchemaObject(targetSession, recursiveTable); } else { diff --git a/h2/src/main/org/h2/table/VirtualConstructedTable.java b/h2/src/main/org/h2/table/VirtualConstructedTable.java new file mode 100644 index 0000000000..77f6ec2f1f --- /dev/null +++ b/h2/src/main/org/h2/table/VirtualConstructedTable.java @@ -0,0 +1,44 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.table; + +import org.h2.engine.SessionLocal; +import org.h2.index.Index; +import org.h2.index.VirtualConstructedTableIndex; +import org.h2.result.ResultInterface; +import org.h2.schema.Schema; + +/** + * A base class for virtual tables that construct all their content at once. + */ +public abstract class VirtualConstructedTable extends VirtualTable { + + protected VirtualConstructedTable(Schema schema, int id, String name) { + super(schema, id, name); + } + + /** + * Read the rows from the table. + * + * @param session + * the session + * @return the result + */ + public abstract ResultInterface getResult(SessionLocal session); + + @Override + public Index getScanIndex(SessionLocal session) { + return new VirtualConstructedTableIndex(this, IndexColumn.wrap(columns)); + } + + @Override + public long getMaxDataModificationId() { + // TODO optimization: virtual table currently doesn't know the + // last modified date + return Long.MAX_VALUE; + } + +} diff --git a/h2/src/main/org/h2/table/VirtualTable.java b/h2/src/main/org/h2/table/VirtualTable.java new file mode 100644 index 0000000000..a0dead3956 --- /dev/null +++ b/h2/src/main/org/h2/table/VirtualTable.java @@ -0,0 +1,93 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.table; + +import java.util.ArrayList; + +import org.h2.engine.SessionLocal; +import org.h2.index.Index; +import org.h2.index.IndexType; +import org.h2.message.DbException; +import org.h2.result.Row; +import org.h2.schema.Schema; + +/** + * A base class for virtual tables. 
+ */ +public abstract class VirtualTable extends Table { + + protected VirtualTable(Schema schema, int id, String name) { + super(schema, id, name, false, true); + } + + @Override + public void close(SessionLocal session) { + // Nothing to do + } + + @Override + public Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public boolean isInsertable() { + return false; + } + + @Override + public void removeRow(SessionLocal session, Row row) { + throw DbException.getUnsupportedException("Virtual table"); + + } + + @Override + public long truncate(SessionLocal session) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public void addRow(SessionLocal session, Row row) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public void checkSupportAlter() { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public TableType getTableType() { + return null; + } + + @Override + public ArrayList getIndexes() { + return null; + } + + @Override + public boolean canReference() { + return false; + } + + @Override + public boolean canDrop() { + throw DbException.getInternalError(toString()); + } + + @Override + public String getCreateSQL() { + return null; + } + + @Override + public void checkRename() { + throw DbException.getUnsupportedException("Virtual table"); + } + +} diff --git a/h2/src/main/org/h2/table/package.html b/h2/src/main/org/h2/table/package.html index 383831f089..5ae6ac1b19 100644 --- a/h2/src/main/org/h2/table/package.html +++ b/h2/src/main/org/h2/table/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/tools/Backup.java b/h2/src/main/org/h2/tools/Backup.java index 68a49a7e66..afb464b14f 100644 --- a/h2/src/main/org/h2/tools/Backup.java +++ 
b/h2/src/main/org/h2/tools/Backup.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -23,21 +23,20 @@ /** * Creates a backup of a database. - *
          + * * This tool copies all database files. The database must be closed before using * this tool. To create a backup while the database is in use, run the BACKUP * SQL statement. In an emergency, for example if the application is not * responding, creating a backup using the Backup tool is possible by using the * quiet mode. However, if the database is changed while the backup is running * in quiet mode, the backup could be corrupt. - * - * @h2.resource */ public class Backup extends Tool { /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. *
          + * * * * @@ -49,9 +48,9 @@ public class Backup extends Tool { * * *
          Supported options are:
          [-help] or [-?]Print the list of options
          [-file <filename>]
          [-quiet]Do not print progress information
          - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new Backup().runTool(args); @@ -95,6 +94,7 @@ public void runTool(String... args) throws SQLException { * @param db the source database name (null if there is only one database, * and empty string to backup all files in this directory) * @param quiet don't print progress information + * @throws SQLException on failure */ public static void execute(String zipFileName, String directory, String db, boolean quiet) throws SQLException { @@ -132,7 +132,6 @@ private void process(String zipFileName, String directory, String db, String base = ""; for (String fileName : list) { if (allFiles || - fileName.endsWith(Constants.SUFFIX_PAGE_FILE) || fileName.endsWith(Constants.SUFFIX_MV_FILE)) { base = FileUtils.getParent(fileName); break; @@ -141,7 +140,7 @@ private void process(String zipFileName, String directory, String db, for (String fileName : list) { String f = FileUtils.toRealPath(fileName); if (!f.startsWith(base)) { - DbException.throwInternalError(f + " does not start with " + base); + throw DbException.getInternalError(f + " does not start with " + base); } if (f.endsWith(zipFileName)) { continue; diff --git a/h2/src/main/org/h2/tools/ChangeFileEncryption.java b/h2/src/main/org/h2/tools/ChangeFileEncryption.java index 928885922e..e1f600f3f9 100644 --- a/h2/src/main/org/h2/tools/ChangeFileEncryption.java +++ b/h2/src/main/org/h2/tools/ChangeFileEncryption.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.tools; @@ -8,6 +8,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.channels.Channels; import java.nio.channels.FileChannel; import java.sql.SQLException; import java.util.ArrayList; @@ -15,35 +16,30 @@ import org.h2.engine.Constants; import org.h2.message.DbException; import org.h2.mvstore.MVStore; -import org.h2.security.SHA256; import org.h2.store.FileLister; -import org.h2.store.FileStore; -import org.h2.store.fs.FileChannelInputStream; -import org.h2.store.fs.FileChannelOutputStream; import org.h2.store.fs.FilePath; -import org.h2.store.fs.FilePathEncrypt; import org.h2.store.fs.FileUtils; +import org.h2.store.fs.encrypt.FileEncrypt; +import org.h2.store.fs.encrypt.FilePathEncrypt; import org.h2.util.Tool; /** * Allows changing the database file encryption password or algorithm. - *
          + * * This tool can not be used to change a password of a user. * The database must be closed before using this tool. - * @h2.resource */ public class ChangeFileEncryption extends Tool { private String directory; private String cipherType; - private byte[] decrypt; - private byte[] encrypt; private byte[] decryptKey; private byte[] encryptKey; /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. * + * * * * @@ -59,7 +55,6 @@ public class ChangeFileEncryption extends Tool { * * *
          Supported options
          [-help] or [-?]Print the list of options
          [-cipher type]
          [-quiet]Do not print progress information
          - * @h2.resource * * @param args the command line arguments */ @@ -113,20 +108,6 @@ public void runTool(String... args) throws SQLException { } } - /** - * Get the file encryption key for a given password. - * - * @param password the password as a char array - * @return the encryption key - */ - private static byte[] getFileEncryptionKey(char[] password) { - if (password == null) { - return null; - } - // the clone is to avoid the unhelpful array cleaning - return SHA256.getKeyPasswordHash("file", password.clone()); - } - /** * Changes the password for a database. The passwords must be supplied as * char arrays and are cleaned in this method. The database must be closed @@ -138,6 +119,7 @@ private static byte[] getFileEncryptionKey(char[] password) { * @param decryptPassword the decryption password as a char array * @param encryptPassword the encryption password as a char array * @param quiet don't print progress information + * @throws SQLException on failure */ public static void execute(String dir, String db, String cipher, char[] decryptPassword, char[] encryptPassword, boolean quiet) @@ -162,11 +144,9 @@ private void process(String dir, String db, String cipher, } } change.encryptKey = FilePathEncrypt.getPasswordBytes(encryptPassword); - change.encrypt = getFileEncryptionKey(encryptPassword); } if (decryptPassword != null) { change.decryptKey = FilePathEncrypt.getPasswordBytes(decryptPassword); - change.decrypt = getFileEncryptionKey(decryptPassword); } change.out = out; change.directory = dir; @@ -207,18 +187,6 @@ private void process(String fileName, boolean quiet, char[] decryptPassword) thr } return; } - final FileStore in; - if (decrypt == null) { - in = FileStore.open(null, fileName, "r"); - } else { - in = FileStore.open(null, fileName, "r", cipherType, decrypt); - } - try { - in.init(); - copyPageStore(fileName, in, encrypt, quiet); - } finally { - in.closeSilently(); - } } private void copyMvStore(String fileName, boolean quiet, char[] 
decryptPassword) throws IOException, SQLException { @@ -239,10 +207,9 @@ private void copyMvStore(String fileName, boolean quiet, char[] decryptPassword) String temp = directory + "/temp.db"; try (FileChannel fileIn = getFileChannel(fileName, "r", decryptKey)){ - try(InputStream inStream = new FileChannelInputStream(fileIn, true)) { + try (InputStream inStream = Channels.newInputStream(fileIn)) { FileUtils.delete(temp); - try (OutputStream outStream = new FileChannelOutputStream(getFileChannel(temp, "rw", encryptKey), - true)) { + try (OutputStream outStream = Channels.newOutputStream(getFileChannel(temp, "rw", encryptKey))) { final byte[] buffer = new byte[4 * 1024]; long remaining = fileIn.size(); long total = remaining; @@ -268,45 +235,10 @@ private static FileChannel getFileChannel(String fileName, String r, byte[] decryptKey) throws IOException { FileChannel fileIn = FilePath.get(fileName).open(r); if (decryptKey != null) { - fileIn = new FilePathEncrypt.FileEncrypt(fileName, decryptKey, + fileIn = new FileEncrypt(fileName, decryptKey, fileIn); } return fileIn; } - private void copyPageStore(String fileName, FileStore in, byte[] key, boolean quiet) { - if (FileUtils.isDirectory(fileName)) { - return; - } - final String temp = directory + "/temp.db"; - FileUtils.delete(temp); - FileStore fileOut; - if (key == null) { - fileOut = FileStore.open(null, temp, "rw"); - } else { - fileOut = FileStore.open(null, temp, "rw", cipherType, key); - } - final byte[] buffer = new byte[4 * 1024]; - fileOut.init(); - long remaining = in.length() - FileStore.HEADER_LENGTH; - long total = remaining; - in.seek(FileStore.HEADER_LENGTH); - fileOut.seek(FileStore.HEADER_LENGTH); - long time = System.nanoTime(); - while (remaining > 0) { - if (!quiet && System.nanoTime() - time > TimeUnit.SECONDS.toNanos(1)) { - out.println(fileName + ": " + (100 - 100 * remaining / total) + "%"); - time = System.nanoTime(); - } - int len = (int) Math.min(buffer.length, remaining); - 
in.readFully(buffer, 0, len); - fileOut.write(buffer, 0, len); - remaining -= len; - } - in.close(); - fileOut.close(); - FileUtils.delete(fileName); - FileUtils.move(temp, fileName); - } - } diff --git a/h2/src/main/org/h2/tools/CompressTool.java b/h2/src/main/org/h2/tools/CompressTool.java index d8051733de..7fa7d50702 100644 --- a/h2/src/main/org/h2/tools/CompressTool.java +++ b/h2/src/main/org/h2/tools/CompressTool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -34,9 +34,9 @@ */ public class CompressTool { - private static final int MAX_BUFFER_SIZE = - 3 * Constants.IO_BUFFER_SIZE_COMPRESS; - private byte[] cachedBuffer; + private static final int MAX_BUFFER_SIZE = 3 * Constants.IO_BUFFER_SIZE_COMPRESS; + + private byte[] buffer; private CompressTool() { // don't allow construction @@ -46,10 +46,10 @@ private byte[] getBuffer(int min) { if (min > MAX_BUFFER_SIZE) { return Utils.newBytes(min); } - if (cachedBuffer == null || cachedBuffer.length < min) { - cachedBuffer = Utils.newBytes(min); + if (buffer == null || buffer.length < min) { + buffer = Utils.newBytes(min); } - return cachedBuffer; + return buffer; } /** @@ -84,10 +84,9 @@ public byte[] compress(byte[] in, String algorithm) { private static int compress(byte[] in, int len, Compressor compress, byte[] out) { - int newLen = 0; out[0] = (byte) compress.getAlgorithm(); int start = 1 + writeVariableInt(out, 1, len); - newLen = compress.compress(in, len, out, start); + int newLen = compress.compress(in, 0, len, out, start); if (newLen > len + start || newLen <= 0) { out[0] = Compressor.NO; System.arraycopy(in, 0, out, start, len); @@ -103,6 +102,9 @@ private static int compress(byte[] in, int len, Compressor 
compress, * @return the uncompressed data */ public byte[] expand(byte[] in) { + if (in.length == 0) { + throw DbException.get(ErrorCode.COMPRESSION_ERROR); + } int algorithm = in[0]; Compressor compress = getCompressor(algorithm); try { @@ -118,6 +120,9 @@ public byte[] expand(byte[] in) { /** * INTERNAL + * @param in compressed data + * @param out uncompressed result + * @param outPos the offset at the output array */ public static void expand(byte[] in, byte[] out, int outPos) { int algorithm = in[0]; @@ -237,8 +242,10 @@ private static Compressor getCompressor(String algorithm) { /** * INTERNAL + * @param algorithm to translate into index + * @return index of the specified algorithm */ - public static int getCompressAlgorithm(String algorithm) { + private static int getCompressAlgorithm(String algorithm) { algorithm = StringUtils.toUpperEnglish(algorithm); if ("NO".equals(algorithm)) { return Compressor.NO; @@ -270,6 +277,10 @@ private static Compressor getCompressor(int algorithm) { /** * INTERNAL + * @param out stream + * @param compressionAlgorithm to be used + * @param entryName in a zip file + * @return compressed stream */ public static OutputStream wrapOutputStream(OutputStream out, String compressionAlgorithm, String entryName) { @@ -297,6 +308,10 @@ public static OutputStream wrapOutputStream(OutputStream out, /** * INTERNAL + * @param in stream + * @param compressionAlgorithm to be used + * @param entryName in a zip file + * @return in stream or null if there is no such entry */ public static InputStream wrapInputStream(InputStream in, String compressionAlgorithm, String entryName) { diff --git a/h2/src/main/org/h2/tools/Console.java b/h2/src/main/org/h2/tools/Console.java index 8c53721f04..42624300f1 100644 --- a/h2/src/main/org/h2/tools/Console.java +++ b/h2/src/main/org/h2/tools/Console.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -16,7 +16,6 @@ /** * Starts the H2 Console (web-) server, as well as the TCP and PG server. - * @h2.resource * * @author Thomas Mueller, Ridvan Agar */ @@ -30,9 +29,10 @@ public class Console extends Tool implements ShutdownHandler { /** * When running without options, -tcp, -web, -browser and -pg are started. - *
          - * Options are case sensitive. Supported options are: + * + * Options are case sensitive. * + * * * * @@ -55,12 +55,12 @@ public class Console extends Tool implements ShutdownHandler { * *
          Supported options
          [-help] or [-?]Print the list of options
          [-url]Start the PG server
          * For each Server, additional options are available; - * for details, see the Server tool.
          + * for details, see the Server tool. * If a service can not be started, the program * terminates with an exit code of 1. - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { Console console; @@ -113,6 +113,8 @@ public void runTool(String... args) throws SQLException { } else if ("-webAllowOthers".equals(arg)) { // no parameters webAllowOthers = true; + } else if ("-webExternalNames".equals(arg)) { + i++; } else if ("-webDaemon".equals(arg)) { // no parameters } else if ("-webSSL".equals(arg)) { diff --git a/h2/src/main/org/h2/tools/ConvertTraceFile.java b/h2/src/main/org/h2/tools/ConvertTraceFile.java index c386d05dd2..c9de53f6c6 100644 --- a/h2/src/main/org/h2/tools/ConvertTraceFile.java +++ b/h2/src/main/org/h2/tools/ConvertTraceFile.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -20,9 +20,8 @@ /** * Converts a .trace.db file to a SQL script and Java source code. - *
          + * * SQL statement statistics are listed as well. - * @h2.resource */ public class ConvertTraceFile extends Tool { @@ -55,8 +54,9 @@ public int compareTo(Stat other) { } /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. * + * * * * @@ -66,9 +66,9 @@ public int compareTo(Stat other) { * * *
          Supported options
          [-help] or [-?]Print the list of options
          [-traceFile <file>]
          [-javaClass <file>]The Java directory and class file name (default: Test)
          - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new ConvertTraceFile().runTool(args); @@ -107,7 +107,7 @@ public void runTool(String... args) throws SQLException { private void convertFile(String traceFileName, String javaClassName, String script) throws IOException { LineNumberReader reader = new LineNumberReader( - IOUtils.getBufferedReader( + IOUtils.getReader( FileUtils.newInputStream(traceFileName))); PrintWriter javaWriter = new PrintWriter( IOUtils.getBufferedWriter( diff --git a/h2/src/main/org/h2/tools/CreateCluster.java b/h2/src/main/org/h2/tools/CreateCluster.java index b67511c75d..04508c784e 100644 --- a/h2/src/main/org/h2/tools/CreateCluster.java +++ b/h2/src/main/org/h2/tools/CreateCluster.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -8,8 +8,6 @@ import java.io.IOException; import java.io.PipedReader; import java.io.PipedWriter; -import java.sql.Connection; -import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; @@ -17,19 +15,20 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import org.h2.jdbc.JdbcConnection; import org.h2.util.Tool; /** * Creates a cluster from a stand-alone database. - *
          + * * Copies a database to another location if required. - * @h2.resource */ public class CreateCluster extends Tool { /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. * + * * * * @@ -43,9 +42,9 @@ public class CreateCluster extends Tool { * * *
          Supported options
          [-help] or [-?]Print the list of options
          [-urlSource "<url>"]
          [-serverList <list>]The comma separated list of host names or IP addresses
          - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new CreateCluster().runTool(args); @@ -92,6 +91,7 @@ public void runTool(String... args) throws SQLException { * @param user the user name * @param password the password * @param serverList the server list + * @throws SQLException on failure */ public void execute(String urlSource, String urlTarget, String user, String password, String serverList) throws SQLException { @@ -100,11 +100,9 @@ public void execute(String urlSource, String urlTarget, private static void process(String urlSource, String urlTarget, String user, String password, String serverList) throws SQLException { - org.h2.Driver.load(); - // use cluster='' so connecting is possible // even if the cluster is enabled - try (Connection connSource = DriverManager.getConnection(urlSource + ";CLUSTER=''", user, password); + try (JdbcConnection connSource = new JdbcConnection(urlSource + ";CLUSTER=''", null, user, password, false); Statement statSource = connSource.createStatement()) { // enable the exclusive mode and close other connections, // so that data can't change while restoring the second database @@ -122,7 +120,7 @@ private static void performTransfer(Statement statSource, String urlTarget, Stri String serverList) throws SQLException { // Delete the target database first. - try (Connection connTarget = DriverManager.getConnection(urlTarget + ";CLUSTER=''", user, password); + try (JdbcConnection connTarget = new JdbcConnection(urlTarget + ";CLUSTER=''", null, user, password, false); Statement statTarget = connTarget.createStatement()) { statTarget.execute("DROP ALL OBJECTS DELETE FILES"); } @@ -131,7 +129,7 @@ private static void performTransfer(Statement statSource, String urlTarget, Stri Future threadFuture = startWriter(pipeReader, statSource); // Read data from pipe reader, restore on target. 
- try (Connection connTarget = DriverManager.getConnection(urlTarget, user, password); + try (JdbcConnection connTarget = new JdbcConnection(urlTarget, null, user, password, false); Statement statTarget = connTarget.createStatement()) { RunScript.execute(connTarget, pipeReader); @@ -159,22 +157,19 @@ private static Future startWriter(final PipedReader pipeReader, final PipedWriter pipeWriter = new PipedWriter(pipeReader); // Since exceptions cannot be thrown across thread boundaries, return // the task's future so we can check manually - Future threadFuture = thread.submit(new Runnable() { - @Override - public void run() { - // If the creation of the piped writer fails, the reader will - // throw an IOException as soon as read() is called: IOException - // - if the pipe is broken, unconnected, closed, or an I/O error - // occurs. The reader's IOException will then trigger the - // finally{} that releases exclusive mode on the source DB. - try (PipedWriter writer = pipeWriter; - final ResultSet rs = statSource.executeQuery("SCRIPT")) { - while (rs.next()) { - writer.write(rs.getString(1) + "\n"); - } - } catch (SQLException | IOException ex) { - throw new IllegalStateException("Producing script from the source DB is failing.", ex); + Future threadFuture = thread.submit(() -> { + // If the creation of the piped writer fails, the reader will + // throw an IOException as soon as read() is called: IOException + // - if the pipe is broken, unconnected, closed, or an I/O error + // occurs. The reader's IOException will then trigger the + // finally{} that releases exclusive mode on the source DB. 
+ try (PipedWriter writer = pipeWriter; + final ResultSet rs = statSource.executeQuery("SCRIPT")) { + while (rs.next()) { + writer.write(rs.getString(1) + "\n"); } + } catch (SQLException | IOException ex) { + throw new IllegalStateException("Producing script from the source DB is failing.", ex); } }); diff --git a/h2/src/main/org/h2/tools/Csv.java b/h2/src/main/org/h2/tools/Csv.java index ca027b708c..60b9c3777f 100644 --- a/h2/src/main/org/h2/tools/Csv.java +++ b/h2/src/main/org/h2/tools/Csv.java @@ -1,21 +1,20 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; -import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Reader; import java.io.Writer; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.ResultSet; import java.sql.ResultSetMetaData; @@ -25,8 +24,8 @@ import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.engine.Constants; -import org.h2.engine.SysProperties; import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; import org.h2.store.fs.FileUtils; import org.h2.util.IOUtils; import org.h2.util.JdbcUtils; @@ -53,11 +52,11 @@ public class Csv implements SimpleRowSource { private boolean preserveWhitespace; private boolean writeColumnHeader = true; private char lineComment; - private String lineSeparator = SysProperties.LINE_SEPARATOR; + private String lineSeparator = System.lineSeparator(); private String nullString = ""; private String 
fileName; - private Reader input; + private BufferedReader input; private char[] inputBuffer; private int inputBufferPos; private int inputBufferStart = -1; @@ -71,31 +70,15 @@ private int writeResultSet(ResultSet rs) throws SQLException { ResultSetMetaData meta = rs.getMetaData(); int columnCount = meta.getColumnCount(); String[] row = new String[columnCount]; - int[] sqlTypes = new int[columnCount]; for (int i = 0; i < columnCount; i++) { row[i] = meta.getColumnLabel(i + 1); - sqlTypes[i] = meta.getColumnType(i + 1); } if (writeColumnHeader) { writeRow(row); } while (rs.next()) { for (int i = 0; i < columnCount; i++) { - Object o; - switch (sqlTypes[i]) { - case Types.DATE: - o = rs.getDate(i + 1); - break; - case Types.TIME: - o = rs.getTime(i + 1); - break; - case Types.TIMESTAMP: - o = rs.getTimestamp(i + 1); - break; - default: - o = rs.getString(i + 1); - } - row[i] = o == null ? null : o.toString(); + row[i] = rs.getString(i + 1); } writeRow(row); rows++; @@ -116,6 +99,7 @@ private int writeResultSet(ResultSet rs) throws SQLException { * @param writer the writer * @param rs the result set * @return the number of rows written + * @throws SQLException on failure */ public int write(Writer writer, ResultSet rs) throws SQLException { this.output = writer; @@ -137,6 +121,7 @@ public int write(Writer writer, ResultSet rs) throws SQLException { * first row. 
* @param charset the charset or null to use the system default charset * @return the number of rows written + * @throws SQLException on failure */ public int write(String outputFileName, ResultSet rs, String charset) throws SQLException { @@ -158,6 +143,7 @@ public int write(String outputFileName, ResultSet rs, String charset) * @param charset the charset or null to use the system default charset * (see system property file.encoding) * @return the number of rows written + * @throws SQLException on failure */ public int write(Connection conn, String outputFileName, String sql, String charset) throws SQLException { @@ -172,7 +158,7 @@ public int write(Connection conn, String outputFileName, String sql, * Reads from the CSV file and returns a result set. The rows in the result * set are created on demand, that means the file is kept open until all * rows are read or the result set is closed. - *
          + * * If the columns are read from the CSV file, then the following rules are * used: columns names that start with a letter or '_', and only * contain letters, '_', and digits, are considered case insensitive @@ -184,6 +170,7 @@ public int write(Connection conn, String outputFileName, String sql, * file * @param charset the charset or null to use the system default charset * @return the result set + * @throws SQLException on failure */ public ResultSet read(String inputFileName, String[] colNames, String charset) throws SQLException { @@ -204,10 +191,12 @@ public ResultSet read(String inputFileName, String[] colNames, * @param colNames or null if the column names should be read from the CSV * file * @return the result set + * @throws IOException on failure */ public ResultSet read(Reader reader, String[] colNames) throws IOException { init(null, null); - this.input = reader; + this.input = reader instanceof BufferedReader ? (BufferedReader) reader + : new BufferedReader(reader, Constants.IO_BUFFER_SIZE); return readResultSet(colNames); } @@ -256,7 +245,7 @@ private void initWrite() throws IOException { new OutputStreamWriter(out, characterSet) : new OutputStreamWriter(out)); } catch (Exception e) { close(); - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } } @@ -309,17 +298,13 @@ private String escape(String data) { private void initRead() throws IOException { if (input == null) { try { - InputStream in = FileUtils.newInputStream(fileName); - in = new BufferedInputStream(in, Constants.IO_BUFFER_SIZE); - input = characterSet != null ? new InputStreamReader(in, characterSet) : new InputStreamReader(in); + input = FileUtils.newBufferedReader(fileName, + characterSet != null ? 
Charset.forName(characterSet) : StandardCharsets.UTF_8); } catch (IOException e) { close(); throw e; } } - if (!input.markSupported()) { - input = new BufferedReader(input); - } input.mark(1); int bom = input.read(); if (bom != 0xfeff) { diff --git a/h2/src/main/org/h2/tools/DeleteDbFiles.java b/h2/src/main/org/h2/tools/DeleteDbFiles.java index 277b95f5ce..45fe453fd9 100644 --- a/h2/src/main/org/h2/tools/DeleteDbFiles.java +++ b/h2/src/main/org/h2/tools/DeleteDbFiles.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -15,15 +15,15 @@ /** * Deletes all files belonging to a database. - *
          + * * The database must be closed before calling this tool. - * @h2.resource */ public class DeleteDbFiles extends Tool { /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. * + * * * * @@ -33,9 +33,9 @@ public class DeleteDbFiles extends Tool { * * *
          Supported options
          [-help] or [-?]Print the list of options
          [-dir <dir>]
          [-quiet]Do not print progress information
          - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new DeleteDbFiles().runTool(args); diff --git a/h2/src/main/org/h2/tools/GUIConsole.java b/h2/src/main/org/h2/tools/GUIConsole.java index 53c0d3b563..c2b9d1e8c3 100644 --- a/h2/src/main/org/h2/tools/GUIConsole.java +++ b/h2/src/main/org/h2/tools/GUIConsole.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -31,10 +31,9 @@ import java.awt.event.WindowEvent; import java.awt.event.WindowListener; import java.io.IOException; -import java.sql.DriverManager; import java.util.Locale; -import java.util.concurrent.TimeUnit; +import org.h2.jdbc.JdbcConnection; import org.h2.util.Utils; /** @@ -287,8 +286,8 @@ private void startBrowser() { if (urlText != null) { urlText.setText(url); } - long now = System.nanoTime(); - if (lastOpenNs == 0 || lastOpenNs + TimeUnit.MILLISECONDS.toNanos(100) < now) { + long now = Utils.currentNanoTime(); + if (lastOpenNs == 0 || now - lastOpenNs > 100_000_000L) { lastOpenNs = now; openBrowser(url); } @@ -465,7 +464,7 @@ private void createDatabase() { } String url = "jdbc:h2:" + path; try { - DriverManager.getConnection(url, user, password).close(); + new JdbcConnection(url, null, user, password, false).close(); errorArea.setForeground(new Color(0, 0x99, 0)); errorArea.setText("Database was created successfully.\n\n" + "JDBC URL for H2 Console:\n" diff --git a/h2/src/main/org/h2/tools/MultiDimension.java b/h2/src/main/org/h2/tools/MultiDimension.java index 1019c0ce7b..7c694d576d 100644 --- a/h2/src/main/org/h2/tools/MultiDimension.java +++ 
b/h2/src/main/org/h2/tools/MultiDimension.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -9,7 +9,6 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import org.h2.util.StringUtils; @@ -175,6 +174,7 @@ public String generatePreparedQuery(String table, String scalarColumn, * @param min the lower values * @param max the upper values * @return the result set + * @throws SQLException on failure */ public ResultSet getResult(PreparedStatement prep, int[] min, int[] max) throws SQLException { @@ -241,7 +241,7 @@ private static int getSize(int[] min, int[] max, int len) { * @param total product of the gap lengths */ private void combineEntries(ArrayList list, int total) { - Collections.sort(list, this); + list.sort(this); for (int minGap = 10; minGap < total; minGap += minGap / 2) { for (int i = 0; i < list.size() - 1; i++) { long[] current = list.get(i); diff --git a/h2/src/main/org/h2/tools/Recover.java b/h2/src/main/org/h2/tools/Recover.java index 658ef4d995..ee267c10c3 100644 --- a/h2/src/main/org/h2/tools/Recover.java +++ b/h2/src/main/org/h2/tools/Recover.java @@ -1,17 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.tools; -import java.io.BufferedInputStream; import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; -import java.io.OutputStream; import java.io.PrintWriter; import java.io.Reader; import java.io.SequenceInputStream; @@ -21,7 +19,6 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; -import java.util.BitSet; import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; @@ -29,58 +26,43 @@ import java.util.Iterator; import java.util.Map; import java.util.Map.Entry; -import java.util.zip.CRC32; -import org.h2.api.ErrorCode; -import org.h2.api.JavaObjectSerializer; -import org.h2.compress.CompressLZF; import org.h2.engine.Constants; import org.h2.engine.DbObject; import org.h2.engine.MetaRecord; -import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; import org.h2.mvstore.MVStoreTool; import org.h2.mvstore.StreamStore; +import org.h2.mvstore.db.LobStorageMap; import org.h2.mvstore.db.ValueDataType; import org.h2.mvstore.tx.TransactionMap; import org.h2.mvstore.tx.TransactionStore; +import org.h2.mvstore.type.DataType; +import org.h2.mvstore.type.MetaType; +import org.h2.mvstore.type.StringDataType; import org.h2.result.Row; -import org.h2.result.RowFactory; -import org.h2.result.SimpleRow; -import org.h2.security.SHA256; -import org.h2.store.Data; import org.h2.store.DataHandler; -import org.h2.store.DataReader; import org.h2.store.FileLister; import org.h2.store.FileStore; -import org.h2.store.FileStoreInputStream; -import org.h2.store.LobStorageBackend; import org.h2.store.LobStorageFrontend; -import org.h2.store.LobStorageMap; -import org.h2.store.Page; -import org.h2.store.PageFreeList; -import org.h2.store.PageLog; -import org.h2.store.PageStore; +import 
org.h2.store.LobStorageInterface; import org.h2.store.fs.FileUtils; +import org.h2.util.HasSQL; import org.h2.util.IOUtils; -import org.h2.util.IntArray; -import org.h2.util.MathUtils; import org.h2.util.SmallLRUCache; import org.h2.util.StringUtils; import org.h2.util.TempFileDeleter; import org.h2.util.Tool; -import org.h2.util.Utils; import org.h2.value.CompareMode; import org.h2.value.Value; -import org.h2.value.ValueArray; +import org.h2.value.ValueCollectionBase; import org.h2.value.ValueLob; -import org.h2.value.ValueLobDb; -import org.h2.value.ValueLong; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; /** * Helps recovering a corrupted database. - * @h2.resource */ public class Recover extends Tool implements DataHandler { @@ -90,54 +72,16 @@ public class Recover extends Tool implements DataHandler { private int recordLength; private int valueId; private boolean trace; - private boolean transactionLog; private ArrayList schema; private HashSet objectIdSet; private HashMap tableMap; private HashMap columnTypeMap; - private boolean remove; - - private int pageSize; - private FileStore store; - private int[] parents; - - private Stats stat; private boolean lobMaps; /** - * Statistic data - */ - static class Stats { - - /** - * The empty space in bytes in a data leaf pages. - */ - long pageDataEmpty; - - /** - * The number of bytes used for data. - */ - long pageDataRows; - - /** - * The number of bytes used for the page headers. - */ - long pageDataHead; - - /** - * The count per page type. - */ - final int[] pageTypeCount = new int[Page.TYPE_STREAM_DATA + 2]; - - /** - * The number of free pages. - */ - int free; - } - - /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. * + * * * * @@ -150,9 +94,9 @@ static class Stats { * *
          Supported options
          [-help] or [-?]Print the list of options
          [-dir <dir>]Print the transaction log
          * Encrypted databases need to be decrypted first. - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new Recover().runTool(args); @@ -168,6 +112,7 @@ public static void main(String... args) throws SQLException { * a hardware problem. * * @param args the command line arguments + * @throws SQLException on failure */ @Override public void runTool(String... args) throws SQLException { @@ -179,12 +124,8 @@ public void runTool(String... args) throws SQLException { dir = args[++i]; } else if ("-db".equals(arg)) { db = args[++i]; - } else if ("-removePassword".equals(arg)) { - remove = true; } else if ("-trace".equals(arg)) { trace = true; - } else if ("-transactionLog".equals(arg)) { - transactionLog = true; } else if (arg.equals("-help") || arg.equals("-?")) { showUsage(); return; @@ -197,55 +138,11 @@ public void runTool(String... args) throws SQLException { /** * INTERNAL - */ - public static Reader readClob(String fileName) throws IOException { - return new BufferedReader(new InputStreamReader(readBlob(fileName), - StandardCharsets.UTF_8)); - } - - /** - * INTERNAL - */ - public static InputStream readBlob(String fileName) throws IOException { - return new BufferedInputStream(FileUtils.newInputStream(fileName)); - } - - /** - * INTERNAL - */ - public static ValueLobDb readBlobDb(Connection conn, long lobId, - long precision) { - DataHandler h = ((JdbcConnection) conn).getSession().getDataHandler(); - verifyPageStore(h); - ValueLobDb lob = ValueLobDb.create(Value.BLOB, h, LobStorageFrontend.TABLE_TEMP, - lobId, null, precision); - lob.setRecoveryReference(true); - return lob; - } - - private static void verifyPageStore(DataHandler h) { - if (h.getLobStorage() instanceof LobStorageMap) { - throw DbException.get(ErrorCode.FEATURE_NOT_SUPPORTED_1, - "Restore page store recovery SQL script " + - "can only be restored to a PageStore file"); - } - } - - /** 
- * INTERNAL - */ - public static ValueLobDb readClobDb(Connection conn, long lobId, - long precision) { - DataHandler h = ((JdbcConnection) conn).getSession().getDataHandler(); - verifyPageStore(h); - ValueLobDb lob = ValueLobDb.create(Value.CLOB, h, LobStorageFrontend.TABLE_TEMP, - lobId, null, precision); - lob.setRecoveryReference(true); - return lob; - } - - /** - * INTERNAL + * @param conn to use + * @param lobId id of the LOB stream + * @param precision not used + * @return InputStream to read LOB content from + * @throws SQLException on failure */ public static InputStream readBlobMap(Connection conn, long lobId, long precision) throws SQLException { @@ -292,6 +189,11 @@ public InputStream nextElement() { /** * INTERNAL + * @param conn to use + * @param lobId id of the LOB stream + * @param precision not used + * @return Reader to read LOB content from + * @throws SQLException on failure */ public static Reader readClobMap(Connection conn, long lobId, long precision) throws Exception { @@ -317,6 +219,7 @@ private void traceError(String message, Throwable t) { * * @param dir the directory * @param db the database name (null for all databases) + * @throws SQLException on failure */ public static void execute(String dir, String db) throws SQLException { try { @@ -332,13 +235,9 @@ private void process(String dir, String db) { printNoDatabaseFilesFound(dir, db); } for (String fileName : list) { - if (fileName.endsWith(Constants.SUFFIX_PAGE_FILE)) { - dumpPageStore(fileName); - } else if (fileName.endsWith(Constants.SUFFIX_LOB_FILE)) { - dumpLob(fileName, false); - } else if (fileName.endsWith(Constants.SUFFIX_MV_FILE)) { + if (fileName.endsWith(Constants.SUFFIX_MV_FILE)) { String f = fileName.substring(0, fileName.length() - - Constants.SUFFIX_PAGE_FILE.length()); + Constants.SUFFIX_MV_FILE.length()); try (PrintWriter writer = getWriter(fileName, ".txt")) { MVStoreTool.dump(fileName, writer, true); MVStoreTool.info(fileName, writer); @@ -362,86 +261,22 @@ 
private PrintWriter getWriter(String fileName, String suffix) { } } - private void writeDataError(PrintWriter writer, String error, byte[] data) { - writer.println("-- ERROR: " + error + " storageId: " - + storageId + " recordLength: " + recordLength + " valueId: " + valueId); - StringBuilder sb = new StringBuilder(); - for (byte aData1 : data) { - int x = aData1 & 0xff; - if (x >= ' ' && x < 128) { - sb.append((char) x); - } else { - sb.append('?'); - } - } - writer.println("-- dump: " + sb.toString()); - sb = new StringBuilder(); - for (byte aData : data) { - int x = aData & 0xff; - sb.append(' '); - if (x < 16) { - sb.append('0'); - } - sb.append(Integer.toHexString(x)); - } - writer.println("-- dump: " + sb.toString()); - } - - private void dumpLob(String fileName, boolean lobCompression) { - OutputStream fileOut = null; - FileStore fileStore = null; - long size = 0; - String n = fileName + (lobCompression ? ".comp" : "") + ".txt"; - InputStream in = null; - try { - fileOut = FileUtils.newOutputStream(n, false); - fileStore = FileStore.open(null, fileName, "r"); - fileStore.init(); - in = new FileStoreInputStream(fileStore, this, lobCompression, false); - size = IOUtils.copy(in, fileOut); - } catch (Throwable e) { - // this is usually not a problem, because we try both compressed and - // uncompressed - } finally { - IOUtils.closeSilently(fileOut); - IOUtils.closeSilently(in); - closeSilently(fileStore); - } - if (size == 0) { - try { - FileUtils.delete(n); - } catch (Exception e) { - traceError(n, e); - } - } - } - private void getSQL(StringBuilder builder, String column, Value v) { if (v instanceof ValueLob) { ValueLob lob = (ValueLob) v; - byte[] small = lob.getSmall(); - if (small == null) { - String file = lob.getFileName(); - String type = lob.getValueType() == Value.BLOB ? 
"BLOB" : "CLOB"; - if (lob.isCompressed()) { - dumpLob(file, true); - file += ".comp"; - } - builder.append("READ_").append(type).append("('").append(file).append(".txt')"); - return; - } - } else if (v instanceof ValueLobDb) { - ValueLobDb lob = (ValueLobDb) v; - byte[] small = lob.getSmall(); - if (small == null) { - int type = lob.getValueType(); - long id = lob.getLobId(); - long precision = lob.getType().getPrecision(); + LobData lobData = lob.getLobData(); + if (lobData instanceof LobDataDatabase) { + LobDataDatabase lobDataDatabase = (LobDataDatabase) lobData; + int type = v.getValueType(); + long id = lobDataDatabase.getLobId(); + long precision; String columnType; if (type == Value.BLOB) { + precision = lob.octetLength(); columnType = "BLOB"; builder.append("READ_BLOB"); } else { + precision = lob.charLength(); columnType = "CLOB"; builder.append("READ_CLOB"); } @@ -455,189 +290,52 @@ private void getSQL(StringBuilder builder, String column, Value v) { return; } } - v.getSQL(builder); + v.getSQL(builder, HasSQL.NO_CASTS); } private void setDatabaseName(String name) { databaseName = name; } - private void dumpPageStore(String fileName) { + private void dumpMVStoreFile(PrintWriter writer, String fileName) { + writer.println("-- MVStore"); + String className = getClass().getName(); + writer.println("CREATE ALIAS IF NOT EXISTS READ_BLOB_MAP FOR '" + className + ".readBlobMap';"); + writer.println("CREATE ALIAS IF NOT EXISTS READ_CLOB_MAP FOR '" + className + ".readClobMap';"); + resetSchema(); setDatabaseName(fileName.substring(0, fileName.length() - - Constants.SUFFIX_PAGE_FILE.length())); - PrintWriter writer = null; - stat = new Stats(); - try { - writer = getWriter(fileName, ".sql"); - writer.println("CREATE ALIAS IF NOT EXISTS READ_BLOB FOR \"" + - this.getClass().getName() + ".readBlob\";"); - writer.println("CREATE ALIAS IF NOT EXISTS READ_CLOB FOR \"" + - this.getClass().getName() + ".readClob\";"); - writer.println("CREATE ALIAS IF NOT EXISTS 
READ_BLOB_DB FOR \"" + - this.getClass().getName() + ".readBlobDb\";"); - writer.println("CREATE ALIAS IF NOT EXISTS READ_CLOB_DB FOR \"" + - this.getClass().getName() + ".readClobDb\";"); - resetSchema(); - store = FileStore.open(null, fileName, remove ? "rw" : "r"); - long length = store.length(); + Constants.SUFFIX_MV_FILE.length())); + try (MVStore mv = new MVStore.Builder(). + fileName(fileName).recoveryMode().readOnly().open()) { + dumpLobMaps(writer, mv); + writer.println("-- Layout"); + dumpLayout(writer, mv); + writer.println("-- Meta"); + dumpMeta(writer, mv); + writer.println("-- Types"); + dumpTypes(writer, mv); + writer.println("-- Tables"); + TransactionStore store = new TransactionStore(mv, new ValueDataType()); try { store.init(); - } catch (Exception e) { + } catch (Throwable e) { writeError(writer, e); } - Data s = Data.create(this, 128, false); - seek(0); - store.readFully(s.getBytes(), 0, 128); - s.setPos(48); - pageSize = s.readInt(); - int writeVersion = s.readByte(); - int readVersion = s.readByte(); - writer.println("-- pageSize: " + pageSize + - " writeVersion: " + writeVersion + - " readVersion: " + readVersion); - if (pageSize < PageStore.PAGE_SIZE_MIN || - pageSize > PageStore.PAGE_SIZE_MAX) { - pageSize = Constants.DEFAULT_PAGE_SIZE; - writer.println("-- ERROR: page size; using " + pageSize); - } - long pageCount = length / pageSize; - parents = new int[(int) pageCount]; - s = Data.create(this, pageSize, false); - for (long i = 3; i < pageCount; i++) { - s.reset(); - seek(i); - store.readFully(s.getBytes(), 0, 32); - s.readByte(); - s.readShortInt(); - parents[(int) i] = s.readInt(); - } - int logKey = 0, logFirstTrunkPage = 0, logFirstDataPage = 0; - s = Data.create(this, pageSize, false); - for (long i = 1;; i++) { - if (i == 3) { - break; - } - s.reset(); - seek(i); - store.readFully(s.getBytes(), 0, pageSize); - CRC32 crc = new CRC32(); - crc.update(s.getBytes(), 4, pageSize - 4); - int expected = (int) crc.getValue(); - int got = 
s.readInt(); - long writeCounter = s.readLong(); - int key = s.readInt(); - int firstTrunkPage = s.readInt(); - int firstDataPage = s.readInt(); - if (expected == got) { - logKey = key; - logFirstTrunkPage = firstTrunkPage; - logFirstDataPage = firstDataPage; - } - writer.println("-- head " + i + - ": writeCounter: " + writeCounter + - " log " + key + ":" + firstTrunkPage + "/" + firstDataPage + - " crc " + got + " (" + (expected == got ? - "ok" : ("expected: " + expected)) + ")"); - } - writer.println("-- log " + logKey + ":" + logFirstTrunkPage + - "/" + logFirstDataPage); - - PrintWriter devNull = new PrintWriter(new OutputStream() { - @Override - public void write(int b) { - // ignore - } - }); - dumpPageStore(devNull, pageCount); - stat = new Stats(); - schema.clear(); - objectIdSet = new HashSet<>(); - dumpPageStore(writer, pageCount); - writeSchemaSET(writer); - writeSchema(writer); - try { - dumpPageLogStream(writer, logKey, logFirstTrunkPage, - logFirstDataPage, pageCount); - } catch (IOException e) { - // ignore - } - writer.println("---- Statistics ----"); - writer.println("-- page count: " + pageCount + ", free: " + stat.free); - long total = Math.max(1, stat.pageDataRows + - stat.pageDataEmpty + stat.pageDataHead); - writer.println("-- page data bytes: head " + stat.pageDataHead + - ", empty " + stat.pageDataEmpty + - ", rows " + stat.pageDataRows + - " (" + (100 - 100L * stat.pageDataEmpty / total) + "% full)"); - for (int i = 0; i < stat.pageTypeCount.length; i++) { - int count = stat.pageTypeCount[i]; - if (count > 0) { - writer.println("-- " + getPageType(i) + " " + - (100 * count / pageCount) + "%, " + count + " page(s)"); - } - } - writer.close(); - } catch (Throwable e) { - writeError(writer, e); - } finally { - IOUtils.closeSilently(writer); - closeSilently(store); - } - } - private void dumpMVStoreFile(PrintWriter writer, String fileName) { - writer.println("-- MVStore"); - writer.println("CREATE ALIAS IF NOT EXISTS READ_BLOB FOR \"" + - 
this.getClass().getName() + ".readBlob\";"); - writer.println("CREATE ALIAS IF NOT EXISTS READ_CLOB FOR \"" + - this.getClass().getName() + ".readClob\";"); - writer.println("CREATE ALIAS IF NOT EXISTS READ_BLOB_DB FOR \"" + - this.getClass().getName() + ".readBlobDb\";"); - writer.println("CREATE ALIAS IF NOT EXISTS READ_CLOB_DB FOR \"" + - this.getClass().getName() + ".readClobDb\";"); - writer.println("CREATE ALIAS IF NOT EXISTS READ_BLOB_MAP FOR \"" + - this.getClass().getName() + ".readBlobMap\";"); - writer.println("CREATE ALIAS IF NOT EXISTS READ_CLOB_MAP FOR \"" + - this.getClass().getName() + ".readClobMap\";"); - resetSchema(); - setDatabaseName(fileName.substring(0, fileName.length() - - Constants.SUFFIX_MV_FILE.length())); - MVStore mv = new MVStore.Builder(). - fileName(fileName).readOnly().open(); - dumpLobMaps(writer, mv); - writer.println("-- Meta"); - dumpMeta(writer, mv); - writer.println("-- Tables"); - TransactionStore store = new TransactionStore(mv); - try { - store.init(); - } catch (Throwable e) { - writeError(writer, e); - } - try { // extract the metadata so we can dump the settings - ValueDataType type = new ValueDataType(); for (String mapName : mv.getMapNames()) { if (!mapName.startsWith("table.")) { continue; } String tableId = mapName.substring("table.".length()); if (Integer.parseInt(tableId) == 0) { - TransactionMap dataMap = store.begin().openMap(mapName, type, type); - Iterator dataIt = dataMap.keyIterator(null); + TransactionMap dataMap = store.begin().openMap(mapName); + Iterator dataIt = dataMap.keyIterator(null); while (dataIt.hasNext()) { - Value rowId = dataIt.next(); - Value[] values = ((ValueArray) dataMap.get(rowId)) - .getList(); + Long rowId = dataIt.next(); + Row row = dataMap.get(rowId); try { - SimpleRow r = new SimpleRow(values); - MetaRecord meta = new MetaRecord(r); - schema.add(meta); - if (meta.getObjectType() == DbObject.TABLE_OR_VIEW) { - String sql = values[3].getString(); - String name = 
extractTableOrViewName(sql); - tableMap.put(meta.getId(), name); - } + writeMetaRow(row); } catch (Throwable t) { writeError(writer, t); } @@ -656,13 +354,20 @@ private void dumpMVStoreFile(PrintWriter writer, String fileName) { if (Integer.parseInt(tableId) == 0) { continue; } - TransactionMap dataMap = store.begin().openMap(mapName, type, type); - Iterator dataIt = dataMap.keyIterator(null); + TransactionMap dataMap = store.begin().openMap(mapName); + Iterator dataIt = dataMap.keyIterator(null); boolean init = false; while (dataIt.hasNext()) { - Value rowId = dataIt.next(); - Value[] values = ((ValueArray) dataMap.get(rowId)).getList(); - recordLength = values.length; + Object rowId = dataIt.next(); + Object value = dataMap.get(rowId); + Value[] values; + if (value instanceof Row) { + values = ((Row) value).getValueList(); + recordLength = values.length; + } else { + values = ((ValueCollectionBase) value).getList(); + recordLength = values.length - 1; + } if (!init) { setStorage(Integer.parseInt(tableId)); // init the column types @@ -695,8 +400,13 @@ private void dumpMVStoreFile(PrintWriter writer, String fileName) { writer.println("DROP TABLE IF EXISTS INFORMATION_SCHEMA.LOB_BLOCKS;"); } catch (Throwable e) { writeError(writer, e); - } finally { - mv.close(); + } + } + + private static void dumpLayout(PrintWriter writer, MVStore mv) { + MVMap layout = mv.getLayoutMap(); + for (Entry e : layout.entrySet()) { + writer.println("-- " + e.getKey() + " = " + e.getValue()); } } @@ -707,24 +417,35 @@ private static void dumpMeta(PrintWriter writer, MVStore mv) { } } + private static void dumpTypes(PrintWriter writer, MVStore mv) { + MVMap.Builder> builder = new MVMap.Builder>() + .keyType(StringDataType.INSTANCE) + .valueType(new MetaType<>(null, null)); + MVMap> map = mv.openMap("_", builder); + for (Entry e : map.entrySet()) { + writer.println("-- " + e.getKey() + " = " + e.getValue()); + } + } + private void dumpLobMaps(PrintWriter writer, MVStore mv) { lobMaps = 
mv.hasMap("lobData"); if (!lobMaps) { return; } - MVMap lobData = mv.openMap("lobData"); + TransactionStore txStore = new TransactionStore(mv); + MVMap lobData = LobStorageMap.openLobDataMap(txStore); StreamStore streamStore = new StreamStore(lobData); - MVMap lobMap = mv.openMap("lobMap"); + MVMap lobMap = LobStorageMap.openLobMap(txStore); writer.println("-- LOB"); writer.println("CREATE TABLE IF NOT EXISTS " + "INFORMATION_SCHEMA.LOB_BLOCKS(" + - "LOB_ID BIGINT, SEQ INT, DATA BINARY, " + + "LOB_ID BIGINT, SEQ INT, DATA VARBINARY, " + "PRIMARY KEY(LOB_ID, SEQ));"); boolean hasErrors = false; - for (Entry e : lobMap.entrySet()) { + for (Entry e : lobMap.entrySet()) { long lobId = e.getKey(); - Object[] value = e.getValue(); - byte[] streamStoreId = (byte[]) value[0]; + LobStorageMap.BlobMeta value = e.getValue(); + byte[] streamStoreId = value.streamStoreId; InputStream in = streamStore.get(streamStoreId); int len = 8 * 1024; byte[] block = new byte[len]; @@ -733,7 +454,7 @@ private void dumpLobMaps(PrintWriter writer, MVStore mv) { int l = IOUtils.readFully(in, block, block.length); if (l > 0) { writer.print("INSERT INTO INFORMATION_SCHEMA.LOB_BLOCKS " + - "VALUES(" + lobId + ", " + seq + ", '"); + "VALUES(" + lobId + ", " + seq + ", X'"); writer.print(StringUtils.convertBytesToHex(block, l)); writer.println("');"); } @@ -752,8 +473,8 @@ private void dumpLobMaps(PrintWriter writer, MVStore mv) { if (hasErrors) { writer.println("-- lobMap"); for (Long k : lobMap.keyList()) { - Object[] value = lobMap.get(k); - byte[] streamStoreId = (byte[]) value[0]; + LobStorageMap.BlobMeta value = lobMap.get(k); + byte[] streamStoreId = value.streamStoreId; writer.println("-- " + k + " " + StreamStore.toString(streamStoreId)); } writer.println("-- lobData"); @@ -763,767 +484,21 @@ private void dumpLobMaps(PrintWriter writer, MVStore mv) { } } - private static String getPageType(int type) { - switch (type) { - case 0: - return "free"; - case Page.TYPE_DATA_LEAF: - return "data 
leaf"; - case Page.TYPE_DATA_NODE: - return "data node"; - case Page.TYPE_DATA_OVERFLOW: - return "data overflow"; - case Page.TYPE_BTREE_LEAF: - return "btree leaf"; - case Page.TYPE_BTREE_NODE: - return "btree node"; - case Page.TYPE_FREE_LIST: - return "free list"; - case Page.TYPE_STREAM_TRUNK: - return "stream trunk"; - case Page.TYPE_STREAM_DATA: - return "stream data"; - } - return "[" + type + "]"; - } - - private void dumpPageStore(PrintWriter writer, long pageCount) { - Data s = Data.create(this, pageSize, false); - for (long page = 3; page < pageCount; page++) { - s = Data.create(this, pageSize, false); - seek(page); - store.readFully(s.getBytes(), 0, pageSize); - dumpPage(writer, s, page, pageCount); - } - } - - private void dumpPage(PrintWriter writer, Data s, long page, long pageCount) { - try { - int type = s.readByte(); - switch (type) { - case Page.TYPE_EMPTY: - stat.pageTypeCount[type]++; - return; - } - boolean last = (type & Page.FLAG_LAST) != 0; - type &= ~Page.FLAG_LAST; - if (!PageStore.checksumTest(s.getBytes(), (int) page, pageSize)) { - writeDataError(writer, "checksum mismatch type: " + type, s.getBytes()); - } - s.readShortInt(); - switch (type) { - // type 1 - case Page.TYPE_DATA_LEAF: { - stat.pageTypeCount[type]++; - int parentPageId = s.readInt(); - setStorage(s.readVarInt()); - int columnCount = s.readVarInt(); - int entries = s.readShortInt(); - writer.println("-- page " + page + ": data leaf " + - (last ? "(last) " : "") + "parent: " + parentPageId + - " table: " + storageId + " entries: " + entries + - " columns: " + columnCount); - dumpPageDataLeaf(writer, s, last, page, columnCount, entries); - break; - } - // type 2 - case Page.TYPE_DATA_NODE: { - stat.pageTypeCount[type]++; - int parentPageId = s.readInt(); - setStorage(s.readVarInt()); - int rowCount = s.readInt(); - int entries = s.readShortInt(); - writer.println("-- page " + page + ": data node " + - (last ? 
"(last) " : "") + "parent: " + parentPageId + - " table: " + storageId + " entries: " + entries + - " rowCount: " + rowCount); - dumpPageDataNode(writer, s, page, entries); - break; - } - // type 3 - case Page.TYPE_DATA_OVERFLOW: - stat.pageTypeCount[type]++; - writer.println("-- page " + page + ": data overflow " + - (last ? "(last) " : "")); - break; - // type 4 - case Page.TYPE_BTREE_LEAF: { - stat.pageTypeCount[type]++; - int parentPageId = s.readInt(); - setStorage(s.readVarInt()); - int entries = s.readShortInt(); - writer.println("-- page " + page + ": b-tree leaf " + - (last ? "(last) " : "") + "parent: " + parentPageId + - " index: " + storageId + " entries: " + entries); - if (trace) { - dumpPageBtreeLeaf(writer, s, entries, !last); - } - break; - } - // type 5 - case Page.TYPE_BTREE_NODE: - stat.pageTypeCount[type]++; - int parentPageId = s.readInt(); - setStorage(s.readVarInt()); - writer.println("-- page " + page + ": b-tree node " + - (last ? "(last) " : "") + "parent: " + parentPageId + - " index: " + storageId); - dumpPageBtreeNode(writer, s, page, !last); - break; - // type 6 - case Page.TYPE_FREE_LIST: - stat.pageTypeCount[type]++; - writer.println("-- page " + page + ": free list " + (last ? 
"(last)" : "")); - stat.free += dumpPageFreeList(writer, s, page, pageCount); - break; - // type 7 - case Page.TYPE_STREAM_TRUNK: - stat.pageTypeCount[type]++; - writer.println("-- page " + page + ": log trunk"); - break; - // type 8 - case Page.TYPE_STREAM_DATA: - stat.pageTypeCount[type]++; - writer.println("-- page " + page + ": log data"); - break; - default: - writer.println("-- ERROR page " + page + " unknown type " + type); - break; - } - } catch (Exception e) { - writeError(writer, e); - } - } - - private void dumpPageLogStream(PrintWriter writer, int logKey, - int logFirstTrunkPage, int logFirstDataPage, long pageCount) - throws IOException { - Data s = Data.create(this, pageSize, false); - DataReader in = new DataReader( - new PageInputStream(writer, this, store, logKey, - logFirstTrunkPage, logFirstDataPage, pageSize) - ); - writer.println("---- Transaction log ----"); - CompressLZF compress = new CompressLZF(); - while (true) { - int x = in.readByte(); - if (x < 0) { - break; - } - if (x == PageLog.NOOP) { - // ignore - } else if (x == PageLog.UNDO) { - int pageId = in.readVarInt(); - int size = in.readVarInt(); - byte[] data = new byte[pageSize]; - if (size == 0) { - in.readFully(data, pageSize); - } else if (size == 1) { - // empty - } else { - byte[] compressBuffer = new byte[size]; - in.readFully(compressBuffer, size); - try { - compress.expand(compressBuffer, 0, size, data, 0, pageSize); - } catch (ArrayIndexOutOfBoundsException e) { - throw DbException.convertToIOException(e); - } - } - String typeName = ""; - int type = data[0]; - boolean last = (type & Page.FLAG_LAST) != 0; - type &= ~Page.FLAG_LAST; - switch (type) { - case Page.TYPE_EMPTY: - typeName = "empty"; - break; - case Page.TYPE_DATA_LEAF: - typeName = "data leaf " + (last ? "(last)" : ""); - break; - case Page.TYPE_DATA_NODE: - typeName = "data node " + (last ? "(last)" : ""); - break; - case Page.TYPE_DATA_OVERFLOW: - typeName = "data overflow " + (last ? 
"(last)" : ""); - break; - case Page.TYPE_BTREE_LEAF: - typeName = "b-tree leaf " + (last ? "(last)" : ""); - break; - case Page.TYPE_BTREE_NODE: - typeName = "b-tree node " + (last ? "(last)" : ""); - break; - case Page.TYPE_FREE_LIST: - typeName = "free list " + (last ? "(last)" : ""); - break; - case Page.TYPE_STREAM_TRUNK: - typeName = "log trunk"; - break; - case Page.TYPE_STREAM_DATA: - typeName = "log data"; - break; - default: - typeName = "ERROR: unknown type " + type; - break; - } - writer.println("-- undo page " + pageId + " " + typeName); - if (trace) { - Data d = Data.create(null, data, false); - dumpPage(writer, d, pageId, pageCount); - } - } else if (x == PageLog.ADD) { - int sessionId = in.readVarInt(); - setStorage(in.readVarInt()); - Row row = PageLog.readRow(RowFactory.DEFAULT, in, s); - writer.println("-- session " + sessionId + - " table " + storageId + - " + " + row.toString()); - if (transactionLog) { - if (storageId == 0 && row.getColumnCount() >= 4) { - int tableId = (int) row.getKey(); - String sql = row.getValue(3).getString(); - String name = extractTableOrViewName(sql); - if (row.getValue(2).getInt() == DbObject.TABLE_OR_VIEW) { - tableMap.put(tableId, name); - } - writer.println(sql + ";"); - } else { - String tableName = tableMap.get(storageId); - if (tableName != null) { - StringBuilder builder = new StringBuilder(); - builder.append("INSERT INTO ").append(tableName). 
- append(" VALUES("); - for (int i = 0; i < row.getColumnCount(); i++) { - if (i > 0) { - builder.append(", "); - } - row.getValue(i).getSQL(builder); - } - builder.append(");"); - writer.println(builder.toString()); - } - } - } - } else if (x == PageLog.REMOVE) { - int sessionId = in.readVarInt(); - setStorage(in.readVarInt()); - long key = in.readVarLong(); - writer.println("-- session " + sessionId + - " table " + storageId + - " - " + key); - if (transactionLog) { - if (storageId == 0) { - int tableId = (int) key; - String tableName = tableMap.get(tableId); - if (tableName != null) { - writer.println("DROP TABLE IF EXISTS " + tableName + ";"); - } - } else { - String tableName = tableMap.get(storageId); - if (tableName != null) { - String sql = "DELETE FROM " + tableName + - " WHERE _ROWID_ = " + key + ";"; - writer.println(sql); - } - } - } - } else if (x == PageLog.TRUNCATE) { - int sessionId = in.readVarInt(); - setStorage(in.readVarInt()); - writer.println("-- session " + sessionId + - " table " + storageId + - " truncate"); - if (transactionLog) { - writer.println("TRUNCATE TABLE " + storageId); - } - } else if (x == PageLog.COMMIT) { - int sessionId = in.readVarInt(); - writer.println("-- commit " + sessionId); - } else if (x == PageLog.ROLLBACK) { - int sessionId = in.readVarInt(); - writer.println("-- rollback " + sessionId); - } else if (x == PageLog.PREPARE_COMMIT) { - int sessionId = in.readVarInt(); - String transaction = in.readString(); - writer.println("-- prepare commit " + sessionId + " " + transaction); - } else if (x == PageLog.NOOP) { - // nothing to do - } else if (x == PageLog.CHECKPOINT) { - writer.println("-- checkpoint"); - } else if (x == PageLog.FREE_LOG) { - int size = in.readVarInt(); - StringBuilder buff = new StringBuilder("-- free"); - for (int i = 0; i < size; i++) { - buff.append(' ').append(in.readVarInt()); - } - writer.println(buff); - } else { - writer.println("-- ERROR: unknown operation " + x); - break; - } - } - } - 
private String setStorage(int storageId) { this.storageId = storageId; this.storageName = "O_" + Integer.toString(storageId).replace('-', 'M'); return storageName; } - /** - * An input stream that reads the data from a page store. - */ - static class PageInputStream extends InputStream { - - private final PrintWriter writer; - private final FileStore store; - private final Data page; - private final int pageSize; - private long trunkPage; - private long nextTrunkPage; - private long dataPage; - private final IntArray dataPages = new IntArray(); - private boolean endOfFile; - private int remaining; - private int logKey; - - public PageInputStream(PrintWriter writer, DataHandler handler, - FileStore store, int logKey, long firstTrunkPage, - long firstDataPage, int pageSize) { - this.writer = writer; - this.store = store; - this.pageSize = pageSize; - this.logKey = logKey - 1; - this.nextTrunkPage = firstTrunkPage; - this.dataPage = firstDataPage; - page = Data.create(handler, pageSize, false); - } - - @Override - public int read() { - byte[] b = { 0 }; - int len = read(b); - return len < 0 ? -1 : (b[0] & 255); - } - - @Override - public int read(byte[] b) { - return read(b, 0, b.length); - } - - @Override - public int read(byte[] b, int off, int len) { - if (len == 0) { - return 0; - } - int read = 0; - while (len > 0) { - int r = readBlock(b, off, len); - if (r < 0) { - break; - } - read += r; - off += r; - len -= r; - } - return read == 0 ? 
-1 : read; - } - - private int readBlock(byte[] buff, int off, int len) { - fillBuffer(); - if (endOfFile) { - return -1; - } - int l = Math.min(remaining, len); - page.read(buff, off, l); - remaining -= l; - return l; - } - - private void fillBuffer() { - if (remaining > 0 || endOfFile) { - return; - } - while (dataPages.size() == 0) { - if (nextTrunkPage == 0) { - endOfFile = true; - return; - } - trunkPage = nextTrunkPage; - store.seek(trunkPage * pageSize); - store.readFully(page.getBytes(), 0, pageSize); - page.reset(); - if (!PageStore.checksumTest(page.getBytes(), (int) trunkPage, pageSize)) { - writer.println("-- ERROR: checksum mismatch page: " +trunkPage); - endOfFile = true; - return; - } - int t = page.readByte(); - page.readShortInt(); - if (t != Page.TYPE_STREAM_TRUNK) { - writer.println("-- log eof " + trunkPage + " type: " + t + - " expected type: " + Page.TYPE_STREAM_TRUNK); - endOfFile = true; - return; - } - page.readInt(); - int key = page.readInt(); - logKey++; - if (key != logKey) { - writer.println("-- log eof " + trunkPage + - " type: " + t + " expected key: " + logKey + " got: " + key); - } - nextTrunkPage = page.readInt(); - writer.println("-- log " + key + ":" + trunkPage + - " next: " + nextTrunkPage); - int pageCount = page.readShortInt(); - for (int i = 0; i < pageCount; i++) { - int d = page.readInt(); - if (dataPage != 0) { - if (d == dataPage) { - dataPage = 0; - } else { - // ignore the pages before the starting page - continue; - } - } - dataPages.add(d); - } - } - if (dataPages.size() > 0) { - page.reset(); - long nextPage = dataPages.get(0); - dataPages.remove(0); - store.seek(nextPage * pageSize); - store.readFully(page.getBytes(), 0, pageSize); - page.reset(); - int t = page.readByte(); - if (t != 0 && !PageStore.checksumTest(page.getBytes(), - (int) nextPage, pageSize)) { - writer.println("-- ERROR: checksum mismatch page: " +nextPage); - endOfFile = true; - return; - } - page.readShortInt(); - int p = page.readInt(); - int k 
= page.readInt(); - writer.println("-- log " + k + ":" + trunkPage + "/" + nextPage); - if (t != Page.TYPE_STREAM_DATA) { - writer.println("-- log eof " +nextPage+ " type: " + t + " parent: " + p + - " expected type: " + Page.TYPE_STREAM_DATA); - endOfFile = true; - return; - } else if (k != logKey) { - writer.println("-- log eof " +nextPage+ " type: " + t + " parent: " + p + - " expected key: " + logKey + " got: " + k); - endOfFile = true; - return; - } - remaining = pageSize - page.length(); - } - } - } - - private void dumpPageBtreeNode(PrintWriter writer, Data s, long pageId, - boolean positionOnly) { - int rowCount = s.readInt(); - int entryCount = s.readShortInt(); - int[] children = new int[entryCount + 1]; - int[] offsets = new int[entryCount]; - children[entryCount] = s.readInt(); - checkParent(writer, pageId, children, entryCount); - int empty = Integer.MAX_VALUE; - for (int i = 0; i < entryCount; i++) { - children[i] = s.readInt(); - checkParent(writer, pageId, children, i); - int off = s.readShortInt(); - empty = Math.min(off, empty); - offsets[i] = off; - } - empty = empty - s.length(); - if (!trace) { + private void writeMetaRow(Row r) { + MetaRecord meta = new MetaRecord(r); + int objectType = meta.getObjectType(); + if (objectType == DbObject.INDEX && meta.getSQL().startsWith("CREATE PRIMARY KEY ")) { return; } - writer.println("-- empty: " + empty); - for (int i = 0; i < entryCount; i++) { - int off = offsets[i]; - s.setPos(off); - long key = s.readVarLong(); - Value data; - if (positionOnly) { - data = ValueLong.get(key); - } else { - try { - data = s.readValue(); - } catch (Throwable e) { - writeDataError(writer, "exception " + e, s.getBytes()); - continue; - } - } - writer.println("-- [" + i + "] child: " + children[i] + - " key: " + key + " data: " + data); - } - writer.println("-- [" + entryCount + "] child: " + - children[entryCount] + " rowCount: " + rowCount); - } - - private int dumpPageFreeList(PrintWriter writer, Data s, long pageId, - 
long pageCount) { - int pagesAddressed = PageFreeList.getPagesAddressed(pageSize); - int len = pagesAddressed >> 3; - byte[] b = new byte[len]; - s.read(b, 0, len); - BitSet used = BitSet.valueOf(b); - int free = 0; - for (long i = 0, j = pageId; i < pagesAddressed && j < pageCount; i++, j++) { - if (i == 0 || j % 100 == 0) { - if (i > 0) { - writer.println(); - } - writer.print("-- " + j + " "); - } else if (j % 20 == 0) { - writer.print(" - "); - } else if (j % 10 == 0) { - writer.print(' '); - } - writer.print(used.get((int) i) ? '1' : '0'); - if (!used.get((int) i)) { - free++; - } - } - writer.println(); - return free; - } - - private void dumpPageBtreeLeaf(PrintWriter writer, Data s, int entryCount, - boolean positionOnly) { - int[] offsets = new int[entryCount]; - int empty = Integer.MAX_VALUE; - for (int i = 0; i < entryCount; i++) { - int off = s.readShortInt(); - empty = Math.min(off, empty); - offsets[i] = off; - } - empty = empty - s.length(); - writer.println("-- empty: " + empty); - for (int i = 0; i < entryCount; i++) { - int off = offsets[i]; - s.setPos(off); - long key = s.readVarLong(); - Value data; - if (positionOnly) { - data = ValueLong.get(key); - } else { - try { - data = s.readValue(); - } catch (Throwable e) { - writeDataError(writer, "exception " + e, s.getBytes()); - continue; - } - } - writer.println("-- [" + i + "] key: " + key + " data: " + data); - } - } - - private void checkParent(PrintWriter writer, long pageId, int[] children, - int index) { - int child = children[index]; - if (child < 0 || child >= parents.length) { - writer.println("-- ERROR [" + pageId + "] child[" + - index + "]: " + child + " >= page count: " + parents.length); - } else if (parents[child] != pageId) { - writer.println("-- ERROR [" + pageId + "] child[" + - index + "]: " + child + " parent: " + parents[child]); - } - } - - private void dumpPageDataNode(PrintWriter writer, Data s, long pageId, - int entryCount) { - int[] children = new int[entryCount + 1]; - 
long[] keys = new long[entryCount]; - children[entryCount] = s.readInt(); - checkParent(writer, pageId, children, entryCount); - for (int i = 0; i < entryCount; i++) { - children[i] = s.readInt(); - checkParent(writer, pageId, children, i); - keys[i] = s.readVarLong(); - } - if (!trace) { - return; - } - for (int i = 0; i < entryCount; i++) { - writer.println("-- [" + i + "] child: " + children[i] + " key: " + keys[i]); - } - writer.println("-- [" + entryCount + "] child: " + children[entryCount]); - } - - private void dumpPageDataLeaf(PrintWriter writer, Data s, boolean last, - long pageId, int columnCount, int entryCount) { - long[] keys = new long[entryCount]; - int[] offsets = new int[entryCount]; - long next = 0; - if (!last) { - next = s.readInt(); - writer.println("-- next: " + next); - } - int empty = pageSize; - for (int i = 0; i < entryCount; i++) { - keys[i] = s.readVarLong(); - int off = s.readShortInt(); - empty = Math.min(off, empty); - offsets[i] = off; - } - stat.pageDataRows += pageSize - empty; - empty = empty - s.length(); - stat.pageDataHead += s.length(); - stat.pageDataEmpty += empty; - if (trace) { - writer.println("-- empty: " + empty); - } - if (!last) { - Data s2 = Data.create(this, pageSize, false); - s.setPos(pageSize); - long parent = pageId; - while (true) { - checkParent(writer, parent, new int[]{(int) next}, 0); - parent = next; - seek(next); - store.readFully(s2.getBytes(), 0, pageSize); - s2.reset(); - int type = s2.readByte(); - s2.readShortInt(); - s2.readInt(); - if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) { - int size = s2.readShortInt(); - writer.println("-- chain: " + next + - " type: " + type + " size: " + size); - s.checkCapacity(size); - s.write(s2.getBytes(), s2.length(), size); - break; - } else if (type == Page.TYPE_DATA_OVERFLOW) { - next = s2.readInt(); - if (next == 0) { - writeDataError(writer, "next:0", s2.getBytes()); - break; - } - int size = pageSize - s2.length(); - writer.println("-- chain: " + 
next + " type: " + type + - " size: " + size + " next: " + next); - s.checkCapacity(size); - s.write(s2.getBytes(), s2.length(), size); - } else { - writeDataError(writer, "type: " + type, s2.getBytes()); - break; - } - } - } - for (int i = 0; i < entryCount; i++) { - long key = keys[i]; - int off = offsets[i]; - if (trace) { - writer.println("-- [" + i + "] storage: " + storageId + - " key: " + key + " off: " + off); - } - s.setPos(off); - Value[] data = createRecord(writer, s, columnCount); - if (data != null) { - createTemporaryTable(writer); - writeRow(writer, s, data); - if (remove && storageId == 0) { - String sql = data[3].getString(); - if (sql.startsWith("CREATE USER ")) { - int saltIndex = Utils.indexOf(s.getBytes(), "SALT ".getBytes(), off); - if (saltIndex >= 0) { - String userName = sql.substring("CREATE USER ".length(), - sql.indexOf("SALT ") - 1); - if (userName.startsWith("IF NOT EXISTS ")) { - userName = userName.substring("IF NOT EXISTS ".length()); - } - if (userName.startsWith("\"")) { - // TODO doesn't work for all cases ("" inside - // user name) - userName = userName.substring(1, userName.length() - 1); - } - byte[] userPasswordHash = SHA256.getKeyPasswordHash( - userName, "".toCharArray()); - byte[] salt = MathUtils.secureRandomBytes(Constants.SALT_LEN); - byte[] passwordHash = SHA256.getHashWithSalt( - userPasswordHash, salt); - StringBuilder buff = new StringBuilder() - .append("SALT '"); - StringUtils.convertBytesToHex(buff, salt) - .append("' HASH '"); - StringUtils.convertBytesToHex(buff, passwordHash) - .append('\''); - byte[] replacement = buff.toString().getBytes(); - System.arraycopy(replacement, 0, s.getBytes(), - saltIndex, replacement.length); - seek(pageId); - store.write(s.getBytes(), 0, pageSize); - if (trace) { - out.println("User: " + userName); - } - remove = false; - } - } - } - } - } - } - - private void seek(long page) { - // page is long to avoid integer overflow - store.seek(page * pageSize); - } - - private Value[] 
createRecord(PrintWriter writer, Data s, int columnCount) { - recordLength = columnCount; - if (columnCount <= 0) { - writeDataError(writer, "columnCount<0", s.getBytes()); - return null; - } - Value[] data; - try { - data = new Value[columnCount]; - } catch (OutOfMemoryError e) { - writeDataError(writer, "out of memory", s.getBytes()); - return null; - } - return data; - } - - private void writeRow(PrintWriter writer, Data s, Value[] data) { - StringBuilder sb = new StringBuilder(); - sb.append("INSERT INTO ").append(storageName).append(" VALUES("); - for (valueId = 0; valueId < recordLength; valueId++) { - try { - Value v = s.readValue(); - data[valueId] = v; - if (valueId > 0) { - sb.append(", "); - } - String columnName = storageName + "." + valueId; - getSQL(sb, columnName, v); - } catch (Exception e) { - writeDataError(writer, "exception " + e, s.getBytes()); - } catch (OutOfMemoryError e) { - writeDataError(writer, "out of memory", s.getBytes()); - } - } - sb.append(");"); - writer.println(sb.toString()); - if (storageId == 0) { - try { - SimpleRow r = new SimpleRow(data); - MetaRecord meta = new MetaRecord(r); - schema.add(meta); - if (meta.getObjectType() == DbObject.TABLE_OR_VIEW) { - String sql = data[3].getString(); - String name = extractTableOrViewName(sql); - tableMap.put(meta.getId(), name); - } - } catch (Throwable t) { - writeError(writer, t); - } + schema.add(meta); + if (objectType == DbObject.TABLE_OR_VIEW) { + tableMap.put(meta.getId(), extractTableOrViewName(meta.getSQL())); } } @@ -1592,20 +567,26 @@ private void writeSchema(PrintWriter writer) { setStorage(objectId); writer.println("DROP TABLE " + storageName + ";"); } - writer.println("DROP ALIAS READ_BLOB;"); - writer.println("DROP ALIAS READ_CLOB;"); - writer.println("DROP ALIAS READ_BLOB_DB;"); - writer.println("DROP ALIAS READ_CLOB_DB;"); if (deleteLobs) { writer.println("DELETE FROM INFORMATION_SCHEMA.LOBS WHERE `TABLE` = " + LobStorageFrontend.TABLE_TEMP + ";"); } + ArrayList 
referentialConstraints = new ArrayList<>(); for (MetaRecord m : schema) { if (isSchemaObjectTypeDelayed(m)) { String sql = m.getSQL(); - writer.println(sql + ";"); + // TODO parse SQL properly + if (m.getObjectType() == DbObject.CONSTRAINT && sql.endsWith("NOCHECK") + && sql.contains(" FOREIGN KEY") && sql.contains("REFERENCES ")) { + referentialConstraints.add(sql); + } else { + writer.println(sql + ';'); + } } } + for (String sql : referentialConstraints) { + writer.println(sql + ';'); + } } private static boolean isLobTable(String name) { @@ -1679,12 +660,6 @@ private static String extractTableOrViewName(String sql) { } - private static void closeSilently(FileStore fileStore) { - if (fileStore != null) { - fileStore.closeSilently(); - } - } - private void writeError(PrintWriter writer, Throwable e) { if (writer != null) { writer.println("// error: " + e); @@ -1729,15 +704,7 @@ public void checkWritingAllowed() { */ @Override public int getMaxLengthInplaceLob() { - throw DbException.throwInternalError(); - } - - /** - * INTERNAL - */ - @Override - public String getLobCompressionAlgorithm(int type) { - return null; + throw DbException.getInternalError(); } /** @@ -1768,7 +735,7 @@ public TempFileDeleter getTempFileDeleter() { * INTERNAL */ @Override - public LobStorageBackend getLobStorage() { + public LobStorageInterface getLobStorage() { return null; } @@ -1776,14 +743,8 @@ public LobStorageBackend getLobStorage() { * INTERNAL */ @Override - public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, - int off, int length) { - throw DbException.throwInternalError(); - } - - @Override - public JavaObjectSerializer getJavaObjectSerializer() { - return null; + public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, int off, int length) { + throw DbException.getInternalError(); } @Override diff --git a/h2/src/main/org/h2/tools/Restore.java b/h2/src/main/org/h2/tools/Restore.java index afddea7610..426abca58b 100644 --- 
a/h2/src/main/org/h2/tools/Restore.java +++ b/h2/src/main/org/h2/tools/Restore.java @@ -1,10 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -12,7 +13,6 @@ import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import org.h2.engine.Constants; -import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.store.fs.FileUtils; import org.h2.util.IOUtils; @@ -20,13 +20,13 @@ /** * Restores a H2 database by extracting the database files from a .zip file. - * @h2.resource */ public class Restore extends Tool { /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. Supported options * + * * * * @@ -38,9 +38,9 @@ public class Restore extends Tool { * * *
          Supported options
          [-help] or [-?]Print the list of options
          [-file <filename>]
          [-quiet]Do not print progress information
          - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new Restore().runTool(args); @@ -117,10 +117,6 @@ private static String getOriginalDbName(String fileName, String db) * @return the database name or null */ private static String getDatabaseNameFromFileName(String fileName) { - if (fileName.endsWith(Constants.SUFFIX_PAGE_FILE)) { - return fileName.substring(0, - fileName.length() - Constants.SUFFIX_PAGE_FILE.length()); - } if (fileName.endsWith(Constants.SUFFIX_MV_FILE)) { return fileName.substring(0, fileName.length() - Constants.SUFFIX_MV_FILE.length()); @@ -149,7 +145,7 @@ public static void execute(String zipFileName, String directory, String db) { if (originalDbName == null) { throw new IOException("No database named " + db + " found"); } - if (originalDbName.startsWith(SysProperties.FILE_SEPARATOR)) { + if (originalDbName.startsWith(File.separator)) { originalDbName = originalDbName.substring(1); } originalDbLen = originalDbName.length(); @@ -163,9 +159,8 @@ public static void execute(String zipFileName, String directory, String db) { } String fileName = entry.getName(); // restoring windows backups on linux and vice versa - fileName = fileName.replace('\\', SysProperties.FILE_SEPARATOR.charAt(0)); - fileName = fileName.replace('/', SysProperties.FILE_SEPARATOR.charAt(0)); - if (fileName.startsWith(SysProperties.FILE_SEPARATOR)) { + fileName = IOUtils.nameSeparatorsToNative(fileName); + if (fileName.startsWith(File.separator)) { fileName = fileName.substring(1); } boolean copy = false; @@ -178,8 +173,7 @@ public static void execute(String zipFileName, String directory, String db) { if (copy) { OutputStream o = null; try { - o = FileUtils.newOutputStream( - directory + SysProperties.FILE_SEPARATOR + fileName, false); + o = FileUtils.newOutputStream(directory + File.separatorChar + fileName, false); IOUtils.copy(zipIn, o); o.close(); } finally 
{ diff --git a/h2/src/main/org/h2/tools/RunScript.java b/h2/src/main/org/h2/tools/RunScript.java index cef387b1cc..fc0efbab3b 100644 --- a/h2/src/main/org/h2/tools/RunScript.java +++ b/h2/src/main/org/h2/tools/RunScript.java @@ -1,26 +1,22 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; -import java.io.BufferedInputStream; +import java.io.BufferedReader; +import java.io.File; import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; import java.io.Reader; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.sql.Connection; -import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.concurrent.TimeUnit; -import org.h2.engine.Constants; -import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.store.fs.FileUtils; import org.h2.util.IOUtils; @@ -31,7 +27,6 @@ /** * Runs a SQL script against a database. - * @h2.resource */ public class RunScript extends Tool { @@ -39,8 +34,9 @@ public class RunScript extends Tool { private boolean checkResults; /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. * + * * * * @@ -62,9 +58,9 @@ public class RunScript extends Tool { * * *
          Supported options
          [-help] or [-?]Print the list of options
          [-url "<url>"]
          [-options ...]RUNSCRIPT options (embedded H2; -*Results not supported)
          - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new RunScript().runTool(args); @@ -154,6 +150,7 @@ public void runTool(String... args) throws SQLException { * @param conn the connection to a database * @param reader the reader * @return the last result set + * @throws SQLException on failure */ public static ResultSet execute(Connection conn, Reader reader) throws SQLException { @@ -184,14 +181,11 @@ public static ResultSet execute(Connection conn, Reader reader) private void process(Connection conn, String fileName, boolean continueOnError, Charset charset) throws SQLException, IOException { - InputStream in = FileUtils.newInputStream(fileName); - String path = FileUtils.getParent(fileName); + BufferedReader reader = FileUtils.newBufferedReader(fileName, charset); try { - in = new BufferedInputStream(in, Constants.IO_BUFFER_SIZE); - Reader reader = new InputStreamReader(in, charset); - process(conn, continueOnError, path, reader, charset); + process(conn, continueOnError, FileUtils.getParent(fileName), reader, charset); } finally { - IOUtils.closeSilently(in); + IOUtils.closeSilently(reader); } } @@ -212,7 +206,7 @@ private void process(Connection conn, boolean continueOnError, String path, startsWith("@INCLUDE")) { sql = StringUtils.trimSubstring(sql, "@INCLUDE".length()); if (!FileUtils.isAbsolute(sql)) { - sql = path + SysProperties.FILE_SEPARATOR + sql; + sql = path + File.separatorChar + sql; } process(conn, sql, continueOnError, charset); } else { @@ -271,19 +265,12 @@ private void process(Connection conn, boolean continueOnError, String path, } } - private static void processRunscript(String url, String user, String password, - String fileName, String options) throws SQLException { - Connection conn = null; - Statement stat = null; - try { - org.h2.Driver.load(); - conn = DriverManager.getConnection(url, user, password); - stat = 
conn.createStatement(); + private static void processRunscript(String url, String user, String password, String fileName, String options) + throws SQLException { + try (Connection conn = JdbcUtils.getConnection(null, url, user, password); + Statement stat = conn.createStatement()) { String sql = "RUNSCRIPT FROM '" + fileName + "' " + options; stat.execute(sql); - } finally { - JdbcUtils.closeSilently(stat); - JdbcUtils.closeSilently(conn); } } @@ -297,6 +284,7 @@ private static void processRunscript(String url, String user, String password, * @param charset the character set or null for UTF-8 * @param continueOnError if execution should be continued if an error * occurs + * @throws SQLException on failure */ public static void execute(String url, String user, String password, String fileName, Charset charset, boolean continueOnError) @@ -316,17 +304,13 @@ public static void execute(String url, String user, String password, * @param continueOnError if execution should be continued if an error * occurs */ - void process(String url, String user, String password, - String fileName, Charset charset, - boolean continueOnError) throws SQLException { - try { - org.h2.Driver.load(); - if (charset == null) { - charset = StandardCharsets.UTF_8; - } - try (Connection conn = DriverManager.getConnection(url, user, password)) { - process(conn, fileName, continueOnError, charset); - } + void process(String url, String user, String password, String fileName, Charset charset, boolean continueOnError) + throws SQLException { + if (charset == null) { + charset = StandardCharsets.UTF_8; + } + try (Connection conn = JdbcUtils.getConnection(null, url, user, password)) { + process(conn, fileName, continueOnError, charset); } catch (IOException e) { throw DbException.convertIOException(e, fileName); } diff --git a/h2/src/main/org/h2/tools/Script.java b/h2/src/main/org/h2/tools/Script.java index 15bd885c08..8a2f5dae76 100644 --- a/h2/src/main/org/h2/tools/Script.java +++ 
b/h2/src/main/org/h2/tools/Script.java @@ -1,12 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; import java.sql.Connection; -import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; import org.h2.util.JdbcUtils; @@ -15,13 +14,13 @@ /** * Creates a SQL script file by extracting the schema and data of a database. - * @h2.resource */ public class Script extends Tool { /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. * + * * * * @@ -37,9 +36,9 @@ public class Script extends Tool { * * *
          Supported options
          [-help] or [-?]Print the list of options
          [-url "<url>"]
          [-quiet]Do not print progress information
          - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new Script().runTool(args); @@ -109,16 +108,12 @@ public void runTool(String... args) throws SQLException { * @param fileName the target file name * @param options1 the options before the file name (may be an empty string) * @param options2 the options after the file name (may be an empty string) + * @throws SQLException on failure */ - public static void process(String url, String user, String password, - String fileName, String options1, String options2) throws SQLException { - Connection conn = null; - try { - org.h2.Driver.load(); - conn = DriverManager.getConnection(url, user, password); + public static void process(String url, String user, String password, String fileName, String options1, + String options2) throws SQLException { + try (Connection conn = JdbcUtils.getConnection(null, url, user, password)) { process(conn, fileName, options1, options2); - } finally { - JdbcUtils.closeSilently(conn); } } @@ -130,6 +125,7 @@ public static void process(String url, String user, String password, * @param fileName the target file name * @param options1 the options before the file name * @param options2 the options after the file name + * @throws SQLException on failure */ public static void process(Connection conn, String fileName, String options1, String options2) throws SQLException { diff --git a/h2/src/main/org/h2/tools/Server.java b/h2/src/main/org/h2/tools/Server.java index 134bc6f114..c1956012ca 100644 --- a/h2/src/main/org/h2/tools/Server.java +++ b/h2/src/main/org/h2/tools/Server.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.tools; @@ -23,7 +23,6 @@ /** * Starts the H2 Console (web-) server, TCP, and PG server. - * @h2.resource */ public class Server extends Tool implements Runnable, ShutdownHandler { @@ -42,6 +41,7 @@ public Server() { * * @param service the service * @param args the command line arguments + * @throws SQLException on failure */ public Server(Service service, String... args) throws SQLException { verifyArgs(args); @@ -55,15 +55,19 @@ public Server(Service service, String... args) throws SQLException { /** * When running without options, -tcp, -web, -browser and -pg are started. - *
          - * Options are case sensitive. Supported options are: + * + * Options are case sensitive. * + * * * * * * * + * + * * * * @@ -112,11 +116,11 @@ public Server(Service service, String... args) throws SQLException { * *
          Supported options
          [-help] or [-?]Print the list of options
          [-web]Start the web server with the H2 Console
          [-webAllowOthers]Allow other computers to connect - see below
          [-webExternalNames <names>]The comma-separated list of external names and IP addresses of this server, + * used together with -webAllowOthers
          [-webDaemon]Use a daemon thread
          [-webPort <port>]Allows to map a database name to another (all servers)
          * The options -xAllowOthers are potentially risky. - *
          + * * For details, see Advanced Topics / Protection against Remote Access. - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new Server().runTool(args); @@ -133,14 +137,16 @@ private void verifyArgs(String... args) throws SQLException { // ok } else if ("-webAllowOthers".equals(arg)) { // no parameters - } else if ("-webDaemon".equals(arg)) { + } else if ("-webExternalNames".equals(arg)) { + i++; + } else if ("-webDaemon".equals(arg)) { // no parameters } else if ("-webSSL".equals(arg)) { // no parameters } else if ("-webPort".equals(arg)) { i++; } else if ("-webAdminPassword".equals(arg)) { - i += 2; + i++; } else { throwUnsupportedOption(arg); } @@ -241,7 +247,7 @@ public void runTool(String... args) throws SQLException { } else if ("-webPort".equals(arg)) { i++; } else if ("-webAdminPassword".equals(arg)) { - i += 2; + i++; } else { showUsageAndThrowUnsupportedOption(arg); } @@ -375,6 +381,7 @@ public void runTool(String... args) throws SQLException { * @param force the shutdown (don't wait) * @param all whether all TCP servers that are running in the JVM should be * stopped + * @throws SQLException on failure */ public static void shutdownTcpServer(String url, String password, boolean force, boolean all) throws SQLException { @@ -424,6 +431,7 @@ public String getStatus() { * * @param args the argument list * @return the server + * @throws SQLException on failure */ public static Server createWebServer(String... args) throws SQLException { return createWebServer(args, null, false); @@ -468,6 +476,7 @@ static Server createWebServer(String[] args, String key, boolean allowSecureCrea * * @param args the argument list * @return the server + * @throws SQLException on failure */ public static Server createTcpServer(String... 
args) throws SQLException { TcpServer service = new TcpServer(); @@ -495,6 +504,7 @@ public static Server createTcpServer(String... args) throws SQLException { * * @param args the argument list * @return the server + * @throws SQLException on failure */ public static Server createPgServer(String... args) throws SQLException { return new Server(new PgServer(), args); @@ -615,6 +625,7 @@ public void run() { /** * INTERNAL + * @param shutdownHandler to set */ public void setShutdownHandler(ShutdownHandler shutdownHandler) { this.shutdownHandler = shutdownHandler; @@ -645,6 +656,7 @@ public Service getService() { * Open a new browser tab or window with the given URL. * * @param url the URL to open + * @throws Exception on failure */ public static void openBrowser(String url) throws Exception { try { @@ -736,6 +748,7 @@ public static void openBrowser(String url) throws Exception { * user has disconnected. * * @param conn the database connection (the database must be open) + * @throws SQLException on failure */ public static void startWebServer(Connection conn) throws SQLException { startWebServer(conn, false); @@ -750,6 +763,7 @@ public static void startWebServer(Connection conn) throws SQLException { * @param conn the database connection (the database must be open) * @param ignoreProperties if {@code true} properties from * {@code .h2.server.properties} will be ignored + * @throws SQLException on failure */ public static void startWebServer(Connection conn, boolean ignoreProperties) throws SQLException { WebServer webServer = new WebServer(); diff --git a/h2/src/main/org/h2/tools/Shell.java b/h2/src/main/org/h2/tools/Shell.java index fdde29fa13..a85b84e91c 100644 --- a/h2/src/main/org/h2/tools/Shell.java +++ b/h2/src/main/org/h2/tools/Shell.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -12,7 +12,6 @@ import java.io.PrintStream; import java.io.StringReader; import java.sql.Connection; -import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; @@ -33,7 +32,6 @@ /** * Interactive command line tool to access a database using JDBC. - * @h2.resource */ public class Shell extends Tool implements Runnable { @@ -54,8 +52,9 @@ public class Shell extends Tool implements Runnable { private String serverPropertiesDir = Constants.SERVER_PROPERTIES_DIR; /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. * + * * * * @@ -73,9 +72,9 @@ public class Shell extends Tool implements Runnable { *
          Supported options
          [-help] or [-?]Print the list of options
          [-url "<url>"]
          * If special characters don't work as expected, you may need to use * -Dfile.encoding=UTF-8 (Mac OS X) or CP850 (Windows). - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new Shell().runTool(args); @@ -115,6 +114,7 @@ public void setInReader(BufferedReader reader) { */ @Override public void runTool(String... args) throws SQLException { + String driver = null; String url = null; String user = ""; String password = ""; @@ -128,8 +128,7 @@ public void runTool(String... args) throws SQLException { } else if (arg.equals("-password")) { password = args[++i]; } else if (arg.equals("-driver")) { - String driver = args[++i]; - JdbcUtils.loadUserClass(driver); + driver = args[++i]; } else if (arg.equals("-sql")) { sql = args[++i]; } else if (arg.equals("-properties")) { @@ -144,8 +143,7 @@ public void runTool(String... args) throws SQLException { } } if (url != null) { - org.h2.Driver.load(); - conn = DriverManager.getConnection(url, user, password); + conn = JdbcUtils.getConnection(driver, url, user, password); stat = conn.createStatement(); } if (sql == null) { @@ -176,6 +174,7 @@ public void runTool(String... args) throws SQLException { * * @param conn the connection * @param args the command line settings + * @throws SQLException on failure */ public void runTool(Connection conn, String... args) throws SQLException { this.conn = conn; @@ -196,7 +195,7 @@ private void showHelp() { private void promptLoop() { println(""); - println("Welcome to H2 Shell " + Constants.getFullVersion()); + println("Welcome to H2 Shell " + Constants.FULL_VERSION); println("Exit with Ctrl+C"); if (conn != null) { showHelp(); @@ -364,29 +363,31 @@ private void connect() throws IOException, SQLException { println("[Enter] " + user); print("User "); user = readLine(user); + conn = url.startsWith(Constants.START_URL) ? 
connectH2(driver, url, user) + : JdbcUtils.getConnection(driver, url, user, readPassword()); + stat = conn.createStatement(); + println("Connected"); + } + + private Connection connectH2(String driver, String url, String user) throws IOException, SQLException { for (;;) { String password = readPassword(); try { - conn = JdbcUtils.getConnection(driver, url + ";IFEXISTS=TRUE", user, password); - break; + return JdbcUtils.getConnection(driver, url + ";IFEXISTS=TRUE", user, password); } catch (SQLException ex) { - if (ex.getErrorCode() == ErrorCode.DATABASE_NOT_FOUND_2) { + if (ex.getErrorCode() == ErrorCode.DATABASE_NOT_FOUND_WITH_IF_EXISTS_1) { println("Type the same password again to confirm database creation."); String password2 = readPassword(); if (password.equals(password2)) { - conn = JdbcUtils.getConnection(driver, url, user, password); - break; + return JdbcUtils.getConnection(driver, url, user, password); } else { println("Passwords don't match. Try again."); - continue; } } else { throw ex; } } } - stat = conn.createStatement(); - println("Connected"); } /** @@ -467,14 +468,22 @@ private void execute(String sql) { try { ResultSet rs = null; try { - if (stat.execute(sql)) { + if (sql.startsWith("@")) { + rs = JdbcUtils.getMetaResultSet(conn, sql); + printResult(rs, listMode); + } else if (stat.execute(sql)) { rs = stat.getResultSet(); int rowCount = printResult(rs, listMode); time = System.nanoTime() - time; println("(" + rowCount + (rowCount == 1 ? 
" row, " : " rows, ") + TimeUnit.NANOSECONDS.toMillis(time) + " ms)"); } else { - int updateCount = stat.getUpdateCount(); + long updateCount; + try { + updateCount = stat.getLargeUpdateCount(); + } catch (UnsupportedOperationException e) { + updateCount = stat.getUpdateCount(); + } time = System.nanoTime() - time; println("(Update count: " + updateCount + ", " + TimeUnit.NANOSECONDS.toMillis(time) + " ms)"); @@ -554,7 +563,7 @@ private int[] printRows(ArrayList rows, int len) { max = Math.max(max, row[i].length()); } if (len > 1) { - Math.min(maxColumnSize, max); + max = Math.min(maxColumnSize, max); } columnSizes[i] = max; } diff --git a/h2/src/main/org/h2/tools/SimpleResultSet.java b/h2/src/main/org/h2/tools/SimpleResultSet.java index 7453b8bcea..35d2964e10 100644 --- a/h2/src/main/org/h2/tools/SimpleResultSet.java +++ b/h2/src/main/org/h2/tools/SimpleResultSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -31,7 +31,6 @@ import java.util.Map; import java.util.UUID; import org.h2.api.ErrorCode; -import org.h2.jdbc.JdbcResultSetBackwardsCompat; import org.h2.message.DbException; import org.h2.util.Bits; import org.h2.util.JdbcUtils; @@ -39,6 +38,8 @@ import org.h2.util.SimpleColumnInfo; import org.h2.util.Utils; import org.h2.value.DataType; +import org.h2.value.Value; +import org.h2.value.ValueToObjectConverter; /** * This class is a simple result set and meta data implementation. @@ -58,8 +59,7 @@ *
          * */ -public class SimpleResultSet implements ResultSet, ResultSetMetaData, - JdbcResultSetBackwardsCompat { +public class SimpleResultSet implements ResultSet, ResultSetMetaData { private ArrayList rows; private Object[] currentRow; @@ -99,8 +99,7 @@ public SimpleResultSet(SimpleRowSource source) { */ public void addColumn(String name, int sqlType, int precision, int scale) { int valueType = DataType.convertSQLTypeToValueType(sqlType); - addColumn(name, sqlType, DataType.getDataType(valueType).name, - precision, scale); + addColumn(name, sqlType, Value.getTypeName(valueType), precision, scale); } /** @@ -840,48 +839,48 @@ public Object getObject(String columnLabel) throws SQLException { * @param type the class of the returned value * @return the value */ + @SuppressWarnings("unchecked") @Override public T getObject(int columnIndex, Class type) throws SQLException { - if (wasNull()) { + if (get(columnIndex) == null) { return null; } - if (type == BigDecimal.class) { - return type.cast(getBigDecimal(columnIndex)); + return (T) getBigDecimal(columnIndex); } else if (type == BigInteger.class) { - return type.cast(getBigDecimal(columnIndex).toBigInteger()); + return (T) getBigDecimal(columnIndex).toBigInteger(); } else if (type == String.class) { - return type.cast(getString(columnIndex)); + return (T) getString(columnIndex); } else if (type == Boolean.class) { - return type.cast(getBoolean(columnIndex)); + return (T) (Boolean) getBoolean(columnIndex); } else if (type == Byte.class) { - return type.cast(getByte(columnIndex)); + return (T) (Byte) getByte(columnIndex); } else if (type == Short.class) { - return type.cast(getShort(columnIndex)); + return (T) (Short) getShort(columnIndex); } else if (type == Integer.class) { - return type.cast(getInt(columnIndex)); + return (T) (Integer) getInt(columnIndex); } else if (type == Long.class) { - return type.cast(getLong(columnIndex)); + return (T) (Long) getLong(columnIndex); } else if (type == Float.class) { - 
return type.cast(getFloat(columnIndex)); + return (T) (Float) getFloat(columnIndex); } else if (type == Double.class) { - return type.cast(getDouble(columnIndex)); + return (T) (Double) getDouble(columnIndex); } else if (type == Date.class) { - return type.cast(getDate(columnIndex)); + return (T) getDate(columnIndex); } else if (type == Time.class) { - return type.cast(getTime(columnIndex)); + return (T) getTime(columnIndex); } else if (type == Timestamp.class) { - return type.cast(getTimestamp(columnIndex)); + return (T) getTimestamp(columnIndex); } else if (type == UUID.class) { - return type.cast(getObject(columnIndex)); + return (T) getObject(columnIndex); } else if (type == byte[].class) { - return type.cast(getBytes(columnIndex)); + return (T) getBytes(columnIndex); } else if (type == java.sql.Array.class) { - return type.cast(getArray(columnIndex)); + return (T) getArray(columnIndex); } else if (type == Blob.class) { - return type.cast(getBlob(columnIndex)); + return (T) getBlob(columnIndex); } else if (type == Clob.class) { - return type.cast(getClob(columnIndex)); + return (T) getClob(columnIndex); } else { throw getUnsupportedException(); } @@ -2003,7 +2002,7 @@ public String getCatalogName(int columnIndex) { @Override public String getColumnClassName(int columnIndex) throws SQLException { int type = DataType.getValueTypeFromResultSet(this, columnIndex); - return DataType.getTypeClassName(type, true); + return ValueToObjectConverter.getDefaultClass(type, true).getName(); } /** @@ -2204,7 +2203,7 @@ public boolean rowInserted() throws SQLException { */ @Override public boolean rowUpdated() throws SQLException { - throw getUnsupportedException(); + return true; } /** @@ -2317,19 +2316,33 @@ public boolean isClosed() { } /** - * INTERNAL + * Return an object of this class if possible. 
+ * + * @param iface the class + * @return this */ @Override + @SuppressWarnings("unchecked") public T unwrap(Class iface) throws SQLException { - throw getUnsupportedException(); + try { + if (isWrapperFor(iface)) { + return (T) this; + } + throw DbException.getInvalidValueException("iface", iface); + } catch (Exception e) { + throw DbException.toSQLException(e); + } } /** - * INTERNAL + * Checks if unwrap can return an object of this class. + * + * @param iface the class + * @return whether or not the interface is assignable from this class */ @Override public boolean isWrapperFor(Class iface) throws SQLException { - throw getUnsupportedException(); + return iface != null && iface.isAssignableFrom(getClass()); } /** diff --git a/h2/src/main/org/h2/tools/SimpleRowSource.java b/h2/src/main/org/h2/tools/SimpleRowSource.java index d6c46031b3..c1a38f4463 100644 --- a/h2/src/main/org/h2/tools/SimpleRowSource.java +++ b/h2/src/main/org/h2/tools/SimpleRowSource.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -17,6 +17,7 @@ public interface SimpleRowSource { * Get the next row. Must return null if no more rows are available. * * @return the row or null + * @throws SQLException on failure */ Object[] readRow() throws SQLException; diff --git a/h2/src/main/org/h2/tools/TriggerAdapter.java b/h2/src/main/org/h2/tools/TriggerAdapter.java index 0ced3f55e4..06b25ac9aa 100644 --- a/h2/src/main/org/h2/tools/TriggerAdapter.java +++ b/h2/src/main/org/h2/tools/TriggerAdapter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.tools; @@ -9,6 +9,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import org.h2.api.Trigger; +import org.h2.message.DbException; /** * An adapter for the trigger interface that allows to use the ResultSet @@ -43,9 +44,6 @@ public abstract class TriggerAdapter implements Trigger { */ protected int type; - private SimpleResultSet oldResultSet, newResultSet; - private TriggerRowSource oldSource, newSource; - /** * This method is called by the database engine once when initializing the * trigger. It is called when the trigger is created, as well as when the @@ -66,20 +64,6 @@ public abstract class TriggerAdapter implements Trigger { public void init(Connection conn, String schemaName, String triggerName, String tableName, boolean before, int type) throws SQLException { - ResultSet rs = conn.getMetaData().getColumns( - null, schemaName, tableName, null); - oldSource = new TriggerRowSource(); - newSource = new TriggerRowSource(); - oldResultSet = new SimpleResultSet(oldSource); - newResultSet = new SimpleResultSet(newSource); - while (rs.next()) { - String column = rs.getString("COLUMN_NAME"); - int dataType = rs.getInt("DATA_TYPE"); - int precision = rs.getInt("COLUMN_SIZE"); - int scale = rs.getInt("DECIMAL_DIGITS"); - oldResultSet.addColumn(column, dataType, precision, scale); - newResultSet.addColumn(column, dataType, precision, scale); - } this.schemaName = schemaName; this.triggerName = triggerName; this.tableName = tableName; @@ -87,69 +71,14 @@ public void init(Connection conn, String schemaName, this.type = type; } - /** - * A row source that allows to set the next row. 
- */ - static class TriggerRowSource implements SimpleRowSource { - - private Object[] row; - - void setRow(Object[] row) { - this.row = row; - } - - @Override - public Object[] readRow() { - return row; - } - - @Override - public void close() { - // ignore - } - - @Override - public void reset() { - // ignore - } - - } - - /** - * This method is called for each triggered action. The method is called - * immediately when the operation occurred (before it is committed). A - * transaction rollback will also rollback the operations that were done - * within the trigger, if the operations occurred within the same database. - * If the trigger changes state outside the database, a rollback trigger - * should be used. - *

          - * The row arrays contain all columns of the table, in the same order - * as defined in the table. - *

          - *

          - * The default implementation calls the fire method with the ResultSet - * parameters. - *

          - * - * @param conn a connection to the database - * @param oldRow the old row, or null if no old row is available (for - * INSERT) - * @param newRow the new row, or null if no new row is available (for - * DELETE) - * @throws SQLException if the operation must be undone - */ @Override - public void fire(Connection conn, Object[] oldRow, Object[] newRow) - throws SQLException { - fire(conn, wrap(oldResultSet, oldSource, oldRow), - wrap(newResultSet, newSource, newRow)); + public final void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + throw DbException.getInternalError(); } /** * This method is called for each triggered action by the default * fire(Connection conn, Object[] oldRow, Object[] newRow) method. - * ResultSet.next does not need to be called (and calling it has no effect; - * it will always return true). *

          * For "before" triggers, the new values of the new row may be changed * using the ResultSet.updateX methods. @@ -165,34 +94,4 @@ public void fire(Connection conn, Object[] oldRow, Object[] newRow) public abstract void fire(Connection conn, ResultSet oldRow, ResultSet newRow) throws SQLException; - private static SimpleResultSet wrap(SimpleResultSet rs, - TriggerRowSource source, Object[] row) throws SQLException { - if (row == null) { - return null; - } - source.setRow(row); - rs.next(); - return rs; - } - - /** - * This method is called when the database is closed. - * If the method throws an exception, it will be logged, but - * closing the database will continue. - * The default implementation does nothing. - */ - @Override - public void remove() throws SQLException { - // do nothing by default - } - - /** - * This method is called when the trigger is dropped. - * The default implementation does nothing. - */ - @Override - public void close() throws SQLException { - // do nothing by default - } - } diff --git a/h2/src/main/org/h2/tools/Upgrade.java b/h2/src/main/org/h2/tools/Upgrade.java new file mode 100644 index 0000000000..ca77508028 --- /dev/null +++ b/h2/src/main/org/h2/tools/Upgrade.java @@ -0,0 +1,384 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.tools; + +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.sql.Connection; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Properties; +import java.util.UUID; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; + +import org.h2.engine.ConnectionInfo; +import org.h2.engine.Constants; +import org.h2.jdbc.JdbcConnection; +import org.h2.util.IOUtils; +import org.h2.util.StringUtils; + +/** + * Upgrade utility. + */ +public final class Upgrade { + + private static final String[] CHECKSUMS = { + /* 1.2.120 */ "6fca37906aa3916ba609f47258c4abb4c749cd51aa28718a2339d9aa234a480c", + /* 1.2.121 */ "3233d38ee11e15243f66c98ad388da9f12cf038a203cf507415081e3329ac4f4", + /* 1.2.122 */ "7451e9f234f32fd9f07e4e5e682c0595806a803de656228a43887a525019ea74", + /* 1.2.123 */ "5a4dfaf211d32860623fdc5627f12a9cf8446b9cfabc742e7c0bad26835a8bb1", + /* 1.2.124 */ "f75efcaf9ccb91d94de920322c32328435e9705c19cc06b510c5f09c0a6245bf", + /* 1.2.125 */ "0ca368055dd72d539084c916642147780c944b90d98d2306da86814b174d1145", + /* 1.2.126 */ "4d9143f5b80f8878ca56edc383ae6d0a183a3b5879e83228dbacbe288007455c", + /* 1.2.127 */ "3df7aedd564cf61a464f4e95ec364eb7bb2b51d36863ed54edeb6ff2fed7b376", + /* 1.2.128 */ "7e8af7b5eca6334013fc024dab02e173a017b2d1c22c8481ed64a6af873d0819", + /* 1.2.129 */ "9a705009830ae80a368b1b66c8ba63071845fe25d8f6b0964aa14a3f31b46bdd", + /* 1.2.130 */ "8810d72867508b033a68830024e7fe7dd5a99e6f5bbb38c5a933aeb23badff00", + /* 1.2.131 */ "c8debc05829db1db2e6b6507a3f0561e1f72bd966d36f322bdf294baca29ed22", + /* 1.2.132 */ 
"75819d4adbf76d66af904e76b52b57afe26e9bc0e15aceed4e3c72cd7586b0d3", + /* 1.2.133 */ "c9ea3e95e77ae560322bca37d51601ae4b1d07ae90988af1e9fe1ceda80cd9ce", + /* 1.2.134 */ "1f4753d8d862d7d22d234625f617d3d7e91b73799c89b8a6036895f944a863eb", + /* 1.2.135 */ "eed53fcd3cf6e1159c90e57ef2b4cbd1fa3aff7a936988bb018af6fc17a2b6d9", + /* 1.2.136 */ "d3101d540ed004493952732d28bdf90a7968990bab7a2e04d16805469aa4eedd", + /* 1.2.137 */ "035dd78af874ada48339b90e8e4f1ffba0f32bb0fa37dec37ed523afa96a9c32", + /* 1.2.138 */ "1d03156b22b40812e39cca4d250eededfed4db8476bfbae78d60a24975cbe6d8", + /* 1.2.139 */ "8102cc96257d71caeff04f02c97020ae39268a32c1f0aa8fcdfda4e948ce48c8", + /* 1.2.140 */ "134ceafcae6ca661d8acd64c8e67d30f6ead609065dba9f6d3a0cde0d7bef6e3", + /* 1.2.141 */ "e453faccaaf7d8fe4eb8be744549c4a2395c7b3dcfcbc19173588c3756baff1e", + /* 1.2.142 */ "5973b4b467f1e0a69cf8c7b02d03d9dcadb4171d8a9635c85442a5829200e76f", + /* 1.2.143 */ "711cc225d8fe5325458c3947dda2093ef3a1cd4923e916082b27e87e41ca6735", + /* 1.2.144 */ "682f6997495a8389f4881b93cb8224685b9c6cbed487bcb445712402e52a4b80", + /* 1.2.145 */ "1407913cc6ba2f8c2928e8ad544c232273365d6eb66fdf84ec4213abf71449d5", + /* 1.3.146 */ "7756a89f10d5d5df23936bbb613de8b481e32d1099e5228968046fee62fee882", + /* 1.2.147 */ "2649d19db9eebbddc826029d236886dfece9404cd108ca590e82d3fd7d888278", + /* 1.3.148 */ "66f9389748f176c11c66c201a3737ebad0b1f4ace37cc2cd3da8962c92c72128", + /* 1.3.149 */ "7c3e3b93ffaf617393126870be7f8e1708bbe8e05b931c51c638a8cb03f79a36", + /* 1.3.150 */ "1d6dc1095d3d4b105a99034ab61ab5943c4dbb31551e7b244b403cb3c324964f", + /* 1.3.151 */ "8eabfde7cf64cedb7c25dc25ee7fe75a633c5cbeb18a1060da2045293fd53b14", + /* 1.3.152 */ "a9840c6024f8570ad3aa4d54388b4dd605640cb5ab163c444a123f7d4739aa09", + /* 1.3.153 */ "33d80491417eb117a0d64442dc3e60b78cf014ad099bb36a55d3835bb69e6248", + /* 1.3.154 */ "f153d03466acc00b66e699213fe092277e457502b5caf48c417ed3745f50eaac", + /* 1.3.155 */ "244b29d22939b43ecdcd3b0bfd279899df18e3af20a50241278b5b27bcf1a902", 
+ /* 1.3.156 */ "070f9e4898044880e01232b269fea5285dbf7b814b7092701e755aa7d6941832", + /* 1.3.157 */ "4666d8f01c661054b973bc0f01f8b20f298d8e134e6fd26d78c74d43eeffd54e", + /* 1.3.158 */ "b0d95f18474beea619fcfba83f033e5702483457e0f0a1d1ffb4b757c5182582", + /* 1.3.159 */ "17aa5ced25f13f9adc2820e0ccc3010e3ce55944d10c9e2c0c631b77674d039b", + /* 1.3.160 */ "7fe66e211202733c52f02a328b55b30975287d9c509751bf87507e6227c6a2a7", + /* 1.3.161 */ "42e2ebbb7bdf29dd2de4ab16fc8fb511af6337d223afd66a5ee5fe183de05d57", + /* 1.3.162 */ "89e362f9525adf36d58487ff756ee93254bf92595a7098258a4c030e08e0742e", + /* 1.3.163 */ "1d1be843af365e8881e22732c8640e2b04c2821a0d7aa61d4152ac3f991bb735", + /* 1.3.164 */ "dbc88bb8cd8177b5f13b655d6afb525637129369422f0b7be0fe187950ea5132", + /* 1.3.165 */ "03f60ca37c0124fd2b9b177726396a51853ed0cade444e1674a090b73d341b08", + /* 1.3.166 */ "35103656071f1ffd1078b1a8c8028c9577297f31c5f8c7dcc845c7b4b6392619", + /* 1.3.167 */ "fa97521a2e72174485a96276bcf6f573d5e44ca6aba2f62de87b33b5bb0d4b91", + /* 1.3.168 */ "46d7ff55ccd910def16f9afd21d983f2eb2f9a6850fb501916f6673caebc2694", + /* 1.3.169 */ "0d99d51b8d7b8e94732d048438b9f555e031ecd52225613d7bea45290571886d", + /* 1.3.170 */ "0aca5eea86e8619e91ad61b82b77fb9d0e51e939c5603ab8da41be32c6f25664", + /* 1.3.171 */ "144d4ddb5d9f610b8b26809f1c65f442864cc55136325d3f02d7a93fb878a1db", + /* 1.3.172 */ "6ca30e38ccaa0c6f4264ef013327ef9ba5303f4be3d8fdbce0c3ae6451178c1e", + /* 1.3.173 */ "43908ee9db698cb335e2b85375d68a9d03d818869a0542b85d8d4e416619795b", + /* 1.3.174 */ "990b94cdfc89987281af4168fc2f6c9067be96a8533f5a6eb0f33da4d30d3e4b", + /* 1.3.175 */ "cc329a8742fb6e7168b00ebd0015816ff0d2462409add7c9d223826486de4691", + /* 1.3.176 */ "6ae3cc11a8bbaa5bd1d8494e62bccea4d354eaf042da468eac3bc5009fd33b67", + /* 1.4.177 */ "f281673f3248a4b5cb03fdc0cc39b944fe978366be959d0e8106fcc3197f4705", + /* 1.4.178 */ "da08fef0b2bc0ff8876f895e17605daf514405a064e3c2c11d2275a19d301be6", + /* 1.4.179 */ 
"2b76304ce4256ee9fd61156f9b6ef82c049ffdc8dc89af07fcf59e9532c7e7cd", + /* 1.4.180 */ "16428fd1e6a3e5baa8067c1c2e777e1e99af68c6ef3ff7fbbf1938937a048a82", + /* 1.4.181 */ "44673ff2834428fdb7f11dac3b9d679fb3039ea32194a69452971fdd7150a08b", + /* 1.4.182 */ "1025d0d70a4e899c41bc8fd7370cd3768826e78da91b66fd9357e44d03d79d30", + /* 1.4.183 */ "b3ff2ebe161976124965a9a841877ec4f6e913dbadcc31af27f1b99f6abd57e9", + /* 1.4.184 */ "9e47e14d5b4b9ead127b15a33b107ff06f0a7dd3f98b5d6c149e6ccae05dc0a2", + /* 1.4.185 */ "c4ac74be5971445e270bbd4344be58d9a06dc927223614217e5a87257a7edc03", + /* 1.4.186 */ "e3b7a39a2b45b61fa1521ef33b3ba676a5a9e1a397bc3ef4fb678d861a1b0ae4", + /* 1.4.187 */ "6204d0c206443681911fb9e04a3af5198b253b5627d16d8d8d79180d13319212", + /* 1.4.188 */ "11d6bff477f7ca392288f5f6d42ee61d0ccb63a34c99ba2d91710b2409673897", + /* 1.4.189 */ "c8dac03b66c8011cca4e44dcc7a8b1c8f8df769927c7672be1704e76f9ee7926", + /* 1.4.190 */ "23ba495a07bbbb3bd6c3084d10a96dad7a23741b8b6d64b213459a784195a98c", + /* 1.4.191 */ "e21ea665b74ec0115344b5afda5ec70ea27b528c3f103524e74c9854b1c4a284", + /* 1.4.192 */ "225b22e9857235c46c93861410b60b8c81c10dc8985f4faf188985ba5445126c", + /* 1.4.193 */ "b1cf34c64871014aa73580281cc464dfa72450d8860cc0752fc175e87edd6544", + /* 1.4.194 */ "b5b0c1836cead6831a50bd3e1b6c16fe6e583d4d2b7c4f41b4f838745c27cd01", + /* 1.4.195 */ "b99ea1f785c62b2a021664e72de696f8ea896f0da392a1c7baa3d4d47020b126", + /* 1.4.196 */ "0a05f4a0d5b85840148aadce63a423b5d3c36ef44756389b4faad08d2733faf5", + /* 1.4.197 */ "37f5216e14af2772930dff9b8734353f0a80e89ba3f33e065441de6537c5e842", + /* 1.4.198 */ "32dd6b149cb722aa4c2dd4d40a74a9cd41e32ac59a4e755a66e5753660d61d46", + /* 1.4.199 */ "3125a16743bc6b4cfbb61abba783203f1fb68230aa0fdc97898f796f99a5d42e", + /* 1.4.200 */ "3ad9ac4b6aae9cd9d3ac1c447465e1ed06019b851b893dd6a8d76ddb6d85bca6", + // + }; + + private static final String REPOSITORY = "https://repo1.maven.org/maven2"; + + /** + * Performs database upgrade from an older version of H2. 
+ * + * @param url + * the JDBC connection URL + * @param info + * the connection properties ("user", "password", etc). + * @param version + * the old version of H2 + * @return {@code true} on success, {@code false} if URL is a remote or + * in-memory URL + * @throws Exception + * on failure + */ + public static boolean upgrade(String url, Properties info, int version) throws Exception { + Properties oldInfo = new Properties(); + oldInfo.putAll(info); + Object password = info.get("password"); + if (password instanceof char[]) { + oldInfo.put("password", ((char[]) password).clone()); + } + ConnectionInfo ci = new ConnectionInfo(url, info, null, null); + if (!ci.isPersistent() || ci.isRemote()) { + return false; + } + String name = ci.getName(); + String script = name + ".script.sql"; + StringBuilder oldUrl = new StringBuilder("jdbc:h2:").append(name).append(";ACCESS_MODE_DATA=r"); + copyProperty(ci, oldUrl, "FILE_LOCK"); + copyProperty(ci, oldUrl, "MV_STORE"); + String cipher = copyProperty(ci, oldUrl, "CIPHER"); + String scriptCommandSuffix = cipher == null ? 
"" : " CIPHER AES PASSWORD '" + UUID.randomUUID() + "' --hide--"; + java.sql.Driver driver = loadH2(version); + try (Connection conn = driver.connect(oldUrl.toString(), oldInfo)) { + conn.createStatement().execute(StringUtils.quoteStringSQL(new StringBuilder("SCRIPT TO "), script) + .append(scriptCommandSuffix).toString()); + } finally { + unloadH2(driver); + } + rename(name, false); + try (JdbcConnection conn = new JdbcConnection(url, info, null, null, false)) { + StringBuilder builder = StringUtils.quoteStringSQL(new StringBuilder("RUNSCRIPT FROM "), script) + .append(scriptCommandSuffix); + if (version <= 200) { + builder.append(" FROM_1X"); + } + conn.createStatement().execute(builder.toString()); + } catch (Throwable t) { + rename(name, true); + throw t; + } finally { + Files.deleteIfExists(Paths.get(script)); + } + return true; + } + + private static void rename(String name, boolean back) throws IOException { + rename(name, Constants.SUFFIX_MV_FILE, back); + rename(name, ".lobs.db", back); + } + + private static void rename(String name, String suffix, boolean back) throws IOException { + String source = name + suffix; + String target = source + ".bak"; + if (back) { + String t = source; + source = target; + target = t; + } + Path p = Paths.get(source); + if (Files.exists(p)) { + Files.move(p, Paths.get(target), StandardCopyOption.ATOMIC_MOVE); + } + } + + private static String copyProperty(ConnectionInfo ci, StringBuilder oldUrl, String name) { + try { + String value = ci.getProperty(name, null); + if (value != null) { + oldUrl.append(';').append(name).append('=').append(value); + } + return value; + } catch (Exception e) { + return null; + } + } + + /** + * Loads the specified version of H2 in a separate class loader. 
+ * + * @param version + * the version to load + * @return the driver of the specified version + * @throws IOException + * on I/O exception + * @throws ReflectiveOperationException + * on exception during initialization of the driver + */ + public static java.sql.Driver loadH2(int version) throws IOException, ReflectiveOperationException { + String prefix; + if (version >= 201) { + if ((version & 1) != 0 || version > Constants.BUILD_ID) { + throw new IllegalArgumentException("version=" + version); + } + prefix = "2.0."; + } else if (version >= 177) { + prefix = "1.4."; + } else if (version >= 146 && version != 147) { + prefix = "1.3."; + } else if (version >= 120) { + prefix = "1.2."; + } else { + throw new IllegalArgumentException("version=" + version); + } + String fullVersion = prefix + version; + byte[] data = downloadUsingMaven("com.h2database", "h2", fullVersion, CHECKSUMS[version - 120]); + ZipInputStream is = new ZipInputStream(new ByteArrayInputStream(data)); + HashMap map = new HashMap<>(version >= 198 ? 2048 : 1024); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + for (ZipEntry ze; (ze = is.getNextEntry()) != null;) { + if (ze.isDirectory()) { + continue; + } + IOUtils.copy(is, baos); + map.put(ze.getName(), baos.toByteArray()); + baos.reset(); + } + ClassLoader cl = new ClassLoader(null) { + @Override + protected Class findClass(String name) throws ClassNotFoundException { + String resourceName = name.replace('.', '/') + ".class"; + byte[] b = map.get(resourceName); + if (b == null) { + return ClassLoader.getSystemClassLoader().loadClass(name); + } + return defineClass(name, b, 0, b.length); + } + + @Override + public InputStream getResourceAsStream(String name) { + byte[] b = map.get(name); + return b != null ? new ByteArrayInputStream(b) : null; + } + }; + return (java.sql.Driver) cl.loadClass("org.h2.Driver").getDeclaredMethod("load").invoke(null); + } + + /** + * Unloads the specified driver of H2. 
+ * + * @param driver + * the driver to unload + * @throws ReflectiveOperationException + * on exception + */ + public static void unloadH2(java.sql.Driver driver) throws ReflectiveOperationException { + driver.getClass().getDeclaredMethod("unload").invoke(null); + } + + private static byte[] downloadUsingMaven(String group, String artifact, String version, String sha256Checksum) + throws IOException { + String repoFile = group.replace('.', '/') + '/' + artifact + '/' + version + '/' + artifact + '-' + version + + ".jar"; + Path localMavenDir = Paths.get(System.getProperty("user.home") + "/.m2/repository"); + if (Files.isDirectory(localMavenDir)) { + Path f = localMavenDir.resolve(repoFile); + if (!Files.exists(f)) { + try { + ArrayList args = new ArrayList<>(); + if (System.getProperty("os.name").toLowerCase().contains("windows")) { + args.add("cmd"); + args.add("/C"); + } + args.add("mvn"); + args.add("org.apache.maven.plugins:maven-dependency-plugin:2.1:get"); + args.add("-D" + "repoUrl=" + REPOSITORY); + args.add("-D" + "artifact=" + group + ':' + artifact + ':' + version); + exec(args); + } catch (RuntimeException e) { + System.out.println("Could not download using Maven: " + e.toString()); + } + } + if (Files.exists(f)) { + return check(Files.readAllBytes(f), sha256Checksum, f.toAbsolutePath().toString()); + } + } + return download(REPOSITORY + '/' + repoFile, sha256Checksum); + } + + private static int exec(ArrayList args) { + try { + ProcessBuilder pb = new ProcessBuilder(); + pb.command(args.toArray(new String[0])); + pb.inheritIO(); + Process p = pb.start(); + p.waitFor(); + return p.exitValue(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private static byte[] download(String fileURL, String sha256Checksum) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try { + System.out.println("Downloading " + fileURL); + URL url = new URL(fileURL); + InputStream in = new BufferedInputStream(url.openStream()); + long last = 
System.nanoTime(); + int len = 0; + while (true) { + long now = System.nanoTime(); + if (now - last > 1_000_000_000L) { + System.out.println("Downloaded " + len + " bytes"); + last = now; + } + int x = in.read(); + len++; + if (x < 0) { + break; + } + baos.write(x); + } + in.close(); + } catch (IOException e) { + throw new RuntimeException("Error downloading " + fileURL, e); + } + return check(baos.toByteArray(), sha256Checksum, null); + } + + private static byte[] check(byte[] data, String sha256Checksum, String checksummedFile) { + String got = getSHA256(data); + if (sha256Checksum == null) { + System.out.println('"' + got + '"'); + } else { + if (!got.equals(sha256Checksum)) { + StringBuilder builder = new StringBuilder().append("SHA-256 checksum mismatch; got: ").append(got) + .append(" expected: ").append(sha256Checksum); + if (checksummedFile != null) { + builder.append(" for file ").append(checksummedFile); + } + throw new RuntimeException(builder.toString()); + } + } + return data; + } + + private static String getSHA256(byte[] data) { + try { + return StringUtils.convertBytesToHex(MessageDigest.getInstance("SHA-256").digest(data)); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + } + + private Upgrade() { + } + +} diff --git a/h2/src/main/org/h2/tools/package.html b/h2/src/main/org/h2/tools/package.html index 18faf91b4f..806c13eddf 100644 --- a/h2/src/main/org/h2/tools/package.html +++ b/h2/src/main/org/h2/tools/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/upgrade/DbUpgrade.java b/h2/src/main/org/h2/upgrade/DbUpgrade.java deleted file mode 100644 index 5960017041..0000000000 --- a/h2/src/main/org/h2/upgrade/DbUpgrade.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.upgrade; - -import java.io.File; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Properties; -import java.util.UUID; -import org.h2.engine.ConnectionInfo; -import org.h2.engine.Constants; -import org.h2.jdbc.JdbcConnection; -import org.h2.message.DbException; -import org.h2.store.fs.FileUtils; -import org.h2.util.StringUtils; -import org.h2.util.Utils; - -/** - * This class starts the conversion from older database versions to the current - * version if the respective classes are found. - */ -public class DbUpgrade { - - private static final boolean UPGRADE_CLASSES_PRESENT; - - private static boolean scriptInTempDir; - private static boolean deleteOldDb; - - static { - UPGRADE_CLASSES_PRESENT = Utils.isClassPresent("org.h2.upgrade.v1_1.Driver"); - } - - /** - * If the upgrade classes are present, upgrade the database, or connect - * using the old version (if the parameter NO_UPGRADE is set to true). If - * the database is upgraded, or if no upgrade is possible or needed, this - * methods returns null. 
- * - * @param url the database URL - * @param info the properties - * @return the connection if connected with the old version (NO_UPGRADE) - */ - public static Connection connectOrUpgrade(String url, Properties info) - throws SQLException { - if (!UPGRADE_CLASSES_PRESENT) { - return null; - } - Properties i2 = new Properties(); - i2.putAll(info); - // clone so that the password (if set as a char array) is not cleared - Object o = info.get("password"); - if (o instanceof char[]) { - i2.put("password", StringUtils.cloneCharArray((char[]) o)); - } - info = i2; - ConnectionInfo ci = new ConnectionInfo(url, info); - if (ci.isRemote() || !ci.isPersistent()) { - return null; - } - String name = ci.getName(); - if (FileUtils.exists(name + Constants.SUFFIX_PAGE_FILE)) { - return null; - } - if (!FileUtils.exists(name + Constants.SUFFIX_OLD_DATABASE_FILE)) { - return null; - } - if (ci.removeProperty("NO_UPGRADE", false)) { - return connectWithOldVersion(url, info); - } - synchronized (DbUpgrade.class) { - upgrade(ci, info); - return null; - } - } - - /** - * The conversion script file will per default be created in the db - * directory. Use this method to change the directory to the temp - * directory. - * - * @param scriptInTempDir true if the conversion script should be - * located in the temp directory. - */ - public static void setScriptInTempDir(boolean scriptInTempDir) { - DbUpgrade.scriptInTempDir = scriptInTempDir; - } - - /** - * Old files will be renamed to .backup after a successful conversion. To - * delete them after the conversion, use this method with the parameter - * 'true'. - * - * @param deleteOldDb if true, the old db files will be deleted. 
- */ - public static void setDeleteOldDb(boolean deleteOldDb) { - DbUpgrade.deleteOldDb = deleteOldDb; - } - - private static Connection connectWithOldVersion(String url, Properties info) - throws SQLException { - url = "jdbc:h2v1_1:" + url.substring("jdbc:h2:".length()) + - ";IGNORE_UNKNOWN_SETTINGS=TRUE"; - return DriverManager.getConnection(url, info); - } - - private static void upgrade(ConnectionInfo ci, Properties info) - throws SQLException { - String name = ci.getName(); - String data = name + Constants.SUFFIX_OLD_DATABASE_FILE; - String index = name + ".index.db"; - String lobs = name + ".lobs.db"; - String backupData = data + ".backup"; - String backupIndex = index + ".backup"; - String backupLobs = lobs + ".backup"; - String script = null; - try { - if (scriptInTempDir) { - new File(Utils.getProperty("java.io.tmpdir", ".")).mkdirs(); - script = File.createTempFile( - "h2dbmigration", "backup.sql").getAbsolutePath(); - } else { - script = name + ".script.sql"; - } - String oldUrl = "jdbc:h2v1_1:" + name + - ";UNDO_LOG=0;LOG=0;LOCK_MODE=0"; - String cipher = ci.getProperty("CIPHER", null); - if (cipher != null) { - oldUrl += ";CIPHER=" + cipher; - } - Connection conn = DriverManager.getConnection(oldUrl, info); - Statement stat = conn.createStatement(); - String uuid = UUID.randomUUID().toString(); - if (cipher != null) { - stat.execute("script to '" + script + - "' cipher aes password '" + uuid + "' --hide--"); - } else { - stat.execute("script to '" + script + "'"); - } - conn.close(); - FileUtils.move(data, backupData); - FileUtils.move(index, backupIndex); - if (FileUtils.exists(lobs)) { - FileUtils.move(lobs, backupLobs); - } - ci.removeProperty("IFEXISTS", false); - conn = new JdbcConnection(ci, true); - stat = conn.createStatement(); - if (cipher != null) { - stat.execute("runscript from '" + script + - "' cipher aes password '" + uuid + "' --hide--"); - } else { - stat.execute("runscript from '" + script + "'"); - } - stat.execute("analyze"); - 
stat.execute("shutdown compact"); - stat.close(); - conn.close(); - if (deleteOldDb) { - FileUtils.delete(backupData); - FileUtils.delete(backupIndex); - FileUtils.deleteRecursive(backupLobs, false); - } - } catch (Exception e) { - if (FileUtils.exists(backupData)) { - FileUtils.move(backupData, data); - } - if (FileUtils.exists(backupIndex)) { - FileUtils.move(backupIndex, index); - } - if (FileUtils.exists(backupLobs)) { - FileUtils.move(backupLobs, lobs); - } - FileUtils.delete(name + ".h2.db"); - throw DbException.toSQLException(e); - } finally { - if (script != null) { - FileUtils.delete(script); - } - } - } - -} diff --git a/h2/src/main/org/h2/util/AbbaDetector.java b/h2/src/main/org/h2/util/AbbaDetector.java index 89495d0c62..b4c41bc909 100644 --- a/h2/src/main/org/h2/util/AbbaDetector.java +++ b/h2/src/main/org/h2/util/AbbaDetector.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -19,12 +19,7 @@ public class AbbaDetector { private static final boolean TRACE = false; - private static final ThreadLocal> STACK = - new ThreadLocal>() { - @Override protected Deque initialValue() { - return new ArrayDeque<>(); - } - }; + private static final ThreadLocal> STACK = ThreadLocal.withInitial(ArrayDeque::new); /** * Map of (object A) -> ( diff --git a/h2/src/main/org/h2/util/AbbaLockingDetector.java b/h2/src/main/org/h2/util/AbbaLockingDetector.java index 1e9ef6c5ad..62b67f48b8 100644 --- a/h2/src/main/org/h2/util/AbbaLockingDetector.java +++ b/h2/src/main/org/h2/util/AbbaLockingDetector.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -11,7 +11,6 @@ import java.lang.management.ThreadMXBean; import java.util.ArrayList; import java.util.Arrays; -import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -119,15 +118,9 @@ private void processThreadList(ThreadInfo[] threadInfoList) { * We cannot simply call getLockedMonitors because it is not guaranteed to * return the locks in the correct order. */ - private static void generateOrdering(final List lockOrder, - ThreadInfo info) { + private static void generateOrdering(List lockOrder, ThreadInfo info) { final MonitorInfo[] lockedMonitors = info.getLockedMonitors(); - Arrays.sort(lockedMonitors, new Comparator() { - @Override - public int compare(MonitorInfo a, MonitorInfo b) { - return b.getLockedStackDepth() - a.getLockedStackDepth(); - } - }); + Arrays.sort(lockedMonitors, (a, b) -> b.getLockedStackDepth() - a.getLockedStackDepth()); for (MonitorInfo mi : lockedMonitors) { String lockName = getObjectName(mi); if (lockName.equals("sun.misc.Launcher$AppClassLoader")) { diff --git a/h2/src/main/org/h2/util/Bits.java b/h2/src/main/org/h2/util/Bits.java index 6d5fb11948..f910c5a1e6 100644 --- a/h2/src/main/org/h2/util/Bits.java +++ b/h2/src/main/org/h2/util/Bits.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/ByteStack.java b/h2/src/main/org/h2/util/ByteStack.java new file mode 100644 index 0000000000..f1764ad290 --- /dev/null +++ b/h2/src/main/org/h2/util/ByteStack.java @@ -0,0 +1,122 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util; + +import java.util.Arrays; +import java.util.NoSuchElementException; + +/** + * The stack of byte values. This class is not synchronized and should not be + * used by multiple threads concurrently. + */ +public final class ByteStack { + + private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + private int size; + + private byte[] array; + + /** + * Creates a new empty instance. + */ + public ByteStack() { + array = Utils.EMPTY_BYTES; + } + + /** + * Pushes an item onto the top of this stack. + * + * @param item + * the item to push + */ + public void push(byte item) { + int index = size; + int oldLength = array.length; + if (index >= oldLength) { + grow(oldLength); + } + array[index] = item; + size = index + 1; + } + + /** + * Removes the item at the top of this stack and returns that item. + * + * @return the item at the top of this stack + * @throws NoSuchElementException + * if stack is empty + */ + public byte pop() { + int index = size - 1; + if (index < 0) { + throw new NoSuchElementException(); + } + size = index; + return array[index]; + } + + /** + * Removes the item at the top of this stack and returns that item. + * + * @param defaultValue + * value to return if stack is empty + * @return the item at the top of this stack, or default value + */ + public int poll(int defaultValue) { + int index = size - 1; + if (index < 0) { + return defaultValue; + } + size = index; + return array[index]; + } + + /** + * Looks at the item at the top of this stack without removing it. 
+ * + * @param defaultValue + * value to return if stack is empty + * @return the item at the top of this stack, or default value + */ + public int peek(int defaultValue) { + int index = size - 1; + if (index < 0) { + return defaultValue; + } + return array[index]; + } + + /** + * Returns {@code true} if this stack is empty. + * + * @return {@code true} if this stack is empty + */ + public boolean isEmpty() { + return size == 0; + } + + /** + * Returns the number of items in this stack. + * + * @return the number of items in this stack + */ + public int size() { + return size; + } + + private void grow(int length) { + if (length == 0) { + length = 0x10; + } else if (length >= MAX_ARRAY_SIZE) { + throw new OutOfMemoryError(); + } else if ((length <<= 1) < 0) { + length = MAX_ARRAY_SIZE; + } + array = Arrays.copyOf(array, length); + } + +} diff --git a/h2/src/main/org/h2/util/Cache.java b/h2/src/main/org/h2/util/Cache.java index 6616d6757a..9ea4857c4a 100644 --- a/h2/src/main/org/h2/util/Cache.java +++ b/h2/src/main/org/h2/util/Cache.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/CacheHead.java b/h2/src/main/org/h2/util/CacheHead.java index b816e6be72..d18bb13b97 100644 --- a/h2/src/main/org/h2/util/CacheHead.java +++ b/h2/src/main/org/h2/util/CacheHead.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/CacheLRU.java b/h2/src/main/org/h2/util/CacheLRU.java index 29e52d65cf..7dbd58193d 100644 --- a/h2/src/main/org/h2/util/CacheLRU.java +++ b/h2/src/main/org/h2/util/CacheLRU.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -78,7 +78,7 @@ public static Cache getCache(CacheWriter writer, String cacheType, int cacheSize) { Map secondLevel = null; if (cacheType.startsWith("SOFT_")) { - secondLevel = new SoftHashMap<>(); + secondLevel = new SoftValuesHashMap<>(); cacheType = cacheType.substring("SOFT_".length()); } Cache cache; @@ -111,9 +111,7 @@ public void put(CacheObject rec) { int pos = rec.getPos(); CacheObject old = find(pos); if (old != null) { - DbException - .throwInternalError("try to add a record twice at pos " + - pos); + throw DbException.getInternalError("try to add a record twice at pos " + pos); } } int index = rec.getPos() & mask; @@ -132,7 +130,7 @@ public CacheObject update(int pos, CacheObject rec) { put(rec); } else { if (old != rec) { - DbException.throwInternalError("old!=record pos:" + pos + " old:" + old + " new:" + rec); + throw DbException.getInternalError("old!=record pos:" + pos + " old:" + old + " new:" + rec); } if (!fifo) { removeFromLinkedList(rec); @@ -188,7 +186,7 @@ private void removeOld() { } } if (check == head) { - DbException.throwInternalError("try to remove head"); + throw DbException.getInternalError("try to remove head"); } // we are not allowed to remove it if the log is not yet written // (because we need to log before writing the data) @@ -228,7 +226,7 @@ private void removeOld() { CacheObject rec = changed.get(i); remove(rec.getPos()); 
if (rec.cacheNext != null) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } } } @@ -236,7 +234,7 @@ private void removeOld() { private void addToFront(CacheObject rec) { if (rec == head) { - DbException.throwInternalError("try to move head"); + throw DbException.getInternalError("try to move head"); } rec.cacheNext = head; rec.cachePrevious = head.cachePrevious; @@ -246,7 +244,7 @@ private void addToFront(CacheObject rec) { private void removeFromLinkedList(CacheObject rec) { if (rec == head) { - DbException.throwInternalError("try to remove head"); + throw DbException.getInternalError("try to remove head"); } rec.cachePrevious.cacheNext = rec.cacheNext; rec.cacheNext.cachePrevious = rec.cachePrevious; @@ -283,7 +281,7 @@ public boolean remove(int pos) { rec.cacheChained = null; CacheObject o = find(pos); if (o != null) { - DbException.throwInternalError("not removed: " + o); + throw DbException.getInternalError("not removed: " + o); } } return true; diff --git a/h2/src/main/org/h2/util/CacheObject.java b/h2/src/main/org/h2/util/CacheObject.java index fb09b8daff..2cbf84db53 100644 --- a/h2/src/main/org/h2/util/CacheObject.java +++ b/h2/src/main/org/h2/util/CacheObject.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util; @@ -49,7 +49,7 @@ public abstract class CacheObject implements Comparable { public void setPos(int pos) { if (cachePrevious != null || cacheNext != null || cacheChained != null) { - DbException.throwInternalError("setPos too late"); + throw DbException.getInternalError("setPos too late"); } this.pos = pos; } diff --git a/h2/src/main/org/h2/util/CacheSecondLevel.java b/h2/src/main/org/h2/util/CacheSecondLevel.java index e156c29a72..7d0469deba 100644 --- a/h2/src/main/org/h2/util/CacheSecondLevel.java +++ b/h2/src/main/org/h2/util/CacheSecondLevel.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Jan Kotek */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/CacheTQ.java b/h2/src/main/org/h2/util/CacheTQ.java index d94a3c8707..b05c574696 100644 --- a/h2/src/main/org/h2/util/CacheTQ.java +++ b/h2/src/main/org/h2/util/CacheTQ.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/CacheWriter.java b/h2/src/main/org/h2/util/CacheWriter.java index 286708db11..4277471384 100644 --- a/h2/src/main/org/h2/util/CacheWriter.java +++ b/h2/src/main/org/h2/util/CacheWriter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/CloseWatcher.java b/h2/src/main/org/h2/util/CloseWatcher.java index 69a33b395d..3a5911f8f8 100644 --- a/h2/src/main/org/h2/util/CloseWatcher.java +++ b/h2/src/main/org/h2/util/CloseWatcher.java @@ -1,13 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group * Iso8601: * Initial Developer: Robert Rathsack (firstName dot lastName at gmx dot de) */ package org.h2.util; -import java.io.Closeable; import java.io.PrintWriter; import java.io.StringWriter; import java.lang.ref.PhantomReference; @@ -24,13 +23,13 @@ public class CloseWatcher extends PhantomReference { /** * The queue (might be set to null at any time). */ - private static ReferenceQueue queue = new ReferenceQueue<>(); + private static final ReferenceQueue queue = new ReferenceQueue<>(); /** * The reference set. Must keep it, otherwise the references are garbage * collected first and thus never enqueued. */ - private static Set refs = createSet(); + private static final Set refs = Collections.synchronizedSet(new HashSet<>()); /** * The stack trace of when the object was created. It is converted to a @@ -42,30 +41,22 @@ public class CloseWatcher extends PhantomReference { /** * The closeable object. */ - private Closeable closeable; + private AutoCloseable closeable; public CloseWatcher(Object referent, ReferenceQueue q, - Closeable closeable) { + AutoCloseable closeable) { super(referent, q); this.closeable = closeable; } - private static Set createSet() { - return Collections.synchronizedSet(new HashSet()); - } - /** * Check for an collected object. 
* * @return the first watcher */ public static CloseWatcher pollUnclosed() { - ReferenceQueue q = queue; - if (q == null) { - return null; - } while (true) { - CloseWatcher cw = (CloseWatcher) q.poll(); + CloseWatcher cw = (CloseWatcher) queue.poll(); if (cw == null) { return null; } @@ -88,23 +79,14 @@ public static CloseWatcher pollUnclosed() { * relatively slow) * @return the close watcher */ - public static CloseWatcher register(Object o, Closeable closeable, - boolean stackTrace) { - ReferenceQueue q = queue; - if (q == null) { - q = new ReferenceQueue<>(); - queue = q; - } - CloseWatcher cw = new CloseWatcher(o, q, closeable); + public static CloseWatcher register(Object o, AutoCloseable closeable, boolean stackTrace) { + CloseWatcher cw = new CloseWatcher(o, queue, closeable); if (stackTrace) { Exception e = new Exception("Open Stack Trace"); StringWriter s = new StringWriter(); e.printStackTrace(new PrintWriter(s)); cw.openStackTrace = s.toString(); } - if (refs == null) { - refs = createSet(); - } refs.add(cw); return cw; } @@ -128,7 +110,7 @@ public String getOpenStackTrace() { return openStackTrace; } - public Closeable getCloseable() { + public AutoCloseable getCloseable() { return closeable; } diff --git a/h2/src/main/org/h2/util/ColumnNamer.java b/h2/src/main/org/h2/util/ColumnNamer.java deleted file mode 100644 index 9e7ce18a7a..0000000000 --- a/h2/src/main/org/h2/util/ColumnNamer.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - */ -package org.h2.util; - -import java.util.HashSet; -import java.util.Set; -import java.util.regex.Pattern; -import org.h2.engine.Session; -import org.h2.expression.Expression; - -/** - * A factory for column names. 
- */ -public class ColumnNamer { - - private static final String DEFAULT_COLUMN_NAME = "DEFAULT"; - - private final ColumnNamerConfiguration configuration; - private final Set existingColumnNames = new HashSet<>(); - - public ColumnNamer(Session session) { - if (session != null && session.getColumnNamerConfiguration() != null) { - // use original from session - this.configuration = session.getColumnNamerConfiguration(); - } else { - // detached namer, create new - this.configuration = ColumnNamerConfiguration.getDefault(); - if (session != null) { - session.setColumnNamerConfiguration(this.configuration); - } - } - } - - /** - * Create a standardized column name that isn't null and doesn't have a CR/LF in it. - * @param columnExp the column expression - * @param indexOfColumn index of column in below array - * @param columnNameOverides array of overriding column names - * @return the new column name - */ - public String getColumnName(Expression columnExp, int indexOfColumn, String[] columnNameOverides) { - String columnNameOverride = null; - if (columnNameOverides != null && columnNameOverides.length > indexOfColumn) { - columnNameOverride = columnNameOverides[indexOfColumn]; - } - return getColumnName(columnExp, indexOfColumn, columnNameOverride); - } - - /** - * Create a standardized column name that isn't null and doesn't have a CR/LF in it. 
- * @param columnExp the column expression - * @param indexOfColumn index of column in below array - * @param columnNameOverride single overriding column name - * @return the new column name - */ - public String getColumnName(Expression columnExp, int indexOfColumn, String columnNameOverride) { - // try a name from the column name override - String columnName = getColumnName(columnNameOverride, null); - if (columnName == null) { - // try a name from the column alias - columnName = getColumnName(columnExp.getAlias(), DEFAULT_COLUMN_NAME); - if (columnName == null) { - // try a name derived from the column expression SQL - columnName = getColumnName(columnExp.getColumnName(), DEFAULT_COLUMN_NAME); - if (columnName == null) { - // try a name derived from the column expression plan SQL - columnName = getColumnName(columnExp.getSQL(false), DEFAULT_COLUMN_NAME); - // go with a innocuous default name pattern - if (columnName == null) { - columnName = configuration.getDefaultColumnNamePattern() - .replace("$$", Integer.toString(indexOfColumn + 1)); - } - } - } - } - if (existingColumnNames.contains(columnName) && configuration.isGenerateUniqueColumnNames()) { - columnName = generateUniqueName(columnName); - } - existingColumnNames.add(columnName); - return columnName; - } - - private String getColumnName(String proposedName, String disallowedName) { - String columnName = null; - if (proposedName != null && !proposedName.equals(disallowedName)) { - if (isAllowableColumnName(proposedName)) { - columnName = proposedName; - } else { - proposedName = fixColumnName(proposedName); - if (isAllowableColumnName(proposedName)) { - columnName = proposedName; - } - } - } - return columnName; - } - - private String generateUniqueName(String columnName) { - String newColumnName = columnName; - int loopCount = 2; - while (existingColumnNames.contains(newColumnName)) { - String loopCountString = "_" + loopCount; - newColumnName = columnName.substring(0, - Math.min(columnName.length(), 
configuration.getMaxIdentiferLength() - loopCountString.length())) - + loopCountString; - loopCount++; - } - return newColumnName; - } - - private boolean isAllowableColumnName(String proposedName) { - // check null - if (proposedName == null) { - return false; - } - // check size limits - int length = proposedName.length(); - if (length > configuration.getMaxIdentiferLength() || length == 0) { - return false; - } - Pattern allowed = configuration.getCompiledRegularExpressionMatchAllowed(); - return allowed == null || allowed.matcher(proposedName).matches(); - } - - private String fixColumnName(String proposedName) { - Pattern disallowed = configuration.getCompiledRegularExpressionMatchDisallowed(); - if (disallowed == null) { - proposedName = StringUtils.replaceAll(proposedName, "\u0000", ""); - } else { - proposedName = disallowed.matcher(proposedName).replaceAll(""); - } - - // check size limits - then truncate - int length = proposedName.length(), maxLength = configuration.getMaxIdentiferLength(); - if (length > maxLength) { - proposedName = proposedName.substring(0, maxLength); - } - - return proposedName; - } - - public ColumnNamerConfiguration getConfiguration() { - return configuration; - } - -} diff --git a/h2/src/main/org/h2/util/ColumnNamerConfiguration.java b/h2/src/main/org/h2/util/ColumnNamerConfiguration.java deleted file mode 100644 index cbc6dc7649..0000000000 --- a/h2/src/main/org/h2/util/ColumnNamerConfiguration.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - */ -package org.h2.util; - -import java.util.regex.Pattern; -import org.h2.engine.Mode.ModeEnum; -import static org.h2.engine.Mode.ModeEnum.*; -import org.h2.message.DbException; - -/** - * The configuration for the allowed column names. 
- */ -public class ColumnNamerConfiguration { - - private static final String DEFAULT_COMMAND = "DEFAULT"; - private static final String REGULAR_EXPRESSION_MATCH_DISALLOWED = "REGULAR_EXPRESSION_MATCH_DISALLOWED = "; - private static final String REGULAR_EXPRESSION_MATCH_ALLOWED = "REGULAR_EXPRESSION_MATCH_ALLOWED = "; - private static final String DEFAULT_COLUMN_NAME_PATTERN = "DEFAULT_COLUMN_NAME_PATTERN = "; - private static final String MAX_IDENTIFIER_LENGTH = "MAX_IDENTIFIER_LENGTH = "; - private static final String EMULATE_COMMAND = "EMULATE = "; - private static final String GENERATE_UNIQUE_COLUMN_NAMES = "GENERATE_UNIQUE_COLUMN_NAMES = "; - - private int maxIdentiferLength; - private String regularExpressionMatchAllowed; - private String regularExpressionMatchDisallowed; - private String defaultColumnNamePattern; - private boolean generateUniqueColumnNames; - private Pattern compiledRegularExpressionMatchAllowed; - private Pattern compiledRegularExpressionMatchDisallowed; - - public ColumnNamerConfiguration(int maxIdentiferLength, String regularExpressionMatchAllowed, - String regularExpressionMatchDisallowed, String defaultColumnNamePattern, - boolean generateUniqueColumnNames) { - - this.maxIdentiferLength = maxIdentiferLength; - this.regularExpressionMatchAllowed = regularExpressionMatchAllowed; - this.regularExpressionMatchDisallowed = regularExpressionMatchDisallowed; - this.defaultColumnNamePattern = defaultColumnNamePattern; - this.generateUniqueColumnNames = generateUniqueColumnNames; - - recompilePatterns(); - } - - public int getMaxIdentiferLength() { - return maxIdentiferLength; - } - - public void setMaxIdentiferLength(int maxIdentiferLength) { - this.maxIdentiferLength = Math.max(30, maxIdentiferLength); - if (maxIdentiferLength != getMaxIdentiferLength()) { - throw DbException.getInvalidValueException("Illegal value (<30) in SET COLUMN_NAME_RULES", - "MAX_IDENTIFIER_LENGTH=" + maxIdentiferLength); - } - } - - public String 
getRegularExpressionMatchAllowed() { - return regularExpressionMatchAllowed; - } - - public void setRegularExpressionMatchAllowed(String regularExpressionMatchAllowed) { - this.regularExpressionMatchAllowed = regularExpressionMatchAllowed; - } - - public String getRegularExpressionMatchDisallowed() { - return regularExpressionMatchDisallowed; - } - - public void setRegularExpressionMatchDisallowed(String regularExpressionMatchDisallowed) { - this.regularExpressionMatchDisallowed = regularExpressionMatchDisallowed; - } - - public String getDefaultColumnNamePattern() { - return defaultColumnNamePattern; - } - - public void setDefaultColumnNamePattern(String defaultColumnNamePattern) { - this.defaultColumnNamePattern = defaultColumnNamePattern; - } - - /** - * Returns compiled pattern for allowed names. - * - * @return compiled pattern, or null for default - */ - public Pattern getCompiledRegularExpressionMatchAllowed() { - return compiledRegularExpressionMatchAllowed; - } - - public void setCompiledRegularExpressionMatchAllowed(Pattern compiledRegularExpressionMatchAllowed) { - this.compiledRegularExpressionMatchAllowed = compiledRegularExpressionMatchAllowed; - } - - /** - * Returns compiled pattern for disallowed names. - * - * @return compiled pattern, or null for default - */ - public Pattern getCompiledRegularExpressionMatchDisallowed() { - return compiledRegularExpressionMatchDisallowed; - } - - public void setCompiledRegularExpressionMatchDisallowed(Pattern compiledRegularExpressionMatchDisallowed) { - this.compiledRegularExpressionMatchDisallowed = compiledRegularExpressionMatchDisallowed; - } - - /** - * Configure the column namer. 
- * - * @param stringValue the configuration - */ - public void configure(String stringValue) { - try { - if (stringValue.equalsIgnoreCase(DEFAULT_COMMAND)) { - configure(REGULAR); - } else if (stringValue.startsWith(EMULATE_COMMAND)) { - configure(ModeEnum.valueOf(unquoteString(stringValue.substring(EMULATE_COMMAND.length())))); - } else if (stringValue.startsWith(MAX_IDENTIFIER_LENGTH)) { - int maxLength = Integer.parseInt(stringValue.substring(MAX_IDENTIFIER_LENGTH.length())); - setMaxIdentiferLength(maxLength); - } else if (stringValue.startsWith(GENERATE_UNIQUE_COLUMN_NAMES)) { - setGenerateUniqueColumnNames( - Integer.parseInt(stringValue.substring(GENERATE_UNIQUE_COLUMN_NAMES.length())) == 1); - } else if (stringValue.startsWith(DEFAULT_COLUMN_NAME_PATTERN)) { - setDefaultColumnNamePattern( - unquoteString(stringValue.substring(DEFAULT_COLUMN_NAME_PATTERN.length()))); - } else if (stringValue.startsWith(REGULAR_EXPRESSION_MATCH_ALLOWED)) { - setRegularExpressionMatchAllowed( - unquoteString(stringValue.substring(REGULAR_EXPRESSION_MATCH_ALLOWED.length()))); - } else if (stringValue.startsWith(REGULAR_EXPRESSION_MATCH_DISALLOWED)) { - setRegularExpressionMatchDisallowed( - unquoteString(stringValue.substring(REGULAR_EXPRESSION_MATCH_DISALLOWED.length()))); - } else { - throw DbException.getInvalidValueException("SET COLUMN_NAME_RULES: unknown id:" + stringValue, - stringValue); - } - recompilePatterns(); - } - // Including NumberFormatException|PatternSyntaxException - catch (RuntimeException e) { - throw DbException.getInvalidValueException("SET COLUMN_NAME_RULES:" + e.getMessage(), stringValue); - - } - } - - private void recompilePatterns() { - try { - // recompile RE patterns - setCompiledRegularExpressionMatchAllowed( - regularExpressionMatchAllowed != null ? Pattern.compile(regularExpressionMatchAllowed) : null); - setCompiledRegularExpressionMatchDisallowed( - regularExpressionMatchDisallowed != null ? 
Pattern.compile(regularExpressionMatchDisallowed) - : null); - } catch (Exception e) { - configure(REGULAR); - throw e; - } - } - - public static ColumnNamerConfiguration getDefault() { - return new ColumnNamerConfiguration(Integer.MAX_VALUE, null, null, "_UNNAMED_$$", false); - } - - private static String unquoteString(String s) { - if (s.startsWith("'") && s.endsWith("'")) { - s = s.substring(1, s.length() - 1); - return s; - } - return s; - } - - public boolean isGenerateUniqueColumnNames() { - return generateUniqueColumnNames; - } - - public void setGenerateUniqueColumnNames(boolean generateUniqueColumnNames) { - this.generateUniqueColumnNames = generateUniqueColumnNames; - } - - /** - * Configure the rules. - * - * @param modeEnum the mode - */ - public void configure(ModeEnum modeEnum) { - switch (modeEnum) { - case Oracle: - // Nonquoted identifiers can contain only alphanumeric characters - // from your database character set and the underscore (_), dollar - // sign ($), and pound sign (#). 
- setMaxIdentiferLength(128); - setRegularExpressionMatchAllowed("(?m)(?s)\"?[A-Za-z0-9_\\$#]+\"?"); - setRegularExpressionMatchDisallowed("(?m)(?s)[^A-Za-z0-9_\"\\$#]"); - setDefaultColumnNamePattern("_UNNAMED_$$"); - setGenerateUniqueColumnNames(false); - break; - - case MSSQLServer: - // https://docs.microsoft.com/en-us/sql/sql-server/maximum-capacity-specifications-for-sql-server - setMaxIdentiferLength(128); - // allows [] around names - setRegularExpressionMatchAllowed("(?m)(?s)[A-Za-z0-9_\\[\\]]+"); - setRegularExpressionMatchDisallowed("(?m)(?s)[^A-Za-z0-9_\\[\\]]"); - setDefaultColumnNamePattern("_UNNAMED_$$"); - setGenerateUniqueColumnNames(false); - break; - - case PostgreSQL: - // https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html - setMaxIdentiferLength(63); - setRegularExpressionMatchAllowed("(?m)(?s)[A-Za-z0-9_\\$]+"); - setRegularExpressionMatchDisallowed("(?m)(?s)[^A-Za-z0-9_\\$]"); - setDefaultColumnNamePattern("_UNNAMED_$$"); - setGenerateUniqueColumnNames(false); - break; - - case MySQL: - // https://dev.mysql.com/doc/refman/8.0/en/identifiers.html - // https://mariadb.com/kb/en/library/identifier-names/ - setMaxIdentiferLength(64); - setRegularExpressionMatchAllowed("(?m)(?s)`?[A-Za-z0-9_`\\$]+`?"); - setRegularExpressionMatchDisallowed("(?m)(?s)[^A-Za-z0-9_`\\$]"); - setDefaultColumnNamePattern("_UNNAMED_$$"); - setGenerateUniqueColumnNames(false); - break; - - case REGULAR: - case DB2: - case Derby: - case HSQLDB: - case Ignite: - default: - setMaxIdentiferLength(Integer.MAX_VALUE); - setRegularExpressionMatchAllowed(null); - setRegularExpressionMatchDisallowed(null); - setDefaultColumnNamePattern("_UNNAMED_$$"); - setGenerateUniqueColumnNames(false); - break; - } - recompilePatterns(); - } - -} diff --git a/h2/src/main/org/h2/util/CurrentTimestamp.java b/h2/src/main/org/h2/util/CurrentTimestamp.java deleted file mode 100644 index 4079362309..0000000000 --- a/h2/src/main/org/h2/util/CurrentTimestamp.java +++ /dev/null @@ 
-1,30 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.util; - -import org.h2.value.ValueTimestampTimeZone; - -public final class CurrentTimestamp { - - /* - * Signatures of methods should match with - * h2/src/java9/src/org/h2/util/CurrentTimestamp.java and precompiled - * h2/src/java9/precompiled/org/h2/util/CurrentTimestamp.class. - */ - - /** - * Returns current timestamp. - * - * @return current timestamp - */ - public static ValueTimestampTimeZone get() { - return DateTimeUtils.timestampTimeZoneFromMillis(System.currentTimeMillis()); - } - - private CurrentTimestamp() { - } - -} diff --git a/h2/src/main/org/h2/util/DateTimeUtils.java b/h2/src/main/org/h2/util/DateTimeUtils.java index fb888ac3cb..1ee7c9118e 100644 --- a/h2/src/main/org/h2/util/DateTimeUtils.java +++ b/h2/src/main/org/h2/util/DateTimeUtils.java @@ -1,23 +1,22 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, and the - * EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, and the + * EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group * Iso8601: Initial Developer: Robert Rathsack (firstName dot lastName at gmx * dot de) */ package org.h2.util; -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.Calendar; -import java.util.GregorianCalendar; -import java.util.TimeZone; -import org.h2.engine.Mode; +import java.time.Instant; + +import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueDate; -import org.h2.value.ValueNull; import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; import org.h2.value.ValueTimestamp; import org.h2.value.ValueTimestampTimeZone; @@ -39,11 +38,6 @@ public class DateTimeUtils { */ public static final long SECONDS_PER_DAY = 24 * 60 * 60; - /** - * UTC time zone. - */ - public static final TimeZone UTC = TimeZone.getTimeZone("UTC"); - /** * The number of nanoseconds per second. */ @@ -64,271 +58,98 @@ public class DateTimeUtils { */ public static final long NANOS_PER_DAY = MILLIS_PER_DAY * 1_000_000; - private static final int SHIFT_YEAR = 9; - private static final int SHIFT_MONTH = 5; - /** - * Date value for 1970-01-01. + * The offset of year bits in date values. */ - public static final int EPOCH_DATE_VALUE = (1970 << SHIFT_YEAR) + (1 << SHIFT_MONTH) + 1; - - private static final int[] NORMAL_DAYS_PER_MONTH = { 0, 31, 28, 31, 30, 31, - 30, 31, 31, 30, 31, 30, 31 }; + public static final int SHIFT_YEAR = 9; /** - * Offsets of month within a year, starting with March, April,... + * The offset of month bits in date values. */ - private static final int[] DAYS_OFFSET = { 0, 31, 61, 92, 122, 153, 184, - 214, 245, 275, 306, 337, 366 }; + public static final int SHIFT_MONTH = 5; /** - * Multipliers for {@link #convertScale(long, int)}. + * Date value for 1970-01-01. 
*/ - private static final int[] CONVERT_SCALE_TABLE = { 1_000_000_000, 100_000_000, - 10_000_000, 1_000_000, 100_000, 10_000, 1_000, 100, 10 }; + public static final int EPOCH_DATE_VALUE = (1970 << SHIFT_YEAR) + (1 << SHIFT_MONTH) + 1; /** - * The thread local. Can not override initialValue because this would result - * in an inner class, which would not be garbage collected in a web - * container, and prevent the class loader of H2 from being garbage - * collected. Using a ThreadLocal on a system class like Calendar does not - * have that problem, and while it is still a small memory leak, it is not a - * class loader memory leak. + * Minimum possible date value. */ - private static final ThreadLocal CACHED_CALENDAR = new ThreadLocal<>(); + public static final long MIN_DATE_VALUE = (-1_000_000_000L << SHIFT_YEAR) + (1 << SHIFT_MONTH) + 1; /** - * A cached instance of Calendar used when a timezone is specified. + * Maximum possible date value. */ - private static final ThreadLocal CACHED_CALENDAR_NON_DEFAULT_TIMEZONE = - new ThreadLocal<>(); + public static final long MAX_DATE_VALUE = (1_000_000_000L << SHIFT_YEAR) + (12 << SHIFT_MONTH) + 31; - /** - * Cached local time zone. - */ - private static volatile TimeZone timeZone; + private static final int[] NORMAL_DAYS_PER_MONTH = { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; /** - * Raw offset doesn't change during DST transitions, but changes during - * other transitions that some time zones have. H2 1.4.193 and later - * versions use zone offset that is valid for startup time for performance - * reasons. This code is now used only by old PageStore engine and its - * datetime storage code has issues with all time zone transitions, so this - * buggy logic is preserved as is too. + * Multipliers for {@link #convertScale(long, int, long)} and + * {@link #appendNanos(StringBuilder, int)}. 
*/ - private static int zoneOffsetMillis = createGregorianCalendar().get(Calendar.ZONE_OFFSET); + private static final int[] FRACTIONAL_SECONDS_TABLE = { 1_000_000_000, 100_000_000, + 10_000_000, 1_000_000, 100_000, 10_000, 1_000, 100, 10, 1 }; + + private static volatile TimeZoneProvider LOCAL; private DateTimeUtils() { // utility class } - /** - * Returns local time zone offset for a specified timestamp. - * - * @param ms milliseconds since Epoch in UTC - * @return local time zone offset - */ - public static int getTimeZoneOffset(long ms) { - TimeZone tz = timeZone; - if (tz == null) { - timeZone = tz = TimeZone.getDefault(); - } - return tz.getOffset(ms); - } - /** * Reset the cached calendar for default timezone, for example after * changing the default timezone. */ public static void resetCalendar() { - CACHED_CALENDAR.remove(); - timeZone = null; - zoneOffsetMillis = createGregorianCalendar().get(Calendar.ZONE_OFFSET); - } - - /** - * Get a calendar for the default timezone. - * - * @return a calendar instance. A cached instance is returned where possible - */ - public static GregorianCalendar getCalendar() { - GregorianCalendar c = CACHED_CALENDAR.get(); - if (c == null) { - c = createGregorianCalendar(); - CACHED_CALENDAR.set(c); - } - c.clear(); - return c; - } - - /** - * Get a calendar for the given timezone. - * - * @param tz timezone for the calendar, is never null - * @return a calendar instance. A cached instance is returned where possible - */ - private static GregorianCalendar getCalendar(TimeZone tz) { - GregorianCalendar c = CACHED_CALENDAR_NON_DEFAULT_TIMEZONE.get(); - if (c == null || !c.getTimeZone().equals(tz)) { - c = createGregorianCalendar(tz); - CACHED_CALENDAR_NON_DEFAULT_TIMEZONE.set(c); - } - c.clear(); - return c; - } - - /** - * Creates a Gregorian calendar for the default timezone using the default - * locale. Dates in H2 are represented in a Gregorian calendar. 
So this - * method should be used instead of Calendar.getInstance() to ensure that - * the Gregorian calendar is used for all date processing instead of a - * default locale calendar that can be non-Gregorian in some locales. - * - * @return a new calendar instance. - */ - public static GregorianCalendar createGregorianCalendar() { - return new GregorianCalendar(); + LOCAL = null; } /** - * Creates a Gregorian calendar for the given timezone using the default - * locale. Dates in H2 are represented in a Gregorian calendar. So this - * method should be used instead of Calendar.getInstance() to ensure that - * the Gregorian calendar is used for all date processing instead of a - * default locale calendar that can be non-Gregorian in some locales. + * Get the time zone provider for the default time zone. * - * @param tz timezone for the calendar, is never null - * @return a new calendar instance. + * @return the time zone provider for the default time zone */ - public static GregorianCalendar createGregorianCalendar(TimeZone tz) { - return new GregorianCalendar(tz); - } - - /** - * Convert the date to the specified time zone. - * - * @param value the date (might be ValueNull) - * @param calendar the calendar - * @return the date using the correct time zone - */ - public static Date convertDate(Value value, Calendar calendar) { - if (value == ValueNull.INSTANCE) { - return null; + public static TimeZoneProvider getTimeZone() { + TimeZoneProvider local = LOCAL; + if (local == null) { + LOCAL = local = TimeZoneProvider.getDefault(); } - ValueDate d = (ValueDate) value.convertTo(Value.DATE); - Calendar cal = (Calendar) calendar.clone(); - cal.clear(); - cal.setLenient(true); - long dateValue = d.getDateValue(); - long ms = convertToMillis(cal, yearFromDateValue(dateValue), - monthFromDateValue(dateValue), dayFromDateValue(dateValue), 0, - 0, 0, 0); - return new Date(ms); + return local; } /** - * Convert the time to the specified time zone. + * Returns current timestamp. 
* - * @param value the time (might be ValueNull) - * @param calendar the calendar - * @return the time using the correct time zone + * @param timeZone + * the time zone + * @return current timestamp */ - public static Time convertTime(Value value, Calendar calendar) { - if (value == ValueNull.INSTANCE) { - return null; - } - ValueTime t = (ValueTime) value.convertTo(Value.TIME); - Calendar cal = (Calendar) calendar.clone(); - cal.clear(); - cal.setLenient(true); - long nanos = t.getNanos(); - long millis = nanos / 1_000_000; - nanos -= millis * 1_000_000; - long s = millis / 1_000; - millis -= s * 1_000; - long m = s / 60; - s -= m * 60; - long h = m / 60; - m -= h * 60; - return new Time(convertToMillis(cal, 1970, 1, 1, (int) h, (int) m, (int) s, (int) millis)); + public static ValueTimestampTimeZone currentTimestamp(TimeZoneProvider timeZone) { + return currentTimestamp(timeZone, Instant.now()); } /** - * Convert the timestamp to the specified time zone. + * Returns current timestamp using the specified instant for its value. 
* - * @param value the timestamp (might be ValueNull) - * @param calendar the calendar - * @return the timestamp using the correct time zone - */ - public static Timestamp convertTimestamp(Value value, Calendar calendar) { - if (value == ValueNull.INSTANCE) { - return null; - } - ValueTimestamp ts = (ValueTimestamp) value.convertTo(Value.TIMESTAMP); - Calendar cal = (Calendar) calendar.clone(); - cal.clear(); - cal.setLenient(true); - long dateValue = ts.getDateValue(); - long nanos = ts.getTimeNanos(); - long millis = nanos / 1_000_000; - nanos -= millis * 1_000_000; - long s = millis / 1_000; - millis -= s * 1_000; - long m = s / 60; - s -= m * 60; - long h = m / 60; - m -= h * 60; - long ms = convertToMillis(cal, yearFromDateValue(dateValue), - monthFromDateValue(dateValue), dayFromDateValue(dateValue), - (int) h, (int) m, (int) s, (int) millis); - Timestamp x = new Timestamp(ms); - x.setNanos((int) (nanos + millis * 1_000_000)); - return x; - } - - /** - * Convert a java.util.Date using the specified calendar. - * - * @param x the date - * @param calendar the calendar - * @return the date - */ - public static ValueDate convertDate(Date x, Calendar calendar) { - Calendar cal = (Calendar) calendar.clone(); - cal.setTimeInMillis(x.getTime()); - long dateValue = dateValueFromCalendar(cal); - return ValueDate.fromDateValue(dateValue); - } - - /** - * Convert the time using the specified calendar. - * - * @param x the time - * @param calendar the calendar - * @return the time - */ - public static ValueTime convertTime(Time x, Calendar calendar) { - Calendar cal = (Calendar) calendar.clone(); - cal.setTimeInMillis(x.getTime()); - long nanos = nanosFromCalendar(cal); - return ValueTime.fromNanos(nanos); - } - - /** - * Convert the timestamp using the specified calendar. 
- * - * @param x the time - * @param calendar the calendar - * @return the timestamp - */ - public static ValueTimestamp convertTimestamp(Timestamp x, - Calendar calendar) { - Calendar cal = (Calendar) calendar.clone(); - cal.setTimeInMillis(x.getTime()); - long dateValue = dateValueFromCalendar(cal); - long nanos = nanosFromCalendar(cal); - nanos += x.getNanos() % 1_000_000; - return ValueTimestamp.fromDateValueAndNanos(dateValue, nanos); + * @param timeZone + * the time zone + * @param now + * timestamp source, must be greater than or equal to + * 1970-01-01T00:00:00Z + * @return current timestamp + */ + public static ValueTimestampTimeZone currentTimestamp(TimeZoneProvider timeZone, Instant now) { + /* + * This code intentionally does not support properly dates before UNIX + * epoch because such support is not required for current dates. + */ + long second = now.getEpochSecond(); + int offset = timeZone.getTimeZoneOffsetUTC(second); + second += offset; + return ValueTimestampTimeZone.fromDateValueAndNanos(dateValueFromAbsoluteDay(second / SECONDS_PER_DAY), + second % SECONDS_PER_DAY * 1_000_000_000 + now.getNano(), offset); } /** @@ -484,38 +305,27 @@ static int parseNanos(String s, int start, int end) { return nanos; } - /** - * See: - * https://stackoverflow.com/questions/3976616/how-to-find-nth-occurrence-of-character-in-a-string#answer-3976656 - */ - private static int findNthIndexOf(String str, char chr, int n) { - int pos = str.indexOf(chr); - while (--n > 0 && pos != -1) { - pos = str.indexOf(chr, pos + 1); - } - return pos; - } - /** * Parses timestamp value from the specified string. 
* * @param s * string to parse - * @param mode - * database mode, or {@code null} + * @param provider + * the cast information provider, may be {@code null} for + * Standard-compliant literals * @param withTimeZone * if {@code true} return {@link ValueTimestampTimeZone} instead of * {@link ValueTimestamp} * @return parsed timestamp */ - public static Value parseTimestamp(String s, Mode mode, boolean withTimeZone) { + public static Value parseTimestamp(String s, CastDataProvider provider, boolean withTimeZone) { int dateEnd = s.indexOf(' '); if (dateEnd < 0) { // ISO 8601 compatibility dateEnd = s.indexOf('T'); - if (dateEnd < 0 && mode != null && mode.allowDB2TimestampFormat) { + if (dateEnd < 0 && provider != null && provider.getMode().allowDB2TimestampFormat) { // DB2 also allows dash between date and time - dateEnd = findNthIndexOf(s, '-', 3); + dateEnd = s.indexOf('-', s.indexOf('-', s.indexOf('-') + 1) + 1); } } int timeStart; @@ -527,19 +337,19 @@ public static Value parseTimestamp(String s, Mode mode, boolean withTimeZone) { } long dateValue = parseDateValue(s, 0, dateEnd); long nanos; - short tzMinutes = 0; + TimeZoneProvider tz = null; if (timeStart < 0) { nanos = 0; } else { - int timeEnd = s.length(); - TimeZone tz = null; + dateEnd++; + int timeEnd; if (s.endsWith("Z")) { - tz = UTC; - timeEnd--; + tz = TimeZoneProvider.UTC; + timeEnd = s.length() - 1; } else { - int timeZoneStart = s.indexOf('+', dateEnd + 1); + int timeZoneStart = s.indexOf('+', dateEnd); if (timeZoneStart < 0) { - timeZoneStart = s.indexOf('-', dateEnd + 1); + timeZoneStart = s.indexOf('-', dateEnd); } if (timeZoneStart >= 0) { // Allow [timeZoneName] part after time zone offset @@ -547,155 +357,99 @@ public static Value parseTimestamp(String s, Mode mode, boolean withTimeZone) { if (offsetEnd < 0) { offsetEnd = s.length(); } - String tzName = "GMT" + s.substring(timeZoneStart, offsetEnd); - tz = TimeZone.getTimeZone(tzName); - if (!tz.getID().startsWith(tzName)) { - throw new 
IllegalArgumentException( - tzName + " (" + tz.getID() + "?)"); - } + tz = TimeZoneProvider.ofId(s.substring(timeZoneStart, offsetEnd)); if (s.charAt(timeZoneStart - 1) == ' ') { timeZoneStart--; } timeEnd = timeZoneStart; } else { - timeZoneStart = s.indexOf(' ', dateEnd + 1); + timeZoneStart = s.indexOf(' ', dateEnd); if (timeZoneStart > 0) { - String tzName = s.substring(timeZoneStart + 1); - tz = TimeZone.getTimeZone(tzName); - if (!tz.getID().startsWith(tzName)) { - throw new IllegalArgumentException(tzName); - } + tz = TimeZoneProvider.ofId(s.substring(timeZoneStart + 1)); timeEnd = timeZoneStart; + } else { + timeEnd = s.length(); } } } - nanos = parseTimeNanos(s, dateEnd + 1, timeEnd); - if (tz != null) { - if (withTimeZone) { - if (tz != UTC) { - long millis = convertDateTimeValueToMillis(tz, dateValue, nanos / 1_000_000); - tzMinutes = (short) (tz.getOffset(millis) / 60_000); - } - } else { - long millis = convertDateTimeValueToMillis(tz, dateValue, nanos / 1_000_000); - millis += getTimeZoneOffset(millis); - dateValue = dateValueFromLocalMillis(millis); - nanos = nanos % 1_000_000 + nanosFromLocalMillis(millis); - } - } + nanos = parseTimeNanos(s, dateEnd, timeEnd); } if (withTimeZone) { - return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, nanos, tzMinutes); + int tzSeconds; + if (tz == null) { + tz = provider != null ? provider.currentTimeZone() : DateTimeUtils.getTimeZone(); + } + if (tz != TimeZoneProvider.UTC) { + tzSeconds = tz.getTimeZoneOffsetUTC(tz.getEpochSecondsFromLocal(dateValue, nanos)); + } else { + tzSeconds = 0; + } + return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, nanos, tzSeconds); + } else if (tz != null) { + long seconds = tz.getEpochSecondsFromLocal(dateValue, nanos); + seconds += (provider != null ? 
provider.currentTimeZone() : DateTimeUtils.getTimeZone()) + .getTimeZoneOffsetUTC(seconds); + dateValue = dateValueFromLocalSeconds(seconds); + nanos = nanos % 1_000_000_000 + nanosFromLocalSeconds(seconds); } return ValueTimestamp.fromDateValueAndNanos(dateValue, nanos); } /** - * Calculates the time zone offset in minutes for the specified time zone, date - * value, and nanoseconds since midnight. + * Parses TIME WITH TIME ZONE value from the specified string. * - * @param tz - * time zone, or {@code null} for default - * @param dateValue - * date value - * @param timeNanos - * nanoseconds since midnight - * @return time zone offset in milliseconds - */ - public static int getTimeZoneOffsetMillis(TimeZone tz, long dateValue, long timeNanos) { - long msec = timeNanos / 1_000_000; - long utc = convertDateTimeValueToMillis(tz, dateValue, msec); - long local = absoluteDayFromDateValue(dateValue) * MILLIS_PER_DAY + msec; - return (int) (local - utc); + * @param s + * string to parse + * @param provider + * the cast information provider, or {@code null} + * @return parsed time with time zone + */ + public static ValueTimeTimeZone parseTimeWithTimeZone(String s, CastDataProvider provider) { + int timeEnd; + TimeZoneProvider tz; + if (s.endsWith("Z")) { + tz = TimeZoneProvider.UTC; + timeEnd = s.length() - 1; + } else { + int timeZoneStart = s.indexOf('+', 1); + if (timeZoneStart < 0) { + timeZoneStart = s.indexOf('-', 1); + } + if (timeZoneStart >= 0) { + tz = TimeZoneProvider.ofId(s.substring(timeZoneStart)); + if (s.charAt(timeZoneStart - 1) == ' ') { + timeZoneStart--; + } + timeEnd = timeZoneStart; + } else { + timeZoneStart = s.indexOf(' ', 1); + if (timeZoneStart > 0) { + tz = TimeZoneProvider.ofId(s.substring(timeZoneStart + 1)); + timeEnd = timeZoneStart; + } else { + throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, "TIME WITH TIME ZONE", s); + } + } + if (!tz.hasFixedOffset()) { + throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, "TIME 
WITH TIME ZONE", s); + } + } + return ValueTimeTimeZone.fromNanos(parseTimeNanos(s, 0, timeEnd), tz.getTimeZoneOffsetUTC(0L)); } /** - * Calculates the milliseconds since epoch for the specified date value, + * Calculates the seconds since epoch for the specified date value, * nanoseconds since midnight, and time zone offset. * @param dateValue * date value * @param timeNanos * nanoseconds since midnight - * @param offsetMins - * time zone offset in minutes - * @return milliseconds since epoch in UTC + * @param offsetSeconds + * time zone offset in seconds + * @return seconds since epoch in UTC */ - public static long getMillis(long dateValue, long timeNanos, short offsetMins) { - return absoluteDayFromDateValue(dateValue) * MILLIS_PER_DAY - + timeNanos / 1_000_000 - offsetMins * 60_000; - } - - /** - * Calculate the milliseconds since 1970-01-01 (UTC) for the given date and - * time (in the specified timezone). - * - * @param tz the timezone of the parameters, or null for the default - * timezone - * @param year the absolute year (positive or negative) - * @param month the month (1-12) - * @param day the day (1-31) - * @param hour the hour (0-23) - * @param minute the minutes (0-59) - * @param second the number of seconds (0-59) - * @param millis the number of milliseconds - * @return the number of milliseconds (UTC) - */ - public static long getMillis(TimeZone tz, int year, int month, int day, - int hour, int minute, int second, int millis) { - GregorianCalendar c; - if (tz == null) { - c = getCalendar(); - } else { - c = getCalendar(tz); - } - c.setLenient(false); - try { - return convertToMillis(c, year, month, day, hour, minute, second, millis); - } catch (IllegalArgumentException e) { - // special case: if the time simply doesn't exist because of - // daylight saving time changes, use the lenient version - String message = e.toString(); - if (message.indexOf("HOUR_OF_DAY") > 0) { - if (hour < 0 || hour > 23) { - throw e; - } - } else if 
(message.indexOf("DAY_OF_MONTH") > 0) { - int maxDay; - if (month == 2) { - maxDay = c.isLeapYear(year) ? 29 : 28; - } else { - maxDay = NORMAL_DAYS_PER_MONTH[month]; - } - if (day < 1 || day > maxDay) { - throw e; - } - // DAY_OF_MONTH is thrown for years > 2037 - // using the timezone Brasilia and others, - // for example for 2042-10-12 00:00:00. - hour += 6; - } - c.setLenient(true); - return convertToMillis(c, year, month, day, hour, minute, second, millis); - } - } - - private static long convertToMillis(Calendar cal, int year, int month, int day, - int hour, int minute, int second, int millis) { - if (year <= 0) { - cal.set(Calendar.ERA, GregorianCalendar.BC); - cal.set(Calendar.YEAR, 1 - year); - } else { - cal.set(Calendar.ERA, GregorianCalendar.AD); - cal.set(Calendar.YEAR, year); - } - // january is 0 - cal.set(Calendar.MONTH, month - 1); - cal.set(Calendar.DAY_OF_MONTH, day); - cal.set(Calendar.HOUR_OF_DAY, hour); - cal.set(Calendar.MINUTE, minute); - cal.set(Calendar.SECOND, second); - cal.set(Calendar.MILLISECOND, millis); - return cal.getTimeInMillis(); + public static long getEpochSeconds(long dateValue, long timeNanos, int offsetSeconds) { + return absoluteDayFromDateValue(dateValue) * SECONDS_PER_DAY + timeNanos / NANOS_PER_SECOND - offsetSeconds; } /** @@ -703,9 +457,11 @@ private static long convertToMillis(Calendar cal, int year, int month, int day, * * @param value * value to extract fields from + * @param provider + * the cast information provider * @return array with date value and nanos of day */ - public static long[] dateAndTimeFromValue(Value value) { + public static long[] dateAndTimeFromValue(Value value, CastDataProvider provider) { long dateValue = EPOCH_DATE_VALUE; long timeNanos = 0; if (value instanceof ValueTimestamp) { @@ -720,8 +476,10 @@ public static long[] dateAndTimeFromValue(Value value) { ValueTimestampTimeZone v = (ValueTimestampTimeZone) value; dateValue = v.getDateValue(); timeNanos = v.getTimeNanos(); + } else if 
(value instanceof ValueTimeTimeZone) { + timeNanos = ((ValueTimeTimeZone) value).getNanos(); } else { - ValueTimestamp v = (ValueTimestamp) value.convertTo(Value.TIMESTAMP); + ValueTimestamp v = (ValueTimestamp) value.convertTo(TypeInfo.TYPE_TIMESTAMP, provider); dateValue = v.getDateValue(); timeNanos = v.getTimeNanos(); } @@ -730,8 +488,8 @@ public static long[] dateAndTimeFromValue(Value value) { /** * Creates a new date-time value with the same type as original value. If - * original value is a ValueTimestampTimeZone, returned value will have the same - * time zone offset as original value. + * original value is a ValueTimestampTimeZone or ValueTimeTimeZone, returned + * value will have the same time zone offset as original value. * * @param original * original value @@ -739,49 +497,23 @@ public static long[] dateAndTimeFromValue(Value value) { * date value for the returned value * @param timeNanos * nanos of day for the returned value - * @param forceTimestamp - * if {@code true} return ValueTimestamp if original argument is - * ValueDate or ValueTime * @return new value with specified date value and nanos of day */ - public static Value dateTimeToValue(Value original, long dateValue, long timeNanos, boolean forceTimestamp) { - if (!(original instanceof ValueTimestamp)) { - if (!forceTimestamp) { - if (original instanceof ValueDate) { - return ValueDate.fromDateValue(dateValue); - } - if (original instanceof ValueTime) { - return ValueTime.fromNanos(timeNanos); - } - } - if (original instanceof ValueTimestampTimeZone) { - return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos, - ((ValueTimestampTimeZone) original).getTimeZoneOffsetMins()); - } + public static Value dateTimeToValue(Value original, long dateValue, long timeNanos) { + switch (original.getValueType()) { + case Value.DATE: + return ValueDate.fromDateValue(dateValue); + case Value.TIME: + return ValueTime.fromNanos(timeNanos); + case Value.TIME_TZ: + return 
ValueTimeTimeZone.fromNanos(timeNanos, ((ValueTimeTimeZone) original).getTimeZoneOffsetSeconds()); + case Value.TIMESTAMP: + default: + return ValueTimestamp.fromDateValueAndNanos(dateValue, timeNanos); + case Value.TIMESTAMP_TZ: + return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos, + ((ValueTimestampTimeZone) original).getTimeZoneOffsetSeconds()); } - return ValueTimestamp.fromDateValueAndNanos(dateValue, timeNanos); - } - - /** - * Get the number of milliseconds since 1970-01-01 in the local timezone, - * but without daylight saving time into account. - * - * @param d the date - * @return the milliseconds - */ - public static long getTimeLocalWithoutDst(java.util.Date d) { - return d.getTime() + zoneOffsetMillis; - } - - /** - * Convert the number of milliseconds since 1970-01-01 in the local timezone - * to UTC, but without daylight saving time into account. - * - * @param millis the number of milliseconds in the local timezone - * @return the number of milliseconds in UTC - */ - public static long getTimeUTCWithoutDst(long millis) { - return millis - zoneOffsetMillis; } /** @@ -818,7 +550,16 @@ public static int getDayOfWeekFromAbsolute(long absoluteValue, int firstDayOfWee * @return number of day in year */ public static int getDayOfYear(long dateValue) { - return (int) (absoluteDayFromDateValue(dateValue) - absoluteDayFromYear(yearFromDateValue(dateValue))) + 1; + int m = monthFromDateValue(dateValue); + int a = (367 * m - 362) / 12 + dayFromDateValue(dateValue); + if (m > 2) { + a--; + long y = yearFromDateValue(dateValue); + if ((y & 3) != 0 || (y % 100 == 0 && y % 400 != 0)) { + a--; + } + } + return a; } /** @@ -886,19 +627,30 @@ public static int getSundayDayOfWeek(long dateValue) { public static int getWeekOfYear(long dateValue, int firstDayOfWeek, int minimalDaysInFirstWeek) { long abs = absoluteDayFromDateValue(dateValue); int year = yearFromDateValue(dateValue); - long base = getWeekOfYearBase(year, firstDayOfWeek, 
minimalDaysInFirstWeek); + long base = getWeekYearAbsoluteStart(year, firstDayOfWeek, minimalDaysInFirstWeek); if (abs - base < 0) { - base = getWeekOfYearBase(year - 1, firstDayOfWeek, minimalDaysInFirstWeek); + base = getWeekYearAbsoluteStart(year - 1, firstDayOfWeek, minimalDaysInFirstWeek); } else if (monthFromDateValue(dateValue) == 12 && 24 + minimalDaysInFirstWeek < dayFromDateValue(dateValue)) { - if (abs >= getWeekOfYearBase(year + 1, firstDayOfWeek, minimalDaysInFirstWeek)) { + if (abs >= getWeekYearAbsoluteStart(year + 1, firstDayOfWeek, minimalDaysInFirstWeek)) { return 1; } } return (int) ((abs - base) / 7) + 1; } - private static long getWeekOfYearBase(int year, int firstDayOfWeek, int minimalDaysInFirstWeek) { - long first = absoluteDayFromYear(year); + /** + * Get absolute day of the first day in the week year. + * + * @param weekYear + * the week year + * @param firstDayOfWeek + * first day of week, Monday as 1, Sunday as 7 or 0 + * @param minimalDaysInFirstWeek + * minimal days in first week of year + * @return absolute day of the first day in the week year + */ + public static long getWeekYearAbsoluteStart(int weekYear, int firstDayOfWeek, int minimalDaysInFirstWeek) { + long first = absoluteDayFromYear(weekYear); int daysInFirstWeek = 8 - getDayOfWeekFromAbsolute(first, firstDayOfWeek); long base = first + daysInFirstWeek; if (daysInFirstWeek >= minimalDaysInFirstWeek) { @@ -922,11 +674,11 @@ private static long getWeekOfYearBase(int year, int firstDayOfWeek, int minimalD public static int getWeekYear(long dateValue, int firstDayOfWeek, int minimalDaysInFirstWeek) { long abs = absoluteDayFromDateValue(dateValue); int year = yearFromDateValue(dateValue); - long base = getWeekOfYearBase(year, firstDayOfWeek, minimalDaysInFirstWeek); - if (abs - base < 0) { + long base = getWeekYearAbsoluteStart(year, firstDayOfWeek, minimalDaysInFirstWeek); + if (abs < base) { return year - 1; } else if (monthFromDateValue(dateValue) == 12 && 24 + 
minimalDaysInFirstWeek < dayFromDateValue(dateValue)) { - if (abs >= getWeekOfYearBase(year + 1, firstDayOfWeek, minimalDaysInFirstWeek)) { + if (abs >= getWeekYearAbsoluteStart(year + 1, firstDayOfWeek, minimalDaysInFirstWeek)) { return year + 1; } } @@ -944,13 +696,7 @@ public static int getDaysInMonth(int year, int month) { if (month != 2) { return NORMAL_DAYS_PER_MONTH[month]; } - // All leap years divisible by 4 - return (year & 3) == 0 - // All such years before 1582 are Julian and leap - && (year < 1582 - // Otherwise check Gregorian conditions - || year % 100 != 0 || year % 400 == 0) - ? 29 : 28; + return (year & 3) == 0 && (year % 100 != 0 || year % 400 == 0) ? 29 : 28; } /** @@ -962,97 +708,7 @@ public static int getDaysInMonth(int year, int month) { * @return true if it is valid */ public static boolean isValidDate(int year, int month, int day) { - if (month < 1 || month > 12 || day < 1) { - return false; - } - if (year == 1582 && month == 10) { - // special case: days 1582-10-05 .. 1582-10-14 don't exist - return day < 5 || (day > 14 && day <= 31); - } - return day <= getDaysInMonth(year, month); - } - - /** - * Convert an encoded date value to a java.util.Date, using the default - * timezone. - * - * @param dateValue the date value - * @return the date - */ - public static Date convertDateValueToDate(long dateValue) { - long millis = getMillis(null, yearFromDateValue(dateValue), - monthFromDateValue(dateValue), dayFromDateValue(dateValue), 0, - 0, 0, 0); - return new Date(millis); - } - - /** - * Convert an encoded date-time value to millis, using the supplied timezone. 
- * - * @param tz the timezone - * @param dateValue the date value - * @param ms milliseconds of day - * @return the date - */ - public static long convertDateTimeValueToMillis(TimeZone tz, long dateValue, long ms) { - long second = ms / 1000; - ms -= second * 1000; - int minute = (int) (second / 60); - second -= minute * 60; - int hour = minute / 60; - minute -= hour * 60; - return getMillis(tz, yearFromDateValue(dateValue), monthFromDateValue(dateValue), dayFromDateValue(dateValue), - hour, minute, (int) second, (int) ms); - } - - /** - * Convert an encoded date value / time value to a timestamp, using the - * default timezone. - * - * @param dateValue the date value - * @param timeNanos the nanoseconds since midnight - * @return the timestamp - */ - public static Timestamp convertDateValueToTimestamp(long dateValue, - long timeNanos) { - Timestamp ts = new Timestamp(convertDateTimeValueToMillis(null, dateValue, timeNanos / 1_000_000)); - // This method expects the complete nanoseconds value including milliseconds - ts.setNanos((int) (timeNanos % NANOS_PER_SECOND)); - return ts; - } - - /** - * Convert an encoded date value / time value to a timestamp using the specified - * time zone offset. - * - * @param dateValue the date value - * @param timeNanos the nanoseconds since midnight - * @param offsetMins time zone offset in minutes - * @return the timestamp - */ - public static Timestamp convertTimestampTimeZoneToTimestamp(long dateValue, long timeNanos, short offsetMins) { - Timestamp ts = new Timestamp(getMillis(dateValue, timeNanos, offsetMins)); - ts.setNanos((int) (timeNanos % NANOS_PER_SECOND)); - return ts; - } - - /** - * Convert a time value to a time, using the default timezone. 
- * - * @param nanosSinceMidnight the nanoseconds since midnight - * @return the time - */ - public static Time convertNanoToTime(long nanosSinceMidnight) { - long millis = nanosSinceMidnight / 1_000_000; - long s = millis / 1_000; - millis -= s * 1_000; - long m = s / 60; - s -= m * 60; - long h = m / 60; - m -= h * 60; - long ms = getMillis(null, 1970, 1, 1, (int) (h % 24), (int) m, (int) s, - (int) millis); - return new Time(ms); + return month >= 1 && month <= 12 && day >= 1 && day <= getDaysInMonth(year, month); } /** @@ -1130,132 +786,46 @@ public static long dateValueFromDenormalizedDate(long year, long month, int day) } /** - * Convert a local datetime in millis to an encoded date. + * Convert a local seconds to an encoded date. * - * @param ms the milliseconds + * @param localSeconds the seconds since 1970-01-01 * @return the date value */ - public static long dateValueFromLocalMillis(long ms) { - long absoluteDay = ms / MILLIS_PER_DAY; + public static long dateValueFromLocalSeconds(long localSeconds) { + long absoluteDay = localSeconds / SECONDS_PER_DAY; // Round toward negative infinity - if (ms < 0 && (absoluteDay * MILLIS_PER_DAY != ms)) { + if (localSeconds < 0 && (absoluteDay * SECONDS_PER_DAY != localSeconds)) { absoluteDay--; } return dateValueFromAbsoluteDay(absoluteDay); } /** - * Calculate the encoded date value from a given calendar. - * - * @param cal the calendar - * @return the date value - */ - private static long dateValueFromCalendar(Calendar cal) { - int year = cal.get(Calendar.YEAR); - if (cal.get(Calendar.ERA) == GregorianCalendar.BC) { - year = 1 - year; - } - int month = cal.get(Calendar.MONTH) + 1; - int day = cal.get(Calendar.DAY_OF_MONTH); - return ((long) year << SHIFT_YEAR) | (month << SHIFT_MONTH) | day; - } - - /** - * Convert a time in milliseconds in local time to the nanoseconds since midnight. + * Convert a time in seconds in local time to the nanoseconds since midnight. 
* - * @param ms the milliseconds + * @param localSeconds the seconds since 1970-01-01 * @return the nanoseconds */ - public static long nanosFromLocalMillis(long ms) { - long absoluteDay = ms / MILLIS_PER_DAY; - // Round toward negative infinity - if (ms < 0 && (absoluteDay * MILLIS_PER_DAY != ms)) { - absoluteDay--; + public static long nanosFromLocalSeconds(long localSeconds) { + localSeconds %= SECONDS_PER_DAY; + if (localSeconds < 0) { + localSeconds += SECONDS_PER_DAY; } - return (ms - absoluteDay * MILLIS_PER_DAY) * 1_000_000; - } - - /** - * Convert a java.util.Calendar to nanoseconds since midnight. - * - * @param cal the calendar - * @return the nanoseconds - */ - private static long nanosFromCalendar(Calendar cal) { - int h = cal.get(Calendar.HOUR_OF_DAY); - int m = cal.get(Calendar.MINUTE); - int s = cal.get(Calendar.SECOND); - int millis = cal.get(Calendar.MILLISECOND); - return ((((((h * 60L) + m) * 60) + s) * 1000) + millis) * 1000000; + return localSeconds * NANOS_PER_SECOND; } /** - * Calculate the normalized timestamp. + * Calculate the normalized nanos of day. * - * @param absoluteDay the absolute day * @param nanos the nanoseconds (may be negative or larger than one day) - * @return the timestamp - */ - public static ValueTimestamp normalizeTimestamp(long absoluteDay, - long nanos) { - if (nanos > NANOS_PER_DAY || nanos < 0) { - long d; - if (nanos > NANOS_PER_DAY) { - d = nanos / NANOS_PER_DAY; - } else { - d = (nanos - NANOS_PER_DAY + 1) / NANOS_PER_DAY; - } - nanos -= d * NANOS_PER_DAY; - absoluteDay += d; - } - return ValueTimestamp.fromDateValueAndNanos( - dateValueFromAbsoluteDay(absoluteDay), nanos); - } - - /** - * Converts local date value and nanoseconds to timestamp with time zone. 
- * - * @param dateValue - * date value - * @param timeNanos - * nanoseconds since midnight - * @return timestamp with time zone + * @return the nanos of day within a day */ - public static ValueTimestampTimeZone timestampTimeZoneFromLocalDateValueAndNanos(long dateValue, long timeNanos) { - int timeZoneOffset = getTimeZoneOffsetMillis(null, dateValue, timeNanos); - int offsetMins = timeZoneOffset / 60_000; - int correction = timeZoneOffset % 60_000; - if (correction != 0) { - timeNanos -= correction; - if (timeNanos < 0) { - timeNanos += NANOS_PER_DAY; - dateValue = decrementDateValue(dateValue); - } else if (timeNanos >= NANOS_PER_DAY) { - timeNanos -= NANOS_PER_DAY; - dateValue = incrementDateValue(dateValue); - } - } - return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos, (short) offsetMins); - } - - /** - * Creates the instance of the {@link ValueTimestampTimeZone} from milliseconds. - * - * @param ms milliseconds since 1970-01-01 (UTC) - * @return timestamp with time zone with specified value and current time zone - */ - public static ValueTimestampTimeZone timestampTimeZoneFromMillis(long ms) { - int offset = getTimeZoneOffset(ms); - ms += offset; - long absoluteDay = ms / MILLIS_PER_DAY; - // Round toward negative infinity - if (ms < 0 && (absoluteDay * MILLIS_PER_DAY != ms)) { - absoluteDay--; + public static long normalizeNanosOfDay(long nanos) { + nanos %= NANOS_PER_DAY; + if (nanos < 0) { + nanos += NANOS_PER_DAY; } - return ValueTimestampTimeZone.fromDateValueAndNanos( - dateValueFromAbsoluteDay(absoluteDay), - (ms - absoluteDay * MILLIS_PER_DAY) * 1_000_000, - (short) (offset / 60_000)); + return nanos; } /** @@ -1266,14 +836,11 @@ public static ValueTimestampTimeZone timestampTimeZoneFromMillis(long ms) { * @return the absolute day */ public static long absoluteDayFromYear(long year) { - year--; - long a = ((year * 1461L) >> 2) - 719_177; - if (year < 1582) { - // Julian calendar - a += 13; - } else if (year < 1900 || year > 2099) 
{ - // Gregorian calendar (slow mode) - a += (year / 400) - (year / 100) + 15; + long a = 365 * year - 719_528; + if (year >= 0) { + a += (year + 3) / 4 - (year + 99) / 100 + (year + 399) / 400; + } else { + a -= year / -4 - year / -100 + year / -400; } return a; } @@ -1285,43 +852,24 @@ public static long absoluteDayFromYear(long year) { * @return the absolute day */ public static long absoluteDayFromDateValue(long dateValue) { - long y = yearFromDateValue(dateValue); - int m = monthFromDateValue(dateValue); - int d = dayFromDateValue(dateValue); - if (m <= 2) { - y--; - m += 12; - } - long a = ((y * 1461L) >> 2) + DAYS_OFFSET[m - 3] + d - 719_484; - if (y <= 1582 && ((y < 1582) || (m * 100 + d < 10_15))) { - // Julian calendar (cutover at 1582-10-04 / 1582-10-15) - a += 13; - } else if (y < 1900 || y > 2099) { - // Gregorian calendar (slow mode) - a += (y / 400) - (y / 100) + 15; - } - return a; + return absoluteDay(yearFromDateValue(dateValue), monthFromDateValue(dateValue), dayFromDateValue(dateValue)); } /** - * Calculate the absolute day from an encoded date value in proleptic Gregorian - * calendar. + * Calculate the absolute day. 
* - * @param dateValue the date value - * @return the absolute day in proleptic Gregorian calendar + * @param y year + * @param m month + * @param d day + * @return the absolute day */ - public static long prolepticGregorianAbsoluteDayFromDateValue(long dateValue) { - long y = yearFromDateValue(dateValue); - int m = monthFromDateValue(dateValue); - int d = dayFromDateValue(dateValue); - if (m <= 2) { - y--; - m += 12; - } - long a = ((y * 1461L) >> 2) + DAYS_OFFSET[m - 3] + d - 719_484; - if (y < 1900 || y > 2099) { - // Slow mode - a += (y / 400) - (y / 100) + 15; + static long absoluteDay(long y, int m, int d) { + long a = absoluteDayFromYear(y) + (367 * m - 362) / 12 + d - 1; + if (m > 2) { + a--; + if ((y & 3) != 0 || (y % 100 == 0 && y % 400 != 0)) { + a--; + } } return a; } @@ -1334,37 +882,26 @@ public static long prolepticGregorianAbsoluteDayFromDateValue(long dateValue) { */ public static long dateValueFromAbsoluteDay(long absoluteDay) { long d = absoluteDay + 719_468; - long y100, offset; - if (d > 578_040) { - // Gregorian calendar - long y400 = d / 146_097; - d -= y400 * 146_097; - y100 = d / 36_524; - d -= y100 * 36_524; - offset = y400 * 400 + y100 * 100; - } else { - // Julian calendar - y100 = 0; - d += 292_200_000_002L; - offset = -800_000_000; + long a = 0; + if (d < 0) { + a = (d + 1) / 146_097 - 1; + d -= a * 146_097; + a *= 400; } - long y4 = d / 1461; - d -= y4 * 1461; - long y = d / 365; - d -= y * 365; - if (d == 0 && (y == 4 || y100 == 4)) { + long y = (400 * d + 591) / 146_097; + int day = (int) (d - (365 * y + y / 4 - y / 100 + y / 400)); + if (day < 0) { y--; - d += 365; + day = (int) (d - (365 * y + y / 4 - y / 100 + y / 400)); } - y += offset + y4 * 4; - // month of a day - int m = ((int) d * 2 + 1) * 5 / 306; - d -= DAYS_OFFSET[m] - 1; + y += a; + int m = (day * 5 + 2) / 153; + day -= (m * 306 + 5) / 10 - 1; if (m >= 10) { y++; m -= 12; } - return dateValue(y, m + 3, (int) d); + return dateValue(y, m + 3, day); } /** @@ -1375,15 
+912,11 @@ public static long dateValueFromAbsoluteDay(long absoluteDay) { * @return the next date value */ public static long incrementDateValue(long dateValue) { - int year = yearFromDateValue(dateValue); - if (year == 1582) { - // Use slow way instead of rarely needed large custom code. - return dateValueFromAbsoluteDay(absoluteDayFromDateValue(dateValue) + 1); - } int day = dayFromDateValue(dateValue); if (day < 28) { return dateValue + 1; } + int year = yearFromDateValue(dateValue); int month = monthFromDateValue(dateValue); if (day < getDaysInMonth(year, month)) { return dateValue + 1; @@ -1405,14 +938,10 @@ public static long incrementDateValue(long dateValue) { * @return the previous date value */ public static long decrementDateValue(long dateValue) { - int year = yearFromDateValue(dateValue); - if (year == 1582) { - // Use slow way instead of rarely needed large custom code. - return dateValueFromAbsoluteDay(absoluteDayFromDateValue(dateValue) - 1); - } if (dayFromDateValue(dateValue) > 1) { return dateValue - 1; } + int year = yearFromDateValue(dateValue); int month = monthFromDateValue(dateValue); if (month > 1) { month--; @@ -1426,160 +955,197 @@ public static long decrementDateValue(long dateValue) { /** * Append a date to the string builder. 
* - * @param buff the target string builder + * @param builder the target string builder * @param dateValue the date value + * @return the specified string builder */ - public static void appendDate(StringBuilder buff, long dateValue) { + public static StringBuilder appendDate(StringBuilder builder, long dateValue) { int y = yearFromDateValue(dateValue); - int m = monthFromDateValue(dateValue); - int d = dayFromDateValue(dateValue); - if (y > 0 && y < 10_000) { - StringUtils.appendZeroPadded(buff, 4, y); + if (y < 1_000 && y > -1_000) { + if (y < 0) { + builder.append('-'); + y = -y; + } + StringUtils.appendZeroPadded(builder, 4, y); } else { - buff.append(y); + builder.append(y); } - buff.append('-'); - StringUtils.appendZeroPadded(buff, 2, m); - buff.append('-'); - StringUtils.appendZeroPadded(buff, 2, d); + StringUtils.appendTwoDigits(builder.append('-'), monthFromDateValue(dateValue)).append('-'); + return StringUtils.appendTwoDigits(builder, dayFromDateValue(dateValue)); } /** * Append a time to the string builder. * - * @param buff the target string builder + * @param builder the target string builder * @param nanos the time in nanoseconds + * @return the specified string builder */ - public static void appendTime(StringBuilder buff, long nanos) { + public static StringBuilder appendTime(StringBuilder builder, long nanos) { if (nanos < 0) { - buff.append('-'); + builder.append('-'); nanos = -nanos; } /* * nanos now either in range from 0 to Long.MAX_VALUE or equals to - * Long.MIN_VALUE. We need to divide nanos by 1000000 with unsigned division to - * get correct result. The simplest way to do this with such constraints is to - * divide -nanos by -1000000. + * Long.MIN_VALUE. We need to divide nanos by 1,000,000,000 with + * unsigned division to get correct result. The simplest way to do this + * with such constraints is to divide -nanos by -1,000,000,000. 
*/ - long ms = -nanos / -1_000_000; - nanos -= ms * 1_000_000; - long s = ms / 1_000; - ms -= s * 1_000; - long m = s / 60; + long s = -nanos / -1_000_000_000; + nanos -= s * 1_000_000_000; + int m = (int) (s / 60); s -= m * 60; - long h = m / 60; + int h = m / 60; m -= h * 60; - StringUtils.appendZeroPadded(buff, 2, h); - buff.append(':'); - StringUtils.appendZeroPadded(buff, 2, m); - buff.append(':'); - StringUtils.appendZeroPadded(buff, 2, s); - if (ms > 0 || nanos > 0) { - buff.append('.'); - StringUtils.appendZeroPadded(buff, 3, ms); - if (nanos > 0) { - StringUtils.appendZeroPadded(buff, 6, nanos); - } - stripTrailingZeroes(buff); - } + StringUtils.appendTwoDigits(builder, h).append(':'); + StringUtils.appendTwoDigits(builder, m).append(':'); + StringUtils.appendTwoDigits(builder, (int) s); + return appendNanos(builder, (int) nanos); } /** - * Skip trailing zeroes. + * Append nanoseconds of time, if any. * - * @param buff String buffer. - */ - static void stripTrailingZeroes(StringBuilder buff) { - int i = buff.length() - 1; - if (buff.charAt(i) == '0') { - while (buff.charAt(--i) == '0') { - // do nothing + * @param builder string builder to append to + * @param nanos nanoseconds of second + * @return the specified string builder + */ + static StringBuilder appendNanos(StringBuilder builder, int nanos) { + if (nanos > 0) { + builder.append('.'); + for (int i = 1; nanos < FRACTIONAL_SECONDS_TABLE[i]; i++) { + builder.append('0'); + } + if (nanos % 1_000 == 0) { + nanos /= 1_000; + if (nanos % 1_000 == 0) { + nanos /= 1_000; + } + } + if (nanos % 10 == 0) { + nanos /= 10; + if (nanos % 10 == 0) { + nanos /= 10; + } } - buff.setLength(i + 1); + builder.append(nanos); } + return builder; } /** * Append a time zone to the string builder. 
* - * @param buff the target string builder - * @param tz the time zone in minutes + * @param builder the target string builder + * @param tz the time zone offset in seconds + * @return the specified string builder */ - public static void appendTimeZone(StringBuilder buff, short tz) { + public static StringBuilder appendTimeZone(StringBuilder builder, int tz) { if (tz < 0) { - buff.append('-'); - tz = (short) -tz; + builder.append('-'); + tz = -tz; } else { - buff.append('+'); + builder.append('+'); } - int hours = tz / 60; - tz -= hours * 60; - int mins = tz; - StringUtils.appendZeroPadded(buff, 2, hours); - if (mins != 0) { - buff.append(':'); - StringUtils.appendZeroPadded(buff, 2, mins); + int rem = tz / 3_600; + StringUtils.appendTwoDigits(builder, rem); + tz -= rem * 3_600; + if (tz != 0) { + rem = tz / 60; + StringUtils.appendTwoDigits(builder.append(':'), rem); + tz -= rem * 60; + if (tz != 0) { + StringUtils.appendTwoDigits(builder.append(':'), tz); + } } + return builder; } /** - * Formats timestamp with time zone as string. - * - * @param buff the target string builder - * @param dateValue the year-month-day bit field - * @param timeNanos nanoseconds since midnight - * @param timeZoneOffsetMins the time zone offset in minutes - */ - public static void appendTimestampTimeZone(StringBuilder buff, long dateValue, long timeNanos, - short timeZoneOffsetMins) { - appendDate(buff, dateValue); - buff.append(' '); - appendTime(buff, timeNanos); - appendTimeZone(buff, timeZoneOffsetMins); - } - - /** - * Generates time zone name for the specified offset in minutes. + * Generates time zone name for the specified offset in seconds. 
* - * @param offsetMins - * offset in minutes + * @param offsetSeconds + * time zone offset in seconds * @return time zone name */ - public static String timeZoneNameFromOffsetMins(int offsetMins) { - if (offsetMins == 0) { + public static String timeZoneNameFromOffsetSeconds(int offsetSeconds) { + if (offsetSeconds == 0) { return "UTC"; } - StringBuilder b = new StringBuilder(9); + StringBuilder b = new StringBuilder(12); b.append("GMT"); - if (offsetMins < 0) { + if (offsetSeconds < 0) { b.append('-'); - offsetMins = -offsetMins; + offsetSeconds = -offsetSeconds; } else { b.append('+'); } - StringUtils.appendZeroPadded(b, 2, offsetMins / 60); - b.append(':'); - StringUtils.appendZeroPadded(b, 2, offsetMins % 60); + StringUtils.appendTwoDigits(b, offsetSeconds / 3_600).append(':'); + offsetSeconds %= 3_600; + StringUtils.appendTwoDigits(b, offsetSeconds / 60); + offsetSeconds %= 60; + if (offsetSeconds != 0) { + b.append(':'); + StringUtils.appendTwoDigits(b, offsetSeconds); + } return b.toString(); } + /** * Converts scale of nanoseconds. * * @param nanosOfDay nanoseconds of day * @param scale fractional seconds precision + * @param range the allowed range of values (0..range-1) * @return scaled value */ - public static long convertScale(long nanosOfDay, int scale) { + public static long convertScale(long nanosOfDay, int scale, long range) { if (scale >= 9) { return nanosOfDay; } - int m = CONVERT_SCALE_TABLE[scale]; + int m = FRACTIONAL_SECONDS_TABLE[scale]; long mod = nanosOfDay % m; if (mod >= m >>> 1) { nanosOfDay += m; } - return nanosOfDay - mod; + long r = nanosOfDay - mod; + if (r >= range) { + r = range - m; + } + return r; + } + + /** + * Moves timestamp with time zone to a new time zone. 
+ * + * @param dateValue the date value + * @param timeNanos the nanoseconds since midnight + * @param oldOffset old offset + * @param newOffset new offset + * @return timestamp with time zone with new offset + */ + public static ValueTimestampTimeZone timestampTimeZoneAtOffset(long dateValue, long timeNanos, int oldOffset, + int newOffset) { + timeNanos += (newOffset - oldOffset) * DateTimeUtils.NANOS_PER_SECOND; + // Value can be 18+18 hours before or after the limit + if (timeNanos < 0) { + timeNanos += DateTimeUtils.NANOS_PER_DAY; + dateValue = DateTimeUtils.decrementDateValue(dateValue); + if (timeNanos < 0) { + timeNanos += DateTimeUtils.NANOS_PER_DAY; + dateValue = DateTimeUtils.decrementDateValue(dateValue); + } + } else if (timeNanos >= DateTimeUtils.NANOS_PER_DAY) { + timeNanos -= DateTimeUtils.NANOS_PER_DAY; + dateValue = DateTimeUtils.incrementDateValue(dateValue); + if (timeNanos >= DateTimeUtils.NANOS_PER_DAY) { + timeNanos -= DateTimeUtils.NANOS_PER_DAY; + dateValue = DateTimeUtils.incrementDateValue(dateValue); + } + } + return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos, newOffset); } } diff --git a/h2/src/main/org/h2/util/DbDriverActivator.java b/h2/src/main/org/h2/util/DbDriverActivator.java index 0236595262..cf388f66b9 100644 --- a/h2/src/main/org/h2/util/DbDriverActivator.java +++ b/h2/src/main/org/h2/util/DbDriverActivator.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/DebuggingThreadLocal.java b/h2/src/main/org/h2/util/DebuggingThreadLocal.java index 0d16e1b377..9413de4d28 100644 --- a/h2/src/main/org/h2/util/DebuggingThreadLocal.java +++ b/h2/src/main/org/h2/util/DebuggingThreadLocal.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/DoneFuture.java b/h2/src/main/org/h2/util/DoneFuture.java deleted file mode 100644 index 000e2274fa..0000000000 --- a/h2/src/main/org/h2/util/DoneFuture.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.util; - -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Future which is already done. - * - * @param Result value. 
- * @author Sergi Vladykin - */ -public class DoneFuture implements Future { - final T x; - - public DoneFuture(T x) { - this.x = x; - } - - @Override - public T get() throws InterruptedException, ExecutionException { - return x; - } - - @Override - public T get(long timeout, TimeUnit unit) throws InterruptedException, - ExecutionException, TimeoutException { - return x; - } - - @Override - public boolean isDone() { - return true; - } - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - return false; - } - - @Override - public boolean isCancelled() { - return false; - } - - @Override - public String toString() { - return "DoneFuture->" + x; - } -} diff --git a/h2/src/main/org/h2/util/HasSQL.java b/h2/src/main/org/h2/util/HasSQL.java new file mode 100644 index 0000000000..a57716cc1a --- /dev/null +++ b/h2/src/main/org/h2/util/HasSQL.java @@ -0,0 +1,76 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util; + +/** + * An object that has an SQL representation. + */ +public interface HasSQL { + + /** + * Quote identifiers only when it is strictly required (different case or + * identifier is also a keyword). + */ + int QUOTE_ONLY_WHEN_REQUIRED = 1; + + /** + * Replace long LOB values with some generated values. + */ + int REPLACE_LOBS_FOR_TRACE = 2; + + /** + * Don't add casts around literals. + */ + int NO_CASTS = 4; + + /** + * Add execution plan information. + */ + int ADD_PLAN_INFORMATION = 8; + + /** + * Default flags. + */ + int DEFAULT_SQL_FLAGS = 0; + + /** + * Combined flags for trace. + */ + int TRACE_SQL_FLAGS = QUOTE_ONLY_WHEN_REQUIRED | REPLACE_LOBS_FOR_TRACE; + + /** + * Get a medium size SQL expression for debugging or tracing. + * + * @return the SQL expression + */ + default String getTraceSQL() { + return getSQL(TRACE_SQL_FLAGS); + } + + /** + * Get the SQL statement of this expression. 
This may not always be the + * original SQL statement, specially after optimization. + * + * @param sqlFlags + * formatting flags + * @return the SQL statement + */ + default String getSQL(int sqlFlags) { + return getSQL(new StringBuilder(), sqlFlags).toString(); + } + + /** + * Appends the SQL statement of this object to the specified builder. + * + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @return the specified string builder + */ + StringBuilder getSQL(StringBuilder builder, int sqlFlags); + +} diff --git a/h2/src/main/org/h2/util/HashBase.java b/h2/src/main/org/h2/util/HashBase.java deleted file mode 100644 index 6d2bf5949f..0000000000 --- a/h2/src/main/org/h2/util/HashBase.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.util; - - -/** - * The base for other hash classes. - */ -public abstract class HashBase { - - /** - * The maximum load, in percent. - * declared as long so we do long arithmetic so we don't overflow. - */ - private static final long MAX_LOAD = 90; - - /** - * The bit mask to get the index from the hash code. - */ - protected int mask; - - /** - * The number of slots in the table. - */ - protected int len; - - /** - * The number of occupied slots, excluding the zero key (if any). - */ - protected int size; - - /** - * The number of deleted slots. - */ - protected int deletedCount; - - /** - * The level. The number of slots is 2 ^ level. - */ - protected int level; - - /** - * Whether the zero key is used. - */ - protected boolean zeroKey; - - private int maxSize, minSize, maxDeleted; - - public HashBase() { - reset(2); - } - - /** - * Increase the size of the underlying table and re-distribute the elements. 
- * - * @param newLevel the new level - */ - protected abstract void rehash(int newLevel); - - /** - * Get the size of the map. - * - * @return the size - */ - public int size() { - return size + (zeroKey ? 1 : 0); - } - - /** - * Check the size before adding an entry. This method resizes the map if - * required. - */ - void checkSizePut() { - if (deletedCount > size) { - rehash(level); - } - if (size + deletedCount >= maxSize) { - rehash(level + 1); - } - } - - /** - * Check the size before removing an entry. This method resizes the map if - * required. - */ - protected void checkSizeRemove() { - if (size < minSize && level > 0) { - rehash(level - 1); - } else if (deletedCount > maxDeleted) { - rehash(level); - } - } - - /** - * Clear the map and reset the level to the specified value. - * - * @param newLevel the new level - */ - protected void reset(int newLevel) { - // can't exceed 30 or we will generate a negative value - // for the "len" field - if (newLevel > 30) { - throw new IllegalStateException("exceeded max size of hash table"); - } - size = 0; - level = newLevel; - len = 2 << level; - mask = len - 1; - minSize = (int) ((1 << level) * MAX_LOAD / 100); - maxSize = (int) (len * MAX_LOAD / 100); - deletedCount = 0; - maxDeleted = 20 + len / 2; - } - - /** - * Calculate the index for this hash code. - * - * @param hash the hash code - * @return the index - */ - protected int getIndex(int hash) { - return hash & mask; - } - -} diff --git a/h2/src/main/org/h2/util/IOUtils.java b/h2/src/main/org/h2/util/IOUtils.java index 8fe8409360..8a131a36a1 100644 --- a/h2/src/main/org/h2/util/IOUtils.java +++ b/h2/src/main/org/h2/util/IOUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util; @@ -10,6 +10,7 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.EOFException; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; @@ -22,7 +23,7 @@ import org.h2.engine.Constants; import org.h2.engine.SysProperties; -import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; import org.h2.store.fs.FileUtils; /** @@ -69,7 +70,7 @@ public static void skipFully(InputStream in, long skip) throws IOException { skip -= skipped; } } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } @@ -92,7 +93,7 @@ public static void skipFully(Reader reader, long skip) throws IOException { skip -= skipped; } } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } @@ -103,6 +104,7 @@ public static void skipFully(Reader reader, long skip) throws IOException { * @param in the input stream * @param out the output stream * @return the number of bytes copied + * @throws IOException on failure */ public static long copyAndClose(InputStream in, OutputStream out) throws IOException { @@ -111,7 +113,7 @@ public static long copyAndClose(InputStream in, OutputStream out) out.close(); return len; } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } finally { closeSilently(out); } @@ -124,13 +126,14 @@ public static long copyAndClose(InputStream in, OutputStream out) * @param in the input stream * @param out the output stream (null if writing is not required) * @return the number of bytes copied + * @throws IOException on failure */ public static long copyAndCloseInput(InputStream in, OutputStream out) throws IOException { try { return copy(in, out); } catch (Exception e) { - throw DbException.convertToIOException(e); + throw 
DataUtils.convertToIOException(e); } finally { closeSilently(in); } @@ -143,6 +146,7 @@ public static long copyAndCloseInput(InputStream in, OutputStream out) * @param in the input stream * @param out the output stream (null if writing is not required) * @return the number of bytes copied + * @throws IOException on failure */ public static long copy(InputStream in, OutputStream out) throws IOException { @@ -157,6 +161,7 @@ public static long copy(InputStream in, OutputStream out) * @param out the output stream (null if writing is not required) * @param length the maximum number of bytes to copy * @return the number of bytes copied + * @throws IOException on failure */ public static long copy(InputStream in, OutputStream out, long length) throws IOException { @@ -178,7 +183,7 @@ public static long copy(InputStream in, OutputStream out, long length) } return copied; } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } @@ -190,6 +195,7 @@ public static long copy(InputStream in, OutputStream out, long length) * @param out the writer (null if writing is not required) * @param length the maximum number of bytes to copy * @return the number of characters copied + * @throws IOException on failure */ public static long copyAndCloseInput(Reader in, Writer out, long length) throws IOException { @@ -205,13 +211,13 @@ public static long copyAndCloseInput(Reader in, Writer out, long length) if (out != null) { out.write(buffer, 0, len); } + copied += len; length -= len; len = (int) Math.min(length, Constants.IO_BUFFER_SIZE); - copied += len; } return copied; } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } finally { in.close(); } @@ -224,6 +230,7 @@ public static long copyAndCloseInput(Reader in, Writer out, long length) * @param length the maximum number of bytes to read, or -1 to read until * the end of file * @return the bytes read + * @throws 
IOException on failure */ public static byte[] readBytesAndClose(InputStream in, int length) throws IOException { @@ -236,7 +243,7 @@ public static byte[] readBytesAndClose(InputStream in, int length) copy(in, out, length); return out.toByteArray(); } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } finally { in.close(); } @@ -249,6 +256,7 @@ public static byte[] readBytesAndClose(InputStream in, int length) * @param length the maximum number of characters to read, or -1 to read * until the end of file * @return the string read + * @throws IOException on failure */ public static String readStringAndClose(Reader in, int length) throws IOException { @@ -274,6 +282,7 @@ public static String readStringAndClose(Reader in, int length) * @param buffer the output buffer * @param max the number of bytes to read at most * @return the number of bytes read, 0 meaning EOF + * @throws IOException on failure */ public static int readFully(InputStream in, byte[] buffer, int max) throws IOException { @@ -289,7 +298,7 @@ public static int readFully(InputStream in, byte[] buffer, int max) } return result; } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } @@ -302,6 +311,7 @@ public static int readFully(InputStream in, byte[] buffer, int max) * @param buffer the output buffer * @param max the number of characters to read at most * @return the number of characters read, 0 meaning EOF + * @throws IOException on failure */ public static int readFully(Reader in, char[] buffer, int max) throws IOException { @@ -317,24 +327,10 @@ public static int readFully(Reader in, char[] buffer, int max) } return result; } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } - /** - * Create a buffered reader to read from an input stream using the UTF-8 - * format. 
If the input stream is null, this method returns null. The - * InputStreamReader that is used here is not exact, that means it may read - * some additional bytes when buffering. - * - * @param in the input stream or null - * @return the reader - */ - public static Reader getBufferedReader(InputStream in) { - return in == null ? null : new BufferedReader( - new InputStreamReader(in, StandardCharsets.UTF_8)); - } - /** * Create a reader to read from an input stream using the UTF-8 format. If * the input stream is null, this method returns null. The InputStreamReader @@ -406,6 +402,7 @@ public static InputStream getInputStreamFromString(String s) { * * @param original the original file name * @param copy the file name of the copy + * @throws IOException on failure */ public static void copyFiles(String original, String copy) throws IOException { InputStream in = FileUtils.newInputStream(original); @@ -413,4 +410,14 @@ public static void copyFiles(String original, String copy) throws IOException { copyAndClose(in, out); } + /** + * Converts / and \ name separators in path to native separators. + * + * @param path path to convert + * @return path with converted separators + */ + public static String nameSeparatorsToNative(String path) { + return File.separatorChar == '/' ? path.replace('\\', '/') : path.replace('/', '\\'); + } + } diff --git a/h2/src/main/org/h2/util/IntArray.java b/h2/src/main/org/h2/util/IntArray.java index e79ffa122a..50dce12545 100644 --- a/h2/src/main/org/h2/util/IntArray.java +++ b/h2/src/main/org/h2/util/IntArray.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/IntIntHashMap.java b/h2/src/main/org/h2/util/IntIntHashMap.java deleted file mode 100644 index d0df90bab3..0000000000 --- a/h2/src/main/org/h2/util/IntIntHashMap.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.util; - -import org.h2.message.DbException; - -/** - * A hash map with int key and int values. There is a restriction: the - * value -1 (NOT_FOUND) cannot be stored in the map. 0 can be stored. - * An empty record has key=0 and value=0. - * A deleted record has key=0 and value=DELETED - */ -public class IntIntHashMap extends HashBase { - - /** - * The value indicating that the entry has not been found. - */ - public static final int NOT_FOUND = -1; - - private static final int DELETED = 1; - private int[] keys; - private int[] values; - private int zeroValue; - - @Override - protected void reset(int newLevel) { - super.reset(newLevel); - keys = new int[len]; - values = new int[len]; - } - - /** - * Store the given key-value pair. The value is overwritten or added. 
- * - * @param key the key - * @param value the value (-1 is not supported) - */ - public void put(int key, int value) { - if (key == 0) { - zeroKey = true; - zeroValue = value; - return; - } - checkSizePut(); - internalPut(key, value); - } - - private void internalPut(int key, int value) { - int index = getIndex(key); - int plus = 1; - int deleted = -1; - do { - int k = keys[index]; - if (k == 0) { - if (values[index] != DELETED) { - // found an empty record - if (deleted >= 0) { - index = deleted; - deletedCount--; - } - size++; - keys[index] = key; - values[index] = value; - return; - } - // found a deleted record - if (deleted < 0) { - deleted = index; - } - } else if (k == key) { - // update existing - values[index] = value; - return; - } - index = (index + plus++) & mask; - } while (plus <= len); - // no space - DbException.throwInternalError("hashmap is full"); - } - - /** - * Remove the key-value pair with the given key. - * - * @param key the key - */ - public void remove(int key) { - if (key == 0) { - zeroKey = false; - return; - } - checkSizeRemove(); - int index = getIndex(key); - int plus = 1; - do { - int k = keys[index]; - if (k == key) { - // found the record - keys[index] = 0; - values[index] = DELETED; - deletedCount++; - size--; - return; - } else if (k == 0 && values[index] == 0) { - // found an empty record - return; - } - index = (index + plus++) & mask; - } while (plus <= len); - // not found - } - - @Override - protected void rehash(int newLevel) { - int[] oldKeys = keys; - int[] oldValues = values; - reset(newLevel); - for (int i = 0; i < oldKeys.length; i++) { - int k = oldKeys[i]; - if (k != 0) { - // skip the checkSizePut so we don't end up - // accidentally recursing - internalPut(k, oldValues[i]); - } - } - } - - /** - * Get the value for the given key. This method returns NOT_FOUND if the - * entry has not been found. 
- * - * @param key the key - * @return the value or NOT_FOUND - */ - public int get(int key) { - if (key == 0) { - return zeroKey ? zeroValue : NOT_FOUND; - } - int index = getIndex(key); - int plus = 1; - do { - int k = keys[index]; - if (k == 0 && values[index] == 0) { - // found an empty record - return NOT_FOUND; - } else if (k == key) { - // found it - return values[index]; - } - index = (index + plus++) & mask; - } while (plus <= len); - return NOT_FOUND; - } - -} diff --git a/h2/src/main/org/h2/util/IntervalUtils.java b/h2/src/main/org/h2/util/IntervalUtils.java index 261a0218e5..1761f91d12 100644 --- a/h2/src/main/org/h2/util/IntervalUtils.java +++ b/h2/src/main/org/h2/util/IntervalUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -414,64 +414,53 @@ public static StringBuilder appendInterval(StringBuilder buff, IntervalQualifier buff.append(leading); break; case SECOND: - buff.append(leading); - appendNanos(buff, remaining); + DateTimeUtils.appendNanos(buff.append(leading), (int) remaining); break; case YEAR_TO_MONTH: buff.append(leading).append('-').append(remaining); break; case DAY_TO_HOUR: buff.append(leading).append(' '); - StringUtils.appendZeroPadded(buff, 2, remaining); + StringUtils.appendTwoDigits(buff, (int) remaining); break; - case DAY_TO_MINUTE: + case DAY_TO_MINUTE: { buff.append(leading).append(' '); - StringUtils.appendZeroPadded(buff, 2, remaining / 60); - buff.append(':'); - StringUtils.appendZeroPadded(buff, 2, remaining % 60); + int r = (int) remaining; + StringUtils.appendTwoDigits(buff, r / 60).append(':'); + StringUtils.appendTwoDigits(buff, r % 60); break; + } case DAY_TO_SECOND: { long nanos = remaining % NANOS_PER_MINUTE; - 
remaining /= NANOS_PER_MINUTE; + int r = (int) (remaining / NANOS_PER_MINUTE); buff.append(leading).append(' '); - StringUtils.appendZeroPadded(buff, 2, remaining / 60); - buff.append(':'); - StringUtils.appendZeroPadded(buff, 2, remaining % 60); - buff.append(':'); - appendSecondsWithNanos(buff, nanos); + StringUtils.appendTwoDigits(buff, r / 60).append(':'); + StringUtils.appendTwoDigits(buff, r % 60).append(':'); + StringUtils.appendTwoDigits(buff, (int) (nanos / NANOS_PER_SECOND)); + DateTimeUtils.appendNanos(buff, (int) (nanos % NANOS_PER_SECOND)); break; } case HOUR_TO_MINUTE: buff.append(leading).append(':'); - StringUtils.appendZeroPadded(buff, 2, remaining); + StringUtils.appendTwoDigits(buff, (int) remaining); break; - case HOUR_TO_SECOND: + case HOUR_TO_SECOND: { buff.append(leading).append(':'); - StringUtils.appendZeroPadded(buff, 2, remaining / NANOS_PER_MINUTE); - buff.append(':'); - appendSecondsWithNanos(buff, remaining % NANOS_PER_MINUTE); + StringUtils.appendTwoDigits(buff, (int) (remaining / NANOS_PER_MINUTE)).append(':'); + long s = remaining % NANOS_PER_MINUTE; + StringUtils.appendTwoDigits(buff, (int) (s / NANOS_PER_SECOND)); + DateTimeUtils.appendNanos(buff, (int) (s % NANOS_PER_SECOND)); break; + } case MINUTE_TO_SECOND: buff.append(leading).append(':'); - appendSecondsWithNanos(buff, remaining); + StringUtils.appendTwoDigits(buff, (int) (remaining / NANOS_PER_SECOND)); + DateTimeUtils.appendNanos(buff, (int) (remaining % NANOS_PER_SECOND)); break; } return buff.append("' ").append(qualifier); } - private static void appendSecondsWithNanos(StringBuilder buff, long nanos) { - StringUtils.appendZeroPadded(buff, 2, nanos / NANOS_PER_SECOND); - appendNanos(buff, nanos % NANOS_PER_SECOND); - } - - private static void appendNanos(StringBuilder buff, long nanos) { - if (nanos > 0) { - buff.append('.'); - StringUtils.appendZeroPadded(buff, 9, nanos); - DateTimeUtils.stripTrailingZeroes(buff); - } - } - /** * Converts interval value to an absolute 
value. * @@ -703,8 +692,8 @@ public static long yearsFromInterval(IntervalQualifier qualifier, boolean negati * values of all remaining fields * @return months, or 0 */ - public static long monthsFromInterval(IntervalQualifier qualifier, boolean negative, long leading, long remaining) - { + public static long monthsFromInterval(IntervalQualifier qualifier, boolean negative, long leading, // + long remaining) { long v; if (qualifier == IntervalQualifier.MONTH) { v = leading; diff --git a/h2/src/main/org/h2/util/JSR310Utils.java b/h2/src/main/org/h2/util/JSR310Utils.java new file mode 100644 index 0000000000..c53bce3d53 --- /dev/null +++ b/h2/src/main/org/h2/util/JSR310Utils.java @@ -0,0 +1,424 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util; + +import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND; +import static org.h2.util.DateTimeUtils.SECONDS_PER_DAY; +import static org.h2.util.DateTimeUtils.SHIFT_MONTH; +import static org.h2.util.DateTimeUtils.SHIFT_YEAR; +import static org.h2.util.DateTimeUtils.absoluteDayFromDateValue; +import static org.h2.util.DateTimeUtils.dateValue; +import static org.h2.util.DateTimeUtils.dateValueFromAbsoluteDay; +import static org.h2.util.DateTimeUtils.dayFromDateValue; +import static org.h2.util.DateTimeUtils.monthFromDateValue; +import static org.h2.util.DateTimeUtils.yearFromDateValue; + +import java.math.BigInteger; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; + +import org.h2.api.ErrorCode; +import org.h2.api.IntervalQualifier; +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; +import 
org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDate; +import org.h2.value.ValueInterval; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + * This utility class provides access to JSR 310 classes. + */ +public class JSR310Utils { + + private static final long MIN_DATE_VALUE = (-999_999_999L << SHIFT_YEAR) + + (1 << SHIFT_MONTH) + 1; + + private static final long MAX_DATE_VALUE = (999_999_999L << SHIFT_YEAR) + + (12 << SHIFT_MONTH) + 31; + + private static final long MIN_INSTANT_SECOND = -31_557_014_167_219_200L; + + private static final long MAX_INSTANT_SECOND = 31_556_889_864_403_199L; + + private JSR310Utils() { + // utility class + } + + /** + * Converts a value to a LocalDate. + * + * This method should only be called from Java 8 or later version. + * + * @param value + * the value to convert + * @param provider + * the cast information provider + * @return the LocalDate + */ + public static LocalDate valueToLocalDate(Value value, CastDataProvider provider) { + long dateValue = value.convertToDate(provider).getDateValue(); + if (dateValue > MAX_DATE_VALUE) { + return LocalDate.MAX; + } else if (dateValue < MIN_DATE_VALUE) { + return LocalDate.MIN; + } + return LocalDate.of(yearFromDateValue(dateValue), monthFromDateValue(dateValue), + dayFromDateValue(dateValue)); + } + + /** + * Converts a value to a LocalTime. + * + * This method should only be called from Java 8 or later version. + * + * @param value + * the value to convert + * @param provider + * the cast information provider + * @return the LocalTime + */ + public static LocalTime valueToLocalTime(Value value, CastDataProvider provider) { + return LocalTime.ofNanoOfDay(((ValueTime) value.convertTo(TypeInfo.TYPE_TIME, provider)).getNanos()); + } + + /** + * Converts a value to a LocalDateTime. 
+ * + * This method should only be called from Java 8 or later version. + * + * @param value + * the value to convert + * @param provider + * the cast information provider + * @return the LocalDateTime + */ + public static LocalDateTime valueToLocalDateTime(Value value, CastDataProvider provider) { + ValueTimestamp valueTimestamp = (ValueTimestamp) value.convertTo(TypeInfo.TYPE_TIMESTAMP, provider); + return localDateTimeFromDateNanos(valueTimestamp.getDateValue(), valueTimestamp.getTimeNanos()); + } + + /** + * Converts a value to a Instant. + * + * This method should only be called from Java 8 or later version. + * + * @param value + * the value to convert + * @param provider + * the cast information provider + * @return the Instant + */ + public static Instant valueToInstant(Value value, CastDataProvider provider) { + ValueTimestampTimeZone valueTimestampTimeZone = (ValueTimestampTimeZone) value + .convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider); + long timeNanos = valueTimestampTimeZone.getTimeNanos(); + long epochSecond = absoluteDayFromDateValue(valueTimestampTimeZone.getDateValue()) + * SECONDS_PER_DAY // + + timeNanos / NANOS_PER_SECOND // + - valueTimestampTimeZone.getTimeZoneOffsetSeconds(); + if (epochSecond > MAX_INSTANT_SECOND) { + return Instant.MAX; + } else if (epochSecond < MIN_INSTANT_SECOND) { + return Instant.MIN; + } + return Instant.ofEpochSecond(epochSecond, timeNanos % NANOS_PER_SECOND); + } + + /** + * Converts a value to a OffsetDateTime. + * + * This method should only be called from Java 8 or later version. 
+ * + * @param value + * the value to convert + * @param provider + * the cast information provider + * @return the OffsetDateTime + */ + public static OffsetDateTime valueToOffsetDateTime(Value value, CastDataProvider provider) { + ValueTimestampTimeZone v = (ValueTimestampTimeZone) value.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider); + return OffsetDateTime.of(localDateTimeFromDateNanos(v.getDateValue(), v.getTimeNanos()), + ZoneOffset.ofTotalSeconds(v.getTimeZoneOffsetSeconds())); + } + + /** + * Converts a value to a ZonedDateTime. + * + * This method should only be called from Java 8 or later version. + * + * @param value + * the value to convert + * @param provider + * the cast information provider + * @return the ZonedDateTime + */ + public static ZonedDateTime valueToZonedDateTime(Value value, CastDataProvider provider) { + ValueTimestampTimeZone v = (ValueTimestampTimeZone) value.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider); + return ZonedDateTime.of(localDateTimeFromDateNanos(v.getDateValue(), v.getTimeNanos()), + ZoneOffset.ofTotalSeconds(v.getTimeZoneOffsetSeconds())); + } + + /** + * Converts a value to a OffsetTime. + * + * This method should only be called from Java 8 or later version. + * + * @param value + * the value to convert + * @param provider + * the cast information provider + * @return the OffsetTime + */ + public static OffsetTime valueToOffsetTime(Value value, CastDataProvider provider) { + ValueTimeTimeZone valueTimeTimeZone = (ValueTimeTimeZone) value.convertTo(TypeInfo.TYPE_TIME_TZ, provider); + return OffsetTime.of(LocalTime.ofNanoOfDay(valueTimeTimeZone.getNanos()), + ZoneOffset.ofTotalSeconds(valueTimeTimeZone.getTimeZoneOffsetSeconds())); + } + + /** + * Converts a value to a Period. + * + * This method should only be called from Java 8 or later version. 
+ * + * @param value + * the value to convert + * @return the Period + */ + public static Period valueToPeriod(Value value) { + if (!(value instanceof ValueInterval)) { + value = value.convertTo(TypeInfo.TYPE_INTERVAL_YEAR_TO_MONTH); + } + if (!DataType.isYearMonthIntervalType(value.getValueType())) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, (Throwable) null, value.getString()); + } + ValueInterval v = (ValueInterval) value; + IntervalQualifier qualifier = v.getQualifier(); + boolean negative = v.isNegative(); + long leading = v.getLeading(); + long remaining = v.getRemaining(); + int y = Value.convertToInt(IntervalUtils.yearsFromInterval(qualifier, negative, leading, remaining), null); + int m = Value.convertToInt(IntervalUtils.monthsFromInterval(qualifier, negative, leading, remaining), null); + return Period.of(y, m, 0); + } + + /** + * Converts a value to a Duration. + * + * This method should only be called from Java 8 or later version. + * + * @param value + * the value to convert + * @return the Duration + */ + public static Duration valueToDuration(Value value) { + if (!(value instanceof ValueInterval)) { + value = value.convertTo(TypeInfo.TYPE_INTERVAL_DAY_TO_SECOND); + } + if (DataType.isYearMonthIntervalType(value.getValueType())) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, (Throwable) null, value.getString()); + } + BigInteger[] dr = IntervalUtils.intervalToAbsolute((ValueInterval) value) + .divideAndRemainder(BigInteger.valueOf(1_000_000_000)); + return Duration.ofSeconds(dr[0].longValue(), dr[1].longValue()); + } + + /** + * Converts a LocalDate to a Value. + * + * @param localDate + * the LocalDate to convert, not {@code null} + * @return the value + */ + public static ValueDate localDateToValue(LocalDate localDate) { + return ValueDate.fromDateValue( + dateValue(localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth())); + } + + /** + * Converts a LocalTime to a Value. 
+ * + * @param localTime + * the LocalTime to convert, not {@code null} + * @return the value + */ + public static ValueTime localTimeToValue(LocalTime localTime) { + return ValueTime.fromNanos(localTime.toNanoOfDay()); + } + + /** + * Converts a LocalDateTime to a Value. + * + * @param localDateTime + * the LocalDateTime to convert, not {@code null} + * @return the value + */ + public static ValueTimestamp localDateTimeToValue(LocalDateTime localDateTime) { + LocalDate localDate = localDateTime.toLocalDate(); + return ValueTimestamp.fromDateValueAndNanos( + dateValue(localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth()), + localDateTime.toLocalTime().toNanoOfDay()); + } + + /** + * Converts a Instant to a Value. + * + * @param instant + * the Instant to convert, not {@code null} + * @return the value + */ + public static ValueTimestampTimeZone instantToValue(Instant instant) { + long epochSecond = instant.getEpochSecond(); + int nano = instant.getNano(); + long absoluteDay = epochSecond / 86_400; + // Round toward negative infinity + if (epochSecond < 0 && (absoluteDay * 86_400 != epochSecond)) { + absoluteDay--; + } + long timeNanos = (epochSecond - absoluteDay * 86_400) * 1_000_000_000 + nano; + return ValueTimestampTimeZone.fromDateValueAndNanos(dateValueFromAbsoluteDay(absoluteDay), + timeNanos, 0); + } + + /** + * Converts a OffsetDateTime to a Value. 
+ * + * @param offsetDateTime + * the OffsetDateTime to convert, not {@code null} + * @return the value + */ + public static ValueTimestampTimeZone offsetDateTimeToValue(OffsetDateTime offsetDateTime) { + LocalDateTime localDateTime = offsetDateTime.toLocalDateTime(); + LocalDate localDate = localDateTime.toLocalDate(); + return ValueTimestampTimeZone.fromDateValueAndNanos( + dateValue(localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth()), + localDateTime.toLocalTime().toNanoOfDay(), // + offsetDateTime.getOffset().getTotalSeconds()); + } + + /** + * Converts a ZonedDateTime to a Value. + * + * @param zonedDateTime + * the ZonedDateTime to convert, not {@code null} + * @return the value + */ + public static ValueTimestampTimeZone zonedDateTimeToValue(ZonedDateTime zonedDateTime) { + LocalDateTime localDateTime = zonedDateTime.toLocalDateTime(); + LocalDate localDate = localDateTime.toLocalDate(); + return ValueTimestampTimeZone.fromDateValueAndNanos( + dateValue(localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth()), + localDateTime.toLocalTime().toNanoOfDay(), // + zonedDateTime.getOffset().getTotalSeconds()); + } + + /** + * Converts a OffsetTime to a Value. + * + * @param offsetTime + * the OffsetTime to convert, not {@code null} + * @return the value + */ + public static ValueTimeTimeZone offsetTimeToValue(OffsetTime offsetTime) { + return ValueTimeTimeZone.fromNanos(offsetTime.toLocalTime().toNanoOfDay(), + offsetTime.getOffset().getTotalSeconds()); + } + + private static LocalDateTime localDateTimeFromDateNanos(long dateValue, long timeNanos) { + if (dateValue > MAX_DATE_VALUE) { + return LocalDateTime.MAX; + } else if (dateValue < MIN_DATE_VALUE) { + return LocalDateTime.MIN; + } + return LocalDateTime.of(LocalDate.of(yearFromDateValue(dateValue), + monthFromDateValue(dateValue), dayFromDateValue(dateValue)), + LocalTime.ofNanoOfDay(timeNanos)); + } + + /** + * Converts a Period to a Value. 
+ * + * @param period + * the Period to convert, not {@code null} + * @return the value + */ + public static ValueInterval periodToValue(Period period) { + int days = period.getDays(); + if (days != 0) { + throw DbException.getInvalidValueException("Period.days", days); + } + int years = period.getYears(); + int months = period.getMonths(); + IntervalQualifier qualifier; + boolean negative = false; + long leading = 0L, remaining = 0L; + if (years == 0) { + if (months == 0L) { + // Use generic qualifier + qualifier = IntervalQualifier.YEAR_TO_MONTH; + } else { + qualifier = IntervalQualifier.MONTH; + leading = months; + if (leading < 0) { + leading = -leading; + negative = true; + } + } + } else { + if (months == 0L) { + qualifier = IntervalQualifier.YEAR; + leading = years; + if (leading < 0) { + leading = -leading; + negative = true; + } + } else { + qualifier = IntervalQualifier.YEAR_TO_MONTH; + leading = years * 12 + months; + if (leading < 0) { + leading = -leading; + negative = true; + } + remaining = leading % 12; + leading /= 12; + } + } + return ValueInterval.from(qualifier, negative, leading, remaining); + } + + /** + * Converts a Duration to a Value. + * + * @param duration + * the Duration to convert, not {@code null} + * @return the value + */ + public static ValueInterval durationToValue(Duration duration) { + long seconds = duration.getSeconds(); + int nano = duration.getNano(); + boolean negative = seconds < 0; + seconds = Math.abs(seconds); + if (negative && nano != 0) { + nano = 1_000_000_000 - nano; + seconds--; + } + return ValueInterval.from(IntervalQualifier.SECOND, negative, seconds, nano); + } + +} diff --git a/h2/src/main/org/h2/util/JdbcUtils.java b/h2/src/main/org/h2/util/JdbcUtils.java index b97fba7621..03a126c0a1 100644 --- a/h2/src/main/org/h2/util/JdbcUtils.java +++ b/h2/src/main/org/h2/util/JdbcUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -11,19 +11,39 @@ import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.ObjectStreamClass; -import java.sql.*; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.nio.charset.StandardCharsets; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.Driver; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; import java.util.HashSet; import java.util.Properties; + import javax.naming.Context; import javax.sql.DataSource; -import org.h2.api.CustomDataTypesHandler; import org.h2.api.ErrorCode; import org.h2.api.JavaObjectSerializer; +import org.h2.engine.Constants; import org.h2.engine.SysProperties; +import org.h2.jdbc.JdbcConnection; +import org.h2.jdbc.JdbcPreparedStatement; import org.h2.message.DbException; -import org.h2.store.DataHandler; +import org.h2.tools.SimpleResultSet; import org.h2.util.Utils.ClassFactory; +import org.h2.value.Value; +import org.h2.value.ValueLob; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueUuid; /** * This is a utility class with JDBC helper functions. @@ -35,20 +55,15 @@ public class JdbcUtils { */ public static JavaObjectSerializer serializer; - /** - * Custom data types handler to use. 
- */ - public static CustomDataTypesHandler customDataTypesHandler; - private static final String[] DRIVERS = { "h2:", "org.h2.Driver", "Cache:", "com.intersys.jdbc.CacheDriver", "daffodilDB://", "in.co.daffodil.db.rmi.RmiDaffodilDBDriver", "daffodil", "in.co.daffodil.db.jdbc.DaffodilDBDriver", "db2:", "com.ibm.db2.jcc.DB2Driver", - "derby:net:", "org.apache.derby.jdbc.ClientDriver", - "derby://", "org.apache.derby.jdbc.ClientDriver", - "derby:", "org.apache.derby.jdbc.EmbeddedDriver", + "derby:net:", "org.apache.derby.client.ClientAutoloadedDriver", + "derby://", "org.apache.derby.client.ClientAutoloadedDriver", + "derby:", "org.apache.derby.iapi.jdbc.AutoloadedDriver", "FrontBase:", "com.frontbase.jdbc.FBJDriver", "firebirdsql:", "org.firebirdsql.jdbc.FBDriver", "hsqldb:", "org.hsqldb.jdbcDriver", @@ -56,7 +71,8 @@ public class JdbcUtils { "jtds:", "net.sourceforge.jtds.jdbc.Driver", "microsoft:", "com.microsoft.jdbc.sqlserver.SQLServerDriver", "mimer:", "com.mimer.jdbc.Driver", - "mysql:", "com.mysql.jdbc.Driver", + "mysql:", "com.mysql.cj.jdbc.Driver", + "mariadb:", "org.mariadb.jdbc.Driver", "odbc:", "sun.jdbc.odbc.JdbcOdbcDriver", "oracle:", "oracle.jdbc.driver.OracleDriver", "pervasive:", "com.pervasive.jdbc.v2.Driver", @@ -68,14 +84,17 @@ public class JdbcUtils { "teradata:", "com.ncr.teradata.TeraDriver", }; + private static final byte[] UUID_PREFIX = + "\254\355\0\5sr\0\16java.util.UUID\274\231\3\367\230m\205/\2\0\2J\0\14leastSigBitsJ\0\13mostSigBitsxp" + .getBytes(StandardCharsets.ISO_8859_1); + private static boolean allowAllClasses; private static HashSet allowedClassNames; /** * In order to manage more than one class loader */ - private static ArrayList userClassFactories = - new ArrayList<>(); + private static final ArrayList userClassFactories = new ArrayList<>(); private static String[] allowedClassNamePrefixes; @@ -89,7 +108,7 @@ private JdbcUtils() { * @param classFactory An object that implements ClassFactory */ public static void 
addClassFactory(ClassFactory classFactory) { - getUserClassFactories().add(classFactory); + userClassFactories.add(classFactory); } /** @@ -98,16 +117,7 @@ public static void addClassFactory(ClassFactory classFactory) { * @param classFactory Already inserted class factory instance */ public static void removeClassFactory(ClassFactory classFactory) { - getUserClassFactories().remove(classFactory); - } - - private static ArrayList getUserClassFactories() { - if (userClassFactories == null) { - // initially, it is empty - // but Apache Tomcat may clear the fields as well - userClassFactories = new ArrayList<>(); - } - return userClassFactories; + userClassFactories.remove(classFactory); } static { @@ -119,16 +129,6 @@ private static ArrayList getUserClassFactories() { throw DbException.convert(e); } } - - String customTypeHandlerClass = SysProperties.CUSTOM_DATA_TYPES_HANDLER; - if (customTypeHandlerClass != null) { - try { - customDataTypesHandler = (CustomDataTypesHandler) - loadUserClass(customTypeHandlerClass).getDeclaredConstructor().newInstance(); - } catch (Exception e) { - throw DbException.convert(e); - } - } } /** @@ -136,6 +136,7 @@ private static ArrayList getUserClassFactories() { * perform access rights checking, the system property h2.allowedClasses * needs to be set to a list of class file name prefixes. * + * @param generic return type * @param className the name of the class * @return the class object */ @@ -165,6 +166,7 @@ public static Class loadUserClass(String className) { for (String s : allowedClassNamePrefixes) { if (className.startsWith(s)) { allowed = true; + break; } } if (!allowed) { @@ -173,7 +175,7 @@ public static Class loadUserClass(String className) { } } // Use provided class factory first. 
- for (ClassFactory classFactory : getUserClassFactories()) { + for (ClassFactory classFactory : userClassFactories) { if (classFactory.match(className)) { try { Class userClass = classFactory.loadClass(className); @@ -261,17 +263,11 @@ public static void closeSilently(ResultSet rs) { * @param user the user name * @param password the password * @return the database connection + * @throws SQLException on failure */ public static Connection getConnection(String driver, String url, String user, String password) throws SQLException { - Properties prop = new Properties(); - if (user != null) { - prop.setProperty("user", user); - } - if (password != null) { - prop.setProperty("password", password); - } - return getConnection(driver, url, prop); + return getConnection(driver, url, user, password, null, false); } /** @@ -279,11 +275,22 @@ public static Connection getConnection(String driver, String url, * * @param driver the driver class name * @param url the database URL - * @param prop the properties containing at least the user name and password + * @param user the user name or {@code null} + * @param password the password or {@code null} + * @param networkConnectionInfo the network connection information, or {@code null} + * @param forbidCreation whether database creation is forbidden * @return the database connection + * @throws SQLException on failure */ - public static Connection getConnection(String driver, String url, - Properties prop) throws SQLException { + public static Connection getConnection(String driver, String url, String user, String password, + NetworkConnectionInfo networkConnectionInfo, boolean forbidCreation) throws SQLException { + if (url.startsWith(Constants.START_URL)) { + JdbcConnection connection = new JdbcConnection(url, null, user, password, forbidCreation); + if (networkConnectionInfo != null) { + connection.getSession().setNetworkConnectionInfo(networkConnectionInfo); + } + return connection; + } if (StringUtils.isNullOrEmpty(driver)) { 
JdbcUtils.load(url); } else { @@ -291,6 +298,13 @@ public static Connection getConnection(String driver, String url, try { if (java.sql.Driver.class.isAssignableFrom(d)) { Driver driverInstance = (Driver) d.getDeclaredConstructor().newInstance(); + Properties prop = new Properties(); + if (user != null) { + prop.setProperty("user", user); + } + if (password != null) { + prop.setProperty("password", password); + } /* * fix issue #695 with drivers with the same jdbc * subprotocol in classpath of jdbc drivers (as example @@ -302,11 +316,12 @@ public static Connection getConnection(String driver, String url, } throw new SQLException("Driver " + driver + " is not suitable for " + url, "08001"); } else if (javax.naming.Context.class.isAssignableFrom(d)) { + if (!url.startsWith("java:")) { + throw new SQLException("Only java scheme is supported for JNDI lookups", "08001"); + } // JNDI context Context context = (Context) d.getDeclaredConstructor().newInstance(); DataSource ds = (DataSource) context.lookup(url); - String user = prop.getProperty("user"); - String password = prop.getProperty("password"); if (StringUtils.isNullOrEmpty(user) && StringUtils.isNullOrEmpty(password)) { return ds.getConnection(); } @@ -317,7 +332,7 @@ public static Connection getConnection(String driver, String url, } // don't know, but maybe it loaded a JDBC Driver } - return DriverManager.getConnection(url, prop); + return DriverManager.getConnection(url, user, password); } /** @@ -357,17 +372,13 @@ public static void load(String url) { * the connection info if set, or the default serializer. 
* * @param obj the object to serialize - * @param dataHandler provides the object serializer (may be null) + * @param javaObjectSerializer the object serializer (may be null) * @return the byte array */ - public static byte[] serialize(Object obj, DataHandler dataHandler) { + public static byte[] serialize(Object obj, JavaObjectSerializer javaObjectSerializer) { try { - JavaObjectSerializer handlerSerializer = null; - if (dataHandler != null) { - handlerSerializer = dataHandler.getJavaObjectSerializer(); - } - if (handlerSerializer != null) { - return handlerSerializer.serialize(obj); + if (javaObjectSerializer != null) { + return javaObjectSerializer.serialize(obj); } if (serializer != null) { return serializer.serialize(obj); @@ -386,18 +397,14 @@ public static byte[] serialize(Object obj, DataHandler dataHandler) { * specified by the connection info. * * @param data the byte array - * @param dataHandler provides the object serializer (may be null) + * @param javaObjectSerializer the object serializer (may be null) * @return the object * @throws DbException if serialization fails */ - public static Object deserialize(byte[] data, DataHandler dataHandler) { + public static Object deserialize(byte[] data, JavaObjectSerializer javaObjectSerializer) { try { - JavaObjectSerializer dbJavaObjectSerializer = null; - if (dataHandler != null) { - dbJavaObjectSerializer = dataHandler.getJavaObjectSerializer(); - } - if (dbJavaObjectSerializer != null) { - return dbJavaObjectSerializer.deserialize(data); + if (javaObjectSerializer != null) { + return javaObjectSerializer.deserialize(data); } if (serializer != null) { return serializer.deserialize(data); @@ -426,4 +433,345 @@ protected Class resolveClass(ObjectStreamClass desc) } } + /** + * De-serialize the byte array to a UUID object. This method is called on + * the server side where regular de-serialization of user-supplied Java + * objects may create a security hole if object was maliciously crafted. 
+ * Unlike {@link #deserialize(byte[], JavaObjectSerializer)}, this method + * does not try to de-serialize instances of other classes. + * + * @param data the byte array + * @return the UUID object + * @throws DbException if serialization fails + */ + public static ValueUuid deserializeUuid(byte[] data) { + uuid: if (data.length == 80) { + for (int i = 0; i < 64; i++) { + if (data[i] != UUID_PREFIX[i]) { + break uuid; + } + } + return ValueUuid.get(Bits.readLong(data, 72), Bits.readLong(data, 64)); + } + throw DbException.get(ErrorCode.DESERIALIZATION_FAILED_1, "Is not a UUID"); + } + + /** + * Set a value as a parameter in a prepared statement. + * + * @param prep the prepared statement + * @param parameterIndex the parameter index + * @param value the value + * @param conn the own connection + * @throws SQLException on failure + */ + public static void set(PreparedStatement prep, int parameterIndex, Value value, JdbcConnection conn) + throws SQLException { + if (prep instanceof JdbcPreparedStatement) { + if (value instanceof ValueLob) { + setLob(prep, parameterIndex, (ValueLob) value); + } else { + prep.setObject(parameterIndex, value); + } + } else { + setOther(prep, parameterIndex, value, conn); + } + } + + private static void setOther(PreparedStatement prep, int parameterIndex, Value value, JdbcConnection conn) + throws SQLException { + int valueType = value.getValueType(); + switch (valueType) { + case Value.NULL: + prep.setNull(parameterIndex, Types.NULL); + break; + case Value.BOOLEAN: + prep.setBoolean(parameterIndex, value.getBoolean()); + break; + case Value.TINYINT: + prep.setByte(parameterIndex, value.getByte()); + break; + case Value.SMALLINT: + prep.setShort(parameterIndex, value.getShort()); + break; + case Value.INTEGER: + prep.setInt(parameterIndex, value.getInt()); + break; + case Value.BIGINT: + prep.setLong(parameterIndex, value.getLong()); + break; + case Value.NUMERIC: + case Value.DECFLOAT: + prep.setBigDecimal(parameterIndex, 
value.getBigDecimal()); + break; + case Value.DOUBLE: + prep.setDouble(parameterIndex, value.getDouble()); + break; + case Value.REAL: + prep.setFloat(parameterIndex, value.getFloat()); + break; + case Value.TIME: + try { + prep.setObject(parameterIndex, JSR310Utils.valueToLocalTime(value, null), Types.TIME); + } catch (SQLException ignore) { + prep.setTime(parameterIndex, LegacyDateTimeUtils.toTime(null, null, value)); + } + break; + case Value.DATE: + try { + prep.setObject(parameterIndex, JSR310Utils.valueToLocalDate(value, null), Types.DATE); + } catch (SQLException ignore) { + prep.setDate(parameterIndex, LegacyDateTimeUtils.toDate(null, null, value)); + } + break; + case Value.TIMESTAMP: + try { + prep.setObject(parameterIndex, JSR310Utils.valueToLocalDateTime(value, null), Types.TIMESTAMP); + } catch (SQLException ignore) { + prep.setTimestamp(parameterIndex, LegacyDateTimeUtils.toTimestamp(null, null, value)); + } + break; + case Value.VARBINARY: + case Value.BINARY: + case Value.GEOMETRY: + case Value.JSON: + prep.setBytes(parameterIndex, value.getBytesNoCopy()); + break; + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.ENUM: + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + prep.setString(parameterIndex, value.getString()); + break; + case Value.BLOB: + case Value.CLOB: + setLob(prep, parameterIndex, (ValueLob) value); + break; + case Value.ARRAY: + prep.setArray(parameterIndex, prep.getConnection().createArrayOf("NULL", + (Object[]) ValueToObjectConverter.valueToDefaultObject(value, conn, true))); + break; + case Value.JAVA_OBJECT: + 
prep.setObject(parameterIndex, + JdbcUtils.deserialize(value.getBytesNoCopy(), conn.getJavaObjectSerializer()), + Types.JAVA_OBJECT); + break; + case Value.UUID: + prep.setBytes(parameterIndex, value.getBytes()); + break; + case Value.CHAR: + try { + prep.setObject(parameterIndex, value.getString(), Types.CHAR); + } catch (SQLException ignore) { + prep.setString(parameterIndex, value.getString()); + } + break; + case Value.TIMESTAMP_TZ: + try { + prep.setObject(parameterIndex, JSR310Utils.valueToOffsetDateTime(value, null), + Types.TIMESTAMP_WITH_TIMEZONE); + return; + } catch (SQLException ignore) { + prep.setString(parameterIndex, value.getString()); + } + break; + case Value.TIME_TZ: + try { + prep.setObject(parameterIndex, JSR310Utils.valueToOffsetTime(value, null), Types.TIME_WITH_TIMEZONE); + return; + } catch (SQLException ignore) { + prep.setString(parameterIndex, value.getString()); + } + break; + default: + throw DbException.getUnsupportedException(Value.getTypeName(valueType)); + } + } + + private static void setLob(PreparedStatement prep, int parameterIndex, ValueLob value) throws SQLException { + if (value.getValueType() == Value.BLOB) { + long p = value.octetLength(); + prep.setBinaryStream(parameterIndex, value.getInputStream(), p > Integer.MAX_VALUE ? -1 : (int) p); + } else { + long p = value.charLength(); + prep.setCharacterStream(parameterIndex, value.getReader(), p > Integer.MAX_VALUE ? -1 : (int) p); + } + } + + /** + * Get metadata from the database. + * + * @param conn the connection + * @param sql the SQL statement + * @return the metadata + * @throws SQLException on failure + */ + public static ResultSet getMetaResultSet(Connection conn, String sql) + throws SQLException { + DatabaseMetaData meta = conn.getMetaData(); + if (isBuiltIn(sql, "@best_row_identifier")) { + String[] p = split(sql); + int scale = p[4] == null ? 
0 : Integer.parseInt(p[4]); + boolean nullable = Boolean.parseBoolean(p[5]); + return meta.getBestRowIdentifier(p[1], p[2], p[3], scale, nullable); + } else if (isBuiltIn(sql, "@catalogs")) { + return meta.getCatalogs(); + } else if (isBuiltIn(sql, "@columns")) { + String[] p = split(sql); + return meta.getColumns(p[1], p[2], p[3], p[4]); + } else if (isBuiltIn(sql, "@column_privileges")) { + String[] p = split(sql); + return meta.getColumnPrivileges(p[1], p[2], p[3], p[4]); + } else if (isBuiltIn(sql, "@cross_references")) { + String[] p = split(sql); + return meta.getCrossReference(p[1], p[2], p[3], p[4], p[5], p[6]); + } else if (isBuiltIn(sql, "@exported_keys")) { + String[] p = split(sql); + return meta.getExportedKeys(p[1], p[2], p[3]); + } else if (isBuiltIn(sql, "@imported_keys")) { + String[] p = split(sql); + return meta.getImportedKeys(p[1], p[2], p[3]); + } else if (isBuiltIn(sql, "@index_info")) { + String[] p = split(sql); + boolean unique = Boolean.parseBoolean(p[4]); + boolean approx = Boolean.parseBoolean(p[5]); + return meta.getIndexInfo(p[1], p[2], p[3], unique, approx); + } else if (isBuiltIn(sql, "@primary_keys")) { + String[] p = split(sql); + return meta.getPrimaryKeys(p[1], p[2], p[3]); + } else if (isBuiltIn(sql, "@procedures")) { + String[] p = split(sql); + return meta.getProcedures(p[1], p[2], p[3]); + } else if (isBuiltIn(sql, "@procedure_columns")) { + String[] p = split(sql); + return meta.getProcedureColumns(p[1], p[2], p[3], p[4]); + } else if (isBuiltIn(sql, "@schemas")) { + return meta.getSchemas(); + } else if (isBuiltIn(sql, "@tables")) { + String[] p = split(sql); + String[] types = p[4] == null ? 
null : StringUtils.arraySplit(p[4], ',', false); + return meta.getTables(p[1], p[2], p[3], types); + } else if (isBuiltIn(sql, "@table_privileges")) { + String[] p = split(sql); + return meta.getTablePrivileges(p[1], p[2], p[3]); + } else if (isBuiltIn(sql, "@table_types")) { + return meta.getTableTypes(); + } else if (isBuiltIn(sql, "@type_info")) { + return meta.getTypeInfo(); + } else if (isBuiltIn(sql, "@udts")) { + String[] p = split(sql); + int[] types; + if (p[4] == null) { + types = null; + } else { + String[] t = StringUtils.arraySplit(p[4], ',', false); + types = new int[t.length]; + for (int i = 0; i < t.length; i++) { + types[i] = Integer.parseInt(t[i]); + } + } + return meta.getUDTs(p[1], p[2], p[3], types); + } else if (isBuiltIn(sql, "@version_columns")) { + String[] p = split(sql); + return meta.getVersionColumns(p[1], p[2], p[3]); + } else if (isBuiltIn(sql, "@memory")) { + SimpleResultSet rs = new SimpleResultSet(); + rs.addColumn("Type", Types.VARCHAR, 0, 0); + rs.addColumn("KB", Types.VARCHAR, 0, 0); + rs.addRow("Used Memory", Long.toString(Utils.getMemoryUsed())); + rs.addRow("Free Memory", Long.toString(Utils.getMemoryFree())); + return rs; + } else if (isBuiltIn(sql, "@info")) { + SimpleResultSet rs = new SimpleResultSet(); + rs.addColumn("KEY", Types.VARCHAR, 0, 0); + rs.addColumn("VALUE", Types.VARCHAR, 0, 0); + rs.addRow("conn.getCatalog", conn.getCatalog()); + rs.addRow("conn.getAutoCommit", Boolean.toString(conn.getAutoCommit())); + rs.addRow("conn.getTransactionIsolation", Integer.toString(conn.getTransactionIsolation())); + rs.addRow("conn.getWarnings", String.valueOf(conn.getWarnings())); + String map; + try { + map = String.valueOf(conn.getTypeMap()); + } catch (SQLException e) { + map = e.toString(); + } + rs.addRow("conn.getTypeMap", map); + rs.addRow("conn.isReadOnly", Boolean.toString(conn.isReadOnly())); + rs.addRow("conn.getHoldability", Integer.toString(conn.getHoldability())); + addDatabaseMetaData(rs, meta); + return rs; + } 
else if (isBuiltIn(sql, "@attributes")) { + String[] p = split(sql); + return meta.getAttributes(p[1], p[2], p[3], p[4]); + } else if (isBuiltIn(sql, "@super_tables")) { + String[] p = split(sql); + return meta.getSuperTables(p[1], p[2], p[3]); + } else if (isBuiltIn(sql, "@super_types")) { + String[] p = split(sql); + return meta.getSuperTypes(p[1], p[2], p[3]); + } else if (isBuiltIn(sql, "@pseudo_columns")) { + String[] p = split(sql); + return meta.getPseudoColumns(p[1], p[2], p[3], p[4]); + } + return null; + } + + private static void addDatabaseMetaData(SimpleResultSet rs, + DatabaseMetaData meta) { + Method[] methods = DatabaseMetaData.class.getDeclaredMethods(); + Arrays.sort(methods, Comparator.comparing(Method::toString)); + for (Method m : methods) { + if (m.getParameterTypes().length == 0) { + try { + Object o = m.invoke(meta); + rs.addRow("meta." + m.getName(), String.valueOf(o)); + } catch (InvocationTargetException e) { + rs.addRow("meta." + m.getName(), e.getTargetException().toString()); + } catch (Exception e) { + rs.addRow("meta." + m.getName(), e.toString()); + } + } + } + } + + /** + * Check is the SQL string starts with a prefix (case insensitive). + * + * @param sql the SQL statement + * @param builtIn the prefix + * @return true if yes + */ + public static boolean isBuiltIn(String sql, String builtIn) { + return sql.regionMatches(true, 0, builtIn, 0, builtIn.length()); + } + + /** + * Split the string using the space separator into at least 10 entries. 
+ * + * @param s the string + * @return the array + */ + public static String[] split(String s) { + String[] t = StringUtils.arraySplit(s, ' ', true); + String[] list = new String[Math.max(10, t.length)]; + System.arraycopy(t, 0, list, 0, t.length); + for (int i = 0; i < list.length; i++) { + if ("null".equals(list[i])) { + list[i] = null; + } + } + return list; + } } diff --git a/h2/src/main/org/h2/util/LazyFuture.java b/h2/src/main/org/h2/util/LazyFuture.java deleted file mode 100644 index 77d81778ed..0000000000 --- a/h2/src/main/org/h2/util/LazyFuture.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.util; - -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import org.h2.message.DbException; - -/** - * Single threaded lazy future. - * - * @author Sergi Vladykin - * - * @param the result type - */ -public abstract class LazyFuture implements Future { - - private static final int S_READY = 0; - private static final int S_DONE = 1; - private static final int S_ERROR = 2; - private static final int S_CANCELED = 3; - - private int state = S_READY; - private T result; - private Exception error; - - /** - * Reset this future to the initial state. - * - * @return {@code false} if it was already in initial state - */ - public boolean reset() { - if (state == S_READY) { - return false; - } - state = S_READY; - result = null; - error = null; - return true; - } - - /** - * Run computation and produce the result. 
- * - * @return the result of computation - */ - protected abstract T run() throws Exception; - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - if (state != S_READY) { - return false; - } - state = S_CANCELED; - return true; - } - - @Override - public T get() throws InterruptedException, ExecutionException { - switch (state) { - case S_READY: - try { - result = run(); - state = S_DONE; - } catch (Exception e) { - error = e; - if (e instanceof InterruptedException) { - throw (InterruptedException) e; - } - throw new ExecutionException(e); - } finally { - if (state != S_DONE) { - state = S_ERROR; - } - } - return result; - case S_DONE: - return result; - case S_ERROR: - throw new ExecutionException(error); - case S_CANCELED: - throw new CancellationException(); - default: - throw DbException.throwInternalError(Integer.toString(state)); - } - } - - @Override - public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException { - return get(); - } - - @Override - public boolean isCancelled() { - return state == S_CANCELED; - } - - @Override - public boolean isDone() { - return state != S_READY; - } -} diff --git a/h2/src/main/org/h2/util/LegacyDateTimeUtils.java b/h2/src/main/org/h2/util/LegacyDateTimeUtils.java new file mode 100644 index 0000000000..254c7ffab4 --- /dev/null +++ b/h2/src/main/org/h2/util/LegacyDateTimeUtils.java @@ -0,0 +1,328 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.util; + +import static org.h2.util.DateTimeUtils.MILLIS_PER_DAY; +import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND; + +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.GregorianCalendar; +import java.util.TimeZone; + +import org.h2.engine.CastDataProvider; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDate; +import org.h2.value.ValueNull; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + * Date and time utilities for {@link Date}, {@link Time}, and {@link Timestamp} + * classes. + */ +public final class LegacyDateTimeUtils { + + /** + * Gregorian change date for a {@link java.util.GregorianCalendar} that + * represents a proleptic Gregorian calendar. + */ + public static final Date PROLEPTIC_GREGORIAN_CHANGE = new Date(Long.MIN_VALUE); + + /** + * UTC time zone. + */ + public static final TimeZone UTC = TimeZone.getTimeZone("UTC"); + + private LegacyDateTimeUtils() { + } + + /** + * Get or create a date value for the given date. + * + * @param provider + * the cast information provider + * @param timeZone + * time zone, or {@code null} for default + * @param date + * the date + * @return the value + */ + public static ValueDate fromDate(CastDataProvider provider, TimeZone timeZone, Date date) { + long ms = date.getTime(); + return ValueDate.fromDateValue(dateValueFromLocalMillis( + ms + (timeZone == null ? getTimeZoneOffsetMillis(provider, ms) : timeZone.getOffset(ms)))); + } + + /** + * Get or create a time value for the given time. 
+ * + * @param provider + * the cast information provider + * @param timeZone + * time zone, or {@code null} for default + * @param time + * the time + * @return the value + */ + public static ValueTime fromTime(CastDataProvider provider, TimeZone timeZone, Time time) { + long ms = time.getTime(); + return ValueTime.fromNanos(nanosFromLocalMillis( + ms + (timeZone == null ? getTimeZoneOffsetMillis(provider, ms) : timeZone.getOffset(ms)))); + } + + /** + * Get or create a timestamp value for the given timestamp. + * + * @param provider + * the cast information provider + * @param timeZone + * time zone, or {@code null} for default + * @param timestamp + * the timestamp + * @return the value + */ + public static ValueTimestamp fromTimestamp(CastDataProvider provider, TimeZone timeZone, Timestamp timestamp) { + long ms = timestamp.getTime(); + return timestampFromLocalMillis( + ms + (timeZone == null ? getTimeZoneOffsetMillis(provider, ms) : timeZone.getOffset(ms)), + timestamp.getNanos() % 1_000_000); + } + + /** + * Get or create a timestamp value for the given date/time in millis. + * + * @param provider + * the cast information provider + * @param ms + * the milliseconds + * @param nanos + * the nanoseconds + * @return the value + */ + public static ValueTimestamp fromTimestamp(CastDataProvider provider, long ms, int nanos) { + return timestampFromLocalMillis(ms + getTimeZoneOffsetMillis(provider, ms), nanos); + } + + private static ValueTimestamp timestampFromLocalMillis(long ms, int nanos) { + long dateValue = dateValueFromLocalMillis(ms); + long timeNanos = nanos + nanosFromLocalMillis(ms); + return ValueTimestamp.fromDateValueAndNanos(dateValue, timeNanos); + } + + /** + * Convert a local datetime in millis to an encoded date. 
+ * + * @param ms + * the milliseconds + * @return the date value + */ + public static long dateValueFromLocalMillis(long ms) { + long absoluteDay = ms / MILLIS_PER_DAY; + // Round toward negative infinity + if (ms < 0 && (absoluteDay * MILLIS_PER_DAY != ms)) { + absoluteDay--; + } + return DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay); + } + + /** + * Convert a time in milliseconds in local time to the nanoseconds since + * midnight. + * + * @param ms + * the milliseconds + * @return the nanoseconds + */ + public static long nanosFromLocalMillis(long ms) { + ms %= MILLIS_PER_DAY; + if (ms < 0) { + ms += MILLIS_PER_DAY; + } + return ms * 1_000_000; + } + + /** + * Get the date value converted to the specified time zone. + * + * @param provider the cast information provider + * @param timeZone the target time zone + * @param value the value to convert + * @return the date + */ + public static Date toDate(CastDataProvider provider, TimeZone timeZone, Value value) { + return value != ValueNull.INSTANCE + ? new Date(getMillis(provider, timeZone, value.convertToDate(provider).getDateValue(), 0)) : null; + } + + /** + * Get the time value converted to the specified time zone. + * + * @param provider the cast information provider + * @param timeZone the target time zone + * @param value the value to convert + * @return the time + */ + public static Time toTime(CastDataProvider provider, TimeZone timeZone, Value value) { + switch (value.getValueType()) { + case Value.NULL: + return null; + default: + value = value.convertTo(TypeInfo.TYPE_TIME, provider); + //$FALL-THROUGH$ + case Value.TIME: + return new Time( + getMillis(provider, timeZone, DateTimeUtils.EPOCH_DATE_VALUE, ((ValueTime) value).getNanos())); + } + } + + /** + * Get the timestamp value converted to the specified time zone. 
+ * + * @param provider the cast information provider + * @param timeZone the target time zone + * @param value the value to convert + * @return the timestamp + */ + public static Timestamp toTimestamp(CastDataProvider provider, TimeZone timeZone, Value value) { + switch (value.getValueType()) { + case Value.NULL: + return null; + default: + value = value.convertTo(TypeInfo.TYPE_TIMESTAMP, provider); + //$FALL-THROUGH$ + case Value.TIMESTAMP: { + ValueTimestamp v = (ValueTimestamp) value; + long timeNanos = v.getTimeNanos(); + Timestamp ts = new Timestamp(getMillis(provider, timeZone, v.getDateValue(), timeNanos)); + ts.setNanos((int) (timeNanos % NANOS_PER_SECOND)); + return ts; + } + case Value.TIMESTAMP_TZ: { + ValueTimestampTimeZone v = (ValueTimestampTimeZone) value; + long timeNanos = v.getTimeNanos(); + Timestamp ts = new Timestamp(DateTimeUtils.absoluteDayFromDateValue(v.getDateValue()) * MILLIS_PER_DAY + + timeNanos / 1_000_000 - v.getTimeZoneOffsetSeconds() * 1_000); + ts.setNanos((int) (timeNanos % NANOS_PER_SECOND)); + return ts; + } + } + } + + /** + * Calculate the milliseconds since 1970-01-01 (UTC) for the given date and + * time (in the specified timezone). + * + * @param provider the cast information provider + * @param tz the timezone of the parameters, or null for the default + * timezone + * @param dateValue date value + * @param timeNanos nanoseconds since midnight + * @return the number of milliseconds (UTC) + */ + public static long getMillis(CastDataProvider provider, TimeZone tz, long dateValue, long timeNanos) { + return (tz == null ? provider != null ? provider.currentTimeZone() : DateTimeUtils.getTimeZone() + : TimeZoneProvider.ofId(tz.getID())).getEpochSecondsFromLocal(dateValue, timeNanos) * 1_000 + + timeNanos / 1_000_000 % 1_000; + } + + /** + * Returns local time zone offset for a specified timestamp. 
+ * + * @param provider the cast information provider + * @param ms milliseconds since Epoch in UTC + * @return local time zone offset + */ + public static int getTimeZoneOffsetMillis(CastDataProvider provider, long ms) { + long seconds = ms / 1_000; + // Round toward negative infinity + if (ms < 0 && (seconds * 1_000 != ms)) { + seconds--; + } + return (provider != null ? provider.currentTimeZone() : DateTimeUtils.getTimeZone()) + .getTimeZoneOffsetUTC(seconds) * 1_000; + } + + /** + * Convert a legacy Java object to a value. + * + * @param session + * the session + * @param x + * the value + * @return the value, or {@code null} if not supported + */ + public static Value legacyObjectToValue(CastDataProvider session, Object x) { + if (x instanceof Date) { + return fromDate(session, null, (Date) x); + } else if (x instanceof Time) { + return fromTime(session, null, (Time) x); + } else if (x instanceof Timestamp) { + return fromTimestamp(session, null, (Timestamp) x); + } else if (x instanceof java.util.Date) { + return fromTimestamp(session, ((java.util.Date) x).getTime(), 0); + } else if (x instanceof Calendar) { + Calendar gc = (Calendar) x; + long ms = gc.getTimeInMillis(); + return timestampFromLocalMillis(ms + gc.getTimeZone().getOffset(ms), 0); + } else { + return null; + } + } + + /** + * Converts the specified value to an object of the specified legacy type. 
+ * + * @param the type + * @param type the class + * @param value the value + * @param provider the cast information provider + * @return an instance of the specified class, or {@code null} if not supported + */ + @SuppressWarnings("unchecked") + public static T valueToLegacyType(Class type, Value value, CastDataProvider provider) { + if (type == Date.class) { + return (T) toDate(provider, null, value); + } else if (type == Time.class) { + return (T) toTime(provider, null, value); + } else if (type == Timestamp.class) { + return (T) toTimestamp(provider, null, value); + } else if (type == java.util.Date.class) { + return (T) new java.util.Date(toTimestamp(provider, null, value).getTime()); + } else if (type == Calendar.class) { + GregorianCalendar calendar = new GregorianCalendar(); + calendar.setGregorianChange(PROLEPTIC_GREGORIAN_CHANGE); + calendar.setTime(toTimestamp(provider, calendar.getTimeZone(), value)); + return (T) calendar; + } else { + return null; + } + } + + /** + * Get the type information for the given legacy Java class. + * + * @param clazz + * the Java class + * @return the value type, or {@code null} if not supported + */ + public static TypeInfo legacyClassToType(Class clazz) { + if (Date.class.isAssignableFrom(clazz)) { + return TypeInfo.TYPE_DATE; + } else if (Time.class.isAssignableFrom(clazz)) { + return TypeInfo.TYPE_TIME; + } else if (java.util.Date.class.isAssignableFrom(clazz) || Calendar.class.isAssignableFrom(clazz)) { + return TypeInfo.TYPE_TIMESTAMP; + } else{ + return null; + } + } + +} diff --git a/h2/src/main/org/h2/util/LocalDateTimeUtils.java b/h2/src/main/org/h2/util/LocalDateTimeUtils.java deleted file mode 100644 index 66ac19c8c0..0000000000 --- a/h2/src/main/org/h2/util/LocalDateTimeUtils.java +++ /dev/null @@ -1,701 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - * Iso8601: Initial Developer: Philippe Marschall (firstName dot lastName - * at gmail dot com) - */ -package org.h2.util; - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.math.BigInteger; -import java.sql.Timestamp; -import java.util.Arrays; -import java.util.concurrent.TimeUnit; -import org.h2.api.ErrorCode; -import org.h2.api.IntervalQualifier; -import org.h2.message.DbException; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueDate; -import org.h2.value.ValueInterval; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; -import org.h2.value.ValueTimestampTimeZone; - -/** - * This utility class contains time conversion functions for Java 8 - * Date and Time API classes. - * - *

          This class is implemented using reflection so that it compiles on - * Java 7 as well.

          - * - *

          Custom conversion methods between H2 internal values and JSR-310 classes - * are used in most cases without intermediate conversions to java.sql classes. - * Direct conversion is simpler, faster, and it does not inherit limitations - * and issues from java.sql classes and conversion methods provided by JDK.

          - * - *

          The only one exclusion is a conversion between {@link Timestamp} and - * Instant.

          - * - *

          Once the driver requires Java 8 and Android API 26 all the reflection - * can be removed.

          - */ -public class LocalDateTimeUtils { - - /** - * {@code Class} or {@code null}. - */ - public static final Class LOCAL_DATE; - /** - * {@code Class} or {@code null}. - */ - public static final Class LOCAL_TIME; - /** - * {@code Class} or {@code null}. - */ - public static final Class LOCAL_DATE_TIME; - /** - * {@code Class} or {@code null}. - */ - public static final Class INSTANT; - /** - * {@code Class} or {@code null}. - */ - public static final Class OFFSET_DATE_TIME; - - /** - * {@code Class} or {@code null}. - */ - private static final Class ZONE_OFFSET; - - /** - * {@code Class} or {@code null}. - */ - public static final Class PERIOD; - - /** - * {@code Class} or {@code null}. - */ - public static final Class DURATION; - - /** - * {@code java.time.LocalTime#ofNanoOfDay()} or {@code null}. - */ - private static final Method LOCAL_TIME_OF_NANO; - - /** - * {@code java.time.LocalTime#toNanoOfDay()} or {@code null}. - */ - private static final Method LOCAL_TIME_TO_NANO; - - /** - * {@code java.time.LocalDate#of(int, int, int)} or {@code null}. - */ - private static final Method LOCAL_DATE_OF_YEAR_MONTH_DAY; - /** - * {@code java.time.LocalDate#getYear()} or {@code null}. - */ - private static final Method LOCAL_DATE_GET_YEAR; - /** - * {@code java.time.LocalDate#getMonthValue()} or {@code null}. - */ - private static final Method LOCAL_DATE_GET_MONTH_VALUE; - /** - * {@code java.time.LocalDate#getDayOfMonth()} or {@code null}. - */ - private static final Method LOCAL_DATE_GET_DAY_OF_MONTH; - /** - * {@code java.time.LocalDate#atStartOfDay()} or {@code null}. - */ - private static final Method LOCAL_DATE_AT_START_OF_DAY; - - /** - * {@code java.time.Instant#getEpochSecond()} or {@code null}. - */ - private static final Method INSTANT_GET_EPOCH_SECOND; - /** - * {@code java.time.Instant#getNano()} or {@code null}. - */ - private static final Method INSTANT_GET_NANO; - /** - * {@code java.sql.Timestamp.toInstant()} or {@code null}. 
- */ - private static final Method TIMESTAMP_TO_INSTANT; - - /** - * {@code java.time.LocalDateTime#plusNanos(long)} or {@code null}. - */ - private static final Method LOCAL_DATE_TIME_PLUS_NANOS; - /** - * {@code java.time.LocalDateTime#toLocalDate()} or {@code null}. - */ - private static final Method LOCAL_DATE_TIME_TO_LOCAL_DATE; - /** - * {@code java.time.LocalDateTime#toLocalTime()} or {@code null}. - */ - private static final Method LOCAL_DATE_TIME_TO_LOCAL_TIME; - - /** - * {@code java.time.ZoneOffset#ofTotalSeconds(int)} or {@code null}. - */ - private static final Method ZONE_OFFSET_OF_TOTAL_SECONDS; - - /** - * {@code java.time.OffsetDateTime#of(LocalDateTime, ZoneOffset)} or - * {@code null}. - */ - private static final Method OFFSET_DATE_TIME_OF_LOCAL_DATE_TIME_ZONE_OFFSET; - /** - * {@code java.time.OffsetDateTime#toLocalDateTime()} or {@code null}. - */ - private static final Method OFFSET_DATE_TIME_TO_LOCAL_DATE_TIME; - /** - * {@code java.time.OffsetDateTime#getOffset()} or {@code null}. - */ - private static final Method OFFSET_DATE_TIME_GET_OFFSET; - - /** - * {@code java.time.ZoneOffset#getTotalSeconds()} or {@code null}. - */ - private static final Method ZONE_OFFSET_GET_TOTAL_SECONDS; - - /** - * {@code java.time.Period#of(int, int, int)} or {@code null}. - */ - private static final Method PERIOD_OF; - - /** - * {@code java.time.Period#getYears()} or {@code null}. - */ - private static final Method PERIOD_GET_YEARS; - - /** - * {@code java.time.Period#getMonths()} or {@code null}. - */ - private static final Method PERIOD_GET_MONTHS; - - /** - * {@code java.time.Period#getDays()} or {@code null}. - */ - private static final Method PERIOD_GET_DAYS; - - /** - * {@code java.time.Duration#ofSeconds(long, long)} or {@code null}. - */ - private static final Method DURATION_OF_SECONDS; - - /** - * {@code java.time.Duration#getSeconds()} or {@code null}. 
- */ - private static final Method DURATION_GET_SECONDS; - - /** - * {@code java.time.Duration#getNano()} or {@code null}. - */ - private static final Method DURATION_GET_NANO; - - private static final boolean IS_JAVA8_DATE_API_PRESENT; - - static { - LOCAL_DATE = tryGetClass("java.time.LocalDate"); - LOCAL_TIME = tryGetClass("java.time.LocalTime"); - LOCAL_DATE_TIME = tryGetClass("java.time.LocalDateTime"); - INSTANT = tryGetClass("java.time.Instant"); - OFFSET_DATE_TIME = tryGetClass("java.time.OffsetDateTime"); - ZONE_OFFSET = tryGetClass("java.time.ZoneOffset"); - PERIOD = tryGetClass("java.time.Period"); - DURATION = tryGetClass("java.time.Duration"); - IS_JAVA8_DATE_API_PRESENT = LOCAL_DATE != null && LOCAL_TIME != null && - LOCAL_DATE_TIME != null && INSTANT != null && - OFFSET_DATE_TIME != null && ZONE_OFFSET != null && PERIOD != null && DURATION != null; - - if (IS_JAVA8_DATE_API_PRESENT) { - LOCAL_TIME_OF_NANO = getMethod(LOCAL_TIME, "ofNanoOfDay", long.class); - - LOCAL_TIME_TO_NANO = getMethod(LOCAL_TIME, "toNanoOfDay"); - - LOCAL_DATE_OF_YEAR_MONTH_DAY = getMethod(LOCAL_DATE, "of", - int.class, int.class, int.class); - LOCAL_DATE_GET_YEAR = getMethod(LOCAL_DATE, "getYear"); - LOCAL_DATE_GET_MONTH_VALUE = getMethod(LOCAL_DATE, "getMonthValue"); - LOCAL_DATE_GET_DAY_OF_MONTH = getMethod(LOCAL_DATE, "getDayOfMonth"); - LOCAL_DATE_AT_START_OF_DAY = getMethod(LOCAL_DATE, "atStartOfDay"); - - INSTANT_GET_EPOCH_SECOND = getMethod(INSTANT, "getEpochSecond"); - INSTANT_GET_NANO = getMethod(INSTANT, "getNano"); - TIMESTAMP_TO_INSTANT = getMethod(Timestamp.class, "toInstant"); - - LOCAL_DATE_TIME_PLUS_NANOS = getMethod(LOCAL_DATE_TIME, "plusNanos", long.class); - LOCAL_DATE_TIME_TO_LOCAL_DATE = getMethod(LOCAL_DATE_TIME, "toLocalDate"); - LOCAL_DATE_TIME_TO_LOCAL_TIME = getMethod(LOCAL_DATE_TIME, "toLocalTime"); - - ZONE_OFFSET_OF_TOTAL_SECONDS = getMethod(ZONE_OFFSET, "ofTotalSeconds", int.class); - - OFFSET_DATE_TIME_TO_LOCAL_DATE_TIME = 
getMethod(OFFSET_DATE_TIME, "toLocalDateTime"); - OFFSET_DATE_TIME_GET_OFFSET = getMethod(OFFSET_DATE_TIME, "getOffset"); - OFFSET_DATE_TIME_OF_LOCAL_DATE_TIME_ZONE_OFFSET = getMethod( - OFFSET_DATE_TIME, "of", LOCAL_DATE_TIME, ZONE_OFFSET); - - ZONE_OFFSET_GET_TOTAL_SECONDS = getMethod(ZONE_OFFSET, "getTotalSeconds"); - - PERIOD_OF = getMethod(PERIOD, "of", int.class, int.class, int.class); - PERIOD_GET_YEARS = getMethod(PERIOD, "getYears"); - PERIOD_GET_MONTHS = getMethod(PERIOD, "getMonths"); - PERIOD_GET_DAYS = getMethod(PERIOD, "getDays"); - - DURATION_OF_SECONDS = getMethod(DURATION, "ofSeconds", long.class, long.class); - DURATION_GET_SECONDS = getMethod(DURATION, "getSeconds"); - DURATION_GET_NANO = getMethod(DURATION, "getNano"); - } else { - LOCAL_TIME_OF_NANO = null; - LOCAL_TIME_TO_NANO = null; - LOCAL_DATE_OF_YEAR_MONTH_DAY = null; - LOCAL_DATE_GET_YEAR = null; - LOCAL_DATE_GET_MONTH_VALUE = null; - LOCAL_DATE_GET_DAY_OF_MONTH = null; - LOCAL_DATE_AT_START_OF_DAY = null; - INSTANT_GET_EPOCH_SECOND = null; - INSTANT_GET_NANO = null; - TIMESTAMP_TO_INSTANT = null; - LOCAL_DATE_TIME_PLUS_NANOS = null; - LOCAL_DATE_TIME_TO_LOCAL_DATE = null; - LOCAL_DATE_TIME_TO_LOCAL_TIME = null; - ZONE_OFFSET_OF_TOTAL_SECONDS = null; - OFFSET_DATE_TIME_TO_LOCAL_DATE_TIME = null; - OFFSET_DATE_TIME_GET_OFFSET = null; - OFFSET_DATE_TIME_OF_LOCAL_DATE_TIME_ZONE_OFFSET = null; - ZONE_OFFSET_GET_TOTAL_SECONDS = null; - PERIOD_OF = null; - PERIOD_GET_YEARS = null; - PERIOD_GET_MONTHS = null; - PERIOD_GET_DAYS = null; - DURATION_OF_SECONDS = null; - DURATION_GET_SECONDS = null; - DURATION_GET_NANO = null; - } - } - - private LocalDateTimeUtils() { - // utility class - } - - /** - * Checks if the Java 8 Date and Time API is present. - * - *

          This is the case on Java 8 and later and not the case on - * Java 7. Versions older than Java 7 are not supported.

          - * - * @return if the Java 8 Date and Time API is present - */ - public static boolean isJava8DateApiPresent() { - return IS_JAVA8_DATE_API_PRESENT; - } - - private static Class tryGetClass(String className) { - try { - return Class.forName(className); - } catch (ClassNotFoundException e) { - return null; - } - } - - private static Method getMethod(Class clazz, String methodName, - Class... parameterTypes) { - try { - return clazz.getMethod(methodName, parameterTypes); - } catch (NoSuchMethodException e) { - throw new IllegalStateException("Java 8 or later but method " + - clazz.getName() + "#" + methodName + "(" + - Arrays.toString(parameterTypes) + ") is missing", e); - } - } - - /** - * Converts a value to a LocalDate. - * - *

          This method should only called from Java 8 or later.

          - * - * @param value the value to convert - * @return the LocalDate - */ - public static Object valueToLocalDate(Value value) { - try { - return localDateFromDateValue(((ValueDate) value.convertTo(Value.DATE)).getDateValue()); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "date conversion failed"); - } - } - - /** - * Converts a value to a LocalTime. - * - *

          This method should only called from Java 8 or later.

          - * - * @param value the value to convert - * @return the LocalTime - */ - public static Object valueToLocalTime(Value value) { - try { - return LOCAL_TIME_OF_NANO.invoke(null, - ((ValueTime) value.convertTo(Value.TIME)).getNanos()); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "time conversion failed"); - } - } - - /** - * Converts a value to a LocalDateTime. - * - *

          This method should only called from Java 8 or later.

          - * - * @param value the value to convert - * @return the LocalDateTime - */ - public static Object valueToLocalDateTime(Value value) { - ValueTimestamp valueTimestamp = (ValueTimestamp) value.convertTo(Value.TIMESTAMP); - long dateValue = valueTimestamp.getDateValue(); - long timeNanos = valueTimestamp.getTimeNanos(); - try { - return localDateTimeFromDateNanos(dateValue, timeNanos); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "timestamp conversion failed"); - } - } - - /** - * Converts a value to a Instant. - * - *

          This method should only called from Java 8 or later.

          - * - * @param value the value to convert - * @return the Instant - */ - public static Object valueToInstant(Value value) { - try { - return TIMESTAMP_TO_INSTANT.invoke(value.getTimestamp()); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "timestamp conversion failed"); - } - } - - /** - * Converts a value to a OffsetDateTime. - * - *

          This method should only called from Java 8 or later.

          - * - * @param value the value to convert - * @return the OffsetDateTime - */ - public static Object valueToOffsetDateTime(Value value) { - ValueTimestampTimeZone valueTimestampTimeZone = (ValueTimestampTimeZone) value.convertTo(Value.TIMESTAMP_TZ); - long dateValue = valueTimestampTimeZone.getDateValue(); - long timeNanos = valueTimestampTimeZone.getTimeNanos(); - try { - Object localDateTime = localDateTimeFromDateNanos(dateValue, timeNanos); - - short timeZoneOffsetMins = valueTimestampTimeZone.getTimeZoneOffsetMins(); - int offsetSeconds = (int) TimeUnit.MINUTES.toSeconds(timeZoneOffsetMins); - - Object offset = ZONE_OFFSET_OF_TOTAL_SECONDS.invoke(null, offsetSeconds); - - return OFFSET_DATE_TIME_OF_LOCAL_DATE_TIME_ZONE_OFFSET.invoke(null, - localDateTime, offset); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "timestamp with time zone conversion failed"); - } - } - - /** - * Converts a value to a Period. - * - *

          This method should only called from Java 8 or later.

          - * - * @param value the value to convert - * @return the Period - */ - public static Object valueToPeriod(Value value) { - if (!(value instanceof ValueInterval)) { - value = value.convertTo(Value.INTERVAL_YEAR_TO_MONTH); - } - if (!DataType.isYearMonthIntervalType(value.getValueType())) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, (Throwable) null, value.getString()); - } - ValueInterval v = (ValueInterval) value; - IntervalQualifier qualifier = v.getQualifier(); - boolean negative = v.isNegative(); - long leading = v.getLeading(); - long remaining = v.getRemaining(); - int y = Value.convertToInt(IntervalUtils.yearsFromInterval(qualifier, negative, leading, remaining), null); - int m = Value.convertToInt(IntervalUtils.monthsFromInterval(qualifier, negative, leading, remaining), null); - try { - return PERIOD_OF.invoke(null, y, m, 0); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "timestamp with time zone conversion failed"); - } - } - - /** - * Converts a value to a Duration. - * - *

          This method should only called from Java 8 or later.

          - * - * @param value the value to convert - * @return the Duration - */ - public static Object valueToDuration(Value value) { - if (!(value instanceof ValueInterval)) { - value = value.convertTo(Value.INTERVAL_DAY_TO_SECOND); - } - if (DataType.isYearMonthIntervalType(value.getValueType())) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, (Throwable) null, value.getString()); - } - BigInteger[] dr = IntervalUtils.intervalToAbsolute((ValueInterval) value) - .divideAndRemainder(BigInteger.valueOf(1_000_000_000)); - try { - return DURATION_OF_SECONDS.invoke(null, dr[0].longValue(), dr[1].longValue()); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "timestamp with time zone conversion failed"); - } - } - - /** - * Converts a LocalDate to a Value. - * - * @param localDate the LocalDate to convert, not {@code null} - * @return the value - */ - public static Value localDateToDateValue(Object localDate) { - try { - return ValueDate.fromDateValue(dateValueFromLocalDate(localDate)); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "date conversion failed"); - } - } - - /** - * Converts a LocalTime to a Value. - * - * @param localTime the LocalTime to convert, not {@code null} - * @return the value - */ - public static Value localTimeToTimeValue(Object localTime) { - try { - return ValueTime.fromNanos((Long) LOCAL_TIME_TO_NANO.invoke(localTime)); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "time conversion failed"); - } - } - - /** - * Converts a LocalDateTime to a Value. 
- * - * @param localDateTime the LocalDateTime to convert, not {@code null} - * @return the value - */ - public static Value localDateTimeToValue(Object localDateTime) { - try { - Object localDate = LOCAL_DATE_TIME_TO_LOCAL_DATE.invoke(localDateTime); - long dateValue = dateValueFromLocalDate(localDate); - long timeNanos = timeNanosFromLocalDateTime(localDateTime); - return ValueTimestamp.fromDateValueAndNanos(dateValue, timeNanos); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "local date time conversion failed"); - } - } - - /** - * Converts a Instant to a Value. - * - * @param instant the Instant to convert, not {@code null} - * @return the value - */ - public static Value instantToValue(Object instant) { - try { - long epochSecond = (long) INSTANT_GET_EPOCH_SECOND.invoke(instant); - int nano = (int) INSTANT_GET_NANO.invoke(instant); - long absoluteDay = epochSecond / 86_400; - // Round toward negative infinity - if (epochSecond < 0 && (absoluteDay * 86_400 != epochSecond)) { - absoluteDay--; - } - long timeNanos = (epochSecond - absoluteDay * 86_400) * 1_000_000_000 + nano; - return ValueTimestampTimeZone.fromDateValueAndNanos( - DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay), timeNanos, (short) 0); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "instant conversion failed"); - } - } - - /** - * Converts a OffsetDateTime to a Value. 
- * - * @param offsetDateTime the OffsetDateTime to convert, not {@code null} - * @return the value - */ - public static ValueTimestampTimeZone offsetDateTimeToValue(Object offsetDateTime) { - try { - Object localDateTime = OFFSET_DATE_TIME_TO_LOCAL_DATE_TIME.invoke(offsetDateTime); - Object localDate = LOCAL_DATE_TIME_TO_LOCAL_DATE.invoke(localDateTime); - Object zoneOffset = OFFSET_DATE_TIME_GET_OFFSET.invoke(offsetDateTime); - - long dateValue = dateValueFromLocalDate(localDate); - long timeNanos = timeNanosFromLocalDateTime(localDateTime); - short timeZoneOffsetMins = zoneOffsetToOffsetMinute(zoneOffset); - return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, - timeNanos, timeZoneOffsetMins); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "time conversion failed"); - } - } - - private static long dateValueFromLocalDate(Object localDate) - throws IllegalAccessException, InvocationTargetException { - int year = (Integer) LOCAL_DATE_GET_YEAR.invoke(localDate); - int month = (Integer) LOCAL_DATE_GET_MONTH_VALUE.invoke(localDate); - int day = (Integer) LOCAL_DATE_GET_DAY_OF_MONTH.invoke(localDate); - return DateTimeUtils.dateValue(year, month, day); - } - - private static long timeNanosFromLocalDateTime(Object localDateTime) - throws IllegalAccessException, InvocationTargetException { - Object localTime = LOCAL_DATE_TIME_TO_LOCAL_TIME.invoke(localDateTime); - return (Long) LOCAL_TIME_TO_NANO.invoke(localTime); - } - - private static short zoneOffsetToOffsetMinute(Object zoneOffset) - throws IllegalAccessException, InvocationTargetException { - int totalSeconds = (Integer) ZONE_OFFSET_GET_TOTAL_SECONDS.invoke(zoneOffset); - return (short) TimeUnit.SECONDS.toMinutes(totalSeconds); - } - - private static Object localDateFromDateValue(long dateValue) - throws IllegalAccessException, InvocationTargetException { - - int year = 
DateTimeUtils.yearFromDateValue(dateValue); - int month = DateTimeUtils.monthFromDateValue(dateValue); - int day = DateTimeUtils.dayFromDateValue(dateValue); - try { - return LOCAL_DATE_OF_YEAR_MONTH_DAY.invoke(null, year, month, day); - } catch (InvocationTargetException e) { - if (year <= 1500 && (year & 3) == 0 && month == 2 && day == 29) { - // If proleptic Gregorian doesn't have such date use the next day - return LOCAL_DATE_OF_YEAR_MONTH_DAY.invoke(null, year, 3, 1); - } - throw e; - } - } - - private static Object localDateTimeFromDateNanos(long dateValue, long timeNanos) - throws IllegalAccessException, InvocationTargetException { - Object localDate = localDateFromDateValue(dateValue); - Object localDateTime = LOCAL_DATE_AT_START_OF_DAY.invoke(localDate); - return LOCAL_DATE_TIME_PLUS_NANOS.invoke(localDateTime, timeNanos); - } - - /** - * Converts a Period to a Value. - * - * @param period the Period to convert, not {@code null} - * @return the value - */ - public static ValueInterval periodToValue(Object period) { - try { - int days = (int) PERIOD_GET_DAYS.invoke(period); - if (days != 0) { - throw DbException.getInvalidValueException("Period.days", days); - } - int years = (int) PERIOD_GET_YEARS.invoke(period); - int months = (int) PERIOD_GET_MONTHS.invoke(period); - IntervalQualifier qualifier; - boolean negative = false; - long leading = 0L, remaining = 0L; - if (years == 0) { - if (months == 0L) { - // Use generic qualifier - qualifier = IntervalQualifier.YEAR_TO_MONTH; - } else { - qualifier = IntervalQualifier.MONTH; - leading = months; - if (leading < 0) { - leading = -leading; - negative = true; - } - } - } else { - if (months == 0L) { - qualifier = IntervalQualifier.YEAR; - leading = years; - if (leading < 0) { - leading = -leading; - negative = true; - } - } else { - qualifier = IntervalQualifier.YEAR_TO_MONTH; - leading = years * 12 + months; - if (leading < 0) { - leading = -leading; - negative = true; - } - remaining = leading % 12; - leading 
/= 12; - } - } - return ValueInterval.from(qualifier, negative, leading, remaining); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "interval conversion failed"); - } - } - - /** - * Converts a Duration to a Value. - * - * @param duration the Duration to convert, not {@code null} - * @return the value - */ - public static ValueInterval durationToValue(Object duration) { - try { - long seconds = (long) DURATION_GET_SECONDS.invoke(duration); - int nano = (int) DURATION_GET_NANO.invoke(duration); - boolean negative = seconds < 0; - seconds = Math.abs(seconds); - if (negative && nano != 0) { - nano = 1_000_000_000 - nano; - seconds--; - } - return ValueInterval.from(IntervalQualifier.SECOND, negative, seconds, nano); - } catch (IllegalAccessException e) { - throw DbException.convert(e); - } catch (InvocationTargetException e) { - throw DbException.convertInvocation(e, "interval conversion failed"); - } - } - -} diff --git a/h2/src/main/org/h2/util/MathUtils.java b/h2/src/main/org/h2/util/MathUtils.java index 61cb4a7e0a..2a84beb7ff 100644 --- a/h2/src/main/org/h2/util/MathUtils.java +++ b/h2/src/main/org/h2/util/MathUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -21,7 +21,7 @@ public class MathUtils { /** * The secure random object. */ - static SecureRandom cachedSecureRandom; + static SecureRandom secureRandom; /** * True if the secure random object is seeded. 
@@ -62,32 +62,29 @@ public static long roundUpLong(long x, long blockSizePowerOf2) { } private static synchronized SecureRandom getSecureRandom() { - if (cachedSecureRandom != null) { - return cachedSecureRandom; + if (secureRandom != null) { + return secureRandom; } // Workaround for SecureRandom problem as described in - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6202721 + // https://bugs.openjdk.java.net/browse/JDK-6202721 // Can not do that in a static initializer block, because // threads are not started until after the initializer block exits try { - cachedSecureRandom = SecureRandom.getInstance("SHA1PRNG"); + secureRandom = SecureRandom.getInstance("SHA1PRNG"); // On some systems, secureRandom.generateSeed() is very slow. // In this case it is initialized using our own seed implementation // and afterwards (in the thread) using the regular algorithm. - Runnable runnable = new Runnable() { - @Override - public void run() { - try { - SecureRandom sr = SecureRandom.getInstance("SHA1PRNG"); - byte[] seed = sr.generateSeed(20); - synchronized (cachedSecureRandom) { - cachedSecureRandom.setSeed(seed); - seeded = true; - } - } catch (Exception e) { - // NoSuchAlgorithmException - warn("SecureRandom", e); + Runnable runnable = () -> { + try { + SecureRandom sr = SecureRandom.getInstance("SHA1PRNG"); + byte[] seed = sr.generateSeed(20); + synchronized (secureRandom) { + secureRandom.setSeed(seed); + seeded = true; } + } catch (Exception e) { + // NoSuchAlgorithmException + warn("SecureRandom", e); } }; @@ -107,8 +104,8 @@ public void run() { if (!seeded) { byte[] seed = generateAlternativeSeed(); // this never reduces randomness - synchronized (cachedSecureRandom) { - cachedSecureRandom.setSeed(seed); + synchronized (secureRandom) { + secureRandom.setSeed(seed); } } } catch (SecurityException e) { @@ -120,9 +117,9 @@ public void run() { } catch (Exception e) { // NoSuchAlgorithmException warn("SecureRandom", e); - cachedSecureRandom = new SecureRandom(); + 
secureRandom = new SecureRandom(); } - return cachedSecureRandom; + return secureRandom; } /** @@ -219,27 +216,19 @@ static void warn(String s, Throwable t) { * * @param x the original value * @return the next power of two value - * @throws IllegalArgumentException if x < 0 or x > 0x40000000 + * @throws IllegalArgumentException if x < 0 or x > 0x40000000 */ public static int nextPowerOf2(int x) throws IllegalArgumentException { - if (x == 0) { - return 1; - } else if (x < 0 || x > 0x4000_0000 ) { + if (x + Integer.MIN_VALUE > (0x4000_0000 + Integer.MIN_VALUE)) { throw new IllegalArgumentException("Argument out of range" + " [0x0-0x40000000]. Argument was: " + x); } - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - return ++x; + return x <= 1 ? 1 : (-1 >>> Integer.numberOfLeadingZeros(x - 1)) + 1; } /** * Convert a long value to an int value. Values larger than the biggest int - * value is converted to the biggest int value, and values smaller than the + * value are converted to the biggest int value, and values smaller than the * smallest int value are converted to the smallest int value. * * @param l the value to convert @@ -255,6 +244,24 @@ public static int convertLongToInt(long l) { } } + /** + * Convert an int value to a short value. Values larger than the biggest + * short value are converted to the biggest short value, and values smaller + * than the smallest short value are converted to the smallest short value. + * + * @param i the value to convert + * @return the converted short value + */ + public static short convertIntToShort(int i) { + if (i <= Short.MIN_VALUE) { + return Short.MIN_VALUE; + } else if (i >= Short.MAX_VALUE) { + return Short.MAX_VALUE; + } else { + return (short) i; + } + } + /** * Get a cryptographically secure pseudo random long value. 
* diff --git a/h2/src/main/org/h2/util/MemoryEstimator.java b/h2/src/main/org/h2/util/MemoryEstimator.java new file mode 100644 index 0000000000..ed662a6ce4 --- /dev/null +++ b/h2/src/main/org/h2/util/MemoryEstimator.java @@ -0,0 +1,195 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util; + +import static org.h2.engine.Constants.MEMORY_POINTER; + +import java.util.concurrent.atomic.AtomicLong; + +import org.h2.mvstore.type.DataType; + +/** + * Class MemoryEstimator. + * + * Calculation of the amount of memory occupied by keys, values and pages of the MVTable + * may become expensive operation for complex data types like Row. + * On the other hand, result of the calculation is used by page cache to limit it's size + * and determine when eviction is needed. Another usage is to trigger auto commit, + * based on amount of unsaved changes. In both cases reasonable (lets say ~30%) approximation + * would be good enough and will do the job. + * This class replaces exact calculation with an estimate based on + * a sliding window average of last 256 values. + * If estimation gets close to the exact value, then next N calculations are skipped + * and replaced with the estimate, where N depends on the estimation error. 
+ * + * @author Andrei Tokar + */ +public final class MemoryEstimator { + + // Structure of statsData long value: + // 0 - 7 skip counter (how many more requests will skip calculation and use an estimate instead) + // 8 - 23 total number of skips between last 256 calculations + // (used for sampling percentage calculation only) + // 24 bit is 0 when window is not completely filled yet, 1 once it become full + // 25 - 31 unused + // 32 - 63 sliding window sum of estimated values + + private static final int SKIP_SUM_SHIFT = 8; + private static final int COUNTER_MASK = (1 << SKIP_SUM_SHIFT) - 1; + private static final int SKIP_SUM_MASK = 0xFFFF; + private static final int INIT_BIT_SHIFT = 24; + private static final int INIT_BIT = 1 << INIT_BIT_SHIFT; + private static final int WINDOW_SHIFT = 8; + private static final int MAGNITUDE_LIMIT = WINDOW_SHIFT - 1; + private static final int WINDOW_SIZE = 1 << WINDOW_SHIFT; + private static final int WINDOW_HALF_SIZE = WINDOW_SIZE >> 1; + private static final int SUM_SHIFT = 32; + + private MemoryEstimator() {} + + /** + * Estimates memory size of the data based on previous values. + * @param stats AtomicLong holding statistical data about the estimated sequence + * @param dataType used for calculation of the next sequence value, if necessary + * @param data which size is to be calculated as the next sequence value, if necessary + * @param type of the data + * @return next estimated or calculated value of the sequence + */ + public static int estimateMemory(AtomicLong stats, DataType dataType, T data) { + long statsData = stats.get(); + int counter = getCounter(statsData); + int skipSum = getSkipSum(statsData); + long initialized = statsData & INIT_BIT; + long sum = statsData >>> SUM_SHIFT; + int mem = 0; + int cnt = 0; + if (initialized == 0 || counter-- == 0) { + cnt = 1; + mem = data == null ? 
0 : dataType.getMemory(data); + long delta = ((long) mem << WINDOW_SHIFT) - sum; + if (initialized == 0) { + if (++counter == WINDOW_SIZE) { + initialized = INIT_BIT; + } + sum = (sum * counter + delta + (counter >> 1)) / counter; + } else { + long absDelta = delta >= 0 ? delta : -delta; + int magnitude = calculateMagnitude(sum, absDelta); + sum += ((delta >> (MAGNITUDE_LIMIT - magnitude)) + 1) >> 1; + counter = ((1 << magnitude) - 1) & COUNTER_MASK; + + delta = (counter << WINDOW_SHIFT) - skipSum; + skipSum += (delta + WINDOW_HALF_SIZE) >> WINDOW_SHIFT; + } + } + long updatedStatsData = updateStatsData(stats, statsData, counter, skipSum, initialized, sum, cnt, mem); + return getAverage(updatedStatsData); + } + + /** + * Estimates memory size of the data set based on previous values. + * @param stats AtomicLong holding statistical data about the estimated sequence + * @param dataType used for calculation of the next sequence value, if necessary + * @param storage of the data set, which size is to be calculated + * @param count number of data items in the storage + * @param type of the data in the storage + * @return next estimated or calculated size of the storage + */ + public static int estimateMemory(AtomicLong stats, DataType dataType, T[] storage, int count) { + long statsData = stats.get(); + int counter = getCounter(statsData); + int skipSum = getSkipSum(statsData); + long initialized = statsData & INIT_BIT; + long sum = statsData >>> SUM_SHIFT; + int index = 0; + int memSum = 0; + if (initialized != 0 && counter >= count) { + counter -= count; + } else { + int cnt = count; + while (cnt-- > 0) { + T data = storage[index++]; + int mem = data == null ? 0 : dataType.getMemory(data); + memSum += mem; + long delta = ((long) mem << WINDOW_SHIFT) - sum; + if (initialized == 0) { + if (++counter == WINDOW_SIZE) { + initialized = INIT_BIT; + } + sum = (sum * counter + delta + (counter >> 1)) / counter; + } else { + cnt -= counter; + long absDelta = delta >= 0 ? 
delta : -delta; + int magnitude = calculateMagnitude(sum, absDelta); + sum += ((delta >> (MAGNITUDE_LIMIT - magnitude)) + 1) >> 1; + counter += ((1 << magnitude) - 1) & COUNTER_MASK; + + delta = ((long) counter << WINDOW_SHIFT) - skipSum; + skipSum += (delta + WINDOW_HALF_SIZE) >> WINDOW_SHIFT; + } + } + } + long updatedStatsData = updateStatsData(stats, statsData, counter, skipSum, initialized, sum, index, memSum); + return (getAverage(updatedStatsData) + MEMORY_POINTER) * count; + } + + /** + * Calculates percentage of how many times actual calculation happened (vs. estimation) + * @param stats AtomicLong holding statistical data about the estimated sequence + * @return sampling percentage in range 0 - 100 + */ + public static int samplingPct(AtomicLong stats) { + long statsData = stats.get(); + int count = (statsData & INIT_BIT) == 0 ? getCounter(statsData) : WINDOW_SIZE; + int total = getSkipSum(statsData) + count; + return (count * 100 + (total >> 1)) / total; + } + + private static int calculateMagnitude(long sum, long absDelta) { + int magnitude = 0; + while (absDelta < sum && magnitude < MAGNITUDE_LIMIT) { + ++magnitude; + absDelta <<= 1; + } + return magnitude; + } + + private static long updateStatsData(AtomicLong stats, long statsData, + int counter, int skipSum, long initialized, long sum, + int itemsCount, int itemsMem) { + return updateStatsData(stats, statsData, + constructStatsData(sum, initialized, skipSum, counter), itemsCount, itemsMem); + } + + private static long constructStatsData(long sum, long initialized, int skipSum, int counter) { + return (sum << SUM_SHIFT) | initialized | ((long) skipSum << SKIP_SUM_SHIFT) | counter; + } + + private static long updateStatsData(AtomicLong stats, long statsData, long updatedStatsData, + int itemsCount, int itemsMem) { + while (!stats.compareAndSet(statsData, updatedStatsData)) { + statsData = stats.get(); + long sum = statsData >>> SUM_SHIFT; + if (itemsCount > 0) { + sum += itemsMem - ((sum * itemsCount 
+ WINDOW_HALF_SIZE) >> WINDOW_SHIFT); + } + updatedStatsData = (sum << SUM_SHIFT) | (statsData & (INIT_BIT | SKIP_SUM_MASK | COUNTER_MASK)); + } + return updatedStatsData; + } + + private static int getCounter(long statsData) { + return (int)(statsData & COUNTER_MASK); + } + + private static int getSkipSum(long statsData) { + return (int)((statsData >> SKIP_SUM_SHIFT) & SKIP_SUM_MASK); + } + + private static int getAverage(long updatedStatsData) { + return (int)(updatedStatsData >>> (SUM_SHIFT + WINDOW_SHIFT)); + } +} diff --git a/h2/src/main/org/h2/util/MemoryUnmapper.java b/h2/src/main/org/h2/util/MemoryUnmapper.java index 5f3da7e259..4994263d3b 100644 --- a/h2/src/main/org/h2/util/MemoryUnmapper.java +++ b/h2/src/main/org/h2/util/MemoryUnmapper.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -37,7 +37,7 @@ public final class MemoryUnmapper { // This method exists only on Java 9 and later versions invokeCleaner = clazz.getMethod("invokeCleaner", ByteBuffer.class); } catch (ReflectiveOperationException e) { - // Java 7 or 8 + // Java 8 unsafe = null; // invokeCleaner can be only null here } catch (Throwable e) { @@ -72,7 +72,7 @@ public static boolean unmap(ByteBuffer buffer) { INVOKE_CLEANER.invoke(UNSAFE, buffer); return true; } - // Java 7 or 8 + // Java 8 Method cleanerMethod = buffer.getClass().getMethod("cleaner"); cleanerMethod.setAccessible(true); Object cleaner = cleanerMethod.invoke(buffer); diff --git a/h2/src/main/org/h2/util/NetUtils.java b/h2/src/main/org/h2/util/NetUtils.java index 670b951b6e..972ff6f7e6 100644 --- a/h2/src/main/org/h2/util/NetUtils.java +++ b/h2/src/main/org/h2/util/NetUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -13,7 +13,6 @@ import java.net.ServerSocket; import java.net.Socket; import java.net.UnknownHostException; -import java.util.concurrent.TimeUnit; import org.h2.api.ErrorCode; import org.h2.engine.SysProperties; @@ -41,6 +40,7 @@ private NetUtils() { * @param port the port * @param ssl if SSL should be used * @return the socket + * @throws IOException on failure */ public static Socket createLoopbackSocket(int port, boolean ssl) throws IOException { @@ -65,9 +65,25 @@ public static Socket createLoopbackSocket(int port, boolean ssl) * address) * @param ssl if SSL should be used * @return the socket + * @throws IOException on failure + */ + public static Socket createSocket(String server, int defaultPort, boolean ssl) throws IOException { + return createSocket(server, defaultPort, ssl, 0); + } + + /** + * Create a client socket that is connected to the given address and port. 
+ * + * @param server to connect to (including an optional port) + * @param defaultPort the default port (if not specified in the server + * address) + * @param ssl if SSL should be used + * @param networkTimeout socket so timeout + * @return the socket + * @throws IOException on failure */ public static Socket createSocket(String server, int defaultPort, - boolean ssl) throws IOException { + boolean ssl, int networkTimeout) throws IOException { int port = defaultPort; // IPv6: RFC 2732 format is '[a:b:c:d:e:f:g:h]' or // '[a:b:c:d:e:f:g:h]:port' @@ -80,7 +96,7 @@ public static Socket createSocket(String server, int defaultPort, server = server.substring(0, idx); } InetAddress address = InetAddress.getByName(server); - return createSocket(address, port, ssl); + return createSocket(address, port, ssl, networkTimeout); } /** @@ -90,8 +106,23 @@ public static Socket createSocket(String server, int defaultPort, * @param port the port * @param ssl if SSL should be used * @return the socket + * @throws IOException on failure */ public static Socket createSocket(InetAddress address, int port, boolean ssl) + throws IOException { + return createSocket(address, port, ssl, 0); + } + /** + * Create a client socket that is connected to the given address and port. 
+ * + * @param address the address to connect to + * @param port the port + * @param ssl if SSL should be used + * @param networkTimeout socket so timeout + * @return the socket + * @throws IOException on failure + */ + public static Socket createSocket(InetAddress address, int port, boolean ssl, int networkTimeout) throws IOException { long start = System.nanoTime(); for (int i = 0;; i++) { @@ -100,12 +131,12 @@ public static Socket createSocket(InetAddress address, int port, boolean ssl) return CipherFactory.createSocket(address, port); } Socket socket = new Socket(); + socket.setSoTimeout(networkTimeout); socket.connect(new InetSocketAddress(address, port), SysProperties.SOCKET_CONNECT_TIMEOUT); return socket; } catch (IOException e) { - if (System.nanoTime() - start >= - TimeUnit.MILLISECONDS.toNanos(SysProperties.SOCKET_CONNECT_TIMEOUT)) { + if (System.nanoTime() - start >= SysProperties.SOCKET_CONNECT_TIMEOUT * 1_000_000L) { // either it was a connect timeout, // or list of different exceptions throw e; @@ -189,6 +220,7 @@ private static ServerSocket createServerSocketTry(int port, boolean ssl) { * * @param socket the socket * @return true if it is + * @throws UnknownHostException on failure */ public static boolean isLocalAddress(Socket socket) throws UnknownHostException { @@ -232,10 +264,8 @@ public static ServerSocket closeSilently(ServerSocket socket) { */ public static synchronized String getLocalAddress() { long now = System.nanoTime(); - if (cachedLocalAddress != null) { - if (cachedLocalAddressTime + TimeUnit.MILLISECONDS.toNanos(CACHE_MILLIS) > now) { - return cachedLocalAddress; - } + if (cachedLocalAddress != null && now - cachedLocalAddressTime < CACHE_MILLIS * 1_000_000L) { + return cachedLocalAddress; } InetAddress bind = null; boolean useLocalhost = false; @@ -292,4 +322,78 @@ public static String getHostName(String localAddress) { } } + /** + * Appends short representation of the specified IP address to the string + * builder. 
+ * + * @param builder + * string builder to append to, or {@code null} + * @param address + * IP address + * @param addBrackets + * if ({@code true}, add brackets around IPv6 addresses + * @return the specified or the new string builder with short representation + * of specified address + */ + public static StringBuilder ipToShortForm(StringBuilder builder, byte[] address, boolean addBrackets) { + switch (address.length) { + case 4: + if (builder == null) { + builder = new StringBuilder(15); + } + builder // + .append(address[0] & 0xff).append('.') // + .append(address[1] & 0xff).append('.') // + .append(address[2] & 0xff).append('.') // + .append(address[3] & 0xff); + break; + case 16: + short[] a = new short[8]; + int maxStart = 0, maxLen = 0, currentLen = 0; + for (int i = 0, offset = 0; i < 8; i++) { + if ((a[i] = (short) ((address[offset++] & 0xff) << 8 | address[offset++] & 0xff)) == 0) { + currentLen++; + if (currentLen > maxLen) { + maxLen = currentLen; + maxStart = i - currentLen + 1; + } + } else { + currentLen = 0; + } + } + if (builder == null) { + builder = new StringBuilder(addBrackets ? 
41 : 39); + } + if (addBrackets) { + builder.append('['); + } + int start; + if (maxLen > 1) { + for (int i = 0; i < maxStart; i++) { + builder.append(Integer.toHexString(a[i] & 0xffff)).append(':'); + } + if (maxStart == 0) { + builder.append(':'); + } + builder.append(':'); + start = maxStart + maxLen; + } else { + start = 0; + } + for (int i = start; i < 8; i++) { + builder.append(Integer.toHexString(a[i] & 0xffff)); + if (i < 7) { + builder.append(':'); + } + } + if (addBrackets) { + builder.append(']'); + } + break; + default: + StringUtils.convertBytesToHex(builder, address); + } + return builder; + } + } diff --git a/h2/src/main/org/h2/util/NetworkConnectionInfo.java b/h2/src/main/org/h2/util/NetworkConnectionInfo.java new file mode 100644 index 0000000000..d562dc7864 --- /dev/null +++ b/h2/src/main/org/h2/util/NetworkConnectionInfo.java @@ -0,0 +1,104 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util; + +import java.net.InetAddress; +import java.net.UnknownHostException; + +/** + * Network connection information. + */ +public final class NetworkConnectionInfo { + + private final String server; + + private final byte[] clientAddr; + + private final int clientPort; + + private final String clientInfo; + + /** + * Creates new instance of network connection information. + * + * @param server + * the protocol and port of the server + * @param clientAddr + * the client address + * @param clientPort + * the client port + * @throws UnknownHostException + * if clientAddr cannot be resolved + */ + public NetworkConnectionInfo(String server, String clientAddr, int clientPort) throws UnknownHostException { + this(server, InetAddress.getByName(clientAddr).getAddress(), clientPort, null); + } + + /** + * Creates new instance of network connection information. 
+ * + * @param server + * the protocol and port of the server + * @param clientAddr + * the client address + * @param clientPort + * the client port + * @param clientInfo + * additional client information, or {@code null} + */ + public NetworkConnectionInfo(String server, byte[] clientAddr, int clientPort, String clientInfo) { + this.server = server; + this.clientAddr = clientAddr; + this.clientPort = clientPort; + this.clientInfo = clientInfo; + } + + /** + * Returns the protocol and port of the server. + * + * @return the protocol and port of the server + */ + public String getServer() { + return server; + } + + /** + * Returns the client address. + * + * @return the client address + */ + public byte[] getClientAddr() { + return clientAddr; + } + + /** + * Returns the client port. + * + * @return the client port + */ + public int getClientPort() { + return clientPort; + } + + /** + * Returns additional client information, or {@code null}. + * + * @return additional client information, or {@code null} + */ + public String getClientInfo() { + return clientInfo; + } + + /** + * Returns the client address and port. + * + * @return the client address and port + */ + public String getClient() { + return NetUtils.ipToShortForm(new StringBuilder(), clientAddr, true).append(':').append(clientPort).toString(); + } + +} diff --git a/h2/src/main/org/h2/util/OsgiDataSourceFactory.java b/h2/src/main/org/h2/util/OsgiDataSourceFactory.java index 9b91d4b87a..002f605db5 100644 --- a/h2/src/main/org/h2/util/OsgiDataSourceFactory.java +++ b/h2/src/main/org/h2/util/OsgiDataSourceFactory.java @@ -1,12 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; +import java.util.Hashtable; import java.util.Properties; import javax.sql.ConnectionPoolDataSource; import javax.sql.DataSource; @@ -288,7 +289,7 @@ private static void rejectPoolingOptions(Properties p) */ static void registerService(BundleContext bundleContext, org.h2.Driver driver) { - Properties properties = new Properties(); + Hashtable properties = new Hashtable<>(); properties.put( DataSourceFactory.OSGI_JDBC_DRIVER_CLASS, org.h2.Driver.class.getName()); @@ -297,7 +298,7 @@ static void registerService(BundleContext bundleContext, "H2 JDBC Driver"); properties.put( DataSourceFactory.OSGI_JDBC_DRIVER_VERSION, - Constants.getFullVersion()); + Constants.FULL_VERSION); bundleContext.registerService( DataSourceFactory.class.getName(), new OsgiDataSourceFactory(driver), properties); diff --git a/h2/src/main/org/h2/util/ParserUtil.java b/h2/src/main/org/h2/util/ParserUtil.java index bd76b23c44..95498a4a26 100644 --- a/h2/src/main/org/h2/util/ParserUtil.java +++ b/h2/src/main/org/h2/util/ParserUtil.java @@ -1,10 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; +import java.util.HashMap; + public class ParserUtil { /** @@ -17,25 +19,62 @@ public class ParserUtil { */ public static final int IDENTIFIER = 2; + // Constants below must be sorted + /** * The token "ALL". */ public static final int ALL = IDENTIFIER + 1; + /** + * The token "AND". + */ + public static final int AND = ALL + 1; + + /** + * The token "ANY". + */ + public static final int ANY = AND + 1; + /** * The token "ARRAY". 
*/ - public static final int ARRAY = ALL + 1; + public static final int ARRAY = ANY + 1; + + /** + * The token "AS". + */ + public static final int AS = ARRAY + 1; + + /** + * The token "ASYMMETRIC". + */ + public static final int ASYMMETRIC = AS + 1; + + /** + * The token "AUTHORIZATION". + */ + public static final int AUTHORIZATION = ASYMMETRIC + 1; + + /** + * The token "BETWEEN". + */ + public static final int BETWEEN = AUTHORIZATION + 1; /** * The token "CASE". */ - public static final int CASE = ARRAY + 1; + public static final int CASE = BETWEEN + 1; + + /** + * The token "CAST". + */ + public static final int CAST = CASE + 1; /** * The token "CHECK". */ - public static final int CHECK = CASE + 1; + public static final int CHECK = CAST + 1; /** * The token "CONSTRAINT". @@ -47,15 +86,35 @@ public class ParserUtil { */ public static final int CROSS = CONSTRAINT + 1; + /** + * The token "CURRENT_CATALOG". + */ + public static final int CURRENT_CATALOG = CROSS + 1; + /** * The token "CURRENT_DATE". */ - public static final int CURRENT_DATE = CROSS + 1; + public static final int CURRENT_DATE = CURRENT_CATALOG + 1; + + /** + * The token "CURRENT_PATH". + */ + public static final int CURRENT_PATH = CURRENT_DATE + 1; + + /** + * The token "CURRENT_ROLE". + */ + public static final int CURRENT_ROLE = CURRENT_PATH + 1; + + /** + * The token "CURRENT_SCHEMA". + */ + public static final int CURRENT_SCHEMA = CURRENT_ROLE + 1; /** * The token "CURRENT_TIME". */ - public static final int CURRENT_TIME = CURRENT_DATE + 1; + public static final int CURRENT_TIME = CURRENT_SCHEMA + 1; /** * The token "CURRENT_TIMESTAMP". @@ -67,15 +126,35 @@ public class ParserUtil { */ public static final int CURRENT_USER = CURRENT_TIMESTAMP + 1; + /** + * The token "DAY". + */ + public static final int DAY = CURRENT_USER + 1; + + /** + * The token "DEFAULT". + */ + public static final int DEFAULT = DAY + 1; + /** * The token "DISTINCT". 
*/ - public static final int DISTINCT = CURRENT_USER + 1; + public static final int DISTINCT = DEFAULT + 1; + + /** + * The token "ELSE". + */ + public static final int ELSE = DISTINCT + 1; + + /** + * The token "END". + */ + public static final int END = ELSE + 1; /** * The token "EXCEPT". */ - public static final int EXCEPT = DISTINCT + 1; + public static final int EXCEPT = END + 1; /** * The token "EXISTS". @@ -122,30 +201,35 @@ public class ParserUtil { */ public static final int HAVING = GROUP + 1; + /** + * The token "HOUR". + */ + public static final int HOUR = HAVING + 1; + /** * The token "IF". */ - public static final int IF = HAVING + 1; + public static final int IF = HOUR + 1; /** - * The token "INNER". + * The token "IN". */ - public static final int INNER = IF + 1; + public static final int IN = IF + 1; /** - * The token "INTERSECT". + * The token "INNER". */ - public static final int INTERSECT = INNER + 1; + public static final int INNER = IN + 1; /** - * The token "INTERSECTS". + * The token "INTERSECT". */ - public static final int INTERSECTS = INTERSECT + 1; + public static final int INTERSECT = INNER + 1; /** * The token "INTERVAL". */ - public static final int INTERVAL = INTERSECTS + 1; + public static final int INTERVAL = INTERSECT + 1; /** * The token "IS". @@ -157,10 +241,20 @@ public class ParserUtil { */ public static final int JOIN = IS + 1; + /** + * The token "KEY". + */ + public static final int KEY = JOIN + 1; + + /** + * The token "LEFT". + */ + public static final int LEFT = KEY + 1; + /** * The token "LIKE". */ - public static final int LIKE = JOIN + 1; + public static final int LIKE = LEFT + 1; /** * The token "LIMIT". @@ -182,10 +276,20 @@ public class ParserUtil { */ public static final int MINUS = LOCALTIMESTAMP + 1; + /** + * The token "MINUTE". + */ + public static final int MINUTE = MINUS + 1; + + /** + * The token "MONTH". + */ + public static final int MONTH = MINUTE + 1; + /** * The token "NATURAL". 
*/ - public static final int NATURAL = MINUS + 1; + public static final int NATURAL = MONTH + 1; /** * The token "NOT". @@ -207,10 +311,15 @@ public class ParserUtil { */ public static final int ON = OFFSET + 1; + /** + * The token "OR". + */ + public static final int OR = ON + 1; + /** * The token "ORDER". */ - public static final int ORDER = ON + 1; + public static final int ORDER = OR + 1; /** * The token "PRIMARY". @@ -223,54 +332,119 @@ public class ParserUtil { public static final int QUALIFY = PRIMARY + 1; /** - * The token "ROW". + * The token "RIGHT". */ - public static final int ROW = QUALIFY + 1; + public static final int RIGHT = QUALIFY + 1; /** - * The token "_ROWID_". + * The token "ROW". */ - public static final int _ROWID_ = ROW + 1; + public static final int ROW = RIGHT + 1; /** * The token "ROWNUM". */ - public static final int ROWNUM = _ROWID_ + 1; + public static final int ROWNUM = ROW + 1; + + /** + * The token "SECOND". + */ + public static final int SECOND = ROWNUM + 1; /** * The token "SELECT". */ - public static final int SELECT = ROWNUM + 1; + public static final int SELECT = SECOND + 1; + + /** + * The token "SESSION_USER". + */ + public static final int SESSION_USER = SELECT + 1; + + /** + * The token "SET". + */ + public static final int SET = SESSION_USER + 1; + + /** + * The token "SOME". + */ + public static final int SOME = SET + 1; + + /** + * The token "SYMMETRIC". + */ + public static final int SYMMETRIC = SOME + 1; + + /** + * The token "SYSTEM_USER". + */ + public static final int SYSTEM_USER = SYMMETRIC + 1; /** * The token "TABLE". */ - public static final int TABLE = SELECT + 1; + public static final int TABLE = SYSTEM_USER + 1; + + /** + * The token "TO". + */ + public static final int TO = TABLE + 1; /** * The token "TRUE". */ - public static final int TRUE = TABLE + 1; + public static final int TRUE = TO + 1; + + /** + * The token "UESCAPE". + */ + public static final int UESCAPE = TRUE + 1; /** * The token "UNION". 
*/ - public static final int UNION = TRUE + 1; + public static final int UNION = UESCAPE + 1; /** * The token "UNIQUE". */ public static final int UNIQUE = UNION + 1; + /** + * The token "UNKNOWN". + */ + public static final int UNKNOWN = UNIQUE + 1; + + /** + * The token "USER". + */ + public static final int USER = UNKNOWN + 1; + + /** + * The token "USING". + */ + public static final int USING = USER + 1; + + /** + * The token "VALUE". + */ + public static final int VALUE = USING + 1; + /** * The token "VALUES". */ - public static final int VALUES = UNIQUE + 1; + public static final int VALUES = VALUE + 1; + + /** + * The token "WHEN". + */ + public static final int WHEN = VALUES + 1; /** * The token "WHERE". */ - public static final int WHERE = VALUES + 1; + public static final int WHERE = WHEN + 1; /** * The token "WINDOW". @@ -282,39 +456,160 @@ public class ParserUtil { */ public static final int WITH = WINDOW + 1; - private static final int UPPER_OR_OTHER_LETTER = - 1 << Character.UPPERCASE_LETTER - | 1 << Character.MODIFIER_LETTER - | 1 << Character.OTHER_LETTER; - - private static final int UPPER_OR_OTHER_LETTER_OR_DIGIT = - UPPER_OR_OTHER_LETTER - | 1 << Character.DECIMAL_DIGIT_NUMBER; - - private static final int LOWER_OR_OTHER_LETTER = - 1 << Character.LOWERCASE_LETTER - | 1 << Character.MODIFIER_LETTER - | 1 << Character.OTHER_LETTER; - - private static final int LOWER_OR_OTHER_LETTER_OR_DIGIT = - LOWER_OR_OTHER_LETTER - | 1 << Character.DECIMAL_DIGIT_NUMBER; - - private static final int LETTER = - 1 << Character.UPPERCASE_LETTER - | 1 << Character.LOWERCASE_LETTER - | 1 << Character.TITLECASE_LETTER - | 1 << Character.MODIFIER_LETTER - | 1 << Character.OTHER_LETTER; + /** + * The token "YEAR". + */ + public static final int YEAR = WITH + 1; - private static final int LETTER_OR_DIGIT = - LETTER - | 1 << Character.DECIMAL_DIGIT_NUMBER; + /** + * The token "_ROWID_". 
+ */ + public static final int _ROWID_ = YEAR + 1; + + // Constants above must be sorted + + /** + * The ordinal number of the first keyword. + */ + public static final int FIRST_KEYWORD = IDENTIFIER + 1; + + /** + * The ordinal number of the last keyword. + */ + public static final int LAST_KEYWORD = _ROWID_; + + private static final HashMap KEYWORDS; + + static { + HashMap map = new HashMap<>(256); + map.put("ALL", ALL); + map.put("AND", AND); + map.put("ANY", ANY); + map.put("ARRAY", ARRAY); + map.put("AS", AS); + map.put("ASYMMETRIC", ASYMMETRIC); + map.put("AUTHORIZATION", AUTHORIZATION); + map.put("BETWEEN", BETWEEN); + map.put("CASE", CASE); + map.put("CAST", CAST); + map.put("CHECK", CHECK); + map.put("CONSTRAINT", CONSTRAINT); + map.put("CROSS", CROSS); + map.put("CURRENT_CATALOG", CURRENT_CATALOG); + map.put("CURRENT_DATE", CURRENT_DATE); + map.put("CURRENT_PATH", CURRENT_PATH); + map.put("CURRENT_ROLE", CURRENT_ROLE); + map.put("CURRENT_SCHEMA", CURRENT_SCHEMA); + map.put("CURRENT_TIME", CURRENT_TIME); + map.put("CURRENT_TIMESTAMP", CURRENT_TIMESTAMP); + map.put("CURRENT_USER", CURRENT_USER); + map.put("DAY", DAY); + map.put("DEFAULT", DEFAULT); + map.put("DISTINCT", DISTINCT); + map.put("ELSE", ELSE); + map.put("END", END); + map.put("EXCEPT", EXCEPT); + map.put("EXISTS", EXISTS); + map.put("FALSE", FALSE); + map.put("FETCH", FETCH); + map.put("FOR", FOR); + map.put("FOREIGN", FOREIGN); + map.put("FROM", FROM); + map.put("FULL", FULL); + map.put("GROUP", GROUP); + map.put("HAVING", HAVING); + map.put("HOUR", HOUR); + map.put("IF", IF); + map.put("IN", IN); + map.put("INNER", INNER); + map.put("INTERSECT", INTERSECT); + map.put("INTERVAL", INTERVAL); + map.put("IS", IS); + map.put("JOIN", JOIN); + map.put("KEY", KEY); + map.put("LEFT", LEFT); + map.put("LIKE", LIKE); + map.put("LIMIT", LIMIT); + map.put("LOCALTIME", LOCALTIME); + map.put("LOCALTIMESTAMP", LOCALTIMESTAMP); + map.put("MINUS", MINUS); + map.put("MINUTE", MINUTE); + map.put("MONTH", MONTH); 
+ map.put("NATURAL", NATURAL); + map.put("NOT", NOT); + map.put("NULL", NULL); + map.put("OFFSET", OFFSET); + map.put("ON", ON); + map.put("OR", OR); + map.put("ORDER", ORDER); + map.put("PRIMARY", PRIMARY); + map.put("QUALIFY", QUALIFY); + map.put("RIGHT", RIGHT); + map.put("ROW", ROW); + map.put("ROWNUM", ROWNUM); + map.put("SECOND", SECOND); + map.put("SELECT", SELECT); + map.put("SESSION_USER", SESSION_USER); + map.put("SET", SET); + map.put("SOME", SOME); + map.put("SYMMETRIC", SYMMETRIC); + map.put("SYSTEM_USER", SYSTEM_USER); + map.put("TABLE", TABLE); + map.put("TO", TO); + map.put("TRUE", TRUE); + map.put("UESCAPE", UESCAPE); + map.put("UNION", UNION); + map.put("UNIQUE", UNIQUE); + map.put("UNKNOWN", UNKNOWN); + map.put("USER", USER); + map.put("USING", USING); + map.put("VALUE", VALUE); + map.put("VALUES", VALUES); + map.put("WHEN", WHEN); + map.put("WHERE", WHERE); + map.put("WINDOW", WINDOW); + map.put("WITH", WITH); + map.put("YEAR", YEAR); + map.put("_ROWID_", _ROWID_); + // Additional keywords + map.put("BOTH", KEYWORD); + map.put("FILTER", KEYWORD); + map.put("GROUPS", KEYWORD); + map.put("ILIKE", KEYWORD); + map.put("LEADING", KEYWORD); + map.put("OVER", KEYWORD); + map.put("PARTITION", KEYWORD); + map.put("RANGE", KEYWORD); + map.put("REGEXP", KEYWORD); + map.put("ROWS", KEYWORD); + map.put("TOP", KEYWORD); + map.put("TRAILING", KEYWORD); + KEYWORDS = map; + } private ParserUtil() { // utility class } + /** + * Add double quotes around an identifier if required and appends it to the + * specified string builder. 
+ * + * @param builder string builder to append to + * @param s the identifier + * @param sqlFlags formatting flags + * @return the specified builder + */ + public static StringBuilder quoteIdentifier(StringBuilder builder, String s, int sqlFlags) { + if (s == null) { + return builder.append("\"\""); + } + if ((sqlFlags & HasSQL.QUOTE_ONLY_WHEN_REQUIRED) != 0 && isSimpleIdentifier(s, false, false)) { + return builder.append(s); + } + return StringUtils.quoteIdentifier(builder, s); + } + /** * Checks if this string is a SQL keyword. * @@ -324,11 +619,7 @@ private ParserUtil() { * @return true if it is a keyword */ public static boolean isKeyword(String s, boolean ignoreCase) { - int length = s.length(); - if (length == 0) { - return false; - } - return getSaveTokenType(s, ignoreCase, 0, length, false) != IDENTIFIER; + return getTokenType(s, ignoreCase, false) != IDENTIFIER; } /** @@ -341,38 +632,37 @@ public static boolean isKeyword(String s, boolean ignoreCase) { * @throws NullPointerException if s is {@code null} */ public static boolean isSimpleIdentifier(String s, boolean databaseToUpper, boolean databaseToLower) { + if (databaseToUpper && databaseToLower) { + throw new IllegalArgumentException("databaseToUpper && databaseToLower"); + } int length = s.length(); - if (length == 0) { + if (length == 0 || !checkLetter(databaseToUpper, databaseToLower, s.charAt(0))) { return false; } - int startFlags, partFlags; + for (int i = 1; i < length; i++) { + char c = s.charAt(i); + if (c != '_' && (c < '0' || c > '9') && !checkLetter(databaseToUpper, databaseToLower, c)) { + return false; + } + } + return getTokenType(s, !databaseToUpper, true) == IDENTIFIER; + } + + private static boolean checkLetter(boolean databaseToUpper, boolean databaseToLower, char c) { if (databaseToUpper) { - if (databaseToLower) { - throw new IllegalArgumentException("databaseToUpper && databaseToLower"); - } else { - startFlags = UPPER_OR_OTHER_LETTER; - partFlags = 
UPPER_OR_OTHER_LETTER_OR_DIGIT; + if (c < 'A' || c > 'Z') { + return false; } - } else { - if (databaseToLower) { - startFlags = LOWER_OR_OTHER_LETTER; - partFlags = LOWER_OR_OTHER_LETTER_OR_DIGIT; - } else { - startFlags = LETTER; - partFlags = LETTER_OR_DIGIT; + } else if (databaseToLower) { + if (c < 'a' || c > 'z') { + return false; } - } - char c = s.charAt(0); - if ((startFlags >>> Character.getType(c) & 1) == 0 && c != '_') { - return false; - } - for (int i = 1; i < length; i++) { - c = s.charAt(i); - if ((partFlags >>> Character.getType(c) & 1) == 0 && c != '_') { + } else { + if ((c < 'A' || c > 'Z') && (c < 'a' || c > 'z')) { return false; } } - return getSaveTokenType(s, !databaseToUpper, 0, length, true) == IDENTIFIER; + return true; } /** @@ -381,265 +671,25 @@ public static boolean isSimpleIdentifier(String s, boolean databaseToUpper, bool * @param s the string with token * @param ignoreCase true if case should be ignored, false if only upper case * tokens are detected as keywords - * @param start start index of token - * @param end index of token, exclusive; must be greater than start index - * @param additionalKeywords whether TOP, INTERSECTS, and "current data / - * time" functions are keywords + * @param additionalKeywords + * whether context-sensitive keywords are returned as + * {@link #KEYWORD} * @return the token type */ - public static int getSaveTokenType(String s, boolean ignoreCase, int start, int end, boolean additionalKeywords) { - /* - * JdbcDatabaseMetaData.getSQLKeywords() and tests should be updated when new - * non-SQL:2003 keywords are introduced here. - */ - char c = s.charAt(start); + public static int getTokenType(String s, boolean ignoreCase, boolean additionalKeywords) { + int length = s.length(); + if (length <= 1 || length > 17) { + return IDENTIFIER; + } if (ignoreCase) { - // Convert a-z to A-Z and 0x7f to _ (need special handling). 
- c &= 0xffdf; + s = StringUtils.toUpperEnglish(s); } - switch (c) { - case 'A': - if (eq("ALL", s, ignoreCase, start, end)) { - return ALL; - } else if (eq("ARRAY", s, ignoreCase, start, end)) { - return ARRAY; - } - if (additionalKeywords) { - if (eq("AND", s, ignoreCase, start, end) || eq("AS", s, ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'B': - if (additionalKeywords) { - if (eq("BETWEEN", s, ignoreCase, start, end) || eq("BOTH", s, ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'C': - if (eq("CASE", s, ignoreCase, start, end)) { - return CASE; - } else if (eq("CHECK", s, ignoreCase, start, end)) { - return CHECK; - } else if (eq("CONSTRAINT", s, ignoreCase, start, end)) { - return CONSTRAINT; - } else if (eq("CROSS", s, ignoreCase, start, end)) { - return CROSS; - } else if (eq("CURRENT_DATE", s, ignoreCase, start, end)) { - return CURRENT_DATE; - } else if (eq("CURRENT_TIME", s, ignoreCase, start, end)) { - return CURRENT_TIME; - } else if (eq("CURRENT_TIMESTAMP", s, ignoreCase, start, end)) { - return CURRENT_TIMESTAMP; - } else if (eq("CURRENT_USER", s, ignoreCase, start, end)) { - return CURRENT_USER; - } - return IDENTIFIER; - case 'D': - if (eq("DISTINCT", s, ignoreCase, start, end)) { - return DISTINCT; - } - return IDENTIFIER; - case 'E': - if (eq("EXCEPT", s, ignoreCase, start, end)) { - return EXCEPT; - } else if (eq("EXISTS", s, ignoreCase, start, end)) { - return EXISTS; - } - return IDENTIFIER; - case 'F': - if (eq("FETCH", s, ignoreCase, start, end)) { - return FETCH; - } else if (eq("FROM", s, ignoreCase, start, end)) { - return FROM; - } else if (eq("FOR", s, ignoreCase, start, end)) { - return FOR; - } else if (eq("FOREIGN", s, ignoreCase, start, end)) { - return FOREIGN; - } else if (eq("FULL", s, ignoreCase, start, end)) { - return FULL; - } else if (eq("FALSE", s, ignoreCase, start, end)) { - return FALSE; - } - if (additionalKeywords) { - if (eq("FILTER", s, 
ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'G': - if (eq("GROUP", s, ignoreCase, start, end)) { - return GROUP; - } - if (additionalKeywords) { - if (eq("GROUPS", s, ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'H': - if (eq("HAVING", s, ignoreCase, start, end)) { - return HAVING; - } - return IDENTIFIER; - case 'I': - if (eq("IF", s, ignoreCase, start, end)) { - return IF; - } else if (eq("INNER", s, ignoreCase, start, end)) { - return INNER; - } else if (eq("INTERSECT", s, ignoreCase, start, end)) { - return INTERSECT; - } else if (eq("INTERSECTS", s, ignoreCase, start, end)) { - return INTERSECTS; - } else if (eq("INTERVAL", s, ignoreCase, start, end)) { - return INTERVAL; - } else if (eq("IS", s, ignoreCase, start, end)) { - return IS; - } - if (additionalKeywords) { - if (eq("ILIKE", s, ignoreCase, start, end) || eq("IN", s, ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'J': - if (eq("JOIN", s, ignoreCase, start, end)) { - return JOIN; - } - return IDENTIFIER; - case 'L': - if (eq("LIMIT", s, ignoreCase, start, end)) { - return LIMIT; - } else if (eq("LIKE", s, ignoreCase, start, end)) { - return LIKE; - } else if (eq("LOCALTIME", s, ignoreCase, start, end)) { - return LOCALTIME; - } else if (eq("LOCALTIMESTAMP", s, ignoreCase, start, end)) { - return LOCALTIMESTAMP; - } - if (additionalKeywords) { - if (eq("LEADING", s, ignoreCase, start, end) || eq("LEFT", s, ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'M': - if (eq("MINUS", s, ignoreCase, start, end)) { - return MINUS; - } - return IDENTIFIER; - case 'N': - if (eq("NOT", s, ignoreCase, start, end)) { - return NOT; - } else if (eq("NATURAL", s, ignoreCase, start, end)) { - return NATURAL; - } else if (eq("NULL", s, ignoreCase, start, end)) { - return NULL; - } - return IDENTIFIER; - case 'O': - if (eq("OFFSET", s, ignoreCase, start, end)) { - return OFFSET; - } 
else if (eq("ON", s, ignoreCase, start, end)) { - return ON; - } else if (eq("ORDER", s, ignoreCase, start, end)) { - return ORDER; - } - if (additionalKeywords) { - if (eq("OR", s, ignoreCase, start, end) || eq("OVER", s, ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'P': - if (eq("PRIMARY", s, ignoreCase, start, end)) { - return PRIMARY; - } - if (additionalKeywords) { - if (eq("PARTITION", s, ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'Q': - if (eq("QUALIFY", s, ignoreCase, start, end)) { - return QUALIFY; - } - return IDENTIFIER; - case 'R': - if (eq("ROW", s, ignoreCase, start, end)) { - return ROW; - } else if (eq("ROWNUM", s, ignoreCase, start, end)) { - return ROWNUM; - } - if (additionalKeywords) { - if (eq("RANGE", s, ignoreCase, start, end) || eq("REGEXP", s, ignoreCase, start, end) - || eq("ROWS", s, ignoreCase, start, end) || eq("RIGHT", s, ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'S': - if (eq("SELECT", s, ignoreCase, start, end)) { - return SELECT; - } - if (additionalKeywords) { - if (eq("SYSDATE", s, ignoreCase, start, end) || eq("SYSTIME", s, ignoreCase, start, end) - || eq("SYSTIMESTAMP", s, ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'T': - if (eq("TABLE", s, ignoreCase, start, end)) { - return TABLE; - } else if (eq("TRUE", s, ignoreCase, start, end)) { - return TRUE; - } - if (additionalKeywords) { - if (eq("TODAY", s, ignoreCase, start, end) || eq("TOP", s, ignoreCase, start, end) - || eq("TRAILING", s, ignoreCase, start, end)) { - return KEYWORD; - } - } - return IDENTIFIER; - case 'U': - if (eq("UNIQUE", s, ignoreCase, start, end)) { - return UNIQUE; - } else if (eq("UNION", s, ignoreCase, start, end)) { - return UNION; - } - return IDENTIFIER; - case 'V': - if (eq("VALUES", s, ignoreCase, start, end)) { - return VALUES; - } - return IDENTIFIER; - case 'W': - if (eq("WHERE", s, ignoreCase, 
start, end)) { - return WHERE; - } else if (eq("WINDOW", s, ignoreCase, start, end)) { - return WINDOW; - } else if (eq("WITH", s, ignoreCase, start, end)) { - return WITH; - } - return IDENTIFIER; - case '_': - // Cannot use eq() because 0x7f can be converted to '_' (0x5f) - if (end - start == 7 && "_ROWID_".regionMatches(ignoreCase, 0, s, start, 7)) { - return _ROWID_; - } - //$FALL-THROUGH$ - default: + Integer type = KEYWORDS.get(s); + if (type == null) { return IDENTIFIER; } - } - - private static boolean eq(String expected, String s, boolean ignoreCase, int start, int end) { - int len = expected.length(); - // First letter was already checked - return end - start == len && expected.regionMatches(ignoreCase, 1, s, start + 1, len - 1); + int t = type; + return t == KEYWORD && !additionalKeywords ? IDENTIFIER : t; } } diff --git a/h2/src/main/org/h2/util/Permutations.java b/h2/src/main/org/h2/util/Permutations.java index 475a00ddc0..08a3bf6fed 100644 --- a/h2/src/main/org/h2/util/Permutations.java +++ b/h2/src/main/org/h2/util/Permutations.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group * * According to a mail from Alan Tucker to Chris H Miller from IBM, @@ -41,7 +41,7 @@ private Permutations(T[] in, T[] out, int m) { this.n = in.length; this.m = m; if (n < m || m < 0) { - DbException.throwInternalError("n < m or m < 0"); + throw DbException.getInternalError("n < m or m < 0"); } this.in = in; this.out = out; diff --git a/h2/src/main/org/h2/util/Profiler.java b/h2/src/main/org/h2/util/Profiler.java index 5ec24dd49c..4bcc28f993 100644 --- a/h2/src/main/org/h2/util/Profiler.java +++ b/h2/src/main/org/h2/util/Profiler.java @@ -1,21 +1,22 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; import java.io.ByteArrayOutputStream; -import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; -import java.io.InputStreamReader; import java.io.LineNumberReader; import java.io.OutputStream; import java.io.Reader; import java.io.StringReader; import java.lang.instrument.Instrumentation; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -166,20 +167,17 @@ private void run(String... args) { } continue; } - try (Reader reader = new InputStreamReader(new FileInputStream(arg))) { + Path file = Paths.get(arg); + try (Reader reader = Files.newBufferedReader(file)) { LineNumberReader r = new LineNumberReader(reader); - while (true) { - String line = r.readLine(); - if (line == null) { - break; - } else if (line.startsWith("Full thread dump")) { + for (String line; (line = r.readLine()) != null;) { + if (line.startsWith("Full thread dump")) { threadDumps++; } } } - try (Reader reader = new InputStreamReader(new FileInputStream(arg))) { - LineNumberReader r = new LineNumberReader(reader); - processList(readStackTrace(r)); + try (Reader reader = Files.newBufferedReader(file)) { + processList(readStackTrace(new LineNumberReader(reader))); } } System.out.println(getTopTraces(5)); @@ -268,11 +266,11 @@ private static String exec(String... 
args) { copyInThread(p.getInputStream(), out); copyInThread(p.getErrorStream(), err); p.waitFor(); - String e = new String(err.toByteArray(), StandardCharsets.UTF_8); + String e = Utils10.byteArrayOutputStreamToString(err, StandardCharsets.UTF_8); if (e.length() > 0) { throw new RuntimeException(e); } - return new String(out.toByteArray(), StandardCharsets.UTF_8); + return Utils10.byteArrayOutputStreamToString(out, StandardCharsets.UTF_8); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/h2/src/main/org/h2/util/ScriptReader.java b/h2/src/main/org/h2/util/ScriptReader.java index 91bb99b937..8a930ca42c 100644 --- a/h2/src/main/org/h2/util/ScriptReader.java +++ b/h2/src/main/org/h2/util/ScriptReader.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -168,6 +168,7 @@ private String readStatementLoop() throws IOException { if (c == '*') { // block comment startRemark(true); + int level = 1; while (true) { c = read(); if (c < 0) { @@ -180,9 +181,20 @@ private String readStatementLoop() throws IOException { break; } if (c == '/') { - endRemark(); + if (--level == 0) { + endRemark(); + break; + } + } + } else if (c == '/') { + c = read(); + if (c < 0) { + clearRemark(); break; } + if (c == '*') { + level++; + } } } c = read(); diff --git a/h2/src/main/org/h2/util/SimpleColumnInfo.java b/h2/src/main/org/h2/util/SimpleColumnInfo.java index 1b8f2a1e98..4e0672e607 100644 --- a/h2/src/main/org/h2/util/SimpleColumnInfo.java +++ b/h2/src/main/org/h2/util/SimpleColumnInfo.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/SmallLRUCache.java b/h2/src/main/org/h2/util/SmallLRUCache.java index 6721db61ef..7b9d67f55b 100644 --- a/h2/src/main/org/h2/util/SmallLRUCache.java +++ b/h2/src/main/org/h2/util/SmallLRUCache.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/SmallMap.java b/h2/src/main/org/h2/util/SmallMap.java index bc3699efe8..3dc55a0e7e 100644 --- a/h2/src/main/org/h2/util/SmallMap.java +++ b/h2/src/main/org/h2/util/SmallMap.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/SoftHashMap.java b/h2/src/main/org/h2/util/SoftValuesHashMap.java similarity index 91% rename from h2/src/main/org/h2/util/SoftHashMap.java rename to h2/src/main/org/h2/util/SoftValuesHashMap.java index f11be6b633..ddade87f51 100644 --- a/h2/src/main/org/h2/util/SoftHashMap.java +++ b/h2/src/main/org/h2/util/SoftValuesHashMap.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util; @@ -21,12 +21,12 @@ * @param the key type * @param the value type */ -public class SoftHashMap extends AbstractMap { +public class SoftValuesHashMap extends AbstractMap { private final Map> map; private final ReferenceQueue queue = new ReferenceQueue<>(); - public SoftHashMap() { + public SoftValuesHashMap() { map = new HashMap<>(); } diff --git a/h2/src/main/org/h2/util/SortedProperties.java b/h2/src/main/org/h2/util/SortedProperties.java index 4b89048889..7989dd83ae 100644 --- a/h2/src/main/org/h2/util/SortedProperties.java +++ b/h2/src/main/org/h2/util/SortedProperties.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -95,6 +95,7 @@ public static String getStringProperty(Properties prop, String key, String def) * * @param fileName the name of the properties file * @return the properties object + * @throws IOException on failure */ public static synchronized SortedProperties loadProperties(String fileName) throws IOException { @@ -111,6 +112,7 @@ public static synchronized SortedProperties loadProperties(String fileName) * Store a properties file. The header and the date is not written. * * @param fileName the target file name + * @throws IOException on failure */ public synchronized void store(String fileName) throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream(); diff --git a/h2/src/main/org/h2/util/SourceCompiler.java b/h2/src/main/org/h2/util/SourceCompiler.java index dbd77cff9d..38fed8f456 100644 --- a/h2/src/main/org/h2/util/SourceCompiler.java +++ b/h2/src/main/org/h2/util/SourceCompiler.java @@ -1,27 +1,26 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; import java.io.BufferedReader; import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.File; -import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.PrintStream; import java.io.StringReader; import java.io.StringWriter; -import java.io.Writer; import java.lang.reflect.Array; import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.net.URI; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.security.SecureClassLoader; import java.util.ArrayList; import java.util.HashMap; @@ -44,7 +43,6 @@ import org.h2.api.ErrorCode; import org.h2.engine.SysProperties; import org.h2.message.DbException; -import org.h2.store.fs.FileUtils; /** * This class allows to convert source code to a class. 
It uses one class loader @@ -126,6 +124,7 @@ public void setJavaSystemCompiler(boolean enabled) { * * @param packageAndClassName the class name * @return the class + * @throws ClassNotFoundException on failure */ public Class getClass(String packageAndClassName) throws ClassNotFoundException { @@ -203,6 +202,7 @@ public static boolean isJavaxScriptSource(String source) { * * @param packageAndClassName the package and class name * @return the compiled script + * @throws ScriptException on failure */ public CompiledScript getCompiledScript(String packageAndClassName) throws ScriptException { CompiledScript compiledScript = compiledScripts.get(packageAndClassName); @@ -229,6 +229,7 @@ public CompiledScript getCompiledScript(String packageAndClassName) throws Scrip * * @param className the class name * @return the method name + * @throws ClassNotFoundException on failure */ public Method getMethod(String className) throws ClassNotFoundException { Class clazz = getClass(className); @@ -256,34 +257,37 @@ public Method getMethod(String className) throws ClassNotFoundException { * @return the class file */ byte[] javacCompile(String packageName, String className, String source) { - File dir = new File(COMPILE_DIR); + Path dir = Paths.get(COMPILE_DIR); if (packageName != null) { - dir = new File(dir, packageName.replace('.', '/')); - FileUtils.createDirectories(dir.getAbsolutePath()); + dir = dir.resolve(packageName.replace('.', '/')); + try { + Files.createDirectories(dir); + } catch (Exception e) { + throw DbException.convert(e); + } } - File javaFile = new File(dir, className + ".java"); - File classFile = new File(dir, className + ".class"); + Path javaFile = dir.resolve(className + ".java"); + Path classFile = dir.resolve(className + ".class"); try { - OutputStream f = FileUtils.newOutputStream(javaFile.getAbsolutePath(), false); - Writer out = IOUtils.getBufferedWriter(f); - classFile.delete(); - out.write(source); - out.close(); + Files.write(javaFile, 
source.getBytes(StandardCharsets.UTF_8)); + Files.deleteIfExists(classFile); if (JAVAC_SUN != null) { javacSun(javaFile); } else { javacProcess(javaFile); } - byte[] data = new byte[(int) classFile.length()]; - DataInputStream in = new DataInputStream(new FileInputStream(classFile)); - in.readFully(data); - in.close(); - return data; + return Files.readAllBytes(classFile); } catch (Exception e) { throw DbException.convert(e); } finally { - javaFile.delete(); - classFile.delete(); + try { + Files.deleteIfExists(javaFile); + } catch (IOException e) { + } + try { + Files.deleteIfExists(classFile); + } catch (IOException e) { + } } } @@ -352,12 +356,12 @@ Class javaxToolsJavac(String packageName, String className, String source) { } } - private static void javacProcess(File javaFile) { + private static void javacProcess(Path javaFile) { exec("javac", "-sourcepath", COMPILE_DIR, "-d", COMPILE_DIR, "-encoding", "UTF-8", - javaFile.getAbsolutePath()); + javaFile.toAbsolutePath().toString()); } private static int exec(String... args) { @@ -375,7 +379,7 @@ private static int exec(String... 
args) { copyInThread(p.getInputStream(), buff); copyInThread(p.getErrorStream(), buff); p.waitFor(); - String output = new String(buff.toByteArray(), StandardCharsets.UTF_8); + String output = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); handleSyntaxError(output, p.exitValue()); return p.exitValue(); } catch (Exception e) { @@ -392,12 +396,11 @@ public void call() throws IOException { }.execute(); } - private static synchronized void javacSun(File javaFile) { + private static synchronized void javacSun(Path javaFile) { PrintStream old = System.err; ByteArrayOutputStream buff = new ByteArrayOutputStream(); - PrintStream temp = new PrintStream(buff); try { - System.setErr(temp); + System.setErr(new PrintStream(buff, false, "UTF-8")); Method compile; compile = JAVAC_SUN.getMethod("compile", String[].class); Object javac = JAVAC_SUN.getDeclaredConstructor().newInstance(); @@ -409,8 +412,8 @@ private static synchronized void javacSun(File javaFile) { // "-Xlint:unchecked", "-d", COMPILE_DIR, "-encoding", "UTF-8", - javaFile.getAbsolutePath() }); - String output = new String(buff.toByteArray(), StandardCharsets.UTF_8); + javaFile.toAbsolutePath().toString() }); + String output = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); handleSyntaxError(output, status); } catch (Exception e) { throw DbException.convert(e); @@ -563,9 +566,20 @@ static class ClassFileManager extends ForwardingJavaFileManager { /** - * The class (only one class is kept). + * We use map because there can be nested, anonymous etc classes. 
*/ - JavaClassObject classObject; + Map classObjectsByName = new HashMap<>(); + + private SecureClassLoader classLoader = new SecureClassLoader() { + + @Override + protected Class findClass(String name) + throws ClassNotFoundException { + byte[] bytes = classObjectsByName.get(name).getBytes(); + return super.defineClass(name, bytes, 0, + bytes.length); + } + }; public ClassFileManager(StandardJavaFileManager standardManager) { super(standardManager); @@ -573,21 +587,14 @@ public ClassFileManager(StandardJavaFileManager standardManager) { @Override public ClassLoader getClassLoader(Location location) { - return new SecureClassLoader() { - @Override - protected Class findClass(String name) - throws ClassNotFoundException { - byte[] bytes = classObject.getBytes(); - return super.defineClass(name, bytes, 0, - bytes.length); - } - }; + return this.classLoader; } @Override public JavaFileObject getJavaFileForOutput(Location location, String className, Kind kind, FileObject sibling) throws IOException { - classObject = new JavaClassObject(className, kind); + JavaClassObject classObject = new JavaClassObject(className, kind); + classObjectsByName.put(className, classObject); return classObject; } } diff --git a/h2/src/main/org/h2/util/StringUtils.java b/h2/src/main/org/h2/util/StringUtils.java index ed6b94009e..85bca6b51f 100644 --- a/h2/src/main/org/h2/util/StringUtils.java +++ b/h2/src/main/org/h2/util/StringUtils.java @@ -1,10 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util; +import java.io.ByteArrayOutputStream; import java.lang.ref.SoftReference; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; @@ -52,9 +53,6 @@ private StringUtils() { private static String[] getCache() { String[] cache; - // softCache can be null due to a Tomcat problem - // a workaround is disable the system property org.apache. - // catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES if (softCache != null) { cache = softCache.get(); if (cache != null) { @@ -125,37 +123,68 @@ public static String quoteStringSQL(String s) { } /** - * Convert a string to a SQL literal. Null is converted to NULL. The text is - * enclosed in single quotes. If there are any special characters, the - * method STRINGDECODE is used. + * Convert a string to a SQL character string literal. Null is converted to + * NULL. If there are any special characters, the Unicode character string + * literal is used. * * @param builder * string builder to append result to - * @param s the text to convert. + * @param s the text to convert * @return the specified string builder */ public static StringBuilder quoteStringSQL(StringBuilder builder, String s) { if (s == null) { return builder.append("NULL"); } - int builderLength = builder.length(); - int length = s.length(); - builder.append('\''); - for (int i = 0; i < length; i++) { - char c = s.charAt(i); - if (c == '\'') { - builder.append(c); - } else if (c < ' ' || c > 127) { - // need to start from the beginning because maybe there was a \ - // that was not quoted - builder.setLength(builderLength); - builder.append("STRINGDECODE('"); - javaEncode(s, builder, true); - return builder.append("')"); + return quoteIdentifierOrLiteral(builder, s, '\''); + } + + /** + * Decodes a Unicode SQL string. 
+ * + * @param s + * the string to decode + * @param uencode + * the code point of UENCODE character, or '\\' + * @return the decoded string + * @throws DbException + * on format exception + */ + public static String decodeUnicodeStringSQL(String s, int uencode) { + int l = s.length(); + StringBuilder builder = new StringBuilder(l); + for (int i = 0; i < l;) { + int cp = s.codePointAt(i); + i += Character.charCount(cp); + if (cp == uencode) { + if (i >= l) { + throw getFormatException(s, i); + } + cp = s.codePointAt(i); + if (cp == uencode) { + i += Character.charCount(cp); + } else { + if (i + 4 > l) { + throw getFormatException(s, i); + } + char ch = s.charAt(i); + try { + if (ch == '+') { + if (i + 7 > l) { + throw getFormatException(s, i); + } + cp = Integer.parseUnsignedInt(s.substring(i + 1, i += 7), 16); + } else { + cp = Integer.parseUnsignedInt(s.substring(i, i += 4), 16); + } + } catch (NumberFormatException e) { + throw getFormatException(s, i); + } + } } - builder.append(c); + builder.appendCodePoint(cp); } - return builder.append('\''); + return builder.toString(); } /** @@ -314,6 +343,9 @@ public static String javaDecode(String s) { buff.append('\\'); break; case 'u': { + if (i + 4 >= length) { + throw getFormatException(s, i); + } try { c = (char) (Integer.parseInt(s.substring(i + 1, i + 5), 16)); } catch (NumberFormatException e) { @@ -324,7 +356,7 @@ public static String javaDecode(String s) { break; } default: - if (c >= '0' && c <= '9') { + if (c >= '0' && c <= '9' && i + 2 < length) { try { c = (char) (Integer.parseInt(s.substring(i, i + 3), 8)); } catch (NumberFormatException e) { @@ -402,20 +434,7 @@ public static String quoteJavaIntArray(int[] array) { } /** - * Remove enclosing '(' and ')' if this text is enclosed. 
- * - * @param s the potentially enclosed string - * @return the string - */ - public static String unEnclose(String s) { - if (s.startsWith("(") && s.endsWith(")")) { - return s.substring(1, s.length() - 1); - } - return s; - } - - /** - * Encode the string as an URL. + * Encode the string as a URL. * * @param s the string to encode * @return the encoded string @@ -522,24 +541,6 @@ public static String arrayCombine(String[] list, char separatorChar) { return builder.toString(); } - /** - * Join specified strings and add them to the specified string builder. - * - * @param builder string builder - * @param strings strings to join - * @param separator separator - * @return the specified string builder - */ - public static StringBuilder join(StringBuilder builder, ArrayList strings, String separator) { - for (int i = 0, l = strings.size(); i < l; i++) { - if (i > 0) { - builder.append(separator); - } - builder.append(strings.get(i)); - } - return builder; - } - /** * Creates an XML attribute of the form name="value". 
* A single space is prepended to the name, @@ -777,7 +778,7 @@ public static String replaceAll(String s, String before, String after) { * @return the double quoted text */ public static String quoteIdentifier(String s) { - return quoteIdentifier(new StringBuilder(s.length() + 2), s).toString(); + return quoteIdentifierOrLiteral(new StringBuilder(s.length() + 2), s, '"').toString(); } /** @@ -790,15 +791,42 @@ public static String quoteIdentifier(String s) { * @return the specified builder */ public static StringBuilder quoteIdentifier(StringBuilder builder, String s) { - builder.append('"'); - for (int i = 0, length = s.length(); i < length; i++) { - char c = s.charAt(i); - if (c == '"') { - builder.append(c); + return quoteIdentifierOrLiteral(builder, s, '"'); + } + + private static StringBuilder quoteIdentifierOrLiteral(StringBuilder builder, String s, char q) { + int builderLength = builder.length(); + builder.append(q); + for (int i = 0, l = s.length(); i < l;) { + int cp = s.codePointAt(i); + i += Character.charCount(cp); + if (cp < ' ' || cp > 127) { + // need to start from the beginning + builder.setLength(builderLength); + builder.append("U&").append(q); + for (i = 0; i < l;) { + cp = s.codePointAt(i); + i += Character.charCount(cp); + if (cp >= ' ' && cp < 127) { + char ch = (char) cp; + if (ch == q || ch == '\\') { + builder.append(ch); + } + builder.append(ch); + } else if (cp <= 0xffff) { + appendHex(builder.append('\\'), cp, 2); + } else { + appendHex(builder.append("\\+"), cp, 3); + } + } + break; } - builder.append(c); + if (cp == q) { + builder.append(q); + } + builder.append((char) cp); } - return builder.append('"'); + return builder.append(q); } /** @@ -811,17 +839,6 @@ public static boolean isNullOrEmpty(String s) { return s == null || s.isEmpty(); } - /** - * In a string, replace block comment marks with /++ .. ++/. 
- * - * @param sql the string - * @return the resulting string - */ - public static String quoteRemarkSQL(String sql) { - sql = replaceAll(sql, "*/", "++/"); - return replaceAll(sql, "/*", "/++"); - } - /** * Pad a string. This method is used for the SQL function RPAD and LPAD. * @@ -958,6 +975,29 @@ public static StringBuilder trimSubstring(StringBuilder builder, String s, int b return builder.append(s, beginIndex, endIndex); } + /** + * Truncates the specified string to the specified length. This method, + * unlike {@link String#substring(int, int)}, doesn't break Unicode code + * points. If the specified length in characters breaks a valid pair of + * surrogates, the whole pair is not included into result. + * + * @param s + * the string to truncate + * @param maximumLength + * the maximum length in characters + * @return the specified string if it isn't longer than the specified + * maximum length, and the truncated string otherwise + */ + public static String truncateString(String s, int maximumLength) { + if (s.length() > maximumLength) { + s = maximumLength > 0 ? s.substring(0, + Character.isSurrogatePair(s.charAt(maximumLength - 1), s.charAt(maximumLength)) ? maximumLength - 1 + : maximumLength) + : ""; + } + return s; + } + /** * Get the string from the cache if possible. If the string has not been * found, it is added to the cache. If there is such a string in the cache, @@ -1057,6 +1097,58 @@ public static byte[] convertHexToBytes(String s) { return buff; } + /** + * Parses a hex encoded string with possible space separators and appends + * the decoded binary string to the specified output stream. 
+ * + * @param baos the output stream, or {@code null} + * @param s the hex encoded string + * @param start the start index + * @param end the end index, exclusive + * @return the specified output stream or a new output stream + */ + public static ByteArrayOutputStream convertHexWithSpacesToBytes(ByteArrayOutputStream baos, String s, int start, + int end) { + if (baos == null) { + baos = new ByteArrayOutputStream((end - start) >>> 1); + } + int mask = 0; + int[] hex = HEX_DECODE; + try { + loop: for (int i = start;;) { + char c1, c2; + do { + if (i >= end) { + break loop; + } + c1 = s.charAt(i++); + } while (c1 == ' '); + do { + if (i >= end) { + if (((mask | hex[c1]) & ~255) != 0) { + throw getHexStringException(ErrorCode.HEX_STRING_WRONG_1, s, start, end); + } + throw getHexStringException(ErrorCode.HEX_STRING_ODD_1, s, start, end); + } + c2 = s.charAt(i++); + } while (c2 == ' '); + int d = hex[c1] << 4 | hex[c2]; + mask |= d; + baos.write(d); + } + } catch (ArrayIndexOutOfBoundsException e) { + throw getHexStringException(ErrorCode.HEX_STRING_WRONG_1, s, start, end); + } + if ((mask & ~255) != 0) { + throw getHexStringException(ErrorCode.HEX_STRING_WRONG_1, s, start, end); + } + return baos; + } + + private static DbException getHexStringException(int code, String s, int start, int end) { + return DbException.get(code, s.substring(start, end)); + } + /** * Convert a byte array to a hex encoded string. 
* @@ -1075,14 +1167,14 @@ public static String convertBytesToHex(byte[] value) { * @return the hex encoded string */ public static String convertBytesToHex(byte[] value, int len) { - char[] buff = new char[len + len]; + byte[] bytes = new byte[len * 2]; char[] hex = HEX; - for (int i = 0; i < len; i++) { + for (int i = 0, j = 0; i < len; i++) { int c = value[i] & 0xff; - buff[i + i] = hex[c >> 4]; - buff[i + i + 1] = hex[c & 0xf]; + bytes[j++] = (byte) hex[c >> 4]; + bytes[j++] = (byte) hex[c & 0xf]; } - return new String(buff); + return new String(bytes, StandardCharsets.ISO_8859_1); } /** @@ -1168,29 +1260,35 @@ public static boolean isWhitespaceOrEmpty(String s) { return true; } + /** + * Append a zero-padded number from 00 to 99 to a string builder. + * + * @param builder the string builder + * @param positiveValue the number to append + * @return the specified string builder + */ + public static StringBuilder appendTwoDigits(StringBuilder builder, int positiveValue) { + if (positiveValue < 10) { + builder.append('0'); + } + return builder.append(positiveValue); + } + /** * Append a zero-padded number to a string builder. 
* - * @param buff the string builder + * @param builder the string builder * @param length the number of characters to append * @param positiveValue the number to append + * @return the specified string builder */ - public static void appendZeroPadded(StringBuilder buff, int length, - long positiveValue) { - if (length == 2) { - if (positiveValue < 10) { - buff.append('0'); - } - buff.append(positiveValue); - } else { - String s = Long.toString(positiveValue); - length -= s.length(); - while (length > 0) { - buff.append('0'); - length--; - } - buff.append(s); + public static StringBuilder appendZeroPadded(StringBuilder builder, int length, long positiveValue) { + String s = Long.toString(positiveValue); + length -= s.length(); + for (; length > 0; length--) { + builder.append('0'); } + return builder.append(s); } /** diff --git a/h2/src/main/org/h2/util/Task.java b/h2/src/main/org/h2/util/Task.java index 4e61f8bba4..b238ee10c7 100644 --- a/h2/src/main/org/h2/util/Task.java +++ b/h2/src/main/org/h2/util/Task.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/TempFileDeleter.java b/h2/src/main/org/h2/util/TempFileDeleter.java index 55a9e7754a..1afe2da7cb 100644 --- a/h2/src/main/org/h2/util/TempFileDeleter.java +++ b/h2/src/main/org/h2/util/TempFileDeleter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util; @@ -64,7 +64,7 @@ public synchronized void deleteFile(Reference ref, Object resource) { if (f2 != null) { if (SysProperties.CHECK) { if (resource != null && !f2.equals(resource)) { - DbException.throwInternalError("f2:" + f2 + " f:" + resource); + throw DbException.getInternalError("f2:" + f2 + " f:" + resource); } } resource = f2; @@ -128,8 +128,7 @@ public void stopAutoDelete(Reference ref, Object resource) { Object f2 = refMap.remove(ref); if (SysProperties.CHECK) { if (f2 == null || !f2.equals(resource)) { - DbException.throwInternalError("f2:" + f2 + - " " + (f2 == null ? "" : f2) + " f:" + resource); + throw DbException.getInternalError("f2:" + f2 + ' ' + (f2 == null ? "" : f2) + " f:" + resource); } } } diff --git a/h2/src/main/org/h2/util/ThreadDeadlockDetector.java b/h2/src/main/org/h2/util/ThreadDeadlockDetector.java index 99476c6ff8..8acdb9c019 100644 --- a/h2/src/main/org/h2/util/ThreadDeadlockDetector.java +++ b/h2/src/main/org/h2/util/ThreadDeadlockDetector.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; diff --git a/h2/src/main/org/h2/util/TimeZoneProvider.java b/h2/src/main/org/h2/util/TimeZoneProvider.java new file mode 100644 index 0000000000..f5b7bc2f64 --- /dev/null +++ b/h2/src/main/org/h2/util/TimeZoneProvider.java @@ -0,0 +1,441 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.util; + +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.time.zone.ZoneRules; +import java.util.Locale; + +/** + * Provides access to time zone API. + */ +public abstract class TimeZoneProvider { + + /** + * The UTC time zone provider. + */ + public static final TimeZoneProvider UTC = new Simple((short) 0); + + /** + * A small cache for timezone providers. + */ + public static TimeZoneProvider[] CACHE; + + /** + * The number of cache elements (needs to be a power of 2). + */ + private static final int CACHE_SIZE = 32; + + /** + * Returns the time zone provider with the specified offset. + * + * @param offset + * UTC offset in seconds + * @return the time zone provider with the specified offset + */ + public static TimeZoneProvider ofOffset(int offset) { + if (offset == 0) { + return UTC; + } + if (offset < (-18 * 60 * 60) || offset > (18 * 60 * 60)) { + throw new IllegalArgumentException("Time zone offset " + offset + " seconds is out of range"); + } + return new Simple(offset); + } + + /** + * Returns the time zone provider with the specified name. 
+ * + * @param id + * the ID of the time zone + * @return the time zone provider with the specified name + * @throws RuntimeException + * if time zone with specified ID isn't known + */ + public static TimeZoneProvider ofId(String id) throws RuntimeException { + int length = id.length(); + if (length == 1 && id.charAt(0) == 'Z') { + return UTC; + } + int index = 0; + if (id.startsWith("GMT") || id.startsWith("UTC")) { + if (length == 3) { + return UTC; + } + index = 3; + } + if (length > index) { + boolean negative = false; + char c = id.charAt(index); + if (length > index + 1) { + if (c == '+') { + c = id.charAt(++index); + } else if (c == '-') { + negative = true; + c = id.charAt(++index); + } + } + if (index != 3 && c >= '0' && c <= '9') { + int hour = c - '0'; + if (++index < length) { + c = id.charAt(index); + if (c >= '0' && c <= '9') { + hour = hour * 10 + c - '0'; + index++; + } + } + if (index == length) { + int offset = hour * 3_600; + return ofOffset(negative ? -offset : offset); + } + if (id.charAt(index) == ':') { + if (++index < length) { + c = id.charAt(index); + if (c >= '0' && c <= '9') { + int minute = c - '0'; + if (++index < length) { + c = id.charAt(index); + if (c >= '0' && c <= '9') { + minute = minute * 10 + c - '0'; + index++; + } + } + if (index == length) { + int offset = (hour * 60 + minute) * 60; + return ofOffset(negative ? -offset : offset); + } + if (id.charAt(index) == ':') { + if (++index < length) { + c = id.charAt(index); + if (c >= '0' && c <= '9') { + int second = c - '0'; + if (++index < length) { + c = id.charAt(index); + if (c >= '0' && c <= '9') { + second = second * 10 + c - '0'; + index++; + } + } + if (index == length) { + int offset = (hour * 60 + minute) * 60 + second; + return ofOffset(negative ? 
-offset : offset); + } + } + } + } + } + } + } + } + if (index > 0) { + throw new IllegalArgumentException(id); + } + } + int hash = id.hashCode() & (CACHE_SIZE - 1); + TimeZoneProvider[] cache = CACHE; + if (cache != null) { + TimeZoneProvider provider = cache[hash]; + if (provider != null && provider.getId().equals(id)) { + return provider; + } + } + TimeZoneProvider provider = new WithTimeZone(ZoneId.of(id, ZoneId.SHORT_IDS)); + if (cache == null) { + CACHE = cache = new TimeZoneProvider[CACHE_SIZE]; + } + cache[hash] = provider; + return provider; + } + + /** + * Returns the time zone provider for the system default time zone. + * + * @return the time zone provider for the system default time zone + */ + public static TimeZoneProvider getDefault() { + ZoneId zoneId = ZoneId.systemDefault(); + ZoneOffset offset; + if (zoneId instanceof ZoneOffset) { + offset = (ZoneOffset) zoneId; + } else { + ZoneRules rules = zoneId.getRules(); + if (!rules.isFixedOffset()) { + return new WithTimeZone(zoneId); + } + offset = rules.getOffset(Instant.EPOCH); + } + return ofOffset(offset.getTotalSeconds()); + } + + /** + * Calculates the time zone offset in seconds for the specified EPOCH + * seconds. + * + * @param epochSeconds + * seconds since EPOCH + * @return time zone offset in minutes + */ + public abstract int getTimeZoneOffsetUTC(long epochSeconds); + + /** + * Calculates the time zone offset in seconds for the specified date value + * and nanoseconds since midnight in local time. + * + * @param dateValue + * date value + * @param timeNanos + * nanoseconds since midnight + * @return time zone offset in minutes + */ + public abstract int getTimeZoneOffsetLocal(long dateValue, long timeNanos); + + /** + * Calculates the epoch seconds from local date and time. 
+ * + * @param dateValue + * date value + * @param timeNanos + * nanoseconds since midnight + * @return the epoch seconds value + */ + public abstract long getEpochSecondsFromLocal(long dateValue, long timeNanos); + + /** + * Returns the ID of the time zone. + * + * @return the ID of the time zone + */ + public abstract String getId(); + + /** + * Get the standard time name or daylight saving time name of the time zone. + * + * @param epochSeconds + * seconds since EPOCH + * @return the standard time name or daylight saving time name of the time + * zone + */ + public abstract String getShortId(long epochSeconds); + + /** + * Returns whether this is a simple time zone provider with a fixed offset + * from UTC. + * + * @return whether this is a simple time zone provider with a fixed offset + * from UTC + */ + public boolean hasFixedOffset() { + return false; + } + + /** + * Time zone provider with offset. + */ + private static final class Simple extends TimeZoneProvider { + + private final int offset; + + private volatile String id; + + Simple(int offset) { + this.offset = offset; + } + + @Override + public int hashCode() { + return offset + 129607; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != Simple.class) { + return false; + } + return offset == ((Simple) obj).offset; + } + + @Override + public int getTimeZoneOffsetUTC(long epochSeconds) { + return offset; + } + + @Override + public int getTimeZoneOffsetLocal(long dateValue, long timeNanos) { + return offset; + } + + @Override + public long getEpochSecondsFromLocal(long dateValue, long timeNanos) { + return DateTimeUtils.getEpochSeconds(dateValue, timeNanos, offset); + } + + @Override + public String getId() { + String id = this.id; + if (id == null) { + this.id = id = DateTimeUtils.timeZoneNameFromOffsetSeconds(offset); + } + return id; + } + + @Override + public String getShortId(long epochSeconds) { + return getId(); + } + 
+ @Override + public boolean hasFixedOffset() { + return true; + } + + @Override + public String toString() { + return "TimeZoneProvider " + getId(); + } + + } + + /** + * Time zone provider with time zone. + */ + static final class WithTimeZone extends TimeZoneProvider { + + /** + * Number of seconds in 400 years. + */ + static final long SECONDS_PER_PERIOD = 146_097L * 60 * 60 * 24; + + /** + * Number of seconds per year. + */ + static final long SECONDS_PER_YEAR = SECONDS_PER_PERIOD / 400; + + private static volatile DateTimeFormatter TIME_ZONE_FORMATTER; + + private final ZoneId zoneId; + + WithTimeZone(ZoneId timeZone) { + this.zoneId = timeZone; + } + + @Override + public int hashCode() { + return zoneId.hashCode() + 951689; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != WithTimeZone.class) { + return false; + } + return zoneId.equals(((WithTimeZone) obj).zoneId); + } + + @Override + public int getTimeZoneOffsetUTC(long epochSeconds) { + /* + * Construct an Instant with EPOCH seconds within the range + * -31,557,014,135,532,000..31,556,889,832,715,999 + * (-999999999-01-01T00:00-18:00.. + * +999999999-12-31T23:59:59.999999999+18:00). Too large and too + * small EPOCH seconds are replaced with EPOCH seconds within the + * range using the 400 years period of the Gregorian calendar. + * + * H2 has slightly wider range of EPOCH seconds than Instant, and + * ZoneRules.getOffset(Instant) does not support all Instant values + * in all time zones. 
+ */ + if (epochSeconds > 31_556_889_832_715_999L) { + epochSeconds -= SECONDS_PER_PERIOD; + } else if (epochSeconds < -31_557_014_135_532_000L) { + epochSeconds += SECONDS_PER_PERIOD; + } + return zoneId.getRules().getOffset(Instant.ofEpochSecond(epochSeconds)).getTotalSeconds(); + } + + @Override + public int getTimeZoneOffsetLocal(long dateValue, long timeNanos) { + int second = (int) (timeNanos / DateTimeUtils.NANOS_PER_SECOND); + int minute = second / 60; + second -= minute * 60; + int hour = minute / 60; + minute -= hour * 60; + return ZonedDateTime.of(LocalDateTime.of(yearForCalendar(DateTimeUtils.yearFromDateValue(dateValue)), + DateTimeUtils.monthFromDateValue(dateValue), DateTimeUtils.dayFromDateValue(dateValue), hour, + minute, second), zoneId).getOffset().getTotalSeconds(); + } + + @Override + public long getEpochSecondsFromLocal(long dateValue, long timeNanos) { + int second = (int) (timeNanos / DateTimeUtils.NANOS_PER_SECOND); + int minute = second / 60; + second -= minute * 60; + int hour = minute / 60; + minute -= hour * 60; + int year = DateTimeUtils.yearFromDateValue(dateValue); + int yearForCalendar = yearForCalendar(year); + long epoch = ZonedDateTime + .of(LocalDateTime.of(yearForCalendar, DateTimeUtils.monthFromDateValue(dateValue), + DateTimeUtils.dayFromDateValue(dateValue), hour, minute, second), zoneId) + .toOffsetDateTime().toEpochSecond(); + return epoch + (year - yearForCalendar) * SECONDS_PER_YEAR; + } + + @Override + public String getId() { + return zoneId.getId(); + } + + @Override + public String getShortId(long epochSeconds) { + DateTimeFormatter timeZoneFormatter = TIME_ZONE_FORMATTER; + if (timeZoneFormatter == null) { + TIME_ZONE_FORMATTER = timeZoneFormatter = DateTimeFormatter.ofPattern("z", Locale.ENGLISH); + } + return ZonedDateTime.ofInstant(Instant.ofEpochSecond(epochSeconds), zoneId).format(timeZoneFormatter); + } + + /** + * Returns a year within the range -999,999,999..999,999,999 for the + * given year. 
Too large and too small years are replaced with years + * within the range using the 400 years period of the Gregorian + * calendar. + * + * Because we need them only to calculate a time zone offset, it's safe + * to normalize them to such range. + * + * @param year + * the year + * @return the specified year or the replacement year within the range + */ + private static int yearForCalendar(int year) { + if (year > 999_999_999) { + year -= 400; + } else if (year < -999_999_999) { + year += 400; + } + return year; + } + + @Override + public String toString() { + return "TimeZoneProvider " + zoneId.getId(); + } + + } + +} diff --git a/h2/src/main/org/h2/util/Tool.java b/h2/src/main/org/h2/util/Tool.java index 72f041dca6..1ad65d98d8 100644 --- a/h2/src/main/org/h2/util/Tool.java +++ b/h2/src/main/org/h2/util/Tool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -41,6 +41,7 @@ public void setOut(PrintStream out) { * Run the tool with the given output stream and arguments. * * @param args the argument list + * @throws SQLException on failure */ public abstract void runTool(String... 
args) throws SQLException; @@ -49,6 +50,7 @@ public void setOut(PrintStream out) { * * @param option the unsupported option * @return this method never returns normally + * @throws SQLException on failure */ protected SQLException showUsageAndThrowUnsupportedOption(String option) throws SQLException { @@ -61,6 +63,7 @@ protected SQLException showUsageAndThrowUnsupportedOption(String option) * * @param option the unsupported option * @return this method never returns normally + * @throws SQLException on failure */ protected SQLException throwUnsupportedOption(String option) throws SQLException { @@ -111,7 +114,7 @@ protected void showUsage() { out.println(resources.get(className)); out.println("Usage: java "+getClass().getName() + " "); out.println(resources.get(className + ".main")); - out.println("See also http://h2database.com/javadoc/" + + out.println("See also https://h2database.com/javadoc/" + className.replace('.', '/') + ".html"); } diff --git a/h2/src/main/org/h2/util/Utils.java b/h2/src/main/org/h2/util/Utils.java index 57e832be1e..4594146fba 100644 --- a/h2/src/main/org/h2/util/Utils.java +++ b/h2/src/main/org/h2/util/Utils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util; @@ -18,7 +18,6 @@ import java.util.Arrays; import java.util.Comparator; import java.util.HashMap; -import java.util.concurrent.TimeUnit; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; @@ -37,15 +36,6 @@ public class Utils { */ public static final int[] EMPTY_INT_ARRAY = {}; - /** - * An 0-size long array. 
- */ - private static final long[] EMPTY_LONG_ARRAY = {}; - - private static final int GC_DELAY = 50; - private static final int MAX_GC = 8; - private static long lastGC; - private static final HashMap RESOURCES = new HashMap<>(); private Utils() { @@ -236,11 +226,10 @@ public static byte[] cloneByteArray(byte[] b) { * * @return the used memory */ - public static int getMemoryUsed() { + public static long getMemoryUsed() { collectGarbage(); Runtime rt = Runtime.getRuntime(); - long mem = rt.totalMemory() - rt.freeMemory(); - return (int) (mem >> 10); + return rt.totalMemory() - rt.freeMemory() >> 10; } /** @@ -249,11 +238,9 @@ public static int getMemoryUsed() { * * @return the free memory */ - public static int getMemoryFree() { + public static long getMemoryFree() { collectGarbage(); - Runtime rt = Runtime.getRuntime(); - long mem = rt.freeMemory(); - return (int) (mem >> 10); + return Runtime.getRuntime().freeMemory() >> 10; } /** @@ -262,8 +249,7 @@ public static int getMemoryFree() { * @return the maximum memory */ public static long getMemoryMax() { - long max = Runtime.getRuntime().maxMemory(); - return max / 1024; + return Runtime.getRuntime().maxMemory() >> 10; } public static long getGarbageCollectionTime() { @@ -277,34 +263,30 @@ public static long getGarbageCollectionTime() { return totalGCTime; } - private static synchronized void collectGarbage() { - Runtime runtime = Runtime.getRuntime(); - long total = runtime.totalMemory(); - long time = System.nanoTime(); - if (lastGC + TimeUnit.MILLISECONDS.toNanos(GC_DELAY) < time) { - for (int i = 0; i < MAX_GC; i++) { - runtime.gc(); - long now = runtime.totalMemory(); - if (now == total) { - lastGC = System.nanoTime(); - break; - } - total = now; + public static long getGarbageCollectionCount() { + long totalGCCount = 0; + int poolCount = 0; + for (GarbageCollectorMXBean gcMXBean : ManagementFactory.getGarbageCollectorMXBeans()) { + long collectionCount = gcMXBean.getCollectionTime(); + if(collectionCount > 
0) { + totalGCCount += collectionCount; + poolCount += gcMXBean.getMemoryPoolNames().length; } } + poolCount = Math.max(poolCount, 1); + return (totalGCCount + (poolCount >> 1)) / poolCount; } /** - * Create an int array with the given size. - * - * @param len the number of bytes requested - * @return the int array + * Run Java memory garbage collection. */ - public static int[] newIntArray(int len) { - if (len == 0) { - return EMPTY_INT_ARRAY; + public static synchronized void collectGarbage() { + Runtime runtime = Runtime.getRuntime(); + long garbageCollectionCount = getGarbageCollectionCount(); + while (garbageCollectionCount == getGarbageCollectionCount()) { + runtime.gc(); + Thread.yield(); } - return new int[len]; } /** @@ -317,56 +299,48 @@ public static ArrayList newSmallArrayList() { return new ArrayList<>(4); } - /** - * Create a long array with the given size. - * - * @param len the number of bytes requested - * @return the int array - */ - public static long[] newLongArray(int len) { - if (len == 0) { - return EMPTY_LONG_ARRAY; - } - return new long[len]; - } - /** * Find the top limit values using given comparator and place them as in a * full array sort, in descending order. * + * @param the type of elements * @param array the array. - * @param offset the offset. - * @param limit the limit. + * @param fromInclusive the start index, inclusive + * @param toExclusive the end index, exclusive * @param comp the comparator. 
*/ - public static void sortTopN(X[] array, int offset, int limit, - Comparator comp) { - partitionTopN(array, offset, limit, comp); - Arrays.sort(array, offset, - (int) Math.min((long) offset + limit, array.length), comp); + public static void sortTopN(X[] array, int fromInclusive, int toExclusive, Comparator comp) { + int highInclusive = array.length - 1; + if (highInclusive > 0 && toExclusive > fromInclusive) { + partialQuickSort(array, 0, highInclusive, comp, fromInclusive, toExclusive - 1); + Arrays.sort(array, fromInclusive, toExclusive, comp); + } } /** - * Find the top limit values using given comparator and place them as in a - * full array sort. This method does not sort the top elements themselves. + * Partial quick sort. + * + *

          + * Works with elements from {@code low} to {@code high} indexes, inclusive. + *

          + *

          + * Moves smallest elements to {@code low..start-1} positions and largest + * elements to {@code end+1..high} positions. Middle elements are placed + * into {@code start..end} positions. All these regions aren't fully sorted. + *

          * - * @param array the array - * @param offset the offset - * @param limit the limit + * @param the type of elements + * @param array the array to sort + * @param low the lower index with data, inclusive + * @param high the higher index with data, inclusive, {@code high > low} * @param comp the comparator + * @param start the start index of requested region, inclusive + * @param end the end index of requested region, inclusive, {@code end >= start} */ - private static void partitionTopN(X[] array, int offset, int limit, - Comparator comp) { - partialQuickSort(array, 0, array.length - 1, comp, offset, offset + - limit - 1); - } - private static void partialQuickSort(X[] array, int low, int high, Comparator comp, int start, int end) { - if (low > end || high < start || (low > start && high < end)) { - return; - } - if (low == high) { + if (low >= start && high <= end) { + // Don't sort blocks entirely contained in the middle region return; } int i = low, j = high; @@ -391,46 +365,20 @@ private static void partialQuickSort(X[] array, int low, int high, array[j--] = temp; } } - if (low < j) { + if (low < j && /* Intersection with middle region */ start <= j) { partialQuickSort(array, low, j, comp, start, end); } - if (i < high) { + if (i < high && /* Intersection with middle region */ i <= end) { partialQuickSort(array, i, high, comp, start, end); } } - /** - * Checks if given classes have a common Comparable superclass. 
- * - * @param c1 the first class - * @param c2 the second class - * @return true if they have - */ - public static boolean haveCommonComparableSuperclass( - Class c1, Class c2) { - if (c1 == c2 || c1.isAssignableFrom(c2) || c2.isAssignableFrom(c1)) { - return true; - } - Class top1; - do { - top1 = c1; - c1 = c1.getSuperclass(); - } while (Comparable.class.isAssignableFrom(c1)); - - Class top2; - do { - top2 = c2; - c2 = c2.getSuperclass(); - } while (Comparable.class.isAssignableFrom(c2)); - - return top1 == top2; - } - /** * Get a resource from the resource map. * * @param name the name of the resource * @return the resource data + * @throws IOException on failure */ public static byte[] getResource(String name) throws IOException { byte[] data = RESOURCES.get(name); @@ -487,6 +435,7 @@ private static byte[] loadResource(String name) throws IOException { * "java.lang.System.gc" * @param params the method parameters * @return the return value from this call + * @throws Exception on failure */ public static Object callStaticMethod(String classAndMethod, Object... params) throws Exception { @@ -505,6 +454,7 @@ public static Object callStaticMethod(String classAndMethod, * @param methodName a string with the method name * @param params the method parameters * @return the return value from this call + * @throws Exception on failure */ public static Object callMethod( Object instance, @@ -544,6 +494,7 @@ private static Object callMethod( * @param className a string with the entire class, eg. "java.lang.Integer" * @param params the constructor parameters * @return the newly created object + * @throws Exception on failure */ public static Object newInstance(String className, Object... params) throws Exception { @@ -583,47 +534,6 @@ private static int match(Class[] params, Object[] values) { return 0; } - /** - * Returns a static field. 
- * - * @param classAndField a string with the entire class and field name - * @return the field value - */ - public static Object getStaticField(String classAndField) throws Exception { - int lastDot = classAndField.lastIndexOf('.'); - String className = classAndField.substring(0, lastDot); - String fieldName = classAndField.substring(lastDot + 1); - return Class.forName(className).getField(fieldName).get(null); - } - - /** - * Returns a static field. - * - * @param instance the instance on which the call is done - * @param fieldName the field name - * @return the field value - */ - public static Object getField(Object instance, String fieldName) - throws Exception { - return instance.getClass().getField(fieldName).get(instance); - } - - /** - * Returns true if the class is present in the current class loader. - * - * @param fullyQualifiedClassName a string with the entire class name, eg. - * "java.lang.System" - * @return true if the class is present - */ - public static boolean isClassPresent(String fullyQualifiedClassName) { - try { - Class.forName(fullyQualifiedClassName); - return true; - } catch (ClassNotFoundException e) { - return false; - } - } - /** * Convert primitive class names to java.lang.* class names. * @@ -783,10 +693,58 @@ public static int scaleForAvailableMemory(int value) { return (int) (value * physicalMemorySize / (1024 * 1024 * 1024)); } catch (Exception e) { // ignore + } catch (Error error) { + // ignore } return value; } + /** + * Returns the current value of the high-resolution time source. + * + * @return time in nanoseconds, never equal to 0 + * @see System#nanoTime() + */ + public static long currentNanoTime() { + long time = System.nanoTime(); + if (time == 0L) { + time = 1L; + } + return time; + } + + /** + * Returns the current value of the high-resolution time source plus the + * specified offset. 
+ * + * @param ms + * additional offset in milliseconds + * @return time in nanoseconds, never equal to 0 + * @see System#nanoTime() + */ + public static long currentNanoTimePlusMillis(int ms) { + return nanoTimePlusMillis(System.nanoTime(), ms); + } + + /** + * Returns the current value of the high-resolution time source plus the + * specified offset. + * + * @param nanoTime + * time in nanoseconds + * @param ms + * additional offset in milliseconds + * @return time in nanoseconds, never equal to 0 + * @see System#nanoTime() + */ + public static long nanoTimePlusMillis(long nanoTime, int ms) { + long time = nanoTime + ms * 1_000_000L; + if (time == 0L) { + time = 1L; + } + return time; + } + /** * The utility methods will try to use the provided class factories to * convert binary name of class to Class object. Used by H2 OSGi Activator diff --git a/h2/src/main/org/h2/util/Utils10.java b/h2/src/main/org/h2/util/Utils10.java new file mode 100644 index 0000000000..a77dbac435 --- /dev/null +++ b/h2/src/main/org/h2/util/Utils10.java @@ -0,0 +1,78 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.Socket; +import java.nio.charset.Charset; + +/** + * Utilities with specialized implementations for Java 10 and later versions. + * + * This class contains basic implementations for Java 8 and 9 and it is + * overridden in multi-release JARs. + */ +public final class Utils10 { + + /* + * Signatures of methods should match with + * h2/src/java10/src/org/h2/util/Utils10.java and precompiled + * h2/src/java10/precompiled/org/h2/util/Utils10.class. + */ + + /** + * Converts the buffer's contents into a string by decoding the bytes using + * the specified {@link java.nio.charset.Charset charset}. 
+ * + * @param baos + * the buffer to decode + * @param charset + * the charset to use + * @return the decoded string + */ + public static String byteArrayOutputStreamToString(ByteArrayOutputStream baos, Charset charset) { + try { + return baos.toString(charset.name()); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + } + + /** + * Returns the value of TCP_QUICKACK option. + * + * @param socket + * the socket + * @return the current value of TCP_QUICKACK option + * @throws IOException + * on I/O exception + * @throws UnsupportedOperationException + * if TCP_QUICKACK is not supported + */ + public static boolean getTcpQuickack(Socket socket) throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * Sets the value of TCP_QUICKACK option. + * + * @param socket + * the socket + * @param value + * the value to set + * @return whether operation was successful + */ + public static boolean setTcpQuickack(Socket socket, boolean value) { + // The default implementation does nothing + return false; + } + + private Utils10() { + } + +} diff --git a/h2/src/main/org/h2/util/geometry/EWKBUtils.java b/h2/src/main/org/h2/util/geometry/EWKBUtils.java index b4972424c7..8a598dc775 100644 --- a/h2/src/main/org/h2/util/geometry/EWKBUtils.java +++ b/h2/src/main/org/h2/util/geometry/EWKBUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util.geometry; @@ -26,7 +26,6 @@ import org.h2.util.Bits; import org.h2.util.StringUtils; -import org.h2.util.geometry.GeometryUtils.DimensionSystemTarget; import org.h2.util.geometry.GeometryUtils.Target; /** @@ -38,7 +37,7 @@ * extensions. 
This class can read dimension system marks in both OGC WKB and * EWKB formats, but always writes them in EWKB format. SRID support from EWKB * is implemented. As an addition POINT EMPTY is stored with NaN values as - * specified in OGC 12-128r15. + * specified in OGC 12-128r15. *

          */ public final class EWKBUtils { @@ -90,8 +89,17 @@ protected void startLineString(int numPoints) { @Override protected void startPolygon(int numInner, int numPoints) { writeHeader(POLYGON); - writeInt(numInner + 1); - writeInt(numPoints); + if (numInner == 0 && numPoints == 0) { + /* + * Representation of POLYGON EMPTY is not defined is + * specification. We store it as a polygon with 0 rings, as + * PostGIS does. + */ + writeInt(0); + } else { + writeInt(numInner + 1); + writeInt(numPoints); + } } @Override @@ -146,9 +154,13 @@ protected void addCoordinate(double x, double y, double z, double m, int index, writeDouble(y); if ((dimensionSystem & DIMENSION_SYSTEM_XYZ) != 0) { writeDouble(check ? checkFinite(z) : z); + } else if (check && !Double.isNaN(z)) { + throw new IllegalArgumentException(); } if ((dimensionSystem & DIMENSION_SYSTEM_XYM) != 0) { writeDouble(check ? checkFinite(m) : m); + } else if (check && !Double.isNaN(m)) { + throw new IllegalArgumentException(); } } @@ -254,11 +266,7 @@ public String toString() { * @return canonical EWKB, may be the same as the source */ public static byte[] ewkb2ewkb(byte[] ewkb) { - // Determine dimension system first - DimensionSystemTarget dimensionTarget = new DimensionSystemTarget(); - parseEWKB(ewkb, dimensionTarget); - // Write an EWKB - return ewkb2ewkb(ewkb, dimensionTarget.getDimensionSystem()); + return ewkb2ewkb(ewkb, getDimensionSystem(ewkb)); } /** @@ -393,22 +401,26 @@ private static void parseEWKB(EWKBSource source, Target target, int parentType) if (parentType != 0 && parentType != MULTI_POLYGON && parentType != GEOMETRY_COLLECTION) { throw new IllegalArgumentException(); } - int numInner = source.readInt() - 1; - if (numInner < 0) { + int numRings = source.readInt(); + if (numRings == 0) { + target.startPolygon(0, 0); + break; + } else if (numRings < 0) { throw new IllegalArgumentException(); } + numRings--; int size = source.readInt(); // Size may be 0 (EMPTY) or 4+ if (size < 0 || size >= 1 
&& size <= 3) { throw new IllegalArgumentException(); } - if (size == 0 && numInner > 0) { + if (size == 0 && numRings > 0) { throw new IllegalArgumentException(); } - target.startPolygon(numInner, size); + target.startPolygon(numRings, size); if (size > 0) { addRing(source, target, useZ, useM, size); - for (int i = 0; i < numInner; i++) { + for (int i = 0; i < numRings; i++) { size = source.readInt(); // Size may be 0 (EMPTY) or 4+ if (size < 0 || size >= 1 && size <= 3) { @@ -436,14 +448,14 @@ private static void parseEWKB(EWKBSource source, Target target, int parentType) for (int i = 0; i < numItems; i++) { Target innerTarget = target.startCollectionItem(i, numItems); parseEWKB(source, innerTarget, type); - target.endCollectionItem(innerTarget, i, numItems); + target.endCollectionItem(innerTarget, type, i, numItems); } - target.endCollection(type); break; } default: throw new IllegalArgumentException(); } + target.endObject(type); } private static void addRing(EWKBSource source, Target target, boolean useZ, boolean useM, int size) { @@ -477,6 +489,29 @@ private static void addCoordinate(EWKBSource source, Target target, boolean useZ index, total); } + /** + * Reads the dimension system from EWKB. + * + * @param ewkb + * EWKB + * @return the dimension system + */ + public static int getDimensionSystem(byte[] ewkb) { + EWKBSource source = new EWKBSource(ewkb); + // Read byte order of a next geometry + switch (source.readByte()) { + case 0: + source.bigEndian = true; + break; + case 1: + source.bigEndian = false; + break; + default: + throw new IllegalArgumentException(); + } + return type2dimensionSystem(source.readInt()); + } + /** * Converts an envelope to a WKB. 
* diff --git a/h2/src/main/org/h2/util/geometry/EWKTUtils.java b/h2/src/main/org/h2/util/geometry/EWKTUtils.java index 3fc719c69d..ccf245d615 100644 --- a/h2/src/main/org/h2/util/geometry/EWKTUtils.java +++ b/h2/src/main/org/h2/util/geometry/EWKTUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util.geometry; @@ -24,8 +24,8 @@ import java.io.ByteArrayOutputStream; import java.util.ArrayList; +import org.h2.util.StringUtils; import org.h2.util.geometry.EWKBUtils.EWKBTarget; -import org.h2.util.geometry.GeometryUtils.DimensionSystemTarget; import org.h2.util.geometry.GeometryUtils.Target; /** @@ -44,7 +44,7 @@ public final class EWKTUtils { * 0-based type names of geometries, subtract 1 from type code to get index * in this array. 
*/ - private static final String[] TYPES = { // + static final String[] TYPES = { // "POINT", // "LINESTRING", // "POLYGON", // @@ -146,31 +146,7 @@ private void writeHeader(int type) { if (inMulti) { return; } - switch (type) { - case POINT: - output.append("POINT"); - break; - case LINE_STRING: - output.append("LINESTRING"); - break; - case POLYGON: - output.append("POLYGON"); - break; - case MULTI_POINT: - output.append("MULTIPOINT"); - break; - case MULTI_LINE_STRING: - output.append("MULTILINESTRING"); - break; - case MULTI_POLYGON: - output.append("MULTIPOLYGON"); - break; - case GEOMETRY_COLLECTION: - output.append("GEOMETRYCOLLECTION"); - break; - default: - throw new IllegalArgumentException(); - } + output.append(TYPES[type - 1]); switch (dimensionSystem) { case DIMENSION_SYSTEM_XYZ: output.append(" Z"); @@ -195,15 +171,18 @@ protected Target startCollectionItem(int index, int total) { } @Override - protected void endCollectionItem(Target target, int index, int total) { + protected void endCollectionItem(Target target, int type, int index, int total) { if (index + 1 == total) { output.append(')'); } } @Override - protected void endCollection(int type) { - if (type != GEOMETRY_COLLECTION) { + protected void endObject(int type) { + switch (type) { + case MULTI_POINT: + case MULTI_LINE_STRING: + case MULTI_POLYGON: inMulti = false; } } @@ -276,7 +255,7 @@ int readSRID() { while (ewkt.charAt(end - 1) <= ' ') { end--; } - srid = Integer.parseInt(ewkt.substring(offset, end).trim()); + srid = Integer.parseInt(StringUtils.trimSubstring(ewkt, offset, end)); offset = idx + 1; } else { srid = 0; @@ -542,11 +521,7 @@ public String toString() { * @return EWKT representation */ public static String ewkb2ewkt(byte[] ewkb) { - // Determine dimension system first - DimensionSystemTarget dimensionTarget = new DimensionSystemTarget(); - EWKBUtils.parseEWKB(ewkb, dimensionTarget); - // Write an EWKT - return ewkb2ewkt(ewkb, dimensionTarget.getDimensionSystem()); + return 
ewkb2ewkt(ewkb, EWKBUtils.getDimensionSystem(ewkb)); } /** @@ -560,8 +535,7 @@ public static String ewkb2ewkt(byte[] ewkb) { */ public static String ewkb2ewkt(byte[] ewkb, int dimensionSystem) { StringBuilder output = new StringBuilder(); - EWKTTarget target = new EWKTTarget(output, dimensionSystem); - EWKBUtils.parseEWKB(ewkb, target); + EWKBUtils.parseEWKB(ewkb, new EWKTTarget(output, dimensionSystem)); return output.toString(); } @@ -573,11 +547,7 @@ public static String ewkb2ewkt(byte[] ewkb, int dimensionSystem) { * @return EWKB representation */ public static byte[] ewkt2ewkb(String ewkt) { - // Determine dimension system first - DimensionSystemTarget dimensionTarget = new DimensionSystemTarget(); - parseEWKT(ewkt, dimensionTarget); - // Write an EWKB - return ewkt2ewkb(ewkt, dimensionTarget.getDimensionSystem()); + return ewkt2ewkb(ewkt, getDimensionSystem(ewkt)); } /** @@ -597,7 +567,7 @@ public static byte[] ewkt2ewkb(String ewkt, int dimensionSystem) { } /** - * Parses a EWKB. + * Parses a EWKT. * * @param ewkt * source EWKT @@ -655,22 +625,24 @@ public static int parseDimensionSystem(String s) { /** * Formats type and dimension system as a string. 
* + * @param builder + * string builder * @param type * OGC geometry code format (type + dimensionSystem * 1000) - * @return formatted string + * @return the specified string builder * @throws IllegalArgumentException * if type is not valid */ - public static String formatGeometryTypeAndDimensionSystem(int type) { + public static StringBuilder formatGeometryTypeAndDimensionSystem(StringBuilder builder, int type) { int t = type % 1_000, d = type / 1_000; if (t < POINT || t > GEOMETRY_COLLECTION || d < DIMENSION_SYSTEM_XY || d > DIMENSION_SYSTEM_XYZM) { throw new IllegalArgumentException(); } - String result = TYPES[t - 1]; + builder.append(TYPES[t - 1]); if (d != DIMENSION_SYSTEM_XY) { - result = result + ' ' + DIMENSION_SYSTEMS[d]; + builder.append(' ').append(DIMENSION_SYSTEMS[d]); } - return result; + return builder; } /** @@ -787,13 +759,9 @@ private static void parseEWKT(EWKTSource source, Target target, int parentType, break; } case MULTI_POINT: - parseCollection(source, target, MULTI_POINT, parentType, dimensionSystem); - break; case MULTI_LINE_STRING: - parseCollection(source, target, MULTI_LINE_STRING, parentType, dimensionSystem); - break; case MULTI_POLYGON: - parseCollection(source, target, MULTI_POLYGON, parentType, dimensionSystem); + parseCollection(source, target, type, parentType, dimensionSystem); break; case GEOMETRY_COLLECTION: parseCollection(source, target, GEOMETRY_COLLECTION, parentType, 0); @@ -801,6 +769,7 @@ private static void parseEWKT(EWKTSource source, Target target, int parentType, default: throw new IllegalArgumentException(); } + target.endObject(type); if (parentType == 0 && source.hasData()) { throw new IllegalArgumentException(); } @@ -825,12 +794,11 @@ private static void parseCollection(EWKTSource source, Target target, int type, } Target innerTarget = target.startCollectionItem(i, numItems); parseEWKT(source, innerTarget, type, dimensionSystem); - target.endCollectionItem(innerTarget, i, numItems); + 
target.endCollectionItem(innerTarget, type, i, numItems); } source.read(')'); } } - target.endCollection(type); } private static void parseMultiPointAlternative(EWKTSource source, Target target, int dimensionSystem) { @@ -846,7 +814,7 @@ private static void parseMultiPointAlternative(EWKTSource source, Target target, target.startPoint(); double[] c = points.get(i); target.addCoordinate(c[X], c[Y], c[Z], c[M], 0, 1); - target.endCollectionItem(innerTarget, i, numItems); + target.endCollectionItem(innerTarget, MULTI_POINT, i, numItems); } } @@ -887,37 +855,33 @@ private static void addRing(ArrayList ring, Target target) { private static void addCoordinate(EWKTSource source, Target target, int dimensionSystem, int index, int total) { double x = source.readCoordinate(); double y = source.readCoordinate(); - double z = Double.NaN, m = Double.NaN; - if (source.hasCoordinate()) { - if (dimensionSystem == DIMENSION_SYSTEM_XYM) { - m = source.readCoordinate(); - } else { - z = source.readCoordinate(); - if (source.hasCoordinate()) { - m = source.readCoordinate(); - } - } - } + double z = (dimensionSystem & DIMENSION_SYSTEM_XYZ) != 0 ? source.readCoordinate() : Double.NaN; + double m = (dimensionSystem & DIMENSION_SYSTEM_XYM) != 0 ? source.readCoordinate() : Double.NaN; target.addCoordinate(x, y, z, m, index, total); } private static double[] readCoordinate(EWKTSource source, int dimensionSystem) { double x = source.readCoordinate(); double y = source.readCoordinate(); - double z = Double.NaN, m = Double.NaN; - if (source.hasCoordinate()) { - if (dimensionSystem == DIMENSION_SYSTEM_XYM) { - m = source.readCoordinate(); - } else { - z = source.readCoordinate(); - if (source.hasCoordinate()) { - m = source.readCoordinate(); - } - } - } + double z = (dimensionSystem & DIMENSION_SYSTEM_XYZ) != 0 ? source.readCoordinate() : Double.NaN; + double m = (dimensionSystem & DIMENSION_SYSTEM_XYM) != 0 ? 
source.readCoordinate() : Double.NaN; return new double[] { x, y, z, m }; } + /** + * Reads the dimension system from EWKT. + * + * @param ewkt + * EWKT source + * @return the dimension system + */ + public static int getDimensionSystem(String ewkt) { + EWKTSource source = new EWKTSource(ewkt); + source.readSRID(); + source.readType(); + return source.readDimensionSystem(); + } + private EWKTUtils() { } diff --git a/h2/src/main/org/h2/util/geometry/GeoJsonUtils.java b/h2/src/main/org/h2/util/geometry/GeoJsonUtils.java new file mode 100644 index 0000000000..1ba11df788 --- /dev/null +++ b/h2/src/main/org/h2/util/geometry/GeoJsonUtils.java @@ -0,0 +1,455 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.geometry; + +import static org.h2.util.geometry.GeometryUtils.DIMENSION_SYSTEM_XYM; +import static org.h2.util.geometry.GeometryUtils.DIMENSION_SYSTEM_XYZ; +import static org.h2.util.geometry.GeometryUtils.GEOMETRY_COLLECTION; +import static org.h2.util.geometry.GeometryUtils.LINE_STRING; +import static org.h2.util.geometry.GeometryUtils.M; +import static org.h2.util.geometry.GeometryUtils.MULTI_LINE_STRING; +import static org.h2.util.geometry.GeometryUtils.MULTI_POINT; +import static org.h2.util.geometry.GeometryUtils.MULTI_POLYGON; +import static org.h2.util.geometry.GeometryUtils.POINT; +import static org.h2.util.geometry.GeometryUtils.POLYGON; +import static org.h2.util.geometry.GeometryUtils.X; +import static org.h2.util.geometry.GeometryUtils.Y; +import static org.h2.util.geometry.GeometryUtils.Z; + +import java.io.ByteArrayOutputStream; +import java.math.BigDecimal; + +import org.h2.api.ErrorCode; +import org.h2.message.DbException; +import org.h2.util.geometry.EWKBUtils.EWKBTarget; +import org.h2.util.geometry.GeometryUtils.DimensionSystemTarget; +import org.h2.util.geometry.GeometryUtils.Target; +import 
org.h2.util.json.JSONArray; +import org.h2.util.json.JSONByteArrayTarget; +import org.h2.util.json.JSONBytesSource; +import org.h2.util.json.JSONNull; +import org.h2.util.json.JSONNumber; +import org.h2.util.json.JSONObject; +import org.h2.util.json.JSONString; +import org.h2.util.json.JSONValue; +import org.h2.util.json.JSONValueTarget; + +/** + * GeoJson format support for GEOMETRY data type. + */ +public final class GeoJsonUtils { + + /** + * 0-based type names of geometries, subtract 1 from type code to get index + * in this array. + */ + static final String[] TYPES = { // + "Point", // + "LineString", // + "Polygon", // + "MultiPoint", // + "MultiLineString", // + "MultiPolygon", // + "GeometryCollection", // + }; + + /** + * Converter output target that writes a GeoJson. + */ + public static final class GeoJsonTarget extends Target { + + private final JSONByteArrayTarget output; + + private final int dimensionSystem; + + private int type; + + private boolean inMulti, inMultiLine, wasEmpty; + + /** + * Creates new GeoJson output target. 
+ * + * @param output + * output JSON target + * @param dimensionSystem + * dimension system to use + */ + public GeoJsonTarget(JSONByteArrayTarget output, int dimensionSystem) { + if (dimensionSystem == DIMENSION_SYSTEM_XYM) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, + "M (XYM) dimension system is not supported in GeoJson"); + } + this.output = output; + this.dimensionSystem = dimensionSystem; + } + + @Override + protected void startPoint() { + type = POINT; + wasEmpty = false; + } + + @Override + protected void startLineString(int numPoints) { + writeHeader(LINE_STRING); + if (numPoints == 0) { + output.endArray(); + } + } + + @Override + protected void startPolygon(int numInner, int numPoints) { + writeHeader(POLYGON); + if (numPoints == 0) { + output.endArray(); + } else { + output.startArray(); + } + } + + @Override + protected void startPolygonInner(int numInner) { + output.startArray(); + if (numInner == 0) { + output.endArray(); + } + } + + @Override + protected void endNonEmptyPolygon() { + output.endArray(); + } + + @Override + protected void startCollection(int type, int numItems) { + writeHeader(type); + if (type != GEOMETRY_COLLECTION) { + inMulti = true; + if (type == MULTI_LINE_STRING || type == MULTI_POLYGON) { + inMultiLine = true; + } + } + } + + @Override + protected Target startCollectionItem(int index, int total) { + if (inMultiLine) { + output.startArray(); + } + return this; + } + + @Override + protected void endObject(int type) { + switch (type) { + case MULTI_POINT: + case MULTI_LINE_STRING: + case MULTI_POLYGON: + inMultiLine = inMulti = false; + //$FALL-THROUGH$ + case GEOMETRY_COLLECTION: + output.endArray(); + } + if (!inMulti && !wasEmpty) { + output.endObject(); + } + } + + private void writeHeader(int type) { + this.type = type; + wasEmpty = false; + if (!inMulti) { + writeStartObject(type); + } + } + + @Override + protected void addCoordinate(double x, double y, double z, double m, int index, int total) { + if 
(type == POINT) { + if (Double.isNaN(x) && Double.isNaN(y) && Double.isNaN(z) && Double.isNaN(m)) { + wasEmpty = true; + output.valueNull(); + return; + } + if (!inMulti) { + writeStartObject(POINT); + } + } + output.startArray(); + writeDouble(x); + writeDouble(y); + if ((dimensionSystem & DIMENSION_SYSTEM_XYZ) != 0) { + writeDouble(z); + } + if ((dimensionSystem & DIMENSION_SYSTEM_XYM) != 0) { + writeDouble(m); + } + output.endArray(); + if (type != POINT && index + 1 == total) { + output.endArray(); + } + } + + private void writeStartObject(int type) { + output.startObject(); + output.member("type"); + output.valueString(TYPES[type - 1]); + output.member(type != GEOMETRY_COLLECTION ? "coordinates" : "geometries"); + if (type != POINT) { + output.startArray(); + } + } + + private void writeDouble(double v) { + output.valueNumber(BigDecimal.valueOf(GeometryUtils.checkFinite(v)).stripTrailingZeros()); + } + + } + + /** + * Converts EWKB with known dimension system to GeoJson. + * + * @param ewkb + * geometry object in EWKB format + * @param dimensionSystem + * dimension system of the specified object, may be the same or + * smaller than its real dimension system. M dimension system is + * not supported. + * @return GeoJson representation of the specified geometry + * @throws DbException + * on unsupported dimension system + */ + public static byte[] ewkbToGeoJson(byte[] ewkb, int dimensionSystem) { + JSONByteArrayTarget output = new JSONByteArrayTarget(); + GeoJsonTarget target = new GeoJsonTarget(output, dimensionSystem); + EWKBUtils.parseEWKB(ewkb, target); + return output.getResult(); + } + + /** + * Converts EWKB with known dimension system to GeoJson. 
+ * + * @param json + * geometry object in GeoJson format + * @param srid + * the SRID of geometry + * @return GeoJson representation of the specified geometry + * @throws DbException + * on unsupported dimension system + */ + public static byte[] geoJsonToEwkb(byte[] json, int srid) { + JSONValue v = JSONBytesSource.parse(json, new JSONValueTarget()); + DimensionSystemTarget dst = new DimensionSystemTarget(); + parse(v, dst); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + EWKBTarget target = new EWKBTarget(baos, dst.getDimensionSystem()); + target.init(srid); + parse(v, target); + return baos.toByteArray(); + } + + private static void parse(JSONValue v, GeometryUtils.Target target) { + if (v instanceof JSONNull) { + target.startPoint(); + target.addCoordinate(Double.NaN, Double.NaN, Double.NaN, Double.NaN, 0, 1); + target.endObject(POINT); + } else if (v instanceof JSONObject) { + JSONObject o = (JSONObject) v; + JSONValue t = o.getFirst("type"); + if (!(t instanceof JSONString)) { + throw new IllegalArgumentException(); + } + switch (((JSONString) t).getString()) { + case "Point": + parse(o, target, POINT); + break; + case "LineString": + parse(o, target, LINE_STRING); + break; + case "Polygon": + parse(o, target, POLYGON); + break; + case "MultiPoint": + parse(o, target, MULTI_POINT); + break; + case "MultiLineString": + parse(o, target, MULTI_LINE_STRING); + break; + case "MultiPolygon": + parse(o, target, MULTI_POLYGON); + break; + case "GeometryCollection": + parseGeometryCollection(o, target); + break; + default: + throw new IllegalArgumentException(); + } + } else { + throw new IllegalArgumentException(); + } + } + + private static void parse(JSONObject o, Target target, int type) { + JSONValue t = o.getFirst("coordinates"); + if (!(t instanceof JSONArray)) { + throw new IllegalArgumentException(); + } + JSONArray a = (JSONArray) t; + switch (type) { + case POINT: + target.startPoint(); + parseCoordinate(a, target, 0, 1); + 
target.endObject(POINT); + break; + case LINE_STRING: { + parseLineString(a, target); + break; + } + case POLYGON: { + parsePolygon(a, target); + break; + } + case MULTI_POINT: { + JSONValue[] points = a.getArray(); + int numPoints = points.length; + target.startCollection(MULTI_POINT, numPoints); + for (int i = 0; i < numPoints; i++) { + target.startPoint(); + parseCoordinate(points[i], target, 0, 1); + target.endObject(POINT); + target.endCollectionItem(target, MULTI_POINT, i, numPoints); + } + target.endObject(MULTI_POINT); + break; + } + case MULTI_LINE_STRING: { + JSONValue[] strings = a.getArray(); + int numStrings = strings.length; + target.startCollection(MULTI_LINE_STRING, numStrings); + for (int i = 0; i < numStrings; i++) { + JSONValue string = strings[i]; + if (!(string instanceof JSONArray)) { + throw new IllegalArgumentException(); + } + parseLineString((JSONArray) string, target); + target.endCollectionItem(target, MULTI_LINE_STRING, i, numStrings); + } + target.endObject(MULTI_LINE_STRING); + break; + } + case MULTI_POLYGON: { + JSONValue[] polygons = a.getArray(); + int numPolygons = polygons.length; + target.startCollection(MULTI_POLYGON, numPolygons); + for (int i = 0; i < numPolygons; i++) { + JSONValue string = polygons[i]; + if (!(string instanceof JSONArray)) { + throw new IllegalArgumentException(); + } + parsePolygon((JSONArray) string, target); + target.endCollectionItem(target, MULTI_POLYGON, i, numPolygons); + } + target.endObject(MULTI_POLYGON); + break; + } + default: + throw new IllegalArgumentException(); + } + } + + private static void parseGeometryCollection(JSONObject o, Target target) { + JSONValue t = o.getFirst("geometries"); + if (!(t instanceof JSONArray)) { + throw new IllegalArgumentException(); + } + JSONArray a = (JSONArray) t; + JSONValue[] geometries = a.getArray(); + int numGeometries = geometries.length; + target.startCollection(GEOMETRY_COLLECTION, numGeometries); + for (int i = 0; i < numGeometries; i++) { + 
JSONValue geometry = geometries[i]; + parse(geometry, target); + target.endCollectionItem(target, GEOMETRY_COLLECTION, i, numGeometries); + } + target.endObject(GEOMETRY_COLLECTION); + } + + private static void parseLineString(JSONArray a, Target target) { + JSONValue[] points = a.getArray(); + int numPoints = points.length; + target.startLineString(numPoints); + for (int i = 0; i < numPoints; i++) { + parseCoordinate(points[i], target, i, numPoints); + } + target.endObject(LINE_STRING); + } + + private static void parsePolygon(JSONArray a, Target target) { + JSONValue[] rings = a.getArray(); + int numRings = rings.length; + if (numRings == 0) { + target.startPolygon(0, 0); + } else { + JSONValue ring = rings[0]; + if (!(ring instanceof JSONArray)) { + throw new IllegalArgumentException(); + } + JSONValue[] points = ((JSONArray) ring).getArray(); + target.startPolygon(numRings - 1, points.length); + parseRing(points, target); + for (int i = 1; i < numRings; i++) { + ring = rings[i]; + if (!(ring instanceof JSONArray)) { + throw new IllegalArgumentException(); + } + points = ((JSONArray) ring).getArray(); + target.startPolygonInner(points.length); + parseRing(points, target); + } + target.endNonEmptyPolygon(); + } + target.endObject(POLYGON); + } + + private static void parseRing(JSONValue[] points, Target target) { + int numPoints = points.length; + for (int i = 0; i < numPoints; i++) { + parseCoordinate(points[i], target, i, numPoints); + } + } + + private static void parseCoordinate(JSONValue v, Target target, int index, int total) { + if (v instanceof JSONNull) { + target.addCoordinate(Double.NaN, Double.NaN, Double.NaN, Double.NaN, 0, 1); + return; + } + if (!(v instanceof JSONArray)) { + throw new IllegalArgumentException(); + } + JSONValue[] values = ((JSONArray) v).getArray(); + int length = values.length; + if (length < 2) { + throw new IllegalArgumentException(); + } + target.addCoordinate(readCoordinate(values, X), readCoordinate(values, Y), 
readCoordinate(values, Z), + readCoordinate(values, M), index, total); + } + + private static double readCoordinate(JSONValue[] values, int index) { + if (index >= values.length) { + return Double.NaN; + } + JSONValue v = values[index]; + if (!(v instanceof JSONNumber)) { + throw new IllegalArgumentException(); + } + return ((JSONNumber) v).getBigDecimal().doubleValue(); + } + + private GeoJsonUtils() { + } + +} diff --git a/h2/src/main/org/h2/util/geometry/GeometryUtils.java b/h2/src/main/org/h2/util/geometry/GeometryUtils.java index c5fd6d40f7..ef4bf8ea74 100644 --- a/h2/src/main/org/h2/util/geometry/GeometryUtils.java +++ b/h2/src/main/org/h2/util/geometry/GeometryUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.util.geometry; @@ -13,7 +13,7 @@ public final class GeometryUtils { /** * Converter output target. */ - public static abstract class Target { + public abstract static class Target { public Target() { } @@ -52,7 +52,8 @@ protected void startLineString(int numPoints) { } /** - * Invoked before writing a POLYGON. + * Invoked before writing a POLYGON. If polygon is empty, both + * parameters are 0. * * @param numInner * number of inner polygons @@ -114,21 +115,23 @@ protected Target startCollectionItem(int index, int total) { * * @param target * the result of {@link #startCollectionItem(int, int)} + * @param type + * type of collection * @param index * 0-based index of this item in the collection * @param total * total number of items in the collection */ - protected void endCollectionItem(Target target, int index, int total) { + protected void endCollectionItem(Target target, int type, int index, int total) { } /** - * Invoked after writing of a collection. 
+ * Invoked after writing of the object. * * @param type - * type of collection, see {@link #startCollection(int, int)} + * type of the object */ - protected void endCollection(int type) { + protected void endObject(int type) { } /** @@ -278,116 +281,6 @@ public int getDimensionSystem() { } - /** - * Converter output target that calculates an envelope and determines the - * minimal dimension system. - */ - public static final class EnvelopeAndDimensionSystemTarget extends Target { - - /** - * Enables or disables the envelope calculation. Inner rings of polygons - * are not counted. - */ - private boolean enabled; - - /** - * Whether envelope was set. - */ - private boolean set; - - private double minX, maxX, minY, maxY; - - private boolean hasZ; - - private boolean hasM; - - /** - * Creates a new envelope and dimension system calculation target. - */ - public EnvelopeAndDimensionSystemTarget() { - } - - @Override - protected void dimensionSystem(int dimensionSystem) { - if ((dimensionSystem & DIMENSION_SYSTEM_XYZ) != 0) { - hasZ = true; - } - if ((dimensionSystem & DIMENSION_SYSTEM_XYM) != 0) { - hasM = true; - } - } - - @Override - protected void startPoint() { - enabled = true; - } - - @Override - protected void startLineString(int numPoints) { - enabled = true; - } - - @Override - protected void startPolygon(int numInner, int numPoints) { - enabled = true; - } - - @Override - protected void startPolygonInner(int numInner) { - enabled = false; - } - - @Override - protected void addCoordinate(double x, double y, double z, double m, int index, int total) { - if (!hasZ && !Double.isNaN(z)) { - hasZ = true; - } - if (!hasM && !Double.isNaN(m)) { - hasM = true; - } - // POINT EMPTY has NaNs - if (enabled && !Double.isNaN(x) && !Double.isNaN(y)) { - if (!set) { - minX = maxX = x; - minY = maxY = y; - set = true; - } else { - if (minX > x) { - minX = x; - } - if (maxX < x) { - maxX = x; - } - if (minY > y) { - minY = y; - } - if (maxY < y) { - maxY = y; - } - } - } - } 
- - /** - * Returns the envelope. - * - * @return the envelope, or null - */ - public double[] getEnvelope() { - return set ? new double[] { minX, maxX, minY, maxY } : null; - } - - /** - * Returns the minimal dimension system. - * - * @return the minimal dimension system - */ - public int getDimensionSystem() { - return (hasZ ? DIMENSION_SYSTEM_XYZ : 0) | (hasM ? DIMENSION_SYSTEM_XYM : 0); - } - - } - /** * POINT geometry type. */ @@ -569,12 +462,13 @@ static double toCanonicalDouble(double d) { /** * Throw exception if param is not finite value (ie. NaN/inf/etc) - * @param d double value - * @return same double value + * + * @param d + * a double value + * @return the same double value */ static double checkFinite(double d) { - // Do not push this negation down, it will break NaN rejection - if (!(Math.abs(d) <= Double.MAX_VALUE)) { + if (!Double.isFinite(d)) { throw new IllegalArgumentException(); } return d; diff --git a/h2/src/main/org/h2/util/geometry/JTSUtils.java b/h2/src/main/org/h2/util/geometry/JTSUtils.java index 44386607f8..40d1dc3b17 100644 --- a/h2/src/main/org/h2/util/geometry/JTSUtils.java +++ b/h2/src/main/org/h2/util/geometry/JTSUtils.java @@ -1,12 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.util.geometry; +import static org.h2.util.geometry.GeometryUtils.DIMENSION_SYSTEM_XY; import static org.h2.util.geometry.GeometryUtils.DIMENSION_SYSTEM_XYM; import static org.h2.util.geometry.GeometryUtils.DIMENSION_SYSTEM_XYZ; +import static org.h2.util.geometry.GeometryUtils.DIMENSION_SYSTEM_XYZM; import static org.h2.util.geometry.GeometryUtils.GEOMETRY_COLLECTION; import static org.h2.util.geometry.GeometryUtils.LINE_STRING; import static org.h2.util.geometry.GeometryUtils.M; @@ -22,14 +24,11 @@ import static org.h2.util.geometry.GeometryUtils.toCanonicalDouble; import java.io.ByteArrayOutputStream; -import java.lang.reflect.Method; import org.h2.message.DbException; import org.h2.util.geometry.EWKBUtils.EWKBTarget; -import org.h2.util.geometry.GeometryUtils.DimensionSystemTarget; import org.h2.util.geometry.GeometryUtils.Target; import org.locationtech.jts.geom.CoordinateSequence; -import org.locationtech.jts.geom.CoordinateSequenceFactory; import org.locationtech.jts.geom.Geometry; import org.locationtech.jts.geom.GeometryCollection; import org.locationtech.jts.geom.GeometryFactory; @@ -49,33 +48,6 @@ */ public final class JTSUtils { - /** - * {@code true} if M dimension is supported by used version of JTS, - * {@code false} if M dimension is only partially supported (JTS 1.15). 
- */ - public static final boolean M_IS_SUPPORTED; - - /** - * create(int,int,int) method from CoordinateSequenceFactory, if it exists - */ - static final Method CREATE; - - private static final Method GET_MEASURES; - - static { - Method create, getMeasures; - try { - create = CoordinateSequenceFactory.class.getMethod("create", int.class, int.class, int.class); - getMeasures = CoordinateSequence.class.getMethod("getMeasures"); - } catch (ReflectiveOperationException e) { - create = null; - getMeasures = null; - } - M_IS_SUPPORTED = create != null; - CREATE = create; - GET_MEASURES = getMeasures; - } - /** * Converter output target that creates a JTS Geometry. */ @@ -171,7 +143,7 @@ protected Target startCollectionItem(int index, int total) { } @Override - protected void endCollectionItem(Target target, int index, int total) { + protected void endCollectionItem(Target target, int type, int index, int total) { subgeometries[index] = ((GeometryTarget) target).getGeometry(); } @@ -180,19 +152,28 @@ private void initCoordinates(int numPoints) { } private CoordinateSequence createCoordinates(int numPoints) { - if ((dimensionSystem & DIMENSION_SYSTEM_XYM) != 0) { - if (M_IS_SUPPORTED) { - try { - return (CoordinateSequence) CREATE.invoke(factory.getCoordinateSequenceFactory(), numPoints, 4, - 1); - } catch (ReflectiveOperationException e) { - throw DbException.convert(e); - } - } - return factory.getCoordinateSequenceFactory().create(numPoints, 4); - } else { - return factory.getCoordinateSequenceFactory().create(numPoints, 3); + int d, m; + switch (dimensionSystem) { + case DIMENSION_SYSTEM_XY: + d = 2; + m = 0; + break; + case DIMENSION_SYSTEM_XYZ: + d = 3; + m = 0; + break; + case DIMENSION_SYSTEM_XYM: + d = 3; + m = 1; + break; + case DIMENSION_SYSTEM_XYZM: + d = 4; + m = 1; + break; + default: + throw DbException.getInternalError(); } + return factory.getCoordinateSequenceFactory().create(numPoints, d, m); } @Override @@ -204,10 +185,15 @@ protected void 
addCoordinate(double x, double y, double z, double m, int index, CoordinateSequence coordinates = innerOffset < 0 ? this.coordinates : innerCoordinates[innerOffset]; coordinates.setOrdinate(index, X, checkFinite(x)); coordinates.setOrdinate(index, Y, checkFinite(y)); - coordinates.setOrdinate(index, Z, - (dimensionSystem & DIMENSION_SYSTEM_XYZ) != 0 ? checkFinite(z) : Double.NaN); - if ((dimensionSystem & DIMENSION_SYSTEM_XYM) != 0) { + switch (dimensionSystem) { + case DIMENSION_SYSTEM_XYZM: coordinates.setOrdinate(index, M, checkFinite(m)); + //$FALL-THROUGH$ + case DIMENSION_SYSTEM_XYZ: + coordinates.setOrdinate(index, Z, checkFinite(z)); + break; + case DIMENSION_SYSTEM_XYM: + coordinates.setOrdinate(index, 2, checkFinite(m)); } } @@ -249,11 +235,7 @@ Geometry getGeometry() { * @return JTS geometry object */ public static Geometry ewkb2geometry(byte[] ewkb) { - // Determine dimension system first - DimensionSystemTarget dimensionTarget = new DimensionSystemTarget(); - EWKBUtils.parseEWKB(ewkb, dimensionTarget); - // Generate a Geometry - return ewkb2geometry(ewkb, dimensionTarget.getDimensionSystem()); + return ewkb2geometry(ewkb, EWKBUtils.getDimensionSystem(ewkb)); } /** @@ -279,11 +261,7 @@ public static Geometry ewkb2geometry(byte[] ewkb, int dimensionSystem) { * @return EWKB representation */ public static byte[] geometry2ewkb(Geometry geometry) { - // Determine dimension system first - DimensionSystemTarget dimensionTarget = new DimensionSystemTarget(); - parseGeometry(geometry, dimensionTarget); - // Write an EWKB - return geometry2ewkb(geometry, dimensionTarget.getDimensionSystem()); + return geometry2ewkb(geometry, getDimensionSystem(geometry)); } /** @@ -338,9 +316,9 @@ private static void parseGeometry(Geometry geometry, Target target, int parentTy if (p.isEmpty()) { target.addCoordinate(Double.NaN, Double.NaN, Double.NaN, Double.NaN, 0, 1); } else { - CoordinateSequence sequence = p.getCoordinateSequence(); - addCoordinate(sequence, target, 0, 1, 
getMeasures(sequence)); + addCoordinate(p.getCoordinateSequence(), target, 0, 1); } + target.endObject(POINT); } else if (geometry instanceof LineString) { if (parentType != 0 && parentType != MULTI_LINE_STRING && parentType != GEOMETRY_COLLECTION) { throw new IllegalArgumentException(); @@ -348,27 +326,24 @@ private static void parseGeometry(Geometry geometry, Target target, int parentTy LineString ls = (LineString) geometry; CoordinateSequence cs = ls.getCoordinateSequence(); int numPoints = cs.size(); - if (numPoints < 0 || numPoints == 1) { + if (numPoints == 1) { throw new IllegalArgumentException(); } target.startLineString(numPoints); - int measures = getMeasures(cs); for (int i = 0; i < numPoints; i++) { - addCoordinate(cs, target, i, numPoints, measures); + addCoordinate(cs, target, i, numPoints); } + target.endObject(LINE_STRING); } else if (geometry instanceof Polygon) { if (parentType != 0 && parentType != MULTI_POLYGON && parentType != GEOMETRY_COLLECTION) { throw new IllegalArgumentException(); } Polygon p = (Polygon) geometry; int numInner = p.getNumInteriorRing(); - if (numInner < 0) { - throw new IllegalArgumentException(); - } CoordinateSequence cs = p.getExteriorRing().getCoordinateSequence(); int size = cs.size(); // Size may be 0 (EMPTY) or 4+ - if (size < 0 || size >= 1 && size <= 3) { + if (size >= 1 && size <= 3) { throw new IllegalArgumentException(); } if (size == 0 && numInner > 0) { @@ -376,20 +351,20 @@ private static void parseGeometry(Geometry geometry, Target target, int parentTy } target.startPolygon(numInner, size); if (size > 0) { - int measures = getMeasures(cs); - addRing(cs, target, size, measures); + addRing(cs, target, size); for (int i = 0; i < numInner; i++) { cs = p.getInteriorRingN(i).getCoordinateSequence(); size = cs.size(); // Size may be 0 (EMPTY) or 4+ - if (size < 0 || size >= 1 && size <= 3) { + if (size >= 1 && size <= 3) { throw new IllegalArgumentException(); } target.startPolygonInner(size); - addRing(cs, 
target, size, measures); + addRing(cs, target, size); } target.endNonEmptyPolygon(); } + target.endObject(POLYGON); } else if (geometry instanceof GeometryCollection) { if (parentType != 0 && parentType != GEOMETRY_COLLECTION) { throw new IllegalArgumentException(); @@ -406,29 +381,25 @@ private static void parseGeometry(Geometry geometry, Target target, int parentTy type = GEOMETRY_COLLECTION; } int numItems = gc.getNumGeometries(); - if (numItems < 0) { - throw new IllegalArgumentException(); - } target.startCollection(type, numItems); for (int i = 0; i < numItems; i++) { Target innerTarget = target.startCollectionItem(i, numItems); parseGeometry(gc.getGeometryN(i), innerTarget, type); - target.endCollectionItem(innerTarget, i, numItems); + target.endCollectionItem(innerTarget, type, i, numItems); } - target.endCollection(type); + target.endObject(type); } else { throw new IllegalArgumentException(); } - } - private static void addRing(CoordinateSequence sequence, Target target, int size, int measures) { + private static void addRing(CoordinateSequence sequence, Target target, int size) { // 0 or 4+ are valid if (size >= 4) { double startX = toCanonicalDouble(sequence.getX(0)), startY = toCanonicalDouble(sequence.getY(0)); - addCoordinate(sequence, target, 0, size, startX, startY, measures); + addCoordinate(sequence, target, 0, size, startX, startY); for (int i = 1; i < size - 1; i++) { - addCoordinate(sequence, target, i, size, measures); + addCoordinate(sequence, target, i, size); } double endX = toCanonicalDouble(sequence.getX(size - 1)), // endY = toCanonicalDouble(sequence.getY(size - 1)); @@ -439,42 +410,76 @@ private static void addRing(CoordinateSequence sequence, Target target, int size if (startX != endX || startY != endY) { throw new IllegalArgumentException(); } - addCoordinate(sequence, target, size - 1, size, endX, endY, measures); + addCoordinate(sequence, target, size - 1, size, endX, endY); } } - private static void 
addCoordinate(CoordinateSequence sequence, Target target, int index, int total, int measures) { + private static void addCoordinate(CoordinateSequence sequence, Target target, int index, int total) { addCoordinate(sequence, target, index, total, toCanonicalDouble(sequence.getX(index)), - toCanonicalDouble(sequence.getY(index)), measures); + toCanonicalDouble(sequence.getY(index))); } private static void addCoordinate(CoordinateSequence sequence, Target target, int index, int total, double x, - double y, int measures) { - double m, z; - int d = sequence.getDimension(); - if (M_IS_SUPPORTED) { - d -= measures; - z = d > 2 ? toCanonicalDouble(sequence.getOrdinate(index, Z)) : Double.NaN; - m = measures >= 1 ? toCanonicalDouble(sequence.getOrdinate(index, d)) : Double.NaN; - } else { - z = d >= 3 ? toCanonicalDouble(sequence.getOrdinate(index, Z)) : Double.NaN; - m = d >= 4 ? toCanonicalDouble(sequence.getOrdinate(index, M)) : Double.NaN; - } + double y) { + double z = toCanonicalDouble(sequence.getZ(index)); + double m = toCanonicalDouble(sequence.getM(index)); target.addCoordinate(x, y, z, m, index, total); } - private static int getMeasures(CoordinateSequence sequence) { - int m; - if (M_IS_SUPPORTED) { - try { - m = (int) GET_MEASURES.invoke(sequence); - } catch (ReflectiveOperationException e) { - throw DbException.convert(e); + /** + * Determines a dimension system of a JTS Geometry object. + * + * @param geometry + * geometry to parse + * @return the dimension system + */ + public static int getDimensionSystem(Geometry geometry) { + int d = getDimensionSystem1(geometry); + return d >= 0 ? 
d : 0; + } + + private static int getDimensionSystem1(Geometry geometry) { + int d; + if (geometry instanceof Point) { + d = getDimensionSystemFromSequence(((Point) geometry).getCoordinateSequence()); + } else if (geometry instanceof LineString) { + d = getDimensionSystemFromSequence(((LineString) geometry).getCoordinateSequence()); + } else if (geometry instanceof Polygon) { + d = getDimensionSystemFromSequence(((Polygon) geometry).getExteriorRing().getCoordinateSequence()); + } else if (geometry instanceof GeometryCollection) { + d = -1; + GeometryCollection gc = (GeometryCollection) geometry; + for (int i = 0, l = gc.getNumGeometries(); i < l; i++) { + d = getDimensionSystem1(gc.getGeometryN(i)); + if (d >= 0) { + break; + } } } else { - m = 0; + throw new IllegalArgumentException(); + } + return d; + } + + private static int getDimensionSystemFromSequence(CoordinateSequence sequence) { + int size = sequence.size(); + if (size > 0) { + for (int i = 0; i < size; i++) { + int d = getDimensionSystemFromCoordinate(sequence, i); + if (d >= 0) { + return d; + } + } + } + return (sequence.hasZ() ? DIMENSION_SYSTEM_XYZ : 0) | (sequence.hasM() ? DIMENSION_SYSTEM_XYM : 0); + } + + private static int getDimensionSystemFromCoordinate(CoordinateSequence sequence, int index) { + if (Double.isNaN(sequence.getX(index))) { + return -1; } - return m; + return (!Double.isNaN(sequence.getZ(index)) ? DIMENSION_SYSTEM_XYZ : 0) + | (!Double.isNaN(sequence.getM(index)) ? 
DIMENSION_SYSTEM_XYM : 0); } private JTSUtils() { diff --git a/h2/src/main/org/h2/util/geometry/package.html b/h2/src/main/org/h2/util/geometry/package.html index 0fc618ae94..b6d0df09ee 100644 --- a/h2/src/main/org/h2/util/geometry/package.html +++ b/h2/src/main/org/h2/util/geometry/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/util/json/JSONArray.java b/h2/src/main/org/h2/util/json/JSONArray.java new file mode 100644 index 0000000000..69e3564fc2 --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONArray.java @@ -0,0 +1,47 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.util.ArrayList; + +/** + * JSON array. + */ +public class JSONArray extends JSONValue { + + private final ArrayList elements = new ArrayList<>(); + + JSONArray() { + } + + /** + * Add a value to the array. + * + * @param value the value to add + */ + void addElement(JSONValue value) { + elements.add(value); + } + + @Override + public void addTo(JSONTarget target) { + target.startArray(); + for (JSONValue element : elements) { + element.addTo(target); + } + target.endArray(); + } + + /** + * Returns the value. + * + * @return the value + */ + public JSONValue[] getArray() { + return elements.toArray(new JSONValue[0]); + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONBoolean.java b/h2/src/main/org/h2/util/json/JSONBoolean.java new file mode 100644 index 0000000000..dd00c07876 --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONBoolean.java @@ -0,0 +1,47 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +/** + * JSON boolean. + */ +public class JSONBoolean extends JSONValue { + + /** + * {@code false} value. 
+ */ + public static final JSONBoolean FALSE = new JSONBoolean(false); + + /** + * {@code true} value. + */ + public static final JSONBoolean TRUE = new JSONBoolean(true); + + private final boolean value; + + private JSONBoolean(boolean value) { + this.value = value; + } + + @Override + public void addTo(JSONTarget target) { + if (value) { + target.valueTrue(); + } else { + target.valueFalse(); + } + } + + /** + * Returns the value. + * + * @return the value + */ + public boolean getBoolean() { + return value; + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONByteArrayTarget.java b/h2/src/main/org/h2/util/json/JSONByteArrayTarget.java new file mode 100644 index 0000000000..9082b8a9d6 --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONByteArrayTarget.java @@ -0,0 +1,246 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import static org.h2.util.json.JSONStringTarget.ARRAY; +import static org.h2.util.json.JSONStringTarget.HEX; +import static org.h2.util.json.JSONStringTarget.OBJECT; + +import java.io.ByteArrayOutputStream; +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; + +import org.h2.util.ByteStack; + +/** + * JSON byte array target. + */ +public final class JSONByteArrayTarget extends JSONTarget { + + private static final byte[] NULL_BYTES = "null".getBytes(StandardCharsets.ISO_8859_1); + + private static final byte[] FALSE_BYTES = "false".getBytes(StandardCharsets.ISO_8859_1); + + private static final byte[] TRUE_BYTES = "true".getBytes(StandardCharsets.ISO_8859_1); + + private static final byte[] U00_BYTES = "\\u00".getBytes(StandardCharsets.ISO_8859_1); + + /** + * Encodes a JSON string and appends it to the specified output stream. 
+ * + * @param baos + * the output stream to append to + * @param s + * the string to encode + * @return the specified output stream + */ + public static ByteArrayOutputStream encodeString(ByteArrayOutputStream baos, String s) { + baos.write('"'); + for (int i = 0, length = s.length(); i < length; i++) { + char c = s.charAt(i); + switch (c) { + case '\b': + baos.write('\\'); + baos.write('b'); + break; + case '\t': + baos.write('\\'); + baos.write('t'); + break; + case '\f': + baos.write('\\'); + baos.write('f'); + break; + case '\n': + baos.write('\\'); + baos.write('n'); + break; + case '\r': + baos.write('\\'); + baos.write('r'); + break; + case '"': + baos.write('\\'); + baos.write('"'); + break; + case '\\': + baos.write('\\'); + baos.write('\\'); + break; + default: + if (c >= ' ') { + if (c < 0x80) { + baos.write(c); + } else if (c < 0x800) { + baos.write(0xc0 | c >> 6); + baos.write(0x80 | c & 0x3f); + } else if (!Character.isSurrogate(c)) { + baos.write(0xe0 | c >> 12); + baos.write(0x80 | c >> 6 & 0x3f); + baos.write(0x80 | c & 0x3f); + } else { + char c2; + if (!Character.isHighSurrogate(c) || ++i >= length + || !Character.isLowSurrogate(c2 = s.charAt(i))) { + throw new IllegalArgumentException(); + } + int uc = Character.toCodePoint(c, c2); + baos.write(0xf0 | uc >> 18); + baos.write(0x80 | uc >> 12 & 0x3f); + baos.write(0x80 | uc >> 6 & 0x3f); + baos.write(0x80 | uc & 0x3f); + } + } else { + baos.write(U00_BYTES, 0, 4); + baos.write(HEX[c >>> 4 & 0xf]); + baos.write(HEX[c & 0xf]); + } + } + } + baos.write('"'); + return baos; + } + + private final ByteArrayOutputStream baos; + + private final ByteStack stack; + + private boolean needSeparator; + + private boolean afterName; + + /** + * Creates new instance of JSON byte array target. 
+ */ + public JSONByteArrayTarget() { + baos = new ByteArrayOutputStream(); + stack = new ByteStack(); + } + + @Override + public void startObject() { + beforeValue(); + afterName = false; + stack.push(OBJECT); + baos.write('{'); + } + + @Override + public void endObject() { + if (afterName || stack.poll(-1) != OBJECT) { + throw new IllegalStateException(); + } + baos.write('}'); + afterValue(); + } + + @Override + public void startArray() { + beforeValue(); + afterName = false; + stack.push(ARRAY); + baos.write('['); + } + + @Override + public void endArray() { + if (stack.poll(-1) != ARRAY) { + throw new IllegalStateException(); + } + baos.write(']'); + afterValue(); + } + + @Override + public void member(String name) { + if (afterName || stack.peek(-1) != OBJECT) { + throw new IllegalStateException(); + } + afterName = true; + beforeValue(); + encodeString(baos, name).write(':'); + } + + @Override + public void valueNull() { + beforeValue(); + baos.write(NULL_BYTES, 0, 4); + afterValue(); + } + + @Override + public void valueFalse() { + beforeValue(); + baos.write(FALSE_BYTES, 0, 5); + afterValue(); + } + + @Override + public void valueTrue() { + beforeValue(); + baos.write(TRUE_BYTES, 0, 4); + afterValue(); + } + + @Override + public void valueNumber(BigDecimal number) { + beforeValue(); + String s = number.toString(); + int index = s.indexOf('E'); + byte[] b = s.getBytes(StandardCharsets.ISO_8859_1); + if (index >= 0 && s.charAt(++index) == '+') { + baos.write(b, 0, index); + baos.write(b, index + 1, b.length - index - 1); + } else { + baos.write(b, 0, b.length); + } + afterValue(); + } + + @Override + public void valueString(String string) { + beforeValue(); + encodeString(baos, string); + afterValue(); + } + + private void beforeValue() { + if (!afterName && stack.peek(-1) == OBJECT) { + throw new IllegalStateException(); + } + if (needSeparator) { + if (stack.isEmpty()) { + throw new IllegalStateException(); + } + needSeparator = false; + baos.write(','); + 
} + } + + private void afterValue() { + needSeparator = true; + afterName = false; + } + + @Override + public boolean isPropertyExpected() { + return !afterName && stack.peek(-1) == OBJECT; + } + + @Override + public boolean isValueSeparatorExpected() { + return needSeparator; + } + + @Override + public byte[] getResult() { + if (!stack.isEmpty() || baos.size() == 0) { + throw new IllegalStateException(); + } + return baos.toByteArray(); + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONBytesSource.java b/h2/src/main/org/h2/util/json/JSONBytesSource.java new file mode 100644 index 0000000000..bb42c32fcd --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONBytesSource.java @@ -0,0 +1,258 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.math.BigDecimal; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + +/** + * JSON byte array source. + */ +public final class JSONBytesSource extends JSONTextSource { + + /** + * Parses source bytes to a specified target. 
+ * + * @param bytes + * source + * @param target + * target + * @param + * the type of the result + * @return the result of the target + */ + public static R parse(byte[] bytes, JSONTarget target) { + int length = bytes.length; + Charset charset = null; + if (length >= 4) { + byte b0 = bytes[0]; + byte b1 = bytes[1]; + byte b2 = bytes[2]; + byte b3 = bytes[3]; + switch (b0) { + case -2: + if (b1 == -1) { + charset = StandardCharsets.UTF_16BE; + } + break; + case -1: + if (b1 == -2) { + if (b2 == 0 && b3 == 0) { + charset = Charset.forName("UTF-32LE"); + } else { + charset = StandardCharsets.UTF_16LE; + } + } + break; + case 0: + if (b1 != 0) { + charset = StandardCharsets.UTF_16BE; + } else if (b2 == 0 && b3 != 0 || b2 == -2 && b3 == -1) { + charset = Charset.forName("UTF-32BE"); + } + break; + default: + if (b1 == 0) { + if (b2 == 0 && b3 == 0) { + charset = Charset.forName("UTF-32LE"); + } else { + charset = StandardCharsets.UTF_16LE; + } + } + break; + } + } else if (length >= 2) { + byte b0 = bytes[0]; + byte b1 = bytes[1]; + if (b0 != 0) { + if (b1 == 0) { + charset = StandardCharsets.UTF_16LE; + } + } else if (b1 != 0) { + charset = StandardCharsets.UTF_16BE; + } + } + (charset == null ? new JSONBytesSource(bytes, target) + : new JSONStringSource(new String(bytes, charset), target)).parse(); + return target.getResult(); + } + + /** + * Converts bytes into normalized JSON representation. 
+ * + * @param bytes + * source representation + * @return normalized representation + */ + public static byte[] normalize(byte[] bytes) { + return parse(bytes, new JSONByteArrayTarget()); + } + + private final byte[] bytes; + + private final int length; + + private int index; + + JSONBytesSource(byte[] bytes, JSONTarget target) { + super(target); + this.bytes = bytes; + this.length = bytes.length; + // Ignore BOM + if (nextChar() != '\uFEFF') { + index = 0; + } + } + + @Override + int nextCharAfterWhitespace() { + int index = this.index; + while (index < length) { + byte ch = bytes[index++]; + switch (ch) { + case '\t': + case '\n': + case '\r': + case ' ': + break; + default: + if (ch < 0) { + throw new IllegalArgumentException(); + } + this.index = index; + return ch; + } + } + return -1; + } + + @Override + void readKeyword1(String keyword) { + int l = keyword.length() - 1; + if (index + l > length) { + throw new IllegalArgumentException(); + } + for (int i = index, j = 1; j <= l; i++, j++) { + if (bytes[i] != keyword.charAt(j)) { + throw new IllegalArgumentException(); + } + } + index += l; + } + + @Override + void parseNumber(boolean positive) { + int index = this.index; + int start = index - 1; + index = skipInt(index, positive); + l: if (index < length) { + byte ch = bytes[index]; + if (ch == '.') { + index = skipInt(index + 1, false); + if (index >= length) { + break l; + } + ch = bytes[index]; + } + if (ch == 'E' || ch == 'e') { + if (++index >= length) { + throw new IllegalArgumentException(); + } + ch = bytes[index]; + if (ch == '+' || ch == '-') { + index++; + } + index = skipInt(index, false); + } + } + target.valueNumber(new BigDecimal(new String(bytes, start, index - start))); + this.index = index; + } + + private int skipInt(int index, boolean hasInt) { + while (index < length) { + byte ch = bytes[index]; + if (ch >= '0' && ch <= '9') { + hasInt = true; + index++; + } else { + break; + } + } + if (!hasInt) { + throw new IllegalArgumentException(); 
+ } + return index; + } + + @Override + int nextChar() { + if (index >= length) { + throw new IllegalArgumentException(); + } + int b1 = bytes[index++] & 0xff; + if (b1 >= 0x80) { + if (b1 >= 0xe0) { + if (b1 >= 0xf0) { + if (index + 2 >= length) { + throw new IllegalArgumentException(); + } + int b2 = bytes[index++] & 0xff; + int b3 = bytes[index++] & 0xff; + int b4 = bytes[index++] & 0xff; + b1 = ((b1 & 0xf) << 18) + ((b2 & 0x3f) << 12) + ((b3 & 0x3f) << 6) + (b4 & 0x3f); + if (b1 < 0x10000 || b1 > Character.MAX_CODE_POINT || (b2 & 0xc0) != 0x80 || (b3 & 0xc0) != 0x80 + || (b4 & 0xc0) != 0x80) { + throw new IllegalArgumentException(); + } + } else { + if (index + 1 >= length) { + throw new IllegalArgumentException(); + } + int b2 = bytes[index++] & 0xff; + int b3 = bytes[index++] & 0xff; + b1 = ((b1 & 0xf) << 12) + ((b2 & 0x3f) << 6) + (b3 & 0x3f); + if (b1 < 0x800 || (b2 & 0xc0) != 0x80 || (b3 & 0xc0) != 0x80) { + throw new IllegalArgumentException(); + } + } + } else { + if (index >= length) { + throw new IllegalArgumentException(); + } + int b2 = bytes[index++] & 0xff; + b1 = ((b1 & 0x1f) << 6) + (b2 & 0x3f); + if (b1 < 0x80 || (b2 & 0xc0) != 0x80) { + throw new IllegalArgumentException(); + } + } + } + return b1; + } + + @Override + char readHex() { + if (index + 3 >= length) { + throw new IllegalArgumentException(); + } + int ch; + try { + ch = Integer.parseInt(new String(bytes, index, 4), 16); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(); + } + index += 4; + return (char) ch; + } + + @Override + public String toString() { + return new String(bytes, 0, index, StandardCharsets.UTF_8) + "[*]" + + new String(bytes, index, length, StandardCharsets.UTF_8); + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONItemType.java b/h2/src/main/org/h2/util/json/JSONItemType.java new file mode 100644 index 0000000000..696e67ce6d --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONItemType.java @@ -0,0 +1,48 @@ +/* + * Copyright 2004-2022 
H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +/** + * JSON item type. + */ +public enum JSONItemType { + + /** + * Either {@link #ARRAY}, {@link #OBJECT}, or {@link #SCALAR}. + */ + VALUE, + + /** + * JSON array. + */ + ARRAY, + + /** + * JSON object. + */ + OBJECT, + + /** + * JSON scalar value: string, number, {@code true}, {@code false}, or + * {@code null}. + */ + SCALAR; + + /** + * Checks whether this item type includes the specified item type. + * + * @param type + * item type to check + * @return whether this item type includes the specified item type + */ + public boolean includes(JSONItemType type) { + if (type == null) { + throw new NullPointerException(); + } + return this == VALUE || this == type; + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONNull.java b/h2/src/main/org/h2/util/json/JSONNull.java new file mode 100644 index 0000000000..d5ea3ac93e --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONNull.java @@ -0,0 +1,26 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +/** + * JSON null. + */ +public class JSONNull extends JSONValue { + + /** + * {@code null} value. + */ + public static final JSONNull NULL = new JSONNull(); + + private JSONNull() { + } + + @Override + public void addTo(JSONTarget target) { + target.valueNull(); + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONNumber.java b/h2/src/main/org/h2/util/json/JSONNumber.java new file mode 100644 index 0000000000..de998d61fc --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONNumber.java @@ -0,0 +1,35 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.math.BigDecimal; + +/** + * JSON number. + */ +public class JSONNumber extends JSONValue { + + private final BigDecimal value; + + JSONNumber(BigDecimal value) { + this.value = value; + } + + @Override + public void addTo(JSONTarget target) { + target.valueNumber(value); + } + + /** + * Returns the value. + * + * @return the value + */ + public BigDecimal getBigDecimal() { + return value; + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONObject.java b/h2/src/main/org/h2/util/json/JSONObject.java new file mode 100644 index 0000000000..2f3565d194 --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONObject.java @@ -0,0 +1,69 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.util.AbstractMap.SimpleImmutableEntry; +import java.util.ArrayList; +import java.util.Map.Entry; + +/** + * JSON object. + */ +public class JSONObject extends JSONValue { + + private final ArrayList> members = new ArrayList<>(); + + JSONObject() { + } + + /** + * Add a key-value pair. + * + * @param name the key + * @param value the value + */ + void addMember(String name, JSONValue value) { + members.add(new SimpleImmutableEntry<>(name, value)); + } + + @Override + public void addTo(JSONTarget target) { + target.startObject(); + for (SimpleImmutableEntry member : members) { + target.member(member.getKey()); + member.getValue().addTo(target); + } + target.endObject(); + } + + /** + * Returns the value. + * + * @return the value + */ + @SuppressWarnings("unchecked") + public Entry[] getMembers() { + return members.toArray(new Entry[0]); + } + + /** + * Returns value of the first member with the specified name. 
+ * + * @param name + * name of the member + * @return value of the first member with the specified name, or + * {@code null} + */ + public JSONValue getFirst(String name) { + for (SimpleImmutableEntry entry : members) { + if (name.equals(entry.getKey())) { + return entry.getValue(); + } + } + return null; + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONString.java b/h2/src/main/org/h2/util/json/JSONString.java new file mode 100644 index 0000000000..98659d16dc --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONString.java @@ -0,0 +1,33 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +/** + * JSON string. + */ +public class JSONString extends JSONValue { + + private final String value; + + JSONString(String value) { + this.value = value; + } + + @Override + public void addTo(JSONTarget target) { + target.valueString(value); + } + + /** + * Returns the value. + * + * @return the value + */ + public String getString() { + return value; + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONStringSource.java b/h2/src/main/org/h2/util/json/JSONStringSource.java new file mode 100644 index 0000000000..b6ff80edd9 --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONStringSource.java @@ -0,0 +1,161 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.math.BigDecimal; + +import org.h2.util.StringUtils; + +/** + * JSON string source. + */ +public final class JSONStringSource extends JSONTextSource { + + /** + * Parses source string to a specified target. 
+ * + * @param string + * source + * @param target + * target + * @param + * the type of the result + * @return the result of the target + */ + public static R parse(String string, JSONTarget target) { + new JSONStringSource(string, target).parse(); + return target.getResult(); + } + + /** + * Normalizes textual JSON representation. + * + * @param string + * source representation + * @return normalized representation + */ + public static byte[] normalize(String string) { + return parse(string, new JSONByteArrayTarget()); + } + + private final String string; + + private final int length; + + private int index; + + JSONStringSource(String string, JSONTarget target) { + super(target); + this.string = string; + this.length = string.length(); + if (length == 0) { + throw new IllegalArgumentException(); + } + // Ignore BOM + if (string.charAt(index) == '\uFEFF') { + index++; + } + } + + @Override + int nextCharAfterWhitespace() { + int index = this.index; + while (index < length) { + char ch = string.charAt(index++); + switch (ch) { + case '\t': + case '\n': + case '\r': + case ' ': + break; + default: + this.index = index; + return ch; + } + } + return -1; + } + + @Override + void readKeyword1(String keyword) { + int l = keyword.length() - 1; + if (!string.regionMatches(index, keyword, 1, l)) { + throw new IllegalArgumentException(); + } + index += l; + } + + @Override + void parseNumber(boolean positive) { + int index = this.index; + int start = index - 1; + index = skipInt(index, positive); + l: if (index < length) { + char ch = string.charAt(index); + if (ch == '.') { + index = skipInt(index + 1, false); + if (index >= length) { + break l; + } + ch = string.charAt(index); + } + if (ch == 'E' || ch == 'e') { + if (++index >= length) { + throw new IllegalArgumentException(); + } + ch = string.charAt(index); + if (ch == '+' || ch == '-') { + index++; + } + index = skipInt(index, false); + } + } + target.valueNumber(new BigDecimal(string.substring(start, index))); + 
this.index = index; + } + + private int skipInt(int index, boolean hasInt) { + while (index < length) { + char ch = string.charAt(index); + if (ch >= '0' && ch <= '9') { + hasInt = true; + index++; + } else { + break; + } + } + if (!hasInt) { + throw new IllegalArgumentException(); + } + return index; + } + + @Override + int nextChar() { + if (index >= length) { + throw new IllegalArgumentException(); + } + return string.charAt(index++); + } + + @Override + char readHex() { + if (index + 3 >= length) { + throw new IllegalArgumentException(); + } + try { + return (char) Integer.parseInt(string.substring(index, index += 4), 16); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(); + } + } + + @Override + public String toString() { + return StringUtils.addAsterisk(string, index); + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONStringTarget.java b/h2/src/main/org/h2/util/json/JSONStringTarget.java new file mode 100644 index 0000000000..5646dcbab5 --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONStringTarget.java @@ -0,0 +1,247 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.math.BigDecimal; + +import org.h2.util.ByteStack; + +/** + * JSON String target. + */ +public final class JSONStringTarget extends JSONTarget { + + /** + * The hex characters. + */ + static final char[] HEX = "0123456789abcdef".toCharArray(); + + /** + * A JSON object. + */ + static final byte OBJECT = 1; + + /** + * A JSON array. + */ + static final byte ARRAY = 2; + + /** + * Encodes a JSON string and appends it to the specified string builder. 
+ * + * @param builder + * the string builder to append to + * @param s + * the string to encode + * @param asciiPrintableOnly + * whether all non-printable, non-ASCII characters, and {@code '} + * (single quote) characters should be escaped + * @return the specified string builder + */ + public static StringBuilder encodeString(StringBuilder builder, String s, boolean asciiPrintableOnly) { + builder.append('"'); + for (int i = 0, length = s.length(); i < length; i++) { + char c = s.charAt(i); + switch (c) { + case '\b': + builder.append("\\b"); + break; + case '\t': + builder.append("\\t"); + break; + case '\f': + builder.append("\\f"); + break; + case '\n': + builder.append("\\n"); + break; + case '\r': + builder.append("\\r"); + break; + case '"': + builder.append("\\\""); + break; + case '\'': + if (asciiPrintableOnly) { + builder.append("\\u0027"); + } else { + builder.append('\''); + } + break; + case '\\': + builder.append("\\\\"); + break; + default: + if (c < ' ') { + builder.append("\\u00") // + .append(HEX[c >>> 4 & 0xf]) // + .append(HEX[c & 0xf]); + } else if (!asciiPrintableOnly || c <= 0x7f) { + builder.append(c); + } else { + builder.append("\\u") // + .append(HEX[c >>> 12 & 0xf]) // + .append(HEX[c >>> 8 & 0xf]) // + .append(HEX[c >>> 4 & 0xf]) // + .append(HEX[c & 0xf]); + } + } + } + return builder.append('"'); + } + + private final StringBuilder builder; + + private final ByteStack stack; + + private final boolean asciiPrintableOnly; + + private boolean needSeparator; + + private boolean afterName; + + /** + * Creates new instance of JSON String target. + */ + public JSONStringTarget() { + this(false); + } + + /** + * Creates new instance of JSON String target. 
+ * + * @param asciiPrintableOnly + * whether all non-printable, non-ASCII characters, and {@code '} + * (single quote) characters should be escaped + */ + public JSONStringTarget(boolean asciiPrintableOnly) { + builder = new StringBuilder(); + stack = new ByteStack(); + this.asciiPrintableOnly = asciiPrintableOnly; + } + + @Override + public void startObject() { + beforeValue(); + afterName = false; + stack.push(OBJECT); + builder.append('{'); + } + + @Override + public void endObject() { + if (afterName || stack.poll(-1) != OBJECT) { + throw new IllegalStateException(); + } + builder.append('}'); + afterValue(); + } + + @Override + public void startArray() { + beforeValue(); + afterName = false; + stack.push(ARRAY); + builder.append('['); + } + + @Override + public void endArray() { + if (stack.poll(-1) != ARRAY) { + throw new IllegalStateException(); + } + builder.append(']'); + afterValue(); + } + + @Override + public void member(String name) { + if (afterName || stack.peek(-1) != OBJECT) { + throw new IllegalStateException(); + } + afterName = true; + beforeValue(); + encodeString(builder, name, asciiPrintableOnly).append(':'); + } + + @Override + public void valueNull() { + beforeValue(); + builder.append("null"); + afterValue(); + } + + @Override + public void valueFalse() { + beforeValue(); + builder.append("false"); + afterValue(); + } + + @Override + public void valueTrue() { + beforeValue(); + builder.append("true"); + afterValue(); + } + + @Override + public void valueNumber(BigDecimal number) { + beforeValue(); + String s = number.toString(); + int index = s.indexOf('E'); + if (index >= 0 && s.charAt(++index) == '+') { + builder.append(s, 0, index).append(s, index + 1, s.length()); + } else { + builder.append(s); + } + afterValue(); + } + + @Override + public void valueString(String string) { + beforeValue(); + encodeString(builder, string, asciiPrintableOnly); + afterValue(); + } + + private void beforeValue() { + if (!afterName && stack.peek(-1) == 
OBJECT) { + throw new IllegalStateException(); + } + if (needSeparator) { + if (stack.isEmpty()) { + throw new IllegalStateException(); + } + needSeparator = false; + builder.append(','); + } + } + + private void afterValue() { + needSeparator = true; + afterName = false; + } + + @Override + public boolean isPropertyExpected() { + return !afterName && stack.peek(-1) == OBJECT; + } + + @Override + public boolean isValueSeparatorExpected() { + return needSeparator; + } + + @Override + public String getResult() { + if (!stack.isEmpty() || builder.length() == 0) { + throw new IllegalStateException(); + } + return builder.toString(); + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONTarget.java b/h2/src/main/org/h2/util/json/JSONTarget.java new file mode 100644 index 0000000000..921857f280 --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONTarget.java @@ -0,0 +1,105 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.math.BigDecimal; + +/** + * Abstract JSON output target. + * + * @param + * the type of the result + */ +public abstract class JSONTarget { + + /** + * Start of an object. + */ + public abstract void startObject(); + + /** + * End of the current object. + */ + public abstract void endObject(); + + /** + * Start of an array. + */ + public abstract void startArray(); + + /** + * End of the current array. + */ + public abstract void endArray(); + + /** + * Name of a member. + * + * @param name + * the name + */ + public abstract void member(String name); + + /** + * Parse "null". + * + * {@code null} value. + */ + public abstract void valueNull(); + + /** + * Parse "false". + * + * {@code false} value. + */ + public abstract void valueFalse(); + + /** + * Parse "true". + * + * {@code true} value. + */ + public abstract void valueTrue(); + + /** + * A number value. 
+ * + * @param number + * the number + */ + public abstract void valueNumber(BigDecimal number); + + /** + * A string value. + * + * @param string + * the string + */ + public abstract void valueString(String string); + + /** + * Returns whether member's name or the end of the current object is + * expected. + * + * @return {@code true} if it is, {@code false} otherwise + */ + public abstract boolean isPropertyExpected(); + + /** + * Returns whether value separator expected before the next member or value. + * + * @return {@code true} if it is, {@code false} otherwise + */ + public abstract boolean isValueSeparatorExpected(); + + /** + * Returns the result. + * + * @return the result + */ + public abstract R getResult(); + +} diff --git a/h2/src/main/org/h2/util/json/JSONTextSource.java b/h2/src/main/org/h2/util/json/JSONTextSource.java new file mode 100644 index 0000000000..e50451447c --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONTextSource.java @@ -0,0 +1,216 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +/** + * JSON text source. + */ +public abstract class JSONTextSource { + + /** + * The output. + */ + final JSONTarget target; + + private final StringBuilder builder; + + JSONTextSource(JSONTarget target) { + this.target = target; + builder = new StringBuilder(); + } + + /** + * Parse the text and write it to the output. 
+ */ + final void parse() { + boolean comma = false; + for (int ch; (ch = nextCharAfterWhitespace()) >= 0;) { + if (ch == '}' || ch == ']') { + if (comma) { + throw new IllegalArgumentException(); + } + if (ch == '}') { + target.endObject(); + } else { + target.endArray(); + } + continue; + } + if (ch == ',') { + if (comma || !target.isValueSeparatorExpected()) { + throw new IllegalArgumentException(); + } + comma = true; + continue; + } + if (comma != target.isValueSeparatorExpected()) { + throw new IllegalArgumentException(); + } + comma = false; + switch (ch) { + case 'f': + readKeyword1("false"); + target.valueFalse(); + break; + case 'n': + readKeyword1("null"); + target.valueNull(); + break; + case 't': + readKeyword1("true"); + target.valueTrue(); + break; + case '{': + target.startObject(); + break; + case '[': + target.startArray(); + break; + case '"': { + String s = readString(); + if (target.isPropertyExpected()) { + if (nextCharAfterWhitespace() != ':') { + throw new IllegalArgumentException(); + } + target.member(s); + } else { + target.valueString(s); + } + break; + } + case '-': + parseNumber(false); + break; + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + parseNumber(true); + break; + default: + throw new IllegalArgumentException(); + } + } + } + + /** + * Skip all whitespace characters, and get the next character. + * + * @return the character code + */ + abstract int nextCharAfterWhitespace(); + + /** + * Read the specified keyword, or (it there is no match), throw an + * IllegalArgumentException. + * + * @param keyword the expected keyword + */ + abstract void readKeyword1(String keyword); + + /** + * Parse a number. + * + * @param positive whether it needs to be positive + */ + abstract void parseNumber(boolean positive); + + /** + * Read the next character. 
+ * + * @return the character code + */ + abstract int nextChar(); + + /** + * Read 4 hex characters (0-9, a-f, A-F), and return the Unicode character. + * + * @return the character + */ + abstract char readHex(); + + private String readString() { + builder.setLength(0); + boolean inSurrogate = false; + for (;;) { + int ch = nextChar(); + switch (ch) { + case '"': + if (inSurrogate) { + throw new IllegalArgumentException(); + } + return builder.toString(); + case '\\': + ch = nextChar(); + switch (ch) { + case '"': + case '/': + case '\\': + appendNonSurrogate((char) ch, inSurrogate); + break; + case 'b': + appendNonSurrogate('\b', inSurrogate); + break; + case 'f': + appendNonSurrogate('\f', inSurrogate); + break; + case 'n': + appendNonSurrogate('\n', inSurrogate); + break; + case 'r': + appendNonSurrogate('\r', inSurrogate); + break; + case 't': + appendNonSurrogate('\t', inSurrogate); + break; + case 'u': + inSurrogate = appendChar(readHex(), inSurrogate); + break; + default: + throw new IllegalArgumentException(); + } + break; + default: + if (Character.isBmpCodePoint(ch)) { + inSurrogate = appendChar((char) ch, inSurrogate); + } else { + if (inSurrogate) { + throw new IllegalArgumentException(); + } + builder.appendCodePoint(ch); + inSurrogate = false; + } + } + } + } + + private void appendNonSurrogate(char ch, boolean inSurrogate) { + if (inSurrogate) { + throw new IllegalArgumentException(); + } + builder.append(ch); + } + + private boolean appendChar(char ch, boolean inSurrogate) { + if (inSurrogate != Character.isLowSurrogate(ch)) { + throw new IllegalArgumentException(); + } + if (inSurrogate) { + inSurrogate = false; + } else if (Character.isHighSurrogate(ch)) { + inSurrogate = true; + } + builder.append(ch); + return inSurrogate; + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONValidationTarget.java b/h2/src/main/org/h2/util/json/JSONValidationTarget.java new file mode 100644 index 0000000000..04b880afec --- /dev/null +++ 
b/h2/src/main/org/h2/util/json/JSONValidationTarget.java @@ -0,0 +1,20 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +/** + * JSON validation target. + */ +public abstract class JSONValidationTarget extends JSONTarget { + + /** + * @return JSON item type of the top-level item, may not return + * {@link JSONItemType#VALUE} + */ + @Override + public abstract JSONItemType getResult(); + +} diff --git a/h2/src/main/org/h2/util/json/JSONValidationTargetWithUniqueKeys.java b/h2/src/main/org/h2/util/json/JSONValidationTargetWithUniqueKeys.java new file mode 100644 index 0000000000..1f5b9ad07b --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONValidationTargetWithUniqueKeys.java @@ -0,0 +1,157 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.math.BigDecimal; +import java.util.ArrayDeque; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; + +/** + * JSON validation target with unique keys. + */ +public final class JSONValidationTargetWithUniqueKeys extends JSONValidationTarget { + + private final ArrayDeque stack; + + private final ArrayDeque names; + + private boolean needSeparator; + + private String memberName; + + private JSONItemType type; + + /** + * Creates new instance of JSON validation target with unique keys. + */ + public JSONValidationTargetWithUniqueKeys() { + stack = new ArrayDeque<>(); + names = new ArrayDeque<>(); + } + + @Override + public void startObject() { + beforeValue(); + names.push(memberName != null ? 
memberName : ""); + memberName = null; + stack.push(new HashSet<>()); + } + + @Override + public void endObject() { + if (memberName != null) { + throw new IllegalStateException(); + } + if (!(stack.poll() instanceof HashSet)) { + throw new IllegalStateException(); + } + memberName = names.pop(); + afterValue(JSONItemType.OBJECT); + } + + @Override + public void startArray() { + beforeValue(); + names.push(memberName != null ? memberName : ""); + memberName = null; + stack.push(Collections.emptyList()); + } + + @Override + public void endArray() { + if (!(stack.poll() instanceof List)) { + throw new IllegalStateException(); + } + memberName = names.pop(); + afterValue(JSONItemType.ARRAY); + } + + @Override + public void member(String name) { + if (memberName != null || !(stack.peek() instanceof HashSet)) { + throw new IllegalStateException(); + } + memberName = name; + beforeValue(); + } + + @Override + public void valueNull() { + beforeValue(); + afterValue(JSONItemType.SCALAR); + } + + @Override + public void valueFalse() { + beforeValue(); + afterValue(JSONItemType.SCALAR); + } + + @Override + public void valueTrue() { + beforeValue(); + afterValue(JSONItemType.SCALAR); + } + + @Override + public void valueNumber(BigDecimal number) { + beforeValue(); + afterValue(JSONItemType.SCALAR); + } + + @Override + public void valueString(String string) { + beforeValue(); + afterValue(JSONItemType.SCALAR); + } + + private void beforeValue() { + if (memberName == null && stack.peek() instanceof HashSet) { + throw new IllegalStateException(); + } + if (needSeparator) { + if (stack.isEmpty()) { + throw new IllegalStateException(); + } + needSeparator = false; + } + } + + @SuppressWarnings("unchecked") + private void afterValue(JSONItemType type) { + Object parent = stack.peek(); + if (parent == null) { + this.type = type; + } else if (parent instanceof HashSet) { + if (!((HashSet) parent).add(memberName)) { + throw new IllegalStateException(); + } + } + needSeparator = true; 
+ memberName = null; + } + + @Override + public boolean isPropertyExpected() { + return memberName == null && stack.peek() instanceof HashSet; + } + + @Override + public boolean isValueSeparatorExpected() { + return needSeparator; + } + + @Override + public JSONItemType getResult() { + if (!stack.isEmpty() || type == null) { + throw new IllegalStateException(); + } + return type; + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONValidationTargetWithoutUniqueKeys.java b/h2/src/main/org/h2/util/json/JSONValidationTargetWithoutUniqueKeys.java new file mode 100644 index 0000000000..85d03a8391 --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONValidationTargetWithoutUniqueKeys.java @@ -0,0 +1,143 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.math.BigDecimal; + +import org.h2.util.ByteStack; + +/** + * JSON validation target without unique keys. + */ +public final class JSONValidationTargetWithoutUniqueKeys extends JSONValidationTarget { + + private static final byte OBJECT = 1; + + private static final byte ARRAY = 2; + + private JSONItemType type; + + private final ByteStack stack; + + private boolean needSeparator; + + private boolean afterName; + + /** + * Creates new instance of JSON validation target without unique keys. 
+ */ + public JSONValidationTargetWithoutUniqueKeys() { + stack = new ByteStack(); + } + + @Override + public void startObject() { + beforeValue(); + afterName = false; + stack.push(OBJECT); + } + + @Override + public void endObject() { + if (afterName || stack.poll(-1) != OBJECT) { + throw new IllegalStateException(); + } + afterValue(JSONItemType.OBJECT); + } + + @Override + public void startArray() { + beforeValue(); + afterName = false; + stack.push(ARRAY); + } + + @Override + public void endArray() { + if (stack.poll(-1) != ARRAY) { + throw new IllegalStateException(); + } + afterValue(JSONItemType.ARRAY); + } + + @Override + public void member(String name) { + if (afterName || stack.peek(-1) != OBJECT) { + throw new IllegalStateException(); + } + afterName = true; + beforeValue(); + } + + @Override + public void valueNull() { + beforeValue(); + afterValue(JSONItemType.SCALAR); + } + + @Override + public void valueFalse() { + beforeValue(); + afterValue(JSONItemType.SCALAR); + } + + @Override + public void valueTrue() { + beforeValue(); + afterValue(JSONItemType.SCALAR); + } + + @Override + public void valueNumber(BigDecimal number) { + beforeValue(); + afterValue(JSONItemType.SCALAR); + } + + @Override + public void valueString(String string) { + beforeValue(); + afterValue(JSONItemType.SCALAR); + } + + private void beforeValue() { + if (!afterName && stack.peek(-1) == OBJECT) { + throw new IllegalStateException(); + } + if (needSeparator) { + if (stack.isEmpty()) { + throw new IllegalStateException(); + } + needSeparator = false; + } + } + + private void afterValue(JSONItemType type) { + needSeparator = true; + afterName = false; + if (stack.isEmpty()) { + this.type = type; + } + } + + @Override + public boolean isPropertyExpected() { + return !afterName && stack.peek(-1) == OBJECT; + } + + @Override + public boolean isValueSeparatorExpected() { + return needSeparator; + } + + @Override + public JSONItemType getResult() { + if (!stack.isEmpty() || type == 
null) { + throw new IllegalStateException(); + } + return type; + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONValue.java b/h2/src/main/org/h2/util/json/JSONValue.java new file mode 100644 index 0000000000..89bfbf456c --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONValue.java @@ -0,0 +1,31 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +/** + * JSON value. + */ +public abstract class JSONValue { + + JSONValue() { + } + + /** + * Appends this value to the specified target. + * + * @param target + * the target + */ + public abstract void addTo(JSONTarget target); + + @Override + public final String toString() { + JSONStringTarget target = new JSONStringTarget(); + addTo(target); + return target.getResult(); + } + +} diff --git a/h2/src/main/org/h2/util/json/JSONValueTarget.java b/h2/src/main/org/h2/util/json/JSONValueTarget.java new file mode 100644 index 0000000000..2df696265f --- /dev/null +++ b/h2/src/main/org/h2/util/json/JSONValueTarget.java @@ -0,0 +1,155 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.math.BigDecimal; +import java.util.ArrayDeque; + +/** + * JSON value target. + */ +public final class JSONValueTarget extends JSONTarget { + + private final ArrayDeque stack; + + private final ArrayDeque names; + + private boolean needSeparator; + + private String memberName; + + private JSONValue result; + + /** + * Creates new instance of JSON value target. + */ + public JSONValueTarget() { + stack = new ArrayDeque<>(); + names = new ArrayDeque<>(); + } + + @Override + public void startObject() { + beforeValue(); + names.push(memberName != null ? 
memberName : ""); + memberName = null; + stack.push(new JSONObject()); + } + + @Override + public void endObject() { + if (memberName != null) { + throw new IllegalStateException(); + } + JSONValue value = stack.poll(); + if (!(value instanceof JSONObject)) { + throw new IllegalStateException(); + } + memberName = names.pop(); + afterValue(value); + } + + @Override + public void startArray() { + beforeValue(); + names.push(memberName != null ? memberName : ""); + memberName = null; + stack.push(new JSONArray()); + } + + @Override + public void endArray() { + JSONValue value = stack.poll(); + if (!(value instanceof JSONArray)) { + throw new IllegalStateException(); + } + memberName = names.pop(); + afterValue(value); + } + + @Override + public void member(String name) { + if (memberName != null || !(stack.peek() instanceof JSONObject)) { + throw new IllegalStateException(); + } + memberName = name; + beforeValue(); + } + + @Override + public void valueNull() { + beforeValue(); + afterValue(JSONNull.NULL); + } + + @Override + public void valueFalse() { + beforeValue(); + afterValue(JSONBoolean.FALSE); + } + + @Override + public void valueTrue() { + beforeValue(); + afterValue(JSONBoolean.TRUE); + } + + @Override + public void valueNumber(BigDecimal number) { + beforeValue(); + afterValue(new JSONNumber(number)); + } + + @Override + public void valueString(String string) { + beforeValue(); + afterValue(new JSONString(string)); + } + + private void beforeValue() { + if (memberName == null && stack.peek() instanceof JSONObject) { + throw new IllegalStateException(); + } + if (needSeparator) { + if (stack.isEmpty()) { + throw new IllegalStateException(); + } + needSeparator = false; + } + } + + private void afterValue(JSONValue value) { + JSONValue parent = stack.peek(); + if (parent == null) { + result = value; + } else if (parent instanceof JSONObject) { + ((JSONObject) parent).addMember(memberName, value); + } else { + ((JSONArray) parent).addElement(value); + } + 
needSeparator = true; + memberName = null; + } + + @Override + public boolean isPropertyExpected() { + return memberName == null && stack.peek() instanceof JSONObject; + } + + @Override + public boolean isValueSeparatorExpected() { + return needSeparator; + } + + @Override + public JSONValue getResult() { + if (!stack.isEmpty() || result == null) { + throw new IllegalStateException(); + } + return result; + } + +} diff --git a/h2/src/main/org/h2/util/json/JsonConstructorUtils.java b/h2/src/main/org/h2/util/json/JsonConstructorUtils.java new file mode 100644 index 0000000000..b05f813ada --- /dev/null +++ b/h2/src/main/org/h2/util/json/JsonConstructorUtils.java @@ -0,0 +1,105 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util.json; + +import java.io.ByteArrayOutputStream; + +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueJson; +import org.h2.value.ValueNull; + +/** + * Utilities for JSON constructors. + */ +public final class JsonConstructorUtils { + + /** + * The ABSENT ON NULL flag. + */ + public static final int JSON_ABSENT_ON_NULL = 1; + + /** + * The WITH UNIQUE KEYS flag. + */ + public static final int JSON_WITH_UNIQUE_KEYS = 2; + + private JsonConstructorUtils() { + } + + /** + * Appends a value to a JSON object in the specified string builder. 
+ * + * @param baos + * the output stream to append to + * @param key + * the name of the property + * @param value + * the value of the property + */ + public static void jsonObjectAppend(ByteArrayOutputStream baos, String key, Value value) { + if (baos.size() > 1) { + baos.write(','); + } + JSONByteArrayTarget.encodeString(baos, key).write(':'); + byte[] b = value.convertTo(TypeInfo.TYPE_JSON).getBytesNoCopy(); + baos.write(b, 0, b.length); + } + + /** + * Appends trailing closing brace to the specified string builder with a + * JSON object, validates it, and converts to a JSON value. + * + * @param baos + * the output stream with the object + * @param flags + * the flags ({@link #JSON_WITH_UNIQUE_KEYS}) + * @return the JSON value + * @throws DbException + * if {@link #JSON_WITH_UNIQUE_KEYS} is specified and keys are + * not unique + */ + public static Value jsonObjectFinish(ByteArrayOutputStream baos, int flags) { + baos.write('}'); + byte[] result = baos.toByteArray(); + if ((flags & JSON_WITH_UNIQUE_KEYS) != 0) { + try { + JSONBytesSource.parse(result, new JSONValidationTargetWithUniqueKeys()); + } catch (RuntimeException ex) { + String s = JSONBytesSource.parse(result, new JSONStringTarget()); + throw DbException.getInvalidValueException("JSON WITH UNIQUE KEYS", + s.length() < 128 ? result : s.substring(0, 128) + "..."); + } + } + return ValueJson.getInternal(result); + } + + /** + * Appends a value to a JSON array in the specified output stream. 
+ * + * @param baos + * the output stream to append to + * @param value + * the value + * @param flags + * the flags ({@link #JSON_ABSENT_ON_NULL}) + */ + public static void jsonArrayAppend(ByteArrayOutputStream baos, Value value, int flags) { + if (value == ValueNull.INSTANCE) { + if ((flags & JSON_ABSENT_ON_NULL) != 0) { + return; + } + value = ValueJson.NULL; + } + if (baos.size() > 1) { + baos.write(','); + } + byte[] b = value.convertTo(TypeInfo.TYPE_JSON).getBytesNoCopy(); + baos.write(b, 0, b.length); + } + +} diff --git a/h2/src/main/org/h2/util/json/package.html b/h2/src/main/org/h2/util/json/package.html new file mode 100644 index 0000000000..c34f97e9b7 --- /dev/null +++ b/h2/src/main/org/h2/util/json/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

          + +Internal utility classes for JSON data type. + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/util/package.html b/h2/src/main/org/h2/util/package.html index c24d890f40..fc268b59a2 100644 --- a/h2/src/main/org/h2/util/package.html +++ b/h2/src/main/org/h2/util/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/value/CaseInsensitiveConcurrentMap.java b/h2/src/main/org/h2/value/CaseInsensitiveConcurrentMap.java index 2cea9d9680..838366e30e 100644 --- a/h2/src/main/org/h2/value/CaseInsensitiveConcurrentMap.java +++ b/h2/src/main/org/h2/value/CaseInsensitiveConcurrentMap.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; @@ -28,6 +28,11 @@ public V put(String key, V value) { return super.put(StringUtils.toUpperEnglish(key), value); } + @Override + public V putIfAbsent(String key, V value) { + return super.putIfAbsent(StringUtils.toUpperEnglish(key), value); + } + @Override public boolean containsKey(Object key) { return super.containsKey(StringUtils.toUpperEnglish((String) key)); diff --git a/h2/src/main/org/h2/value/CaseInsensitiveMap.java b/h2/src/main/org/h2/value/CaseInsensitiveMap.java index 16715f7ee0..230ac7dd71 100644 --- a/h2/src/main/org/h2/value/CaseInsensitiveMap.java +++ b/h2/src/main/org/h2/value/CaseInsensitiveMap.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.value; @@ -17,6 +17,22 @@ public class CaseInsensitiveMap extends HashMap { private static final long serialVersionUID = 1L; + /** + * Creates new instance of case-insensitive map. + */ + public CaseInsensitiveMap() { + } + + /** + * Creates new instance of case-insensitive map with specified initial + * capacity. + * + * @param initialCapacity the initial capacity + */ + public CaseInsensitiveMap(int initialCapacity) { + super(initialCapacity); + } + @Override public V get(Object key) { return super.get(StringUtils.toUpperEnglish((String) key)); @@ -27,6 +43,11 @@ public V put(String key, V value) { return super.put(StringUtils.toUpperEnglish(key), value); } + @Override + public V putIfAbsent(String key, V value) { + return super.putIfAbsent(StringUtils.toUpperEnglish(key), value); + } + @Override public boolean containsKey(Object key) { return super.containsKey(StringUtils.toUpperEnglish((String) key)); diff --git a/h2/src/main/org/h2/value/CharsetCollator.java b/h2/src/main/org/h2/value/CharsetCollator.java index 5764b6b900..a824924220 100644 --- a/h2/src/main/org/h2/value/CharsetCollator.java +++ b/h2/src/main/org/h2/value/CharsetCollator.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; @@ -9,6 +9,9 @@ import java.text.CollationKey; import java.text.Collator; import java.util.Comparator; +import java.util.Locale; + +import org.h2.util.Bits; /** * The charset collator sorts strings according to the order in the given charset. @@ -18,19 +21,8 @@ public class CharsetCollator extends Collator { /** * The comparator used to compare byte arrays. 
*/ - static final Comparator COMPARATOR = new Comparator() { - @Override - public int compare(byte[] b1, byte[] b2) { - int minLength = Math.min(b1.length, b2.length); - for (int index = 0; index < minLength; index++) { - int result = b1[index] - b2[index]; - if (result != 0) { - return result; - } - } - return b1.length - b2.length; - } - }; + static final Comparator COMPARATOR = Bits::compareNotNullSigned; + private final Charset charset; public CharsetCollator(Charset charset) { @@ -53,11 +45,15 @@ public int compare(String source, String target) { * @return the bytes */ byte[] toBytes(String source) { + if (getStrength() <= Collator.SECONDARY) { + // TODO perform case-insensitive comparison properly + source = source.toUpperCase(Locale.ROOT); + } return source.getBytes(charset); } @Override - public CollationKey getCollationKey(final String source) { + public CollationKey getCollationKey(String source) { return new CharsetCollationKey(source); } @@ -68,18 +64,21 @@ public int hashCode() { private class CharsetCollationKey extends CollationKey { + private final byte[] bytes; + CharsetCollationKey(String source) { super(source); + bytes = toBytes(source); } @Override public int compareTo(CollationKey target) { - return COMPARATOR.compare(toByteArray(), toBytes(target.getSourceString())); + return COMPARATOR.compare(bytes, target.toByteArray()); } @Override public byte[] toByteArray() { - return toBytes(getSourceString()); + return bytes; } } diff --git a/h2/src/main/org/h2/value/CompareMode.java b/h2/src/main/org/h2/value/CompareMode.java index bcfadf4190..aeea652e1c 100644 --- a/h2/src/main/org/h2/value/CompareMode.java +++ b/h2/src/main/org/h2/value/CompareMode.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.value; @@ -11,7 +11,6 @@ import java.util.Locale; import java.util.Objects; -import org.h2.engine.SysProperties; import org.h2.util.StringUtils; /** @@ -44,17 +43,7 @@ public class CompareMode implements Comparator { */ public static final String CHARSET = "CHARSET_"; - /** - * This constant means that the BINARY or UUID columns are sorted as if the - * bytes were signed. - */ - public static final String SIGNED = "SIGNED"; - - /** - * This constant means that the BINARY or UUID columns are sorted as if the - * bytes were unsigned. - */ - public static final String UNSIGNED = "UNSIGNED"; + private static Locale[] LOCALES; private static volatile CompareMode lastUsed; @@ -74,22 +63,9 @@ public class CompareMode implements Comparator { private final String name; private final int strength; - /** - * If true, sort BINARY columns as if they contain unsigned bytes. - */ - private final boolean binaryUnsigned; - - /** - * If true, sort UUID columns as if they contain unsigned bytes instead of - * Java-compatible sorting. - */ - private final boolean uuidUnsigned; - - protected CompareMode(String name, int strength, boolean binaryUnsigned, boolean uuidUnsigned) { + protected CompareMode(String name, int strength) { this.name = name; this.strength = strength; - this.binaryUnsigned = binaryUnsigned; - this.uuidUnsigned = uuidUnsigned; } /** @@ -103,33 +79,12 @@ protected CompareMode(String name, int strength, boolean binaryUnsigned, boolean * @return the compare mode */ public static CompareMode getInstance(String name, int strength) { - return getInstance(name, strength, SysProperties.SORT_BINARY_UNSIGNED, SysProperties.SORT_UUID_UNSIGNED); - } - - /** - * Create a new compare mode with the given collator and strength. If - * required, a new CompareMode is created, or if possible the last one is - * returned. A cache is used to speed up comparison when using a collator; - * CollationKey objects are cached. 
- * - * @param name the collation name or null - * @param strength the collation strength - * @param binaryUnsigned whether to compare binaries as unsigned - * @param uuidUnsigned whether to compare UUIDs as unsigned - * @return the compare mode - */ - public static CompareMode getInstance(String name, int strength, boolean binaryUnsigned, boolean uuidUnsigned) { CompareMode last = lastUsed; - if (last != null) { - if (Objects.equals(last.name, name) && - last.strength == strength && - last.binaryUnsigned == binaryUnsigned && - last.uuidUnsigned == uuidUnsigned) { - return last; - } + if (last != null && Objects.equals(last.name, name) && last.strength == strength) { + return last; } if (name == null || name.equals(OFF)) { - last = new CompareMode(name, strength, binaryUnsigned, uuidUnsigned); + last = new CompareMode(name, strength); } else { boolean useICU4J; if (name.startsWith(ICU4J)) { @@ -138,19 +93,37 @@ public static CompareMode getInstance(String name, int strength, boolean binaryU } else if (name.startsWith(DEFAULT)) { useICU4J = false; name = name.substring(DEFAULT.length()); + } else if (name.startsWith(CHARSET)) { + useICU4J = false; } else { useICU4J = CAN_USE_ICU4J; } if (useICU4J) { - last = new CompareModeIcu4J(name, strength, binaryUnsigned, uuidUnsigned); + last = new CompareModeIcu4J(name, strength); } else { - last = new CompareModeDefault(name, strength, binaryUnsigned, uuidUnsigned); + last = new CompareModeDefault(name, strength); } } lastUsed = last; return last; } + /** + * Returns available locales for collations. + * + * @param onlyIfInitialized + * if {@code true}, returns {@code null} when locales are not yet + * initialized + * @return available locales for collations. 
+ */ + public static Locale[] getCollationLocales(boolean onlyIfInitialized) { + Locale[] locales = LOCALES; + if (locales == null && !onlyIfInitialized) { + LOCALES = locales = Collator.getAvailableLocales(); + } + return locales; + } + /** * Compare two characters in a string. * @@ -161,15 +134,19 @@ public static CompareMode getInstance(String name, int strength, boolean binaryU * @param ignoreCase true if a case-insensitive comparison should be made * @return true if the characters are equals */ - public boolean equalsChars(String a, int ai, String b, int bi, - boolean ignoreCase) { + public boolean equalsChars(String a, int ai, String b, int bi, boolean ignoreCase) { char ca = a.charAt(ai); char cb = b.charAt(bi); + if (ca == cb) { + return true; + } if (ignoreCase) { - ca = Character.toUpperCase(ca); - cb = Character.toUpperCase(cb); + if (Character.toUpperCase(ca) == Character.toUpperCase(cb) + || Character.toLowerCase(ca) == Character.toLowerCase(cb)) { + return true; + } } - return ca == cb; + return false; } /** @@ -211,7 +188,7 @@ public static String getName(Locale l) { * @return true if they match */ static boolean compareLocaleNames(Locale locale, String name) { - return name.equalsIgnoreCase(locale.toString()) || + return name.equalsIgnoreCase(locale.toString()) || name.equalsIgnoreCase(locale.toLanguageTag()) || name.equalsIgnoreCase(getName(locale)); } @@ -248,9 +225,14 @@ public static Collator getCollator(String name) { result = Collator.getInstance(locale); } } + } else if (name.indexOf('-') > 0) { + Locale locale = Locale.forLanguageTag(name); + if (!locale.getLanguage().isEmpty()) { + return Collator.getInstance(locale); + } } if (result == null) { - for (Locale locale : Collator.getAvailableLocales()) { + for (Locale locale : getCollationLocales(false)) { if (compareLocaleNames(locale, name)) { result = Collator.getInstance(locale); break; @@ -268,14 +250,6 @@ public int getStrength() { return strength; } - public boolean isBinaryUnsigned() { 
- return binaryUnsigned; - } - - public boolean isUuidUnsigned() { - return uuidUnsigned; - } - @Override public boolean equals(Object obj) { if (obj == this) { @@ -290,12 +264,6 @@ public boolean equals(Object obj) { if (strength != o.strength) { return false; } - if (binaryUnsigned != o.binaryUnsigned) { - return false; - } - if (uuidUnsigned != o.uuidUnsigned) { - return false; - } return true; } @@ -304,8 +272,6 @@ public int hashCode() { int result = 1; result = 31 * result + getName().hashCode(); result = 31 * result + strength; - result = 31 * result + (binaryUnsigned ? 1231 : 1237); - result = 31 * result + (uuidUnsigned ? 1231 : 1237); return result; } diff --git a/h2/src/main/org/h2/value/CompareModeDefault.java b/h2/src/main/org/h2/value/CompareModeDefault.java index cf0f55ccb3..fe4ac13396 100644 --- a/h2/src/main/org/h2/value/CompareModeDefault.java +++ b/h2/src/main/org/h2/value/CompareModeDefault.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.value; @@ -20,11 +20,13 @@ public class CompareModeDefault extends CompareMode { private final Collator collator; private final SmallLRUCache collationKeys; - protected CompareModeDefault(String name, int strength, boolean binaryUnsigned, boolean uuidUnsigned) { - super(name, strength, binaryUnsigned, uuidUnsigned); + private volatile CompareModeDefault caseInsensitive; + + protected CompareModeDefault(String name, int strength) { + super(name, strength); collator = CompareMode.getCollator(name); if (collator == null) { - throw DbException.throwInternalError(name); + throw DbException.getInternalError(name); } collator.setStrength(strength); int cacheSize = SysProperties.COLLATOR_CACHE_SIZE; @@ -37,10 +39,12 @@ protected CompareModeDefault(String name, int strength, boolean binaryUnsigned, @Override public int compareString(String a, String b, boolean ignoreCase) { - if (ignoreCase) { - // this is locale sensitive - a = a.toUpperCase(); - b = b.toUpperCase(); + if (ignoreCase && getStrength() > Collator.SECONDARY) { + CompareModeDefault i = caseInsensitive; + if (i == null) { + caseInsensitive = i = new CompareModeDefault(getName(), Collator.SECONDARY); + } + return i.compareString(a, b, false); } int comp; if (collationKeys != null) { diff --git a/h2/src/main/org/h2/value/CompareModeIcu4J.java b/h2/src/main/org/h2/value/CompareModeIcu4J.java index 31d2e646a6..19312f8f5c 100644 --- a/h2/src/main/org/h2/value/CompareModeIcu4J.java +++ b/h2/src/main/org/h2/value/CompareModeIcu4J.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.value; import java.lang.reflect.Method; +import java.text.Collator; import java.util.Comparator; import java.util.Locale; @@ -20,16 +21,21 @@ public class CompareModeIcu4J extends CompareMode { private final Comparator collator; - protected CompareModeIcu4J(String name, int strength, boolean binaryUnsigned, boolean uuidUnsigned) { - super(name, strength, binaryUnsigned, uuidUnsigned); + private volatile CompareModeIcu4J caseInsensitive; + + protected CompareModeIcu4J(String name, int strength) { + super(name, strength); collator = getIcu4jCollator(name, strength); } @Override public int compareString(String a, String b, boolean ignoreCase) { - if (ignoreCase) { - a = a.toUpperCase(); - b = b.toUpperCase(); + if (ignoreCase && getStrength() > Collator.SECONDARY) { + CompareModeIcu4J i = caseInsensitive; + if (i == null) { + caseInsensitive = i = new CompareModeIcu4J(getName(), Collator.SECONDARY); + } + return i.compareString(a, b, false); } return collator.compare(a, b); } diff --git a/h2/src/main/org/h2/value/DataType.java b/h2/src/main/org/h2/value/DataType.java index 3e3d5023d6..29ec4fcb10 100644 --- a/h2/src/main/org/h2/value/DataType.java +++ b/h2/src/main/org/h2/value/DataType.java @@ -1,46 +1,25 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.value; -import java.io.BufferedReader; -import java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.charset.StandardCharsets; -import java.sql.Array; -import java.sql.Blob; -import java.sql.Clob; -import java.sql.Date; -import java.sql.ResultSet; +import java.sql.JDBCType; import java.sql.ResultSetMetaData; import java.sql.SQLException; -import java.sql.Time; -import java.sql.Timestamp; +import java.sql.SQLType; import java.sql.Types; -import java.util.ArrayList; import java.util.HashMap; -import java.util.UUID; +import java.util.Map; import org.h2.api.ErrorCode; -import org.h2.api.Interval; +import org.h2.api.H2Type; import org.h2.api.IntervalQualifier; -import org.h2.api.TimestampWithTimeZone; +import org.h2.engine.Constants; import org.h2.engine.Mode; -import org.h2.engine.SessionInterface; -import org.h2.engine.SysProperties; -import org.h2.jdbc.JdbcArray; -import org.h2.jdbc.JdbcBlob; -import org.h2.jdbc.JdbcClob; -import org.h2.jdbc.JdbcConnection; -import org.h2.jdbc.JdbcLob; import org.h2.message.DbException; -import org.h2.util.JdbcUtils; -import org.h2.util.LocalDateTimeUtils; -import org.h2.util.Utils; +import org.h2.util.StringUtils; /** * This class contains meta data information about data types, @@ -49,27 +28,10 @@ public class DataType { /** - * This constant is used to represent the type of a ResultSet. There is no - * equivalent java.sql.Types value, but Oracle uses it to represent a - * ResultSet (OracleTypes.CURSOR = -10). + * The map of types. */ - public static final int TYPE_RESULT_SET = -10; - - /** - * The Geometry class. This object is null if the jts jar file is not in the - * classpath. - */ - public static final Class GEOMETRY_CLASS; - - private static final String GEOMETRY_CLASS_NAME = - "org.locationtech.jts.geom.Geometry"; - - /** - * The list of types. 
An ArrayList so that Tomcat doesn't set it to null - * when clearing references. - */ - private static final ArrayList TYPES = new ArrayList<>(96); private static final HashMap TYPES_BY_NAME = new HashMap<>(128); + /** * Mapping from Value type numbers to DataType. */ @@ -80,21 +42,15 @@ public class DataType { */ public int type; - /** - * The data type name. - */ - public String name; - /** * The SQL type. */ public int sqlType; /** - * How closely the data type maps to the corresponding JDBC SQL type (low is - * best). + * The minimum supported precision. */ - public int sqlTypePos; + public long minPrecision; /** * The maximum supported precision. @@ -111,11 +67,6 @@ public class DataType { */ public int maxScale; - /** - * If this is a numeric type. - */ - public boolean decimal; - /** * The prefix required for the SQL literal representation. */ @@ -132,12 +83,7 @@ public class DataType { public String params; /** - * If this is an autoincrement type. - */ - public boolean autoIncrement; - - /** - * If this data type is an autoincrement type. + * If this data type is case sensitive. */ public boolean caseSensitive; @@ -162,209 +108,113 @@ public class DataType { public int defaultScale; /** - * If this data type should not be listed in the database meta data. + * If precision and scale have non-standard default values. 
*/ - public boolean hidden; + public boolean specialPrecisionScale; static { - Class g; - try { - g = JdbcUtils.loadUserClass(GEOMETRY_CLASS_NAME); - } catch (Exception e) { - // class is not in the classpath - ignore - g = null; - } - GEOMETRY_CLASS = g; - DataType dataType = new DataType(); - dataType.defaultPrecision = dataType.maxPrecision = ValueNull.PRECISION; - add(Value.NULL, Types.NULL, - dataType, - new String[]{"NULL"} - ); - add(Value.STRING, Types.VARCHAR, - createString(true), - new String[]{"VARCHAR", "CHARACTER VARYING", "VARCHAR2", "NVARCHAR", "NVARCHAR2", - "VARCHAR_CASESENSITIVE", "TID"} - ); - add(Value.STRING, Types.LONGVARCHAR, - createString(true), - new String[]{"LONGVARCHAR", "LONGNVARCHAR"} - ); - add(Value.STRING_FIXED, Types.CHAR, - createString(true), - new String[]{"CHAR", "CHARACTER", "NCHAR"} - ); - add(Value.STRING_IGNORECASE, Types.VARCHAR, - createString(false), - new String[]{"VARCHAR_IGNORECASE"} - ); - add(Value.BOOLEAN, Types.BOOLEAN, - createNumeric(ValueBoolean.PRECISION, 0, false), - new String[]{"BOOLEAN", "BIT", "BOOL"} - ); - add(Value.BYTE, Types.TINYINT, - createNumeric(ValueByte.PRECISION, 0, false), - new String[]{"TINYINT"} - ); - add(Value.SHORT, Types.SMALLINT, - createNumeric(ValueShort.PRECISION, 0, false), - new String[]{"SMALLINT", "YEAR", "INT2"} - ); - add(Value.INT, Types.INTEGER, - createNumeric(ValueInt.PRECISION, 0, false), - new String[]{"INTEGER", "INT", "MEDIUMINT", "INT4", "SIGNED"} - ); - add(Value.INT, Types.INTEGER, - createNumeric(ValueInt.PRECISION, 0, true), - new String[]{"SERIAL"} - ); - add(Value.LONG, Types.BIGINT, - createNumeric(ValueLong.PRECISION, 0, false), - new String[]{"BIGINT", "INT8", "LONG"} - ); - add(Value.LONG, Types.BIGINT, - createNumeric(ValueLong.PRECISION, 0, true), - new String[]{"IDENTITY", "BIGSERIAL"} - ); - if (SysProperties.BIG_DECIMAL_IS_DECIMAL) { - addDecimal(); - addNumeric(); - } else { - addNumeric(); - addDecimal(); - } - add(Value.FLOAT, Types.REAL, - 
createNumeric(ValueFloat.PRECISION, 0, false), - new String[] {"REAL", "FLOAT4"} - ); - add(Value.DOUBLE, Types.DOUBLE, - createNumeric(ValueDouble.PRECISION, 0, false), - new String[] { "DOUBLE", "DOUBLE PRECISION" } - ); - add(Value.DOUBLE, Types.FLOAT, - createNumeric(ValueDouble.PRECISION, 0, false), - new String[] {"FLOAT", "FLOAT8" } - ); + dataType.defaultPrecision = dataType.maxPrecision = dataType.minPrecision = ValueNull.PRECISION; + add(Value.NULL, Types.NULL, dataType, "NULL"); + add(Value.CHAR, Types.CHAR, createString(true, true), + "CHARACTER", "CHAR", "NCHAR", "NATIONAL CHARACTER", "NATIONAL CHAR"); + add(Value.VARCHAR, Types.VARCHAR, createString(true, false), + "CHARACTER VARYING", "VARCHAR", "CHAR VARYING", + "NCHAR VARYING", "NATIONAL CHARACTER VARYING", "NATIONAL CHAR VARYING", + "VARCHAR2", "NVARCHAR", "NVARCHAR2", + "VARCHAR_CASESENSITIVE", "TID", + "LONGVARCHAR", "LONGNVARCHAR"); + add(Value.CLOB, Types.CLOB, createLob(true), + "CHARACTER LARGE OBJECT", "CLOB", "CHAR LARGE OBJECT", "TINYTEXT", "TEXT", "MEDIUMTEXT", + "LONGTEXT", "NTEXT", "NCLOB", "NCHAR LARGE OBJECT", "NATIONAL CHARACTER LARGE OBJECT"); + add(Value.VARCHAR_IGNORECASE, Types.VARCHAR, createString(false, false), "VARCHAR_IGNORECASE"); + add(Value.BINARY, Types.BINARY, createBinary(true), "BINARY"); + add(Value.VARBINARY, Types.VARBINARY, createBinary(false), + "BINARY VARYING", "VARBINARY", "RAW", "BYTEA", "LONG RAW", "LONGVARBINARY"); + add(Value.BLOB, Types.BLOB, createLob(false), + "BINARY LARGE OBJECT", "BLOB", "TINYBLOB", "MEDIUMBLOB", "LONGBLOB", "IMAGE"); + add(Value.BOOLEAN, Types.BOOLEAN, createNumeric(ValueBoolean.PRECISION, 0), "BOOLEAN", "BIT", "BOOL"); + add(Value.TINYINT, Types.TINYINT, createNumeric(ValueTinyint.PRECISION, 0), "TINYINT"); + add(Value.SMALLINT, Types.SMALLINT, createNumeric(ValueSmallint.PRECISION, 0), "SMALLINT", "INT2"); + add(Value.INTEGER, Types.INTEGER, createNumeric(ValueInteger.PRECISION, 0), + "INTEGER", "INT", "MEDIUMINT", "INT4", 
"SIGNED" + ); + add(Value.BIGINT, Types.BIGINT, createNumeric(ValueBigint.PRECISION, 0), + "BIGINT", "INT8", "LONG"); + dataType = new DataType(); + dataType.minPrecision = 1; + dataType.defaultPrecision = dataType.maxPrecision = Constants.MAX_NUMERIC_PRECISION; + dataType.defaultScale = ValueNumeric.DEFAULT_SCALE; + dataType.maxScale = ValueNumeric.MAXIMUM_SCALE; + dataType.minScale = 0; + dataType.params = "PRECISION,SCALE"; + dataType.supportsPrecision = true; + dataType.supportsScale = true; + add(Value.NUMERIC, Types.NUMERIC, dataType, "NUMERIC", "DECIMAL", "DEC"); + add(Value.REAL, Types.REAL, createNumeric(ValueReal.PRECISION, 0), "REAL", "FLOAT4"); + add(Value.DOUBLE, Types.DOUBLE, createNumeric(ValueDouble.PRECISION, 0), + "DOUBLE PRECISION", "DOUBLE", "FLOAT8"); + add(Value.DOUBLE, Types.FLOAT, createNumeric(ValueDouble.PRECISION, 0), "FLOAT"); + dataType = new DataType(); + dataType.minPrecision = 1; + dataType.defaultPrecision = dataType.maxPrecision = Constants.MAX_NUMERIC_PRECISION; + dataType.params = "PRECISION"; + dataType.supportsPrecision = true; + add(Value.DECFLOAT, Types.NUMERIC, dataType, "DECFLOAT"); + add(Value.DATE, Types.DATE, createDate(ValueDate.PRECISION, ValueDate.PRECISION, "DATE", false, 0, 0), "DATE"); add(Value.TIME, Types.TIME, createDate(ValueTime.MAXIMUM_PRECISION, ValueTime.DEFAULT_PRECISION, "TIME", true, ValueTime.DEFAULT_SCALE, ValueTime.MAXIMUM_SCALE), - new String[]{"TIME", "TIME WITHOUT TIME ZONE"} - ); - add(Value.DATE, Types.DATE, - createDate(ValueDate.PRECISION, ValueDate.PRECISION, - "DATE", false, 0, 0), - new String[]{"DATE"} - ); + "TIME", "TIME WITHOUT TIME ZONE"); + add(Value.TIME_TZ, Types.TIME_WITH_TIMEZONE, + createDate(ValueTimeTimeZone.MAXIMUM_PRECISION, ValueTimeTimeZone.DEFAULT_PRECISION, + "TIME WITH TIME ZONE", true, ValueTime.DEFAULT_SCALE, ValueTime.MAXIMUM_SCALE), + "TIME WITH TIME ZONE"); add(Value.TIMESTAMP, Types.TIMESTAMP, createDate(ValueTimestamp.MAXIMUM_PRECISION, 
ValueTimestamp.DEFAULT_PRECISION, "TIMESTAMP", true, ValueTimestamp.DEFAULT_SCALE, ValueTimestamp.MAXIMUM_SCALE), - new String[]{"TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE", - "DATETIME", "DATETIME2", "SMALLDATETIME"} - ); - // 2014 is the value of Types.TIMESTAMP_WITH_TIMEZONE - // use the value instead of the reference because the code has to - // compile (on Java 1.7). Can be replaced with - // Types.TIMESTAMP_WITH_TIMEZONE once Java 1.8 is required. - add(Value.TIMESTAMP_TZ, 2014, + "TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE", "DATETIME", "DATETIME2", "SMALLDATETIME"); + add(Value.TIMESTAMP_TZ, Types.TIMESTAMP_WITH_TIMEZONE, createDate(ValueTimestampTimeZone.MAXIMUM_PRECISION, ValueTimestampTimeZone.DEFAULT_PRECISION, - "TIMESTAMP_TZ", true, ValueTimestampTimeZone.DEFAULT_SCALE, - ValueTimestampTimeZone.MAXIMUM_SCALE), - new String[]{"TIMESTAMP WITH TIME ZONE"} - ); - add(Value.BYTES, Types.VARBINARY, - createString(false), - new String[]{"VARBINARY", "BINARY VARYING"} - ); - add(Value.BYTES, Types.BINARY, - createString(false), - new String[]{"BINARY", "RAW", "BYTEA", "LONG RAW"} - ); - add(Value.BYTES, Types.LONGVARBINARY, - createString(false), - new String[]{"LONGVARBINARY"} - ); + "TIMESTAMP WITH TIME ZONE", true, ValueTimestamp.DEFAULT_SCALE, ValueTimestamp.MAXIMUM_SCALE), + "TIMESTAMP WITH TIME ZONE"); + for (int i = Value.INTERVAL_YEAR; i <= Value.INTERVAL_MINUTE_TO_SECOND; i++) { + addInterval(i); + } + add(Value.JAVA_OBJECT, Types.JAVA_OBJECT, createBinary(false), "JAVA_OBJECT", "OBJECT", "OTHER"); + dataType = createString(false, false); + dataType.supportsPrecision = false; + dataType.params = "ELEMENT [,...]"; + add(Value.ENUM, Types.OTHER, dataType, "ENUM"); + add(Value.GEOMETRY, Types.OTHER, createGeometry(), "GEOMETRY"); + add(Value.JSON, Types.OTHER, createString(true, false, "JSON '", "'"), "JSON"); dataType = new DataType(); dataType.prefix = dataType.suffix = "'"; - dataType.defaultPrecision = dataType.maxPrecision = ValueUuid.PRECISION; - 
add(Value.UUID, Types.BINARY, - createString(false), - // UNIQUEIDENTIFIER is the MSSQL mode equivalent - new String[]{"UUID", "UNIQUEIDENTIFIER"} - ); - add(Value.JAVA_OBJECT, Types.OTHER, - createString(false), - new String[]{"OTHER", "OBJECT", "JAVA_OBJECT"} - ); - add(Value.BLOB, Types.BLOB, - createLob(), - new String[]{"BLOB", "BINARY LARGE OBJECT", "TINYBLOB", "MEDIUMBLOB", - "LONGBLOB", "IMAGE", "OID"} - ); - add(Value.CLOB, Types.CLOB, - createLob(), - new String[]{"CLOB", "CHARACTER LARGE OBJECT", "TINYTEXT", "TEXT", "MEDIUMTEXT", - "LONGTEXT", "NTEXT", "NCLOB"} - ); - add(Value.GEOMETRY, Types.OTHER, - createGeometry(), - new String[]{"GEOMETRY"} - ); + dataType.defaultPrecision = dataType.maxPrecision = dataType.minPrecision = ValueUuid.PRECISION; + add(Value.UUID, Types.BINARY, dataType, "UUID"); dataType = new DataType(); dataType.prefix = "ARRAY["; dataType.suffix = "]"; - add(Value.ARRAY, Types.ARRAY, - dataType, - new String[]{"ARRAY"} - ); - dataType = new DataType(); - dataType.maxPrecision = dataType.defaultPrecision = Integer.MAX_VALUE; - add(Value.RESULT_SET, DataType.TYPE_RESULT_SET, - dataType, - new String[]{"RESULT_SET"} - ); - dataType = createString(false); - dataType.supportsPrecision = false; - dataType.supportsScale = false; - add(Value.ENUM, Types.OTHER, - dataType, - new String[]{"ENUM"} - ); - for (int i = Value.INTERVAL_YEAR; i <= Value.INTERVAL_MINUTE_TO_SECOND; i++) { - addInterval(i); - } - // Row value doesn't have a type name + dataType.params = "CARDINALITY"; + dataType.supportsPrecision = true; + dataType.defaultPrecision = dataType.maxPrecision = Constants.MAX_ARRAY_CARDINALITY; + add(Value.ARRAY, Types.ARRAY, dataType, "ARRAY"); dataType = new DataType(); - dataType.type = Value.ROW; - dataType.name = "ROW"; - dataType.sqlType = Types.OTHER; dataType.prefix = "ROW("; dataType.suffix = ")"; - TYPES_BY_VALUE_TYPE[Value.ROW] = dataType; - } - - private static void addDecimal() { - add(Value.DECIMAL, Types.DECIMAL, - 
createNumeric(Integer.MAX_VALUE, ValueDecimal.DEFAULT_PRECISION, ValueDecimal.DEFAULT_SCALE), - new String[]{"DECIMAL", "DEC"} - ); - } - - private static void addNumeric() { - add(Value.DECIMAL, Types.NUMERIC, - createNumeric(Integer.MAX_VALUE, ValueDecimal.DEFAULT_PRECISION, ValueDecimal.DEFAULT_SCALE), - new String[]{"NUMERIC", "NUMBER"} - ); + dataType.params = "NAME DATA_TYPE [,...]"; + add(Value.ROW, Types.OTHER, dataType, "ROW"); } private static void addInterval(int type) { IntervalQualifier qualifier = IntervalQualifier.valueOf(type - Value.INTERVAL_YEAR); String name = qualifier.toString(); DataType dataType = new DataType(); - dataType.prefix = "INTERVAL "; - dataType.suffix = ' ' + name; + dataType.prefix = "INTERVAL '"; + dataType.suffix = "' " + name; dataType.supportsPrecision = true; dataType.defaultPrecision = ValueInterval.DEFAULT_PRECISION; + dataType.minPrecision = 1; dataType.maxPrecision = ValueInterval.MAXIMUM_PRECISION; if (qualifier.hasSeconds()) { dataType.supportsScale = true; @@ -374,80 +224,31 @@ private static void addInterval(int type) { } else { dataType.params = "PRECISION"; } - add(type, Types.OTHER, dataType, - new String[]{("INTERVAL " + name).intern()} - ); + add(type, Types.OTHER, dataType, ("INTERVAL " + name).intern()); } - private static void add(int type, int sqlType, - DataType dataType, String[] names) { - for (int i = 0; i < names.length; i++) { - DataType dt = new DataType(); - dt.type = type; - dt.sqlType = sqlType; - dt.name = names[i]; - dt.autoIncrement = dataType.autoIncrement; - dt.decimal = dataType.decimal; - dt.maxPrecision = dataType.maxPrecision; - dt.maxScale = dataType.maxScale; - dt.minScale = dataType.minScale; - dt.params = dataType.params; - dt.prefix = dataType.prefix; - dt.suffix = dataType.suffix; - dt.supportsPrecision = dataType.supportsPrecision; - dt.supportsScale = dataType.supportsScale; - dt.defaultPrecision = dataType.defaultPrecision; - dt.defaultScale = dataType.defaultScale; - 
dt.caseSensitive = dataType.caseSensitive; - dt.hidden = i > 0; - for (DataType t2 : TYPES) { - if (t2.sqlType == dt.sqlType) { - dt.sqlTypePos++; - } - } - TYPES_BY_NAME.put(dt.name, dt); - if (TYPES_BY_VALUE_TYPE[type] == null) { - TYPES_BY_VALUE_TYPE[type] = dt; - } - TYPES.add(dt); + private static void add(int type, int sqlType, DataType dataType, String... names) { + dataType.type = type; + dataType.sqlType = sqlType; + if (TYPES_BY_VALUE_TYPE[type] == null) { + TYPES_BY_VALUE_TYPE[type] = dataType; + } + for (String name : names) { + TYPES_BY_NAME.put(name, dataType); } } /** - * Create a width numeric data type without parameters. + * Create a numeric data type without parameters. * * @param precision precision * @param scale scale - * @param autoInc whether the data type is an auto-increment type * @return data type */ - public static DataType createNumeric(int precision, int scale, boolean autoInc) { + public static DataType createNumeric(int precision, int scale) { DataType dataType = new DataType(); - dataType.defaultPrecision = dataType.maxPrecision = precision; + dataType.defaultPrecision = dataType.maxPrecision = dataType.minPrecision = precision; dataType.defaultScale = dataType.maxScale = dataType.minScale = scale; - dataType.decimal = true; - dataType.autoIncrement = autoInc; - return dataType; - } - - /** - * Create a numeric data type. 
- * - * @param maxPrecision maximum supported precision - * @param defaultPrecision default precision - * @param defaultScale default scale - * @return data type - */ - public static DataType createNumeric(int maxPrecision, int defaultPrecision, int defaultScale) { - DataType dataType = new DataType(); - dataType.maxPrecision = maxPrecision; - dataType.defaultPrecision = defaultPrecision; - dataType.defaultScale = defaultScale; - dataType.params = "PRECISION,SCALE"; - dataType.supportsPrecision = true; - dataType.supportsScale = true; - dataType.maxScale = maxPrecision; - dataType.decimal = true; return dataType; } @@ -468,7 +269,7 @@ public static DataType createDate(int maxPrecision, int precision, String prefix dataType.prefix = prefix + " '"; dataType.suffix = "'"; dataType.maxPrecision = maxPrecision; - dataType.defaultPrecision = precision; + dataType.defaultPrecision = dataType.minPrecision = precision; if (supportsScale) { dataType.params = "SCALE"; dataType.supportsScale = true; @@ -478,20 +279,29 @@ public static DataType createDate(int maxPrecision, int precision, String prefix return dataType; } - private static DataType createString(boolean caseSensitive) { + private static DataType createString(boolean caseSensitive, boolean fixedLength) { + return createString(caseSensitive, fixedLength, "'", "'"); + } + + private static DataType createBinary(boolean fixedLength) { + return createString(false, fixedLength, "X'", "'"); + } + + private static DataType createString(boolean caseSensitive, boolean fixedLength, String prefix, String suffix) { DataType dataType = new DataType(); - dataType.prefix = "'"; - dataType.suffix = "'"; + dataType.prefix = prefix; + dataType.suffix = suffix; dataType.params = "LENGTH"; dataType.caseSensitive = caseSensitive; dataType.supportsPrecision = true; - dataType.maxPrecision = Integer.MAX_VALUE; - dataType.defaultPrecision = Integer.MAX_VALUE; + dataType.minPrecision = 1; + dataType.maxPrecision = 
Constants.MAX_STRING_LENGTH; + dataType.defaultPrecision = fixedLength ? 1 : Constants.MAX_STRING_LENGTH; return dataType; } - private static DataType createLob() { - DataType t = createString(true); + private static DataType createLob(boolean clob) { + DataType t = clob ? createString(true, false) : createBinary(false); t.maxPrecision = Long.MAX_VALUE; t.defaultPrecision = Long.MAX_VALUE; return t; @@ -502,398 +312,11 @@ private static DataType createGeometry() { dataType.prefix = "'"; dataType.suffix = "'"; dataType.params = "TYPE,SRID"; - dataType.maxPrecision = Integer.MAX_VALUE; - dataType.defaultPrecision = Integer.MAX_VALUE; + dataType.maxPrecision = Long.MAX_VALUE; + dataType.defaultPrecision = Long.MAX_VALUE; return dataType; } - /** - * Get the list of data types. - * - * @return the list - */ - public static ArrayList getTypes() { - return TYPES; - } - - /** - * Read a value from the given result set. - * - * @param session the session - * @param rs the result set - * @param columnIndex the column index (1 based) - * @param type the data type - * @return the value - */ - public static Value readValue(SessionInterface session, ResultSet rs, - int columnIndex, int type) { - try { - Value v; - switch (type) { - case Value.NULL: { - return ValueNull.INSTANCE; - } - case Value.BYTES: { - /* - * Both BINARY and UUID may be mapped to Value.BYTES. getObject() returns byte[] - * for SQL BINARY, UUID for SQL UUID and null for SQL NULL. 
- */ - Object o = rs.getObject(columnIndex); - if (o instanceof byte[]) { - v = ValueBytes.getNoCopy((byte[]) o); - } else if (o != null) { - v = ValueUuid.get((UUID) o); - } else { - v = ValueNull.INSTANCE; - } - break; - } - case Value.UUID: { - Object o = rs.getObject(columnIndex); - if (o instanceof UUID) { - v = ValueUuid.get((UUID) o); - } else if (o != null) { - v = ValueUuid.get((byte[]) o); - } else { - v = ValueNull.INSTANCE; - } - break; - } - case Value.BOOLEAN: { - boolean value = rs.getBoolean(columnIndex); - v = rs.wasNull() ? (Value) ValueNull.INSTANCE : - ValueBoolean.get(value); - break; - } - case Value.BYTE: { - byte value = rs.getByte(columnIndex); - v = rs.wasNull() ? (Value) ValueNull.INSTANCE : - ValueByte.get(value); - break; - } - case Value.DATE: { - Date value = rs.getDate(columnIndex); - v = value == null ? (Value) ValueNull.INSTANCE : - ValueDate.get(value); - break; - } - case Value.TIME: { - Time value = rs.getTime(columnIndex); - v = value == null ? (Value) ValueNull.INSTANCE : - ValueTime.get(value); - break; - } - case Value.TIMESTAMP: { - Timestamp value = rs.getTimestamp(columnIndex); - v = value == null ? (Value) ValueNull.INSTANCE : - ValueTimestamp.get(value); - break; - } - case Value.TIMESTAMP_TZ: { - Object obj = rs.getObject(columnIndex); - if (obj == null) { - v = ValueNull.INSTANCE; - } else if (LocalDateTimeUtils.isJava8DateApiPresent() - && LocalDateTimeUtils.OFFSET_DATE_TIME.isInstance(obj)) { - v = LocalDateTimeUtils.offsetDateTimeToValue(obj); - } else { - TimestampWithTimeZone value = (TimestampWithTimeZone) obj; - v = ValueTimestampTimeZone.get(value); - } - break; - } - case Value.DECIMAL: { - BigDecimal value = rs.getBigDecimal(columnIndex); - v = value == null ? (Value) ValueNull.INSTANCE : - ValueDecimal.get(value); - break; - } - case Value.DOUBLE: { - double value = rs.getDouble(columnIndex); - v = rs.wasNull() ? 
(Value) ValueNull.INSTANCE : - ValueDouble.get(value); - break; - } - case Value.FLOAT: { - float value = rs.getFloat(columnIndex); - v = rs.wasNull() ? (Value) ValueNull.INSTANCE : - ValueFloat.get(value); - break; - } - case Value.INT: { - int value = rs.getInt(columnIndex); - v = rs.wasNull() ? (Value) ValueNull.INSTANCE : - ValueInt.get(value); - break; - } - case Value.LONG: { - long value = rs.getLong(columnIndex); - v = rs.wasNull() ? (Value) ValueNull.INSTANCE : - ValueLong.get(value); - break; - } - case Value.SHORT: { - short value = rs.getShort(columnIndex); - v = rs.wasNull() ? (Value) ValueNull.INSTANCE : - ValueShort.get(value); - break; - } - case Value.STRING_IGNORECASE: { - String s = rs.getString(columnIndex); - v = (s == null) ? (Value) ValueNull.INSTANCE : - ValueStringIgnoreCase.get(s); - break; - } - case Value.STRING_FIXED: { - String s = rs.getString(columnIndex); - v = (s == null) ? (Value) ValueNull.INSTANCE : - ValueStringFixed.get(s); - break; - } - case Value.STRING: { - String s = rs.getString(columnIndex); - v = (s == null) ? (Value) ValueNull.INSTANCE : - ValueString.get(s); - break; - } - case Value.CLOB: { - if (session == null) { - String s = rs.getString(columnIndex); - v = s == null ? ValueNull.INSTANCE : - ValueLobDb.createSmallLob(Value.CLOB, s.getBytes(StandardCharsets.UTF_8)); - } else { - Reader in = rs.getCharacterStream(columnIndex); - if (in == null) { - v = ValueNull.INSTANCE; - } else { - v = session.getDataHandler().getLobStorage(). - createClob(new BufferedReader(in), -1); - } - } - if (session != null) { - session.addTemporaryLob(v); - } - break; - } - case Value.BLOB: { - if (session == null) { - byte[] buff = rs.getBytes(columnIndex); - return buff == null ? ValueNull.INSTANCE : - ValueLobDb.createSmallLob(Value.BLOB, buff); - } - InputStream in = rs.getBinaryStream(columnIndex); - v = (in == null) ? 
(Value) ValueNull.INSTANCE : - session.getDataHandler().getLobStorage().createBlob(in, -1); - session.addTemporaryLob(v); - break; - } - case Value.JAVA_OBJECT: { - if (SysProperties.serializeJavaObject) { - byte[] buff = rs.getBytes(columnIndex); - v = buff == null ? ValueNull.INSTANCE : - ValueJavaObject.getNoCopy(null, buff, session.getDataHandler()); - } else { - Object o = rs.getObject(columnIndex); - v = o == null ? ValueNull.INSTANCE : - ValueJavaObject.getNoCopy(o, null, session.getDataHandler()); - } - break; - } - case Value.ARRAY: { - Array array = rs.getArray(columnIndex); - if (array == null) { - return ValueNull.INSTANCE; - } - Object[] list = (Object[]) array.getArray(); - if (list == null) { - return ValueNull.INSTANCE; - } - int len = list.length; - Value[] values = new Value[len]; - for (int i = 0; i < len; i++) { - values[i] = DataType.convertToValue(session, list[i], Value.NULL); - } - v = ValueArray.get(values); - break; - } - case Value.ENUM: { - int value = rs.getInt(columnIndex); - v = rs.wasNull() ? 
(Value) ValueNull.INSTANCE : - ValueInt.get(value); - break; - } - case Value.ROW: { - Object[] list = (Object[]) rs.getObject(columnIndex); - if (list == null) { - return ValueNull.INSTANCE; - } - int len = list.length; - Value[] values = new Value[len]; - for (int i = 0; i < len; i++) { - values[i] = DataType.convertToValue(session, list[i], Value.NULL); - } - v = ValueRow.get(values); - break; - } - case Value.RESULT_SET: { - ResultSet x = (ResultSet) rs.getObject(columnIndex); - if (x == null) { - return ValueNull.INSTANCE; - } - return ValueResultSet.get(session, x, Integer.MAX_VALUE); - } - case Value.GEOMETRY: { - Object x = rs.getObject(columnIndex); - if (x == null) { - return ValueNull.INSTANCE; - } - return ValueGeometry.getFromGeometry(x); - } - case Value.INTERVAL_YEAR: - case Value.INTERVAL_MONTH: - case Value.INTERVAL_DAY: - case Value.INTERVAL_HOUR: - case Value.INTERVAL_MINUTE: - case Value.INTERVAL_SECOND: - case Value.INTERVAL_YEAR_TO_MONTH: - case Value.INTERVAL_DAY_TO_HOUR: - case Value.INTERVAL_DAY_TO_MINUTE: - case Value.INTERVAL_DAY_TO_SECOND: - case Value.INTERVAL_HOUR_TO_MINUTE: - case Value.INTERVAL_HOUR_TO_SECOND: - case Value.INTERVAL_MINUTE_TO_SECOND: { - Object x = rs.getObject(columnIndex); - if (x == null) { - return ValueNull.INSTANCE; - } - Interval interval = (Interval) x; - return ValueInterval.from(interval.getQualifier(), interval.isNegative(), - interval.getLeading(), interval.getRemaining()); - } - default: - if (JdbcUtils.customDataTypesHandler != null) { - return JdbcUtils.customDataTypesHandler.getValue(type, - rs.getObject(columnIndex), - session.getDataHandler()); - } - throw DbException.throwInternalError("type="+type); - } - return v; - } catch (SQLException e) { - throw DbException.convert(e); - } - } - - /** - * Get the name of the Java class for the given value type. 
- * - * @param type the value type - * @param forResultSet return mapping for result set - * @return the class name - */ - public static String getTypeClassName(int type, boolean forResultSet) { - switch (type) { - case Value.BOOLEAN: - // "java.lang.Boolean"; - return Boolean.class.getName(); - case Value.BYTE: - if (forResultSet && !SysProperties.OLD_RESULT_SET_GET_OBJECT) { - // "java.lang.Integer"; - return Integer.class.getName(); - } - // "java.lang.Byte"; - return Byte.class.getName(); - case Value.SHORT: - if (forResultSet && !SysProperties.OLD_RESULT_SET_GET_OBJECT) { - // "java.lang.Integer"; - return Integer.class.getName(); - } - // "java.lang.Short"; - return Short.class.getName(); - case Value.INT: - // "java.lang.Integer"; - return Integer.class.getName(); - case Value.LONG: - // "java.lang.Long"; - return Long.class.getName(); - case Value.DECIMAL: - // "java.math.BigDecimal"; - return BigDecimal.class.getName(); - case Value.TIME: - // "java.sql.Time"; - return Time.class.getName(); - case Value.DATE: - // "java.sql.Date"; - return Date.class.getName(); - case Value.TIMESTAMP: - // "java.sql.Timestamp"; - return Timestamp.class.getName(); - case Value.TIMESTAMP_TZ: - if (SysProperties.RETURN_OFFSET_DATE_TIME && LocalDateTimeUtils.isJava8DateApiPresent()) { - // "java.time.OffsetDateTime"; - return LocalDateTimeUtils.OFFSET_DATE_TIME.getName(); - } - // "org.h2.api.TimestampWithTimeZone"; - return TimestampWithTimeZone.class.getName(); - case Value.BYTES: - case Value.UUID: - // "[B", not "byte[]"; - return byte[].class.getName(); - case Value.STRING: - case Value.STRING_IGNORECASE: - case Value.STRING_FIXED: - case Value.ENUM: - // "java.lang.String"; - return String.class.getName(); - case Value.BLOB: - // "java.sql.Blob"; - return java.sql.Blob.class.getName(); - case Value.CLOB: - // "java.sql.Clob"; - return java.sql.Clob.class.getName(); - case Value.DOUBLE: - // "java.lang.Double"; - return Double.class.getName(); - case Value.FLOAT: - // 
"java.lang.Float"; - return Float.class.getName(); - case Value.NULL: - return null; - case Value.JAVA_OBJECT: - // "java.lang.Object"; - return Object.class.getName(); - case Value.UNKNOWN: - // anything - return Object.class.getName(); - case Value.ARRAY: - return Array.class.getName(); - case Value.RESULT_SET: - return ResultSet.class.getName(); - case Value.GEOMETRY: - return GEOMETRY_CLASS != null ? GEOMETRY_CLASS_NAME : String.class.getName(); - case Value.INTERVAL_YEAR: - case Value.INTERVAL_MONTH: - case Value.INTERVAL_DAY: - case Value.INTERVAL_HOUR: - case Value.INTERVAL_MINUTE: - case Value.INTERVAL_SECOND: - case Value.INTERVAL_YEAR_TO_MONTH: - case Value.INTERVAL_DAY_TO_HOUR: - case Value.INTERVAL_DAY_TO_MINUTE: - case Value.INTERVAL_DAY_TO_SECOND: - case Value.INTERVAL_HOUR_TO_MINUTE: - case Value.INTERVAL_HOUR_TO_SECOND: - case Value.INTERVAL_MINUTE_TO_SECOND: - // "org.h2.api.Interval" - return Interval.class.getName(); - default: - if (JdbcUtils.customDataTypesHandler != null) { - return JdbcUtils.customDataTypesHandler.getDataTypeClassName(type); - } - throw DbException.throwInternalError("type="+type); - } - } - /** * Get the data type object for the given value type. * @@ -905,16 +328,7 @@ public static DataType getDataType(int type) { throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "?"); } if (type >= Value.NULL && type < Value.TYPE_COUNT) { - DataType dt = TYPES_BY_VALUE_TYPE[type]; - if (dt != null) { - return dt; - } - } - if (JdbcUtils.customDataTypesHandler != null) { - DataType dt = JdbcUtils.customDataTypesHandler.getDataTypeById(type); - if (dt != null) { - return dt; - } + return TYPES_BY_VALUE_TYPE[type]; } return TYPES_BY_VALUE_TYPE[Value.NULL]; } @@ -922,11 +336,22 @@ public static DataType getDataType(int type) { /** * Convert a value type to a SQL type. 
* - * @param type the value type + * @param type the type * @return the SQL type */ - public static int convertTypeToSQLType(int type) { - return getDataType(type).sqlType; + public static int convertTypeToSQLType(TypeInfo type) { + int valueType = type.getValueType(); + switch (valueType) { + case Value.NUMERIC: + return type.getExtTypeInfo() != null ? Types.DECIMAL : Types.NUMERIC; + case Value.REAL: + case Value.DOUBLE: + if (type.getDeclaredPrecision() >= 0) { + return Types.FLOAT; + } + break; + } + return getDataType(valueType).sqlType; } /** @@ -944,11 +369,12 @@ public static int convertSQLTypeToValueType(int sqlType, String sqlTypeName) { return Value.UUID; } break; - case Types.OTHER: - case Types.JAVA_OBJECT: - if (sqlTypeName.equalsIgnoreCase("geometry")) { - return Value.GEOMETRY; + case Types.OTHER: { + DataType type = TYPES_BY_NAME.get(StringUtils.toUpperEnglish(sqlTypeName)); + if (type != null) { + return type.type; } + } } return convertSQLTypeToValueType(sqlType); } @@ -960,6 +386,7 @@ public static int convertSQLTypeToValueType(int sqlType, String sqlTypeName) { * @param meta the meta data * @param columnIndex the column index (1, 2,...) * @return the value type + * @throws SQLException on failure */ public static int getValueTypeFromResultSet(ResultSetMetaData meta, int columnIndex) throws SQLException { @@ -968,6 +395,51 @@ public static int getValueTypeFromResultSet(ResultSetMetaData meta, meta.getColumnTypeName(columnIndex)); } + /** + * Check whether the specified column needs the binary representation. 
+ * + * @param meta + * metadata + * @param column + * column index + * @return {@code true} if column needs the binary representation, + * {@code false} otherwise + * @throws SQLException + * on SQL exception + */ + public static boolean isBinaryColumn(ResultSetMetaData meta, int column) throws SQLException { + switch (meta.getColumnType(column)) { + case Types.BINARY: + if (meta.getColumnTypeName(column).equals("UUID")) { + break; + } + //$FALL-THROUGH$ + case Types.LONGVARBINARY: + case Types.VARBINARY: + case Types.JAVA_OBJECT: + case Types.BLOB: + return true; + } + return false; + } + + /** + * Convert a SQL type to a value type. + * + * @param sqlType the SQL type + * @return the value type + */ + public static int convertSQLTypeToValueType(SQLType sqlType) { + if (sqlType instanceof H2Type) { + return sqlType.getVendorTypeNumber(); + } else if (sqlType instanceof JDBCType) { + return convertSQLTypeToValueType(sqlType.getVendorTypeNumber()); + } else { + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, sqlType == null ? "" + : unknownSqlTypeToString(new StringBuilder(), sqlType).toString()); + } + } + /** * Convert a SQL type to a value type. 
* @@ -978,36 +450,38 @@ public static int convertSQLTypeToValueType(int sqlType) { switch (sqlType) { case Types.CHAR: case Types.NCHAR: - return Value.STRING_FIXED; + return Value.CHAR; case Types.VARCHAR: case Types.LONGVARCHAR: case Types.NVARCHAR: case Types.LONGNVARCHAR: - return Value.STRING; + return Value.VARCHAR; case Types.NUMERIC: case Types.DECIMAL: - return Value.DECIMAL; + return Value.NUMERIC; case Types.BIT: case Types.BOOLEAN: return Value.BOOLEAN; case Types.INTEGER: - return Value.INT; + return Value.INTEGER; case Types.SMALLINT: - return Value.SHORT; + return Value.SMALLINT; case Types.TINYINT: - return Value.BYTE; + return Value.TINYINT; case Types.BIGINT: - return Value.LONG; + return Value.BIGINT; case Types.REAL: - return Value.FLOAT; + return Value.REAL; case Types.DOUBLE: case Types.FLOAT: return Value.DOUBLE; case Types.BINARY: + return Value.BINARY; case Types.VARBINARY: case Types.LONGVARBINARY: - return Value.BYTES; + return Value.VARBINARY; case Types.OTHER: + return Value.UNKNOWN; case Types.JAVA_OBJECT: return Value.JAVA_OBJECT; case Types.DATE: @@ -1016,7 +490,9 @@ public static int convertSQLTypeToValueType(int sqlType) { return Value.TIME; case Types.TIMESTAMP: return Value.TIMESTAMP; - case 2014: // Types.TIMESTAMP_WITH_TIMEZONE + case Types.TIME_WITH_TIMEZONE: + return Value.TIME_TZ; + case Types.TIMESTAMP_WITH_TIMEZONE: return Value.TIMESTAMP_TZ; case Types.BLOB: return Value.BLOB; @@ -1027,8 +503,6 @@ public static int convertSQLTypeToValueType(int sqlType) { return Value.NULL; case Types.ARRAY: return Value.ARRAY; - case DataType.TYPE_RESULT_SET: - return Value.RESULT_SET; default: throw DbException.get( ErrorCode.UNKNOWN_DATA_TYPE_1, Integer.toString(sqlType)); @@ -1036,278 +510,120 @@ public static int convertSQLTypeToValueType(int sqlType) { } /** - * Get the value type for the given Java class. + * Convert a SQL type to a debug string. 
* - * @param x the Java class - * @return the value type + * @param sqlType the SQL type + * @return the textual representation */ - public static int getTypeFromClass(Class x) { - // TODO refactor: too many if/else in functions, can reduce! - if (x == null || Void.TYPE == x) { - return Value.NULL; + public static String sqlTypeToString(SQLType sqlType) { + if (sqlType == null) { + return "null"; } - if (x.isPrimitive()) { - x = Utils.getNonPrimitiveClass(x); + if (sqlType instanceof JDBCType) { + return "JDBCType." + sqlType.getName(); } - if (String.class == x) { - return Value.STRING; - } else if (Integer.class == x) { - return Value.INT; - } else if (Long.class == x) { - return Value.LONG; - } else if (Boolean.class == x) { - return Value.BOOLEAN; - } else if (Double.class == x) { - return Value.DOUBLE; - } else if (Byte.class == x) { - return Value.BYTE; - } else if (Short.class == x) { - return Value.SHORT; - } else if (Character.class == x) { - throw DbException.get( - ErrorCode.DATA_CONVERSION_ERROR_1, "char (not supported)"); - } else if (Float.class == x) { - return Value.FLOAT; - } else if (byte[].class == x) { - return Value.BYTES; - } else if (UUID.class == x) { - return Value.UUID; - } else if (Void.class == x) { - return Value.NULL; - } else if (BigDecimal.class.isAssignableFrom(x)) { - return Value.DECIMAL; - } else if (ResultSet.class.isAssignableFrom(x)) { - return Value.RESULT_SET; - } else if (ValueLobDb.class.isAssignableFrom(x)) { - return Value.BLOB; -// FIXME no way to distinguish between these 2 types -// } else if (ValueLobDb.class.isAssignableFrom(x)) { -// return Value.CLOB; - } else if (Date.class.isAssignableFrom(x)) { - return Value.DATE; - } else if (Time.class.isAssignableFrom(x)) { - return Value.TIME; - } else if (Timestamp.class.isAssignableFrom(x)) { - return Value.TIMESTAMP; - } else if (java.util.Date.class.isAssignableFrom(x)) { - return Value.TIMESTAMP; - } else if (java.io.Reader.class.isAssignableFrom(x)) { - return 
Value.CLOB; - } else if (java.sql.Clob.class.isAssignableFrom(x)) { - return Value.CLOB; - } else if (java.io.InputStream.class.isAssignableFrom(x)) { - return Value.BLOB; - } else if (java.sql.Blob.class.isAssignableFrom(x)) { - return Value.BLOB; - } else if (Object[].class.isAssignableFrom(x)) { - // this includes String[] and so on - return Value.ARRAY; - } else if (isGeometryClass(x)) { - return Value.GEOMETRY; - } else if (LocalDateTimeUtils.LOCAL_DATE == x) { - return Value.DATE; - } else if (LocalDateTimeUtils.LOCAL_TIME == x) { - return Value.TIME; - } else if (LocalDateTimeUtils.LOCAL_DATE_TIME == x) { - return Value.TIMESTAMP; - } else if (LocalDateTimeUtils.OFFSET_DATE_TIME == x || LocalDateTimeUtils.INSTANT == x) { - return Value.TIMESTAMP_TZ; - } else { - if (JdbcUtils.customDataTypesHandler != null) { - return JdbcUtils.customDataTypesHandler.getTypeIdFromClass(x); - } - return Value.JAVA_OBJECT; + if (sqlType instanceof H2Type) { + return sqlType.toString(); } + return unknownSqlTypeToString(new StringBuilder("/* "), sqlType).append(" */ null").toString(); } - /** - * Convert a Java object to a value. 
- * - * @param session the session - * @param x the value - * @param type the value type - * @return the value - */ - public static Value convertToValue(SessionInterface session, Object x, - int type) { - Value v = convertToValue1(session, x, type); - if (session != null) { - session.addTemporaryLob(v); - } - return v; + private static StringBuilder unknownSqlTypeToString(StringBuilder builder, SQLType sqlType) { + return builder.append(StringUtils.quoteJavaString(sqlType.getVendor())).append('/') + .append(StringUtils.quoteJavaString(sqlType.getName())).append(" [") + .append(sqlType.getVendorTypeNumber()).append(']'); } - private static Value convertToValue1(SessionInterface session, Object x, - int type) { - if (x == null) { - return ValueNull.INSTANCE; - } - if (type == Value.JAVA_OBJECT) { - return ValueJavaObject.getNoCopy(x, null, session.getDataHandler()); - } - if (x instanceof String) { - return ValueString.get((String) x); - } else if (x instanceof Value) { - return (Value) x; - } else if (x instanceof Long) { - return ValueLong.get((Long) x); - } else if (x instanceof Integer) { - return ValueInt.get((Integer) x); - } else if (x instanceof BigInteger) { - return ValueDecimal.get(new BigDecimal((BigInteger) x)); - } else if (x instanceof BigDecimal) { - return ValueDecimal.get((BigDecimal) x); - } else if (x instanceof Boolean) { - return ValueBoolean.get((Boolean) x); - } else if (x instanceof Byte) { - return ValueByte.get((Byte) x); - } else if (x instanceof Short) { - return ValueShort.get((Short) x); - } else if (x instanceof Float) { - return ValueFloat.get((Float) x); - } else if (x instanceof Double) { - return ValueDouble.get((Double) x); - } else if (x instanceof byte[]) { - return ValueBytes.get((byte[]) x); - } else if (x instanceof Date) { - return ValueDate.get((Date) x); - } else if (x instanceof Time) { - return ValueTime.get((Time) x); - } else if (x instanceof Timestamp) { - return ValueTimestamp.get((Timestamp) x); - } else if (x 
instanceof java.util.Date) { - return ValueTimestamp.fromMillis(((java.util.Date) x).getTime()); - } else if (x instanceof java.io.Reader) { - Reader r = new BufferedReader((java.io.Reader) x); - return session.getDataHandler().getLobStorage(). - createClob(r, -1); - } else if (x instanceof java.sql.Clob) { - try { - java.sql.Clob clob = (java.sql.Clob) x; - Reader r = new BufferedReader(clob.getCharacterStream()); - return session.getDataHandler().getLobStorage(). - createClob(r, clob.length()); - } catch (SQLException e) { - throw DbException.convert(e); - } - } else if (x instanceof java.io.InputStream) { - return session.getDataHandler().getLobStorage(). - createBlob((java.io.InputStream) x, -1); - } else if (x instanceof java.sql.Blob) { - try { - java.sql.Blob blob = (java.sql.Blob) x; - return session.getDataHandler().getLobStorage(). - createBlob(blob.getBinaryStream(), blob.length()); - } catch (SQLException e) { - throw DbException.convert(e); - } - } else if (x instanceof java.sql.SQLXML) { - try { - java.sql.SQLXML clob = (java.sql.SQLXML) x; - Reader r = new BufferedReader(clob.getCharacterStream()); - return session.getDataHandler().getLobStorage(). 
- createClob(r, -1); - } catch (SQLException e) { - throw DbException.convert(e); - } - } else if (x instanceof java.sql.Array) { - java.sql.Array array = (java.sql.Array) x; - try { - return convertToValue(session, array.getArray(), Value.ARRAY); - } catch (SQLException e) { - throw DbException.convert(e); - } - } else if (x instanceof ResultSet) { - return ValueResultSet.get(session, (ResultSet) x, Integer.MAX_VALUE); - } else if (x instanceof UUID) { - return ValueUuid.get((UUID) x); - } - Class clazz = x.getClass(); - if (x instanceof Object[]) { - // (a.getClass().isArray()); - // (a.getClass().getComponentType().isPrimitive()); - Object[] o = (Object[]) x; - int len = o.length; - Value[] v = new Value[len]; - for (int i = 0; i < len; i++) { - v[i] = convertToValue(session, o[i], type); - } - return ValueArray.get(clazz.getComponentType(), v); - } else if (x instanceof Character) { - return ValueStringFixed.get(((Character) x).toString()); - } else if (isGeometry(x)) { - return ValueGeometry.getFromGeometry(x); - } else if (clazz == LocalDateTimeUtils.LOCAL_DATE) { - return LocalDateTimeUtils.localDateToDateValue(x); - } else if (clazz == LocalDateTimeUtils.LOCAL_TIME) { - return LocalDateTimeUtils.localTimeToTimeValue(x); - } else if (clazz == LocalDateTimeUtils.LOCAL_DATE_TIME) { - return LocalDateTimeUtils.localDateTimeToValue(x); - } else if (clazz == LocalDateTimeUtils.INSTANT) { - return LocalDateTimeUtils.instantToValue(x); - } else if (clazz == LocalDateTimeUtils.OFFSET_DATE_TIME) { - return LocalDateTimeUtils.offsetDateTimeToValue(x); - } else if (x instanceof TimestampWithTimeZone) { - return ValueTimestampTimeZone.get((TimestampWithTimeZone) x); - } else if (x instanceof Interval) { - Interval i = (Interval) x; - return ValueInterval.from(i.getQualifier(), i.isNegative(), i.getLeading(), i.getRemaining()); - } else if (clazz == LocalDateTimeUtils.PERIOD) { - return LocalDateTimeUtils.periodToValue(x); - } else if (clazz == 
LocalDateTimeUtils.DURATION) { - return LocalDateTimeUtils.durationToValue(x); - } else { - if (JdbcUtils.customDataTypesHandler != null) { - return JdbcUtils.customDataTypesHandler.getValue(type, x, - session.getDataHandler()); - } - return ValueJavaObject.getNoCopy(x, null, session.getDataHandler()); - } - } - - /** - * Check whether a given class matches the Geometry class. + * Get a data type object from a type name. * - * @param x the class - * @return true if it is a Geometry class + * @param s the type name + * @param mode database mode + * @return the data type object */ - public static boolean isGeometryClass(Class x) { - if (x == null || GEOMETRY_CLASS == null) { - return false; + public static DataType getTypeByName(String s, Mode mode) { + DataType result = mode.typeByNameMap.get(s); + if (result == null) { + result = TYPES_BY_NAME.get(s); } - return GEOMETRY_CLASS.isAssignableFrom(x); + return result; } /** - * Check whether a given object is a Geometry object. + * Returns whether columns with the specified data type may have an index. * - * @param x the object - * @return true if it is a Geometry object + * @param type the data type + * @return whether an index is allowed */ - public static boolean isGeometry(Object x) { - if (x == null) { + public static boolean isIndexable(TypeInfo type) { + switch(type.getValueType()) { + case Value.UNKNOWN: + case Value.NULL: + case Value.BLOB: + case Value.CLOB: return false; + case Value.ARRAY: + return isIndexable((TypeInfo) type.getExtTypeInfo()); + case Value.ROW: { + ExtTypeInfoRow ext = (ExtTypeInfoRow) type.getExtTypeInfo(); + for (Map.Entry entry : ext.getFields()) { + if (!isIndexable(entry.getValue())) { + return false; + } + } + } + //$FALL-THROUGH$ + default: + return true; } - return isGeometryClass(x.getClass()); } /** - * Get a data type object from a type name. + * Returns whether values of the specified data types have + * session-independent compare results. 
* - * @param s the type name - * @param mode database mode - * @return the data type object - */ - public static DataType getTypeByName(String s, Mode mode) { - DataType result = mode.typeByNameMap.get(s); - if (result == null) { - result = TYPES_BY_NAME.get(s); - if (result == null && JdbcUtils.customDataTypesHandler != null) { - result = JdbcUtils.customDataTypesHandler.getDataTypeByName(s); + * @param type1 + * the first data type + * @param type2 + * the second data type + * @return are values have session-independent compare results + */ + public static boolean areStableComparable(TypeInfo type1, TypeInfo type2) { + int t1 = type1.getValueType(); + int t2 = type2.getValueType(); + switch (t1) { + case Value.UNKNOWN: + case Value.NULL: + case Value.BLOB: + case Value.CLOB: + case Value.ROW: + return false; + case Value.DATE: + case Value.TIMESTAMP: + // DATE is equal to TIMESTAMP at midnight + return t2 == Value.DATE || t2 == Value.TIMESTAMP; + case Value.TIME: + case Value.TIME_TZ: + case Value.TIMESTAMP_TZ: + // Conversions depend on current timestamp and time zone + return t1 == t2; + case Value.ARRAY: + if (t2 == Value.ARRAY) { + return areStableComparable((TypeInfo) type1.getExtTypeInfo(), (TypeInfo) type2.getExtTypeInfo()); + } + return false; + default: + switch (t2) { + case Value.UNKNOWN: + case Value.NULL: + case Value.BLOB: + case Value.CLOB: + case Value.ROW: + return false; + default: + return true; } } - return result; } /** @@ -1318,15 +634,7 @@ public static DataType getTypeByName(String s, Mode mode) { * @return true if the value type is a date-time type */ public static boolean isDateTimeType(int type) { - switch (type) { - case Value.TIME: - case Value.DATE: - case Value.TIMESTAMP: - case Value.TIMESTAMP_TZ: - return true; - default: - return false; - } + return type >= Value.DATE && type <= Value.TIMESTAMP_TZ; } /** @@ -1366,7 +674,27 @@ public static boolean isLargeObject(int type) { * @return true if the value type is a numeric type */ 
public static boolean isNumericType(int type) { - return type >= Value.BYTE && type <= Value.FLOAT; + return type >= Value.TINYINT && type <= Value.DECFLOAT; + } + + /** + * Check if the given value type is a binary string type. + * + * @param type the value type + * @return true if the value type is a binary string type + */ + public static boolean isBinaryStringType(int type) { + return type >= Value.BINARY && type <= Value.BLOB; + } + + /** + * Check if the given value type is a character string type. + * + * @param type the value type + * @return true if the value type is a character string type + */ + public static boolean isCharacterStringType(int type) { + return type >= Value.CHAR && type <= Value.VARCHAR_IGNORECASE; } /** @@ -1376,17 +704,31 @@ public static boolean isNumericType(int type) { * @return true if the value type is a String type */ public static boolean isStringType(int type) { - return type == Value.STRING || type == Value.STRING_FIXED || type == Value.STRING_IGNORECASE; + return type == Value.VARCHAR || type == Value.CHAR || type == Value.VARCHAR_IGNORECASE; } /** - * Check if the given type may have extended type information. + * Check if the given value type is a binary string type or a compatible + * special data type such as Java object, UUID, geometry object, or JSON. 
* - * @param type the value type - * @return true if the value type may have extended type information + * @param type + * the value type + * @return true if the value type is a binary string type or a compatible + * special data type */ - public static boolean isExtInfoType(int type) { - return type == Value.GEOMETRY || type == Value.ENUM; + public static boolean isBinaryStringOrSpecialBinaryType(int type) { + switch (type) { + case Value.VARBINARY: + case Value.BINARY: + case Value.BLOB: + case Value.JAVA_OBJECT: + case Value.UUID: + case Value.GEOMETRY: + case Value.JSON: + return true; + default: + return false; + } } /** @@ -1398,17 +740,17 @@ public static boolean isExtInfoType(int type) { public static boolean hasTotalOrdering(int type) { switch (type) { case Value.BOOLEAN: - case Value.BYTE: - case Value.SHORT: - case Value.INT: - case Value.LONG: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: // Negative zeroes and NaNs are normalized case Value.DOUBLE: - case Value.FLOAT: + case Value.REAL: case Value.TIME: case Value.DATE: case Value.TIMESTAMP: - case Value.BYTES: + case Value.VARBINARY: // Serialized data is compared case Value.JAVA_OBJECT: case Value.UUID: @@ -1428,6 +770,7 @@ public static boolean hasTotalOrdering(int type) { case Value.INTERVAL_HOUR_TO_MINUTE: case Value.INTERVAL_HOUR_TO_SECOND: case Value.INTERVAL_MINUTE_TO_SECOND: + case Value.BINARY: return true; default: return false; @@ -1435,120 +778,21 @@ public static boolean hasTotalOrdering(int type) { } /** - * Check if the given value type supports the add operation. 
- * - * @param type the value type - * @return true if add is supported - */ - public static boolean supportsAdd(int type) { - switch (type) { - case Value.BYTE: - case Value.DECIMAL: - case Value.DOUBLE: - case Value.FLOAT: - case Value.INT: - case Value.LONG: - case Value.SHORT: - case Value.INTERVAL_YEAR: - case Value.INTERVAL_MONTH: - case Value.INTERVAL_DAY: - case Value.INTERVAL_HOUR: - case Value.INTERVAL_MINUTE: - case Value.INTERVAL_SECOND: - case Value.INTERVAL_YEAR_TO_MONTH: - case Value.INTERVAL_DAY_TO_HOUR: - case Value.INTERVAL_DAY_TO_MINUTE: - case Value.INTERVAL_DAY_TO_SECOND: - case Value.INTERVAL_HOUR_TO_MINUTE: - case Value.INTERVAL_HOUR_TO_SECOND: - case Value.INTERVAL_MINUTE_TO_SECOND: - return true; - case Value.BOOLEAN: - case Value.TIME: - case Value.DATE: - case Value.TIMESTAMP: - case Value.TIMESTAMP_TZ: - case Value.BYTES: - case Value.UUID: - case Value.STRING: - case Value.STRING_IGNORECASE: - case Value.STRING_FIXED: - case Value.BLOB: - case Value.CLOB: - case Value.NULL: - case Value.JAVA_OBJECT: - case Value.UNKNOWN: - case Value.ARRAY: - case Value.RESULT_SET: - case Value.GEOMETRY: - return false; - default: - if (JdbcUtils.customDataTypesHandler != null) { - return JdbcUtils.customDataTypesHandler.supportsAdd(type); - } - return false; - } - } - - /** - * Get the data type that will not overflow when calling 'add' 2 billion - * times. + * Performs saturated addition of precision values. 
* - * @param type the value type - * @return the data type that supports adding + * @param p1 + * the first summand + * @param p2 + * the second summand + * @return the sum of summands, or {@link Long#MAX_VALUE} if either argument + * is negative or sum is out of range */ - public static int getAddProofType(int type) { - switch (type) { - case Value.BYTE: - return Value.LONG; - case Value.FLOAT: - return Value.DOUBLE; - case Value.INT: - return Value.LONG; - case Value.LONG: - return Value.DECIMAL; - case Value.SHORT: - return Value.LONG; - case Value.BOOLEAN: - case Value.DECIMAL: - case Value.TIME: - case Value.DATE: - case Value.TIMESTAMP: - case Value.TIMESTAMP_TZ: - case Value.BYTES: - case Value.UUID: - case Value.STRING: - case Value.STRING_IGNORECASE: - case Value.STRING_FIXED: - case Value.BLOB: - case Value.CLOB: - case Value.DOUBLE: - case Value.NULL: - case Value.JAVA_OBJECT: - case Value.UNKNOWN: - case Value.ARRAY: - case Value.RESULT_SET: - case Value.GEOMETRY: - case Value.INTERVAL_YEAR: - case Value.INTERVAL_MONTH: - case Value.INTERVAL_DAY: - case Value.INTERVAL_HOUR: - case Value.INTERVAL_MINUTE: - case Value.INTERVAL_SECOND: - case Value.INTERVAL_YEAR_TO_MONTH: - case Value.INTERVAL_DAY_TO_HOUR: - case Value.INTERVAL_DAY_TO_MINUTE: - case Value.INTERVAL_DAY_TO_SECOND: - case Value.INTERVAL_HOUR_TO_MINUTE: - case Value.INTERVAL_HOUR_TO_SECOND: - case Value.INTERVAL_MINUTE_TO_SECOND: - return type; - default: - if (JdbcUtils.customDataTypesHandler != null) { - return JdbcUtils.customDataTypesHandler.getAddProofType(type); - } - return type; + public static long addPrecision(long p1, long p2) { + long sum = p1 + p2; + if ((p1 | p2 | sum) < 0) { + return Long.MAX_VALUE; } + return sum; } /** @@ -1576,67 +820,7 @@ public static Object getDefaultForPrimitiveType(Class clazz) { } else if (clazz == Double.TYPE) { return (double) 0; } - throw DbException.throwInternalError( - "primitive=" + clazz.toString()); - } - - /** - * Convert a value to the 
specified class. - * - * @param conn the database connection - * @param v the value - * @param paramClass the target class - * @return the converted object - */ - public static Object convertTo(JdbcConnection conn, Value v, - Class paramClass) { - if (paramClass == Blob.class) { - return new JdbcBlob(conn, v, JdbcLob.State.WITH_VALUE, 0); - } else if (paramClass == Clob.class) { - return new JdbcClob(conn, v, JdbcLob.State.WITH_VALUE, 0); - } else if (paramClass == Array.class) { - return new JdbcArray(conn, v, 0); - } - switch (v.getValueType()) { - case Value.JAVA_OBJECT: { - Object o = SysProperties.serializeJavaObject ? JdbcUtils.deserialize(v.getBytes(), - conn.getSession().getDataHandler()) : v.getObject(); - if (paramClass.isAssignableFrom(o.getClass())) { - return o; - } - break; - } - case Value.BOOLEAN: - case Value.BYTE: - case Value.SHORT: - case Value.INT: - case Value.LONG: - case Value.DECIMAL: - case Value.TIME: - case Value.DATE: - case Value.TIMESTAMP: - case Value.TIMESTAMP_TZ: - case Value.BYTES: - case Value.UUID: - case Value.STRING: - case Value.STRING_IGNORECASE: - case Value.STRING_FIXED: - case Value.BLOB: - case Value.CLOB: - case Value.DOUBLE: - case Value.FLOAT: - case Value.NULL: - case Value.UNKNOWN: - case Value.ARRAY: - case Value.RESULT_SET: - case Value.GEOMETRY: - break; - default: - if (JdbcUtils.customDataTypesHandler != null) { - return JdbcUtils.customDataTypesHandler.getObject(v, paramClass); - } - } - throw DbException.getUnsupportedException("converting to class " + paramClass.getName()); + throw DbException.getInternalError("primitive=" + clazz.toString()); } } diff --git a/h2/src/main/org/h2/value/ExtTypeInfo.java b/h2/src/main/org/h2/value/ExtTypeInfo.java index 7496e6af9e..98c5446062 100644 --- a/h2/src/main/org/h2/value/ExtTypeInfo.java +++ b/h2/src/main/org/h2/value/ExtTypeInfo.java @@ -1,34 +1,20 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; +import org.h2.util.HasSQL; + /** * Extended parameters of a data type. */ -public abstract class ExtTypeInfo { - - /** - * Casts a specified value to this data type. - * - * @param value - * value to cast - * @return casted value - */ - public abstract Value cast(Value value); - - /** - * Returns SQL including parentheses that should be appended to a type name. - * - * @return SQL including parentheses that should be appended to a type name - */ - public abstract String getCreateSQL(); +public abstract class ExtTypeInfo implements HasSQL { @Override public String toString() { - return getCreateSQL(); + return getSQL(QUOTE_ONLY_WHEN_REQUIRED); } } diff --git a/h2/src/main/org/h2/value/ExtTypeInfoEnum.java b/h2/src/main/org/h2/value/ExtTypeInfoEnum.java index d9e86c8a6c..3c3651f727 100644 --- a/h2/src/main/org/h2/value/ExtTypeInfoEnum.java +++ b/h2/src/main/org/h2/value/ExtTypeInfoEnum.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; @@ -9,6 +9,8 @@ import java.util.Locale; import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; +import org.h2.engine.Constants; import org.h2.message.DbException; /** @@ -42,29 +44,34 @@ public static ExtTypeInfoEnum getEnumeratorsForBinaryOperation(Value left, Value } private static String sanitize(String label) { - return label == null ? 
null : label.trim().toUpperCase(Locale.ENGLISH); + if (label == null) { + return null; + } + int length = label.length(); + if (length > Constants.MAX_STRING_LENGTH) { + throw DbException.getValueTooLongException("ENUM", label, length); + } + return label.trim().toUpperCase(Locale.ENGLISH); } - private static String toSQL(String[] enumerators) { - StringBuilder result = new StringBuilder(); - result.append('('); + private static StringBuilder toSQL(StringBuilder builder, String[] enumerators) { + builder.append('('); for (int i = 0; i < enumerators.length; i++) { if (i != 0) { - result.append(", "); + builder.append(", "); } - result.append('\''); + builder.append('\''); String s = enumerators[i]; for (int j = 0, length = s.length(); j < length; j++) { char c = s.charAt(j); if (c == '\'') { - result.append('\''); + builder.append('\''); } - result.append(c); + builder.append(c); } - result.append('\''); + builder.append('\''); } - result.append(')'); - return result.toString(); + return builder.append(')'); } /** @@ -74,18 +81,23 @@ private static String toSQL(String[] enumerators) { * the enumerators. May not be modified by caller or this class. 
*/ public ExtTypeInfoEnum(String[] enumerators) { - if (enumerators == null || enumerators.length == 0) { + int length; + if (enumerators == null || (length = enumerators.length) == 0) { throw DbException.get(ErrorCode.ENUM_EMPTY); } - final String[] cleaned = new String[enumerators.length]; - for (int i = 0; i < enumerators.length; i++) { + if (length > Constants.MAX_ARRAY_CARDINALITY) { + throw DbException.getValueTooLongException("ENUM", "(" + length + " elements)", length); + } + final String[] cleaned = new String[length]; + for (int i = 0; i < length; i++) { String l = sanitize(enumerators[i]); if (l == null || l.isEmpty()) { throw DbException.get(ErrorCode.ENUM_EMPTY); } for (int j = 0; j < i; j++) { if (l.equals(cleaned[j])) { - throw DbException.get(ErrorCode.ENUM_DUPLICATE, toSQL(enumerators)); + throw DbException.get(ErrorCode.ENUM_DUPLICATE, // + toSQL(new StringBuilder(), enumerators).toString()); } } cleaned[i] = l; @@ -104,38 +116,18 @@ TypeInfo getType() { p = l; } } - this.type = type = new TypeInfo(Value.ENUM, p, 0, p, this); + this.type = type = new TypeInfo(Value.ENUM, p, 0, this); } return type; } - @Override - public Value cast(Value value) { - switch (value.getValueType()) { - case Value.ENUM: - if (value instanceof ValueEnum && ((ValueEnum) value).getEnumerators().equals(this)) { - return value; - } - //$FALL-THROUGH$ - case Value.STRING: - case Value.STRING_FIXED: - case Value.STRING_IGNORECASE: - ValueEnum v = getValueOrNull(value.getString()); - if (v != null) { - return v; - } - break; - default: - int ordinal = value.getInt(); - if (ordinal >= 0 && ordinal < enumerators.length) { - return new ValueEnum(this, enumerators[ordinal], ordinal); - } - } - String s = value.getTraceSQL(); - if (s.length() > 127) { - s = s.substring(0, 128) + "..."; - } - throw DbException.get(ErrorCode.ENUM_VALUE_NOT_PERMITTED, toString(), s); + /** + * Get count of elements in enumeration. 
+ * + * @return count of elements in enumeration + */ + public int getCount() { + return enumerators.length; } /** @@ -152,35 +144,46 @@ public String getEnumerator(int ordinal) { /** * Get ValueEnum instance for an ordinal. * @param ordinal ordinal value of an enum + * @param provider the cast information provider * @return ValueEnum instance */ - public ValueEnum getValue(int ordinal) { - if (ordinal < 0 || ordinal >= enumerators.length) { - throw DbException.get(ErrorCode.ENUM_VALUE_NOT_PERMITTED, enumerators.toString(), - Integer.toString(ordinal)); + public ValueEnum getValue(int ordinal, CastDataProvider provider) { + String label; + if (provider == null || !provider.zeroBasedEnums()) { + if (ordinal < 1 || ordinal > enumerators.length) { + throw DbException.get(ErrorCode.ENUM_VALUE_NOT_PERMITTED, getTraceSQL(), Integer.toString(ordinal)); + } + label = enumerators[ordinal - 1]; + } else { + if (ordinal < 0 || ordinal >= enumerators.length) { + throw DbException.get(ErrorCode.ENUM_VALUE_NOT_PERMITTED, getTraceSQL(), Integer.toString(ordinal)); + } + label = enumerators[ordinal]; } - return new ValueEnum(this, enumerators[ordinal], ordinal); + return new ValueEnum(this, label, ordinal); } /** * Get ValueEnum instance for a label string. 
* @param label label string + * @param provider the cast information provider * @return ValueEnum instance */ - public ValueEnum getValue(String label) { - ValueEnum value = getValueOrNull(label); + public ValueEnum getValue(String label, CastDataProvider provider) { + ValueEnum value = getValueOrNull(label, provider); if (value == null) { throw DbException.get(ErrorCode.ENUM_VALUE_NOT_PERMITTED, toString(), label); } return value; } - private ValueEnum getValueOrNull(String label) { + private ValueEnum getValueOrNull(String label, CastDataProvider provider) { String l = sanitize(label); if (l != null) { - for (int ordinal = 0; ordinal < cleaned.length; ordinal++) { - if (l.equals(cleaned[ordinal])) { - return new ValueEnum(this, enumerators[ordinal], ordinal); + for (int i = 0, ordinal = provider == null || !provider.zeroBasedEnums() ? 1 + : 0; i < cleaned.length; i++, ordinal++) { + if (l.equals(cleaned[i])) { + return new ValueEnum(this, enumerators[i], ordinal); } } } @@ -204,8 +207,8 @@ public boolean equals(Object obj) { } @Override - public String getCreateSQL() { - return toSQL(enumerators); + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return toSQL(builder, enumerators); } } diff --git a/h2/src/main/org/h2/value/ExtTypeInfoGeometry.java b/h2/src/main/org/h2/value/ExtTypeInfoGeometry.java index cc6b99c88f..6f5b086f34 100644 --- a/h2/src/main/org/h2/value/ExtTypeInfoGeometry.java +++ b/h2/src/main/org/h2/value/ExtTypeInfoGeometry.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.value; -import org.h2.api.ErrorCode; -import org.h2.message.DbException; +import java.util.Objects; + import org.h2.util.geometry.EWKTUtils; /** @@ -18,21 +18,20 @@ public final class ExtTypeInfoGeometry extends ExtTypeInfo { private final Integer srid; - private static String toSQL(int type, Integer srid) { + static StringBuilder toSQL(StringBuilder builder, int type, Integer srid) { if (type == 0 && srid == null) { - return ""; + return builder; } - StringBuilder builder = new StringBuilder(); builder.append('('); if (type == 0) { builder.append("GEOMETRY"); } else { - builder.append(EWKTUtils.formatGeometryTypeAndDimensionSystem(type)); + EWKTUtils.formatGeometryTypeAndDimensionSystem(builder, type); } if (srid != null) { builder.append(", ").append((int) srid); } - return builder.append(')').toString(); + return builder.append(')'); } /** @@ -50,21 +49,44 @@ public ExtTypeInfoGeometry(int type, Integer srid) { } @Override - public Value cast(Value value) { - if (value.getValueType() != Value.GEOMETRY) { - value = value.convertTo(Value.GEOMETRY); + public int hashCode() { + return 31 * ((srid == null) ? 
0 : srid.hashCode()) + type; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; } - ValueGeometry g = (ValueGeometry) value; - if (type != 0 && g.getTypeAndDimensionSystem() != type || srid != null && g.getSRID() != srid) { - throw DbException.get(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, - toSQL(g.getTypeAndDimensionSystem(), g.getSRID()) + " <> " + toString()); + if (obj == null || obj.getClass() != ExtTypeInfoGeometry.class) { + return false; } - return g; + ExtTypeInfoGeometry other = (ExtTypeInfoGeometry) obj; + return type == other.type && Objects.equals(srid, other.srid); } @Override - public String getCreateSQL() { - return toSQL(type, srid); + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return toSQL(builder, type, srid); + } + + /** + * Returns the type and dimension system of geometries. + * + * @return the type and dimension system of geometries, or 0 if not + * constrained + */ + public int getType() { + return type; + } + + /** + * Returns the SRID of geometries. + * + * @return the SRID of geometries, or {@code null} if not constrained + */ + public Integer getSrid() { + return srid; } } diff --git a/h2/src/main/org/h2/value/ExtTypeInfoNumeric.java b/h2/src/main/org/h2/value/ExtTypeInfoNumeric.java new file mode 100644 index 0000000000..dafc52b4e6 --- /dev/null +++ b/h2/src/main/org/h2/value/ExtTypeInfoNumeric.java @@ -0,0 +1,26 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +/** + * Extended parameters of the NUMERIC data type. + */ +public final class ExtTypeInfoNumeric extends ExtTypeInfo { + + /** + * DECIMAL data type. 
+ */ + public static final ExtTypeInfoNumeric DECIMAL = new ExtTypeInfoNumeric(); + + private ExtTypeInfoNumeric() { + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return builder.append("DECIMAL"); + } + +} diff --git a/h2/src/main/org/h2/value/ExtTypeInfoRow.java b/h2/src/main/org/h2/value/ExtTypeInfoRow.java new file mode 100644 index 0000000000..2fd2864393 --- /dev/null +++ b/h2/src/main/org/h2/value/ExtTypeInfoRow.java @@ -0,0 +1,130 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; + +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.message.DbException; +import org.h2.util.ParserUtil; + +/** + * Extended parameters of the ROW data type. + */ +public final class ExtTypeInfoRow extends ExtTypeInfo { + + private final LinkedHashMap fields; + + private int hash; + + /** + * Creates new instance of extended parameters of ROW data type. + * + * @param fields + * fields + */ + public ExtTypeInfoRow(Typed[] fields) { + this(fields, fields.length); + } + + /** + * Creates new instance of extended parameters of ROW data type. + * + * @param fields + * fields + * @param degree + * number of fields to use + */ + public ExtTypeInfoRow(Typed[] fields, int degree) { + if (degree > Constants.MAX_COLUMNS) { + throw DbException.get(ErrorCode.TOO_MANY_COLUMNS_1, "" + Constants.MAX_COLUMNS); + } + LinkedHashMap map = new LinkedHashMap<>((int) Math.ceil(degree / .75)); + for (int i = 0; i < degree;) { + TypeInfo t = fields[i].getType(); + map.put("C" + ++i, t); + } + this.fields = map; + } + + /** + * Creates new instance of extended parameters of ROW data type. 
+ * + * @param fields + * fields + */ + public ExtTypeInfoRow(LinkedHashMap fields) { + if (fields.size() > Constants.MAX_COLUMNS) { + throw DbException.get(ErrorCode.TOO_MANY_COLUMNS_1, "" + Constants.MAX_COLUMNS); + } + this.fields = fields; + } + + /** + * Returns fields. + * + * @return fields + */ + public Set> getFields() { + return fields.entrySet(); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + builder.append('('); + boolean f = false; + for (Map.Entry field : fields.entrySet()) { + if (f) { + builder.append(", "); + } + f = true; + ParserUtil.quoteIdentifier(builder, field.getKey(), sqlFlags).append(' '); + field.getValue().getSQL(builder, sqlFlags); + } + return builder.append(')'); + } + + @Override + public int hashCode() { + int h = hash; + if (h != 0) { + return h; + } + h = 67_378_403; + for (Map.Entry entry : fields.entrySet()) { + h = (h * 31 + entry.getKey().hashCode()) * 37 + entry.getValue().hashCode(); + } + return hash = h; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj.getClass() != ExtTypeInfoRow.class) { + return false; + } + LinkedHashMap fields2 = ((ExtTypeInfoRow) obj).fields; + int degree = fields.size(); + if (degree != fields2.size()) { + return false; + } + for (Iterator> i1 = fields.entrySet().iterator(), i2 = fields2.entrySet() + .iterator(); i1.hasNext();) { + Map.Entry e1 = i1.next(), e2 = i2.next(); + if (!e1.getKey().equals(e2.getKey()) || !e1.getValue().equals(e2.getValue())) { + return false; + } + } + return true; + } + +} diff --git a/h2/src/main/org/h2/value/Transfer.java b/h2/src/main/org/h2/value/Transfer.java index 246e18b236..62496b00c7 100644 --- a/h2/src/main/org/h2/value/Transfer.java +++ b/h2/src/main/org/h2/value/Transfer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; @@ -14,29 +14,36 @@ import java.math.BigDecimal; import java.net.InetAddress; import java.net.Socket; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; +import java.util.Map.Entry; + import org.h2.api.ErrorCode; import org.h2.api.IntervalQualifier; import org.h2.engine.Constants; -import org.h2.engine.SessionInterface; +import org.h2.engine.Session; import org.h2.message.DbException; -import org.h2.result.ResultInterface; -import org.h2.result.SimpleResult; import org.h2.security.SHA256; import org.h2.store.Data; import org.h2.store.DataReader; import org.h2.util.Bits; +import org.h2.util.DateTimeUtils; import org.h2.util.IOUtils; -import org.h2.util.JdbcUtils; import org.h2.util.MathUtils; import org.h2.util.NetUtils; import org.h2.util.StringUtils; import org.h2.util.Utils; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; +import org.h2.value.lob.LobDataFetchOnDemand; /** * The transfer class is used to send and receive Value objects. * It is used on both the client side, and on the server side. 
*/ -public class Transfer { +public final class Transfer { private static final int BUFFER_SIZE = 64 * 1024; private static final int LOB_MAGIC = 0x1234; @@ -44,36 +51,98 @@ public class Transfer { private static final int NULL = 0; private static final int BOOLEAN = 1; - private static final int BYTE = 2; - private static final int SHORT = 3; - private static final int INT = 4; - private static final int LONG = 5; - private static final int DECIMAL = 6; + private static final int TINYINT = 2; + private static final int SMALLINT = 3; + private static final int INTEGER = 4; + private static final int BIGINT = 5; + private static final int NUMERIC = 6; private static final int DOUBLE = 7; - private static final int FLOAT = 8; + private static final int REAL = 8; private static final int TIME = 9; private static final int DATE = 10; private static final int TIMESTAMP = 11; - private static final int BYTES = 12; - private static final int STRING = 13; - private static final int STRING_IGNORECASE = 14; + private static final int VARBINARY = 12; + private static final int VARCHAR = 13; + private static final int VARCHAR_IGNORECASE = 14; private static final int BLOB = 15; private static final int CLOB = 16; private static final int ARRAY = 17; - private static final int RESULT_SET = 18; private static final int JAVA_OBJECT = 19; private static final int UUID = 20; - private static final int STRING_FIXED = 21; + private static final int CHAR = 21; private static final int GEOMETRY = 22; + // 1.4.192 private static final int TIMESTAMP_TZ = 24; + // 1.4.195 private static final int ENUM = 25; + // 1.4.198 private static final int INTERVAL = 26; private static final int ROW = 27; + // 1.4.200 + private static final int JSON = 28; + private static final int TIME_TZ = 29; + // 2.0.202 + private static final int BINARY = 30; + private static final int DECFLOAT = 31; + + private static final int[] VALUE_TO_TI = new int[Value.TYPE_COUNT + 1]; + private static final int[] 
TI_TO_VALUE = new int[45]; + + static { + addType(-1, Value.UNKNOWN); + addType(NULL, Value.NULL); + addType(BOOLEAN, Value.BOOLEAN); + addType(TINYINT, Value.TINYINT); + addType(SMALLINT, Value.SMALLINT); + addType(INTEGER, Value.INTEGER); + addType(BIGINT, Value.BIGINT); + addType(NUMERIC, Value.NUMERIC); + addType(DOUBLE, Value.DOUBLE); + addType(REAL, Value.REAL); + addType(TIME, Value.TIME); + addType(DATE, Value.DATE); + addType(TIMESTAMP, Value.TIMESTAMP); + addType(VARBINARY, Value.VARBINARY); + addType(VARCHAR, Value.VARCHAR); + addType(VARCHAR_IGNORECASE, Value.VARCHAR_IGNORECASE); + addType(BLOB, Value.BLOB); + addType(CLOB, Value.CLOB); + addType(ARRAY, Value.ARRAY); + addType(JAVA_OBJECT, Value.JAVA_OBJECT); + addType(UUID, Value.UUID); + addType(CHAR, Value.CHAR); + addType(GEOMETRY, Value.GEOMETRY); + addType(TIMESTAMP_TZ, Value.TIMESTAMP_TZ); + addType(ENUM, Value.ENUM); + addType(26, Value.INTERVAL_YEAR); + addType(27, Value.INTERVAL_MONTH); + addType(28, Value.INTERVAL_DAY); + addType(29, Value.INTERVAL_HOUR); + addType(30, Value.INTERVAL_MINUTE); + addType(31, Value.INTERVAL_SECOND); + addType(32, Value.INTERVAL_YEAR_TO_MONTH); + addType(33, Value.INTERVAL_DAY_TO_HOUR); + addType(34, Value.INTERVAL_DAY_TO_MINUTE); + addType(35, Value.INTERVAL_DAY_TO_SECOND); + addType(36, Value.INTERVAL_HOUR_TO_MINUTE); + addType(37, Value.INTERVAL_HOUR_TO_SECOND); + addType(38, Value.INTERVAL_MINUTE_TO_SECOND); + addType(39, Value.ROW); + addType(40, Value.JSON); + addType(41, Value.TIME_TZ); + addType(42, Value.BINARY); + addType(43, Value.DECFLOAT); + } + + private static void addType(int typeInformationType, int valueType) { + VALUE_TO_TI[valueType + 1] = typeInformationType; + TI_TO_VALUE[typeInformationType + 1] = valueType; + } private Socket socket; private DataInputStream in; private DataOutputStream out; - private SessionInterface session; + private Session session; private boolean ssl; private int version; private byte[] lobMacSalt; @@ -84,7 +153,7 @@ 
public class Transfer { * @param session the session * @param s the socket */ - public Transfer(SessionInterface session, Socket s) { + public Transfer(Session session, Socket s) { this.session = session; this.socket = s; } @@ -92,6 +161,7 @@ public Transfer(SessionInterface session, Socket s) { /** * Initialize the transfer object. This method will try to open an input and * output stream. + * @throws IOException on failure */ public synchronized void init() throws IOException { if (socket != null) { @@ -106,6 +176,7 @@ public synchronized void init() throws IOException { /** * Write pending changes. + * @throws IOException on failure */ public void flush() throws IOException { out.flush(); @@ -116,6 +187,7 @@ public void flush() throws IOException { * * @param x the value * @return itself + * @throws IOException on failure */ public Transfer writeBoolean(boolean x) throws IOException { out.writeByte((byte) (x ? 1 : 0)); @@ -126,6 +198,7 @@ public Transfer writeBoolean(boolean x) throws IOException { * Read a boolean. * * @return the value + * @throws IOException on failure */ public boolean readBoolean() throws IOException { return in.readByte() != 0; @@ -136,8 +209,9 @@ public boolean readBoolean() throws IOException { * * @param x the value * @return itself + * @throws IOException on failure */ - private Transfer writeByte(byte x) throws IOException { + public Transfer writeByte(byte x) throws IOException { out.writeByte(x); return this; } @@ -146,16 +220,40 @@ private Transfer writeByte(byte x) throws IOException { * Read a byte. * * @return the value + * @throws IOException on failure */ - private byte readByte() throws IOException { + public byte readByte() throws IOException { return in.readByte(); } + /** + * Write a short. + * + * @param x the value + * @return itself + * @throws IOException on failure + */ + private Transfer writeShort(short x) throws IOException { + out.writeShort(x); + return this; + } + + /** + * Read a short. 
+ * + * @return the value + * @throws IOException on failure + */ + private short readShort() throws IOException { + return in.readShort(); + } + /** * Write an int. * * @param x the value * @return itself + * @throws IOException on failure */ public Transfer writeInt(int x) throws IOException { out.writeInt(x); @@ -166,6 +264,7 @@ public Transfer writeInt(int x) throws IOException { * Read an int. * * @return the value + * @throws IOException on failure */ public int readInt() throws IOException { return in.readInt(); @@ -176,6 +275,7 @@ public int readInt() throws IOException { * * @param x the value * @return itself + * @throws IOException on failure */ public Transfer writeLong(long x) throws IOException { out.writeLong(x); @@ -186,6 +286,7 @@ public Transfer writeLong(long x) throws IOException { * Read a long. * * @return the value + * @throws IOException on failure */ public long readLong() throws IOException { return in.readLong(); @@ -196,6 +297,7 @@ public long readLong() throws IOException { * * @param i the value * @return itself + * @throws IOException on failure */ private Transfer writeDouble(double i) throws IOException { out.writeDouble(i); @@ -217,6 +319,7 @@ private Transfer writeFloat(float i) throws IOException { * Read a double. * * @return the value + * @throws IOException on failure */ private double readDouble() throws IOException { return in.readDouble(); @@ -226,6 +329,7 @@ private double readDouble() throws IOException { * Read a float. * * @return the value + * @throws IOException on failure */ private float readFloat() throws IOException { return in.readFloat(); @@ -236,6 +340,7 @@ private float readFloat() throws IOException { * * @param s the value * @return itself + * @throws IOException on failure */ public Transfer writeString(String s) throws IOException { if (s == null) { @@ -251,6 +356,7 @@ public Transfer writeString(String s) throws IOException { * Read a string. 
* * @return the value + * @throws IOException on failure */ public String readString() throws IOException { int len = in.readInt(); @@ -271,6 +377,7 @@ public String readString() throws IOException { * * @param data the value * @return itself + * @throws IOException on failure */ public Transfer writeBytes(byte[] data) throws IOException { if (data == null) { @@ -289,6 +396,7 @@ public Transfer writeBytes(byte[] data) throws IOException { * @param off the offset * @param len the length * @return itself + * @throws IOException on failure */ public Transfer writeBytes(byte[] buff, int off, int len) throws IOException { out.write(buff, off, len); @@ -299,6 +407,7 @@ public Transfer writeBytes(byte[] buff, int off, int len) throws IOException { * Read a byte array. * * @return the value + * @throws IOException on failure */ public byte[] readBytes() throws IOException { int len = readInt(); @@ -316,6 +425,7 @@ public byte[] readBytes() throws IOException { * @param buff the target buffer * @param off the offset * @param len the number of bytes to read + * @throws IOException on failure */ public void readBytes(byte[] buff, int off, int len) throws IOException { in.readFully(buff, off, len); @@ -330,9 +440,7 @@ public synchronized void close() { if (out != null) { out.flush(); } - if (socket != null) { - socket.close(); - } + socket.close(); } catch (IOException e) { DbException.traceThrowable(e); } finally { @@ -341,10 +449,321 @@ public synchronized void close() { } } + /** + * Write value type, precision, and scale. 
+ * + * @param type data type information + * @return itself + * @throws IOException on failure + */ + public Transfer writeTypeInfo(TypeInfo type) throws IOException { + if (version >= Constants.TCP_PROTOCOL_VERSION_20) { + writeTypeInfo20(type); + } else { + writeTypeInfo19(type); + } + return this; + } + + private void writeTypeInfo20(TypeInfo type) throws IOException { + int valueType = type.getValueType(); + writeInt(VALUE_TO_TI[valueType + 1]); + switch (valueType) { + case Value.UNKNOWN: + case Value.NULL: + case Value.BOOLEAN: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + case Value.DATE: + case Value.UUID: + break; + case Value.CHAR: + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.BINARY: + case Value.VARBINARY: + case Value.DECFLOAT: + case Value.JAVA_OBJECT: + case Value.JSON: + writeInt((int) type.getDeclaredPrecision()); + break; + case Value.CLOB: + case Value.BLOB: + writeLong(type.getDeclaredPrecision()); + break; + case Value.NUMERIC: + writeInt((int) type.getDeclaredPrecision()); + writeInt(type.getDeclaredScale()); + writeBoolean(type.getExtTypeInfo() != null); + break; + case Value.REAL: + case Value.DOUBLE: + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_MINUTE: + writeBytePrecisionWithDefault(type.getDeclaredPrecision()); + break; + case Value.TIME: + case Value.TIME_TZ: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + writeByteScaleWithDefault(type.getDeclaredScale()); + break; + case Value.INTERVAL_SECOND: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + writeBytePrecisionWithDefault(type.getDeclaredPrecision()); + writeByteScaleWithDefault(type.getDeclaredScale()); + break; + 
case Value.ENUM: + writeTypeInfoEnum(type); + break; + case Value.GEOMETRY: + writeTypeInfoGeometry(type); + break; + case Value.ARRAY: + writeInt((int) type.getDeclaredPrecision()); + writeTypeInfo((TypeInfo) type.getExtTypeInfo()); + break; + case Value.ROW: + writeTypeInfoRow(type); + break; + default: + throw DbException.getUnsupportedException("value type " + valueType); + } + } + + private void writeBytePrecisionWithDefault(long precision) throws IOException { + writeByte(precision >= 0 ? (byte) precision : -1); + } + + private void writeByteScaleWithDefault(int scale) throws IOException { + writeByte(scale >= 0 ? (byte) scale : -1); + } + + private void writeTypeInfoEnum(TypeInfo type) throws IOException { + ExtTypeInfoEnum ext = (ExtTypeInfoEnum) type.getExtTypeInfo(); + if (ext != null) { + int c = ext.getCount(); + writeInt(c); + for (int i = 0; i < c; i++) { + writeString(ext.getEnumerator(i)); + } + } else { + writeInt(0); + } + } + + private void writeTypeInfoGeometry(TypeInfo type) throws IOException { + ExtTypeInfoGeometry ext = (ExtTypeInfoGeometry) type.getExtTypeInfo(); + if (ext == null) { + writeByte((byte) 0); + } else { + int t = ext.getType(); + Integer srid = ext.getSrid(); + if (t == 0) { + if (srid == null) { + writeByte((byte) 0); + } else { + writeByte((byte) 2); + writeInt(srid); + } + } else { + if (srid == null) { + writeByte((byte) 1); + writeShort((short) t); + } else { + writeByte((byte) 3); + writeShort((short) t); + writeInt(srid); + } + } + } + } + + private void writeTypeInfoRow(TypeInfo type) throws IOException { + Set> fields = ((ExtTypeInfoRow) type.getExtTypeInfo()).getFields(); + writeInt(fields.size()); + for (Map.Entry field : fields) { + writeString(field.getKey()).writeTypeInfo(field.getValue()); + } + } + + private void writeTypeInfo19(TypeInfo type) throws IOException { + int valueType = type.getValueType(); + switch (valueType) { + case Value.BINARY: + valueType = Value.VARBINARY; + break; + case Value.DECFLOAT: + 
valueType = Value.NUMERIC; + break; + } + writeInt(VALUE_TO_TI[valueType + 1]).writeLong(type.getPrecision()).writeInt(type.getScale()); + } + + /** + * Read a type information. + * + * @return the type information + * @throws IOException on failure + */ + public TypeInfo readTypeInfo() throws IOException { + if (version >= Constants.TCP_PROTOCOL_VERSION_20) { + return readTypeInfo20(); + } else { + return readTypeInfo19(); + } + } + + private TypeInfo readTypeInfo20() throws IOException { + int valueType = TI_TO_VALUE[readInt() + 1]; + long precision = -1L; + int scale = -1; + ExtTypeInfo ext = null; + switch (valueType) { + case Value.UNKNOWN: + case Value.NULL: + case Value.BOOLEAN: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + case Value.DATE: + case Value.UUID: + break; + case Value.CHAR: + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.BINARY: + case Value.VARBINARY: + case Value.DECFLOAT: + case Value.JAVA_OBJECT: + case Value.JSON: + precision = readInt(); + break; + case Value.CLOB: + case Value.BLOB: + precision = readLong(); + break; + case Value.NUMERIC: + precision = readInt(); + scale = readInt(); + if (readBoolean()) { + ext = ExtTypeInfoNumeric.DECIMAL; + } + break; + case Value.REAL: + case Value.DOUBLE: + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_MINUTE: + precision = readByte(); + break; + case Value.TIME: + case Value.TIME_TZ: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + scale = readByte(); + break; + case Value.INTERVAL_SECOND: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + precision = readByte(); + scale = readByte(); + break; + case Value.ENUM: + ext = readTypeInfoEnum(); 
+ break; + case Value.GEOMETRY: + ext = readTypeInfoGeometry(); + break; + case Value.ARRAY: + precision = readInt(); + ext = readTypeInfo(); + break; + case Value.ROW: + ext = readTypeInfoRow(); + break; + default: + throw DbException.getUnsupportedException("value type " + valueType); + } + return TypeInfo.getTypeInfo(valueType, precision, scale, ext); + } + + private ExtTypeInfo readTypeInfoEnum() throws IOException { + ExtTypeInfo ext; + int c = readInt(); + if (c > 0) { + String[] enumerators = new String[c]; + for (int i = 0; i < c; i++) { + enumerators[i] = readString(); + } + ext = new ExtTypeInfoEnum(enumerators); + } else { + ext = null; + } + return ext; + } + + private ExtTypeInfo readTypeInfoGeometry() throws IOException { + ExtTypeInfo ext; + int e = readByte(); + switch (e) { + case 0: + ext = null; + break; + case 1: + ext = new ExtTypeInfoGeometry(readShort(), null); + break; + case 2: + ext = new ExtTypeInfoGeometry(0, readInt()); + break; + case 3: + ext = new ExtTypeInfoGeometry(readShort(), readInt()); + break; + default: + throw DbException.getUnsupportedException("GEOMETRY type encoding " + e); + } + return ext; + } + + private ExtTypeInfo readTypeInfoRow() throws IOException { + LinkedHashMap fields = new LinkedHashMap<>(); + for (int i = 0, l = readInt(); i < l; i++) { + String name = readString(); + if (fields.putIfAbsent(name, readTypeInfo()) != null) { + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, name); + } + } + return new ExtTypeInfoRow(fields); + } + + private TypeInfo readTypeInfo19() throws IOException { + return TypeInfo.getTypeInfo(TI_TO_VALUE[readInt() + 1], readLong(), readInt(), null); + } + /** * Write a value. 
* * @param v the value + * @throws IOException on failure */ public void writeValue(Value v) throws IOException { int type = v.getValueType(); @@ -352,8 +771,15 @@ public void writeValue(Value v) throws IOException { case Value.NULL: writeInt(NULL); break; - case Value.BYTES: - writeInt(BYTES); + case Value.BINARY: + if (version >= Constants.TCP_PROTOCOL_VERSION_20) { + writeInt(BINARY); + writeBytes(v.getBytesNoCopy()); + break; + } + //$FALL-THROUGH$ + case Value.VARBINARY: + writeInt(VARBINARY); writeBytes(v.getBytesNoCopy()); break; case Value.JAVA_OBJECT: @@ -371,14 +797,35 @@ public void writeValue(Value v) throws IOException { writeInt(BOOLEAN); writeBoolean(v.getBoolean()); break; - case Value.BYTE: - writeInt(BYTE); + case Value.TINYINT: + writeInt(TINYINT); writeByte(v.getByte()); break; case Value.TIME: writeInt(TIME); writeLong(((ValueTime) v).getNanos()); break; + case Value.TIME_TZ: { + ValueTimeTimeZone t = (ValueTimeTimeZone) v; + if (version >= Constants.TCP_PROTOCOL_VERSION_19) { + writeInt(TIME_TZ); + writeLong(t.getNanos()); + writeInt(t.getTimeZoneOffsetSeconds()); + } else { + writeInt(TIME); + /* + * Don't call SessionRemote.currentTimestamp(), it may require + * own remote call and old server will not return custom time + * zone anyway. + */ + ValueTimestampTimeZone current = session.isRemote() + ? 
DateTimeUtils.currentTimestamp(DateTimeUtils.getTimeZone()) : session.currentTimestamp(); + writeLong(DateTimeUtils.normalizeNanosOfDay(t.getNanos() + + (t.getTimeZoneOffsetSeconds() - current.getTimeZoneOffsetSeconds()) + * DateTimeUtils.NANOS_PER_DAY)); + } + break; + } case Value.DATE: writeInt(DATE); writeLong(((ValueDate) v).getDateValue()); @@ -395,69 +842,78 @@ public void writeValue(Value v) throws IOException { ValueTimestampTimeZone ts = (ValueTimestampTimeZone) v; writeLong(ts.getDateValue()); writeLong(ts.getTimeNanos()); - writeInt(ts.getTimeZoneOffsetMins()); + int timeZoneOffset = ts.getTimeZoneOffsetSeconds(); + writeInt(version >= Constants.TCP_PROTOCOL_VERSION_19 // + ? timeZoneOffset : timeZoneOffset / 60); break; } - case Value.DECIMAL: - writeInt(DECIMAL); + case Value.DECFLOAT: + if (version >= Constants.TCP_PROTOCOL_VERSION_20) { + writeInt(DECFLOAT); + writeString(v.getString()); + break; + } + //$FALL-THROUGH$ + case Value.NUMERIC: + writeInt(NUMERIC); writeString(v.getString()); break; case Value.DOUBLE: writeInt(DOUBLE); writeDouble(v.getDouble()); break; - case Value.FLOAT: - writeInt(FLOAT); + case Value.REAL: + writeInt(REAL); writeFloat(v.getFloat()); break; - case Value.INT: - writeInt(INT); + case Value.INTEGER: + writeInt(INTEGER); writeInt(v.getInt()); break; - case Value.LONG: - writeInt(LONG); + case Value.BIGINT: + writeInt(BIGINT); writeLong(v.getLong()); break; - case Value.SHORT: - writeInt(SHORT); - writeInt(v.getShort()); + case Value.SMALLINT: + writeInt(SMALLINT); + if (version >= Constants.TCP_PROTOCOL_VERSION_20) { + writeShort(v.getShort()); + } else { + writeInt(v.getShort()); + } break; - case Value.STRING: - writeInt(STRING); + case Value.VARCHAR: + writeInt(VARCHAR); writeString(v.getString()); break; - case Value.STRING_IGNORECASE: - writeInt(STRING_IGNORECASE); + case Value.VARCHAR_IGNORECASE: + writeInt(VARCHAR_IGNORECASE); writeString(v.getString()); break; - case Value.STRING_FIXED: - writeInt(STRING_FIXED); 
+ case Value.CHAR: + writeInt(CHAR); writeString(v.getString()); break; case Value.BLOB: { writeInt(BLOB); - if (version >= Constants.TCP_PROTOCOL_VERSION_11) { - if (v instanceof ValueLobDb) { - ValueLobDb lob = (ValueLobDb) v; - if (lob.isStored()) { - writeLong(-1); - writeInt(lob.getTableId()); - writeLong(lob.getLobId()); - if (version >= Constants.TCP_PROTOCOL_VERSION_12) { - writeBytes(calculateLobMac(lob.getLobId())); - } - writeLong(lob.getType().getPrecision()); - break; - } - } + ValueBlob lob = (ValueBlob) v; + LobData lobData = lob.getLobData(); + long length = lob.octetLength(); + if (lobData instanceof LobDataDatabase) { + LobDataDatabase lobDataDatabase = (LobDataDatabase) lobData; + writeLong(-1); + writeInt(lobDataDatabase.getTableId()); + writeLong(lobDataDatabase.getLobId()); + writeBytes(calculateLobMac(lobDataDatabase.getLobId())); + writeLong(length); + break; } - long length = v.getType().getPrecision(); if (length < 0) { throw DbException.get( ErrorCode.CONNECTION_BROKEN_1, "length=" + length); } writeLong(length); - long written = IOUtils.copyAndCloseInput(v.getInputStream(), out); + long written = IOUtils.copyAndCloseInput(lob.getInputStream(), out); if (written != length) { throw DbException.get( ErrorCode.CONNECTION_BROKEN_1, "length:" + length + " written:" + written); @@ -467,28 +923,27 @@ public void writeValue(Value v) throws IOException { } case Value.CLOB: { writeInt(CLOB); - if (version >= Constants.TCP_PROTOCOL_VERSION_11) { - if (v instanceof ValueLobDb) { - ValueLobDb lob = (ValueLobDb) v; - if (lob.isStored()) { - writeLong(-1); - writeInt(lob.getTableId()); - writeLong(lob.getLobId()); - if (version >= Constants.TCP_PROTOCOL_VERSION_12) { - writeBytes(calculateLobMac(lob.getLobId())); - } - writeLong(lob.getType().getPrecision()); - break; - } + ValueClob lob = (ValueClob) v; + LobData lobData = lob.getLobData(); + long charLength = lob.charLength(); + if (lobData instanceof LobDataDatabase) { + LobDataDatabase 
lobDataDatabase = (LobDataDatabase) lobData; + writeLong(-1); + writeInt(lobDataDatabase.getTableId()); + writeLong(lobDataDatabase.getLobId()); + writeBytes(calculateLobMac(lobDataDatabase.getLobId())); + if (version >= Constants.TCP_PROTOCOL_VERSION_20) { + writeLong(lob.octetLength()); } + writeLong(charLength); + break; } - long length = v.getType().getPrecision(); - if (length < 0) { + if (charLength < 0) { throw DbException.get( - ErrorCode.CONNECTION_BROKEN_1, "length=" + length); + ErrorCode.CONNECTION_BROKEN_1, "length=" + charLength); } - writeLong(length); - Reader reader = v.getReader(); + writeLong(charLength); + Reader reader = lob.getReader(); Data.copyString(reader, out); writeInt(LOB_MAGIC); break; @@ -498,13 +953,7 @@ public void writeValue(Value v) throws IOException { ValueArray va = (ValueArray) v; Value[] list = va.getList(); int len = list.length; - Class componentType = va.getComponentType(); - if (componentType == Object.class) { - writeInt(len); - } else { - writeInt(-(len + 1)); - writeString(componentType.getName()); - } + writeInt(len); for (Value value : list) { writeValue(value); } @@ -524,45 +973,14 @@ public void writeValue(Value v) throws IOException { case Value.ENUM: { writeInt(ENUM); writeInt(v.getInt()); - writeString(v.getString()); - break; - } - case Value.RESULT_SET: { - writeInt(RESULT_SET); - ResultInterface result = ((ValueResultSet) v).getResult(); - int columnCount = result.getVisibleColumnCount(); - writeInt(columnCount); - for (int i = 0; i < columnCount; i++) { - TypeInfo columnType = result.getColumnType(i); - if (version >= Constants.TCP_PROTOCOL_VERSION_18) { - writeString(result.getAlias(i)); - writeString(result.getColumnName(i)); - writeInt(columnType.getValueType()); - writeLong(columnType.getPrecision()); - } else { - writeString(result.getColumnName(i)); - writeInt(DataType.getDataType(columnType.getValueType()).sqlType); - writeInt(MathUtils.convertLongToInt(columnType.getPrecision())); - } - 
writeInt(columnType.getScale()); - } - while (result.next()) { - writeBoolean(true); - Value[] row = result.currentRow(); - for (int i = 0; i < columnCount; i++) { - writeValue(row[i]); - } + if (version < Constants.TCP_PROTOCOL_VERSION_20) { + writeString(v.getString()); } - writeBoolean(false); break; } case Value.GEOMETRY: writeInt(GEOMETRY); - if (version >= Constants.TCP_PROTOCOL_VERSION_14) { - writeBytes(v.getBytesNoCopy()); - } else { - writeString(v.getString()); - } + writeBytes(v.getBytesNoCopy()); break; case Value.INTERVAL_YEAR: case Value.INTERVAL_MONTH: @@ -579,7 +997,7 @@ public void writeValue(Value v) throws IOException { writeByte((byte) ordinal); writeLong(interval.getLeading()); } else { - writeInt(STRING); + writeInt(VARCHAR); writeString(v.getString()); } break; @@ -602,16 +1020,16 @@ public void writeValue(Value v) throws IOException { writeLong(interval.getLeading()); writeLong(interval.getRemaining()); } else { - writeInt(STRING); + writeInt(VARCHAR); writeString(v.getString()); } break; + case Value.JSON: { + writeInt(JSON); + writeBytes(v.getBytesNoCopy()); + break; + } default: - if (JdbcUtils.customDataTypesHandler != null) { - writeInt(type); - writeBytes(v.getBytesNoCopy()); - break; - } throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, "type=" + type); } } @@ -619,71 +1037,79 @@ public void writeValue(Value v) throws IOException { /** * Read a value. 
* + * @param columnType the data type of value, or {@code null} * @return the value + * @throws IOException on failure */ - public Value readValue() throws IOException { + public Value readValue(TypeInfo columnType) throws IOException { int type = readInt(); switch (type) { case NULL: return ValueNull.INSTANCE; - case BYTES: - return ValueBytes.getNoCopy(readBytes()); + case VARBINARY: + return ValueVarbinary.getNoCopy(readBytes()); + case BINARY: + return ValueBinary.getNoCopy(readBytes()); case UUID: return ValueUuid.get(readLong(), readLong()); case JAVA_OBJECT: - return ValueJavaObject.getNoCopy(null, readBytes(), session.getDataHandler()); + return ValueJavaObject.getNoCopy(readBytes()); case BOOLEAN: return ValueBoolean.get(readBoolean()); - case BYTE: - return ValueByte.get(readByte()); + case TINYINT: + return ValueTinyint.get(readByte()); case DATE: return ValueDate.fromDateValue(readLong()); case TIME: return ValueTime.fromNanos(readLong()); + case TIME_TZ: + return ValueTimeTimeZone.fromNanos(readLong(), readInt()); case TIMESTAMP: return ValueTimestamp.fromDateValueAndNanos(readLong(), readLong()); case TIMESTAMP_TZ: { - return ValueTimestampTimeZone.fromDateValueAndNanos(readLong(), readLong(), (short) readInt()); + long dateValue = readLong(), timeNanos = readLong(); + int timeZoneOffset = readInt(); + return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos, + version >= Constants.TCP_PROTOCOL_VERSION_19 ? 
timeZoneOffset : timeZoneOffset * 60); } - case DECIMAL: - return ValueDecimal.get(new BigDecimal(readString())); + case NUMERIC: + return ValueNumeric.get(new BigDecimal(readString())); case DOUBLE: return ValueDouble.get(readDouble()); - case FLOAT: - return ValueFloat.get(readFloat()); + case REAL: + return ValueReal.get(readFloat()); case ENUM: { - final int ordinal = readInt(); - final String label = readString(); - return ValueEnumBase.get(label, ordinal); - } - case INT: - return ValueInt.get(readInt()); - case LONG: - return ValueLong.get(readLong()); - case SHORT: - return ValueShort.get((short) readInt()); - case STRING: - return ValueString.get(readString()); - case STRING_IGNORECASE: - return ValueStringIgnoreCase.get(readString()); - case STRING_FIXED: - return ValueStringFixed.get(readString()); + int ordinal = readInt(); + if (version >= Constants.TCP_PROTOCOL_VERSION_20) { + return ((ExtTypeInfoEnum) columnType.getExtTypeInfo()).getValue(ordinal, session); + } + return ValueEnumBase.get(readString(), ordinal); + } + case INTEGER: + return ValueInteger.get(readInt()); + case BIGINT: + return ValueBigint.get(readLong()); + case SMALLINT: + if (version >= Constants.TCP_PROTOCOL_VERSION_20) { + return ValueSmallint.get(readShort()); + } else { + return ValueSmallint.get((short) readInt()); + } + case VARCHAR: + return ValueVarchar.get(readString()); + case VARCHAR_IGNORECASE: + return ValueVarcharIgnoreCase.get(readString()); + case CHAR: + return ValueChar.get(readString()); case BLOB: { long length = readLong(); - if (version >= Constants.TCP_PROTOCOL_VERSION_11) { - if (length == -1) { - int tableId = readInt(); - long id = readLong(); - byte[] hmac; - if (version >= Constants.TCP_PROTOCOL_VERSION_12) { - hmac = readBytes(); - } else { - hmac = null; - } - long precision = readLong(); - return ValueLobDb.create( - Value.BLOB, session.getDataHandler(), tableId, id, hmac, precision); - } + if (length == -1) { + // fetch-on-demand LOB + int tableId = 
readInt(); + long id = readLong(); + byte[] hmac = readBytes(); + long precision = readLong(); + return new ValueBlob(new LobDataFetchOnDemand(session.getDataHandler(), tableId, id, hmac), precision); } Value v = session.getDataHandler().getLobStorage().createBlob(in, length); int magic = readInt(); @@ -694,28 +1120,23 @@ public Value readValue() throws IOException { return v; } case CLOB: { - long length = readLong(); - if (version >= Constants.TCP_PROTOCOL_VERSION_11) { - if (length == -1) { - int tableId = readInt(); - long id = readLong(); - byte[] hmac; - if (version >= Constants.TCP_PROTOCOL_VERSION_12) { - hmac = readBytes(); - } else { - hmac = null; - } - long precision = readLong(); - return ValueLobDb.create( - Value.CLOB, session.getDataHandler(), tableId, id, hmac, precision); - } - if (length < 0) { - throw DbException.get( - ErrorCode.CONNECTION_BROKEN_1, "length="+ length); - } + long charLength = readLong(); + if (charLength == -1) { + // fetch-on-demand LOB + int tableId = readInt(); + long id = readLong(); + byte[] hmac = readBytes(); + long octetLength = version >= Constants.TCP_PROTOCOL_VERSION_20 ? readLong() : -1L; + charLength = readLong(); + return new ValueClob(new LobDataFetchOnDemand(session.getDataHandler(), tableId, id, hmac), + octetLength, charLength); + } + if (charLength < 0) { + throw DbException.get( + ErrorCode.CONNECTION_BROKEN_1, "length="+ charLength); } Value v = session.getDataHandler().getLobStorage(). 
- createClob(new DataReader(in), length); + createClob(new DataReader(in), charLength); int magic = readInt(); if (magic != LOB_MAGIC) { throw DbException.get( @@ -725,50 +1146,35 @@ public Value readValue() throws IOException { } case ARRAY: { int len = readInt(); - Class componentType = Object.class; if (len < 0) { - len = -(len + 1); - componentType = JdbcUtils.loadUserClass(readString()); + // Unlikely, but possible with H2 1.4.200 and older versions + len = ~len; + readString(); } - Value[] list = new Value[len]; - for (int i = 0; i < len; i++) { - list[i] = readValue(); + if (columnType != null) { + TypeInfo elementType = (TypeInfo) columnType.getExtTypeInfo(); + return ValueArray.get(elementType, readArrayElements(len, elementType), session); } - return ValueArray.get(componentType, list); + return ValueArray.get(readArrayElements(len, null), session); } case ROW: { int len = readInt(); Value[] list = new Value[len]; - for (int i = 0; i < len; i++) { - list[i] = readValue(); - } - return ValueRow.get(list); - } - case RESULT_SET: { - SimpleResult rs = new SimpleResult(); - int columns = readInt(); - for (int i = 0; i < columns; i++) { - if (version >= Constants.TCP_PROTOCOL_VERSION_18) { - rs.addColumn(readString(), readString(), readInt(), readLong(), readInt()); - } else { - String name = readString(); - rs.addColumn(name, name, DataType.convertSQLTypeToValueType(readInt()), readInt(), readInt()); + if (columnType != null) { + ExtTypeInfoRow extTypeInfoRow = (ExtTypeInfoRow) columnType.getExtTypeInfo(); + Iterator> fields = extTypeInfoRow.getFields().iterator(); + for (int i = 0; i < len; i++) { + list[i] = readValue(fields.next().getValue()); } + return ValueRow.get(columnType, list); } - while (readBoolean()) { - Value[] o = new Value[columns]; - for (int i = 0; i < columns; i++) { - o[i] = readValue(); - } - rs.addRow(o); + for (int i = 0; i < len; i++) { + list[i] = readValue(null); } - return ValueResultSet.get(rs); + return ValueRow.get(list); } case 
GEOMETRY: - if (version >= Constants.TCP_PROTOCOL_VERSION_14) { - return ValueGeometry.get(readBytes()); - } - return ValueGeometry.get(readString()); + return ValueGeometry.get(readBytes()); case INTERVAL: { int ordinal = readByte(); boolean negative = ordinal < 0; @@ -778,15 +1184,57 @@ public Value readValue() throws IOException { return ValueInterval.from(IntervalQualifier.valueOf(ordinal), negative, readLong(), ordinal < 5 ? 0 : readLong()); } - default: - if (JdbcUtils.customDataTypesHandler != null) { - return JdbcUtils.customDataTypesHandler.convert( - ValueBytes.getNoCopy(readBytes()), type); + case JSON: + // Do not trust the value + return ValueJson.fromJson(readBytes()); + case DECFLOAT: { + String s = readString(); + switch (s) { + case "-Infinity": + return ValueDecfloat.NEGATIVE_INFINITY; + case "Infinity": + return ValueDecfloat.POSITIVE_INFINITY; + case "NaN": + return ValueDecfloat.NAN; + default: + return ValueDecfloat.get(new BigDecimal(s)); } + } + default: throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, "type=" + type); } } + private Value[] readArrayElements(int len, TypeInfo elementType) throws IOException { + Value[] list = new Value[len]; + for (int i = 0; i < len; i++) { + list[i] = readValue(elementType); + } + return list; + } + + /** + * Read a row count. + * + * @return the row count + * @throws IOException on failure + */ + public long readRowCount() throws IOException { + return version >= Constants.TCP_PROTOCOL_VERSION_20 ? readLong() : readInt(); + } + + /** + * Write a row count. + * + * @param rowCount the row count + * @return itself + * @throws IOException on failure + */ + public Transfer writeRowCount(long rowCount) throws IOException { + return version >= Constants.TCP_PROTOCOL_VERSION_20 ? writeLong(rowCount) + : writeInt(rowCount < Integer.MAX_VALUE ? (int) rowCount : Integer.MAX_VALUE); + } + /** * Get the socket. 
* @@ -801,7 +1249,7 @@ public Socket getSocket() { * * @param session the session */ - public void setSession(SessionInterface session) { + public void setSession(Session session) { this.session = session; } @@ -818,6 +1266,7 @@ public void setSSL(boolean ssl) { * Open a new connection to the same address and port as this one. * * @return the new transfer object + * @throws IOException on failure */ public Transfer openNewConnection() throws IOException { InetAddress address = socket.getInetAddress(); @@ -832,6 +1281,10 @@ public void setVersion(int version) { this.version = version; } + public int getVersion() { + return version; + } + public synchronized boolean isClosed() { return socket == null || socket.isClosed(); } diff --git a/h2/src/main/org/h2/value/TypeInfo.java b/h2/src/main/org/h2/value/TypeInfo.java index 6ec4c8d879..cc607a239b 100644 --- a/h2/src/main/org/h2/value/TypeInfo.java +++ b/h2/src/main/org/h2/value/TypeInfo.java @@ -1,21 +1,26 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import org.h2.api.CustomDataTypesHandler; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Set; + import org.h2.api.ErrorCode; import org.h2.api.IntervalQualifier; +import org.h2.engine.Constants; import org.h2.message.DbException; -import org.h2.util.JdbcUtils; -import org.h2.util.MathUtils; /** * Data type with parameters. */ -public class TypeInfo { +public class TypeInfo extends ExtTypeInfo implements Typed { /** * UNKNOWN type with parameters. @@ -27,55 +32,100 @@ public class TypeInfo { */ public static final TypeInfo TYPE_NULL; + /** + * CHAR type with default parameters. 
+ */ + public static final TypeInfo TYPE_CHAR; + + /** + * CHARACTER VARYING type with maximum parameters. + */ + public static final TypeInfo TYPE_VARCHAR; + + /** + * VARCHAR_IGNORECASE type with maximum parameters. + */ + public static final TypeInfo TYPE_VARCHAR_IGNORECASE; + + /** + * CHARACTER LARGE OBJECT type with maximum parameters. + */ + public static final TypeInfo TYPE_CLOB; + + /** + * BINARY type with default parameters. + */ + public static final TypeInfo TYPE_BINARY; + + /** + * BINARY VARYING type with maximum parameters. + */ + public static final TypeInfo TYPE_VARBINARY; + + /** + * BINARY LARGE OBJECT type with maximum parameters. + */ + public static final TypeInfo TYPE_BLOB; + /** * BOOLEAN type with parameters. */ public static final TypeInfo TYPE_BOOLEAN; /** - * BYTE type with parameters. + * TINYINT type with parameters. */ - public static final TypeInfo TYPE_BYTE; + public static final TypeInfo TYPE_TINYINT; /** - * SHORT type with parameters. + * SMALLINT type with parameters. */ - public static final TypeInfo TYPE_SHORT; + public static final TypeInfo TYPE_SMALLINT; /** - * INT type with parameters. + * INTEGER type with parameters. */ - public static final TypeInfo TYPE_INT; + public static final TypeInfo TYPE_INTEGER; /** - * LONG type with parameters. + * BIGINT type with parameters. */ - public static final TypeInfo TYPE_LONG; + public static final TypeInfo TYPE_BIGINT; /** - * DECIMAL type with maximum parameters. + * NUMERIC type with maximum precision and scale 0. */ - public static final TypeInfo TYPE_DECIMAL; + public static final TypeInfo TYPE_NUMERIC_SCALE_0; /** - * DECIMAL type with default parameters. + * NUMERIC type with parameters enough to hold a BIGINT value. */ - public static final TypeInfo TYPE_DECIMAL_DEFAULT; + public static final TypeInfo TYPE_NUMERIC_BIGINT; /** - * DOUBLE type with parameters. + * NUMERIC type that can hold values with floating point. 
+ */ + public static final TypeInfo TYPE_NUMERIC_FLOATING_POINT; + + /** + * REAL type with parameters. + */ + public static final TypeInfo TYPE_REAL; + + /** + * DOUBLE PRECISION type with parameters. */ public static final TypeInfo TYPE_DOUBLE; /** - * FLOAT type with parameters. + * DECFLOAT type with maximum parameters. */ - public static final TypeInfo TYPE_FLOAT; + public static final TypeInfo TYPE_DECFLOAT; /** - * TIME type with maximum parameters. + * DECFLOAT type with parameters enough to hold a BIGINT value. */ - public static final TypeInfo TYPE_TIME; + public static final TypeInfo TYPE_DECFLOAT_BIGINT; /** * DATE type with parameters. @@ -83,44 +133,49 @@ public class TypeInfo { public static final TypeInfo TYPE_DATE; /** - * TIMESTAMP type with maximum parameters. + * TIME type with maximum parameters. */ - public static final TypeInfo TYPE_TIMESTAMP; + public static final TypeInfo TYPE_TIME; /** - * STRING type with maximum parameters. + * TIME WITH TIME ZONE type with maximum parameters. */ - public static final TypeInfo TYPE_STRING; + public static final TypeInfo TYPE_TIME_TZ; /** - * ARRAY type with parameters. + * TIMESTAMP type with maximum parameters. */ - public static final TypeInfo TYPE_ARRAY; + public static final TypeInfo TYPE_TIMESTAMP; /** - * RESULT_SET type with parameters. + * TIMESTAMP WITH TIME ZONE type with maximum parameters. */ - public static final TypeInfo TYPE_RESULT_SET; + public static final TypeInfo TYPE_TIMESTAMP_TZ; /** - * JAVA_OBJECT type with parameters. + * INTERVAL DAY type with maximum parameters. */ - public static final TypeInfo TYPE_JAVA_OBJECT; + public static final TypeInfo TYPE_INTERVAL_DAY; /** - * UUID type with parameters. + * INTERVAL YEAR TO MONTH type with maximum parameters. */ - public static final TypeInfo TYPE_UUID; + public static final TypeInfo TYPE_INTERVAL_YEAR_TO_MONTH; /** - * GEOMETRY type with default parameters. + * INTERVAL DAY TO SECOND type with maximum parameters. 
*/ - public static final TypeInfo TYPE_GEOMETRY; + public static final TypeInfo TYPE_INTERVAL_DAY_TO_SECOND; /** - * TIMESTAMP WITH TIME ZONE type with maximum parameters. + * INTERVAL HOUR TO SECOND type with maximum parameters. */ - public static final TypeInfo TYPE_TIMESTAMP_TZ; + public static final TypeInfo TYPE_INTERVAL_HOUR_TO_SECOND; + + /** + * JAVA_OBJECT type with maximum parameters. + */ + public static final TypeInfo TYPE_JAVA_OBJECT; /** * ENUM type with undefined parameters. @@ -128,24 +183,29 @@ public class TypeInfo { public static final TypeInfo TYPE_ENUM_UNDEFINED; /** - * INTERVAL DAY type with maximum parameters. + * GEOMETRY type with default parameters. */ - public static final TypeInfo TYPE_INTERVAL_DAY; + public static final TypeInfo TYPE_GEOMETRY; /** - * INTERVAL DAY TO SECOND type with maximum parameters. + * JSON type. */ - public static final TypeInfo TYPE_INTERVAL_DAY_TO_SECOND; + public static final TypeInfo TYPE_JSON; /** - * INTERVAL HOUR TO SECOND type with maximum parameters. + * UUID type with parameters. */ - public static final TypeInfo TYPE_INTERVAL_HOUR_TO_SECOND; + public static final TypeInfo TYPE_UUID; + + /** + * ARRAY type with unknown parameters. + */ + public static final TypeInfo TYPE_ARRAY_UNKNOWN; /** - * ROW (row value) type with parameters. + * ROW (row value) type without fields. 
*/ - public static final TypeInfo TYPE_ROW; + public static final TypeInfo TYPE_ROW_EMPTY; private static final TypeInfo[] TYPE_INFOS_BY_VALUE_TYPE; @@ -155,66 +215,63 @@ public class TypeInfo { private final int scale; - private final int displaySize; - private final ExtTypeInfo extTypeInfo; static { TypeInfo[] infos = new TypeInfo[Value.TYPE_COUNT]; - TYPE_UNKNOWN = new TypeInfo(Value.UNKNOWN, -1L, -1, -1, null); - infos[Value.NULL] = TYPE_NULL = new TypeInfo(Value.NULL, ValueNull.PRECISION, 0, ValueNull.DISPLAY_SIZE, null); - infos[Value.BOOLEAN] = TYPE_BOOLEAN = new TypeInfo(Value.BOOLEAN, ValueBoolean.PRECISION, 0, - ValueBoolean.DISPLAY_SIZE, null); - infos[Value.BYTE] = TYPE_BYTE = new TypeInfo(Value.BYTE, ValueByte.PRECISION, 0, ValueByte.DISPLAY_SIZE, null); - infos[Value.SHORT] = TYPE_SHORT = new TypeInfo(Value.SHORT, ValueShort.PRECISION, 0, ValueShort.DISPLAY_SIZE, - null); - infos[Value.INT] = TYPE_INT = new TypeInfo(Value.INT, ValueInt.PRECISION, 0, ValueInt.DISPLAY_SIZE, null); - infos[Value.LONG] = TYPE_LONG = new TypeInfo(Value.LONG, ValueLong.PRECISION, 0, ValueLong.DISPLAY_SIZE, null); - infos[Value.DECIMAL] = TYPE_DECIMAL= new TypeInfo(Value.DECIMAL, Integer.MAX_VALUE, Integer.MAX_VALUE, - Integer.MAX_VALUE, null); - TYPE_DECIMAL_DEFAULT = new TypeInfo(Value.DECIMAL, ValueDecimal.DEFAULT_PRECISION, ValueDecimal.DEFAULT_SCALE, - ValueDecimal.DEFAULT_PRECISION + 2, null); - infos[Value.DOUBLE] = TYPE_DOUBLE = new TypeInfo(Value.DOUBLE, ValueDouble.PRECISION, 0, - ValueDouble.DISPLAY_SIZE, null); - infos[Value.FLOAT] = TYPE_FLOAT = new TypeInfo(Value.FLOAT, ValueFloat.PRECISION, 0, ValueFloat.DISPLAY_SIZE, - null); - infos[Value.TIME] = TYPE_TIME = new TypeInfo(Value.TIME, ValueTime.MAXIMUM_PRECISION, ValueTime.MAXIMUM_SCALE, - ValueTime.MAXIMUM_PRECISION, null); - infos[Value.DATE] = TYPE_DATE = new TypeInfo(Value.DATE, ValueDate.PRECISION, 0, ValueDate.PRECISION, null); - infos[Value.TIMESTAMP] = TYPE_TIMESTAMP = new TypeInfo(Value.TIMESTAMP, 
ValueTimestamp.MAXIMUM_PRECISION, - ValueTimestamp.MAXIMUM_SCALE, ValueTimestamp.MAXIMUM_PRECISION, null); - infos[Value.BYTES] = new TypeInfo(Value.BYTES, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, null); - infos[Value.STRING] = TYPE_STRING = new TypeInfo(Value.STRING, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, null); - infos[Value.STRING_IGNORECASE] = new TypeInfo(Value.STRING_IGNORECASE, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, - null); - infos[Value.BLOB] = new TypeInfo(Value.BLOB, Long.MAX_VALUE, 0, Integer.MAX_VALUE, null); - infos[Value.CLOB] = new TypeInfo(Value.CLOB, Long.MAX_VALUE, 0, Integer.MAX_VALUE, null); - infos[Value.ARRAY] = TYPE_ARRAY = new TypeInfo(Value.ARRAY, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, null); - infos[Value.RESULT_SET] = TYPE_RESULT_SET = new TypeInfo(Value.RESULT_SET, Integer.MAX_VALUE, - Integer.MAX_VALUE, Integer.MAX_VALUE, null); - infos[Value.JAVA_OBJECT] = TYPE_JAVA_OBJECT = new TypeInfo(Value.JAVA_OBJECT, Integer.MAX_VALUE, 0, - Integer.MAX_VALUE, null); - infos[Value.UUID] = TYPE_UUID = new TypeInfo(Value.UUID, ValueUuid.PRECISION, 0, ValueUuid.DISPLAY_SIZE, null); - infos[Value.STRING_FIXED] = new TypeInfo(Value.STRING_FIXED, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, - null); - infos[Value.GEOMETRY] = TYPE_GEOMETRY = new TypeInfo(Value.GEOMETRY, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, - null); - infos[Value.TIMESTAMP_TZ] = TYPE_TIMESTAMP_TZ = new TypeInfo(Value.TIMESTAMP_TZ, - ValueTimestampTimeZone.MAXIMUM_PRECISION, ValueTimestampTimeZone.MAXIMUM_SCALE, - ValueTimestampTimeZone.MAXIMUM_PRECISION, null); - infos[Value.ENUM] = TYPE_ENUM_UNDEFINED = new TypeInfo(Value.ENUM, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, - null); + TYPE_UNKNOWN = new TypeInfo(Value.UNKNOWN); + // NULL + infos[Value.NULL] = TYPE_NULL = new TypeInfo(Value.NULL); + // CHARACTER + infos[Value.CHAR] = TYPE_CHAR = new TypeInfo(Value.CHAR, -1L); + infos[Value.VARCHAR] = TYPE_VARCHAR = new TypeInfo(Value.VARCHAR); + infos[Value.CLOB] = TYPE_CLOB = new 
TypeInfo(Value.CLOB); + infos[Value.VARCHAR_IGNORECASE] = TYPE_VARCHAR_IGNORECASE = new TypeInfo(Value.VARCHAR_IGNORECASE); + // BINARY + infos[Value.BINARY] = TYPE_BINARY = new TypeInfo(Value.BINARY, -1L); + infos[Value.VARBINARY] = TYPE_VARBINARY = new TypeInfo(Value.VARBINARY); + infos[Value.BLOB] = TYPE_BLOB = new TypeInfo(Value.BLOB); + // BOOLEAN + infos[Value.BOOLEAN] = TYPE_BOOLEAN = new TypeInfo(Value.BOOLEAN); + // NUMERIC + infos[Value.TINYINT] = TYPE_TINYINT = new TypeInfo(Value.TINYINT); + infos[Value.SMALLINT] = TYPE_SMALLINT = new TypeInfo(Value.SMALLINT); + infos[Value.INTEGER] = TYPE_INTEGER = new TypeInfo(Value.INTEGER); + infos[Value.BIGINT] = TYPE_BIGINT = new TypeInfo(Value.BIGINT); + TYPE_NUMERIC_SCALE_0 = new TypeInfo(Value.NUMERIC, Constants.MAX_NUMERIC_PRECISION, 0, null); + TYPE_NUMERIC_BIGINT = new TypeInfo(Value.NUMERIC, ValueBigint.DECIMAL_PRECISION, 0, null); + infos[Value.NUMERIC] = TYPE_NUMERIC_FLOATING_POINT = new TypeInfo(Value.NUMERIC, + Constants.MAX_NUMERIC_PRECISION, Constants.MAX_NUMERIC_PRECISION / 2, null); + infos[Value.REAL] = TYPE_REAL = new TypeInfo(Value.REAL); + infos[Value.DOUBLE] = TYPE_DOUBLE = new TypeInfo(Value.DOUBLE); + infos[Value.DECFLOAT] = TYPE_DECFLOAT = new TypeInfo(Value.DECFLOAT); + TYPE_DECFLOAT_BIGINT = new TypeInfo(Value.DECFLOAT, (long) ValueBigint.DECIMAL_PRECISION); + // DATETIME + infos[Value.DATE] = TYPE_DATE = new TypeInfo(Value.DATE); + infos[Value.TIME] = TYPE_TIME = new TypeInfo(Value.TIME, ValueTime.MAXIMUM_SCALE); + infos[Value.TIME_TZ] = TYPE_TIME_TZ = new TypeInfo(Value.TIME_TZ, ValueTime.MAXIMUM_SCALE); + infos[Value.TIMESTAMP] = TYPE_TIMESTAMP = new TypeInfo(Value.TIMESTAMP, ValueTimestamp.MAXIMUM_SCALE); + infos[Value.TIMESTAMP_TZ] = TYPE_TIMESTAMP_TZ = new TypeInfo(Value.TIMESTAMP_TZ, ValueTimestamp.MAXIMUM_SCALE); + // INTERVAL for (int i = Value.INTERVAL_YEAR; i <= Value.INTERVAL_MINUTE_TO_SECOND; i++) { infos[i] = new TypeInfo(i, ValueInterval.MAXIMUM_PRECISION, - 
IntervalQualifier.valueOf(i - Value.INTERVAL_YEAR).hasSeconds() ? ValueInterval.MAXIMUM_SCALE : 0, - ValueInterval.getDisplaySize(i, ValueInterval.MAXIMUM_PRECISION, - // Scale will be ignored if it is not supported - ValueInterval.MAXIMUM_SCALE), null); + IntervalQualifier.valueOf(i - Value.INTERVAL_YEAR).hasSeconds() ? ValueInterval.MAXIMUM_SCALE : -1, + null); } TYPE_INTERVAL_DAY = infos[Value.INTERVAL_DAY]; + TYPE_INTERVAL_YEAR_TO_MONTH = infos[Value.INTERVAL_YEAR_TO_MONTH]; TYPE_INTERVAL_DAY_TO_SECOND = infos[Value.INTERVAL_DAY_TO_SECOND]; TYPE_INTERVAL_HOUR_TO_SECOND = infos[Value.INTERVAL_HOUR_TO_SECOND]; - infos[Value.ROW] = TYPE_ROW = new TypeInfo(Value.ROW, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, null); + // OTHER + infos[Value.JAVA_OBJECT] = TYPE_JAVA_OBJECT = new TypeInfo(Value.JAVA_OBJECT); + infos[Value.ENUM] = TYPE_ENUM_UNDEFINED = new TypeInfo(Value.ENUM); + infos[Value.GEOMETRY] = TYPE_GEOMETRY = new TypeInfo(Value.GEOMETRY); + infos[Value.JSON] = TYPE_JSON = new TypeInfo(Value.JSON); + infos[Value.UUID] = TYPE_UUID = new TypeInfo(Value.UUID); + // COLLECTION + infos[Value.ARRAY] = TYPE_ARRAY_UNKNOWN = new TypeInfo(Value.ARRAY); + infos[Value.ROW] = TYPE_ROW_EMPTY = new TypeInfo(Value.ROW, -1L, -1, // + new ExtTypeInfoRow(new LinkedHashMap<>())); TYPE_INFOS_BY_VALUE_TYPE = infos; } @@ -236,13 +293,6 @@ public static TypeInfo getTypeInfo(int type) { return t; } } - CustomDataTypesHandler handler = JdbcUtils.customDataTypesHandler; - if (handler != null) { - DataType dt = handler.getDataTypeById(type); - if (dt != null) { - return handler.getTypeInfoById(type, dt.maxPrecision, dt.maxScale, null); - } - } return TYPE_NULL; } @@ -253,98 +303,134 @@ public static TypeInfo getTypeInfo(int type) { * @param type * the value type * @param precision - * the precision + * the precision or {@code -1L} for default * @param scale - * the scale + * the scale or {@code -1} for default * @param extTypeInfo - * the extended type information, or null + * the 
extended type information or null * @return the data type with parameters object */ public static TypeInfo getTypeInfo(int type, long precision, int scale, ExtTypeInfo extTypeInfo) { switch (type) { case Value.NULL: case Value.BOOLEAN: - case Value.BYTE: - case Value.SHORT: - case Value.INT: - case Value.LONG: - case Value.DOUBLE: - case Value.FLOAT: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: case Value.DATE: - case Value.ARRAY: - case Value.RESULT_SET: - case Value.JAVA_OBJECT: case Value.UUID: - case Value.ROW: return TYPE_INFOS_BY_VALUE_TYPE[type]; case Value.UNKNOWN: return TYPE_UNKNOWN; - case Value.DECIMAL: - if (precision < 0) { - precision = ValueDecimal.DEFAULT_PRECISION; + case Value.CHAR: + if (precision < 1) { + return TYPE_CHAR; } - if (scale < 0) { - scale = ValueDecimal.DEFAULT_SCALE; + if (precision > Constants.MAX_STRING_LENGTH) { + precision = Constants.MAX_STRING_LENGTH; } - if (precision < scale) { - precision = scale; + return new TypeInfo(Value.CHAR, precision); + case Value.VARCHAR: + if (precision < 1 || precision >= Constants.MAX_STRING_LENGTH) { + if (precision != 0) { + return TYPE_VARCHAR; + } + precision = 1; } - return new TypeInfo(Value.DECIMAL, precision, scale, MathUtils.convertLongToInt(precision + 2), null); - case Value.TIME: { - if (scale < 0 || scale >= ValueTime.MAXIMUM_SCALE) { - return TYPE_TIME; + return new TypeInfo(Value.VARCHAR, precision); + case Value.CLOB: + if (precision < 1) { + return TYPE_CLOB; } - int d = scale == 0 ? 8 : 9 + scale; - return new TypeInfo(Value.TIME, d, scale, d, null); - } - case Value.TIMESTAMP: { - if (scale < 0 || scale >= ValueTimestamp.MAXIMUM_SCALE) { - return TYPE_TIMESTAMP; + return new TypeInfo(Value.CLOB, precision); + case Value.VARCHAR_IGNORECASE: + if (precision < 1 || precision >= Constants.MAX_STRING_LENGTH) { + if (precision != 0) { + return TYPE_VARCHAR_IGNORECASE; + } + precision = 1; } - int d = scale == 0 ? 
19 : 20 + scale; - return new TypeInfo(Value.TIMESTAMP, d, scale, d, null); - } - case Value.TIMESTAMP_TZ: { - if (scale < 0 || scale >= ValueTimestampTimeZone.MAXIMUM_SCALE) { - return TYPE_TIMESTAMP_TZ; + return new TypeInfo(Value.VARCHAR_IGNORECASE, precision); + case Value.BINARY: + if (precision < 1) { + return TYPE_BINARY; } - int d = scale == 0 ? 25 : 26 + scale; - return new TypeInfo(Value.TIMESTAMP_TZ, d, scale, d, null); - } - case Value.BYTES: - if (precision < 0) { - precision = Integer.MAX_VALUE; - } - return new TypeInfo(Value.BYTES, precision, 0, MathUtils.convertLongToInt(precision) * 2, null); - case Value.STRING: - if (precision < 0) { - return TYPE_STRING; + if (precision > Constants.MAX_STRING_LENGTH) { + precision = Constants.MAX_STRING_LENGTH; } - //$FALL-THROUGH$ - case Value.STRING_FIXED: - case Value.STRING_IGNORECASE: - if (precision < 0) { - precision = Integer.MAX_VALUE; + return new TypeInfo(Value.BINARY, precision); + case Value.VARBINARY: + if (precision < 1 || precision >= Constants.MAX_STRING_LENGTH) { + if (precision != 0) { + return TYPE_VARBINARY; + } + precision = 1; } - return new TypeInfo(type, precision, 0, MathUtils.convertLongToInt(precision), null); + return new TypeInfo(Value.VARBINARY, precision); case Value.BLOB: - case Value.CLOB: - if (precision < 0) { - precision = Long.MAX_VALUE; + if (precision < 1) { + return TYPE_BLOB; } - return new TypeInfo(type, precision, 0, MathUtils.convertLongToInt(precision), null); - case Value.GEOMETRY: - if (extTypeInfo instanceof ExtTypeInfoGeometry) { - return new TypeInfo(Value.GEOMETRY, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, extTypeInfo); - } else { - return TYPE_GEOMETRY; + return new TypeInfo(Value.BLOB, precision); + case Value.NUMERIC: + if (precision < 1) { + precision = -1L; + } else if (precision > Constants.MAX_NUMERIC_PRECISION) { + precision = Constants.MAX_NUMERIC_PRECISION; } - case Value.ENUM: - if (extTypeInfo instanceof ExtTypeInfoEnum) { - return 
((ExtTypeInfoEnum) extTypeInfo).getType(); - } else { - return TYPE_ENUM_UNDEFINED; + if (scale < 0) { + scale = -1; + } else if (scale > ValueNumeric.MAXIMUM_SCALE) { + scale = ValueNumeric.MAXIMUM_SCALE; + } + return new TypeInfo(Value.NUMERIC, precision, scale, + extTypeInfo instanceof ExtTypeInfoNumeric ? extTypeInfo : null); + case Value.REAL: + if (precision >= 1 && precision <= 24) { + return new TypeInfo(Value.REAL, precision, -1, extTypeInfo); + } + return TYPE_REAL; + case Value.DOUBLE: + if (precision == 0 || precision >= 25 && precision <= 53) { + return new TypeInfo(Value.DOUBLE, precision, -1, extTypeInfo); + } + return TYPE_DOUBLE; + case Value.DECFLOAT: + if (precision < 1) { + precision = -1L; + } else if (precision >= Constants.MAX_NUMERIC_PRECISION) { + return TYPE_DECFLOAT; } + return new TypeInfo(Value.DECFLOAT, precision, -1, null); + case Value.TIME: + if (scale < 0) { + scale = -1; + } else if (scale >= ValueTime.MAXIMUM_SCALE) { + return TYPE_TIME; + } + return new TypeInfo(Value.TIME, scale); + case Value.TIME_TZ: + if (scale < 0) { + scale = -1; + } else if (scale >= ValueTime.MAXIMUM_SCALE) { + return TYPE_TIME_TZ; + } + return new TypeInfo(Value.TIME_TZ, scale); + case Value.TIMESTAMP: + if (scale < 0) { + scale = -1; + } else if (scale >= ValueTimestamp.MAXIMUM_SCALE) { + return TYPE_TIMESTAMP; + } + return new TypeInfo(Value.TIMESTAMP, scale); + case Value.TIMESTAMP_TZ: + if (scale < 0) { + scale = -1; + } else if (scale >= ValueTimestamp.MAXIMUM_SCALE) { + return TYPE_TIMESTAMP_TZ; + } + return new TypeInfo(Value.TIMESTAMP_TZ, scale); case Value.INTERVAL_YEAR: case Value.INTERVAL_MONTH: case Value.INTERVAL_DAY: @@ -354,32 +440,485 @@ public static TypeInfo getTypeInfo(int type, long precision, int scale, ExtTypeI case Value.INTERVAL_DAY_TO_HOUR: case Value.INTERVAL_DAY_TO_MINUTE: case Value.INTERVAL_HOUR_TO_MINUTE: - if (precision < 1 || precision > ValueInterval.MAXIMUM_PRECISION) { + if (precision < 1) { + precision = -1L; + } else 
if (precision > ValueInterval.MAXIMUM_PRECISION) { precision = ValueInterval.MAXIMUM_PRECISION; } - return new TypeInfo(type, precision, 0, ValueInterval.getDisplaySize(type, (int) precision, 0), null); + return new TypeInfo(type, precision); case Value.INTERVAL_SECOND: case Value.INTERVAL_DAY_TO_SECOND: case Value.INTERVAL_HOUR_TO_SECOND: case Value.INTERVAL_MINUTE_TO_SECOND: - if (precision < 1 || precision > ValueInterval.MAXIMUM_PRECISION) { + if (precision < 1) { + precision = -1L; + } else if (precision > ValueInterval.MAXIMUM_PRECISION) { precision = ValueInterval.MAXIMUM_PRECISION; } - if (scale < 0 || scale > ValueInterval.MAXIMUM_SCALE) { + if (scale < 0) { + scale = -1; + } else if (scale > ValueInterval.MAXIMUM_SCALE) { scale = ValueInterval.MAXIMUM_SCALE; } - return new TypeInfo(type, precision, scale, ValueInterval.getDisplaySize(type, (int) precision, scale), - null); - } - CustomDataTypesHandler handler = JdbcUtils.customDataTypesHandler; - if (handler != null) { - if (handler.getDataTypeById(type) != null) { - return handler.getTypeInfoById(type, precision, scale, extTypeInfo); + return new TypeInfo(type, precision, scale, null); + case Value.JAVA_OBJECT: + if (precision < 1) { + return TYPE_JAVA_OBJECT; + } else if (precision > Constants.MAX_STRING_LENGTH) { + precision = Constants.MAX_STRING_LENGTH; + } + return new TypeInfo(Value.JAVA_OBJECT, precision); + case Value.ENUM: + if (extTypeInfo instanceof ExtTypeInfoEnum) { + return ((ExtTypeInfoEnum) extTypeInfo).getType(); + } else { + return TYPE_ENUM_UNDEFINED; + } + case Value.GEOMETRY: + if (extTypeInfo instanceof ExtTypeInfoGeometry) { + return new TypeInfo(Value.GEOMETRY, -1L, -1, extTypeInfo); + } else { + return TYPE_GEOMETRY; + } + case Value.JSON: + if (precision < 1) { + return TYPE_JSON; + } else if (precision > Constants.MAX_STRING_LENGTH) { + precision = Constants.MAX_STRING_LENGTH; } + return new TypeInfo(Value.JSON, precision); + case Value.ARRAY: + if (!(extTypeInfo instanceof 
TypeInfo)) { + throw new IllegalArgumentException(); + } + if (precision < 0 || precision >= Constants.MAX_ARRAY_CARDINALITY) { + precision = -1L; + } + return new TypeInfo(Value.ARRAY, precision, -1, extTypeInfo); + case Value.ROW: + if (!(extTypeInfo instanceof ExtTypeInfoRow)) { + throw new IllegalArgumentException(); + } + return new TypeInfo(Value.ROW, -1L, -1, extTypeInfo); } return TYPE_NULL; } + /** + * Get the higher data type of all values. + * + * @param values + * the values + * @return the higher data type + */ + public static TypeInfo getHigherType(Typed[] values) { + int cardinality = values.length; + TypeInfo type; + if (cardinality == 0) { + type = TypeInfo.TYPE_NULL; + } else { + type = values[0].getType(); + boolean hasUnknown = false, hasNull = false; + switch (type.getValueType()) { + case Value.UNKNOWN: + hasUnknown = true; + break; + case Value.NULL: + hasNull = true; + } + for (int i = 1; i < cardinality; i++) { + TypeInfo t = values[i].getType(); + switch (t.getValueType()) { + case Value.UNKNOWN: + hasUnknown = true; + break; + case Value.NULL: + hasNull = true; + break; + default: + type = getHigherType(type, t); + } + } + if (type.getValueType() <= Value.NULL && hasUnknown) { + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, hasNull ? "NULL, ?" : "?"); + } + } + return type; + } + + /** + * Get the higher data type of two data types. If values need to be + * converted to match the other operands data type, the value with the lower + * order is converted to the value with the higher order. 
+ * + * @param type1 + * the first data type + * @param type2 + * the second data type + * @return the higher data type of the two + */ + public static TypeInfo getHigherType(TypeInfo type1, TypeInfo type2) { + int t1 = type1.getValueType(), t2 = type2.getValueType(), dataType; + if (t1 == t2) { + if (t1 == Value.UNKNOWN) { + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "?, ?"); + } + dataType = t1; + } else { + if (t1 < t2) { + int t = t1; + t1 = t2; + t2 = t; + TypeInfo type = type1; + type1 = type2; + type2 = type; + } + if (t1 == Value.UNKNOWN) { + if (t2 == Value.NULL) { + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "?, NULL"); + } + return type2; + } else if (t2 == Value.UNKNOWN) { + if (t1 == Value.NULL) { + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "NULL, ?"); + } + return type1; + } + if (t2 == Value.NULL) { + return type1; + } + dataType = Value.getHigherOrderKnown(t1, t2); + } + long precision; + switch (dataType) { + case Value.NUMERIC: { + type1 = type1.toNumericType(); + type2 = type2.toNumericType(); + long precision1 = type1.getPrecision(), precision2 = type2.getPrecision(); + int scale1 = type1.getScale(), scale2 = type2.getScale(), scale; + if (scale1 < scale2) { + precision1 += scale2 - scale1; + scale = scale2; + } else { + precision2 += scale1 - scale2; + scale = scale1; + } + return TypeInfo.getTypeInfo(Value.NUMERIC, Math.max(precision1, precision2), scale, null); + } + case Value.REAL: + case Value.DOUBLE: + precision = -1L; + break; + case Value.ARRAY: + return getHigherArray(type1, type2, dimensions(type1), dimensions(type2)); + case Value.ROW: + return getHigherRow(type1, type2); + default: + precision = Math.max(type1.getPrecision(), type2.getPrecision()); + } + ExtTypeInfo ext1 = type1.extTypeInfo; + return TypeInfo.getTypeInfo(dataType, // + precision, // + Math.max(type1.getScale(), type2.getScale()), // + dataType == t1 && ext1 != null ? ext1 : dataType == t2 ? 
type2.extTypeInfo : null); + } + + private static int dimensions(TypeInfo type) { + int result; + for (result = 0; type.getValueType() == Value.ARRAY; result++) { + type = (TypeInfo) type.extTypeInfo; + } + return result; + } + + private static TypeInfo getHigherArray(TypeInfo type1, TypeInfo type2, int d1, int d2) { + long precision; + if (d1 > d2) { + d1--; + precision = Math.max(type1.getPrecision(), 1L); + type1 = (TypeInfo) type1.extTypeInfo; + } else if (d1 < d2) { + d2--; + precision = Math.max(1L, type2.getPrecision()); + type2 = (TypeInfo) type2.extTypeInfo; + } else if (d1 > 0) { + d1--; + d2--; + precision = Math.max(type1.getPrecision(), type2.getPrecision()); + type1 = (TypeInfo) type1.extTypeInfo; + type2 = (TypeInfo) type2.extTypeInfo; + } else { + return getHigherType(type1, type2); + } + return TypeInfo.getTypeInfo(Value.ARRAY, precision, 0, getHigherArray(type1, type2, d1, d2)); + } + + private static TypeInfo getHigherRow(TypeInfo type1, TypeInfo type2) { + if (type1.getValueType() != Value.ROW) { + type1 = typeToRow(type1); + } + if (type2.getValueType() != Value.ROW) { + type2 = typeToRow(type2); + } + ExtTypeInfoRow ext1 = (ExtTypeInfoRow) type1.getExtTypeInfo(), ext2 = (ExtTypeInfoRow) type2.getExtTypeInfo(); + if (ext1.equals(ext2)) { + return type1; + } + Set> m1 = ext1.getFields(), m2 = ext2.getFields(); + int degree = m1.size(); + if (m2.size() != degree) { + throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); + } + LinkedHashMap m = new LinkedHashMap<>((int) Math.ceil(degree / .75)); + for (Iterator> i1 = m1.iterator(), i2 = m2.iterator(); i1.hasNext();) { + Map.Entry e1 = i1.next(); + m.put(e1.getKey(), getHigherType(e1.getValue(), i2.next().getValue())); + } + return TypeInfo.getTypeInfo(Value.ROW, 0, 0, new ExtTypeInfoRow(m)); + } + + private static TypeInfo typeToRow(TypeInfo type) { + LinkedHashMap map = new LinkedHashMap<>(2); + map.put("C1", type); + return TypeInfo.getTypeInfo(Value.ROW, 0, 0, new ExtTypeInfoRow(map)); 
+ } + + /** + * Determines whether two specified types are the same data types without + * taking precision or scale into account. + * + * @param t1 + * first data type + * @param t2 + * second data type + * @return whether types are the same + */ + public static boolean areSameTypes(TypeInfo t1, TypeInfo t2) { + for (;;) { + int valueType = t1.getValueType(); + if (valueType != t2.getValueType()) { + return false; + } + ExtTypeInfo ext1 = t1.getExtTypeInfo(), ext2 = t2.getExtTypeInfo(); + if (valueType != Value.ARRAY) { + return Objects.equals(ext1, ext2); + } + t1 = (TypeInfo) ext1; + t2 = (TypeInfo) ext2; + } + } + + /** + * Checks whether two specified types are comparable and throws an exception + * otherwise. + * + * @param t1 + * first data type + * @param t2 + * second data type + * @throws DbException + * if types aren't comparable + */ + public static void checkComparable(TypeInfo t1, TypeInfo t2) { + if (!areComparable(t1, t2)) { + throw DbException.get(ErrorCode.TYPES_ARE_NOT_COMPARABLE_2, t1.getTraceSQL(), t2.getTraceSQL()); + } + } + + /** + * Determines whether two specified types are comparable. 
+ * + * @param t1 + * first data type + * @param t2 + * second data type + * @return whether types are comparable + */ + private static boolean areComparable(TypeInfo t1, TypeInfo t2) { + int vt1 = (t1 = t1.unwrapRow()).getValueType(), vt2 = (t2 = t2.unwrapRow()).getValueType(); + if (vt1 > vt2) { + int vt = vt1; + vt1 = vt2; + vt2 = vt; + TypeInfo t = t1; + t1 = t2; + t2 = t; + } + if (vt1 <= Value.NULL) { + return true; + } + if (vt1 == vt2) { + switch (vt1) { + case Value.ARRAY: + return areComparable((TypeInfo) t1.getExtTypeInfo(), (TypeInfo) t2.getExtTypeInfo()); + case Value.ROW: { + Set> f1 = ((ExtTypeInfoRow) t1.getExtTypeInfo()).getFields(); + Set> f2 = ((ExtTypeInfoRow) t2.getExtTypeInfo()).getFields(); + int degree = f1.size(); + if (f2.size() != degree) { + return false; + } + Iterator> i1 = f1.iterator(), i2 = f2.iterator(); + while (i1.hasNext()) { + if (!areComparable(i1.next().getValue(), i2.next().getValue())) { + return false; + } + } + } + //$FALL-THROUGH$ + default: + return true; + } + } + byte g1 = Value.GROUPS[vt1], g2 = Value.GROUPS[vt2]; + if (g1 == g2) { + switch (g1) { + default: + return true; + case Value.GROUP_DATETIME: + return vt1 != Value.DATE || vt2 != Value.TIME && vt2 != Value.TIME_TZ; + case Value.GROUP_OTHER: + case Value.GROUP_COLLECTION: + return false; + } + } + switch (g1) { + case Value.GROUP_CHARACTER_STRING: + switch (g2) { + case Value.GROUP_NUMERIC: + case Value.GROUP_DATETIME: + case Value.GROUP_INTERVAL_YM: + case Value.GROUP_INTERVAL_DT: + return true; + case Value.GROUP_OTHER: + switch (vt2) { + case Value.ENUM: + case Value.GEOMETRY: + case Value.JSON: + case Value.UUID: + return true; + default: + return false; + } + default: + return false; + } + case Value.GROUP_BINARY_STRING: + switch (vt2) { + case Value.JAVA_OBJECT: + case Value.GEOMETRY: + case Value.JSON: + case Value.UUID: + return true; + default: + return false; + } + } + return false; + } + + /** + * Determines whether two specified types have the same 
ordering rules. + * + * @param t1 + * first data type + * @param t2 + * second data type + * @return whether types are comparable + */ + public static boolean haveSameOrdering(TypeInfo t1, TypeInfo t2) { + int vt1 = (t1 = t1.unwrapRow()).getValueType(), vt2 = (t2 = t2.unwrapRow()).getValueType(); + if (vt1 > vt2) { + int vt = vt1; + vt1 = vt2; + vt2 = vt; + TypeInfo t = t1; + t1 = t2; + t2 = t; + } + if (vt1 <= Value.NULL) { + return true; + } + if (vt1 == vt2) { + switch (vt1) { + case Value.ARRAY: + return haveSameOrdering((TypeInfo) t1.getExtTypeInfo(), (TypeInfo) t2.getExtTypeInfo()); + case Value.ROW: { + Set> f1 = ((ExtTypeInfoRow) t1.getExtTypeInfo()).getFields(); + Set> f2 = ((ExtTypeInfoRow) t2.getExtTypeInfo()).getFields(); + int degree = f1.size(); + if (f2.size() != degree) { + return false; + } + Iterator> i1 = f1.iterator(), i2 = f2.iterator(); + while (i1.hasNext()) { + if (!haveSameOrdering(i1.next().getValue(), i2.next().getValue())) { + return false; + } + } + } + //$FALL-THROUGH$ + default: + return true; + } + } + byte g1 = Value.GROUPS[vt1], g2 = Value.GROUPS[vt2]; + if (g1 == g2) { + switch (g1) { + default: + return true; + case Value.GROUP_CHARACTER_STRING: + return (vt1 == Value.VARCHAR_IGNORECASE) == (vt2 == Value.VARCHAR_IGNORECASE); + case Value.GROUP_DATETIME: + switch (vt1) { + case Value.DATE: + return vt2 == Value.TIMESTAMP || vt2 == Value.TIMESTAMP_TZ; + case Value.TIME: + case Value.TIME_TZ: + return vt2 == Value.TIME || vt2 == Value.TIME_TZ; + default: // TIMESTAMP TIMESTAMP_TZ + return true; + } + case Value.GROUP_OTHER: + case Value.GROUP_COLLECTION: + return false; + } + } + if (g1 == Value.GROUP_BINARY_STRING) { + switch (vt2) { + case Value.JAVA_OBJECT: + case Value.GEOMETRY: + case Value.JSON: + case Value.UUID: + return true; + default: + return false; + } + } + return false; + } + + private TypeInfo(int valueType) { + this.valueType = valueType; + precision = -1L; + scale = -1; + extTypeInfo = null; + } + + private 
TypeInfo(int valueType, long precision) { + this.valueType = valueType; + this.precision = precision; + scale = -1; + extTypeInfo = null; + } + + private TypeInfo(int valueType, int scale) { + this.valueType = valueType; + precision = -1L; + this.scale = scale; + extTypeInfo = null; + } + /** * Creates new instance of data type with parameters. * @@ -389,19 +928,26 @@ public static TypeInfo getTypeInfo(int type, long precision, int scale, ExtTypeI * the precision * @param scale * the scale - * @param displaySize - * the display size in characters * @param extTypeInfo * the extended type information, or null */ - public TypeInfo(int valueType, long precision, int scale, int displaySize, ExtTypeInfo extTypeInfo) { + public TypeInfo(int valueType, long precision, int scale, ExtTypeInfo extTypeInfo) { this.valueType = valueType; this.precision = precision; this.scale = scale; - this.displaySize = displaySize; this.extTypeInfo = extTypeInfo; } + /** + * Returns this type information. + * + * @return this + */ + @Override + public TypeInfo getType() { + return this; + } + /** * Returns the value type. * @@ -417,6 +963,94 @@ public int getValueType() { * @return the precision */ public long getPrecision() { + switch (valueType) { + case Value.UNKNOWN: + return -1L; + case Value.NULL: + return ValueNull.PRECISION; + case Value.CHAR: + case Value.BINARY: + return precision >= 0L ? precision : 1L; + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.VARBINARY: + case Value.JAVA_OBJECT: + case Value.ENUM: + case Value.GEOMETRY: + case Value.JSON: + return precision >= 0L ? precision : Constants.MAX_STRING_LENGTH; + case Value.CLOB: + case Value.BLOB: + return precision >= 0L ? 
precision : Long.MAX_VALUE; + case Value.BOOLEAN: + return ValueBoolean.PRECISION; + case Value.TINYINT: + return ValueTinyint.PRECISION; + case Value.SMALLINT: + return ValueSmallint.PRECISION; + case Value.INTEGER: + return ValueInteger.PRECISION; + case Value.BIGINT: + return ValueBigint.PRECISION; + case Value.NUMERIC: + return precision >= 0L ? precision : Constants.MAX_NUMERIC_PRECISION; + case Value.REAL: + return ValueReal.PRECISION; + case Value.DOUBLE: + return ValueDouble.PRECISION; + case Value.DECFLOAT: + return precision >= 0L ? precision : Constants.MAX_NUMERIC_PRECISION; + case Value.DATE: + return ValueDate.PRECISION; + case Value.TIME: { + int s = scale >= 0 ? scale : ValueTime.DEFAULT_SCALE; + return s == 0 ? 8 : 9 + s; + } + case Value.TIME_TZ: { + int s = scale >= 0 ? scale : ValueTime.DEFAULT_SCALE; + return s == 0 ? 14 : 15 + s; + } + case Value.TIMESTAMP: { + int s = scale >= 0 ? scale : ValueTimestamp.DEFAULT_SCALE; + return s == 0 ? 19 : 20 + s; + } + case Value.TIMESTAMP_TZ: { + int s = scale >= 0 ? scale : ValueTimestamp.DEFAULT_SCALE; + return s == 0 ? 25 : 26 + s; + } + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + return precision >= 0L ? precision : ValueInterval.DEFAULT_PRECISION; + case Value.ROW: + return Integer.MAX_VALUE; + case Value.UUID: + return ValueUuid.PRECISION; + case Value.ARRAY: + return precision >= 0L ? precision : Constants.MAX_ARRAY_CARDINALITY; + default: + return precision; + } + } + + /** + * Returns the precision, or {@code -1L} if not specified in data type + * definition. 
+ * + * @return the precision, or {@code -1L} if not specified in data type + * definition + */ + public long getDeclaredPrecision() { return precision; } @@ -426,6 +1060,68 @@ public long getPrecision() { * @return the scale */ public int getScale() { + switch (valueType) { + case Value.UNKNOWN: + return -1; + case Value.NULL: + case Value.CHAR: + case Value.VARCHAR: + case Value.CLOB: + case Value.VARCHAR_IGNORECASE: + case Value.BINARY: + case Value.VARBINARY: + case Value.BLOB: + case Value.BOOLEAN: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + case Value.REAL: + case Value.DOUBLE: + case Value.DECFLOAT: + case Value.DATE: + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.JAVA_OBJECT: + case Value.ENUM: + case Value.GEOMETRY: + case Value.JSON: + case Value.UUID: + case Value.ARRAY: + case Value.ROW: + return 0; + case Value.NUMERIC: + return scale >= 0 ? scale : 0; + case Value.TIME: + case Value.TIME_TZ: + return scale >= 0 ? scale : ValueTime.DEFAULT_SCALE; + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + return scale >= 0 ? scale : ValueTimestamp.DEFAULT_SCALE; + case Value.INTERVAL_SECOND: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + return scale >= 0 ? scale : ValueInterval.DEFAULT_SCALE; + default: + return scale; + } + } + + /** + * Returns the scale, or {@code -1} if not specified in data type + * definition. 
+ * + * @return the scale, or {@code -1} if not specified in data type definition + */ + public int getDeclaredScale() { return scale; } @@ -435,7 +1131,88 @@ public int getScale() { * @return the display size */ public int getDisplaySize() { - return displaySize; + switch (valueType) { + case Value.UNKNOWN: + default: + return -1; + case Value.NULL: + return ValueNull.DISPLAY_SIZE; + case Value.CHAR: + return precision >= 0 ? (int) precision : 1; + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.JSON: + return precision >= 0 ? (int) precision : Constants.MAX_STRING_LENGTH; + case Value.CLOB: + return precision >= 0 && precision <= Integer.MAX_VALUE ? (int) precision : Integer.MAX_VALUE; + case Value.BINARY: + return precision >= 0 ? (int) precision * 2 : 2; + case Value.VARBINARY: + case Value.JAVA_OBJECT: + return precision >= 0 ? (int) precision * 2 : Constants.MAX_STRING_LENGTH * 2; + case Value.BLOB: + return precision >= 0 && precision <= Integer.MAX_VALUE / 2 ? (int) precision * 2 : Integer.MAX_VALUE; + case Value.BOOLEAN: + return ValueBoolean.DISPLAY_SIZE; + case Value.TINYINT: + return ValueTinyint.DISPLAY_SIZE; + case Value.SMALLINT: + return ValueSmallint.DISPLAY_SIZE; + case Value.INTEGER: + return ValueInteger.DISPLAY_SIZE; + case Value.BIGINT: + return ValueBigint.DISPLAY_SIZE; + case Value.NUMERIC: + return precision >= 0 ? (int) precision + 2 : Constants.MAX_NUMERIC_PRECISION + 2; + case Value.REAL: + return ValueReal.DISPLAY_SIZE; + case Value.DOUBLE: + return ValueDouble.DISPLAY_SIZE; + case Value.DECFLOAT: + return precision >= 0 ? (int) precision + 12 : Constants.MAX_NUMERIC_PRECISION + 12; + case Value.DATE: + return ValueDate.PRECISION; + case Value.TIME: { + int s = scale >= 0 ? scale : ValueTime.DEFAULT_SCALE; + return s == 0 ? 8 : 9 + s; + } + case Value.TIME_TZ: { + int s = scale >= 0 ? scale : ValueTime.DEFAULT_SCALE; + return s == 0 ? 14 : 15 + s; + } + case Value.TIMESTAMP: { + int s = scale >= 0 ? 
scale : ValueTimestamp.DEFAULT_SCALE; + return s == 0 ? 19 : 20 + s; + } + case Value.TIMESTAMP_TZ: { + int s = scale >= 0 ? scale : ValueTimestamp.DEFAULT_SCALE; + return s == 0 ? 25 : 26 + s; + } + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + return ValueInterval.getDisplaySize(valueType, + precision >= 0 ? (int) precision : ValueInterval.DEFAULT_PRECISION, + scale >= 0 ? scale : ValueInterval.DEFAULT_SCALE); + case Value.GEOMETRY: + case Value.ARRAY: + case Value.ROW: + return Integer.MAX_VALUE; + case Value.ENUM: + return extTypeInfo != null ? (int) precision : Constants.MAX_STRING_LENGTH; + case Value.UUID: + return ValueUuid.DISPLAY_SIZE; + } } /** @@ -447,52 +1224,273 @@ public ExtTypeInfo getExtTypeInfo() { return extTypeInfo; } - /** - * Appends SQL representation of this object to the specified string - * builder. 
- * - * @param builder - * string builder - * @return the specified string builder - */ - public StringBuilder getSQL(StringBuilder builder) { - DataType dataType = DataType.getDataType(valueType); - if (valueType == Value.TIMESTAMP_TZ) { - builder.append("TIMESTAMP"); - } else { - builder.append(dataType.name); - } + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { switch (valueType) { - case Value.DECIMAL: - builder.append('(').append(precision).append(", ").append(scale).append(')'); + case Value.CHAR: + case Value.VARCHAR: + case Value.CLOB: + case Value.VARCHAR_IGNORECASE: + case Value.BINARY: + case Value.VARBINARY: + case Value.BLOB: + case Value.JAVA_OBJECT: + case Value.JSON: + builder.append(Value.getTypeName(valueType)); + if (precision >= 0L) { + builder.append('(').append(precision).append(')'); + } break; - case Value.GEOMETRY: - if (extTypeInfo == null) { - break; + case Value.NUMERIC: { + if (extTypeInfo != null) { + extTypeInfo.getSQL(builder, sqlFlags); + } else { + builder.append("NUMERIC"); + } + boolean withPrecision = precision >= 0; + boolean withScale = scale >= 0; + if (withPrecision || withScale) { + builder.append('(').append(withPrecision ? 
precision : Constants.MAX_NUMERIC_PRECISION); + if (withScale) { + builder.append(", ").append(scale); + } + builder.append(')'); } - //$FALL-THROUGH$ - case Value.ENUM: - builder.append(extTypeInfo.getCreateSQL()); break; - case Value.BYTES: - case Value.STRING: - case Value.STRING_IGNORECASE: - case Value.STRING_FIXED: - if (precision < Integer.MAX_VALUE) { + } + case Value.REAL: + case Value.DOUBLE: + if (precision < 0) { + builder.append(Value.getTypeName(valueType)); + } else { + builder.append("FLOAT"); + if (precision > 0) { + builder.append('(').append(precision).append(')'); + } + } + break; + case Value.DECFLOAT: + builder.append("DECFLOAT"); + if (precision >= 0) { builder.append('(').append(precision).append(')'); } break; case Value.TIME: + case Value.TIME_TZ: + builder.append("TIME"); + if (scale >= 0) { + builder.append('(').append(scale).append(')'); + } + if (valueType == Value.TIME_TZ) { + builder.append(" WITH TIME ZONE"); + } + break; case Value.TIMESTAMP: case Value.TIMESTAMP_TZ: - if (scale != dataType.defaultScale) { + builder.append("TIMESTAMP"); + if (scale >= 0) { builder.append('(').append(scale).append(')'); } if (valueType == Value.TIMESTAMP_TZ) { builder.append(" WITH TIME ZONE"); } + break; + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + IntervalQualifier.valueOf(valueType - Value.INTERVAL_YEAR).getTypeName(builder, (int) precision, scale, + false); + break; + case Value.ENUM: + extTypeInfo.getSQL(builder.append("ENUM"), sqlFlags); + break; + case Value.GEOMETRY: + builder.append("GEOMETRY"); + if (extTypeInfo != null) { + extTypeInfo.getSQL(builder, 
sqlFlags); + } + break; + case Value.ARRAY: + if (extTypeInfo != null) { + extTypeInfo.getSQL(builder, sqlFlags).append(' '); + } + builder.append("ARRAY"); + if (precision >= 0L) { + builder.append('[').append(precision).append(']'); + } + break; + case Value.ROW: + builder.append("ROW"); + if (extTypeInfo != null) { + extTypeInfo.getSQL(builder, sqlFlags); + } + break; + default: + builder.append(Value.getTypeName(valueType)); } return builder; } + @Override + public int hashCode() { + int result = 1; + result = 31 * result + valueType; + result = 31 * result + (int) (precision ^ (precision >>> 32)); + result = 31 * result + scale; + result = 31 * result + ((extTypeInfo == null) ? 0 : extTypeInfo.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != TypeInfo.class) { + return false; + } + TypeInfo other = (TypeInfo) obj; + return valueType == other.valueType && precision == other.precision && scale == other.scale + && Objects.equals(extTypeInfo, other.extTypeInfo); + } + + /** + * Convert this type information to compatible NUMERIC type information. 
+ * + * @return NUMERIC type information + */ + public TypeInfo toNumericType() { + switch (valueType) { + case Value.BOOLEAN: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + return getTypeInfo(Value.NUMERIC, getDecimalPrecision(), 0, null); + case Value.BIGINT: + return TYPE_NUMERIC_BIGINT; + case Value.NUMERIC: + return this; + case Value.REAL: + // Smallest REAL value is 1.4E-45 with precision 2 and scale 46 + // Largest REAL value is 3.4028235E+38 with precision 8 and scale + // -31 + return getTypeInfo(Value.NUMERIC, 85, 46, null); + case Value.DOUBLE: + // Smallest DOUBLE value is 4.9E-324 with precision 2 and scale 325 + // Largest DOUBLE value is 1.7976931348623157E+308 with precision 17 + // and scale -292 + return getTypeInfo(Value.NUMERIC, 634, 325, null); + default: + return TYPE_NUMERIC_FLOATING_POINT; + } + } + + /** + * Convert this type information to compatible DECFLOAT type information. + * + * @return DECFLOAT type information + */ + public TypeInfo toDecfloatType() { + switch (valueType) { + case Value.BOOLEAN: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + return getTypeInfo(Value.DECFLOAT, getDecimalPrecision(), 0, null); + case Value.BIGINT: + return TYPE_DECFLOAT_BIGINT; + case Value.NUMERIC: + return getTypeInfo(Value.DECFLOAT, getPrecision(), 0, null); + case Value.REAL: + return getTypeInfo(Value.DECFLOAT, ValueReal.DECIMAL_PRECISION, 0, null); + case Value.DOUBLE: + return getTypeInfo(Value.DECFLOAT, ValueReal.DECIMAL_PRECISION, 0, null); + case Value.DECFLOAT: + return this; + default: + return TYPE_DECFLOAT; + } + } + + /** + * Returns unwrapped data type if this data type is a row type with degree 1 + * or this type otherwise. 
+ * + * @return unwrapped data type if this data type is a row type with degree 1 + * or this type otherwise + */ + public TypeInfo unwrapRow() { + if (valueType == Value.ROW) { + Set> fields = ((ExtTypeInfoRow) extTypeInfo).getFields(); + if (fields.size() == 1) { + return fields.iterator().next().getValue().unwrapRow(); + } + } + return this; + } + + /** + * Returns approximate precision in decimal digits for binary numeric data + * types and precision for all other types. + * + * @return precision in decimal digits + */ + public long getDecimalPrecision() { + switch (valueType) { + case Value.TINYINT: + return ValueTinyint.DECIMAL_PRECISION; + case Value.SMALLINT: + return ValueSmallint.DECIMAL_PRECISION; + case Value.INTEGER: + return ValueInteger.DECIMAL_PRECISION; + case Value.BIGINT: + return ValueBigint.DECIMAL_PRECISION; + case Value.REAL: + return ValueReal.DECIMAL_PRECISION; + case Value.DOUBLE: + return ValueDouble.DECIMAL_PRECISION; + default: + return precision; + } + } + + /** + * Returns the declared name of this data type with precision, scale, + * length, cardinality etc. parameters removed, excluding parameters of ENUM + * data type, GEOMETRY data type, ARRAY elements, and ROW fields. + * + * @return the declared name + */ + public String getDeclaredTypeName() { + switch (valueType) { + case Value.NUMERIC: + return extTypeInfo != null ? 
"DECIMAL" : "NUMERIC"; + case Value.REAL: + case Value.DOUBLE: + if (extTypeInfo != null) { + return "FLOAT"; + } + break; + case Value.ENUM: + case Value.GEOMETRY: + case Value.ROW: + return getSQL(DEFAULT_SQL_FLAGS); + case Value.ARRAY: + TypeInfo typeInfo = (TypeInfo) extTypeInfo; + // Use full type names with parameters for elements + return typeInfo.getSQL(new StringBuilder(), DEFAULT_SQL_FLAGS).append(" ARRAY").toString(); + } + return Value.getTypeName(valueType); + } + } diff --git a/h2/src/main/org/h2/value/Typed.java b/h2/src/main/org/h2/value/Typed.java new file mode 100644 index 0000000000..8ec898eeaa --- /dev/null +++ b/h2/src/main/org/h2/value/Typed.java @@ -0,0 +1,20 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +/** + * An object with data type. + */ +public interface Typed { + + /** + * Returns the data type. + * + * @return the data type + */ + TypeInfo getType(); + +} diff --git a/h2/src/main/org/h2/value/Value.java b/h2/src/main/org/h2/value/Value.java index fd018a4733..dfc57e3100 100644 --- a/h2/src/main/org/h2/value/Value.java +++ b/h2/src/main/org/h2/value/Value.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.value; import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; import java.io.InputStream; import java.io.Reader; import java.io.StringReader; @@ -13,24 +14,30 @@ import java.math.BigDecimal; import java.math.RoundingMode; import java.nio.charset.StandardCharsets; -import java.sql.Date; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Time; -import java.sql.Timestamp; +import java.util.Arrays; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + import org.h2.api.ErrorCode; import org.h2.api.IntervalQualifier; -import org.h2.engine.Mode; +import org.h2.engine.CastDataProvider; +import org.h2.engine.Mode.CharPadding; import org.h2.engine.SysProperties; import org.h2.message.DbException; -import org.h2.result.ResultInterface; -import org.h2.result.SimpleResult; import org.h2.store.DataHandler; import org.h2.util.Bits; import org.h2.util.DateTimeUtils; +import org.h2.util.HasSQL; import org.h2.util.IntervalUtils; import org.h2.util.JdbcUtils; +import org.h2.util.MathUtils; import org.h2.util.StringUtils; +import org.h2.util.geometry.GeoJsonUtils; +import org.h2.util.json.JsonConstructorUtils; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; +import org.h2.value.lob.LobDataInMemory; /** * This is the base class for all value classes. @@ -40,7 +47,7 @@ * @author Noel Grandin * @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 */ -public abstract class Value extends VersionedValue { +public abstract class Value extends VersionedValue implements HasSQL, Typed { /** * The data type is unknown at this time. @@ -50,216 +57,361 @@ public abstract class Value extends VersionedValue { /** * The value type for NULL. */ - public static final int NULL = 0; - - /** - * The value type for BOOLEAN values. - */ - public static final int BOOLEAN = 1; - - /** - * The value type for BYTE values. 
- */ - public static final int BYTE = 2; + public static final int NULL = UNKNOWN + 1; /** - * The value type for SHORT values. + * The value type for CHARACTER values. */ - public static final int SHORT = 3; + public static final int CHAR = NULL + 1; /** - * The value type for INT values. + * The value type for CHARACTER VARYING values. */ - public static final int INT = 4; + public static final int VARCHAR = CHAR + 1; /** - * The value type for LONG values. + * The value type for CHARACTER LARGE OBJECT values. */ - public static final int LONG = 5; + public static final int CLOB = VARCHAR + 1; /** - * The value type for DECIMAL values. + * The value type for VARCHAR_IGNORECASE values. */ - public static final int DECIMAL = 6; + public static final int VARCHAR_IGNORECASE = CLOB + 1; /** - * The value type for DOUBLE values. + * The value type for BINARY values. */ - public static final int DOUBLE = 7; + public static final int BINARY = VARCHAR_IGNORECASE + 1; /** - * The value type for FLOAT values. + * The value type for BINARY VARYING values. */ - public static final int FLOAT = 8; + public static final int VARBINARY = BINARY + 1; /** - * The value type for TIME values. - */ - public static final int TIME = 9; - - /** - * The value type for DATE values. + * The value type for BINARY LARGE OBJECT values. */ - public static final int DATE = 10; + public static final int BLOB = VARBINARY + 1; /** - * The value type for TIMESTAMP values. + * The value type for BOOLEAN values. */ - public static final int TIMESTAMP = 11; + public static final int BOOLEAN = BLOB + 1; /** - * The value type for BYTES values. + * The value type for TINYINT values. */ - public static final int BYTES = 12; + public static final int TINYINT = BOOLEAN + 1; /** - * The value type for STRING values. + * The value type for SMALLINT values. */ - public static final int STRING = 13; + public static final int SMALLINT = TINYINT + 1; /** - * The value type for case insensitive STRING values. 
+ * The value type for INTEGER values. */ - public static final int STRING_IGNORECASE = 14; + public static final int INTEGER = SMALLINT + 1; /** - * The value type for BLOB values. + * The value type for BIGINT values. */ - public static final int BLOB = 15; + public static final int BIGINT = INTEGER + 1; /** - * The value type for CLOB values. + * The value type for NUMERIC values. */ - public static final int CLOB = 16; + public static final int NUMERIC = BIGINT + 1; /** - * The value type for ARRAY values. + * The value type for REAL values. */ - public static final int ARRAY = 17; + public static final int REAL = NUMERIC + 1; /** - * The value type for RESULT_SET values. + * The value type for DOUBLE PRECISION values. */ - public static final int RESULT_SET = 18; + public static final int DOUBLE = REAL + 1; /** - * The value type for JAVA_OBJECT values. + * The value type for DECFLOAT values. */ - public static final int JAVA_OBJECT = 19; + public static final int DECFLOAT = DOUBLE + 1; /** - * The value type for UUID values. + * The value type for DATE values. */ - public static final int UUID = 20; + public static final int DATE = DECFLOAT + 1; /** - * The value type for string values with a fixed size. + * The value type for TIME values. */ - public static final int STRING_FIXED = 21; + public static final int TIME = DATE + 1; /** - * The value type for string values with a fixed size. + * The value type for TIME WITH TIME ZONE values. */ - public static final int GEOMETRY = 22; + public static final int TIME_TZ = TIME + 1; /** - * 23 was a short-lived experiment "TIMESTAMP UTC" which has been removed. + * The value type for TIMESTAMP values. */ + public static final int TIMESTAMP = TIME_TZ + 1; /** * The value type for TIMESTAMP WITH TIME ZONE values. */ - public static final int TIMESTAMP_TZ = 24; - - /** - * The value type for ENUM values. 
- */ - public static final int ENUM = 25; + public static final int TIMESTAMP_TZ = TIMESTAMP + 1; /** * The value type for {@code INTERVAL YEAR} values. */ - public static final int INTERVAL_YEAR = 26; + public static final int INTERVAL_YEAR = TIMESTAMP_TZ + 1; /** * The value type for {@code INTERVAL MONTH} values. */ - public static final int INTERVAL_MONTH = 27; + public static final int INTERVAL_MONTH = INTERVAL_YEAR + 1; /** * The value type for {@code INTERVAL DAY} values. */ - public static final int INTERVAL_DAY = 28; + public static final int INTERVAL_DAY = INTERVAL_MONTH + 1; /** * The value type for {@code INTERVAL HOUR} values. */ - public static final int INTERVAL_HOUR = 29; + public static final int INTERVAL_HOUR = INTERVAL_DAY + 1; /** * The value type for {@code INTERVAL MINUTE} values. */ - public static final int INTERVAL_MINUTE = 30; + public static final int INTERVAL_MINUTE = INTERVAL_HOUR + 1; /** * The value type for {@code INTERVAL SECOND} values. */ - public static final int INTERVAL_SECOND = 31; + public static final int INTERVAL_SECOND = INTERVAL_MINUTE + 1; /** * The value type for {@code INTERVAL YEAR TO MONTH} values. */ - public static final int INTERVAL_YEAR_TO_MONTH = 32; + public static final int INTERVAL_YEAR_TO_MONTH = INTERVAL_SECOND + 1; /** * The value type for {@code INTERVAL DAY TO HOUR} values. */ - public static final int INTERVAL_DAY_TO_HOUR = 33; + public static final int INTERVAL_DAY_TO_HOUR = INTERVAL_YEAR_TO_MONTH + 1; /** * The value type for {@code INTERVAL DAY TO MINUTE} values. */ - public static final int INTERVAL_DAY_TO_MINUTE = 34; + public static final int INTERVAL_DAY_TO_MINUTE = INTERVAL_DAY_TO_HOUR + 1; /** * The value type for {@code INTERVAL DAY TO SECOND} values. */ - public static final int INTERVAL_DAY_TO_SECOND = 35; + public static final int INTERVAL_DAY_TO_SECOND = INTERVAL_DAY_TO_MINUTE + 1; /** * The value type for {@code INTERVAL HOUR TO MINUTE} values. 
*/ - public static final int INTERVAL_HOUR_TO_MINUTE = 36; + public static final int INTERVAL_HOUR_TO_MINUTE = INTERVAL_DAY_TO_SECOND + 1; /** * The value type for {@code INTERVAL HOUR TO SECOND} values. */ - public static final int INTERVAL_HOUR_TO_SECOND = 37; + public static final int INTERVAL_HOUR_TO_SECOND = INTERVAL_HOUR_TO_MINUTE + 1; /** * The value type for {@code INTERVAL MINUTE TO SECOND} values. */ - public static final int INTERVAL_MINUTE_TO_SECOND = 38; + public static final int INTERVAL_MINUTE_TO_SECOND = INTERVAL_HOUR_TO_SECOND + 1; + + /** + * The value type for JAVA_OBJECT values. + */ + public static final int JAVA_OBJECT = INTERVAL_MINUTE_TO_SECOND + 1; + + /** + * The value type for ENUM values. + */ + public static final int ENUM = JAVA_OBJECT + 1; + + /** + * The value type for string values with a fixed size. + */ + public static final int GEOMETRY = ENUM + 1; + + /** + * The value type for JSON values. + */ + public static final int JSON = GEOMETRY + 1; + + /** + * The value type for UUID values. + */ + public static final int UUID = JSON + 1; + + /** + * The value type for ARRAY values. + */ + public static final int ARRAY = UUID + 1; /** * The value type for ROW values. */ - public static final int ROW = 39; + public static final int ROW = ARRAY + 1; /** * The number of value types. */ public static final int TYPE_COUNT = ROW + 1; + /** + * Group for untyped NULL data type. + */ + static final int GROUP_NULL = 0; + + /** + * Group for character string data types. + */ + static final int GROUP_CHARACTER_STRING = GROUP_NULL + 1; + + /** + * Group for binary string data types. + */ + static final int GROUP_BINARY_STRING = GROUP_CHARACTER_STRING + 1; + + /** + * Group for BINARY data type. + */ + static final int GROUP_BOOLEAN = GROUP_BINARY_STRING + 1; + + /** + * Group for numeric data types. + */ + static final int GROUP_NUMERIC = GROUP_BOOLEAN + 1; + + /** + * Group for datetime data types. 
+ */ + static final int GROUP_DATETIME = GROUP_NUMERIC + 1; + + /** + * Group for year-month interval data types. + */ + static final int GROUP_INTERVAL_YM = GROUP_DATETIME + 1; + + /** + * Group for day-time interval data types. + */ + static final int GROUP_INTERVAL_DT = GROUP_INTERVAL_YM + 1; + + /** + * Group for other data types (JAVA_OBJECT, UUID, GEOMETRY, ENUM, JSON). + */ + static final int GROUP_OTHER = GROUP_INTERVAL_DT + 1; + + /** + * Group for collection data types (ARRAY, ROW). + */ + static final int GROUP_COLLECTION = GROUP_OTHER + 1; + + static final byte GROUPS[] = { + // NULL + GROUP_NULL, + // CHAR, VARCHAR, CLOB, VARCHAR_IGNORECASE + GROUP_CHARACTER_STRING, GROUP_CHARACTER_STRING, GROUP_CHARACTER_STRING, GROUP_CHARACTER_STRING, + // BINARY, VARBINARY, BLOB + GROUP_BINARY_STRING, GROUP_BINARY_STRING, GROUP_BINARY_STRING, + // BOOLEAN + GROUP_BOOLEAN, + // TINYINT, SMALLINT, INTEGER, BIGINT, NUMERIC, REAL, DOUBLE, DECFLOAT + GROUP_NUMERIC, GROUP_NUMERIC, GROUP_NUMERIC, GROUP_NUMERIC, GROUP_NUMERIC, GROUP_NUMERIC, GROUP_NUMERIC, + GROUP_NUMERIC, + // DATE, TIME, TIME_TZ, TIMESTAMP, TIMESTAMP_TZ + GROUP_DATETIME, GROUP_DATETIME, GROUP_DATETIME, GROUP_DATETIME, GROUP_DATETIME, + // INTERVAL_YEAR, INTERVAL_MONTH + GROUP_INTERVAL_YM, GROUP_INTERVAL_YM, + // INTERVAL_DAY, INTERVAL_HOUR, INTERVAL_MINUTE, INTERVAL_SECOND + GROUP_INTERVAL_DT, GROUP_INTERVAL_DT, GROUP_INTERVAL_DT, GROUP_INTERVAL_DT, + // INTERVAL_YEAR_TO_MONTH + GROUP_INTERVAL_YM, + // INTERVAL_DAY_TO_HOUR, INTERVAL_DAY_TO_MINUTE, + // INTERVAL_DAY_TO_SECOND, INTERVAL_HOUR_TO_MINUTE, + // INTERVAL_HOUR_TO_SECOND, INTERVAL_MINUTE_TO_SECOND + GROUP_INTERVAL_DT, GROUP_INTERVAL_DT, GROUP_INTERVAL_DT, GROUP_INTERVAL_DT, GROUP_INTERVAL_DT, + GROUP_INTERVAL_DT, + // JAVA_OBJECT, ENUM, GEOMETRY, JSON, UUID + GROUP_OTHER, GROUP_OTHER, GROUP_OTHER, GROUP_OTHER, GROUP_OTHER, + // ARRAY, ROW + GROUP_COLLECTION, GROUP_COLLECTION, + // + }; + + private static final String NAMES[] = { + "UNKNOWN", + 
"NULL", // + "CHARACTER", "CHARACTER VARYING", "CHARACTER LARGE OBJECT", "VARCHAR_IGNORECASE", // + "BINARY", "BINARY VARYING", "BINARY LARGE OBJECT", // + "BOOLEAN", // + "TINYINT", "SMALLINT", "INTEGER", "BIGINT", // + "NUMERIC", "REAL", "DOUBLE PRECISION", "DECFLOAT", // + "DATE", "TIME", "TIME WITH TIME ZONE", "TIMESTAMP", "TIMESTAMP WITH TIME ZONE", // + "INTERVAL YEAR", "INTERVAL MONTH", // + "INTERVAL DAY", "INTERVAL HOUR", "INTERVAL MINUTE", "INTERVAL SECOND", // + "INTERVAL YEAR TO MONTH", // + "INTERVAL DAY TO HOUR", "INTERVAL DAY TO MINUTE", "INTERVAL DAY TO SECOND", // + "INTERVAL HOUR TO MINUTE", "INTERVAL HOUR TO SECOND", "INTERVAL MINUTE TO SECOND", // + "JAVA_OBJECT", "ENUM", "GEOMETRY", "JSON", "UUID", // + "ARRAY", "ROW", // + }; + + /** + * Empty array of values. + */ + public static final Value[] EMPTY_VALUES = new Value[0]; + private static SoftReference softCache; - private static final BigDecimal MAX_LONG_DECIMAL = BigDecimal.valueOf(Long.MAX_VALUE); + static final BigDecimal MAX_LONG_DECIMAL = BigDecimal.valueOf(Long.MAX_VALUE); /** * The smallest Long value, as a BigDecimal. */ public static final BigDecimal MIN_LONG_DECIMAL = BigDecimal.valueOf(Long.MIN_VALUE); + /** + * Convert a value to the specified type without taking scale and precision + * into account. + */ + static final int CONVERT_TO = 0; + + /** + * Cast a value to the specified type. The scale is set if applicable. The + * value is truncated to a required precision. + */ + static final int CAST_TO = 1; + + /** + * Cast a value to the specified type for assignment. The scale is set if + * applicable. If precision is too large an exception is thrown. + */ + static final int ASSIGN_TO = 2; + + /** + * Returns name of the specified data type. + * + * @param valueType + * the value type + * @return the name + */ + public static String getTypeName(int valueType) { + return NAMES[valueType + 1]; + } + /** * Check the range of the parameters. 
* @@ -276,29 +428,7 @@ static void rangeCheck(long zeroBasedOffset, long length, long dataSize) { } } - /** - * Get the SQL expression for this value. - * - * @return the SQL expression - */ - public String getSQL() { - return getSQL(new StringBuilder()).toString(); - } - - /** - * Appends the SQL expression for this value to the specified builder. - * - * @param builder - * string builder - * @return the specified string builder - */ - public abstract StringBuilder getSQL(StringBuilder builder); - - /** - * Returns the data type. - * - * @return the data type - */ + @Override public abstract TypeInfo getType(); /** @@ -321,29 +451,6 @@ public int getMemory() { return 24; } - /** - * Get the value as a string. - * - * @return the string - */ - public abstract String getString(); - - /** - * Get the value as an object. - * - * @return the object - */ - public abstract Object getObject(); - - /** - * Set the value as a parameter in a prepared statement. - * - * @param prep the prepared statement - * @param parameterIndex the parameter index - */ - public abstract void set(PreparedStatement prep, int parameterIndex) - throws SQLException; - @Override public abstract int hashCode(); @@ -359,102 +466,6 @@ public abstract void set(PreparedStatement prep, int parameterIndex) @Override public abstract boolean equals(Object other); - /** - * Get the order of this value type. 
- * - * @param type the value type - * @return the order number - */ - static int getOrder(int type) { - switch (type) { - case UNKNOWN: - return 1_000; - case NULL: - return 2_000; - case STRING: - return 10_000; - case CLOB: - return 11_000; - case STRING_FIXED: - return 12_000; - case STRING_IGNORECASE: - return 13_000; - case BOOLEAN: - return 20_000; - case BYTE: - return 21_000; - case SHORT: - return 22_000; - case INT: - return 23_000; - case LONG: - return 24_000; - case DECIMAL: - return 25_000; - case FLOAT: - return 26_000; - case DOUBLE: - return 27_000; - case INTERVAL_YEAR: - return 28_000; - case INTERVAL_MONTH: - return 28_100; - case INTERVAL_YEAR_TO_MONTH: - return 28_200; - case INTERVAL_DAY: - return 29_000; - case INTERVAL_HOUR: - return 29_100; - case INTERVAL_DAY_TO_HOUR: - return 29_200; - case INTERVAL_MINUTE: - return 29_300; - case INTERVAL_HOUR_TO_MINUTE: - return 29_400; - case INTERVAL_DAY_TO_MINUTE: - return 29_500; - case INTERVAL_SECOND: - return 29_600; - case INTERVAL_MINUTE_TO_SECOND: - return 29_700; - case INTERVAL_HOUR_TO_SECOND: - return 29_800; - case INTERVAL_DAY_TO_SECOND: - return 29_900; - case TIME: - return 30_000; - case DATE: - return 31_000; - case TIMESTAMP: - return 32_000; - case TIMESTAMP_TZ: - return 34_000; - case BYTES: - return 40_000; - case BLOB: - return 41_000; - case JAVA_OBJECT: - return 42_000; - case UUID: - return 43_000; - case GEOMETRY: - return 44_000; - case ENUM: - return 45_000; - case ARRAY: - return 50_000; - case ROW: - return 51_000; - case RESULT_SET: - return 52_000; - default: - if (JdbcUtils.customDataTypesHandler != null) { - return JdbcUtils.customDataTypesHandler.getDataTypeOrder(type); - } - throw DbException.throwInternalError("type:"+type); - } - } - /** * Get the higher value order type of two value types. 
If values need to be * converted to match the other operands value type, the value with the @@ -465,40 +476,244 @@ static int getOrder(int type) { * @return the higher value type of the two */ public static int getHigherOrder(int t1, int t2) { - if (t1 == Value.UNKNOWN || t2 == Value.UNKNOWN) { - if (t1 == t2) { + if (t1 == t2) { + if (t1 == UNKNOWN) { throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "?, ?"); - } else if (t1 == Value.NULL) { - throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "NULL, ?"); - } else if (t2 == Value.NULL) { + } + return t1; + } + if (t1 < t2) { + int t = t1; + t1 = t2; + t2 = t; + } + if (t1 == UNKNOWN) { + if (t2 == NULL) { throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "?, NULL"); } + return t2; + } else if (t2 == UNKNOWN) { + if (t1 == NULL) { + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "NULL, ?"); + } + return t1; } - if (t1 == t2) { + if (t2 == NULL) { return t1; } - int o1 = getOrder(t1); - int o2 = getOrder(t2); - return o1 > o2 ? t1 : t2; + return getHigherOrderKnown(t1, t2); } - /** - * Get the higher data type of two data types. If values need to be - * converted to match the other operands data type, the value with the - * lower order is converted to the value with the higher order. - * - * @param type1 the first data type - * @param type2 the second data type - * @return the higher data type of the two - */ - public static TypeInfo getHigherType(TypeInfo type1, TypeInfo type2) { - int t1 = type1.getValueType(), t2 = type2.getValueType(); - int dataType = getHigherOrder(t1, t2); - long precision = Math.max(type1.getPrecision(), type2.getPrecision()); - int scale = Math.max(type1.getScale(), type2.getScale()); - ExtTypeInfo ext1 = type1.getExtTypeInfo(); - ExtTypeInfo ext = dataType == t1 && ext1 != null ? ext1 : dataType == t2 ? 
type2.getExtTypeInfo() : null; - return TypeInfo.getTypeInfo(dataType, precision, scale, ext); + static int getHigherOrderKnown(int t1, int t2) { + int g1 = GROUPS[t1], g2 = GROUPS[t2]; + switch (g1) { + case GROUP_BOOLEAN: + if (g2 == GROUP_BINARY_STRING) { + throw getDataTypeCombinationException(BOOLEAN, t2); + } + break; + case GROUP_NUMERIC: + return getHigherNumeric(t1, t2, g2); + case GROUP_DATETIME: + return getHigherDateTime(t1, t2, g2); + case GROUP_INTERVAL_YM: + return getHigherIntervalYearMonth(t1, t2, g2); + case GROUP_INTERVAL_DT: + return getHigherIntervalDayTime(t1, t2, g2); + case GROUP_OTHER: + return getHigherOther(t1, t2, g2); + } + return t1; + } + + private static int getHigherNumeric(int t1, int t2, int g2) { + if (g2 == GROUP_NUMERIC) { + switch (t1) { + case REAL: + switch (t2) { + case INTEGER: + return DOUBLE; + case BIGINT: + case NUMERIC: + return DECFLOAT; + } + break; + case DOUBLE: + switch (t2) { + case BIGINT: + case NUMERIC: + return DECFLOAT; + } + break; + } + } else if (g2 == GROUP_BINARY_STRING) { + throw getDataTypeCombinationException(t1, t2); + } + return t1; + } + + private static int getHigherDateTime(int t1, int t2, int g2) { + if (g2 == GROUP_CHARACTER_STRING) { + return t1; + } + if (g2 != GROUP_DATETIME) { + throw getDataTypeCombinationException(t1, t2); + } + switch (t1) { + case TIME: + if (t2 == DATE) { + return TIMESTAMP; + } + break; + case TIME_TZ: + if (t2 == DATE) { + return TIMESTAMP_TZ; + } + break; + case TIMESTAMP: + if (t2 == TIME_TZ) { + return TIMESTAMP_TZ; + } + } + return t1; + } + + private static int getHigherIntervalYearMonth(int t1, int t2, int g2) { + switch (g2) { + case GROUP_INTERVAL_YM: + if (t1 == INTERVAL_MONTH && t2 == INTERVAL_YEAR) { + return INTERVAL_YEAR_TO_MONTH; + } + //$FALL-THROUGH$ + case GROUP_CHARACTER_STRING: + case GROUP_NUMERIC: + return t1; + default: + throw getDataTypeCombinationException(t1, t2); + } + } + + private static int getHigherIntervalDayTime(int t1, int t2, int 
g2) { + switch (g2) { + case GROUP_INTERVAL_DT: + break; + case GROUP_CHARACTER_STRING: + case GROUP_NUMERIC: + return t1; + default: + throw getDataTypeCombinationException(t1, t2); + } + switch (t1) { + case INTERVAL_HOUR: + return INTERVAL_DAY_TO_HOUR; + case INTERVAL_MINUTE: + if (t2 == INTERVAL_DAY) { + return INTERVAL_DAY_TO_MINUTE; + } + return INTERVAL_HOUR_TO_MINUTE; + case INTERVAL_SECOND: + if (t2 == INTERVAL_DAY) { + return INTERVAL_DAY_TO_SECOND; + } + if (t2 == INTERVAL_HOUR) { + return INTERVAL_HOUR_TO_SECOND; + } + return INTERVAL_MINUTE_TO_SECOND; + case INTERVAL_DAY_TO_HOUR: + if (t2 == INTERVAL_MINUTE) { + return INTERVAL_DAY_TO_MINUTE; + } + if (t2 == INTERVAL_SECOND) { + return INTERVAL_DAY_TO_SECOND; + } + break; + case INTERVAL_DAY_TO_MINUTE: + if (t2 == INTERVAL_SECOND) { + return INTERVAL_DAY_TO_SECOND; + } + break; + case INTERVAL_HOUR_TO_MINUTE: + switch (t2) { + case INTERVAL_DAY: + case INTERVAL_DAY_TO_HOUR: + case INTERVAL_DAY_TO_MINUTE: + return INTERVAL_DAY_TO_MINUTE; + case INTERVAL_SECOND: + return INTERVAL_HOUR_TO_SECOND; + case INTERVAL_DAY_TO_SECOND: + return INTERVAL_DAY_TO_SECOND; + } + break; + case INTERVAL_HOUR_TO_SECOND: + switch (t2) { + case INTERVAL_DAY: + case INTERVAL_DAY_TO_HOUR: + case INTERVAL_DAY_TO_MINUTE: + case INTERVAL_DAY_TO_SECOND: + return INTERVAL_DAY_TO_SECOND; + } + break; + case INTERVAL_MINUTE_TO_SECOND: + switch (t2) { + case INTERVAL_DAY: + case INTERVAL_DAY_TO_HOUR: + case INTERVAL_DAY_TO_MINUTE: + case INTERVAL_DAY_TO_SECOND: + return INTERVAL_DAY_TO_SECOND; + case INTERVAL_HOUR: + case INTERVAL_HOUR_TO_MINUTE: + case INTERVAL_HOUR_TO_SECOND: + return INTERVAL_HOUR_TO_SECOND; + } + } + return t1; + } + + private static int getHigherOther(int t1, int t2, int g2) { + switch (t1) { + case JAVA_OBJECT: + if (g2 != GROUP_BINARY_STRING) { + throw getDataTypeCombinationException(t1, t2); + } + break; + case ENUM: + if (g2 != GROUP_CHARACTER_STRING && (g2 != GROUP_NUMERIC || t2 > INTEGER)) { + throw 
getDataTypeCombinationException(t1, t2); + } + break; + case GEOMETRY: + if (g2 != GROUP_CHARACTER_STRING && g2 != GROUP_BINARY_STRING) { + throw getDataTypeCombinationException(t1, t2); + } + break; + case JSON: + switch (g2) { + case GROUP_DATETIME: + case GROUP_INTERVAL_YM: + case GROUP_INTERVAL_DT: + case GROUP_OTHER: + throw getDataTypeCombinationException(t1, t2); + } + break; + case UUID: + switch (g2) { + case GROUP_CHARACTER_STRING: + case GROUP_BINARY_STRING: + break; + case GROUP_OTHER: + if (t2 == JAVA_OBJECT) { + break; + } + //$FALL-THROUGH$ + default: + throw getDataTypeCombinationException(t1, t2); + } + } + return t1; + } + + private static DbException getDataTypeCombinationException(int t1, int t2) { + return DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, getTypeName(t1) + ", " + getTypeName(t2)); } /** @@ -541,93 +756,146 @@ public static void clearCache() { softCache = null; } - public boolean getBoolean() { - return ((ValueBoolean) convertTo(Value.BOOLEAN)).getBoolean(); - } - - public Date getDate() { - return ((ValueDate) convertTo(Value.DATE)).getDate(); - } + /** + * Get the value as a string. 
+ * + * @return the string + */ + public abstract String getString(); - public Time getTime() { - return ((ValueTime) convertTo(Value.TIME)).getTime(); + public Reader getReader() { + return new StringReader(getString()); } - public Timestamp getTimestamp() { - return ((ValueTimestamp) convertTo(Value.TIMESTAMP)).getTimestamp(); + /** + * Get the reader + * + * @param oneBasedOffset the offset (1 means no offset) + * @param length the requested length + * @return the new reader + */ + public Reader getReader(long oneBasedOffset, long length) { + String string = getString(); + long zeroBasedOffset = oneBasedOffset - 1; + rangeCheck(zeroBasedOffset, length, string.length()); + int offset = (int) zeroBasedOffset; + return new StringReader(string.substring(offset, offset + (int) length)); } public byte[] getBytes() { - return ((ValueBytes) convertTo(Value.BYTES)).getBytes(); + throw getDataConversionError(VARBINARY); } public byte[] getBytesNoCopy() { - return ((ValueBytes) convertTo(Value.BYTES)).getBytesNoCopy(); + return getBytes(); } - public byte getByte() { - return ((ValueByte) convertTo(Value.BYTE)).getByte(); + public InputStream getInputStream() { + return new ByteArrayInputStream(getBytesNoCopy()); } - public short getShort() { - return ((ValueShort) convertTo(Value.SHORT)).getShort(); + /** + * Get the input stream + * + * @param oneBasedOffset the offset (1 means no offset) + * @param length the requested length + * @return the new input stream + */ + public InputStream getInputStream(long oneBasedOffset, long length) { + byte[] bytes = getBytesNoCopy(); + long zeroBasedOffset = oneBasedOffset - 1; + rangeCheck(zeroBasedOffset, length, bytes.length); + return new ByteArrayInputStream(bytes, (int) zeroBasedOffset, (int) length); } - public BigDecimal getBigDecimal() { - return ((ValueDecimal) convertTo(Value.DECIMAL)).getBigDecimal(); + /** + * Returns this value as a Java {@code boolean} value. 
+ * + * @throws DbException + * if this value is {@code NULL} or cannot be casted to + * {@code BOOLEAN} + * @return value + * @see #isTrue() + * @see #isFalse() + */ + public boolean getBoolean() { + return convertToBoolean().getBoolean(); } - public double getDouble() { - return ((ValueDouble) convertTo(Value.DOUBLE)).getDouble(); + /** + * Returns this value as a Java {@code byte} value. + * + * @throws DbException + * if this value is {@code NULL} or cannot be casted to + * {@code TINYINT} + * @return value + */ + public byte getByte() { + return convertToTinyint(null).getByte(); } - public float getFloat() { - return ((ValueFloat) convertTo(Value.FLOAT)).getFloat(); + /** + * Returns this value as a Java {@code short} value. + * + * @throws DbException + * if this value is {@code NULL} or cannot be casted to + * {@code SMALLINT} + * @return value + */ + public short getShort() { + return convertToSmallint(null).getShort(); } + /** + * Returns this value as a Java {@code int} value. + * + * @throws DbException + * if this value is {@code NULL} or cannot be casted to + * {@code INTEGER} + * @return value + */ public int getInt() { - return ((ValueInt) convertTo(Value.INT)).getInt(); + return convertToInt(null).getInt(); } + /** + * Returns this value as a Java {@code long} value. + * + * @throws DbException + * if this value is {@code NULL} or cannot be casted to + * {@code BIGINT} + * @return value + */ public long getLong() { - return ((ValueLong) convertTo(Value.LONG)).getLong(); + return convertToBigint(null).getLong(); } - public InputStream getInputStream() { - return new ByteArrayInputStream(getBytesNoCopy()); + public BigDecimal getBigDecimal() { + throw getDataConversionError(NUMERIC); } /** - * Get the input stream + * Returns this value as a Java {@code float} value. 
* - * @param oneBasedOffset the offset (1 means no offset) - * @param length the requested length - * @return the new input stream + * @throws DbException + * if this value is {@code NULL} or cannot be casted to + * {@code REAL} + * @return value */ - public InputStream getInputStream(long oneBasedOffset, long length) { - byte[] bytes = getBytesNoCopy(); - long zeroBasedOffset = oneBasedOffset - 1; - rangeCheck(zeroBasedOffset, length, bytes.length); - return new ByteArrayInputStream(bytes, (int) zeroBasedOffset, (int) length); - } - - public Reader getReader() { - return new StringReader(getString()); + public float getFloat() { + throw getDataConversionError(REAL); } /** - * Get the reader + * Returns this value as a Java {@code double} value. * - * @param oneBasedOffset the offset (1 means no offset) - * @param length the requested length - * @return the new reader + * @throws DbException + * if this value is {@code NULL} or cannot be casted to + * {@code DOUBLE PRECISION} + * @return value */ - public Reader getReader(long oneBasedOffset, long length) { - String string = getString(); - long zeroBasedOffset = oneBasedOffset - 1; - rangeCheck(zeroBasedOffset, length, string.length()); - int offset = (int) zeroBasedOffset; - return new StringReader(string.substring(offset, offset + (int) length)); + public double getDouble() { + throw getDataConversionError(DOUBLE); } /** @@ -666,10 +934,12 @@ public Value subtract(@SuppressWarnings("unused") Value v) { /** * Divide by a value and return the result. 
* - * @param v the value to divide by + * @param v the divisor + * @param quotientType the type of quotient (used only to read precision and scale + * when applicable) * @return the result */ - public Value divide(@SuppressWarnings("unused") Value v) { + public Value divide(@SuppressWarnings("unused") Value v, TypeInfo quotientType) { throw getUnsupportedExceptionForOperation("/"); } @@ -694,689 +964,1612 @@ public Value modulus(@SuppressWarnings("unused") Value v) { } /** - * Compare a value to the specified type. + * Convert a value to the specified type without taking scale and precision + * into account. * * @param targetType the type of the returned value * @return the converted value */ public final Value convertTo(int targetType) { - return convertTo(targetType, null, null, null); + return convertTo(targetType, null); } /** - * Convert value to ENUM value - * @param enumerators the extended type information for the ENUM data type - * @return value represented as ENUM + * Convert a value to the specified type without taking scale and precision + * into account. + * + * @param targetType the type of the returned value + * @return the converted value */ - private Value convertToEnum(ExtTypeInfo enumerators) { - return convertTo(ENUM, null, null, enumerators); + public final Value convertTo(TypeInfo targetType) { + return convertTo(targetType, null, CONVERT_TO, null); } /** - * Convert a value to the specified type. + * Convert a value to the specified type without taking scale and precision + * into account. 
* * @param targetType the type of the returned value - * @param mode the mode + * @param provider the cast information provider * @return the converted value */ - public final Value convertTo(int targetType, Mode mode) { - return convertTo(targetType, mode, null, null); + public final Value convertTo(int targetType, CastDataProvider provider) { + switch (targetType) { + case ARRAY: + return convertToAnyArray(provider); + case ROW: + return convertToAnyRow(); + default: + return convertTo(TypeInfo.getTypeInfo(targetType), provider, CONVERT_TO, null); + } } /** - * Convert a value to the specified type. + * Convert a value to the specified type without taking scale and precision + * into account. * - * @param targetType the type of the returned value - * @param mode the conversion mode - * @param column the column (if any), used for to improve the error message if conversion fails + * @param targetType + * the type of the returned value + * @param provider + * the cast information provider * @return the converted value */ - public final Value convertTo(TypeInfo targetType, Mode mode, Object column) { - return convertTo(targetType.getValueType(), mode, column, targetType.getExtTypeInfo()); + public final Value convertTo(TypeInfo targetType, CastDataProvider provider) { + return convertTo(targetType, provider, CONVERT_TO, null); } /** - * Convert a value to the specified type. + * Convert a value to the specified type without taking scale and precision + * into account. 
* - * @param targetType the type of the returned value - * @param mode the conversion mode - * @param column the column (if any), used for to improve the error message if conversion fails - * @param extTypeInfo the extended data type information, or null + * @param targetType + * the type of the returned value + * @param provider + * the cast information provider + * @param column + * the column, used to improve the error message if conversion + * fails * @return the converted value */ - protected Value convertTo(int targetType, Mode mode, Object column, ExtTypeInfo extTypeInfo) { - // converting NULL is done in ValueNull - // converting BLOB to CLOB and vice versa is done in ValueLob - if (getValueType() == targetType) { - return this; + public final Value convertTo(TypeInfo targetType, CastDataProvider provider, Object column) { + return convertTo(targetType, provider, CONVERT_TO, column); + } + + /** + * Convert this value to any ARRAY data type. + * + * @param provider + * the cast information provider + * @return a row value + */ + public final ValueArray convertToAnyArray(CastDataProvider provider) { + if (getValueType() == Value.ARRAY) { + return (ValueArray) this; } - try { - switch (targetType) { - case NULL: - return ValueNull.INSTANCE; - case BOOLEAN: - return convertToBoolean(); - case BYTE: - return convertToByte(column); - case SHORT: - return convertToShort(column); - case INT: - return convertToInt(column); - case LONG: - return convertToLong(column); - case DECIMAL: - return convertToDecimal(); - case DOUBLE: - return convertToDouble(); - case FLOAT: - return convertToFloat(); - case DATE: - return convertToDate(); - case TIME: - return convertToTime(); - case TIMESTAMP: - return convertToTimestamp(mode); - case TIMESTAMP_TZ: - return convertToTimestampTimeZone(); - case BYTES: - return convertToBytes(mode); - case STRING: - return convertToString(mode); - case STRING_IGNORECASE: - return convertToStringIgnoreCase(mode); - case STRING_FIXED: - 
return convertToStringFixed(mode); - case JAVA_OBJECT: - return convertToJavaObject(); - case ENUM: - return convertToEnumInternal((ExtTypeInfoEnum) extTypeInfo); - case BLOB: - return convertToBlob(); - case CLOB: - return convertToClob(); - case UUID: - return convertToUuid(); - case GEOMETRY: - return convertToGeometry((ExtTypeInfoGeometry) extTypeInfo); - case Value.INTERVAL_YEAR: - case Value.INTERVAL_MONTH: - case Value.INTERVAL_YEAR_TO_MONTH: - return convertToIntervalYearMonth(targetType); - case Value.INTERVAL_DAY: - case Value.INTERVAL_HOUR: - case Value.INTERVAL_MINUTE: - case Value.INTERVAL_SECOND: - case Value.INTERVAL_DAY_TO_HOUR: - case Value.INTERVAL_DAY_TO_MINUTE: - case Value.INTERVAL_DAY_TO_SECOND: - case Value.INTERVAL_HOUR_TO_MINUTE: - case Value.INTERVAL_HOUR_TO_SECOND: - case Value.INTERVAL_MINUTE_TO_SECOND: - return convertToIntervalDayTime(targetType); - case ARRAY: - return convertToArray(); - case ROW: - return convertToRow(); - case RESULT_SET: - return convertToResultSet(); - default: - if (JdbcUtils.customDataTypesHandler != null) { - return JdbcUtils.customDataTypesHandler.convert(this, targetType); - } - throw getDataConversionError(targetType); - } - } catch (NumberFormatException e) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, getString()); + return ValueArray.get(this.getType(), new Value[] { this }, provider); + } + + /** + * Convert this value to any ROW data type. + * + * @return a row value + */ + public final ValueRow convertToAnyRow() { + if (getValueType() == Value.ROW) { + return (ValueRow) this; } + return ValueRow.get(new Value[] { this }); } - private ValueBoolean convertToBoolean() { - switch (getValueType()) { - case BYTE: - case SHORT: - case INT: - case LONG: - case DECIMAL: + /** + * Cast a value to the specified type. The scale is set if applicable. The + * value is truncated to the required precision. 
+ * + * @param targetType + * the type of the returned value + * @param provider + * the cast information provider + * @return the converted value + */ + public final Value castTo(TypeInfo targetType, CastDataProvider provider) { + return convertTo(targetType, provider, CAST_TO, null); + } + + /** + * Cast a value to the specified type for assignment. The scale is set if + * applicable. If precision is too large an exception is thrown. + * + * @param targetType + * the type of the returned value + * @param provider + * the cast information provider + * @param column + * the column, used to improve the error message if conversion + * fails + * @return the converted value + */ + public final Value convertForAssignTo(TypeInfo targetType, CastDataProvider provider, Object column) { + return convertTo(targetType, provider, ASSIGN_TO, column); + } + + /** + * Convert a value to the specified type. + * + * @param targetType the type of the returned value + * @param provider the cast information provider + * @param conversionMode conversion mode + * @param column the column (if any), used to improve the error message if conversion fails + * @return the converted value + */ + private Value convertTo(TypeInfo targetType, CastDataProvider provider, int conversionMode, Object column) { + int valueType = getValueType(), targetValueType; + if (valueType == NULL + || valueType == (targetValueType = targetType.getValueType()) && conversionMode == CONVERT_TO + && targetType.getExtTypeInfo() == null && valueType != CHAR) { + return this; + } + switch (targetValueType) { + case NULL: + return ValueNull.INSTANCE; + case CHAR: + return convertToChar(targetType, provider, conversionMode, column); + case VARCHAR: + return convertToVarchar(targetType, provider, conversionMode, column); + case CLOB: + return convertToClob(targetType, conversionMode, column); + case VARCHAR_IGNORECASE: + return convertToVarcharIgnoreCase(targetType, conversionMode, column); + case BINARY: + return 
convertToBinary(targetType, conversionMode, column); + case VARBINARY: + return convertToVarbinary(targetType, conversionMode, column); + case BLOB: + return convertToBlob(targetType, conversionMode, column); + case BOOLEAN: + return convertToBoolean(); + case TINYINT: + return convertToTinyint(column); + case SMALLINT: + return convertToSmallint(column); + case INTEGER: + return convertToInt(column); + case BIGINT: + return convertToBigint(column); + case NUMERIC: + return convertToNumeric(targetType, provider, conversionMode, column); + case REAL: + return convertToReal(); case DOUBLE: - case FLOAT: - return ValueBoolean.get(getSignum() != 0); - case TIME: + return convertToDouble(); + case DECFLOAT: + return convertToDecfloat(targetType, conversionMode); case DATE: + return convertToDate(provider); + case TIME: + return convertToTime(targetType, provider, conversionMode); + case TIME_TZ: + return convertToTimeTimeZone(targetType, provider, conversionMode); case TIMESTAMP: + return convertToTimestamp(targetType, provider, conversionMode); case TIMESTAMP_TZ: - case BYTES: + return convertToTimestampTimeZone(targetType, provider, conversionMode); + case INTERVAL_YEAR: + case INTERVAL_MONTH: + case INTERVAL_YEAR_TO_MONTH: + return convertToIntervalYearMonth(targetType, conversionMode, column); + case INTERVAL_DAY: + case INTERVAL_HOUR: + case INTERVAL_MINUTE: + case INTERVAL_SECOND: + case INTERVAL_DAY_TO_HOUR: + case INTERVAL_DAY_TO_MINUTE: + case INTERVAL_DAY_TO_SECOND: + case INTERVAL_HOUR_TO_MINUTE: + case INTERVAL_HOUR_TO_SECOND: + case INTERVAL_MINUTE_TO_SECOND: + return convertToIntervalDayTime(targetType, conversionMode, column); case JAVA_OBJECT: - case UUID: + return convertToJavaObject(targetType, conversionMode, column); case ENUM: - throw getDataConversionError(BOOLEAN); - } - String s = getString(); - if (s.equalsIgnoreCase("true") || s.equalsIgnoreCase("t") || s.equalsIgnoreCase("yes") - || s.equalsIgnoreCase("y")) { - return ValueBoolean.TRUE; - } 
else if (s.equalsIgnoreCase("false") || s.equalsIgnoreCase("f") || s.equalsIgnoreCase("no") - || s.equalsIgnoreCase("n")) { - return ValueBoolean.FALSE; - } else { - // convert to a number, and if it is not 0 then it is true - return ValueBoolean.get(new BigDecimal(s).signum() != 0); + return convertToEnum((ExtTypeInfoEnum) targetType.getExtTypeInfo(), provider); + case GEOMETRY: + return convertToGeometry((ExtTypeInfoGeometry) targetType.getExtTypeInfo()); + case JSON: + return convertToJson(targetType, conversionMode, column); + case UUID: + return convertToUuid(); + case ARRAY: + return convertToArray(targetType, provider, conversionMode, column); + case ROW: + return convertToRow(targetType, provider, conversionMode, column); + default: + throw getDataConversionError(targetValueType); } } - private ValueByte convertToByte(Object column) { - switch (getValueType()) { - case BOOLEAN: - return ValueByte.get(getBoolean() ? (byte) 1 : (byte) 0); - case SHORT: - case ENUM: - case INT: - return ValueByte.get(convertToByte(getInt(), column)); - case LONG: - return ValueByte.get(convertToByte(getLong(), column)); - case DECIMAL: - return ValueByte.get(convertToByte(convertToLong(getBigDecimal(), column), column)); - case DOUBLE: - return ValueByte.get(convertToByte(convertToLong(getDouble(), column), column)); - case FLOAT: - return ValueByte.get(convertToByte(convertToLong(getFloat(), column), column)); - case BYTES: - return ValueByte.get((byte) Integer.parseInt(getString(), 16)); - case TIMESTAMP_TZ: - throw getDataConversionError(BYTE); - } - return ValueByte.get(Byte.parseByte(getString().trim())); + /** + * Converts this value to a CHAR value. May not be called on a NULL value. + * + * @return a CHAR value. + */ + public ValueChar convertToChar() { + return convertToChar(TypeInfo.getTypeInfo(CHAR), null, CONVERT_TO, null); } - private ValueShort convertToShort(Object column) { - switch (getValueType()) { - case BOOLEAN: - return ValueShort.get(getBoolean() ? 
(short) 1 : (short) 0); - case BYTE: - return ValueShort.get(getByte()); - case ENUM: - case INT: - return ValueShort.get(convertToShort(getInt(), column)); - case LONG: - return ValueShort.get(convertToShort(getLong(), column)); - case DECIMAL: - return ValueShort.get(convertToShort(convertToLong(getBigDecimal(), column), column)); - case DOUBLE: - return ValueShort.get(convertToShort(convertToLong(getDouble(), column), column)); - case FLOAT: - return ValueShort.get(convertToShort(convertToLong(getFloat(), column), column)); - case BYTES: - return ValueShort.get((short) Integer.parseInt(getString(), 16)); - case TIMESTAMP_TZ: - throw getDataConversionError(SHORT); + private ValueChar convertToChar(TypeInfo targetType, CastDataProvider provider, int conversionMode, // + Object column) { + int valueType = getValueType(); + switch (valueType) { + case BLOB: + case JAVA_OBJECT: + throw getDataConversionError(targetType.getValueType()); + } + String s = getString(); + int length = s.length(), newLength = length; + if (conversionMode == CONVERT_TO) { + while (newLength > 0 && s.charAt(newLength - 1) == ' ') { + newLength--; + } + } else { + int p = MathUtils.convertLongToInt(targetType.getPrecision()); + if (provider == null || provider.getMode().charPadding == CharPadding.ALWAYS) { + if (newLength != p) { + if (newLength < p) { + return ValueChar.get(StringUtils.pad(s, p, null, true)); + } else if (conversionMode == CAST_TO) { + newLength = p; + } else { + do { + if (s.charAt(--newLength) != ' ') { + throw getValueTooLongException(targetType, column); + } + } while (newLength > p); + } + } + } else { + if (conversionMode == CAST_TO && newLength > p) { + newLength = p; + } + while (newLength > 0 && s.charAt(newLength - 1) == ' ') { + newLength--; + } + if (conversionMode == ASSIGN_TO && newLength > p) { + throw getValueTooLongException(targetType, column); + } + } + } + if (length != newLength) { + s = s.substring(0, newLength); + } else if (valueType == CHAR) { + 
return (ValueChar) this; } - return ValueShort.get(Short.parseShort(getString().trim())); + return ValueChar.get(s); } - private ValueInt convertToInt(Object column) { - switch (getValueType()) { - case BOOLEAN: - return ValueInt.get(getBoolean() ? 1 : 0); - case BYTE: - case ENUM: - case SHORT: - return ValueInt.get(getInt()); - case LONG: - return ValueInt.get(convertToInt(getLong(), column)); - case DECIMAL: - return ValueInt.get(convertToInt(convertToLong(getBigDecimal(), column), column)); - case DOUBLE: - return ValueInt.get(convertToInt(convertToLong(getDouble(), column), column)); - case FLOAT: - return ValueInt.get(convertToInt(convertToLong(getFloat(), column), column)); - case BYTES: - return ValueInt.get((int) Long.parseLong(getString(), 16)); - case TIMESTAMP_TZ: - throw getDataConversionError(INT); + private Value convertToVarchar(TypeInfo targetType, CastDataProvider provider, int conversionMode, Object column) { + int valueType = getValueType(); + switch (valueType) { + case BLOB: + case JAVA_OBJECT: + throw getDataConversionError(targetType.getValueType()); + } + if (conversionMode != CONVERT_TO) { + String s = getString(); + int p = MathUtils.convertLongToInt(targetType.getPrecision()); + if (s.length() > p) { + if (conversionMode != CAST_TO) { + throw getValueTooLongException(targetType, column); + } + return ValueVarchar.get(s.substring(0, p), provider); + } } - return ValueInt.get(Integer.parseInt(getString().trim())); + return valueType == Value.VARCHAR ? this : ValueVarchar.get(getString(), provider); } - private ValueLong convertToLong(Object column) { + private ValueClob convertToClob(TypeInfo targetType, int conversionMode, Object column) { + ValueClob v; switch (getValueType()) { - case BOOLEAN: - return ValueLong.get(getBoolean() ? 
1 : 0); - case BYTE: - case SHORT: - case ENUM: - case INT: - return ValueLong.get(getInt()); - case DECIMAL: - return ValueLong.get(convertToLong(getBigDecimal(), column)); - case DOUBLE: - return ValueLong.get(convertToLong(getDouble(), column)); - case FLOAT: - return ValueLong.get(convertToLong(getFloat(), column)); - case BYTES: { - // parseLong doesn't work for ffffffffffffffff - byte[] d = getBytes(); - if (d.length == 8) { - return ValueLong.get(Bits.readLong(d, 0)); + case CLOB: + v = (ValueClob) this; + break; + case JAVA_OBJECT: + throw getDataConversionError(targetType.getValueType()); + case BLOB: { + LobData data = ((ValueBlob) this).lobData; + // Try to reuse the array, if possible + if (data instanceof LobDataInMemory) { + byte[] small = ((LobDataInMemory) data).getSmall(); + byte[] bytes = new String(small, StandardCharsets.UTF_8).getBytes(StandardCharsets.UTF_8); + if (Arrays.equals(bytes, small)) { + bytes = small; + } + v = ValueClob.createSmall(bytes); + break; + } else if (data instanceof LobDataDatabase) { + v = data.getDataHandler().getLobStorage().createClob(getReader(), -1); + break; } - return ValueLong.get(Long.parseLong(getString(), 16)); } - case TIMESTAMP_TZ: - throw getDataConversionError(LONG); + //$FALL-THROUGH$ + default: + v = ValueClob.createSmall(getString()); } - return ValueLong.get(Long.parseLong(getString().trim())); + if (conversionMode != CONVERT_TO) { + if (conversionMode == CAST_TO) { + v = v.convertPrecision(targetType.getPrecision()); + } else if (v.charLength() > targetType.getPrecision()) { + throw v.getValueTooLongException(targetType, column); + } + } + return v; } - private ValueDecimal convertToDecimal() { - switch (getValueType()) { - case BOOLEAN: - return (ValueDecimal) (getBoolean() ? 
ValueDecimal.ONE : ValueDecimal.ZERO); - case BYTE: - case SHORT: - case ENUM: - case INT: - return ValueDecimal.get(BigDecimal.valueOf(getInt())); - case LONG: - return ValueDecimal.get(BigDecimal.valueOf(getLong())); - case DOUBLE: { - double d = getDouble(); - if (Double.isInfinite(d) || Double.isNaN(d)) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, Double.toString(d)); + private Value convertToVarcharIgnoreCase(TypeInfo targetType, int conversionMode, Object column) { + int valueType = getValueType(); + switch (valueType) { + case BLOB: + case JAVA_OBJECT: + throw getDataConversionError(targetType.getValueType()); + } + if (conversionMode != CONVERT_TO) { + String s = getString(); + int p = MathUtils.convertLongToInt(targetType.getPrecision()); + if (s.length() > p) { + if (conversionMode != CAST_TO) { + throw getValueTooLongException(targetType, column); + } + return ValueVarcharIgnoreCase.get(s.substring(0, p)); } - return ValueDecimal.get(BigDecimal.valueOf(d)); } - case FLOAT: { - float f = getFloat(); - if (Float.isInfinite(f) || Float.isNaN(f)) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, Float.toString(f)); + return valueType == Value.VARCHAR_IGNORECASE ? 
this : ValueVarcharIgnoreCase.get(getString()); + } + + private ValueBinary convertToBinary(TypeInfo targetType, int conversionMode, Object column) { + ValueBinary v; + if (getValueType() == BINARY) { + v = (ValueBinary) this; + } else { + try { + v = ValueBinary.getNoCopy(getBytesNoCopy()); + } catch (DbException e) { + if (e.getErrorCode() == ErrorCode.DATA_CONVERSION_ERROR_1) { + throw getDataConversionError(BINARY); + } + throw e; } - // better rounding behavior than BigDecimal.valueOf(f) - return ValueDecimal.get(new BigDecimal(Float.toString(f))); } - case TIMESTAMP_TZ: - throw getDataConversionError(DECIMAL); + if (conversionMode != CONVERT_TO) { + byte[] value = v.getBytesNoCopy(); + int length = value.length; + int p = MathUtils.convertLongToInt(targetType.getPrecision()); + if (length != p) { + if (conversionMode == ASSIGN_TO && length > p) { + throw v.getValueTooLongException(targetType, column); + } + v = ValueBinary.getNoCopy(Arrays.copyOf(value, p)); + } } - return ValueDecimal.get(new BigDecimal(getString().trim())); + return v; } - private ValueDouble convertToDouble() { - switch (getValueType()) { - case BOOLEAN: - return getBoolean() ? 
ValueDouble.ONE : ValueDouble.ZERO; - case BYTE: - case SHORT: - case INT: - return ValueDouble.get(getInt()); - case LONG: - return ValueDouble.get(getLong()); - case DECIMAL: - return ValueDouble.get(getBigDecimal().doubleValue()); - case FLOAT: - return ValueDouble.get(getFloat()); - case ENUM: - case TIMESTAMP_TZ: - throw getDataConversionError(DOUBLE); + private ValueVarbinary convertToVarbinary(TypeInfo targetType, int conversionMode, Object column) { + ValueVarbinary v; + if (getValueType() == VARBINARY) { + v = (ValueVarbinary) this; + } else { + v = ValueVarbinary.getNoCopy(getBytesNoCopy()); + } + if (conversionMode != CONVERT_TO) { + byte[] value = v.getBytesNoCopy(); + int length = value.length; + int p = MathUtils.convertLongToInt(targetType.getPrecision()); + if (conversionMode == CAST_TO) { + if (length > p) { + v = ValueVarbinary.getNoCopy(Arrays.copyOf(value, p)); + } + } else if (length > p) { + throw v.getValueTooLongException(targetType, column); + } } - return ValueDouble.get(Double.parseDouble(getString().trim())); + return v; } - private ValueFloat convertToFloat() { + private ValueBlob convertToBlob(TypeInfo targetType, int conversionMode, Object column) { + ValueBlob v; switch (getValueType()) { - case BOOLEAN: - return getBoolean() ? 
ValueFloat.ONE : ValueFloat.ZERO; - case BYTE: - case SHORT: - case INT: - return ValueFloat.get(getInt()); - case LONG: - return ValueFloat.get(getLong()); - case DECIMAL: - return ValueFloat.get(getBigDecimal().floatValue()); - case DOUBLE: - return ValueFloat.get((float) getDouble()); - case ENUM: - case TIMESTAMP_TZ: - throw getDataConversionError(FLOAT); - } - return ValueFloat.get(Float.parseFloat(getString().trim())); + case BLOB: + v = (ValueBlob) this; + break; + case CLOB: + DataHandler handler = ((ValueLob) this).lobData.getDataHandler(); + if (handler != null) { + v = handler.getLobStorage().createBlob(getInputStream(), -1); + break; + } + //$FALL-THROUGH$ + default: + try { + v = ValueBlob.createSmall(getBytesNoCopy()); + } catch (DbException e) { + if (e.getErrorCode() == ErrorCode.DATA_CONVERSION_ERROR_1) { + throw getDataConversionError(BLOB); + } + throw e; + } + break; + } + if (conversionMode != CONVERT_TO) { + if (conversionMode == CAST_TO) { + v = v.convertPrecision(targetType.getPrecision()); + } else if (v.octetLength() > targetType.getPrecision()) { + throw v.getValueTooLongException(targetType, column); + } + } + return v; } - private ValueDate convertToDate() { + /** + * Converts this value to a BOOLEAN value. May not be called on a NULL + * value. 
+ * + * @return the BOOLEAN value + */ + public final ValueBoolean convertToBoolean() { switch (getValueType()) { - case TIME: - // because the time has set the date to 1970-01-01, - // this will be the result - return ValueDate.fromDateValue(DateTimeUtils.EPOCH_DATE_VALUE); + case BOOLEAN: + return (ValueBoolean) this; + case CHAR: + case VARCHAR: + case VARCHAR_IGNORECASE: + return ValueBoolean.get(getBoolean()); + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + case NUMERIC: + case DOUBLE: + case REAL: + case DECFLOAT: + return ValueBoolean.get(getSignum() != 0); + default: + throw getDataConversionError(BOOLEAN); + case NULL: + throw DbException.getInternalError(); + } + } + + /** + * Converts this value to a TINYINT value. May not be called on a NULL + * value. + * + * @param column + * the column, used for to improve the error message if + * conversion fails + * @return the TINYINT value + */ + public final ValueTinyint convertToTinyint(Object column) { + switch (getValueType()) { + case TINYINT: + return (ValueTinyint) this; + case CHAR: + case VARCHAR: + case VARCHAR_IGNORECASE: + case BOOLEAN: + return ValueTinyint.get(getByte()); + case SMALLINT: + case ENUM: + case INTEGER: + return ValueTinyint.get(convertToByte(getInt(), column)); + case BIGINT: + case INTERVAL_YEAR: + case INTERVAL_MONTH: + case INTERVAL_DAY: + case INTERVAL_HOUR: + case INTERVAL_MINUTE: + case INTERVAL_SECOND: + case INTERVAL_YEAR_TO_MONTH: + case INTERVAL_DAY_TO_HOUR: + case INTERVAL_DAY_TO_MINUTE: + case INTERVAL_DAY_TO_SECOND: + case INTERVAL_HOUR_TO_MINUTE: + case INTERVAL_HOUR_TO_SECOND: + case INTERVAL_MINUTE_TO_SECOND: + return ValueTinyint.get(convertToByte(getLong(), column)); + case NUMERIC: + case DECFLOAT: + return ValueTinyint.get(convertToByte(convertToLong(getBigDecimal(), column), column)); + case REAL: + case DOUBLE: + return ValueTinyint.get(convertToByte(convertToLong(getDouble(), column), column)); + case BINARY: + case VARBINARY: { + byte[] bytes 
= getBytesNoCopy(); + if (bytes.length == 1) { + return ValueTinyint.get(bytes[0]); + } + } + //$FALL-THROUGH$ + default: + throw getDataConversionError(TINYINT); + case NULL: + throw DbException.getInternalError(); + } + } + + /** + * Converts this value to a SMALLINT value. May not be called on a NULL value. + * + * @param column + * the column, used for to improve the error message if + * conversion fails + * @return the SMALLINT value + */ + public final ValueSmallint convertToSmallint(Object column) { + switch (getValueType()) { + case SMALLINT: + return (ValueSmallint) this; + case CHAR: + case VARCHAR: + case VARCHAR_IGNORECASE: + case BOOLEAN: + case TINYINT: + return ValueSmallint.get(getShort()); + case ENUM: + case INTEGER: + return ValueSmallint.get(convertToShort(getInt(), column)); + case BIGINT: + case INTERVAL_YEAR: + case INTERVAL_MONTH: + case INTERVAL_DAY: + case INTERVAL_HOUR: + case INTERVAL_MINUTE: + case INTERVAL_SECOND: + case INTERVAL_YEAR_TO_MONTH: + case INTERVAL_DAY_TO_HOUR: + case INTERVAL_DAY_TO_MINUTE: + case INTERVAL_DAY_TO_SECOND: + case INTERVAL_HOUR_TO_MINUTE: + case INTERVAL_HOUR_TO_SECOND: + case INTERVAL_MINUTE_TO_SECOND: + return ValueSmallint.get(convertToShort(getLong(), column)); + case NUMERIC: + case DECFLOAT: + return ValueSmallint.get(convertToShort(convertToLong(getBigDecimal(), column), column)); + case REAL: + case DOUBLE: + return ValueSmallint.get(convertToShort(convertToLong(getDouble(), column), column)); + case BINARY: + case VARBINARY: { + byte[] bytes = getBytesNoCopy(); + if (bytes.length == 2) { + return ValueSmallint.get((short) ((bytes[0] << 8) + (bytes[1] & 0xff))); + } + } + //$FALL-THROUGH$ + default: + throw getDataConversionError(SMALLINT); + case NULL: + throw DbException.getInternalError(); + } + } + + /** + * Converts this value to a INT value. May not be called on a NULL value. 
+ * + * @param column + * the column, used for to improve the error message if + * conversion fails + * @return the INT value + */ + public final ValueInteger convertToInt(Object column) { + switch (getValueType()) { + case INTEGER: + return (ValueInteger) this; + case CHAR: + case VARCHAR: + case VARCHAR_IGNORECASE: + case BOOLEAN: + case TINYINT: + case ENUM: + case SMALLINT: + return ValueInteger.get(getInt()); + case BIGINT: + case INTERVAL_YEAR: + case INTERVAL_MONTH: + case INTERVAL_DAY: + case INTERVAL_HOUR: + case INTERVAL_MINUTE: + case INTERVAL_SECOND: + case INTERVAL_YEAR_TO_MONTH: + case INTERVAL_DAY_TO_HOUR: + case INTERVAL_DAY_TO_MINUTE: + case INTERVAL_DAY_TO_SECOND: + case INTERVAL_HOUR_TO_MINUTE: + case INTERVAL_HOUR_TO_SECOND: + case INTERVAL_MINUTE_TO_SECOND: + return ValueInteger.get(convertToInt(getLong(), column)); + case NUMERIC: + case DECFLOAT: + return ValueInteger.get(convertToInt(convertToLong(getBigDecimal(), column), column)); + case REAL: + case DOUBLE: + return ValueInteger.get(convertToInt(convertToLong(getDouble(), column), column)); + case BINARY: + case VARBINARY: { + byte[] bytes = getBytesNoCopy(); + if (bytes.length == 4) { + return ValueInteger.get(Bits.readInt(bytes, 0)); + } + } + //$FALL-THROUGH$ + default: + throw getDataConversionError(INTEGER); + case NULL: + throw DbException.getInternalError(); + } + } + + /** + * Converts this value to a BIGINT value. May not be called on a NULL value. 
+ * + * @param column + * the column, used for to improve the error message if + * conversion fails + * @return the BIGINT value + */ + public final ValueBigint convertToBigint(Object column) { + switch (getValueType()) { + case BIGINT: + return (ValueBigint) this; + case CHAR: + case VARCHAR: + case VARCHAR_IGNORECASE: + case BOOLEAN: + case TINYINT: + case SMALLINT: + case INTEGER: + case INTERVAL_YEAR: + case INTERVAL_MONTH: + case INTERVAL_DAY: + case INTERVAL_HOUR: + case INTERVAL_MINUTE: + case INTERVAL_SECOND: + case INTERVAL_YEAR_TO_MONTH: + case INTERVAL_DAY_TO_HOUR: + case INTERVAL_DAY_TO_MINUTE: + case INTERVAL_DAY_TO_SECOND: + case INTERVAL_HOUR_TO_MINUTE: + case INTERVAL_HOUR_TO_SECOND: + case INTERVAL_MINUTE_TO_SECOND: + case ENUM: + return ValueBigint.get(getLong()); + case NUMERIC: + case DECFLOAT: + return ValueBigint.get(convertToLong(getBigDecimal(), column)); + case REAL: + case DOUBLE: + return ValueBigint.get(convertToLong(getDouble(), column)); + case BINARY: + case VARBINARY: { + byte[] bytes = getBytesNoCopy(); + if (bytes.length == 8) { + return ValueBigint.get(Bits.readLong(bytes, 0)); + } + } + //$FALL-THROUGH$ + default: + throw getDataConversionError(BIGINT); + case NULL: + throw DbException.getInternalError(); + } + } + + private ValueNumeric convertToNumeric(TypeInfo targetType, CastDataProvider provider, int conversionMode, + Object column) { + ValueNumeric v; + switch (getValueType()) { + case NUMERIC: + v = (ValueNumeric) this; + break; + case BOOLEAN: + v = getBoolean() ? 
ValueNumeric.ONE : ValueNumeric.ZERO; + break; + default: { + BigDecimal value = getBigDecimal(); + int targetScale = targetType.getScale(); + int scale = value.scale(); + if (scale < 0 || scale > ValueNumeric.MAXIMUM_SCALE || conversionMode != CONVERT_TO && scale != targetScale + && (scale >= targetScale || !provider.getMode().convertOnlyToSmallerScale)) { + value = ValueNumeric.setScale(value, targetScale); + } + if (conversionMode != CONVERT_TO + && value.precision() > targetType.getPrecision() - targetScale + value.scale()) { + throw getValueTooLongException(targetType, column); + } + return ValueNumeric.get(value); + } + case NULL: + throw DbException.getInternalError(); + } + if (conversionMode != CONVERT_TO) { + int targetScale = targetType.getScale(); + BigDecimal value = v.getBigDecimal(); + int scale = value.scale(); + if (scale != targetScale && (scale >= targetScale || !provider.getMode().convertOnlyToSmallerScale)) { + v = ValueNumeric.get(ValueNumeric.setScale(value, targetScale)); + } + BigDecimal bd = v.getBigDecimal(); + if (bd.precision() > targetType.getPrecision() - targetScale + bd.scale()) { + throw v.getValueTooLongException(targetType, column); + } + } + return v; + } + + /** + * Converts this value to a REAL value. May not be called on a NULL value. + * + * @return the REAL value + */ + public final ValueReal convertToReal() { + switch (getValueType()) { + case REAL: + return (ValueReal) this; + case BOOLEAN: + return getBoolean() ? ValueReal.ONE : ValueReal.ZERO; + default: + return ValueReal.get(getFloat()); + case NULL: + throw DbException.getInternalError(); + } + } + + /** + * Converts this value to a DOUBLE value. May not be called on a NULL value. + * + * @return the DOUBLE value + */ + public final ValueDouble convertToDouble() { + switch (getValueType()) { + case DOUBLE: + return (ValueDouble) this; + case BOOLEAN: + return getBoolean() ? 
ValueDouble.ONE : ValueDouble.ZERO; + default: + return ValueDouble.get(getDouble()); + case NULL: + throw DbException.getInternalError(); + } + } + + private ValueDecfloat convertToDecfloat(TypeInfo targetType, int conversionMode) { + ValueDecfloat v; + switch (getValueType()) { + case DECFLOAT: + v = (ValueDecfloat) this; + if (v.value == null) { + return v; + } + break; + case CHAR: + case VARCHAR: + case VARCHAR_IGNORECASE: { + String s = getString().trim(); + try { + v = ValueDecfloat.get(new BigDecimal(s)); + } catch (NumberFormatException e) { + switch (s) { + case "-Infinity": + return ValueDecfloat.NEGATIVE_INFINITY; + case "Infinity": + case "+Infinity": + return ValueDecfloat.POSITIVE_INFINITY; + case "NaN": + case "-NaN": + case "+NaN": + return ValueDecfloat.NAN; + default: + throw getDataConversionError(DECFLOAT); + } + } + break; + } + case BOOLEAN: + v = getBoolean() ? ValueDecfloat.ONE : ValueDecfloat.ZERO; + break; + case REAL: { + float value = getFloat(); + if (Float.isFinite(value)) { + v = ValueDecfloat.get(new BigDecimal(Float.toString(value))); + } else if (value == Float.POSITIVE_INFINITY) { + return ValueDecfloat.POSITIVE_INFINITY; + } else if (value == Float.NEGATIVE_INFINITY) { + return ValueDecfloat.NEGATIVE_INFINITY; + } else { + return ValueDecfloat.NAN; + } + break; + } + case DOUBLE: { + double value = getDouble(); + if (Double.isFinite(value)) { + v = ValueDecfloat.get(new BigDecimal(Double.toString(value))); + } else if (value == Double.POSITIVE_INFINITY) { + return ValueDecfloat.POSITIVE_INFINITY; + } else if (value == Double.NEGATIVE_INFINITY) { + return ValueDecfloat.NEGATIVE_INFINITY; + } else { + return ValueDecfloat.NAN; + } + break; + } + default: + try { + v = ValueDecfloat.get(getBigDecimal()); + } catch (DbException e) { + if (e.getErrorCode() == ErrorCode.DATA_CONVERSION_ERROR_1) { + throw getDataConversionError(DECFLOAT); + } + throw e; + } + break; + case NULL: + throw DbException.getInternalError(); + } + if 
(conversionMode != CONVERT_TO) { + BigDecimal bd = v.value; + int precision = bd.precision(), targetPrecision = (int) targetType.getPrecision(); + if (precision > targetPrecision) { + v = ValueDecfloat.get(bd.setScale(bd.scale() - precision + targetPrecision, RoundingMode.HALF_UP)); + } + } + return v; + } + + /** + * Converts this value to a DATE value. May not be called on a NULL value. + * + * @param provider + * the cast information provider + * @return the DATE value + */ + public final ValueDate convertToDate(CastDataProvider provider) { + switch (getValueType()) { + case DATE: + return (ValueDate) this; case TIMESTAMP: return ValueDate.fromDateValue(((ValueTimestamp) this).getDateValue()); case TIMESTAMP_TZ: { ValueTimestampTimeZone ts = (ValueTimestampTimeZone) this; - long dateValue = ts.getDateValue(), timeNanos = ts.getTimeNanos(); - long millis = DateTimeUtils.getMillis(dateValue, timeNanos, ts.getTimeZoneOffsetMins()); - return ValueDate.fromMillis(millis); + long timeNanos = ts.getTimeNanos(); + long epochSeconds = DateTimeUtils.getEpochSeconds(ts.getDateValue(), timeNanos, + ts.getTimeZoneOffsetSeconds()); + return ValueDate.fromDateValue(DateTimeUtils + .dateValueFromLocalSeconds(epochSeconds + + provider.currentTimeZone().getTimeZoneOffsetUTC(epochSeconds))); + } + case VARCHAR: + case VARCHAR_IGNORECASE: + case CHAR: + return ValueDate.parse(getString().trim()); + default: + throw getDataConversionError(DATE); + case NULL: + throw DbException.getInternalError(); + } + } + + private ValueTime convertToTime(TypeInfo targetType, CastDataProvider provider, int conversionMode) { + ValueTime v; + switch (getValueType()) { + case TIME: + v = (ValueTime) this; + break; + case TIME_TZ: + v = ValueTime.fromNanos(getLocalTimeNanos(provider)); + break; + case TIMESTAMP: + v = ValueTime.fromNanos(((ValueTimestamp) this).getTimeNanos()); + break; + case TIMESTAMP_TZ: { + ValueTimestampTimeZone ts = (ValueTimestampTimeZone) this; + long timeNanos = 
ts.getTimeNanos(); + long epochSeconds = DateTimeUtils.getEpochSeconds(ts.getDateValue(), timeNanos, + ts.getTimeZoneOffsetSeconds()); + v = ValueTime.fromNanos( + DateTimeUtils.nanosFromLocalSeconds(epochSeconds + + provider.currentTimeZone().getTimeZoneOffsetUTC(epochSeconds)) + + timeNanos % DateTimeUtils.NANOS_PER_SECOND); + break; + } + case VARCHAR: + case VARCHAR_IGNORECASE: + case CHAR: + v = ValueTime.parse(getString().trim()); + break; + default: + throw getDataConversionError(TIME); + } + if (conversionMode != CONVERT_TO) { + int targetScale = targetType.getScale(); + if (targetScale < ValueTime.MAXIMUM_SCALE) { + long n = v.getNanos(); + long n2 = DateTimeUtils.convertScale(n, targetScale, DateTimeUtils.NANOS_PER_DAY); + if (n2 != n) { + v = ValueTime.fromNanos(n2); + } + } + } + return v; + } + + private ValueTimeTimeZone convertToTimeTimeZone(TypeInfo targetType, CastDataProvider provider, + int conversionMode) { + ValueTimeTimeZone v; + switch (getValueType()) { + case TIME_TZ: + v = (ValueTimeTimeZone) this; + break; + case TIME: + v = ValueTimeTimeZone.fromNanos(((ValueTime) this).getNanos(), + provider.currentTimestamp().getTimeZoneOffsetSeconds()); + break; + case TIMESTAMP: { + ValueTimestamp ts = (ValueTimestamp) this; + long timeNanos = ts.getTimeNanos(); + v = ValueTimeTimeZone.fromNanos(timeNanos, + provider.currentTimeZone().getTimeZoneOffsetLocal(ts.getDateValue(), timeNanos)); + break; + } + case TIMESTAMP_TZ: { + ValueTimestampTimeZone ts = (ValueTimestampTimeZone) this; + v = ValueTimeTimeZone.fromNanos(ts.getTimeNanos(), ts.getTimeZoneOffsetSeconds()); + break; + } + case VARCHAR: + case VARCHAR_IGNORECASE: + case CHAR: + v = ValueTimeTimeZone.parse(getString().trim()); + break; + default: + throw getDataConversionError(TIME_TZ); + } + if (conversionMode != CONVERT_TO) { + int targetScale = targetType.getScale(); + if (targetScale < ValueTime.MAXIMUM_SCALE) { + long n = v.getNanos(); + long n2 = DateTimeUtils.convertScale(n, 
targetScale, DateTimeUtils.NANOS_PER_DAY); + if (n2 != n) { + v = ValueTimeTimeZone.fromNanos(n2, v.getTimeZoneOffsetSeconds()); + } + } + } + return v; + } + + private ValueTimestamp convertToTimestamp(TypeInfo targetType, CastDataProvider provider, int conversionMode) { + ValueTimestamp v; + switch (getValueType()) { + case TIMESTAMP: + v = (ValueTimestamp) this; + break; + case TIME: + v = ValueTimestamp.fromDateValueAndNanos(provider.currentTimestamp().getDateValue(), + ((ValueTime) this).getNanos()); + break; + case TIME_TZ: + v = ValueTimestamp.fromDateValueAndNanos(provider.currentTimestamp().getDateValue(), + getLocalTimeNanos(provider)); + break; + case DATE: + // Scale is always 0 + return ValueTimestamp.fromDateValueAndNanos(((ValueDate) this).getDateValue(), 0); + case TIMESTAMP_TZ: { + ValueTimestampTimeZone ts = (ValueTimestampTimeZone) this; + long timeNanos = ts.getTimeNanos(); + long epochSeconds = DateTimeUtils.getEpochSeconds(ts.getDateValue(), timeNanos, + ts.getTimeZoneOffsetSeconds()); + epochSeconds += provider.currentTimeZone().getTimeZoneOffsetUTC(epochSeconds); + v = ValueTimestamp.fromDateValueAndNanos(DateTimeUtils.dateValueFromLocalSeconds(epochSeconds), + DateTimeUtils.nanosFromLocalSeconds(epochSeconds) + timeNanos % DateTimeUtils.NANOS_PER_SECOND); + break; + } + case VARCHAR: + case VARCHAR_IGNORECASE: + case CHAR: + v = ValueTimestamp.parse(getString().trim(), provider); + break; + default: + throw getDataConversionError(TIMESTAMP); + } + if (conversionMode != CONVERT_TO) { + int targetScale = targetType.getScale(); + if (targetScale < ValueTimestamp.MAXIMUM_SCALE) { + long dv = v.getDateValue(), n = v.getTimeNanos(); + long n2 = DateTimeUtils.convertScale(n, targetScale, + dv == DateTimeUtils.MAX_DATE_VALUE ? 
DateTimeUtils.NANOS_PER_DAY : Long.MAX_VALUE); + if (n2 != n) { + if (n2 >= DateTimeUtils.NANOS_PER_DAY) { + n2 -= DateTimeUtils.NANOS_PER_DAY; + dv = DateTimeUtils.incrementDateValue(dv); + } + v = ValueTimestamp.fromDateValueAndNanos(dv, n2); + } + } + } + return v; + } + + private long getLocalTimeNanos(CastDataProvider provider) { + ValueTimeTimeZone ts = (ValueTimeTimeZone) this; + int localOffset = provider.currentTimestamp().getTimeZoneOffsetSeconds(); + return DateTimeUtils.normalizeNanosOfDay(ts.getNanos() + + (ts.getTimeZoneOffsetSeconds() - localOffset) * DateTimeUtils.NANOS_PER_DAY); + } + + private ValueTimestampTimeZone convertToTimestampTimeZone(TypeInfo targetType, CastDataProvider provider, + int conversionMode) { + ValueTimestampTimeZone v; + switch (getValueType()) { + case TIMESTAMP_TZ: + v = (ValueTimestampTimeZone) this; + break; + case TIME: { + long dateValue = provider.currentTimestamp().getDateValue(); + long timeNanos = ((ValueTime) this).getNanos(); + v = ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos, + provider.currentTimeZone().getTimeZoneOffsetLocal(dateValue, timeNanos)); + break; + } + case TIME_TZ: { + ValueTimeTimeZone t = (ValueTimeTimeZone) this; + v = ValueTimestampTimeZone.fromDateValueAndNanos(provider.currentTimestamp().getDateValue(), + t.getNanos(), t.getTimeZoneOffsetSeconds()); + break; + } + case DATE: { + long dateValue = ((ValueDate) this).getDateValue(); + // Scale is always 0 + return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, 0L, + provider.currentTimeZone().getTimeZoneOffsetLocal(dateValue, 0L)); + } + case TIMESTAMP: { + ValueTimestamp ts = (ValueTimestamp) this; + long dateValue = ts.getDateValue(); + long timeNanos = ts.getTimeNanos(); + v = ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos, + provider.currentTimeZone().getTimeZoneOffsetLocal(dateValue, timeNanos)); + break; + } + case VARCHAR: + case VARCHAR_IGNORECASE: + case CHAR: + v = 
ValueTimestampTimeZone.parse(getString().trim(), provider); + break; + default: + throw getDataConversionError(TIMESTAMP_TZ); + } + if (conversionMode != CONVERT_TO) { + int targetScale = targetType.getScale(); + if (targetScale < ValueTimestamp.MAXIMUM_SCALE) { + long dv = v.getDateValue(); + long n = v.getTimeNanos(); + long n2 = DateTimeUtils.convertScale(n, targetScale, + dv == DateTimeUtils.MAX_DATE_VALUE ? DateTimeUtils.NANOS_PER_DAY : Long.MAX_VALUE); + if (n2 != n) { + if (n2 >= DateTimeUtils.NANOS_PER_DAY) { + n2 -= DateTimeUtils.NANOS_PER_DAY; + dv = DateTimeUtils.incrementDateValue(dv); + } + v = ValueTimestampTimeZone.fromDateValueAndNanos(dv, n2, v.getTimeZoneOffsetSeconds()); + } + } + } + return v; + } + + private ValueInterval convertToIntervalYearMonth(TypeInfo targetType, int conversionMode, Object column) { + ValueInterval v = convertToIntervalYearMonth(targetType.getValueType(), column); + if (conversionMode != CONVERT_TO) { + if (!v.checkPrecision(targetType.getPrecision())) { + throw v.getValueTooLongException(targetType, column); + } + } + return v; + } + + private ValueInterval convertToIntervalYearMonth(int targetType, Object column) { + long leading; + switch (getValueType()) { + case TINYINT: + case SMALLINT: + case INTEGER: + leading = getInt(); + break; + case BIGINT: + leading = getLong(); + break; + case REAL: + case DOUBLE: + if (targetType == INTERVAL_YEAR_TO_MONTH) { + return IntervalUtils.intervalFromAbsolute(IntervalQualifier.YEAR_TO_MONTH, getBigDecimal() + .multiply(BigDecimal.valueOf(12)).setScale(0, RoundingMode.HALF_UP).toBigInteger()); + } + leading = convertToLong(getDouble(), column); + break; + case NUMERIC: + case DECFLOAT: + if (targetType == INTERVAL_YEAR_TO_MONTH) { + return IntervalUtils.intervalFromAbsolute(IntervalQualifier.YEAR_TO_MONTH, getBigDecimal() + .multiply(BigDecimal.valueOf(12)).setScale(0, RoundingMode.HALF_UP).toBigInteger()); + } + leading = convertToLong(getBigDecimal(), column); + break; + case 
VARCHAR: + case VARCHAR_IGNORECASE: + case CHAR: { + String s = getString(); + try { + return (ValueInterval) IntervalUtils + .parseFormattedInterval(IntervalQualifier.valueOf(targetType - INTERVAL_YEAR), s) + .convertTo(targetType); + } catch (Exception e) { + throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, e, "INTERVAL", s); + } + } + case INTERVAL_YEAR: + case INTERVAL_MONTH: + case INTERVAL_YEAR_TO_MONTH: + return IntervalUtils.intervalFromAbsolute(IntervalQualifier.valueOf(targetType - INTERVAL_YEAR), + IntervalUtils.intervalToAbsolute((ValueInterval) this)); + default: + throw getDataConversionError(targetType); + } + boolean negative = false; + if (leading < 0) { + negative = true; + leading = -leading; + } + return ValueInterval.from(IntervalQualifier.valueOf(targetType - INTERVAL_YEAR), negative, leading, + 0L); + } + + private ValueInterval convertToIntervalDayTime(TypeInfo targetType, int conversionMode, Object column) { + ValueInterval v = convertToIntervalDayTime(targetType.getValueType(), column); + if (conversionMode != CONVERT_TO) { + v = v.setPrecisionAndScale(targetType, column); + } + return v; + } + + private ValueInterval convertToIntervalDayTime(int targetType, Object column) { + long leading; + switch (getValueType()) { + case TINYINT: + case SMALLINT: + case INTEGER: + leading = getInt(); + break; + case BIGINT: + leading = getLong(); + break; + case REAL: + case DOUBLE: + if (targetType > INTERVAL_MINUTE) { + return convertToIntervalDayTime(getBigDecimal(), targetType); + } + leading = convertToLong(getDouble(), column); + break; + case NUMERIC: + case DECFLOAT: + if (targetType > INTERVAL_MINUTE) { + return convertToIntervalDayTime(getBigDecimal(), targetType); + } + leading = convertToLong(getBigDecimal(), column); + break; + case VARCHAR: + case VARCHAR_IGNORECASE: + case CHAR: { + String s = getString(); + try { + return (ValueInterval) IntervalUtils + .parseFormattedInterval(IntervalQualifier.valueOf(targetType - 
INTERVAL_YEAR), s) + .convertTo(targetType); + } catch (Exception e) { + throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, e, "INTERVAL", s); + } + } + case INTERVAL_DAY: + case INTERVAL_HOUR: + case INTERVAL_MINUTE: + case INTERVAL_SECOND: + case INTERVAL_DAY_TO_HOUR: + case INTERVAL_DAY_TO_MINUTE: + case INTERVAL_DAY_TO_SECOND: + case INTERVAL_HOUR_TO_MINUTE: + case INTERVAL_HOUR_TO_SECOND: + case INTERVAL_MINUTE_TO_SECOND: + return IntervalUtils.intervalFromAbsolute(IntervalQualifier.valueOf(targetType - INTERVAL_YEAR), + IntervalUtils.intervalToAbsolute((ValueInterval) this)); + default: + throw getDataConversionError(targetType); } - case ENUM: - throw getDataConversionError(DATE); + boolean negative = false; + if (leading < 0) { + negative = true; + leading = -leading; } - return ValueDate.parse(getString().trim()); + return ValueInterval.from(IntervalQualifier.valueOf(targetType - INTERVAL_YEAR), negative, leading, + 0L); } - private ValueTime convertToTime() { - switch (getValueType()) { - case DATE: - // need to normalize the year, month and day because a date - // has the time set to 0, the result will be 0 - return ValueTime.fromNanos(0); - case TIMESTAMP: - return ValueTime.fromNanos(((ValueTimestamp) this).getTimeNanos()); - case TIMESTAMP_TZ: { - ValueTimestampTimeZone ts = (ValueTimestampTimeZone) this; - long dateValue = ts.getDateValue(), timeNanos = ts.getTimeNanos(); - long millis = DateTimeUtils.getMillis(dateValue, timeNanos, ts.getTimeZoneOffsetMins()); - return ValueTime.fromNanos( - DateTimeUtils.nanosFromLocalMillis(millis + DateTimeUtils.getTimeZoneOffset(millis)) - + timeNanos % 1_000_000); - } - case ENUM: - throw getDataConversionError(TIME); + private ValueInterval convertToIntervalDayTime(BigDecimal bigDecimal, int targetType) { + long multiplier; + switch (targetType) { + case INTERVAL_SECOND: + multiplier = DateTimeUtils.NANOS_PER_SECOND; + break; + case INTERVAL_DAY_TO_HOUR: + case INTERVAL_DAY_TO_MINUTE: + case 
INTERVAL_DAY_TO_SECOND: + multiplier = DateTimeUtils.NANOS_PER_DAY; + break; + case INTERVAL_HOUR_TO_MINUTE: + case INTERVAL_HOUR_TO_SECOND: + multiplier = DateTimeUtils.NANOS_PER_HOUR; + break; + case INTERVAL_MINUTE_TO_SECOND: + multiplier = DateTimeUtils.NANOS_PER_MINUTE; + break; + default: + throw getDataConversionError(targetType); } - return ValueTime.parse(getString().trim()); + return IntervalUtils.intervalFromAbsolute(IntervalQualifier.valueOf(targetType - INTERVAL_YEAR), + bigDecimal.multiply(BigDecimal.valueOf(multiplier)).setScale(0, RoundingMode.HALF_UP).toBigInteger()); } - private ValueTimestamp convertToTimestamp(Mode mode) { + /** + * Converts this value to a JAVA_OBJECT value. May not be called on a NULL + * value. + * + * @param targetType + * the type of the returned value + * @param conversionMode + * conversion mode + * @param column + * the column (if any), used to improve the error message if + * conversion fails + * @return the JAVA_OBJECT value + */ + public final ValueJavaObject convertToJavaObject(TypeInfo targetType, int conversionMode, Object column) { + ValueJavaObject v; switch (getValueType()) { - case TIME: - return DateTimeUtils.normalizeTimestamp(0, ((ValueTime) this).getNanos()); - case DATE: - return ValueTimestamp.fromDateValueAndNanos(((ValueDate) this).getDateValue(), 0); - case TIMESTAMP_TZ: { - ValueTimestampTimeZone ts = (ValueTimestampTimeZone) this; - long dateValue = ts.getDateValue(), timeNanos = ts.getTimeNanos(); - long millis = DateTimeUtils.getMillis(dateValue, timeNanos, ts.getTimeZoneOffsetMins()); - return ValueTimestamp.fromMillisNanos(millis, (int) (timeNanos % 1_000_000)); + case JAVA_OBJECT: + v = (ValueJavaObject) this; + break; + case BINARY: + case VARBINARY: + case BLOB: + v = ValueJavaObject.getNoCopy(getBytesNoCopy()); + break; + default: + throw getDataConversionError(JAVA_OBJECT); + case NULL: + throw DbException.getInternalError(); } - case ENUM: - throw getDataConversionError(TIMESTAMP); + if 
(conversionMode != CONVERT_TO && v.getBytesNoCopy().length > targetType.getPrecision()) { + throw v.getValueTooLongException(targetType, column); } - return ValueTimestamp.parse(getString().trim(), mode); + return v; } - private ValueTimestampTimeZone convertToTimestampTimeZone() { + /** + * Converts this value to an ENUM value. May not be called on a NULL value. + * + * @param extTypeInfo + * the extended data type information + * @param provider + * the cast information provider + * @return the ENUM value + */ + public final ValueEnum convertToEnum(ExtTypeInfoEnum extTypeInfo, CastDataProvider provider) { switch (getValueType()) { - case TIME: { - ValueTimestamp ts = DateTimeUtils.normalizeTimestamp(0, ((ValueTime) this).getNanos()); - return DateTimeUtils.timestampTimeZoneFromLocalDateValueAndNanos(ts.getDateValue(), ts.getTimeNanos()); - } - case DATE: - return DateTimeUtils.timestampTimeZoneFromLocalDateValueAndNanos(((ValueDate) this).getDateValue(), 0); - case TIMESTAMP: { - ValueTimestamp ts = (ValueTimestamp) this; - return DateTimeUtils.timestampTimeZoneFromLocalDateValueAndNanos(ts.getDateValue(), ts.getTimeNanos()); + case ENUM: { + ValueEnum v = (ValueEnum) this; + if (extTypeInfo.equals(v.getEnumerators())) { + return v; + } + return extTypeInfo.getValue(v.getString(), provider); } - case ENUM: - throw getDataConversionError(TIMESTAMP_TZ); + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + case NUMERIC: + case DECFLOAT: + return extTypeInfo.getValue(getInt(), provider); + case VARCHAR: + case VARCHAR_IGNORECASE: + case CHAR: + return extTypeInfo.getValue(getString(), provider); + default: + throw getDataConversionError(ENUM); + case NULL: + throw DbException.getInternalError(); } - return ValueTimestampTimeZone.parse(getString().trim()); } - private ValueBytes convertToBytes(Mode mode) { + /** + * Converts this value to a GEOMETRY value. May not be called on a NULL + * value. 
+ * + * @param extTypeInfo + * the extended data type information, or null + * @return the GEOMETRY value + */ + public final ValueGeometry convertToGeometry(ExtTypeInfoGeometry extTypeInfo) { + ValueGeometry result; switch (getValueType()) { - case JAVA_OBJECT: - case BLOB: - return ValueBytes.getNoCopy(getBytesNoCopy()); - case UUID: case GEOMETRY: - return ValueBytes.getNoCopy(getBytes()); - case BYTE: - return ValueBytes.getNoCopy(new byte[] { getByte() }); - case SHORT: { - int x = getShort(); - return ValueBytes.getNoCopy(new byte[] { (byte) (x >> 8), (byte) x }); - } - case INT: { - byte[] b = new byte[4]; - Bits.writeInt(b, 0, getInt()); - return ValueBytes.getNoCopy(b); - } - case LONG: { - byte[] b = new byte[8]; - Bits.writeLong(b, 0, getLong()); - return ValueBytes.getNoCopy(b); - } - case ENUM: - case TIMESTAMP_TZ: - throw getDataConversionError(BYTES); - } - String s = getString(); - return ValueBytes.getNoCopy(mode != null && mode.charToBinaryInUtf8 ? s.getBytes(StandardCharsets.UTF_8) - : StringUtils.convertHexToBytes(s.trim())); - } - - private ValueString convertToString(Mode mode) { - String s; - if (getValueType() == BYTES && mode != null && mode.charToBinaryInUtf8) { - // Bugfix - Can't use the locale encoding when enabling - // charToBinaryInUtf8 in mode. - // The following two target types also are the same issue. 
- // @since 2018-07-19 little-pan - s = new String(getBytesNoCopy(), StandardCharsets.UTF_8); - } else { - s = getString(); + result = (ValueGeometry) this; + break; + case BINARY: + case VARBINARY: + case BLOB: + result = ValueGeometry.getFromEWKB(getBytesNoCopy()); + break; + case JSON: { + int srid = 0; + if (extTypeInfo != null) { + Integer s = extTypeInfo.getSrid(); + if (s != null) { + srid = s; + } + } + try { + result = ValueGeometry.get(GeoJsonUtils.geoJsonToEwkb(getBytesNoCopy(), srid)); + } catch (RuntimeException ex) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, getTraceSQL()); + } + break; } - return (ValueString) ValueString.get(s); - } - - private ValueString convertToStringIgnoreCase(Mode mode) { - String s; - if (getValueType() == BYTES && mode != null && mode.charToBinaryInUtf8) { - s = new String(getBytesNoCopy(), StandardCharsets.UTF_8); - } else { - s = getString(); + case CHAR: + case VARCHAR: + case CLOB: + case VARCHAR_IGNORECASE: + result = ValueGeometry.get(getString()); + break; + default: + throw getDataConversionError(GEOMETRY); + case NULL: + throw DbException.getInternalError(); } - return ValueStringIgnoreCase.get(s); - } - - private ValueString convertToStringFixed(Mode mode) { - String s; - if (getValueType() == BYTES && mode != null && mode.charToBinaryInUtf8) { - s = new String(getBytesNoCopy(), StandardCharsets.UTF_8); - } else { - s = getString(); + if (extTypeInfo != null) { + int type = extTypeInfo.getType(); + Integer srid = extTypeInfo.getSrid(); + if (type != 0 && result.getTypeAndDimensionSystem() != type || srid != null && result.getSRID() != srid) { + StringBuilder builder = ExtTypeInfoGeometry + .toSQL(new StringBuilder(), result.getTypeAndDimensionSystem(), result.getSRID()) + .append(" -> "); + extTypeInfo.getSQL(builder, TRACE_SQL_FLAGS); + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, builder.toString()); + } } - return ValueStringFixed.get(s); + return result; } - private ValueJavaObject 
convertToJavaObject() { + private ValueJson convertToJson(TypeInfo targetType, int conversionMode, Object column) { + ValueJson v; switch (getValueType()) { - case BYTES: + case JSON: + v = (ValueJson) this; + break; + case BOOLEAN: + v = ValueJson.get(getBoolean()); + break; + case TINYINT: + case SMALLINT: + case INTEGER: + v = ValueJson.get(getInt()); + break; + case BIGINT: + v = ValueJson.get(getLong()); + break; + case REAL: + case DOUBLE: + case NUMERIC: + case DECFLOAT: + v = ValueJson.get(getBigDecimal()); + break; + case BINARY: + case VARBINARY: case BLOB: - return ValueJavaObject.getNoCopy(null, getBytesNoCopy(), getDataHandler()); - case ENUM: + v = ValueJson.fromJson(getBytesNoCopy()); + break; + case CHAR: + case VARCHAR: + case CLOB: + case VARCHAR_IGNORECASE: + case DATE: + case TIME: + case TIME_TZ: + case UUID: + v = ValueJson.get(getString()); + break; + case TIMESTAMP: + v = ValueJson.get(((ValueTimestamp) this).getISOString()); + break; case TIMESTAMP_TZ: - throw getDataConversionError(JAVA_OBJECT); + v = ValueJson.get(((ValueTimestampTimeZone) this).getISOString()); + break; + case GEOMETRY: { + ValueGeometry vg = (ValueGeometry) this; + v = ValueJson.getInternal(GeoJsonUtils.ewkbToGeoJson(vg.getBytesNoCopy(), vg.getDimensionSystem())); + break; } - return ValueJavaObject.getNoCopy(null, StringUtils.convertHexToBytes(getString().trim()), getDataHandler()); - } - - private ValueEnum convertToEnumInternal(ExtTypeInfoEnum extTypeInfo) { - switch (getValueType()) { - case BYTE: - case SHORT: - case INT: - case LONG: - case DECIMAL: - return extTypeInfo.getValue(getInt()); - case STRING: - case STRING_IGNORECASE: - case STRING_FIXED: - return extTypeInfo.getValue(getString()); - case JAVA_OBJECT: - Object object = JdbcUtils.deserialize(getBytesNoCopy(), getDataHandler()); - if (object instanceof String) { - return extTypeInfo.getValue((String) object); - } else if (object instanceof Integer) { - return extTypeInfo.getValue((int) object); + case 
ARRAY: { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + baos.write('['); + for (Value e : ((ValueArray) this).getList()) { + JsonConstructorUtils.jsonArrayAppend(baos, e, 0); } - //$FALL-THROUGH$ + baos.write(']'); + v = ValueJson.getInternal(baos.toByteArray()); + break; } - throw getDataConversionError(ENUM); - } - - private ValueLobDb convertToBlob() { - switch (getValueType()) { - case BYTES: - return ValueLobDb.createSmallLob(Value.BLOB, getBytesNoCopy()); - case TIMESTAMP_TZ: - throw getDataConversionError(BLOB); + default: + throw getDataConversionError(JSON); } - return ValueLobDb.createSmallLob(BLOB, StringUtils.convertHexToBytes(getString().trim())); - } - - private ValueLobDb convertToClob() { - return ValueLobDb.createSmallLob(CLOB, getString().getBytes(StandardCharsets.UTF_8)); + if (conversionMode != CONVERT_TO && v.getBytesNoCopy().length > targetType.getPrecision()) { + throw v.getValueTooLongException(targetType, column); + } + return v; } - private ValueUuid convertToUuid() { + /** + * Converts this value to a UUID value. May not be called on a NULL value. 
+ * + * @return the UUID value + */ + public final ValueUuid convertToUuid() { switch (getValueType()) { - case BYTES: + case UUID: + return (ValueUuid) this; + case BINARY: + case VARBINARY: return ValueUuid.get(getBytesNoCopy()); case JAVA_OBJECT: - Object object = JdbcUtils.deserialize(getBytesNoCopy(), getDataHandler()); - if (object instanceof java.util.UUID) { - return ValueUuid.get((java.util.UUID) object); - } - //$FALL-THROUGH$ - case TIMESTAMP_TZ: + return JdbcUtils.deserializeUuid(getBytesNoCopy()); + case CHAR: + case VARCHAR: + case VARCHAR_IGNORECASE: + return ValueUuid.get(getString()); + default: throw getDataConversionError(UUID); + case NULL: + throw DbException.getInternalError(); } - return ValueUuid.get(getString()); } - private Value convertToGeometry(ExtTypeInfoGeometry extTypeInfo) { - ValueGeometry result; - switch (getValueType()) { - case BYTES: - result = ValueGeometry.getFromEWKB(getBytesNoCopy()); - break; - case JAVA_OBJECT: - Object object = JdbcUtils.deserialize(getBytesNoCopy(), getDataHandler()); - if (DataType.isGeometry(object)) { - result = ValueGeometry.getFromGeometry(object); + private ValueArray convertToArray(TypeInfo targetType, CastDataProvider provider, int conversionMode, + Object column) { + TypeInfo componentType = (TypeInfo) targetType.getExtTypeInfo(); + int valueType = getValueType(); + ValueArray v; + if (valueType == ARRAY) { + v = (ValueArray) this; + } else { + Value[] a; + switch (valueType) { + case BLOB: + a = new Value[] { ValueVarbinary.get(getBytesNoCopy()) }; + break; + case CLOB: + a = new Value[] { ValueVarchar.get(getString()) }; break; + default: + a = new Value[] { this }; } - //$FALL-THROUGH$ - case TIMESTAMP_TZ: - throw getDataConversionError(GEOMETRY); - default: - result = ValueGeometry.get(getString()); + v = ValueArray.get(a, provider); } - return extTypeInfo != null ? 
extTypeInfo.cast(result) : result; - } - - private ValueInterval convertToIntervalYearMonth(int targetType) { - switch (getValueType()) { - case Value.STRING: - case Value.STRING_IGNORECASE: - case Value.STRING_FIXED: { - String s = getString(); - try { - return (ValueInterval) IntervalUtils - .parseFormattedInterval(IntervalQualifier.valueOf(targetType - Value.INTERVAL_YEAR), s) - .convertTo(targetType); - } catch (Exception e) { - throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, e, "INTERVAL", s); + if (componentType != null) { + Value[] values = v.getList(); + int length = values.length; + loop: for (int i = 0; i < length; i++) { + Value v1 = values[i]; + Value v2 = v1.convertTo(componentType, provider, conversionMode, column); + if (v1 != v2) { + Value[] newValues = new Value[length]; + System.arraycopy(values, 0, newValues, 0, i); + newValues[i] = v2; + while (++i < length) { + newValues[i] = values[i].convertTo(componentType, provider, conversionMode, column); + } + v = ValueArray.get(componentType, newValues, provider); + break loop; + } } } - case Value.INTERVAL_YEAR: - case Value.INTERVAL_MONTH: - case Value.INTERVAL_YEAR_TO_MONTH: - return IntervalUtils.intervalFromAbsolute(IntervalQualifier.valueOf(targetType - Value.INTERVAL_YEAR), - IntervalUtils.intervalToAbsolute((ValueInterval) this)); - } - throw getDataConversionError(targetType); - } - - private ValueInterval convertToIntervalDayTime(int targetType) { - switch (getValueType()) { - case Value.STRING: - case Value.STRING_IGNORECASE: - case Value.STRING_FIXED: { - String s = getString(); - try { - return (ValueInterval) IntervalUtils - .parseFormattedInterval(IntervalQualifier.valueOf(targetType - Value.INTERVAL_YEAR), s) - .convertTo(targetType); - } catch (Exception e) { - throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, e, "INTERVAL", s); + if (conversionMode != CONVERT_TO) { + Value[] values = v.getList(); + int cardinality = values.length; + if (conversionMode == 
CAST_TO) { + int p = MathUtils.convertLongToInt(targetType.getPrecision()); + if (cardinality > p) { + v = ValueArray.get(v.getComponentType(), Arrays.copyOf(values, p), provider); + } + } else if (cardinality > targetType.getPrecision()) { + throw v.getValueTooLongException(targetType, column); } } - case Value.INTERVAL_DAY: - case Value.INTERVAL_HOUR: - case Value.INTERVAL_MINUTE: - case Value.INTERVAL_SECOND: - case Value.INTERVAL_DAY_TO_HOUR: - case Value.INTERVAL_DAY_TO_MINUTE: - case Value.INTERVAL_DAY_TO_SECOND: - case Value.INTERVAL_HOUR_TO_MINUTE: - case Value.INTERVAL_HOUR_TO_SECOND: - case Value.INTERVAL_MINUTE_TO_SECOND: - return IntervalUtils.intervalFromAbsolute(IntervalQualifier.valueOf(targetType - Value.INTERVAL_YEAR), - IntervalUtils.intervalToAbsolute((ValueInterval) this)); - } - throw getDataConversionError(targetType); + return v; } - private ValueArray convertToArray() { - Value[] a; - switch (getValueType()) { - case ROW: - a = ((ValueRow) this).getList(); - break; - case BLOB: - case CLOB: - case RESULT_SET: - a = new Value[] { ValueString.get(getString()) }; - break; - default: - a = new Value[] { this }; + private Value convertToRow(TypeInfo targetType, CastDataProvider provider, int conversionMode, + Object column) { + ValueRow v; + if (getValueType() == ROW) { + v = (ValueRow) this; + } else { + v = ValueRow.get(new Value[] { this }); } - return ValueArray.get(a); - } - - private Value convertToRow() { - Value[] a; - if (getValueType() == RESULT_SET) { - ResultInterface result = ((ValueResultSet) this).getResult(); - if (result.hasNext()) { - a = result.currentRow(); - if (result.hasNext()) { - throw DbException.get(ErrorCode.SCALAR_SUBQUERY_CONTAINS_MORE_THAN_ONE_ROW); + ExtTypeInfoRow ext = (ExtTypeInfoRow) targetType.getExtTypeInfo(); + if (ext != null) { + Value[] values = v.getList(); + int length = values.length; + Set> fields = ext.getFields(); + if (length != fields.size()) { + throw getDataConversionError(targetType); + } + 
Iterator> iter = fields.iterator(); + loop: for (int i = 0; i < length; i++) { + Value v1 = values[i]; + TypeInfo componentType = iter.next().getValue(); + Value v2 = v1.convertTo(componentType, provider, conversionMode, column); + if (v1 != v2) { + Value[] newValues = new Value[length]; + System.arraycopy(values, 0, newValues, 0, i); + newValues[i] = v2; + while (++i < length) { + newValues[i] = values[i].convertTo(componentType, provider, conversionMode, column); + } + v = ValueRow.get(targetType, newValues); + break loop; } - } else { - return ValueNull.INSTANCE; } - } else { - a = new Value[] { this }; } - return ValueRow.get(a); + return v; } - private ValueResultSet convertToResultSet() { - SimpleResult result = new SimpleResult(); - result.addColumn("X", "X", getType()); - result.addRow(this); - return ValueResultSet.get(result); + /** + * Creates new instance of the DbException for data conversion error. + * + * @param targetType Target data type. + * @return instance of the DbException. + */ + final DbException getDataConversionError(int targetType) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, getTypeName(getValueType()) + " to " + + getTypeName(targetType)); } /** * Creates new instance of the DbException for data conversion error. * - * @param targetType Target data type. + * @param targetType target data type. * @return instance of the DbException. */ - DbException getDataConversionError(int targetType) { - DataType from = DataType.getDataType(getValueType()); - DataType to = DataType.getDataType(targetType); - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, (from != null ? from.name : "type=" + getValueType()) - + " to " + (to != null ? 
to.name : "type=" + targetType)); + final DbException getDataConversionError(TypeInfo targetType) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, getTypeName(getValueType()) + " to " + + targetType.getTraceSQL()); + } + + final DbException getValueTooLongException(TypeInfo targetType, Object column) { + StringBuilder builder = new StringBuilder(); + if (column != null) { + builder.append(column).append(' '); + } + targetType.getSQL(builder, TRACE_SQL_FLAGS); + return DbException.getValueTooLongException(builder.toString(), getTraceSQL(), getType().getPrecision()); } /** @@ -1385,22 +2578,23 @@ DbException getDataConversionError(int targetType) { * * @param v the other value * @param mode the compare mode + * @param provider the cast information provider * @return 0 if both values are equal, -1 if the other value is smaller, and * 1 otherwise */ - public abstract int compareTypeSafe(Value v, CompareMode mode); + public abstract int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider); /** * Compare this value against another value using the specified compare * mode. 
* * @param v the other value - * @param databaseMode the database mode + * @param provider the cast information provider * @param compareMode the compare mode * @return 0 if both values are equal, -1 if this value is smaller, and * 1 otherwise */ - public final int compareTo(Value v, Mode databaseMode, CompareMode compareMode) { + public final int compareTo(Value v, CastDataProvider provider, CompareMode compareMode) { if (this == v) { return 0; } @@ -1409,21 +2603,34 @@ public final int compareTo(Value v, Mode databaseMode, CompareMode compareMode) } else if (v == ValueNull.INSTANCE) { return 1; } + return compareToNotNullable(v, provider, compareMode); + } + + private int compareToNotNullable(Value v, CastDataProvider provider, CompareMode compareMode) { Value l = this; int leftType = l.getValueType(); int rightType = v.getValueType(); - if (leftType != rightType || leftType == Value.ENUM) { - int dataType = Value.getHigherOrder(leftType, rightType); - if (dataType == Value.ENUM) { + if (leftType != rightType || leftType == ENUM) { + int dataType = getHigherOrder(leftType, rightType); + if (dataType == ENUM) { ExtTypeInfoEnum enumerators = ExtTypeInfoEnum.getEnumeratorsForBinaryOperation(l, v); - l = l.convertToEnum(enumerators); - v = v.convertToEnum(enumerators); + l = l.convertToEnum(enumerators, provider); + v = v.convertToEnum(enumerators, provider); } else { - l = l.convertTo(dataType, databaseMode); - v = v.convertTo(dataType, databaseMode); + if (dataType <= BLOB) { + if (dataType <= CLOB) { + if (leftType == CHAR || rightType == CHAR) { + dataType = CHAR; + } + } else if (dataType >= BINARY && (leftType == BINARY || rightType == BINARY)) { + dataType = BINARY; + } + } + l = l.convertTo(dataType, provider); + v = v.convertTo(dataType, provider); } } - return l.compareTypeSafe(v, compareMode); + return l.compareTypeSafe(v, compareMode, provider); } /** @@ -1432,31 +2639,18 @@ public final int compareTo(Value v, Mode databaseMode, CompareMode compareMode) * 
* @param v the other value * @param forEquality perform only check for equality - * @param databaseMode the database mode + * @param provider the cast information provider * @param compareMode the compare mode * @return 0 if both values are equal, -1 if this value is smaller, 1 * if other value is larger, {@link Integer#MIN_VALUE} if order is * not defined due to NULL comparison */ - public int compareWithNull(Value v, boolean forEquality, Mode databaseMode, CompareMode compareMode) { + public int compareWithNull(Value v, boolean forEquality, CastDataProvider provider, + CompareMode compareMode) { if (this == ValueNull.INSTANCE || v == ValueNull.INSTANCE) { return Integer.MIN_VALUE; } - Value l = this; - int leftType = l.getValueType(); - int rightType = v.getValueType(); - if (leftType != rightType || leftType == Value.ENUM) { - int dataType = Value.getHigherOrder(leftType, rightType); - if (dataType == Value.ENUM) { - ExtTypeInfoEnum enumerators = ExtTypeInfoEnum.getEnumeratorsForBinaryOperation(l, v); - l = l.convertToEnum(enumerators); - v = v.convertToEnum(enumerators); - } else { - l = l.convertTo(dataType, databaseMode); - v = v.convertTo(dataType, databaseMode); - } - } - return l.compareTypeSafe(v, compareMode); + return compareToNotNullable(v, provider, compareMode); } /** @@ -1468,32 +2662,6 @@ public boolean containsNull() { return false; } - /** - * Convert the scale. - * - * @param onlyToSmallerScale if the scale should not reduced - * @param targetScale the requested scale - * @return the value - */ - @SuppressWarnings("unused") - public Value convertScale(boolean onlyToSmallerScale, int targetScale) { - return this; - } - - /** - * Convert the precision to the requested value. The precision of the - * returned value may be somewhat larger than requested, because values with - * a fixed precision are not truncated. 
- * - * @param precision the new precision - * @param force true if losing numeric precision is allowed - * @return the new value - */ - @SuppressWarnings("unused") - public Value convertPrecision(long precision, boolean force) { - return this; - } - private static byte convertToByte(long x, Object column) { if (x > Byte.MAX_VALUE || x < Byte.MIN_VALUE) { throw DbException.get( @@ -1537,7 +2705,7 @@ private static long convertToLong(double x, Object column) { private static long convertToLong(BigDecimal x, Object column) { if (x.compareTo(MAX_LONG_DECIMAL) > 0 || - x.compareTo(Value.MIN_LONG_DECIMAL) < 0) { + x.compareTo(MIN_LONG_DECIMAL) < 0) { throw DbException.get( ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_2, x.toString(), getColumnName(column)); } @@ -1548,58 +2716,6 @@ private static String getColumnName(Object column) { return column == null ? "" : column.toString(); } - /** - * Copy a large value, to be used in the given table. For values that are - * kept fully in memory this method has no effect. - * - * @param handler the data handler - * @param tableId the table where this object is used - * @return the new value or itself - */ - @SuppressWarnings("unused") - public Value copy(DataHandler handler, int tableId) { - return this; - } - - /** - * Check if this value is linked to a specific table. For values that are - * kept fully in memory, this method returns false. - * - * @return true if it is - */ - public boolean isLinkedToTable() { - return false; - } - - /** - * Remove the underlying resource, if any. For values that are kept fully in - * memory this method has no effect. - */ - public void remove() { - // nothing to do - } - - /** - * Check if the precision is smaller or equal than the given precision. 
- * - * @param precision the maximum precision - * @return true if the precision of this value is smaller or equal to the - * given precision - */ - public boolean checkPrecision(long precision) { - return getType().getPrecision() <= precision; - } - - /** - * Get a medium size SQL expression for debugging or tracing. If the - * precision is too large, only a subset of the value is returned. - * - * @return the SQL expression - */ - public String getTraceSQL() { - return getSQL(new StringBuilder()).toString(); - } - @Override public String toString() { return getTraceSQL(); @@ -1613,67 +2729,53 @@ public String toString() { * @return the exception */ protected final DbException getUnsupportedExceptionForOperation(String op) { - return DbException.getUnsupportedException( - DataType.getDataType(getValueType()).name + " " + op); - } - - /** - * Get the table (only for LOB object). - * - * @return the table id - */ - public int getTableId() { - return 0; + return DbException.getUnsupportedException(getTypeName(getValueType()) + ' ' + op); } /** - * Get the byte array. + * Returns length of this value in characters. * - * @return the byte array + * @return length of this value in characters + * @throws NullPointerException if this value is {@code NULL} */ - public byte[] getSmall() { - return null; + public long charLength() { + return getString().length(); } /** - * Copy this value to a temporary file if necessary. + * Returns length of this value in bytes. * - * @return the new value + * @return length of this value in bytes + * @throws NullPointerException if this value is {@code NULL} */ - public Value copyToTemp() { - return this; + public long octetLength() { + return getBytesNoCopy().length; } /** - * Create an independent copy of this value if needed, that will be bound to - * a result. If the original row is removed, this copy is still readable. + * Returns whether this value {@code IS TRUE}. 
* - * @return the value (this for small objects) + * @return {@code true} if it is. For {@code BOOLEAN} values returns + * {@code true} for {@code TRUE} and {@code false} for {@code FALSE} + * and {@code UNKNOWN} ({@code NULL}). + * @see #getBoolean() + * @see #isFalse() */ - public Value copyToResult() { - return this; + public final boolean isTrue() { + return this != ValueNull.INSTANCE ? getBoolean() : false; } /** - * Returns result for result set value, or single-row result with this value - * in column X for other values. + * Returns whether this value {@code IS FALSE}. * - * @return result - */ - public ResultInterface getResult() { - SimpleResult rs = new SimpleResult(); - rs.addColumn("X", "X", getType()); - rs.addRow(this); - return rs; - } - - /** - * Return the data handler for the values that support it - * (actually only Java objects). - * @return the data handler - */ - protected DataHandler getDataHandler() { - return null; + * @return {@code true} if it is. For {@code BOOLEAN} values returns + * {@code true} for {@code FALSE} and {@code false} for {@code TRUE} + * and {@code UNKNOWN} ({@code NULL}). + * @see #getBoolean() + * @see #isTrue() + */ + public final boolean isFalse() { + return this != ValueNull.INSTANCE && !getBoolean(); } } diff --git a/h2/src/main/org/h2/value/ValueArray.java b/h2/src/main/org/h2/value/ValueArray.java index aefa29742b..d6ec12b0bb 100644 --- a/h2/src/main/org/h2/value/ValueArray.java +++ b/h2/src/main/org/h2/value/ValueArray.java @@ -1,31 +1,38 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.value; -import java.lang.reflect.Array; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.Arrays; - -import org.h2.engine.SysProperties; +import org.h2.engine.CastDataProvider; +import org.h2.engine.Constants; +import org.h2.message.DbException; /** * Implementation of the ARRAY data type. */ -public class ValueArray extends ValueCollectionBase { +public final class ValueArray extends ValueCollectionBase { /** * Empty array. */ - private static final Object EMPTY = get(new Value[0]); + public static final ValueArray EMPTY = get(TypeInfo.TYPE_NULL, Value.EMPTY_VALUES, null); + + private TypeInfo type; - private final Class componentType; + private TypeInfo componentType; - private ValueArray(Class componentType, Value[] list) { + private ValueArray(TypeInfo componentType, Value[] list, CastDataProvider provider) { super(list); + int length = list.length; + if (length > Constants.MAX_ARRAY_CARDINALITY) { + String typeName = getTypeName(getValueType()); + throw DbException.getValueTooLongException(typeName, typeName, length); + } + for (int i = 0; i < length; i++) { + list[i] = list[i].castTo(componentType, provider); + } this.componentType = componentType; } @@ -34,31 +41,34 @@ private ValueArray(Class componentType, Value[] list) { * Do not clone the data. * * @param list the value array + * @param provider the cast information provider * @return the value */ - public static ValueArray get(Value[] list) { - return new ValueArray(Object.class, list); + public static ValueArray get(Value[] list, CastDataProvider provider) { + return new ValueArray(TypeInfo.getHigherType(list), list, provider); } /** * Get or create a array value for the given value array. * Do not clone the data. 
* - * @param componentType the array class (null for Object[]) + * @param componentType the type of elements, or {@code null} * @param list the value array + * @param provider the cast information provider * @return the value */ - public static ValueArray get(Class componentType, Value[] list) { - return new ValueArray(componentType, list); + public static ValueArray get(TypeInfo componentType, Value[] list, CastDataProvider provider) { + return new ValueArray(componentType, list, provider); } - /** - * Returns empty array. - * - * @return empty array - */ - public static ValueArray getEmpty() { - return (ValueArray) EMPTY; + @Override + public TypeInfo getType() { + TypeInfo type = this.type; + if (type == null) { + TypeInfo componentType = getComponentType(); + this.type = type = TypeInfo.getTypeInfo(getValueType(), values.length, 0, componentType); + } + return type; } @Override @@ -66,7 +76,7 @@ public int getValueType() { return ARRAY; } - public Class getComponentType() { + public TypeInfo getComponentType() { return componentType; } @@ -83,7 +93,7 @@ public String getString() { } @Override - public int compareTypeSafe(Value o, CompareMode mode) { + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { ValueArray v = (ValueArray) o; if (values == v.values) { return 0; @@ -94,7 +104,7 @@ public int compareTypeSafe(Value o, CompareMode mode) { for (int i = 0; i < len; i++) { Value v1 = values[i]; Value v2 = v.values[i]; - int comp = v1.compareTo(v2, /* TODO */ null, mode); + int comp = v1.compareTo(v2, provider, mode); if (comp != 0) { return comp; } @@ -103,54 +113,18 @@ public int compareTypeSafe(Value o, CompareMode mode) { } @Override - public Object getObject() { - int len = values.length; - Object[] list = (Object[]) Array.newInstance(componentType, len); - for (int i = 0; i < len; i++) { - final Value value = values[i]; - if (!SysProperties.OLD_RESULT_SET_GET_OBJECT) { - final int type = value.getValueType(); - if (type == 
Value.BYTE || type == Value.SHORT) { - list[i] = value.getInt(); - continue; - } - } - list[i] = value.getObject(); - } - return list; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) throws SQLException { - prep.setArray(parameterIndex, prep.getConnection().createArrayOf("NULL", (Object[]) getObject())); - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { builder.append("ARRAY ["); int length = values.length; for (int i = 0; i < length; i++) { if (i > 0) { builder.append(", "); } - values[i].getSQL(builder); + values[i].getSQL(builder, sqlFlags); } return builder.append(']'); } - @Override - public String getTraceSQL() { - StringBuilder builder = new StringBuilder("["); - for (int i = 0; i < values.length; i++) { - if (i > 0) { - builder.append(", "); - } - Value v = values[i]; - builder.append(v == null ? "null" : v.getTraceSQL()); - } - return builder.append(']').toString(); - } - @Override public boolean equals(Object other) { if (!(other instanceof ValueArray)) { @@ -172,33 +146,4 @@ public boolean equals(Object other) { return true; } - @Override - public Value convertPrecision(long precision, boolean force) { - if (!force) { - return this; - } - int length = values.length; - Value[] newValues = new Value[length]; - int i = 0; - boolean modified = false; - for (; i < length; i++) { - Value old = values[i]; - Value v = old.convertPrecision(precision, true); - if (v != old) { - modified = true; - } - // empty byte arrays or strings have precision 0 - // they count as precision 1 here - precision -= Math.max(1, v.getType().getPrecision()); - if (precision < 0) { - break; - } - newValues[i] = v; - } - if (i < length) { - return get(componentType, Arrays.copyOf(newValues, i)); - } - return modified ? 
get(componentType, newValues) : this; - } - } diff --git a/h2/src/main/org/h2/value/ValueBigDecimalBase.java b/h2/src/main/org/h2/value/ValueBigDecimalBase.java new file mode 100644 index 0000000000..5c027b0ad1 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueBigDecimalBase.java @@ -0,0 +1,37 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.math.BigDecimal; + +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.message.DbException; + +/** + * Base class for BigDecimal-based values. + */ +abstract class ValueBigDecimalBase extends Value { + + final BigDecimal value; + + TypeInfo type; + + ValueBigDecimalBase(BigDecimal value) { + if (value != null) { + if (value.getClass() != BigDecimal.class) { + throw DbException.get(ErrorCode.INVALID_CLASS_2, BigDecimal.class.getName(), + value.getClass().getName()); + } + int length = value.precision(); + if (length > Constants.MAX_NUMERIC_PRECISION) { + throw DbException.getValueTooLongException(getTypeName(getValueType()), value.toString(), length); + } + } + this.value = value; + } + +} diff --git a/h2/src/main/org/h2/value/ValueLong.java b/h2/src/main/org/h2/value/ValueBigint.java similarity index 58% rename from h2/src/main/org/h2/value/ValueLong.java rename to h2/src/main/org/h2/value/ValueBigint.java index 262953a0f4..871aed45d8 100644 --- a/h2/src/main/org/h2/value/ValueLong.java +++ b/h2/src/main/org/h2/value/ValueBigint.java @@ -1,31 +1,32 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.value; +import java.math.BigDecimal; import java.math.BigInteger; -import java.sql.PreparedStatement; -import java.sql.SQLException; import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; +import org.h2.util.Bits; /** * Implementation of the BIGINT data type. */ -public class ValueLong extends Value { +public final class ValueBigint extends Value { /** * The smallest {@code ValueLong} value. */ - public static final ValueLong MIN = get(Long.MIN_VALUE); + public static final ValueBigint MIN = get(Long.MIN_VALUE); /** * The largest {@code ValueLong} value. */ - public static final ValueLong MAX = get(Long.MAX_VALUE); + public static final ValueBigint MAX = get(Long.MAX_VALUE); /** * The largest Long value, as a BigInteger. @@ -33,36 +34,41 @@ public class ValueLong extends Value { public static final BigInteger MAX_BI = BigInteger.valueOf(Long.MAX_VALUE); /** - * The precision in digits. + * The precision in bits. */ - public static final int PRECISION = 19; + static final int PRECISION = 64; /** - * The maximum display size of a long. - * Example: 9223372036854775808 + * The approximate precision in decimal digits. + */ + public static final int DECIMAL_PRECISION = 19; + + /** + * The maximum display size of a BIGINT. 
+ * Example: -9223372036854775808 */ public static final int DISPLAY_SIZE = 20; private static final int STATIC_SIZE = 100; - private static final ValueLong[] STATIC_CACHE; + private static final ValueBigint[] STATIC_CACHE; private final long value; static { - STATIC_CACHE = new ValueLong[STATIC_SIZE]; + STATIC_CACHE = new ValueBigint[STATIC_SIZE]; for (int i = 0; i < STATIC_SIZE; i++) { - STATIC_CACHE[i] = new ValueLong(i); + STATIC_CACHE[i] = new ValueBigint(i); } } - private ValueLong(long value) { + private ValueBigint(long value) { this.value = value; } @Override public Value add(Value v) { long x = value; - long y = ((ValueLong) v).value; + long y = ((ValueBigint) v).value; long result = x + y; /* * If signs of both summands are different from the sign of the sum there is an @@ -71,7 +77,7 @@ public Value add(Value v) { if (((x ^ result) & (y ^ result)) < 0) { throw getOverflow(); } - return ValueLong.get(result); + return ValueBigint.get(result); } @Override @@ -84,7 +90,7 @@ public Value negate() { if (value == Long.MIN_VALUE) { throw getOverflow(); } - return ValueLong.get(-value); + return ValueBigint.get(-value); } private DbException getOverflow() { @@ -95,7 +101,7 @@ private DbException getOverflow() { @Override public Value subtract(Value v) { long x = value; - long y = ((ValueLong) v).value; + long y = ((ValueBigint) v).value; long result = x - y; /* * If minuend and subtrahend have different signs and minuend and difference @@ -104,13 +110,13 @@ public Value subtract(Value v) { if (((x ^ y) & (x ^ result)) < 0) { throw getOverflow(); } - return ValueLong.get(result); + return ValueBigint.get(result); } @Override public Value multiply(Value v) { long x = value; - long y = ((ValueLong) v).value; + long y = ((ValueBigint) v).value; long result = x * y; // Check whether numbers are large enough to overflow and second value != 0 if ((Math.abs(x) | Math.abs(y)) >>> 31 != 0 && y != 0 @@ -120,44 +126,54 @@ public Value multiply(Value v) { || x == 
Long.MIN_VALUE && y == -1)) { throw getOverflow(); } - return ValueLong.get(result); + return ValueBigint.get(result); } @Override - public Value divide(Value v) { - long y = ((ValueLong) v).value; + public Value divide(Value v, TypeInfo quotientType) { + long y = ((ValueBigint) v).value; if (y == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); } long x = value; if (x == Long.MIN_VALUE && y == -1) { throw getOverflow(); } - return ValueLong.get(x / y); + return ValueBigint.get(x / y); } @Override public Value modulus(Value v) { - ValueLong other = (ValueLong) v; + ValueBigint other = (ValueBigint) v; if (other.value == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); } - return ValueLong.get(this.value % other.value); + return ValueBigint.get(this.value % other.value); } @Override - public StringBuilder getSQL(StringBuilder builder) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0 && value == (int) value) { + return builder.append("CAST(").append(value).append(" AS BIGINT)"); + } return builder.append(value); } @Override public TypeInfo getType() { - return TypeInfo.TYPE_LONG; + return TypeInfo.TYPE_BIGINT; } @Override public int getValueType() { - return LONG; + return BIGINT; + } + + @Override + public byte[] getBytes() { + byte[] b = new byte[8]; + Bits.writeLong(b, 0, getLong()); + return b; } @Override @@ -166,47 +182,51 @@ public long getLong() { } @Override - public int compareTypeSafe(Value o, CompareMode mode) { - return Long.compare(value, ((ValueLong) o).value); + public BigDecimal getBigDecimal() { + return BigDecimal.valueOf(value); } @Override - public String getString() { - return Long.toString(value); + public float getFloat() { + return value; } @Override - public int hashCode() { - return (int) (value ^ (value >> 
32)); + public double getDouble() { + return value; } @Override - public Object getObject() { - return value; + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { + return Long.compare(value, ((ValueBigint) o).value); + } + + @Override + public String getString() { + return Long.toString(value); } @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setLong(parameterIndex, value); + public int hashCode() { + return (int) (value ^ (value >> 32)); } /** - * Get or create a long value for the given long. + * Get or create a BIGINT value for the given long. * * @param i the long * @return the value */ - public static ValueLong get(long i) { + public static ValueBigint get(long i) { if (i >= 0 && i < STATIC_SIZE) { return STATIC_CACHE[(int) i]; } - return (ValueLong) Value.cache(new ValueLong(i)); + return (ValueBigint) Value.cache(new ValueBigint(i)); } @Override public boolean equals(Object other) { - return other instanceof ValueLong && value == ((ValueLong) other).value; + return other instanceof ValueBigint && value == ((ValueBigint) other).value; } } diff --git a/h2/src/main/org/h2/value/ValueBinary.java b/h2/src/main/org/h2/value/ValueBinary.java new file mode 100644 index 0000000000..ef160e4665 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueBinary.java @@ -0,0 +1,90 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.nio.charset.StandardCharsets; +import org.h2.engine.Constants; +import org.h2.engine.SysProperties; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.util.Utils; + +/** + * Implementation of the BINARY data type. + */ +public final class ValueBinary extends ValueBytesBase { + + /** + * Associated TypeInfo. 
+ */ + private TypeInfo type; + + protected ValueBinary(byte[] value) { + super(value); + int length = value.length; + if (length > Constants.MAX_STRING_LENGTH) { + throw DbException.getValueTooLongException(getTypeName(getValueType()), + StringUtils.convertBytesToHex(value, 41), length); + } + } + + /** + * Get or create a VARBINARY value for the given byte array. + * Clone the data. + * + * @param b the byte array + * @return the value + */ + public static ValueBinary get(byte[] b) { + return getNoCopy(Utils.cloneByteArray(b)); + } + + /** + * Get or create a VARBINARY value for the given byte array. + * Do not clone the date. + * + * @param b the byte array + * @return the value + */ + public static ValueBinary getNoCopy(byte[] b) { + ValueBinary obj = new ValueBinary(b); + if (b.length > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { + return obj; + } + return (ValueBinary) Value.cache(obj); + } + + @Override + public TypeInfo getType() { + TypeInfo type = this.type; + if (type == null) { + long precision = value.length; + this.type = type = new TypeInfo(BINARY, precision, 0, null); + } + return type; + } + + @Override + public int getValueType() { + return BINARY; + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0) { + int length = value.length; + return super.getSQL(builder.append("CAST("), sqlFlags).append(" AS BINARY(") + .append(length > 0 ? length : 1).append("))"); + } + return super.getSQL(builder, sqlFlags); + } + + @Override + public String getString() { + return new String(value, StandardCharsets.UTF_8); + } + +} diff --git a/h2/src/main/org/h2/value/ValueBlob.java b/h2/src/main/org/h2/value/ValueBlob.java new file mode 100644 index 0000000000..86879f5ee3 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueBlob.java @@ -0,0 +1,329 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.nio.charset.StandardCharsets; + +import org.h2.engine.CastDataProvider; +import org.h2.engine.Constants; +import org.h2.engine.SysProperties; +import org.h2.message.DbException; +import org.h2.store.DataHandler; +import org.h2.store.FileStore; +import org.h2.store.FileStoreOutputStream; +import org.h2.store.LobStorageInterface; +import org.h2.util.Bits; +import org.h2.util.IOUtils; +import org.h2.util.MathUtils; +import org.h2.util.StringUtils; +import org.h2.util.Utils; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; +import org.h2.value.lob.LobDataFetchOnDemand; +import org.h2.value.lob.LobDataFile; +import org.h2.value.lob.LobDataInMemory; + +/** + * Implementation of the BINARY LARGE OBJECT data type. + */ +public final class ValueBlob extends ValueLob { + + /** + * Creates a small BLOB value that can be stored in the row directly. + * + * @param data + * the data + * @return the BLOB + */ + public static ValueBlob createSmall(byte[] data) { + return new ValueBlob(new LobDataInMemory(data), data.length); + } + + /** + * Create a temporary BLOB value from a stream. 
+ * + * @param in + * the input stream + * @param length + * the number of characters to read, or -1 for no limit + * @param handler + * the data handler + * @return the lob value + */ + public static ValueBlob createTempBlob(InputStream in, long length, DataHandler handler) { + try { + long remaining = Long.MAX_VALUE; + if (length >= 0 && length < remaining) { + remaining = length; + } + int len = ValueLob.getBufferSize(handler, remaining); + byte[] buff; + if (len >= Integer.MAX_VALUE) { + buff = IOUtils.readBytesAndClose(in, -1); + len = buff.length; + } else { + buff = Utils.newBytes(len); + len = IOUtils.readFully(in, buff, len); + } + if (len <= handler.getMaxLengthInplaceLob()) { + return ValueBlob.createSmall(Utils.copyBytes(buff, len)); + } + return createTemporary(handler, buff, len, in, remaining); + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } + } + + /** + * Create a BLOB in a temporary file. + */ + private static ValueBlob createTemporary(DataHandler handler, byte[] buff, int len, InputStream in, long remaining) + throws IOException { + String fileName = ValueLob.createTempLobFileName(handler); + FileStore tempFile = handler.openFile(fileName, "rw", false); + tempFile.autoDelete(); + long tmpPrecision = 0; + try (FileStoreOutputStream out = new FileStoreOutputStream(tempFile, null)) { + while (true) { + tmpPrecision += len; + out.write(buff, 0, len); + remaining -= len; + if (remaining <= 0) { + break; + } + len = ValueLob.getBufferSize(handler, remaining); + len = IOUtils.readFully(in, buff, len); + if (len <= 0) { + break; + } + } + } + return new ValueBlob(new LobDataFile(handler, fileName, tempFile), tmpPrecision); + } + + public ValueBlob(LobData lobData, long octetLength) { + super(lobData, octetLength, -1L); + } + + @Override + public int getValueType() { + return BLOB; + } + + @Override + public String getString() { + long p = charLength; + if (p >= 0L) { + if (p > Constants.MAX_STRING_LENGTH) { + throw 
getStringTooLong(p); + } + return readString((int) p); + } + // 1 Java character may be encoded with up to 3 bytes + if (octetLength > Constants.MAX_STRING_LENGTH * 3) { + throw getStringTooLong(charLength()); + } + String s; + if (lobData instanceof LobDataInMemory) { + s = new String(((LobDataInMemory) lobData).getSmall(), StandardCharsets.UTF_8); + } else { + s = readString(Integer.MAX_VALUE); + } + charLength = p = s.length(); + if (p > Constants.MAX_STRING_LENGTH) { + throw getStringTooLong(p); + } + return s; + } + + @Override + byte[] getBytesInternal() { + if (octetLength > Constants.MAX_STRING_LENGTH) { + throw getBinaryTooLong(octetLength); + } + return readBytes((int) octetLength); + } + + @Override + public InputStream getInputStream() { + return lobData.getInputStream(octetLength); + } + + @Override + public InputStream getInputStream(long oneBasedOffset, long length) { + long p = octetLength; + return rangeInputStream(lobData.getInputStream(p), oneBasedOffset, length, p); + } + + @Override + public Reader getReader(long oneBasedOffset, long length) { + return rangeReader(getReader(), oneBasedOffset, length, -1L); + } + + @Override + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + if (v == this) { + return 0; + } + ValueBlob v2 = (ValueBlob) v; + LobData lobData = this.lobData, lobData2 = v2.lobData; + if (lobData.getClass() == lobData2.getClass()) { + if (lobData instanceof LobDataInMemory) { + return Bits.compareNotNullUnsigned(((LobDataInMemory) lobData).getSmall(), + ((LobDataInMemory) lobData2).getSmall()); + } else if (lobData instanceof LobDataDatabase) { + if (((LobDataDatabase) lobData).getLobId() == ((LobDataDatabase) lobData2).getLobId()) { + return 0; + } + } else if (lobData instanceof LobDataFetchOnDemand) { + if (((LobDataFetchOnDemand) lobData).getLobId() == ((LobDataFetchOnDemand) lobData2).getLobId()) { + return 0; + } + } + } + return compare(this, v2); + } + + /** + * Compares two BLOB values 
directly. + * + * @param v1 + * first BLOB value + * @param v2 + * second BLOB value + * @return result of comparison + */ + private static int compare(ValueBlob v1, ValueBlob v2) { + long minPrec = Math.min(v1.octetLength, v2.octetLength); + try (InputStream is1 = v1.getInputStream(); InputStream is2 = v2.getInputStream()) { + byte[] buf1 = new byte[BLOCK_COMPARISON_SIZE]; + byte[] buf2 = new byte[BLOCK_COMPARISON_SIZE]; + for (; minPrec >= BLOCK_COMPARISON_SIZE; minPrec -= BLOCK_COMPARISON_SIZE) { + if (IOUtils.readFully(is1, buf1, BLOCK_COMPARISON_SIZE) != BLOCK_COMPARISON_SIZE + || IOUtils.readFully(is2, buf2, BLOCK_COMPARISON_SIZE) != BLOCK_COMPARISON_SIZE) { + throw DbException.getUnsupportedException("Invalid LOB"); + } + int cmp = Bits.compareNotNullUnsigned(buf1, buf2); + if (cmp != 0) { + return cmp; + } + } + for (;;) { + int c1 = is1.read(), c2 = is2.read(); + if (c1 < 0) { + return c2 < 0 ? 0 : -1; + } + if (c2 < 0) { + return 1; + } + if (c1 != c2) { + return (c1 & 0xFF) < (c2 & 0xFF) ? 
-1 : 1; + } + } + } catch (IOException ex) { + throw DbException.convert(ex); + } + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & REPLACE_LOBS_FOR_TRACE) != 0 + && (!(lobData instanceof LobDataInMemory) || octetLength > SysProperties.MAX_TRACE_DATA_LENGTH)) { + builder.append("CAST(REPEAT(CHAR(0), ").append(octetLength).append(") AS BINARY VARYING"); + LobDataDatabase lobDb = (LobDataDatabase) lobData; + builder.append(" /* table: ").append(lobDb.getTableId()).append(" id: ").append(lobDb.getLobId()) + .append(" */)"); + } else { + if ((sqlFlags & (REPLACE_LOBS_FOR_TRACE | NO_CASTS)) == 0) { + builder.append("CAST(X'"); + StringUtils.convertBytesToHex(builder, getBytesNoCopy()).append("' AS BINARY LARGE OBJECT(") + .append(octetLength).append("))"); + } else { + builder.append("X'"); + StringUtils.convertBytesToHex(builder, getBytesNoCopy()).append('\''); + } + } + return builder; + } + + /** + * Convert the precision to the requested value. 
+ * + * @param precision + * the new precision + * @return the truncated or this value + */ + ValueBlob convertPrecision(long precision) { + if (this.octetLength <= precision) { + return this; + } + ValueBlob lob; + DataHandler handler = lobData.getDataHandler(); + if (handler != null) { + lob = createTempBlob(getInputStream(), precision, handler); + } else { + try { + lob = createSmall(IOUtils.readBytesAndClose(getInputStream(), MathUtils.convertLongToInt(precision))); + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } + } + return lob; + } + + @Override + public ValueLob copy(DataHandler database, int tableId) { + if (lobData instanceof LobDataInMemory) { + byte[] small = ((LobDataInMemory) lobData).getSmall(); + if (small.length > database.getMaxLengthInplaceLob()) { + LobStorageInterface s = database.getLobStorage(); + ValueBlob v = s.createBlob(getInputStream(), octetLength); + ValueLob v2 = v.copy(database, tableId); + v.remove(); + return v2; + } + return this; + } else if (lobData instanceof LobDataDatabase) { + return database.getLobStorage().copyLob(this, tableId); + } else { + throw new UnsupportedOperationException(); + } + } + + @Override + public long charLength() { + long p = charLength; + if (p < 0L) { + if (lobData instanceof LobDataInMemory) { + p = new String(((LobDataInMemory) lobData).getSmall(), StandardCharsets.UTF_8).length(); + } else { + try (Reader r = getReader()) { + p = 0L; + for (;;) { + p += r.skip(Long.MAX_VALUE); + if (r.read() < 0) { + break; + } + p++; + } + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } + } + charLength = p; + } + return p; + } + + @Override + public long octetLength() { + return octetLength; + } + +} diff --git a/h2/src/main/org/h2/value/ValueBoolean.java b/h2/src/main/org/h2/value/ValueBoolean.java index ac44c3bac7..daac8730a0 100644 --- a/h2/src/main/org/h2/value/ValueBoolean.java +++ b/h2/src/main/org/h2/value/ValueBoolean.java @@ -1,17 +1,18 @@ 
/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; +import java.math.BigDecimal; + +import org.h2.engine.CastDataProvider; /** * Implementation of the BOOLEAN data type. */ -public class ValueBoolean extends Value { +public final class ValueBoolean extends Value { /** * The precision in digits. @@ -57,7 +58,7 @@ public int getMemory() { } @Override - public StringBuilder getSQL(StringBuilder builder) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { return builder.append(getString()); } @@ -67,34 +68,58 @@ public String getString() { } @Override - public Value negate() { - return value ? FALSE : TRUE; + public boolean getBoolean() { + return value; } @Override - public boolean getBoolean() { - return value; + public byte getByte() { + return value ? (byte) 1 : (byte) 0; } @Override - public int compareTypeSafe(Value o, CompareMode mode) { - return Boolean.compare(value, ((ValueBoolean) o).value); + public short getShort() { + return value ? (short) 1 : (short) 0; } @Override - public int hashCode() { + public int getInt() { return value ? 1 : 0; } @Override - public Object getObject() { - return value; + public long getLong() { + return value ? 1L : 0L; + } + + @Override + public BigDecimal getBigDecimal() { + return value ? BigDecimal.ONE : BigDecimal.ZERO; + } + + @Override + public float getFloat() { + return value ? 1f : 0f; + } + + @Override + public double getDouble() { + return value ? 1d : 0d; } @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setBoolean(parameterIndex, value); + public Value negate() { + return value ? 
FALSE : TRUE; + } + + @Override + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { + return Boolean.compare(value, ((ValueBoolean) o).value); + } + + @Override + public int hashCode() { + return value ? 1 : 0; } /** diff --git a/h2/src/main/org/h2/value/ValueByte.java b/h2/src/main/org/h2/value/ValueByte.java deleted file mode 100644 index 3bd1fb2e0a..0000000000 --- a/h2/src/main/org/h2/value/ValueByte.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.value; - -import java.sql.PreparedStatement; -import java.sql.SQLException; - -import org.h2.api.ErrorCode; -import org.h2.message.DbException; - -/** - * Implementation of the BYTE data type. - */ -public class ValueByte extends Value { - - /** - * The precision in digits. - */ - static final int PRECISION = 3; - - /** - * The display size for a byte. 
- * Example: -127 - */ - static final int DISPLAY_SIZE = 4; - - private final byte value; - - private ValueByte(byte value) { - this.value = value; - } - - @Override - public Value add(Value v) { - ValueByte other = (ValueByte) v; - return checkRange(value + other.value); - } - - private static ValueByte checkRange(int x) { - if ((byte) x != x) { - throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, - Integer.toString(x)); - } - return ValueByte.get((byte) x); - } - - @Override - public int getSignum() { - return Integer.signum(value); - } - - @Override - public Value negate() { - return checkRange(-(int) value); - } - - @Override - public Value subtract(Value v) { - ValueByte other = (ValueByte) v; - return checkRange(value - other.value); - } - - @Override - public Value multiply(Value v) { - ValueByte other = (ValueByte) v; - return checkRange(value * other.value); - } - - @Override - public Value divide(Value v) { - ValueByte other = (ValueByte) v; - if (other.value == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); - } - return checkRange(value / other.value); - } - - @Override - public Value modulus(Value v) { - ValueByte other = (ValueByte) v; - if (other.value == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); - } - return ValueByte.get((byte) (value % other.value)); - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { - return builder.append(value); - } - - @Override - public TypeInfo getType() { - return TypeInfo.TYPE_BYTE; - } - - @Override - public int getValueType() { - return BYTE; - } - - @Override - public byte getByte() { - return value; - } - - @Override - public int getInt() { - return value; - } - - @Override - public int compareTypeSafe(Value o, CompareMode mode) { - return Integer.compare(value, ((ValueByte) o).value); - } - - @Override - public String getString() { - return Integer.toString(value); - } - - @Override - public int hashCode() { - return value; - } - - 
@Override - public Object getObject() { - return value; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setByte(parameterIndex, value); - } - - /** - * Get or create byte value for the given byte. - * - * @param i the byte - * @return the value - */ - public static ValueByte get(byte i) { - return (ValueByte) Value.cache(new ValueByte(i)); - } - - @Override - public boolean equals(Object other) { - return other instanceof ValueByte && value == ((ValueByte) other).value; - } - -} diff --git a/h2/src/main/org/h2/value/ValueBytes.java b/h2/src/main/org/h2/value/ValueBytes.java deleted file mode 100644 index 5f7d3b5aed..0000000000 --- a/h2/src/main/org/h2/value/ValueBytes.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.value; - -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.Arrays; - -import org.h2.engine.SysProperties; -import org.h2.util.Bits; -import org.h2.util.MathUtils; -import org.h2.util.StringUtils; -import org.h2.util.Utils; - -/** - * Implementation of the BINARY data type. - * It is also the base class for ValueJavaObject. - */ -public class ValueBytes extends Value { - - /** - * Empty value. - */ - public static final ValueBytes EMPTY = new ValueBytes(Utils.EMPTY_BYTES); - - /** - * The value. - */ - protected byte[] value; - - /** - * Associated TypeInfo. - */ - protected TypeInfo type; - - /** - * The hash code. - */ - protected int hash; - - protected ValueBytes(byte[] v) { - this.value = v; - } - - /** - * Get or create a bytes value for the given byte array. - * Clone the data. 
- * - * @param b the byte array - * @return the value - */ - public static ValueBytes get(byte[] b) { - if (b.length == 0) { - return EMPTY; - } - b = Utils.cloneByteArray(b); - return getNoCopy(b); - } - - /** - * Get or create a bytes value for the given byte array. - * Do not clone the date. - * - * @param b the byte array - * @return the value - */ - public static ValueBytes getNoCopy(byte[] b) { - if (b.length == 0) { - return EMPTY; - } - ValueBytes obj = new ValueBytes(b); - if (b.length > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { - return obj; - } - return (ValueBytes) Value.cache(obj); - } - - @Override - public TypeInfo getType() { - TypeInfo type = this.type; - if (type == null) { - long precision = value.length; - this.type = type = new TypeInfo(BYTES, precision, 0, MathUtils.convertLongToInt(precision * 2), null); - } - return type; - } - - @Override - public int getValueType() { - return BYTES; - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { - builder.append("X'"); - return StringUtils.convertBytesToHex(builder, getBytesNoCopy()).append('\''); - } - - @Override - public byte[] getBytesNoCopy() { - return value; - } - - @Override - public byte[] getBytes() { - return Utils.cloneByteArray(getBytesNoCopy()); - } - - @Override - public int compareTypeSafe(Value v, CompareMode mode) { - byte[] v2 = ((ValueBytes) v).value; - if (mode.isBinaryUnsigned()) { - return Bits.compareNotNullUnsigned(value, v2); - } - return Bits.compareNotNullSigned(value, v2); - } - - @Override - public String getString() { - return StringUtils.convertBytesToHex(value); - } - - @Override - public int hashCode() { - if (hash == 0) { - hash = Utils.getByteArrayHash(value); - } - return hash; - } - - @Override - public Object getObject() { - return getBytes(); - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setBytes(parameterIndex, value); - } - - @Override - public int getMemory() { - 
return value.length + 24; - } - - @Override - public boolean equals(Object other) { - return other instanceof ValueBytes - && Arrays.equals(value, ((ValueBytes) other).value); - } - - @Override - public Value convertPrecision(long precision, boolean force) { - if (value.length <= precision) { - return this; - } - int len = MathUtils.convertLongToInt(precision); - return getNoCopy(Arrays.copyOf(value, len)); - } - -} diff --git a/h2/src/main/org/h2/value/ValueBytesBase.java b/h2/src/main/org/h2/value/ValueBytesBase.java new file mode 100644 index 0000000000..aac8da502b --- /dev/null +++ b/h2/src/main/org/h2/value/ValueBytesBase.java @@ -0,0 +1,77 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.util.Arrays; + +import org.h2.engine.CastDataProvider; +import org.h2.util.Bits; +import org.h2.util.StringUtils; +import org.h2.util.Utils; + +/** + * Base implementation of byte array based data types. + */ +abstract class ValueBytesBase extends Value { + + /** + * The value. + */ + byte[] value; + + /** + * The hash code. 
+ */ + int hash; + + ValueBytesBase(byte[] value) { + this.value = value; + } + + @Override + public final byte[] getBytes() { + return Utils.cloneByteArray(value); + } + + @Override + public final byte[] getBytesNoCopy() { + return value; + } + + @Override + public final int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + return Bits.compareNotNullUnsigned(value, ((ValueBytesBase) v).value); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return StringUtils.convertBytesToHex(builder.append("X'"), value).append('\''); + } + + @Override + public final int hashCode() { + int h = hash; + if (h == 0) { + h = getClass().hashCode() ^ Utils.getByteArrayHash(value); + if (h == 0) { + h = 1_234_570_417; + } + hash = h; + } + return h; + } + + @Override + public int getMemory() { + return value.length + 24; + } + + @Override + public final boolean equals(Object other) { + return other != null && getClass() == other.getClass() && Arrays.equals(value, ((ValueBytesBase) other).value); + } + +} diff --git a/h2/src/main/org/h2/value/ValueChar.java b/h2/src/main/org/h2/value/ValueChar.java new file mode 100644 index 0000000000..be8aa22646 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueChar.java @@ -0,0 +1,55 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import org.h2.engine.CastDataProvider; +import org.h2.engine.SysProperties; +import org.h2.util.StringUtils; + +/** + * Implementation of the CHARACTER data type. 
+ */ +public final class ValueChar extends ValueStringBase { + + private ValueChar(String value) { + super(value); + } + + @Override + public int getValueType() { + return CHAR; + } + + @Override + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + return mode.compareString(convertToChar().getString(), v.convertToChar().getString(), false); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0) { + int length = value.length(); + return StringUtils.quoteStringSQL(builder.append("CAST("), value).append(" AS CHAR(") + .append(length > 0 ? length : 1).append("))"); + } + return StringUtils.quoteStringSQL(builder, value); + } + + /** + * Get or create a CHAR value for the given string. + * + * @param s the string + * @return the value + */ + public static ValueChar get(String s) { + ValueChar obj = new ValueChar(StringUtils.cache(s)); + if (s.length() > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { + return obj; + } + return (ValueChar) Value.cache(obj); + } + +} diff --git a/h2/src/main/org/h2/value/ValueClob.java b/h2/src/main/org/h2/value/ValueClob.java new file mode 100644 index 0000000000..ce75880f95 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueClob.java @@ -0,0 +1,369 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.nio.charset.StandardCharsets; + +import org.h2.engine.CastDataProvider; +import org.h2.engine.Constants; +import org.h2.engine.SysProperties; +import org.h2.message.DbException; +import org.h2.store.DataHandler; +import org.h2.store.FileStore; +import org.h2.store.FileStoreOutputStream; +import org.h2.store.LobStorageInterface; +import org.h2.store.RangeReader; +import org.h2.util.Bits; +import org.h2.util.IOUtils; +import org.h2.util.MathUtils; +import org.h2.util.StringUtils; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; +import org.h2.value.lob.LobDataFetchOnDemand; +import org.h2.value.lob.LobDataFile; +import org.h2.value.lob.LobDataInMemory; + +/** + * Implementation of the CHARACTER LARGE OBJECT data type. + */ +public final class ValueClob extends ValueLob { + + /** + * Creates a small CLOB value that can be stored in the row directly. + * + * @param data + * the data in UTF-8 encoding + * @return the CLOB + */ + public static ValueClob createSmall(byte[] data) { + return new ValueClob(new LobDataInMemory(data), data.length, + new String(data, StandardCharsets.UTF_8).length()); + } + + /** + * Creates a small CLOB value that can be stored in the row directly. + * + * @param data + * the data in UTF-8 encoding + * @param charLength + * the count of characters, must be exactly the same as count of + * characters in the data + * @return the CLOB + */ + public static ValueClob createSmall(byte[] data, long charLength) { + return new ValueClob(new LobDataInMemory(data), data.length, charLength); + } + + /** + * Creates a small CLOB value that can be stored in the row directly. 
+ * + * @param string + * the string with value + * @return the CLOB + */ + public static ValueClob createSmall(String string) { + byte[] bytes = string.getBytes(StandardCharsets.UTF_8); + return new ValueClob(new LobDataInMemory(bytes), bytes.length, string.length()); + } + + /** + * Create a temporary CLOB value from a stream. + * + * @param in + * the reader + * @param length + * the number of characters to read, or -1 for no limit + * @param handler + * the data handler + * @return the lob value + */ + public static ValueClob createTempClob(Reader in, long length, DataHandler handler) { + if (length >= 0) { + // Otherwise BufferedReader may try to read more data than needed + // and that + // blocks the network level + try { + in = new RangeReader(in, 0, length); + } catch (IOException e) { + throw DbException.convert(e); + } + } + BufferedReader reader; + if (in instanceof BufferedReader) { + reader = (BufferedReader) in; + } else { + reader = new BufferedReader(in, Constants.IO_BUFFER_SIZE); + } + try { + long remaining = Long.MAX_VALUE; + if (length >= 0 && length < remaining) { + remaining = length; + } + int len = ValueLob.getBufferSize(handler, remaining); + char[] buff; + if (len >= Integer.MAX_VALUE) { + String data = IOUtils.readStringAndClose(reader, -1); + buff = data.toCharArray(); + len = buff.length; + } else { + buff = new char[len]; + reader.mark(len); + len = IOUtils.readFully(reader, buff, len); + } + if (len <= handler.getMaxLengthInplaceLob()) { + return ValueClob.createSmall(new String(buff, 0, len)); + } + reader.reset(); + return createTemporary(handler, reader, remaining); + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } + } + + /** + * Create a CLOB in a temporary file. 
+ */ + private static ValueClob createTemporary(DataHandler handler, Reader in, long remaining) throws IOException { + String fileName = ValueLob.createTempLobFileName(handler); + FileStore tempFile = handler.openFile(fileName, "rw", false); + tempFile.autoDelete(); + + long octetLength = 0L, charLength = 0L; + try (FileStoreOutputStream out = new FileStoreOutputStream(tempFile, null)) { + char[] buff = new char[Constants.IO_BUFFER_SIZE]; + while (true) { + int len = ValueLob.getBufferSize(handler, remaining); + len = IOUtils.readFully(in, buff, len); + if (len == 0) { + break; + } + // TODO reduce memory allocation + byte[] data = new String(buff, 0, len).getBytes(StandardCharsets.UTF_8); + out.write(data); + octetLength += data.length; + charLength += len; + } + } + return new ValueClob(new LobDataFile(handler, fileName, tempFile), octetLength, charLength); + } + + public ValueClob(LobData lobData, long octetLength, long charLength) { + super(lobData, octetLength, charLength); + } + + @Override + public int getValueType() { + return CLOB; + } + + @Override + public String getString() { + if (charLength > Constants.MAX_STRING_LENGTH) { + throw getStringTooLong(charLength); + } + if (lobData instanceof LobDataInMemory) { + return new String(((LobDataInMemory) lobData).getSmall(), StandardCharsets.UTF_8); + } + return readString((int) charLength); + } + + @Override + byte[] getBytesInternal() { + long p = octetLength; + if (p >= 0L) { + if (p > Constants.MAX_STRING_LENGTH) { + throw getBinaryTooLong(p); + } + return readBytes((int) p); + } + if (octetLength > Constants.MAX_STRING_LENGTH) { + throw getBinaryTooLong(octetLength()); + } + byte[] b = readBytes(Integer.MAX_VALUE); + octetLength = p = b.length; + if (p > Constants.MAX_STRING_LENGTH) { + throw getBinaryTooLong(p); + } + return b; + } + + @Override + public InputStream getInputStream() { + return lobData.getInputStream(-1L); + } + + @Override + public InputStream getInputStream(long oneBasedOffset, long 
length) { + return rangeInputStream(lobData.getInputStream(-1L), oneBasedOffset, length, -1L); + } + + @Override + public Reader getReader(long oneBasedOffset, long length) { + return rangeReader(getReader(), oneBasedOffset, length, charLength); + } + + @Override + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + if (v == this) { + return 0; + } + ValueClob v2 = (ValueClob) v; + LobData lobData = this.lobData, lobData2 = v2.lobData; + if (lobData.getClass() == lobData2.getClass()) { + if (lobData instanceof LobDataInMemory) { + return Integer.signum(getString().compareTo(v2.getString())); + } else if (lobData instanceof LobDataDatabase) { + if (((LobDataDatabase) lobData).getLobId() == ((LobDataDatabase) lobData2).getLobId()) { + return 0; + } + } else if (lobData instanceof LobDataFetchOnDemand) { + if (((LobDataFetchOnDemand) lobData).getLobId() == ((LobDataFetchOnDemand) lobData2).getLobId()) { + return 0; + } + } + } + return compare(this, v2); + } + + /** + * Compares two CLOB values directly. + * + * @param v1 + * first CLOB value + * @param v2 + * second CLOB value + * @return result of comparison + */ + private static int compare(ValueClob v1, ValueClob v2) { + long minPrec = Math.min(v1.charLength, v2.charLength); + try (Reader reader1 = v1.getReader(); Reader reader2 = v2.getReader()) { + char[] buf1 = new char[BLOCK_COMPARISON_SIZE]; + char[] buf2 = new char[BLOCK_COMPARISON_SIZE]; + for (; minPrec >= BLOCK_COMPARISON_SIZE; minPrec -= BLOCK_COMPARISON_SIZE) { + if (IOUtils.readFully(reader1, buf1, BLOCK_COMPARISON_SIZE) != BLOCK_COMPARISON_SIZE + || IOUtils.readFully(reader2, buf2, BLOCK_COMPARISON_SIZE) != BLOCK_COMPARISON_SIZE) { + throw DbException.getUnsupportedException("Invalid LOB"); + } + int cmp = Bits.compareNotNull(buf1, buf2); + if (cmp != 0) { + return cmp; + } + } + for (;;) { + int c1 = reader1.read(), c2 = reader2.read(); + if (c1 < 0) { + return c2 < 0 ? 
0 : -1; + } + if (c2 < 0) { + return 1; + } + if (c1 != c2) { + return c1 < c2 ? -1 : 1; + } + } + } catch (IOException ex) { + throw DbException.convert(ex); + } + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & REPLACE_LOBS_FOR_TRACE) != 0 + && (!(lobData instanceof LobDataInMemory) || charLength > SysProperties.MAX_TRACE_DATA_LENGTH)) { + builder.append("SPACE(").append(charLength); + LobDataDatabase lobDb = (LobDataDatabase) lobData; + builder.append(" /* table: ").append(lobDb.getTableId()).append(" id: ").append(lobDb.getLobId()) + .append(" */)"); + } else { + if ((sqlFlags & (REPLACE_LOBS_FOR_TRACE | NO_CASTS)) == 0) { + StringUtils.quoteStringSQL(builder.append("CAST("), getString()).append(" AS CHARACTER LARGE OBJECT(") + .append(charLength).append("))"); + } else { + StringUtils.quoteStringSQL(builder, getString()); + } + } + return builder; + } + + /** + * Convert the precision to the requested value. + * + * @param precision + * the new precision + * @return the truncated or this value + */ + ValueClob convertPrecision(long precision) { + if (this.charLength <= precision) { + return this; + } + ValueClob lob; + DataHandler handler = lobData.getDataHandler(); + if (handler != null) { + lob = createTempClob(getReader(), precision, handler); + } else { + try { + lob = createSmall(IOUtils.readStringAndClose(getReader(), MathUtils.convertLongToInt(precision))); + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } + } + return lob; + } + + @Override + public ValueLob copy(DataHandler database, int tableId) { + if (lobData instanceof LobDataInMemory) { + byte[] small = ((LobDataInMemory) lobData).getSmall(); + if (small.length > database.getMaxLengthInplaceLob()) { + LobStorageInterface s = database.getLobStorage(); + ValueClob v = s.createClob(getReader(), charLength); + ValueLob v2 = v.copy(database, tableId); + v.remove(); + return v2; + } + return this; + } else if (lobData 
instanceof LobDataDatabase) { + return database.getLobStorage().copyLob(this, tableId); + } else { + throw new UnsupportedOperationException(); + } + } + + @Override + public long charLength() { + return charLength; + } + + @Override + public long octetLength() { + long p = octetLength; + if (p < 0L) { + if (lobData instanceof LobDataInMemory) { + p = ((LobDataInMemory) lobData).getSmall().length; + } else { + try (InputStream is = getInputStream()) { + p = 0L; + for (;;) { + p += is.skip(Long.MAX_VALUE); + if (is.read() < 0) { + break; + } + p++; + } + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } + } + octetLength = p; + } + return p; + } + +} diff --git a/h2/src/main/org/h2/value/ValueCollectionBase.java b/h2/src/main/org/h2/value/ValueCollectionBase.java index d3df142ea6..1136537531 100644 --- a/h2/src/main/org/h2/value/ValueCollectionBase.java +++ b/h2/src/main/org/h2/value/ValueCollectionBase.java @@ -1,15 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; import org.h2.engine.Constants; -import org.h2.engine.Mode; import org.h2.message.DbException; -import org.h2.util.MathUtils; /** * Base class for ARRAY and ROW values. 
@@ -21,8 +20,6 @@ public abstract class ValueCollectionBase extends Value { */ final Value[] values; - private TypeInfo type; - private int hash; ValueCollectionBase(Value[] values) { @@ -47,37 +44,21 @@ public int hashCode() { } @Override - public TypeInfo getType() { - TypeInfo type = this.type; - if (type == null) { - long precision = 0, displaySize = 0; - for (Value v : values) { - TypeInfo t = v.getType(); - precision += t.getPrecision(); - displaySize += t.getDisplaySize(); - } - this.type = type = new TypeInfo(getValueType(), precision, 0, MathUtils.convertLongToInt(displaySize), - null); - } - return type; - } - - @Override - public int compareWithNull(Value v, boolean forEquality, Mode databaseMode, CompareMode compareMode) { + public int compareWithNull(Value v, boolean forEquality, CastDataProvider provider, CompareMode compareMode) { if (v == ValueNull.INSTANCE) { return Integer.MIN_VALUE; } ValueCollectionBase l = this; int leftType = l.getValueType(); int rightType = v.getValueType(); - if (rightType != ARRAY && rightType != ROW) { + if (rightType != leftType) { throw v.getDataConversionError(leftType); } ValueCollectionBase r = (ValueCollectionBase) v; Value[] leftArray = l.values, rightArray = r.values; int leftLength = leftArray.length, rightLength = rightArray.length; if (leftLength != rightLength) { - if (leftType == ROW || rightType == ROW) { + if (leftType == ROW) { throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); } if (forEquality) { @@ -89,7 +70,7 @@ public int compareWithNull(Value v, boolean forEquality, Mode databaseMode, Comp for (int i = 0; i < leftLength; i++) { Value v1 = leftArray[i]; Value v2 = rightArray[i]; - int comp = v1.compareWithNull(v2, forEquality, databaseMode, compareMode); + int comp = v1.compareWithNull(v2, forEquality, provider, compareMode); if (comp != 0) { if (comp != Integer.MIN_VALUE) { return comp; @@ -103,7 +84,7 @@ public int compareWithNull(Value v, boolean forEquality, Mode databaseMode, Comp for 
(int i = 0; i < len; i++) { Value v1 = leftArray[i]; Value v2 = rightArray[i]; - int comp = v1.compareWithNull(v2, forEquality, databaseMode, compareMode); + int comp = v1.compareWithNull(v2, forEquality, provider, compareMode); if (comp != 0) { return comp; } @@ -123,9 +104,9 @@ public boolean containsNull() { @Override public int getMemory() { - int memory = 72; + int memory = 72 + values.length * Constants.MEMORY_POINTER; for (Value v : values) { - memory += v.getMemory() + Constants.MEMORY_POINTER; + memory += v.getMemory(); } return memory; } diff --git a/h2/src/main/org/h2/value/ValueDate.java b/h2/src/main/org/h2/value/ValueDate.java index c2fa3c4392..5c49e1d20c 100644 --- a/h2/src/main/org/h2/value/ValueDate.java +++ b/h2/src/main/org/h2/value/ValueDate.java @@ -1,22 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.Date; -import java.sql.PreparedStatement; -import java.sql.SQLException; - import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; import org.h2.util.DateTimeUtils; /** * Implementation of the DATE data type. */ -public class ValueDate extends Value { +public final class ValueDate extends Value { /** * The default precision and display size of the textual representation of a date. 
@@ -27,6 +24,9 @@ public class ValueDate extends Value { private final long dateValue; private ValueDate(long dateValue) { + if (dateValue < DateTimeUtils.MIN_DATE_VALUE || dateValue > DateTimeUtils.MAX_DATE_VALUE) { + throw new IllegalArgumentException("dateValue out of range " + dateValue); + } this.dateValue = dateValue; } @@ -40,28 +40,6 @@ public static ValueDate fromDateValue(long dateValue) { return (ValueDate) Value.cache(new ValueDate(dateValue)); } - /** - * Get or create a date value for the given date. - * - * @param date the date - * @return the value - */ - public static ValueDate get(Date date) { - long ms = date.getTime(); - return fromDateValue(DateTimeUtils.dateValueFromLocalMillis(ms + DateTimeUtils.getTimeZoneOffset(ms))); - } - - /** - * Calculate the date value (in the default timezone) from a given time in - * milliseconds in UTC. - * - * @param ms the milliseconds - * @return the value - */ - public static ValueDate fromMillis(long ms) { - return fromDateValue(DateTimeUtils.dateValueFromLocalMillis(ms + DateTimeUtils.getTimeZoneOffset(ms))); - } - /** * Parse a string to a ValueDate. 
* @@ -81,11 +59,6 @@ public long getDateValue() { return dateValue; } - @Override - public Date getDate() { - return DateTimeUtils.convertDateValueToDate(dateValue); - } - @Override public TypeInfo getType() { return TypeInfo.TYPE_DATE; @@ -98,30 +71,22 @@ public int getValueType() { @Override public String getString() { - StringBuilder buff = new StringBuilder(PRECISION); - DateTimeUtils.appendDate(buff, dateValue); - return buff.toString(); + return DateTimeUtils.appendDate(new StringBuilder(PRECISION), dateValue).toString(); } @Override - public StringBuilder getSQL(StringBuilder builder) { - builder.append("DATE '"); - DateTimeUtils.appendDate(builder, dateValue); - return builder.append('\''); + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return DateTimeUtils.appendDate(builder.append("DATE '"), dateValue).append('\''); } @Override - public int compareTypeSafe(Value o, CompareMode mode) { + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { return Long.compare(dateValue, ((ValueDate) o).dateValue); } @Override public boolean equals(Object other) { - if (this == other) { - return true; - } - return other instanceof ValueDate - && dateValue == (((ValueDate) other).dateValue); + return this == other || other instanceof ValueDate && dateValue == ((ValueDate) other).dateValue; } @Override @@ -129,15 +94,4 @@ public int hashCode() { return (int) (dateValue ^ (dateValue >>> 32)); } - @Override - public Object getObject() { - return getDate(); - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setDate(parameterIndex, getDate()); - } - } diff --git a/h2/src/main/org/h2/value/ValueDecfloat.java b/h2/src/main/org/h2/value/ValueDecfloat.java new file mode 100644 index 0000000000..2c08d55f95 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueDecfloat.java @@ -0,0 +1,361 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.math.BigDecimal; +import java.math.RoundingMode; + +import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; + +/** + * Implementation of the DECFLOAT data type. + */ +public final class ValueDecfloat extends ValueBigDecimalBase { + + /** + * The value 'zero'. + */ + public static final ValueDecfloat ZERO = new ValueDecfloat(BigDecimal.ZERO); + + /** + * The value 'one'. + */ + public static final ValueDecfloat ONE = new ValueDecfloat(BigDecimal.ONE); + + /** + * The positive infinity value. + */ + public static final ValueDecfloat POSITIVE_INFINITY = new ValueDecfloat(null); + + /** + * The negative infinity value. + */ + public static final ValueDecfloat NEGATIVE_INFINITY = new ValueDecfloat(null); + + /** + * The not a number value. + */ + public static final ValueDecfloat NAN = new ValueDecfloat(null); + + private ValueDecfloat(BigDecimal value) { + super(value); + } + + @Override + public String getString() { + if (value == null) { + if (this == POSITIVE_INFINITY) { + return "Infinity"; + } else if (this == NEGATIVE_INFINITY) { + return "-Infinity"; + } else { + return "NaN"; + } + } + return value.toString(); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0) { + return getSQL(builder.append("CAST(")).append(" AS DECFLOAT)"); + } + return getSQL(builder); + } + + private StringBuilder getSQL(StringBuilder builder) { + if (value != null) { + return builder.append(value); + } else if (this == POSITIVE_INFINITY) { + return builder.append("'Infinity'"); + } else if (this == NEGATIVE_INFINITY) { + return builder.append("'-Infinity'"); + } else { + return builder.append("'NaN'"); + } + } + + @Override + public TypeInfo getType() { + TypeInfo type = this.type; + if (type == 
null) { + this.type = type = new TypeInfo(DECFLOAT, value != null ? value.precision() : 1, 0, null); + } + return type; + } + + @Override + public int getValueType() { + return DECFLOAT; + } + + @Override + public Value add(Value v) { + BigDecimal value2 = ((ValueDecfloat) v).value; + if (value != null) { + if (value2 != null) { + return get(value.add(value2)); + } + return v; + } else if (value2 != null || this == v) { + return this; + } + return NAN; + } + + @Override + public Value subtract(Value v) { + BigDecimal value2 = ((ValueDecfloat) v).value; + if (value != null) { + if (value2 != null) { + return get(value.subtract(value2)); + } + return v == POSITIVE_INFINITY ? NEGATIVE_INFINITY : v == NEGATIVE_INFINITY ? POSITIVE_INFINITY : NAN; + } else if (value2 != null) { + return this; + } else if (this == POSITIVE_INFINITY) { + if (v == NEGATIVE_INFINITY) { + return POSITIVE_INFINITY; + } + } else if (this == NEGATIVE_INFINITY && v == POSITIVE_INFINITY) { + return NEGATIVE_INFINITY; + } + return NAN; + } + + @Override + public Value negate() { + if (value != null) { + return get(value.negate()); + } + return this == POSITIVE_INFINITY ? NEGATIVE_INFINITY : this == NEGATIVE_INFINITY ? 
POSITIVE_INFINITY : NAN; + } + + @Override + public Value multiply(Value v) { + BigDecimal value2 = ((ValueDecfloat) v).value; + if (value != null) { + if (value2 != null) { + return get(value.multiply(value2)); + } + if (v == POSITIVE_INFINITY) { + int s = value.signum(); + if (s > 0) { + return POSITIVE_INFINITY; + } else if (s < 0) { + return NEGATIVE_INFINITY; + } + } else if (v == NEGATIVE_INFINITY) { + int s = value.signum(); + if (s > 0) { + return NEGATIVE_INFINITY; + } else if (s < 0) { + return POSITIVE_INFINITY; + } + } + } else if (value2 != null) { + if (this == POSITIVE_INFINITY) { + int s = value2.signum(); + if (s > 0) { + return POSITIVE_INFINITY; + } else if (s < 0) { + return NEGATIVE_INFINITY; + } + } else if (this == NEGATIVE_INFINITY) { + int s = value2.signum(); + if (s > 0) { + return NEGATIVE_INFINITY; + } else if (s < 0) { + return POSITIVE_INFINITY; + } + } + } else if (this == POSITIVE_INFINITY) { + if (v == POSITIVE_INFINITY) { + return POSITIVE_INFINITY; + } else if (v == NEGATIVE_INFINITY) { + return NEGATIVE_INFINITY; + } + } else if (this == NEGATIVE_INFINITY) { + if (v == POSITIVE_INFINITY) { + return NEGATIVE_INFINITY; + } else if (v == NEGATIVE_INFINITY) { + return POSITIVE_INFINITY; + } + } + return NAN; + } + + @Override + public Value divide(Value v, TypeInfo quotientType) { + BigDecimal value2 = ((ValueDecfloat) v).value; + if (value2 != null && value2.signum() == 0) { + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); + } + if (value != null) { + if (value2 != null) { + return divide(value, value2, quotientType); + } else { + if (v != NAN) { + return ZERO; + } + } + } else if (value2 != null && this != NAN) { + return (this == POSITIVE_INFINITY) == (value2.signum() > 0) ? POSITIVE_INFINITY : NEGATIVE_INFINITY; + } + return NAN; + } + + /** + * Divides to {@link BigDecimal} values and returns a {@code DECFLOAT} + * result of the specified data type. 
+ * + * @param dividend the dividend + * @param divisor the divisor + * @param quotientType the type of quotient + * @return the quotient + */ + public static ValueDecfloat divide(BigDecimal dividend, BigDecimal divisor, TypeInfo quotientType) { + int quotientPrecision = (int) quotientType.getPrecision(); + BigDecimal quotient = dividend.divide(divisor, + dividend.scale() - dividend.precision() + divisor.precision() - divisor.scale() + quotientPrecision, + RoundingMode.HALF_DOWN); + int precision = quotient.precision(); + if (precision > quotientPrecision) { + quotient = quotient.setScale(quotient.scale() - precision + quotientPrecision, RoundingMode.HALF_UP); + } + return get(quotient); + } + + @Override + public Value modulus(Value v) { + BigDecimal value2 = ((ValueDecfloat) v).value; + if (value2 != null && value2.signum() == 0) { + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); + } + if (value != null) { + if (value2 != null) { + return get(value.remainder(value2)); + } else if (v != NAN) { + return this; + } + } + return NAN; + } + + @Override + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { + BigDecimal value2 = ((ValueDecfloat) o).value; + if (value != null) { + if (value2 != null) { + return value.compareTo(value2); + } + return o == NEGATIVE_INFINITY ? 1 : -1; + } else if (value2 != null) { + return this == NEGATIVE_INFINITY ? -1 : 1; + } else if (this == o) { + return 0; + } else if (this == NEGATIVE_INFINITY) { + return -1; + } else if (o == NEGATIVE_INFINITY) { + return 1; + } else { + return this == POSITIVE_INFINITY ? -1 : 1; + } + } + + @Override + public int getSignum() { + if (value != null) { + return value.signum(); + } + return this == POSITIVE_INFINITY ? 1 : this == NEGATIVE_INFINITY ? 
-1 : 0; + } + + @Override + public BigDecimal getBigDecimal() { + if (value != null) { + return value; + } + throw getDataConversionError(NUMERIC); + } + + @Override + public float getFloat() { + if (value != null) { + return value.floatValue(); + } else if (this == POSITIVE_INFINITY) { + return Float.POSITIVE_INFINITY; + } else if (this == NEGATIVE_INFINITY) { + return Float.NEGATIVE_INFINITY; + } else { + return Float.NaN; + } + } + + @Override + public double getDouble() { + if (value != null) { + return value.doubleValue(); + } else if (this == POSITIVE_INFINITY) { + return Double.POSITIVE_INFINITY; + } else if (this == NEGATIVE_INFINITY) { + return Double.NEGATIVE_INFINITY; + } else { + return Double.NaN; + } + } + + @Override + public int hashCode() { + return value != null ? getClass().hashCode() * 31 + value.hashCode() : System.identityHashCode(this); + } + + @Override + public boolean equals(Object other) { + if (other instanceof ValueDecfloat) { + BigDecimal value2 = ((ValueDecfloat) other).value; + if (value != null) { + return value.equals(value2); + } else if (value2 == null && this == other) { + return true; + } + } + return false; + } + + @Override + public int getMemory() { + return value != null ? value.precision() + 120 : 32; + } + + /** + * Returns {@code true}, if this value is finite. + * + * @return {@code true}, if this value is finite, {@code false} otherwise + */ + public boolean isFinite() { + return value != null; + } + + /** + * Get or create a DECFLOAT value for the given big decimal. 
+ * + * @param dec the big decimal + * @return the value + */ + public static ValueDecfloat get(BigDecimal dec) { + dec = dec.stripTrailingZeros(); + if (BigDecimal.ZERO.equals(dec)) { + return ZERO; + } else if (BigDecimal.ONE.equals(dec)) { + return ONE; + } + return (ValueDecfloat) Value.cache(new ValueDecfloat(dec)); + } + +} diff --git a/h2/src/main/org/h2/value/ValueDecimal.java b/h2/src/main/org/h2/value/ValueDecimal.java deleted file mode 100644 index 6231f378e8..0000000000 --- a/h2/src/main/org/h2/value/ValueDecimal.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.value; - -import java.math.BigDecimal; -import java.math.RoundingMode; -import java.sql.PreparedStatement; -import java.sql.SQLException; - -import org.h2.api.ErrorCode; -import org.h2.message.DbException; -import org.h2.util.MathUtils; - -/** - * Implementation of the DECIMAL data type. - */ -public class ValueDecimal extends Value { - - /** - * The value 'zero'. - */ - public static final Object ZERO = new ValueDecimal(BigDecimal.ZERO); - - /** - * The value 'one'. - */ - public static final Object ONE = new ValueDecimal(BigDecimal.ONE); - - /** - * The default precision for a decimal value. - */ - static final int DEFAULT_PRECISION = 65535; - - /** - * The default scale for a decimal value. - */ - static final int DEFAULT_SCALE = 32767; - - /** - * The default display size for a decimal value. - */ - static final int DEFAULT_DISPLAY_SIZE = 65535; - - private static final int DIVIDE_SCALE_ADD = 25; - - /** - * The maximum scale of a BigDecimal value. 
- */ - private static final int BIG_DECIMAL_SCALE_MAX = 100_000; - - private final BigDecimal value; - private TypeInfo type; - private String valueString; - - private ValueDecimal(BigDecimal value) { - if (value == null) { - throw new IllegalArgumentException("null"); - } else if (value.getClass() != BigDecimal.class) { - throw DbException.get(ErrorCode.INVALID_CLASS_2, - BigDecimal.class.getName(), value.getClass().getName()); - } - this.value = value; - } - - @Override - public Value add(Value v) { - ValueDecimal dec = (ValueDecimal) v; - return ValueDecimal.get(value.add(dec.value)); - } - - @Override - public Value subtract(Value v) { - ValueDecimal dec = (ValueDecimal) v; - return ValueDecimal.get(value.subtract(dec.value)); - } - - @Override - public Value negate() { - return ValueDecimal.get(value.negate()); - } - - @Override - public Value multiply(Value v) { - ValueDecimal dec = (ValueDecimal) v; - return ValueDecimal.get(value.multiply(dec.value)); - } - - @Override - public Value divide(Value v) { - ValueDecimal dec = (ValueDecimal) v; - if (dec.value.signum() == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); - } - BigDecimal bd = value.divide(dec.value, - value.scale() + DIVIDE_SCALE_ADD, - RoundingMode.HALF_DOWN); - if (bd.signum() == 0) { - bd = BigDecimal.ZERO; - } else if (bd.scale() > 0) { - if (!bd.unscaledValue().testBit(0)) { - bd = bd.stripTrailingZeros(); - } - } - return ValueDecimal.get(bd); - } - - @Override - public ValueDecimal modulus(Value v) { - ValueDecimal dec = (ValueDecimal) v; - if (dec.value.signum() == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); - } - BigDecimal bd = value.remainder(dec.value); - return ValueDecimal.get(bd); - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { - return builder.append(getString()); - } - - @Override - public TypeInfo getType() { - TypeInfo type = this.type; - if (type == null) { - long precision = value.precision(); - this.type 
= type = new TypeInfo(DECIMAL, precision, value.scale(), - // add 2 characters for '-' and '.' - MathUtils.convertLongToInt(precision + 2), null); - } - return type; - } - - @Override - public int getValueType() { - return DECIMAL; - } - - @Override - public int compareTypeSafe(Value o, CompareMode mode) { - return value.compareTo(((ValueDecimal) o).value); - } - - @Override - public int getSignum() { - return value.signum(); - } - - @Override - public BigDecimal getBigDecimal() { - return value; - } - - @Override - public String getString() { - if (valueString == null) { - String p = value.toPlainString(); - if (p.length() < 40) { - valueString = p; - } else { - valueString = value.toString(); - } - } - return valueString; - } - - @Override - public boolean checkPrecision(long prec) { - if (prec == DEFAULT_PRECISION) { - return true; - } - return value.precision() <= prec; - } - - @Override - public int hashCode() { - return value.hashCode(); - } - - @Override - public Object getObject() { - return value; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setBigDecimal(parameterIndex, value); - } - - @Override - public Value convertScale(boolean onlyToSmallerScale, int targetScale) { - if (value.scale() == targetScale) { - return this; - } - if (onlyToSmallerScale || targetScale >= DEFAULT_SCALE) { - if (value.scale() < targetScale) { - return this; - } - } - BigDecimal bd = ValueDecimal.setScale(value, targetScale); - return ValueDecimal.get(bd); - } - - @Override - public Value convertPrecision(long precision, boolean force) { - if (value.precision() <= precision) { - return this; - } - if (force) { - return get(BigDecimal.valueOf(value.doubleValue())); - } - throw DbException.get( - ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, - Long.toString(precision)); - } - - /** - * Get or create big decimal value for the given big decimal. 
- * - * @param dec the bit decimal - * @return the value - */ - public static ValueDecimal get(BigDecimal dec) { - if (BigDecimal.ZERO.equals(dec)) { - return (ValueDecimal) ZERO; - } else if (BigDecimal.ONE.equals(dec)) { - return (ValueDecimal) ONE; - } - return (ValueDecimal) Value.cache(new ValueDecimal(dec)); - } - - @Override - public boolean equals(Object other) { - // Two BigDecimal objects are considered equal only if they are equal in - // value and scale (thus 2.0 is not equal to 2.00 when using equals; - // however -0.0 and 0.0 are). Can not use compareTo because 2.0 and 2.00 - // have different hash codes - return other instanceof ValueDecimal && - value.equals(((ValueDecimal) other).value); - } - - @Override - public int getMemory() { - return value.precision() + 120; - } - - /** - * Set the scale of a BigDecimal value. - * - * @param bd the BigDecimal value - * @param scale the new scale - * @return the scaled value - */ - public static BigDecimal setScale(BigDecimal bd, int scale) { - if (scale > BIG_DECIMAL_SCALE_MAX || scale < -BIG_DECIMAL_SCALE_MAX) { - throw DbException.getInvalidValueException("scale", scale); - } - return bd.setScale(scale, RoundingMode.HALF_UP); - } - -} diff --git a/h2/src/main/org/h2/value/ValueDouble.java b/h2/src/main/org/h2/value/ValueDouble.java index a23f0f6fc9..9e3fc9753f 100644 --- a/h2/src/main/org/h2/value/ValueDouble.java +++ b/h2/src/main/org/h2/value/ValueDouble.java @@ -1,28 +1,33 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; +import java.math.BigDecimal; import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; /** - * Implementation of the DOUBLE data type. + * Implementation of the DOUBLE PRECISION data type. */ -public class ValueDouble extends Value { +public final class ValueDouble extends Value { /** - * The precision in digits. + * The precision in bits. */ - public static final int PRECISION = 17; + static final int PRECISION = 53; /** - * The maximum display size of a double. + * The approximate precision in decimal digits. + */ + public static final int DECIMAL_PRECISION = 17; + + /** + * The maximum display size of a DOUBLE. * Example: -3.3333333333333334E-100 */ public static final int DISPLAY_SIZE = 24; @@ -52,14 +57,12 @@ private ValueDouble(double value) { @Override public Value add(Value v) { - ValueDouble v2 = (ValueDouble) v; - return get(value + v2.value); + return get(value + ((ValueDouble) v).value); } @Override public Value subtract(Value v) { - ValueDouble v2 = (ValueDouble) v; - return get(value - v2.value); + return get(value - ((ValueDouble) v).value); } @Override @@ -69,15 +72,14 @@ public Value negate() { @Override public Value multiply(Value v) { - ValueDouble v2 = (ValueDouble) v; - return get(value * v2.value); + return get(value * ((ValueDouble) v).value); } @Override - public Value divide(Value v) { + public Value divide(Value v, TypeInfo quotientType) { ValueDouble v2 = (ValueDouble) v; if (v2.value == 0.0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); } return get(value / v2.value); } @@ -86,23 +88,29 @@ public Value divide(Value v) { public ValueDouble modulus(Value v) { ValueDouble other = (ValueDouble) v; if (other.value == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); + 
throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); } return get(value % other.value); } @Override - public StringBuilder getSQL(StringBuilder builder) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0) { + return getSQL(builder.append("CAST(")).append(" AS DOUBLE PRECISION)"); + } + return getSQL(builder); + } + + private StringBuilder getSQL(StringBuilder builder) { if (value == Double.POSITIVE_INFINITY) { - builder.append("POWER(0, -1)"); + return builder.append("'Infinity'"); } else if (value == Double.NEGATIVE_INFINITY) { - builder.append("(-POWER(0, -1))"); + return builder.append("'-Infinity'"); } else if (Double.isNaN(value)) { - builder.append("SQRT(-1)"); + return builder.append("'NaN'"); } else { - builder.append(value); + return builder.append(value); } - return builder; } @Override @@ -116,13 +124,27 @@ public int getValueType() { } @Override - public int compareTypeSafe(Value o, CompareMode mode) { + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { return Double.compare(value, ((ValueDouble) o).value); } @Override public int getSignum() { - return value == 0 ? 0 : (value < 0 ? -1 : 1); + return value == 0 || Double.isNaN(value) ? 0 : value < 0 ? -1 : 1; + } + + @Override + public BigDecimal getBigDecimal() { + if (Double.isFinite(value)) { + return BigDecimal.valueOf(value); + } + // Infinite or NaN + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, Double.toString(value)); + } + + @Override + public float getFloat() { + return (float) value; } @Override @@ -145,19 +167,8 @@ public int hashCode() { return (int) (hash ^ (hash >>> 32)); } - @Override - public Object getObject() { - return value; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setDouble(parameterIndex, value); - } - /** - * Get or create double value for the given double. 
+ * Get or create a DOUBLE PRECISION value for the given double. * * @param d the double * @return the value @@ -179,7 +190,7 @@ public boolean equals(Object other) { if (!(other instanceof ValueDouble)) { return false; } - return compareTypeSafe((ValueDouble) other, null) == 0; + return compareTypeSafe((ValueDouble) other, null, null) == 0; } } diff --git a/h2/src/main/org/h2/value/ValueEnum.java b/h2/src/main/org/h2/value/ValueEnum.java index fca98db579..2572e28be7 100644 --- a/h2/src/main/org/h2/value/ValueEnum.java +++ b/h2/src/main/org/h2/value/ValueEnum.java @@ -1,14 +1,16 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; +import org.h2.util.StringUtils; + /** * ENUM value. */ -public class ValueEnum extends ValueEnumBase { +public final class ValueEnum extends ValueEnumBase { private final ExtTypeInfoEnum enumerators; @@ -26,4 +28,13 @@ public ExtTypeInfoEnum getEnumerators() { return enumerators; } + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0) { + StringUtils.quoteStringSQL(builder.append("CAST("), label).append(" AS "); + return enumerators.getType().getSQL(builder, sqlFlags).append(')'); + } + return StringUtils.quoteStringSQL(builder, label); + } + } diff --git a/h2/src/main/org/h2/value/ValueEnumBase.java b/h2/src/main/org/h2/value/ValueEnumBase.java index e7844f8b51..5188fd581a 100644 --- a/h2/src/main/org/h2/value/ValueEnumBase.java +++ b/h2/src/main/org/h2/value/ValueEnumBase.java @@ -1,25 +1,24 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; +import java.math.BigDecimal; -import org.h2.engine.Mode; +import org.h2.engine.CastDataProvider; import org.h2.util.StringUtils; /** * Base implementation of the ENUM data type. * - * Currently, this class is used primarily for - * client-server communication. + * This base implementation is only used in 2.0.* clients when they work with + * 1.4.* servers. */ public class ValueEnumBase extends Value { - private final String label; + final String label; private final int ordinal; protected ValueEnumBase(final String label, final int ordinal) { @@ -28,20 +27,20 @@ protected ValueEnumBase(final String label, final int ordinal) { } @Override - public Value add(final Value v) { - final Value iv = v.convertTo(Value.INT); - return convertTo(Value.INT).add(iv); + public Value add(Value v) { + ValueInteger iv = v.convertToInt(null); + return convertToInt(null).add(iv); } @Override - public int compareTypeSafe(Value v, CompareMode mode) { + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { return Integer.compare(getInt(), v.getInt()); } @Override - public Value divide(final Value v) { - final Value iv = v.convertTo(Value.INT); - return convertTo(Value.INT).divide(iv); + public Value divide(Value v, TypeInfo quotientType) { + ValueInteger iv = v.convertToInt(null); + return convertToInt(null).divide(iv, quotientType); } @Override @@ -57,7 +56,7 @@ public boolean equals(final Object other) { * @param ordinal the ordinal * @return the value */ - public static ValueEnumBase get(final String label, final int ordinal) { + public static ValueEnumBase get(String label, int ordinal) { return new ValueEnumBase(label, ordinal); } @@ -72,8 +71,18 @@ public long getLong() { } @Override - public Object getObject() { - return label; + 
public BigDecimal getBigDecimal() { + return BigDecimal.valueOf(ordinal); + } + + @Override + public float getFloat() { + return ordinal; + } + + @Override + public double getDouble() { + return ordinal; } @Override @@ -82,7 +91,7 @@ public int getSignum() { } @Override - public StringBuilder getSQL(StringBuilder builder) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { return StringUtils.quoteStringSQL(builder, label); } @@ -115,36 +124,21 @@ public int hashCode() { } @Override - public Value modulus(final Value v) { - final Value iv = v.convertTo(Value.INT); - return convertTo(Value.INT).modulus(iv); - } - - @Override - public Value multiply(final Value v) { - final Value iv = v.convertTo(Value.INT); - return convertTo(Value.INT).multiply(iv); - } - - - @Override - public void set(final PreparedStatement prep, final int parameterIndex) - throws SQLException { - prep.setInt(parameterIndex, ordinal); + public Value modulus(Value v) { + ValueInteger iv = v.convertToInt(null); + return convertToInt(null).modulus(iv); } @Override - public Value subtract(final Value v) { - final Value iv = v.convertTo(Value.INT); - return convertTo(Value.INT).subtract(iv); + public Value multiply(Value v) { + ValueInteger iv = v.convertToInt(null); + return convertToInt(null).multiply(iv); } @Override - protected Value convertTo(int targetType, Mode mode, Object column, ExtTypeInfo extTypeInfo) { - if (targetType == Value.ENUM) { - return extTypeInfo.cast(this); - } - return super.convertTo(targetType, mode, column, extTypeInfo); + public Value subtract(Value v) { + ValueInteger iv = v.convertToInt(null); + return convertToInt(null).subtract(iv); } } diff --git a/h2/src/main/org/h2/value/ValueGeometry.java b/h2/src/main/org/h2/value/ValueGeometry.java index 1527273399..ca33eac03e 100644 --- a/h2/src/main/org/h2/value/ValueGeometry.java +++ b/h2/src/main/org/h2/value/ValueGeometry.java @@ -1,26 +1,22 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; import static org.h2.util.geometry.EWKBUtils.EWKB_SRID; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.Arrays; + import org.h2.api.ErrorCode; -import org.h2.engine.Mode; import org.h2.message.DbException; import org.h2.util.Bits; import org.h2.util.StringUtils; -import org.h2.util.Utils; import org.h2.util.geometry.EWKBUtils; import org.h2.util.geometry.EWKTUtils; import org.h2.util.geometry.GeometryUtils; -import org.h2.util.geometry.GeometryUtils.EnvelopeAndDimensionSystemTarget; import org.h2.util.geometry.GeometryUtils.EnvelopeTarget; import org.h2.util.geometry.JTSUtils; +import org.h2.util.geometry.EWKTUtils.EWKTTarget; import org.locationtech.jts.geom.Geometry; /** @@ -30,22 +26,10 @@ * @author Noel Grandin * @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 */ -public class ValueGeometry extends Value { +public final class ValueGeometry extends ValueBytesBase { private static final double[] UNKNOWN_ENVELOPE = new double[0]; - /** - * As conversion from/to WKB cost a significant amount of CPU cycles, WKB - * are kept in ValueGeometry instance. - * - * We always calculate the WKB, because not all WKT values can be - * represented in WKB, but since we persist it in WKB format, it has to be - * valid in WKB - */ - private final byte[] bytes; - - private final int hashCode; - /** * Geometry type and dimension system in OGC geometry code format (type + * dimensionSystem * 1000). 
@@ -75,15 +59,15 @@ public class ValueGeometry extends Value { * @param envelope the envelope */ private ValueGeometry(byte[] bytes, double[] envelope) { + super(bytes); if (bytes.length < 9 || bytes[0] != 0) { throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, StringUtils.convertBytesToHex(bytes)); } - this.bytes = bytes; + this.value = bytes; this.envelope = envelope; int t = Bits.readInt(bytes, 1); srid = (t & EWKB_SRID) != 0 ? Bits.readInt(bytes, 5) : 0; typeAndDimensionSystem = (t & 0xffff) % 1_000 + EWKBUtils.type2dimensionSystem(t) * 1_000; - hashCode = Arrays.hashCode(bytes); } /** @@ -95,11 +79,8 @@ private ValueGeometry(byte[] bytes, double[] envelope) { */ public static ValueGeometry getFromGeometry(Object o) { try { - EnvelopeAndDimensionSystemTarget target = new EnvelopeAndDimensionSystemTarget(); Geometry g = (Geometry) o; - JTSUtils.parseGeometry(g, target); - return (ValueGeometry) Value.cache(new ValueGeometry( // - JTSUtils.geometry2ewkb(g, target.getDimensionSystem()), target.getEnvelope())); + return (ValueGeometry) Value.cache(new ValueGeometry(JTSUtils.geometry2ewkb(g), UNKNOWN_ENVELOPE)); } catch (RuntimeException ex) { throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, String.valueOf(o)); } @@ -113,27 +94,12 @@ public static ValueGeometry getFromGeometry(Object o) { */ public static ValueGeometry get(String s) { try { - EnvelopeAndDimensionSystemTarget target = new EnvelopeAndDimensionSystemTarget(); - EWKTUtils.parseEWKT(s, target); - return (ValueGeometry) Value.cache(new ValueGeometry( // - EWKTUtils.ewkt2ewkb(s, target.getDimensionSystem()), target.getEnvelope())); + return (ValueGeometry) Value.cache(new ValueGeometry(EWKTUtils.ewkt2ewkb(s), UNKNOWN_ENVELOPE)); } catch (RuntimeException ex) { throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s); } } - /** - * Get or create a geometry value for the given geometry. 
- * - * @param s the WKT representation of the geometry - * @param srid the srid of the object - * @return the value - */ - public static ValueGeometry get(String s, int srid) { - // This method is not used in H2, but preserved for H2GIS - return get(srid == 0 ? s : "SRID=" + srid + ';' + s); - } - /** * Get or create a geometry value for the given internal EWKB representation. * @@ -152,10 +118,7 @@ public static ValueGeometry get(byte[] bytes) { */ public static ValueGeometry getFromEWKB(byte[] bytes) { try { - EnvelopeAndDimensionSystemTarget target = new EnvelopeAndDimensionSystemTarget(); - EWKBUtils.parseEWKB(bytes, target); - return (ValueGeometry) Value.cache(new ValueGeometry( // - EWKBUtils.ewkb2ewkb(bytes, target.getDimensionSystem()), target.getEnvelope())); + return (ValueGeometry) Value.cache(new ValueGeometry(EWKBUtils.ewkb2ewkb(bytes), UNKNOWN_ENVELOPE)); } catch (RuntimeException ex) { throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, StringUtils.convertBytesToHex(bytes)); } @@ -182,7 +145,7 @@ public static Value fromEnvelope(double[] envelope) { public Geometry getGeometry() { if (geometry == null) { try { - geometry = JTSUtils.ewkb2geometry(bytes, getDimensionSystem()); + geometry = JTSUtils.ewkb2geometry(value, getDimensionSystem()); } catch (RuntimeException ex) { throw DbException.convert(ex); } @@ -235,7 +198,7 @@ public int getSRID() { public double[] getEnvelopeNoCopy() { if (envelope == UNKNOWN_ENVELOPE) { EnvelopeTarget target = new EnvelopeTarget(); - EWKBUtils.parseEWKB(bytes, target); + EWKBUtils.parseEWKB(value, target); envelope = target.getEnvelope(); } return envelope; @@ -273,86 +236,25 @@ public int getValueType() { } @Override - public StringBuilder getSQL(StringBuilder builder) { - // Using bytes is faster than converting to EWKT. 
- builder.append("X'"); - return StringUtils.convertBytesToHex(builder, getBytesNoCopy()).append("'::Geometry"); - } - - @Override - public int compareTypeSafe(Value v, CompareMode mode) { - return Bits.compareNotNullUnsigned(bytes, ((ValueGeometry) v).bytes); - } - - @Override - public String getString() { - return getEWKT(); - } - - @Override - public int hashCode() { - return hashCode; - } - - @Override - public Object getObject() { - if (DataType.GEOMETRY_CLASS != null) { - return getGeometry(); + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + builder.append("GEOMETRY "); + if ((sqlFlags & ADD_PLAN_INFORMATION) != 0) { + EWKBUtils.parseEWKB(value, new EWKTTarget(builder.append('\''), getDimensionSystem())); + builder.append('\''); + } else { + super.getSQL(builder, DEFAULT_SQL_FLAGS); } - return getEWKT(); - } - - @Override - public byte[] getBytes() { - return Utils.cloneByteArray(bytes); - } - - @Override - public byte[] getBytesNoCopy() { - return bytes; + return builder; } @Override - public void set(PreparedStatement prep, int parameterIndex) throws SQLException { - prep.setBytes(parameterIndex, bytes); + public String getString() { + return EWKTUtils.ewkb2ewkt(value, getDimensionSystem()); } @Override public int getMemory() { - return bytes.length * 20 + 24; - } - - @Override - public boolean equals(Object other) { - return other instanceof ValueGeometry && Arrays.equals(bytes, ((ValueGeometry) other).bytes); - } - - /** - * Get the value in Extended Well-Known Text format. - * - * @return the extended well-known text - */ - public String getEWKT() { - return EWKTUtils.ewkb2ewkt(bytes, getDimensionSystem()); - } - - /** - * Get the value in extended Well-Known Binary format. 
- * - * @return the extended well-known binary - */ - public byte[] getEWKB() { - return bytes; - } - - @Override - protected Value convertTo(int targetType, Mode mode, Object column, ExtTypeInfo extTypeInfo) { - if (targetType == Value.GEOMETRY) { - return extTypeInfo != null ? extTypeInfo.cast(this) : this; - } else if (targetType == Value.JAVA_OBJECT) { - return this; - } - return super.convertTo(targetType, mode, column, null); + return value.length * 20 + 24; } } diff --git a/h2/src/main/org/h2/value/ValueInt.java b/h2/src/main/org/h2/value/ValueInteger.java similarity index 53% rename from h2/src/main/org/h2/value/ValueInt.java rename to h2/src/main/org/h2/value/ValueInteger.java index 1311e46f05..13ba4cb91b 100644 --- a/h2/src/main/org/h2/value/ValueInt.java +++ b/h2/src/main/org/h2/value/ValueInteger.java @@ -1,28 +1,34 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; +import java.math.BigDecimal; import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; +import org.h2.util.Bits; /** - * Implementation of the INT data type. + * Implementation of the INTEGER data type. */ -public class ValueInt extends Value { +public final class ValueInteger extends Value { /** - * The precision in digits. + * The precision in bits. */ - public static final int PRECISION = 10; + public static final int PRECISION = 32; /** - * The maximum display size of an int. + * The approximate precision in decimal digits. + */ + public static final int DECIMAL_PRECISION = 10; + + /** + * The maximum display size of an INT. 
* Example: -2147483648 */ public static final int DISPLAY_SIZE = 11; @@ -30,34 +36,34 @@ public class ValueInt extends Value { private static final int STATIC_SIZE = 128; // must be a power of 2 private static final int DYNAMIC_SIZE = 256; - private static final ValueInt[] STATIC_CACHE = new ValueInt[STATIC_SIZE]; - private static final ValueInt[] DYNAMIC_CACHE = new ValueInt[DYNAMIC_SIZE]; + private static final ValueInteger[] STATIC_CACHE = new ValueInteger[STATIC_SIZE]; + private static final ValueInteger[] DYNAMIC_CACHE = new ValueInteger[DYNAMIC_SIZE]; private final int value; static { for (int i = 0; i < STATIC_SIZE; i++) { - STATIC_CACHE[i] = new ValueInt(i); + STATIC_CACHE[i] = new ValueInteger(i); } } - private ValueInt(int value) { + private ValueInteger(int value) { this.value = value; } /** - * Get or create an int value for the given int. + * Get or create an INTEGER value for the given int. * * @param i the int * @return the value */ - public static ValueInt get(int i) { + public static ValueInteger get(int i) { if (i >= 0 && i < STATIC_SIZE) { return STATIC_CACHE[i]; } - ValueInt v = DYNAMIC_CACHE[i & (DYNAMIC_SIZE - 1)]; + ValueInteger v = DYNAMIC_CACHE[i & (DYNAMIC_SIZE - 1)]; if (v == null || v.value != i) { - v = new ValueInt(i); + v = new ValueInteger(i); DYNAMIC_CACHE[i & (DYNAMIC_SIZE - 1)] = v; } return v; @@ -65,15 +71,15 @@ public static ValueInt get(int i) { @Override public Value add(Value v) { - ValueInt other = (ValueInt) v; + ValueInteger other = (ValueInteger) v; return checkRange((long) value + (long) other.value); } - private static ValueInt checkRange(long x) { + private static ValueInteger checkRange(long x) { if ((int) x != x) { throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, Long.toString(x)); } - return ValueInt.get((int) x); + return ValueInteger.get((int) x); } @Override @@ -88,51 +94,58 @@ public Value negate() { @Override public Value subtract(Value v) { - ValueInt other = (ValueInt) v; + ValueInteger other = 
(ValueInteger) v; return checkRange((long) value - (long) other.value); } @Override public Value multiply(Value v) { - ValueInt other = (ValueInt) v; + ValueInteger other = (ValueInteger) v; return checkRange((long) value * (long) other.value); } @Override - public Value divide(Value v) { - int y = ((ValueInt) v).value; + public Value divide(Value v, TypeInfo quotientType) { + int y = ((ValueInteger) v).value; if (y == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); } int x = value; if (x == Integer.MIN_VALUE && y == -1) { throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, "2147483648"); } - return ValueInt.get(x / y); + return ValueInteger.get(x / y); } @Override public Value modulus(Value v) { - ValueInt other = (ValueInt) v; + ValueInteger other = (ValueInteger) v; if (other.value == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); } - return ValueInt.get(value % other.value); + return ValueInteger.get(value % other.value); } @Override - public StringBuilder getSQL(StringBuilder builder) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { return builder.append(value); } @Override public TypeInfo getType() { - return TypeInfo.TYPE_INT; + return TypeInfo.TYPE_INTEGER; } @Override public int getValueType() { - return INT; + return INTEGER; + } + + @Override + public byte[] getBytes() { + byte[] b = new byte[4]; + Bits.writeInt(b, 0, getInt()); + return b; } @Override @@ -146,34 +159,38 @@ public long getLong() { } @Override - public int compareTypeSafe(Value o, CompareMode mode) { - return Integer.compare(value, ((ValueInt) o).value); + public BigDecimal getBigDecimal() { + return BigDecimal.valueOf(value); } @Override - public String getString() { - return Integer.toString(value); + public float getFloat() { + return value; } @Override - public int 
hashCode() { + public double getDouble() { return value; } @Override - public Object getObject() { - return value; + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { + return Integer.compare(value, ((ValueInteger) o).value); } @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setInt(parameterIndex, value); + public String getString() { + return Integer.toString(value); + } + + @Override + public int hashCode() { + return value; } @Override public boolean equals(Object other) { - return other instanceof ValueInt && value == ((ValueInt) other).value; + return other instanceof ValueInteger && value == ((ValueInteger) other).value; } } diff --git a/h2/src/main/org/h2/value/ValueInterval.java b/h2/src/main/org/h2/value/ValueInterval.java index 0a2b874b1b..542e94d517 100644 --- a/h2/src/main/org/h2/value/ValueInterval.java +++ b/h2/src/main/org/h2/value/ValueInterval.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; @@ -10,11 +10,12 @@ import static org.h2.util.DateTimeUtils.NANOS_PER_MINUTE; import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND; -import java.sql.PreparedStatement; -import java.sql.SQLException; +import java.math.BigDecimal; +import java.math.RoundingMode; import org.h2.api.Interval; import org.h2.api.IntervalQualifier; +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; import org.h2.util.DateTimeUtils; import org.h2.util.IntervalUtils; @@ -22,7 +23,7 @@ /** * Implementation of the INTERVAL data type. 
*/ -public class ValueInterval extends Value { +public final class ValueInterval extends Value { /** * The default leading field precision for intervals. @@ -37,16 +38,33 @@ public class ValueInterval extends Value { /** * The default scale for intervals with seconds. */ - static final int DEFAULT_SCALE = 6; + public static final int DEFAULT_SCALE = 6; /** * The maximum scale for intervals with seconds. */ public static final int MAXIMUM_SCALE = 9; - private final int valueType; + private static final long[] MULTIPLIERS = { + // INTERVAL_SECOND + DateTimeUtils.NANOS_PER_SECOND, + // INTERVAL_YEAR_TO_MONTH + 12, + // INTERVAL_DAY_TO_HOUR + 24, + // INTERVAL_DAY_TO_MINUTE + 24 * 60, + // INTERVAL_DAY_TO_SECOND + DateTimeUtils.NANOS_PER_DAY, + // INTERVAL_HOUR_TO_MINUTE: + 60, + // INTERVAL_HOUR_TO_SECOND + DateTimeUtils.NANOS_PER_HOUR, + // INTERVAL_MINUTE_TO_SECOND + DateTimeUtils.NANOS_PER_MINUTE // + }; - private TypeInfo type; + private final int valueType; private final boolean negative; @@ -143,27 +161,13 @@ private ValueInterval(int type, boolean negative, long leading, long remaining) } @Override - public StringBuilder getSQL(StringBuilder builder) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { return IntervalUtils.appendInterval(builder, getQualifier(), negative, leading, remaining); } @Override public TypeInfo getType() { - TypeInfo type = this.type; - if (type == null) { - long l = leading; - int precision = 0; - while (l > 0) { - precision++; - l /= 10; - } - if (precision == 0) { - precision = 1; - } - this.type = type = new TypeInfo(valueType, precision, 0, - getDisplaySize(valueType, MAXIMUM_PRECISION, MAXIMUM_SCALE), null); - } - return type; + return TypeInfo.getTypeInfo(valueType); } @Override @@ -177,50 +181,61 @@ public int getMemory() { return 48; } - @Override - public Value convertScale(boolean onlyToSmallerScale, int targetScale) { - if (targetScale >= MAXIMUM_SCALE) { - return this; - } - if (targetScale < 0) { - throw 
DbException.getInvalidValueException("scale", targetScale); - } - IntervalQualifier qualifier = getQualifier(); - if (!qualifier.hasSeconds()) { - return this; - } - long r = DateTimeUtils.convertScale(remaining, targetScale); - if (r == remaining) { - return this; - } - long l = leading; - switch (valueType) { - case INTERVAL_SECOND: - if (r >= NANOS_PER_SECOND) { - l++; - r -= NANOS_PER_SECOND; - } - break; - case INTERVAL_DAY_TO_SECOND: - if (r >= NANOS_PER_DAY) { - l++; - r -= NANOS_PER_DAY; + /** + * Check if the precision is smaller or equal than the given precision. + * + * @param prec + * the maximum precision + * @return true if the precision of this value is smaller or equal to the + * given precision + */ + boolean checkPrecision(long prec) { + if (prec < MAXIMUM_PRECISION) { + for (long l = leading, p = 1, precision = 0; l >= p; p *= 10) { + if (++precision > prec) { + return false; + } } - break; - case INTERVAL_HOUR_TO_SECOND: - if (r >= NANOS_PER_HOUR) { - l++; - r -= NANOS_PER_HOUR; + } + return true; + } + + ValueInterval setPrecisionAndScale(TypeInfo targetType, Object column) { + int targetScale = targetType.getScale(); + ValueInterval v = this; + convertScale: if (targetScale < ValueInterval.MAXIMUM_SCALE) { + long range; + switch (valueType) { + case INTERVAL_SECOND: + range = NANOS_PER_SECOND; + break; + case INTERVAL_DAY_TO_SECOND: + range = NANOS_PER_DAY; + break; + case INTERVAL_HOUR_TO_SECOND: + range = NANOS_PER_HOUR; + break; + case INTERVAL_MINUTE_TO_SECOND: + range = NANOS_PER_MINUTE; + break; + default: + break convertScale; } - break; - case INTERVAL_MINUTE_TO_SECOND: - if (r >= NANOS_PER_MINUTE) { - l++; - r -= NANOS_PER_MINUTE; + long l = leading; + long r = DateTimeUtils.convertScale(remaining, targetScale, + l == 999_999_999_999_999_999L ? 
range : Long.MAX_VALUE); + if (r != remaining) { + if (r >= range) { + l++; + r -= range; + } + v = ValueInterval.from(v.getQualifier(), v.isNegative(), l, r); } - break; } - return from(qualifier, negative, l, r); + if (!v.checkPrecision(targetType.getPrecision())) { + throw v.getValueTooLongException(targetType, column); + } + return v; } @Override @@ -230,7 +245,49 @@ public String getString() { } @Override - public Object getObject() { + public long getLong() { + long l = leading; + if (valueType >= INTERVAL_SECOND && remaining != 0L + && remaining >= MULTIPLIERS[valueType - INTERVAL_SECOND] >> 1) { + l++; + } + return negative ? -l : l; + } + + @Override + public BigDecimal getBigDecimal() { + if (valueType < INTERVAL_SECOND || remaining == 0L) { + return BigDecimal.valueOf(negative ? -leading : leading); + } + BigDecimal m = BigDecimal.valueOf(MULTIPLIERS[valueType - INTERVAL_SECOND]); + BigDecimal bd = BigDecimal.valueOf(leading) + .add(BigDecimal.valueOf(remaining).divide(m, m.precision(), RoundingMode.HALF_DOWN)) // + .stripTrailingZeros(); + return negative ? bd.negate() : bd; + } + + @Override + public float getFloat() { + if (valueType < INTERVAL_SECOND || remaining == 0L) { + return negative ? -leading : leading; + } + return getBigDecimal().floatValue(); + } + + @Override + public double getDouble() { + if (valueType < INTERVAL_SECOND || remaining == 0L) { + return negative ? -leading : leading; + } + return getBigDecimal().doubleValue(); + } + + /** + * Returns the interval. 
+ * + * @return the interval + */ + public Interval getInterval() { return new Interval(getQualifier(), negative, leading, remaining); } @@ -272,11 +329,6 @@ public long getRemaining() { return remaining; } - @Override - public void set(PreparedStatement prep, int parameterIndex) throws SQLException { - prep.setString(parameterIndex, getString()); - } - @Override public int hashCode() { final int prime = 31; @@ -302,7 +354,7 @@ public boolean equals(Object obj) { } @Override - public int compareTypeSafe(Value v, CompareMode mode) { + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { ValueInterval other = (ValueInterval) v; if (negative != other.negative) { return negative ? -1 : 1; diff --git a/h2/src/main/org/h2/value/ValueJavaObject.java b/h2/src/main/org/h2/value/ValueJavaObject.java index d38ddc2ad1..9eb3a75d29 100644 --- a/h2/src/main/org/h2/value/ValueJavaObject.java +++ b/h2/src/main/org/h2/value/ValueJavaObject.java @@ -1,58 +1,47 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Types; - +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; import org.h2.engine.SysProperties; -import org.h2.store.DataHandler; -import org.h2.util.Bits; -import org.h2.util.JdbcUtils; +import org.h2.message.DbException; +import org.h2.util.StringUtils; import org.h2.util.Utils; /** - * Implementation of the OBJECT data type. + * Implementation of the JAVA_OBJECT data type. 
*/ -public class ValueJavaObject extends ValueBytes { +public final class ValueJavaObject extends ValueBytesBase { - private static final ValueJavaObject EMPTY = - new ValueJavaObject(Utils.EMPTY_BYTES, null); - private final DataHandler dataHandler; + private static final ValueJavaObject EMPTY = new ValueJavaObject(Utils.EMPTY_BYTES); - protected ValueJavaObject(byte[] v, DataHandler dataHandler) { + protected ValueJavaObject(byte[] v) { super(v); - this.dataHandler = dataHandler; + int length = value.length; + if (length > Constants.MAX_STRING_LENGTH) { + throw DbException.getValueTooLongException(getTypeName(getValueType()), + StringUtils.convertBytesToHex(value, 41), length); + } } /** * Get or create a java object value for the given byte array. * Do not clone the data. * - * @param javaObject the object * @param b the byte array - * @param dataHandler provides the object serializer * @return the value */ - public static ValueJavaObject getNoCopy(Object javaObject, byte[] b, - DataHandler dataHandler) { - if (b != null && b.length == 0) { + public static ValueJavaObject getNoCopy(byte[] b) { + int length = b.length; + if (length == 0) { return EMPTY; } - ValueJavaObject obj; - if (SysProperties.serializeJavaObject) { - if (b == null) { - b = JdbcUtils.serialize(javaObject, dataHandler); - } - obj = new ValueJavaObject(b, dataHandler); - } else { - obj = new NotSerialized(javaObject, b, dataHandler); - } - if (b == null || b.length > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { + ValueJavaObject obj = new ValueJavaObject(b); + if (length > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { return obj; } return (ValueJavaObject) Value.cache(obj); @@ -69,145 +58,16 @@ public int getValueType() { } @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - Object obj = JdbcUtils.deserialize(getBytesNoCopy(), getDataHandler()); - prep.setObject(parameterIndex, obj, Types.JAVA_OBJECT); - } - - /** - * Value which 
serializes java object only for I/O operations. - * Used when property {@link SysProperties#serializeJavaObject} is disabled. - * - * @author Sergi Vladykin - */ - private static class NotSerialized extends ValueJavaObject { - - private Object javaObject; - - NotSerialized(Object javaObject, byte[] v, DataHandler dataHandler) { - super(v, dataHandler); - this.javaObject = javaObject; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setObject(parameterIndex, getObject(), Types.JAVA_OBJECT); - } - - @Override - public byte[] getBytesNoCopy() { - if (value == null) { - value = JdbcUtils.serialize(javaObject, null); - } - return value; - } - - @Override - public int compareTypeSafe(Value v, CompareMode mode) { - Object o1 = getObject(); - Object o2 = v.getObject(); - - boolean o1Comparable = o1 instanceof Comparable; - boolean o2Comparable = o2 instanceof Comparable; - - if (o1Comparable && o2Comparable && - Utils.haveCommonComparableSuperclass(o1.getClass(), o2.getClass())) { - @SuppressWarnings("unchecked") - Comparable c1 = (Comparable) o1; - return c1.compareTo(o2); - } - - // group by types - if (o1.getClass() != o2.getClass()) { - if (o1Comparable != o2Comparable) { - return o1Comparable ? -1 : 1; - } - return o1.getClass().getName().compareTo(o2.getClass().getName()); - } - - // compare hash codes - int h1 = hashCode(); - int h2 = v.hashCode(); - - if (h1 == h2) { - if (o1.equals(o2)) { - return 0; - } - return Bits.compareNotNullSigned(getBytesNoCopy(), v.getBytesNoCopy()); - } - - return h1 > h2 ? 
1 : -1; - } - - @Override - public TypeInfo getType() { - TypeInfo type = this.type; - if (type == null) { - String string = getString(); - this.type = type = createType(string); - } - return type; - } - - private static TypeInfo createType(String string) { - return new TypeInfo(JAVA_OBJECT, 0, 0, string.length(), null); - } - - @Override - public String getString() { - String str = getObject().toString(); - if (type == null) { - type = createType(str); - } - return str; - } - - @Override - public int hashCode() { - if (hash == 0) { - hash = getObject().hashCode(); - } - return hash; - } - - @Override - public Object getObject() { - if (javaObject == null) { - javaObject = JdbcUtils.deserialize(value, getDataHandler()); - } - return javaObject; - } - - @Override - public int getMemory() { - if (value == null) { - return 40; - } - int mem = 40; - if (javaObject != null) { - mem *= 2; - } - return mem; - } - - @Override - public boolean equals(Object other) { - if (!(other instanceof NotSerialized)) { - return false; - } - return getObject().equals(((NotSerialized) other).getObject()); - } - - @Override - public Value convertPrecision(long precision, boolean force) { - return this; + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0) { + return super.getSQL(builder.append("CAST("), DEFAULT_SQL_FLAGS).append(" AS JAVA_OBJECT)"); } + return super.getSQL(builder, DEFAULT_SQL_FLAGS); } @Override - protected DataHandler getDataHandler() { - return dataHandler; + public String getString() { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, "JAVA_OBJECT to CHARACTER VARYING"); } + } diff --git a/h2/src/main/org/h2/value/ValueJson.java b/h2/src/main/org/h2/value/ValueJson.java new file mode 100644 index 0000000000..aa0011a7ec --- /dev/null +++ b/h2/src/main/org/h2/value/ValueJson.java @@ -0,0 +1,243 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Lazarev Nikita + */ +package org.h2.value; + +import java.io.ByteArrayOutputStream; +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.util.json.JSONByteArrayTarget; +import org.h2.util.json.JSONBytesSource; +import org.h2.util.json.JSONItemType; +import org.h2.util.json.JSONStringSource; +import org.h2.util.json.JSONStringTarget; + +/** + * Implementation of the JSON data type. + */ +public final class ValueJson extends ValueBytesBase { + + private static final byte[] NULL_BYTES = "null".getBytes(StandardCharsets.ISO_8859_1), + TRUE_BYTES = "true".getBytes(StandardCharsets.ISO_8859_1), + FALSE_BYTES = "false".getBytes(StandardCharsets.ISO_8859_1); + + /** + * {@code null} JSON value. + */ + public static final ValueJson NULL = new ValueJson(NULL_BYTES); + + /** + * {@code true} JSON value. + */ + public static final ValueJson TRUE = new ValueJson(TRUE_BYTES); + + /** + * {@code false} JSON value. + */ + public static final ValueJson FALSE = new ValueJson(FALSE_BYTES); + + /** + * {@code 0} JSON value. 
+ */ + public static final ValueJson ZERO = new ValueJson(new byte[] { '0' }); + + private ValueJson(byte[] value) { + super(value); + int length = value.length; + if (length > Constants.MAX_STRING_LENGTH) { + throw DbException.getValueTooLongException(getTypeName(getValueType()), + StringUtils.convertBytesToHex(value, 41), length); + } + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + String s = JSONBytesSource.parse(value, new JSONStringTarget(true)); + return builder.append("JSON '").append(s).append('\''); + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_JSON; + } + + @Override + public int getValueType() { + return Value.JSON; + } + + @Override + public String getString() { + return new String(value, StandardCharsets.UTF_8); + } + + /** + * Returns JSON item type. + * + * @return JSON item type + */ + public JSONItemType getItemType() { + switch (value[0]) { + case '[': + return JSONItemType.ARRAY; + case '{': + return JSONItemType.OBJECT; + default: + return JSONItemType.SCALAR; + } + } + + /** + * Returns JSON value with the specified content. + * + * @param s + * JSON representation, will be normalized + * @return JSON value + * @throws DbException + * on invalid JSON + */ + public static ValueJson fromJson(String s) { + byte[] bytes; + try { + bytes = JSONStringSource.normalize(s); + } catch (RuntimeException ex) { + if (s.length() > 80) { + s = new StringBuilder(83).append(s, 0, 80).append("...").toString(); + } + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s); + } + return getInternal(bytes); + } + + /** + * Returns JSON value with the specified content. 
+ * + * @param bytes + * JSON representation, will be normalized + * @return JSON value + * @throws DbException + * on invalid JSON + */ + public static ValueJson fromJson(byte[] bytes) { + try { + bytes = JSONBytesSource.normalize(bytes); + } catch (RuntimeException ex) { + StringBuilder builder = new StringBuilder().append("X'"); + if (bytes.length > 40) { + StringUtils.convertBytesToHex(builder, bytes, 40).append("..."); + } else { + StringUtils.convertBytesToHex(builder, bytes); + } + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, builder.append('\'').toString()); + } + return getInternal(bytes); + } + + /** + * Returns JSON value with the specified boolean content. + * + * @param bool + * boolean value + * @return JSON value + */ + public static ValueJson get(boolean bool) { + return bool ? TRUE : FALSE; + } + + /** + * Returns JSON value with the specified numeric content. + * + * @param number + * integer value + * @return JSON value + */ + public static ValueJson get(int number) { + return number != 0 ? getNumber(Integer.toString(number)) : ZERO; + } + + /** + * Returns JSON value with the specified numeric content. + * + * @param number + * long value + * @return JSON value + */ + public static ValueJson get(long number) { + return number != 0L ? getNumber(Long.toString(number)) : ZERO; + } + + /** + * Returns JSON value with the specified numeric content. + * + * @param number + * big decimal value + * @return JSON value + */ + public static ValueJson get(BigDecimal number) { + if (number.signum() == 0 && number.scale() == 0) { + return ZERO; + } + String s = number.toString(); + int index = s.indexOf('E'); + if (index >= 0 && s.charAt(++index) == '+') { + int length = s.length(); + s = new StringBuilder(length - 1).append(s, 0, index).append(s, index + 1, length).toString(); + } + return getNumber(s); + } + + /** + * Returns JSON value with the specified string content. 
+ * + * @param string + * string value + * @return JSON value + */ + public static ValueJson get(String string) { + return new ValueJson(JSONByteArrayTarget.encodeString( // + new ByteArrayOutputStream(string.length() + 2), string).toByteArray()); + } + + /** + * Returns JSON value with the specified content. + * + * @param bytes + * normalized JSON representation + * @return JSON value + */ + public static ValueJson getInternal(byte[] bytes) { + int l = bytes.length; + switch (l) { + case 1: + if (bytes[0] == '0') { + return ZERO; + } + break; + case 4: + if (Arrays.equals(TRUE_BYTES, bytes)) { + return TRUE; + } else if (Arrays.equals(NULL_BYTES, bytes)) { + return NULL; + } + break; + case 5: + if (Arrays.equals(FALSE_BYTES, bytes)) { + return FALSE; + } + } + return new ValueJson(bytes); + } + + private static ValueJson getNumber(String s) { + return new ValueJson(s.getBytes(StandardCharsets.ISO_8859_1)); + } + +} diff --git a/h2/src/main/org/h2/value/ValueLob.java b/h2/src/main/org/h2/value/ValueLob.java index dcd5fd2024..3e1f91ad0f 100644 --- a/h2/src/main/org/h2/value/ValueLob.java +++ b/h2/src/main/org/h2/value/ValueLob.java @@ -1,41 +1,39 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, and the + * EPL 1.0 (https://h2database.com/html/license.html). 
Initial Developer: H2 + * Group */ package org.h2.value; -import java.io.BufferedInputStream; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.Reader; -import java.sql.PreparedStatement; -import java.sql.SQLException; + import org.h2.engine.Constants; -import org.h2.engine.Mode; import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.store.DataHandler; -import org.h2.store.FileStore; -import org.h2.store.FileStoreInputStream; +import org.h2.store.LobStorageFrontend; +import org.h2.store.LobStorageInterface; import org.h2.store.RangeInputStream; import org.h2.store.RangeReader; import org.h2.store.fs.FileUtils; -import org.h2.util.Bits; import org.h2.util.IOUtils; import org.h2.util.MathUtils; -import org.h2.util.SmallLRUCache; import org.h2.util.StringUtils; import org.h2.util.Utils; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; +import org.h2.value.lob.LobDataInMemory; /** - * This is the legacy implementation of LOBs for PageStore databases where the - * LOB was stored in an external file. + * A implementation of the BINARY LARGE OBJECT and CHARACTER LARGE OBJECT data + * types. Small objects are kept in memory and stored in the record. Large + * objects are either stored in the database, or in temporary files. 
*/ -public class ValueLob extends Value { +public abstract class ValueLob extends Value { - private static final int BLOCK_COMPARISON_SIZE = 512; + static final int BLOCK_COMPARISON_SIZE = 512; private static void rangeCheckUnknown(long zeroBasedOffset, long length) { if (zeroBasedOffset < 0) { @@ -55,7 +53,8 @@ private static void rangeCheckUnknown(long zeroBasedOffset, long length) { * @param dataSize the length of the input, in bytes * @return the smaller input stream */ - static InputStream rangeInputStream(InputStream inputStream, long oneBasedOffset, long length, long dataSize) { + protected static InputStream rangeInputStream(InputStream inputStream, long oneBasedOffset, long length, + long dataSize) { if (dataSize > 0) { rangeCheck(oneBasedOffset - 1, length, dataSize); } else { @@ -90,599 +89,209 @@ static Reader rangeReader(Reader reader, long oneBasedOffset, long length, long } } + private TypeInfo type; + + final LobData lobData; + /** - * Compares LOBs of the same type. - * - * @param v1 first LOB value - * @param v2 second LOB value - * @return result of comparison + * Length in bytes. 
*/ - static int compare(Value v1, Value v2) { - int valueType = v1.getValueType(); - assert valueType == v2.getValueType(); - if (v1 instanceof ValueLobDb && v2 instanceof ValueLobDb) { - byte[] small1 = v1.getSmall(), small2 = v2.getSmall(); - if (small1 != null && small2 != null) { - if (valueType == Value.BLOB) { - return Bits.compareNotNullSigned(small1, small2); - } else { - return Integer.signum(v1.getString().compareTo(v2.getString())); - } - } - } - long minPrec = Math.min(v1.getType().getPrecision(), v2.getType().getPrecision()); - if (valueType == Value.BLOB) { - try (InputStream is1 = v1.getInputStream(); - InputStream is2 = v2.getInputStream()) { - byte[] buf1 = new byte[BLOCK_COMPARISON_SIZE]; - byte[] buf2 = new byte[BLOCK_COMPARISON_SIZE]; - for (; minPrec >= BLOCK_COMPARISON_SIZE; minPrec -= BLOCK_COMPARISON_SIZE) { - if (IOUtils.readFully(is1, buf1, BLOCK_COMPARISON_SIZE) != BLOCK_COMPARISON_SIZE - || IOUtils.readFully(is2, buf2, BLOCK_COMPARISON_SIZE) != BLOCK_COMPARISON_SIZE) { - throw DbException.getUnsupportedException("Invalid LOB"); - } - int cmp = Bits.compareNotNullSigned(buf1, buf2); - if (cmp != 0) { - return cmp; - } - } - for (;;) { - int c1 = is1.read(), c2 = is2.read(); - if (c1 < 0) { - return c2 < 0 ? 
0 : -1; - } - if (c2 < 0) { - return 1; - } - if (c1 != c2) { - return Integer.compare(c1, c2); - } - } - } catch (IOException ex) { - throw DbException.convert(ex); - } - } else { - try (Reader reader1 = v1.getReader(); - Reader reader2 = v2.getReader()) { - char[] buf1 = new char[BLOCK_COMPARISON_SIZE]; - char[] buf2 = new char[BLOCK_COMPARISON_SIZE]; - for (; minPrec >= BLOCK_COMPARISON_SIZE; minPrec -= BLOCK_COMPARISON_SIZE) { - if (IOUtils.readFully(reader1, buf1, BLOCK_COMPARISON_SIZE) != BLOCK_COMPARISON_SIZE - || IOUtils.readFully(reader2, buf2, BLOCK_COMPARISON_SIZE) != BLOCK_COMPARISON_SIZE) { - throw DbException.getUnsupportedException("Invalid LOB"); - } - int cmp = Bits.compareNotNull(buf1, buf2); - if (cmp != 0) { - return cmp; - } - } - for (;;) { - int c1 = reader1.read(), c2 = reader2.read(); - if (c1 < 0) { - return c2 < 0 ? 0 : -1; - } - if (c2 < 0) { - return 1; - } - if (c1 != c2) { - return Integer.compare(c1, c2); - } - } - } catch (IOException ex) { - throw DbException.convert(ex); - } - } - } + long octetLength; /** - * This counter is used to calculate the next directory to store lobs. It is - * better than using a random number because less directories are created. + * Length in characters. */ - private static int dirCounter; + long charLength; /** - * either Value.BLOB or Value.CLOB + * Cache the hashCode because it can be expensive to compute. 
*/ - private final int valueType; - private TypeInfo type; - private final long precision; - private final DataHandler handler; - private int tableId; - private final int objectId; - private String fileName; - private boolean linked; private int hash; - private final boolean compressed; - - private ValueLob(int type, DataHandler handler, String fileName, - int tableId, int objectId, boolean linked, long precision, - boolean compressed) { - this.valueType = type; - this.handler = handler; - this.fileName = fileName; - this.tableId = tableId; - this.objectId = objectId; - this.linked = linked; - this.precision = precision; - this.compressed = compressed; - } - private static String getFileName(DataHandler handler, int tableId, - int objectId) { - if (tableId == 0 && objectId == 0) { - DbException.throwInternalError("0 LOB"); - } - String table = tableId < 0 ? ".temp" : ".t" + tableId; - return getFileNamePrefix(handler.getDatabasePath(), objectId) + - table + Constants.SUFFIX_LOB_FILE; - } - - /** - * Create a LOB value with the given parameters. - * - * @param type the data type, either Value.BLOB or Value.CLOB - * @param handler the file handler - * @param tableId the table object id - * @param objectId the object id - * @param precision the precision (length in elements) - * @param compression if compression is used - * @return the value object - */ - public static ValueLob openLinked(int type, DataHandler handler, - int tableId, int objectId, long precision, boolean compression) { - String fileName = getFileName(handler, tableId, objectId); - return new ValueLob(type, handler, fileName, tableId, objectId, - true/* linked */, precision, compression); + ValueLob(LobData lobData, long octetLength, long charLength) { + this.lobData = lobData; + this.octetLength = octetLength; + this.charLength = charLength; } /** - * Create a LOB value with the given parameters. 
- * - * @param type the data type, either Value.BLOB or Value.CLOB - * @param handler the file handler - * @param tableId the table object id - * @param objectId the object id - * @param precision the precision (length in elements) - * @param compression if compression is used - * @param fileName the file name - * @return the value object + * Create file name for temporary LOB storage + * @param handler to get path from + * @return full path and name of the created file + * @throws IOException if file creation fails */ - public static ValueLob openUnlinked(int type, DataHandler handler, - int tableId, int objectId, long precision, boolean compression, - String fileName) { - return new ValueLob(type, handler, fileName, tableId, objectId, - false/* linked */, precision, compression); - } - - private static String getFileNamePrefix(String path, int objectId) { - String name; - int f = objectId % SysProperties.LOB_FILES_PER_DIRECTORY; - if (f > 0) { - name = SysProperties.FILE_SEPARATOR + objectId; - } else { - name = ""; - } - objectId /= SysProperties.LOB_FILES_PER_DIRECTORY; - while (objectId > 0) { - f = objectId % SysProperties.LOB_FILES_PER_DIRECTORY; - name = SysProperties.FILE_SEPARATOR + f + - Constants.SUFFIX_LOBS_DIRECTORY + name; - objectId /= SysProperties.LOB_FILES_PER_DIRECTORY; + static String createTempLobFileName(DataHandler handler) throws IOException { + String path = handler.getDatabasePath(); + if (path.isEmpty()) { + path = SysProperties.PREFIX_TEMP_FILE; } - name = FileUtils.toRealPath(path + - Constants.SUFFIX_LOBS_DIRECTORY + name); - return name; + return FileUtils.createTempFile(path, Constants.SUFFIX_TEMP_FILE, true); } - private static int getNewObjectId(DataHandler h) { - String path = h.getDatabasePath(); - if (path != null && path.isEmpty()) { - path = new File(Utils.getProperty("java.io.tmpdir", "."), - SysProperties.PREFIX_TEMP_FILE).getAbsolutePath(); - } - int newId = 0; - int lobsPerDir = SysProperties.LOB_FILES_PER_DIRECTORY; - 
while (true) { - String dir = getFileNamePrefix(path, newId); - String[] list = getFileList(h, dir); - int fileCount = 0; - boolean[] used = new boolean[lobsPerDir]; - for (String name : list) { - if (name.endsWith(Constants.SUFFIX_DB_FILE)) { - name = FileUtils.getName(name); - String n = name.substring(0, name.indexOf('.')); - int id; - try { - id = Integer.parseInt(n); - } catch (NumberFormatException e) { - id = -1; - } - if (id > 0) { - fileCount++; - used[id % lobsPerDir] = true; - } - } - } - int fileId = -1; - if (fileCount < lobsPerDir) { - for (int i = 1; i < lobsPerDir; i++) { - if (!used[i]) { - fileId = i; - break; - } - } - } - if (fileId > 0) { - newId += fileId; - invalidateFileList(h, dir); - break; - } - if (newId > Integer.MAX_VALUE / lobsPerDir) { - // this directory path is full: start from zero - newId = 0; - dirCounter = MathUtils.randomInt(lobsPerDir - 1) * lobsPerDir; - } else { - // calculate the directory. - // start with 1 (otherwise we don't know the number of - // directories). 
- // it doesn't really matter what directory is used, it might as - // well be random (but that would generate more directories): - // int dirId = RandomUtils.nextInt(lobsPerDir - 1) + 1; - int dirId = (dirCounter++ / (lobsPerDir - 1)) + 1; - newId = newId * lobsPerDir; - newId += dirId * lobsPerDir; - } + static int getBufferSize(DataHandler handler, long remaining) { + if (remaining < 0 || remaining > Integer.MAX_VALUE) { + remaining = Integer.MAX_VALUE; } - return newId; - } - - private static void invalidateFileList(DataHandler h, String dir) { - SmallLRUCache cache = h.getLobFileListCache(); - if (cache != null) { - synchronized (cache) { - cache.remove(dir); - } + int inplace = handler.getMaxLengthInplaceLob(); + long m = Constants.IO_BUFFER_SIZE; + if (m < remaining && m <= inplace) { + // using "1L" to force long arithmetic because + // inplace could be Integer.MAX_VALUE + m = Math.min(remaining, inplace + 1L); + // the buffer size must be bigger than the inplace lob, otherwise we + // can't know if it must be stored in-place or not + m = MathUtils.roundUpLong(m, Constants.IO_BUFFER_SIZE); } - } - - private static String[] getFileList(DataHandler h, String dir) { - SmallLRUCache cache = h.getLobFileListCache(); - String[] list; - if (cache == null) { - list = FileUtils.newDirectoryStream(dir).toArray(new String[0]); - } else { - synchronized (cache) { - list = cache.get(dir); - if (list == null) { - list = FileUtils.newDirectoryStream(dir).toArray(new String[0]); - cache.put(dir, list); - } - } + m = Math.min(remaining, m); + m = MathUtils.convertLongToInt(m); + if (m < 0) { + m = Integer.MAX_VALUE; } - return list; + return (int) m; } /** - * Convert a lob to another data type. The data is fully read in memory - * except when converting to BLOB or CLOB. + * Check if this value is linked to a specific table. For values that are + * kept fully in memory, this method returns false. 
* - * @param t the new type - * @param mode the database mode - * @param column the column (if any), used for to improve the error message if conversion fails - * @param extTypeInfo the extended data type information, or null - * @return the converted value + * @return true if it is */ - @Override - protected Value convertTo(int t, Mode mode, Object column, ExtTypeInfo extTypeInfo) { - if (t == valueType) { - return this; - } else if (t == Value.CLOB) { - return ValueLobDb.createTempClob(getReader(), -1, handler); - } else if (t == Value.BLOB) { - return ValueLobDb.createTempBlob(getInputStream(), -1, handler); - } - return super.convertTo(t, mode, column, null); - } - - @Override public boolean isLinkedToTable() { - return linked; + return lobData.isLinkedToTable(); } /** - * Get the current file name where the lob is saved. - * - * @return the file name or null + * Remove the underlying resource, if any. For values that are kept fully in + * memory this method has no effect. */ - public String getFileName() { - return fileName; - } - - @Override public void remove() { - deleteFile(handler, fileName); - } - - @Override - public Value copy(DataHandler h, int tabId) { - if (linked) { - ValueLob copy = new ValueLob(this.valueType, this.handler, this.fileName, - this.tableId, getNewObjectId(h), this.linked, this.precision, this.compressed); - copy.hash = this.hash; - copy.tableId = tabId; - String live = getFileName(h, copy.tableId, copy.objectId); - copyFileTo(h, fileName, live); - copy.fileName = live; - copy.linked = true; - return copy; - } - if (!linked) { - this.tableId = tabId; - String live = getFileName(h, tableId, objectId); - renameFile(h, fileName, live); - fileName = live; - linked = true; - } - return this; - } - - /** - * Get the current table id of this lob. - * - * @return the table id - */ - @Override - public int getTableId() { - return tableId; + lobData.remove(this); } /** - * Get the current object id of this lob. 
+ * Copy a large value, to be used in the given table. For values that are + * kept fully in memory this method has no effect. * - * @return the object id + * @param database the data handler + * @param tableId the table where this object is used + * @return the new value or itself */ - public int getObjectId() { - return objectId; - } + public abstract ValueLob copy(DataHandler database, int tableId); @Override public TypeInfo getType() { TypeInfo type = this.type; if (type == null) { - this.type = type = new TypeInfo(valueType, precision, 0, MathUtils.convertLongToInt(precision), null); + int valueType = getValueType(); + this.type = type = new TypeInfo(valueType, valueType == CLOB ? charLength : octetLength, 0, null); } return type; } - @Override - public int getValueType() { - return valueType; + DbException getStringTooLong(long precision) { + return DbException.getValueTooLongException("CHARACTER VARYING", readString(81), precision); } - @Override - public String getString() { - int len = precision > Integer.MAX_VALUE || precision == 0 ? 
- Integer.MAX_VALUE : (int) precision; + String readString(int len) { try { - if (valueType == Value.CLOB) { - return IOUtils.readStringAndClose(getReader(), len); - } - byte[] buff = IOUtils.readBytesAndClose(getInputStream(), len); - return StringUtils.convertBytesToHex(buff); + return IOUtils.readStringAndClose(getReader(), len); } catch (IOException e) { - throw DbException.convertIOException(e, fileName); + throw DbException.convertIOException(e, toString()); } } @Override - public byte[] getBytes() { - if (valueType == CLOB) { - // convert hex to string - return super.getBytes(); - } - byte[] data = getBytesNoCopy(); - return Utils.cloneByteArray(data); + public Reader getReader() { + return IOUtils.getReader(getInputStream()); } @Override - public byte[] getBytesNoCopy() { - if (valueType == CLOB) { - // convert hex to string - return super.getBytesNoCopy(); - } - try { - return IOUtils.readBytesAndClose( - getInputStream(), Integer.MAX_VALUE); - } catch (IOException e) { - throw DbException.convertIOException(e, fileName); + public byte[] getBytes() { + if (lobData instanceof LobDataInMemory) { + return Utils.cloneByteArray(getSmall()); } + return getBytesInternal(); } @Override - public int hashCode() { - if (hash == 0) { - if (precision > 4096) { - // TODO: should calculate the hash code when saving, and store - // it in the database file - return (int) (precision ^ (precision >>> 32)); - } - if (valueType == CLOB) { - hash = getString().hashCode(); - } else { - hash = Utils.getByteArrayHash(getBytes()); - } + public byte[] getBytesNoCopy() { + if (lobData instanceof LobDataInMemory) { + return getSmall(); } - return hash; - } - - @Override - public int compareTypeSafe(Value v, CompareMode mode) { - return compare(this, v); + return getBytesInternal(); } - @Override - public Object getObject() { - if (valueType == Value.CLOB) { - return getReader(); + private byte[] getSmall() { + byte[] small = ((LobDataInMemory) lobData).getSmall(); + int p = 
small.length; + if (p > Constants.MAX_STRING_LENGTH) { + throw DbException.getValueTooLongException("BINARY VARYING", StringUtils.convertBytesToHex(small, 41), p); } - return getInputStream(); - } - - @Override - public Reader getReader() { - return IOUtils.getBufferedReader(getInputStream()); - } - - @Override - public Reader getReader(long oneBasedOffset, long length) { - return rangeReader(getReader(), oneBasedOffset, length, valueType == Value.CLOB ? precision : -1); + return small; } - @Override - public InputStream getInputStream() { - FileStore store = handler.openFile(fileName, "r", true); - boolean alwaysClose = SysProperties.lobCloseBetweenReads; - return new BufferedInputStream( - new FileStoreInputStream(store, handler, compressed, alwaysClose), - Constants.IO_BUFFER_SIZE); - } + abstract byte[] getBytesInternal(); - @Override - public InputStream getInputStream(long oneBasedOffset, long length) { - FileStore store = handler.openFile(fileName, "r", true); - boolean alwaysClose = SysProperties.lobCloseBetweenReads; - InputStream inputStream = new BufferedInputStream( - new FileStoreInputStream(store, handler, compressed, alwaysClose), - Constants.IO_BUFFER_SIZE); - return rangeInputStream(inputStream, oneBasedOffset, length, store.length()); + DbException getBinaryTooLong(long precision) { + return DbException.getValueTooLongException("BINARY VARYING", StringUtils.convertBytesToHex(readBytes(41)), + precision); } - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - long p = precision; - if (p > Integer.MAX_VALUE || p <= 0) { - p = -1; - } - if (valueType == Value.BLOB) { - prep.setBinaryStream(parameterIndex, getInputStream(), (int) p); - } else { - prep.setCharacterStream(parameterIndex, getReader(), (int) p); + byte[] readBytes(int len) { + try { + return IOUtils.readBytesAndClose(getInputStream(), len); + } catch (IOException e) { + throw DbException.convertIOException(e, toString()); } } @Override - 
public StringBuilder getSQL(StringBuilder builder) { - if (valueType == Value.CLOB) { - StringUtils.quoteStringSQL(builder, getString()); - } else { - builder.append("X'"); - StringUtils.convertBytesToHex(builder, getBytes()).append('\''); + public int hashCode() { + if (hash == 0) { + int valueType = getValueType(); + long length = valueType == Value.CLOB ? charLength : octetLength; + if (length > 4096) { + // TODO: should calculate the hash code when saving, and store + // it in the database file + return (int) (length ^ (length >>> 32)); + } + hash = Utils.getByteArrayHash(getBytesNoCopy()); } - return builder; + return hash; } @Override - public String getTraceSQL() { - StringBuilder buff = new StringBuilder(); - if (valueType == Value.CLOB) { - buff.append("SPACE(").append(precision); - } else { - buff.append("CAST(REPEAT('00', ").append(precision).append(") AS BINARY"); - } - buff.append(" /* ").append(fileName).append(" */)"); - return buff.toString(); + public boolean equals(Object other) { + if (!(other instanceof ValueLob)) + return false; + ValueLob otherLob = (ValueLob) other; + if (hashCode() != otherLob.hashCode()) + return false; + return compareTypeSafe((Value) other, null, null) == 0; } - /** - * Get the data if this a small lob value. - * - * @return the data - */ @Override - public byte[] getSmall() { - return null; + public int getMemory() { + return lobData.getMemory(); } - @Override - public boolean equals(Object other) { - if (other instanceof ValueLob) { - ValueLob o = (ValueLob) other; - return valueType == o.valueType && compareTypeSafe(o, null) == 0; - } - return false; + public LobData getLobData() { + return lobData; } /** - * Check if this lob value is compressed. + * Create an independent copy of this value, that will be bound to a result. 
* - * @return true if it is + * @return the value (this for small objects) */ - public boolean isCompressed() { - return compressed; - } - - private static synchronized void deleteFile(DataHandler handler, - String fileName) { - // synchronize on the database, to avoid concurrent temp file creation / - // deletion / backup - synchronized (handler.getLobSyncObject()) { - FileUtils.delete(fileName); - } - } - - private static synchronized void renameFile(DataHandler handler, - String oldName, String newName) { - synchronized (handler.getLobSyncObject()) { - FileUtils.move(oldName, newName); - } - } - - private static void copyFileTo(DataHandler h, String sourceFileName, - String targetFileName) { - synchronized (h.getLobSyncObject()) { - try { - IOUtils.copyFiles(sourceFileName, targetFileName); - } catch (IOException e) { - throw DbException.convertIOException(e, null); + public ValueLob copyToResult() { + if (lobData instanceof LobDataDatabase) { + LobStorageInterface s = lobData.getDataHandler().getLobStorage(); + if (!s.isReadOnly()) { + return s.copyLob(this, LobStorageFrontend.TABLE_RESULT); } } - } - - @Override - public int getMemory() { - return 140; - } - - /** - * Create an independent copy of this temporary value. - * The file will not be deleted automatically. 
- * - * @return the value - */ - @Override - public ValueLobDb copyToTemp() { - ValueLobDb lob; - if (valueType == CLOB) { - lob = ValueLobDb.createTempClob(getReader(), precision, handler); - } else { - lob = ValueLobDb.createTempBlob(getInputStream(), precision, handler); - } - return lob; - } - - @Override - public Value convertPrecision(long precision, boolean force) { - if (this.precision <= precision) { - return this; - } - ValueLobDb lob; - if (valueType == CLOB) { - lob = ValueLobDb.createTempClob(getReader(), precision, handler); - } else { - lob = ValueLobDb.createTempBlob(getInputStream(), precision, handler); - } - return lob; + return this; } } diff --git a/h2/src/main/org/h2/value/ValueLobDb.java b/h2/src/main/org/h2/value/ValueLobDb.java deleted file mode 100644 index a38d412e36..0000000000 --- a/h2/src/main/org/h2/value/ValueLobDb.java +++ /dev/null @@ -1,751 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.value; - -import java.io.BufferedInputStream; -import java.io.BufferedReader; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.Reader; -import java.nio.charset.StandardCharsets; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import org.h2.engine.Constants; -import org.h2.engine.Mode; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.store.DataHandler; -import org.h2.store.FileStore; -import org.h2.store.FileStoreInputStream; -import org.h2.store.FileStoreOutputStream; -import org.h2.store.LobStorageFrontend; -import org.h2.store.LobStorageInterface; -import org.h2.store.RangeReader; -import org.h2.store.fs.FileUtils; -import org.h2.util.IOUtils; -import org.h2.util.MathUtils; -import org.h2.util.StringUtils; -import org.h2.util.Utils; - -/** - * A implementation of the BLOB and CLOB data types. - * - * Small objects are kept in memory and stored in the record. - * Large objects are either stored in the database, or in temporary files. - */ -public class ValueLobDb extends Value { - - /** - * the value type (Value.BLOB or CLOB) - */ - private final int valueType; - - private TypeInfo type; - /** - * If the LOB is managed by the one the LobStorageBackend classes, these are the - * unique key inside that storage. - */ - private final int tableId; - private final long lobId; - /** - * If this is a client-side ValueLobDb object returned by a ResultSet, the - * hmac acts a security cookie that the client can send back to the server - * to ask for data related to this LOB. - */ - private final byte[] hmac; - /** - * If the LOB is below the inline size, we just store/load it directly - * here. - */ - private final byte[] small; - private final DataHandler handler; - /** - * For a BLOB, precision is length in bytes. - * For a CLOB, precision is length in chars. 
- */ - private final long precision; - /** - * If the LOB is a temporary LOB being managed by a temporary ResultSet, - * it is stored in a temporary file. - */ - private final String fileName; - private final FileStore tempFile; - /** - * Cache the hashCode because it can be expensive to compute. - */ - private int hash; - - //Arbonaut: 13.07.2016 - // Fix for recovery tool. - - private boolean isRecoveryReference; - - private ValueLobDb(int type, DataHandler handler, int tableId, long lobId, - byte[] hmac, long precision) { - this.valueType = type; - this.handler = handler; - this.tableId = tableId; - this.lobId = lobId; - this.hmac = hmac; - this.precision = precision; - this.small = null; - this.fileName = null; - this.tempFile = null; - } - - private ValueLobDb(int type, byte[] small, long precision) { - this.valueType = type; - this.small = small; - this.precision = precision; - this.lobId = 0; - this.hmac = null; - this.handler = null; - this.fileName = null; - this.tempFile = null; - this.tableId = 0; - } - - /** - * Create a CLOB in a temporary file. - */ - private ValueLobDb(DataHandler handler, Reader in, long remaining) - throws IOException { - this.valueType = Value.CLOB; - this.handler = handler; - this.small = null; - this.lobId = 0; - this.hmac = null; - this.fileName = createTempLobFileName(handler); - this.tempFile = this.handler.openFile(fileName, "rw", false); - this.tempFile.autoDelete(); - - long tmpPrecision = 0; - try (FileStoreOutputStream out = new FileStoreOutputStream(tempFile, null, null)) { - char[] buff = new char[Constants.IO_BUFFER_SIZE]; - while (true) { - int len = getBufferSize(this.handler, false, remaining); - len = IOUtils.readFully(in, buff, len); - if (len == 0) { - break; - } - byte[] data = new String(buff, 0, len).getBytes(StandardCharsets.UTF_8); - out.write(data); - tmpPrecision += len; - } - } - this.precision = tmpPrecision; - this.tableId = 0; - } - - /** - * Create a BLOB in a temporary file. 
- */ - private ValueLobDb(DataHandler handler, byte[] buff, int len, InputStream in, - long remaining) throws IOException { - this.valueType = Value.BLOB; - this.handler = handler; - this.small = null; - this.lobId = 0; - this.hmac = null; - this.fileName = createTempLobFileName(handler); - this.tempFile = this.handler.openFile(fileName, "rw", false); - this.tempFile.autoDelete(); - long tmpPrecision = 0; - boolean compress = this.handler.getLobCompressionAlgorithm(Value.BLOB) != null; - try (FileStoreOutputStream out = new FileStoreOutputStream(tempFile, null, null)) { - while (true) { - tmpPrecision += len; - out.write(buff, 0, len); - remaining -= len; - if (remaining <= 0) { - break; - } - len = getBufferSize(this.handler, compress, remaining); - len = IOUtils.readFully(in, buff, len); - if (len <= 0) { - break; - } - } - } - this.precision = tmpPrecision; - this.tableId = 0; - } - - private static String createTempLobFileName(DataHandler handler) - throws IOException { - String path = handler.getDatabasePath(); - if (path.isEmpty()) { - path = SysProperties.PREFIX_TEMP_FILE; - } - return FileUtils.createTempFile(path, Constants.SUFFIX_TEMP_FILE, true); - } - - /** - * Create a LOB value. - * - * @param type the type (Value.BLOB or CLOB) - * @param handler the data handler - * @param tableId the table id - * @param id the lob id - * @param hmac the message authentication code - * @param precision the precision (number of bytes / characters) - * @return the value - */ - public static ValueLobDb create(int type, DataHandler handler, - int tableId, long id, byte[] hmac, long precision) { - return new ValueLobDb(type, handler, tableId, id, hmac, precision); - } - - /** - * Convert a lob to another data type. The data is fully read in memory - * except when converting to BLOB or CLOB. 
- * - * @param t the new type - * @param mode the mode - * @param column the column (if any), used for to improve the error message if conversion fails - * @param extTypeInfo the extended data type information, or null - * @return the converted value - */ - @Override - protected Value convertTo(int t, Mode mode, Object column, ExtTypeInfo extTypeInfo) { - if (t == valueType) { - return this; - } else if (t == Value.CLOB) { - if (handler != null) { - return handler.getLobStorage(). - createClob(getReader(), -1); - } else if (small != null) { - return ValueLobDb.createSmallLob(t, small); - } - } else if (t == Value.BLOB) { - if (handler != null) { - return handler.getLobStorage(). - createBlob(getInputStream(), -1); - } else if (small != null) { - return ValueLobDb.createSmallLob(t, small); - } - } - return super.convertTo(t, mode, column, null); - } - - @Override - public boolean isLinkedToTable() { - return small == null && - tableId >= 0; - } - - public boolean isStored() { - return small == null && fileName == null; - } - - @Override - public void remove() { - if (fileName != null) { - if (tempFile != null) { - tempFile.stopAutoDelete(); - } - // synchronize on the database, to avoid concurrent temp file - // creation / deletion / backup - synchronized (handler.getLobSyncObject()) { - FileUtils.delete(fileName); - } - } - if (handler != null) { - handler.getLobStorage().removeLob(this); - } - } - - @Override - public Value copy(DataHandler database, int tableId) { - if (small == null) { - return handler.getLobStorage().copyLob(this, tableId, precision); - } else if (small.length > database.getMaxLengthInplaceLob()) { - LobStorageInterface s = database.getLobStorage(); - Value v; - if (valueType == Value.BLOB) { - v = s.createBlob(getInputStream(), precision); - } else { - v = s.createClob(getReader(), precision); - } - Value v2 = v.copy(database, tableId); - v.remove(); - return v2; - } - return this; - } - - /** - * Get the current table id of this lob. 
- * - * @return the table id - */ - @Override - public int getTableId() { - return tableId; - } - - @Override - public TypeInfo getType() { - TypeInfo type = this.type; - if (type == null) { - this.type = type = new TypeInfo(valueType, precision, 0, MathUtils.convertLongToInt(precision), null); - } - return type; - } - - @Override - public int getValueType() { - return valueType; - } - - @Override - public String getString() { - int len = precision > Integer.MAX_VALUE || precision == 0 ? - Integer.MAX_VALUE : (int) precision; - try { - if (valueType == Value.CLOB) { - if (small != null) { - return new String(small, StandardCharsets.UTF_8); - } - return IOUtils.readStringAndClose(getReader(), len); - } - byte[] buff; - if (small != null) { - buff = small; - } else { - buff = IOUtils.readBytesAndClose(getInputStream(), len); - } - return StringUtils.convertBytesToHex(buff); - } catch (IOException e) { - throw DbException.convertIOException(e, toString()); - } - } - - @Override - public byte[] getBytes() { - if (valueType == CLOB) { - // convert hex to string - return super.getBytes(); - } - if (small != null) { - return Utils.cloneByteArray(small); - } - try { - return IOUtils.readBytesAndClose(getInputStream(), Integer.MAX_VALUE); - } catch (IOException e) { - throw DbException.convertIOException(e, toString()); - } - } - - @Override - public byte[] getBytesNoCopy() { - if (valueType == CLOB) { - // convert hex to string - return super.getBytesNoCopy(); - } - if (small != null) { - return small; - } - try { - return IOUtils.readBytesAndClose(getInputStream(), Integer.MAX_VALUE); - } catch (IOException e) { - throw DbException.convertIOException(e, toString()); - } - } - - @Override - public int hashCode() { - if (hash == 0) { - if (precision > 4096) { - // TODO: should calculate the hash code when saving, and store - // it in the database file - return (int) (precision ^ (precision >>> 32)); - } - if (valueType == CLOB) { - hash = getString().hashCode(); - } else { 
- if (small != null) { - hash = Utils.getByteArrayHash(small); - } else { - hash = Utils.getByteArrayHash(getBytes()); - } - } - } - return hash; - } - - @Override - public int compareTypeSafe(Value v, CompareMode mode) { - if (v instanceof ValueLobDb) { - ValueLobDb v2 = (ValueLobDb) v; - if (v == this) { - return 0; - } - if (lobId == v2.lobId && small == null && v2.small == null) { - return 0; - } - } - return ValueLob.compare(this, v); - } - - @Override - public Object getObject() { - if (valueType == Value.CLOB) { - return getReader(); - } - return getInputStream(); - } - - @Override - public Reader getReader() { - return IOUtils.getBufferedReader(getInputStream()); - } - - @Override - public Reader getReader(long oneBasedOffset, long length) { - return ValueLob.rangeReader(getReader(), oneBasedOffset, length, valueType == Value.CLOB ? precision : -1); - } - - @Override - public InputStream getInputStream() { - if (small != null) { - return new ByteArrayInputStream(small); - } else if (fileName != null) { - FileStore store = handler.openFile(fileName, "r", true); - boolean alwaysClose = SysProperties.lobCloseBetweenReads; - return new BufferedInputStream(new FileStoreInputStream(store, - handler, false, alwaysClose), Constants.IO_BUFFER_SIZE); - } - long byteCount = (valueType == Value.BLOB) ? 
precision : -1; - try { - return handler.getLobStorage().getInputStream(this, hmac, byteCount); - } catch (IOException e) { - throw DbException.convertIOException(e, toString()); - } - } - - @Override - public InputStream getInputStream(long oneBasedOffset, long length) { - long byteCount; - InputStream inputStream; - if (small != null) { - return super.getInputStream(oneBasedOffset, length); - } else if (fileName != null) { - FileStore store = handler.openFile(fileName, "r", true); - boolean alwaysClose = SysProperties.lobCloseBetweenReads; - byteCount = store.length(); - inputStream = new BufferedInputStream(new FileStoreInputStream(store, - handler, false, alwaysClose), Constants.IO_BUFFER_SIZE); - } else { - byteCount = (valueType == Value.BLOB) ? precision : -1; - try { - inputStream = handler.getLobStorage().getInputStream(this, hmac, byteCount); - } catch (IOException e) { - throw DbException.convertIOException(e, toString()); - } - } - return ValueLob.rangeInputStream(inputStream, oneBasedOffset, length, byteCount); - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - long p = precision; - if (p > Integer.MAX_VALUE || p <= 0) { - p = -1; - } - if (valueType == Value.BLOB) { - prep.setBinaryStream(parameterIndex, getInputStream(), (int) p); - } else { - prep.setCharacterStream(parameterIndex, getReader(), (int) p); - } - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { - if (valueType == Value.CLOB) { - StringUtils.quoteStringSQL(builder, getString()); - } else { - builder.append("X'"); - StringUtils.convertBytesToHex(builder, getBytes()).append('\''); - } - return builder; - } - - @Override - public String getTraceSQL() { - if (small != null && precision <= SysProperties.MAX_TRACE_DATA_LENGTH) { - return getSQL(); - } - StringBuilder buff = new StringBuilder(); - if (valueType == Value.CLOB) { - buff.append("SPACE(").append(precision); - } else { - buff.append("CAST(REPEAT('00', 
").append(precision).append(") AS BINARY"); - } - buff.append(" /* table: ").append(tableId).append(" id: ") - .append(lobId).append(" */)"); - return buff.toString(); - } - - /** - * Get the data if this a small lob value. - * - * @return the data - */ - @Override - public byte[] getSmall() { - return small; - } - - @Override - public boolean equals(Object other) { - if (!(other instanceof ValueLobDb)) - return false; - ValueLobDb otherLob = (ValueLobDb) other; - if (hashCode() != otherLob.hashCode()) - return false; - return compareTypeSafe((Value) other, null) == 0; - } - - @Override - public int getMemory() { - if (small != null) { - /* - * Java 11 with -XX:-UseCompressedOops - * 0 bytes: 120 bytes - * 1 byte: 128 bytes - */ - return small.length + 127; - } - return 140; - } - - /** - * Create an independent copy of this temporary value. - * The file will not be deleted automatically. - * - * @return the value - */ - @Override - public ValueLobDb copyToTemp() { - return this; - } - - /** - * Create an independent copy of this value, - * that will be bound to a result. - * - * @return the value (this for small objects) - */ - @Override - public ValueLobDb copyToResult() { - if (handler == null) { - return this; - } - LobStorageInterface s = handler.getLobStorage(); - if (s.isReadOnly()) { - return this; - } - return s.copyLob(this, LobStorageFrontend.TABLE_RESULT, precision); - } - - public long getLobId() { - return lobId; - } - - @Override - public String toString() { - return "lob: " + fileName + " table: " + tableId + " id: " + lobId; - } - - /** - * Create a temporary CLOB value from a stream. 
- * - * @param in the reader - * @param length the number of characters to read, or -1 for no limit - * @param handler the data handler - * @return the lob value - */ - public static ValueLobDb createTempClob(Reader in, long length, - DataHandler handler) { - if (length >= 0) { - // Otherwise BufferedReader may try to read more data than needed and that - // blocks the network level - try { - in = new RangeReader(in, 0, length); - } catch (IOException e) { - throw DbException.convert(e); - } - } - BufferedReader reader; - if (in instanceof BufferedReader) { - reader = (BufferedReader) in; - } else { - reader = new BufferedReader(in, Constants.IO_BUFFER_SIZE); - } - try { - boolean compress = handler.getLobCompressionAlgorithm(Value.CLOB) != null; - long remaining = Long.MAX_VALUE; - if (length >= 0 && length < remaining) { - remaining = length; - } - int len = getBufferSize(handler, compress, remaining); - char[] buff; - if (len >= Integer.MAX_VALUE) { - String data = IOUtils.readStringAndClose(reader, -1); - buff = data.toCharArray(); - len = buff.length; - } else { - buff = new char[len]; - reader.mark(len); - len = IOUtils.readFully(reader, buff, len); - } - if (len <= handler.getMaxLengthInplaceLob()) { - byte[] small = new String(buff, 0, len).getBytes(StandardCharsets.UTF_8); - return ValueLobDb.createSmallLob(Value.CLOB, small, len); - } - reader.reset(); - return new ValueLobDb(handler, reader, remaining); - } catch (IOException e) { - throw DbException.convertIOException(e, null); - } - } - - /** - * Create a temporary BLOB value from a stream. 
- * - * @param in the input stream - * @param length the number of characters to read, or -1 for no limit - * @param handler the data handler - * @return the lob value - */ - public static ValueLobDb createTempBlob(InputStream in, long length, - DataHandler handler) { - try { - long remaining = Long.MAX_VALUE; - boolean compress = handler.getLobCompressionAlgorithm(Value.BLOB) != null; - if (length >= 0 && length < remaining) { - remaining = length; - } - int len = getBufferSize(handler, compress, remaining); - byte[] buff; - if (len >= Integer.MAX_VALUE) { - buff = IOUtils.readBytesAndClose(in, -1); - len = buff.length; - } else { - buff = Utils.newBytes(len); - len = IOUtils.readFully(in, buff, len); - } - if (len <= handler.getMaxLengthInplaceLob()) { - byte[] small = Utils.copyBytes(buff, len); - return ValueLobDb.createSmallLob(Value.BLOB, small, small.length); - } - return new ValueLobDb(handler, buff, len, in, remaining); - } catch (IOException e) { - throw DbException.convertIOException(e, null); - } - } - - private static int getBufferSize(DataHandler handler, boolean compress, - long remaining) { - if (remaining < 0 || remaining > Integer.MAX_VALUE) { - remaining = Integer.MAX_VALUE; - } - int inplace = handler.getMaxLengthInplaceLob(); - long m = compress ? 
Constants.IO_BUFFER_SIZE_COMPRESS - : Constants.IO_BUFFER_SIZE; - if (m < remaining && m <= inplace) { - // using "1L" to force long arithmetic because - // inplace could be Integer.MAX_VALUE - m = Math.min(remaining, inplace + 1L); - // the buffer size must be bigger than the inplace lob, otherwise we - // can't know if it must be stored in-place or not - m = MathUtils.roundUpLong(m, Constants.IO_BUFFER_SIZE); - } - m = Math.min(remaining, m); - m = MathUtils.convertLongToInt(m); - if (m < 0) { - m = Integer.MAX_VALUE; - } - return (int) m; - } - - @Override - public Value convertPrecision(long precision, boolean force) { - if (this.precision <= precision) { - return this; - } - ValueLobDb lob; - if (valueType == CLOB) { - if (handler == null) { - try { - int p = MathUtils.convertLongToInt(precision); - String s = IOUtils.readStringAndClose(getReader(), p); - byte[] data = s.getBytes(StandardCharsets.UTF_8); - lob = ValueLobDb.createSmallLob(valueType, data, s.length()); - } catch (IOException e) { - throw DbException.convertIOException(e, null); - } - } else { - lob = ValueLobDb.createTempClob(getReader(), precision, handler); - } - } else { - if (handler == null) { - try { - int p = MathUtils.convertLongToInt(precision); - byte[] data = IOUtils.readBytesAndClose(getInputStream(), p); - lob = ValueLobDb.createSmallLob(valueType, data, data.length); - } catch (IOException e) { - throw DbException.convertIOException(e, null); - } - } else { - lob = ValueLobDb.createTempBlob(getInputStream(), precision, handler); - } - } - return lob; - } - - /** - * Create a LOB object that fits in memory. 
- * - * @param type the type (Value.BLOB or CLOB) - * @param small the byte array - * @return the LOB - */ - public static ValueLobDb createSmallLob(int type, byte[] small) { - int precision; - if (type == Value.CLOB) { - precision = new String(small, StandardCharsets.UTF_8).length(); - } else { - precision = small.length; - } - return createSmallLob(type, small, precision); - } - - /** - * Create a LOB object that fits in memory. - * - * @param type the type (Value.BLOB or CLOB) - * @param small the byte array - * @param precision the precision - * @return the LOB - */ - public static ValueLobDb createSmallLob(int type, byte[] small, - long precision) { - return new ValueLobDb(type, small, precision); - } - - - public void setRecoveryReference(boolean isRecoveryReference) { - this.isRecoveryReference = isRecoveryReference; - } - - public boolean isRecoveryReference() { - return isRecoveryReference; - } -} diff --git a/h2/src/main/org/h2/value/ValueNull.java b/h2/src/main/org/h2/value/ValueNull.java index 891f877511..f6cda3bb68 100644 --- a/h2/src/main/org/h2/value/ValueNull.java +++ b/h2/src/main/org/h2/value/ValueNull.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; @@ -8,20 +8,14 @@ import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; -import java.sql.Date; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Time; -import java.sql.Timestamp; -import java.sql.Types; - -import org.h2.engine.Mode; + +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; /** * Implementation of NULL. NULL is not a regular data type. 
*/ -public class ValueNull extends Value { +public final class ValueNull extends Value { /** * The main NULL instance. @@ -43,7 +37,7 @@ private ValueNull() { } @Override - public StringBuilder getSQL(StringBuilder builder) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { return builder.append("NULL"); } @@ -69,22 +63,12 @@ public String getString() { } @Override - public boolean getBoolean() { - return false; - } - - @Override - public Date getDate() { - return null; - } - - @Override - public Time getTime() { + public Reader getReader() { return null; } @Override - public Timestamp getTimestamp() { + public Reader getReader(long oneBasedOffset, long length) { return null; } @@ -94,58 +78,58 @@ public byte[] getBytes() { } @Override - public byte getByte() { - return 0; + public InputStream getInputStream() { + return null; } @Override - public short getShort() { - return 0; + public InputStream getInputStream(long oneBasedOffset, long length) { + return null; } @Override - public BigDecimal getBigDecimal() { - return null; + public boolean getBoolean() { + throw DbException.getInternalError(); } @Override - public double getDouble() { - return 0.0; + public byte getByte() { + throw DbException.getInternalError(); } @Override - public float getFloat() { - return 0.0F; + public short getShort() { + throw DbException.getInternalError(); } @Override public int getInt() { - return 0; + throw DbException.getInternalError(); } @Override public long getLong() { - return 0; + throw DbException.getInternalError(); } @Override - public InputStream getInputStream() { + public BigDecimal getBigDecimal() { return null; } @Override - public Reader getReader() { - return null; + public float getFloat() { + throw DbException.getInternalError(); } @Override - protected Value convertTo(int type, Mode mode, Object column, ExtTypeInfo extTypeInfo) { - return this; + public double getDouble() { + throw DbException.getInternalError(); } @Override - public int 
compareTypeSafe(Value v, CompareMode mode) { - throw DbException.throwInternalError("compare null"); + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + throw DbException.getInternalError("compare null"); } @Override @@ -158,17 +142,6 @@ public int hashCode() { return 0; } - @Override - public Object getObject() { - return null; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setNull(parameterIndex, Types.NULL); - } - @Override public boolean equals(Object other) { return other == this; diff --git a/h2/src/main/org/h2/value/ValueNumeric.java b/h2/src/main/org/h2/value/ValueNumeric.java new file mode 100644 index 0000000000..8a7a164093 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueNumeric.java @@ -0,0 +1,218 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.math.RoundingMode; + +import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; + +/** + * Implementation of the NUMERIC data type. + */ +public final class ValueNumeric extends ValueBigDecimalBase { + + /** + * The value 'zero'. + */ + public static final ValueNumeric ZERO = new ValueNumeric(BigDecimal.ZERO); + + /** + * The value 'one'. + */ + public static final ValueNumeric ONE = new ValueNumeric(BigDecimal.ONE); + + /** + * The default scale for a NUMERIC value. + */ + public static final int DEFAULT_SCALE = 0; + + /** + * The maximum scale. 
+ */ + public static final int MAXIMUM_SCALE = 100_000; + + private ValueNumeric(BigDecimal value) { + super(value); + if (value == null) { + throw new IllegalArgumentException("null"); + } + int scale = value.scale(); + if (scale < 0 || scale > MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", "" + MAXIMUM_SCALE); + } + } + + @Override + public String getString() { + return value.toPlainString(); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + String s = getString(); + if ((sqlFlags & NO_CASTS) == 0 && s.indexOf('.') < 0 && value.compareTo(MAX_LONG_DECIMAL) <= 0 + && value.compareTo(MIN_LONG_DECIMAL) >= 0) { + return builder.append("CAST(").append(value).append(" AS NUMERIC(").append(value.precision()).append("))"); + } + return builder.append(s); + } + + @Override + public TypeInfo getType() { + TypeInfo type = this.type; + if (type == null) { + this.type = type = new TypeInfo(NUMERIC, value.precision(), value.scale(), null); + } + return type; + } + + @Override + public int getValueType() { + return NUMERIC; + } + + @Override + public Value add(Value v) { + return get(value.add(((ValueNumeric) v).value)); + } + + @Override + public Value subtract(Value v) { + return get(value.subtract(((ValueNumeric) v).value)); + } + + @Override + public Value negate() { + return get(value.negate()); + } + + @Override + public Value multiply(Value v) { + return get(value.multiply(((ValueNumeric) v).value)); + } + + @Override + public Value divide(Value v, TypeInfo quotientType) { + BigDecimal divisor = ((ValueNumeric) v).value; + if (divisor.signum() == 0) { + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); + } + return get(value.divide(divisor, quotientType.getScale(), RoundingMode.HALF_DOWN)); + } + + @Override + public Value modulus(Value v) { + ValueBigDecimalBase dec = (ValueNumeric) v; + if (dec.value.signum() == 0) { + throw 
DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); + } + return get(value.remainder(dec.value)); + } + + @Override + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { + return value.compareTo(((ValueNumeric) o).value); + } + + @Override + public int getSignum() { + return value.signum(); + } + + @Override + public BigDecimal getBigDecimal() { + return value; + } + + @Override + public float getFloat() { + return value.floatValue(); + } + + @Override + public double getDouble() { + return value.doubleValue(); + } + + @Override + public int hashCode() { + return getClass().hashCode() * 31 + value.hashCode(); + } + + @Override + public boolean equals(Object other) { + return other instanceof ValueNumeric && value.equals(((ValueNumeric) other).value); + } + + @Override + public int getMemory() { + return value.precision() + 120; + } + + /** + * Get or create a NUMERIC value for the given big decimal. + * + * @param dec the big decimal + * @return the value + */ + public static ValueNumeric get(BigDecimal dec) { + if (BigDecimal.ZERO.equals(dec)) { + return ZERO; + } else if (BigDecimal.ONE.equals(dec)) { + return ONE; + } + return (ValueNumeric) Value.cache(new ValueNumeric(dec)); + } + + /** + * Get or create a NUMERIC value for the given big decimal with possibly + * negative scale. If scale is negative, it is normalized to 0. + * + * @param dec + * the big decimal + * @return the value + */ + public static ValueNumeric getAnyScale(BigDecimal dec) { + if (dec.scale() < 0) { + dec = dec.setScale(0, RoundingMode.UNNECESSARY); + } + return get(dec); + } + + /** + * Get or create a NUMERIC value for the given big integer. 
+ * + * @param bigInteger the big integer + * @return the value + */ + public static ValueNumeric get(BigInteger bigInteger) { + if (bigInteger.signum() == 0) { + return ZERO; + } else if (BigInteger.ONE.equals(bigInteger)) { + return ONE; + } + return (ValueNumeric) Value.cache(new ValueNumeric(new BigDecimal(bigInteger))); + } + + /** + * Set the scale of a BigDecimal value. + * + * @param bd the BigDecimal value + * @param scale the new scale + * @return the scaled value + */ + public static BigDecimal setScale(BigDecimal bd, int scale) { + if (scale < 0 || scale > MAXIMUM_SCALE) { + throw DbException.getInvalidValueException("scale", scale); + } + return bd.setScale(scale, RoundingMode.HALF_UP); + } + +} diff --git a/h2/src/main/org/h2/value/ValueFloat.java b/h2/src/main/org/h2/value/ValueReal.java similarity index 51% rename from h2/src/main/org/h2/value/ValueFloat.java rename to h2/src/main/org/h2/value/ValueReal.java index 7ebc641fa1..3470fa7455 100644 --- a/h2/src/main/org/h2/value/ValueFloat.java +++ b/h2/src/main/org/h2/value/ValueReal.java @@ -1,28 +1,33 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; +import java.math.BigDecimal; import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; /** * Implementation of the REAL data type. */ -public class ValueFloat extends Value { +public final class ValueReal extends Value { /** - * The precision in digits. + * The precision in bits. */ - static final int PRECISION = 7; + static final int PRECISION = 24; /** - * The maximum display size of a float. + * The approximate precision in decimal digits. 
+ */ + static final int DECIMAL_PRECISION = 7; + + /** + * The maximum display size of a REAL. * Example: -1.12345676E-20 */ static final int DISPLAY_SIZE = 15; @@ -35,31 +40,29 @@ public class ValueFloat extends Value { /** * The value 0. */ - public static final ValueFloat ZERO = new ValueFloat(0f); + public static final ValueReal ZERO = new ValueReal(0f); /** * The value 1. */ - public static final ValueFloat ONE = new ValueFloat(1f); + public static final ValueReal ONE = new ValueReal(1f); - private static final ValueFloat NAN = new ValueFloat(Float.NaN); + private static final ValueReal NAN = new ValueReal(Float.NaN); private final float value; - private ValueFloat(float value) { + private ValueReal(float value) { this.value = value; } @Override public Value add(Value v) { - ValueFloat v2 = (ValueFloat) v; - return get(value + v2.value); + return get(value + ((ValueReal) v).value); } @Override public Value subtract(Value v) { - ValueFloat v2 = (ValueFloat) v; - return get(value - v2.value); + return get(value - ((ValueReal) v).value); } @Override @@ -69,60 +72,75 @@ public Value negate() { @Override public Value multiply(Value v) { - ValueFloat v2 = (ValueFloat) v; - return get(value * v2.value); + return get(value * ((ValueReal) v).value); } @Override - public Value divide(Value v) { - ValueFloat v2 = (ValueFloat) v; + public Value divide(Value v, TypeInfo quotientType) { + ValueReal v2 = (ValueReal) v; if (v2.value == 0.0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); } return get(value / v2.value); } @Override public Value modulus(Value v) { - ValueFloat other = (ValueFloat) v; + ValueReal other = (ValueReal) v; if (other.value == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); } return get(value % other.value); } @Override - public StringBuilder getSQL(StringBuilder builder) { 
+ public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0) { + return getSQL(builder.append("CAST(")).append(" AS REAL)"); + } + return getSQL(builder); + } + + private StringBuilder getSQL(StringBuilder builder) { if (value == Float.POSITIVE_INFINITY) { - builder.append("POWER(0, -1)"); + return builder.append("'Infinity'"); } else if (value == Float.NEGATIVE_INFINITY) { - builder.append("(-POWER(0, -1))"); + return builder.append("'-Infinity'"); } else if (Float.isNaN(value)) { - builder.append("SQRT(-1)"); + return builder.append("'NaN'"); } else { - builder.append(value); + return builder.append(value); } - return builder; } @Override public TypeInfo getType() { - return TypeInfo.TYPE_FLOAT; + return TypeInfo.TYPE_REAL; } @Override public int getValueType() { - return FLOAT; + return REAL; } @Override - public int compareTypeSafe(Value o, CompareMode mode) { - return Float.compare(value, ((ValueFloat) o).value); + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { + return Float.compare(value, ((ValueReal) o).value); } @Override public int getSignum() { - return value == 0 ? 0 : (value < 0 ? -1 : 1); + return value == 0 || Float.isNaN(value) ? 0 : value < 0 ? -1 : 1; + } + + @Override + public BigDecimal getBigDecimal() { + if (Float.isFinite(value)) { + // better rounding behavior than BigDecimal.valueOf(f) + return new BigDecimal(Float.toString(value)); + } + // Infinite or NaN + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, Float.toString(value)); } @Override @@ -149,24 +167,13 @@ public int hashCode() { return Float.floatToRawIntBits(value); } - @Override - public Object getObject() { - return value; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setFloat(parameterIndex, value); - } - /** - * Get or create float value for the given float. + * Get or create a REAL value for the given float. 
* * @param d the float * @return the value */ - public static ValueFloat get(float d) { + public static ValueReal get(float d) { if (d == 1.0F) { return ONE; } else if (d == 0.0F) { @@ -175,15 +182,15 @@ public static ValueFloat get(float d) { } else if (Float.isNaN(d)) { return NAN; } - return (ValueFloat) Value.cache(new ValueFloat(d)); + return (ValueReal) Value.cache(new ValueReal(d)); } @Override public boolean equals(Object other) { - if (!(other instanceof ValueFloat)) { + if (!(other instanceof ValueReal)) { return false; } - return compareTypeSafe((ValueFloat) other, null) == 0; + return compareTypeSafe((ValueReal) other, null, null) == 0; } } diff --git a/h2/src/main/org/h2/value/ValueResultSet.java b/h2/src/main/org/h2/value/ValueResultSet.java deleted file mode 100644 index 97309b57fa..0000000000 --- a/h2/src/main/org/h2/value/ValueResultSet.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.value; - -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.SQLException; -import java.util.Arrays; -import org.h2.engine.SessionInterface; -import org.h2.message.DbException; -import org.h2.result.ResultInterface; -import org.h2.result.SimpleResult; - -/** - * Implementation of the RESULT_SET data type. - */ -public class ValueResultSet extends Value { - - private final SimpleResult result; - - private ValueResultSet(SimpleResult result) { - this.result = result; - } - - /** - * Create a result set value. - * - * @param result the result - * @return the value - */ - public static ValueResultSet get(SimpleResult result) { - return new ValueResultSet(result); - } - - /** - * Create a result set value for the given result set. The result set will - * be fully read in memory. The original result set is not closed. 
- * - * @param session the session - * @param rs the result set - * @param maxrows the maximum number of rows to read (0 to just read the - * meta data) - * @return the value - */ - public static ValueResultSet get(SessionInterface session, ResultSet rs, int maxrows) { - try { - ResultSetMetaData meta = rs.getMetaData(); - int columnCount = meta.getColumnCount(); - SimpleResult simple = new SimpleResult(); - for (int i = 0; i < columnCount; i++) { - String alias = meta.getColumnLabel(i + 1); - String name = meta.getColumnName(i + 1); - int columnType = DataType.convertSQLTypeToValueType(meta.getColumnType(i + 1), - meta.getColumnTypeName(i + 1)); - int precision = meta.getPrecision(i + 1); - int scale = meta.getScale(i + 1); - simple.addColumn(alias, name, columnType, precision, scale); - } - for (int i = 0; i < maxrows && rs.next(); i++) { - Value[] list = new Value[columnCount]; - for (int j = 0; j < columnCount; j++) { - list[j] = DataType.convertToValue(session, rs.getObject(j + 1), - simple.getColumnType(j).getValueType()); - } - simple.addRow(list); - } - return new ValueResultSet(simple); - } catch (SQLException e) { - throw DbException.convert(e); - } - } - - /** - * Create a result set value for the given result. The result will be fully - * read in memory. The original result is not closed. 
- * - * @param result result - * @param maxrows the maximum number of rows to read (0 to just read the - * meta data) - * @return the value - */ - public static ValueResultSet get(ResultInterface result, int maxrows) { - int columnCount = result.getVisibleColumnCount(); - SimpleResult simple = new SimpleResult(); - for (int i = 0; i < columnCount; i++) { - simple.addColumn(result.getAlias(i), result.getColumnName(i), result.getColumnType(i)); - } - result.reset(); - for (int i = 0; i < maxrows && result.next(); i++) { - simple.addRow(Arrays.copyOf(result.currentRow(), columnCount)); - } - return new ValueResultSet(simple); - } - - @Override - public TypeInfo getType() { - return TypeInfo.TYPE_RESULT_SET; - } - - @Override - public int getValueType() { - return RESULT_SET; - } - - @Override - public int getMemory() { - return result.getRowCount() * result.getVisibleColumnCount() * 32 + 400; - } - - @Override - public String getString() { - StringBuilder buff = new StringBuilder("("); - ResultInterface result = this.result.createShallowCopy(null); - int columnCount = result.getVisibleColumnCount(); - for (int i = 0; result.next(); i++) { - if (i > 0) { - buff.append(", "); - } - buff.append('('); - Value[] row = result.currentRow(); - for (int j = 0; j < columnCount; j++) { - if (j > 0) { - buff.append(", "); - } - buff.append(row[j].getString()); - } - buff.append(')'); - } - return buff.append(')').toString(); - } - - @Override - public int compareTypeSafe(Value v, CompareMode mode) { - return this == v ? 
0 : getString().compareTo(v.getString()); - } - - @Override - public boolean equals(Object other) { - return other == this; - } - - @Override - public int hashCode() { - return System.identityHashCode(this); - } - - @Override - public Object getObject() { - return getString(); - } - - @Override - public ResultInterface getResult() { - return result.createShallowCopy(null); - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) { - throw getUnsupportedExceptionForOperation("PreparedStatement.set"); - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { - return builder; - } - - @Override - public Value convertPrecision(long precision, boolean force) { - if (!force) { - return this; - } - return ValueResultSet.get(new SimpleResult()); - } - -} diff --git a/h2/src/main/org/h2/value/ValueRow.java b/h2/src/main/org/h2/value/ValueRow.java index 4cdb537d4e..37095ee517 100644 --- a/h2/src/main/org/h2/value/ValueRow.java +++ b/h2/src/main/org/h2/value/ValueRow.java @@ -1,30 +1,40 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.Arrays; - import org.h2.api.ErrorCode; -import org.h2.engine.SysProperties; +import org.h2.engine.CastDataProvider; +import org.h2.engine.Constants; import org.h2.message.DbException; +import org.h2.result.SimpleResult; /** * Row value. */ -public class ValueRow extends ValueCollectionBase { +public final class ValueRow extends ValueCollectionBase { /** * Empty row. 
*/ - private static final Object EMPTY = get(new Value[0]); + public static final ValueRow EMPTY = get(Value.EMPTY_VALUES); + + private TypeInfo type; - private ValueRow(Value[] list) { + private ValueRow(TypeInfo type, Value[] list) { super(list); + int degree = list.length; + if (degree > Constants.MAX_COLUMNS) { + throw DbException.get(ErrorCode.TOO_MANY_COLUMNS_1, "" + Constants.MAX_COLUMNS); + } + if (type != null) { + if (type.getValueType() != ROW || ((ExtTypeInfoRow) type.getExtTypeInfo()).getFields().size() != degree) { + throw DbException.getInternalError(); + } + this.type = type; + } } /** @@ -35,16 +45,40 @@ private ValueRow(Value[] list) { * @return the value */ public static ValueRow get(Value[] list) { - return new ValueRow(list); + return new ValueRow(null, list); + } + + /** + * Get or create a typed row value for the given value array. + * Do not clone the data. + * + * @param extTypeInfo the extended data type information + * @param list the value array + * @return the value + */ + public static ValueRow get(ExtTypeInfoRow extTypeInfo, Value[] list) { + return new ValueRow(new TypeInfo(ROW, -1, -1, extTypeInfo), list); } /** - * Returns empty row. + * Get or create a typed row value for the given value array. + * Do not clone the data. 
* - * @return empty row + * @param typeInfo the data type information + * @param list the value array + * @return the value */ - public static ValueRow getEmpty() { - return (ValueRow) EMPTY; + public static ValueRow get(TypeInfo typeInfo, Value[] list) { + return new ValueRow(typeInfo, list); + } + + @Override + public TypeInfo getType() { + TypeInfo type = this.type; + if (type == null) { + this.type = type = TypeInfo.getTypeInfo(Value.ROW, 0, 0, new ExtTypeInfoRow(values)); + } + return type; } @Override @@ -64,8 +98,18 @@ public String getString() { return builder.append(')').toString(); } + public SimpleResult getResult() { + SimpleResult result = new SimpleResult(); + for (int i = 0, l = values.length; i < l;) { + Value v = values[i++]; + result.addColumn("C" + i, v.getType()); + } + result.addRow(values); + return result; + } + @Override - public int compareTypeSafe(Value o, CompareMode mode) { + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { ValueRow v = (ValueRow) o; if (values == v.values) { return 0; @@ -77,7 +121,7 @@ public int compareTypeSafe(Value o, CompareMode mode) { for (int i = 0; i < len; i++) { Value v1 = values[i]; Value v2 = v.values[i]; - int comp = v1.compareTo(v2, /* TODO */ null, mode); + int comp = v1.compareTo(v2, provider, mode); if (comp != 0) { return comp; } @@ -86,54 +130,18 @@ public int compareTypeSafe(Value o, CompareMode mode) { } @Override - public Object getObject() { - int len = values.length; - Object[] list = new Object[len]; - for (int i = 0; i < len; i++) { - final Value value = values[i]; - if (!SysProperties.OLD_RESULT_SET_GET_OBJECT) { - final int type = value.getValueType(); - if (type == Value.BYTE || type == Value.SHORT) { - list[i] = value.getInt(); - continue; - } - } - list[i] = value.getObject(); - } - return list; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) throws SQLException { - throw 
getUnsupportedExceptionForOperation("PreparedStatement.set"); - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { builder.append("ROW ("); int length = values.length; for (int i = 0; i < length; i++) { if (i > 0) { builder.append(", "); } - values[i].getSQL(builder); + values[i].getSQL(builder, sqlFlags); } return builder.append(')'); } - @Override - public String getTraceSQL() { - StringBuilder builder = new StringBuilder("ROW ("); - for (int i = 0; i < values.length; i++) { - if (i > 0) { - builder.append(", "); - } - Value v = values[i]; - builder.append(v == null ? "null" : v.getTraceSQL()); - } - return builder.append(')').toString(); - } - @Override public boolean equals(Object other) { if (!(other instanceof ValueRow)) { @@ -155,33 +163,4 @@ public boolean equals(Object other) { return true; } - @Override - public Value convertPrecision(long precision, boolean force) { - if (!force) { - return this; - } - int length = values.length; - Value[] newValues = new Value[length]; - int i = 0; - boolean modified = false; - for (; i < length; i++) { - Value old = values[i]; - Value v = old.convertPrecision(precision, true); - if (v != old) { - modified = true; - } - // empty byte arrays or strings have precision 0 - // they count as precision 1 here - precision -= Math.max(1, v.getType().getPrecision()); - if (precision < 0) { - break; - } - newValues[i] = v; - } - if (i < length) { - return get(Arrays.copyOf(newValues, i)); - } - return modified ? get(newValues) : this; - } - } diff --git a/h2/src/main/org/h2/value/ValueShort.java b/h2/src/main/org/h2/value/ValueShort.java deleted file mode 100644 index 9bbf2700ef..0000000000 --- a/h2/src/main/org/h2/value/ValueShort.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.value; - -import java.sql.PreparedStatement; -import java.sql.SQLException; - -import org.h2.api.ErrorCode; -import org.h2.message.DbException; - -/** - * Implementation of the SMALLINT data type. - */ -public class ValueShort extends Value { - - /** - * The precision in digits. - */ - static final int PRECISION = 5; - - /** - * The maximum display size of a short. - * Example: -32768 - */ - static final int DISPLAY_SIZE = 6; - - private final short value; - - private ValueShort(short value) { - this.value = value; - } - - @Override - public Value add(Value v) { - ValueShort other = (ValueShort) v; - return checkRange(value + other.value); - } - - private static ValueShort checkRange(int x) { - if ((short) x != x) { - throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, - Integer.toString(x)); - } - return ValueShort.get((short) x); - } - - @Override - public int getSignum() { - return Integer.signum(value); - } - - @Override - public Value negate() { - return checkRange(-(int) value); - } - - @Override - public Value subtract(Value v) { - ValueShort other = (ValueShort) v; - return checkRange(value - other.value); - } - - @Override - public Value multiply(Value v) { - ValueShort other = (ValueShort) v; - return checkRange(value * other.value); - } - - @Override - public Value divide(Value v) { - ValueShort other = (ValueShort) v; - if (other.value == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); - } - return checkRange(value / other.value); - } - - @Override - public Value modulus(Value v) { - ValueShort other = (ValueShort) v; - if (other.value == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); - } - return ValueShort.get((short) (value % other.value)); - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { - return builder.append(value); - } - - @Override - public TypeInfo getType() { - return TypeInfo.TYPE_SHORT; - } - - @Override - 
public int getValueType() { - return SHORT; - } - - @Override - public short getShort() { - return value; - } - - @Override - public int getInt() { - return value; - } - - @Override - public int compareTypeSafe(Value o, CompareMode mode) { - return Integer.compare(value, ((ValueShort) o).value); - } - - @Override - public String getString() { - return Integer.toString(value); - } - - @Override - public int hashCode() { - return value; - } - - @Override - public Object getObject() { - return value; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setShort(parameterIndex, value); - } - - /** - * Get or create a short value for the given short. - * - * @param i the short - * @return the value - */ - public static ValueShort get(short i) { - return (ValueShort) Value.cache(new ValueShort(i)); - } - - @Override - public boolean equals(Object other) { - return other instanceof ValueShort && value == ((ValueShort) other).value; - } - -} diff --git a/h2/src/main/org/h2/value/ValueSmallint.java b/h2/src/main/org/h2/value/ValueSmallint.java new file mode 100644 index 0000000000..f0608ad7c2 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueSmallint.java @@ -0,0 +1,179 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.math.BigDecimal; + +import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; + +/** + * Implementation of the SMALLINT data type. + */ +public final class ValueSmallint extends Value { + + /** + * The precision in bits. + */ + static final int PRECISION = 16; + + /** + * The approximate precision in decimal digits. + */ + public static final int DECIMAL_PRECISION = 5; + + /** + * The maximum display size of a SMALLINT. 
+ * Example: -32768 + */ + static final int DISPLAY_SIZE = 6; + + private final short value; + + private ValueSmallint(short value) { + this.value = value; + } + + @Override + public Value add(Value v) { + ValueSmallint other = (ValueSmallint) v; + return checkRange(value + other.value); + } + + private static ValueSmallint checkRange(int x) { + if ((short) x != x) { + throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, + Integer.toString(x)); + } + return ValueSmallint.get((short) x); + } + + @Override + public int getSignum() { + return Integer.signum(value); + } + + @Override + public Value negate() { + return checkRange(-(int) value); + } + + @Override + public Value subtract(Value v) { + ValueSmallint other = (ValueSmallint) v; + return checkRange(value - other.value); + } + + @Override + public Value multiply(Value v) { + ValueSmallint other = (ValueSmallint) v; + return checkRange(value * other.value); + } + + @Override + public Value divide(Value v, TypeInfo quotientType) { + ValueSmallint other = (ValueSmallint) v; + if (other.value == 0) { + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); + } + return checkRange(value / other.value); + } + + @Override + public Value modulus(Value v) { + ValueSmallint other = (ValueSmallint) v; + if (other.value == 0) { + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); + } + return ValueSmallint.get((short) (value % other.value)); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0) { + return builder.append("CAST(").append(value).append(" AS SMALLINT)"); + } + return builder.append(value); + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_SMALLINT; + } + + @Override + public int getValueType() { + return SMALLINT; + } + + @Override + public byte[] getBytes() { + short value = this.value; + return new byte[] { (byte) (value >> 8), (byte) value }; + } + + @Override + public short 
getShort() { + return value; + } + + @Override + public int getInt() { + return value; + } + + @Override + public long getLong() { + return value; + } + + @Override + public BigDecimal getBigDecimal() { + return BigDecimal.valueOf(value); + } + + @Override + public float getFloat() { + return value; + } + + @Override + public double getDouble() { + return value; + } + + @Override + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { + return Integer.compare(value, ((ValueSmallint) o).value); + } + + @Override + public String getString() { + return Integer.toString(value); + } + + @Override + public int hashCode() { + return value; + } + + /** + * Get or create a SMALLINT value for the given short. + * + * @param i the short + * @return the value + */ + public static ValueSmallint get(short i) { + return (ValueSmallint) Value.cache(new ValueSmallint(i)); + } + + @Override + public boolean equals(Object other) { + return other instanceof ValueSmallint && value == ((ValueSmallint) other).value; + } + +} diff --git a/h2/src/main/org/h2/value/ValueString.java b/h2/src/main/org/h2/value/ValueString.java deleted file mode 100644 index 7df092f146..0000000000 --- a/h2/src/main/org/h2/value/ValueString.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.value; - -import java.sql.PreparedStatement; -import java.sql.SQLException; - -import org.h2.engine.SysProperties; -import org.h2.util.MathUtils; -import org.h2.util.StringUtils; - -/** - * Implementation of the VARCHAR data type. - * It is also the base class for other ValueString* classes. - */ -public class ValueString extends Value { - - /** - * Empty string. Should not be used in places where empty string can be - * treated as {@code NULL} depending on database mode. 
- */ - public static final ValueString EMPTY = new ValueString(""); - - /** - * The string data. - */ - protected final String value; - - private TypeInfo type; - - protected ValueString(String value) { - this.value = value; - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { - return StringUtils.quoteStringSQL(builder, value); - } - - @Override - public boolean equals(Object other) { - return other instanceof ValueString - && value.equals(((ValueString) other).value); - } - - @Override - public int compareTypeSafe(Value o, CompareMode mode) { - return mode.compareString(value, ((ValueString) o).value, false); - } - - @Override - public String getString() { - return value; - } - - @Override - public Object getObject() { - return value; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setString(parameterIndex, value); - } - - @Override - public int getMemory() { - /* - * Java 11 with -XX:-UseCompressedOops - * Empty string: 88 bytes - * 1 to 4 UTF-16 chars: 96 bytes - */ - return value.length() * 2 + 94; - } - - @Override - public Value convertPrecision(long precision, boolean force) { - if (precision == 0 || value.length() <= precision) { - return this; - } - int p = MathUtils.convertLongToInt(precision); - return getNew(value.substring(0, p)); - } - - @Override - public int hashCode() { - // TODO hash performance: could build a quicker hash - // by hashing the size and a few characters - return value.hashCode(); - - // proposed code: -// private int hash = 0; -// -// public int hashCode() { -// int h = hash; -// if (h == 0) { -// String s = value; -// int l = s.length(); -// if (l > 0) { -// if (l < 16) -// h = s.hashCode(); -// else { -// h = l; -// for (int i = 1; i <= l; i <<= 1) -// h = 31 * -// (31 * h + s.charAt(i - 1)) + -// s.charAt(l - i); -// } -// hash = h; -// } -// } -// return h; -// } - - } - - @Override - public final TypeInfo getType() { - TypeInfo type = this.type; 
- if (type == null) { - int length = value.length(); - this.type = type = new TypeInfo(getValueType(), length, 0, length, null); - } - return type; - } - - @Override - public int getValueType() { - return STRING; - } - - /** - * Get or create a string value for the given string. - * - * @param s the string - * @return the value - */ - public static Value get(String s) { - return get(s, false); - } - - /** - * Get or create a string value for the given string. - * - * @param s the string - * @param treatEmptyStringsAsNull whether or not to treat empty strings as - * NULL - * @return the value - */ - public static Value get(String s, boolean treatEmptyStringsAsNull) { - if (s.isEmpty()) { - return treatEmptyStringsAsNull ? ValueNull.INSTANCE : EMPTY; - } - ValueString obj = new ValueString(StringUtils.cache(s)); - if (s.length() > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { - return obj; - } - return Value.cache(obj); - // this saves memory, but is really slow - // return new ValueString(s.intern()); - } - - /** - * Create a new String value of the current class. - * This method is meant to be overridden by subclasses. - * - * @param s the string - * @return the value - */ - protected Value getNew(String s) { - return ValueString.get(s); - } - -} diff --git a/h2/src/main/org/h2/value/ValueStringBase.java b/h2/src/main/org/h2/value/ValueStringBase.java new file mode 100644 index 0000000000..7607a4cd95 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueStringBase.java @@ -0,0 +1,188 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; + +import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; +import org.h2.engine.Constants; +import org.h2.message.DbException; + +/** + * Base implementation of String based data types. 
+ */ +abstract class ValueStringBase extends Value { + + /** + * The value. + */ + String value; + + private TypeInfo type; + + ValueStringBase(String v) { + int length = v.length(); + if (length > Constants.MAX_STRING_LENGTH) { + throw DbException.getValueTooLongException(getTypeName(getValueType()), v, length); + } + this.value = v; + } + + @Override + public final TypeInfo getType() { + TypeInfo type = this.type; + if (type == null) { + int length = value.length(); + this.type = type = new TypeInfo(getValueType(), length, 0, null); + } + return type; + } + + @Override + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + return mode.compareString(value, ((ValueStringBase) v).value, false); + } + + @Override + public int hashCode() { + // TODO hash performance: could build a quicker hash + // by hashing the size and a few characters + return getClass().hashCode() ^ value.hashCode(); + + // proposed code: +// private int hash = 0; +// +// public int hashCode() { +// int h = hash; +// if (h == 0) { +// String s = value; +// int l = s.length(); +// if (l > 0) { +// if (l < 16) +// h = s.hashCode(); +// else { +// h = l; +// for (int i = 1; i <= l; i <<= 1) +// h = 31 * +// (31 * h + s.charAt(i - 1)) + +// s.charAt(l - i); +// } +// hash = h; +// } +// } +// return h; +// } + } + + @Override + public final String getString() { + return value; + } + + @Override + public final byte[] getBytes() { + return value.getBytes(StandardCharsets.UTF_8); + } + + @Override + public final boolean getBoolean() { + String s = value.trim(); + if (s.equalsIgnoreCase("true") || s.equalsIgnoreCase("t") || s.equalsIgnoreCase("yes") + || s.equalsIgnoreCase("y")) { + return true; + } else if (s.equalsIgnoreCase("false") || s.equalsIgnoreCase("f") || s.equalsIgnoreCase("no") + || s.equalsIgnoreCase("n")) { + return false; + } + try { + // convert to a number, and if it is not 0 then it is true + return new BigDecimal(s).signum() != 0; + } catch 
(NumberFormatException e) { + throw getDataConversionError(BOOLEAN); + } + } + + @Override + public final byte getByte() { + try { + return Byte.parseByte(value.trim()); + } catch (NumberFormatException e) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, value); + } + } + + @Override + public final short getShort() { + try { + return Short.parseShort(value.trim()); + } catch (NumberFormatException e) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, value); + } + } + + @Override + public final int getInt() { + try { + return Integer.parseInt(value.trim()); + } catch (NumberFormatException e) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, value); + } + } + + @Override + public final long getLong() { + try { + return Long.parseLong(value.trim()); + } catch (NumberFormatException e) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, value); + } + } + + @Override + public final BigDecimal getBigDecimal() { + try { + return new BigDecimal(value.trim()); + } catch (NumberFormatException e) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, value); + } + } + + @Override + public final float getFloat() { + try { + return Float.parseFloat(value.trim()); + } catch (NumberFormatException e) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, value); + } + } + + @Override + public final double getDouble() { + try { + return Double.parseDouble(value.trim()); + } catch (NumberFormatException e) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, value); + } + } + + @Override + public final int getMemory() { + /* + * Java 11 with -XX:-UseCompressedOops + * Empty string: 88 bytes + * 1 to 4 UTF-16 chars: 96 bytes + */ + return value.length() * 2 + 94; + } + + @Override + public boolean equals(Object other) { + return other != null && getClass() == other.getClass() && value.equals(((ValueStringBase) other).value); + } + +} diff --git a/h2/src/main/org/h2/value/ValueStringFixed.java 
b/h2/src/main/org/h2/value/ValueStringFixed.java deleted file mode 100644 index c9a689bad8..0000000000 --- a/h2/src/main/org/h2/value/ValueStringFixed.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.value; - -import org.h2.engine.SysProperties; -import org.h2.util.StringUtils; - -/** - * Implementation of the CHAR data type. - */ -public class ValueStringFixed extends ValueString { - - private static final ValueStringFixed EMPTY = new ValueStringFixed(""); - - protected ValueStringFixed(String value) { - super(value); - } - - private static String trimRight(String s) { - return trimRight(s, 0); - } - - private static String trimRight(String s, int minLength) { - int endIndex = s.length() - 1; - int i = endIndex; - while (i >= minLength && s.charAt(i) == ' ') { - i--; - } - s = i == endIndex ? s : s.substring(0, i + 1); - return s; - } - - @Override - public int getValueType() { - return STRING_FIXED; - } - - /** - * Get or create a fixed length string value for the given string. - * Spaces at the end of the string will be removed. 
- * - * @param s the string - * @return the value - */ - public static ValueStringFixed get(String s) { - s = trimRight(s); - int length = s.length(); - if (length == 0) { - return EMPTY; - } - ValueStringFixed obj = new ValueStringFixed(StringUtils.cache(s)); - if (length > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { - return obj; - } - return (ValueStringFixed) Value.cache(obj); - } - - @Override - protected ValueString getNew(String s) { - return ValueStringFixed.get(s); - } - -} diff --git a/h2/src/main/org/h2/value/ValueStringIgnoreCase.java b/h2/src/main/org/h2/value/ValueStringIgnoreCase.java deleted file mode 100644 index 1538b196a4..0000000000 --- a/h2/src/main/org/h2/value/ValueStringIgnoreCase.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.value; - -import org.h2.engine.SysProperties; -import org.h2.util.StringUtils; - -/** - * Implementation of the VARCHAR_IGNORECASE data type. 
- */ -public class ValueStringIgnoreCase extends ValueString { - - private static final ValueStringIgnoreCase EMPTY = - new ValueStringIgnoreCase(""); - private int hash; - - protected ValueStringIgnoreCase(String value) { - super(value); - } - - @Override - public int getValueType() { - return STRING_IGNORECASE; - } - - @Override - public int compareTypeSafe(Value o, CompareMode mode) { - return mode.compareString(value, ((ValueStringIgnoreCase) o).value, true); - } - - @Override - public boolean equals(Object other) { - return other instanceof ValueString - && value.equalsIgnoreCase(((ValueString) other).value); - } - - @Override - public int hashCode() { - if (hash == 0) { - // this is locale sensitive - hash = value.toUpperCase().hashCode(); - } - return hash; - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { - builder.append("CAST("); - return StringUtils.quoteStringSQL(builder, value).append(" AS VARCHAR_IGNORECASE)"); - } - - /** - * Get or create a case insensitive string value for the given string. - * The value will have the same case as the passed string. 
- * - * @param s the string - * @return the value - */ - public static ValueStringIgnoreCase get(String s) { - int length = s.length(); - if (length == 0) { - return EMPTY; - } - ValueStringIgnoreCase obj = new ValueStringIgnoreCase(StringUtils.cache(s)); - if (length > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { - return obj; - } - ValueStringIgnoreCase cache = (ValueStringIgnoreCase) Value.cache(obj); - // the cached object could have the wrong case - // (it would still be 'equal', but we don't like to store it) - if (cache.value.equals(s)) { - return cache; - } - return obj; - } - - @Override - protected ValueString getNew(String s) { - return ValueStringIgnoreCase.get(s); - } - -} diff --git a/h2/src/main/org/h2/value/ValueTime.java b/h2/src/main/org/h2/value/ValueTime.java index 2ee8d9342c..c4ac3a1881 100644 --- a/h2/src/main/org/h2/value/ValueTime.java +++ b/h2/src/main/org/h2/value/ValueTime.java @@ -1,21 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Time; import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; import org.h2.util.DateTimeUtils; /** * Implementation of the TIME data type. */ -public class ValueTime extends Value { +public final class ValueTime extends Value { /** * The default precision and display size of the textual representation of a time. @@ -32,7 +30,7 @@ public class ValueTime extends Value { /** * The default scale for time. */ - static final int DEFAULT_SCALE = 0; + public static final int DEFAULT_SCALE = 0; /** * The maximum scale for time. 
@@ -59,36 +57,12 @@ private ValueTime(long nanos) { */ public static ValueTime fromNanos(long nanos) { if (nanos < 0L || nanos >= DateTimeUtils.NANOS_PER_DAY) { - StringBuilder builder = new StringBuilder(); - DateTimeUtils.appendTime(builder, nanos); - throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, - "TIME", builder.toString()); + throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, "TIME", + DateTimeUtils.appendTime(new StringBuilder(), nanos).toString()); } return (ValueTime) Value.cache(new ValueTime(nanos)); } - /** - * Get or create a time value for the given time. - * - * @param time the time - * @return the value - */ - public static ValueTime get(Time time) { - long ms = time.getTime(); - return fromNanos(DateTimeUtils.nanosFromLocalMillis(ms + DateTimeUtils.getTimeZoneOffset(ms))); - } - - /** - * Calculate the time value from a given time in - * milliseconds in UTC. - * - * @param ms the milliseconds - * @return the value - */ - public static ValueTime fromMillis(long ms) { - return fromNanos(DateTimeUtils.nanosFromLocalMillis(ms + DateTimeUtils.getTimeZoneOffset(ms))); - } - /** * Parse a string to a ValueTime. 
* @@ -111,11 +85,6 @@ public long getNanos() { return nanos; } - @Override - public Time getTime() { - return DateTimeUtils.convertNanoToTime(nanos); - } - @Override public TypeInfo getType() { return TypeInfo.TYPE_TIME; @@ -128,54 +97,22 @@ public int getValueType() { @Override public String getString() { - StringBuilder buff = new StringBuilder(MAXIMUM_PRECISION); - DateTimeUtils.appendTime(buff, nanos); - return buff.toString(); + return DateTimeUtils.appendTime(new StringBuilder(MAXIMUM_PRECISION), nanos).toString(); } @Override - public StringBuilder getSQL(StringBuilder builder) { - builder.append("TIME '"); - DateTimeUtils.appendTime(builder, nanos); - return builder.append('\''); + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return DateTimeUtils.appendTime(builder.append("TIME '"), nanos).append('\''); } @Override - public boolean checkPrecision(long precision) { - // TIME data type does not have precision parameter - return true; - } - - @Override - public Value convertScale(boolean onlyToSmallerScale, int targetScale) { - if (targetScale >= MAXIMUM_SCALE) { - return this; - } - if (targetScale < 0) { - throw DbException.getInvalidValueException("scale", targetScale); - } - long n = nanos; - long n2 = DateTimeUtils.convertScale(n, targetScale); - if (n2 == n) { - return this; - } - if (n2 >= DateTimeUtils.NANOS_PER_DAY) { - n2 = DateTimeUtils.NANOS_PER_DAY - 1; - } - return fromNanos(n2); - } - - @Override - public int compareTypeSafe(Value o, CompareMode mode) { + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { return Long.compare(nanos, ((ValueTime) o).nanos); } @Override public boolean equals(Object other) { - if (this == other) { - return true; - } - return other instanceof ValueTime && nanos == (((ValueTime) other).nanos); + return this == other || other instanceof ValueTime && nanos == (((ValueTime) other).nanos); } @Override @@ -183,26 +120,15 @@ public int hashCode() { return (int) (nanos 
^ (nanos >>> 32)); } - @Override - public Object getObject() { - return getTime(); - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setTime(parameterIndex, getTime()); - } - @Override public Value add(Value v) { - ValueTime t = (ValueTime) v.convertTo(Value.TIME); + ValueTime t = (ValueTime) v; return ValueTime.fromNanos(nanos + t.getNanos()); } @Override public Value subtract(Value v) { - ValueTime t = (ValueTime) v.convertTo(Value.TIME); + ValueTime t = (ValueTime) v; return ValueTime.fromNanos(nanos - t.getNanos()); } @@ -212,7 +138,7 @@ public Value multiply(Value v) { } @Override - public Value divide(Value v) { + public Value divide(Value v, TypeInfo quotientType) { return ValueTime.fromNanos((long) (nanos / v.getDouble())); } diff --git a/h2/src/main/org/h2/value/ValueTimeTimeZone.java b/h2/src/main/org/h2/value/ValueTimeTimeZone.java new file mode 100644 index 0000000000..57248487f2 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueTimeTimeZone.java @@ -0,0 +1,158 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; + +/** + * Implementation of the TIME WITH TIME ZONE data type. + */ +public final class ValueTimeTimeZone extends Value { + + /** + * The default precision and display size of the textual representation of a + * time. Example: 10:00:00+10:00 + */ + public static final int DEFAULT_PRECISION = 14; + + /** + * The maximum precision and display size of the textual representation of a + * time. 
Example: 10:00:00.123456789+10:00 + */ + public static final int MAXIMUM_PRECISION = 24; + + /** + * Nanoseconds since midnight + */ + private final long nanos; + + /** + * Time zone offset from UTC in seconds, range of -18 hours to +18 hours. + * This range is compatible with OffsetTime from JSR-310. + */ + private final int timeZoneOffsetSeconds; + + /** + * @param nanos + * nanoseconds since midnight + */ + private ValueTimeTimeZone(long nanos, int timeZoneOffsetSeconds) { + this.nanos = nanos; + this.timeZoneOffsetSeconds = timeZoneOffsetSeconds; + } + + /** + * Get or create a time value. + * + * @param nanos + * the nanoseconds since midnight + * @param timeZoneOffsetSeconds + * the timezone offset in seconds + * @return the value + */ + public static ValueTimeTimeZone fromNanos(long nanos, int timeZoneOffsetSeconds) { + if (nanos < 0L || nanos >= DateTimeUtils.NANOS_PER_DAY) { + throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, "TIME WITH TIME ZONE", + DateTimeUtils.appendTime(new StringBuilder(), nanos).toString()); + } + /* + * Some current and historic time zones have offsets larger than 12 + * hours. JSR-310 determines 18 hours as maximum possible offset in both + * directions, so we use this limit too for compatibility. + */ + if (timeZoneOffsetSeconds < (-18 * 60 * 60) || timeZoneOffsetSeconds > (18 * 60 * 60)) { + throw new IllegalArgumentException("timeZoneOffsetSeconds " + timeZoneOffsetSeconds); + } + return (ValueTimeTimeZone) Value.cache(new ValueTimeTimeZone(nanos, timeZoneOffsetSeconds)); + } + + /** + * Parse a string to a ValueTime. 
+ * + * @param s + * the string to parse + * @return the time + */ + public static ValueTimeTimeZone parse(String s) { + try { + return DateTimeUtils.parseTimeWithTimeZone(s, null); + } catch (Exception e) { + throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, e, "TIME WITH TIME ZONE", s); + } + } + + /** + * @return nanoseconds since midnight + */ + public long getNanos() { + return nanos; + } + + /** + * The time zone offset in seconds. + * + * @return the offset + */ + public int getTimeZoneOffsetSeconds() { + return timeZoneOffsetSeconds; + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_TIME_TZ; + } + + @Override + public int getValueType() { + return TIME_TZ; + } + + @Override + public int getMemory() { + return 32; + } + + @Override + public String getString() { + return toString(new StringBuilder(MAXIMUM_PRECISION)).toString(); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return toString(builder.append("TIME WITH TIME ZONE '")).append('\''); + } + + private StringBuilder toString(StringBuilder builder) { + return DateTimeUtils.appendTimeZone(DateTimeUtils.appendTime(builder, nanos), timeZoneOffsetSeconds); + } + + @Override + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { + ValueTimeTimeZone t = (ValueTimeTimeZone) o; + return Long.compare(nanos - timeZoneOffsetSeconds * DateTimeUtils.NANOS_PER_SECOND, + t.nanos - t.timeZoneOffsetSeconds * DateTimeUtils.NANOS_PER_SECOND); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (!(other instanceof ValueTimeTimeZone)) { + return false; + } + ValueTimeTimeZone t = (ValueTimeTimeZone) other; + return nanos == t.nanos && timeZoneOffsetSeconds == t.timeZoneOffsetSeconds; + } + + @Override + public int hashCode() { + return (int) (nanos ^ (nanos >>> 32) ^ timeZoneOffsetSeconds); + } + +} diff --git a/h2/src/main/org/h2/value/ValueTimestamp.java 
b/h2/src/main/org/h2/value/ValueTimestamp.java index 175b0fac3d..1f48d23092 100644 --- a/h2/src/main/org/h2/value/ValueTimestamp.java +++ b/h2/src/main/org/h2/value/ValueTimestamp.java @@ -1,22 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Timestamp; import org.h2.api.ErrorCode; -import org.h2.engine.Mode; +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; import org.h2.util.DateTimeUtils; /** * Implementation of the TIMESTAMP data type. */ -public class ValueTimestamp extends Value { +public final class ValueTimestamp extends Value { /** * The default precision and display size of the textual representation of a timestamp. @@ -33,7 +30,7 @@ public class ValueTimestamp extends Value { /** * The default scale for timestamps. */ - static final int DEFAULT_SCALE = 6; + public static final int DEFAULT_SCALE = 6; /** * The maximum scale for timestamps. @@ -51,10 +48,13 @@ public class ValueTimestamp extends Value { private final long timeNanos; private ValueTimestamp(long dateValue, long timeNanos) { - this.dateValue = dateValue; + if (dateValue < DateTimeUtils.MIN_DATE_VALUE || dateValue > DateTimeUtils.MAX_DATE_VALUE) { + throw new IllegalArgumentException("dateValue out of range " + dateValue); + } if (timeNanos < 0 || timeNanos >= DateTimeUtils.NANOS_PER_DAY) { throw new IllegalArgumentException("timeNanos out of range " + timeNanos); } + this.dateValue = dateValue; this.timeNanos = timeNanos; } @@ -71,74 +71,21 @@ public static ValueTimestamp fromDateValueAndNanos(long dateValue, long timeNano } /** - * Get or create a timestamp value for the given timestamp. 
- * - * @param timestamp the timestamp - * @return the value - */ - public static ValueTimestamp get(Timestamp timestamp) { - long ms = timestamp.getTime(); - long nanos = timestamp.getNanos() % 1_000_000; - ms += DateTimeUtils.getTimeZoneOffset(ms); - long dateValue = DateTimeUtils.dateValueFromLocalMillis(ms); - nanos += DateTimeUtils.nanosFromLocalMillis(ms); - return fromDateValueAndNanos(dateValue, nanos); - } - - /** - * Get or create a timestamp value for the given date/time in millis. - * - * @param ms the milliseconds - * @param nanos the nanoseconds - * @return the value - */ - public static ValueTimestamp fromMillisNanos(long ms, int nanos) { - ms += DateTimeUtils.getTimeZoneOffset(ms); - long dateValue = DateTimeUtils.dateValueFromLocalMillis(ms); - long timeNanos = nanos + DateTimeUtils.nanosFromLocalMillis(ms); - return fromDateValueAndNanos(dateValue, timeNanos); - } - - /** - * Get or create a timestamp value for the given date/time in millis. - * - * @param ms the milliseconds - * @return the value - */ - public static ValueTimestamp fromMillis(long ms) { - ms += DateTimeUtils.getTimeZoneOffset(ms); - long dateValue = DateTimeUtils.dateValueFromLocalMillis(ms); - long nanos = DateTimeUtils.nanosFromLocalMillis(ms); - return fromDateValueAndNanos(dateValue, nanos); - } - - /** - * Parse a string to a ValueTimestamp. This method supports the format - * +/-year-month-day hour[:.]minute[:.]seconds.fractional and an optional timezone - * part. - * - * @param s the string to parse - * @return the date - */ - public static ValueTimestamp parse(String s) { - return parse(s, null); - } - - /** - * Parse a string to a ValueTimestamp, using the given {@link Mode}. + * Parse a string to a ValueTimestamp, using the given {@link CastDataProvider}. * This method supports the format +/-year-month-day[ -]hour[:.]minute[:.]seconds.fractional * and an optional timezone part. 
* * @param s the string to parse - * @param mode the database {@link Mode} + * @param provider + * the cast information provider, may be {@code null} for + * literals without time zone * @return the date */ - public static ValueTimestamp parse(String s, Mode mode) { + public static ValueTimestamp parse(String s, CastDataProvider provider) { try { - return (ValueTimestamp) DateTimeUtils.parseTimestamp(s, mode, false); + return (ValueTimestamp) DateTimeUtils.parseTimestamp(s, provider, false); } catch (Exception e) { - throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, - e, "TIMESTAMP", s); + throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, e, "TIMESTAMP", s); } } @@ -161,11 +108,6 @@ public long getTimeNanos() { return timeNanos; } - @Override - public Timestamp getTimestamp() { - return DateTimeUtils.convertDateValueToTimestamp(dateValue, timeNanos); - } - @Override public TypeInfo getType() { return TypeInfo.TYPE_TIMESTAMP; @@ -183,51 +125,30 @@ public int getMemory() { @Override public String getString() { - StringBuilder buff = new StringBuilder(MAXIMUM_PRECISION); - DateTimeUtils.appendDate(buff, dateValue); - buff.append(' '); - DateTimeUtils.appendTime(buff, timeNanos); - return buff.toString(); + return toString(new StringBuilder(MAXIMUM_PRECISION), false).toString(); } - @Override - public StringBuilder getSQL(StringBuilder builder) { - builder.append("TIMESTAMP '"); - DateTimeUtils.appendDate(builder, dateValue); - builder.append(' '); - DateTimeUtils.appendTime(builder, timeNanos); - return builder.append('\''); + /** + * Returns value as string in ISO format. 
+ * + * @return value as string in ISO format + */ + public String getISOString() { + return toString(new StringBuilder(MAXIMUM_PRECISION), true).toString(); } @Override - public boolean checkPrecision(long precision) { - // TIMESTAMP data type does not have precision parameter - return true; + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return toString(builder.append("TIMESTAMP '"), false).append('\''); } - @Override - public Value convertScale(boolean onlyToSmallerScale, int targetScale) { - if (targetScale >= MAXIMUM_SCALE) { - return this; - } - if (targetScale < 0) { - throw DbException.getInvalidValueException("scale", targetScale); - } - long n = timeNanos; - long n2 = DateTimeUtils.convertScale(n, targetScale); - if (n2 == n) { - return this; - } - long dv = dateValue; - if (n2 >= DateTimeUtils.NANOS_PER_DAY) { - n2 -= DateTimeUtils.NANOS_PER_DAY; - dv = DateTimeUtils.incrementDateValue(dv); - } - return fromDateValueAndNanos(dv, n2); + private StringBuilder toString(StringBuilder builder, boolean iso) { + DateTimeUtils.appendDate(builder, dateValue).append(iso ? 
'T' : ' '); + return DateTimeUtils.appendTime(builder, timeNanos); } @Override - public int compareTypeSafe(Value o, CompareMode mode) { + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { ValueTimestamp t = (ValueTimestamp) o; int c = Long.compare(dateValue, t.dateValue); if (c != 0) { @@ -252,31 +173,30 @@ public int hashCode() { return (int) (dateValue ^ (dateValue >>> 32) ^ timeNanos ^ (timeNanos >>> 32)); } - @Override - public Object getObject() { - return getTimestamp(); - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setTimestamp(parameterIndex, getTimestamp()); - } - @Override public Value add(Value v) { - ValueTimestamp t = (ValueTimestamp) v.convertTo(Value.TIMESTAMP); - long d1 = DateTimeUtils.absoluteDayFromDateValue(dateValue); - long d2 = DateTimeUtils.absoluteDayFromDateValue(t.dateValue); - return DateTimeUtils.normalizeTimestamp(d1 + d2, timeNanos + t.timeNanos); + ValueTimestamp t = (ValueTimestamp) v; + long absoluteDay = DateTimeUtils.absoluteDayFromDateValue(dateValue) + + DateTimeUtils.absoluteDayFromDateValue(t.dateValue); + long nanos = timeNanos + t.timeNanos; + if (nanos >= DateTimeUtils.NANOS_PER_DAY) { + nanos -= DateTimeUtils.NANOS_PER_DAY; + absoluteDay++; + } + return ValueTimestamp.fromDateValueAndNanos(DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay), nanos); } @Override public Value subtract(Value v) { - ValueTimestamp t = (ValueTimestamp) v.convertTo(Value.TIMESTAMP); - long d1 = DateTimeUtils.absoluteDayFromDateValue(dateValue); - long d2 = DateTimeUtils.absoluteDayFromDateValue(t.dateValue); - return DateTimeUtils.normalizeTimestamp(d1 - d2, timeNanos - t.timeNanos); + ValueTimestamp t = (ValueTimestamp) v; + long absoluteDay = DateTimeUtils.absoluteDayFromDateValue(dateValue) + - DateTimeUtils.absoluteDayFromDateValue(t.dateValue); + long nanos = timeNanos - t.timeNanos; + if (nanos < 0) { + nanos += DateTimeUtils.NANOS_PER_DAY; 
+ absoluteDay--; + } + return ValueTimestamp.fromDateValueAndNanos(DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay), nanos); } } diff --git a/h2/src/main/org/h2/value/ValueTimestampTimeZone.java b/h2/src/main/org/h2/value/ValueTimestampTimeZone.java index 10d48342b8..f2670bbb09 100644 --- a/h2/src/main/org/h2/value/ValueTimestampTimeZone.java +++ b/h2/src/main/org/h2/value/ValueTimestampTimeZone.java @@ -1,27 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Timestamp; import org.h2.api.ErrorCode; -import org.h2.api.TimestampWithTimeZone; -import org.h2.engine.SysProperties; +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; import org.h2.util.DateTimeUtils; -import org.h2.util.LocalDateTimeUtils; /** * Implementation of the TIMESTAMP WITH TIME ZONE data type. - * - * @see - * ISO 8601 Time zone designators */ -public class ValueTimestampTimeZone extends Value { +public final class ValueTimestampTimeZone extends Value { /** * The default precision and display size of the textual representation of a timestamp. @@ -35,16 +27,6 @@ public class ValueTimestampTimeZone extends Value { */ public static final int MAXIMUM_PRECISION = 35; - /** - * The default scale for timestamps. - */ - static final int DEFAULT_SCALE = ValueTimestamp.DEFAULT_SCALE; - - /** - * The default scale for timestamps. 
- */ - static final int MAXIMUM_SCALE = ValueTimestamp.MAXIMUM_SCALE; - /** * A bit field with bits for the year, month, and day (see DateTimeUtils for * encoding) @@ -55,13 +37,15 @@ public class ValueTimestampTimeZone extends Value { */ private final long timeNanos; /** - * Time zone offset from UTC in minutes, range of -18 hours to +18 hours. This + * Time zone offset from UTC in seconds, range of -18 hours to +18 hours. This * range is compatible with OffsetDateTime from JSR-310. */ - private final short timeZoneOffsetMins; + private final int timeZoneOffsetSeconds; - private ValueTimestampTimeZone(long dateValue, long timeNanos, - short timeZoneOffsetMins) { + private ValueTimestampTimeZone(long dateValue, long timeNanos, int timeZoneOffsetSeconds) { + if (dateValue < DateTimeUtils.MIN_DATE_VALUE || dateValue > DateTimeUtils.MAX_DATE_VALUE) { + throw new IllegalArgumentException("dateValue out of range " + dateValue); + } if (timeNanos < 0 || timeNanos >= DateTimeUtils.NANOS_PER_DAY) { throw new IllegalArgumentException( "timeNanos out of range " + timeNanos); @@ -71,14 +55,14 @@ private ValueTimestampTimeZone(long dateValue, long timeNanos, * JSR-310 determines 18 hours as maximum possible offset in both directions, so * we use this limit too for compatibility. 
*/ - if (timeZoneOffsetMins < (-18 * 60) - || timeZoneOffsetMins > (18 * 60)) { + if (timeZoneOffsetSeconds < (-18 * 60 * 60) + || timeZoneOffsetSeconds > (18 * 60 * 60)) { throw new IllegalArgumentException( - "timeZoneOffsetMins out of range " + timeZoneOffsetMins); + "timeZoneOffsetSeconds out of range " + timeZoneOffsetSeconds); } this.dateValue = dateValue; this.timeNanos = timeNanos; - this.timeZoneOffsetMins = timeZoneOffsetMins; + this.timeZoneOffsetSeconds = timeZoneOffsetSeconds; } /** @@ -87,25 +71,13 @@ private ValueTimestampTimeZone(long dateValue, long timeNanos, * @param dateValue the date value, a bit field with bits for the year, * month, and day * @param timeNanos the nanoseconds since midnight - * @param timeZoneOffsetMins the timezone offset in minutes + * @param timeZoneOffsetSeconds the timezone offset in seconds * @return the value */ - public static ValueTimestampTimeZone fromDateValueAndNanos(long dateValue, - long timeNanos, short timeZoneOffsetMins) { + public static ValueTimestampTimeZone fromDateValueAndNanos(long dateValue, long timeNanos, + int timeZoneOffsetSeconds) { return (ValueTimestampTimeZone) Value.cache(new ValueTimestampTimeZone( - dateValue, timeNanos, timeZoneOffsetMins)); - } - - /** - * Get or create a timestamp value for the given timestamp. - * - * @param timestamp the timestamp - * @return the value - */ - public static ValueTimestampTimeZone get(TimestampWithTimeZone timestamp) { - return fromDateValueAndNanos(timestamp.getYMD(), - timestamp.getNanosSinceMidnight(), - timestamp.getTimeZoneOffsetMins()); + dateValue, timeNanos, timeZoneOffsetSeconds)); } /** @@ -114,14 +86,16 @@ public static ValueTimestampTimeZone get(TimestampWithTimeZone timestamp) { * part. 
* * @param s the string to parse + * @param provider + * the cast information provider, may be {@code null} for + * literals with time zone * @return the date */ - public static ValueTimestampTimeZone parse(String s) { + public static ValueTimestampTimeZone parse(String s, CastDataProvider provider) { try { - return (ValueTimestampTimeZone) DateTimeUtils.parseTimestamp(s, null, true); + return (ValueTimestampTimeZone) DateTimeUtils.parseTimestamp(s, provider, true); } catch (Exception e) { - throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, e, - "TIMESTAMP WITH TIME ZONE", s); + throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, e, "TIMESTAMP WITH TIME ZONE", s); } } @@ -145,17 +119,12 @@ public long getTimeNanos() { } /** - * The timezone offset in minutes. + * The time zone offset in seconds. * * @return the offset */ - public short getTimeZoneOffsetMins() { - return timeZoneOffsetMins; - } - - @Override - public Timestamp getTimestamp() { - return DateTimeUtils.convertTimestampTimeZoneToTimestamp(dateValue, timeNanos, timeZoneOffsetMins); + public int getTimeZoneOffsetSeconds() { + return timeZoneOffsetSeconds; } @Override @@ -176,52 +145,36 @@ public int getMemory() { @Override public String getString() { - StringBuilder builder = new StringBuilder(ValueTimestampTimeZone.MAXIMUM_PRECISION); - DateTimeUtils.appendTimestampTimeZone(builder, dateValue, timeNanos, timeZoneOffsetMins); - return builder.toString(); + return toString(new StringBuilder(MAXIMUM_PRECISION), false).toString(); } - @Override - public StringBuilder getSQL(StringBuilder builder) { - builder.append("TIMESTAMP WITH TIME ZONE '"); - DateTimeUtils.appendTimestampTimeZone(builder, dateValue, timeNanos, timeZoneOffsetMins); - return builder.append('\''); + /** + * Returns value as string in ISO format. 
+ * + * @return value as string in ISO format + */ + public String getISOString() { + return toString(new StringBuilder(MAXIMUM_PRECISION), true).toString(); } @Override - public boolean checkPrecision(long precision) { - // TIMESTAMP WITH TIME ZONE data type does not have precision parameter - return true; + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return toString(builder.append("TIMESTAMP WITH TIME ZONE '"), false).append('\''); } - @Override - public Value convertScale(boolean onlyToSmallerScale, int targetScale) { - if (targetScale >= MAXIMUM_SCALE) { - return this; - } - if (targetScale < 0) { - throw DbException.getInvalidValueException("scale", targetScale); - } - long n = timeNanos; - long n2 = DateTimeUtils.convertScale(n, targetScale); - if (n2 == n) { - return this; - } - long dv = dateValue; - if (n2 >= DateTimeUtils.NANOS_PER_DAY) { - n2 -= DateTimeUtils.NANOS_PER_DAY; - dv = DateTimeUtils.incrementDateValue(dv); - } - return fromDateValueAndNanos(dv, n2, timeZoneOffsetMins); + private StringBuilder toString(StringBuilder builder, boolean iso) { + DateTimeUtils.appendDate(builder, dateValue).append(iso ? 
'T' : ' '); + DateTimeUtils.appendTime(builder, timeNanos); + return DateTimeUtils.appendTimeZone(builder, timeZoneOffsetSeconds); } @Override - public int compareTypeSafe(Value o, CompareMode mode) { + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { ValueTimestampTimeZone t = (ValueTimestampTimeZone) o; // Maximum time zone offset is +/-18 hours so difference in days between local // and UTC cannot be more than one day long dateValueA = dateValue; - long timeA = timeNanos - timeZoneOffsetMins * 60_000_000_000L; + long timeA = timeNanos - timeZoneOffsetSeconds * DateTimeUtils.NANOS_PER_SECOND; if (timeA < 0) { timeA += DateTimeUtils.NANOS_PER_DAY; dateValueA = DateTimeUtils.decrementDateValue(dateValueA); @@ -230,7 +183,7 @@ public int compareTypeSafe(Value o, CompareMode mode) { dateValueA = DateTimeUtils.incrementDateValue(dateValueA); } long dateValueB = t.dateValue; - long timeB = t.timeNanos - t.timeZoneOffsetMins * 60_000_000_000L; + long timeB = t.timeNanos - t.timeZoneOffsetSeconds * DateTimeUtils.NANOS_PER_SECOND; if (timeB < 0) { timeB += DateTimeUtils.NANOS_PER_DAY; dateValueB = DateTimeUtils.decrementDateValue(dateValueB); @@ -254,39 +207,13 @@ public boolean equals(Object other) { } ValueTimestampTimeZone x = (ValueTimestampTimeZone) other; return dateValue == x.dateValue && timeNanos == x.timeNanos - && timeZoneOffsetMins == x.timeZoneOffsetMins; + && timeZoneOffsetSeconds == x.timeZoneOffsetSeconds; } @Override public int hashCode() { return (int) (dateValue ^ (dateValue >>> 32) ^ timeNanos - ^ (timeNanos >>> 32) ^ timeZoneOffsetMins); - } - - @Override - public Object getObject() { - if (SysProperties.RETURN_OFFSET_DATE_TIME && LocalDateTimeUtils.isJava8DateApiPresent()) { - return LocalDateTimeUtils.valueToOffsetDateTime(this); - } - return new TimestampWithTimeZone(dateValue, timeNanos, timeZoneOffsetMins); - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - 
prep.setString(parameterIndex, getString()); - } - - @Override - public Value add(Value v) { - throw DbException.getUnsupportedException( - "manipulating TIMESTAMP WITH TIME ZONE values is unsupported"); - } - - @Override - public Value subtract(Value v) { - throw DbException.getUnsupportedException( - "manipulating TIMESTAMP WITH TIME ZONE values is unsupported"); + ^ (timeNanos >>> 32) ^ timeZoneOffsetSeconds); } } diff --git a/h2/src/main/org/h2/value/ValueTinyint.java b/h2/src/main/org/h2/value/ValueTinyint.java new file mode 100644 index 0000000000..f80ee45b47 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueTinyint.java @@ -0,0 +1,183 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.math.BigDecimal; + +import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; + +/** + * Implementation of the TINYINT data type. + */ +public final class ValueTinyint extends Value { + + /** + * The precision in bits. + */ + static final int PRECISION = 8; + + /** + * The approximate precision in decimal digits. + */ + public static final int DECIMAL_PRECISION = 3; + + /** + * The display size for a TINYINT. 
+ * Example: -127 + */ + static final int DISPLAY_SIZE = 4; + + private final byte value; + + private ValueTinyint(byte value) { + this.value = value; + } + + @Override + public Value add(Value v) { + ValueTinyint other = (ValueTinyint) v; + return checkRange(value + other.value); + } + + private static ValueTinyint checkRange(int x) { + if ((byte) x != x) { + throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, + Integer.toString(x)); + } + return ValueTinyint.get((byte) x); + } + + @Override + public int getSignum() { + return Integer.signum(value); + } + + @Override + public Value negate() { + return checkRange(-(int) value); + } + + @Override + public Value subtract(Value v) { + ValueTinyint other = (ValueTinyint) v; + return checkRange(value - other.value); + } + + @Override + public Value multiply(Value v) { + ValueTinyint other = (ValueTinyint) v; + return checkRange(value * other.value); + } + + @Override + public Value divide(Value v, TypeInfo quotientType) { + ValueTinyint other = (ValueTinyint) v; + if (other.value == 0) { + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); + } + return checkRange(value / other.value); + } + + @Override + public Value modulus(Value v) { + ValueTinyint other = (ValueTinyint) v; + if (other.value == 0) { + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); + } + return ValueTinyint.get((byte) (value % other.value)); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0) { + return builder.append("CAST(").append(value).append(" AS TINYINT)"); + } + return builder.append(value); + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_TINYINT; + } + + @Override + public int getValueType() { + return TINYINT; + } + + @Override + public byte[] getBytes() { + return new byte[] { value }; + } + + @Override + public byte getByte() { + return value; + } + + @Override + public short getShort() { + return 
value; + } + + @Override + public int getInt() { + return value; + } + + @Override + public long getLong() { + return value; + } + + @Override + public BigDecimal getBigDecimal() { + return BigDecimal.valueOf(value); + } + + @Override + public float getFloat() { + return value; + } + + @Override + public double getDouble() { + return value; + } + + @Override + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { + return Integer.compare(value, ((ValueTinyint) o).value); + } + + @Override + public String getString() { + return Integer.toString(value); + } + + @Override + public int hashCode() { + return value; + } + + /** + * Get or create a TINYINT value for the given byte. + * + * @param i the byte + * @return the value + */ + public static ValueTinyint get(byte i) { + return (ValueTinyint) Value.cache(new ValueTinyint(i)); + } + + @Override + public boolean equals(Object other) { + return other instanceof ValueTinyint && value == ((ValueTinyint) other).value; + } + +} diff --git a/h2/src/main/org/h2/value/ValueToObjectConverter.java b/h2/src/main/org/h2/value/ValueToObjectConverter.java new file mode 100644 index 0000000000..84827b8e50 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueToObjectConverter.java @@ -0,0 +1,637 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.ZonedDateTime; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map.Entry; +import java.util.UUID; + +import org.h2.api.ErrorCode; +import org.h2.api.Interval; +import org.h2.engine.Session; +import org.h2.jdbc.JdbcArray; +import org.h2.jdbc.JdbcBlob; +import org.h2.jdbc.JdbcClob; +import org.h2.jdbc.JdbcConnection; +import org.h2.jdbc.JdbcLob; +import org.h2.jdbc.JdbcResultSet; +import org.h2.jdbc.JdbcSQLXML; +import org.h2.message.DbException; +import org.h2.message.TraceObject; +import org.h2.util.JSR310Utils; +import org.h2.util.JdbcUtils; +import org.h2.util.LegacyDateTimeUtils; + +/** + * Data type conversion methods between values and Java objects. + */ +public final class ValueToObjectConverter extends TraceObject { + + /** + * The Geometry class. This object is null if the JTS jar file is not in the + * classpath. + */ + public static final Class GEOMETRY_CLASS; + + private static final String GEOMETRY_CLASS_NAME = "org.locationtech.jts.geom.Geometry"; + + static { + Class g; + try { + g = JdbcUtils.loadUserClass(GEOMETRY_CLASS_NAME); + } catch (Exception e) { + g = null; + } + GEOMETRY_CLASS = g; + } + + /** + * Convert a Java object to a value. 
+ * + * @param session + * the session + * @param x + * the value + * @param type + * the suggested value type, or {@code Value#UNKNOWN} + * @return the value + */ + public static Value objectToValue(Session session, Object x, int type) { + if (x == null) { + return ValueNull.INSTANCE; + } else if (type == Value.JAVA_OBJECT) { + return ValueJavaObject.getNoCopy(JdbcUtils.serialize(x, session.getJavaObjectSerializer())); + } else if (x instanceof Value) { + Value v = (Value) x; + if (v instanceof ValueLob) { + session.addTemporaryLob((ValueLob) v); + } + return v; + } + Class clazz = x.getClass(); + if (clazz == String.class) { + return ValueVarchar.get((String) x, session); + } else if (clazz == Long.class) { + return ValueBigint.get((Long) x); + } else if (clazz == Integer.class) { + return ValueInteger.get((Integer) x); + } else if (clazz == Boolean.class) { + return ValueBoolean.get((Boolean) x); + } else if (clazz == Byte.class) { + return ValueTinyint.get((Byte) x); + } else if (clazz == Short.class) { + return ValueSmallint.get((Short) x); + } else if (clazz == Float.class) { + return ValueReal.get((Float) x); + } else if (clazz == Double.class) { + return ValueDouble.get((Double) x); + } else if (clazz == byte[].class) { + return ValueVarbinary.get((byte[]) x); + } else if (clazz == UUID.class) { + return ValueUuid.get((UUID) x); + } else if (clazz == Character.class) { + return ValueChar.get(((Character) x).toString()); + } else if (clazz == LocalDate.class) { + return JSR310Utils.localDateToValue((LocalDate) x); + } else if (clazz == LocalTime.class) { + return JSR310Utils.localTimeToValue((LocalTime) x); + } else if (clazz == LocalDateTime.class) { + return JSR310Utils.localDateTimeToValue((LocalDateTime) x); + } else if (clazz == Instant.class) { + return JSR310Utils.instantToValue((Instant) x); + } else if (clazz == OffsetTime.class) { + return JSR310Utils.offsetTimeToValue((OffsetTime) x); + } else if (clazz == OffsetDateTime.class) { + return 
JSR310Utils.offsetDateTimeToValue((OffsetDateTime) x); + } else if (clazz == ZonedDateTime.class) { + return JSR310Utils.zonedDateTimeToValue((ZonedDateTime) x); + } else if (clazz == Interval.class) { + Interval i = (Interval) x; + return ValueInterval.from(i.getQualifier(), i.isNegative(), i.getLeading(), i.getRemaining()); + } else if (clazz == Period.class) { + return JSR310Utils.periodToValue((Period) x); + } else if (clazz == Duration.class) { + return JSR310Utils.durationToValue((Duration) x); + } + if (x instanceof Object[]) { + return arrayToValue(session, x); + } else if (GEOMETRY_CLASS != null && GEOMETRY_CLASS.isAssignableFrom(clazz)) { + return ValueGeometry.getFromGeometry(x); + } else if (x instanceof BigInteger) { + return ValueNumeric.get((BigInteger) x); + } else if (x instanceof BigDecimal) { + return ValueNumeric.getAnyScale((BigDecimal) x); + } else { + return otherToValue(session, x); + } + } + + private static Value otherToValue(Session session, Object x) { + if (x instanceof Array) { + Array array = (Array) x; + try { + return arrayToValue(session, array.getArray()); + } catch (SQLException e) { + throw DbException.convert(e); + } + } else if (x instanceof ResultSet) { + return resultSetToValue(session, (ResultSet) x); + } + ValueLob lob; + if (x instanceof Reader) { + Reader r = (Reader) x; + if (!(r instanceof BufferedReader)) { + r = new BufferedReader(r); + } + lob = session.getDataHandler().getLobStorage().createClob(r, -1); + } else if (x instanceof Clob) { + try { + Clob clob = (Clob) x; + Reader r = new BufferedReader(clob.getCharacterStream()); + lob = session.getDataHandler().getLobStorage().createClob(r, clob.length()); + } catch (SQLException e) { + throw DbException.convert(e); + } + } else if (x instanceof InputStream) { + lob = session.getDataHandler().getLobStorage().createBlob((InputStream) x, -1); + } else if (x instanceof Blob) { + try { + Blob blob = (Blob) x; + lob = 
session.getDataHandler().getLobStorage().createBlob(blob.getBinaryStream(), blob.length()); + } catch (SQLException e) { + throw DbException.convert(e); + } + } else if (x instanceof SQLXML) { + try { + lob = session.getDataHandler().getLobStorage() + .createClob(new BufferedReader(((SQLXML) x).getCharacterStream()), -1); + } catch (SQLException e) { + throw DbException.convert(e); + } + } else { + Value v = LegacyDateTimeUtils.legacyObjectToValue(session, x); + if (v != null) { + return v; + } + return ValueJavaObject.getNoCopy(JdbcUtils.serialize(x, session.getJavaObjectSerializer())); + } + return session.addTemporaryLob(lob); + } + + private static Value arrayToValue(Session session, Object x) { + // (a.getClass().isArray()); + // (a.getClass().getComponentType().isPrimitive()); + Object[] o = (Object[]) x; + int len = o.length; + Value[] v = new Value[len]; + for (int i = 0; i < len; i++) { + v[i] = objectToValue(session, o[i], Value.UNKNOWN); + } + return ValueArray.get(v, session); + } + + static Value resultSetToValue(Session session, ResultSet rs) { + try { + ResultSetMetaData meta = rs.getMetaData(); + int columnCount = meta.getColumnCount(); + LinkedHashMap columns = readResultSetMeta(session, meta, columnCount); + if (!rs.next()) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, "Empty ResultSet to ROW value"); + } + Value[] list = new Value[columnCount]; + Iterator> iterator = columns.entrySet().iterator(); + for (int j = 0; j < columnCount; j++) { + list[j] = ValueToObjectConverter.objectToValue(session, rs.getObject(j + 1), + iterator.next().getValue().getValueType()); + } + if (rs.next()) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, "Multi-row ResultSet to ROW value"); + } + return ValueRow.get(new ExtTypeInfoRow(columns), list); + } catch (SQLException e) { + throw DbException.convert(e); + } + } + + private static LinkedHashMap readResultSetMeta(Session session, ResultSetMetaData meta, + int columnCount) throws 
SQLException { + LinkedHashMap columns = new LinkedHashMap<>(); + for (int i = 0; i < columnCount; i++) { + String alias = meta.getColumnLabel(i + 1); + String columnTypeName = meta.getColumnTypeName(i + 1); + int columnType = DataType.convertSQLTypeToValueType(meta.getColumnType(i + 1), columnTypeName); + int precision = meta.getPrecision(i + 1); + int scale = meta.getScale(i + 1); + TypeInfo typeInfo; + if (columnType == Value.ARRAY && columnTypeName.endsWith(" ARRAY")) { + typeInfo = TypeInfo + .getTypeInfo(Value.ARRAY, -1L, 0, + TypeInfo.getTypeInfo(DataType.getTypeByName( + columnTypeName.substring(0, columnTypeName.length() - 6), + session.getMode()).type)); + } else { + typeInfo = TypeInfo.getTypeInfo(columnType, precision, scale, null); + } + columns.put(alias, typeInfo); + } + return columns; + } + + /** + * Converts the specified value to an object of the specified type. + * + * @param + * the type + * @param type + * the class + * @param value + * the value + * @param conn + * the connection + * @return the object of the specified class representing the specified + * value, or {@code null} + */ + @SuppressWarnings("unchecked") + public static T valueToObject(Class type, Value value, JdbcConnection conn) { + if (value == ValueNull.INSTANCE) { + return null; + } else if (type == BigDecimal.class) { + return (T) value.getBigDecimal(); + } else if (type == BigInteger.class) { + return (T) value.getBigDecimal().toBigInteger(); + } else if (type == String.class) { + return (T) value.getString(); + } else if (type == Boolean.class) { + return (T) (Boolean) value.getBoolean(); + } else if (type == Byte.class) { + return (T) (Byte) value.getByte(); + } else if (type == Short.class) { + return (T) (Short) value.getShort(); + } else if (type == Integer.class) { + return (T) (Integer) value.getInt(); + } else if (type == Long.class) { + return (T) (Long) value.getLong(); + } else if (type == Float.class) { + return (T) (Float) value.getFloat(); + } else if (type == 
Double.class) { + return (T) (Double) value.getDouble(); + } else if (type == UUID.class) { + return (T) value.convertToUuid().getUuid(); + } else if (type == byte[].class) { + return (T) value.getBytes(); + } else if (type == Character.class) { + String s = value.getString(); + return (T) (Character) (s.isEmpty() ? ' ' : s.charAt(0)); + } else if (type == Interval.class) { + if (!(value instanceof ValueInterval)) { + value = value.convertTo(TypeInfo.TYPE_INTERVAL_DAY_TO_SECOND); + } + ValueInterval v = (ValueInterval) value; + return (T) new Interval(v.getQualifier(), false, v.getLeading(), v.getRemaining()); + } else if (type == LocalDate.class) { + return (T) JSR310Utils.valueToLocalDate(value, conn); + } else if (type == LocalTime.class) { + return (T) JSR310Utils.valueToLocalTime(value, conn); + } else if (type == LocalDateTime.class) { + return (T) JSR310Utils.valueToLocalDateTime(value, conn); + } else if (type == OffsetTime.class) { + return (T) JSR310Utils.valueToOffsetTime(value, conn); + } else if (type == OffsetDateTime.class) { + return (T) JSR310Utils.valueToOffsetDateTime(value, conn); + } else if (type == ZonedDateTime.class) { + return (T) JSR310Utils.valueToZonedDateTime(value, conn); + } else if (type == Instant.class) { + return (T) JSR310Utils.valueToInstant(value, conn); + } else if (type == Period.class) { + return (T) JSR310Utils.valueToPeriod(value); + } else if (type == Duration.class) { + return (T) JSR310Utils.valueToDuration(value); + } else if (type.isArray()) { + return (T) valueToArray(type, value, conn); + } else if (GEOMETRY_CLASS != null && GEOMETRY_CLASS.isAssignableFrom(type)) { + return (T) value.convertToGeometry(null).getGeometry(); + } else { + return (T) valueToOther(type, value, conn); + } + } + + private static Object valueToArray(Class type, Value value, JdbcConnection conn) { + Value[] array = ((ValueArray) value).getList(); + Class componentType = type.getComponentType(); + int length = array.length; + Object[] 
objArray = (Object[]) java.lang.reflect.Array.newInstance(componentType, length); + for (int i = 0; i < length; i++) { + objArray[i] = valueToObject(componentType, array[i], conn); + } + return objArray; + } + + private static Object valueToOther(Class type, Value value, JdbcConnection conn) { + if (type == Object.class) { + return JdbcUtils.deserialize( + value.convertToJavaObject(TypeInfo.TYPE_JAVA_OBJECT, Value.CONVERT_TO, null).getBytesNoCopy(), + conn.getJavaObjectSerializer()); + } else if (type == InputStream.class) { + return value.getInputStream(); + } else if (type == Reader.class) { + return value.getReader(); + } else if (type == java.sql.Array.class) { + return new JdbcArray(conn, value, getNextId(TraceObject.ARRAY)); + } else if (type == Blob.class) { + return new JdbcBlob(conn, value, JdbcLob.State.WITH_VALUE, getNextId(TraceObject.BLOB)); + } else if (type == Clob.class) { + return new JdbcClob(conn, value, JdbcLob.State.WITH_VALUE, getNextId(TraceObject.CLOB)); + } else if (type == SQLXML.class) { + return new JdbcSQLXML(conn, value, JdbcLob.State.WITH_VALUE, getNextId(TraceObject.SQLXML)); + } else if (type == ResultSet.class) { + return new JdbcResultSet(conn, null, null, value.convertToAnyRow().getResult(), + getNextId(TraceObject.RESULT_SET), true, false, false); + } else { + Object obj = LegacyDateTimeUtils.valueToLegacyType(type, value, conn); + if (obj != null) { + return obj; + } + if (value.getValueType() == Value.JAVA_OBJECT) { + obj = JdbcUtils.deserialize(value.getBytesNoCopy(), conn.getJavaObjectSerializer()); + if (type.isAssignableFrom(obj.getClass())) { + return obj; + } + } + throw DbException.getUnsupportedException("converting to class " + type.getName()); + } + } + + /** + * Get the name of the Java class for the given value type. 
+ * + * @param type + * the value type + * @param forJdbc + * if {@code true} get class for JDBC layer, if {@code false} get + * class for Java functions API + * @return the class + */ + public static Class getDefaultClass(int type, boolean forJdbc) { + switch (type) { + case Value.NULL: + return Void.class; + case Value.CHAR: + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.ENUM: + return String.class; + case Value.CLOB: + return Clob.class; + case Value.BINARY: + case Value.VARBINARY: + case Value.JSON: + return byte[].class; + case Value.BLOB: + return Blob.class; + case Value.BOOLEAN: + return Boolean.class; + case Value.TINYINT: + if (forJdbc) { + return Integer.class; + } + return Byte.class; + case Value.SMALLINT: + if (forJdbc) { + return Integer.class; + } + return Short.class; + case Value.INTEGER: + return Integer.class; + case Value.BIGINT: + return Long.class; + case Value.NUMERIC: + case Value.DECFLOAT: + return BigDecimal.class; + case Value.REAL: + return Float.class; + case Value.DOUBLE: + return Double.class; + case Value.DATE: + return forJdbc ? java.sql.Date.class : LocalDate.class; + case Value.TIME: + return forJdbc ? java.sql.Time.class : LocalTime.class; + case Value.TIME_TZ: + return OffsetTime.class; + case Value.TIMESTAMP: + return forJdbc ? java.sql.Timestamp.class : LocalDateTime.class; + case Value.TIMESTAMP_TZ: + return OffsetDateTime.class; + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + return Interval.class; + case Value.JAVA_OBJECT: + return forJdbc ? 
Object.class : byte[].class; + case Value.GEOMETRY: { + Class clazz = GEOMETRY_CLASS; + return clazz != null ? clazz : String.class; + } + case Value.UUID: + return UUID.class; + case Value.ARRAY: + if (forJdbc) { + return Array.class; + } + return Object[].class; + case Value.ROW: + if (forJdbc) { + return ResultSet.class; + } + return Object[].class; + default: + throw DbException.getUnsupportedException("data type " + type); + } + } + + /** + * Converts the specified value to the default Java object for its type. + * + * @param value + * the value + * @param conn + * the connection + * @param forJdbc + * if {@code true} perform conversion for JDBC layer, if + * {@code false} perform conversion for Java functions API + * @return the object + */ + public static Object valueToDefaultObject(Value value, JdbcConnection conn, boolean forJdbc) { + switch (value.getValueType()) { + case Value.NULL: + return null; + case Value.CHAR: + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.ENUM: + return value.getString(); + case Value.CLOB: + return new JdbcClob(conn, value, JdbcLob.State.WITH_VALUE, getNextId(TraceObject.CLOB)); + case Value.BINARY: + case Value.VARBINARY: + case Value.JSON: + return value.getBytes(); + case Value.BLOB: + return new JdbcBlob(conn, value, JdbcLob.State.WITH_VALUE, getNextId(TraceObject.BLOB)); + case Value.BOOLEAN: + return value.getBoolean(); + case Value.TINYINT: + if (forJdbc) { + return value.getInt(); + } + return value.getByte(); + case Value.SMALLINT: + if (forJdbc) { + return value.getInt(); + } + return value.getShort(); + case Value.INTEGER: + return value.getInt(); + case Value.BIGINT: + return value.getLong(); + case Value.NUMERIC: + case Value.DECFLOAT: + return value.getBigDecimal(); + case Value.REAL: + return value.getFloat(); + case Value.DOUBLE: + return value.getDouble(); + case Value.DATE: + return forJdbc ? 
LegacyDateTimeUtils.toDate(conn, null, value) : JSR310Utils.valueToLocalDate(value, null); + case Value.TIME: + return forJdbc ? LegacyDateTimeUtils.toTime(conn, null, value) : JSR310Utils.valueToLocalTime(value, null); + case Value.TIME_TZ: + return JSR310Utils.valueToOffsetTime(value, null); + case Value.TIMESTAMP: + return forJdbc ? LegacyDateTimeUtils.toTimestamp(conn, null, value) + : JSR310Utils.valueToLocalDateTime(value, null); + case Value.TIMESTAMP_TZ: + return JSR310Utils.valueToOffsetDateTime(value, null); + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + return ((ValueInterval) value).getInterval(); + case Value.JAVA_OBJECT: + return forJdbc ? JdbcUtils.deserialize(value.getBytesNoCopy(), conn.getJavaObjectSerializer()) + : value.getBytes(); + case Value.GEOMETRY: + return GEOMETRY_CLASS != null ? ((ValueGeometry) value).getGeometry() : value.getString(); + case Value.UUID: + return ((ValueUuid) value).getUuid(); + case Value.ARRAY: + if (forJdbc) { + return new JdbcArray(conn, value, getNextId(TraceObject.ARRAY)); + } + return valueToDefaultArray(value, conn, forJdbc); + case Value.ROW: + if (forJdbc) { + return new JdbcResultSet(conn, null, null, ((ValueRow) value).getResult(), + getNextId(TraceObject.RESULT_SET), true, false, false); + } + return valueToDefaultArray(value, conn, forJdbc); + default: + throw DbException.getUnsupportedException("data type " + value.getValueType()); + } + } + + /** + * Converts the specified array value to array of default Java objects for + * its type. 
+ * + * @param value + * the array value + * @param conn + * the connection + * @param forJdbc + * if {@code true} perform conversion for JDBC layer, if + * {@code false} perform conversion for Java functions API + * @return the object + */ + public static Object valueToDefaultArray(Value value, JdbcConnection conn, boolean forJdbc) { + Value[] values = ((ValueCollectionBase) value).getList(); + int len = values.length; + Object[] list = new Object[len]; + for (int i = 0; i < len; i++) { + list[i] = valueToDefaultObject(values[i], conn, forJdbc); + } + return list; + } + + /** + * Read a value from the given result set. + * + * @param session + * the session + * @param rs + * the result set + * @param columnIndex + * the column index (1-based) + * @return the value + */ + public static Value readValue(Session session, JdbcResultSet rs, int columnIndex) { + Value value = rs.getInternal(columnIndex); + switch (value.getValueType()) { + case Value.CLOB: + value = session.addTemporaryLob( + session.getDataHandler().getLobStorage().createClob(new BufferedReader(value.getReader()), -1)); + break; + case Value.BLOB: + value = session + .addTemporaryLob(session.getDataHandler().getLobStorage().createBlob(value.getInputStream(), -1)); + } + return value; + } + + private ValueToObjectConverter() { + } + +} diff --git a/h2/src/main/org/h2/value/ValueToObjectConverter2.java b/h2/src/main/org/h2/value/ValueToObjectConverter2.java new file mode 100644 index 0000000000..c7cb4a95f5 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueToObjectConverter2.java @@ -0,0 +1,432 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.value; + +import static org.h2.value.ValueToObjectConverter.GEOMETRY_CLASS; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Time; +import java.sql.Timestamp; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.ZonedDateTime; +import java.util.UUID; + +import org.h2.api.IntervalQualifier; +import org.h2.engine.Session; +import org.h2.jdbc.JdbcResultSet; +import org.h2.message.DbException; +import org.h2.message.TraceObject; +import org.h2.util.IntervalUtils; +import org.h2.util.JSR310Utils; +import org.h2.util.JdbcUtils; +import org.h2.util.LegacyDateTimeUtils; +import org.h2.util.Utils; + +/** + * Data type conversion methods between values and Java objects to use on the + * server side on H2 only. + */ +public final class ValueToObjectConverter2 extends TraceObject { + + /** + * Get the type information for the given Java class. 
+ * + * @param clazz + * the Java class + * @return the value type + */ + public static TypeInfo classToType(Class clazz) { + if (clazz == null) { + return TypeInfo.TYPE_NULL; + } + if (clazz.isPrimitive()) { + clazz = Utils.getNonPrimitiveClass(clazz); + } + if (clazz == Void.class) { + return TypeInfo.TYPE_NULL; + } else if (clazz == String.class || clazz == Character.class) { + return TypeInfo.TYPE_VARCHAR; + } else if (clazz == byte[].class) { + return TypeInfo.TYPE_VARBINARY; + } else if (clazz == Boolean.class) { + return TypeInfo.TYPE_BOOLEAN; + } else if (clazz == Byte.class) { + return TypeInfo.TYPE_TINYINT; + } else if (clazz == Short.class) { + return TypeInfo.TYPE_SMALLINT; + } else if (clazz == Integer.class) { + return TypeInfo.TYPE_INTEGER; + } else if (clazz == Long.class) { + return TypeInfo.TYPE_BIGINT; + } else if (clazz == Float.class) { + return TypeInfo.TYPE_REAL; + } else if (clazz == Double.class) { + return TypeInfo.TYPE_DOUBLE; + } else if (clazz == LocalDate.class) { + return TypeInfo.TYPE_DATE; + } else if (clazz == LocalTime.class) { + return TypeInfo.TYPE_TIME; + } else if (clazz == OffsetTime.class) { + return TypeInfo.TYPE_TIME_TZ; + } else if (clazz == LocalDateTime.class) { + return TypeInfo.TYPE_TIMESTAMP; + } else if (clazz == OffsetDateTime.class || clazz == ZonedDateTime.class || clazz == Instant.class) { + return TypeInfo.TYPE_TIMESTAMP_TZ; + } else if (clazz == Period.class) { + return TypeInfo.TYPE_INTERVAL_YEAR_TO_MONTH; + } else if (clazz == Duration.class) { + return TypeInfo.TYPE_INTERVAL_DAY_TO_SECOND; + } else if (UUID.class == clazz) { + return TypeInfo.TYPE_UUID; + } else if (clazz.isArray()) { + return TypeInfo.getTypeInfo(Value.ARRAY, Integer.MAX_VALUE, 0, classToType(clazz.getComponentType())); + } else if (Clob.class.isAssignableFrom(clazz) || Reader.class.isAssignableFrom(clazz)) { + return TypeInfo.TYPE_CLOB; + } else if (Blob.class.isAssignableFrom(clazz) || InputStream.class.isAssignableFrom(clazz)) { + 
return TypeInfo.TYPE_BLOB; + } else if (BigDecimal.class.isAssignableFrom(clazz)) { + return TypeInfo.TYPE_NUMERIC_FLOATING_POINT; + } else if (GEOMETRY_CLASS != null && GEOMETRY_CLASS.isAssignableFrom(clazz)) { + return TypeInfo.TYPE_GEOMETRY; + } else if (Array.class.isAssignableFrom(clazz)) { + return TypeInfo.TYPE_ARRAY_UNKNOWN; + } else if (ResultSet.class.isAssignableFrom(clazz)) { + return TypeInfo.TYPE_ROW_EMPTY; + } else { + TypeInfo t = LegacyDateTimeUtils.legacyClassToType(clazz); + if (t != null) { + return t; + } + return TypeInfo.TYPE_JAVA_OBJECT; + } + } + + /** + * Read a value from the given result set. + * + * @param session + * the session + * @param rs + * the result set + * @param columnIndex + * the column index (1-based) + * @param type + * the data type + * @return the value + */ + public static Value readValue(Session session, ResultSet rs, int columnIndex, int type) { + Value v; + if (rs instanceof JdbcResultSet) { + v = ValueToObjectConverter.readValue(session, (JdbcResultSet) rs, columnIndex); + } else { + try { + v = readValueOther(session, rs, columnIndex, type); + } catch (SQLException e) { + throw DbException.convert(e); + } + } + return v; + } + + private static Value readValueOther(Session session, ResultSet rs, int columnIndex, int type) + throws SQLException { + Value v; + switch (type) { + case Value.NULL: + v = ValueNull.INSTANCE; + break; + case Value.CHAR: { + String s = rs.getString(columnIndex); + v = (s == null) ? ValueNull.INSTANCE : ValueChar.get(s); + break; + } + case Value.VARCHAR: { + String s = rs.getString(columnIndex); + v = (s == null) ? ValueNull.INSTANCE : ValueVarchar.get(s, session); + break; + } + case Value.CLOB: { + if (session == null) { + String s = rs.getString(columnIndex); + v = s == null ? ValueNull.INSTANCE : ValueClob.createSmall(s); + } else { + Reader in = rs.getCharacterStream(columnIndex); + v = in == null ? 
ValueNull.INSTANCE + : session.addTemporaryLob( + session.getDataHandler().getLobStorage().createClob(new BufferedReader(in), -1)); + } + break; + } + case Value.VARCHAR_IGNORECASE: { + String s = rs.getString(columnIndex); + v = s == null ? ValueNull.INSTANCE : ValueVarcharIgnoreCase.get(s); + break; + } + case Value.BINARY: { + byte[] bytes = rs.getBytes(columnIndex); + v = bytes == null ? ValueNull.INSTANCE : ValueBinary.getNoCopy(bytes); + break; + } + case Value.VARBINARY: { + byte[] bytes = rs.getBytes(columnIndex); + v = bytes == null ? ValueNull.INSTANCE : ValueVarbinary.getNoCopy(bytes); + break; + } + case Value.BLOB: { + if (session == null) { + byte[] buff = rs.getBytes(columnIndex); + v = buff == null ? ValueNull.INSTANCE : ValueBlob.createSmall(buff); + } else { + InputStream in = rs.getBinaryStream(columnIndex); + v = in == null ? ValueNull.INSTANCE + : session.addTemporaryLob(session.getDataHandler().getLobStorage().createBlob(in, -1)); + } + break; + } + case Value.BOOLEAN: { + boolean value = rs.getBoolean(columnIndex); + v = rs.wasNull() ? ValueNull.INSTANCE : ValueBoolean.get(value); + break; + } + case Value.TINYINT: { + byte value = rs.getByte(columnIndex); + v = rs.wasNull() ? ValueNull.INSTANCE : ValueTinyint.get(value); + break; + } + case Value.SMALLINT: { + short value = rs.getShort(columnIndex); + v = rs.wasNull() ? ValueNull.INSTANCE : ValueSmallint.get(value); + break; + } + case Value.INTEGER: { + int value = rs.getInt(columnIndex); + v = rs.wasNull() ? ValueNull.INSTANCE : ValueInteger.get(value); + break; + } + case Value.BIGINT: { + long value = rs.getLong(columnIndex); + v = rs.wasNull() ? ValueNull.INSTANCE : ValueBigint.get(value); + break; + } + case Value.NUMERIC: { + BigDecimal value = rs.getBigDecimal(columnIndex); + v = value == null ? ValueNull.INSTANCE : ValueNumeric.getAnyScale(value); + break; + } + case Value.REAL: { + float value = rs.getFloat(columnIndex); + v = rs.wasNull() ? 
ValueNull.INSTANCE : ValueReal.get(value); + break; + } + case Value.DOUBLE: { + double value = rs.getDouble(columnIndex); + v = rs.wasNull() ? ValueNull.INSTANCE : ValueDouble.get(value); + break; + } + case Value.DECFLOAT: { + BigDecimal value = rs.getBigDecimal(columnIndex); + v = value == null ? ValueNull.INSTANCE : ValueDecfloat.get(value); + break; + } + case Value.DATE: { + try { + LocalDate value = rs.getObject(columnIndex, LocalDate.class); + v = value == null ? ValueNull.INSTANCE : JSR310Utils.localDateToValue(value); + break; + } catch (SQLException ignore) { + Date value = rs.getDate(columnIndex); + v = value == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromDate(session, null, value); + } + break; + } + case Value.TIME: { + try { + LocalTime value = rs.getObject(columnIndex, LocalTime.class); + v = value == null ? ValueNull.INSTANCE : JSR310Utils.localTimeToValue(value); + break; + } catch (SQLException ignore) { + Time value = rs.getTime(columnIndex); + v = value == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTime(session, null, value); + } + break; + } + case Value.TIME_TZ: { + try { + OffsetTime value = rs.getObject(columnIndex, OffsetTime.class); + v = value == null ? ValueNull.INSTANCE : JSR310Utils.offsetTimeToValue(value); + break; + } catch (SQLException ignore) { + Object obj = rs.getObject(columnIndex); + if (obj == null) { + v = ValueNull.INSTANCE; + } else { + v = ValueTimeTimeZone.parse(obj.toString()); + } + } + break; + } + case Value.TIMESTAMP: { + try { + LocalDateTime value = rs.getObject(columnIndex, LocalDateTime.class); + v = value == null ? ValueNull.INSTANCE : JSR310Utils.localDateTimeToValue(value); + break; + } catch (SQLException ignore) { + Timestamp value = rs.getTimestamp(columnIndex); + v = value == null ? 
ValueNull.INSTANCE : LegacyDateTimeUtils.fromTimestamp(session, null, value); + } + break; + } + case Value.TIMESTAMP_TZ: { + try { + OffsetDateTime value = rs.getObject(columnIndex, OffsetDateTime.class); + v = value == null ? ValueNull.INSTANCE : JSR310Utils.offsetDateTimeToValue(value); + break; + } catch (SQLException ignore) { + Object obj = rs.getObject(columnIndex); + if (obj == null) { + v = ValueNull.INSTANCE; + } else if (obj instanceof ZonedDateTime) { + v = JSR310Utils.zonedDateTimeToValue((ZonedDateTime) obj); + } else { + v = ValueTimestampTimeZone.parse(obj.toString(), session); + } + } + break; + } + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: { + String s = rs.getString(columnIndex); + v = s == null ? ValueNull.INSTANCE + : IntervalUtils.parseFormattedInterval(IntervalQualifier.valueOf(type - Value.INTERVAL_YEAR), s); + break; + } + case Value.JAVA_OBJECT: { + byte[] buff; + try { + buff = rs.getBytes(columnIndex); + } catch (SQLException ignore) { + try { + Object o = rs.getObject(columnIndex); + buff = o != null ? JdbcUtils.serialize(o, session.getJavaObjectSerializer()) : null; + } catch (Exception e) { + throw DbException.convert(e); + } + } + v = buff == null ? ValueNull.INSTANCE : ValueJavaObject.getNoCopy(buff); + break; + } + case Value.ENUM: { + int value = rs.getInt(columnIndex); + v = rs.wasNull() ? ValueNull.INSTANCE : ValueInteger.get(value); + break; + } + case Value.GEOMETRY: { + Object x = rs.getObject(columnIndex); + v = x == null ? 
ValueNull.INSTANCE : ValueGeometry.getFromGeometry(x); + break; + } + case Value.JSON: { + Object x = rs.getObject(columnIndex); + if (x == null) { + v = ValueNull.INSTANCE; + } else { + Class clazz = x.getClass(); + if (clazz == byte[].class) { + v = ValueJson.fromJson((byte[]) x); + } else if (clazz == String.class) { + v = ValueJson.fromJson((String) x); + } else { + v = ValueJson.fromJson(x.toString()); + } + } + break; + } + case Value.UUID: { + Object o = rs.getObject(columnIndex); + if (o == null) { + v = ValueNull.INSTANCE; + } else if (o instanceof UUID) { + v = ValueUuid.get((UUID) o); + } else if (o instanceof byte[]) { + v = ValueUuid.get((byte[]) o); + } else { + v = ValueUuid.get((String) o); + } + break; + } + case Value.ARRAY: { + Array array = rs.getArray(columnIndex); + if (array == null) { + v = ValueNull.INSTANCE; + } else { + Object[] list = (Object[]) array.getArray(); + if (list == null) { + v = ValueNull.INSTANCE; + } else { + int len = list.length; + Value[] values = new Value[len]; + for (int i = 0; i < len; i++) { + values[i] = ValueToObjectConverter.objectToValue(session, list[i], Value.NULL); + } + v = ValueArray.get(values, session); + } + } + break; + } + case Value.ROW: { + Object o = rs.getObject(columnIndex); + if (o == null) { + v = ValueNull.INSTANCE; + } else if (o instanceof ResultSet) { + v = ValueToObjectConverter.resultSetToValue(session, (ResultSet) o); + } else { + Object[] list = (Object[]) o; + int len = list.length; + Value[] values = new Value[len]; + for (int i = 0; i < len; i++) { + values[i] = ValueToObjectConverter.objectToValue(session, list[i], Value.NULL); + } + v = ValueRow.get(values); + } + break; + } + default: + throw DbException.getInternalError("data type " + type); + } + return v; + } + + private ValueToObjectConverter2() { + } + +} diff --git a/h2/src/main/org/h2/value/ValueUuid.java b/h2/src/main/org/h2/value/ValueUuid.java index 18e78c87f6..ca5fa3d73e 100644 --- 
a/h2/src/main/org/h2/value/ValueUuid.java +++ b/h2/src/main/org/h2/value/ValueUuid.java @@ -1,15 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; -import java.sql.PreparedStatement; -import java.sql.SQLException; import java.util.UUID; import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; import org.h2.message.DbException; import org.h2.util.Bits; import org.h2.util.MathUtils; @@ -18,7 +17,7 @@ /** * Implementation of the UUID data type. */ -public class ValueUuid extends Value { +public final class ValueUuid extends Value { /** * The precision of this value in number of bytes. @@ -61,16 +60,15 @@ public static ValueUuid getNewRandom() { /** * Get or create a UUID for the given 16 bytes. * - * @param binary the byte array (must be at least 16 bytes long) + * @param binary the byte array * @return the UUID */ public static ValueUuid get(byte[] binary) { - if (binary.length < 16) { - return get(StringUtils.convertBytesToHex(binary)); + int length = binary.length; + if (length != 16) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, "UUID requires 16 bytes, got " + length); } - long high = Bits.readLong(binary, 0); - long low = Bits.readLong(binary, 8); - return (ValueUuid) Value.cache(new ValueUuid(high, low)); + return get(Bits.readLong(binary, 0), Bits.readLong(binary, 8)); } /** @@ -102,33 +100,36 @@ public static ValueUuid get(UUID uuid) { */ public static ValueUuid get(String s) { long low = 0, high = 0; - for (int i = 0, j = 0, length = s.length(); i < length; i++) { + int j = 0; + for (int i = 0, length = s.length(); i < length; i++) { char c = s.charAt(i); if (c >= '0' && c <= '9') { low = (low << 4) | (c - '0'); } else if (c >= 'a' && c <= 'f') { - 
low = (low << 4) | (c - 'a' + 0xa); + low = (low << 4) | (c - ('a' - 0xa)); } else if (c == '-') { continue; } else if (c >= 'A' && c <= 'F') { - low = (low << 4) | (c - 'A' + 0xa); + low = (low << 4) | (c - ('A' - 0xa)); } else if (c <= ' ') { continue; } else { throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s); } - if (j++ == 15) { + if (++j == 16) { high = low; low = 0; } } - return (ValueUuid) Value.cache(new ValueUuid(high, low)); + if (j != 32) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s); + } + return get(high, low); } @Override - public StringBuilder getSQL(StringBuilder builder) { - builder.append('\''); - return addString(builder).append('\''); + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return addString(builder.append("UUID '")).append('\''); } @Override @@ -151,6 +152,11 @@ public String getString() { return addString(new StringBuilder(36)).toString(); } + @Override + public byte[] getBytes() { + return Bits.uuidToBytes(high, low); + } + private StringBuilder addString(StringBuilder builder) { StringUtils.appendHex(builder, high >> 32, 4).append('-'); StringUtils.appendHex(builder, high >> 16, 2).append('-'); @@ -160,26 +166,13 @@ private StringBuilder addString(StringBuilder builder) { } @Override - public int compareTypeSafe(Value o, CompareMode mode) { + public int compareTypeSafe(Value o, CompareMode mode, CastDataProvider provider) { if (o == this) { return 0; } ValueUuid v = (ValueUuid) o; - long v1 = high, v2 = v.high; - if (v1 == v2) { - v1 = low; - v2 = v.low; - if (mode.isUuidUnsigned()) { - v1 += Long.MIN_VALUE; - v2 += Long.MIN_VALUE; - } - return Long.compare(v1, v2); - } - if (mode.isUuidUnsigned()) { - v1 += Long.MIN_VALUE; - v2 += Long.MIN_VALUE; - } - return v1 > v2 ? 1 : -1; + int cmp = Long.compareUnsigned(high, v.high); + return cmp != 0 ? 
cmp : Long.compareUnsigned(low, v.low); } @Override @@ -191,22 +184,15 @@ public boolean equals(Object other) { return high == v.high && low == v.low; } - @Override - public Object getObject() { + /** + * Returns the UUID. + * + * @return the UUID + */ + public UUID getUuid() { return new UUID(high, low); } - @Override - public byte[] getBytes() { - return Bits.uuidToBytes(high, low); - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) - throws SQLException { - prep.setBytes(parameterIndex, getBytes()); - } - /** * Get the most significant 64 bits of this UUID. * @@ -225,4 +211,14 @@ public long getLow() { return low; } + @Override + public long charLength() { + return DISPLAY_SIZE; + } + + @Override + public long octetLength() { + return PRECISION; + } + } diff --git a/h2/src/main/org/h2/value/ValueVarbinary.java b/h2/src/main/org/h2/value/ValueVarbinary.java new file mode 100644 index 0000000000..b0d5344432 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueVarbinary.java @@ -0,0 +1,92 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import java.nio.charset.StandardCharsets; +import org.h2.engine.Constants; +import org.h2.engine.SysProperties; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.util.Utils; + +/** + * Implementation of the BINARY VARYING data type. + */ +public final class ValueVarbinary extends ValueBytesBase { + + /** + * Empty value. + */ + public static final ValueVarbinary EMPTY = new ValueVarbinary(Utils.EMPTY_BYTES); + + /** + * Associated TypeInfo. 
+ */ + private TypeInfo type; + + protected ValueVarbinary(byte[] value) { + super(value); + int length = value.length; + if (length > Constants.MAX_STRING_LENGTH) { + throw DbException.getValueTooLongException(getTypeName(getValueType()), + StringUtils.convertBytesToHex(value, 41), length); + } + } + + /** + * Get or create a VARBINARY value for the given byte array. + * Clone the data. + * + * @param b the byte array + * @return the value + */ + public static ValueVarbinary get(byte[] b) { + if (b.length == 0) { + return EMPTY; + } + b = Utils.cloneByteArray(b); + return getNoCopy(b); + } + + /** + * Get or create a VARBINARY value for the given byte array. + * Do not clone the date. + * + * @param b the byte array + * @return the value + */ + public static ValueVarbinary getNoCopy(byte[] b) { + if (b.length == 0) { + return EMPTY; + } + ValueVarbinary obj = new ValueVarbinary(b); + if (b.length > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { + return obj; + } + return (ValueVarbinary) Value.cache(obj); + } + + @Override + public TypeInfo getType() { + TypeInfo type = this.type; + if (type == null) { + long precision = value.length; + this.type = type = new TypeInfo(VARBINARY, precision, 0, null); + } + return type; + } + + @Override + public int getValueType() { + return VARBINARY; + } + + @Override + public String getString() { + return new String(value, StandardCharsets.UTF_8); + } + +} diff --git a/h2/src/main/org/h2/value/ValueVarchar.java b/h2/src/main/org/h2/value/ValueVarchar.java new file mode 100644 index 0000000000..381dfa7b24 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueVarchar.java @@ -0,0 +1,67 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.value; + +import org.h2.engine.CastDataProvider; +import org.h2.engine.SysProperties; +import org.h2.util.StringUtils; + +/** + * Implementation of the CHARACTER VARYING data type. + */ +public final class ValueVarchar extends ValueStringBase { + + /** + * Empty string. Should not be used in places where empty string can be + * treated as {@code NULL} depending on database mode. + */ + public static final ValueVarchar EMPTY = new ValueVarchar(""); + + private ValueVarchar(String value) { + super(value); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return StringUtils.quoteStringSQL(builder, value); + } + + @Override + public int getValueType() { + return VARCHAR; + } + + /** + * Get or create a VARCHAR value for the given string. + * + * @param s the string + * @return the value + */ + public static Value get(String s) { + return get(s, null); + } + + /** + * Get or create a VARCHAR value for the given string. + * + * @param s the string + * @param provider the cast information provider, or {@code null} + * @return the value + */ + public static Value get(String s, CastDataProvider provider) { + if (s.isEmpty()) { + return provider != null && provider.getMode().treatEmptyStringsAsNull ? ValueNull.INSTANCE : EMPTY; + } + ValueVarchar obj = new ValueVarchar(StringUtils.cache(s)); + if (s.length() > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { + return obj; + } + return Value.cache(obj); + // this saves memory, but is really slow + // return new ValueString(s.intern()); + } + +} diff --git a/h2/src/main/org/h2/value/ValueVarcharIgnoreCase.java b/h2/src/main/org/h2/value/ValueVarcharIgnoreCase.java new file mode 100644 index 0000000000..7b8a032dc7 --- /dev/null +++ b/h2/src/main/org/h2/value/ValueVarcharIgnoreCase.java @@ -0,0 +1,87 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value; + +import org.h2.engine.CastDataProvider; +import org.h2.engine.SysProperties; +import org.h2.util.StringUtils; + +/** + * Implementation of the VARCHAR_IGNORECASE data type. + */ +public final class ValueVarcharIgnoreCase extends ValueStringBase { + + private static final ValueVarcharIgnoreCase EMPTY = new ValueVarcharIgnoreCase(""); + + /** + * The hash code. + */ + private int hash; + + private ValueVarcharIgnoreCase(String value) { + super(value); + } + + @Override + public int getValueType() { + return VARCHAR_IGNORECASE; + } + + @Override + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + return mode.compareString(value, ((ValueStringBase) v).value, true); + } + + @Override + public boolean equals(Object other) { + return other instanceof ValueVarcharIgnoreCase + && value.equalsIgnoreCase(((ValueVarcharIgnoreCase) other).value); + } + + @Override + public int hashCode() { + if (hash == 0) { + // this is locale sensitive + hash = value.toUpperCase().hashCode(); + } + return hash; + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if ((sqlFlags & NO_CASTS) == 0) { + return StringUtils.quoteStringSQL(builder.append("CAST("), value).append(" AS VARCHAR_IGNORECASE(") + .append(value.length()).append("))"); + } + return StringUtils.quoteStringSQL(builder, value); + } + + /** + * Get or create a VARCHAR_IGNORECASE value for the given string. + * The value will have the same case as the passed string. 
+ * + * @param s the string + * @return the value + */ + public static ValueVarcharIgnoreCase get(String s) { + int length = s.length(); + if (length == 0) { + return EMPTY; + } + ValueVarcharIgnoreCase obj = new ValueVarcharIgnoreCase(StringUtils.cache(s)); + if (length > SysProperties.OBJECT_CACHE_MAX_PER_ELEMENT_SIZE) { + return obj; + } + ValueVarcharIgnoreCase cache = (ValueVarcharIgnoreCase) Value.cache(obj); + // the cached object could have the wrong case + // (it would still be 'equal', but we don't like to store it) + if (cache.value.equals(s)) { + return cache; + } + return obj; + } + +} diff --git a/h2/src/main/org/h2/value/VersionedValue.java b/h2/src/main/org/h2/value/VersionedValue.java index fde0e572b7..be9aceb92a 100644 --- a/h2/src/main/org/h2/value/VersionedValue.java +++ b/h2/src/main/org/h2/value/VersionedValue.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.value; @@ -11,12 +11,7 @@ * Also for uncommitted values it contains operationId - a combination of * transactionId and logId. */ -public class VersionedValue { - - /** - * Used when we don't care about a VersionedValue instance. 
- */ - public static final VersionedValue DUMMY = new VersionedValue(); +public class VersionedValue { protected VersionedValue() {} @@ -28,12 +23,14 @@ public long getOperationId() { return 0L; } - public Object getCurrentValue() { - return this; + @SuppressWarnings("unchecked") + public T getCurrentValue() { + return (T)this; } - public Object getCommittedValue() { - return this; + @SuppressWarnings("unchecked") + public T getCommittedValue() { + return (T)this; } } diff --git a/h2/src/main/org/h2/value/lob/LobData.java b/h2/src/main/org/h2/value/lob/LobData.java new file mode 100644 index 0000000000..c0fe51f59e --- /dev/null +++ b/h2/src/main/org/h2/value/lob/LobData.java @@ -0,0 +1,53 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value.lob; + +import java.io.InputStream; + +import org.h2.store.DataHandler; +import org.h2.value.ValueLob; + +/** + * LOB data. + */ +public abstract class LobData { + + LobData() { + } + + /** + * Get stream to read LOB data from + * @param precision octet length of the stream, or -1 if unknown + * @return stream to read LOB data from + */ + public abstract InputStream getInputStream(long precision); + + public DataHandler getDataHandler() { + return null; + } + + public boolean isLinkedToTable() { + return false; + } + + /** + * Remove the underlying resource, if any. For values that are kept fully in + * memory this method has no effect. + * @param value to remove + */ + public void remove(ValueLob value) { + } + + /** + * Get the memory used by this object. 
+ * + * @return the memory used in bytes + */ + public int getMemory() { + return 140; + } + +} diff --git a/h2/src/main/org/h2/value/lob/LobDataDatabase.java b/h2/src/main/org/h2/value/lob/LobDataDatabase.java new file mode 100644 index 0000000000..648fad12a3 --- /dev/null +++ b/h2/src/main/org/h2/value/lob/LobDataDatabase.java @@ -0,0 +1,99 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value.lob; + +import java.io.IOException; +import java.io.InputStream; + +import org.h2.message.DbException; +import org.h2.store.DataHandler; +import org.h2.value.ValueLob; + +/** + * LOB data stored in database. + */ +public final class LobDataDatabase extends LobData { + + private DataHandler handler; + + /** + * If the LOB is managed by the one the LobStorageBackend classes, these are + * the unique key inside that storage. + */ + private final int tableId; + + private final long lobId; + + /** + * Fix for recovery tool. + */ + private boolean isRecoveryReference; + + public LobDataDatabase(DataHandler handler, int tableId, long lobId) { + this.handler = handler; + this.tableId = tableId; + this.lobId = lobId; + } + + @Override + public void remove(ValueLob value) { + if (handler != null) { + handler.getLobStorage().removeLob(value); + } + } + + /** + * Check if this value is linked to a specific table. For values that are + * kept fully in memory, this method returns false. + * + * @return true if it is + */ + @Override + public boolean isLinkedToTable() { + return tableId >= 0; + } + + /** + * Get the current table id of this lob. 
+ * + * @return the table id + */ + public int getTableId() { + return tableId; + } + + public long getLobId() { + return lobId; + } + + @Override + public InputStream getInputStream(long precision) { + try { + return handler.getLobStorage().getInputStream(lobId, tableId, precision); + } catch (IOException e) { + throw DbException.convertIOException(e, toString()); + } + } + + @Override + public DataHandler getDataHandler() { + return handler; + } + + @Override + public String toString() { + return "lob-table: table: " + tableId + " id: " + lobId; + } + + public void setRecoveryReference(boolean isRecoveryReference) { + this.isRecoveryReference = isRecoveryReference; + } + + public boolean isRecoveryReference() { + return isRecoveryReference; + } + +} diff --git a/h2/src/main/org/h2/value/lob/LobDataFetchOnDemand.java b/h2/src/main/org/h2/value/lob/LobDataFetchOnDemand.java new file mode 100644 index 0000000000..4b3f50c218 --- /dev/null +++ b/h2/src/main/org/h2/value/lob/LobDataFetchOnDemand.java @@ -0,0 +1,84 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value.lob; + +import java.io.BufferedInputStream; +import java.io.InputStream; + +import org.h2.engine.SessionRemote; +import org.h2.store.DataHandler; +import org.h2.store.LobStorageRemoteInputStream; + +/** + * A implementation of the LOB data used on the client side of a remote H2 + * connection. Fetches the underlying on data from the server. + */ +public final class LobDataFetchOnDemand extends LobData { + + private SessionRemote handler; + + /** + * If the LOB is managed by the one the LobStorageBackend classes, these are + * the unique key inside that storage. 
+ */ + private final int tableId; + + private final long lobId; + + /** + * If this is a client-side ValueLobDb object returned by a ResultSet, the + * hmac acts a security cookie that the client can send back to the server + * to ask for data related to this LOB. + */ + protected final byte[] hmac; + + public LobDataFetchOnDemand(DataHandler handler, int tableId, long lobId, byte[] hmac) { + this.hmac = hmac; + this.handler = (SessionRemote) handler; + this.tableId = tableId; + this.lobId = lobId; + } + + /** + * Check if this value is linked to a specific table. For values that are + * kept fully in memory, this method returns false. + * + * @return true if it is + */ + @Override + public boolean isLinkedToTable() { + throw new IllegalStateException(); + } + + /** + * Get the current table id of this lob. + * + * @return the table id + */ + public int getTableId() { + return tableId; + } + + public long getLobId() { + return lobId; + } + + @Override + public InputStream getInputStream(long precision) { + return new BufferedInputStream(new LobStorageRemoteInputStream(handler, lobId, hmac)); + } + + @Override + public DataHandler getDataHandler() { + return handler; + } + + @Override + public String toString() { + return "lob-table: table: " + tableId + " id: " + lobId; + } + +} diff --git a/h2/src/main/org/h2/value/lob/LobDataFile.java b/h2/src/main/org/h2/value/lob/LobDataFile.java new file mode 100644 index 0000000000..2df7b30a5c --- /dev/null +++ b/h2/src/main/org/h2/value/lob/LobDataFile.java @@ -0,0 +1,72 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.value.lob; + +import java.io.BufferedInputStream; +import java.io.InputStream; + +import org.h2.engine.Constants; +import org.h2.engine.SysProperties; +import org.h2.store.DataHandler; +import org.h2.store.FileStore; +import org.h2.store.FileStoreInputStream; +import org.h2.store.fs.FileUtils; +import org.h2.value.ValueLob; + +/** + * LOB data stored in a temporary file. + */ +public final class LobDataFile extends LobData { + + private DataHandler handler; + + /** + * If the LOB is a temporary LOB being managed by a temporary ResultSet, it + * is stored in a temporary file. + */ + private final String fileName; + + private final FileStore tempFile; + + public LobDataFile(DataHandler handler, String fileName, FileStore tempFile) { + this.handler = handler; + this.fileName = fileName; + this.tempFile = tempFile; + } + + @Override + public void remove(ValueLob value) { + if (fileName != null) { + if (tempFile != null) { + tempFile.stopAutoDelete(); + } + // synchronize on the database, to avoid concurrent temp file + // creation / deletion / backup + synchronized (handler.getLobSyncObject()) { + FileUtils.delete(fileName); + } + } + } + + @Override + public InputStream getInputStream(long precision) { + FileStore store = handler.openFile(fileName, "r", true); + boolean alwaysClose = SysProperties.lobCloseBetweenReads; + return new BufferedInputStream(new FileStoreInputStream(store, false, alwaysClose), + Constants.IO_BUFFER_SIZE); + } + + @Override + public DataHandler getDataHandler() { + return handler; + } + + @Override + public String toString() { + return "lob-file: " + fileName; + } + +} diff --git a/h2/src/main/org/h2/value/lob/LobDataInMemory.java b/h2/src/main/org/h2/value/lob/LobDataInMemory.java new file mode 100644 index 0000000000..896c46932b --- /dev/null +++ b/h2/src/main/org/h2/value/lob/LobDataInMemory.java @@ -0,0 +1,51 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.value.lob; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; + +/** + * LOB data stored in memory. + */ +public final class LobDataInMemory extends LobData { + + /** + * If the LOB is below the inline size, we just store/load it directly here. + */ + private final byte[] small; + + public LobDataInMemory(byte[] small) { + if (small == null) { + throw new IllegalStateException(); + } + this.small = small; + } + + @Override + public InputStream getInputStream(long precision) { + return new ByteArrayInputStream(small); + } + + /** + * Get the data if this a small lob value. + * + * @return the data + */ + public byte[] getSmall() { + return small; + } + + @Override + public int getMemory() { + /* + * Java 11 with -XX:-UseCompressedOops 0 bytes: 120 bytes 1 byte: 128 + * bytes + */ + return small.length + 127; + } + +} diff --git a/h2/src/main/org/h2/value/lob/package.html b/h2/src/main/org/h2/value/lob/package.html new file mode 100644 index 0000000000..6a43263746 --- /dev/null +++ b/h2/src/main/org/h2/value/lob/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

          + +LOB data for values. + +

          \ No newline at end of file diff --git a/h2/src/main/org/h2/value/package.html b/h2/src/main/org/h2/value/package.html index 4ac42fff5b..00897ffe93 100644 --- a/h2/src/main/org/h2/value/package.html +++ b/h2/src/main/org/h2/value/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/samples/CachedPreparedStatements.java b/h2/src/test/org/h2/samples/CachedPreparedStatements.java index b4f35ce7dc..0b9cec6727 100644 --- a/h2/src/test/org/h2/samples/CachedPreparedStatements.java +++ b/h2/src/test/org/h2/samples/CachedPreparedStatements.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -26,6 +26,7 @@ public class CachedPreparedStatements { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { new CachedPreparedStatements().run(); diff --git a/h2/src/test/org/h2/samples/Compact.java b/h2/src/test/org/h2/samples/Compact.java index 98bda8279c..6ed07a3305 100644 --- a/h2/src/test/org/h2/samples/Compact.java +++ b/h2/src/test/org/h2/samples/Compact.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -11,9 +11,9 @@ import java.sql.Statement; import org.h2.store.fs.FileUtils; -import org.h2.tools.Script; import org.h2.tools.DeleteDbFiles; import org.h2.tools.RunScript; +import org.h2.tools.Script; /** * This sample application shows how to compact the database files. 
@@ -27,6 +27,7 @@ public class Compact { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { DeleteDbFiles.execute("./data", "test", true); @@ -49,6 +50,7 @@ public static void main(String... args) throws Exception { * @param dbName the database name * @param user the user name * @param password the password + * @throws SQLException on failure */ public static void compact(String dir, String dbName, String user, String password) throws SQLException { diff --git a/h2/src/test/org/h2/samples/CreateScriptFile.java b/h2/src/test/org/h2/samples/CreateScriptFile.java index 1790d40c81..daef2e653d 100644 --- a/h2/src/test/org/h2/samples/CreateScriptFile.java +++ b/h2/src/test/org/h2/samples/CreateScriptFile.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -37,6 +37,7 @@ public class CreateScriptFile { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { @@ -99,6 +100,7 @@ public static void main(String... 
args) throws Exception { * @param password the encryption password * @param charset the character set (for example UTF-8) * @return the print writer + * @throws IOException on failure */ public static PrintWriter openScriptWriter(String fileName, String compressionAlgorithm, @@ -111,7 +113,7 @@ public static PrintWriter openScriptWriter(String fileName, FileUtils.delete(fileName); FileStore store = FileStore.open(null, fileName, "rw", cipher, key); store.init(); - out = new FileStoreOutputStream(store, null, compressionAlgorithm); + out = new FileStoreOutputStream(store, compressionAlgorithm); out = new BufferedOutputStream(out, Constants.IO_BUFFER_SIZE_COMPRESS); } else { out = FileUtils.newOutputStream(fileName, false); @@ -134,6 +136,7 @@ public static PrintWriter openScriptWriter(String fileName, * @param password the encryption password * @param charset the character set (for example UTF-8) * @return the script reader + * @throws IOException on failure */ public static LineNumberReader openScriptReader(String fileName, String compressionAlgorithm, @@ -145,8 +148,7 @@ public static LineNumberReader openScriptReader(String fileName, byte[] key = SHA256.getKeyPasswordHash("script", password.toCharArray()); FileStore store = FileStore.open(null, fileName, "rw", cipher, key); store.init(); - in = new FileStoreInputStream(store, null, - compressionAlgorithm != null, false); + in = new FileStoreInputStream(store, compressionAlgorithm != null, false); in = new BufferedInputStream(in, Constants.IO_BUFFER_SIZE_COMPRESS); } else { in = FileUtils.newInputStream(fileName); diff --git a/h2/src/test/org/h2/samples/CsvSample.java b/h2/src/test/org/h2/samples/CsvSample.java index aad209d070..2b73041007 100644 --- a/h2/src/test/org/h2/samples/CsvSample.java +++ b/h2/src/test/org/h2/samples/CsvSample.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -27,6 +27,7 @@ public class CsvSample { * command line. * * @param args the command line parameters + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { CsvSample.write(); diff --git a/h2/src/test/org/h2/samples/DirectInsert.java b/h2/src/test/org/h2/samples/DirectInsert.java index dceefafc3e..825356a2f5 100644 --- a/h2/src/test/org/h2/samples/DirectInsert.java +++ b/h2/src/test/org/h2/samples/DirectInsert.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -25,6 +25,7 @@ public class DirectInsert { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { Class.forName("org.h2.Driver"); diff --git a/h2/src/test/org/h2/samples/FileFunctions.java b/h2/src/test/org/h2/samples/FileFunctions.java index 0da1afd10a..985a3138a9 100644 --- a/h2/src/test/org/h2/samples/FileFunctions.java +++ b/h2/src/test/org/h2/samples/FileFunctions.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -23,17 +23,16 @@ public class FileFunctions { * command line. 
* * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { Class.forName("org.h2.Driver"); Connection conn = DriverManager.getConnection("jdbc:h2:mem:", "sa", ""); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS READ_TEXT_FILE " + - "FOR \"org.h2.samples.FileFunctions.readTextFile\" "); + stat.execute("CREATE ALIAS READ_TEXT_FILE FOR 'org.h2.samples.FileFunctions.readTextFile'"); stat.execute("CREATE ALIAS READ_TEXT_FILE_WITH_ENCODING " + - "FOR \"org.h2.samples.FileFunctions.readTextFileWithEncoding\" "); - stat.execute("CREATE ALIAS READ_FILE " + - "FOR \"org.h2.samples.FileFunctions.readFile\" "); + "FOR 'org.h2.samples.FileFunctions.readTextFileWithEncoding'"); + stat.execute("CREATE ALIAS READ_FILE FOR 'org.h2.samples.FileFunctions.readFile'"); ResultSet rs = stat.executeQuery("CALL READ_FILE('test.txt')"); rs.next(); byte[] data = rs.getBytes(1); @@ -52,6 +51,7 @@ public static void main(String... 
args) throws Exception { * * @param fileName the file name * @return the text + * @throws IOException on failure */ public static String readTextFile(String fileName) throws IOException { byte[] buff = readFile(fileName); @@ -65,6 +65,7 @@ public static String readTextFile(String fileName) throws IOException { * @param fileName the file name * @param encoding the encoding * @return the text + * @throws IOException on failure */ public static String readTextFileWithEncoding(String fileName, String encoding) throws IOException { @@ -78,6 +79,7 @@ public static String readTextFileWithEncoding(String fileName, * * @param fileName the file name * @return the byte array + * @throws IOException on failure */ public static byte[] readFile(String fileName) throws IOException { try (RandomAccessFile file = new RandomAccessFile(fileName, "r")) { diff --git a/h2/src/test/org/h2/samples/Function.java b/h2/src/test/org/h2/samples/Function.java index 6ffb5d133c..cf084b6f04 100644 --- a/h2/src/test/org/h2/samples/Function.java +++ b/h2/src/test/org/h2/samples/Function.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -26,6 +26,7 @@ public class Function { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { Class.forName("org.h2.Driver"); @@ -34,8 +35,7 @@ public static void main(String... 
args) throws Exception { Statement stat = conn.createStatement(); // Using a custom Java function - stat.execute("CREATE ALIAS IS_PRIME " + - "FOR \"org.h2.samples.Function.isPrime\" "); + stat.execute("CREATE ALIAS IS_PRIME FOR 'org.h2.samples.Function.isPrime'"); ResultSet rs; rs = stat.executeQuery("SELECT IS_PRIME(X), X " + "FROM SYSTEM_RANGE(1, 20) ORDER BY X"); @@ -64,8 +64,7 @@ public static void main(String... args) throws Exception { rs.close(); // Using a custom function like table - stat.execute("CREATE ALIAS MATRIX " + - "FOR \"org.h2.samples.Function.getMatrix\" "); + stat.execute("CREATE ALIAS MATRIX FOR 'org.h2.samples.Function.getMatrix'"); prep = conn.prepareStatement("SELECT * FROM MATRIX(?) " + "ORDER BY X, Y"); prep.setInt(1, 2); @@ -111,6 +110,7 @@ public static boolean isPrime(int value) { * @param conn the connection * @param sql the SQL statement * @return the result set + * @throws SQLException on failure */ public static ResultSet query(Connection conn, String sql) throws SQLException { return conn.createStatement().executeQuery(sql); @@ -135,6 +135,7 @@ public static ResultSet simpleResultSet() { * @param conn the connection * @param size the number of x and y values * @return the result set with two columns + * @throws SQLException on failure */ public static ResultSet getMatrix(Connection conn, Integer size) throws SQLException { diff --git a/h2/src/test/org/h2/samples/FunctionMultiReturn.java b/h2/src/test/org/h2/samples/FunctionMultiReturn.java index c2e9bb7508..197341d996 100644 --- a/h2/src/test/org/h2/samples/FunctionMultiReturn.java +++ b/h2/src/test/org/h2/samples/FunctionMultiReturn.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.samples; @@ -28,14 +28,14 @@ public class FunctionMultiReturn { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { Class.forName("org.h2.Driver"); Connection conn = DriverManager.getConnection( "jdbc:h2:mem:", "sa", ""); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS P2C " + - "FOR \"org.h2.samples.FunctionMultiReturn.polar2Cartesian\" "); + stat.execute("CREATE ALIAS P2C FOR 'org.h2.samples.FunctionMultiReturn.polar2Cartesian'"); PreparedStatement prep = conn.prepareStatement( "SELECT X, Y FROM P2C(?, ?)"); prep.setDouble(1, 5.0); @@ -49,8 +49,7 @@ public static void main(String... args) throws Exception { stat.execute("CREATE TABLE TEST(ID IDENTITY, R DOUBLE, A DOUBLE)"); stat.execute("INSERT INTO TEST(R, A) VALUES(5.0, 0.5), (10.0, 0.6)"); - stat.execute("CREATE ALIAS P2C_SET " + - "FOR \"org.h2.samples.FunctionMultiReturn.polar2CartesianSet\" "); + stat.execute("CREATE ALIAS P2C_SET FOR 'org.h2.samples.FunctionMultiReturn.polar2CartesianSet'"); rs = conn.createStatement().executeQuery( "SELECT * FROM P2C_SET('SELECT * FROM TEST')"); while (rs.next()) { @@ -62,8 +61,7 @@ public static void main(String... 
args) throws Exception { " (x=" + x + ", y="+y+")"); } - stat.execute("CREATE ALIAS P2C_A " + - "FOR \"org.h2.samples.FunctionMultiReturn.polar2CartesianArray\" "); + stat.execute("CREATE ALIAS P2C_A FOR 'org.h2.samples.FunctionMultiReturn.polar2CartesianArray'"); rs = conn.createStatement().executeQuery( "SELECT R, A, P2C_A(R, A) FROM TEST"); while (rs.next()) { @@ -124,10 +122,10 @@ public static ResultSet polar2Cartesian(Double r, Double alpha) { * @param alpha the angle * @return an array two values: x and y */ - public static Object[] polar2CartesianArray(Double r, Double alpha) { + public static Double[] polar2CartesianArray(Double r, Double alpha) { double x = r * Math.cos(alpha); double y = r * Math.sin(alpha); - return new Object[]{x, y}; + return new Double[]{x, y}; } /** @@ -138,6 +136,7 @@ public static Object[] polar2CartesianArray(Double r, Double alpha) { * @param conn the connection * @param query the query * @return a result set with the coordinates + * @throws SQLException on failure */ public static ResultSet polar2CartesianSet(Connection conn, String query) throws SQLException { diff --git a/h2/src/test/org/h2/samples/HelloWorld.java b/h2/src/test/org/h2/samples/HelloWorld.java index 991f29b941..8353d2bd87 100644 --- a/h2/src/test/org/h2/samples/HelloWorld.java +++ b/h2/src/test/org/h2/samples/HelloWorld.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -21,6 +21,7 @@ public class HelloWorld { * Called when ran from command line. * * @param args ignored + * @throws Exception on failure */ public static void main(String... 
args) throws Exception { // delete the database named 'test' in the user home directory diff --git a/h2/src/test/org/h2/samples/InitDatabaseFromJar.java b/h2/src/test/org/h2/samples/InitDatabaseFromJar.java index ad785195dd..dccf2aabd2 100644 --- a/h2/src/test/org/h2/samples/InitDatabaseFromJar.java +++ b/h2/src/test/org/h2/samples/InitDatabaseFromJar.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -25,6 +25,7 @@ public class InitDatabaseFromJar { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { createScript(); diff --git a/h2/src/test/org/h2/samples/MixedMode.java b/h2/src/test/org/h2/samples/MixedMode.java index d862a080e1..a960191569 100644 --- a/h2/src/test/org/h2/samples/MixedMode.java +++ b/h2/src/test/org/h2/samples/MixedMode.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -23,6 +23,7 @@ public class MixedMode { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { @@ -50,7 +51,7 @@ public static void main(String... 
args) throws Exception { try { while (true) { // runs forever, except if you drop the table remotely - stat.execute("MERGE INTO TIMER VALUES(1, NOW())"); + stat.execute("MERGE INTO TIMER VALUES(1, LOCALTIME)"); Thread.sleep(1000); } } catch (SQLException e) { diff --git a/h2/src/test/org/h2/samples/Newsfeed.java b/h2/src/test/org/h2/samples/Newsfeed.java index fd6d225a1a..b7602cfe72 100644 --- a/h2/src/test/org/h2/samples/Newsfeed.java +++ b/h2/src/test/org/h2/samples/Newsfeed.java @@ -1,17 +1,16 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; -import java.io.File; -import java.io.FileOutputStream; import java.io.InputStream; import java.io.InputStreamReader; -import java.io.OutputStreamWriter; -import java.io.Writer; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; @@ -31,9 +30,10 @@ public class Newsfeed { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { - String targetDir = args.length == 0 ? "." : args[0]; + Path targetDir = Paths.get(args.length == 0 ? "." : args[0]); Class.forName("org.h2.Driver"); Connection conn = DriverManager.getConnection("jdbc:h2:mem:", "sa", ""); InputStream in = Newsfeed.class.getResourceAsStream("newsfeed.sql"); @@ -45,12 +45,8 @@ public static void main(String... 
args) throws Exception { if (file.endsWith(".txt")) { content = convertHtml2Text(content); } - new File(targetDir).mkdirs(); - FileOutputStream out = new FileOutputStream(targetDir + "/" + file); - Writer writer = new OutputStreamWriter(out, StandardCharsets.UTF_8); - writer.write(content); - writer.close(); - out.close(); + Files.createDirectories(targetDir); + Files.write(targetDir.resolve(file), content.getBytes(StandardCharsets.UTF_8)); } conn.close(); } diff --git a/h2/src/test/org/h2/samples/ReadOnlyDatabaseInZip.java b/h2/src/test/org/h2/samples/ReadOnlyDatabaseInZip.java index 9d1f1dd247..04b9a643da 100644 --- a/h2/src/test/org/h2/samples/ReadOnlyDatabaseInZip.java +++ b/h2/src/test/org/h2/samples/ReadOnlyDatabaseInZip.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -25,6 +25,7 @@ public class ReadOnlyDatabaseInZip { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { diff --git a/h2/src/test/org/h2/samples/RowAccessRights.java b/h2/src/test/org/h2/samples/RowAccessRights.java index e85e9ff785..dc75457044 100644 --- a/h2/src/test/org/h2/samples/RowAccessRights.java +++ b/h2/src/test/org/h2/samples/RowAccessRights.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.samples; @@ -26,6 +26,7 @@ public class RowAccessRights extends TriggerAdapter { * Called when ran from command line. * * @param args ignored + * @throws Exception on failure */ public static void main(String... args) throws Exception { DeleteDbFiles.execute("~", "test", true); @@ -36,11 +37,11 @@ public static void main(String... args) throws Exception { Statement stat = conn.createStatement(); stat.execute("create table test_data(" + - "id int, user varchar, data varchar, primary key(id, user))"); - stat.execute("create index on test_data(id, user)"); + "id int, `user` varchar, data varchar, primary key(id, `user`))"); + stat.execute("create index on test_data(id, `user`)"); stat.execute("create view test as select id, data " + - "from test_data where user = user()"); + "from test_data where `user` = user"); stat.execute("create trigger t_test instead of " + "insert, update, delete on test for each row " + "call \"" + RowAccessRights.class.getName() + "\""); @@ -92,7 +93,7 @@ public static void main(String... args) throws Exception { public void init(Connection conn, String schemaName, String triggerName, String tableName, boolean before, int type) throws SQLException { prepDelete = conn.prepareStatement( - "delete from test_data where id = ? and user = ?"); + "delete from test_data where id = ? and `user` = ?"); prepInsert = conn.prepareStatement( "insert into test_data values(?, ?, ?)"); super.init(conn, schemaName, triggerName, tableName, before, type); diff --git a/h2/src/test/org/h2/samples/SQLInjection.java b/h2/src/test/org/h2/samples/SQLInjection.java index 3d32187b44..6ad81ec4bf 100644 --- a/h2/src/test/org/h2/samples/SQLInjection.java +++ b/h2/src/test/org/h2/samples/SQLInjection.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -30,18 +30,19 @@ public class SQLInjection { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { new SQLInjection().run("org.h2.Driver", - "jdbc:h2:test", "sa", "sa"); + "jdbc:h2:./test", "sa", "sa"); // new SQLInjection().run("org.postgresql.Driver", // "jdbc:postgresql:jpox2", "sa", "sa"); -// new SQLInjection().run("com.mysql.jdbc.Driver", +// new SQLInjection().run("com.mysql.cj.jdbc.Driver", // "jdbc:mysql://localhost/test", "sa", "sa"); // new SQLInjection().run("org.hsqldb.jdbcDriver", // "jdbc:hsqldb:test", "sa", ""); // new SQLInjection().run( -// "org.apache.derby.jdbc.EmbeddedDriver", +// "org.apache.derby.iapi.jdbc.AutoloadedDriver", // "jdbc:derby:test3;create=true", "sa", "sa"); } @@ -146,6 +147,7 @@ void loginByNameInsecure() throws Exception { * @param userName the user name * @param password the password * @return a result set with the user record if the password matches + * @throws Exception on failure */ public static ResultSet getUser(Connection conn, String userName, String password) throws Exception { @@ -164,6 +166,7 @@ public static ResultSet getUser(Connection conn, String userName, * @param userName the user name * @param password the password * @return the new password + * @throws Exception on failure */ public static String changePassword(Connection conn, String userName, String password) throws Exception { @@ -181,10 +184,8 @@ public static String changePassword(Connection conn, String userName, */ void loginStoredProcedureInsecure() throws Exception { System.out.println("Insecure Systems Inc. 
- login using a stored procedure"); - stat.execute("CREATE ALIAS IF NOT EXISTS " + - "GET_USER FOR \"org.h2.samples.SQLInjection.getUser\""); - stat.execute("CREATE ALIAS IF NOT EXISTS " + - "CHANGE_PASSWORD FOR \"org.h2.samples.SQLInjection.changePassword\""); + stat.execute("CREATE ALIAS IF NOT EXISTS GET_USER FOR 'org.h2.samples.SQLInjection.getUser'"); + stat.execute("CREATE ALIAS IF NOT EXISTS CHANGE_PASSWORD FOR 'org.h2.samples.SQLInjection.changePassword'"); String name = input("Name?"); String password = input("Password?"); ResultSet rs = stat.executeQuery( diff --git a/h2/src/test/org/h2/samples/SecurePassword.java b/h2/src/test/org/h2/samples/SecurePassword.java index fde00dc8be..d33a2c39a0 100644 --- a/h2/src/test/org/h2/samples/SecurePassword.java +++ b/h2/src/test/org/h2/samples/SecurePassword.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -23,6 +23,7 @@ public class SecurePassword { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { diff --git a/h2/src/test/org/h2/samples/ShowProgress.java b/h2/src/test/org/h2/samples/ShowProgress.java index 3034262ac9..fe34df7269 100644 --- a/h2/src/test/org/h2/samples/ShowProgress.java +++ b/h2/src/test/org/h2/samples/ShowProgress.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.samples; @@ -13,6 +13,7 @@ import java.util.concurrent.TimeUnit; import org.h2.api.DatabaseEventListener; +import org.h2.engine.SessionLocal; import org.h2.jdbc.JdbcConnection; /** @@ -37,6 +38,7 @@ public ShowProgress() { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { new ShowProgress().test(); @@ -47,7 +49,7 @@ public static void main(String... args) throws Exception { */ void test() throws Exception { Class.forName("org.h2.Driver"); - Connection conn = DriverManager.getConnection("jdbc:h2:test", "sa", ""); + Connection conn = DriverManager.getConnection("jdbc:h2:./test", "sa", ""); Statement stat = conn.createStatement(); stat.execute("DROP TABLE IF EXISTS TEST"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); @@ -67,7 +69,7 @@ void test() throws Exception { } boolean abnormalTermination = true; if (abnormalTermination) { - ((JdbcConnection) conn).setPowerOffCount(1); + ((SessionLocal) ((JdbcConnection) conn).getSession()).getDatabase().setPowerOffCount(1); try { stat.execute("INSERT INTO TEST VALUES(-1, 'Test' || SPACE(100))"); } catch (SQLException e) { @@ -80,7 +82,7 @@ void test() throws Exception { System.out.println("Open connection..."); time = System.nanoTime(); conn = DriverManager.getConnection( - "jdbc:h2:test;DATABASE_EVENT_LISTENER='" + + "jdbc:h2:./test;DATABASE_EVENT_LISTENER='" + getClass().getName() + "'", "sa", ""); time = System.nanoTime() - time; System.out.println("Done after " + TimeUnit.NANOSECONDS.toMillis(time) + " ms"); @@ -112,7 +114,7 @@ public void exceptionThrown(SQLException e, String sql) { * @param max the 100% mark */ @Override - public void setProgress(int state, String name, int current, int max) { + public void setProgress(int state, String name, long current, long max) { long time = System.nanoTime(); if (time < lastNs + TimeUnit.SECONDS.toNanos(5)) 
{ return; diff --git a/h2/src/test/org/h2/samples/ShutdownServer.java b/h2/src/test/org/h2/samples/ShutdownServer.java index 14c393caf3..3b84c3553b 100644 --- a/h2/src/test/org/h2/samples/ShutdownServer.java +++ b/h2/src/test/org/h2/samples/ShutdownServer.java @@ -1,10 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; +import java.sql.SQLException; + /** * This very simple sample application stops a H2 TCP server * if it is running. @@ -16,8 +18,9 @@ public class ShutdownServer { * command line. * * @param args the command line parameters + * @throws SQLException on failure */ - public static void main(String... args) throws Exception { + public static void main(String... args) throws SQLException { org.h2.tools.Server.shutdownTcpServer("tcp://localhost:9094", "", false, false); } } diff --git a/h2/src/test/org/h2/samples/ToDate.java b/h2/src/test/org/h2/samples/ToDate.java deleted file mode 100644 index ec96a42fd9..0000000000 --- a/h2/src/test/org/h2/samples/ToDate.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.samples; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.Statement; -import java.text.SimpleDateFormat; -import java.util.Date; -import org.h2.tools.DeleteDbFiles; - -/** - * A very simple class that shows how to load the driver, create a database, - * create a table, and insert some data. - */ -public class ToDate { - - /** - * Called when ran from command line. - * - * @param args ignored - */ - public static void main(String... 
args) throws Exception { - - // delete the database named 'test' in the user home directory - DeleteDbFiles.execute("~", "test", true); - - Class.forName("org.h2.Driver"); - Connection conn = DriverManager.getConnection("jdbc:h2:~/test"); - Statement stat = conn.createStatement(); - - stat.execute("create table ToDateTest(id int primary key, " + - "start_date datetime, end_date datetime)"); - stat.execute("insert into ToDateTest values(1, " - + "ADD_MONTHS(TO_DATE('2015-11-13', 'yyyy-MM-DD'), 1), " - + "TO_DATE('2015-12-15', 'YYYY-MM-DD'))"); - stat.execute("insert into ToDateTest values(1, " + - "TO_DATE('2015-11-13', 'yyyy-MM-DD'), " + - "TO_DATE('2015-12-15', 'YYYY-MM-DD'))"); - stat.execute("insert into ToDateTest values(2, " + - "TO_DATE('2015-12-12 00:00:00', 'yyyy-MM-DD HH24:MI:ss'), " + - "TO_DATE('2015-12-16 15:00:00', 'YYYY-MM-DD HH24:MI:ss'))"); - stat.execute("insert into ToDateTest values(3, " + - "TO_DATE('2015-12-12 08:00 A.M.', 'yyyy-MM-DD HH:MI AM'), " + - "TO_DATE('2015-12-17 08:00 P.M.', 'YYYY-MM-DD HH:MI AM'))"); - stat.execute("insert into ToDateTest values(4, " + - "TO_DATE(substr('2015-12-12 08:00 A.M.', 1, 10), 'yyyy-MM-DD'), " + - "TO_DATE('2015-12-17 08:00 P.M.', 'YYYY-MM-DD HH:MI AM'))"); - - ResultSet rs = stat.executeQuery("select * from ToDateTest"); - while (rs.next()) { - System.out.println("Start date: " + dateToString(rs.getTimestamp("start_date"))); - System.out.println("End date: " + dateToString(rs.getTimestamp("end_date"))); - System.out.println(); - } - stat.close(); - conn.close(); - } - - private static String dateToString(Date date) { - return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(date); - } - -} diff --git a/h2/src/test/org/h2/samples/TriggerPassData.java b/h2/src/test/org/h2/samples/TriggerPassData.java index 50e9b009ef..0cd6f8a422 100644 --- a/h2/src/test/org/h2/samples/TriggerPassData.java +++ b/h2/src/test/org/h2/samples/TriggerPassData.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -28,6 +28,7 @@ public class TriggerPassData implements Trigger { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { Class.forName("org.h2.Driver"); @@ -35,9 +36,9 @@ public static void main(String... args) throws Exception { "jdbc:h2:mem:test", "sa", ""); Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST(ID INT)"); - stat.execute("CREATE ALIAS TRIGGER_SET FOR \"" + + stat.execute("CREATE ALIAS TRIGGER_SET FOR '" + TriggerPassData.class.getName() + - ".setTriggerData\""); + ".setTriggerData'"); stat.execute("CREATE TRIGGER T1 " + "BEFORE INSERT ON TEST " + "FOR EACH ROW CALL \"" + @@ -62,22 +63,13 @@ public void fire(Connection conn, Object[] old, Object[] row) { System.out.println(triggerData + ": " + row[0]); } - @Override - public void close() { - // ignore - } - - @Override - public void remove() { - // ignore - } - /** * Call this method to change a specific trigger. 
* * @param conn the connection * @param trigger the trigger name * @param data the data + * @throws SQLException on failure */ public static void setTriggerData(Connection conn, String trigger, String data) throws SQLException { @@ -87,7 +79,7 @@ public static void setTriggerData(Connection conn, String trigger, private static String getPrefix(Connection conn) throws SQLException { Statement stat = conn.createStatement(); ResultSet rs = stat.executeQuery( - "call ifnull(database_path() || '_', '') || database() || '_'"); + "call coalesce(database_path() || '_', '') || database() || '_'"); rs.next(); return rs.getString(1); } diff --git a/h2/src/test/org/h2/samples/TriggerSample.java b/h2/src/test/org/h2/samples/TriggerSample.java index 33ee838d23..27c07a8462 100644 --- a/h2/src/test/org/h2/samples/TriggerSample.java +++ b/h2/src/test/org/h2/samples/TriggerSample.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -25,13 +25,14 @@ public class TriggerSample { * command line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... 
args) throws Exception { Class.forName("org.h2.Driver"); Connection conn = DriverManager.getConnection("jdbc:h2:mem:", "sa", ""); Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE INVOICE(ID INT PRIMARY KEY, AMOUNT DECIMAL)"); - stat.execute("CREATE TABLE INVOICE_SUM(AMOUNT DECIMAL)"); + stat.execute("CREATE TABLE INVOICE(ID INT PRIMARY KEY, AMOUNT DECIMAL(10, 2))"); + stat.execute("CREATE TABLE INVOICE_SUM(AMOUNT DECIMAL(10, 2))"); stat.execute("INSERT INTO INVOICE_SUM VALUES(0.0)"); stat.execute("CREATE TRIGGER INV_INS " + diff --git a/h2/src/test/org/h2/samples/UpdatableView.java b/h2/src/test/org/h2/samples/UpdatableView.java index 528b744109..ec60d38461 100644 --- a/h2/src/test/org/h2/samples/UpdatableView.java +++ b/h2/src/test/org/h2/samples/UpdatableView.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.samples; @@ -11,6 +11,8 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Types; + import org.h2.tools.TriggerAdapter; /** @@ -24,66 +26,105 @@ public class UpdatableView extends TriggerAdapter { * This method is called when executing this sample application from the * command line. * - * @param args the command line parameters + * @param args ignored + * @throws Exception on failure */ public static void main(String... 
args) throws Exception { Class.forName("org.h2.Driver"); - Connection conn = DriverManager.getConnection("jdbc:h2:mem:"); - Statement stat; - stat = conn.createStatement(); + try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:")) { + Statement stat; + stat = conn.createStatement(); - // create the table and the view - stat.execute("create table test(id int primary key, name varchar)"); - stat.execute("create view test_view as select * from test"); + // Create the table TEST_TABLE and the view TEST_VIEW that simply + // selects everything from the TEST_TABLE. + stat.execute("CREATE TABLE TEST_TABLE" + + "(ID BIGINT GENERATED BY DEFAULT AS IDENTITY DEFAULT ON NULL PRIMARY KEY, NAME VARCHAR)"); + stat.execute("CREATE VIEW TEST_VIEW AS TABLE TEST_TABLE"); - // create the trigger that is called whenever - // the data in the view is modified - stat.execute("create trigger t_test_view instead of " + - "insert, update, delete on test_view for each row " + - "call \"" + UpdatableView.class.getName() + "\""); + // Create the INSTEAD OF trigger that is called whenever the data in + // the view is modified. This trigger makes the view updatable. + stat.execute( + "CREATE TRIGGER T_TEST_VIEW INSTEAD OF INSERT, UPDATE, DELETE ON TEST_VIEW FOR EACH ROW CALL \"" + + UpdatableView.class.getName() + '"'); - // test a few operations - stat.execute("insert into test_view values(1, 'Hello'), (2, 'World')"); - stat.execute("update test_view set name = 'Hallo' where id = 1"); - stat.execute("delete from test_view where id = 2"); + // Test an INSERT operation and check that generated keys from the + // source table are returned as expected. + stat.execute("INSERT INTO TEST_VIEW(NAME) VALUES 'Hello', 'World'", new String[] { "ID" }); + try (ResultSet rs = stat.getGeneratedKeys()) { + while (rs.next()) { + System.out.printf("Key %d was generated%n", rs.getLong(1)); + } + } + System.out.println(); + // Test UPDATE and DELETE operations. 
+ stat.execute("UPDATE TEST_VIEW SET NAME = 'Hallo' WHERE ID = 1"); + stat.execute("DELETE FROM TEST_VIEW WHERE ID = 2"); - // print the contents of the table and the view - System.out.println("table test:"); - ResultSet rs; - rs = stat.executeQuery("select * from test"); - while (rs.next()) { - System.out.println(rs.getInt(1) + " " + rs.getString(2)); - } - System.out.println(); - System.out.println("test_view:"); - rs = stat.executeQuery("select * from test_view"); - while (rs.next()) { - System.out.println(rs.getInt(1) + " " + rs.getString(2)); + // Print the contents of the table and the view, they should be the + // same. + System.out.println("TEST_TABLE:"); + try (ResultSet rs = stat.executeQuery("TABLE TEST_TABLE")) { + while (rs.next()) { + System.out.printf("%d %s%n", rs.getLong(1), rs.getString(2)); + } + } + System.out.println(); + System.out.println("TEST_VIEW:"); + try (ResultSet rs = stat.executeQuery("TABLE TEST_VIEW")) { + while (rs.next()) { + System.out.printf("%d %s%n", rs.getLong(1), rs.getString(2)); + } + } } - - conn.close(); } @Override - public void init(Connection conn, String schemaName, String triggerName, - String tableName, boolean before, int type) throws SQLException { - prepDelete = conn.prepareStatement("delete from test where id = ?"); - prepInsert = conn.prepareStatement("insert into test values(?, ?)"); + public void init(Connection conn, String schemaName, String triggerName, String tableName, boolean before, + int type) throws SQLException { + prepDelete = conn.prepareStatement("DELETE FROM TEST_TABLE WHERE ID = ?"); + // INSERT and UPDATE triggers should return the FINAL values of the row. + // Table TEST_TABLE has a generated column, so the FINAL row can be + // different from the row that we try to insert here. 
+ prepInsert = conn.prepareStatement("SELECT * FROM FINAL TABLE(INSERT INTO TEST_TABLE VALUES (?, ?))"); super.init(conn, schemaName, triggerName, tableName, before, type); } @Override - public void fire(Connection conn, ResultSet oldRow, ResultSet newRow) - throws SQLException { + public void fire(Connection conn, ResultSet oldRow, ResultSet newRow) throws SQLException { if (oldRow != null && oldRow.next()) { - prepDelete.setInt(1, oldRow.getInt(1)); + prepDelete.setLong(1, oldRow.getLong(1)); prepDelete.execute(); } if (newRow != null && newRow.next()) { - prepInsert.setInt(1, newRow.getInt(1)); + long id = newRow.getLong(1); + if (newRow.wasNull()) { + prepInsert.setNull(1, Types.BIGINT); + } else { + prepInsert.setLong(1, id); + } prepInsert.setString(2, newRow.getString(2)); - prepInsert.execute(); + // Now we need to execute the INSERT statement and update the newRow + // with the FINAL values. + // It is necessary for the FINAL TABLE and getGeneratedKeys(); if we + // don't update the newRow, the FINAL TABLE will work like the NEW + // TABLE. + // It is only necessary when the source table has generated columns + // or other columns with default values, or it has a trigger that + // can change the inserted values; without such columns the NEW + // TABLE and the FINAL TABLE are the same. + try (ResultSet rs = prepInsert.executeQuery()) { + rs.next(); + newRow.updateLong(1, rs.getLong(1)); + newRow.updateString(2, rs.getString(2)); + newRow.rowUpdated(); + } } } + @Override + public void close() throws SQLException { + prepInsert.close(); + prepDelete.close(); + } + } diff --git a/h2/src/test/org/h2/samples/fullTextSearch.sql b/h2/src/test/org/h2/samples/fullTextSearch.sql index 30ba8e31cd..a49cf049a6 100644 --- a/h2/src/test/org/h2/samples/fullTextSearch.sql +++ b/h2/src/test/org/h2/samples/fullTextSearch.sql @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ diff --git a/h2/src/test/org/h2/samples/newsfeed.sql b/h2/src/test/org/h2/samples/newsfeed.sql index ebe8c57b95..82c483be5c 100644 --- a/h2/src/test/org/h2/samples/newsfeed.sql +++ b/h2/src/test/org/h2/samples/newsfeed.sql @@ -1,12 +1,17 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ CREATE TABLE VERSION(ID INT PRIMARY KEY, VERSION VARCHAR, CREATED VARCHAR); INSERT INTO VERSION VALUES +(154, '2.1.210', '2022-01-17'), +(153, '2.0.206', '2022-01-04'), +(152, '2.0.204', '2021-12-21'), +(151, '2.0.202', '2021-11-25'), +(150, '1.4.200', '2019-10-14'), (149, '1.4.199', '2019-03-13'), (148, '1.4.198', '2019-02-22'), (147, '1.4.197', '2018-03-18'), @@ -15,34 +20,23 @@ INSERT INTO VERSION VALUES (144, '1.4.194', '2017-03-10'), (143, '1.4.193', '2016-10-31'), (142, '1.4.192', '2016-05-26'), -(141, '1.4.191', '2016-01-21'), -(140, '1.4.190', '2015-10-11'), -(139, '1.4.189', '2015-09-13'), -(138, '1.4.188', '2015-08-01'), -(137, '1.4.187', '2015-04-10'), -(136, '1.4.186', '2015-03-02'), -(135, '1.4.185', '2015-01-16'), -(134, '1.4.184', '2014-12-19'), -(133, '1.4.183', '2014-12-13'); +(141, '1.4.191', '2016-01-21'); CREATE TABLE CHANNEL(TITLE VARCHAR, LINK VARCHAR, DESC VARCHAR, LANGUAGE VARCHAR, PUB TIMESTAMP, LAST TIMESTAMP, AUTHOR VARCHAR); INSERT INTO CHANNEL VALUES('H2 Database Engine' , - 'http://www.h2database.com/', 'H2 Database Engine', 'en-us', NOW(), NOW(), 'Thomas Mueller'); + 'https://h2database.com/', 'H2 Database Engine', 'en-us', LOCALTIMESTAMP, LOCALTIMESTAMP, 'Thomas Mueller'); CREATE VIEW ITEM AS SELECT ID, 'New version available: ' || 
VERSION || ' (' || CREATED || ')' TITLE, CAST((CREATED || ' 12:00:00') AS TIMESTAMP) ISSUED, $$A new version of H2 is available for -download. +download. (You may have to click 'Refresh').
          For details, see the -change log. -
          -For future plans, see the -roadmap. +change log. $$ AS DESC FROM VERSION; SELECT 'newsfeed-rss.xml' FILE, @@ -72,7 +66,7 @@ SELECT 'newsfeed-atom.xml' FILE, XMLNODE('title', XMLATTR('type', 'text'), C.TITLE) || XMLNODE('id', NULL, XMLTEXT(C.LINK)) || XMLNODE('author', NULL, XMLNODE('name', NULL, C.AUTHOR)) || - XMLNODE('link', XMLATTR('rel', 'self') || XMLATTR('href', 'http://www.h2database.com/html/newsfeed-atom.xml'), NULL) || + XMLNODE('link', XMLATTR('rel', 'self') || XMLATTR('href', 'https://h2database.com/html/newsfeed-atom.xml'), NULL) || XMLNODE('updated', NULL, FORMATDATETIME(C.LAST, 'yyyy-MM-dd''T''HH:mm:ss''Z''', 'en', 'GMT')) || GROUP_CONCAT( XMLNODE('entry', NULL, @@ -91,16 +85,16 @@ UNION SELECT 'doap-h2.rdf' FILE, XMLSTARTDOC() || $$ - + H2 Database Engine - + Java - + H2 Database Engine H2 is a relational database management system written in Java. @@ -118,7 +112,7 @@ $$ - + $$ || GROUP_CONCAT( XMLNODE('release', NULL, diff --git a/h2/src/test/org/h2/samples/optimizations.sql b/h2/src/test/org/h2/samples/optimizations.sql index db35eb260d..208224ae73 100644 --- a/h2/src/test/org/h2/samples/optimizations.sql +++ b/h2/src/test/org/h2/samples/optimizations.sql @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ @@ -76,22 +76,22 @@ DROP TABLE TEST; -- of a column for each group. 
-- Initialize the data -CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE DECIMAL(100, 2)); +CREATE TABLE TEST(ID INT PRIMARY KEY, "VALUE" DECIMAL(100, 2)); CALL RAND(0); --> 0.730967787376657 ; INSERT INTO TEST SELECT X, RAND()*100 FROM SYSTEM_RANGE(1, 1000); -- Create an index on the column VALUE -CREATE INDEX IDX_TEST_VALUE ON TEST(VALUE); +CREATE INDEX IDX_TEST_VALUE ON TEST("VALUE"); -- Query the largest and smallest value - this is optimized -SELECT MIN(VALUE), MAX(VALUE) FROM TEST; +SELECT MIN("VALUE"), MAX("VALUE") FROM TEST; --> 0.01 99.89 ; -- Display the query plan - 'direct lookup' means it's optimized -EXPLAIN SELECT MIN(VALUE), MAX(VALUE) FROM TEST; +EXPLAIN SELECT MIN("VALUE"), MAX("VALUE") FROM TEST; --> SELECT --> MIN("VALUE"), --> MAX("VALUE") @@ -109,21 +109,21 @@ DROP TABLE TEST; -- of a column for each group. -- Initialize the data -CREATE TABLE TEST(ID INT PRIMARY KEY, TYPE INT, VALUE DECIMAL(100, 2)); +CREATE TABLE TEST(ID INT PRIMARY KEY, TYPE INT, "VALUE" DECIMAL(100, 2)); CALL RAND(0); --> 0.730967787376657 ; INSERT INTO TEST SELECT X, MOD(X, 5), RAND()*100 FROM SYSTEM_RANGE(1, 1000); -- Create an index on the columns TYPE and VALUE -CREATE INDEX IDX_TEST_TYPE_VALUE ON TEST(TYPE, VALUE); +CREATE INDEX IDX_TEST_TYPE_VALUE ON TEST(TYPE, "VALUE"); -- Analyze to optimize the DISTINCT part of the query ANALYZE; -- Query the largest and smallest value - this is optimized -SELECT TYPE, (SELECT VALUE FROM TEST T2 WHERE T.TYPE = T2.TYPE -ORDER BY TYPE, VALUE LIMIT 1) MIN +SELECT TYPE, (SELECT "VALUE" FROM TEST T2 WHERE T.TYPE = T2.TYPE +ORDER BY TYPE, "VALUE" LIMIT 1) MIN FROM (SELECT DISTINCT TYPE FROM TEST) T ORDER BY TYPE; --> 0 0.42 --> 1 0.14 @@ -133,8 +133,8 @@ FROM (SELECT DISTINCT TYPE FROM TEST) T ORDER BY TYPE; ; -- Display the query plan -EXPLAIN SELECT TYPE, (SELECT VALUE FROM TEST T2 WHERE T.TYPE = T2.TYPE -ORDER BY TYPE, VALUE LIMIT 1) MIN +EXPLAIN SELECT TYPE, (SELECT "VALUE" FROM TEST T2 WHERE T.TYPE = T2.TYPE +ORDER BY TYPE, "VALUE" 
LIMIT 1) MIN FROM (SELECT DISTINCT TYPE FROM TEST) T ORDER BY TYPE; --> SELECT --> "TYPE", @@ -143,21 +143,19 @@ FROM (SELECT DISTINCT TYPE FROM TEST) T ORDER BY TYPE; --> FROM "PUBLIC"."TEST" "T2" --> /* PUBLIC.IDX_TEST_TYPE_VALUE: TYPE = T.TYPE */ --> WHERE "T"."TYPE" = "T2"."TYPE" ---> ORDER BY ="TYPE", 1 +--> ORDER BY "TYPE", 1 --> FETCH FIRST ROW ONLY --> /* index sorted */) AS "MIN" --> FROM ( --> SELECT DISTINCT --> "TYPE" --> FROM "PUBLIC"."TEST" ---> /* PUBLIC.IDX_TEST_TYPE_VALUE */ ---> /* distinct */ --> ) "T" --> /* SELECT DISTINCT --> TYPE --> FROM PUBLIC.TEST ---> /++ PUBLIC.IDX_TEST_TYPE_VALUE ++/ ---> /++ distinct ++/ +--> /* PUBLIC.IDX_TEST_TYPE_VALUE */ +--> /* distinct */ --> */ --> ORDER BY 1 ; @@ -171,24 +169,24 @@ DROP TABLE TEST; -- values of a column for the whole table. -- Initialize the data -CREATE TABLE TEST(ID INT PRIMARY KEY, TYPE INT, VALUE DECIMAL(100, 2)); +CREATE TABLE TEST(ID INT PRIMARY KEY, TYPE INT, "VALUE" DECIMAL(100, 2)); CALL RAND(0); --> 0.730967787376657 ; INSERT INTO TEST SELECT X, MOD(X, 100), RAND()*100 FROM SYSTEM_RANGE(1, 1000); -- Create an index on the column VALUE -CREATE INDEX IDX_TEST_VALUE ON TEST(VALUE); +CREATE INDEX IDX_TEST_VALUE ON TEST("VALUE"); -- Query the smallest 10 values -SELECT VALUE FROM TEST ORDER BY VALUE LIMIT 3; +SELECT "VALUE" FROM TEST ORDER BY "VALUE" LIMIT 3; --> 0.01 --> 0.14 --> 0.16 ; -- Display the query plan - 'index sorted' means the index is used -EXPLAIN SELECT VALUE FROM TEST ORDER BY VALUE LIMIT 10; +EXPLAIN SELECT "VALUE" FROM TEST ORDER BY "VALUE" LIMIT 10; --> SELECT --> "VALUE" --> FROM "PUBLIC"."TEST" @@ -199,17 +197,17 @@ EXPLAIN SELECT VALUE FROM TEST ORDER BY VALUE LIMIT 10; ; -- To optimize getting the largest values, a new descending index is required -CREATE INDEX IDX_TEST_VALUE_D ON TEST(VALUE DESC); +CREATE INDEX IDX_TEST_VALUE_D ON TEST("VALUE" DESC); -- Query the largest 10 values -SELECT VALUE FROM TEST ORDER BY VALUE DESC LIMIT 3; +SELECT "VALUE" FROM TEST ORDER 
BY "VALUE" DESC LIMIT 3; --> 99.89 --> 99.73 --> 99.68 ; -- Display the query plan - 'index sorted' means the index is used -EXPLAIN SELECT VALUE FROM TEST ORDER BY VALUE DESC LIMIT 10; +EXPLAIN SELECT "VALUE" FROM TEST ORDER BY "VALUE" DESC LIMIT 10; --> SELECT --> "VALUE" --> FROM "PUBLIC"."TEST" @@ -239,7 +237,7 @@ SELECT * FROM TEST WHERE ID IN(1, 1000); -- Display the query plan EXPLAIN SELECT * FROM TEST WHERE ID IN(1, 1000); --> SELECT ---> "TEST"."ID" +--> "PUBLIC"."TEST"."ID" --> FROM "PUBLIC"."TEST" --> /* PUBLIC.PRIMARY_KEY_2: ID IN(1, 1000) */ --> WHERE "ID" IN(1, 1000) @@ -261,10 +259,10 @@ INSERT INTO TEST SELECT X, MOD(X, 10) FROM SYSTEM_RANGE(1, 1000); -- Display the query plan EXPLAIN SELECT * FROM TEST WHERE ID IN (10, 20) AND DATA IN (1, 2); --> SELECT ---> "TEST"."ID", ---> "TEST"."DATA" +--> "PUBLIC"."TEST"."ID", +--> "PUBLIC"."TEST"."DATA" --> FROM "PUBLIC"."TEST" ---> /* PUBLIC.PRIMARY_KEY_2: ID IN(10, 20) */ +--> /* PUBLIC.TEST_DATA: DATA IN(1, 2) */ --> WHERE ("ID" IN(10, 20)) --> AND ("DATA" IN(1, 2)) ; diff --git a/h2/src/test/org/h2/samples/package.html b/h2/src/test/org/h2/samples/package.html index 028e50986c..a65657aaaa 100644 --- a/h2/src/test/org/h2/samples/package.html +++ b/h2/src/test/org/h2/samples/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/TestAll.java b/h2/src/test/org/h2/test/TestAll.java index 9c6ced3b9f..4c5c3e5baf 100644 --- a/h2/src/test/org/h2/test/TestAll.java +++ b/h2/src/test/org/h2/test/TestAll.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test; @@ -9,18 +9,20 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.Map.Entry; import java.util.Properties; import java.util.TimerTask; import java.util.concurrent.TimeUnit; import org.h2.Driver; import org.h2.engine.Constants; -import org.h2.store.fs.FilePathRec; import org.h2.store.fs.FileUtils; +import org.h2.store.fs.rec.FilePathRec; import org.h2.test.auth.TestAuthentication; import org.h2.test.bench.TestPerformance; import org.h2.test.db.TestAlter; import org.h2.test.db.TestAlterSchemaRename; +import org.h2.test.db.TestAlterTableNotFound; import org.h2.test.db.TestAnalyzeTableTx; import org.h2.test.db.TestAutoRecompile; import org.h2.test.db.TestBackup; @@ -35,7 +37,6 @@ import org.h2.test.db.TestCsv; import org.h2.test.db.TestDateStorage; import org.h2.test.db.TestDeadlock; -import org.h2.test.db.TestDrop; import org.h2.test.db.TestDuplicateKeyUpdate; import org.h2.test.db.TestEncryptedDb; import org.h2.test.db.TestExclusive; @@ -43,6 +44,7 @@ import org.h2.test.db.TestFunctionOverload; import org.h2.test.db.TestFunctions; import org.h2.test.db.TestGeneralCommonTableQueries; +import org.h2.test.db.TestIgnoreCatalogs; import org.h2.test.db.TestIndex; import org.h2.test.db.TestIndexHints; import org.h2.test.db.TestLargeBlob; @@ -57,7 +59,6 @@ import org.h2.test.db.TestMultiThreadedKernel; import org.h2.test.db.TestOpenClose; import org.h2.test.db.TestOptimizations; -import org.h2.test.db.TestOptimizerHints; import org.h2.test.db.TestOutOfMemory; import org.h2.test.db.TestPersistentCommonTableExpressions; import org.h2.test.db.TestPowerOff; @@ -65,10 +66,9 @@ import org.h2.test.db.TestReadOnly; import org.h2.test.db.TestRecursiveQueries; import org.h2.test.db.TestRights; -import org.h2.test.db.TestRowFactory; import org.h2.test.db.TestRunscript; import org.h2.test.db.TestSQLInjection; -import 
org.h2.test.db.TestSelectCountNonNullColumn; +import org.h2.test.db.TestSelectTableNotFound; import org.h2.test.db.TestSequence; import org.h2.test.db.TestSessionsLocks; import org.h2.test.db.TestSetCollation; @@ -82,8 +82,6 @@ import org.h2.test.db.TestTransaction; import org.h2.test.db.TestTriggersConstraints; import org.h2.test.db.TestTwoPhaseCommit; -import org.h2.test.db.TestUpgrade; -import org.h2.test.db.TestUsingIndex; import org.h2.test.db.TestView; import org.h2.test.db.TestViewAlterTable; import org.h2.test.db.TestViewDropView; @@ -92,13 +90,10 @@ import org.h2.test.jdbc.TestCancel; import org.h2.test.jdbc.TestConcurrentConnectionUsage; import org.h2.test.jdbc.TestConnection; -import org.h2.test.jdbc.TestCustomDataTypesHandler; import org.h2.test.jdbc.TestDatabaseEventListener; import org.h2.test.jdbc.TestDriver; import org.h2.test.jdbc.TestGetGeneratedKeys; -import org.h2.test.jdbc.TestJavaObject; import org.h2.test.jdbc.TestJavaObjectSerializer; -import org.h2.test.jdbc.TestLimitUpdates; import org.h2.test.jdbc.TestLobApi; import org.h2.test.jdbc.TestManyJdbcObjects; import org.h2.test.jdbc.TestMetaData; @@ -127,12 +122,12 @@ import org.h2.test.scripts.TestScript; import org.h2.test.server.TestAutoServer; import org.h2.test.server.TestInit; +import org.h2.test.server.TestJakartaWeb; import org.h2.test.server.TestNestedLoop; import org.h2.test.server.TestWeb; import org.h2.test.store.TestCacheConcurrentLIRS; import org.h2.test.store.TestCacheLIRS; import org.h2.test.store.TestCacheLongKeyLIRS; -import org.h2.test.store.TestConcurrent; import org.h2.test.store.TestDataUtils; import org.h2.test.store.TestDefrag; import org.h2.test.store.TestFreeSpace; @@ -140,6 +135,7 @@ import org.h2.test.store.TestMVRTree; import org.h2.test.store.TestMVStore; import org.h2.test.store.TestMVStoreBenchmark; +import org.h2.test.store.TestMVStoreConcurrent; import org.h2.test.store.TestMVStoreStopCompact; import org.h2.test.store.TestMVStoreTool; import 
org.h2.test.store.TestMVTableEngine; @@ -164,22 +160,21 @@ import org.h2.test.synth.TestOuterJoins; import org.h2.test.synth.TestRandomCompare; import org.h2.test.synth.TestRandomSQL; -import org.h2.test.synth.TestStringAggCompatibility; import org.h2.test.synth.TestTimer; import org.h2.test.synth.sql.TestSynth; import org.h2.test.synth.thread.TestMulti; import org.h2.test.unit.TestAnsCompression; import org.h2.test.unit.TestAutoReconnect; import org.h2.test.unit.TestBinaryArithmeticStream; +import org.h2.test.unit.TestBinaryOperation; import org.h2.test.unit.TestBitStream; import org.h2.test.unit.TestBnf; import org.h2.test.unit.TestCache; import org.h2.test.unit.TestCharsetCollator; -import org.h2.test.unit.TestClearReferences; import org.h2.test.unit.TestCollation; import org.h2.test.unit.TestCompress; +import org.h2.test.unit.TestConcurrentJdbc; import org.h2.test.unit.TestConnectionInfo; -import org.h2.test.unit.TestDataPage; import org.h2.test.unit.TestDate; import org.h2.test.unit.TestDateIso8601; import org.h2.test.unit.TestDateTimeUtils; @@ -188,28 +183,23 @@ import org.h2.test.unit.TestFile; import org.h2.test.unit.TestFileLock; import org.h2.test.unit.TestFileLockProcess; -import org.h2.test.unit.TestFileLockSerialized; import org.h2.test.unit.TestFileSystem; import org.h2.test.unit.TestFtp; import org.h2.test.unit.TestGeometryUtils; import org.h2.test.unit.TestIntArray; -import org.h2.test.unit.TestIntIntHashMap; import org.h2.test.unit.TestIntPerfectHash; import org.h2.test.unit.TestInterval; import org.h2.test.unit.TestJmx; +import org.h2.test.unit.TestJsonUtils; import org.h2.test.unit.TestKeywords; -import org.h2.test.unit.TestLocalResultFactory; import org.h2.test.unit.TestLocale; import org.h2.test.unit.TestMVTempResult; import org.h2.test.unit.TestMathUtils; import org.h2.test.unit.TestMemoryUnmapper; import org.h2.test.unit.TestMode; -import org.h2.test.unit.TestModifyOnWrite; import org.h2.test.unit.TestNetUtils; import 
org.h2.test.unit.TestObjectDeserialization; -import org.h2.test.unit.TestOldVersion; import org.h2.test.unit.TestOverflow; -import org.h2.test.unit.TestPageStore; import org.h2.test.unit.TestPageStoreCoverage; import org.h2.test.unit.TestPattern; import org.h2.test.unit.TestPerfectHash; @@ -228,12 +218,12 @@ import org.h2.test.unit.TestTimeStampWithTimeZone; import org.h2.test.unit.TestTools; import org.h2.test.unit.TestTraceSystem; +import org.h2.test.unit.TestUpgrade; import org.h2.test.unit.TestUtils; import org.h2.test.unit.TestValue; import org.h2.test.unit.TestValueMemory; import org.h2.test.utils.OutputCatcher; import org.h2.test.utils.SelfDestructor; -import org.h2.test.utils.TestColumnNamer; import org.h2.tools.DeleteDbFiles; import org.h2.tools.Server; import org.h2.util.AbbaLockingDetector; @@ -287,11 +277,6 @@ public class TestAll { */ static boolean atLeastOneTestFailed; - /** - * Whether the MVStore storage is used. - */ - public boolean mvStore = true; - /** * If the test should run with many rows. */ @@ -312,11 +297,6 @@ public class TestAll { */ public boolean codeCoverage; - /** - * If the multi-threaded mode should be used. - */ - public boolean multiThreaded; - /** * If lazy queries should be used. */ @@ -358,9 +338,9 @@ public class TestAll { public boolean splitFileSystem; /** - * If only fast/CI/Jenkins/Travis tests should be run. + * If only fast CI tests should be run. */ - public boolean travis; + public boolean ci; /** * the vmlens.com race condition tool @@ -413,11 +393,6 @@ public class TestAll { */ boolean stopOnError; - /** - * If the database should always be defragmented when closing. - */ - boolean defrag; - /** * The cache type. */ @@ -439,6 +414,8 @@ public class TestAll { private Server server; + HashSet excludedTests = new HashSet<>(); + /** * The map of executed tests to detect not executed tests. * Boolean value is 'false' for a disabled test. @@ -487,7 +464,6 @@ private static void run(String... 
args) throws Exception { -Xmx1500m -D reopenOffset=3 -D reopenShift=1 power failure test -power failure test: MULTI_THREADED=TRUE power failure test: larger binaries and additional index. power failure test with randomly generating / dropping indexes and tables. @@ -501,20 +477,18 @@ private static void run(String... args) throws Exception { ------------- -remove old TODO, move to roadmap - kill a test: kill -9 `jps -l | grep "org.h2.test." | cut -d " " -f 1` */ TestAll test = new TestAll(); if (args.length > 0) { - if ("travis".equals(args[0])) { - test.travis = true; - test.testAll(); + if ("ci".equals(args[0])) { + test.ci = true; + test.testAll(args, 1); } else if ("vmlens".equals(args[0])) { test.vmlens = true; - test.testAll(); + test.testAll(args, 1); } else if ("reopen".equals(args[0])) { System.setProperty("h2.delayWrongPasswordMin", "0"); System.setProperty("h2.analyzeAuto", "100"); @@ -554,15 +528,24 @@ private static void run(String... args) throws Exception { new TestTimer().runTest(test); } } else { - test.testAll(); + test.testAll(args, 0); } - System.out.println(TestBase.formatTime( - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)) + " total"); + System.out.println(TestBase.formatTime(new StringBuilder(), + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)).append(" total").toString()); } - private void testAll() throws Exception { + private void testAll(String[] args, int offset) throws Exception { + int l = args.length; + while (l > offset + 1) { + if ("-exclude".equals(args[offset])) { + excludedTests.add(args[offset + 1]); + offset += 2; + } else { + break; + } + } runTests(); - if (!travis && !vmlens) { + if (!ci && !vmlens) { Profiler prof = new Profiler(); prof.depth = 16; prof.interval = 1; @@ -608,17 +591,15 @@ private void runTests() throws SQLException { abbaLockingDetector = new AbbaLockingDetector().startCollecting(); } - smallLog = big = networked = memory = ssl = false; + smallLog = big = networked = memory = lazy = ssl 
= false; diskResult = traceSystemOut = diskUndo = false; traceTest = stopOnError = false; - defrag = false; traceLevelFile = throttle = 0; cipher = null; // memory is a good match for multi-threaded, makes things happen // faster, more chance of exposing race conditions memory = true; - multiThreaded = true; test(); if (vmlens) { return; @@ -626,44 +607,33 @@ private void runTests() throws SQLException { testAdditional(); // test utilities - big = !travis; + big = !ci; testUtils(); big = false; // lazy lazy = true; memory = true; - multiThreaded = true; test(); lazy = false; // but sometimes race conditions need bigger windows memory = false; - multiThreaded = true; test(); testAdditional(); - // a more normal setup - memory = false; - multiThreaded = false; - test(); - testAdditional(); + networked = true; - // basic pagestore testing - memory = false; - multiThreaded = false; - mvStore = false; + memory = true; test(); - testAdditional(); + memory = false; - mvStore = true; - memory = true; - multiThreaded = false; - networked = true; + lazy = true; test(); + lazy = false; - memory = false; networked = false; + diskUndo = true; diskResult = true; traceLevelFile = 3; @@ -678,14 +648,11 @@ private void runTests() throws SQLException { throttle = 0; cacheType = null; cipher = null; - defrag = true; - test(); - if (!travis) { + if (!ci) { traceLevelFile = 0; smallLog = true; networked = true; - defrag = false; ssl = true; test(); @@ -715,18 +682,14 @@ private void runCoverage() throws SQLException { smallLog = big = networked = memory = ssl = false; diskResult = traceSystemOut = diskUndo = false; traceTest = stopOnError = false; - defrag = false; traceLevelFile = throttle = 0; cipher = null; memory = true; - multiThreaded = true; test(); testAdditional(); testUtils(); - multiThreaded = false; - mvStore = false; test(); // testUnit(); } @@ -739,155 +702,152 @@ private void test() throws SQLException { System.out.println("Test " + toString() + " (" + 
Utils.getMemoryUsed() + " KB used)"); beforeTest(); - - // db - addTest(new TestScript()); - addTest(new TestAlter()); - addTest(new TestAlterSchemaRename()); - addTest(new TestAutoRecompile()); - addTest(new TestBackup()); - addTest(new TestBigDb()); - addTest(new TestBigResult()); - addTest(new TestCases()); - addTest(new TestCheckpoint()); - addTest(new TestCompatibility()); - addTest(new TestCompatibilityOracle()); - addTest(new TestCompatibilitySQLServer()); - addTest(new TestCsv()); - addTest(new TestDeadlock()); - if (vmlens) { - return; - } - addTest(new TestDrop()); - addTest(new TestDuplicateKeyUpdate()); - addTest(new TestEncryptedDb()); - addTest(new TestExclusive()); - addTest(new TestFullText()); - addTest(new TestFunctionOverload()); - addTest(new TestFunctions()); - addTest(new TestInit()); - addTest(new TestIndex()); - addTest(new TestIndexHints()); - addTest(new TestLargeBlob()); - addTest(new TestLinkedTable()); - addTest(new TestListener()); - addTest(new TestLob()); - addTest(new TestMergeUsing()); - addTest(new TestMultiConn()); - addTest(new TestMultiDimension()); - addTest(new TestMultiThreadedKernel()); - addTest(new TestOpenClose()); - addTest(new TestOptimizerHints()); - addTest(new TestReadOnly()); - addTest(new TestRecursiveQueries()); - addTest(new TestGeneralCommonTableQueries()); - if (!memory) { - // requires persistent store for reconnection tests - addTest(new TestPersistentCommonTableExpressions()); + try { + // db + addTest(new TestScript()); + addTest(new TestAlter()); + addTest(new TestAlterSchemaRename()); + addTest(new TestAutoRecompile()); + addTest(new TestBackup()); + addTest(new TestBigDb()); + addTest(new TestBigResult()); + addTest(new TestCases()); + addTest(new TestCheckpoint()); + addTest(new TestCompatibility()); + addTest(new TestCompatibilityOracle()); + addTest(new TestCompatibilitySQLServer()); + addTest(new TestCsv()); + addTest(new TestDeadlock()); + if (vmlens) { + return; + } + addTest(new 
TestDuplicateKeyUpdate()); + addTest(new TestEncryptedDb()); + addTest(new TestExclusive()); + addTest(new TestFullText()); + addTest(new TestFunctionOverload()); + addTest(new TestFunctions()); + addTest(new TestInit()); + addTest(new TestIndex()); + addTest(new TestIndexHints()); + addTest(new TestLargeBlob()); + addTest(new TestLinkedTable()); + addTest(new TestListener()); + addTest(new TestLob()); + addTest(new TestMergeUsing()); + addTest(new TestMultiConn()); + addTest(new TestMultiDimension()); + addTest(new TestMultiThreadedKernel()); + addTest(new TestOpenClose()); + addTest(new TestReadOnly()); + addTest(new TestRecursiveQueries()); + addTest(new TestGeneralCommonTableQueries()); + addTest(new TestAlterTableNotFound()); + addTest(new TestSelectTableNotFound()); + if (!memory) { + // requires persistent store for reconnection tests + addTest(new TestPersistentCommonTableExpressions()); + } + addTest(new TestRights()); + addTest(new TestRunscript()); + addTest(new TestSQLInjection()); + addTest(new TestSessionsLocks()); + addTest(new TestSequence()); + addTest(new TestSpaceReuse()); + addTest(new TestSpatial()); + addTest(new TestSpeed()); + addTest(new TestTableEngines()); + addTest(new TestTempTables()); + addTest(new TestTransaction()); + addTest(new TestTriggersConstraints()); + addTest(new TestTwoPhaseCommit()); + addTest(new TestView()); + addTest(new TestViewAlterTable()); + addTest(new TestViewDropView()); + addTest(new TestSynonymForTable()); + + // jdbc + addTest(new TestBatchUpdates()); + addTest(new TestCallableStatement()); + addTest(new TestCancel()); + addTest(new TestConcurrentConnectionUsage()); + addTest(new TestConnection()); + addTest(new TestDatabaseEventListener()); + addTest(new TestLobApi()); + addTest(new TestSQLXML()); + addTest(new TestManyJdbcObjects()); + addTest(new TestMetaData()); + addTest(new TestNativeSQL()); + addTest(new TestPreparedStatement()); + addTest(new TestResultSet()); + addTest(new TestStatement()); + 
addTest(new TestGetGeneratedKeys()); + addTest(new TestTransactionIsolation()); + addTest(new TestUpdatableResultSet()); + addTest(new TestZloty()); + addTest(new TestSetCollation()); + + // jdbcx + addTest(new TestConnectionPool()); + addTest(new TestDataSource()); + addTest(new TestXA()); + addTest(new TestXASimple()); + + // server + addTest(new TestAutoServer()); + addTest(new TestNestedLoop()); + + // mvcc & row level locking + addTest(new TestMvcc1()); + addTest(new TestMvcc2()); + addTest(new TestMvcc3()); + addTest(new TestMvcc4()); + addTest(new TestMvccMultiThreaded()); + addTest(new TestMvccMultiThreaded2()); + addTest(new TestRowLocks()); + addTest(new TestAnalyzeTableTx()); + + // synth + addTest(new TestBtreeIndex()); + addTest(new TestConcurrentUpdate()); + addTest(new TestDiskFull()); + addTest(new TestCrashAPI()); + addTest(new TestFuzzOptimizations()); + addTest(new TestLimit()); + addTest(new TestRandomCompare()); + addTest(new TestKillRestart()); + addTest(new TestKillRestartMulti()); + addTest(new TestMultiThreaded()); + addTest(new TestOuterJoins()); + addTest(new TestNestedJoins()); + + runAddedTests(); + + // serial + addTest(new TestDateStorage()); + addTest(new TestDriver()); + addTest(new TestJavaObjectSerializer()); + addTest(new TestLocale()); + addTest(new TestMemoryUsage()); + addTest(new TestMultiThread()); + addTest(new TestPowerOff()); + addTest(new TestReorderWrites()); + addTest(new TestRandomSQL()); + addTest(new TestQueryCache()); + addTest(new TestUrlJavaObjectSerializer()); + addTest(new TestWeb()); + addTest(new TestJakartaWeb()); + + // other unsafe + addTest(new TestOptimizations()); + addTest(new TestOutOfMemory()); + addTest(new TestIgnoreCatalogs()); + + + runAddedTests(1); + } finally { + afterTest(); } - addTest(new TestRights()); - addTest(new TestRunscript()); - addTest(new TestSQLInjection()); - addTest(new TestSessionsLocks()); - addTest(new TestSelectCountNonNullColumn()); - addTest(new TestSequence()); - 
addTest(new TestSpaceReuse()); - addTest(new TestSpatial()); - addTest(new TestSpeed()); - addTest(new TestTableEngines()); - addTest(new TestRowFactory()); - addTest(new TestTempTables()); - addTest(new TestTransaction()); - addTest(new TestTriggersConstraints()); - addTest(new TestTwoPhaseCommit()); - addTest(new TestView()); - addTest(new TestViewAlterTable()); - addTest(new TestViewDropView()); - addTest(new TestSynonymForTable()); - addTest(new TestColumnNamer()); - - // jdbc - addTest(new TestBatchUpdates()); - addTest(new TestCallableStatement()); - addTest(new TestCancel()); - addTest(new TestConcurrentConnectionUsage()); - addTest(new TestConnection()); - addTest(new TestDatabaseEventListener()); - addTest(new TestJavaObject()); - addTest(new TestLimitUpdates()); - addTest(new TestLobApi()); - addTest(new TestSQLXML()); - addTest(new TestManyJdbcObjects()); - addTest(new TestMetaData()); - addTest(new TestNativeSQL()); - addTest(new TestPreparedStatement()); - addTest(new TestResultSet()); - addTest(new TestStatement()); - addTest(new TestGetGeneratedKeys()); - addTest(new TestTransactionIsolation()); - addTest(new TestUpdatableResultSet()); - addTest(new TestZloty()); - addTest(new TestCustomDataTypesHandler()); - addTest(new TestSetCollation()); - - // jdbcx - addTest(new TestConnectionPool()); - addTest(new TestDataSource()); - addTest(new TestXA()); - addTest(new TestXASimple()); - - // server - addTest(new TestAutoServer()); - addTest(new TestNestedLoop()); - - // mvcc & row level locking - addTest(new TestMvcc1()); - addTest(new TestMvcc2()); - addTest(new TestMvcc3()); - addTest(new TestMvcc4()); - addTest(new TestMvccMultiThreaded()); - addTest(new TestMvccMultiThreaded2()); - addTest(new TestRowLocks()); - addTest(new TestAnalyzeTableTx()); - - // synth - addTest(new TestBtreeIndex()); - addTest(new TestConcurrentUpdate()); - addTest(new TestDiskFull()); - addTest(new TestCrashAPI()); - addTest(new TestFuzzOptimizations()); - addTest(new 
TestLimit()); - addTest(new TestRandomCompare()); - addTest(new TestKillRestart()); - addTest(new TestKillRestartMulti()); - addTest(new TestMultiThreaded()); - addTest(new TestOuterJoins()); - addTest(new TestNestedJoins()); - addTest(new TestStringAggCompatibility()); - - runAddedTests(); - - // serial - addTest(new TestDateStorage()); - addTest(new TestDriver()); - addTest(new TestJavaObjectSerializer()); - addTest(new TestLocale()); - addTest(new TestMemoryUsage()); - addTest(new TestMultiThread()); - addTest(new TestPowerOff()); - addTest(new TestReorderWrites()); - addTest(new TestRandomSQL()); - addTest(new TestQueryCache()); - addTest(new TestUrlJavaObjectSerializer()); - addTest(new TestWeb()); - - // other unsafe - addTest(new TestOptimizations()); - addTest(new TestOutOfMemory()); - - runAddedTests(1); - - afterTest(); } /** @@ -908,25 +868,19 @@ private void testAdditional() { addTest(new TestExit()); addTest(new TestFileLock()); addTest(new TestJmx()); - addTest(new TestModifyOnWrite()); - addTest(new TestOldVersion()); addTest(new TestMultiThreadedKernel()); - addTest(new TestPageStore()); addTest(new TestPageStoreCoverage()); addTest(new TestPgServer()); addTest(new TestRecovery()); addTest(new RecoverLobTest()); addTest(createTest("org.h2.test.unit.TestServlet")); + addTest(createTest("org.h2.test.unit.TestJakartaServlet")); addTest(new TestTimeStampWithTimeZone()); - addTest(new TestUpgrade()); - addTest(new TestUsingIndex()); addTest(new TestValue()); - addTest(new TestWeb()); runAddedTests(); addTest(new TestCluster()); - addTest(new TestFileLockSerialized()); addTest(new TestFileLockProcess()); addTest(new TestDefrag()); addTest(new TestTools()); @@ -963,12 +917,12 @@ private void testUtils() { addTest(new TestMVTempResult()); // unit + addTest(new TestConcurrentJdbc()); addTest(new TestAnsCompression()); addTest(new TestBinaryArithmeticStream()); + addTest(new TestBinaryOperation()); addTest(new TestBitStream()); addTest(new 
TestCharsetCollator()); - addTest(new TestClearReferences()); - addTest(new TestDataPage()); addTest(new TestDateIso8601()); addTest(new TestDbException()); addTest(new TestFile()); @@ -977,8 +931,8 @@ private void testUtils() { addTest(new TestGeometryUtils()); addTest(new TestInterval()); addTest(new TestIntArray()); - addTest(new TestIntIntHashMap()); addTest(new TestIntPerfectHash()); + addTest(new TestJsonUtils()); addTest(new TestKeywords()); addTest(new TestMathUtils()); addTest(new TestMemoryUnmapper()); @@ -995,14 +949,14 @@ private void testUtils() { addTest(new TestStringUtils()); addTest(new TestTraceSystem()); addTest(new TestUtils()); - addTest(new TestLocalResultFactory()); + addTest(new TestUpgrade()); runAddedTests(); // serial addTest(new TestDate()); addTest(new TestDateTimeUtils()); - addTest(new TestConcurrent()); + addTest(new TestMVStoreConcurrent()); addTest(new TestNetUtils()); addTest(new TestPattern()); addTest(new TestStringCache()); @@ -1013,6 +967,9 @@ private void testUtils() { } private void addTest(TestBase test) { + if (excludedTests.contains(test.getClass().getName())) { + return; + } // tests.add(test); // run directly for now, because concurrently running tests // fails on Raspberry Pi quite often (seems to be a JVM problem) @@ -1053,7 +1010,9 @@ public void call() throws Exception { } test = tests.remove(0); } - test.runTest(TestAll.this); + if (!excludedTests.contains(test.getClass().getName())) { + test.runTest(TestAll.this); + } } } }; @@ -1125,7 +1084,7 @@ public int getPort() { */ public static void printSystemInfo() { Properties prop = System.getProperties(); - System.out.println("H2 " + Constants.getFullVersion() + + System.out.println("H2 " + Constants.FULL_VERSION + " @ " + new java.sql.Timestamp(System.currentTimeMillis()).toString()); System.out.println("Java " + prop.getProperty("java.runtime.version") + ", " + @@ -1151,16 +1110,10 @@ public static void printSystemInfo() { public String toString() { StringBuilder 
buff = new StringBuilder(); appendIf(buff, lazy, "lazy"); - if (mvStore) { - buff.append("mvStore "); - } else { - buff.append("pageStore "); - } appendIf(buff, big, "big"); appendIf(buff, networked, "net"); appendIf(buff, memory, "memory"); appendIf(buff, codeCoverage, "codeCoverage"); - appendIf(buff, multiThreaded, "multiThreaded"); appendIf(buff, cipher != null, cipher); appendIf(buff, cacheType != null, cacheType); appendIf(buff, smallLog, "smallLog"); @@ -1173,7 +1126,6 @@ public String toString() { appendIf(buff, throttle > 0, "throttle:" + throttle); appendIf(buff, traceTest, "traceTest"); appendIf(buff, stopOnError, "stopOnError"); - appendIf(buff, defrag, "defrag"); appendIf(buff, splitFileSystem, "split"); appendIf(buff, collation != null, collation); return buff.toString(); diff --git a/h2/src/test/org/h2/test/TestAllJunit.java b/h2/src/test/org/h2/test/TestAllJunit.java index a027832ced..2ebc55afdc 100644 --- a/h2/src/test/org/h2/test/TestAllJunit.java +++ b/h2/src/test/org/h2/test/TestAllJunit.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * This class is a bridge between JUnit and the custom test framework @@ -17,7 +17,7 @@ public class TestAllJunit { * Run all the fast tests. */ @Test - public void testTravis() throws Exception { - TestAll.main("travis"); + public void testCI() throws Exception { + TestAll.main("ci"); } } diff --git a/h2/src/test/org/h2/test/TestBase.java b/h2/src/test/org/h2/test/TestBase.java index 8a60af65bf..c0fdffb937 100644 --- a/h2/src/test/org/h2/test/TestBase.java +++ b/h2/src/test/org/h2/test/TestBase.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test; @@ -12,11 +12,9 @@ import java.io.InputStream; import java.io.PrintWriter; import java.io.Reader; -import java.lang.reflect.Constructor; import java.lang.reflect.InvocationHandler; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; -import java.lang.reflect.Modifier; import java.lang.reflect.Proxy; import java.nio.channels.FileChannel; import java.nio.channels.FileLock; @@ -29,19 +27,24 @@ import java.sql.Types; import java.text.DateFormat; import java.text.SimpleDateFormat; +import java.time.LocalTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.LinkedList; import java.util.Objects; import java.util.SimpleTimeZone; +import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; -import org.h2.engine.SysProperties; +import org.h2.engine.SessionLocal; import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; +import org.h2.mvstore.MVStoreException; import org.h2.store.fs.FilePath; import org.h2.store.fs.FileUtils; -import org.h2.test.utils.ProxyCodeGenerator; import org.h2.test.utils.ResultVerifier; +import org.h2.util.StringUtils; +import org.h2.util.Utils; /** * The base class for all tests. @@ -68,6 +71,11 @@ public abstract class TestBase { */ private static String baseDir = getTestDir(""); + /** + * The maximum size of byte array. + */ + private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + /** * The test configuration. 
*/ @@ -80,7 +88,7 @@ public abstract class TestBase { private final LinkedList memory = new LinkedList<>(); - private static final SimpleDateFormat dateFormat = new SimpleDateFormat("HH:mm:ss"); + private static final DateTimeFormatter timeFormat = DateTimeFormatter.ofPattern("HH:mm:ss"); /** * Get the test directory for this test. @@ -129,9 +137,7 @@ public void runTest(TestAll conf) { try { init(conf); if (!isEnabled()) { - if (!conf.executedTests.containsKey(getClass())) { - conf.executedTests.put(getClass(), false); - } + conf.executedTests.putIfAbsent(getClass(), false); return; } conf.executedTests.put(getClass(), true); @@ -397,7 +403,7 @@ private static void logThrowable(String s, Throwable e) { public void println(String s) { long now = System.nanoTime(); long time = TimeUnit.NANOSECONDS.toMillis(now - start); - printlnWithTime(time, getClass().getName() + " " + s); + printlnWithTime(time, getClass().getName() + ' ' + s); } /** @@ -407,9 +413,9 @@ public void println(String s) { * @param s the message */ static synchronized void printlnWithTime(long millis, String s) { - s = dateFormat.format(new java.util.Date()) + " " + - formatTime(millis) + " " + s; - System.out.println(s); + StringBuilder builder = new StringBuilder(s.length() + 19); + timeFormat.formatTo(LocalTime.now(), builder); + System.out.println(formatTime(builder.append(' '), millis).append(' ').append(s).toString()); } /** @@ -418,24 +424,32 @@ static synchronized void printlnWithTime(long millis, String s) { * @param s the message */ protected void printTime(String s) { - SimpleDateFormat dateFormat = new SimpleDateFormat("HH:mm:ss"); - println(dateFormat.format(new java.util.Date()) + " " + s); + StringBuilder builder = new StringBuilder(s.length() + 9); + timeFormat.formatTo(LocalTime.now(), builder); + println(builder.append(' ').append(s).toString()); } /** - * Format the time in the format hh:mm:ss.1234 where 1234 is milliseconds. 
+ * Format the time in the format mm:ss.123 or hh:mm:ss.123 where 123 is + * milliseconds. * - * @param millis the time in milliseconds - * @return the formatted time + * @param builder the string builder to append to + * @param millis the time in milliseconds, non-negative + * @return the specified string builder */ - static String formatTime(long millis) { - String s = new java.sql.Time( - java.sql.Time.valueOf("0:0:0").getTime() + millis).toString() + - "." + ("" + (1000 + (millis % 1000))).substring(1); - if (s.startsWith("00:")) { - s = s.substring(3); + static StringBuilder formatTime(StringBuilder builder, long millis) { + int s = (int) (millis / 1_000); + int m = s / 60; + s %= 60; + int h = m / 60; + if (h != 0) { + builder.append(h).append(':'); + m %= 60; } - return s; + StringUtils.appendTwoDigits(builder, m).append(':'); + StringUtils.appendTwoDigits(builder, s).append('.'); + StringUtils.appendZeroPadded(builder, 3, millis % 1_000); + return builder; } /** @@ -452,6 +466,18 @@ public boolean isEnabled() { */ public abstract void test() throws Exception; + /** + * Only called from individual test classes main() method, + * makes sure to run the before/after stuff. + * + * @throws Exception if an exception in the test occurs + */ + public final void testFromMain() throws Exception { + config.beforeTest(); + test(); + config.afterTest(); + } + /** * Check if two values are equal, and if not throw an exception. * @@ -1024,20 +1050,19 @@ protected void assertResultSetMeta(ResultSet rs, int columnCount, assertEquals("java.lang.Integer", className); break; case Types.VARCHAR: - assertEquals("VARCHAR", typeName); + assertEquals("CHARACTER VARYING", typeName); assertEquals("java.lang.String", className); break; case Types.SMALLINT: assertEquals("SMALLINT", typeName); - assertEquals(SysProperties.OLD_RESULT_SET_GET_OBJECT ? 
"java.lang.Short" : "java.lang.Integer", - className); + assertEquals("java.lang.Integer", className); break; case Types.TIMESTAMP: assertEquals("TIMESTAMP", typeName); assertEquals("java.sql.Timestamp", className); break; - case Types.DECIMAL: - assertEquals("DECIMAL", typeName); + case Types.NUMERIC: + assertEquals("NUMERIC", typeName); assertEquals("java.math.BigDecimal", className); break; default: @@ -1059,6 +1084,20 @@ protected void assertResultSetMeta(ResultSet rs, int columnCount, } } + /** + * Check if a result set contains the expected data. + * The sort order is significant + * + * @param rs the result set + * @param data the expected data + * @param ignoreColumns columns to ignore, or {@code null} + * @throws AssertionError if there is a mismatch + */ + protected void assertResultSetOrdered(ResultSet rs, String[][] data, int[] ignoreColumns) + throws SQLException { + assertResultSet(true, rs, data, ignoreColumns); + } + /** * Check if a result set contains the expected data. 
* The sort order is significant @@ -1069,7 +1108,7 @@ protected void assertResultSetMeta(ResultSet rs, int columnCount, */ protected void assertResultSetOrdered(ResultSet rs, String[][] data) throws SQLException { - assertResultSet(true, rs, data); + assertResultSet(true, rs, data, null); } /** @@ -1078,9 +1117,10 @@ protected void assertResultSetOrdered(ResultSet rs, String[][] data) * @param ordered if the sort order is significant * @param rs the result set * @param data the expected data + * @param ignoreColumns columns to ignore, or {@code null} * @throws AssertionError if there is a mismatch */ - private void assertResultSet(boolean ordered, ResultSet rs, String[][] data) + private void assertResultSet(boolean ordered, ResultSet rs, String[][] data, int[] ignoreColumns) throws SQLException { int len = rs.getMetaData().getColumnCount(); int rows = data.length; @@ -1101,7 +1141,7 @@ private void assertResultSet(boolean ordered, ResultSet rs, String[][] data) String[] row = getData(rs, len); if (ordered) { String[] good = data[i]; - if (!testRow(good, row, good.length)) { + if (!testRow(good, row, good.length, ignoreColumns)) { fail("testResultSet row not equal, got:\n" + formatRow(row) + "\n" + formatRow(good)); } @@ -1109,7 +1149,7 @@ private void assertResultSet(boolean ordered, ResultSet rs, String[][] data) boolean found = false; for (int j = 0; j < rows; j++) { String[] good = data[i]; - if (testRow(good, row, good.length)) { + if (testRow(good, row, good.length, ignoreColumns)) { found = true; break; } @@ -1126,8 +1166,15 @@ private void assertResultSet(boolean ordered, ResultSet rs, String[][] data) } } - private static boolean testRow(String[] a, String[] b, int len) { - for (int i = 0; i < len; i++) { + private static boolean testRow(String[] a, String[] b, int len, int[] ignoreColumns) { + loop: for (int i = 0; i < len; i++) { + if (ignoreColumns != null) { + for (int c : ignoreColumns) { + if (c == i) { + continue loop; + } + } + } String sa = a[i]; 
String sb = b[i]; if (sa == null || sb == null) { @@ -1169,7 +1216,7 @@ private static String formatRow(String[] row) { * @param conn the database connection */ protected void crash(Connection conn) { - ((JdbcConnection) conn).setPowerOffCount(1); + setPowerOffCount(conn, 1); try { conn.createStatement().execute("SET WRITE_DELAY 0"); conn.createStatement().execute("CREATE TABLE TEST_A(ID INT)"); @@ -1184,6 +1231,31 @@ protected void crash(Connection conn) { } } + /** + * Set the number of disk operations before power failure is simulated. + * To disable the countdown, use 0. + * + * @param conn the connection + * @param i the number of operations + */ + public static void setPowerOffCount(Connection conn, int i) { + SessionLocal session = (SessionLocal) ((JdbcConnection) conn).getSession(); + if (session != null) { + session.getDatabase().setPowerOffCount(i); + } + } + + /** + * Returns the number of disk operations before power failure is simulated. + * + * @param conn the connection + * @return the number of disk operations before power failure is simulated + */ + protected static int getPowerOffCount(Connection conn) { + SessionLocal session = (SessionLocal) ((JdbcConnection) conn).getSession(); + return session != null && !session.isClosed() ? session.getDatabase().getPowerOffCount() : 0; + } + /** * Read a string from the reader. This method reads until end of file. * @@ -1260,8 +1332,7 @@ protected void assertEquals(Integer expected, Integer actual) { protected void assertEqualDatabases(Statement stat1, Statement stat2) throws SQLException { ResultSet rs = stat1.executeQuery( - "select value from information_schema.settings " + - "where name='ANALYZE_AUTO'"); + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'ANALYZE_AUTO'"); int analyzeAuto = rs.next() ? 
rs.getInt(1) : 0; if (analyzeAuto > 0) { stat1.execute("analyze"); @@ -1342,20 +1413,19 @@ public static String getJVM() { * @param remainingKB the number of kilobytes that are not referenced */ protected void eatMemory(int remainingKB) { - byte[] reserve = new byte[remainingKB * 1024]; - // first, eat memory in 16 KB blocks, then eat in 16 byte blocks - for (int size = 16 * 1024; size > 0; size /= 1024) { - while (true) { - try { - byte[] block = new byte[16 * 1024]; - memory.add(block); - } catch (OutOfMemoryError e) { - break; - } + long memoryFreeKB; + try { + while ((memoryFreeKB = Utils.getMemoryFree()) > remainingKB) { + long blockSize = Math.max((memoryFreeKB - remainingKB) / 16, 16) * 1024; + memory.add(new byte[blockSize > MAX_ARRAY_SIZE ? MAX_ARRAY_SIZE : (int) blockSize]); + } + } catch (OutOfMemoryError e) { + if (remainingKB >= 3000) { // OOM is not expected + memory.clear(); + throw e; } + // OOM can be ignored because it's tolerable (separate process?) } - // silly code - makes sure there are no warnings - reserve[0] = reserve[1]; } /** @@ -1381,36 +1451,42 @@ protected void freeMemory() { */ protected T assertThrows(final Class expectedExceptionClass, final T obj) { - return assertThrows(new ResultVerifier() { - @Override - public boolean verify(Object returnValue, Throwable t, Method m, - Object... 
args) { - if (t == null) { - throw new AssertionError("Expected an exception of type " + - expectedExceptionClass.getSimpleName() + - " to be thrown, but the method returned " + - returnValue + - " for " + ProxyCodeGenerator.formatMethodCall(m, args)); - } - if (!expectedExceptionClass.isAssignableFrom(t.getClass())) { - AssertionError ae = new AssertionError( - "Expected an exception of type\n" + - expectedExceptionClass.getSimpleName() + - " to be thrown, but the method under test " + - "threw an exception of type\n" + - t.getClass().getSimpleName() + - " (see in the 'Caused by' for the exception " + - "that was thrown) " + - " for " + ProxyCodeGenerator. - formatMethodCall(m, args)); - ae.initCause(t); - throw ae; - } - return false; + return assertThrows((returnValue, t, m, args) -> { + if (t == null) { + throw new AssertionError("Expected an exception of type " + + expectedExceptionClass.getSimpleName() + + " to be thrown, but the method returned " + + returnValue + + " for " + formatMethodCall(m, args)); } + if (!expectedExceptionClass.isAssignableFrom(t.getClass())) { + AssertionError ae = new AssertionError("Expected an exception of type\n" + + expectedExceptionClass.getSimpleName() + + " to be thrown, but the method under test threw an exception of type\n" + + t.getClass().getSimpleName() + + " (see in the 'Caused by' for the exception that was thrown) for " + + formatMethodCall(m, args)); + ae.initCause(t); + throw ae; + } + return false; }, obj); } + private static String formatMethodCall(Method m, Object... args) { + StringBuilder builder = new StringBuilder(); + builder.append(m.getName()).append('('); + for (int i = 0; i < args.length; i++) { + Object a = args[i]; + if (i > 0) { + builder.append(", "); + } + builder.append(a == null ? "null" : a.toString()); + } + builder.append(")"); + return builder.toString(); + } + /** * Verify the next method call on the object will throw an exception. 
* @@ -1419,31 +1495,10 @@ public boolean verify(Object returnValue, Throwable t, Method m, * @param obj the object to wrap * @return a proxy for the object */ - protected T assertThrows(final int expectedErrorCode, final T obj) { - return assertThrows(new ResultVerifier() { - @Override - public boolean verify(Object returnValue, Throwable t, Method m, - Object... args) { - int errorCode; - if (t instanceof DbException) { - errorCode = ((DbException) t).getErrorCode(); - } else if (t instanceof SQLException) { - errorCode = ((SQLException) t).getErrorCode(); - } else { - errorCode = 0; - } - if (errorCode != expectedErrorCode) { - AssertionError ae = new AssertionError( - "Expected an SQLException or DbException with error code " - + expectedErrorCode - + ", but got a " + (t == null ? "null" : - t.getClass().getName() + " exception " - + " with error code " + errorCode)); - ae.initCause(t); - throw ae; - } - return false; - } + protected T assertThrows(int expectedErrorCode, T obj) { + return assertThrows((returnValue, t, m, args) -> { + checkErrorCode(expectedErrorCode, t); + return false; }, obj); } @@ -1501,39 +1556,124 @@ public Object invoke(Object proxy, Method method, Object[] args) } } }; - if (!ProxyCodeGenerator.isGenerated(c)) { - Class[] interfaces = c.getInterfaces(); - if (Modifier.isFinal(c.getModifiers()) - || (interfaces.length > 0 && getClass() != c)) { - // interface class proxies - if (interfaces.length == 0) { - throw new RuntimeException("Can not create a proxy for the class " + - c.getSimpleName() + - " because it doesn't implement any interfaces and is final"); - } - return (T) Proxy.newProxyInstance(c.getClassLoader(), interfaces, ih); - } + Class[] interfaces = c.getInterfaces(); + if (interfaces.length == 0) { + throw new RuntimeException("Can not create a proxy for the class " + + c.getSimpleName() + + " because it doesn't implement any interfaces and is final"); } + return (T) Proxy.newProxyInstance(c.getClassLoader(), interfaces, ih); + 
} + + @FunctionalInterface + protected interface VoidCallable { + + /** + * call the lambda + */ + void call() throws Exception; + + } + + /** + * Assert that the lambda function throws an exception of the expected class. + * + * @param expectedExceptionClass expected exception class + * @param c lambda function + */ + protected void assertThrows(Class expectedExceptionClass, Callable c) { try { - Class pc = ProxyCodeGenerator.getClassProxy(c); - Constructor cons = pc - .getConstructor(new Class[] { InvocationHandler.class }); - return (T) cons.newInstance(new Object[] { ih }); - } catch (Exception e) { - throw new RuntimeException(e); + Object returnValue = c.call(); + throw new AssertionError("Expected an exception of type " + expectedExceptionClass.getSimpleName() + + " to be thrown, but the method returned " + returnValue); + } catch (Throwable t) { + checkException(expectedExceptionClass, t); } } /** - * Create a proxy class that extends the given class. + * Assert that the lambda function throws an exception of the expected class. * - * @param clazz the class + * @param expectedExceptionClass expected exception class + * @param c lambda function */ - protected void createClassProxy(Class clazz) { + protected void assertThrows(Class expectedExceptionClass, VoidCallable c) { try { - ProxyCodeGenerator.getClassProxy(clazz); - } catch (Exception e) { - throw new RuntimeException(e); + c.call(); + throw new AssertionError("Expected an exception of type " + expectedExceptionClass.getSimpleName() + + " to be thrown, but the method returned successfully"); + } catch (Throwable t) { + checkException(expectedExceptionClass, t); + } + } + + /** + * Assert that the lambda function throws a SQLException or DbException with the + * expected error code. 
+ * + * @param expectedErrorCode SQL error code + * @param c lambda function + */ + protected void assertThrows(int expectedErrorCode, Callable c) { + try { + Object returnValue = c.call(); + throw new AssertionError("Expected an SQLException or DbException with error code " + expectedErrorCode + + " to be thrown, but the method returned " + returnValue); + } catch (Throwable t) { + checkErrorCode(expectedErrorCode, t); + } + } + + /** + * Assert that the lambda function throws a SQLException or DbException with the + * expected error code. + * + * @param expectedErrorCode SQL error code + * @param c lambda function + */ + protected void assertThrows(int expectedErrorCode, VoidCallable c) { + try { + c.call(); + throw new AssertionError("Expected an SQLException or DbException with error code " + expectedErrorCode + + " to be thrown, but the method returned successfully"); + } catch (Throwable t) { + checkErrorCode(expectedErrorCode, t); + } + } + + private static void checkException(Class expectedExceptionClass, Throwable t) throws AssertionError { + if (!expectedExceptionClass.isAssignableFrom(t.getClass())) { + AssertionError ae = new AssertionError("Expected an exception of type\n" + + expectedExceptionClass.getSimpleName() + " to be thrown, but an exception of type\n" + + t.getClass().getSimpleName() + " was thrown"); + ae.initCause(t); + throw ae; + } + } + + /** + * Verify that actual error code is the one expected + * @param expectedErrorCode to compare against + * @param t actual exception to extract error code from + * @throws AssertionError if code is unexpected + */ + public static void checkErrorCode(int expectedErrorCode, Throwable t) throws AssertionError { + int errorCode; + if (t instanceof DbException) { + errorCode = ((DbException) t).getErrorCode(); + } else if (t instanceof SQLException) { + errorCode = ((SQLException) t).getErrorCode(); + } else if (t instanceof MVStoreException) { + errorCode = ((MVStoreException) t).getErrorCode(); + } else 
{ + errorCode = 0; + } + if (errorCode != expectedErrorCode) { + AssertionError ae = new AssertionError("Expected an SQLException or DbException with error code " + + expectedErrorCode + ", but got a " + + (t == null ? "null" : t.getClass().getName() + " exception " + " with error code " + errorCode)); + ae.initCause(t); + throw ae; } } @@ -1563,7 +1703,7 @@ public int read(byte[] buffer, int off, int len) { * @param e the exception to throw */ public static void throwException(Throwable e) { - TestBase.throwThis(e); + TestBase.throwThis(e); } @SuppressWarnings("unchecked") diff --git a/h2/src/test/org/h2/test/TestDb.java b/h2/src/test/org/h2/test/TestDb.java index 39df1e65db..5694a052a8 100644 --- a/h2/src/test/org/h2/test/TestDb.java +++ b/h2/src/test/org/h2/test/TestDb.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test; @@ -20,13 +20,6 @@ */ public abstract class TestDb extends TestBase { - /** - * Start the TCP server if enabled in the configuration. - */ - protected void startServerIfRequired() throws SQLException { - config.beforeTest(); - } - /** * Open a database connection in admin mode. The default user name and * password is used. 
@@ -63,11 +56,7 @@ public Connection getConnection(String name, String user, String password) protected String getURL(String name, boolean admin) { String url; if (name.startsWith("jdbc:")) { - if (config.mvStore) { - name = addOption(name, "MV_STORE", "true"); - } else { - name = addOption(name, "MV_STORE", "false"); - } + name = addOption(name, "MV_STORE", "true"); return name; } if (admin) { @@ -95,12 +84,8 @@ protected String getURL(String name, boolean admin) { } else { url = name; } - if (config.mvStore) { - url = addOption(url, "MV_STORE", "true"); - url = addOption(url, "MAX_COMPACT_TIME", "0"); // to speed up tests - } else { - url = addOption(url, "MV_STORE", "false"); - } + url = addOption(url, "MV_STORE", "true"); + url = addOption(url, "MAX_COMPACT_TIME", "0"); // to speed up tests if (!config.memory) { if (config.smallLog && admin) { url = addOption(url, "MAX_LOG_SIZE", "1"); @@ -113,7 +98,6 @@ protected String getURL(String name, boolean admin) { url = addOption(url, "TRACE_LEVEL_FILE", "" + config.traceLevelFile); url = addOption(url, "TRACE_MAX_FILE_SIZE", "8"); } - url = addOption(url, "LOG", "1"); if (config.throttleDefault > 0) { url = addOption(url, "THROTTLE", "" + config.throttleDefault); } else if (config.throttle > 0) { @@ -127,7 +111,6 @@ protected String getURL(String name, boolean admin) { // force operations to disk url = addOption(url, "MAX_OPERATION_MEMORY", "1"); } - url = addOption(url, "MULTI_THREADED", config.multiThreaded ? 
"TRUE" : "FALSE"); if (config.lazy) { url = addOption(url, "LAZY_QUERY_EXECUTION", "1"); } @@ -141,9 +124,6 @@ protected String getURL(String name, boolean admin) { if (config.cipher != null) { url = addOption(url, "CIPHER", config.cipher); } - if (config.defrag) { - url = addOption(url, "DEFRAG_ALWAYS", "TRUE"); - } if (config.collation != null) { url = addOption(url, "COLLATION", config.collation); } diff --git a/h2/src/test/org/h2/test/ap/TestAnnotationProcessor.java b/h2/src/test/org/h2/test/ap/TestAnnotationProcessor.java index 8e2d10d058..16a8b2adcb 100644 --- a/h2/src/test/org/h2/test/ap/TestAnnotationProcessor.java +++ b/h2/src/test/org/h2/test/ap/TestAnnotationProcessor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.ap; diff --git a/h2/src/test/org/h2/test/ap/package.html b/h2/src/test/org/h2/test/ap/package.html index 97881008ef..588b02ba02 100644 --- a/h2/src/test/org/h2/test/ap/package.html +++ b/h2/src/test/org/h2/test/ap/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/auth/MyLoginModule.java b/h2/src/test/org/h2/test/auth/MyLoginModule.java index acd97f492d..0a899bbdbf 100644 --- a/h2/src/test/org/h2/test/auth/MyLoginModule.java +++ b/h2/src/test/org/h2/test/auth/MyLoginModule.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: Alessandro Ventura */ package org.h2.test.auth; diff --git a/h2/src/test/org/h2/test/auth/TestAuthentication.java b/h2/src/test/org/h2/test/auth/TestAuthentication.java index d0e8ceac03..68a581c22f 100644 --- a/h2/src/test/org/h2/test/auth/TestAuthentication.java +++ b/h2/src/test/org/h2/test/auth/TestAuthentication.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Alessandro Ventura */ package org.h2.test.auth; @@ -10,7 +10,6 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.util.HashMap; -import java.util.Properties; import java.util.UUID; import javax.security.auth.login.AppConfigurationEntry; @@ -22,7 +21,7 @@ import org.h2.engine.Database; import org.h2.engine.Engine; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.User; import org.h2.jdbcx.JdbcConnectionPool; import org.h2.security.auth.DefaultAuthenticator; @@ -53,7 +52,7 @@ public class TestAuthentication extends TestBase { private String externalUserPassword; private DefaultAuthenticator defaultAuthenticator; - private Session session; + private SessionLocal session; private Database database; /** @@ -62,7 +61,7 @@ public class TestAuthentication extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } /** @@ -126,10 +125,8 @@ public void test() throws Exception { Configuration oldConfiguration = Configuration.getConfiguration(); try { configureJaas(); - Properties properties = new Properties(); - properties.setProperty("USER", "dba"); - ConnectionInfo connectionInfo = new ConnectionInfo(getDatabaseURL(), properties); - session = Engine.getInstance().createSession(connectionInfo); + ConnectionInfo connectionInfo = new ConnectionInfo(getDatabaseURL(), null, "dba", null); + session = Engine.createSession(connectionInfo); database = session.getDatabase(); configureAuthentication(database); try { diff --git a/h2/src/test/org/h2/test/auth/package.html b/h2/src/test/org/h2/test/auth/package.html index 8c810fab5d..3a5a38abf5 100644 --- a/h2/src/test/org/h2/test/auth/package.html +++ b/h2/src/test/org/h2/test/auth/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/bench/Bench.java b/h2/src/test/org/h2/test/bench/Bench.java index a9ed94a099..89c42a1525 100644 --- a/h2/src/test/org/h2/test/bench/Bench.java +++ b/h2/src/test/org/h2/test/bench/Bench.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.bench; diff --git a/h2/src/test/org/h2/test/bench/BenchA.java b/h2/src/test/org/h2/test/bench/BenchA.java index 99da7b234b..16818da381 100644 --- a/h2/src/test/org/h2/test/bench/BenchA.java +++ b/h2/src/test/org/h2/test/bench/BenchA.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.bench; @@ -103,27 +103,16 @@ public void init(Database db, int size) throws SQLException { db.commit(); db.closeConnection(); db.end(); - -// db.start(this, "Open/Close"); -// db.openConnection(); -// db.closeConnection(); -// db.end(); } @Override public void runTest() throws SQLException { - - database.start(this, "Transactions"); database.openConnection(); + database.start(this, "Transactions"); processTransactions(); - database.closeConnection(); database.end(); - - database.openConnection(); - processTransactions(); database.logMemory(this, "Memory Usage"); database.closeConnection(); - } private void processTransactions() throws SQLException { diff --git a/h2/src/test/org/h2/test/bench/BenchB.java b/h2/src/test/org/h2/test/bench/BenchB.java index 78fb7cbd7e..2aa5536ad4 100644 --- a/h2/src/test/org/h2/test/bench/BenchB.java +++ b/h2/src/test/org/h2/test/bench/BenchB.java @@ -1,13 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.bench; import java.sql.Connection; import java.sql.PreparedStatement; -import java.sql.ResultSet; import java.sql.SQLException; import java.util.Random; @@ -34,8 +33,8 @@ public class BenchB implements Bench, Runnable { // client data private BenchB master; private Connection conn; - private PreparedStatement updateAccount; private PreparedStatement selectAccount; + private PreparedStatement updateAccount; private PreparedStatement updateTeller; private PreparedStatement updateBranch; private PreparedStatement insertHistory; @@ -50,10 +49,15 @@ private BenchB(BenchB master, int seed) throws SQLException { random = new Random(seed); conn = master.database.openNewConnection(); conn.setAutoCommit(false); + try { + selectAccount = conn.prepareStatement( + "SELECT ABALANCE FROM ACCOUNTS WHERE AID=? FOR UPDATE"); + } catch (SQLException ignored) { + selectAccount = conn.prepareStatement( + "SELECT ABALANCE FROM ACCOUNTS WHERE AID=?"); + } updateAccount = conn.prepareStatement( "UPDATE ACCOUNTS SET ABALANCE=ABALANCE+? WHERE AID=?"); - selectAccount = conn.prepareStatement( - "SELECT ABALANCE FROM ACCOUNTS WHERE AID=?"); updateTeller = conn.prepareStatement( "UPDATE TELLERS SET TBALANCE=TBALANCE+? 
WHERE TID=?"); updateBranch = conn.prepareStatement( @@ -85,7 +89,7 @@ public void init(Database db, int size) throws SQLException { "BID INT, ABALANCE INT, FILLER VARCHAR(84))", "CREATE TABLE HISTORY(" + "TID INT, BID INT, AID INT, " + - "DELTA INT, TIME DATETIME, FILLER VARCHAR(22))" }; + "DELTA INT, HTIME DATETIME, FILLER VARCHAR(22))" }; for (String sql : create) { db.update(sql); } @@ -127,10 +131,6 @@ public void init(Database db, int size) throws SQLException { db.commit(); db.closeConnection(); db.end(); -// db.start(this, "Open/Close"); -// db.openConnection(); -// db.closeConnection(); -// db.end(); } /** @@ -147,72 +147,74 @@ protected int getTransactionsPerClient(int size) { public void run() { int accountsPerBranch = ACCOUNTS / BRANCHES; for (int i = 0; i < master.transactionPerClient; i++) { - int branch = random.nextInt(BRANCHES); - int teller = random.nextInt(TELLERS); - int account; - if (random.nextInt(100) < 85) { - account = random.nextInt(accountsPerBranch) + branch * accountsPerBranch; - } else { - account = random.nextInt(ACCOUNTS); + try { + int branch = random.nextInt(BRANCHES); + int teller = random.nextInt(TELLERS); + int account; + if (random.nextInt(100) < 85) { + account = random.nextInt(accountsPerBranch) + branch * accountsPerBranch; + } else { + account = random.nextInt(ACCOUNTS); + } + int delta = random.nextInt(1000); + doOne(branch, teller, account, -delta); + try { + conn.commit(); + } catch (SQLException e) { + e.printStackTrace(); + } + } catch (SQLException ignore) { + try { + conn.rollback(); + } catch (SQLException e) { + e.printStackTrace(); + } } - int delta = random.nextInt(1000); - doOne(branch, teller, account, delta); } try { + conn.setAutoCommit(true); conn.close(); } catch (SQLException e) { - // ignore + e.printStackTrace(); } } - private void doOne(int branch, int teller, int account, int delta) { - try { - // UPDATE ACCOUNTS SET ABALANCE=ABALANCE+? WHERE AID=? 
- updateAccount.setInt(1, delta); - updateAccount.setInt(2, account); - master.database.update(updateAccount, "UpdateAccounts"); - - // SELECT ABALANCE FROM ACCOUNTS WHERE AID=? - selectAccount.setInt(1, account); - ResultSet rs = master.database.query(selectAccount); - while (rs.next()) { - rs.getInt(1); - } + private void doOne(int branch, int teller, int account, int delta) throws SQLException { + selectAccount.setInt(1, account); + master.database.queryReadResult(selectAccount); - // UPDATE TELLERS SET TBALANCE=TABLANCE+? WHERE TID=? - updateTeller.setInt(1, delta); - updateTeller.setInt(2, teller); - master.database.update(updateTeller, "UpdateTeller"); - - // UPDATE BRANCHES SET BBALANCE=BBALANCE+? WHERE BID=? - updateBranch.setInt(1, delta); - updateBranch.setInt(2, branch); - master.database.update(updateBranch, "UpdateBranch"); - - // INSERT INTO HISTORY(TID, BID, AID, DELTA) VALUES(?, ?, ?, ?) - insertHistory.setInt(1, teller); - insertHistory.setInt(2, branch); - insertHistory.setInt(3, account); - insertHistory.setInt(4, delta); - master.database.update(insertHistory, "InsertHistory"); - conn.commit(); - } catch (SQLException e) { - e.printStackTrace(); - } + updateAccount.setInt(1, delta); + updateAccount.setInt(2, account); + master.database.update(updateAccount, "UpdateAccounts"); + + updateTeller.setInt(1, delta); + updateTeller.setInt(2, teller); + master.database.update(updateTeller, "UpdateTeller"); + + updateBranch.setInt(1, delta); + updateBranch.setInt(2, branch); + master.database.update(updateBranch, "UpdateBranch"); + + insertHistory.setInt(1, teller); + insertHistory.setInt(2, branch); + insertHistory.setInt(3, account); + insertHistory.setInt(4, delta); + master.database.update(insertHistory, "InsertHistory"); } + private void clearHistory() throws SQLException { + database.update("DELETE FROM HISTORY"); + } @Override public void runTest() throws Exception { Database db = database; - db.start(this, "Transactions"); db.openConnection(); + 
db.start(this, "Transactions"); processTransactions(); - db.closeConnection(); db.end(); - db.openConnection(); - processTransactions(); db.logMemory(this, "Memory Usage"); + clearHistory(); db.closeConnection(); } diff --git a/h2/src/test/org/h2/test/bench/BenchC.java b/h2/src/test/org/h2/test/bench/BenchC.java index 143bedea92..ffe906df61 100644 --- a/h2/src/test/org/h2/test/bench/BenchC.java +++ b/h2/src/test/org/h2/test/bench/BenchC.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.bench; @@ -545,18 +545,13 @@ private void loadDistrict(int wId) throws SQLException { @Override public void runTest() throws SQLException { - database.start(this, "Transactions"); database.openConnection(); + database.start(this, "Transactions"); for (int i = 0; i < 70; i++) { BenchCThread process = new BenchCThread(database, this, random, i); process.process(); } - database.closeConnection(); database.end(); - - database.openConnection(); - BenchCThread process = new BenchCThread(database, this, random, 0); - process.process(); database.logMemory(this, "Memory Usage"); database.closeConnection(); } diff --git a/h2/src/test/org/h2/test/bench/BenchCRandom.java b/h2/src/test/org/h2/test/bench/BenchCRandom.java index e275531de8..4e6e2bbbad 100644 --- a/h2/src/test/org/h2/test/bench/BenchCRandom.java +++ b/h2/src/test/org/h2/test/bench/BenchCRandom.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.bench; diff --git a/h2/src/test/org/h2/test/bench/BenchCThread.java b/h2/src/test/org/h2/test/bench/BenchCThread.java index 3525dc423d..eee6b846ed 100644 --- a/h2/src/test/org/h2/test/bench/BenchCThread.java +++ b/h2/src/test/org/h2/test/bench/BenchCThread.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.bench; diff --git a/h2/src/test/org/h2/test/bench/BenchSimple.java b/h2/src/test/org/h2/test/bench/BenchSimple.java index 45b6836c22..9caf5afa83 100644 --- a/h2/src/test/org/h2/test/bench/BenchSimple.java +++ b/h2/src/test/org/h2/test/bench/BenchSimple.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.bench; @@ -92,18 +92,8 @@ public void runTest() throws SQLException { db.update(prep, "deleteTest"); } db.end(); - - db.closeConnection(); - - db.openConnection(); - prep = db.prepare("SELECT * FROM TEST WHERE ID=?"); - for (int i = 0; i < records; i++) { - prep.setInt(1, random.nextInt(records)); - db.queryReadResult(prep); - } db.logMemory(this, "Memory Usage"); db.closeConnection(); - } @Override diff --git a/h2/src/test/org/h2/test/bench/Database.java b/h2/src/test/org/h2/test/bench/Database.java index b263bbbb43..000f30cd47 100644 --- a/h2/src/test/org/h2/test/bench/Database.java +++ b/h2/src/test/org/h2/test/bench/Database.java @@ -1,11 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.bench; +import java.io.IOException; +import java.io.InputStream; import java.io.PrintWriter; +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; import java.lang.reflect.Method; import java.sql.Connection; import java.sql.DriverManager; @@ -23,7 +27,6 @@ import org.h2.test.TestBase; import org.h2.tools.Server; import org.h2.util.StringUtils; -import org.h2.util.Utils; /** * Represents a database in the benchmark test application. @@ -43,11 +46,10 @@ class Database { private Statement stat; private long lastTrace; private final Random random = new Random(1); - private final ArrayList results = new ArrayList<>(); + private ArrayList results = new ArrayList<>(); private int totalTime; private int totalGCTime; private final AtomicInteger executedStatements = new AtomicInteger(); - private int threadCount; private Server serverH2; private Object serverDerby; @@ -85,10 +87,20 @@ int getTotalGCTime() { * * @return the result array */ - ArrayList getResults() { + ArrayList getResults() { return results; } + ArrayList reset() { + executedStatements.set(0); + totalTime = 0; + totalGCTime = 0; + lastTrace = 0; + ArrayList measurements = results; + results = new ArrayList<>(); + return measurements; + } + /** * Get the random number generator. 
* @@ -103,7 +115,11 @@ Random getRandom() { */ void startServer() throws Exception { if (url.startsWith("jdbc:h2:tcp:")) { - serverH2 = Server.createTcpServer().start(); + try { + serverH2 = Server.createTcpServer("-ifNotExists").start(); + } catch (SQLException e) { + serverH2 = Server.createTcpServer().start(); + } Thread.sleep(100); } else if (url.startsWith("jdbc:derby://")) { serverDerby = Class.forName( @@ -123,9 +139,9 @@ void startServer() throws Exception { } Method m = c.getMethod("main", String[].class); m.invoke(null, new Object[] { new String[] { "-database.0", - "data/mydb;hsqldb.default_table_type=cached", "-dbname.0", "xdb" } }); - // org.hsqldb.Server.main(new String[]{"-database.0", "mydb", - // "-dbname.0", "xdb"}); + "data/mydb;hsqldb.default_table_type=cached;hsqldb.write_delay_millis=1000", + "-dbname.0", "xdb" } }); + // org.hsqldb.Server.main(new String[]{"-database.0", "mydb", "-dbname.0", "xdb"}); serverHSQLDB = true; Thread.sleep(100); } @@ -161,29 +177,28 @@ void stopServer() throws Exception { * @param test the test application * @param id the database id * @param dbString the configuration string - * @param threadCount the number of threads to use + * @param properties to use * @return a new database object with the given settings */ - static Database parse(DatabaseTest test, int id, String dbString, - int threadCount) { + static Database parse(DatabaseTest test, int id, String dbString, Properties properties) { try { StringTokenizer tokenizer = new StringTokenizer(dbString, ","); Database db = new Database(); db.id = id; - db.threadCount = threadCount; db.test = test; db.name = tokenizer.nextToken().trim(); String driver = tokenizer.nextToken().trim(); Class.forName(driver); db.url = tokenizer.nextToken().trim(); db.user = tokenizer.nextToken().trim(); - db.password = ""; + db.password = null; if (tokenizer.hasMoreTokens()) { db.password = tokenizer.nextToken().trim(); } + db.setTranslations(properties); return db; } catch (Exception 
e) { - System.out.println("Cannot load database " + dbString + " :" + e.toString()); + System.out.println("Cannot load database " + dbString + ": " + e); return null; } } @@ -284,7 +299,7 @@ private String getSQL(String sql) { void start(Bench bench, String action) { this.currentAction = bench.getName() + ": " + action; this.startTimeNs = System.nanoTime(); - this.initialGCTime = Utils.getGarbageCollectionTime(); + this.initialGCTime = getGarbageCollectionTime(); } /** @@ -293,7 +308,7 @@ void start(Bench bench, String action) { */ void end() { long time = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeNs); - long gcCollectionTime = Utils.getGarbageCollectionTime() - initialGCTime; + long gcCollectionTime = getGarbageCollectionTime() - initialGCTime; log(currentAction, "ms", (int) time); if (test.isCollect()) { totalTime += time; @@ -301,6 +316,17 @@ void end() { } } + public static long getGarbageCollectionTime() { + long totalGCTime = 0; + for (GarbageCollectorMXBean gcMXBean : ManagementFactory.getGarbageCollectorMXBeans()) { + long collectionTime = gcMXBean.getCollectionTime(); + if(collectionTime > 0) { + totalGCTime += collectionTime; + } + } + return totalGCTime; + } + /** * Drop a table. Errors are ignored. * @@ -402,12 +428,12 @@ void logMemory(Bench bench, String action) { * If data collection is enabled, add this information to the log. 
* * @param action the action - * @param scale the scale + * @param unit of the value * @param value the value */ - void log(String action, String scale, int value) { + void log(String action, String unit, int value) { if (test.isCollect()) { - results.add(new Object[] { action, scale, Integer.valueOf(value) }); + results.add(new Measurement(action, unit, value)); } } @@ -436,12 +462,13 @@ ResultSet query(PreparedStatement prep) throws SQLException { * @param prep the prepared statement */ void queryReadResult(PreparedStatement prep) throws SQLException { - ResultSet rs = query(prep); - ResultSetMetaData meta = rs.getMetaData(); - int columnCount = meta.getColumnCount(); - while (rs.next()) { - for (int i = 0; i < columnCount; i++) { - rs.getString(i + 1); + try (ResultSet rs = query(prep)) { + ResultSetMetaData meta = rs.getMetaData(); + int columnCount = meta.getColumnCount(); + while (rs.next()) { + for (int i = 0; i < columnCount; i++) { + rs.getString(i + 1); + } } } } @@ -464,10 +491,6 @@ int getId() { return id; } - int getThreadsCount() { - return threadCount; - } - /** * The interface used for a test. 
*/ @@ -487,6 +510,30 @@ public interface DatabaseTest { */ void trace(String msg); + /** + * Load testing properties + * @return Properties + * @throws IOException on failure + */ + default Properties loadProperties() throws IOException { + Properties prop = new Properties(); + try (InputStream in = getClass().getResourceAsStream("test.properties")) { + prop.load(in); + } + return prop; + } } + public static final class Measurement + { + final String name; + final String unit; + final int value; + + public Measurement(String name, String unit, int value) { + this.name = name; + this.unit = unit; + this.value = value; + } + } } diff --git a/h2/src/test/org/h2/test/bench/TestPerformance.java b/h2/src/test/org/h2/test/bench/TestPerformance.java index dc4cc1837d..e8b8ee4280 100644 --- a/h2/src/test/org/h2/test/bench/TestPerformance.java +++ b/h2/src/test/org/h2/test/bench/TestPerformance.java @@ -1,12 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.bench; import java.io.FileWriter; -import java.io.InputStream; import java.io.PrintWriter; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -19,7 +18,6 @@ import java.util.Properties; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.util.IOUtils; import org.h2.util.JdbcUtils; /** @@ -72,10 +70,7 @@ private void test(String... 
args) throws Exception { int dbId = -1; boolean exit = false; String out = "benchmark.html"; - Properties prop = new Properties(); - InputStream in = getClass().getResourceAsStream("test.properties"); - prop.load(in); - in.close(); + Properties prop = loadProperties(); int size = Integer.parseInt(prop.getProperty("size")); for (int i = 0; i < args.length; i++) { String arg = args[i]; @@ -100,9 +95,8 @@ private void test(String... args) throws Exception { } String dbString = prop.getProperty("db" + i); if (dbString != null) { - Database db = Database.parse(this, i, dbString, 1); + Database db = Database.parse(this, i, dbString, prop); if (db != null) { - db.setTranslations(prop); dbs.add(db); } } @@ -117,37 +111,33 @@ private void test(String... args) throws Exception { } testAll(dbs, tests, size); collect = false; - if (dbs.size() == 0) { + if (dbs.isEmpty()) { return; } - ArrayList results = dbs.get(0).getResults(); - Connection conn = null; - PreparedStatement prep = null; - Statement stat = null; - PrintWriter writer = null; - try { + ArrayList results = dbs.get(0).getResults(); + try (Connection conn = getResultConnection()) { openResults(); - conn = getResultConnection(); - stat = conn.createStatement(); - prep = conn.prepareStatement( + try (PreparedStatement prep = conn.prepareStatement( "INSERT INTO RESULTS(TESTID, TEST, " + - "UNIT, DBID, DB, RESULT) VALUES(?, ?, ?, ?, ?, ?)"); - for (int i = 0; i < results.size(); i++) { - Object[] res = results.get(i); - prep.setInt(1, i); - prep.setString(2, res[0].toString()); - prep.setString(3, res[1].toString()); - for (Database db : dbs) { - prep.setInt(4, db.getId()); - prep.setString(5, db.getName()); - Object[] v = db.getResults().get(i); - prep.setString(6, v[2].toString()); - prep.execute(); + "UNIT, DBID, DB, RESULT) VALUES(?, ?, ?, ?, ?, ?)")) { + for (int i = 0; i < results.size(); i++) { + Database.Measurement res = results.get(i); + prep.setInt(1, i); + prep.setString(2, res.name); + prep.setString(3, 
res.unit); + for (Database db : dbs) { + prep.setInt(4, db.getId()); + prep.setString(5, db.getName()); + Database.Measurement measurement = db.getResults().get(i); + prep.setString(6, String.valueOf(measurement.value)); + prep.execute(); + } } } - writer = new PrintWriter(new FileWriter(out)); - ResultSet rs = stat.executeQuery( + try (Statement stat = conn.createStatement(); + PrintWriter writer = new PrintWriter(new FileWriter(out)); + ResultSet rs = stat.executeQuery( "CALL '' " + "|| (SELECT GROUP_CONCAT('' " + "ORDER BY DBID SEPARATOR '') FROM " + @@ -160,58 +150,13 @@ private void test(String... args) throws Exception { "R2.TESTID = R1.TESTID) || '' " + "ORDER BY TESTID SEPARATOR CHAR(10)) FROM " + "(SELECT DISTINCT TESTID, TEST, UNIT FROM RESULTS) R1)" + - "|| '
          Test CaseUnit' || DB || '
          '" - ); - rs.next(); - String result = rs.getString(1); - writer.println(result); - } finally { - JdbcUtils.closeSilently(prep); - JdbcUtils.closeSilently(stat); - JdbcUtils.closeSilently(conn); - IOUtils.closeSilently(writer); + "|| ''")) { + rs.next(); + String result = rs.getString(1); + writer.println(result); + } } -// ResultSet rsDbs = conn.createStatement().executeQuery( -// "SELECT DB RESULTS GROUP BY DBID, DB ORDER BY DBID"); -// while(rsDbs.next()) { -// writer.println("" + rsDbs.getString(1) + ""); -// } -// ResultSet rs = conn.createStatement().executeQuery( -// "SELECT TEST, UNIT FROM RESULTS " + -// "GROUP BY TESTID, TEST, UNIT ORDER BY TESTID"); -// while(rs.next()) { -// writer.println("" + rs.getString(1) + ""); -// writer.println("" + rs.getString(2) + ""); -// ResultSet rsRes = conn.createStatement().executeQuery( -// "SELECT RESULT FROM RESULTS WHERE TESTID=? ORDER BY DBID"); -// -// -// } - -// PrintWriter writer = -// new PrintWriter(new FileWriter("benchmark.html")); -// writer.println(""); -// for(int j=0; j" + db.getName() + ""); -// } -// writer.println(""); -// for(int i=0; i"); -// writer.println(""); -// for(int j=0; j" + v[2] + ""); -// } -// writer.println(""); -// } -// writer.println("
          Test CaseUnit
          " + res[0] + "" + res[1] + "
          "); - if (exit) { System.exit(0); } @@ -231,17 +176,19 @@ private void testAll(ArrayList dbs, ArrayList tests, db.startServer(); Connection conn = db.openNewConnection(); DatabaseMetaData meta = conn.getMetaData(); - System.out.println(" " + meta.getDatabaseProductName() + " " + - meta.getDatabaseProductVersion()); + System.out.println("Database: " + meta.getDatabaseProductName() + " " + meta.getDatabaseProductVersion()); + System.out.println("Driver: " + meta.getDriverName() + " " + meta.getDriverVersion()); runDatabase(db, tests, 1); runDatabase(db, tests, 1); + db.reset(); collect = true; runDatabase(db, tests, size); conn.close(); db.log("Executed statements", "#", db.getExecutedStatements()); db.log("Total time", "ms", db.getTotalTime()); + System.out.println("Total time: " + db.getTotalTime() + " ms"); int statPerSec = (int) (db.getExecutedStatements() * 1000L / db.getTotalTime()); - db.log("Statements per second", "#", statPerSec); + db.log("Statements per second", "#/s", statPerSec); System.out.println("Statements per second: " + statPerSec); System.out.println("GC overhead: " + (100 * db.getTotalGCTime() / db.getTotalTime()) + "%"); collect = false; diff --git a/h2/src/test/org/h2/test/bench/TestScalability.java b/h2/src/test/org/h2/test/bench/TestScalability.java index bc7af3f2fa..998cde64a5 100644 --- a/h2/src/test/org/h2/test/bench/TestScalability.java +++ b/h2/src/test/org/h2/test/bench/TestScalability.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.bench; @@ -15,10 +15,11 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; +import java.util.List; +import java.util.Properties; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.util.IOUtils; -import org.h2.util.JdbcUtils; +import org.h2.test.bench.Database.Measurement; /** * Used to compare scalability between the old engine and the new MVStore @@ -42,7 +43,7 @@ public class TestScalability implements Database.DatabaseTest { * @param args the command line parameters */ public static void main(String... args) throws Exception { - new TestScalability().test(); + new TestScalability().test(args); } private static Connection getResultConnection() throws SQLException { @@ -51,49 +52,59 @@ private static Connection getResultConnection() throws SQLException { } private static void openResults() throws SQLException { - Connection conn = null; - Statement stat = null; - try { - conn = getResultConnection(); - stat = conn.createStatement(); + try (Connection conn = getResultConnection(); + Statement stat = conn.createStatement()) { stat.execute( "CREATE TABLE IF NOT EXISTS RESULTS(TESTID INT, " + - "TEST VARCHAR, UNIT VARCHAR, DBID INT, " + - "DB VARCHAR, TCNT INT, RESULT VARCHAR)"); - } finally { - JdbcUtils.closeSilently(stat); - JdbcUtils.closeSilently(conn); + "TEST VARCHAR, UNIT VARCHAR, DBID INT, " + + "DB VARCHAR, TCNT INT, RESULT VARCHAR)"); } } - private void test() throws Exception { - FileUtils.deleteRecursive("data", true); - final String out = "benchmark.html"; - final int size = 400; - - ArrayList dbs = new ArrayList<>(); - int id = 1; - final String h2Url = "jdbc:h2:./data/test;" + - "LOCK_TIMEOUT=10000;MV_STORE=FALSE"; - dbs.add(createDbEntry(id++, "H2", 1, h2Url)); - dbs.add(createDbEntry(id++, "H2", 2, h2Url)); - dbs.add(createDbEntry(id++, "H2", 4, h2Url)); - dbs.add(createDbEntry(id++, "H2", 8, h2Url)); - dbs.add(createDbEntry(id++, 
"H2", 16, h2Url)); - dbs.add(createDbEntry(id++, "H2", 32, h2Url)); - dbs.add(createDbEntry(id++, "H2", 64, h2Url)); - - final String mvUrl = "jdbc:h2:./data/mvTest;" + - "MULTI_THREADED=1;LOCK_MODE=0"; - dbs.add(createDbEntry(id++, "MV", 1, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 2, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 4, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 8, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 16, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 32, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 64, mvUrl)); - - final BenchB test = new BenchB() { + private void test(String... args) throws Exception { + int dbId = -1; + boolean exit = false; + String out = "scalability.html"; + int size = 400; + for (int i = 0; i < args.length; i++) { + String arg = args[i]; + if ("-db".equals(arg)) { + dbId = Integer.parseInt(args[++i]); + } else if ("-init".equals(arg)) { + FileUtils.deleteRecursive("data", true); + } else if ("-out".equals(arg)) { + out = args[++i]; + } else if ("-trace".equals(arg)) { + trace = true; + } else if ("-exit".equals(arg)) { + exit = true; + } else if ("-size".equals(arg)) { + size = Integer.parseInt(args[++i]); + } + } + + Properties prop = loadProperties(); + + ArrayList dbs = new ArrayList<>(); + for (int id = 0; id < 100; id++) { + if (dbId != -1 && id != dbId) { + continue; + } + String dbString = prop.getProperty("db" + id); + if (dbString != null) { + Database db = Database.parse(this, id, dbString, prop); + if (db != null) { + int runCount = 8; + String valueStr = prop.getProperty("runCount" + id); + if (valueStr != null) { + runCount = Integer.parseInt(valueStr); + } + dbs.add(new RunSequence(db, runCount)); + } + } + } + + BenchB test = new BenchB() { // Since we focus on scalability here, lets emphasize multi-threaded // part of the test (transactions) and minimize impact of the init. 
@Override @@ -102,110 +113,125 @@ protected int getTransactionsPerClient(int size) { } }; testAll(dbs, test, size); - collect = false; - ArrayList results = dbs.get(0).getResults(); - Connection conn = null; - PreparedStatement prep = null; - Statement stat = null; - PrintWriter writer = null; - try { + List results = dbs.get(0).results.get(0); + try (Connection conn = getResultConnection()) { openResults(); - conn = getResultConnection(); - stat = conn.createStatement(); - prep = conn.prepareStatement( + try (PreparedStatement prep = conn.prepareStatement( "INSERT INTO RESULTS(TESTID, " + - "TEST, UNIT, DBID, DB, TCNT, RESULT) VALUES(?, ?, ?, ?, ?, ?, ?)"); - for (int i = 0; i < results.size(); i++) { - Object[] res = results.get(i); - prep.setInt(1, i); - prep.setString(2, res[0].toString()); - prep.setString(3, res[1].toString()); - for (Database db : dbs) { - prep.setInt(4, db.getId()); - prep.setString(5, db.getName()); - prep.setInt(6, db.getThreadsCount()); - Object[] v = db.getResults().get(i); - prep.setString(7, v[2].toString()); - prep.execute(); + "TEST, UNIT, DBID, DB, TCNT, RESULT) VALUES(?, ?, ?, ?, ?, ?, ?)")) { + for (int i = 0; i < results.size(); i++) { + Measurement res = results.get(i); + prep.setInt(1, i); + prep.setString(2, res.name); + prep.setString(3, res.unit); + for (RunSequence runSequence : dbs) { + Database db = runSequence.database; + int threadCount = 1; + for (List result : runSequence.results) { + if (result.size() > i) { + Measurement measurement = result.get(i); + prep.setInt(4, db.getId()); + prep.setString(5, db.getName()); + prep.setInt(6, threadCount); + prep.setString(7, String.valueOf(measurement.value)); + prep.execute(); + threadCount <<= 1; + } + } + } } } - writer = new PrintWriter(new FileWriter(out)); - ResultSet rs = stat.executeQuery( - "CALL '" + - "' " + - "|| (SELECT GROUP_CONCAT('' " + - "ORDER BY TCNT SEPARATOR '') FROM " + - "(SELECT TCNT, COUNT(*) COLSPAN FROM (SELECT DISTINCT DB, TCNT FROM RESULTS) GROUP 
BY TCNT))" + - "|| '' || CHAR(10) " + - "|| '' || (SELECT GROUP_CONCAT('' ORDER BY TCNT, DB SEPARATOR '')" + - " FROM (SELECT DISTINCT DB, TCNT FROM RESULTS)) || '' || CHAR(10) " + - "|| (SELECT GROUP_CONCAT('' || ( " + - "SELECT GROUP_CONCAT('' ORDER BY TCNT,DB SEPARATOR '')" + - " FROM RESULTS R2 WHERE R2.TESTID = R1.TESTID) || '' " + - "ORDER BY TESTID SEPARATOR CHAR(10)) FROM " + - "(SELECT DISTINCT TESTID, TEST, UNIT FROM RESULTS) R1)" + - "|| '
          Test CaseUnit' || TCNT || '
          ' || DB || '
          ' || TEST || '' || UNIT || '' || RESULT || '
          '"); - rs.next(); - String result = rs.getString(1); - writer.println(result); - } finally { - JdbcUtils.closeSilently(prep); - JdbcUtils.closeSilently(stat); - JdbcUtils.closeSilently(conn); - IOUtils.closeSilently(writer); + try (Statement stat = conn.createStatement(); + PrintWriter writer = new PrintWriter(new FileWriter(out)); + ResultSet rs = stat.executeQuery( + "CALL '" + + "' " + + "|| (SELECT GROUP_CONCAT('' " + + "ORDER BY TCNT SEPARATOR '') FROM " + + "(SELECT TCNT, COUNT(*) COLSPAN FROM (SELECT DISTINCT DB, TCNT FROM RESULTS) GROUP BY TCNT))" + + "|| '' || CHAR(10) " + + "|| '' || (SELECT GROUP_CONCAT('' ORDER BY TCNT, DB SEPARATOR '')" + + " FROM (SELECT DISTINCT DB, TCNT FROM RESULTS)) || '' || CHAR(10) " + + "|| (SELECT GROUP_CONCAT('' || ( " + + "SELECT GROUP_CONCAT('' ORDER BY TCNT,DB SEPARATOR '')" + + " FROM RESULTS R2 WHERE R2.TESTID = R1.TESTID) || '' " + + "ORDER BY TESTID SEPARATOR CHAR(10)) FROM " + + "(SELECT DISTINCT TESTID, TEST, UNIT FROM RESULTS) R1)" + + "|| '
          Test CaseUnit' || TCNT || '
          ' || DB || '
          ' || TEST || '' || UNIT || '' || RESULT || '
          '")) { + rs.next(); + String result = rs.getString(1); + writer.println(result); + } } - } - private Database createDbEntry(int id, String namePrefix, - int threadCount, String url) { - Database db = Database.parse(this, id, namePrefix + - ", org.h2.Driver, " + url + ", sa, sa", threadCount); - return db; + if (exit) { + System.exit(0); + } } + private void testAll(ArrayList runSequences, BenchB test, int size) throws Exception { + Database lastDb = null; + Connection conn = null; + for (RunSequence runSequence : runSequences) { + Database db = runSequence.database; + try { + if (lastDb != null) { + conn.close(); + lastDb.stopServer(); + Thread.sleep(1000); + // calls garbage collection + TestBase.getMemoryUsed(); + } + String dbName = db.getName(); + System.out.println("------------------"); + System.out.println("Testing the performance of " + dbName); + db.startServer(); + // hold one connection open during the whole test to keep database up + conn = db.openNewConnection(); + test.init(db, size); + + for (int runNo = 0, threadCount = 1; runNo < runSequence.runCount; runNo++, threadCount <<= 1) { + System.out.println("Testing the performance of " + dbName + + " (" + threadCount + " threads)"); + + DatabaseMetaData meta = conn.getMetaData(); + System.out.println(" " + meta.getDatabaseProductName() + " " + + meta.getDatabaseProductVersion()); + test.setThreadCount(threadCount); - private void testAll(ArrayList dbs, BenchB test, int size) - throws Exception { - for (int i = 0; i < dbs.size(); i++) { - if (i > 0) { - Thread.sleep(1000); + test.runTest(); + test.runTest(); + db.reset(); + collect = true; + test.runTest(); + + int executedStatements = db.getExecutedStatements(); + int totalTime = db.getTotalTime(); + int totalGCTime = db.getTotalGCTime(); + db.log("Executed statements", "#", executedStatements); + db.log("Total time", "ms", totalTime); + int statPerSec = (int) (executedStatements * 1000L / totalTime); + db.log("Statements per second", "#/s", 
statPerSec); + collect = false; + System.out.println("Statements per second: " + statPerSec); + System.out.println("GC overhead: " + (100 * totalGCTime / totalTime) + "%"); + ArrayList measurements = db.reset(); + runSequence.results.add(measurements); + } + } catch (Throwable ex) { + ex.printStackTrace(); + } finally { + lastDb = db; } - // calls garbage collection - TestBase.getMemoryUsed(); - Database db = dbs.get(i); - System.out.println("Testing the performance of " + db.getName() - + " (" + db.getThreadsCount() + " threads)"); - db.startServer(); - Connection conn = db.openNewConnection(); - DatabaseMetaData meta = conn.getMetaData(); - System.out.println(" " + meta.getDatabaseProductName() + " " + - meta.getDatabaseProductVersion()); - runDatabase(db, test, 1); - runDatabase(db, test, 1); - collect = true; - runDatabase(db, test, size); + } + if (lastDb != null) { conn.close(); - db.log("Executed statements", "#", db.getExecutedStatements()); - db.log("Total time", "ms", db.getTotalTime()); - int statPerSec = (int) (db.getExecutedStatements() * - 1000L / db.getTotalTime()); - db.log("Statements per second", "#", statPerSec); - System.out.println("Statements per second: " + statPerSec); - System.out.println("GC overhead: " + (100 * db.getTotalGCTime() / db.getTotalTime()) + "%"); - collect = false; - db.stopServer(); + lastDb.stopServer(); } } - private static void runDatabase(Database db, BenchB bench, int size) - throws Exception { - bench.init(db, size); - bench.setThreadCount(db.getThreadsCount()); - bench.runTest(); - } - /** * Print a message to system out if trace is enabled. 
* @@ -222,4 +248,16 @@ public void trace(String s) { public boolean isCollect() { return collect; } + + private static final class RunSequence + { + final Database database; + final int runCount; + final List> results = new ArrayList<>(); + + public RunSequence(Database dataBase, int runCount) { + this.database = dataBase; + this.runCount = runCount; + } + } } diff --git a/h2/src/test/org/h2/test/bench/package.html b/h2/src/test/org/h2/test/bench/package.html index e26331d6a9..e33caee6cf 100644 --- a/h2/src/test/org/h2/test/bench/package.html +++ b/h2/src/test/org/h2/test/bench/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/bench/test.properties b/h2/src/test/org/h2/test/bench/test.properties index 1239af1a7e..f81e595fe3 100644 --- a/h2/src/test/org/h2/test/bench/test.properties +++ b/h2/src/test/org/h2/test/bench/test.properties @@ -1,30 +1,29 @@ db1 = H2, org.h2.Driver, jdbc:h2:./data/test, sa, sa -#xdb1 = H2, org.h2.Driver, jdbc:h2:./data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3;DEFAULT_TABLE_ENGINE=org.h2.mvstore.db.MVTableEngine, sa, sa - -#xdb1 = H2, org.h2.Driver, jdbc:h2:./data/test;LOG=1;LOCK_TIMEOUT=10000;LOCK_MODE=3;ACCESS_MODE_DATA=rwd, sa, sa -#xdb2 = H2 (nio), org.h2.Driver, jdbc:h2:nio:data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3, sa, sa -#xdb3 = H2 (nioMapped), org.h2.Driver, jdbc:h2:nioMapped:data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3, sa, sa -#xdb2 = H2 (MVCC), org.h2.Driver, jdbc:h2:./data/test_mvcc;MVCC=TRUE, sa, sa -#xdb2 = H2 (XTEA), org.h2.Driver, jdbc:h2:./data/test_xtea;LOCK_TIMEOUT=10000;LOCK_MODE=3;CIPHER=XTEA, sa, sa 123 -#xdb3 = H2 (AES), org.h2.Driver, jdbc:h2:./data/test_aes;LOCK_TIMEOUT=10000;LOCK_MODE=3;CIPHER=AES, sa, sa 123 -#xdb4 = H2, org.h2.Driver, jdbc:h2:./data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3;write_mode_log=rws;write_delay=0, sa, sa -#xdb5 = H2_PG, org.postgresql.Driver, jdbc:postgresql://localhost:5435/h2test, sa, sa - -db2 = HSQLDB, org.hsqldb.jdbcDriver, 
jdbc:hsqldb:data/test;hsqldb.default_table_type=cached;sql.enforce_size=true, sa -db3 = Derby, org.apache.derby.jdbc.EmbeddedDriver, jdbc:derby:data/derby;create=true, sa, sa - -db4 = H2 (Server), org.h2.Driver, jdbc:h2:tcp://localhost/./data/testServer, sa, sa -db5 = HSQLDB, org.hsqldb.jdbcDriver, jdbc:hsqldb:hsql://localhost/xdb, sa -db6 = Derby, org.apache.derby.jdbc.ClientDriver, jdbc:derby://localhost/data/derbyServer;create=true, sa, sa -db7 = PostgreSQL, org.postgresql.Driver, jdbc:postgresql:test, sa, sa -db8 = MySQL, com.mysql.jdbc.Driver, jdbc:mysql://localhost/test?jdbcCompliantTruncation=false, sa, sa - -#db2 = MSSQLServer, com.microsoft.jdbc.sqlserver.SQLServerDriver, jdbc:microsoft:sqlserver://127.0.0.1:1433;DatabaseName=test, test, test -#db2 = Oracle, oracle.jdbc.driver.OracleDriver, jdbc:oracle:thin:@localhost:1521:XE, client, client -#db2 = Firebird, org.firebirdsql.jdbc.FBDriver, jdbc:firebirdsql:localhost:c:/temp/firebird/test, sysdba, masterkey -#db2 = DB2, COM.ibm.db2.jdbc.net.DB2Driver, jdbc:db2://localhost/test, test, test -#db2 = OneDollarDB, in.co.daffodil.db.jdbc.DaffodilDBDriver, jdbc:daffodilDB_embedded:school;path=C:/temp;create=true, sa +#db1 = H2 (forced), org.h2.Driver, jdbc:h2:./data/test;LOG=1;LOCK_TIMEOUT=10000;LOCK_MODE=3;ACCESS_MODE_DATA=rwd, sa, sa +#db1 = H2 (nio), org.h2.Driver, jdbc:h2:nio:data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3, sa, sa +#db1 = H2 (nioMapped), org.h2.Driver, jdbc:h2:nioMapped:data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3, sa, sa +#db1 = H2 (XTEA), org.h2.Driver, jdbc:h2:./data/test_xtea;LOCK_TIMEOUT=10000;LOCK_MODE=3;CIPHER=XTEA, sa, sa 123 +#db1 = H2 (AES), org.h2.Driver, jdbc:h2:./data/test_aes;LOCK_TIMEOUT=10000;LOCK_MODE=3;CIPHER=AES, sa, sa 123 + +db2 = HSQLDB, org.hsqldb.jdbc.JDBCDriver, jdbc:hsqldb:file:./data/test;hsqldb.default_table_type=cached;hsqldb.write_delay_millis=1000;shutdown=true, sa +db3 = Derby, org.apache.derby.jdbc.AutoloadedDriver, jdbc:derby:data/derby;create=true, sa, sa + +db4 = H2 
(C/S), org.h2.Driver, jdbc:h2:tcp://localhost/./data/testServer, sa, sa +db5 = HSQLDB (C/S), org.hsqldb.jdbcDriver, jdbc:hsqldb:hsql://localhost/xdb, sa +db6 = Derby (C/S), org.apache.derby.jdbc.ClientDriver, jdbc:derby://localhost/data/derbyServer;create=true, sa, sa +db7 = PG (C/S), org.postgresql.Driver, jdbc:postgresql://localhost:5432/test, sa, sa +db8 = MySQL (C/S), com.mysql.cj.jdbc.Driver, jdbc:mysql://localhost:3306/test, sa, sa + +#db9 = MSSQLServer, com.microsoft.jdbc.sqlserver.SQLServerDriver, jdbc:microsoft:sqlserver://127.0.0.1:1433;DatabaseName=test, test, test +#db9 = Oracle, oracle.jdbc.driver.OracleDriver, jdbc:oracle:thin:@localhost:1521:XE, client, client +#db9 = Firebird, org.firebirdsql.jdbc.FBDriver, jdbc:firebirdsql:localhost:test?encoding=UTF8, sa, sa +#db9 = DB2, COM.ibm.db2.jdbc.net.DB2Driver, jdbc:db2://localhost/test, test, test +#db9 = OneDollarDB, in.co.daffodil.db.jdbc.DaffodilDBDriver, jdbc:daffodilDB_embedded:school;path=C:/temp;create=true, sa +#db9 = SQLite, org.sqlite.JDBC, jdbc:sqlite:data/testSQLite.db, sa, sa + +db11 = H2 (mem), org.h2.Driver, jdbc:h2:mem:test;LOCK_MODE=0, sa, sa +db12 = HSQLDB (mem), org.hsqldb.jdbcDriver, jdbc:hsqldb:mem:data/test;hsqldb.tx=mvcc;shutdown=true, sa firebirdsql.datetime = TIMESTAMP postgresql.datetime = TIMESTAMP @@ -37,3 +36,10 @@ test3 = org.h2.test.bench.BenchB test4 = org.h2.test.bench.BenchC size = 5000 + +runCount3 = 4 +runCount5 = 4 +runCount6 = 4 +runCount7 = 7 +runCount8 = 4 +runCount12 = 5 \ No newline at end of file diff --git a/h2/src/test/org/h2/test/coverage/Coverage.java b/h2/src/test/org/h2/test/coverage/Coverage.java index 411fbe8f3b..380baea54a 100644 --- a/h2/src/test/org/h2/test/coverage/Coverage.java +++ b/h2/src/test/org/h2/test/coverage/Coverage.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.coverage; diff --git a/h2/src/test/org/h2/test/coverage/Profile.java b/h2/src/test/org/h2/test/coverage/Profile.java index 334dcfad57..06d57be0c5 100644 --- a/h2/src/test/org/h2/test/coverage/Profile.java +++ b/h2/src/test/org/h2/test/coverage/Profile.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.coverage; diff --git a/h2/src/test/org/h2/test/coverage/Tokenizer.java b/h2/src/test/org/h2/test/coverage/Tokenizer.java index 7f43b4fa90..611800f001 100644 --- a/h2/src/test/org/h2/test/coverage/Tokenizer.java +++ b/h2/src/test/org/h2/test/coverage/Tokenizer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.coverage; diff --git a/h2/src/test/org/h2/test/coverage/package.html b/h2/src/test/org/h2/test/coverage/package.html index 5232dc9156..72a52ae6ed 100644 --- a/h2/src/test/org/h2/test/coverage/package.html +++ b/h2/src/test/org/h2/test/coverage/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/db/AbstractBaseForCommonTableExpressions.java b/h2/src/test/org/h2/test/db/AbstractBaseForCommonTableExpressions.java index 8cd4165406..89a69297fe 100644 --- a/h2/src/test/org/h2/test/db/AbstractBaseForCommonTableExpressions.java +++ b/h2/src/test/org/h2/test/db/AbstractBaseForCommonTableExpressions.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; diff --git a/h2/src/test/org/h2/test/db/Db.java b/h2/src/test/org/h2/test/db/Db.java index 59cfedac2a..4c0542d68d 100644 --- a/h2/src/test/org/h2/test/db/Db.java +++ b/h2/src/test/org/h2/test/db/Db.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; diff --git a/h2/src/test/org/h2/test/db/TaskDef.java b/h2/src/test/org/h2/test/db/TaskDef.java index 22b219d853..46a2f15cf9 100644 --- a/h2/src/test/org/h2/test/db/TaskDef.java +++ b/h2/src/test/org/h2/test/db/TaskDef.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; diff --git a/h2/src/test/org/h2/test/db/TaskProcess.java b/h2/src/test/org/h2/test/db/TaskProcess.java index b01fab0b48..7fdd01d5c4 100644 --- a/h2/src/test/org/h2/test/db/TaskProcess.java +++ b/h2/src/test/org/h2/test/db/TaskProcess.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; diff --git a/h2/src/test/org/h2/test/db/TestAlter.java b/h2/src/test/org/h2/test/db/TestAlter.java index aff50520b3..1d27fdd419 100644 --- a/h2/src/test/org/h2/test/db/TestAlter.java +++ b/h2/src/test/org/h2/test/db/TestAlter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -10,7 +10,13 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.Collection; + import org.h2.api.ErrorCode; +import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.jdbc.JdbcConnection; +import org.h2.schema.Sequence; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -28,7 +34,7 @@ public class TestAlter extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -37,40 +43,22 @@ public void test() throws Exception { conn = getConnection(getTestName()); stat = conn.createStatement(); testAlterTableRenameConstraint(); - testAlterTableAlterColumnAsSelfColumn(); testAlterTableDropColumnWithReferences(); testAlterTableDropMultipleColumns(); - testAlterTableAlterColumnWithConstraint(); - testAlterTableAlterColumn(); testAlterTableAddColumnIdentity(); testAlterTableDropIdentityColumn(); testAlterTableAddColumnIfNotExists(); testAlterTableAddMultipleColumns(); - testAlterTableAlterColumn2(); testAlterTableAddColumnBefore(); testAlterTableAddColumnAfter(); testAlterTableAddMultipleColumnsBefore(); testAlterTableAddMultipleColumnsAfter(); - testAlterTableModifyColumn(); - testAlterTableModifyColumnSetNull(); - testAlterTableModifyColumnNotNullOracle(); conn.close(); deleteDb(getTestName()); } - private void testAlterTableAlterColumnAsSelfColumn() throws SQLException { - stat.execute("create table test(id int, name varchar)"); - stat.execute("alter table test alter column id int as id+1"); - stat.execute("insert into test values(1, 'Hello')"); - stat.execute("update test set name='World'"); - ResultSet rs = stat.executeQuery("select * from test"); - rs.next(); - assertEquals(3, rs.getInt(1)); - stat.execute("drop table test"); - } - private void testAlterTableDropColumnWithReferences() throws SQLException { - stat.execute("create table parent(id int, b int)"); + stat.execute("create table parent(id int primary key, b int)"); stat.execute("create table child(p int primary key)"); stat.execute("alter table child add foreign key(p) references parent(id)"); stat.execute("alter table parent drop column id"); @@ -133,27 +121,6 @@ private void testAlterTableDropMultipleColumns() throws SQLException { stat.execute("drop table test"); } - /** - * Tests a bug we used to have where altering the name of a column that had 
- * a check constraint that referenced itself would result in not being able - * to re-open the DB. - */ - private void testAlterTableAlterColumnWithConstraint() throws SQLException { - if (config.memory) { - return; - } - stat.execute("create table test(id int check(id in (1,2)) )"); - stat.execute("alter table test alter id rename to id2"); - // disconnect and reconnect - conn.close(); - conn = getConnection(getTestName()); - stat = conn.createStatement(); - stat.execute("insert into test values(1)"); - assertThrows(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, stat). - execute("insert into test values(3)"); - stat.execute("drop table test"); - } - private void testAlterTableRenameConstraint() throws SQLException { stat.execute("create table test(id int, name varchar(255))"); stat.execute("alter table test add constraint x check (id > name)"); @@ -162,33 +129,26 @@ private void testAlterTableRenameConstraint() throws SQLException { } private void testAlterTableDropIdentityColumn() throws SQLException { + Session iface = ((JdbcConnection) stat.getConnection()).getSession(); + if (!(iface instanceof SessionLocal)) { + return; + } + Collection allSequences = ((SessionLocal) iface).getDatabase().getMainSchema().getAllSequences(); stat.execute("create table test(id int auto_increment, name varchar)"); stat.execute("alter table test drop column id"); - ResultSet rs = stat.executeQuery("select * from INFORMATION_SCHEMA.SEQUENCES"); - assertFalse(rs.next()); + assertEquals(0, allSequences.size()); stat.execute("drop table test"); stat.execute("create table test(id int auto_increment, name varchar)"); stat.execute("alter table test drop column name"); - rs = stat.executeQuery("select * from INFORMATION_SCHEMA.SEQUENCES"); - assertTrue(rs.next()); + assertEquals(1, allSequences.size()); stat.execute("drop table test"); } - private void testAlterTableAlterColumn() throws SQLException { - stat.execute("create table t(x varchar) as select 'x'"); - 
assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, stat). - execute("alter table t alter column x int"); - stat.execute("drop table t"); - stat.execute("create table t(id identity, x varchar) as select null, 'x'"); - assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, stat). - execute("alter table t alter column x int"); - stat.execute("drop table t"); - } - private void testAlterTableAddColumnIdentity() throws SQLException { stat.execute("create table t(x varchar)"); - stat.execute("alter table t add id bigint identity(5, 5) not null"); + stat.execute("alter table t add id bigint generated by default as identity(start with 5 increment by 5)" + + " default on null"); stat.execute("insert into t values (null, null)"); stat.execute("insert into t values (null, null)"); ResultSet rs = stat.executeQuery("select id from t order by id"); @@ -281,54 +241,4 @@ private void testAlterTableAddColumnAfter() throws SQLException { stat.execute("drop table T"); } - private void testAlterTableAlterColumn2() throws SQLException { - // ensure that increasing a VARCHAR columns length takes effect because - // we optimize this case - stat.execute("create table t(x varchar(2)) as select 'x'"); - stat.execute("alter table t alter column x varchar(20)"); - stat.execute("insert into t values('Hello')"); - stat.execute("drop table t"); - } - - private void testAlterTableModifyColumn() throws SQLException { - stat.execute("create table t(x int)"); - stat.execute("alter table t modify column x varchar(20)"); - stat.execute("insert into t values('Hello')"); - stat.execute("drop table t"); - } - - /** - * Test for fix "Change not-null / null -constraint to existing column" - * (MySql/ORACLE - SQL style) that failed silently corrupting the changed - * column.
          - * Before the change (added after v1.4.196) following was observed: - *
          -     *  alter table T modify C int null; -- Worked as expected
          -     *  alter table T modify C null;     -- Silently corrupted column C
          -     * 
          - */ - private void testAlterTableModifyColumnSetNull() throws SQLException { - // This worked in v1.4.196 - stat.execute("create table T (C varchar not null)"); - stat.execute("alter table T modify C int null"); - stat.execute("insert into T values(null)"); - stat.execute("drop table T"); - // This failed in v1.4.196 - stat.execute("create table T (C int not null)"); - stat.execute("alter table T modify C null"); // Silently corrupted column C - stat.execute("insert into T values(null)"); // <- Fixed in v1.4.196 - NULL is allowed - stat.execute("drop table T"); - } - - private void testAlterTableModifyColumnNotNullOracle() throws SQLException { - stat.execute("create table foo (bar varchar(255))"); - stat.execute("alter table foo modify (bar varchar(255) not null)"); - try { - stat.execute("insert into foo values(null)"); - fail("Null should not be allowed after modification."); - } - catch(SQLException e) { - // This is what we expect, fails to insert null. - } - } } diff --git a/h2/src/test/org/h2/test/db/TestAlterSchemaRename.java b/h2/src/test/org/h2/test/db/TestAlterSchemaRename.java index 10389e482f..fa778daf0c 100644 --- a/h2/src/test/org/h2/test/db/TestAlterSchemaRename.java +++ b/h2/src/test/org/h2/test/db/TestAlterSchemaRename.java @@ -1,19 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; -import org.h2.api.ErrorCode; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import org.h2.api.ErrorCode; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + /** * Test ALTER SCHEMA RENAME statements. 
*/ @@ -28,7 +28,7 @@ public class TestAlterSchemaRename extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/db/TestAlterTableNotFound.java b/h2/src/test/org/h2/test/db/TestAlterTableNotFound.java new file mode 100644 index 0000000000..568f3c95bd --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestAlterTableNotFound.java @@ -0,0 +1,174 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +public class TestAlterTableNotFound extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testWithoutAnyCandidate(); + testWithoutAnyCandidateWhenDatabaseToLower(); + testWithoutAnyCandidateWhenDatabaseToUpper(); + testWithoutAnyCandidateWhenCaseInsensitiveIdentifiers(); + testWithOneCandidate(); + testWithOneCandidateWhenDatabaseToLower(); + testWithOneCandidateWhenDatabaseToUpper(); + testWithOneCandidateWhenCaseInsensitiveIdentifiers(); + testWithTwoCandidates(); + } + + private void testWithoutAnyCandidate() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=FALSE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T2 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.execute("ALTER TABLE t1 DROP COLUMN ID"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found;"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithoutAnyCandidateWhenDatabaseToLower() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_LOWER=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T2 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.execute("ALTER TABLE T1 DROP COLUMN ID"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found;"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithoutAnyCandidateWhenDatabaseToUpper() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_LOWER=FALSE;DATABASE_TO_UPPER=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T2 ( ID INT GENERATED BY 
DEFAULT AS IDENTITY )"); + try { + stat.execute("ALTER TABLE t1 DROP COLUMN ID"); + fail("Table `T1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"T1\" not found;"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithoutAnyCandidateWhenCaseInsensitiveIdentifiers() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=FALSE;CASE_INSENSITIVE_IDENTIFIERS=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T2 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.execute("ALTER TABLE t1 DROP COLUMN ID"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found;"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithOneCandidate() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=FALSE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.execute("ALTER TABLE t1 DROP COLUMN ID"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (candidates are: \"T1\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithOneCandidateWhenDatabaseToLower() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_LOWER=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE t1 ( ID INT GENERATED BY DEFAULT AS IDENTITY, PAYLOAD INT )"); + stat.execute("ALTER TABLE T1 DROP COLUMN PAYLOAD"); + conn.close(); + deleteDb(getTestName()); + } + + private void 
testWithOneCandidateWhenDatabaseToUpper() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY, PAYLOAD INT )"); + stat.execute("ALTER TABLE t1 DROP COLUMN PAYLOAD"); + conn.close(); + deleteDb(getTestName()); + } + + private void testWithOneCandidateWhenCaseInsensitiveIdentifiers() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=FALSE;CASE_INSENSITIVE_IDENTIFIERS=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY, PAYLOAD INT )"); + stat.execute("ALTER TABLE t1 DROP COLUMN PAYLOAD"); + conn.close(); + deleteDb(getTestName()); + } + + private void testWithTwoCandidates() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=FALSE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE Toast ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + stat.execute("CREATE TABLE TOAST ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.execute("ALTER TABLE toast DROP COLUMN ID"); + fail("Table `toast` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"toast\" not found (candidates are: \"TOAST, Toast\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private Connection getConnectionWithSettings(String settings) throws SQLException { + return getConnection(getTestName() + ";" + settings); + } +} diff --git a/h2/src/test/org/h2/test/db/TestAnalyzeTableTx.java b/h2/src/test/org/h2/test/db/TestAnalyzeTableTx.java index 4e5d31a335..ca65c1470b 100644 --- a/h2/src/test/org/h2/test/db/TestAnalyzeTableTx.java +++ b/h2/src/test/org/h2/test/db/TestAnalyzeTableTx.java @@ -1,16 
+1,17 @@ /* - * Copyright 2004-2017 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; -import org.h2.test.TestBase; -import org.h2.test.TestDb; import java.sql.Connection; import java.sql.ResultSet; import java.sql.Statement; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + public class TestAnalyzeTableTx extends TestDb { private static final int C = 10_000; @@ -20,7 +21,7 @@ public class TestAnalyzeTableTx extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/db/TestAutoRecompile.java b/h2/src/test/org/h2/test/db/TestAutoRecompile.java index 918981f1fe..e7fb639154 100644 --- a/h2/src/test/org/h2/test/db/TestAutoRecompile.java +++ b/h2/src/test/org/h2/test/db/TestAutoRecompile.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -25,7 +25,7 @@ public class TestAutoRecompile extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/db/TestBackup.java b/h2/src/test/org/h2/test/db/TestBackup.java index 81345efb22..31801b20a6 100644 --- a/h2/src/test/org/h2/test/db/TestBackup.java +++ b/h2/src/test/org/h2/test/db/TestBackup.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -31,7 +31,7 @@ public class TestBackup extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -57,7 +57,7 @@ private void testConcurrentBackup() throws SQLException { return; } deleteDb("backup"); - String url = getURL("backup;MULTI_THREADED=TRUE", true); + String url = getURL("backup", true); Connection conn = getConnection(url); final Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, name varchar)"); @@ -115,27 +115,7 @@ public void call() throws Exception { public static class BackupListener implements DatabaseEventListener { @Override - public void closingDatabase() { - // ignore - } - - @Override - public void exceptionThrown(SQLException e, String sql) { - // ignore - } - - @Override - public void init(String url) { - // ignore - } - - @Override - public void opened() { - // ignore - } - - @Override - public void setProgress(int state, String name, int x, int max) { + public void setProgress(int state, String name, long x, long max) { try { Thread.sleep(1); } catch (InterruptedException e) { @@ -189,7 +169,7 @@ private void testBackup() throws SQLException { stat1.execute("create 
table testlob" + "(id int primary key, b blob, c clob)"); stat1.execute("insert into testlob values" + - "(1, space(10000), repeat('00', 10000))"); + "(1, repeat(char(0), 10000), space(10000))"); conn2 = getConnection("backup"); stat2 = conn2.createStatement(); stat2.execute("insert into test values(3, 'third')"); diff --git a/h2/src/test/org/h2/test/db/TestBigDb.java b/h2/src/test/org/h2/test/db/TestBigDb.java index 04e2b7ed71..a4e35d0b0a 100644 --- a/h2/src/test/org/h2/test/db/TestBigDb.java +++ b/h2/src/test/org/h2/test/db/TestBigDb.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -27,7 +27,7 @@ public class TestBigDb extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -87,8 +87,8 @@ private void testLargeTable() throws SQLException { + "STATUS_CODE CHAR(3) DEFAULT SECURE_RAND(1)," + "INTRA_STAT_CODE CHAR(12) DEFAULT SECURE_RAND(6)," + "PRD_TITLE CHAR(50) DEFAULT SECURE_RAND(25)," - + "VALID_FROM DATE DEFAULT NOW()," - + "MOD_DATUM DATE DEFAULT NOW())"); + + "VALID_FROM DATE DEFAULT CURRENT_DATE," + + "MOD_DATUM DATE DEFAULT CURRENT_DATE)"); int len = getSize(10, 50000); try { PreparedStatement prep = conn.prepareStatement( @@ -99,7 +99,7 @@ private void testLargeTable() throws SQLException { long t = System.nanoTime(); if (t - time > TimeUnit.SECONDS.toNanos(1)) { time = t; - int free = Utils.getMemoryFree(); + long free = Utils.getMemoryFree(); println("i: " + i + " free: " + free + " used: " + Utils.getMemoryUsed()); } } diff --git a/h2/src/test/org/h2/test/db/TestBigResult.java b/h2/src/test/org/h2/test/db/TestBigResult.java index 12a2cf3b16..bb2e3fbba2 100644 --- a/h2/src/test/org/h2/test/db/TestBigResult.java +++ b/h2/src/test/org/h2/test/db/TestBigResult.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -34,7 +34,7 @@ public class TestBigResult extends TestDb { * ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -84,7 +84,7 @@ private void testSortingAndDistinct() throws SQLException { Connection conn = getConnection("bigResult"); Statement stat = conn.createStatement(); int count = getSize(1000, 4000); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE INT NOT NULL)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT NOT NULL)"); PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); for (int i = 0; i < count; i++) { ps.setInt(1, i); @@ -149,7 +149,7 @@ private void testSortingAndDistinct() throws SQLException { // external result testSortingAndDistinct3(stat, sql, 1, partCount); stat.execute("DROP TABLE TEST"); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE INT)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT)"); ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); for (int i = 0; i < count; i++) { ps.setInt(1, i); @@ -164,7 +164,7 @@ private void testSortingAndDistinct() throws SQLException { /* * Sorting and distinct */ - sql = "SELECT DISTINCT VALUE FROM TEST ORDER BY VALUE"; + sql = "SELECT DISTINCT V FROM TEST ORDER BY V"; // local result testSortingAndDistinct4(stat, sql, count, partCount); // external result @@ -172,7 +172,7 @@ private void testSortingAndDistinct() throws SQLException { /* * Distinct only */ - sql = "SELECT DISTINCT VALUE FROM TEST"; + sql = "SELECT DISTINCT V FROM TEST"; // local result testSortingAndDistinct4DistinctOnly(stat, sql, count, partCount); // external result @@ -180,7 +180,7 @@ private void testSortingAndDistinct() throws SQLException { /* * Sorting only */ - sql = "SELECT VALUE FROM TEST ORDER BY VALUE"; + sql = "SELECT V FROM TEST ORDER BY V"; // local result testSortingAndDistinct4SortingOnly(stat, sql, count, partCount); // external result @@ -190,7 +190,7 @@ private void testSortingAndDistinct() throws SQLException { private 
void testSortingAndDistinct1(Statement stat, int maxRows, int count) throws SQLException { stat.execute("SET MAX_MEMORY_ROWS " + maxRows); - ResultSet rs = stat.executeQuery("SELECT VALUE FROM (SELECT DISTINCT ID, VALUE FROM TEST ORDER BY VALUE)"); + ResultSet rs = stat.executeQuery("SELECT V FROM (SELECT DISTINCT ID, V FROM TEST ORDER BY V)"); for (int i = 1; i <= count; i++) { assertTrue(rs.next()); assertEquals(rs.getInt(1), i); @@ -313,7 +313,7 @@ private void testLOB() throws SQLException { Connection conn = getConnection("bigResult"); Statement stat = conn.createStatement(); stat.execute("SET MAX_MEMORY_ROWS " + 1); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE BLOB NOT NULL)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V BLOB NOT NULL)"); PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); int length = 1_000_000; byte[] data = new byte[length]; @@ -340,7 +340,7 @@ private void testLOB() throws SQLException { b.free(); } stat.execute("DROP TABLE TEST"); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE CLOB NOT NULL)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V CLOB NOT NULL)"); ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); char[] cdata = new char[length]; for (int i = 1; i <= 10; i++) { diff --git a/h2/src/test/org/h2/test/db/TestCases.java b/h2/src/test/org/h2/test/db/TestCases.java index b3db5f0468..d9512030bc 100644 --- a/h2/src/test/org/h2/test/db/TestCases.java +++ b/h2/src/test/org/h2/test/db/TestCases.java @@ -1,10 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; +import java.io.ByteArrayInputStream; import java.io.File; import java.io.StringReader; import java.sql.Connection; @@ -20,7 +21,6 @@ import java.util.Random; import java.util.concurrent.TimeUnit; import org.h2.api.ErrorCode; -import org.h2.engine.SysProperties; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -36,7 +36,7 @@ public class TestCases extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -52,14 +52,11 @@ public void test() throws Exception { testLargeKeys(); testExtraSemicolonInDatabaseURL(); testGroupSubquery(); - testSelfReferentialColumn(); testCountDistinctNotNull(); testDependencies(); - testDropTable(); testConvertType(); testSortedSelect(); testMaxMemoryRows(); - testDeleteTop(); testLikeExpressions(); testUnicode(); testOuterJoin(); @@ -83,6 +80,8 @@ public void test() throws Exception { testExecuteTrace(); testExplain(); testExplainAnalyze(); + testDataChangeDeltaTable(); + testGroupSortedReset(); if (config.memory) { return; } @@ -102,7 +101,6 @@ public void test() throws Exception { testDefaultQueryReconnect(); testBigString(); testRenameReconnect(); - testAllSizes(); testCreateDrop(); testPolePos(); testQuick(); @@ -111,7 +109,6 @@ public void test() throws Exception { testDoubleRecovery(); testConstraintReconnect(); testCollation(); - testBinaryCollation(); deleteDb("cases"); } @@ -148,6 +145,7 @@ private void testReferenceableIndexUsage() throws SQLException { stat.execute("drop table if exists a, b"); stat.execute("create table a(id int, x int) as select 1, 100"); stat.execute("create index idx1 on a(id, x)"); + stat.execute("alter table a add unique(id)"); stat.execute("create table b(id int primary key, a_id int) as select 1, 1"); stat.execute("alter table b add constraint x " + "foreign 
key(a_id) references a(id)"); @@ -178,9 +176,9 @@ private void testViewParameters() throws SQLException { Connection conn = getConnection("cases"); Statement stat = conn.createStatement(); stat.execute( - "create view test as select 0 value, 'x' name from dual"); + "create view test as select 0 v, 'x' name from dual"); PreparedStatement prep = conn.prepareStatement( - "select 1 from test where name=? and value=? and value<=?"); + "select 1 from test where name=? and v=? and v<=?"); prep.setString(1, "x"); prep.setInt(2, 0); prep.setInt(3, 1); @@ -231,16 +229,6 @@ private void testGroupSubquery() throws SQLException { conn.close(); } - private void testSelfReferentialColumn() throws SQLException { - deleteDb("selfreferential"); - Connection conn = getConnection("selfreferential"); - Statement stat = conn.createStatement(); - stat.execute("create table sr(id integer, usecount integer as usecount + 1)"); - assertThrows(ErrorCode.NULL_NOT_ALLOWED, stat).execute("insert into sr(id) values (1)"); - assertThrows(ErrorCode.MUST_GROUP_BY_COLUMN_1, stat).execute("select max(id), usecount from sr"); - conn.close(); - } - private void testCountDistinctNotNull() throws SQLException { deleteDb("cases"); Connection conn = getConnection("cases"); @@ -285,71 +273,6 @@ private void testDependencies() throws SQLException { conn.close(); } - private void testDropTable() throws SQLException { - trace("testDropTable"); - final boolean[] booleans = new boolean[] { true, false }; - for (final boolean stdDropTableRestrict : booleans) { - for (final boolean restrict : booleans) { - testDropTableNoReference(stdDropTableRestrict, restrict); - testDropTableViewReference(stdDropTableRestrict, restrict); - testDropTableForeignKeyReference(stdDropTableRestrict, restrict); - } - } - } - - private Statement createTable(final boolean stdDropTableRestrict) throws SQLException { - deleteDb("cases"); - Connection conn = getConnection("cases;STANDARD_DROP_TABLE_RESTRICT=" + stdDropTableRestrict); - 
Statement stat = conn.createStatement(); - stat.execute("create table test(id int)"); - return stat; - } - - private void dropTable(final boolean restrict, Statement stat, final boolean expectedDropSuccess) - throws SQLException { - assertThrows(expectedDropSuccess ? 0 : ErrorCode.CANNOT_DROP_2, stat) - .execute("drop table test " + (restrict ? "restrict" : "cascade")); - assertThrows(expectedDropSuccess ? ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 : 0, stat) - .execute("select * from test"); - } - - private void testDropTableNoReference(final boolean stdDropTableRestrict, final boolean restrict) - throws SQLException { - Statement stat = createTable(stdDropTableRestrict); - // always succeed as there's no reference to the table - dropTable(restrict, stat, true); - stat.getConnection().close(); - } - - private void testDropTableViewReference(final boolean stdDropTableRestrict, final boolean restrict) - throws SQLException { - Statement stat = createTable(stdDropTableRestrict); - stat.execute("create view abc as select * from test"); - // drop allowed only if cascade - final boolean expectedDropSuccess = !restrict; - dropTable(restrict, stat, expectedDropSuccess); - // missing view if the drop succeeded - assertThrows(expectedDropSuccess ? 
ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 : 0, stat).execute("select * from abc"); - stat.getConnection().close(); - } - - private void testDropTableForeignKeyReference(final boolean stdDropTableRestrict, final boolean restrict) - throws SQLException { - Statement stat = createTable(stdDropTableRestrict); - stat.execute("create table ref(id int, id_test int, foreign key (id_test) references test (id)) "); - // test table is empty, so the foreign key forces ref table to be also - // empty - assertThrows(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, stat) - .execute("insert into ref values(1,2)"); - // drop allowed if cascade or old style - final boolean expectedDropSuccess = !stdDropTableRestrict || !restrict; - dropTable(restrict, stat, expectedDropSuccess); - // insertion succeeds if the foreign key was dropped - assertThrows(expectedDropSuccess ? 0 : ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, stat) - .execute("insert into ref values(1,2)"); - stat.getConnection().close(); - } - private void testConvertType() throws SQLException { deleteDb("cases"); Connection conn = getConnection("cases"); @@ -380,9 +303,9 @@ private void testMaxMemoryRows() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key)"); stat.execute("insert into test values(1), (2)"); - stat.execute("select * from dual where x not in " + + stat.execute("select * from system_range(1, 1) where x not in " + "(select id from test order by id)"); - stat.execute("select * from dual where x not in " + + stat.execute("select * from system_range(1, 1) where x not in " + "(select id from test union select id from test)"); stat.execute("(select id from test order by id) " + "intersect (select id from test order by id)"); @@ -660,7 +583,7 @@ private void testConstraintAlterTable() throws SQLException { deleteDb("cases"); Connection conn = getConnection("cases"); Statement stat = conn.createStatement(); - stat.execute("create table 
parent (pid int)"); + stat.execute("create table parent (pid int primary key)"); stat.execute("create table child (cid int primary key, pid int)"); stat.execute("alter table child add foreign key (pid) references parent(pid)"); stat.execute("alter table child add column c2 int"); @@ -711,12 +634,12 @@ private void testLobDecrypt() throws SQLException { prep.setCharacterStream(2, new StringReader(value), -1); ResultSet rs = prep.executeQuery(); rs.next(); - String encrypted = rs.getString(1); + byte[] encrypted = rs.getBytes(1); PreparedStatement prep2 = conn.prepareStatement( "CALL TRIM(CHAR(0) FROM " + "UTF8TOSTRING(DECRYPT('AES', RAWTOHEX(?), ?)))"); prep2.setCharacterStream(1, new StringReader(key), -1); - prep2.setCharacterStream(2, new StringReader(encrypted), -1); + prep2.setBinaryStream(2, new ByteArrayInputStream(encrypted), -1); ResultSet rs2 = prep2.executeQuery(); rs2.first(); String decrypted = rs2.getString(1); @@ -741,12 +664,11 @@ private void testReservedKeywordReconnect() throws SQLException { conn.close(); } - private void testInvalidDatabaseName() throws SQLException { + private void testInvalidDatabaseName() { if (config.memory) { return; } - assertThrows(ErrorCode.INVALID_DATABASE_NAME_1, this). 
- getConnection("cases/"); + assertThrows(ErrorCode.INVALID_DATABASE_NAME_1, () -> getConnection("cases/")); } private void testReuseSpace() throws SQLException { @@ -899,28 +821,25 @@ private void testDisconnect() throws Exception { } deleteDb("cases"); Connection conn = getConnection("cases"); - final Statement stat = conn.createStatement(); + Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST(ID IDENTITY)"); for (int i = 0; i < 1000; i++) { stat.execute("INSERT INTO TEST() VALUES()"); } - final SQLException[] stopped = { null }; - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - long time = System.nanoTime(); - ResultSet rs = stat.executeQuery("SELECT MAX(T.ID) " + - "FROM TEST T, TEST, TEST, TEST, TEST, " + - "TEST, TEST, TEST, TEST, TEST, TEST"); - rs.next(); - time = System.nanoTime() - time; - TestBase.logError("query was too quick; result: " + - rs.getInt(1) + " time:" + TimeUnit.NANOSECONDS.toMillis(time), null); - } catch (SQLException e) { - stopped[0] = e; - // ok - } + SQLException[] stopped = { null }; + Thread t = new Thread(() -> { + try { + long time = System.nanoTime(); + ResultSet rs = stat.executeQuery("SELECT MAX(T.ID) " + + "FROM TEST T, TEST, TEST, TEST, TEST, " + + "TEST, TEST, TEST, TEST, TEST, TEST"); + rs.next(); + time = System.nanoTime() - time; + TestBase.logError("query was too quick; result: " + + rs.getInt(1) + " time:" + TimeUnit.NANOSECONDS.toMillis(time), null); + } catch (SQLException e) { + stopped[0] = e; + // ok } }); t.start(); @@ -997,30 +916,30 @@ private void testExplain() throws SQLException { checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE id = ?", "SELECT\n" + - " \"PERSON\".\"ID\",\n" + - " \"PERSON\".\"ORGID\",\n" + - " \"PERSON\".\"NAME\",\n" + - " \"PERSON\".\"SALARY\"\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + "FROM 
\"PUBLIC\".\"PERSON\"\n" + " /* PUBLIC.PRIMARY_KEY_8: ID = ?1 */\n" + "WHERE \"ID\" = ?1"); checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE id = 50", "SELECT\n" + - " \"PERSON\".\"ID\",\n" + - " \"PERSON\".\"ORGID\",\n" + - " \"PERSON\".\"NAME\",\n" + - " \"PERSON\".\"SALARY\"\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + "FROM \"PUBLIC\".\"PERSON\"\n" + " /* PUBLIC.PRIMARY_KEY_8: ID = 50 */\n" + "WHERE \"ID\" = 50"); checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE salary > ? and salary < ?", "SELECT\n" + - " \"PERSON\".\"ID\",\n" + - " \"PERSON\".\"ORGID\",\n" + - " \"PERSON\".\"NAME\",\n" + - " \"PERSON\".\"SALARY\"\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + "FROM \"PUBLIC\".\"PERSON\"\n" + " /* PUBLIC.PERSON.tableScan */\n" + "WHERE (\"SALARY\" > ?1)\n" + @@ -1028,10 +947,10 @@ private void testExplain() throws SQLException { checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE salary > 1000 and salary < 2000", "SELECT\n" + - " \"PERSON\".\"ID\",\n" + - " \"PERSON\".\"ORGID\",\n" + - " \"PERSON\".\"NAME\",\n" + - " \"PERSON\".\"SALARY\"\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + "FROM \"PUBLIC\".\"PERSON\"\n" + " /* PUBLIC.PERSON.tableScan */\n" + "WHERE (\"SALARY\" > 1000)\n" + @@ -1039,20 +958,20 @@ private void testExplain() throws SQLException { checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE name = lower(?)", "SELECT\n" + - " \"PERSON\".\"ID\",\n" + - " \"PERSON\".\"ORGID\",\n" + - " \"PERSON\".\"NAME\",\n" + - " \"PERSON\".\"SALARY\"\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " 
\"PUBLIC\".\"PERSON\".\"SALARY\"\n" + "FROM \"PUBLIC\".\"PERSON\"\n" + " /* PUBLIC.PERSON.tableScan */\n" + "WHERE \"NAME\" = LOWER(?1)"); checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE name = lower('Smith')", "SELECT\n" + - " \"PERSON\".\"ID\",\n" + - " \"PERSON\".\"ORGID\",\n" + - " \"PERSON\".\"NAME\",\n" + - " \"PERSON\".\"SALARY\"\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + "FROM \"PUBLIC\".\"PERSON\"\n" + " /* PUBLIC.PERSON.tableScan */\n" + "WHERE \"NAME\" = 'smith'"); @@ -1074,8 +993,8 @@ private void testExplain() throws SQLException { " /* PUBLIC.PRIMARY_KEY_8: ID = O.ID */\n" + " ON 1=1\n" + "WHERE (\"P\".\"ID\" = \"O\".\"ID\")\n" + - " AND ((\"O\".\"ID\" = ?1)\n" + - " AND (\"P\".\"SALARY\" > ?2))"); + " AND (\"O\".\"ID\" = ?1)\n" + + " AND (\"P\".\"SALARY\" > ?2)"); checkExplain(stat, "EXPLAIN SELECT * FROM PERSON p " + "INNER JOIN ORGANIZATION o ON p.id = o.id WHERE o.id = 10 AND p.salary > 1000", @@ -1094,8 +1013,8 @@ private void testExplain() throws SQLException { " /* PUBLIC.PRIMARY_KEY_8: ID = O.ID */\n" + " ON 1=1\n" + "WHERE (\"P\".\"ID\" = \"O\".\"ID\")\n" + - " AND ((\"O\".\"ID\" = 10)\n" + - " AND (\"P\".\"SALARY\" > 1000))"); + " AND (\"O\".\"ID\" = 10)\n" + + " AND (\"P\".\"SALARY\" > 1000)"); PreparedStatement pStat = conn.prepareStatement( "/* bla-bla */ EXPLAIN SELECT ID FROM ORGANIZATION WHERE id = ?"); @@ -1174,8 +1093,6 @@ private void testExplainAnalyze() throws SQLException { " \"O\".\"NAME\"\n" + "FROM \"PUBLIC\".\"PERSON\" \"P\"\n" + " /* PUBLIC.PRIMARY_KEY_8: ID = ?1 */\n" + - " /* WHERE P.ID = ?1\n" + - " */\n" + " /* scanCount: 2 */\n" + "INNER JOIN \"PUBLIC\".\"ORGANIZATION\" \"O\"\n" + " /* PUBLIC.PRIMARY_KEY_D: ID = ?1\n" + @@ -1183,9 +1100,8 @@ private void testExplainAnalyze() throws SQLException { " */\n" + " ON 1=1\n" + " /* scanCount: 2 */\n" + - "WHERE ((\"O\".\"ID\" = ?1)\n" + - " AND 
(\"O\".\"ID\" = \"P\".\"ID\"))\n" + - " AND (\"P\".\"ID\" = ?1)", + "WHERE (\"O\".\"ID\" = ?1)\n" + + " AND (\"O\".\"ID\" = \"P\".\"ID\")", rs.getString(1)); conn.close(); @@ -1221,7 +1137,7 @@ private void testAlterTableReconnect() throws SQLException { stat.execute("drop table test"); stat.execute("create table test(id identity)"); stat.execute("insert into test values(1)"); - assertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2, stat). + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, stat). execute("alter table test alter column id date"); conn.close(); conn = getConnection("cases"); @@ -1260,46 +1176,6 @@ private void testCollation() throws SQLException { conn.close(); } - private void testBinaryCollation() throws SQLException { - deleteDb("cases"); - Connection conn = getConnection("cases"); - Statement stat = conn.createStatement(); - ResultSet rs; - - // test the SIGNED mode - stat.execute("SET BINARY_COLLATION SIGNED"); - stat.execute("create table bin( x binary(1) );"); - stat.execute("insert into bin(x) values (x'09'),(x'0a'),(x'99'),(x'aa');"); - rs = stat.executeQuery("select * from bin order by x;"); - rs.next(); - assertEquals("99", rs.getString(1)); - rs.next(); - assertEquals("aa", rs.getString(1)); - rs.next(); - assertEquals("09", rs.getString(1)); - rs.next(); - assertEquals("0a", rs.getString(1)); - stat.execute("drop table bin"); - // test UNSIGNED mode (default) - stat.execute("SET BINARY_COLLATION UNSIGNED"); - stat.execute("create table bin( x binary(1) );"); - stat.execute("insert into bin(x) values (x'09'),(x'0a'),(x'99'),(x'aa');"); - rs = stat.executeQuery("select * from bin order by x;"); - rs.next(); - assertEquals("09", rs.getString(1)); - rs.next(); - assertEquals("0a", rs.getString(1)); - rs.next(); - assertEquals("99", rs.getString(1)); - rs.next(); - assertEquals("aa", rs.getString(1)); - stat.execute("drop table bin"); - stat.execute("SET BINARY_COLLATION " - + (SysProperties.SORT_BINARY_UNSIGNED ? 
"UNSIGNED" : "SIGNED")); - - conn.close(); - } - private void testPersistentSettings() throws SQLException { deleteDb("cases"); Connection conn = getConnection("cases"); @@ -1366,7 +1242,7 @@ private void testViewReconnect() throws SQLException { conn.close(); conn = getConnection("cases"); stat = conn.createStatement(); - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat). execute("select * from abc"); conn.close(); } @@ -1454,7 +1330,7 @@ private void testConstraintReconnect() throws SQLException { Statement stat = conn.createStatement(); stat.execute("drop table if exists parent"); stat.execute("drop table if exists child"); - stat.execute("create table parent(id int)"); + stat.execute("create table parent(id int primary key)"); stat.execute("create table child(c_id int, p_id int, " + "foreign key(p_id) references parent(id))"); stat.execute("insert into parent values(1), (2)"); @@ -1507,7 +1383,7 @@ private void testRenameReconnect() throws SQLException { deleteDb("cases"); Connection conn = getConnection("cases"); conn.createStatement().execute("CREATE TABLE TEST_SEQ" + - "(ID INT IDENTITY, NAME VARCHAR(255))"); + "(ID INT GENERATED BY DEFAULT AS IDENTITY, NAME VARCHAR(255))"); conn.createStatement().execute("CREATE TABLE TEST" + "(ID INT PRIMARY KEY)"); conn.createStatement().execute("ALTER TABLE TEST RENAME TO TEST2"); @@ -1515,8 +1391,8 @@ private void testRenameReconnect() throws SQLException { "(ID INT PRIMARY KEY, NAME VARCHAR, UNIQUE(NAME))"); conn.close(); conn = getConnection("cases"); - conn.createStatement().execute("INSERT INTO TEST_SEQ(NAME) VALUES('Hi')"); - ResultSet rs = conn.createStatement().executeQuery("CALL IDENTITY()"); + ResultSet rs = conn.createStatement().executeQuery( + "SELECT ID FROM FINAL TABLE(INSERT INTO TEST_SEQ(NAME) VALUES('Hi'))"); rs.next(); assertEquals(1, rs.getInt(1)); conn.createStatement().execute("SELECT * FROM TEST2"); @@ -1525,46 +1401,13 
@@ private void testRenameReconnect() throws SQLException { conn.close(); conn = getConnection("cases"); conn.createStatement().execute("SELECT * FROM TEST_B2"); - conn.createStatement().execute( - "INSERT INTO TEST_SEQ(NAME) VALUES('World')"); - rs = conn.createStatement().executeQuery("CALL IDENTITY()"); + rs = conn.createStatement().executeQuery( + "SELECT ID FROM FINAL TABLE(INSERT INTO TEST_SEQ(NAME) VALUES('World'))"); rs.next(); assertEquals(2, rs.getInt(1)); conn.close(); } - private void testAllSizes() throws SQLException { - trace("testAllSizes"); - deleteDb("cases"); - Connection conn = getConnection("cases"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(A INT, B INT, C INT, DATA VARCHAR)"); - int increment = getSize(100, 1); - for (int i = 1; i < 500; i += increment) { - StringBuilder buff = new StringBuilder(); - buff.append("CREATE TABLE TEST"); - for (int j = 0; j < i; j++) { - buff.append('a'); - } - buff.append("(ID INT)"); - String sql = buff.toString(); - stat.execute(sql); - stat.execute("INSERT INTO TEST VALUES(" + i + ", 0, 0, '" + sql + "')"); - } - conn.close(); - conn = getConnection("cases"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); - while (rs.next()) { - int id = rs.getInt(1); - String s = rs.getString("DATA"); - if (!s.endsWith(")")) { - fail("id=" + id); - } - } - conn.close(); - } - private void testSelectForUpdate() throws SQLException { trace("testSelectForUpdate"); deleteDb("cases"); @@ -1871,59 +1714,52 @@ private void testMinMaxDirectLookupIndex() throws SQLException { conn.close(); } - private void testDeleteTop() throws SQLException { - deleteDb("cases"); + /** Tests fix for bug #682: Queries with 'like' expressions may filter rows incorrectly */ + private void testLikeExpressions() throws SQLException { Connection conn = getConnection("cases"); Statement stat = conn.createStatement(); - - stat.execute("CREATE TABLE TEST(id int) AS " + - "SELECT x 
FROM system_range(1, 100)"); - stat.execute("DELETE TOP 10 FROM TEST"); - ResultSet rs = stat.executeQuery("SELECT COUNT(*) FROM TEST"); - assertTrue(rs.next()); - assertEquals(90, rs.getInt(1)); - - stat.execute("DELETE FROM TEST LIMIT ((SELECT COUNT(*) FROM TEST) / 10)"); - rs = stat.executeQuery("SELECT COUNT(*) FROM TEST"); + ResultSet rs = stat.executeQuery("select * from (select 'fo%' a union all select '%oo') where 'foo' like a"); assertTrue(rs.next()); - assertEquals(81, rs.getInt(1)); - - rs = stat.executeQuery("EXPLAIN DELETE " + - "FROM TEST LIMIT ((SELECT COUNT(*) FROM TEST) / 10)"); - rs.next(); - assertEquals("DELETE FROM \"PUBLIC\".\"TEST\"\n" + - " /* PUBLIC.TEST.tableScan */\n" + - "LIMIT ((SELECT\n" + - " COUNT(*)\n" + - "FROM \"PUBLIC\".\"TEST\"\n" + - " /* PUBLIC.TEST.tableScan */\n" + - "/* direct lookup */) / 10)", - rs.getString(1)); - - PreparedStatement prep; - prep = conn.prepareStatement("SELECT * FROM TEST LIMIT ?"); - prep.setInt(1, 10); - prep.execute(); - - prep = conn.prepareStatement("DELETE FROM TEST LIMIT ?"); - prep.setInt(1, 10); - prep.execute(); - rs = stat.executeQuery("SELECT COUNT(*) FROM TEST"); + assertEquals("fo%", rs.getString(1)); assertTrue(rs.next()); - assertEquals(71, rs.getInt(1)); - + assertEquals("%oo", rs.getString(1)); conn.close(); } - /** Tests fix for bug #682: Queries with 'like' expressions may filter rows incorrectly */ - private void testLikeExpressions() throws SQLException { + private void testDataChangeDeltaTable() throws SQLException { + /* + * This test case didn't reproduce the issue in the TestScript. + * + * The same UPDATE is necessary before and after usage of a data change + * delta table. 
+ */ + String updateCommand = "UPDATE TEST SET V = 3 WHERE ID = 1"; + deleteDb("cases"); Connection conn = getConnection("cases"); Statement stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * from (select 'fo%' a union all select '%oo') where 'foo' like a"); + stat.execute("CREATE TABLE TEST(ID INT, V INT)"); + assertEquals(0, stat.executeUpdate(updateCommand)); + ResultSet rs = stat.executeQuery("SELECT V FROM FINAL TABLE (INSERT INTO TEST VALUES (1, 1))"); assertTrue(rs.next()); - assertEquals("fo%", rs.getString(1)); + assertEquals(1, rs.getInt(1)); + assertEquals(1, stat.executeUpdate(updateCommand)); + rs = stat.executeQuery("SELECT V FROM TEST"); assertTrue(rs.next()); - assertEquals("%oo", rs.getString(1)); + assertEquals(3, rs.getInt(1)); conn.close(); } + + private void testGroupSortedReset() throws SQLException { + // This test case didn't reproduce the issue in the TestScript. + deleteDb("cases"); + Connection conn = getConnection("cases"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1(A INT PRIMARY KEY, B INT) AS VALUES (1, 4), (2, 5), (3, 6)"); + String sql = "SELECT B FROM T1 LEFT JOIN (VALUES 2) T2(A) USING(A) WHERE T2.A = 2 GROUP BY T1.A"; + stat.execute(sql); + stat.execute("UPDATE T1 SET B = 7 WHERE A = 3"); + stat.execute(sql); + conn.close(); + } + } diff --git a/h2/src/test/org/h2/test/db/TestCheckpoint.java b/h2/src/test/org/h2/test/db/TestCheckpoint.java index 28e6ccfe08..6cfc1e793f 100644 --- a/h2/src/test/org/h2/test/db/TestCheckpoint.java +++ b/h2/src/test/org/h2/test/db/TestCheckpoint.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -23,7 +23,7 @@ public class TestCheckpoint extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/db/TestCluster.java b/h2/src/test/org/h2/test/db/TestCluster.java index 147e99755b..6884892ff8 100644 --- a/h2/src/test/org/h2/test/db/TestCluster.java +++ b/h2/src/test/org/h2/test/db/TestCluster.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -32,7 +32,7 @@ public class TestCluster extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -428,10 +428,10 @@ private void testStartStopCluster() throws SQLException { // try to connect in standalone mode - should fail // should not be able to connect in standalone mode - assertThrows(ErrorCode.CLUSTER_ERROR_DATABASE_RUNS_CLUSTERED_1, this). - getConnection("jdbc:h2:tcp://localhost:"+port1+"/test", user, password); - assertThrows(ErrorCode.CLUSTER_ERROR_DATABASE_RUNS_CLUSTERED_1, this). 
- getConnection("jdbc:h2:tcp://localhost:"+port2+"/test", user, password); + assertThrows(ErrorCode.CLUSTER_ERROR_DATABASE_RUNS_CLUSTERED_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + port1 + "/test", user, password)); + assertThrows(ErrorCode.CLUSTER_ERROR_DATABASE_RUNS_CLUSTERED_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + port2 + "/test", user, password)); // test a cluster connection conn = getConnection("jdbc:h2:tcp://" + serverList + "/test", user, password); @@ -510,7 +510,7 @@ private void check(Connection conn, int len, String expectedCluster) assertFalse(rs.next()); } ResultSet rs = conn.createStatement().executeQuery( - "SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME='CLUSTER'"); + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'CLUSTER'"); String cluster = rs.next() ? rs.getString(1) : "''"; assertEquals(expectedCluster, cluster); } diff --git a/h2/src/test/org/h2/test/db/TestCompatibility.java b/h2/src/test/org/h2/test/db/TestCompatibility.java index f843b94524..b64cb97547 100644 --- a/h2/src/test/org/h2/test/db/TestCompatibility.java +++ b/h2/src/test/org/h2/test/db/TestCompatibility.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -32,7 +32,7 @@ public class TestCompatibility extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -40,7 +40,6 @@ public void test() throws SQLException { deleteDb("compatibility"); testCaseSensitiveIdentifiers(); - testKeyAsColumnInMySQLMode(); conn = getConnection("compatibility"); testDomain(); @@ -53,25 +52,19 @@ public void test() throws SQLException { testDB2(); testDerby(); testSybaseAndMSSQLServer(); - testIgnite(); testUnknownSet(); conn.close(); testIdentifiers(); + testIdentifiersCaseInResultSet(); + testDatabaseToLowerParser(); + testOldInformationSchema(); deleteDb("compatibility"); testUnknownURL(); } - private void testKeyAsColumnInMySQLMode() throws SQLException { - Connection c = getConnection("compatibility;MODE=MYSQL"); - Statement stat = c.createStatement(); - stat.execute("create table test(id int primary key, key varchar)"); - stat.execute("drop table test"); - c.close(); - } - private void testCaseSensitiveIdentifiers() throws SQLException { Connection c = getConnection("compatibility;DATABASE_TO_UPPER=FALSE;CASE_INSENSITIVE_IDENTIFIERS=TRUE"); Statement stat = c.createStatement(); @@ -149,22 +142,24 @@ private void testColumnAlias() throws SQLException { String[] modes = { "PostgreSQL", "MySQL", "HSQLDB", "MSSQLServer", "Derby", "Oracle", "Regular" }; String columnAlias; - columnAlias = "MySQL,Regular"; + columnAlias = "HSQLDB,MySQL,Regular"; stat.execute("CREATE TABLE TEST(ID INT)"); for (String mode : modes) { stat.execute("SET MODE " + mode); ResultSet rs = stat.executeQuery("SELECT ID I FROM TEST"); ResultSetMetaData meta = rs.getMetaData(); + assertEquals(mode + " mode", "I", meta.getColumnLabel(1)); String columnName = meta.getColumnName(1); String tableName = meta.getTableName(1); - if ("ID".equals(columnName) && "TEST".equals(tableName)) { - assertTrue(mode + " mode should not support columnAlias", - columnAlias.contains(mode)); - } else if ("I".equals(columnName) && tableName.equals("")) { - 
assertTrue(mode + " mode should support columnAlias", - columnAlias.indexOf(mode) < 0); + String schemaName = meta.getSchemaName(1); + if (columnAlias.contains(mode)) { + assertEquals(mode + " mode", "ID", columnName); + assertEquals(mode + " mode", "TEST", tableName); + assertEquals(mode + " mode", "PUBLIC", schemaName); } else { - fail(); + assertEquals(mode + " mode", "I", columnName); + assertEquals(mode + " mode", "", tableName); + assertEquals(mode + " mode", "", schemaName); } } stat.execute("DROP TABLE TEST"); @@ -174,7 +169,7 @@ private void testUniqueIndexSingleNull() throws SQLException { Statement stat = conn.createStatement(); String[] modes = { "PostgreSQL", "MySQL", "HSQLDB", "MSSQLServer", "Derby", "Oracle", "Regular" }; - String multiNull = "PostgreSQL,MySQL,Oracle,Regular"; + String multiNull = "PostgreSQL,MySQL,HSQLDB,Oracle,Regular"; for (String mode : modes) { stat.execute("SET MODE " + mode); stat.execute("CREATE TABLE TEST(ID INT)"); @@ -221,18 +216,6 @@ private void testHsqlDb() throws SQLException { stat.execute("CALL TODAY"); stat.execute("DROP TABLE TEST IF EXISTS"); - stat.execute("CREATE TABLE TEST(ID INT)"); - stat.execute("INSERT INTO TEST VALUES(1)"); - PreparedStatement prep = conn.prepareStatement( - "SELECT LIMIT ? 
1 ID FROM TEST"); - prep.setInt(1, 2); - prep.executeQuery(); - stat.execute("DROP TABLE TEST IF EXISTS"); - - stat.execute("DROP TABLE TEST IF EXISTS"); - stat.execute("CREATE TABLE TEST(ID INT)"); - stat.executeQuery("SELECT * FROM TEST WHERE ID IN ()"); - stat.execute("DROP TABLE TEST IF EXISTS"); } private void testLog(double expected, Statement stat) throws SQLException { @@ -290,12 +273,7 @@ private void testPostgreSQL() throws SQLException { String[] DISALLOWED_TYPES = {"NUMBER", "IDENTITY", "TINYINT", "BLOB"}; for (String type : DISALLOWED_TYPES) { stat.execute("DROP TABLE IF EXISTS TEST"); - try { - stat.execute("CREATE TABLE TEST(COL " + type + ")"); - fail("Expect type " + type + " to not exist in PostgreSQL mode"); - } catch (SQLException e) { - /* Expected! */ - } + assertThrows(ErrorCode.UNKNOWN_DATA_TYPE_1, stat).execute("CREATE TABLE TEST(COL " + type + ")"); } /* Test MONEY data type */ @@ -312,6 +290,18 @@ private void testPostgreSQL() throws SQLException { assertTrue(rs.next()); assertEquals(new BigDecimal("92233720368547758.07"), rs.getBigDecimal(1)); assertFalse(rs.next()); + + /* Test SET STATEMENT_TIMEOUT */ + assertEquals(0, stat.getQueryTimeout()); + conn.close(); + deleteDb("compatibility"); + // `stat.getQueryTimeout()` caches the result, so create another connection + conn = getConnection("compatibility;MODE=PostgreSQL"); + stat = conn.createStatement(); + // `STATEMENT_TIMEOUT` uses milliseconds + stat.execute("SET STATEMENT_TIMEOUT TO 30000"); + // `stat.getQueryTimeout()` returns seconds + assertEquals(30, stat.getQueryTimeout()); } private void testMySQL() throws SQLException { @@ -411,13 +401,29 @@ private void testMySQL() throws SQLException { stat.execute("CREATE TABLE TEST_4" + "(ID INT PRIMARY KEY) charset=UTF8"); stat.execute("CREATE TABLE TEST_5" + - "(ID INT PRIMARY KEY) ENGINE=InnoDb auto_increment=3 default charset=UTF8"); + "(ID INT AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDb auto_increment=3 default charset=UTF8"); 
stat.execute("CREATE TABLE TEST_6" + - "(ID INT PRIMARY KEY) ENGINE=InnoDb auto_increment=3 charset=UTF8"); + "(ID INT AUTO_INCREMENT PRIMARY KEY) ENGINE=MyISAM default character set UTF8MB4, auto_increment 3"); stat.execute("CREATE TABLE TEST_7" + - "(ID INT, KEY TEST_7_IDX(ID) USING BTREE)"); + "(ID INT AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDb auto_increment=3 charset=UTF8 comment 'text'"); stat.execute("CREATE TABLE TEST_8" + - "(ID INT, UNIQUE KEY TEST_8_IDX(ID) USING BTREE)"); + "(ID INT AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDb auto_increment=3 character set=UTF8"); + stat.execute("CREATE TABLE TEST_9" + + "(ID INT, KEY TEST_7_IDX(ID) USING BTREE)"); + stat.execute("CREATE TABLE TEST_10" + + "(ID INT, UNIQUE KEY TEST_10_IDX(ID) USING BTREE)"); + stat.execute("CREATE TABLE TEST_11(ID INT) COLLATE UTF8"); + stat.execute("CREATE TABLE TEST_12(ID INT) DEFAULT COLLATE UTF8"); + stat.execute("CREATE TABLE TEST_13(a VARCHAR(10) COLLATE UTF8MB4)"); + stat.execute("CREATE TABLE TEST_14(a VARCHAR(10) NULL CHARACTER SET UTF8MB4 COLLATE UTF8MB4_BIN)"); + stat.execute("ALTER TABLE TEST_14 CONVERT TO CHARACTER SET UTF8MB4 COLLATE UTF8MB4_UNICODE_CI"); + stat.execute("ALTER TABLE TEST_14 MODIFY a VARCHAR(10) NOT NULL CHARACTER SET UTF8MB4 COLLATE UTF8"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat).execute("CREATE TABLE TEST_99" + + "(ID INT PRIMARY KEY) CHARSET UTF8,"); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat).execute("CREATE TABLE TEST_99" + + "(ID INT PRIMARY KEY) AUTO_INCREMENT 100"); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat).execute("CREATE TABLE TEST_99" + + "(ID INT) AUTO_INCREMENT 100"); // this maps to SET REFERENTIAL_INTEGRITY TRUE/FALSE stat.execute("SET foreign_key_checks = 0"); @@ -663,31 +669,9 @@ private void testDerby() throws SQLException { conn = getConnection("compatibility"); } - private void testIgnite() throws SQLException { - Statement stat = conn.createStatement(); - stat.execute("SET MODE Ignite"); - stat.execute("DROP TABLE IF 
EXISTS TEST"); - stat.execute("create table test(id int affinity key)"); - stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("create table test(id int affinity primary key)"); - stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("create table test(id int, v1 varchar, v2 long affinity key, primary key(v1, id))"); - stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("create table test(id int, v1 varchar, v2 long, primary key(v1, id), affinity key (id))"); - - stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("create table test(id int shard key)"); - stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("create table test(id int shard primary key)"); - stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("create table test(id int, v1 varchar, v2 long shard key, primary key(v1, id))"); - stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("create table test(id int, v1 varchar, v2 long, primary key(v1, id), shard key (id))"); - } - private void testUnknownSet() throws SQLException { Statement stat = conn.createStatement(); - assertThrows(ErrorCode.UNKNOWN_MODE_1, stat).execute("SET MODE Unknown"); + assertThrows(ErrorCode.UNKNOWN_MODE_1, stat).execute("SET MODE UnknownMode"); } private void testIdentifiers() throws SQLException { @@ -700,8 +684,8 @@ private void testIdentifiers() throws SQLException { testIdentifiers(false, true, true); } - private void testIdentifiers(boolean upper, boolean lower, boolean caseInsensitiveIdentifiers) throws SQLException - { + private void testIdentifiers(boolean upper, boolean lower, boolean caseInsensitiveIdentifiers) // + throws SQLException { try (Connection conn = getConnection("compatibility;DATABASE_TO_UPPER=" + upper + ";DATABASE_TO_LOWER=" + lower + ";CASE_INSENSITIVE_IDENTIFIERS=" + caseInsensitiveIdentifiers)) { Statement stat = conn.createStatement(); @@ -751,19 +735,54 @@ private void testIdentifiers(Statement stat, String table, String column, boolea assertEquals(2, 
rs.getInt(2)); } } else { - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat).executeQuery(query); + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2, stat).executeQuery(query); } } - private void testUnknownURL() throws SQLException { - try { + private void testUnknownURL() { + assertThrows(ErrorCode.UNKNOWN_MODE_1, () -> { getConnection("compatibility;MODE=Unknown").close(); deleteDb("compatibility"); - } catch (SQLException ex) { - assertEquals(ErrorCode.UNKNOWN_MODE_1, ex.getErrorCode()); - return; + }); + } + + private void testIdentifiersCaseInResultSet() throws SQLException { + try (Connection conn = getConnection( + "compatibility;DATABASE_TO_UPPER=FALSE;CASE_INSENSITIVE_IDENTIFIERS=TRUE")) { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(A INT)"); + ResultSet rs = stat.executeQuery("SELECT a from test"); + ResultSetMetaData md = rs.getMetaData(); + assertEquals("A", md.getColumnName(1)); + rs = stat.executeQuery("SELECT a FROM (SELECT 1) t(A)"); + md = rs.getMetaData(); + assertEquals("A", md.getColumnName(1)); + } finally { + deleteDb("compatibility"); + } + } + + private void testDatabaseToLowerParser() throws SQLException { + try (Connection conn = getConnection("compatibility;DATABASE_TO_LOWER=TRUE")) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("SELECT 0x1234567890AbCdEf"); + rs.next(); + assertEquals(0x1234567890ABCDEFL, rs.getLong(1)); + } finally { + deleteDb("compatibility"); + } + } + + private void testOldInformationSchema() throws SQLException { + try (Connection conn = getConnection( + "compatibility;OLD_INFORMATION_SCHEMA=TRUE")) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("TABLE INFORMATION_SCHEMA.TABLE_TYPES"); + rs.next(); + assertEquals("TABLE", rs.getString(1)); + } finally { + deleteDb("compatibility"); } - fail(); } } diff --git a/h2/src/test/org/h2/test/db/TestCompatibilityOracle.java 
b/h2/src/test/org/h2/test/db/TestCompatibilityOracle.java index 7d99314c58..82ca638de7 100644 --- a/h2/src/test/org/h2/test/db/TestCompatibilityOracle.java +++ b/h2/src/test/org/h2/test/db/TestCompatibilityOracle.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -15,6 +15,7 @@ import java.text.SimpleDateFormat; import java.util.Arrays; import java.util.Locale; + import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.tools.SimpleResultSet; @@ -31,7 +32,7 @@ public class TestCompatibilityOracle extends TestDb { */ public static void main(String... s) throws Exception { TestBase test = TestBase.createCaller().init(); - test.test(); + test.testFromMain(); } @Override @@ -41,9 +42,11 @@ public void test() throws Exception { testDecimalScale(); testPoundSymbolInColumnName(); testToDate(); - testForbidEmptyInClause(); testSpecialTypes(); testDate(); + testSequenceNextval(); + testVarchar(); + deleteDb("oracle"); } private void testNotNullSyntax() throws SQLException { @@ -158,7 +161,7 @@ private void testTreatEmptyStringsAsNull() throws SQLException { stat, "SELECT * FROM D"); stat.execute("CREATE TABLE E (ID NUMBER, X RAW(1))"); - stat.execute("INSERT INTO E VALUES (1, '0A')"); + stat.execute("INSERT INTO E VALUES (1, HEXTORAW('0A'))"); stat.execute("INSERT INTO E VALUES (2, '')"); assertResult("2", stat, "SELECT COUNT(*) FROM E"); assertResult("1", stat, "SELECT COUNT(*) FROM E WHERE X IS NULL"); @@ -215,7 +218,7 @@ private void testPoundSymbolInColumnName() throws SQLException { } private void testToDate() throws SQLException { - if (Locale.getDefault() != Locale.ENGLISH) { + if (config.ci || Locale.getDefault() != Locale.ENGLISH) { return; } 
deleteDb("oracle"); @@ -236,22 +239,6 @@ private void testToDate() throws SQLException { conn.close(); } - private void testForbidEmptyInClause() throws SQLException { - deleteDb("oracle"); - Connection conn = getConnection("oracle;MODE=Oracle"); - Statement stat = conn.createStatement(); - - stat.execute("CREATE TABLE A (ID NUMBER, X VARCHAR2(1))"); - try { - stat.executeQuery("SELECT * FROM A WHERE ID IN ()"); - fail(); - } catch (SQLException e) { - // expected - } finally { - conn.close(); - } - } - private void testDate() throws SQLException { deleteDb("oracle"); Connection conn = getConnection("oracle;MODE=Oracle"); @@ -284,6 +271,86 @@ private void testDate() throws SQLException { conn.close(); } + private void testSequenceNextval() throws SQLException { + // Test NEXTVAL without Oracle MODE should return BIGINT + checkSequenceTypeWithMode("REGULAR", Types.BIGINT, false); + // Test NEXTVAL with Oracle MODE should return DECIMAL + checkSequenceTypeWithMode("Oracle", Types.NUMERIC, true); + } + + private void checkSequenceTypeWithMode(String mode, int expectedType, boolean usePseudoColumn) + throws SQLException { + deleteDb("oracle"); + Connection conn = getConnection("oracle;MODE=" + mode); + Statement stat = conn.createStatement(); + + stat.execute("CREATE SEQUENCE seq"); + ResultSet rs = stat.executeQuery( + usePseudoColumn ? "SELECT seq.NEXTVAL FROM DUAL" : "VALUES NEXT VALUE FOR seq"); + // Check type: + assertEquals(rs.getMetaData().getColumnType(1), expectedType); + conn.close(); + } + + private void testVarchar() throws SQLException { + deleteDb("oracle"); + Connection conn = getConnection("oracle;MODE=Oracle"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V VARCHAR) AS VALUES (1, 'a')"); + PreparedStatement prep = conn.prepareStatement("UPDATE TEST SET V = ? 
WHERE ID = ?"); + prep.setInt(2, 1); + prep.setString(1, ""); + prep.executeUpdate(); + ResultSet rs = stat.executeQuery("SELECT V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(1)); + assertFalse(rs.next()); + prep.setNString(1, ""); + prep.executeUpdate(); + Statement stat2 = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateString(2, ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateString("V", ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateNString(2, ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateNString("V", ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateObject(2, ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateObject("V", ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat.executeQuery("SELECT V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(1)); + assertFalse(rs.next()); + conn.close(); + } + private void assertResultDate(String expected, Statement stat, String sql) throws SQLException { SimpleDateFormat iso8601 = new SimpleDateFormat( diff --git a/h2/src/test/org/h2/test/db/TestCompatibilitySQLServer.java b/h2/src/test/org/h2/test/db/TestCompatibilitySQLServer.java index c516fa6921..5d1fa2486c 100644 --- a/h2/src/test/org/h2/test/db/TestCompatibilitySQLServer.java +++ 
b/h2/src/test/org/h2/test/db/TestCompatibilitySQLServer.java @@ -1,18 +1,18 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + /** * Test MSSQLServer compatibility mode. */ @@ -25,7 +25,7 @@ public class TestCompatibilitySQLServer extends TestDb { */ public static void main(String... s) throws Exception { TestBase test = TestBase.createCaller().init(); - test.test(); + test.testFromMain(); } @Override @@ -35,7 +35,7 @@ public void test() throws Exception { final Connection conn = getConnection("sqlserver;MODE=MSSQLServer"); try { testDiscardTableHints(conn); - testUseIdentityAsAutoIncrementAlias(conn); + testPrimaryKeyIdentity(conn); } finally { conn.close(); deleteDb("sqlserver"); @@ -67,9 +67,10 @@ private void testDiscardTableHints(Connection conn) throws SQLException { "join child ch with(nolock, index(id, name)) on ch.parent_id = p.id"); } - private void testUseIdentityAsAutoIncrementAlias(Connection conn) throws SQLException { + private void testPrimaryKeyIdentity(Connection conn) throws SQLException { final Statement stat = conn.createStatement(); + // IDENTITY after PRIMARY KEY is an undocumented syntax of MS SQL stat.execute("create table test(id int primary key identity, expected_id int)"); stat.execute("insert into test (expected_id) VALUES (1), (2), (3)"); diff --git a/h2/src/test/org/h2/test/db/TestCsv.java b/h2/src/test/org/h2/test/db/TestCsv.java index 07972ff047..3dc6b1977a 100644 --- a/h2/src/test/org/h2/test/db/TestCsv.java +++ 
b/h2/src/test/org/h2/test/db/TestCsv.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -25,7 +25,6 @@ import java.util.concurrent.TimeUnit; import org.h2.api.ErrorCode; -import org.h2.engine.SysProperties; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -49,7 +48,7 @@ public class TestCsv extends TestDb { public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -78,12 +77,12 @@ private void testWriteColumnHeader() throws Exception { Connection conn = getConnection("csv"); Statement stat = conn.createStatement(); stat.execute("call csvwrite('" + getBaseDir() + - "/test.tsv', 'select x from dual', 'writeColumnHeader=false')"); + "/test.tsv', 'select x from system_range(1, 1)', 'writeColumnHeader=false')"); String x = IOUtils.readStringAndClose(IOUtils.getReader( FileUtils.newInputStream(getBaseDir() + "/test.tsv")), -1); assertEquals("\"1\"", x.trim()); stat.execute("call csvwrite('" + getBaseDir() + - "/test.tsv', 'select x from dual', 'writeColumnHeader=true')"); + "/test.tsv', 'select x from system_range(1, 1)', 'writeColumnHeader=true')"); x = IOUtils.readStringAndClose(IOUtils.getReader( FileUtils.newInputStream(getBaseDir() + "/test.tsv")), -1); x = x.trim(); @@ -107,9 +106,7 @@ private void testWriteResultSetDataType() throws Exception { csv.setLineSeparator(";"); csv.write(writer, rs); conn.close(); - // getTimestamp().getString() needs to be used (not for H2, but for - // Oracle) - assertEquals("TS,N;0101-01-01 12:00:00.0,;", writer.toString()); + assertEquals("TS,N;-0100-01-01 
12:00:00,;", writer.toString()); } private void testCaseSensitiveColumnNames() throws Exception { @@ -184,7 +181,7 @@ private void testChangeData() throws Exception { private void testOptions() { Csv csv = new Csv(); assertEquals(",", csv.getFieldSeparatorWrite()); - assertEquals(SysProperties.LINE_SEPARATOR, csv.getLineSeparator()); + assertEquals(System.lineSeparator(), csv.getLineSeparator()); assertEquals("", csv.getNullString()); assertEquals('\"', csv.getEscapeCharacter()); assertEquals('"', csv.getFieldDelimiter()); @@ -233,9 +230,7 @@ private void testOptions() { assertEquals("\0", csv.getNullString()); assertEquals("", charset); - createClassProxy(Csv.class); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, csv). - setOptions("escape=a error=b"); + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, () -> csv.setOptions("escape=a error=b")); assertEquals('a', csv.getEscapeCharacter()); } @@ -492,7 +487,7 @@ private void testAsTable() throws SQLException { assertTrue(rs.next()); assertEquals("Hello", rs.getString(1)); assertFalse(rs.next()); - rs = stat.executeQuery("call csvread('" + getBaseDir() + "/test.csv')"); + rs = stat.executeQuery("select * from csvread('" + getBaseDir() + "/test.csv')"); assertTrue(rs.next()); assertEquals(1, rs.getInt(1)); assertEquals("Hello", rs.getString(2)); @@ -573,7 +568,7 @@ private void testWriteRead() throws SQLException { } trace("read: " + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); rs = new Csv().read(getBaseDir() + "/testRW.csv", null, "UTF8"); - // stat.execute("CREATE ALIAS CSVREAD FOR \"org.h2.tools.Csv.read\""); + // stat.execute("CREATE ALIAS CSVREAD FOR 'org.h2.tools.Csv.read'"); ResultSetMetaData meta = rs.getMetaData(); assertEquals(2, meta.getColumnCount()); for (int i = 0; i < len; i++) { diff --git a/h2/src/test/org/h2/test/db/TestDateStorage.java b/h2/src/test/org/h2/test/db/TestDateStorage.java index e9752acc30..98a7f05b77 100644 --- a/h2/src/test/org/h2/test/db/TestDateStorage.java +++ 
b/h2/src/test/org/h2/test/db/TestDateStorage.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -21,7 +21,6 @@ import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.test.unit.TestDate; -import org.h2.util.DateTimeUtils; import org.h2.value.ValueTimestamp; /** @@ -35,7 +34,7 @@ public class TestDateStorage extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -53,15 +52,17 @@ private void testDateTimeTimestampWithCalendar() throws SQLException { stat.execute("create table t(x time primary key)"); stat.execute("create table d(x date)"); Calendar utcCalendar = new GregorianCalendar(new SimpleTimeZone(0, "Z")); + stat.execute("SET TIME ZONE 'PST'"); TimeZone old = TimeZone.getDefault(); - DateTimeUtils.resetCalendar(); TimeZone.setDefault(TimeZone.getTimeZone("PST")); try { + // 2010-03-14T02:15:00Z Timestamp ts1 = Timestamp.valueOf("2010-03-13 18:15:00"); Time t1 = new Time(ts1.getTime()); Date d1 = new Date(ts1.getTime()); // when converted to UTC, this is 03:15, which doesn't actually // exist because of summer time change at that day + // 2010-03-14T03:15:00Z Timestamp ts2 = Timestamp.valueOf("2010-03-13 19:15:00"); Time t2 = new Time(ts2.getTime()); Date d2 = new Date(ts2.getTime()); @@ -140,8 +141,8 @@ private void testDateTimeTimestampWithCalendar() throws SQLException { assertEquals("2010-03-13", rs.getDate("x", utcCalendar).toString()); assertEquals("2010-03-14", rs.getDate("x").toString()); } finally { + stat.execute("SET TIME ZONE LOCAL"); TimeZone.setDefault(old); - DateTimeUtils.resetCalendar(); } 
stat.execute("drop table ts"); stat.execute("drop table t"); @@ -162,12 +163,13 @@ private static void testCurrentTimeZone() { } private static void test(int year, int month, int day, int hour) { - ValueTimestamp.parse(year + "-" + month + "-" + day + " " + hour + ":00:00"); + ValueTimestamp.parse(year + "-" + month + "-" + day + " " + hour + ":00:00", null); } private void testAllTimeZones() throws SQLException { Connection conn = getConnection(getTestName()); TimeZone defaultTimeZone = TimeZone.getDefault(); + PreparedStatement prepTimeZone = conn.prepareStatement("SET TIME ZONE ?"); PreparedStatement prep = conn.prepareStatement("CALL CAST(? AS DATE)"); try { ArrayList distinct = TestDate.getDistinctTimeZones(); @@ -182,15 +184,15 @@ private void testAllTimeZones() throws SQLException { } } // println(tz.getID()); + prepTimeZone.setString(1, tz.getID()); + prepTimeZone.executeUpdate(); TimeZone.setDefault(tz); - DateTimeUtils.resetCalendar(); for (int d = 101; d < 129; d++) { test(prep, d); } } } finally { TimeZone.setDefault(defaultTimeZone); - DateTimeUtils.resetCalendar(); } conn.close(); deleteDb(getTestName()); diff --git a/h2/src/test/org/h2/test/db/TestDeadlock.java b/h2/src/test/org/h2/test/db/TestDeadlock.java index b09df729e0..03d5b5ceaa 100644 --- a/h2/src/test/org/h2/test/db/TestDeadlock.java +++ b/h2/src/test/org/h2/test/db/TestDeadlock.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -11,8 +11,6 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.concurrent.TimeUnit; - -import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.util.Task; @@ -44,7 +42,7 @@ public class TestDeadlock extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -53,11 +51,7 @@ public void test() throws Exception { testTemporaryTablesAndMetaDataLocking(); testDeadlockInFulltextSearch(); testConcurrentLobReadAndTempResultTableDelete(); - testDiningPhilosophers(); - testLockUpgrade(); - testThreePhilosophers(); testNoDeadlock(); - testThreeSome(); deleteDb("deadlock"); } @@ -235,171 +229,6 @@ public void execute() throws SQLException { } - private void testThreePhilosophers() throws Exception { - if (config.mvStore) { - return; - } - initTest(); - c1.createStatement().execute("CREATE TABLE TEST_A(ID INT PRIMARY KEY)"); - c1.createStatement().execute("CREATE TABLE TEST_B(ID INT PRIMARY KEY)"); - c1.createStatement().execute("CREATE TABLE TEST_C(ID INT PRIMARY KEY)"); - c1.commit(); - c1.createStatement().execute("INSERT INTO TEST_A VALUES(1)"); - c2.createStatement().execute("INSERT INTO TEST_B VALUES(1)"); - c3.createStatement().execute("INSERT INTO TEST_C VALUES(1)"); - DoIt t2 = new DoIt() { - @Override - public void execute() throws SQLException { - c1.createStatement().execute("DELETE FROM TEST_B"); - c1.commit(); - } - }; - t2.start(); - DoIt t3 = new DoIt() { - @Override - public void execute() throws SQLException { - c2.createStatement().execute("DELETE FROM TEST_C"); - c2.commit(); - } - }; - t3.start(); - try { - c3.createStatement().execute("DELETE FROM TEST_A"); - c3.commit(); - } catch (SQLException e) { - catchDeadlock(e); - } - t2.join(); - t3.join(); - checkDeadlock(); - c1.commit(); - 
c2.commit(); - c3.commit(); - c1.createStatement().execute("DROP TABLE TEST_A, TEST_B, TEST_C"); - end(); - } - - // test case for issue # 61 - // http://code.google.com/p/h2database/issues/detail?id=61) - private void testThreeSome() throws Exception { - if (config.mvStore) { - return; - } - initTest(); - c1.createStatement().execute("CREATE TABLE TEST_A(ID INT PRIMARY KEY)"); - c1.createStatement().execute("CREATE TABLE TEST_B(ID INT PRIMARY KEY)"); - c1.createStatement().execute("CREATE TABLE TEST_C(ID INT PRIMARY KEY)"); - c1.commit(); - c1.createStatement().execute("INSERT INTO TEST_A VALUES(1)"); - c1.createStatement().execute("INSERT INTO TEST_B VALUES(1)"); - c2.createStatement().execute("INSERT INTO TEST_C VALUES(1)"); - DoIt t2 = new DoIt() { - @Override - public void execute() throws SQLException { - c3.createStatement().execute("INSERT INTO TEST_B VALUES(2)"); - c3.commit(); - } - }; - t2.start(); - DoIt t3 = new DoIt() { - @Override - public void execute() throws SQLException { - c2.createStatement().execute("INSERT INTO TEST_A VALUES(2)"); - c2.commit(); - } - }; - t3.start(); - try { - c1.createStatement().execute("INSERT INTO TEST_C VALUES(2)"); - c1.commit(); - } catch (SQLException e) { - catchDeadlock(e); - c1.rollback(); - } - t2.join(); - t3.join(); - checkDeadlock(); - c1.commit(); - c2.commit(); - c3.commit(); - c1.createStatement().execute("DROP TABLE TEST_A, TEST_B, TEST_C"); - end(); - } - - private void testLockUpgrade() throws Exception { - if (config.mvStore) { - return; - } - initTest(); - c1.createStatement().execute("CREATE TABLE TEST(ID INT PRIMARY KEY)"); - c1.createStatement().execute("INSERT INTO TEST VALUES(1)"); - c1.commit(); - c1.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - c2.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - c1.createStatement().executeQuery("SELECT * FROM TEST"); - c2.createStatement().executeQuery("SELECT * FROM TEST"); - Thread t1 = new DoIt() { - @Override - public void 
execute() throws SQLException { - c1.createStatement().execute("DELETE FROM TEST"); - c1.commit(); - } - }; - t1.start(); - try { - c2.createStatement().execute("DELETE FROM TEST"); - c2.commit(); - } catch (SQLException e) { - catchDeadlock(e); - } - t1.join(); - checkDeadlock(); - c1.commit(); - c2.commit(); - c1.createStatement().execute("DROP TABLE TEST"); - end(); - } - - private void testDiningPhilosophers() throws Exception { - if (config.mvStore) { - return; - } - initTest(); - c1.createStatement().execute("CREATE TABLE T1(ID INT)"); - c1.createStatement().execute("CREATE TABLE T2(ID INT)"); - c1.createStatement().execute("INSERT INTO T1 VALUES(1)"); - c2.createStatement().execute("INSERT INTO T2 VALUES(1)"); - DoIt t1 = new DoIt() { - @Override - public void execute() throws SQLException { - c1.createStatement().execute("INSERT INTO T2 VALUES(2)"); - c1.commit(); - } - }; - t1.start(); - try { - c2.createStatement().execute("INSERT INTO T1 VALUES(2)"); - } catch (SQLException e) { - catchDeadlock(e); - } - t1.join(); - checkDeadlock(); - c1.commit(); - c2.commit(); - c1.createStatement().execute("DROP TABLE T1, T2"); - end(); - } - - private void checkDeadlock() throws SQLException { - assertNotNull(lastException); - assertKnownException(lastException); - assertEquals(ErrorCode.DEADLOCK_1, lastException.getErrorCode()); - SQLException e2 = lastException.getNextException(); - if (e2 != null) { - // we have two exception, but there should only be one - throw new SQLException("Expected one exception, got multiple", e2); - } - } // there was a bug in the meta data locking here private void testTemporaryTablesAndMetaDataLocking() throws Exception { @@ -410,7 +239,7 @@ private void testTemporaryTablesAndMetaDataLocking() throws Exception { stmt.execute("CREATE SEQUENCE IF NOT EXISTS SEQ1 START WITH 1000000"); stmt.execute("CREATE FORCE VIEW V1 AS WITH RECURSIVE TEMP(X) AS " + "(SELECT x FROM DUAL) SELECT * FROM TEMP"); - stmt.executeQuery("SELECT SEQ1.NEXTVAL"); 
+ stmt.executeQuery("SELECT NEXT VALUE FOR SEQ1"); conn.close(); } diff --git a/h2/src/test/org/h2/test/db/TestDrop.java b/h2/src/test/org/h2/test/db/TestDrop.java deleted file mode 100644 index 803b57d77c..0000000000 --- a/h2/src/test/org/h2/test/db/TestDrop.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - -/** - * Test DROP statement - */ -public class TestDrop extends TestDb { - - private Connection conn; - private Statement stat; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - deleteDb("drop"); - conn = getConnection("drop"); - stat = conn.createStatement(); - - testTableDependsOnView(); - testComputedColumnDependency(); - testInterSchemaDependency(); - - conn.close(); - deleteDb("drop"); - } - - private void testTableDependsOnView() throws SQLException { - stat.execute("drop all objects"); - stat.execute("create table a(x int)"); - stat.execute("create view b as select * from a"); - stat.execute("create table c(y int check (select count(*) from b) = 0)"); - stat.execute("drop all objects"); - } - - private void testComputedColumnDependency() throws SQLException { - stat.execute("DROP ALL OBJECTS"); - stat.execute("CREATE TABLE A (A INT);"); - stat.execute("CREATE TABLE B (B INT AS SELECT A FROM A);"); - stat.execute("DROP ALL OBJECTS"); - stat.execute("CREATE SCHEMA TEST_SCHEMA"); - stat.execute("CREATE TABLE TEST_SCHEMA.A (A INT);"); - stat.execute("CREATE TABLE TEST_SCHEMA.B " + - "(B INT AS SELECT A FROM TEST_SCHEMA.A);"); - stat.execute("DROP SCHEMA 
TEST_SCHEMA CASCADE"); - } - - private void testInterSchemaDependency() throws SQLException { - stat.execute("drop all objects;"); - stat.execute("create schema table_view"); - stat.execute("set schema table_view"); - stat.execute("create table test1 (id int, name varchar(20))"); - stat.execute("create view test_view_1 as (select * from test1)"); - stat.execute("set schema public"); - stat.execute("create schema test_run"); - stat.execute("set schema test_run"); - stat.execute("create table test2 (id int, address varchar(20), " + - "constraint a_cons check (id in (select id from table_view.test1)))"); - stat.execute("set schema public"); - stat.execute("drop all objects"); - } -} diff --git a/h2/src/test/org/h2/test/db/TestDuplicateKeyUpdate.java b/h2/src/test/org/h2/test/db/TestDuplicateKeyUpdate.java index 9cf6aefa80..841579370e 100644 --- a/h2/src/test/org/h2/test/db/TestDuplicateKeyUpdate.java +++ b/h2/src/test/org/h2/test/db/TestDuplicateKeyUpdate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -26,7 +26,7 @@ public class TestDuplicateKeyUpdate extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -193,12 +193,12 @@ private void testOnDuplicateKeyInsertBatch(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test " + - "(key varchar(1) primary key, count int not null)"); + "(id varchar(1) primary key, count int not null)"); // Insert multiple values as a batch for (int i = 0; i <= 2; ++i) { PreparedStatement prep = conn.prepareStatement( - "insert into test(key, count) values(?, ?) " + + "insert into test(id, count) values(?, ?) " + "on duplicate key update count = count + 1"); prep.setString(1, "a"); prep.setInt(2, 1); @@ -214,7 +214,7 @@ private void testOnDuplicateKeyInsertBatch(Connection conn) // Check result ResultSet rs = stat.executeQuery( - "select count from test where key = 'a'"); + "select count from test where id = 'a'"); rs.next(); assertEquals(3, rs.getInt(1)); @@ -225,12 +225,12 @@ private void testOnDuplicateKeyInsertMultiValue(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test" + - "(key varchar(1) primary key, count int not null)"); + "(id varchar(1) primary key, count int not null)"); // Insert multiple values in single insert operation for (int i = 0; i <= 2; ++i) { PreparedStatement prep = conn.prepareStatement( - "insert into test(key, count) values(?, ?), (?, ?), (?, ?) " + + "insert into test(id, count) values(?, ?), (?, ?), (?, ?) 
" + "on duplicate key update count = count + 1"); prep.setString(1, "a"); prep.setInt(2, 1); @@ -243,15 +243,14 @@ private void testOnDuplicateKeyInsertMultiValue(Connection conn) conn.commit(); // Check result - ResultSet rs = stat.executeQuery("select count from test where key = 'a'"); + ResultSet rs = stat.executeQuery("select count from test where id = 'a'"); rs.next(); assertEquals(3, rs.getInt(1)); stat.execute("drop table test"); } - private void testPrimaryKeyAndUniqueKey(Connection conn) throws SQLException - { + private void testPrimaryKeyAndUniqueKey(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE test (id INT, dup INT, " + "counter INT, PRIMARY KEY(id), UNIQUE(dup))"); diff --git a/h2/src/test/org/h2/test/db/TestEncryptedDb.java b/h2/src/test/org/h2/test/db/TestEncryptedDb.java index d7762fa7ec..de2f8fa27f 100644 --- a/h2/src/test/org/h2/test/db/TestEncryptedDb.java +++ b/h2/src/test/org/h2/test/db/TestEncryptedDb.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -25,7 +25,7 @@ public class TestEncryptedDb extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -39,26 +39,28 @@ public boolean isEnabled() { @Override public void test() throws SQLException { deleteDb("encrypted"); - Connection conn = getConnection("encrypted;CIPHER=AES", "sa", "123 123"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(ID INT)"); - stat.execute("CHECKPOINT"); - stat.execute("SET WRITE_DELAY 0"); - stat.execute("INSERT INTO TEST VALUES(1)"); - stat.execute("SHUTDOWN IMMEDIATELY"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - - assertThrows(ErrorCode.FILE_ENCRYPTION_ERROR_1, this). - getConnection("encrypted;CIPHER=AES", "sa", "1234 1234"); + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, + () -> getConnection("encrypted;CIPHER=AES;PAGE_SIZE=2048", "sa", "1234 1234")); + try (Connection conn = getConnection("encrypted;CIPHER=AES", "sa", "123 123")) { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INT)"); + stat.execute("CHECKPOINT"); + stat.execute("SET WRITE_DELAY 0"); + stat.execute("INSERT INTO TEST VALUES(1)"); + stat.execute("SHUTDOWN IMMEDIATELY"); + } - conn = getConnection("encrypted;CIPHER=AES", "sa", "123 123"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - assertFalse(rs.next()); + assertThrows(ErrorCode.FILE_ENCRYPTION_ERROR_1, // + () -> getConnection("encrypted;CIPHER=AES", "sa", "1234 1234")); - conn.close(); + try (Connection conn = getConnection("encrypted;CIPHER=AES", "sa", "123 123")) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + } +// conn.close(); deleteDb("encrypted"); } diff --git a/h2/src/test/org/h2/test/db/TestExclusive.java 
b/h2/src/test/org/h2/test/db/TestExclusive.java index 54eee37d66..0fb4c2ceab 100644 --- a/h2/src/test/org/h2/test/db/TestExclusive.java +++ b/h2/src/test/org/h2/test/db/TestExclusive.java @@ -1,11 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.concurrent.atomic.AtomicInteger; @@ -26,17 +28,21 @@ public class TestExclusive extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { + testSetExclusiveTrueFalse(); + testSetExclusiveGetExclusive(); + } + + private void testSetExclusiveTrueFalse() throws Exception { deleteDb("exclusive"); Connection conn = getConnection("exclusive"); Statement stat = conn.createStatement(); stat.execute("set exclusive true"); - assertThrows(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE, this). 
- getConnection("exclusive"); + assertThrows(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE, () -> getConnection("exclusive")); stat.execute("set exclusive false"); Connection conn2 = getConnection("exclusive"); @@ -65,4 +71,56 @@ public void call() throws SQLException { deleteDb("exclusive"); } + private void testSetExclusiveGetExclusive() throws SQLException { + deleteDb("exclusive"); + try (Connection connection = getConnection("exclusive")) { + assertFalse(getExclusiveMode(connection)); + + setExclusiveMode(connection, 1); + assertTrue(getExclusiveMode(connection)); + + setExclusiveMode(connection, 0); + assertFalse(getExclusiveMode(connection)); + + // Setting to existing mode should not throws exception + setExclusiveMode(connection, 0); + assertFalse(getExclusiveMode(connection)); + + setExclusiveMode(connection, 1); + assertTrue(getExclusiveMode(connection)); + + // Setting to existing mode throws exception + setExclusiveMode(connection, 1); + assertTrue(getExclusiveMode(connection)); + + setExclusiveMode(connection, 2); + assertTrue(getExclusiveMode(connection)); + + setExclusiveMode(connection, 0); + assertFalse(getExclusiveMode(connection)); + } + } + + + private static void setExclusiveMode(Connection connection, int exclusiveMode) throws SQLException { + String sql = "SET EXCLUSIVE " + exclusiveMode; + + try (PreparedStatement statement = connection.prepareStatement(sql)) { + statement.execute(); + } + } + + private static boolean getExclusiveMode(Connection connection) throws SQLException{ + boolean exclusiveMode = false; + + String sql = "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'EXCLUSIVE'"; + try (PreparedStatement statement = connection.prepareStatement(sql)) { + ResultSet result = statement.executeQuery(); + if (result.next()) { + exclusiveMode = result.getBoolean(1); + } + } + + return exclusiveMode; + } } diff --git a/h2/src/test/org/h2/test/db/TestFullText.java b/h2/src/test/org/h2/test/db/TestFullText.java index 
fc7faa99a6..0e7da44762 100644 --- a/h2/src/test/org/h2/test/db/TestFullText.java +++ b/h2/src/test/org/h2/test/db/TestFullText.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -46,7 +46,7 @@ public class TestFullText extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -68,9 +68,7 @@ public void test() throws Exception { testCreateDropLucene(); testUuidPrimaryKey(true); testMultiThreaded(true); - if(config.mvStore || !config.multiThreaded) { - testMultiThreaded(false); - } + testMultiThreaded(false); testTransaction(true); test(true, "VARCHAR"); test(true, "CLOB"); @@ -95,7 +93,7 @@ private static void close(Collection list) { private Connection getConnection(String name, Collection list) throws SQLException { - Connection conn = getConnection(name); + Connection conn = getConnection(name + ";MODE=STRICT"); list.add(conn); return conn; } @@ -109,8 +107,7 @@ private void testAutoAnalyze() throws SQLException { conn = getConnection("fullTextNative", connList); stat = conn.createStatement(); - stat.execute("create alias if not exists ft_init " + - "for \"org.h2.fulltext.FullText.init\""); + stat.execute("create alias if not exists ft_init for 'org.h2.fulltext.FullText.init'"); stat.execute("call ft_init()"); stat.execute("create table test(id int primary key, name varchar)"); stat.execute("call ft_create_index('PUBLIC', 'TEST', 'NAME')"); @@ -130,8 +127,7 @@ private void testNativeFeatures() throws SQLException { ArrayList connList = new ArrayList<>(); Connection conn = getConnection("fullTextNative", connList); Statement 
stat = conn.createStatement(); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT " + - "FOR \"org.h2.fulltext.FullText.init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT FOR 'org.h2.fulltext.FullText.init'"); stat.execute("CALL FT_INIT()"); FullText.setIgnoreList(conn, "to,this"); FullText.setWhitespaceChars(conn, " ,.-"); @@ -320,8 +316,7 @@ private void testStreamLob() throws SQLException { deleteDb("fullText"); Connection conn = getConnection("fullText"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT " + - "FOR \"org.h2.fulltext.FullText.init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT FOR 'org.h2.fulltext.FullText.init'"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, DATA CLOB)"); FullText.createIndex(conn, "PUBLIC", "TEST", null); conn.setAutoCommit(false); @@ -366,8 +361,7 @@ private void testCreateDropNative() throws SQLException { FileUtils.deleteRecursive(getBaseDir() + "/fullText", false); Connection conn = getConnection("fullText"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT " + - "FOR \"org.h2.fulltext.FullText.init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT FOR 'org.h2.fulltext.FullText.init'"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); for (int i = 0; i < 10; i++) { FullText.createIndex(conn, "PUBLIC", "TEST", null); @@ -448,9 +442,19 @@ private void testPerformance(boolean lucene) throws SQLException { initFullText(stat, lucene); stat.execute("DROP TABLE IF EXISTS TEST"); stat.execute( - "CREATE TABLE TEST AS SELECT * FROM INFORMATION_SCHEMA.HELP"); - stat.execute("ALTER TABLE TEST ALTER COLUMN ID INT NOT NULL"); - stat.execute("CREATE PRIMARY KEY ON TEST(ID)"); + "CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY," + + " SECTION VARCHAR, TOPIC VARCHAR, SYNTAX VARCHAR, TEXT VARCHAR)"); + PreparedStatement ps = conn.prepareStatement( + "INSERT INTO TEST(SECTION, 
TOPIC, SYNTAX, TEXT) VALUES (?, ?, ?, ?)"); + try (ResultSet rs = stat.executeQuery("HELP \"\"")) { + while (rs.next()) { + for (int i = 1; i <= 4; i++) { + ps.setString(i, rs.getString(i)); + } + ps.addBatch(); + } + } + ps.executeUpdate(); long time = System.nanoTime(); stat.execute("CALL " + prefix + "_CREATE_INDEX('PUBLIC', 'TEST', NULL)"); println("create " + prefix + ": " + @@ -492,8 +496,7 @@ private void test(boolean lucene, String dataType) throws SQLException { String prefix = lucene ? "FTL_" : "FT_"; Statement stat = conn.createStatement(); String className = lucene ? "FullTextLucene" : "FullText"; - stat.execute("CREATE ALIAS IF NOT EXISTS " + - prefix + "INIT FOR \"org.h2.fulltext." + className + ".init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS " + prefix + "INIT FOR 'org.h2.fulltext." + className + ".init'"); stat.execute("CALL " + prefix + "INIT()"); stat.execute("DROP TABLE IF EXISTS TEST"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME " + dataType + ")"); @@ -630,8 +633,7 @@ private static void initFullText(Statement stat, boolean lucene) throws SQLException { String prefix = lucene ? "FTL" : "FT"; String className = lucene ? "FullTextLucene" : "FullText"; - stat.execute("CREATE ALIAS IF NOT EXISTS " + prefix + - "_INIT FOR \"org.h2.fulltext." + className + ".init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS " + prefix + "_INIT FOR 'org.h2.fulltext." + className + ".init'"); stat.execute("CALL " + prefix + "_INIT()"); } } diff --git a/h2/src/test/org/h2/test/db/TestFunctionOverload.java b/h2/src/test/org/h2/test/db/TestFunctionOverload.java index a76fc7a396..fe598c665f 100644 --- a/h2/src/test/org/h2/test/db/TestFunctionOverload.java +++ b/h2/src/test/org/h2/test/db/TestFunctionOverload.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -32,7 +32,7 @@ public class TestFunctionOverload extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -52,12 +52,12 @@ public void test() throws SQLException { private void testOverloadError() throws SQLException { Statement stat = conn.createStatement(); assertThrows(ErrorCode.METHODS_MUST_HAVE_DIFFERENT_PARAMETER_COUNTS_2, stat). - execute("create alias overloadError for \"" + ME + ".overloadError\""); + execute("create alias overloadError for '" + ME + ".overloadError'"); } private void testControl() throws SQLException { Statement stat = conn.createStatement(); - stat.execute("create alias overload0 for \"" + ME + ".overload0\""); + stat.execute("create alias overload0 for '" + ME + ".overload0'"); ResultSet rs = stat.executeQuery("select overload0() from dual"); assertTrue(rs.next()); assertEquals("0 args", 0, rs.getInt(1)); @@ -69,7 +69,7 @@ private void testControl() throws SQLException { private void testOverload() throws SQLException { Statement stat = conn.createStatement(); - stat.execute("create alias overload1or2 for \"" + ME + ".overload1or2\""); + stat.execute("create alias overload1or2 for '" + ME + ".overload1or2'"); ResultSet rs = stat.executeQuery("select overload1or2(1) from dual"); rs.next(); assertEquals("1 arg", 1, rs.getInt(1)); @@ -80,17 +80,16 @@ private void testOverload() throws SQLException { assertFalse(rs.next()); rs = meta.getProcedures(null, null, "OVERLOAD1OR2"); rs.next(); - assertEquals(1, rs.getInt("NUM_INPUT_PARAMS")); + assertEquals("OVERLOAD1OR2_1", rs.getString("SPECIFIC_NAME")); rs.next(); - assertEquals(2, rs.getInt("NUM_INPUT_PARAMS")); + assertEquals("OVERLOAD1OR2_2", rs.getString("SPECIFIC_NAME")); 
assertFalse(rs.next()); } private void testOverloadNamedArgs() throws SQLException { Statement stat = conn.createStatement(); - stat.execute("create alias overload1or2Named for \"" + ME + - ".overload1or2(int)\""); + stat.execute("create alias overload1or2Named for '" + ME + ".overload1or2(int)'"); ResultSet rs = stat.executeQuery("select overload1or2Named(1) from dual"); assertTrue("First Row", rs.next()); @@ -105,8 +104,7 @@ private void testOverloadNamedArgs() throws SQLException { private void testOverloadWithConnection() throws SQLException { Statement stat = conn.createStatement(); - stat.execute("create alias overload1or2WithConn for \"" + ME + - ".overload1or2WithConn\""); + stat.execute("create alias overload1or2WithConn for '" + ME + ".overload1or2WithConn'"); ResultSet rs = stat.executeQuery("select overload1or2WithConn(1) from dual"); rs.next(); diff --git a/h2/src/test/org/h2/test/db/TestFunctions.java b/h2/src/test/org/h2/test/db/TestFunctions.java index 9c47f257f5..dd601a5050 100644 --- a/h2/src/test/org/h2/test/db/TestFunctions.java +++ b/h2/src/test/org/h2/test/db/TestFunctions.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -30,6 +30,11 @@ import java.text.DecimalFormatSymbols; import java.text.ParseException; import java.text.SimpleDateFormat; +import java.time.LocalDate; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoUnit; +import java.time.temporal.TemporalQueries; +import java.time.temporal.WeekFields; import java.util.ArrayList; import java.util.Calendar; import java.util.Collections; @@ -46,20 +51,21 @@ import org.h2.api.AggregateFunction; import org.h2.api.ErrorCode; import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.expression.function.ToDateParser; -import org.h2.expression.function.ToChar.Capitalization; +import org.h2.engine.SessionLocal; +import org.h2.expression.function.ToCharFunction; +import org.h2.expression.function.ToCharFunction.Capitalization; import org.h2.jdbc.JdbcConnection; -import org.h2.message.DbException; +import org.h2.mode.ToDateParser; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.test.ap.TestAnnotationProcessor; import org.h2.tools.SimpleResultSet; -import org.h2.util.DateTimeUtils; import org.h2.util.IOUtils; import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueNumeric; import org.h2.value.ValueTimestamp; import org.h2.value.ValueTimestampTimeZone; @@ -78,25 +84,25 @@ public class TestFunctions extends TestDb implements AggregateFunction { public static void main(String... 
a) throws Exception { // Locale.setDefault(Locale.GERMANY); // Locale.setDefault(Locale.US); - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { deleteDb("functions"); testOverrideAlias(); - testIfNull(); + deleteDb("functions"); if (!config.networked) { JdbcConnection conn = (JdbcConnection) getConnection("functions"); - Session session = (Session) conn.getSession(); + SessionLocal session = (SessionLocal) conn.getSession(); testToDate(session); testToDateException(session); conn.close(); } - testDataType(); testVersion(); testFunctionTable(); testFunctionTableVarArgs(); + testArray(); testArrayParameters(); testDefaultConnection(); testFunctionInSchema(); @@ -109,47 +115,27 @@ public void test() throws Exception { testDeterministic(); testTransactionId(); testPrecision(); - testMathFunctions(); testVarArgs(); testAggregate(); testAggregateType(); testFunctions(); + testDateTimeFunctions(); testFileRead(); testValue(); testNvl2(); - testConcatWs(); - testTruncate(); - testDateTrunc(); - testExtract(); testToCharFromDateTime(); testToCharFromNumber(); testToCharFromText(); - testTranslate(); - testGenerateSeries(); testFileWrite(); testThatCurrentTimestampIsSane(); testThatCurrentTimestampStaysTheSameWithinATransaction(); testThatCurrentTimestampUpdatesOutsideATransaction(); testAnnotationProcessorsOutput(); - testRound(); testSignal(); deleteDb("functions"); } - private void testDataType() throws SQLException { - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - assertEquals(Types.DOUBLE, stat.executeQuery( - "select radians(x) from dual"). - getMetaData().getColumnType(1)); - assertEquals(Types.DOUBLE, stat.executeQuery( - "select power(10, 2*x) from dual"). 
- getMetaData().getColumnType(1)); - stat.close(); - conn.close(); - } - private void testVersion() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); @@ -157,7 +143,7 @@ private void testVersion() throws SQLException { ResultSet rs = stat.executeQuery(query); assertTrue(rs.next()); String version = rs.getString(1); - assertEquals(Constants.getVersion(), version); + assertEquals(Constants.VERSION, version); assertFalse(rs.next()); rs.close(); stat.close(); @@ -167,18 +153,26 @@ private void testVersion() throws SQLException { private void testFunctionTable() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("create alias simple_function_table for \"" + - TestFunctions.class.getName() + ".simpleFunctionTable\""); + stat.execute("create alias simple_function_table for '" + + TestFunctions.class.getName() + ".simpleFunctionTable'"); + stat.execute("create alias function_table_with_parameter for '" + + TestFunctions.class.getName() + ".functionTableWithParameter'"); stat.execute("select * from simple_function_table() " + "where a>0 and b in ('x', 'y')"); + PreparedStatement prep = conn.prepareStatement("call function_table_with_parameter(?)"); + prep.setInt(1, 10); + ResultSet rs = prep.executeQuery(); + assertTrue(rs.next()); + assertEquals(10, rs.getInt(1)); + assertEquals("X", rs.getString(2)); conn.close(); } private void testFunctionTableVarArgs() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("create alias varargs_function_table for \"" + TestFunctions.class.getName() - + ".varArgsFunctionTable\""); + stat.execute("create alias varargs_function_table for '" + TestFunctions.class.getName() + + ".varArgsFunctionTable'"); ResultSet rs = stat.executeQuery("select * from varargs_function_table(1,2,3,5,8,13)"); for (int i : new int[] { 1, 2, 3, 5, 8, 13 }) { 
assertTrue(rs.next()); @@ -202,6 +196,21 @@ public static ResultSet simpleFunctionTable(@SuppressWarnings("unused") Connecti return result; } + /** + * This method is called via reflection from the database. + * + * @param conn the connection + * @param p the parameter + * @return a result set + */ + public static ResultSet functionTableWithParameter(@SuppressWarnings("unused") Connection conn, int p) { + SimpleResultSet result = new SimpleResultSet(); + result.addColumn("A", Types.INTEGER, 0, 0); + result.addColumn("B", Types.CHAR, 0, 0); + result.addRow(p, 'X'); + return result; + } + /** * This method is called via reflection from the database. * @@ -282,58 +291,11 @@ private void testNvl2() throws SQLException { conn.close(); } - private void testConcatWs() throws SQLException { - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - - String createSQL = "CREATE TABLE testConcat(id BIGINT, txt1 " + - "varchar, txt2 varchar, txt3 varchar);"; - stat.execute(createSQL); - stat.execute("insert into testConcat(id, txt1, txt2, txt3) " + - "values(1, 'test1', 'test2', 'test3')"); - stat.execute("insert into testConcat(id, txt1, txt2, txt3) " + - "values(2, 'test1', 'test2', null)"); - stat.execute("insert into testConcat(id, txt1, txt2, txt3) " + - "values(3, 'test1', null, null)"); - stat.execute("insert into testConcat(id, txt1, txt2, txt3) " + - "values(4, null, 'test2', null)"); - stat.execute("insert into testConcat(id, txt1, txt2, txt3) " + - "values(5, null, null, null)"); - - String query = "SELECT concat_ws('_',txt1, txt2, txt3), txt1 " + - "FROM testConcat order by id asc"; - ResultSet rs = stat.executeQuery(query); - rs.next(); - String actual = rs.getString(1); - assertEquals("test1_test2_test3", actual); - rs.next(); - actual = rs.getString(1); - assertEquals("test1_test2", actual); - rs.next(); - actual = rs.getString(1); - assertEquals("test1", actual); - rs.next(); - actual = rs.getString(1); - assertEquals("test2", 
actual); - rs.next(); - actual = rs.getString(1); - assertEquals("", actual); - rs.close(); - - rs = stat.executeQuery("select concat_ws(null,null,null)"); - rs.next(); - assertNull(rs.getObject(1)); - - stat.execute("drop table testConcat"); - conn.close(); - } - private void testValue() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); ResultSet rs; - stat.execute("create alias TO_CHAR_2 for \"" + - getClass().getName() + ".toChar\""); + stat.execute("create alias TO_CHAR_2 for '" + getClass().getName() + ".toChar'"); rs = stat.executeQuery( "call TO_CHAR_2(TIMESTAMP '2001-02-03 04:05:06', 'format')"); rs.next(); @@ -352,14 +314,13 @@ public static Value toChar(Value... args) { if (args.length == 0) { return null; } - return args[0].convertTo(Value.STRING); + return args[0].convertTo(TypeInfo.TYPE_VARCHAR); } private void testDefaultConnection() throws SQLException { Connection conn = getConnection("functions;DEFAULT_CONNECTION=TRUE"); Statement stat = conn.createStatement(); - stat.execute("create alias test for \""+ - TestFunctions.class.getName()+".testDefaultConn\""); + stat.execute("create alias test for '" + TestFunctions.class.getName() + ".testDefaultConn'"); stat.execute("call test()"); stat.execute("drop alias test"); conn.close(); @@ -380,7 +341,7 @@ private void testFunctionInSchema() throws SQLException { stat.execute("create alias schema2.func as 'int x() { return 1; }'"); stat.execute("create view test as select schema2.func()"); ResultSet rs; - rs = stat.executeQuery("select * from information_schema.views"); + rs = stat.executeQuery("select * from information_schema.views where table_schema = 'PUBLIC'"); rs.next(); assertContains(rs.getString("VIEW_DEFINITION"), "\"SCHEMA2\".\"FUNC\""); @@ -421,8 +382,8 @@ private void testSource() throws SQLException { ResultSet rs; stat.execute("create force alias sayHi as 'String test(String name) {\n" + "return \"Hello \" + name;\n}'"); - rs = 
stat.executeQuery("SELECT ALIAS_NAME " + - "FROM INFORMATION_SCHEMA.FUNCTION_ALIASES"); + rs = stat.executeQuery("SELECT ROUTINE_NAME " + + "FROM INFORMATION_SCHEMA.ROUTINES"); rs.next(); assertEquals("SAY" + "HI", rs.getString(1)); rs = stat.executeQuery("call sayHi('Joe')"); @@ -444,10 +405,9 @@ private void testDynamicArgumentAndReturn() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); ResultSet rs; - stat.execute("create alias dynamic deterministic for \"" + - getClass().getName() + ".dynamic\""); + stat.execute("create alias dynamic deterministic for '" + getClass().getName() + ".dynamic'"); setCount(0); - rs = stat.executeQuery("call dynamic(ARRAY['a', 1])[1]"); + rs = stat.executeQuery("call dynamic(ARRAY['a', '1'])[1]"); rs.next(); String a = rs.getString(1); assertEquals("a1", a); @@ -460,8 +420,7 @@ private void testUUID() throws SQLException { Statement stat = conn.createStatement(); ResultSet rs; - stat.execute("create alias xorUUID for \""+ - getClass().getName()+".xorUUID\""); + stat.execute("create alias xorUUID for '" + getClass().getName() + ".xorUUID'"); setCount(0); rs = stat.executeQuery("call xorUUID(random_uuid(), random_uuid())"); rs.next(); @@ -477,8 +436,7 @@ private void testDeterministic() throws SQLException { Statement stat = conn.createStatement(); ResultSet rs; - stat.execute("create alias getCount for \""+ - getClass().getName()+".getCount\""); + stat.execute("create alias getCount for '" + getClass().getName() + ".getCount'"); setCount(0); rs = stat.executeQuery("select getCount() from system_range(1, 2)"); rs.next(); @@ -487,8 +445,7 @@ private void testDeterministic() throws SQLException { assertEquals(1, rs.getInt(1)); stat.execute("drop alias getCount"); - stat.execute("create alias getCount deterministic for \""+ - getClass().getName()+".getCount\""); + stat.execute("create alias getCount deterministic for '" + getClass().getName() + ".getCount'"); setCount(0); rs = 
stat.executeQuery("select getCount() from system_range(1, 2)"); rs.next(); @@ -497,11 +454,10 @@ private void testDeterministic() throws SQLException { assertEquals(0, rs.getInt(1)); stat.execute("drop alias getCount"); rs = stat.executeQuery("SELECT * FROM " + - "INFORMATION_SCHEMA.FUNCTION_ALIASES " + - "WHERE UPPER(ALIAS_NAME) = 'GET' || 'COUNT'"); + "INFORMATION_SCHEMA.ROUTINES " + + "WHERE UPPER(ROUTINE_NAME) = 'GET' || 'COUNT'"); assertFalse(rs.next()); - stat.execute("create alias reverse deterministic for \""+ - getClass().getName()+".reverse\""); + stat.execute("create alias reverse deterministic for '" + getClass().getName() + ".reverse'"); rs = stat.executeQuery("select reverse(x) from system_range(700, 700)"); rs.next(); assertEquals("007", rs.getString(1)); @@ -537,42 +493,26 @@ private void testTransactionId() throws SQLException { private void testPrecision() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("create alias no_op for \""+getClass().getName()+".noOp\""); + stat.execute("create alias no_op for '" + getClass().getName() + ".noOp'"); PreparedStatement prep = conn.prepareStatement( "select * from dual where no_op(1.6)=?"); prep.setBigDecimal(1, new BigDecimal("1.6")); ResultSet rs = prep.executeQuery(); assertTrue(rs.next()); - stat.execute("create aggregate agg_sum for \""+getClass().getName()+"\""); + stat.execute("create aggregate agg_sum for '" + getClass().getName() + '\''); rs = stat.executeQuery("select agg_sum(1), sum(1.6) from dual"); rs.next(); - assertEquals(Integer.MAX_VALUE, rs.getMetaData().getScale(2)); - assertEquals(Integer.MAX_VALUE, rs.getMetaData().getScale(1)); - stat.executeQuery("select * from information_schema.function_aliases"); - conn.close(); - } - - private void testMathFunctions() throws SQLException { - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("CALL 
SINH(50)"); - assertTrue(rs.next()); - assertEquals(Math.sinh(50), rs.getDouble(1)); - rs = stat.executeQuery("CALL COSH(50)"); - assertTrue(rs.next()); - assertEquals(Math.cosh(50), rs.getDouble(1)); - rs = stat.executeQuery("CALL TANH(50)"); - assertTrue(rs.next()); - assertEquals(Math.tanh(50), rs.getDouble(1)); + assertEquals(1, rs.getMetaData().getScale(2)); + assertEquals(ValueNumeric.MAXIMUM_SCALE / 2, rs.getMetaData().getScale(1)); + stat.executeQuery("select * from information_schema.routines"); conn.close(); } private void testVarArgs() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS mean FOR \"" + - getClass().getName() + ".mean\""); + stat.execute("CREATE ALIAS mean FOR '" + getClass().getName() + ".mean'"); ResultSet rs = stat.executeQuery( "select mean(), mean(10), mean(10, 20), mean(10, 20, 30)"); rs.next(); @@ -581,8 +521,7 @@ private void testVarArgs() throws SQLException { assertEquals(15.0, rs.getDouble(3)); assertEquals(20.0, rs.getDouble(4)); - stat.execute("CREATE ALIAS mean2 FOR \"" + - getClass().getName() + ".mean2\""); + stat.execute("CREATE ALIAS mean2 FOR '" + getClass().getName() + ".mean2'"); rs = stat.executeQuery( "select mean2(), mean2(10), mean2(10, 20)"); rs.next(); @@ -593,32 +532,31 @@ private void testVarArgs() throws SQLException { DatabaseMetaData meta = conn.getMetaData(); rs = meta.getProcedureColumns(null, null, "MEAN2", null); assertTrue(rs.next()); - assertEquals("P0", rs.getString("COLUMN_NAME")); + assertEquals("RESULT", rs.getString("COLUMN_NAME")); assertTrue(rs.next()); assertEquals("FUNCTIONS", rs.getString("PROCEDURE_CAT")); assertEquals("PUBLIC", rs.getString("PROCEDURE_SCHEM")); assertEquals("MEAN2", rs.getString("PROCEDURE_NAME")); - assertEquals("P2", rs.getString("COLUMN_NAME")); + assertEquals("P1", rs.getString("COLUMN_NAME")); assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt("COLUMN_TYPE")); - 
assertEquals("OTHER", rs.getString("TYPE_NAME")); - assertEquals(Integer.MAX_VALUE, rs.getInt("PRECISION")); - assertEquals(Integer.MAX_VALUE, rs.getInt("LENGTH")); + assertEquals("DOUBLE PRECISION ARRAY", rs.getString("TYPE_NAME")); + assertEquals(Constants.MAX_ARRAY_CARDINALITY, rs.getInt("PRECISION")); + assertEquals(Constants.MAX_ARRAY_CARDINALITY, rs.getInt("LENGTH")); assertEquals(0, rs.getInt("SCALE")); - assertEquals(DatabaseMetaData.columnNullable, + assertEquals(DatabaseMetaData.columnNullableUnknown, rs.getInt("NULLABLE")); - assertEquals("", rs.getString("REMARKS")); + assertNull(rs.getString("REMARKS")); assertEquals(null, rs.getString("COLUMN_DEF")); assertEquals(0, rs.getInt("SQL_DATA_TYPE")); assertEquals(0, rs.getInt("SQL_DATETIME_SUB")); assertEquals(0, rs.getInt("CHAR_OCTET_LENGTH")); assertEquals(1, rs.getInt("ORDINAL_POSITION")); - assertEquals("YES", rs.getString("IS_NULLABLE")); - assertEquals("MEAN2", rs.getString("SPECIFIC_NAME")); + assertEquals("", rs.getString("IS_NULLABLE")); + assertEquals("MEAN2_1", rs.getString("SPECIFIC_NAME")); assertFalse(rs.next()); - stat.execute("CREATE ALIAS printMean FOR \"" + - getClass().getName() + ".printMean\""); + stat.execute("CREATE ALIAS printMean FOR '" + getClass().getName() + ".printMean'"); rs = stat.executeQuery( "select printMean('A'), printMean('A', 10), " + "printMean('BB', 10, 20), printMean ('CCC', 10, 20, 30)"); @@ -718,11 +656,6 @@ public int getType(int[] inputType) { return Types.VARCHAR; } - @Override - public void init(Connection conn) { - // nothing to do - } - } /** @@ -744,12 +677,7 @@ public Object getResult() { @Override public int getInternalType(int[] inputTypes) throws SQLException { - return Value.STRING; - } - - @Override - public void init(Connection conn) { - // nothing to do + return Value.VARCHAR; } } @@ -758,10 +686,8 @@ private void testAggregateType() throws SQLException { deleteDb("functions"); Connection conn = getConnection("functions"); Statement stat = 
conn.createStatement(); - stat.execute("CREATE AGGREGATE SIMPLE_MEDIAN FOR \"" + - MedianStringType.class.getName() + "\""); - stat.execute("CREATE AGGREGATE IF NOT EXISTS SIMPLE_MEDIAN FOR \"" + - MedianStringType.class.getName() + "\""); + stat.execute("CREATE AGGREGATE SIMPLE_MEDIAN FOR '" + MedianStringType.class.getName() + '\''); + stat.execute("CREATE AGGREGATE IF NOT EXISTS SIMPLE_MEDIAN FOR '" + MedianStringType.class.getName() + '\''); ResultSet rs = stat.executeQuery( "SELECT SIMPLE_MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); rs.next(); @@ -811,19 +737,21 @@ private void testAggregate() throws SQLException { deleteDb("functions"); Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("CREATE AGGREGATE SIMPLE_MEDIAN FOR \"" + - MedianString.class.getName() + "\""); - stat.execute("CREATE AGGREGATE IF NOT EXISTS SIMPLE_MEDIAN FOR \"" + - MedianString.class.getName() + "\""); - ResultSet rs = stat.executeQuery( - "SELECT SIMPLE_MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); + stat.execute("CREATE AGGREGATE SIMPLE_MEDIAN FOR '" + MedianString.class.getName() + '\''); + stat.execute("CREATE AGGREGATE IF NOT EXISTS SIMPLE_MEDIAN FOR '" + MedianString.class.getName() + '\''); + stat.execute("CREATE SCHEMA S1"); + stat.execute("CREATE AGGREGATE S1.MEDIAN2 FOR '" + MedianString.class.getName() + '\''); + ResultSet rs = stat.executeQuery("SELECT SIMPLE_MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); + rs.next(); + assertEquals("5", rs.getString(1)); + assertThrows(ErrorCode.FUNCTION_NOT_FOUND_1, stat).executeQuery("SELECT MEDIAN2(X) FROM SYSTEM_RANGE(1, 9)"); + rs = stat.executeQuery("SELECT S1.MEDIAN2(X) FROM SYSTEM_RANGE(1, 9)"); rs.next(); assertEquals("5", rs.getString(1)); stat.execute("CREATE TABLE DATA(V INT)"); stat.execute("INSERT INTO DATA VALUES (1), (3), (2), (1), (1), (2), (1), (1), (1), (1), (1)"); - rs = stat.executeQuery( - "SELECT SIMPLE_MEDIAN(V), SIMPLE_MEDIAN(DISTINCT V) FROM DATA"); + rs = stat.executeQuery("SELECT 
SIMPLE_MEDIAN(V), SIMPLE_MEDIAN(DISTINCT V) FROM DATA"); rs.next(); assertEquals("1", rs.getString(1)); assertEquals("2", rs.getString(2)); @@ -840,18 +768,28 @@ private void testAggregate() throws SQLException { DatabaseMetaData meta = conn.getMetaData(); rs = meta.getProcedures(null, null, "SIMPLE_MEDIAN"); assertTrue(rs.next()); + assertEquals("PUBLIC", rs.getString("PROCEDURE_SCHEM")); + assertFalse(rs.next()); + rs = meta.getProcedures(null, null, "MEDIAN2"); + assertTrue(rs.next()); + assertEquals("S1", rs.getString("PROCEDURE_SCHEM")); assertFalse(rs.next()); rs = stat.executeQuery("SCRIPT"); - boolean found = false; + boolean found1 = false, found2 = false; while (rs.next()) { String sql = rs.getString(1); - if (sql.contains("SIMPLE_MEDIAN")) { - found = true; + if (sql.contains("\"PUBLIC\".\"SIMPLE_MEDIAN\"")) { + found1 = true; + } else if (sql.contains("\"S1\".\"MEDIAN2\"")) { + found2 = true; } } - assertTrue(found); + assertTrue(found1); + assertTrue(found2); stat.execute("DROP AGGREGATE SIMPLE_MEDIAN"); stat.execute("DROP AGGREGATE IF EXISTS SIMPLE_MEDIAN"); + stat.execute("DROP AGGREGATE S1.MEDIAN2"); + stat.execute("DROP SCHEMA S1"); conn.close(); } @@ -864,8 +802,7 @@ private void testFunctions() throws SQLException { assertCallResult("1", stat, "abs(1)"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); - stat.execute("CREATE ALIAS ADD_ROW FOR \"" + - getClass().getName() + ".addRow\""); + stat.execute("CREATE ALIAS ADD_ROW FOR '" + getClass().getName() + ".addRow'"); ResultSet rs; rs = stat.executeQuery("CALL ADD_ROW(1, 'Hello')"); rs.next(); @@ -879,37 +816,36 @@ private void testFunctions() throws SQLException { DatabaseMetaData meta = conn.getMetaData(); rs = meta.getProcedureColumns(null, null, "ADD_ROW", null); assertTrue(rs.next()); - assertEquals("P0", rs.getString("COLUMN_NAME")); + assertEquals("RESULT", rs.getString("COLUMN_NAME")); assertTrue(rs.next()); assertEquals("FUNCTIONS", rs.getString("PROCEDURE_CAT")); 
assertEquals("PUBLIC", rs.getString("PROCEDURE_SCHEM")); assertEquals("ADD_ROW", rs.getString("PROCEDURE_NAME")); - assertEquals("P2", rs.getString("COLUMN_NAME")); + assertEquals("P1", rs.getString("COLUMN_NAME")); assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt("COLUMN_TYPE")); assertEquals("INTEGER", rs.getString("TYPE_NAME")); - assertEquals(10, rs.getInt("PRECISION")); - assertEquals(10, rs.getInt("LENGTH")); + assertEquals(32, rs.getInt("PRECISION")); + assertEquals(32, rs.getInt("LENGTH")); assertEquals(0, rs.getInt("SCALE")); assertEquals(DatabaseMetaData.columnNoNulls, rs.getInt("NULLABLE")); - assertEquals("", rs.getString("REMARKS")); + assertNull(rs.getString("REMARKS")); assertEquals(null, rs.getString("COLUMN_DEF")); assertEquals(0, rs.getInt("SQL_DATA_TYPE")); assertEquals(0, rs.getInt("SQL_DATETIME_SUB")); assertEquals(0, rs.getInt("CHAR_OCTET_LENGTH")); assertEquals(1, rs.getInt("ORDINAL_POSITION")); - assertEquals("YES", rs.getString("IS_NULLABLE")); - assertEquals("ADD_ROW", rs.getString("SPECIFIC_NAME")); + assertEquals("", rs.getString("IS_NULLABLE")); + assertEquals("ADD_ROW_1", rs.getString("SPECIFIC_NAME")); assertTrue(rs.next()); - assertEquals("P3", rs.getString("COLUMN_NAME")); - assertEquals("VARCHAR", rs.getString("TYPE_NAME")); + assertEquals("P2", rs.getString("COLUMN_NAME")); + assertEquals("CHARACTER VARYING", rs.getString("TYPE_NAME")); assertFalse(rs.next()); stat.executeQuery("CALL ADD_ROW(2, 'World')"); - stat.execute("CREATE ALIAS SELECT_F FOR \"" + - getClass().getName() + ".select\""); - rs = stat.executeQuery("CALL SELECT_F('SELECT * " + + stat.execute("CREATE ALIAS SELECT_F FOR '" + getClass().getName() + ".select'"); + rs = stat.executeQuery("SELECT * FROM SELECT_F('SELECT * " + "FROM TEST ORDER BY ID')"); assertEquals(2, rs.getMetaData().getColumnCount()); rs.next(); @@ -929,26 +865,10 @@ private void testFunctions() throws SQLException { assertEquals("Hello", rs.getString(1)); assertFalse(rs.next()); - rs = 
stat.executeQuery("SELECT SELECT_F('SELECT * " + - "FROM TEST WHERE ID=' || ID) FROM TEST ORDER BY ID"); - assertEquals(1, rs.getMetaData().getColumnCount()); - rs.next(); - assertEquals("((1, Hello))", rs.getString(1)); - rs.next(); - assertEquals("((2, World))", rs.getString(1)); - assertFalse(rs.next()); - - rs = stat.executeQuery("SELECT SELECT_F('SELECT * " + - "FROM TEST ORDER BY ID') FROM DUAL"); - assertEquals(1, rs.getMetaData().getColumnCount()); - rs.next(); - assertEquals("((1, Hello), (2, World))", rs.getString(1)); - assertFalse(rs.next()); assertThrows(ErrorCode.SYNTAX_ERROR_2, stat). - executeQuery("CALL SELECT_F('ERROR')"); - stat.execute("CREATE ALIAS SIMPLE FOR \"" + - getClass().getName() + ".simpleResultSet\""); - rs = stat.executeQuery("CALL SIMPLE(2, 1, 1, 1, 1, 1, 1, 1)"); + executeQuery("SELECT * FROM SELECT_F('ERROR')"); + stat.execute("CREATE ALIAS SIMPLE FOR '" + getClass().getName() + ".simpleResultSet'"); + rs = stat.executeQuery("SELECT * FROM SIMPLE(2, 1, 1, 1, 1, 1, 1, 1)"); assertEquals(2, rs.getMetaData().getColumnCount()); rs.next(); assertEquals(0, rs.getInt(1)); @@ -965,15 +885,14 @@ private void testFunctions() throws SQLException { assertEquals("Hello", rs.getString(2)); assertFalse(rs.next()); - stat.execute("CREATE ALIAS GET_ARRAY FOR \"" + - getClass().getName() + ".getArray\""); + stat.execute("CREATE ALIAS GET_ARRAY FOR '" + getClass().getName() + ".getArray'"); rs = stat.executeQuery("CALL GET_ARRAY()"); assertEquals(1, rs.getMetaData().getColumnCount()); rs.next(); Array a = rs.getArray(1); Object[] array = (Object[]) a.getArray(); assertEquals(2, array.length); - assertEquals(0, ((Integer) array[0]).intValue()); + assertEquals("0", (String) array[0]); assertEquals("Hello", (String) array[1]); assertThrows(ErrorCode.INVALID_VALUE_2, a).getArray(1, -1); assertEquals(2, ((Object[]) a.getArray(1, 3)).length); @@ -1029,18 +948,13 @@ private void testFunctions() throws SQLException { assertThrows(ErrorCode.OBJECT_CLOSED, 
a).getArray(); assertThrows(ErrorCode.OBJECT_CLOSED, a).getResultSet(); - stat.execute("CREATE ALIAS ROOT FOR \"" + getClass().getName() + ".root\""); + stat.execute("CREATE ALIAS ROOT FOR '" + getClass().getName() + ".root'"); rs = stat.executeQuery("CALL ROOT(9)"); rs.next(); assertEquals(3, rs.getInt(1)); assertFalse(rs.next()); - stat.execute("CREATE ALIAS MAX_ID FOR \"" + - getClass().getName() + ".selectMaxId\""); - rs = stat.executeQuery("CALL MAX_ID()"); - rs.next(); - assertEquals(2, rs.getInt(1)); - assertFalse(rs.next()); + stat.execute("CREATE ALIAS MAX_ID FOR '" + getClass().getName() + ".selectMaxId'"); rs = stat.executeQuery("SELECT * FROM MAX_ID()"); rs.next(); @@ -1052,14 +966,14 @@ private void testFunctions() throws SQLException { assertEquals(0, rs.getInt(1)); assertFalse(rs.next()); - stat.execute("CREATE ALIAS blob FOR \"" + getClass().getName() + ".blob\""); + stat.execute("CREATE ALIAS blob FOR '" + getClass().getName() + ".blob'"); rs = stat.executeQuery("SELECT blob(CAST('0102' AS BLOB)) FROM DUAL"); while (rs.next()) { // ignore } rs.close(); - stat.execute("CREATE ALIAS clob FOR \"" + getClass().getName() + ".clob\""); + stat.execute("CREATE ALIAS clob FOR '" + getClass().getName() + ".clob'"); rs = stat.executeQuery("SELECT clob(CAST('Hello' AS CLOB)) FROM DUAL"); while (rs.next()) { // ignore @@ -1073,75 +987,67 @@ private void testFunctions() throws SQLException { assertTrue(rs.next()); assertEquals("Hello", rs.getString(1)); - rs = stat.executeQuery("select * from sql('select cast(''4869'' as blob)')"); + rs = stat.executeQuery("select * from sql('select cast(X''4869'' as blob)')"); assertTrue(rs.next()); assertEquals("Hi", new String(rs.getBytes(1))); - rs = stat.executeQuery("select sql('select 1 a, ''Hello'' b')"); - assertTrue(rs.next()); - rs2 = (ResultSet) rs.getObject(1); - rs2.next(); - assertEquals(1, rs2.getInt(1)); - assertEquals("Hello", rs2.getString(2)); - ResultSetMetaData meta2 = rs2.getMetaData(); + rs = 
stat.executeQuery("select * from sql('select 1 a, ''Hello'' b')"); + rs.next(); + assertEquals(1, rs.getInt(1)); + assertEquals("Hello", rs.getString(2)); + ResultSetMetaData meta2 = rs.getMetaData(); assertEquals(Types.INTEGER, meta2.getColumnType(1)); assertEquals("INTEGER", meta2.getColumnTypeName(1)); assertEquals("java.lang.Integer", meta2.getColumnClassName(1)); assertEquals(Types.VARCHAR, meta2.getColumnType(2)); - assertEquals("VARCHAR", meta2.getColumnTypeName(2)); + assertEquals("CHARACTER VARYING", meta2.getColumnTypeName(2)); assertEquals("java.lang.String", meta2.getColumnClassName(2)); - stat.execute("CREATE ALIAS blob2stream FOR \"" + - getClass().getName() + ".blob2stream\""); - stat.execute("CREATE ALIAS stream2stream FOR \"" + - getClass().getName() + ".stream2stream\""); - stat.execute("CREATE TABLE TEST_BLOB(ID INT PRIMARY KEY, VALUE BLOB)"); + stat.execute("CREATE ALIAS blob2stream FOR '" + getClass().getName() + ".blob2stream'"); + stat.execute("CREATE ALIAS stream2stream FOR '" + getClass().getName() + ".stream2stream'"); + stat.execute("CREATE TABLE TEST_BLOB(ID INT PRIMARY KEY, \"VALUE\" BLOB)"); stat.execute("INSERT INTO TEST_BLOB VALUES(0, null)"); stat.execute("INSERT INTO TEST_BLOB VALUES(1, 'edd1f011edd1f011edd1f011')"); - rs = stat.executeQuery("SELECT blob2stream(VALUE) FROM TEST_BLOB"); + rs = stat.executeQuery("SELECT blob2stream(\"VALUE\") FROM TEST_BLOB"); while (rs.next()) { // ignore } rs.close(); - rs = stat.executeQuery("SELECT stream2stream(VALUE) FROM TEST_BLOB"); + rs = stat.executeQuery("SELECT stream2stream(\"VALUE\") FROM TEST_BLOB"); while (rs.next()) { // ignore } - stat.execute("CREATE ALIAS NULL_RESULT FOR \"" + - getClass().getName() + ".nullResultSet\""); - rs = stat.executeQuery("CALL NULL_RESULT()"); - assertEquals(1, rs.getMetaData().getColumnCount()); - rs.next(); - assertEquals(null, rs.getString(1)); - assertFalse(rs.next()); - - rs = meta.getProcedures(null, null, "NULL_RESULT"); - rs.next(); - 
assertEquals("FUNCTIONS", rs.getString("PROCEDURE_CAT")); - assertEquals("PUBLIC", rs.getString("PROCEDURE_SCHEM")); - assertEquals("NULL_RESULT", rs.getString("PROCEDURE_NAME")); - assertEquals(0, rs.getInt("NUM_INPUT_PARAMS")); - assertEquals(0, rs.getInt("NUM_OUTPUT_PARAMS")); - assertEquals(0, rs.getInt("NUM_RESULT_SETS")); - assertEquals("", rs.getString("REMARKS")); - assertEquals(DatabaseMetaData.procedureReturnsResult, - rs.getInt("PROCEDURE_TYPE")); - assertEquals("NULL_RESULT", rs.getString("SPECIFIC_NAME")); - - rs = meta.getProcedureColumns(null, null, "NULL_RESULT", null); - assertTrue(rs.next()); - assertEquals("P0", rs.getString("COLUMN_NAME")); - assertFalse(rs.next()); - - stat.execute("CREATE ALIAS RESULT_WITH_NULL FOR \"" + - getClass().getName() + ".resultSetWithNull\""); - rs = stat.executeQuery("CALL RESULT_WITH_NULL()"); - assertEquals(1, rs.getMetaData().getColumnCount()); - rs.next(); - assertEquals(null, rs.getString(1)); - assertFalse(rs.next()); + conn.close(); + } + private void testDateTimeFunctions() throws SQLException { + deleteDb("functions"); + Connection conn = getConnection("functions"); + Statement stat = conn.createStatement(); + ResultSet rs; + WeekFields wf = WeekFields.of(Locale.getDefault()); + for (int y = 2001; y <= 2010; y++) { + for (int d = 1; d <= 7; d++) { + String date1 = y + "-01-0" + d, date2 = y + "-01-0" + (d + 1); + LocalDate local1 = LocalDate.parse(date1), local2 = LocalDate.parse(date2); + rs = stat.executeQuery( + "SELECT EXTRACT(DAY_OF_WEEK FROM C1), EXTRACT(WEEK FROM C1), EXTRACT(WEEK_YEAR FROM C1)," + + " DATEDIFF(WEEK, C1, C2), DATE_TRUNC(WEEK, C1), DATE_TRUNC(WEEK_YEAR, C1) FROM" + + " VALUES (DATE '" + date1 + "', DATE '" + date2 + "')"); + rs.next(); + assertEquals(local1.get(wf.dayOfWeek()), rs.getInt(1)); + int w1 = local1.get(wf.weekOfWeekBasedYear()); + assertEquals(w1, rs.getInt(2)); + int weekYear = local1.get(wf.weekBasedYear()); + assertEquals(weekYear, rs.getInt(3)); + assertEquals(w1 == 
local2.get(wf.weekOfWeekBasedYear()) ? 0 : 1, rs.getInt(4)); + assertEquals(local1.minus(local1.get(wf.dayOfWeek()) - 1, ChronoUnit.DAYS), + rs.getObject(5, LocalDate.class)); + assertEquals(DateTimeFormatter.ofPattern("Y-w-e").parse(weekYear + "-1-1") + .query(TemporalQueries.localDate()), rs.getObject(6, LocalDate.class)); + } + } conn.close(); } @@ -1173,8 +1079,8 @@ private void testSchemaSearchPath() throws SQLException { stat.execute("SET SCHEMA TEST"); stat.execute("CREATE ALIAS PARSE_INT2 FOR " + "\"java.lang.Integer.parseInt(java.lang.String, int)\";"); - rs = stat.executeQuery("SELECT ALIAS_NAME FROM " + - "INFORMATION_SCHEMA.FUNCTION_ALIASES WHERE ALIAS_SCHEMA ='TEST'"); + rs = stat.executeQuery("SELECT ROUTINE_NAME FROM " + + "INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA ='TEST'"); rs.next(); assertEquals("PARSE_INT2", rs.getString(1)); stat.execute("DROP ALIAS PARSE_INT2"); @@ -1187,8 +1093,8 @@ private void testSchemaSearchPath() throws SQLException { rs = stat.executeQuery("CALL PARSE_INT2('-FF', 16)"); rs.next(); assertEquals(-255, rs.getInt(1)); - rs = stat.executeQuery("SELECT ALIAS_NAME FROM " + - "INFORMATION_SCHEMA.FUNCTION_ALIASES WHERE ALIAS_SCHEMA ='TEST'"); + rs = stat.executeQuery("SELECT ROUTINE_NAME FROM " + + "INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA ='TEST'"); rs.next(); assertEquals("PARSE_INT2", rs.getString(1)); rs = stat.executeQuery("CALL TEST.PARSE_INT2('-2147483648', 10)"); @@ -1200,164 +1106,96 @@ private void testSchemaSearchPath() throws SQLException { conn.close(); } + private void testArray() throws SQLException { + deleteDb("functions"); + Connection conn = getConnection("functions"); + PreparedStatement prep = conn.prepareStatement("SELECT ARRAY_MAX_CARDINALITY(?)"); + prep.setObject(1, new Integer[] { 1, 2, 3 }); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + assertEquals(3, rs.getInt(1)); + } + conn.close(); + } + private void testArrayParameters() throws SQLException { deleteDb("functions"); 
Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - ResultSet rs; stat.execute("create alias array_test AS " + "$$ Integer[] array_test(Integer[] in_array) " + "{ return in_array; } $$;"); - PreparedStatement stmt = conn.prepareStatement( + PreparedStatement prep = conn.prepareStatement( "select array_test(?) from dual"); - stmt.setObject(1, new Integer[] { 1, 2 }); - rs = stmt.executeQuery(); - rs.next(); - assertEquals(Integer[].class.getName(), rs.getObject(1).getClass() - .getName()); + prep.setObject(1, new Integer[] { 1, 2 }); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + assertTrue(rs.getObject(1) instanceof Array); + } CallableStatement call = conn.prepareCall("{ ? = call array_test(?) }"); call.setObject(2, new Integer[] { 2, 1 }); call.registerOutParameter(1, Types.ARRAY); call.execute(); - assertEquals(Integer[].class.getName(), call.getArray(1).getArray() + assertEquals(Object[].class.getName(), call.getArray(1).getArray() .getClass().getName()); - assertEquals(new Integer[]{2, 1}, (Integer[]) call.getObject(1)); + assertEquals(new Object[]{2, 1}, (Object[]) ((Array) call.getObject(1)).getArray()); stat.execute("drop alias array_test"); - conn.close(); - } - - private void testTruncate() throws SQLException { - deleteDb("functions"); - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - - ResultSet rs = stat.executeQuery("SELECT TRUNCATE(1.234, 2) FROM dual"); - rs.next(); - assertEquals(1.23d, rs.getDouble(1)); - - rs = stat.executeQuery( - "SELECT CURRENT_TIMESTAMP(), " + - "TRUNCATE(CURRENT_TIMESTAMP()) FROM dual"); - rs.next(); - Calendar c = DateTimeUtils.createGregorianCalendar(); - c.setTime(rs.getTimestamp(1)); - c.set(Calendar.HOUR_OF_DAY, 0); - c.set(Calendar.MINUTE, 0); - c.set(Calendar.SECOND, 0); - c.set(Calendar.MILLISECOND, 0); - java.util.Date nowDate = c.getTime(); - assertEquals(nowDate, rs.getTimestamp(2)); - - assertThrows(SQLException.class, 
stat).executeQuery("SELECT TRUNCATE('bad', 1) FROM dual"); - - // check for passing wrong data type - rs = assertThrows(SQLException.class, stat).executeQuery("SELECT TRUNCATE('bad') FROM dual"); - - // check for too many parameters - rs = assertThrows(SQLException.class, stat).executeQuery("SELECT TRUNCATE(1,2,3) FROM dual"); - - conn.close(); - } - - private void testDateTrunc() throws SQLException { - deleteDb("functions"); - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(S VARCHAR, TS TIMESTAMP, D DATE, T TIME, TZ TIMESTAMP WITH TIME ZONE)"); - stat.execute("INSERT INTO TEST VALUES ('2010-01-01 10:11:12', '2010-01-01 10:11:12'," - + " '2010-01-01', '10:11:12', '2010-01-01 10:11:12Z')"); - ResultSetMetaData md = stat.executeQuery("SELECT DATE_TRUNC('HOUR', S), DATE_TRUNC('HOUR', TS)," - + " DATE_TRUNC('HOUR', D), DATE_TRUNC('HOUR', T), DATE_TRUNC('HOUR', TZ) FROM TEST") - .getMetaData(); - assertEquals(Types.TIMESTAMP, md.getColumnType(1)); - assertEquals(Types.TIMESTAMP, md.getColumnType(2)); - assertEquals(Types.TIMESTAMP, md.getColumnType(3)); - assertEquals(Types.TIMESTAMP, md.getColumnType(4)); - assertEquals(/* TODO use Types.TIMESTAMP_WITH_TIMEZONE on Java 8 */ 2014, md.getColumnType(5)); - conn.close(); - } - - private void testExtract() throws SQLException { - deleteDb("functions"); - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(TS TIMESTAMP)"); - stat.execute("INSERT INTO TEST VALUES ('2010-01-01 10:11:12')"); - assertEquals(Types.INTEGER, stat.executeQuery("SELECT EXTRACT(DAY FROM TS) FROM TEST") - .getMetaData().getColumnType(1)); - assertEquals(Types.DECIMAL, stat.executeQuery("SELECT EXTRACT(EPOCH FROM TS) FROM TEST") - .getMetaData().getColumnType(1)); - conn.close(); - } - - private void testTranslate() throws SQLException { - Connection conn = getConnection("functions"); - Statement stat = 
conn.createStatement(); - - String createSQL = "CREATE TABLE testTranslate(id BIGINT, " + - "txt1 varchar);"; - stat.execute(createSQL); - stat.execute("insert into testTranslate(id, txt1) " + - "values(1, 'test1')"); - stat.execute("insert into testTranslate(id, txt1) " + - "values(2, null)"); - stat.execute("insert into testTranslate(id, txt1) " + - "values(3, '')"); - stat.execute("insert into testTranslate(id, txt1) " + - "values(4, 'caps')"); - - String query = "SELECT translate(txt1, 'p', 'r') " + - "FROM testTranslate order by id asc"; - ResultSet rs = stat.executeQuery(query); - rs.next(); - String actual = rs.getString(1); - assertEquals("test1", actual); - rs.next(); - actual = rs.getString(1); - assertNull(actual); - rs.next(); - actual = rs.getString(1); - assertEquals("", actual); - rs.next(); - actual = rs.getString(1); - assertEquals("cars", actual); - rs.close(); - - rs = stat.executeQuery("select translate(null,null,null)"); - rs.next(); - assertNull(rs.getObject(1)); + stat.execute("CREATE ALIAS F DETERMINISTIC FOR '" + TestFunctions.class.getName() + ".arrayParameters1'"); + prep = conn.prepareStatement("SELECT F(ARRAY[ARRAY['1', '2'], ARRAY['3']])"); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + assertEquals(new Integer[][] {{1, 2}, {3}}, rs.getObject(1, Integer[][].class)); + } + prep = conn.prepareStatement("SELECT F(ARRAY[ARRAY[1::BIGINT, 2::BIGINT], ARRAY[3::BIGINT]])"); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + assertEquals(new Short[][] {{1, 2}, {3}}, rs.getObject(1, Short[][].class)); + } + stat.execute("DROP ALIAS F"); - stat.execute("drop table testTranslate"); conn.close(); } - private void testToDateException(Session session) { - try { - ToDateParser.toDate(session, "1979-ThisWillFail-12", "YYYY-MM-DD"); - } catch (Exception e) { - assertEquals(DbException.class.getSimpleName(), e.getClass().getSimpleName()); + /** + * This method is called with reflection. 
+ * + * @param x argument + * @return result + */ + public static Integer[][] arrayParameters1(String[][] x) { + int l = x.length; + Integer[][] result = new Integer[l][]; + for (int i = 0; i < l; i++) { + String[] x1 = x[i]; + int l1 = x1.length; + Integer[] r1 = new Integer[l1]; + for (int j = 0; j < l1; j++) { + r1[j] = Integer.parseInt(x1[j]); + } + result[i] = r1; } + return result; + } - try { - ToDateParser.toDate(session, "1-DEC-0000", "DD-MON-RRRR"); - fail("Oracle to_date should reject year 0 (ORA-01841)"); - } catch (Exception e) { - // expected - } + private void testToDateException(SessionLocal session) { + assertThrows(ErrorCode.INVALID_TO_DATE_FORMAT, + () -> ToDateParser.toDate(session, "1979-ThisWillFail-12", "YYYY-MM-DD")); + assertThrows(ErrorCode.INVALID_TO_DATE_FORMAT, // + () -> ToDateParser.toDate(session, "1-DEC-0000", "DD-MON-RRRR")); } - private void testToDate(Session session) throws ParseException { - GregorianCalendar calendar = DateTimeUtils.createGregorianCalendar(); + private void testToDate(SessionLocal session) { + GregorianCalendar calendar = new GregorianCalendar(); int year = calendar.get(Calendar.YEAR); int month = calendar.get(Calendar.MONTH) + 1; // Default date in Oracle is the first day of the current month String defDate = year + "-" + month + "-1 "; ValueTimestamp date = null; - date = ValueTimestamp.parse("1979-11-12"); + date = ValueTimestamp.parse("1979-11-12", null); assertEquals(date, ToDateParser.toDate(session, "1979-11-12T00:00:00Z", "YYYY-MM-DD\"T\"HH24:MI:SS\"Z\"")); assertEquals(date, ToDateParser.toDate(session, "1979*foo*1112", "YYYY\"*foo*\"MM\"\"DD")); assertEquals(date, ToDateParser.toDate(session, "1979-11-12", "YYYY-MM-DD")); @@ -1367,7 +1205,7 @@ private void testToDate(Session session) throws ParseException { assertEquals(date, ToDateParser.toDate(session, "1979;11;12", "YYYY;MM;DD")); assertEquals(date, ToDateParser.toDate(session, "1979:11:12", "YYYY:MM:DD")); - date = ValueTimestamp.parse("1979-" + 
month + "-01"); + date = ValueTimestamp.parse("1979-" + month + "-01", null); assertEquals(date, ToDateParser.toDate(session, "1979", "YYYY")); assertEquals(date, ToDateParser.toDate(session, "1979 AD", "YYYY AD")); assertEquals(date, ToDateParser.toDate(session, "1979 A.D.", "YYYY A.D.")); @@ -1375,10 +1213,10 @@ private void testToDate(Session session) throws ParseException { assertEquals(date, ToDateParser.toDate(session, "+1979", "SYYYY")); assertEquals(date, ToDateParser.toDate(session, "79", "RRRR")); - date = ValueTimestamp.parse(defDate + "00:12:00"); + date = ValueTimestamp.parse(defDate + "00:12:00", null); assertEquals(date, ToDateParser.toDate(session, "12", "MI")); - date = ValueTimestamp.parse("1970-11-01"); + date = ValueTimestamp.parse("1970-11-01", null); assertEquals(date, ToDateParser.toDate(session, "11", "MM")); assertEquals(date, ToDateParser.toDate(session, "11", "Mm")); assertEquals(date, ToDateParser.toDate(session, "11", "mM")); @@ -1386,18 +1224,18 @@ private void testToDate(Session session) throws ParseException { assertEquals(date, ToDateParser.toDate(session, "XI", "RM")); int y = (year / 10) * 10 + 9; - date = ValueTimestamp.parse(y + "-" + month + "-01"); + date = ValueTimestamp.parse(y + "-" + month + "-01", null); assertEquals(date, ToDateParser.toDate(session, "9", "Y")); y = (year / 100) * 100 + 79; - date = ValueTimestamp.parse(y + "-" + month + "-01"); + date = ValueTimestamp.parse(y + "-" + month + "-01", null); assertEquals(date, ToDateParser.toDate(session, "79", "YY")); y = (year / 1_000) * 1_000 + 979; - date = ValueTimestamp.parse(y + "-" + month + "-01"); + date = ValueTimestamp.parse(y + "-" + month + "-01", null); assertEquals(date, ToDateParser.toDate(session, "979", "YYY")); // Gregorian calendar does not have a year 0. // 0 = 0001 BC, -1 = 0002 BC, ... 
so we adjust - date = ValueTimestamp.parse("-99-" + month + "-01"); + date = ValueTimestamp.parse("-99-" + month + "-01", null); assertEquals(date, ToDateParser.toDate(session, "0100 BC", "YYYY BC")); assertEquals(date, ToDateParser.toDate(session, "0100 B.C.", "YYYY B.C.")); assertEquals(date, ToDateParser.toDate(session, "-0100", "SYYYY")); @@ -1406,90 +1244,98 @@ private void testToDate(Session session) throws ParseException { // Gregorian calendar does not have a year 0. // 0 = 0001 BC, -1 = 0002 BC, ... so we adjust y = -((year / 1_000) * 1_000 + 99); - date = ValueTimestamp.parse(y + "-" + month + "-01"); + date = ValueTimestamp.parse(y + "-" + month + "-01", null); assertEquals(date, ToDateParser.toDate(session, "100 BC", "YYY BC")); // Gregorian calendar does not have a year 0. // 0 = 0001 BC, -1 = 0002 BC, ... so we adjust y = -((year / 100) * 100); - date = ValueTimestamp.parse(y + "-" + month + "-01"); + date = ValueTimestamp.parse(y + "-" + month + "-01", null); assertEquals(date, ToDateParser.toDate(session, "01 BC", "YY BC")); y = -((year / 10) * 10); - date = ValueTimestamp.parse(y + "-" + month + "-01"); + date = ValueTimestamp.parse(y + "-" + month + "-01", null); assertEquals(date, ToDateParser.toDate(session, "1 BC", "Y BC")); - date = ValueTimestamp.parse(defDate + "08:12:00"); + date = ValueTimestamp.parse(defDate + "08:12:00", null); assertEquals(date, ToDateParser.toDate(session, "08:12 AM", "HH:MI AM")); assertEquals(date, ToDateParser.toDate(session, "08:12 A.M.", "HH:MI A.M.")); assertEquals(date, ToDateParser.toDate(session, "08:12", "HH24:MI")); - date = ValueTimestamp.parse(defDate + "08:12:00"); + date = ValueTimestamp.parse(defDate + "08:12:00", null); assertEquals(date, ToDateParser.toDate(session, "08:12", "HH:MI")); assertEquals(date, ToDateParser.toDate(session, "08:12", "HH12:MI")); - date = ValueTimestamp.parse(defDate + "08:12:34"); + date = ValueTimestamp.parse(defDate + "08:12:34", null); assertEquals(date, 
ToDateParser.toDate(session, "08:12:34", "HH:MI:SS")); - date = ValueTimestamp.parse(defDate + "12:00:00"); + date = ValueTimestamp.parse(defDate + "12:00:00", null); assertEquals(date, ToDateParser.toDate(session, "12:00:00 PM", "HH12:MI:SS AM")); - date = ValueTimestamp.parse(defDate + "00:00:00"); + date = ValueTimestamp.parse(defDate + "00:00:00", null); assertEquals(date, ToDateParser.toDate(session, "12:00:00 AM", "HH12:MI:SS AM")); - date = ValueTimestamp.parse(defDate + "00:00:34"); + date = ValueTimestamp.parse(defDate + "00:00:34", null); assertEquals(date, ToDateParser.toDate(session, "34", "SS")); - date = ValueTimestamp.parse(defDate + "08:12:34"); + date = ValueTimestamp.parse(defDate + "08:12:34", null); assertEquals(date, ToDateParser.toDate(session, "29554", "SSSSS")); - date = ValueTimestamp.parse(defDate + "08:12:34.550"); + date = ValueTimestamp.parse(defDate + "08:12:34.550", null); assertEquals(date, ToDateParser.toDate(session, "08:12:34 550", "HH:MI:SS FF")); assertEquals(date, ToDateParser.toDate(session, "08:12:34 55", "HH:MI:SS FF2")); - date = ValueTimestamp.parse(defDate + "14:04:00"); + date = ValueTimestamp.parse(defDate + "14:04:00", null); assertEquals(date, ToDateParser.toDate(session, "02:04 P.M.", "HH:MI p.M.")); assertEquals(date, ToDateParser.toDate(session, "02:04 PM", "HH:MI PM")); - date = ValueTimestamp.parse("1970-" + month + "-12"); + date = ValueTimestamp.parse("1970-" + month + "-12", null); assertEquals(date, ToDateParser.toDate(session, "12", "DD")); - date = ValueTimestamp.parse(year + (calendar.isLeapYear(year) ? "11-11" : "-11-12")); + date = ValueTimestamp.parse(year + (calendar.isLeapYear(year) ? 
"-11-11" : "-11-12"), null); assertEquals(date, ToDateParser.toDate(session, "316", "DDD")); assertEquals(date, ToDateParser.toDate(session, "316", "DdD")); assertEquals(date, ToDateParser.toDate(session, "316", "dDD")); assertEquals(date, ToDateParser.toDate(session, "316", "ddd")); - date = ValueTimestamp.parse("2013-01-29"); + date = ValueTimestamp.parse("2013-01-29", null); assertEquals(date, ToDateParser.toDate(session, "2456322", "J")); if (Locale.getDefault().getLanguage().equals("en")) { - date = ValueTimestamp.parse("9999-12-31 23:59:59"); + date = ValueTimestamp.parse("9999-12-31 23:59:59", null); assertEquals(date, ToDateParser.toDate(session, "31-DEC-9999 23:59:59", "DD-MON-YYYY HH24:MI:SS")); assertEquals(date, ToDateParser.toDate(session, "31-DEC-9999 23:59:59", "DD-MON-RRRR HH24:MI:SS")); - assertEquals(ValueTimestamp.parse("0001-03-01"), + assertEquals(ValueTimestamp.parse("0001-03-01", null), ToDateParser.toDate(session, "1-MAR-0001", "DD-MON-RRRR")); - assertEquals(ValueTimestamp.parse("9999-03-01"), + assertEquals(ValueTimestamp.parse("9999-03-01", null), ToDateParser.toDate(session, "1-MAR-9999", "DD-MON-RRRR")); - assertEquals(ValueTimestamp.parse("2000-03-01"), ToDateParser.toDate(session, "1-MAR-000", "DD-MON-RRRR")); - assertEquals(ValueTimestamp.parse("1999-03-01"), ToDateParser.toDate(session, "1-MAR-099", "DD-MON-RRRR")); - assertEquals(ValueTimestamp.parse("0100-03-01"), ToDateParser.toDate(session, "1-MAR-100", "DD-MON-RRRR")); - assertEquals(ValueTimestamp.parse("2000-03-01"), ToDateParser.toDate(session, "1-MAR-00", "DD-MON-RRRR")); - assertEquals(ValueTimestamp.parse("2049-03-01"), ToDateParser.toDate(session, "1-MAR-49", "DD-MON-RRRR")); - assertEquals(ValueTimestamp.parse("1950-03-01"), ToDateParser.toDate(session, "1-MAR-50", "DD-MON-RRRR")); - assertEquals(ValueTimestamp.parse("1999-03-01"), ToDateParser.toDate(session, "1-MAR-99", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("2000-03-01", null), + 
ToDateParser.toDate(session, "1-MAR-000", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("1999-03-01", null), + ToDateParser.toDate(session, "1-MAR-099", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("0100-03-01", null), + ToDateParser.toDate(session, "1-MAR-100", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("2000-03-01", null), + ToDateParser.toDate(session, "1-MAR-00", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("2049-03-01", null), + ToDateParser.toDate(session, "1-MAR-49", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("1950-03-01", null), + ToDateParser.toDate(session, "1-MAR-50", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("1999-03-01", null), + ToDateParser.toDate(session, "1-MAR-99", "DD-MON-RRRR")); } - assertEquals(ValueTimestampTimeZone.parse("2000-05-10 10:11:12-08:15"), + assertEquals(ValueTimestampTimeZone.parse("2000-05-10 10:11:12-08:15", null), ToDateParser.toTimestampTz(session, "2000-05-10 10:11:12 -8:15", "YYYY-MM-DD HH24:MI:SS TZH:TZM")); - assertEquals(ValueTimestampTimeZone.parse("2000-05-10 10:11:12-08:15"), + assertEquals(ValueTimestampTimeZone.parse("2000-05-10 10:11:12-08:15", null), ToDateParser.toTimestampTz(session, "2000-05-10 10:11:12 GMT-08:15", "YYYY-MM-DD HH24:MI:SS TZR")); - assertEquals(ValueTimestampTimeZone.parse("2000-02-10 10:11:12-08"), + assertEquals(ValueTimestampTimeZone.parse("2000-02-10 10:11:12-08", null), ToDateParser.toTimestampTz(session, "2000-02-10 10:11:12 US/Pacific", "YYYY-MM-DD HH24:MI:SS TZR")); - assertEquals(ValueTimestampTimeZone.parse("2000-02-10 10:11:12-08"), + assertEquals(ValueTimestampTimeZone.parse("2000-02-10 10:11:12-08", null), ToDateParser.toTimestampTz(session, "2000-02-10 10:11:12 PST", "YYYY-MM-DD HH24:MI:SS TZD")); } private void testToCharFromDateTime() throws SQLException { + ToCharFunction.clearNames(); deleteDb("functions"); Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); @@ -1499,6 +1345,9 @@ private 
void testToCharFromDateTime() throws SQLException { boolean daylight = tz.inDaylightTime(timestamp1979); String tzShortName = tz.getDisplayName(daylight, TimeZone.SHORT); String tzLongName = tz.getID(); + if (tzLongName.equals("Etc/UTC")) { + tzLongName = "UTC"; + } stat.executeUpdate("CREATE TABLE T (X TIMESTAMP(6))"); stat.executeUpdate("INSERT INTO T VALUES " + @@ -1508,7 +1357,7 @@ private void testToCharFromDateTime() throws SQLException { "(TIMESTAMP '-100-01-15 14:04:02.120')"); assertResult("1979-11-12 08:12:34.56", stat, "SELECT X FROM T"); - assertResult("-100-01-15 14:04:02.12", stat, "SELECT X FROM U"); + assertResult("-0100-01-15 14:04:02.12", stat, "SELECT X FROM U"); String expected = String.format("%tb", timestamp1979).toUpperCase(); expected = stripTrailingPeriod(expected); assertResult("12-" + expected + "-79 08.12.34.560000000 AM", stat, @@ -1553,8 +1402,9 @@ private void testToCharFromDateTime() throws SQLException { assertResult("014", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'IYY') FROM DUAL"); assertResult("14", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'IY') FROM DUAL"); assertResult("4", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'I') FROM DUAL"); - assertResult("0001", stat, "SELECT TO_CHAR(DATE '-0001-01-01', 'IYYY') FROM DUAL"); - assertResult("0005", stat, "SELECT TO_CHAR(DATE '-0004-01-01', 'IYYY') FROM DUAL"); + assertResult("0002", stat, "SELECT TO_CHAR(DATE '-0001-01-01', 'IYYY') FROM DUAL"); + assertResult("0001", stat, "SELECT TO_CHAR(DATE '-0001-01-04', 'IYYY') FROM DUAL"); + assertResult("0004", stat, "SELECT TO_CHAR(DATE '-0004-01-01', 'IYYY') FROM DUAL"); assertResult("08:12 AM", stat, "SELECT TO_CHAR(X, 'HH:MI AM') FROM T"); assertResult("08:12 A.M.", stat, "SELECT TO_CHAR(X, 'HH:MI A.M.') FROM T"); assertResult("02:04 P.M.", stat, "SELECT TO_CHAR(X, 'HH:MI A.M.') FROM U"); @@ -1681,6 +1531,16 @@ private void testToCharFromDateTime() throws SQLException { "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+10:30', 
'TZR')"); assertResult("GMT+10:30", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+10:30', 'TZD')"); + + assertResult("-10", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00-10:00', 'TZH')"); + assertResult("+10", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+10:00', 'TZH')"); + assertResult("+00", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+00:00', 'TZH')"); + assertResult("50", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+00:50', 'TZM')"); + assertResult("00", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+00:00', 'TZM')"); + assertResult("-10:50", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00-10:50', 'TZH:TZM')"); + assertResult("+10:50", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+10:50', 'TZH:TZM')"); + assertResult("+00:00", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+00:00', 'TZH:TZM')"); + expected = String.format("%f", 1.1).substring(1, 2); assertResult(expected, stat, "SELECT TO_CHAR(X, 'X') FROM T"); expected = String.format("%,d", 1979); @@ -1695,6 +1555,17 @@ private void testToCharFromDateTime() throws SQLException { assertThrows(ErrorCode.INVALID_TO_CHAR_FORMAT, stat, "SELECT TO_CHAR(X, 'A') FROM T"); + assertResult("01-1 2000-01 1999-52", stat, "SELECT TO_CHAR(DATE '2000-01-01', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 1999-52", stat, "SELECT TO_CHAR(DATE '2000-01-02', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-03', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-04', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-05', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-06', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 
2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-07', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-2 2000-02 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-08', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("02-1 2000-05 2000-05", stat, "SELECT TO_CHAR(DATE '2000-02-01', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("12-5 2000-53 2000-52", stat, "SELECT TO_CHAR(DATE '2000-12-31', 'MM-W YYYY-WW IYYY-IW')"); + // check a bug we had when the month or day of the month is 1 digit stat.executeUpdate("TRUNCATE TABLE T"); stat.executeUpdate("INSERT INTO T VALUES (TIMESTAMP '1985-01-01 08:12:34.560')"); @@ -1711,30 +1582,14 @@ private static String stripTrailingPeriod(String expected) { return expected; } - private void testIfNull() throws SQLException { - deleteDb("functions"); - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement( - ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); - stat.execute("CREATE TABLE T(f1 double)"); - stat.executeUpdate("INSERT INTO T VALUES( 1.2 )"); - stat.executeUpdate("INSERT INTO T VALUES( null )"); - ResultSet rs = stat.executeQuery("SELECT IFNULL(f1, 0.0) FROM T"); - ResultSetMetaData metaData = rs.getMetaData(); - assertEquals("java.lang.Double", metaData.getColumnClassName(1)); - rs.next(); - assertEquals("java.lang.Double", rs.getObject(1).getClass().getName()); - rs.next(); - assertEquals("java.lang.Double", rs.getObject(1).getClass().getName()); - conn.close(); - } - private void testToCharFromNumber() throws SQLException { deleteDb("functions"); Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); + Locale.setDefault(new Locale("en")); - Currency currency = Currency.getInstance(Locale.getDefault()); + Locale locale = Locale.getDefault(); + Currency currency = Currency.getInstance(locale.getCountry().length() == 2 ? 
locale : Locale.US); String cc = currency.getCurrencyCode(); String cs = currency.getSymbol(); @@ -1771,7 +1626,7 @@ private void testToCharFromNumber() throws SQLException { assertResult("######", stat, "SELECT TO_CHAR(12345, '$9999') FROM DUAL"); String expected = String.format("%,d", 12345); - if (Locale.getDefault() == Locale.ENGLISH) { + if (locale == Locale.ENGLISH) { assertResult(String.format("%5s12345", cs), stat, "SELECT TO_CHAR(12345, '$99999999') FROM DUAL"); assertResult(String.format("%6s12,345.35", cs), stat, @@ -1986,6 +1841,9 @@ private void testToCharFromNumber() throws SQLException { assertResult(twoDecimals, stat, "select to_char(0, 'FM0D009') from dual;"); assertResult(oneDecimal, stat, "select to_char(0, 'FM0D09') from dual;"); assertResult(oneDecimal, stat, "select to_char(0, 'FM0D0') from dual;"); + + assertResult("10,000,000.", stat, + "SELECT TO_CHAR(CAST(10000000 AS DOUBLE PRECISION), 'FM999,999,999.99') FROM DUAL"); conn.close(); } @@ -1997,56 +1855,6 @@ private void testToCharFromText() throws SQLException { conn.close(); } - private void testGenerateSeries() throws SQLException { - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - - ResultSet rs = stat.executeQuery("select * from system_range(1,3)"); - rs.next(); - assertEquals(1, rs.getInt(1)); - rs.next(); - assertEquals(2, rs.getInt(1)); - rs.next(); - assertEquals(3, rs.getInt(1)); - - rs = stat.executeQuery("select * from system_range(2,2)"); - assertTrue(rs.next()); - assertEquals(2, rs.getInt(1)); - - rs = stat.executeQuery("select * from system_range(2,1)"); - assertFalse(rs.next()); - - rs = stat.executeQuery("select * from system_range(1,2,-1)"); - assertFalse(rs.next()); - - assertThrows(ErrorCode.STEP_SIZE_MUST_NOT_BE_ZERO, stat).executeQuery( - "select * from system_range(1,2,0)"); - - rs = stat.executeQuery("select * from system_range(2,1,-1)"); - assertTrue(rs.next()); - assertEquals(2, rs.getInt(1)); - assertTrue(rs.next()); - 
assertEquals(1, rs.getInt(1)); - - rs = stat.executeQuery("select * from system_range(1,5,2)"); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - assertTrue(rs.next()); - assertEquals(3, rs.getInt(1)); - assertTrue(rs.next()); - assertEquals(5, rs.getInt(1)); - - rs = stat.executeQuery("select * from system_range(1,6,2)"); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - assertTrue(rs.next()); - assertEquals(3, rs.getInt(1)); - assertTrue(rs.next()); - assertEquals(5, rs.getInt(1)); - - conn.close(); - } - private void testAnnotationProcessorsOutput() { try { System.setProperty(TestAnnotationProcessor.MESSAGES_KEY, "WARNING,foo1|ERROR,foo2"); @@ -2061,41 +1869,18 @@ private void testAnnotationProcessorsOutput() { } } - private void testRound() throws SQLException { - deleteDb("functions"); - - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - - final ResultSet rs = stat.executeQuery( - "select ROUND(-1.2), ROUND(-1.5), ROUND(-1.6), " + - "ROUND(2), ROUND(1.5), ROUND(1.8), ROUND(1.1) from dual"); - - rs.next(); - assertEquals(-1, rs.getInt(1)); - assertEquals(-2, rs.getInt(2)); - assertEquals(-2, rs.getInt(3)); - assertEquals(2, rs.getInt(4)); - assertEquals(2, rs.getInt(5)); - assertEquals(2, rs.getInt(6)); - assertEquals(1, rs.getInt(7)); - - rs.close(); - conn.close(); - } - private void testSignal() throws SQLException { deleteDb("functions"); Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - assertThrows(ErrorCode.INVALID_VALUE_2, stat).execute("select signal('00145', 'success class is invalid')"); - assertThrows(ErrorCode.INVALID_VALUE_2, stat).execute("select signal('foo', 'SQLSTATE has 5 chars')"); + assertThrows(ErrorCode.INVALID_VALUE_2, stat).execute("call signal('00145', 'success class is invalid')"); + assertThrows(ErrorCode.INVALID_VALUE_2, stat).execute("call signal('foo', 'SQLSTATE has 5 chars')"); assertThrows(ErrorCode.INVALID_VALUE_2, stat) - 
.execute("select signal('Ab123', 'SQLSTATE has only digits or upper-case letters')"); + .execute("call signal('Ab123', 'SQLSTATE has only digits or upper-case letters')"); try { - stat.execute("select signal('AB123', 'some custom error')"); + stat.execute("call signal('AB123', 'some custom error')"); fail("Should have thrown"); } catch (SQLException e) { assertEquals("AB123", e.getSQLState()); @@ -2160,6 +1945,9 @@ private void testThatCurrentTimestampStaysTheSameWithinATransaction() private void testThatCurrentTimestampUpdatesOutsideATransaction() throws SQLException, InterruptedException { + if (config.lazy && config.networked) { + return; + } deleteDb("functions"); Connection conn = getConnection("functions"); conn.setAutoCommit(true); @@ -2189,13 +1977,12 @@ private void testOverrideAlias() throws SQLException { conn.setAutoCommit(true); Statement stat = conn.createStatement(); - assertThrows(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, stat).execute("create alias CURRENT_TIMESTAMP for \"" + - getClass().getName() + ".currentTimestamp\""); + assertThrows(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, stat).execute("create alias CURRENT_TIMESTAMP for '" + + getClass().getName() + ".currentTimestamp'"); stat.execute("set BUILTIN_ALIAS_OVERRIDE true"); - stat.execute("create alias CURRENT_TIMESTAMP for \"" + - getClass().getName() + ".currentTimestampOverride\""); + stat.execute("create alias CURRENT_TIMESTAMP for '" + getClass().getName() + ".currentTimestampOverride'"); assertCallResult("3141", stat, "CURRENT_TIMESTAMP"); @@ -2329,8 +2116,8 @@ public static ResultSet selectMaxId(Connection conn) throws SQLException { * * @return the test array */ - public static Object[] getArray() { - return new Object[] { 0, "Hello" }; + public static String[] getArray() { + return new String[] { "0", "Hello" }; } /** @@ -2345,16 +2132,6 @@ public static ResultSet resultSetWithNull(Connection conn) throws SQLException { return statement.executeQuery(); } - /** - * This method is 
called via reflection from the database. - * - * @param conn the connection - * @return the result set - */ - public static ResultSet nullResultSet(@SuppressWarnings("unused") Connection conn) { - return null; - } - /** * Test method to create a simple result set. * @@ -2512,12 +2289,12 @@ public static UUID xorUUID(UUID a, UUID b) { * @param args the argument list * @return an array of one element */ - public static Object[] dynamic(Object[] args) { + public static String[] dynamic(String[] args) { StringBuilder buff = new StringBuilder(); for (Object a : args) { buff.append(a); } - return new Object[] { buff.toString() }; + return new String[] { buff.toString() }; } /** @@ -2547,9 +2324,4 @@ public int getType(int[] inputTypes) { return Types.DECIMAL; } - @Override - public void init(Connection conn) { - // ignore - } - } diff --git a/h2/src/test/org/h2/test/db/TestGeneralCommonTableQueries.java b/h2/src/test/org/h2/test/db/TestGeneralCommonTableQueries.java index 3ee40ec6a5..654da27f6e 100644 --- a/h2/src/test/org/h2/test/db/TestGeneralCommonTableQueries.java +++ b/h2/src/test/org/h2/test/db/TestGeneralCommonTableQueries.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -24,7 +24,7 @@ public class TestGeneralCommonTableQueries extends AbstractBaseForCommonTableExp * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -237,7 +237,7 @@ private void testNumberedParameterizedQuery() throws Exception { "- but should not have been."); } catch (SQLException e) { // ensure the T1 table has been removed even without auto commit - assertContains(e.getMessage(), "Table \"T1\" not found;"); + assertContains(e.getMessage(), "Table \"T1\" not found (this database is empty);"); } conn.close(); @@ -253,7 +253,7 @@ private void testInsert() throws Exception { int rowCount; stat = conn.createStatement(); - stat.execute("CREATE TABLE T1 ( ID INT IDENTITY, X INT NULL, Y VARCHAR(100) NULL )"); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY, X INT NULL, Y VARCHAR(100) NULL )"); prep = conn.prepareStatement("WITH v1 AS (" + " SELECT R.X, 'X1' AS Y FROM SYSTEM_RANGE(?1,?2) R" @@ -517,6 +517,9 @@ private void testSimple2By4RowRecursiveQuery() throws Exception { } private void testSimple3RowRecursiveQueryWithLazyEval() throws Exception { + if (config.lazy && config.networked) { + return; + } String[] expectedRowData = new String[]{"|6"}; String[] expectedColumnTypes = new String[]{"BIGINT"}; @@ -530,11 +533,9 @@ private void testSimple3RowRecursiveQueryWithLazyEval() throws Exception { // Test with settings: lazy mvStore memory multiThreaded // connection url is // mem:script;MV_STORE=true;LOG=1;LOCK_TIMEOUT=50; - // MULTI_THREADED=TRUE;LAZY_QUERY_EXECUTION=1 + // LAZY_QUERY_EXECUTION=1 config.lazy = true; - config.mvStore = true; config.memory = true; - config.multiThreaded = true; String setupSQL = "--no config set"; String withQuery = "select sum(n) from (\n" diff --git a/h2/src/test/org/h2/test/db/TestIgnoreCatalogs.java b/h2/src/test/org/h2/test/db/TestIgnoreCatalogs.java new file mode 100644 index 0000000000..7e0712016a --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestIgnoreCatalogs.java @@ -0,0 +1,240 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; + +import org.h2.api.ErrorCode; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * @author aschoerk + */ +public class TestIgnoreCatalogs extends TestDb { + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + canCommentOn(); + canUseDefaultSchema(); + canYetIdentifyWrongCatalogName(); + canUseSettingInUrl(); + canUseSetterSyntax(); + canCatalogNameEqualSchemaName(); + canUseCatalogAtIndexName(); + canCommentOn(); + canAllCombined(); + doesNotAcceptEmptySchemaWhenNotMSSQL(); + } + + private void doesNotAcceptEmptySchemaWhenNotMSSQL() throws SQLException { + try (Connection conn = getConnection("ignoreCatalogs;IGNORE_CATALOGS=TRUE")) { + try (Statement stat = conn.createStatement()) { + prepareDbAndSetDefaultSchema(stat); + stat.execute("set schema dbo"); + stat.execute("create table catalog1.dbo.test(id int primary key, name varchar(255))"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on table catalog1..test is 'table comment3'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "create table catalog1..test2(id int primary key, " + + "name varchar(255))"); + stat.execute("comment on table catalog1.dbo.test is 'table comment1'"); + stat.execute("insert into test values(1, 'Hello')"); + stat.execute("insert into cat.dbo.test values(2, 'Hello2')"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column catalog1...test.id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column catalog1..test..id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column 
..test..id is 'id comment1'"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canCommentOn() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;IGNORE_CATALOGS=TRUE;")) { + try (Statement stat = conn.createStatement()) { + prepareDbAndSetDefaultSchema(stat); + stat.execute("create table catalog1.dbo.test(id int primary key, name varchar(255))"); + stat.execute("comment on table catalog1.dbo.test is 'table comment1'"); + stat.execute("comment on table dbo.test is 'table comment2'"); + stat.execute("comment on table catalog1..test is 'table comment3'"); + stat.execute("comment on table test is 'table comment4'"); + stat.execute("comment on column catalog1..test.id is 'id comment1'"); + stat.execute("comment on column catalog1.dbo.test.id is 'id comment1'"); + stat.execute("comment on column dbo.test.id is 'id comment1'"); + stat.execute("comment on column test.id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column catalog1...id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column catalog1...test.id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column catalog1..test..id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column ..test..id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column test..id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column .PUBLIC.TEST.ID 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column .TEST.ID 'id comment1'"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canUseDefaultSchema() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;IGNORE_CATALOGS=TRUE;")) { + try (Statement stat = conn.createStatement()) { + prepareDbAndSetDefaultSchema(stat); + stat.execute("create table 
catalog1..test(id int primary key, name varchar(255))"); + + stat.execute("create table test2(id int primary key, name varchar(255))"); + // expect table already exists + assertThrows(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, stat, + "create table catalog2.dbo.test(id int primary key, name varchar(255))"); + stat.execute("insert into test values(1, 'Hello')"); + stat.execute("insert into test2 values(1, 'Hello')"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canUseSettingInUrl() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;IGNORE_CATALOGS=TRUE;")) { + try (Statement stat = conn.createStatement()) { + prepareDb(stat); + stat.execute("create table catalog1.dbo.test(id int primary key, name varchar(255))"); + // expect table already exists + assertThrows(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, stat, + "create table catalog2.dbo.test(id int primary key, name varchar(255))"); + stat.execute("insert into dbo.test values(1, 'Hello')"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + + } + + private void canUseSetterSyntax() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;")) { + try (Statement stat = conn.createStatement()) { + prepareDb(stat); + stat.execute("set IGNORE_CATALOGS=TRUE"); + stat.execute("create table catalog1.dbo.test(id int primary key, name varchar(255))"); + // expect table already exists + assertThrows(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, stat, + "create table catalog2.dbo.test(id int primary key, name varchar(255))"); + stat.execute("insert into dbo.test values(1, 'Hello')"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canCatalogNameEqualSchemaName() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;")) { + try (Statement stat = conn.createStatement()) { + prepareDb(stat); + stat.execute("set IGNORE_CATALOGS=TRUE"); + stat.execute("create table 
dbo.dbo.test(id int primary key, name varchar(255))"); + // expect object already exists + assertThrows(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, stat, + "create table catalog2.dbo.test(id int primary key, name varchar(255))"); + stat.execute("insert into dbo.test values(1, 'Hello')"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canYetIdentifyWrongCatalogName() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;")) { + try (Statement stat = conn.createStatement()) { + prepareDb(stat); + // works, since catalog name equals database name + stat.execute("create table ignoreCatalogs.dbo.test(id int primary key, name varchar(255))"); + // schema test_x not found error + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "create table test_x.dbo.test(id int primary key, name varchar(255))"); + assertThrows(ErrorCode.DATABASE_NOT_FOUND_1, stat, "comment on column db..test.id is 'id'"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canUseCatalogAtIndexName() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;")) { + try (Statement stat = conn.createStatement()) { + prepareDb(stat); + stat.execute("set IGNORE_CATALOGS=TRUE"); + stat.execute("create table dbo.dbo.test(id int primary key, name varchar(255))"); + stat.execute("create index i on dbo.dbo.test(id,name)"); + stat.execute("create index dbo.i2 on dbo.dbo.test(id,name)"); + stat.execute("create index catalog.dbo.i3 on dbo.dbo.test(id,name)"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "create index dboNotExistent.i4 on dbo.dbo.test(id,name)"); + // expect object already exists + stat.execute("insert into dbo.test values(1, 'Hello')"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canAllCombined() throws SQLException { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;IGNORE_CATALOGS=TRUE;")) { + try (Statement stat = 
conn.createStatement()) { + prepareDbAndSetDefaultSchema(stat); + stat.execute("create table dbo.test(id int primary key, name varchar(255))"); + stat.execute("create table catalog1.dbo.test2(id int primary key, name varchar(255))"); + stat.execute("insert into dbo.test values(1, 'Hello')"); + stat.execute("insert into dbo.test2 values(1, 'Hello2')"); + stat.execute("set ignore_catalogs=false"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "insert into catalog1.dbo.test2 values(2, 'Hello2')"); + stat.execute("set ignore_catalogs=true"); + assertResult("1", stat, "select * from test"); + assertResult("1", stat, "select * from test2"); + stat.execute("alter table xxx.dbo.test add column (a varchar(200))"); + stat.execute("alter table xxx..test add column (b varchar(200))"); + stat.execute("alter table test add column (c varchar(200))"); + stat.execute("drop table xxx.dbo.test"); + stat.execute("drop table catalog1.dbo.test2"); + stat.execute("drop table if exists xxx.dbo.test"); + stat.execute("drop table if exists catalog1.dbo.test2"); + stat.execute("set ignore_catalogs=false"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "alter table xxx.dbo.test add column (a varchar(200))"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "alter table xxx..test add column (b varchar(200))"); + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat, + "alter table test add column (c varchar(200))"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "drop table if exists xxx.dbo.test"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "drop table if exists xxx2..test"); + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat, "drop table test"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private static void prepareDb(Statement stat) throws SQLException { + stat.execute("drop all objects"); + stat.execute("create schema dbo"); + } + + private static void prepareDbAndSetDefaultSchema(Statement stat) throws SQLException { + 
prepareDb(stat); + stat.execute("set schema dbo"); + } + +} diff --git a/h2/src/test/org/h2/test/db/TestIndex.java b/h2/src/test/org/h2/test/db/TestIndex.java index 3a3f0fdc5e..1b2fa807d0 100644 --- a/h2/src/test/org/h2/test/db/TestIndex.java +++ b/h2/src/test/org/h2/test/db/TestIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -17,12 +17,11 @@ import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; import org.h2.api.ErrorCode; -import org.h2.command.dml.Select; -import org.h2.result.SortOrder; +import org.h2.command.query.Select; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.tools.SimpleResultSet; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; /** * Index tests. @@ -41,7 +40,7 @@ public class TestIndex extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -165,10 +164,10 @@ private void testErrorMessage() throws SQLException { stat.execute("create table test(id int, name int primary key)"); testErrorMessage("PRIMARY", "KEY", " ON PUBLIC.TEST(NAME)"); stat.execute("create table test(id int, name int, unique(name))"); - testErrorMessage("CONSTRAINT_INDEX_2 ON PUBLIC.TEST(NAME)"); + testErrorMessage("CONSTRAINT_INDEX_2 ON PUBLIC.TEST(NAME NULLS FIRST)"); stat.execute("create table test(id int, name int, " + "constraint abc unique(name, id))"); - testErrorMessage("ABC_INDEX_2 ON PUBLIC.TEST(NAME, ID)"); + testErrorMessage("ABC_INDEX_2 ON PUBLIC.TEST(NAME NULLS FIRST, ID NULLS FIRST)"); } private void testErrorMessage(String... 
expected) throws SQLException { @@ -201,13 +200,13 @@ private void testDuplicateKeyException() throws SQLException { // The format of the VALUES clause varies a little depending on the // type of the index, so just test that we're getting useful info // back. - assertContains(m, "IDX_TEST_NAME ON PUBLIC.TEST(NAME)"); + assertContains(m, "IDX_TEST_NAME ON PUBLIC.TEST(NAME NULLS FIRST)"); assertContains(m, "'Hello'"); } stat.execute("drop table test"); } - private class ConcurrentUpdateThread extends Thread { + private static class ConcurrentUpdateThread extends Thread { private final AtomicInteger concurrentUpdateId, concurrentUpdateValue; private final PreparedStatement psInsert, psDelete; @@ -218,8 +217,8 @@ private class ConcurrentUpdateThread extends Thread { AtomicInteger concurrentUpdateValue) throws SQLException { this.concurrentUpdateId = concurrentUpdateId; this.concurrentUpdateValue = concurrentUpdateValue; - psInsert = c.prepareStatement("insert into test(id, value) values (?, ?)"); - psDelete = c.prepareStatement("delete from test where value = ?"); + psInsert = c.prepareStatement("insert into test(id, v) values (?, ?)"); + psDelete = c.prepareStatement("delete from test where v = ?"); } @Override @@ -255,9 +254,9 @@ public void run() { private void testConcurrentUpdate() throws SQLException { Connection c = getConnection("index"); Statement stat = c.createStatement(); - stat.execute("create table test(id int primary key, value int)"); - stat.execute("create unique index idx_value_name on test(value)"); - PreparedStatement check = c.prepareStatement("select value from test"); + stat.execute("create table test(id int primary key, v int)"); + stat.execute("create unique index idx_value_name on test(v)"); + PreparedStatement check = c.prepareStatement("select v from test"); ConcurrentUpdateThread[] threads = new ConcurrentUpdateThread[4]; AtomicInteger concurrentUpdateId = new AtomicInteger(), concurrentUpdateValue = new AtomicInteger(); @@ -370,7 +369,7 @@ 
private void testRandomized() throws SQLException { Random rand = new Random(1); reconnect(); stat.execute("drop all objects"); - stat.execute("CREATE TABLE TEST(ID identity)"); + stat.execute("CREATE TABLE TEST(ID identity default on null)"); int len = getSize(100, 1000); for (int i = 0; i < len; i++) { switch (rand.nextInt(4)) { @@ -461,7 +460,6 @@ private void testDescIndex() throws SQLException { rs = conn.getMetaData().getIndexInfo(null, null, "TEST", false, false); rs.next(); assertEquals("D", rs.getString("ASC_OR_DESC")); - assertEquals(SortOrder.DESCENDING, rs.getInt("SORT_TYPE")); stat.execute("INSERT INTO TEST SELECT X FROM SYSTEM_RANGE(1, 30)"); rs = stat.executeQuery( "SELECT COUNT(*) FROM TEST WHERE ID BETWEEN 10 AND 20"); @@ -471,7 +469,6 @@ private void testDescIndex() throws SQLException { rs = conn.getMetaData().getIndexInfo(null, null, "TEST", false, false); rs.next(); assertEquals("D", rs.getString("ASC_OR_DESC")); - assertEquals(SortOrder.DESCENDING, rs.getInt("SORT_TYPE")); rs = stat.executeQuery( "SELECT COUNT(*) FROM TEST WHERE ID BETWEEN 10 AND 20"); rs.next(); @@ -541,8 +538,7 @@ private void testConstraint() throws SQLException { stat.execute("CREATE TABLE CHILD(ID INT PRIMARY KEY, " + "PID INT, FOREIGN KEY(PID) REFERENCES PARENT(ID))"); reconnect(); - stat.execute("DROP TABLE PARENT"); - stat.execute("DROP TABLE CHILD"); + stat.execute("DROP TABLE PARENT, CHILD"); } private void testLargeIndex() throws SQLException { @@ -736,8 +732,8 @@ public static ResultSet testFunctionIndexFunction() { } } SimpleResultSet rs = new SimpleResultSet(); - rs.addColumn("ID", Types.INTEGER, ValueInt.PRECISION, 0); - rs.addColumn("VALUE", Types.INTEGER, ValueInt.PRECISION, 0); + rs.addColumn("ID", Types.INTEGER, ValueInteger.PRECISION, 0); + rs.addColumn("VALUE", Types.INTEGER, ValueInteger.PRECISION, 0); rs.addRow(1, 10); rs.addRow(2, 20); rs.addRow(3, 30); @@ -746,7 +742,7 @@ public static ResultSet testFunctionIndexFunction() { private void 
testFunctionIndex() throws SQLException { testFunctionIndexCounter = 0; - stat.execute("CREATE ALIAS TEST_INDEX FOR \"" + TestIndex.class.getName() + ".testFunctionIndexFunction\""); + stat.execute("CREATE ALIAS TEST_INDEX FOR '" + TestIndex.class.getName() + ".testFunctionIndexFunction'"); try (ResultSet rs = stat.executeQuery("SELECT * FROM TEST_INDEX() WHERE ID = 1 OR ID = 3")) { assertTrue(rs.next()); assertEquals(1, rs.getInt(1)); diff --git a/h2/src/test/org/h2/test/db/TestIndexHints.java b/h2/src/test/org/h2/test/db/TestIndexHints.java index 65a434328a..a992869d9d 100644 --- a/h2/src/test/org/h2/test/db/TestIndexHints.java +++ b/h2/src/test/org/h2/test/db/TestIndexHints.java @@ -1,19 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; -import org.h2.api.ErrorCode; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import org.h2.api.ErrorCode; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + /** * Tests the index hints feature of this database. */ @@ -27,7 +27,7 @@ public class TestIndexHints extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/db/TestLIRSMemoryConsumption.java b/h2/src/test/org/h2/test/db/TestLIRSMemoryConsumption.java index a0c618e8d1..55d27c26c0 100644 --- a/h2/src/test/org/h2/test/db/TestLIRSMemoryConsumption.java +++ b/h2/src/test/org/h2/test/db/TestLIRSMemoryConsumption.java @@ -1,14 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; +import java.util.Random; import org.h2.mvstore.cache.CacheLongKeyLIRS; import org.h2.test.TestBase; import org.h2.test.TestDb; -import java.util.Random; +import org.h2.util.Utils; /** * Class TestLIRSMemoryConsumption. @@ -27,7 +28,7 @@ public class TestLIRSMemoryConsumption extends TestDb { * ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -39,14 +40,14 @@ public void test() { testMemoryConsumption(); } - private void testMemoryConsumption() { + private static void testMemoryConsumption() { int size = 1_000_000; Random rng = new Random(); CacheLongKeyLIRS.Config config = new CacheLongKeyLIRS.Config(); for (int mb = 1; mb <= 16; mb *= 2) { config.maxMemory = mb * 1024 * 1024; CacheLongKeyLIRS cache = new CacheLongKeyLIRS<>(config); - long memoryUsedInitial = getMemUsedKb(); + long memoryUsedInitial = Utils.getMemoryUsed(); for (int i = 0; i < size; i++) { cache.put(i, createValue(i), getValueSize(i)); } @@ -73,11 +74,9 @@ private void testMemoryConsumption() { cache.put(key, createValue(key), getValueSize(key)); } } - - eatMemory(1); - freeMemory(); + Utils.collectGarbage(); cache.trimNonResidentQueue(); - long memoryUsed = getMemUsedKb(); + long memoryUsed = Utils.getMemoryUsed(); int sizeHot = cache.sizeHot(); int sizeResident = cache.size(); @@ -101,19 +100,4 @@ private static int getValueSize(long key) { // return 16; return 2560; } - - private static long getMemUsedKb() { - Runtime rt = Runtime.getRuntime(); - long memory = Long.MAX_VALUE; - for (int i = 0; i < 8; i++) { - rt.gc(); - long memNow = (rt.totalMemory() - rt.freeMemory()) / 1024; - if 
(memNow >= memory) { - break; - } - memory = memNow; - try { Thread.sleep(1000); } catch (InterruptedException e) {/**/} - } - return memory; - } } diff --git a/h2/src/test/org/h2/test/db/TestLargeBlob.java b/h2/src/test/org/h2/test/db/TestLargeBlob.java index cfc80ae305..56a94cd740 100644 --- a/h2/src/test/org/h2/test/db/TestLargeBlob.java +++ b/h2/src/test/org/h2/test/db/TestLargeBlob.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -24,7 +24,7 @@ public class TestLargeBlob extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -42,7 +42,6 @@ public void test() throws Exception { Connection conn = getConnection(url); final long testLength = Integer.MAX_VALUE + 110L; Statement stat = conn.createStatement(); - stat.execute("set COMPRESS_LOB LZF"); stat.execute("create table test(x blob)"); PreparedStatement prep = conn.prepareStatement( "insert into test values(?)"); diff --git a/h2/src/test/org/h2/test/db/TestLinkedTable.java b/h2/src/test/org/h2/test/db/TestLinkedTable.java index 65049a7219..d33f137c67 100644 --- a/h2/src/test/org/h2/test/db/TestLinkedTable.java +++ b/h2/src/test/org/h2/test/db/TestLinkedTable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -18,7 +18,6 @@ import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; -import org.h2.value.DataType; /** * Tests the linked table feature (CREATE LINKED TABLE). @@ -31,7 +30,7 @@ public class TestLinkedTable extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -53,6 +52,8 @@ public void test() throws SQLException { testCachingResults(); testLinkedTableInReadOnlyDb(); testGeometry(); + testFetchSize(); + testFetchSizeWithAutoCommit(); deleteDb("linkedTable"); } @@ -237,7 +238,7 @@ private void testMultipleSchemas() throws SQLException { assertSingleValue(sb, "SELECT * FROM T2", 2); sa.execute("DROP ALL OBJECTS"); sb.execute("DROP ALL OBJECTS"); - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, sa). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, sa). 
execute("SELECT * FROM TEST"); ca.close(); cb.close(); @@ -288,9 +289,9 @@ private static void testLinkOtherSchema() throws SQLException { sa.execute("CREATE TABLE GOOD (X NUMBER)"); sa.execute("CREATE SCHEMA S"); sa.execute("CREATE TABLE S.BAD (X NUMBER)"); - sb.execute("CALL LINK_SCHEMA('G', '', " + + sb.execute("SELECT * FROM LINK_SCHEMA('G', '', " + "'jdbc:h2:mem:one', 'sa', 'sa', 'PUBLIC'); "); - sb.execute("CALL LINK_SCHEMA('B', '', " + + sb.execute("SELECT * FROM LINK_SCHEMA('B', '', " + "'jdbc:h2:mem:one', 'sa', 'sa', 'S'); "); // OK sb.executeQuery("SELECT * FROM G.GOOD"); @@ -428,7 +429,7 @@ private void testLinkSchema() throws SQLException { Connection conn2 = DriverManager.getConnection(url2, "sa2", "def def"); Statement stat2 = conn2.createStatement(); - String link = "CALL LINK_SCHEMA('LINKED', '', '" + url1 + + String link = "SELECT * FROM LINK_SCHEMA('LINKED', '', '" + url1 + "', 'sa1', 'abc abc', 'PUBLIC')"; stat2.execute(link); stat2.executeQuery("SELECT * FROM LINKED.TEST1"); @@ -459,7 +460,7 @@ private void testLinkTable() throws SQLException { stat.execute("CREATE TEMP TABLE TEST_TEMP(ID INT PRIMARY KEY)"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, " + "NAME VARCHAR(200), XT TINYINT, XD DECIMAL(10,2), " + - "XTS TIMESTAMP, XBY BINARY(255), XBO BIT, XSM SMALLINT, " + + "XTS TIMESTAMP, XBY VARBINARY(255), XBO BIT, XSM SMALLINT, " + "XBI BIGINT, XBL BLOB, XDA DATE, XTI TIME, XCL CLOB, XDO DOUBLE)"); stat.execute("CREATE INDEX IDXNAME ON TEST(NAME)"); stat.execute("INSERT INTO TEST VALUES(0, NULL, NULL, NULL, NULL, " + @@ -495,7 +496,7 @@ private void testLinkTable() throws SQLException { testRow(stat, "LINK_TEST"); ResultSet rs = stat.executeQuery("SELECT * FROM LINK_TEST"); ResultSetMetaData meta = rs.getMetaData(); - assertEquals(10, meta.getPrecision(1)); + assertEquals(32, meta.getPrecision(1)); assertEquals(200, meta.getPrecision(2)); conn.close(); @@ -525,7 +526,7 @@ private void testLinkTable() throws SQLException { rs = 
stat.executeQuery("SELECT * FROM " + "INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='LINK_TEST'"); rs.next(); - assertEquals("TABLE LINK", rs.getString("TABLE_TYPE")); + assertEquals("TABLE LINK", rs.getString("STORAGE_TYPE")); rs.next(); rs = stat.executeQuery("SELECT * FROM LINK_TEST WHERE ID=0"); @@ -576,7 +577,7 @@ private void testRow(Statement stat, String name) throws SQLException { assertTrue(rs.getBoolean("XBO")); assertEquals(3000, rs.getShort("XSM")); assertEquals(1234567890123456789L, rs.getLong("XBI")); - assertEquals("1122aa", rs.getString("XBL")); + assertEquals(new byte[] {0x11, 0x22, (byte) 0xAA }, rs.getBytes("XBL")); assertEquals("0002-01-01", rs.getString("XDA")); assertEquals("00:00:00", rs.getString("XTI")); assertEquals("J\u00fcrg", rs.getString("XCL")); @@ -694,10 +695,7 @@ private void testLinkedTableInReadOnlyDb() throws SQLException { } private void testGeometry() throws SQLException { - if (config.memory && config.mvStore) { - return; - } - if (DataType.GEOMETRY_CLASS == null) { + if (config.memory) { return; } org.h2.Driver.load(); @@ -705,17 +703,75 @@ private void testGeometry() throws SQLException { Connection cb = DriverManager.getConnection("jdbc:h2:mem:two", "sa", "sa"); Statement sa = ca.createStatement(); Statement sb = cb.createStatement(); - sa.execute("CREATE TABLE TEST(ID SERIAL, the_geom geometry)"); - sa.execute("INSERT INTO TEST(THE_GEOM) VALUES('POINT (1 1)')"); + sa.execute("CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY," + + " THE_GEOM GEOMETRY, THE_GEOM_2 GEOMETRY(POINT, 4326))"); + sa.execute("INSERT INTO TEST(THE_GEOM, THE_GEOM_2) VALUES" + + " (GEOMETRY 'POINT (1 1)', GEOMETRY 'SRID=4326;POINT(2 2)')"); String sql = "CREATE LINKED TABLE T(NULL, " + "'jdbc:h2:mem:one', 'sa', 'sa', 'TEST') READONLY"; sb.execute(sql); try (ResultSet rs = sb.executeQuery("SELECT * FROM T")) { assertTrue(rs.next()); assertEquals("POINT (1 1)", rs.getString("THE_GEOM")); + assertEquals("SRID=4326;POINT (2 2)", 
rs.getString("THE_GEOM_2")); + } + sb.execute("DROP TABLE T"); + ca.close(); + cb.close(); + } + + private void testFetchSize() throws SQLException { + if (config.memory) { + return; + } + org.h2.Driver.load(); + Connection ca = DriverManager.getConnection("jdbc:h2:mem:one", "sa", "sa"); + Connection cb = DriverManager.getConnection("jdbc:h2:mem:two", "sa", "sa"); + Statement sa = ca.createStatement(); + Statement sb = cb.createStatement(); + sa.execute("DROP TABLE IF EXISTS TEST; " + + "CREATE TABLE TEST as select * from SYSTEM_RANGE(1,1000) as n;"); + String sql = "CREATE LINKED TABLE T(NULL, " + + "'jdbc:h2:mem:one', 'sa', 'sa', 'TEST') FETCH_SIZE 10"; + sb.execute(sql); + try (ResultSet rs = sb.executeQuery("SELECT count(*) FROM T")) { + assertTrue(rs.next()); + assertEquals(1000, rs.getInt(1)); } + ResultSet res = sb.executeQuery("CALL DB_OBJECT_SQL('TABLE', 'PUBLIC', 'T')"); + res.next(); + assertEquals("CREATE FORCE LINKED TABLE \"PUBLIC\".\"T\"(NULL, 'jdbc:h2:mem:one', 'sa', 'sa', 'TEST')" + + " FETCH_SIZE 10 /*--hide--*/", res.getString(1)); sb.execute("DROP TABLE T"); ca.close(); cb.close(); } + + private void testFetchSizeWithAutoCommit() throws SQLException { + if (config.memory) { + return; + } + org.h2.Driver.load(); + Connection ca = DriverManager.getConnection("jdbc:h2:mem:one", "sa", "sa"); + Connection cb = DriverManager.getConnection("jdbc:h2:mem:two", "sa", "sa"); + Statement sa = ca.createStatement(); + Statement sb = cb.createStatement(); + sa.execute("DROP TABLE IF EXISTS TEST; " + + "CREATE TABLE TEST as select * from SYSTEM_RANGE(1,1000) as n;"); + String sql = "CREATE LINKED TABLE T(NULL, " + + "'jdbc:h2:mem:one', 'sa', 'sa', 'TEST') FETCH_SIZE 10 AUTOCOMMIT OFF"; + sb.execute(sql); + try (ResultSet rs = sb.executeQuery("SELECT count(*) FROM T")) { + assertTrue(rs.next()); + assertEquals(1000, rs.getInt(1)); + } + ResultSet res = sb.executeQuery("CALL DB_OBJECT_SQL('TABLE', 'PUBLIC', 'T')"); + res.next(); + assertEquals("CREATE FORCE 
LINKED TABLE \"PUBLIC\".\"T\"(NULL, 'jdbc:h2:mem:one', 'sa', 'sa', 'TEST')" + + " FETCH_SIZE 10 AUTOCOMMIT OFF /*--hide--*/", res.getString(1)); + sb.execute("DROP TABLE T"); + ca.close(); + cb.close(); + } + } diff --git a/h2/src/test/org/h2/test/db/TestListener.java b/h2/src/test/org/h2/test/db/TestListener.java index 614310955f..5e042743f9 100644 --- a/h2/src/test/org/h2/test/db/TestListener.java +++ b/h2/src/test/org/h2/test/db/TestListener.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -35,7 +35,7 @@ public TestListener() { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -74,7 +74,7 @@ public void exceptionThrown(SQLException e, String sql) { } @Override - public void setProgress(int state, String name, int current, int max) { + public void setProgress(int state, String name, long current, long max) { long time = System.nanoTime(); if (state == lastState && time < last + TimeUnit.SECONDS.toNanos(1)) { return; diff --git a/h2/src/test/org/h2/test/db/TestLob.java b/h2/src/test/org/h2/test/db/TestLob.java index f90f29044c..45203921a2 100644 --- a/h2/src/test/org/h2/test/db/TestLob.java +++ b/h2/src/test/org/h2/test/db/TestLob.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -24,12 +24,17 @@ import java.sql.Savepoint; import java.sql.Statement; import java.sql.Types; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Random; import java.util.concurrent.TimeUnit; + import org.h2.api.ErrorCode; +import org.h2.engine.Constants; import org.h2.engine.SysProperties; import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; +import org.h2.store.FileLister; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -37,8 +42,10 @@ import org.h2.tools.SimpleResultSet; import org.h2.util.IOUtils; import org.h2.util.JdbcUtils; -import org.h2.util.StringUtils; import org.h2.util.Task; +import org.h2.value.ValueBlob; +import org.h2.value.ValueClob; +import org.h2.value.ValueLob; /** * Tests LOB and CLOB data types. @@ -58,17 +65,16 @@ public class TestLob extends TestDb { public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.big = true; - test.config.mvStore = false; - test.test(); + test.testFromMain(); } @Override public void test() throws Exception { + testReclamationOnInDoubtRollback(); testRemoveAfterDeleteAndClose(); testRemovedAfterTimeout(); testConcurrentRemoveRead(); testCloseLobTwice(); - testCleaningUpLobsOnRollback(); testClobWithRandomUnicodeChars(); testCommitOnExclusiveConnection(); testReadManyLobs(); @@ -78,7 +84,6 @@ public void test() throws Exception { testBlobInputStreamSeek(true); testBlobInputStreamSeek(false); testDeadlock(); - testDeadlock2(); testCopyManyLobs(); testCopyLob(); testConcurrentCreate(); @@ -86,7 +91,6 @@ public void test() throws Exception { testUniqueIndex(); testConvert(); testCreateAsSelect(); - testDelete(); testLobServerMemory(); testUpdatingLobRow(); testBufferedInputStreamBug(); @@ -94,7 +98,6 @@ public void test() throws Exception { return; } testLargeClob(); - testLobCleanupSessionTemporaries(); 
testLobUpdateMany(); testLobVariable(); testLobDrop(); @@ -104,10 +107,7 @@ public void test() throws Exception { testLobRollbackStop(); testLobCopy(); testLobHibernate(); - testLobCopy(false); - testLobCopy(true); - testLobCompression(false); - testLobCompression(true); + testLobCopy2(); testManyLobs(); testClob(); testUpdateLob(); @@ -115,17 +115,67 @@ public void test() throws Exception { testLob(false); testLob(true); testJavaObject(); - testLobGrowth(); testLobInValueResultSet(); + testLimits(); deleteDb("lob"); } - private void testRemoveAfterDeleteAndClose() throws Exception { + private void testReclamationOnInDoubtRollback() throws Exception { if (config.memory || config.cipher != null) { return; } - // TODO fails in pagestore mode - if (!config.mvStore) { + deleteDb("lob"); + try (Connection conn = getConnection("lob")) { + try (Statement st = conn.createStatement()) { + st.executeUpdate("CREATE TABLE IF NOT EXISTS dataTable(" + + "dataStamp BIGINT PRIMARY KEY, " + + "data BLOB)"); + } + + conn.setAutoCommit(false); + Random rnd = new Random(0); + try (PreparedStatement pstmt = conn.prepareStatement("INSERT INTO dataTable VALUES(?, ?)")) { + for (int i = 0; i < 100; ++i) { + int numBytes = 1024 * 1024; + byte[] data = new byte[numBytes]; + rnd.nextBytes(data); + pstmt.setLong(1, i); + pstmt.setBytes(2, data); + pstmt.executeUpdate(); + } + } + try (Statement st = conn.createStatement()) { + st.executeUpdate("PREPARE COMMIT lobtx"); + st.execute("SHUTDOWN IMMEDIATELY"); + } + } + + try (Connection conn = getConnection("lob")) { + try (Statement st = conn.createStatement(); + ResultSet rs = st.executeQuery("SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT")) { + assertTrue("No in-doubt tx", rs.first()); + assertEquals("LOBTX", rs.getString("TRANSACTION_NAME")); + assertFalse("more than one in-doubt tx", rs.next()); + st.executeUpdate("ROLLBACK TRANSACTION lobtx; CHECKPOINT SYNC"); + } + } + + try (Connection conn = getConnection("lob")) { + try (Statement st = 
conn.createStatement()) { + st.execute("SHUTDOWN COMPACT"); + } + } + + ArrayList dbFiles = FileLister.getDatabaseFiles(getBaseDir(), "lob", false); + assertEquals(1, dbFiles.size()); + File file = new File(dbFiles.get(0)); + assertTrue(file.exists()); + long fileSize = file.length(); + assertTrue("File size=" + fileSize, fileSize < 13000); + } + + private void testRemoveAfterDeleteAndClose() throws Exception { + if (config.memory || config.cipher != null) { return; } deleteDb("lob"); @@ -242,28 +292,6 @@ private void testCloseLobTwice() throws SQLException { conn.close(); } - private void testCleaningUpLobsOnRollback() throws Exception { - if (config.mvStore) { - return; - } - deleteDb("lob"); - Connection conn = getConnection("lob"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE test(id int, data CLOB)"); - conn.setAutoCommit(false); - stat.executeUpdate("insert into test values (1, '" + - MORE_THAN_128_CHARS + "')"); - conn.rollback(); - ResultSet rs = stat.executeQuery("select count(*) from test"); - rs.next(); - assertEquals(0, rs.getInt(1)); - rs = stat.executeQuery("select * from information_schema.lobs"); - rs = stat.executeQuery("select count(*) from information_schema.lob_data"); - rs.next(); - assertEquals(0, rs.getInt(1)); - conn.close(); - } - private void testReadManyLobs() throws Exception { deleteDb("lob"); Connection conn; @@ -271,7 +299,7 @@ private void testReadManyLobs() throws Exception { Statement stat = conn.createStatement(); stat.execute("create table test(id identity, data clob)"); PreparedStatement prep = conn.prepareStatement( - "insert into test values(null, ?)"); + "insert into test(data) values ?"); byte[] data = new byte[256]; Random r = new Random(1); for (int i = 0; i < 1000; i++) { @@ -370,17 +398,6 @@ private void testBlobInputStreamSeek(boolean upgraded) throws Exception { prep.setBinaryStream(2, new ByteArrayInputStream(buff), -1); prep.execute(); } - if (upgraded) { - if (!config.mvStore) { - if 
(config.memory) { - stat.execute("update information_schema.lob_map set pos=null"); - } else { - stat.execute("alter table information_schema.lob_map drop column pos"); - conn.close(); - conn = getConnection("lob"); - } - } - } prep = conn.prepareStatement("select * from test where id = ?"); for (int i = 0; i < 1; i++) { random.setSeed(i); @@ -435,125 +452,20 @@ public void call() throws Exception { conn2.close(); } - /** - * A background task. - */ - private final class Deadlock2Task1 extends Task { - - public final Connection conn; - - Deadlock2Task1() throws SQLException { - this.conn = getDeadlock2Connection(); - } - - @Override - public void call() throws Exception { - Random random = new Random(); - Statement stat = conn.createStatement(); - char[] tmp = new char[1024]; - while (!stop) { - try { - ResultSet rs = stat.executeQuery( - "select name from test where id = " + random.nextInt(999)); - if (rs.next()) { - Reader r = rs.getClob("name").getCharacterStream(); - while (r.read(tmp) >= 0) { - // ignore - } - r.close(); - } - rs.close(); - } catch (SQLException ex) { - // ignore "LOB gone away", this can happen - // in the presence of concurrent updates - if (ex.getErrorCode() != ErrorCode.IO_EXCEPTION_2) { - throw ex; - } - } catch (IOException ex) { - // ignore "LOB gone away", this can happen - // in the presence of concurrent updates - Exception e = ex; - if (e.getCause() instanceof DbException) { - e = (Exception) e.getCause(); - } - if (!(e.getCause() instanceof SQLException)) { - throw ex; - } - SQLException e2 = (SQLException) e.getCause(); - if (e2.getErrorCode() != ErrorCode.IO_EXCEPTION_1) { - throw ex; - } - } catch (Exception e) { - e.printStackTrace(System.out); - throw e; - } - } - } - - } - - /** - * A background task. 
- */ - private final class Deadlock2Task2 extends Task { - - public final Connection conn; - - Deadlock2Task2() throws SQLException { - this.conn = getDeadlock2Connection(); - } - - @Override - public void call() throws Exception { - Random random = new Random(); - Statement stat = conn.createStatement(); - while (!stop) { - stat.execute("update test set counter = " + - random.nextInt(10) + " where id = " + random.nextInt(1000)); - } - } - - } - - private void testDeadlock2() throws Exception { - if (config.mvStore || config.memory) { - return; - } - deleteDb("lob"); - Connection conn = getDeadlock2Connection(); - Statement stat = conn.createStatement(); - stat.execute("create cached table test(id int not null identity, " + - "name clob, counter int)"); - stat.execute("insert into test(id, name) select x, space(100000) " + - "from system_range(1, 100)"); - Deadlock2Task1 task1 = new Deadlock2Task1(); - Deadlock2Task2 task2 = new Deadlock2Task2(); - task1.execute("task1"); - task2.execute("task2"); - for (int i = 0; i < 100; i++) { - stat.execute("insert into test values(null, space(10000 + " + i + "), 1)"); - } - task1.get(); - task1.conn.close(); - task2.get(); - task2.conn.close(); - conn.close(); - } - Connection getDeadlock2Connection() throws SQLException { - return getConnection("lob;MULTI_THREADED=TRUE;LOCK_TIMEOUT=60000"); + return getConnection("lob;LOCK_TIMEOUT=60000"); } private void testCopyManyLobs() throws Exception { deleteDb("lob"); Connection conn = getConnection("lob"); Statement stat = conn.createStatement(); - stat.execute("create table test(id identity, data clob) " + - "as select 1, space(10000)"); - stat.execute("insert into test(id, data) select null, data from test"); - stat.execute("insert into test(id, data) select null, data from test"); - stat.execute("insert into test(id, data) select null, data from test"); - stat.execute("insert into test(id, data) select null, data from test"); + stat.execute("create table test(id identity default 
on null, data clob) " + + "as select null, space(10000)"); + stat.execute("insert into test(data) select data from test"); + stat.execute("insert into test(data) select data from test"); + stat.execute("insert into test(data) select data from test"); + stat.execute("insert into test(data) select data from test"); stat.execute("delete from test where id < 10"); stat.execute("shutdown compact"); conn.close(); @@ -683,51 +595,6 @@ private void testCreateAsSelect() throws Exception { conn.close(); } - private void testDelete() throws Exception { - if (config.memory || config.mvStore) { - return; - } - // TODO fails in pagestore mode - if (!config.mvStore) { - return; - } - deleteDb("lob"); - Connection conn; - Statement stat; - conn = getConnection("lob"); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key, name clob)"); - stat.execute("insert into test values(1, space(10000))"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("insert into test values(2, space(10000))"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("delete from test where id = 1"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("insert into test values(3, space(10000))"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("insert into test values(4, space(10000))"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("delete from test where id = 2"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("delete from test where id = 3"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("delete from test"); - conn.close(); - conn = getConnection("lob"); - stat = conn.createStatement(); - assertSingleValue(stat, - "select count(*) 
from information_schema.lob_data", 0); - stat.execute("drop table test"); - conn.close(); - } - private void testLobUpdateMany() throws SQLException { deleteDb("lob"); Connection conn = getConnection("lob"); @@ -740,39 +607,6 @@ private void testLobUpdateMany() throws SQLException { conn.close(); } - private void testLobCleanupSessionTemporaries() throws SQLException { - if (config.mvStore) { - return; - } - // TODO fails in pagestore mode - if (!config.mvStore) { - return; - } - deleteDb("lob"); - Connection conn = getConnection("lob"); - Statement stat = conn.createStatement(); - stat.execute("create table test(data clob)"); - - ResultSet rs = stat.executeQuery("select count(*) " + - "from INFORMATION_SCHEMA.LOBS"); - assertTrue(rs.next()); - assertEquals(0, rs.getInt(1)); - rs.close(); - - PreparedStatement prep = conn.prepareStatement( - "INSERT INTO test(data) VALUES(?)"); - String name = new String(new char[200]).replace((char) 0, 'x'); - prep.setString(1, name); - prep.execute(); - prep.close(); - - rs = stat.executeQuery("select count(*) from INFORMATION_SCHEMA.LOBS"); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - rs.close(); - conn.close(); - } - private void testLobServerMemory() throws SQLException { deleteDb("lob"); Connection conn = getConnection("lob"); @@ -1114,24 +948,13 @@ private void testLobHibernate() throws Exception { conn0.close(); } - private void testLobCopy(boolean compress) throws SQLException { + private void testLobCopy2() throws SQLException { deleteDb("lob"); Connection conn; conn = reconnect(null); Statement stat = conn.createStatement(); - if (compress) { - stat.execute("SET COMPRESS_LOB LZF"); - } else { - stat.execute("SET COMPRESS_LOB NO"); - } conn = reconnect(conn); stat = conn.createStatement(); - ResultSet rs; - rs = stat.executeQuery("select value from information_schema.settings " + - "where NAME='COMPRESS_LOB'"); - rs.next(); - assertEquals(compress ? 
"LZF" : "NO", rs.getString(1)); - assertFalse(rs.next()); stat.execute("create table test(text clob)"); stat.execute("create table test2(text clob)"); StringBuilder buff = new StringBuilder(); @@ -1141,7 +964,7 @@ private void testLobCopy(boolean compress) throws SQLException { String spaces = buff.toString(); stat.execute("insert into test values('" + spaces + "')"); stat.execute("insert into test2 select * from test"); - rs = stat.executeQuery("select * from test2"); + ResultSet rs = stat.executeQuery("select * from test2"); rs.next(); assertEquals(spaces, rs.getString(1)); stat.execute("drop table test"); @@ -1155,55 +978,6 @@ private void testLobCopy(boolean compress) throws SQLException { conn.close(); } - private void testLobCompression(boolean compress) throws Exception { - deleteDb("lob"); - Connection conn; - conn = reconnect(null); - if (compress) { - conn.createStatement().execute("SET COMPRESS_LOB LZF"); - } else { - conn.createStatement().execute("SET COMPRESS_LOB NO"); - } - conn.createStatement().execute("CREATE TABLE TEST(ID INT PRIMARY KEY, C CLOB)"); - PreparedStatement prep = conn.prepareStatement( - "INSERT INTO TEST VALUES(?, ?)"); - long time = System.nanoTime(); - int len = getSize(10, 40); - if (config.networked && config.big) { - len = 5; - } - StringBuilder buff = new StringBuilder(); - for (int i = 0; i < 1000; i++) { - buff.append(StringUtils.xmlNode("content", null, "This is a test " + i)); - } - String xml = buff.toString(); - for (int i = 0; i < len; i++) { - prep.setInt(1, i); - prep.setString(2, xml + i); - prep.execute(); - } - for (int i = 0; i < len; i++) { - ResultSet rs = conn.createStatement().executeQuery( - "SELECT * FROM TEST"); - while (rs.next()) { - if (i == 0) { - assertEquals(xml + rs.getInt(1), rs.getString(2)); - } else { - Reader r = rs.getCharacterStream(2); - String result = IOUtils.readStringAndClose(r, -1); - assertEquals(xml + rs.getInt(1), result); - } - } - } - time = System.nanoTime() - time; - trace("time: 
" + TimeUnit.NANOSECONDS.toMillis(time) + " compress: " + compress); - conn.close(); - if (!config.memory) { - long length = new File(getBaseDir() + "/lob.h2.db").length(); - trace("len: " + length + " compress: " + compress); - } - } - private void testManyLobs() throws Exception { deleteDb("lob"); Connection conn; @@ -1422,7 +1196,7 @@ private void testLob(boolean clob) throws Exception { PreparedStatement prep; ResultSet rs; long time; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE " + + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V " + (clob ? "CLOB" : "BLOB") + ")"); int len = getSize(1, 1000); @@ -1447,7 +1221,7 @@ private void testLob(boolean clob) throws Exception { conn = reconnect(conn); time = System.nanoTime(); - prep = conn.prepareStatement("SELECT ID, VALUE FROM TEST"); + prep = conn.prepareStatement("SELECT ID, V FROM TEST"); rs = prep.executeQuery(); while (rs.next()) { int id = rs.getInt("ID"); @@ -1528,13 +1302,13 @@ private void testJavaObject() throws SQLException { assertFalse(rs.next()); conn.createStatement().execute("drop table test"); - stat.execute("create table test(value other)"); + stat.execute("create table test(v other)"); prep = conn.prepareStatement("insert into test values(?)"); - prep.setObject(1, JdbcUtils.serialize("", conn.getSession().getDataHandler())); + prep.setObject(1, JdbcUtils.serialize("", conn.getJavaObjectSerializer())); prep.execute(); - rs = stat.executeQuery("select value from test"); + rs = stat.executeQuery("select v from test"); while (rs.next()) { - assertEquals("", (String) rs.getObject("value")); + assertEquals("", (String) rs.getObject("v")); } conn.close(); } @@ -1636,7 +1410,7 @@ private void testClobWithRandomUnicodeChars() throws Exception { stat.execute("CREATE TABLE logs" + "(id int primary key auto_increment, message CLOB)"); PreparedStatement s1 = conn.prepareStatement( - "INSERT INTO logs (id, message) VALUES(null, ?)"); + "INSERT INTO logs (message) VALUES ?"); final Random 
rand = new Random(1); for (int i = 1; i <= 100; i++) { String data = randomUnicodeString(rand); @@ -1691,51 +1465,15 @@ private static String randomUnicodeString(Random rand) { return new String(buffer); } - private void testLobGrowth() throws SQLException { - if (config.mvStore) { - return; - } - final File dbFile = new File(getBaseDir(), "lob.h2.db"); - final byte[] data = new byte[2560]; - deleteDb("lob"); - JdbcConnection conn = (JdbcConnection) getConnection("lob;LOB_TIMEOUT=0"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(ID IDENTITY PRIMARY KEY, DATA BLOB)"); - PreparedStatement prep = conn - .prepareStatement("INSERT INTO TEST(DATA) VALUES(?)"); - for (int i = 0; i < 100; i++) { - prep.setBinaryStream(1, new ByteArrayInputStream(data)); - prep.executeUpdate(); - } - final long initialSize = dbFile.length(); - prep = conn.prepareStatement("UPDATE test SET data=? WHERE id=?"); - for (int i = 0; i < 20; i++) { - for (int j = 0; j < 100; j++) { - data[0] = (byte)(i); - data[1] = (byte)(j); - prep.setBinaryStream(1, new ByteArrayInputStream(data)); - prep.setInt(2, j); - prep.executeUpdate(); - } - } - assertTrue("dbFile size " + dbFile.length() + " is > initialSize " - + initialSize, dbFile.length() <= (initialSize * 1.5)); - conn.createStatement().execute("drop table test"); - conn.close(); - } - private void testLobInValueResultSet() throws SQLException { deleteDb("lob"); JdbcConnection conn = (JdbcConnection) getConnection("lob"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS VRS FOR \"" + getClass().getName() + ".testLobInValueResultSetGet\""); - ResultSet rs = stat.executeQuery("SELECT VRS()"); + stat.execute("CREATE ALIAS VRS FOR '" + getClass().getName() + ".testLobInValueResultSetGet'"); + ResultSet rs = stat.executeQuery("SELECT * FROM VRS()"); assertTrue(rs.next()); - ResultSet rs2 = (ResultSet) rs.getObject(1); + Clob clob = rs.getClob(1); assertFalse(rs.next()); - assertTrue(rs2.next()); - 
Clob clob = rs2.getClob(1); - assertFalse(rs2.next()); assertEquals(MORE_THAN_128_CHARS, clob.getSubString(1, Integer.MAX_VALUE)); conn.close(); } @@ -1761,4 +1499,83 @@ public Object getObject(int columnIndex) throws SQLException { return rs; } + private void testLimits() throws Exception { + deleteDb("lob"); + JdbcConnection conn = (JdbcConnection) getConnection("lob"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INTEGER, B BLOB, C CLOB)"); + PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?, ?)"); + ps.setInt(1, 1); + byte[] b = new byte[Constants.MAX_STRING_LENGTH]; + Arrays.fill(b, (byte) 'A'); + String s = new String(b, StandardCharsets.UTF_8); + ps.setBytes(2, b); + ps.setString(3, s); + ps.executeUpdate(); + byte[] b2 = new byte[Constants.MAX_STRING_LENGTH + 1]; + Arrays.fill(b2, (byte) 'A'); + String s2 = new String(b2, StandardCharsets.UTF_8); + assertThrows(ErrorCode.VALUE_TOO_LONG_2, ps).setBytes(2, b2); + ps.setBinaryStream(2, new ByteArrayInputStream(b2)); + assertThrows(ErrorCode.VALUE_TOO_LONG_2, ps).setString(3, s2); + ps.setCharacterStream(3, new StringReader(s2)); + ps.executeUpdate(); + try (ResultSet rs = stat.executeQuery("TABLE TEST ORDER BY ID")) { + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + testLimitsSmall(b, s, rs, 2); + testLimitsSmall(b, s, rs, 2); + testLimitsSmall(b, s, rs, 3); + testLimitsSmall(b, s, rs, 3); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + testLimitsLarge(b2, s2, rs, 2); + testLimitsLarge(b2, s2, rs, 2); + testLimitsLarge(b2, s2, rs, 3); + testLimitsLarge(b2, s2, rs, 3); + assertFalse(rs.next()); + } + conn.close(); + testLimitsSmall(b, s, ValueBlob.createSmall(b)); + testLimitsSmall(b, s, ValueClob.createSmall(b, Constants.MAX_STRING_LENGTH)); + testLimitsLarge(b2, s2, ValueBlob.createSmall(b2)); + testLimitsLarge(b2, s2, ValueClob.createSmall(b2, Constants.MAX_STRING_LENGTH + 1)); + } + + private void testLimitsSmall(byte[] b, 
String s, ResultSet rs, int index) throws SQLException { + assertEquals(b, rs.getBytes(index)); + assertEquals(s, rs.getString(index)); + } + + private void testLimitsLarge(byte[] b, String s, ResultSet rs, int index) throws SQLException, IOException { + assertThrows(ErrorCode.VALUE_TOO_LONG_2, rs).getBytes(index); + assertEquals(b, IOUtils.readBytesAndClose(rs.getBlob(index).getBinaryStream(), -1)); + assertThrows(ErrorCode.VALUE_TOO_LONG_2, rs).getString(index); + assertEquals(s, IOUtils.readStringAndClose(rs.getClob(index).getCharacterStream(), -1)); + } + + private void testLimitsSmall(byte[] b, String s, ValueLob v) { + assertEquals(b, v.getBytesNoCopy()); + assertEquals(s, v.getString()); + assertEquals(s, v.getString()); + } + + private void testLimitsLarge(byte[] b, String s, ValueLob v) throws IOException { + try { + assertEquals(b, v.getBytesNoCopy()); + throw new AssertionError(); + } catch (DbException e) { + assertEquals(ErrorCode.VALUE_TOO_LONG_2, e.getErrorCode()); + } + assertEquals(b, IOUtils.readBytesAndClose(v.getInputStream(), -1)); + for (int i = 0; i < 2; i++) { + try { + assertEquals(s, v.getString()); + throw new AssertionError(); + } catch (DbException e) { + assertEquals(ErrorCode.VALUE_TOO_LONG_2, e.getErrorCode()); + } + assertEquals(s, IOUtils.readStringAndClose(v.getReader(), -1)); + } + } } diff --git a/h2/src/test/org/h2/test/db/TestLobObject.java b/h2/src/test/org/h2/test/db/TestLobObject.java index 8874882819..b150fc512b 100644 --- a/h2/src/test/org/h2/test/db/TestLobObject.java +++ b/h2/src/test/org/h2/test/db/TestLobObject.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; diff --git a/h2/src/test/org/h2/test/db/TestMemoryUsage.java b/h2/src/test/org/h2/test/db/TestMemoryUsage.java index 3d65a41a19..dbf367d113 100644 --- a/h2/src/test/org/h2/test/db/TestMemoryUsage.java +++ b/h2/src/test/org/h2/test/db/TestMemoryUsage.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -31,7 +31,7 @@ public class TestMemoryUsage extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -63,7 +63,8 @@ private void testOpenCloseConnections() throws SQLException { return; } deleteDb("memoryUsage"); - conn = getConnection("memoryUsage"); + // to eliminate background thread interference + conn = getConnection("memoryUsage;WRITE_DELAY=0"); try { eatMemory(4000); for (int i = 0; i < 4000; i++) { @@ -86,13 +87,13 @@ private void testCreateDropLoop() throws SQLException { stat.execute("DROP TABLE TEST"); } stat.execute("checkpoint"); - int used = Utils.getMemoryUsed(); + long used = Utils.getMemoryUsed(); for (int i = 0; i < 1000; i++) { stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY)"); stat.execute("DROP TABLE TEST"); } stat.execute("checkpoint"); - int usedNow = Utils.getMemoryUsed(); + long usedNow = Utils.getMemoryUsed(); if (usedNow > used * 1.3) { // try to lower memory usage (because it might be wrong) // by forcing OOME @@ -127,19 +128,17 @@ private void testClob() throws SQLException { return; } deleteDb("memoryUsageClob"); - conn = getConnection("memoryUsageClob"); + conn = getConnection("memoryUsageClob;WRITE_DELAY=0"); Statement stat = 
conn.createStatement(); stat.execute("SET MAX_LENGTH_INPLACE_LOB 8192"); stat.execute("SET CACHE_SIZE 8000"); stat.execute("CREATE TABLE TEST(ID IDENTITY, DATA CLOB)"); - freeSoftReferences(); try { - int base = Utils.getMemoryUsed(); + long base = Utils.getMemoryUsed(); for (int i = 0; i < 4; i++) { stat.execute("INSERT INTO TEST(DATA) " + "SELECT SPACE(8000) FROM SYSTEM_RANGE(1, 800)"); - freeSoftReferences(); - int used = Utils.getMemoryUsed(); + long used = Utils.getMemoryUsed(); if ((used - base) > 3 * 8192) { fail("Used: " + (used - base) + " i: " + i); } @@ -167,20 +166,6 @@ private static void closeConnection(Connection conn) throws SQLException { } } - /** - * Eat memory so that all soft references are garbage collected. - */ - void freeSoftReferences() { - try { - eatMemory(1); - } catch (OutOfMemoryError e) { - // ignore - } - System.gc(); - System.gc(); - freeMemory(); - } - private void testCreateIndex() throws SQLException { if (config.memory) { return; @@ -200,11 +185,11 @@ private void testCreateIndex() throws SQLException { prep.setInt(1, i); prep.executeUpdate(); } - int base = Utils.getMemoryUsed(); + long base = Utils.getMemoryUsed(); stat.execute("create index idx_test_id on test(id)"); for (int i = 0;; i++) { System.gc(); - int used = Utils.getMemoryUsed() - base; + long used = Utils.getMemoryUsed() - base; if (used <= getSize(7500, 12000)) { break; } diff --git a/h2/src/test/org/h2/test/db/TestMergeUsing.java b/h2/src/test/org/h2/test/db/TestMergeUsing.java index 1bd33f27ed..f0328a5a7e 100644 --- a/h2/src/test/org/h2/test/db/TestMergeUsing.java +++ b/h2/src/test/org/h2/test/db/TestMergeUsing.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -30,15 +30,11 @@ public class TestMergeUsing extends TestDb implements Trigger { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public boolean isEnabled() { - // TODO breaks in pagestore case - if (!config.mvStore) { - return false; - } return true; } @@ -102,16 +98,6 @@ public void test() throws Exception { "SELECT X AS ID, 'Marcy'||X||X AS NAME FROM SYSTEM_RANGE(2,2) UNION ALL " + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(3,3)", 3); - // No updates happen: No insert defined, no update or delete happens due - // to ON condition failing always, target table missing PK - testMergeUsing( - "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );", - "MERGE INTO PARENT AS P USING (" + - "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3) ) AS S ON (P.ID = S.ID AND 1=0) " + - "WHEN MATCHED THEN " + - "UPDATE SET P.NAME = S.NAME||S.ID WHERE P.ID = 2 DELETE WHERE P.ID = 1", - GATHER_ORDERED_RESULTS_SQL, - "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2)", 0); // One insert, one update one delete happens, target table missing PK testMergeUsing( "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );" + @@ -170,36 +156,6 @@ public void test() throws Exception { "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3) WHERE X<0", 0, "WHEN\""); - // Two updates to same row - update and delete together - emptying the - // parent table - testMergeUsing( - "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,1) )", - "MERGE INTO PARENT AS P USING (" + - "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3) ) AS S ON (P.ID = S.ID) " + - "WHEN MATCHED THEN " + - "UPDATE SET P.NAME = P.NAME||S.ID WHERE P.ID = 1 DELETE WHERE P.ID = 1 AND P.NAME = 'Marcy11'", - 
GATHER_ORDERED_RESULTS_SQL, - "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,1) WHERE X<0", - 2); - // Duplicate source keys but different ROWID update - so no error - // SQL standard says duplicate or repeated updates of same row in same - // statement should cause errors - but because first row is updated, - // deleted (on source row 1) then inserted (on source row 2) - // it's considered different - with respect to ROWID - so no error - // One insert, one update one delete happens (on same row) , target - // table missing PK, no source or target alias - if (false) // TODO - testMergeUsing( - "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,1) );" + - "CREATE TABLE SOURCE AS (SELECT 1 AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );", - "MERGE INTO PARENT USING SOURCE ON (PARENT.ID = SOURCE.ID) WHEN MATCHED THEN " + - "UPDATE SET PARENT.NAME = SOURCE.NAME||SOURCE.ID WHERE PARENT.ID = 2 " + - "DELETE WHERE PARENT.ID = 1 WHEN NOT MATCHED THEN " + - "INSERT (ID, NAME) VALUES (SOURCE.ID, SOURCE.NAME)", - GATHER_ORDERED_RESULTS_SQL, - "SELECT 1 AS ID, 'Marcy'||X||X UNION ALL SELECT 1 AS ID, 'Marcy2'", - 2); - // One insert, one update one delete happens, target table missing PK, // triggers update all NAME fields triggerTestingUpdateCount = 0; @@ -234,7 +190,7 @@ private void testMergeUsing(String setupSQL, String statementUnderTest, int expectedRowUpdateCount) throws Exception { deleteDb("mergeUsingQueries"); - try (Connection conn = getConnection("mergeUsingQueries")) { + try (Connection conn = getConnection("mergeUsingQueries;MODE=Oracle")) { Statement stat = conn.createStatement(); stat.execute(setupSQL); @@ -311,16 +267,6 @@ public void fire(Connection conn, Object[] oldRow, Object[] newRow) } } - @Override - public void close() { - // ignore - } - - @Override - public void remove() { - // ignore - } - @Override public void init(Connection conn, String schemaName, String trigger, String tableName, boolean before, int 
type) { diff --git a/h2/src/test/org/h2/test/db/TestMultiConn.java b/h2/src/test/org/h2/test/db/TestMultiConn.java index ed005ac9b0..891042cc72 100644 --- a/h2/src/test/org/h2/test/db/TestMultiConn.java +++ b/h2/src/test/org/h2/test/db/TestMultiConn.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -30,7 +30,7 @@ public class TestMultiConn extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -46,7 +46,7 @@ private void testConcurrentShutdownQuery() throws Exception { Connection conn1 = getConnection("multiConn"); Connection conn2 = getConnection("multiConn"); final Statement stat1 = conn1.createStatement(); - stat1.execute("CREATE ALIAS SLEEP FOR \"java.lang.Thread.sleep(long)\""); + stat1.execute("CREATE ALIAS SLEEP FOR 'java.lang.Thread.sleep(long)'"); final Statement stat2 = conn2.createStatement(); stat1.execute("SET THROTTLE 100"); Task t = new Task() { @@ -75,15 +75,15 @@ public void call() throws Exception { private void testThreeThreads() throws Exception { deleteDb("multiConn"); - final Connection conn1 = getConnection("multiConn"); - final Connection conn2 = getConnection("multiConn"); - final Connection conn3 = getConnection("multiConn"); + Connection conn1 = getConnection("multiConn"); + Connection conn2 = getConnection("multiConn"); + Connection conn3 = getConnection("multiConn"); conn1.setAutoCommit(false); conn2.setAutoCommit(false); conn3.setAutoCommit(false); - final Statement s1 = conn1.createStatement(); - final Statement s2 = conn2.createStatement(); - final Statement s3 = conn3.createStatement(); + 
Statement s1 = conn1.createStatement(); + Statement s2 = conn2.createStatement(); + Statement s3 = conn3.createStatement(); s1.execute("CREATE TABLE TEST1(ID INT)"); s2.execute("CREATE TABLE TEST2(ID INT)"); s3.execute("CREATE TABLE TEST3(ID INT)"); @@ -93,28 +93,22 @@ private void testThreeThreads() throws Exception { s1.execute("SET LOCK_TIMEOUT 1000"); s2.execute("SET LOCK_TIMEOUT 1000"); s3.execute("SET LOCK_TIMEOUT 1000"); - Thread t1 = new Thread(new Runnable() { - @Override - public void run() { - try { - s3.execute("INSERT INTO TEST2 VALUES(4)"); - conn3.commit(); - } catch (SQLException e) { - TestBase.logError("insert", e); - } + Thread t1 = new Thread(() -> { + try { + s3.execute("INSERT INTO TEST2 VALUES(4)"); + conn3.commit(); + } catch (SQLException e) { + TestBase.logError("insert", e); } }); t1.start(); Thread.sleep(20); - Thread t2 = new Thread(new Runnable() { - @Override - public void run() { - try { - s2.execute("INSERT INTO TEST1 VALUES(5)"); - conn2.commit(); - } catch (SQLException e) { - TestBase.logError("insert", e); - } + Thread t2 = new Thread(() -> { + try { + s2.execute("INSERT INTO TEST1 VALUES(5)"); + conn2.commit(); + } catch (SQLException e) { + TestBase.logError("insert", e); } }); t2.start(); @@ -146,16 +140,13 @@ private void testConcurrentOpen() throws Exception { conn.createStatement().execute("SHUTDOWN"); conn.close(); final String listener = MyDatabaseEventListener.class.getName(); - Runnable r = new Runnable() { - @Override - public void run() { - try { - Connection c1 = getConnection("multiConn;DATABASE_EVENT_LISTENER='" + listener - + "';file_lock=socket"); - c1.close(); - } catch (Exception e) { - TestBase.logError("connect", e); - } + Runnable r = () -> { + try { + Connection c1 = getConnection("multiConn;DATABASE_EVENT_LISTENER='" + listener + + "';file_lock=socket"); + c1.close(); + } catch (Exception e) { + TestBase.logError("connect", e); } }; Thread thread = new Thread(r); @@ -208,16 +199,10 @@ private void 
testCommitRollback() throws SQLException { /** * A database event listener used in this test. */ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { + public static final class MyDatabaseEventListener implements DatabaseEventListener { @Override - public void exceptionThrown(SQLException e, String sql) { - // do nothing - } - - @Override - public void setProgress(int state, String name, int x, int max) { + public void setProgress(int state, String name, long x, long max) { if (wait > 0) { try { Thread.sleep(wait); @@ -227,20 +212,6 @@ public void setProgress(int state, String name, int x, int max) { } } - @Override - public void closingDatabase() { - // do nothing - } - - @Override - public void init(String url) { - // do nothing - } - - @Override - public void opened() { - // do nothing - } } } diff --git a/h2/src/test/org/h2/test/db/TestMultiDimension.java b/h2/src/test/org/h2/test/db/TestMultiDimension.java index 5ee3ae6f7c..afd99bde92 100644 --- a/h2/src/test/org/h2/test/db/TestMultiDimension.java +++ b/h2/src/test/org/h2/test/db/TestMultiDimension.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -30,7 +30,7 @@ public class TestMultiDimension extends TestDb { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -80,16 +80,15 @@ private void testHelperMethods() { assertEquals(y, tool.deinterleave(3, xyz, 1)); assertEquals(z, tool.deinterleave(3, xyz, 2)); } - createClassProxy(MultiDimension.class); - assertThrows(IllegalArgumentException.class, m).getMaxValue(1); - assertThrows(IllegalArgumentException.class, m).getMaxValue(33); - assertThrows(IllegalArgumentException.class, m).normalize(2, 10, 11, 12); - assertThrows(IllegalArgumentException.class, m).normalize(2, 5, 10, 0); - assertThrows(IllegalArgumentException.class, m).normalize(2, 10, 0, 9); - assertThrows(IllegalArgumentException.class, m).interleave(-1, 5); - assertThrows(IllegalArgumentException.class, m).interleave(5, -1); - assertThrows(IllegalArgumentException.class, m). - interleave(Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE); + assertThrows(IllegalArgumentException.class, () -> m.getMaxValue(1)); + assertThrows(IllegalArgumentException.class, () -> m.getMaxValue(33)); + assertThrows(IllegalArgumentException.class, () -> m.normalize(2, 10, 11, 12)); + assertThrows(IllegalArgumentException.class, () -> m.normalize(2, 5, 10, 0)); + assertThrows(IllegalArgumentException.class, () -> m.normalize(2, 10, 0, 9)); + assertThrows(IllegalArgumentException.class, () -> m.interleave(-1, 5)); + assertThrows(IllegalArgumentException.class, () -> m.interleave(5, -1)); + assertThrows(IllegalArgumentException.class, + () -> m.interleave(Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE)); } private void testPerformance2d() throws SQLException { @@ -97,8 +96,7 @@ private void testPerformance2d() throws SQLException { Connection conn; conn = getConnection("multiDimension"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS MAP FOR \"" + - getClass().getName() + ".interleave\""); + stat.execute("CREATE ALIAS MAP FOR '" + 
getClass().getName() + ".interleave'"); stat.execute("CREATE TABLE TEST(X INT NOT NULL, Y INT NOT NULL, " + "XY BIGINT AS MAP(X, Y), DATA VARCHAR)"); stat.execute("CREATE INDEX IDX_X ON TEST(X, Y)"); @@ -170,8 +168,7 @@ private void testPerformance3d() throws SQLException { Connection conn; conn = getConnection("multiDimension"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS MAP FOR \"" + - getClass().getName() + ".interleave\""); + stat.execute("CREATE ALIAS MAP FOR '" + getClass().getName() + ".interleave'"); stat.execute("CREATE TABLE TEST(X INT NOT NULL, " + "Y INT NOT NULL, Z INT NOT NULL, " + "XYZ BIGINT AS MAP(X, Y, Z), DATA VARCHAR)"); diff --git a/h2/src/test/org/h2/test/db/TestMultiThread.java b/h2/src/test/org/h2/test/db/TestMultiThread.java index 43f3c86dac..ea6f060686 100644 --- a/h2/src/test/org/h2/test/db/TestMultiThread.java +++ b/h2/src/test/org/h2/test/db/TestMultiThread.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -8,7 +8,6 @@ import java.io.StringReader; import java.math.BigDecimal; import java.sql.Connection; -import java.sql.DatabaseMetaData; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; @@ -21,6 +20,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import org.h2.api.ErrorCode; import org.h2.test.TestAll; import org.h2.test.TestBase; @@ -53,37 +53,26 @@ private TestMultiThread(TestAll config, TestMultiThread parent) { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public boolean isEnabled() { - // pagestore and multithreaded was always experimental, we're not going to fix that - if (!config.mvStore) { - return false; - } - return true; + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { testConcurrentSchemaChange(); testConcurrentLobAdd(); - testConcurrentView(); testConcurrentAlter(); - testConcurrentAnalyze(); testConcurrentInsertUpdateSelect(); - testLockModeWithMultiThreaded(); testViews(); testConcurrentInsert(); testConcurrentUpdate(); testConcurrentUpdate2(); + testCheckConstraint(); } private void testConcurrentSchemaChange() throws Exception { String db = getTestName(); deleteDb(db); - final String url = getURL(db + ";MULTI_THREADED=1;LOCK_TIMEOUT=10000", true); + final String url = getURL(db + ";LOCK_TIMEOUT=10000", true); try (Connection conn = getConnection(url)) { Task[] tasks = new Task[2]; for (int i = 0; i < tasks.length; i++) { @@ -114,7 +103,7 @@ public void call() throws Exception { private void testConcurrentLobAdd() throws Exception { String db = getTestName(); deleteDb(db); - final String url = getURL(db + ";MULTI_THREADED=1", true); + final String url = getURL(db, true); try (Connection conn = getConnection(url)) { Statement stat = conn.createStatement(); stat.execute("create table test(id identity, data clob)"); @@ -144,46 +133,6 @@ public void call() throws Exception { } } - private void testConcurrentView() throws Exception { - if (config.mvStore) { - return; - } - String db = getTestName(); - deleteDb(db); - final String url = getURL(db + ";MULTI_THREADED=1", true); - final Random r = new Random(); - try (Connection conn = getConnection(url)) { - Statement stat = conn.createStatement(); - StringBuilder buff = new StringBuilder(); - buff.append("create table test(id int"); - final int len = 3; - for (int i = 0; i < len; i++) { - buff.append(", x" + i + " int"); - } - 
buff.append(")"); - stat.execute(buff.toString()); - stat.execute("create view test_view as select * from test"); - stat.execute("insert into test(id) select x from system_range(1, 2)"); - Task t = new Task() { - @Override - public void call() throws Exception { - Connection c2 = getConnection(url); - while (!stop) { - c2.prepareStatement("select * from test_view where x" + - r.nextInt(len) + "=1"); - } - c2.close(); - } - }; - t.execute(); - for (int i = 0; i < 1000; i++) { - conn.prepareStatement("select * from test_view where x" + - r.nextInt(len) + "=1"); - } - t.get(); - } - } - private void testConcurrentAlter() throws Exception { deleteDb(getTestName()); try (final Connection conn = getConnection(getTestName())) { @@ -206,36 +155,6 @@ public void call() throws Exception { } } - private void testConcurrentAnalyze() throws Exception { - if (config.mvStore) { - return; - } - deleteDb(getTestName()); - final String url = getURL("concurrentAnalyze;MULTI_THREADED=1", true); - try (Connection conn = getConnection(url)) { - Statement stat = conn.createStatement(); - stat.execute("create table test(id bigint primary key) " + - "as select x from system_range(1, 1000)"); - Task t = new Task() { - @Override - public void call() throws SQLException { - try (Connection conn2 = getConnection(url)) { - for (int i = 0; i < 1000; i++) { - conn2.createStatement().execute("analyze"); - } - } - } - }; - t.execute(); - Thread.yield(); - for (int i = 0; i < 1000; i++) { - conn.createStatement().execute("analyze"); - } - t.get(); - stat.execute("drop table test"); - } - } - private void testConcurrentInsertUpdateSelect() throws Exception { try (Connection conn = getConnection()) { Statement stmt = conn.createStatement(); @@ -270,7 +189,7 @@ public void run() { Statement stmt = conn.createStatement(); while (!parent.stop) { stmt.execute("SELECT COUNT(*) FROM TEST"); - stmt.execute("INSERT INTO TEST VALUES(NULL, 'Hi')"); + stmt.execute("INSERT INTO TEST(NAME) VALUES('Hi')"); 
PreparedStatement prep = conn.prepareStatement( "UPDATE TEST SET NAME='Hello' WHERE ID=?"); prep.setInt(1, random.nextInt(10000)); @@ -287,23 +206,10 @@ public void run() { } } - private void testLockModeWithMultiThreaded() throws Exception { - deleteDb("lockMode"); - final String url = getURL("lockMode;MULTI_THREADED=1", true); - try (Connection conn = getConnection(url)) { - DatabaseMetaData meta = conn.getMetaData(); - // LOCK_MODE=0 with MULTI_THREADED=TRUE is supported only by MVStore - assertEquals(config.mvStore, meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_READ_UNCOMMITTED)); - } - deleteDb("lockMode"); - } - private void testViews() throws Exception { - // currently the combination of LOCK_MODE=0 and MULTI_THREADED // is not supported deleteDb("lockMode"); - final String url = getURL("lockMode;MULTI_THREADED=1", true); + String url = getURL("lockMode", true); // create some common tables and views ExecutorService executor = Executors.newFixedThreadPool(8); @@ -326,37 +232,34 @@ private void testViews() throws Exception { ArrayList> jobs = new ArrayList<>(); for (int i = 0; i < 1000; i++) { final int j = i; - jobs.add(executor.submit(new Callable() { - @Override - public Void call() throws Exception { - try (Connection conn2 = getConnection(url)) { - Statement stat2 = conn2.createStatement(); - - stat2.execute("CREATE VIEW INVOICE_VIEW" + j - + " as SELECT * FROM INVOICE_VIEW"); - - // the following query intermittently results in a - // NullPointerException - stat2.execute("CREATE VIEW INVOICE_DETAIL_VIEW" + j - + " as SELECT DTL.* FROM INVOICE_VIEW" + j - + " INV JOIN INVOICE_DETAIL_VIEW DTL " - + "ON INV.INVOICE_ID = DTL.INVOICE_ID" - + " WHERE DESCRIPTION='TEST'"); - - ResultSet rs = stat2 - .executeQuery("SELECT * FROM INVOICE_VIEW" + j); - rs.next(); - rs.close(); - - rs = stat2.executeQuery( - "SELECT * FROM INVOICE_DETAIL_VIEW" + j); - rs.next(); - rs.close(); - - stat2.close(); - } - return null; + jobs.add(executor.submit(() -> 
{ + try (Connection conn2 = getConnection(url)) { + Statement stat2 = conn2.createStatement(); + + stat2.execute("CREATE VIEW INVOICE_VIEW" + j + + " as SELECT * FROM INVOICE_VIEW"); + + // the following query intermittently results in a + // NullPointerException + stat2.execute("CREATE VIEW INVOICE_DETAIL_VIEW" + j + + " as SELECT DTL.* FROM INVOICE_VIEW" + j + + " INV JOIN INVOICE_DETAIL_VIEW DTL " + + "ON INV.INVOICE_ID = DTL.INVOICE_ID" + + " WHERE DESCRIPTION='TEST'"); + + ResultSet rs = stat2 + .executeQuery("SELECT * FROM INVOICE_VIEW" + j); + rs.next(); + rs.close(); + + rs = stat2.executeQuery( + "SELECT * FROM INVOICE_DETAIL_VIEW" + j); + rs.next(); + rs.close(); + + stat2.close(); } + return null; })); } // check for exceptions @@ -385,7 +288,7 @@ public Void call() throws Exception { private void testConcurrentInsert() throws Exception { deleteDb("lockMode"); - final String url = getURL("lockMode;MULTI_THREADED=1;LOCK_TIMEOUT=10000", true); + final String url = getURL("lockMode;LOCK_TIMEOUT=10000", true); int threadCount = 25; ExecutorService executor = Executors.newFixedThreadPool(threadCount); Connection conn = getConnection(url); @@ -396,23 +299,20 @@ private void testConcurrentInsert() throws Exception { final ArrayList> callables = new ArrayList<>(); for (int i = 0; i < threadCount; i++) { final long initialTransactionId = i * 1000000L; - callables.add(new Callable() { - @Override - public Void call() throws Exception { - try (Connection taskConn = getConnection(url)) { - taskConn.setAutoCommit(false); - PreparedStatement insertTranStmt = taskConn - .prepareStatement("INSERT INTO tran (id) VALUES(?)"); - // to guarantee uniqueness - long tranId = initialTransactionId; - for (int j = 0; j < 1000; j++) { - insertTranStmt.setLong(1, tranId++); - insertTranStmt.execute(); - taskConn.commit(); - } + callables.add(() -> { + try (Connection taskConn = getConnection(url)) { + taskConn.setAutoCommit(false); + PreparedStatement insertTranStmt = taskConn + 
.prepareStatement("INSERT INTO tran (id) VALUES(?)"); + // to guarantee uniqueness + long tranId = initialTransactionId; + for (int j = 0; j < 1000; j++) { + insertTranStmt.setLong(1, tranId++); + insertTranStmt.execute(); + taskConn.commit(); } - return null; } + return null; }); } @@ -437,7 +337,7 @@ private void testConcurrentUpdate() throws Exception { deleteDb("lockMode"); final int objectCount = 10000; - final String url = getURL("lockMode;MULTI_THREADED=1;LOCK_TIMEOUT=10000", true); + final String url = getURL("lockMode;LOCK_TIMEOUT=10000", true); int threadCount = 25; ExecutorService executor = Executors.newFixedThreadPool(threadCount); Connection conn = getConnection(url); @@ -455,22 +355,19 @@ private void testConcurrentUpdate() throws Exception { final ArrayList> callables = new ArrayList<>(); for (int i = 0; i < threadCount; i++) { - callables.add(new Callable() { - @Override - public Void call() throws Exception { - try (Connection taskConn = getConnection(url)) { - taskConn.setAutoCommit(false); - final PreparedStatement updateAcctStmt = taskConn - .prepareStatement("UPDATE account SET balance = ? WHERE id = ?"); - for (int j = 0; j < 1000; j++) { - updateAcctStmt.setDouble(1, Math.random()); - updateAcctStmt.setLong(2, (int) (Math.random() * objectCount)); - updateAcctStmt.execute(); - taskConn.commit(); - } + callables.add(() -> { + try (Connection taskConn = getConnection(url)) { + taskConn.setAutoCommit(false); + final PreparedStatement updateAcctStmt = taskConn + .prepareStatement("UPDATE account SET balance = ? 
WHERE id = ?"); + for (int j = 0; j < 1000; j++) { + updateAcctStmt.setDouble(1, Math.random()); + updateAcctStmt.setLong(2, (int) (Math.random() * objectCount)); + updateAcctStmt.execute(); + taskConn.commit(); } - return null; } + return null; }); } @@ -551,4 +448,57 @@ private void testConcurrentUpdate2() throws Exception { deleteDb("concurrentUpdate2"); } } + + private void testCheckConstraint() throws Exception { + deleteDb("checkConstraint"); + try (Connection c = getConnection("checkConstraint")) { + Statement s = c.createStatement(); + s.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, A INT, B INT)"); + PreparedStatement ps = c.prepareStatement("INSERT INTO TEST VALUES (?, ?, ?)"); + s.execute("ALTER TABLE TEST ADD CONSTRAINT CHECK_A_B CHECK A = B"); + final int numRows = 10; + for (int i = 0; i < numRows; i++) { + ps.setInt(1, i); + ps.setInt(2, 0); + ps.setInt(3, 0); + ps.executeUpdate(); + } + int numThreads = 4; + Thread[] threads = new Thread[numThreads]; + final AtomicBoolean error = new AtomicBoolean(); + for (int i = 0; i < numThreads; i++) { + threads[i] = new Thread() { + @Override + public void run() { + try (Connection c = getConnection("checkConstraint")) { + PreparedStatement ps = c.prepareStatement("UPDATE TEST SET A = ?, B = ? 
WHERE ID = ?"); + Random r = new Random(); + for (int i = 0; i < 1_000; i++) { + int v = r.nextInt(1_000); + ps.setInt(1, v); + ps.setInt(2, v); + ps.setInt(3, r.nextInt(numRows)); + ps.executeUpdate(); + } + } catch (SQLException e) { + error.set(true); + synchronized (TestMultiThread.this) { + logError("Error in CHECK constraint", e); + } + } + } + }; + } + for (int i = 0; i < numThreads; i++) { + threads[i].start(); + } + for (int i = 0; i < numThreads; i++) { + threads[i].join(); + } + assertFalse(error.get()); + } finally { + deleteDb("checkConstraint"); + } + } + } diff --git a/h2/src/test/org/h2/test/db/TestMultiThreadedKernel.java b/h2/src/test/org/h2/test/db/TestMultiThreadedKernel.java index 3dce6450fe..b700b2f8b0 100644 --- a/h2/src/test/org/h2/test/db/TestMultiThreadedKernel.java +++ b/h2/src/test/org/h2/test/db/TestMultiThreadedKernel.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -39,7 +39,7 @@ public class TestMultiThreadedKernel extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -178,6 +178,6 @@ public void call() throws SQLException { @Override protected String getURL(String name, boolean admin) { - return super.getURL(name + ";MULTI_THREADED=1;LOCK_TIMEOUT=2000", admin); + return super.getURL(name + ";LOCK_TIMEOUT=2000", admin); } } diff --git a/h2/src/test/org/h2/test/db/TestOpenClose.java b/h2/src/test/org/h2/test/db/TestOpenClose.java index 6cfc3906ce..3a58f0d599 100644 --- a/h2/src/test/org/h2/test/db/TestOpenClose.java +++ b/h2/src/test/org/h2/test/db/TestOpenClose.java @@ -1,12 +1,16 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.sql.Connection; import java.sql.DriverManager; import java.sql.PreparedStatement; @@ -37,7 +41,7 @@ public class TestOpenClose extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -48,6 +52,7 @@ public void test() throws Exception { testBackup(); testCase(); testReconnectFast(); + test1_1(); deleteDb("openClose"); } @@ -58,8 +63,8 @@ private void testErrorMessageLocked() throws Exception { deleteDb("openClose"); Connection conn; conn = getConnection("jdbc:h2:" + getBaseDir() + "/openClose;FILE_LOCK=FS"); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this).getConnection( - "jdbc:h2:" + getBaseDir() + "/openClose;FILE_LOCK=FS;OPEN_NEW=TRUE"); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, + () -> getConnection("jdbc:h2:" + getBaseDir() + "/openClose;FILE_LOCK=FS;OPEN_NEW=TRUE")); conn.close(); } @@ -67,16 +72,10 @@ private void testErrorMessageWrongSplit() throws Exception { if (config.memory || config.reopen) { return; } - String fn = getBaseDir() + "/openClose2"; - if (config.mvStore) { - fn += Constants.SUFFIX_MV_FILE; - } else { - fn += Constants.SUFFIX_PAGE_FILE; - } + String fn = getBaseDir() + "/openClose2" + Constants.SUFFIX_MV_FILE; FileUtils.delete("split:" + fn); Connection conn; - String url = "jdbc:h2:split:18:" + getBaseDir() + "/openClose2"; - url = getURL(url, true); + String url = getURL("jdbc:h2:split:18:" + getBaseDir() + "/openClose2", true); conn = DriverManager.getConnection(url); conn.createStatement().execute("create table test(id int, name varchar) " + "as select 1, space(1000000)"); @@ -85,11 +84,7 @@ private void testErrorMessageWrongSplit() throws Exception { c.position(c.size() * 2 - 1); c.write(ByteBuffer.wrap(new byte[1])); c.close(); - if (config.mvStore) { - assertThrows(ErrorCode.IO_EXCEPTION_1, this).getConnection(url); - } else { - assertThrows(ErrorCode.IO_EXCEPTION_2, this).getConnection(url); - } + assertThrows(ErrorCode.IO_EXCEPTION_1, () -> getConnection(url)); FileUtils.delete("split:" + fn); } @@ -155,7 +150,7 @@ private void testReconnectFast() throws SQLException { 
conn.close(); conn = DriverManager.getConnection(url, user, password); stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM DUAL"); + ResultSet rs = stat.executeQuery("SELECT * FROM SYSTEM_RANGE(1, 1)"); if (rs.next()) { rs.getString(1); } @@ -223,11 +218,22 @@ synchronized int getNextId() { return nextId++; } + private void test1_1() throws IOException { + Path old = Paths.get(getBaseDir()).resolve("db" + Constants.SUFFIX_OLD_DATABASE_FILE); + Files.createFile(old); + try { + assertThrows(ErrorCode.FILE_VERSION_ERROR_1, + () -> DriverManager.getConnection("jdbc:h2:" + getBaseDir() + "/db")); + } finally { + Files.deleteIfExists(old); + } + } + + /** * A database event listener used in this test. */ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { + public static final class MyDatabaseEventListener implements DatabaseEventListener { @Override public void exceptionThrown(SQLException e, String sql) { @@ -235,7 +241,7 @@ public void exceptionThrown(SQLException e, String sql) { } @Override - public void setProgress(int state, String name, int current, int max) { + public void setProgress(int state, String name, long current, long max) { String stateName; switch (state) { case STATE_SCAN_FILE: @@ -261,20 +267,6 @@ public void setProgress(int state, String name, int current, int max) { // System.out.println(": " + stateName); } - @Override - public void closingDatabase() { - // nothing to do - } - - @Override - public void init(String url) { - // nothing to do - } - - @Override - public void opened() { - // nothing to do - } } } diff --git a/h2/src/test/org/h2/test/db/TestOptimizations.java b/h2/src/test/org/h2/test/db/TestOptimizations.java index 073771ab70..2395824362 100644 --- a/h2/src/test/org/h2/test/db/TestOptimizations.java +++ b/h2/src/test/org/h2/test/db/TestOptimizations.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -20,7 +20,6 @@ import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.tools.SimpleResultSet; -import org.h2.util.StringUtils; import org.h2.util.Task; /** @@ -35,12 +34,13 @@ public class TestOptimizations extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { deleteDb("optimizations"); + testConditionsStackOverflow(); testIdentityIndexUsage(); testFastRowIdCondition(); testExplainRoundTrip(); @@ -114,8 +114,8 @@ private void testFastRowIdCondition() throws Exception { private void testExplainRoundTrip() throws Exception { Connection conn = getConnection("optimizations"); - assertExplainRoundTrip(conn, - "SELECT \"X\" FROM DUAL WHERE \"X\" > ANY(SELECT \"X\" FROM DUAL)"); + assertExplainRoundTrip(conn, "SELECT \"X\" FROM SYSTEM_RANGE(1, 1)" + + " WHERE \"X\" > ANY(SELECT DISTINCT \"X\" FROM SYSTEM_RANGE(1, 1))"); conn.close(); } @@ -128,7 +128,6 @@ private void assertExplainRoundTrip(Connection conn, String sql) plan = plan.replaceAll("\\s+", " "); plan = plan.replaceAll("/\\*[^\\*]*\\*/", ""); plan = plan.replaceAll("\\s+", " "); - plan = StringUtils.replaceAll(plan, "SYSTEM_RANGE(1, 1)", "DUAL"); plan = plan.replaceAll("\\( ", "\\("); plan = plan.replaceAll(" \\)", "\\)"); assertEquals(sql, plan); @@ -174,7 +173,7 @@ private void testGroupSubquery() throws Exception { private void testAnalyzeLob() throws Exception { Connection conn = getConnection("optimizations"); Statement stat = conn.createStatement(); - stat.execute("create table test(v varchar, b binary, cl clob, bl blob) as " 
+ + stat.execute("create table test(v varchar, b varbinary, cl clob, bl blob) as " + "select ' ', '00', ' ', '00' from system_range(1, 100)"); stat.execute("analyze"); ResultSet rs = stat.executeQuery("select column_name, selectivity " + @@ -288,7 +287,8 @@ private void testRowId() throws SQLException { stat.execute("insert into test(data) values('World')"); stat.execute("insert into test(_rowid_, data) values(20, 'Hello')"); stat.execute( - "merge into test(_rowid_, data) key(_rowid_) values(20, 'Hallo')"); + "merge into test using (values(20, 'Hallo')) s(id, data) on test._rowid_ = s.id" + + " when matched then update set data = s.data"); rs = stat.executeQuery( "select _rowid_, data from test order by _rowid_"); rs.next(); @@ -362,8 +362,8 @@ private void testAutoAnalyze() throws SQLException { deleteDb("optimizations"); Connection conn = getConnection("optimizations"); Statement stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select value " + - "from information_schema.settings where name='analyzeAuto'"); + ResultSet rs = stat.executeQuery( + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'analyzeAuto'"); int auto = rs.next() ? 
rs.getInt(1) : 0; if (auto != 0) { stat.execute("create table test(id int)"); @@ -437,7 +437,7 @@ private void testConstantIn1() throws SQLException { stat.execute("create table test(id int primary key, name varchar(255))"); stat.execute("insert into test values(1, 'Hello'), (2, 'World')"); assertSingleValue(stat, - "select count(*) from test where name in ('Hello', 'World', 1)", 2); + "select count(*) from test where name in ('Hello', 'World', '1')", 2); assertSingleValue(stat, "select count(*) from test where name in ('Hello', 'World')", 2); assertSingleValue(stat, @@ -533,12 +533,12 @@ private void testNestedInSelectAndLike() throws SQLException { assertFalse(rs.next()); PreparedStatement prep; - prep = conn.prepareStatement("SELECT * FROM DUAL A " + - "WHERE A.X IN (SELECT B.X FROM DUAL B WHERE B.X LIKE ?)"); + prep = conn.prepareStatement("SELECT * FROM SYSTEM_RANGE(1, 1) A " + + "WHERE A.X IN (SELECT B.X FROM SYSTEM_RANGE(1, 1) B WHERE B.X LIKE ?)"); prep.setString(1, "1"); prep.execute(); - prep = conn.prepareStatement("SELECT * FROM DUAL A " + - "WHERE A.X IN (SELECT B.X FROM DUAL B WHERE B.X IN (?, ?))"); + prep = conn.prepareStatement("SELECT * FROM SYSTEM_RANGE(1, 1) A " + + "WHERE A.X IN (SELECT B.X FROM SYSTEM_RANGE(1, 1) B WHERE B.X IN (?, ?))"); prep.setInt(1, 1); prep.setInt(2, 1); prep.executeQuery(); @@ -580,9 +580,7 @@ private void testOptimizeInJoinSelect() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table item(id int primary key)"); stat.execute("insert into item values(1)"); - stat.execute("create alias opt for \"" + - getClass().getName() + - ".optimizeInJoinSelect\""); + stat.execute("create alias opt for '" + getClass().getName() + ".optimizeInJoinSelect'"); PreparedStatement prep = conn.prepareStatement( "select * from item where id in (select x from opt())"); ResultSet rs = prep.executeQuery(); @@ -661,10 +659,6 @@ private void testMinMaxNullOptimization() throws SQLException { ResultSet rs = 
stat.executeQuery( "explain select min(x), max(x) from test"); rs.next(); - if (!config.mvStore) { - String plan = rs.getString(1); - assertContains(plan, "direct"); - } rs = stat.executeQuery("select min(x), max(x) from test"); rs.next(); int min = rs.getInt(1); @@ -767,17 +761,6 @@ private void testDistinctOptimization() throws SQLException { assertEquals(i, rs.getInt(1)); } assertFalse(rs.next()); - rs = stat.executeQuery("SELECT DISTINCT TYPE FROM TEST " + - "ORDER BY TYPE LIMIT -1 OFFSET 0 SAMPLE_SIZE 3"); - // must have at least one row - assertTrue(rs.next()); - for (int i = 0; i < 3; i++) { - rs.getInt(1); - if (i > 0 && !rs.next()) { - break; - } - } - assertFalse(rs.next()); conn.close(); } @@ -868,8 +851,8 @@ private void testMinMaxCountOptimization(boolean memory) Connection conn = getConnection("optimizations"); Statement stat = conn.createStatement(); stat.execute("create " + (memory ? "memory" : "") + - " table test(id int primary key, value int)"); - stat.execute("create index idx_value_id on test(value, id);"); + " table test(id int primary key, v int)"); + stat.execute("create index idx_v_id on test(v, id);"); int len = getSize(1000, 10000); HashMap map = new HashMap<>(); TreeSet set = new TreeSet<>(); @@ -924,7 +907,7 @@ private void testMinMaxCountOptimization(boolean memory) max = set.last(); } ResultSet rs = stat.executeQuery( - "select min(value), max(value), count(*) from test"); + "select min(v), max(v), count(*) from test"); rs.next(); Integer minDb = (Integer) rs.getObject(1); Integer maxDb = (Integer) rs.getObject(2); @@ -956,9 +939,9 @@ private void testIn() throws SQLException { assertFalse(stat.executeQuery("select * from dual " + "where null in(null, 1)").next()); - assertFalse(stat.executeQuery("select * from dual " + + assertFalse(stat.executeQuery("select * from system_range(1, 1) " + "where 1+x in(3, 4)").next()); - assertFalse(stat.executeQuery("select * from dual d1, dual d2 " + + assertFalse(stat.executeQuery("select * from 
system_range(1, 1) d1, dual d2 " + "where d1.x in(3, 4)").next()); stat.execute("create table test(id int primary key, name varchar)"); @@ -1151,20 +1134,20 @@ private void testUseCoveringIndex() throws SQLException { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TABLE_A(id IDENTITY PRIMARY KEY NOT NULL, " + "name VARCHAR NOT NULL, active BOOLEAN DEFAULT TRUE, " + - "UNIQUE KEY TABLE_A_UK (name) )"); + "CONSTRAINT TABLE_A_UK UNIQUE (name) )"); stat.execute("CREATE TABLE TABLE_B(id IDENTITY PRIMARY KEY NOT NULL, " + - "TABLE_a_id BIGINT NOT NULL, createDate TIMESTAMP DEFAULT NOW(), " + - "UNIQUE KEY TABLE_B_UK (table_a_id, createDate), " + - "FOREIGN KEY (table_a_id) REFERENCES TABLE_A(id) )"); + "TABLE_a_id BIGINT NOT NULL, createDate TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, " + + "CONSTRAINT TABLE_B_UK UNIQUE (table_a_id, createDate))"); + stat.execute("CREATE INDEX TABLE_B_IDX ON TABLE_B(TABLE_A_ID)"); + stat.execute("ALTER TABLE TABLE_B ADD FOREIGN KEY (table_a_id) REFERENCES TABLE_A(id)"); stat.execute("INSERT INTO TABLE_A (name) SELECT 'package_' || CAST(X as VARCHAR) " + "FROM SYSTEM_RANGE(1, 100) WHERE X <= 100"); int count = config.memory ? 
30_000 : 50_000; stat.execute("INSERT INTO TABLE_B (table_a_id, createDate) SELECT " + "CASE WHEN table_a_id = 0 THEN 1 ELSE table_a_id END, createDate " + "FROM ( SELECT ROUND((RAND() * 100)) AS table_a_id, " + - "DATEADD('SECOND', X, NOW()) as createDate FROM SYSTEM_RANGE(1, " + count + ") " + + "DATEADD('SECOND', X, CURRENT_TIMESTAMP) as createDate FROM SYSTEM_RANGE(1, " + count + ") " + "WHERE X < " + count + " )"); - stat.execute("CREATE INDEX table_b_idx ON table_b(table_a_id, id)"); stat.execute("ANALYZE"); ResultSet rs = stat.executeQuery("EXPLAIN ANALYZE SELECT MAX(b.id) as id " + @@ -1184,11 +1167,11 @@ private void testConditionAndOrDistributiveLaw() throws SQLException { Connection conn = getConnection("optimizations"); Statement stat = conn.createStatement(); stat.execute("CREATE TABLE IF NOT EXISTS TABLE_A (" + - "id int(10) NOT NULL AUTO_INCREMENT, " + + "id int NOT NULL AUTO_INCREMENT, " + "name VARCHAR(30) NOT NULL," + "occupation VARCHAR(20)," + - "age int(10)," + - "salary int(10)," + + "age int," + + "salary int," + "PRIMARY KEY(id))"); stat.execute("INSERT INTO TABLE_A (name,occupation,age,salary) VALUES" + "('mark', 'doctor',25,5000)," + @@ -1204,4 +1187,18 @@ private void testConditionAndOrDistributiveLaw() throws SQLException { assertTrue("engineer".equals(rs.getString("occupation"))); conn.close(); } + + private void testConditionsStackOverflow() throws SQLException { + deleteDb("optimizations"); + Connection conn = getConnection("optimizations"); + Statement stat = conn.createStatement(); + StringBuilder b = new StringBuilder("SELECT 1"); + for (int i=0; i<10000; i++) { + b.append(" AND 1"); + } + ResultSet rs = stat.executeQuery(b.toString()); + rs.next(); + assertTrue(rs.getBoolean(1)); + conn.close(); + } } diff --git a/h2/src/test/org/h2/test/db/TestOptimizerHints.java b/h2/src/test/org/h2/test/db/TestOptimizerHints.java deleted file mode 100644 index 046f9e0822..0000000000 --- a/h2/src/test/org/h2/test/db/TestOptimizerHints.java +++ 
/dev/null @@ -1,174 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Arrays; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - -/** - * Test for optimizer hint SET FORCE_JOIN_ORDER. - * - * @author Sergi Vladykin - */ -public class TestOptimizerHints extends TestDb { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - deleteDb("testOptimizerHints"); - Connection conn = getConnection("testOptimizerHints;FORCE_JOIN_ORDER=1"); - Statement s = conn.createStatement(); - - s.execute("create table t1(id int unique)"); - s.execute("create table t2(id int unique, t1_id int)"); - s.execute("create table t3(id int unique)"); - s.execute("create table t4(id int unique, t2_id int, t3_id int)"); - - String plan; - - plan = plan(s, "select * from t1, t2 where t1.id = t2.t1_id"); - assertContains(plan, "INNER JOIN \"PUBLIC\".\"T2\""); - - plan = plan(s, "select * from t2, t1 where t1.id = t2.t1_id"); - assertContains(plan, "INNER JOIN \"PUBLIC\".\"T1\""); - - plan = plan(s, "select * from t2, t1 where t1.id = 1"); - assertContains(plan, "INNER JOIN \"PUBLIC\".\"T1\""); - - plan = plan(s, "select * from t2, t1 where t1.id = t2.t1_id and t2.id = 1"); - assertContains(plan, "INNER JOIN \"PUBLIC\".\"T1\""); - - plan = plan(s, "select * from t1, t2 where t1.id = t2.t1_id and t2.id = 1"); - assertContains(plan, "INNER JOIN \"PUBLIC\".\"T2\""); - - checkPlanComma(s, "t1", "t2", "t3", "t4"); - checkPlanComma(s, "t4", "t2", "t3", "t1"); - checkPlanComma(s, "t2", "t1", "t3", "t4"); - checkPlanComma(s, "t1", "t4", "t3", "t2"); 
- checkPlanComma(s, "t2", "t1", "t4", "t3"); - checkPlanComma(s, "t4", "t3", "t2", "t1"); - - boolean on = false; - boolean left = false; - - checkPlanJoin(s, on, left, "t1", "t2", "t3", "t4"); - checkPlanJoin(s, on, left, "t4", "t2", "t3", "t1"); - checkPlanJoin(s, on, left, "t2", "t1", "t3", "t4"); - checkPlanJoin(s, on, left, "t1", "t4", "t3", "t2"); - checkPlanJoin(s, on, left, "t2", "t1", "t4", "t3"); - checkPlanJoin(s, on, left, "t4", "t3", "t2", "t1"); - - on = false; - left = true; - - checkPlanJoin(s, on, left, "t1", "t2", "t3", "t4"); - checkPlanJoin(s, on, left, "t4", "t2", "t3", "t1"); - checkPlanJoin(s, on, left, "t2", "t1", "t3", "t4"); - checkPlanJoin(s, on, left, "t1", "t4", "t3", "t2"); - checkPlanJoin(s, on, left, "t2", "t1", "t4", "t3"); - checkPlanJoin(s, on, left, "t4", "t3", "t2", "t1"); - - on = true; - left = false; - - checkPlanJoin(s, on, left, "t1", "t2", "t3", "t4"); - checkPlanJoin(s, on, left, "t4", "t2", "t3", "t1"); - checkPlanJoin(s, on, left, "t2", "t1", "t3", "t4"); - checkPlanJoin(s, on, left, "t1", "t4", "t3", "t2"); - checkPlanJoin(s, on, left, "t2", "t1", "t4", "t3"); - checkPlanJoin(s, on, left, "t4", "t3", "t2", "t1"); - - on = true; - left = true; - - checkPlanJoin(s, on, left, "t1", "t2", "t3", "t4"); - checkPlanJoin(s, on, left, "t4", "t2", "t3", "t1"); - checkPlanJoin(s, on, left, "t2", "t1", "t3", "t4"); - checkPlanJoin(s, on, left, "t1", "t4", "t3", "t2"); - checkPlanJoin(s, on, left, "t2", "t1", "t4", "t3"); - checkPlanJoin(s, on, left, "t4", "t3", "t2", "t1"); - - s.close(); - conn.close(); - deleteDb("testOptimizerHints"); - } - - private void checkPlanComma(Statement s, String ... 
t) throws SQLException { - StringBuilder builder = new StringBuilder("select 1 from "); - for (int i = 0, l = t.length; i < l; i++) { - if (i > 0) { - builder.append(", "); - } - builder.append(t[i]); - } - builder.append(" where t1.id = t2.t1_id and t2.id = t4.t2_id and t3.id = t4.t3_id"); - String plan = plan(s, builder.toString()); - int prev = plan.indexOf("FROM \"PUBLIC\".\"" + t[0].toUpperCase() + '"'); - for (int i = 1; i < t.length; i++) { - int next = plan.indexOf("INNER JOIN \"PUBLIC\".\"" + t[i].toUpperCase() + '"'); - assertTrue("Wrong plan for : " + Arrays.toString(t) + "\n" + plan, next > prev); - prev = next; - } - } - - private void checkPlanJoin(Statement s, boolean on, boolean left, - String... t) throws SQLException { - StringBuilder builder = new StringBuilder("select 1 from "); - for (int i = 0; i < t.length; i++) { - if (i != 0) { - if (left) { - builder.append(" left join "); - } else { - builder.append(" inner join "); - } - } - builder.append(t[i]); - if (on && i != 0) { - builder.append(" on 1=1 "); - } - } - builder.append(" where t1.id = t2.t1_id and t2.id = t4.t2_id and t3.id = t4.t3_id"); - String plan = plan(s, builder.toString()); - int prev = plan.indexOf("FROM \"PUBLIC\".\"" + t[0].toUpperCase() + '"'); - for (int i = 1; i < t.length; i++) { - int next = plan.indexOf( - (!left ? "INNER JOIN \"PUBLIC\".\"" : on ? "LEFT OUTER JOIN \"PUBLIC\".\"" : "\"PUBLIC\".\"") + - t[i].toUpperCase() + '"'); - if (prev > next) { - System.err.println(plan); - fail("Wrong plan for : " + Arrays.toString(t) + "\n" + plan); - } - prev = next; - } - } - - /** - * @param s Statement. - * @param query Query. - * @return Plan. - * @throws SQLException If failed. 
- */ - private String plan(Statement s, String query) throws SQLException { - ResultSet rs = s.executeQuery("explain " + query); - assertTrue(rs.next()); - String plan = rs.getString(1); - rs.close(); - return plan; - } -} diff --git a/h2/src/test/org/h2/test/db/TestOutOfMemory.java b/h2/src/test/org/h2/test/db/TestOutOfMemory.java index d8cf840c15..c93c5b83ab 100644 --- a/h2/src/test/org/h2/test/db/TestOutOfMemory.java +++ b/h2/src/test/org/h2/test/db/TestOutOfMemory.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -16,11 +16,13 @@ import java.util.concurrent.atomic.AtomicReference; import org.h2.api.ErrorCode; import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; import org.h2.store.fs.FilePath; -import org.h2.store.fs.FilePathMem; import org.h2.store.fs.FileUtils; +import org.h2.store.fs.mem.FilePathMem; import org.h2.test.TestBase; import org.h2.test.TestDb; +import org.h2.util.Utils; /** * Tests out of memory situations. The database must not get corrupted, and @@ -36,7 +38,7 @@ public class TestOutOfMemory extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -51,7 +53,7 @@ public boolean isEnabled() { @Override public void test() throws Exception { try { - if (!config.travis) { + if (!config.ci) { System.gc(); testMVStoreUsingInMemoryFileSystem(); System.gc(); @@ -69,15 +71,10 @@ public void test() throws Exception { private void testMVStoreUsingInMemoryFileSystem() { FilePath.register(new FilePathMem()); String fileName = "memFS:" + getTestName(); - final AtomicReference exRef = new AtomicReference<>(); + AtomicReference exRef = new AtomicReference<>(); MVStore store = new MVStore.Builder() .fileName(fileName) - .backgroundExceptionHandler(new Thread.UncaughtExceptionHandler() { - @Override - public void uncaughtException(Thread t, Throwable e) { - exRef.compareAndSet(null, e); - } - }) + .backgroundExceptionHandler((t, e) -> exRef.compareAndSet(null, e)) .open(); try { Map map = store.openMap("test"); @@ -90,14 +87,14 @@ public void uncaughtException(Thread t, Throwable e) { } Throwable throwable = exRef.get(); if(throwable instanceof OutOfMemoryError) throw (OutOfMemoryError)throwable; - if(throwable instanceof IllegalStateException) throw (IllegalStateException)throwable; + if(throwable instanceof MVStoreException) throw (MVStoreException)throwable; fail(); - } catch (OutOfMemoryError | IllegalStateException e) { + } catch (OutOfMemoryError | MVStoreException e) { // expected } try { store.close(); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { // expected } store.closeImmediately(); @@ -118,9 +115,10 @@ private void testDatabaseUsingInMemoryFileSystem() throws SQLException, Interrup try { Connection conn = DriverManager.getConnection(url); Statement stat = conn.createStatement(); + long memoryFree = Utils.getMemoryFree(); try { stat.execute("create table test(id int, name varchar) as " + - "select x, space(10000000+x) from system_range(1, 1000)"); + "select x, 
space(1000000+x) from system_range(1, 10000)"); fail(); } catch (SQLException e) { assertTrue("Unexpected error code: " + e.getErrorCode(), @@ -129,7 +127,7 @@ private void testDatabaseUsingInMemoryFileSystem() throws SQLException, Interrup ErrorCode.DATABASE_IS_CLOSED == e.getErrorCode() || ErrorCode.GENERAL_ERROR_1 == e.getErrorCode()); } - recoverAfterOOM(); + recoverAfterOOM(memoryFree * 3 / 4); try { conn.close(); fail(); @@ -140,7 +138,7 @@ private void testDatabaseUsingInMemoryFileSystem() throws SQLException, Interrup ErrorCode.DATABASE_IS_CLOSED == e.getErrorCode() || ErrorCode.GENERAL_ERROR_1 == e.getErrorCode()); } - recoverAfterOOM(); + recoverAfterOOM(memoryFree * 3 / 4); conn = DriverManager.getConnection(url); stat = conn.createStatement(); stat.execute("SELECT 1"); @@ -151,9 +149,11 @@ private void testDatabaseUsingInMemoryFileSystem() throws SQLException, Interrup } } - private static void recoverAfterOOM() throws InterruptedException { - for (int i = 0; i < 5; i++) { - System.gc(); + private static void recoverAfterOOM(long expectedFreeMemory) throws InterruptedException { + for (int i = 0; i < 50; i++) { + if (Utils.getMemoryFree() > expectedFreeMemory) { + break; + } Thread.sleep(20); } } @@ -211,8 +211,7 @@ private void testUpdateWhenNearlyOutOfMemory() throws Exception { } } - public static final class MyChild extends TestDb.Child - { + public static final class MyChild extends TestDb.Child { /** * Run just this test. diff --git a/h2/src/test/org/h2/test/db/TestPersistentCommonTableExpressions.java b/h2/src/test/org/h2/test/db/TestPersistentCommonTableExpressions.java index a9e7b2c3d9..e020fbcea8 100644 --- a/h2/src/test/org/h2/test/db/TestPersistentCommonTableExpressions.java +++ b/h2/src/test/org/h2/test/db/TestPersistentCommonTableExpressions.java @@ -1,11 +1,10 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; -import org.h2.engine.SysProperties; import org.h2.test.TestBase; /** @@ -19,7 +18,7 @@ public class TestPersistentCommonTableExpressions extends AbstractBaseForCommonT * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -33,35 +32,13 @@ public void test() throws Exception { } private void testRecursiveTable() throws Exception { - String numericName; - if (SysProperties.BIG_DECIMAL_IS_DECIMAL) { - numericName = "DECIMAL"; - } else { - numericName = "NUMERIC"; - } String[] expectedRowData = new String[]{"|meat|null", "|fruit|3", "|veg|2"}; - String[] expectedColumnTypes = new String[]{"VARCHAR", numericName}; + String[] expectedColumnTypes = new String[]{"CHARACTER VARYING", "NUMERIC"}; String[] expectedColumnNames = new String[]{"VAL", "SUM((SELECT\n" + " X\n" + "FROM PUBLIC.\"\" BB\n" + - " /* SELECT\n" + - " SUM(1) AS X,\n" + - " A\n" + - " FROM PUBLIC.B\n" + - " /++ PUBLIC.B.tableScan ++/\n" + - " /++ WHERE A IS ?1\n" + - " ++/\n" + - " /++ scanCount: 4 ++/\n" + - " INNER JOIN PUBLIC.C\n" + - " /++ PUBLIC.C.tableScan ++/\n" + - " ON 1=1\n" + - " WHERE (A IS ?1)\n" + - " AND (B.VAL = C.B)\n" + - " GROUP BY A: A IS A.VAL\n" + - " */\n" + - " /* scanCount: 1 */\n" + - "WHERE BB.A IS A.VAL))"}; + "WHERE BB.A IS NOT DISTINCT FROM A.VAL))"}; String setupSQL = "DROP TABLE IF EXISTS A; " @@ -92,7 +69,7 @@ private void testRecursiveTable() throws Exception { "GROUP BY a) \n" + "SELECT \n" + "A.val, \n" + - "sum((SELECT X FROM BB WHERE BB.a IS A.val))\n" + + "sum((SELECT X FROM BB WHERE BB.a IS NOT DISTINCT FROM A.val))\n" + "FROM A \n" + "GROUP BY A.val"; int maxRetries = 3; int expectedNumberOfRows = expectedRowData.length; diff --git a/h2/src/test/org/h2/test/db/TestPowerOff.java 
b/h2/src/test/org/h2/test/db/TestPowerOff.java index 5dd59b98bc..e1f5e67cac 100644 --- a/h2/src/test/org/h2/test/db/TestPowerOff.java +++ b/h2/src/test/org/h2/test/db/TestPowerOff.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -14,7 +14,6 @@ import org.h2.api.ErrorCode; import org.h2.engine.Database; -import org.h2.jdbc.JdbcConnection; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.util.JdbcUtils; @@ -35,7 +34,7 @@ public class TestPowerOff extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -77,18 +76,18 @@ private void testLobCrash() throws SQLException { conn = getConnection(url); stat = conn.createStatement(); stat.execute("set write_delay 0"); - ((JdbcConnection) conn).setPowerOffCount(Integer.MAX_VALUE); - stat.execute("insert into test values(null, space(11000))"); - int max = Integer.MAX_VALUE - ((JdbcConnection) conn).getPowerOffCount(); + setPowerOffCount(conn, Integer.MAX_VALUE); + stat.execute("insert into test(data) values space(11000)"); + int max = Integer.MAX_VALUE - getPowerOffCount(conn); for (int i = 0; i < max + 10; i++) { conn.close(); conn = getConnection(url); stat = conn.createStatement(); - stat.execute("insert into test values(null, space(11000))"); + stat.execute("insert into test(data) values space(11000)"); stat.execute("set write_delay 0"); - ((JdbcConnection) conn).setPowerOffCount(i); + setPowerOffCount(conn, i); try { - stat.execute("insert into test values(null, space(11000))"); + stat.execute("insert into test(data) values space(11000)"); } catch (SQLException e) 
{ // ignore } @@ -156,7 +155,7 @@ private void testCrash() throws SQLException { conn = getConnection(url); Statement stat = conn.createStatement(); stat.execute("SET WRITE_DELAY 0"); - ((JdbcConnection) conn).setPowerOffCount(random.nextInt(100)); + setPowerOffCount(conn, random.nextInt(100)); try { stat.execute("DROP TABLE IF EXISTS TEST"); stat.execute("CREATE TABLE TEST" + @@ -214,7 +213,7 @@ private void testMemoryTables() throws SQLException { "(ID INT PRIMARY KEY, NAME VARCHAR(255))"); stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); stat.execute("CHECKPOINT"); - ((JdbcConnection) conn).setPowerOffCount(1); + setPowerOffCount(conn, 1); try { stat.execute("INSERT INTO TEST VALUES(2, 'Hello')"); stat.execute("INSERT INTO TEST VALUES(3, 'Hello')"); @@ -224,7 +223,7 @@ private void testMemoryTables() throws SQLException { assertKnownException(e); } - ((JdbcConnection) conn).setPowerOffCount(0); + setPowerOffCount(conn, 0); try { conn.close(); } catch (SQLException e) { @@ -304,8 +303,7 @@ private int testRun(boolean init) throws SQLException { stat.execute("DROP TABLE TEST"); state = 0; if (init) { - maxPowerOffCount = Integer.MAX_VALUE - - ((JdbcConnection) conn).getPowerOffCount(); + maxPowerOffCount = Integer.MAX_VALUE - getPowerOffCount(conn); } conn.close(); } catch (SQLException e) { @@ -323,7 +321,7 @@ private int recoverAndCheckConsistency() throws SQLException { int state; Database.setInitialPowerOffCount(0); Connection conn = getConnection(url); - assertEquals(0, ((JdbcConnection) conn).getPowerOffCount()); + assertEquals(0, getPowerOffCount(conn)); Statement stat = conn.createStatement(); DatabaseMetaData meta = conn.getMetaData(); ResultSet rs = meta.getTables(null, null, "TEST", null); diff --git a/h2/src/test/org/h2/test/db/TestQueryCache.java b/h2/src/test/org/h2/test/db/TestQueryCache.java index e1f7213294..476bc6519b 100644 --- a/h2/src/test/org/h2/test/db/TestQueryCache.java +++ b/h2/src/test/org/h2/test/db/TestQueryCache.java @@ -1,6 +1,6 
@@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -25,7 +25,7 @@ public class TestQueryCache extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -96,14 +96,14 @@ private void test1() throws Exception { private void testClearingCacheWithTableStructureChanges() throws Exception { try (Connection conn = getConnection("queryCache;QUERY_CACHE_SIZE=10")) { - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, conn). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, conn). prepareStatement("SELECT * FROM TEST"); Statement stat = conn.createStatement(); stat.executeUpdate("CREATE TABLE TEST(col1 bigint, col2 varchar(255))"); PreparedStatement prep = conn.prepareStatement("SELECT * FROM TEST"); prep.close(); stat.executeUpdate("DROP TABLE TEST"); - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, conn). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, conn). prepareStatement("SELECT * FROM TEST"); } } diff --git a/h2/src/test/org/h2/test/db/TestReadOnly.java b/h2/src/test/org/h2/test/db/TestReadOnly.java index 406557944e..84bc97b178 100644 --- a/h2/src/test/org/h2/test/db/TestReadOnly.java +++ b/h2/src/test/org/h2/test/db/TestReadOnly.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -33,7 +33,7 @@ public class TestReadOnly extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -198,8 +198,8 @@ private void testReadOnlyConnect() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test(id identity)"); stat.execute("insert into test select x from system_range(1, 11)"); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this). - getConnection("readonlyConnect;ACCESS_MODE_DATA=r;OPEN_NEW=TRUE"); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, + () -> getConnection("readonlyConnect;ACCESS_MODE_DATA=r;OPEN_NEW=TRUE")); conn.close(); deleteDb("readonlyConnect"); } diff --git a/h2/src/test/org/h2/test/db/TestRecursiveQueries.java b/h2/src/test/org/h2/test/db/TestRecursiveQueries.java index ad8485999f..2a8d27a360 100644 --- a/h2/src/test/org/h2/test/db/TestRecursiveQueries.java +++ b/h2/src/test/org/h2/test/db/TestRecursiveQueries.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -24,7 +24,7 @@ public class TestRecursiveQueries extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -99,9 +99,9 @@ private void testSimpleUnionAll() throws Exception { assertFalse(rs.next()); prep = conn.prepareStatement("with recursive t(n) as " + - "(select @start union all select n+@inc from t where n<@end) " + + "(select @start union all select n+@inc from t where n<@end_index) " + "select * from t"); - prep2 = conn.prepareStatement("select @start:=?, @inc:=?, @end:=?"); + prep2 = conn.prepareStatement("select @start:=?, @inc:=?, @end_index:=?"); prep2.setInt(1, 10); prep2.setInt(2, 2); prep2.setInt(3, 14); @@ -142,7 +142,7 @@ private void testSimpleUnionAll() throws Exception { assertResultSetOrdered(rs, new String[][]{{"100"}, {"103"}}); rs = stat.executeQuery("with recursive t(i, s, d) as " - + "(select 1, '.', now() union all" + + "(select 1, '.', localtimestamp union all" + " select i+1, s||'.', d from t where i<3)" + " select * from t"); assertResultSetMeta(rs, 3, new String[]{ "I", "S", "D" }, diff --git a/h2/src/test/org/h2/test/db/TestRights.java b/h2/src/test/org/h2/test/db/TestRights.java index 40b3485614..dc5656e06c 100644 --- a/h2/src/test/org/h2/test/db/TestRights.java +++ b/h2/src/test/org/h2/test/db/TestRights.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -13,6 +13,7 @@ import java.sql.Statement; import org.h2.api.ErrorCode; +import org.h2.api.Trigger; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -30,7 +31,7 @@ public class TestRights extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -52,6 +53,8 @@ public void test() throws SQLException { testTableRename(); testSchemaRename(); testSchemaDrop(); + testDropTable(); + testSchemaOwner(); deleteDb("rights"); } @@ -68,7 +71,7 @@ private void testNullPassword() throws SQLException { private void testLinkedTableMeta() throws SQLException { deleteDb("rights"); - try (Connection conn = getConnection("rights")) { + try (Connection conn = getConnection("rights;OLD_INFORMATION_SCHEMA=TRUE")) { stat = conn.createStatement(); stat.execute("create user test password 'test'"); stat.execute("create linked table test" + @@ -290,13 +293,13 @@ private void testDisallowedTables() throws SQLException { DatabaseMetaData meta = conn2.getMetaData(); ResultSet rs; - rs = meta.getTables(null, null, "%", new String[]{"TABLE", "VIEW", "SEQUENCE"}); + rs = meta.getTables(null, "PUBLIC", "%", new String[]{"TABLE", "VIEW", "SEQUENCE"}); assertTrue(rs.next()); assertTrue(rs.next()); assertFalse(rs.next()); for (String s : new String[] { - "information_schema.settings where name='property.java.runtime.version'", - "information_schema.users where name='SA'", + "information_schema.settings where setting_name='property.java.runtime.version'", + "information_schema.users where user_name='SA'", "information_schema.roles", "information_schema.rights", "information_schema.sessions where user_name='SA'" @@ -320,8 +323,7 @@ private void testDropOwnUser() throws SQLException { stat.execute("DROP USER " + user); conn.close(); if (!config.memory) { - assertThrows(ErrorCode.WRONG_USER_OR_PASSWORD, this). 
- getConnection("rights"); + assertThrows(ErrorCode.WRONG_USER_OR_PASSWORD, () -> getConnection("rights")); } } @@ -347,7 +349,7 @@ private void testGetTables() throws SQLException { stat.execute("CREATE USER IF NOT EXISTS TEST PASSWORD 'TEST'"); stat.execute("CREATE TABLE TEST(ID INT)"); - stat.execute("GRANT ALL ON TEST TO TEST"); + stat.execute("GRANT ALL ON TABLE TEST TO TEST"); Connection conn2 = getConnection("rights", "TEST", getPassword("TEST")); DatabaseMetaData meta = conn2.getMetaData(); meta.getTables(null, null, "%", new String[]{"TABLE", "VIEW", "SEQUENCE"}); @@ -380,7 +382,7 @@ private void testSchemaRenameUser() throws SQLException { deleteDb("rights"); Connection conn = getConnection("rights"); stat = conn.createStatement(); - stat.execute("create user test password '' admin"); + stat.execute("create user test password ''"); stat.execute("create schema b authorization test"); stat.execute("create table b.test(id int)"); stat.execute("alter user test rename to test1"); @@ -388,12 +390,9 @@ private void testSchemaRenameUser() throws SQLException { conn = getConnection("rights"); stat = conn.createStatement(); stat.execute("select * from b.test"); - assertThrows(ErrorCode.CANNOT_DROP_2, stat). - execute("alter user test1 admin false"); assertThrows(ErrorCode.CANNOT_DROP_2, stat). 
execute("drop user test1"); stat.execute("drop schema b cascade"); - stat.execute("alter user test1 admin false"); stat.execute("drop user test1"); conn.close(); } @@ -425,14 +424,16 @@ private void testSchemaAdminRole() throws SQLException { "(ID INT PRIMARY KEY, NAME VARCHAR)"); conn.close(); + String url = "rights"; + // try and fail (no rights yet) - conn = getConnection("rights;LOG=2", "SCHEMA_CREATOR", getPassword("xyz")); + conn = getConnection(url, "SCHEMA_CREATOR", getPassword("xyz")); stat = conn.createStatement(); assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat).execute( "CREATE SCHEMA SCHEMA_RIGHT_TEST_WILL_FAIL"); assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat).execute( "ALTER SCHEMA SCHEMA_RIGHT_TEST_EXISTS RENAME TO SCHEMA_RIGHT_TEST_WILL_FAIL"); - assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat).execute( + assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, stat).execute( "DROP SCHEMA SCHEMA_RIGHT_TEST_EXISTS"); conn.close(); @@ -443,7 +444,7 @@ private void testSchemaAdminRole() throws SQLException { conn.close(); // try and succeed - conn = getConnection("rights;LOG=2", "SCHEMA_CREATOR", getPassword("xyz")); + conn = getConnection(url, "SCHEMA_CREATOR", getPassword("xyz")); stat = conn.createStatement(); // should be able to create a schema and manipulate tables on that @@ -473,14 +474,14 @@ private void testSchemaAdminRole() throws SQLException { conn.close(); // try again and fail - conn = getConnection("rights;LOG=2", "SCHEMA_CREATOR", getPassword("xyz")); + conn = getConnection(url, "SCHEMA_CREATOR", getPassword("xyz")); stat = conn.createStatement(); assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat). execute("CREATE SCHEMA SCHEMA_RIGHT_TEST"); assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat). execute("ALTER SCHEMA SCHEMA_RIGHT_TEST_EXISTS " + "RENAME TO SCHEMA_RIGHT_TEST_RENAMED"); - assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat). + assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, stat). 
execute("DROP SCHEMA SCHEMA_RIGHT_TEST_EXISTS"); assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, stat). execute("CREATE TABLE SCHEMA_RIGHT_TEST_EXISTS.TEST" + @@ -572,7 +573,8 @@ private void testAccessRights() throws SQLException { executeSuccess("GRANT SELECT, INSERT, UPDATE ON TEST TO PASS_READER"); conn.close(); - conn = getConnection("rights;LOG=2", "PASS_READER", getPassword("abc")); + String url = "rights"; + conn = getConnection(url, "PASS_READER", getPassword("abc")); stat = conn.createStatement(); executeSuccess("SELECT * FROM PASS_NAME"); executeSuccess("SELECT * FROM (SELECT * FROM PASS_NAME)"); @@ -586,7 +588,7 @@ private void testAccessRights() throws SQLException { executeError("SELECT * FROM (SELECT * FROM PASS)"); assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat). execute("CREATE VIEW X AS SELECT * FROM PASS_READER"); - assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat). + assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, stat). execute("CREATE VIEW X AS SELECT * FROM PASS_NAME"); conn.close(); @@ -645,7 +647,7 @@ private void testAccessRights() throws SQLException { } catch (SQLException e) { assertKnownException(e); } - conn = getConnection("rights;LOG=2", "TEST", getPassword("def")); + conn = getConnection(url, "TEST", getPassword("def")); stat = conn.createStatement(); assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat). 
@@ -712,6 +714,115 @@ private void testTableType(Connection conn, String type) throws SQLException { executeSuccess("DROP TABLE TEST"); } + private void testDropTable() throws SQLException { + deleteDb("rights"); + Connection conn = getConnection("rights"); + stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INT)"); + stat.execute("CREATE USER U PASSWORD '1'"); + stat.execute("GRANT ALL PRIVILEGES ON TEST TO U"); + Connection conn2 = getConnection("rights", "U", getPassword("1")); + conn.close(); + stat = conn2.createStatement(); + assertEquals(1, stat.executeUpdate("INSERT INTO TEST VALUES 1")); + assertEquals(1, stat.executeUpdate("UPDATE TEST SET ID = 2 WHERE ID = 1")); + assertEquals(1, stat.executeUpdate("DELETE FROM TEST WHERE ID = 2")); + executeError("DROP TABLE TEST"); + conn2.close(); + } + + private void testSchemaOwner() throws SQLException { + deleteDb("rights"); + Connection connAdmin = getConnection("rights"); + Statement statAdmin = connAdmin.createStatement(); + statAdmin.execute("CREATE USER SCHEMA_ADMIN PASSWORD '1'"); + statAdmin.execute("GRANT ALTER ANY SCHEMA TO SCHEMA_ADMIN"); + Connection connSchemaAdmin = getConnection("rights", "SCHEMA_ADMIN", getPassword("1")); + Statement statSchemaAdmin = connSchemaAdmin.createStatement(); + statAdmin.execute("CREATE USER SCHEMA_OWNER PASSWORD '1'"); + Connection connSchemaOwner = getConnection("rights", "SCHEMA_OWNER", getPassword("1")); + Statement statSchemaOwner = connSchemaOwner.createStatement(); + statAdmin.execute("CREATE USER OTHER PASSWORD '1'"); + Connection connOther = getConnection("rights", "OTHER", getPassword("1")); + Statement statOther = connOther.createStatement(); + testSchemaOwner(statAdmin, statSchemaAdmin, statSchemaOwner, statOther, "SCHEMA_OWNER"); + statAdmin.execute("CREATE ROLE SCHEMA_OWNER_ROLE"); + statAdmin.execute("GRANT SCHEMA_OWNER_ROLE TO SCHEMA_OWNER"); + testSchemaOwner(statAdmin, statSchemaAdmin, statSchemaOwner, statOther, "SCHEMA_OWNER_ROLE"); + 
testAdminAndSchemaOwner(statAdmin, statSchemaAdmin); + statAdmin.close(); + statSchemaAdmin.close(); + statSchemaOwner.close(); + } + + private void testSchemaOwner(Statement statAdmin, Statement statSchemaAdmin, Statement statSchemaOwner, + Statement statOther, String authorization) throws SQLException { + executeSuccessErrorAdmin(statSchemaAdmin, statSchemaOwner, "CREATE SCHEMA S AUTHORIZATION " + authorization); + executeSuccessError(statSchemaOwner, statOther, "CREATE DOMAIN S.D INT"); + executeSuccessError(statSchemaOwner, statOther, "ALTER DOMAIN S.D ADD CONSTRAINT S.D_C CHECK (VALUE > 0)"); + executeSuccessError(statSchemaOwner, statOther, "ALTER DOMAIN S.D DROP CONSTRAINT S.D_C"); + executeSuccessError(statSchemaOwner, statOther, "ALTER DOMAIN S.D RENAME TO S.D2"); + executeSuccessError(statSchemaOwner, statOther, "DROP DOMAIN S.D2"); + executeSuccessError(statSchemaOwner, statOther, "CREATE CONSTANT S.C VALUE 1"); + executeSuccessError(statSchemaOwner, statOther, "DROP CONSTANT S.C"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "CREATE ALIAS S.F FOR 'java.lang.Math.max(long,long)'"); + executeSuccessError(statSchemaOwner, statOther, "DROP ALIAS S.F"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, + "CREATE AGGREGATE S.A FOR \'" + TestFunctions.MedianStringType.class.getName() + '\''); + executeSuccessError(statSchemaOwner, statOther, "DROP AGGREGATE S.A"); + executeSuccessError(statSchemaOwner, statOther, "CREATE SEQUENCE S.S"); + executeSuccessError(statSchemaOwner, statOther, "ALTER SEQUENCE S.S RESTART WITH 2"); + executeSuccessError(statSchemaOwner, statOther, "DROP SEQUENCE S.S"); + executeSuccessError(statSchemaOwner, statOther, "CREATE VIEW S.V AS SELECT 1"); + executeSuccessError(statSchemaOwner, statOther, "ALTER VIEW S.V RECOMPILE"); + executeSuccessError(statSchemaOwner, statOther, "ALTER VIEW S.V RENAME TO S.V2"); + executeSuccessError(statSchemaOwner, statOther, "DROP VIEW S.V2"); + executeSuccessError(statSchemaOwner, 
statOther, "CREATE TABLE S.T(ID INT)"); + executeSuccessError(statSchemaOwner, statOther, "ALTER TABLE S.T ADD V INT"); + executeSuccessError(statSchemaOwner, statOther, "ALTER TABLE S.T ADD CONSTRAINT S.T_C UNIQUE(V)"); + executeSuccessError(statSchemaOwner, statOther, "ALTER TABLE S.T DROP CONSTRAINT S.T_C"); + executeSuccessError(statSchemaOwner, statOther, "CREATE UNIQUE INDEX S.I ON S.T(V)"); + executeSuccessError(statSchemaOwner, statOther, "ALTER INDEX S.I RENAME TO S.I2"); + executeSuccessError(statSchemaOwner, statOther, "DROP INDEX S.I2"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, + "CREATE TRIGGER S.G BEFORE INSERT ON S.T FOR EACH ROW CALL \'" + TestTrigger.class.getName() + '\''); + executeSuccessError(statSchemaOwner, statOther, "DROP TRIGGER S.G"); + executeSuccessError(statSchemaOwner, statOther, "GRANT SELECT ON S.T TO OTHER"); + executeSuccessError(statSchemaOwner, statOther, "REVOKE SELECT ON S.T FROM OTHER"); + executeSuccessError(statSchemaOwner, statOther, "ALTER TABLE S.T RENAME TO S.T2"); + executeSuccessError(statSchemaOwner, statOther, "DROP TABLE S.T2"); + executeSuccessError(statSchemaOwner, statOther, "DROP SCHEMA S"); + } + + private void testAdminAndSchemaOwner(Statement statAdmin, Statement statSchemaAdmin) throws SQLException { + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "GRANT ALTER ANY SCHEMA TO OTHER"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "REVOKE ALTER ANY SCHEMA FROM OTHER"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "CREATE USER U PASSWORD '1'"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "CREATE ROLE R"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "GRANT R TO U"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "REVOKE R FROM U"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "DROP USER U"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "DROP ROLE R"); + } + + public static class TestTrigger implements Trigger { + + 
@Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + } + + } + + private void executeSuccessErrorAdmin(Statement success, Statement error, String sql) throws SQLException { + assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, error).execute(sql); + success.execute(sql); + } + + private void executeSuccessError(Statement success, Statement error, String sql) throws SQLException { + assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, error).execute(sql); + success.execute(sql); + } + private void executeError(String sql) throws SQLException { assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, stat).execute(sql); } diff --git a/h2/src/test/org/h2/test/db/TestRowFactory.java b/h2/src/test/org/h2/test/db/TestRowFactory.java deleted file mode 100644 index dcce9e5e25..0000000000 --- a/h2/src/test/org/h2/test/db/TestRowFactory.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.sql.Connection; -import java.sql.Statement; -import java.util.concurrent.atomic.AtomicInteger; -import org.h2.result.Row; -import org.h2.result.RowFactory; -import org.h2.result.RowImpl; -import org.h2.test.TestBase; -import org.h2.test.TestDb; -import org.h2.value.Value; - -/** - * Test {@link RowFactory} setting. - * - * @author Sergi Vladykin - */ -public class TestRowFactory extends TestDb { - - /** - * Run just this test. 
- * - * @param a ignored - */ - public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - deleteDb("rowFactory"); - Connection conn = getConnection("rowFactory;ROW_FACTORY=\"" + - MyTestRowFactory.class.getName() + '"'); - Statement stat = conn.createStatement(); - stat.execute("create table t1(id int, name varchar)"); - for (int i = 0; i < 1000; i++) { - stat.execute("insert into t1 values(" + i + ", 'name')"); - } - assertTrue(MyTestRowFactory.COUNTER.get() >= 1000); - conn.close(); - deleteDb("rowFactory"); - } - - /** - * Test row factory. - */ - public static class MyTestRowFactory extends RowFactory { - - /** - * A simple counter. - */ - static final AtomicInteger COUNTER = new AtomicInteger(); - - @Override - public Row createRow(Value[] data, int memory) { - COUNTER.incrementAndGet(); - return new RowImpl(data, memory); - } - } -} diff --git a/h2/src/test/org/h2/test/db/TestRunscript.java b/h2/src/test/org/h2/test/db/TestRunscript.java index 6fba6c2603..eeba97a95e 100644 --- a/h2/src/test/org/h2/test/db/TestRunscript.java +++ b/h2/src/test/org/h2/test/db/TestRunscript.java @@ -1,17 +1,24 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Types; +import java.util.Collections; + import org.h2.api.ErrorCode; import org.h2.api.Trigger; +import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -30,7 +37,13 @@ public class TestRunscript extends TestDb implements Trigger { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); + org.h2.test.TestAll config = new org.h2.test.TestAll(); + config.traceLevelFile = 1; + System.out.println(config); + TestBase test = createCaller(); + test.runTest(config); +// TestBase.createCaller().init().testFromMain(); } @Override @@ -51,6 +64,8 @@ public void test() throws Exception { testCancelScript(); testEncoding(); testClobPrimaryKey(); + testTruncateLargeLength(); + testVariableBinary(); deleteDb("runscript"); } @@ -59,7 +74,7 @@ private void testDropReferencedUserDefinedFunction() throws Exception { Connection conn; conn = getConnection("runscript"); Statement stat = conn.createStatement(); - stat.execute("create alias int_decode for \"java.lang.Integer.decode\""); + stat.execute("create alias int_decode for 'java.lang.Integer.decode'"); stat.execute("create table test(x varchar, y int as int_decode(x))"); stat.execute("script simple drop to '" + getBaseDir() + "/backup.sql'"); @@ -100,8 +115,8 @@ private void testScriptExcludeSchema() throws Exception { stat.execute("create schema include_schema2"); stat.execute("script nosettings schema include_schema1, include_schema2"); rs = stat.getResultSet(); - // user and one row per schema = 3 - assertResultRowCount(3, rs); + // 
version, user, and one row per schema = 4 + assertResultRowCount(4, rs); rs.close(); conn.close(); } @@ -143,8 +158,8 @@ private void testScriptExcludeTable() throws Exception { } stat.execute("script nosettings table a.test1, test2"); rs = stat.getResultSet(); - // user, schemas 'a' & 'b' and 2 rows per table = 7 - assertResultRowCount(7, rs); + // version, user, schemas 'a' & 'b', and 2 rows per table = 7 + assertResultRowCount(8, rs); rs.close(); conn.close(); } @@ -158,7 +173,7 @@ private void testScriptExcludeFunctionAlias() throws Exception { stat.execute("create schema a"); stat.execute("create schema b"); stat.execute("create schema c"); - stat.execute("create alias a.int_decode for \"java.lang.Integer.decode\""); + stat.execute("create alias a.int_decode for 'java.lang.Integer.decode'"); stat.execute("create table a.test(x varchar, y int as a.int_decode(x))"); stat.execute("script schema b"); rs = stat.getResultSet(); @@ -324,7 +339,7 @@ private void testRunscriptFromClasspath() throws Exception { } private void testCancelScript() throws Exception { - if (config.travis) { + if (config.ci) { // fails regularly under Travis, not sure why return; } @@ -418,7 +433,7 @@ private void testClobPrimaryKey() throws SQLException { stat.execute("create table test(id int not null, data clob) " + "as select 1, space(4100)"); // the primary key for SYSTEM_LOB_STREAM used to be named like this - stat.execute("create primary key primary_key_e on test(id)"); + stat.execute("alter table test add constraint primary_key_e primary key(id)"); stat.execute("script to '" + getBaseDir() + "/backup.sql'"); conn.close(); deleteDb("runscript"); @@ -441,8 +456,7 @@ private void test(boolean password) throws SQLException { stat1.execute("create table test2(id int primary key) as " + "select x from system_range(1, 5000)"); stat1.execute("create sequence testSeq start with 100 increment by 10"); - stat1.execute("create alias myTest for \"" + - getClass().getName() + ".test\""); + 
stat1.execute("create alias myTest for '" + getClass().getName() + ".test'"); stat1.execute("create trigger myTrigger before insert " + "on test nowait call \"" + getClass().getName() + "\""); stat1.execute("create view testView as select * " + @@ -461,7 +475,7 @@ private void test(boolean password) throws SQLException { stat1.execute("grant all on testSchema.child to testUser"); stat1.execute("grant select, insert on testSchema.parent to testRole"); stat1.execute("grant testRole to testUser"); - stat1.execute("create table blob (value blob)"); + stat1.execute("create table blob (v blob)"); PreparedStatement prep = conn1.prepareStatement( "insert into blob values (?)"); prep.setBytes(1, new byte[65536]); @@ -534,7 +548,52 @@ private void test(boolean password) throws SQLException { deleteDb("runscriptRestoreRecover"); FileUtils.delete(getBaseDir() + "/backup.2.sql"); FileUtils.delete(getBaseDir() + "/backup.3.sql"); + FileUtils.delete(getBaseDir() + "/runscript.h2.sql"); + + } + private void testTruncateLargeLength() throws Exception { + deleteDb("runscript"); + Connection conn; + Statement stat; + Files.write(Paths.get(getBaseDir() + "/backup.sql"), + Collections.singleton("CREATE TABLE TEST(V VARCHAR(2147483647))"), // + StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING); + conn = getConnection("runscript"); + stat = conn.createStatement(); + assertThrows(ErrorCode.INVALID_VALUE_PRECISION, stat) + .execute("RUNSCRIPT FROM '" + getBaseDir() + "/backup.sql'"); + stat.execute("RUNSCRIPT FROM '" + getBaseDir() + "/backup.sql' QUIRKS_MODE"); + assertEquals(Constants.MAX_STRING_LENGTH, stat.executeQuery("TABLE TEST").getMetaData().getPrecision(1)); + conn.close(); + deleteDb("runscript"); + FileUtils.delete(getBaseDir() + "/backup.sql"); + } + + private void testVariableBinary() throws SQLException { + deleteDb("runscript"); + Connection conn; + Statement stat; + conn = getConnection("runscript"); + stat = conn.createStatement(); + stat.execute("CREATE 
TABLE TEST(B BINARY)"); + assertEquals(Types.BINARY, stat.executeQuery("TABLE TEST").getMetaData().getColumnType(1)); + stat.execute("SCRIPT TO '" + getBaseDir() + "/backup.sql'"); + conn.close(); + deleteDb("runscript"); + conn = getConnection("runscript"); + stat = conn.createStatement(); + stat.execute("RUNSCRIPT FROM '" + getBaseDir() + "/backup.sql'"); + assertEquals(Types.BINARY, stat.executeQuery("TABLE TEST").getMetaData().getColumnType(1)); + conn.close(); + deleteDb("runscript"); + conn = getConnection("runscript"); + stat = conn.createStatement(); + stat.execute("RUNSCRIPT FROM '" + getBaseDir() + "/backup.sql' VARIABLE_BINARY"); + assertEquals(Types.VARBINARY, stat.executeQuery("TABLE TEST").getMetaData().getColumnType(1)); + conn.close(); + deleteDb("runscript"); + FileUtils.delete(getBaseDir() + "/backup.sql"); } @Override diff --git a/h2/src/test/org/h2/test/db/TestSQLInjection.java b/h2/src/test/org/h2/test/db/TestSQLInjection.java index d24f40cf58..8cb9dcaec6 100644 --- a/h2/src/test/org/h2/test/db/TestSQLInjection.java +++ b/h2/src/test/org/h2/test/db/TestSQLInjection.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -29,7 +29,7 @@ public class TestSQLInjection extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/db/TestSelectCountNonNullColumn.java b/h2/src/test/org/h2/test/db/TestSelectCountNonNullColumn.java deleted file mode 100644 index 464ff725d3..0000000000 --- a/h2/src/test/org/h2/test/db/TestSelectCountNonNullColumn.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - -/** - * Test that count(column) is converted to count(*) if the column is not - * nullable. - */ -public class TestSelectCountNonNullColumn extends TestDb { - - private static final String DBNAME = "selectCountNonNullColumn"; - private Statement stat; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws SQLException { - - deleteDb(DBNAME); - Connection conn = getConnection(DBNAME); - stat = conn.createStatement(); - - stat.execute("CREATE TABLE SIMPLE(KEY VARCHAR(25) " + - "PRIMARY KEY, NAME VARCHAR(25))"); - stat.execute("INSERT INTO SIMPLE(KEY) VALUES('k1')"); - stat.execute("INSERT INTO SIMPLE(KEY,NAME) VALUES('k2','name2')"); - - checkKeyCount(-1); - checkNameCount(-1); - checkStarCount(-1); - - checkKeyCount(2); - checkNameCount(1); - checkStarCount(2); - - conn.close(); - - } - - private void checkStarCount(long expect) throws SQLException { - String sql = "SELECT COUNT(*) FROM SIMPLE"; - if (expect < 0) { - sql = "EXPLAIN " + sql; - } - ResultSet rs = stat.executeQuery(sql); - rs.next(); - if (expect >= 0) { - assertEquals(expect, rs.getLong(1)); - } else { - // System.out.println(rs.getString(1)); - assertEquals("SELECT\n COUNT(*)\nFROM \"PUBLIC\".\"SIMPLE\"\n" - + " /* PUBLIC.PRIMARY_KEY_9 */\n" - + "/* direct lookup */", rs.getString(1)); - } - } - - private void checkKeyCount(long expect) throws SQLException { - String sql = "SELECT COUNT(KEY) FROM SIMPLE"; - if (expect < 0) { - sql = "EXPLAIN " + sql; - } - ResultSet rs = stat.executeQuery(sql); - rs.next(); - if (expect >= 0) { - assertEquals(expect, rs.getLong(1)); - } else { - assertEquals("SELECT\n" - + " COUNT(\"KEY\")\n" - + "FROM \"PUBLIC\".\"SIMPLE\"\n" - + " /* PUBLIC.PRIMARY_KEY_9 */\n" - + "/* direct lookup */", rs.getString(1)); - } - } - - private void checkNameCount(long expect) throws SQLException { - String sql = "SELECT COUNT(NAME) FROM SIMPLE"; - if (expect < 0) { - sql = "EXPLAIN " + sql; - } - ResultSet rs = stat.executeQuery(sql); - rs.next(); - if (expect >= 0) { - assertEquals(expect, rs.getLong(1)); - } else { - // System.out.println(rs.getString(1)); - assertEquals("SELECT\n" + " COUNT(\"NAME\")\n" + "FROM \"PUBLIC\".\"SIMPLE\"\n" - + " /* PUBLIC.SIMPLE.tableScan */", 
rs.getString(1)); - } - } - -} diff --git a/h2/src/test/org/h2/test/db/TestSelectTableNotFound.java b/h2/src/test/org/h2/test/db/TestSelectTableNotFound.java new file mode 100644 index 0000000000..bed6108812 --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestSelectTableNotFound.java @@ -0,0 +1,177 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; + +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +public class TestSelectTableNotFound extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testWithoutAnyCandidate(); + testWithOneCandidate(); + testWithTwoCandidates(); + testWithSchema(); + testWithSchemaSearchPath(); + testWhenSchemaIsEmpty(); + testWithSchemaWhenSchemaIsEmpty(); + testWithSchemaSearchPathWhenSchemaIsEmpty(); + } + + private void testWithoutAnyCandidate() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T2 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.executeQuery("SELECT 1 FROM t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found;"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithOneCandidate() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + 
stat.executeQuery("SELECT 1 FROM t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (candidates are: \"T1\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithTwoCandidates() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE Toast ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + stat.execute("CREATE TABLE TOAST ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.executeQuery("SELECT 1 FROM toast"); + fail("Table `toast` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"toast\" not found (candidates are: \"TOAST, Toast\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithSchema() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.executeQuery("SELECT 1 FROM PUBLIC.t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (candidates are: \"T1\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithSchemaSearchPath() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("SET SCHEMA_SEARCH_PATH PUBLIC"); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.executeQuery("SELECT 1 FROM t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + 
assertContains(message, "Table \"t1\" not found (candidates are: \"T1\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWhenSchemaIsEmpty() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + try { + stat.executeQuery("SELECT 1 FROM t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (this database is empty)"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithSchemaWhenSchemaIsEmpty() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + try { + stat.executeQuery("SELECT 1 FROM PUBLIC.t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (this database is empty)"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithSchemaSearchPathWhenSchemaIsEmpty() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("SET SCHEMA_SEARCH_PATH PUBLIC"); + try { + stat.executeQuery("SELECT 1 FROM t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (this database is empty)"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private Connection getConnection() throws SQLException { + return getConnection(getTestName() + ";DATABASE_TO_UPPER=FALSE"); + } +} diff --git a/h2/src/test/org/h2/test/db/TestSequence.java b/h2/src/test/org/h2/test/db/TestSequence.java index 9ab0fb2ced..689ada2716 100644 --- a/h2/src/test/org/h2/test/db/TestSequence.java +++ 
b/h2/src/test/org/h2/test/db/TestSequence.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -14,6 +14,7 @@ import java.util.Collections; import java.util.List; import org.h2.api.Trigger; +import org.h2.engine.Constants; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.util.Task; @@ -29,12 +30,13 @@ public class TestSequence extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { testConcurrentCreate(); + testConcurrentNextAndCurrentValue(); testSchemaSearchPath(); testAlterSequenceColumn(); testAlterSequence(); @@ -51,7 +53,7 @@ public void test() throws Exception { private void testConcurrentCreate() throws Exception { deleteDb("sequence"); - final String url = getURL("sequence;MULTI_THREADED=1;LOCK_TIMEOUT=2000", true); + final String url = getURL("sequence;LOCK_TIMEOUT=2000", true); Connection conn = getConnection(url); Task[] tasks = new Task[2]; try { @@ -104,6 +106,66 @@ private void createDropTrigger(Connection conn) throws Exception { } } + private void testConcurrentNextAndCurrentValue() throws Exception { + deleteDb("sequence"); + final String url = getURL("sequence", true); + Connection conn = getConnection(url); + Task[] tasks = new Task[2]; + try { + Statement stat = conn.createStatement(); + stat.execute("CREATE SEQUENCE SEQ1"); + stat.execute("CREATE SEQUENCE SEQ2"); + for (int i = 0; i < tasks.length; i++) { + tasks[i] = new Task() { + @Override + public void call() throws Exception { + try (Connection conn = getConnection(url)) { + PreparedStatement next1 
= conn.prepareStatement("CALL NEXT VALUE FOR SEQ1"); + PreparedStatement next2 = conn.prepareStatement("CALL NEXT VALUE FOR SEQ2"); + PreparedStatement current1 = conn.prepareStatement("CALL CURRENT VALUE FOR SEQ1"); + PreparedStatement current2 = conn.prepareStatement("CALL CURRENT VALUE FOR SEQ2"); + while (!stop) { + long v1, v2; + try (ResultSet rs = next1.executeQuery()) { + rs.next(); + v1 = rs.getLong(1); + } + try (ResultSet rs = next2.executeQuery()) { + rs.next(); + v2 = rs.getLong(1); + } + try (ResultSet rs = current1.executeQuery()) { + rs.next(); + if (v1 != rs.getLong(1)) { + throw new RuntimeException("Unexpected CURRENT VALUE FOR SEQ1"); + } + } + try (ResultSet rs = current2.executeQuery()) { + rs.next(); + if (v2 != rs.getLong(1)) { + throw new RuntimeException("Unexpected CURRENT VALUE FOR SEQ2"); + } + } + } + } + } + }.execute(); + } + Thread.sleep(1000); + for (Task t : tasks) { + Exception e = t.getException(); + if (e != null) { + throw new AssertionError(e.getMessage()); + } + } + } finally { + for (Task t : tasks) { + t.join(); + } + conn.close(); + } + } + private void testSchemaSearchPath() throws SQLException { deleteDb("sequence"); Connection conn = getConnection("sequence"); @@ -111,8 +173,8 @@ private void testSchemaSearchPath() throws SQLException { stat.execute("CREATE SCHEMA TEST"); stat.execute("CREATE SEQUENCE TEST.TEST_SEQ"); stat.execute("SET SCHEMA_SEARCH_PATH PUBLIC, TEST"); - stat.execute("CALL TEST_SEQ.NEXTVAL"); - stat.execute("CALL TEST_SEQ.CURRVAL"); + stat.execute("CALL NEXT VALUE FOR TEST_SEQ"); + stat.execute("CALL CURRENT VALUE FOR TEST_SEQ"); conn.close(); } @@ -122,7 +184,7 @@ private void testAlterSequenceColumn() throws SQLException { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST(ID INT , NAME VARCHAR(255))"); stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); - stat.execute("ALTER TABLE TEST ALTER COLUMN ID INT IDENTITY"); + stat.execute("ALTER TABLE TEST ALTER COLUMN ID INT 
GENERATED BY DEFAULT AS IDENTITY"); stat.execute("ALTER TABLE test ALTER COLUMN ID RESTART WITH 3"); stat.execute("INSERT INTO TEST (name) VALUES('Other World')"); conn.close(); @@ -131,8 +193,8 @@ private void testAlterSequenceColumn() throws SQLException { private void testAlterSequence() throws SQLException { test("create sequence s; alter sequence s restart with 2", null, 2, 3, 4); test("create sequence s; alter sequence s restart with 7", null, 7, 8, 9, 10); - test("create sequence s; alter sequence s restart with 11 " + - "minvalue 3 maxvalue 12 cycle", null, 11, 12, 3, 4); + test("create sequence s; alter sequence s start with 3 restart with 11 minvalue 3 maxvalue 12 cycle", + null, 11, 12, 3, 4); test("create sequence s; alter sequence s restart with 5 cache 2", null, 5, 6, 7, 8); test("create sequence s; alter sequence s restart with 9 " + @@ -188,38 +250,35 @@ private void testMetaTable() throws SQLException { assertEquals("SEQUENCE", rs.getString("SEQUENCE_CATALOG")); assertEquals("PUBLIC", rs.getString("SEQUENCE_SCHEMA")); assertEquals("A", rs.getString("SEQUENCE_NAME")); - assertEquals(0, rs.getLong("CURRENT_VALUE")); + assertEquals(1, rs.getLong("BASE_VALUE")); assertEquals(1, rs.getLong("INCREMENT")); - assertEquals(false, rs.getBoolean("IS_GENERATED")); - assertEquals("", rs.getString("REMARKS")); + assertNull(rs.getString("REMARKS")); assertEquals(32, rs.getLong("CACHE")); - assertEquals(1, rs.getLong("MIN_VALUE")); - assertEquals(Long.MAX_VALUE, rs.getLong("MAX_VALUE")); - assertEquals(false, rs.getBoolean("IS_CYCLE")); + assertEquals(1, rs.getLong("MINIMUM_VALUE")); + assertEquals(Long.MAX_VALUE, rs.getLong("MAXIMUM_VALUE")); + assertEquals("NO", rs.getString("CYCLE_OPTION")); rs.next(); assertEquals("SEQUENCE", rs.getString("SEQUENCE_CATALOG")); assertEquals("PUBLIC", rs.getString("SEQUENCE_SCHEMA")); assertEquals("B", rs.getString("SEQUENCE_NAME")); - assertEquals(5, rs.getLong("CURRENT_VALUE")); + assertEquals(7, rs.getLong("BASE_VALUE")); 
assertEquals(2, rs.getLong("INCREMENT")); - assertEquals(false, rs.getBoolean("IS_GENERATED")); - assertEquals("", rs.getString("REMARKS")); + assertNull(rs.getString("REMARKS")); assertEquals(1, rs.getLong("CACHE")); - assertEquals(5, rs.getLong("MIN_VALUE")); - assertEquals(9, rs.getLong("MAX_VALUE")); - assertEquals(true, rs.getBoolean("IS_CYCLE")); + assertEquals(5, rs.getLong("MINIMUM_VALUE")); + assertEquals(9, rs.getLong("MAXIMUM_VALUE")); + assertEquals("YES", rs.getString("CYCLE_OPTION")); rs.next(); assertEquals("SEQUENCE", rs.getString("SEQUENCE_CATALOG")); assertEquals("PUBLIC", rs.getString("SEQUENCE_SCHEMA")); assertEquals("C", rs.getString("SEQUENCE_NAME")); - assertEquals(-2, rs.getLong("CURRENT_VALUE")); + assertEquals(-4, rs.getLong("BASE_VALUE")); assertEquals(-2, rs.getLong("INCREMENT")); - assertEquals(false, rs.getBoolean("IS_GENERATED")); - assertEquals("", rs.getString("REMARKS")); + assertNull(rs.getString("REMARKS")); assertEquals(3, rs.getLong("CACHE")); - assertEquals(-9, rs.getLong("MIN_VALUE")); - assertEquals(-3, rs.getLong("MAX_VALUE")); - assertEquals(false, rs.getBoolean("IS_CYCLE")); + assertEquals(-9, rs.getLong("MINIMUM_VALUE")); + assertEquals(-3, rs.getLong("MAXIMUM_VALUE")); + assertEquals("NO", rs.getString("CYCLE_OPTION")); assertFalse(rs.next()); conn.close(); } @@ -272,33 +331,33 @@ private void testCreationErrors() throws SQLException { stat, "create sequence a minvalue 5 start with 2", "Unable to create or alter sequence \"A\" because of " + - "invalid attributes (start value \"2\", " + + "invalid attributes (base value \"2\", start value \"2\", " + "min value \"5\", max value \"" + Long.MAX_VALUE + - "\", increment \"1\")"); + "\", increment \"1\", cache size \"32\")"); expectError( stat, "create sequence b maxvalue 5 start with 7", "Unable to create or alter sequence \"B\" because of " + - "invalid attributes (start value \"7\", " + - "min value \"1\", max value \"5\", increment \"1\")"); + "invalid attributes (base 
value \"7\", start value \"7\", " + + "min value \"1\", max value \"5\", increment \"1\", cache size \"32\")"); expectError( stat, "create sequence c minvalue 5 maxvalue 2", "Unable to create or alter sequence \"C\" because of " + - "invalid attributes (start value \"5\", " + - "min value \"5\", max value \"2\", increment \"1\")"); + "invalid attributes (base value \"5\", start value \"5\", " + + "min value \"5\", max value \"2\", increment \"1\", cache size \"32\")"); expectError( stat, "create sequence d increment by 0", "Unable to create or alter sequence \"D\" because of " + - "invalid attributes (start value \"1\", " + + "invalid attributes (base value \"1\", start value \"1\", " + "min value \"1\", max value \"" + - Long.MAX_VALUE + "\", increment \"0\")"); + Long.MAX_VALUE + "\", increment \"0\", cache size \"32\")"); expectError(stat, "create sequence e minvalue 1 maxvalue 5 increment 99", "Unable to create or alter sequence \"E\" because of " + - "invalid attributes (start value \"1\", " + - "min value \"1\", max value \"5\", increment \"99\")"); + "invalid attributes (base value \"1\", start value \"1\", " + + "min value \"1\", max value \"5\", increment \"99\", cache size \"32\")"); conn.close(); } @@ -319,17 +378,18 @@ private void testCreateSql() throws SQLException { script.add(rs.getString(1)); } Collections.sort(script); - assertEquals("CREATE SEQUENCE \"PUBLIC\".\"A\" START WITH 1;", script.get(0)); + assertEquals("-- H2 " + Constants.VERSION + ";", script.get(0)); + assertEquals("CREATE SEQUENCE \"PUBLIC\".\"A\" START WITH 1;", script.get(1)); assertEquals("CREATE SEQUENCE \"PUBLIC\".\"B\" START " + "WITH 5 INCREMENT BY 2 " + - "MINVALUE 3 MAXVALUE 7 CYCLE CACHE 1;", script.get(1)); + "MINVALUE 3 MAXVALUE 7 CYCLE NO CACHE;", script.get(2)); assertEquals("CREATE SEQUENCE \"PUBLIC\".\"C\" START " + "WITH 3 MINVALUE 2 MAXVALUE 9 CACHE 2;", - script.get(2)); + script.get(3)); assertEquals("CREATE SEQUENCE \"PUBLIC\".\"D\" START " + - "WITH 1 CACHE 
1;", script.get(3)); + "WITH 1 NO CACHE;", script.get(4)); assertEquals("CREATE SEQUENCE \"PUBLIC\".\"E\" START " + - "WITH 1 CACHE 1;", script.get(4)); + "WITH 1 NO CACHE;", script.get(5)); conn.close(); } @@ -435,16 +495,6 @@ public void fire(Connection conn, Object[] oldRow, Object[] newRow) // ignore } - @Override - public void close() throws SQLException { - // ignore - } - - @Override - public void remove() throws SQLException { - // ignore - } - } } diff --git a/h2/src/test/org/h2/test/db/TestSessionsLocks.java b/h2/src/test/org/h2/test/db/TestSessionsLocks.java index 49177d2bf3..874cabe692 100644 --- a/h2/src/test/org/h2/test/db/TestSessionsLocks.java +++ b/h2/src/test/org/h2/test/db/TestSessionsLocks.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -9,6 +9,8 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; + +import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -23,23 +25,19 @@ public class TestSessionsLocks extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public boolean isEnabled() { - if (!config.multiThreaded) { - return false; - } return true; } @Override public void test() throws Exception { testCancelStatement(); - if (!config.mvStore) { - testLocks(); - } + testLocks(); + testAbortStatement(); deleteDb("sessionsLocks"); } @@ -62,24 +60,13 @@ private void testLocks() throws SQLException { assertEquals("PUBLIC", rs.getString("TABLE_SCHEMA")); assertEquals("TEST", rs.getString("TABLE_NAME")); rs.getString("SESSION_ID"); - if (config.mvStore) { - assertEquals("READ", rs.getString("LOCK_TYPE")); - } else { - assertEquals("WRITE", rs.getString("LOCK_TYPE")); - } + assertEquals("READ", rs.getString("LOCK_TYPE")); assertFalse(rs.next()); conn2.commit(); conn2.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); stat2.execute("SELECT * FROM TEST"); rs = stat.executeQuery("select * from information_schema.locks " + "order by session_id"); - if (!config.mvStore) { - rs.next(); - assertEquals("PUBLIC", rs.getString("TABLE_SCHEMA")); - assertEquals("TEST", rs.getString("TABLE_NAME")); - rs.getString("SESSION_ID"); - assertEquals("READ", rs.getString("LOCK_TYPE")); - } assertFalse(rs.next()); conn2.commit(); rs = stat.executeQuery("select * from information_schema.locks " + @@ -95,36 +82,33 @@ private void testCancelStatement() throws Exception { Statement stat = conn.createStatement(); ResultSet rs; rs = stat.executeQuery("select * from information_schema.sessions " + - "order by SESSION_START, ID"); + "order by SESSION_START, SESSION_ID"); rs.next(); - int sessionId = rs.getInt("ID"); + int sessionId = rs.getInt("SESSION_ID"); rs.getString("USER_NAME"); rs.getTimestamp("SESSION_START"); - rs.getString("STATEMENT"); - rs.getTimestamp("STATEMENT_START"); + rs.getString("EXECUTING_STATEMENT"); + rs.getTimestamp("EXECUTING_STATEMENT_START"); assertFalse(rs.next()); Connection conn2 = 
getConnection("sessionsLocks"); - final Statement stat2 = conn2.createStatement(); + Statement stat2 = conn2.createStatement(); rs = stat.executeQuery("select * from information_schema.sessions " + - "order by SESSION_START, ID"); + "order by SESSION_START, SESSION_ID"); assertTrue(rs.next()); - assertEquals(sessionId, rs.getInt("ID")); + assertEquals(sessionId, rs.getInt("SESSION_ID")); assertTrue(rs.next()); - int otherId = rs.getInt("ID"); + int otherId = rs.getInt("SESSION_ID"); assertTrue(otherId != sessionId); assertFalse(rs.next()); stat2.execute("set throttle 1"); - final boolean[] done = { false }; - Runnable runnable = new Runnable() { - @Override - public void run() { - try { - stat2.execute("select count(*) from " + - "system_range(1, 10000000) t1, system_range(1, 10000000) t2"); - new Error("Unexpected success").printStackTrace(); - } catch (SQLException e) { - done[0] = true; - } + boolean[] done = { false }; + Runnable runnable = () -> { + try { + stat2.execute("select count(*) from " + + "system_range(1, 10000000) t1, system_range(1, 10000000) t2"); + new Error("Unexpected success").printStackTrace(); + } catch (SQLException e) { + done[0] = true; } }; new Thread(runnable).start(); @@ -147,4 +131,58 @@ public void run() { conn.close(); } + private void testAbortStatement() throws Exception { + deleteDb("sessionsLocks"); + Connection conn = getConnection("sessionsLocks"); + Statement stat = conn.createStatement(); + ResultSet rs; + rs = stat.executeQuery("select session_id() as ID"); + rs.next(); + int sessionId = rs.getInt("ID"); + + // Setup session to be aborted + Connection conn2 = getConnection("sessionsLocks"); + Statement stat2 = conn2.createStatement(); + stat2.execute("create table test(id int primary key, name varchar)"); + conn2.setAutoCommit(false); + stat2.execute("insert into test values(1, 'Hello')"); + conn2.commit(); + // grab a lock + stat2.executeUpdate("update test set name = 'Again' where id = 1"); + + rs = 
stat2.executeQuery("select session_id() as ID"); + rs.next(); + + int otherId = rs.getInt("ID"); + assertTrue(otherId != sessionId); + assertFalse(rs.next()); + + // expect one lock + assertEquals(1, getLockCountForSession(stat, otherId)); + rs = stat.executeQuery("CALL ABORT_SESSION(" + otherId + ")"); + rs.next(); + assertTrue(rs.getBoolean(1)); + + // expect the lock to be released along with its session + assertEquals(0, getLockCountForSession(stat, otherId)); + rs = stat.executeQuery("CALL ABORT_SESSION(" + otherId + ")"); + rs.next(); + assertFalse("Session is expected to be already aborted", rs.getBoolean(1)); + + // using the connection for the aborted session is expected to throw an + // exception + assertThrows(config.networked ? ErrorCode.CONNECTION_BROKEN_1 : ErrorCode.DATABASE_CALLED_AT_SHUTDOWN, stat2) + .executeQuery("select count(*) from test"); + + conn2.close(); + conn.close(); + } + + private int getLockCountForSession(Statement stmnt, int otherId) throws SQLException { + try (ResultSet rs = stmnt + .executeQuery("select count(*) from information_schema.locks where session_id = " + otherId)) { + assertTrue(rs.next()); + return rs.getInt(1); + } + } } diff --git a/h2/src/test/org/h2/test/db/TestSetCollation.java b/h2/src/test/org/h2/test/db/TestSetCollation.java index 50bd885004..7c0559f107 100644 --- a/h2/src/test/org/h2/test/db/TestSetCollation.java +++ b/h2/src/test/org/h2/test/db/TestSetCollation.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -27,7 +27,7 @@ public class TestSetCollation extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/db/TestSpaceReuse.java b/h2/src/test/org/h2/test/db/TestSpaceReuse.java index 6135db5105..dd21cf549c 100644 --- a/h2/src/test/org/h2/test/db/TestSpaceReuse.java +++ b/h2/src/test/org/h2/test/db/TestSpaceReuse.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -24,7 +24,7 @@ public class TestSpaceReuse extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -43,18 +43,14 @@ public void test() throws SQLException { Connection conn = getConnection("spaceReuse"); Statement stat = conn.createStatement(); stat.execute("set retention_time 0"); + stat.execute("set write_delay 0"); // disable auto-commit so that free-unused runs on commit stat.execute("create table if not exists t(i int)"); stat.execute("insert into t select x from system_range(1, 500)"); conn.close(); conn = getConnection("spaceReuse"); conn.createStatement().execute("delete from t"); conn.close(); - String fileName = getBaseDir() + "/spaceReuse"; - if (config.mvStore) { - fileName += Constants.SUFFIX_MV_FILE; - } else { - fileName += Constants.SUFFIX_PAGE_FILE; - } + String fileName = getBaseDir() + "/spaceReuse" + Constants.SUFFIX_MV_FILE; now = FileUtils.size(fileName); assertTrue(now > 0); if (i < 10) { diff --git a/h2/src/test/org/h2/test/db/TestSpatial.java b/h2/src/test/org/h2/test/db/TestSpatial.java index e1863a8229..0de3de0f74 100644 --- a/h2/src/test/org/h2/test/db/TestSpatial.java +++ 
b/h2/src/test/org/h2/test/db/TestSpatial.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -13,19 +13,28 @@ import java.sql.Types; import java.util.Random; import org.h2.api.Aggregate; +import org.h2.api.ErrorCode; +import org.h2.message.DbException; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.tools.SimpleResultSet; import org.h2.tools.SimpleRowSource; -import org.h2.value.DataType; +import org.h2.util.HasSQL; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueGeometry; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueToObjectConverter2; import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.CoordinateSequence; import org.locationtech.jts.geom.Envelope; import org.locationtech.jts.geom.Geometry; import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.MultiPoint; import org.locationtech.jts.geom.Point; import org.locationtech.jts.geom.Polygon; +import org.locationtech.jts.geom.PrecisionModel; +import org.locationtech.jts.geom.impl.CoordinateArraySequenceFactory; import org.locationtech.jts.geom.util.AffineTransformation; import org.locationtech.jts.io.ByteOrderValues; import org.locationtech.jts.io.ParseException; @@ -49,15 +58,15 @@ public class TestSpatial extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public boolean isEnabled() { - if (config.memory && config.mvStore) { + if (config.memory) { return false; } - if (DataType.GEOMETRY_CLASS == null) { + if (ValueToObjectConverter.GEOMETRY_CLASS == null) { return false; } return true; @@ -71,6 +80,7 @@ public void test() throws SQLException { } private void testSpatial() throws SQLException { + testNaNs(); testBug1(); testSpatialValues(); testOverlap(); @@ -86,7 +96,6 @@ private void testSpatial() throws SQLException { testValueConversion(); testEquals(); testTableFunctionGeometry(); - testHashCode(); testAggregateWithGeometry(); testTableViewSpatialPredicate(); testValueGeometryScript(); @@ -103,6 +112,26 @@ private void testSpatial() throws SQLException { testSpatialIndexWithOrder(); } + private void testNaNs() { + GeometryFactory factory = new GeometryFactory(new PrecisionModel(), 0, + CoordinateArraySequenceFactory.instance()); + CoordinateSequence c2 = factory.getCoordinateSequenceFactory().create(1, 2, 0); + c2.setOrdinate(0, 0, 1d); + c2.setOrdinate(0, 1, 1d); + CoordinateSequence c3 = factory.getCoordinateSequenceFactory().create(1, 3, 0); + c3.setOrdinate(0, 0, 1d); + c3.setOrdinate(0, 1, 2d); + c3.setOrdinate(0, 2, 3d); + Point p2 = factory.createPoint(c2); + Point p3 = factory.createPoint(c3); + try { + ValueGeometry.getFromGeometry(new MultiPoint(new Point[] { p2, p3 }, factory)); + fail("Expected exception"); + } catch (DbException e) { + assertEquals(ErrorCode.DATA_CONVERSION_ERROR_1, e.getErrorCode()); + } + } + private void testBug1() throws SQLException { deleteDb("spatial"); Connection conn = getConnection(URL); @@ -118,17 +147,6 @@ private void testBug1() throws SQLException { deleteDb("spatial"); } - private void testHashCode() { - ValueGeometry geomA = ValueGeometry - .get("POLYGON ((67 13 6, 67 18 5, 59 18 4, 59 13 6, 67 13 6))"); - ValueGeometry geomB = ValueGeometry - 
.get("POLYGON ((67 13 6, 67 18 5, 59 18 4, 59 13 6, 67 13 6))"); - ValueGeometry geomC = ValueGeometry - .get("POLYGON ((67 13 6, 67 18 5, 59 18 4, 59 13 5, 67 13 6))"); - assertEquals(geomA.hashCode(), geomB.hashCode()); - assertFalse(geomA.hashCode() == geomC.hashCode()); - } - private void testSpatialValues() throws SQLException { deleteDb("spatial"); Connection conn = getConnection(URL); @@ -451,9 +469,7 @@ private void testMemorySpatialIndex() throws SQLException { "explain select * from test " + "where polygon && 'POLYGON ((1 1, 1 2, 2 2, 1 1))'::Geometry"); rs.next(); - if (config.mvStore) { - assertContains(rs.getString(1), "/* PUBLIC.IDX_TEST_POLYGON: POLYGON &&"); - } + assertContains(rs.getString(1), "/* PUBLIC.IDX_TEST_POLYGON: POLYGON &&"); // TODO equality should probably also use the spatial index // rs = stat.executeQuery("explain select * from test " + @@ -497,8 +513,7 @@ private void testJavaAlias() throws SQLException { deleteDb("spatial"); try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS T_GEOM_FROM_TEXT FOR \"" + - TestSpatial.class.getName() + ".geomFromText\""); + stat.execute("CREATE ALIAS T_GEOM_FROM_TEXT FOR '" + TestSpatial.class.getName() + ".geomFromText'"); stat.execute("create table test(id int primary key " + "auto_increment, the_geom geometry)"); stat.execute("insert into test(the_geom) values(" + @@ -520,8 +535,8 @@ private void testJavaAliasTableFunction() throws SQLException { deleteDb("spatial"); try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS T_RANDOM_GEOM_TABLE FOR \"" + - TestSpatial.class.getName() + ".getRandomGeometryTable\""); + stat.execute("CREATE ALIAS T_RANDOM_GEOM_TABLE FOR '" + + TestSpatial.class.getName() + ".getRandomGeometryTable'"); stat.execute( "create table test as " + "select * from T_RANDOM_GEOM_TABLE(42,20,-100,100,-100,100,4)"); @@ -589,6 +604,7 @@ public void reset() 
throws SQLException { */ public static Geometry geomFromText(String text, int srid) throws SQLException { WKTReader wktReader = new WKTReader(); + wktReader.setIsOldJtsCoordinateSyntaxAllowed(false); try { Geometry geom = wktReader.read(text); geom.setSRID(srid); @@ -601,7 +617,7 @@ public static Geometry geomFromText(String text, int srid) throws SQLException { private void testGeometryDataType() { GeometryFactory geometryFactory = new GeometryFactory(); Geometry geometry = geometryFactory.createPoint(new Coordinate(0, 0)); - assertEquals(Value.GEOMETRY, DataType.getTypeFromClass(geometry.getClass())); + assertEquals(TypeInfo.TYPE_GEOMETRY, ValueToObjectConverter2.classToType(geometry.getClass())); } /** @@ -613,9 +629,9 @@ private void testWKB() { assertEquals(ewkt, geom3d.getString()); ValueGeometry copy = ValueGeometry.get(geom3d.getBytes()); Geometry g = copy.getGeometry(); - assertEquals(6, g.getCoordinates()[0].z); - assertEquals(5, g.getCoordinates()[1].z); - assertEquals(4, g.getCoordinates()[2].z); + assertEquals(6, g.getCoordinates()[0].getZ()); + assertEquals(5, g.getCoordinates()[1].getZ()); + assertEquals(4, g.getCoordinates()[2].getZ()); // Test SRID copy = ValueGeometry.get(geom3d.getBytes()); assertEquals(27572, g.getSRID()); @@ -655,9 +671,7 @@ private void testValueConversion() throws SQLException { deleteDb("spatial"); Connection conn = getConnection(URL); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS OBJ_STRING FOR \"" + - TestSpatial.class.getName() + - ".getObjectString\""); + stat.execute("CREATE ALIAS OBJ_STRING FOR '" + TestSpatial.class.getName() + ".getObjectString'"); ResultSet rs = stat.executeQuery( "select OBJ_STRING('POINT( 15 25 )'::geometry)"); assertTrue(rs.next()); @@ -672,7 +686,7 @@ private void testValueConversion() throws SQLException { * @param object the object * @return the string representation */ - public static String getObjectString(Object object) { + public static String 
getObjectString(Geometry object) { return object.toString(); } @@ -682,7 +696,7 @@ public static String getObjectString(Object object) { private void testEquals() { // 3d equality test ValueGeometry geom3d = ValueGeometry.get( - "POLYGON ((67 13 6, 67 18 5, 59 18 4, 59 13 6, 67 13 6))"); + "POLYGON Z((67 13 6, 67 18 5, 59 18 4, 59 13 6, 67 13 6))"); ValueGeometry geom2d = ValueGeometry.get( "POLYGON ((67 13, 67 18, 59 18, 59 13, 67 13))"); assertFalse(geom3d.equals(geom2d)); @@ -707,8 +721,7 @@ private void testTableFunctionGeometry() throws SQLException { deleteDb("spatial"); try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS POINT_TABLE FOR \"" + - TestSpatial.class.getName() + ".pointTable\""); + stat.execute("CREATE ALIAS POINT_TABLE FOR '" + TestSpatial.class.getName() + ".pointTable'"); stat.execute("create table test as select * from point_table(1, 1)"); // Read column type ResultSet columnMeta = conn.getMetaData(). @@ -717,11 +730,6 @@ private void testTableFunctionGeometry() throws SQLException { assertEquals("geometry", columnMeta.getString("TYPE_NAME").toLowerCase()); assertFalse(columnMeta.next()); - - ResultSet rs = stat.executeQuery("select point_table(1, 1)"); - assertTrue(rs.next()); - ResultSet rs2 = (ResultSet) rs.getObject(1); - assertEquals("GEOMETRY", rs2.getMetaData().getColumnTypeName(1)); } deleteDb("spatial"); } @@ -736,7 +744,7 @@ private void testTableFunctionGeometry() throws SQLException { public static ResultSet pointTable(double x, double y) { GeometryFactory factory = new GeometryFactory(); SimpleResultSet rs = new SimpleResultSet(); - rs.addColumn("THE_GEOM", Types.JAVA_OBJECT, "GEOMETRY", 0, 0); + rs.addColumn("THE_GEOM", Types.OTHER, "GEOMETRY", 0, 0); rs.addRow(factory.createPoint(new Coordinate(x, y))); return rs; } @@ -745,8 +753,7 @@ private void testAggregateWithGeometry() throws SQLException { deleteDb("spatialIndex"); try (Connection conn = 
getConnection("spatialIndex")) { Statement st = conn.createStatement(); - st.execute("CREATE AGGREGATE TABLE_ENVELOPE FOR \""+ - TableEnvelope.class.getName()+"\""); + st.execute("CREATE AGGREGATE TABLE_ENVELOPE FOR '" + TableEnvelope.class.getName() + '\''); st.execute("CREATE TABLE test(the_geom GEOMETRY)"); st.execute("INSERT INTO test VALUES ('POINT(1 1)'), (null), (null), ('POINT(10 5)')"); ResultSet rs = st.executeQuery("select TABLE_ENVELOPE(the_geom) from test"); @@ -834,10 +841,10 @@ private void testTableViewSpatialPredicate() throws SQLException { * Check ValueGeometry conversion into SQL script */ private void testValueGeometryScript() throws SQLException { - ValueGeometry valueGeometry = ValueGeometry.get("POINT(1 1 5)"); + ValueGeometry valueGeometry = ValueGeometry.get("POINT Z(1 1 5)"); try (Connection conn = getConnection(URL)) { ResultSet rs = conn.createStatement().executeQuery( - "SELECT " + valueGeometry.getSQL()); + "SELECT " + valueGeometry.getSQL(HasSQL.DEFAULT_SQL_FLAGS)); assertTrue(rs.next()); Object obj = rs.getObject(1); ValueGeometry g = ValueGeometry.getFromGeometry(obj); @@ -874,7 +881,7 @@ private void testScanIndexOnNonSpatialQuery() throws SQLException { Statement stat = conn.createStatement(); stat.execute("drop table if exists test"); stat.execute("create table test(id serial primary key, " + - "value double, the_geom geometry)"); + "v double, the_geom geometry)"); stat.execute("create spatial index spatial on test(the_geom)"); ResultSet rs = stat.executeQuery("explain select * from test where _ROWID_ = 5"); assertTrue(rs.next()); @@ -914,8 +921,9 @@ private void testExplainSpatialIndexWithPk() throws SQLException { try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); stat.execute("drop table if exists pt_cloud;"); - stat.execute("CREATE TABLE PT_CLOUD(id serial, the_geom geometry) AS " + - "SELECT null, CONCAT('POINT(',A.X,' ',B.X,')')::geometry the_geom " + + stat.execute("CREATE TABLE 
PT_CLOUD(id serial, the_geom geometry)"); + stat.execute("INSERT INTO PT_CLOUD(the_geom) " + + "SELECT 'POINT(' || A.X || ' ' || B.X || ')' " + "from system_range(0,120) A,system_range(0,10) B;"); stat.execute("create spatial index on pt_cloud(the_geom);"); try (ResultSet rs = stat.executeQuery( @@ -1031,7 +1039,7 @@ private void testNullableGeometryInsert() throws SQLException { + "(id identity, the_geom geometry)"); stat.execute("create spatial index on test(the_geom)"); for (int i = 0; i < 1000; i++) { - stat.execute("insert into test values(null, null)"); + stat.execute("insert into test(the_geom) values null"); } ResultSet rs = stat.executeQuery("select * from test"); while (rs.next()) { @@ -1042,10 +1050,6 @@ private void testNullableGeometryInsert() throws SQLException { } private void testNullableGeometryUpdate() throws SQLException { - // TODO breaks in pagestore case - if (!config.mvStore) { - return; - } deleteDb("spatial"); Connection conn = getConnection(URL); Statement stat = conn.createStatement(); @@ -1188,7 +1192,8 @@ private void testSpatialIndexWithOrder() throws SQLException { try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); stat.execute("DROP TABLE IF EXISTS BUILDINGS;" + - "CREATE TABLE BUILDINGS (PK serial, THE_GEOM geometry);" + + "CREATE TABLE BUILDINGS (PK BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, " + + "THE_GEOM geometry);" + "insert into buildings(the_geom) SELECT 'POINT(1 1)" + "'::geometry from SYSTEM_RANGE(1,10000);\n" + "CREATE SPATIAL INDEX ON PUBLIC.BUILDINGS(THE_GEOM);\n"); diff --git a/h2/src/test/org/h2/test/db/TestSpeed.java b/h2/src/test/org/h2/test/db/TestSpeed.java index f2469dd12e..3e4d6a80a7 100644 --- a/h2/src/test/org/h2/test/db/TestSpeed.java +++ b/h2/src/test/org/h2/test/db/TestSpeed.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -25,7 +25,7 @@ public class TestSpeed extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/db/TestSubqueryPerformanceOnLazyExecutionMode.java b/h2/src/test/org/h2/test/db/TestSubqueryPerformanceOnLazyExecutionMode.java index 1731b897c8..48361bcf1e 100644 --- a/h2/src/test/org/h2/test/db/TestSubqueryPerformanceOnLazyExecutionMode.java +++ b/h2/src/test/org/h2/test/db/TestSubqueryPerformanceOnLazyExecutionMode.java @@ -1,20 +1,20 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; -import org.h2.command.dml.SetTypes; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import org.h2.command.dml.SetTypes; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + /** * Test subquery performance with lazy query execution mode {@link SetTypes#LAZY_QUERY_EXECUTION}. 
*/ @@ -30,12 +30,12 @@ public class TestSubqueryPerformanceOnLazyExecutionMode extends TestDb { * @param a ignored */ public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public boolean isEnabled() { - return !config.travis; + return !config.ci; } @Override diff --git a/h2/src/test/org/h2/test/db/TestSynonymForTable.java b/h2/src/test/org/h2/test/db/TestSynonymForTable.java index c51172b83e..61c04084c1 100644 --- a/h2/src/test/org/h2/test/db/TestSynonymForTable.java +++ b/h2/src/test/org/h2/test/db/TestSynonymForTable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -26,7 +26,7 @@ public class TestSynonymForTable extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -71,7 +71,7 @@ private void testDropSchema() throws SQLException { stat.execute("CREATE OR REPLACE SYNONYM testsynonym FOR s1.backingtable"); stat.execute("DROP SCHEMA s1 CASCADE"); - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat).execute("SELECT id FROM testsynonym"); + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat).execute("SELECT id FROM testsynonym"); conn.close(); } @@ -82,7 +82,7 @@ private void testDropTable() throws SQLException { stat.execute("DROP TABLE backingtable"); // Backing table does not exist anymore. 
- assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat).execute("SELECT id FROM testsynonym"); + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat).execute("SELECT id FROM testsynonym"); // Synonym should be dropped as well ResultSet synonyms = conn.createStatement().executeQuery( @@ -185,8 +185,7 @@ private void testMetaData() throws SQLException { assertEquals("TESTSYNONYM", synonyms.getString("SYNONYM_NAME")); assertEquals("BACKINGTABLE", synonyms.getString("SYNONYM_FOR")); assertEquals("VALID", synonyms.getString("STATUS")); - assertEquals("", synonyms.getString("REMARKS")); - assertNotNull(synonyms.getString("ID")); + assertNull(synonyms.getString("REMARKS")); assertFalse(synonyms.next()); conn.close(); } diff --git a/h2/src/test/org/h2/test/db/TestTableEngines.java b/h2/src/test/org/h2/test/db/TestTableEngines.java index 9d631ea876..a87646f7e3 100644 --- a/h2/src/test/org/h2/test/db/TestTableEngines.java +++ b/h2/src/test/org/h2/test/db/TestTableEngines.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -16,46 +16,31 @@ import java.util.Comparator; import java.util.Iterator; import java.util.List; -import java.util.Random; import java.util.Set; import java.util.TreeSet; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicInteger; import org.h2.api.TableEngine; import org.h2.command.ddl.CreateTableData; -import org.h2.command.dml.AllColumnsForPlan; -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.index.BaseIndex; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; import org.h2.index.Cursor; import org.h2.index.Index; -import org.h2.index.IndexLookupBatch; import org.h2.index.IndexType; import org.h2.index.SingleRowCursor; -import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.result.SortOrder; import org.h2.table.IndexColumn; -import org.h2.table.PageStoreTable; -import org.h2.table.SubQueryInfo; import org.h2.table.Table; import org.h2.table.TableBase; import org.h2.table.TableFilter; import org.h2.table.TableType; import org.h2.test.TestBase; import org.h2.test.TestDb; -import org.h2.util.DoneFuture; import org.h2.value.Value; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueString; /** * The class for external table engines mechanism testing. 
@@ -70,35 +55,17 @@ public class TestTableEngines extends TestDb { * @param a ignored */ public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { testQueryExpressionFlag(); testSubQueryInfo(); - testEarlyFilter(); testEngineParams(); testSchemaEngineParams(); testSimpleQuery(); testMultiColumnTreeSetIndex(); - testBatchedJoin(); - testAffinityKey(); - } - - private void testEarlyFilter() throws SQLException { - deleteDb("tableEngine"); - Connection conn = getConnection("tableEngine;EARLY_FILTER=TRUE"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE t1(id int, name varchar) ENGINE \"" + - EndlessTableEngine.class.getName() + "\""); - ResultSet rs = stat.executeQuery( - "SELECT name FROM t1 where id=1 and name is not null"); - assertTrue(rs.next()); - assertEquals("((ID = 1)\n AND (NAME IS NOT NULL))", rs.getString(1)); - rs.close(); - conn.close(); - deleteDb("tableEngine"); } private void testEngineParams() throws SQLException { @@ -228,12 +195,12 @@ private void testMultiColumnTreeSetIndex() throws SQLException { List> dataSet = new ArrayList<>(); - dataSet.add(Arrays.asList(1, "1", 1L)); - dataSet.add(Arrays.asList(1, "0", 2L)); - dataSet.add(Arrays.asList(2, "0", -1L)); - dataSet.add(Arrays.asList(0, "0", 1L)); - dataSet.add(Arrays.asList(0, "1", null)); - dataSet.add(Arrays.asList(2, null, 0L)); + dataSet.add(Arrays.asList(1, "1", 1L)); + dataSet.add(Arrays.asList(1, "0", 2L)); + dataSet.add(Arrays.asList(2, "0", -1L)); + dataSet.add(Arrays.asList(0, "0", 1L)); + dataSet.add(Arrays.asList(0, "1", null)); + dataSet.add(Arrays.asList(2, null, 0L)); PreparedStatement prep = conn.prepareStatement("INSERT INTO T(A,B,C) VALUES(?,?,?)"); for (List row : dataSet) { @@ -265,7 +232,7 @@ private void testMultiColumnTreeSetIndex() throws SQLException { checkPlan(stat, "select * from t where a = 0", "scan"); 
checkPlan(stat, "select * from t where a > 0 order by c, b", "IDX_C_B_A"); checkPlan(stat, "select * from t where a = 0 and c > 0", "IDX_C_B_A"); - checkPlan(stat, "select * from t where a = 0 and b < 0", "IDX_B_A"); + checkPlan(stat, "select * from t where a = 0 and b < '0'", "IDX_B_A"); assertEquals(6, ((Number) query(stat, "select count(*) from t").get(0).get(0)).intValue()); @@ -441,319 +408,6 @@ private void testSubQueryInfo() throws SQLException { deleteDb("testSubQueryInfo"); } - private void setBatchingEnabled(Statement stat, boolean enabled) throws SQLException { - stat.execute("SET BATCH_JOINS " + enabled); - if (!config.networked) { - Session s = (Session) ((JdbcConnection) stat.getConnection()).getSession(); - assertEquals(enabled, s.isJoinBatchEnabled()); - } - } - - private void testBatchedJoin() throws SQLException { - deleteDb("testBatchedJoin"); - Connection conn = getConnection("testBatchedJoin;OPTIMIZE_REUSE_RESULTS=0;BATCH_JOINS=1"); - Statement stat = conn.createStatement(); - setBatchingEnabled(stat, false); - setBatchingEnabled(stat, true); - - TreeSetIndex.exec = Executors.newFixedThreadPool(8, new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - Thread t = new Thread(r); - t.setDaemon(true); - return t; - } - }); - - forceJoinOrder(stat, true); - try { - doTestBatchedJoinSubQueryUnion(stat); - - TreeSetIndex.lookupBatches.set(0); - doTestBatchedJoin(stat, 1, 0, 0); - doTestBatchedJoin(stat, 0, 1, 0); - doTestBatchedJoin(stat, 0, 0, 1); - - doTestBatchedJoin(stat, 0, 2, 0); - doTestBatchedJoin(stat, 0, 0, 2); - - doTestBatchedJoin(stat, 0, 0, 3); - doTestBatchedJoin(stat, 0, 0, 4); - doTestBatchedJoin(stat, 0, 0, 5); - - doTestBatchedJoin(stat, 0, 3, 1); - doTestBatchedJoin(stat, 0, 3, 3); - doTestBatchedJoin(stat, 0, 3, 7); - - doTestBatchedJoin(stat, 0, 4, 1); - doTestBatchedJoin(stat, 0, 4, 6); - doTestBatchedJoin(stat, 0, 4, 20); - - doTestBatchedJoin(stat, 0, 10, 0); - doTestBatchedJoin(stat, 0, 0, 10); - - 
doTestBatchedJoin(stat, 0, 20, 0); - doTestBatchedJoin(stat, 0, 0, 20); - doTestBatchedJoin(stat, 0, 20, 20); - - doTestBatchedJoin(stat, 3, 7, 0); - doTestBatchedJoin(stat, 0, 0, 5); - doTestBatchedJoin(stat, 0, 8, 1); - doTestBatchedJoin(stat, 0, 2, 1); - - assertTrue(TreeSetIndex.lookupBatches.get() > 0); - } finally { - forceJoinOrder(stat, false); - TreeSetIndex.exec.shutdownNow(); - } - conn.close(); - deleteDb("testBatchedJoin"); - } - - private void testAffinityKey() throws SQLException { - deleteDb("tableEngine"); - Connection conn = getConnection("tableEngine;mode=Ignite;MV_STORE=FALSE"); - Statement stat = conn.createStatement(); - - stat.executeUpdate("CREATE TABLE T(ID INT AFFINITY PRIMARY KEY, NAME VARCHAR, AGE INT)" + - " ENGINE \"" + AffinityTableEngine.class.getName() + "\""); - Table tbl = AffinityTableEngine.createdTbl; - // Prevent memory leak - AffinityTableEngine.createdTbl = null; - assertNotNull(tbl); - assertEquals(3, tbl.getIndexes().size()); - Index aff = tbl.getIndexes().get(2); - assertTrue(aff.getIndexType().isAffinity()); - assertEquals("T_AFF", aff.getName()); - assertEquals(1, aff.getIndexColumns().length); - assertEquals("ID", aff.getIndexColumns()[0].columnName); - conn.close(); - deleteDb("tableEngine"); - } - - private static void forceJoinOrder(Statement s, boolean force) throws SQLException { - s.executeUpdate("SET FORCE_JOIN_ORDER " + force); - } - - private void checkPlan(Statement stat, String sql) throws SQLException { - ResultSet rs = stat.executeQuery("EXPLAIN " + sql); - assertTrue(rs.next()); - String plan = rs.getString(1); - assertEquals(normalize(sql), normalize(plan)); - } - - private static String normalize(String sql) { - sql = sql.replace('\n', ' '); - return sql.replaceAll("\\s+", " ").trim(); - } - - private void doTestBatchedJoinSubQueryUnion(Statement stat) throws SQLException { - String engine = '"' + TreeSetIndexTableEngine.class.getName() + '"'; - stat.execute("CREATE TABLE t (a int, b int) ENGINE " + 
engine); - TreeSetTable t = TreeSetIndexTableEngine.created; - stat.execute("CREATE INDEX T_IDX_A ON t(a)"); - stat.execute("CREATE INDEX T_IDX_B ON t(b)"); - setBatchSize(t, 3); - for (int i = 0; i < 20; i++) { - stat.execute("insert into t values (" + i + "," + (i + 10) + ")"); - } - stat.execute("CREATE TABLE u (a int, b int) ENGINE " + engine); - TreeSetTable u = TreeSetIndexTableEngine.created; - // Prevent memory leak - TreeSetIndexTableEngine.created = null; - stat.execute("CREATE INDEX U_IDX_A ON u(a)"); - stat.execute("CREATE INDEX U_IDX_B ON u(b)"); - setBatchSize(u, 0); - for (int i = 10; i < 25; i++) { - stat.execute("insert into u values (" + i + "," + (i - 15)+ ")"); - } - - checkPlan(stat, "SELECT 1 FROM \"PUBLIC\".\"T\" \"T1\" /* PUBLIC.scan */ " - + "INNER JOIN \"PUBLIC\".\"T\" \"T2\" /* batched:test PUBLIC.T_IDX_B: B = T1.A */ " - + "ON 1=1 WHERE \"T1\".\"A\" = \"T2\".\"B\""); - checkPlan(stat, "SELECT 1 FROM \"PUBLIC\".\"T\" \"T1\" /* PUBLIC.scan */ " - + "INNER JOIN \"PUBLIC\".\"T\" \"T2\" /* batched:test PUBLIC.T_IDX_B: B = T1.A */ " - + "ON 1=1 /* WHERE T1.A = T2.B */ " - + "INNER JOIN \"PUBLIC\".\"T\" \"T3\" /* batched:test PUBLIC.T_IDX_B: B = T2.A */ " - + "ON 1=1 WHERE (\"T2\".\"A\" = \"T3\".\"B\") AND (\"T1\".\"A\" = \"T2\".\"B\")"); - checkPlan(stat, "SELECT 1 FROM \"PUBLIC\".\"T\" \"T1\" /* PUBLIC.scan */ " - + "INNER JOIN \"PUBLIC\".\"U\" /* batched:fake PUBLIC.U_IDX_A: A = T1.A */ " - + "ON 1=1 /* WHERE T1.A = U.A */ " - + "INNER JOIN \"PUBLIC\".\"T\" \"T2\" /* batched:test PUBLIC.T_IDX_B: B = U.B */ " - + "ON 1=1 WHERE (\"T1\".\"A\" = \"U\".\"A\") AND (\"U\".\"B\" = \"T2\".\"B\")"); - checkPlan(stat, "SELECT 1 FROM ( SELECT \"A\" FROM \"PUBLIC\".\"T\" ) \"Z\" " - + "/* SELECT A FROM PUBLIC.T /++ PUBLIC.T_IDX_A ++/ */ " - + "INNER JOIN \"PUBLIC\".\"T\" /* batched:test PUBLIC.T_IDX_B: B = Z.A */ " - + "ON 1=1 WHERE \"Z\".\"A\" = \"T\".\"B\""); - checkPlan(stat, "SELECT 1 FROM \"PUBLIC\".\"T\" /* PUBLIC.T_IDX_B */ " - + "INNER JOIN ( 
SELECT \"A\" FROM \"PUBLIC\".\"T\" ) \"Z\" " - + "/* batched:view SELECT A FROM PUBLIC.T " - + "/++ batched:test PUBLIC.T_IDX_A: A IS ?1 ++/ " - + "WHERE A IS ?1: A = T.B */ ON 1=1 WHERE \"Z\".\"A\" = \"T\".\"B\""); - checkPlan(stat, "SELECT 1 FROM \"PUBLIC\".\"T\" /* PUBLIC.T_IDX_A */ " - + "INNER JOIN ( ((SELECT \"A\" FROM \"PUBLIC\".\"T\") UNION ALL (SELECT \"B\" FROM \"PUBLIC\".\"U\")) " - + "UNION ALL (SELECT \"B\" FROM \"PUBLIC\".\"T\") ) \"Z\" /* batched:view " - + "((SELECT A FROM PUBLIC.T /++ batched:test PUBLIC.T_IDX_A: A IS ?1 ++/ " - + "WHERE A IS ?1) " - + "UNION ALL " - + "(SELECT B FROM PUBLIC.U /++ PUBLIC.U_IDX_B: " - + "B IS ?1 ++/ WHERE B IS ?1)) " - + "UNION ALL " - + "(SELECT B FROM PUBLIC.T /++ batched:test PUBLIC.T_IDX_B: B IS ?1 ++/ " - + "WHERE B IS ?1): A = T.A */ ON 1=1 WHERE \"Z\".\"A\" = \"T\".\"A\""); - checkPlan(stat, "SELECT 1 FROM \"PUBLIC\".\"T\" /* PUBLIC.T_IDX_A */ " - + "INNER JOIN ( SELECT \"U\".\"A\" FROM \"PUBLIC\".\"U\" INNER JOIN \"PUBLIC\".\"T\" ON 1=1 " - + "WHERE \"U\".\"B\" = \"T\".\"B\" ) \"Z\" " - + "/* batched:view SELECT U.A FROM PUBLIC.U " - + "/++ batched:fake PUBLIC.U_IDX_A: A IS ?1 ++/ " - + "/++ WHERE U.A IS ?1 ++/ INNER JOIN PUBLIC.T " - + "/++ batched:test PUBLIC.T_IDX_B: B = U.B ++/ " - + "ON 1=1 WHERE (U.A IS ?1) AND (U.B = T.B): A = T.A */ " - + "ON 1=1 WHERE \"Z\".\"A\" = \"T\".\"A\""); - checkPlan(stat, "SELECT 1 FROM \"PUBLIC\".\"T\" /* PUBLIC.T_IDX_A */ " - + "INNER JOIN ( SELECT \"A\" FROM \"PUBLIC\".\"U\" ) \"Z\" /* SELECT A FROM PUBLIC.U " - + "/++ PUBLIC.U_IDX_A: A IS ?1 ++/ WHERE A IS ?1: A = T.A */ " - + "ON 1=1 WHERE \"T\".\"A\" = \"Z\".\"A\""); - checkPlan(stat, "SELECT 1 FROM " - + "( SELECT \"U\".\"A\" FROM \"PUBLIC\".\"U\" INNER JOIN \"PUBLIC\".\"T\" " - + "ON 1=1 WHERE \"U\".\"B\" = \"T\".\"B\" ) \"Z\" " - + "/* SELECT U.A FROM PUBLIC.U /++ PUBLIC.scan ++/ " - + "INNER JOIN PUBLIC.T /++ batched:test PUBLIC.T_IDX_B: B = U.B ++/ " - + "ON 1=1 WHERE U.B = T.B */ " - + "INNER JOIN 
\"PUBLIC\".\"T\" /* batched:test PUBLIC.T_IDX_A: A = Z.A */ ON 1=1 " - + "WHERE \"T\".\"A\" = \"Z\".\"A\""); - checkPlan(stat, "SELECT 1 FROM " - + "( SELECT \"U\".\"A\" FROM \"PUBLIC\".\"T\" INNER JOIN \"PUBLIC\".\"U\" " - + "ON 1=1 WHERE \"T\".\"B\" = \"U\".\"B\" ) \"Z\" " - + "/* SELECT U.A FROM PUBLIC.T /++ PUBLIC.T_IDX_B ++/ " - + "INNER JOIN PUBLIC.U /++ PUBLIC.U_IDX_B: B = T.B ++/ " - + "ON 1=1 WHERE T.B = U.B */ INNER JOIN \"PUBLIC\".\"T\" " - + "/* batched:test PUBLIC.T_IDX_A: A = Z.A */ " - + "ON 1=1 WHERE \"Z\".\"A\" = \"T\".\"A\""); - checkPlan(stat, "SELECT 1 FROM ( (SELECT \"A\" FROM \"PUBLIC\".\"T\") UNION " - + "(SELECT \"A\" FROM \"PUBLIC\".\"U\") ) \"Z\" " - + "/* (SELECT A FROM PUBLIC.T /++ PUBLIC.T_IDX_A ++/) " - + "UNION " - + "(SELECT A FROM PUBLIC.U /++ PUBLIC.U_IDX_A ++/) */ " - + "INNER JOIN \"PUBLIC\".\"T\" /* batched:test PUBLIC.T_IDX_A: A = Z.A */ ON 1=1 " - + "WHERE \"Z\".\"A\" = \"T\".\"A\""); - checkPlan(stat, "SELECT 1 FROM \"PUBLIC\".\"U\" /* PUBLIC.U_IDX_B */ " - + "INNER JOIN ( (SELECT \"A\", \"B\" FROM \"PUBLIC\".\"T\") " - + "UNION (SELECT \"B\", \"A\" FROM \"PUBLIC\".\"U\") ) \"Z\" " - + "/* batched:view (SELECT A, B FROM PUBLIC.T " - + "/++ batched:test PUBLIC.T_IDX_B: B IS ?1 ++/ " - + "WHERE B IS ?1) UNION (SELECT B, A FROM PUBLIC.U " - + "/++ PUBLIC.U_IDX_A: A IS ?1 ++/ " - + "WHERE A IS ?1): B = U.B */ ON 1=1 /* WHERE U.B = Z.B */ " - + "INNER JOIN \"PUBLIC\".\"T\" /* batched:test PUBLIC.T_IDX_A: A = Z.A */ ON 1=1 " - + "WHERE (\"U\".\"B\" = \"Z\".\"B\") AND (\"Z\".\"A\" = \"T\".\"A\")"); - checkPlan(stat, "SELECT 1 FROM \"PUBLIC\".\"U\" /* PUBLIC.U_IDX_A */ " - + "INNER JOIN ( SELECT \"A\", \"B\" FROM \"PUBLIC\".\"U\" ) \"Z\" " - + "/* batched:fake SELECT A, B FROM PUBLIC.U /++ PUBLIC.U_IDX_A: A IS ?1 ++/ " - + "WHERE A IS ?1: A = U.A */ ON 1=1 /* WHERE U.A = Z.A */ " - + "INNER JOIN \"PUBLIC\".\"T\" /* batched:test PUBLIC.T_IDX_B: B = Z.B */ " - + "ON 1=1 WHERE (\"U\".\"A\" = \"Z\".\"A\") AND (\"Z\".\"B\" = 
\"T\".\"B\")"); - - // t: a = [ 0..20), b = [10..30) - // u: a = [10..25), b = [-5..10) - checkBatchedQueryResult(stat, 10, - "select t.a from t, (select t.b from u, t where u.a = t.a) z " + - "where t.b = z.b"); - checkBatchedQueryResult(stat, 5, - "select t.a from (select t1.b from t t1, t t2 where t1.a = t2.b) z, t " + - "where t.b = z.b + 5"); - checkBatchedQueryResult(stat, 1, - "select t.a from (select u.b from u, t t2 where u.a = t2.b) z, t " + - "where t.b = z.b + 1"); - checkBatchedQueryResult(stat, 15, - "select t.a from (select u.b from u, t t2 where u.a = t2.b) z " + - "left join t on t.b = z.b"); - checkBatchedQueryResult(stat, 15, - "select t.a from (select t1.b from t t1 left join t t2 on t1.a = t2.b) z, t " - + "where t.b = z.b + 5"); - checkBatchedQueryResult(stat, 1, - "select t.a from t,(select 5 as b from t union select 10 from u) z " - + "where t.b = z.b"); - checkBatchedQueryResult(stat, 15, "select t.a from u,(select 5 as b, a from t " - + "union select 10, a from u) z, t where t.b = z.b and z.a = u.a"); - - stat.execute("DROP TABLE T"); - stat.execute("DROP TABLE U"); - } - - private void checkBatchedQueryResult(Statement stat, int size, String sql) - throws SQLException { - setBatchingEnabled(stat, false); - List> expected = query(stat, sql); - assertEquals(size, expected.size()); - setBatchingEnabled(stat, true); - List> actual = query(stat, sql); - if (!expected.equals(actual)) { - fail("\n" + "expected: " + expected + "\n" + "actual: " + actual); - } - } - - private void doTestBatchedJoin(Statement stat, int... 
batchSizes) throws SQLException { - ArrayList tables = new ArrayList<>(batchSizes.length); - - for (int i = 0; i < batchSizes.length; i++) { - stat.executeUpdate("DROP TABLE IF EXISTS T" + i); - stat.executeUpdate("CREATE TABLE T" + i + "(A INT, B INT) ENGINE \"" + - TreeSetIndexTableEngine.class.getName() + "\""); - tables.add(TreeSetIndexTableEngine.created); - - stat.executeUpdate("CREATE INDEX IDX_B ON T" + i + "(B)"); - stat.executeUpdate("CREATE INDEX IDX_A ON T" + i + "(A)"); - - PreparedStatement insert = stat.getConnection().prepareStatement( - "INSERT INTO T"+ i + " VALUES (?,?)"); - - for (int j = i, size = i + 10; j < size; j++) { - insert.setInt(1, j); - insert.setInt(2, j); - insert.executeUpdate(); - } - - for (TreeSetTable table : tables) { - assertEquals(10, table.getRowCount(null)); - } - } - // Prevent memory leak - TreeSetIndexTableEngine.created = null; - - int[] zeroBatchSizes = new int[batchSizes.length]; - int tests = 1 << (batchSizes.length * 4); - - for (int test = 0; test < tests; test++) { - String query = generateQuery(test, batchSizes.length); - - // System.out.println(Arrays.toString(batchSizes) + - // ": " + test + " -> " + query); - - setBatchSize(tables, batchSizes); - List> res1 = query(stat, query); - - setBatchSize(tables, zeroBatchSizes); - List> res2 = query(stat, query); - - // System.out.println(res1 + " " + res2); - - if (!res2.equals(res1)) { - System.err.println(Arrays.toString(batchSizes) + ": " + res1 + " " + res2); - System.err.println("Test " + test); - System.err.println(query); - for (TreeSetTable table : tables) { - System.err.println(table.getName() + " = " + - query(stat, "select * from " + table.getName())); - } - fail(); - } - } - for (int i = 0; i < batchSizes.length; i++) { - stat.executeUpdate("DROP TABLE IF EXISTS T" + i); - } - } - /** * A static assertion method. * @@ -766,68 +420,6 @@ static void assert0(boolean condition, String message) { } } - private static void setBatchSize(ArrayList tables, int... 
batchSizes) { - for (int i = 0; i < batchSizes.length; i++) { - int batchSize = batchSizes[i]; - setBatchSize(tables.get(i), batchSize); - } - } - - private static void setBatchSize(TreeSetTable t, int batchSize) { - if (t.getIndexes() == null) { - t.scan.preferredBatchSize = batchSize; - } else { - for (Index idx : t.getIndexes()) { - ((TreeSetIndex) idx).preferredBatchSize = batchSize; - } - } - } - - private static String generateQuery(int t, int tables) { - final int withLeft = 1; - final int withFalse = 2; - final int withWhere = 4; - final int withOnIsNull = 8; - - StringBuilder b = new StringBuilder(); - b.append("select count(*) from "); - - StringBuilder where = new StringBuilder(); - - for (int i = 0; i < tables; i++) { - if (i != 0) { - if ((t & withLeft) != 0) { - b.append(" left "); - } - b.append(" join "); - } - b.append("\nT").append(i).append(' '); - if (i != 0) { - boolean even = (i & 1) == 0; - if ((t & withOnIsNull) != 0) { - b.append(" on T").append(i - 1).append(even ? ".B" : ".A").append(" is null"); - } else if ((t & withFalse) != 0) { - b.append(" on false "); - } else { - b.append(" on T").append(i - 1).append(even ? ".B = " : ".A = "); - b.append("T").append(i).append(even ? 
".B " : ".A "); - } - } - if ((t & withWhere) != 0) { - if (where.length() != 0) { - where.append(" and "); - } - where.append(" T").append(i).append(".A > 5"); - } - t >>>= 4; - } - if (where.length() != 0) { - b.append("\n" + "where ").append(where); - } - - return b.toString(); - } - private void checkResultsNoOrder(Statement stat, int size, String query1, String query2) throws SQLException { List> res1 = query(stat, query1); @@ -843,8 +435,8 @@ private void checkResultsNoOrder(Statement stat, int size, String query1, String cols[i] = i; } Comparator> comp = new RowComparator(cols); - Collections.sort(res1, comp); - Collections.sort(res2, comp); + res1.sort(comp); + res2.sort(comp); assertTrue("Wrong data: \n" + res1 + "\n" + res2, res1.equals(res2)); } @@ -882,7 +474,7 @@ private static List> query(List> dataSet, } } if (sort != null) { - Collections.sort(res, sort); + res.sort(sort); } return res; } @@ -922,16 +514,16 @@ private static class OneRowTable extends TableBase { /** * A scan index for one row. 
*/ - public class Scan extends BaseIndex { + public class Scan extends Index { Scan(Table table) { super(table, table.getId(), table.getName() + "_SCAN", - IndexColumn.wrap(table.getColumns()), IndexType.createScan(false)); + IndexColumn.wrap(table.getColumns()), 0, IndexType.createScan(false)); } @Override - public long getRowCountApproximation() { - return table.getRowCountApproximation(); + public long getRowCountApproximation(SessionLocal session) { + return table.getRowCountApproximation(session); } @Override @@ -940,27 +532,22 @@ public long getDiskSpaceUsed() { } @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return table.getRowCount(session); } @Override - public void checkRename() { - // do nothing - } - - @Override - public void truncate(Session session) { + public void truncate(SessionLocal session) { // do nothing } @Override - public void remove(Session session) { + public void remove(SessionLocal session) { // do nothing } @Override - public void remove(Session session, Row r) { + public void remove(SessionLocal session, Row r) { // do nothing } @@ -970,24 +557,24 @@ public boolean needRebuild() { } @Override - public double getCost(Session session, int[] masks, + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { return 0; } @Override - public Cursor findFirstOrLast(Session session, boolean first) { + public Cursor findFirstOrLast(SessionLocal session, boolean first) { return new SingleRowCursor(row); } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { return new SingleRowCursor(row); } @Override - public void close(Session session) { + public void close(SessionLocal session) { // do nothing } @@ -997,7 +584,7 @@ public boolean canGetFirstOrLast() { } @Override - public void add(Session 
session, Row r) { + public void add(SessionLocal session, Row r) { // do nothing } } @@ -1012,14 +599,13 @@ public void add(Session session, Row r) { } @Override - public Index addIndex(Session session, String indexName, - int indexId, IndexColumn[] cols, IndexType indexType, - boolean create, String indexComment) { + public Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment) { return null; } @Override - public void addRow(Session session, Row r) { + public void addRow(SessionLocal session, Row r) { this.row = r; } @@ -1029,7 +615,7 @@ public boolean canDrop() { } @Override - public boolean canGetRowCount() { + public boolean canGetRowCount(SessionLocal session) { return true; } @@ -1039,7 +625,7 @@ public void checkSupportAlter() { } @Override - public void close(Session session) { + public void close(SessionLocal session) { // do nothing } @@ -1054,22 +640,17 @@ public long getMaxDataModificationId() { } @Override - public long getRowCount(Session session) { - return getRowCountApproximation(); + public long getRowCount(SessionLocal session) { + return getRowCountApproximation(session); } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return row == null ? 
0 : 1; } @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public Index getScanIndex(Session session) { + public Index getScanIndex(SessionLocal session) { return scanIndex; } @@ -1078,45 +659,21 @@ public TableType getTableType() { return TableType.EXTERNAL_TABLE_ENGINE; } - @Override - public Index getUniqueIndex() { - return null; - } - @Override public boolean isDeterministic() { return false; } @Override - public boolean isLockedExclusively() { - return false; - } - - @Override - public boolean lock(Session session, boolean exclusive, boolean force) { - // do nothing - return false; - } - - @Override - public void removeRow(Session session, Row r) { + public void removeRow(SessionLocal session, Row r) { this.row = null; } @Override - public void truncate(Session session) { + public long truncate(SessionLocal session) { + long result = row != null ? 1L : 0L; row = null; - } - - @Override - public void unlock(Session s) { - // do nothing - } - - @Override - public void checkRename() { - // do nothing + return result; } } @@ -1134,149 +691,6 @@ public OneRowTable createTable(CreateTableData data) { } - /** - * A test table factory producing affinity aware tables. - */ - public static class AffinityTableEngine implements TableEngine { - public static Table createdTbl; - - /** - * A table able to handle affinity indexes. - */ - private static class AffinityTable extends PageStoreTable { - - /** - * A (no-op) affinity index. 
- */ - public class AffinityIndex extends BaseIndex { - AffinityIndex(Table table, int id, String name, IndexColumn[] newIndexColumns) { - super(table, id, name, newIndexColumns, IndexType.createAffinity()); - } - - @Override - public long getRowCountApproximation() { - return table.getRowCountApproximation(); - } - - @Override - public long getDiskSpaceUsed() { - return table.getDiskSpaceUsed(); - } - - @Override - public long getRowCount(Session session) { - return table.getRowCount(session); - } - - @Override - public void checkRename() { - // do nothing - } - - @Override - public void truncate(Session session) { - // do nothing - } - - @Override - public void remove(Session session) { - // do nothing - } - - @Override - public void remove(Session session, Row r) { - // do nothing - } - - @Override - public boolean needRebuild() { - return false; - } - - @Override - public double getCost(Session session, int[] masks, - TableFilter[] filters, int filter, SortOrder sortOrder, - AllColumnsForPlan allColumnsSet) { - return 0; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("TEST"); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - throw DbException.getUnsupportedException("TEST"); - } - - @Override - public void close(Session session) { - // do nothing - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public boolean canScan() { - return false; - } - - @Override - public void add(Session session, Row r) { - // do nothing - } - } - - AffinityTable(CreateTableData data) { - super(data); - } - - @Override - public Index addIndex(Session session, String indexName, - int indexId, IndexColumn[] cols, IndexType indexType, - boolean create, String indexComment) { - if (!indexType.isAffinity()) { - return super.addIndex(session, indexName, indexId, cols, indexType, create, indexComment); - } - - boolean 
isSessionTemporary = isTemporary() && !isGlobalTemporary(); - if (!isSessionTemporary) { - database.lockMeta(session); - } - AffinityIndex index = new AffinityIndex(this, indexId, getName() + "_AFF", cols); - index.setTemporary(isTemporary()); - if (index.getCreateSQL() != null) { - index.setComment(indexComment); - if (isSessionTemporary) { - session.addLocalTempTableIndex(index); - } else { - database.addSchemaObject(session, index); - } - } - getIndexes().add(index); - setModified(); - return index; - } - - } - - /** - * Create a new OneRowTable. - * - * @param data the meta data of the table to create - * @return the new table - */ - @Override - public Table createTable(CreateTableData data) { - return (createdTbl = new AffinityTable(data)); - } - - } - /** * A test table factory. */ @@ -1291,8 +705,7 @@ private static class EndlessTable extends OneRowTableEngine.OneRowTable { EndlessTable(CreateTableData data) { super(data); - row = data.schema.getDatabase().createRow( - new Value[] { ValueInt.get(1), ValueNull.INSTANCE }, 0); + row = Row.get(new Value[] { ValueInteger.get(1), ValueNull.INSTANCE }, 0); scanIndex = new Auto(this); } @@ -1306,25 +719,7 @@ public class Auto extends OneRowTableEngine.OneRowTable.Scan { } @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - return find(filter.getFilterCondition()); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return find(null); - } - - /** - * Search within the table. 
- * - * @param filter the table filter (optional) - * @return the cursor - */ - private Cursor find(Expression filter) { - if (filter != null) { - row.setValue(1, ValueString.get(filter.getSQL(false))); - } + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { return new SingleRowCursor(row); } @@ -1370,10 +765,9 @@ private static class TreeSetTable extends TableBase { TreeSetIndex scan = new TreeSetIndex(this, "scan", IndexColumn.wrap(getColumns()), IndexType.createScan(false)) { @Override - public double getCost(Session session, int[] masks, + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { - doTests(session); return getCostRangeIndex(masks, getRowCount(session), filters, filter, sortOrder, true, allColumnsSet); } @@ -1384,17 +778,8 @@ public double getCost(Session session, int[] masks, } @Override - public void checkRename() { - // No-op. - } - - @Override - public void unlock(Session s) { - // No-op. 
- } - - @Override - public void truncate(Session session) { + public long truncate(SessionLocal session) { + long result = getRowCountApproximation(session); if (indexes != null) { for (Index index : indexes) { index.truncate(session); @@ -1403,10 +788,11 @@ public void truncate(Session session) { scan.truncate(session); } dataModificationId++; + return result; } @Override - public void removeRow(Session session, Row row) { + public void removeRow(SessionLocal session, Row row) { if (indexes != null) { for (Index index : indexes) { index.remove(session, row); @@ -1418,7 +804,7 @@ public void removeRow(Session session, Row row) { } @Override - public void addRow(Session session, Row row) { + public void addRow(SessionLocal session, Row row) { if (indexes != null) { for (Index index : indexes) { index.add(session, row); @@ -1430,8 +816,8 @@ public void addRow(Session session, Row row) { } @Override - public Index addIndex(Session session, String indexName, int indexId, IndexColumn[] cols, - IndexType indexType, boolean create, String indexComment) { + public Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment) { if (indexes == null) { indexes = new ArrayList<>(2); // Scan must be always at 0. 
@@ -1447,43 +833,28 @@ public Index addIndex(Session session, String indexName, int indexId, IndexColum return index; } - @Override - public boolean lock(Session session, boolean exclusive, boolean forceLockEvenInMvcc) { - return true; - } - - @Override - public boolean isLockedExclusively() { - return false; - } - @Override public boolean isDeterministic() { return false; } - @Override - public Index getUniqueIndex() { - return null; - } - @Override public TableType getTableType() { return TableType.EXTERNAL_TABLE_ENGINE; } @Override - public Index getScanIndex(Session session) { + public Index getScanIndex(SessionLocal session) { return scan; } @Override - public long getRowCountApproximation() { - return getScanIndex(null).getRowCountApproximation(); + public long getRowCountApproximation(SessionLocal session) { + return getScanIndex(null).getRowCountApproximation(session); } @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return scan.getRowCount(session); } @@ -1498,12 +869,7 @@ public ArrayList getIndexes() { } @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public void close(Session session) { + public void close(SessionLocal session) { // No-op. } @@ -1513,7 +879,7 @@ public void checkSupportAlter() { } @Override - public boolean canGetRowCount() { + public boolean canGetRowCount(SessionLocal session) { return true; } @@ -1526,20 +892,12 @@ public boolean canDrop() { /** * An index that internally uses a tree set. */ - private static class TreeSetIndex extends BaseIndex implements Comparator { - /** - * Executor service to test batched joins. 
- */ - static ExecutorService exec; - - static AtomicInteger lookupBatches = new AtomicInteger(); - - int preferredBatchSize; + private static class TreeSetIndex extends Index implements Comparator { final TreeSet set = new TreeSet<>(this); TreeSetIndex(Table t, String name, IndexColumn[] cols, IndexType type) { - super(t, 0, name, cols, type); + super(t, 0, name, cols, 0, type); } @Override @@ -1556,92 +914,17 @@ public int compare(SearchRow o1, SearchRow o2) { } @Override - public IndexLookupBatch createLookupBatch(TableFilter[] filters, int f) { - final TableFilter filter = filters[f]; - assert0(filter.getMasks() != null || "scan".equals(getName()), "masks"); - final int preferredSize = preferredBatchSize; - if (preferredSize == 0) { - return null; - } - lookupBatches.incrementAndGet(); - return new IndexLookupBatch() { - List searchRows = new ArrayList<>(); - - @Override - public String getPlanSQL() { - return "test"; - } - - @Override public boolean isBatchFull() { - return searchRows.size() >= preferredSize * 2; - } - - @Override - public List> find() { - List> res = findBatched(filter, searchRows); - searchRows.clear(); - return res; - } - - @Override - public boolean addSearchRows(SearchRow first, SearchRow last) { - assert !isBatchFull(); - searchRows.add(first); - searchRows.add(last); - return true; - } - - @Override - public void reset(boolean beforeQuery) { - searchRows.clear(); - } - }; - } - - public List> findBatched(final TableFilter filter, - List firstLastPairs) { - ArrayList> result = new ArrayList<>(firstLastPairs.size()); - final Random rnd = new Random(); - for (int i = 0; i < firstLastPairs.size(); i += 2) { - final SearchRow first = firstLastPairs.get(i); - final SearchRow last = firstLastPairs.get(i + 1); - Future future; - if (rnd.nextBoolean()) { - IteratorCursor c = (IteratorCursor) find(filter, first, last); - if (c.it.hasNext()) { - future = new DoneFuture(c); - } else { - // we can return null instead of future of empty cursor - 
future = null; - } - } else { - future = exec.submit(new Callable() { - @Override - public Cursor call() throws Exception { - if (rnd.nextInt(50) == 0) { - Thread.sleep(0, 500); - } - return find(filter, first, last); - } - }); - } - result.add(future); - } - return result; - } - - @Override - public void close(Session session) { + public void close(SessionLocal session) { // No-op. } @Override - public void add(Session session, Row row) { + public void add(SessionLocal session, Row row) { set.add(row); } @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { set.remove(row); } @@ -1654,7 +937,7 @@ private static SearchRow mark(SearchRow row, boolean first) { } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { Set subSet; if (first != null && last != null && compareRows(last, first) < 0) { subSet = Collections.emptySet(); @@ -1682,59 +965,21 @@ public Cursor find(Session session, SearchRow first, SearchRow last) { return new IteratorCursor(subSet.iterator()); } - private static String alias(SubQueryInfo info) { - return info.getFilters()[info.getFilter()].getTableAlias(); - } - - private void checkInfo(SubQueryInfo info) { - if (info.getUpper() == null) { - // check 1st level info - assert0(info.getFilters().length == 1, "getFilters().length " + - info.getFilters().length); - String alias = alias(info); - assert0("T5".equals(alias), "alias: " + alias); - } else { - // check 2nd level info - assert0(info.getFilters().length == 2, "getFilters().length " + - info.getFilters().length); - String alias = alias(info); - assert0("T4".equals(alias), "alias: " + alias); - checkInfo(info.getUpper()); - } - } - - protected void doTests(Session session) { - if (getTable().getName().equals("SUB_QUERY_TEST")) { - checkInfo(session.getSubQueryInfo()); - } else if (getTable().getName().equals("EXPR_TEST")) { - 
assert0(session.getSubQueryInfo() == null, "select expression"); - } else if (getTable().getName().equals("EXPR_TEST2")) { - String alias = alias(session.getSubQueryInfo()); - assert0(alias.equals("ZZ"), "select expression sub-query: " + alias); - assert0(session.getSubQueryInfo().getUpper() == null, "upper"); - } else if (getTable().getName().equals("QUERY_EXPR_TEST")) { - assert0(session.isPreparingQueryExpression(), "preparing query expression"); - } else if (getTable().getName().equals("QUERY_EXPR_TEST_NO")) { - assert0(!session.isPreparingQueryExpression(), "not preparing query expression"); - } - } - @Override - public double getCost(Session session, int[] masks, + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, AllColumnsForPlan allColumnsSet) { - doTests(session); return getCostRangeIndex(masks, set.size(), filters, filter, sortOrder, false, allColumnsSet); } @Override - public void remove(Session session) { + public void remove(SessionLocal session) { // No-op. } @Override - public void truncate(Session session) { + public void truncate(SessionLocal session) { set.clear(); } @@ -1744,7 +989,7 @@ public boolean canGetFirstOrLast() { } @Override - public Cursor findFirstOrLast(Session session, boolean first) { + public Cursor findFirstOrLast(SessionLocal session, boolean first) { return new SingleRowCursor((Row) (set.isEmpty() ? null : first ? set.first() : set.last())); } @@ -1755,24 +1000,15 @@ public boolean needRebuild() { } @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return set.size(); } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return getRowCount(null); } - @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public void checkRename() { - // No-op. 
- } } /** diff --git a/h2/src/test/org/h2/test/db/TestTempTables.java b/h2/src/test/org/h2/test/db/TestTempTables.java index d92e36968f..416c7ae4ed 100644 --- a/h2/src/test/org/h2/test/db/TestTempTables.java +++ b/h2/src/test/org/h2/test/db/TestTempTables.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -11,8 +11,9 @@ import java.sql.SQLException; import java.sql.Statement; import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.store.fs.FileUtils; +import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.jdbc.JdbcConnection; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -27,7 +28,7 @@ public class TestTempTables extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -38,7 +39,6 @@ public void test() throws SQLException { testTempFileResultSet(); testTempTableResultSet(); testTransactionalTemp(); - testDeleteGlobalTempTableWhenClosing(); Connection c1 = getConnection("tempTables"); testAlter(c1); Connection c2 = getConnection("tempTables"); @@ -58,7 +58,7 @@ private void testAnalyzeReuseObjectId() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create local temporary table test(id identity)"); PreparedStatement prep = conn - .prepareStatement("insert into test values(null)"); + .prepareStatement("insert into test default values"); for (int i = 0; i < 10000; i++) { prep.execute(); } @@ -72,21 +72,18 @@ private void testTempSequence() throws SQLException { Connection conn = getConnection("tempTables"); Statement stat = conn.createStatement(); stat.execute("create local temporary table test(id identity)"); - ResultSet rs = stat.executeQuery("script"); - boolean foundSequence = false; - while (rs.next()) { - if (rs.getString(1).startsWith("CREATE SEQUENCE")) { - foundSequence = true; - } + Session iface = ((JdbcConnection) conn).getSession(); + if ((iface instanceof SessionLocal)) { + assertEquals(1, ((SessionLocal) iface).getDatabase().getMainSchema().getAllSequences().size()); } - assertTrue(foundSequence); - stat.execute("insert into test values(null)"); + stat.execute("insert into test default values"); stat.execute("shutdown"); conn.close(); conn = getConnection("tempTables"); - rs = conn.createStatement().executeQuery( - "select * from information_schema.sequences"); - assertFalse(rs.next()); + iface = ((JdbcConnection) conn).getSession(); + if ((iface instanceof SessionLocal)) { + assertEquals(0, ((SessionLocal) iface).getDatabase().getMainSchema().getAllSequences().size()); + } conn.close(); } @@ -198,7 +195,7 @@ private void testTransactionalTemp() throws SQLException { 
stat.execute("commit"); stat.execute("insert into test values(2)"); stat.execute("create local temporary table temp(" + - "id int primary key, name varchar, constraint x index(name)) transactional"); + "id int primary key, name varchar, constraint x unique(name)) transactional"); stat.execute("insert into temp values(3, 'test')"); stat.execute("rollback"); rs = stat.executeQuery("select * from test"); @@ -209,34 +206,6 @@ private void testTransactionalTemp() throws SQLException { conn.close(); } - private void testDeleteGlobalTempTableWhenClosing() throws SQLException { - if (config.memory) { - return; - } - if (config.mvStore) { - return; - } - deleteDb("tempTables"); - Connection conn = getConnection("tempTables"); - Statement stat = conn.createStatement(); - stat.execute("create global temporary table test(id int, data varchar)"); - stat.execute("insert into test " + - "select x, space(1000) from system_range(1, 1000)"); - stat.execute("shutdown compact"); - try { - conn.close(); - } catch (SQLException e) { - // expected - } - String dbName = getBaseDir() + "/tempTables" + Constants.SUFFIX_PAGE_FILE; - long before = FileUtils.size(dbName); - assertTrue(before > 0); - conn = getConnection("tempTables"); - conn.close(); - long after = FileUtils.size(dbName); - assertEquals(after, before); - } - private void testAlter(Connection conn) throws SQLException { Statement stat; stat = conn.createStatement(); @@ -319,7 +288,7 @@ private void testTables(Connection c1, Connection c2) throws SQLException { assertResultRowCount(1, rs); c1.commit(); // test_temp should have been dropped automatically - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, s1). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, s1). 
executeQuery("select * from test_temp"); } diff --git a/h2/src/test/org/h2/test/db/TestTransaction.java b/h2/src/test/org/h2/test/db/TestTransaction.java index 8ebf0bac66..22b8b9c014 100644 --- a/h2/src/test/org/h2/test/db/TestTransaction.java +++ b/h2/src/test/org/h2/test/db/TestTransaction.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -14,8 +14,8 @@ import java.sql.Statement; import java.util.ArrayList; import java.util.Random; - import org.h2.api.ErrorCode; +import org.h2.engine.Constants; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -32,8 +32,7 @@ public class TestTransaction extends TestDb { */ public static void main(String... a) throws Exception { TestBase init = TestBase.createCaller().init(); - init.config.multiThreaded = true; - init.test(); + init.testFromMain(); } @Override @@ -43,7 +42,6 @@ public void test() throws Exception { testConstraintCreationRollback(); testCommitOnAutoCommitChange(); testConcurrentSelectForUpdate(); - testLogMode(); testRollback(); testRollback2(); testForUpdate(); @@ -56,6 +54,12 @@ public void test() throws Exception { testReferential(); testSavepoint(); testIsolation(); + testIsolationLevels(); + testIsolationLevels2(); + testIsolationLevels3(); + testIsolationLevels4(); + testIsolationLevelsCountAggregate(); + testIsolationLevelsCountAggregate2(); deleteDb("transaction"); } @@ -63,16 +67,11 @@ private void testConstraintCreationRollback() throws SQLException { deleteDb("transaction"); Connection conn = getConnection("transaction"); Statement stat = conn.createStatement(); - stat.execute("create table test(id int, p int)"); - stat.execute("insert into test values(1, 2)"); - try { - stat.execute("alter table 
test add constraint fail " + - "foreign key(p) references test(id)"); - fail(); - } catch (SQLException e) { - // expected - } + stat.execute("create table test(id int unique, p int)"); stat.execute("insert into test values(1, 2)"); + assertThrows(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, stat).execute( + "alter table test add constraint fail foreign key(p) references test(id)"); + stat.execute("insert into test values(2, 3)"); stat.execute("drop table test"); conn.close(); } @@ -92,15 +91,9 @@ private void testCommitOnAutoCommitChange() throws SQLException { // should have no effect conn.setAutoCommit(false); - ResultSet rs; - if (config.mvStore) { - rs = stat2.executeQuery("select count(*) from test"); - rs.next(); - assertEquals(0, rs.getInt(1)); - } else { - assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat2). - executeQuery("select count(*) from test"); - } + ResultSet rs = stat2.executeQuery("select count(*) from test"); + rs.next(); + assertEquals(0, rs.getInt(1)); // should commit conn.setAutoCommit(true); @@ -114,51 +107,6 @@ private void testCommitOnAutoCommitChange() throws SQLException { conn.close(); } - private void testLogMode() throws SQLException { - if (config.memory) { - return; - } - if (config.mvStore) { - return; - } - deleteDb("transaction"); - testLogMode(0); - testLogMode(1); - testLogMode(2); - } - - private void testLogMode(int logMode) throws SQLException { - Connection conn; - Statement stat; - ResultSet rs; - conn = getConnection("transaction"); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key) as select 1"); - stat.execute("set write_delay 0"); - stat.execute("set log " + logMode); - rs = stat.executeQuery( - "select value from information_schema.settings where name = 'LOG'"); - rs.next(); - assertEquals(logMode, rs.getInt(1)); - stat.execute("insert into test values(2)"); - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (SQLException e) { - // expected - } - 
conn = getConnection("transaction"); - stat = conn.createStatement(); - rs = stat.executeQuery("select * from test order by id"); - assertTrue(rs.next()); - if (logMode != 0) { - assertTrue(rs.next()); - } - assertFalse(rs.next()); - stat.execute("drop table test"); - conn.close(); - } - private void testConcurrentSelectForUpdate() throws SQLException { deleteDb("transaction"); Connection conn = getConnection("transaction"); @@ -216,9 +164,7 @@ private void testForUpdate() throws SQLException { Connection conn2 = getConnection("transaction"); conn2.setAutoCommit(false); Statement stat2 = conn2.createStatement(); - if (config.mvStore) { - stat2.execute("update test set name = 'Welt' where id = 2"); - } + stat2.execute("update test set name = 'Welt' where id = 2"); assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat2). execute("update test set name = 'Hallo' where id = 1"); conn2.close(); @@ -227,7 +173,7 @@ private void testForUpdate() throws SQLException { private void testForUpdate2() throws Exception { // Exclude some configurations to avoid spending too much time in sleep() - if (config.mvStore && !config.multiThreaded || config.networked || config.cipher != null) { + if (config.networked || config.cipher != null) { return; } deleteDb("transaction"); @@ -237,9 +183,7 @@ private void testForUpdate2() throws Exception { stat1.execute("CREATE TABLE TEST (ID INT PRIMARY KEY, V INT)"); conn1.setAutoCommit(false); conn2.createStatement().execute("SET LOCK_TIMEOUT 2000"); - if (config.mvStore) { - testForUpdate2(conn1, stat1, conn2, false); - } + testForUpdate2(conn1, stat1, conn2, false); testForUpdate2(conn1, stat1, conn2, true); conn1.close(); conn2.close(); @@ -302,7 +246,7 @@ public void run() { private void testForUpdate3() throws Exception { // Exclude some configurations to avoid spending too much time in sleep() - if (config.mvStore && !config.multiThreaded || config.networked || config.cipher != null) { + if (config.networked || config.cipher != null) { return; } 
deleteDb("transaction"); @@ -356,7 +300,7 @@ private void testUpdate() throws Exception { conn2.setAutoCommit(false); Statement stat1 = conn1.createStatement(); Statement stat2 = conn2.createStatement(); - stat1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE BOOLEAN) AS " + stat1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, \"VALUE\" BOOLEAN) AS " + "SELECT X, FALSE FROM GENERATE_SERIES(1, " + count + ')'); conn1.commit(); stat1.executeQuery("SELECT * FROM TEST").close(); @@ -368,7 +312,7 @@ public void run() { int sum = 0; try { PreparedStatement prep = conn1.prepareStatement( - "UPDATE TEST SET VALUE = TRUE WHERE ID = ? AND NOT VALUE"); + "UPDATE TEST SET \"VALUE\" = TRUE WHERE ID = ? AND NOT \"VALUE\""); for (int i = 1; i <= count; i++) { prep.setInt(1, i); prep.addBatch(); @@ -387,7 +331,7 @@ public void run() { t.start(); int sum = 0; PreparedStatement prep = conn2.prepareStatement( - "UPDATE TEST SET VALUE = TRUE WHERE ID = ? AND NOT VALUE"); + "UPDATE TEST SET \"VALUE\" = TRUE WHERE ID = ? 
AND NOT \"VALUE\""); for (int i = 1; i <= count; i++) { prep.setInt(1, i); prep.addBatch(); @@ -412,7 +356,7 @@ private void testMergeUsing() throws Exception { conn2.setAutoCommit(false); Statement stat1 = conn1.createStatement(); Statement stat2 = conn2.createStatement(); - stat1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE BOOLEAN) AS " + stat1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, \"VALUE\" BOOLEAN) AS " + "SELECT X, FALSE FROM GENERATE_SERIES(1, " + count + ')'); conn1.commit(); stat1.executeQuery("SELECT * FROM TEST").close(); @@ -424,8 +368,8 @@ public void run() { int sum = 0; try { PreparedStatement prep = conn1.prepareStatement( - "MERGE INTO TEST T USING (SELECT ?1::INT X) S ON T.ID = S.X AND NOT T.VALUE" - + " WHEN MATCHED THEN UPDATE SET T.VALUE = TRUE" + "MERGE INTO TEST T USING (SELECT ?1::INT X) S ON T.ID = S.X AND NOT T.\"VALUE\"" + + " WHEN MATCHED THEN UPDATE SET T.\"VALUE\" = TRUE" + " WHEN NOT MATCHED THEN INSERT VALUES (10000 + ?1, FALSE)"); for (int i = 1; i <= count; i++) { prep.setInt(1, i); @@ -445,8 +389,8 @@ public void run() { t.start(); int sum = 0; PreparedStatement prep = conn2.prepareStatement( - "MERGE INTO TEST T USING (SELECT ?1::INT X) S ON T.ID = S.X AND NOT T.VALUE" - + " WHEN MATCHED THEN UPDATE SET T.VALUE = TRUE" + "MERGE INTO TEST T USING (SELECT ?1::INT X) S ON T.ID = S.X AND NOT T.\"VALUE\"" + + " WHEN MATCHED THEN UPDATE SET T.\"VALUE\" = TRUE" + " WHEN NOT MATCHED THEN INSERT VALUES (10000 + ?1, FALSE)"); for (int i = 1; i <= count; i++) { prep.setInt(1, i); @@ -464,8 +408,8 @@ public void run() { } private void testDelete() throws Exception { - String sql1 = "DELETE FROM TEST WHERE ID = ? AND NOT VALUE"; - String sql2 = "UPDATE TEST SET VALUE = TRUE WHERE ID = ? AND NOT VALUE"; + String sql1 = "DELETE FROM TEST WHERE ID = ? AND NOT \"VALUE\""; + String sql2 = "UPDATE TEST SET \"VALUE\" = TRUE WHERE ID = ? 
AND NOT \"VALUE\""; testDeleteImpl(sql1, sql2); testDeleteImpl(sql2, sql1); } @@ -479,7 +423,7 @@ private void testDeleteImpl(final String sql1, String sql2) throws Exception { conn2.setAutoCommit(false); Statement stat1 = conn1.createStatement(); Statement stat2 = conn2.createStatement(); - stat1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE BOOLEAN) AS " + stat1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, \"VALUE\" BOOLEAN) AS " + "SELECT X, FALSE FROM GENERATE_SERIES(1, " + count + ')'); conn1.commit(); stat1.executeQuery("SELECT * FROM TEST").close(); @@ -549,7 +493,7 @@ private void testRollback() throws SQLException { conn = getConnection("transaction"); stat = conn.createStatement(); - stat.execute("create table master(id int) as select 1"); + stat.execute("create table master(id int primary key) as select 1"); stat.execute("create table child1(id int references master(id) " + "on delete cascade)"); stat.execute("insert into child1 values(1), (1), (1)"); @@ -594,7 +538,7 @@ private void testRollback2() throws SQLException { conn = getConnection("transaction"); stat = conn.createStatement(); - stat.execute("create table master(id int) as select 1"); + stat.execute("create table master(id int primary key) as select 1"); stat.execute("create table child1(id int references master(id) " + "on delete cascade)"); stat.execute("insert into child1 values(1), (1)"); @@ -642,7 +586,7 @@ private void testReferential() throws SQLException { Statement s1 = c1.createStatement(); s1.execute("drop table if exists a"); s1.execute("drop table if exists b"); - s1.execute("create table a (id integer identity not null, " + + s1.execute("create table a (id integer generated by default as identity, " + "code varchar(10) not null, primary key(id))"); s1.execute("create table b (name varchar(100) not null, a integer, " + "primary key(name), foreign key(a) references a(id))"); @@ -650,14 +594,9 @@ private void testReferential() throws SQLException { c2.setAutoCommit(false); 
s1.executeUpdate("insert into A(code) values('one')"); Statement s2 = c2.createStatement(); - if (config.mvStore) { - assertThrows( - ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, s2). - executeUpdate("insert into B values('two', 1)"); - } else { - assertThrows(ErrorCode.LOCK_TIMEOUT_1, s2). - executeUpdate("insert into B values('two', 1)"); - } + assertThrows( + ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, s2). + executeUpdate("insert into B values('two', 1)"); c2.commit(); c1.rollback(); c1.close(); @@ -672,7 +611,7 @@ private void testClosingConnectionWithLockedTable() throws SQLException { c2.setAutoCommit(false); Statement s1 = c1.createStatement(); - s1.execute("create table a (id integer identity not null, " + + s1.execute("create table a (id integer generated by default as identity, " + "code varchar(10) not null, primary key(id))"); s1.executeUpdate("insert into a(code) values('one')"); c1.commit(); @@ -777,11 +716,9 @@ private void testIsolation() throws SQLException { Connection conn = getConnection("transaction"); trace("default TransactionIsolation=" + conn.getTransactionIsolation()); conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - assertTrue(conn.getTransactionIsolation() == - Connection.TRANSACTION_READ_COMMITTED); + assertEquals(Connection.TRANSACTION_READ_COMMITTED, conn.getTransactionIsolation()); conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - assertTrue(conn.getTransactionIsolation() == - Connection.TRANSACTION_SERIALIZABLE); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, conn.getTransactionIsolation()); Statement stat = conn.createStatement(); assertTrue(conn.getAutoCommit()); conn.setAutoCommit(false); @@ -806,10 +743,144 @@ private void testIsolation() throws SQLException { conn.close(); } + private void testIsolationLevels() throws SQLException { + for (int isolationLevel : new int[] { Connection.TRANSACTION_REPEATABLE_READ, Constants.TRANSACTION_SNAPSHOT, + 
Connection.TRANSACTION_SERIALIZABLE }) { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction"); + Connection conn3 = getConnection("transaction")) { + conn3.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); + Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + Statement stat3 = conn3.createStatement(); + stat1.execute("CREATE TABLE TEST1(ID INT PRIMARY KEY) AS VALUES 1, 2"); + stat1.execute("CREATE TABLE TEST2(ID INT PRIMARY KEY, V INT) AS VALUES (1, 10), (2, 20)"); + conn2.setAutoCommit(false); + // Read committed + testIsolationLevelsCheckRowsAndCount(stat2, 1, 2); + stat1.execute("INSERT INTO TEST1 VALUES 3"); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 3); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 2); + stat1.execute("INSERT INTO TEST2 VALUES (3, 30)"); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 3); + // Repeatable read or serializable + conn2.setTransactionIsolation(isolationLevel); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 3); + + stat1.execute("INSERT INTO TEST1 VALUES 4"); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 3); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 3); + stat1.execute("INSERT INTO TEST2 VALUES (4, 40)"); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 3); + conn2.commit(); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 4); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 4); + stat1.execute("ALTER TABLE TEST2 ADD CONSTRAINT FK FOREIGN KEY(ID) REFERENCES TEST1(ID)"); + conn2.commit(); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 4); + stat1.execute("INSERT INTO TEST1 VALUES 5"); + stat1.execute("INSERT INTO TEST2 VALUES (5, 50)"); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 4); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 4); + conn2.commit(); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 5); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 5); + 
stat2.execute("INSERT INTO TEST1 VALUES 6"); + stat2.execute("INSERT INTO TEST2 VALUES (6, 60)"); + stat2.execute("DELETE FROM TEST2 WHERE ID IN (1, 3)"); + stat2.execute("UPDATE TEST2 SET V = 45 WHERE ID = 4"); + stat1.execute("INSERT INTO TEST1 VALUES 7"); + stat1.execute("INSERT INTO TEST2 VALUES (7, 70)"); + stat2.execute("INSERT INTO TEST1 VALUES 8"); + stat2.execute("INSERT INTO TEST2 VALUES (8, 80)"); + stat2.execute("INSERT INTO TEST1 VALUES 9"); + stat2.execute("INSERT INTO TEST2 VALUES (9, 90)"); + stat2.execute("DELETE FROM TEST2 WHERE ID = 9"); + testIsolationLevelsCheckRowsAndCount2(stat2, 1, 1, 2, 3, 4, 5, 6, 8, 9); + // Read uncommitted + testIsolationLevelsCheckRowsAndCount2(stat3, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9); + // Repeatable read or serializable + try (ResultSet rs = stat2.executeQuery("SELECT COUNT(*) FROM TEST2")) { + rs.next(); + assertEquals(5, rs.getLong(1)); + } + try (ResultSet rs = stat2.executeQuery("SELECT ID, V FROM TEST2 ORDER BY ID")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertEquals(20, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(4, rs.getInt(1)); + assertEquals(45, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(5, rs.getInt(1)); + assertEquals(50, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(6, rs.getInt(1)); + assertEquals(60, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(8, rs.getInt(1)); + assertEquals(80, rs.getInt(2)); + assertFalse(rs.next()); + } + stat1.execute("INSERT INTO TEST1 VALUES 11"); + stat1.execute("INSERT INTO TEST2 VALUES (11, 110)"); + conn2.commit(); + testIsolationLevelsCheckRowsAndCount2(stat1, 2, 2, 4, 5, 6, 7, 8, 11); + testIsolationLevelsCheckRowsAndCount2(stat2, 2, 2, 4, 5, 6, 7, 8, 11); + stat2.execute("INSERT INTO TEST1 VALUES 10"); + stat2.execute("INSERT INTO TEST2 VALUES (9, 90), (10, 100)"); + stat2.execute("DELETE FROM TEST2 WHERE ID = 9"); + testIsolationLevelsCheckRowsAndCount2(stat2, 2, 2, 4, 5, 6, 7, 8, 10, 11); + 
stat1.execute("ALTER TABLE TEST2 DROP CONSTRAINT FK"); + conn2.commit(); + try (ResultSet rs = stat2.executeQuery("SELECT COUNT(*) FROM TEST1")) { + rs.next(); + assertEquals(11, rs.getLong(1)); + } + stat1.execute("INSERT INTO TEST2 VALUES (20, 200)"); + try (ResultSet rs = stat2.executeQuery("SELECT COUNT(*) FROM TEST2")) { + rs.next(); + assertEquals(isolationLevel != Connection.TRANSACTION_REPEATABLE_READ ? 8 : 9, rs.getLong(1)); + } + } + } + deleteDb("transaction"); + } + + private void testIsolationLevelsCheckRowsAndCount(Statement stat, int table, int expected) + throws SQLException { + try (ResultSet rs = stat.executeQuery("SELECT COUNT(*) FROM TEST" + table)) { + rs.next(); + assertEquals(expected, rs.getLong(1)); + } + try (ResultSet rs = stat.executeQuery("SELECT ID FROM TEST" + table + " ORDER BY ID")) { + for (int i = 0; ++i <= expected;) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertFalse(rs.next()); + } + } + + private void testIsolationLevelsCheckRowsAndCount2(Statement stat, int table, int... 
values) + throws SQLException { + try (ResultSet rs = stat.executeQuery("SELECT COUNT(*) FROM TEST" + table)) { + rs.next(); + assertEquals(values.length, rs.getLong(1)); + } + try (ResultSet rs = stat.executeQuery("SELECT ID FROM TEST" + table + " ORDER BY ID")) { + for (int expected : values) { + assertTrue(rs.next()); + assertEquals(expected, rs.getInt(1)); + } + assertFalse(rs.next()); + } + } + private void testNestedResultSets(Connection conn) throws SQLException { Statement stat = conn.createStatement(); - test(stat, "CREATE TABLE NEST1(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); - test(stat, "CREATE TABLE NEST2(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + test(stat, "CREATE TABLE NEST1(ID INT PRIMARY KEY,\"VALUE\" VARCHAR(255))"); + test(stat, "CREATE TABLE NEST2(ID INT PRIMARY KEY,\"VALUE\" VARCHAR(255))"); DatabaseMetaData meta = conn.getMetaData(); ArrayList result = new ArrayList<>(); ResultSet rs1, rs2; @@ -872,4 +943,329 @@ private void test(Statement stat, String sql) throws SQLException { stat.execute(sql); } + private void testIsolationLevels2() throws SQLException { + for (int isolationLevel : new int[] { Connection.TRANSACTION_READ_UNCOMMITTED, + Connection.TRANSACTION_READ_COMMITTED, Connection.TRANSACTION_REPEATABLE_READ, + Constants.TRANSACTION_SNAPSHOT, Connection.TRANSACTION_SERIALIZABLE }) { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction")) { + conn1.setTransactionIsolation(isolationLevel); + conn2.setTransactionIsolation(isolationLevel); + conn1.setAutoCommit(false); + conn2.setAutoCommit(false); + Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + // Test a table without constraints + stat1.execute("CREATE TABLE TEST(\"VALUE\" INT)"); + stat1.executeQuery("TABLE TEST").close(); + stat1.execute("DROP TABLE TEST"); + // Other tests + stat1.execute("CREATE TABLE TEST(ID VARCHAR PRIMARY KEY, \"VALUE\" INT)"); + 
stat1.execute("INSERT INTO TEST VALUES ('1', 1)"); + conn1.commit(); + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID = '1'")) { + rs.next(); + assertEquals(1, rs.getInt(2)); + } + stat2.executeUpdate("UPDATE TEST SET \"VALUE\" = \"VALUE\" + 1"); + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID = '1'")) { + rs.next(); + assertEquals(isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED ? 2 : 1, rs.getInt(2)); + } + assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat1) + .executeQuery("SELECT * FROM TEST WHERE ID = '1' FOR UPDATE"); + conn2.commit(); + if (isolationLevel >= Connection.TRANSACTION_REPEATABLE_READ) { + assertThrows(ErrorCode.DEADLOCK_1, stat1) + .executeQuery("SELECT * FROM TEST WHERE ID = '1' FOR UPDATE"); + } else { + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID = '1' FOR UPDATE")) { + rs.next(); + assertEquals(2, rs.getInt(2)); + } + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST")) { + rs.next(); + assertEquals(2, rs.getInt(2)); + } + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID = '1'")) { + rs.next(); + assertEquals(2, rs.getInt(2)); + } + } + } + } + deleteDb("transaction"); + } + + private void testIsolationLevels3() throws SQLException { + for (int isolationLevel : new int[] { Connection.TRANSACTION_READ_UNCOMMITTED, + Connection.TRANSACTION_READ_COMMITTED, Connection.TRANSACTION_REPEATABLE_READ, + Constants.TRANSACTION_SNAPSHOT, Connection.TRANSACTION_SERIALIZABLE }) { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction")) { + conn1.setTransactionIsolation(isolationLevel); + conn2.setTransactionIsolation(isolationLevel); + conn1.setAutoCommit(false); + conn2.setAutoCommit(false); + Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + stat1.execute("CREATE TABLE TEST(ID BIGINT PRIMARY KEY, ID2 INT UNIQUE, \"VALUE\" INT)"); + 
stat1.execute("INSERT INTO TEST VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3)"); + conn1.commit(); + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID2 IN (1, 2)")) { + rs.next(); + assertEquals(1, rs.getInt(3)); + rs.next(); + assertEquals(2, rs.getInt(3)); + } + stat2.executeUpdate("UPDATE TEST SET ID2 = 4, \"VALUE\" = 5 WHERE ID2 = 2"); + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID2 IN (1, 2)")) { + rs.next(); + assertEquals(1, rs.getInt(3)); + if (isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED) { + assertFalse(rs.next()); + } else { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(3)); + } + } + if (isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED) { + assertFalse(stat1.executeQuery("SELECT * FROM TEST WHERE ID2 = 2 FOR UPDATE").next()); + assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat1) + .executeQuery("SELECT * FROM TEST WHERE ID2 = 4 FOR UPDATE"); + } else { + assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat1) + .executeQuery("SELECT * FROM TEST WHERE ID2 = 2 FOR UPDATE"); + assertFalse(stat1.executeQuery("SELECT * FROM TEST WHERE ID2 = 4 FOR UPDATE").next()); + } + stat2.executeUpdate("UPDATE TEST SET \"VALUE\" = 6 WHERE ID2 = 3"); + conn2.commit(); + if (isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED + || isolationLevel == Connection.TRANSACTION_READ_COMMITTED) { + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID2 = 4 FOR UPDATE")) { + rs.next(); + assertEquals(5, rs.getInt(3)); + } + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST")) { + rs.next(); + assertEquals(1, rs.getInt(3)); + rs.next(); + assertEquals(5, rs.getInt(3)); + rs.next(); + assertEquals(6, rs.getInt(3)); + } + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID2 = 4")) { + rs.next(); + assertEquals(5, rs.getInt(3)); + } + } else { + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID2 = 3")) { + rs.next(); + assertEquals(3, rs.getInt(3)); + } + 
assertThrows(ErrorCode.DEADLOCK_1, stat1) + .executeQuery("SELECT * FROM TEST WHERE ID2 = 3 FOR UPDATE"); + } + } + } + deleteDb("transaction"); + } + + private void testIsolationLevels4() throws SQLException { + testIsolationLevels4(true); + testIsolationLevels4(false); + } + + private void testIsolationLevels4(boolean primaryKey) throws SQLException { + for (int isolationLevel : new int[] { Connection.TRANSACTION_READ_UNCOMMITTED, + Connection.TRANSACTION_READ_COMMITTED, Connection.TRANSACTION_REPEATABLE_READ, + Constants.TRANSACTION_SNAPSHOT, Connection.TRANSACTION_SERIALIZABLE }) { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction")) { + Statement stat1 = conn1.createStatement(); + stat1.execute("CREATE TABLE TEST(ID INT " + (primaryKey ? "PRIMARY KEY" : "UNIQUE") + + ", V INT) AS VALUES (1, 2)"); + conn2.setAutoCommit(false); + conn2.setTransactionIsolation(isolationLevel); + Statement stat2 = conn2.createStatement(); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + } + stat1.execute("UPDATE TEST SET V = V + 1"); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(isolationLevel >= Connection.TRANSACTION_REPEATABLE_READ ? 
2 : 3, rs.getInt(1)); + assertFalse(rs.next()); + } + if (isolationLevel >= Connection.TRANSACTION_REPEATABLE_READ) { + assertThrows(ErrorCode.DEADLOCK_1, stat2).executeUpdate("UPDATE TEST SET V = V + 2"); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + assertFalse(rs.next()); + } + stat1.execute("DELETE FROM TEST"); + assertThrows(ErrorCode.DEADLOCK_1, stat2).executeUpdate("UPDATE TEST SET V = V + 2"); + stat1.execute("INSERT INTO TEST VALUES (1, 2)"); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + } + stat1.execute("DELETE FROM TEST"); + stat1.execute("INSERT INTO TEST VALUES (1, 2)"); + if (primaryKey) { + // With a delegate index the row was completely + // restored, so no error + assertEquals(1, stat2.executeUpdate("UPDATE TEST SET V = V + 2")); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(4, rs.getInt(1)); + assertFalse(rs.next()); + } + conn2.commit(); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(4, rs.getInt(1)); + assertFalse(rs.next()); + } + } else { + // With a secondary index restored row is not the same + assertThrows(ErrorCode.DEADLOCK_1, stat2).executeUpdate("UPDATE TEST SET V = V + 2"); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + } + } + stat1.execute("DELETE FROM TEST"); + assertThrows(ErrorCode.DUPLICATE_KEY_1, stat2).execute("INSERT INTO TEST VALUES (1, 3)"); + } + } + } + deleteDb("transaction"); + } + + private void testIsolationLevelsCountAggregate() throws SQLException { + testIsolationLevelsCountAggregate(Connection.TRANSACTION_READ_UNCOMMITTED, 12, 15, 15, 16); + 
testIsolationLevelsCountAggregate(Connection.TRANSACTION_READ_COMMITTED, 6, 9, 15, 16); + testIsolationLevelsCountAggregate(Connection.TRANSACTION_REPEATABLE_READ, 6, 9, 9, 15); + testIsolationLevelsCountAggregate(Constants.TRANSACTION_SNAPSHOT, 6, 9, 9, 15); + testIsolationLevelsCountAggregate(Connection.TRANSACTION_SERIALIZABLE, 6, 9, 9, 15); + } + + private void testIsolationLevelsCountAggregate(int isolationLevel, long uncommitted1, long uncommitted2, + long committed, long committedOther) throws SQLException { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction")) { + Statement stat1 = conn1.createStatement(); + stat1.execute("CREATE TABLE TEST(V BIGINT) AS VALUES 1, 2, 3, 4, 5, 18"); + conn1.setTransactionIsolation(isolationLevel); + conn1.setAutoCommit(false); + PreparedStatement all = conn1.prepareStatement("SELECT COUNT(*) FROM TEST"); + PreparedStatement simple = conn1.prepareStatement("SELECT COUNT(V) FROM TEST"); + conn2.setAutoCommit(false); + Statement stat2 = conn2.createStatement(); + testIsolationLevelsCountAggregate(all, simple, 6); + stat2.executeUpdate("DELETE FROM TEST WHERE V IN(3, 4)"); + stat2.executeUpdate("INSERT INTO TEST SELECT * FROM SYSTEM_RANGE(10, 17)"); + testIsolationLevelsCountAggregate(all, simple, uncommitted1); + stat1.executeUpdate("DELETE FROM TEST WHERE V = 2"); + stat1.executeUpdate("INSERT INTO TEST SELECT * FROM SYSTEM_RANGE(6, 9)"); + testIsolationLevelsCountAggregate(all, simple, uncommitted2); + conn2.commit(); + testIsolationLevelsCountAggregate(all, simple, committed); + conn1.commit(); + testIsolationLevelsCountAggregate(all, simple, 15); + stat2.executeUpdate("DELETE FROM TEST WHERE V = 17"); + stat2.executeUpdate("INSERT INTO TEST VALUES 19, 20"); + conn2.commit(); + testIsolationLevelsCountAggregate(all, simple, committedOther); + } + } + + private void testIsolationLevelsCountAggregate(PreparedStatement all, PreparedStatement simple, 
long expected) + throws SQLException { + try (ResultSet rs = all.executeQuery()) { + rs.next(); + assertEquals(expected, rs.getLong(1)); + } + try (ResultSet rs = simple.executeQuery()) { + rs.next(); + assertEquals(expected, rs.getLong(1)); + } + } + + private void testIsolationLevelsCountAggregate2() throws SQLException { + testIsolationLevelsCountAggregate2(Connection.TRANSACTION_READ_UNCOMMITTED); + testIsolationLevelsCountAggregate2(Connection.TRANSACTION_READ_COMMITTED); + testIsolationLevelsCountAggregate2(Connection.TRANSACTION_REPEATABLE_READ); + testIsolationLevelsCountAggregate2(Constants.TRANSACTION_SNAPSHOT); + testIsolationLevelsCountAggregate2(Connection.TRANSACTION_SERIALIZABLE); + } + + private void testIsolationLevelsCountAggregate2(int isolationLevel) + throws SQLException { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction")) { + conn1.setTransactionIsolation(isolationLevel); + conn1.setAutoCommit(false); + Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + stat1.executeUpdate( + "CREATE TABLE TEST(X INTEGER PRIMARY KEY, Y INTEGER) AS SELECT X, 1 FROM SYSTEM_RANGE(1, 100)"); + conn1.commit(); + conn2.setTransactionIsolation(isolationLevel); + conn2.setAutoCommit(false); + PreparedStatement prep = conn1.prepareStatement("SELECT COUNT(*) FROM TEST"); + // Initial count + testIsolationLevelCountAggregate2(prep, 100L); + stat1.executeUpdate("INSERT INTO TEST VALUES (101, 2)"); + stat1.executeUpdate("DELETE FROM TEST WHERE X BETWEEN 2 AND 3"); + stat1.executeUpdate("UPDATE TEST SET Y = 2 WHERE X BETWEEN 4 AND 7"); + // Own uncommitted changes + testIsolationLevelCountAggregate2(prep, 99L); + stat2.executeUpdate("INSERT INTO TEST VALUES (102, 2)"); + stat2.executeUpdate("DELETE FROM TEST WHERE X BETWEEN 12 AND 13"); + stat2.executeUpdate("UPDATE TEST SET Y = 2 WHERE X BETWEEN 14 AND 17"); + // Own and concurrent uncommitted changes + 
testIsolationLevelCountAggregate2(prep, + isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED ? 98L : 99L); + conn2.commit(); + // Own uncommitted and concurrent committed changes + testIsolationLevelCountAggregate2(prep, + isolationLevel <= Connection.TRANSACTION_READ_COMMITTED ? 98L: 99L); + conn1.commit(); + // Everything is committed + testIsolationLevelCountAggregate2(prep, 98L); + stat2.executeUpdate("INSERT INTO TEST VALUES (103, 2)"); + stat2.executeUpdate("DELETE FROM TEST WHERE X BETWEEN 22 AND 23"); + stat2.executeUpdate("UPDATE TEST SET Y = 2 WHERE X BETWEEN 24 AND 27"); + // Concurrent uncommitted changes + testIsolationLevelCountAggregate2(prep, + isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED ? 97L : 98L); + conn2.commit(); + // Concurrent committed changes + testIsolationLevelCountAggregate2(prep, + isolationLevel <= Connection.TRANSACTION_READ_COMMITTED ? 97L: 98L); + conn1.commit(); + // Everything is committed again + testIsolationLevelCountAggregate2(prep, 97L); + stat2.executeUpdate("INSERT INTO TEST VALUES (104, 2)"); + conn1.commit(); + // Transaction was started with concurrent uncommitted change + testIsolationLevelCountAggregate2(prep, + isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED ? 98L : 97L); + } + } + + private void testIsolationLevelCountAggregate2(PreparedStatement prep, long expected) throws SQLException { + ResultSet rs; + rs = prep.executeQuery(); + rs.next(); + assertEquals(expected, rs.getLong(1)); + } + } diff --git a/h2/src/test/org/h2/test/db/TestTriggersConstraints.java b/h2/src/test/org/h2/test/db/TestTriggersConstraints.java index da0a2fd170..30c3d34bbc 100644 --- a/h2/src/test/org/h2/test/db/TestTriggersConstraints.java +++ b/h2/src/test/org/h2/test/db/TestTriggersConstraints.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -12,15 +12,19 @@ import java.sql.Statement; import java.util.Arrays; import java.util.HashSet; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicIntegerArray; + import org.h2.api.ErrorCode; import org.h2.api.Trigger; -import org.h2.engine.Session; -import org.h2.jdbc.JdbcConnection; +import org.h2.message.DbException; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.tools.TriggerAdapter; +import org.h2.util.StringUtils; import org.h2.util.Task; -import org.h2.value.ValueLong; +import org.h2.value.ValueBigint; /** * Tests for trigger and constraints. @@ -36,14 +40,14 @@ public class TestTriggersConstraints extends TestDb implements Trigger { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { deleteDb("trigger"); + testWrongDataType(); testTriggerDeadlock(); - testDeleteInTrigger(); testTriggerAdapter(); testTriggerSelectEachRow(); testViewTrigger(); @@ -56,6 +60,7 @@ public void test() throws Exception { testConstraints(); testCheckConstraintErrorMessage(); testMultiPartForeignKeys(); + testConcurrent(); deleteDb("trigger"); } @@ -70,62 +75,121 @@ public void fire(Connection conn, ResultSet oldRow, ResultSet newRow) } } - private void testTriggerDeadlock() throws Exception { - final Connection conn, conn2; - final Statement stat, stat2; - conn = getConnection("trigger"); - conn2 = getConnection("trigger"); - stat = conn.createStatement(); - stat2 = conn2.createStatement(); - stat.execute("create table test(id int) as select 1"); - stat.execute("create table test2(id int) as select 1"); - stat.execute("create trigger test_u before update 
on test2 " + - "for each row call \"" + DeleteTrigger.class.getName() + "\""); - conn.setAutoCommit(false); - conn2.setAutoCommit(false); - stat2.execute("update test set id = 2"); - Task task = new Task() { - @Override - public void call() throws Exception { - Thread.sleep(300); - stat2.execute("update test2 set id = 4"); - } - }; - task.execute(); - Thread.sleep(100); - try { - stat.execute("update test2 set id = 3"); - task.get(); - } catch (SQLException e) { - int errorCode = e.getErrorCode(); - assertTrue(String.valueOf(errorCode), - ErrorCode.LOCK_TIMEOUT_1 == errorCode || - ErrorCode.DEADLOCK_1 == errorCode || - ErrorCode.COMMIT_ROLLBACK_NOT_ALLOWED == errorCode); + /** + * Trigger that sets value of the wrong data type. + */ + public static class WrongTrigger implements Trigger { + @Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + newRow[1] = "Wrong value"; } - conn2.rollback(); - conn.rollback(); - stat.execute("drop table test"); - stat.execute("drop table test2"); - conn.close(); - conn2.close(); } - private void testDeleteInTrigger() throws SQLException { - if (config.mvStore) { - return; + /** + * Trigger that sets value of the wrong data type. + */ + public static class WrongTriggerAdapter extends TriggerAdapter { + @Override + public void fire(Connection conn, ResultSet oldRow, ResultSet newRow) throws SQLException { + newRow.updateString(2, "Wrong value"); + } + } + + /** + * Trigger that sets null value. + */ + public static class NullTrigger implements Trigger { + @Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + newRow[1] = null; + } + } + + /** + * Trigger that sets null value. 
+ */ + public static class NullTriggerAdapter extends TriggerAdapter { + @Override + public void fire(Connection conn, ResultSet oldRow, ResultSet newRow) throws SQLException { + newRow.updateNull(2); + } + } + + private void testWrongDataType() throws Exception { + try (Connection conn = getConnection("trigger")) { + Statement stat = conn.createStatement(); + stat.executeUpdate("CREATE TABLE TEST(A INTEGER, B INTEGER NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES (1, 2)"); + + stat.executeUpdate("CREATE TRIGGER TEST_TRIGGER BEFORE INSERT ON TEST FOR EACH ROW CALL '" + + WrongTrigger.class.getName() + '\''); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, prep).executeUpdate(); + stat.executeUpdate("DROP TRIGGER TEST_TRIGGER"); + + stat.executeUpdate("CREATE TRIGGER TEST_TRIGGER BEFORE INSERT ON TEST FOR EACH ROW CALL '" + + WrongTriggerAdapter.class.getName() + '\''); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, prep).executeUpdate(); + stat.executeUpdate("DROP TRIGGER TEST_TRIGGER"); + + stat.executeUpdate("CREATE TRIGGER TEST_TRIGGER BEFORE INSERT ON TEST FOR EACH ROW CALL '" + + NullTrigger.class.getName() + '\''); + assertThrows(ErrorCode.NULL_NOT_ALLOWED, prep).executeUpdate(); + stat.executeUpdate("DROP TRIGGER TEST_TRIGGER"); + + stat.executeUpdate("CREATE TRIGGER TEST_TRIGGER BEFORE INSERT ON TEST FOR EACH ROW CALL '" + + NullTriggerAdapter.class.getName() + '\''); + assertThrows(ErrorCode.NULL_NOT_ALLOWED, prep).executeUpdate(); + stat.executeUpdate("DROP TRIGGER TEST_TRIGGER"); + + stat.executeUpdate("DROP TABLE TEST"); + } + } + + private void testTriggerDeadlock() throws Exception { + final CountDownLatch latch = new CountDownLatch(2); + try (Connection conn = getConnection("trigger")) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id int) as select 1"); + stat.execute("create table test2(id int) as select 1"); + stat.execute("create trigger test_u before update on test2 " + 
+ "for each row call \"" + DeleteTrigger.class.getName() + "\""); + conn.setAutoCommit(false); + stat.execute("update test set id = 2"); + Task task = new Task() { + @Override + public void call() throws Exception { + try (Connection conn2 = getConnection("trigger")) { + conn2.setAutoCommit(false); + try (Statement stat2 = conn2.createStatement()) { + latch.countDown(); + latch.await(); + stat2.execute("update test2 set id = 4"); + } + conn2.rollback(); + } catch (SQLException e) { + int errorCode = e.getErrorCode(); + assertTrue(String.valueOf(errorCode), + ErrorCode.LOCK_TIMEOUT_1 == errorCode || + ErrorCode.DEADLOCK_1 == errorCode); + } + } + }; + task.execute(); + latch.countDown(); + latch.await(); + try { + stat.execute("update test2 set id = 3"); + } catch (SQLException e) { + int errorCode = e.getErrorCode(); + assertTrue(String.valueOf(errorCode), + ErrorCode.LOCK_TIMEOUT_1 == errorCode || + ErrorCode.DEADLOCK_1 == errorCode); + } + task.get(); + conn.rollback(); + stat.execute("drop table test"); + stat.execute("drop table test2"); } - Connection conn; - Statement stat; - conn = getConnection("trigger"); - stat = conn.createStatement(); - stat.execute("create table test(id int) as select 1"); - stat.execute("create trigger test_u before update on test " + - "for each row call \"" + DeleteTrigger.class.getName() + "\""); - // this used to throw a NullPointerException before we fixed it - stat.execute("update test set id = 2"); - stat.execute("drop table test"); - conn.close(); } private void testTriggerAdapter() throws SQLException { @@ -168,7 +232,7 @@ private void testTriggerSelectEachRow() throws SQLException { stat = conn.createStatement(); stat.execute("drop table if exists test"); stat.execute("create table test(id int)"); - assertThrows(ErrorCode.TRIGGER_SELECT_AND_ROW_BASED_NOT_SUPPORTED, stat) + assertThrows(ErrorCode.INVALID_TRIGGER_FLAGS_1, stat) .execute("create trigger test_insert before select on test " + "for each row call \"" + 
TestTriggerAdapter.class.getName() + "\""); conn.close(); @@ -212,7 +276,7 @@ private void testViewTriggerGeneratedKeys() throws SQLException { conn = getConnection("trigger"); stat = conn.createStatement(); stat.execute("drop table if exists test"); - stat.execute("create table test(id int identity)"); + stat.execute("create table test(id int generated by default as identity)"); stat.execute("create view test_view as select * from test"); stat.execute("create trigger test_view_insert " + "instead of insert on test_view for each row call \"" + @@ -225,12 +289,12 @@ private void testViewTriggerGeneratedKeys() throws SQLException { PreparedStatement pstat; pstat = conn.prepareStatement( - "insert into test_view values()", Statement.RETURN_GENERATED_KEYS); + "insert into test_view values()", new int[] { 1 }); int count = pstat.executeUpdate(); assertEquals(1, count); ResultSet gkRs; - gkRs = stat.executeQuery("select scope_identity()"); + gkRs = pstat.getGeneratedKeys(); assertTrue(gkRs.next()); assertEquals(1, gkRs.getInt(1)); @@ -317,16 +381,6 @@ public void fire(Connection conn, Object[] oldRow, Object[] newRow) } } - @Override - public void close() { - // ignore - } - - @Override - public void remove() { - // ignore - } - } /** @@ -351,23 +405,11 @@ public void fire(Connection conn, Object[] oldRow, Object[] newRow) prepInsert.execute(); ResultSet rs = prepInsert.getGeneratedKeys(); if (rs.next()) { - JdbcConnection jconn = (JdbcConnection) conn; - Session session = (Session) jconn.getSession(); - session.setLastTriggerIdentity(ValueLong.get(rs.getLong(1))); + newRow[0] = ValueBigint.get(rs.getLong(1)); } } } - @Override - public void close() { - // ignore - } - - @Override - public void remove() { - // ignore - } - } private void testTriggerBeforeSelect() throws SQLException { @@ -428,16 +470,6 @@ public void fire(Connection conn, Object[] oldRow, Object[] newRow) prepMeta.execute(); } - @Override - public void close() { - // ignore - } - - @Override - public 
void remove() { - // ignore - } - } /** @@ -448,13 +480,7 @@ public static class TestTriggerAlterTable implements Trigger { @Override public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { - conn.createStatement().execute("call seq.nextval"); - } - - @Override - public void init(Connection conn, String schemaName, - String triggerName, String tableName, boolean before, int type) { - // nothing to do + conn.createStatement().execute("call next value for seq"); } @Override @@ -485,7 +511,7 @@ private void testTriggerAsJavascript() throws SQLException { } private void testTrigger(final String sourceLang) throws SQLException { - final String callSeq = "call seq.nextval"; + final String callSeq = "call next value for seq"; Connection conn = getConnection("trigger"); Statement stat = conn.createStatement(); stat.execute("DROP TABLE IF EXISTS TEST"); @@ -544,19 +570,19 @@ private void testCheckConstraintErrorMessage() throws SQLException { + "company_id int not null, " + "foreign key(company_id) references companies(id))"); stat.execute("create table connections (id identity, company_id int not null, " - + "first int not null, second int not null, " + + "first int not null, `second` int not null, " + "foreign key (company_id) references companies(id), " + "foreign key (first) references departments(id), " - + "foreign key (second) references departments(id), " + + "foreign key (`second`) references departments(id), " + "check (select departments.company_id from departments, companies where " - + " departments.id in (first, second)) = company_id)"); + + " departments.id in (first, `second`)) = company_id)"); stat.execute("insert into companies(id) values(1)"); stat.execute("insert into departments(id, company_id) " + "values(10, 1)"); stat.execute("insert into departments(id, company_id) " + "values(20, 1)"); assertThrows(ErrorCode.CHECK_CONSTRAINT_INVALID, stat) - .execute("insert into connections(id, company_id, first, second) " + 
.execute("insert into connections(id, company_id, first, `second`) " + "values(100, 1, 10, 20)"); stat.execute("drop table connections"); @@ -591,8 +617,7 @@ private void testMultiPartForeignKeys() throws SQLException { assertSingleValue(stat, "select count(*) from test1", 3); assertSingleValue(stat, "select count(*) from test2", 1); - stat.execute("drop table test1"); - stat.execute("drop table test2"); + stat.execute("drop table test1, test2"); conn.close(); } @@ -607,35 +632,35 @@ private void testTriggers() throws SQLException { // [FOR EACH ROW] [QUEUE n] [NOWAIT] CALL triggeredClass stat.execute("CREATE TRIGGER IF NOT EXISTS INS_BEFORE " + "BEFORE INSERT ON TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\""); + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + '\''); stat.execute("CREATE TRIGGER IF NOT EXISTS INS_BEFORE " + "BEFORE INSERT ON TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\""); + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + '\''); stat.execute("CREATE TRIGGER INS_AFTER " + "" + "AFTER INSERT ON TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\""); + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + '\''); stat.execute("CREATE TRIGGER UPD_BEFORE " + "BEFORE UPDATE ON TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\""); + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + '\''); stat.execute("CREATE TRIGGER INS_AFTER_ROLLBACK " + "AFTER INSERT, ROLLBACK ON TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\""); + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + '\''); stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); ResultSet rs; rs = stat.executeQuery("SCRIPT"); checkRows(rs, new String[] { "CREATE FORCE TRIGGER \"PUBLIC\".\"INS_BEFORE\" " + "BEFORE INSERT ON \"PUBLIC\".\"TEST\" " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";", + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + "';", "CREATE FORCE 
TRIGGER \"PUBLIC\".\"INS_AFTER\" " + "AFTER INSERT ON \"PUBLIC\".\"TEST\" " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";", + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + "';", "CREATE FORCE TRIGGER \"PUBLIC\".\"UPD_BEFORE\" " + "BEFORE UPDATE ON \"PUBLIC\".\"TEST\" " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";", + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + "';", "CREATE FORCE TRIGGER \"PUBLIC\".\"INS_AFTER_ROLLBACK\" " + "AFTER INSERT, ROLLBACK ON \"PUBLIC\".\"TEST\" " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";", + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + "';", }); while (rs.next()) { String sql = rs.getString(1); @@ -682,6 +707,66 @@ private void checkRows(ResultSet rs, String[] expected) throws SQLException { } } + private void testConcurrent() throws Exception { + deleteDb("trigger"); + Connection conn = getConnection("trigger"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(A INT)"); + stat.execute("CREATE TRIGGER TEST_BEFORE BEFORE INSERT, UPDATE ON TEST FOR EACH ROW CALL " + + StringUtils.quoteStringSQL(ConcurrentTrigger.class.getName())); + Thread[] threads = new Thread[ConcurrentTrigger.N_T]; + AtomicInteger a = new AtomicInteger(); + for (int i = 0; i < ConcurrentTrigger.N_T; i++) { + Thread thread = new Thread() { + @Override + public void run() { + try (Connection conn = getConnection("trigger")) { + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(A) VALUES ?"); + for (int j = 0; j < ConcurrentTrigger.N_R; j++) { + prep.setInt(1, a.getAndIncrement()); + prep.executeUpdate(); + } + } catch (SQLException e) { + throw DbException.convert(e); + } + } + }; + threads[i] = thread; + } + synchronized (TestTriggersConstraints.class) { + AtomicIntegerArray array = ConcurrentTrigger.array; + int l = array.length(); + for (int i = 0; i < l; i++) { + array.set(i, 0); + } + for (Thread thread : threads) { + thread.start(); + } + 
for (Thread thread : threads) { + thread.join(); + } + for (int i = 0; i < l; i++) { + assertEquals(1, array.get(i)); + } + } + conn.close(); + } + + public static final class ConcurrentTrigger extends TriggerAdapter { + + static final int N_T = 4; + + static final int N_R = 250; + + static final AtomicIntegerArray array = new AtomicIntegerArray(N_T * N_R); + + @Override + public void fire(Connection conn, ResultSet oldRow, ResultSet newRow) throws SQLException { + array.set(newRow.getInt(1), 1); + } + + } + @Override public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { diff --git a/h2/src/test/org/h2/test/db/TestTwoPhaseCommit.java b/h2/src/test/org/h2/test/db/TestTwoPhaseCommit.java index e354822c1c..3f1380ba29 100644 --- a/h2/src/test/org/h2/test/db/TestTwoPhaseCommit.java +++ b/h2/src/test/org/h2/test/db/TestTwoPhaseCommit.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -24,7 +24,7 @@ public class TestTwoPhaseCommit extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -49,30 +49,9 @@ public void test() throws SQLException { testInDoubtAfterShutdown(); - if (!config.mvStore) { - testLargeTransactionName(); - } deleteDb("twoPhaseCommit"); } - private void testLargeTransactionName() throws SQLException { - Connection conn = getConnection("twoPhaseCommit"); - Statement stat = conn.createStatement(); - conn.setAutoCommit(false); - stat.execute("CREATE TABLE TEST2(ID INT)"); - String name = "tx12345678"; - try { - while (true) { - stat.execute("INSERT INTO TEST2 VALUES(1)"); - name += "x"; - stat.execute("PREPARE COMMIT " + name); - } - } catch (SQLException e) { - assertKnownException(e); - } - conn.close(); - } - private void test(boolean rolledBack) throws SQLException { Connection conn = getConnection("twoPhaseCommit"); Statement stat = conn.createStatement(); @@ -96,7 +75,7 @@ private void openWith(boolean rollback) throws SQLException { ArrayList list = new ArrayList<>(); ResultSet rs = stat.executeQuery("SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT"); while (rs.next()) { - list.add(rs.getString("TRANSACTION")); + list.add(rs.getString("TRANSACTION_NAME")); } for (String s : list) { if (rollback) { @@ -126,10 +105,6 @@ private void testInDoubtAfterShutdown() throws SQLException { if (config.memory) { return; } - // TODO fails in pagestore mode - if (!config.mvStore) { - return; - } deleteDb("twoPhaseCommit"); Connection conn = getConnection("twoPhaseCommit"); Statement stat = conn.createStatement(); @@ -141,7 +116,8 @@ private void testInDoubtAfterShutdown() throws SQLException { stat.execute("SHUTDOWN IMMEDIATELY"); conn = getConnection("twoPhaseCommit"); stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT TRANSACTION, STATE FROM INFORMATION_SCHEMA.IN_DOUBT"); + ResultSet rs = stat.executeQuery( + "SELECT TRANSACTION_NAME, TRANSACTION_STATE FROM INFORMATION_SCHEMA.IN_DOUBT"); 
assertFalse(rs.next()); rs = stat.executeQuery("SELECT ID FROM TEST"); assertTrue(rs.next()); @@ -154,7 +130,7 @@ private void testInDoubtAfterShutdown() throws SQLException { stat.execute("SHUTDOWN IMMEDIATELY"); conn = getConnection("twoPhaseCommit"); stat = conn.createStatement(); - rs = stat.executeQuery("SELECT TRANSACTION, STATE FROM INFORMATION_SCHEMA.IN_DOUBT"); + rs = stat.executeQuery("SELECT TRANSACTION_NAME, TRANSACTION_STATE FROM INFORMATION_SCHEMA.IN_DOUBT"); assertFalse(rs.next()); rs = stat.executeQuery("SELECT ID FROM TEST"); assertTrue(rs.next()); @@ -166,10 +142,10 @@ private void testInDoubtAfterShutdown() throws SQLException { stat.execute("SHUTDOWN IMMEDIATELY"); conn = getConnection("twoPhaseCommit"); stat = conn.createStatement(); - rs = stat.executeQuery("SELECT TRANSACTION, STATE FROM INFORMATION_SCHEMA.IN_DOUBT"); + rs = stat.executeQuery("SELECT TRANSACTION_NAME, TRANSACTION_STATE FROM INFORMATION_SCHEMA.IN_DOUBT"); assertTrue(rs.next()); - assertEquals("#3", rs.getString("TRANSACTION")); - assertEquals("IN_DOUBT", rs.getString("STATE")); + assertEquals("#3", rs.getString("TRANSACTION_NAME")); + assertEquals("IN_DOUBT", rs.getString("TRANSACTION_STATE")); rs = stat.executeQuery("SELECT ID FROM TEST"); assertTrue(rs.next()); assertEquals(1, rs.getInt(1)); diff --git a/h2/src/test/org/h2/test/db/TestUpgrade.java b/h2/src/test/org/h2/test/db/TestUpgrade.java deleted file mode 100644 index 5202684459..0000000000 --- a/h2/src/test/org/h2/test/db/TestUpgrade.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.io.OutputStream; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import org.h2.api.ErrorCode; -import org.h2.store.fs.FileUtils; -import org.h2.test.TestBase; -import org.h2.test.TestDb; -import org.h2.upgrade.DbUpgrade; -import org.h2.util.Utils; - -/** - * Automatic upgrade test cases. - */ -public class TestUpgrade extends TestDb { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase base = TestBase.createCaller().init(); - base.config.mvStore = false; - base.test(); - } - - @Override - public boolean isEnabled() { - if (config.mvStore) { - return false; - } - if (!Utils.isClassPresent("org.h2.upgrade.v1_1.Driver")) { - return false; - } - return true; - } - - @Override - public void test() throws Exception { - testLobs(); - testErrorUpgrading(); - testNoDb(); - testNoUpgradeOldAndNew(); - testIfExists(); - testCipher(); - } - - private void testLobs() throws Exception { - deleteDb("upgrade"); - Connection conn; - conn = DriverManager.getConnection("jdbc:h2v1_1:" + - getBaseDir() + "/upgrade;PAGE_STORE=FALSE", getUser(), getPassword()); - conn.createStatement().execute( - "create table test(data clob) as select space(100000)"); - conn.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.data.db")); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.index.db")); - DbUpgrade.setDeleteOldDb(true); - DbUpgrade.setScriptInTempDir(true); - conn = getConnection("upgrade"); - assertFalse(FileUtils.exists(getBaseDir() + "/upgrade.data.db")); - assertFalse(FileUtils.exists(getBaseDir() + "/upgrade.index.db")); - ResultSet rs = conn.createStatement().executeQuery("select * from test"); - rs.next(); - assertEquals(new String(new char[100000]).replace((char) 0, ' '), - rs.getString(1)); - conn.close(); - 
DbUpgrade.setDeleteOldDb(false); - DbUpgrade.setScriptInTempDir(false); - deleteDb("upgrade"); - } - - private void testErrorUpgrading() throws Exception { - deleteDb("upgrade"); - OutputStream out; - out = FileUtils.newOutputStream(getBaseDir() + "/upgrade.data.db", false); - out.write(new byte[10000]); - out.close(); - out = FileUtils.newOutputStream(getBaseDir() + "/upgrade.index.db", false); - out.write(new byte[10000]); - out.close(); - assertThrows(ErrorCode.FILE_VERSION_ERROR_1, this). - getConnection("upgrade"); - - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.data.db")); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.index.db")); - deleteDb("upgrade"); - } - - private void testNoDb() throws SQLException { - deleteDb("upgrade"); - Connection conn = getConnection("upgrade"); - conn.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - deleteDb("upgrade"); - - conn = getConnection("upgrade;NO_UPGRADE=TRUE"); - conn.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - deleteDb("upgrade"); - } - - private void testNoUpgradeOldAndNew() throws Exception { - deleteDb("upgrade"); - deleteDb("upgradeOld"); - final String additionalParametersOld = ";AUTO_SERVER=TRUE;OPEN_NEW=TRUE"; - final String additionalParametersNew = ";AUTO_SERVER=TRUE;OPEN_NEW=TRUE;MV_STORE=FALSE"; - - // Create old db - Utils.callStaticMethod("org.h2.upgrade.v1_1.Driver.load"); - Connection connOld = DriverManager.getConnection("jdbc:h2v1_1:" + - getBaseDir() + "/upgradeOld;PAGE_STORE=FALSE" + additionalParametersOld); - // Test auto server, too - Connection connOld2 = DriverManager.getConnection("jdbc:h2v1_1:" + - getBaseDir() + "/upgradeOld;PAGE_STORE=FALSE" + additionalParametersOld); - Statement statOld = connOld.createStatement(); - statOld.execute("create table testOld(id int)"); - connOld.close(); - connOld2.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgradeOld.data.db")); - - // Create new DB - Connection connNew 
= DriverManager.getConnection("jdbc:h2:" + - getBaseDir() + "/upgrade" + additionalParametersNew); - Connection connNew2 = DriverManager.getConnection("jdbc:h2:" + - getBaseDir() + "/upgrade" + additionalParametersNew); - Statement statNew = connNew.createStatement(); - statNew.execute("create table test(id int)"); - - // Link to old DB without upgrade - statNew.executeUpdate("CREATE LOCAL TEMPORARY LINKED TABLE " + - "linkedTestOld('org.h2.upgrade.v1_1.Driver', 'jdbc:h2v1_1:" + - getBaseDir() + "/upgradeOld" + additionalParametersOld + "', '', '', 'TestOld')"); - statNew.executeQuery("select * from linkedTestOld"); - connNew.close(); - connNew2.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgradeOld.data.db")); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - - connNew = DriverManager.getConnection("jdbc:h2:" + - getBaseDir() + "/upgrade" + additionalParametersNew); - statNew = connNew.createStatement(); - // Link to old DB with upgrade - statNew.executeUpdate("CREATE LOCAL TEMPORARY LINKED TABLE " + - "linkedTestOld('org.h2.Driver', 'jdbc:h2:" + - getBaseDir() + "/upgradeOld" + additionalParametersNew + "', '', '', 'TestOld')"); - statNew.executeQuery("select * from linkedTestOld"); - connNew.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgradeOld.h2.db")); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - - deleteDb("upgrade"); - deleteDb("upgradeOld"); - } - - private void testIfExists() throws Exception { - deleteDb("upgrade"); - - // Create old - Utils.callStaticMethod("org.h2.upgrade.v1_1.Driver.load"); - Connection connOld = DriverManager.getConnection( - "jdbc:h2v1_1:" + getBaseDir() + "/upgrade;PAGE_STORE=FALSE"); - // Test auto server, too - Connection connOld2 = DriverManager.getConnection( - "jdbc:h2v1_1:" + getBaseDir() + "/upgrade;PAGE_STORE=FALSE"); - Statement statOld = connOld.createStatement(); - statOld.execute("create table test(id int)"); - connOld.close(); - connOld2.close(); - 
assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.data.db")); - - // Upgrade - Connection connOldViaNew = DriverManager.getConnection( - "jdbc:h2:" + getBaseDir() + "/upgrade;ifexists=true;MV_STORE=FALSE"); - Statement statOldViaNew = connOldViaNew.createStatement(); - statOldViaNew.executeQuery("select * from test"); - connOldViaNew.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - - deleteDb("upgrade"); - } - - private void testCipher() throws Exception { - deleteDb("upgrade"); - - // Create old db - Utils.callStaticMethod("org.h2.upgrade.v1_1.Driver.load"); - Connection conn = DriverManager.getConnection("jdbc:h2v1_1:" + - getBaseDir() + "/upgrade;PAGE_STORE=FALSE;" + - "CIPHER=AES", "abc", "abc abc"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int)"); - conn.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.data.db")); - - // Connect to old DB with upgrade - conn = DriverManager.getConnection("jdbc:h2:" + - getBaseDir() + "/upgrade;CIPHER=AES;MV_STORE=false", "abc", "abc abc"); - stat = conn.createStatement(); - stat.executeQuery("select * from test"); - conn.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - - deleteDb("upgrade"); - } - - @Override - public void deleteDb(String dbName) { - super.deleteDb(dbName); - try { - Utils.callStaticMethod( - "org.h2.upgrade.v1_1.tools.DeleteDbFiles.execute", - getBaseDir(), dbName, true); - } catch (Exception e) { - throw new RuntimeException(e.getMessage()); - } - FileUtils.delete(getBaseDir() + "/" + - dbName + ".data.db.backup"); - FileUtils.delete(getBaseDir() + "/" + - dbName + ".index.db.backup"); - FileUtils.deleteRecursive(getBaseDir() + "/" + - dbName + ".lobs.db.backup", false); - } - -} \ No newline at end of file diff --git a/h2/src/test/org/h2/test/db/TestUsingIndex.java b/h2/src/test/org/h2/test/db/TestUsingIndex.java deleted file mode 100644 index 0e4fb6dfca..0000000000 --- 
a/h2/src/test/org/h2/test/db/TestUsingIndex.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import org.h2.test.TestBase; -import org.h2.test.TestDb; -import org.h2.value.DataType; - -/** - * Tests the "create index ... using" syntax. - * - * @author Erwan Bocher Atelier SIG, IRSTV FR CNRS 2488 - */ -public class TestUsingIndex extends TestDb { - - private Connection conn; - private Statement stat; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws SQLException { - deleteDb("using_index"); - testUsingBadSyntax(); - testUsingGoodSyntax(); - testHashIndex(); - testSpatialIndex(); - testBadSpatialSyntax(); - } - - private void testHashIndex() throws SQLException { - conn = getConnection("using_index"); - stat = conn.createStatement(); - stat.execute("create table test(id int)"); - stat.execute("create index idx_name on test(id) using hash"); - stat.execute("insert into test select x from system_range(1, 1000)"); - ResultSet rs = stat.executeQuery("select * from test where id=100"); - assertTrue(rs.next()); - assertFalse(rs.next()); - stat.execute("delete from test where id=100"); - rs = stat.executeQuery("select * from test where id=100"); - assertFalse(rs.next()); - rs = stat.executeQuery("select min(id), max(id) from test"); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - assertEquals(1000, rs.getInt(2)); - stat.execute("drop table test"); - conn.close(); - deleteDb("using_index"); - } - - private void testUsingBadSyntax() throws SQLException { - conn = getConnection("using_index"); - stat = 
conn.createStatement(); - stat.execute("create table test(id int)"); - assertFalse(isSupportedSyntax(stat, - "create hash index idx_name_1 on test(id) using hash")); - assertFalse(isSupportedSyntax(stat, - "create hash index idx_name_2 on test(id) using btree")); - assertFalse(isSupportedSyntax(stat, - "create index idx_name_3 on test(id) using hash_tree")); - assertFalse(isSupportedSyntax(stat, - "create unique hash index idx_name_4 on test(id) using hash")); - assertFalse(isSupportedSyntax(stat, - "create index idx_name_5 on test(id) using hash table")); - conn.close(); - deleteDb("using_index"); - } - - private void testUsingGoodSyntax() throws SQLException { - conn = getConnection("using_index"); - stat = conn.createStatement(); - stat.execute("create table test(id int)"); - assertTrue(isSupportedSyntax(stat, - "create index idx_name_1 on test(id) using hash")); - assertTrue(isSupportedSyntax(stat, - "create index idx_name_2 on test(id) using btree")); - assertTrue(isSupportedSyntax(stat, - "create unique index idx_name_3 on test(id) using hash")); - conn.close(); - deleteDb("using_index"); - } - - /** - * Return if the syntax is supported otherwise false - * - * @param stat the statement - * @param sql the SQL statement - * @return true if the query works, false if it fails - */ - private static boolean isSupportedSyntax(Statement stat, String sql) { - try { - stat.execute(sql); - return true; - } catch (SQLException ex) { - return false; - } - } - - private void testSpatialIndex() throws SQLException { - if (config.memory && config.mvStore) { - return; - } - if (DataType.GEOMETRY_CLASS == null) { - return; - } - deleteDb("spatial"); - conn = getConnection("spatial"); - stat = conn.createStatement(); - stat.execute("create table test" - + "(id int primary key, poly geometry)"); - stat.execute("insert into test values(1, " - + "'POLYGON ((1 1, 1 2, 2 2, 1 1))')"); - stat.execute("insert into test values(2,null)"); - stat.execute("insert into test values(3, " - 
+ "'POLYGON ((3 1, 3 2, 4 2, 3 1))')"); - stat.execute("insert into test values(4,null)"); - stat.execute("insert into test values(5, " - + "'POLYGON ((1 3, 1 4, 2 4, 1 3))')"); - stat.execute("create index on test(poly) using rtree"); - - ResultSet rs = stat.executeQuery( - "select * from test " - + "where poly && 'POINT (1.5 1.5)'::Geometry"); - assertTrue(rs.next()); - assertEquals(1, rs.getInt("id")); - assertFalse(rs.next()); - rs.close(); - conn.close(); - deleteDb("spatial"); - } - - private void testBadSpatialSyntax() throws SQLException { - if (config.memory && config.mvStore) { - return; - } - if (DataType.GEOMETRY_CLASS == null) { - return; - } - deleteDb("spatial"); - conn = getConnection("spatial"); - stat = conn.createStatement(); - stat.execute("create table test" - + "(id int primary key, poly geometry)"); - stat.execute("insert into test values(1, " - + "'POLYGON ((1 1, 1 2, 2 2, 1 1))')"); - assertFalse(isSupportedSyntax(stat, - "create spatial index on test(poly) using rtree")); - conn.close(); - deleteDb("spatial"); - } - -} \ No newline at end of file diff --git a/h2/src/test/org/h2/test/db/TestView.java b/h2/src/test/org/h2/test/db/TestView.java index 35ac6e82b3..1dffd44bec 100644 --- a/h2/src/test/org/h2/test/db/TestView.java +++ b/h2/src/test/org/h2/test/db/TestView.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -11,7 +11,7 @@ import java.sql.SQLException; import java.sql.Statement; import org.h2.api.ErrorCode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.jdbc.JdbcConnection; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -29,7 +29,7 @@ public class TestView extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -50,7 +50,6 @@ public void test() throws SQLException { testManyViews(); testReferenceView(); testViewAlterAndCommandCache(); - testViewConstraintFromColumnExpression(); deleteDb("view"); } @@ -78,7 +77,7 @@ private void testSubQueryViewIndexCache() throws SQLException { "name varchar(25) unique, age int unique)"); // check that initial cache size is empty - Session s = (Session) ((JdbcConnection) conn).getSession(); + SessionLocal s = (SessionLocal) ((JdbcConnection) conn).getSession(); s.clearViewIndexCache(); assertTrue(s.getViewIndexCache(true).isEmpty()); assertTrue(s.getViewIndexCache(false).isEmpty()); @@ -170,7 +169,7 @@ private void testEmptyColumn() throws SQLException { private void testChangeSchemaSearchPath() throws SQLException { deleteDb("view"); - Connection conn = getConnection("view;FUNCTIONS_IN_SCHEMA=TRUE"); + Connection conn = getConnection("view"); Statement stat = conn.createStatement(); stat.execute("CREATE ALIAS X AS $$ int x() { return 1; } $$;"); stat.execute("CREATE SCHEMA S"); @@ -213,7 +212,7 @@ private void testCacheFunction(boolean deterministic) throws SQLException { x = 8; stat.execute("CREATE ALIAS GET_X " + (deterministic ? 
"DETERMINISTIC" : "") + - " FOR \"" + getClass().getName() + ".getX\""); + " FOR '" + getClass().getName() + ".getX'"); stat.execute("CREATE VIEW V AS SELECT * FROM (SELECT GET_X())"); ResultSet rs; rs = stat.executeQuery("SELECT * FROM V"); @@ -348,47 +347,4 @@ private void testViewAlterAndCommandCache() throws SQLException { deleteDb("view"); } - /** - * Make sure that the table constraint is still available when create a view - * of other table. - */ - private void testViewConstraintFromColumnExpression() throws SQLException { - deleteDb("view"); - Connection conn = getConnection("view"); - Statement stat = conn.createStatement(); - stat.execute("create table t0(id1 int primary key CHECK ((ID1 % 2) = 0))"); - stat.execute("create table t1(id2 int primary key CHECK ((ID2 % 1) = 0))"); - stat.execute("insert into t0 values(0)"); - stat.execute("insert into t1 values(1)"); - stat.execute("create view v1 as select * from t0,t1"); - // Check with ColumnExpression - ResultSet rs = stat.executeQuery( - "select * from INFORMATION_SCHEMA.COLUMNS where TABLE_NAME = 'V1'"); - assertTrue(rs.next()); - assertEquals("ID1", rs.getString("COLUMN_NAME")); - assertEquals("((\"ID1\" % 2) = 0)", rs.getString("CHECK_CONSTRAINT")); - assertTrue(rs.next()); - assertEquals("ID2", rs.getString("COLUMN_NAME")); - assertEquals("((\"ID2\" % 1) = 0)", rs.getString("CHECK_CONSTRAINT")); - // Check with AliasExpression - stat.execute("create view v2 as select ID1 key1,ID2 key2 from t0,t1"); - rs = stat.executeQuery("select * from INFORMATION_SCHEMA.COLUMNS where TABLE_NAME = 'V2'"); - assertTrue(rs.next()); - assertEquals("KEY1", rs.getString("COLUMN_NAME")); - assertEquals("((\"KEY1\" % 2) = 0)", rs.getString("CHECK_CONSTRAINT")); - assertTrue(rs.next()); - assertEquals("KEY2", rs.getString("COLUMN_NAME")); - assertEquals("((\"KEY2\" % 1) = 0)", rs.getString("CHECK_CONSTRAINT")); - // Check hide of constraint if column is an Operation - stat.execute("create view v3 as select ID1 + 1 ID1, ID2 
+ 1 ID2 from t0,t1"); - rs = stat.executeQuery("select * from INFORMATION_SCHEMA.COLUMNS where TABLE_NAME = 'V3'"); - assertTrue(rs.next()); - assertEquals("ID1", rs.getString("COLUMN_NAME")); - assertEquals("", rs.getString("CHECK_CONSTRAINT")); - assertTrue(rs.next()); - assertEquals("ID2", rs.getString("COLUMN_NAME")); - assertEquals("", rs.getString("CHECK_CONSTRAINT")); - conn.close(); - deleteDb("view"); - } } diff --git a/h2/src/test/org/h2/test/db/TestViewAlterTable.java b/h2/src/test/org/h2/test/db/TestViewAlterTable.java index dec0e7f412..6e8febc5e1 100644 --- a/h2/src/test/org/h2/test/db/TestViewAlterTable.java +++ b/h2/src/test/org/h2/test/db/TestViewAlterTable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -9,9 +9,9 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.test.TestDb; -import org.h2.api.ErrorCode; /** * Test the impact of ALTER TABLE statements on views. @@ -27,7 +27,7 @@ public class TestViewAlterTable extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -72,7 +72,7 @@ private void testAlterTableDropColumnNotInView() throws SQLException { private void testAlterTableDropColumnInView() throws SQLException { // simple stat.execute("create table test(id identity, name varchar) " + - "as select x, 'Hello'"); + "as select 1, 'Hello' from dual"); stat.execute("create view test_view as select * from test"); assertThrows(ErrorCode.COLUMN_IS_REFERENCED_1, stat). 
execute("alter table test drop name"); @@ -157,7 +157,7 @@ private void testForeignKey() throws SQLException { } private void createTestData() throws SQLException { - stat.execute("create table test(a int, b int, c int)"); + stat.execute("create table test(a int primary key, b int, c int)"); stat.execute("insert into test(a, b, c) values (1, 2, 3)"); stat.execute("create view v1 as select a as b, b as a from test"); // child of v1 @@ -203,7 +203,7 @@ private void checkViewRemainsValid() throws SQLException { private void testAlterTableDropColumnInViewWithDoubleQuotes() throws SQLException{ // simple stat.execute("create table \"test\"(id identity, name varchar) " + - "as select x, 'Hello'"); + "as select 1, 'Hello' from dual"); stat.execute("create view test_view as select * from \"test\""); assertThrows(ErrorCode.COLUMN_IS_REFERENCED_1, stat). execute("alter table \"test\" drop name"); diff --git a/h2/src/test/org/h2/test/db/TestViewDropView.java b/h2/src/test/org/h2/test/db/TestViewDropView.java index 88401e4588..6361704af8 100644 --- a/h2/src/test/org/h2/test/db/TestViewDropView.java +++ b/h2/src/test/org/h2/test/db/TestViewDropView.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -28,7 +28,7 @@ public class TestViewDropView extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -50,7 +50,7 @@ public void test() throws Exception { } private void testCreateForceView() throws SQLException { - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat). 
execute("create view test_view as select * from test"); stat.execute("create force view test_view as select * from test"); stat.execute("create table test(id int)"); @@ -66,8 +66,8 @@ private void testCreateForceView() throws SQLException { private void testDropViewDefaultBehaviour() throws SQLException { createTestData(); - ResultSet rs = stat.executeQuery("select value " + - "from information_schema.settings where name = 'DROP_RESTRICT'"); + ResultSet rs = stat.executeQuery( + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'DROP_RESTRICT'"); rs.next(); boolean dropRestrict = rs.getBoolean(1); if (dropRestrict) { diff --git a/h2/src/test/org/h2/test/db/package.html b/h2/src/test/org/h2/test/db/package.html index e0faf3d876..7b975d2567 100644 --- a/h2/src/test/org/h2/test/db/package.html +++ b/h2/src/test/org/h2/test/db/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/jdbc/TestBatchUpdates.java b/h2/src/test/org/h2/test/jdbc/TestBatchUpdates.java index 771e86e966..1a153b9e16 100644 --- a/h2/src/test/org/h2/test/jdbc/TestBatchUpdates.java +++ b/h2/src/test/org/h2/test/jdbc/TestBatchUpdates.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -57,7 +57,7 @@ public class TestBatchUpdates extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -78,13 +78,13 @@ private void testRootCause() throws SQLException { try { stat.executeBatch(); } catch (SQLException e) { - assertContains(e.toString(), "TEST_Y"); + assertContains(e.toString(), "TEST_X"); e = e.getNextException(); assertNotNull(e); - assertContains(e.toString(), "TEST_Y"); + assertContains(e.toString(), "TEST_X"); e = e.getNextException(); assertNotNull(e); - assertContains(e.toString(), "TEST_X"); + assertContains(e.toString(), "TEST_Y"); e = e.getNextException(); assertNull(e); } @@ -97,13 +97,13 @@ private void testRootCause() throws SQLException { try { prep.executeBatch(); } catch (SQLException e) { - assertContains(e.toString(), "TEST_Y"); + assertContains(e.toString(), "TEST_X"); e = e.getNextException(); assertNotNull(e); - assertContains(e.toString(), "TEST_Y"); + assertContains(e.toString(), "TEST_X"); e = e.getNextException(); assertNotNull(e); - assertContains(e.toString(), "TEST_X"); + assertContains(e.toString(), "TEST_Y"); e = e.getNextException(); assertNull(e); } @@ -115,8 +115,7 @@ private void testExecuteCall() throws SQLException { deleteDb("batchUpdates"); conn = getConnection("batchUpdates"); stat = conn.createStatement(); - stat.execute("CREATE ALIAS updatePrices FOR \"" + - getClass().getName() + ".updatePrices\""); + stat.execute("CREATE ALIAS updatePrices FOR '" + getClass().getName() + ".updatePrices'"); CallableStatement call = conn.prepareCall("{call updatePrices(?, ?)}"); call.setString(1, "Hello"); call.setFloat(2, 1.4f); @@ -154,12 +153,7 @@ private void testException() throws SQLException { prep.setString(1, "x"); prep.addBatch(); } - try { - prep.executeBatch(); - fail(); - } catch (BatchUpdateException e) { - // expected - } + assertThrows(BatchUpdateException.class, prep).executeBatch(); conn.close(); } diff --git a/h2/src/test/org/h2/test/jdbc/TestCallableStatement.java 
b/h2/src/test/org/h2/test/jdbc/TestCallableStatement.java index fd4dff1db6..c1e758553b 100644 --- a/h2/src/test/org/h2/test/jdbc/TestCallableStatement.java +++ b/h2/src/test/org/h2/test/jdbc/TestCallableStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -22,6 +22,9 @@ import java.sql.Statement; import java.sql.Timestamp; import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Collections; import org.h2.api.ErrorCode; @@ -30,7 +33,6 @@ import org.h2.tools.SimpleResultSet; import org.h2.util.IOUtils; import org.h2.util.JdbcUtils; -import org.h2.util.LocalDateTimeUtils; import org.h2.util.Utils; /** @@ -44,7 +46,7 @@ public class TestCallableStatement extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -66,18 +68,16 @@ public void test() throws Exception { } private void testOutParameter(Connection conn) throws SQLException { - conn.createStatement().execute( - "create table test(id identity) as select null"); + conn.createStatement().execute("CREATE SEQUENCE SEQ"); for (int i = 1; i < 20; i++) { - CallableStatement cs = conn.prepareCall("{ ? = call IDENTITY()}"); + CallableStatement cs = conn.prepareCall("{ ? 
= CALL NEXT VALUE FOR SEQ}"); cs.registerOutParameter(1, Types.BIGINT); cs.execute(); long id = cs.getLong(1); - assertEquals(1, id); + assertEquals(i, id); cs.close(); } - conn.createStatement().execute( - "drop table test"); + conn.createStatement().execute("DROP SEQUENCE SEQ"); } private void testUnsupportedOperations(Connection conn) throws SQLException { @@ -86,7 +86,7 @@ private void testUnsupportedOperations(Connection conn) throws SQLException { assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). getURL(1); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). - getObject(1, Collections.>emptyMap()); + getObject(1, Collections.emptyMap()); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). getRef(1); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). @@ -95,7 +95,7 @@ private void testUnsupportedOperations(Connection conn) throws SQLException { assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). getURL("a"); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). - getObject("a", Collections.>emptyMap()); + getObject("a", Collections.emptyMap()); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). getRef("a"); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). 
@@ -167,29 +167,20 @@ private void testGetters(Connection conn) throws SQLException { call.registerOutParameter(1, Types.DATE); call.execute(); assertEquals("2000-01-01", call.getDate(1).toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("2000-01-01", call.getObject(1, - LocalDateTimeUtils.LOCAL_DATE).toString()); - } + assertEquals("2000-01-01", call.getObject(1, LocalDate.class).toString()); call.setTime(2, java.sql.Time.valueOf("01:02:03")); call.registerOutParameter(1, Types.TIME); call.execute(); assertEquals("01:02:03", call.getTime(1).toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("01:02:03", call.getObject(1, - LocalDateTimeUtils.LOCAL_TIME).toString()); - } + assertEquals("01:02:03", call.getObject(1, LocalTime.class).toString()); call.setTimestamp(2, java.sql.Timestamp.valueOf( "2001-02-03 04:05:06.789")); call.registerOutParameter(1, Types.TIMESTAMP); call.execute(); assertEquals("2001-02-03 04:05:06.789", call.getTimestamp(1).toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("2001-02-03T04:05:06.789", call.getObject(1, - LocalDateTimeUtils.LOCAL_DATE_TIME).toString()); - } + assertEquals("2001-02-03T04:05:06.789", call.getObject(1, LocalDateTime.class).toString()); call.setBoolean(2, true); call.registerOutParameter(1, Types.BIT); @@ -247,9 +238,8 @@ private void testPrepare(Connection conn) throws Exception { assertEquals(1, rs.getInt(1)); assertEquals("Hello", rs.getString(2)); assertFalse(rs.next()); - stat.execute("CREATE ALIAS testCall FOR \"" + - getClass().getName() + ".testCall\""); - call = conn.prepareCall("{CALL testCall(?, ?, ?, ?)}"); + stat.execute("CREATE ALIAS testCall FOR '" + getClass().getName() + ".testCall'"); + call = conn.prepareCall("{SELECT * FROM testCall(?, ?, ?, ?)}"); call.setInt("A", 50); call.setString("B", "abc"); long t = System.currentTimeMillis(); @@ -258,12 +248,7 @@ private void testPrepare(Connection conn) throws Exception { 
call.registerOutParameter(1, Types.INTEGER); call.registerOutParameter("B", Types.VARCHAR); call.executeUpdate(); - try { - call.getTimestamp("C"); - fail("not registered out parameter accessible"); - } catch (SQLException e) { - // expected exception - } + assertThrows(ErrorCode.INVALID_VALUE_2, call).getTimestamp("C"); call.registerOutParameter(3, Types.TIMESTAMP); call.registerOutParameter(4, Types.TIMESTAMP); call.executeUpdate(); @@ -273,28 +258,16 @@ private void testPrepare(Connection conn) throws Exception { assertEquals("2001-02-03 10:20:30.0", call.getTimestamp(4).toString()); assertEquals("2001-02-03 10:20:30.0", call.getTimestamp("D").toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("2001-02-03T10:20:30", call.getObject(4, - LocalDateTimeUtils.LOCAL_DATE_TIME).toString()); - assertEquals("2001-02-03T10:20:30", call.getObject("D", - LocalDateTimeUtils.LOCAL_DATE_TIME).toString()); - } + assertEquals("2001-02-03T10:20:30", call.getObject(4, LocalDateTime.class).toString()); + assertEquals("2001-02-03T10:20:30", call.getObject("D", LocalDateTime.class).toString()); assertEquals("10:20:30", call.getTime(4).toString()); assertEquals("10:20:30", call.getTime("D").toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("10:20:30", call.getObject(4, - LocalDateTimeUtils.LOCAL_TIME).toString()); - assertEquals("10:20:30", call.getObject("D", - LocalDateTimeUtils.LOCAL_TIME).toString()); - } + assertEquals("10:20:30", call.getObject(4, LocalTime.class).toString()); + assertEquals("10:20:30", call.getObject("D", LocalTime.class).toString()); assertEquals("2001-02-03", call.getDate(4).toString()); assertEquals("2001-02-03", call.getDate("D").toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("2001-02-03", call.getObject(4, - LocalDateTimeUtils.LOCAL_DATE).toString()); - assertEquals("2001-02-03", call.getObject("D", - LocalDateTimeUtils.LOCAL_DATE).toString()); - } + 
assertEquals("2001-02-03", call.getObject(4, LocalDate.class).toString()); + assertEquals("2001-02-03", call.getObject("D", LocalDate.class).toString()); assertEquals(100, call.getInt(1)); assertEquals(100, call.getInt("A")); @@ -328,24 +301,9 @@ private void testPrepare(Connection conn) throws Exception { assertEquals("ABC", call.getSQLXML(2).getString()); assertEquals("ABC", call.getSQLXML("B").getString()); - try { - call.getString(100); - fail("incorrect parameter index value"); - } catch (SQLException e) { - // expected exception - } - try { - call.getString(0); - fail("incorrect parameter index value"); - } catch (SQLException e) { - // expected exception - } - try { - call.getBoolean("X"); - fail("incorrect parameter name value"); - } catch (SQLException e) { - // expected exception - } + assertThrows(ErrorCode.INVALID_VALUE_2, call).getString(100); + assertThrows(ErrorCode.INVALID_VALUE_2, call).getString(0); + assertThrows(ErrorCode.INVALID_VALUE_2, call).getBoolean("X"); call.setCharacterStream("B", new StringReader("xyz")); @@ -413,7 +371,7 @@ private void testClassLoader(Connection conn) throws SQLException { JdbcUtils.addClassFactory(myFactory); try { Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS T_CLASSLOADER FOR \"TestClassFactory.testClassF\""); + stat.execute("CREATE ALIAS T_CLASSLOADER FOR 'TestClassFactory.testClassF'"); ResultSet rs = stat.executeQuery("SELECT T_CLASSLOADER(true)"); assertTrue(rs.next()); assertEquals(false, rs.getBoolean(1)); @@ -425,8 +383,7 @@ private void testClassLoader(Connection conn) throws SQLException { private void testArrayArgument(Connection connection) throws SQLException { Array array = connection.createArrayOf("Int", new Object[] {0, 1, 2}); try (Statement statement = connection.createStatement()) { - statement.execute("CREATE ALIAS getArrayLength FOR \"" + - getClass().getName() + ".getArrayLength\""); + statement.execute("CREATE ALIAS getArrayLength FOR '" + getClass().getName() + 
".getArrayLength'"); // test setArray try (CallableStatement callableStatement = connection @@ -459,18 +416,16 @@ private void testArrayArgument(Connection connection) throws SQLException { } private void testArrayReturnValue(Connection connection) throws SQLException { - Object[][] arraysToTest = new Object[][] { - new Object[] {0, 1, 2}, - new Object[] {0, "1", 2}, - new Object[] {0, null, 2}, - new Object[] {0, new Object[] {"s", 1}, new Object[] {null, 1L}}, + Integer[][] arraysToTest = new Integer[][] { + {0, 1, 2}, + {0, 1, 2}, + {0, null, 2}, }; try (Statement statement = connection.createStatement()) { - statement.execute("CREATE ALIAS arrayIdentiy FOR \"" + - getClass().getName() + ".arrayIdentiy\""); + statement.execute("CREATE ALIAS arrayIdentiy FOR '" + getClass().getName() + ".arrayIdentiy'"); - for (Object[] arrayToTest : arraysToTest) { - Array sqlInputArray = connection.createArrayOf("ignored", arrayToTest); + for (Integer[] arrayToTest : arraysToTest) { + Array sqlInputArray = connection.createArrayOf("INTEGER", arrayToTest); try { try (CallableStatement callableStatement = connection .prepareCall("{call arrayIdentiy(?)}")) { @@ -526,7 +481,7 @@ public static Boolean testClassF(Boolean b) { * @param array the array * @return the length of the array */ - public static int getArrayLength(Object[] array) { + public static int getArrayLength(Integer[] array) { return array == null ? 0 : array.length; } @@ -536,7 +491,7 @@ public static int getArrayLength(Object[] array) { * @param array the array * @return the array */ - public static Object[] arrayIdentiy(Object[] array) { + public static Integer[] arrayIdentiy(Integer[] array) { return array; } diff --git a/h2/src/test/org/h2/test/jdbc/TestCancel.java b/h2/src/test/org/h2/test/jdbc/TestCancel.java index ffc46ba687..271fd71166 100644 --- a/h2/src/test/org/h2/test/jdbc/TestCancel.java +++ b/h2/src/test/org/h2/test/jdbc/TestCancel.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -29,7 +29,7 @@ public class TestCancel extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } /** @@ -117,8 +117,8 @@ private void testJdbcQueryTimeout() throws SQLException { assertEquals(1, stat.getQueryTimeout()); Statement s2 = conn.createStatement(); assertEquals(1, s2.getQueryTimeout()); - ResultSet rs = s2.executeQuery("SELECT VALUE " + - "FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'QUERY_TIMEOUT'"); + ResultSet rs = s2.executeQuery( + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'QUERY_TIMEOUT'"); rs.next(); assertEquals(1000, rs.getInt(1)); assertThrows(ErrorCode.STATEMENT_WAS_CANCELED, stat). 
@@ -164,11 +164,14 @@ public static int visit(int x) { } private void testCancelStatement() throws Exception { + if (config.lazy && config.networked) { + return; + } deleteDb("cancel"); Connection conn = getConnection("cancel"); Statement stat = conn.createStatement(); stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("CREATE ALIAS VISIT FOR \"" + getClass().getName() + ".visit\""); + stat.execute("CREATE ALIAS VISIT FOR '" + getClass().getName() + ".visit'"); stat.execute("CREATE MEMORY TABLE TEST" + "(ID INT PRIMARY KEY, NAME VARCHAR(255))"); PreparedStatement prep = conn.prepareStatement( diff --git a/h2/src/test/org/h2/test/jdbc/TestConcurrentConnectionUsage.java b/h2/src/test/org/h2/test/jdbc/TestConcurrentConnectionUsage.java index 04f4f067b8..e2f15bb8f1 100644 --- a/h2/src/test/org/h2/test/jdbc/TestConcurrentConnectionUsage.java +++ b/h2/src/test/org/h2/test/jdbc/TestConcurrentConnectionUsage.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -25,7 +25,7 @@ public class TestConcurrentConnectionUsage extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbc/TestConnection.java b/h2/src/test/org/h2/test/jdbc/TestConnection.java index 8748ec9b1c..14206376ea 100644 --- a/h2/src/test/org/h2/test/jdbc/TestConnection.java +++ b/h2/src/test/org/h2/test/jdbc/TestConnection.java @@ -1,21 +1,25 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; -import org.h2.api.ErrorCode; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; import java.sql.SQLClientInfoException; import java.sql.SQLException; import java.sql.Statement; import java.util.Properties; +import java.util.TimeZone; - +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.engine.SysProperties; +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.DateTimeUtils; /** * Tests the client info @@ -28,7 +32,7 @@ public class TestConnection extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -40,7 +44,14 @@ public void test() throws Exception { testSetUnsupportedClientInfoProperties(); testSetInternalProperty(); testSetInternalPropertyToInitialValue(); + testTransactionIsolationSetAndGet(); testSetGetSchema(); + testCommitOnAutoCommitSetRunner(); + testRollbackOnAutoCommitSetRunner(); + testChangeTransactionLevelCommitRunner(); + testLockTimeout(); + testIgnoreUnknownSettings(); + testTimeZone(); } private void testSetInternalProperty() throws SQLException { @@ -113,27 +124,279 @@ private void testGetUnsupportedClientInfo() throws SQLException { conn.close(); } - private void testSetGetSchema() throws SQLException { - if (config.networked) { - return; + private void testTransactionIsolationSetAndGet() throws Exception { + deleteDb("transactionIsolation"); + try (Connection conn = getConnection("transactionIsolation")) { + assertEquals(Connection.TRANSACTION_READ_COMMITTED, conn.getTransactionIsolation()); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); + 
assertEquals(Connection.TRANSACTION_READ_UNCOMMITTED, conn.getTransactionIsolation()); + conn.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); + assertEquals(Connection.TRANSACTION_REPEATABLE_READ, + conn.getTransactionIsolation()); + conn.setTransactionIsolation(Constants.TRANSACTION_SNAPSHOT); + assertEquals(Constants.TRANSACTION_SNAPSHOT, + conn.getTransactionIsolation()); + conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, conn.getTransactionIsolation()); + } finally { + deleteDb("transactionIsolation"); + } + } + + private void testCommitOnAutoCommitSetRunner() throws Exception { + assertFalse("Default value must be false", SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT); + testCommitOnAutoCommitSet(false); + try { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = true; + testCommitOnAutoCommitSet(true); + } finally { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = false; + } + + } + + private void testCommitOnAutoCommitSet(boolean expectedPropertyEnabled) throws Exception { + assertEquals(SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT, expectedPropertyEnabled); + Connection conn = getConnection("clientInfo"); + conn.setAutoCommit(false); + Statement stat = conn.createStatement(); + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO TEST VALUES(?, ?)"); + int index = 1; + prep.setInt(index++, 1); + prep.setString(index++, "test1"); + prep.execute(); + conn.commit(); + // no error expected + + conn.setAutoCommit(true); + index = 1; + prep.setInt(index++, 2); + prep.setString(index++, "test2"); + if (expectedPropertyEnabled) { + prep.execute(); + try { + conn.commit(); + throw new AssertionError("SQLException expected"); + } catch (SQLException e) { + assertTrue(e.getMessage().contains("commit()")); + 
assertEquals(ErrorCode.METHOD_DISABLED_ON_AUTOCOMMIT_TRUE, e.getErrorCode()); + } + ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM TEST"); + rs.next(); + assertTrue(rs.getInt(1) == 2); + rs.close(); + } else { + prep.execute(); + conn.commit(); + ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM TEST"); + rs.next(); + assertTrue(rs.getInt(1) == 2); + rs.close(); + } + + conn.close(); + prep.close(); + } + + private void testChangeTransactionLevelCommitRunner() throws Exception { + assertFalse("Default value must be false", SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT); + testChangeTransactionLevelCommit(false); + testChangeTransactionLevelCommit(true); + try { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = true; + testChangeTransactionLevelCommit(true); + testChangeTransactionLevelCommit(false); + } finally { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = false; } + } + + private void testChangeTransactionLevelCommit(boolean setAutoCommit) throws Exception { + Connection conn = getConnection("clientInfo"); + conn.setAutoCommit(setAutoCommit); + Statement stat = conn.createStatement(); + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO TEST VALUES(?, ?)"); + int index = 1; + prep.setInt(index++, 1); + prep.setString(index++, "test1"); + prep.execute(); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); + + conn.createStatement().executeQuery("SELECT COUNT(*) FROM TEST"); + // throws exception if TransactionIsolation did not commit + + conn.close(); + prep.close(); + } + + private void testRollbackOnAutoCommitSetRunner() throws Exception { + assertFalse("Default value must be false", SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT); + testRollbackOnAutoCommitSet(false); + try { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = true; + testRollbackOnAutoCommitSet(true); 
+ } finally { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = false; + } + } + + private void testRollbackOnAutoCommitSet(boolean expectedPropertyEnabled) throws Exception { + assertEquals(SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT, expectedPropertyEnabled); + Connection conn = getConnection("clientInfo"); + conn.setAutoCommit(false); + Statement stat = conn.createStatement(); + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO TEST VALUES(?, ?)"); + int index = 1; + prep.setInt(index++, 1); + prep.setString(index++, "test1"); + prep.execute(); + conn.rollback(); + // no error expected + + + conn.setAutoCommit(true); + index = 1; + prep.setInt(index++, 2); + prep.setString(index++, "test2"); + if (expectedPropertyEnabled) { + prep.execute(); + try { + conn.rollback(); + throw new AssertionError("SQLException expected"); + } catch (SQLException e) { + assertEquals(ErrorCode.METHOD_DISABLED_ON_AUTOCOMMIT_TRUE, e.getErrorCode()); + assertTrue(e.getMessage().contains("rollback()")); + } + ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM TEST"); + rs.next(); + int count = rs.getInt(1); + assertTrue("Found " +count + " rows", count == 1); + rs.close(); + } else { + prep.execute(); + // rollback is permitted, however has no effects in autocommit=true + conn.rollback(); + ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM TEST"); + rs.next(); + int count = rs.getInt(1); + assertTrue("Found " + count + " rows", count == 1); + rs.close(); + } + + conn.close(); + prep.close(); + } + + private void testSetGetSchema() throws SQLException { deleteDb("schemaSetGet"); Connection conn = getConnection("schemaSetGet"); Statement s = conn.createStatement(); s.executeUpdate("create schema my_test_schema"); - s.executeUpdate("create table my_test_schema.my_test_table(id uuid, nave varchar)"); + 
s.executeUpdate("create table my_test_schema.my_test_table(id int, nave varchar) as values (1, 'a')"); assertEquals("PUBLIC", conn.getSchema()); assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, s, "select * from my_test_table"); assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, conn).setSchema("my_test_table"); conn.setSchema("MY_TEST_SCHEMA"); assertEquals("MY_TEST_SCHEMA", conn.getSchema()); - s.executeQuery("select * from my_test_table"); + try (ResultSet rs = s.executeQuery("select * from my_test_table")) { + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertEquals("a", rs.getString(2)); + assertFalse(rs.next()); + } assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, conn).setSchema("NON_EXISTING_SCHEMA"); assertEquals("MY_TEST_SCHEMA", conn.getSchema()); s.executeUpdate("create schema \"otheR_schEma\""); + s.executeUpdate("create table \"otheR_schEma\".my_test_table(id int, nave varchar) as values (2, 'b')"); conn.setSchema("otheR_schEma"); assertEquals("otheR_schEma", conn.getSchema()); + try (ResultSet rs = s.executeQuery("select * from my_test_table")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertEquals("b", rs.getString(2)); + assertFalse(rs.next()); + } + s.execute("SET SCHEMA \"MY_TEST_SCHEMA\""); + assertEquals("MY_TEST_SCHEMA", conn.getSchema()); s.close(); conn.close(); + deleteDb("schemaSetGet"); + } + + private void testLockTimeout() throws SQLException { + deleteDb("lockTimeout"); + try (Connection conn1 = getConnection("lockTimeout"); + Connection conn2 = getConnection("lockTimeout;LOCK_TIMEOUT=6000")) { + conn1.setAutoCommit(false); + conn2.setAutoCommit(false); + Statement s1 = conn1.createStatement(); + Statement s2 = conn2.createStatement(); + s1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT) AS VALUES (1, 2)"); + conn1.commit(); + s2.execute("INSERT INTO TEST VALUES (2, 4)"); + s1.execute("UPDATE TEST SET V = 3 WHERE ID = 1"); + s2.execute("SET LOCK_TIMEOUT 50"); + long n = System.nanoTime(); + 
assertThrows(ErrorCode.LOCK_TIMEOUT_1, s2).execute("UPDATE TEST SET V = 4 WHERE ID = 1"); + if (System.nanoTime() - n > 5_000_000_000L) { + fail("LOCK_TIMEOUT wasn't set"); + } + } finally { + deleteDb("lockTimeout"); + } + } + + private void testIgnoreUnknownSettings() throws SQLException { + deleteDb("ignoreUnknownSettings"); + assertThrows(ErrorCode.UNSUPPORTED_SETTING_1, () -> getConnection("ignoreUnknownSettings;A=1")); + try (Connection c = getConnection("ignoreUnknownSettings;IGNORE_UNKNOWN_SETTINGS=TRUE;A=1")) { + } finally { + deleteDb("ignoreUnknownSettings"); + } + } + + private void testTimeZone() throws SQLException { + deleteDb("timeZone"); + String tz1 = "Europe/London", tz2 = "Europe/Paris", tz3 = "Asia/Tokyo"; + try (Connection c = getConnection("timeZone")) { + TimeZone tz = TimeZone.getDefault(); + try { + TimeZone.setDefault(TimeZone.getTimeZone(tz1)); + DateTimeUtils.resetCalendar(); + try (Connection c1 = getConnection("timeZone")) { + TimeZone.setDefault(TimeZone.getTimeZone(tz2)); + DateTimeUtils.resetCalendar(); + try (Connection c2 = getConnection("timeZone"); + Connection c3 = getConnection("timeZone;TIME ZONE=" + tz3)) { + checkTimeZone(tz1, c1); + checkTimeZone(tz2, c2); + checkTimeZone(tz3, c3); + } + } + } finally { + TimeZone.setDefault(tz); + DateTimeUtils.resetCalendar(); + } + } finally { + deleteDb("timeZone"); + } } + + private void checkTimeZone(String expected, Connection conn) throws SQLException { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery( + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'TIME ZONE'"); + rs.next(); + assertEquals(expected, rs.getString(1)); + } + } diff --git a/h2/src/test/org/h2/test/jdbc/TestCustomDataTypesHandler.java b/h2/src/test/org/h2/test/jdbc/TestCustomDataTypesHandler.java deleted file mode 100644 index 1aebdcefeb..0000000000 --- a/h2/src/test/org/h2/test/jdbc/TestCustomDataTypesHandler.java +++ /dev/null @@ -1,595 +0,0 @@ -/* - * 
Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.jdbc; - -import java.io.Serializable; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Types; -import java.text.DecimalFormat; -import java.util.Locale; -import org.h2.api.CustomDataTypesHandler; -import org.h2.api.ErrorCode; -import org.h2.engine.Mode; -import org.h2.message.DbException; -import org.h2.store.DataHandler; -import org.h2.test.TestBase; -import org.h2.test.TestDb; -import org.h2.util.JdbcUtils; -import org.h2.util.StringUtils; -import org.h2.value.CompareMode; -import org.h2.value.DataType; -import org.h2.value.ExtTypeInfo; -import org.h2.value.TypeInfo; -import org.h2.value.Value; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDouble; -import org.h2.value.ValueJavaObject; -import org.h2.value.ValueString; - -/** - * Tests {@link CustomDataTypesHandler}. - */ -public class TestCustomDataTypesHandler extends TestDb { - - /** - * The database name. - */ - public final static String DB_NAME = "customDataTypes"; - - /** - * The system property name. - */ - public final static String HANDLER_NAME_PROPERTY = "h2.customDataTypesHandler"; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - System.setProperty(HANDLER_NAME_PROPERTY, TestOnlyCustomDataTypesHandler.class.getName()); - TestBase test = createCaller().init(); - test.config.traceTest = true; - test.config.memory = true; - test.config.networked = true; - test.config.beforeTest(); - test.test(); - test.config.afterTest(); - System.clearProperty(HANDLER_NAME_PROPERTY); - } - - @Override - public void test() throws Exception { - try { - JdbcUtils.customDataTypesHandler = new TestOnlyCustomDataTypesHandler(); - - deleteDb(DB_NAME); - Connection conn = getConnection(DB_NAME); - - Statement stat = conn.createStatement(); - - //Test cast - ResultSet rs = stat.executeQuery("select CAST('1-1i' AS complex) + '1+1i' "); - rs.next(); - assertTrue(rs.getObject(1).equals(new ComplexNumber(2, 0))); - - //Test create table - stat.execute("create table t(id int, val complex)"); - rs = conn.getMetaData().getColumns(null, null, "T", "VAL"); - rs.next(); - assertEquals(rs.getString("TYPE_NAME"), "complex"); - assertEquals(rs.getInt("DATA_TYPE"), Types.JAVA_OBJECT); - - rs = stat.executeQuery("select val from t"); - assertEquals(ComplexNumber.class.getName(), rs.getMetaData().getColumnClassName(1)); - - //Test insert - PreparedStatement stmt = conn.prepareStatement( - "insert into t(id, val) values (0, '1.0+1.0i'), (1, ?), (2, ?), (3, ?)"); - stmt.setObject(1, new ComplexNumber(1, -1)); - stmt.setObject(2, "5.0+2.0i"); - stmt.setObject(3, 100.1); - stmt.executeUpdate(); - - //Test selects - ComplexNumber[] expected = new ComplexNumber[4]; - expected[0] = new ComplexNumber(1, 1); - expected[1] = new ComplexNumber(1, -1); - expected[2] = new ComplexNumber(5, 2); - expected[3] = new ComplexNumber(100.1, 0); - - for (int id = 0; id < expected.length; ++id) { - PreparedStatement prepStat =conn.prepareStatement( - "select val from t where id = ?"); - prepStat.setInt(1, id); - rs = prepStat.executeQuery(); - assertTrue(rs.next()); - assertTrue(rs.getObject(1).equals(expected[id])); - } - - for 
(int id = 0; id < expected.length; ++id) { - PreparedStatement prepStat = conn.prepareStatement( - "select id from t where val = ?"); - prepStat.setObject(1, expected[id]); - rs = prepStat.executeQuery(); - assertTrue(rs.next()); - assertEquals(rs.getInt(1), id); - } - - // Repeat selects with index - stat.execute("create index val_idx on t(val)"); - - for (int id = 0; id < expected.length; ++id) { - PreparedStatement prepStat = conn.prepareStatement( - "select id from t where val = ?"); - prepStat.setObject(1, expected[id]); - rs = prepStat.executeQuery(); - assertTrue(rs.next()); - assertEquals(rs.getInt(1), id); - } - - // sum function - rs = stat.executeQuery("select sum(val) from t"); - rs.next(); - assertTrue(rs.getObject(1).equals(new ComplexNumber(107.1, 2))); - - // user function - stat.execute("create alias complex_mod for \"" - + getClass().getName() + ".complexMod\""); - rs = stat.executeQuery("select complex_mod(val) from t where id=2"); - rs.next(); - assertEquals(complexMod(expected[2]), rs.getDouble(1)); - - conn.close(); - deleteDb(DB_NAME); - } finally { - JdbcUtils.customDataTypesHandler = null; - } - } - - /** - * The modulus function. - * - * @param val complex number - * @return result - */ - public static double complexMod(ComplexNumber val) { - return val.mod(); - } - - /** - * The custom data types handler to use for this test. 
- */ - public static class TestOnlyCustomDataTypesHandler implements CustomDataTypesHandler { - - /** Type name for complex number */ - public final static String COMPLEX_DATA_TYPE_NAME = "complex"; - - /** Type id for complex number */ - public final static int COMPLEX_DATA_TYPE_ID = 1000; - - /** Order for complex number data type */ - public final static int COMPLEX_DATA_TYPE_ORDER = 100_000; - - /** Cached DataType instance for complex number */ - public final DataType complexDataType; - - /** */ - public TestOnlyCustomDataTypesHandler() { - complexDataType = createComplex(); - } - - @Override - public DataType getDataTypeByName(String name) { - if (name.toLowerCase(Locale.ENGLISH).equals(COMPLEX_DATA_TYPE_NAME)) { - return complexDataType; - } - return null; - } - - @Override - public DataType getDataTypeById(int type) { - if (type == COMPLEX_DATA_TYPE_ID) { - return complexDataType; - } - return null; - } - - @Override - public TypeInfo getTypeInfoById(int type, long precision, int scale, ExtTypeInfo extTypeInfo) { - return new TypeInfo(type, 0, 0, ValueDouble.DISPLAY_SIZE * 2 + 1, null); - } - - @Override - public String getDataTypeClassName(int type) { - if (type == COMPLEX_DATA_TYPE_ID) { - return ComplexNumber.class.getName(); - } - throw DbException.get( - ErrorCode.UNKNOWN_DATA_TYPE_1, "type:" + type); - } - - @Override - public int getTypeIdFromClass(Class cls) { - if (cls == ComplexNumber.class) { - return COMPLEX_DATA_TYPE_ID; - } - return Value.JAVA_OBJECT; - } - - @Override - public Value convert(Value source, int targetType) { - if (source.getValueType() == targetType) { - return source; - } - if (targetType == COMPLEX_DATA_TYPE_ID) { - switch (source.getValueType()) { - case Value.JAVA_OBJECT: { - assert source instanceof ValueJavaObject; - return ValueComplex.get((ComplexNumber) - JdbcUtils.deserialize(source.getBytesNoCopy(), null)); - } - case Value.STRING: { - assert source instanceof ValueString; - return ValueComplex.get( - 
ComplexNumber.parseComplexNumber(source.getString())); - } - case Value.BYTES: { - assert source instanceof ValueBytes; - return ValueComplex.get((ComplexNumber) - JdbcUtils.deserialize(source.getBytesNoCopy(), null)); - } - case Value.DOUBLE: { - assert source instanceof ValueDouble; - return ValueComplex.get(new ComplexNumber(source.getDouble(), 0)); - } - } - - throw DbException.get( - ErrorCode.DATA_CONVERSION_ERROR_1, source.getString()); - } else { - return source.convertTo(targetType); - } - } - - @Override - public int getDataTypeOrder(int type) { - if (type == COMPLEX_DATA_TYPE_ID) { - return COMPLEX_DATA_TYPE_ORDER; - } - throw DbException.get( - ErrorCode.UNKNOWN_DATA_TYPE_1, "type:" + type); - } - - @Override - public Value getValue(int type, Object data, DataHandler dataHandler) { - if (type == COMPLEX_DATA_TYPE_ID) { - assert data instanceof ComplexNumber; - return ValueComplex.get((ComplexNumber)data); - } - return ValueJavaObject.getNoCopy(data, null, dataHandler); - } - - @Override - public Object getObject(Value value, Class cls) { - if (cls.equals(ComplexNumber.class)) { - if (value.getValueType() == COMPLEX_DATA_TYPE_ID) { - return value.getObject(); - } - return convert(value, COMPLEX_DATA_TYPE_ID).getObject(); - } - throw DbException.get( - ErrorCode.UNKNOWN_DATA_TYPE_1, "type:" + value.getValueType()); - } - - @Override - public boolean supportsAdd(int type) { - if (type == COMPLEX_DATA_TYPE_ID) { - return true; - } - return false; - } - - @Override - public int getAddProofType(int type) { - if (type == COMPLEX_DATA_TYPE_ID) { - return type; - } - throw DbException.get( - ErrorCode.UNKNOWN_DATA_TYPE_1, "type:" + type); - } - - /** Constructs data type instance for complex number type */ - private static DataType createComplex() { - DataType result = new DataType(); - result.type = COMPLEX_DATA_TYPE_ID; - result.name = COMPLEX_DATA_TYPE_NAME; - result.sqlType = Types.JAVA_OBJECT; - return result; - } - } - - /** - * Value type implementation 
that holds the complex number - */ - public static class ValueComplex extends Value { - - private ComplexNumber val; - - /** - * @param val complex number - */ - public ValueComplex(ComplexNumber val) { - assert val != null; - this.val = val; - } - - /** - * Get ValueComplex instance for given ComplexNumber. - * - * @param val complex number - * @return resulting instance - */ - public static ValueComplex get(ComplexNumber val) { - return new ValueComplex(val); - } - - @Override - public StringBuilder getSQL(StringBuilder builder) { - return builder.append(val.toString()); - } - - @Override - public TypeInfo getType() { - return TypeInfo.getTypeInfo(TestOnlyCustomDataTypesHandler.COMPLEX_DATA_TYPE_ID); - } - - @Override - public int getValueType() { - return TestOnlyCustomDataTypesHandler.COMPLEX_DATA_TYPE_ID; - } - - @Override - public String getString() { - return val.toString(); - } - - @Override - public Object getObject() { - return val; - } - - @Override - public void set(PreparedStatement prep, int parameterIndex) throws SQLException { - Object obj = JdbcUtils.deserialize(getBytesNoCopy(), getDataHandler()); - prep.setObject(parameterIndex, obj, Types.JAVA_OBJECT); - } - - @Override - public int compareTypeSafe(Value v, CompareMode mode) { - return val.compare((ComplexNumber) v.getObject()); - } - - @Override - public int hashCode() { - return val.hashCode(); - } - - @Override - public boolean equals(Object other) { - if (other == null) { - return false; - } - if (!(other instanceof ValueComplex)) { - return false; - } - ValueComplex complex = (ValueComplex)other; - return complex.val.equals(val); - } - - @Override - protected Value convertTo(int targetType, Mode mode, Object column, ExtTypeInfo extTypeInfo) { - if (getValueType() == targetType) { - return this; - } - switch (targetType) { - case Value.BYTES: { - return ValueBytes.getNoCopy(JdbcUtils.serialize(val, null)); - } - case Value.STRING: { - return ValueString.get(val.toString()); - } - case 
Value.DOUBLE: { - assert val.im == 0; - return ValueDouble.get(val.re); - } - case Value.JAVA_OBJECT: { - return ValueJavaObject.getNoCopy(JdbcUtils.serialize(val, null)); - } - } - - throw DbException.get( - ErrorCode.DATA_CONVERSION_ERROR_1, getString()); - } - - @Override - public Value add(Value value) { - ValueComplex v = (ValueComplex)value; - return ValueComplex.get(val.add(v.val)); - } - } - - /** - * Complex number - */ - public static class ComplexNumber implements Serializable { - /** */ - private static final long serialVersionUID = 1L; - - /** */ - public final static DecimalFormat REAL_FMT = new DecimalFormat("###.###"); - - /** */ - public final static DecimalFormat IMG_FMT = new DecimalFormat("+###.###i;-###.###i"); - - /** - * Real part - */ - double re; - - /** - * Imaginary part - */ - double im; - - /** - * @param re real part - * @param im imaginary part - */ - public ComplexNumber(double re, double im) { - this.re = re; - this.im = im; - } - - /** - * Addition - * @param other value to add - * @return result - */ - public ComplexNumber add(ComplexNumber other) { - return new ComplexNumber(re + other.re, im + other.im); - } - - /** - * Returns modulus - * @return result - */ - public double mod() { - return Math.sqrt(re * re + im * im); - } - - /** - * Compares two complex numbers - * - * True ordering of complex number has no sense, - * so we apply lexicographical order. - * - * @param v number to compare this with - * @return result of comparison - */ - public int compare(ComplexNumber v) { - if (re == v.re && im == v.im) { - return 0; - } - if (re == v.re) { - return im > v.im ? 
1 : -1; - } else if (re > v.re) { - return 1; - } else { - return -1; - } - } - - @Override - public int hashCode() { - return (int)re | (int)im; - } - - @Override - public boolean equals(Object other) { - if (other == null) { - return false; - } - if (!(other instanceof ComplexNumber)) { - return false; - } - ComplexNumber complex = (ComplexNumber)other; - return (re==complex.re) && (im == complex.im); - } - - @Override - public String toString() { - if (im == 0.0) { - return REAL_FMT.format(re); - } - if (re == 0.0) { - return IMG_FMT.format(im); - } - return REAL_FMT.format(re) + "" + IMG_FMT.format(im); - } - - /** - * Simple parser for complex numbers. Both real and im components - * must be written in non scientific notation. - * @param s String. - * @return {@link ComplexNumber} object. - */ - public static ComplexNumber parseComplexNumber(String s) { - if (StringUtils.isNullOrEmpty(s)) - return null; - - s = s.replaceAll("\\s", ""); - - boolean hasIm = (s.charAt(s.length() - 1) == 'i'); - int signs = 0; - - int pos = 0; - - int maxSignPos = -1; - - while (pos != -1) { - pos = s.indexOf('-', pos); - if (pos != -1) { - signs++; - maxSignPos = Math.max(maxSignPos, pos++); - } - } - pos = 0; - - while (pos != -1) { - pos = s.indexOf('+', pos); - if (pos != -1) { - signs++; - maxSignPos = Math.max(maxSignPos, pos++); - } - } - - if (signs > 2 || (signs == 2 && !hasIm)) - throw new NumberFormatException(); - double real; - double im; - - if (signs == 0 || (signs == 1 && maxSignPos == 0)) { - if (hasIm) { - real = 0; - if (signs == 0 && s.length() == 1) { - im = 1.0; - } else if (signs > 0 && s.length() == 2) { - im = (s.charAt(0) == '-') ? -1.0 : 1.0; - } else { - im = Double.parseDouble(s.substring(0, s.length() - 1)); - } - } else { - real = Double.parseDouble(s); - im = 0; - } - } else { - real = Double.parseDouble(s.substring(0, maxSignPos)); - if (s.length() - maxSignPos == 2) { - im = (s.charAt(maxSignPos) == '-') ? 
-1.0 : 1.0; - } else { - im = Double.parseDouble(s.substring(maxSignPos, s.length() - 1)); - } - } - - return new ComplexNumber(real, im); - } - } -} diff --git a/h2/src/test/org/h2/test/jdbc/TestDatabaseEventListener.java b/h2/src/test/org/h2/test/jdbc/TestDatabaseEventListener.java index def81a1bd2..072fe14280 100644 --- a/h2/src/test/org/h2/test/jdbc/TestDatabaseEventListener.java +++ b/h2/src/test/org/h2/test/jdbc/TestDatabaseEventListener.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -13,7 +13,6 @@ import org.h2.Driver; import org.h2.api.DatabaseEventListener; -import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -35,7 +34,7 @@ public class TestDatabaseEventListener extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -79,21 +78,6 @@ public void opened() { } } - @Override - public void closingDatabase() { - // nothing to do - } - - @Override - public void exceptionThrown(SQLException e, String sql) { - // nothing to do - } - - @Override - public void setProgress(int state, String name, int x, int max) { - // nothing to do - } - } private void testInit() throws SQLException { @@ -119,31 +103,28 @@ private void testIndexRebuiltOnce() throws SQLException { Properties p = new Properties(); p.setProperty("user", user); p.setProperty("password", password); - Connection conn; Statement stat; - conn = DriverManager.getConnection(url, p); - stat = conn.createStatement(); - // the old.id index head is at position 0 - stat.execute("create table old(id identity) as select 1"); - // the test.id index head is at position 1 - stat.execute("create table test(id identity) as select 1"); - conn.close(); - conn = DriverManager.getConnection(url, p); - stat = conn.createStatement(); - // free up space at position 0 - stat.execute("drop table old"); - stat.execute("insert into test values(2)"); - stat.execute("checkpoint sync"); - stat.execute("shutdown immediately"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); + try (Connection conn = DriverManager.getConnection(url, p)) { + stat = conn.createStatement(); + // the old.id index head is at position 0 + stat.execute("create table old(id identity) as select 1"); + // the test.id index head is at position 1 + stat.execute("create table test(id identity) as select 1"); + } + try (Connection conn = DriverManager.getConnection(url, p)) { + stat = conn.createStatement(); + // free up space at position 0 + stat.execute("drop table old"); + stat.execute("insert into test values(2)"); + stat.execute("checkpoint sync"); + stat.execute("shutdown immediately"); + } // now the index should be re-built - conn = 
DriverManager.getConnection(url, p); - conn.close(); + try (Connection conn = DriverManager.getConnection(url, p)) {/**/} calledCreateIndex = false; p.put("DATABASE_EVENT_LISTENER", MyDatabaseEventListener.class.getName()); - conn = org.h2.Driver.load().connect(url, p); - conn.close(); + try (Connection conn = org.h2.Driver.load().connect(url, p)) {/**/} assertFalse(calledCreateIndex); } @@ -248,31 +229,20 @@ private void testCalledForStatement() throws SQLException { /** * The database event listener for this test. */ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { + public static final class MyDatabaseEventListener implements DatabaseEventListener { @Override public void closingDatabase() { calledClosingDatabase = true; } - @Override - public void exceptionThrown(SQLException e, String sql) { - // nothing to do - } - - @Override - public void init(String url) { - // nothing to do - } - @Override public void opened() { calledOpened = true; } @Override - public void setProgress(int state, String name, int x, int max) { + public void setProgress(int state, String name, long x, long max) { if (state == DatabaseEventListener.STATE_SCAN_FILE) { calledScan = true; } diff --git a/h2/src/test/org/h2/test/jdbc/TestDriver.java b/h2/src/test/org/h2/test/jdbc/TestDriver.java index f63e4eac31..64a7eb0fa9 100644 --- a/h2/src/test/org/h2/test/jdbc/TestDriver.java +++ b/h2/src/test/org/h2/test/jdbc/TestDriver.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -12,6 +12,7 @@ import java.util.Properties; import org.h2.Driver; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -26,13 +27,14 @@ public class TestDriver extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { testSettingsAsProperties(); testDriverObject(); + testURLs(); } private void testSettingsAsProperties() throws Exception { @@ -45,9 +47,9 @@ private void testSettingsAsProperties() throws Exception { Connection conn = DriverManager.getConnection(url, prop); ResultSet rs; rs = conn.createStatement().executeQuery( - "select * from information_schema.settings where name='MAX_COMPACT_TIME'"); + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'MAX_COMPACT_TIME'"); rs.next(); - assertEquals(1234, rs.getInt(2)); + assertEquals(1234, rs.getInt(1)); conn.close(); } @@ -55,14 +57,16 @@ private void testDriverObject() throws Exception { Driver instance = Driver.load(); assertTrue(DriverManager.getDriver("jdbc:h2:~/test") == instance); Driver.unload(); - try { - java.sql.Driver d = DriverManager.getDriver("jdbc:h2:~/test"); - fail(d.toString()); - } catch (SQLException e) { - // ignore - } + assertThrows(SQLException.class, () -> DriverManager.getDriver("jdbc:h2:~/test")); Driver.load(); assertTrue(DriverManager.getDriver("jdbc:h2:~/test") == instance); } + private void testURLs() throws Exception { + java.sql.Driver instance = Driver.load(); + assertThrows(ErrorCode.URL_FORMAT_ERROR_2, instance).acceptsURL(null); + assertThrows(ErrorCode.URL_FORMAT_ERROR_2, instance).connect(null, null); + assertNull(instance.connect("jdbc:unknown", null)); + } + } diff --git a/h2/src/test/org/h2/test/jdbc/TestGetGeneratedKeys.java 
b/h2/src/test/org/h2/test/jdbc/TestGetGeneratedKeys.java index 12896574bb..ebc356548c 100644 --- a/h2/src/test/org/h2/test/jdbc/TestGetGeneratedKeys.java +++ b/h2/src/test/org/h2/test/jdbc/TestGetGeneratedKeys.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -13,9 +13,7 @@ import java.sql.Statement; import java.util.UUID; -import org.h2.api.Trigger; -import org.h2.jdbc.JdbcPreparedStatement; -import org.h2.jdbc.JdbcStatement; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -24,30 +22,6 @@ */ public class TestGetGeneratedKeys extends TestDb { - public static class TestGetGeneratedKeysTrigger implements Trigger { - - @Override - public void close() throws SQLException { - } - - @Override - public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { - if (newRow[0] == null) { - newRow[0] = UUID.randomUUID(); - } - } - - @Override - public void init(Connection conn, String schemaName, String triggerName, String tableName, boolean before, - int type) throws SQLException { - } - - @Override - public void remove() throws SQLException { - } - - } - /** * Run just this test. * @@ -55,7 +29,7 @@ public void remove() throws SQLException { * ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -63,11 +37,14 @@ public void test() throws Exception { deleteDb("getGeneratedKeys"); Connection conn = getConnection("getGeneratedKeys"); testBatchAndMergeInto(conn); - testCalledSequences(conn); + testPrimaryKey(conn); testInsertWithSelect(conn); + testUpdate(conn); testMergeUsing(conn); + testWrongStatement(conn); testMultithreaded(conn); testNameCase(conn); + testColumnNotFound(conn); testPrepareStatement_Execute(conn); testPrepareStatement_ExecuteBatch(conn); @@ -103,7 +80,6 @@ public void test() throws Exception { testStatementExecuteUpdate_intArray(conn); testStatementExecuteUpdate_StringArray(conn); - testTrigger(conn); conn.close(); deleteDb("getGeneratedKeys"); } @@ -118,8 +94,8 @@ public void test() throws Exception { */ private void testBatchAndMergeInto(Connection conn) throws Exception { Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(ID BIGINT AUTO_INCREMENT, UID UUID DEFAULT RANDOM_UUID(), VALUE INT)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (?), (?)", + stat.execute("CREATE TABLE TEST(ID BIGINT AUTO_INCREMENT, UID UUID DEFAULT RANDOM_UUID(), V INT)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (?), (?)", Statement.RETURN_GENERATED_KEYS); prep.setInt(1, 1); prep.setInt(2, 2); @@ -152,103 +128,49 @@ private void testBatchAndMergeInto(Connection conn) throws Exception { assertFalse(u1.equals(u2)); assertFalse(u2.equals(u3)); assertFalse(u3.equals(u4)); - prep = conn.prepareStatement("MERGE INTO TEST(ID, VALUE) KEY(ID) VALUES (?, ?)", + prep = conn.prepareStatement("MERGE INTO TEST(ID, V) KEY(ID) VALUES (?, ?)", Statement.RETURN_GENERATED_KEYS); prep.setInt(1, 2); prep.setInt(2, 10); prep.execute(); rs = prep.getGeneratedKeys(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertEquals(u2, rs.getObject(2)); 
assertFalse(rs.next()); prep.setInt(1, 5); prep.executeUpdate(); rs = prep.getGeneratedKeys(); rs.next(); - assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(Long.class, rs.getObject(1).getClass()); + assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); stat.execute("DROP TABLE TEST"); } /** - * Test for keys generated by sequences. + * Test for PRIMARY KEY columns. * * @param conn * connection * @throws Exception * on exception */ - private void testCalledSequences(Connection conn) throws Exception { + private void testPrimaryKey(Connection conn) throws Exception { Statement stat = conn.createStatement(); - - stat.execute("CREATE SEQUENCE SEQ"); - stat.execute("CREATE TABLE TEST(ID INT)"); - PreparedStatement prep; - - int expected = 1; - expected = testCalledSequencesImpl(conn, "INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", expected); - expected = testCalledSequencesImpl(conn, "MERGE INTO TEST KEY(ID) VALUES(NEXT VALUE FOR SEQ)", expected); - expected = testCalledSequencesImpl(conn, "INSERT INTO TEST VALUES(NEXTVAL('SEQ'))", expected); - testCalledSequencesImpl(conn, "MERGE INTO TEST KEY(ID) VALUES(NEXTVAL('SEQ'))", expected); - - stat.execute("DROP TABLE TEST"); - stat.execute("DROP SEQUENCE SEQ"); - - ResultSet rs; - stat.execute("CREATE TABLE TEST(ID BIGINT)"); - stat.execute("CREATE SEQUENCE SEQ"); - prep = conn.prepareStatement("INSERT INTO TEST VALUES (30), (NEXT VALUE FOR SEQ)," - + " (NEXTVAL('SEQ')), (NEXT VALUE FOR SEQ), (20)", Statement.RETURN_GENERATED_KEYS); + stat.execute("CREATE TABLE TEST(ID BIGINT PRIMARY KEY, V INT)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(ID, V) VALUES (?, ?)", + Statement.RETURN_GENERATED_KEYS); + prep.setLong(1, 10); + prep.setInt(2, 100); prep.executeUpdate(); - rs = prep.getGeneratedKeys(); - rs.next(); - assertEquals(1L, rs.getLong(1)); - rs.next(); - assertEquals(2L, rs.getLong(1)); - rs.next(); - assertEquals(3L, rs.getLong(1)); - 
assertFalse(rs.next()); - stat.execute("DROP TABLE TEST"); - stat.execute("DROP SEQUENCE SEQ"); - } - - private int testCalledSequencesImpl(Connection conn, String sql, int expected) throws SQLException { - PreparedStatement prep; - prep = conn.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS); - prep.execute(); ResultSet rs = prep.getGeneratedKeys(); rs.next(); - assertEquals(expected++, rs.getInt(1)); - assertFalse(rs.next()); - - prep = conn.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS); - prep.execute(); - rs = prep.getGeneratedKeys(); - rs.next(); - assertEquals(expected++, rs.getInt(1)); - assertFalse(rs.next()); - - prep = conn.prepareStatement(sql, new int[] { 1 }); - prep.execute(); - rs = prep.getGeneratedKeys(); - rs.next(); - assertEquals(expected++, rs.getInt(1)); - assertFalse(rs.next()); - - prep = conn.prepareStatement(sql, new String[] { "ID" }); - prep.execute(); - rs = prep.getGeneratedKeys(); - rs.next(); - assertEquals(expected++, rs.getInt(1)); + assertEquals(10L, rs.getLong(1)); assertFalse(rs.next()); - - prep = conn.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, - ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); - prep.execute(); - rs = prep.getGeneratedKeys(); - rs.next(); - expected++; - assertFalse(rs.next()); - - return expected; + assertEquals(1, rs.getMetaData().getColumnCount()); + rs.close(); + stat.execute("DROP TABLE TEST"); } /** @@ -261,9 +183,9 @@ private int testCalledSequencesImpl(Connection conn, String sql, int expected) t */ private void testInsertWithSelect(Connection conn) throws Exception { Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, VALUE INT NOT NULL)"); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, V INT NOT NULL)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) SELECT 10", + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) SELECT 10", 
Statement.RETURN_GENERATED_KEYS); prep.executeUpdate(); ResultSet rs = prep.getGeneratedKeys(); @@ -274,6 +196,30 @@ private void testInsertWithSelect(Connection conn) throws Exception { stat.execute("DROP TABLE TEST"); } + /** + * Test method for UPDATE operator. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, V INT NOT NULL)"); + stat.execute("INSERT INTO TEST(V) VALUES 10"); + PreparedStatement prep = conn.prepareStatement("UPDATE TEST SET V = ? WHERE V = ?", + Statement.RETURN_GENERATED_KEYS); + prep.setInt(1, 20); + prep.setInt(2, 10); + assertEquals(1, prep.executeUpdate()); + ResultSet rs = prep.getGeneratedKeys(); + assertTrue(rs.next()); + assertEquals(1, rs.getLong(1)); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + /** * Test method for MERGE USING operator. * @@ -285,17 +231,17 @@ private void testInsertWithSelect(Connection conn) throws Exception { private void testMergeUsing(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE SOURCE (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + " UID INT NOT NULL UNIQUE, VALUE INT NOT NULL)"); + + " UID INT NOT NULL UNIQUE, V INT NOT NULL)"); stat.execute("CREATE TABLE DESTINATION (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + " UID INT NOT NULL UNIQUE, VALUE INT NOT NULL)"); - PreparedStatement ps = conn.prepareStatement("INSERT INTO SOURCE(UID, VALUE) VALUES (?, ?)"); + + " UID INT NOT NULL UNIQUE, V INT NOT NULL)"); + PreparedStatement ps = conn.prepareStatement("INSERT INTO SOURCE(UID, V) VALUES (?, ?)"); for (int i = 1; i <= 100; i++) { ps.setInt(1, i); ps.setInt(2, i * 10 + 5); ps.executeUpdate(); } // Insert first half of a rows with different values - ps = conn.prepareStatement("INSERT INTO DESTINATION(UID, VALUE) VALUES (?, ?)"); + ps = 
conn.prepareStatement("INSERT INTO DESTINATION(UID, V) VALUES (?, ?)"); for (int i = 1; i <= 50; i++) { ps.setInt(1, i); ps.setInt(2, i * 10); @@ -304,21 +250,20 @@ private void testMergeUsing(Connection conn) throws Exception { // And merge second half into it, first half will be updated with a new values ps = conn.prepareStatement( "MERGE INTO DESTINATION USING SOURCE ON (DESTINATION.UID = SOURCE.UID)" - + " WHEN MATCHED THEN UPDATE SET VALUE = SOURCE.VALUE" - + " WHEN NOT MATCHED THEN INSERT (UID, VALUE) VALUES (SOURCE.UID, SOURCE.VALUE)", + + " WHEN MATCHED THEN UPDATE SET V = SOURCE.V" + + " WHEN NOT MATCHED THEN INSERT (UID, V) VALUES (SOURCE.UID, SOURCE.V)", Statement.RETURN_GENERATED_KEYS); // All rows should be either updated or inserted assertEquals(100, ps.executeUpdate()); ResultSet rs = ps.getGeneratedKeys(); - // Only 50 keys for inserted rows should be generated - for (int i = 1; i <= 50; i++) { + for (int i = 1; i <= 100; i++) { assertTrue(rs.next()); - assertEquals(i + 50, rs.getLong(1)); + assertEquals(i, rs.getLong(1)); } assertFalse(rs.next()); rs.close(); // Check merged data - rs = stat.executeQuery("SELECT ID, UID, VALUE FROM DESTINATION ORDER BY ID"); + rs = stat.executeQuery("SELECT ID, UID, V FROM DESTINATION ORDER BY ID"); for (int i = 1; i <= 100; i++) { assertTrue(rs.next()); assertEquals(i, rs.getLong(1)); @@ -330,6 +275,29 @@ private void testMergeUsing(Connection conn) throws Exception { stat.execute("DROP TABLE DESTINATION"); } + /** + * Test method for incompatible statements. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testWrongStatement(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, V INT)"); + stat.execute("INSERT INTO TEST(V) VALUES 10, 20, 30"); + stat.execute("DELETE FROM TEST WHERE V = 10", Statement.RETURN_GENERATED_KEYS); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("TRUNCATE TABLE TEST", Statement.RETURN_GENERATED_KEYS); + rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + /** * Test method for shared connection between several statements in different * threads. @@ -341,7 +309,7 @@ private void testMergeUsing(Connection conn) throws Exception { */ private void testMultithreaded(final Connection conn) throws Exception { Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + "VALUE INT NOT NULL)"); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, V INT NOT NULL)"); final int count = 4, iterations = 10_000; Thread[] threads = new Thread[count]; final long[] keys = new long[count * iterations]; @@ -351,7 +319,7 @@ private void testMultithreaded(final Connection conn) throws Exception { @Override public void run() { try { - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (?)", + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (?)", Statement.RETURN_GENERATED_KEYS); for (int i = 0; i < iterations; i++) { int value = iterations * num + i; @@ -374,7 +342,7 @@ public void run() { for (int i = 0; i < count; i++) { threads[i].join(); } - ResultSet rs = stat.executeQuery("SELECT VALUE, ID FROM TEST ORDER BY VALUE"); + ResultSet rs = stat.executeQuery("SELECT V, ID FROM TEST ORDER BY V"); for (int i = 0; i < keys.length; i++) { 
assertTrue(rs.next()); assertEquals(i, rs.getInt(1)); @@ -396,9 +364,9 @@ public void run() { private void testNameCase(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "\"id\" UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); + + "\"id\" UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); // Test columns with only difference in case - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[] { "id", "ID" }); prep.executeUpdate(); ResultSet rs = prep.getGeneratedKeys(); @@ -412,26 +380,43 @@ private void testNameCase(Connection conn) throws Exception { rs.close(); // Test lower case name of upper case column stat.execute("ALTER TABLE TEST DROP COLUMN \"id\""); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", new String[] { "id" }); - prep.executeUpdate(); - rs = prep.getGeneratedKeys(); - assertEquals(1, rs.getMetaData().getColumnCount()); - assertEquals("ID", rs.getMetaData().getColumnName(1)); - assertTrue(rs.next()); - assertEquals(2L, rs.getLong(1)); - assertFalse(rs.next()); - rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "id" }); + testNameCase1(prep, 2L, true); // Test upper case name of lower case column stat.execute("ALTER TABLE TEST ALTER COLUMN ID RENAME TO \"id\""); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", new String[] { "ID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "ID" }); + testNameCase1(prep, 3L, false); + stat.execute("DROP TABLE TEST"); + } + + private void testNameCase1(PreparedStatement prep, long id, boolean upper) throws SQLException { prep.executeUpdate(); - rs = prep.getGeneratedKeys(); + ResultSet rs = prep.getGeneratedKeys(); assertEquals(1, 
rs.getMetaData().getColumnCount()); - assertEquals("id", rs.getMetaData().getColumnName(1)); + assertEquals(upper ? "ID" : "id", rs.getMetaData().getColumnName(1)); assertTrue(rs.next()); - assertEquals(3L, rs.getLong(1)); + assertEquals(id, rs.getLong(1)); assertFalse(rs.next()); rs.close(); + } + + /** + * Test method for column not found exception. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testColumnNotFound(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, V INT NOT NULL)"); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat).execute("INSERT INTO TEST(V) VALUES (1)", // + new int[] { 0 }); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat).execute("INSERT INTO TEST(V) VALUES (1)", // + new int[] { 3 }); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat).execute("INSERT INTO TEST(V) VALUES (1)", // + new String[] { "X" }); stat.execute("DROP TABLE TEST"); } @@ -448,8 +433,8 @@ private void testNameCase(Connection conn) throws Exception { private void testPrepareStatement_Execute(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)"); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)"); prep.execute(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); @@ -470,8 +455,8 @@ private void testPrepareStatement_Execute(Connection conn) throws Exception { private void testPrepareStatement_ExecuteBatch(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT 
NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)"); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)"); prep.addBatch(); prep.addBatch(); prep.executeBatch(); @@ -494,9 +479,8 @@ private void testPrepareStatement_ExecuteBatch(Connection conn) throws Exception private void testPrepareStatement_ExecuteLargeBatch(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - JdbcPreparedStatement prep = (JdbcPreparedStatement) conn - .prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)"); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)"); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); @@ -519,9 +503,8 @@ private void testPrepareStatement_ExecuteLargeBatch(Connection conn) throws Exce private void testPrepareStatement_ExecuteLargeUpdate(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - JdbcPreparedStatement prep = (JdbcPreparedStatement) conn - .prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)"); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)"); prep.executeLargeUpdate(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); @@ -542,8 +525,8 @@ private void testPrepareStatement_ExecuteLargeUpdate(Connection conn) throws Exc private void testPrepareStatement_ExecuteUpdate(Connection conn) throws Exception { Statement stat = 
conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)"); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)"); prep.executeUpdate(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); @@ -564,14 +547,14 @@ private void testPrepareStatement_ExecuteUpdate(Connection conn) throws Exceptio private void testPrepareStatement_int_Execute(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL, OTHER INT DEFAULT 0)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", Statement.NO_GENERATED_KEYS); prep.execute(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); prep.execute(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -598,8 +581,8 @@ private void testPrepareStatement_int_Execute(Connection conn) throws Exception private void testPrepareStatement_int_ExecuteBatch(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL, OTHER INT DEFAULT 0)"); - PreparedStatement prep = 
conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", Statement.NO_GENERATED_KEYS); prep.addBatch(); prep.addBatch(); @@ -607,7 +590,7 @@ private void testPrepareStatement_int_ExecuteBatch(Connection conn) throws Excep ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); prep.addBatch(); prep.addBatch(); prep.executeBatch(); @@ -644,17 +627,16 @@ private void testPrepareStatement_int_ExecuteBatch(Connection conn) throws Excep private void testPrepareStatement_int_ExecuteLargeBatch(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL, OTHER INT DEFAULT 0)"); - JdbcPreparedStatement prep = (JdbcPreparedStatement) conn - .prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", Statement.NO_GENERATED_KEYS); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", + Statement.NO_GENERATED_KEYS); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", - Statement.RETURN_GENERATED_KEYS); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); @@ -692,15 +674,14 @@ private void testPrepareStatement_int_ExecuteLargeBatch(Connection 
conn) throws private void testPrepareStatement_int_ExecuteLargeUpdate(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL, OTHER INT DEFAULT 0)"); - JdbcPreparedStatement prep = (JdbcPreparedStatement) conn - .prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", Statement.NO_GENERATED_KEYS); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", + Statement.NO_GENERATED_KEYS); prep.executeLargeUpdate(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", - Statement.RETURN_GENERATED_KEYS); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); prep.executeLargeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -727,14 +708,14 @@ private void testPrepareStatement_int_ExecuteLargeUpdate(Connection conn) throws private void testPrepareStatement_int_ExecuteUpdate(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL, OTHER INT DEFAULT 0)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", Statement.NO_GENERATED_KEYS); prep.executeUpdate(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + prep = 
conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); prep.executeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -761,13 +742,13 @@ private void testPrepareStatement_int_ExecuteUpdate(Connection conn) throws Exce private void testPrepareStatement_intArray_Execute(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", new int[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new int[0]); prep.execute(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", new int[] { 1, 2 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); prep.execute(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -778,7 +759,7 @@ private void testPrepareStatement_intArray_Execute(Connection conn) throws Excep assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", new int[] { 2, 1 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); prep.execute(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -789,7 +770,7 @@ private void testPrepareStatement_intArray_Execute(Connection conn) throws Excep assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (40)", new int[] { 2 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new int[] 
{ 2 }); prep.execute(); rs = prep.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); @@ -814,15 +795,15 @@ private void testPrepareStatement_intArray_Execute(Connection conn) throws Excep private void testPrepareStatement_intArray_ExecuteBatch(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", new int[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new int[0]); prep.addBatch(); prep.addBatch(); prep.executeBatch(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", new int[] { 1, 2 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); prep.addBatch(); prep.addBatch(); prep.executeBatch(); @@ -838,7 +819,7 @@ private void testPrepareStatement_intArray_ExecuteBatch(Connection conn) throws assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", new int[] { 2, 1 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); prep.addBatch(); prep.addBatch(); prep.executeBatch(); @@ -854,7 +835,7 @@ private void testPrepareStatement_intArray_ExecuteBatch(Connection conn) throws assertEquals(6L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (40)", new int[] { 2 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); prep.addBatch(); prep.addBatch(); prep.executeBatch(); @@ -883,17 +864,15 @@ private void 
testPrepareStatement_intArray_ExecuteBatch(Connection conn) throws private void testPrepareStatement_intArray_ExecuteLargeBatch(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - JdbcPreparedStatement prep = (JdbcPreparedStatement) conn - .prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", new int[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new int[0]); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", - new int[] { 1, 2 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); @@ -909,8 +888,7 @@ private void testPrepareStatement_intArray_ExecuteLargeBatch(Connection conn) th assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", - new int[] { 2, 1 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); @@ -926,7 +904,7 @@ private void testPrepareStatement_intArray_ExecuteLargeBatch(Connection conn) th assertEquals(6L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (40)", new int[] { 2 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); @@ -955,15 +933,13 @@ private void 
testPrepareStatement_intArray_ExecuteLargeBatch(Connection conn) th private void testPrepareStatement_intArray_ExecuteLargeUpdate(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - JdbcPreparedStatement prep = (JdbcPreparedStatement) conn - .prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", new int[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new int[0]); prep.executeLargeUpdate(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", - new int[] { 1, 2 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); prep.executeLargeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -974,8 +950,7 @@ private void testPrepareStatement_intArray_ExecuteLargeUpdate(Connection conn) t assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", - new int[] { 2, 1 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); prep.executeLargeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -986,7 +961,7 @@ private void testPrepareStatement_intArray_ExecuteLargeUpdate(Connection conn) t assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (40)", new int[] { 2 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); prep.executeLargeUpdate(); rs = prep.getGeneratedKeys(); 
assertEquals(1, rs.getMetaData().getColumnCount()); @@ -1011,13 +986,13 @@ private void testPrepareStatement_intArray_ExecuteLargeUpdate(Connection conn) t private void testPrepareStatement_intArray_ExecuteUpdate(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", new int[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new int[0]); prep.executeUpdate(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", new int[] { 1, 2 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); prep.executeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -1028,7 +1003,7 @@ private void testPrepareStatement_intArray_ExecuteUpdate(Connection conn) throws assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", new int[] { 2, 1 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); prep.executeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -1039,7 +1014,7 @@ private void testPrepareStatement_intArray_ExecuteUpdate(Connection conn) throws assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (40)", new int[] { 2 }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); prep.executeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); @@ 
-1064,13 +1039,13 @@ private void testPrepareStatement_intArray_ExecuteUpdate(Connection conn) throws private void testPrepareStatement_StringArray_Execute(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", new String[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[0]); prep.executeUpdate(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", new String[] { "ID", "UID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); prep.execute(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -1081,7 +1056,7 @@ private void testPrepareStatement_StringArray_Execute(Connection conn) throws Ex assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", new String[] { "UID", "ID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); prep.execute(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -1092,7 +1067,7 @@ private void testPrepareStatement_StringArray_Execute(Connection conn) throws Ex assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (40)", new String[] { "UID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); prep.execute(); rs = prep.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); @@ -1117,15 
+1092,15 @@ private void testPrepareStatement_StringArray_Execute(Connection conn) throws Ex private void testPrepareStatement_StringArray_ExecuteBatch(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", new String[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[0]); prep.addBatch(); prep.addBatch(); prep.executeBatch(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", new String[] { "ID", "UID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); prep.addBatch(); prep.addBatch(); prep.executeBatch(); @@ -1141,7 +1116,7 @@ private void testPrepareStatement_StringArray_ExecuteBatch(Connection conn) thro assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", new String[] { "UID", "ID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); prep.addBatch(); prep.addBatch(); prep.executeBatch(); @@ -1157,7 +1132,7 @@ private void testPrepareStatement_StringArray_ExecuteBatch(Connection conn) thro assertEquals(6L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (40)", new String[] { "UID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); prep.addBatch(); prep.addBatch(); prep.executeBatch(); @@ -1186,17 +1161,15 @@ private void testPrepareStatement_StringArray_ExecuteBatch(Connection conn) thro private void 
testPrepareStatement_StringArray_ExecuteLargeBatch(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - JdbcPreparedStatement prep = (JdbcPreparedStatement) conn - .prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", new String[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[0]); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", - new String[] { "ID", "UID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); @@ -1212,8 +1185,7 @@ private void testPrepareStatement_StringArray_ExecuteLargeBatch(Connection conn) assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", - new String[] { "UID", "ID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); @@ -1229,8 +1201,7 @@ private void testPrepareStatement_StringArray_ExecuteLargeBatch(Connection conn) assertEquals(6L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (40)", - new String[] { "UID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); prep.addBatch(); prep.addBatch(); prep.executeLargeBatch(); @@ -1259,15 +1230,13 @@ private void 
testPrepareStatement_StringArray_ExecuteLargeBatch(Connection conn) private void testPrepareStatement_StringArray_ExecuteLargeUpdate(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - JdbcPreparedStatement prep = (JdbcPreparedStatement) conn - .prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", new String[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[0]); prep.executeLargeUpdate(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", - new String[] { "ID", "UID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); prep.executeLargeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -1278,8 +1247,7 @@ private void testPrepareStatement_StringArray_ExecuteLargeUpdate(Connection conn assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", - new String[] { "UID", "ID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); prep.executeLargeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -1290,8 +1258,7 @@ private void testPrepareStatement_StringArray_ExecuteLargeUpdate(Connection conn assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - prep = (JdbcPreparedStatement) conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (40)", - new String[] { "UID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); 
prep.executeLargeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); @@ -1315,13 +1282,13 @@ private void testPrepareStatement_StringArray_ExecuteLargeUpdate(Connection conn private void testPrepareStatement_StringArray_ExecuteUpdate(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (10)", new String[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[0]); prep.executeUpdate(); ResultSet rs = prep.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (20)", new String[] { "ID", "UID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); prep.executeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -1332,7 +1299,7 @@ private void testPrepareStatement_StringArray_ExecuteUpdate(Connection conn) thr assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (30)", new String[] { "UID", "ID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); prep.executeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); @@ -1343,7 +1310,7 @@ private void testPrepareStatement_StringArray_ExecuteUpdate(Connection conn) thr assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - prep = conn.prepareStatement("INSERT INTO TEST(VALUE) VALUES (40)", new String[] { "UID" }); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new 
String[] { "UID" }); prep.executeUpdate(); rs = prep.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); @@ -1366,8 +1333,8 @@ private void testPrepareStatement_StringArray_ExecuteUpdate(Connection conn) thr private void testStatementExecute(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - stat.execute("INSERT INTO TEST(VALUE) VALUES (10)"); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.execute("INSERT INTO TEST(V) VALUES (10)"); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); @@ -1385,12 +1352,12 @@ private void testStatementExecute(Connection conn) throws Exception { private void testStatementExecute_int(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL, OTHER INT DEFAULT 0)"); - stat.execute("INSERT INTO TEST(VALUE) VALUES (10)", Statement.NO_GENERATED_KEYS); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + stat.execute("INSERT INTO TEST(V) VALUES (10)", Statement.NO_GENERATED_KEYS); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - stat.execute("INSERT INTO TEST(VALUE) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + stat.execute("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("ID", rs.getMetaData().getColumnName(1)); @@ -1414,12 +1381,12 @@ private void testStatementExecute_int(Connection conn) throws Exception { private void testStatementExecute_intArray(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT 
PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - stat.execute("INSERT INTO TEST(VALUE) VALUES (10)", new int[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.execute("INSERT INTO TEST(V) VALUES (10)", new int[0]); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - stat.execute("INSERT INTO TEST(VALUE) VALUES (20)", new int[] { 1, 2 }); + stat.execute("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("ID", rs.getMetaData().getColumnName(1)); @@ -1429,7 +1396,7 @@ private void testStatementExecute_intArray(Connection conn) throws Exception { assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - stat.execute("INSERT INTO TEST(VALUE) VALUES (30)", new int[] { 2, 1 }); + stat.execute("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1439,7 +1406,7 @@ private void testStatementExecute_intArray(Connection conn) throws Exception { assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - stat.execute("INSERT INTO TEST(VALUE) VALUES (40)", new int[] { 2 }); + stat.execute("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); rs = stat.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1461,12 +1428,12 @@ private void testStatementExecute_intArray(Connection conn) throws Exception { private void testStatementExecute_StringArray(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - stat.execute("INSERT INTO TEST(VALUE) VALUES (10)", new 
String[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.execute("INSERT INTO TEST(V) VALUES (10)", new String[0]); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - stat.execute("INSERT INTO TEST(VALUE) VALUES (20)", new String[] { "ID", "UID" }); + stat.execute("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("ID", rs.getMetaData().getColumnName(1)); @@ -1476,7 +1443,7 @@ private void testStatementExecute_StringArray(Connection conn) throws Exception assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - stat.execute("INSERT INTO TEST(VALUE) VALUES (30)", new String[] { "UID", "ID" }); + stat.execute("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1486,7 +1453,7 @@ private void testStatementExecute_StringArray(Connection conn) throws Exception assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - stat.execute("INSERT INTO TEST(VALUE) VALUES (40)", new String[] { "UID" }); + stat.execute("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); rs = stat.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1506,10 +1473,10 @@ private void testStatementExecute_StringArray(Connection conn) throws Exception * on exception */ private void testStatementExecuteLargeUpdate(Connection conn) throws Exception { - JdbcStatement stat = (JdbcStatement) conn.createStatement(); + Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (10)"); + 
+ "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (10)"); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); @@ -1525,14 +1492,14 @@ private void testStatementExecuteLargeUpdate(Connection conn) throws Exception { * on exception */ private void testStatementExecuteLargeUpdate_int(Connection conn) throws Exception { - JdbcStatement stat = (JdbcStatement) conn.createStatement(); + Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL, OTHER INT DEFAULT 0)"); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (10)", Statement.NO_GENERATED_KEYS); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (10)", Statement.NO_GENERATED_KEYS); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("ID", rs.getMetaData().getColumnName(1)); @@ -1554,14 +1521,14 @@ private void testStatementExecuteLargeUpdate_int(Connection conn) throws Excepti * on exception */ private void testStatementExecuteLargeUpdate_intArray(Connection conn) throws Exception { - JdbcStatement stat = (JdbcStatement) conn.createStatement(); + Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (10)", new int[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeLargeUpdate("INSERT INTO 
TEST(V) VALUES (10)", new int[0]); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (20)", new int[] { 1, 2 }); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("ID", rs.getMetaData().getColumnName(1)); @@ -1571,7 +1538,7 @@ private void testStatementExecuteLargeUpdate_intArray(Connection conn) throws Ex assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (30)", new int[] { 2, 1 }); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1581,7 +1548,7 @@ private void testStatementExecuteLargeUpdate_intArray(Connection conn) throws Ex assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (40)", new int[] { 2 }); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); rs = stat.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1601,14 +1568,14 @@ private void testStatementExecuteLargeUpdate_intArray(Connection conn) throws Ex * on exception */ private void testStatementExecuteLargeUpdate_StringArray(Connection conn) throws Exception { - JdbcStatement stat = (JdbcStatement) conn.createStatement(); + Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (10)", new String[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + 
stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (10)", new String[0]); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (20)", new String[] { "ID", "UID" }); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("ID", rs.getMetaData().getColumnName(1)); @@ -1618,7 +1585,7 @@ private void testStatementExecuteLargeUpdate_StringArray(Connection conn) throws assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (30)", new String[] { "UID", "ID" }); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1628,7 +1595,7 @@ private void testStatementExecuteLargeUpdate_StringArray(Connection conn) throws assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - stat.executeLargeUpdate("INSERT INTO TEST(VALUE) VALUES (40)", new String[] { "UID" }); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); rs = stat.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1650,8 +1617,8 @@ private void testStatementExecuteLargeUpdate_StringArray(Connection conn) throws private void testStatementExecuteUpdate(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (10)"); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeUpdate("INSERT 
INTO TEST(V) VALUES (10)"); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); @@ -1669,12 +1636,12 @@ private void testStatementExecuteUpdate(Connection conn) throws Exception { private void testStatementExecuteUpdate_int(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL, OTHER INT DEFAULT 0)"); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (10)", Statement.NO_GENERATED_KEYS); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (10)", Statement.NO_GENERATED_KEYS); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("ID", rs.getMetaData().getColumnName(1)); @@ -1698,12 +1665,12 @@ private void testStatementExecuteUpdate_int(Connection conn) throws Exception { private void testStatementExecuteUpdate_intArray(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (10)", new int[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (10)", new int[0]); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (20)", new int[] { 1, 2 }); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); rs = stat.getGeneratedKeys(); 
assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("ID", rs.getMetaData().getColumnName(1)); @@ -1713,7 +1680,7 @@ private void testStatementExecuteUpdate_intArray(Connection conn) throws Excepti assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (30)", new int[] { 2, 1 }); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1723,7 +1690,7 @@ private void testStatementExecuteUpdate_intArray(Connection conn) throws Excepti assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (40)", new int[] { 2 }); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); rs = stat.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1745,12 +1712,12 @@ private void testStatementExecuteUpdate_intArray(Connection conn) throws Excepti private void testStatementExecuteUpdate_StringArray(Connection conn) throws Exception { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," - + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), VALUE INT NOT NULL)"); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (10)", new String[0]); + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (10)", new String[0]); ResultSet rs = stat.getGeneratedKeys(); assertFalse(rs.next()); rs.close(); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (20)", new String[] { "ID", "UID" }); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); 
assertEquals("ID", rs.getMetaData().getColumnName(1)); @@ -1760,7 +1727,7 @@ private void testStatementExecuteUpdate_StringArray(Connection conn) throws Exce assertEquals(UUID.class, rs.getObject(2).getClass()); assertFalse(rs.next()); rs.close(); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (30)", new String[] { "UID", "ID" }); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); rs = stat.getGeneratedKeys(); assertEquals(2, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1770,7 +1737,7 @@ private void testStatementExecuteUpdate_StringArray(Connection conn) throws Exce assertEquals(3L, rs.getLong(2)); assertFalse(rs.next()); rs.close(); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (40)", new String[] { "UID" }); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); rs = stat.getGeneratedKeys(); assertEquals(1, rs.getMetaData().getColumnCount()); assertEquals("UID", rs.getMetaData().getColumnName(1)); @@ -1781,33 +1748,4 @@ private void testStatementExecuteUpdate_StringArray(Connection conn) throws Exce stat.execute("DROP TABLE TEST"); } - /** - * Test for keys generated by trigger. 
- * - * @param conn - * connection - * @throws Exception - * on exception - */ - private void testTrigger(Connection conn) throws Exception { - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(ID UUID, VALUE INT)"); - stat.execute("CREATE TRIGGER TEST_INSERT BEFORE INSERT ON TEST FOR EACH ROW CALL \"" - + TestGetGeneratedKeysTrigger.class.getName() + '"'); - stat.executeUpdate("INSERT INTO TEST(VALUE) VALUES (10), (20)", Statement.RETURN_GENERATED_KEYS); - ResultSet rs = stat.getGeneratedKeys(); - rs.next(); - UUID u1 = (UUID) rs.getObject(1); - rs.next(); - UUID u2 = (UUID) rs.getObject(1); - assertFalse(rs.next()); - rs = stat.executeQuery("SELECT ID FROM TEST ORDER BY VALUE"); - rs.next(); - assertEquals(u1, rs.getObject(1)); - rs.next(); - assertEquals(u2, rs.getObject(1)); - stat.execute("DROP TRIGGER TEST_INSERT"); - stat.execute("DROP TABLE TEST"); - } - } diff --git a/h2/src/test/org/h2/test/jdbc/TestJavaObject.java b/h2/src/test/org/h2/test/jdbc/TestJavaObject.java deleted file mode 100644 index 9b877fc0d0..0000000000 --- a/h2/src/test/org/h2/test/jdbc/TestJavaObject.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.jdbc; - -import java.io.Serializable; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Types; -import java.util.Arrays; -import java.util.UUID; - -import org.h2.engine.SysProperties; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - -/** - * Tests java object values when SysProperties.SERIALIZE_JAVA_OBJECT property is - * disabled. - * - * @author Sergi Vladykin - */ -public class TestJavaObject extends TestDb { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase test = createCaller().init(); - test.config.traceTest = true; - test.config.memory = true; - test.config.networked = true; - test.config.beforeTest(); - test.test(); - test.config.afterTest(); - } - - @Override - public void test() throws Exception { - SysProperties.serializeJavaObject = false; - try { - trace("Test Java Object"); - doTest(new MyObj(1), new MyObj(2), false); - doTest(Arrays.asList(UUID.randomUUID(), null), - Arrays.asList(UUID.randomUUID(), UUID.randomUUID()), true); - // doTest(new Timestamp(System.currentTimeMillis()), - // new Timestamp(System.currentTimeMillis() + 10000), - // false); - doTest(200, 100, false); - doTest(200, 100L, true); - // doTest(new Date(System.currentTimeMillis() + 1000), - // new Date(System.currentTimeMillis()), false); - // doTest(new java.util.Date(System.currentTimeMillis() + 1000), - // new java.util.Date(System.currentTimeMillis()), false); - // doTest(new Time(System.currentTimeMillis() + 1000), - // new Date(System.currentTimeMillis()), false); - // doTest(new Time(System.currentTimeMillis() + 1000), - // new Timestamp(System.currentTimeMillis()), false); - } finally { - SysProperties.serializeJavaObject = true; - } - } - - private void doTest(Object o1, Object o2, boolean hash) throws SQLException { - deleteDb("javaObject"); - Connection conn = getConnection("javaObject"); - Statement stat = conn.createStatement(); - stat.execute("create table t(id identity, val other)"); - - PreparedStatement ins = conn.prepareStatement( - "insert into t(val) values(?)"); - - ins.setObject(1, o1, Types.JAVA_OBJECT); - assertEquals(1, ins.executeUpdate()); - - ins.setObject(1, o2, Types.JAVA_OBJECT); - assertEquals(1, ins.executeUpdate()); - - ResultSet rs = stat.executeQuery( - "select val from t order by val limit 1"); - - assertTrue(rs.next()); - - Object smallest; - if (hash) { - if (o1.getClass() != o2.getClass()) { - smallest = o1.getClass().getName().compareTo( - o2.getClass().getName()) < 
0 ? o1 : o2; - } else { - assertFalse(o1.hashCode() == o2.hashCode()); - smallest = o1.hashCode() < o2.hashCode() ? o1 : o2; - } - } else { - @SuppressWarnings("unchecked") - int compare = ((Comparable) o1).compareTo(o2); - assertFalse(compare == 0); - smallest = compare < 0 ? o1 : o2; - } - - assertEquals(smallest.toString(), rs.getString(1)); - - Object y = rs.getObject(1); - - assertTrue(smallest.equals(y)); - assertFalse(rs.next()); - rs.close(); - - PreparedStatement prep = conn.prepareStatement( - "select id from t where val = ?"); - - prep.setObject(1, o1, Types.JAVA_OBJECT); - rs = prep.executeQuery(); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - assertFalse(rs.next()); - rs.close(); - - prep.setObject(1, o2, Types.JAVA_OBJECT); - rs = prep.executeQuery(); - assertTrue(rs.next()); - assertEquals(2, rs.getInt(1)); - assertFalse(rs.next()); - rs.close(); - - stat.close(); - prep.close(); - - conn.close(); - deleteDb("javaObject"); - // trace("ok: " + o1.getClass().getName() + " vs " + - // o2.getClass().getName()); - } - - /** - * A test class. - */ - public static class MyObj implements Comparable, Serializable { - - private static final long serialVersionUID = 1L; - private final int value; - - MyObj(int value) { - this.value = value; - } - - @Override - public String toString() { - return "myObj:" + value; - } - - @Override - public int compareTo(MyObj o) { - return value - o.value; - } - - @Override - public boolean equals(Object o) { - return toString().equals(o.toString()); - } - - @Override - public int hashCode() { - return -value; - } - - } -} diff --git a/h2/src/test/org/h2/test/jdbc/TestJavaObjectSerializer.java b/h2/src/test/org/h2/test/jdbc/TestJavaObjectSerializer.java index 432d5d826f..bb145a23ee 100644 --- a/h2/src/test/org/h2/test/jdbc/TestJavaObjectSerializer.java +++ b/h2/src/test/org/h2/test/jdbc/TestJavaObjectSerializer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -33,9 +33,7 @@ public static void main(String... a) throws Exception { test.config.traceTest = true; test.config.memory = true; test.config.networked = true; - test.config.beforeTest(); - test.test(); - test.config.afterTest(); + test.testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbc/TestLimitUpdates.java b/h2/src/test/org/h2/test/jdbc/TestLimitUpdates.java deleted file mode 100644 index d00c78ca97..0000000000 --- a/h2/src/test/org/h2/test/jdbc/TestLimitUpdates.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.jdbc; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - -/** - * Test for limit updates. - */ -public class TestLimitUpdates extends TestDb { - - private static final String DATABASE_NAME = "limitUpdates"; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws SQLException { - testLimitUpdates(); - deleteDb(DATABASE_NAME); - } - - private void testLimitUpdates() throws SQLException { - deleteDb(DATABASE_NAME); - Connection conn = null; - PreparedStatement prep = null; - - try { - conn = getConnection(DATABASE_NAME); - prep = conn.prepareStatement( - "CREATE TABLE TEST(KEY_ID INT PRIMARY KEY, VALUE_ID INT)"); - prep.executeUpdate(); - - prep.close(); - prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?)"); - int numRows = 10; - for (int i = 0; i < numRows; ++i) { - prep.setInt(1, i); - prep.setInt(2, 0); - prep.execute(); - } - assertEquals(numRows, countWhere(conn, 0)); - - // update all elements than available - prep.close(); - prep = conn.prepareStatement("UPDATE TEST SET VALUE_ID = ?"); - prep.setInt(1, 1); - prep.execute(); - assertEquals(numRows, countWhere(conn, 1)); - - // update less elements than available - updateLimit(conn, 2, numRows / 2); - assertEquals(numRows / 2, countWhere(conn, 2)); - - // update more elements than available - updateLimit(conn, 3, numRows * 2); - assertEquals(numRows, countWhere(conn, 3)); - - // update no elements - updateLimit(conn, 4, 0); - assertEquals(0, countWhere(conn, 4)); - } finally { - if (prep != null) { - prep.close(); - } - if (conn != null) { - conn.close(); - } - } - } - - private static int countWhere(final Connection conn, final int where) - throws SQLException { - PreparedStatement prep = null; - ResultSet rs = null; - try { - prep = conn.prepareStatement( - "SELECT COUNT(*) FROM TEST WHERE VALUE_ID = ?"); - prep.setInt(1, where); - rs = prep.executeQuery(); - rs.next(); - return rs.getInt(1); - } finally { - if (rs != null) { - rs.close(); - } - if (prep != null) { - prep.close(); - } - } - } - - private static void updateLimit(final Connection conn, final int value, - final int limit) throws SQLException { - try (PreparedStatement prep = 
conn.prepareStatement( - "UPDATE TEST SET VALUE_ID = ? LIMIT ?")) { - prep.setInt(1, value); - prep.setInt(2, limit); - prep.execute(); - } - } -} diff --git a/h2/src/test/org/h2/test/jdbc/TestLobApi.java b/h2/src/test/org/h2/test/jdbc/TestLobApi.java index 79694b361d..cdb6f7d92f 100644 --- a/h2/src/test/org/h2/test/jdbc/TestLobApi.java +++ b/h2/src/test/org/h2/test/jdbc/TestLobApi.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -26,6 +26,7 @@ import org.h2.jdbc.JdbcConnection; import org.h2.test.TestBase; import org.h2.test.TestDb; +import org.h2.test.utils.RandomDataUtils; import org.h2.util.IOUtils; /** @@ -42,7 +43,7 @@ public class TestLobApi extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -116,7 +117,7 @@ private void testLobStaysOpenUntilCommitted() throws Exception { stat = conn.createStatement(); stat.execute("create table test(id identity, c clob, b blob)"); PreparedStatement prep = conn.prepareStatement( - "insert into test values(null, ?, ?)"); + "insert into test(c, b) values(?, ?)"); prep.setString(1, ""); prep.setBytes(2, new byte[0]); prep.execute(); @@ -124,9 +125,7 @@ private void testLobStaysOpenUntilCommitted() throws Exception { Random r = new Random(1); char[] charsSmall = new char[20]; - for (int i = 0; i < charsSmall.length; i++) { - charsSmall[i] = (char) r.nextInt(10000); - } + RandomDataUtils.randomChars(r, charsSmall); String dSmall = new String(charsSmall); prep.setCharacterStream(1, new StringReader(dSmall), -1); byte[] bytesSmall = new byte[20]; @@ -135,9 +134,7 @@ private void testLobStaysOpenUntilCommitted() throws Exception { prep.execute(); char[] chars = new char[100000]; - for (int i = 0; i < chars.length; i++) { - chars[i] = (char) r.nextInt(10000); - } + RandomDataUtils.randomChars(r, chars); String d = new String(chars); prep.setCharacterStream(1, new StringReader(d), -1); byte[] bytes = new byte[100000]; @@ -184,7 +181,7 @@ private void testInputStreamThrowsException(final boolean ioException) stat = conn.createStatement(); stat.execute("create table test(id identity, c clob, b blob)"); PreparedStatement prep = conn.prepareStatement( - "insert into test values(null, ?, ?)"); + "insert into test(c, b) values(?, ?)"); assertThrows(ErrorCode.IO_EXCEPTION_1, prep). 
setCharacterStream(1, new Reader() { diff --git a/h2/src/test/org/h2/test/jdbc/TestManyJdbcObjects.java b/h2/src/test/org/h2/test/jdbc/TestManyJdbcObjects.java index 2915c05396..d833c80977 100644 --- a/h2/src/test/org/h2/test/jdbc/TestManyJdbcObjects.java +++ b/h2/src/test/org/h2/test/jdbc/TestManyJdbcObjects.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -26,7 +26,7 @@ public class TestManyJdbcObjects extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -46,8 +46,8 @@ private void testNestedResultSets() throws SQLException { DatabaseMetaData meta = conn.getMetaData(); ResultSet rsTables = meta.getColumns(null, null, null, null); while (rsTables.next()) { - meta.getExportedKeys(null, null, null); - meta.getImportedKeys(null, null, null); + meta.getExportedKeys(null, null, "TEST"); + meta.getImportedKeys(null, null, "TEST"); } conn.close(); } diff --git a/h2/src/test/org/h2/test/jdbc/TestMetaData.java b/h2/src/test/org/h2/test/jdbc/TestMetaData.java index a27e6dd607..ebf8879849 100644 --- a/h2/src/test/org/h2/test/jdbc/TestMetaData.java +++ b/h2/src/test/org/h2/test/jdbc/TestMetaData.java @@ -1,27 +1,28 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.jdbc; +import static org.h2.engine.Constants.MAX_ARRAY_CARDINALITY; +import static org.h2.engine.Constants.MAX_NUMERIC_PRECISION; +import static org.h2.engine.Constants.MAX_STRING_LENGTH; + import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.Driver; -import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; import java.sql.Types; -import java.util.UUID; import org.h2.api.ErrorCode; import org.h2.engine.Constants; -import org.h2.engine.SysProperties; +import org.h2.mode.DefaultNullOrdering; import org.h2.test.TestBase; import org.h2.test.TestDb; -import org.h2.value.DataType; /** * Test for the DatabaseMetaData implementation. @@ -36,7 +37,7 @@ public class TestMetaData extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -45,20 +46,21 @@ public void test() throws SQLException { testUnwrap(); testUnsupportedOperations(); testTempTable(); - testColumnResultSetMeta(); testColumnLobMeta(); testColumnMetaData(); testColumnPrecision(); testColumnDefault(); testColumnGenerated(); + testHiddenColumn(); testCrossReferences(); testProcedureColumns(); + testTypeInfo(); testUDTs(); testStatic(); + testNullsAreSortedAt(); testGeneral(); testAllowLiteralsNone(); testClientInfo(); - testSessionsUncommitted(); testQueryStatistics(); testQueryStatisticsLimit(); } @@ -108,46 +110,6 @@ private void testUnsupportedOperations() throws SQLException { conn.close(); } - private void testColumnResultSetMeta() throws SQLException { - Connection conn = getConnection("metaData"); - Statement stat = conn.createStatement(); - stat.executeUpdate("create table test(data result_set)"); - stat.execute("create alias x as 'ResultSet x(Connection conn, String sql) " + - "throws 
SQLException { return conn.createStatement(" + - "ResultSet.TYPE_SCROLL_INSENSITIVE, " + - "ResultSet.CONCUR_READ_ONLY).executeQuery(sql); }'"); - stat.execute("insert into test values(" + - "select x('select x from system_range(1, 2)'))"); - ResultSet rs = stat.executeQuery("select * from test"); - ResultSetMetaData rsMeta = rs.getMetaData(); - assertTrue(rsMeta.toString().endsWith(": columns=1")); - assertEquals("java.sql.ResultSet", rsMeta.getColumnClassName(1)); - assertEquals(DataType.TYPE_RESULT_SET, rsMeta.getColumnType(1)); - rs.next(); - assertTrue(rs.getObject(1) instanceof java.sql.ResultSet); - stat.executeUpdate("drop alias x"); - - rs = stat.executeQuery("select 1 from dual"); - rs.next(); - rsMeta = rs.getMetaData(); - assertNotNull(rsMeta.getCatalogName(1)); - assertEquals("1", rsMeta.getColumnLabel(1)); - assertEquals("1", rsMeta.getColumnName(1)); - assertEquals("", rsMeta.getSchemaName(1)); - assertEquals("", rsMeta.getTableName(1)); - assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability()); - assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, rs.getHoldability()); - stat.executeUpdate("drop table test"); - - PreparedStatement prep = conn.prepareStatement("SELECT X FROM TABLE (X UUID = ?)"); - prep.setObject(1, UUID.randomUUID()); - rs = prep.executeQuery(); - rsMeta = rs.getMetaData(); - assertEquals("UUID", rsMeta.getColumnTypeName(1)); - - conn.close(); - } - private void testColumnLobMeta() throws SQLException { Connection conn = getConnection("metaData"); Statement stat = conn.createStatement(); @@ -175,11 +137,11 @@ private void testColumnMetaData() throws SQLException { assertEquals("C", rs.getMetaData().getColumnName(1)); Statement stat = conn.createStatement(); - stat.execute("create table a(x array)"); + stat.execute("create table a(x int array)"); stat.execute("insert into a values(ARRAY[1, 2])"); rs = stat.executeQuery("SELECT x[1] FROM a"); ResultSetMetaData rsMeta = rs.getMetaData(); - assertEquals(Types.NULL, 
rsMeta.getColumnType(1)); + assertEquals(Types.INTEGER, rsMeta.getColumnType(1)); rs.next(); assertEquals(Integer.class.getName(), rs.getObject(1).getClass().getName()); @@ -188,12 +150,6 @@ private void testColumnMetaData() throws SQLException { } private void testColumnPrecision() throws SQLException { - int numericType; - if (SysProperties.BIG_DECIMAL_IS_DECIMAL) { - numericType = Types.DECIMAL; - } else { - numericType = Types.NUMERIC; - } Connection conn = getConnection("metaData"); Statement stat = conn.createStatement(); stat.execute("CREATE TABLE ONE(X NUMBER(12,2), Y FLOAT)"); @@ -203,15 +159,15 @@ private void testColumnPrecision() throws SQLException { rs = stat.executeQuery("SELECT * FROM ONE"); rsMeta = rs.getMetaData(); assertEquals(12, rsMeta.getPrecision(1)); - assertEquals(17, rsMeta.getPrecision(2)); - assertEquals(numericType, rsMeta.getColumnType(1)); - assertEquals(Types.DOUBLE, rsMeta.getColumnType(2)); + assertEquals(53, rsMeta.getPrecision(2)); + assertEquals(Types.NUMERIC, rsMeta.getColumnType(1)); + assertEquals(Types.FLOAT, rsMeta.getColumnType(2)); rs = stat.executeQuery("SELECT * FROM TWO"); rsMeta = rs.getMetaData(); assertEquals(12, rsMeta.getPrecision(1)); - assertEquals(17, rsMeta.getPrecision(2)); - assertEquals(numericType, rsMeta.getColumnType(1)); - assertEquals(Types.DOUBLE, rsMeta.getColumnType(2)); + assertEquals(53, rsMeta.getPrecision(2)); + assertEquals(Types.NUMERIC, rsMeta.getColumnType(1)); + assertEquals(Types.FLOAT, rsMeta.getColumnType(2)); stat.execute("DROP TABLE ONE, TWO"); conn.close(); } @@ -252,25 +208,46 @@ private void testColumnGenerated() throws SQLException { conn.close(); } + private void testHiddenColumn() throws SQLException { + Connection conn = getConnection("metaData"); + DatabaseMetaData meta = conn.getMetaData(); + ResultSet rs; + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(A INT, B INT INVISIBLE)"); + rs = meta.getColumns(null, null, "TEST", null); + 
assertTrue(rs.next()); + assertEquals("A", rs.getString("COLUMN_NAME")); + assertFalse(rs.next()); + rs = meta.getPseudoColumns(null, null, "TEST", null); + assertTrue(rs.next()); + assertEquals("B", rs.getString("COLUMN_NAME")); + assertEquals("YES", rs.getString("IS_NULLABLE")); + assertTrue(rs.next()); + assertEquals("_ROWID_", rs.getString("COLUMN_NAME")); + assertEquals("NO", rs.getString("IS_NULLABLE")); + assertFalse(rs.next()); + stat.execute("DROP TABLE TEST"); + conn.close(); + } + private void testProcedureColumns() throws SQLException { Connection conn = getConnection("metaData"); DatabaseMetaData meta = conn.getMetaData(); ResultSet rs; Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS PROP FOR " + - "\"java.lang.System.getProperty(java.lang.String)\""); - stat.execute("CREATE ALIAS EXIT FOR \"java.lang.System.exit\""); + stat.execute("CREATE ALIAS PROP FOR 'java.lang.System.getProperty(java.lang.String)'"); + stat.execute("CREATE ALIAS EXIT FOR 'java.lang.System.exit'"); rs = meta.getProcedures(null, null, "EX%"); assertResultSetMeta(rs, 9, new String[] { "PROCEDURE_CAT", - "PROCEDURE_SCHEM", "PROCEDURE_NAME", "NUM_INPUT_PARAMS", - "NUM_OUTPUT_PARAMS", "NUM_RESULT_SETS", "REMARKS", + "PROCEDURE_SCHEM", "PROCEDURE_NAME", "RESERVED1", + "RESERVED2", "RESERVED3", "REMARKS", "PROCEDURE_TYPE", "SPECIFIC_NAME" }, new int[] { Types.VARCHAR, - Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.INTEGER, - Types.INTEGER, Types.VARCHAR, Types.SMALLINT, Types.VARCHAR }, + Types.VARCHAR, Types.VARCHAR, Types.NULL, Types.NULL, + Types.NULL, Types.VARCHAR, Types.SMALLINT, Types.VARCHAR }, null, null); assertResultSetOrdered(rs, new String[][] { { CATALOG, - Constants.SCHEMA_MAIN, "EXIT", "1", "0", "0", "", - "" + DatabaseMetaData.procedureNoResult } }); + Constants.SCHEMA_MAIN, "EXIT", null, null, null, null, + "" + DatabaseMetaData.procedureNoResult, "EXIT_1" } }); rs = meta.getProcedureColumns(null, null, null, null); assertResultSetMeta(rs, 
20, new String[] { "PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", "COLUMN_NAME", @@ -288,23 +265,151 @@ private void testProcedureColumns() throws SQLException { assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "EXIT", "P1", "" + DatabaseMetaData.procedureColumnIn, - "" + Types.INTEGER, "INTEGER", "10", "10", "0", "10", - "" + DatabaseMetaData.procedureNoNulls }, - { CATALOG, Constants.SCHEMA_MAIN, "PROP", "P0", + "" + Types.INTEGER, "INTEGER", "32", "32", null, "2", + "" + DatabaseMetaData.procedureNoNulls, + null, null, null, null, null, "1", "", "EXIT_1" }, + { CATALOG, Constants.SCHEMA_MAIN, "PROP", "RESULT", "" + DatabaseMetaData.procedureColumnReturn, - "" + Types.VARCHAR, "VARCHAR", "" + Integer.MAX_VALUE, - "" + Integer.MAX_VALUE, "0", "10", - "" + DatabaseMetaData.procedureNullableUnknown }, + "" + Types.VARCHAR, "CHARACTER VARYING", "" + MAX_STRING_LENGTH, + "" + MAX_STRING_LENGTH, null, null, + "" + DatabaseMetaData.procedureNullableUnknown, + null, null, null, null, "" + MAX_STRING_LENGTH, "0", "", "PROP_1" }, { CATALOG, Constants.SCHEMA_MAIN, "PROP", "P1", "" + DatabaseMetaData.procedureColumnIn, - "" + Types.VARCHAR, "VARCHAR", "" + Integer.MAX_VALUE, - "" + Integer.MAX_VALUE, "0", "10", - "" + DatabaseMetaData.procedureNullable }, }); + "" + Types.VARCHAR, "CHARACTER VARYING", "" + MAX_STRING_LENGTH, + "" + MAX_STRING_LENGTH, null, null, + "" + DatabaseMetaData.procedureNullableUnknown, + null, null, null, null, "" + MAX_STRING_LENGTH, "1", "", "PROP_1" }, }); stat.execute("DROP ALIAS EXIT"); stat.execute("DROP ALIAS PROP"); conn.close(); } + private void testTypeInfo() throws SQLException { + Connection conn = getConnection("metaData"); + DatabaseMetaData meta = conn.getMetaData(); + ResultSet rs; + rs = meta.getTypeInfo(); + assertResultSetMeta(rs, 18, + new String[] { "TYPE_NAME", "DATA_TYPE", "PRECISION", "LITERAL_PREFIX", "LITERAL_SUFFIX", + "CREATE_PARAMS", "NULLABLE", "CASE_SENSITIVE", "SEARCHABLE", 
"UNSIGNED_ATTRIBUTE", + "FIXED_PREC_SCALE", "AUTO_INCREMENT", "LOCAL_TYPE_NAME", "MINIMUM_SCALE", "MAXIMUM_SCALE", + "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "NUM_PREC_RADIX"}, + new int[] { Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, + Types.SMALLINT, Types.BOOLEAN, Types.SMALLINT, Types.BOOLEAN, Types.BOOLEAN, Types.BOOLEAN, + Types.VARCHAR, Types.SMALLINT, Types.SMALLINT, Types.INTEGER, Types.INTEGER, Types.INTEGER }, + null, null); + testTypeInfo(rs, "TINYINT", Types.TINYINT, 8, null, null, null, false, false, (short) 0, (short) 0, 2); + testTypeInfo(rs, "BIGINT", Types.BIGINT, 64, null, null, null, false, false, (short) 0, (short) 0, 2); + testTypeInfo(rs, "BINARY VARYING", Types.VARBINARY, MAX_STRING_LENGTH, "X'", "'", "LENGTH", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "BINARY", Types.BINARY, MAX_STRING_LENGTH, "X'", "'", "LENGTH", false, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "UUID", Types.BINARY, 16, "'", "'", null, false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "CHARACTER", Types.CHAR, MAX_STRING_LENGTH, "'", "'", "LENGTH", true, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "NUMERIC", Types.NUMERIC, MAX_NUMERIC_PRECISION, null, null, "PRECISION,SCALE", false, true, + (short) 0, Short.MAX_VALUE, 10); + testTypeInfo(rs, "DECFLOAT", Types.NUMERIC, MAX_NUMERIC_PRECISION, null, null, "PRECISION", false, false, + (short) 0, (short) 0, 10); + testTypeInfo(rs, "INTEGER", Types.INTEGER, 32, null, null, null, false, false, (short) 0, + (short) 0, 2); + testTypeInfo(rs, "SMALLINT", Types.SMALLINT, 16, null, null, null, false, false, (short) 0, + (short) 0, 2); + testTypeInfo(rs, "REAL", Types.REAL, 24, null, null, null, false, false, (short) 0, (short) 0, 2); + testTypeInfo(rs, "DOUBLE PRECISION", Types.DOUBLE, 53, null, null, null, false, false, (short) 0, (short) 0, + 2); + testTypeInfo(rs, "CHARACTER VARYING", Types.VARCHAR, MAX_STRING_LENGTH, "'", "'", "LENGTH", 
true, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "VARCHAR_IGNORECASE", Types.VARCHAR, MAX_STRING_LENGTH, "'", "'", "LENGTH", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "BOOLEAN", Types.BOOLEAN, 1, null, null, null, false, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "DATE", Types.DATE, 10, "DATE '", "'", null, false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "TIME", Types.TIME, 18, "TIME '", "'", "SCALE", false, false, (short) 0, (short) 9, 0); + testTypeInfo(rs, "TIMESTAMP", Types.TIMESTAMP, 29, "TIMESTAMP '", "'", "SCALE", false, false, (short) 0, + (short) 9, 0); + testTypeInfo(rs, "INTERVAL YEAR", Types.OTHER, 18, "INTERVAL '", "' YEAR", "PRECISION", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL MONTH", Types.OTHER, 18, "INTERVAL '", "' MONTH", "PRECISION", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL DAY", Types.OTHER, 18, "INTERVAL '", "' DAY", "PRECISION", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL HOUR", Types.OTHER, 18, "INTERVAL '", "' HOUR", "PRECISION", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL MINUTE", Types.OTHER, 18, "INTERVAL '", "' MINUTE", "PRECISION", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL SECOND", Types.OTHER, 18, "INTERVAL '", "' SECOND", "PRECISION,SCALE", false, false, + (short) 0, (short) 9, 0); + testTypeInfo(rs, "INTERVAL YEAR TO MONTH", Types.OTHER, 18, "INTERVAL '", "' YEAR TO MONTH", "PRECISION", + false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL DAY TO HOUR", Types.OTHER, 18, "INTERVAL '", "' DAY TO HOUR", "PRECISION", + false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL DAY TO MINUTE", Types.OTHER, 18, "INTERVAL '", "' DAY TO MINUTE", "PRECISION", + false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL DAY TO SECOND", Types.OTHER, 18, "INTERVAL '", "' DAY TO SECOND", "PRECISION,SCALE", + false, 
false, (short) 0, (short) 9, 0); + testTypeInfo(rs, "INTERVAL HOUR TO MINUTE", Types.OTHER, 18, "INTERVAL '", "' HOUR TO MINUTE", "PRECISION", + false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL HOUR TO SECOND", Types.OTHER, 18, "INTERVAL '", "' HOUR TO SECOND", + "PRECISION,SCALE", false, false, (short) 0, (short) 9, 0); + testTypeInfo(rs, "INTERVAL MINUTE TO SECOND", Types.OTHER, 18, "INTERVAL '", "' MINUTE TO SECOND", + "PRECISION,SCALE", false, false, (short) 0, (short) 9, 0); + testTypeInfo(rs, "ENUM", Types.OTHER, MAX_STRING_LENGTH, "'", "'", "ELEMENT [,...]", false, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "GEOMETRY", Types.OTHER, Integer.MAX_VALUE, "'", "'", "TYPE,SRID", false, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "JSON", Types.OTHER, MAX_STRING_LENGTH, "JSON '", "'", "LENGTH", true, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "ROW", Types.OTHER, 0, "ROW(", ")", "NAME DATA_TYPE [,...]", false, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "JAVA_OBJECT", Types.JAVA_OBJECT, MAX_STRING_LENGTH, "X'", "'", "LENGTH", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "ARRAY", Types.ARRAY, MAX_ARRAY_CARDINALITY, "ARRAY[", "]", "CARDINALITY", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "BINARY LARGE OBJECT", Types.BLOB, Integer.MAX_VALUE, "X'", "'", "LENGTH", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "CHARACTER LARGE OBJECT", Types.CLOB, Integer.MAX_VALUE, "'", "'", "LENGTH", true, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "TIME WITH TIME ZONE", Types.TIME_WITH_TIMEZONE, 24, "TIME WITH TIME ZONE '", "'", "SCALE", + false, false, (short) 0, (short) 9, 0); + testTypeInfo(rs, "TIMESTAMP WITH TIME ZONE", Types.TIMESTAMP_WITH_TIMEZONE, 35, "TIMESTAMP WITH TIME ZONE '", + "'", "SCALE", false, false, (short) 0, (short) 9, 0); + assertFalse(rs.next()); + conn.close(); + } + + private void testTypeInfo(ResultSet rs, String name, int type, long 
precision, String prefix, String suffix, + String params, boolean caseSensitive, boolean fixed, short minScale, short maxScale, int radix) + throws SQLException { + assertTrue(rs.next()); + assertEquals(name, rs.getString(1)); + assertEquals(type, rs.getInt(2)); + assertEquals(precision, rs.getLong(3)); + assertEquals(prefix, rs.getString(4)); + assertEquals(suffix, rs.getString(5)); + assertEquals(params, rs.getString(6)); + assertEquals(DatabaseMetaData.typeNullable, rs.getShort(7)); + assertEquals(caseSensitive, rs.getBoolean(8)); + assertEquals(DatabaseMetaData.typeSearchable, rs.getShort(9)); + assertFalse(rs.getBoolean(10)); + assertEquals(fixed, rs.getBoolean(11)); + assertFalse(rs.getBoolean(12)); + assertEquals(name, rs.getString(13)); + assertEquals(minScale, rs.getShort(14)); + assertEquals(maxScale, rs.getShort(15)); + rs.getInt(16); + assertTrue(rs.wasNull()); + rs.getInt(17); + assertTrue(rs.wasNull()); + if (radix != 0) { + assertEquals(radix, rs.getInt(18)); + } else { + rs.getInt(18); + assertTrue(rs.wasNull()); + } + } + private void testUDTs() throws SQLException { Connection conn = getConnection("metaData"); DatabaseMetaData meta = conn.getMetaData(); @@ -314,7 +419,7 @@ private void testUDTs() throws SQLException { new String[] { "TYPE_CAT", "TYPE_SCHEM", "TYPE_NAME", "CLASS_NAME", "DATA_TYPE", "REMARKS", "BASE_TYPE" }, new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, - Types.VARCHAR, Types.SMALLINT, Types.VARCHAR, + Types.VARCHAR, Types.INTEGER, Types.VARCHAR, Types.SMALLINT }, null, null); conn.close(); } @@ -333,8 +438,7 @@ private void testCrossReferences() throws SQLException { checkCrossRef(rs); rs = meta.getExportedKeys(null, "PUBLIC", "PARENT"); checkCrossRef(rs); - stat.execute("DROP TABLE PARENT"); - stat.execute("DROP TABLE CHILD"); + stat.execute("DROP TABLE PARENT, CHILD"); conn.close(); } @@ -353,13 +457,13 @@ private void checkCrossRef(ResultSet rs) throws SQLException { Constants.SCHEMA_MAIN, "CHILD", "PA", "1", "" + 
DatabaseMetaData.importedKeyRestrict, "" + DatabaseMetaData.importedKeyRestrict, "AB", - "PRIMARY_KEY_8", + "CONSTRAINT_8", "" + DatabaseMetaData.importedKeyNotDeferrable }, { CATALOG, Constants.SCHEMA_MAIN, "PARENT", "B", CATALOG, Constants.SCHEMA_MAIN, "CHILD", "PB", "2", "" + DatabaseMetaData.importedKeyRestrict, "" + DatabaseMetaData.importedKeyRestrict, "AB", - "PRIMARY_KEY_8", + "CONSTRAINT_8", "" + DatabaseMetaData.importedKeyNotDeferrable } }); } @@ -422,7 +526,7 @@ private void testStatic() throws SQLException { meta.getDriverMinorVersion()); int majorVersion = 4; assertEquals(majorVersion, meta.getJDBCMajorVersion()); - assertEquals(1, meta.getJDBCMinorVersion()); + assertEquals(2, meta.getJDBCMinorVersion()); assertEquals("H2", meta.getDatabaseProductName()); assertEquals(Connection.TRANSACTION_READ_COMMITTED, meta.getDefaultTransactionIsolation()); @@ -457,22 +561,11 @@ private void testStatic() throws SQLException { assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, meta.getResultSetHoldability()); - assertEquals(DatabaseMetaData.sqlStateSQL99, - meta.getSQLStateType()); + assertEquals(DatabaseMetaData.sqlStateSQL, meta.getSQLStateType()); assertFalse(meta.locatorsUpdateCopy()); assertEquals("schema", meta.getSchemaTerm()); assertEquals("\\", meta.getSearchStringEscape()); - assertEquals("GROUPS," // - + "IF,ILIKE,INTERSECTS," // - + "LIMIT," // - + "MINUS," // - + "OFFSET," // - + "QUALIFY," // - + "REGEXP,_ROWID_,ROWNUM," // - + "SYSDATE,SYSTIME,SYSTIMESTAMP," // - + "TODAY,TOP", // - meta.getSQLKeywords()); assertTrue(meta.getURL().startsWith("jdbc:h2:")); assertTrue(meta.getUserName().length() > 1); @@ -485,10 +578,6 @@ private void testStatic() throws SQLException { assertTrue(meta.isCatalogAtStart()); assertFalse(meta.isReadOnly()); assertTrue(meta.nullPlusNonNullIsNull()); - assertFalse(meta.nullsAreSortedAtEnd()); - assertFalse(meta.nullsAreSortedAtStart()); - assertFalse(meta.nullsAreSortedHigh()); - assertTrue(meta.nullsAreSortedLow()); 
assertFalse(meta.othersDeletesAreVisible( ResultSet.TYPE_FORWARD_ONLY)); assertFalse(meta.othersDeletesAreVisible( @@ -555,7 +644,7 @@ private void testStatic() throws SQLException { assertFalse(meta.supportsFullOuterJoins()); assertTrue(meta.supportsGetGeneratedKeys()); - assertTrue(meta.supportsMultipleOpenResults()); + assertFalse(meta.supportsMultipleOpenResults()); assertFalse(meta.supportsNamedParameters()); assertTrue(meta.supportsGroupBy()); @@ -576,8 +665,8 @@ private void testStatic() throws SQLException { assertTrue(meta.supportsOpenStatementsAcrossRollback()); assertTrue(meta.supportsOrderByUnrelated()); assertTrue(meta.supportsOuterJoins()); - assertTrue(meta.supportsPositionedDelete()); - assertTrue(meta.supportsPositionedUpdate()); + assertFalse(meta.supportsPositionedDelete()); + assertFalse(meta.supportsPositionedUpdate()); assertTrue(meta.supportsResultSetConcurrency( ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)); assertTrue(meta.supportsResultSetConcurrency( @@ -617,17 +706,12 @@ private void testStatic() throws SQLException { assertTrue(meta.supportsSubqueriesInQuantifieds()); assertTrue(meta.supportsTableCorrelationNames()); assertTrue(meta.supportsTransactions()); - assertFalse(meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_NONE)); - assertTrue(meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_READ_COMMITTED)); - assertEquals(config.mvStore || !config.multiThreaded, - meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_READ_UNCOMMITTED)); - assertTrue(meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_REPEATABLE_READ)); - assertTrue(meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_SERIALIZABLE)); + assertFalse(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_NONE)); + assertTrue(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED)); + 
assertTrue(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_UNCOMMITTED)); + assertTrue(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_REPEATABLE_READ)); + assertTrue(meta.supportsTransactionIsolationLevel(Constants.TRANSACTION_SNAPSHOT)); + assertTrue(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_SERIALIZABLE)); assertTrue(meta.supportsUnion()); assertTrue(meta.supportsUnionAll()); assertFalse(meta.updatesAreDetected(ResultSet.TYPE_FORWARD_ONLY)); @@ -638,16 +722,31 @@ private void testStatic() throws SQLException { conn.close(); } + private void testNullsAreSortedAt() throws SQLException { + Connection conn = getConnection("metaData"); + Statement stat = conn.createStatement(); + DatabaseMetaData meta = conn.getMetaData(); + testNullsAreSortedAt(meta, DefaultNullOrdering.LOW); + stat.execute("SET DEFAULT_NULL_ORDERING LOW"); + testNullsAreSortedAt(meta, DefaultNullOrdering.LOW); + stat.execute("SET DEFAULT_NULL_ORDERING HIGH"); + testNullsAreSortedAt(meta, DefaultNullOrdering.HIGH); + stat.execute("SET DEFAULT_NULL_ORDERING FIRST"); + testNullsAreSortedAt(meta, DefaultNullOrdering.FIRST); + stat.execute("SET DEFAULT_NULL_ORDERING LAST"); + testNullsAreSortedAt(meta, DefaultNullOrdering.LAST); + stat.execute("SET DEFAULT_NULL_ORDERING LOW"); + conn.close(); + } + + private void testNullsAreSortedAt(DatabaseMetaData meta, DefaultNullOrdering ordering) throws SQLException { + assertEquals(ordering == DefaultNullOrdering.HIGH, meta.nullsAreSortedHigh()); + assertEquals(ordering == DefaultNullOrdering.LOW, meta.nullsAreSortedLow()); + assertEquals(ordering == DefaultNullOrdering.FIRST, meta.nullsAreSortedAtStart()); + assertEquals(ordering == DefaultNullOrdering.LAST, meta.nullsAreSortedAtEnd()); + } + private void testMore() throws SQLException { - int numericType; - String numericName; - if (SysProperties.BIG_DECIMAL_IS_DECIMAL) { - numericType = Types.DECIMAL; - numericName = "DECIMAL"; - } else { - numericType = 
Types.NUMERIC; - numericName = "NUMERIC"; - } Connection conn = getConnection("metaData"); DatabaseMetaData meta = conn.getMetaData(); Statement stat = conn.createStatement(); @@ -710,23 +809,23 @@ private void testMore() throws SQLException { trace("getTables"); rs = meta.getTables(null, Constants.SCHEMA_MAIN, null, new String[] { "TABLE" }); - assertResultSetMeta(rs, 11, new String[] { "TABLE_CAT", "TABLE_SCHEM", + assertResultSetMeta(rs, 10, new String[] { "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "TABLE_TYPE", "REMARKS", "TYPE_CAT", "TYPE_SCHEM", "TYPE_NAME", "SELF_REFERENCING_COL_NAME", - "REF_GENERATION", "SQL" }, new int[] { Types.VARCHAR, + "REF_GENERATION" }, new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, - Types.VARCHAR, Types.VARCHAR }, null, null); + Types.VARCHAR }, null, null); if (rs.next()) { fail("Database is not empty after dropping all tables"); } stat.executeUpdate("CREATE TABLE TEST(" + "ID INT PRIMARY KEY," - + "TEXT_V VARCHAR(120)," + "DEC_V DECIMAL(12,3)," + + "TEXT_V VARCHAR(120)," + "DEC_V DECIMAL(12,3)," + "NUM_V NUMERIC(12,3)," + "DATE_V DATETIME," + "BLOB_V BLOB," + "CLOB_V CLOB" + ")"); rs = meta.getTables(null, Constants.SCHEMA_MAIN, null, new String[] { "TABLE" }); assertResultSetOrdered(rs, new String[][] { { CATALOG, - Constants.SCHEMA_MAIN, "TEST", "TABLE", "" } }); + Constants.SCHEMA_MAIN, "TEST", "BASE TABLE" } }); trace("getColumns"); rs = meta.getColumns(null, null, "TEST", null); assertResultSetMeta(rs, 24, new String[] { "TABLE_CAT", "TABLE_SCHEM", @@ -746,32 +845,34 @@ private void testMore() throws SQLException { null, null); assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "TEST", "ID", - "" + Types.INTEGER, "INTEGER", "10", "10", "0", "10", - "" + DatabaseMetaData.columnNoNulls, "", null, - "" + Types.INTEGER, "0", "10", "1", "NO" }, + "" + Types.INTEGER, "INTEGER", "32", null, "0", "2", + "" + 
DatabaseMetaData.columnNoNulls, null, null, + null, null, "32", "1", "NO" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "TEXT_V", - "" + Types.VARCHAR, "VARCHAR", "120", "120", "0", "10", - "" + DatabaseMetaData.columnNullable, "", null, - "" + Types.VARCHAR, "0", "120", "2", "YES" }, + "" + Types.VARCHAR, "CHARACTER VARYING", "120", null, "0", null, + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "120", "2", "YES" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "DEC_V", - "" + numericType, numericName, "12", "12", "3", "10", - "" + DatabaseMetaData.columnNullable, "", null, - "" + numericType, "0", "12", "3", "YES" }, + "" + Types.DECIMAL, "DECIMAL", "12", null, "3", "10", + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "12", "3", "YES" }, + { CATALOG, Constants.SCHEMA_MAIN, "TEST", "NUM_V", + "" + Types.NUMERIC, "NUMERIC", "12", null, "3", "10", + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "12", "4", "YES" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "DATE_V", - "" + Types.TIMESTAMP, "TIMESTAMP", "26", "26", "6", - "10", "" + DatabaseMetaData.columnNullable, "", null, - "" + Types.TIMESTAMP, "0", "26", "4", "YES" }, + "" + Types.TIMESTAMP, "TIMESTAMP", "26", null, "6", null, + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "26", "5", "YES" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "BLOB_V", - "" + Types.BLOB, "BLOB", "" + Integer.MAX_VALUE, - "" + Integer.MAX_VALUE, "0", "10", - "" + DatabaseMetaData.columnNullable, "", null, - "" + Types.BLOB, "0", "" + Integer.MAX_VALUE, "5", + "" + Types.BLOB, "BINARY LARGE OBJECT", "" + Integer.MAX_VALUE, null, "0", null, + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "" + Integer.MAX_VALUE, "6", "YES" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "CLOB_V", - "" + Types.CLOB, "CLOB", "" + Integer.MAX_VALUE, - "" + Integer.MAX_VALUE, "0", "10", - "" + DatabaseMetaData.columnNullable, "", null, - "" + Types.CLOB, "0", "" + 
Integer.MAX_VALUE, "6", + "" + Types.CLOB, "CHARACTER LARGE OBJECT", "" + Integer.MAX_VALUE, null, "0", null, + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "" + Integer.MAX_VALUE, "7", "YES" } }); /* * rs=meta.getColumns(null,null,"TEST",null); while(rs.next()) { int @@ -781,44 +882,46 @@ private void testMore() throws SQLException { stat.executeUpdate("CREATE INDEX IDX_TEXT_DEC ON TEST(TEXT_V,DEC_V)"); stat.executeUpdate("CREATE UNIQUE INDEX IDX_DATE ON TEST(DATE_V)"); rs = meta.getIndexInfo(null, null, "TEST", false, false); - assertResultSetMeta(rs, 14, new String[] { "TABLE_CAT", "TABLE_SCHEM", + assertResultSetMeta(rs, 13, new String[] { "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "NON_UNIQUE", "INDEX_QUALIFIER", "INDEX_NAME", "TYPE", "ORDINAL_POSITION", "COLUMN_NAME", "ASC_OR_DESC", - "CARDINALITY", "PAGES", "FILTER_CONDITION", "SORT_TYPE" }, + "CARDINALITY", "PAGES", "FILTER_CONDITION" }, new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.BOOLEAN, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.SMALLINT, Types.VARCHAR, - Types.VARCHAR, Types.INTEGER, Types.INTEGER, - Types.VARCHAR, Types.INTEGER }, null, null); + Types.VARCHAR, Types.BIGINT, Types.BIGINT, + Types.VARCHAR }, null, null); assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "TEST", "FALSE", CATALOG, "IDX_DATE", "" + DatabaseMetaData.tableIndexOther, "1", - "DATE_V", "A", "0", "0", "" }, + "DATE_V", "A", "0", "0" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "FALSE", CATALOG, "PRIMARY_KEY_2", "" + DatabaseMetaData.tableIndexOther, - "1", "ID", "A", "0", "0", "" }, + "1", "ID", "A", "0", "0" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "TRUE", CATALOG, "IDX_TEXT_DEC", "" + DatabaseMetaData.tableIndexOther, - "1", "TEXT_V", "A", "0", "0", "" }, + "1", "TEXT_V", "A", "0", "0" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "TRUE", CATALOG, "IDX_TEXT_DEC", "" + DatabaseMetaData.tableIndexOther, - "2", "DEC_V", "A", "0", "0", "" }, }); + "2", 
"DEC_V", "A", "0", "0" }, }, + new int[] { 11 }); stat.executeUpdate("DROP INDEX IDX_TEXT_DEC"); stat.executeUpdate("DROP INDEX IDX_DATE"); rs = meta.getIndexInfo(null, null, "TEST", false, false); - assertResultSetMeta(rs, 14, new String[] { "TABLE_CAT", "TABLE_SCHEM", + assertResultSetMeta(rs, 13, new String[] { "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "NON_UNIQUE", "INDEX_QUALIFIER", "INDEX_NAME", "TYPE", "ORDINAL_POSITION", "COLUMN_NAME", "ASC_OR_DESC", - "CARDINALITY", "PAGES", "FILTER_CONDITION", "SORT_TYPE" }, + "CARDINALITY", "PAGES", "FILTER_CONDITION" }, new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.BOOLEAN, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.SMALLINT, Types.VARCHAR, - Types.VARCHAR, Types.INTEGER, Types.INTEGER, - Types.VARCHAR, Types.INTEGER }, null, null); + Types.VARCHAR, Types.BIGINT, Types.BIGINT, + Types.VARCHAR }, null, null); assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "TEST", "FALSE", CATALOG, "PRIMARY_KEY_2", "" + DatabaseMetaData.tableIndexOther, "1", - "ID", "A", "0", "0", "" } }); + "ID", "A", "0", "0" } }, + new int[] { 11 }); trace("getPrimaryKeys"); rs = meta.getPrimaryKeys(null, null, "TEST"); assertResultSetMeta(rs, 6, new String[] { "TABLE_CAT", "TABLE_SCHEM", @@ -834,37 +937,37 @@ private void testMore() throws SQLException { "CREATE TABLE TX2(B INT,A VARCHAR(6),C INT,PRIMARY KEY(C,A,B))"); rs = meta.getTables(null, null, "T_2", null); assertResultSetOrdered(rs, new String[][] { - { CATALOG, Constants.SCHEMA_MAIN, "TX2", "TABLE", "" }, - { CATALOG, Constants.SCHEMA_MAIN, "T_2", "TABLE", "" } }); + { CATALOG, Constants.SCHEMA_MAIN, "TX2", "BASE TABLE" }, + { CATALOG, Constants.SCHEMA_MAIN, "T_2", "BASE TABLE" } }); trace("getTables - using a quoted _ character"); rs = meta.getTables(null, null, "T\\_2", null); assertResultSetOrdered(rs, new String[][] { { CATALOG, - Constants.SCHEMA_MAIN, "T_2", "TABLE", "" } }); + Constants.SCHEMA_MAIN, "T_2", "BASE TABLE" } }); 
trace("getTables - using the % wildcard"); rs = meta.getTables(null, Constants.SCHEMA_MAIN, "%", new String[] { "TABLE" }); assertResultSetOrdered(rs, new String[][] { - { CATALOG, Constants.SCHEMA_MAIN, "TEST", "TABLE", "" }, - { CATALOG, Constants.SCHEMA_MAIN, "TX2", "TABLE", "" }, - { CATALOG, Constants.SCHEMA_MAIN, "T_2", "TABLE", "" } }); + { CATALOG, Constants.SCHEMA_MAIN, "TEST", "BASE TABLE" }, + { CATALOG, Constants.SCHEMA_MAIN, "TX2", "BASE TABLE" }, + { CATALOG, Constants.SCHEMA_MAIN, "T_2", "BASE TABLE" } }); stat.execute("DROP TABLE TEST"); trace("getColumns - using wildcards"); rs = meta.getColumns(null, null, "___", "B%"); assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "TX2", "B", - "" + Types.INTEGER, "INTEGER", "10" }, + "" + Types.INTEGER, "INTEGER", "32" }, { CATALOG, Constants.SCHEMA_MAIN, "T_2", "B", - "" + Types.INTEGER, "INTEGER", "10" }, }); + "" + Types.INTEGER, "INTEGER", "32" }, }); trace("getColumns - using wildcards"); rs = meta.getColumns(null, null, "_\\__", "%"); assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "T_2", "B", - "" + Types.INTEGER, "INTEGER", "10" }, + "" + Types.INTEGER, "INTEGER", "32" }, { CATALOG, Constants.SCHEMA_MAIN, "T_2", "A", - "" + Types.VARCHAR, "VARCHAR", "6" }, + "" + Types.VARCHAR, "CHARACTER VARYING", "6" }, { CATALOG, Constants.SCHEMA_MAIN, "T_2", "C", - "" + Types.INTEGER, "INTEGER", "10" }, }); + "" + Types.INTEGER, "INTEGER", "32" }, }); trace("getIndexInfo"); stat.executeUpdate("CREATE UNIQUE INDEX A_INDEX ON TX2(B,C,A)"); stat.executeUpdate("CREATE INDEX B_INDEX ON TX2(A,B,C)"); @@ -896,7 +999,8 @@ private void testMore() throws SQLException { "B", "A" }, { CATALOG, Constants.SCHEMA_MAIN, "TX2", "TRUE", CATALOG, "B_INDEX", "" + DatabaseMetaData.tableIndexOther, "3", - "C", "A" }, }); + "C", "A" }, }, + new int[] { 11 }); trace("getPrimaryKeys"); rs = meta.getPrimaryKeys(null, null, "T_2"); assertResultSetOrdered(rs, new String[][] { @@ 
-968,9 +1072,8 @@ private void testMore() throws SQLException { */ rs = meta.getSchemas(); - assertResultSetMeta(rs, 3, new String[] { "TABLE_SCHEM", - "TABLE_CATALOG", "IS_DEFAULT" }, new int[] { Types.VARCHAR, - Types.VARCHAR, Types.BOOLEAN }, null, null); + assertResultSetMeta(rs, 2, new String[] { "TABLE_SCHEM", "TABLE_CATALOG" }, + new int[] { Types.VARCHAR, Types.VARCHAR }, null, null); assertTrue(rs.next()); assertEquals("INFORMATION_SCHEMA", rs.getString(1)); assertTrue(rs.next()); @@ -978,9 +1081,8 @@ private void testMore() throws SQLException { assertFalse(rs.next()); rs = meta.getSchemas(null, null); - assertResultSetMeta(rs, 3, new String[] { "TABLE_SCHEM", - "TABLE_CATALOG", "IS_DEFAULT" }, new int[] { Types.VARCHAR, - Types.VARCHAR, Types.BOOLEAN }, null, null); + assertResultSetMeta(rs, 2, new String[] { "TABLE_SCHEM", "TABLE_CATALOG" }, + new int[] { Types.VARCHAR, Types.VARCHAR }, null, null); assertTrue(rs.next()); assertEquals("INFORMATION_SCHEMA", rs.getString(1)); assertTrue(rs.next()); @@ -996,8 +1098,8 @@ private void testMore() throws SQLException { assertResultSetMeta(rs, 1, new String[] { "TABLE_TYPE" }, new int[] { Types.VARCHAR }, null, null); assertResultSetOrdered(rs, new String[][] { - { "EXTERNAL" }, { "SYSTEM TABLE" }, - { "TABLE" }, { "TABLE LINK" }, { "VIEW" } }); + { "BASE TABLE" }, { "GLOBAL TEMPORARY" }, + { "LOCAL TEMPORARY" }, { "SYNONYM" }, { "VIEW" } }); rs = meta.getTypeInfo(); assertResultSetMeta(rs, 18, new String[] { "TYPE_NAME", "DATA_TYPE", @@ -1072,13 +1174,13 @@ private void testGeneral() throws SQLException { rs = meta.getTableTypes(); rs.next(); - assertEquals("EXTERNAL", rs.getString("TABLE_TYPE")); + assertEquals("BASE TABLE", rs.getString("TABLE_TYPE")); rs.next(); - assertEquals("SYSTEM TABLE", rs.getString("TABLE_TYPE")); + assertEquals("GLOBAL TEMPORARY", rs.getString("TABLE_TYPE")); rs.next(); - assertEquals("TABLE", rs.getString("TABLE_TYPE")); + assertEquals("LOCAL TEMPORARY", 
rs.getString("TABLE_TYPE")); rs.next(); - assertEquals("TABLE LINK", rs.getString("TABLE_TYPE")); + assertEquals("SYNONYM", rs.getString("TABLE_TYPE")); rs.next(); assertEquals("VIEW", rs.getString("TABLE_TYPE")); assertFalse(rs.next()); @@ -1090,74 +1192,18 @@ private void testGeneral() throws SQLException { assertEquals("TEST", rs.getString("TABLE_NAME")); assertFalse(rs.next()); - rs = meta.getTables(null, "INFORMATION_SCHEMA", - null, new String[] { "TABLE", "SYSTEM TABLE" }); - rs.next(); - assertEquals("CATALOGS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("COLLATIONS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("COLUMNS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("COLUMN_PRIVILEGES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("CONSTANTS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("CONSTRAINTS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("CROSS_REFERENCES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("DOMAINS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("FUNCTION_ALIASES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("FUNCTION_COLUMNS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("HELP", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("INDEXES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("IN_DOUBT", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("KEY_COLUMN_USAGE", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("LOCKS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("QUERY_STATISTICS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("REFERENTIAL_CONSTRAINTS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("RIGHTS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("ROLES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("SCHEMATA", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("SEQUENCES", rs.getString("TABLE_NAME")); - 
rs.next(); - assertEquals("SESSIONS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("SESSION_STATE", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("SETTINGS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("SYNONYMS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TABLES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TABLE_CONSTRAINTS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TABLE_PRIVILEGES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TABLE_TYPES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TRIGGERS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TYPE_INFO", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("USERS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("VIEWS", rs.getString("TABLE_NAME")); + rs = meta.getTables(null, "INFORMATION_SCHEMA", null, new String[] { "BASE TABLE", "VIEW" }); + for (String name : new String[] { "CONSTANTS", "ENUM_VALUES", + "INDEXES", "INDEX_COLUMNS", "INFORMATION_SCHEMA_CATALOG_NAME", "IN_DOUBT", "LOCKS", + "QUERY_STATISTICS", "RIGHTS", "ROLES", "SESSIONS", "SESSION_STATE", "SETTINGS", "SYNONYMS", + "USERS", "CHECK_CONSTRAINTS", "COLLATIONS", "COLUMNS", "COLUMN_PRIVILEGES", + "CONSTRAINT_COLUMN_USAGE", "DOMAINS", "DOMAIN_CONSTRAINTS", "ELEMENT_TYPES", "FIELDS", + "KEY_COLUMN_USAGE", "PARAMETERS", + "REFERENTIAL_CONSTRAINTS", "ROUTINES", "SCHEMATA", "SEQUENCES", "TABLES", "TABLE_CONSTRAINTS", + "TABLE_PRIVILEGES", "TRIGGERS", "VIEWS" }) { + rs.next(); + assertEquals(name, rs.getString("TABLE_NAME")); + } assertFalse(rs.next()); rs = meta.getColumns(null, null, "TEST", null); @@ -1201,11 +1247,18 @@ private void testGeneral() throws SQLException { stat.execute("DROP TABLE TEST"); rs = stat.executeQuery("SELECT * FROM INFORMATION_SCHEMA.SETTINGS"); + int mvStoreSettingsCount = 0, pageStoreSettingsCount = 0; while (rs.next()) { - String name = rs.getString("NAME"); - String value = 
rs.getString("VALUE"); - trace(name + "=" + value); + String name = rs.getString("SETTING_NAME"); + trace(name + '=' + rs.getString("SETTING_VALUE")); + if ("COMPRESS".equals(name) || "REUSE_SPACE".equals(name)) { + mvStoreSettingsCount++; + } else if (name.startsWith("PAGE_STORE_")) { + pageStoreSettingsCount++; + } } + assertEquals(2, mvStoreSettingsCount); + assertEquals(0, pageStoreSettingsCount); testMore(); @@ -1228,18 +1281,18 @@ private void testAllowLiteralsNone() throws SQLException { stat.execute("SET ALLOW_LITERALS NONE"); DatabaseMetaData meta = conn.getMetaData(); // meta.getAttributes(null, null, null, null); - meta.getBestRowIdentifier(null, null, null, 0, false); + meta.getBestRowIdentifier(null, null, "TEST", 0, false); meta.getCatalogs(); // meta.getClientInfoProperties(); - meta.getColumnPrivileges(null, null, null, null); + meta.getColumnPrivileges(null, null, "TEST", null); meta.getColumns(null, null, null, null); - meta.getCrossReference(null, null, null, null, null, null); - meta.getExportedKeys(null, null, null); + meta.getCrossReference(null, null, "TEST", null, null, "TEST"); + meta.getExportedKeys(null, null, "TEST"); // meta.getFunctionColumns(null, null, null, null); // meta.getFunctions(null, null, null); - meta.getImportedKeys(null, null, null); - meta.getIndexInfo(null, null, null, false, false); - meta.getPrimaryKeys(null, null, null); + meta.getImportedKeys(null, null, "TEST"); + meta.getIndexInfo(null, null, "TEST", false, false); + meta.getPrimaryKeys(null, null, "TEST"); meta.getProcedureColumns(null, null, null, null); meta.getProcedures(null, null, null); meta.getSchemas(); @@ -1283,32 +1336,6 @@ private void testClientInfo() throws SQLException { deleteDb("metaData"); } - private void testSessionsUncommitted() throws SQLException { - if (config.mvStore || config.memory) { - return; - } - Connection conn = getConnection("metaData"); - conn.setAutoCommit(false); - Statement stat = conn.createStatement(); - stat.execute("create 
table test(id int)"); - stat.execute("begin transaction"); - for (int i = 0; i < 6; i++) { - stat.execute("insert into test values (1)"); - } - ResultSet rs = stat.executeQuery("select contains_uncommitted " + - "from INFORMATION_SCHEMA.SESSIONS"); - rs.next(); - assertEquals(true, rs.getBoolean(1)); - rs.close(); - stat.execute("commit"); - rs = stat.executeQuery("select contains_uncommitted " + - "from INFORMATION_SCHEMA.SESSIONS"); - rs.next(); - assertEquals(false, rs.getBoolean(1)); - conn.close(); - deleteDb("metaData"); - } - private void testQueryStatistics() throws SQLException { Connection conn = getConnection("metaData"); Statement stat = conn.createStatement(); diff --git a/h2/src/test/org/h2/test/jdbc/TestNativeSQL.java b/h2/src/test/org/h2/test/jdbc/TestNativeSQL.java index 0b6f3a9a2d..fd17319597 100644 --- a/h2/src/test/org/h2/test/jdbc/TestNativeSQL.java +++ b/h2/src/test/org/h2/test/jdbc/TestNativeSQL.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -70,7 +70,7 @@ public class TestNativeSQL extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbc/TestPreparedStatement.java b/h2/src/test/org/h2/test/jdbc/TestPreparedStatement.java index 6a59765a8e..7bbe4026b3 100644 --- a/h2/src/test/org/h2/test/jdbc/TestPreparedStatement.java +++ b/h2/src/test/org/h2/test/jdbc/TestPreparedStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -9,13 +9,14 @@ import java.io.IOException; import java.io.InputStream; import java.io.StringReader; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.math.BigDecimal; import java.math.BigInteger; +import java.math.RoundingMode; import java.net.URL; +import java.sql.Array; import java.sql.Connection; import java.sql.Date; +import java.sql.JDBCType; import java.sql.ParameterMetaData; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -25,19 +26,26 @@ import java.sql.Statement; import java.sql.Timestamp; import java.sql.Types; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.ZonedDateTime; import java.util.Calendar; import java.util.GregorianCalendar; +import java.util.TimeZone; import java.util.UUID; import org.h2.api.ErrorCode; +import org.h2.api.H2Type; import org.h2.api.Interval; import org.h2.api.IntervalQualifier; -import org.h2.api.Trigger; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; import org.h2.test.TestBase; import org.h2.test.TestDb; -import org.h2.util.LocalDateTimeUtils; import org.h2.util.Task; /** @@ -47,107 +55,13 @@ public class TestPreparedStatement extends TestDb { private static final int LOB_SIZE = 4000, LOB_SIZE_BIG = 512 * 1024; - /** - * {@code java.time.LocalDate#parse(CharSequence)} or {@code null}. - */ - private static final Method LOCAL_DATE_PARSE; - - /** - * {@code java.time.LocalTime#parse(CharSequence)} or {@code null}. - */ - private static final Method LOCAL_TIME_PARSE; - - /** - * {@code java.time.LocalDateTime#parse(CharSequence)} or {@code null}. 
- */ - private static final Method LOCAL_DATE_TIME_PARSE; - - /** - * {@code java.time.OffsetDateTime#parse(CharSequence)} or {@code null}. - */ - private static final Method OFFSET_DATE_TIME_PARSE; - - static { - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - try { - LOCAL_DATE_PARSE = LocalDateTimeUtils.LOCAL_DATE.getMethod("parse", CharSequence.class); - LOCAL_TIME_PARSE = LocalDateTimeUtils.LOCAL_TIME.getMethod("parse", CharSequence.class); - LOCAL_DATE_TIME_PARSE = LocalDateTimeUtils.LOCAL_DATE_TIME.getMethod("parse", CharSequence.class); - OFFSET_DATE_TIME_PARSE = LocalDateTimeUtils.OFFSET_DATE_TIME.getMethod("parse", CharSequence.class); - } catch (NoSuchMethodException e) { - throw DbException.convert(e); - } - } else { - LOCAL_DATE_PARSE = null; - LOCAL_TIME_PARSE = null; - LOCAL_DATE_TIME_PARSE = null; - OFFSET_DATE_TIME_PARSE = null; - } - } - /** * Run just this test. * * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - /** - * Parses an ISO date string into a java.time.LocalDate. - * - * @param text the ISO date string - * @return the java.time.LocalDate instance - */ - public static Object parseLocalDate(CharSequence text) { - try { - return LOCAL_DATE_PARSE.invoke(null, text); - } catch (IllegalAccessException | InvocationTargetException e) { - throw new IllegalArgumentException("error when parsing text '" + text + "'", e); - } - } - - /** - * Parses an ISO time string into a java.time.LocalTime. - * - * @param text the ISO time string - * @return the java.time.LocalTime instance - */ - public static Object parseLocalTime(CharSequence text) { - try { - return LOCAL_TIME_PARSE.invoke(null, text); - } catch (IllegalAccessException | InvocationTargetException e) { - throw new IllegalArgumentException("error when parsing text '" + text + "'", e); - } - } - - /** - * Parses an ISO date string into a java.time.LocalDateTime. 
- * - * @param text the ISO date string - * @return the java.time.LocalDateTime instance - */ - public static Object parseLocalDateTime(CharSequence text) { - try { - return LOCAL_DATE_TIME_PARSE.invoke(null, text); - } catch (IllegalAccessException | InvocationTargetException e) { - throw new IllegalArgumentException("error when parsing text '" + text + "'", e); - } - } - - /** - * Parses an ISO date string into a java.time.OffsetDateTime. - * - * @param text the ISO date string - * @return the java.time.OffsetDateTime instance - */ - public static Object parseOffsetDateTime(CharSequence text) { - try { - return OFFSET_DATE_TIME_PARSE.invoke(null, text); - } catch (IllegalAccessException | InvocationTargetException e) { - throw new IllegalArgumentException("error when parsing text '" + text + "'", e); - } + TestBase.createCaller().init().testFromMain(); } @Override @@ -165,7 +79,6 @@ public void test() throws Exception { testEnum(conn); testUUID(conn); testUUIDAsJavaObject(conn); - testScopedGeneratedKey(conn); testLobTempFiles(conn); testExecuteErrorTwice(conn); testTempView(conn); @@ -176,16 +89,21 @@ public void test() throws Exception { testCancelReuse(conn); testCoalesce(conn); testPreparedStatementMetaData(conn); + testBigDecimal(conn); testDate(conn); testDate8(conn); testTime8(conn); + testOffsetTime8(conn); testDateTime8(conn); testOffsetDateTime8(conn); + testZonedDateTime8(conn); testInstant8(conn); testInterval(conn); testInterval8(conn); + testJson(conn); testArray(conn); testSetObject(conn); + testSetObject2(conn); testPreparedSubquery(conn); testLikeIndex(conn); testCasewhen(conn); @@ -198,8 +116,8 @@ public void test() throws Exception { testParameterMetaData(conn); testColumnMetaDataWithEquals(conn); testColumnMetaDataWithIn(conn); - testValueResultSet(conn); testMultipleStatements(conn); + testAfterRollback(conn); conn.close(); testPreparedStatementWithLiteralsNone(); testPreparedStatementWithIndexedParameterAndLiteralsNone(); @@ -272,7 +190,7 @@ 
private static void testChangeType(Connection conn) throws SQLException { } private static void testCallTablePrepared(Connection conn) throws SQLException { - PreparedStatement prep = conn.prepareStatement("call table(x int = (1))"); + PreparedStatement prep = conn.prepareStatement("select * from table(x int = (1))"); prep.executeQuery(); prep.executeQuery(); } @@ -407,7 +325,7 @@ private void testInsertFunction(Connection conn) throws SQLException { PreparedStatement prep; ResultSet rs; - stat.execute("CREATE TABLE TEST(ID INT, H BINARY)"); + stat.execute("CREATE TABLE TEST(ID INT, H VARBINARY)"); prep = conn.prepareStatement("INSERT INTO TEST " + "VALUES(?, HASH('SHA256', STRINGTOUTF8(?), 5))"); prep.setInt(1, 1); @@ -477,6 +395,8 @@ private void testMaxRowsChange(Connection conn) throws SQLException { private void testUnknownDataType(Connection conn) throws SQLException { assertThrows(ErrorCode.UNKNOWN_DATA_TYPE_1, conn). prepareStatement("SELECT * FROM (SELECT ? FROM DUAL)"); + assertThrows(ErrorCode.UNKNOWN_DATA_TYPE_1, conn). + prepareStatement("VALUES BITAND(?, ?)"); PreparedStatement prep = conn.prepareStatement("SELECT -?"); prep.setInt(1, 1); execute(prep); @@ -488,7 +408,7 @@ private void testUnknownDataType(Connection conn) throws SQLException { private void testCancelReuse(Connection conn) throws Exception { conn.createStatement().execute( - "CREATE ALIAS SLEEP FOR \"java.lang.Thread.sleep\""); + "CREATE ALIAS SLEEP FOR 'java.lang.Thread.sleep'"); // sleep for 10 seconds final PreparedStatement prep = conn.prepareStatement( "SELECT SLEEP(?) 
FROM SYSTEM_RANGE(1, 10000) LIMIT ?"); @@ -532,11 +452,15 @@ private void testPreparedStatementMetaData(Connection conn) ResultSetMetaData meta = prep.getMetaData(); assertEquals(2, meta.getColumnCount()); assertEquals("INTEGER", meta.getColumnTypeName(1)); - assertEquals("VARCHAR", meta.getColumnTypeName(2)); + assertEquals("CHARACTER VARYING", meta.getColumnTypeName(2)); prep = conn.prepareStatement("call 1"); meta = prep.getMetaData(); assertEquals(1, meta.getColumnCount()); assertEquals("INTEGER", meta.getColumnTypeName(1)); + prep = conn.prepareStatement("SELECT * FROM UNNEST(ARRAY[1, 2])"); + meta = prep.getMetaData(); + assertEquals(1, meta.getColumnCount()); + assertEquals("INTEGER", meta.getColumnTypeName(1)); } private void testArray(Connection conn) throws SQLException { @@ -574,7 +498,7 @@ private void testEnum(Connection conn) throws SQLException { rs.next(); } assertEquals(goodSizes[i], rs.getString(1)); - assertEquals(i, rs.getInt(1)); + assertEquals(i + 1, rs.getInt(1)); Object o = rs.getObject(1); assertEquals(String.class, o.getClass()); } @@ -592,7 +516,7 @@ private void testEnum(Connection conn) throws SQLException { for (int i = 0; i < badSizes.length; i++) { PreparedStatement prep = conn.prepareStatement("SELECT * FROM test_enum WHERE size = ?"); prep.setObject(1, badSizes[i]); - if (config.lazy) { + if (config.lazy && !config.networked) { ResultSet resultSet = prep.executeQuery(); assertThrows(ErrorCode.ENUM_VALUE_NOT_PERMITTED, resultSet).next(); } else { @@ -641,59 +565,6 @@ private void testUUIDAsJavaObject(Connection conn) throws SQLException { stat.execute("drop table test_uuid"); } - /** - * A trigger that creates a sequence value. 
- */ - public static class SequenceTrigger implements Trigger { - - @Override - public void fire(Connection conn, Object[] oldRow, Object[] newRow) - throws SQLException { - conn.setAutoCommit(false); - conn.createStatement().execute("call next value for seq"); - } - - @Override - public void init(Connection conn, String schemaName, - String triggerName, String tableName, boolean before, int type) { - // ignore - } - - @Override - public void close() { - // ignore - } - - @Override - public void remove() { - // ignore - } - - } - - private void testScopedGeneratedKey(Connection conn) throws SQLException { - Statement stat = conn.createStatement(); - stat.execute("create table test(id identity)"); - stat.execute("create sequence seq start with 1000"); - stat.execute("create trigger test_ins after insert on test call \"" + - SequenceTrigger.class.getName() + "\""); - stat.execute("insert into test values(null)", Statement.RETURN_GENERATED_KEYS); - ResultSet rs = stat.getGeneratedKeys(); - rs.next(); - // Generated key - assertEquals(1, rs.getLong(1)); - stat.execute("insert into test values(100)"); - rs = stat.getGeneratedKeys(); - // No generated keys - assertFalse(rs.next()); - // Value from sequence from trigger - rs = stat.executeQuery("select scope_identity()"); - rs.next(); - assertEquals(100, rs.getLong(1)); - stat.execute("drop sequence seq"); - stat.execute("drop table test"); - } - private void testSetObject(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST(C CHAR(1))"); @@ -702,15 +573,15 @@ private void testSetObject(Connection conn) throws SQLException { prep.setObject(1, 'x'); prep.execute(); stat.execute("DROP TABLE TEST"); - stat.execute("CREATE TABLE TEST(ID INT, DATA BINARY, JAVA OTHER)"); + stat.execute("CREATE TABLE TEST(ID INT, DATA VARBINARY, JAVA OTHER)"); prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?, ?)"); prep.setInt(1, 1); prep.setObject(2, 11); prep.setObject(3, 
null); prep.execute(); prep.setInt(1, 2); - prep.setObject(2, 101, Types.OTHER); - prep.setObject(3, 103, Types.OTHER); + prep.setObject(2, 101, Types.JAVA_OBJECT); + prep.setObject(3, 103, Types.JAVA_OBJECT); prep.execute(); PreparedStatement p2 = conn.prepareStatement( "SELECT * FROM TEST ORDER BY ID"); @@ -729,6 +600,71 @@ private void testSetObject(Connection conn) throws SQLException { stat.execute("DROP TABLE TEST"); } + private void testSetObject2(Connection conn) throws SQLException { + try (PreparedStatement prep = conn.prepareStatement("VALUES (?1, ?1 IS OF(INTEGER), ?1 IS OF(BIGINT))")) { + for (int i = 1; i <= 6; i++) { + testSetObject2SetObjectType(prep, i, (long) i); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + // Parameters are converted to VARCHAR by a query + assertEquals(Integer.toString(i), rs.getString(1)); + // Use the type predicate to check a real data type + if (i == 1) { + assertFalse(rs.getBoolean(2)); + assertTrue(rs.getBoolean(3)); + } else { + assertTrue(rs.getBoolean(2)); + assertFalse(rs.getBoolean(3)); + } + } + testSetObject2SetObjectType(prep, i, null); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + assertNull(rs.getObject(1)); + } + } + prep.setObject(1, 1); + } + } + + private static void testSetObject2SetObjectType(PreparedStatement prep, int method, Object value) + throws SQLException { + switch (method) { + case 1: + prep.setObject(1, value); + break; + case 2: + prep.setObject(1, value, Types.INTEGER); + break; + case 3: + prep.setObject(1, value, JDBCType.INTEGER); + break; + case 4: + prep.setObject(1, value, Types.INTEGER, 0); + break; + case 5: + prep.setObject(1, value, JDBCType.INTEGER, 0); + break; + case 6: + prep.setObject(1, value, H2Type.INTEGER, 0); + } + } + + private void testBigDecimal(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?, ?"); + BigDecimal bd = new BigDecimal("12300").setScale(-2, RoundingMode.UNNECESSARY); + 
prep.setBigDecimal(1, bd); + prep.setObject(2, bd); + ResultSet rs = prep.executeQuery(); + rs.next(); + bd = rs.getBigDecimal(1); + assertEquals(12300, bd.intValue()); + assertEquals(0, bd.scale()); + bd = rs.getBigDecimal(2); + assertEquals(12300, bd.intValue()); + assertEquals(0, bd.scale()); + } + private void testDate(Connection conn) throws SQLException { PreparedStatement prep = conn.prepareStatement("SELECT ?"); Timestamp ts = Timestamp.valueOf("2001-02-03 04:05:06"); @@ -740,169 +676,181 @@ private void testDate(Connection conn) throws SQLException { } private void testDate8(Connection conn) throws SQLException { - if (!LocalDateTimeUtils.isJava8DateApiPresent()) { - return; - } PreparedStatement prep = conn.prepareStatement("SELECT ?"); - Object localDate = parseLocalDate("2001-02-03"); + LocalDate localDate = LocalDate.parse("2001-02-03"); prep.setObject(1, localDate); ResultSet rs = prep.executeQuery(); rs.next(); - Object localDate2 = rs.getObject(1, LocalDateTimeUtils.LOCAL_DATE); + LocalDate localDate2 = rs.getObject(1, LocalDate.class); assertEquals(localDate, localDate2); rs.close(); - localDate = parseLocalDate("-0509-01-01"); + localDate = LocalDate.parse("-0509-01-01"); prep.setObject(1, localDate); rs = prep.executeQuery(); rs.next(); - localDate2 = rs.getObject(1, LocalDateTimeUtils.LOCAL_DATE); + localDate2 = rs.getObject(1, LocalDate.class); assertEquals(localDate, localDate2); rs.close(); - /* - * Check that date that doesn't exist in proleptic Gregorian calendar can be - * read as a next date. 
- */ - prep.setString(1, "1500-02-29"); - rs = prep.executeQuery(); - rs.next(); - localDate2 = rs.getObject(1, LocalDateTimeUtils.LOCAL_DATE); - assertEquals(parseLocalDate("1500-03-01"), localDate2); - rs.close(); - prep.setString(1, "1400-02-29"); - rs = prep.executeQuery(); - rs.next(); - localDate2 = rs.getObject(1, LocalDateTimeUtils.LOCAL_DATE); - assertEquals(parseLocalDate("1400-03-01"), localDate2); - rs.close(); - prep.setString(1, "1300-02-29"); + prep.setString(1, "1500-02-28"); rs = prep.executeQuery(); rs.next(); - localDate2 = rs.getObject(1, LocalDateTimeUtils.LOCAL_DATE); - assertEquals(parseLocalDate("1300-03-01"), localDate2); + localDate2 = rs.getObject(1, LocalDate.class); + assertEquals(LocalDate.parse("1500-02-28"), localDate2); rs.close(); - prep.setString(1, "-0100-02-29"); + prep.setString(1, "-0100-02-28"); rs = prep.executeQuery(); rs.next(); - localDate2 = rs.getObject(1, LocalDateTimeUtils.LOCAL_DATE); - assertEquals(parseLocalDate("-0100-03-01"), localDate2); + localDate2 = rs.getObject(1, LocalDate.class); + assertEquals(LocalDate.parse("-0100-02-28"), localDate2); rs.close(); /* - * Check that date that doesn't exist in traditional calendar can be set and - * read with LocalDate and can be read with getDate() as a next date. + * Test dates during Julian to Gregorian transition. + * + * java.util.TimeZone doesn't support LMT, so perform this test with + * fixed time zone offset */ - localDate = parseLocalDate("1582-10-05"); - prep.setObject(1, localDate); - rs = prep.executeQuery(); - rs.next(); - localDate2 = rs.getObject(1, LocalDateTimeUtils.LOCAL_DATE); - assertEquals(localDate, localDate2); - assertEquals("1582-10-05", rs.getString(1)); - assertEquals(Date.valueOf("1582-10-15"), rs.getDate(1)); - /* - * Also check that date that doesn't exist in traditional calendar can be read - * with getDate() with custom Calendar properly. 
- */ - GregorianCalendar gc = new GregorianCalendar(); - gc.setGregorianChange(new java.util.Date(Long.MIN_VALUE)); - gc.clear(); - gc.set(Calendar.YEAR, 1582); - gc.set(Calendar.MONTH, 9); - gc.set(Calendar.DAY_OF_MONTH, 5); - Date expected = new Date(gc.getTimeInMillis()); - gc.clear(); - assertEquals(expected, rs.getDate(1, gc)); - rs.close(); + Statement stat = conn.createStatement(); + stat.execute("SET TIME ZONE '1'"); + TimeZone old = TimeZone.getDefault(); + TimeZone.setDefault(TimeZone.getTimeZone("GMT+01")); + try { + localDate = LocalDate.parse("1582-10-05"); + prep.setObject(1, localDate); + rs = prep.executeQuery(); + rs.next(); + localDate2 = rs.getObject(1, LocalDate.class); + assertEquals(localDate, localDate2); + assertEquals("1582-10-05", rs.getString(1)); + assertEquals(Date.valueOf("1582-09-25"), rs.getDate(1)); + GregorianCalendar gc = new GregorianCalendar(); + gc.setGregorianChange(new java.util.Date(Long.MIN_VALUE)); + gc.clear(); + gc.set(Calendar.YEAR, 1582); + gc.set(Calendar.MONTH, 9); + gc.set(Calendar.DAY_OF_MONTH, 5); + Date expected = new Date(gc.getTimeInMillis()); + gc.clear(); + assertEquals(expected, rs.getDate(1, gc)); + rs.close(); + } finally { + stat.execute("SET TIME ZONE LOCAL"); + TimeZone.setDefault(old); + } } private void testTime8(Connection conn) throws SQLException { - if (!LocalDateTimeUtils.isJava8DateApiPresent()) { - return; - } PreparedStatement prep = conn.prepareStatement("SELECT ?"); - Object localTime = parseLocalTime("04:05:06"); + LocalTime localTime = LocalTime.parse("04:05:06"); prep.setObject(1, localTime); ResultSet rs = prep.executeQuery(); rs.next(); - Object localTime2 = rs.getObject(1, LocalDateTimeUtils.LOCAL_TIME); + LocalTime localTime2 = rs.getObject(1, LocalTime.class); assertEquals(localTime, localTime2); rs.close(); - localTime = parseLocalTime("04:05:06.123456789"); + localTime = LocalTime.parse("04:05:06.123456789"); prep.setObject(1, localTime); rs = prep.executeQuery(); rs.next(); - 
localTime2 = rs.getObject(1, LocalDateTimeUtils.LOCAL_TIME); + localTime2 = rs.getObject(1, LocalTime.class); assertEquals(localTime, localTime2); rs.close(); } + private void testOffsetTime8(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + OffsetTime offsetTime = OffsetTime.parse("04:05:06+02:30"); + prep.setObject(1, offsetTime); + ResultSet rs = prep.executeQuery(); + rs.next(); + OffsetTime offsetTime2 = rs.getObject(1, OffsetTime.class); + assertEquals(offsetTime, offsetTime2); + assertFalse(rs.next()); + rs.close(); + + prep.setObject(1, offsetTime, Types.TIME_WITH_TIMEZONE); + rs = prep.executeQuery(); + rs.next(); + offsetTime2 = rs.getObject(1, OffsetTime.class); + assertEquals(offsetTime, offsetTime2); + assertFalse(rs.next()); + rs.close(); + } + private void testDateTime8(Connection conn) throws SQLException { - if (!LocalDateTimeUtils.isJava8DateApiPresent()) { - return; - } PreparedStatement prep = conn.prepareStatement("SELECT ?"); - Object localDateTime = parseLocalDateTime("2001-02-03T04:05:06"); + LocalDateTime localDateTime = LocalDateTime.parse("2001-02-03T04:05:06"); prep.setObject(1, localDateTime); ResultSet rs = prep.executeQuery(); rs.next(); - Object localDateTime2 = rs.getObject(1, LocalDateTimeUtils.LOCAL_DATE_TIME); + LocalDateTime localDateTime2 = rs.getObject(1, LocalDateTime.class); assertEquals(localDateTime, localDateTime2); rs.close(); } private void testOffsetDateTime8(Connection conn) throws SQLException { - if (!LocalDateTimeUtils.isJava8DateApiPresent()) { - return; - } PreparedStatement prep = conn.prepareStatement("SELECT ?"); - Object offsetDateTime = parseOffsetDateTime("2001-02-03T04:05:06+02:30"); + OffsetDateTime offsetDateTime = OffsetDateTime.parse("2001-02-03T04:05:06+02:30"); prep.setObject(1, offsetDateTime); ResultSet rs = prep.executeQuery(); rs.next(); - Object offsetDateTime2 = rs.getObject(1, LocalDateTimeUtils.OFFSET_DATE_TIME); + OffsetDateTime 
offsetDateTime2 = rs.getObject(1, OffsetDateTime.class); assertEquals(offsetDateTime, offsetDateTime2); assertFalse(rs.next()); rs.close(); - prep.setObject(1, offsetDateTime, 2014); // Types.TIMESTAMP_WITH_TIMEZONE + prep.setObject(1, offsetDateTime, Types.TIMESTAMP_WITH_TIMEZONE); rs = prep.executeQuery(); rs.next(); - offsetDateTime2 = rs.getObject(1, LocalDateTimeUtils.OFFSET_DATE_TIME); + offsetDateTime2 = rs.getObject(1, OffsetDateTime.class); assertEquals(offsetDateTime, offsetDateTime2); + // Check default mapping + rs.getObject(1); assertFalse(rs.next()); rs.close(); } - private void testInstant8(Connection conn) throws Exception { - if (!LocalDateTimeUtils.isJava8DateApiPresent()) { - return; - } - Method timestampToInstant = Timestamp.class.getMethod("toInstant"); - Method now = LocalDateTimeUtils.INSTANT.getMethod("now"); - Method parse = LocalDateTimeUtils.INSTANT.getMethod("parse", CharSequence.class); - + private void testZonedDateTime8(Connection conn) throws SQLException { PreparedStatement prep = conn.prepareStatement("SELECT ?"); + ZonedDateTime zonedDateTime = ZonedDateTime.parse("2001-02-03T04:05:06+02:30"); + prep.setObject(1, zonedDateTime); + ResultSet rs = prep.executeQuery(); + rs.next(); + ZonedDateTime zonedDateTime2 = rs.getObject(1, ZonedDateTime.class); + assertEquals(zonedDateTime, zonedDateTime2); + assertFalse(rs.next()); + rs.close(); - testInstant8Impl(prep, timestampToInstant, now.invoke(null)); - testInstant8Impl(prep, timestampToInstant, parse.invoke(null, "2000-01-15T12:13:14.123456789Z")); - testInstant8Impl(prep, timestampToInstant, parse.invoke(null, "1500-09-10T23:22:11.123456789Z")); + prep.setObject(1, zonedDateTime, Types.TIMESTAMP_WITH_TIMEZONE); + rs = prep.executeQuery(); + rs.next(); + zonedDateTime2 = rs.getObject(1, ZonedDateTime.class); + assertEquals(zonedDateTime, zonedDateTime2); + assertFalse(rs.next()); + rs.close(); } - private void testInstant8Impl(PreparedStatement prep, Method timestampToInstant, Object 
instant) - throws SQLException, IllegalAccessException, InvocationTargetException { + private void testInstant8(Connection conn) throws Exception { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + testInstant8Impl(prep, Instant.now()); + testInstant8Impl(prep, Instant.parse("2000-01-15T12:13:14.123456789Z")); + testInstant8Impl(prep, Instant.parse("1500-09-10T23:22:11.123456789Z")); + } + + private void testInstant8Impl(PreparedStatement prep, Instant instant) throws SQLException { prep.setObject(1, instant); ResultSet rs = prep.executeQuery(); rs.next(); - Object instant2 = rs.getObject(1, LocalDateTimeUtils.INSTANT); + Instant instant2 = rs.getObject(1, Instant.class); assertEquals(instant, instant2); Timestamp ts = rs.getTimestamp(1); - assertEquals(instant, timestampToInstant.invoke(ts)); + assertEquals(instant, ts.toInstant()); assertFalse(rs.next()); rs.close(); prep.setTimestamp(1, ts); rs = prep.executeQuery(); rs.next(); - instant2 = rs.getObject(1, LocalDateTimeUtils.INSTANT); + instant2 = rs.getObject(1, Instant.class); assertEquals(instant, instant2); assertFalse(rs.next()); rs.close(); @@ -920,9 +868,6 @@ private void testInterval(Connection conn) throws SQLException { } private void testInterval8(Connection conn) throws SQLException { - if (!LocalDateTimeUtils.isJava8DateApiPresent()) { - return; - } PreparedStatement prep = conn.prepareStatement("SELECT ?"); testPeriod8(prep, 1, 2, "INTERVAL '1-2' YEAR TO MONTH"); testPeriod8(prep, -1, -2, "INTERVAL '-1-2' YEAR TO MONTH"); @@ -933,26 +878,14 @@ private void testInterval8(Connection conn) throws SQLException { testPeriod8(prep, -100, 0, "INTERVAL '-100' YEAR"); testPeriod8(prep, 0, 100, "INTERVAL '100' MONTH"); testPeriod8(prep, 0, -100, "INTERVAL '-100' MONTH"); - Object period; - try { - Method method = LocalDateTimeUtils.PERIOD.getMethod("of", int.class, int.class, int.class); - period = method.invoke(null, 0, 0, 1); - } catch (ReflectiveOperationException ex) { - throw new 
RuntimeException(ex); - } + Period period = Period.of(0, 0, 1); assertThrows(ErrorCode.INVALID_VALUE_2, prep).setObject(1, period); - Object duration; - try { - duration = LocalDateTimeUtils.DURATION.getMethod("ofSeconds", long.class, long.class) - .invoke(null, -4, 900_000_000); - } catch (ReflectiveOperationException ex) { - throw new RuntimeException(ex); - } + Duration duration = Duration.ofSeconds(-4, 900_000_000); prep.setObject(1, duration); ResultSet rs = prep.executeQuery(); rs.next(); assertEquals("INTERVAL '-3.1' SECOND", rs.getString(1)); - assertEquals(duration, rs.getObject(1, LocalDateTimeUtils.DURATION)); + assertEquals(duration, rs.getObject(1, Duration.class)); } private void testPeriod8(PreparedStatement prep, int years, int months, String expectedString) @@ -962,19 +895,34 @@ private void testPeriod8(PreparedStatement prep, int years, int months, String e private void testPeriod8(PreparedStatement prep, int years, int months, String expectedString, int expYears, int expMonths) throws SQLException { - Object period, expectedPeriod; - try { - Method method = LocalDateTimeUtils.PERIOD.getMethod("of", int.class, int.class, int.class); - period = method.invoke(null, years, months, 0); - expectedPeriod = method.invoke(null, expYears, expMonths, 0); - } catch (ReflectiveOperationException ex) { - throw new RuntimeException(ex); - } + Period period = Period.of(years, months, 0); + Period expectedPeriod = Period.of(expYears, expMonths, 0); prep.setObject(1, period); ResultSet rs = prep.executeQuery(); rs.next(); assertEquals(expectedString, rs.getString(1)); - assertEquals(expectedPeriod, rs.getObject(1, LocalDateTimeUtils.PERIOD)); + assertEquals(expectedPeriod, rs.getObject(1, Period.class)); + } + + private void testJson(Connection conn) throws SQLException { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID BIGINT, J JSON)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); + 
prep.setInt(1, 1); + prep.setString(2, "[1]"); + prep.executeUpdate(); + prep = conn.prepareStatement("INSERT INTO TEST VALUES (?, ? FORMAT JSON)"); + prep.setInt(1, 2); + prep.setString(2, "[1]"); + prep.executeUpdate(); + try (ResultSet rs = stat.executeQuery("SELECT J FROM TEST ORDER BY ID")) { + assertTrue(rs.next()); + assertEquals("\"[1]\"", rs.getString(1)); + assertTrue(rs.next()); + assertEquals("[1]", rs.getString(1)); + assertFalse(rs.next()); + } + stat.execute("DROP TABLE TEST"); } private void testPreparedSubquery(Connection conn) throws SQLException { @@ -1013,19 +961,10 @@ private void testPreparedSubquery(Connection conn) throws SQLException { } private void testParameterMetaData(Connection conn) throws SQLException { - int numericType; - String numericName; - if (SysProperties.BIG_DECIMAL_IS_DECIMAL) { - numericType = Types.DECIMAL; - numericName = "DECIMAL"; - } else { - numericType = Types.NUMERIC; - numericName = "NUMERIC"; - } PreparedStatement prep = conn.prepareStatement("SELECT ?, ?, ? 
FROM DUAL"); ParameterMetaData pm = prep.getParameterMetaData(); assertEquals("java.lang.String", pm.getParameterClassName(1)); - assertEquals("VARCHAR", pm.getParameterTypeName(1)); + assertEquals("CHARACTER VARYING", pm.getParameterTypeName(1)); assertEquals(3, pm.getParameterCount()); assertEquals(ParameterMetaData.parameterModeIn, pm.getParameterMode(1)); assertEquals(Types.VARCHAR, pm.getParameterType(1)); @@ -1040,22 +979,25 @@ private void testParameterMetaData(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST3(ID INT, " + - "NAME VARCHAR(255), DATA DECIMAL(10,2))"); + "NAME VARCHAR(255), DATA1 DECIMAL(10,2), DATA2 NUMERIC(10,2))"); PreparedStatement prep1 = conn.prepareStatement( - "UPDATE TEST3 SET ID=?, NAME=?, DATA=?"); + "UPDATE TEST3 SET ID=?, NAME=?, DATA1=?, DATA2=?"); PreparedStatement prep2 = conn.prepareStatement( - "INSERT INTO TEST3 VALUES(?, ?, ?)"); - checkParameter(prep1, 1, "java.lang.Integer", 4, "INTEGER", 10, 0); - checkParameter(prep1, 2, "java.lang.String", 12, "VARCHAR", 255, 0); - checkParameter(prep1, 3, "java.math.BigDecimal", numericType, numericName, 10, 2); - checkParameter(prep2, 1, "java.lang.Integer", 4, "INTEGER", 10, 0); - checkParameter(prep2, 2, "java.lang.String", 12, "VARCHAR", 255, 0); - checkParameter(prep2, 3, "java.math.BigDecimal", numericType, numericName, 10, 2); + "INSERT INTO TEST3 VALUES(?, ?, ?, ?)"); + checkParameter(prep1, 1, "java.lang.Integer", 4, "INTEGER", 32, 0); + checkParameter(prep1, 2, "java.lang.String", 12, "CHARACTER VARYING", 255, 0); + checkParameter(prep1, 3, "java.math.BigDecimal", Types.DECIMAL, "DECIMAL", 10, 2); + checkParameter(prep1, 4, "java.math.BigDecimal", Types.NUMERIC, "NUMERIC", 10, 2); + checkParameter(prep2, 1, "java.lang.Integer", 4, "INTEGER", 32, 0); + checkParameter(prep2, 2, "java.lang.String", 12, "CHARACTER VARYING", 255, 0); + checkParameter(prep2, 3, "java.math.BigDecimal", Types.DECIMAL, "DECIMAL", 10, 2); + 
checkParameter(prep2, 4, "java.math.BigDecimal", Types.NUMERIC, "NUMERIC", 10, 2); PreparedStatement prep3 = conn.prepareStatement( - "SELECT * FROM TEST3 WHERE ID=? AND NAME LIKE ? AND ?>DATA"); - checkParameter(prep3, 1, "java.lang.Integer", 4, "INTEGER", 10, 0); - checkParameter(prep3, 2, "java.lang.String", 12, "VARCHAR", 0, 0); - checkParameter(prep3, 3, "java.math.BigDecimal", numericType, numericName, 10, 2); + "SELECT * FROM TEST3 WHERE ID=? AND NAME LIKE ? AND ?>DATA1 AND ?>DATA2"); + checkParameter(prep3, 1, "java.lang.Integer", 4, "INTEGER", 32, 0); + checkParameter(prep3, 2, "java.lang.String", 12, "CHARACTER VARYING", 0, 0); + checkParameter(prep3, 3, "java.math.BigDecimal", Types.DECIMAL, "DECIMAL", 10, 2); + checkParameter(prep3, 4, "java.math.BigDecimal", Types.NUMERIC, "NUMERIC", 10, 2); stat.execute("DROP TABLE TEST3"); } @@ -1072,9 +1014,9 @@ private void checkParameter(PreparedStatement prep, int index, private void testLikeIndex(Connection conn) throws SQLException { Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255))"); - stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); - stat.execute("INSERT INTO TEST VALUES(2, 'World')"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT, NAME VARCHAR(255))"); + stat.execute("INSERT INTO TEST VALUES(1, 2, 'Hello')"); + stat.execute("INSERT INTO TEST VALUES(2, 4, 'World')"); stat.execute("create index idxname on test(name);"); PreparedStatement prep, prepExe; @@ -1091,7 +1033,7 @@ private void testLikeIndex(Connection conn) throws SQLException { assertContains(plan, ".tableScan"); rs = prepExe.executeQuery(); rs.next(); - assertEquals("World", rs.getString(2)); + assertEquals("World", rs.getString(3)); assertFalse(rs.next()); prep.setString(1, "H%"); @@ -1102,7 +1044,7 @@ private void testLikeIndex(Connection conn) throws SQLException { assertContains(plan1, "IDXNAME"); rs = prepExe.executeQuery(); rs.next(); - assertEquals("Hello", 
rs.getString(2)); + assertEquals("Hello", rs.getString(3)); assertFalse(rs.next()); stat.execute("DROP TABLE IF EXISTS TEST"); @@ -1197,17 +1139,17 @@ private void testDataTypes(Connection conn) throws SQLException { ResultSet rs; trace("Create tables"); stat.execute("CREATE TABLE T_INT" + - "(ID INT PRIMARY KEY,VALUE INT)"); + "(ID INT PRIMARY KEY,V INT)"); stat.execute("CREATE TABLE T_VARCHAR" + - "(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + "(ID INT PRIMARY KEY,V VARCHAR(255))"); stat.execute("CREATE TABLE T_DECIMAL_0" + - "(ID INT PRIMARY KEY,VALUE DECIMAL(30,0))"); + "(ID INT PRIMARY KEY,V DECIMAL(30,0))"); stat.execute("CREATE TABLE T_DECIMAL_10" + - "(ID INT PRIMARY KEY,VALUE DECIMAL(20,10))"); + "(ID INT PRIMARY KEY,V DECIMAL(20,10))"); stat.execute("CREATE TABLE T_DATETIME" + - "(ID INT PRIMARY KEY,VALUE DATETIME)"); + "(ID INT PRIMARY KEY,V DATETIME)"); stat.execute("CREATE TABLE T_BIGINT" + - "(ID INT PRIMARY KEY,VALUE DECIMAL(30,0))"); + "(ID INT PRIMARY KEY,V DECIMAL(30,0))"); prep = conn.prepareStatement("INSERT INTO T_INT VALUES(?,?)", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); prep.setInt(1, 1); @@ -1301,7 +1243,7 @@ private void testDataTypes(Connection conn) throws SQLException { prep.setFloat(2, -40); prep.executeUpdate(); - rs = stat.executeQuery("SELECT VALUE FROM T_DECIMAL_0 ORDER BY ID"); + rs = stat.executeQuery("SELECT V FROM T_DECIMAL_0 ORDER BY ID"); checkBigDecimal(rs, new String[] { "" + Long.MAX_VALUE, "" + Long.MIN_VALUE, "10", "-20", "30", "-40" }); prep = conn.prepareStatement("INSERT INTO T_BIGINT VALUES(?,?)"); @@ -1327,7 +1269,7 @@ private void testDataTypes(Connection conn) throws SQLException { prep.setObject(2, new BigInteger("-60")); prep.executeUpdate(); - rs = stat.executeQuery("SELECT VALUE FROM T_BIGINT ORDER BY ID"); + rs = stat.executeQuery("SELECT V FROM T_BIGINT ORDER BY ID"); checkBigDecimal(rs, new String[] { "" + Long.MAX_VALUE, "" + Long.MIN_VALUE, "10", "-20", "30", "-40", "-60" }); } @@ -1389,13 
+1331,13 @@ private void testObject(Connection conn) throws SQLException { prep.setObject(13, new java.util.Date(java.sql.Date.valueOf( "2001-02-03").getTime())); prep.setObject(14, new byte[] { 10, 20, 30 }); - prep.setObject(15, 'a', Types.OTHER); + prep.setObject(15, 'a', Types.JAVA_OBJECT); prep.setObject(16, "2001-01-02", Types.DATE); // converting to null seems strange... prep.setObject(17, "2001-01-02", Types.NULL); prep.setObject(18, "3.725", Types.DOUBLE); prep.setObject(19, "23:22:21", Types.TIME); - prep.setObject(20, new java.math.BigInteger("12345"), Types.OTHER); + prep.setObject(20, new java.math.BigInteger("12345"), Types.JAVA_OBJECT); prep.setArray(21, conn.createArrayOf("TINYINT", new Object[] {(byte) 1})); prep.setArray(22, conn.createArrayOf("SMALLINT", new Object[] {(short) -2})); rs = prep.executeQuery(); @@ -1403,10 +1345,8 @@ private void testObject(Connection conn) throws SQLException { assertTrue(rs.getObject(1).equals(Boolean.TRUE)); assertTrue(rs.getObject(2).equals("Abc")); assertTrue(rs.getObject(3).equals(new BigDecimal("10.2"))); - assertTrue(rs.getObject(4).equals(SysProperties.OLD_RESULT_SET_GET_OBJECT ? - (Object) Byte.valueOf((byte) 0xff) : (Object) Integer.valueOf(-1))); - assertTrue(rs.getObject(5).equals(SysProperties.OLD_RESULT_SET_GET_OBJECT ? - (Object) Short.valueOf(Short.MAX_VALUE) : (Object) Integer.valueOf(Short.MAX_VALUE))); + assertTrue(rs.getObject(4).equals(Integer.valueOf(-1))); + assertTrue(rs.getObject(5).equals(Integer.valueOf(Short.MAX_VALUE))); assertTrue(rs.getObject(6).equals(Integer.MIN_VALUE)); assertTrue(rs.getObject(7).equals(Long.MAX_VALUE)); assertTrue(rs.getObject(8).equals(Float.MAX_VALUE)); @@ -1430,12 +1370,10 @@ private void testObject(Connection conn) throws SQLException { java.sql.Time.valueOf("23:22:21"))); assertTrue(rs.getObject(20).equals( new java.math.BigInteger("12345"))); - Object[] a = (Object[]) rs.getObject(21); - assertEquals(a[0], SysProperties.OLD_RESULT_SET_GET_OBJECT ? 
- (Object) Byte.valueOf((byte) 1) : (Object) Integer.valueOf(1)); - a = (Object[]) rs.getObject(22); - assertEquals(a[0], SysProperties.OLD_RESULT_SET_GET_OBJECT ? - (Object) Short.valueOf((short) -2) : (Object) Integer.valueOf(-2)); + Object[] a = (Object[]) ((Array) rs.getObject(21)).getArray(); + assertEquals(a[0], Integer.valueOf(1)); + a = (Object[]) ((Array) rs.getObject(22)).getArray(); + assertEquals(a[0], Integer.valueOf(-2)); // } else if(x instanceof java.io.Reader) { // return session.createLob(Value.CLOB, @@ -1661,32 +1599,39 @@ private void testPreparedStatementWithIndexedParameterAndLiteralsNone() throws S private void testPreparedStatementWithAnyParameter() throws SQLException { deleteDb("preparedStatement"); Connection conn = getConnection("preparedStatement"); - conn.prepareStatement("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE INT UNIQUE)").execute(); - PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST(ID, VALUE) VALUES (?, ?)"); + conn.prepareStatement("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT UNIQUE)").execute(); + PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST(ID, V) VALUES (?, ?)"); for (int i = 0; i < 10_000; i++) { ps.setInt(1, i); ps.setInt(2, i * 10); ps.executeUpdate(); } - Object[] values = {-100, 10, 200, 3_000, 40_000, 500_000}; + Integer[] values = {-100, 10, 200, 3_000, 40_000, 500_000}; int[] expected = {1, 20, 300, 4_000}; // Ensure that other methods return the same results - ps = conn.prepareStatement("SELECT ID FROM TEST WHERE VALUE IN (SELECT * FROM TABLE(X INT=?)) ORDER BY ID"); - anyParameterCheck(ps, values, expected); - ps = conn.prepareStatement("SELECT ID FROM TEST INNER JOIN TABLE(X INT=?) 
T ON TEST.VALUE = T.X"); + ps = conn.prepareStatement("SELECT ID FROM TEST WHERE V IN (SELECT * FROM TABLE(X INT=?)) ORDER BY ID"); anyParameterCheck(ps, values, expected); - // Test expression IN(UNNEST(?)) - ps = conn.prepareStatement("SELECT ID FROM TEST WHERE VALUE IN(UNNEST(?))"); - assertThrows(ErrorCode.PARAMETER_NOT_SET_1, ps).executeQuery(); + ps = conn.prepareStatement("SELECT ID FROM TEST INNER JOIN TABLE(X INT=?) T ON TEST.V = T.X"); anyParameterCheck(ps, values, expected); - anyParameterCheck(ps, 300, new int[] {30}); - anyParameterCheck(ps, -5, new int[0]); // Test expression = ANY(?) - ps = conn.prepareStatement("SELECT ID FROM TEST WHERE VALUE = ANY(?)"); + ps = conn.prepareStatement("SELECT ID FROM TEST WHERE V = ANY(?)"); assertThrows(ErrorCode.PARAMETER_NOT_SET_1, ps).executeQuery(); anyParameterCheck(ps, values, expected); anyParameterCheck(ps, 300, new int[] {30}); anyParameterCheck(ps, -5, new int[0]); + ps = conn.prepareStatement("SELECT V, CASE V WHEN = ANY(?) THEN 1 ELSE 2 END FROM" + + " (VALUES DATE '2000-01-01', DATE '2010-01-01') T(V) ORDER BY V"); + ps.setObject(1, new LocalDate[] { LocalDate.of(2000, 1, 1), LocalDate.of(2030, 1, 1) }); + try (ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next()); + assertEquals(LocalDate.of(2000, 1, 1), rs.getObject(1, LocalDate.class)); + assertEquals(1, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(LocalDate.of(2010, 1, 1), rs.getObject(1, LocalDate.class)); + assertEquals(2, rs.getInt(2)); + assertFalse(rs.next()); + assertEquals("CASE V WHEN = ANY(?1) THEN 1 ELSE 2 END", rs.getMetaData().getColumnLabel(2)); + } conn.close(); deleteDb("preparedStatement"); } @@ -1746,19 +1691,6 @@ private void testColumnMetaDataWithIn(Connection conn) throws SQLException { stmt.execute("DROP TABLE TEST"); } - private void testValueResultSet(Connection conn) throws SQLException { - for (int i = 0; i < 2; i++) { - try (PreparedStatement stmt = conn.prepareStatement("SELECT TABLE(X INT = (1))")) { - 
ResultSet rs = stmt.executeQuery(); - while (rs.next()) { - try (ResultSet rs2 = (ResultSet) rs.getObject(1)) { - assertEquals(1, rs2.getMetaData().getColumnCount()); - } - } - } - } - } - private void testMultipleStatements(Connection conn) throws SQLException { assertThrows(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS, conn).prepareStatement("SELECT ?; SELECT ?1"); assertThrows(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS, conn).prepareStatement("SELECT ?1; SELECT ?"); @@ -1784,6 +1716,44 @@ private void testMultipleStatements(Connection conn) throws SQLException { assertFalse(rs.next()); } stmt.execute("DROP TABLE TEST"); + ps = conn.prepareStatement("CREATE TABLE A (C1 INT);" // + + "CREATE INDEX A_IDX ON A(C1);" // + + "ALTER TABLE A ADD (C2 INT);" // + + "CREATE TABLE B AS (SELECT C1 FROM A);"); + ps.executeUpdate(); + stmt.execute("DROP TABLE A, B"); + } + + private void testAfterRollback(Connection conn) throws SQLException { + try (Statement stat = conn.createStatement()) { + try { + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255))"); + conn.setAutoCommit(false); + + // insert something into test table + stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); + + // execute 'SELECT count(*)' with prepared-statements + PreparedStatement pstmt = conn.prepareStatement("SELECT count(*) FROM TEST"); + try (ResultSet rs = pstmt.executeQuery()) { + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + } + + // rollback the insert + conn.rollback(); + + // re-execute the pstmt. 
+ try (ResultSet rs = pstmt.executeQuery()) { + assertTrue(rs.next()); + assertEquals(0, rs.getInt(1)); + } + } finally { + // cleanup + stat.execute("DROP TABLE IF EXISTS TEST"); + conn.setAutoCommit(true); + } + } } } diff --git a/h2/src/test/org/h2/test/jdbc/TestResultSet.java b/h2/src/test/org/h2/test/jdbc/TestResultSet.java index 6afc4bc987..0b0141a7f0 100644 --- a/h2/src/test/org/h2/test/jdbc/TestResultSet.java +++ b/h2/src/test/org/h2/test/jdbc/TestResultSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -31,20 +31,28 @@ import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.ZonedDateTime; import java.util.Arrays; import java.util.Calendar; import java.util.Collections; +import java.util.GregorianCalendar; import java.util.TimeZone; import org.h2.api.ErrorCode; import org.h2.api.Interval; import org.h2.api.IntervalQualifier; -import org.h2.engine.SysProperties; +import org.h2.engine.Constants; import org.h2.test.TestBase; import org.h2.test.TestDb; -import org.h2.util.DateTimeUtils; import org.h2.util.IOUtils; -import org.h2.util.LocalDateTimeUtils; import org.h2.util.MathUtils; import org.h2.util.StringUtils; @@ -62,7 +70,7 @@ public class TestResultSet extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -89,6 +97,7 @@ public void test() throws Exception { testFindColumn(); testColumnLength(); testArray(); + testRowValue(); testEnum(); testLimitMaxRows(); @@ -130,12 +139,12 @@ private void testUnwrap() throws SQLException { } private void testReuseSimpleResult() throws SQLException { - ResultSet rs = stat.executeQuery("select table(x array=((1)))"); + ResultSet rs = stat.executeQuery("select * from table(x int array=((1)))"); while (rs.next()) { rs.getString(1); } rs.close(); - rs = stat.executeQuery("select table(x array=((1)))"); + rs = stat.executeQuery("select * from table(x int array=((1)))"); while (rs.next()) { rs.getString(1); } @@ -150,9 +159,9 @@ private void testUnsupportedOperations() throws SQLException { assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). getUnicodeStream("x"); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - getObject(1, Collections.>emptyMap()); + getObject(1, Collections.emptyMap()); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - getObject("x", Collections.>emptyMap()); + getObject("x", Collections.emptyMap()); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). getRef(1); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). 
@@ -368,13 +377,13 @@ private void testParseSpecialValue(String x) throws SQLException { } private void testSubstringDataType() throws SQLException { - ResultSet rs = stat.executeQuery("select substr(x, 1, 1) from dual"); + ResultSet rs = stat.executeQuery("select substr(x, 1, 1) from system_range(1, 1)"); rs.next(); assertEquals(Types.VARCHAR, rs.getMetaData().getColumnType(1)); } private void testColumnLabelColumnName() throws SQLException { - ResultSet rs = stat.executeQuery("select x as y from dual"); + ResultSet rs = stat.executeQuery("select x as y from system_range(1, 1)"); rs.next(); rs.getString("x"); rs.getString("y"); @@ -472,7 +481,7 @@ private void testSubstringPrecision() throws SQLException { trace("testSubstringPrecision"); stat.execute("CREATE TABLE TEST(ID INT, NAME VARCHAR(10))"); stat.execute("INSERT INTO TEST VALUES(1, 'Hello'), (2, 'WorldPeace')"); - checkPrecision(0, "SELECT SUBSTR(NAME, 12, 4) FROM TEST"); + checkPrecision(1, "SELECT SUBSTR(NAME, 12, 4) FROM TEST"); checkPrecision(9, "SELECT SUBSTR(NAME, 2) FROM TEST"); checkPrecision(10, "SELECT SUBSTR(NAME, ID) FROM TEST"); checkPrecision(4, "SELECT SUBSTR(NAME, 2, 4) FROM TEST"); @@ -541,20 +550,20 @@ private void testColumnLength() throws SQLException { rs = stat.executeQuery("explain select * from dual"); meta = rs.getMetaData(); - assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(1)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getColumnDisplaySize(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getPrecision(1)); rs = stat.executeQuery("script"); meta = rs.getMetaData(); - assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(1)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getColumnDisplaySize(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getPrecision(1)); rs = stat.executeQuery("select group_concat(table_name) " + "from 
information_schema.tables"); rs.next(); meta = rs.getMetaData(); - assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(1)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getColumnDisplaySize(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getPrecision(1)); } @@ -565,17 +574,13 @@ private void testLimitMaxRows() throws SQLException { rs = stat.executeQuery("SELECT C || C FROM one;"); ResultSetMetaData md = rs.getMetaData(); assertEquals(20, md.getPrecision(1)); - ResultSet rs2 = stat.executeQuery("SELECT UPPER (C) FROM one;"); - ResultSetMetaData md2 = rs2.getMetaData(); - assertEquals(10, md2.getPrecision(1)); - rs = stat.executeQuery("SELECT UPPER (C), CHAR(10), " + + rs = stat.executeQuery("SELECT CHAR(10), " + "CONCAT(C,C,C), HEXTORAW(C), RAWTOHEX(C) FROM one"); ResultSetMetaData meta = rs.getMetaData(); - assertEquals(10, meta.getPrecision(1)); - assertEquals(1, meta.getPrecision(2)); - assertEquals(30, meta.getPrecision(3)); - assertEquals(3, meta.getPrecision(4)); - assertEquals(40, meta.getPrecision(5)); + assertEquals(1, meta.getPrecision(1)); + assertEquals(30, meta.getPrecision(2)); + assertEquals(2, meta.getPrecision(3)); + assertEquals(40, meta.getPrecision(4)); stat.execute("DROP TABLE one"); } @@ -616,7 +621,7 @@ private void testInt() throws SQLException { ResultSet rs; Object o; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE INT)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" INT)"); stat.execute("INSERT INTO TEST VALUES(1,-1)"); stat.execute("INSERT INTO TEST VALUES(2,0)"); stat.execute("INSERT INTO TEST VALUES(3,1)"); @@ -657,12 +662,12 @@ private void testInt() throws SQLException { assertFalse(meta.isDefinitelyWritable(1)); assertTrue(meta.getColumnDisplaySize(1) > 0); assertTrue(meta.getColumnDisplaySize(2) > 0); - assertEquals(null, meta.getColumnClassName(3)); + assertEquals(Void.class.getName(), meta.getColumnClassName(3)); 
assertTrue(rs.getRow() == 0); assertResultSetMeta(rs, 3, new String[] { "ID", "VALUE", "N" }, new int[] { Types.INTEGER, Types.INTEGER, - Types.NULL }, new int[] { 10, 10, 1 }, new int[] { 0, 0, 0 }); + Types.NULL }, new int[] { 32, 32, 1 }, new int[] { 0, 0, 0 }); rs.next(); assertEquals(ResultSet.CONCUR_READ_ONLY, rs.getConcurrency()); assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); @@ -769,7 +774,7 @@ private void testSmallInt() throws SQLException { ResultSet rs; Object o; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE SMALLINT)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" SMALLINT)"); stat.execute("INSERT INTO TEST VALUES(1,-1)"); stat.execute("INSERT INTO TEST VALUES(2,0)"); stat.execute("INSERT INTO TEST VALUES(3,1)"); @@ -791,7 +796,7 @@ private void testSmallInt() throws SQLException { assertTrue(rs.getRow() == 0); assertResultSetMeta(rs, 3, new String[] { "ID", "VALUE", "N" }, new int[] { Types.INTEGER, Types.SMALLINT, - Types.NULL }, new int[] { 10, 5, 1 }, new int[] { 0, 0, 0 }); + Types.NULL }, new int[] { 32, 16, 1 }, new int[] { 0, 0, 0 }); rs.next(); assertTrue(rs.getRow() == 1); @@ -811,7 +816,7 @@ private void testSmallInt() throws SQLException { o = rs.getObject("value"); trace(o.getClass().getName()); - assertTrue(o.getClass() == (SysProperties.OLD_RESULT_SET_GET_OBJECT ? Short.class : Integer.class)); + assertTrue(o.getClass() == Integer.class); assertTrue(((Number) o).intValue() == -1); o = rs.getObject("value", Short.class); trace(o.getClass().getName()); @@ -819,7 +824,7 @@ private void testSmallInt() throws SQLException { assertTrue((Short) o == -1); o = rs.getObject(2); trace(o.getClass().getName()); - assertTrue(o.getClass() == (SysProperties.OLD_RESULT_SET_GET_OBJECT ? 
Short.class : Integer.class)); + assertTrue(o.getClass() == Integer.class); assertTrue(((Number) o).intValue() == -1); o = rs.getObject(2, Short.class); trace(o.getClass().getName()); @@ -892,7 +897,7 @@ private void testBigInt() throws SQLException { ResultSet rs; Object o; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE BIGINT)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" BIGINT)"); stat.execute("INSERT INTO TEST VALUES(1,-1)"); stat.execute("INSERT INTO TEST VALUES(2,0)"); stat.execute("INSERT INTO TEST VALUES(3,1)"); @@ -914,7 +919,7 @@ private void testBigInt() throws SQLException { assertTrue(rs.getRow() == 0); assertResultSetMeta(rs, 3, new String[] { "ID", "VALUE", "N" }, new int[] { Types.INTEGER, Types.BIGINT, - Types.NULL }, new int[] { 10, 19, 1 }, new int[] { 0, 0, 0 }); + Types.NULL }, new int[] { 32, 64, 1 }, new int[] { 0, 0, 0 }); rs.next(); assertTrue(rs.getRow() == 1); @@ -1023,7 +1028,7 @@ private void testVarchar() throws SQLException { ResultSet rs; Object o; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" VARCHAR(255))"); stat.execute("INSERT INTO TEST VALUES(1,'')"); stat.execute("INSERT INTO TEST VALUES(2,' ')"); stat.execute("INSERT INTO TEST VALUES(3,' ')"); @@ -1038,7 +1043,7 @@ private void testVarchar() throws SQLException { rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.VARCHAR }, new int[] { - 10, 255 }, new int[] { 0, 0 }); + 32, 255 }, new int[] { 0, 0 }); String value; rs.next(); value = rs.getString(2); @@ -1108,17 +1113,11 @@ private void testVarchar() throws SQLException { } private void testDecimal() throws SQLException { - int numericType; - if (SysProperties.BIG_DECIMAL_IS_DECIMAL) { - numericType = Types.DECIMAL; - } else { - numericType = Types.NUMERIC; - } trace("Test DECIMAL"); ResultSet rs; Object 
o; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE DECIMAL(10,2))"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" DECIMAL(10,2))"); stat.execute("INSERT INTO TEST VALUES(1,-1)"); stat.execute("INSERT INTO TEST VALUES(2,.0)"); stat.execute("INSERT INTO TEST VALUES(3,1.)"); @@ -1128,8 +1127,8 @@ private void testDecimal() throws SQLException { stat.execute("INSERT INTO TEST VALUES(8,NULL)"); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, - new int[] { Types.INTEGER, numericType }, new int[] { - 10, 10 }, new int[] { 0, 2 }); + new int[] { Types.INTEGER, Types.DECIMAL }, new int[] { + 32, 10 }, new int[] { 0, 2 }); BigDecimal bd; rs.next(); @@ -1176,7 +1175,7 @@ private void testDecimal() throws SQLException { assertFalse(rs.next()); stat.execute("DROP TABLE TEST"); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE DECIMAL(22,2))"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" DECIMAL(22,2))"); stat.execute("INSERT INTO TEST VALUES(1,-12345678909876543210)"); stat.execute("INSERT INTO TEST VALUES(2,12345678901234567890.12345)"); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); @@ -1194,18 +1193,26 @@ private void testDoubleFloat() throws SQLException { ResultSet rs; Object o; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, D DOUBLE, R REAL)"); - stat.execute("INSERT INTO TEST VALUES(1, -1, -1)"); - stat.execute("INSERT INTO TEST VALUES(2,.0, .0)"); - stat.execute("INSERT INTO TEST VALUES(3, 1., 1.)"); - stat.execute("INSERT INTO TEST VALUES(4, 12345678.89, 12345678.89)"); - stat.execute("INSERT INTO TEST VALUES(6, 99999999.99, 99999999.99)"); - stat.execute("INSERT INTO TEST VALUES(7, -99999999.99, -99999999.99)"); - stat.execute("INSERT INTO TEST VALUES(8, NULL, NULL)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, D DOUBLE, R REAL, F DECFLOAT)"); + stat.execute("INSERT INTO TEST VALUES(1, -1, -1, -1)"); + 
stat.execute("INSERT INTO TEST VALUES(2, .0, .0, .0)"); + stat.execute("INSERT INTO TEST VALUES(3, 1., 1., 1.)"); + stat.execute("INSERT INTO TEST VALUES(4, 12345678.89, 12345678.89, 12345678.89)"); + stat.execute("INSERT INTO TEST VALUES(6, 99999999.99, 99999999.99, 99999999.99)"); + stat.execute("INSERT INTO TEST VALUES(7, -99999999.99, -99999999.99, -99999999.99)"); + stat.execute("INSERT INTO TEST VALUES(8, NULL, NULL, NULL)"); + stat.execute("INSERT INTO TEST VALUES(9, '-Infinity', '-Infinity', '-Infinity')"); + stat.execute("INSERT INTO TEST VALUES(10, 'Infinity', 'Infinity', 'Infinity')"); + stat.execute("INSERT INTO TEST VALUES(11, 'NaN', 'NaN', 'NaN')"); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); - assertResultSetMeta(rs, 3, new String[] { "ID", "D", "R" }, - new int[] { Types.INTEGER, Types.DOUBLE, Types.REAL }, - new int[] { 10, 17, 7 }, new int[] { 0, 0, 0 }); + assertResultSetMeta(rs, 4, new String[] { "ID", "D", "R", "F" }, + null, + new int[] { 32, 53, 24, 100_000 }, new int[] { 0, 0, 0, 0 }); + ResultSetMetaData md = rs.getMetaData(); + assertEquals("INTEGER", md.getColumnTypeName(1)); + assertEquals("DOUBLE PRECISION", md.getColumnTypeName(2)); + assertEquals("REAL", md.getColumnTypeName(3)); + assertEquals("DECFLOAT", md.getColumnTypeName(4)); BigDecimal bd; rs.next(); assertTrue(rs.getInt(1) == 1); @@ -1232,6 +1239,14 @@ private void testDoubleFloat() throws SQLException { trace(o.getClass().getName()); assertTrue(o instanceof Float); assertTrue(((Float) o).compareTo(-1f) == 0); + o = rs.getObject(4); + trace(o.getClass().getName()); + assertTrue(o instanceof BigDecimal); + assertEquals(BigDecimal.valueOf(-1L, 0), o); + o = rs.getObject(4, BigDecimal.class); + trace(o.getClass().getName()); + assertTrue(o instanceof BigDecimal); + assertEquals(BigDecimal.valueOf(-1L, 0), o); rs.next(); assertTrue(rs.getInt(1) == 2); assertFalse(rs.wasNull()); @@ -1239,27 +1254,58 @@ private void testDoubleFloat() throws SQLException { 
assertFalse(rs.wasNull()); assertTrue(rs.getInt(3) == 0); assertFalse(rs.wasNull()); + assertTrue(rs.getInt(4) == 0); + assertFalse(rs.wasNull()); bd = rs.getBigDecimal(2); assertTrue(bd.compareTo(new BigDecimal("0.00")) == 0); assertFalse(rs.wasNull()); bd = rs.getBigDecimal(3); assertTrue(bd.compareTo(new BigDecimal("0.00")) == 0); assertFalse(rs.wasNull()); + bd = rs.getBigDecimal(4); + assertTrue(bd.compareTo(new BigDecimal("0.00")) == 0); + assertFalse(rs.wasNull()); rs.next(); assertEquals(1.0, rs.getDouble(2)); assertEquals(1.0f, rs.getFloat(3)); + assertEquals(BigDecimal.ONE, rs.getBigDecimal(4)); rs.next(); assertEquals(12345678.89, rs.getDouble(2)); assertEquals(12345678.89f, rs.getFloat(3)); + assertEquals(BigDecimal.valueOf(12_345_678_89L, 2), rs.getBigDecimal(4)); rs.next(); assertEquals(99999999.99, rs.getDouble(2)); assertEquals(99999999.99f, rs.getFloat(3)); + assertEquals(BigDecimal.valueOf(99_999_999_99L, 2), rs.getBigDecimal(4)); rs.next(); assertEquals(-99999999.99, rs.getDouble(2)); assertEquals(-99999999.99f, rs.getFloat(3)); + assertEquals(BigDecimal.valueOf(-99_999_999_99L, 2), rs.getBigDecimal(4)); rs.next(); checkColumnBigDecimal(rs, 2, 0, null); checkColumnBigDecimal(rs, 3, 0, null); + checkColumnBigDecimal(rs, 4, 0, null); + rs.next(); + assertEquals(Float.NEGATIVE_INFINITY, rs.getFloat(2)); + assertEquals(Double.NEGATIVE_INFINITY, rs.getDouble(3)); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getBigDecimal(4); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(4); + assertEquals(Double.NEGATIVE_INFINITY, rs.getDouble(4)); + assertEquals("-Infinity", rs.getString(4)); + rs.next(); + assertEquals(Float.POSITIVE_INFINITY, rs.getFloat(2)); + assertEquals(Double.POSITIVE_INFINITY, rs.getDouble(3)); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getBigDecimal(4); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(4); + assertEquals(Double.POSITIVE_INFINITY, rs.getDouble(4)); + assertEquals("Infinity", 
rs.getString(4)); + rs.next(); + assertEquals(Float.NaN, rs.getFloat(2)); + assertEquals(Double.NaN, rs.getDouble(3)); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getBigDecimal(4); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(4); + assertEquals(Double.NaN, rs.getDouble(4)); + assertEquals("NaN", rs.getString(4)); assertFalse(rs.next()); stat.execute("DROP TABLE TEST"); } @@ -1282,21 +1328,21 @@ private void testDatetime() throws SQLException { rs.next(); assertEquals("-99999-12-23 01:02:03", rs.getString(1)); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE DATETIME)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" DATETIME)"); stat.execute("INSERT INTO TEST VALUES(1,DATE '2011-11-11')"); stat.execute("INSERT INTO TEST VALUES(2,TIMESTAMP '2002-02-02 02:02:02')"); stat.execute("INSERT INTO TEST VALUES(3,TIMESTAMP '1800-1-1 0:0:0')"); stat.execute("INSERT INTO TEST VALUES(4,TIMESTAMP '9999-12-31 23:59:59')"); stat.execute("INSERT INTO TEST VALUES(5,NULL)"); rs = stat.executeQuery("SELECT 0 ID, " + - "TIMESTAMP '9999-12-31 23:59:59' VALUE FROM TEST ORDER BY ID"); + "TIMESTAMP '9999-12-31 23:59:59' \"VALUE\" FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.TIMESTAMP }, - new int[] { 10, 29 }, new int[] { 0, 9 }); + new int[] { 32, 29 }, new int[] { 0, 9 }); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.TIMESTAMP }, - new int[] { 10, 26 }, new int[] { 0, 6 }); + new int[] { 32, 26 }, new int[] { 0, 6 }); rs.next(); java.sql.Date date; java.sql.Time time; @@ -1361,47 +1407,24 @@ private void testDatetime() throws SQLException { assertEquals("2002-02-02 02:02:02.0", ts.toString()); rs.next(); - assertEquals("1800-01-01", rs.getDate("value").toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("1800-01-01", 
rs.getObject("value", - LocalDateTimeUtils.LOCAL_DATE).toString()); - } + assertEquals("1800-01-01", rs.getObject("value", LocalDate.class).toString()); assertEquals("00:00:00", rs.getTime("value").toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("00:00", rs.getObject("value", - LocalDateTimeUtils.LOCAL_TIME).toString()); - } - assertEquals("1800-01-01 00:00:00.0", rs.getTimestamp("value").toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("1800-01-01T00:00", rs.getObject("value", - LocalDateTimeUtils.LOCAL_DATE_TIME).toString()); - } + assertEquals("00:00", rs.getObject("value", LocalTime.class).toString()); + assertEquals("1800-01-01T00:00", rs.getObject("value", LocalDateTime.class).toString()); rs.next(); assertEquals("9999-12-31", rs.getDate("Value").toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("9999-12-31", rs.getObject("Value", - LocalDateTimeUtils.LOCAL_DATE).toString()); - } + assertEquals("9999-12-31", rs.getObject("Value", LocalDate.class).toString()); assertEquals("23:59:59", rs.getTime("Value").toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("23:59:59", rs.getObject("Value", - LocalDateTimeUtils.LOCAL_TIME).toString()); - } + assertEquals("23:59:59", rs.getObject("Value", LocalTime.class).toString()); assertEquals("9999-12-31 23:59:59.0", rs.getTimestamp("Value").toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("9999-12-31T23:59:59", rs.getObject("Value", - LocalDateTimeUtils.LOCAL_DATE_TIME).toString()); - } + assertEquals("9999-12-31T23:59:59", rs.getObject("Value", LocalDateTime.class).toString()); rs.next(); assertTrue(rs.getDate("Value") == null && rs.wasNull()); assertTrue(rs.getTime("vALUe") == null && rs.wasNull()); assertTrue(rs.getTimestamp(2) == null && rs.wasNull()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertTrue(rs.getObject(2, - LocalDateTimeUtils.LOCAL_DATE_TIME) 
== null && rs.wasNull()); - } + assertTrue(rs.getObject(2, LocalDateTime.class) == null && rs.wasNull()); assertFalse(rs.next()); rs = stat.executeQuery("SELECT DATE '2001-02-03' D, " + @@ -1421,21 +1444,56 @@ private void testDatetime() throws SQLException { assertEquals("2001-02-03", date.toString()); assertEquals("14:15:16", time.toString()); assertEquals("2007-08-09 10:11:12.141516171", ts.toString()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("2001-02-03", rs.getObject(1, - LocalDateTimeUtils.LOCAL_DATE).toString()); - } - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("14:15:16", rs.getObject(2, - LocalDateTimeUtils.LOCAL_TIME).toString()); - } - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("2007-08-09T10:11:12.141516171", - rs.getObject(3, LocalDateTimeUtils.LOCAL_DATE_TIME) - .toString()); - } + assertEquals("2001-02-03", rs.getObject(1, LocalDate.class).toString()); + assertEquals("14:15:16", rs.getObject(2, LocalTime.class).toString()); + assertEquals("2007-08-09T10:11:12.141516171", rs.getObject(3, LocalDateTime.class).toString()); stat.execute("DROP TABLE TEST"); + + rs = stat.executeQuery("SELECT LOCALTIME, CURRENT_TIME"); + rs.next(); + assertEquals(rs.getTime(1), rs.getTime(2)); + rs = stat.executeQuery("SELECT LOCALTIMESTAMP, CURRENT_TIMESTAMP"); + rs.next(); + assertEquals(rs.getTimestamp(1), rs.getTimestamp(2)); + + rs = stat.executeQuery("SELECT DATE '-1000000000-01-01', " + "DATE '1000000000-12-31'"); + rs.next(); + assertEquals("-999999999-01-01", rs.getObject(1, LocalDate.class).toString()); + assertEquals("+999999999-12-31", rs.getObject(2, LocalDate.class).toString()); + + rs = stat.executeQuery("SELECT TIMESTAMP '-1000000000-01-01 00:00:00', " + + "TIMESTAMP '1000000000-12-31 23:59:59.999999999'"); + rs.next(); + assertEquals("-999999999-01-01T00:00", rs.getObject(1, LocalDateTime.class).toString()); + assertEquals("+999999999-12-31T23:59:59.999999999", rs.getObject(2, 
LocalDateTime.class).toString()); + + rs = stat.executeQuery("SELECT TIMESTAMP WITH TIME ZONE '-1000000000-01-01 00:00:00Z', " + + "TIMESTAMP WITH TIME ZONE '1000000000-12-31 23:59:59.999999999Z', " + + "TIMESTAMP WITH TIME ZONE '-1000000000-01-01 00:00:00+18', " + + "TIMESTAMP WITH TIME ZONE '1000000000-12-31 23:59:59.999999999-18'"); + rs.next(); + assertEquals("-999999999-01-01T00:00Z", rs.getObject(1, OffsetDateTime.class).toString()); + assertEquals("+999999999-12-31T23:59:59.999999999Z", rs.getObject(2, OffsetDateTime.class).toString()); + assertEquals("-999999999-01-01T00:00+18:00", rs.getObject(3, OffsetDateTime.class).toString()); + assertEquals("+999999999-12-31T23:59:59.999999999-18:00", rs.getObject(4, OffsetDateTime.class).toString()); + assertEquals("-999999999-01-01T00:00Z", rs.getObject(1, ZonedDateTime.class).toString()); + assertEquals("+999999999-12-31T23:59:59.999999999Z", rs.getObject(2, ZonedDateTime.class).toString()); + assertEquals("-999999999-01-01T00:00+18:00", rs.getObject(3, ZonedDateTime.class).toString()); + assertEquals("+999999999-12-31T23:59:59.999999999-18:00", rs.getObject(4, ZonedDateTime.class).toString()); + assertEquals("-1000000000-01-01T00:00:00Z", rs.getObject(1, Instant.class).toString()); + assertEquals("+1000000000-12-31T23:59:59.999999999Z", rs.getObject(2, Instant.class).toString()); + assertEquals("-1000000000-01-01T00:00:00Z", rs.getObject(3, Instant.class).toString()); + assertEquals("+1000000000-12-31T23:59:59.999999999Z", rs.getObject(4, Instant.class).toString()); + + rs = stat.executeQuery("SELECT LOCALTIME, CURRENT_TIME"); + rs.next(); + assertEquals(rs.getObject(1, LocalTime.class), rs.getObject(2, LocalTime.class)); + assertEquals(rs.getObject(1, OffsetTime.class), rs.getObject(2, OffsetTime.class)); + rs = stat.executeQuery("SELECT LOCALTIMESTAMP, CURRENT_TIMESTAMP"); + rs.next(); + assertEquals(rs.getObject(1, LocalDateTime.class), rs.getObject(2, LocalDateTime.class)); + assertEquals(rs.getObject(1, 
OffsetDateTime.class), rs.getObject(2, OffsetDateTime.class)); } private void testDatetimeWithCalendar() throws SQLException { @@ -1446,8 +1504,8 @@ private void testDatetimeWithCalendar() throws SQLException { "D DATE, T TIME, TS TIMESTAMP(9))"); PreparedStatement prep = conn.prepareStatement( "INSERT INTO TEST VALUES(?, ?, ?, ?)"); - Calendar regular = DateTimeUtils.createGregorianCalendar(); - Calendar other = null; + GregorianCalendar regular = new GregorianCalendar(); + GregorianCalendar other = null; // search a locale that has a _different_ raw offset long testTime = java.sql.Date.valueOf("2001-02-03").getTime(); for (String s : TimeZone.getAvailableIDs()) { @@ -1459,7 +1517,7 @@ private void testDatetimeWithCalendar() throws SQLException { if (rawOffsetDiff != 0 && rawOffsetDiff != 1000 * 60 * 60 * 24) { if (regular.getTimeZone().getOffset(testTime) != zone.getOffset(testTime)) { - other = DateTimeUtils.createGregorianCalendar(zone); + other = new GregorianCalendar(zone); break; } } @@ -1512,7 +1570,7 @@ private void testDatetimeWithCalendar() throws SQLException { new String[] { "ID", "D", "T", "TS" }, new int[] { Types.INTEGER, Types.DATE, Types.TIME, Types.TIMESTAMP }, - new int[] { 10, 10, 8, 29 }, new int[] { 0, 0, 0, 9 }); + new int[] { 32, 10, 8, 29 }, new int[] { 0, 0, 0, 9 }); rs.next(); assertEquals(0, rs.getInt(1)); @@ -1581,46 +1639,32 @@ private void testInterval() throws SQLException { assertEquals("INTERVAL YEAR", metaData.getColumnTypeName(1)); assertEquals(Interval.class.getName(), metaData.getColumnClassName(1)); assertEquals("INTERVAL '-111222333444555666' YEAR".length(), metaData.getColumnDisplaySize(1)); + // Intervals are not numbers + assertFalse(metaData.isSigned(1)); } private void testInterval8() throws SQLException { - if (!LocalDateTimeUtils.isJava8DateApiPresent()) { - return; - } trace("Test INTERVAL 8"); ResultSet rs; - Object expected; rs = stat.executeQuery("CALL INTERVAL '1-2' YEAR TO MONTH"); rs.next(); 
assertEquals("INTERVAL '1-2' YEAR TO MONTH", rs.getString(1)); - try { - expected = LocalDateTimeUtils.PERIOD.getMethod("of", int.class, int.class, int.class) - .invoke(null, 1, 2, 0); - } catch (ReflectiveOperationException ex) { - throw new RuntimeException(ex); - } - assertEquals(expected, rs.getObject(1, LocalDateTimeUtils.PERIOD)); - assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(1, LocalDateTimeUtils.DURATION); + assertEquals(Period.of(1, 2, 0), rs.getObject(1, Period.class)); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(1, Duration.class); rs = stat.executeQuery("CALL INTERVAL '-3.1' SECOND"); rs.next(); assertEquals("INTERVAL '-3.1' SECOND", rs.getString(1)); - try { - expected = LocalDateTimeUtils.DURATION.getMethod("ofSeconds", long.class, long.class) - .invoke(null, -4, 900_000_000); - } catch (ReflectiveOperationException ex) { - throw new RuntimeException(ex); - } - assertEquals(expected, rs.getObject(1, LocalDateTimeUtils.DURATION)); - assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(1, LocalDateTimeUtils.PERIOD); + assertEquals(Duration.ofSeconds(-4, 900_000_000), rs.getObject(1, Duration.class)); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(1, Period.class); } private void testBlob() throws SQLException { trace("Test BLOB"); ResultSet rs; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE BLOB)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" BLOB)"); stat.execute("INSERT INTO TEST VALUES(1,X'01010101')"); stat.execute("INSERT INTO TEST VALUES(2,X'02020202')"); stat.execute("INSERT INTO TEST VALUES(3,X'00')"); @@ -1634,7 +1678,7 @@ private void testBlob() throws SQLException { rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.BLOB }, new int[] { - 10, Integer.MAX_VALUE }, new int[] { 0, 0 }); + 32, Integer.MAX_VALUE }, new int[] { 0, 0 }); rs.next(); 
assertEqualsWithNull(new byte[] { (byte) 0x01, (byte) 0x01, @@ -1726,7 +1770,7 @@ private void testClob() throws SQLException { String string; stat = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE CLOB)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" CLOB)"); stat.execute("INSERT INTO TEST VALUES(1,'Test')"); stat.execute("INSERT INTO TEST VALUES(2,'Hello')"); stat.execute("INSERT INTO TEST VALUES(3,'World!')"); @@ -1738,7 +1782,7 @@ private void testClob() throws SQLException { rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.CLOB }, new int[] { - 10, Integer.MAX_VALUE }, new int[] { 0, 0 }); + 32, Integer.MAX_VALUE }, new int[] { 0, 0 }); rs.next(); Object obj = rs.getObject(2); assertTrue(obj instanceof java.sql.Clob); @@ -1819,7 +1863,7 @@ private void testClob() throws SQLException { private void testArray() throws SQLException { trace("Test ARRAY"); ResultSet rs; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE ARRAY)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, \"VALUE\" INTEGER ARRAY)"); PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?)"); prep.setInt(1, 1); prep.setObject(2, new Object[] { 1, 2 }); @@ -1827,11 +1871,15 @@ private void testArray() throws SQLException { prep.setInt(1, 2); prep.setObject(2, new Object[] { 11, 12 }); prep.execute(); + prep.setInt(1, 3); + prep.setObject(2, new Object[0]); + prep.execute(); prep.close(); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); + assertEquals("INTEGER ARRAY", rs.getMetaData().getColumnTypeName(2)); rs.next(); assertEquals(1, rs.getInt(1)); - Object[] list = (Object[]) rs.getObject(2); + Object[] list = (Object[]) ((Array) rs.getObject(2)).getArray(); assertEquals(1, ((Integer) list[0]).intValue()); assertEquals(2, ((Integer) 
list[1]).intValue()); @@ -1841,9 +1889,10 @@ private void testArray() throws SQLException { assertEquals(2, ((Integer) list2[1]).intValue()); list2 = (Object[]) array.getArray(2, 1); assertEquals(2, ((Integer) list2[0]).intValue()); + rs.next(); assertEquals(2, rs.getInt(1)); - list = (Object[]) rs.getObject(2); + list = (Object[]) ((Array) rs.getObject(2)).getArray(); assertEquals(11, ((Integer) list[0]).intValue()); assertEquals(12, ((Integer) list[1]).intValue()); @@ -1854,13 +1903,35 @@ private void testArray() throws SQLException { list2 = (Object[]) array.getArray(2, 1); assertEquals(12, ((Integer) list2[0]).intValue()); - list2 = (Object[]) array.getArray(Collections.>emptyMap()); + list2 = (Object[]) array.getArray(Collections.emptyMap()); assertEquals(11, ((Integer) list2[0]).intValue()); - assertEquals(Types.NULL, array.getBaseType()); - assertEquals("NULL", array.getBaseTypeName()); + assertEquals(Types.INTEGER, array.getBaseType()); + assertEquals("INTEGER", array.getBaseTypeName()); - assertTrue(array.toString().endsWith(": [11, 12]")); + assertTrue(array.toString().endsWith(": ARRAY [11, 12]")); + + rs.next(); + assertEquals(3, rs.getInt(1)); + list = (Object[]) ((Array) rs.getObject(2)).getArray(); + assertEquals(0, list.length); + + array = rs.getArray("VALUE"); + list2 = (Object[]) array.getArray(); + assertEquals(0, list2.length); + list2 = (Object[]) array.getArray(1, 0); + assertEquals(0, list2.length); + list2 = (Object[]) array.getArray(1, 1); + assertEquals(0, list2.length); + + list2 = (Object[]) array.getArray(Collections.emptyMap()); + assertEquals(0, list2.length); + + // TODO + // assertEquals(Types.INTEGER, array.getBaseType()); + // assertEquals("INTEGER", array.getBaseTypeName()); + + assertTrue(array.toString().endsWith(": ARRAY []")); // free array.free(); @@ -1880,9 +1951,10 @@ private void testArray() throws SQLException { assertTrue(rs.next()); rs.updateArray("VALUE", conn.createArrayOf("INT", new Object[] {11, 22})); 
rs.updateRow(); + assertTrue(rs.next()); assertFalse(rs.next()); rs.moveToInsertRow(); - rs.updateInt(1, 3); + rs.updateInt(1, 4); rs.updateArray(2, null); rs.insertRow(); } @@ -1890,22 +1962,51 @@ private void testArray() throws SQLException { rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertTrue(rs.next()); assertEquals(1, rs.getInt(1)); - assertEquals(new Object[] {10, 20}, (Object[]) rs.getObject(2)); + assertEquals(new Object[] {10, 20}, (Object[]) ((Array) rs.getObject(2)).getArray()); assertTrue(rs.next()); assertEquals(2, rs.getInt(1)); - assertEquals(new Object[] {11, 22}, (Object[]) rs.getObject(2)); + assertEquals(new Object[] {11, 22}, (Object[]) ((Array) rs.getObject(2)).getArray()); assertTrue(rs.next()); assertEquals(3, rs.getInt(1)); + assertEquals(new Object[0], (Object[]) ((Array) rs.getObject(2)).getArray()); + assertTrue(rs.next()); + assertEquals(4, rs.getInt(1)); assertNull(rs.getObject(2)); assertFalse(rs.next()); stat.execute("DROP TABLE TEST"); } + private void testRowValue() throws SQLException { + trace("Test ROW value"); + ResultSet rs; + rs = stat.executeQuery("SELECT (1, 'test')"); + assertEquals("ROW(\"C1\" INTEGER, \"C2\" CHARACTER VARYING(4))", rs.getMetaData().getColumnTypeName(1)); + rs.next(); + testRowValue((ResultSet) rs.getObject(1)); + ResultSet rowAsResultSet = rs.getObject(1, ResultSet.class); + testRowValue(rowAsResultSet); + } + + private void testRowValue(ResultSet rowAsResultSet) throws SQLException { + ResultSetMetaData md = rowAsResultSet.getMetaData(); + assertEquals(2, md.getColumnCount()); + assertEquals("C1", md.getColumnLabel(1)); + assertEquals("C1", md.getColumnName(1)); + assertEquals("C2", md.getColumnLabel(2)); + assertEquals("C2", md.getColumnName(2)); + assertEquals(Types.INTEGER, md.getColumnType(1)); + assertEquals(Types.VARCHAR, md.getColumnType(2)); + assertTrue(rowAsResultSet.next()); + assertEquals(1, rowAsResultSet.getInt(1)); + assertEquals("test", rowAsResultSet.getString(2)); + 
assertFalse(rowAsResultSet.next()); + } + private void testEnum() throws SQLException { trace("Test ENUM"); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE ENUM('A', 'B', 'C', 'D', 'E', 'F', 'G'))"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, \"VALUE\" ENUM('A', 'B', 'C', 'D', 'E', 'F', 'G'))"); PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?)"); prep.setInt(1, 1); prep.setString(2, "A"); @@ -1914,7 +2015,7 @@ private void testEnum() throws SQLException { prep.setObject(2, "B"); prep.executeUpdate(); prep.setInt(1, 3); - prep.setInt(2, 2); + prep.setInt(2, 3); prep.executeUpdate(); prep.setInt(1, 4); prep.setObject(2, "D", Types.VARCHAR); @@ -1923,20 +2024,21 @@ private void testEnum() throws SQLException { prep.setObject(2, "E", Types.OTHER); prep.executeUpdate(); prep.setInt(1, 6); - prep.setObject(2, 5, Types.OTHER); + prep.setObject(2, 6, Types.OTHER); prep.executeUpdate(); prep.setInt(1, 7); - prep.setObject(2, 6, Types.INTEGER); + prep.setObject(2, 7, Types.INTEGER); prep.executeUpdate(); ResultSet rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); - testEnumResult(rs, 1, "A", 0); - testEnumResult(rs, 2, "B", 1); - testEnumResult(rs, 3, "C", 2); - testEnumResult(rs, 4, "D", 3); - testEnumResult(rs, 5, "E", 4); - testEnumResult(rs, 6, "F", 5); - testEnumResult(rs, 7, "G", 6); + assertEquals("ENUM('A', 'B', 'C', 'D', 'E', 'F', 'G')", rs.getMetaData().getColumnTypeName(2)); + testEnumResult(rs, 1, "A", 1); + testEnumResult(rs, 2, "B", 2); + testEnumResult(rs, 3, "C", 3); + testEnumResult(rs, 4, "D", 4); + testEnumResult(rs, 5, "E", 5); + testEnumResult(rs, 6, "F", 6); + testEnumResult(rs, 7, "G", 7); assertFalse(rs.next()); stat.execute("DROP TABLE TEST"); diff --git a/h2/src/test/org/h2/test/jdbc/TestSQLXML.java b/h2/src/test/org/h2/test/jdbc/TestSQLXML.java index 7821146e16..940b803570 100644 --- a/h2/src/test/org/h2/test/jdbc/TestSQLXML.java +++ b/h2/src/test/org/h2/test/jdbc/TestSQLXML.java @@ -1,6 
+1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -59,7 +59,7 @@ public class TestSQLXML extends TestDb { * ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbc/TestStatement.java b/h2/src/test/org/h2/test/jdbc/TestStatement.java index 442df4913c..658b68fcb5 100644 --- a/h2/src/test/org/h2/test/jdbc/TestStatement.java +++ b/h2/src/test/org/h2/test/jdbc/TestStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -16,9 +16,7 @@ import org.h2.api.ErrorCode; import org.h2.engine.SysProperties; -import org.h2.jdbc.JdbcPreparedStatementBackwardsCompat; import org.h2.jdbc.JdbcStatement; -import org.h2.jdbc.JdbcStatementBackwardsCompat; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -36,7 +34,7 @@ public class TestStatement extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -50,8 +48,8 @@ public void test() throws Exception { testConnectionRollback(); testStatement(); testPreparedStatement(); + testCloseOnCompletion(); testIdentityMerge(); - testIdentity(); conn.close(); deleteDb("statement"); testIdentifiers(); @@ -207,9 +205,9 @@ private void testStatement() throws SQLException { assertEquals(ResultSet.CONCUR_READ_ONLY, stat2.getResultSetConcurrency()); assertEquals(0, stat.getMaxFieldSize()); - assertFalse(((JdbcStatement) stat2).isClosed()); + assertFalse(stat2.isClosed()); stat2.close(); - assertTrue(((JdbcStatement) stat2).isClosed()); + assertTrue(stat2.isClosed()); ResultSet rs; @@ -240,38 +238,37 @@ private void testStatement() throws SQLException { assertTrue(stat.getQueryTimeout() == 0); trace("executeUpdate"); count = stat.executeUpdate( - "CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + "CREATE TABLE TEST(ID INT PRIMARY KEY,V VARCHAR(255))"); assertEquals(0, count); count = stat.executeUpdate( "INSERT INTO TEST VALUES(1,'Hello')"); assertEquals(1, count); count = stat.executeUpdate( - "INSERT INTO TEST(VALUE,ID) VALUES('JDBC',2)"); + "INSERT INTO TEST(V,ID) VALUES('JDBC',2)"); assertEquals(1, count); count = stat.executeUpdate( - "UPDATE TEST SET VALUE='LDBC' WHERE ID=2 OR ID=1"); + "UPDATE TEST SET V='LDBC' WHERE ID=2 OR ID=1"); assertEquals(2, count); count = stat.executeUpdate( - "UPDATE TEST SET VALUE='\\LDBC\\' WHERE VALUE LIKE 'LDBC' "); + "UPDATE TEST SET V='\\LDBC\\' WHERE V LIKE 'LDBC' "); assertEquals(2, count); count = stat.executeUpdate( - "UPDATE TEST SET VALUE='LDBC' WHERE VALUE LIKE '\\\\LDBC\\\\'"); + "UPDATE TEST SET V='LDBC' WHERE V LIKE '\\\\LDBC\\\\'"); trace("count:" + count); assertEquals(2, count); count = stat.executeUpdate("DELETE FROM TEST WHERE ID=-1"); assertEquals(0, count); count = stat.executeUpdate("DELETE FROM TEST WHERE ID=2"); assertEquals(1, 
count); - JdbcStatementBackwardsCompat statBC = (JdbcStatementBackwardsCompat) stat; - largeCount = statBC.executeLargeUpdate("DELETE FROM TEST WHERE ID=-1"); + largeCount = stat.executeLargeUpdate("DELETE FROM TEST WHERE ID=-1"); assertEquals(0, largeCount); - assertEquals(0, statBC.getLargeUpdateCount()); - largeCount = statBC.executeLargeUpdate("INSERT INTO TEST(VALUE,ID) VALUES('JDBC',2)"); + assertEquals(0, stat.getLargeUpdateCount()); + largeCount = stat.executeLargeUpdate("INSERT INTO TEST(V,ID) VALUES('JDBC',2)"); assertEquals(1, largeCount); - assertEquals(1, statBC.getLargeUpdateCount()); - largeCount = statBC.executeLargeUpdate("DELETE FROM TEST WHERE ID=2"); + assertEquals(1, stat.getLargeUpdateCount()); + largeCount = stat.executeLargeUpdate("DELETE FROM TEST WHERE ID=2"); assertEquals(1, largeCount); - assertEquals(1, statBC.getLargeUpdateCount()); + assertEquals(1, stat.getLargeUpdateCount()); assertThrows(ErrorCode.METHOD_NOT_ALLOWED_FOR_QUERY, stat). executeUpdate("SELECT * FROM TEST"); @@ -281,13 +278,13 @@ private void testStatement() throws SQLException { trace("execute"); result = stat.execute( - "CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + "CREATE TABLE TEST(ID INT PRIMARY KEY,V VARCHAR(255))"); assertFalse(result); result = stat.execute("INSERT INTO TEST VALUES(1,'Hello')"); assertFalse(result); - result = stat.execute("INSERT INTO TEST(VALUE,ID) VALUES('JDBC',2)"); + result = stat.execute("INSERT INTO TEST(V,ID) VALUES('JDBC',2)"); assertFalse(result); - result = stat.execute("UPDATE TEST SET VALUE='LDBC' WHERE ID=2"); + result = stat.execute("UPDATE TEST SET V='LDBC' WHERE ID=2"); assertFalse(result); result = stat.execute("DELETE FROM TEST WHERE ID=3"); assertFalse(result); @@ -297,15 +294,15 @@ private void testStatement() throws SQLException { assertFalse(result); assertThrows(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY, stat). 
- executeQuery("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + executeQuery("CREATE TABLE TEST(ID INT PRIMARY KEY,V VARCHAR(255))"); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,V VARCHAR(255))"); assertThrows(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY, stat). executeQuery("INSERT INTO TEST VALUES(1,'Hello')"); assertThrows(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY, stat). - executeQuery("UPDATE TEST SET VALUE='LDBC' WHERE ID=2"); + executeQuery("UPDATE TEST SET V='LDBC' WHERE ID=2"); assertThrows(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY, stat). executeQuery("DELETE FROM TEST WHERE ID=3"); @@ -335,6 +332,30 @@ private void testStatement() throws SQLException { stat.close(); } + private void testCloseOnCompletion() throws SQLException { + Statement stat = conn.createStatement(); + assertFalse(stat.isCloseOnCompletion()); + ResultSet rs = stat.executeQuery("VALUES 1"); + assertFalse(stat.isCloseOnCompletion()); + stat.closeOnCompletion(); + assertTrue(stat.isCloseOnCompletion()); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + rs.close(); + assertTrue(stat.isClosed()); + assertThrows(ErrorCode.OBJECT_CLOSED, stat).isCloseOnCompletion(); + assertThrows(ErrorCode.OBJECT_CLOSED, stat).closeOnCompletion(); + stat = conn.createStatement(); + stat.closeOnCompletion(); + rs = stat.executeQuery("VALUES 1"); + ResultSet rs2 = stat.executeQuery("VALUES 2"); + rs.close(); + assertFalse(stat.isClosed()); + rs2.close(); + assertTrue(stat.isClosed()); + } + private void testIdentityMerge() throws SQLException { Statement stat = conn.createStatement(); stat.execute("drop table if exists test1"); @@ -351,6 +372,8 @@ private void testIdentityMerge() throws SQLException { stat.execute("merge into test1(x) key(x) values(5)", Statement.RETURN_GENERATED_KEYS); keys = stat.getGeneratedKeys(); + keys.next(); + assertEquals(1, keys.getInt(1)); 
assertFalse(keys.next()); stat.execute("merge into test1(x) key(x) values(6)", Statement.RETURN_GENERATED_KEYS); @@ -360,64 +383,6 @@ private void testIdentityMerge() throws SQLException { stat.execute("drop table test1, test2"); } - private void testIdentity() throws SQLException { - Statement stat = conn.createStatement(); - stat.execute("CREATE SEQUENCE SEQ"); - stat.execute("CREATE TABLE TEST(ID INT)"); - stat.execute("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - Statement.RETURN_GENERATED_KEYS); - ResultSet rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(1, rs.getInt(1)); - assertFalse(rs.next()); - stat.execute("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - Statement.RETURN_GENERATED_KEYS); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(2, rs.getInt(1)); - assertFalse(rs.next()); - stat.execute("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - new int[] { 1 }); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(3, rs.getInt(1)); - assertFalse(rs.next()); - stat.execute("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - new String[] { "ID" }); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(4, rs.getInt(1)); - assertFalse(rs.next()); - stat.executeUpdate("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - Statement.RETURN_GENERATED_KEYS); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(5, rs.getInt(1)); - assertFalse(rs.next()); - stat.executeUpdate("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - new int[] { 1 }); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(6, rs.getInt(1)); - assertFalse(rs.next()); - stat.executeUpdate("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - new String[] { "ID" }); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(7, rs.getInt(1)); - assertFalse(rs.next()); - - stat.execute("CREATE TABLE TEST2(ID identity primary key)"); - stat.execute("INSERT INTO TEST2 VALUES()"); - stat.execute("SET @X = IDENTITY()"); - rs = stat.executeQuery("SELECT @X"); - 
rs.next(); - assertEquals(1, rs.getInt(1)); - - stat.execute("DROP TABLE TEST"); - stat.execute("DROP TABLE TEST2"); - } - private void testPreparedStatement() throws SQLException{ Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, name varchar(255))"); @@ -462,15 +427,15 @@ private void testPreparedStatement() throws SQLException{ ps.setInt(1, 6); ps.setString(2, "v6"); ps.addBatch(); - assertTrue(Arrays.equals(new long[] {1, 1}, ((JdbcStatementBackwardsCompat) ps).executeLargeBatch())); + assertTrue(Arrays.equals(new long[] {1, 1}, ps.executeLargeBatch())); ps.setInt(1, 7); ps.setString(2, "v7"); assertEquals(1, ps.executeUpdate()); assertEquals(1, ps.getUpdateCount()); ps.setInt(1, 8); ps.setString(2, "v8"); - assertEquals(1, ((JdbcPreparedStatementBackwardsCompat) ps).executeLargeUpdate()); - assertEquals(1, ((JdbcStatementBackwardsCompat) ps).getLargeUpdateCount()); + assertEquals(1, ps.executeLargeUpdate()); + assertEquals(1, ps.getLargeUpdateCount()); stat.execute("drop table test"); } @@ -484,74 +449,80 @@ private void testIdentifiers() throws SQLException { assertEquals("\"FROM\"", stat.enquoteIdentifier("FROM", false)); assertEquals("\"Test\"", stat.enquoteIdentifier("Test", false)); assertEquals("\"test\"", stat.enquoteIdentifier("test", false)); - assertEquals("\"TODAY\"", stat.enquoteIdentifier("TODAY", false)); + assertEquals("\"TOP\"", stat.enquoteIdentifier("TOP", false)); assertEquals("\"Test\"", stat.enquoteIdentifier("\"Test\"", false)); assertEquals("\"Test\"", stat.enquoteIdentifier("\"Test\"", true)); assertEquals("\"\"\"Test\"", stat.enquoteIdentifier("\"\"\"Test\"", true)); - try { - stat.enquoteIdentifier("\"Test", true); - fail(); - } catch (SQLException ex) { - // OK - } - // Other lower case characters don't have upper case mappings - assertEquals("\u02B0", stat.enquoteIdentifier("\u02B0", false)); - - assertTrue(stat.isSimpleIdentifier("SOME_ID")); + assertEquals("\"\"", 
stat.enquoteIdentifier("", false)); + assertEquals("\"\"", stat.enquoteIdentifier("", true)); + assertEquals("U&\"\"", stat.enquoteIdentifier("U&\"\"", false)); + assertEquals("U&\"\"", stat.enquoteIdentifier("U&\"\"", true)); + assertEquals("U&\"\0100\"", stat.enquoteIdentifier("U&\"\0100\"", false)); + assertEquals("U&\"\0100\"", stat.enquoteIdentifier("U&\"\0100\"", true)); + assertThrows(NullPointerException.class, () -> stat.enquoteIdentifier(null, false)); + assertThrows(ErrorCode.INVALID_NAME_1, () -> stat.enquoteIdentifier("\"Test", true)); + assertThrows(ErrorCode.INVALID_NAME_1, () -> stat.enquoteIdentifier("\"a\"a\"", true)); + assertThrows(ErrorCode.INVALID_NAME_1, () -> stat.enquoteIdentifier("U&\"a\"a\"", true)); + assertThrows(ErrorCode.STRING_FORMAT_ERROR_1, () -> stat.enquoteIdentifier("U&\"\\111\"", true)); + assertEquals("U&\"\\02b0\"", stat.enquoteIdentifier("\u02B0", false)); + + assertTrue(stat.isSimpleIdentifier("SOME_ID_1")); assertFalse(stat.isSimpleIdentifier("SOME ID")); assertFalse(stat.isSimpleIdentifier("FROM")); assertFalse(stat.isSimpleIdentifier("Test")); assertFalse(stat.isSimpleIdentifier("test")); - assertFalse(stat.isSimpleIdentifier("TODAY")); - // Other lower case characters don't have upper case mappings - assertTrue(stat.isSimpleIdentifier("\u02B0")); + assertFalse(stat.isSimpleIdentifier("TOP")); + assertFalse(stat.isSimpleIdentifier("_")); + assertFalse(stat.isSimpleIdentifier("_1")); + assertFalse(stat.isSimpleIdentifier("\u02B0")); conn.close(); deleteDb("statement"); conn = getConnection("statement;DATABASE_TO_LOWER=TRUE"); - stat = (JdbcStatement) conn.createStatement(); - assertEquals("some_id", stat.enquoteIdentifier("some_id", false)); - assertEquals("\"some id\"", stat.enquoteIdentifier("some id", false)); - assertEquals("\"some_id\"", stat.enquoteIdentifier("some_id", true)); - assertEquals("\"from\"", stat.enquoteIdentifier("from", false)); - assertEquals("\"Test\"", stat.enquoteIdentifier("Test", false)); - 
assertEquals("\"TEST\"", stat.enquoteIdentifier("TEST", false)); - assertEquals("\"today\"", stat.enquoteIdentifier("today", false)); - - assertTrue(stat.isSimpleIdentifier("some_id")); - assertFalse(stat.isSimpleIdentifier("some id")); - assertFalse(stat.isSimpleIdentifier("from")); - assertFalse(stat.isSimpleIdentifier("Test")); - assertFalse(stat.isSimpleIdentifier("TEST")); - assertFalse(stat.isSimpleIdentifier("today")); + JdbcStatement stat2 = (JdbcStatement) conn.createStatement(); + assertEquals("some_id", stat2.enquoteIdentifier("some_id", false)); + assertEquals("\"some id\"", stat2.enquoteIdentifier("some id", false)); + assertEquals("\"some_id\"", stat2.enquoteIdentifier("some_id", true)); + assertEquals("\"from\"", stat2.enquoteIdentifier("from", false)); + assertEquals("\"Test\"", stat2.enquoteIdentifier("Test", false)); + assertEquals("\"TEST\"", stat2.enquoteIdentifier("TEST", false)); + assertEquals("\"top\"", stat2.enquoteIdentifier("top", false)); + + assertTrue(stat2.isSimpleIdentifier("some_id")); + assertFalse(stat2.isSimpleIdentifier("some id")); + assertFalse(stat2.isSimpleIdentifier("from")); + assertFalse(stat2.isSimpleIdentifier("Test")); + assertFalse(stat2.isSimpleIdentifier("TEST")); + assertFalse(stat2.isSimpleIdentifier("top")); conn.close(); deleteDb("statement"); conn = getConnection("statement;DATABASE_TO_UPPER=FALSE"); - stat = (JdbcStatement) conn.createStatement(); - assertEquals("SOME_ID", stat.enquoteIdentifier("SOME_ID", false)); - assertEquals("some_id", stat.enquoteIdentifier("some_id", false)); - assertEquals("\"SOME ID\"", stat.enquoteIdentifier("SOME ID", false)); - assertEquals("\"some id\"", stat.enquoteIdentifier("some id", false)); - assertEquals("\"SOME_ID\"", stat.enquoteIdentifier("SOME_ID", true)); - assertEquals("\"some_id\"", stat.enquoteIdentifier("some_id", true)); - assertEquals("\"FROM\"", stat.enquoteIdentifier("FROM", false)); - assertEquals("\"from\"", stat.enquoteIdentifier("from", false)); - 
assertEquals("Test", stat.enquoteIdentifier("Test", false)); - assertEquals("\"TODAY\"", stat.enquoteIdentifier("TODAY", false)); - assertEquals("\"today\"", stat.enquoteIdentifier("today", false)); - - assertTrue(stat.isSimpleIdentifier("SOME_ID")); - assertTrue(stat.isSimpleIdentifier("some_id")); - assertFalse(stat.isSimpleIdentifier("SOME ID")); - assertFalse(stat.isSimpleIdentifier("some id")); - assertFalse(stat.isSimpleIdentifier("FROM")); - assertFalse(stat.isSimpleIdentifier("from")); - assertTrue(stat.isSimpleIdentifier("Test")); - assertFalse(stat.isSimpleIdentifier("TODAY")); - assertFalse(stat.isSimpleIdentifier("today")); + JdbcStatement stat3 = (JdbcStatement) conn.createStatement(); + assertEquals("SOME_ID", stat3.enquoteIdentifier("SOME_ID", false)); + assertEquals("some_id", stat3.enquoteIdentifier("some_id", false)); + assertEquals("\"SOME ID\"", stat3.enquoteIdentifier("SOME ID", false)); + assertEquals("\"some id\"", stat3.enquoteIdentifier("some id", false)); + assertEquals("\"SOME_ID\"", stat3.enquoteIdentifier("SOME_ID", true)); + assertEquals("\"some_id\"", stat3.enquoteIdentifier("some_id", true)); + assertEquals("\"FROM\"", stat3.enquoteIdentifier("FROM", false)); + assertEquals("\"from\"", stat3.enquoteIdentifier("from", false)); + assertEquals("Test", stat3.enquoteIdentifier("Test", false)); + assertEquals("\"TOP\"", stat3.enquoteIdentifier("TOP", false)); + assertEquals("\"top\"", stat3.enquoteIdentifier("top", false)); + + assertTrue(stat3.isSimpleIdentifier("SOME_ID")); + assertTrue(stat3.isSimpleIdentifier("some_id")); + assertFalse(stat3.isSimpleIdentifier("SOME ID")); + assertFalse(stat3.isSimpleIdentifier("some id")); + assertFalse(stat3.isSimpleIdentifier("FROM")); + assertFalse(stat3.isSimpleIdentifier("from")); + assertTrue(stat3.isSimpleIdentifier("Test")); + assertFalse(stat3.isSimpleIdentifier("TOP")); + assertFalse(stat3.isSimpleIdentifier("top")); + assertThrows(NullPointerException.class, () -> 
stat3.isSimpleIdentifier(null)); conn.close(); } diff --git a/h2/src/test/org/h2/test/jdbc/TestTransactionIsolation.java b/h2/src/test/org/h2/test/jdbc/TestTransactionIsolation.java index 13b9e3dece..234bad5c8f 100644 --- a/h2/src/test/org/h2/test/jdbc/TestTransactionIsolation.java +++ b/h2/src/test/org/h2/test/jdbc/TestTransactionIsolation.java @@ -1,12 +1,13 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; import java.sql.Connection; import java.sql.SQLException; +import java.sql.Statement; import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -24,16 +25,7 @@ public class TestTransactionIsolation extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public boolean isEnabled() { - if (config.mvStore) { - // no tests yet - return false; - } - return true; + TestBase.createCaller().init().testFromMain(); } @Override @@ -43,68 +35,77 @@ public void test() throws SQLException { private void testTableLevelLocking() throws SQLException { deleteDb("transactionIsolation"); + conn1 = getConnection("transactionIsolation"); - assertEquals(Connection.TRANSACTION_READ_COMMITTED, - conn1.getTransactionIsolation()); - conn1.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - assertEquals(Connection.TRANSACTION_SERIALIZABLE, - conn1.getTransactionIsolation()); - conn1.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); - assertEquals(Connection.TRANSACTION_READ_UNCOMMITTED, - conn1.getTransactionIsolation()); - assertSingleValue(conn1.createStatement(), "CALL LOCK_MODE()", 0); - 
conn1.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - assertSingleValue(conn1.createStatement(), "CALL LOCK_MODE()", 3); - assertEquals(Connection.TRANSACTION_READ_COMMITTED, - conn1.getTransactionIsolation()); - conn1.createStatement().execute("SET LOCK_MODE 1"); - assertEquals(Connection.TRANSACTION_SERIALIZABLE, - conn1.getTransactionIsolation()); - conn1.createStatement().execute("CREATE TABLE TEST(ID INT)"); - conn1.createStatement().execute("INSERT INTO TEST VALUES(1)"); conn1.setAutoCommit(false); conn2 = getConnection("transactionIsolation"); conn2.setAutoCommit(false); - conn1.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + assertEquals(Connection.TRANSACTION_READ_COMMITTED, conn1.getMetaData().getDefaultTransactionIsolation()); + assertEquals(Connection.TRANSACTION_READ_COMMITTED, conn1.getTransactionIsolation()); - // serializable: just reading - assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 1); - assertSingleValue(conn2.createStatement(), "SELECT * FROM TEST", 1); - conn1.commit(); - conn2.commit(); + try (Connection conn = getConnection("transactionIsolation"); + Statement stmt = conn.createStatement()) { + stmt.execute("CREATE TABLE TEST(ID INT)"); + } + testIt(Connection.TRANSACTION_READ_UNCOMMITTED); + testIt(Connection.TRANSACTION_READ_COMMITTED); + testIt(Connection.TRANSACTION_REPEATABLE_READ); + testIt(Connection.TRANSACTION_SERIALIZABLE); + + try (Connection conn = getConnection("transactionIsolation"); + Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE TEST"); + stmt.execute("CREATE TABLE TEST(ID INT UNIQUE)"); + } + testIt(Connection.TRANSACTION_READ_UNCOMMITTED); + testIt(Connection.TRANSACTION_READ_COMMITTED); + testIt(Connection.TRANSACTION_REPEATABLE_READ); + testIt(Connection.TRANSACTION_SERIALIZABLE); - // serializable: write lock - conn1.createStatement().executeUpdate("UPDATE TEST SET ID=2"); - assertThrows(ErrorCode.LOCK_TIMEOUT_1, conn2.createStatement()). 
- executeQuery("SELECT * FROM TEST"); - conn1.commit(); - conn2.commit(); + conn2.close(); + conn1.close(); + deleteDb("transactionIsolation"); + } - conn1.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + private void testIt(int isolationLevel2) throws SQLException { + try (Connection conn = getConnection("transactionIsolation"); + Statement stmt = conn.createStatement()) { + stmt.execute("DELETE FROM TEST"); + stmt.execute("INSERT INTO TEST VALUES(1)"); + } - // read-committed: #1 read, #2 update, #1 read again - assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 2); - conn2.createStatement().executeUpdate("UPDATE TEST SET ID=3"); - conn2.commit(); - assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 3); - conn1.commit(); - - // read-committed: #1 read, #2 read, #2 update, #1 delete - assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 3); - assertSingleValue(conn2.createStatement(), "SELECT * FROM TEST", 3); - conn2.createStatement().executeUpdate("UPDATE TEST SET ID=4"); - assertThrows(ErrorCode.LOCK_TIMEOUT_1, conn1.createStatement()). 
- executeUpdate("DELETE FROM TEST"); - conn2.commit(); - conn1.commit(); - assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 4); - assertSingleValue(conn2.createStatement(), "SELECT * FROM TEST", 4); + conn2.setTransactionIsolation(isolationLevel2); + assertEquals(isolationLevel2, conn2.getTransactionIsolation()); - conn1.close(); - conn2.close(); - deleteDb("transactionIsolation"); + testRowLocks(Connection.TRANSACTION_READ_UNCOMMITTED); + testRowLocks(Connection.TRANSACTION_READ_COMMITTED); + testRowLocks(Connection.TRANSACTION_REPEATABLE_READ); + testRowLocks(Connection.TRANSACTION_SERIALIZABLE); + + testDirtyRead(Connection.TRANSACTION_READ_UNCOMMITTED, 1, true, true); + testDirtyRead(Connection.TRANSACTION_READ_COMMITTED, 2, false, true); + testDirtyRead(Connection.TRANSACTION_REPEATABLE_READ, 3, false, false); + testDirtyRead(Connection.TRANSACTION_SERIALIZABLE, 4, false, false); } + private void testDirtyRead(int isolationLevel, int value, boolean dirtyVisible, boolean committedVisible) + throws SQLException { + conn1.setTransactionIsolation(isolationLevel); + assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", value); + int newValue = value + 1; + conn2.createStatement().executeUpdate("UPDATE TEST SET ID=" + newValue); + assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", dirtyVisible ? newValue : value); + conn2.commit(); + assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", committedVisible ? 
newValue : value); + } + + private void testRowLocks(int isolationLevel) throws SQLException { + conn1.setTransactionIsolation(isolationLevel); + assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 1); + assertSingleValue(conn2.createStatement(), "SELECT * FROM TEST FOR UPDATE", 1); + assertThrows(ErrorCode.LOCK_TIMEOUT_1, conn1.createStatement()).executeUpdate("DELETE FROM TEST"); + conn2.commit(); + } } diff --git a/h2/src/test/org/h2/test/jdbc/TestUpdatableResultSet.java b/h2/src/test/org/h2/test/jdbc/TestUpdatableResultSet.java index b24ae9fcc3..217232db0c 100644 --- a/h2/src/test/org/h2/test/jdbc/TestUpdatableResultSet.java +++ b/h2/src/test/org/h2/test/jdbc/TestUpdatableResultSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -12,6 +12,7 @@ import java.sql.Blob; import java.sql.Connection; import java.sql.Date; +import java.sql.JDBCType; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; @@ -20,9 +21,13 @@ import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; import org.h2.api.ErrorCode; -import org.h2.engine.SysProperties; +import org.h2.api.H2Type; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -37,7 +42,7 @@ public class TestUpdatableResultSet extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -48,6 +53,7 @@ public void test() throws Exception { testUpdateDeleteInsert(); testUpdateDataType(); testUpdateResetRead(); + testUpdateObject(); deleteDb("updatableResultSet"); } @@ -65,6 +71,8 @@ private void testDetectUpdatable() throws SQLException { rs = stat.executeQuery("select name from test"); assertEquals(ResultSet.CONCUR_READ_ONLY, rs.getConcurrency()); stat.execute("drop table test"); + rs = stat.executeQuery("SELECT"); + assertEquals(ResultSet.CONCUR_READ_ONLY, rs.getConcurrency()); stat.execute("create table test(a int, b int, " + "name varchar, primary key(a, b))"); @@ -297,29 +305,30 @@ private void testUpdateDataType() throws Exception { Statement stat = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255), " - + "DEC DECIMAL(10,2), BOO BIT, BYE TINYINT, BIN BINARY(100), " - + "D DATE, T TIME, TS TIMESTAMP(9), DB DOUBLE, R REAL, L BIGINT, " + + "DEC DECIMAL(10,2), BOO BIT, BYE TINYINT, BIN VARBINARY(100), " + + "D DATE, T TIME, TS TIMESTAMP(9), TSTZ TIMESTAMP(9) WITH TIME ZONE, DB DOUBLE, R REAL, L BIGINT, " + "O_I INT, SH SMALLINT, CL CLOB, BL BLOB)"); + final int clobIndex = 16, blobIndex = 17; ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); ResultSetMetaData meta = rs.getMetaData(); - assertEquals("java.lang.Integer", meta.getColumnClassName(1)); - assertEquals("java.lang.String", meta.getColumnClassName(2)); - assertEquals("java.math.BigDecimal", meta.getColumnClassName(3)); - assertEquals("java.lang.Boolean", meta.getColumnClassName(4)); - assertEquals(SysProperties.OLD_RESULT_SET_GET_OBJECT ? 
"java.lang.Byte" : "java.lang.Integer", - meta.getColumnClassName(5)); - assertEquals("[B", meta.getColumnClassName(6)); - assertEquals("java.sql.Date", meta.getColumnClassName(7)); - assertEquals("java.sql.Time", meta.getColumnClassName(8)); - assertEquals("java.sql.Timestamp", meta.getColumnClassName(9)); - assertEquals("java.lang.Double", meta.getColumnClassName(10)); - assertEquals("java.lang.Float", meta.getColumnClassName(11)); - assertEquals("java.lang.Long", meta.getColumnClassName(12)); - assertEquals("java.lang.Integer", meta.getColumnClassName(13)); - assertEquals(SysProperties.OLD_RESULT_SET_GET_OBJECT ? "java.lang.Short" : "java.lang.Integer", - meta.getColumnClassName(14)); - assertEquals("java.sql.Clob", meta.getColumnClassName(15)); - assertEquals("java.sql.Blob", meta.getColumnClassName(16)); + int c = 0; + assertEquals("java.lang.Integer", meta.getColumnClassName(++c)); + assertEquals("java.lang.String", meta.getColumnClassName(++c)); + assertEquals("java.math.BigDecimal", meta.getColumnClassName(++c)); + assertEquals("java.lang.Boolean", meta.getColumnClassName(++c)); + assertEquals("java.lang.Integer", meta.getColumnClassName(++c)); + assertEquals("[B", meta.getColumnClassName(++c)); + assertEquals("java.sql.Date", meta.getColumnClassName(++c)); + assertEquals("java.sql.Time", meta.getColumnClassName(++c)); + assertEquals("java.sql.Timestamp", meta.getColumnClassName(++c)); + assertEquals("java.time.OffsetDateTime", meta.getColumnClassName(++c)); + assertEquals("java.lang.Double", meta.getColumnClassName(++c)); + assertEquals("java.lang.Float", meta.getColumnClassName(++c)); + assertEquals("java.lang.Long", meta.getColumnClassName(++c)); + assertEquals("java.lang.Integer", meta.getColumnClassName(++c)); + assertEquals("java.lang.Integer", meta.getColumnClassName(++c)); + assertEquals("java.sql.Clob", meta.getColumnClassName(++c)); + assertEquals("java.sql.Blob", meta.getColumnClassName(++c)); rs.moveToInsertRow(); rs.updateInt(1, 0); 
rs.updateNull(2); @@ -329,22 +338,24 @@ private void testUpdateDataType() throws Exception { rs.insertRow(); rs.moveToInsertRow(); - rs.updateInt(1, 1); - rs.updateString(2, null); - rs.updateBigDecimal(3, null); - rs.updateBoolean(4, false); - rs.updateByte(5, (byte) 0); - rs.updateBytes(6, null); - rs.updateDate(7, null); - rs.updateTime(8, null); - rs.updateTimestamp(9, null); - rs.updateDouble(10, 0.0); - rs.updateFloat(11, (float) 0.0); - rs.updateLong(12, 0L); - rs.updateObject(13, null); - rs.updateShort(14, (short) 0); - rs.updateCharacterStream(15, new StringReader("test"), 0); - rs.updateBinaryStream(16, + c = 0; + rs.updateInt(++c, 1); + rs.updateString(++c, null); + rs.updateBigDecimal(++c, null); + rs.updateBoolean(++c, false); + rs.updateByte(++c, (byte) 0); + rs.updateBytes(++c, null); + rs.updateDate(++c, null); + rs.updateTime(++c, null); + rs.updateTimestamp(++c, null); + rs.updateObject(++c, null); + rs.updateDouble(++c, 0.0); + rs.updateFloat(++c, 0.0f); + rs.updateLong(++c, 0L); + rs.updateObject(++c, null); + rs.updateShort(++c, (short) 0); + rs.updateCharacterStream(++c, new StringReader("test"), 0); + rs.updateBinaryStream(++c, new ByteArrayInputStream(new byte[] { (byte) 0xff, 0x00 }), 0); rs.insertRow(); @@ -359,8 +370,10 @@ private void testUpdateDataType() throws Exception { rs.updateTime("T", Time.valueOf("21:46:28")); rs.updateTimestamp("TS", Timestamp.valueOf("2005-09-21 21:47:09.567890123")); + rs.updateObject("TSTZ", OffsetDateTime.of(LocalDate.of(2005, 9, 21), + LocalTime.ofNanoOfDay(81_189_123_456_789L), ZoneOffset.ofHours(1))); rs.updateDouble("DB", 1.725); - rs.updateFloat("R", (float) 2.5); + rs.updateFloat("R", 2.5f); rs.updateLong("L", Long.MAX_VALUE); rs.updateObject("O_I", 10); rs.updateShort("SH", Short.MIN_VALUE); @@ -379,8 +392,8 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 4); - rs.updateCharacterStream(15, new StringReader("\u00ef\u00f6\u00fc")); - 
rs.updateBinaryStream(16, + rs.updateCharacterStream(clobIndex, new StringReader("\u00ef\u00f6\u00fc")); + rs.updateBinaryStream(blobIndex, new ByteArrayInputStream(new byte[] { (byte) 0xab, 0x12 })); rs.insertRow(); @@ -393,8 +406,8 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 6); - rs.updateClob(15, new StringReader("\u00ef\u00f6\u00fc")); - rs.updateBlob(16, + rs.updateClob(clobIndex, new StringReader("\u00ef\u00f6\u00fc")); + rs.updateBlob(blobIndex, new ByteArrayInputStream(new byte[] { (byte) 0xab, 0x12 })); rs.insertRow(); @@ -410,8 +423,8 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 8); - rs.updateNClob(15, new StringReader("\u00ef\u00f6\u00fc")); - rs.updateBlob(16, b); + rs.updateNClob(clobIndex, new StringReader("\u00ef\u00f6\u00fc")); + rs.updateBlob(blobIndex, b); rs.insertRow(); rs.moveToInsertRow(); @@ -422,8 +435,8 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 10); - rs.updateNClob(15, new StringReader("\u00ef\u00f6\u00fc"), -1); - rs.updateBlob(16, b); + rs.updateNClob(clobIndex, new StringReader("\u00ef\u00f6\u00fc"), -1); + rs.updateBlob(blobIndex, b); rs.insertRow(); rs.moveToInsertRow(); @@ -435,9 +448,9 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 12); - rs.updateNCharacterStream(15, + rs.updateNCharacterStream(clobIndex, new StringReader("\u00ef\u00f6\u00fc"), -1); - rs.updateBlob(16, b); + rs.updateBlob(blobIndex, b); rs.insertRow(); rs.moveToInsertRow(); @@ -449,75 +462,121 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 14); - rs.updateNCharacterStream(15, + rs.updateNCharacterStream(clobIndex, new StringReader("\u00ef\u00f6\u00fc")); - rs.updateBlob(16, b); + rs.updateBlob(blobIndex, b); rs.insertRow(); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID NULLS FIRST"); rs.next(); - 
assertTrue(rs.getInt(1) == 0); - assertTrue(rs.getString(2) == null && rs.wasNull()); - assertTrue(rs.getBigDecimal(3) == null && rs.wasNull()); - assertTrue(!rs.getBoolean(4) && rs.wasNull()); - assertTrue(rs.getByte(5) == 0 && rs.wasNull()); - assertTrue(rs.getBytes(6) == null && rs.wasNull()); - assertTrue(rs.getDate(7) == null && rs.wasNull()); - assertTrue(rs.getTime(8) == null && rs.wasNull()); - assertTrue(rs.getTimestamp(9) == null && rs.wasNull()); - assertTrue(rs.getDouble(10) == 0.0 && rs.wasNull()); - assertTrue(rs.getFloat(11) == 0.0 && rs.wasNull()); - assertTrue(rs.getLong(12) == 0 && rs.wasNull()); - assertTrue(rs.getObject(13) == null && rs.wasNull()); - assertTrue(rs.getShort(14) == 0 && rs.wasNull()); - assertTrue(rs.getCharacterStream(15) == null && rs.wasNull()); - assertTrue(rs.getBinaryStream(16) == null && rs.wasNull()); + c = 0; + assertTrue(rs.getInt(++c) == 0); + assertTrue(rs.getString(++c) == null && rs.wasNull()); + assertTrue(rs.getBigDecimal(++c) == null && rs.wasNull()); + assertTrue(!rs.getBoolean(++c) && rs.wasNull()); + assertTrue(rs.getByte(++c) == 0 && rs.wasNull()); + assertTrue(rs.getBytes(++c) == null && rs.wasNull()); + assertTrue(rs.getDate(++c) == null && rs.wasNull()); + assertTrue(rs.getTime(++c) == null && rs.wasNull()); + assertTrue(rs.getTimestamp(++c) == null && rs.wasNull()); + assertTrue(rs.getDouble(++c) == 0.0 && rs.wasNull()); + assertTrue(rs.getFloat(++c) == 0.0 && rs.wasNull()); + assertTrue(rs.getLong(++c) == 0 && rs.wasNull()); + assertTrue(rs.getObject(++c) == null && rs.wasNull()); + assertTrue(rs.getShort(++c) == 0 && rs.wasNull()); + assertTrue(rs.getCharacterStream(++c) == null && rs.wasNull()); + assertTrue(rs.getBinaryStream(++c) == null && rs.wasNull()); rs.next(); - assertTrue(rs.getInt(1) == 1); - assertTrue(rs.getString(2) == null && rs.wasNull()); - assertTrue(rs.getBigDecimal(3) == null && rs.wasNull()); - assertTrue(!rs.getBoolean(4) && !rs.wasNull()); - assertTrue(rs.getByte(5) == 0 && 
!rs.wasNull()); - assertTrue(rs.getBytes(6) == null && rs.wasNull()); - assertTrue(rs.getDate(7) == null && rs.wasNull()); - assertTrue(rs.getTime(8) == null && rs.wasNull()); - assertTrue(rs.getTimestamp(9) == null && rs.wasNull()); - assertTrue(rs.getDouble(10) == 0.0 && !rs.wasNull()); - assertTrue(rs.getFloat(11) == 0.0 && !rs.wasNull()); - assertTrue(rs.getLong(12) == 0 && !rs.wasNull()); - assertTrue(rs.getObject(13) == null && rs.wasNull()); - assertTrue(rs.getShort(14) == 0 && !rs.wasNull()); - assertEquals("test", rs.getString(15)); - assertEquals(new byte[] { (byte) 0xff, 0x00 }, rs.getBytes(16)); + c = 0; + assertTrue(rs.getInt(++c) == 1); + assertTrue(rs.getString(++c) == null && rs.wasNull()); + assertTrue(rs.getBigDecimal(++c) == null && rs.wasNull()); + assertTrue(!rs.getBoolean(++c) && !rs.wasNull()); + assertTrue(rs.getByte(++c) == 0 && !rs.wasNull()); + assertTrue(rs.getBytes(++c) == null && rs.wasNull()); + assertTrue(rs.getDate(++c) == null && rs.wasNull()); + assertTrue(rs.getTime(++c) == null && rs.wasNull()); + assertTrue(rs.getTimestamp(++c) == null && rs.wasNull()); + assertTrue(rs.getObject(++c) == null && rs.wasNull()); + assertTrue(rs.getDouble(++c) == 0.0 && !rs.wasNull()); + assertTrue(rs.getFloat(++c) == 0.0 && !rs.wasNull()); + assertTrue(rs.getLong(++c) == 0 && !rs.wasNull()); + assertTrue(rs.getObject(++c) == null && rs.wasNull()); + assertTrue(rs.getShort(++c) == 0 && !rs.wasNull()); + assertEquals("test", rs.getString(++c)); + assertEquals(new byte[] { (byte) 0xff, 0x00 }, rs.getBytes(++c)); rs.next(); - assertTrue(rs.getInt(1) == 2); - assertEquals("+", rs.getString(2)); - assertEquals("1.20", rs.getBigDecimal(3).toString()); - assertTrue(rs.getBoolean(4)); - assertTrue((rs.getByte(5) & 0xff) == 0xff); - assertEquals(new byte[] { 0x00, (byte) 0xff }, rs.getBytes(6)); - assertEquals("2005-09-21", rs.getDate(7).toString()); - assertEquals("21:46:28", rs.getTime(8).toString()); - assertEquals("2005-09-21 21:47:09.567890123", 
rs.getTimestamp(9).toString()); - assertTrue(rs.getDouble(10) == 1.725); - assertTrue(rs.getFloat(11) == (float) 2.5); - assertTrue(rs.getLong(12) == Long.MAX_VALUE); - assertEquals(10, ((Integer) rs.getObject(13)).intValue()); - assertTrue(rs.getShort(14) == Short.MIN_VALUE); + c = 0; + assertTrue(rs.getInt(++c) == 2); + assertEquals("+", rs.getString(++c)); + assertEquals("1.20", rs.getBigDecimal(++c).toString()); + assertTrue(rs.getBoolean(++c)); + assertTrue((rs.getByte(++c) & 0xff) == 0xff); + assertEquals(new byte[] { 0x00, (byte) 0xff }, rs.getBytes(++c)); + assertEquals("2005-09-21", rs.getDate(++c).toString()); + assertEquals("21:46:28", rs.getTime(++c).toString()); + assertEquals("2005-09-21 21:47:09.567890123", rs.getTimestamp(++c).toString()); + assertEquals("2005-09-21T22:33:09.123456789+01:00", rs.getObject(++c).toString()); + assertTrue(rs.getDouble(++c) == 1.725); + assertTrue(rs.getFloat(++c) == 2.5f); + assertTrue(rs.getLong(++c) == Long.MAX_VALUE); + assertEquals(10, ((Integer) rs.getObject(++c)).intValue()); + assertTrue(rs.getShort(++c) == Short.MIN_VALUE); // auml ouml uuml - assertEquals("\u00ef\u00f6\u00fc", rs.getString(15)); - assertEquals(new byte[] { (byte) 0xab, 0x12 }, rs.getBytes(16)); + assertEquals("\u00ef\u00f6\u00fc", rs.getString(++c)); + assertEquals(new byte[] { (byte) 0xab, 0x12 }, rs.getBytes(++c)); + c = 1; + rs.updateString(++c, "-"); + rs.updateBigDecimal(++c, new BigDecimal("1.30")); + rs.updateBoolean(++c, false); + rs.updateByte(++c, (byte) 0x55); + rs.updateBytes(++c, new byte[] { 0x01, (byte) 0xfe }); + rs.updateDate(++c, Date.valueOf("2005-09-22")); + rs.updateTime(++c, Time.valueOf("21:46:29")); + rs.updateTimestamp(++c, Timestamp.valueOf("2005-09-21 21:47:10.111222333")); + rs.updateObject(++c, OffsetDateTime.of(LocalDate.of(2005, 9, 22), LocalTime.ofNanoOfDay(10_111_222_333L), + ZoneOffset.ofHours(2))); + rs.updateDouble(++c, 2.25); + rs.updateFloat(++c, 3.5f); + rs.updateLong(++c, Long.MAX_VALUE - 1); + 
rs.updateInt(++c, 11); + rs.updateShort(++c, (short) -1_000); + rs.updateString(++c, "ABCD"); + rs.updateBytes(++c, new byte[] { 1, 2 }); + rs.updateRow(); for (int i = 3; i <= 14; i++) { rs.next(); assertEquals(i, rs.getInt(1)); - assertEquals("\u00ef\u00f6\u00fc", rs.getString(15)); - assertEquals(new byte[] { (byte) 0xab, 0x12 }, rs.getBytes(16)); + assertEquals("\u00ef\u00f6\u00fc", rs.getString(clobIndex)); + assertEquals(new byte[] { (byte) 0xab, 0x12 }, rs.getBytes(blobIndex)); } assertFalse(rs.next()); + rs = stat.executeQuery("SELECT * FROM TEST WHERE ID = 2"); + rs.next(); + c = 0; + assertTrue(rs.getInt(++c) == 2); + assertEquals("-", rs.getString(++c)); + assertEquals("1.30", rs.getBigDecimal(++c).toString()); + assertFalse(rs.getBoolean(++c)); + assertTrue((rs.getByte(++c) & 0xff) == 0x55); + assertEquals(new byte[] { 0x01, (byte) 0xfe }, rs.getBytes(++c)); + assertEquals("2005-09-22", rs.getDate(++c).toString()); + assertEquals("21:46:29", rs.getTime(++c).toString()); + assertEquals("2005-09-21 21:47:10.111222333", rs.getTimestamp(++c).toString()); + assertEquals("2005-09-22T00:00:10.111222333+02:00", rs.getObject(++c).toString()); + assertTrue(rs.getDouble(++c) == 2.25); + assertTrue(rs.getFloat(++c) == 3.5f); + assertTrue(rs.getLong(++c) == Long.MAX_VALUE - 1); + assertEquals(11, ((Integer) rs.getObject(++c)).intValue()); + assertTrue(rs.getShort(++c) == -1_000); + assertEquals("ABCD", rs.getString(++c)); + assertEquals(new byte[] { 1, 2 }, rs.getBytes(++c)); + assertFalse(rs.next()); + stat.execute("DROP TABLE TEST"); conn.close(); } @@ -675,6 +734,89 @@ private void testScrollResultSet(Statement stat, int type, int rows) } } + private void testUpdateObject() throws SQLException { + deleteDb("updatableResultSet"); + Connection conn = getConnection("updatableResultSet"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES 
(?1, ?1)"); + for (int i = 1; i <= 12; i++) { + prep.setInt(1, i); + prep.executeUpdate(); + } + prep = conn.prepareStatement("TABLE TEST ORDER BY ID", ResultSet.TYPE_FORWARD_ONLY, + ResultSet.CONCUR_UPDATABLE); + try (ResultSet rs = prep.executeQuery()) { + for (int i = 1; i <= 12; i++) { + rs.next(); + assertEquals(i, rs.getInt(1)); + assertEquals(i, rs.getInt(2)); + testUpdateObjectUpdateRow(rs, i, i * 10); + rs.updateRow(); + } + assertFalse(rs.next()); + } + try (ResultSet rs = prep.executeQuery()) { + for (int i = 1; i <= 12; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + assertEquals(i * 10, rs.getInt(2)); + testUpdateObjectUpdateRow(rs, i, null); + rs.updateRow(); + } + assertFalse(rs.next()); + } + try (ResultSet rs = prep.executeQuery()) { + for (int i = 1; i <= 12; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + assertNull(rs.getObject(2)); + } + assertFalse(rs.next()); + } + conn.close(); + } + + private static void testUpdateObjectUpdateRow(ResultSet rs, int method, Object value) throws SQLException { + switch (method) { + case 1: + rs.updateObject(2, value); + break; + case 2: + rs.updateObject("V", value); + break; + case 3: + rs.updateObject(2, value, 0); + break; + case 4: + rs.updateObject(2, value, JDBCType.INTEGER); + break; + case 5: + rs.updateObject(2, value, H2Type.INTEGER); + break; + case 6: + rs.updateObject("V", value, 0); + break; + case 7: + rs.updateObject("V", value, JDBCType.INTEGER); + break; + case 8: + rs.updateObject("V", value, H2Type.INTEGER); + break; + case 9: + rs.updateObject(2, value, JDBCType.INTEGER, 0); + break; + case 10: + rs.updateObject(2, value, H2Type.INTEGER, 0); + break; + case 11: + rs.updateObject("V", value, JDBCType.INTEGER, 0); + break; + case 12: + rs.updateObject("V", value, H2Type.INTEGER, 0); + } + } + private void assertState(ResultSet rs, boolean beforeFirst, boolean first, boolean last, boolean afterLast) throws SQLException { assertEquals(beforeFirst, 
rs.isBeforeFirst()); diff --git a/h2/src/test/org/h2/test/jdbc/TestUrlJavaObjectSerializer.java b/h2/src/test/org/h2/test/jdbc/TestUrlJavaObjectSerializer.java index 5b9147103d..b1e7634c3c 100644 --- a/h2/src/test/org/h2/test/jdbc/TestUrlJavaObjectSerializer.java +++ b/h2/src/test/org/h2/test/jdbc/TestUrlJavaObjectSerializer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -31,9 +31,7 @@ public static void main(String... a) throws Exception { test.config.traceTest = true; test.config.memory = true; test.config.networked = true; - test.config.beforeTest(); - test.test(); - test.config.afterTest(); + test.testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbc/TestZloty.java b/h2/src/test/org/h2/test/jdbc/TestZloty.java index eabd8be329..e915849826 100644 --- a/h2/src/test/org/h2/test/jdbc/TestZloty.java +++ b/h2/src/test/org/h2/test/jdbc/TestZloty.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -27,7 +27,7 @@ public class TestZloty extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbc/package.html b/h2/src/test/org/h2/test/jdbc/package.html index 4ecf60a792..bf78702576 100644 --- a/h2/src/test/org/h2/test/jdbc/package.html +++ b/h2/src/test/org/h2/test/jdbc/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/jdbcx/SimpleXid.java b/h2/src/test/org/h2/test/jdbcx/SimpleXid.java index 75d017769e..666239b426 100644 --- a/h2/src/test/org/h2/test/jdbcx/SimpleXid.java +++ b/h2/src/test/org/h2/test/jdbcx/SimpleXid.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbcx; diff --git a/h2/src/test/org/h2/test/jdbcx/TestConnectionPool.java b/h2/src/test/org/h2/test/jdbcx/TestConnectionPool.java index 442e7383ac..dab7d296a7 100644 --- a/h2/src/test/org/h2/test/jdbcx/TestConnectionPool.java +++ b/h2/src/test/org/h2/test/jdbcx/TestConnectionPool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.jdbcx; @@ -12,9 +12,11 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import javax.sql.DataSource; +import org.h2.api.ErrorCode; import org.h2.jdbcx.JdbcConnectionPool; import org.h2.jdbcx.JdbcDataSource; import org.h2.test.TestBase; @@ -32,7 +34,7 @@ public class TestConnectionPool extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -46,6 +48,7 @@ public void test() throws Exception { testKeepOpen(); testConnect(); testThreads(); + testUnwrap(); deleteDb("connectionPool"); deleteDb("connectionPool2"); } @@ -61,7 +64,7 @@ private void testShutdown() throws SQLException { conn1.close(); conn2.createStatement().execute("shutdown immediately"); cp.dispose(); - assertTrue(w.toString().length() > 0); + assertTrue(w.toString().length() == 0); cp.dispose(); } @@ -71,7 +74,7 @@ private void testWrongUrl() { try { cp.getConnection(); } catch (SQLException e) { - assertEquals(8001, e.getErrorCode()); + assertEquals(ErrorCode.URL_FORMAT_ERROR_2, e.getErrorCode()); } cp.dispose(); } @@ -81,9 +84,7 @@ private void testTimeout() throws Exception { String password = getPassword(); final JdbcConnectionPool man = JdbcConnectionPool.create(url, user, password); man.setLoginTimeout(1); - createClassProxy(man.getClass()); - assertThrows(IllegalArgumentException.class, man). 
- setMaxConnections(-1); + assertThrows(IllegalArgumentException.class, () -> man.setMaxConnections(-1)); man.setMaxConnections(2); // connection 1 (of 2) Connection conn = man.getConnection(); @@ -189,7 +190,7 @@ private void testKeepOpen() throws Exception { private void testThreads() throws Exception { final int len = getSize(4, 20); final JdbcConnectionPool man = getConnectionPool(len - 2); - final boolean[] stop = { false }; + final AtomicBoolean stop = new AtomicBoolean(); /** * This class gets and returns connections from the pool. @@ -198,7 +199,7 @@ class TestRunner implements Runnable { @Override public void run() { try { - while (!stop[0]) { + while (!stop.get()) { Connection conn = man.getConnection(); if (man.getActiveConnections() >= len + 1) { throw new Exception("a: " + @@ -221,7 +222,7 @@ public void run() { threads[i].start(); } Thread.sleep(1000); - stop[0] = true; + stop.set(true); for (int i = 0; i < len; i++) { threads[i].join(); } @@ -253,4 +254,16 @@ private void testConnect() throws SQLException { getConnection(null, null); } + private void testUnwrap() throws SQLException { + JdbcConnectionPool pool = JdbcConnectionPool.create(new JdbcDataSource()); + assertTrue(pool.isWrapperFor(Object.class)); + assertTrue(pool.isWrapperFor(DataSource.class)); + assertTrue(pool.isWrapperFor(pool.getClass())); + assertFalse(pool.isWrapperFor(Integer.class)); + assertTrue(pool == pool.unwrap(Object.class)); + assertTrue(pool == pool.unwrap(DataSource.class)); + assertTrue(pool == pool.unwrap(pool.getClass())); + assertThrows(ErrorCode.INVALID_VALUE_2, () -> pool.unwrap(Integer.class)); + } + } diff --git a/h2/src/test/org/h2/test/jdbcx/TestDataSource.java b/h2/src/test/org/h2/test/jdbcx/TestDataSource.java index c56c7e6173..20c9213cbe 100644 --- a/h2/src/test/org/h2/test/jdbcx/TestDataSource.java +++ b/h2/src/test/org/h2/test/jdbcx/TestDataSource.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbcx; @@ -38,7 +38,7 @@ public class TestDataSource extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } // public static void main(String... args) throws SQLException { @@ -53,7 +53,7 @@ public static void main(String... a) throws Exception { // System.setProperty(Context.PROVIDER_URL, "rmi://localhost:1099"); // // JdbcDataSource ds = new JdbcDataSource(); -// ds.setURL("jdbc:h2:test"); +// ds.setURL("jdbc:h2:./test"); // ds.setUser("test"); // ds.setPassword(""); // @@ -204,12 +204,7 @@ private void testUnwrap() throws SQLException { assertFalse(ds.isWrapperFor(String.class)); assertTrue(ds == ds.unwrap(Object.class)); assertTrue(ds == ds.unwrap(DataSource.class)); - try { - ds.unwrap(String.class); - fail(); - } catch (SQLException ex) { - assertEquals(ErrorCode.INVALID_VALUE_2, ex.getErrorCode()); - } + assertThrows(ErrorCode.INVALID_VALUE_2, () -> ds.unwrap(String.class)); } } diff --git a/h2/src/test/org/h2/test/jdbcx/TestXA.java b/h2/src/test/org/h2/test/jdbcx/TestXA.java index 83677c08bd..2914518649 100644 --- a/h2/src/test/org/h2/test/jdbcx/TestXA.java +++ b/h2/src/test/org/h2/test/jdbcx/TestXA.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: James Devenish */ package org.h2.test.jdbcx; @@ -33,7 +33,7 @@ public class TestXA extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -283,20 +283,20 @@ private void testXA(boolean useOneDatabase) throws SQLException { trace("stmt1.executeUpdate(\"CREATE TABLE xatest1 " + "(id INT PRIMARY KEY, value INT)\")"); stat1.executeUpdate("CREATE TABLE xatest1 " + - "(id INT PRIMARY KEY, value INT)"); + "(id INT PRIMARY KEY, v INT)"); trace("stmt2.executeUpdate(\"CREATE TABLE xatest2 " + - "(id INT PRIMARY KEY, value INT)\")"); + "(id INT PRIMARY KEY, v INT)\")"); stat2.executeUpdate("CREATE TABLE xatest2 " + - "(id INT PRIMARY KEY, value INT)"); + "(id INT PRIMARY KEY, v INT)"); } else { trace("stmt1.executeUpdate(\"CREATE TABLE xatest " + "(id INT PRIMARY KEY, value INT)\")"); stat1.executeUpdate("CREATE TABLE xatest " + - "(id INT PRIMARY KEY, value INT)"); + "(id INT PRIMARY KEY, v INT)"); trace("stmt2.executeUpdate(\"CREATE TABLE xatest " + - "(id INT PRIMARY KEY, value INT)\")"); + "(id INT PRIMARY KEY, v INT)\")"); stat2.executeUpdate("CREATE TABLE xatest " + - "(id INT PRIMARY KEY, value INT)"); + "(id INT PRIMARY KEY, v INT)"); } if (useOneDatabase) { @@ -343,22 +343,22 @@ private void testXA(boolean useOneDatabase) throws SQLException { if (useOneDatabase) { trace("stmt1.executeUpdate(\"UPDATE xatest1 " + - "SET value=1 WHERE id=1\")"); + "SET v=1 WHERE id=1\")"); stat1.executeUpdate("UPDATE xatest1 " + - "SET value=1 WHERE id=1"); + "SET v=1 WHERE id=1"); trace("stmt2.executeUpdate(\"UPDATE xatest2 " + - "SET value=1 WHERE id=2\")"); + "SET v=1 WHERE id=2\")"); stat2.executeUpdate("UPDATE xatest2 " + - "SET value=1 WHERE id=2"); + "SET v=1 WHERE id=2"); } else { trace("stmt1.executeUpdate(\"UPDATE xatest " + - "SET value=1 WHERE id=1\")"); + "SET v=1 WHERE id=1\")"); stat1.executeUpdate("UPDATE 
xatest " + - "SET value=1 WHERE id=1"); + "SET v=1 WHERE id=1"); trace("stmt2.executeUpdate(\"UPDATE xatest " + - "SET value=1 WHERE id=2\")"); + "SET v=1 WHERE id=2\")"); stat2.executeUpdate("UPDATE xatest " + - "SET value=1 WHERE id=2"); + "SET v=1 WHERE id=2"); } trace("xares1.end(xid1, XAResource.TMSUCCESS)"); diff --git a/h2/src/test/org/h2/test/jdbcx/TestXASimple.java b/h2/src/test/org/h2/test/jdbcx/TestXASimple.java index 0f5c0716b8..16f68cdf3e 100644 --- a/h2/src/test/org/h2/test/jdbcx/TestXASimple.java +++ b/h2/src/test/org/h2/test/jdbcx/TestXASimple.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbcx; @@ -28,7 +28,7 @@ public class TestXASimple extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbcx/package.html b/h2/src/test/org/h2/test/jdbcx/package.html index d37874cbc2..41fa5358b5 100644 --- a/h2/src/test/org/h2/test/jdbcx/package.html +++ b/h2/src/test/org/h2/test/jdbcx/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/mvcc/TestMvcc1.java b/h2/src/test/org/h2/test/mvcc/TestMvcc1.java index 8b264304fc..954d27d159 100644 --- a/h2/src/test/org/h2/test/mvcc/TestMvcc1.java +++ b/h2/src/test/org/h2/test/mvcc/TestMvcc1.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.mvcc; @@ -31,14 +31,11 @@ public class TestMvcc1 extends TestDb { */ public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); - test.test(); + test.testFromMain(); } @Override public boolean isEnabled() { - if (!config.mvStore) { - return false; - } return true; } @@ -90,7 +87,7 @@ private void testCases() throws SQLException { c2.commit(); // referential integrity problem - s1.execute("create table a (id integer identity not null, " + + s1.execute("create table a (id integer generated by default as identity, " + "code varchar(10) not null, primary key(id))"); s1.execute("create table b (name varchar(100) not null, a integer, " + "primary key(name), foreign key(a) references a(id))"); @@ -217,14 +214,14 @@ private void testCases() throws SQLException { s1.execute("DROP TABLE TEST"); c1.commit(); - s1.execute("CREATE TABLE TEST(ID INT IDENTITY, NAME VARCHAR)"); + s1.execute("CREATE TABLE TEST(ID INT GENERATED BY DEFAULT AS IDENTITY, NAME VARCHAR)"); s1.execute("INSERT INTO TEST(NAME) VALUES('Ruebezahl')"); assertResult("0", s2, "SELECT COUNT(*) FROM TEST"); assertResult("1", s1, "SELECT COUNT(*) FROM TEST"); s1.execute("DROP TABLE TEST"); c1.commit(); - s1.execute("CREATE TABLE TEST(ID INT IDENTITY, NAME VARCHAR)"); + s1.execute("CREATE TABLE TEST(ID INT GENERATED BY DEFAULT AS IDENTITY, NAME VARCHAR)"); s1.execute("INSERT INTO TEST(NAME) VALUES('Ruebezahl')"); s1.execute("INSERT INTO TEST(NAME) VALUES('Ruebezahl')"); s1.execute("DROP TABLE TEST"); @@ -239,7 +236,7 @@ private void testCases() throws SQLException { c1.commit(); Random random = new Random(1); - s1.execute("CREATE TABLE TEST(ID INT IDENTITY, NAME VARCHAR)"); + s1.execute("CREATE TABLE TEST(ID INT GENERATED BY DEFAULT AS IDENTITY, NAME VARCHAR)"); Statement s; Connection c; for (int i = 0; i < 1000; i++) { diff --git a/h2/src/test/org/h2/test/mvcc/TestMvcc2.java 
b/h2/src/test/org/h2/test/mvcc/TestMvcc2.java index 41ff3ec279..93ce063569 100644 --- a/h2/src/test/org/h2/test/mvcc/TestMvcc2.java +++ b/h2/src/test/org/h2/test/mvcc/TestMvcc2.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.mvcc; @@ -35,14 +35,11 @@ public class TestMvcc2 extends TestDb { */ public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); - test.test(); + test.testFromMain(); } @Override public boolean isEnabled() { - if (!config.mvStore) { - return false; - } return true; } diff --git a/h2/src/test/org/h2/test/mvcc/TestMvcc3.java b/h2/src/test/org/h2/test/mvcc/TestMvcc3.java index 1132311232..ebf6bfadbf 100644 --- a/h2/src/test/org/h2/test/mvcc/TestMvcc3.java +++ b/h2/src/test/org/h2/test/mvcc/TestMvcc3.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.mvcc; @@ -27,7 +27,7 @@ public class TestMvcc3 extends TestDb { */ public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); - test.test(); + test.testFromMain(); } @Override @@ -36,7 +36,6 @@ public void test() throws SQLException { testConcurrentUpdate(); testInsertUpdateRollback(); testCreateTableAsSelect(); - testSequence(); testDisableAutoCommit(); testRollback(); deleteDb("mvcc3"); @@ -63,9 +62,6 @@ private void testFailedUpdate() throws SQLException { } private void testConcurrentUpdate() throws SQLException { - if (!config.mvStore) { - return; - } deleteDb("mvcc3"); Connection c1 = getConnection("mvcc3"); c1.setAutoCommit(false); @@ -102,10 +98,6 @@ private void testConcurrentUpdate() throws SQLException { } private void testInsertUpdateRollback() throws SQLException { - if (!config.mvStore) { - return; - } - deleteDb("mvcc3"); Connection c1 = getConnection("mvcc3"); Statement s1 = c1.createStatement(); @@ -147,9 +139,6 @@ private void printRows(String s, Statement s1, Statement s2) } private void testCreateTableAsSelect() throws SQLException { - if (!config.mvStore) { - return; - } deleteDb("mvcc3"); Connection c1 = getConnection("mvcc3"); Statement s1 = c1.createStatement(); @@ -165,10 +154,6 @@ private void testCreateTableAsSelect() throws SQLException { } private void testRollback() throws SQLException { - if (!config.mvStore) { - return; - } - deleteDb("mvcc3"); Connection conn = getConnection("mvcc3"); Statement stat = conn.createStatement(); @@ -218,9 +203,6 @@ private void testRollback() throws SQLException { } private void testDisableAutoCommit() throws SQLException { - if (!config.mvStore) { - return; - } deleteDb("mvcc3"); Connection conn = getConnection("mvcc3"); Statement stat = conn.createStatement(); @@ -236,30 +218,4 @@ private void testDisableAutoCommit() throws SQLException { conn.close(); } - private void testSequence() throws SQLException { - if (config.memory) { - return; - } - - deleteDb("mvcc3"); - Connection conn; - ResultSet rs; - - conn = getConnection("mvcc3"); - 
conn.createStatement().execute("create sequence abc"); - conn.close(); - - conn = getConnection("mvcc3"); - rs = conn.createStatement().executeQuery("call abc.nextval"); - rs.next(); - assertEquals(1, rs.getInt(1)); - conn.close(); - - conn = getConnection("mvcc3"); - rs = conn.createStatement().executeQuery("call abc.currval"); - rs.next(); - assertEquals(1, rs.getInt(1)); - conn.close(); - } - } diff --git a/h2/src/test/org/h2/test/mvcc/TestMvcc4.java b/h2/src/test/org/h2/test/mvcc/TestMvcc4.java index 5578708e99..b99637a2d0 100644 --- a/h2/src/test/org/h2/test/mvcc/TestMvcc4.java +++ b/h2/src/test/org/h2/test/mvcc/TestMvcc4.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.mvcc; @@ -29,12 +29,12 @@ public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.lockTimeout = 20000; test.config.memory = true; - test.test(); + test.testFromMain(); } @Override public boolean isEnabled() { - if (config.networked || !config.mvStore) { + if (config.networked) { return false; } return true; @@ -47,7 +47,7 @@ public void test() throws SQLException { private void testSelectForUpdateAndUpdateConcurrency() throws SQLException { deleteDb("mvcc4"); - Connection setup = getConnection("mvcc4;MULTI_THREADED=TRUE"); + Connection setup = getConnection("mvcc4"); setup.setAutoCommit(false); { diff --git a/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded.java b/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded.java index ddfd9e5499..26f3ab3e54 100644 --- a/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded.java +++ b/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.mvcc; @@ -26,14 +26,11 @@ public class TestMvccMultiThreaded extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public boolean isEnabled() { - if (!config.mvStore) { - return false; - } return true; } @@ -47,7 +44,7 @@ public void test() throws Exception { private void testConcurrentSelectForUpdate() throws Exception { deleteDb(getTestName()); - Connection conn = getConnection(getTestName() + ";MULTI_THREADED=TRUE"); + Connection conn = getConnection(getTestName()); Statement stat = conn.createStatement(); stat.execute("create table test(id int not null primary key, updated int not null)"); stat.execute("insert into test(id, updated) values(1, 100)"); @@ -139,7 +136,7 @@ private void testConcurrentUpdate() throws Exception { } Connection conn = connList[0]; conn.createStatement().execute( - "create table test(id int primary key, value int)"); + "create table test(id int primary key, v int)"); conn.createStatement().execute( "insert into test values(0, 0)"); final int count = 1000; @@ -157,10 +154,10 @@ private void testConcurrentUpdate() throws Exception { public void call() throws Exception { for (int a = 0; a < count; a++) { ResultSet rs = connList[x].createStatement().executeQuery( - "select value from test for update"); + "select v from test for update"); assertTrue(rs.next()); connList[x].createStatement().execute( - "update test set value=value+1"); + "update test set v=v+1"); connList[x].commit(); barrier.await(); } @@ -171,7 +168,7 @@ public void call() throws Exception { for (int i = 0; i < len; i++) { tasks[i].get(); } - 
ResultSet rs = conn.createStatement().executeQuery("select value from test"); + ResultSet rs = conn.createStatement().executeQuery("select v from test"); rs.next(); assertEquals(count * len, rs.getInt(1)); for (int i = 0; i < len; i++) { diff --git a/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded2.java b/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded2.java index 590af5e4c5..1f6231eed4 100644 --- a/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded2.java +++ b/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded2.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.mvcc; @@ -11,6 +11,7 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; +import java.util.concurrent.CountDownLatch; import org.h2.message.DbException; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -25,7 +26,7 @@ public class TestMvccMultiThreaded2 extends TestDb { private static final int TEST_TIME_SECONDS = 60; private static final boolean DISPLAY_STATS = false; - private static final String URL = ";LOCK_TIMEOUT=120000;MULTI_THREADED=TRUE"; + private static final String URL = ";LOCK_TIMEOUT=120000"; /** * Run just this test. @@ -36,8 +37,7 @@ public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.lockTimeout = 120000; test.config.memory = true; - test.config.multiThreaded = true; - test.test(); + test.testFromMain(); } int getTestDuration() { @@ -47,9 +47,6 @@ int getTestDuration() { @Override public boolean isEnabled() { - if (!config.mvStore) { - return false; - } return true; } @@ -81,16 +78,16 @@ private void testSelectForUpdateConcurrency() ps.executeUpdate(); conn.commit(); + CountDownLatch latch = new CountDownLatch(TEST_THREAD_COUNT + 1); ArrayList threads = new ArrayList<>(); for (int i = 0; i < TEST_THREAD_COUNT; i++) { - SelectForUpdate sfu = new SelectForUpdate(); + SelectForUpdate sfu = new SelectForUpdate(latch); sfu.setName("Test SelectForUpdate Thread#"+i); threads.add(sfu); sfu.start(); } - // give any of the 100 threads a chance to start by yielding the processor to them - Thread.yield(); + latch.countDown(); // gather stats on threads after they finished @SuppressWarnings("unused") @@ -127,26 +124,27 @@ private void testSelectForUpdateConcurrency() /** * Worker test thread selecting for update */ - private class SelectForUpdate extends Thread { - + private class SelectForUpdate extends Thread + { + private final CountDownLatch latch; public int iterationsProcessed; public boolean ok; - SelectForUpdate() { + SelectForUpdate(CountDownLatch latch) { + this.latch = latch; } @Override public void run() { final long start = System.currentTimeMillis(); boolean done = false; - Connection conn = null; - try { - conn = getConnection(getTestName() + URL); + try (Connection conn = getConnection(getTestName() + URL)) { conn.setAutoCommit(false); // give the other threads a chance to start up before going into our work loop - Thread.yield(); + latch.countDown(); + latch.await(); PreparedStatement ps = conn.prepareStatement( "SELECT * FROM test WHERE entity_id = ? 
FOR UPDATE"); @@ -174,6 +172,8 @@ public void run() { done = true; } } + ok = true; + } catch (InterruptedException ignore) { } catch (SQLException e) { TestBase.logError("SQL error from thread "+getName(), e); throw DbException.convert(e); @@ -181,8 +181,6 @@ public void run() { TestBase.logError("General error from thread "+getName(), e); throw e; } - IOUtils.closeSilently(conn); - ok = true; } } } diff --git a/h2/src/test/org/h2/test/mvcc/package.html b/h2/src/test/org/h2/test/mvcc/package.html index d2d4ad6c1f..73ab19a52e 100644 --- a/h2/src/test/org/h2/test/mvcc/package.html +++ b/h2/src/test/org/h2/test/mvcc/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/otherDatabases.txt b/h2/src/test/org/h2/test/otherDatabases.txt index b04c85adcf..48a689b49b 100644 --- a/h2/src/test/org/h2/test/otherDatabases.txt +++ b/h2/src/test/org/h2/test/otherDatabases.txt @@ -69,7 +69,7 @@ Derby To call getFD().sync() (which results in the OS call fsync()), set the system property derby.storage.fileSyncTransactionLog to true. See -http://db.apache.org/derby/javadoc/engine/org/apache/derby/iapi/reference/Property.html#FILESYNC_TRANSACTION_LOG +https://db.apache.org/derby/javadoc/engine/org/apache/derby/iapi/reference/Property.html#FILESYNC_TRANSACTION_LOG Missing features: LIMIT OFFSET is not supported. No optimization for COUNT(*) diff --git a/h2/src/test/org/h2/test/package.html b/h2/src/test/org/h2/test/package.html index 805b8d0927..b2fcea6040 100644 --- a/h2/src/test/org/h2/test/package.html +++ b/h2/src/test/org/h2/test/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/poweroff/Listener.java b/h2/src/test/org/h2/test/poweroff/Listener.java index 70194870de..2b49cac156 100644 --- a/h2/src/test/org/h2/test/poweroff/Listener.java +++ b/h2/src/test/org/h2/test/poweroff/Listener.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.poweroff; diff --git a/h2/src/test/org/h2/test/poweroff/Test.java b/h2/src/test/org/h2/test/poweroff/Test.java index 59f8571778..2875236632 100644 --- a/h2/src/test/org/h2/test/poweroff/Test.java +++ b/h2/src/test/org/h2/test/poweroff/Test.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.poweroff; @@ -142,21 +142,21 @@ private static void testFile(DataOutputStream out) throws IOException { private static void testDatabases(DataOutputStream out) throws Exception { Test[] dbs = { new Test("org.h2.Driver", - "jdbc:h2:test1", "sa", "", true), + "jdbc:h2:./test1", "sa", "", true), new Test("org.h2.Driver", - "jdbc:h2:test2", "sa", "", false), + "jdbc:h2:./test2", "sa", "", false), new Test("org.hsqldb.jdbcDriver", "jdbc:hsqldb:test4", "sa", "", false), - // new Test("com.mysql.jdbc.Driver", + // new Test("com.mysql.cj.jdbc.Driver", // "jdbc:mysql://localhost/test", "sa", ""), new Test("org.postgresql.Driver", "jdbc:postgresql:test", "sa", "sa", false), - new Test("org.apache.derby.jdbc.EmbeddedDriver", + new Test("org.apache.derby.iapi.jdbc.AutoloadedDriver", "jdbc:derby:test;create=true", "sa", "", false), new Test("org.h2.Driver", - "jdbc:h2:test5", "sa", "", true), + "jdbc:h2:./test5", "sa", "", true), new Test("org.h2.Driver", - "jdbc:h2:test6", "sa", "", false), }; + "jdbc:h2:./test6", "sa", "", false), }; for (int i = 0;; i++) { for (Test t : dbs) { t.insert(i); diff --git a/h2/src/test/org/h2/test/poweroff/TestRecover.java b/h2/src/test/org/h2/test/poweroff/TestRecover.java index 
494b4368c7..922d43fbbb 100644 --- a/h2/src/test/org/h2/test/poweroff/TestRecover.java +++ b/h2/src/test/org/h2/test/poweroff/TestRecover.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.poweroff; @@ -20,9 +20,9 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import java.text.SimpleDateFormat; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; -import java.util.Date; import java.util.List; import java.util.Random; import java.util.zip.ZipEntry; @@ -54,7 +54,7 @@ public class TestRecover { // "jdbc:derby:/temp/derby/data/test;create=true"); // private static final String DRIVER = // System.getProperty("test.driver", - // "org.apache.derby.jdbc.EmbeddedDriver"); + // "org.apache.derby.iapi.jdbc.AutoloadedDriver"); /** * This method is called when executing this application from the command @@ -103,8 +103,7 @@ private static File backup(String sourcePath, String targetPath, } oldest.delete(); } - SimpleDateFormat sd = new SimpleDateFormat("yyMMdd-HHmmss"); - String date = sd.format(new Date()); + String date = DateTimeFormatter.ofPattern("yyMMdd-HHmmss").format(LocalDateTime.now()); File zipFile = new File(root, "backup-" + date + "-" + node + ".zip"); ArrayList list = new ArrayList<>(); File base = new File(sourcePath); diff --git a/h2/src/test/org/h2/test/poweroff/TestRecoverKillLoop.java b/h2/src/test/org/h2/test/poweroff/TestRecoverKillLoop.java index 511f642195..20c9a4db06 100644 --- a/h2/src/test/org/h2/test/poweroff/TestRecoverKillLoop.java +++ b/h2/src/test/org/h2/test/poweroff/TestRecoverKillLoop.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.poweroff; diff --git a/h2/src/test/org/h2/test/poweroff/TestReorderWrites.java b/h2/src/test/org/h2/test/poweroff/TestReorderWrites.java index bde3366a0a..a6bfba0b95 100644 --- a/h2/src/test/org/h2/test/poweroff/TestReorderWrites.java +++ b/h2/src/test/org/h2/test/poweroff/TestReorderWrites.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.poweroff; @@ -12,6 +12,7 @@ import java.util.Map; import java.util.Random; import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.MVStoreTool; import org.h2.store.fs.FilePath; import org.h2.store.fs.FileUtils; @@ -32,20 +33,27 @@ public class TestReorderWrites extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - testMVStore(); - testFileSystem(); + testMVStore(false); + testMVStore(true); + testFileSystem(false); + testFileSystem(true); } - private void testMVStore() { + private void testMVStore(final boolean partialWrite) { + // Add partial write test + // @since 2019-07-31 little-pan + println(String.format("testMVStore(): %s partial write", partialWrite? 
"Enable": "Disable")); + FilePathReorderWrites.setPartialWrites(partialWrite); + FilePathReorderWrites fs = FilePathReorderWrites.register(); String fileName = "reorder:memFS:test.mv"; try { - for (int i = 0; i < 1000; i++) { + for (int i = 0; i < (config.big ? 1000 : 100); i++) { log(i + " --------------------------------"); // this test is not interested in power off failures during // initial creation @@ -62,7 +70,7 @@ private void testMVStore() { store.commit(); store.getFileStore().sync(); Random r = new Random(i); - int stop = 4 + r.nextInt(20); + int stop = 4 + r.nextInt(config.big ? 150 : 20); log("countdown start"); fs.setPowerOffCountdown(stop, i); try { @@ -92,13 +100,13 @@ private void testMVStore() { } // write has to fail at some point fail(); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { log("stop " + e + ", cause: " + e.getCause()); // expected } try { store.close(); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { // expected store.closeImmediately(); } @@ -136,10 +144,14 @@ private static void log(String message) { } } - private void testFileSystem() throws IOException { + private void testFileSystem(final boolean partialWrite) throws IOException { FilePathReorderWrites fs = FilePathReorderWrites.register(); - // disable this for now, still bug(s) in our code - FilePathReorderWrites.setPartialWrites(false); + // *disable this for now, still bug(s) in our code* + // Add partial write enable test + // @since 2019-07-31 little-pan + FilePathReorderWrites.setPartialWrites(partialWrite); + println(String.format("testFileSystem(): %s partial write", partialWrite? 
"Enable": "Disable")); + String fileName = "reorder:memFS:test"; final ByteBuffer empty = ByteBuffer.allocate(1024); Random r = new Random(1); diff --git a/h2/src/test/org/h2/test/poweroff/TestWrite.java b/h2/src/test/org/h2/test/poweroff/TestWrite.java index 0e0214a3cd..b7d75a0037 100644 --- a/h2/src/test/org/h2/test/poweroff/TestWrite.java +++ b/h2/src/test/org/h2/test/poweroff/TestWrite.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.poweroff; @@ -39,12 +39,12 @@ public static void main(String... args) throws Exception { testFile("rwd", true); testFile("rws", true); testDatabase("org.h2.Driver", - "jdbc:h2:test", "sa", ""); + "jdbc:h2:./test", "sa", ""); testDatabase("org.hsqldb.jdbcDriver", "jdbc:hsqldb:test4", "sa", ""); - testDatabase("org.apache.derby.jdbc.EmbeddedDriver", + testDatabase("org.apache.derby.iapi.jdbc.AutoloadedDriver", "jdbc:derby:test;create=true", "sa", ""); - testDatabase("com.mysql.jdbc.Driver", + testDatabase("com.mysql.cj.jdbc.Driver", "jdbc:mysql://localhost/test", "sa", "sa"); testDatabase("org.postgresql.Driver", "jdbc:postgresql:test", "sa", "sa"); diff --git a/h2/src/test/org/h2/test/poweroff/package.html b/h2/src/test/org/h2/test/poweroff/package.html index d2d4ad6c1f..73ab19a52e 100644 --- a/h2/src/test/org/h2/test/poweroff/package.html +++ b/h2/src/test/org/h2/test/poweroff/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/recover/RecoverLobTest.java b/h2/src/test/org/h2/test/recover/RecoverLobTest.java index 926cfd6e7d..fb93f5b1b5 100644 --- a/h2/src/test/org/h2/test/recover/RecoverLobTest.java +++ b/h2/src/test/org/h2/test/recover/RecoverLobTest.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.recover; @@ -24,7 +24,7 @@ public class RecoverLobTest extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/recover/package.html b/h2/src/test/org/h2/test/recover/package.html index 27b0d2898d..05ddb3e212 100644 --- a/h2/src/test/org/h2/test/recover/package.html +++ b/h2/src/test/org/h2/test/recover/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/rowlock/TestRowLocks.java b/h2/src/test/org/h2/test/rowlock/TestRowLocks.java index 1ea6d87875..3c481d4355 100644 --- a/h2/src/test/org/h2/test/rowlock/TestRowLocks.java +++ b/h2/src/test/org/h2/test/rowlock/TestRowLocks.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.rowlock; @@ -33,29 +33,15 @@ public class TestRowLocks extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - testSetMode(); - if (config.mvStore) { - testCases(); - } + testCases(); deleteDb(getTestName()); } - private void testSetMode() throws SQLException { - deleteDb(getTestName()); - c1 = getConnection(getTestName()); - Statement stat = c1.createStatement(); - stat.execute("SET LOCK_MODE 2"); - ResultSet rs = stat.executeQuery("call lock_mode()"); - rs.next(); - assertEquals("2", rs.getString(1)); - c1.close(); - } - private void testCases() throws Exception { deleteDb(getTestName()); c1 = getConnection(getTestName()); diff --git a/h2/src/test/org/h2/test/rowlock/package.html b/h2/src/test/org/h2/test/rowlock/package.html index d2372fb863..ce78426472 100644 --- a/h2/src/test/org/h2/test/rowlock/package.html +++ b/h2/src/test/org/h2/test/rowlock/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/scripts/Aggregate1.java b/h2/src/test/org/h2/test/scripts/Aggregate1.java new file mode 100644 index 0000000000..038a93794e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/Aggregate1.java @@ -0,0 +1,32 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.scripts; + +import java.sql.SQLException; + +import org.h2.api.Aggregate; +import org.h2.api.H2Type; + +/** + * An aggregate function for tests. 
+ */ +public class Aggregate1 implements Aggregate { + + @Override + public int getInternalType(int[] inputTypes) throws SQLException { + return H2Type.INTEGER.getVendorTypeNumber(); + } + + @Override + public void add(Object value) throws SQLException { + } + + @Override + public Object getResult() throws SQLException { + return 0; + } + +} diff --git a/h2/src/test/org/h2/test/scripts/TestScript.java b/h2/src/test/org/h2/test/scripts/TestScript.java index 2e4b2d48c6..0e7686b693 100644 --- a/h2/src/test/org/h2/test/scripts/TestScript.java +++ b/h2/src/test/org/h2/test/scripts/TestScript.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.scripts; @@ -32,14 +32,16 @@ import org.h2.command.CommandContainer; import org.h2.command.CommandInterface; import org.h2.command.Prepared; -import org.h2.command.dml.Query; -import org.h2.engine.SysProperties; +import org.h2.command.dml.ScriptCommand; +import org.h2.command.query.Query; +import org.h2.engine.Mode.ModeEnum; import org.h2.jdbc.JdbcConnection; import org.h2.jdbc.JdbcPreparedStatement; import org.h2.test.TestAll; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.util.StringUtils; +import org.h2.value.DataType; /** * This test runs a SQL script file and compares the output with the expected @@ -70,7 +72,7 @@ public class TestScript extends TestDb { private PrintStream out; private final ArrayList result = new ArrayList<>(); private final ArrayDeque putBack = new ArrayDeque<>(); - private StringBuilder errors; + private boolean foundErrors; private Random random = new Random(1); @@ -92,7 +94,7 @@ public class TestScript extends TestDb { */ public static void main(String... 
a) throws Exception { CHECK_ORDERING = true; - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } /** @@ -129,64 +131,67 @@ public void test() throws Exception { if (!config.memory && !config.big && !config.networked) { testScript("testSimple.sql"); } - testScript("comments.sql"); - testScript("derived-column-names.sql"); - testScript("distinct.sql"); testScript("dual.sql"); testScript("indexes.sql"); testScript("information_schema.sql"); - testScript("joins.sql"); testScript("range_table.sql"); testScript("altertable-index-reuse.sql"); testScript("altertable-fk.sql"); testScript("default-and-on_update.sql"); - testScript("query-optimisations.sql"); - testScript("window.sql"); - String decimal2; - if (SysProperties.BIG_DECIMAL_IS_DECIMAL) { - decimal2 = "decimal_decimal"; - } else { - decimal2 = "decimal_numeric"; - } + for (String s : new String[] { "add_months", "compatibility", "group_by", "strict_and_legacy"}) { + testScript("compatibility/" + s + ".sql"); + } for (String s : new String[] { "array", "bigint", "binary", "blob", - "boolean", "char", "clob", "date", "decimal", decimal2, "double", "enum", - "geometry", "identity", "int", "interval", "other", "real", "row", "smallint", - "time", "timestamp-with-timezone", "timestamp", "tinyint", - "uuid", "varchar", "varchar-ignorecase" }) { + "boolean", "char", "clob", "date", "decfloat", "double_precision", "enum", + "geometry", "identity", "int", "interval", "java_object", "json", "numeric", "real", "row", "smallint", + "time-with-time-zone", "time", "timestamp-with-time-zone", "timestamp", "tinyint", + "uuid", "varbinary", "varchar", "varchar-ignorecase" }) { testScript("datatypes/" + s + ".sql"); } - for (String s : new String[] { "alterTableAdd", "alterTableAlterColumn", "alterTableDropColumn", - "alterTableRename", "createAlias", "createSequence", "createSynonym", "createTable", "createTrigger", - "createView", "dropDomain", "dropIndex", "dropSchema", "truncateTable" }) 
{ + for (String s : new String[] { "alterDomain", "alterTableAdd", "alterTableAlterColumn", "alterTableDropColumn", + "alterTableDropConstraint", + "alterTableRename", "alterTableRenameConstraint", + "analyze", "commentOn", "createAlias", "createConstant", "createDomain", + "createIndex", "createSchema", "createSequence", "createSynonym", + "createTable", "createTrigger", "createView", "dropAllObjects", "dropDomain", "dropIndex", + "dropSchema", "dropTable", "grant", "truncateTable" }) { testScript("ddl/" + s + ".sql"); } - for (String s : new String[] { "delete", "error_reporting", "insert", "insertIgnore", "merge", "mergeUsing", - "replace", "script", "select", "show", "table", "update", "values", "with" }) { + for (String s : new String[] { "delete", "error_reporting", "execute_immediate", "insert", "insertIgnore", + "merge", "mergeUsing", "replace", "script", "show", "update", "with" }) { testScript("dml/" + s + ".sql"); } - for (String s : new String[] { "help" }) { - testScript("other/" + s + ".sql"); - } - for (String s : new String[] { "any", "array-agg", "avg", "bit-and", "bit-or", "count", "envelope", - "every", "histogram", "listagg", "max", "min", "mode", "percentile", "rank", "selectivity", - "stddev-pop", "stddev-samp", "sum", "var-pop", "var-samp" }) { + for (String s : new String[] { "any", "array_agg", "avg", "bit_and_agg", "bit_or_agg", "bit_xor_agg", + "corr", + "count", + "covar_pop", "covar_samp", + "envelope", "every", "histogram", + "json_arrayagg", "json_objectagg", + "listagg", "max", "min", "mode", "percentile", "rank", + "regr_avgx", "regr_avgy", "regr_count", "regr_intercept", "regr_r2", "regr_slope", + "regr_sxx", "regr_sxy", "regr_syy", + "stddev_pop", "stddev_samp", "sum", "var_pop", "var_samp" }) { testScript("functions/aggregate/" + s + ".sql"); } + for (String s : new String[] { "json_array", "json_object" }) { + testScript("functions/json/" + s + ".sql"); + } for (String s : new String[] { "abs", "acos", "asin", "atan", "atan2", - 
"bitand", "bitget", "bitor", "bitxor", "ceil", "compress", + "bitand", "bitcount", "bitget", "bitnot", "bitor", "bitxor", "ceil", "compress", "cos", "cosh", "cot", "decrypt", "degrees", "encrypt", "exp", - "expand", "floor", "hash", "length", "log", "mod", "ora-hash", "pi", - "power", "radians", "rand", "random-uuid", "round", - "roundmagic", "secure-rand", "sign", "sin", "sinh", "sqrt", + "expand", "floor", "hash", "length", "log", "lshift", "mod", "ora-hash", "pi", + "power", "radians", "rand", "random-uuid", "rotate", "round", + "roundmagic", "rshift", "secure-rand", "sign", "sin", "sinh", "sqrt", "tan", "tanh", "truncate", "zero" }) { testScript("functions/numeric/" + s + ".sql"); } - for (String s : new String[] { "ascii", "bit-length", "char", "concat", - "concat-ws", "difference", "hextoraw", "insert", "instr", + for (String s : new String[] { "array-to-string", + "ascii", "bit-length", "char", "concat", + "concat-ws", "difference", "hextoraw", "insert", "left", "length", "locate", "lower", "lpad", "ltrim", - "octet-length", "position", "rawtohex", "regexp-like", - "regex-replace", "repeat", "replace", "right", "rpad", "rtrim", + "octet-length", "quote_ident", "rawtohex", "regexp-like", + "regex-replace", "regexp-substr", "repeat", "replace", "right", "rpad", "rtrim", "soundex", "space", "stringdecode", "stringencode", "stringtoutf8", "substring", "to-char", "translate", "trim", "upper", "utf8tostring", "xmlattr", "xmlcdata", "xmlcomment", @@ -194,17 +199,18 @@ public void test() throws Exception { testScript("functions/string/" + s + ".sql"); } for (String s : new String[] { "array-cat", "array-contains", "array-get", - "array-length","array-slice", "autocommit", "cancel-session", "casewhen", - "cast", "coalesce", "convert", "csvread", "csvwrite", "currval", - "database-path", "database", "decode", "disk-space-used", + "array-slice", "autocommit", "cancel-session", "casewhen", + "cardinality", "cast", "coalesce", "convert", "csvread", "csvwrite", 
"current_catalog", + "current_schema", "current_user", "currval", "data_type_sql", + "database-path", "db_object", "decode", "disk-space-used", "file-read", "file-write", "greatest", "h2version", "identity", - "ifnull", "least", "link-schema", "lock-mode", "lock-timeout", + "ifnull", "last-insert-id", "least", "link-schema", "lock-mode", "lock-timeout", "memory-free", "memory-used", "nextval", "nullif", "nvl2", - "readonly", "rownum", "schema", "scope-identity", "session-id", - "set", "table", "transaction-id", "truncate-value", "unnest", "user" }) { + "readonly", "rownum", "session-id", + "table", "transaction-id", "trim_array", "truncate-value", "unnest" }) { testScript("functions/system/" + s + ".sql"); } - for (String s : new String[] { "add_months", "current_date", "current_timestamp", + for (String s : new String[] { "current_date", "current_timestamp", "current-time", "dateadd", "datediff", "dayname", "day-of-month", "day-of-week", "day-of-year", "extract", "formatdatetime", "hour", "minute", "month", "monthname", @@ -214,9 +220,28 @@ public void test() throws Exception { for (String s : new String[] { "lead", "nth_value", "ntile", "ratio_to_report", "row_number" }) { testScript("functions/window/" + s + ".sql"); } + for (String s : new String[] { "at-time-zone", "boolean-test", "case", "concatenation", "conditions", + "data-change-delta-table", "field-reference", "help", "sequence", "set" }) { + testScript("other/" + s + ".sql"); + } + for (String s : new String[] { "comments", "identifiers" }) { + testScript("parser/" + s + ".sql"); + } + for (String s : new String[] { "between", "distinct", "in", "like", "null", "type", "unique" }) { + testScript("predicates/" + s + ".sql"); + } + for (String s : new String[] { "derived-column-names", "distinct", "joins", "query-optimisations", "select", + "table", "values", "window" }) { + testScript("queries/" + s + ".sql"); + } + testScript("other/two_phase_commit.sql"); + testScript("other/unique_include.sql"); 
deleteDb("script"); System.out.flush(); + if (foundErrors) { + throw new Exception("errors in script found"); + } } private void testScript(String scriptFileName) throws Exception { @@ -231,11 +256,7 @@ private void testScript(String scriptFileName) throws Exception { out = null; result.clear(); putBack.clear(); - errors = null; - if (statements == null) { - println("Running commands in " + scriptFileName); - } String outFile; if (FIX_OUTPUT) { outFile = scriptFileName; @@ -249,7 +270,6 @@ private void testScript(String scriptFileName) throws Exception { conn = getConnection("script"); stat = conn.createStatement(); out = new PrintStream(new FileOutputStream(outFile)); - errors = new StringBuilder(); testFile(BASE_DIR + scriptFileName); conn.close(); out.close(); @@ -275,9 +295,6 @@ private void testScript(String scriptFileName) throws Exception { file.renameTo(new File("h2/src/test/org/h2/test/scripts/" + scriptFileName)); return; } - if (errors.length() > 0) { - throw new Exception("errors in " + scriptFileName + " found"); - } } private String readLine() throws IOException { @@ -289,40 +306,7 @@ private String readNextLine() throws IOException { String s; boolean comment = false; while ((s = in.readLine()) != null) { - if (s.startsWith("#")) { - int end = s.indexOf('#', 1); - if (end < 3) { - fail("Bad line \"" + s + '\"'); - } - boolean val; - switch (s.charAt(1)) { - case '+': - val = true; - break; - case '-': - val = false; - break; - default: - fail("Bad line \"" + s + '\"'); - return null; - } - String flag = s.substring(2, end); - s = s.substring(end + 1); - switch (flag) { - case "mvStore": - if (config.mvStore == val) { - out.print("#" + (val ? '+' : '-') + flag + '#'); - break; - } else { - if (FIX_OUTPUT) { - write("#" + (val ? 
'+' : '-') + flag + '#' + s); - } - continue; - } - default: - fail("Unknown flag \"" + flag + '\"'); - } - } else if (s.startsWith("--")) { + if (s.startsWith("--")) { write(s); comment = true; continue; @@ -387,6 +371,12 @@ private void testFile(String inFile) throws Exception { write(""); allowReconnect = false; break; + case "@autocommit on": + conn.setAutoCommit(true); + break; + case "@autocommit off": + conn.setAutoCommit(false); + break; default: addWriteResultError("", sql); } @@ -415,7 +405,8 @@ private boolean containsTempTables() throws SQLException { private void process(String sql, boolean allowReconnect) throws Exception { if (allowReconnect && reconnectOften) { - if (!containsTempTables() && ((JdbcConnection) conn).isRegularMode() + if (!containsTempTables() + && ((JdbcConnection) conn).getMode().getEnum() == ModeEnum.REGULAR && conn.getSchema().equals("PUBLIC")) { boolean autocommit = conn.getAutoCommit(); if (autocommit && random.nextInt(10) < 1) { @@ -427,7 +418,7 @@ private void process(String sql, boolean allowReconnect) throws Exception { if (statements != null) { statements.add(sql); } - if (sql.indexOf('?') == -1) { + if (!hasParameters(sql)) { processStatement(sql); } else { String param = readLine(); @@ -454,6 +445,21 @@ private void process(String sql, boolean allowReconnect) throws Exception { write(""); } + private static boolean hasParameters(String sql) { + int index = 0; + for (;;) { + index = sql.indexOf('?', index); + if (index < 0) { + return false; + } + int length = sql.length(); + if (++index == length || sql.charAt(index) != '?') { + return true; + } + index++; + } + } + private void reconnect(boolean autocommit) throws SQLException { conn.close(); conn = getConnection("script"); @@ -546,6 +552,13 @@ private static String formatString(String s) { return s; } + private static String formatBinary(byte[] b) { + if (b == null) { + return "null"; + } + return StringUtils.convertBytesToHex(new StringBuilder("X'"), 
b).append('\'').toString(); + } + private void writeResultSet(String sql, ResultSet rs) throws Exception { ResultSetMetaData meta = rs.getMetaData(); int len = meta.getColumnCount(); @@ -554,7 +567,7 @@ private void writeResultSet(String sql, ResultSet rs) throws Exception { while (rs.next()) { String[] row = new String[len]; for (int i = 0; i < len; i++) { - String data = formatString(rs.getString(i + 1)); + String data = readValue(rs, meta, i + 1); if (max[i] < data.length()) { max[i] = data.length(); } @@ -578,6 +591,8 @@ private void writeResultSet(String sql, ResultSet rs) throws Exception { Prepared p = (Prepared) PREPARED.get(ci); if (p instanceof Query) { gotOrdered = ((Query) p).hasOrder(); + } else if (p instanceof ScriptCommand) { + gotOrdered = true; } } } @@ -650,6 +665,11 @@ private void writeResultSet(String sql, ResultSet rs) throws Exception { null); } + private static String readValue(ResultSet rs, ResultSetMetaData meta, int column) throws SQLException { + return DataType.isBinaryColumn(meta, column) ? 
formatBinary(rs.getBytes(column)) + : formatString(rs.getString(column)); + } + private static String format(String[] row, int[] max) { int length = max.length; StringBuilder buff = new StringBuilder(); @@ -725,12 +745,12 @@ private void writeResult(String sql, String s, SQLException ex, String prefix) t } private void addWriteResultError(String expected, String got) { - int idx = errors.length(); - errors.append(fileName).append('\n'); - errors.append("line: ").append(in.getLineNumber()).append('\n'); - errors.append("exp: ").append(expected).append('\n'); - errors.append("got: ").append(got).append('\n'); - TestBase.logErrorMessage(errors.substring(idx)); + foundErrors = true; + final String msg = fileName + '\n' + // + "line: " + in.getLineNumber() + '\n' + // + "exp: " + expected + '\n' + // + "got: " + got + '\n'; + TestBase.logErrorMessage(msg); } private void write(String s) { diff --git a/h2/src/test/org/h2/test/scripts/Trigger1.java b/h2/src/test/org/h2/test/scripts/Trigger1.java new file mode 100644 index 0000000000..b110511299 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/Trigger1.java @@ -0,0 +1,25 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.scripts; + +import java.sql.Connection; +import java.sql.SQLException; + +import org.h2.api.Trigger; + +/** + * A trigger for tests. + */ +public class Trigger1 implements Trigger { + + @Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + if (newRow != null) { + newRow[2] = ((int) newRow[2]) * 10; + } + } + +} diff --git a/h2/src/test/org/h2/test/scripts/Trigger2.java b/h2/src/test/org/h2/test/scripts/Trigger2.java new file mode 100644 index 0000000000..ff773336d1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/Trigger2.java @@ -0,0 +1,59 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.scripts; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.h2.api.Trigger; + +/** + * A trigger for tests. + */ +public class Trigger2 implements Trigger { + + @Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + if (oldRow == null && newRow != null) { + Long id = (Long) newRow[0]; + PreparedStatement prep; + int i = 0; + if (id == null) { + prep = conn.prepareStatement("SELECT * FROM FINAL TABLE (INSERT INTO TEST VALUES (DEFAULT, ?, ?))"); + } else { + prep = conn.prepareStatement("SELECT * FROM FINAL TABLE (INSERT INTO TEST VALUES (?, ?, ?))"); + prep.setLong(++i, id); + } + prep.setInt(++i, (int) newRow[1]); + prep.setInt(++i, (int) newRow[2]); + executeAndReadFinalTable(prep, newRow); + } else if (oldRow != null && newRow != null) { + PreparedStatement prep = conn.prepareStatement( + "SELECT * FROM FINAL TABLE (UPDATE TEST SET (ID, A, B) = (?, ?, ?) 
WHERE ID = ?)"); + prep.setLong(1, (long) newRow[0]); + prep.setInt(2, (int) newRow[1]); + prep.setInt(3, (int) newRow[2]); + prep.setLong(4, (long) oldRow[0]); + executeAndReadFinalTable(prep, newRow); + } else if (oldRow != null && newRow == null) { + PreparedStatement prep = conn.prepareStatement("DELETE FROM TEST WHERE ID = ?"); + prep.setLong(1, (long) oldRow[0]); + prep.executeUpdate(); + } + } + + private static void executeAndReadFinalTable(PreparedStatement prep, Object[] newRow) throws SQLException { + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + newRow[0] = rs.getLong(1); + newRow[1] = rs.getInt(2); + newRow[2] = rs.getInt(3); + } + } + +} diff --git a/h2/src/test/org/h2/test/scripts/altertable-fk.sql b/h2/src/test/org/h2/test/scripts/altertable-fk.sql index 7886a7c953..73adb9d586 100644 --- a/h2/src/test/org/h2/test/scripts/altertable-fk.sql +++ b/h2/src/test/org/h2/test/scripts/altertable-fk.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/altertable-index-reuse.sql b/h2/src/test/org/h2/test/scripts/altertable-index-reuse.sql index 8a45b4684f..f93f90e7e0 100644 --- a/h2/src/test/org/h2/test/scripts/altertable-index-reuse.sql +++ b/h2/src/test/org/h2/test/scripts/altertable-index-reuse.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/add_months.sql b/h2/src/test/org/h2/test/scripts/compatibility/add_months.sql similarity index 76% rename from h2/src/test/org/h2/test/scripts/functions/timeanddate/add_months.sql rename to h2/src/test/org/h2/test/scripts/compatibility/add_months.sql index 511fc12e96..69e7100854 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/add_months.sql +++ b/h2/src/test/org/h2/test/scripts/compatibility/add_months.sql @@ -1,8 +1,11 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- +SET MODE Oracle; +> ok + -- 01-Aug-03 + 3 months = 01-Nov-03 SELECT ADD_MONTHS('2003-08-01', 3); >> 2003-11-01 00:00:00 diff --git a/h2/src/test/org/h2/test/scripts/compatibility/compatibility.sql b/h2/src/test/org/h2/test/scripts/compatibility/compatibility.sql new file mode 100644 index 0000000000..a05dec4eba --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/compatibility/compatibility.sql @@ -0,0 +1,751 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- EXEC and EXECUTE in MSSQLServer mode + +CREATE ALIAS MY_NO_ARG AS 'int f() { return 1; }'; +> ok + +CREATE ALIAS MY_SQRT FOR "java.lang.Math.sqrt"; +> ok + +CREATE ALIAS MY_REMAINDER FOR "java.lang.Math.IEEEremainder"; +> ok + +EXEC MY_SQRT 4; +> exception SYNTAX_ERROR_2 + +-- PostgreSQL-style EXECUTE doesn't work with MSSQLServer-style arguments +EXECUTE MY_SQRT 4; +> exception FUNCTION_ALIAS_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +-- PostgreSQL-style PREPARE is not available in MSSQLServer mode +PREPARE TEST AS SELECT 1; +> exception SYNTAX_ERROR_2 + +-- PostgreSQL-style DEALLOCATE is not available in MSSQLServer mode +DEALLOCATE TEST; +> exception SYNTAX_ERROR_2 + +EXEC MY_NO_ARG; +>> 1 + +EXEC MY_SQRT 4; +>> 2.0 + +EXEC MY_REMAINDER 4, 3; +>> 1.0 + +EXECUTE MY_SQRT 4; +>> 2.0 + +EXEC PUBLIC.MY_SQRT 4; +>> 2.0 + +EXEC SCRIPT.PUBLIC.MY_SQRT 4; +>> 2.0 + +EXEC UNKNOWN_PROCEDURE; +> exception FUNCTION_NOT_FOUND_1 + +EXEC UNKNOWN_SCHEMA.MY_SQRT 4; +> exception SCHEMA_NOT_FOUND_1 + +EXEC UNKNOWN_DATABASE.PUBLIC.MY_SQRT 4; +> exception DATABASE_NOT_FOUND_1 + +SET MODE Regular; +> ok + +DROP ALIAS MY_NO_ARG; +> ok + +DROP ALIAS MY_SQRT; +> ok + +DROP ALIAS MY_REMAINDER; +> ok + +-- UPDATE TOP (n) in MSSQLServer mode + +CREATE TABLE TEST(A INT, B INT) AS VALUES (1, 2), (3, 4), (5, 6); +> ok + +UPDATE TOP (1) TEST SET B = 10; +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +UPDATE TOP (1) TEST SET B = 10; +> update count: 1 + +SELECT COUNT(*) FILTER (WHERE B = 10) N, COUNT(*) FILTER (WHERE B <> 10) O FROM TEST; +> N O +> - - +> 1 2 +> rows: 1 + +UPDATE TEST SET B = 10 WHERE B <> 10; +> update count: 2 + +UPDATE TOP (1) TEST SET B = 10 LIMIT 1; +> exception SYNTAX_ERROR_1 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +SET MODE MySQL; +> ok + +CREATE TABLE A (A INT PRIMARY KEY, X INT); +> ok + +ALTER TABLE A ADD INDEX A_IDX(X); +> ok + +ALTER TABLE A DROP INDEX A_IDX_1; +> exception 
CONSTRAINT_NOT_FOUND_1 + +ALTER TABLE A DROP INDEX IF EXISTS A_IDX_1; +> ok + +ALTER TABLE A DROP INDEX IF EXISTS A_IDX; +> ok + +ALTER TABLE A DROP INDEX A_IDX; +> exception CONSTRAINT_NOT_FOUND_1 + +CREATE TABLE B (B INT PRIMARY KEY, A INT); +> ok + +ALTER TABLE B ADD CONSTRAINT B_FK FOREIGN KEY (A) REFERENCES A(A); +> ok + +ALTER TABLE B DROP FOREIGN KEY B_FK_1; +> exception CONSTRAINT_NOT_FOUND_1 + +-- MariaDB compatibility +ALTER TABLE B DROP FOREIGN KEY IF EXISTS B_FK_1; +> ok + +ALTER TABLE B DROP FOREIGN KEY IF EXISTS B_FK; +> ok + +ALTER TABLE B DROP FOREIGN KEY B_FK; +> exception CONSTRAINT_NOT_FOUND_1 + +DROP TABLE A, B; +> ok + +SET MODE Regular; +> ok + +-- PostgreSQL-style CREATE INDEX ... USING +CREATE TABLE TEST(B1 INT, B2 INT, H INT, R GEOMETRY, T INT); +> ok + +CREATE INDEX TEST_BTREE_IDX ON TEST USING BTREE(B1, B2); +> ok + +CREATE INDEX TEST_HASH_IDX ON TEST USING HASH(H); +> ok + +CREATE INDEX TEST_RTREE_IDX ON TEST USING RTREE(R); +> ok + +SELECT INDEX_NAME, INDEX_TYPE_NAME FROM INFORMATION_SCHEMA.INDEXES WHERE TABLE_NAME = 'TEST'; +> INDEX_NAME INDEX_TYPE_NAME +> -------------- --------------- +> TEST_BTREE_IDX INDEX +> TEST_HASH_IDX HASH INDEX +> TEST_RTREE_IDX SPATIAL INDEX +> rows: 3 + +SELECT INDEX_NAME, COLUMN_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.INDEX_COLUMNS WHERE TABLE_NAME = 'TEST'; +> INDEX_NAME COLUMN_NAME ORDINAL_POSITION +> -------------- ----------- ---------------- +> TEST_BTREE_IDX B1 1 +> TEST_BTREE_IDX B2 2 +> TEST_HASH_IDX H 1 +> TEST_RTREE_IDX R 1 +> rows: 4 + +CREATE HASH INDEX TEST_BAD_IDX ON TEST USING HASH(T); +> exception SYNTAX_ERROR_2 + +CREATE SPATIAL INDEX TEST_BAD_IDX ON TEST USING RTREE(T); +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok + +SET MODE MySQL; +> ok + +CREATE TABLE test (id int(25) NOT NULL auto_increment, name varchar NOT NULL, PRIMARY KEY (id,name)); +> ok + +drop table test; +> ok + +create memory table word(word_id integer, name varchar); +> ok + +alter table word alter column 
word_id integer(10) auto_increment; +> ok + +insert into word(name) values('Hello'); +> update count: 1 + +alter table word alter column word_id restart with 30872; +> ok + +insert into word(name) values('World'); +> update count: 1 + +select * from word; +> WORD_ID NAME +> ------- ----- +> 1 Hello +> 30872 World +> rows: 2 + +drop table word; +> ok + +CREATE MEMORY TABLE TEST1(ID BIGINT(20) NOT NULL PRIMARY KEY COMMENT 'COMMENT1', FIELD_NAME VARCHAR(100) NOT NULL COMMENT 'COMMENT2'); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST1; +> SCRIPT +> ------------------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST1"( "ID" BIGINT COMMENT 'COMMENT1' NOT NULL, "FIELD_NAME" CHARACTER VARYING(100) COMMENT 'COMMENT2' NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST1; +> rows (ordered): 4 + +CREATE TABLE TEST2(ID BIGINT(20) NOT NULL AUTO_INCREMENT PRIMARY KEY COMMENT 'COMMENT1', FIELD_NAME VARCHAR(100) NOT NULL COMMENT 'COMMENT2' COMMENT 'COMMENT3'); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST3(ID BIGINT(20) NOT NULL AUTO_INCREMENT PRIMARY KEY COMMENT 'COMMENT1' CHECK(ID > 0), FIELD_NAME VARCHAR(100) NOT NULL COMMENT 'COMMENT2'); +> ok + +CREATE TABLE TEST4(ID BIGINT(20) NOT NULL AUTO_INCREMENT PRIMARY KEY CHECK(ID > 0) COMMENT 'COMMENT1', FIELD_NAME VARCHAR(100) NOT NULL COMMENT 'COMMENT2'); +> ok + +DROP TABLE TEST1, TEST3, TEST4; +> ok + +SET MODE Regular; +> ok + +-- Keywords as identifiers + +CREATE TABLE TEST(KEY INT, VALUE INT); +> exception SYNTAX_ERROR_2 + +@reconnect off + +SET NON_KEYWORDS KEY, VALUE, AS, SET, DAY; +> ok + +CREATE TABLE TEST(KEY INT, VALUE INT, AS INT, SET INT, DAY INT); +> ok + +INSERT INTO TEST(KEY, VALUE, AS, SET, DAY) VALUES (1, 2, 3, 4, 5), (6, 7, 8, 9, 10); +> 
update count: 2 + +SELECT KEY, VALUE, AS, SET, DAY FROM TEST WHERE KEY <> 6 AND VALUE <> 7 AND AS <> 8 AND SET <> 9 AND DAY <> 10; +> KEY VALUE AS SET DAY +> --- ----- -- --- --- +> 1 2 3 4 5 +> rows: 1 + +DROP TABLE TEST; +> ok + +SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'NON_KEYWORDS'; +>> AS,DAY,KEY,SET,VALUE + +SET NON_KEYWORDS; +> ok + +@reconnect on + +SELECT COUNT(*) FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'NON_KEYWORDS'; +>> 0 + +CREATE TABLE TEST(KEY INT, VALUE INT); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST1(C VARCHAR(1 CHAR)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST2(C VARCHAR(1 BYTE)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST3(C BINARY_FLOAT); +> exception UNKNOWN_DATA_TYPE_1 + +CREATE TABLE TEST4(C BINARY_DOUBLE); +> exception UNKNOWN_DATA_TYPE_1 + +SET MODE Oracle; +> ok + +CREATE TABLE TEST1(C VARCHAR(1 CHAR)); +> ok + +CREATE TABLE TEST2(C VARCHAR(1 BYTE)); +> ok + +CREATE TABLE TEST3(C BINARY_FLOAT); +> ok + +CREATE TABLE TEST4(C BINARY_DOUBLE); +> ok + +SELECT TABLE_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME IN ('TEST3', 'TEST4'); +> TABLE_NAME DATA_TYPE +> ---------- ---------------- +> TEST3 REAL +> TEST4 DOUBLE PRECISION +> rows: 2 + +DROP TABLE TEST1, TEST2, TEST3, TEST4; +> ok + +SET MODE PostgreSQL; +> ok + +EXPLAIN VALUES VERSION(); +>> VALUES (VERSION()) + +SET MODE Regular; +> ok + +CREATE TABLE TEST(A INT) AS VALUES 0; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> SIN(A) A + 1 A +> ------ ----- - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> ok + +TABLE V; +> SIN(A) A + 1 ((((((((((A + 1) * A) + 1) * A) + 1) * A) + 1) * A) + 1) * A) + 1 +> ------ ----- ----------------------------------------------------------------- +> 0.0 1 1 +> rows: 1 + +DROP VIEW V; +> ok + +CREATE VIEW V AS SELECT SIN(0), COS(0); +> ok + +TABLE V; +> 0.0 1.0 +> --- --- +> 0.0 1.0 +> rows: 
1 + +DROP VIEW V; +> ok + +SET MODE DB2; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> 1 2 A +> --- - - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> exception COLUMN_ALIAS_IS_NOT_SPECIFIED_1 + +SET MODE Derby; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> 1 2 A +> --- - - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> exception COLUMN_ALIAS_IS_NOT_SPECIFIED_1 + +SET MODE MSSQLServer; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> A +> --- - - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> exception COLUMN_ALIAS_IS_NOT_SPECIFIED_1 + +SET MODE HSQLDB; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> C1 C2 A +> --- -- - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> ok + +TABLE V; +> C1 C2 C3 +> --- -- -- +> 0.0 1 1 +> rows: 1 + +DROP VIEW V; +> ok + +SET MODE MySQL; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> SIN(A) A + 1 A +> ------ ----- - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> ok + +TABLE V; +> SIN(A) A + 1 Name_exp_3 +> ------ ----- ---------- +> 0.0 1 1 +> rows: 1 + +DROP VIEW V; +> ok + +CREATE VIEW V AS SELECT SIN(0), COS(0); +> ok + +TABLE V; +> SIN(0) COS(0) +> ------ ------ +> 0.0 1.0 +> rows: 1 + +DROP VIEW V; +> ok + +SET MODE Oracle; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> SIN(A) A + 1 A +> ------ ----- - +> 0.0 1 0 +> rows: 1 + +SET MODE PostgreSQL; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> sin ?column? 
A +> --- -------- - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> exception DUPLICATE_COLUMN_NAME_1 + +CREATE VIEW V AS SELECT SIN(0), COS(0); +> ok + +TABLE V; +> sin cos +> --- --- +> 0.0 1.0 +> rows: 1 + +DROP VIEW V; +> ok + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +--- sequence with manual value ------------------ + +SET MODE MySQL; +> ok + +CREATE TABLE TEST(ID bigint generated by default as identity (start with 1), name varchar); +> ok + +SET AUTOCOMMIT FALSE; +> ok + +insert into test(name) values('Hello'); +> update count: 1 + +select id from final table (insert into test(name) values('World')); +>> 2 + +select id from final table (insert into test(id, name) values(1234567890123456, 'World')); +>> 1234567890123456 + +select id from final table (insert into test(name) values('World')); +>> 1234567890123457 + +select * from test order by id; +> ID NAME +> ---------------- ----- +> 1 Hello +> 2 World +> 1234567890123456 World +> 1234567890123457 World +> rows (ordered): 4 + +SET AUTOCOMMIT TRUE; +> ok + +drop table if exists test; +> ok + +CREATE TABLE TEST(ID bigint generated by default as identity (start with 1), name varchar); +> ok + +SET AUTOCOMMIT FALSE; +> ok + +insert into test(name) values('Hello'); +> update count: 1 + +select id from final table (insert into test(name) values('World')); +>> 2 + +select id from final table (insert into test(id, name) values(1234567890123456, 'World')); +>> 1234567890123456 + +select id from final table (insert into test(name) values('World')); +>> 1234567890123457 + +select * from test order by id; +> ID NAME +> ---------------- ----- +> 1 Hello +> 2 World +> 1234567890123456 World +> 1234567890123457 World +> rows (ordered): 4 + +SET AUTOCOMMIT TRUE; +> ok + +drop table test; +> ok + +SET MODE PostgreSQL; +> ok + +-- To reset last identity +DROP ALL OBJECTS; +> ok + +SELECT LASTVAL(); +> exception 
CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1 + +CREATE SEQUENCE SEQ START WITH 100; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 100 + +SELECT LASTVAL(); +>> 100 + +DROP SEQUENCE SEQ; +> ok + +SET MODE MSSQLServer; +> ok + +-- To reset last identity +DROP ALL OBJECTS; +> ok + +SELECT SCOPE_IDENTITY(); +>> null + +CREATE TABLE TEST(ID BIGINT IDENTITY, V INT); +> ok + +INSERT INTO TEST(V) VALUES (10); +> update count: 1 + +SELECT SCOPE_IDENTITY(); +>> 1 + +DROP TABLE TEST; +> ok + +SET MODE DB2; +> ok + +-- To reset last identity +DROP ALL OBJECTS; +> ok + +SELECT IDENTITY_VAL_LOCAL(); +>> null + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +INSERT INTO TEST(V) VALUES 10; +> update count: 1 + +SELECT IDENTITY_VAL_LOCAL(); +>> 1 + +DROP TABLE TEST; +> ok + +SET MODE Derby; +> ok + +-- To reset last identity +DROP ALL OBJECTS; +> ok + +SELECT IDENTITY_VAL_LOCAL(); +>> null + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +INSERT INTO TEST(V) VALUES 10; +> update count: 1 + +SELECT IDENTITY_VAL_LOCAL(); +>> 1 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok + +SET MODE MSSQLServer; +> ok + +CREATE TABLE TEST(ID BIGINT NOT NULL IDENTITY(10, 5), NAME VARCHAR); +> ok + +INSERT INTO TEST(NAME) VALUES('Hello'), ('World'); +> update count: 2 + +SELECT * FROM TEST; +> ID NAME +> -- ----- +> 10 Hello +> 15 World +> rows: 2 + +DROP TABLE TEST; +> ok + +SET MODE PostgreSQL; +> ok + +SELECT TO_DATE('24-12-2025','DD-MM-YYYY'); +>> 2025-12-24 + +SET TIME ZONE 'UTC'; +> ok + +SELECT TO_TIMESTAMP('24-12-2025 14:13:12','DD-MM-YYYY HH24:MI:SS'); +>> 2025-12-24 14:13:12+00 + +SET TIME ZONE LOCAL; +> ok + +SET MODE Regular; +> ok + +SELECT 1 = TRUE; +> exception TYPES_ARE_NOT_COMPARABLE_2 + +SET MODE MySQL; +> ok + +SELECT 1 = TRUE; +>> TRUE + +SELECT TRUE = 0; +>> FALSE + +SELECT 1 > TRUE; +> exception TYPES_ARE_NOT_COMPARABLE_2 + +CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, B BOOLEAN, I 
INTEGER); +> ok + +CREATE INDEX TEST_B_IDX ON TEST(B); +> ok + +CREATE INDEX TEST_I_IDX ON TEST(I); +> ok + +INSERT INTO TEST(B, I) VALUES (TRUE, 1), (TRUE, 1), (FALSE, 0), (TRUE, 1), (UNKNOWN, NULL); +> update count: 5 + +SELECT * FROM TEST WHERE B = 1; +> ID B I +> -- ---- - +> 1 TRUE 1 +> 2 TRUE 1 +> 4 TRUE 1 +> rows: 3 + +EXPLAIN SELECT * FROM TEST WHERE B = 1; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."I" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "B" = 1 + +SELECT * FROM TEST WHERE I = TRUE; +> ID B I +> -- ---- - +> 1 TRUE 1 +> 2 TRUE 1 +> 4 TRUE 1 +> rows: 3 + +EXPLAIN SELECT * FROM TEST WHERE I = TRUE; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."I" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_I_IDX: I = 1 */ WHERE "I" = 1 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/compatibility/group_by.sql b/h2/src/test/org/h2/test/scripts/compatibility/group_by.sql new file mode 100644 index 0000000000..f156ea5ebc --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/compatibility/group_by.sql @@ -0,0 +1,57 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- GROUP BY column index for MySQL/MariaDB/PostgreSQL compatibility mode + +CREATE TABLE MYTAB(X INT , Y INT, Z INT) AS VALUES (1,123,2), (1,456,2), (3,789,4); +> ok + +SET MODE MySQL; +> ok + +SELECT SUM(Y) AS S , X + Z FROM MYTAB GROUP BY 2; +> S X + Z +> --- ----- +> 579 3 +> 789 7 +> rows: 2 + +EXPLAIN SELECT SUM(Y) AS S , X + Z FROM MYTAB GROUP BY 2; +> PLAN +> ------------------------------------------------------------------------------------------------------- +> SELECT SUM("Y") AS "S", "X" + "Z" FROM "PUBLIC"."MYTAB" /* PUBLIC.MYTAB.tableScan */ GROUP BY "X" + "Z" +> rows: 1 + +SELECT SUM(Y) AS S , X + Z FROM MYTAB GROUP BY 3; +> exception GROUP_BY_NOT_IN_THE_RESULT + +SELECT MYTAB.*, SUM(Y) AS S FROM MYTAB GROUP BY 1; +> exception SYNTAX_ERROR_2 + +SET MODE MariaDB; +> ok + +SELECT SUM(Y) AS S , X + Z FROM MYTAB GROUP BY 2; +> S X + Z +> --- ----- +> 579 3 +> 789 7 +> rows: 2 + +SET MODE PostgreSQL; +> ok + +SELECT SUM(Y) AS S , X + Z FROM MYTAB GROUP BY 2; +> S ?column? +> --- -------- +> 579 3 +> 789 7 +> rows: 2 + +SET MODE Oracle; +> ok + +SELECT SUM(Y) AS S , X FROM MYTAB GROUP BY 2; +> exception MUST_GROUP_BY_COLUMN_1 diff --git a/h2/src/test/org/h2/test/scripts/compatibility/strict_and_legacy.sql b/h2/src/test/org/h2/test/scripts/compatibility/strict_and_legacy.sql new file mode 100644 index 0000000000..7fbc8317ce --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/compatibility/strict_and_legacy.sql @@ -0,0 +1,101 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SET MODE STRICT; +> ok + +VALUES 1 IN (); +> exception SYNTAX_ERROR_2 + +SELECT TOP 1 * FROM (VALUES 1, 2); +> exception SYNTAX_ERROR_1 + +SELECT * FROM (VALUES 1, 2) LIMIT 1; +> exception SYNTAX_ERROR_1 + +CREATE TABLE TEST(ID IDENTITY); +> exception UNKNOWN_DATA_TYPE_1 + +CREATE TABLE TEST(ID BIGINT AUTO_INCREMENT); +> exception SYNTAX_ERROR_2 + +SET MODE LEGACY; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, V INTEGER NOT NULL); +> ok + +INSERT INTO TEST(ID, V) VALUES (10, 15); +> update count: 1 + +INSERT INTO TEST(V) VALUES 20; +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 10 15 +> 11 20 +> rows: 2 + +UPDATE TOP(1) TEST SET V = V + 1; +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 10 16 +> 11 20 +> rows: 2 + +MERGE INTO TEST T USING (VALUES (10, 17), (11, 30)) I(ID, V) ON T.ID = I.ID +WHEN MATCHED THEN UPDATE SET V = I.V WHERE T.ID > 10; +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 10 16 +> 11 30 +> rows: 2 + +CREATE TABLE T2(ID BIGINT PRIMARY KEY, V INT REFERENCES TEST(V)); +> ok + +DROP TABLE T2, TEST; +> ok + +CREATE TABLE TEST(ID BIGINT IDENTITY(1, 10)); +> ok + +DROP TABLE TEST; +> ok + +CREATE SEQUENCE SEQ; +> ok + +SELECT SEQ.NEXTVAL; +>> 1 + +SELECT SEQ.CURRVAL; +>> 1 + +DROP SEQUENCE SEQ; +> ok + +SELECT 1 = TRUE; +>> TRUE + +SET MODE STRICT; +> ok + +CREATE TABLE TEST(LIMIT INTEGER, MINUS INTEGER); +> ok + +DROP TABLE TEST; +> ok + +SET MODE REGULAR; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/array.sql b/h2/src/test/org/h2/test/scripts/datatypes/array.sql index 078444dd26..f083ce9947 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/array.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/array.sql @@ -1,19 +1,10 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- SELECT (10, 20, 30)[1]; ->> 10 - -SELECT (10, 20, 30)[3]; ->> 30 - -SELECT (10, 20, 30)[0]; ->> null - -SELECT (10, 20, 30)[4]; ->> null +> exception INVALID_VALUE_2 SELECT ARRAY[]; >> [] @@ -24,6 +15,18 @@ SELECT ARRAY[10]; SELECT ARRAY[10, 20, 30]; >> [10, 20, 30] +SELECT ARRAY[10, 20, 30][1]; +>> 10 + +SELECT ARRAY[10, 20, 30][3]; +>> 30 + +SELECT ARRAY[10, 20, 30][0]; +> exception ARRAY_ELEMENT_ERROR_2 + +SELECT ARRAY[10, 20, 30][4]; +> exception ARRAY_ELEMENT_ERROR_2 + SELECT ARRAY[1, NULL] IS NOT DISTINCT FROM ARRAY[1, NULL]; >> TRUE @@ -67,6 +70,9 @@ SELECT ARRAY[1, NULL] IN (ARRAY[1, NULL]); >> null CREATE TABLE TEST(A ARRAY); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST(A INTEGER ARRAY); > ok INSERT INTO TEST VALUES (ARRAY[1, NULL]), (ARRAY[1, 2]); @@ -84,13 +90,181 @@ SELECT ARRAY[1, NULL] IN (SELECT A FROM TEST); SELECT ROW (ARRAY[1, NULL]) IN (SELECT A FROM TEST); >> null --- Compatibility with H2 1.4.197 and older SELECT A FROM TEST WHERE A = (1, 2); ->> [1, 2] +> exception TYPES_ARE_NOT_COMPARABLE_2 + +DROP TABLE TEST; +> ok + +SELECT ARRAY[1, 2] || 3; +>> [1, 2, 3] + +SELECT 1 || ARRAY[2, 3]; +>> [1, 2, 3] + +SELECT ARRAY[1, 2] || ARRAY[3]; +>> [1, 2, 3] + +SELECT ARRAY[1, 2] || ARRAY[3, 4]; +>> [1, 2, 3, 4] + +SELECT ARRAY[1, 2] || NULL; +>> null + +SELECT NULL::INT ARRAY || ARRAY[2]; +>> null + +CREATE TABLE TEST(ID INT, A1 INT ARRAY, A2 INT ARRAY[2]); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, MAXIMUM_CARDINALITY + FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE MAXIMUM_CARDINALITY +> ----------- --------- ------------------- +> ID INTEGER null +> A1 ARRAY 65536 +> A2 ARRAY 2 +> rows (ordered): 3 + +INSERT INTO TEST VALUES (1, ARRAY[], ARRAY[]), (2, ARRAY[1, 2], ARRAY[1, 2]); +> update count: 2 + +INSERT INTO TEST VALUES (3, ARRAY[], ARRAY[1, 2, 3]); 
+> exception VALUE_TOO_LONG_2 + +TABLE TEST; +> ID A1 A2 +> -- ------ ------ +> 1 [] [] +> 2 [1, 2] [1, 2] +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(A1 INT ARRAY, A2 INT ARRAY[2], A3 INT ARRAY[0]); +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> -------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A1" INTEGER ARRAY, "A2" INTEGER ARRAY[2], "A3" INTEGER ARRAY[0] ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +INSERT INTO TEST(A3) VALUES ARRAY[NULL]; +> exception VALUE_TOO_LONG_2 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST1(I INT ARRAY, I2 INT ARRAY[2]); +> ok --- Compatibility with H2 1.4.197 and older -INSERT INTO TEST VALUES ((1, 3)); +INSERT INTO TEST1 VALUES (ARRAY[1, 2, 3.0], ARRAY[1, NULL]); > update count: 1 +@reconnect + +TABLE TEST1; +> I I2 +> --------- --------- +> [1, 2, 3] [1, null] +> rows: 1 + +INSERT INTO TEST1 VALUES (ARRAY[], ARRAY['abc']); +> exception DATA_CONVERSION_ERROR_1 + +CREATE MEMORY TABLE TEST2 AS (TABLE TEST1) WITH NO DATA; +> ok + +CREATE MEMORY TABLE TEST3(A TIME ARRAY[10] ARRAY[2]); +> ok + +INSERT INTO TEST3 VALUES ARRAY[ARRAY[TIME '10:00:00']]; +> update count: 1 + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> --------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST1"( "I" INTEGER ARRAY, "I2" INTEGER ARRAY[2] ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST1; +> INSERT INTO "PUBLIC"."TEST1" VALUES (ARRAY [1, 2, 3], ARRAY [1, NULL]); +> CREATE MEMORY TABLE "PUBLIC"."TEST2"( "I" INTEGER ARRAY, "I2" INTEGER ARRAY[2] ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST2; +> CREATE MEMORY TABLE "PUBLIC"."TEST3"( "A" TIME ARRAY[10] ARRAY[2] ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST3; 
+> INSERT INTO "PUBLIC"."TEST3" VALUES (ARRAY [ARRAY [TIME '10:00:00']]); +> rows (ordered): 9 + +DROP TABLE TEST1, TEST2, TEST3; +> ok + +VALUES CAST(ARRAY['1', '2'] AS DOUBLE PRECISION ARRAY); +>> [1.0, 2.0] + +EXPLAIN VALUES CAST(ARRAY['1', '2'] AS DOUBLE PRECISION ARRAY); +>> VALUES (CAST(ARRAY [1.0, 2.0] AS DOUBLE PRECISION ARRAY)) + +CREATE TABLE TEST(A1 TIMESTAMP ARRAY, A2 TIMESTAMP ARRAY ARRAY); +> ok + +CREATE INDEX IDX3 ON TEST(A1); +> ok + +CREATE INDEX IDX4 ON TEST(A2); +> ok + DROP TABLE TEST; > ok + +VALUES CAST(ARRAY[ARRAY[1, 2], ARRAY[3, 4]] AS INT ARRAY[2] ARRAY[1]); +>> [[1, 2]] + +VALUES CAST(ARRAY[ARRAY[1, 2], ARRAY[3, 4]] AS INT ARRAY[1] ARRAY[2]); +>> [[1], [3]] + +VALUES CAST(ARRAY[1, 2] AS INT ARRAY[0]); +>> [] + +VALUES ARRAY??(1??); +>> [1] + +EXPLAIN VALUES ARRAY??(1, 2??); +>> VALUES (ARRAY [1, 2]) + +VALUES ARRAY(SELECT X FROM SYSTEM_RANGE(1, 10)); +>> [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + +CREATE TABLE TEST AS VALUES ARRAY(SELECT X FROM SYSTEM_RANGE(1, 1) WHERE FALSE) WITH NO DATA; +> ok + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> ARRAY + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.ELEMENT_TYPES WHERE OBJECT_NAME = 'TEST'; +>> BIGINT + +DROP TABLE TEST; +> ok + +VALUES ARRAY(SELECT); +> exception SUBQUERY_IS_NOT_SINGLE_COLUMN + +VALUES ARRAY(SELECT 1, 2); +> exception SUBQUERY_IS_NOT_SINGLE_COLUMN + +EXPLAIN VALUES ARRAY[NULL, 1, '3']; +>> VALUES (ARRAY [NULL, 1, 3]) + +CREATE TABLE TEST(A INTEGER ARRAY[65536]); +> ok + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INTEGER ARRAY[65537]); +> exception INVALID_VALUE_PRECISION diff --git a/h2/src/test/org/h2/test/scripts/datatypes/bigint.sql b/h2/src/test/org/h2/test/scripts/datatypes/bigint.sql index 651d5b0102..3b2bacf124 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/bigint.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/bigint.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -51,3 +51,18 @@ SELECT CAST(-9223372036854775808 AS BIGINT) / CAST(1 AS BIGINT); SELECT CAST(-9223372036854775808 AS BIGINT) / CAST(-1 AS BIGINT); > exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +SELECT 0x1L; +> 1 +> - +> 1 +> rows: 1 + +SELECT 0x1234567890abL; +> 20015998341291 +> -------------- +> 20015998341291 +> rows: 1 + +EXPLAIN VALUES (1L, -2147483648L, 2147483647L, -2147483649L, 2147483648L); +>> VALUES (CAST(1 AS BIGINT), -2147483648, CAST(2147483647 AS BIGINT), -2147483649, 2147483648) diff --git a/h2/src/test/org/h2/test/scripts/datatypes/binary.sql b/h2/src/test/org/h2/test/scripts/datatypes/binary.sql index 26db10a974..fadf19999c 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/binary.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/binary.sql @@ -1,23 +1,58 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -CREATE TABLE TEST(B1 VARBINARY, B2 BINARY VARYING, B3 BINARY, B4 RAW, B5 BYTEA, B6 LONG RAW, B7 LONGVARBINARY); +CREATE TABLE TEST(B1 BINARY, B2 BINARY(10)); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE, CHARACTER_OCTET_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE -> ----------- --------- --------- -------------- -> B1 -3 VARBINARY VARBINARY -> B2 -3 VARBINARY BINARY VARYING -> B3 -3 VARBINARY BINARY -> B4 -3 VARBINARY RAW -> B5 -3 VARBINARY BYTEA -> B6 -3 VARBINARY LONG RAW -> B7 -3 VARBINARY LONGVARBINARY -> rows (ordered): 7 +> COLUMN_NAME DATA_TYPE CHARACTER_OCTET_LENGTH +> ----------- --------- ---------------------- +> B1 BINARY 1 +> B2 BINARY 10 +> rows (ordered): 2 DROP TABLE TEST; > ok + +SELECT CAST(X'11' AS BINARY) || CAST(NULL AS BINARY); +>> null + +SELECT CAST(NULL AS BINARY) || CAST(X'11' AS BINARY); +>> null + +EXPLAIN VALUES CAST(X'01' AS BINARY); +>> VALUES (CAST(X'01' AS BINARY(1))) + +CREATE TABLE T(C BINARY(0)); +> exception INVALID_VALUE_2 + +VALUES CAST(X'0102' AS BINARY); +>> X'01' + +CREATE TABLE T1(A BINARY(1048576)); +> ok + +CREATE TABLE T2(A BINARY(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A BINARY(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_OCTET_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_OCTET_LENGTH +> ---------- ---------------------- +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/blob.sql b/h2/src/test/org/h2/test/scripts/datatypes/blob.sql index ebfb4a51f6..05cc2eb5ea 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/blob.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/blob.sql @@ 
-1,23 +1,22 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -CREATE TABLE TEST(B1 BLOB, B2 BINARY LARGE OBJECT, B3 TINYBLOB, B4 MEDIUMBLOB, B5 LONGBLOB, B6 IMAGE, B7 OID); +CREATE TABLE TEST(B1 BLOB, B2 BINARY LARGE OBJECT, B3 TINYBLOB, B4 MEDIUMBLOB, B5 LONGBLOB, B6 IMAGE); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE -> ----------- --------- --------- ------------------- -> B1 2004 BLOB BLOB -> B2 2004 BLOB BINARY LARGE OBJECT -> B3 2004 BLOB TINYBLOB -> B4 2004 BLOB MEDIUMBLOB -> B5 2004 BLOB LONGBLOB -> B6 2004 BLOB IMAGE -> B7 2004 BLOB OID -> rows (ordered): 7 +> COLUMN_NAME DATA_TYPE +> ----------- ------------------- +> B1 BINARY LARGE OBJECT +> B2 BINARY LARGE OBJECT +> B3 BINARY LARGE OBJECT +> B4 BINARY LARGE OBJECT +> B5 BINARY LARGE OBJECT +> B6 BINARY LARGE OBJECT +> rows (ordered): 6 DROP TABLE TEST; > ok @@ -25,29 +24,38 @@ DROP TABLE TEST; CREATE TABLE TEST(B0 BLOB(10), B1 BLOB(10K), B2 BLOB(10M), B3 BLOB(10G), B4 BLOB(10T), B5 BLOB(10P)); > ok -SELECT COLUMN_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME COLUMN_TYPE -> ----------- ----------------------- -> B0 BLOB(10) -> B1 BLOB(10240) -> B2 BLOB(10485760) -> B3 BLOB(10737418240) -> B4 BLOB(10995116277760) -> B5 BLOB(11258999068426240) +> COLUMN_NAME DATA_TYPE CHARACTER_MAXIMUM_LENGTH +> ----------- ------------------- ------------------------ +> B0 BINARY LARGE OBJECT 10 +> B1 
BINARY LARGE OBJECT 10240 +> B2 BINARY LARGE OBJECT 10485760 +> B3 BINARY LARGE OBJECT 10737418240 +> B4 BINARY LARGE OBJECT 10995116277760 +> B5 BINARY LARGE OBJECT 11258999068426240 > rows (ordered): 6 -INSERT INTO TEST(B0) VALUES ('0102030405060708091011'); +INSERT INTO TEST(B0) VALUES (X'0102030405060708091011'); > exception VALUE_TOO_LONG_2 -INSERT INTO TEST(B0) VALUES ('01020304050607080910'); +INSERT INTO TEST(B0) VALUES (X'01020304050607080910'); > update count: 1 SELECT B0 FROM TEST; ->> 01020304050607080910 +>> X'01020304050607080910' DROP TABLE TEST; > ok CREATE TABLE TEST(B BLOB(8192P)); > exception INVALID_VALUE_2 + +EXPLAIN VALUES CAST(X'00' AS BLOB(1)); +>> VALUES (CAST(X'00' AS BINARY LARGE OBJECT(1))) + +CREATE TABLE T(C BLOB(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST(C1 BLOB(1K CHARACTERS), C2 BLOB(1K OCTETS)); +> exception SYNTAX_ERROR_2 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/boolean.sql b/h2/src/test/org/h2/test/scripts/datatypes/boolean.sql index 008a885806..979a5e7385 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/boolean.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/boolean.sql @@ -1,4 +1,42 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +CREATE TABLE TEST(B BOOLEAN) AS (VALUES TRUE, FALSE, UNKNOWN); +> ok + +SELECT * FROM TEST ORDER BY B; +> B +> ----- +> null +> FALSE +> TRUE +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST AS (SELECT UNKNOWN B); +> ok + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> BOOLEAN + +EXPLAIN SELECT CAST(NULL AS BOOLEAN); +>> SELECT UNKNOWN + +SELECT NOT TRUE A, NOT FALSE B, NOT NULL C, NOT UNKNOWN D; +> A B C D +> ----- ---- ---- ---- +> FALSE TRUE null null +> rows: 1 + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES (TRUE, FALSE, UNKNOWN); +>> VALUES (TRUE, FALSE, UNKNOWN) + +EXPLAIN SELECT A IS TRUE OR B IS FALSE FROM (VALUES (TRUE, TRUE)) T(A, B); +>> SELECT ("A" IS TRUE) OR ("B" IS FALSE) FROM (VALUES (TRUE, TRUE)) "T"("A", "B") /* table scan */ diff --git a/h2/src/test/org/h2/test/scripts/datatypes/char.sql b/h2/src/test/org/h2/test/scripts/datatypes/char.sql index 963ba831fe..c76241463a 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/char.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/char.sql @@ -1,19 +1,21 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -CREATE TABLE TEST(C1 CHAR, C2 CHARACTER, C3 NCHAR); +CREATE TABLE TEST(C1 CHAR, C2 CHARACTER, C3 NCHAR, C4 NATIONAL CHARACTER, C5 NATIONAL CHAR); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE -> ----------- --------- --------- ----------- -> C1 1 CHAR CHAR -> C2 1 CHAR CHARACTER -> C3 1 CHAR NCHAR -> rows (ordered): 3 +> COLUMN_NAME DATA_TYPE +> ----------- --------- +> C1 CHARACTER +> C2 CHARACTER +> C3 CHARACTER +> C4 CHARACTER +> C5 CHARACTER +> rows (ordered): 5 DROP TABLE TEST; > ok @@ -37,7 +39,7 @@ SELECT C || 'x' V FROM TEST; > V > --- > aax -> bx +> b x > rows: 2 DROP TABLE TEST; @@ -73,3 +75,124 @@ DROP TABLE TEST; SET MODE Regular; > ok + +EXPLAIN VALUES CAST('a' AS CHAR(1)); +>> VALUES (CAST('a' AS CHAR(1))) + +EXPLAIN VALUES CAST('' AS CHAR(1)); +>> VALUES (CAST(' ' AS CHAR(1))) + +CREATE TABLE T(C CHAR(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T(C1 CHAR(1 CHARACTERS), C2 CHAR(1 OCTETS)); +> ok + +DROP TABLE T; +> ok + +VALUES CAST('ab' AS CHAR); +>> a + +CREATE TABLE TEST(A CHAR(2) NOT NULL, B CHAR(3) NOT NULL); +> ok + +INSERT INTO TEST VALUES ('a', 'a'), ('aa', 'aaa'), ('bb ', 'bb'); +> update count: 3 + +INSERT INTO TEST VALUES ('a a', 'a a'); +> exception VALUE_TOO_LONG_2 + +VALUES CAST('a a' AS CHAR(2)) || '*'; +>> a * + +SELECT A || '*', B || '*', A || B || '*', CHAR_LENGTH(A), A = B FROM TEST; +> A || '*' B || '*' A || B || '*' CHAR_LENGTH(A) A = B +> -------- -------- ------------- -------------- ----- +> a * a * a a * 2 TRUE +> aa* aaa* aaaaa* 2 FALSE +> bb* bb * bbbb * 2 TRUE +> rows: 3 + +DROP TABLE TEST; +> ok + +SET MODE MySQL; +> ok + +CREATE TABLE TEST(A CHAR(2) NOT NULL, B CHAR(3) NOT NULL); +> ok + +INSERT INTO TEST VALUES ('a', 'a'), ('aa', 'aaa'), ('bb ', 'bb'); +> update count: 3 + 
+INSERT INTO TEST VALUES ('a a', 'a a'); +> exception VALUE_TOO_LONG_2 + +VALUES CAST('a a' AS CHAR(2)) || '*'; +>> a* + +SELECT A || '*', B || '*', A || B || '*', CHAR_LENGTH(A), A = B FROM TEST; +> A || '*' B || '*' A || B || '*' CHAR_LENGTH(A) A = B +> -------- -------- ------------- -------------- ----- +> a* a* aa* 1 TRUE +> aa* aaa* aaaaa* 2 FALSE +> bb* bb* bbbb* 2 TRUE +> rows: 3 + +DROP TABLE TEST; +> ok + +SET MODE PostgreSQL; +> ok + +CREATE TABLE TEST(A CHAR(2) NOT NULL, B CHAR(3) NOT NULL); +> ok + +INSERT INTO TEST VALUES ('a', 'a'), ('aa', 'aaa'), ('bb ', 'bb'); +> update count: 3 + +INSERT INTO TEST VALUES ('a a', 'a a'); +> exception VALUE_TOO_LONG_2 + +VALUES CAST('a a' AS CHAR(2)) || '*'; +>> a* + +SELECT A || '*', B || '*', A || B || '*', CHAR_LENGTH(A), A = B FROM TEST; +> ?column? ?column? ?column? char_length ?column? +> -------- -------- -------- ----------- -------- +> a* a* aa* 1 TRUE +> aa* aaa* aaaaa* 2 FALSE +> bb* bb* bbbb* 2 TRUE +> rows: 3 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok + +CREATE TABLE T1(A CHARACTER(1048576)); +> ok + +CREATE TABLE T2(A CHARACTER(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A CHARACTER(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_MAXIMUM_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_MAXIMUM_LENGTH +> ---------- ------------------------ +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/clob.sql b/h2/src/test/org/h2/test/scripts/datatypes/clob.sql index 2d3697957f..20cb6db086 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/clob.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/clob.sql @@ -1,25 +1,28 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
+-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- CREATE TABLE TEST(C1 CLOB, C2 CHARACTER LARGE OBJECT, C3 TINYTEXT, C4 TEXT, C5 MEDIUMTEXT, C6 LONGTEXT, C7 NTEXT, - C8 NCLOB); + C8 NCLOB, C9 CHAR LARGE OBJECT, C10 NCHAR LARGE OBJECT, C11 NATIONAL CHARACTER LARGE OBJECT); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE -> ----------- --------- --------- ---------------------- -> C1 2005 CLOB CLOB -> C2 2005 CLOB CHARACTER LARGE OBJECT -> C3 2005 CLOB TINYTEXT -> C4 2005 CLOB TEXT -> C5 2005 CLOB MEDIUMTEXT -> C6 2005 CLOB LONGTEXT -> C7 2005 CLOB NTEXT -> C8 2005 CLOB NCLOB -> rows (ordered): 8 +> COLUMN_NAME DATA_TYPE +> ----------- ---------------------- +> C1 CHARACTER LARGE OBJECT +> C2 CHARACTER LARGE OBJECT +> C3 CHARACTER LARGE OBJECT +> C4 CHARACTER LARGE OBJECT +> C5 CHARACTER LARGE OBJECT +> C6 CHARACTER LARGE OBJECT +> C7 CHARACTER LARGE OBJECT +> C8 CHARACTER LARGE OBJECT +> C9 CHARACTER LARGE OBJECT +> C10 CHARACTER LARGE OBJECT +> C11 CHARACTER LARGE OBJECT +> rows (ordered): 11 DROP TABLE TEST; > ok @@ -27,16 +30,16 @@ DROP TABLE TEST; CREATE TABLE TEST(C0 CLOB(10), C1 CLOB(10K), C2 CLOB(10M CHARACTERS), C3 CLOB(10G OCTETS), C4 CLOB(10T), C5 CLOB(10P)); > ok -SELECT COLUMN_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME COLUMN_TYPE -> ----------- ----------------------- -> C0 CLOB(10) -> C1 CLOB(10240) -> C2 CLOB(10485760) -> C3 CLOB(10737418240) -> C4 CLOB(10995116277760) -> C5 CLOB(11258999068426240) +> COLUMN_NAME DATA_TYPE CHARACTER_MAXIMUM_LENGTH +> ----------- 
---------------------- ------------------------ +> C0 CHARACTER LARGE OBJECT 10 +> C1 CHARACTER LARGE OBJECT 10240 +> C2 CHARACTER LARGE OBJECT 10485760 +> C3 CHARACTER LARGE OBJECT 10737418240 +> C4 CHARACTER LARGE OBJECT 10995116277760 +> C5 CHARACTER LARGE OBJECT 11258999068426240 > rows (ordered): 6 INSERT INTO TEST(C0) VALUES ('12345678901'); @@ -53,3 +56,15 @@ DROP TABLE TEST; CREATE TABLE TEST(C CLOB(8192P)); > exception INVALID_VALUE_2 + +EXPLAIN VALUES CAST(' ' AS CLOB(1)); +>> VALUES (CAST(' ' AS CHARACTER LARGE OBJECT(1))) + +CREATE TABLE T(C CLOB(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST(C1 CLOB(1K CHARACTERS), C2 CLOB(1K OCTETS)); +> ok + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/date.sql b/h2/src/test/org/h2/test/scripts/datatypes/date.sql index 54de18ac5a..9d48a4b87e 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/date.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/date.sql @@ -1,16 +1,16 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- CREATE TABLE TEST(D1 DATE); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE, NUMERIC_SCALE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE NUMERIC_SCALE DATETIME_PRECISION -> ----------- --------- --------- ----------- ------------- ------------------ -> D1 91 DATE DATE 0 0 +> COLUMN_NAME DATA_TYPE +> ----------- --------- +> D1 DATE > rows (ordered): 1 DROP TABLE TEST; @@ -23,7 +23,7 @@ SELECT DATE '20000102'; >> 2000-01-02 SELECT DATE '-1000102'; ->> -100-01-02 +>> -0100-01-02 SELECT DATE '3001231'; >> 0300-12-31 @@ -31,3 +31,30 @@ SELECT DATE '3001231'; -- PostgreSQL returns 2020-12-31 SELECT DATE '201231'; > exception INVALID_DATETIME_CONSTANT_2 + +CALL DATE '-1000000000-01-01'; +>> -1000000000-01-01 + +CALL DATE '1000000000-12-31'; +>> 1000000000-12-31 + +CALL DATE '-1000000001-12-31'; +> exception INVALID_DATETIME_CONSTANT_2 + +CALL DATE '1000000001-01-01'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT CAST (TIMESTAMP '1000000000-12-31 00:00:00' AS DATE); +>> 1000000000-12-31 + +SELECT CAST (DATE '1000000000-12-31' AS TIMESTAMP); +>> 1000000000-12-31 00:00:00 + +SELECT CAST (TIMESTAMP '-1000000000-01-01 00:00:00' AS DATE); +>> -1000000000-01-01 + +SELECT CAST (DATE '-1000000000-01-01' AS TIMESTAMP); +>> -1000000000-01-01 00:00:00 + +SELECT CAST (DATE '2000-01-01' AS TIME); +> exception DATA_CONVERSION_ERROR_1 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/decfloat.sql b/h2/src/test/org/h2/test/scripts/datatypes/decfloat.sql new file mode 100644 index 0000000000..f311f90115 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/decfloat.sql @@ -0,0 +1,283 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST(D1 DECFLOAT, D2 DECFLOAT(5), D3 DECFLOAT(10), X NUMBER); +> ok + +INSERT INTO TEST VALUES(1, 1, 9999999999, 1.23); +> update count: 1 + +TABLE TEST; +> D1 D2 D3 X +> -- -- ---------- ---- +> 1 1 9999999999 1.23 +> rows: 1 + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ----------- --------- ----------------- ----------------------- ------------- ------------------ -------------------------- ---------------------- +> D1 DECFLOAT 100000 10 null DECFLOAT null null +> D2 DECFLOAT 5 10 null DECFLOAT 5 null +> D3 DECFLOAT 10 10 null DECFLOAT 10 null +> X DECFLOAT 40 10 null DECFLOAT 40 null +> rows (ordered): 4 + +SELECT D2 + D3 A, D2 - D3 S, D2 * D3 M, D2 / D3 D FROM TEST; +> A S M D +> ----- ----------- ---------- ---------------- +> 1E+10 -9999999998 9999999999 1.0000000001E-10 +> rows: 1 + +CREATE TABLE RESULT AS SELECT D2 + D3 A, D2 - D3 S, D2 * D3 M, D2 / D3 D FROM TEST; +> ok + +TABLE RESULT; +> A S M D +> ----- ----------- ---------- ---------------- +> 1E+10 -9999999998 9999999999 1.0000000001E-10 +> rows: 1 + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'RESULT' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ----------- --------- ----------------- ----------------------- ------------- ------------------ -------------------------- 
---------------------- +> A DECFLOAT 11 10 null DECFLOAT 11 null +> S DECFLOAT 11 10 null DECFLOAT 11 null +> M DECFLOAT 15 10 null DECFLOAT 15 null +> D DECFLOAT 11 10 null DECFLOAT 11 null +> rows (ordered): 4 + +DROP TABLE TEST, RESULT; +> ok + +EXPLAIN VALUES (CAST(-9223372036854775808 AS DECFLOAT(19)), CAST(9223372036854775807 AS DECFLOAT(19)), 1.0, -9223372036854775809, + 9223372036854775808); +>> VALUES (CAST(-9223372036854775808 AS DECFLOAT), CAST(9223372036854775807 AS DECFLOAT), 1.0, -9223372036854775809, 9223372036854775808) + +CREATE TABLE T(C DECFLOAT(0)); +> exception INVALID_VALUE_2 + +SELECT CAST(11 AS DECFLOAT(1)); +>> 1E+1 + +SELECT 1E1 IS OF(DECFLOAT); +>> TRUE + +SELECT (CAST(1 AS REAL) + CAST(1 AS SMALLINT)) IS OF(REAL); +>> TRUE + +SELECT (CAST(1 AS REAL) + CAST(1 AS BIGINT)) IS OF(DECFLOAT); +>> TRUE + +SELECT (CAST(1 AS REAL) + CAST(1 AS NUMERIC)) IS OF(DECFLOAT); +>> TRUE + +SELECT MOD(CAST(5 AS DECFLOAT), CAST(2 AS DECFLOAT)); +>> 1 + +EXPLAIN SELECT 1.1E0, 1E1; +>> SELECT CAST(1.1 AS DECFLOAT), CAST(1E+1 AS DECFLOAT) + +CREATE MEMORY TABLE TEST(D DECFLOAT(8)) AS VALUES '-Infinity', '-1', '0', '1', '1.5', 'Infinity', 'NaN'; +> ok + +@reconnect + +SELECT D, -D, SIGN(D) FROM TEST ORDER BY D; +> D - D SIGN(D) +> --------- --------- ------- +> -Infinity Infinity -1 +> -1 1 -1 +> 0 0 0 +> 1 -1 1 +> 1.5 -1.5 1 +> Infinity -Infinity 1 +> NaN NaN 0 +> rows (ordered): 7 + +SELECT A.D, B.D, A.D + B.D, A.D - B.D, A.D * B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D + B.D A.D - B.D A.D * B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity -Infinity NaN Infinity +> -Infinity -1 -Infinity -Infinity Infinity +> -Infinity 0 -Infinity -Infinity NaN +> -Infinity 1 -Infinity -Infinity -Infinity +> -Infinity 1.5 -Infinity -Infinity -Infinity +> -Infinity Infinity NaN -Infinity -Infinity +> -Infinity NaN NaN NaN NaN +> -1 -Infinity -Infinity Infinity Infinity +> -1 -1 -2 0 1 +> -1 0 -1 -1 0 +> -1 1 0 -2 -1 +> -1 1.5 0.5 
-2.5 -1.5 +> -1 Infinity Infinity -Infinity -Infinity +> -1 NaN NaN NaN NaN +> 0 -Infinity -Infinity Infinity NaN +> 0 -1 -1 1 0 +> 0 0 0 0 0 +> 0 1 1 -1 0 +> 0 1.5 1.5 -1.5 0 +> 0 Infinity Infinity -Infinity NaN +> 0 NaN NaN NaN NaN +> 1 -Infinity -Infinity Infinity -Infinity +> 1 -1 0 2 -1 +> 1 0 1 1 0 +> 1 1 2 0 1 +> 1 1.5 2.5 -0.5 1.5 +> 1 Infinity Infinity -Infinity Infinity +> 1 NaN NaN NaN NaN +> 1.5 -Infinity -Infinity Infinity -Infinity +> 1.5 -1 0.5 2.5 -1.5 +> 1.5 0 1.5 1.5 0 +> 1.5 1 2.5 0.5 1.5 +> 1.5 1.5 3 0 2.25 +> 1.5 Infinity Infinity -Infinity Infinity +> 1.5 NaN NaN NaN NaN +> Infinity -Infinity NaN Infinity -Infinity +> Infinity -1 Infinity Infinity -Infinity +> Infinity 0 Infinity Infinity NaN +> Infinity 1 Infinity Infinity Infinity +> Infinity 1.5 Infinity Infinity Infinity +> Infinity Infinity Infinity NaN Infinity +> Infinity NaN NaN NaN NaN +> NaN -Infinity NaN NaN NaN +> NaN -1 NaN NaN NaN +> NaN 0 NaN NaN NaN +> NaN 1 NaN NaN NaN +> NaN 1.5 NaN NaN NaN +> NaN Infinity NaN NaN NaN +> NaN NaN NaN NaN NaN +> rows (ordered): 49 + +SELECT A.D, B.D, A.D / B.D, MOD(A.D, B.D) FROM TEST A JOIN TEST B WHERE B.D <> 0 ORDER BY A.D, B.D; +> D D A.D / B.D MOD(A.D, B.D) +> --------- --------- ------------ ------------- +> -Infinity -Infinity NaN NaN +> -Infinity -1 Infinity NaN +> -Infinity 1 -Infinity NaN +> -Infinity 1.5 -Infinity NaN +> -Infinity Infinity NaN NaN +> -Infinity NaN NaN NaN +> -1 -Infinity 0 -1 +> -1 -1 1 0 +> -1 1 -1 0 +> -1 1.5 -0.666666667 -1 +> -1 Infinity 0 -1 +> -1 NaN NaN NaN +> 0 -Infinity 0 0 +> 0 -1 0 0 +> 0 1 0 0 +> 0 1.5 0 0 +> 0 Infinity 0 0 +> 0 NaN NaN NaN +> 1 -Infinity 0 1 +> 1 -1 -1 0 +> 1 1 1 0 +> 1 1.5 0.666666667 1 +> 1 Infinity 0 1 +> 1 NaN NaN NaN +> 1.5 -Infinity 0 1.5 +> 1.5 -1 -1.5 0.5 +> 1.5 1 1.5 0.5 +> 1.5 1.5 1 0 +> 1.5 Infinity 0 1.5 +> 1.5 NaN NaN NaN +> Infinity -Infinity NaN NaN +> Infinity -1 -Infinity NaN +> Infinity 1 Infinity NaN +> Infinity 1.5 Infinity NaN +> Infinity Infinity NaN NaN +> Infinity 
NaN NaN NaN +> NaN -Infinity NaN NaN +> NaN -1 NaN NaN +> NaN 1 NaN NaN +> NaN 1.5 NaN NaN +> NaN Infinity NaN NaN +> NaN NaN NaN NaN +> rows (ordered): 42 + +SELECT A.D, B.D, A.D > B.D, A.D = B.D, A.D < B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D > B.D A.D = B.D A.D < B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity FALSE TRUE FALSE +> -Infinity -1 FALSE FALSE TRUE +> -Infinity 0 FALSE FALSE TRUE +> -Infinity 1 FALSE FALSE TRUE +> -Infinity 1.5 FALSE FALSE TRUE +> -Infinity Infinity FALSE FALSE TRUE +> -Infinity NaN FALSE FALSE TRUE +> -1 -Infinity TRUE FALSE FALSE +> -1 -1 FALSE TRUE FALSE +> -1 0 FALSE FALSE TRUE +> -1 1 FALSE FALSE TRUE +> -1 1.5 FALSE FALSE TRUE +> -1 Infinity FALSE FALSE TRUE +> -1 NaN FALSE FALSE TRUE +> 0 -Infinity TRUE FALSE FALSE +> 0 -1 TRUE FALSE FALSE +> 0 0 FALSE TRUE FALSE +> 0 1 FALSE FALSE TRUE +> 0 1.5 FALSE FALSE TRUE +> 0 Infinity FALSE FALSE TRUE +> 0 NaN FALSE FALSE TRUE +> 1 -Infinity TRUE FALSE FALSE +> 1 -1 TRUE FALSE FALSE +> 1 0 TRUE FALSE FALSE +> 1 1 FALSE TRUE FALSE +> 1 1.5 FALSE FALSE TRUE +> 1 Infinity FALSE FALSE TRUE +> 1 NaN FALSE FALSE TRUE +> 1.5 -Infinity TRUE FALSE FALSE +> 1.5 -1 TRUE FALSE FALSE +> 1.5 0 TRUE FALSE FALSE +> 1.5 1 TRUE FALSE FALSE +> 1.5 1.5 FALSE TRUE FALSE +> 1.5 Infinity FALSE FALSE TRUE +> 1.5 NaN FALSE FALSE TRUE +> Infinity -Infinity TRUE FALSE FALSE +> Infinity -1 TRUE FALSE FALSE +> Infinity 0 TRUE FALSE FALSE +> Infinity 1 TRUE FALSE FALSE +> Infinity 1.5 TRUE FALSE FALSE +> Infinity Infinity FALSE TRUE FALSE +> Infinity NaN FALSE FALSE TRUE +> NaN -Infinity TRUE FALSE FALSE +> NaN -1 TRUE FALSE FALSE +> NaN 0 TRUE FALSE FALSE +> NaN 1 TRUE FALSE FALSE +> NaN 1.5 TRUE FALSE FALSE +> NaN Infinity TRUE FALSE FALSE +> NaN NaN FALSE TRUE FALSE +> rows (ordered): 49 + +SELECT D, CAST(D AS REAL) D1, CAST(D AS DOUBLE PRECISION) D2 FROM TEST ORDER BY D; +> D D1 D2 +> --------- --------- --------- +> -Infinity -Infinity -Infinity +> -1 -1.0 -1.0 
+> 0 0.0 0.0 +> 1 1.0 1.0 +> 1.5 1.5 1.5 +> Infinity Infinity Infinity +> NaN NaN NaN +> rows (ordered): 7 + +EXPLAIN SELECT CAST('Infinity' AS DECFLOAT), CAST('-Infinity' AS DECFLOAT), CAST('NaN' AS DECFLOAT), CAST(0 AS DECFLOAT); +>> SELECT CAST('Infinity' AS DECFLOAT), CAST('-Infinity' AS DECFLOAT), CAST('NaN' AS DECFLOAT), CAST(0 AS DECFLOAT) + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D" DECFLOAT(8) ); +> -- 7 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES ('-Infinity'), (-1), (0), (1), (1.5), ('Infinity'), ('NaN'); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/decimal.sql b/h2/src/test/org/h2/test/scripts/datatypes/decimal.sql deleted file mode 100644 index 7fc6cf9c5f..0000000000 --- a/h2/src/test/org/h2/test/scripts/datatypes/decimal.sql +++ /dev/null @@ -1,107 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
--- Initial Developer: H2 Group --- - -CREATE TABLE TEST(I NUMERIC(-1)); -> exception INVALID_VALUE_2 - -CREATE TABLE TEST(I NUMERIC(-1, -1)); -> exception INVALID_VALUE_2 - -CREATE TABLE TEST (N NUMERIC) AS VALUES (0), (0.0), (NULL); -> ok - -SELECT * FROM TEST; -> N -> ---- -> 0 -> 0.0 -> null -> rows: 3 - -SELECT DISTINCT * FROM TEST; -> N -> ---- -> 0 -> null -> rows: 2 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST (N NUMERIC) AS VALUES (0), (0.0), (2), (NULL); -> ok - -CREATE INDEX TEST_IDX ON TEST(N); -> ok - -SELECT N FROM TEST WHERE N IN (0.000, 0.00, 1.0); -> N -> --- -> 0 -> 0.0 -> rows: 2 - -SELECT N FROM TEST WHERE N IN (SELECT DISTINCT ON(B) A FROM VALUES (0.000, 1), (0.00, 2), (1.0, 3) T(A, B)); -> N -> --- -> 0 -> 0.0 -> rows: 2 - -DROP INDEX TEST_IDX; -> ok - -CREATE UNIQUE INDEX TEST_IDX ON TEST(N); -> exception DUPLICATE_KEY_1 - -DROP TABLE TEST; -> ok - -CREATE MEMORY TABLE TEST(N NUMERIC) AS VALUES (0), (0.0), (2), (NULL); -> ok - -CREATE HASH INDEX TEST_IDX ON TEST(N); -> ok - -SELECT N FROM TEST WHERE N = 0; -> N -> --- -> 0 -> 0.0 -> rows: 2 - -DROP INDEX TEST_IDX; -> ok - -CREATE UNIQUE HASH INDEX TEST_IDX ON TEST(N); -> exception DUPLICATE_KEY_1 - -DELETE FROM TEST WHERE N = 0 LIMIT 1; -> update count: 1 - -CREATE UNIQUE HASH INDEX TEST_IDX ON TEST(N); -> ok - -SELECT 1 FROM TEST WHERE N = 0; ->> 1 - -INSERT INTO TEST VALUES (NULL); -> update count: 1 - -SELECT N FROM TEST WHERE N IS NULL; -> N -> ---- -> null -> null -> rows: 2 - -DELETE FROM TEST WHERE N IS NULL LIMIT 1; -> update count: 1 - -SELECT N FROM TEST WHERE N IS NULL; ->> null - -DROP TABLE TEST; -> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/decimal_decimal.sql b/h2/src/test/org/h2/test/scripts/datatypes/decimal_decimal.sql deleted file mode 100644 index 59d3d3d26d..0000000000 --- a/h2/src/test/org/h2/test/scripts/datatypes/decimal_decimal.sql +++ /dev/null @@ -1,47 +0,0 @@ --- Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- --- h2.bigDecimalIsDecimal=true --- - -create memory table orders ( orderid varchar(10), name varchar(20), customer_id varchar(10), completed numeric(1) not null, verified numeric(1) ); -> ok - -select * from information_schema.columns where table_name = 'ORDERS'; -> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DATETIME_PRECISION INTERVAL_TYPE INTERVAL_PRECISION CHARACTER_SET_NAME COLLATION_NAME TYPE_NAME NULLABLE IS_COMPUTED SELECTIVITY CHECK_CONSTRAINT SEQUENCE_NAME REMARKS SOURCE_DATA_TYPE COLUMN_TYPE COLUMN_ON_UPDATE IS_VISIBLE -> ------------- ------------ ---------- ----------- ---------------- -------------- ------------- ----------- -------------- ----------- --------- ------------------------ ---------------------- ----------------- ----------------------- ------------- ------------------ ------------- ------------------ ------------------ -------------- --------- -------- ----------- ----------- ---------------- ------------- ------- ---------------- ------------------- ---------------- ---------- -> SCRIPT PUBLIC ORDERS COMPLETED 4 null null null null NO 3 1 1 1 10 0 null null null Unicode OFF DECIMAL 0 FALSE 50 null null NUMERIC(1) NOT NULL null TRUE -> SCRIPT PUBLIC ORDERS CUSTOMER_ID 3 null null null null YES 12 10 10 10 10 0 null null null Unicode OFF VARCHAR 1 FALSE 50 null null VARCHAR(10) null TRUE -> SCRIPT PUBLIC ORDERS NAME 2 null null null null YES 12 20 20 20 10 0 null null null Unicode OFF VARCHAR 1 FALSE 50 null null VARCHAR(20) null TRUE -> SCRIPT PUBLIC ORDERS ORDERID 1 null null null null YES 12 10 10 10 10 0 null null null Unicode OFF VARCHAR 1 FALSE 50 null null VARCHAR(10) null TRUE -> SCRIPT PUBLIC 
ORDERS VERIFIED 5 null null null null YES 3 1 1 1 10 0 null null null Unicode OFF DECIMAL 1 FALSE 50 null null NUMERIC(1) null TRUE -> rows: 5 - -drop table orders; -> ok - -CREATE TABLE TEST(ID INT, X1 BIT, XT TINYINT, X_SM SMALLINT, XB BIGINT, XD DECIMAL(10,2), XD2 DOUBLE PRECISION, XR REAL); -> ok - -INSERT INTO TEST VALUES(?, ?, ?, ?, ?, ?, ?, ?); -{ -0,FALSE,0,0,0,0.0,0.0,0.0 -1,TRUE,1,1,1,1.0,1.0,1.0 -4,TRUE,4,4,4,4.0,4.0,4.0 --1,FALSE,-1,-1,-1,-1.0,-1.0,-1.0 -NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL -}; -> update count: 5 - -SELECT ID, CAST(XT AS NUMBER(10,1)), -CAST(X_SM AS NUMBER(10,1)), CAST(XB AS NUMBER(10,1)), CAST(XD AS NUMBER(10,1)), -CAST(XD2 AS NUMBER(10,1)), CAST(XR AS NUMBER(10,1)) FROM TEST; -> ID CAST(XT AS DECIMAL(10, 1)) CAST(X_SM AS DECIMAL(10, 1)) CAST(XB AS DECIMAL(10, 1)) CAST(XD AS DECIMAL(10, 1)) CAST(XD2 AS DECIMAL(10, 1)) CAST(XR AS DECIMAL(10, 1)) -> ---- -------------------------- ---------------------------- -------------------------- -------------------------- --------------------------- -------------------------- -> -1 -1.0 -1.0 -1.0 -1.0 -1.0 -1.0 -> 0 0.0 0.0 0.0 0.0 0.0 0.0 -> 1 1.0 1.0 1.0 1.0 1.0 1.0 -> 4 4.0 4.0 4.0 4.0 4.0 4.0 -> null null null null null null null -> rows: 5 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/decimal_numeric.sql b/h2/src/test/org/h2/test/scripts/datatypes/decimal_numeric.sql deleted file mode 100644 index 92a9c0d225..0000000000 --- a/h2/src/test/org/h2/test/scripts/datatypes/decimal_numeric.sql +++ /dev/null @@ -1,47 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
--- Initial Developer: H2 Group --- --- h2.bigDecimalIsDecimal=false --- - -create memory table orders ( orderid varchar(10), name varchar(20), customer_id varchar(10), completed numeric(1) not null, verified numeric(1) ); -> ok - -select * from information_schema.columns where table_name = 'ORDERS'; -> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DATETIME_PRECISION INTERVAL_TYPE INTERVAL_PRECISION CHARACTER_SET_NAME COLLATION_NAME TYPE_NAME NULLABLE IS_COMPUTED SELECTIVITY CHECK_CONSTRAINT SEQUENCE_NAME REMARKS SOURCE_DATA_TYPE COLUMN_TYPE COLUMN_ON_UPDATE IS_VISIBLE -> ------------- ------------ ---------- ----------- ---------------- -------------- ------------- ----------- -------------- ----------- --------- ------------------------ ---------------------- ----------------- ----------------------- ------------- ------------------ ------------- ------------------ ------------------ -------------- --------- -------- ----------- ----------- ---------------- ------------- ------- ---------------- ------------------- ---------------- ---------- -> SCRIPT PUBLIC ORDERS COMPLETED 4 null null null null NO 2 1 1 1 10 0 null null null Unicode OFF NUMERIC 0 FALSE 50 null null NUMERIC(1) NOT NULL null TRUE -> SCRIPT PUBLIC ORDERS CUSTOMER_ID 3 null null null null YES 12 10 10 10 10 0 null null null Unicode OFF VARCHAR 1 FALSE 50 null null VARCHAR(10) null TRUE -> SCRIPT PUBLIC ORDERS NAME 2 null null null null YES 12 20 20 20 10 0 null null null Unicode OFF VARCHAR 1 FALSE 50 null null VARCHAR(20) null TRUE -> SCRIPT PUBLIC ORDERS ORDERID 1 null null null null YES 12 10 10 10 10 0 null null null Unicode OFF VARCHAR 1 FALSE 50 null null VARCHAR(10) null TRUE -> SCRIPT PUBLIC ORDERS VERIFIED 5 null null null null YES 2 1 1 1 10 0 null null null Unicode OFF NUMERIC 1 FALSE 50 
null null NUMERIC(1) null TRUE -> rows: 5 - -drop table orders; -> ok - -CREATE TABLE TEST(ID INT, X1 BIT, XT TINYINT, X_SM SMALLINT, XB BIGINT, XD DECIMAL(10,2), XD2 DOUBLE PRECISION, XR REAL); -> ok - -INSERT INTO TEST VALUES(?, ?, ?, ?, ?, ?, ?, ?); -{ -0,FALSE,0,0,0,0.0,0.0,0.0 -1,TRUE,1,1,1,1.0,1.0,1.0 -4,TRUE,4,4,4,4.0,4.0,4.0 --1,FALSE,-1,-1,-1,-1.0,-1.0,-1.0 -NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL -}; -> update count: 5 - -SELECT ID, CAST(XT AS NUMBER(10,1)), -CAST(X_SM AS NUMBER(10,1)), CAST(XB AS NUMBER(10,1)), CAST(XD AS NUMBER(10,1)), -CAST(XD2 AS NUMBER(10,1)), CAST(XR AS NUMBER(10,1)) FROM TEST; -> ID CAST(XT AS NUMERIC(10, 1)) CAST(X_SM AS NUMERIC(10, 1)) CAST(XB AS NUMERIC(10, 1)) CAST(XD AS NUMERIC(10, 1)) CAST(XD2 AS NUMERIC(10, 1)) CAST(XR AS NUMERIC(10, 1)) -> ---- -------------------------- ---------------------------- -------------------------- -------------------------- --------------------------- -------------------------- -> -1 -1.0 -1.0 -1.0 -1.0 -1.0 -1.0 -> 0 0.0 0.0 0.0 0.0 0.0 0.0 -> 1 1.0 1.0 1.0 1.0 1.0 1.0 -> 4 4.0 4.0 4.0 4.0 4.0 4.0 -> null null null null null null null -> rows: 5 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/double.sql b/h2/src/test/org/h2/test/scripts/datatypes/double.sql deleted file mode 100644 index f4a2f9a478..0000000000 --- a/h2/src/test/org/h2/test/scripts/datatypes/double.sql +++ /dev/null @@ -1,32 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
--- Initial Developer: H2 Group --- - -CREATE MEMORY TABLE TEST(D1 DOUBLE, D2 DOUBLE PRECISION, D3 FLOAT, D4 FLOAT(25), D5 FLOAT(53)); -> ok - -ALTER TABLE TEST ADD COLUMN D6 FLOAT(54); -> exception INVALID_VALUE_SCALE_PRECISION - -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS - WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE -> ----------- --------- --------- ---------------- -> D1 8 DOUBLE DOUBLE -> D2 8 DOUBLE DOUBLE PRECISION -> D3 8 DOUBLE FLOAT -> D4 8 DOUBLE FLOAT(25) -> D5 8 DOUBLE FLOAT(53) -> rows (ordered): 5 - -SCRIPT NODATA NOPASSWORDS NOSETTINGS TABLE TEST; -> SCRIPT -> ---------------------------------------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D1" DOUBLE, "D2" DOUBLE PRECISION, "D3" FLOAT, "D4" FLOAT(25), "D5" FLOAT(53) ); -> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 3 - -DROP TABLE TEST; -> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/double_precision.sql b/h2/src/test/org/h2/test/scripts/datatypes/double_precision.sql new file mode 100644 index 0000000000..3d86efdfb1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/double_precision.sql @@ -0,0 +1,233 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST(D1 DOUBLE, D2 DOUBLE PRECISION, D3 FLOAT, D4 FLOAT(25), D5 FLOAT(53)); +> ok + +ALTER TABLE TEST ADD COLUMN D6 FLOAT(54); +> exception INVALID_VALUE_PRECISION + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ----------- ---------------- ----------------- ----------------------- ------------- ------------------ -------------------------- ---------------------- +> D1 DOUBLE PRECISION 53 2 null DOUBLE PRECISION null null +> D2 DOUBLE PRECISION 53 2 null DOUBLE PRECISION null null +> D3 DOUBLE PRECISION 53 2 null FLOAT null null +> D4 DOUBLE PRECISION 53 2 null FLOAT 25 null +> D5 DOUBLE PRECISION 53 2 null FLOAT 53 null +> rows (ordered): 5 + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> -------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D1" DOUBLE PRECISION, "D2" DOUBLE PRECISION, "D3" FLOAT, "D4" FLOAT(25), "D5" FLOAT(53) ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES CAST(0 AS DOUBLE); +>> VALUES (CAST(0.0 AS DOUBLE PRECISION)) + +CREATE MEMORY TABLE TEST(D DOUBLE PRECISION) AS VALUES '-Infinity', '-1', '0', '1', '1.5', 'Infinity', 'NaN'; +> ok + +SELECT D, -D, SIGN(D) FROM TEST ORDER BY D; +> D - D SIGN(D) +> --------- --------- ------- +> -Infinity Infinity -1 +> -1.0 1.0 -1 +> 0.0 0.0 0 +> 1.0 -1.0 1 +> 1.5 -1.5 1 +> Infinity -Infinity 1 +> NaN NaN 0 +> rows (ordered): 7 + +SELECT A.D, 
B.D, A.D + B.D, A.D - B.D, A.D * B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D + B.D A.D - B.D A.D * B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity -Infinity NaN Infinity +> -Infinity -1.0 -Infinity -Infinity Infinity +> -Infinity 0.0 -Infinity -Infinity NaN +> -Infinity 1.0 -Infinity -Infinity -Infinity +> -Infinity 1.5 -Infinity -Infinity -Infinity +> -Infinity Infinity NaN -Infinity -Infinity +> -Infinity NaN NaN NaN NaN +> -1.0 -Infinity -Infinity Infinity Infinity +> -1.0 -1.0 -2.0 0.0 1.0 +> -1.0 0.0 -1.0 -1.0 0.0 +> -1.0 1.0 0.0 -2.0 -1.0 +> -1.0 1.5 0.5 -2.5 -1.5 +> -1.0 Infinity Infinity -Infinity -Infinity +> -1.0 NaN NaN NaN NaN +> 0.0 -Infinity -Infinity Infinity NaN +> 0.0 -1.0 -1.0 1.0 0.0 +> 0.0 0.0 0.0 0.0 0.0 +> 0.0 1.0 1.0 -1.0 0.0 +> 0.0 1.5 1.5 -1.5 0.0 +> 0.0 Infinity Infinity -Infinity NaN +> 0.0 NaN NaN NaN NaN +> 1.0 -Infinity -Infinity Infinity -Infinity +> 1.0 -1.0 0.0 2.0 -1.0 +> 1.0 0.0 1.0 1.0 0.0 +> 1.0 1.0 2.0 0.0 1.0 +> 1.0 1.5 2.5 -0.5 1.5 +> 1.0 Infinity Infinity -Infinity Infinity +> 1.0 NaN NaN NaN NaN +> 1.5 -Infinity -Infinity Infinity -Infinity +> 1.5 -1.0 0.5 2.5 -1.5 +> 1.5 0.0 1.5 1.5 0.0 +> 1.5 1.0 2.5 0.5 1.5 +> 1.5 1.5 3.0 0.0 2.25 +> 1.5 Infinity Infinity -Infinity Infinity +> 1.5 NaN NaN NaN NaN +> Infinity -Infinity NaN Infinity -Infinity +> Infinity -1.0 Infinity Infinity -Infinity +> Infinity 0.0 Infinity Infinity NaN +> Infinity 1.0 Infinity Infinity Infinity +> Infinity 1.5 Infinity Infinity Infinity +> Infinity Infinity Infinity NaN Infinity +> Infinity NaN NaN NaN NaN +> NaN -Infinity NaN NaN NaN +> NaN -1.0 NaN NaN NaN +> NaN 0.0 NaN NaN NaN +> NaN 1.0 NaN NaN NaN +> NaN 1.5 NaN NaN NaN +> NaN Infinity NaN NaN NaN +> NaN NaN NaN NaN NaN +> rows (ordered): 49 + +SELECT A.D, B.D, A.D / B.D, MOD(A.D, B.D) FROM TEST A JOIN TEST B WHERE B.D <> 0 ORDER BY A.D, B.D; +> D D A.D / B.D MOD(A.D, B.D) +> --------- --------- ------------------- ------------- +> -Infinity 
-Infinity NaN NaN +> -Infinity -1.0 Infinity NaN +> -Infinity 1.0 -Infinity NaN +> -Infinity 1.5 -Infinity NaN +> -Infinity Infinity NaN NaN +> -Infinity NaN NaN NaN +> -1.0 -Infinity 0.0 -1.0 +> -1.0 -1.0 1.0 0.0 +> -1.0 1.0 -1.0 0.0 +> -1.0 1.5 -0.6666666666666666 -1.0 +> -1.0 Infinity 0.0 -1.0 +> -1.0 NaN NaN NaN +> 0.0 -Infinity 0.0 0.0 +> 0.0 -1.0 0.0 0.0 +> 0.0 1.0 0.0 0.0 +> 0.0 1.5 0.0 0.0 +> 0.0 Infinity 0.0 0.0 +> 0.0 NaN NaN NaN +> 1.0 -Infinity 0.0 1.0 +> 1.0 -1.0 -1.0 0.0 +> 1.0 1.0 1.0 0.0 +> 1.0 1.5 0.6666666666666666 1.0 +> 1.0 Infinity 0.0 1.0 +> 1.0 NaN NaN NaN +> 1.5 -Infinity 0.0 1.5 +> 1.5 -1.0 -1.5 0.5 +> 1.5 1.0 1.5 0.5 +> 1.5 1.5 1.0 0.0 +> 1.5 Infinity 0.0 1.5 +> 1.5 NaN NaN NaN +> Infinity -Infinity NaN NaN +> Infinity -1.0 -Infinity NaN +> Infinity 1.0 Infinity NaN +> Infinity 1.5 Infinity NaN +> Infinity Infinity NaN NaN +> Infinity NaN NaN NaN +> NaN -Infinity NaN NaN +> NaN -1.0 NaN NaN +> NaN 1.0 NaN NaN +> NaN 1.5 NaN NaN +> NaN Infinity NaN NaN +> NaN NaN NaN NaN +> rows (ordered): 42 + +SELECT A.D, B.D, A.D > B.D, A.D = B.D, A.D < B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D > B.D A.D = B.D A.D < B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity FALSE TRUE FALSE +> -Infinity -1.0 FALSE FALSE TRUE +> -Infinity 0.0 FALSE FALSE TRUE +> -Infinity 1.0 FALSE FALSE TRUE +> -Infinity 1.5 FALSE FALSE TRUE +> -Infinity Infinity FALSE FALSE TRUE +> -Infinity NaN FALSE FALSE TRUE +> -1.0 -Infinity TRUE FALSE FALSE +> -1.0 -1.0 FALSE TRUE FALSE +> -1.0 0.0 FALSE FALSE TRUE +> -1.0 1.0 FALSE FALSE TRUE +> -1.0 1.5 FALSE FALSE TRUE +> -1.0 Infinity FALSE FALSE TRUE +> -1.0 NaN FALSE FALSE TRUE +> 0.0 -Infinity TRUE FALSE FALSE +> 0.0 -1.0 TRUE FALSE FALSE +> 0.0 0.0 FALSE TRUE FALSE +> 0.0 1.0 FALSE FALSE TRUE +> 0.0 1.5 FALSE FALSE TRUE +> 0.0 Infinity FALSE FALSE TRUE +> 0.0 NaN FALSE FALSE TRUE +> 1.0 -Infinity TRUE FALSE FALSE +> 1.0 -1.0 TRUE FALSE FALSE +> 1.0 0.0 TRUE FALSE FALSE +> 1.0 1.0 FALSE 
TRUE FALSE +> 1.0 1.5 FALSE FALSE TRUE +> 1.0 Infinity FALSE FALSE TRUE +> 1.0 NaN FALSE FALSE TRUE +> 1.5 -Infinity TRUE FALSE FALSE +> 1.5 -1.0 TRUE FALSE FALSE +> 1.5 0.0 TRUE FALSE FALSE +> 1.5 1.0 TRUE FALSE FALSE +> 1.5 1.5 FALSE TRUE FALSE +> 1.5 Infinity FALSE FALSE TRUE +> 1.5 NaN FALSE FALSE TRUE +> Infinity -Infinity TRUE FALSE FALSE +> Infinity -1.0 TRUE FALSE FALSE +> Infinity 0.0 TRUE FALSE FALSE +> Infinity 1.0 TRUE FALSE FALSE +> Infinity 1.5 TRUE FALSE FALSE +> Infinity Infinity FALSE TRUE FALSE +> Infinity NaN FALSE FALSE TRUE +> NaN -Infinity TRUE FALSE FALSE +> NaN -1.0 TRUE FALSE FALSE +> NaN 0.0 TRUE FALSE FALSE +> NaN 1.0 TRUE FALSE FALSE +> NaN 1.5 TRUE FALSE FALSE +> NaN Infinity TRUE FALSE FALSE +> NaN NaN FALSE TRUE FALSE +> rows (ordered): 49 + +SELECT D, CAST(D AS REAL) D1, CAST(D AS DECFLOAT) D2 FROM TEST ORDER BY D; +> D D1 D2 +> --------- --------- --------- +> -Infinity -Infinity -Infinity +> -1.0 -1.0 -1 +> 0.0 0.0 0 +> 1.0 1.0 1 +> 1.5 1.5 1.5 +> Infinity Infinity Infinity +> NaN NaN NaN +> rows (ordered): 7 + +EXPLAIN SELECT CAST('Infinity' AS DOUBLE PRECISION), CAST('-Infinity' AS DOUBLE PRECISION), CAST('NaN' AS DOUBLE PRECISION), CAST(0 AS DOUBLE PRECISION); +>> SELECT CAST('Infinity' AS DOUBLE PRECISION), CAST('-Infinity' AS DOUBLE PRECISION), CAST('NaN' AS DOUBLE PRECISION), CAST(0.0 AS DOUBLE PRECISION) + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D" DOUBLE PRECISION ); +> -- 7 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES ('-Infinity'), (-1.0), (0.0), (1.0), (1.5), ('Infinity'), ('NaN'); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/enum.sql b/h2/src/test/org/h2/test/scripts/datatypes/enum.sql index f78c9fd140..cd10233159 
100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/enum.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/enum.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -60,9 +60,6 @@ select suit, count(rank) from card group by suit order by suit, count(rank); select rank from card where suit = 'diamonds'; >> 8 -select column_type from information_schema.columns where COLUMN_NAME = 'SUIT'; ->> ENUM('''none''', 'hearts', 'clubs', 'spades', 'diamonds') - alter table card alter column suit enum('hearts', 'clubs', 'spades', 'diamonds'); > ok @@ -74,17 +71,20 @@ insert into card (rank, suit) values (11, 'long_enum_value_of_128_chars_00000000 --- ENUM integer-based operations -select rank from card where suit = 1; +select rank from card where suit = 2; +> exception TYPES_ARE_NOT_COMPARABLE_2 + +select rank from card where cast(suit as integer) = 2; > RANK > ---- > 0 > 10 > rows: 2 -insert into card (rank, suit) values(5, 2); +insert into card (rank, suit) values(5, 3); > update count: 1 -select * from card where rank = 5; +select * from card where cast(rank as integer) = 5; > RANK SUIT > ---- ------ > 5 spades @@ -247,30 +247,41 @@ CREATE VIEW V1 AS SELECT E + 2 AS E FROM TEST; > ok SELECT * FROM V1; ->> 3 +>> 4 CREATE VIEW V2 AS SELECT E + E AS E FROM TEST; > ok SELECT * FROM V2; ->> 2 +>> 4 CREATE VIEW V3 AS SELECT -E AS E FROM TEST; > ok SELECT * FROM V3; ->> -1 - -SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'E' ORDER BY TABLE_NAME; -> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE 
DATETIME_PRECISION INTERVAL_TYPE INTERVAL_PRECISION CHARACTER_SET_NAME COLLATION_NAME TYPE_NAME NULLABLE IS_COMPUTED SELECTIVITY CHECK_CONSTRAINT SEQUENCE_NAME REMARKS SOURCE_DATA_TYPE COLUMN_TYPE COLUMN_ON_UPDATE IS_VISIBLE -> ------------- ------------ ---------- ----------- ---------------- -------------- ------------- ----------- -------------- ----------- --------- ------------------------ ---------------------- ----------------- ----------------------- ------------- ------------------ ------------- ------------------ ------------------ -------------- --------- -------- ----------- ----------- ---------------- ------------- ------- ---------------- -------------- ---------------- ---------- -> SCRIPT PUBLIC TEST E 1 null null null null YES 1111 1 1 1 10 0 null null null Unicode OFF ENUM 1 FALSE 50 null null ENUM('A', 'B') null TRUE -> SCRIPT PUBLIC V E 1 null null null null YES 1111 1 1 1 10 0 null null null Unicode OFF ENUM 1 FALSE 50 null null ENUM('A', 'B') null TRUE -> SCRIPT PUBLIC V1 E 1 null null null null YES 4 10 10 10 10 0 null null null Unicode OFF INTEGER 1 FALSE 50 null null INTEGER null TRUE -> SCRIPT PUBLIC V2 E 1 null null null null YES 4 10 10 10 10 0 null null null Unicode OFF INTEGER 1 FALSE 50 null null INTEGER null TRUE -> SCRIPT PUBLIC V3 E 1 null null null null YES 4 10 10 10 10 0 null null null Unicode OFF INTEGER 1 FALSE 50 null null INTEGER null TRUE +>> -2 + +SELECT TABLE_NAME, DATA_TYPE + FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'E' ORDER BY TABLE_NAME; +> TABLE_NAME DATA_TYPE +> ---------- --------- +> TEST ENUM +> V ENUM +> V1 INTEGER +> V2 INTEGER +> V3 INTEGER > rows (ordered): 5 +SELECT OBJECT_NAME, OBJECT_TYPE, ENUM_IDENTIFIER, VALUE_NAME, VALUE_ORDINAL FROM INFORMATION_SCHEMA.ENUM_VALUES + WHERE OBJECT_SCHEMA = 'PUBLIC'; +> OBJECT_NAME OBJECT_TYPE ENUM_IDENTIFIER VALUE_NAME VALUE_ORDINAL +> ----------- ----------- --------------- ---------- ------------- +> TEST TABLE 1 A 1 +> TEST TABLE 1 B 2 +> V TABLE 1 A 1 +> V 
TABLE 1 B 2 +> rows: 4 + DROP VIEW V; > ok @@ -287,13 +298,13 @@ DROP TABLE TEST; > ok SELECT CAST (2 AS ENUM('a', 'b', 'c', 'd')); ->> c +>> b CREATE TABLE TEST(E ENUM('a', 'b')); > ok EXPLAIN SELECT * FROM TEST WHERE E = 'a'; ->> SELECT "TEST"."E" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "E" = 'a' +>> SELECT "PUBLIC"."TEST"."E" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "E" = CAST('a' AS ENUM('a', 'b')) INSERT INTO TEST VALUES ('a'); > update count: 1 @@ -312,3 +323,66 @@ INSERT INTO TEST VALUES ('a'); DROP TABLE TEST; > ok + +EXPLAIN VALUES CAST('A' AS ENUM('A', 'B')); +>> VALUES (CAST('A' AS ENUM('A', 'B'))) + +CREATE TABLE TEST(E1 ENUM('a', 'b'), E2 ENUM('e', 'c') ARRAY, E3 ROW(E ENUM('x', 'y'))); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, DTD_IDENTIFIER FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME DATA_TYPE DTD_IDENTIFIER +> ----------- --------- -------------- +> E1 ENUM 1 +> E2 ARRAY 2 +> E3 ROW 3 +> rows: 3 + +SELECT COLLECTION_TYPE_IDENTIFIER, DATA_TYPE, DTD_IDENTIFIER FROM INFORMATION_SCHEMA.ELEMENT_TYPES WHERE OBJECT_NAME = 'TEST'; +> COLLECTION_TYPE_IDENTIFIER DATA_TYPE DTD_IDENTIFIER +> -------------------------- --------- -------------- +> 2 ENUM 2_ +> rows: 1 + +SELECT ROW_IDENTIFIER, FIELD_NAME, DATA_TYPE, DTD_IDENTIFIER FROM INFORMATION_SCHEMA.FIELDS WHERE OBJECT_NAME = 'TEST'; +> ROW_IDENTIFIER FIELD_NAME DATA_TYPE DTD_IDENTIFIER +> -------------- ---------- --------- -------------- +> 3 E ENUM 3_1 +> rows: 1 + +SELECT * FROM INFORMATION_SCHEMA.ENUM_VALUES WHERE OBJECT_NAME = 'TEST'; +> OBJECT_CATALOG OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE ENUM_IDENTIFIER VALUE_NAME VALUE_ORDINAL +> -------------- ------------- ----------- ----------- --------------- ---------- ------------- +> SCRIPT PUBLIC TEST TABLE 1 a 1 +> SCRIPT PUBLIC TEST TABLE 1 b 2 +> SCRIPT PUBLIC TEST TABLE 2_ c 2 +> SCRIPT PUBLIC TEST TABLE 2_ e 1 +> SCRIPT PUBLIC TEST TABLE 3_1 x 1 +> SCRIPT PUBLIC TEST TABLE 3_1 y 2 +> rows: 6 + 
+DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A ENUM('A', 'B') ARRAY, B ROW(V ENUM('C', 'D'))); +> ok + +INSERT INTO TEST VALUES (ARRAY['A', 'B'], ROW('C')); +> update count: 1 + +TABLE TEST; +> A B +> ------ ------- +> [A, B] ROW (C) +> rows: 1 + +@reconnect + +TABLE TEST; +> A B +> ------ ------- +> [A, B] ROW (C) +> rows: 1 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/geometry.sql b/h2/src/test/org/h2/test/scripts/datatypes/geometry.sql index 51b603902a..4b6675bf74 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/geometry.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/geometry.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -19,27 +19,27 @@ INSERT INTO TEST VALUES ('POINT EMPTY', 'SRID=1;POINT EMPTY', 'POINT EMPTY', 'SR 'GEOMETRYCOLLECTION EMPTY'); > update count: 1 -SELECT COLUMN_NAME, TYPE_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE, GEOMETRY_TYPE, GEOMETRY_SRID FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME TYPE_NAME COLUMN_TYPE -> ----------- --------- ---------------------------- -> G GEOMETRY GEOMETRY -> G_S GEOMETRY GEOMETRY(GEOMETRY, 1) -> P GEOMETRY GEOMETRY(POINT) -> P_S GEOMETRY GEOMETRY(POINT, 1) -> PZ1 GEOMETRY GEOMETRY(POINT Z) -> PZ2 GEOMETRY GEOMETRY(POINT Z) -> PZ1_S GEOMETRY GEOMETRY(POINT Z, 1) -> PZ2_S GEOMETRY GEOMETRY(POINT Z, 1) -> PM GEOMETRY GEOMETRY(POINT M) -> PZM GEOMETRY GEOMETRY(POINT ZM) -> PZM_S GEOMETRY GEOMETRY(POINT ZM, -100) -> LS GEOMETRY GEOMETRY(LINESTRING) -> PG GEOMETRY GEOMETRY(POLYGON) -> MP GEOMETRY GEOMETRY(MULTIPOINT) -> MLS GEOMETRY GEOMETRY(MULTILINESTRING) -> MPG GEOMETRY GEOMETRY(MULTIPOLYGON) -> GC 
GEOMETRY GEOMETRY(GEOMETRYCOLLECTION) +> COLUMN_NAME DATA_TYPE GEOMETRY_TYPE GEOMETRY_SRID +> ----------- --------- ------------------ ------------- +> G GEOMETRY null null +> G_S GEOMETRY null 1 +> P GEOMETRY POINT null +> P_S GEOMETRY POINT 1 +> PZ1 GEOMETRY POINT Z null +> PZ2 GEOMETRY POINT Z null +> PZ1_S GEOMETRY POINT Z 1 +> PZ2_S GEOMETRY POINT Z 1 +> PM GEOMETRY POINT M null +> PZM GEOMETRY POINT ZM null +> PZM_S GEOMETRY POINT ZM -100 +> LS GEOMETRY LINESTRING null +> PG GEOMETRY POLYGON null +> MP GEOMETRY MULTIPOINT null +> MLS GEOMETRY MULTILINESTRING null +> MPG GEOMETRY MULTIPOLYGON null +> GC GEOMETRY GEOMETRYCOLLECTION null > rows (ordered): 17 UPDATE TEST SET G = 'SRID=10;LINESTRING EMPTY'; @@ -49,16 +49,16 @@ UPDATE TEST SET GC = 'SRID=8;GEOMETRYCOLLECTION(POINT (1 1))'; > update count: 1 UPDATE TEST SET G_S = 'POINT (1 1)'; -> exception CHECK_CONSTRAINT_VIOLATED_1 +> exception DATA_CONVERSION_ERROR_1 UPDATE TEST SET P = 'POINT Z EMPTY'; -> exception CHECK_CONSTRAINT_VIOLATED_1 +> exception DATA_CONVERSION_ERROR_1 UPDATE TEST SET P = 'POLYGON EMPTY'; -> exception CHECK_CONSTRAINT_VIOLATED_1 +> exception DATA_CONVERSION_ERROR_1 UPDATE TEST SET PZ1 = 'POINT EMPTY'; -> exception CHECK_CONSTRAINT_VIOLATED_1 +> exception DATA_CONVERSION_ERROR_1 SELECT * FROM TEST; > G G_S P P_S PZ1 PZ2 PZ1_S PZ2_S PM PZM PZM_S LS PG MP MLS MPG GC @@ -70,7 +70,7 @@ SELECT G FROM TEST WHERE P_S = 'SRID=1;POINT EMPTY'; >> SRID=10;LINESTRING EMPTY SELECT G FROM TEST WHERE P_S = 'GEOMETRYCOLLECTION Z EMPTY'; -> exception CHECK_CONSTRAINT_VIOLATED_1 +> exception DATA_CONVERSION_ERROR_1 CREATE SPATIAL INDEX IDX ON TEST(GC); > ok @@ -79,22 +79,199 @@ SELECT P FROM TEST WHERE GC = 'SRID=8;GEOMETRYCOLLECTION (POINT (1 1))'; >> POINT EMPTY SELECT P FROM TEST WHERE GC = 'SRID=8;GEOMETRYCOLLECTION Z (POINT (1 1 1))'; -> exception CHECK_CONSTRAINT_VIOLATED_1 +> exception DATA_CONVERSION_ERROR_1 SELECT CAST('POINT EMPTY' AS GEOMETRY(POINT)); >> POINT EMPTY SELECT CAST('POINT EMPTY' 
AS GEOMETRY(POINT Z)); -> exception CHECK_CONSTRAINT_VIOLATED_1 +> exception DATA_CONVERSION_ERROR_1 SELECT CAST('POINT EMPTY' AS GEOMETRY(POINT, 0)); >> POINT EMPTY SELECT CAST('POINT EMPTY' AS GEOMETRY(POINT, 1)); -> exception CHECK_CONSTRAINT_VIOLATED_1 +> exception DATA_CONVERSION_ERROR_1 SELECT CAST('POINT EMPTY' AS GEOMETRY(POLYGON)); -> exception CHECK_CONSTRAINT_VIOLATED_1 +> exception DATA_CONVERSION_ERROR_1 DROP TABLE TEST; > ok + +SELECT CAST('POINT EMPTY'::GEOMETRY AS JSON); +>> null + +SELECT CAST('null' FORMAT JSON AS GEOMETRY); +>> POINT EMPTY + +SELECT CAST('POINT (1 2)'::GEOMETRY AS JSON); +>> {"type":"Point","coordinates":[1,2]} + +SELECT CAST('{"type":"Point","coordinates":[1,2]}' FORMAT JSON AS GEOMETRY); +>> POINT (1 2) + +SELECT CAST('POINT Z (1 2 3)'::GEOMETRY AS JSON); +>> {"type":"Point","coordinates":[1,2,3]} + +SELECT CAST('{"type":"Point","coordinates":[1,2,3]}' FORMAT JSON AS GEOMETRY); +>> POINT Z (1 2 3) + +SELECT CAST('POINT ZM (1 2 3 4)'::GEOMETRY AS JSON); +>> {"type":"Point","coordinates":[1,2,3,4]} + +SELECT CAST('{"type":"Point","coordinates":[1,2,3,4]}' FORMAT JSON AS GEOMETRY); +>> POINT ZM (1 2 3 4) + +SELECT CAST('POINT M (1 2 4)'::GEOMETRY AS JSON); +> exception DATA_CONVERSION_ERROR_1 + +SELECT CAST('SRID=4326;POINT (1 2)'::GEOMETRY AS JSON); +>> {"type":"Point","coordinates":[1,2]} + +SELECT CAST('{"type":"Point","coordinates":[1,2]}' FORMAT JSON AS GEOMETRY(POINT)); +>> POINT (1 2) + +SELECT CAST('{"type":"Point","coordinates":[1,2]}' FORMAT JSON AS GEOMETRY(GEOMETRY, 4326)); +>> SRID=4326;POINT (1 2) + +SELECT CAST('LINESTRING EMPTY'::GEOMETRY AS JSON); +>> {"type":"LineString","coordinates":[]} + +SELECT CAST('{"type":"LineString","coordinates":[]}' FORMAT JSON AS GEOMETRY); +>> LINESTRING EMPTY + +SELECT CAST('LINESTRING (1 2, 3 4)'::GEOMETRY AS JSON); +>> {"type":"LineString","coordinates":[[1,2],[3,4]]} + +SELECT CAST('{"type":"LineString","coordinates":[[1,2],[3,4]]}' FORMAT JSON AS GEOMETRY); +>> LINESTRING (1 2, 
3 4) + +SELECT CAST('POLYGON EMPTY'::GEOMETRY AS JSON); +>> {"type":"Polygon","coordinates":[]} + +SELECT CAST('{"type":"Polygon","coordinates":[]}' FORMAT JSON AS GEOMETRY); +>> POLYGON EMPTY + +SELECT CAST('POLYGON ((-1 -2, 10 1, 2 20, -1 -2))'::GEOMETRY AS JSON); +>> {"type":"Polygon","coordinates":[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]]} + +SELECT CAST('{"type":"Polygon","coordinates":[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]]}' FORMAT JSON AS GEOMETRY); +>> POLYGON ((-1 -2, 10 1, 2 20, -1 -2)) + +SELECT CAST('POLYGON ((-1 -2, 10 1, 2 20, -1 -2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5), EMPTY)'::GEOMETRY AS JSON); +>> {"type":"Polygon","coordinates":[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]],[[0.5,0.5],[1,0.5],[1,1],[0.5,0.5]],[]]} + +SELECT CAST('{"type":"Polygon","coordinates":[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]],[[0.5,0.5],[1,0.5],[1,1],[0.5,0.5]],[]]}' FORMAT JSON AS GEOMETRY); +>> POLYGON ((-1 -2, 10 1, 2 20, -1 -2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5), EMPTY) + +SELECT CAST('MULTIPOINT EMPTY'::GEOMETRY AS JSON); +>> {"type":"MultiPoint","coordinates":[]} + +SELECT CAST('{"type":"MultiPoint","coordinates":[]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOINT EMPTY + +SELECT CAST('MULTIPOINT ((1 2))'::GEOMETRY AS JSON); +>> {"type":"MultiPoint","coordinates":[[1,2]]} + +SELECT CAST('{"type":"MultiPoint","coordinates":[[1,2]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOINT ((1 2)) + +SELECT CAST('MULTIPOINT ((1 2), (3 4))'::GEOMETRY AS JSON); +>> {"type":"MultiPoint","coordinates":[[1,2],[3,4]]} + +SELECT CAST('{"type":"MultiPoint","coordinates":[[1,2],[3,4]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOINT ((1 2), (3 4)) + +SELECT CAST('MULTIPOINT ((1 0), EMPTY, EMPTY, (2 2))'::GEOMETRY AS JSON); +>> {"type":"MultiPoint","coordinates":[[1,0],null,null,[2,2]]} + +SELECT CAST('{"type":"MultiPoint","coordinates":[[1,0],null,null,[2,2]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOINT ((1 0), EMPTY, EMPTY, (2 2)) + +SELECT CAST('MULTILINESTRING EMPTY'::GEOMETRY AS JSON); +>> {"type":"MultiLineString","coordinates":[]} + 
+SELECT CAST('{"type":"MultiLineString","coordinates":[]}' FORMAT JSON AS GEOMETRY); +>> MULTILINESTRING EMPTY + +SELECT CAST('MULTILINESTRING ((1 2, 3 4, 5 7))'::GEOMETRY AS JSON); +>> {"type":"MultiLineString","coordinates":[[[1,2],[3,4],[5,7]]]} + +SELECT CAST('{"type":"MultiLineString","coordinates":[[[1,2],[3,4],[5,7]]]}' FORMAT JSON AS GEOMETRY); +>> MULTILINESTRING ((1 2, 3 4, 5 7)) + +SELECT CAST('MULTILINESTRING ((1 2, 3 4, 5 7), (-1 -1, 0 0, 2 2, 4 6.01), EMPTY)'::GEOMETRY AS JSON); +>> {"type":"MultiLineString","coordinates":[[[1,2],[3,4],[5,7]],[[-1,-1],[0,0],[2,2],[4,6.01]],[]]} + +SELECT CAST('{"type":"MultiLineString","coordinates":[[[1,2],[3,4],[5,7]],[[-1,-1],[0,0],[2,2],[4,6.01]],[]]}' FORMAT JSON AS GEOMETRY); +>> MULTILINESTRING ((1 2, 3 4, 5 7), (-1 -1, 0 0, 2 2, 4 6.01), EMPTY) + +SELECT CAST('MULTIPOLYGON EMPTY'::GEOMETRY AS JSON); +>> {"type":"MultiPolygon","coordinates":[]} + +SELECT CAST('{"type":"MultiPolygon","coordinates":[]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOLYGON EMPTY + +SELECT CAST('MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2)))'::GEOMETRY AS JSON); +>> {"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]]]} + +SELECT CAST('{"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2))) + +SELECT CAST('MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2)), ((1 2, 2 2, 3 3, 1 2)))'::GEOMETRY AS JSON); +>> {"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]],[[[1,2],[2,2],[3,3],[1,2]]]]} + +SELECT CAST('{"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]],[[[1,2],[2,2],[3,3],[1,2]]]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2)), ((1 2, 2 2, 3 3, 1 2))) + +SELECT CAST('MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5)))'::GEOMETRY AS JSON); +>> 
{"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]],[[0.5,0.5],[1,0.5],[1,1],[0.5,0.5]]]]} + +SELECT CAST('{"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]],[[0.5,0.5],[1,0.5],[1,1],[0.5,0.5]]]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5))) + +SELECT CAST('GEOMETRYCOLLECTION EMPTY'::GEOMETRY AS JSON); +>> {"type":"GeometryCollection","geometries":[]} + +SELECT CAST('{"type":"GeometryCollection","geometries":[]}' FORMAT JSON AS GEOMETRY); +>> GEOMETRYCOLLECTION EMPTY + +SELECT CAST('GEOMETRYCOLLECTION (POINT (1 2))'::GEOMETRY AS JSON); +>> {"type":"GeometryCollection","geometries":[{"type":"Point","coordinates":[1,2]}]} + +SELECT CAST('{"type":"GeometryCollection","geometries":[{"type":"Point","coordinates":[1,2]}]}' FORMAT JSON AS GEOMETRY); +>> GEOMETRYCOLLECTION (POINT (1 2)) + +SELECT CAST('GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (1 3)), MULTIPOINT ((4 8)))'::GEOMETRY AS JSON); +>> {"type":"GeometryCollection","geometries":[{"type":"GeometryCollection","geometries":[{"type":"Point","coordinates":[1,3]}]},{"type":"MultiPoint","coordinates":[[4,8]]}]} + +SELECT CAST('{"type":"GeometryCollection","geometries":[{"type":"GeometryCollection","geometries":[{"type":"Point","coordinates":[1,3]}]},{"type":"MultiPoint","coordinates":[[4,8]]}]}' FORMAT JSON AS GEOMETRY); +>> GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (1 3)), MULTIPOINT ((4 8))) + +SELECT CAST('{"type":"Unknown","coordinates":[1,2]}' FORMAT JSON AS GEOMETRY); +> exception DATA_CONVERSION_ERROR_1 + +EXPLAIN VALUES GEOMETRY 'POINT EMPTY'; +>> VALUES (GEOMETRY 'POINT EMPTY') + +EXPLAIN VALUES GEOMETRY X'00000000017ff80000000000007ff8000000000000'; +>> VALUES (GEOMETRY 'POINT EMPTY') + +EXPLAIN VALUES CAST(CAST('POINT EMPTY' AS GEOMETRY) AS VARBINARY); +>> VALUES (CAST(X'00000000017ff80000000000007ff8000000000000' AS BINARY VARYING)) + +SELECT GEOMETRY X'000000000300000000'; +>> POLYGON EMPTY + 
+SELECT GEOMETRY X'00000000030000000100000000'; +>> POLYGON EMPTY + +SELECT CAST(GEOMETRY 'POLYGON EMPTY' AS VARBINARY); +>> X'000000000300000000' + +SELECT CAST(GEOMETRY X'00000000030000000100000000' AS VARBINARY); +>> X'000000000300000000' + +VALUES GEOMETRY 'POINT (1 2 3)'; +> exception DATA_CONVERSION_ERROR_1 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/identity.sql b/h2/src/test/org/h2/test/scripts/datatypes/identity.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/identity.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/identity.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/datatypes/int.sql b/h2/src/test/org/h2/test/scripts/datatypes/int.sql index f2fa6e45bb..266abcca4b 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/int.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/int.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -13,3 +13,6 @@ SELECT CAST(-2147483648 AS INT) / CAST(1 AS INT); SELECT CAST(-2147483648 AS INT) / CAST(-1 AS INT); > exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +EXPLAIN VALUES 1; +>> VALUES (1) diff --git a/h2/src/test/org/h2/test/scripts/datatypes/interval.sql b/h2/src/test/org/h2/test/scripts/datatypes/interval.sql index 1d484b485b..89b53900e5 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/interval.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/interval.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -14,39 +14,38 @@ CREATE TABLE TEST(ID INT PRIMARY KEY, J12 INTERVAL HOUR(5) TO SECOND(9), J13 INTERVAL MINUTE(5) TO SECOND(9)); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE, NUMERIC_PRECISION, NUMERIC_SCALE, DATETIME_PRECISION, - INTERVAL_TYPE, INTERVAL_PRECISION +SELECT COLUMN_NAME, DATA_TYPE, DATETIME_PRECISION, INTERVAL_TYPE, INTERVAL_PRECISION FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION INTERVAL_TYPE INTERVAL_PRECISION -> ----------- --------- --------- ------------------------------- ----------------- ------------- ------------------ ---------------------- ------------------ -> ID 4 INTEGER INT NOT NULL 10 0 null null null -> I01 1111 INTERVAL INTERVAL YEAR 2 0 null YEAR 2 -> I02 1111 INTERVAL INTERVAL MONTH 2 0 null MONTH 2 -> I03 1111 INTERVAL INTERVAL DAY 2 0 null DAY 2 -> I04 1111 INTERVAL INTERVAL HOUR 2 0 null HOUR 2 -> I05 1111 INTERVAL INTERVAL MINUTE 2 0 null MINUTE 2 -> I06 1111 INTERVAL INTERVAL SECOND 2 6 6 SECOND 2 -> I07 1111 INTERVAL INTERVAL YEAR TO MONTH 2 0 null YEAR TO 
MONTH 2 -> I08 1111 INTERVAL INTERVAL DAY TO HOUR 2 0 null DAY TO HOUR 2 -> I09 1111 INTERVAL INTERVAL DAY TO MINUTE 2 0 null DAY TO MINUTE 2 -> I10 1111 INTERVAL INTERVAL DAY TO SECOND 2 6 6 DAY TO SECOND 2 -> I11 1111 INTERVAL INTERVAL HOUR TO MINUTE 2 0 null HOUR TO MINUTE 2 -> I12 1111 INTERVAL INTERVAL HOUR TO SECOND 2 6 6 HOUR TO SECOND 2 -> I13 1111 INTERVAL INTERVAL MINUTE TO SECOND 2 6 6 MINUTE TO SECOND 2 -> J01 1111 INTERVAL INTERVAL YEAR(5) 5 0 null YEAR(5) 5 -> J02 1111 INTERVAL INTERVAL MONTH(5) 5 0 null MONTH(5) 5 -> J03 1111 INTERVAL INTERVAL DAY(5) 5 0 null DAY(5) 5 -> J04 1111 INTERVAL INTERVAL HOUR(5) 5 0 null HOUR(5) 5 -> J05 1111 INTERVAL INTERVAL MINUTE(5) 5 0 null MINUTE(5) 5 -> J06 1111 INTERVAL INTERVAL SECOND(5, 9) 5 9 9 SECOND(5, 9) 5 -> J07 1111 INTERVAL INTERVAL YEAR(5) TO MONTH 5 0 null YEAR(5) TO MONTH 5 -> J08 1111 INTERVAL INTERVAL DAY(5) TO HOUR 5 0 null DAY(5) TO HOUR 5 -> J09 1111 INTERVAL INTERVAL DAY(5) TO MINUTE 5 0 null DAY(5) TO MINUTE 5 -> J10 1111 INTERVAL INTERVAL DAY(5) TO SECOND(9) 5 9 9 DAY(5) TO SECOND(9) 5 -> J11 1111 INTERVAL INTERVAL HOUR(5) TO MINUTE 5 0 null HOUR(5) TO MINUTE 5 -> J12 1111 INTERVAL INTERVAL HOUR(5) TO SECOND(9) 5 9 9 HOUR(5) TO SECOND(9) 5 -> J13 1111 INTERVAL INTERVAL MINUTE(5) TO SECOND(9) 5 9 9 MINUTE(5) TO SECOND(9) 5 +> COLUMN_NAME DATA_TYPE DATETIME_PRECISION INTERVAL_TYPE INTERVAL_PRECISION +> ----------- --------- ------------------ ---------------- ------------------ +> ID INTEGER null null null +> I01 INTERVAL 0 YEAR 2 +> I02 INTERVAL 0 MONTH 2 +> I03 INTERVAL 0 DAY 2 +> I04 INTERVAL 0 HOUR 2 +> I05 INTERVAL 0 MINUTE 2 +> I06 INTERVAL 6 SECOND 2 +> I07 INTERVAL 0 YEAR TO MONTH 2 +> I08 INTERVAL 0 DAY TO HOUR 2 +> I09 INTERVAL 0 DAY TO MINUTE 2 +> I10 INTERVAL 6 DAY TO SECOND 2 +> I11 INTERVAL 0 HOUR TO MINUTE 2 +> I12 INTERVAL 6 HOUR TO SECOND 2 +> I13 INTERVAL 6 MINUTE TO SECOND 2 +> J01 INTERVAL 0 YEAR 5 +> J02 INTERVAL 0 MONTH 5 +> J03 INTERVAL 0 DAY 5 +> J04 INTERVAL 0 HOUR 5 +> J05 
INTERVAL 0 MINUTE 5 +> J06 INTERVAL 9 SECOND 5 +> J07 INTERVAL 0 YEAR TO MONTH 5 +> J08 INTERVAL 0 DAY TO HOUR 5 +> J09 INTERVAL 0 DAY TO MINUTE 5 +> J10 INTERVAL 9 DAY TO SECOND 5 +> J11 INTERVAL 0 HOUR TO MINUTE 5 +> J12 INTERVAL 9 HOUR TO SECOND 5 +> J13 INTERVAL 9 MINUTE TO SECOND 5 > rows (ordered): 27 INSERT INTO TEST VALUES ( @@ -120,7 +119,7 @@ DROP TABLE TEST; -- Year-month casts -SELECT CAST(INTERVAL '-10' YEAR AS INTERVAL MONTH); +SELECT CAST(INTERVAL '-10' YEAR AS INTERVAL MONTH(3)); >> INTERVAL '-120' MONTH SELECT CAST(INTERVAL '-10' YEAR AS INTERVAL YEAR TO MONTH); @@ -135,18 +134,18 @@ SELECT CAST(INTERVAL '-20' MONTH AS INTERVAL YEAR TO MONTH); SELECT CAST(INTERVAL '-20-10' YEAR TO MONTH AS INTERVAL YEAR); >> INTERVAL '-20' YEAR -SELECT CAST(INTERVAL '-20-10' YEAR TO MONTH AS INTERVAL MONTH); +SELECT CAST(INTERVAL '-20-10' YEAR TO MONTH AS INTERVAL MONTH(3)); >> INTERVAL '-250' MONTH -- Day-time casts: DAY -SELECT CAST(INTERVAL '-10' DAY AS INTERVAL HOUR); +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL HOUR(3)); >> INTERVAL '-240' HOUR -SELECT CAST(INTERVAL '-10' DAY AS INTERVAL MINUTE); +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL MINUTE(5)); >> INTERVAL '-14400' MINUTE -SELECT CAST(INTERVAL '-10' DAY AS INTERVAL SECOND); +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL SECOND(6)); >> INTERVAL '-864000' SECOND SELECT CAST(INTERVAL '-10' DAY AS INTERVAL DAY TO HOUR); @@ -158,13 +157,13 @@ SELECT CAST(INTERVAL '-10' DAY AS INTERVAL DAY TO MINUTE); SELECT CAST(INTERVAL '-10' DAY AS INTERVAL DAY TO SECOND); >> INTERVAL '-10 00:00:00' DAY TO SECOND -SELECT CAST(INTERVAL '-10' DAY AS INTERVAL HOUR TO MINUTE); +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL HOUR(3) TO MINUTE); >> INTERVAL '-240:00' HOUR TO MINUTE -SELECT CAST(INTERVAL '-10' DAY AS INTERVAL HOUR TO SECOND); +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL HOUR(3) TO SECOND); >> INTERVAL '-240:00:00' HOUR TO SECOND -SELECT CAST(INTERVAL '-10' DAY AS INTERVAL MINUTE TO SECOND); +SELECT CAST(INTERVAL '-10' DAY 
AS INTERVAL MINUTE(5) TO SECOND); >> INTERVAL '-14400:00' MINUTE TO SECOND -- Day-time casts: HOUR @@ -172,10 +171,10 @@ SELECT CAST(INTERVAL '-10' DAY AS INTERVAL MINUTE TO SECOND); SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL DAY); >> INTERVAL '-1' DAY -SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL MINUTE); +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL MINUTE(4)); >> INTERVAL '-1800' MINUTE -SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL SECOND); +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL SECOND(6)); >> INTERVAL '-108000' SECOND SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL DAY TO HOUR); @@ -193,7 +192,7 @@ SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL HOUR TO MINUTE); SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL HOUR TO SECOND); >> INTERVAL '-30:00:00' HOUR TO SECOND -SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL MINUTE TO SECOND); +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL MINUTE(4) TO SECOND); >> INTERVAL '-1800:00' MINUTE TO SECOND -- Day-time casts: MINUTE @@ -204,7 +203,7 @@ SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL DAY); SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL HOUR); >> INTERVAL '-26' HOUR -SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL SECOND); +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL SECOND(5)); >> INTERVAL '-94200' SECOND SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL DAY TO HOUR); @@ -222,7 +221,7 @@ SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL HOUR TO MINUTE); SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL HOUR TO SECOND); >> INTERVAL '-26:10:00' HOUR TO SECOND -SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL MINUTE TO SECOND); +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL MINUTE(4) TO SECOND); >> INTERVAL '-1570:00' MINUTE TO SECOND -- Day-time casts: SECOND @@ -233,7 +232,7 @@ SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL DAY); SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL HOUR); >> INTERVAL '-26' HOUR -SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL MINUTE); +SELECT CAST(INTERVAL 
'-93784.123456789' SECOND AS INTERVAL MINUTE(4)); >> INTERVAL '-1563' MINUTE SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL DAY TO HOUR); @@ -251,7 +250,7 @@ SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL HOUR TO MINUTE); SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL HOUR TO SECOND); >> INTERVAL '-26:03:04.123457' HOUR TO SECOND -SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL MINUTE TO SECOND); +SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL MINUTE(4) TO SECOND); >> INTERVAL '-1563:04.123457' MINUTE TO SECOND -- Day-time casts: DAY TO HOUR @@ -262,10 +261,10 @@ SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL DAY); SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL HOUR); >> INTERVAL '-26' HOUR -SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL MINUTE); +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL MINUTE(4)); >> INTERVAL '-1560' MINUTE -SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL SECOND); +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL SECOND(5)); >> INTERVAL '-93600' SECOND SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL DAY TO MINUTE); @@ -280,7 +279,7 @@ SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL HOUR TO MINUTE); SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL HOUR TO SECOND); >> INTERVAL '-26:00:00' HOUR TO SECOND -SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL MINUTE TO SECOND); +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL MINUTE(4) TO SECOND); >> INTERVAL '-1560:00' MINUTE TO SECOND -- Day-time casts: DAY TO MINUTE @@ -291,10 +290,10 @@ SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL DAY); SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL HOUR); >> INTERVAL '-26' HOUR -SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL MINUTE); +SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL MINUTE(4)); >> INTERVAL '-1563' MINUTE -SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL SECOND); +SELECT CAST(INTERVAL '-1 2:3' DAY TO 
MINUTE AS INTERVAL SECOND(5)); >> INTERVAL '-93780' SECOND SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL DAY TO HOUR); @@ -309,7 +308,7 @@ SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL HOUR TO MINUTE); SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL HOUR TO SECOND); >> INTERVAL '-26:03:00' HOUR TO SECOND -SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL MINUTE TO SECOND); +SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL MINUTE(4) TO SECOND); >> INTERVAL '-1563:00' MINUTE TO SECOND -- Day-time casts: DAY TO SECOND @@ -320,10 +319,10 @@ SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL DAY); SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL HOUR); >> INTERVAL '-26' HOUR -SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL MINUTE); +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL MINUTE(4)); >> INTERVAL '-1563' MINUTE -SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL SECOND); +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL SECOND(5)); >> INTERVAL '-93784.123457' SECOND SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL DAY TO HOUR); @@ -338,7 +337,7 @@ SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL HOUR TO MINU SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL HOUR TO SECOND); >> INTERVAL '-26:03:04.123457' HOUR TO SECOND -SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL MINUTE TO SECOND); +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL MINUTE(4) TO SECOND); >> INTERVAL '-1563:04.123457' MINUTE TO SECOND -- Day-time casts: HOUR TO MINUTE @@ -349,10 +348,10 @@ SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL DAY); SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL HOUR); >> INTERVAL '-30' HOUR -SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL MINUTE); +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS 
INTERVAL MINUTE(4)); >> INTERVAL '-1802' MINUTE -SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL SECOND); +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL SECOND(6)); >> INTERVAL '-108120' SECOND SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL DAY TO HOUR); @@ -367,7 +366,7 @@ SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL DAY TO SECOND); SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL HOUR TO SECOND); >> INTERVAL '-30:02:00' HOUR TO SECOND -SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL MINUTE TO SECOND); +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL MINUTE(4) TO SECOND); >> INTERVAL '-1802:00' MINUTE TO SECOND -- Day-time casts: HOUR TO SECOND @@ -378,10 +377,10 @@ SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL DAY); SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL HOUR); >> INTERVAL '-30' HOUR -SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL MINUTE); +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL MINUTE(4)); >> INTERVAL '-1802' MINUTE -SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL SECOND); +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL SECOND(6)); >> INTERVAL '-108124.123457' SECOND SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL DAY TO HOUR); @@ -396,7 +395,7 @@ SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL DAY TO SECON SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL HOUR TO MINUTE); >> INTERVAL '-30:02' HOUR TO MINUTE -SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL MINUTE TO SECOND); +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL MINUTE(4) TO SECOND); >> INTERVAL '-1802:04.123457' MINUTE TO SECOND -- Day-time casts: MINUTE TO SECOND @@ -407,10 +406,10 @@ SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL DAY); SELECT CAST(INTERVAL '-1803:4.123456789' 
MINUTE TO SECOND AS INTERVAL HOUR); >> INTERVAL '-30' HOUR -SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL MINUTE); +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL MINUTE(4)); >> INTERVAL '-1803' MINUTE -SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL SECOND); +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL SECOND(6)); >> INTERVAL '-108184.123457' SECOND SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL DAY TO HOUR); @@ -587,16 +586,16 @@ SELECT INTERVAL -'0.1' SECOND; -- Arithmetic SELECT INTERVAL '1000' SECOND + INTERVAL '10' MINUTE; ->> INTERVAL '1600' SECOND +>> INTERVAL '26:40' MINUTE TO SECOND SELECT INTERVAL '1000' SECOND - INTERVAL '10' MINUTE; ->> INTERVAL '400' SECOND +>> INTERVAL '6:40' MINUTE TO SECOND SELECT INTERVAL '10' YEAR + INTERVAL '1' MONTH; ->> INTERVAL '121' MONTH +>> INTERVAL '10-1' YEAR TO MONTH SELECT INTERVAL '10' YEAR - INTERVAL '1' MONTH; ->> INTERVAL '119' MONTH +>> INTERVAL '9-11' YEAR TO MONTH SELECT INTERVAL '1000' SECOND * 2; >> INTERVAL '2000' SECOND @@ -750,7 +749,7 @@ DROP TABLE TEST; > ok CREATE TABLE TEST(I INTERVAL DAY(0)); -> exception INVALID_VALUE_SCALE_PRECISION +> exception INVALID_VALUE_PRECISION CREATE TABLE TEST(I INTERVAL DAY(18)); > ok @@ -759,7 +758,7 @@ DROP TABLE TEST; > ok CREATE TABLE TEST(I INTERVAL DAY(19)); -> exception INVALID_VALUE_SCALE_PRECISION +> exception INVALID_VALUE_PRECISION CREATE TABLE TEST(I INTERVAL HOUR TO SECOND(0)); > ok @@ -774,7 +773,7 @@ DROP TABLE TEST; > ok CREATE TABLE TEST(I INTERVAL HOUR TO SECOND(10)); -> exception INVALID_VALUE_SCALE_PRECISION +> exception INVALID_VALUE_SCALE SELECT TIMESTAMP '2018-09-10 23:30:00' - TIMESTAMP '2014-09-11 23:30:00'; >> INTERVAL '1460 00:00:00' DAY TO SECOND @@ -784,3 +783,320 @@ SELECT TIMESTAMP WITH TIME ZONE '2014-09-11 23:30:00Z' - TIMESTAMP WITH TIME ZON SELECT DATE '2018-09-10' - DATE '2014-09-11'; >> INTERVAL '1460' DAY + +SELECT 
INTERVAL -'1-2' YEAR TO MONTH / INTERVAL '1' MONTH; +>> -14.0000000000000000000000000000000000000000 + +SELECT INTERVAL '1 12:03:40.123456789' DAY TO SECOND / INTERVAL '1' SECOND; +>> 129820.1234567890000000000000000000000000000000000000000000000000000000 + +SELECT INTERVAL -'0.000000001' SECOND / INTERVAL '1' SECOND; +>> -0.0000000010000000000000000000000000000000000000000000000000000000 + +SELECT INTERVAL -'1-2' YEAR TO MONTH / INTERVAL '1' DAY; +> exception FEATURE_NOT_SUPPORTED_1 + +SELECT INTERVAL '1' DAY / INTERVAL '0' DAY; +> exception DIVISION_BY_ZERO_1 + +CALL CAST(INTERVAL '999999999999999998.999999999' SECOND AS INTERVAL SECOND(18)); +>> INTERVAL '999999999999999999' SECOND + +CALL CAST(INTERVAL '999999999999999999.999999999' SECOND AS INTERVAL SECOND(18)); +>> INTERVAL '999999999999999999.999999' SECOND + +CALL CAST(INTERVAL '999999999999999998 23:59:59.999999999' DAY TO SECOND AS INTERVAL DAY(18) TO SECOND); +>> INTERVAL '999999999999999999 00:00:00' DAY TO SECOND + +CALL CAST(INTERVAL '999999999999999999 23:59:59.999999999' DAY TO SECOND AS INTERVAL DAY(18) TO SECOND); +>> INTERVAL '999999999999999999 23:59:59.999999' DAY TO SECOND + +CALL CAST(INTERVAL '999999999999999998:59:59.999999999' HOUR TO SECOND AS INTERVAL HOUR(18) TO SECOND); +>> INTERVAL '999999999999999999:00:00' HOUR TO SECOND + +CALL CAST(INTERVAL '999999999999999999:59:59.999999999' HOUR TO SECOND AS INTERVAL HOUR(18) TO SECOND); +>> INTERVAL '999999999999999999:59:59.999999' HOUR TO SECOND + +CALL CAST(INTERVAL '999999999999999998:59.999999999' MINUTE TO SECOND AS INTERVAL MINUTE(18) TO SECOND); +>> INTERVAL '999999999999999999:00' MINUTE TO SECOND + +CALL CAST(INTERVAL '999999999999999999:59.999999999' MINUTE TO SECOND AS INTERVAL MINUTE(18) TO SECOND); +>> INTERVAL '999999999999999999:59.999999' MINUTE TO SECOND + +CALL CAST(INTERVAL '99' DAY AS INTERVAL DAY); +>> INTERVAL '99' DAY + +CALL CAST(INTERVAL '-99' DAY AS INTERVAL DAY); +>> INTERVAL '-99' DAY + +CALL CAST(INTERVAL '100' 
DAY AS INTERVAL DAY); +> exception VALUE_TOO_LONG_2 + +CALL CAST(INTERVAL '-100' DAY AS INTERVAL DAY); +> exception VALUE_TOO_LONG_2 + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00'); +>> INTERVAL '7180 09:30:00' DAY TO SECOND + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR; +> exception VALUE_TOO_LONG_2 + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR(6); +>> INTERVAL '172329' HOUR + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - INTERVAL '1' YEAR) YEAR; +> exception SYNTAX_ERROR_2 + +SELECT (INTERVAL '10' HOUR - INTERVAL '1' HOUR) HOUR; +> exception SYNTAX_ERROR_2 + +SELECT (10 - 2) SECOND; +> exception SYNTAX_ERROR_2 + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR TO SECOND; +> exception VALUE_TOO_LONG_2 + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR(6) TO SECOND; +>> INTERVAL '172329:30:00' HOUR TO SECOND + +EXPLAIN SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR TO SECOND; +>> SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR TO SECOND + +EXPLAIN SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR TO SECOND(9); +>> SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR TO SECOND(9) + +CREATE TABLE TEST(S VARCHAR) AS VALUES '1'; +> ok + +SELECT S DAY FROM TEST; +>> INTERVAL '1' DAY + +EXPLAIN SELECT S DAY FROM TEST; +>> SELECT CAST("S" AS INTERVAL DAY) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +SELECT CAST(10 AS INTERVAL YEAR); +>> INTERVAL '10' YEAR + +SELECT CAST(INTERVAL '10' YEAR AS INTEGER); +>> 10 + +SELECT CAST(-10 AS INTERVAL YEAR); +>> INTERVAL '-10' YEAR + +SELECT CAST(INTERVAL '-10' YEAR AS INTEGER); +>> -10 + +SELECT CAST(10::BIGINT AS INTERVAL YEAR); +>> INTERVAL '10' YEAR + +SELECT CAST(INTERVAL '10' YEAR AS BIGINT); +>> 10 + +SELECT 
CAST(INTERVAL '10' YEAR AS SMALLINT); +>> 10 + +SELECT CAST(INTERVAL '10' YEAR AS TINYINT); +>> 10 + +SELECT CAST(10::DOUBLE AS INTERVAL YEAR); +>> INTERVAL '10' YEAR + +SELECT CAST(INTERVAL '10' YEAR AS REAL); +>> 10.0 + +SELECT CAST(INTERVAL '10' YEAR AS DOUBLE); +>> 10.0 + +SELECT CAST(INTERVAL '10' YEAR AS NUMERIC); +>> 10 + +SELECT CAST(INTERVAL '-10' YEAR AS NUMERIC); +>> -10 + +SELECT CAST(10.123456789123456789 AS INTERVAL YEAR); +>> INTERVAL '10' YEAR + +SELECT CAST(10 AS INTERVAL MONTH); +>> INTERVAL '10' MONTH + +SELECT CAST(INTERVAL '10' MONTH AS NUMERIC); +>> 10 + +SELECT CAST(10.123456789123456789 AS INTERVAL MONTH); +>> INTERVAL '10' MONTH + +SELECT CAST(10 AS INTERVAL DAY); +>> INTERVAL '10' DAY + +SELECT CAST(INTERVAL '10' DAY AS NUMERIC); +>> 10 + +SELECT CAST(-10 AS INTERVAL DAY); +>> INTERVAL '-10' DAY + +SELECT CAST(10.123456789123456789 AS INTERVAL DAY); +>> INTERVAL '10' DAY + +SELECT CAST(10 AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST(INTERVAL '10' HOUR AS NUMERIC); +>> 10 + +SELECT CAST(10::BIGINT AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST(10::DOUBLE AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST(10.123456789123456789 AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST(10 AS INTERVAL MINUTE); +>> INTERVAL '10' MINUTE + +SELECT CAST(INTERVAL '10' MINUTE AS NUMERIC); +>> 10 + +SELECT CAST(10.123456789123456789 AS INTERVAL MINUTE); +>> INTERVAL '10' MINUTE + +SELECT CAST(10 AS INTERVAL SECOND); +>> INTERVAL '10' SECOND + +SELECT CAST(INTERVAL '10' SECOND AS NUMERIC); +>> 10 + +SELECT CAST(10.123456789123456789 AS INTERVAL SECOND); +>> INTERVAL '10.123457' SECOND + +SELECT CAST(INTERVAL '10.123457' SECOND AS INT); +>> 10 + +SELECT CAST(INTERVAL '10.123457' SECOND AS NUMERIC(8, 6)); +>> 10.123457 + +SELECT CAST(10 AS INTERVAL YEAR TO MONTH); +>> INTERVAL '10-0' YEAR TO MONTH + +SELECT CAST(10::DOUBLE AS INTERVAL YEAR TO MONTH); +>> INTERVAL '10-0' YEAR TO MONTH + +SELECT CAST(10.123456789123456789 AS 
INTERVAL YEAR TO MONTH); +>> INTERVAL '10-1' YEAR TO MONTH + +SELECT CAST(INTERVAL '10-1' YEAR TO MONTH AS NUMERIC(4, 2)); +>> 10.08 + +SELECT CAST(10 AS INTERVAL DAY TO HOUR); +>> INTERVAL '10 00' DAY TO HOUR + +SELECT CAST(10::DOUBLE AS INTERVAL DAY TO HOUR); +>> INTERVAL '10 00' DAY TO HOUR + +SELECT CAST(10.123456789123456789 AS INTERVAL DAY TO HOUR); +>> INTERVAL '10 02' DAY TO HOUR + +SELECT CAST(INTERVAL '10 02' DAY TO HOUR AS NUMERIC(4, 2)); +>> 10.08 + +SELECT CAST(INTERVAL '-10 02' DAY TO HOUR AS NUMERIC(4, 2)); +>> -10.08 + +SELECT CAST(10 AS INTERVAL DAY TO MINUTE); +>> INTERVAL '10 00:00' DAY TO MINUTE + +SELECT CAST(10.123456789123456789 AS INTERVAL DAY TO MINUTE); +>> INTERVAL '10 02:57' DAY TO MINUTE + +SELECT CAST(INTERVAL '10 02:57' DAY TO MINUTE AS NUMERIC(6, 4)); +>> 10.1229 + +SELECT CAST(10 AS INTERVAL DAY TO SECOND); +>> INTERVAL '10 00:00:00' DAY TO SECOND + +SELECT CAST(10.123456789123456789 AS INTERVAL DAY TO SECOND); +>> INTERVAL '10 02:57:46.66658' DAY TO SECOND + +SELECT CAST(INTERVAL '10 02:57:46.66658' DAY TO SECOND AS NUMERIC(16, 14)); +>> 10.12345678912037 + +SELECT CAST(10 AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '10:00' HOUR TO MINUTE + +SELECT CAST(10.123456789123456789 AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '10:07' HOUR TO MINUTE + +SELECT CAST(INTERVAL '10:07' HOUR TO MINUTE AS NUMERIC(4, 2)); +>> 10.12 + +SELECT CAST(10 AS INTERVAL HOUR TO SECOND); +>> INTERVAL '10:00:00' HOUR TO SECOND + +SELECT CAST(10.123456789123456789 AS INTERVAL HOUR TO SECOND); +>> INTERVAL '10:07:24.444441' HOUR TO SECOND + +SELECT CAST(INTERVAL '10:07:24.444441' HOUR TO SECOND AS NUMERIC(15, 13)); +>> 10.1234567891667 + +SELECT CAST(10 AS INTERVAL MINUTE TO SECOND); +>> INTERVAL '10:00' MINUTE TO SECOND + +SELECT CAST(10.123456789123456789 AS INTERVAL MINUTE TO SECOND); +>> INTERVAL '10:07.407407' MINUTE TO SECOND + +SELECT CAST(INTERVAL '10:07.407407' MINUTE TO SECOND AS NUMERIC(13, 11)); +>> 10.12345678333 + +-- H2 uses 1970-01-01 as start 
datetime + +SELECT TIMESTAMP '2001-01-05 10:30:00' - TIME '11:45:30.5'; +>> INTERVAL '11326 22:44:29.5' DAY TO SECOND + +SELECT TIME '11:45:30.5' - TIMESTAMP '2001-01-05 10:30:00'; +>> INTERVAL '-11326 22:44:29.5' DAY TO SECOND + +EXPLAIN VALUES INTERVAL '1' DAY; +>> VALUES (INTERVAL '1' DAY) + +SELECT CAST(INTERVAL '1000000000000000' MINUTE AS BIGINT); +>> 1000000000000000 + +SELECT CAST(INTERVAL '999999999999999999:30' HOUR TO SECOND AS NUMERIC); +>> 1000000000000000000 + +SELECT CAST(INTERVAL '999999999999999999:30' HOUR TO SECOND AS NUMERIC(20, 1)); +>> 999999999999999999.5 + +SELECT CAST(INTERVAL '999999999999999999:30' HOUR TO MINUTE AS BIGINT); +>> 1000000000000000000 + +SELECT D1, D2, (D1 - D2) YEAR TO MONTH, (D2 - D1) YEAR TO MONTH FROM (VALUES + (DATE '1999-05-12', DATE '2020-05-11'), + (DATE '1999-05-12', DATE '2020-05-12'), + (DATE '1999-05-12', DATE '2020-05-13') +) T(D1, D2); +> D1 D2 (D1 - D2) YEAR TO MONTH (D2 - D1) YEAR TO MONTH +> ---------- ---------- ------------------------------- ------------------------------ +> 1999-05-12 2020-05-11 INTERVAL '-20-11' YEAR TO MONTH INTERVAL '20-11' YEAR TO MONTH +> 1999-05-12 2020-05-12 INTERVAL '-21-0' YEAR TO MONTH INTERVAL '21-0' YEAR TO MONTH +> 1999-05-12 2020-05-13 INTERVAL '-21-0' YEAR TO MONTH INTERVAL '21-0' YEAR TO MONTH +> rows: 3 + +SELECT T1, T2, (T1 - T2) YEAR TO MONTH, (T2 - T1) YEAR TO MONTH FROM (VALUES + (TIMESTAMP '1999-05-12 12:00:00', TIMESTAMP '2020-05-12 11:00:00'), + (TIMESTAMP '1999-05-12 12:00:00', TIMESTAMP '2020-05-12 12:00:00'), + (TIMESTAMP '1999-05-12 12:00:00', TIMESTAMP '2020-05-12 13:00:00') +) T(T1, T2); +> T1 T2 (T1 - T2) YEAR TO MONTH (T2 - T1) YEAR TO MONTH +> ------------------- ------------------- ------------------------------- ------------------------------ +> 1999-05-12 12:00:00 2020-05-12 11:00:00 INTERVAL '-20-11' YEAR TO MONTH INTERVAL '20-11' YEAR TO MONTH +> 1999-05-12 12:00:00 2020-05-12 12:00:00 INTERVAL '-21-0' YEAR TO MONTH INTERVAL '21-0' YEAR TO MONTH +> 
1999-05-12 12:00:00 2020-05-12 13:00:00 INTERVAL '-21-0' YEAR TO MONTH INTERVAL '21-0' YEAR TO MONTH +> rows: 3 + +SELECT (DATE '2010-01-02' - DATE '2000-01-01') YEAR; +>> INTERVAL '10' YEAR diff --git a/h2/src/test/org/h2/test/scripts/datatypes/java_object.sql b/h2/src/test/org/h2/test/scripts/datatypes/java_object.sql new file mode 100644 index 0000000000..bbe0f8ece9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/java_object.sql @@ -0,0 +1,53 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +EXPLAIN VALUES CAST(X'' AS JAVA_OBJECT); +>> VALUES (CAST(X'' AS JAVA_OBJECT)) + +VALUES CAST(CAST(X'00' AS JAVA_OBJECT) AS VARCHAR(2)); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST(CAST(X'00' AS JAVA_OBJECT) AS CHAR(2)); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST('00' AS JAVA_OBJECT); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST(CAST('00' AS CHAR(2)) AS JAVA_OBJECT); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST(X'0000' AS JAVA_OBJECT(1)); +> exception VALUE_TOO_LONG_2 + +VALUES CAST(CAST (X'0000' AS JAVA_OBJECT) AS JAVA_OBJECT(1)); +> exception VALUE_TOO_LONG_2 + +CREATE TABLE T(C JAVA_OBJECT(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T1(A JAVA_OBJECT(1048576)); +> ok + +CREATE TABLE T2(A JAVA_OBJECT(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A JAVA_OBJECT(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_OCTET_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_OCTET_LENGTH +> ---------- ---------------------- +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/json.sql b/h2/src/test/org/h2/test/scripts/datatypes/json.sql new file mode 100644 index 0000000000..4bf8ece132 --- /dev/null 
+++ b/h2/src/test/org/h2/test/scripts/datatypes/json.sql @@ -0,0 +1,360 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT '{"tag1":"simple string"}' FORMAT JSON; +>> {"tag1":"simple string"} + +SELECT CAST('{"tag1":"simple string"}' FORMAT JSON AS JSON); +>> {"tag1":"simple string"} + +SELECT CAST('text' AS JSON); +>> "text" + +SELECT X'31' FORMAT JSON; +>> 1 + +SELECT 0::JSON; +>> 0 + +SELECT '0' FORMAT JSON; +>> 0 + +SELECT JSON '1', JSON X'31', JSON '1' IS OF (JSON), JSON X'31' IS OF (JSON); +> JSON '1' JSON '1' TRUE TRUE +> -------- -------- ---- ---- +> 1 1 TRUE TRUE +> rows: 1 + +SELECT JSON 'tr' 'ue', JSON X'7472' '7565', JSON 'tr' 'ue' IS OF (JSON), JSON X'7472' '7565' IS OF (JSON); +> JSON 'true' JSON 'true' TRUE TRUE +> ----------- ----------- ---- ---- +> true true TRUE TRUE +> rows: 1 + +SELECT 1::JSON; +>> 1 + +SELECT 1L::JSON; +>> 1 + +SELECT 1000000000000L::JSON; +>> 1000000000000 + +SELECT CAST(1e100::FLOAT AS JSON); +>> 1.0E100 + +SELECT CAST(1e100::DOUBLE AS JSON); +>> 1.0E100 + +SELECT CAST(1e100 AS JSON); +>> 1E100 + +SELECT CAST(TRUE AS JSON); +>> true + +SELECT CAST('true' FORMAT JSON AS JSON); +>> true + +SELECT CAST(FALSE AS JSON); +>> false + +SELECT CAST('false' FORMAT JSON AS JSON); +>> false + +SELECT CAST('null' FORMAT JSON AS JSON); +>> null + +SELECT CAST('10' FORMAT JSON AS VARBINARY); +>> X'3130' + +SELECT CAST('10' FORMAT JSON AS BLOB); +>> X'3130' + +CREATE TABLE TEST (ID INT, DATA JSON); +> ok + +INSERT INTO TEST VALUES +(1, '{"tag1":"simple string", "tag2": 333, "tag3":[1, 2, 3]}' format json), +(2, '{"tag1":"another string", "tag4":{"lvl1":"lvl2"}}' format json), +(3, '["string", 5555, {"arr":"yes"}]' format json), +(4, '{"1":"val1"}' format json); +> update count: 4 + +@reconnect + +SELECT ID, DATA FROM TEST; +> ID DATA +> -- -------------------------------------------------- +> 1 
{"tag1":"simple string","tag2":333,"tag3":[1,2,3]} +> 2 {"tag1":"another string","tag4":{"lvl1":"lvl2"}} +> 3 ["string",5555,{"arr":"yes"}] +> 4 {"1":"val1"} +> rows: 4 + +INSERT INTO TEST VALUES (5, '}' FORMAT JSON); +> exception DATA_CONVERSION_ERROR_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, S VARCHAR, B VARBINARY, J JSON) AS VALUES + (1, '{"a":1,"a":2}', STRINGTOUTF8('{"a":1,"a":2}'), '{"a":1,"a":2}' FORMAT JSON), + (2, '{"a":1,"b":2}', STRINGTOUTF8('{"a":1,"b":2}'), '{"a":1,"b":2}' FORMAT JSON), + (3, '{"a":1,"b":2', STRINGTOUTF8('{"a":1,"b":2'), null), + (4, null, null, null); +> ok + +SELECT S IS JSON, B IS JSON WITHOUT UNIQUE, J IS JSON WITHOUT UNIQUE KEYS FROM TEST ORDER BY ID; +> S IS JSON B IS JSON J IS JSON +> --------- --------- --------- +> TRUE TRUE TRUE +> TRUE TRUE TRUE +> FALSE FALSE null +> null null null +> rows (ordered): 4 + +SELECT S IS NOT JSON, B IS NOT JSON WITHOUT UNIQUE, J IS NOT JSON WITHOUT UNIQUE KEYS FROM TEST ORDER BY ID; +> S IS NOT JSON B IS NOT JSON J IS NOT JSON +> ------------- ------------- ------------- +> FALSE FALSE FALSE +> FALSE FALSE FALSE +> TRUE TRUE null +> null null null +> rows (ordered): 4 + +SELECT S IS JSON WITH UNIQUE KEYS, B IS JSON WITH UNIQUE, J IS JSON WITH UNIQUE KEYS FROM TEST ORDER BY ID; +> S IS JSON WITH UNIQUE KEYS B IS JSON WITH UNIQUE KEYS J IS JSON WITH UNIQUE KEYS +> -------------------------- -------------------------- -------------------------- +> FALSE FALSE FALSE +> TRUE TRUE TRUE +> FALSE FALSE null +> null null null +> rows (ordered): 4 + +SELECT S IS NOT JSON WITH UNIQUE KEYS, B IS NOT JSON WITH UNIQUE, J IS NOT JSON WITH UNIQUE KEYS FROM TEST ORDER BY ID; +> S IS NOT JSON WITH UNIQUE KEYS B IS NOT JSON WITH UNIQUE KEYS J IS NOT JSON WITH UNIQUE KEYS +> ------------------------------ ------------------------------ ------------------------------ +> TRUE TRUE TRUE +> FALSE FALSE FALSE +> TRUE TRUE null +> null null null +> rows (ordered): 4 + +DROP TABLE TEST; +> ok + +SELECT 1 
IS JSON; +>> FALSE + +SELECT 1 IS NOT JSON; +>> TRUE + +CREATE TABLE TEST(ID INT, S VARCHAR) AS VALUES + (1, '[{"a":1}]'), (2, '{"a":[3]}'), + (3, 'null'), (4, '{"a":1,"a":2}'), + (5, 'X'), (6, NULL); +> ok + +EXPLAIN SELECT S FORMAT JSON FORMAT JSON, (S FORMAT JSON) FORMAT JSON FROM TEST; +>> SELECT "S" FORMAT JSON, "S" FORMAT JSON FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +ALTER TABLE TEST ADD J JSON; +> ok + +UPDATE TEST SET J = S FORMAT JSON WHERE S IS JSON; +> update count: 4 + +SELECT S IS JSON, S IS JSON VALUE, S IS JSON ARRAY, S IS JSON OBJECT, S IS JSON SCALAR FROM TEST ORDER BY ID; +> S IS JSON S IS JSON S IS JSON ARRAY S IS JSON OBJECT S IS JSON SCALAR +> --------- --------- --------------- ---------------- ---------------- +> TRUE TRUE TRUE FALSE FALSE +> TRUE TRUE FALSE TRUE FALSE +> TRUE TRUE FALSE FALSE TRUE +> TRUE TRUE FALSE TRUE FALSE +> FALSE FALSE FALSE FALSE FALSE +> null null null null null +> rows (ordered): 6 + +SELECT J IS JSON, J IS JSON VALUE, J IS JSON ARRAY, J IS JSON OBJECT, J IS JSON SCALAR FROM TEST ORDER BY ID; +> J IS JSON J IS JSON J IS JSON ARRAY J IS JSON OBJECT J IS JSON SCALAR +> --------- --------- --------------- ---------------- ---------------- +> TRUE TRUE TRUE FALSE FALSE +> TRUE TRUE FALSE TRUE FALSE +> TRUE TRUE FALSE FALSE TRUE +> TRUE TRUE FALSE TRUE FALSE +> null null null null null +> null null null null null +> rows (ordered): 6 + +SELECT J IS JSON WITH UNIQUE KEYS, J IS JSON VALUE WITH UNIQUE KEYS, J IS JSON ARRAY WITH UNIQUE KEYS, + J IS JSON OBJECT WITH UNIQUE KEYS, J IS JSON SCALAR WITH UNIQUE KEYS FROM TEST ORDER BY ID; +> J IS JSON WITH UNIQUE KEYS J IS JSON WITH UNIQUE KEYS J IS JSON ARRAY WITH UNIQUE KEYS J IS JSON OBJECT WITH UNIQUE KEYS J IS JSON SCALAR WITH UNIQUE KEYS +> -------------------------- -------------------------- -------------------------------- --------------------------------- --------------------------------- +> TRUE TRUE TRUE FALSE FALSE +> TRUE TRUE FALSE TRUE FALSE +> TRUE 
TRUE FALSE FALSE TRUE +> FALSE FALSE FALSE FALSE FALSE +> null null null null null +> null null null null null +> rows (ordered): 6 + +SELECT S IS NOT JSON, S IS NOT JSON VALUE, S IS NOT JSON ARRAY, S IS NOT JSON OBJECT, S IS NOT JSON SCALAR + FROM TEST ORDER BY ID; +> S IS NOT JSON S IS NOT JSON S IS NOT JSON ARRAY S IS NOT JSON OBJECT S IS NOT JSON SCALAR +> ------------- ------------- ------------------- -------------------- -------------------- +> FALSE FALSE FALSE TRUE TRUE +> FALSE FALSE TRUE FALSE TRUE +> FALSE FALSE TRUE TRUE FALSE +> FALSE FALSE TRUE FALSE TRUE +> TRUE TRUE TRUE TRUE TRUE +> null null null null null +> rows (ordered): 6 + +SELECT NOT S IS NOT JSON, NOT S IS NOT JSON VALUE, NOT S IS NOT JSON ARRAY, NOT S IS NOT JSON OBJECT, + NOT S IS NOT JSON SCALAR FROM TEST ORDER BY ID; +> S IS JSON S IS JSON S IS JSON ARRAY S IS JSON OBJECT S IS JSON SCALAR +> --------- --------- --------------- ---------------- ---------------- +> TRUE TRUE TRUE FALSE FALSE +> TRUE TRUE FALSE TRUE FALSE +> TRUE TRUE FALSE FALSE TRUE +> TRUE TRUE FALSE TRUE FALSE +> FALSE FALSE FALSE FALSE FALSE +> null null null null null +> rows (ordered): 6 + +DROP TABLE TEST; +> ok + +SELECT NULL FORMAT JSON, (NULL FORMAT JSON) IS NULL; +> JSON 'null' FALSE +> ----------- ----- +> null FALSE +> rows: 1 + +CREATE MEMORY TABLE TEST(J JSON) AS VALUES ('["\u00A7''",{}]' FORMAT JSON); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ---------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "J" JSON ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (JSON '["\u00a7\u0027",{}]'); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok + +CREATE TABLE T(C JSON(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST(J JSON(3)); +> ok + +INSERT INTO TEST VALUES JSON '[1]'; +> update count: 1 + +INSERT INTO TEST VALUES JSON 'null'; +> exception 
VALUE_TOO_LONG_2 + +DROP TABLE TEST; +> ok + +SELECT CAST(JSON 'null' AS JSON(3)); +> exception VALUE_TOO_LONG_2 + +CREATE TABLE TEST(J JSONB); +> exception UNKNOWN_DATA_TYPE_1 + +SET MODE PostgreSQL; +> ok + +CREATE TABLE TEST(J JSONB); +> ok + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok + +EXPLAIN SELECT A IS JSON AND B IS JSON FROM (VALUES (JSON 'null', 1)) T(A, B); +>> SELECT ("A" IS JSON) AND ("B" IS JSON) FROM (VALUES (JSON 'null', 1)) "T"("A", "B") /* table scan */ + +CREATE TABLE T1(A JSON(1048576)); +> ok + +CREATE TABLE T2(A JSON(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A JSON(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_OCTET_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_OCTET_LENGTH +> ---------- ---------------------- +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok + +SELECT JSON_OBJECT( + 'CHAR' : CAST('C' AS CHAR), + 'VARCHAR' : 'C', + 'CLOB' : CAST('C' AS CLOB), + 'IGNORECASE' : CAST('C' AS VARCHAR_IGNORECASE)); +>> {"CHAR":"C","VARCHAR":"C","CLOB":"C","IGNORECASE":"C"} + +SELECT JSON_OBJECT( + 'BINARY' : CAST(X'7b7d' AS BINARY(2)), + 'VARBINARY' : CAST(X'7b7d' AS VARBINARY), + 'BLOB' : CAST(X'7b7d' AS BLOB)); +>> {"BINARY":{},"VARBINARY":{},"BLOB":{}} + +SELECT CAST(TRUE AS JSON); +>> true + +SELECT JSON_OBJECT( + 'TINYINT' : CAST(1 AS TINYINT), + 'SMALLINT' : CAST(2 AS SMALLINT), + 'INTEGER' : 3, + 'BIGINT' : 4L, + 'NUMERIC' : 1.1, + 'REAL' : CAST(1.2 AS REAL), + 'DOUBLE' : CAST(1.3 AS DOUBLE), + 'DECFLOAT' : 1e-1); +>> {"TINYINT":1,"SMALLINT":2,"INTEGER":3,"BIGINT":4,"NUMERIC":1.1,"REAL":1.2,"DOUBLE":1.3,"DECFLOAT":0.1} + +SELECT JSON_OBJECT( + 'DATE' : DATE '2001-01-31', + 'TIME' : TIME '10:00:00.123456789', + 'TIME_TZ' : TIME WITH TIME ZONE '10:00:00.123456789+10:00'); +>> {"DATE":"2001-01-31","TIME":"10:00:00.123456789","TIME_TZ":"10:00:00.123456789+10"} + 
+SELECT JSON_OBJECT( + 'TIMESTAMP' : TIMESTAMP '2001-01-31 10:00:00.123456789', + 'TIMESTAMP_TZ' : TIMESTAMP WITH TIME ZONE '2001-01-31 10:00:00.123456789+10:00'); +>> {"TIMESTAMP":"2001-01-31T10:00:00.123456789","TIMESTAMP_TZ":"2001-01-31T10:00:00.123456789+10"} + +SELECT JSON_OBJECT( + 'GEOMETRY' : GEOMETRY 'POINT (1 2)', + 'JSON' : JSON '[]', + 'UUID' : UUID '01234567-89ab-cdef-fedc-ba9876543210'); +>> {"GEOMETRY":{"type":"Point","coordinates":[1,2]},"JSON":[],"UUID":"01234567-89ab-cdef-fedc-ba9876543210"} + +SELECT CAST(ARRAY[JSON '[]', JSON '{}'] AS JSON); +>> [[],{}] + +SELECT CAST(ARRAY[1, 2] AS JSON); +>> [1,2] diff --git a/h2/src/test/org/h2/test/scripts/datatypes/numeric.sql b/h2/src/test/org/h2/test/scripts/datatypes/numeric.sql new file mode 100644 index 0000000000..43536cefb0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/numeric.sql @@ -0,0 +1,188 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST( + N1 NUMERIC, N2 NUMERIC(10), N3 NUMERIC(10, 0), N4 NUMERIC(10, 2), + D1 DECIMAL, D2 DECIMAL(10), D3 DECIMAL(10, 0), D4 DECIMAL(10, 2), D5 DEC, + X1 NUMBER(10), X2 NUMBER(10, 2)); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ----------- --------- ----------------- ----------------------- ------------- ------------------ -------------------------- ---------------------- +> N1 NUMERIC 100000 10 0 NUMERIC null null +> N2 NUMERIC 10 10 0 NUMERIC 10 null +> N3 NUMERIC 10 10 0 NUMERIC 10 0 +> N4 NUMERIC 10 10 2 NUMERIC 10 2 +> D1 NUMERIC 100000 10 0 DECIMAL null null +> D2 NUMERIC 10 10 0 DECIMAL 10 null +> D3 NUMERIC 10 10 0 DECIMAL 10 0 +> D4 NUMERIC 10 10 2 DECIMAL 10 2 +> D5 NUMERIC 100000 10 0 DECIMAL null null +> X1 NUMERIC 10 10 0 NUMERIC 10 null +> X2 NUMERIC 10 10 2 NUMERIC 10 2 +> rows (ordered): 11 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(N NUMERIC(2, -1)); +> exception INVALID_VALUE_SCALE + +CREATE TABLE TEST(ID INT, X1 BIT, XT TINYINT, X_SM SMALLINT, XB BIGINT, XD DECIMAL(10,2), XD2 DOUBLE PRECISION, XR REAL); +> ok + +INSERT INTO TEST VALUES(?, ?, ?, ?, ?, ?, ?, ?); +{ +0,FALSE,0,0,0,0.0,0.0,0.0 +1,TRUE,1,1,1,1.0,1.0,1.0 +4,TRUE,4,4,4,4.0,4.0,4.0 +-1,FALSE,-1,-1,-1,-1.0,-1.0,-1.0 +NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL +}; +> update count: 5 + +SELECT ID, CAST(XT AS NUMBER(10,1)), +CAST(X_SM AS NUMBER(10,1)), CAST(XB AS NUMBER(10,1)), CAST(XD AS NUMBER(10,1)), +CAST(XD2 AS NUMBER(10,1)), CAST(XR AS NUMBER(10,1)) FROM TEST; +> ID CAST(XT AS NUMERIC(10, 1)) CAST(X_SM AS NUMERIC(10, 1)) CAST(XB AS NUMERIC(10, 1)) CAST(XD AS 
NUMERIC(10, 1)) CAST(XD2 AS NUMERIC(10, 1)) CAST(XR AS NUMERIC(10, 1)) +> ---- -------------------------- ---------------------------- -------------------------- -------------------------- --------------------------- -------------------------- +> -1 -1.0 -1.0 -1.0 -1.0 -1.0 -1.0 +> 0 0.0 0.0 0.0 0.0 0.0 0.0 +> 1 1.0 1.0 1.0 1.0 1.0 1.0 +> 4 4.0 4.0 4.0 4.0 4.0 4.0 +> null null null null null null null +> rows: 5 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I NUMERIC(-1)); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST(I NUMERIC(-1, -1)); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST (N NUMERIC(3, 1)) AS VALUES (0), (0.0), (NULL); +> ok + +SELECT * FROM TEST; +> N +> ---- +> 0.0 +> 0.0 +> null +> rows: 3 + +DROP TABLE TEST; +> ok + +SELECT CAST(10000 AS NUMERIC(5)); +>> 10000 + +CREATE DOMAIN N AS NUMERIC(10, 1); +> ok + +CREATE TABLE TEST(V N); +> ok + +SELECT NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'V'; +>> 1 + +DROP TABLE TEST; +> ok + +DROP DOMAIN N; +> ok + +CREATE TABLE TEST(I INT PRIMARY KEY, V NUMERIC(1, 3)); +> ok + +INSERT INTO TEST VALUES (1, 1e-3), (2, 1.1e-3), (3, 1e-4); +> update count: 3 + +INSERT INTO TEST VALUES (4, 1e-2); +> exception VALUE_TOO_LONG_2 + +TABLE TEST; +> I V +> - ----- +> 1 0.001 +> 2 0.001 +> 3 0.000 +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I INT PRIMARY KEY, V NUMERIC(2)); +> ok + +INSERT INTO TEST VALUES (1, 1e-1), (2, 2e0), (3, 3e1); +> update count: 3 + +TABLE TEST; +> I V +> - -- +> 1 0 +> 2 2 +> 3 30 +> rows: 3 + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES (CAST(-9223372036854775808 AS NUMERIC(19)), CAST(9223372036854775807 AS NUMERIC(19)), 1.0, -9223372036854775809, + 9223372036854775808); +>> VALUES (CAST(-9223372036854775808 AS NUMERIC(19)), CAST(9223372036854775807 AS NUMERIC(19)), 1.0, -9223372036854775809, 9223372036854775808) + +CREATE TABLE T(C NUMERIC(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T1(A NUMERIC(100000)); +> ok + +CREATE TABLE T2(A NUMERIC(100001)); 
+> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A NUMERIC(100001)); +> ok + +SELECT TABLE_NAME, NUMERIC_PRECISION, DECLARED_NUMERIC_PRECISION FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME NUMERIC_PRECISION DECLARED_NUMERIC_PRECISION +> ---------- ----------------- -------------------------- +> T1 100000 100000 +> T2 100000 100000 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok + +SET MODE Oracle; +> ok + +CREATE TABLE TEST(N NUMERIC(2, 1)); +> ok + +INSERT INTO TEST VALUES 20; +> exception VALUE_TOO_LONG_2 + +INSERT INTO TEST VALUES CAST(20 AS NUMERIC(2)); +> exception VALUE_TOO_LONG_2 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/other.sql b/h2/src/test/org/h2/test/scripts/datatypes/other.sql deleted file mode 100644 index 008a885806..0000000000 --- a/h2/src/test/org/h2/test/scripts/datatypes/other.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- diff --git a/h2/src/test/org/h2/test/scripts/datatypes/real.sql b/h2/src/test/org/h2/test/scripts/datatypes/real.sql index 142a199fc9..d3e350eb0c 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/real.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/real.sql @@ -1,31 +1,247 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -CREATE MEMORY TABLE TEST(D1 REAL, D2 FLOAT4, D3 FLOAT(0), D4 FLOAT(24)); +CREATE MEMORY TABLE TEST(D1 REAL, D2 FLOAT4, D3 FLOAT(1), D4 FLOAT(24)); > ok +ALTER TABLE TEST ADD COLUMN D5 FLOAT(0); +> exception INVALID_VALUE_PRECISION + ALTER TABLE TEST ADD COLUMN D5 FLOAT(-1); > exception INVALID_VALUE_2 -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE -> ----------- --------- --------- ----------- -> D1 7 REAL REAL -> D2 7 REAL FLOAT4 -> D3 7 REAL FLOAT(0) -> D4 7 REAL FLOAT(24) +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ----------- --------- ----------------- ----------------------- ------------- ------------------ -------------------------- ---------------------- +> D1 REAL 24 2 null REAL null null +> D2 REAL 24 2 null REAL null null +> D3 REAL 24 2 null FLOAT 1 null +> D4 REAL 24 2 null FLOAT 24 null > rows (ordered): 4 -SCRIPT NODATA NOPASSWORDS NOSETTINGS TABLE TEST; +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; > SCRIPT -> --------------------------------------------------------------------------------------------- +> ------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D1" REAL, "D2" REAL, "D3" FLOAT(1), "D4" FLOAT(24) ); > -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D1" REAL, "D2" FLOAT4, "D3" FLOAT(0), "D4" FLOAT(24) ); +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES CAST(0 AS REAL); +>> 
VALUES (CAST(0.0 AS REAL)) + +CREATE TABLE TEST(F REAL, I INT) AS VALUES (2000000000, 2000000001); +> ok + +SELECT F, I, F = I FROM TEST; +> F I F = I +> ----- ---------- ----- +> 2.0E9 2000000001 FALSE +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(D REAL) AS VALUES '-Infinity', '-1', '0', '1', '1.5', 'Infinity', 'NaN'; +> ok + +SELECT D, -D, SIGN(D) FROM TEST ORDER BY D; +> D - D SIGN(D) +> --------- --------- ------- +> -Infinity Infinity -1 +> -1.0 1.0 -1 +> 0.0 0.0 0 +> 1.0 -1.0 1 +> 1.5 -1.5 1 +> Infinity -Infinity 1 +> NaN NaN 0 +> rows (ordered): 7 + +SELECT A.D, B.D, A.D + B.D, A.D - B.D, A.D * B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D + B.D A.D - B.D A.D * B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity -Infinity NaN Infinity +> -Infinity -1.0 -Infinity -Infinity Infinity +> -Infinity 0.0 -Infinity -Infinity NaN +> -Infinity 1.0 -Infinity -Infinity -Infinity +> -Infinity 1.5 -Infinity -Infinity -Infinity +> -Infinity Infinity NaN -Infinity -Infinity +> -Infinity NaN NaN NaN NaN +> -1.0 -Infinity -Infinity Infinity Infinity +> -1.0 -1.0 -2.0 0.0 1.0 +> -1.0 0.0 -1.0 -1.0 0.0 +> -1.0 1.0 0.0 -2.0 -1.0 +> -1.0 1.5 0.5 -2.5 -1.5 +> -1.0 Infinity Infinity -Infinity -Infinity +> -1.0 NaN NaN NaN NaN +> 0.0 -Infinity -Infinity Infinity NaN +> 0.0 -1.0 -1.0 1.0 0.0 +> 0.0 0.0 0.0 0.0 0.0 +> 0.0 1.0 1.0 -1.0 0.0 +> 0.0 1.5 1.5 -1.5 0.0 +> 0.0 Infinity Infinity -Infinity NaN +> 0.0 NaN NaN NaN NaN +> 1.0 -Infinity -Infinity Infinity -Infinity +> 1.0 -1.0 0.0 2.0 -1.0 +> 1.0 0.0 1.0 1.0 0.0 +> 1.0 1.0 2.0 0.0 1.0 +> 1.0 1.5 2.5 -0.5 1.5 +> 1.0 Infinity Infinity -Infinity Infinity +> 1.0 NaN NaN NaN NaN +> 1.5 -Infinity -Infinity Infinity -Infinity +> 1.5 -1.0 0.5 2.5 -1.5 +> 1.5 0.0 1.5 1.5 0.0 +> 1.5 1.0 2.5 0.5 1.5 +> 1.5 1.5 3.0 0.0 2.25 +> 1.5 Infinity Infinity -Infinity Infinity +> 1.5 NaN NaN NaN NaN +> Infinity -Infinity NaN Infinity -Infinity +> Infinity -1.0 Infinity Infinity -Infinity +> 
Infinity 0.0 Infinity Infinity NaN +> Infinity 1.0 Infinity Infinity Infinity +> Infinity 1.5 Infinity Infinity Infinity +> Infinity Infinity Infinity NaN Infinity +> Infinity NaN NaN NaN NaN +> NaN -Infinity NaN NaN NaN +> NaN -1.0 NaN NaN NaN +> NaN 0.0 NaN NaN NaN +> NaN 1.0 NaN NaN NaN +> NaN 1.5 NaN NaN NaN +> NaN Infinity NaN NaN NaN +> NaN NaN NaN NaN NaN +> rows (ordered): 49 + +SELECT A.D, B.D, A.D / B.D, MOD(A.D, B.D) FROM TEST A JOIN TEST B WHERE B.D <> 0 ORDER BY A.D, B.D; +> D D A.D / B.D MOD(A.D, B.D) +> --------- --------- ---------- ------------- +> -Infinity -Infinity NaN NaN +> -Infinity -1.0 Infinity NaN +> -Infinity 1.0 -Infinity NaN +> -Infinity 1.5 -Infinity NaN +> -Infinity Infinity NaN NaN +> -Infinity NaN NaN NaN +> -1.0 -Infinity 0.0 -1.0 +> -1.0 -1.0 1.0 0.0 +> -1.0 1.0 -1.0 0.0 +> -1.0 1.5 -0.6666667 -1.0 +> -1.0 Infinity 0.0 -1.0 +> -1.0 NaN NaN NaN +> 0.0 -Infinity 0.0 0.0 +> 0.0 -1.0 0.0 0.0 +> 0.0 1.0 0.0 0.0 +> 0.0 1.5 0.0 0.0 +> 0.0 Infinity 0.0 0.0 +> 0.0 NaN NaN NaN +> 1.0 -Infinity 0.0 1.0 +> 1.0 -1.0 -1.0 0.0 +> 1.0 1.0 1.0 0.0 +> 1.0 1.5 0.6666667 1.0 +> 1.0 Infinity 0.0 1.0 +> 1.0 NaN NaN NaN +> 1.5 -Infinity 0.0 1.5 +> 1.5 -1.0 -1.5 0.5 +> 1.5 1.0 1.5 0.5 +> 1.5 1.5 1.0 0.0 +> 1.5 Infinity 0.0 1.5 +> 1.5 NaN NaN NaN +> Infinity -Infinity NaN NaN +> Infinity -1.0 -Infinity NaN +> Infinity 1.0 Infinity NaN +> Infinity 1.5 Infinity NaN +> Infinity Infinity NaN NaN +> Infinity NaN NaN NaN +> NaN -Infinity NaN NaN +> NaN -1.0 NaN NaN +> NaN 1.0 NaN NaN +> NaN 1.5 NaN NaN +> NaN Infinity NaN NaN +> NaN NaN NaN NaN +> rows (ordered): 42 + +SELECT A.D, B.D, A.D > B.D, A.D = B.D, A.D < B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D > B.D A.D = B.D A.D < B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity FALSE TRUE FALSE +> -Infinity -1.0 FALSE FALSE TRUE +> -Infinity 0.0 FALSE FALSE TRUE +> -Infinity 1.0 FALSE FALSE TRUE +> -Infinity 1.5 FALSE FALSE TRUE +> -Infinity Infinity FALSE FALSE TRUE 
+> -Infinity NaN FALSE FALSE TRUE +> -1.0 -Infinity TRUE FALSE FALSE +> -1.0 -1.0 FALSE TRUE FALSE +> -1.0 0.0 FALSE FALSE TRUE +> -1.0 1.0 FALSE FALSE TRUE +> -1.0 1.5 FALSE FALSE TRUE +> -1.0 Infinity FALSE FALSE TRUE +> -1.0 NaN FALSE FALSE TRUE +> 0.0 -Infinity TRUE FALSE FALSE +> 0.0 -1.0 TRUE FALSE FALSE +> 0.0 0.0 FALSE TRUE FALSE +> 0.0 1.0 FALSE FALSE TRUE +> 0.0 1.5 FALSE FALSE TRUE +> 0.0 Infinity FALSE FALSE TRUE +> 0.0 NaN FALSE FALSE TRUE +> 1.0 -Infinity TRUE FALSE FALSE +> 1.0 -1.0 TRUE FALSE FALSE +> 1.0 0.0 TRUE FALSE FALSE +> 1.0 1.0 FALSE TRUE FALSE +> 1.0 1.5 FALSE FALSE TRUE +> 1.0 Infinity FALSE FALSE TRUE +> 1.0 NaN FALSE FALSE TRUE +> 1.5 -Infinity TRUE FALSE FALSE +> 1.5 -1.0 TRUE FALSE FALSE +> 1.5 0.0 TRUE FALSE FALSE +> 1.5 1.0 TRUE FALSE FALSE +> 1.5 1.5 FALSE TRUE FALSE +> 1.5 Infinity FALSE FALSE TRUE +> 1.5 NaN FALSE FALSE TRUE +> Infinity -Infinity TRUE FALSE FALSE +> Infinity -1.0 TRUE FALSE FALSE +> Infinity 0.0 TRUE FALSE FALSE +> Infinity 1.0 TRUE FALSE FALSE +> Infinity 1.5 TRUE FALSE FALSE +> Infinity Infinity FALSE TRUE FALSE +> Infinity NaN FALSE FALSE TRUE +> NaN -Infinity TRUE FALSE FALSE +> NaN -1.0 TRUE FALSE FALSE +> NaN 0.0 TRUE FALSE FALSE +> NaN 1.0 TRUE FALSE FALSE +> NaN 1.5 TRUE FALSE FALSE +> NaN Infinity TRUE FALSE FALSE +> NaN NaN FALSE TRUE FALSE +> rows (ordered): 49 + +SELECT D, CAST(D AS DOUBLE PRECISION) D1, CAST(D AS DECFLOAT) D2 FROM TEST ORDER BY D; +> D D1 D2 +> --------- --------- --------- +> -Infinity -Infinity -Infinity +> -1.0 -1.0 -1 +> 0.0 0.0 0 +> 1.0 1.0 1 +> 1.5 1.5 1.5 +> Infinity Infinity Infinity +> NaN NaN NaN +> rows (ordered): 7 + +EXPLAIN SELECT CAST('Infinity' AS REAL), CAST('-Infinity' AS REAL), CAST('NaN' AS REAL), CAST(0 AS REAL); +>> SELECT CAST('Infinity' AS REAL), CAST('-Infinity' AS REAL), CAST('NaN' AS REAL), CAST(0.0 AS REAL) + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> 
----------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 3 +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D" REAL ); +> -- 7 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES ('-Infinity'), (-1.0), (0.0), (1.0), (1.5), ('Infinity'), ('NaN'); +> rows (ordered): 4 DROP TABLE TEST; > ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/row.sql b/h2/src/test/org/h2/test/scripts/datatypes/row.sql index 6c15f1ed9a..d1bd2443ee 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/row.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/row.sql @@ -1,8 +1,20 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- +SELECT (); +>> ROW () + +SELECT (1,); +> exception SYNTAX_ERROR_2 + +SELECT ROW (); +>> ROW () + +SELECT ROW (1,); +> exception SYNTAX_ERROR_2 + SELECT ROW (10); >> ROW (10) @@ -72,6 +84,133 @@ SELECT (1, ARRAY[1]) IN (SELECT 1, ARRAY[2]); SELECT (1, ARRAY[NULL]) IN (SELECT 1, ARRAY[NULL]); >> null +CREATE TABLE TEST (R ROW(A INT, B VARCHAR)); +> ok + +INSERT INTO TEST VALUES ((1, 2)); +> update count: 1 + +INSERT INTO TEST VALUES ((1, X'3341')); +> update count: 1 + +TABLE TEST; +> R +> ----------- +> ROW (1, 2) +> ROW (1, 3A) +> rows: 2 + +DROP TABLE TEST; +> ok + +SELECT CAST((1, 2.1) AS ROW(A INT, B INT)); +>> ROW (1, 2) + +SELECT CAST((1, 2.1) AS ROW(A INT, B INT, C INT)); +> exception DATA_CONVERSION_ERROR_1 + +SELECT CAST(1 AS ROW(V INT)); +>> ROW (1) + +SELECT CAST((1, 2) AS ROW(A INT, A INT)); +> exception DUPLICATE_COLUMN_NAME_1 + +CREATE DOMAIN D1 AS ROW(A INT); +> ok + +CREATE DOMAIN D2 AS BIGINT ARRAY; +> ok + +CREATE TABLE TEST(A ROW(A INT, B INT ARRAY[1]) ARRAY, B BIGINT 
ARRAY[2] ARRAY[3], C ROW(V BIGINT, A INT ARRAY), + D D1, E D2); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, DOMAIN_NAME, MAXIMUM_CARDINALITY, DTD_IDENTIFIER FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME DATA_TYPE DOMAIN_NAME MAXIMUM_CARDINALITY DTD_IDENTIFIER +> ----------- --------- ----------- ------------------- -------------- +> A ARRAY null 65536 1 +> B ARRAY null 3 2 +> C ROW null null 3 +> D ROW D1 null 4 +> E ARRAY D2 65536 5 +> rows: 5 + +SELECT OBJECT_NAME, OBJECT_TYPE, COLLECTION_TYPE_IDENTIFIER, DATA_TYPE, MAXIMUM_CARDINALITY, DTD_IDENTIFIER + FROM INFORMATION_SCHEMA.ELEMENT_TYPES; +> OBJECT_NAME OBJECT_TYPE COLLECTION_TYPE_IDENTIFIER DATA_TYPE MAXIMUM_CARDINALITY DTD_IDENTIFIER +> ----------- ----------- -------------------------- --------- ------------------- -------------- +> D2 DOMAIN TYPE BIGINT null TYPE_ +> TEST TABLE 1 ROW null 1_ +> TEST TABLE 1__2 INTEGER null 1__2_ +> TEST TABLE 2 ARRAY 2 2_ +> TEST TABLE 2_ BIGINT null 2__ +> TEST TABLE 3_2 INTEGER null 3_2_ +> TEST TABLE 5 BIGINT null 5_ +> rows: 7 + +SELECT OBJECT_NAME, OBJECT_TYPE, ROW_IDENTIFIER, FIELD_NAME, ORDINAL_POSITION, DATA_TYPE, MAXIMUM_CARDINALITY, + DTD_IDENTIFIER + FROM INFORMATION_SCHEMA.FIELDS; +> OBJECT_NAME OBJECT_TYPE ROW_IDENTIFIER FIELD_NAME ORDINAL_POSITION DATA_TYPE MAXIMUM_CARDINALITY DTD_IDENTIFIER +> ----------- ----------- -------------- ---------- ---------------- --------- ------------------- -------------- +> D1 DOMAIN TYPE A 1 INTEGER null TYPE_1 +> TEST TABLE 1_ A 1 INTEGER null 1__1 +> TEST TABLE 1_ B 2 ARRAY 1 1__2 +> TEST TABLE 3 A 2 ARRAY 65536 3_2 +> TEST TABLE 3 V 1 BIGINT null 3_1 +> TEST TABLE 4 A 1 INTEGER null 4_1 +> rows: 6 + +DROP TABLE TEST; +> ok + +DROP DOMAIN D1; +> ok + +DROP DOMAIN D2; +> ok + +@reconnect off + +CREATE LOCAL TEMPORARY TABLE TEST AS (SELECT ROW(1, 2) R); +> ok + +CREATE INDEX IDX ON TEST(R); +> ok + +DROP TABLE TEST; +> ok + +CREATE LOCAL TEMPORARY TABLE TEST(R ROW(C CLOB)); +> ok + +CREATE INDEX 
IDX ON TEST(R); +> exception FEATURE_NOT_SUPPORTED_1 + +DROP TABLE TEST; +> ok + +@reconnect on + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT (' || (SELECT LISTAGG('1') FROM SYSTEM_RANGE(1, 16384)) || ')'; +> ok + +DROP TABLE TEST; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT (' || (SELECT LISTAGG('1') FROM SYSTEM_RANGE(1, 16385)) || ')'; +> exception TOO_MANY_COLUMNS_1 + +EXECUTE IMMEDIATE 'CREATE TABLE TEST(R ROW(' || (SELECT LISTAGG('C' || X || ' INTEGER') FROM SYSTEM_RANGE(1, 16384)) || '))'; +> ok + +DROP TABLE TEST; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST(R ROW(' || (SELECT LISTAGG('C' || X || ' INTEGER') FROM SYSTEM_RANGE(1, 16385)) || '))'; +> exception TOO_MANY_COLUMNS_1 + -- The next tests should be at the of this file SET MAX_MEMORY_ROWS = 2; diff --git a/h2/src/test/org/h2/test/scripts/datatypes/smallint.sql b/h2/src/test/org/h2/test/scripts/datatypes/smallint.sql index 922bcfb3a4..53362fef48 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/smallint.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/smallint.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -13,3 +13,18 @@ SELECT CAST(-32768 AS SMALLINT) / CAST(1 AS SMALLINT); SELECT CAST(-32768 AS SMALLINT) / CAST(-1 AS SMALLINT); > exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +EXPLAIN VALUES CAST(1 AS SMALLINT); +>> VALUES (CAST(1 AS SMALLINT)) + +EXPLAIN VALUES CAST(1 AS YEAR); +> exception UNKNOWN_DATA_TYPE_1 + +SET MODE MySQL; +> ok + +EXPLAIN VALUES CAST(1 AS YEAR); +>> VALUES (CAST(1 AS SMALLINT)) + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/time-with-time-zone.sql b/h2/src/test/org/h2/test/scripts/datatypes/time-with-time-zone.sql new file mode 100644 index 0000000000..b400394075 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/time-with-time-zone.sql @@ -0,0 +1,98 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(T1 TIME WITH TIME ZONE, T2 TIME WITH TIME ZONE); +> ok + +INSERT INTO TEST(T1, T2) VALUES (TIME WITH TIME ZONE '10:00:00+01', TIME WITH TIME ZONE '11:00:00+02'); +> update count: 1 + +SELECT T1, T2, T1 = T2 FROM TEST; +> T1 T2 T1 = T2 +> ----------- ----------- ------- +> 10:00:00+01 11:00:00+02 TRUE +> rows: 1 + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- ------------------- +> T1 TIME WITH TIME ZONE +> T2 TIME WITH TIME ZONE +> rows (ordered): 2 + +ALTER TABLE TEST ADD (T3 TIME(0), T4 TIME(9) WITHOUT TIME ZONE); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE DATETIME_PRECISION +> ----------- ------------------- ------------------ +> T1 TIME WITH TIME ZONE 0 +> T2 TIME WITH TIME ZONE 0 +> T3 TIME 0 +> T4 TIME 9 +> rows (ordered): 4 + +ALTER TABLE TEST ADD T5 TIME(10); +> exception 
INVALID_VALUE_SCALE + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(T TIME WITH TIME ZONE, T0 TIME(0) WITH TIME ZONE, T1 TIME(1) WITH TIME ZONE, + T2 TIME(2) WITH TIME ZONE, T3 TIME(3) WITH TIME ZONE, T4 TIME(4) WITH TIME ZONE, T5 TIME(5) WITH TIME ZONE, + T6 TIME(6) WITH TIME ZONE, T7 TIME(7) WITH TIME ZONE, T8 TIME(8) WITH TIME ZONE, T9 TIME(9) WITH TIME ZONE); +> ok + +INSERT INTO TEST VALUES ('08:00:00.123456789-01', '08:00:00.123456789Z', '08:00:00.123456789+01:02:03', + '08:00:00.123456789-3:00', '08:00:00.123456789+4:30', '08:00:00.123456789Z', '08:00:00.123456789Z', + '08:00:00.123456789Z', '08:00:00.123456789Z', '08:00:00.123456789Z', '08:00:00.123456789Z'); +> update count: 1 + +SELECT * FROM TEST; +> T T0 T1 T2 T3 T4 T5 T6 T7 T8 T9 +> ----------- ----------- ------------------- -------------- ------------------ ---------------- ----------------- ------------------ ------------------- -------------------- --------------------- +> 08:00:00-01 08:00:00+00 08:00:00.1+01:02:03 08:00:00.12-03 08:00:00.123+04:30 08:00:00.1235+00 08:00:00.12346+00 08:00:00.123457+00 08:00:00.1234568+00 08:00:00.12345679+00 08:00:00.123456789+00 +> rows: 1 + +DELETE FROM TEST; +> update count: 1 + +INSERT INTO TEST(T0, T8) VALUES ('23:59:59.999999999Z', '23:59:59.999999999Z'); +> update count: 1 + +SELECT T0 FROM TEST; +>> 23:59:59+00 + +SELECT T8 FROM TEST; +>> 23:59:59.99999999+00 + +DROP TABLE TEST; +> ok + +SELECT TIME WITH TIME ZONE '11:22:33'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT TIME WITH TIME ZONE '11:22:33 Europe/London'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT CAST (TIMESTAMP WITH TIME ZONE '1000000000-12-31 11:22:33.123456789+02' AS TIME WITH TIME ZONE); +>> 11:22:33+02 + +SELECT CAST (TIMESTAMP WITH TIME ZONE '1000000000-12-31 11:22:33.123456789+02' AS TIME(9) WITH TIME ZONE); +>> 11:22:33.123456789+02 + +SELECT CAST (TIMESTAMP WITH TIME ZONE '-1000000000-12-31 11:22:33.123456789+02' AS TIME(9) WITH TIME ZONE); +>> 11:22:33.123456789+02 + 
+SELECT CAST (TIME WITH TIME ZONE '10:00:00Z' AS DATE); +> exception DATA_CONVERSION_ERROR_1 + +SELECT TIME WITH TIME ZONE '23:00:00+01' - TIME WITH TIME ZONE '00:00:30-01'; +>> INTERVAL '20:59:30' HOUR TO SECOND + +SELECT TIME WITH TIME ZONE '10:00:00-10' + INTERVAL '30' MINUTE; +>> 10:30:00-10 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/time.sql b/h2/src/test/org/h2/test/scripts/datatypes/time.sql index 1a994582a2..a51b23425c 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/time.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/time.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -15,29 +15,29 @@ SELECT T1, T2, T1 = T2 FROM TEST; > 10:00:00 10:00:00 TRUE > rows: 1 -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE -> ----------- --------- --------- ---------------------- -> T1 92 TIME TIME -> T2 92 TIME TIME WITHOUT TIME ZONE +> COLUMN_NAME DATA_TYPE +> ----------- --------- +> T1 TIME +> T2 TIME > rows (ordered): 2 ALTER TABLE TEST ADD (T3 TIME(0), T4 TIME(9) WITHOUT TIME ZONE); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE, NUMERIC_SCALE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE NUMERIC_SCALE DATETIME_PRECISION -> ----------- --------- --------- ------------------------- ------------- ------------------ -> T1 92 TIME TIME 0 0 -> T2 92 TIME TIME WITHOUT TIME ZONE 0 0 -> T3 
92 TIME TIME(0) 0 0 -> T4 92 TIME TIME(9) WITHOUT TIME ZONE 9 9 +> COLUMN_NAME DATA_TYPE DATETIME_PRECISION +> ----------- --------- ------------------ +> T1 TIME 0 +> T2 TIME 0 +> T3 TIME 0 +> T4 TIME 9 > rows (ordered): 4 ALTER TABLE TEST ADD T5 TIME(10); -> exception INVALID_VALUE_SCALE_PRECISION +> exception INVALID_VALUE_SCALE DROP TABLE TEST; > ok @@ -73,11 +73,14 @@ SELECT * FROM TEST; DELETE FROM TEST; > update count: 1 -INSERT INTO TEST(T0) VALUES ('23:59:59.999999999'); +INSERT INTO TEST(T0, T8) VALUES ('23:59:59.999999999', '23:59:59.999999999'); > update count: 1 SELECT T0 FROM TEST; ->> 23:59:59.999999999 +>> 23:59:59 + +SELECT T8 FROM TEST; +>> 23:59:59.99999999 DROP TABLE TEST; > ok @@ -111,3 +114,15 @@ SELECT TIME '12233.1'; SELECT TIME '1122.1'; > exception INVALID_DATETIME_CONSTANT_2 + +SELECT CAST (TIMESTAMP '1000000000-12-31 11:22:33.123456789' AS TIME); +>> 11:22:33 + +SELECT CAST (TIMESTAMP '1000000000-12-31 11:22:33.123456789' AS TIME(9)); +>> 11:22:33.123456789 + +SELECT CAST (TIMESTAMP '-1000000000-12-31 11:22:33.123456789' AS TIME(9)); +>> 11:22:33.123456789 + +SELECT CAST (TIME '10:00:00' AS DATE); +> exception DATA_CONVERSION_ERROR_1 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/timestamp-with-timezone.sql b/h2/src/test/org/h2/test/scripts/datatypes/timestamp-with-time-zone.sql similarity index 70% rename from h2/src/test/org/h2/test/scripts/datatypes/timestamp-with-timezone.sql rename to h2/src/test/org/h2/test/scripts/datatypes/timestamp-with-time-zone.sql index d2aec66330..290d975fe9 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/timestamp-with-timezone.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/timestamp-with-time-zone.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -9,8 +9,8 @@ CREATE TABLE tab_with_timezone(x TIMESTAMP WITH TIME ZONE); INSERT INTO tab_with_timezone(x) VALUES ('2017-01-01'); > update count: 1 -SELECT "Query".* FROM (select * from tab_with_timezone where x > '2016-01-01') AS "Query"; ->> 2017-01-01 00:00:00+00 +SELECT CAST("Query".X AS TIMESTAMP) FROM (select * from tab_with_timezone where x > '2016-01-01') AS "Query"; +>> 2017-01-01 00:00:00 DELETE FROM tab_with_timezone; > update count: 1 @@ -37,17 +37,17 @@ SELECT TIMESTAMP WITH TIME ZONE '2000-01-10 00:00:00 -02' AS A, CREATE TABLE TEST(T1 TIMESTAMP WITH TIME ZONE, T2 TIMESTAMP(0) WITH TIME ZONE, T3 TIMESTAMP(9) WITH TIME ZONE); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE, NUMERIC_SCALE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE NUMERIC_SCALE DATETIME_PRECISION -> ----------- --------- ------------------------ --------------------------- ------------- ------------------ -> T1 2014 TIMESTAMP WITH TIME ZONE TIMESTAMP WITH TIME ZONE 6 6 -> T2 2014 TIMESTAMP WITH TIME ZONE TIMESTAMP(0) WITH TIME ZONE 0 0 -> T3 2014 TIMESTAMP WITH TIME ZONE TIMESTAMP(9) WITH TIME ZONE 9 9 +> COLUMN_NAME DATA_TYPE DATETIME_PRECISION +> ----------- ------------------------ ------------------ +> T1 TIMESTAMP WITH TIME ZONE 6 +> T2 TIMESTAMP WITH TIME ZONE 0 +> T3 TIMESTAMP WITH TIME ZONE 9 > rows (ordered): 3 ALTER TABLE TEST ADD T4 TIMESTAMP (10) WITH TIME ZONE; -> exception INVALID_VALUE_SCALE_PRECISION +> exception INVALID_VALUE_SCALE DROP TABLE TEST; > ok @@ -112,3 +112,27 @@ SELECT (TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00+01' - 1) A; > ---------------------- > 2009-12-31 10:00:00+01 > rows: 1 + +CALL TIMESTAMP WITH TIME ZONE '-1000000000-01-01 00:00:00Z'; +>> -1000000000-01-01 00:00:00+00 + +CALL TIMESTAMP WITH TIME ZONE 
'1000000000-12-31 23:59:59.999999999Z'; +>> 1000000000-12-31 23:59:59.999999999+00 + +CALL TIMESTAMP WITH TIME ZONE '-1000000001-12-31 23:59:59.999999999Z'; +> exception INVALID_DATETIME_CONSTANT_2 + +CALL TIMESTAMP WITH TIME ZONE '1000000001-01-01 00:00:00Z'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT CAST (TIMESTAMP WITH TIME ZONE '2000-01-01 23:59:59.999999999Z' AS TIMESTAMP WITH TIME ZONE); +>> 2000-01-02 00:00:00+00 + +SELECT CAST (TIMESTAMP WITH TIME ZONE '1000000000-12-31 23:59:59.999999999Z' AS TIMESTAMP WITH TIME ZONE); +>> 1000000000-12-31 23:59:59.999999+00 + +SELECT CAST (CAST (TIMESTAMP '1000000000-12-31 23:59:59.999999999' AS TIMESTAMP(9) WITH TIME ZONE) AS TIMESTAMP(9)); +>> 1000000000-12-31 23:59:59.999999999 + +SELECT CAST (CAST (TIMESTAMP '-1000000000-12-31 00:00:00' AS TIMESTAMP(9) WITH TIME ZONE) AS TIMESTAMP(9)); +>> -1000000000-12-31 00:00:00 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/timestamp.sql b/h2/src/test/org/h2/test/scripts/datatypes/timestamp.sql index f1af8512e8..b2bfa5f0d0 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/timestamp.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/timestamp.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -21,31 +21,31 @@ ALTER TABLE TEST ADD (T3 TIMESTAMP(0), T4 TIMESTAMP(9) WITHOUT TIME ZONE, SDT1 SMALLDATETIME); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE, NUMERIC_SCALE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE NUMERIC_SCALE DATETIME_PRECISION -> ----------- --------- --------- ------------------------------ ------------- ------------------ -> T1 93 TIMESTAMP TIMESTAMP 6 6 -> T2 93 TIMESTAMP TIMESTAMP WITHOUT TIME ZONE 6 6 -> T3 93 TIMESTAMP TIMESTAMP(0) 0 0 -> T4 93 TIMESTAMP TIMESTAMP(9) WITHOUT TIME ZONE 9 9 -> DT1 93 TIMESTAMP DATETIME 6 6 -> DT2 93 TIMESTAMP DATETIME(0) 0 0 -> DT3 93 TIMESTAMP DATETIME(9) 9 9 -> DT2_1 93 TIMESTAMP DATETIME2 6 6 -> DT2_2 93 TIMESTAMP DATETIME2(0) 0 0 -> DT2_3 93 TIMESTAMP DATETIME2(7) 7 7 -> SDT1 93 TIMESTAMP SMALLDATETIME 0 0 +> COLUMN_NAME DATA_TYPE DATETIME_PRECISION +> ----------- --------- ------------------ +> T1 TIMESTAMP 6 +> T2 TIMESTAMP 6 +> T3 TIMESTAMP 0 +> T4 TIMESTAMP 9 +> DT1 TIMESTAMP 6 +> DT2 TIMESTAMP 0 +> DT3 TIMESTAMP 9 +> DT2_1 TIMESTAMP 6 +> DT2_2 TIMESTAMP 0 +> DT2_3 TIMESTAMP 7 +> SDT1 TIMESTAMP 0 > rows (ordered): 11 ALTER TABLE TEST ADD T5 TIMESTAMP(10); -> exception INVALID_VALUE_SCALE_PRECISION +> exception INVALID_VALUE_SCALE ALTER TABLE TEST ADD DT4 DATETIME(10); -> exception INVALID_VALUE_SCALE_PRECISION +> exception INVALID_VALUE_SCALE ALTER TABLE TEST ADD DT2_4 DATETIME2(10); -> exception INVALID_VALUE_SCALE_PRECISION +> exception INVALID_VALUE_SCALE ALTER TABLE TEST ADD STD2 SMALLDATETIME(1); > exception SYNTAX_ERROR_1 @@ -153,3 +153,21 @@ SELECT TIMESTAMP '20000102 112233'; SELECT TIMESTAMP '20000102T112233'; >> 2000-01-02 11:22:33 + +CALL TIMESTAMP '-1000000000-01-01 00:00:00'; +>> -1000000000-01-01 00:00:00 + +CALL TIMESTAMP '1000000000-12-31 
23:59:59.999999999'; +>> 1000000000-12-31 23:59:59.999999999 + +CALL TIMESTAMP '-1000000001-12-31 23:59:59.999999999'; +> exception INVALID_DATETIME_CONSTANT_2 + +CALL TIMESTAMP '1000000001-01-01 00:00:00'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT CAST (TIMESTAMP '2000-01-01 23:59:59.999999999' AS TIMESTAMP); +>> 2000-01-02 00:00:00 + +SELECT CAST (TIMESTAMP '1000000000-12-31 23:59:59.999999999' AS TIMESTAMP); +>> 1000000000-12-31 23:59:59.999999 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/tinyint.sql b/h2/src/test/org/h2/test/scripts/datatypes/tinyint.sql index 1755982f06..c389b6e17f 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/tinyint.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/tinyint.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -13,3 +13,6 @@ SELECT CAST(-128 AS TINYINT) / CAST(1 AS TINYINT); SELECT CAST(-128 AS TINYINT) / CAST(-1 AS TINYINT); > exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +EXPLAIN VALUES CAST(1 AS TINYINT); +>> VALUES (CAST(1 AS TINYINT)) diff --git a/h2/src/test/org/h2/test/scripts/datatypes/uuid.sql b/h2/src/test/org/h2/test/scripts/datatypes/uuid.sql index a39e798fde..39686caa06 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/uuid.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/uuid.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -11,37 +11,32 @@ CREATE TABLE TEST(U UUID) AS (SELECT * FROM VALUES SELECT U FROM TEST ORDER BY U; > U > ------------------------------------ -> aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa -> 00000000-0000-0000-9000-000000000000 > 00000000-0000-0000-0000-000000000000 +> 00000000-0000-0000-9000-000000000000 > 11111111-1111-1111-1111-111111111111 +> aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa > rows (ordered): 4 -SET UUID_COLLATION UNSIGNED; -> exception COLLATION_CHANGE_WITH_DATA_TABLE_1 - DROP TABLE TEST; > ok -SET UUID_COLLATION UNSIGNED; -> ok +EXPLAIN VALUES UUID '11111111-1111-1111-1111-111111111111'; +>> VALUES (UUID '11111111-1111-1111-1111-111111111111') -CREATE TABLE TEST(U UUID) AS (SELECT * FROM VALUES - ('00000000-0000-0000-0000-000000000000'), ('00000000-0000-0000-9000-000000000000'), - ('11111111-1111-1111-1111-111111111111'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')); -> ok +VALUES CAST('01234567-89AB-CDEF-0123-456789ABCDE' AS UUID); +> exception DATA_CONVERSION_ERROR_1 -SELECT U FROM TEST ORDER BY U; -> U -> ------------------------------------ -> 00000000-0000-0000-0000-000000000000 -> 00000000-0000-0000-9000-000000000000 -> 11111111-1111-1111-1111-111111111111 -> aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa -> rows (ordered): 4 +VALUES CAST(X'0123456789ABCDEF0123456789ABCD' AS UUID); +> exception DATA_CONVERSION_ERROR_1 -DROP TABLE TEST; -> ok +VALUES CAST('01234567-89AB-CDEF-0123-456789ABCDEF' AS UUID); +>> 01234567-89ab-cdef-0123-456789abcdef -SET UUID_COLLATION SIGNED; -> ok +VALUES CAST(X'0123456789ABCDEF0123456789ABCDEF' AS UUID); +>> 01234567-89ab-cdef-0123-456789abcdef + +VALUES CAST('01234567-89AB-CDEF-0123-456789ABCDEF-0' AS UUID); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST(X'0123456789ABCDEF0123456789ABCDEF01' AS UUID); +> exception DATA_CONVERSION_ERROR_1 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/varbinary.sql b/h2/src/test/org/h2/test/scripts/datatypes/varbinary.sql new file mode 100644 index 
0000000000..881b3a7923 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/varbinary.sql @@ -0,0 +1,143 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(B1 VARBINARY, B2 BINARY VARYING, B3 RAW, B4 BYTEA, B5 LONG RAW, B6 LONGVARBINARY); +> ok + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- -------------- +> B1 BINARY VARYING +> B2 BINARY VARYING +> B3 BINARY VARYING +> B4 BINARY VARYING +> B5 BINARY VARYING +> B6 BINARY VARYING +> rows (ordered): 6 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST AS (VALUES X'11' || X'25'); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> -------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "C1" BINARY VARYING(2) ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (X'1125'); +> rows (ordered): 4 + +EXPLAIN SELECT C1 || X'10' FROM TEST; +>> SELECT "C1" || X'10' FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +SELECT X'11' || CAST(NULL AS VARBINARY); +>> null + +SELECT CAST(NULL AS VARBINARY) || X'11'; +>> null + +SELECT X'1'; +> exception HEX_STRING_ODD_1 + +SELECT X'1' '1'; +> exception HEX_STRING_ODD_1 + +SELECT X' 1 2 3 4 '; +>> X'1234' + +SELECT X'1 2 3'; +> exception HEX_STRING_ODD_1 + +SELECT X'~'; +> exception HEX_STRING_WRONG_1 + +SELECT X'G'; +> exception HEX_STRING_WRONG_1 + +SELECT X'TT'; +> exception HEX_STRING_WRONG_1 + +SELECT X' TT'; +> exception HEX_STRING_WRONG_1 + +SELECT X'AB' 'CD'; +>> X'abcd' + +SELECT X'AB' /* comment*/ 'CD' 'EF'; +>> X'abcdef' + +SELECT X'AB' 'CX'; +> exception HEX_STRING_WRONG_1 + +SELECT 0xabcd; +>> 43981 + +SET MODE MSSQLServer; +> ok 
+ +SELECT 0x, 0x12ab; +> +> --- ------- +> X'' X'12ab' +> rows: 1 + +SELECT 0xZ; +> exception HEX_STRING_WRONG_1 + +SET MODE MySQL; +> ok + +SELECT 0x, 0x12ab; +> X'' X'12ab' +> --- ------- +> X'' X'12ab' +> rows: 1 + +SELECT 0xZ; +> exception HEX_STRING_WRONG_1 + +SET MODE Regular; +> ok + +EXPLAIN VALUES X''; +>> VALUES (X'') + +CREATE TABLE T(C VARBINARY(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T1(A BINARY VARYING(1048576)); +> ok + +CREATE TABLE T2(A BINARY VARYING(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A BINARY VARYING(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_OCTET_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_OCTET_LENGTH +> ---------- ---------------------- +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok + +SELECT X'ab''cd'; +> exception SYNTAX_ERROR_1 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/varchar-ignorecase.sql b/h2/src/test/org/h2/test/scripts/datatypes/varchar-ignorecase.sql index b822e05c2d..268b906706 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/varchar-ignorecase.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/varchar-ignorecase.sql @@ -1,17 +1,191 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- CREATE TABLE TEST(C1 VARCHAR_IGNORECASE); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE -> ----------- --------- ------------------ ------------------ -> C1 12 VARCHAR_IGNORECASE VARCHAR_IGNORECASE +> COLUMN_NAME DATA_TYPE +> ----------- ------------------ +> C1 VARCHAR_IGNORECASE > rows (ordered): 1 DROP TABLE TEST; > ok + +CREATE TABLE TEST (N VARCHAR_IGNORECASE) AS VALUES 'A', 'a', NULL; +> ok + +SELECT DISTINCT * FROM TEST; +> N +> ---- +> A +> null +> rows: 2 + +SELECT * FROM TEST; +> N +> ---- +> A +> a +> null +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST (N VARCHAR_IGNORECASE) AS VALUES 'A', 'a', 'C', NULL; +> ok + +CREATE INDEX TEST_IDX ON TEST(N); +> ok + +SELECT N FROM TEST WHERE N IN ('a', 'A', 'B'); +> N +> - +> A +> a +> rows: 2 + +EXPLAIN SELECT N FROM TEST WHERE N IN (SELECT DISTINCT ON(B) A FROM VALUES ('a', 1), ('A', 2), ('B', 3) T(A, B)); +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ WHERE "N" IN( SELECT DISTINCT ON("B") "A" FROM (VALUES ('a', 1), ('A', 2), ('B', 3)) "T"("A", "B") /* table scan */) + +SELECT N FROM TEST WHERE N IN (SELECT DISTINCT ON(B) A FROM VALUES ('a', 1), ('A', 2), ('B', 3) T(A, B)); +> N +> - +> A +> a +> rows: 2 + +SELECT N FROM TEST WHERE N IN (SELECT DISTINCT ON(B) A FROM VALUES ('a'::VARCHAR_IGNORECASE, 1), + ('A'::VARCHAR_IGNORECASE, 2), ('B'::VARCHAR_IGNORECASE, 3) T(A, B)); +> N +> - +> A +> a +> rows: 2 + +EXPLAIN SELECT N FROM TEST WHERE N IN (SELECT DISTINCT ON(B) A FROM VALUES ('a'::VARCHAR_IGNORECASE(1), 1), + ('A'::VARCHAR_IGNORECASE(1), 2), ('B'::VARCHAR_IGNORECASE(1), 3) T(A, B)); +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX: N IN(SELECT DISTINCT ON(B) A FROM (VALUES (CAST('a' AS VARCHAR_IGNORECASE(1)), 1), (CAST('A' AS 
VARCHAR_IGNORECASE(1)), 2), (CAST('B' AS VARCHAR_IGNORECASE(1)), 3)) T(A, B) /* table scan */) */ WHERE "N" IN( SELECT DISTINCT ON("B") "A" FROM (VALUES (CAST('a' AS VARCHAR_IGNORECASE(1)), 1), (CAST('A' AS VARCHAR_IGNORECASE(1)), 2), (CAST('B' AS VARCHAR_IGNORECASE(1)), 3)) "T"("A", "B") /* table scan */) + +DROP INDEX TEST_IDX; +> ok + +CREATE UNIQUE INDEX TEST_IDX ON TEST(N); +> exception DUPLICATE_KEY_1 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(N VARCHAR_IGNORECASE) AS VALUES ('A'), ('a'), ('C'), (NULL); +> ok + +CREATE HASH INDEX TEST_IDX ON TEST(N); +> ok + +SELECT N FROM TEST WHERE N = 'A'; +> N +> - +> A +> a +> rows: 2 + +DROP INDEX TEST_IDX; +> ok + +CREATE UNIQUE HASH INDEX TEST_IDX ON TEST(N); +> exception DUPLICATE_KEY_1 + +DELETE FROM TEST WHERE N = 'A' LIMIT 1; +> update count: 1 + +CREATE UNIQUE HASH INDEX TEST_IDX ON TEST(N); +> ok + +SELECT 1 FROM TEST WHERE N = 'A'; +>> 1 + +INSERT INTO TEST VALUES (NULL); +> update count: 1 + +SELECT N FROM TEST WHERE N IS NULL; +> N +> ---- +> null +> null +> rows: 2 + +DELETE FROM TEST WHERE N IS NULL LIMIT 1; +> update count: 1 + +SELECT N FROM TEST WHERE N IS NULL; +>> null + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES CAST('a' AS VARCHAR_IGNORECASE(1)); +>> VALUES (CAST('a' AS VARCHAR_IGNORECASE(1))) + +CREATE TABLE T(C VARCHAR_IGNORECASE(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T(C1 VARCHAR_IGNORECASE(1 CHARACTERS), C2 VARCHAR_IGNORECASE(1 OCTETS)); +> ok + +DROP TABLE T; +> ok + +SELECT 'I' ILIKE CHAR(0x130); +>> TRUE + +SET COLLATION TURKISH STRENGTH IDENTICAL; +> ok + +CREATE TABLE TEST(V VARCHAR_IGNORECASE UNIQUE); +> ok + +INSERT INTO TEST VALUES 'I', 'i'; +> update count: 2 + +INSERT INTO TEST VALUES CHAR(0x0130); +> exception DUPLICATE_KEY_1 + +INSERT INTO TEST VALUES CHAR(0x0131); +> exception DUPLICATE_KEY_1 + +DROP TABLE TEST; +> ok + +SET COLLATION OFF; +> ok + + +CREATE TABLE T1(A VARCHAR_IGNORECASE(1048576)); +> ok + +CREATE TABLE T2(A VARCHAR_IGNORECASE(1048577)); +> 
exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A VARCHAR_IGNORECASE(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_MAXIMUM_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_MAXIMUM_LENGTH +> ---------- ------------------------ +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/varchar.sql b/h2/src/test/org/h2/test/scripts/datatypes/varchar.sql index 361e431083..d7ebecfa0b 100644 --- a/h2/src/test/org/h2/test/scripts/datatypes/varchar.sql +++ b/h2/src/test/org/h2/test/scripts/datatypes/varchar.sql @@ -1,25 +1,126 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- +SELECT 'A' 'b' + 'c'; +>> Abc + +SELECT N'A' 'b' + 'c'; +>> Abc + CREATE TABLE TEST(C1 VARCHAR, C2 CHARACTER VARYING, C3 VARCHAR2, C4 NVARCHAR, C5 NVARCHAR2, C6 VARCHAR_CASESENSITIVE, - C7 LONGVARCHAR, C8 TID); + C7 LONGVARCHAR, C8 TID, C9 CHAR VARYING, C10 NCHAR VARYING, C11 NATIONAL CHARACTER VARYING, C12 NATIONAL CHAR VARYING); > ok -SELECT COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DATA_TYPE TYPE_NAME COLUMN_TYPE -> ----------- --------- --------- --------------------- -> C1 12 VARCHAR VARCHAR -> C2 12 VARCHAR CHARACTER VARYING -> C3 12 VARCHAR VARCHAR2 -> C4 12 VARCHAR NVARCHAR -> C5 12 VARCHAR NVARCHAR2 -> C6 12 VARCHAR VARCHAR_CASESENSITIVE -> C7 12 VARCHAR LONGVARCHAR -> C8 12 VARCHAR TID -> rows (ordered): 8 +> COLUMN_NAME DATA_TYPE +> ----------- ----------------- +> C1 CHARACTER 
VARYING +> C2 CHARACTER VARYING +> C3 CHARACTER VARYING +> C4 CHARACTER VARYING +> C5 CHARACTER VARYING +> C6 CHARACTER VARYING +> C7 CHARACTER VARYING +> C8 CHARACTER VARYING +> C9 CHARACTER VARYING +> C10 CHARACTER VARYING +> C11 CHARACTER VARYING +> C12 CHARACTER VARYING +> rows (ordered): 12 DROP TABLE TEST; > ok + +CREATE TABLE T(C VARCHAR(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T(C VARCHAR(1K)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE T(C1 VARCHAR(1 CHARACTERS), C2 VARCHAR(1 OCTETS)); +> ok + +DROP TABLE T; +> ok + + +CREATE TABLE T1(A CHARACTER VARYING(1048576)); +> ok + +CREATE TABLE T2(A CHARACTER VARYING(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A CHARACTER VARYING(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_MAXIMUM_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_MAXIMUM_LENGTH +> ---------- ------------------------ +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok + +SELECT U&'a\0030a\+000025a'; +>> a0a%a + +SELECT U&'az0030az+000025a' UESCAPE 'z'; +>> a0a%a + +EXPLAIN SELECT U&'\fffd\+100000'; +>> SELECT U&'\fffd\+100000' + +SELECT U&'\'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\0'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\00'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\003'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\0030'; +>> 0 + +SELECT U&'\zzzz'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+0'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+00'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+000'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+0000'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+00003'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+000030'; +>> 0 + +SELECT U&'\+zzzzzz'; +> exception STRING_FORMAT_ERROR_1 + +EXPLAIN SELECT U&'''\\', U&'''\\\fffd'; +>> SELECT '''\', U&'''\\\fffd' diff --git 
a/h2/src/test/org/h2/test/scripts/ddl/alterDomain.sql b/h2/src/test/org/h2/test/scripts/ddl/alterDomain.sql new file mode 100644 index 0000000000..94bc2ae007 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/alterDomain.sql @@ -0,0 +1,346 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE DOMAIN D1 INT DEFAULT 1; +> ok + +CREATE DOMAIN D2 D1 DEFAULT 2; +> ok + +CREATE DOMAIN D3 D1; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, S1 D1, S2 D2, S3 D3, C1 D1 DEFAULT 4, C2 D2 DEFAULT 5, C3 D3 DEFAULT 6); +> ok + +INSERT INTO TEST(ID) VALUES 1; +> update count: 1 + +TABLE TEST; +> ID S1 S2 S3 C1 C2 C3 +> -- -- -- -- -- -- -- +> 1 1 2 1 4 5 6 +> rows: 1 + +ALTER DOMAIN D1 SET DEFAULT 3; +> ok + +INSERT INTO TEST(ID) VALUES 2; +> update count: 1 + +SELECT * FROM TEST WHERE ID = 2; +> ID S1 S2 S3 C1 C2 C3 +> -- -- -- -- -- -- -- +> 2 3 2 3 4 5 6 +> rows: 1 + +ALTER DOMAIN D1 DROP DEFAULT; +> ok + +SELECT DOMAIN_NAME, DOMAIN_DEFAULT FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_DEFAULT +> ----------- -------------- +> D1 null +> D2 2 +> D3 3 +> rows: 3 + +SELECT COLUMN_NAME, COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_DEFAULT +> ----------- -------------- +> C1 4 +> C2 5 +> C3 6 +> ID null +> S1 3 +> S2 null +> S3 null +> rows: 7 + +ALTER DOMAIN D1 SET DEFAULT 3; +> ok + +ALTER DOMAIN D3 DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN S1 DROP DEFAULT; +> ok + +SELECT DOMAIN_NAME, DOMAIN_DEFAULT FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_DEFAULT +> ----------- -------------- +> D1 3 +> D2 2 +> D3 null +> rows: 3 + +SELECT COLUMN_NAME, COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_DEFAULT +> ----------- -------------- +> C1 4 +> C2 5 +> C3 6 +> 
ID null +> S1 null +> S2 null +> S3 null +> rows: 7 + +DROP DOMAIN D1 CASCADE; +> ok + +SELECT DOMAIN_NAME, DOMAIN_DEFAULT FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_DEFAULT +> ----------- -------------- +> D2 2 +> D3 3 +> rows: 2 + +SELECT COLUMN_NAME, COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_DEFAULT +> ----------- -------------- +> C1 4 +> C2 5 +> C3 6 +> ID null +> S1 3 +> S2 null +> S3 null +> rows: 7 + +DROP TABLE TEST; +> ok + +DROP DOMAIN D2; +> ok + +DROP DOMAIN D3; +> ok + +CREATE DOMAIN D1 INT ON UPDATE 1; +> ok + +CREATE DOMAIN D2 D1 ON UPDATE 2; +> ok + +CREATE DOMAIN D3 D1; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, S1 D1, S2 D2, S3 D3, C1 D1 ON UPDATE 4, C2 D2 ON UPDATE 5, C3 D3 ON UPDATE 6); +> ok + +ALTER DOMAIN D1 SET ON UPDATE 3; +> ok + +ALTER DOMAIN D1 DROP ON UPDATE; +> ok + +SELECT DOMAIN_NAME, DOMAIN_ON_UPDATE FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_ON_UPDATE +> ----------- ---------------- +> D1 null +> D2 2 +> D3 3 +> rows: 3 + +SELECT COLUMN_NAME, COLUMN_ON_UPDATE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_ON_UPDATE +> ----------- ---------------- +> C1 4 +> C2 5 +> C3 6 +> ID null +> S1 3 +> S2 null +> S3 null +> rows: 7 + +ALTER DOMAIN D1 SET ON UPDATE 3; +> ok + +ALTER DOMAIN D3 DROP ON UPDATE; +> ok + +ALTER TABLE TEST ALTER COLUMN S1 DROP ON UPDATE; +> ok + +SELECT DOMAIN_NAME, DOMAIN_ON_UPDATE FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_ON_UPDATE +> ----------- ---------------- +> D1 3 +> D2 2 +> D3 null +> rows: 3 + +SELECT COLUMN_NAME, COLUMN_ON_UPDATE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_ON_UPDATE +> ----------- ---------------- +> C1 4 +> C2 5 +> C3 6 +> ID null +> S1 null +> S2 null +> S3 null +> rows: 7 + +DROP DOMAIN D1 CASCADE; +> ok + +SELECT 
DOMAIN_NAME, DOMAIN_ON_UPDATE FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_ON_UPDATE +> ----------- ---------------- +> D2 2 +> D3 3 +> rows: 2 + +SELECT COLUMN_NAME, COLUMN_ON_UPDATE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_ON_UPDATE +> ----------- ---------------- +> C1 4 +> C2 5 +> C3 6 +> ID null +> S1 3 +> S2 null +> S3 null +> rows: 7 + +DROP TABLE TEST; +> ok + +DROP DOMAIN D2; +> ok + +DROP DOMAIN D3; +> ok + +CREATE DOMAIN D1 AS INT; +> ok + +CREATE DOMAIN D2 AS D1; +> ok + +CREATE TABLE T(C1 D1, C2 D2, L BIGINT); +> ok + +ALTER DOMAIN D1 RENAME TO D3; +> ok + +SELECT DOMAIN_NAME, DATA_TYPE, PARENT_DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAINS; +> DOMAIN_NAME DATA_TYPE PARENT_DOMAIN_NAME +> ----------- --------- ------------------ +> D2 INTEGER D3 +> D3 INTEGER null +> rows: 2 + +SELECT COLUMN_NAME, DOMAIN_NAME FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'T' AND COLUMN_NAME LIKE 'C_'; +> COLUMN_NAME DOMAIN_NAME +> ----------- ----------- +> C1 D3 +> C2 D2 +> rows: 2 + +@reconnect + +SELECT DOMAIN_NAME, DATA_TYPE, PARENT_DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAINS; +> DOMAIN_NAME DATA_TYPE PARENT_DOMAIN_NAME +> ----------- --------- ------------------ +> D2 INTEGER D3 +> D3 INTEGER null +> rows: 2 + +SELECT COLUMN_NAME, DOMAIN_NAME FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'T' AND COLUMN_NAME LIKE 'C_'; +> COLUMN_NAME DOMAIN_NAME +> ----------- ----------- +> C1 D3 +> C2 D2 +> rows: 2 + +DROP TABLE T; +> ok + +DROP DOMAIN D2; +> ok + +DROP DOMAIN D3; +> ok + +CREATE DOMAIN D1 AS INT; +> ok + +CREATE DOMAIN D2 AS D1; +> ok + +CREATE TABLE TEST(A INT, C D2) AS VALUES (1, 1); +> ok + +ALTER TABLE TEST ADD CHECK (C > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D2 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D1 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +CREATE UNIQUE INDEX TEST_A_IDX ON 
TEST(A); +> ok + +ALTER TABLE TEST ADD CHECK (C > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D2 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D1 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +CREATE INDEX TEST_C_IDX ON TEST(C); +> ok + +ALTER TABLE TEST ADD CHECK (C > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D2 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D1 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D1 ADD CHECK (VALUE > 1) NOCHECK; +> ok + +DROP TABLE TEST; +> ok + +ALTER DOMAIN D1 ADD CONSTRAINT T CHECK (VALUE < 100); +> ok + +ALTER DOMAIN D3 RENAME CONSTRAINT T TO T1; +> exception DOMAIN_NOT_FOUND_1 + +ALTER DOMAIN IF EXISTS D3 RENAME CONSTRAINT T TO T1; +> ok + +ALTER DOMAIN D2 RENAME CONSTRAINT T TO T2; +> exception CONSTRAINT_NOT_FOUND_1 + +ALTER DOMAIN D1 RENAME CONSTRAINT T TO T3; +> ok + +SELECT CONSTRAINT_NAME, DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAIN_CONSTRAINTS WHERE CONSTRAINT_NAME LIKE 'T%'; +> CONSTRAINT_NAME DOMAIN_NAME +> --------------- ----------- +> T3 D1 +> rows: 1 + +DROP DOMAIN D2; +> ok + +DROP DOMAIN D1; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableAdd.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableAdd.sql index 22ca5afb51..9f00abb42f 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/alterTableAdd.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableAdd.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -101,3 +101,295 @@ SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE FROM INFORMATION_SCHEMA.TABLE_CONSTRAINT DROP TABLE TEST; > ok + +CREATE TABLE PARENT(ID INT); +> ok + +CREATE INDEX PARENT_ID_IDX ON PARENT(ID); +> ok + +CREATE TABLE CHILD(ID INT PRIMARY KEY, P INT); +> ok + +ALTER TABLE CHILD ADD CONSTRAINT CHILD_P_FK FOREIGN KEY (P) REFERENCES PARENT(ID); +> exception CONSTRAINT_NOT_FOUND_1 + +SET MODE MySQL; +> ok + +ALTER TABLE CHILD ADD CONSTRAINT CHILD_P_FK FOREIGN KEY (P) REFERENCES PARENT(ID); +> ok + +SET MODE Regular; +> ok + +INSERT INTO PARENT VALUES 1, 1; +> exception DUPLICATE_KEY_1 + +DROP TABLE CHILD, PARENT; +> ok + +CREATE TABLE PARENT(ID INT CONSTRAINT P1 PRIMARY KEY); +> ok + +CREATE TABLE CHILD(ID INT CONSTRAINT P2 PRIMARY KEY, CHILD INT CONSTRAINT C REFERENCES PARENT); +> ok + +ALTER TABLE PARENT DROP CONSTRAINT P1 RESTRICT; +> exception CONSTRAINT_IS_USED_BY_CONSTRAINT_2 + +ALTER TABLE PARENT DROP CONSTRAINT P1 RESTRICT; +> exception CONSTRAINT_IS_USED_BY_CONSTRAINT_2 + +ALTER TABLE PARENT DROP CONSTRAINT P1 CASCADE; +> ok + +DROP TABLE PARENT, CHILD; +> ok + +CREATE TABLE A(A TIMESTAMP PRIMARY KEY, B INT ARRAY UNIQUE, C TIME ARRAY UNIQUE); +> ok + +CREATE TABLE B(A TIMESTAMP WITH TIME ZONE, B DATE, C INT ARRAY, D TIME ARRAY, E TIME WITH TIME ZONE ARRAY); +> ok + +ALTER TABLE B ADD FOREIGN KEY(A) REFERENCES A(A); +> exception UNCOMPARABLE_REFERENCED_COLUMN_2 + +ALTER TABLE B ADD FOREIGN KEY(B) REFERENCES A(A); +> ok + +ALTER TABLE B ADD FOREIGN KEY(C) REFERENCES A(B); +> ok + +ALTER TABLE B ADD FOREIGN KEY(C) REFERENCES A(C); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +ALTER TABLE B ADD FOREIGN KEY(D) REFERENCES A(B); +> exception UNCOMPARABLE_REFERENCED_COLUMN_2 + +ALTER TABLE B ADD FOREIGN KEY(D) REFERENCES A(C); +> ok + +ALTER TABLE B ADD FOREIGN KEY(E) REFERENCES A(B); +> exception UNCOMPARABLE_REFERENCED_COLUMN_2 + +ALTER TABLE B ADD FOREIGN KEY(E) REFERENCES A(C); +> exception 
UNCOMPARABLE_REFERENCED_COLUMN_2 + +DROP TABLE B, A; +> ok + +CREATE TABLE PARENT(ID INT PRIMARY KEY, K INT UNIQUE); +> ok + +CREATE TABLE CHILD(ID INT PRIMARY KEY, P INT GENERATED ALWAYS AS (ID)); +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE CASCADE; +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE RESTRICT; +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE NO ACTION; +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE SET DEFAULT; +> exception GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE SET NULL; +> exception GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON UPDATE CASCADE; +> exception GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON UPDATE RESTRICT; +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON UPDATE NO ACTION; +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON UPDATE SET DEFAULT; +> exception GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON UPDATE SET NULL; +> exception GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 + +DROP TABLE CHILD, PARENT; +> ok + +CREATE TABLE T1(B INT, G INT GENERATED ALWAYS AS (B + 1) UNIQUE); +> ok + +CREATE TABLE T2(A INT, G INT REFERENCES T1(G) ON UPDATE CASCADE); +> ok + +INSERT INTO T1(B) VALUES 1; +> update count: 1 + +INSERT INTO T2 VALUES (1, 2); +> update count: 1 + +TABLE T2; +> A G +> - - +> 1 2 +> rows: 1 + +UPDATE T1 SET B = 2; +> update count: 1 + +TABLE T2; +> A G +> - - +> 1 3 +> rows: 1 + +DROP TABLE T2, T1; +> ok + +CREATE SCHEMA S1; +> ok + +CREATE TABLE S1.T1(ID INT PRIMARY KEY); +> ok + +CREATE SCHEMA S2; +> ok + +CREATE TABLE S2.T2(ID INT, FK INT REFERENCES S1.T1(ID)); +> ok + 
+SELECT CONSTRAINT_SCHEMA, CONSTRAINT_TYPE, TABLE_SCHEMA, TABLE_NAME, INDEX_SCHEMA + FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_SCHEMA LIKE 'S%'; +> CONSTRAINT_SCHEMA CONSTRAINT_TYPE TABLE_SCHEMA TABLE_NAME INDEX_SCHEMA +> ----------------- --------------- ------------ ---------- ------------ +> S1 PRIMARY KEY S1 T1 S1 +> S2 FOREIGN KEY S2 T2 S2 +> rows: 2 + +SELECT INDEX_SCHEMA, TABLE_SCHEMA, TABLE_NAME, INDEX_TYPE_NAME, IS_GENERATED FROM INFORMATION_SCHEMA.INDEXES + WHERE TABLE_SCHEMA LIKE 'S%'; +> INDEX_SCHEMA TABLE_SCHEMA TABLE_NAME INDEX_TYPE_NAME IS_GENERATED +> ------------ ------------ ---------- --------------- ------------ +> S1 S1 T1 PRIMARY KEY TRUE +> S2 S2 T2 INDEX TRUE +> rows: 2 + +SELECT INDEX_SCHEMA, TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE TABLE_SCHEMA LIKE 'S%'; +> INDEX_SCHEMA TABLE_SCHEMA TABLE_NAME COLUMN_NAME +> ------------ ------------ ---------- ----------- +> S1 S1 T1 ID +> S2 S2 T2 FK +> rows: 2 + +@reconnect + +DROP SCHEMA S2 CASCADE; +> ok + +DROP SCHEMA S1 CASCADE; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST(' || (SELECT LISTAGG('C' || X || ' INT') FROM SYSTEM_RANGE(1, 16384)) || ')'; +> ok + +ALTER TABLE TEST ADD COLUMN(X INTEGER); +> exception TOO_MANY_COLUMNS_1 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(ID BIGINT NOT NULL); +> ok + +ALTER TABLE TEST ADD PRIMARY KEY(ID); +> ok + +SELECT INDEX_TYPE_NAME, IS_GENERATED FROM INFORMATION_SCHEMA.INDEXES WHERE TABLE_NAME = 'TEST'; +> INDEX_TYPE_NAME IS_GENERATED +> --------------- ------------ +> PRIMARY KEY TRUE +> rows: 1 + +CALL DB_OBJECT_SQL('INDEX', 'PUBLIC', 'PRIMARY_KEY_2'); +>> CREATE PRIMARY KEY "PUBLIC"."PRIMARY_KEY_2" ON "PUBLIC"."TEST"("ID") + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" BIGINT NOT NULL ); +> 
ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 4 + +@reconnect + +SELECT INDEX_TYPE_NAME, IS_GENERATED FROM INFORMATION_SCHEMA.INDEXES WHERE TABLE_NAME = 'TEST'; +> INDEX_TYPE_NAME IS_GENERATED +> --------------- ------------ +> PRIMARY KEY TRUE +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT INVISIBLE, CONSTRAINT TEST_UNIQUE_2 UNIQUE(VALUE)); +> ok + +ALTER TABLE TEST ADD COLUMN D INT; +> ok + +ALTER TABLE TEST ADD CONSTRAINT TEST_UNIQUE_3 UNIQUE(VALUE); +> ok + +SELECT CONSTRAINT_NAME, COLUMN_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE + WHERE TABLE_NAME = 'TEST'; +> CONSTRAINT_NAME COLUMN_NAME ORDINAL_POSITION +> --------------- ----------- ---------------- +> TEST_UNIQUE_2 A 1 +> TEST_UNIQUE_2 B 2 +> TEST_UNIQUE_3 A 1 +> TEST_UNIQUE_3 B 2 +> TEST_UNIQUE_3 D 3 +> rows: 5 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(); +> ok + +ALTER TABLE TEST ADD UNIQUE (VALUE); +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT) AS VALUES (3, 4); +> ok + +ALTER TABLE TEST ADD G INT GENERATED ALWAYS AS (A + B); +> ok + +ALTER TABLE TEST ADD ID BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY FIRST; +> ok + +ALTER TABLE TEST ADD C INT AFTER B; +> ok + +INSERT INTO TEST(A, B) VALUES (5, 6); +> update count: 1 + +TABLE TEST; +> ID A B C G +> -- - - ---- -- +> 1 3 4 null 7 +> 2 5 6 null 11 +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableAlterColumn.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableAlterColumn.sql index 04cc5785dc..cda63ed105 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/alterTableAlterColumn.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableAlterColumn.sql @@ -1,41 +1,41 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
+-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- CREATE TABLE TEST(T INT); > ok -SELECT COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; ->> INT +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> INTEGER -- SET DEFAULT ALTER TABLE TEST ALTER COLUMN T SET DEFAULT 1; > ok -SELECT COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; ->> INT DEFAULT 1 +SELECT COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> 1 -- DROP DEFAULT ALTER TABLE TEST ALTER COLUMN T DROP DEFAULT; > ok -SELECT COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; ->> INT +SELECT COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> null -- SET NOT NULL ALTER TABLE TEST ALTER COLUMN T SET NOT NULL; > ok -SELECT COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; ->> INT NOT NULL +SELECT IS_NULLABLE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> NO -- DROP NOT NULL ALTER TABLE TEST ALTER COLUMN T DROP NOT NULL; > ok -SELECT COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; ->> INT +SELECT IS_NULLABLE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> YES ALTER TABLE TEST ALTER COLUMN T SET NOT NULL; > ok @@ -44,27 +44,773 @@ ALTER TABLE TEST ALTER COLUMN T SET NOT NULL; ALTER TABLE TEST ALTER COLUMN T SET NULL; > ok -SELECT COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; ->> INT +SELECT IS_NULLABLE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> YES -- SET DATA TYPE ALTER TABLE TEST ALTER COLUMN T SET DATA TYPE BIGINT; > ok -SELECT COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; >> BIGINT -ALTER TABLE TEST ALTER COLUMN T INT INVISIBLE DEFAULT 1 ON UPDATE 2 NOT NULL 
COMMENT 'C' CHECK T < 100; +ALTER TABLE TEST ALTER COLUMN T INT INVISIBLE DEFAULT 1 ON UPDATE 2 NOT NULL COMMENT 'C'; > ok -SELECT COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; ->> INT INVISIBLE DEFAULT 1 ON UPDATE 2 NOT NULL COMMENT 'C' CHECK ("T" < 100) +SELECT DATA_TYPE, IS_VISIBLE, COLUMN_DEFAULT, COLUMN_ON_UPDATE, REMARKS, IS_NULLABLE + FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +> DATA_TYPE IS_VISIBLE COLUMN_DEFAULT COLUMN_ON_UPDATE REMARKS IS_NULLABLE +> --------- ---------- -------------- ---------------- ------- ----------- +> INTEGER FALSE 1 2 C NO +> rows: 1 ALTER TABLE TEST ALTER COLUMN T SET DATA TYPE BIGINT; > ok -SELECT COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; ->> BIGINT INVISIBLE DEFAULT 1 ON UPDATE 2 NOT NULL COMMENT 'C' CHECK ("T" < 100) +SELECT DATA_TYPE, IS_VISIBLE, COLUMN_DEFAULT, COLUMN_ON_UPDATE, REMARKS, IS_NULLABLE + FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +> DATA_TYPE IS_VISIBLE COLUMN_DEFAULT COLUMN_ON_UPDATE REMARKS IS_NULLABLE +> --------- ---------- -------------- ---------------- ------- ----------- +> BIGINT FALSE 1 2 C NO +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT AUTO_INCREMENT PRIMARY KEY, V INT NOT NULL); +> ok + +ALTER TABLE TEST ALTER COLUMN ID RESTART WITH 100; +> ok + +INSERT INTO TEST(V) VALUES (1); +> update count: 1 + +ALTER TABLE TEST AUTO_INCREMENT = 200; +> exception SYNTAX_ERROR_2 + +SET MODE MySQL; +> ok + +ALTER TABLE TEST AUTO_INCREMENT = 200; +> ok + +INSERT INTO TEST(V) VALUES (2); +> update count: 1 + +ALTER TABLE TEST AUTO_INCREMENT 300; +> ok + +INSERT INTO TEST(V) VALUES (3); +> update count: 1 + +SELECT * FROM TEST ORDER BY ID; +> ID V +> --- - +> 100 1 +> 200 2 +> 300 3 +> rows (ordered): 3 + +ALTER TABLE TEST DROP PRIMARY KEY; +> ok + +ALTER TABLE TEST AUTO_INCREMENT = 400; +> exception COLUMN_NOT_FOUND_1 + +ALTER TABLE TEST ADD PRIMARY KEY(V); +> ok + +ALTER TABLE TEST AUTO_INCREMENT = 400; +> exception 
COLUMN_NOT_FOUND_1 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +-- Compatibility syntax + +SET MODE MySQL; +> ok + +create table test(id int primary key, name varchar); +> ok + +insert into test(id) values(1); +> update count: 1 + +alter table test change column id id2 int; +> ok + +select id2 from test; +> ID2 +> --- +> 1 +> rows: 1 + +drop table test; +> ok + +SET MODE Oracle; +> ok + +CREATE MEMORY TABLE TEST(V INT NOT NULL); +> ok + +ALTER TABLE TEST MODIFY COLUMN V BIGINT; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "V" BIGINT NOT NULL ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +SET MODE MySQL; +> ok + +ALTER TABLE TEST MODIFY COLUMN V INT; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> --------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "V" INTEGER ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +ALTER TABLE TEST MODIFY COLUMN V BIGINT NOT NULL; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "V" BIGINT NOT NULL ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +create table test(id int, name varchar); +> ok + +alter table test alter column id int as id+1; +> exception COLUMN_NOT_FOUND_1 + +drop table test; +> ok + +create table t(x varchar) as select 'x'; +> ok + +alter table t alter column x int; +> exception DATA_CONVERSION_ERROR_1 + +drop table t; +> ok + +create table t(id identity default on null, x varchar) as select null, 
'x'; +> ok + +alter table t alter column x int; +> exception DATA_CONVERSION_ERROR_1 + +drop table t; +> ok + +-- ensure that increasing a VARCHAR columns length takes effect because we optimize this case +create table t(x varchar(2)) as select 'x'; +> ok + +alter table t alter column x varchar(20); +> ok + +insert into t values 'Hello'; +> update count: 1 + +drop table t; +> ok + +SET MODE MySQL; +> ok + +create table t(x int); +> ok + +alter table t modify column x varchar(20); +> ok + +insert into t values('Hello'); +> update count: 1 + +drop table t; +> ok + +-- This worked in v1.4.196 +create table T (C varchar not null); +> ok + +alter table T modify C int null; +> ok + +insert into T values(null); +> update count: 1 + +drop table T; +> ok + +-- This failed in v1.4.196 +create table T (C int not null); +> ok + +-- Silently corrupted column C +alter table T modify C null; +> ok + +insert into T values(null); +> update count: 1 + +drop table T; +> ok + +SET MODE Oracle; +> ok + +create table foo (bar varchar(255)); +> ok + +alter table foo modify (bar varchar(255) not null); +> ok + +insert into foo values(null); +> exception NULL_NOT_ALLOWED + +DROP TABLE FOO; +> ok + +SET MODE Regular; +> ok + +-- Tests a bug we used to have where altering the name of a column that had +-- a check constraint that referenced itself would result in not being able +-- to re-open the DB. 
+create table test(id int check(id in (1,2)) ); +> ok + +alter table test alter id rename to id2; +> ok + +@reconnect + +insert into test values 1; +> update count: 1 + +insert into test values 3; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +drop table test; +> ok + +CREATE MEMORY TABLE TEST(C INT); +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D RENAME TO E; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS C RENAME TO D; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET NOT NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET NOT NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET DEFAULT 1; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET DEFAULT 1; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET ON UPDATE 2; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET ON UPDATE 2; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET DATA TYPE BIGINT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET DATA TYPE BIGINT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET INVISIBLE; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET INVISIBLE; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SELECTIVITY 3; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SELECTIVITY 3; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E RESTART WITH 4; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D RESTART WITH 4 SET MAXVALUE 1000; +> ok + +SELECT COLUMN_NAME, IS_IDENTITY, IDENTITY_GENERATION, IDENTITY_START, IDENTITY_INCREMENT, IDENTITY_MAXIMUM, + IDENTITY_MINIMUM, IDENTITY_CYCLE, IDENTITY_BASE, IDENTITY_CACHE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME IS_IDENTITY IDENTITY_GENERATION IDENTITY_START IDENTITY_INCREMENT IDENTITY_MAXIMUM IDENTITY_MINIMUM IDENTITY_CYCLE IDENTITY_BASE IDENTITY_CACHE +> ----------- ----------- ------------------- -------------- ------------------ ---------------- ---------------- -------------- ------------- -------------- +> D YES BY DEFAULT 1 1 1000 1 NO 4 32 +> rows: 1 
+ +ALTER TABLE TEST ALTER COLUMN D SET CYCLE; +> ok + +SELECT IDENTITY_CYCLE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> YES + +ALTER TABLE TEST ALTER COLUMN D DROP IDENTITY; +> ok + +SELECT IS_IDENTITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> NO + +ALTER TABLE TEST ALTER COLUMN D DROP IDENTITY; +> ok + +ALTER TABLE TEST ALTER COLUMN E DROP IDENTITY; +> exception COLUMN_NOT_FOUND_1 + +ALTER TABLE TEST ALTER COLUMN D SET GENERATED BY DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN D SET DEFAULT (1); +> ok + +SELECT COLUMN_DEFAULT, IS_IDENTITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_DEFAULT IS_IDENTITY +> -------------- ----------- +> null YES +> rows: 1 + +ALTER TABLE TEST ALTER COLUMN D DROP IDENTITY; +> ok + +ALTER TABLE TEST ALTER COLUMN D SET GENERATED ALWAYS; +> ok + +SELECT IS_IDENTITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> YES + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E DROP IDENTITY; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E DROP NOT NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D DROP NOT NULL; +> exception COLUMN_MUST_NOT_BE_NULLABLE_1 + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E DROP ON UPDATE; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D DROP ON UPDATE; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E INT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D INT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET VISIBLE; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET VISIBLE; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D" INTEGER NOT NULL ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + 
+DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT GENERATED ALWAYS AS IDENTITY (MINVALUE 1 MAXVALUE 10 INCREMENT BY -1), V INT); +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +TABLE TEST; +> ID V +> -- - +> 10 1 +> rows: 1 + +DELETE FROM TEST; +> update count: 1 + +ALTER TABLE TEST ALTER COLUMN ID RESTART; +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +TABLE TEST; +> ID V +> -- - +> 10 1 +> rows: 1 + +ALTER TABLE TEST ALTER COLUMN ID RESTART WITH 5; +> ok + +INSERT INTO TEST(V) VALUES 2; +> update count: 1 + +TABLE TEST; +> ID V +> -- - +> 10 1 +> 5 2 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT) AS VALUES 1, 2, 3; +> ok + +ALTER TABLE TEST ALTER COLUMN A SET DATA TYPE BIGINT USING A * 10; +> ok + +TABLE TEST; +> A +> -- +> 10 +> 20 +> 30 +> rows: 3 + +ALTER TABLE TEST ADD COLUMN B INT NOT NULL USING A + 1; +> ok + +TABLE TEST; +> A B +> -- -- +> 10 11 +> 20 21 +> 30 31 +> rows: 3 + +ALTER TABLE TEST ADD COLUMN C VARCHAR(2) USING A; +> ok + +TABLE TEST; +> A B C +> -- -- -- +> 10 11 10 +> 20 21 20 +> 30 31 30 +> rows: 3 + +ALTER TABLE TEST ALTER COLUMN C SET DATA TYPE VARCHAR(3) USING C || '*'; +> ok + +TABLE TEST; +> A B C +> -- -- --- +> 10 11 10* +> 20 21 20* +> 30 31 30* +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(B BINARY) AS VALUES X'00'; +> ok + +ALTER TABLE TEST ALTER COLUMN B SET DATA TYPE BINARY(2); +> ok + +TABLE TEST; +>> X'0000' + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(D INT DEFAULT 8, G INT GENERATED ALWAYS AS (D + 1), S INT GENERATED ALWAYS AS IDENTITY); +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, IS_IDENTITY, IS_GENERATED, GENERATION_EXPRESSION + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT IS_IDENTITY IS_GENERATED GENERATION_EXPRESSION +> ----------- -------------- ----------- ------------ --------------------- +> D 8 NO NEVER null +> G null NO ALWAYS "D" + 1 +> S null YES NEVER null +> rows: 3 + +ALTER TABLE TEST ALTER COLUMN D SET ON 
UPDATE 1; +> ok + +ALTER TABLE TEST ALTER COLUMN G SET ON UPDATE 1; +> ok + +ALTER TABLE TEST ALTER COLUMN S SET ON UPDATE 1; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, IS_IDENTITY, IS_GENERATED, GENERATION_EXPRESSION, COLUMN_ON_UPDATE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT IS_IDENTITY IS_GENERATED GENERATION_EXPRESSION COLUMN_ON_UPDATE +> ----------- -------------- ----------- ------------ --------------------- ---------------- +> D 8 NO NEVER null 1 +> G null NO ALWAYS "D" + 1 null +> S null YES NEVER null null +> rows: 3 + +ALTER TABLE TEST ALTER COLUMN D DROP ON UPDATE; +> ok + +ALTER TABLE TEST ALTER COLUMN G DROP ON UPDATE; +> ok + +ALTER TABLE TEST ALTER COLUMN S DROP ON UPDATE; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, IS_IDENTITY, IS_GENERATED, GENERATION_EXPRESSION, COLUMN_ON_UPDATE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT IS_IDENTITY IS_GENERATED GENERATION_EXPRESSION COLUMN_ON_UPDATE +> ----------- -------------- ----------- ------------ --------------------- ---------------- +> D 8 NO NEVER null null +> G null NO ALWAYS "D" + 1 null +> S null YES NEVER null null +> rows: 3 + +ALTER TABLE TEST ALTER COLUMN G DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN S DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN D DROP EXPRESSION; +> ok + +ALTER TABLE TEST ALTER COLUMN S DROP EXPRESSION; +> ok + +ALTER TABLE TEST ALTER COLUMN D DROP IDENTITY; +> ok + +ALTER TABLE TEST ALTER COLUMN G DROP IDENTITY; +> ok + +ALTER TABLE TEST ALTER COLUMN G SET DEFAULT ("D" + 2); +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, IS_IDENTITY, IS_GENERATED, GENERATION_EXPRESSION + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT IS_IDENTITY IS_GENERATED GENERATION_EXPRESSION +> ----------- -------------- ----------- ------------ --------------------- +> D 8 NO NEVER null +> G null NO ALWAYS "D" + 1 +> S null YES NEVER null +> rows: 3 
+ +ALTER TABLE TEST ALTER COLUMN D DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN G DROP EXPRESSION; +> ok + +ALTER TABLE TEST ALTER COLUMN S DROP IDENTITY; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, IS_IDENTITY, IS_GENERATED, GENERATION_EXPRESSION + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT IS_IDENTITY IS_GENERATED GENERATION_EXPRESSION +> ----------- -------------- ----------- ------------ --------------------- +> D null NO NEVER null +> G null NO NEVER null +> S null NO NEVER null +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY(START WITH 10 MINVALUE 3 INCREMENT BY 2 CYCLE CACHE 16), V INT); +> ok + +INSERT INTO TEST(V) VALUES 1, 2; +> update count: 2 + +DELETE FROM TEST WHERE V = 2; +> update count: 1 + +SELECT COLUMN_NAME, DATA_TYPE, IS_IDENTITY, IDENTITY_START, IDENTITY_INCREMENT, IDENTITY_MAXIMUM, IDENTITY_MINIMUM, + IDENTITY_CYCLE, IDENTITY_BASE, IDENTITY_CACHE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME DATA_TYPE IS_IDENTITY IDENTITY_START IDENTITY_INCREMENT IDENTITY_MAXIMUM IDENTITY_MINIMUM IDENTITY_CYCLE IDENTITY_BASE IDENTITY_CACHE +> ----------- --------- ----------- -------------- ------------------ ------------------- ---------------- -------------- ------------- -------------- +> ID BIGINT YES 10 2 9223372036854775807 3 YES 14 16 +> rows: 1 + +ALTER TABLE TEST ALTER COLUMN ID SET DATA TYPE INTEGER; +> ok + +SELECT COLUMN_NAME, DATA_TYPE, IS_IDENTITY, IDENTITY_START, IDENTITY_INCREMENT, IDENTITY_MAXIMUM, IDENTITY_MINIMUM, + IDENTITY_CYCLE, IDENTITY_BASE, IDENTITY_CACHE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME DATA_TYPE IS_IDENTITY IDENTITY_START IDENTITY_INCREMENT IDENTITY_MAXIMUM IDENTITY_MINIMUM IDENTITY_CYCLE IDENTITY_BASE IDENTITY_CACHE +> ----------- --------- ----------- -------------- ------------------ ---------------- 
---------------- -------------- ------------- -------------- +> ID INTEGER YES 10 2 2147483647 3 YES 14 16 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY, V INT); +> ok + +SELECT COLUMN_NAME, IS_IDENTITY, IDENTITY_GENERATION + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME IS_IDENTITY IDENTITY_GENERATION +> ----------- ----------- ------------------- +> ID YES ALWAYS +> rows: 1 + +INSERT INTO TEST(V) VALUES 10; +> update count: 1 + +INSERT INTO TEST(ID, V) VALUES (2, 20); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +UPDATE TEST SET ID = ID + 1; +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +MERGE INTO TEST(ID, V) KEY(V) VALUES (2, 10); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +MERGE INTO TEST USING (VALUES (2, 20)) S(ID, V) ON TEST.ID = S.ID + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.V); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +@reconnect + +SELECT COLUMN_NAME, IS_IDENTITY, IDENTITY_GENERATION + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME IS_IDENTITY IDENTITY_GENERATION +> ----------- ----------- ------------------- +> ID YES ALWAYS +> rows: 1 + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ----------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" BIGINT GENERATED ALWAYS AS IDENTITY(START WITH 1 RESTART WITH 2) NOT NULL, "V" INTEGER ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT, V INT); +> ok + +ALTER TABLE TEST ALTER COLUMN ID SET GENERATED ALWAYS; +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +SELECT COLUMN_NAME, IS_IDENTITY, IDENTITY_GENERATION, IDENTITY_BASE + FROM 
INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME IS_IDENTITY IDENTITY_GENERATION IDENTITY_BASE +> ----------- ----------- ------------------- ------------- +> ID YES ALWAYS 2 +> rows: 1 + +ALTER TABLE TEST ALTER COLUMN ID SET GENERATED BY DEFAULT; +> ok + +INSERT INTO TEST(V) VALUES 2; +> update count: 1 + +SELECT COLUMN_NAME, IS_IDENTITY, IDENTITY_GENERATION, IDENTITY_BASE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME IS_IDENTITY IDENTITY_GENERATION IDENTITY_BASE +> ----------- ----------- ------------------- ------------- +> ID YES BY DEFAULT 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT DEFAULT 1, B INT DEFAULT 2 DEFAULT ON NULL); +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, DEFAULT_ON_NULL FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT DEFAULT_ON_NULL +> ----------- -------------- --------------- +> A 1 FALSE +> B 2 TRUE +> rows: 2 + +ALTER TABLE TEST ALTER COLUMN A SET DEFAULT ON NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN B DROP DEFAULT ON NULL; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, DEFAULT_ON_NULL FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT DEFAULT_ON_NULL +> ----------- -------------- --------------- +> A 1 TRUE +> B 2 FALSE +> rows: 2 + +ALTER TABLE TEST ALTER COLUMN A SET DEFAULT ON NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN B DROP DEFAULT ON NULL; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, DEFAULT_ON_NULL FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT DEFAULT_ON_NULL +> ----------- -------------- --------------- +> A 1 TRUE +> B 2 FALSE +> rows: 2 DROP TABLE TEST; > ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableDropColumn.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableDropColumn.sql index 76a42a4dc5..a7825a5e18 100644 --- 
a/h2/src/test/org/h2/test/scripts/ddl/alterTableDropColumn.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableDropColumn.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -16,7 +16,7 @@ ALTER TABLE IF EXISTS TEST DROP COLUMN A; > ok ALTER TABLE TEST DROP COLUMN A; -> exception TABLE_OR_VIEW_NOT_FOUND_1 +> exception TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 CREATE TABLE TEST(A INT, B INT, C INT, D INT, E INT, F INT, G INT, H INT, I INT, J INT); > ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableDropConstraint.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableDropConstraint.sql new file mode 100644 index 0000000000..2be6935581 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableDropConstraint.sql @@ -0,0 +1,19 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE A(A INT PRIMARY KEY); +> ok + +CREATE TABLE B(B INT PRIMARY KEY, A INT CONSTRAINT C REFERENCES A(A)); +> ok + +ALTER TABLE A DROP CONSTRAINT C; +> exception CONSTRAINT_NOT_FOUND_1 + +ALTER TABLE B DROP CONSTRAINT C; +> ok + +DROP TABLE B, A; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableRename.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableRename.sql index d499a0b876..53683cb754 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/alterTableRename.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableRename.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableRenameConstraint.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableRenameConstraint.sql new file mode 100644 index 0000000000..6c1dbdc4a1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableRenameConstraint.sql @@ -0,0 +1,19 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE A(A INT PRIMARY KEY); +> ok + +CREATE TABLE B(B INT PRIMARY KEY, A INT CONSTRAINT C REFERENCES A(A)); +> ok + +ALTER TABLE A RENAME CONSTRAINT C TO C1; +> exception CONSTRAINT_NOT_FOUND_1 + +ALTER TABLE B RENAME CONSTRAINT C TO C1; +> ok + +DROP TABLE B, A; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/analyze.sql b/h2/src/test/org/h2/test/scripts/ddl/analyze.sql new file mode 100644 index 0000000000..706fe121f9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/analyze.sql @@ -0,0 +1,67 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(X INT, B BLOB(1)); +> ok + +INSERT INTO TEST(X) VALUES 1, 2, 3, 3, NULL, NULL; +> update count: 6 + +ANALYZE TABLE TEST; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 66 + +INSERT INTO TEST(X) VALUES 6, 7, 8, 9; +> update count: 4 + +ANALYZE TABLE TEST; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 80 + +TRUNCATE TABLE TEST; +> update count: 10 + +INSERT INTO TEST(X) VALUES 1, 2, 3; +> update count: 3 + +ANALYZE TABLE TEST; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 100 + +TRUNCATE TABLE TEST; +> update count: 3 + +INSERT INTO TEST(X) VALUES 1, 1, 1, 1; +> update count: 4 + +ANALYZE TABLE TEST; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 25 + +ANALYZE TABLE TEST SAMPLE_SIZE 3; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 33 + +TRUNCATE TABLE TEST; +> update count: 4 + +ANALYZE TABLE TEST; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 50 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/commentOn.sql b/h2/src/test/org/h2/test/scripts/ddl/commentOn.sql new file mode 100644 index 0000000000..ea9d89b0a8 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/commentOn.sql @@ -0,0 +1,66 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT COMMENT NULL, B INT COMMENT '', C INT COMMENT 'comment 1', D INT COMMENT 'comment 2'); +> ok + +SELECT COLUMN_NAME, REMARKS FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME REMARKS +> ----------- --------- +> A null +> B null +> C comment 1 +> D comment 2 +> rows: 4 + +COMMENT ON COLUMN TEST.A IS 'comment 3'; +> ok + +COMMENT ON COLUMN TEST.B IS 'comment 4'; +> ok + +COMMENT ON COLUMN TEST.C IS NULL; +> ok + +COMMENT ON COLUMN TEST.D IS ''; +> ok + +SELECT COLUMN_NAME, REMARKS FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME REMARKS +> ----------- --------- +> A comment 3 +> B comment 4 +> C null +> D null +> rows: 4 + +DROP TABLE TEST; +> ok + +CREATE USER U1 COMMENT NULL PASSWORD '1'; +> ok + +CREATE USER U2 COMMENT '' PASSWORD '1'; +> ok + +CREATE USER U3 COMMENT 'comment' PASSWORD '1'; +> ok + +SELECT USER_NAME, REMARKS FROM INFORMATION_SCHEMA.USERS WHERE USER_NAME LIKE 'U_'; +> USER_NAME REMARKS +> --------- ------- +> U1 null +> U2 null +> U3 comment +> rows: 3 + +DROP USER U1; +> ok + +DROP USER U2; +> ok + +DROP USER U3; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createAlias.sql b/h2/src/test/org/h2/test/scripts/ddl/createAlias.sql index d7ff04533e..3a4234e1f3 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/createAlias.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/createAlias.sql @@ -1,21 +1,18 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create alias "SYSDATE" for "java.lang.Integer.parseInt(java.lang.String)"; +create alias "MIN" for 'java.lang.Integer.parseInt(java.lang.String)'; > exception FUNCTION_ALIAS_ALREADY_EXISTS_1 -create alias "MIN" for "java.lang.Integer.parseInt(java.lang.String)"; -> exception FUNCTION_ALIAS_ALREADY_EXISTS_1 - -create alias "CAST" for "java.lang.Integer.parseInt(java.lang.String)"; +create alias "CAST" for 'java.lang.Integer.parseInt(java.lang.String)'; > exception FUNCTION_ALIAS_ALREADY_EXISTS_1 @reconnect off --- function alias --------------------------------------------------------------------------------------------- -CREATE ALIAS MY_SQRT FOR "java.lang.Math.sqrt"; +CREATE ALIAS MY_SQRT FOR 'java.lang.Math.sqrt'; > ok SELECT MY_SQRT(2.0) MS, SQRT(2.0); @@ -36,17 +33,25 @@ SELECT MY_SQRT(-1.0) MS, SQRT(NULL) S; > NaN null > rows: 1 -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT > ---------------------------------------------------------------- -> CREATE FORCE ALIAS "PUBLIC"."MY_SQRT" FOR "java.lang.Math.sqrt"; > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 2 +> CREATE FORCE ALIAS "PUBLIC"."MY_SQRT" FOR 'java.lang.Math.sqrt'; +> rows (ordered): 2 + +SELECT SPECIFIC_NAME, ROUTINE_NAME, ROUTINE_TYPE, DATA_TYPE, ROUTINE_BODY, EXTERNAL_NAME, EXTERNAL_LANGUAGE, + IS_DETERMINISTIC, REMARKS FROM INFORMATION_SCHEMA.ROUTINES; +> SPECIFIC_NAME ROUTINE_NAME ROUTINE_TYPE DATA_TYPE ROUTINE_BODY EXTERNAL_NAME EXTERNAL_LANGUAGE IS_DETERMINISTIC REMARKS +> ------------- ------------ ------------ ---------------- ------------ ------------------- ----------------- ---------------- ------- +> MY_SQRT_1 MY_SQRT FUNCTION DOUBLE PRECISION EXTERNAL java.lang.Math.sqrt JAVA NO null +> rows: 1 -SELECT ALIAS_NAME, JAVA_CLASS, JAVA_METHOD, DATA_TYPE, COLUMN_COUNT, RETURNS_RESULT, REMARKS FROM INFORMATION_SCHEMA.FUNCTION_ALIASES; -> ALIAS_NAME JAVA_CLASS JAVA_METHOD DATA_TYPE COLUMN_COUNT RETURNS_RESULT 
REMARKS -> ---------- -------------- ----------- --------- ------------ -------------- ------- -> MY_SQRT java.lang.Math sqrt 8 1 2 +SELECT SPECIFIC_NAME, ORDINAL_POSITION, PARAMETER_MODE, IS_RESULT, AS_LOCATOR, PARAMETER_NAME, DATA_TYPE, + PARAMETER_DEFAULT FROM INFORMATION_SCHEMA.PARAMETERS; +> SPECIFIC_NAME ORDINAL_POSITION PARAMETER_MODE IS_RESULT AS_LOCATOR PARAMETER_NAME DATA_TYPE PARAMETER_DEFAULT +> ------------- ---------------- -------------- --------- ---------- -------------- ---------------- ----------------- +> MY_SQRT_1 1 IN NO NO P1 DOUBLE PRECISION null > rows: 1 DROP ALIAS MY_SQRT; @@ -55,19 +60,19 @@ DROP ALIAS MY_SQRT; CREATE SCHEMA TEST_SCHEMA; > ok -CREATE ALIAS TRUNC FOR "java.lang.Math.floor(double)"; +CREATE ALIAS TRUNC FOR 'java.lang.Math.floor(double)'; > exception FUNCTION_ALIAS_ALREADY_EXISTS_1 -CREATE ALIAS PUBLIC.TRUNC FOR "java.lang.Math.floor(double)"; +CREATE ALIAS PUBLIC.TRUNC FOR 'java.lang.Math.floor(double)'; > exception FUNCTION_ALIAS_ALREADY_EXISTS_1 -CREATE ALIAS TEST_SCHEMA.TRUNC FOR "java.lang.Math.round(double)"; +CREATE ALIAS TEST_SCHEMA.TRUNC FOR 'java.lang.Math.round(double)'; > exception FUNCTION_ALIAS_ALREADY_EXISTS_1 SET BUILTIN_ALIAS_OVERRIDE=1; > ok -CREATE ALIAS TRUNC FOR "java.lang.Math.floor(double)"; +CREATE ALIAS TRUNC FOR 'java.lang.Math.floor(double)'; > ok SELECT TRUNC(1.5); @@ -79,10 +84,20 @@ SELECT TRUNC(-1.5); DROP ALIAS TRUNC; > ok -CREATE ALIAS PUBLIC.TRUNC FOR "java.lang.Math.floor(double)"; +-- Compatibility syntax with identifier +CREATE ALIAS TRUNC FOR "java.lang.Math.floor(double)"; +> ok + +SELECT TRUNC(-1.5); +>> -2.0 + +DROP ALIAS TRUNC; +> ok + +CREATE ALIAS PUBLIC.TRUNC FOR 'java.lang.Math.floor(double)'; > ok -CREATE ALIAS TEST_SCHEMA.TRUNC FOR "java.lang.Math.round(double)"; +CREATE ALIAS TEST_SCHEMA.TRUNC FOR 'java.lang.Math.round(double)'; > ok SELECT PUBLIC.TRUNC(1.5); diff --git a/h2/src/test/org/h2/test/scripts/ddl/createConstant.sql 
b/h2/src/test/org/h2/test/scripts/ddl/createConstant.sql new file mode 100644 index 0000000000..a2b941ae7a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createConstant.sql @@ -0,0 +1,82 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE SCHEMA CONST; +> ok + +CREATE CONSTANT IF NOT EXISTS ONE VALUE 1; +> ok + +COMMENT ON CONSTANT ONE IS 'Eins'; +> ok + +CREATE CONSTANT IF NOT EXISTS ONE VALUE 1; +> ok + +CREATE CONSTANT CONST.ONE VALUE 1; +> ok + +SELECT CONSTANT_SCHEMA, CONSTANT_NAME, VALUE_DEFINITION, DATA_TYPE, NUMERIC_PRECISION, REMARKS FROM INFORMATION_SCHEMA.CONSTANTS; +> CONSTANT_SCHEMA CONSTANT_NAME VALUE_DEFINITION DATA_TYPE NUMERIC_PRECISION REMARKS +> --------------- ------------- ---------------- --------- ----------------- ------- +> CONST ONE 1 INTEGER 32 null +> PUBLIC ONE 1 INTEGER 32 Eins +> rows: 2 + +SELECT ONE, CONST.ONE; +> 1 1 +> - - +> 1 1 +> rows: 1 + +COMMENT ON CONSTANT ONE IS NULL; +> ok + +DROP SCHEMA CONST CASCADE; +> ok + +SELECT CONSTANT_SCHEMA, CONSTANT_NAME, VALUE_DEFINITION, DATA_TYPE, REMARKS FROM INFORMATION_SCHEMA.CONSTANTS; +> CONSTANT_SCHEMA CONSTANT_NAME VALUE_DEFINITION DATA_TYPE REMARKS +> --------------- ------------- ---------------- --------- ------- +> PUBLIC ONE 1 INTEGER null +> rows: 1 + +DROP CONSTANT ONE; +> ok + +DROP CONSTANT IF EXISTS ONE; +> ok + +create constant abc value 1; +> ok + +call abc; +> 1 +> - +> 1 +> rows: 1 + +drop all objects; +> ok + +call abc; +> exception COLUMN_NOT_FOUND_1 + +create constant abc value 1; +> ok + +comment on constant abc is 'One'; +> ok + +select remarks from information_schema.constants where constant_name = 'ABC'; +>> One + +@reconnect + +select remarks from information_schema.constants where constant_name = 'ABC'; +>> One + +drop constant abc; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createDomain.sql 
b/h2/src/test/org/h2/test/scripts/ddl/createDomain.sql new file mode 100644 index 0000000000..e0936e3b21 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createDomain.sql @@ -0,0 +1,259 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE SCHEMA S1; +> ok + +CREATE SCHEMA S2; +> ok + +CREATE DOMAIN S1.D1 AS INT DEFAULT 1; +> ok + +CREATE DOMAIN S2.D2 AS TIMESTAMP WITH TIME ZONE ON UPDATE CURRENT_TIMESTAMP; +> ok + +CREATE TABLE TEST(C1 S1.D1, C2 S2.D2); +> ok + +SELECT COLUMN_NAME, DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, COLUMN_DEFAULT, COLUMN_ON_UPDATE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME COLUMN_DEFAULT COLUMN_ON_UPDATE +> ----------- -------------- ------------- ----------- -------------- ---------------- +> C1 SCRIPT S1 D1 null null +> C2 SCRIPT S2 D2 null null +> rows (ordered): 2 + +SELECT DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, DOMAIN_DEFAULT, DOMAIN_ON_UPDATE, DATA_TYPE FROM INFORMATION_SCHEMA.DOMAINS; +> DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME DOMAIN_DEFAULT DOMAIN_ON_UPDATE DATA_TYPE +> -------------- ------------- ----------- -------------- ----------------- ------------------------ +> SCRIPT S1 D1 1 null INTEGER +> SCRIPT S2 D2 null CURRENT_TIMESTAMP TIMESTAMP WITH TIME ZONE +> rows: 2 + +DROP TABLE TEST; +> ok + +DROP DOMAIN S1.D1; +> ok + +DROP SCHEMA S1 RESTRICT; +> ok + +DROP SCHEMA S2 RESTRICT; +> exception CANNOT_DROP_2 + +DROP SCHEMA S2 CASCADE; +> ok + +CREATE DOMAIN D INT; +> ok + +CREATE MEMORY TABLE TEST(C D); +> ok + +ALTER DOMAIN D ADD CHECK (VALUE <> 0); +> ok + +ALTER DOMAIN D ADD CONSTRAINT D1 CHECK (VALUE > 0); +> ok + +ALTER DOMAIN D ADD CONSTRAINT D1 CHECK (VALUE > 0); +> exception CONSTRAINT_ALREADY_EXISTS_1 + +ALTER DOMAIN D ADD CONSTRAINT IF NOT EXISTS D1 CHECK (VALUE > 0); +> ok + 
+ALTER DOMAIN X ADD CHECK (VALUE > 0); +> exception DOMAIN_NOT_FOUND_1 + +ALTER DOMAIN IF EXISTS X ADD CHECK (VALUE > 0); +> ok + +INSERT INTO TEST VALUES -1; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D DROP CONSTRAINT D1; +> ok + +ALTER DOMAIN D DROP CONSTRAINT D1; +> exception CONSTRAINT_NOT_FOUND_1 + +ALTER DOMAIN D DROP CONSTRAINT IF EXISTS D1; +> ok + +ALTER DOMAIN IF EXISTS X DROP CONSTRAINT D1; +> ok + +ALTER DOMAIN X DROP CONSTRAINT IF EXISTS D1; +> exception DOMAIN_NOT_FOUND_1 + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE DOMAIN "PUBLIC"."D" AS INTEGER; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "C" "PUBLIC"."D" ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> ALTER DOMAIN "PUBLIC"."D" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4" CHECK(VALUE <> 0) NOCHECK; +> rows (ordered): 5 + +SELECT CONSTRAINT_NAME, DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAIN_CONSTRAINTS; +> CONSTRAINT_NAME DOMAIN_NAME +> --------------- ----------- +> CONSTRAINT_4 D +> rows: 1 + +TABLE INFORMATION_SCHEMA.CHECK_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME CHECK_CLAUSE +> ------------------ ----------------- --------------- ------------ +> SCRIPT PUBLIC CONSTRAINT_4 VALUE <> 0 +> rows: 1 + +SELECT COUNT(*) FROM INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE; +>> 0 + +INSERT INTO TEST VALUES -1; +> update count: 1 + +INSERT INTO TEST VALUES 0; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +DROP DOMAIN D RESTRICT; +> exception CANNOT_DROP_2 + +DROP DOMAIN D CASCADE; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "C" INTEGER ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" 
VALUES (-1); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" CHECK("C" <> 0) NOCHECK; +> rows (ordered): 5 + +SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE, TABLE_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS; +> CONSTRAINT_NAME CONSTRAINT_TYPE TABLE_NAME +> --------------- --------------- ---------- +> CONSTRAINT_2 CHECK TEST +> rows: 1 + +TABLE INFORMATION_SCHEMA.CHECK_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME CHECK_CLAUSE +> ------------------ ----------------- --------------- ------------ +> SCRIPT PUBLIC CONSTRAINT_2 "C" <> 0 +> rows: 1 + +TABLE INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE; +> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME +> ------------- ------------ ---------- ----------- ------------------ ----------------- --------------- +> SCRIPT PUBLIC TEST C SCRIPT PUBLIC CONSTRAINT_2 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE DOMAIN D1 AS INT DEFAULT 1 CHECK (VALUE >= 1); +> ok + +CREATE DOMAIN D2 AS D1 DEFAULT 2; +> ok + +CREATE DOMAIN D3 AS D1 CHECK (VALUE >= 3); +> ok + +CREATE DOMAIN D4 AS D1 DEFAULT 4 CHECK (VALUE >= 4); +> ok + +SELECT DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, DOMAIN_DEFAULT, DOMAIN_ON_UPDATE, DATA_TYPE, NUMERIC_PRECISION, + PARENT_DOMAIN_CATALOG, PARENT_DOMAIN_SCHEMA, PARENT_DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME DOMAIN_DEFAULT DOMAIN_ON_UPDATE DATA_TYPE NUMERIC_PRECISION PARENT_DOMAIN_CATALOG PARENT_DOMAIN_SCHEMA PARENT_DOMAIN_NAME +> -------------- ------------- ----------- -------------- ---------------- --------- ----------------- --------------------- -------------------- ------------------ +> SCRIPT PUBLIC D1 1 null INTEGER 32 null null null +> SCRIPT PUBLIC D2 2 null INTEGER 32 SCRIPT PUBLIC D1 +> SCRIPT PUBLIC D3 null null INTEGER 32 SCRIPT PUBLIC D1 +> SCRIPT PUBLIC D4 4 null INTEGER 32 SCRIPT PUBLIC D1 +> rows: 4 + +SELECT DOMAIN_NAME, 
CHECK_CLAUSE FROM INFORMATION_SCHEMA.DOMAIN_CONSTRAINTS D JOIN INFORMATION_SCHEMA.CHECK_CONSTRAINTS C + ON D.CONSTRAINT_CATALOG = C.CONSTRAINT_CATALOG AND D.CONSTRAINT_SCHEMA = C.CONSTRAINT_SCHEMA AND D.CONSTRAINT_NAME = C.CONSTRAINT_NAME + WHERE C.CONSTRAINT_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME CHECK_CLAUSE +> ----------- ------------ +> D1 VALUE >= 1 +> D3 VALUE >= 3 +> D4 VALUE >= 4 +> rows: 3 + +VALUES CAST(0 AS D2); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +DROP DOMAIN D1; +> exception CANNOT_DROP_2 + +DROP DOMAIN D1 CASCADE; +> ok + +SELECT DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, DOMAIN_DEFAULT, DOMAIN_ON_UPDATE, DATA_TYPE, NUMERIC_PRECISION, + PARENT_DOMAIN_CATALOG, PARENT_DOMAIN_SCHEMA, PARENT_DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME DOMAIN_DEFAULT DOMAIN_ON_UPDATE DATA_TYPE NUMERIC_PRECISION PARENT_DOMAIN_CATALOG PARENT_DOMAIN_SCHEMA PARENT_DOMAIN_NAME +> -------------- ------------- ----------- -------------- ---------------- --------- ----------------- --------------------- -------------------- ------------------ +> SCRIPT PUBLIC D2 2 null INTEGER 32 null null null +> SCRIPT PUBLIC D3 1 null INTEGER 32 null null null +> SCRIPT PUBLIC D4 4 null INTEGER 32 null null null +> rows: 3 + +SELECT DOMAIN_NAME, CHECK_CLAUSE FROM INFORMATION_SCHEMA.DOMAIN_CONSTRAINTS D JOIN INFORMATION_SCHEMA.CHECK_CONSTRAINTS C + ON D.CONSTRAINT_CATALOG = C.CONSTRAINT_CATALOG AND D.CONSTRAINT_SCHEMA = C.CONSTRAINT_SCHEMA AND D.CONSTRAINT_NAME = C.CONSTRAINT_NAME + WHERE C.CONSTRAINT_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME CHECK_CLAUSE +> ----------- ------------ +> D2 VALUE >= 1 +> D3 VALUE >= 1 +> D3 VALUE >= 3 +> D4 VALUE >= 1 +> D4 VALUE >= 4 +> rows: 5 + +DROP DOMAIN D2; +> ok + +DROP DOMAIN D3; +> ok + +DROP DOMAIN D4; +> ok + +CREATE DOMAIN D1 INT; +> ok + +CREATE DOMAIN D2 INT; +> ok + +DROP DOMAIN D1; +> ok + +CREATE DOMAIN D3 D2; +> ok + +@reconnect + +DROP DOMAIN D3; +> ok + +DROP DOMAIN D2; +> 
ok + +CREATE DOMAIN D AS CHARACTER VARYING CHECK (VALUE LIKE '%1%'); +> ok + +ALTER DOMAIN D ADD CHECK (VALUE ILIKE '%2%'); +> ok + +DROP DOMAIN D; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createIndex.sql b/h2/src/test/org/h2/test/scripts/ddl/createIndex.sql new file mode 100644 index 0000000000..4f99d98afe --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createIndex.sql @@ -0,0 +1,34 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(G GEOMETRY); +> ok + +CREATE UNIQUE SPATIAL INDEX IDX ON TEST(G); +> exception SYNTAX_ERROR_2 + +CREATE HASH SPATIAL INDEX IDX ON TEST(G); +> exception SYNTAX_ERROR_2 + +CREATE UNIQUE HASH SPATIAL INDEX IDX ON TEST(G); +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +CREATE INDEX TEST_IDX ON TEST(C) INCLUDE(B); +> exception SYNTAX_ERROR_1 + +CREATE UNIQUE INDEX TEST_IDX ON TEST(C) INCLUDE(B); +> ok + +DROP INDEX TEST_IDX; +> ok + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createSchema.sql b/h2/src/test/org/h2/test/scripts/ddl/createSchema.sql new file mode 100644 index 0000000000..e48583182e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createSchema.sql @@ -0,0 +1,64 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE USER TEST_USER PASSWORD 'test'; +> ok + +CREATE ROLE TEST_ROLE; +> ok + +CREATE SCHEMA S1; +> ok + +CREATE SCHEMA S2 AUTHORIZATION TEST_USER; +> ok + +CREATE SCHEMA S3 AUTHORIZATION TEST_ROLE; +> ok + +CREATE SCHEMA AUTHORIZATION TEST_USER; +> ok + +CREATE SCHEMA AUTHORIZATION TEST_ROLE; +> ok + +TABLE INFORMATION_SCHEMA.SCHEMATA; +> CATALOG_NAME SCHEMA_NAME SCHEMA_OWNER DEFAULT_CHARACTER_SET_CATALOG DEFAULT_CHARACTER_SET_SCHEMA DEFAULT_CHARACTER_SET_NAME SQL_PATH DEFAULT_COLLATION_NAME REMARKS +> ------------ ------------------ ------------ ----------------------------- ---------------------------- -------------------------- -------- ---------------------- ------- +> SCRIPT INFORMATION_SCHEMA SA SCRIPT PUBLIC Unicode null OFF null +> SCRIPT PUBLIC SA SCRIPT PUBLIC Unicode null OFF null +> SCRIPT S1 SA SCRIPT PUBLIC Unicode null OFF null +> SCRIPT S2 TEST_USER SCRIPT PUBLIC Unicode null OFF null +> SCRIPT S3 TEST_ROLE SCRIPT PUBLIC Unicode null OFF null +> SCRIPT TEST_ROLE TEST_ROLE SCRIPT PUBLIC Unicode null OFF null +> SCRIPT TEST_USER TEST_USER SCRIPT PUBLIC Unicode null OFF null +> rows: 7 + +DROP SCHEMA S1; +> ok + +DROP SCHEMA S2; +> ok + +DROP SCHEMA S3; +> ok + +DROP USER TEST_USER; +> exception CANNOT_DROP_2 + +DROP ROLE TEST_ROLE; +> exception CANNOT_DROP_2 + +DROP SCHEMA TEST_USER; +> ok + +DROP SCHEMA TEST_ROLE; +> ok + +DROP USER TEST_USER; +> ok + +DROP ROLE TEST_ROLE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createSequence.sql b/h2/src/test/org/h2/test/scripts/ddl/createSequence.sql index cb8ea1caed..e6f3cb8d29 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/createSequence.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/createSequence.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -10,19 +10,19 @@ DROP SEQUENCE SEQ; > ok CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY 1 MINVALUE 0 MAXVALUE 0; -> exception SEQUENCE_ATTRIBUTES_INVALID +> exception SEQUENCE_ATTRIBUTES_INVALID_7 CREATE SEQUENCE SEQ START WITH 1 INCREMENT BY 1 MINVALUE 1 MAXVALUE 0; -> exception SEQUENCE_ATTRIBUTES_INVALID +> exception SEQUENCE_ATTRIBUTES_INVALID_7 CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY 0 MINVALUE 0 MAXVALUE 1; -> exception SEQUENCE_ATTRIBUTES_INVALID +> exception SEQUENCE_ATTRIBUTES_INVALID_7 CREATE SEQUENCE SEQ START WITH 1 INCREMENT BY 1 MINVALUE 2 MAXVALUE 10; -> exception SEQUENCE_ATTRIBUTES_INVALID +> exception SEQUENCE_ATTRIBUTES_INVALID_7 CREATE SEQUENCE SEQ START WITH 20 INCREMENT BY 1 MINVALUE 1 MAXVALUE 10; -> exception SEQUENCE_ATTRIBUTES_INVALID +> exception SEQUENCE_ATTRIBUTES_INVALID_7 CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY 9223372036854775807 MINVALUE -9223372036854775808 MAXVALUE 9223372036854775807; > ok @@ -30,54 +30,118 @@ CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY 9223372036854775807 MINVALUE -9223 DROP SEQUENCE SEQ; > ok +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY 9223372036854775807 MINVALUE -9223372036854775808 MAXVALUE 9223372036854775807 CACHE 2; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE -9223372036854775808 MAXVALUE 9223372036854775807; > ok DROP SEQUENCE SEQ; > ok -CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE -1 MAXVALUE 9223372036854775807; +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE -9223372036854775808 MAXVALUE 9223372036854775807 CACHE 2; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE -1 MAXVALUE 9223372036854775807 NO CACHE; > ok DROP SEQUENCE SEQ; > ok -CREATE 
SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE 0 MAXVALUE 9223372036854775807; -> exception SEQUENCE_ATTRIBUTES_INVALID +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE 0 MAXVALUE 9223372036854775807 NO CACHE; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE -1 MAXVALUE 9223372036854775807 CACHE 2; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ CACHE -1; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ MINVALUE 10 START WITH 9 RESTART WITH 10; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ MAXVALUE 10 START WITH 11 RESTART WITH 1; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 CREATE SEQUENCE SEQ START WITH 0 MINVALUE -10 MAXVALUE 10; > ok -SELECT SEQUENCE_NAME, CURRENT_VALUE, INCREMENT, CACHE, MIN_VALUE, MAX_VALUE, IS_CYCLE +SELECT SEQUENCE_NAME, START_VALUE, MINIMUM_VALUE, MAXIMUM_VALUE, INCREMENT, CYCLE_OPTION, BASE_VALUE, CACHE FROM INFORMATION_SCHEMA.SEQUENCES; -> SEQUENCE_NAME CURRENT_VALUE INCREMENT CACHE MIN_VALUE MAX_VALUE IS_CYCLE -> ------------- ------------- --------- ----- --------- --------- -------- -> SEQ -1 1 32 -10 10 FALSE +> SEQUENCE_NAME START_VALUE MINIMUM_VALUE MAXIMUM_VALUE INCREMENT CYCLE_OPTION BASE_VALUE CACHE +> ------------- ----------- ------------- ------------- --------- ------------ ---------- ----- +> SEQ 0 -10 10 1 NO 0 21 > rows: 1 ALTER SEQUENCE SEQ NO MINVALUE NO MAXVALUE; > ok -SELECT SEQUENCE_NAME, CURRENT_VALUE, INCREMENT, CACHE, MIN_VALUE, MAX_VALUE, IS_CYCLE +SELECT SEQUENCE_NAME, START_VALUE, MINIMUM_VALUE, MAXIMUM_VALUE, INCREMENT, CYCLE_OPTION, BASE_VALUE, CACHE FROM INFORMATION_SCHEMA.SEQUENCES; -> SEQUENCE_NAME CURRENT_VALUE INCREMENT CACHE MIN_VALUE MAX_VALUE IS_CYCLE -> ------------- ------------- --------- ----- --------- ------------------- -------- -> SEQ -1 1 32 0 9223372036854775807 FALSE +> SEQUENCE_NAME START_VALUE MINIMUM_VALUE 
MAXIMUM_VALUE INCREMENT CYCLE_OPTION BASE_VALUE CACHE +> ------------- ----------- ------------- ------------------- --------- ------------ ---------- ----- +> SEQ 0 0 9223372036854775807 1 NO 0 21 > rows: 1 ALTER SEQUENCE SEQ MINVALUE -100 MAXVALUE 100; > ok -SELECT SEQUENCE_NAME, CURRENT_VALUE, INCREMENT, CACHE, MIN_VALUE, MAX_VALUE, IS_CYCLE +SELECT SEQUENCE_NAME, START_VALUE, MINIMUM_VALUE, MAXIMUM_VALUE, INCREMENT, CYCLE_OPTION, BASE_VALUE, CACHE + FROM INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME START_VALUE MINIMUM_VALUE MAXIMUM_VALUE INCREMENT CYCLE_OPTION BASE_VALUE CACHE +> ------------- ----------- ------------- ------------- --------- ------------ ---------- ----- +> SEQ 0 -100 100 1 NO 0 21 +> rows: 1 + +VALUES NEXT VALUE FOR SEQ; +>> 0 + +ALTER SEQUENCE SEQ START WITH 10; +> ok + +SELECT SEQUENCE_NAME, START_VALUE, MINIMUM_VALUE, MAXIMUM_VALUE, INCREMENT, CYCLE_OPTION, BASE_VALUE, CACHE FROM INFORMATION_SCHEMA.SEQUENCES; -> SEQUENCE_NAME CURRENT_VALUE INCREMENT CACHE MIN_VALUE MAX_VALUE IS_CYCLE -> ------------- ------------- --------- ----- --------- --------- -------- -> SEQ -1 1 32 -100 100 FALSE +> SEQUENCE_NAME START_VALUE MINIMUM_VALUE MAXIMUM_VALUE INCREMENT CYCLE_OPTION BASE_VALUE CACHE +> ------------- ----------- ------------- ------------- --------- ------------ ---------- ----- +> SEQ 10 -100 100 1 NO 1 21 +> rows: 1 + +VALUES NEXT VALUE FOR SEQ; +>> 1 + +ALTER SEQUENCE SEQ RESTART; +> ok + +VALUES NEXT VALUE FOR SEQ; +>> 10 + +ALTER SEQUENCE SEQ START WITH 5 RESTART WITH 20; +> ok + +VALUES NEXT VALUE FOR SEQ; +>> 20 + +@reconnect + +SELECT SEQUENCE_NAME, START_VALUE, MINIMUM_VALUE, MAXIMUM_VALUE, INCREMENT, CYCLE_OPTION, BASE_VALUE, CACHE + FROM INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME START_VALUE MINIMUM_VALUE MAXIMUM_VALUE INCREMENT CYCLE_OPTION BASE_VALUE CACHE +> ------------- ----------- ------------- ------------- --------- ------------ ---------- ----- +> SEQ 5 -100 100 1 NO 21 21 > rows: 1 DROP SEQUENCE SEQ; > ok +CREATE 
SEQUENCE SEQ START WITH 10 RESTART WITH 20; +> ok + +VALUES NEXT VALUE FOR SEQ; +>> 20 + +DROP SEQUENCE SEQ; +> ok + SET AUTOCOMMIT OFF; > ok @@ -98,3 +162,35 @@ COMMIT; SET AUTOCOMMIT ON; > ok + +CREATE SEQUENCE SEQ MINVALUE 1 MAXVALUE 10 INCREMENT BY -1; +> ok + +VALUES NEXT VALUE FOR SEQ, NEXT VALUE FOR SEQ; +> C1 +> -- +> 10 +> 9 +> rows: 2 + +ALTER SEQUENCE SEQ RESTART; +> ok + +VALUES NEXT VALUE FOR SEQ, NEXT VALUE FOR SEQ; +> C1 +> -- +> 10 +> 9 +> rows: 2 + +ALTER SEQUENCE SEQ RESTART WITH 1; +> ok + +VALUES NEXT VALUE FOR SEQ; +>> 1 + +VALUES NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +DROP SEQUENCE SEQ; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createSynonym.sql b/h2/src/test/org/h2/test/scripts/ddl/createSynonym.sql index 4e3bd509be..b359f386a7 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/createSynonym.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/createSynonym.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/ddl/createTable.sql b/h2/src/test/org/h2/test/scripts/ddl/createTable.sql index 07e25e055e..01d94e367a 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/createTable.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/createTable.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -99,20 +99,16 @@ DROP TABLE TEST; CREATE TABLE TEST1(ID IDENTITY); > ok -CREATE TABLE TEST2(ID BIGINT IDENTITY); -> ok - -CREATE TABLE TEST3(ID BIGINT GENERATED BY DEFAULT AS IDENTITY); +CREATE TABLE TEST2(ID BIGINT GENERATED BY DEFAULT AS IDENTITY); > ok SELECT CONSTRAINT_TYPE, TABLE_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_SCHEMA = 'PUBLIC'; > CONSTRAINT_TYPE TABLE_NAME > --------------- ---------- > PRIMARY KEY TEST1 -> PRIMARY KEY TEST2 -> rows: 2 +> rows: 1 -DROP TABLE TEST1, TEST2, TEST3; +DROP TABLE TEST1, TEST2; > ok CREATE TABLE TEST(A); @@ -121,13 +117,151 @@ CREATE TABLE TEST(A); CREATE TABLE TEST(A, B, C) AS SELECT 1, 2, CAST ('A' AS VARCHAR); > ok -SELECT COLUMN_NAME, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; -> COLUMN_NAME COLUMN_TYPE -> ----------- ----------- +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME DATA_TYPE +> ----------- ----------------- > A INTEGER > B INTEGER -> C VARCHAR +> C CHARACTER VARYING +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(A INT, B INT GENERATED ALWAYS AS (1), C INT GENERATED ALWAYS AS (B + 1)); +> exception COLUMN_NOT_FOUND_1 + +CREATE MEMORY TABLE TEST(A INT, B INT GENERATED ALWAYS AS (1), C INT GENERATED ALWAYS AS (A + 1)); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A" INTEGER, "B" INTEGER GENERATED ALWAYS AS (1), "C" INTEGER GENERATED ALWAYS AS ("A" + 1) ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT GENERATED BY DEFAULT AS (1)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST(A IDENTITY GENERATED ALWAYS AS (1)); +> exception 
SYNTAX_ERROR_2 + +CREATE TABLE TEST(A IDENTITY AS (1)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST1(ID BIGINT GENERATED ALWAYS AS IDENTITY); +> ok + +CREATE TABLE TEST2(ID BIGINT GENERATED BY DEFAULT AS IDENTITY); +> ok + +CREATE TABLE TEST3(ID BIGINT NULL GENERATED ALWAYS AS IDENTITY); +> exception COLUMN_MUST_NOT_BE_NULLABLE_1 + +CREATE TABLE TEST3(ID BIGINT GENERATED BY DEFAULT AS IDENTITY NULL); +> exception COLUMN_MUST_NOT_BE_NULLABLE_1 + +SELECT COLUMN_NAME, IDENTITY_GENERATION, IS_NULLABLE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME IDENTITY_GENERATION IS_NULLABLE +> ----------- ------------------- ----------- +> ID ALWAYS NO +> ID BY DEFAULT NO +> rows: 2 + +DROP TABLE TEST1, TEST2; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY(MINVALUE 1 MAXVALUE 2), V INT); +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +SELECT IDENTITY_BASE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +>> 2 + +INSERT INTO TEST(V) VALUES 2; +> update count: 1 + +SELECT IDENTITY_BASE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +>> null + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, V INT); +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +INSERT INTO TEST VALUES (2, 2); +> update count: 1 + +INSERT INTO TEST(V) VALUES 3; +> exception DUPLICATE_KEY_1 + +TABLE TEST; +> ID V +> -- - +> 1 1 +> 2 2 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST1(R BIGINT GENERATED BY DEFAULT AS IDENTITY); +> ok + +SET MODE HSQLDB; +> ok + +CREATE TABLE TEST2(M BIGINT GENERATED BY DEFAULT AS IDENTITY); +> ok + +SET MODE MySQL; +> ok + +CREATE TABLE TEST3(H BIGINT GENERATED BY DEFAULT AS IDENTITY); +> ok + +SET MODE Regular; +> ok + +SELECT COLUMN_NAME, DEFAULT_ON_NULL FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME DEFAULT_ON_NULL +> ----------- --------------- 
+> H TRUE +> M TRUE +> R FALSE > rows: 3 +DROP TABLE TEST1, TEST2, TEST3; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST(' || (SELECT LISTAGG('C' || X || ' INT') FROM SYSTEM_RANGE(1, 16384)) || ')'; +> ok + +DROP TABLE TEST; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST(' || (SELECT LISTAGG('C' || X || ' INT') FROM SYSTEM_RANGE(1, 16385)) || ')'; +> exception TOO_MANY_COLUMNS_1 + +CREATE TABLE TEST AS (SELECT REPEAT('A', 300)); +> ok + +TABLE TEST; +> C1 +> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +> AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +> rows: 1 + DROP TABLE TEST; > ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createTrigger.sql b/h2/src/test/org/h2/test/scripts/ddl/createTrigger.sql index 3b6db9e7be..672263520a 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/createTrigger.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/createTrigger.sql @@ -1,12 +1,12 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- CREATE TABLE COUNT(X INT); > ok -CREATE FORCE TRIGGER T_COUNT BEFORE INSERT ON COUNT CALL "com.Unknown"; +CREATE FORCE TRIGGER T_COUNT BEFORE INSERT ON COUNT CALL 'com.Unknown'; > ok INSERT INTO COUNT VALUES(NULL); @@ -30,6 +30,80 @@ insert into items values(DEFAULT); drop table items, count; > ok +CREATE TABLE TEST(A VARCHAR, B VARCHAR, C VARCHAR); +> ok + +CREATE TRIGGER T1 BEFORE INSERT, UPDATE ON TEST FOR EACH ROW CALL 'org.h2.test.scripts.Trigger1'; +> ok + +INSERT INTO TEST VALUES ('a', 'b', 'c'); +> exception ERROR_EXECUTING_TRIGGER_3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A VARCHAR, B VARCHAR, C INT); +> ok + +CREATE TRIGGER T1 BEFORE INSERT ON TEST FOR EACH ROW CALL 'org.h2.test.scripts.Trigger1'; +> ok + +INSERT INTO TEST VALUES ('1', 'a', 1); +> update count: 1 + +DROP TRIGGER T1; +> ok + +CREATE TRIGGER T1 BEFORE INSERT ON TEST FOR EACH STATEMENT CALL 'org.h2.test.scripts.Trigger1'; +> ok + +INSERT INTO TEST VALUES ('2', 'b', 2); +> update count: 1 + +DROP TRIGGER T1; +> ok + +TABLE TEST; +> A B C +> - - -- +> 1 a 10 +> 2 b 2 +> rows: 2 + +DROP TABLE TEST; +> ok + +-- --------------------------------------------------------------------------- +-- Checking multiple classes in trigger source +-- --------------------------------------------------------------------------- + +CREATE TABLE TEST(A VARCHAR, B VARCHAR, C VARCHAR); +> ok + +CREATE TRIGGER T1 BEFORE INSERT, UPDATE ON TEST FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + if (newRow != null) { + newRow[2] = newRow[2] + "1"\u003B + } + } + }\u003B +}'); +> ok + +INSERT INTO TEST VALUES ('a', 'b', 'c'); +> update count: 1 + +TABLE TEST; +> A B C +> - - -- +> a b c1 +> rows: 1 + +DROP TABLE TEST; +> ok + -- --------------------------------------------------------------------------- -- PostgreSQL syntax tests -- 
--------------------------------------------------------------------------- @@ -43,7 +117,7 @@ CREATE TABLE COUNT(X INT); INSERT INTO COUNT VALUES(1); > update count: 1 -CREATE FORCE TRIGGER T_COUNT BEFORE INSERT OR UPDATE ON COUNT CALL "com.Unknown"; +CREATE FORCE TRIGGER T_COUNT BEFORE INSERT OR UPDATE ON COUNT CALL 'com.Unknown'; > ok INSERT INTO COUNT VALUES(NULL); @@ -51,3 +125,106 @@ INSERT INTO COUNT VALUES(NULL); UPDATE COUNT SET X=2 WHERE X=1; > exception ERROR_CREATING_TRIGGER_OBJECT_3 + +DROP TABLE COUNT; +> ok + +SET MODE Regular; +> ok + +CREATE MEMORY TABLE T(ID INT PRIMARY KEY, V INT); +> ok + +CREATE VIEW V1 AS TABLE T; +> ok + +CREATE VIEW V2 AS TABLE T; +> ok + +CREATE VIEW V3 AS TABLE T; +> ok + +CREATE TRIGGER T1 INSTEAD OF INSERT ON V1 FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> ok + +CREATE TRIGGER T2 INSTEAD OF UPDATE ON V2 FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> ok + +CREATE TRIGGER T3 INSTEAD OF DELETE ON V3 FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> ok + +SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, IS_INSERTABLE_INTO, COMMIT_ACTION + FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE IS_INSERTABLE_INTO COMMIT_ACTION +> ------------- ------------ ---------- ---------- ------------------ ------------- +> SCRIPT PUBLIC T BASE TABLE YES null +> SCRIPT PUBLIC V1 VIEW NO null +> SCRIPT PUBLIC V2 VIEW NO null +> SCRIPT PUBLIC V3 VIEW NO null +> rows: 4 + +SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, VIEW_DEFINITION, 
CHECK_OPTION, IS_UPDATABLE, INSERTABLE_INTO, + IS_TRIGGER_UPDATABLE, IS_TRIGGER_DELETABLE, IS_TRIGGER_INSERTABLE_INTO + FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE INSERTABLE_INTO IS_TRIGGER_UPDATABLE IS_TRIGGER_DELETABLE IS_TRIGGER_INSERTABLE_INTO +> ------------- ------------ ---------- ------------------ ------------ ------------ --------------- -------------------- -------------------- -------------------------- +> SCRIPT PUBLIC V1 TABLE "PUBLIC"."T" NONE NO NO NO NO YES +> SCRIPT PUBLIC V2 TABLE "PUBLIC"."T" NONE NO NO YES NO NO +> SCRIPT PUBLIC V3 TABLE "PUBLIC"."T" NONE NO NO NO YES NO +> rows: 3 + +SELECT * FROM INFORMATION_SCHEMA.TRIGGERS; +> TRIGGER_CATALOG TRIGGER_SCHEMA TRIGGER_NAME EVENT_MANIPULATION EVENT_OBJECT_CATALOG EVENT_OBJECT_SCHEMA EVENT_OBJECT_TABLE ACTION_ORIENTATION ACTION_TIMING IS_ROLLBACK JAVA_CLASS QUEUE_SIZE NO_WAIT REMARKS +> --------------- -------------- ------------ ------------------ -------------------- ------------------- ------------------ ------------------ ------------- ----------- ---------- ---------- ------- ------- +> SCRIPT PUBLIC T1 INSERT SCRIPT PUBLIC V1 ROW INSTEAD OF FALSE null 1024 FALSE null +> SCRIPT PUBLIC T2 UPDATE SCRIPT PUBLIC V2 ROW INSTEAD OF FALSE null 1024 FALSE null +> SCRIPT PUBLIC T3 DELETE SCRIPT PUBLIC V3 ROW INSTEAD OF FALSE null 1024 FALSE null +> rows: 3 + +CREATE TRIGGER T4 BEFORE ROLLBACK ON TEST FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> exception INVALID_TRIGGER_FLAGS_1 + +CREATE TRIGGER T4 BEFORE SELECT ON TEST FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> exception INVALID_TRIGGER_FLAGS_1 + +CREATE 
TRIGGER T4 BEFORE SELECT, ROLLBACK ON TEST FOR EACH STATEMENT AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> exception INVALID_TRIGGER_FLAGS_1 + +DROP TABLE T CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createView.sql b/h2/src/test/org/h2/test/scripts/ddl/createView.sql index 92c60b830a..b049555439 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/createView.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/createView.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -14,3 +14,41 @@ SELECT * FROM TEST_VIEW; > - - > b c > rows: 1 + +SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, VIEW_DEFINITION, CHECK_OPTION, IS_UPDATABLE, STATUS, REMARKS + FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_NAME = 'TEST_VIEW'; +> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE STATUS REMARKS +> ------------- ------------ ---------- --------------- ------------ ------------ ------ ------- +> SCRIPT PUBLIC TEST_VIEW SELECT 'b', 'c' NONE NO VALID null +> rows: 1 + +DROP VIEW TEST_VIEW; +> ok + +CREATE TABLE TEST(C1 INT) AS (VALUES 1, 2); +> ok + +CREATE OR REPLACE VIEW TEST_VIEW AS (SELECT C1 AS A FROM TEST); +> ok + +ALTER TABLE TEST ADD COLUMN C2 INT; +> ok + +UPDATE TEST SET C2 = C1 + 1; +> update count: 2 + +CREATE OR REPLACE VIEW TEST_VIEW AS (SELECT C1 AS A, C2 AS B FROM TEST); +> ok + +CREATE OR REPLACE VIEW TEST_VIEW AS (SELECT C2 AS B, C1 AS A FROM TEST); +> ok + +SELECT * FROM TEST_VIEW; +> B A +> - - +> 2 1 +> 3 2 +> rows: 2 + +DROP TABLE TEST CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/dropAllObjects.sql 
b/h2/src/test/org/h2/test/scripts/ddl/dropAllObjects.sql new file mode 100644 index 0000000000..2d570e5934 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/dropAllObjects.sql @@ -0,0 +1,61 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +@reconnect off + +-- Test table depends on view + +create table a(x int); +> ok + +create view b as select * from a; +> ok + +create table c(y int check (select count(*) from b) = 0); +> ok + +drop all objects; +> ok + +-- Test inter-schema dependency + +create schema table_view; +> ok + +set schema table_view; +> ok + +create table test1 (id int, name varchar(20)); +> ok + +create view test_view_1 as (select * from test1); +> ok + +set schema public; +> ok + +create schema test_run; +> ok + +set schema test_run; +> ok + +create table test2 (id int, address varchar(20), constraint a_cons check (id in (select id from table_view.test1))); +> ok + +set schema public; +> ok + +drop all objects; +> ok + +CREATE DOMAIN D INT; +> ok + +DROP ALL OBJECTS; +> ok + +SELECT COUNT(*) FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +>> 0 diff --git a/h2/src/test/org/h2/test/scripts/ddl/dropDomain.sql b/h2/src/test/org/h2/test/scripts/ddl/dropDomain.sql index ce3ef9ab2e..2fc644b3c1 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/dropDomain.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/dropDomain.sql @@ -1,29 +1,25 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- CREATE DOMAIN E AS ENUM('A', 'B'); > ok -CREATE DOMAIN E_NN AS ENUM('A', 'B') NOT NULL; +CREATE TABLE TEST(I INT PRIMARY KEY, E1 E, E2 E NOT NULL); > ok -CREATE TABLE TEST(I INT PRIMARY KEY, E1 E, E2 E NOT NULL, E3 E_NN, E4 E_NN NULL); -> ok - -INSERT INTO TEST VALUES (1, 'A', 'B', 'A', 'B'); +INSERT INTO TEST VALUES (1, 'A', 'B'); > update count: 1 -SELECT COLUMN_NAME, DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME NULLABLE COLUMN_TYPE -> ----------- -------------- ------------- ----------- -------- --------------- -> I null null null 0 INT NOT NULL -> E1 SCRIPT PUBLIC E 1 "E" -> E2 SCRIPT PUBLIC E 0 "E" NOT NULL -> E3 SCRIPT PUBLIC E_NN 0 "E_NN" NOT NULL -> E4 SCRIPT PUBLIC E_NN 1 "E_NN" NULL -> rows (ordered): 5 +SELECT COLUMN_NAME, DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, IS_NULLABLE, DATA_TYPE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME IS_NULLABLE DATA_TYPE +> ----------- -------------- ------------- ----------- ----------- --------- +> I null null null NO INTEGER +> E1 SCRIPT PUBLIC E YES ENUM +> E2 SCRIPT PUBLIC E NO ENUM +> rows (ordered): 3 DROP DOMAIN E RESTRICT; > exception CANNOT_DROP_2 @@ -31,17 +27,49 @@ DROP DOMAIN E RESTRICT; DROP DOMAIN E CASCADE; > ok -DROP DOMAIN E_NN CASCADE; +SELECT COLUMN_NAME, DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, IS_NULLABLE, DATA_TYPE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME IS_NULLABLE DATA_TYPE +> ----------- -------------- ------------- ----------- ----------- --------- +> I null null null NO INTEGER +> E1 null null null YES ENUM +> E2 null null null NO ENUM +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +CREATE DOMAIN D 
INT CHECK (VALUE > 0); +> ok + +CREATE MEMORY TABLE TEST(C D); > ok -SELECT COLUMN_NAME, DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; -> COLUMN_NAME DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME NULLABLE COLUMN_TYPE -> ----------- -------------- ------------- ----------- -------- ----------------------- -> I null null null 0 INT NOT NULL -> E1 null null null 1 ENUM('A', 'B') -> E2 null null null 0 ENUM('A', 'B') NOT NULL -> E3 null null null 0 ENUM('A', 'B') NOT NULL -> E4 null null null 1 ENUM('A', 'B') +DROP DOMAIN D CASCADE; +> ok + +INSERT INTO TEST VALUES 1; +> update count: 1 + +INSERT INTO TEST VALUES -1; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +@reconnect + +INSERT INTO TEST VALUES 1; +> update count: 1 + +INSERT INTO TEST VALUES -1; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ------------------------------------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "C" INTEGER ); +> -- 2 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (1), (1); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" CHECK("C" > 0) NOCHECK; > rows (ordered): 5 DROP TABLE TEST; diff --git a/h2/src/test/org/h2/test/scripts/ddl/dropIndex.sql b/h2/src/test/org/h2/test/scripts/ddl/dropIndex.sql index b0d7b3fa44..a933bb56bf 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/dropIndex.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/dropIndex.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -14,6 +14,9 @@ CREATE TABLE TEST.TBL ( CREATE UNIQUE INDEX NAME_INDEX ON TEST.TBL(NAME); > ok +SET MODE MySQL; +> ok + -- MySQL compatibility syntax ALTER TABLE TEST.TBL DROP INDEX NAME_INDEX; > ok @@ -41,3 +44,32 @@ ALTER TABLE TEST.TBL DROP INDEX TEST.NAME_INDEX; DROP SCHEMA TEST CASCADE; > ok + +create table test(id int primary key, name varchar); +> ok + +alter table test alter column id int auto_increment; +> ok + +create table otherTest(id int primary key, name varchar); +> ok + +alter table otherTest add constraint fk foreign key(id) references test(id); +> ok + +-- MySQL compatibility syntax +alter table otherTest drop foreign key fk; +> ok + +create unique index idx on otherTest(name); +> ok + +-- MySQL compatibility syntax +alter table otherTest drop index idx; +> ok + +drop table test, otherTest; +> ok + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/dropSchema.sql b/h2/src/test/org/h2/test/scripts/ddl/dropSchema.sql index 16bc2de0b0..4285f88c5f 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/dropSchema.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/dropSchema.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -88,3 +88,62 @@ DROP SCHEMA TEST_SCHEMA RESTRICT; DROP SCHEMA TEST_SCHEMA CASCADE; > ok + +-- Test computed column dependency + +CREATE TABLE A (A INT); +> ok + +CREATE TABLE B (B INT AS SELECT A FROM A); +> ok + +DROP ALL OBJECTS; +> ok + +CREATE SCHEMA TEST_SCHEMA; +> ok + +CREATE TABLE TEST_SCHEMA.A (A INT); +> ok + +CREATE TABLE TEST_SCHEMA.B (B INT AS SELECT A FROM TEST_SCHEMA.A); +> ok + +DROP SCHEMA TEST_SCHEMA CASCADE; +> ok + +CREATE SCHEMA A; +> ok + +CREATE TABLE A.A1(ID INT); +> ok + +CREATE SCHEMA B; +> ok + +CREATE TABLE B.B1(ID INT, X INT DEFAULT (SELECT MAX(ID) FROM A.A1)); +> ok + +DROP SCHEMA A CASCADE; +> exception CANNOT_DROP_2 + +DROP SCHEMA B CASCADE; +> ok + +DROP SCHEMA A CASCADE; +> ok + +CREATE SCHEMA A; +> ok + +CREATE TABLE A.A1(ID INT, X INT); +> ok + +CREATE TABLE A.A2(ID INT, X INT DEFAULT (SELECT MAX(ID) FROM A.A1)); +> ok + +ALTER TABLE A.A1 ALTER COLUMN X SET DEFAULT (SELECT MAX(ID) FROM A.A2); +> ok + +DROP SCHEMA A CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/dropTable.sql b/h2/src/test/org/h2/test/scripts/ddl/dropTable.sql new file mode 100644 index 0000000000..05a606a0a0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/dropTable.sql @@ -0,0 +1,64 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE T1(ID1 INT PRIMARY KEY, ID2 INT); +> ok + +CREATE TABLE T2(ID2 INT, ID1 INT); +> ok + +ALTER TABLE T2 ADD CONSTRAINT C1 FOREIGN KEY(ID1) REFERENCES T1(ID1); +> ok + +DROP TABLE T1 RESTRICT; +> exception CANNOT_DROP_2 + +DROP TABLE T1 CASCADE; +> ok + +CREATE TABLE T1(ID1 INT PRIMARY KEY, ID2 INT); +> ok + +ALTER TABLE T2 ADD CONSTRAINT C1 FOREIGN KEY(ID1) REFERENCES T1(ID1); +> ok + +DROP TABLE T2 RESTRICT; +> ok + +CREATE VIEW V1 AS SELECT * FROM T1; +> ok + +DROP TABLE T1 RESTRICT; +> exception CANNOT_DROP_2 + +DROP TABLE T1 CASCADE; +> ok + +SELECT * FROM V1; +> exception TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 + +CREATE TABLE T1(ID1 INT); +> ok + +ALTER TABLE T1 ADD CONSTRAINT C1 CHECK ID1 > 0; +> ok + +DROP TABLE T1 RESTRICT; +> ok + +CREATE TABLE T1(ID1 INT PRIMARY KEY, ID2 INT); +> ok + +CREATE TABLE T2(ID2 INT PRIMARY KEY, ID1 INT); +> ok + +ALTER TABLE T2 ADD CONSTRAINT C1 FOREIGN KEY(ID1) REFERENCES T1(ID1); +> ok + +ALTER TABLE T1 ADD CONSTRAINT C2 FOREIGN KEY(ID2) REFERENCES T2(ID2); +> ok + +DROP TABLE T1, T2 RESTRICT; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/grant.sql b/h2/src/test/org/h2/test/scripts/ddl/grant.sql new file mode 100644 index 0000000000..e3b7e159e9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/grant.sql @@ -0,0 +1,57 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST1(ID BIGINT PRIMARY KEY); +> ok + +CREATE MEMORY TABLE TEST2(ID BIGINT PRIMARY KEY); +> ok + +CREATE USER TEST_USER PASSWORD 'test'; +> ok + +GRANT SELECT, INSERT ON TEST1, TEST2 TO TEST_USER; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> --------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE USER IF NOT EXISTS "TEST_USER" PASSWORD ''; +> CREATE MEMORY TABLE "PUBLIC"."TEST1"( "ID" BIGINT NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST1; +> CREATE MEMORY TABLE "PUBLIC"."TEST2"( "ID" BIGINT NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST2" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4C" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST2; +> GRANT SELECT, INSERT ON "PUBLIC"."TEST1" TO "TEST_USER"; +> GRANT SELECT, INSERT ON "PUBLIC"."TEST2" TO "TEST_USER"; +> rows (ordered): 10 + +REVOKE INSERT ON TEST1 FROM TEST_USER; +> ok + +REVOKE ALL ON TEST2 FROM TEST_USER; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> --------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE USER IF NOT EXISTS "TEST_USER" PASSWORD ''; +> CREATE MEMORY TABLE "PUBLIC"."TEST1"( "ID" BIGINT NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST1; +> CREATE MEMORY TABLE "PUBLIC"."TEST2"( "ID" BIGINT NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST2" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4C" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST2; +> GRANT SELECT ON "PUBLIC"."TEST1" TO "TEST_USER"; +> rows (ordered): 9 + +DROP USER TEST_USER; +> ok + +DROP TABLE TEST1, TEST2; +> ok diff --git 
a/h2/src/test/org/h2/test/scripts/ddl/truncateTable.sql b/h2/src/test/org/h2/test/scripts/ddl/truncateTable.sql index b237494d6a..0ac0093f66 100644 --- a/h2/src/test/org/h2/test/scripts/ddl/truncateTable.sql +++ b/h2/src/test/org/h2/test/scripts/ddl/truncateTable.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -28,7 +28,7 @@ INSERT INTO TEST VALUES(1, 'Hello'), (2, 'World'); > update count: 2 TRUNCATE TABLE TEST; -> ok +> update count: 2 SELECT * FROM TEST; > ID NAME @@ -66,13 +66,13 @@ CREATE TABLE TEST( ID1 BIGINT AUTO_INCREMENT NOT NULL, ID2 BIGINT NOT NULL DEFAULT NEXT VALUE FOR SEQ2 NULL_TO_DEFAULT SEQUENCE SEQ2, ID3 BIGINT NOT NULL DEFAULT NEXT VALUE FOR SEQ3 NULL_TO_DEFAULT, - VALUE INT NOT NULL); + "VALUE" INT NOT NULL); > ok -INSERT INTO TEST(VALUE) VALUES (1), (2); +INSERT INTO TEST("VALUE") VALUES (1), (2); > update count: 2 -SELECT * FROM TEST ORDER BY VALUE; +SELECT * FROM TEST ORDER BY "VALUE"; > ID1 ID2 ID3 VALUE > --- --- --- ----- > 1 1 1 1 @@ -80,12 +80,12 @@ SELECT * FROM TEST ORDER BY VALUE; > rows (ordered): 2 TRUNCATE TABLE TEST; -> ok +> update count: 2 -INSERT INTO TEST(VALUE) VALUES (1), (2); +INSERT INTO TEST("VALUE") VALUES (1), (2); > update count: 2 -SELECT * FROM TEST ORDER BY VALUE; +SELECT * FROM TEST ORDER BY "VALUE"; > ID1 ID2 ID3 VALUE > --- --- --- ----- > 3 3 3 1 @@ -93,12 +93,12 @@ SELECT * FROM TEST ORDER BY VALUE; > rows (ordered): 2 TRUNCATE TABLE TEST CONTINUE IDENTITY; -> ok +> update count: 2 -INSERT INTO TEST(VALUE) VALUES (1), (2); +INSERT INTO TEST("VALUE") VALUES (1), (2); > update count: 2 -SELECT * FROM TEST ORDER BY VALUE; +SELECT * FROM TEST ORDER BY "VALUE"; > ID1 ID2 ID3 VALUE > --- --- --- ----- > 5 5 5 1 @@ -106,20 +106,84 @@ SELECT * 
FROM TEST ORDER BY VALUE; > rows (ordered): 2 TRUNCATE TABLE TEST RESTART IDENTITY; -> ok +> update count: 2 -INSERT INTO TEST(VALUE) VALUES (1), (2); +INSERT INTO TEST("VALUE") VALUES (1), (2); > update count: 2 -SELECT * FROM TEST ORDER BY VALUE; +SELECT * FROM TEST ORDER BY "VALUE"; > ID1 ID2 ID3 VALUE > --- --- --- ----- > 1 1 7 1 > 2 2 8 2 > rows (ordered): 2 +SET MODE MSSQLServer; +> ok + +TRUNCATE TABLE TEST; +> update count: 2 + +INSERT INTO TEST("VALUE") VALUES (1), (2); +> update count: 2 + +SELECT * FROM TEST ORDER BY "VALUE"; +> ID1 ID2 ID3 VALUE +> --- --- --- ----- +> 1 1 9 1 +> 2 2 10 2 +> rows (ordered): 2 + +SET MODE MySQL; +> ok + +TRUNCATE TABLE TEST; +> update count: 2 + +INSERT INTO TEST("VALUE") VALUES (1), (2); +> update count: 2 + +SELECT * FROM TEST ORDER BY "VALUE"; +> ID1 ID2 ID3 VALUE +> --- --- --- ----- +> 1 1 11 1 +> 2 2 12 2 +> rows (ordered): 2 + +SET MODE Regular; +> ok + DROP TABLE TEST; > ok DROP SEQUENCE SEQ3; > ok + +CREATE TABLE TEST(ID INT GENERATED BY DEFAULT AS IDENTITY(MINVALUE 1 MAXVALUE 10 INCREMENT BY -1), V INT); +> ok + +INSERT INTO TEST(V) VALUES 1, 2; +> update count: 2 + +TABLE TEST; +> ID V +> -- - +> 10 1 +> 9 2 +> rows: 2 + +TRUNCATE TABLE TEST RESTART IDENTITY; +> update count: 2 + +INSERT INTO TEST(V) VALUES 1, 2; +> update count: 2 + +TABLE TEST; +> ID V +> -- - +> 10 1 +> 9 2 +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/default-and-on_update.sql b/h2/src/test/org/h2/test/scripts/default-and-on_update.sql index 94f3e0f24e..aeb273792e 100644 --- a/h2/src/test/org/h2/test/scripts/default-and-on_update.sql +++ b/h2/src/test/org/h2/test/scripts/default-and-on_update.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -86,22 +86,22 @@ ALTER TABLE TEST ALTER COLUMN V SET ON UPDATE NULL; > ok SELECT COLUMN_NAME, COLUMN_DEFAULT, COLUMN_ON_UPDATE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY COLUMN_NAME; -> COLUMN_NAME COLUMN_DEFAULT COLUMN_ON_UPDATE -> ----------- ------------------------------- ----------------- -> ID null null -> V (NEXT VALUE FOR "PUBLIC"."SEQ") NULL -> V2 null CURRENT_TIMESTAMP +> COLUMN_NAME COLUMN_DEFAULT COLUMN_ON_UPDATE +> ----------- ----------------------------- ----------------- +> ID null null +> V NEXT VALUE FOR "PUBLIC"."SEQ" NULL +> V2 null CURRENT_TIMESTAMP > rows (ordered): 3 ALTER TABLE TEST ALTER COLUMN V DROP ON UPDATE; > ok SELECT COLUMN_NAME, COLUMN_DEFAULT, COLUMN_ON_UPDATE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY COLUMN_NAME; -> COLUMN_NAME COLUMN_DEFAULT COLUMN_ON_UPDATE -> ----------- ------------------------------- ----------------- -> ID null null -> V (NEXT VALUE FOR "PUBLIC"."SEQ") null -> V2 null CURRENT_TIMESTAMP +> COLUMN_NAME COLUMN_DEFAULT COLUMN_ON_UPDATE +> ----------- ----------------------------- ----------------- +> ID null null +> V NEXT VALUE FOR "PUBLIC"."SEQ" null +> V2 null CURRENT_TIMESTAMP > rows (ordered): 3 DROP TABLE TEST; diff --git a/h2/src/test/org/h2/test/scripts/dml/delete.sql b/h2/src/test/org/h2/test/scripts/dml/delete.sql index 96ae591813..60a7f792f0 100644 --- a/h2/src/test/org/h2/test/scripts/dml/delete.sql +++ b/h2/src/test/org/h2/test/scripts/dml/delete.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -21,3 +21,81 @@ SELECT ID FROM TEST; DROP TABLE TEST; > ok + +CREATE TABLE TEST(ID INT PRIMARY KEY) AS SELECT * FROM SYSTEM_RANGE(1, 13); +> ok + +DELETE FROM TEST WHERE ID <= 12 FETCH FIRST ROW ONLY; +> update count: 1 + +DELETE FROM TEST WHERE ID <= 12 FETCH FIRST ROWS ONLY; +> update count: 1 + +DELETE FROM TEST WHERE ID <= 12 FETCH NEXT ROW ONLY; +> update count: 1 + +DELETE FROM TEST WHERE ID <= 12 FETCH NEXT ROWS ONLY; +> update count: 1 + +DELETE FROM TEST WHERE ID <= 12 FETCH FIRST 2 ROW ONLY; +> update count: 2 + +DELETE FROM TEST WHERE ID <= 12 FETCH FIRST 2 ROWS ONLY; +> update count: 2 + +DELETE FROM TEST WHERE ID <= 12 FETCH NEXT 2 ROW ONLY; +> update count: 2 + +DELETE FROM TEST WHERE ID <= 12 FETCH NEXT 2 ROWS ONLY; +> update count: 2 + +EXPLAIN DELETE FROM TEST WHERE ID <= 12 FETCH FIRST 2 ROWS ONLY; +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID <= 12 */ WHERE "ID" <= 12 FETCH FIRST 2 ROWS ONLY + +EXPLAIN DELETE FROM TEST FETCH FIRST 1 ROW ONLY; +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST ROW ONLY + +EXPLAIN DELETE FROM TEST; +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +TABLE TEST; +>> 13 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(id int) AS SELECT x FROM system_range(1, 100); +> ok + +SET MODE MSSQLServer; +> ok + +DELETE TOP 10 FROM TEST; +> update count: 10 + +SET MODE Regular; +> ok + +SELECT COUNT(*) FROM TEST; +>> 90 + +DELETE FROM TEST LIMIT ((SELECT COUNT(*) FROM TEST) / 10); +> update count: 9 + +SELECT COUNT(*) FROM TEST; +>> 81 + +EXPLAIN DELETE FROM TEST LIMIT ((SELECT COUNT(*) FROM TEST) / 10); +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST (SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */) / 10 ROWS ONLY + +DELETE FROM TEST LIMIT ?; +{ +10 +}; +> update count: 10 + +SELECT COUNT(*) FROM TEST; +>> 71 + +DROP TABLE TEST; +> ok diff --git 
a/h2/src/test/org/h2/test/scripts/dml/error_reporting.sql b/h2/src/test/org/h2/test/scripts/dml/error_reporting.sql index 3b2e2b3a31..9da42977b0 100644 --- a/h2/src/test/org/h2/test/scripts/dml/error_reporting.sql +++ b/h2/src/test/org/h2/test/scripts/dml/error_reporting.sql @@ -1,19 +1,37 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- +SELECT 0x; +> exception SYNTAX_ERROR_2 + +SELECT 0xZ; +> exception SYNTAX_ERROR_2 + +SELECT 0xAAZ; +> exception SYNTAX_ERROR_2 + +SELECT 0x1LZ; +> exception SYNTAX_ERROR_2 + +SELECT 0x1234567890abZ; +> exception SYNTAX_ERROR_2 + +SELECT 0x1234567890abLZ; +> exception SYNTAX_ERROR_2 + CREATE TABLE test (id INT NOT NULL, name VARCHAR); > ok select * from test where id = ARRAY [1, 2]; -> exception COMPARING_ARRAY_TO_SCALAR +> exception TYPES_ARE_NOT_COMPARABLE_2 insert into test values (1, 't'); > update count: 1 select * from test where id = (1, 2); -> exception COLUMN_COUNT_DOES_NOT_MATCH +> exception TYPES_ARE_NOT_COMPARABLE_2 drop table test; > ok diff --git a/h2/src/test/org/h2/test/scripts/dml/execute_immediate.sql b/h2/src/test/org/h2/test/scripts/dml/execute_immediate.sql new file mode 100644 index 0000000000..b3aa0057aa --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/execute_immediate.sql @@ -0,0 +1,33 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST(ID INT UNIQUE); +> ok + +EXECUTE IMMEDIATE 'INSERT INTO TEST VALUES ' || 1; +> update count: 1 + +EXECUTE IMMEDIATE 'INSERT INTO TEST2 VALUES 1'; +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +EXECUTE IMMEDIATE 'SELECT 1'; +> exception SYNTAX_ERROR_2 + +EXECUTE IMMEDIATE 'ALTER TABLE TEST DROP CONSTRAINT ' || + QUOTE_IDENT((SELECT CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS + WHERE TABLE_SCHEMA = 'PUBLIC' AND TABLE_NAME = 'TEST' AND CONSTRAINT_TYPE = 'UNIQUE')); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ---------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (1); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/insert.sql b/h2/src/test/org/h2/test/scripts/dml/insert.sql index 45b5c5ebbe..804fca813a 100644 --- a/h2/src/test/org/h2/test/scripts/dml/insert.sql +++ b/h2/src/test/org/h2/test/scripts/dml/insert.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -44,3 +44,107 @@ SELECT _ROWID_, ID FROM TEST; DROP TABLE TEST; > ok + +CREATE TABLE TEST(A INT, B INT DEFAULT 5); +> ok + +INSERT INTO TEST VALUES (1, DEFAULT); +> update count: 1 + +INSERT INTO TEST SET A = 2, B = DEFAULT; +> update count: 1 + +TABLE TEST; +> A B +> - - +> 1 5 +> 2 5 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1)); +> ok + +INSERT INTO TEST VALUES (1, 1); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +INSERT INTO TEST(B) VALUES 1; +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +INSERT INTO TEST VALUES (1, DEFAULT); +> update count: 1 + +INSERT INTO TEST DEFAULT VALUES; +> update count: 1 + +TABLE TEST; +> A B +> ---- ---- +> 1 2 +> null null +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID NUMERIC(20) GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +INSERT INTO TEST VALUES (12345678901234567890, 1); +> update count: 1 + +TABLE TEST; +> ID V +> -------------------- - +> 12345678901234567890 1 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +INSERT INTO TEST VALUES (10, 20); +> update count: 1 + +INSERT INTO TEST OVERRIDING USER VALUE VALUES (20, 30); +> update count: 1 + +INSERT INTO TEST OVERRIDING SYSTEM VALUE VALUES (30, 40); +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 1 30 +> 10 20 +> 30 40 +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY, V INT); +> ok + +INSERT INTO TEST VALUES (10, 20); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +INSERT INTO TEST OVERRIDING USER VALUE VALUES (20, 30); +> update count: 1 + +INSERT INTO TEST OVERRIDING SYSTEM VALUE VALUES (30, 40); +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 1 30 +> 30 40 +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/insertIgnore.sql b/h2/src/test/org/h2/test/scripts/dml/insertIgnore.sql index 
0a5db0da31..bdbf726a69 100644 --- a/h2/src/test/org/h2/test/scripts/dml/insertIgnore.sql +++ b/h2/src/test/org/h2/test/scripts/dml/insertIgnore.sql @@ -1,12 +1,12 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- SET MODE MySQL; > ok -CREATE TABLE TEST(ID BIGINT PRIMARY KEY, VALUE INT NOT NULL); +CREATE TABLE TEST(ID BIGINT PRIMARY KEY, `VALUE` INT NOT NULL); > ok INSERT INTO TEST VALUES (1, 10), (2, 20), (3, 30), (4, 40); @@ -40,13 +40,13 @@ SELECT * FROM TEST ORDER BY ID; > 5 52 > rows (ordered): 5 -CREATE TABLE TESTREF(ID BIGINT PRIMARY KEY, VALUE INT NOT NULL); +CREATE TABLE TESTREF(ID BIGINT PRIMARY KEY, `VALUE` INT NOT NULL); > ok INSERT INTO TESTREF VALUES (1, 11), (2, 21), (6, 61), (7, 71); > update count: 4 -INSERT INTO TEST (ID, VALUE) SELECT ID, VALUE FROM TESTREF; +INSERT INTO TEST (ID, `VALUE`) SELECT ID, `VALUE` FROM TESTREF; > exception DUPLICATE_KEY_1 SELECT * FROM TEST ORDER BY ID; @@ -59,10 +59,10 @@ SELECT * FROM TEST ORDER BY ID; > 5 52 > rows (ordered): 5 -INSERT IGNORE INTO TEST (ID, VALUE) SELECT ID, VALUE FROM TESTREF; +INSERT IGNORE INTO TEST (ID, `VALUE`) SELECT ID, `VALUE` FROM TESTREF; > update count: 2 -INSERT IGNORE INTO TEST (ID, VALUE) SELECT ID, VALUE FROM TESTREF; +INSERT IGNORE INTO TEST (ID, `VALUE`) SELECT ID, `VALUE` FROM TESTREF; > ok SELECT * FROM TEST ORDER BY ID; @@ -80,7 +80,7 @@ SELECT * FROM TEST ORDER BY ID; INSERT INTO TESTREF VALUES (8, 81), (9, 91); > update count: 2 -INSERT INTO TEST (ID, VALUE) SELECT ID, VALUE FROM TESTREF ON DUPLICATE KEY UPDATE VALUE=83; +INSERT INTO TEST (ID, `VALUE`) SELECT ID, `VALUE` FROM TESTREF ON DUPLICATE KEY UPDATE `VALUE`=83; > update count: 10 SELECT * FROM TEST ORDER BY ID; @@ -96,3 +96,32 @@ SELECT * FROM TEST ORDER BY ID; > 8 81 > 
9 91 > rows (ordered): 9 + +SET MODE Regular; +> ok + +INSERT INTO TEST (ID, `VALUE`) VALUES (9, 90), (10, 100); +> exception DUPLICATE_KEY_1 + +INSERT INTO TEST (ID, `VALUE`) VALUES (9, 90), (10, 100) ON CONFLICT DO NOTHING; +> exception SYNTAX_ERROR_1 + +SET MODE PostgreSQL; +> ok + +INSERT INTO TEST (ID, `VALUE`) VALUES (9, 90), (10, 100); +> exception DUPLICATE_KEY_1 + +INSERT INTO TEST (ID, `VALUE`) VALUES (9, 90), (10, 100) ON CONFLICT DO NOTHING; +> update count: 1 + +SELECT * FROM TEST WHERE ID >= 8 ORDER BY ID; +> ID VALUE +> -- ----- +> 8 81 +> 9 91 +> 10 100 +> rows (ordered): 3 + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/merge.sql b/h2/src/test/org/h2/test/scripts/dml/merge.sql index 367a209768..93509d46a4 100644 --- a/h2/src/test/org/h2/test/scripts/dml/merge.sql +++ b/h2/src/test/org/h2/test/scripts/dml/merge.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -16,7 +16,7 @@ CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); > ok EXPLAIN SELECT * FROM TEST WHERE ID=1; ->> SELECT "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 EXPLAIN MERGE INTO TEST VALUES(1, 'Hello'); >> MERGE INTO "PUBLIC"."TEST"("ID", "NAME") KEY("ID") VALUES (1, 'Hello') @@ -105,3 +105,57 @@ MERGE INTO TEST KEY (ID) VALUES (1, 2, 3), (2, 2, 3); DROP TABLE TEST; > ok + +CREATE TABLE TEST(A INT, B INT DEFAULT 5); +> ok + +MERGE INTO TEST KEY(A) VALUES (1, DEFAULT); +> update count: 1 + +TABLE TEST; +> A B +> - - +> 1 5 +> rows: 1 + +UPDATE TEST SET B = 1 WHERE A = 1; +> update count: 1 + +SELECT B FROM TEST WHERE A = 1; +>> 1 + +MERGE INTO TEST KEY(A) VALUES (1, DEFAULT); +> update count: 1 + +SELECT B FROM TEST WHERE A = 1; +>> 5 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1)); +> ok + +MERGE INTO TEST KEY(A) VALUES (1, 1); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +MERGE INTO TEST KEY(A) VALUES (1, DEFAULT); +> update count: 1 + +MERGE INTO TEST KEY(A) VALUES (1, 1); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +MERGE INTO TEST KEY(A) VALUES (1, DEFAULT); +> update count: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, G INT GENERATED ALWAYS AS (ID + 1)); +> ok + +MERGE INTO TEST(G) KEY(ID) VALUES (1); +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/mergeUsing.sql b/h2/src/test/org/h2/test/scripts/dml/mergeUsing.sql index 952254046b..051241645c 100644 --- a/h2/src/test/org/h2/test/scripts/dml/mergeUsing.sql +++ b/h2/src/test/org/h2/test/scripts/dml/mergeUsing.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
+-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- CREATE TABLE PARENT(ID INT, NAME VARCHAR, PRIMARY KEY(ID) ); @@ -9,8 +9,18 @@ MERGE INTO PARENT AS P USING (SELECT X AS ID, 'Coco'||X AS NAME FROM SYSTEM_RANGE(1,2) ) AS S ON (P.ID = S.ID AND 1=1 AND S.ID = P.ID) WHEN MATCHED THEN - UPDATE SET P.NAME = S.NAME WHERE 2 = 2 WHEN NOT - MATCHED THEN + UPDATE SET P.NAME = S.NAME WHERE 2 = 2; +> exception SYNTAX_ERROR_1 + +SET MODE Oracle; +> ok + +MERGE INTO PARENT AS P + USING (SELECT X AS ID, 'Coco'||X AS NAME FROM SYSTEM_RANGE(1,2) ) AS S + ON (P.ID = S.ID AND 1=1 AND S.ID = P.ID) + WHEN MATCHED THEN + UPDATE SET P.NAME = S.NAME WHERE 2 = 2 + WHEN NOT MATCHED THEN INSERT (ID, NAME) VALUES (S.ID, S.NAME); > update count: 2 @@ -26,10 +36,13 @@ EXPLAIN PLAN USING (SELECT X AS ID, 'Coco'||X AS NAME FROM SYSTEM_RANGE(1,2) ) AS S ON (P.ID = S.ID AND 1=1 AND S.ID = P.ID) WHEN MATCHED THEN - UPDATE SET P.NAME = S.NAME WHERE 2 = 2 WHEN NOT - MATCHED THEN + UPDATE SET P.NAME = S.NAME WHERE 2 = 2 + WHEN NOT MATCHED THEN INSERT (ID, NAME) VALUES (S.ID, S.NAME); ->> MERGE INTO "PUBLIC"."PARENT" USING SELECT "X" AS "ID", ('Coco' || "X") AS "NAME" FROM SYSTEM_RANGE(1, 2) /* PUBLIC.RANGE_INDEX */ +>> MERGE INTO "PUBLIC"."PARENT" "P" /* PUBLIC.PRIMARY_KEY_8: ID = S.ID AND ID = S.ID */ USING ( SELECT "X" AS "ID", CONCAT('Coco', "X") AS "NAME" FROM SYSTEM_RANGE(1, 2) ) "S" /* SELECT X AS ID, CONCAT('Coco', X) AS NAME FROM SYSTEM_RANGE(1, 2) /* range index */ */ WHEN MATCHED THEN UPDATE SET "NAME" = "S"."NAME" WHEN NOT MATCHED THEN INSERT ("ID", "NAME") VALUES ("S"."ID", "S"."NAME") + +SET MODE Regular; +> ok DROP TABLE PARENT; > ok @@ -37,7 +50,7 @@ DROP TABLE PARENT; CREATE SCHEMA SOURCESCHEMA; > ok -CREATE TABLE SOURCESCHEMA.SOURCE(ID INT PRIMARY KEY, VALUE INT); +CREATE TABLE SOURCESCHEMA.SOURCE(ID INT PRIMARY KEY, "VALUE" INT); > ok INSERT INTO SOURCESCHEMA.SOURCE 
VALUES (1, 10), (3, 30), (5, 50); @@ -46,15 +59,15 @@ INSERT INTO SOURCESCHEMA.SOURCE VALUES (1, 10), (3, 30), (5, 50); CREATE SCHEMA DESTSCHEMA; > ok -CREATE TABLE DESTSCHEMA.DESTINATION(ID INT PRIMARY KEY, VALUE INT); +CREATE TABLE DESTSCHEMA.DESTINATION(ID INT PRIMARY KEY, "VALUE" INT); > ok INSERT INTO DESTSCHEMA.DESTINATION VALUES (3, 300), (6, 600); > update count: 2 MERGE INTO DESTSCHEMA.DESTINATION USING SOURCESCHEMA.SOURCE ON (DESTSCHEMA.DESTINATION.ID = SOURCESCHEMA.SOURCE.ID) - WHEN MATCHED THEN UPDATE SET VALUE = SOURCESCHEMA.SOURCE.VALUE - WHEN NOT MATCHED THEN INSERT (ID, VALUE) VALUES (SOURCESCHEMA.SOURCE.ID, SOURCESCHEMA.SOURCE.VALUE); + WHEN MATCHED THEN UPDATE SET "VALUE" = SOURCESCHEMA.SOURCE."VALUE" + WHEN NOT MATCHED THEN INSERT (ID, "VALUE") VALUES (SOURCESCHEMA.SOURCE.ID, SOURCESCHEMA.SOURCE."VALUE"); > update count: 3 SELECT * FROM DESTSCHEMA.DESTINATION; @@ -162,26 +175,26 @@ SELECT * FROM TEST ORDER BY C1, C2; DROP TABLE TEST; > ok -CREATE TABLE TEST (ID INT, VALUE INT); +CREATE TABLE TEST (ID INT, "VALUE" INT); > ok MERGE INTO TEST USING DUAL ON (ID = 1) - WHEN MATCHED THEN UPDATE SET VALUE = 1 + WHEN MATCHED THEN UPDATE SET "VALUE" = 1 WHEN; > exception SYNTAX_ERROR_2 MERGE INTO TEST USING DUAL ON (ID = 1) - WHEN MATCHED THEN UPDATE SET VALUE = 1 + WHEN MATCHED THEN UPDATE SET "VALUE" = 1 WHEN NOT MATCHED THEN; > exception SYNTAX_ERROR_2 MERGE INTO TEST USING DUAL ON (ID = 1) - WHEN NOT MATCHED THEN INSERT (ID, VALUE) VALUES (1, 1) + WHEN NOT MATCHED THEN INSERT (ID, "VALUE") VALUES (1, 1) WHEN; > exception SYNTAX_ERROR_2 MERGE INTO TEST USING DUAL ON (ID = 1) - WHEN NOT MATCHED THEN INSERT (ID, VALUE) VALUES (1, 1) + WHEN NOT MATCHED THEN INSERT (ID, "VALUE") VALUES (1, 1) WHEN MATCHED THEN; > exception SYNTAX_ERROR_2 @@ -214,14 +227,14 @@ MERGE INTO TEST USING (SELECT 40) ON UNKNOWN_COLUMN = 1 WHEN NOT MATCHED THEN IN DROP TABLE TEST; > ok -CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE INT); +CREATE TABLE TEST(ID INT PRIMARY KEY, "VALUE" 
INT); > ok INSERT INTO TEST VALUES (1, 10), (2, 20); > update count: 2 MERGE INTO TEST USING (SELECT 1) ON (ID < 0) - WHEN MATCHED THEN UPDATE SET VALUE = 30 + WHEN MATCHED THEN UPDATE SET "VALUE" = 30 WHEN NOT MATCHED THEN INSERT VALUES (3, 30); > update count: 1 @@ -234,7 +247,7 @@ SELECT * FROM TEST; > rows: 3 MERGE INTO TEST USING (SELECT 1) ON (ID = ID) - WHEN MATCHED THEN UPDATE SET VALUE = 40 + WHEN MATCHED THEN UPDATE SET "VALUE" = 40 WHEN NOT MATCHED THEN INSERT VALUES (4, 40); > update count: 3 @@ -247,7 +260,7 @@ SELECT * FROM TEST; > rows: 3 MERGE INTO TEST USING (SELECT 1) ON (1 = 1) - WHEN MATCHED THEN UPDATE SET VALUE = 50 + WHEN MATCHED THEN UPDATE SET "VALUE" = 50 WHEN NOT MATCHED THEN INSERT VALUES (5, 50); > update count: 3 @@ -260,42 +273,34 @@ SELECT * FROM TEST; > rows: 3 MERGE INTO TEST USING (SELECT 1) ON 1 = 1 - WHEN MATCHED THEN UPDATE SET VALUE = 60 WHERE ID = 3 DELETE WHERE ID = 2; -> update count: 1 + WHEN MATCHED THEN UPDATE SET "VALUE" = 60 WHERE ID = 3 DELETE WHERE ID = 2; +> exception SYNTAX_ERROR_1 -SELECT * FROM TEST; -> ID VALUE -> -- ----- -> 1 50 -> 2 50 -> 3 60 -> rows: 3 +MERGE INTO TEST USING (SELECT 1 A) ON 1 = 1 + WHEN MATCHED THEN DELETE WHERE ID = 2; +> exception SYNTAX_ERROR_1 -MERGE INTO TEST USING (SELECT 1) ON 1 = 1 +SET MODE Oracle; +> ok + +MERGE INTO TEST USING (SELECT 1 A) ON 1 = 1 WHEN MATCHED THEN DELETE WHERE ID = 2; > update count: 1 -SELECT * FROM TEST; -> ID VALUE -> -- ----- -> 1 50 -> 3 60 -> rows: 2 - -MERGE INTO TEST USING (SELECT 1) ON 1 = 1 - WHEN MATCHED THEN UPDATE SET VALUE = 70 WHERE ID = 3 DELETE WHERE VALUE = 70; -> update count: 2 +SET MODE Regular; +> ok SELECT * FROM TEST; > ID VALUE > -- ----- > 1 50 -> rows: 1 +> 3 50 +> rows: 2 DROP TABLE TEST; > ok -CREATE TABLE T(ID INT, F BOOLEAN, VALUE INT); +CREATE TABLE T(ID INT, F BOOLEAN, "VALUE" INT); > ok INSERT INTO T VALUES (1, FALSE, 10), (2, TRUE, 20); @@ -308,7 +313,7 @@ INSERT INTO S VALUES (1, FALSE, 100), (2, TRUE, 200), (3, FALSE, 300), 
(4, TRUE, > update count: 4 MERGE INTO T USING S ON ID = S_ID - WHEN MATCHED AND F THEN UPDATE SET VALUE = S_VALUE + WHEN MATCHED AND F THEN UPDATE SET "VALUE" = S_VALUE WHEN MATCHED AND NOT F THEN DELETE WHEN NOT MATCHED AND S_F THEN INSERT VALUES (S_ID, S_F, S_VALUE); > update count: 3 @@ -322,3 +327,215 @@ SELECT * FROM T; DROP TABLE T, S; > ok + +CREATE TABLE T(ID INT, A INT, B INT) AS VALUES (1, 1, 1), (2, 1, 2); +> ok + +CREATE TABLE S(ID INT, A INT, B INT) AS VALUES (1, 1, 3), (2, 1, 4); +> ok + +MERGE INTO T USING S ON T.A = S.A WHEN MATCHED THEN UPDATE SET B = S.B; +> exception DUPLICATE_KEY_1 + +CREATE TABLE S2(ID INT, A INT, B INT) AS VALUES (3, 3, 3); +> ok + +MERGE INTO T USING (SELECT * FROM S UNION SELECT * FROM S2) S ON T.ID = S.ID + WHEN MATCHED THEN UPDATE SET A = S.A, B = S.B + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B); +> update count: 3 + +TABLE T; +> ID A B +> -- - - +> 1 1 3 +> 2 1 4 +> 3 3 3 +> rows: 3 + +MERGE INTO T USING (S) ON T.ID = S.ID + WHEN MATCHED THEN UPDATE SET B = S.B + 1; +> update count: 2 + +TABLE T; +> ID A B +> -- - - +> 1 1 4 +> 2 1 5 +> 3 3 3 +> rows: 3 + +DROP TABLE T, S, S2 CASCADE; +> ok + +CREATE TABLE TEST(ID INT, V INT); +> ok + +MERGE INTO TEST USING VALUES (1, 2) S ON TEST.ID = S.C1 WHEN NOT MATCHED THEN INSERT VALUES (1, 2), (3, 4); +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE T(A INT); +> ok + +MERGE INTO T USING (SELECT 1 A) S ON (TRUE) +WHEN NOT MATCHED AND S.X THEN INSERT VALUES (1); +> exception COLUMN_NOT_FOUND_1 + +DROP TABLE T; +> ok + +CREATE TABLE A(ID INT, V INT) AS VALUES (1, 1), (2, 2); +> ok + +CREATE TABLE B(ID INT, V INT) AS VALUES (2, 4), (3, 6); +> ok + +MERGE INTO A USING (SELECT * FROM B) S + ON A.ID = S.ID + WHEN MATCHED THEN UPDATE SET V = S.V; +> update count: 1 + +TABLE A; +> ID V +> -- - +> 1 1 +> 2 4 +> rows: 2 + +DROP TABLE A, B; +> ok + +CREATE TABLE TARGET(ID INT, V INT); +> ok + +MERGE INTO TARGET T USING (VALUES (1, 2)) S(ID, V) + ON T.ID = S.ID + 
WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.V); +> update count: 1 + +CREATE TABLE SOURCE(ID INT, V INT) AS VALUES (3, 4); +> ok + +MERGE INTO TARGET T USING SOURCE S(ID, V) + ON T.ID = S.ID + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.V); +> update count: 1 + +TABLE TARGET; +> ID V +> -- - +> 1 2 +> 3 4 +> rows: 2 + +DROP TABLE SOURCE, TARGET; +> ok + +CREATE TABLE T(ID INT, V INT) AS VALUES (1, 1), (2, 2); +> ok + +MERGE INTO T USING (SELECT 1) ON (TRUE) + WHEN MATCHED THEN UPDATE SET V = 2 + WHEN MATCHED AND ID = 2 THEN UPDATE SET V = 3; +> update count: 2 + +TABLE T; +> ID V +> -- - +> 1 2 +> 2 2 +> rows: 2 + +TRUNCATE TABLE T; +> update count: 2 + +INSERT INTO T VALUES (1, 1); +> update count: 1 + +MERGE INTO T USING (SELECT 1) ON (ID = 1) + WHEN MATCHED THEN UPDATE SET V = 2 + WHEN MATCHED THEN UPDATE SET V = 3; +> update count: 1 + +TABLE T; +> ID V +> -- - +> 1 2 +> rows: 1 + +SELECT * FROM FINAL TABLE (MERGE INTO T USING (SELECT 1) ON (ID = 1) + WHEN MATCHED THEN UPDATE SET V = 4 + WHEN MATCHED THEN UPDATE SET V = 5); +> ID V +> -- - +> 1 4 +> rows: 1 + +EXPLAIN MERGE INTO T USING (VALUES (1, 2)) S(ID, V) ON T.ID = S.ID + WHEN NOT MATCHED AND T.ID = 1 THEN INSERT VALUES (S.ID, S.V) + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.V + 1) + WHEN MATCHED AND T.ID = 2 THEN UPDATE SET V = S.ID + 2 + WHEN MATCHED THEN UPDATE SET V = S.ID + 3; +>> MERGE INTO "PUBLIC"."T" /* PUBLIC.T.tableScan */ USING (VALUES (1, 2)) "S"("ID", "V") /* table scan */ WHEN NOT MATCHED AND "T"."ID" = 1 THEN INSERT ("ID", "V") VALUES ("S"."ID", "S"."V") WHEN NOT MATCHED THEN INSERT ("ID", "V") VALUES ("S"."ID", "S"."V" + 1) WHEN MATCHED AND "T"."ID" = 2 THEN UPDATE SET "V" = "S"."ID" + 2 WHEN MATCHED THEN UPDATE SET "V" = "S"."ID" + 3 + +EXPLAIN MERGE INTO T USING (VALUES (1, 2)) S(ID, V) ON T.ID = S.ID + WHEN MATCHED AND T.ID = 1 THEN DELETE + WHEN MATCHED THEN DELETE; +>> MERGE INTO "PUBLIC"."T" /* PUBLIC.T.tableScan */ USING (VALUES (1, 2)) "S"("ID", "V") /* table scan */ WHEN 
MATCHED AND "T"."ID" = 1 THEN DELETE WHEN MATCHED THEN DELETE + +DROP TABLE T; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +MERGE INTO TEST USING (VALUES (10, 20)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT VALUES(SOURCE.ID, SOURCE.V); +> update count: 1 + +MERGE INTO TEST USING (VALUES (20, 30)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT OVERRIDING USER VALUE VALUES(SOURCE.ID, SOURCE.V); +> update count: 1 + +MERGE INTO TEST USING (VALUES (30, 40)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT OVERRIDING SYSTEM VALUE VALUES(SOURCE.ID, SOURCE.V); +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 1 30 +> 10 20 +> 30 40 +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY, V INT); +> ok + +MERGE INTO TEST USING (VALUES (10, 20)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT VALUES(SOURCE.ID, SOURCE.V); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +MERGE INTO TEST USING (VALUES (20, 30)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT OVERRIDING USER VALUE VALUES(SOURCE.ID, SOURCE.V); +> update count: 1 + +MERGE INTO TEST USING (VALUES (30, 40)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT OVERRIDING SYSTEM VALUE VALUES(SOURCE.ID, SOURCE.V); +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 1 30 +> 30 40 +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/replace.sql b/h2/src/test/org/h2/test/scripts/dml/replace.sql index d17c670c13..cad90d682b 100644 --- a/h2/src/test/org/h2/test/scripts/dml/replace.sql +++ b/h2/src/test/org/h2/test/scripts/dml/replace.sql @@ -1,8 +1,11 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- +SET MODE MySQL; +> ok + CREATE TABLE TABLE_WORD ( WORD_ID int(11) NOT NULL AUTO_INCREMENT, WORD varchar(128) NOT NULL, @@ -40,5 +43,11 @@ REPLACE INTO TABLE_WORD(WORD_ID, WORD) SELECT 1, 'REPLACED2'; SELECT WORD FROM TABLE_WORD where WORD_ID = 1; >> REPLACED2 +SET MODE Regular; +> ok + +REPLACE INTO TABLE_WORD(WORD) VALUES ('aaaaaaaaaa'); +> exception SYNTAX_ERROR_2 + DROP TABLE TABLE_WORD; > ok diff --git a/h2/src/test/org/h2/test/scripts/dml/script.sql b/h2/src/test/org/h2/test/scripts/dml/script.sql index 4b492baa06..b0289136d9 100644 --- a/h2/src/test/org/h2/test/scripts/dml/script.sql +++ b/h2/src/test/org/h2/test/scripts/dml/script.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -9,22 +9,134 @@ create memory table test(id int primary key, name varchar(255)); INSERT INTO TEST VALUES(2, STRINGDECODE('abcsond\344rzeich\344 ') || char(22222) || STRINGDECODE(' \366\344\374\326\304\334\351\350\340\361!')); > update count: 1 -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT -> ------------------------------------------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> ------------------------------------------------------------------------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255) ); > ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "NAME" VARCHAR(255) ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (2, U&'abcsond\00e4rzeich\00e4 \56ce \00f6\00e4\00fc\00d6\00c4\00dc\00e9\00e8\00e0\00f1!'); +> rows (ordered): 5 + +SCRIPT COLUMNS NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> -------------------------------------------------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> INSERT INTO "PUBLIC"."TEST" VALUES (2, STRINGDECODE('abcsond\u00e4rzeich\u00e4 \u56ce \u00f6\u00e4\u00fc\u00d6\u00c4\u00dc\u00e9\u00e8\u00e0\u00f1!')); -> rows: 5 +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255) ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST"("ID", "NAME") VALUES (2, U&'abcsond\00e4rzeich\00e4 \56ce 
\00f6\00e4\00fc\00d6\00c4\00dc\00e9\00e8\00e0\00f1!'); +> rows (ordered): 5 + +DROP TABLE TEST; +> ok -SCRIPT COLUMNS NOPASSWORDS NOSETTINGS; +CREATE MEMORY TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY, V INT, G INT GENERATED ALWAYS AS (V + 1)); +> ok + +INSERT INTO TEST(V) VALUES 5; +> update count: 1 + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT -> --------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" BIGINT GENERATED ALWAYS AS IDENTITY(START WITH 1 RESTART WITH 2) NOT NULL, "V" INTEGER, "G" INTEGER GENERATED ALWAYS AS ("V" + 1) ); > -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "NAME" VARCHAR(255) ); +> INSERT INTO "PUBLIC"."TEST"("ID", "V") OVERRIDING SYSTEM VALUE VALUES (1, 5); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok + +CREATE DOMAIN C AS INT; +> ok + +CREATE DOMAIN B AS C; +> ok + +CREATE DOMAIN A AS B; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> INSERT INTO "PUBLIC"."TEST"("ID", "NAME") VALUES (2, STRINGDECODE('abcsond\u00e4rzeich\u00e4 \u56ce \u00f6\u00e4\u00fc\u00d6\u00c4\u00dc\u00e9\u00e8\u00e0\u00f1!')); -> rows: 5 +> CREATE DOMAIN "PUBLIC"."C" AS INTEGER; +> CREATE DOMAIN "PUBLIC"."B" AS "PUBLIC"."C"; +> CREATE DOMAIN "PUBLIC"."A" AS "PUBLIC"."B"; +> rows (ordered): 4 + +DROP DOMAIN A; +> ok + +DROP DOMAIN B; +> ok + +DROP DOMAIN C; +> ok + +CREATE DOMAIN A AS INT; +> ok + +CREATE DOMAIN B 
AS A; +> ok + +CREATE DOMAIN X AS INT; +> ok + +CREATE DOMAIN Y AS X; +> ok + +CREATE DOMAIN Z AS Y; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE DOMAIN "PUBLIC"."A" AS INTEGER; +> CREATE DOMAIN "PUBLIC"."X" AS INTEGER; +> CREATE DOMAIN "PUBLIC"."B" AS "PUBLIC"."A"; +> CREATE DOMAIN "PUBLIC"."Y" AS "PUBLIC"."X"; +> CREATE DOMAIN "PUBLIC"."Z" AS "PUBLIC"."Y"; +> rows (ordered): 6 + +DROP ALL OBJECTS; +> ok + +CREATE SCHEMA S1; +> ok + +CREATE SCHEMA S2; +> ok + +CREATE SCHEMA S3; +> ok + +CREATE DOMAIN S1.D1 AS INTEGER; +> ok + +CREATE DOMAIN S2.D2 AS S1.D1; +> ok + +CREATE DOMAIN S3.D3 AS S2.D2; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION SCHEMA S3; +> SCRIPT +> ---------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE SCHEMA IF NOT EXISTS "S3" AUTHORIZATION "SA"; +> CREATE DOMAIN "S3"."D3" AS "S2"."D2"; +> rows (ordered): 3 + +DROP SCHEMA S3 CASCADE; +> ok + +DROP SCHEMA S2 CASCADE; +> ok + +DROP SCHEMA S1 CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/select.sql b/h2/src/test/org/h2/test/scripts/dml/select.sql deleted file mode 100644 index 6be8141ec4..0000000000 --- a/h2/src/test/org/h2/test/scripts/dml/select.sql +++ /dev/null @@ -1,715 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
--- Initial Developer: H2 Group --- - -CREATE TABLE TEST(A INT, B INT, C INT); -> ok - -INSERT INTO TEST VALUES (1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 2, 1), (1, 2, 2), (1, 2, 3), - (2, 1, 1), (2, 1, 2), (2, 1, 3), (2, 2, 1), (2, 2, 2), (2, 2, 3); -> update count: 12 - -SELECT * FROM TEST ORDER BY A, B; -> A B C -> - - - -> 1 1 1 -> 1 1 2 -> 1 1 3 -> 1 2 1 -> 1 2 2 -> 1 2 3 -> 2 1 1 -> 2 1 2 -> 2 1 3 -> 2 2 1 -> 2 2 2 -> 2 2 3 -> rows (partially ordered): 12 - -SELECT * FROM TEST ORDER BY A, B, C FETCH FIRST 4 ROWS ONLY; -> A B C -> - - - -> 1 1 1 -> 1 1 2 -> 1 1 3 -> 1 2 1 -> rows (ordered): 4 - -SELECT * FROM TEST ORDER BY A, B, C FETCH FIRST 4 ROWS WITH TIES; -> A B C -> - - - -> 1 1 1 -> 1 1 2 -> 1 1 3 -> 1 2 1 -> rows (ordered): 4 - -SELECT * FROM TEST ORDER BY A, B FETCH FIRST 4 ROWS WITH TIES; -> A B C -> - - - -> 1 1 1 -> 1 1 2 -> 1 1 3 -> 1 2 1 -> 1 2 2 -> 1 2 3 -> rows (partially ordered): 6 - -SELECT * FROM TEST ORDER BY A FETCH FIRST ROW WITH TIES; -> A B C -> - - - -> 1 1 1 -> 1 1 2 -> 1 1 3 -> 1 2 1 -> 1 2 2 -> 1 2 3 -> rows (partially ordered): 6 - -SELECT TOP (1) WITH TIES * FROM TEST ORDER BY A; -> A B C -> - - - -> 1 1 1 -> 1 1 2 -> 1 1 3 -> 1 2 1 -> 1 2 2 -> 1 2 3 -> rows (partially ordered): 6 - -SELECT TOP 1 PERCENT WITH TIES * FROM TEST ORDER BY A; -> A B C -> - - - -> 1 1 1 -> 1 1 2 -> 1 1 3 -> 1 2 1 -> 1 2 2 -> 1 2 3 -> rows (partially ordered): 6 - -SELECT TOP 51 PERCENT WITH TIES * FROM TEST ORDER BY A, B; -> A B C -> - - - -> 1 1 1 -> 1 1 2 -> 1 1 3 -> 1 2 1 -> 1 2 2 -> 1 2 3 -> 2 1 1 -> 2 1 2 -> 2 1 3 -> rows (partially ordered): 9 - -SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; -> A B C -> - - - -> 1 2 1 -> 1 2 2 -> 1 2 3 -> rows (partially ordered): 3 - -SELECT * FROM TEST FETCH NEXT ROWS ONLY; -> A B C -> - - - -> 1 1 1 -> rows: 1 - -SELECT * FROM TEST FETCH FIRST 101 PERCENT ROWS ONLY; -> exception INVALID_VALUE_2 - -SELECT * FROM TEST FETCH FIRST -1 PERCENT ROWS ONLY; -> exception INVALID_VALUE_2 - 
-SELECT * FROM TEST FETCH FIRST 0 PERCENT ROWS ONLY; -> A B C -> - - - -> rows: 0 - -SELECT * FROM TEST FETCH FIRST 1 PERCENT ROWS ONLY; -> A B C -> - - - -> 1 1 1 -> rows: 1 - -SELECT * FROM TEST FETCH FIRST 10 PERCENT ROWS ONLY; -> A B C -> - - - -> 1 1 1 -> 1 1 2 -> rows: 2 - -SELECT * FROM TEST OFFSET 2 ROWS FETCH NEXT 10 PERCENT ROWS ONLY; -> A B C -> - - - -> 1 1 3 -> 1 2 1 -> rows: 2 - -CREATE INDEX TEST_A_IDX ON TEST(A); -> ok - -CREATE INDEX TEST_A_B_IDX ON TEST(A, B); -> ok - -SELECT * FROM TEST ORDER BY A FETCH FIRST 1 ROW WITH TIES; -> A B C -> - - - -> 1 1 1 -> 1 1 2 -> 1 1 3 -> 1 2 1 -> 1 2 2 -> 1 2 3 -> rows (partially ordered): 6 - -SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; -> A B C -> - - - -> 1 2 1 -> 1 2 2 -> 1 2 3 -> rows (partially ordered): 3 - -SELECT * FROM TEST FETCH FIRST 1 ROW WITH TIES; -> exception WITH_TIES_WITHOUT_ORDER_BY - -(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; -> A B C -> - - - -> 1 2 1 -> 1 2 2 -> 1 2 3 -> 1 2 4 -> rows (partially ordered): 4 - -(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 50 PERCENT ROWS ONLY; -> A B C -> - - - -> 1 2 1 -> 1 2 2 -> 1 2 3 -> 1 2 4 -> 2 1 1 -> 2 1 2 -> 2 1 3 -> rows (partially ordered): 7 - -(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 40 PERCENT ROWS WITH TIES; -> A B C -> - - - -> 1 2 1 -> 1 2 2 -> 1 2 3 -> 1 2 4 -> 2 1 1 -> 2 1 2 -> 2 1 3 -> rows (partially ordered): 7 - -(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) FETCH NEXT 1 ROW WITH TIES; -> exception WITH_TIES_WITHOUT_ORDER_BY - -EXPLAIN SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; ->> SELECT "TEST"."A", "TEST"."B", "TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX */ ORDER BY 1, 2 OFFSET 3 ROWS FETCH NEXT ROW WITH TIES /* index sorted */ - -EXPLAIN SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 PERCENT ROWS WITH TIES; ->> 
SELECT "TEST"."A", "TEST"."B", "TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX */ ORDER BY 1, 2 OFFSET 3 ROWS FETCH NEXT 1 PERCENT ROWS WITH TIES /* index sorted */ - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(A NUMERIC, B NUMERIC); -> ok - -INSERT INTO TEST VALUES (0, 1), (0.0, 2), (0, 3), (1, 4); -> update count: 4 - -SELECT A, B FROM TEST ORDER BY A FETCH FIRST 1 ROW WITH TIES; -> A B -> --- - -> 0 1 -> 0 3 -> 0.0 2 -> rows (partially ordered): 3 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(A INT, B INT); -> ok - -INSERT INTO TEST VALUES (1, 1), (1, 2), (2, 1), (2, 2), (2, 3); -> update count: 5 - -SELECT A, COUNT(B) FROM TEST GROUP BY A ORDER BY A OFFSET 1; -> A COUNT(B) -> - -------- -> 2 3 -> rows (ordered): 1 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST1(A INT, B INT, C INT) AS SELECT 1, 2, 3; -> ok - -CREATE TABLE TEST2(A INT, B INT, C INT) AS SELECT 4, 5, 6; -> ok - -SELECT A, B FROM TEST1 UNION SELECT A, B FROM TEST2 ORDER BY 1.1; -> exception ORDER_BY_NOT_IN_RESULT - -DROP TABLE TEST1; -> ok - -DROP TABLE TEST2; -> ok - --- Disallowed mixed OFFSET/FETCH/LIMIT/TOP clauses -CREATE TABLE TEST (ID BIGINT); -> ok - -SELECT TOP 1 ID FROM TEST OFFSET 1 ROW; -> exception SYNTAX_ERROR_1 - -SELECT TOP 1 ID FROM TEST FETCH NEXT ROW ONLY; -> exception SYNTAX_ERROR_1 - -SELECT TOP 1 ID FROM TEST LIMIT 1; -> exception SYNTAX_ERROR_1 - -SELECT ID FROM TEST OFFSET 1 ROW LIMIT 1; -> exception SYNTAX_ERROR_1 - -SELECT ID FROM TEST FETCH NEXT ROW ONLY LIMIT 1; -> exception SYNTAX_ERROR_1 - -DROP TABLE TEST; -> ok - --- ORDER BY with parameter -CREATE TABLE TEST(A INT, B INT); -> ok - -INSERT INTO TEST VALUES (1, 1), (1, 2), (2, 1), (2, 2); -> update count: 4 - -SELECT * FROM TEST ORDER BY ?, ? 
FETCH FIRST ROW ONLY; -{ -1, 2 -> A B -> - - -> 1 1 -> rows (ordered): 1 --1, 2 -> A B -> - - -> 2 1 -> rows (ordered): 1 -1, -2 -> A B -> - - -> 1 2 -> rows (ordered): 1 --1, -2 -> A B -> - - -> 2 2 -> rows (ordered): 1 -2, -1 -> A B -> - - -> 2 1 -> rows (ordered): 1 -} -> update count: 0 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST1(A INT, B INT, C INT) AS SELECT 1, 2, 3; -> ok - -CREATE TABLE TEST2(A INT, D INT) AS SELECT 4, 5; -> ok - -SELECT * FROM TEST1, TEST2; -> A B C A D -> - - - - - -> 1 2 3 4 5 -> rows: 1 - -SELECT * EXCEPT (A) FROM TEST1; -> B C -> - - -> 2 3 -> rows: 1 - -SELECT * EXCEPT (TEST1.A) FROM TEST1; -> B C -> - - -> 2 3 -> rows: 1 - -SELECT * EXCEPT (PUBLIC.TEST1.A) FROM TEST1; -> B C -> - - -> 2 3 -> rows: 1 - -SELECT * EXCEPT (SCRIPT.PUBLIC.TEST1.A) FROM TEST1; -> B C -> - - -> 2 3 -> rows: 1 - -SELECT * EXCEPT (Z) FROM TEST1; -> exception COLUMN_NOT_FOUND_1 - -SELECT * EXCEPT (B, TEST1.B) FROM TEST1; -> exception DUPLICATE_COLUMN_NAME_1 - -SELECT * EXCEPT (A) FROM TEST1, TEST2; -> exception AMBIGUOUS_COLUMN_NAME_1 - -SELECT * EXCEPT (TEST1.A, B, TEST2.D) FROM TEST1, TEST2; -> C A -> - - -> 3 4 -> rows: 1 - -SELECT TEST1.*, TEST2.* FROM TEST1, TEST2; -> A B C A D -> - - - - - -> 1 2 3 4 5 -> rows: 1 - -SELECT TEST1.* EXCEPT (A), TEST2.* EXCEPT (A) FROM TEST1, TEST2; -> B C D -> - - - -> 2 3 5 -> rows: 1 - -SELECT TEST1.* EXCEPT (A), TEST2.* EXCEPT (D) FROM TEST1, TEST2; -> B C A -> - - - -> 2 3 4 -> rows: 1 - -SELECT * EXCEPT (T1.A, T2.D) FROM TEST1 T1, TEST2 T2; -> B C A -> - - - -> 2 3 4 -> rows: 1 - -DROP TABLE TEST1, TEST2; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE INT NOT NULL); -> ok - -INSERT INTO TEST VALUES (1, 1), (2, 1), (3, 2); -> update count: 3 - -SELECT ID, VALUE FROM TEST FOR UPDATE; -> ID VALUE -> -- ----- -> 1 1 -> 2 1 -> 3 2 -> rows: 3 - --- Check that NULL row is returned from SELECT FOR UPDATE -CREATE TABLE T1(A INT PRIMARY KEY) AS VALUES 1, 2; -> ok - -CREATE TABLE T2(B INT PRIMARY KEY) AS VALUES 1; -> ok 
- -SELECT * FROM T1 LEFT JOIN T2 ON A = B FOR UPDATE; -> A B -> - ---- -> 1 1 -> 2 null -> rows: 2 - -DROP TABLE T1, T2; -> ok - -SELECT DISTINCT VALUE FROM TEST FOR UPDATE; -> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT - -SELECT DISTINCT ON(VALUE) ID, VALUE FROM TEST FOR UPDATE; -> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT - -SELECT SUM(VALUE) FROM TEST FOR UPDATE; -> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT - -SELECT ID FROM TEST GROUP BY VALUE FOR UPDATE; -> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT - -SELECT 1 FROM TEST HAVING TRUE FOR UPDATE; -> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, V INT) AS SELECT X, X + 1 FROM SYSTEM_RANGE(1, 3); -> ok - -SELECT ID FROM TEST WHERE ID != ALL (SELECT ID FROM TEST WHERE ID IN(1, 3)); -> ID -> -- -> 2 -> rows: 1 - -SELECT (1, 3) > ANY (SELECT ID, V FROM TEST); ->> TRUE - -SELECT (1, 2) > ANY (SELECT ID, V FROM TEST); ->> FALSE - -SELECT (2, 3) = ANY (SELECT ID, V FROM TEST); ->> TRUE - -SELECT (3, 4) > ALL (SELECT ID, V FROM TEST); ->> FALSE - -DROP TABLE TEST; -> ok - -SELECT 1 = ALL (SELECT * FROM VALUES (NULL), (1), (2), (NULL) ORDER BY 1); ->> FALSE - -CREATE TABLE TEST(G INT, V INT); -> ok - -INSERT INTO TEST VALUES (10, 1), (11, 2), (20, 4); -> update count: 3 - -SELECT G / 10 G1, G / 10 G2, SUM(T.V) S FROM TEST T GROUP BY G / 10, G / 10; -> G1 G2 S -> -- -- - -> 1 1 3 -> 2 2 4 -> rows: 2 - -SELECT G / 10 G1, G / 10 G2, SUM(T.V) S FROM TEST T GROUP BY G2; -> G1 G2 S -> -- -- - -> 1 1 3 -> 2 2 4 -> rows: 2 - -DROP TABLE TEST; -> ok - -@reconnect off - -CALL RAND(0); ->> 0.730967787376657 - -SELECT RAND(), RAND() + 1, RAND() + 1, RAND() GROUP BY RAND() + 1; -> RAND() RAND() + 1 RAND() + 1 RAND() -> ------------------ ------------------ ------------------ ------------------ -> 0.6374174253501083 1.2405364156714858 1.2405364156714858 
0.5504370051176339 -> rows: 1 - -SELECT RAND() A, RAND() + 1 B, RAND() + 1 C, RAND() D, RAND() + 2 E, RAND() + 3 F GROUP BY B, C, E, F; -> A B C D E F -> ------------------ ------------------ ------------------ ------------------ ------------------ ------------------ -> 0.8791825178724801 1.3332183994766498 1.3332183994766498 0.9412491794821144 2.3851891847407183 3.9848415401998087 -> rows: 1 - -@reconnect on - -CREATE TABLE TEST (A INT, B INT, C INT); -> ok - -INSERT INTO TEST VALUES (11, 12, 13), (21, 22, 23), (31, 32, 33); -> update count: 3 - -SELECT * FROM TEST WHERE (A, B) IN (VALUES (11, 12), (21, 22), (41, 42)); -> A B C -> -- -- -- -> 11 12 13 -> 21 22 23 -> rows: 2 - -SELECT * FROM TEST WHERE (A, B) = (VALUES (11, 12)); -> A B C -> -- -- -- -> 11 12 13 -> rows: 1 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(A BIGINT, B INT) AS VALUES (1::BIGINT, 2); -> ok - -SELECT * FROM TEST WHERE (A, B) IN ((1, 2), (3, 4)); -> A B -> - - -> 1 2 -> rows: 1 - -UPDATE TEST SET A = 1000000000000; -> update count: 1 - -SELECT * FROM TEST WHERE (A, B) IN ((1, 2), (3, 4)); -> A B -> - - -> rows: 0 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(A BIGINT, B INT) AS VALUES (1, 2); -> ok - -SELECT * FROM TEST WHERE (A, B) IN ((1::BIGINT, 2), (3, 4)); -> A B -> - - -> 1 2 -> rows: 1 - -SELECT * FROM TEST WHERE (A, B) IN ((1000000000000, 2), (3, 4)); -> A B -> - - -> rows: 0 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(I) AS VALUES 1, 2, 3; -> ok - -SELECT COUNT(*) C FROM TEST HAVING C < 1; -> C -> - -> rows: 0 - -SELECT COUNT(*) C FROM TEST QUALIFY C < 1; -> C -> - -> rows: 0 - -DROP TABLE TEST; -> ok - -SELECT A, ROW_NUMBER() OVER (ORDER BY B) R -FROM (VALUES (1, 2), (2, 1), (3, 3)) T(A, B); -> A R -> - - -> 1 2 -> 2 1 -> 3 3 -> rows: 3 - -SELECT X, A, ROW_NUMBER() OVER (ORDER BY B) R -FROM (SELECT 1 X), (VALUES (1, 2), (2, 1), (3, 3)) T(A, B); -> X A R -> - - - -> 1 1 2 -> 1 2 1 -> 1 3 3 -> rows: 3 - -SELECT A, SUM(S) OVER (ORDER BY S) FROM - (SELECT A, SUM(B) FROM (VALUES 
(1, 2), (1, 3), (3, 5), (3, 10)) V(A, B) GROUP BY A) S(A, S); -> A SUM(S) OVER (ORDER BY S) -> - ------------------------ -> 1 5 -> 3 20 -> rows: 2 - -SELECT A, SUM(A) OVER W SUM FROM (VALUES 1, 2) T(A) WINDOW W AS (ORDER BY A); -> A SUM -> - --- -> 1 1 -> 2 3 -> rows: 2 - -SELECT A, B, C FROM (SELECT A, B, C FROM (VALUES (1, 2, 3)) V(A, B, C)); -> A B C -> - - - -> 1 2 3 -> rows: 1 - -SELECT * FROM (SELECT * FROM (VALUES (1, 2, 3)) V(A, B, C)); -> A B C -> - - - -> 1 2 3 -> rows: 1 - -SELECT * FROM - (SELECT X * X, Y FROM - (SELECT A + 5, B FROM - (VALUES (1, 2)) V(A, B) - ) T(X, Y) - ); -> X * X Y -> ----- - -> 36 2 -> rows: 1 - -CREATE TABLE TEST("_ROWID_" INT) AS VALUES 2; -> ok - -SELECT _ROWID_ S1, TEST._ROWID_ S2, PUBLIC.TEST._ROWID_ S3, SCRIPT.PUBLIC.TEST._ROWID_ S4, - "_ROWID_" U1, TEST."_ROWID_" U2, PUBLIC.TEST."_ROWID_" U3, SCRIPT.PUBLIC.TEST."_ROWID_" U4 - FROM TEST; -> S1 S2 S3 S4 U1 U2 U3 U4 -> -- -- -- -- -- -- -- -- -> 1 1 1 1 2 2 2 2 -> rows: 1 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(ID BIGINT PRIMARY KEY); -> ok - -SELECT X.ID FROM TEST X JOIN TEST Y ON Y.ID IN (SELECT 1); -> ID -> -- -> rows: 0 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(A INT, B INT) AS VALUES (1, 10), (2, 20), (4, 40); -> ok - -SELECT T1.A, T2.ARR FROM TEST T1 JOIN ( - SELECT A, ARRAY_AGG(B) OVER (ORDER BY B ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) ARR FROM TEST -) T2 ON T1.A = T2.A; -> A ARR -> - -------- -> 1 [20, 40] -> 2 [40] -> 4 null -> rows: 3 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, V INT UNIQUE); -> ok - -EXPLAIN SELECT * FROM TEST ORDER BY ID FOR UPDATE; ->> SELECT "TEST"."ID", "TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ ORDER BY 1 FOR UPDATE /* index sorted */ - -EXPLAIN SELECT * FROM TEST ORDER BY V; ->> SELECT "TEST"."ID", "TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.CONSTRAINT_INDEX_2 */ ORDER BY 2 /* index sorted */ - -EXPLAIN SELECT * FROM TEST ORDER BY V FOR UPDATE; -#+mvStore#>> SELECT "TEST"."ID", "TEST"."V" 
FROM "PUBLIC"."TEST" /* PUBLIC.CONSTRAINT_INDEX_2 */ ORDER BY 2 FOR UPDATE -#-mvStore#>> SELECT "TEST"."ID", "TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.CONSTRAINT_INDEX_2 */ ORDER BY 2 FOR UPDATE /* index sorted */ - -DROP TABLE TEST; -> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/show.sql b/h2/src/test/org/h2/test/scripts/dml/show.sql index ee789939d7..a6c2c13ef3 100644 --- a/h2/src/test/org/h2/test/scripts/dml/show.sql +++ b/h2/src/test/org/h2/test/scripts/dml/show.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -75,19 +75,19 @@ SHOW TABLES FROM SCH; > rows (ordered): 1 SHOW COLUMNS FROM TEST_P; -> FIELD TYPE NULL KEY DEFAULT -> ----- ------------ ---- --- ------- -> ID_P INTEGER(10) NO PRI NULL -> U_P VARCHAR(255) YES UNI NULL -> N_P INTEGER(10) YES 1 +> FIELD TYPE NULL KEY DEFAULT +> ----- ---------------------- ---- --- ------- +> ID_P INTEGER NO PRI NULL +> U_P CHARACTER VARYING(255) YES UNI NULL +> N_P INTEGER YES 1 > rows (ordered): 3 SHOW COLUMNS FROM TEST_S FROM SCH; -> FIELD TYPE NULL KEY DEFAULT -> ----- ------------ ---- --- ------- -> ID_S INTEGER(10) NO PRI NULL -> U_S VARCHAR(255) YES UNI NULL -> N_S INTEGER(10) YES 1 +> FIELD TYPE NULL KEY DEFAULT +> ----- ---------------------- ---- --- ------- +> ID_S INTEGER NO PRI NULL +> U_S CHARACTER VARYING(255) YES UNI NULL +> N_S INTEGER YES 1 > rows (ordered): 3 SHOW DATABASES; diff --git a/h2/src/test/org/h2/test/scripts/dml/update.sql b/h2/src/test/org/h2/test/scripts/dml/update.sql index 0eb8ea5bc1..7f67503625 100644 --- a/h2/src/test/org/h2/test/scripts/dml/update.sql +++ b/h2/src/test/org/h2/test/scripts/dml/update.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -36,30 +36,310 @@ UPDATE TEST SET (B) = (7); SELECT B FROM TEST; >> 7 +UPDATE TEST SET (B) = (2, 3); +> exception COLUMN_COUNT_DOES_NOT_MATCH + +-- TODO +-- UPDATE TEST SET (A, B) = ARRAY[3, 4]; +-- > exception COLUMN_COUNT_DOES_NOT_MATCH + +EXPLAIN UPDATE TEST SET (A) = ROW(3), B = 4; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "A" = 3, "B" = 4 + +EXPLAIN UPDATE TEST SET A = 3, (B) = 4; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "A" = 3, "B" = 4 + +UPDATE TEST SET (A, B) = (1, 2), (B, A) = (2, 1); +> exception DUPLICATE_COLUMN_NAME_1 + +UPDATE TEST SET (A) = A * 3; +> update count: 1 + DROP TABLE TEST; > ok CREATE TABLE TEST(ID INT) AS VALUES 100; > ok -SELECT _ROWID_ FROM TEST; ->> 1 - --- _ROWID_ modifications are ignored +-- _ROWID_ modifications are not allowed UPDATE TEST SET _ROWID_ = 2 WHERE ID = 100; +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1)); +> ok + +INSERT INTO TEST(A) VALUES 1; +> update count: 1 + +UPDATE TEST SET A = 2, B = DEFAULT; +> update count: 1 + +TABLE TEST; +> A B +> - - +> 2 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1)); +> ok + +INSERT INTO TEST(A) VALUES 1; +> update count: 1 + +UPDATE TEST SET B = 1; +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +UPDATE TEST SET B = DEFAULT; +> update count: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, A INT, B INT, C INT, D INT, E INT, F INT) AS VALUES (1, 1, 1, 1, 1, 1, 1); +> ok + +EXPLAIN UPDATE TEST SET + (F, C, A) = (SELECT 2, 3, 4 FROM TEST FETCH FIRST ROW ONLY), + (B, E) = (SELECT 5, 6 FROM TEST FETCH FIRST ROW ONLY) + WHERE ID = 1; +>> 
UPDATE "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ SET ("F", "C", "A") = (SELECT 2, 3, 4 FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST ROW ONLY), ("B", "E") = (SELECT 5, 6 FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST ROW ONLY) WHERE "ID" = 1 + +UPDATE TEST SET + (F, C, A) = (SELECT 2, 3, 4 FROM TEST FETCH FIRST ROW ONLY), + (B, E) = (SELECT 5, 6 FROM TEST FETCH FIRST ROW ONLY) + WHERE ID = 1; +> update count: 1 + +TABLE TEST; +> ID A B C D E F +> -- - - - - - - +> 1 4 5 3 1 6 2 +> rows: 1 + +UPDATE TEST SET (C, C) = (SELECT 1, 2 FROM TEST); +> exception DUPLICATE_COLUMN_NAME_1 + +UPDATE TEST SET (A, B) = (SELECT 1, 2, 3 FROM TEST); +> exception COLUMN_COUNT_DOES_NOT_MATCH + +UPDATE TEST SET (D, E) = NULL; +> exception DATA_CONVERSION_ERROR_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY, ID2 BIGINT GENERATED ALWAYS AS (ID + 1), + V INT, U INT ON UPDATE (5)); +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +TABLE TEST; +> ID ID2 V U +> -- --- - ---- +> 1 2 1 null +> rows: 1 + +UPDATE TEST SET V = V + 1; +> update count: 1 + +UPDATE TEST SET V = V + 1, ID = DEFAULT, ID2 = DEFAULT; +> update count: 1 + +TABLE TEST; +> ID ID2 V U +> -- --- - - +> 1 2 3 5 +> rows: 1 + +MERGE INTO TEST USING (VALUES 1) T(X) ON TRUE WHEN MATCHED THEN UPDATE SET V = V + 1; +> update count: 1 + +MERGE INTO TEST USING (VALUES 1) T(X) ON TRUE WHEN MATCHED THEN UPDATE SET V = V + 1, ID = DEFAULT, ID2 = DEFAULT; +> update count: 1 + +TABLE TEST; +> ID ID2 V U +> -- --- - - +> 1 2 5 5 +> rows: 1 + +MERGE INTO TEST KEY(V) VALUES (DEFAULT, DEFAULT, 5, 1); +> update count: 1 + +TABLE TEST; +> ID ID2 V U +> -- --- - - +> 1 2 5 1 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE DOMAIN D AS BIGINT DEFAULT 100 ON UPDATE 200; +> ok + +CREATE TABLE TEST(ID D GENERATED BY DEFAULT AS IDENTITY, V INT, G D GENERATED ALWAYS AS (V + 1)); +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +TABLE TEST; +> ID V G +> 
-- - - +> 1 1 2 +> rows: 1 + +UPDATE TEST SET V = 2; +> update count: 1 + +TABLE TEST; +> ID V G +> -- - - +> 1 2 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +DROP DOMAIN D; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT) AS VALUES (0, 0, 1), (0, 0, 3); +> ok + +CREATE TABLE S1(A INT, B INT) AS VALUES (1, 2); +> ok + +CREATE TABLE S2(A INT, B INT) AS VALUES (3, 4); +> ok + +UPDATE TEST SET (A, B) = (SELECT * FROM S1 WHERE C = A UNION SELECT * FROM S2 WHERE C = A); +> update count: 2 + +TABLE TEST; +> A B C +> - - - +> 1 2 1 +> 3 4 3 +> rows: 2 + +DROP TABLE TEST, S1, S2; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT) AS SELECT X, X FROM SYSTEM_RANGE(1, 13); +> ok + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH FIRST ROW ONLY; > update count: 1 -UPDATE TEST SET TEST._ROWID_ = 3 WHERE ID = 100; +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH FIRST ROWS ONLY; > update count: 1 -UPDATE TEST SET PUBLIC.TEST._ROWID_ = 4 WHERE ID = 100; +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH NEXT ROW ONLY; > update count: 1 -UPDATE TEST SET SCRIPT.PUBLIC.TEST._ROWID_ = 5 WHERE ID = 100; +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH NEXT ROWS ONLY; > update count: 1 -SELECT _ROWID_ FROM TEST; ->> 1 +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH FIRST 2 ROW ONLY; +> update count: 2 + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH FIRST 2 ROWS ONLY; +> update count: 2 + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH NEXT 2 ROW ONLY; +> update count: 2 + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH NEXT 2 ROWS ONLY; +> update count: 2 + +EXPLAIN UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH FIRST 2 ROWS ONLY; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID <= 12 */ SET "V" = "V" + 1 WHERE "ID" <= 12 FETCH FIRST 2 ROWS ONLY + +EXPLAIN UPDATE TEST SET V = V + 1 FETCH FIRST 1 ROW ONLY; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "V" = "V" + 1 FETCH FIRST ROW ONLY + +EXPLAIN UPDATE TEST SET V = V + 1; +>> UPDATE "PUBLIC"."TEST" /* 
PUBLIC.TEST.tableScan */ SET "V" = "V" + 1 + +SELECT SUM(V) FROM TEST; +>> 103 + +UPDATE TEST SET V = V + 1 FETCH FIRST 100 ROWS ONLY; +> update count: 13 + +SELECT SUM(V) FROM TEST; +>> 116 + +-- legacy syntax +EXPLAIN UPDATE TEST SET V = V + 1 LIMIT 2; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "V" = "V" + 1 FETCH FIRST 2 ROWS ONLY + +UPDATE TEST SET V = V + 1 LIMIT 2; +> update count: 2 + +SELECT SUM(V) FROM TEST; +>> 118 DROP TABLE TEST; > ok + +CREATE TABLE FOO (ID INT, VAL VARCHAR) AS VALUES(1, 'foo1'), (2, 'foo2'), (3, 'foo3'); +> ok + +CREATE TABLE BAR (ID INT, VAL VARCHAR) AS VALUES(1, 'bar1'), (3, 'bar3'), (4, 'bar4'); +> ok + +SET MODE PostgreSQL; +> ok + +UPDATE FOO SET VAL = BAR.VAL FROM BAR WHERE FOO.ID = BAR.ID; +> update count: 2 + +TABLE FOO; +> ID VAL +> -- ---- +> 1 bar1 +> 2 foo2 +> 3 bar3 +> rows: 3 + +UPDATE FOO SET BAR.VAL = FOO.VAL FROM BAR WHERE FOO.ID = BAR.ID; +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +SET MODE Regular; +> ok + +CREATE TABLE DEST(ID INT, X INT, Y INT); +> ok + +INSERT INTO DEST VALUES (1, 10, 11), (2, 20, 21); +> update count: 2 + +CREATE TABLE SRC(ID INT, X INT, Y INT); +> ok + +INSERT INTO SRC VALUES (1, 100, 101); +> update count: 1 + +UPDATE DEST SET (X, Y) = (SELECT X, Y FROM SRC WHERE SRC.ID = DEST.ID); +> update count: 2 + +TABLE DEST; +> ID X Y +> -- ---- ---- +> 1 100 101 +> 2 null null +> rows: 2 + +DROP TABLE SRC, DEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/values.sql b/h2/src/test/org/h2/test/scripts/dml/values.sql deleted file mode 100644 index 295522c7dc..0000000000 --- a/h2/src/test/org/h2/test/scripts/dml/values.sql +++ /dev/null @@ -1,53 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
--- Initial Developer: H2 Group --- - -VALUES (1, 2); -> C1 C2 -> -- -- -> 1 2 -> rows: 1 - -VALUES ROW (1, 2); -> C1 C2 -> -- -- -> 1 2 -> rows: 1 - -VALUES 1, 2; -> C1 -> -- -> 1 -> 2 -> rows: 2 - -VALUES 4, 3, 1, 2 ORDER BY 1 FETCH FIRST 75 PERCENT ROWS ONLY; -> C1 -> -- -> 1 -> 2 -> 3 -> rows (ordered): 3 - -SELECT * FROM (VALUES (1::BIGINT, 2)) T (A, B) WHERE (A, B) IN (VALUES(1, 2)); -> A B -> - - -> 1 2 -> rows: 1 - -SELECT * FROM (VALUES (1000000000000, 2)) T (A, B) WHERE (A, B) IN (VALUES(1, 2)); -> A B -> - - -> rows: 0 - -SELECT * FROM (VALUES (1, 2)) T (A, B) WHERE (A, B) IN (VALUES(1::BIGINT, 2)); -> A B -> - - -> 1 2 -> rows: 1 - -SELECT * FROM (VALUES (1, 2)) T (A, B) WHERE (A, B) IN (VALUES(1000000000000, 2)); -> A B -> - - -> rows: 0 diff --git a/h2/src/test/org/h2/test/scripts/dml/with.sql b/h2/src/test/org/h2/test/scripts/dml/with.sql index ff1f89cfb4..758127e770 100644 --- a/h2/src/test/org/h2/test/scripts/dml/with.sql +++ b/h2/src/test/org/h2/test/scripts/dml/with.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -36,13 +36,13 @@ explain with recursive r(n) as ( (select 1) union all (select n+1 from r where n < 3) ) select n from r; ->> WITH RECURSIVE "PUBLIC"."R"("N") AS ( (SELECT 1 FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX */) UNION ALL (SELECT ("N" + 1) FROM "PUBLIC"."R" /* PUBLIC.R.tableScan */ WHERE "N" < 3) ) SELECT "N" FROM "PUBLIC"."R" "R" /* null */ +>> WITH RECURSIVE "PUBLIC"."R"("N") AS ( (SELECT 1) UNION ALL (SELECT "N" + 1 FROM "PUBLIC"."R" /* PUBLIC.R.tableScan */ WHERE "N" < 3) ) SELECT "N" FROM "PUBLIC"."R" "R" /* null */ explain with recursive "r"(n) as ( (select 1) union all (select n+1 from "r" where n < 3) ) select n from "r"; ->> WITH RECURSIVE "PUBLIC"."r"("N") AS ( (SELECT 1 FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX */) UNION ALL (SELECT ("N" + 1) FROM "PUBLIC"."r" /* PUBLIC.r.tableScan */ WHERE "N" < 3) ) SELECT "N" FROM "PUBLIC"."r" "r" /* null */ +>> WITH RECURSIVE "PUBLIC"."r"("N") AS ( (SELECT 1) UNION ALL (SELECT "N" + 1 FROM "PUBLIC"."r" /* PUBLIC.r.tableScan */ WHERE "N" < 3) ) SELECT "N" FROM "PUBLIC"."r" "r" /* null */ select sum(n) from ( with recursive r(n) as ( @@ -160,3 +160,86 @@ WITH CTE_TEST AS (TABLE TEST) ((SELECT A, B, C FROM CTE_TEST)); DROP TABLE TEST; > ok + +WITH RECURSIVE V(V1, V2) AS ( + SELECT 0 V1, 1 V2 + UNION ALL + SELECT V1 + 1, V2 + 1 FROM V WHERE V2 < 4 +) +SELECT V1, V2, COUNT(*) FROM V +LEFT JOIN (SELECT T1 / T2 R FROM (VALUES (10, 0)) T(T1, T2) WHERE T2*T2*T2*T2*T2*T2 <> 0) X ON X.R > V.V1 AND X.R < V.V2 +GROUP BY V1, V2; +> V1 V2 COUNT(*) +> -- -- -------- +> 0 1 1 +> 1 2 1 +> 2 3 1 +> 3 4 1 +> rows: 4 + +EXPLAIN WITH RECURSIVE V(V1, V2) AS ( + SELECT 0 V1, 1 V2 + UNION ALL + SELECT V1 + 1, V2 + 1 FROM V WHERE V2 < 10 +) +SELECT V1, V2, COUNT(*) FROM V +LEFT JOIN (SELECT T1 / T2 R FROM (VALUES (10, 0)) T(T1, T2) WHERE T2*T2*T2*T2*T2*T2 <> 0) X ON X.R > V.V1 AND X.R < V.V2 +GROUP BY V1, V2; +>> WITH RECURSIVE "PUBLIC"."V"("V1", "V2") AS ( (SELECT 0 AS "V1", 1 AS "V2") 
UNION ALL (SELECT "V1" + 1, "V2" + 1 FROM "PUBLIC"."V" /* PUBLIC.V.tableScan */ WHERE "V2" < 10) ) SELECT "V1", "V2", COUNT(*) FROM "PUBLIC"."V" "V" /* null */ LEFT OUTER JOIN ( SELECT "T1" / "T2" AS "R" FROM (VALUES (10, 0)) "T"("T1", "T2") WHERE ((((("T2" * "T2") * "T2") * "T2") * "T2") * "T2") <> 0 ) "X" /* SELECT T1 / T2 AS R FROM (VALUES (10, 0)) T(T1, T2) /* table scan */ WHERE ((((((T2 * T2) * T2) * T2) * T2) * T2) <> 0) _LOCAL_AND_GLOBAL_ (((T1 / T2) >= ?1) AND ((T1 / T2) <= ?2)): R > V.V1 AND R < V.V2 */ ON ("X"."R" > "V"."V1") AND ("X"."R" < "V"."V2") GROUP BY "V1", "V2" + +-- Data change delta tables in WITH +CREATE TABLE TEST("VALUE" INT NOT NULL PRIMARY KEY); +> ok + +WITH W AS (SELECT NULL FROM FINAL TABLE (INSERT INTO TEST VALUES 1, 2)) +SELECT COUNT (*) FROM W; +>> 2 + +WITH W AS (SELECT NULL FROM FINAL TABLE (UPDATE TEST SET "VALUE" = 3 WHERE "VALUE" = 2)) +SELECT COUNT (*) FROM W; +>> 1 + +WITH W AS (SELECT NULL FROM FINAL TABLE (MERGE INTO TEST VALUES 4, 5)) +SELECT COUNT (*) FROM W; +>> 2 + +WITH W AS (SELECT NULL FROM OLD TABLE (DELETE FROM TEST WHERE "VALUE" = 4)) +SELECT COUNT (*) FROM W; +>> 1 + +SET MODE MySQL; +> ok + +WITH W AS (SELECT NULL FROM FINAL TABLE (REPLACE INTO TEST VALUES 4, 5)) +SELECT COUNT (*) FROM W; +>> 2 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +CREATE TABLE T(C INT); +> ok + +INSERT INTO T WITH W(C) AS (VALUES 1) SELECT C FROM W; +> update count: 1 + +TABLE W; +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +TABLE T; +>> 1 + +DROP TABLE T; +> ok + +WITH T(X) AS (SELECT 1) +(SELECT 2 Y) UNION (SELECT 3 Z) UNION (SELECT * FROM T); +> Y +> - +> 1 +> 2 +> 3 +> rows: 3 diff --git a/h2/src/test/org/h2/test/scripts/dual.sql b/h2/src/test/org/h2/test/scripts/dual.sql index 3f29ac0ecc..9df679a474 100644 --- a/h2/src/test/org/h2/test/scripts/dual.sql +++ b/h2/src/test/org/h2/test/scripts/dual.sql @@ -1,10 +1,13 @@ --- Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- SELECT * FROM DUAL; ->> 1 +> +> +> +> rows: 1 CREATE TABLE DUAL(A INT); > ok @@ -16,7 +19,10 @@ SELECT A FROM DUAL; >> 2 SELECT * FROM SYS.DUAL; ->> 1 +> +> +> +> rows: 1 DROP TABLE DUAL; > ok @@ -25,7 +31,10 @@ SET MODE DB2; > ok SELECT * FROM SYSDUMMY1; ->> 1 +> +> +> +> rows: 1 CREATE TABLE SYSDUMMY1(A INT); > ok @@ -37,7 +46,10 @@ SELECT A FROM SYSDUMMY1; >> 2 SELECT * FROM SYSIBM.SYSDUMMY1; ->> 1 +> +> +> +> rows: 1 DROP TABLE SYSDUMMY1; > ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/any.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/any.sql index 882a17d93c..41b27d5731 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/any.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/any.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -10,8 +10,8 @@ INSERT INTO TEST VALUES (1, 1), (1, 3), (2, 1), (2, 5), (3, 4); > update count: 5 SELECT A, ANY(B < 2), SOME(B > 3), BOOL_OR(B = 1), ANY(B = 1) FILTER (WHERE A = 1) FROM TEST GROUP BY A; -> A ANY(B < 2) ANY(B > 3) ANY(B = 1) ANY(B = 1) FILTER (WHERE (A = 1)) -> - ---------- ---------- ---------- --------------------------------- +> A ANY(B < 2) ANY(B > 3) ANY(B = 1) ANY(B = 1) FILTER (WHERE A = 1) +> - ---------- ---------- ---------- ------------------------------- > 1 TRUE FALSE TRUE TRUE > 2 TRUE TRUE TRUE null > 3 FALSE TRUE FALSE null @@ -20,14 +20,14 @@ SELECT A, ANY(B < 2), SOME(B > 3), BOOL_OR(B = 1), ANY(B = 1) FILTER (WHERE A = DROP TABLE TEST; > ok -SELECT TRUE = (ANY((SELECT TRUE))); -> TRUE = (ANY((SELECT TRUE FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX */ /* scanCount: 2 */))) -> ----------------------------------------------------------------------------------------------- +SELECT TRUE = (ANY((SELECT X > 0 FROM SYSTEM_RANGE(1, 1)))); +> TRUE = (ANY((SELECT X > 0 FROM SYSTEM_RANGE(1, 1)))) +> ---------------------------------------------------- > TRUE > rows: 1 -SELECT TRUE = (ANY((SELECT FALSE))); -> TRUE = (ANY((SELECT FALSE FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX */ /* scanCount: 2 */))) -> ------------------------------------------------------------------------------------------------ +SELECT TRUE = (ANY((SELECT X < 0 FROM SYSTEM_RANGE(1, 1)))); +> TRUE = (ANY((SELECT X < 0 FROM SYSTEM_RANGE(1, 1)))) +> ---------------------------------------------------- > FALSE > rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/array-agg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/array_agg.sql similarity index 76% rename from h2/src/test/org/h2/test/scripts/functions/aggregate/array-agg.sql rename to h2/src/test/org/h2/test/scripts/functions/aggregate/array_agg.sql index b40072ff73..ab39ce4b3e 100644 --- 
a/h2/src/test/org/h2/test/scripts/functions/aggregate/array-agg.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/array_agg.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: Alex Nordlund -- @@ -14,8 +14,8 @@ insert into test values ('1'), ('2'), ('3'), ('4'), ('5'), ('6'), ('7'), ('8'), select array_agg(v order by v asc), array_agg(v order by v desc) filter (where v >= '4') from test where v >= '2'; -> ARRAY_AGG(V ORDER BY V) ARRAY_AGG(V ORDER BY V DESC) FILTER (WHERE (V >= '4')) -> ------------------------ ------------------------------------------------------ +> ARRAY_AGG(V ORDER BY V) ARRAY_AGG(V ORDER BY V DESC) FILTER (WHERE V >= '4') +> ------------------------ ---------------------------------------------------- > [2, 3, 4, 5, 6, 7, 8, 9] [9, 8, 7, 6, 5, 4] > rows: 1 @@ -25,16 +25,16 @@ create index test_idx on test(v); select ARRAY_AGG(v order by v asc), ARRAY_AGG(v order by v desc) filter (where v >= '4') from test where v >= '2'; -> ARRAY_AGG(V ORDER BY V) ARRAY_AGG(V ORDER BY V DESC) FILTER (WHERE (V >= '4')) -> ------------------------ ------------------------------------------------------ +> ARRAY_AGG(V ORDER BY V) ARRAY_AGG(V ORDER BY V DESC) FILTER (WHERE V >= '4') +> ------------------------ ---------------------------------------------------- > [2, 3, 4, 5, 6, 7, 8, 9] [9, 8, 7, 6, 5, 4] > rows: 1 select ARRAY_AGG(v order by v asc), ARRAY_AGG(v order by v desc) filter (where v >= '4') from test; -> ARRAY_AGG(V ORDER BY V) ARRAY_AGG(V ORDER BY V DESC) FILTER (WHERE (V >= '4')) -> --------------------------- ------------------------------------------------------ +> ARRAY_AGG(V ORDER BY V) ARRAY_AGG(V ORDER BY V DESC) FILTER (WHERE V >= '4') +> --------------------------- 
---------------------------------------------------- > [1, 2, 3, 4, 5, 6, 7, 8, 9] [9, 8, 7, 6, 5, 4] > rows: 1 @@ -182,7 +182,7 @@ EXPLAIN WHERE ID <> 5 GROUP BY NAME HAVING ARRAY_AGG(ID ORDER BY ID)[1] > 1 QUALIFY ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME) <> ARRAY[ARRAY[3]]; ->> SELECT ARRAY_AGG(ARRAY_AGG("ID" ORDER BY "ID")) OVER (PARTITION BY "NAME"), "NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "ID" <> 5 GROUP BY "NAME" HAVING ARRAY_GET(ARRAY_AGG("ID" ORDER BY "ID"), 1) > 1 QUALIFY ARRAY_AGG(ARRAY_AGG("ID" ORDER BY "ID")) OVER (PARTITION BY "NAME") <> ARRAY [ARRAY [3]] +>> SELECT ARRAY_AGG(ARRAY_AGG("ID" ORDER BY "ID")) OVER (PARTITION BY "NAME"), "NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "ID" <> 5 GROUP BY "NAME" HAVING ARRAY_AGG("ID" ORDER BY "ID")[1] > 1 QUALIFY ARRAY_AGG(ARRAY_AGG("ID" ORDER BY "ID")) OVER (PARTITION BY "NAME") <> ARRAY [ARRAY [3]] SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME), NAME FROM TEST GROUP BY NAME ORDER BY NAME OFFSET 1 ROW; @@ -194,33 +194,33 @@ SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME), NAME FROM SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'b') OVER (PARTITION BY NAME), NAME FROM TEST GROUP BY NAME ORDER BY NAME; -> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE (NAME > 'b')) OVER (PARTITION BY NAME) NAME -> ----------------------------------------------------------------------------------------- ---- -> null a -> null b -> [[4, 5, 6]] c +> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'b') OVER (PARTITION BY NAME) NAME +> --------------------------------------------------------------------------------------- ---- +> null a +> null b +> [[4, 5, 6]] c > rows (ordered): 3 SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'c') OVER (PARTITION BY NAME), NAME FROM TEST GROUP BY NAME ORDER BY NAME; -> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE (NAME > 'c')) OVER 
(PARTITION BY NAME) NAME -> ----------------------------------------------------------------------------------------- ---- -> null a -> null b -> null c +> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'c') OVER (PARTITION BY NAME) NAME +> --------------------------------------------------------------------------------------- ---- +> null a +> null b +> null c > rows (ordered): 3 SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'b') OVER () FROM TEST GROUP BY NAME ORDER BY NAME; -> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE (NAME > 'b')) OVER () -> ------------------------------------------------------------------------ +> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'b') OVER () +> ---------------------------------------------------------------------- > [[4, 5, 6]] > [[4, 5, 6]] > [[4, 5, 6]] > rows (ordered): 3 SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'c') OVER () FROM TEST GROUP BY NAME ORDER BY NAME; -> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE (NAME > 'c')) OVER () -> ------------------------------------------------------------------------ +> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'c') OVER () +> ---------------------------------------------------------------------- > null > null > null @@ -309,7 +309,7 @@ SELECT DROP TABLE TEST; > ok -CREATE TABLE TEST(ID INT, VALUE INT); +CREATE TABLE TEST(ID INT, "VALUE" INT); > ok INSERT INTO TEST VALUES @@ -324,13 +324,13 @@ INSERT INTO TEST VALUES > update count: 8 SELECT *, - ARRAY_AGG(ID) OVER (ORDER BY VALUE ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) R_ID, - ARRAY_AGG(VALUE) OVER (ORDER BY VALUE ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) R_V, - ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) V_ID, - ARRAY_AGG(VALUE) OVER (ORDER BY VALUE RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) V_V, - ARRAY_AGG(VALUE) OVER (ORDER BY VALUE DESC RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) V_V_R, - 
ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) G_ID, - ARRAY_AGG(VALUE) OVER (ORDER BY VALUE GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) G_V + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) R_ID, + ARRAY_AGG("VALUE") OVER (ORDER BY "VALUE" ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) R_V, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) V_ID, + ARRAY_AGG("VALUE") OVER (ORDER BY "VALUE" RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) V_V, + ARRAY_AGG("VALUE") OVER (ORDER BY "VALUE" DESC RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) V_V_R, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) G_ID, + ARRAY_AGG("VALUE") OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) G_V FROM TEST; > ID VALUE R_ID R_V V_ID V_V V_V_R G_ID G_V > -- ----- --------- --------- --------------- --------------- --------------- ------------------ ------------------ @@ -345,8 +345,8 @@ SELECT *, > rows: 8 SELECT *, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY VALUE RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) A1, - ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) A2 + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) A1, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) A2 FROM TEST; > ID VALUE A1 A2 > -- ----- ------------------------ ------------------------ @@ -360,7 +360,7 @@ SELECT *, > 8 9 [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6, 7, 8] > rows: 8 -SELECT *, ARRAY_AGG(ID) OVER (ORDER BY VALUE ROWS -1 PRECEDING) FROM TEST; +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS -1 PRECEDING) FROM TEST; > exception INVALID_PRECEDING_OR_FOLLOWING_1 SELECT *, ARRAY_AGG(ID) OVER (ORDER BY ID ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING) FROM TEST FETCH FIRST 4 ROWS ONLY; @@ -400,9 +400,9 @@ SELECT *, ARRAY_AGG(ID) OVER (ORDER BY ID RANGE BETWEEN 1 
FOLLOWING AND 2 FOLLOW > rows: 4 SELECT *, - ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN 0 PRECEDING AND 0 FOLLOWING) N, - ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN 0 PRECEDING AND 0 FOLLOWING EXCLUDE TIES) T, - ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN 1 PRECEDING AND 0 FOLLOWING EXCLUDE TIES) T1 + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 0 PRECEDING AND 0 FOLLOWING) N, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 0 PRECEDING AND 0 FOLLOWING EXCLUDE TIES) T, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 PRECEDING AND 0 FOLLOWING EXCLUDE TIES) T1 FROM TEST; > ID VALUE N T T1 > -- ----- --------- --- ------------ @@ -417,10 +417,10 @@ SELECT *, > rows: 8 SELECT *, - ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) U_P, - ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN 2 PRECEDING AND 1 PRECEDING) P, - ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) F, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY VALUE GROUPS BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) U_F + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) U_P, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 2 PRECEDING AND 1 PRECEDING) P, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) F, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) U_F FROM TEST; > ID VALUE U_P P F U_F > -- ----- ------------------ ------------ --------------- ------------------ @@ -435,8 +435,8 @@ SELECT *, > rows: 8 SELECT *, - ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN 1 PRECEDING AND 0 PRECEDING) P, - ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN 0 FOLLOWING AND 1 FOLLOWING) F + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 PRECEDING AND 0 PRECEDING) P, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 0 FOLLOWING AND 1 FOLLOWING) F FROM TEST; > ID VALUE P F > -- 
----- --------------- --------------- @@ -450,9 +450,9 @@ SELECT *, > 8 9 [4, 5, 6, 7, 8] [7, 8] > rows: 8 -SELECT ID, VALUE, - ARRAY_AGG(ID) OVER (ORDER BY VALUE ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING EXCLUDE GROUP) G, - ARRAY_AGG(ID) OVER (ORDER BY VALUE ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING EXCLUDE TIES) T +SELECT ID, "VALUE", + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING EXCLUDE GROUP) G, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING EXCLUDE TIES) T FROM TEST; > ID VALUE G T > -- ----- ------------ --------------- @@ -466,7 +466,7 @@ SELECT ID, VALUE, > 8 9 [6] [6, 8] > rows: 8 -SELECT ID, VALUE, ARRAY_AGG(ID) OVER(ORDER BY VALUE ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING EXCLUDE GROUP) G +SELECT ID, "VALUE", ARRAY_AGG(ID) OVER(ORDER BY "VALUE" ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING EXCLUDE GROUP) G FROM TEST ORDER BY ID FETCH FIRST 3 ROWS ONLY; > ID VALUE G > -- ----- ------ @@ -475,7 +475,7 @@ SELECT ID, VALUE, ARRAY_AGG(ID) OVER(ORDER BY VALUE ROWS BETWEEN 1 FOLLOWING AND > 3 5 [4, 5] > rows (ordered): 3 -SELECT ID, VALUE, ARRAY_AGG(ID) OVER(ORDER BY VALUE ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE GROUP) G +SELECT ID, "VALUE", ARRAY_AGG(ID) OVER(ORDER BY "VALUE" ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE GROUP) G FROM TEST ORDER BY ID FETCH FIRST 3 ROWS ONLY; > ID VALUE G > -- ----- ------ @@ -484,7 +484,7 @@ SELECT ID, VALUE, ARRAY_AGG(ID) OVER(ORDER BY VALUE ROWS BETWEEN 2 PRECEDING AND > 3 5 [1, 2] > rows (ordered): 3 -SELECT ID, VALUE, ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) A +SELECT ID, "VALUE", ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) A FROM TEST; > ID VALUE A > -- ----- --------- @@ -498,13 +498,13 @@ SELECT ID, VALUE, ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 2 PRECEDING A > 8 9 [4, 5, 6] > rows: 8 -SELECT ID, VALUE, - ARRAY_AGG(ID) OVER (ORDER BY VALUE ROWS BETWEEN UNBOUNDED 
PRECEDING AND CURRENT ROW) CP, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY VALUE ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) CF, - ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) RP, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY VALUE RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) RF, - ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) GP, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY VALUE GROUPS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) GF +SELECT ID, "VALUE", + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) CP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) CF, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) RP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) RF, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) GP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) GF FROM TEST; > ID VALUE CP CF RP RF GP GF > -- ----- ------------------------ ------------------------ ------------------------ ------------------------ ------------------------ ------------------------ @@ -524,7 +524,7 @@ SELECT *, ARRAY_AGG(ID) OVER (ORDER BY ID RANGE BETWEEN CURRENT ROW AND 1 PRECED DROP TABLE TEST; > ok -CREATE TABLE TEST (ID INT, VALUE INT); +CREATE TABLE TEST (ID INT, "VALUE" INT); > ok INSERT INTO TEST VALUES @@ -538,9 +538,9 @@ INSERT INTO TEST VALUES (8, 4); > update count: 8 -SELECT *, ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) FROM TEST; -> ID VALUE ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) -> -- ----- ----------------------------------------------------------------------------- +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 2 
PRECEDING AND 1 PRECEDING) FROM TEST; +> ID VALUE ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) +> -- ----- ------------------------------------------------------------------------------- > 1 1 null > 2 1 null > 3 2 [1, 2] @@ -551,9 +551,9 @@ SELECT *, ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 2 PRECEDING AND 1 PRE > 8 4 [3, 4, 5, 6] > rows: 8 -SELECT *, ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) FROM TEST; -> ID VALUE ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) -> -- ----- ----------------------------------------------------------------------------- +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) FROM TEST; +> ID VALUE ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) +> -- ----- ------------------------------------------------------------------------------- > 1 1 [3, 4, 5, 6] > 2 1 [3, 4, 5, 6] > 3 2 [5, 6, 7, 8] @@ -564,7 +564,7 @@ SELECT *, ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 1 FOLLOWING AND 2 FOL > 8 4 null > rows: 8 -SELECT ID, VALUE, ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT ROW) A +SELECT ID, "VALUE", ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT ROW) A FROM TEST; > ID VALUE A > -- ----- ------------ @@ -578,7 +578,7 @@ SELECT ID, VALUE, ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 2 PRECEDING A > 8 4 [3, 4, 5, 6] > rows: 8 -SELECT ID, VALUE, ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 1 FOLLOWING AND 1 FOLLOWING EXCLUDE CURRENT ROW) A +SELECT ID, "VALUE", ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 1 FOLLOWING AND 1 FOLLOWING EXCLUDE CURRENT ROW) A FROM TEST; > ID VALUE A > -- ----- ------ @@ -592,13 +592,13 @@ SELECT ID, VALUE, ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN 1 FOLLOWING A > 8 4 null > rows: 8 -SELECT ID, VALUE, - ARRAY_AGG(ID) OVER 
(ORDER BY VALUE ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) CP, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY VALUE ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) CF, - ARRAY_AGG(ID) OVER (ORDER BY VALUE RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) RP, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY VALUE RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) RF, - ARRAY_AGG(ID) OVER (ORDER BY VALUE GROUPS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) GP, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY VALUE GROUPS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) GF +SELECT ID, "VALUE", + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) CP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) CF, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) RP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) RF, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) GP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) GF FROM TEST; > ID VALUE CP CF RP RF GP GF > -- ----- ------------------------ ------------------------ ------------------------ ------------------------ ------------------------ ------------------------ @@ -612,11 +612,11 @@ SELECT ID, VALUE, > 8 4 [1, 2, 3, 4, 5, 6, 7, 8] [8] [1, 2, 3, 4, 5, 6, 7, 8] [7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [7, 8] > rows: 8 -SELECT ID, VALUE, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND VALUE FOLLOWING) RG, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID RANGE BETWEEN VALUE PRECEDING AND UNBOUNDED FOLLOWING) RGR, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID ROWS BETWEEN UNBOUNDED PRECEDING AND VALUE FOLLOWING) R, - ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID ROWS BETWEEN VALUE PRECEDING AND UNBOUNDED FOLLOWING) RR +SELECT ID, "VALUE", + 
ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND "VALUE" FOLLOWING) RG, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID RANGE BETWEEN "VALUE" PRECEDING AND UNBOUNDED FOLLOWING) RGR, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID ROWS BETWEEN UNBOUNDED PRECEDING AND "VALUE" FOLLOWING) R, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID ROWS BETWEEN "VALUE" PRECEDING AND UNBOUNDED FOLLOWING) RR FROM TEST; > ID VALUE RG RGR R RR > -- ----- ------------------------ ------------------------ ------------------------ ------------------------ @@ -630,13 +630,13 @@ SELECT ID, VALUE, > 8 4 [1, 2, 3, 4, 5, 6, 7, 8] [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [4, 5, 6, 7, 8] > rows: 8 -SELECT ID, VALUE, +SELECT ID, "VALUE", ARRAY_AGG(ID ORDER BY ID) OVER - (PARTITION BY VALUE ORDER BY ID ROWS BETWEEN VALUE / 3 PRECEDING AND VALUE / 3 FOLLOWING) A, + (PARTITION BY "VALUE" ORDER BY ID ROWS BETWEEN "VALUE" / 3 PRECEDING AND "VALUE" / 3 FOLLOWING) A, ARRAY_AGG(ID ORDER BY ID) OVER - (PARTITION BY VALUE ORDER BY ID ROWS BETWEEN UNBOUNDED PRECEDING AND VALUE / 3 FOLLOWING) AP, + (PARTITION BY "VALUE" ORDER BY ID ROWS BETWEEN UNBOUNDED PRECEDING AND "VALUE" / 3 FOLLOWING) AP, ARRAY_AGG(ID ORDER BY ID) OVER - (PARTITION BY VALUE ORDER BY ID ROWS BETWEEN VALUE / 3 PRECEDING AND UNBOUNDED FOLLOWING) AF + (PARTITION BY "VALUE" ORDER BY ID ROWS BETWEEN "VALUE" / 3 PRECEDING AND UNBOUNDED FOLLOWING) AF FROM TEST; > ID VALUE A AP AF > -- ----- ------ ------ ------ @@ -650,5 +650,29 @@ SELECT ID, VALUE, > 8 4 [7, 8] [7, 8] [7, 8] > rows: 8 +INSERT INTO TEST VALUES (9, NULL); +> update count: 1 + +SELECT ARRAY_AGG("VALUE") FROM TEST; +>> [1, 1, 2, 2, 3, 3, 4, 4, null] + +SELECT ARRAY_AGG("VALUE" ORDER BY ID) FROM TEST; +>> [1, 1, 2, 2, 3, 3, 4, 4, null] + +SELECT ARRAY_AGG("VALUE" ORDER BY ID) FILTER (WHERE "VALUE" IS NOT NULL) FROM TEST; +>> [1, 1, 2, 2, 3, 3, 4, 4] + +SELECT ARRAY_AGG("VALUE" ORDER BY "VALUE") FROM TEST; +>> [null, 1, 1, 2, 2, 3, 3, 4, 4] + +SELECT 
ARRAY_AGG("VALUE" ORDER BY "VALUE" NULLS LAST) FROM TEST; +>> [1, 1, 2, 2, 3, 3, 4, 4, null] + DROP TABLE TEST; > ok + +SELECT ARRAY_AGG(DISTINCT A ORDER BY B) FROM (VALUES (4, 3), (5, 1), (5, 2)) T(A, B); +>> [5, 4] + +EXPLAIN SELECT ARRAY_AGG(A ORDER BY 'a') FROM (VALUES 1, 2) T(A); +>> SELECT ARRAY_AGG("A") FROM (VALUES (1), (2)) "T"("A") /* table scan */ diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/avg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/avg.sql index 4ce04f59f3..1b70b6e58e 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/avg.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/avg.sql @@ -1,8 +1,20 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- +select avg(cast(x as int)) from system_range(2147483547, 2147483637); +>> 2.147483592E9 + +select avg(x) from system_range(9223372036854775707, 9223372036854775797); +>> 9223372036854775752.0000000000 + +select avg(cast(100 as tinyint)) from system_range(1, 1000); +>> 100.0 + +select avg(cast(100 as smallint)) from system_range(1, 1000); +>> 100.0 + -- with filter condition create table test(v int); @@ -12,19 +24,113 @@ insert into test values (10), (20), (30), (40), (50), (60), (70), (80), (90), (1 > update count: 12 select avg(v), avg(v) filter (where v >= 40) from test where v <= 100; -> AVG(V) AVG(V) FILTER (WHERE (V >= 40)) -> ------ ------------------------------- -> 55 70 +> AVG(V) AVG(V) FILTER (WHERE V >= 40) +> ------ ----------------------------- +> 55.0 70.0 > rows: 1 create index test_idx on test(v); > ok select avg(v), avg(v) filter (where v >= 40) from test where v <= 100; -> AVG(V) AVG(V) FILTER (WHERE (V >= 40)) -> ------ ------------------------------- -> 55 70 +> AVG(V) AVG(V) 
FILTER (WHERE V >= 40) +> ------ ----------------------------- +> 55.0 70.0 > rows: 1 drop table test; > ok + +CREATE TABLE S( + N1 TINYINT, + N2 SMALLINT, + N4 INTEGER, + N8 BIGINT, + N NUMERIC(10, 2), + F4 REAL, + F8 DOUBLE PRECISION, + D DECFLOAT(10), + I1 INTERVAL YEAR(3), + I2 INTERVAL MONTH(3), + I3 INTERVAL DAY(3), + I4 INTERVAL HOUR(3), + I5 INTERVAL MINUTE(3), + I6 INTERVAL SECOND(2), + I7 INTERVAL YEAR(3) TO MONTH, + I8 INTERVAL DAY(3) TO HOUR, + I9 INTERVAL DAY(3) TO MINUTE, + I10 INTERVAL DAY(3) TO SECOND(2), + I11 INTERVAL HOUR(3) TO MINUTE, + I12 INTERVAL HOUR(3) TO SECOND(2), + I13 INTERVAL MINUTE(3) TO SECOND(2)); +> ok + +CREATE TABLE A AS SELECT + AVG(N1) N1, + AVG(N2) N2, + AVG(N4) N4, + AVG(N8) N8, + AVG(N) N, + AVG(F4) F4, + AVG(F8) F8, + AVG(D) D, + AVG(I1) I1, + AVG(I2) I2, + AVG(I3) I3, + AVG(I4) I4, + AVG(I5) I5, + AVG(I6) I6, + AVG(I7) I7, + AVG(I8) I8, + AVG(I9) I9, + AVG(I10) I10, + AVG(I11) I11, + AVG(I12) I12, + AVG(I13) I13 + FROM S; +> ok + +SELECT COLUMN_NAME, DATA_TYPE_SQL('PUBLIC', 'A', 'TABLE', DTD_IDENTIFIER) TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'A' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME TYPE +> ----------- ------------------------------- +> N1 DOUBLE PRECISION +> N2 DOUBLE PRECISION +> N4 DOUBLE PRECISION +> N8 NUMERIC(29, 10) +> N NUMERIC(20, 12) +> F4 DOUBLE PRECISION +> F8 DECFLOAT(27) +> D DECFLOAT(20) +> I1 INTERVAL YEAR(3) TO MONTH +> I2 INTERVAL MONTH(3) +> I3 INTERVAL DAY(3) TO SECOND(9) +> I4 INTERVAL HOUR(3) TO SECOND(9) +> I5 INTERVAL MINUTE(3) TO SECOND(9) +> I6 INTERVAL SECOND(2, 9) +> I7 INTERVAL YEAR(3) TO MONTH +> I8 INTERVAL DAY(3) TO SECOND(9) +> I9 INTERVAL DAY(3) TO SECOND(9) +> I10 INTERVAL DAY(3) TO SECOND(9) +> I11 INTERVAL HOUR(3) TO SECOND(9) +> I12 INTERVAL HOUR(3) TO SECOND(9) +> I13 INTERVAL MINUTE(3) TO SECOND(9) +> rows (ordered): 21 + +DROP TABLE S, A; +> ok + +SELECT AVG(X) FROM (VALUES INTERVAL '1' DAY, INTERVAL '2' DAY) T(X); +>> INTERVAL '1 12:00:00' DAY TO SECOND + 
+SELECT AVG(X) FROM (VALUES CAST(1 AS NUMERIC(1)), CAST(2 AS NUMERIC(1))) T(X); +>> 1.5000000000 + +SELECT AVG(I) FROM (VALUES 9e99999 - 1, 1e99999 + 1) T(I); +>> 5E+99999 + +SELECT AVG(I) = 5E99999 FROM (VALUES CAST(9e99999 - 1 AS NUMERIC(100000)), CAST(1e99999 + 1 AS NUMERIC(100000))) T(I); +>> TRUE + +SELECT AVG(I) FROM (VALUES INTERVAL '999999999999999999' SECOND, INTERVAL '1' SECOND) T(I); +>> INTERVAL '500000000000000000' SECOND diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/bit-and.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit-and.sql deleted file mode 100644 index 6e154fbdd3..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/bit-and.sql +++ /dev/null @@ -1,33 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- - --- with filter condition - -create table test(v bigint); -> ok - -insert into test values - (0xfffffffffff0), (0xffffffffff0f), (0xfffffffff0ff), (0xffffffff0fff), - (0xfffffff0ffff), (0xffffff0fffff), (0xfffff0ffffff), (0xffff0fffffff), - (0xfff0ffffffff), (0xff0fffffffff), (0xf0ffffffffff), (0x0fffffffffff); -> update count: 12 - -select bit_and(v), bit_and(v) filter (where v <= 0xffffffff0fff) from test where v >= 0xff0fffffffff; -> BIT_AND(V) BIT_AND(V) FILTER (WHERE (V <= 281474976649215)) -> --------------- ------------------------------------------------ -> 280375465082880 280375465086975 -> rows: 1 - -create index test_idx on test(v); -> ok - -select bit_and(v), bit_and(v) filter (where v <= 0xffffffff0fff) from test where v >= 0xff0fffffffff; -> BIT_AND(V) BIT_AND(V) FILTER (WHERE (V <= 281474976649215)) -> --------------- ------------------------------------------------ -> 280375465082880 280375465086975 -> rows: 1 - -drop table test; -> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/bit-or.sql 
b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit-or.sql deleted file mode 100644 index 40a1aa53cb..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/bit-or.sql +++ /dev/null @@ -1,32 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- - --- with filter condition - --- with filter condition - -create table test(v bigint); -> ok - -insert into test values (1), (2), (4), (8), (16), (32), (64), (128), (256), (512), (1024), (2048); -> update count: 12 - -select bit_or(v), bit_or(v) filter (where v >= 8) from test where v <= 512; -> BIT_OR(V) BIT_OR(V) FILTER (WHERE (V >= 8)) -> --------- --------------------------------- -> 1023 1016 -> rows: 1 - -create index test_idx on test(v); -> ok - -select bit_or(v), bit_or(v) filter (where v >= 8) from test where v <= 512; -> BIT_OR(V) BIT_OR(V) FILTER (WHERE (V >= 8)) -> --------- --------------------------------- -> 1023 1016 -> rows: 1 - -drop table test; -> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_and_agg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_and_agg.sql new file mode 100644 index 0000000000..52212634ed --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_and_agg.sql @@ -0,0 +1,48 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- with filter condition + +create table test(v bigint); +> ok + +insert into test values + (0xfffffffffff0), (0xffffffffff0f), (0xfffffffff0ff), (0xffffffff0fff), + (0xfffffff0ffff), (0xffffff0fffff), (0xfffff0ffffff), (0xffff0fffffff), + (0xfff0ffffffff), (0xff0fffffffff), (0xf0ffffffffff), (0x0fffffffffff); +> update count: 12 + +select BIT_AND_AGG(v), BIT_AND_AGG(v) filter (where v <= 0xffffffff0fff) from test where v >= 0xff0fffffffff; +> BIT_AND_AGG(V) BIT_AND_AGG(V) FILTER (WHERE V <= 281474976649215) +> --------------- -------------------------------------------------- +> 280375465082880 280375465086975 +> rows: 1 + +SELECT BIT_NAND_AGG(V), BIT_NAND_AGG(V) FILTER (WHERE V <= 0xffffffff0fff) FROM TEST WHERE V >= 0xff0fffffffff; +> BIT_NAND_AGG(V) BIT_NAND_AGG(V) FILTER (WHERE V <= 281474976649215) +> ---------------- --------------------------------------------------- +> -280375465082881 -280375465086976 +> rows: 1 + +create index test_idx on test(v); +> ok + +select BIT_AND_AGG(v), BIT_AND_AGG(v) filter (where v <= 0xffffffff0fff) from test where v >= 0xff0fffffffff; +> BIT_AND_AGG(V) BIT_AND_AGG(V) FILTER (WHERE V <= 281474976649215) +> --------------- -------------------------------------------------- +> 280375465082880 280375465086975 +> rows: 1 + +SELECT BIT_NAND_AGG(V), BIT_NAND_AGG(V) FILTER (WHERE V <= 0xffffffff0fff) FROM TEST WHERE V >= 0xff0fffffffff; +> BIT_NAND_AGG(V) BIT_NAND_AGG(V) FILTER (WHERE V <= 281474976649215) +> ---------------- --------------------------------------------------- +> -280375465082881 -280375465086976 +> rows: 1 + +EXPLAIN SELECT BITNOT(BIT_AND_AGG(V)), BITNOT(BIT_NAND_AGG(V)) FROM TEST; +>> SELECT BIT_NAND_AGG("V"), BIT_AND_AGG("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ + +drop table test; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_or_agg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_or_agg.sql new file mode 100644 index 
0000000000..ba91746c04 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_or_agg.sql @@ -0,0 +1,45 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- with filter condition + +create table test(v bigint); +> ok + +insert into test values (1), (2), (4), (8), (16), (32), (64), (128), (256), (512), (1024), (2048); +> update count: 12 + +select BIT_OR_AGG(v), BIT_OR_AGG(v) filter (where v >= 8) from test where v <= 512; +> BIT_OR_AGG(V) BIT_OR_AGG(V) FILTER (WHERE V >= 8) +> ------------- ----------------------------------- +> 1023 1016 +> rows: 1 + +SELECT BIT_NOR_AGG(V), BIT_NOR_AGG(V) FILTER (WHERE V >= 8) FROM TEST WHERE V <= 512; +> BIT_NOR_AGG(V) BIT_NOR_AGG(V) FILTER (WHERE V >= 8) +> -------------- ------------------------------------ +> -1024 -1017 +> rows: 1 + +create index test_idx on test(v); +> ok + +select BIT_OR_AGG(v), BIT_OR_AGG(v) filter (where v >= 8) from test where v <= 512; +> BIT_OR_AGG(V) BIT_OR_AGG(V) FILTER (WHERE V >= 8) +> ------------- ----------------------------------- +> 1023 1016 +> rows: 1 + +SELECT BIT_NOR_AGG(V), BIT_NOR_AGG(V) FILTER (WHERE V >= 8) FROM TEST WHERE V <= 512; +> BIT_NOR_AGG(V) BIT_NOR_AGG(V) FILTER (WHERE V >= 8) +> -------------- ------------------------------------ +> -1024 -1017 +> rows: 1 + +EXPLAIN SELECT BITNOT(BIT_OR_AGG(V)), BITNOT(BIT_NOR_AGG(V)) FROM TEST; +>> SELECT BIT_NOR_AGG("V"), BIT_OR_AGG("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ + +drop table test; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_xor_agg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_xor_agg.sql new file mode 100644 index 0000000000..1092a4d00a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_xor_agg.sql @@ -0,0 +1,25 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT BIT_XOR_AGG(V), BIT_XOR_AGG(DISTINCT V), BIT_XOR_AGG(V) FILTER (WHERE V <> 1) FROM (VALUES 1, 1, 2, 3, 4) T(V); +> BIT_XOR_AGG(V) BIT_XOR_AGG(DISTINCT V) BIT_XOR_AGG(V) FILTER (WHERE V <> 1) +> -------------- ----------------------- ------------------------------------ +> 5 4 5 +> rows: 1 + +SELECT BIT_XNOR_AGG(V), BIT_XNOR_AGG(DISTINCT V), BIT_XNOR_AGG(V) FILTER (WHERE V <> 1) FROM (VALUES 1, 1, 2, 3, 4) T(V); +> BIT_XNOR_AGG(V) BIT_XNOR_AGG(DISTINCT V) BIT_XNOR_AGG(V) FILTER (WHERE V <> 1) +> --------------- ------------------------ ------------------------------------- +> -6 -5 -6 +> rows: 1 + +CREATE TABLE TEST(V BIGINT); +> ok + +EXPLAIN SELECT BITNOT(BIT_XOR_AGG(V)), BITNOT(BIT_XNOR_AGG(V)) FROM TEST; +>> SELECT BIT_XNOR_AGG("V"), BIT_XOR_AGG("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/corr.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/corr.sql new file mode 100644 index 0000000000..45a9fb38d0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/corr.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT CORR(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> CORR(Y, X) OVER (ORDER BY R) +> ---------------------------- +> null +> null +> null +> null +> null +> 0.9966158955401239 +> 0.9958932064677037 +> 0.9922153572367626 +> 0.9582302043304856 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/count.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/count.sql index 532b68b18a..1d151de2ba 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/count.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/count.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -12,20 +12,20 @@ insert into test values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), > update count: 13 select count(v), count(v) filter (where v >= 4) from test where v <= 10; -> COUNT(V) COUNT(V) FILTER (WHERE (V >= 4)) -> -------- -------------------------------- +> COUNT(V) COUNT(V) FILTER (WHERE V >= 4) +> -------- ------------------------------ > 10 7 > rows: 1 select count(*), count(*) filter (where v >= 4) from test; -> COUNT(*) COUNT(*) FILTER (WHERE (V >= 4)) -> -------- -------------------------------- +> COUNT(*) COUNT(*) FILTER (WHERE V >= 4) +> -------- ------------------------------ > 13 9 > rows: 1 select count(*), count(*) filter (where v >= 4) from test where v <= 10; -> COUNT(*) COUNT(*) FILTER (WHERE (V >= 4)) -> -------- -------------------------------- +> COUNT(*) COUNT(*) FILTER (WHERE V >= 4) +> -------- ------------------------------ > 10 7 > rows: 1 @@ -33,14 +33,14 @@ create index test_idx on test(v); > ok select count(v), count(v) filter (where v >= 4) from test where v <= 10; -> COUNT(V) COUNT(V) FILTER (WHERE (V >= 4)) -> -------- -------------------------------- +> COUNT(V) COUNT(V) FILTER (WHERE V >= 4) +> -------- ------------------------------ > 10 7 > rows: 1 select count(v), count(v) filter (where v >= 4) from test; -> COUNT(V) COUNT(V) FILTER (WHERE (V >= 4)) -> -------- -------------------------------- +> COUNT(V) COUNT(V) FILTER (WHERE V >= 4) +> -------- ------------------------------ > 12 9 > rows: 1 @@ -153,3 +153,83 @@ SELECT COUNT(*) OVER (PARTITION BY A, B) C1, COUNT(*) OVER (PARTITION BY (A, B)) DROP TABLE TEST; > ok + +CREATE TABLE TEST(X INT) AS (VALUES 1, 2, NULL); +> ok + +SELECT COUNT(*) FROM TEST; +>> 3 + +SELECT COUNT(1) FROM TEST; +>> 3 + +SELECT COUNT(DISTINCT 1) FROM TEST; +>> 1 + +SELECT COUNT(1) FROM TEST FILTER WHERE X <> 1; +>> 1 + +SELECT COUNT(1) OVER(PARTITION BY X IS NULL) FROM TEST; +> COUNT(*) OVER (PARTITION BY X IS NULL) +> 
-------------------------------------- +> 1 +> 2 +> 2 +> rows: 3 + +SELECT COUNT(NULL) FROM TEST; +>> 0 + +SELECT COUNT(DISTINCT NULL) FROM TEST; +>> 0 + +EXPLAIN SELECT COUNT(*) FROM TEST; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +EXPLAIN SELECT COUNT(*) FILTER (WHERE TRUE) FROM TEST; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +EXPLAIN SELECT COUNT(1) FROM TEST; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +EXPLAIN SELECT COUNT(DISTINCT 1) FROM TEST; +>> SELECT COUNT(DISTINCT 1) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT COUNT(1) FROM TEST FILTER WHERE X <> 1; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" "FILTER" /* PUBLIC.TEST.tableScan */ WHERE "X" <> 1 + +EXPLAIN SELECT COUNT(1) OVER(PARTITION BY X IS NULL) FROM TEST; +>> SELECT COUNT(*) OVER (PARTITION BY "X" IS NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT COUNT(NULL) FROM TEST; +>> SELECT CAST(0 AS BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY () /* direct lookup */ + +EXPLAIN SELECT COUNT(DISTINCT NULL) FROM TEST; +>> SELECT CAST(0 AS BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY () /* direct lookup */ + +SELECT COUNT(X) FROM TEST; +>> 2 + +EXPLAIN SELECT COUNT(X) FROM TEST; +>> SELECT COUNT("X") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DELETE FROM TEST WHERE X IS NULL; +> update count: 1 + +ALTER TABLE TEST ALTER COLUMN X SET NOT NULL; +> ok + +SELECT COUNT(X) FROM TEST; +>> 2 + +EXPLAIN SELECT COUNT(X) FROM TEST; +>> SELECT COUNT("X") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +SELECT COUNT(DISTINCT X) FROM TEST; +>> 2 + +EXPLAIN SELECT COUNT(DISTINCT X) FROM TEST; +>> SELECT COUNT(DISTINCT "X") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git 
a/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_pop.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_pop.sql new file mode 100644 index 0000000000..2db80694cd --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_pop.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT COVAR_POP(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> COVAR_POP(Y, X) OVER (ORDER BY R) +> --------------------------------- +> null +> null +> null +> 0.0 +> 0.0 +> 30.333333333333332 +> 35.75 +> 35.88 +> 31.277777777777775 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_samp.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_samp.sql new file mode 100644 index 0000000000..8b09c45d1d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_samp.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT COVAR_SAMP(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> COVAR_SAMP(Y, X) OVER (ORDER BY R) +> ---------------------------------- +> null +> null +> null +> null +> 0.0 +> 45.5 +> 47.666666666666664 +> 44.85 +> 37.53333333333333 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/envelope.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/envelope.sql index fdd58cb5d5..9879b92ad8 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/envelope.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/envelope.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -56,7 +56,7 @@ SELECT ENVELOPE(V) FROM TEST; >> POLYGON ((-1.0000000001 1, -1.0000000001 2, 3 2, 3 1, -1.0000000001 1)) TRUNCATE TABLE TEST; -> ok +> update count: 5 -- Without index SELECT ENVELOPE(N) FROM (SELECT V AS N FROM TEST); @@ -88,11 +88,10 @@ SELECT ENVELOPE(V) FROM TEST; >> POLYGON ((68 78, 68 99951, 99903 99951, 99903 78, 68 78)) SELECT ESTIMATED_ENVELOPE('TEST', 'V'); -#+mvStore#>> POLYGON ((68 78, 68 99951, 99903 99951, 99903 78, 68 78)) -#-mvStore#>> null +>> POLYGON ((68 78, 68 99951, 99903 99951, 99903 78, 68 78)) TRUNCATE TABLE TEST; -> ok +> update count: 1000 @reconnect off diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/every.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/every.sql index 03a71ec930..e603f5c624 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/every.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/every.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -10,8 +10,8 @@ INSERT INTO TEST VALUES (1, 1), (1, 3), (2, 1), (2, 5), (3, 4); > update count: 5 SELECT A, EVERY(B < 5), BOOL_AND(B > 1), EVERY(B >= 1) FILTER (WHERE A = 1) FROM TEST GROUP BY A; -> A EVERY(B < 5) EVERY(B > 1) EVERY(B >= 1) FILTER (WHERE (A = 1)) -> - ------------ ------------ ------------------------------------ +> A EVERY(B < 5) EVERY(B > 1) EVERY(B >= 1) FILTER (WHERE A = 1) +> - ------------ ------------ ---------------------------------- > 1 TRUE FALSE TRUE > 2 FALSE FALSE null > 3 TRUE TRUE null diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/histogram.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/histogram.sql index d58ec6d7db..396daabd5b 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/histogram.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/histogram.sql @@ -1,23 +1,19 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -SELECT HISTOGRAM(X), HISTOGRAM(DISTINCT X) FROM VALUES (1), (2), (3), (1), (2), (NULL), (5) T(X); -> HISTOGRAM(X) HISTOGRAM(DISTINCT X) -> ------------------------------------------- ------------------------------------------- -> [[null, 1], [1, 2], [2, 2], [3, 1], [5, 1]] [[null, 1], [1, 1], [2, 1], [3, 1], [5, 1]] -> rows: 1 +SELECT HISTOGRAM(X), FROM VALUES (1), (2), (3), (1), (2), (NULL), (5) T(X); +>> [ROW (null, 1), ROW (1, 2), ROW (2, 2), ROW (3, 1), ROW (5, 1)] -SELECT HISTOGRAM(X) FILTER (WHERE X > 1), HISTOGRAM(DISTINCT X) FILTER (WHERE X > 1) - FROM VALUES (1), (2), (3), (1), (2), (NULL), (5) T(X); -> HISTOGRAM(X) FILTER (WHERE (X > 1)) HISTOGRAM(DISTINCT X) FILTER (WHERE (X > 1)) -> ----------------------------------- -------------------------------------------- -> [[2, 2], [3, 1], [5, 1]] [[2, 1], [3, 1], [5, 1]] -> rows: 1 +SELECT HISTOGRAM(X) FILTER (WHERE X > 1) FROM VALUES (1), (2), (3), (1), (2), (NULL), (5) T(X); +>> [ROW (2, 2), ROW (3, 1), ROW (5, 1)] -SELECT HISTOGRAM(X) FILTER (WHERE X > 0), HISTOGRAM(DISTINCT X) FILTER (WHERE X > 0) FROM VALUES (0) T(X); -> HISTOGRAM(X) FILTER (WHERE (X > 0)) HISTOGRAM(DISTINCT X) FILTER (WHERE (X > 0)) -> ----------------------------------- -------------------------------------------- -> [] [] -> rows: 1 +SELECT HISTOGRAM(X) FILTER (WHERE X > 0) FROM VALUES (0) T(X); +>> [] + +SELECT HISTOGRAM(DISTINCT X) FROM VALUES (0) T(X); +> exception SYNTAX_ERROR_2 + +SELECT HISTOGRAM(ALL X) FROM VALUES (0) T(X); +> exception SYNTAX_ERROR_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/json_arrayagg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/json_arrayagg.sql new file mode 100644 index 0000000000..12429ec0af --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/json_arrayagg.sql @@ -0,0 +1,71 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, N VARCHAR, J JSON) AS VALUES + (1, 'Ten', JSON '10'), + (2, 'Null', NULL), + (3, 'False', JSON 'false'), + (4, 'False', JSON 'false'); +> ok + +SELECT JSON_ARRAYAGG(J NULL ON NULL) FROM TEST; +>> [10,null,false,false] + +SELECT JSON_ARRAYAGG(J) FROM TEST; +>> [10,false,false] + +SELECT JSON_ARRAYAGG(ALL J) FROM TEST; +>> [10,false,false] + +SELECT JSON_ARRAYAGG(DISTINCT J) FROM TEST; +>> [10,false] + +SELECT JSON_ARRAYAGG(J NULL ON NULL) FROM TEST; +>> [10,null,false,false] + +SELECT JSON_ARRAYAGG(J ABSENT ON NULL) FROM TEST; +>> [10,false,false] + +SELECT JSON_ARRAYAGG(J ORDER BY ID DESC NULL ON NULL) FROM TEST; +>> [false,false,null,10] + +SELECT JSON_ARRAY(NULL NULL ON NULL); +>> [null] + +EXPLAIN SELECT JSON_ARRAYAGG(J) FROM TEST; +>> SELECT JSON_ARRAYAGG("J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAYAGG(J NULL ON NULL) FROM TEST; +>> SELECT JSON_ARRAYAGG("J" NULL ON NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAYAGG(J ABSENT ON NULL) FROM TEST; +>> SELECT JSON_ARRAYAGG("J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAYAGG(J FORMAT JSON ABSENT ON NULL) FROM TEST; +>> SELECT JSON_ARRAYAGG("J" FORMAT JSON) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAYAGG(DISTINCT J FORMAT JSON ORDER BY ID DESC ABSENT ON NULL) FROM TEST; +>> SELECT JSON_ARRAYAGG(DISTINCT "J" FORMAT JSON ORDER BY "ID" DESC) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DELETE FROM TEST WHERE J IS NOT NULL; +> update count: 3 + +SELECT JSON_ARRAYAGG(J) FROM TEST; +>> [] + +SELECT JSON_ARRAYAGG(J NULL ON NULL) FROM TEST; +>> [null] + +DELETE FROM TEST; +> update count: 1 + +SELECT JSON_ARRAYAGG(J) FROM TEST; +>> null + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT JSON_ARRAYAGG(A ORDER BY 'a') FROM (VALUES 1, 2) T(A); +>> SELECT JSON_ARRAYAGG("A") FROM (VALUES (1), (2)) "T"("A") /* table 
scan */ diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/json_objectagg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/json_objectagg.sql new file mode 100644 index 0000000000..de61a64361 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/json_objectagg.sql @@ -0,0 +1,73 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, N VARCHAR, J JSON) AS VALUES + (1, 'Ten', '10' FORMAT JSON), + (2, 'Null', NULL), + (3, 'False', 'false' FORMAT JSON); +> ok + +SELECT JSON_OBJECTAGG(KEY N VALUE J) FROM TEST; +>> {"Ten":10,"Null":null,"False":false} + +SELECT JSON_OBJECTAGG(N VALUE J) FROM TEST; +>> {"Ten":10,"Null":null,"False":false} + +SELECT JSON_OBJECTAGG(N: J) FROM TEST; +>> {"Ten":10,"Null":null,"False":false} + +SELECT JSON_OBJECTAGG(N: J ABSENT ON NULL) FROM TEST; +>> {"Ten":10,"False":false} + +SELECT JSON_OBJECTAGG(N: J ABSENT ON NULL) FILTER (WHERE J IS NULL) FROM TEST; +>> {} + +SELECT JSON_OBJECTAGG(N: J) FILTER (WHERE FALSE) FROM TEST; +>> null + +SELECT JSON_OBJECTAGG(NULL: J) FROM TEST; +> exception INVALID_VALUE_2 + +INSERT INTO TEST VALUES (4, 'Ten', '-10' FORMAT JSON); +> update count: 1 + +SELECT JSON_OBJECTAGG(N: J) FROM TEST; +>> {"Ten":10,"Null":null,"False":false,"Ten":-10} + +SELECT JSON_OBJECTAGG(N: J WITHOUT UNIQUE KEYS) FROM TEST; +>> {"Ten":10,"Null":null,"False":false,"Ten":-10} + +SELECT JSON_OBJECTAGG(N: J WITH UNIQUE KEYS) FROM TEST; +> exception INVALID_VALUE_2 + +EXPLAIN SELECT JSON_OBJECTAGG(N: J) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J NULL ON NULL) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J ABSENT ON NULL) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J" 
ABSENT ON NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J WITH UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J" WITH UNIQUE KEYS) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J NULL ON NULL WITH UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J" WITH UNIQUE KEYS) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J ABSENT ON NULL WITH UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J" ABSENT ON NULL WITH UNIQUE KEYS) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J WITHOUT UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J NULL ON NULL WITHOUT UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J ABSENT ON NULL WITHOUT UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J" ABSENT ON NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/listagg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/listagg.sql index 9a75c86396..1a0d91f1a9 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/listagg.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/listagg.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -14,16 +14,16 @@ insert into test values ('1'), ('2'), ('3'), ('4'), ('5'), ('6'), ('7'), ('8'), select listagg(v, '-') within group (order by v asc), listagg(v, '-') within group (order by v desc) filter (where v >= '4') from test where v >= '2'; -> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE (V >= '4')) -> ----------------------------------------- ------------------------------------------------------------------------ +> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE V >= '4') +> ----------------------------------------- ---------------------------------------------------------------------- > 2-3-4-5-6-7-8-9 9-8-7-6-5-4 > rows: 1 select group_concat(v order by v asc separator '-'), group_concat(v order by v desc separator '-') filter (where v >= '4') from test where v >= '2'; -> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE (V >= '4')) -> ----------------------------------------- ------------------------------------------------------------------------ +> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE V >= '4') +> ----------------------------------------- ---------------------------------------------------------------------- > 2-3-4-5-6-7-8-9 9-8-7-6-5-4 > rows: 1 @@ -33,16 +33,16 @@ create index test_idx on test(v); select group_concat(v order by v asc separator '-'), group_concat(v order by v desc separator '-') filter (where v >= '4') from test where v >= '2'; -> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE (V >= '4')) -> ----------------------------------------- ------------------------------------------------------------------------ +> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE V >= 
'4') +> ----------------------------------------- ---------------------------------------------------------------------- > 2-3-4-5-6-7-8-9 9-8-7-6-5-4 > rows: 1 select group_concat(v order by v asc separator '-'), group_concat(v order by v desc separator '-') filter (where v >= '4') from test; -> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE (V >= '4')) -> ----------------------------------------- ------------------------------------------------------------------------ +> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE V >= '4') +> ----------------------------------------- ---------------------------------------------------------------------- > 1-2-3-4-5-6-7-8-9 9-8-7-6-5-4 > rows: 1 @@ -56,14 +56,14 @@ insert into test(v) values (7), (2), (8), (3), (7), (3), (9), (-1); > update count: 8 select group_concat(v) from test; -> LISTAGG(V) -> ---------------- +> LISTAGG(V) WITHIN GROUP (ORDER BY NULL) +> --------------------------------------- > 7,2,8,3,7,3,9,-1 > rows: 1 select group_concat(distinct v) from test; -> LISTAGG(DISTINCT V) -> ------------------- +> LISTAGG(DISTINCT V) WITHIN GROUP (ORDER BY NULL) +> ------------------------------------------------ > -1,2,3,7,8,9 > rows: 1 @@ -73,52 +73,183 @@ select group_concat(distinct v order by v desc) from test; > 9,8,7,3,2,-1 > rows: 1 +INSERT INTO TEST(V) VALUES NULL; +> update count: 1 + +SELECT LISTAGG(V, ',') WITHIN GROUP (ORDER BY ID) FROM TEST; +>> 7,2,8,3,7,3,9,-1 + +SELECT LISTAGG(COALESCE(CAST(V AS VARCHAR), 'null'), ',') WITHIN GROUP (ORDER BY ID) FROM TEST; +>> 7,2,8,3,7,3,9,-1,null + +SELECT LISTAGG(V, ',') WITHIN GROUP (ORDER BY V) FROM TEST; +>> -1,2,3,3,7,7,8,9 + drop table test; > ok -create table test(g varchar, v int) as values ('-', 1), ('-', 2), ('-', 3), ('|', 4), ('|', 5), ('|', 6), ('*', null); +create table test(g int, v int) as values (1, 1), (1, 2), (1, 3), (2, 4), (2, 5), (2, 6), 
(3, null); > ok -select g, listagg(v, g) from test group by g; -> G LISTAGG(V, G) -> - ------------- -> * null -> - 1-2-3 -> | 4|5|6 +select g, listagg(v, '-') from test group by g; +> G LISTAGG(V, '-') WITHIN GROUP (ORDER BY NULL) +> - -------------------------------------------- +> 1 1-2-3 +> 2 4-5-6 +> 3 null > rows: 3 -select g, listagg(v, g) over (partition by g) from test order by v; -> G LISTAGG(V, G) OVER (PARTITION BY G) -> - ----------------------------------- -> * null -> - 1-2-3 -> - 1-2-3 -> - 1-2-3 -> | 4|5|6 -> | 4|5|6 -> | 4|5|6 +select g, listagg(v, '-') over (partition by g) from test order by v; +> G LISTAGG(V, '-') WITHIN GROUP (ORDER BY NULL) OVER (PARTITION BY G) +> - ------------------------------------------------------------------ +> 3 null +> 1 1-2-3 +> 1 1-2-3 +> 1 1-2-3 +> 2 4-5-6 +> 2 4-5-6 +> 2 4-5-6 > rows (ordered): 7 -select g, listagg(v, g on overflow error) within group (order by v) filter (where v <> 2) over (partition by g) from test order by v; -> G LISTAGG(V, G) WITHIN GROUP (ORDER BY V) FILTER (WHERE (V <> 2)) OVER (PARTITION BY G) +select g, listagg(v, '-' on overflow error) within group (order by v) filter (where v <> 2) over (partition by g) from test order by v; +> G LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) FILTER (WHERE V <> 2) OVER (PARTITION BY G) > - ------------------------------------------------------------------------------------- -> * null -> - 1-3 -> - 1-3 -> - 1-3 -> | 4|5|6 -> | 4|5|6 -> | 4|5|6 +> 3 null +> 1 1-3 +> 1 1-3 +> 1 1-3 +> 2 4-5-6 +> 2 4-5-6 +> 2 4-5-6 > rows (ordered): 7 select listagg(distinct v, '-') from test; -> LISTAGG(DISTINCT V, '-') -> ------------------------ +> LISTAGG(DISTINCT V, '-') WITHIN GROUP (ORDER BY NULL) +> ----------------------------------------------------- > 1-2-3-4-5-6 > rows: 1 select g, group_concat(v separator v) from test group by g; -> exception INVALID_VALUE_2 +> exception SYNTAX_ERROR_2 drop table test; > ok + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + 
+INSERT INTO TEST VALUES + (1, NULL, NULL), + (2, NULL, 1), + (3, 1, NULL), + (4, 1, 1), + (5, NULL, 2), + (6, 2, NULL), + (7, 2, 2); +> update count: 7 + +SELECT LISTAGG(A) WITHIN GROUP (ORDER BY B ASC NULLS FIRST, C ASC NULLS FIRST) FROM TEST; +>> 1,2,5,3,4,6,7 + +SELECT LISTAGG(A) WITHIN GROUP (ORDER BY B ASC NULLS LAST, C ASC NULLS LAST) FROM TEST; +>> 4,3,7,6,2,5,1 + +DROP TABLE TEST; +> ok + +SELECT LISTAGG(DISTINCT A, ' ') WITHIN GROUP (ORDER BY B) FROM (VALUES ('a', 2), ('a', 3), ('b', 1)) T(A, B); +>> b a + +CREATE TABLE TEST(A INT NOT NULL, B VARCHAR(50) NOT NULL) AS VALUES (1, '1'), (1, '2'), (1, '3'); +> ok + +SELECT STRING_AGG(B, ', ') FROM TEST GROUP BY A; +>> 1, 2, 3 + +SELECT STRING_AGG(B, ', ' ORDER BY B DESC) FROM TEST GROUP BY A; +>> 3, 2, 1 + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT LISTAGG(A) WITHIN GROUP (ORDER BY 'a') FROM (VALUES 'a', 'b') T(A); +>> SELECT LISTAGG("A") WITHIN GROUP (ORDER BY NULL) FROM (VALUES ('a'), ('b')) "T"("A") /* table scan */ + +SET MODE Oracle; +> ok + +SELECT LISTAGG(V, '') WITHIN GROUP(ORDER BY V) FROM (VALUES 'a', 'b') T(V); +>> ab + +SET MODE Regular; +> ok + +CREATE TABLE TEST(ID INT, V VARCHAR) AS VALUES (1, 'b'), (2, 'a'); +> ok + +EXPLAIN SELECT LISTAGG(V) FROM TEST; +>> SELECT LISTAGG("V") WITHIN GROUP (ORDER BY NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V") WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V, ';') WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V", ';') WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V ON OVERFLOW ERROR) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V") WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V, ';' ON OVERFLOW ERROR) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V", ';') 
WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V ON OVERFLOW TRUNCATE WITH COUNT) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V" ON OVERFLOW TRUNCATE WITH COUNT) WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V ON OVERFLOW TRUNCATE WITHOUT COUNT) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V" ON OVERFLOW TRUNCATE WITHOUT COUNT) WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V ON OVERFLOW TRUNCATE '..' WITH COUNT) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V" ON OVERFLOW TRUNCATE '..' WITH COUNT) WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V ON OVERFLOW TRUNCATE '..' WITHOUT COUNT) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V" ON OVERFLOW TRUNCATE '..' WITHOUT COUNT) WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(V VARCHAR) AS SELECT 'ABCD_EFGH_' || X FROM SYSTEM_RANGE(1, 70000); +> ok + +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE WITH COUNT) WITHIN GROUP(ORDER BY V), 40) FROM TEST; +>> BCD_EFGH_69391,ABCD_EFGH_69392,...(4007) + +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE WITHOUT COUNT) WITHIN GROUP(ORDER BY V), 40) FROM TEST; +>> 9391,ABCD_EFGH_69392,ABCD_EFGH_69393,... 
+ +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE '~~~~~~~~~~~~~~~' WITH COUNT) WITHIN GROUP(ORDER BY V), 40) FROM TEST; +>> 90,ABCD_EFGH_69391,~~~~~~~~~~~~~~~(4008) + +TRUNCATE TABLE TEST; +> update count: 70000 + +INSERT INTO TEST VALUES REPEAT('A', 1048573); +> update count: 1 + +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE WITH COUNT) WITHIN GROUP(ORDER BY V), 40) FROM + (TABLE TEST UNION VALUES 'BB'); +>> AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA,BB + +SELECT RIGHT(LISTAGG(V ON OVERFLOW ERROR) WITHIN GROUP(ORDER BY V), 40) FROM + (TABLE TEST UNION VALUES 'BBB'); +> exception VALUE_TOO_LONG_2 + +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE WITH COUNT) WITHIN GROUP(ORDER BY V), 40) FROM + (TABLE TEST UNION VALUES 'BBB'); +>> ...(2) + +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE '..' WITHOUT COUNT) WITHIN GROUP(ORDER BY V), 40) FROM + (TABLE TEST UNION VALUES 'BBB'); +>> AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA,.. + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/max.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/max.sql index d16d5af16d..dfdf0c99ba 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/max.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/max.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -12,8 +12,8 @@ insert into test values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), > update count: 12 select max(v), max(v) filter (where v <= 8) from test where v <= 10; -> MAX(V) MAX(V) FILTER (WHERE (V <= 8)) -> ------ ------------------------------ +> MAX(V) MAX(V) FILTER (WHERE V <= 8) +> ------ ---------------------------- > 10 8 > rows: 1 @@ -21,16 +21,49 @@ create index test_idx on test(v); > ok select max(v), max(v) filter (where v <= 8) from test where v <= 10; -> MAX(V) MAX(V) FILTER (WHERE (V <= 8)) -> ------ ------------------------------ +> MAX(V) MAX(V) FILTER (WHERE V <= 8) +> ------ ---------------------------- > 10 8 > rows: 1 select max(v), max(v) filter (where v <= 8) from test; -> MAX(V) MAX(V) FILTER (WHERE (V <= 8)) -> ------ ------------------------------ +> MAX(V) MAX(V) FILTER (WHERE V <= 8) +> ------ ---------------------------- > 12 8 > rows: 1 drop table test; > ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT) AS VALUES (1, 1), (2, NULL), (3, 5); +> ok + +CREATE INDEX TEST_IDX ON TEST(V NULLS LAST); +> ok + +EXPLAIN SELECT MAX(V) FROM TEST; +>> SELECT MAX("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ /* direct lookup */ + +SELECT MAX(V) FROM TEST; +>> 5 + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT MAX(X) FROM SYSTEM_RANGE(1, 2); +>> SELECT MAX("X") FROM SYSTEM_RANGE(1, 2) /* range index */ /* direct lookup */ + +SELECT MAX(X) FROM SYSTEM_RANGE(1, 2, 0); +> exception STEP_SIZE_MUST_NOT_BE_ZERO + +SELECT MAX(X) FROM SYSTEM_RANGE(1, 2); +>> 2 + +SELECT MAX(X) FROM SYSTEM_RANGE(2, 1); +>> null + +SELECT MAX(X) FROM SYSTEM_RANGE(1, 2, -1); +>> null + +SELECT MAX(X) FROM SYSTEM_RANGE(2, 1, -1); +>> 2 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/min.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/min.sql index 45b98cf4f0..e8b4b50504 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/min.sql +++ 
b/h2/src/test/org/h2/test/scripts/functions/aggregate/min.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -12,8 +12,8 @@ insert into test values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), > update count: 12 select min(v), min(v) filter (where v >= 4) from test where v >= 2; -> MIN(V) MIN(V) FILTER (WHERE (V >= 4)) -> ------ ------------------------------ +> MIN(V) MIN(V) FILTER (WHERE V >= 4) +> ------ ---------------------------- > 2 4 > rows: 1 @@ -21,16 +21,55 @@ create index test_idx on test(v); > ok select min(v), min(v) filter (where v >= 4) from test where v >= 2; -> MIN(V) MIN(V) FILTER (WHERE (V >= 4)) -> ------ ------------------------------ +> MIN(V) MIN(V) FILTER (WHERE V >= 4) +> ------ ---------------------------- > 2 4 > rows: 1 select min(v), min(v) filter (where v >= 4) from test; -> MIN(V) MIN(V) FILTER (WHERE (V >= 4)) -> ------ ------------------------------ +> MIN(V) MIN(V) FILTER (WHERE V >= 4) +> ------ ---------------------------- > 1 4 > rows: 1 drop table test; > ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT); +> ok + +CREATE INDEX TEST_IDX ON TEST(V NULLS FIRST); +> ok + +EXPLAIN SELECT MIN(V) FROM TEST; +>> SELECT MIN("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ /* direct lookup */ + +SELECT MIN(V) FROM TEST; +>> null + +INSERT INTO TEST VALUES (1, 1), (2, NULL), (3, 5); +> update count: 3 + +SELECT MIN(V) FROM TEST; +>> 1 + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT MIN(X) FROM SYSTEM_RANGE(1, 2); +>> SELECT MIN("X") FROM SYSTEM_RANGE(1, 2) /* range index */ /* direct lookup */ + +SELECT MIN(X) FROM SYSTEM_RANGE(1, 2, 0); +> exception STEP_SIZE_MUST_NOT_BE_ZERO + +SELECT MIN(X) FROM SYSTEM_RANGE(1, 2); +>> 1 + +SELECT MIN(X) FROM 
SYSTEM_RANGE(2, 1); +>> null + +SELECT MIN(X) FROM SYSTEM_RANGE(1, 2, -1); +>> null + +SELECT MIN(X) FROM SYSTEM_RANGE(2, 1, -1); +>> 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/mode.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/mode.sql index 89acea1141..54b0dd7314 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/mode.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/mode.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -28,9 +28,9 @@ SELECT MODE(V), MODE() WITHIN GROUP (ORDER BY V DESC) FROM TEST; > rows: 1 SELECT MODE(V) FILTER (WHERE (V > 1)), MODE(V) FILTER (WHERE (V < 0)) FROM TEST; -> MODE() WITHIN GROUP (ORDER BY V) FILTER (WHERE (V > 1)) MODE() WITHIN GROUP (ORDER BY V) FILTER (WHERE (V < 0)) -> ------------------------------------------------------- ------------------------------------------------------- -> 2 null +> MODE() WITHIN GROUP (ORDER BY V) FILTER (WHERE V > 1) MODE() WITHIN GROUP (ORDER BY V) FILTER (WHERE V < 0) +> ----------------------------------------------------- ----------------------------------------------------- +> 2 null > rows: 1 -- Oracle compatibility diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/percentile.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/percentile.sql index dc7be8ffb0..5ac0bed4ad 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/percentile.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/percentile.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -609,20 +609,20 @@ select median(v) from test; delete from test; > update count: 5 -insert into test values ('2000-01-20 20:00:00+10:15'), ('2000-01-21 20:00:00-09'); +insert into test values ('2000-01-20 20:00:00+10:15:15'), ('2000-01-21 20:00:00-09'); > update count: 2 select median(v) from test; ->> 2000-01-21 08:00:30+00:37 +>> 2000-01-21 08:00:00.5+00:37:37 delete from test; > update count: 2 -insert into test values ('-2000-01-20 20:00:00+10:15'), ('-2000-01-21 20:00:00-09'); +insert into test values ('-2000-01-20 20:00:00+10:15:15'), ('-2000-01-21 20:00:00-09'); > update count: 2 select median(v) from test; ->> -2000-01-21 08:00:30+00:37 +>> -2000-01-21 08:00:00.5+00:37:37 drop table test; > ok @@ -640,16 +640,16 @@ drop table test; > ok -- with group by -create table test(name varchar, value int); +create table test(name varchar, "VALUE" int); > ok insert into test values ('Group 2A', 10), ('Group 2A', 10), ('Group 2A', 20), ('Group 1X', 40), ('Group 1X', 50), ('Group 3B', null); > update count: 6 -select name, median(value) from test group by name order by name; -> NAME MEDIAN(VALUE) -> -------- ------------- +select name, median("VALUE") from test group by name order by name; +> NAME MEDIAN("VALUE") +> -------- --------------- > Group 1X 45.0 > Group 2A 10 > Group 3B null @@ -727,8 +727,8 @@ insert into test values (10), (20), (30), (40), (50), (60), (70), (80), (90), (1 > update count: 12 select median(v), median(v) filter (where v >= 40) from test where v <= 100; -> MEDIAN(V) MEDIAN(V) FILTER (WHERE (V >= 40)) -> --------- ---------------------------------- +> MEDIAN(V) MEDIAN(V) FILTER (WHERE V >= 40) +> --------- -------------------------------- > 55.0 70 > rows: 1 @@ -736,14 +736,14 @@ create index test_idx on test(v); > ok select median(v), median(v) filter (where v >= 40) from test where v <= 100; -> MEDIAN(V) 
MEDIAN(V) FILTER (WHERE (V >= 40)) -> --------- ---------------------------------- +> MEDIAN(V) MEDIAN(V) FILTER (WHERE V >= 40) +> --------- -------------------------------- > 55.0 70 > rows: 1 select median(v), median(v) filter (where v >= 40) from test; -> MEDIAN(V) MEDIAN(V) FILTER (WHERE (V >= 40)) -> --------- ---------------------------------- +> MEDIAN(V) MEDIAN(V) FILTER (WHERE V >= 40) +> --------- -------------------------------- > 65.0 80 > rows: 1 @@ -770,8 +770,8 @@ select dept, median(amount) from test group by dept order by dept; > rows (ordered): 3 select dept, median(amount) filter (where amount >= 20) from test group by dept order by dept; -> DEPT MEDIAN(AMOUNT) FILTER (WHERE (AMOUNT >= 20)) -> ------ -------------------------------------------- +> DEPT MEDIAN(AMOUNT) FILTER (WHERE AMOUNT >= 20) +> ------ ------------------------------------------ > First 30 > Second 22 > Third 160.0 @@ -779,8 +779,8 @@ select dept, median(amount) filter (where amount >= 20) from test group by dept select dept, median(amount) filter (where amount >= 20) from test where (amount < 200) group by dept order by dept; -> DEPT MEDIAN(AMOUNT) FILTER (WHERE (AMOUNT >= 20)) -> ------ -------------------------------------------- +> DEPT MEDIAN(AMOUNT) FILTER (WHERE AMOUNT >= 20) +> ------ ------------------------------------------ > First 30 > Second 21.0 > Third 150 @@ -898,3 +898,19 @@ SELECT percentile_disc(v) within group (order by v) from test; drop table test; > ok + +SELECT PERCENTILE_CONT(0.1) WITHIN GROUP (ORDER BY V) FROM (VALUES TIME WITH TIME ZONE '10:30:00Z', TIME WITH TIME ZONE '15:30:00+10') T(V); +>> 15:00:00+09 + +SELECT PERCENTILE_CONT(0.7) WITHIN GROUP (ORDER BY V) FROM (VALUES TIME WITH TIME ZONE '10:00:00Z', TIME WITH TIME ZONE '12:00:00+00:00:01') T(V); +>> 11:24:00.7+00 + +SELECT PERCENTILE_CONT(0.7) WITHIN GROUP (ORDER BY V) FROM (VALUES TIME WITH TIME ZONE '23:59:59.999999999Z', TIME WITH TIME ZONE '23:59:59.999999999+00:00:01') T(V); +>> 
23:59:59.299999999-00:00:01 + +SELECT PERCENTILE_CONT(0.7) WITHIN GROUP (ORDER BY V) FROM (VALUES TIME WITH TIME ZONE '00:00:00Z', TIME WITH TIME ZONE '00:00:00-00:00:01') T(V); +>> 00:00:00.3+00:00:01 + +-- null ordering has no effect, but must be allowed +SELECT PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY V NULLS LAST) FROM (VALUES NULL, 1, 3) T(V); +>> 2.0 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/rank.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/rank.sql index cde5794180..739f1b0772 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/rank.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/rank.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -126,3 +126,25 @@ SELECT RANK(V) WITHIN GROUP (ORDER BY V) FROM TEST; DROP TABLE TEST; > ok + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +INSERT INTO TEST VALUES + (1, NULL, NULL), + (2, NULL, 1), + (3, 1, NULL), + (4, 1, 1), + (5, NULL, 3), + (6, 3, NULL), + (7, 3, 3); +> update count: 7 + +SELECT RANK(2, 2) WITHIN GROUP (ORDER BY B ASC NULLS FIRST, C ASC NULLS FIRST) FROM TEST; +>> 6 + +SELECT RANK(2, 2) WITHIN GROUP (ORDER BY B ASC NULLS LAST, C ASC NULLS LAST) FROM TEST; +>> 3 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgx.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgx.sql new file mode 100644 index 0000000000..421136363b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgx.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT REGR_AVGX(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_AVGX(Y, X) OVER (ORDER BY R) +> --------------------------------- +> null +> null +> null +> -2.0 +> -1.5 +> 2.0 +> 4.0 +> 5.4 +> 5.666666666666667 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgy.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgy.sql new file mode 100644 index 0000000000..377e441846 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgy.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT REGR_AVGY(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_AVGY(Y, X) OVER (ORDER BY R) +> --------------------------------- +> null +> null +> null +> -3.0 +> -3.0 +> 1.3333333333333333 +> 3.5 +> 4.8 +> 5.833333333333333 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_count.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_count.sql new file mode 100644 index 0000000000..e8e72f1d46 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_count.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT REGR_COUNT(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_COUNT(Y, X) OVER (ORDER BY R) +> ---------------------------------- +> 0 +> 0 +> 0 +> 1 +> 2 +> 3 +> 4 +> 5 +> 6 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_intercept.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_intercept.sql new file mode 100644 index 0000000000..f1c22e3704 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_intercept.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT REGR_INTERCEPT(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_INTERCEPT(Y, X) OVER (ORDER BY R) +> -------------------------------------- +> null +> null +> null +> null +> -3.0 +> -1.1261261261261266 +> -1.1885245901639347 +> -1.2096774193548399 +> -0.6775510204081643 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_r2.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_r2.sql new file mode 100644 index 0000000000..67517a2099 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_r2.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT REGR_R2(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_R2(Y, X) OVER (ORDER BY R) +> ------------------------------- +> null +> null +> null +> null +> 1.0 +> 0.9932432432432432 +> 0.9918032786885245 +> 0.9844913151364764 +> 0.9182051244912443 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_slope.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_slope.sql new file mode 100644 index 0000000000..3f2c4688b0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_slope.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT REGR_SLOPE(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_SLOPE(Y, X) OVER (ORDER BY R) +> ---------------------------------- +> null +> null +> null +> null +> 0.0 +> 1.2297297297297298 +> 1.1721311475409837 +> 1.1129032258064517 +> 1.1489795918367347 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxx.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxx.sql new file mode 100644 index 0000000000..963dfa560f --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxx.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT REGR_SXX(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_SXX(Y, X) OVER (ORDER BY R) +> -------------------------------- +> null +> null +> null +> 0.0 +> 0.5 +> 74.0 +> 122.0 +> 161.2 +> 163.33333333333331 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxy.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxy.sql new file mode 100644 index 0000000000..9d6aeca260 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxy.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT REGR_SXY(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_SXY(Y, X) OVER (ORDER BY R) +> -------------------------------- +> null +> null +> null +> 0.0 +> 0.0 +> 91.0 +> 143.0 +> 179.4 +> 187.66666666666666 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_syy.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_syy.sql new file mode 100644 index 0000000000..9478b4f483 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_syy.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT REGR_SYY(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_SYY(Y, X) OVER (ORDER BY R) +> -------------------------------- +> null +> null +> null +> 0.0 +> 0.0 +> 112.66666666666669 +> 169.00000000000003 +> 202.80000000000004 +> 234.83333333333337 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/selectivity.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/selectivity.sql deleted file mode 100644 index 008a885806..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/selectivity.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev-pop.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev-pop.sql deleted file mode 100644 index 008a885806..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev-pop.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev-samp.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev-samp.sql deleted file mode 100644 index 008a885806..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev-samp.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
--- Initial Developer: H2 Group --- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_pop.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_pop.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_pop.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_samp.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_samp.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_samp.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/sum.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/sum.sql index 57570242dc..f2d794076f 100644 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/sum.sql +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/sum.sql @@ -1,8 +1,20 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- +select sum(cast(x as int)) from system_range(2147483547, 2147483637); +>> 195421006872 + +select sum(x) from system_range(9223372036854775707, 9223372036854775797); +>> 839326855353784593432 + +select sum(cast(100 as tinyint)) from system_range(1, 1000); +>> 100000 + +select sum(cast(100 as smallint)) from system_range(1, 1000); +>> 100000 + -- with filter condition create table test(v int); @@ -12,8 +24,8 @@ insert into test values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), > update count: 12 select sum(v), sum(v) filter (where v >= 4) from test where v <= 10; -> SUM(V) SUM(V) FILTER (WHERE (V >= 4)) -> ------ ------------------------------ +> SUM(V) SUM(V) FILTER (WHERE V >= 4) +> ------ ---------------------------- > 55 49 > rows: 1 @@ -21,8 +33,8 @@ create index test_idx on test(v); > ok select sum(v), sum(v) filter (where v >= 4) from test where v <= 10; -> SUM(V) SUM(V) FILTER (WHERE (V >= 4)) -> ------ ------------------------------ +> SUM(V) SUM(V) FILTER (WHERE V >= 4) +> ------ ---------------------------- > 55 49 > rows: 1 @@ -104,3 +116,117 @@ SELECT I, V, SUM(V) OVER W S, SUM(DISTINCT V) OVER W D FROM > 6 2 8 3 > 7 3 11 6 > rows: 7 + +SELECT * FROM (SELECT SUM(V) OVER (ORDER BY V ROWS BETWEEN CURRENT ROW AND CURRENT ROW) S FROM (VALUES 1, 2, 2) T(V)); +> S +> - +> 1 +> 2 +> 2 +> rows: 3 + +SELECT V, SUM(V) FILTER (WHERE V <> 1) OVER (ROWS CURRENT ROW) S FROM (VALUES 1, 2, 2) T(V); +> V S +> - ---- +> 1 null +> 2 2 +> 2 2 +> rows: 3 + +SELECT V, + SUM(V) FILTER (WHERE V <> 1) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) S, + SUM(V) FILTER (WHERE V <> 1) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) T + FROM (VALUES 1, 2, 2) T(V); +> V S T +> - - - +> 1 4 2 +> 2 4 4 +> 2 4 4 +> rows: 3 + + + +CREATE TABLE S( + B BOOLEAN, + N1 TINYINT, + N2 SMALLINT, + N4 INTEGER, + N8 BIGINT, + N NUMERIC(10, 2), + F4 REAL, + F8 DOUBLE PRECISION, + D DECFLOAT(10), + I1 INTERVAL YEAR(3), + I2 
INTERVAL MONTH(3), + I3 INTERVAL DAY(3), + I4 INTERVAL HOUR(3), + I5 INTERVAL MINUTE(3), + I6 INTERVAL SECOND(2), + I7 INTERVAL YEAR(3) TO MONTH, + I8 INTERVAL DAY(3) TO HOUR, + I9 INTERVAL DAY(3) TO MINUTE, + I10 INTERVAL DAY(3) TO SECOND(2), + I11 INTERVAL HOUR(3) TO MINUTE, + I12 INTERVAL HOUR(3) TO SECOND(2), + I13 INTERVAL MINUTE(3) TO SECOND(2)); +> ok + +CREATE TABLE A AS SELECT + SUM(B) B, + SUM(N1) N1, + SUM(N2) N2, + SUM(N4) N4, + SUM(N8) N8, + SUM(N) N, + SUM(F4) F4, + SUM(F8) F8, + SUM(D) D, + SUM(I1) I1, + SUM(I2) I2, + SUM(I3) I3, + SUM(I4) I4, + SUM(I5) I5, + SUM(I6) I6, + SUM(I7) I7, + SUM(I8) I8, + SUM(I9) I9, + SUM(I10) I10, + SUM(I11) I11, + SUM(I12) I12, + SUM(I13) I13 + FROM S; +> ok + +SELECT COLUMN_NAME, DATA_TYPE_SQL('PUBLIC', 'A', 'TABLE', DTD_IDENTIFIER) TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'A' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME TYPE +> ----------- -------------------------------- +> B BIGINT +> N1 BIGINT +> N2 BIGINT +> N4 BIGINT +> N8 NUMERIC(29) +> N NUMERIC(20, 2) +> F4 DOUBLE PRECISION +> F8 DECFLOAT(27) +> D DECFLOAT(20) +> I1 INTERVAL YEAR(18) +> I2 INTERVAL MONTH(18) +> I3 INTERVAL DAY(18) +> I4 INTERVAL HOUR(18) +> I5 INTERVAL MINUTE(18) +> I6 INTERVAL SECOND(18) +> I7 INTERVAL YEAR(18) TO MONTH +> I8 INTERVAL DAY(18) TO HOUR +> I9 INTERVAL DAY(18) TO MINUTE +> I10 INTERVAL DAY(18) TO SECOND(2) +> I11 INTERVAL HOUR(18) TO MINUTE +> I12 INTERVAL HOUR(18) TO SECOND(2) +> I13 INTERVAL MINUTE(18) TO SECOND(2) +> rows (ordered): 22 + +DROP TABLE S, A; +> ok + +SELECT SUM(I) FROM (VALUES INTERVAL '999999999999999999' SECOND, INTERVAL '1' SECOND) T(I); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/var-pop.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/var-pop.sql deleted file mode 100644 index 008a885806..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/var-pop.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Copyright 2004-2019 
H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/var-samp.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/var-samp.sql deleted file mode 100644 index 008a885806..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/aggregate/var-samp.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/var_pop.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/var_pop.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/var_pop.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/var_samp.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/var_samp.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/var_samp.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/json/json_array.sql b/h2/src/test/org/h2/test/scripts/functions/json/json_array.sql new file mode 100644 index 0000000000..58d0c52988 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/json/json_array.sql @@ -0,0 +1,58 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT JSON_ARRAY(10, TRUE, 'str', NULL, '[1,2,3]' FORMAT JSON); +>> [10,true,"str",[1,2,3]] + +SELECT JSON_ARRAY(10, TRUE, 'str', NULL, '[1,2,3]' FORMAT JSON ABSENT ON NULL); +>> [10,true,"str",[1,2,3]] + +SELECT JSON_ARRAY(10, TRUE, 'str', NULL, '[1,2,3]' FORMAT JSON NULL ON NULL); +>> [10,true,"str",null,[1,2,3]] + +SELECT JSON_ARRAY(); +>> [] + +SELECT JSON_ARRAY(NULL ON NULL); +>> [] + +SELECT JSON_ARRAY(NULL ABSENT ON NULL); +>> [] + +SELECT JSON_ARRAY(NULL NULL ON NULL); +>> [null] + +CREATE TABLE TEST(ID INT, V VARCHAR); +> ok + +EXPLAIN SELECT JSON_ARRAY(V) FROM TEST; +>> SELECT JSON_ARRAY("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAY(V NULL ON NULL) FROM TEST; +>> SELECT JSON_ARRAY("V" NULL ON NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAY(V ABSENT ON NULL) FROM TEST; +>> SELECT JSON_ARRAY("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAY(V FORMAT JSON ABSENT ON NULL) FROM TEST; +>> SELECT JSON_ARRAY("V" FORMAT JSON) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +INSERT INTO TEST VALUES (1, 'null'), (2, '1'), (3, null); +> update count: 3 + +SELECT JSON_ARRAY((SELECT V FROM TEST ORDER BY ID)); +>> ["null","1"] + +SELECT JSON_ARRAY((SELECT V FROM TEST ORDER BY ID) ABSENT ON NULL); +>> ["null","1"] + +SELECT JSON_ARRAY((SELECT V FROM TEST ORDER BY ID) NULL ON NULL); +>> ["null","1",null] + +SELECT JSON_ARRAY((SELECT V FROM TEST ORDER BY ID) FORMAT JSON); +>> [null,1,null] + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/json/json_object.sql b/h2/src/test/org/h2/test/scripts/functions/json/json_object.sql new file mode 100644 index 0000000000..d295f37244 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/json/json_object.sql @@ -0,0 +1,58 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT JSON_OBJECT('key1' : 10, 'key2' VALUE TRUE, KEY 'key3' VALUE 'str', 'key4' : NULL, 'key5' : '[1,2,3]' FORMAT JSON); +>> {"key1":10,"key2":true,"key3":"str","key4":null,"key5":[1,2,3]} + +SELECT JSON_OBJECT('key1' : NULL ABSENT ON NULL); +>> {} + +SELECT JSON_OBJECT('key1' : NULL NULL ON NULL); +>> {"key1":null} + +SELECT JSON_OBJECT(); +>> {} + +SELECT JSON_OBJECT(NULL ON NULL); +>> {} + +SELECT JSON_OBJECT(WITHOUT UNIQUE KEYS); +>> {} + +SELECT JSON_OBJECT('key1' : NULL, 'key1' : 2 NULL ON NULL WITHOUT UNIQUE KEYS); +>> {"key1":null,"key1":2} + +SELECT JSON_OBJECT('key1' : 1, 'key1' : 2 WITH UNIQUE KEYS); +> exception INVALID_VALUE_2 + +SELECT JSON_OBJECT('key1' : 1, 'key1' : 2 NULL ON NULL WITH UNIQUE KEYS); +> exception INVALID_VALUE_2 + +SELECT JSON_OBJECT('key1' : TRUE WITH UNIQUE KEYS); +>> {"key1":true} + +SELECT JSON_OBJECT(NULL : 1); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST(V VARCHAR, ABSENT VARCHAR, WITHOUT VARCHAR); +> ok + +EXPLAIN SELECT JSON_OBJECT('name' : V NULL ON NULL WITHOUT UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECT('name': "V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECT('name' : V ABSENT ON NULL WITH UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECT('name': "V" ABSENT ON NULL WITH UNIQUE KEYS) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECT(ABSENT : 1) FROM TEST; +>> SELECT JSON_OBJECT("ABSENT": 1) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECT(WITHOUT : 1) FROM TEST; +>> SELECT JSON_OBJECT("WITHOUT": 1) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT JSON_OBJECT(NULL ON NULL WITHOUT); +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/abs.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/abs.sql index 
f19ea4208c..1e49b93f5a 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/abs.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/abs.sql @@ -1,24 +1,15 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select abs(-1) r1, abs(id) r1b from test; +select abs(-1) r1, abs(1) r1b; > R1 R1B > -- --- > 1 1 > rows: 1 -select abs(sum(id)) from test; ->> 1 - -select abs(null) vn, abs(-1) r1, abs(1) r2, abs(0) r3, abs(-0.1) r4, abs(0.1) r5 from test; +select abs(null) vn, abs(-1) r1, abs(1) r2, abs(0) r3, abs(-0.1) r4, abs(0.1) r5; > VN R1 R2 R3 R4 R5 > ---- -- -- -- --- --- > null 1 1 0 0.1 0.1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/acos.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/acos.sql index 9214415230..d0f493db45 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/acos.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/acos.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select acos(null) vn, acos(-1) r1 from test; +select acos(null) vn, acos(-1) r1; > VN R1 > ---- ----------------- > null 3.141592653589793 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/asin.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/asin.sql index 2fb3ef14fb..d7fead3bf5 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/asin.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/asin.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select asin(null) vn, asin(-1) r1 from test; +select asin(null) vn, asin(-1) r1; > VN R1 > ---- ------------------- > null -1.5707963267948966 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/atan.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/atan.sql index d06266d868..e8612f1280 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/atan.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/atan.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select atan(null) vn, atan(-1) r1 from test; +select atan(null) vn, atan(-1) r1; > VN R1 > ---- ------------------- > null -0.7853981633974483 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/atan2.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/atan2.sql index dd02d6f67a..b0b117270c 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/atan2.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/atan2.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select atan2(null, null) vn, atan2(10, 1) r1 from test; +select atan2(null, null) vn, atan2(10, 1) r1; > VN R1 > ---- ------------------ > null 1.4711276743037347 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitand.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitand.sql index 9ddb8d14b8..da953e9f36 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/bitand.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitand.sql @@ -1,16 +1,79 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select bitand(null, 1) vn, bitand(1, null) vn1, bitand(null, null) vn2, bitand(3, 6) e2 from test; +select bitand(null, 1) vn, bitand(1, null) vn1, bitand(null, null) vn2, bitand(3, 6) e2; > VN VN1 VN2 E2 > ---- ---- ---- -- > null null null 2 > rows: 1 + +SELECT BITAND(10, 12); +>> 8 + +SELECT BITNAND(10, 12); +>> -9 + +CREATE TABLE TEST(A BIGINT, B BIGINT); +> ok + +EXPLAIN SELECT BITNOT(BITAND(A, B)), BITNOT(BITNAND(A, B)) FROM TEST; +>> SELECT BITNAND("A", "B"), BITAND("A", "B") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT + BITAND(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITAND(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITAND(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITAND(CAST(0xC5 AS BIGINT), CAST(0x63 AS BIGINT)), + BITAND(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITAND(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(65 AS TINYINT), CAST(65 AS SMALLINT), 65, CAST(65 AS BIGINT), X'41', CAST(X'41' AS BINARY(1)) + +EXPLAIN SELECT + BITAND(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITAND(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITAND(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITAND(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'4100', X'4100', CAST(X'4100' AS BINARY(2)), CAST(X'4100' AS BINARY(2)) + +EXPLAIN SELECT + BITAND(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITAND(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'41' AS BINARY(1)), CAST(X'41' AS BINARY(1)) + +EXPLAIN SELECT + BITNAND(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITNAND(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITNAND(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITNAND(CAST(0xC5 AS BIGINT), 
CAST(0x63 AS BIGINT)), + BITNAND(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITNAND(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(-66 AS TINYINT), CAST(-66 AS SMALLINT), -66, CAST(-66 AS BIGINT), X'be', CAST(X'be' AS BINARY(1)) + +EXPLAIN SELECT + BITNAND(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITNAND(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITNAND(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITNAND(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'beff', X'beff', CAST(X'beff' AS BINARY(2)), CAST(X'beff' AS BINARY(2)) + +EXPLAIN SELECT + BITNAND(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITNAND(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'be' AS BINARY(1)), CAST(X'be' AS BINARY(1)) + +SELECT BITAND('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITAND(1, X'AA'); +> exception INVALID_VALUE_2 + +SELECT BITNAND('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITNAND(1, X'AA'); +> exception INVALID_VALUE_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitcount.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitcount.sql new file mode 100644 index 0000000000..235b43338d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitcount.sql @@ -0,0 +1,27 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT V, BITCOUNT(V) C FROM (VALUES 0, 10, -1) T(V); +> V C +> -- -- +> -1 32 +> 0 0 +> 10 2 +> rows: 3 + +EXPLAIN SELECT + BITCOUNT(CAST((0xC5 - 0x100) AS TINYINT)), + BITCOUNT(CAST(0xC5 AS SMALLINT)), + BITCOUNT(CAST(0xC5 AS INTEGER)), + BITCOUNT(CAST(0xC5 AS BIGINT)), + BITCOUNT(CAST(X'C5' AS VARBINARY)), + BITCOUNT(CAST(X'C5' AS BINARY)); +>> SELECT CAST(4 AS BIGINT), CAST(4 AS BIGINT), CAST(4 AS BIGINT), CAST(4 AS BIGINT), CAST(4 AS BIGINT), CAST(4 AS BIGINT) + +SELECT BITCOUNT(X'0123456789ABCDEF'); +>> 32 + +SELECT BITCOUNT(X'0123456789ABCDEF33'); +>> 36 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitget.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitget.sql index 008a885806..acea82167c 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/bitget.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitget.sql @@ -1,4 +1,30 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +SELECT I, + BITGET(CAST((0xC5 - 0x100) AS TINYINT), I), + BITGET(CAST(0xC5 AS SMALLINT), I), + BITGET(CAST(0xC5 AS INTEGER), I), + BITGET(CAST(0xC5 AS BIGINT), I), + BITGET(CAST(X'C5' AS VARBINARY), I), + BITGET(CAST(X'C5' AS BINARY), I) + FROM (VALUES -1, 0, 1, 4, 9, 99) T(I); +> I BITGET(-59, I) BITGET(197, I) BITGET(197, I) BITGET(197, I) BITGET(CAST(X'c5' AS BINARY VARYING), I) BITGET(X'c5', I) +> -- -------------- -------------- -------------- -------------- ---------------------------------------- ---------------- +> -1 FALSE FALSE FALSE FALSE FALSE FALSE +> 0 TRUE TRUE TRUE TRUE TRUE TRUE +> 1 FALSE FALSE FALSE FALSE FALSE FALSE +> 4 FALSE FALSE FALSE FALSE FALSE FALSE +> 9 FALSE FALSE FALSE FALSE FALSE FALSE +> 99 FALSE FALSE FALSE FALSE FALSE FALSE +> rows: 6 + +SELECT X, BITGET(X'1001', X) FROM SYSTEM_RANGE(7, 9); +> X BITGET(X'1001', X) +> - ------------------ +> 7 FALSE +> 8 TRUE +> 9 FALSE +> rows: 3 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitnot.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitnot.sql new file mode 100644 index 0000000000..d4c80c244d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitnot.sql @@ -0,0 +1,31 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: Joe Littlejohn +-- + +select bitnot(null) vn, bitnot(0) v1, bitnot(10) v2, bitnot(-10) v3; +> VN V1 V2 V3 +> ---- -- --- -- +> null -1 -11 9 +> rows: 1 + +CREATE TABLE TEST(A BIGINT); +> ok + +EXPLAIN SELECT BITNOT(BITNOT(A)), BITNOT(LSHIFT(A, 1)) FROM TEST; +>> SELECT "A", BITNOT(LSHIFT("A", 1)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT + BITNOT(CAST((0xC5 - 0x100) AS TINYINT)), + BITNOT(CAST(0xC5 AS SMALLINT)), + BITNOT(CAST(0xC5 AS INTEGER)), + BITNOT(CAST(0xC5 AS BIGINT)), + BITNOT(CAST(X'C5' AS VARBINARY)), + BITNOT(CAST(X'C5' AS BINARY)); +>> SELECT CAST(58 AS TINYINT), CAST(-198 AS SMALLINT), -198, CAST(-198 AS BIGINT), X'3a', CAST(X'3a' AS BINARY(1)) + +SELECT BITNOT('AA'); +> exception INVALID_VALUE_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitor.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitor.sql index 1beea007db..919484846b 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/bitor.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitor.sql @@ -1,16 +1,79 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select bitor(null, 1) vn, bitor(1, null) vn1, bitor(null, null) vn2, bitor(3, 6) e7 from test; +select bitor(null, 1) vn, bitor(1, null) vn1, bitor(null, null) vn2, bitor(3, 6) e7; > VN VN1 VN2 E7 > ---- ---- ---- -- > null null null 7 > rows: 1 + +SELECT BITOR(10, 12); +>> 14 + +SELECT BITNOR(10, 12); +>> -15 + +CREATE TABLE TEST(A BIGINT, B BIGINT); +> ok + +EXPLAIN SELECT BITNOT(BITOR(A, B)), BITNOT(BITNOR(A, B)) FROM TEST; +>> SELECT BITNOR("A", "B"), BITOR("A", "B") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT + BITOR(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITOR(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITOR(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITOR(CAST(0xC5 AS BIGINT), CAST(0x63 AS BIGINT)), + BITOR(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITOR(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(-25 AS TINYINT), CAST(231 AS SMALLINT), 231, CAST(231 AS BIGINT), X'e7', CAST(X'e7' AS BINARY(1)) + +EXPLAIN SELECT + BITOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITOR(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITOR(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITOR(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'e701', X'e701', CAST(X'e701' AS BINARY(2)), CAST(X'e701' AS BINARY(2)) + +EXPLAIN SELECT + BITOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITOR(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'e7' AS BINARY(1)), CAST(X'e7' AS BINARY(1)) + +EXPLAIN SELECT + BITNOR(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITNOR(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITNOR(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITNOR(CAST(0xC5 AS BIGINT), CAST(0x63 AS BIGINT)), + 
BITNOR(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITNOR(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(24 AS TINYINT), CAST(-232 AS SMALLINT), -232, CAST(-232 AS BIGINT), X'18', CAST(X'18' AS BINARY(1)) + +EXPLAIN SELECT + BITNOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITNOR(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITNOR(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITNOR(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'18fe', X'18fe', CAST(X'18fe' AS BINARY(2)), CAST(X'18fe' AS BINARY(2)) + +EXPLAIN SELECT + BITNOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITNOR(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'18' AS BINARY(1)), CAST(X'18' AS BINARY(1)) + +SELECT BITOR('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITOR(1, X'AA'); +> exception INVALID_VALUE_2 + +SELECT BITNOR('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITNOR(1, X'AA'); +> exception INVALID_VALUE_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitxor.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitxor.sql index b94eedef7c..a26692f7a3 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/bitxor.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitxor.sql @@ -1,16 +1,79 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select bitxor(null, 1) vn, bitxor(1, null) vn1, bitxor(null, null) vn2, bitxor(3, 6) e5 from test; +select bitxor(null, 1) vn, bitxor(1, null) vn1, bitxor(null, null) vn2, bitxor(3, 6) e5; > VN VN1 VN2 E5 > ---- ---- ---- -- > null null null 5 > rows: 1 + +SELECT BITXOR(10, 12); +>> 6 + +SELECT BITXNOR(10, 12); +>> -7 + +CREATE TABLE TEST(A BIGINT, B BIGINT); +> ok + +EXPLAIN SELECT BITNOT(BITXOR(A, B)), BITNOT(BITXNOR(A, B)) FROM TEST; +>> SELECT BITXNOR("A", "B"), BITXOR("A", "B") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT + BITXOR(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITXOR(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITXOR(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITXOR(CAST(0xC5 AS BIGINT), CAST(0x63 AS BIGINT)), + BITXOR(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITXOR(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(-90 AS TINYINT), CAST(166 AS SMALLINT), 166, CAST(166 AS BIGINT), X'a6', CAST(X'a6' AS BINARY(1)) + +EXPLAIN SELECT + BITXOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITXOR(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITXOR(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITXOR(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'a601', X'a601', CAST(X'a601' AS BINARY(2)), CAST(X'a601' AS BINARY(2)) + +EXPLAIN SELECT + BITXOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITXOR(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'a6' AS BINARY(1)), CAST(X'a6' AS BINARY(1)) + +EXPLAIN SELECT + BITXNOR(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITXNOR(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITXNOR(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITXNOR(CAST(0xC5 AS 
BIGINT), CAST(0x63 AS BIGINT)), + BITXNOR(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITXNOR(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(89 AS TINYINT), CAST(-167 AS SMALLINT), -167, CAST(-167 AS BIGINT), X'59', CAST(X'59' AS BINARY(1)) + +EXPLAIN SELECT + BITXNOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITXNOR(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITXNOR(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITXNOR(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'59fe', X'59fe', CAST(X'59fe' AS BINARY(2)), CAST(X'59fe' AS BINARY(2)) + +EXPLAIN SELECT + BITXNOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITXNOR(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'59' AS BINARY(1)), CAST(X'59' AS BINARY(1)) + +SELECT BITXOR('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITXOR(1, X'AA'); +> exception INVALID_VALUE_2 + +SELECT BITXNOR('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITXNOR(1, X'AA'); +> exception INVALID_VALUE_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/ceil.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/ceil.sql index 20033f9d9c..7bcb48fa03 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/ceil.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/ceil.sql @@ -1,16 +1,46 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); +select ceil(null) vn, ceil(1) v1, ceiling(1.1) v2, ceil(-1.1) v3, ceiling(1.9) v4, ceiling(-1.9) v5; +> VN V1 V2 V3 V4 V5 +> ---- -- -- -- -- -- +> null 1 2 -1 2 -1 +> rows: 1 + +SELECT CEIL(1.5), CEIL(-1.5), CEIL(1.5) IS OF (NUMERIC); +> 2 -1 TRUE +> - -- ---- +> 2 -1 TRUE +> rows: 1 + +SELECT CEIL(1.5::DOUBLE), CEIL(-1.5::DOUBLE), CEIL(1.5::DOUBLE) IS OF (DOUBLE); +> 2.0 -1.0 TRUE +> --- ---- ---- +> 2.0 -1.0 TRUE +> rows: 1 + +SELECT CEIL(1.5::REAL), CEIL(-1.5::REAL), CEIL(1.5::REAL) IS OF (REAL); +> 2.0 -1.0 TRUE +> --- ---- ---- +> 2.0 -1.0 TRUE +> rows: 1 + +SELECT CEIL('a'); +> exception INVALID_VALUE_2 + +CREATE TABLE S(N NUMERIC(5, 2)); > ok -insert into test values(1, 'Hello'); -> update count: 1 +CREATE TABLE T AS SELECT CEIL(N) C FROM S; +> ok -select ceil(null) vn, ceil(1) v1, ceiling(1.1) v2, ceil(-1.1) v3, ceiling(1.9) v4, ceiling(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- --- --- ---- --- ---- -> null 1.0 2.0 -1.0 2.0 -1.0 +SELECT DATA_TYPE, NUMERIC_PRECISION, NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'T'; +> DATA_TYPE NUMERIC_PRECISION NUMERIC_SCALE +> --------- ----------------- ------------- +> NUMERIC 4 0 > rows: 1 + +DROP TABLE S, T; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/compress.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/compress.sql index 008a885806..7b0ef7bff1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/compress.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/compress.sql @@ -1,4 +1,25 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +CALL COMPRESS(X'000000000000000000000000'); +>> X'010c010000c000010000' + +CALL COMPRESS(X'000000000000000000000000', 'NO'); +>> X'000c000000000000000000000000' + +CALL COMPRESS(X'000000000000000000000000', 'LZF'); +>> X'010c010000c000010000' + +CALL COMPRESS(X'000000000000000000000000', 'DEFLATE'); +>> X'020c789c6360400000000c0001' + +CALL COMPRESS(X'000000000000000000000000', 'UNKNOWN'); +> exception UNSUPPORTED_COMPRESSION_ALGORITHM_1 + +CALL COMPRESS(NULL); +>> null + +CALL COMPRESS(X'00', NULL); +>> null diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/cos.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/cos.sql index 3876f18c30..fe649580c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/cos.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/cos.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select cos(null) vn, cos(-1) r1 from test; +select cos(null) vn, cos(-1) r1; > VN R1 > ---- ------------------ > null 0.5403023058681398 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/cosh.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/cosh.sql index 008a885806..0b7b614aab 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/cosh.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/cosh.sql @@ -1,4 +1,10 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- + +CALL COSH(1); +>> 1.543080634815244 + +CALL COSH(50); +>> 2.592352764293536E21 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/cot.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/cot.sql index 97b40f23c2..74963e24b5 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/cot.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/cot.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select cot(null) vn, cot(-1) r1 from test; +select cot(null) vn, cot(-1) r1; > VN R1 > ---- ------------------- > null -0.6420926159343306 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/decrypt.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/decrypt.sql index 1f780a25c8..b9eeb8fef9 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/decrypt.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/decrypt.sql @@ -1,9 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -call utf8tostring(decrypt('AES', '00000000000000000000000000000000', 'dbd42d55d4b923c4b03eba0396fac98e')); +call utf8tostring(decrypt('AES', X'00000000000000000000000000000000', X'dbd42d55d4b923c4b03eba0396fac98e')); >> Hello World Test call utf8tostring(decrypt('AES', hash('sha256', stringtoutf8('Hello'), 1000), encrypt('AES', hash('sha256', stringtoutf8('Hello'), 1000), stringtoutf8('Hello World Test')))); diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/degrees.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/degrees.sql index 20581b534a..4b4a130769 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/degrees.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/degrees.sql @@ -1,19 +1,13 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -- Truncate least significant digits because implementations returns slightly -- different results depending on Java version select degrees(null) vn, truncate(degrees(1), 10) v1, truncate(degrees(1.1), 10) v2, truncate(degrees(-1.1), 10) v3, truncate(degrees(1.9), 10) v4, - truncate(degrees(-1.9), 10) v5 from test; + truncate(degrees(-1.9), 10) v5; > VN V1 V2 V3 V4 V5 > ---- ------------ ------------- -------------- -------------- --------------- > null 57.295779513 63.0253574643 -63.0253574643 108.8619810748 -108.8619810748 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/encrypt.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/encrypt.sql index 786b2c5578..00dff40c67 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/encrypt.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/encrypt.sql @@ -1,13 +1,13 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -call encrypt('AES', '00000000000000000000000000000000', stringtoutf8('Hello World Test')); ->> dbd42d55d4b923c4b03eba0396fac98e +call encrypt('AES', X'00000000000000000000000000000000', stringtoutf8('Hello World Test')); +>> X'dbd42d55d4b923c4b03eba0396fac98e' -CALL ENCRYPT('XTEA', '00', STRINGTOUTF8('Test')); ->> 8bc9a4601b3062692a72a5941072425f +CALL ENCRYPT('XTEA', X'00', STRINGTOUTF8('Test')); +>> X'8bc9a4601b3062692a72a5941072425f' -call encrypt('XTEA', '000102030405060708090a0b0c0d0e0f', '4142434445464748'); ->> dea0b0b40966b0669fbae58ab503765f +call encrypt('XTEA', X'000102030405060708090a0b0c0d0e0f', X'4142434445464748'); +>> X'dea0b0b40966b0669fbae58ab503765f' diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/exp.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/exp.sql index ea67c3f8c6..365c31828d 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/exp.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/exp.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select exp(null) vn, left(exp(1), 4) v1, left(exp(1.1), 4) v2, left(exp(-1.1), 4) v3, left(exp(1.9), 4) v4, left(exp(-1.9), 4) v5 from test; +select exp(null) vn, left(exp(1), 4) v1, left(exp(1.1), 4) v2, left(exp(-1.1), 4) v3, left(exp(1.9), 4) v4, left(exp(-1.9), 4) v5; > VN V1 V2 V3 V4 V5 > ---- ---- ---- ---- ---- ---- > null 2.71 3.00 0.33 6.68 0.14 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/expand.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/expand.sql index 008a885806..2b8416c2a6 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/expand.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/expand.sql @@ -1,4 +1,19 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- + +CALL EXPAND(X'000c000000000000000000000000'); +>> X'000000000000000000000000' + +CALL EXPAND(X'010c010000c000010000'); +>> X'000000000000000000000000' + +CALL EXPAND(X'020c789c6360400000000c0001'); +>> X'000000000000000000000000' + +CALL EXPAND(X''); +> exception COMPRESSION_ERROR + +CALL EXPAND(NULL); +>> null diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/floor.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/floor.sql index 9145d09487..c9e17ef349 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/floor.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/floor.sql @@ -1,16 +1,43 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); +select floor(null) vn, floor(1) v1, floor(1.1) v2, floor(-1.1) v3, floor(1.9) v4, floor(-1.9) v5; +> VN V1 V2 V3 V4 V5 +> ---- -- -- -- -- -- +> null 1 1 -2 1 -2 +> rows: 1 + +SELECT FLOOR(1.5), FLOOR(-1.5), FLOOR(1.5) IS OF (NUMERIC); +> 1 -2 TRUE +> - -- ---- +> 1 -2 TRUE +> rows: 1 + +SELECT FLOOR(1.5::DOUBLE), FLOOR(-1.5::DOUBLE), FLOOR(1.5::DOUBLE) IS OF (DOUBLE); +> 1.0 -2.0 TRUE +> --- ---- ---- +> 1.0 -2.0 TRUE +> rows: 1 + +SELECT FLOOR(1.5::REAL), FLOOR(-1.5::REAL), FLOOR(1.5::REAL) IS OF (REAL); +> 1.0 -2.0 TRUE +> --- ---- ---- +> 1.0 -2.0 TRUE +> rows: 1 + +CREATE TABLE S(N NUMERIC(5, 2)); > ok -insert into test values(1, 'Hello'); -> update count: 1 +CREATE TABLE T AS SELECT FLOOR(N) F FROM S; +> ok -select floor(null) vn, floor(1) v1, floor(1.1) v2, floor(-1.1) v3, floor(1.9) v4, floor(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- --- --- ---- --- ---- -> null 1.0 1.0 -2.0 1.0 -2.0 +SELECT DATA_TYPE, NUMERIC_PRECISION, NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'T'; +> DATA_TYPE NUMERIC_PRECISION NUMERIC_SCALE +> --------- ----------------- ------------- +> NUMERIC 4 0 > rows: 1 + +DROP TABLE S, T; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/hash.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/hash.sql index cc1184cd83..466d38225e 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/hash.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/hash.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -7,19 +7,79 @@ call hash('SHA256', 'Hello', 0); > exception INVALID_VALUE_2 call hash('SHA256', 'Hello'); ->> 185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969 +>> X'185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969' call hash('SHA256', 'Hello', 1); ->> 185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969 +>> X'185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969' call hash('SHA256', stringtoutf8('Hello'), 1); ->> 185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969 +>> X'185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969' CALL HASH('SHA256', 'Password', 1000); ->> c644a176ce920bde361ac336089b06cc2f1514dfa95ba5aabfe33f9a22d577f0 +>> X'c644a176ce920bde361ac336089b06cc2f1514dfa95ba5aabfe33f9a22d577f0' CALL HASH('SHA256', STRINGTOUTF8('Password'), 1000); ->> c644a176ce920bde361ac336089b06cc2f1514dfa95ba5aabfe33f9a22d577f0 +>> X'c644a176ce920bde361ac336089b06cc2f1514dfa95ba5aabfe33f9a22d577f0' call hash('unknown', 'Hello', 1); > exception INVALID_VALUE_2 + +CALL HASH('MD5', '****** Message digest test ******', 1); +>> X'ccd7ee53b52575b5b04fcadf1637fd30' + +CALL HASH('MD5', '****** Message digest test ******', 10); +>> X'b9e4b74ee3c41f646ee0ba42335efe20' + +CALL HASH('SHA-1', '****** Message digest test ******', 1); +>> X'b9f28134b8c9aef59e1257eca89e3e5101234694' + +CALL HASH('SHA-1', '****** Message digest test ******', 10); +>> X'e69a31beb996b59700aed3e6fbf9c29791efbc15' + +CALL HASH('SHA-224', '****** Message digest test ******', 1); +>> X'7bd9bf319961cfdb7fc9351debbcc8a80143d5d0909e8cbccd8b5f0f' + +CALL HASH('SHA-224', '****** Message digest test ******', 10); +>> X'6685a394158763e754332f0adec3ed43866dd0ba8f47624d0521fd1e' + +CALL HASH('SHA-256', '****** Message digest test ******', 1); +>> X'4e732bc9788b0958022403dbe42b4b79bfa270f05fbe914b4ecca074635f3f5c' + +CALL HASH('SHA-256', '****** Message digest test ******', 10); +>> 
X'93731025337904f6bc117ca5d3adc960ee2070c7a9666a5499af28546520da85' + +CALL HASH('SHA-384', '****** Message digest test ******', 1); +>> X'a37baa07c0cd5bc8dbb510b3fc3fa6f5ca539c847d8ee382d1d045b405a3d43dc4a898fcc31930cf7a80e2a79af82d4e' + +CALL HASH('SHA-384', '****** Message digest test ******', 10); +>> X'03cc3a769871ab13a64c387c44853efafe016180ab6ea70565924ccabe62c8884b2f2e1a53c1a79db184c112c9082bc2' + +CALL HASH('SHA-512', '****** Message digest test ******', 1); +>> X'88eb2488557eaf7e4da394b6f4ba08d4c781b9f2b9c9d150195ac7f7fbee7819923476b5139abc98f252b07649ade2471be46e2625b8003d0af5a8a50ca2915f' + +CALL HASH('SHA-512', '****** Message digest test ******', 10); +>> X'ab3bb7d9447f87a07379e9219c79da2e05122ff87bf25a5e553a7e44af7ac724ed91fb1fe5730d4bb584c367fc2232680f5c45b3863c6550fcf27b4473d05695' + +CALL HASH('SHA3-224', '****** Message digest test ******', 1); +>> X'cb91fec022d97ed63622d382e36e336b65a806888416a549fb4db390' + +CALL HASH('SHA3-224', '****** Message digest test ******', 10); +>> X'0d4dd581ed9b188341ec413988cb7c6bf15d178b151b543c91031ae6' + +CALL HASH('SHA3-256', '****** Message digest test ******', 1); +>> X'91db71f65f3c5b19370e0d9fd947da52695b28c9b440a1324d11e8076643c21f' + +CALL HASH('SHA3-256', '****** Message digest test ******', 10); +>> X'ed62484d8ac54550292241698dd5480de061fc23ab12e3e941a96ec7d3afd70f' + +CALL HASH('SHA3-384', '****** Message digest test ******', 1); +>> X'c2d5e516ea10a82a3d3a8c5fe8838ca77d402490f33ef813be9af168fd2cdf8f6daa7e9cf79565f3987f897d4087ce26' + +CALL HASH('SHA3-384', '****** Message digest test ******', 10); +>> X'9f5ac0eae232746826ea59196b455267e3aaa492047d5a2616c4a8aa325216f706dc7203fcbe71ee7e3357e0f3d93ee3' + +CALL HASH('SHA3-512', '****** Message digest test ******', 1); +>> X'08811cf7409957b59bb5ba090edbef9a35c3b7a4db5d5760f15f2b14453f9cacba30b9744d4248c742aa47f3d9943cf99e7d78d1700d4ccf5bc88b394bc00603' + +CALL HASH('SHA3-512', '****** Message digest test ******', 10); +>> 
X'37f2a9dbc6cd7a5122cc84383843566dd7195ed8d868b1c10aca2b706667c7bb0b4f00eab81d9e87b6f355e3afe0bccd57ba04aa121d0ef0c0bdea2ff8f95513' diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/length.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/length.sql index 6d68048476..67b65727dc 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/length.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/length.sql @@ -1,40 +1,34 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select bit_length(null) en, bit_length('') e0, bit_length('ab') e32 from test; +select bit_length(null) en, bit_length('') e0, bit_length('ab') e32; > EN E0 E32 > ---- -- --- -> null 0 32 +> null 0 16 > rows: 1 -select length(null) en, length('') e0, length('ab') e2 from test; +select length(null) en, length('') e0, length('ab') e2; > EN E0 E2 > ---- -- -- > null 0 2 > rows: 1 -select char_length(null) en, char_length('') e0, char_length('ab') e2 from test; +select char_length(null) en, char_length('') e0, char_length('ab') e2; > EN E0 E2 > ---- -- -- > null 0 2 > rows: 1 -select character_length(null) en, character_length('') e0, character_length('ab') e2 from test; +select character_length(null) en, character_length('') e0, character_length('ab') e2; > EN E0 E2 > ---- -- -- > null 0 2 > rows: 1 -select octet_length(null) en, octet_length('') e0, octet_length('ab') e4 from test; +select octet_length(null) en, octet_length('') e0, octet_length('ab') e4; > EN E0 E4 > ---- -- -- -> null 0 4 +> null 0 2 > rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/log.sql 
b/h2/src/test/org/h2/test/scripts/functions/numeric/log.sql index 25c9f5532a..baf60a6c76 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/log.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/log.sql @@ -1,28 +1,100 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok +SELECT LN(NULL), LOG(NULL, NULL), LOG(NULL, 2); +> CAST(NULL AS DOUBLE PRECISION) CAST(NULL AS DOUBLE PRECISION) CAST(NULL AS DOUBLE PRECISION) +> ------------------------------ ------------------------------ ------------------------------ +> null null null +> rows: 1 + +SELECT LOG(2, NULL), LOG10(NULL), LOG(NULL); +> CAST(NULL AS DOUBLE PRECISION) CAST(NULL AS DOUBLE PRECISION) CAST(NULL AS DOUBLE PRECISION) +> ------------------------------ ------------------------------ ------------------------------ +> null null null +> rows: 1 + +SELECT LN(0); +> exception INVALID_VALUE_2 + +SELECT LN(-1); +> exception INVALID_VALUE_2 + +SELECT LOG(0, 2); +> exception INVALID_VALUE_2 + +SELECT LOG(-1, 2); +> exception INVALID_VALUE_2 + +SELECT LOG(1, 2); +> exception INVALID_VALUE_2 + +SELECT LOG(2, 0); +> exception INVALID_VALUE_2 + +SELECT LOG(2, -1); +> exception INVALID_VALUE_2 + +SELECT LOG(0); +> exception INVALID_VALUE_2 + +SELECT LOG(-1); +> exception INVALID_VALUE_2 + +SELECT LOG10(0); +> exception INVALID_VALUE_2 -insert into test values(1, 'Hello'); -> update count: 1 +SELECT LOG10(-1); +> exception INVALID_VALUE_2 -select log(null) vn, log(1) v1, ln(1.1) v2, log(-1.1) v3, log(1.9) v4, log(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- --- ------------------- --- ------------------ --- -> null 0.0 0.09531017980432493 NaN 0.6418538861723947 NaN +SELECT 
LN(0.5) VH, LN(1) V1, LN(2) V2, LN(3) V3, LN(10) V10; +> VH V1 V2 V3 V10 +> ------------------- --- ------------------ ------------------ ----------------- +> -0.6931471805599453 0.0 0.6931471805599453 1.0986122886681098 2.302585092994046 > rows: 1 -select log10(null) vn, log10(0) v1, log10(10) v2, log10(0.0001) v3, log10(1000000) v4, log10(1) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- --------- --- ---- --- --- -> null -Infinity 1.0 -4.0 6.0 0.0 +SELECT LOG(2, 0.5) VH, LOG(2, 1) V1, LOG(2, 2) V2, LOG(2, 3) V3, LOG(2, 10) V10, LOG(2, 64) V64; +> VH V1 V2 V3 V10 V64 +> ---- --- --- ------------------ ------------------ --- +> -1.0 0.0 1.0 1.5849625007211563 3.3219280948873626 6.0 > rows: 1 -select log(null) vn, log(1) v1, log(1.1) v2, log(-1.1) v3, log(1.9) v4, log(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- --- ------------------- --- ------------------ --- -> null 0.0 0.09531017980432493 NaN 0.6418538861723947 NaN +SELECT LOG(2.7182818284590452, 10); +>> 2.302585092994046 + +SELECT LOG(10, 3); +>> 0.47712125471966244 + +SELECT LOG(0.5) VH, LOG(1) V1, LOG(2) V2, LOG(3) V3, LOG(10) V10; +> VH V1 V2 V3 V10 +> ------------------- --- ------------------ ------------------ ----------------- +> -0.6931471805599453 0.0 0.6931471805599453 1.0986122886681098 2.302585092994046 +> rows: 1 + +SELECT LOG10(0.5) VH, LOG10(1) V1, LOG10(2) V2, LOG10(3) V3, LOG10(10) V10, LOG10(100) V100; +> VH V1 V2 V3 V10 V100 +> ------------------- --- ------------------ ------------------- --- ---- +> -0.3010299956639812 0.0 0.3010299956639812 0.47712125471966244 1.0 2.0 +> rows: 1 + +SET MODE PostgreSQL; +> ok + +SELECT LOG(0.5) VH, LOG(1) V1, LOG(2) V2, LOG(3) V3, LOG(10) V10, LOG(100) V100; +> VH V1 V2 V3 V10 V100 +> ------------------- --- ------------------ ------------------- --- ---- +> -0.3010299956639812 0.0 0.3010299956639812 0.47712125471966244 1.0 2.0 > rows: 1 + +SET MODE MSSQLServer; +> ok + +SELECT LOG(0.5, 2) VH, LOG(1, 2) V1, LOG(2, 2) V2, LOG(3, 2) V3, LOG(10, 2) V10, 
LOG(64, 2) V64; +> VH V1 V2 V3 V10 V64 +> ---- --- --- ------------------ ------------------ --- +> -1.0 0.0 1.0 1.5849625007211563 3.3219280948873626 6.0 +> rows: 1 + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/lshift.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/lshift.sql new file mode 100644 index 0000000000..7bb7e44e06 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/lshift.sql @@ -0,0 +1,109 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select lshift(null, 1) vn, lshift(1, null) vn1, lshift(null, null) vn2, lshift(3, 6) v1, lshift(3,0) v2; +> VN VN1 VN2 V1 V2 +> ---- ---- ---- --- -- +> null null null 192 3 +> rows: 1 + +SELECT I, + LSHIFT(CAST(-128 AS TINYINT), I), LSHIFT(CAST(1 AS TINYINT), I), + ULSHIFT(CAST(-128 AS TINYINT), I), ULSHIFT(CAST(1 AS TINYINT), I) + FROM + (VALUES -111, -8, -7, -1, 0, 1, 7, 8, 111) T(I) ORDER BY I; +> I LSHIFT(-128, I) LSHIFT(1, I) ULSHIFT(-128, I) ULSHIFT(1, I) +> ---- --------------- ------------ ---------------- ------------- +> -111 -1 0 0 0 +> -8 -1 0 0 0 +> -7 -1 0 1 0 +> -1 -64 0 64 0 +> 0 -128 1 -128 1 +> 1 0 2 0 2 +> 7 0 -128 0 -128 +> 8 0 0 0 0 +> 111 0 0 0 0 +> rows (ordered): 9 + +SELECT I, + LSHIFT(CAST(-32768 AS SMALLINT), I), LSHIFT(CAST(1 AS SMALLINT), I), + ULSHIFT(CAST(-32768 AS SMALLINT), I), ULSHIFT(CAST(1 AS SMALLINT), I) + FROM + (VALUES -111, -16, -15, -1, 0, 1, 15, 16, 111) T(I) ORDER BY I; +> I LSHIFT(-32768, I) LSHIFT(1, I) ULSHIFT(-32768, I) ULSHIFT(1, I) +> ---- ----------------- ------------ ------------------ ------------- +> -111 -1 0 0 0 +> -16 -1 0 0 0 +> -15 -1 0 1 0 +> -1 -16384 0 16384 0 +> 0 -32768 1 -32768 1 +> 1 0 2 0 2 +> 15 0 -32768 0 -32768 +> 16 0 0 0 0 +> 111 0 0 0 0 +> rows (ordered): 9 + +SELECT I, + LSHIFT(CAST(-2147483648 AS INTEGER), I), LSHIFT(CAST(1 AS INTEGER), I), + 
ULSHIFT(CAST(-2147483648 AS INTEGER), I), ULSHIFT(CAST(1 AS INTEGER), I) + FROM + (VALUES -111, -32, -31, -1, 0, 1, 31, 32, 111) T(I) ORDER BY I; +> I LSHIFT(-2147483648, I) LSHIFT(1, I) ULSHIFT(-2147483648, I) ULSHIFT(1, I) +> ---- ---------------------- ------------ ----------------------- ------------- +> -111 -1 0 0 0 +> -32 -1 0 0 0 +> -31 -1 0 1 0 +> -1 -1073741824 0 1073741824 0 +> 0 -2147483648 1 -2147483648 1 +> 1 0 2 0 2 +> 31 0 -2147483648 0 -2147483648 +> 32 0 0 0 0 +> 111 0 0 0 0 +> rows (ordered): 9 + +SELECT I, + LSHIFT(CAST(-9223372036854775808 AS BIGINT), I), LSHIFT(CAST(1 AS BIGINT), I), + ULSHIFT(CAST(-9223372036854775808 AS BIGINT), I), ULSHIFT(CAST(1 AS BIGINT), I) + FROM + (VALUES -111, -64, -63, -1, 0, 1, 63, 64, 111) T(I) ORDER BY I; +> I LSHIFT(-9223372036854775808, I) LSHIFT(1, I) ULSHIFT(-9223372036854775808, I) ULSHIFT(1, I) +> ---- ------------------------------- -------------------- -------------------------------- -------------------- +> -111 -1 0 0 0 +> -64 -1 0 0 0 +> -63 -1 0 1 0 +> -1 -4611686018427387904 0 4611686018427387904 0 +> 0 -9223372036854775808 1 -9223372036854775808 1 +> 1 0 2 0 2 +> 63 0 -9223372036854775808 0 -9223372036854775808 +> 64 0 0 0 0 +> 111 0 0 0 0 +> rows (ordered): 9 + +SELECT LSHIFT(X'', 1); +>> X'' + +SELECT LSHIFT(CAST(X'02' AS BINARY), 1); +>> X'04' + +SELECT I, LSHIFT(X'80ABCD09', I) FROM + (VALUES -33, -32, -31, -17, -16, -15, -1, 0, 1, 15, 16, 17, 31, 32, 33) T(I) ORDER BY I; +> I LSHIFT(X'80abcd09', I) +> --- ---------------------- +> -33 X'00000000' +> -32 X'00000000' +> -31 X'00000001' +> -17 X'00004055' +> -16 X'000080ab' +> -15 X'00010157' +> -1 X'4055e684' +> 0 X'80abcd09' +> 1 X'01579a12' +> 15 X'e6848000' +> 16 X'cd090000' +> 17 X'9a120000' +> 31 X'80000000' +> 32 X'00000000' +> 33 X'00000000' +> rows (ordered): 15 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/mod.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/mod.sql index d7a610669b..5d0b3e7312 100644 --- 
a/h2/src/test/org/h2/test/scripts/functions/numeric/mod.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/mod.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select mod(null, 1) vn, mod(1, null) vn1, mod(null, null) vn2, mod(10, 2) e1 from test; +select mod(null, 1) vn, mod(1, null) vn1, mod(null, null) vn2, mod(10, 2) e1; > VN VN1 VN2 E1 > ---- ---- ---- -- > null null null 0 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/ora-hash.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/ora-hash.sql index dc321f4737..6df772c987 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/ora-hash.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/ora-hash.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -42,10 +42,10 @@ SELECT ORA_HASH(1, 4294967295, 4294967295); SELECT ORA_HASH(1, 4294967295, 4294967296); > exception INVALID_VALUE_2 -CREATE TABLE TEST(I BINARY, B BLOB, S VARCHAR, C CLOB); +CREATE TABLE TEST(I BINARY(3), B BLOB, S VARCHAR, C CLOB); > ok -INSERT INTO TEST VALUES ('010203', '010203', 'abc', 'abc'); +INSERT INTO TEST VALUES (X'010203', X'010203', 'abc', 'abc'); > update count: 1 SELECT ORA_HASH(I) FROM TEST; diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/pi.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/pi.sql index e9d305ba23..0c283cbb3b 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/pi.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/pi.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select pi() from test; +select pi(); >> 3.141592653589793 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/power.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/power.sql index ca927bf7f2..3dd455f940 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/power.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/power.sql @@ -1,16 +1,13 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select power(null, null) en, power(2, 3) e8, power(16, 0.5) e4 from test; +select power(null, null) en, power(2, 3) e8, power(16, 0.5) e4; > EN E8 E4 > ---- --- --- > null 8.0 4.0 > rows: 1 + +SELECT POWER(10, 2) IS OF (DOUBLE); +>> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/radians.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/radians.sql index d8afeb05bd..f22f4933bd 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/radians.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/radians.sql @@ -1,20 +1,17 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -- Truncate least significant digits because implementations returns slightly -- different results depending on Java version select radians(null) vn, truncate(radians(1), 10) v1, truncate(radians(1.1), 10) v2, truncate(radians(-1.1), 10) v3, truncate(radians(1.9), 10) v4, - truncate(radians(-1.9), 10) v5 from test; + truncate(radians(-1.9), 10) v5; > VN V1 V2 V3 V4 V5 > ---- ------------ ------------ ------------- ------------ ------------- > null 0.0174532925 0.0191986217 -0.0191986217 0.0331612557 -0.0331612557 > rows: 1 + +SELECT RADIANS(0) IS OF (DOUBLE); +>> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/rand.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/rand.sql index 67b66a32d4..1d6c29b6d6 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/rand.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/rand.sql @@ -1,21 +1,15 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - @reconnect off -select rand(1) e, random() f from test; +select rand(1) e, random() f; > E F > ------------------ ------------------- > 0.7308781907032909 0.41008081149220166 > rows: 1 -select rand() from test; +select rand(); >> 0.20771484130971707 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/random-uuid.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/random-uuid.sql index 5b232878b1..33a8bbe6aa 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/random-uuid.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/random-uuid.sql @@ -1,9 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -SELECT LENGTH(CAST(RANDOM_UUID() AS VARCHAR)); +SELECT CHAR_LENGTH(CAST(RANDOM_UUID() AS VARCHAR)); >> 36 SELECT RANDOM_UUID() = RANDOM_UUID(); @@ -12,11 +12,23 @@ SELECT RANDOM_UUID() = RANDOM_UUID(); SELECT NEWID(); > exception FUNCTION_NOT_FOUND_1 +SELECT SYS_GUID(); +> exception FUNCTION_NOT_FOUND_1 + SET MODE MSSQLServer; > ok -SELECT LENGTH(CAST(NEWID() AS VARCHAR)); +SELECT CHAR_LENGTH(CAST(NEWID() AS VARCHAR)); >> 36 +SET MODE Oracle; +> ok + +SELECT SYS_GUID() IS OF (RAW); +>> TRUE + +SELECT OCTET_LENGTH(SYS_GUID()); +>> 16 + SET MODE Regular; > ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/rotate.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/rotate.sql new file mode 100644 index 0000000000..5a205870e5 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/rotate.sql @@ -0,0 +1,103 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT I, ROTATELEFT(CAST(0x7d AS TINYINT), I) L, ROTATERIGHT(CAST(0x7d AS TINYINT), I) R + FROM (VALUES -8, -7, -2, -1, 0, 1, 2, 7, 8) T(I) ORDER BY I; +> I L R +> -- --- --- +> -8 125 125 +> -7 -6 -66 +> -2 95 -11 +> -1 -66 -6 +> 0 125 125 +> 1 -6 -66 +> 2 -11 95 +> 7 -66 -6 +> 8 125 125 +> rows (ordered): 9 + +SELECT I, ROTATELEFT(CAST(0x6d3f AS SMALLINT), I) L, ROTATERIGHT(CAST(0x6d3f AS SMALLINT), I) R + FROM (VALUES -16, -15, -2, -1, 0, 1, 2, 15, 16) T(I) ORDER BY I; +> I L R +> --- ------ ------ +> -16 27967 27967 +> -15 -9602 -18785 +> -2 -9393 -19203 +> -1 -18785 -9602 +> 0 27967 27967 +> 1 -9602 -18785 +> 2 -19203 -9393 +> 15 -18785 -9602 +> 16 27967 27967 +> rows (ordered): 9 + +SELECT I, ROTATELEFT(CAST(0x7d12e43c AS INTEGER), I) L, ROTATERIGHT(CAST(0x7d12e43c AS INTEGER), I) R + FROM (VALUES -32, -31, -2, -1, 0, 1, 2, 31, 32) T(I) ORDER BY I; +> I L R +> --- ---------- ---------- +> -32 2098390076 2098390076 +> -31 -98187144 1049195038 +> -2 524597519 -196374287 +> -1 1049195038 -98187144 +> 0 2098390076 2098390076 +> 1 -98187144 1049195038 +> 2 -196374287 524597519 +> 31 1049195038 -98187144 +> 32 2098390076 2098390076 +> rows (ordered): 9 + +SELECT I, ROTATELEFT(CAST(0x7302abe53d12e45f AS BIGINT), I) L, ROTATERIGHT(CAST(0x7302abe53d12e45f AS BIGINT), I) R + FROM (VALUES -64, -63, -2, -1, 0, 1, 2, 63, 64) T(I) ORDER BY I; +> I L R +> --- -------------------- -------------------- +> -64 8287375265375642719 8287375265375642719 +> -63 -1871993542958266178 -5079684404166954449 +> -2 -2539842202083477225 -3743987085916532355 +> -1 -5079684404166954449 -1871993542958266178 +> 0 8287375265375642719 8287375265375642719 +> 1 -1871993542958266178 -5079684404166954449 +> 2 -3743987085916532355 -2539842202083477225 +> 63 -5079684404166954449 -1871993542958266178 +> 64 8287375265375642719 8287375265375642719 +> rows (ordered): 
9 + +SELECT I, ROTATELEFT(X'ABCD', I) L, ROTATERIGHT(X'ABCD', I) R + FROM (VALUES -16, -15, -8, -1, 0, 1, 8, 15, 16) T(I) ORDER BY I; +> I L R +> --- ------- ------- +> -16 X'abcd' X'abcd' +> -15 X'579b' X'd5e6' +> -8 X'cdab' X'cdab' +> -1 X'd5e6' X'579b' +> 0 X'abcd' X'abcd' +> 1 X'579b' X'd5e6' +> 8 X'cdab' X'cdab' +> 15 X'd5e6' X'579b' +> 16 X'abcd' X'abcd' +> rows (ordered): 9 + +SELECT I, ROTATELEFT(CAST(X'ABCD' AS BINARY(2)), I) L, ROTATERIGHT(CAST(X'ABCD' AS BINARY(2)), I) R + FROM (VALUES -16, -15, -8, -1, 0, 1, 8, 15, 16) T(I) ORDER BY I; +> I L R +> --- ------- ------- +> -16 X'abcd' X'abcd' +> -15 X'579b' X'd5e6' +> -8 X'cdab' X'cdab' +> -1 X'd5e6' X'579b' +> 0 X'abcd' X'abcd' +> 1 X'579b' X'd5e6' +> 8 X'cdab' X'cdab' +> 15 X'd5e6' X'579b' +> 16 X'abcd' X'abcd' +> rows (ordered): 9 + +SELECT ROTATELEFT(X'8000', 1); +>> X'0001' + +SELECT ROTATERIGHT(X'0001', 1); +>> X'8000' + +SELECT ROTATELEFT(X'', 1); +>> X'' diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/round.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/round.sql index 5f9171e2f3..e925aa307e 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/round.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/round.sql @@ -1,28 +1,111 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok +SELECT ROUND(-1.2), ROUND(-1.5), ROUND(-1.6), ROUND(2), ROUND(1.5), ROUND(1.8), ROUND(1.1); +> -1 -2 -2 2 2 2 1 +> -- -- -- - - - - +> -1 -2 -2 2 2 2 1 +> rows: 1 + +select round(null, null) en, round(10.49, 0) e10, round(10.05, 1) e101; +> EN E10 E101 +> ---- --- ---- +> null 10 10.1 +> rows: 1 + +select round(null) en, round(0.6, null) en2, round(1.05) e1, round(-1.51) em2; +> EN EN2 E1 EM2 +> ---- ---- -- --- +> null null 1 -2 +> rows: 1 -insert into test values(1, 'Hello'); -> update count: 1 +CALL ROUND(998.5::DOUBLE); +>> 999.0 -select round(null, null) en, round(10.49, 0) e10, round(10.05, 1) e101 from test; -> EN E10 E101 -> ---- ---- ---- -> null 10.0 10.1 +CALL ROUND(998.5::REAL); +>> 999.0 + +SELECT + ROUND(4503599627370495.0::DOUBLE), ROUND(4503599627370495.5::DOUBLE), + ROUND(4503599627370496.0::DOUBLE), ROUND(4503599627370497.0::DOUBLE); +> 4.503599627370495E15 4.503599627370496E15 4.503599627370496E15 4.503599627370497E15 +> -------------------- -------------------- -------------------- -------------------- +> 4.503599627370495E15 4.503599627370496E15 4.503599627370496E15 4.503599627370497E15 > rows: 1 -select round(null) en, round(0.6, null) en2, round(1.05) e1, round(-1.51) em2 from test; -> EN EN2 E1 EM2 -> ---- ---- --- ---- -> null null 1.0 -2.0 +SELECT + ROUND(450359962737049.50::DOUBLE, 1), ROUND(450359962737049.55::DOUBLE, 1), + ROUND(450359962737049.60::DOUBLE, 1), ROUND(450359962737049.70::DOUBLE, 1); +> 4.503599627370495E14 4.503599627370496E14 4.503599627370496E14 4.503599627370497E14 +> -------------------- -------------------- -------------------- -------------------- +> 4.503599627370495E14 4.503599627370496E14 4.503599627370496E14 4.503599627370497E14 > rows: 1 -select roundmagic(null) en, roundmagic(cast(3.11 as double) - 3.1) e001, roundmagic(3.11-3.1-0.01) e000, roundmagic(2000000000000) e20x from test; -> EN E001 E000 E20X -> 
---- ---- ---- ------ -> null 0.01 0.0 2.0E12 +CALL ROUND(0.285, 2); +>> 0.29 + +CALL ROUND(0.285::DOUBLE, 2); +>> 0.29 + +CALL ROUND(0.285::REAL, 2); +>> 0.29 + +CALL ROUND(1.285, 2); +>> 1.29 + +CALL ROUND(1.285::DOUBLE, 2); +>> 1.29 + +CALL ROUND(1.285::REAL, 2); +>> 1.29 + +CALL ROUND(1, 1) IS OF (INTEGER); +>> TRUE + +CALL ROUND(1::DOUBLE, 1) IS OF (DOUBLE); +>> TRUE + +CALL ROUND(1::REAL, 1) IS OF (REAL); +>> TRUE + +SELECT ROUND(1, 10000000); +>> 1 + +CREATE TABLE T1(N NUMERIC(10, 2), D DECFLOAT(10), I INTEGER) AS VALUES (99999999.99, 99999999.99, 10); +> ok + +SELECT ROUND(N, -1) NN, ROUND(N) N0, ROUND(N, 1) N1, ROUND(N, 2) N2, ROUND(N, 3) N3, ROUND(N, 10000000) NL, + ROUND(D) D0, ROUND(D, 2) D2, ROUND(D, 3) D3, + ROUND(I) I0, ROUND(I, 1) I1, ROUND(I, I) II FROM T1; +> NN N0 N1 N2 N3 NL D0 D2 D3 I0 I1 II +> --------- --------- ----------- ----------- ----------- ----------- ---- ----------- ----------- -- -- -- +> 100000000 100000000 100000000.0 99999999.99 99999999.99 99999999.99 1E+8 99999999.99 99999999.99 10 10 10 > rows: 1 + +CREATE TABLE T2 AS SELECT ROUND(N, -1) NN, ROUND(N) N0, ROUND(N, 1) N1, ROUND(N, 2) N2, ROUND(N, 3) N3, ROUND(N, 10000000) NL, + ROUND(D) D0, ROUND(D, 2) D2, ROUND(D, 3) D3, + ROUND(I) I0, ROUND(I, 1) I1, ROUND(I, I) II FROM T1; +> ok + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'T2' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_SCALE +> ----------- --------- ----------------- ------------- +> NN NUMERIC 9 0 +> N0 NUMERIC 9 0 +> N1 NUMERIC 10 1 +> N2 NUMERIC 10 2 +> N3 NUMERIC 10 2 +> NL NUMERIC 10 2 +> D0 DECFLOAT 10 null +> D2 DECFLOAT 10 null +> D3 DECFLOAT 10 null +> I0 INTEGER 32 0 +> I1 INTEGER 32 0 +> II INTEGER 32 0 +> rows (ordered): 12 + +DROP TABLE T1; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/roundmagic.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/roundmagic.sql index 
008a885806..5e42f1852b 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/roundmagic.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/roundmagic.sql @@ -1,4 +1,10 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- + +select roundmagic(null) en, roundmagic(cast(3.11 as double) - 3.1) e001, roundmagic(3.11-3.1-0.01) e000, roundmagic(2000000000000) e20x; +> EN E001 E000 E20X +> ---- ---- ---- ------ +> null 0.01 0.0 2.0E12 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/rshift.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/rshift.sql new file mode 100644 index 0000000000..47acc0169b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/rshift.sql @@ -0,0 +1,115 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select rshift(null, 1) vn, rshift(1, null) vn1, rshift(null, null) vn2, rshift(3, 6) v1, rshift(1024,3) v2; +> VN VN1 VN2 V1 V2 +> ---- ---- ---- -- --- +> null null null 0 128 +> rows: 1 + +SELECT I, + RSHIFT(CAST(-128 AS TINYINT), I), RSHIFT(CAST(1 AS TINYINT), I), + URSHIFT(CAST(-128 AS TINYINT), I), URSHIFT(CAST(1 AS TINYINT), I) + FROM + (VALUES -111, -8, -7, -1, 0, 1, 7, 8, 111) T(I) ORDER BY I; +> I RSHIFT(-128, I) RSHIFT(1, I) URSHIFT(-128, I) URSHIFT(1, I) +> ---- --------------- ------------ ---------------- ------------- +> -111 0 0 0 0 +> -8 0 0 0 0 +> -7 0 -128 0 -128 +> -1 0 2 0 2 +> 0 -128 1 -128 1 +> 1 -64 0 64 0 +> 7 -1 0 1 0 +> 8 -1 0 0 0 +> 111 -1 0 0 0 +> rows (ordered): 9 + +SELECT I, + RSHIFT(CAST(-32768 AS SMALLINT), I), RSHIFT(CAST(1 AS SMALLINT), I), + URSHIFT(CAST(-32768 AS SMALLINT), I), URSHIFT(CAST(1 AS SMALLINT), I) + FROM + (VALUES -111, -16, -15, -1, 0, 1, 15, 16, 111) T(I) ORDER BY I; +> I RSHIFT(-32768, I) RSHIFT(1, I) URSHIFT(-32768, I) URSHIFT(1, I) +> ---- ----------------- ------------ ------------------ ------------- +> -111 0 0 0 0 +> -16 0 0 0 0 +> -15 0 -32768 0 -32768 +> -1 0 2 0 2 +> 0 -32768 1 -32768 1 +> 1 -16384 0 16384 0 +> 15 -1 0 1 0 +> 16 -1 0 0 0 +> 111 -1 0 0 0 +> rows (ordered): 9 + +SELECT I, + RSHIFT(CAST(-2147483648 AS INTEGER), I), RSHIFT(CAST(1 AS INTEGER), I), + URSHIFT(CAST(-2147483648 AS INTEGER), I), URSHIFT(CAST(1 AS INTEGER), I) + FROM + (VALUES -111, -32, -31, -1, 0, 1, 31, 32, 111) T(I) ORDER BY I; +> I RSHIFT(-2147483648, I) RSHIFT(1, I) URSHIFT(-2147483648, I) URSHIFT(1, I) +> ---- ---------------------- ------------ ----------------------- ------------- +> -111 0 0 0 0 +> -32 0 0 0 0 +> -31 0 -2147483648 0 -2147483648 +> -1 0 2 0 2 +> 0 -2147483648 1 -2147483648 1 +> 1 -1073741824 0 1073741824 0 +> 31 -1 0 1 0 +> 32 -1 0 0 0 +> 111 -1 0 0 0 +> rows (ordered): 9 + +SELECT I, + RSHIFT(CAST(-9223372036854775808 AS BIGINT), I), RSHIFT(CAST(1 AS BIGINT), I), + 
URSHIFT(CAST(-9223372036854775808 AS BIGINT), I), URSHIFT(CAST(1 AS BIGINT), I) + FROM + (VALUES -111, -64, -63, -1, 0, 1, 63, 64, 111) T(I) ORDER BY I; +> I RSHIFT(-9223372036854775808, I) RSHIFT(1, I) URSHIFT(-9223372036854775808, I) URSHIFT(1, I) +> ---- ------------------------------- -------------------- -------------------------------- -------------------- +> -111 0 0 0 0 +> -64 0 0 0 0 +> -63 0 -9223372036854775808 0 -9223372036854775808 +> -1 0 2 0 2 +> 0 -9223372036854775808 1 -9223372036854775808 1 +> 1 -4611686018427387904 0 4611686018427387904 0 +> 63 -1 0 1 0 +> 64 -1 0 0 0 +> 111 -1 0 0 0 +> rows (ordered): 9 + +SELECT RSHIFT(X'', 1); +>> X'' + +SELECT RSHIFT(CAST(X'02' AS BINARY), 1); +>> X'01' + +SELECT I, RSHIFT(X'80ABCD09', I) FROM + (VALUES -33, -32, -31, -17, -16, -15, -1, 0, 1, 15, 16, 17, 31, 32, 33) T(I) ORDER BY I; +> I RSHIFT(X'80abcd09', I) +> --- ---------------------- +> -33 X'00000000' +> -32 X'00000000' +> -31 X'80000000' +> -17 X'9a120000' +> -16 X'cd090000' +> -15 X'e6848000' +> -1 X'01579a12' +> 0 X'80abcd09' +> 1 X'4055e684' +> 15 X'00010157' +> 16 X'000080ab' +> 17 X'00004055' +> 31 X'00000001' +> 32 X'00000000' +> 33 X'00000000' +> rows (ordered): 15 + +SELECT RSHIFT(-1, -9223372036854775808); +>> 0 + +SELECT URSHIFT(-1, -9223372036854775808); +>> 0 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/secure-rand.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/secure-rand.sql index 008a885806..a083f92c9e 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/secure-rand.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/secure-rand.sql @@ -1,4 +1,13 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +SELECT SECURE_RAND(NULL); +>> null + +SELECT OCTET_LENGTH(SECURE_RAND(0)); +>> 1 + +SELECT OCTET_LENGTH(SECURE_RAND(2)); +>> 2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/sign.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/sign.sql index a7f210510f..2138f8f2be 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/sign.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/sign.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select sign(null) en, sign(10) e1, sign(0) e0, sign(-0.1) em1 from test; +select sign(null) en, sign(10) e1, sign(0) e0, sign(-0.1) em1; > EN E1 E0 EM1 > ---- -- -- --- > null 1 0 -1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/sin.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/sin.sql index e1e4d07560..f2f1146407 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/sin.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/sin.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select sin(null) vn, sin(-1) r1 from test; +select sin(null) vn, sin(-1) r1; > VN R1 > ---- ------------------- > null -0.8414709848078965 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/sinh.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/sinh.sql index 008a885806..2186ea8d20 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/sinh.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/sinh.sql @@ -1,4 +1,10 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- + +CALL SINH(1); +>> 1.1752011936438014 + +CALL SINH(50); +>> 2.592352764293536E21 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/sqrt.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/sqrt.sql index 45ba050c05..4a96f3a0a5 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/sqrt.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/sqrt.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select sqrt(null) vn, sqrt(0) e0, sqrt(1) e1, sqrt(4) e2, sqrt(100) e10, sqrt(0.25) e05 from test; +select sqrt(null) vn, sqrt(0) e0, sqrt(1) e1, sqrt(4) e2, sqrt(100) e10, sqrt(0.25) e05; > VN E0 E1 E2 E10 E05 > ---- --- --- --- ---- --- > null 0.0 1.0 2.0 10.0 0.5 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/tan.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/tan.sql index 6e5402b175..13bcd44e32 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/tan.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/tan.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select tan(null) vn, tan(-1) r1 from test; +select tan(null) vn, tan(-1) r1; > VN R1 > ---- ------------------- > null -1.5574077246549023 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/tanh.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/tanh.sql index 008a885806..b6765cc3dc 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/tanh.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/tanh.sql @@ -1,4 +1,10 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +CALL TANH(1); +>> 0.7615941559557649 + +CALL TANH(50); +>> 1.0 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/truncate.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/truncate.sql index bc5dc50ed6..0dbe8c9d3c 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/truncate.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/truncate.sql @@ -1,25 +1,131 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok +SELECT TRUNCATE(1.234, 2); +>> 1.23 + +SELECT TRUNCATE(DATE '2011-03-05'); +>> 2011-03-05 00:00:00 + +SELECT TRUNCATE(TIMESTAMP '2011-03-05 02:03:04'); +>> 2011-03-05 00:00:00 + +SELECT TRUNCATE(TIMESTAMP WITH TIME ZONE '2011-03-05 02:03:04+07'); +>> 2011-03-05 00:00:00+07 + +SELECT TRUNCATE(CURRENT_DATE, 1); +> exception INVALID_PARAMETER_COUNT_2 + +SELECT TRUNCATE(LOCALTIMESTAMP, 1); +> exception INVALID_PARAMETER_COUNT_2 + +SELECT TRUNCATE(CURRENT_TIMESTAMP, 1); +> exception INVALID_PARAMETER_COUNT_2 + +SELECT TRUNCATE('2011-03-05 02:03:04', 1); +> exception INVALID_PARAMETER_COUNT_2 + +SELECT TRUNCATE('bad'); +> exception INVALID_DATETIME_CONSTANT_2 -insert into test values(1, 'Hello'); -> update count: 1 +SELECT TRUNCATE(1, 2, 3); +> exception SYNTAX_ERROR_2 -select truncate(null, null) en, truncate(1.99, 0) e1, truncate(-10.9, 0) em10 from test; -> EN E1 EM10 -> ---- --- ----- -> null 1.0 -10.0 +select truncate(null, null) en, truncate(1.99, 0) e1, truncate(-10.9, 0) em10; +> EN E1 EM10 +> ---- -- ---- +> null 1 -10 > rows: 1 -select trunc(null, null) en, trunc(1.99, 0) e1, trunc(-10.9, 0) em10 from test; -> EN E1 EM10 -> ---- --- ----- -> null 1.0 -10.0 +select 
trunc(null, null) en, trunc(1.99, 0) e1, trunc(-10.9, 0) em10; +> EN E1 EM10 +> ---- -- ---- +> null 1 -10 > rows: 1 select trunc(1.3); ->> 1.0 +>> 1 + +SELECT TRUNCATE(1.3) IS OF (NUMERIC); +>> TRUE + +SELECT TRUNCATE(CAST(1.3 AS DOUBLE)) IS OF (DOUBLE); +>> TRUE + +SELECT TRUNCATE(CAST(1.3 AS REAL)) IS OF (REAL); +>> TRUE + +SELECT TRUNCATE(1.99, 0), TRUNCATE(1.99, 1), TRUNCATE(-1.99, 0), TRUNCATE(-1.99, 1); +> 1 1.9 -1 -1.9 +> - --- -- ---- +> 1 1.9 -1 -1.9 +> rows: 1 + +SELECT TRUNCATE(1.99::DOUBLE, 0), TRUNCATE(1.99::DOUBLE, 1), TRUNCATE(-1.99::DOUBLE, 0), TRUNCATE(-1.99::DOUBLE, 1); +> 1.0 1.9 -1.0 -1.9 +> --- --- ---- ---- +> 1.0 1.9 -1.0 -1.9 +> rows: 1 + +SELECT TRUNCATE(1.99::REAL, 0), TRUNCATE(1.99::REAL, 1), TRUNCATE(-1.99::REAL, 0), TRUNCATE(-1.99::REAL, 1); +> 1.0 1.9 -1.0 -1.9 +> --- --- ---- ---- +> 1.0 1.9 -1.0 -1.9 +> rows: 1 + +SELECT TRUNCATE(V, S) FROM (VALUES (1.111, 1)) T(V, S); +>> 1.100 + +SELECT TRUNC(1, 10000000); +>> 1 + +CREATE TABLE T1(N NUMERIC(10, 2), D DECFLOAT(10), I INTEGER) AS VALUES (99999999.99, 99999999.99, 10); +> ok + +SELECT TRUNC(N, -1) NN, TRUNC(N) N0, TRUNC(N, 1) N1, TRUNC(N, 2) N2, TRUNC(N, 3) N3, TRUNC(N, 10000000) NL, + TRUNC(D) D0, TRUNC(D, 2) D2, TRUNC(D, 3) D3, + TRUNC(I) I0, TRUNC(I, 1) I1, TRUNC(I, I) II FROM T1; +> NN N0 N1 N2 N3 NL D0 D2 D3 I0 I1 II +> -------- -------- ---------- ----------- ----------- ----------- -------- ----------- ----------- -- -- -- +> 99999990 99999999 99999999.9 99999999.99 99999999.99 99999999.99 99999999 99999999.99 99999999.99 10 10 10 +> rows: 1 + +CREATE TABLE T2 AS SELECT TRUNC(N, -1) NN, TRUNC(N) N0, TRUNC(N, 1) N1, TRUNC(N, 2) N2, TRUNC(N, 3) N3, TRUNC(N, 10000000) NL, + TRUNC(D) D0, TRUNC(D, 2) D2, TRUNC(D, 3) D3, + TRUNC(I) I0, TRUNC(I, 1) I1, TRUNC(I, I) II FROM T1; +> ok + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'T2' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION 
NUMERIC_SCALE +> ----------- --------- ----------------- ------------- +> NN NUMERIC 8 0 +> N0 NUMERIC 8 0 +> N1 NUMERIC 9 1 +> N2 NUMERIC 10 2 +> N3 NUMERIC 10 2 +> NL NUMERIC 10 2 +> D0 DECFLOAT 10 null +> D2 DECFLOAT 10 null +> D3 DECFLOAT 10 null +> I0 INTEGER 32 0 +> I1 INTEGER 32 0 +> II INTEGER 32 0 +> rows (ordered): 12 + +DROP TABLE T1; +> ok + +SELECT TRUNC(11, -1) I, TRUNC(CAST(11 AS NUMERIC(2)), -1) N; +> I N +> -- -- +> 10 10 +> rows: 1 + +SELECT TRUNC(11, -2) I, TRUNC(CAST(11 AS NUMERIC(2)), -2) N; +> I N +> - - +> 0 0 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/zero.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/zero.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/numeric/zero.sql +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/zero.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/array-to-string.sql b/h2/src/test/org/h2/test/scripts/functions/string/array-to-string.sql new file mode 100644 index 0000000000..7ca0767798 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/array-to-string.sql @@ -0,0 +1,34 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +set mode PostgreSQL; +> ok + +select array_to_string(array[null, 0, 1, null, 2], ','); +>> 0,1,2 + +select array_to_string(array['a', null, '', 'b', null], ',', null); +>> a,,b + +select array_to_string(array[null, 0, 1, null, 2], ',', '*'); +>> *,0,1,*,2 + +select array_to_string(array['a', null, '', 'b', null], ',', '*'); +>> a,*,,b,* + +select array_to_string(array[1, null, 3], 0, 2); +>> 10203 + +select array_to_string(null, 0, 2); +>> null + +select array_to_string(array[1, null, 3], null, 2); +>> null + +select array_to_string(0, ','); +> exception INVALID_VALUE_2 + +set mode Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/string/ascii.sql b/h2/src/test/org/h2/test/scripts/functions/string/ascii.sql index dc13ef0638..17fa38db98 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/ascii.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/ascii.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select ascii(null) en, ascii('') en, ascii('Abc') e65 from test; +select ascii(null) en, ascii('') en, ascii('Abc') e65; > EN EN E65 > ---- ---- --- > null null 65 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/bit-length.sql b/h2/src/test/org/h2/test/scripts/functions/string/bit-length.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/bit-length.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/bit-length.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/char.sql b/h2/src/test/org/h2/test/scripts/functions/string/char.sql index ab49f9add8..53bb3c5e93 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/char.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/char.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select char(null) en, char(65) ea from test; +select char(null) en, char(65) ea; > EN EA > ---- -- > null A diff --git a/h2/src/test/org/h2/test/scripts/functions/string/concat-ws.sql b/h2/src/test/org/h2/test/scripts/functions/string/concat-ws.sql index 008a885806..ec647763a6 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/concat-ws.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/concat-ws.sql @@ -1,4 +1,16 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +SELECT CONCAT_WS(NULL, NULL, 'a', NULL, 'b', NULL); +>> ab + +SELECT CONCAT_WS('*', NULL, 'a', NULL, 'b', NULL); +>> a*b + +SELECT CONCAT_WS('*', '', 'a', NULL, 'b', NULL); +>> *a*b + +SELECT '[' || CONCAT_WS('a', NULL, NULL) || ']'; +>> [] diff --git a/h2/src/test/org/h2/test/scripts/functions/string/concat.sql b/h2/src/test/org/h2/test/scripts/functions/string/concat.sql index be3580c306..4b1b73562d 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/concat.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/concat.sql @@ -1,17 +1,12 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok -insert into test values(1, 'Hello'); -> update count: 1 - -select concat(null, null) en, concat(null, 'a') ea, concat('b', null) eb, concat('ab', 'c') abc from test; -> EN EA EB ABC -> ---- -- -- --- -> null a b abc +select concat(null, null) en, concat(null, 'a') ea, concat('b', null) eb, concat('ab', 'c') abc; +> EN EA EB ABC +> -- -- -- --- +> a b abc > rows: 1 SELECT CONCAT('a', 'b', 'c', 'd'); diff --git a/h2/src/test/org/h2/test/scripts/functions/string/difference.sql b/h2/src/test/org/h2/test/scripts/functions/string/difference.sql index 254548ff5d..4853dfe1f0 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/difference.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/difference.sql @@ -1,21 +1,15 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select difference(null, null) en, difference('a', null) en1, difference(null, 'a') en2 from test; +select difference(null, null) en, difference('a', null) en1, difference(null, 'a') en2; > EN EN1 EN2 > ---- ---- ---- > null null null > rows: 1 -select difference('abc', 'abc') e0, difference('Thomas', 'Tom') e1 from test; +select difference('abc', 'abc') e0, difference('Thomas', 'Tom') e1; > E0 E1 > -- -- > 4 3 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/hextoraw.sql b/h2/src/test/org/h2/test/scripts/functions/string/hextoraw.sql index 7e9fcebc7b..95ea6902d5 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/hextoraw.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/hextoraw.sql @@ -1,16 +1,25 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select hextoraw(null) en, rawtohex(null) en1, hextoraw(rawtohex('abc')) abc from test; +select hextoraw(null) en, rawtohex(null) en1, hextoraw(rawtohex('abc')) abc; > EN EN1 ABC > ---- ---- --- > null null abc > rows: 1 + +SELECT HEXTORAW('0049'); +>> I + +SET MODE Oracle; +> ok + +SELECT HEXTORAW('0049'); +>> X'0049' + +SELECT HEXTORAW('0049') IS OF (RAW); +>> TRUE + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/string/insert.sql b/h2/src/test/org/h2/test/scripts/functions/string/insert.sql index 33c23407d5..d24cb58e4e 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/insert.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/insert.sql @@ -1,22 +1,19 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select insert(null, null, null, null) en, insert('Rund', 1, 0, 'o') e_round, insert(null, 1, 1, 'a') ea from test; +select insert(null, null, null, null) en, insert('Rund', 1, 0, 'o') e_round, insert(null, 1, 1, 'a') ea; > EN E_ROUND EA > ---- ------- -- > null Rund a > rows: 1 -select insert('World', 2, 4, 'e') welt, insert('Hello', 2, 1, 'a') hallo from test; +select insert('World', 2, 4, 'e') welt, insert('Hello', 2, 1, 'a') hallo; > WELT HALLO > ---- ----- > We Hallo > rows: 1 + +SELECT INSERT(NULL, 0, 0, NULL); +>> null diff --git a/h2/src/test/org/h2/test/scripts/functions/string/instr.sql b/h2/src/test/org/h2/test/scripts/functions/string/instr.sql deleted file mode 100644 index 19f1cf4465..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/string/instr.sql +++ /dev/null @@ -1,16 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- - -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select instr('Hello World', 'World') e7, instr('abchihihi', 'hi', 2) e3, instr('abcooo', 'o') e2 from test; -> E7 E3 E2 -> -- -- -- -> 7 4 4 -> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/left.sql b/h2/src/test/org/h2/test/scripts/functions/string/left.sql index 9cb8682db8..fcf92c16ac 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/left.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/left.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select left(null, 10) en, left('abc', null) en2, left('boat', 2) e_bo, left('', 1) ee, left('a', -1) ee2 from test; +select left(null, 10) en, left('abc', null) en2, left('boat', 2) e_bo, left('', 1) ee, left('a', -1) ee2; > EN EN2 E_BO EE EE2 > ---- ---- ---- -- --- > null null bo diff --git a/h2/src/test/org/h2/test/scripts/functions/string/length.sql b/h2/src/test/org/h2/test/scripts/functions/string/length.sql index f5dd4a9726..ebf2bae84d 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/length.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/length.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select length(null) en, length('This has 17 chars') e_17 from test; +select length(null) en, length('This has 17 chars') e_17; > EN E_17 > ---- ---- > null 17 @@ -21,11 +15,17 @@ SELECT LEN(NULL); SET MODE MSSQLServer; > ok -select len(null) en, len('MSSQLServer uses the len keyword') e_32 from test; +select len(null) en, len('MSSQLServer uses the len keyword') e_32; > EN E_32 > ---- ---- > null 32 > rows: 1 +SELECT LEN('A '); +>> 2 + +SELECT LEN(CAST('A ' AS CHAR(2))); +>> 1 + SET MODE Regular; > ok diff --git a/h2/src/test/org/h2/test/scripts/functions/string/locate.sql b/h2/src/test/org/h2/test/scripts/functions/string/locate.sql index f5efd9e143..fe1bf6dd12 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/locate.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/locate.sql @@ -1,21 +1,15 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select locate(null, null) en, locate(null, null, null) en1 from test; +select locate(null, null) en, locate(null, null, null) en1; > EN EN1 > ---- ---- > null null > rows: 1 -select locate('World', 'Hello World') e7, locate('hi', 'abchihihi', 2) e3 from test; +select locate('World', 'Hello World') e7, locate('hi', 'abchihihi', 2) e3; > E7 E3 > -- -- > 7 4 @@ -27,7 +21,7 @@ SELECT CHARINDEX('test', 'test'); SET MODE MSSQLServer; > ok -select charindex('World', 'Hello World') e7, charindex('hi', 'abchihihi', 2) e3 from test; +select charindex('World', 'Hello World') e7, charindex('hi', 'abchihihi', 2) e3; > E7 E3 > -- -- > 7 4 @@ -35,3 +29,21 @@ select charindex('World', 'Hello World') e7, charindex('hi', 'abchihihi', 2) e3 SET MODE Regular; > ok + +select instr('Hello World', 'World') e7, instr('abchihihi', 'hi', 2) e3, instr('abcooo', 'o') e2; +> E7 E3 E2 +> -- -- -- +> 7 4 4 +> rows: 1 + +EXPLAIN SELECT INSTR(A, B) FROM (VALUES ('A', 'B')) T(A, B); +>> SELECT LOCATE("B", "A") FROM (VALUES ('A', 'B')) "T"("A", "B") /* table scan */ + +select position(null, null) en, position(null, 'abc') en1, position('World', 'Hello World') e7, position('hi', 'abchihihi') e1; +> EN EN1 E7 E1 +> ---- ---- -- -- +> null null 7 4 +> rows: 1 + +EXPLAIN SELECT POSITION((A > B), C) FROM (VALUES (1, 2, 3)) T(A, B, C); +>> SELECT LOCATE("A" > "B", "C") FROM (VALUES (1, 2, 3)) "T"("A", "B", "C") /* table scan */ diff --git a/h2/src/test/org/h2/test/scripts/functions/string/lower.sql b/h2/src/test/org/h2/test/scripts/functions/string/lower.sql index f8edced911..73138cf357 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/lower.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/lower.sql @@ -1,21 +1,15 @@ --- Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select lower(null) en, lower('Hello') hello, lower('ABC') abc from test; +select lower(null) en, lower('Hello') hello, lower('ABC') abc; > EN HELLO ABC > ---- ----- --- > null hello abc > rows: 1 -select lcase(null) en, lcase('Hello') hello, lcase('ABC') abc from test; +select lcase(null) en, lcase('Hello') hello, lcase('ABC') abc; > EN HELLO ABC > ---- ----- --- > null hello abc diff --git a/h2/src/test/org/h2/test/scripts/functions/string/lpad.sql b/h2/src/test/org/h2/test/scripts/functions/string/lpad.sql index acbd29dd47..41c69ebb20 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/lpad.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/lpad.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/ltrim.sql b/h2/src/test/org/h2/test/scripts/functions/string/ltrim.sql index 24d15ea9d1..daf8e3e101 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/ltrim.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/ltrim.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select ltrim(null) en, '>' || ltrim('a') || '<' ea, '>' || ltrim(' a ') || '<' e_as from test; +select ltrim(null) en, '>' || ltrim('a') || '<' ea, '>' || ltrim(' a ') || '<' e_as; > EN EA E_AS > ---- --- ---- > null >a< >a < diff --git a/h2/src/test/org/h2/test/scripts/functions/string/octet-length.sql b/h2/src/test/org/h2/test/scripts/functions/string/octet-length.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/octet-length.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/octet-length.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/position.sql b/h2/src/test/org/h2/test/scripts/functions/string/position.sql deleted file mode 100644 index 3c71506fcb..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/string/position.sql +++ /dev/null @@ -1,16 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
--- Initial Developer: H2 Group --- - -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select position(null, null) en, position(null, 'abc') en1, position('World', 'Hello World') e7, position('hi', 'abchihihi') e1 from test; -> EN EN1 E7 E1 -> ---- ---- -- -- -> null null 7 4 -> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/quote_ident.sql b/h2/src/test/org/h2/test/scripts/functions/string/quote_ident.sql new file mode 100644 index 0000000000..8c8b946308 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/quote_ident.sql @@ -0,0 +1,16 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT QUOTE_IDENT(NULL); +>> null + +SELECT QUOTE_IDENT(''); +>> "" + +SELECT QUOTE_IDENT('a'); +>> "a" + +SELECT QUOTE_IDENT('"a""A"'); +>> """a""""A""" diff --git a/h2/src/test/org/h2/test/scripts/functions/string/rawtohex.sql b/h2/src/test/org/h2/test/scripts/functions/string/rawtohex.sql index 008a885806..05e418b045 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/rawtohex.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/rawtohex.sql @@ -1,4 +1,28 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +SELECT RAWTOHEX('A'); +>> 0041 + +SELECT RAWTOHEX('Az'); +>> 0041007a + +SET MODE Oracle; +> ok + +SELECT RAWTOHEX('A'); +>> 41 + +SELECT RAWTOHEX('Az'); +>> 417a + +SET MODE Regular; +> ok + +SELECT RAWTOHEX(X'12fe'); +>> 12fe + +SELECT RAWTOHEX('12345678-9abc-def0-0123-456789abcdef'::UUID); +>> 123456789abcdef00123456789abcdef diff --git a/h2/src/test/org/h2/test/scripts/functions/string/regex-replace.sql b/h2/src/test/org/h2/test/scripts/functions/string/regex-replace.sql index be3616d635..24a51ec6c7 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/regex-replace.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/regex-replace.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -15,6 +15,27 @@ select regexp_replace('Sylvain', 'S..', 'TOTO', 'mni'); set mode oracle; > ok +select regexp_replace('.1.2.3.4', '[^0-9]', '', 1, 0); +>> 1234 + +select regexp_replace('.1.2.3.4', '[^0-9]', '', 1, 1); +>> 1.2.3.4 + +select regexp_replace('.1.2.3.4', '[^0-9]', '', 1, 2); +>> .12.3.4 + +select regexp_replace('.1.2.3.4', '[^0-9]', '', 3, 2); +>> .1.23.4 + +select regexp_replace('', '[^0-9]', '', 3, 2); +>> null + +select regexp_replace('ababab', '', '', 3, 2); +>> ababab + +select regexp_replace('ababab', '', '', 3, 2, ''); +>> ababab + select regexp_replace('first last', '(\w+) (\w+)', '\2 \1'); >> last first diff --git a/h2/src/test/org/h2/test/scripts/functions/string/regexp-like.sql b/h2/src/test/org/h2/test/scripts/functions/string/regexp-like.sql index f0a80ee671..5f86d7f67d 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/regexp-like.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/regexp-like.sql @@ -1,15 +1,13 @@ --- 
Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- call select 1 from dual where regexp_like('x', 'x', '\'); > exception INVALID_VALUE_2 -select x from dual where REGEXP_LIKE('A', '[a-z]', 'i'); ->> 1 +CALL REGEXP_LIKE('A', '[a-z]', 'i'); +>> TRUE -select x from dual where REGEXP_LIKE('A', '[a-z]', 'c'); -> X -> - -> rows: 0 +CALL REGEXP_LIKE('A', '[a-z]', 'c'); +>> FALSE diff --git a/h2/src/test/org/h2/test/scripts/functions/string/regexp-substr.sql b/h2/src/test/org/h2/test/scripts/functions/string/regexp-substr.sql new file mode 100644 index 0000000000..b7c984a423 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/regexp-substr.sql @@ -0,0 +1,83 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- case insensitive matches upper case +CALL REGEXP_SUBSTR('A', '[a-z]', 1, 1, 'i'); +>> A + +-- case sensitive does not match upper case +CALL REGEXP_SUBSTR('A', '[a-z]', 1, 1, 'c'); +>> null + +-- match string from position at string index 3 +CALL REGEXP_SUBSTR('help helpful', 'help.*', 3); +>> helpful + +-- match string from position at string index 6 +CALL REGEXP_SUBSTR('help helpful helping', 'help.*', 7); +>> helping + +-- should return first occurrence +CALL REGEXP_SUBSTR('helpful helping', 'help\w*', 1, 1); +>> helpful + +-- should return second occurrence +CALL REGEXP_SUBSTR('helpful helping', 'help\w*', 1, 2); +>> helping + +-- should return third occurrence +CALL REGEXP_SUBSTR('help helpful helping', 'help\w*', 1, 3); +>> helping + +-- should return first occurrence, after string at index 3 +CALL REGEXP_SUBSTR('help helpful helping', 'help\w*', 3, 1); +>> helpful + +-- should first matching group +CALL REGEXP_SUBSTR('help helpful helping', '(help\w*)', 1, 1, NULL, 1); +>> help + +-- should second occurrence of first group +CALL REGEXP_SUBSTR('help helpful helping', '(help\w*)', 1, 2, NULL, 1); +>> helpful + +-- should second group +CALL REGEXP_SUBSTR('2020-10-01', '(\d{4})-(\d{2})-(\d{2})', 1, 1, NULL, 2); +>> 10 + +-- should third group +CALL REGEXP_SUBSTR('2020-10-01', '(\d{4})-(\d{2})-(\d{2})', 1, 1, NULL, 3); +>> 01 + +CALL REGEXP_SUBSTR('2020-10-01', '\d{4}'); +>> 2020 + +-- Test variants of passing NULL, which should always result in NULL result +CALL REGEXP_SUBSTR('2020-10-01', NULL); +>> null + +CALL REGEXP_SUBSTR(NULL, '\d{4}'); +>> null + +CALL REGEXP_SUBSTR(NULL, NULL); +>> null + +CALL REGEXP_SUBSTR('2020-10-01', '\d{4}', NULL); +>> null + +CALL REGEXP_SUBSTR('2020-10-01', '\d{4}', 1, NULL); +>> null + +CALL REGEXP_SUBSTR('2020-10-01', '\d{4}', 1, 1, NULL, NULL); +>> null + +-- Index out of bounds +CALL REGEXP_SUBSTR('2020-10-01', '(\d{4})', 1, 1, NULL, 10); +>> null + +-- Illegal regexp pattern +CALL 
REGEXP_SUBSTR('2020-10-01', '\d{a}'); +> exception LIKE_ESCAPE_ERROR_1 + diff --git a/h2/src/test/org/h2/test/scripts/functions/string/repeat.sql b/h2/src/test/org/h2/test/scripts/functions/string/repeat.sql index eb6d14a9a6..68b06222e0 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/repeat.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/repeat.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select repeat(null, null) en, repeat('Ho', 2) abcehoho , repeat('abc', 0) ee from test; +select repeat(null, null) en, repeat('Ho', 2) abcehoho , repeat('abc', 0) ee; > EN ABCEHOHO EE > ---- -------- -- > null HoHo diff --git a/h2/src/test/org/h2/test/scripts/functions/string/replace.sql b/h2/src/test/org/h2/test/scripts/functions/string/replace.sql index f988ce977a..19966c332c 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/replace.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/replace.sql @@ -1,21 +1,15 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select replace(null, null) en, replace(null, null, null) en1 from test; +select replace(null, null) en, replace(null, null, null) en1; > EN EN1 > ---- ---- > null null > rows: 1 -select replace('abchihihi', 'i', 'o') abcehohoho, replace('that is tom', 'i') abcethstom from test; +select replace('abchihihi', 'i', 'o') abcehohoho, replace('that is tom', 'i') abcethstom; > ABCEHOHOHO ABCETHSTOM > ---------- ---------- > abchohoho that s tom diff --git a/h2/src/test/org/h2/test/scripts/functions/string/right.sql b/h2/src/test/org/h2/test/scripts/functions/string/right.sql index e11c2e8534..c56fdca00c 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/right.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/right.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select right(null, 10) en, right('abc', null) en2, right('boat-trip', 2) e_ip, right('', 1) ee, right('a', -1) ee2 from test; +select right(null, 10) en, right('abc', null) en2, right('boat-trip', 2) e_ip, right('', 1) ee, right('a', -1) ee2; > EN EN2 E_IP EE EE2 > ---- ---- ---- -- --- > null null ip diff --git a/h2/src/test/org/h2/test/scripts/functions/string/rpad.sql b/h2/src/test/org/h2/test/scripts/functions/string/rpad.sql index e000d8f3a3..0d7e635657 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/rpad.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/rpad.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/rtrim.sql b/h2/src/test/org/h2/test/scripts/functions/string/rtrim.sql index 9a0ebd655e..a216fd6805 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/rtrim.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/rtrim.sql @@ -1,19 +1,13 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select rtrim(null) en, '>' || rtrim('a') || '<' ea, '>' || rtrim(' a ') || '<' es from test; +select rtrim(null) en, '>' || rtrim('a') || '<' ea, '>' || rtrim(' a ') || '<' es; > EN EA ES > ---- --- ---- > null >a< > a< > rows: 1 select rtrim() from dual; -> exception INVALID_PARAMETER_COUNT_2 +> exception SYNTAX_ERROR_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/soundex.sql b/h2/src/test/org/h2/test/scripts/functions/string/soundex.sql index 6cc4c52083..fec64ae3c5 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/soundex.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/soundex.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select soundex(null) en, soundex('tom') et from test; +select soundex(null) en, soundex('tom') et; > EN ET > ---- ---- > null t500 @@ -19,7 +13,7 @@ select soundex('Washington') W252, soundex('Lee') L000, soundex('Gutierrez') G362, soundex('Pfister') P236, soundex('Jackson') J250, soundex('Tymczak') T522, -soundex('VanDeusen') V532, soundex('Ashcraft') A261 from test; +soundex('VanDeusen') V532, soundex('Ashcraft') A261; > W252 L000 G362 P236 J250 T522 V532 A261 > ---- ---- ---- ---- ---- ---- ---- ---- > W252 L000 G362 P236 J250 T522 V532 A261 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/space.sql b/h2/src/test/org/h2/test/scripts/functions/string/space.sql index d00d7ee50e..867bd74657 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/space.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/space.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select space(null) en, '>' || space(1) || '<' es, '>' || space(3) || '<' e2 from test; +select space(null) en, '>' || space(1) || '<' es, '>' || space(3) || '<' e2; > EN ES E2 > ---- --- --- > null > < > < diff --git a/h2/src/test/org/h2/test/scripts/functions/string/stringdecode.sql b/h2/src/test/org/h2/test/scripts/functions/string/stringdecode.sql index 008a885806..3a2b439aec 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/stringdecode.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/stringdecode.sql @@ -1,4 +1,22 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- + +SELECT STRINGDECODE('\7'); +> exception STRING_FORMAT_ERROR_1 + +SELECT STRINGDECODE('\17'); +> exception STRING_FORMAT_ERROR_1 + +SELECT STRINGDECODE('\117'); +>> O + +SELECT STRINGDECODE('\178'); +> exception STRING_FORMAT_ERROR_1 + +SELECT STRINGDECODE('\u111'); +> exception STRING_FORMAT_ERROR_1 + +SELECT STRINGDECODE('\u0057'); +>> W diff --git a/h2/src/test/org/h2/test/scripts/functions/string/stringencode.sql b/h2/src/test/org/h2/test/scripts/functions/string/stringencode.sql index 516fb4f0e1..72274a9474 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/stringencode.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/stringencode.sql @@ -1,13 +1,10 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -INSERT INTO TEST VALUES(2, STRINGDECODE('abcsond\344rzeich\344 ') || char(22222) || STRINGDECODE(' \366\344\374\326\304\334\351\350\340\361!')); -> update count: 1 +SELECT STRINGENCODE(STRINGDECODE('abcsond\344rzeich\344 ') || char(22222) || STRINGDECODE(' \366\344\374\326\304\334\351\350\340\361!')); +>> abcsond\u00e4rzeich\u00e4 \u56ce \u00f6\u00e4\u00fc\u00d6\u00c4\u00dc\u00e9\u00e8\u00e0\u00f1! call STRINGENCODE(STRINGDECODE('abcsond\344rzeich\344 \u56ce \366\344\374\326\304\334\351\350\340\361!')); >> abcsond\u00e4rzeich\u00e4 \u56ce \u00f6\u00e4\u00fc\u00d6\u00c4\u00dc\u00e9\u00e8\u00e0\u00f1! diff --git a/h2/src/test/org/h2/test/scripts/functions/string/stringtoutf8.sql b/h2/src/test/org/h2/test/scripts/functions/string/stringtoutf8.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/stringtoutf8.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/stringtoutf8.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/substring.sql b/h2/src/test/org/h2/test/scripts/functions/string/substring.sql index 04e0035a6e..624fc9643b 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/substring.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/substring.sql @@ -1,27 +1,21 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select substr(null, null) en, substr(null, null, null) e1, substr('bob', 2) e_ob, substr('bob', 2, 1) eo from test; +select substr(null, null) en, substr(null, null, null) e1, substr('bob', 2) e_ob, substr('bob', 2, 1) eo; > EN E1 E_OB EO > ---- ---- ---- -- > null null ob o > rows: 1 -select substring(null, null) en, substring(null, null, null) e1, substring('bob', 2) e_ob, substring('bob', 2, 1) eo from test; +select substring(null, null) en, substring(null, null, null) e1, substring('bob', 2) e_ob, substring('bob', 2, 1) eo; > EN E1 E_OB EO > ---- ---- ---- -- > null null ob o > rows: 1 -select substring(null from null) en, substring(null from null for null) e1, substring('bob' from 2) e_ob, substring('bob' from 2 for 1) eo from test; +select substring(null from null) en, substring(null from null for null) e1, substring('bob' from 2) e_ob, substring('bob' from 2 for 1) eo; > EN E1 E_OB EO > ---- ---- ---- -- > null null ob o @@ -30,5 +24,59 @@ select substring(null from null) en, substring(null from null for null) e1, subs select substr('[Hello]', 2, 5); >> Hello +-- Compatibility syntax select substr('Hello World', -5); >> World + +-- Compatibility +SELECT SUBSTRING('X', 0, 1); +>> X + +CREATE TABLE TEST(STR VARCHAR, START INT, LEN INT); +> ok + +EXPLAIN SELECT SUBSTRING(STR FROM START), SUBSTRING(STR FROM START FOR LEN) FROM TEST; +>> SELECT SUBSTRING("STR" FROM "START"), SUBSTRING("STR" FROM "START" FOR "LEN") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +SELECT SUBSTRING('AAA' FROM 4 FOR 1); +> '' +> -- +> +> rows: 1 + +SELECT SUBSTRING(X'001122' FROM 1 FOR 3); +>> X'001122' + +SELECT SUBSTRING(X'001122' FROM 1 FOR 2); +>> X'0011' + +SELECT SUBSTRING(X'001122' FROM 2 FOR 2); +>> X'1122' + +SELECT SUBSTRING(X'001122' FROM 4 FOR 1); +>> X'' + +SELECT 
SUBSTRING(X'001122' FROM 2 FOR 1); +>> X'11' + +CREATE MEMORY TABLE TEST AS (VALUES SUBSTRING(X'0011' FROM 2)); +> ok + +-- Compatibility +SELECT SUBSTRING(X'00', 0, 1); +>> X'00' + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> -------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "C1" BINARY VARYING(1) ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (X'11'); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/string/to-char.sql b/h2/src/test/org/h2/test/scripts/functions/string/to-char.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/to-char.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/to-char.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/translate.sql b/h2/src/test/org/h2/test/scripts/functions/string/translate.sql index 008a885806..4e9207a0fd 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/translate.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/translate.sql @@ -1,4 +1,37 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +CREATE TABLE testTranslate(id BIGINT, txt1 VARCHAR); +> ok + +INSERT INTO testTranslate(id, txt1) values(1, 'test1'), (2, NULL), (3, ''), (4, 'caps'); +> update count: 4 + +SELECT TRANSLATE(txt1, 'p', 'r') FROM testTranslate ORDER BY id; +> TRANSLATE(TXT1, 'p', 'r') +> ------------------------- +> test1 +> null +> +> cars +> rows (ordered): 4 + +SET MODE DB2; +> ok + +SELECT TRANSLATE(txt1, 'p', 'r') FROM testTranslate WHERE txt1 = 'caps'; +>> caps + +SELECT TRANSLATE(txt1, 'r', 'p') FROM testTranslate WHERE txt1 = 'caps'; +>> cars + +SET MODE Regular; +> ok + +SELECT TRANSLATE(NULL, NULL, NULL); +>> null + +DROP TABLE testTranslate; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/string/trim.sql b/h2/src/test/org/h2/test/scripts/functions/string/trim.sql index d0a4201a6e..c4d1f535c0 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/trim.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/trim.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -7,15 +7,15 @@ CREATE TABLE TEST(ID INT PRIMARY KEY, A VARCHAR, B VARCHAR, C VARCHAR) AS VALUES > ok SELECT TRIM(BOTH '_' FROM A), '|' || TRIM(LEADING FROM B) || '|', TRIM(TRAILING 'x' FROM C) FROM TEST; -> TRIM('_' FROM A) ('|' || TRIM(LEADING B)) || '|' TRIM(TRAILING 'x' FROM C) -> ---------------- ------------------------------- ------------------------- -> A |B | xA +> TRIM('_' FROM A) '|' || TRIM(LEADING FROM B) || '|' TRIM(TRAILING 'x' FROM C) +> ---------------- ---------------------------------- ------------------------- +> A |B | xA > rows: 1 SELECT LENGTH(TRIM(B)), LENGTH(TRIM(FROM B)) FROM TEST; -> LENGTH(TRIM(B)) LENGTH(TRIM(B)) -> --------------- --------------- -> 1 1 +> CHAR_LENGTH(TRIM(B)) CHAR_LENGTH(TRIM(B)) +> -------------------- -------------------- +> 1 1 > rows: 1 SELECT TRIM(BOTH B) FROM TEST; diff --git a/h2/src/test/org/h2/test/scripts/functions/string/upper.sql b/h2/src/test/org/h2/test/scripts/functions/string/upper.sql index 8d09250cfb..cbdaa1f69c 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/upper.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/upper.sql @@ -1,21 +1,15 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select ucase(null) en, ucase('Hello') hello, ucase('ABC') abc from test; +select ucase(null) en, ucase('Hello') hello, ucase('ABC') abc; > EN HELLO ABC > ---- ----- --- > null HELLO ABC > rows: 1 -select upper(null) en, upper('Hello') hello, upper('ABC') abc from test; +select upper(null) en, upper('Hello') hello, upper('ABC') abc; > EN HELLO ABC > ---- ----- --- > null HELLO ABC diff --git a/h2/src/test/org/h2/test/scripts/functions/string/utf8tostring.sql b/h2/src/test/org/h2/test/scripts/functions/string/utf8tostring.sql index 0bbea84e22..16a45622d8 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/utf8tostring.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/utf8tostring.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmlattr.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmlattr.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/xmlattr.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmlattr.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmlcdata.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmlcdata.sql index 819bc0fa65..278816047c 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/xmlcdata.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmlcdata.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmlcomment.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmlcomment.sql index d657e0810d..9e7721a861 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/xmlcomment.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmlcomment.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmlnode.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmlnode.sql index 532b7cddf6..280b762d15 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/xmlnode.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmlnode.sql @@ -1,10 +1,10 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -CALL XMLNODE('a', XMLATTR('href', 'http://h2database.com')); ->> +CALL XMLNODE('a', XMLATTR('href', 'https://h2database.com')); +>> CALL XMLNODE('br'); >>
          diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmlstartdoc.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmlstartdoc.sql index 4a8accb32f..4f7d8df35f 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/xmlstartdoc.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmlstartdoc.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmltext.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmltext.sql index 9ef6409840..9e2b422849 100644 --- a/h2/src/test/org/h2/test/scripts/functions/string/xmltext.sql +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmltext.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/array-cat.sql b/h2/src/test/org/h2/test/scripts/functions/system/array-cat.sql index 6aa4d29874..b979da1343 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/array-cat.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/array-cat.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/array-contains.sql b/h2/src/test/org/h2/test/scripts/functions/system/array-contains.sql index 2bfd494807..897c24290b 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/array-contains.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/array-contains.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -30,7 +30,7 @@ select array_contains(ARRAY[ARRAY[1, 2], ARRAY[3, 4]], ARRAY[1, 2]); select array_contains(ARRAY[ARRAY[1, 2], ARRAY[3, 4]], ARRAY[5, 6]); >> FALSE -CREATE TABLE TEST (ID INT PRIMARY KEY AUTO_INCREMENT, A ARRAY); +CREATE TABLE TEST (ID INT PRIMARY KEY AUTO_INCREMENT, A INT ARRAY); > ok INSERT INTO TEST (A) VALUES (ARRAY[1L, 2L]), (ARRAY[3L, 4L]); diff --git a/h2/src/test/org/h2/test/scripts/functions/system/array-get.sql b/h2/src/test/org/h2/test/scripts/functions/system/array-get.sql index 008a885806..fe9e4b4e8a 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/array-get.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/array-get.sql @@ -1,4 +1,17 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +CREATE TABLE TEST(A INTEGER ARRAY) AS VALUES ARRAY[NULL], ARRAY[1]; +> ok + +SELECT A, ARRAY_GET(A, 1), ARRAY_GET(A, 1) IS OF (INTEGER) FROM TEST; +> A A[1] A[1] IS OF (INTEGER) +> ------ ---- -------------------- +> [1] 1 TRUE +> [null] null null +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/array-length.sql b/h2/src/test/org/h2/test/scripts/functions/system/array-length.sql deleted file mode 100644 index 008a885806..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/system/array-length.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/array-slice.sql b/h2/src/test/org/h2/test/scripts/functions/system/array-slice.sql index e9c9a54754..09e0d76d02 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/array-slice.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/array-slice.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/autocommit.sql b/h2/src/test/org/h2/test/scripts/functions/system/autocommit.sql index 3ea906743d..8065d08a50 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/autocommit.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/autocommit.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select autocommit() from test; +select autocommit(); >> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/system/cancel-session.sql b/h2/src/test/org/h2/test/scripts/functions/system/cancel-session.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/cancel-session.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/cancel-session.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/cardinality.sql b/h2/src/test/org/h2/test/scripts/functions/system/cardinality.sql new file mode 100644 index 0000000000..1d73e7fa08 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/cardinality.sql @@ -0,0 +1,41 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT CARDINALITY(NULL); +>> null + +SELECT CARDINALITY(ARRAY[]); +>> 0 + +SELECT CARDINALITY(ARRAY[1, 2, 5]); +>> 3 + +SELECT ARRAY_LENGTH(ARRAY[1, 2, 5]); +>> 3 + +CREATE TABLE TEST(ID INT, A INT ARRAY, B INT ARRAY[2]) AS VALUES (1, NULL, NULL), (2, ARRAY[1], ARRAY[1]); +> ok + +SELECT ID, ARRAY_MAX_CARDINALITY(A), ARRAY_MAX_CARDINALITY(B) FROM TEST; +> ID ARRAY_MAX_CARDINALITY(A) ARRAY_MAX_CARDINALITY(B) +> -- ------------------------ ------------------------ +> 1 65536 2 +> 2 65536 2 +> rows: 2 + +SELECT ARRAY_MAX_CARDINALITY(ARRAY_AGG(ID)) FROM TEST; +>> 65536 + +DROP TABLE TEST; +> ok + +SELECT ARRAY_MAX_CARDINALITY(ARRAY['a', 'b']); +>> 2 + +SELECT ARRAY_MAX_CARDINALITY(NULL); +> exception INVALID_VALUE_2 + +SELECT ARRAY_MAX_CARDINALITY(CAST(NULL AS INT ARRAY)); +>> 65536 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/casewhen.sql b/h2/src/test/org/h2/test/scripts/functions/system/casewhen.sql index 11653f0324..f56f2b1ccb 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/casewhen.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/casewhen.sql @@ -1,46 +1,10 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select casewhen(null, '1', '2') xn, casewhen(1>0, 'n', 'y') xy, casewhen(0<1, 'a', 'b') xa from test; +select casewhen(null, '1', '2') xn, casewhen(1>0, 'n', 'y') xy, casewhen(0<1, 'a', 'b') xa; > XN XY XA > -- -- -- > 2 n a > rows: 1 - -select x, case when x=0 then 'zero' else 'not zero' end y from system_range(0, 2); -> X Y -> - -------- -> 0 zero -> 1 not zero -> 2 not zero -> rows: 3 - -select x, case when x=0 then 'zero' end y from system_range(0, 1); -> X Y -> - ---- -> 0 zero -> 1 null -> rows: 2 - -select x, case x when 0 then 'zero' else 'not zero' end y from system_range(0, 1); -> X Y -> - -------- -> 0 zero -> 1 not zero -> rows: 2 - -select x, case x when 0 then 'zero' when 1 then 'one' end y from system_range(0, 2); -> X Y -> - ---- -> 0 zero -> 1 one -> 2 null -> rows: 3 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/cast.sql b/h2/src/test/org/h2/test/scripts/functions/system/cast.sql index 2b7ecb2873..4a343d320e 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/cast.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/cast.sql @@ -1,67 +1,61 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select cast(null as varchar(255)) xn, cast(' 10' as int) x10, cast(' 20 ' as int) x20 from test; +select cast(null as varchar(255)) xn, cast(' 10' as int) x10, cast(' 20 ' as int) x20; > XN X10 X20 > ---- --- --- > null 10 20 > rows: 1 -select cast(128 as binary); ->> 00000080 +select cast(128 as varbinary); +>> X'00000080' -select cast(65535 as binary); ->> 0000ffff +select cast(65535 as varbinary); +>> X'0000ffff' -select cast(cast('ff' as binary) as tinyint) x; +select cast(X'ff' as tinyint); >> -1 -select cast(cast('7f' as binary) as tinyint) x; +select cast(X'7f' as tinyint); >> 127 -select cast(cast('ff' as binary) as smallint) x; +select cast(X'00ff' as smallint); >> 255 -select cast(cast('ff' as binary) as int) x; +select cast(X'000000ff' as int); >> 255 -select cast(cast('ffff' as binary) as long) x; +select cast(X'000000000000ffff' as long); >> 65535 -select cast(cast(65535 as long) as binary); ->> 000000000000ffff +select cast(cast(65535 as long) as varbinary); +>> X'000000000000ffff' -select cast(cast(-1 as tinyint) as binary); ->> ff +select cast(cast(-1 as tinyint) as varbinary); +>> X'ff' -select cast(cast(-1 as smallint) as binary); ->> ffff +select cast(cast(-1 as smallint) as varbinary); +>> X'ffff' -select cast(cast(-1 as int) as binary); ->> ffffffff +select cast(cast(-1 as int) as varbinary); +>> X'ffffffff' -select cast(cast(-1 as long) as binary); ->> ffffffffffffffff +select cast(cast(-1 as long) as varbinary); +>> X'ffffffffffffffff' -select cast(cast(1 as tinyint) as binary); ->> 01 +select cast(cast(1 as tinyint) as varbinary); +>> X'01' -select cast(cast(1 as smallint) as binary); ->> 0001 +select cast(cast(1 as smallint) as varbinary); +>> X'0001' -select cast(cast(1 as int) as binary); ->> 00000001 +select cast(cast(1 as int) as varbinary); +>> X'00000001' -select 
cast(cast(1 as long) as binary); ->> 0000000000000001 +select cast(cast(1 as long) as varbinary); +>> X'0000000000000001' select cast(X'ff' as tinyint); >> -1 @@ -78,14 +72,14 @@ select cast(X'ffffffffffffffff' as long); select cast(' 011 ' as int); >> 11 -select cast(cast(0.1 as real) as decimal); +select cast(cast(0.1 as real) as decimal(1, 1)); >> 0.1 -select cast(cast(95605327.73 as float) as decimal); ->> 95605327.73 +select cast(cast(95605327.73 as float) as decimal(10, 8)); +> exception VALUE_TOO_LONG_2 -select cast(cast('01020304-0506-0708-090a-0b0c0d0e0f00' as uuid) as binary); ->> 0102030405060708090a0b0c0d0e0f00 +select cast(cast('01020304-0506-0708-090a-0b0c0d0e0f00' as uuid) as varbinary); +>> X'0102030405060708090a0b0c0d0e0f00' call cast('null' as uuid); > exception DATA_CONVERSION_ERROR_1 @@ -125,3 +119,85 @@ SELECT * FROM (SELECT CAST('2000-01-01 11:11:11.123456789Z' AS TIMESTAMP(0) WITH SELECT * FROM (SELECT CAST('2000-01-01 11:11:11.123456789Z' AS TIMESTAMP(9) WITH TIME ZONE)); >> 2000-01-01 11:11:11.123456789+00 + +EXPLAIN SELECT CAST('A' AS VARCHAR(10)), CAST(NULL AS BOOLEAN), CAST(NULL AS VARCHAR), CAST(1 AS INT); +>> SELECT CAST('A' AS CHARACTER VARYING(10)), UNKNOWN, CAST(NULL AS CHARACTER VARYING), 1 + +SELECT CURRENT_TIMESTAMP(9) = CAST(CURRENT_TIME(9) AS TIMESTAMP(9) WITH TIME ZONE); +>> TRUE + +SELECT LOCALTIMESTAMP(9) = CAST(LOCALTIME(9) AS TIMESTAMP(9)); +>> TRUE + +CREATE TABLE TEST(I INTERVAL DAY TO SECOND(9), T TIME(9) WITH TIME ZONE); +> ok + +EXPLAIN SELECT CAST(I AS INTERVAL HOUR(4) TO SECOND), CAST(I AS INTERVAL HOUR(4) TO SECOND(6)), + CAST(I AS INTERVAL HOUR TO SECOND(9)), CAST(I AS INTERVAL HOUR(2) TO SECOND(9)) FROM TEST; +>> SELECT CAST("I" AS INTERVAL HOUR(4) TO SECOND), CAST("I" AS INTERVAL HOUR(4) TO SECOND(6)), CAST("I" AS INTERVAL HOUR TO SECOND(9)), CAST("I" AS INTERVAL HOUR(2) TO SECOND(9)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT CAST(T AS TIME WITH TIME ZONE), CAST(T AS TIME(0) WITH TIME 
ZONE), CAST(T AS TIME(3) WITH TIME ZONE) FROM TEST; +>> SELECT CAST("T" AS TIME WITH TIME ZONE), CAST("T" AS TIME(0) WITH TIME ZONE), CAST("T" AS TIME(3) WITH TIME ZONE) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT + CAST(TIME '10:00:00' AS TIME(9)), + CAST(TIME '10:00:00' AS TIME(9) WITH TIME ZONE), + CAST(TIME '10:00:00' AS TIMESTAMP(9)), + CAST(TIME '10:00:00' AS TIMESTAMP(9) WITH TIME ZONE); +>> SELECT TIME '10:00:00', CAST(TIME '10:00:00' AS TIME(9) WITH TIME ZONE), CAST(TIME '10:00:00' AS TIMESTAMP(9)), CAST(TIME '10:00:00' AS TIMESTAMP(9) WITH TIME ZONE) + +EXPLAIN SELECT + CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIME(9)), + CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIME(9) WITH TIME ZONE), + CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIMESTAMP(9)), + CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIMESTAMP(9) WITH TIME ZONE); +>> SELECT CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIME(9)), TIME WITH TIME ZONE '10:00:00+10', CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIMESTAMP(9)), CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIMESTAMP(9) WITH TIME ZONE) + +EXPLAIN SELECT + CAST(DATE '2000-01-01' AS DATE), + CAST(DATE '2000-01-01' AS TIMESTAMP(9)), + CAST(DATE '2000-01-01' AS TIMESTAMP(9) WITH TIME ZONE); +>> SELECT DATE '2000-01-01', TIMESTAMP '2000-01-01 00:00:00', CAST(DATE '2000-01-01' AS TIMESTAMP(9) WITH TIME ZONE) + +EXPLAIN SELECT + CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIME(9)), + CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIME(9) WITH TIME ZONE), + CAST(TIMESTAMP '2000-01-01 10:00:00' AS DATE), + CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIMESTAMP(9)), + CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIMESTAMP(9) WITH TIME ZONE); +>> SELECT TIME '10:00:00', CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIME(9) WITH TIME ZONE), DATE '2000-01-01', TIMESTAMP '2000-01-01 10:00:00', CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIMESTAMP(9) WITH TIME ZONE) + +EXPLAIN SELECT + CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS 
TIME(9)), + CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIME(9) WITH TIME ZONE), + CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS DATE), + CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIMESTAMP(9)), + CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIMESTAMP(9) WITH TIME ZONE); +>> SELECT CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIME(9)), TIME WITH TIME ZONE '10:00:00+10', CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS DATE), CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIMESTAMP(9)), TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' + +CREATE DOMAIN D INT CHECK (VALUE > 10); +> ok + +VALUES CAST(11 AS D); +>> 11 + +VALUES CAST(10 AS D); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +EXPLAIN SELECT CAST(X AS D) FROM SYSTEM_RANGE(20, 30); +>> SELECT CAST("X" AS "PUBLIC"."D") FROM SYSTEM_RANGE(20, 30) /* range index */ + +DROP DOMAIN D; +> ok + +EXPLAIN VALUES CAST('a' AS VARCHAR_IGNORECASE(10)); +>> VALUES (CAST('a' AS VARCHAR_IGNORECASE(10))) + +SELECT CAST('true ' AS BOOLEAN) V, CAST(CAST('true' AS CHAR(10)) AS BOOLEAN) F; +> V F +> ---- ---- +> TRUE TRUE +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/coalesce.sql b/h2/src/test/org/h2/test/scripts/functions/system/coalesce.sql index f4dededf59..c5fabf149b 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/coalesce.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/coalesce.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select coalesce(null, null) xn, coalesce(null, 'a') xa, coalesce('1', '2') x1 from test; +select coalesce(null, null) xn, coalesce(null, 'a') xa, coalesce('1', '2') x1; > XN XA X1 > ---- -- -- > null a 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/convert.sql b/h2/src/test/org/h2/test/scripts/functions/system/convert.sql index 1160074167..da1a5fa5c3 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/convert.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/convert.sql @@ -1,15 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select convert(null, varchar(255)) xn, convert(' 10', int) x10, convert(' 20 ', int) x20 from test; +select convert(null, varchar(255)) xn, convert(' 10', int) x10, convert(' 20 ', int) x20; > XN X10 X20 > ---- --- --- > null 10 20 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/csvread.sql b/h2/src/test/org/h2/test/scripts/functions/system/csvread.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/csvread.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/csvread.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/csvwrite.sql b/h2/src/test/org/h2/test/scripts/functions/system/csvwrite.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/csvwrite.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/csvwrite.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/current_catalog.sql b/h2/src/test/org/h2/test/scripts/functions/system/current_catalog.sql new file mode 100644 index 0000000000..fbbce1f79b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/current_catalog.sql @@ -0,0 +1,37 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL CURRENT_CATALOG; +>> SCRIPT + +CALL DATABASE(); +>> SCRIPT + +SET CATALOG SCRIPT; +> ok + +SET CATALOG 'SCRIPT'; +> ok + +SET CATALOG 'SCR' || 'IPT'; +> ok + +SET CATALOG UNKNOWN_CATALOG; +> exception DATABASE_NOT_FOUND_1 + +SET CATALOG NULL; +> exception DATABASE_NOT_FOUND_1 + +CALL CURRENT_DATABASE(); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE PostgreSQL; +> ok + +CALL CURRENT_DATABASE(); +>> SCRIPT + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/current_schema.sql b/h2/src/test/org/h2/test/scripts/functions/system/current_schema.sql new file mode 100644 index 0000000000..d2f21bf1b2 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/current_schema.sql @@ -0,0 +1,40 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT CURRENT_SCHEMA, SCHEMA(); +> CURRENT_SCHEMA CURRENT_SCHEMA +> -------------- -------------- +> PUBLIC PUBLIC +> rows: 1 + +CREATE SCHEMA S1; +> ok + +SET SCHEMA S1; +> ok + +CALL CURRENT_SCHEMA; +>> S1 + +SET SCHEMA 'PUBLIC'; +> ok + +CALL CURRENT_SCHEMA; +>> PUBLIC + +SET SCHEMA 'S' || 1; +> ok + +CALL CURRENT_SCHEMA; +>> S1 + +SET SCHEMA PUBLIC; +> ok + +SET SCHEMA NULL; +> exception SCHEMA_NOT_FOUND_1 + +DROP SCHEMA S1; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/current_user.sql b/h2/src/test/org/h2/test/scripts/functions/system/current_user.sql new file mode 100644 index 0000000000..2881250ae8 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/current_user.sql @@ -0,0 +1,25 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select user() x_sa, current_user() x_sa2; +> X_SA X_SA2 +> ---- ----- +> SA SA +> rows: 1 + +SELECT CURRENT_USER; +>> SA + +SELECT SESSION_USER; +>> SA + +SELECT SYSTEM_USER; +>> SA + +SELECT CURRENT_ROLE; +>> PUBLIC + +EXPLAIN SELECT CURRENT_USER, SESSION_USER, SYSTEM_USER, USER, CURRENT_ROLE; +>> SELECT CURRENT_USER, SESSION_USER, SYSTEM_USER, CURRENT_USER, CURRENT_ROLE diff --git a/h2/src/test/org/h2/test/scripts/functions/system/currval.sql b/h2/src/test/org/h2/test/scripts/functions/system/currval.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/currval.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/currval.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/data_type_sql.sql b/h2/src/test/org/h2/test/scripts/functions/system/data_type_sql.sql new file mode 100644 index 0000000000..0f24fa4586 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/data_type_sql.sql @@ -0,0 +1,121 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- +CREATE CONSTANT C VALUE 12; +> ok + +CREATE DOMAIN D AS CHAR(3); +> ok + +CREATE TABLE T (C VARCHAR(10)); +> ok + +CREATE ALIAS R FOR "java.lang.Math.max(long,long)"; +> ok + +SELECT ID, DATA_TYPE_SQL('PUBLIC', 'C', 'CONSTANT', ID) FROM (VALUES NULL, 'TYPE', 'X') T(ID); +> ID DATA_TYPE_SQL('PUBLIC', 'C', 'CONSTANT', ID) +> ---- -------------------------------------------- +> TYPE INTEGER +> X null +> null null +> rows: 3 + +SELECT ID, DATA_TYPE_SQL('PUBLIC', 'D', 'DOMAIN', ID) FROM (VALUES NULL, 'TYPE', 'X') T(ID); +> ID DATA_TYPE_SQL('PUBLIC', 'D', 'DOMAIN', ID) +> ---- ------------------------------------------ +> TYPE CHARACTER(3) +> X null +> null null +> rows: 3 + +SELECT ID, DATA_TYPE_SQL('PUBLIC', 'T', 'TABLE', ID) FROM (VALUES NULL, '0', '1', '2', 'X') T(ID); +> ID DATA_TYPE_SQL('PUBLIC', 'T', 'TABLE', ID) +> ---- ----------------------------------------- +> 0 null +> 1 CHARACTER VARYING(10) +> 2 null +> X null +> null null +> rows: 5 + +SELECT ID, DATA_TYPE_SQL('PUBLIC', 'R_1', 'ROUTINE', ID) FROM (VALUES NULL, 'RESULT', '0', '1', '2', '3', 'X') T(ID); +> ID DATA_TYPE_SQL('PUBLIC', 'R_1', 'ROUTINE', ID) +> ------ --------------------------------------------- +> 0 null +> 1 BIGINT +> 2 BIGINT +> 3 null +> RESULT BIGINT +> X null +> null null +> rows: 7 + +SELECT DATA_TYPE_SQL(S, O, T, I) FROM (VALUES + (NULL, 'C', 'CONSTANT', 'TYPE'), + ('X', 'C', 
'CONSTANT', 'TYPE'), + ('PUBLIC', NULL, 'CONSTANT', 'TYPE'), + ('PUBLIC', 'X', 'CONSTANT', 'TYPE'), + ('PUBLIC', 'C', NULL, 'TYPE'), + (NULL, 'D', 'DOMAIN', 'TYPE'), + ('X', 'D', 'DOMAIN', 'TYPE'), + ('PUBLIC', NULL, 'DOMAIN', 'TYPE'), + ('PUBLIC', 'X', 'DOMAIN', 'TYPE'), + ('PUBLIC', 'D', NULL, 'TYPE'), + (NULL, 'T', 'TABLE', '1'), + ('X', 'T', 'TABLE', '1'), + ('PUBLIC', NULL, 'TABLE', '1'), + ('PUBLIC', 'X', 'TABLE', '1'), + ('PUBLIC', 'T', NULL, '1'), + (NULL, 'R_1', 'ROUTINE', '1'), + ('X', 'R_1', 'ROUTINE', '1'), + ('PUBLIC', NULL, 'ROUTINE', '1'), + ('PUBLIC', 'R_0', 'ROUTINE', '1'), + ('PUBLIC', 'R_2', 'ROUTINE', '1'), + ('PUBLIC', 'R_Z', 'ROUTINE', '1'), + ('PUBLIC', 'X', 'ROUTINE', '1'), + ('PUBLIC', 'X_1', 'ROUTINE', '1'), + ('PUBLIC', 'R_1', NULL, '1'), + ('PUBLIC', 'T', 'X', '1') + ) T(S, O, T, I); +> DATA_TYPE_SQL(S, O, T, I) +> ------------------------- +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> rows: 25 + +DROP CONSTANT C; +> ok + +DROP DOMAIN D; +> ok + +DROP TABLE T; +> ok + +DROP ALIAS R; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/database-path.sql b/h2/src/test/org/h2/test/scripts/functions/system/database-path.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/database-path.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/database-path.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/database.sql b/h2/src/test/org/h2/test/scripts/functions/system/database.sql deleted file mode 100644 index aa7cc6d734..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/system/database.sql +++ /dev/null @@ -1,13 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- - -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select right(database(), 6) from test; ->> SCRIPT diff --git a/h2/src/test/org/h2/test/scripts/functions/system/db_object.sql b/h2/src/test/org/h2/test/scripts/functions/system/db_object.sql new file mode 100644 index 0000000000..d44d0fa5ee --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/db_object.sql @@ -0,0 +1,284 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE ROLE A; +> ok + +CREATE ROLE B; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('ROLE', 'A'), + DB_OBJECT_ID('ROLE', 'B'), + DB_OBJECT_SQL('ROLE', 'A'), + DB_OBJECT_SQL('ROLE', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ --------------- --------------- +> TRUE CREATE ROLE "A" CREATE ROLE "B" +> rows: 1 + +DROP ROLE A; +> ok + +DROP ROLE B; +> ok + +CALL DB_OBJECT_ID('SETTING', 'CREATE_BUILD') IS NOT NULL; +>> TRUE + +CALL DB_OBJECT_SQL('SETTING', 'CREATE_BUILD') IS NOT NULL; +>> TRUE + +CREATE SCHEMA A; +> ok + +CREATE SCHEMA B; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('SCHEMA', 'A'), + DB_OBJECT_ID('SCHEMA', 'B'), + DB_OBJECT_SQL('SCHEMA', 'A'), + DB_OBJECT_SQL('SCHEMA', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ -------------------------------------------------- -------------------------------------------------- +> TRUE CREATE SCHEMA IF NOT EXISTS "A" AUTHORIZATION "SA" CREATE SCHEMA IF NOT EXISTS "B" AUTHORIZATION "SA" +> rows: 1 + +DROP SCHEMA A; +> ok + +DROP SCHEMA B; +> ok + +CREATE USER A SALT X'00' HASH X'00'; +> ok + +CREATE USER B SALT X'00' HASH X'00'; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('USER', 'A'), + DB_OBJECT_ID('USER', 'B'), + DB_OBJECT_SQL('USER', 'A'), + DB_OBJECT_SQL('USER', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ ------------------------------------------------- ------------------------------------------------- +> TRUE CREATE USER IF NOT EXISTS "A" SALT '00' HASH '00' CREATE USER IF NOT EXISTS "B" SALT '00' HASH '00' +> rows: 1 + +DROP USER A; +> ok + +DROP USER B; +> ok + +CREATE CONSTANT A VALUE 1; +> ok + +CREATE CONSTANT B VALUE 2; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('CONSTANT', 'PUBLIC', 'A'), + DB_OBJECT_ID('CONSTANT', 'PUBLIC', 'B'), + 
DB_OBJECT_SQL('CONSTANT', 'PUBLIC', 'A'), + DB_OBJECT_SQL('CONSTANT', 'PUBLIC', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ ------------------------------------ ------------------------------------ +> TRUE CREATE CONSTANT "PUBLIC"."A" VALUE 1 CREATE CONSTANT "PUBLIC"."B" VALUE 2 +> rows: 1 + +DROP CONSTANT A; +> ok + +DROP CONSTANT B; +> ok + +CREATE DOMAIN A AS CHAR; +> ok + +CREATE DOMAIN B AS CHAR; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('DOMAIN', 'PUBLIC', 'A'), + DB_OBJECT_ID('DOMAIN', 'PUBLIC', 'B'), + DB_OBJECT_SQL('DOMAIN', 'PUBLIC', 'A'), + DB_OBJECT_SQL('DOMAIN', 'PUBLIC', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ --------------------------------------- --------------------------------------- +> TRUE CREATE DOMAIN "PUBLIC"."A" AS CHARACTER CREATE DOMAIN "PUBLIC"."B" AS CHARACTER +> rows: 1 + +DROP DOMAIN A; +> ok + +DROP DOMAIN B; +> ok + +CREATE ALIAS A FOR 'java.lang.Math.sqrt'; +> ok + +CREATE AGGREGATE B FOR 'org.h2.test.scripts.Aggregate1'; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('ROUTINE', 'PUBLIC', 'A'), + DB_OBJECT_ID('ROUTINE', 'PUBLIC', 'B'), + DB_OBJECT_SQL('ROUTINE', 'PUBLIC', 'A'), + DB_OBJECT_SQL('ROUTINE', 'PUBLIC', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ --------------------------------------------------------- ------------------------------------------------------------------------ +> TRUE CREATE FORCE ALIAS "PUBLIC"."A" FOR 'java.lang.Math.sqrt' CREATE FORCE AGGREGATE "PUBLIC"."B" FOR 'org.h2.test.scripts.Aggregate1' +> rows: 1 + +DROP ALIAS A; +> ok + +DROP AGGREGATE B; +> ok + +CREATE SEQUENCE A; +> ok + +CREATE SEQUENCE B; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('SEQUENCE', 'PUBLIC', 'A'), + DB_OBJECT_ID('SEQUENCE', 'PUBLIC', 'B'), + DB_OBJECT_SQL('SEQUENCE', 'PUBLIC', 'A'), + DB_OBJECT_SQL('SEQUENCE', 'PUBLIC', 'B') 
+)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ ----------------------------------------- ----------------------------------------- +> TRUE CREATE SEQUENCE "PUBLIC"."A" START WITH 1 CREATE SEQUENCE "PUBLIC"."B" START WITH 1 +> rows: 1 + +DROP SEQUENCE A; +> ok + +DROP SEQUENCE B; +> ok + +CREATE MEMORY TABLE T_A(ID INT); +> ok + +CREATE UNIQUE INDEX I_A ON T_A(ID); +> ok + +ALTER TABLE T_A ADD CONSTRAINT C_A UNIQUE(ID); +> ok + +CREATE SYNONYM S_A FOR T_A; +> ok + +CREATE TRIGGER G_A BEFORE INSERT ON T_A FOR EACH ROW CALL 'org.h2.test.scripts.Trigger1'; +> ok + +CREATE MEMORY TABLE T_B(ID INT); +> ok + +CREATE UNIQUE INDEX I_B ON T_B(ID); +> ok + +ALTER TABLE T_B ADD CONSTRAINT C_B UNIQUE(ID); +> ok + +CREATE SYNONYM S_B FOR T_B; +> ok + +CREATE TRIGGER G_B BEFORE INSERT ON T_B FOR EACH ROW CALL 'org.h2.test.scripts.Trigger1'; +> ok + +SELECT T, ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES +( + 'CONSTRAINT', + DB_OBJECT_ID('CONSTRAINT', 'PUBLIC', 'C_A'), + DB_OBJECT_ID('CONSTRAINT', 'PUBLIC', 'C_B'), + DB_OBJECT_SQL('CONSTRAINT', 'PUBLIC', 'C_A'), + DB_OBJECT_SQL('CONSTRAINT', 'PUBLIC', 'C_B') +), ( + 'INDEX', + DB_OBJECT_ID('INDEX', 'PUBLIC', 'I_A'), + DB_OBJECT_ID('INDEX', 'PUBLIC', 'I_B'), + DB_OBJECT_SQL('INDEX', 'PUBLIC', 'I_A'), + DB_OBJECT_SQL('INDEX', 'PUBLIC', 'I_B') +), ( + 'SYNONYM', + DB_OBJECT_ID('SYNONYM', 'PUBLIC', 'S_A'), + DB_OBJECT_ID('SYNONYM', 'PUBLIC', 'S_B'), + DB_OBJECT_SQL('SYNONYM', 'PUBLIC', 'S_A'), + DB_OBJECT_SQL('SYNONYM', 'PUBLIC', 'S_B') +), ( + 'TABLE', + DB_OBJECT_ID('TABLE', 'PUBLIC', 'T_A'), + DB_OBJECT_ID('TABLE', 'PUBLIC', 'T_B'), + DB_OBJECT_SQL('TABLE', 'PUBLIC', 'T_A'), + DB_OBJECT_SQL('TABLE', 'PUBLIC', 'T_B') +), ( + 'TRIGGER', + DB_OBJECT_ID('TRIGGER', 'PUBLIC', 'G_A'), + DB_OBJECT_ID('TRIGGER', 'PUBLIC', 'G_B'), + DB_OBJECT_SQL('TRIGGER', 'PUBLIC', 'G_A'), + DB_OBJECT_SQL('TRIGGER', 'PUBLIC', 'G_B') +)) T(T, ID_A, ID_B, SQL_A, SQL_B); +> T ID_A <> ID_B SQL_A SQL_B +> ---------- ------------ 
------------------------------------------------------------------------------------------------------------------------------- ------------------------------------------------------------------------------------------------------------------------------- +> CONSTRAINT TRUE ALTER TABLE "PUBLIC"."T_A" ADD CONSTRAINT "PUBLIC"."C_A" UNIQUE("ID") ALTER TABLE "PUBLIC"."T_B" ADD CONSTRAINT "PUBLIC"."C_B" UNIQUE("ID") +> INDEX TRUE CREATE UNIQUE INDEX "PUBLIC"."I_A" ON "PUBLIC"."T_A"("ID" NULLS FIRST) CREATE UNIQUE INDEX "PUBLIC"."I_B" ON "PUBLIC"."T_B"("ID" NULLS FIRST) +> SYNONYM TRUE CREATE SYNONYM "PUBLIC"."S_A" FOR "PUBLIC"."T_A" CREATE SYNONYM "PUBLIC"."S_B" FOR "PUBLIC"."T_B" +> TABLE TRUE CREATE MEMORY TABLE "PUBLIC"."T_A"( "ID" INTEGER ) CREATE MEMORY TABLE "PUBLIC"."T_B"( "ID" INTEGER ) +> TRIGGER TRUE CREATE FORCE TRIGGER "PUBLIC"."G_A" BEFORE INSERT ON "PUBLIC"."T_A" FOR EACH ROW QUEUE 1024 CALL 'org.h2.test.scripts.Trigger1' CREATE FORCE TRIGGER "PUBLIC"."G_B" BEFORE INSERT ON "PUBLIC"."T_B" FOR EACH ROW QUEUE 1024 CALL 'org.h2.test.scripts.Trigger1' +> rows: 5 + +DROP SYNONYM S_A; +> ok + +DROP SYNONYM S_B; +> ok + +DROP TABLE T_B, T_A; +> ok + +CALL DB_OBJECT_ID(NULL, NULL); +>> null + +CALL DB_OBJECT_ID(NULL, NULL, NULL); +>> null + +CALL DB_OBJECT_ID('UNKNOWN', NULL); +>> null + +CALL DB_OBJECT_ID('UNKNOWN', 'UNKNOWN'); +>> null + +CALL DB_OBJECT_ID('UNKNOWN', 'PUBLIC', 'UNKNOWN'); +>> null + +CALL DB_OBJECT_ID('UNKNOWN', 'UNKNOWN', 'UNKNOWN'); +>> null + +CALL DB_OBJECT_ID('TABLE', 'UNKNOWN', 'UNKNOWN'); +>> null + +CALL DB_OBJECT_ID('TABLE', 'PUBLIC', 'UNKNOWN'); +>> null + +CALL DB_OBJECT_ID('TABLE', 'PUBLIC', NULL); +>> null + +CALL DB_OBJECT_ID('TABLE', 'INFORMATION_SCHEMA', 'TABLES') IS NOT NULL; +>> TRUE + +CALL DB_OBJECT_SQL('TABLE', 'INFORMATION_SCHEMA', 'TABLES'); +>> null diff --git a/h2/src/test/org/h2/test/scripts/functions/system/decode.sql b/h2/src/test/org/h2/test/scripts/functions/system/decode.sql index bdf542fa14..7c7c3ec536 100644 --- 
a/h2/src/test/org/h2/test/scripts/functions/system/decode.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/decode.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/disk-space-used.sql b/h2/src/test/org/h2/test/scripts/functions/system/disk-space-used.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/disk-space-used.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/disk-space-used.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/file-read.sql b/h2/src/test/org/h2/test/scripts/functions/system/file-read.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/file-read.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/file-read.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/file-write.sql b/h2/src/test/org/h2/test/scripts/functions/system/file-write.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/file-write.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/file-write.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/greatest.sql b/h2/src/test/org/h2/test/scripts/functions/system/greatest.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/greatest.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/greatest.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/h2version.sql b/h2/src/test/org/h2/test/scripts/functions/system/h2version.sql index 008a885806..ff8a311fd1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/h2version.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/h2version.sql @@ -1,4 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +EXPLAIN VALUES H2VERSION(); +>> VALUES (H2VERSION()) diff --git a/h2/src/test/org/h2/test/scripts/functions/system/identity.sql b/h2/src/test/org/h2/test/scripts/functions/system/identity.sql index 008a885806..4d692e68d5 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/identity.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/identity.sql @@ -1,4 +1,34 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +INSERT INTO TEST(V) VALUES 10; +> update count: 1 + +VALUES IDENTITY(); +> exception FUNCTION_NOT_FOUND_1 + +VALUES SCOPE_IDENTITY(); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE LEGACY; +> ok + +INSERT INTO TEST(V) VALUES 20; +> update count: 1 + +VALUES IDENTITY(); +>> 2 + +VALUES SCOPE_IDENTITY(); +>> 2 + +SET MODE REGULAR; +> ok + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/ifnull.sql b/h2/src/test/org/h2/test/scripts/functions/system/ifnull.sql index aaa07e73d9..5aa7665740 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/ifnull.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/ifnull.sql @@ -1,22 +1,37 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select ifnull(null, '1') x1, ifnull(null, null) xn, ifnull('a', 'b') xa from test; +select ifnull(null, '1') x1, ifnull(null, null) xn, ifnull('a', 'b') xa; > X1 XN XA > -- ---- -- > 1 null a > rows: 1 -select isnull(null, '1') x1, isnull(null, null) xn, isnull('a', 'b') xa from test; +SELECT ISNULL(NULL, '1'); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +select isnull(null, '1') x1, isnull(null, null) xn, isnull('a', 'b') xa; > X1 XN XA > -- ---- -- > 1 null a > rows: 1 + +SET MODE Regular; +> ok + +CREATE MEMORY TABLE S(D DOUBLE) AS VALUES NULL; +> ok + +CREATE MEMORY TABLE T AS SELECT IFNULL(D, D) FROM S; +> ok + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'T'; +>> DOUBLE PRECISION + +DROP TABLE S, T; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/last-insert-id.sql b/h2/src/test/org/h2/test/scripts/functions/system/last-insert-id.sql new file mode 100644 index 0000000000..b51d5cf5d9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/last-insert-id.sql @@ -0,0 +1,43 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- +SET MODE MySQL; +> ok + +create memory table sequence (id INT NOT NULL AUTO_INCREMENT, title varchar(255)); +> ok + +INSERT INTO sequence (title) VALUES ('test'); +> update count: 1 + +INSERT INTO sequence (title) VALUES ('test1'); +> update count: 1 + +SELECT LAST_INSERT_ID() AS L; +>> 2 + +SELECT LAST_INSERT_ID(100) AS L; +>> 100 + +SELECT LAST_INSERT_ID() AS L; +>> 100 + +INSERT INTO sequence (title) VALUES ('test2'); +> update count: 1 + +SELECT MAX(id) AS M FROM sequence; +>> 3 + +SELECT LAST_INSERT_ID() AS L; +>> 3 + +SELECT LAST_INSERT_ID(NULL) AS L; +>> null + +SELECT LAST_INSERT_ID() AS L; +>> 0 + + +DROP TABLE sequence; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/least.sql b/h2/src/test/org/h2/test/scripts/functions/system/least.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/least.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/least.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/link-schema.sql b/h2/src/test/org/h2/test/scripts/functions/system/link-schema.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/link-schema.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/link-schema.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/lock-mode.sql b/h2/src/test/org/h2/test/scripts/functions/system/lock-mode.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/lock-mode.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/lock-mode.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/lock-timeout.sql b/h2/src/test/org/h2/test/scripts/functions/system/lock-timeout.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/lock-timeout.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/lock-timeout.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/memory-free.sql b/h2/src/test/org/h2/test/scripts/functions/system/memory-free.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/memory-free.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/memory-free.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/memory-used.sql b/h2/src/test/org/h2/test/scripts/functions/system/memory-used.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/memory-used.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/memory-used.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/nextval.sql b/h2/src/test/org/h2/test/scripts/functions/system/nextval.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/nextval.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/nextval.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/nullif.sql b/h2/src/test/org/h2/test/scripts/functions/system/nullif.sql index bb9d2419ef..6042a0bc00 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/nullif.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/nullif.sql @@ -1,16 +1,27 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select nullif(null, null) xn, nullif('a', 'a') xn, nullif('1', '2') x1 from test; +select nullif(null, null) xn, nullif('a', 'a') xn, nullif('1', '2') x1; > XN XN X1 > ---- ---- -- > null null 1 > rows: 1 + +SELECT + A = B, + NULLIF(A, B), CASE WHEN A = B THEN NULL ELSE A END + FROM (VALUES + (1, (1, NULL), (1, NULL)), + (2, (1, NULL), (2, NULL)), + (3, (2, NULL), (1, NULL)), + (4, (1, 1), (1, 2)) + ) T(N, A, B) ORDER BY N; +> A = B NULLIF(A, B) CASE WHEN A = B THEN NULL ELSE A END +> ----- ------------- ------------------------------------ +> null ROW (1, null) ROW (1, null) +> FALSE ROW (1, null) ROW (1, null) +> FALSE ROW (2, null) ROW (2, null) +> FALSE ROW (1, 1) ROW (1, 1) +> rows (ordered): 4 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/nvl2.sql b/h2/src/test/org/h2/test/scripts/functions/system/nvl2.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/nvl2.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/nvl2.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/readonly.sql b/h2/src/test/org/h2/test/scripts/functions/system/readonly.sql index a96cac6a4f..14d9568289 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/readonly.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/readonly.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select readonly() from test; +select readonly(); >> FALSE diff --git a/h2/src/test/org/h2/test/scripts/functions/system/rownum.sql b/h2/src/test/org/h2/test/scripts/functions/system/rownum.sql index 0300929cba..0893274095 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/rownum.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/rownum.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -13,5 +13,19 @@ select rownum() as rnum, str from test where str = 'A'; > 1 A > rows: 1 +----- Issue#3353 ----- +SELECT str FROM FINAL TABLE (UPDATE test SET str = char(rownum + 48) WHERE str = '0'); +> STR +> --- +> 1 +> rows: 1 + drop table test; > ok + +SELECT * FROM (VALUES 1, 2) AS T1(X), (VALUES 1, 2) AS T2(X) WHERE ROWNUM = 1; +> X X +> - - +> 1 1 +> rows: 1 + diff --git a/h2/src/test/org/h2/test/scripts/functions/system/schema.sql b/h2/src/test/org/h2/test/scripts/functions/system/schema.sql deleted file mode 100644 index 008a885806..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/system/schema.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
--- Initial Developer: H2 Group --- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/scope-identity.sql b/h2/src/test/org/h2/test/scripts/functions/system/scope-identity.sql deleted file mode 100644 index 008a885806..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/system/scope-identity.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/session-id.sql b/h2/src/test/org/h2/test/scripts/functions/system/session-id.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/session-id.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/session-id.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/set.sql b/h2/src/test/org/h2/test/scripts/functions/system/set.sql deleted file mode 100644 index 175b9b2df3..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/system/set.sql +++ /dev/null @@ -1,89 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
--- Initial Developer: H2 Group --- - -@reconnect off - --- Try a custom column naming rules setup - -SET COLUMN_NAME_RULES=MAX_IDENTIFIER_LENGTH = 30; -> ok - -SET COLUMN_NAME_RULES=REGULAR_EXPRESSION_MATCH_ALLOWED = '[A-Za-z0-9_]+'; -> ok - -SET COLUMN_NAME_RULES=REGULAR_EXPRESSION_MATCH_DISALLOWED = '[^A-Za-z0-9_]+'; -> ok - -SET COLUMN_NAME_RULES=DEFAULT_COLUMN_NAME_PATTERN = 'noName$$'; -> ok - -SET COLUMN_NAME_RULES=GENERATE_UNIQUE_COLUMN_NAMES = 1; -> ok - -SELECT 1 AS VERY_VERY_VERY_LONG_ID_VERY_VERY_VERY_LONG_ID, SUM(X)+1 AS _123456789012345, SUM(X)+1 , SUM(X)+1 -+47, 'x' , '!!!' , '!!!!' FROM SYSTEM_RANGE(1,2); -> VERY_VERY_VERY_LONG_ID_VERY_VE _123456789012345 SUMX1 SUMX147 x noName6 noName7 -> ------------------------------ ---------------- ----- ------- - ------- ------- -> 1 4 4 51 x !!! !!!! -> rows: 1 - -SET COLUMN_NAME_RULES=EMULATE='Oracle'; -> ok - -SELECT 1 AS VERY_VERY_VERY_LONG_ID, SUM(X)+1 AS _123456789012345, SUM(X)+1 , SUM(X)+1 -+47, 'x' , '!!!' , '!!!!' FROM SYSTEM_RANGE(1,2); -> VERY_VERY_VERY_LONG_ID _123456789012345 SUMX1 SUMX147 x _UNNAMED_6 _UNNAMED_7 -> ---------------------- ---------------- ----- ------- - ---------- ---------- -> 1 4 4 51 x !!! !!!! -> rows: 1 - -SET COLUMN_NAME_RULES=EMULATE='Oracle'; -> ok - -SELECT 1 AS VERY_VERY_VERY_LONG_ID, SUM(X)+1 AS _123456789012345, SUM(X)+1 , SUM(X)+1 -+47, 'x' , '!!!' , '!!!!', 'Very Long' AS _23456789012345678901234567890XXX FROM SYSTEM_RANGE(1,2); -> VERY_VERY_VERY_LONG_ID _123456789012345 SUMX1 SUMX147 x _UNNAMED_6 _UNNAMED_7 _23456789012345678901234567890XXX -> ---------------------- ---------------- ----- ------- - ---------- ---------- --------------------------------- -> 1 4 4 51 x !!! !!!! Very Long -> rows: 1 - -SET COLUMN_NAME_RULES=EMULATE='PostgreSQL'; -> ok - -SELECT 1 AS VERY_VERY_VERY_LONG_ID, SUM(X)+1 AS _123456789012345, SUM(X)+1 , SUM(X)+1 -+47, 'x' , '!!!' 
, '!!!!', 999 AS "QuotedColumnId" FROM SYSTEM_RANGE(1,2); -> VERY_VERY_VERY_LONG_ID _123456789012345 SUMX1 SUMX147 x _UNNAMED_6 _UNNAMED_7 QuotedColumnId -> ---------------------- ---------------- ----- ------- - ---------- ---------- -------------- -> 1 4 4 51 x !!! !!!! 999 -> rows: 1 - -SET COLUMN_NAME_RULES=DEFAULT; -> ok - --- Test all MODES of database: --- DB2, Derby, MSSQLServer, HSQLDB, MySQL, Oracle, PostgreSQL, Ignite -SET COLUMN_NAME_RULES=EMULATE='DB2'; -> ok - -SET COLUMN_NAME_RULES=EMULATE='Derby'; -> ok - -SET COLUMN_NAME_RULES=EMULATE='MSSQLServer'; -> ok - -SET COLUMN_NAME_RULES=EMULATE='MySQL'; -> ok - -SET COLUMN_NAME_RULES=EMULATE='Oracle'; -> ok - -SET COLUMN_NAME_RULES=EMULATE='PostgreSQL'; -> ok - -SET COLUMN_NAME_RULES=EMULATE='Ignite'; -> ok - -SET COLUMN_NAME_RULES=EMULATE='REGULAR'; -> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/table.sql b/h2/src/test/org/h2/test/scripts/functions/system/table.sql index ad0b6ae37d..4df052af6a 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/table.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/table.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -31,6 +31,13 @@ SELECT * FROM (SELECT * FROM TEST) x ORDER BY id; drop table test; > ok +select * from table(id int = (1)); +> ID +> -- +> 1 +> rows: 1 + +-- compatibility syntax call table(id int = (1)); > ID > -- @@ -38,10 +45,10 @@ call table(id int = (1)); > rows: 1 explain select * from table(id int = (1, 2), name varchar=('Hello', 'World')); ->> SELECT "TABLE"."ID", "TABLE"."NAME" FROM TABLE("ID" INT=ROW (1, 2), "NAME" VARCHAR=ROW ('Hello', 'World')) /* function */ +>> SELECT "TABLE"."ID", "TABLE"."NAME" FROM TABLE("ID" INTEGER=ROW (1, 2), "NAME" CHARACTER VARYING=ROW ('Hello', 'World')) /* function */ explain select * from table(id int = ARRAY[1, 2], name varchar=ARRAY['Hello', 'World']); ->> SELECT "TABLE"."ID", "TABLE"."NAME" FROM TABLE("ID" INT=ARRAY [1, 2], "NAME" VARCHAR=ARRAY ['Hello', 'World']) /* function */ +>> SELECT "TABLE"."ID", "TABLE"."NAME" FROM TABLE("ID" INTEGER=ARRAY [1, 2], "NAME" CHARACTER VARYING=ARRAY ['Hello', 'World']) /* function */ select * from table(id int=(1, 2), name varchar=('Hello', 'World')) x order by id; > ID NAME diff --git a/h2/src/test/org/h2/test/scripts/functions/system/transaction-id.sql b/h2/src/test/org/h2/test/scripts/functions/system/transaction-id.sql index 008a885806..e04598e7c1 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/transaction-id.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/transaction-id.sql @@ -1,4 +1,4 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/trim_array.sql b/h2/src/test/org/h2/test/scripts/functions/system/trim_array.sql new file mode 100644 index 0000000000..ba5c743a21 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/trim_array.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT TRIM_ARRAY(ARRAY[1, 2], -1); +> exception ARRAY_ELEMENT_ERROR_2 + +SELECT TRIM_ARRAY(ARRAY[1, 2], 0); +>> [1, 2] + +SELECT TRIM_ARRAY(ARRAY[1, 2], 1); +>> [1] + +SELECT TRIM_ARRAY(ARRAY[1, 2], 2); +>> [] + +SELECT TRIM_ARRAY(ARRAY[1, 2], 3); +> exception ARRAY_ELEMENT_ERROR_2 + +SELECT TRIM_ARRAY(NULL, 1); +>> null + +SELECT TRIM_ARRAY(NULL, -1); +> exception ARRAY_ELEMENT_ERROR_2 + +SELECT TRIM_ARRAY(ARRAY[1], NULL); +>> null diff --git a/h2/src/test/org/h2/test/scripts/functions/system/truncate-value.sql b/h2/src/test/org/h2/test/scripts/functions/system/truncate-value.sql index 008a885806..5bca7ee491 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/truncate-value.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/truncate-value.sql @@ -1,4 +1,19 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- + +CALL TRUNCATE_VALUE('Test 123', 4, FALSE); +>> Test + +CALL TRUNCATE_VALUE(1234567890.123456789, 4, FALSE); +>> 1235000000 + +CALL TRUNCATE_VALUE(1234567890.123456789, 4, TRUE); +>> 1235000000 + +CALL TRUNCATE_VALUE(CAST(1234567890.123456789 AS DOUBLE PRECISION), 4, FALSE); +>> 1.2345678901234567E9 + +CALL TRUNCATE_VALUE(CAST(1234567890.123456789 AS DOUBLE PRECISION), 4, TRUE); +>> 1.235E9 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/unnest.sql b/h2/src/test/org/h2/test/scripts/functions/system/unnest.sql index 6b955267f4..a5a52b0197 100644 --- a/h2/src/test/org/h2/test/scripts/functions/system/unnest.sql +++ b/h2/src/test/org/h2/test/scripts/functions/system/unnest.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -19,6 +19,15 @@ SELECT * FROM UNNEST(ARRAY[1, 2, 3]); > 3 > rows: 3 +-- compatibility syntax +CALL UNNEST(ARRAY[1, 2, 3]); +> C1 +> -- +> 1 +> 2 +> 3 +> rows: 3 + SELECT * FROM UNNEST(ARRAY[1], ARRAY[2, 3, 4], ARRAY[5, 6]); > C1 C2 C3 > ---- -- ---- @@ -41,48 +50,18 @@ EXPLAIN SELECT * FROM UNNEST(ARRAY[1]); EXPLAIN SELECT * FROM UNNEST(ARRAY[1]) WITH ORDINALITY; >> SELECT "UNNEST"."C1", "UNNEST"."NORD" FROM UNNEST(ARRAY [1]) WITH ORDINALITY /* function */ -SELECT 1 IN(UNNEST(ARRAY[1, 2, 3])); +SELECT 1 IN(SELECT * FROM UNNEST(ARRAY[1, 2, 3])); >> TRUE -SELECT 4 IN(UNNEST(ARRAY[1, 2, 3])); +SELECT 4 IN(SELECT * FROM UNNEST(ARRAY[1, 2, 3])); >> FALSE -SELECT X, X IN(UNNEST(ARRAY[2, 4])) FROM SYSTEM_RANGE(1, 5); -> X X IN(2, 4) -> - ---------- +SELECT X, X IN(SELECT * FROM UNNEST(ARRAY[2, 4])) FROM SYSTEM_RANGE(1, 5); +> X X IN( SELECT DISTINCT UNNEST.C1 FROM UNNEST(ARRAY [2, 4])) +> - ---------------------------------------------------------- > 1 FALSE > 2 TRUE > 3 FALSE > 4 TRUE > 5 FALSE > rows: 5 - -SELECT X, X IN(UNNEST(?)) FROM SYSTEM_RANGE(1, 5); -{ -2 -> X X = ANY(?1) -> - ----------- -> 1 FALSE -> 2 TRUE -> 3 FALSE -> 4 FALSE -> 5 FALSE -> rows: 5 -}; -> update count: 0 - -CREATE TABLE TEST(A INT, B ARRAY); -> ok - -INSERT INTO TEST VALUES (2, ARRAY[2, 4]), (3, ARRAY[2, 5]); -> update count: 2 - -SELECT A, B, A IN(UNNEST(B)) FROM TEST; -> A B A IN(UNNEST(B)) -> - ------ --------------- -> 2 [2, 4] TRUE -> 3 [2, 5] FALSE -> rows: 2 - -DROP TABLE TEST; -> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/user.sql b/h2/src/test/org/h2/test/scripts/functions/system/user.sql deleted file mode 100644 index d90aec2b0f..0000000000 --- a/h2/src/test/org/h2/test/scripts/functions/system/user.sql +++ /dev/null @@ -1,19 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
--- Initial Developer: H2 Group --- - -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select user() x_sa, current_user() x_sa2 from test; -> X_SA X_SA2 -> ---- ----- -> SA SA -> rows: 1 - -select current_user() from test; ->> SA diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/current-time.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current-time.sql index a9fda0d4b9..1d558baf58 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/current-time.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current-time.sql @@ -1,21 +1,30 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); +SET TIME ZONE '-8:00'; > ok -insert into test values(1, 'Hello'); -> update count: 1 +SELECT CAST(CURRENT_TIME AS TIME(9)) = LOCALTIME; +>> TRUE + +SELECT CAST(CURRENT_TIME(0) AS TIME(9)) = LOCALTIME(0); +>> TRUE -select length(curtime())>=8 c1, length(current_time())>=8 c2, substring(curtime(), 3, 1) c3 from test; +SELECT CAST(CURRENT_TIME(9) AS TIME(9)) = LOCALTIME(9); +>> TRUE + +SET TIME ZONE LOCAL; +> ok + +select length(curtime())>=8 c1, length(current_time())>=8 c2, substring(curtime(), 3, 1) c3; > C1 C2 C3 > ---- ---- -- > TRUE TRUE : > rows: 1 -select length(now())>18 c1, length(current_timestamp())>18 c2, length(now(0))>18 c3, length(now(2))>18 c4 from test; +select length(now())>18 c1, length(current_timestamp())>18 c2, length(now(0))>18 c3, length(now(2))>18 c4; > C1 C2 C3 C4 > ---- ---- ---- ---- > TRUE TRUE TRUE TRUE @@ -29,3 +38,6 @@ SELECT CAST(CURRENT_TIME(0) AS TIME(9)) = LOCALTIME(0); SELECT CAST(CURRENT_TIME(9) AS 
TIME(9)) = LOCALTIME(9); >> TRUE + +EXPLAIN SELECT CURRENT_TIME, LOCALTIME, CURRENT_TIME(9), LOCALTIME(9); +>> SELECT CURRENT_TIME, LOCALTIME, CURRENT_TIME(9), LOCALTIME(9) diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_date.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_date.sql index c1a39a7bc8..c5fe931913 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_date.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_date.sql @@ -1,28 +1,13 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select length(curdate()) c1, length(current_date()) c2, substring(curdate(), 5, 1) c3 from test; +select length(curdate()) c1, length(current_date()) c2, substring(curdate(), 5, 1) c3; > C1 C2 C3 > -- -- -- > 10 10 - > rows: 1 -SELECT GETDATE(); -> exception FUNCTION_NOT_FOUND_1 - -SET MODE MSSQLServer; -> ok - -SELECT CURRENT_DATE = GETDATE(); +SELECT CURRENT_DATE IS OF (DATE); >> TRUE - -SET MODE Regular; -> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_timestamp.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_timestamp.sql index 5ddb3c9068..38e6ef835b 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_timestamp.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_timestamp.sql @@ -1,8 +1,11 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- +SET TIME ZONE '-8:00'; +> ok + SELECT CAST(CURRENT_TIMESTAMP AS TIMESTAMP(9)) = LOCALTIMESTAMP; >> TRUE @@ -12,6 +15,18 @@ SELECT CAST(CURRENT_TIMESTAMP(0) AS TIMESTAMP(9)) = LOCALTIMESTAMP(0); SELECT CAST(CURRENT_TIMESTAMP(9) AS TIMESTAMP(9)) = LOCALTIMESTAMP(9); >> TRUE +VALUES EXTRACT(TIMEZONE_HOUR FROM CURRENT_TIMESTAMP); +>> -8 + +SET TIME ZONE '5:00'; +> ok + +VALUES EXTRACT(TIMEZONE_HOUR FROM CURRENT_TIMESTAMP); +>> 5 + +SET TIME ZONE LOCAL; +> ok + @reconnect off SET AUTOCOMMIT OFF; @@ -108,3 +123,15 @@ SET AUTOCOMMIT ON; > ok @reconnect on + +SELECT GETDATE(); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +SELECT LOCALTIMESTAMP(3) = GETDATE(); +>> TRUE + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/date_trunc.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/date_trunc.sql index 0c65549dda..7d72d289d4 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/date_trunc.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/date_trunc.sql @@ -1,57 +1,65 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- +@reconnect off + +SET TIME ZONE '01:00'; +> ok + -- -- Test time unit in 'MICROSECONDS' -- SELECT DATE_TRUNC('MICROSECONDS', time '00:00:00.000'); ->> 1970-01-01 00:00:00 +>> 00:00:00 SELECT DATE_TRUNC('microseconds', time '00:00:00.000'); ->> 1970-01-01 00:00:00 +>> 00:00:00 + +SELECT DATE_TRUNC(microseconds, time '00:00:00.000'); +>> 00:00:00 SELECT DATE_TRUNC('MICROSECONDS', time '15:00:00'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('microseconds', time '15:00:00'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('MICROSECONDS', time '15:14:13'); ->> 1970-01-01 15:14:13 +>> 15:14:13 SELECT DATE_TRUNC('microseconds', time '15:14:13'); ->> 1970-01-01 15:14:13 +>> 15:14:13 SELECT DATE_TRUNC('MICROSECONDS', time '15:14:13.123456789'); ->> 1970-01-01 15:14:13.123456 +>> 15:14:13.123456 SELECT DATE_TRUNC('microseconds', time '15:14:13.123456789'); ->> 1970-01-01 15:14:13.123456 +>> 15:14:13.123456 SELECT DATE_TRUNC('MICROSECONDS', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 SELECT DATE_TRUNC('microseconds', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 SELECT DATE_TRUNC('MICROSECONDS', date '1970-01-01'); ->> 1970-01-01 00:00:00 +>> 1970-01-01 SELECT DATE_TRUNC('microseconds', date '1970-01-01'); ->> 1970-01-01 00:00:00 +>> 1970-01-01 -select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 15:14:13+00 -select DATE_TRUNC('microseconds', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('microseconds', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 15:14:13+00 -select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13.123456789'); +select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13.123456789+00'); >> 2015-05-29 15:14:13.123456+00 -select DATE_TRUNC('microseconds', timestamp 
with time zone '2015-05-29 15:14:13.123456789'); +select DATE_TRUNC('microseconds', timestamp with time zone '2015-05-29 15:14:13.123456789+00'); >> 2015-05-29 15:14:13.123456+00 select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13-06'); @@ -102,79 +110,55 @@ SELECT DATE_TRUNC('microseconds', timestamp '2015-05-29 00:00:00'); SELECT DATE_TRUNC('MICROSECONDS', timestamp '2015-05-29 00:00:00'); >> 2015-05-29 00:00:00 -SELECT DATE_TRUNC('microseconds', '2015-05-29 15:14:13'); ->> 2015-05-29 15:14:13 - -SELECT DATE_TRUNC('MICROSECONDS', '2015-05-29 15:14:13'); ->> 2015-05-29 15:14:13 - -SELECT DATE_TRUNC('microseconds', '2015-05-29 15:14:13.123456789'); ->> 2015-05-29 15:14:13.123456 - -SELECT DATE_TRUNC('MICROSECONDS', '2015-05-29 15:14:13.123456789'); ->> 2015-05-29 15:14:13.123456 - -SELECT DATE_TRUNC('microseconds', '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('MICROSECONDS', '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('microseconds', '2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - -SELECT DATE_TRUNC('MICROSECONDS', '2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - -- -- Test time unit in 'MILLISECONDS' -- SELECT DATE_TRUNC('MILLISECONDS', time '00:00:00.000'); ->> 1970-01-01 00:00:00 +>> 00:00:00 SELECT DATE_TRUNC('milliseconds', time '00:00:00.000'); ->> 1970-01-01 00:00:00 +>> 00:00:00 SELECT DATE_TRUNC('MILLISECONDS', time '15:00:00'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('milliseconds', time '15:00:00'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('MILLISECONDS', time '15:14:13'); ->> 1970-01-01 15:14:13 +>> 15:14:13 SELECT DATE_TRUNC('milliseconds', time '15:14:13'); ->> 1970-01-01 15:14:13 +>> 15:14:13 SELECT DATE_TRUNC('MILLISECONDS', time '15:14:13.123456'); ->> 1970-01-01 15:14:13.123 +>> 15:14:13.123 SELECT DATE_TRUNC('milliseconds', time '15:14:13.123456'); ->> 1970-01-01 15:14:13.123 +>> 15:14:13.123 SELECT DATE_TRUNC('MILLISECONDS', date 
'2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 SELECT DATE_TRUNC('milliseconds', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 SELECT DATE_TRUNC('MILLISECONDS', date '1970-01-01'); ->> 1970-01-01 00:00:00 +>> 1970-01-01 SELECT DATE_TRUNC('milliseconds', date '1970-01-01'); ->> 1970-01-01 00:00:00 +>> 1970-01-01 -select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 15:14:13+00 -select DATE_TRUNC('milliseconds', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('milliseconds', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 15:14:13+00 -select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13.123456'); +select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13.123456+00'); >> 2015-05-29 15:14:13.123+00 -select DATE_TRUNC('milliseconds', timestamp with time zone '2015-05-29 15:14:13.123456'); +select DATE_TRUNC('milliseconds', timestamp with time zone '2015-05-29 15:14:13.123456+00'); >> 2015-05-29 15:14:13.123+00 select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13-06'); @@ -225,79 +209,55 @@ SELECT DATE_TRUNC('milliseconds', timestamp '2015-05-29 00:00:00'); SELECT DATE_TRUNC('MILLISECONDS', timestamp '2015-05-29 00:00:00'); >> 2015-05-29 00:00:00 -SELECT DATE_TRUNC('milliseconds', '2015-05-29 15:14:13'); ->> 2015-05-29 15:14:13 - -SELECT DATE_TRUNC('MILLISECONDS', '2015-05-29 15:14:13'); ->> 2015-05-29 15:14:13 - -SELECT DATE_TRUNC('milliseconds', '2015-05-29 15:14:13.123456'); ->> 2015-05-29 15:14:13.123 - -SELECT DATE_TRUNC('MILLISECONDS', '2015-05-29 15:14:13.123456'); ->> 2015-05-29 15:14:13.123 - -SELECT DATE_TRUNC('milliseconds', '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('MILLISECONDS', '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('milliseconds', 
'2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - -SELECT DATE_TRUNC('MILLISECONDS', '2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - -- -- Test time unit 'SECOND' -- SELECT DATE_TRUNC('SECOND', time '00:00:00.000'); ->> 1970-01-01 00:00:00 +>> 00:00:00 SELECT DATE_TRUNC('second', time '00:00:00.000'); ->> 1970-01-01 00:00:00 +>> 00:00:00 SELECT DATE_TRUNC('SECOND', time '15:00:00'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('second', time '15:00:00'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('SECOND', time '15:14:13'); ->> 1970-01-01 15:14:13 +>> 15:14:13 SELECT DATE_TRUNC('second', time '15:14:13'); ->> 1970-01-01 15:14:13 +>> 15:14:13 SELECT DATE_TRUNC('SECOND', time '15:14:13.123456'); ->> 1970-01-01 15:14:13 +>> 15:14:13 SELECT DATE_TRUNC('second', time '15:14:13.123456'); ->> 1970-01-01 15:14:13 +>> 15:14:13 SELECT DATE_TRUNC('SECOND', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 SELECT DATE_TRUNC('second', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 SELECT DATE_TRUNC('SECOND', date '1970-01-01'); ->> 1970-01-01 00:00:00 +>> 1970-01-01 SELECT DATE_TRUNC('second', date '1970-01-01'); ->> 1970-01-01 00:00:00 +>> 1970-01-01 -select DATE_TRUNC('SECOND', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('SECOND', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 15:14:13+00 -select DATE_TRUNC('second', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('second', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 15:14:13+00 -select DATE_TRUNC('SECOND', timestamp with time zone '2015-05-29 15:14:13.123456'); +select DATE_TRUNC('SECOND', timestamp with time zone '2015-05-29 15:14:13.123456+00'); >> 2015-05-29 15:14:13+00 -select DATE_TRUNC('second', timestamp with time zone '2015-05-29 15:14:13.123456'); +select DATE_TRUNC('second', timestamp with time zone '2015-05-29 15:14:13.123456+00'); >> 2015-05-29 15:14:13+00 select DATE_TRUNC('SECOND', 
timestamp with time zone '2015-05-29 15:14:13-06'); @@ -348,67 +308,43 @@ SELECT DATE_TRUNC('second', timestamp '2015-05-29 00:00:00'); SELECT DATE_TRUNC('SECOND', timestamp '2015-05-29 00:00:00'); >> 2015-05-29 00:00:00 -SELECT DATE_TRUNC('second', '2015-05-29 15:14:13'); ->> 2015-05-29 15:14:13 - -SELECT DATE_TRUNC('SECOND', '2015-05-29 15:14:13'); ->> 2015-05-29 15:14:13 - -SELECT DATE_TRUNC('second', '2015-05-29 15:14:13.123456'); ->> 2015-05-29 15:14:13 - -SELECT DATE_TRUNC('SECOND', '2015-05-29 15:14:13.123456'); ->> 2015-05-29 15:14:13 - -SELECT DATE_TRUNC('second', '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('SECOND', '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('second', '2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - -SELECT DATE_TRUNC('SECOND', '2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - -- -- Test time unit 'MINUTE' -- SELECT DATE_TRUNC('MINUTE', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 SELECT DATE_TRUNC('minute', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 SELECT DATE_TRUNC('MINUTE', time '15:00:00'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('minute', time '15:00:00'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('MINUTE', time '15:14:13'); ->> 1970-01-01 15:14:00 +>> 15:14:00 SELECT DATE_TRUNC('minute', time '15:14:13'); ->> 1970-01-01 15:14:00 +>> 15:14:00 SELECT DATE_TRUNC('MINUTE', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 SELECT DATE_TRUNC('minute', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 SELECT DATE_TRUNC('MINUTE', date '1970-01-01'); ->> 1970-01-01 00:00:00 +>> 1970-01-01 SELECT DATE_TRUNC('minute', date '1970-01-01'); ->> 1970-01-01 00:00:00 +>> 1970-01-01 -select DATE_TRUNC('MINUTE', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('MINUTE', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 15:14:00+00 -select DATE_TRUNC('minute', timestamp with time zone '2015-05-29 
15:14:13'); +select DATE_TRUNC('minute', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 15:14:00+00 select DATE_TRUNC('MINUTE', timestamp with time zone '2015-05-29 15:14:13-06'); @@ -423,79 +359,52 @@ select DATE_TRUNC('MINUTE', timestamp with time zone '2015-05-29 15:14:13+10'); select DATE_TRUNC('minute', timestamp with time zone '2015-05-29 15:14:13+10'); >> 2015-05-29 15:14:00+10 -SELECT DATE_TRUNC('minute', timestamp '2015-05-29 15:14:13'); ->> 2015-05-29 15:14:00 - SELECT DATE_TRUNC('MINUTE', timestamp '2015-05-29 15:14:13'); >> 2015-05-29 15:14:00 -SELECT DATE_TRUNC('minute', timestamp '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - SELECT DATE_TRUNC('MINUTE', timestamp '2015-05-29 15:00:00'); >> 2015-05-29 15:00:00 -SELECT DATE_TRUNC('minute', timestamp '2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - SELECT DATE_TRUNC('MINUTE', timestamp '2015-05-29 00:00:00'); >> 2015-05-29 00:00:00 -SELECT DATE_TRUNC('minute', '2015-05-29 15:14:13'); ->> 2015-05-29 15:14:00 - -SELECT DATE_TRUNC('MINUTE', '2015-05-29 15:14:13'); ->> 2015-05-29 15:14:00 - -SELECT DATE_TRUNC('minute', '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('MINUTE', '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('minute', '2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - -SELECT DATE_TRUNC('MINUTE', '2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - -- -- Test time unit 'HOUR' -- SELECT DATE_TRUNC('HOUR', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 SELECT DATE_TRUNC('hour', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 SELECT DATE_TRUNC('HOUR', time '15:00:00'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('hour', time '15:00:00'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('HOUR', time '15:14:13'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('hour', time '15:14:13'); ->> 1970-01-01 15:00:00 +>> 15:00:00 SELECT DATE_TRUNC('HOUR', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 
2015-05-29 SELECT DATE_TRUNC('hour', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 SELECT DATE_TRUNC('HOUR', date '1970-01-01'); ->> 1970-01-01 00:00:00 +>> 1970-01-01 SELECT DATE_TRUNC('hour', date '1970-01-01'); ->> 1970-01-01 00:00:00 +>> 1970-01-01 -select DATE_TRUNC('HOUR', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('HOUR', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 15:00:00+00 -select DATE_TRUNC('hour', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('hour', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 15:00:00+00 select DATE_TRUNC('HOUR', timestamp with time zone '2015-05-29 15:14:13-06'); @@ -528,44 +437,26 @@ SELECT DATE_TRUNC('hour', timestamp '2015-05-29 00:00:00'); SELECT DATE_TRUNC('HOUR', timestamp '2015-05-29 00:00:00'); >> 2015-05-29 00:00:00 -SELECT DATE_TRUNC('hour', '2015-05-29 15:14:13'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('HOUR', '2015-05-29 15:14:13'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('hour', '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('HOUR', '2015-05-29 15:00:00'); ->> 2015-05-29 15:00:00 - -SELECT DATE_TRUNC('hour', '2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - -SELECT DATE_TRUNC('HOUR', '2015-05-29 00:00:00'); ->> 2015-05-29 00:00:00 - -- -- Test time unit 'DAY' -- select DATE_TRUNC('day', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('DAY', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('day', time '15:14:13'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('DAY', time '15:14:13'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('day', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 select DATE_TRUNC('DAY', date '2015-05-29'); ->> 2015-05-29 00:00:00 +>> 2015-05-29 select DATE_TRUNC('day', timestamp '2015-05-29 15:14:13'); >> 2015-05-29 00:00:00 @@ -573,10 +464,10 @@ select DATE_TRUNC('day', timestamp 
'2015-05-29 15:14:13'); select DATE_TRUNC('DAY', timestamp '2015-05-29 15:14:13'); >> 2015-05-29 00:00:00 -select DATE_TRUNC('day', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('day', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 00:00:00+00 -select DATE_TRUNC('DAY', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('DAY', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-29 00:00:00+00 select DATE_TRUNC('day', timestamp with time zone '2015-05-29 05:14:13-06'); @@ -591,89 +482,70 @@ select DATE_TRUNC('day', timestamp with time zone '2015-05-29 15:14:13+10'); select DATE_TRUNC('DAY', timestamp with time zone '2015-05-29 15:14:13+10'); >> 2015-05-29 00:00:00+10 -select DATE_TRUNC('day', '2015-05-29 15:14:13'); ->> 2015-05-29 00:00:00 - -select DATE_TRUNC('DAY', '2015-05-29 15:14:13'); ->> 2015-05-29 00:00:00 - -- -- Test time unit 'WEEK' -- select DATE_TRUNC('week', time '00:00:00'); ->> 1969-12-29 00:00:00 +>> 00:00:00 select DATE_TRUNC('WEEK', time '00:00:00'); ->> 1969-12-29 00:00:00 +>> 00:00:00 select DATE_TRUNC('week', time '15:14:13'); ->> 1969-12-29 00:00:00 +>> 00:00:00 select DATE_TRUNC('WEEK', time '15:14:13'); ->> 1969-12-29 00:00:00 +>> 00:00:00 -select DATE_TRUNC('week', date '2015-05-28'); ->> 2015-05-25 00:00:00 +-- ISO_WEEK -select DATE_TRUNC('WEEK', date '2015-05-28'); ->> 2015-05-25 00:00:00 +SELECT DATE_TRUNC(ISO_WEEK, TIME '00:00:00'); +>> 00:00:00 -select DATE_TRUNC('week', timestamp '2015-05-29 15:14:13'); ->> 2015-05-25 00:00:00 +SELECT DATE_TRUNC(ISO_WEEK, TIME '15:14:13'); +>> 00:00:00 -select DATE_TRUNC('WEEK', timestamp '2015-05-29 15:14:13'); +SELECT DATE_TRUNC(ISO_WEEK, DATE '2015-05-28'); +>> 2015-05-25 + +SELECT DATE_TRUNC(ISO_WEEK, TIMESTAMP '2015-05-29 15:14:13'); >> 2015-05-25 00:00:00 -select DATE_TRUNC('week', timestamp with time zone '2015-05-29 15:14:13'); ->> 2015-05-25 00:00:00+00 +SELECT DATE_TRUNC(ISO_WEEK, TIMESTAMP '2018-03-14 00:00:00.000'); +>> 
2018-03-12 00:00:00 -select DATE_TRUNC('WEEK', timestamp with time zone '2015-05-29 15:14:13'); +SELECT DATE_TRUNC(ISO_WEEK, TIMESTAMP WITH TIME ZONE '2015-05-29 15:14:13+00'); >> 2015-05-25 00:00:00+00 -select DATE_TRUNC('week', timestamp with time zone '2015-05-29 05:14:13-06'); ->> 2015-05-25 00:00:00-06 - -select DATE_TRUNC('WEEK', timestamp with time zone '2015-05-29 05:14:13-06'); +SELECT DATE_TRUNC(ISO_WEEK, TIMESTAMP WITH TIME ZONE '2015-05-29 05:14:13-06'); >> 2015-05-25 00:00:00-06 -select DATE_TRUNC('week', timestamp with time zone '2015-05-29 15:14:13+10'); ->> 2015-05-25 00:00:00+10 - -select DATE_TRUNC('WEEK', timestamp with time zone '2015-05-29 15:14:13+10'); +SELECT DATE_TRUNC(ISO_WEEK, TIMESTAMP WITH TIME ZONE '2015-05-29 15:14:13+10'); >> 2015-05-25 00:00:00+10 -select DATE_TRUNC('week', '2015-05-29 15:14:13'); ->> 2015-05-25 00:00:00 - -select DATE_TRUNC('WEEK', '2015-05-29 15:14:13'); ->> 2015-05-25 00:00:00 - -SELECT DATE_TRUNC('WEEK', '2018-03-14 00:00:00.000'); ->> 2018-03-12 00:00:00 - -SELECT DATE_TRUNC('week', '2018-03-14 00:00:00.000'); ->> 2018-03-12 00:00:00 - -- -- Test time unit 'MONTH' -- select DATE_TRUNC('month', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('MONTH', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 + +select DATE_TRUNC(MONTH, time '00:00:00'); +>> 00:00:00 select DATE_TRUNC('month', time '15:14:13'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('MONTH', time '15:14:13'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('month', date '2015-05-28'); ->> 2015-05-01 00:00:00 +>> 2015-05-01 select DATE_TRUNC('MONTH', date '2015-05-28'); ->> 2015-05-01 00:00:00 +>> 2015-05-01 select DATE_TRUNC('month', timestamp '2015-05-29 15:14:13'); >> 2015-05-01 00:00:00 @@ -681,10 +553,13 @@ select DATE_TRUNC('month', timestamp '2015-05-29 15:14:13'); select DATE_TRUNC('MONTH', timestamp '2015-05-29 15:14:13'); >> 2015-05-01 00:00:00 -select DATE_TRUNC('month', timestamp with time 
zone '2015-05-29 15:14:13'); +SELECT DATE_TRUNC('MONTH', timestamp '2018-03-14 00:00:00.000'); +>> 2018-03-01 00:00:00 + +select DATE_TRUNC('month', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-01 00:00:00+00 -select DATE_TRUNC('MONTH', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('MONTH', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-05-01 00:00:00+00 select DATE_TRUNC('month', timestamp with time zone '2015-05-29 05:14:13-06'); @@ -699,50 +574,26 @@ select DATE_TRUNC('month', timestamp with time zone '2015-05-29 15:14:13+10'); select DATE_TRUNC('MONTH', timestamp with time zone '2015-05-29 15:14:13+10'); >> 2015-05-01 00:00:00+10 -select DATE_TRUNC('month', '2015-05-29 15:14:13'); ->> 2015-05-01 00:00:00 - -select DATE_TRUNC('MONTH', '2015-05-29 15:14:13'); ->> 2015-05-01 00:00:00 - -SELECT DATE_TRUNC('MONTH', '2018-03-14 00:00:00.000'); ->> 2018-03-01 00:00:00 - -SELECT DATE_TRUNC('month', '2018-03-14 00:00:00.000'); ->> 2018-03-01 00:00:00 - -SELECT DATE_TRUNC('month', '2015-05-29 15:14:13'); ->> 2015-05-01 00:00:00 - -SELECT DATE_TRUNC('MONTH', '2015-05-29 15:14:13'); ->> 2015-05-01 00:00:00 - -SELECT DATE_TRUNC('month', '2015-05-01 15:14:13'); ->> 2015-05-01 00:00:00 - -SELECT DATE_TRUNC('MONTH', '2015-05-01 15:14:13'); ->> 2015-05-01 00:00:00 - -- -- Test time unit 'QUARTER' -- select DATE_TRUNC('quarter', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('QUARTER', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('quarter', time '15:14:13'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('QUARTER', time '15:14:13'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('quarter', date '2015-05-28'); ->> 2015-04-01 00:00:00 +>> 2015-04-01 select DATE_TRUNC('QUARTER', date '2015-05-28'); ->> 2015-04-01 00:00:00 +>> 2015-04-01 select DATE_TRUNC('quarter', timestamp '2015-05-29 15:14:13'); >> 2015-04-01 00:00:00 @@ -750,92 +601,65 @@ select 
DATE_TRUNC('quarter', timestamp '2015-05-29 15:14:13'); select DATE_TRUNC('QUARTER', timestamp '2015-05-29 15:14:13'); >> 2015-04-01 00:00:00 -select DATE_TRUNC('quarter', timestamp with time zone '2015-05-29 15:14:13'); ->> 2015-04-01 00:00:00+00 - -select DATE_TRUNC('QUARTER', timestamp with time zone '2015-05-29 15:14:13'); ->> 2015-04-01 00:00:00+00 - -select DATE_TRUNC('quarter', timestamp with time zone '2015-05-29 05:14:13-06'); ->> 2015-04-01 00:00:00-06 - -select DATE_TRUNC('QUARTER', timestamp with time zone '2015-05-29 05:14:13-06'); ->> 2015-04-01 00:00:00-06 - -select DATE_TRUNC('quarter', timestamp with time zone '2015-05-29 15:14:13+10'); ->> 2015-04-01 00:00:00+10 - -select DATE_TRUNC('QUARTER', timestamp with time zone '2015-05-29 15:14:13+10'); ->> 2015-04-01 00:00:00+10 - -select DATE_TRUNC('quarter', '2015-05-29 15:14:13'); ->> 2015-04-01 00:00:00 - -select DATE_TRUNC('QUARTER', '2015-05-29 15:14:13'); ->> 2015-04-01 00:00:00 - -SELECT DATE_TRUNC('QUARTER', '2018-03-14 00:00:00.000'); ->> 2018-01-01 00:00:00 - -SELECT DATE_TRUNC('quarter', '2018-03-14 00:00:00.000'); +SELECT DATE_TRUNC('QUARTER', timestamp '2018-03-14 00:00:00.000'); >> 2018-01-01 00:00:00 -SELECT DATE_TRUNC('quarter', '2015-05-29 15:14:13'); ->> 2015-04-01 00:00:00 - -SELECT DATE_TRUNC('QUARTER', '2015-05-29 15:14:13'); +SELECT DATE_TRUNC('QUARTER', timestamp '2015-05-29 15:14:13'); >> 2015-04-01 00:00:00 -SELECT DATE_TRUNC('quarter', '2015-05-01 15:14:13'); +SELECT DATE_TRUNC('QUARTER', timestamp '2015-05-01 15:14:13'); >> 2015-04-01 00:00:00 -SELECT DATE_TRUNC('QUARTER', '2015-05-01 15:14:13'); ->> 2015-04-01 00:00:00 - -SELECT DATE_TRUNC('quarter', '2015-07-29 15:14:13'); ->> 2015-07-01 00:00:00 - -SELECT DATE_TRUNC('QUARTER', '2015-07-29 15:14:13'); +SELECT DATE_TRUNC('QUARTER', timestamp '2015-07-29 15:14:13'); >> 2015-07-01 00:00:00 -SELECT DATE_TRUNC('quarter', '2015-09-29 15:14:13'); +SELECT DATE_TRUNC('QUARTER', timestamp '2015-09-29 15:14:13'); >> 2015-07-01 00:00:00 
-SELECT DATE_TRUNC('QUARTER', '2015-09-29 15:14:13'); ->> 2015-07-01 00:00:00 - -SELECT DATE_TRUNC('quarter', '2015-10-29 15:14:13'); +SELECT DATE_TRUNC('QUARTER', timestamp '2015-10-29 15:14:13'); >> 2015-10-01 00:00:00 -SELECT DATE_TRUNC('QUARTER', '2015-10-29 15:14:13'); +SELECT DATE_TRUNC('QUARTER', timestamp '2015-12-29 15:14:13'); >> 2015-10-01 00:00:00 -SELECT DATE_TRUNC('quarter', '2015-12-29 15:14:13'); ->> 2015-10-01 00:00:00 +select DATE_TRUNC('quarter', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-04-01 00:00:00+00 -SELECT DATE_TRUNC('QUARTER', '2015-12-29 15:14:13'); ->> 2015-10-01 00:00:00 +select DATE_TRUNC('QUARTER', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-04-01 00:00:00+00 + +select DATE_TRUNC('quarter', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2015-04-01 00:00:00-06 + +select DATE_TRUNC('QUARTER', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2015-04-01 00:00:00-06 + +select DATE_TRUNC('quarter', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-04-01 00:00:00+10 + +select DATE_TRUNC('QUARTER', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-04-01 00:00:00+10 -- -- Test time unit 'YEAR' -- select DATE_TRUNC('year', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('YEAR', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('year', time '15:14:13'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('YEAR', time '15:14:13'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('year', date '2015-05-28'); ->> 2015-01-01 00:00:00 +>> 2015-01-01 select DATE_TRUNC('YEAR', date '2015-05-28'); ->> 2015-01-01 00:00:00 +>> 2015-01-01 select DATE_TRUNC('year', timestamp '2015-05-29 15:14:13'); >> 2015-01-01 00:00:00 @@ -843,10 +667,10 @@ select DATE_TRUNC('year', timestamp '2015-05-29 15:14:13'); select DATE_TRUNC('YEAR', timestamp '2015-05-29 15:14:13'); >> 2015-01-01 00:00:00 -select DATE_TRUNC('year', timestamp with time 
zone '2015-05-29 15:14:13'); +select DATE_TRUNC('year', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-01-01 00:00:00+00 -select DATE_TRUNC('YEAR', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('YEAR', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2015-01-01 00:00:00+00 select DATE_TRUNC('year', timestamp with time zone '2015-05-29 05:14:13-06'); @@ -861,32 +685,26 @@ select DATE_TRUNC('year', timestamp with time zone '2015-05-29 15:14:13+10'); select DATE_TRUNC('YEAR', timestamp with time zone '2015-05-29 15:14:13+10'); >> 2015-01-01 00:00:00+10 -SELECT DATE_TRUNC('year', '2015-05-29 15:14:13'); ->> 2015-01-01 00:00:00 - -SELECT DATE_TRUNC('YEAR', '2015-05-29 15:14:13'); ->> 2015-01-01 00:00:00 - -- -- Test time unit 'DECADE' -- select DATE_TRUNC('decade', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('DECADE', time '00:00:00'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('decade', time '15:14:13'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('DECADE', time '15:14:13'); ->> 1970-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('decade', date '2015-05-28'); ->> 2010-01-01 00:00:00 +>> 2010-01-01 select DATE_TRUNC('DECADE', date '2015-05-28'); ->> 2010-01-01 00:00:00 +>> 2010-01-01 select DATE_TRUNC('decade', timestamp '2015-05-29 15:14:13'); >> 2010-01-01 00:00:00 @@ -894,10 +712,13 @@ select DATE_TRUNC('decade', timestamp '2015-05-29 15:14:13'); select DATE_TRUNC('DECADE', timestamp '2015-05-29 15:14:13'); >> 2010-01-01 00:00:00 -select DATE_TRUNC('decade', timestamp with time zone '2015-05-29 15:14:13'); +SELECT DATE_TRUNC('decade', timestamp '2010-05-29 15:14:13'); +>> 2010-01-01 00:00:00 + +select DATE_TRUNC('decade', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2010-01-01 00:00:00+00 -select DATE_TRUNC('DECADE', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('DECADE', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2010-01-01 
00:00:00+00 select DATE_TRUNC('decade', timestamp with time zone '2015-05-29 05:14:13-06'); @@ -912,38 +733,26 @@ select DATE_TRUNC('decade', timestamp with time zone '2015-05-29 15:14:13+10'); select DATE_TRUNC('DECADE', timestamp with time zone '2015-05-29 15:14:13+10'); >> 2010-01-01 00:00:00+10 -SELECT DATE_TRUNC('decade', '2015-05-29 15:14:13'); ->> 2010-01-01 00:00:00 - -SELECT DATE_TRUNC('DECADE', '2015-05-29 15:14:13'); ->> 2010-01-01 00:00:00 - -SELECT DATE_TRUNC('decade', '2010-05-29 15:14:13'); ->> 2010-01-01 00:00:00 - -SELECT DATE_TRUNC('DECADE', '2010-05-29 15:14:13'); ->> 2010-01-01 00:00:00 - -- -- Test time unit 'CENTURY' -- select DATE_TRUNC('century', time '00:00:00'); ->> 1901-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('CENTURY', time '00:00:00'); ->> 1901-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('century', time '15:14:13'); ->> 1901-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('CENTURY', time '15:14:13'); ->> 1901-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('century', date '2015-05-28'); ->> 2001-01-01 00:00:00 +>> 2001-01-01 select DATE_TRUNC('CENTURY', date '2015-05-28'); ->> 2001-01-01 00:00:00 +>> 2001-01-01 select DATE_TRUNC('century', timestamp '2015-05-29 15:14:13'); >> 2001-01-01 00:00:00 @@ -951,10 +760,19 @@ select DATE_TRUNC('century', timestamp '2015-05-29 15:14:13'); select DATE_TRUNC('CENTURY', timestamp '2015-05-29 15:14:13'); >> 2001-01-01 00:00:00 -select DATE_TRUNC('century', timestamp with time zone '2015-05-29 15:14:13'); +SELECT DATE_TRUNC('century', timestamp '2199-05-29 15:14:13'); +>> 2101-01-01 00:00:00 + +SELECT DATE_TRUNC('CENTURY', timestamp '2000-05-29 15:14:13'); +>> 1901-01-01 00:00:00 + +SELECT DATE_TRUNC('century', timestamp '2001-05-29 15:14:13'); +>> 2001-01-01 00:00:00 + +select DATE_TRUNC('century', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2001-01-01 00:00:00+00 -select DATE_TRUNC('CENTURY', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('CENTURY', timestamp with 
time zone '2015-05-29 15:14:13+00'); >> 2001-01-01 00:00:00+00 select DATE_TRUNC('century', timestamp with time zone '2015-05-29 05:14:13-06'); @@ -969,50 +787,26 @@ select DATE_TRUNC('century', timestamp with time zone '2015-05-29 15:14:13+10'); select DATE_TRUNC('CENTURY', timestamp with time zone '2015-05-29 15:14:13+10'); >> 2001-01-01 00:00:00+10 -SELECT DATE_TRUNC('century', '2015-05-29 15:14:13'); ->> 2001-01-01 00:00:00 - -SELECT DATE_TRUNC('CENTURY', '2015-05-29 15:14:13'); ->> 2001-01-01 00:00:00 - -SELECT DATE_TRUNC('century', '2199-05-29 15:14:13'); ->> 2101-01-01 00:00:00 - -SELECT DATE_TRUNC('CENTURY', '2199-05-29 15:14:13'); ->> 2101-01-01 00:00:00 - -SELECT DATE_TRUNC('century', '2000-05-29 15:14:13'); ->> 1901-01-01 00:00:00 - -SELECT DATE_TRUNC('CENTURY', '2000-05-29 15:14:13'); ->> 1901-01-01 00:00:00 - -SELECT DATE_TRUNC('century', '2001-05-29 15:14:13'); ->> 2001-01-01 00:00:00 - -SELECT DATE_TRUNC('CENTURY', '2001-05-29 15:14:13'); ->> 2001-01-01 00:00:00 - -- -- Test time unit 'MILLENNIUM' -- select DATE_TRUNC('millennium', time '00:00:00'); ->> 1001-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('MILLENNIUM', time '00:00:00'); ->> 1001-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('millennium', time '15:14:13'); ->> 1001-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('MILLENNIUM', time '15:14:13'); ->> 1001-01-01 00:00:00 +>> 00:00:00 select DATE_TRUNC('millennium', date '2015-05-28'); ->> 2001-01-01 00:00:00 +>> 2001-01-01 select DATE_TRUNC('MILLENNIUM', date '2015-05-28'); ->> 2001-01-01 00:00:00 +>> 2001-01-01 select DATE_TRUNC('millennium', timestamp '2015-05-29 15:14:13'); >> 2001-01-01 00:00:00 @@ -1020,10 +814,13 @@ select DATE_TRUNC('millennium', timestamp '2015-05-29 15:14:13'); select DATE_TRUNC('MILLENNIUM', timestamp '2015-05-29 15:14:13'); >> 2001-01-01 00:00:00 -select DATE_TRUNC('millennium', timestamp with time zone '2015-05-29 15:14:13'); +SELECT DATE_TRUNC('millennium', timestamp '2000-05-29 15:14:13'); +>> 1001-01-01 
00:00:00 + +select DATE_TRUNC('millennium', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2001-01-01 00:00:00+00 -select DATE_TRUNC('MILLENNIUM', timestamp with time zone '2015-05-29 15:14:13'); +select DATE_TRUNC('MILLENNIUM', timestamp with time zone '2015-05-29 15:14:13+00'); >> 2001-01-01 00:00:00+00 select DATE_TRUNC('millennium', timestamp with time zone '2015-05-29 05:14:13-06'); @@ -1038,24 +835,6 @@ select DATE_TRUNC('millennium', timestamp with time zone '2015-05-29 15:14:13+10 select DATE_TRUNC('MILLENNIUM', timestamp with time zone '2015-05-29 15:14:13+10'); >> 2001-01-01 00:00:00+10 -SELECT DATE_TRUNC('millennium', '2015-05-29 15:14:13'); ->> 2001-01-01 00:00:00 - -SELECT DATE_TRUNC('MILLENNIUM', '2015-05-29 15:14:13'); ->> 2001-01-01 00:00:00 - -SELECT DATE_TRUNC('millennium', '2001-05-29 15:14:13'); ->> 2001-01-01 00:00:00 - -SELECT DATE_TRUNC('MILLENNIUM', '2001-05-29 15:14:13'); ->> 2001-01-01 00:00:00 - -SELECT DATE_TRUNC('millennium', '2000-05-29 15:14:13'); ->> 1001-01-01 00:00:00 - -SELECT DATE_TRUNC('MILLENNIUM', '2000-05-29 15:14:13'); ->> 1001-01-01 00:00:00 - -- -- Test unhandled time unit and bad date -- @@ -1069,4 +848,78 @@ SELECT DATE_TRUNC('', ''); > exception INVALID_VALUE_2 SELECT DATE_TRUNC('YEAR', ''); -> exception INVALID_DATETIME_CONSTANT_2 +> exception INVALID_VALUE_2 + +SELECT DATE_TRUNC('microseconds', '2015-05-29 15:14:13'); +> exception INVALID_VALUE_2 + +SET MODE PostgreSQL; +> ok + +select DATE_TRUNC('YEAR', DATE '2015-05-28'); +>> 2015-01-01 00:00:00+01 + +SET MODE Regular; +> ok + +SELECT DATE_TRUNC(DECADE, DATE '0000-01-20'); +>> 0000-01-01 + +SELECT DATE_TRUNC(DECADE, DATE '-1-12-31'); +>> -0010-01-01 + +SELECT DATE_TRUNC(DECADE, DATE '-10-01-01'); +>> -0010-01-01 + +SELECT DATE_TRUNC(DECADE, DATE '-11-12-31'); +>> -0020-01-01 + +SELECT DATE_TRUNC(CENTURY, DATE '0001-01-20'); +>> 0001-01-01 + +SELECT DATE_TRUNC(CENTURY, DATE '0000-12-31'); +>> -0099-01-01 + +SELECT DATE_TRUNC(CENTURY, DATE '-1-12-31'); +>> 
-0099-01-01 + +SELECT DATE_TRUNC(CENTURY, DATE '-99-01-01'); +>> -0099-01-01 + +SELECT DATE_TRUNC(CENTURY, DATE '-100-12-31'); +>> -0199-01-01 + +SELECT DATE_TRUNC(MILLENNIUM, DATE '0001-01-20'); +>> 0001-01-01 + +SELECT DATE_TRUNC(MILLENNIUM, DATE '0000-12-31'); +>> -0999-01-01 + +SELECT DATE_TRUNC(MILLENNIUM, DATE '-1-12-31'); +>> -0999-01-01 + +SELECT DATE_TRUNC(MILLENNIUM, DATE '-999-01-01'); +>> -0999-01-01 + +SELECT DATE_TRUNC(MILLENNIUM, DATE '-1000-12-31'); +>> -1999-01-01 + +-- ISO_WEEK_YEAR + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2019-12-30'); +>> 2019-12-30 + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2020-01-01'); +>> 2019-12-30 + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2020-12-01'); +>> 2019-12-30 + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2020-12-31'); +>> 2019-12-30 + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2017-01-01'); +>> 2016-01-04 + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2017-01-02'); +>> 2017-01-02 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/dateadd.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/dateadd.sql index cb69a5f773..6ce6d4d43e 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/dateadd.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/dateadd.sql @@ -1,23 +1,14 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select dateadd('month', 1, timestamp '2003-01-31 10:20:30.012345678') from test; +select dateadd('month', 1, timestamp '2003-01-31 10:20:30.012345678'); >> 2003-02-28 10:20:30.012345678 -select dateadd('year', -1, timestamp '2000-02-29 10:20:30.012345678') from test; +select dateadd('year', -1, timestamp '2000-02-29 10:20:30.012345678'); >> 1999-02-28 10:20:30.012345678 -drop table test; -> ok - create table test(d date, t time, ts timestamp); > ok @@ -27,9 +18,6 @@ insert into test values(date '2001-01-01', time '01:00:00', timestamp '2010-01-0 select ts + t from test; >> 2010-01-01 01:00:00 -select ts + t + t - t x from test; ->> 2010-01-01 01:00:00 - select ts + t * 0.5 x from test; >> 2010-01-01 00:30:00 @@ -39,30 +27,48 @@ select ts + 0.5 x from test; select ts - 1.5 x from test; >> 2009-12-30 12:00:00 -select ts + 0.5 * t + t - t x from test; ->> 2010-01-01 00:30:00 - select ts + t / 0.5 x from test; >> 2010-01-01 02:00:00 -select d + t, t + d - t x from test; -> T + D X -> ------------------- ------------------- -> 2001-01-01 01:00:00 2001-01-01 00:00:00 -> rows: 1 +VALUES TIME '04:00:00' + TIME '20:03:30.123'; +>> 00:03:30.123 + +VALUES TIME '04:00:00' + TIME WITH TIME ZONE '20:03:30.123+05'; +>> 00:03:30.123+05 + +VALUES TIME WITH TIME ZONE '04:00:00+08' + TIME '20:03:30.123'; +>> 00:03:30.123+08 + +VALUES TIME WITH TIME ZONE '04:00:00+08' + TIME WITH TIME ZONE '20:03:30.123+05'; +> exception FEATURE_NOT_SUPPORTED_1 + +VALUES DATE '2005-03-04' + TIME '20:03:30.123'; +>> 2005-03-04 20:03:30.123 + +VALUES DATE '2005-03-04' + TIME WITH TIME ZONE '20:03:30.123+05'; +>> 2005-03-04 20:03:30.123+05 + +VALUES TIMESTAMP '2005-03-04 04:00:00' + TIME '20:03:30.123'; +>> 2005-03-05 00:03:30.123 + +VALUES TIMESTAMP '2005-03-04 04:00:00' + TIME WITH TIME ZONE '20:03:30.123+05'; +>> 2005-03-05 
00:03:30.123+05 + +VALUES TIMESTAMP WITH TIME ZONE '2005-03-04 04:00:00+08' + TIME '20:03:30.123'; +>> 2005-03-05 00:03:30.123+08 + +VALUES TIMESTAMP WITH TIME ZONE '2005-03-04 04:00:00+08' + TIME WITH TIME ZONE '20:03:30.123+05'; +> exception FEATURE_NOT_SUPPORTED_1 select 1 + d + 1, d - 1, 2 + ts + 2, ts - 2 from test; -> DATEADD('DAY', 1, DATEADD('DAY', 1, D)) DATEADD('DAY', -1, D) DATEADD('DAY', 2, DATEADD('DAY', 2, TS)) DATEADD('DAY', -2, TS) -> --------------------------------------- --------------------- ---------------------------------------- ---------------------- -> 2001-01-03 2000-12-31 2010-01-05 00:00:00 2009-12-30 00:00:00 +> DATEADD(DAY, 1, DATEADD(DAY, 1, D)) DATEADD(DAY, -1, D) DATEADD(DAY, 2, DATEADD(DAY, 2, TS)) DATEADD(DAY, -2, TS) +> ----------------------------------- ------------------- ------------------------------------ -------------------- +> 2001-01-03 2000-12-31 2010-01-05 00:00:00 2009-12-30 00:00:00 > rows: 1 select 1 + d + t + 1 from test; >> 2001-01-03 01:00:00 -select ts - t - 2 from test; ->> 2009-12-29 23:00:00 - drop table test; > ok @@ -105,8 +111,32 @@ SELECT TIMESTAMPADD('TIMEZONE_HOUR', 1, TIMESTAMP WITH TIME ZONE '2010-01-01 10: SELECT TIMESTAMPADD('TIMEZONE_MINUTE', -45, TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00+07:30'); >> 2010-01-01 10:00:00+06:45 +SELECT TIMESTAMPADD('TIMEZONE_SECOND', -45, TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00+07:30'); +>> 2010-01-01 10:00:00+07:29:15 + +SELECT TIMESTAMPADD('TIMEZONE_HOUR', 1, TIME WITH TIME ZONE '10:00:00+07:30'); +>> 10:00:00+08:30 + +SELECT TIMESTAMPADD('TIMEZONE_MINUTE', -45, TIME WITH TIME ZONE '10:00:00+07:30'); +>> 10:00:00+06:45 + SELECT DATEADD(HOUR, 1, TIME '23:00:00'); >> 00:00:00 +SELECT DATEADD(HOUR, 1, TIME WITH TIME ZONE '21:00:00+01'); +>> 22:00:00+01 + +SELECT DATEADD(HOUR, 1, TIME WITH TIME ZONE '23:00:00+01'); +>> 00:00:00+01 + SELECT D FROM (SELECT '2010-01-01' D) WHERE D IN (SELECT D1 - 1 FROM (SELECT DATE '2010-01-02' D1)); >> 2010-01-01 + +SELECT 
DATEADD(MILLENNIUM, 1, DATE '2000-02-29'); +>> 3000-02-28 + +SELECT DATEADD(CENTURY, 1, DATE '2000-02-29'); +>> 2100-02-28 + +SELECT DATEADD(DECADE, 1, DATE '2000-02-29'); +>> 2010-02-28 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/datediff.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/datediff.sql index 61685e6a90..15b60523ba 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/datediff.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/datediff.sql @@ -1,54 +1,48 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select datediff('yy', timestamp '2003-12-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') from test; +select datediff('yy', timestamp '2003-12-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); >> 1 -select datediff('year', timestamp '2003-12-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') from test; +select datediff('year', timestamp '2003-12-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); >> 1 -select datediff('mm', timestamp '2003-11-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') from test; +select datediff('mm', timestamp '2003-11-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); >> 2 -select datediff('month', timestamp '2003-11-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') from test; +select datediff('month', timestamp '2003-11-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); >> 2 -select datediff('dd', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-05 10:00:00.0') from test; +select datediff('dd', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-05 10:00:00.0'); >> 4 
-select datediff('day', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-05 10:00:00.0') from test; +select datediff('day', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-05 10:00:00.0'); >> 4 -select datediff('hh', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-02 10:00:00.0') from test; +select datediff('hh', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-02 10:00:00.0'); >> 24 -select datediff('hour', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-02 10:00:00.0') from test; +select datediff('hour', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-02 10:00:00.0'); >> 24 -select datediff('mi', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') from test; +select datediff('mi', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); >> -20 -select datediff('minute', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') from test; +select datediff('minute', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); >> -20 -select datediff('ss', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0') from test; +select datediff('ss', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0'); >> 1 -select datediff('second', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0') from test; +select datediff('second', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0'); >> 1 -select datediff('ms', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0') from test; +select datediff('ms', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0'); >> 500 -select datediff('millisecond', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0') from test; +select datediff('millisecond', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0'); >> 500 SELECT DATEDIFF('SECOND', '1900-01-01 00:00:00.001', '1900-01-01 00:00:00.002'), DATEDIFF('SECOND', '2000-01-01 
00:00:00.001', '2000-01-01 00:00:00.002'); @@ -130,41 +124,23 @@ SELECT DATEDIFF('NANOSECOND', '2006-01-01 00:00:00.0000000', '2006-01-01 00:00:0 > 123456789 123456789 86400123456789 > rows: 1 -SELECT DATEDIFF('WEEK', DATE '2018-02-02', DATE '2018-02-03'), DATEDIFF('ISO_WEEK', DATE '2018-02-02', DATE '2018-02-03'); -> 0 0 -> - - -> 0 0 -> rows: 1 +SELECT DATEDIFF('ISO_WEEK', DATE '2018-02-02', DATE '2018-02-03'); +>> 0 -SELECT DATEDIFF('WEEK', DATE '2018-02-03', DATE '2018-02-04'), DATEDIFF('ISO_WEEK', DATE '2018-02-03', DATE '2018-02-04'); -> 1 0 -> - - -> 1 0 -> rows: 1 +SELECT DATEDIFF('ISO_WEEK', DATE '2018-02-03', DATE '2018-02-04'); +>> 0 -SELECT DATEDIFF('WEEK', DATE '2018-02-04', DATE '2018-02-05'), DATEDIFF('ISO_WEEK', DATE '2018-02-04', DATE '2018-02-05'); -> 0 1 -> - - -> 0 1 -> rows: 1 +SELECT DATEDIFF('ISO_WEEK', DATE '2018-02-04', DATE '2018-02-05'); +>> 1 -SELECT DATEDIFF('WEEK', DATE '2018-02-05', DATE '2018-02-06'), DATEDIFF('ISO_WEEK', DATE '2018-02-05', DATE '2018-02-06'); -> 0 0 -> - - -> 0 0 -> rows: 1 +SELECT DATEDIFF('ISO_WEEK', DATE '2018-02-05', DATE '2018-02-06'); +>> 0 -SELECT DATEDIFF('WEEK', DATE '1969-12-27', DATE '1969-12-28'), DATEDIFF('ISO_WEEK', DATE '1969-12-27', DATE '1969-12-28'); -> 1 0 -> - - -> 1 0 -> rows: 1 +SELECT DATEDIFF('ISO_WEEK', DATE '1969-12-27', DATE '1969-12-28'); +>> 0 -SELECT DATEDIFF('WEEK', DATE '1969-12-28', DATE '1969-12-29'), DATEDIFF('ISO_WEEK', DATE '1969-12-28', DATE '1969-12-29'); -> 0 1 -> - - -> 0 1 -> rows: 1 +SELECT DATEDIFF('ISO_WEEK', DATE '1969-12-28', DATE '1969-12-29'); +>> 1 SELECT DATEDIFF('QUARTER', DATE '2009-12-30', DATE '2009-12-31'); >> 0 @@ -189,6 +165,18 @@ SELECT DATEDIFF('TIMEZONE_MINUTE', TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00 TIMESTAMP WITH TIME ZONE '2012-02-02 12:00:00+02'); >> 45 +SELECT DATEDIFF('TIMEZONE_SECOND', TIMESTAMP WITH TIME ZONE '1880-01-01 10:00:00-07:52:58', + TIMESTAMP WITH TIME ZONE '1890-02-02 12:00:00-08'); +>> -422 + +SELECT DATEDIFF('TIMEZONE_HOUR', 
TIME WITH TIME ZONE '10:00:00+01', + TIME WITH TIME ZONE '12:00:00+02'); +>> 1 + +SELECT DATEDIFF('TIMEZONE_MINUTE', TIME WITH TIME ZONE '10:00:00+01:15', + TIME WITH TIME ZONE '12:00:00+02'); +>> 45 + select datediff('HOUR', timestamp '2007-01-06 10:00:00Z', '2007-01-06 10:00:00Z'); >> 0 @@ -212,3 +200,30 @@ select timestampdiff(YEAR,'2017-01-01','2017-12-31 23:59:59'); select timestampdiff(MINUTE,'2003-02-01','2003-05-01 12:05:55'); >> 128885 + +SELECT DATEDIFF(MILLENNIUM, DATE '2000-12-31', DATE '2001-01-01'); +>> 1 + +SELECT DATEDIFF(MILLENNIUM, DATE '2001-01-01', DATE '3000-12-31'); +>> 0 + +SELECT DATEDIFF(MILLENNIUM, DATE '2001-01-01', DATE '3001-01-01'); +>> 1 + +SELECT DATEDIFF(CENTURY, DATE '2000-12-31', DATE '2001-01-01'); +>> 1 + +SELECT DATEDIFF(CENTURY, DATE '2001-01-01', DATE '2100-12-31'); +>> 0 + +SELECT DATEDIFF(CENTURY, DATE '2001-01-01', DATE '2101-01-01'); +>> 1 + +SELECT DATEDIFF(DECADE, DATE '2009-12-31', DATE '2010-01-01'); +>> 1 + +SELECT DATEDIFF(DECADE, DATE '2010-01-01', DATE '2019-12-31'); +>> 0 + +SELECT DATEDIFF(DECADE, DATE '2010-01-01', DATE '2020-01-01'); +>> 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-month.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-month.sql index 2e4bc9a598..609770c248 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-month.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-month.sql @@ -1,20 +1,11 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select dayofmonth(date '2005-09-12') from test; +select dayofmonth(date '2005-09-12'); >> 12 -drop table test; -> ok - create table test(ts timestamp with time zone); > ok diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-week.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-week.sql index 2b316a44cf..6e71c05740 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-week.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-week.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select dayofweek(date '2005-09-12') from test; ->> 2 +SELECT DAYOFWEEK(DATE '2005-09-12') = EXTRACT(DAY_OF_WEEK FROM DATE '2005-09-12'); +>> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-year.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-year.sql index 81545368a9..3d7c68e3c9 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-year.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-year.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select dayofyear(date '2005-01-01') d1 from test; +select dayofyear(date '2005-01-01') d1; >> 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/dayname.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/dayname.sql index 6eab8337d5..743867d2dc 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/dayname.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/dayname.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select dayname(date '2005-09-12') from test; +select dayname(date '2005-09-12'); >> Monday diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/extract.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/extract.sql index 05d30367eb..33918e95ea 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/extract.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/extract.sql @@ -1,8 +1,14 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- +SELECT EXTRACT(NANOSECOND FROM TIME '10:00:00.123456789') IS OF (INTEGER); +>> TRUE + +SELECT EXTRACT(EPOCH FROM TIME '01:00:00') IS OF (NUMERIC); +>> TRUE + SELECT EXTRACT (MICROSECOND FROM TIME '10:00:00.123456789'), EXTRACT (MCS FROM TIMESTAMP '2015-01-01 11:22:33.987654321'); > 123456 987654 @@ -86,15 +92,21 @@ SELECT EXTRACT(TIMEZONE_MINUTE FROM TIMESTAMP WITH TIME ZONE '2010-01-02 5:00:00 SELECT EXTRACT(TIMEZONE_MINUTE FROM TIMESTAMP WITH TIME ZONE '2010-01-02 5:00:00-08:30'); >> -30 +SELECT EXTRACT(TIMEZONE_SECOND FROM TIMESTAMP WITH TIME ZONE '1880-01-01 10:00:00-07:52:58'); +>> -58 + +SELECT EXTRACT(TIMEZONE_HOUR FROM TIME WITH TIME ZONE '5:00:00+07:15'); +>> 7 + +SELECT EXTRACT(TIMEZONE_MINUTE FROM TIME WITH TIME ZONE '5:00:00+07:15'); +>> 15 + select extract(hour from timestamp '2001-02-03 14:15:16'); >> 14 select extract(hour from '2001-02-03 14:15:16'); >> 14 -select extract(week from timestamp '2001-02-03 14:15:16'); ->> 5 - SELECT EXTRACT(YEAR FROM INTERVAL '-1' YEAR); >> -1 @@ -179,12 +191,12 @@ SELECT EXTRACT(MICROSECOND FROM INTERVAL '11.123456789' SECOND); SELECT EXTRACT(NANOSECOND FROM INTERVAL '11.123456789' SECOND); >> 123456789 -SELECT D, ISO_YEAR(D) Y1, EXTRACT(ISO_YEAR FROM D) Y2, EXTRACT(ISOYEAR FROM D) Y3 +SELECT D, ISO_YEAR(D) Y1, EXTRACT(ISO_WEEK_YEAR FROM D) Y2, EXTRACT(ISO_YEAR FROM D) Y3, EXTRACT(ISOYEAR FROM D) Y4 FROM (VALUES DATE '2017-01-01', DATE '2017-01-02') V(D); -> D Y1 Y2 Y3 -> ---------- ---- ---- ---- -> 2017-01-01 2016 2016 2016 -> 2017-01-02 2017 2017 2017 +> D Y1 Y2 Y3 Y4 +> ---------- ---- ---- ---- ---- +> 2017-01-01 2016 2016 2016 2016 +> 2017-01-02 2017 2017 2017 2017 > rows: 2 SELECT D, EXTRACT(ISO_DAY_OF_WEEK FROM D) D1, EXTRACT(ISODOW FROM D) D2 @@ -195,24 +207,69 @@ SELECT D, EXTRACT(ISO_DAY_OF_WEEK FROM D) D1, EXTRACT(ISODOW FROM D) D2 > 2019-02-04 1 1 > rows: 2 -SELECT D, EXTRACT(DAY_OF_WEEK FROM D) D1, EXTRACT(DAY_OF_WEEK FROM D) D2, EXTRACT(DOW FROM D) D3 - FROM (VALUES 
DATE '2019-02-02', DATE '2019-02-03') V(D); -> D D1 D2 D3 -> ---------- -- -- -- -> 2019-02-02 7 7 7 -> 2019-02-03 1 1 1 -> rows: 2 - SET MODE PostgreSQL; > ok -SELECT D, EXTRACT(DAY_OF_WEEK FROM D) D1, EXTRACT(DAY_OF_WEEK FROM D) D2, EXTRACT(DOW FROM D) D3 - FROM (VALUES DATE '2019-02-02', DATE '2019-02-03') V(D); -> D D1 D2 D3 -> ---------- -- -- -- -> 2019-02-02 7 7 6 -> 2019-02-03 1 1 0 +SELECT D, EXTRACT(DOW FROM D) D3 FROM (VALUES DATE '2019-02-02', DATE '2019-02-03') V(D); +> D D3 +> ---------- -- +> 2019-02-02 6 +> 2019-02-03 0 > rows: 2 SET MODE Regular; > ok + +SELECT EXTRACT(MILLENNIUM FROM DATE '-1000-12-31'); +>> -1 + +SELECT EXTRACT(MILLENNIUM FROM DATE '-999-01-01'); +>> 0 + +SELECT EXTRACT(MILLENNIUM FROM DATE '0000-12-31'); +>> 0 + +SELECT EXTRACT(MILLENNIUM FROM DATE '0001-01-01'); +>> 1 + +SELECT EXTRACT(MILLENNIUM FROM DATE '1000-12-31'); +>> 1 + +SELECT EXTRACT(MILLENNIUM FROM DATE '1001-01-01'); +>> 2 + +SELECT EXTRACT(CENTURY FROM DATE '-100-12-31'); +>> -1 + +SELECT EXTRACT(CENTURY FROM DATE '-99-01-01'); +>> 0 + +SELECT EXTRACT(CENTURY FROM DATE '0000-12-31'); +>> 0 + +SELECT EXTRACT(CENTURY FROM DATE '0001-01-01'); +>> 1 + +SELECT EXTRACT(CENTURY FROM DATE '0100-12-31'); +>> 1 + +SELECT EXTRACT(CENTURY FROM DATE '0101-01-01'); +>> 2 + +SELECT EXTRACT(DECADE FROM DATE '-11-12-31'); +>> -2 + +SELECT EXTRACT(DECADE FROM DATE '-10-01-01'); +>> -1 + +SELECT EXTRACT(DECADE FROM DATE '-1-12-31'); +>> -1 + +SELECT EXTRACT(DECADE FROM DATE '0000-01-01'); +>> 0 + +SELECT EXTRACT(DECADE FROM DATE '0009-12-31'); +>> 0 + +SELECT EXTRACT(DECADE FROM DATE '0010-01-01'); +>> 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/formatdatetime.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/formatdatetime.sql index 1208d47368..dd3e270714 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/formatdatetime.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/formatdatetime.sql @@ -1,5 +1,5 @@ --- Copyright 
2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/hour.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/hour.sql index 0500bc88c1..b00828275f 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/hour.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/hour.sql @@ -1,20 +1,11 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select hour(time '23:10:59') from test; +select hour(time '23:10:59'); >> 23 -drop table test; -> ok - create table test(ts timestamp with time zone); > ok diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/minute.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/minute.sql index c6f10b5f39..8cf533ce83 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/minute.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/minute.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select minute(timestamp '2005-01-01 23:10:59') from test; +select minute(timestamp '2005-01-01 23:10:59'); >> 10 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/month.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/month.sql index 38b8b28047..e85be36a08 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/month.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/month.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select month(date '2005-09-25') from test; +select month(date '2005-09-25'); >> 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/monthname.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/monthname.sql index 3cc6190083..a8e6637432 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/monthname.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/monthname.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select monthname(date '2005-09-12') from test; +select monthname(date '2005-09-12'); >> September diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/parsedatetime.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/parsedatetime.sql index 24667fe982..4c31dc58f0 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/parsedatetime.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/parsedatetime.sql @@ -1,10 +1,22 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- +SET TIME ZONE '01:00'; +> ok + CALL PARSEDATETIME('3. Februar 2001', 'd. MMMM yyyy', 'de'); ->> 2001-02-03 00:00:00 +>> 2001-02-03 00:00:00+01 CALL PARSEDATETIME('02/03/2001 04:05:06', 'MM/dd/yyyy HH:mm:ss'); ->> 2001-02-03 04:05:06 +>> 2001-02-03 04:05:06+01 + +CALL CAST(PARSEDATETIME('10:11:12', 'HH:mm:ss', 'en') AS TIME); +>> 10:11:12 + +CALL CAST(PARSEDATETIME('10:11:12', 'HH:mm:ss', 'en', 'GMT+2') AS TIME WITH TIME ZONE); +>> 10:11:12+02 + +SET TIME ZONE LOCAL; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/quarter.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/quarter.sql index b6e90335a2..b19ae40a73 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/quarter.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/quarter.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select quarter(date '2005-09-01') from test; +select quarter(date '2005-09-01'); >> 3 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/second.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/second.sql index f1095bcdae..01243bae11 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/second.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/second.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select second(timestamp '2005-01-01 23:10:59') from test; +select second(timestamp '2005-01-01 23:10:59'); >> 59 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/truncate.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/truncate.sql index 72d03fd275..3a28b9b174 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/truncate.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/truncate.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/week.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/week.sql index 32a5a06ab9..3d902ea56d 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/week.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/week.sql @@ -1,20 +1,8 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select week(date '2003-01-09') from test; ->> 2 - -drop table test; -> ok - -- ISO_WEEK select iso_week('2006-12-31') w, iso_year('2007-12-31') y, iso_day_of_week('2007-12-31') w; diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/year.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/year.sql index 54692fd703..25dea91c9d 100644 --- a/h2/src/test/org/h2/test/scripts/functions/timeanddate/year.sql +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/year.sql @@ -1,13 +1,7 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- -create memory table test(id int primary key, name varchar(255)); -> ok - -insert into test values(1, 'Hello'); -> update count: 1 - -select year(date '2005-01-01') from test; +select year(date '2005-01-01'); >> 2005 diff --git a/h2/src/test/org/h2/test/scripts/functions/window/lead.sql b/h2/src/test/org/h2/test/scripts/functions/window/lead.sql index 35490b6241..947849a66c 100644 --- a/h2/src/test/org/h2/test/scripts/functions/window/lead.sql +++ b/h2/src/test/org/h2/test/scripts/functions/window/lead.sql @@ -1,9 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -CREATE TABLE TEST (ID INT PRIMARY KEY, VALUE INT); +CREATE TABLE TEST (ID INT PRIMARY KEY, "VALUE" INT); > ok INSERT INTO TEST VALUES @@ -19,12 +19,12 @@ INSERT INTO TEST VALUES > update count: 9 SELECT *, - LEAD(VALUE) OVER (ORDER BY ID) LD, - LEAD(VALUE) RESPECT NULLS OVER (ORDER BY ID) LD_N, - LEAD(VALUE) IGNORE NULLS OVER (ORDER BY ID) LD_NN, - LAG(VALUE) OVER (ORDER BY ID) LG, - LAG(VALUE) RESPECT NULLS OVER (ORDER BY ID) LG_N, - LAG(VALUE) IGNORE NULLS OVER (ORDER BY ID) LG_NN + LEAD("VALUE") OVER (ORDER BY ID) LD, + LEAD("VALUE") RESPECT NULLS OVER (ORDER BY ID) LD_N, + LEAD("VALUE") IGNORE NULLS OVER (ORDER BY ID) LD_NN, + LAG("VALUE") OVER (ORDER BY ID) LG, + LAG("VALUE") RESPECT NULLS OVER (ORDER BY ID) LG_N, + LAG("VALUE") IGNORE NULLS OVER (ORDER BY ID) LG_NN FROM TEST; > ID VALUE LD LD_N LD_NN LG LG_N LG_NN > -- ----- ---- ---- ----- ---- ---- ----- @@ -40,12 +40,12 @@ SELECT *, > rows: 9 SELECT *, - LEAD(VALUE, 1) OVER (ORDER BY ID) LD, - LEAD(VALUE, 1) RESPECT NULLS OVER (ORDER BY ID) LD_N, - LEAD(VALUE, 1) IGNORE NULLS OVER (ORDER BY ID) LD_NN, - LAG(VALUE, 1) OVER (ORDER BY ID) LG, - LAG(VALUE, 1) 
RESPECT NULLS OVER (ORDER BY ID) LG_N, - LAG(VALUE, 1) IGNORE NULLS OVER (ORDER BY ID) LG_NN + LEAD("VALUE", 1) OVER (ORDER BY ID) LD, + LEAD("VALUE", 1) RESPECT NULLS OVER (ORDER BY ID) LD_N, + LEAD("VALUE", 1) IGNORE NULLS OVER (ORDER BY ID) LD_NN, + LAG("VALUE", 1) OVER (ORDER BY ID) LG, + LAG("VALUE", 1) RESPECT NULLS OVER (ORDER BY ID) LG_N, + LAG("VALUE", 1) IGNORE NULLS OVER (ORDER BY ID) LG_NN FROM TEST; > ID VALUE LD LD_N LD_NN LG LG_N LG_NN > -- ----- ---- ---- ----- ---- ---- ----- @@ -61,12 +61,12 @@ SELECT *, > rows: 9 SELECT *, - LEAD(VALUE, 0) OVER (ORDER BY ID) LD, - LEAD(VALUE, 0) RESPECT NULLS OVER (ORDER BY ID) LD_N, - LEAD(VALUE, 0) IGNORE NULLS OVER (ORDER BY ID) LD_NN, - LAG(VALUE, 0) OVER (ORDER BY ID) LG, - LAG(VALUE, 0) RESPECT NULLS OVER (ORDER BY ID) LG_N, - LAG(VALUE, 0) IGNORE NULLS OVER (ORDER BY ID) LG_NN + LEAD("VALUE", 0) OVER (ORDER BY ID) LD, + LEAD("VALUE", 0) RESPECT NULLS OVER (ORDER BY ID) LD_N, + LEAD("VALUE", 0) IGNORE NULLS OVER (ORDER BY ID) LD_NN, + LAG("VALUE", 0) OVER (ORDER BY ID) LG, + LAG("VALUE", 0) RESPECT NULLS OVER (ORDER BY ID) LG_N, + LAG("VALUE", 0) IGNORE NULLS OVER (ORDER BY ID) LG_NN FROM TEST; > ID VALUE LD LD_N LD_NN LG LG_N LG_NN > -- ----- ---- ---- ----- ---- ---- ----- @@ -82,12 +82,12 @@ SELECT *, > rows: 9 SELECT *, - LEAD(VALUE, 2) OVER (ORDER BY ID) LD, - LEAD(VALUE, 2) RESPECT NULLS OVER (ORDER BY ID) LD_N, - LEAD(VALUE, 2) IGNORE NULLS OVER (ORDER BY ID) LD_NN, - LAG(VALUE, 2) OVER (ORDER BY ID) LG, - LAG(VALUE, 2) RESPECT NULLS OVER (ORDER BY ID) LG_N, - LAG(VALUE, 2) IGNORE NULLS OVER (ORDER BY ID) LG_NN + LEAD("VALUE", 2) OVER (ORDER BY ID) LD, + LEAD("VALUE", 2) RESPECT NULLS OVER (ORDER BY ID) LD_N, + LEAD("VALUE", 2) IGNORE NULLS OVER (ORDER BY ID) LD_NN, + LAG("VALUE", 2) OVER (ORDER BY ID) LG, + LAG("VALUE", 2) RESPECT NULLS OVER (ORDER BY ID) LG_N, + LAG("VALUE", 2) IGNORE NULLS OVER (ORDER BY ID) LG_NN FROM TEST; > ID VALUE LD LD_N LD_NN LG LG_N LG_NN > -- ----- ---- ---- ----- ---- 
---- ----- @@ -103,12 +103,12 @@ SELECT *, > rows: 9 SELECT *, - LEAD(VALUE, 2, 1111.0) OVER (ORDER BY ID) LD, - LEAD(VALUE, 2, 1111.0) RESPECT NULLS OVER (ORDER BY ID) LD_N, - LEAD(VALUE, 2, 1111.0) IGNORE NULLS OVER (ORDER BY ID) LD_NN, - LAG(VALUE, 2, 1111.0) OVER (ORDER BY ID) LG, - LAG(VALUE, 2, 1111.0) RESPECT NULLS OVER (ORDER BY ID) LG_N, - LAG(VALUE, 2, 1111.0) IGNORE NULLS OVER (ORDER BY ID) LG_NN + LEAD("VALUE", 2, 1111.0) OVER (ORDER BY ID) LD, + LEAD("VALUE", 2, 1111.0) RESPECT NULLS OVER (ORDER BY ID) LD_N, + LEAD("VALUE", 2, 1111.0) IGNORE NULLS OVER (ORDER BY ID) LD_NN, + LAG("VALUE", 2, 1111.0) OVER (ORDER BY ID) LG, + LAG("VALUE", 2, 1111.0) RESPECT NULLS OVER (ORDER BY ID) LG_N, + LAG("VALUE", 2, 1111.0) IGNORE NULLS OVER (ORDER BY ID) LG_NN FROM TEST; > ID VALUE LD LD_N LD_NN LG LG_N LG_NN > -- ----- ---- ---- ----- ---- ---- ----- @@ -123,22 +123,22 @@ SELECT *, > 9 null 1111 1111 1111 22 22 22 > rows: 9 -SELECT LEAD(VALUE, -1) OVER (ORDER BY ID) FROM TEST; +SELECT LEAD("VALUE", -1) OVER (ORDER BY ID) FROM TEST; > exception INVALID_VALUE_2 -SELECT LAG(VALUE, -1) OVER (ORDER BY ID) FROM TEST; +SELECT LAG("VALUE", -1) OVER (ORDER BY ID) FROM TEST; > exception INVALID_VALUE_2 -SELECT LEAD(VALUE) OVER () FROM TEST; +SELECT LEAD("VALUE") OVER () FROM TEST; > exception SYNTAX_ERROR_2 -SELECT LAG(VALUE) OVER () FROM TEST; +SELECT LAG("VALUE") OVER () FROM TEST; > exception SYNTAX_ERROR_2 -SELECT LEAD(VALUE) OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; +SELECT LEAD("VALUE") OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; > exception SYNTAX_ERROR_1 -SELECT LAG(VALUE) OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; +SELECT LAG("VALUE") OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; > exception SYNTAX_ERROR_1 DROP TABLE TEST; @@ -151,3 +151,31 @@ SELECT C, SUM(I) S, LEAD(SUM(I)) OVER (ORDER BY SUM(I)) L FROM > 1 3 12 > 2 12 null > rows: 2 + +CREATE TABLE TEST(X INT) AS VALUES 1, 2, 3; +> ok + +EXPLAIN SELECT LEAD(X) OVER (ORDER BY 'a') FROM TEST; +>> 
SELECT LEAD("X") OVER (ORDER BY NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT LEAD(X) OVER (ORDER BY 'a') FROM TEST; +> LEAD(X) OVER (ORDER BY NULL) +> ---------------------------- +> 2 +> 3 +> null +> rows: 3 + +EXPLAIN SELECT LAG(X) OVER (ORDER BY 'a') FROM TEST; +>> SELECT LAG("X") OVER (ORDER BY NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT LAG(X) OVER (ORDER BY 'a') FROM TEST; +> LAG(X) OVER (ORDER BY NULL) +> --------------------------- +> 1 +> 2 +> null +> rows: 3 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/window/nth_value.sql b/h2/src/test/org/h2/test/scripts/functions/window/nth_value.sql index b007b89bd3..57fea994cd 100644 --- a/h2/src/test/org/h2/test/scripts/functions/window/nth_value.sql +++ b/h2/src/test/org/h2/test/scripts/functions/window/nth_value.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -9,7 +9,7 @@ SELECT FIRST_VALUE(1) OVER (PARTITION BY ID); SELECT FIRST_VALUE(1) OVER (ORDER BY ID); > exception COLUMN_NOT_FOUND_1 -CREATE TABLE TEST (ID INT PRIMARY KEY, CATEGORY INT, VALUE INT); +CREATE TABLE TEST (ID INT PRIMARY KEY, CATEGORY INT, "VALUE" INT); > ok INSERT INTO TEST VALUES @@ -29,12 +29,12 @@ INSERT INTO TEST VALUES > update count: 13 SELECT *, - FIRST_VALUE(VALUE) OVER (ORDER BY ID) FIRST, - FIRST_VALUE(VALUE) RESPECT NULLS OVER (ORDER BY ID) FIRST_N, - FIRST_VALUE(VALUE) IGNORE NULLS OVER (ORDER BY ID) FIRST_NN, - LAST_VALUE(VALUE) OVER (ORDER BY ID) LAST, - LAST_VALUE(VALUE) RESPECT NULLS OVER (ORDER BY ID) LAST_N, - LAST_VALUE(VALUE) IGNORE NULLS OVER (ORDER BY ID) LAST_NN + FIRST_VALUE("VALUE") OVER (ORDER BY ID) FIRST, + FIRST_VALUE("VALUE") RESPECT NULLS OVER (ORDER BY ID) FIRST_N, + FIRST_VALUE("VALUE") IGNORE NULLS OVER (ORDER BY ID) FIRST_NN, + LAST_VALUE("VALUE") OVER (ORDER BY ID) LAST, + LAST_VALUE("VALUE") RESPECT NULLS OVER (ORDER BY ID) LAST_N, + LAST_VALUE("VALUE") IGNORE NULLS OVER (ORDER BY ID) LAST_NN FROM TEST FETCH FIRST 6 ROWS ONLY; > ID CATEGORY VALUE FIRST FIRST_N FIRST_NN LAST LAST_N LAST_NN > -- -------- ----- ----- ------- -------- ---- ------ ------- @@ -47,12 +47,12 @@ SELECT *, > rows: 6 SELECT *, - FIRST_VALUE(VALUE) OVER (ORDER BY ID) FIRST, - FIRST_VALUE(VALUE) RESPECT NULLS OVER (ORDER BY ID) FIRST_N, - FIRST_VALUE(VALUE) IGNORE NULLS OVER (ORDER BY ID) FIRST_NN, - LAST_VALUE(VALUE) OVER (ORDER BY ID) LAST, - LAST_VALUE(VALUE) RESPECT NULLS OVER (ORDER BY ID) LAST_N, - LAST_VALUE(VALUE) IGNORE NULLS OVER (ORDER BY ID) LAST_NN + FIRST_VALUE("VALUE") OVER (ORDER BY ID) FIRST, + FIRST_VALUE("VALUE") RESPECT NULLS OVER (ORDER BY ID) FIRST_N, + FIRST_VALUE("VALUE") IGNORE NULLS OVER (ORDER BY ID) FIRST_NN, + LAST_VALUE("VALUE") OVER (ORDER BY ID) LAST, + LAST_VALUE("VALUE") RESPECT NULLS OVER (ORDER BY ID) LAST_N, + LAST_VALUE("VALUE") IGNORE NULLS OVER (ORDER BY ID) LAST_NN 
FROM TEST WHERE ID > 1 FETCH FIRST 3 ROWS ONLY; > ID CATEGORY VALUE FIRST FIRST_N FIRST_NN LAST LAST_N LAST_NN > -- -------- ----- ----- ------- -------- ---- ------ ------- @@ -62,15 +62,15 @@ SELECT *, > rows: 3 SELECT *, - NTH_VALUE(VALUE, 2) OVER (ORDER BY ID) NTH, - NTH_VALUE(VALUE, 2) FROM FIRST OVER (ORDER BY ID) NTH_FF, - NTH_VALUE(VALUE, 2) FROM LAST OVER (ORDER BY ID) NTH_FL, - NTH_VALUE(VALUE, 2) RESPECT NULLS OVER (ORDER BY ID) NTH_N, - NTH_VALUE(VALUE, 2) FROM FIRST RESPECT NULLS OVER (ORDER BY ID) NTH_FF_N, - NTH_VALUE(VALUE, 2) FROM LAST RESPECT NULLS OVER (ORDER BY ID) NTH_FL_N, - NTH_VALUE(VALUE, 2) IGNORE NULLS OVER (ORDER BY ID) NTH_NN, - NTH_VALUE(VALUE, 2) FROM FIRST IGNORE NULLS OVER (ORDER BY ID) NTH_FF_NN, - NTH_VALUE(VALUE, 2) FROM LAST IGNORE NULLS OVER (ORDER BY ID) NTH_FL_NN + NTH_VALUE("VALUE", 2) OVER (ORDER BY ID) NTH, + NTH_VALUE("VALUE", 2) FROM FIRST OVER (ORDER BY ID) NTH_FF, + NTH_VALUE("VALUE", 2) FROM LAST OVER (ORDER BY ID) NTH_FL, + NTH_VALUE("VALUE", 2) RESPECT NULLS OVER (ORDER BY ID) NTH_N, + NTH_VALUE("VALUE", 2) FROM FIRST RESPECT NULLS OVER (ORDER BY ID) NTH_FF_N, + NTH_VALUE("VALUE", 2) FROM LAST RESPECT NULLS OVER (ORDER BY ID) NTH_FL_N, + NTH_VALUE("VALUE", 2) IGNORE NULLS OVER (ORDER BY ID) NTH_NN, + NTH_VALUE("VALUE", 2) FROM FIRST IGNORE NULLS OVER (ORDER BY ID) NTH_FF_NN, + NTH_VALUE("VALUE", 2) FROM LAST IGNORE NULLS OVER (ORDER BY ID) NTH_FL_NN FROM TEST FETCH FIRST 6 ROWS ONLY; > ID CATEGORY VALUE NTH NTH_FF NTH_FL NTH_N NTH_FF_N NTH_FL_N NTH_NN NTH_FF_NN NTH_FL_NN > -- -------- ----- ---- ------ ------ ----- -------- -------- ------ --------- --------- @@ -83,14 +83,14 @@ SELECT *, > rows: 6 SELECT *, - NTH_VALUE(VALUE, 2) OVER(ORDER BY ID) F, - NTH_VALUE(VALUE, 2) OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) F_U_C, - NTH_VALUE(VALUE, 2) OVER(ORDER BY ID RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) F_C_U, - NTH_VALUE(VALUE, 2) OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND 
UNBOUNDED FOLLOWING) F_U_U, - NTH_VALUE(VALUE, 2) FROM LAST OVER(ORDER BY ID) L, - NTH_VALUE(VALUE, 2) FROM LAST OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) L_U_C, - NTH_VALUE(VALUE, 2) FROM LAST OVER(ORDER BY ID RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) L_C_U, - NTH_VALUE(VALUE, 2) FROM LAST OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) L_U_U + NTH_VALUE("VALUE", 2) OVER(ORDER BY ID) F, + NTH_VALUE("VALUE", 2) OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) F_U_C, + NTH_VALUE("VALUE", 2) OVER(ORDER BY ID RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) F_C_U, + NTH_VALUE("VALUE", 2) OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) F_U_U, + NTH_VALUE("VALUE", 2) FROM LAST OVER(ORDER BY ID) L, + NTH_VALUE("VALUE", 2) FROM LAST OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) L_U_C, + NTH_VALUE("VALUE", 2) FROM LAST OVER(ORDER BY ID RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) L_C_U, + NTH_VALUE("VALUE", 2) FROM LAST OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) L_U_U FROM TEST ORDER BY ID; > ID CATEGORY VALUE F F_U_C F_C_U F_U_U L L_U_C L_C_U L_U_U > -- -------- ----- ---- ----- ----- ----- ---- ----- ----- ----- @@ -109,13 +109,13 @@ SELECT *, > 13 4 null 12 12 null 12 41 41 null 41 > rows (ordered): 13 -SELECT NTH_VALUE(VALUE, 0) OVER (ORDER BY ID) FROM TEST; +SELECT NTH_VALUE("VALUE", 0) OVER (ORDER BY ID) FROM TEST; > exception INVALID_VALUE_2 SELECT *, - FIRST_VALUE(VALUE) OVER (PARTITION BY CATEGORY ORDER BY ID) FIRST, - LAST_VALUE(VALUE) OVER (PARTITION BY CATEGORY ORDER BY ID) LAST, - NTH_VALUE(VALUE, 2) OVER (PARTITION BY CATEGORY ORDER BY ID) NTH + FIRST_VALUE("VALUE") OVER (PARTITION BY CATEGORY ORDER BY ID) FIRST, + LAST_VALUE("VALUE") OVER (PARTITION BY CATEGORY ORDER BY ID) LAST, + NTH_VALUE("VALUE", 2) OVER (PARTITION BY CATEGORY ORDER BY ID) NTH FROM TEST ORDER BY ID; > ID CATEGORY 
VALUE FIRST LAST NTH > -- -------- ----- ----- ---- ---- @@ -150,7 +150,7 @@ SELECT ID, CATEGORY, NTH_VALUE(CATEGORY, 3) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) C3, NTH_VALUE(CATEGORY, 2) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING EXCLUDE CURRENT ROW) FROM TEST OFFSET 10 ROWS; -> ID CATEGORY C2 C3 NTH_VALUE(CATEGORY, 2) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN CURRENT_ROW AND UNBOUNDED FOLLOWING EXCLUDE CURRENT ROW) +> ID CATEGORY C2 C3 NTH_VALUE(CATEGORY, 2) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING EXCLUDE CURRENT ROW) > -- -------- -- ---- ------------------------------------------------------------------------------------------------------------------------------- > 11 3 4 3 4 > 12 4 4 null null diff --git a/h2/src/test/org/h2/test/scripts/functions/window/ntile.sql b/h2/src/test/org/h2/test/scripts/functions/window/ntile.sql index 7662370e8d..6367c2d5e2 100644 --- a/h2/src/test/org/h2/test/scripts/functions/window/ntile.sql +++ b/h2/src/test/org/h2/test/scripts/functions/window/ntile.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/window/ratio_to_report.sql b/h2/src/test/org/h2/test/scripts/functions/window/ratio_to_report.sql index a4bfb41cf1..6760ad7076 100644 --- a/h2/src/test/org/h2/test/scripts/functions/window/ratio_to_report.sql +++ b/h2/src/test/org/h2/test/scripts/functions/window/ratio_to_report.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- diff --git a/h2/src/test/org/h2/test/scripts/functions/window/row_number.sql b/h2/src/test/org/h2/test/scripts/functions/window/row_number.sql index 9bcada285e..90b99c3628 100644 --- a/h2/src/test/org/h2/test/scripts/functions/window/row_number.sql +++ b/h2/src/test/org/h2/test/scripts/functions/window/row_number.sql @@ -1,9 +1,9 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- -CREATE TABLE TEST (ID INT PRIMARY KEY, CATEGORY INT, VALUE INT); +CREATE TABLE TEST (ID INT PRIMARY KEY, CATEGORY INT, "VALUE" INT); > ok INSERT INTO TEST VALUES @@ -190,3 +190,56 @@ SELECT ROW_NUMBER() OVER () FROM VALUES (1); > -------------------- > 1 > rows: 1 + +CREATE TABLE TEST(X INT) AS VALUES 1, 2, 3; +> ok + +EXPLAIN SELECT ROW_NUMBER() OVER (ORDER BY 'a') FROM TEST; +>> SELECT ROW_NUMBER() OVER () FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT RANK() OVER (ORDER BY 'a') FROM TEST; +>> SELECT CAST(1 AS BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT RANK() OVER (ORDER BY 'a') FROM TEST; +> 1 +> - +> 1 +> 1 +> 1 +> rows: 3 + +EXPLAIN SELECT DENSE_RANK() OVER (ORDER BY 'a') FROM TEST; +>> SELECT CAST(1 AS BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT DENSE_RANK() OVER (ORDER BY 'a') FROM TEST; +> 1 +> - +> 1 +> 1 +> 1 +> rows: 3 + +EXPLAIN SELECT PERCENT_RANK() OVER (ORDER BY 'a') FROM TEST; +>> SELECT CAST(0.0 AS DOUBLE PRECISION) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT PERCENT_RANK() OVER (ORDER BY 'a') FROM TEST; +> 0.0 +> --- +> 0.0 +> 0.0 +> 0.0 +> rows: 3 + +EXPLAIN SELECT CUME_DIST() OVER (ORDER BY 'a') FROM TEST; 
+>> SELECT CAST(1.0 AS DOUBLE PRECISION) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT CUME_DIST() OVER (ORDER BY 'a') FROM TEST; +> 1.0 +> --- +> 1.0 +> 1.0 +> 1.0 +> rows: 3 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/indexes.sql b/h2/src/test/org/h2/test/scripts/indexes.sql index 0bb16ebacf..4400a63a76 100644 --- a/h2/src/test/org/h2/test/scripts/indexes.sql +++ b/h2/src/test/org/h2/test/scripts/indexes.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -318,25 +318,25 @@ CREATE INDEX T_A_C ON TEST(A, C); > ok EXPLAIN SELECT * FROM TEST WHERE A = 0; ->> SELECT "TEST"."A", "TEST"."B", "TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A1: A = 0 */ WHERE "A" = 0 +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A1: A = 0 */ WHERE "A" = 0 EXPLAIN SELECT * FROM TEST WHERE A = 0 AND B >= 0; ->> SELECT "TEST"."A", "TEST"."B", "TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A = 0 AND B >= 0 */ WHERE ("A" = 0) AND ("B" >= 0) +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A = 0 AND B >= 0 */ WHERE ("A" = 0) AND ("B" >= 0) EXPLAIN SELECT * FROM TEST WHERE A > 0 AND B >= 0; ->> SELECT "TEST"."A", "TEST"."B", "TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A > 0 AND B >= 0 */ WHERE ("A" > 0) AND ("B" >= 0) +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A > 0 AND B >= 0 */ WHERE ("A" > 0) AND ("B" >= 0) INSERT INTO TEST (SELECT X / 100, X, X FROM SYSTEM_RANGE(1, 3000)); > update count: 3000 EXPLAIN SELECT * FROM TEST WHERE A = 0; ->> SELECT "TEST"."A", "TEST"."B", "TEST"."C" FROM 
"PUBLIC"."TEST" /* PUBLIC.T_A1: A = 0 */ WHERE "A" = 0 +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A1: A = 0 */ WHERE "A" = 0 EXPLAIN SELECT * FROM TEST WHERE A = 0 AND B >= 0; ->> SELECT "TEST"."A", "TEST"."B", "TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A = 0 AND B >= 0 */ WHERE ("A" = 0) AND ("B" >= 0) +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A = 0 AND B >= 0 */ WHERE ("A" = 0) AND ("B" >= 0) EXPLAIN SELECT * FROM TEST WHERE A > 0 AND B >= 0; ->> SELECT "TEST"."A", "TEST"."B", "TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A > 0 AND B >= 0 */ WHERE ("A" > 0) AND ("B" >= 0) +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A > 0 AND B >= 0 */ WHERE ("A" > 0) AND ("B" >= 0) -- Test that creation order of indexes has no effect CREATE INDEX T_A2 ON TEST(A); @@ -346,7 +346,67 @@ DROP INDEX T_A1; > ok EXPLAIN SELECT * FROM TEST WHERE A = 0; ->> SELECT "TEST"."A", "TEST"."B", "TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A2: A = 0 */ WHERE "A" = 0 +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A2: A = 0 */ WHERE "A" = 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE T(A INT, B INT, C INT); +> ok + +CREATE INDEX T_B_IDX ON T(B); +> ok + +EXPLAIN SELECT * FROM T WHERE A = 1 AND B = A; +>> SELECT "PUBLIC"."T"."A", "PUBLIC"."T"."B", "PUBLIC"."T"."C" FROM "PUBLIC"."T" /* PUBLIC.T_B_IDX: B = 1 */ WHERE ("A" = 1) AND ("B" = "A") + +DROP TABLE T; +> ok + +-- _ROWID_ tests + +CREATE TABLE TEST(ID INT PRIMARY KEY); +> ok + +INSERT INTO TEST VALUES 1, 2, 3, 4; +> update count: 4 + +SELECT * FROM TEST WHERE ID >= 2 AND ID <= 3; +> ID +> -- +> 2 +> 3 +> rows: 2 + +SELECT * FROM TEST WHERE _ROWID_ >= 2 AND _ROWID_ <= 3; +> ID +> -- +> 2 +> 3 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID FLOAT PRIMARY KEY); +> ok + 
+INSERT INTO TEST VALUES 1.0, 2.0, 3.0, 4.0; +> update count: 4 + +SELECT * FROM TEST WHERE ID >= 2.0 AND ID <= 3.0; +> ID +> --- +> 2.0 +> 3.0 +> rows: 2 + +SELECT * FROM TEST WHERE _ROWID_ >= 2 AND _ROWID_ <= 3; +> ID +> --- +> 2.0 +> 3.0 +> rows: 2 DROP TABLE TEST; > ok diff --git a/h2/src/test/org/h2/test/scripts/information_schema.sql b/h2/src/test/org/h2/test/scripts/information_schema.sql index be26ad2496..aca6341a63 100644 --- a/h2/src/test/org/h2/test/scripts/information_schema.sql +++ b/h2/src/test/org/h2/test/scripts/information_schema.sql @@ -1,8 +1,14 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- +TABLE INFORMATION_SCHEMA.INFORMATION_SCHEMA_CATALOG_NAME; +> CATALOG_NAME +> ------------ +> SCRIPT +> rows: 1 + CREATE TABLE T1(C1 INT NOT NULL, C2 INT NOT NULL, C3 INT, C4 INT); > ok @@ -15,18 +21,27 @@ ALTER TABLE T1 ADD CONSTRAINT U_1 UNIQUE(C3, C4); CREATE TABLE T2(C1 INT, C2 INT, C3 INT, C4 INT); > ok +ALTER TABLE T2 ADD CONSTRAINT FK_1 FOREIGN KEY (C3, C4) REFERENCES T1(C1, C3) ON DELETE SET NULL; +> exception CONSTRAINT_NOT_FOUND_1 + +SET MODE MySQL; +> ok + ALTER TABLE T2 ADD CONSTRAINT FK_1 FOREIGN KEY (C3, C4) REFERENCES T1(C1, C3) ON DELETE SET NULL; > ok ALTER TABLE T2 ADD CONSTRAINT FK_2 FOREIGN KEY (C3, C4) REFERENCES T1(C4, C3) ON UPDATE CASCADE ON DELETE SET DEFAULT; > ok -ALTER TABLE T2 ADD CONSTRAINT CH_1 CHECK C4 > 0; +SET MODE Regular; +> ok + +ALTER TABLE T2 ADD CONSTRAINT CH_1 CHECK (C4 > 0 AND NOT EXISTS(SELECT 1 FROM T1 WHERE T1.C1 + T1.C2 = T2.C4)); > ok SELECT * FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS LIMIT 0; -> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME CONSTRAINT_TYPE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME IS_DEFERRABLE INITIALLY_DEFERRED -> ------------------ 
----------------- --------------- --------------- ------------- ------------ ---------- ------------- ------------------ +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME CONSTRAINT_TYPE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME IS_DEFERRABLE INITIALLY_DEFERRED ENFORCED INDEX_CATALOG INDEX_SCHEMA INDEX_NAME REMARKS +> ------------------ ----------------- --------------- --------------- ------------- ------------ ---------- ------------- ------------------ -------- ------------- ------------ ---------- ------- > rows: 0 SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE, TABLE_NAME, IS_DEFERRABLE, INITIALLY_DEFERRED FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS @@ -34,24 +49,13 @@ SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE, TABLE_NAME, IS_DEFERRABLE, INITIALLY_DE ORDER BY TABLE_NAME, CONSTRAINT_NAME; > CONSTRAINT_NAME CONSTRAINT_TYPE TABLE_NAME IS_DEFERRABLE INITIALLY_DEFERRED > --------------- --------------- ---------- ------------- ------------------ +> CONSTRAINT_A UNIQUE T1 NO NO > PK_1 PRIMARY KEY T1 NO NO > U_1 UNIQUE T1 NO NO > CH_1 CHECK T2 NO NO > FK_1 FOREIGN KEY T2 NO NO > FK_2 FOREIGN KEY T2 NO NO -> rows (ordered): 5 - -SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE, TABLE_NAME, COLUMN_LIST FROM INFORMATION_SCHEMA.CONSTRAINTS - WHERE CONSTRAINT_CATALOG = DATABASE() AND CONSTRAINT_SCHEMA = SCHEMA() AND TABLE_CATALOG = DATABASE() AND TABLE_SCHEMA = SCHEMA() - ORDER BY TABLE_NAME, CONSTRAINT_NAME; -> CONSTRAINT_NAME CONSTRAINT_TYPE TABLE_NAME COLUMN_LIST -> --------------- --------------- ---------- ----------- -> PK_1 PRIMARY KEY T1 C1,C2 -> U_1 UNIQUE T1 C3,C4 -> CH_1 CHECK T2 null -> FK_1 REFERENTIAL T2 C3,C4 -> FK_2 REFERENTIAL T2 C3,C4 -> rows (ordered): 5 +> rows (ordered): 6 SELECT * FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE LIMIT 0; > CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT @@ -63,6 +67,8 @@ SELECT CONSTRAINT_NAME, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, 
POSITION_IN_U ORDER BY TABLE_NAME, CONSTRAINT_NAME, ORDINAL_POSITION; > CONSTRAINT_NAME TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT > --------------- ---------- ----------- ---------------- ----------------------------- +> CONSTRAINT_A T1 C1 1 null +> CONSTRAINT_A T1 C3 2 null > PK_1 T1 C1 1 null > PK_1 T1 C2 2 null > U_1 T1 C3 1 null @@ -71,21 +77,20 @@ SELECT CONSTRAINT_NAME, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, POSITION_IN_U > FK_1 T2 C4 2 2 > FK_2 T2 C3 1 2 > FK_2 T2 C4 2 1 -> rows (ordered): 8 +> rows (ordered): 10 SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS LIMIT 0; > CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME UNIQUE_CONSTRAINT_CATALOG UNIQUE_CONSTRAINT_SCHEMA UNIQUE_CONSTRAINT_NAME MATCH_OPTION UPDATE_RULE DELETE_RULE > ------------------ ----------------- --------------- ------------------------- ------------------------ ---------------------- ------------ ----------- ----------- > rows: 0 --- H2 may return name of the index instead of name of the referenced constraint as UNIQUE_CONSTRAINT_NAME -SELECT CONSTRAINT_NAME, SUBSTRING(UNIQUE_CONSTRAINT_NAME, 0, 11) AS UCN_PART, MATCH_OPTION, UPDATE_RULE, DELETE_RULE FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS +SELECT CONSTRAINT_NAME, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS WHERE CONSTRAINT_CATALOG = DATABASE() AND CONSTRAINT_SCHEMA = SCHEMA() AND UNIQUE_CONSTRAINT_CATALOG = DATABASE() AND UNIQUE_CONSTRAINT_SCHEMA = SCHEMA() ORDER BY CONSTRAINT_NAME, UNIQUE_CONSTRAINT_NAME; -> CONSTRAINT_NAME UCN_PART MATCH_OPTION UPDATE_RULE DELETE_RULE -> --------------- ----------- ------------ ----------- ----------- -> FK_1 FK_1_INDEX_ NONE RESTRICT SET NULL -> FK_2 U_1 NONE CASCADE SET DEFAULT +> CONSTRAINT_NAME UNIQUE_CONSTRAINT_NAME MATCH_OPTION UPDATE_RULE DELETE_RULE +> --------------- ---------------------- ------------ ----------- ----------- +> FK_1 CONSTRAINT_A NONE RESTRICT SET NULL +> 
FK_2 U_1 NONE CASCADE SET DEFAULT > rows (ordered): 2 SELECT U1.TABLE_NAME T1, U1.COLUMN_NAME C1, U2.TABLE_NAME T2, U2.COLUMN_NAME C2 @@ -98,8 +103,91 @@ SELECT U1.TABLE_NAME T1, U1.COLUMN_NAME C1, U2.TABLE_NAME T2, U2.COLUMN_NAME C2 > T2 C4 T1 C3 > rows (ordered): 2 +TABLE INFORMATION_SCHEMA.CHECK_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME CHECK_CLAUSE +> ------------------ ----------------- --------------- --------------------------------------------------------------------------------------------------- +> SCRIPT PUBLIC CH_1 ("C4" > 0) AND (NOT EXISTS( SELECT 1 FROM "PUBLIC"."T1" WHERE ("T1"."C1" + "T1"."C2") = "T2"."C4")) +> rows: 1 + +TABLE INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE; +> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME +> ------------- ------------ ---------- ----------- ------------------ ----------------- --------------- +> SCRIPT PUBLIC T1 C1 SCRIPT PUBLIC CH_1 +> SCRIPT PUBLIC T1 C1 SCRIPT PUBLIC CONSTRAINT_A +> SCRIPT PUBLIC T1 C1 SCRIPT PUBLIC FK_1 +> SCRIPT PUBLIC T1 C1 SCRIPT PUBLIC PK_1 +> SCRIPT PUBLIC T1 C2 SCRIPT PUBLIC CH_1 +> SCRIPT PUBLIC T1 C2 SCRIPT PUBLIC PK_1 +> SCRIPT PUBLIC T1 C3 SCRIPT PUBLIC CONSTRAINT_A +> SCRIPT PUBLIC T1 C3 SCRIPT PUBLIC FK_1 +> SCRIPT PUBLIC T1 C3 SCRIPT PUBLIC FK_2 +> SCRIPT PUBLIC T1 C3 SCRIPT PUBLIC U_1 +> SCRIPT PUBLIC T1 C4 SCRIPT PUBLIC FK_2 +> SCRIPT PUBLIC T1 C4 SCRIPT PUBLIC U_1 +> SCRIPT PUBLIC T2 C3 SCRIPT PUBLIC FK_1 +> SCRIPT PUBLIC T2 C3 SCRIPT PUBLIC FK_2 +> SCRIPT PUBLIC T2 C4 SCRIPT PUBLIC CH_1 +> SCRIPT PUBLIC T2 C4 SCRIPT PUBLIC FK_1 +> SCRIPT PUBLIC T2 C4 SCRIPT PUBLIC FK_2 +> rows: 17 + DROP TABLE T2; > ok DROP TABLE T1; > ok + +@reconnect off + +CREATE TABLE T1(C1 INT PRIMARY KEY); +> ok + +CREATE TABLE T2(C2 INT PRIMARY KEY REFERENCES T1); +> ok + +SELECT ENFORCED FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'; +>> YES + +SET REFERENTIAL_INTEGRITY FALSE; +> ok + 
+SELECT ENFORCED FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'; +>> NO + +SET REFERENTIAL_INTEGRITY TRUE; +> ok + +ALTER TABLE T1 SET REFERENTIAL_INTEGRITY FALSE; +> ok + +SELECT ENFORCED FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'; +>> NO + +ALTER TABLE T1 SET REFERENTIAL_INTEGRITY TRUE; +> ok + +ALTER TABLE T2 SET REFERENTIAL_INTEGRITY FALSE; +> ok + +SELECT ENFORCED FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'; +>> NO + +DROP TABLE T2, T1; +> ok + +@reconnect on + +SELECT TABLE_NAME, ROW_COUNT_ESTIMATE FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'INFORMATION_SCHEMA' + AND TABLE_NAME IN ('INFORMATION_SCHEMA_CATALOG_NAME', 'SCHEMATA', 'ROLES', 'SESSIONS', 'IN_DOUBT', 'USERS'); +> TABLE_NAME ROW_COUNT_ESTIMATE +> ------------------------------- ------------------ +> INFORMATION_SCHEMA_CATALOG_NAME 1 +> IN_DOUBT 0 +> ROLES 1 +> SCHEMATA 2 +> SESSIONS 1 +> USERS 1 +> rows: 6 + +EXPLAIN SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLLATIONS; +>> SELECT COUNT(*) FROM "INFORMATION_SCHEMA"."COLLATIONS" /* meta */ /* direct lookup */ diff --git a/h2/src/test/org/h2/test/scripts/other/at-time-zone.sql b/h2/src/test/org/h2/test/scripts/other/at-time-zone.sql new file mode 100644 index 0000000000..c66ed8e378 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/at-time-zone.sql @@ -0,0 +1,134 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '10'; +>> 2010-01-01 15:00:01.123456789+10 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '10:00:30'; +>> 2010-01-01 15:00:31.123456789+10:00:30 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '10:00:30.1'; +> exception INVALID_VALUE_2 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE INTERVAL '10:00' HOUR TO MINUTE; +>> 2010-01-01 15:00:01.123456789+10 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE INTERVAL '10:00:30' HOUR TO SECOND; +>> 2010-01-01 15:00:31.123456789+10:00:30 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE INTERVAL '10:00:30.1' HOUR TO SECOND; +> exception INVALID_VALUE_2 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 20:00:01.123456789+05' AT TIME ZONE '18:00'; +>> 2010-01-02 09:00:01.123456789+18 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '-18:00'; +>> 2009-12-31 11:00:01.123456789-18 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '-18:01'; +> exception INVALID_VALUE_2 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '+18:01'; +> exception INVALID_VALUE_2 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '19:00'; +> exception INVALID_VALUE_2 + +CALL RIGHT(CAST(CURRENT_TIMESTAMP AT TIME ZONE '00:00' AS VARCHAR), 3); +>> +00 + +CALL CAST(CURRENT_TIMESTAMP AS VARCHAR) = CAST(CURRENT_TIMESTAMP AT LOCAL AS VARCHAR); +>> TRUE + +CALL CAST(CURRENT_TIMESTAMP AS VARCHAR) = CAST(LOCALTIMESTAMP AT LOCAL AS VARCHAR); +>> TRUE + +CALL TIME WITH TIME ZONE '10:00:01.123456789+05' AT TIME ZONE '10'; +>> 15:00:01.123456789+10 + +CALL RIGHT(CAST(CURRENT_TIME AT TIME ZONE '00:00' AS VARCHAR), 3); +>> +00 + +CALL CAST(CURRENT_TIME AS VARCHAR) = 
CAST(CURRENT_TIME AT LOCAL AS VARCHAR); +>> TRUE + +CALL CAST(CURRENT_TIME AS VARCHAR) = CAST(LOCALTIME AT LOCAL AS VARCHAR); +>> TRUE + +CALL CAST(NULL AS TIMESTAMP) AT LOCAL; +>> null + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00Z' AT TIME ZONE NULL; +>> null + +CALL 1 AT LOCAL; +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST(A TIMESTAMP WITH TIME ZONE, B INTERVAL HOUR TO MINUTE) AS + (VALUES ('2010-01-01 10:00:00Z', '10:00')); +> ok + +EXPLAIN SELECT A AT TIME ZONE B, A AT LOCAL FROM TEST; +>> SELECT "A" AT TIME ZONE "B", "A" AT LOCAL FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE 'Europe/London'; +>> 1999-12-31 23:00:00+00 + +CALL TIMESTAMP WITH TIME ZONE '2000-07-01 01:00:00+02' AT TIME ZONE 'Europe/London'; +>> 2000-07-01 00:00:00+01 + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE 'Z'; +>> 1999-12-31 23:00:00+00 + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE 'UTC'; +>> 1999-12-31 23:00:00+00 + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE 'GMT'; +>> 1999-12-31 23:00:00+00 + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE ''; +> exception INVALID_VALUE_2 + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE 'GMT0'; +> exception INVALID_VALUE_2 + +CALL TIME WITH TIME ZONE '01:00:00+02' AT TIME ZONE 'Europe/London'; +> exception INVALID_VALUE_2 + +SET TIME ZONE '5'; +> ok + +SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'TIME ZONE'; +>> GMT+05:00 + +SET TIME ZONE INTERVAL '4:00' HOUR TO MINUTE; +> ok + +SET TIME ZONE NULL; +> exception INVALID_VALUE_2 + +SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'TIME ZONE'; +>> GMT+04:00 + +CREATE TABLE TEST(T TIMESTAMP) AS (VALUES '2010-01-01 10:00:00'); +> ok + +SELECT CAST(T AS TIMESTAMP WITH TIME ZONE) FROM TEST; +>> 2010-01-01 10:00:00+04 + +SELECT T 
AT LOCAL FROM TEST; +>> 2010-01-01 10:00:00+04 + +SELECT T AT TIME ZONE '8:00' FROM TEST; +>> 2010-01-01 14:00:00+08 + +SET TIME ZONE LOCAL; +> ok + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/boolean-test.sql b/h2/src/test/org/h2/test/scripts/other/boolean-test.sql new file mode 100644 index 0000000000..37383d30f0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/boolean-test.sql @@ -0,0 +1,135 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT + NULL IS UNKNOWN, FALSE IS UNKNOWN, TRUE IS UNKNOWN, + NULL IS FALSE, FALSE IS FALSE, TRUE IS FALSE, + NULL IS TRUE, FALSE IS TRUE, TRUE IS TRUE; +> TRUE FALSE FALSE FALSE TRUE FALSE FALSE FALSE TRUE +> ---- ----- ----- ----- ---- ----- ----- ----- ---- +> TRUE FALSE FALSE FALSE TRUE FALSE FALSE FALSE TRUE +> rows: 1 + +SELECT + NULL IS NOT UNKNOWN, FALSE IS NOT UNKNOWN, TRUE IS NOT UNKNOWN, + NULL IS NOT FALSE, FALSE IS NOT FALSE, TRUE IS NOT FALSE, + NULL IS NOT TRUE, FALSE IS NOT TRUE, TRUE IS NOT TRUE; +> FALSE TRUE TRUE TRUE FALSE TRUE TRUE TRUE FALSE +> ----- ---- ---- ---- ----- ---- ---- ---- ----- +> FALSE TRUE TRUE TRUE FALSE TRUE TRUE TRUE FALSE +> rows: 1 + +CREATE TABLE TEST(B BOOLEAN, N INT) AS VALUES (NULL, NULL), (FALSE, 0), (TRUE, 1); +> ok + +CREATE INDEX TEST_B_IDX ON TEST(B); +> ok + +CREATE INDEX TEST_N_IDX ON TEST(N); +> ok + +SELECT B, B IS UNKNOWN, N IS UNKNOWN, B IS FALSE, N IS FALSE, B IS TRUE, N IS TRUE FROM TEST; +> B B IS UNKNOWN N IS UNKNOWN B IS FALSE N IS FALSE B IS TRUE N IS TRUE +> ----- ------------ ------------ ---------- ---------- --------- --------- +> FALSE FALSE FALSE TRUE TRUE FALSE FALSE +> TRUE FALSE FALSE FALSE FALSE TRUE TRUE +> null TRUE TRUE FALSE FALSE FALSE FALSE +> rows: 3 + +SELECT B, B IS NOT UNKNOWN, N IS NOT UNKNOWN, B IS NOT FALSE, N IS NOT FALSE, B IS NOT TRUE, N IS NOT TRUE FROM TEST; +> B B IS 
NOT UNKNOWN N IS NOT UNKNOWN B IS NOT FALSE N IS NOT FALSE B IS NOT TRUE N IS NOT TRUE +> ----- ---------------- ---------------- -------------- -------------- ------------- ------------- +> FALSE TRUE TRUE FALSE FALSE TRUE TRUE +> TRUE TRUE TRUE TRUE TRUE FALSE FALSE +> null FALSE FALSE TRUE TRUE TRUE TRUE +> rows: 3 + +SELECT B, NOT B IS NOT UNKNOWN, NOT N IS NOT UNKNOWN, NOT B IS NOT FALSE, NOT N IS NOT FALSE, + NOT B IS NOT TRUE, NOT N IS NOT TRUE FROM TEST; +> B B IS UNKNOWN N IS UNKNOWN B IS FALSE N IS FALSE B IS TRUE N IS TRUE +> ----- ------------ ------------ ---------- ---------- --------- --------- +> FALSE FALSE FALSE TRUE TRUE FALSE FALSE +> TRUE FALSE FALSE FALSE FALSE TRUE TRUE +> null TRUE TRUE FALSE FALSE FALSE FALSE +> rows: 3 + +EXPLAIN SELECT B FROM TEST WHERE B IS UNKNOWN; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX: B IS UNKNOWN */ WHERE "B" IS UNKNOWN + +SELECT B FROM TEST WHERE B IS UNKNOWN; +>> null + +EXPLAIN SELECT N FROM TEST WHERE N IS UNKNOWN; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS UNKNOWN + +EXPLAIN SELECT B FROM TEST WHERE B IS NOT UNKNOWN; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX: B IN(FALSE, TRUE) */ WHERE "B" IS NOT UNKNOWN + +SELECT B FROM TEST WHERE B IS NOT UNKNOWN; +> B +> ----- +> FALSE +> TRUE +> rows: 2 + +EXPLAIN SELECT N FROM TEST WHERE N IS NOT UNKNOWN; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS NOT UNKNOWN + +EXPLAIN SELECT B FROM TEST WHERE B IS FALSE; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX: B IS FALSE */ WHERE "B" IS FALSE + +SELECT B FROM TEST WHERE B IS FALSE; +>> FALSE + +EXPLAIN SELECT N FROM TEST WHERE N IS FALSE; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS FALSE + +EXPLAIN SELECT B FROM TEST WHERE B IS NOT FALSE; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX */ WHERE "B" IS NOT FALSE + +SELECT B FROM TEST WHERE B IS NOT FALSE; +> B +> ---- +> TRUE +> null +> rows: 
2 + +EXPLAIN SELECT N FROM TEST WHERE N IS NOT FALSE; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS NOT FALSE + +EXPLAIN SELECT B FROM TEST WHERE B IS TRUE; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX: B IS TRUE */ WHERE "B" IS TRUE + +SELECT B FROM TEST WHERE B IS TRUE; +>> TRUE + +EXPLAIN SELECT N FROM TEST WHERE N IS TRUE; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS TRUE + +EXPLAIN SELECT B FROM TEST WHERE B IS NOT TRUE; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX */ WHERE "B" IS NOT TRUE + +SELECT B FROM TEST WHERE B IS NOT TRUE; +> B +> ----- +> FALSE +> null +> rows: 2 + +EXPLAIN SELECT N FROM TEST WHERE N IS NOT TRUE; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS NOT TRUE + +DELETE FROM TEST WHERE B IS NULL; +> update count: 1 + +ALTER TABLE TEST ALTER COLUMN B SET NOT NULL; +> ok + +-- If column is NOT NULL index condition for IS NOT UNKNOWN shouldn't exist +EXPLAIN SELECT B FROM TEST WHERE B IS NOT UNKNOWN; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX */ WHERE "B" IS NOT UNKNOWN + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/case.sql b/h2/src/test/org/h2/test/scripts/other/case.sql new file mode 100644 index 0000000000..f2fdc6c499 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/case.sql @@ -0,0 +1,133 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select case when 1=null then 1 else 2 end; +>> 2 + +select case (1) when 1 then 1 else 2 end; +>> 1 + +select x, case when x=0 then 'zero' else 'not zero' end y from system_range(0, 2); +> X Y +> - -------- +> 0 zero +> 1 not zero +> 2 not zero +> rows: 3 + +select x, case when x=0 then 'zero' end y from system_range(0, 1); +> X Y +> - ---- +> 0 zero +> 1 null +> rows: 2 + +select x, case x when 0 then 'zero' else 'not zero' end y from system_range(0, 1); +> X Y +> - -------- +> 0 zero +> 1 not zero +> rows: 2 + +select x, case x when 0 then 'zero' when 1 then 'one' end y from system_range(0, 2); +> X Y +> - ---- +> 0 zero +> 1 one +> 2 null +> rows: 3 + +SELECT X, CASE X WHEN 1 THEN 10 WHEN 2, 3 THEN 25 WHEN 4, 5, 6 THEN 50 ELSE 90 END C FROM SYSTEM_RANGE(1, 7); +> X C +> - -- +> 1 10 +> 2 25 +> 3 25 +> 4 50 +> 5 50 +> 6 50 +> 7 90 +> rows: 7 + +SELECT CASE WHEN TRUE THEN 1 END CASE; +> exception SYNTAX_ERROR_1 + +SELECT S, CASE S + WHEN IS NULL THEN 1 + WHEN LOWER('A') THEN 2 + WHEN LIKE '%b' THEN 3 + WHEN ILIKE 'C' THEN 4 + WHEN REGEXP '[dQ]' THEN 5 + WHEN IS NOT DISTINCT FROM 'e' THEN 6 + WHEN IN ('x', 'f') THEN 7 + WHEN IN (VALUES 'g', 'z') THEN 8 + WHEN BETWEEN 'h' AND 'i' THEN 9 + WHEN = 'j' THEN 10 + WHEN < ANY(VALUES 'j', 'l') THEN 11 + WHEN NOT LIKE '%m%' THEN 12 + WHEN IS OF (VARCHAR) THEN 13 + ELSE 13 + END FROM (VALUES NULL, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm') T(S); +> S C2 +> ---- -- +> a 2 +> b 3 +> c 4 +> d 5 +> e 6 +> f 7 +> g 8 +> h 9 +> i 9 +> j 10 +> k 11 +> l 12 +> m 13 +> null 1 +> rows: 14 + +SELECT B, CASE B WHEN IS TRUE THEN 1 WHEN IS FALSE THEN 0 WHEN IS UNKNOWN THEN -1 END + FROM (VALUES TRUE, FALSE, UNKNOWN) T(B); +> B CASE B WHEN IS TRUE THEN 1 WHEN IS FALSE THEN 0 WHEN IS UNKNOWN THEN -1 END +> ----- --------------------------------------------------------------------------- +> FALSE 0 +> TRUE 1 +> null -1 +> rows: 3 + +SELECT J, CASE J WHEN IS JSON ARRAY THEN 1 WHEN 
IS NOT JSON OBJECT THEN 2 ELSE 3 END + FROM (VALUES JSON '[]', JSON 'true', JSON '{}') T(J); +> J CASE J WHEN IS JSON ARRAY THEN 1 WHEN IS NOT JSON OBJECT THEN 2 ELSE 3 END +> ---- -------------------------------------------------------------------------- +> [] 1 +> true 2 +> {} 3 +> rows: 3 + +SELECT V, CASE V + WHEN IN(CURRENT_DATE, DATE '2010-01-01') THEN 1 + ELSE 2 + END FROM (VALUES DATE '2000-01-01', DATE '2010-01-01', DATE '2020-02-01') T(V); +> V CASE V WHEN IN(CURRENT_DATE, DATE '2010-01-01') THEN 1 ELSE 2 END +> ---------- ----------------------------------------------------------------- +> 2000-01-01 2 +> 2010-01-01 1 +> 2020-02-01 2 +> rows: 3 + +SELECT CASE NULL WHEN IS NOT DISTINCT FROM NULL THEN TRUE ELSE FALSE END; +>> TRUE + +SELECT CASE TRUE WHEN CURRENT_DATE THEN 1 END; +> exception TYPES_ARE_NOT_COMPARABLE_2 + +SELECT * FROM (VALUES 0) D(X) JOIN (VALUES TRUE) T(C) WHERE (CASE C WHEN C THEN C END); +> X C +> - ---- +> 0 TRUE +> rows: 1 + +SELECT CASE TRUE WHEN NOT FALSE THEN 1 ELSE 0 END; +>> 1 diff --git a/h2/src/test/org/h2/test/scripts/other/concatenation.sql b/h2/src/test/org/h2/test/scripts/other/concatenation.sql new file mode 100644 index 0000000000..f61452a147 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/concatenation.sql @@ -0,0 +1,50 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(S VARCHAR(10), B VARBINARY(10), A VARCHAR(10) ARRAY) AS VALUES + ('a', X'49', ARRAY['b']), ('', X'', ARRAY[]), (NULL, NULL, NULL); +> ok + +EXPLAIN SELECT S || 'v' || '' || 'x' || S || (S || S), S || '', S || (B || X'50'), B || B || B FROM TEST; +>> SELECT "S" || 'vx' || "S" || "S" || "S", "S", "S" || ("B" || X'50'), "B" || "B" || "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT S || 'v' || '' || 'x' || S || (S || S), S || '', S || (B || X'50'), B || B || B FROM TEST; +> S || 'vx' || S || S || S S S || (B || X'50') B || B || B +> ------------------------ ---- ----------------- ----------- +> avxaaa a aIP X'494949' +> null null null null +> vx P X'' +> rows: 3 + +EXPLAIN SELECT S || A, ARRAY[] || A, S || CAST(ARRAY[] AS VARCHAR ARRAY), A || A || A FROM TEST; +>> SELECT "S" || "A", "A", CAST("S" AS CHARACTER VARYING ARRAY), "A" || "A" || "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT S || A, ARRAY[] || A, S || CAST(ARRAY[] AS VARCHAR ARRAY), A || A || A FROM TEST; +> S || A A CAST(S AS CHARACTER VARYING ARRAY) A || A || A +> ------ ---- ---------------------------------- ----------- +> [] [] [] [] +> [a, b] [b] [a] [b, b, b] +> null null null null +> rows: 3 + +EXPLAIN SELECT B || NULL, B || X'22' || NULL FROM TEST; +>> SELECT CAST(NULL AS BINARY VARYING(10)), CAST(NULL AS BINARY VARYING(11)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT B || NULL, B || X'22' || NULL FROM TEST; +> CAST(NULL AS BINARY VARYING(10)) CAST(NULL AS BINARY VARYING(11)) +> -------------------------------- -------------------------------- +> null null +> null null +> null null +> rows: 3 + +EXPLAIN SELECT B || X'', A || ARRAY['a'] FROM TEST; +>> SELECT "B", "A" || ARRAY ['a'] FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT (S || S) || (B || B) FROM TEST; +>> SELECT "S" || "S" || ("B" || "B") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git 
a/h2/src/test/org/h2/test/scripts/other/conditions.sql b/h2/src/test/org/h2/test/scripts/other/conditions.sql new file mode 100644 index 0000000000..ae1444f1bd --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/conditions.sql @@ -0,0 +1,168 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT + NULL AND NULL, NULL AND FALSE, NULL AND TRUE, + FALSE AND NULL, FALSE AND FALSE, FALSE AND TRUE, + TRUE AND NULL, TRUE AND FALSE, TRUE AND TRUE; +> UNKNOWN FALSE UNKNOWN FALSE FALSE FALSE UNKNOWN FALSE TRUE +> ------- ----- ------- ----- ----- ----- ------- ----- ---- +> null FALSE null FALSE FALSE FALSE null FALSE TRUE +> rows: 1 + +SELECT + NULL OR NULL, NULL OR FALSE, NULL OR TRUE, + FALSE OR NULL, FALSE OR FALSE, FALSE OR TRUE, + TRUE OR NULL, TRUE OR FALSE, TRUE OR TRUE; +> UNKNOWN UNKNOWN TRUE UNKNOWN FALSE TRUE TRUE TRUE TRUE +> ------- ------- ---- ------- ----- ---- ---- ---- ---- +> null null TRUE null FALSE TRUE TRUE TRUE TRUE +> rows: 1 + +SELECT NOT NULL, NOT FALSE, NOT TRUE; +> UNKNOWN TRUE FALSE +> ------- ---- ----- +> null TRUE FALSE +> rows: 1 + +SELECT 0 AND TRUE; +>> FALSE + +SELECT TRUE AND 0; +>> FALSE + +SELECT 1 OR FALSE; +>> TRUE + +SELECT FALSE OR 1; +>> TRUE + +SELECT NOT 0; +>> TRUE + +SELECT NOT 1; +>> FALSE + +CREATE TABLE TEST(B BOOLEAN, Z INT) AS VALUES (NULL, 0); +> ok + +EXPLAIN SELECT NOT NOT B, NOT NOT Z FROM TEST; +>> SELECT "B", CAST("Z" AS BOOLEAN) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT TRUE AND B, B AND TRUE, TRUE AND Z, Z AND TRUE FROM TEST; +>> SELECT "B", "B", CAST("Z" AS BOOLEAN), CAST("Z" AS BOOLEAN) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT FALSE OR B, B OR FALSE, FALSE OR Z, Z OR FALSE FROM TEST; +>> SELECT "B", "B", CAST("Z" AS BOOLEAN), CAST("Z" AS BOOLEAN) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + 
+CREATE TABLE TEST(A INT, B INT); +> ok + +EXPLAIN SELECT A FROM TEST WHERE (A, B) IS NOT DISTINCT FROM NULL; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE ROW ("A", "B") IS NOT DISTINCT FROM NULL + +EXPLAIN SELECT A FROM TEST WHERE (A, B) IS DISTINCT FROM NULL; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE ROW ("A", "B") IS DISTINCT FROM NULL + +EXPLAIN SELECT A IS DISTINCT FROM NULL, NULL IS DISTINCT FROM A FROM TEST; +>> SELECT "A" IS NOT NULL, "A" IS NOT NULL FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT A IS NOT DISTINCT FROM NULL, NULL IS NOT DISTINCT FROM A FROM TEST; +>> SELECT "A" IS NULL, "A" IS NULL FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A NULL); +> ok + +SELECT 1 IN (SELECT A FROM TEST); +>> FALSE + +INSERT INTO TEST VALUES NULL; +> update count: 1 + +SELECT 1 IN (SELECT A FROM TEST); +>> null + +DROP TABLE TEST; +> ok + +SELECT 1 IN (NULL); +>> null + +SELECT 1 IN (SELECT NULL); +>> null + +SELECT 1 IN (VALUES NULL); +>> null + +SELECT 1 IN (SELECT * FROM TABLE(X NULL=())); +>> FALSE + +SELECT (1, 1) IN (VALUES (1, NULL)); +>> null + +SELECT (1, 1) IN (VALUES (NULL, 1)); +>> null + +SELECT (1, 1) IN (SELECT * FROM TABLE(X INT=(), Y INT=())); +>> FALSE + +VALUES FALSE OR NULL OR FALSE; +>> null + +VALUES FALSE OR NULL OR TRUE; +>> TRUE + +VALUES TRUE AND NULL AND TRUE; +>> null + +VALUES TRUE AND NULL AND FALSE; +>> FALSE + +SELECT * FROM (VALUES 1) T(C) WHERE NOT NOT CASE C WHEN 1 THEN TRUE WHEN 2 THEN FALSE ELSE NULL END; +>> 1 + +SELECT C AND C, NOT(C AND C) FROM (VALUES 'F') T(C); +> C AND C (NOT C) OR (NOT C) +> ------- ------------------ +> FALSE TRUE +> rows: 1 + +SELECT C != 2 AND C, NOT (C != 2 AND C) FROM (VALUES TRUE) T(C); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +SELECT ROW(1) = ROW(ROW(1)); +>> TRUE + +SELECT ROW(1) = ROW(ROW(2)); +>> FALSE + +SELECT ROW(1) = ROW(ROW(1, 2)); +> exception TYPES_ARE_NOT_COMPARABLE_2 
+ +SELECT ROW(1) = ROW(ROW(TIME '00:00:00')); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +CREATE TABLE TEST(C1 BOOLEAN GENERATED ALWAYS AS (NOT C2), C2 BOOLEAN GENERATED ALWAYS AS (C1)); +> exception COLUMN_NOT_FOUND_1 + +CREATE TABLE TEST(A INTEGER, B INTEGER, C INTEGER, D INTEGER) AS VALUES (1, 2, 3, 4); +> ok + +EXPLAIN SELECT A = B OR A = C C1, B = A OR A = C C2, A = B OR C = A C3, B = A OR C = A C4 FROM TEST; +>> SELECT "A" IN("B", "C") AS "C1", "A" IN("B", "C") AS "C2", "A" IN("B", "C") AS "C3", "A" IN("B", "C") AS "C4" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT A = B OR A = C OR A = D C1, B = A OR A = C OR A = D C2, A = B OR C = A OR A = D C3, + B = A OR C = A OR A = D C4, A = B OR A = C OR D = A C5, B = A OR A = C OR D = A C6, A = B OR C = A OR D = A C7, + B = A OR C = A OR D = A C8 FROM TEST; +>> SELECT "A" IN("B", "C", "D") AS "C1", "A" IN("B", "C", "D") AS "C2", "A" IN("B", "C", "D") AS "C3", "A" IN("B", "C", "D") AS "C4", "A" IN("B", "C", "D") AS "C5", "A" IN("B", "C", "D") AS "C6", "A" IN("B", "C", "D") AS "C7", "A" IN("B", "C", "D") AS "C8" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/data-change-delta-table.sql b/h2/src/test/org/h2/test/scripts/other/data-change-delta-table.sql new file mode 100644 index 0000000000..f8040387ee --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/data-change-delta-table.sql @@ -0,0 +1,417 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID BIGINT AUTO_INCREMENT PRIMARY KEY, A INT, B INT); +> ok + +CREATE TRIGGER T1 BEFORE INSERT, UPDATE ON TEST FOR EACH ROW CALL "org.h2.test.scripts.Trigger1"; +> ok + +-- INSERT + +SELECT * FROM OLD TABLE (INSERT INTO TEST(A, B) VALUES (100, 100)); +> exception SYNTAX_ERROR_2 + +SELECT * FROM NEW TABLE (INSERT INTO TEST(A, B) VALUES (1, 2)); +> ID A B +> -- - - +> 1 1 2 +> rows: 1 + +SELECT * FROM FINAL TABLE (INSERT INTO TEST(A, B) VALUES (2, 3)); +> ID A B +> -- - -- +> 2 2 30 +> rows: 1 + +-- INSERT from SELECT + +SELECT * FROM NEW TABLE (INSERT INTO TEST(A, B) SELECT * FROM VALUES (3, 4), (4, 5)); +> ID A B +> -- - - +> 3 3 4 +> 4 4 5 +> rows: 2 + +SELECT * FROM FINAL TABLE (INSERT INTO TEST(A, B) SELECT * FROM VALUES (5, 6), (6, 7)); +> ID A B +> -- - -- +> 5 5 60 +> 6 6 70 +> rows: 2 + +-- UPDATE + +SELECT * FROM OLD TABLE (UPDATE TEST SET B = 3 WHERE ID = 1); +> ID A B +> -- - -- +> 1 1 20 +> rows: 1 + +SELECT * FROM NEW TABLE (UPDATE TEST SET B = 3 WHERE ID = 1); +> ID A B +> -- - - +> 1 1 3 +> rows: 1 + +SELECT * FROM FINAL TABLE (UPDATE TEST SET B = 3 WHERE ID = 1); +> ID A B +> -- - -- +> 1 1 30 +> rows: 1 + +-- DELETE + +SELECT * FROM OLD TABLE (DELETE FROM TEST WHERE ID = 1); +> ID A B +> -- - -- +> 1 1 30 +> rows: 1 + +SELECT * FROM OLD TABLE (DELETE FROM TEST WHERE ID = ?); +{ +2 +> ID A B +> -- - -- +> 2 2 30 +> rows: 1 +100 +> ID A B +> -- - - +> rows: 0 +}; +> update count: 0 + +SELECT * FROM NEW TABLE (DELETE FROM TEST); +> exception SYNTAX_ERROR_2 + +SELECT * FROM FINAL TABLE (DELETE FROM TEST); +> exception SYNTAX_ERROR_2 + +SELECT * FROM TEST TABLE (DELETE FROM TEST); +> exception SYNTAX_ERROR_2 + +-- MERGE INTO + +SELECT * FROM OLD TABLE (MERGE INTO TEST KEY(ID) VALUES (3, 3, 5), (7, 7, 8)); +> ID A B +> -- - -- +> 3 3 40 +> rows: 1 + +SELECT * FROM NEW TABLE (MERGE INTO TEST KEY(ID) VALUES (4, 4, 6), (8, 8, 9)); +> ID A B +> -- - - +> 4 4 6 +> 8 8 9 +> rows: 2 + +SELECT * FROM FINAL 
TABLE (MERGE INTO TEST KEY(ID) VALUES (5, 5, 7), (9, 9, 10)); +> ID A B +> -- - --- +> 5 5 70 +> 9 9 100 +> rows: 2 + +-- MERGE INTO from SELECT + +SELECT * FROM OLD TABLE (MERGE INTO TEST KEY(ID) SELECT * FROM VALUES (3, 3, 6), (10, 10, 11)); +> ID A B +> -- - -- +> 3 3 50 +> rows: 1 + +SELECT * FROM NEW TABLE (MERGE INTO TEST KEY(ID) SELECT * FROM VALUES (4, 4, 7), (11, 11, 12)); +> ID A B +> -- -- -- +> 11 11 12 +> 4 4 7 +> rows: 2 + +SELECT * FROM FINAL TABLE (MERGE INTO TEST KEY(ID) SELECT * FROM VALUES (5, 5, 8), (12, 12, 13)); +> ID A B +> -- -- --- +> 12 12 130 +> 5 5 80 +> rows: 2 + +-- MERGE USING + +SELECT * FROM OLD TABLE (MERGE INTO TEST USING + (VALUES (3, 3, 7), (10, 10, 12), (13, 13, 14)) S(ID, A, B) + ON TEST.ID = S.ID + WHEN MATCHED AND S.ID = 3 THEN UPDATE SET TEST.B = S.B + WHEN MATCHED AND S.ID <> 3 THEN DELETE + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B)); +> ID A B +> -- -- --- +> 10 10 110 +> 3 3 60 +> rows: 2 + +SELECT * FROM NEW TABLE (MERGE INTO TEST USING + (VALUES (4, 4, 8), (11, 11, 13), (14, 14, 15)) S(ID, A, B) + ON TEST.ID = S.ID + WHEN MATCHED AND S.ID = 4 THEN UPDATE SET TEST.B = S.B + WHEN MATCHED AND S.ID <> 4 THEN DELETE + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B)); +> ID A B +> -- -- -- +> 14 14 15 +> 4 4 8 +> rows: 2 + +SELECT * FROM FINAL TABLE (MERGE INTO TEST USING + (VALUES (5, 5, 9), (12, 12, 15), (15, 15, 16)) S(ID, A, B) + ON TEST.ID = S.ID + WHEN MATCHED AND S.ID = 5 THEN UPDATE SET TEST.B = S.B + WHEN MATCHED AND S.ID <> 5 THEN DELETE + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B)); +> ID A B +> -- -- --- +> 15 15 160 +> 5 5 90 +> rows: 2 + +-- REPLACE + +SELECT * FROM OLD TABLE (REPLACE INTO TEST VALUES (3, 3, 8), (16, 16, 17)); +> exception SYNTAX_ERROR_2 + +SELECT * FROM NEW TABLE (REPLACE INTO TEST VALUES (4, 4, 9), (17, 17, 18)); +> exception SYNTAX_ERROR_2 + +SELECT * FROM FINAL TABLE (REPLACE INTO TEST VALUES (5, 5, 10), (18, 18, 19)); +> exception SYNTAX_ERROR_2 + +SET MODE MySQL; 
+> ok + +SELECT * FROM OLD TABLE (REPLACE INTO TEST VALUES (3, 3, 8), (16, 16, 17)); +> ID A B +> -- - -- +> 3 3 70 +> rows: 1 + +SELECT * FROM NEW TABLE (REPLACE INTO TEST VALUES (4, 4, 9), (17, 17, 18)); +> ID A B +> -- -- -- +> 17 17 18 +> 4 4 9 +> rows: 2 + +SELECT * FROM FINAL TABLE (REPLACE INTO TEST VALUES (5, 5, 10), (18, 18, 19)); +> ID A B +> -- -- --- +> 18 18 190 +> 5 5 100 +> rows: 2 + +-- REPLACE from SELECT + +SELECT * FROM OLD TABLE (REPLACE INTO TEST SELECT * FROM VALUES (3, 3, 9), (19, 19, 20)); +> ID A B +> -- - -- +> 3 3 80 +> rows: 1 + +SELECT * FROM NEW TABLE (REPLACE INTO TEST SELECT * FROM VALUES (4, 4, 10), (20, 20, 21)); +> ID A B +> -- -- -- +> 20 20 21 +> 4 4 10 +> rows: 2 + +SELECT * FROM FINAL TABLE (REPLACE INTO TEST SELECT * FROM VALUES (5, 5, 11), (21, 21, 22)); +> ID A B +> -- -- --- +> 21 21 220 +> 5 5 110 +> rows: 2 + +SET MODE Regular; +> ok + +TRUNCATE TABLE TEST RESTART IDENTITY; +> update count: 16 + +CREATE VIEW TEST_VIEW AS SELECT * FROM TEST; +> ok + +CREATE TRIGGER T2 INSTEAD OF INSERT, UPDATE, DELETE ON TEST_VIEW FOR EACH ROW CALL "org.h2.test.scripts.Trigger2"; +> ok + +-- INSERT + +SELECT * FROM NEW TABLE (INSERT INTO TEST_VIEW(A, B) VALUES (1, 2)); +> ID A B +> ---- - - +> null 1 2 +> rows: 1 + +SELECT * FROM FINAL TABLE (INSERT INTO TEST_VIEW(A, B) VALUES (2, 3)); +> ID A B +> -- - -- +> 2 2 30 +> rows: 1 + +-- INSERT from SELECT + +SELECT * FROM NEW TABLE (INSERT INTO TEST_VIEW(A, B) SELECT * FROM VALUES (3, 4), (4, 5)); +> ID A B +> ---- - - +> null 3 4 +> null 4 5 +> rows: 2 + +SELECT * FROM FINAL TABLE (INSERT INTO TEST_VIEW(A, B) SELECT * FROM VALUES (5, 6), (6, 7)); +> ID A B +> -- - -- +> 5 5 60 +> 6 6 70 +> rows: 2 + +-- UPDATE + +SELECT * FROM OLD TABLE (UPDATE TEST_VIEW SET B = 3 WHERE ID = 1); +> ID A B +> -- - -- +> 1 1 20 +> rows: 1 + +SELECT * FROM NEW TABLE (UPDATE TEST_VIEW SET B = 3 WHERE ID = 1); +> ID A B +> -- - - +> 1 1 3 +> rows: 1 + +SELECT * FROM FINAL TABLE (UPDATE TEST_VIEW SET B = 3 WHERE 
ID = 1); +> ID A B +> -- - -- +> 1 1 30 +> rows: 1 + +-- DELETE + +SELECT * FROM OLD TABLE (DELETE FROM TEST_VIEW WHERE ID = 1); +> ID A B +> -- - -- +> 1 1 30 +> rows: 1 + +SELECT * FROM OLD TABLE (DELETE FROM TEST_VIEW WHERE ID = ?); +{ +2 +> ID A B +> -- - -- +> 2 2 30 +> rows: 1 +100 +> ID A B +> -- - - +> rows: 0 +}; +> update count: 0 + +-- MERGE INTO + +SELECT * FROM OLD TABLE (MERGE INTO TEST_VIEW KEY(ID) VALUES (3, 3, 5), (7, 7, 8)); +> ID A B +> -- - -- +> 3 3 40 +> rows: 1 + +SELECT * FROM NEW TABLE (MERGE INTO TEST_VIEW KEY(ID) VALUES (4, 4, 6), (8, 8, 9)); +> ID A B +> -- - - +> 4 4 6 +> 8 8 9 +> rows: 2 + +SELECT * FROM FINAL TABLE (MERGE INTO TEST_VIEW KEY(ID) VALUES (5, 5, 7), (9, 9, 10)); +> ID A B +> -- - --- +> 5 5 70 +> 9 9 100 +> rows: 2 + +-- MERGE INTO from SELECT + +SELECT * FROM OLD TABLE (MERGE INTO TEST_VIEW KEY(ID) SELECT * FROM VALUES (3, 3, 6), (10, 10, 11)); +> ID A B +> -- - -- +> 3 3 50 +> rows: 1 + +SELECT * FROM NEW TABLE (MERGE INTO TEST_VIEW KEY(ID) SELECT * FROM VALUES (4, 4, 7), (11, 11, 12)); +> ID A B +> -- -- -- +> 11 11 12 +> 4 4 7 +> rows: 2 + +SELECT * FROM FINAL TABLE (MERGE INTO TEST_VIEW KEY(ID) SELECT * FROM VALUES (5, 5, 8), (12, 12, 13)); +> ID A B +> -- -- --- +> 12 12 130 +> 5 5 80 +> rows: 2 + +-- MERGE USING + +SELECT * FROM OLD TABLE (MERGE INTO TEST_VIEW TEST USING + (VALUES (3, 3, 7), (10, 10, 12), (13, 13, 14)) S(ID, A, B) + ON TEST.ID = S.ID + WHEN MATCHED AND S.ID = 3 THEN UPDATE SET TEST.B = S.B + WHEN MATCHED AND S.ID <> 3 THEN DELETE + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B)); +> ID A B +> -- -- --- +> 10 10 110 +> 3 3 60 +> rows: 2 + +SELECT * FROM NEW TABLE (MERGE INTO TEST_VIEW TEST USING + (VALUES (4, 4, 8), (11, 11, 13), (14, 14, 15)) S(ID, A, B) + ON TEST.ID = S.ID + WHEN MATCHED AND S.ID = 4 THEN UPDATE SET TEST.B = S.B + WHEN MATCHED AND S.ID <> 4 THEN DELETE + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B)); +> ID A B +> -- -- -- +> 14 14 15 +> 4 4 8 +> rows: 2 + +DROP TABLE 
TEST CASCADE; +> ok + +CREATE TABLE TEST(ID BIGINT, DATA CHARACTER LARGE OBJECT); +> ok + +INSERT INTO TEST VALUES (1, REPEAT('A', 1000)); +> update count: 1 + +SELECT ID FROM FINAL TABLE (INSERT INTO TEST VALUES (2, REPEAT('B', 1000))); +>> 2 + +SELECT ID, SUBSTRING(DATA FROM 1 FOR 2) FROM TEST; +> ID SUBSTRING(DATA FROM 1 FOR 2) +> -- ---------------------------- +> 1 AA +> 2 BB +> rows: 2 + +@reconnect + +SELECT ID, SUBSTRING(DATA FROM 1 FOR 2) FROM TEST; +> ID SUBSTRING(DATA FROM 1 FOR 2) +> -- ---------------------------- +> 1 AA +> 2 BB +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/field-reference.sql b/h2/src/test/org/h2/test/scripts/other/field-reference.sql new file mode 100644 index 0000000000..203ea53b0d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/field-reference.sql @@ -0,0 +1,31 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT (R).A, (R).B FROM (VALUES CAST((1, 2) AS ROW(A INT, B INT))) T(R); +> (R).A (R).B +> ----- ----- +> 1 2 +> rows: 1 + +SELECT (R).C FROM (VALUES CAST((1, 2) AS ROW(A INT, B INT))) T(R); +> exception COLUMN_NOT_FOUND_1 + +SELECT (R).C1, (R).C2 FROM (VALUES ((1, 2))) T(R); +> (R).C1 (R).C2 +> ------ ------ +> 1 2 +> rows: 1 + +SELECT (1, 2).C2; +>> 2 + +SELECT (1, 2).C0; +> exception COLUMN_NOT_FOUND_1 + +SELECT (1, 2).C; +> exception COLUMN_NOT_FOUND_1 + +SELECT (1, 2).CX; +> exception COLUMN_NOT_FOUND_1 diff --git a/h2/src/test/org/h2/test/scripts/other/help.sql b/h2/src/test/org/h2/test/scripts/other/help.sql index a1e5fb15fb..efd05de9c6 100644 --- a/h2/src/test/org/h2/test/scripts/other/help.sql +++ b/h2/src/test/org/h2/test/scripts/other/help.sql @@ -1,26 +1,26 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- help abc; -> ID SECTION TOPIC SYNTAX TEXT -> -- ------- ----- ------ ---- +> SECTION TOPIC SYNTAX TEXT +> ------- ----- ------ ---- > rows: 0 HELP ABCDE EF_GH; -> ID SECTION TOPIC SYNTAX TEXT -> -- ------- ----- ------ ---- +> SECTION TOPIC SYNTAX TEXT +> ------- ----- ------ ---- > rows: 0 HELP HELP; -> ID SECTION TOPIC SYNTAX TEXT -> -- ---------------- ----- ----------------------- ---------------------------------------------------- -> 67 Commands (Other) HELP HELP [ anything [...] ] Displays the help pages of SQL commands or keywords. +> SECTION TOPIC SYNTAX TEXT +> ---------------- ----- ----------------------- ---------------------------------------------------- +> Commands (Other) HELP HELP [ anything [...] ] Displays the help pages of SQL commands or keywords. > rows: 1 HELP he lp; -> ID SECTION TOPIC SYNTAX TEXT -> -- ---------------- ----- ----------------------- ---------------------------------------------------- -> 67 Commands (Other) HELP HELP [ anything [...] ] Displays the help pages of SQL commands or keywords. +> SECTION TOPIC SYNTAX TEXT +> ---------------- ----- ----------------------- ---------------------------------------------------- +> Commands (Other) HELP HELP [ anything [...] ] Displays the help pages of SQL commands or keywords. > rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/other/sequence.sql b/h2/src/test/org/h2/test/scripts/other/sequence.sql new file mode 100644 index 0000000000..16c2e25f9e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/sequence.sql @@ -0,0 +1,481 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE SEQUENCE SEQ NO CACHE; +> ok + +CREATE TABLE TEST(NEXT INT, CURRENT INT) AS (VALUES (10, 11), (20, 21)); +> ok + +SELECT NEXT "VALUE", NEXT VALUE FOR SEQ, CURRENT "VALUE", CURRENT VALUE FOR SEQ FROM TEST; +> VALUE NEXT VALUE FOR PUBLIC.SEQ VALUE CURRENT VALUE FOR PUBLIC.SEQ +> ----- ------------------------- ----- ---------------------------- +> 10 1 11 1 +> 20 2 21 2 +> rows: 2 + +EXPLAIN SELECT NEXT "VALUE", NEXT VALUE FOR SEQ, CURRENT "VALUE", CURRENT VALUE FOR SEQ FROM TEST; +>> SELECT "NEXT" AS "VALUE", NEXT VALUE FOR "PUBLIC"."SEQ", "CURRENT" AS "VALUE", CURRENT VALUE FOR "PUBLIC"."SEQ" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE S1 START WITH 11; +> ok + +CREATE SEQUENCE S2 START WITH 61; +> ok + +SELECT NEXT VALUE FOR S1 A, NEXT VALUE FOR S2 B, NEXT VALUE FOR S1 C, NEXT VALUE FOR S2 D FROM SYSTEM_RANGE(1, 2); +> A B C D +> -- -- -- -- +> 11 61 11 61 +> 12 62 12 62 +> rows: 2 + +CREATE TABLE TEST(A BIGINT, B BIGINT, C BIGINT, D BIGINT, V INT) AS + SELECT NEXT VALUE FOR S1, NEXT VALUE FOR S2, NEXT VALUE FOR S1, NEXT VALUE FOR S2, X FROM SYSTEM_RANGE(1, 2); +> ok + +INSERT INTO TEST + SELECT NEXT VALUE FOR S1, NEXT VALUE FOR S2, NEXT VALUE FOR S1, NEXT VALUE FOR S2, X FROM SYSTEM_RANGE(3, 4); +> update count: 2 + +INSERT INTO TEST VALUES + (NEXT VALUE FOR S1, NEXT VALUE FOR S2, NEXT VALUE FOR S1, NEXT VALUE FOR S2, 5), + (NEXT VALUE FOR S1, NEXT VALUE FOR S2, NEXT VALUE FOR S1, NEXT VALUE FOR S2, 6); +> update count: 2 + +TABLE TEST; +> A B C D V +> -- -- -- -- - +> 13 63 13 63 1 +> 14 64 14 64 2 +> 15 65 15 65 3 +> 16 66 16 66 4 +> 17 67 17 67 5 +> 18 68 18 68 6 +> rows: 6 + +UPDATE TEST SET A = NEXT VALUE FOR S1, B = NEXT VALUE FOR S2, C = NEXT VALUE FOR S1, D = NEXT VALUE FOR S2 + WHERE V BETWEEN 3 AND 4; +> update count: 2 + +TABLE TEST; +> A B C D V +> -- -- -- -- - +> 13 63 13 63 1 +> 14 64 14 64 2 +> 17 67 17 67 5 +> 18 68 18 68 6 
+> 19 69 19 69 3 +> 20 70 20 70 4 +> rows: 6 + +MERGE INTO TEST D USING (VALUES 7, 8) S ON D.V = S.C1 + WHEN NOT MATCHED THEN INSERT VALUES + (NEXT VALUE FOR S1, NEXT VALUE FOR S2, NEXT VALUE FOR S1, NEXT VALUE FOR S2, S.C1); +> update count: 2 + +TABLE TEST; +> A B C D V +> -- -- -- -- - +> 13 63 13 63 1 +> 14 64 14 64 2 +> 17 67 17 67 5 +> 18 68 18 68 6 +> 19 69 19 69 3 +> 20 70 20 70 4 +> 21 71 21 71 7 +> 22 72 22 72 8 +> rows: 8 + +MERGE INTO TEST D USING (VALUES 7, 8) S ON D.V = S.C1 + WHEN MATCHED THEN UPDATE + SET A = NEXT VALUE FOR S1, B = NEXT VALUE FOR S2, C = NEXT VALUE FOR S1, D = NEXT VALUE FOR S2; +> update count: 2 + +TABLE TEST; +> A B C D V +> -- -- -- -- - +> 13 63 13 63 1 +> 14 64 14 64 2 +> 17 67 17 67 5 +> 18 68 18 68 6 +> 19 69 19 69 3 +> 20 70 20 70 4 +> 23 73 23 73 7 +> 24 74 24 74 8 +> rows: 8 + +DROP TABLE TEST; +> ok + +SET MODE MariaDB; +> ok + +SELECT NEXT VALUE FOR S1 A, NEXT VALUE FOR S2 B, NEXT VALUE FOR S1 C, NEXT VALUE FOR S2 D FROM SYSTEM_RANGE(1, 2); +> A B C D +> -- -- -- -- +> 25 75 26 76 +> 27 77 28 78 +> rows: 2 + +SET MODE Regular; +> ok + +DROP SEQUENCE S1; +> ok + +DROP SEQUENCE S2; +> ok + +CREATE SEQUENCE SEQ; +> ok + +SELECT SEQ.NEXTVAL; +> exception COLUMN_NOT_FOUND_1 + +SELECT SEQ.CURRVAL; +> exception COLUMN_NOT_FOUND_1 + +DROP SEQUENCE SEQ; +> ok + +SET MODE Oracle; +> ok + +create sequence seq; +> ok + +select case seq.nextval when 2 then 'two' when 3 then 'three' when 1 then 'one' else 'other' end result from dual; +> RESULT +> ------ +> one +> rows: 1 + +drop sequence seq; +> ok + +create schema s authorization sa; +> ok + +alter sequence if exists s.seq restart with 10; +> ok + +create sequence s.seq cache 0; +> ok + +alter sequence if exists s.seq restart with 3; +> ok + +select s.seq.nextval as x; +> X +> - +> 3 +> rows: 1 + +drop sequence s.seq; +> ok + +create sequence s.seq cache 0; +> ok + +alter sequence s.seq restart with 10; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION DROP; +> SCRIPT +> 
---------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE SCHEMA IF NOT EXISTS "S" AUTHORIZATION "SA"; +> DROP SEQUENCE IF EXISTS "S"."SEQ"; +> CREATE SEQUENCE "S"."SEQ" AS NUMERIC(19, 0) START WITH 1 RESTART WITH 10 NO CACHE; +> rows (ordered): 4 + +drop schema s cascade; +> ok + +create schema TEST_SCHEMA; +> ok + +create sequence TEST_SCHEMA.TEST_SEQ; +> ok + +select TEST_SCHEMA.TEST_SEQ.CURRVAL; +> exception CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1 + +select TEST_SCHEMA.TEST_SEQ.nextval; +>> 1 + +select TEST_SCHEMA.TEST_SEQ.CURRVAL; +>> 1 + +drop schema TEST_SCHEMA cascade; +> ok + +CREATE TABLE TEST(CURRVAL INT, NEXTVAL INT); +> ok + +INSERT INTO TEST VALUES (3, 4); +> update count: 1 + +SELECT TEST.CURRVAL, TEST.NEXTVAL FROM TEST; +> CURRVAL NEXTVAL +> ------- ------- +> 3 4 +> rows: 1 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok + +CREATE SEQUENCE SEQ01 AS TINYINT; +> ok + +CREATE SEQUENCE SEQ02 AS SMALLINT; +> ok + +CREATE SEQUENCE SEQ03 AS INTEGER; +> ok + +CREATE SEQUENCE SEQ04 AS BIGINT; +> ok + +CREATE SEQUENCE SEQ05 AS REAL; +> ok + +CREATE SEQUENCE SEQ06 AS DOUBLE PRECISION; +> ok + +CREATE SEQUENCE SEQ AS NUMERIC(10, 2); +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE SEQUENCE SEQ AS NUMERIC(100, 20); +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE SEQUENCE SEQ07 AS DECIMAL; +> ok + +CREATE SEQUENCE SEQ08 AS DECIMAL(10); +> ok + +CREATE SEQUENCE SEQ11 AS DECIMAL(10, 2); +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE SEQUENCE SEQ09 AS FLOAT; +> ok + +CREATE SEQUENCE SEQ10 AS FLOAT(20); +> ok + +CREATE SEQUENCE SEQ11 AS DECFLOAT; +> ok + +CREATE SEQUENCE SEQ12 AS DECFLOAT(10); +> ok + +CREATE SEQUENCE SEQ13 AS DECFLOAT(20); +> ok + +SELECT SEQUENCE_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, MAXIMUM_VALUE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.SEQUENCES; +> 
SEQUENCE_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE MAXIMUM_VALUE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ------------- ---------------- ----------------- ----------------------- ------------- ------------------- ------------------ -------------------------- ---------------------- +> SEQ01 TINYINT 8 2 0 127 TINYINT null null +> SEQ02 SMALLINT 16 2 0 32767 SMALLINT null null +> SEQ03 INTEGER 32 2 0 2147483647 INTEGER null null +> SEQ04 BIGINT 64 2 0 9223372036854775807 BIGINT null null +> SEQ05 REAL 24 2 null 16777216 REAL null null +> SEQ06 DOUBLE PRECISION 53 2 null 9007199254740992 DOUBLE PRECISION null null +> SEQ07 NUMERIC 19 10 0 9223372036854775807 DECIMAL null null +> SEQ08 NUMERIC 10 10 0 9999999999 DECIMAL 10 null +> SEQ09 DOUBLE PRECISION 53 2 null 9007199254740992 FLOAT null null +> SEQ10 REAL 24 2 null 16777216 FLOAT 20 null +> SEQ11 DECFLOAT 19 10 null 9223372036854775807 DECFLOAT null null +> SEQ12 DECFLOAT 10 10 null 10000000000 DECFLOAT 10 null +> SEQ13 DECFLOAT 19 10 null 9223372036854775807 DECFLOAT 20 null +> rows: 13 + +SELECT NEXT VALUE FOR SEQ01 IS OF (TINYINT); +>> TRUE + +DROP ALL OBJECTS; +> ok + +CREATE SEQUENCE SEQ AS NUMERIC(10, 20); +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE SEQUENCE SEQ AS VARCHAR(10); +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE SEQUENCE SEQ NO; +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST( + A BIGINT GENERATED ALWAYS AS (C + 1), + B BIGINT GENERATED ALWAYS AS (D + 1), + C BIGINT GENERATED ALWAYS AS IDENTITY, + D BIGINT DEFAULT 3, + E BIGINT); +> ok + +INSERT INTO TEST(E) VALUES 10; +> update count: 1 + +TABLE TEST; +> A B C D E +> - - - - -- +> 2 4 1 3 10 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE SEQUENCE SEQ MINVALUE 1 MAXVALUE 2; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 1 + +SELECT NEXT VALUE FOR SEQ; +>> 2 + +SELECT CACHE FROM INFORMATION_SCHEMA.SEQUENCES WHERE SEQUENCE_NAME = 'SEQ'; +>> 2 + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION; 
+> SCRIPT +> ----------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE SEQUENCE "PUBLIC"."SEQ" START WITH 1 MAXVALUE 2 EXHAUSTED; +> rows (ordered): 2 + +@reconnect + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +ALTER SEQUENCE SEQ RESTART; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 1 + +ALTER SEQUENCE SEQ CYCLE; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 2 + +SELECT NEXT VALUE FOR SEQ; +>> 1 + +ALTER SEQUENCE SEQ INCREMENT BY -1; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 2 + +SELECT NEXT VALUE FOR SEQ; +>> 1 + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE SEQ MINVALUE 9223372036854775806; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775806 + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775807 + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +ALTER SEQUENCE SEQ NO CACHE RESTART; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775806 + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775807 + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +ALTER SEQUENCE SEQ CACHE 2 MINVALUE 9223372036854775805 RESTART WITH 9223372036854775805; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775805 + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775806 + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775807 + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE SEQ INCREMENT BY -1 MAXVALUE -9223372036854775807; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775807 + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775808 + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +ALTER SEQUENCE SEQ NO CACHE RESTART; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775807 + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775808 + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +ALTER SEQUENCE SEQ CACHE 2 MAXVALUE -9223372036854775806 RESTART WITH -9223372036854775806; +> ok + +SELECT 
NEXT VALUE FOR SEQ; +>> -9223372036854775806 + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775807 + +SELECT BASE_VALUE FROM INFORMATION_SCHEMA.SEQUENCES WHERE SEQUENCE_NAME = 'SEQ'; +>> -9223372036854775808 + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775808 + +SELECT BASE_VALUE FROM INFORMATION_SCHEMA.SEQUENCES WHERE SEQUENCE_NAME = 'SEQ'; +>> null + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +DROP SEQUENCE SEQ; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/set.sql b/h2/src/test/org/h2/test/scripts/other/set.sql new file mode 100644 index 0000000000..35296158fa --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/set.sql @@ -0,0 +1,244 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +@reconnect off + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> READ COMMITTED + +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> READ UNCOMMITTED + +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> READ COMMITTED + +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> REPEATABLE READ + +SET TRANSACTION ISOLATION LEVEL SNAPSHOT; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> SNAPSHOT + +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> SERIALIZABLE + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> 
READ UNCOMMITTED + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> READ COMMITTED + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> REPEATABLE READ + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SNAPSHOT; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> SNAPSHOT + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> SERIALIZABLE + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED; +> ok + +SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'VARIABLE_BINARY'; +>> FALSE + +CREATE MEMORY TABLE TEST(B BINARY); +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> -------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "B" BINARY ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +SET VARIABLE_BINARY TRUE; +> ok + +SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'VARIABLE_BINARY'; +>> TRUE + +CREATE MEMORY TABLE TEST(B BINARY); +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ---------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "B" BINARY VARYING ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +SET VARIABLE_BINARY FALSE; +> ok + +SET LOCK_MODE 0; +> ok + +CALL LOCK_MODE(); +>> 0 + +SET LOCK_MODE 1; +> ok + +CALL 
LOCK_MODE(); +>> 3 + +SET LOCK_MODE 2; +> ok + +CALL LOCK_MODE(); +>> 3 + +SET LOCK_MODE 3; +> ok + +CALL LOCK_MODE(); +>> 3 + +@reconnect on + +SELECT CURRENT_PATH; +> CURRENT_PATH +> ------------ +> +> rows: 1 + +SET SCHEMA_SEARCH_PATH PUBLIC, INFORMATION_SCHEMA; +> ok + +SELECT CURRENT_PATH; +>> "PUBLIC","INFORMATION_SCHEMA" + +SET SCHEMA_SEARCH_PATH PUBLIC; +> ok + +CREATE TABLE TEST(C1 INT, C2 INT); +> ok + +CREATE INDEX IDX ON TEST(C1 ASC, C2 DESC); +> ok + +SELECT COLUMN_NAME, ORDERING_SPECIFICATION, NULL_ORDERING FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'IDX'; +> COLUMN_NAME ORDERING_SPECIFICATION NULL_ORDERING +> ----------- ---------------------- ------------- +> C1 ASC FIRST +> C2 DESC LAST +> rows: 2 + +DROP INDEX IDX; +> ok + +SET DEFAULT_NULL_ORDERING LOW; +> ok + +CREATE INDEX IDX ON TEST(C1 ASC, C2 DESC); +> ok + +SELECT COLUMN_NAME, ORDERING_SPECIFICATION, NULL_ORDERING FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'IDX'; +> COLUMN_NAME ORDERING_SPECIFICATION NULL_ORDERING +> ----------- ---------------------- ------------- +> C1 ASC FIRST +> C2 DESC LAST +> rows: 2 + +DROP INDEX IDX; +> ok + +SET DEFAULT_NULL_ORDERING HIGH; +> ok + +CREATE INDEX IDX ON TEST(C1 ASC, C2 DESC); +> ok + +SELECT COLUMN_NAME, ORDERING_SPECIFICATION, NULL_ORDERING FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'IDX'; +> COLUMN_NAME ORDERING_SPECIFICATION NULL_ORDERING +> ----------- ---------------------- ------------- +> C1 ASC LAST +> C2 DESC FIRST +> rows: 2 + +DROP INDEX IDX; +> ok + +SET DEFAULT_NULL_ORDERING FIRST; +> ok + +CREATE INDEX IDX ON TEST(C1 ASC, C2 DESC); +> ok + +SELECT COLUMN_NAME, ORDERING_SPECIFICATION, NULL_ORDERING FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'IDX'; +> COLUMN_NAME ORDERING_SPECIFICATION NULL_ORDERING +> ----------- ---------------------- ------------- +> C1 ASC FIRST +> C2 DESC FIRST +> rows: 2 + +DROP INDEX IDX; +> ok + +SET DEFAULT_NULL_ORDERING LAST; +> ok + +CREATE INDEX 
IDX ON TEST(C1 ASC, C2 DESC); +> ok + +SELECT COLUMN_NAME, ORDERING_SPECIFICATION, NULL_ORDERING FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'IDX'; +> COLUMN_NAME ORDERING_SPECIFICATION NULL_ORDERING +> ----------- ---------------------- ------------- +> C1 ASC LAST +> C2 DESC LAST +> rows: 2 + +DROP INDEX IDX; +> ok + +SET DEFAULT_NULL_ORDERING LOW; +> ok + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/two_phase_commit.sql b/h2/src/test/org/h2/test/scripts/other/two_phase_commit.sql new file mode 100644 index 0000000000..2cb8a7a17d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/two_phase_commit.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- issue #3033 +CREATE TABLE TEST(A BIGINT PRIMARY KEY, B BLOB); +> ok + +INSERT INTO TEST VALUES(1, REPEAT('010203040506070809101112',11)); +> update count: 1 + +@autocommit off + +DELETE FROM TEST WHERE A = 1; +> update count: 1 + +PREPARE COMMIT commit1; +> ok + +@reconnect + +ROLLBACK TRANSACTION commit1; +> ok + +SELECT B FROM TEST WHERE A = 1; +>> X'303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132' + diff --git a/h2/src/test/org/h2/test/scripts/other/unique_include.sql b/h2/src/test/org/h2/test/scripts/other/unique_include.sql new file mode 100644 index 0000000000..9f5428045a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/unique_include.sql 
@@ -0,0 +1,76 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +CREATE UNIQUE INDEX TEST_IDX ON TEST(C) INCLUDE(B); +> ok + +INSERT INTO TEST VALUES (10, 20, 1), (11, 20, 2), (12, 21, 3); +> update count: 3 + +INSERT INTO TEST VALUES (13, 22, 1); +> exception DUPLICATE_KEY_1 + +SELECT INDEX_NAME, TABLE_NAME, INDEX_TYPE_NAME FROM INFORMATION_SCHEMA.INDEXES WHERE INDEX_NAME = 'TEST_IDX'; +> INDEX_NAME TABLE_NAME INDEX_TYPE_NAME +> ---------- ---------- --------------- +> TEST_IDX TEST UNIQUE INDEX +> rows: 1 + +SELECT INDEX_NAME, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, IS_UNIQUE FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'TEST_IDX' ORDER BY ORDINAL_POSITION; +> INDEX_NAME TABLE_NAME COLUMN_NAME ORDINAL_POSITION IS_UNIQUE +> ---------- ---------- ----------- ---------------- --------- +> TEST_IDX TEST C 1 TRUE +> TEST_IDX TEST B 2 FALSE +> rows (ordered): 2 + +SELECT DB_OBJECT_SQL('INDEX', 'PUBLIC', 'TEST_IDX'); +>> CREATE UNIQUE INDEX "PUBLIC"."TEST_IDX" ON "PUBLIC"."TEST"("C" NULLS FIRST) INCLUDE("B" NULLS FIRST) + +ALTER TABLE TEST ADD CONSTRAINT TEST_UNI_C UNIQUE(C); +> ok + +SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE, TABLE_NAME, INDEX_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_NAME = 'TEST'; +> CONSTRAINT_NAME CONSTRAINT_TYPE TABLE_NAME INDEX_NAME +> --------------- --------------- ---------- ---------- +> TEST_UNI_C UNIQUE TEST TEST_IDX +> rows: 1 + +SELECT CONSTRAINT_NAME, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE + WHERE CONSTRAINT_NAME = 'TEST_UNI_C'; +> CONSTRAINT_NAME TABLE_NAME COLUMN_NAME ORDINAL_POSITION +> --------------- ---------- ----------- ---------------- +> TEST_UNI_C TEST C 1 +> rows: 1 + +EXPLAIN SELECT B, C FROM TEST ORDER BY C, B; +>> SELECT "B", "C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ 
ORDER BY 2, 1 /* index sorted */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +CREATE UNIQUE INDEX TEST_IDX_A_B ON TEST(A) INCLUDE (B); +> ok + +CREATE UNIQUE INDEX TEST_IDX_A ON TEST(A); +> ok + +CREATE UNIQUE INDEX TEST_IDX_A_B_C ON TEST(A) INCLUDE (B, C); +> ok + +ALTER TABLE TEST ADD CONSTRAINT UNI_TEST_A UNIQUE(A); +> ok + +SELECT INDEX_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_NAME = 'UNI_TEST_A'; +>> TEST_IDX_A + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/package.html b/h2/src/test/org/h2/test/scripts/package.html index 2e3a318a32..cf8c836c51 100644 --- a/h2/src/test/org/h2/test/scripts/package.html +++ b/h2/src/test/org/h2/test/scripts/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/scripts/comments.sql b/h2/src/test/org/h2/test/scripts/parser/comments.sql similarity index 70% rename from h2/src/test/org/h2/test/scripts/comments.sql rename to h2/src/test/org/h2/test/scripts/parser/comments.sql index a527deb93e..aa4f6e635a 100644 --- a/h2/src/test/org/h2/test/scripts/comments.sql +++ b/h2/src/test/org/h2/test/scripts/parser/comments.sql @@ -1,25 +1,22 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- CALL 1 /* comment */ ;; -> 1 -> - -> 1 -> rows: 1 +>> 1 CALL 1 /* comment */ ; -> 1 -> - -> 1 -> rows: 1 +>> 1 -call /* remark * / * /* ** // end */ 1; -> 1 -> - -> 1 -> rows: 1 +call /* remark * / * /* ** // end */*/ 1; +>> 1 + +call /*/*/ */*/ 1; +>> 1 + +call /*1/*1*/1*/1; +>> 1 --- remarks/comments/syntax ---------------------------------------------------------------------------------------------- CREATE TABLE TEST( @@ -46,5 +43,8 @@ DROP_ TABLE_ TEST_T; DROP TABLE TEST /*; > exception SYNTAX_ERROR_1 +call /* remark * / * /* ** // end */ 1; +> exception SYNTAX_ERROR_1 + DROP TABLE TEST; > ok diff --git a/h2/src/test/org/h2/test/scripts/parser/identifiers.sql b/h2/src/test/org/h2/test/scripts/parser/identifiers.sql new file mode 100644 index 0000000000..6d8bb4957a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/parser/identifiers.sql @@ -0,0 +1,52 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT 1 "A""B""""C"""; +> A"B""C" +> ------- +> 1 +> rows: 1 + +SELECT 1 ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345; +> ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 +> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> 1 +> rows: 1 + +SELECT 1 ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456; +> exception NAME_TOO_LONG_2 + +SELECT 1 "ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"; +> ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 +> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
+> 1 +> rows: 1 + +SELECT 1 "ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456"; +> exception NAME_TOO_LONG_2 + +SELECT 1 "ABCDEFGHIJKLMNOPQRSTUVWXYZ01234""5ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"; +> exception NAME_TOO_LONG_2 + +SELECT 1 "ABCDEFGHIJKLMNOPQRSTUVWXYZ012345""ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"; +> exception NAME_TOO_LONG_2 + +SELECT 3 U&"\0031", 4 U&"/0032" UESCAPE '/'; +> 1 2 +> - - +> 3 4 +> rows: 1 + +EXPLAIN SELECT 1 U&"!2030" UESCAPE '!'; +>> SELECT 1 AS U&"\2030" + +SELECT 1 U&"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ01234\0035"; +> ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 +> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> 1 +> rows: 1 + +SELECT 1 
U&"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ01234\00356"; +> exception NAME_TOO_LONG_2 diff --git a/h2/src/test/org/h2/test/scripts/predicates/between.sql b/h2/src/test/org/h2/test/scripts/predicates/between.sql new file mode 100644 index 0000000000..0d4594f089 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/between.sql @@ -0,0 +1,107 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, X INT, A INT, B INT) AS VALUES + (1, NULL, NULL, NULL), + (2, NULL, NULL, 1), + (3, NULL, 1, NULL), + (4, 1, NULL, NULL), + (5, NULL, 1, 1), + (6, NULL, 1, 2), + (7, NULL, 2, 1), + (8, 1, NULL, 1), + (9, 1, NULL, 2), + (10, 2, NULL, 1), + (11, 1, 1, NULL), + (12, 1, 2, NULL), + (13, 2, 1, NULL), + (14, 1, 1, 1), + (15, 1, 1, 2), + (16, 1, 2, 1), + (17, 2, 1, 1), + (18, 1, 2, 2), + (19, 2, 1, 2), + (20, 2, 2, 1), + (21, 1, 2, 3), + (22, 1, 3, 2), + (23, 2, 1, 3), + (24, 2, 3, 1), + (25, 3, 1, 2), + (26, 3, 2, 1); +> ok + +EXPLAIN SELECT X BETWEEN A AND B A1, X BETWEEN ASYMMETRIC A AND B A2 FROM TEST; +>> SELECT "X" BETWEEN "A" AND "B" AS "A1", "X" BETWEEN "A" AND "B" AS "A2" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT X BETWEEN SYMMETRIC A AND B S1 FROM TEST; +>> SELECT "X" BETWEEN SYMMETRIC "A" AND "B" AS "S1" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT X NOT BETWEEN A AND B NA1, X NOT BETWEEN ASYMMETRIC A AND B NA2 FROM TEST; +>> SELECT "X" NOT BETWEEN "A" AND "B" AS "NA1", "X" NOT BETWEEN "A" AND "B" AS "NA2" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT X NOT BETWEEN SYMMETRIC A AND B NS1 FROM TEST; +>> SELECT "X" NOT BETWEEN SYMMETRIC 
"A" AND "B" AS "NS1" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT X BETWEEN A AND B A1, X BETWEEN ASYMMETRIC A AND B A2, A <= X AND X <= B A3, + X BETWEEN SYMMETRIC A AND B S1, A <= X AND X <= B OR A >= X AND X >= B S2, + X NOT BETWEEN A AND B NA1, X NOT BETWEEN ASYMMETRIC A AND B NA2, NOT (A <= X AND X <= B) NA3, + X NOT BETWEEN SYMMETRIC A AND B NS1, NOT (A <= X AND X <= B OR A >= X AND X >= B) NS2 + FROM TEST ORDER BY ID; +> A1 A2 A3 S1 S2 NA1 NA2 NA3 NS1 NS2 +> ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> FALSE FALSE FALSE null null TRUE TRUE TRUE null null +> null null null null null null null null null null +> FALSE FALSE FALSE null null TRUE TRUE TRUE null null +> null null null null null null null null null null +> TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE +> TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE +> FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE +> FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE +> FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> 
rows (ordered): 26 + +EXPLAIN SELECT * FROM TEST WHERE ID BETWEEN 1 AND 2; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."X", "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID >= 1 AND ID <= 2 */ WHERE "ID" BETWEEN 1 AND 2 + +EXPLAIN SELECT * FROM TEST WHERE ID NOT BETWEEN 1 AND 2; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."X", "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "ID" NOT BETWEEN 1 AND 2 + +EXPLAIN SELECT NULL BETWEEN A AND B, X BETWEEN NULL AND NULL, X BETWEEN SYMMETRIC A AND NULL, X BETWEEN SYMMETRIC NULL AND B, X BETWEEN SYMMETRIC NULL AND NULL FROM TEST; +>> SELECT UNKNOWN, UNKNOWN, UNKNOWN, UNKNOWN, UNKNOWN FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT X BETWEEN 1 AND 1, X NOT BETWEEN 1 AND 1, 2 BETWEEN SYMMETRIC 3 AND 1 FROM TEST; +>> SELECT "X" = 1, "X" <> 1, TRUE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT 2 BETWEEN 1 AND B, 2 BETWEEN A AND 3, 2 BETWEEN A AND B FROM TEST; +>> SELECT 2 BETWEEN 1 AND "B", 2 BETWEEN "A" AND 3, 2 BETWEEN "A" AND "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT X BETWEEN 1 AND NULL, X BETWEEN NULL AND 3 FROM TEST; +>> SELECT "X" BETWEEN 1 AND NULL, "X" BETWEEN NULL AND 3 FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT NOT (X BETWEEN A AND B), NOT (X NOT BETWEEN A AND B) FROM TEST; +>> SELECT "X" NOT BETWEEN "A" AND "B", "X" BETWEEN "A" AND "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +SELECT CURRENT_TIME BETWEEN CURRENT_DATE AND (CURRENT_DATE + INTERVAL '1' DAY); +> exception TYPES_ARE_NOT_COMPARABLE_2 diff --git a/h2/src/test/org/h2/test/scripts/predicates/distinct.sql b/h2/src/test/org/h2/test/scripts/predicates/distinct.sql new file mode 100644 index 0000000000..6fcd2e2d40 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/distinct.sql @@ -0,0 +1,66 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- Quantified distinct predicate + +SELECT 1 IS DISTINCT FROM ALL(VALUES 1, NULL, 2); +>> FALSE + +SELECT 1 IS DISTINCT FROM ALL(VALUES NULL, 2); +>> TRUE + +SELECT NULL IS DISTINCT FROM ALL(VALUES 1, NULL, 2); +>> FALSE + +SELECT NULL IS DISTINCT FROM ALL(VALUES 1, 2); +>> TRUE + +SELECT 1 IS NOT DISTINCT FROM ALL(VALUES 1, NULL, 2); +>> FALSE + +SELECT 1 IS NOT DISTINCT FROM ALL(VALUES 1, 1); +>> TRUE + +SELECT NULL IS NOT DISTINCT FROM ALL(VALUES 1, NULL, 2); +>> FALSE + +SELECT NULL IS NOT DISTINCT FROM ALL(VALUES NULL, NULL); +>> TRUE + +SELECT 1 IS DISTINCT FROM ANY(VALUES 1, NULL, 2); +>> TRUE + +SELECT 1 IS DISTINCT FROM ANY(VALUES 1, 1); +>> FALSE + +SELECT NULL IS DISTINCT FROM ANY(VALUES 1, NULL, 2); +>> TRUE + +SELECT NULL IS DISTINCT FROM ANY(VALUES NULL, NULL); +>> FALSE + +SELECT 1 IS NOT DISTINCT FROM ANY(VALUES 1, NULL, 2); +>> TRUE + +SELECT 1 IS NOT DISTINCT FROM ANY(VALUES NULL, 2); +>> FALSE + +SELECT NULL IS NOT DISTINCT FROM ANY(VALUES 1, NULL, 2); +>> TRUE + +SELECT NULL IS NOT DISTINCT FROM ANY(VALUES 1, 2); +>> FALSE + +SELECT NOT (NULL IS NOT DISTINCT FROM ANY(VALUES 1, 2)); +>> TRUE + +EXPLAIN SELECT NOT (NULL IS NOT DISTINCT FROM ANY(VALUES 1, 2)); +>> SELECT NOT (NULL IS NOT DISTINCT FROM ANY( VALUES (1), (2))) + +SELECT (1, NULL) IS NOT DISTINCT FROM ANY(VALUES (1, NULL), (2, NULL)); +>> TRUE + +SELECT (1, NULL) IS NOT DISTINCT FROM ANY(VALUES (2, NULL), (3, NULL)); +>> FALSE diff --git a/h2/src/test/org/h2/test/scripts/predicates/in.sql b/h2/src/test/org/h2/test/scripts/predicates/in.sql new file mode 100644 index 0000000000..a57b38c1ef --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/in.sql @@ -0,0 +1,428 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +create table test(id int) as select 1; +> ok + +select * from test where id in (select id from test order by 'x'); +> ID +> -- +> 1 +> rows: 1 + +drop table test; +> ok + +select x, x in(2, 3) i from system_range(1, 2) group by x; +> X I +> - ----- +> 1 FALSE +> 2 TRUE +> rows: 2 + +select * from system_range(1, 1) where x = x + 1 or x in(2, 0); +> X +> - +> rows: 0 + +select * from system_range(1, 1) where cast('a' || x as varchar_ignorecase) in ('A1', 'B1'); +> X +> - +> 1 +> rows: 1 + +create table test(x int) as select x from system_range(1, 2); +> ok + +select * from (select rownum r from test) where r in (1, 2); +> R +> - +> 1 +> 2 +> rows: 2 + +select * from (select rownum r from test) where r = 1 or r = 2; +> R +> - +> 1 +> 2 +> rows: 2 + +drop table test; +> ok + +select x from system_range(1, 1) where x in (select x from system_range(1, 1) group by x order by max(x)); +> X +> - +> 1 +> rows: 1 + +create table test(id int) as (values 1, 2, 4); +> ok + +select a.id, a.id in(select 4) x from test a, test b where a.id in (b.id, b.id - 1); +> ID X +> -- ----- +> 1 FALSE +> 1 FALSE +> 2 FALSE +> 4 TRUE +> rows: 4 + +select a.id, a.id in(select 4) x from test a, test b where a.id in (b.id, b.id - 1) group by a.id; +> ID X +> -- ----- +> 1 FALSE +> 2 FALSE +> 4 TRUE +> rows: 3 + +select a.id, 4 in(select a.id) x from test a, test b where a.id in (b.id, b.id - 1) group by a.id; +> ID X +> -- ----- +> 1 FALSE +> 2 FALSE +> 4 TRUE +> rows: 3 + +drop table test; +> ok + +create table test(id int primary key, d int) as (values (1, 1), (2, 1)); +> ok + +select id from test where id in (1, 2) and d = 1; +> ID +> -- +> 1 +> 2 +> rows: 2 + +drop table test; +> ok + +create table test(id int) as (values null, 1); +> ok + +select * from test where id not in (select id from test where 1=0); +> ID +> ---- +> 1 +> null +> rows: 2 + +select * from test where null not in (select id from test where 1=0); +> ID +> ---- +> 1 +> null +> rows: 2 
+ +select * from test where not (id in (select id from test where 1=0)); +> ID +> ---- +> 1 +> null +> rows: 2 + +select * from test where not (null in (select id from test where 1=0)); +> ID +> ---- +> 1 +> null +> rows: 2 + +drop table test; +> ok + +create table t1 (id int primary key) as (select x from system_range(1, 1000)); +> ok + +create table t2 (id int primary key) as (select x from system_range(1, 1000)); +> ok + +explain select count(*) from t1 where t1.id in ( select t2.id from t2 ); +>> SELECT COUNT(*) FROM "PUBLIC"."T1" /* PUBLIC.PRIMARY_KEY_A: ID IN(SELECT DISTINCT T2.ID FROM PUBLIC.T2 /* PUBLIC.T2.tableScan */) */ WHERE "T1"."ID" IN( SELECT DISTINCT "T2"."ID" FROM "PUBLIC"."T2" /* PUBLIC.T2.tableScan */) + +select count(*) from t1 where t1.id in ( select t2.id from t2 ); +> COUNT(*) +> -------- +> 1000 +> rows: 1 + +drop table t1, t2; +> ok + +select count(*) from system_range(1, 2) where x in(1, 1, 1); +> COUNT(*) +> -------- +> 1 +> rows: 1 + +create table test(id int primary key) as (values 1, 2, 3); +> ok + +explain select * from test where id in(1, 2, null); +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID IN(1, 2, NULL) */ WHERE "ID" IN(1, 2, NULL) + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)) AS (VALUES (1, 'Hello'), (2, 'World')); +> ok + +select * from test where id in (select id from test); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +select * from test where id in ((select id from test)); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +select * from test where id in (((select id from test))); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +DROP TABLE TEST; +> ok + +create table test(v boolean) as (values unknown, true, false); +> ok + +SELECT CASE WHEN NOT (false IN (null)) THEN false END; +> NULL +> ---- +> null +> rows: 1 + +select a.v as av, b.v as bv, a.v IN (b.v), not a.v IN (b.v) from test a, test b; +> AV BV A.V = B.V A.V <> 
B.V +> ----- ----- --------- ---------- +> FALSE FALSE TRUE FALSE +> FALSE TRUE FALSE TRUE +> FALSE null null null +> TRUE FALSE FALSE TRUE +> TRUE TRUE TRUE FALSE +> TRUE null null null +> null FALSE null null +> null TRUE null null +> null null null null +> rows: 9 + +select a.v as av, b.v as bv, a.v IN (b.v, null), not a.v IN (b.v, null) from test a, test b; +> AV BV A.V IN(B.V, NULL) A.V NOT IN(B.V, NULL) +> ----- ----- ----------------- --------------------- +> FALSE FALSE TRUE FALSE +> FALSE TRUE null null +> FALSE null null null +> TRUE FALSE null null +> TRUE TRUE TRUE FALSE +> TRUE null null null +> null FALSE null null +> null TRUE null null +> null null null null +> rows: 9 + +drop table test; +> ok + +SELECT CASE WHEN NOT (false IN (null)) THEN false END; +> NULL +> ---- +> null +> rows: 1 + +create table test(a int, b int) as select 2, 0; +> ok + +create index idx on test(b, a); +> ok + +select count(*) from test where a in(2, 10) and b in(0, null); +>> 1 + +drop table test; +> ok + +create table test(a int, b int) as select 1, 0; +> ok + +create index idx on test(b, a); +> ok + +select count(*) from test where b in(null, 0) and a in(1, null); +>> 1 + +drop table test; +> ok + +create table test(a int, b int, unique(a, b)); +> ok + +insert into test values(1,1), (1,2); +> update count: 2 + +select count(*) from test where a in(1,2) and b in(1,2); +>> 2 + +drop table test; +> ok + +SELECT * FROM SYSTEM_RANGE(1, 10) WHERE X IN ((SELECT 1), (SELECT 2)); +> X +> - +> 1 +> 2 +> rows: 2 + +EXPLAIN SELECT * FROM SYSTEM_RANGE(1, 10) WHERE X IN ((SELECT X FROM SYSTEM_RANGE(1, 1)), (SELECT X FROM SYSTEM_RANGE(2, 2))); +>> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 10) /* range index: X IN((SELECT X FROM SYSTEM_RANGE(1, 1) /* range index */), (SELECT X FROM SYSTEM_RANGE(2, 2) /* range index */)) */ WHERE "X" IN((SELECT "X" FROM SYSTEM_RANGE(1, 1) /* range index */), (SELECT "X" FROM SYSTEM_RANGE(2, 2) /* range index */)) + +-- Tests for IN predicate with an 
empty list + +SELECT 1 WHERE 1 IN (); +> 1 +> - +> rows: 0 + +SELECT 1 WHERE 1 NOT IN (); +>> 1 + +SELECT CASE 1 WHEN IN() THEN 1 ELSE 2 END; +> exception SYNTAX_ERROR_2 + +SET MODE DB2; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE Derby; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE MSSQLServer; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE HSQLDB; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE MySQL; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE Oracle; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE PostgreSQL; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE Regular; +> ok + +CREATE TABLE TEST(A INT, B INT) AS (VALUES (1, 1), (1, 2), (2, 1), (2, NULL)); +> ok + +SELECT * FROM TEST WHERE (A, B) IN ((1, 1), (2, 1), (2, 2), (2, NULL)); +> A B +> - - +> 1 1 +> 2 1 +> rows: 2 + +DROP TABLE TEST; +> ok + +SELECT LOCALTIME IN(DATE '2000-01-01', DATE '2010-01-01'); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +SELECT LOCALTIME IN ((VALUES DATE '2000-01-01', DATE '2010-01-01')); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +CREATE TABLE TEST(V INT) AS VALUES 1, 2; +> ok + +SELECT V, V IN (1, 1000000000000) FROM TEST; +> V V IN(1, 1000000000000) +> - ---------------------- +> 1 TRUE +> 2 FALSE +> rows: 2 + +EXPLAIN SELECT V, V IN (1, 1000000000000) FROM TEST; +>> SELECT "V", "V" IN(1, 1000000000000) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +CREATE UNIQUE INDEX TEST_IDX ON TEST(V); +> ok + +SELECT V, V IN (1, 1000000000000) FROM TEST; +> V V IN(1, 1000000000000) +> - ---------------------- +> 1 TRUE +> 2 FALSE +> rows: 2 + +EXPLAIN SELECT V, V IN (1, 1000000000000) FROM TEST; +>> SELECT "V", "V" IN(1, 1000000000000) FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(C BIGINT PRIMARY KEY) AS VALUES 1, 1000000000000; +> ok + +SELECT V, V IN (SELECT * 
FROM TEST) FROM (VALUES 1, 2) T(V); +> V V IN( SELECT DISTINCT PUBLIC.TEST.C FROM PUBLIC.TEST) +> - ----------------------------------------------------- +> 1 TRUE +> 2 FALSE +> rows: 2 + +EXPLAIN SELECT V, V IN (SELECT * FROM TEST) FROM (VALUES 1, 2) T(V); +>> SELECT "V", "V" IN( SELECT DISTINCT "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) FROM (VALUES (1), (2)) "T"("V") /* table scan */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(C INTEGER PRIMARY KEY) AS VALUES 1, 2; +> ok + +SELECT V, V IN (SELECT * FROM TEST) FROM (VALUES 1, 1000000000000) T(V); +> V V IN( SELECT DISTINCT PUBLIC.TEST.C FROM PUBLIC.TEST) +> ------------- ----------------------------------------------------- +> 1 TRUE +> 1000000000000 FALSE +> rows: 2 + +EXPLAIN SELECT V, V IN (SELECT * FROM TEST) FROM (VALUES 1, 1000000000000) T(V); +>> SELECT "V", "V" IN( SELECT DISTINCT "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) FROM (VALUES (1), (1000000000000)) "T"("V") /* table scan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/predicates/like.sql b/h2/src/test/org/h2/test/scripts/predicates/like.sql new file mode 100644 index 0000000000..de01420418 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/like.sql @@ -0,0 +1,214 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +create table Foo (A varchar(20), B integer); +> ok + +insert into Foo (A, B) values ('abcd', 1), ('abcd', 2); +> update count: 2 + +select * from Foo where A like 'abc%' escape '\' AND B=1; +> A B +> ---- - +> abcd 1 +> rows: 1 + +drop table Foo; +> ok + +--- test case for number like string --------------------------------------------------------------------------------------------- +CREATE TABLE test (one bigint primary key, two bigint, three bigint); +> ok + +CREATE INDEX two ON test(two); +> ok + +INSERT INTO TEST VALUES(1, 2, 3), (10, 20, 30), (100, 200, 300); +> update count: 3 + +INSERT INTO TEST VALUES(2, 6, 9), (20, 60, 90), (200, 600, 900); +> update count: 3 + +SELECT * FROM test WHERE one LIKE '2%'; +> ONE TWO THREE +> --- --- ----- +> 2 6 9 +> 20 60 90 +> 200 600 900 +> rows: 3 + +SELECT * FROM test WHERE two LIKE '2%'; +> ONE TWO THREE +> --- --- ----- +> 1 2 3 +> 10 20 30 +> 100 200 300 +> rows: 3 + +SELECT * FROM test WHERE three LIKE '2%'; +> ONE TWO THREE +> --- --- ----- +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(0, NULL), (1, 'Hello'), (2, 'World'), (3, 'Word'), (4, 'Wo%'); +> update count: 5 + +SELECT * FROM TEST WHERE NAME IS NULL; +> ID NAME +> -- ---- +> 0 null +> rows: 1 + +SELECT * FROM TEST WHERE NAME IS NOT NULL; +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> 3 Word +> 4 Wo% +> rows: 4 + +SELECT * FROM TEST WHERE NAME BETWEEN 'H' AND 'Word'; +> ID NAME +> -- ----- +> 1 Hello +> 3 Word +> 4 Wo% +> rows: 3 + +SELECT * FROM TEST WHERE ID >= 2 AND ID <= 3 AND ID <> 2; +> ID NAME +> -- ---- +> 3 Word +> rows: 1 + +SELECT * FROM TEST WHERE ID>0 AND ID<4 AND ID!=2; +> ID NAME +> -- ----- +> 1 Hello +> 3 Word +> rows: 2 + +SELECT * FROM TEST WHERE 'Hello' LIKE '_el%'; +> ID NAME +> -- ----- +> 0 null +> 1 Hello +> 2 World +> 3 Word +> 4 Wo% +> rows: 5 + +SELECT * FROM TEST WHERE NAME LIKE 'Hello%'; +> ID NAME +> -- 
----- +> 1 Hello +> rows: 1 + +SELECT * FROM TEST WHERE NAME ILIKE 'hello%'; +> ID NAME +> -- ----- +> 1 Hello +> rows: 1 + +SELECT * FROM TEST WHERE NAME ILIKE 'xxx%'; +> ID NAME +> -- ---- +> rows: 0 + +SELECT * FROM TEST WHERE NAME LIKE 'Wo%'; +> ID NAME +> -- ----- +> 2 World +> 3 Word +> 4 Wo% +> rows: 3 + +SELECT * FROM TEST WHERE NAME LIKE 'Wo\%'; +> ID NAME +> -- ---- +> 4 Wo% +> rows: 1 + +SELECT * FROM TEST WHERE NAME LIKE 'WoX%' ESCAPE 'X'; +> ID NAME +> -- ---- +> 4 Wo% +> rows: 1 + +SELECT * FROM TEST WHERE NAME LIKE 'Word_'; +> ID NAME +> -- ---- +> rows: 0 + +SELECT * FROM TEST WHERE NAME LIKE '%Hello%'; +> ID NAME +> -- ----- +> 1 Hello +> rows: 1 + +SELECT * FROM TEST WHERE 'Hello' LIKE NAME; +> ID NAME +> -- ----- +> 1 Hello +> rows: 1 + +SELECT T1.*, T2.* FROM TEST AS T1, TEST AS T2 WHERE T1.ID = T2.ID AND T1.NAME LIKE T2.NAME || '%'; +> ID NAME ID NAME +> -- ----- -- ----- +> 1 Hello 1 Hello +> 2 World 2 World +> 3 Word 3 Word +> 4 Wo% 4 Wo% +> rows: 4 + +SELECT ID, MAX(NAME) FROM TEST GROUP BY ID HAVING MAX(NAME) = 'World'; +> ID MAX(NAME) +> -- --------- +> 2 World +> rows: 1 + +SELECT ID, MAX(NAME) FROM TEST GROUP BY ID HAVING MAX(NAME) LIKE 'World%'; +> ID MAX(NAME) +> -- --------- +> 2 World +> rows: 1 + +EXPLAIN SELECT ID FROM TEST WHERE NAME ILIKE 'w%'; +>> SELECT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "NAME" ILIKE 'w%' + +DROP TABLE TEST; +> ok + +SELECT S, S LIKE '%', S ILIKE '%', S REGEXP '%' FROM (VALUES NULL, '', '1') T(S); +> S CASE WHEN S IS NOT NULL THEN TRUE ELSE UNKNOWN END CASE WHEN S IS NOT NULL THEN TRUE ELSE UNKNOWN END S REGEXP '%' +> ---- -------------------------------------------------- -------------------------------------------------- ------------ +> TRUE TRUE FALSE +> 1 TRUE TRUE FALSE +> null null null null +> rows: 3 + +SELECT S, S NOT LIKE '%', S NOT ILIKE '%', S NOT REGEXP '%' FROM (VALUES NULL, '', '1') T(S); +> S CASE WHEN S IS NOT NULL THEN FALSE ELSE UNKNOWN END CASE WHEN S IS NOT NULL 
THEN FALSE ELSE UNKNOWN END S NOT REGEXP '%' +> ---- --------------------------------------------------- --------------------------------------------------- ---------------- +> FALSE FALSE TRUE +> 1 FALSE FALSE TRUE +> null null null null +> rows: 3 + +CREATE TABLE TEST(ID BIGINT PRIMARY KEY, V VARCHAR UNIQUE) AS VALUES (1, 'aa'), (2, 'bb'); +> ok + +SELECT ID FROM (SELECT * FROM TEST) WHERE V NOT LIKE 'a%'; +>> 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/predicates/null.sql b/h2/src/test/org/h2/test/scripts/predicates/null.sql new file mode 100644 index 0000000000..68ed9603d0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/null.sql @@ -0,0 +1,200 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT NULL IS NULL; +>> TRUE + +SELECT NULL IS NOT NULL; +>> FALSE + +SELECT NOT NULL IS NULL; +>> FALSE + +SELECT NOT NULL IS NOT NULL; +>> TRUE + +SELECT 1 IS NULL; +>> FALSE + +SELECT 1 IS NOT NULL; +>> TRUE + +SELECT NOT 1 IS NULL; +>> TRUE + +SELECT NOT 1 IS NOT NULL; +>> FALSE + +SELECT () IS NULL; +>> TRUE + +SELECT () IS NOT NULL; +>> TRUE + +SELECT NOT () IS NULL; +>> FALSE + +SELECT NOT () IS NOT NULL; +>> FALSE + +SELECT (NULL, NULL) IS NULL; +>> TRUE + +SELECT (NULL, NULL) IS NOT NULL; +>> FALSE + +SELECT NOT (NULL, NULL) IS NULL; +>> FALSE + +SELECT NOT (NULL, NULL) IS NOT NULL; +>> TRUE + +SELECT (NULL, 1) IS NULL; +>> FALSE + +SELECT (NULL, 1) IS NOT NULL; +>> FALSE + +SELECT NOT (NULL, 1) IS NULL; +>> TRUE + +SELECT NOT (NULL, 1) IS NOT NULL; +>> TRUE + +SELECT (1, 2) IS NULL; +>> FALSE + +SELECT (1, 2) IS NOT NULL; +>> TRUE + +SELECT NOT (1, 2) IS NULL; +>> TRUE + +SELECT NOT (1, 2) IS NOT NULL; +>> FALSE + +CREATE TABLE TEST(A INT, B INT) AS VALUES (NULL, NULL), (1, NULL), (NULL, 2), (1, 2); +> ok + +CREATE INDEX TEST_A_IDX ON TEST(A); +> ok + +CREATE INDEX TEST_B_IDX ON TEST(B); +> ok 
+ +CREATE INDEX TEST_A_B_IDX ON TEST(A, B); +> ok + +SELECT * FROM TEST T1 JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NULL; +> A B A B +> - - - - +> rows: 0 + +EXPLAIN SELECT * FROM TEST T1 JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX: A IS NULL */ /* WHERE T2.A IS NULL */ INNER JOIN "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX: A = T2.A */ ON 1=1 WHERE ("T2"."A" IS NULL) AND ("T1"."A" = "T2"."A") + +SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NULL; +> A B A B +> ---- ---- ---- ---- +> null 2 null null +> null null null null +> rows: 2 + +EXPLAIN SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX: A = T1.A */ ON "T1"."A" = "T2"."A" WHERE "T2"."A" IS NULL + +SELECT * FROM TEST T1 JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NOT NULL; +> A B A B +> - ---- - ---- +> 1 2 1 2 +> 1 2 1 null +> 1 null 1 2 +> 1 null 1 null +> rows: 4 + +EXPLAIN SELECT * FROM TEST T1 JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NOT NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ INNER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX: A = T1.A */ ON 1=1 WHERE ("T2"."A" IS NOT NULL) AND ("T1"."A" = "T2"."A") + +SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NOT NULL; +> A B A B +> - ---- - ---- +> 1 2 1 2 +> 1 2 1 null +> 1 null 1 2 +> 1 null 1 null +> rows: 4 + +EXPLAIN SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NOT NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX: A = T1.A */ ON "T1"."A" = "T2"."A" WHERE "T2"."A" IS NOT NULL + +SELECT * FROM TEST T1 JOIN TEST T2 ON 
(T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NULL; +> A B A B +> - - - - +> rows: 0 + +EXPLAIN SELECT * FROM TEST T1 JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX: A IS NULL AND B IS NULL */ /* WHERE ROW (T2.A, T2.B) IS NULL */ INNER JOIN "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ ON 1=1 WHERE (ROW ("T2"."A", "T2"."B") IS NULL) AND (ROW ("T1"."A", "T1"."B") = ROW ("T2"."A", "T2"."B")) + +SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NULL; +> A B A B +> ---- ---- ---- ---- +> 1 null null null +> null 2 null null +> null null null null +> rows: 3 + +EXPLAIN SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX */ ON ROW ("T1"."A", "T1"."B") = ROW ("T2"."A", "T2"."B") WHERE ROW ("T2"."A", "T2"."B") IS NULL + +SELECT * FROM TEST T1 JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NOT NULL; +> A B A B +> - - - - +> 1 2 1 2 +> rows: 1 + +EXPLAIN SELECT * FROM TEST T1 JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NOT NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ INNER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX */ ON 1=1 WHERE (ROW ("T2"."A", "T2"."B") IS NOT NULL) AND (ROW ("T1"."A", "T1"."B") = ROW ("T2"."A", "T2"."B")) + +SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NOT NULL; +> A B A B +> - - - - +> 1 2 1 2 +> rows: 1 + +EXPLAIN SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NOT NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ LEFT 
OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX */ ON ROW ("T1"."A", "T1"."B") = ROW ("T2"."A", "T2"."B") WHERE ROW ("T2"."A", "T2"."B") IS NOT NULL + +EXPLAIN SELECT A, B FROM TEST WHERE (A, NULL) IS NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX: A IS NULL */ WHERE "A" IS NULL + +EXPLAIN SELECT A, B FROM TEST WHERE (A, NULL) IS NOT NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE + +EXPLAIN SELECT A, B FROM TEST WHERE NOT (A, NULL) IS NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX */ WHERE "A" IS NOT NULL + +EXPLAIN SELECT A, B FROM TEST WHERE NOT (A, NULL) IS NOT NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX */ + +EXPLAIN SELECT A, B FROM TEST WHERE (A, NULL, B) IS NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX: A IS NULL AND B IS NULL */ WHERE ROW ("A", "B") IS NULL + +EXPLAIN SELECT A, B FROM TEST WHERE (A, NULL, B, NULL) IS NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX: A IS NULL AND B IS NULL */ WHERE ROW ("A", "B") IS NULL + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I INTEGER) AS VALUES 1; +> ok + + +SELECT I FROM TEST WHERE _ROWID_ IS NULL; +> I +> - +> rows: 0 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/predicates/type.sql b/h2/src/test/org/h2/test/scripts/predicates/type.sql new file mode 100644 index 0000000000..d555c803f1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/type.sql @@ -0,0 +1,49 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT 1 IS OF (INT); +>> TRUE + +SELECT 1 IS NOT OF (INT); +>> FALSE + +SELECT NULL IS OF (INT); +>> null + +SELECT NULL IS NOT OF (INT); +>> null + +SELECT 1 IS OF (INT, BIGINT); +>> TRUE + +SELECT 1 IS NOT OF (INT, BIGINT); +>> FALSE + +SELECT TRUE IS OF (VARCHAR, TIME); +>> FALSE + +SELECT TRUE IS NOT OF (VARCHAR, TIME); +>> TRUE + +CREATE TABLE TEST(A INT NOT NULL, B INT); +> ok + +EXPLAIN SELECT + 'Test' IS OF (VARCHAR), 'Test' IS NOT OF (VARCHAR), + 10 IS OF (VARCHAR), 10 IS NOT OF (VARCHAR), + NULL IS OF (VARCHAR), NULL IS NOT OF (VARCHAR); +>> SELECT TRUE, FALSE, FALSE, TRUE, UNKNOWN, UNKNOWN + +EXPLAIN SELECT A IS OF (INT), A IS OF (BIGINT), A IS NOT OF (INT), NOT A IS OF (BIGINT) FROM TEST; +>> SELECT "A" IS OF (INTEGER), "A" IS OF (BIGINT), "A" IS NOT OF (INTEGER), "A" IS NOT OF (BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT B IS OF (INT), B IS OF (BIGINT), B IS NOT OF (INT), NOT B IS OF (BIGINT) FROM TEST; +>> SELECT "B" IS OF (INTEGER), "B" IS OF (BIGINT), "B" IS NOT OF (INTEGER), "B" IS NOT OF (BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT A IS NOT OF(INT) OR B IS OF (INT) FROM TEST; +>> SELECT ("A" IS NOT OF (INTEGER)) OR ("B" IS OF (INTEGER)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/predicates/unique.sql b/h2/src/test/org/h2/test/scripts/predicates/unique.sql new file mode 100644 index 0000000000..ffc26ea555 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/unique.sql @@ -0,0 +1,54 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, GR INT, A INT, B INT, C INT) AS VALUES + (1, 1, NULL, NULL, NULL), + (2, 1, NULL, NULL, NULL), + (3, 1, NULL, 1, 1), + (4, 1, NULL, 1, 1), + (5, 1, 1, 1, 1), + (6, 1, 1, 1, 2), + (7, 2, 1, 2, 1); +> ok + +SELECT UNIQUE(SELECT A, B FROM TEST); +>> FALSE + +SELECT UNIQUE(TABLE TEST); +>> TRUE + +SELECT UNIQUE(SELECT A, B, C FROM TEST); +>> TRUE + +EXPLAIN SELECT UNIQUE(SELECT A, B FROM TEST); +>> SELECT UNIQUE( SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) + +SELECT UNIQUE(SELECT A, B FROM TEST); +>> FALSE + +EXPLAIN SELECT UNIQUE(SELECT DISTINCT A, B FROM TEST); +>> SELECT TRUE + +SELECT UNIQUE(SELECT DISTINCT A, B FROM TEST); +>> TRUE + +SELECT G, UNIQUE(SELECT A, B, C FROM TEST WHERE GR = G) FROM (VALUES 1, 2, 3) V(G); +> G UNIQUE( SELECT A, B, C FROM PUBLIC.TEST WHERE GR = G) +> - ----------------------------------------------------- +> 1 TRUE +> 2 TRUE +> 3 TRUE +> rows: 3 + +SELECT G, UNIQUE(SELECT A, B FROM TEST WHERE GR = G ORDER BY A + B) FROM (VALUES 1, 2, 3) V(G); +> G UNIQUE( SELECT A, B FROM PUBLIC.TEST WHERE GR = G ORDER BY A + B) +> - ----------------------------------------------------------------- +> 1 FALSE +> 2 TRUE +> 3 TRUE +> rows: 3 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/derived-column-names.sql b/h2/src/test/org/h2/test/scripts/queries/derived-column-names.sql similarity index 86% rename from h2/src/test/org/h2/test/scripts/derived-column-names.sql rename to h2/src/test/org/h2/test/scripts/queries/derived-column-names.sql index 9606239256..1b36b3f9bb 100644 --- a/h2/src/test/org/h2/test/scripts/derived-column-names.sql +++ b/h2/src/test/org/h2/test/scripts/queries/derived-column-names.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -80,3 +80,9 @@ SELECT * FROM TEST AS T(A, B) USE INDEX (TEST_I_IDX); DROP TABLE TEST; > ok + +SELECT * FROM (SELECT 1 A, 2 A) T(B, C); +> B C +> - - +> 1 2 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/distinct.sql b/h2/src/test/org/h2/test/scripts/queries/distinct.sql similarity index 95% rename from h2/src/test/org/h2/test/scripts/distinct.sql rename to h2/src/test/org/h2/test/scripts/queries/distinct.sql index 474b39c97a..7da7c9ad95 100644 --- a/h2/src/test/org/h2/test/scripts/distinct.sql +++ b/h2/src/test/org/h2/test/scripts/queries/distinct.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -181,7 +181,7 @@ SELECT T1.C1, T2.C5 FROM TEST T1 JOIN ( > rows (ordered): 3 EXPLAIN SELECT DISTINCT ON(C1) C2 FROM TEST ORDER BY C1; ->> SELECT DISTINCT ON("C1") "C2" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ ORDER BY ="C1" +>> SELECT DISTINCT ON("C1") "C2" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ ORDER BY "C1" SELECT DISTINCT ON(C1) C2 FROM TEST ORDER BY C3; > exception ORDER_BY_NOT_IN_RESULT diff --git a/h2/src/test/org/h2/test/scripts/joins.sql b/h2/src/test/org/h2/test/scripts/queries/joins.sql similarity index 70% rename from h2/src/test/org/h2/test/scripts/joins.sql rename to h2/src/test/org/h2/test/scripts/queries/joins.sql index a4cefb576b..57ccf2acd6 100644 --- a/h2/src/test/org/h2/test/scripts/joins.sql +++ b/h2/src/test/org/h2/test/scripts/queries/joins.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
+-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -13,10 +13,10 @@ create table c(c int) as select x from system_range(1, 2); > ok select * from a inner join b on a=b right outer join c on c=a; -> C A B -> - ---- ---- -> 1 1 1 -> 2 null null +> A B C +> ---- ---- - +> 1 1 1 +> null null 2 > rows: 2 select * from c left outer join (a inner join b on b=a) on c=a; @@ -204,8 +204,7 @@ is null or three.val>=DATE'2006-07-01'; explain select * from one natural join two left join two three on one.id=three.id left join one four on two.id=four.id where three.val is null or three.val>=DATE'2006-07-01'; -#+mvStore#>> SELECT "ONE"."ID", "TWO"."VAL", "THREE"."ID", "THREE"."VAL", "FOUR"."ID" FROM "PUBLIC"."ONE" /* PUBLIC.ONE.tableScan */ INNER JOIN "PUBLIC"."TWO" /* PUBLIC.PRIMARY_KEY_14: ID = PUBLIC.ONE.ID */ ON 1=1 /* WHERE PUBLIC.ONE.ID = PUBLIC.TWO.ID */ LEFT OUTER JOIN "PUBLIC"."TWO" "THREE" /* PUBLIC.PRIMARY_KEY_14: ID = ONE.ID */ ON "ONE"."ID" = "THREE"."ID" LEFT OUTER JOIN "PUBLIC"."ONE" "FOUR" /* PUBLIC.PRIMARY_KEY_1: ID = TWO.ID */ ON "TWO"."ID" = "FOUR"."ID" WHERE ("PUBLIC"."ONE"."ID" = "PUBLIC"."TWO"."ID") AND (("THREE"."VAL" IS NULL) OR ("THREE"."VAL" >= DATE '2006-07-01')) -#-mvStore#>> SELECT "ONE"."ID", "TWO"."VAL", "THREE"."ID", "THREE"."VAL", "FOUR"."ID" FROM "PUBLIC"."ONE" /* PUBLIC.PRIMARY_KEY_1 */ INNER JOIN "PUBLIC"."TWO" /* PUBLIC.PRIMARY_KEY_14: ID = PUBLIC.ONE.ID */ ON 1=1 /* WHERE PUBLIC.ONE.ID = PUBLIC.TWO.ID */ LEFT OUTER JOIN "PUBLIC"."TWO" "THREE" /* PUBLIC.PRIMARY_KEY_14: ID = ONE.ID */ ON "ONE"."ID" = "THREE"."ID" LEFT OUTER JOIN "PUBLIC"."ONE" "FOUR" /* PUBLIC.PRIMARY_KEY_1: ID = TWO.ID */ ON "TWO"."ID" = "FOUR"."ID" WHERE ("PUBLIC"."ONE"."ID" = "PUBLIC"."TWO"."ID") AND (("THREE"."VAL" IS NULL) OR ("THREE"."VAL" >= DATE '2006-07-01')) +>> SELECT "PUBLIC"."ONE"."ID", "PUBLIC"."TWO"."VAL", "THREE"."ID", "THREE"."VAL", "FOUR"."ID" 
FROM "PUBLIC"."ONE" /* PUBLIC.ONE.tableScan */ INNER JOIN "PUBLIC"."TWO" /* PUBLIC.PRIMARY_KEY_14: ID = PUBLIC.ONE.ID */ ON 1=1 /* WHERE PUBLIC.ONE.ID = PUBLIC.TWO.ID */ LEFT OUTER JOIN "PUBLIC"."TWO" "THREE" /* PUBLIC.PRIMARY_KEY_14: ID = ONE.ID */ ON "ONE"."ID" = "THREE"."ID" LEFT OUTER JOIN "PUBLIC"."ONE" "FOUR" /* PUBLIC.PRIMARY_KEY_1: ID = TWO.ID */ ON "TWO"."ID" = "FOUR"."ID" WHERE ("PUBLIC"."ONE"."ID" = "PUBLIC"."TWO"."ID") AND (("THREE"."VAL" IS NULL) OR ("THREE"."VAL" >= DATE '2006-07-01')) -- Query #4: same as #3, but the joins have been manually re-ordered -- Correct result set, same as expected for #3. @@ -254,8 +253,7 @@ explain select * from test1 inner join test2 on test1.id=test2.id left outer join test3 on test2.id=test3.id where test3.id is null; -#+mvStore#>> SELECT "TEST1"."ID", "TEST2"."ID", "TEST3"."ID" FROM "PUBLIC"."TEST1" /* PUBLIC.TEST1.tableScan */ INNER JOIN "PUBLIC"."TEST2" /* PUBLIC.PRIMARY_KEY_4C: ID = TEST1.ID */ ON 1=1 /* WHERE TEST1.ID = TEST2.ID */ LEFT OUTER JOIN "PUBLIC"."TEST3" /* PUBLIC.PRIMARY_KEY_4C0: ID = TEST2.ID */ ON "TEST2"."ID" = "TEST3"."ID" WHERE ("TEST3"."ID" IS NULL) AND ("TEST1"."ID" = "TEST2"."ID") -#-mvStore#>> SELECT "TEST1"."ID", "TEST2"."ID", "TEST3"."ID" FROM "PUBLIC"."TEST1" /* PUBLIC.PRIMARY_KEY_4 */ INNER JOIN "PUBLIC"."TEST2" /* PUBLIC.PRIMARY_KEY_4C: ID = TEST1.ID */ ON 1=1 /* WHERE TEST1.ID = TEST2.ID */ LEFT OUTER JOIN "PUBLIC"."TEST3" /* PUBLIC.PRIMARY_KEY_4C0: ID = TEST2.ID */ ON "TEST2"."ID" = "TEST3"."ID" WHERE ("TEST3"."ID" IS NULL) AND ("TEST1"."ID" = "TEST2"."ID") +>> SELECT "PUBLIC"."TEST1"."ID", "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST3"."ID" FROM "PUBLIC"."TEST1" /* PUBLIC.TEST1.tableScan */ INNER JOIN "PUBLIC"."TEST2" /* PUBLIC.PRIMARY_KEY_4C: ID = TEST1.ID */ ON 1=1 /* WHERE TEST1.ID = TEST2.ID */ LEFT OUTER JOIN "PUBLIC"."TEST3" /* PUBLIC.PRIMARY_KEY_4C0: ID = TEST2.ID */ ON "TEST2"."ID" = "TEST3"."ID" WHERE ("TEST3"."ID" IS NULL) AND ("TEST1"."ID" = "TEST2"."ID") insert into test1 select x 
from system_range(2, 1000); > update count: 999 @@ -272,8 +270,7 @@ explain select * from test1 inner join test2 on test1.id=test2.id left outer join test3 on test2.id=test3.id where test3.id is null; -#+mvStore#>> SELECT "TEST1"."ID", "TEST2"."ID", "TEST3"."ID" FROM "PUBLIC"."TEST2" /* PUBLIC.TEST2.tableScan */ LEFT OUTER JOIN "PUBLIC"."TEST3" /* PUBLIC.PRIMARY_KEY_4C0: ID = TEST2.ID */ ON "TEST2"."ID" = "TEST3"."ID" INNER JOIN "PUBLIC"."TEST1" /* PUBLIC.PRIMARY_KEY_4: ID = TEST2.ID */ ON 1=1 WHERE ("TEST3"."ID" IS NULL) AND ("TEST1"."ID" = "TEST2"."ID") -#-mvStore#>> SELECT "TEST1"."ID", "TEST2"."ID", "TEST3"."ID" FROM "PUBLIC"."TEST2" /* PUBLIC.PRIMARY_KEY_4C */ LEFT OUTER JOIN "PUBLIC"."TEST3" /* PUBLIC.PRIMARY_KEY_4C0: ID = TEST2.ID */ ON "TEST2"."ID" = "TEST3"."ID" INNER JOIN "PUBLIC"."TEST1" /* PUBLIC.PRIMARY_KEY_4: ID = TEST2.ID */ ON 1=1 WHERE ("TEST3"."ID" IS NULL) AND ("TEST1"."ID" = "TEST2"."ID") +>> SELECT "PUBLIC"."TEST1"."ID", "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST3"."ID" FROM "PUBLIC"."TEST2" /* PUBLIC.TEST2.tableScan */ LEFT OUTER JOIN "PUBLIC"."TEST3" /* PUBLIC.PRIMARY_KEY_4C0: ID = TEST2.ID */ ON "TEST2"."ID" = "TEST3"."ID" INNER JOIN "PUBLIC"."TEST1" /* PUBLIC.PRIMARY_KEY_4: ID = TEST2.ID */ ON 1=1 WHERE ("TEST3"."ID" IS NULL) AND ("TEST1"."ID" = "TEST2"."ID") SELECT TEST1.ID, TEST2.ID, TEST3.ID FROM TEST2 @@ -549,10 +546,10 @@ select * from t1 left join t2 on t1.id=t2.id; > rows: 2 select * from t1 right join t2 on t1.id=t2.id; -> ID NAME ID NAME -> -- ----- ---- ---- -> 1 Hallo 1 hi -> 3 Welt null null +> ID NAME ID NAME +> ---- ---- -- ----- +> 1 hi 1 Hallo +> null null 3 Welt > rows: 2 select * from t1 cross join t2; @@ -570,7 +567,7 @@ select * from t1 natural join t2; > rows: 0 explain select * from t1 natural join t2; ->> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."T1" /* PUBLIC.T1.tableScan */ INNER JOIN "PUBLIC"."T2" /* PUBLIC.T2.tableScan */ ON 1=1 WHERE ("PUBLIC"."T1"."ID" = "PUBLIC"."T2"."ID") AND ("PUBLIC"."T1"."NAME" = 
"PUBLIC"."T2"."NAME") +>> SELECT "PUBLIC"."T1"."ID", "PUBLIC"."T1"."NAME" FROM "PUBLIC"."T1" /* PUBLIC.T1.tableScan */ INNER JOIN "PUBLIC"."T2" /* PUBLIC.T2.tableScan */ ON 1=1 WHERE ("PUBLIC"."T1"."ID" = "PUBLIC"."T2"."ID") AND ("PUBLIC"."T1"."NAME" = "PUBLIC"."T2"."NAME") drop table t1; > ok @@ -596,15 +593,25 @@ create table INVOICE_LINE(line_id int, invoiceid int, customerid int, line_text insert into INVOICE_LINE values(10, 1, 0, 'Super Soap'), (20, 1, 0, 'Regular Soap'); > update count: 2 -select c.*, i.*, l.* from customer c natural join invoice i natural join INVOICE_LINE l; +select * from customer c natural join invoice i natural join INVOICE_LINE l; > CUSTOMERID CUSTOMER_NAME INVOICEID INVOICE_TEXT LINE_ID LINE_TEXT > ---------- ------------- --------- ------------ ------- ------------ > 0 Acme 1 Soap 10 Super Soap > 0 Acme 1 Soap 20 Regular Soap > rows: 2 +explain select * from customer c natural join invoice i natural join INVOICE_LINE l; +>> SELECT "C"."CUSTOMERID", "C"."CUSTOMER_NAME", "I"."INVOICEID", "I"."INVOICE_TEXT", "L"."LINE_ID", "L"."LINE_TEXT" FROM "PUBLIC"."INVOICE" "I" /* PUBLIC.INVOICE.tableScan */ INNER JOIN "PUBLIC"."INVOICE_LINE" "L" /* PUBLIC.INVOICE_LINE.tableScan */ ON 1=1 /* WHERE (I.CUSTOMERID = L.CUSTOMERID) AND (I.INVOICEID = L.INVOICEID) */ INNER JOIN "PUBLIC"."CUSTOMER" "C" /* PUBLIC.CUSTOMER.tableScan */ ON 1=1 WHERE ("C"."CUSTOMERID" = "I"."CUSTOMERID") AND ("I"."CUSTOMERID" = "L"."CUSTOMERID") AND ("I"."INVOICEID" = "L"."INVOICEID") + +select c.*, i.*, l.* from customer c natural join invoice i natural join INVOICE_LINE l; +> CUSTOMERID CUSTOMER_NAME CUSTOMERID INVOICEID INVOICE_TEXT LINE_ID INVOICEID CUSTOMERID LINE_TEXT +> ---------- ------------- ---------- --------- ------------ ------- --------- ---------- ------------ +> 0 Acme 0 1 Soap 10 1 0 Super Soap +> 0 Acme 0 1 Soap 20 1 0 Regular Soap +> rows: 2 + explain select c.*, i.*, l.* from customer c natural join invoice i natural join INVOICE_LINE l; ->> SELECT 
"C"."CUSTOMERID", "C"."CUSTOMER_NAME", "I"."INVOICEID", "I"."INVOICE_TEXT", "L"."LINE_ID", "L"."LINE_TEXT" FROM "PUBLIC"."INVOICE" "I" /* PUBLIC.INVOICE.tableScan */ INNER JOIN "PUBLIC"."INVOICE_LINE" "L" /* PUBLIC.INVOICE_LINE.tableScan */ ON 1=1 /* WHERE (PUBLIC.I.CUSTOMERID = PUBLIC.L.CUSTOMERID) AND (PUBLIC.I.INVOICEID = PUBLIC.L.INVOICEID) */ INNER JOIN "PUBLIC"."CUSTOMER" "C" /* PUBLIC.CUSTOMER.tableScan */ ON 1=1 WHERE ("PUBLIC"."C"."CUSTOMERID" = "PUBLIC"."I"."CUSTOMERID") AND (("PUBLIC"."I"."CUSTOMERID" = "PUBLIC"."L"."CUSTOMERID") AND ("PUBLIC"."I"."INVOICEID" = "PUBLIC"."L"."INVOICEID")) +>> SELECT "C"."CUSTOMERID", "C"."CUSTOMER_NAME", "I"."CUSTOMERID", "I"."INVOICEID", "I"."INVOICE_TEXT", "L"."LINE_ID", "L"."INVOICEID", "L"."CUSTOMERID", "L"."LINE_TEXT" FROM "PUBLIC"."INVOICE" "I" /* PUBLIC.INVOICE.tableScan */ INNER JOIN "PUBLIC"."INVOICE_LINE" "L" /* PUBLIC.INVOICE_LINE.tableScan */ ON 1=1 /* WHERE (I.CUSTOMERID = L.CUSTOMERID) AND (I.INVOICEID = L.INVOICEID) */ INNER JOIN "PUBLIC"."CUSTOMER" "C" /* PUBLIC.CUSTOMER.tableScan */ ON 1=1 WHERE ("C"."CUSTOMERID" = "I"."CUSTOMERID") AND ("I"."CUSTOMERID" = "L"."CUSTOMERID") AND ("I"."INVOICEID" = "L"."INVOICEID") drop table customer; > ok @@ -650,11 +657,11 @@ SELECT * FROM PARENT P LEFT OUTER JOIN CHILD C ON P.ID = C.PARENTID; > rows: 3 SELECT * FROM CHILD C RIGHT OUTER JOIN PARENT P ON P.ID = C.PARENTID; -> ID NAME ID PARENTID NAME -> -- ---- ---- -------- ------ -> 1 Sue 100 1 Simon -> 1 Sue 101 1 Sabine -> 2 Joe null null null +> ID PARENTID NAME ID NAME +> ---- -------- ------ -- ---- +> 100 1 Simon 1 Sue +> 101 1 Sabine 1 Sue +> null null null 2 Joe > rows: 3 DROP TABLE PARENT; @@ -799,6 +806,9 @@ SELECT T1.X1, T2.X2, T3.X3, T4.X4, T5.X5 FROM ( > 1 1 1 1 1 > rows: 1 +DROP TABLE T1, T2, T3, T4, T5; +> ok + CREATE TABLE A(X INT); > ok @@ -826,3 +836,211 @@ SELECT * FROM TEST X LEFT OUTER JOIN TEST Y ON Y.A = X.A || '1'; DROP TABLE TEST; > ok + +CREATE TABLE T1(A INT, B INT) AS VALUES (1, 10), (2, 20), 
(4, 40), (6, 6), (7, 7); +> ok + +CREATE TABLE T2(A INT, B INT) AS VALUES (1, 100), (2, 200), (5, 500), (6, 6), (8, 7); +> ok + +SELECT T1.B, T2.B FROM T1 INNER JOIN T2 USING (A); +> B B +> -- --- +> 10 100 +> 20 200 +> 6 6 +> rows: 3 + +SELECT * FROM T1 INNER JOIN T2 USING (A); +> A B B +> - -- --- +> 1 10 100 +> 2 20 200 +> 6 6 6 +> rows: 3 + +SELECT * FROM T1 INNER JOIN T2 USING (B); +> B A A +> - - - +> 6 6 6 +> 7 7 8 +> rows: 2 + +SELECT T1.B, T2.B FROM T1 INNER JOIN T2 USING (A, B); +> B B +> - - +> 6 6 +> rows: 1 + +SELECT * FROM T1 INNER JOIN T2 USING (B, A); +> B A +> - - +> 6 6 +> rows: 1 + +DROP TABLE T1, T2; +> ok + +SELECT * + FROM (VALUES(1, 'A'), (2, 'B')) T1(A, B) + JOIN (VALUES(2, 'C'), (3, 'D')) T2(A, C) USING (A); +> A B C +> - - - +> 2 B C +> rows: 1 + +SELECT * + FROM (VALUES(1, 'A'), (2, 'B')) T1(A, B) + LEFT JOIN (VALUES(2, 'C'), (3, 'D')) T2(A, C) USING (A); +> A B C +> - - ---- +> 1 A null +> 2 B C +> rows: 2 + +SELECT * + FROM (VALUES(1, 'A'), (2, 'B')) T1(A, B) + RIGHT JOIN (VALUES(2, 'C'), (3, 'D')) T2(A, C) USING (A); +> A B C +> - ---- - +> 2 B C +> 3 null D +> rows: 2 + +SELECT T1.*, T2.* + FROM (VALUES(1, 'A'), (2, 'B')) T1(A, B) + RIGHT JOIN (VALUES(2, 'C'), (3, 'D')) T2(A, C) USING (A); +> A B A C +> ---- ---- - - +> 2 B 2 C +> null null 3 D +> rows: 2 + +SELECT * + FROM (VALUES(1, 'A'), (2, 'B')) T1(A, B) + NATURAL JOIN (VALUES(2, 'C'), (3, 'D')) T2(A, C); +> A B C +> - - - +> 2 B C +> rows: 1 + +CREATE TABLE T1(A VARCHAR_IGNORECASE PRIMARY KEY, B VARCHAR) AS (VALUES ('a', 'A'), ('b', 'B')); +> ok + +CREATE TABLE T2(A VARCHAR_IGNORECASE PRIMARY KEY, C VARCHAR) AS (VALUES ('B', 'C'), ('C', 'D')); +> ok + +SELECT * FROM T1 RIGHT JOIN T2 USING (A); +> A B C +> - ---- - +> C null D +> b B C +> rows: 2 + +EXPLAIN SELECT * FROM T1 RIGHT JOIN T2 USING (A); +>> SELECT COALESCE("PUBLIC"."T1"."A", "PUBLIC"."T2"."A") AS "A", "PUBLIC"."T1"."B", "PUBLIC"."T2"."C" FROM "PUBLIC"."T2" /* PUBLIC.T2.tableScan */ LEFT OUTER JOIN "PUBLIC"."T1" /* 
PUBLIC.PRIMARY_KEY_A: A = PUBLIC.T2.A */ ON "PUBLIC"."T1"."A" = "PUBLIC"."T2"."A" + +DROP TABLE T1, T2; +> ok + +CREATE TABLE T1(A INT PRIMARY KEY, B VARCHAR) AS (VALUES (1, 'A'), (2, 'B')); +> ok + +CREATE TABLE T2(A INT PRIMARY KEY, C VARCHAR) AS (VALUES (2, 'C'), (3, 'D')); +> ok + +SELECT * FROM T1 RIGHT JOIN T2 USING (A); +> A B C +> - ---- - +> 2 B C +> 3 null D +> rows: 2 + +EXPLAIN SELECT * FROM T1 RIGHT JOIN T2 USING (A); +>> SELECT "PUBLIC"."T2"."A", "PUBLIC"."T1"."B", "PUBLIC"."T2"."C" FROM "PUBLIC"."T2" /* PUBLIC.T2.tableScan */ LEFT OUTER JOIN "PUBLIC"."T1" /* PUBLIC.PRIMARY_KEY_A: A = PUBLIC.T2.A */ ON "PUBLIC"."T1"."A" = "PUBLIC"."T2"."A" + +SELECT * EXCEPT (T1.A) FROM T1 RIGHT JOIN T2 USING (A); +> B C +> ---- - +> B C +> null D +> rows: 2 + +SELECT * EXCEPT (T2.A) FROM T1 RIGHT JOIN T2 USING (A); +> B C +> ---- - +> B C +> null D +> rows: 2 + +DROP TABLE T1, T2; +> ok + +CREATE SCHEMA S1; +> ok + +CREATE SCHEMA S2; +> ok + +CREATE TABLE S1.T(A VARCHAR_IGNORECASE, B INT) AS (VALUES ('a', 2)); +> ok + +CREATE TABLE S2.T(A VARCHAR_IGNORECASE, B INT) AS (VALUES ('A', 3)); +> ok + +SELECT * FROM S1.T RIGHT JOIN S2.T USING(A); +> A B B +> - - - +> a 2 3 +> rows: 1 + +EXPLAIN SELECT * FROM S1.T RIGHT JOIN S2.T USING(A); +>> SELECT COALESCE("S1"."T"."A", "S2"."T"."A") AS "A", "S1"."T"."B", "S2"."T"."B" FROM "S2"."T" /* S2.T.tableScan */ LEFT OUTER JOIN "S1"."T" /* S1.T.tableScan */ ON "S1"."T"."A" = "S2"."T"."A" + +DROP SCHEMA S1 CASCADE; +> ok + +DROP SCHEMA S2 CASCADE; +> ok + +CREATE TABLE T1(C1 INTEGER) AS VALUES 1, 2, 4; +> ok + +CREATE TABLE T2(C2 INTEGER) AS VALUES 1, 3, 4; +> ok + +CREATE TABLE T3(C3 INTEGER) AS VALUES 2, 3, 4; +> ok + +SELECT * FROM T1 JOIN T2 LEFT JOIN T3 ON T2.C2 = T3.C3 ON T1.C1 = T2.C2; +> C1 C2 C3 +> -- -- ---- +> 1 1 null +> 4 4 4 +> rows: 2 + +EXPLAIN SELECT * FROM T1 JOIN T2 LEFT JOIN T3 ON T2.C2 = T3.C3 ON T1.C1 = T2.C2; +>> SELECT "PUBLIC"."T1"."C1", "PUBLIC"."T2"."C2", "PUBLIC"."T3"."C3" FROM ( "PUBLIC"."T2" /* 
PUBLIC.T2.tableScan */ LEFT OUTER JOIN "PUBLIC"."T3" /* PUBLIC.T3.tableScan */ ON "T2"."C2" = "T3"."C3" ) INNER JOIN "PUBLIC"."T1" /* PUBLIC.T1.tableScan */ ON 1=1 WHERE "T1"."C1" = "T2"."C2" + +SELECT * FROM T1 RIGHT JOIN T2 LEFT JOIN T3 ON T2.C2 = T3.C3 ON T1.C1 = T2.C2; +> C1 C2 C3 +> ---- -- ---- +> 1 1 null +> 4 4 4 +> null 3 3 +> rows: 3 + +EXPLAIN SELECT * FROM T1 RIGHT JOIN T2 LEFT JOIN T3 ON T2.C2 = T3.C3 ON T1.C1 = T2.C2; +>> SELECT "PUBLIC"."T1"."C1", "PUBLIC"."T2"."C2", "PUBLIC"."T3"."C3" FROM "PUBLIC"."T2" /* PUBLIC.T2.tableScan */ LEFT OUTER JOIN "PUBLIC"."T3" /* PUBLIC.T3.tableScan */ ON "T2"."C2" = "T3"."C3" LEFT OUTER JOIN "PUBLIC"."T1" /* PUBLIC.T1.tableScan */ ON "T1"."C1" = "T2"."C2" + +DROP TABLE T1, T2, T3; +> ok + +SELECT X.A, Y.B, Z.C +FROM (SELECT 1 A) X JOIN ( + (SELECT 1 B) Y JOIN (SELECT 1 C) Z ON Z.C = Y.B +) ON Y.B = X.A; +> A B C +> - - - +> 1 1 1 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/queries/query-optimisations.sql b/h2/src/test/org/h2/test/scripts/queries/query-optimisations.sql new file mode 100644 index 0000000000..16f09f0479 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/queries/query-optimisations.sql @@ -0,0 +1,210 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +create table person(firstname varchar, lastname varchar); +> ok + +create index person_1 on person(firstname, lastname); +> ok + +insert into person select convert(x,varchar) as firstname, (convert(x,varchar) || ' last') as lastname from system_range(1,100); +> update count: 100 + +-- Issue #643: verify that when using an index, we use the IN part of the query, if that part of the query +-- can directly use the index. 
+-- +explain analyze SELECT * FROM person WHERE firstname IN ('FirstName1', 'FirstName2') AND lastname='LastName1'; +>> SELECT "PUBLIC"."PERSON"."FIRSTNAME", "PUBLIC"."PERSON"."LASTNAME" FROM "PUBLIC"."PERSON" /* PUBLIC.PERSON_1: FIRSTNAME IN('FirstName1', 'FirstName2') AND LASTNAME = 'LastName1' */ /* scanCount: 1 */ WHERE ("FIRSTNAME" IN('FirstName1', 'FirstName2')) AND ("LASTNAME" = 'LastName1') + +CREATE TABLE TEST(A SMALLINT PRIMARY KEY, B SMALLINT); +> ok + +CREATE INDEX TEST_IDX_1 ON TEST(B); +> ok + +CREATE INDEX TEST_IDX_2 ON TEST(B, A); +> ok + +INSERT INTO TEST VALUES (1, 2), (3, 4); +> update count: 2 + +EXPLAIN SELECT _ROWID_ FROM TEST WHERE B = 4; +>> SELECT _ROWID_ FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +EXPLAIN SELECT _ROWID_, A FROM TEST WHERE B = 4; +>> SELECT _ROWID_, "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +EXPLAIN SELECT A FROM TEST WHERE B = 4; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +SELECT _ROWID_, A FROM TEST WHERE B = 4; +> _ROWID_ A +> ------- - +> 3 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A TINYINT PRIMARY KEY, B TINYINT); +> ok + +CREATE INDEX TEST_IDX_1 ON TEST(B); +> ok + +CREATE INDEX TEST_IDX_2 ON TEST(B, A); +> ok + +INSERT INTO TEST VALUES (1, 2), (3, 4); +> update count: 2 + +EXPLAIN SELECT _ROWID_ FROM TEST WHERE B = 4; +>> SELECT _ROWID_ FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +EXPLAIN SELECT _ROWID_, A FROM TEST WHERE B = 4; +>> SELECT _ROWID_, "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +EXPLAIN SELECT A FROM TEST WHERE B = 4; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +SELECT _ROWID_, A FROM TEST WHERE B = 4; +> _ROWID_ A +> ------- - +> 3 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(V VARCHAR(2)) AS VALUES -1, -2; +> ok + +CREATE INDEX TEST_INDEX ON TEST(V); +> ok + +SELECT * FROM TEST WHERE V 
>= -1; +>> -1 + +-- H2 may use the index for a table scan, but may not create index conditions due to incompatible type +EXPLAIN SELECT * FROM TEST WHERE V >= -1; +>> SELECT "PUBLIC"."TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_INDEX */ WHERE "V" >= -1 + +EXPLAIN SELECT * FROM TEST WHERE V IN (-1, -3); +>> SELECT "PUBLIC"."TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_INDEX */ WHERE "V" IN(-1, -3) + +SELECT * FROM TEST WHERE V < -1; +>> -2 + +DROP TABLE TEST; +> ok + +CREATE TABLE T(ID INT, V INT) AS VALUES (1, 1), (1, 2), (2, 1), (2, 2); +> ok + +SELECT T1.ID, T2.V AS LV FROM (SELECT ID, MAX(V) AS LV FROM T GROUP BY ID) AS T1 + INNER JOIN T AS T2 ON T2.ID = T1.ID AND T2.V = T1.LV + WHERE T1.ID IN (1, 2) ORDER BY ID; +> ID LV +> -- -- +> 1 2 +> 2 2 +> rows (ordered): 2 + +EXPLAIN SELECT T1.ID, T2.V AS LV FROM (SELECT ID, MAX(V) AS LV FROM T GROUP BY ID) AS T1 + INNER JOIN T AS T2 ON T2.ID = T1.ID AND T2.V = T1.LV + WHERE T1.ID IN (1, 2) ORDER BY ID; +>> SELECT "T1"."ID", "T2"."V" AS "LV" FROM "PUBLIC"."T" "T2" /* PUBLIC.T.tableScan */ INNER JOIN ( SELECT "ID", MAX("V") AS "LV" FROM "PUBLIC"."T" GROUP BY "ID" ) "T1" /* SELECT ID, MAX(V) AS LV FROM PUBLIC.T /* PUBLIC.T.tableScan */ WHERE ID IS NOT DISTINCT FROM ?1 GROUP BY ID HAVING MAX(V) IS NOT DISTINCT FROM ?2: ID = T2.ID AND LV = T2.V */ ON 1=1 WHERE ("T1"."ID" IN(1, 2)) AND ("T2"."ID" = "T1"."ID") AND ("T2"."V" = "T1"."LV") ORDER BY 1 + +DROP TABLE T; +> ok + +SELECT (SELECT ROWNUM) R FROM VALUES 1, 2, 3; +> R +> - +> 1 +> 1 +> 1 +> rows: 3 + +CREATE TABLE TEST(A INT, B INT, C INT) AS VALUES (1, 1, 1); +> ok + +SELECT T1.A FROM TEST T1 LEFT OUTER JOIN TEST T2 ON T1.B = T2.A WHERE (SELECT T2.C) IS NOT NULL ORDER BY T1.A; +>> 1 + +EXPLAIN SELECT T1.A FROM TEST T1 LEFT OUTER JOIN TEST T2 ON T1.B = T2.A WHERE (SELECT T2.C) IS NOT NULL ORDER BY T1.A; +>> SELECT "T1"."A" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST.tableScan */ ON "T1"."B" = "T2"."A" WHERE 
"T2"."C" IS NOT NULL ORDER BY 1 + +SELECT X, (SELECT X IN (SELECT B FROM TEST)) FROM SYSTEM_RANGE(1, 2); +> X X IN( SELECT DISTINCT B FROM PUBLIC.TEST) +> - ----------------------------------------- +> 1 TRUE +> 2 FALSE +> rows: 2 + +SELECT T1.A FROM TEST T1 LEFT OUTER JOIN TEST T2 ON T1.B = T2.A WHERE (SELECT T2.C + ROWNUM) IS NOT NULL ORDER BY T1.A; +>> 1 + +EXPLAIN SELECT T1.A FROM TEST T1 LEFT OUTER JOIN TEST T2 ON T1.B = T2.A WHERE (SELECT T2.C + ROWNUM) IS NOT NULL ORDER BY T1.A; +>> SELECT "T1"."A" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST.tableScan */ ON "T1"."B" = "T2"."A" WHERE ("T2"."C" + CAST(1 AS BIGINT)) IS NOT NULL ORDER BY 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE A(T TIMESTAMP WITH TIME ZONE UNIQUE) AS VALUES + TIMESTAMP WITH TIME ZONE '2020-01-01 00:01:02+02', + TIMESTAMP WITH TIME ZONE '2020-01-01 00:01:02+01'; +> ok + +CREATE TABLE B(D DATE) AS VALUES DATE '2020-01-01'; +> ok + +SET TIME ZONE '01:00'; +> ok + +SELECT T FROM A JOIN B ON T >= D; +>> 2020-01-01 00:01:02+01 + +EXPLAIN SELECT T FROM A JOIN B ON T >= D; +>> SELECT "T" FROM "PUBLIC"."B" /* PUBLIC.B.tableScan */ INNER JOIN "PUBLIC"."A" /* PUBLIC.CONSTRAINT_INDEX_4: T >= D */ ON 1=1 WHERE "T" >= "D" + +SET TIME ZONE LOCAL; +> ok + +DROP TABLE A, B; +> ok + +CREATE TABLE TEST(T TIMESTAMP WITH TIME ZONE) AS VALUES + NULL, + TIMESTAMP WITH TIME ZONE '2020-01-01 00:00:00+00', + TIMESTAMP WITH TIME ZONE '2020-01-01 01:00:00+01', + TIMESTAMP WITH TIME ZONE '2020-01-01 02:00:00+01', + NULL; +> ok + +SELECT T AT TIME ZONE 'UTC' FROM TEST GROUP BY T; +> T AT TIME ZONE 'UTC' +> ---------------------- +> 2020-01-01 00:00:00+00 +> 2020-01-01 01:00:00+00 +> null +> rows: 3 + +CREATE INDEX TEST_T_IDX ON TEST(T); +> ok + +SELECT T AT TIME ZONE 'UTC' FROM TEST GROUP BY T; +> T AT TIME ZONE 'UTC' +> ---------------------- +> 2020-01-01 00:00:00+00 +> 2020-01-01 01:00:00+00 +> null +> rows: 3 + +EXPLAIN SELECT T AT TIME ZONE 'UTC' FROM TEST 
GROUP BY T; +>> SELECT "T" AT TIME ZONE 'UTC' FROM "PUBLIC"."TEST" /* PUBLIC.TEST_T_IDX */ GROUP BY "T" /* group sorted */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/queries/select.sql b/h2/src/test/org/h2/test/scripts/queries/select.sql new file mode 100644 index 0000000000..02c4d8e352 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/queries/select.sql @@ -0,0 +1,1186 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +INSERT INTO TEST VALUES (1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 2, 1), (1, 2, 2), (1, 2, 3), + (2, 1, 1), (2, 1, 2), (2, 1, 3), (2, 2, 1), (2, 2, 2), (2, 2, 3); +> update count: 12 + +SELECT * FROM TEST ORDER BY A, B; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> 2 1 1 +> 2 1 2 +> 2 1 3 +> 2 2 1 +> 2 2 2 +> 2 2 3 +> rows (partially ordered): 12 + +SELECT * FROM TEST ORDER BY A, B, C FETCH FIRST 4 ROWS ONLY; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> rows (ordered): 4 + +SELECT * FROM TEST ORDER BY A, B, C FETCH FIRST 4 ROWS WITH TIES; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> rows (ordered): 4 + +SELECT * FROM TEST ORDER BY A, B FETCH FIRST 4 ROWS WITH TIES; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 6 + +SELECT * FROM TEST ORDER BY A FETCH FIRST ROW WITH TIES; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 6 + +SELECT TOP (1) WITH TIES * FROM TEST ORDER BY A; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 6 + +SELECT TOP 1 PERCENT WITH TIES * FROM TEST ORDER BY A; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 6 + +SELECT TOP 51 PERCENT WITH TIES * FROM TEST ORDER BY A, B; +> A 
B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> 2 1 1 +> 2 1 2 +> 2 1 3 +> rows (partially ordered): 9 + +SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; +> A B C +> - - - +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 3 + +SELECT * FROM TEST FETCH NEXT ROWS ONLY; +> A B C +> - - - +> 1 1 1 +> rows: 1 + +SELECT * FROM TEST FETCH FIRST 101 PERCENT ROWS ONLY; +> exception INVALID_VALUE_2 + +SELECT * FROM TEST FETCH FIRST -1 PERCENT ROWS ONLY; +> exception INVALID_VALUE_2 + +SELECT * FROM TEST FETCH FIRST 0 PERCENT ROWS ONLY; +> A B C +> - - - +> rows: 0 + +SELECT * FROM TEST FETCH FIRST 1 PERCENT ROWS ONLY; +> A B C +> - - - +> 1 1 1 +> rows: 1 + +SELECT * FROM TEST FETCH FIRST 10 PERCENT ROWS ONLY; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> rows: 2 + +SELECT * FROM TEST OFFSET 2 ROWS FETCH NEXT 10 PERCENT ROWS ONLY; +> A B C +> - - - +> 1 1 3 +> 1 2 1 +> rows: 2 + +CREATE INDEX TEST_A_IDX ON TEST(A); +> ok + +CREATE INDEX TEST_A_B_IDX ON TEST(A, B); +> ok + +SELECT * FROM TEST ORDER BY A FETCH FIRST 1 ROW WITH TIES; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 6 + +SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; +> A B C +> - - - +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 3 + +SELECT * FROM TEST FETCH FIRST 1 ROW WITH TIES; +> exception WITH_TIES_WITHOUT_ORDER_BY + +(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; +> A B C +> - - - +> 1 2 1 +> 1 2 2 +> 1 2 3 +> 1 2 4 +> rows (partially ordered): 4 + +(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 50 PERCENT ROWS ONLY; +> A B C +> - - - +> 1 2 1 +> 1 2 2 +> 1 2 3 +> 1 2 4 +> 2 1 1 +> 2 1 2 +> 2 1 3 +> rows (partially ordered): 7 + +(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 40 PERCENT ROWS WITH TIES; +> A B C +> - - - +> 1 2 1 +> 1 2 2 +> 1 2 3 
+> 1 2 4 +> 2 1 1 +> 2 1 2 +> 2 1 3 +> rows (partially ordered): 7 + +(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) FETCH NEXT 1 ROW WITH TIES; +> exception WITH_TIES_WITHOUT_ORDER_BY + +EXPLAIN SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX */ ORDER BY 1, 2 OFFSET 3 ROWS FETCH NEXT ROW WITH TIES /* index sorted */ + +EXPLAIN SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 PERCENT ROWS WITH TIES; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX */ ORDER BY 1, 2 OFFSET 3 ROWS FETCH NEXT 1 PERCENT ROWS WITH TIES /* index sorted */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A VARCHAR_IGNORECASE, B VARCHAR_IGNORECASE); +> ok + +INSERT INTO TEST VALUES ('A', 1), ('a', 2), ('A', 3), ('B', 4); +> update count: 4 + +SELECT A, B FROM TEST ORDER BY A FETCH FIRST 1 ROW WITH TIES; +> A B +> - - +> A 1 +> A 3 +> a 2 +> rows (partially ordered): 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT); +> ok + +INSERT INTO TEST VALUES (1, 1), (1, 2), (2, 1), (2, 2), (2, 3); +> update count: 5 + +SELECT A, COUNT(B) FROM TEST GROUP BY A ORDER BY A OFFSET 1; +> A COUNT(B) +> - -------- +> 2 3 +> rows (ordered): 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, "VALUE" VARCHAR) AS VALUES (1, 'A'), (2, 'B'), (3, 'C'); +> ok + +SELECT * FROM TEST ORDER BY ID DESC OFFSET 2 ROWS FETCH FIRST 2147483646 ROWS ONLY; +> ID VALUE +> -- ----- +> 1 A +> rows (ordered): 1 + +SELECT * FROM TEST ORDER BY ID DESC OFFSET 2 ROWS FETCH FIRST 2147483647 ROWS ONLY; +> ID VALUE +> -- ----- +> 1 A +> rows (ordered): 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST1(A INT, B INT, C INT) AS SELECT 1, 2, 3; +> ok + +CREATE TABLE TEST2(A INT, B INT, C INT) AS SELECT 4, 5, 6; +> ok + +SELECT A, B FROM TEST1 UNION SELECT A, B FROM TEST2 ORDER BY TEST1.C; +> 
exception ORDER_BY_NOT_IN_RESULT + +DROP TABLE TEST1; +> ok + +DROP TABLE TEST2; +> ok + +-- Disallowed mixed OFFSET/FETCH/LIMIT/TOP clauses +CREATE TABLE TEST (ID BIGINT); +> ok + +SELECT TOP 1 ID FROM TEST OFFSET 1 ROW; +> exception SYNTAX_ERROR_1 + +SELECT TOP 1 ID FROM TEST FETCH NEXT ROW ONLY; +> exception SYNTAX_ERROR_1 + +SELECT TOP 1 ID FROM TEST LIMIT 1; +> exception SYNTAX_ERROR_1 + +SELECT ID FROM TEST OFFSET 1 ROW LIMIT 1; +> exception SYNTAX_ERROR_1 + +SELECT ID FROM TEST FETCH NEXT ROW ONLY LIMIT 1; +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok + +-- ORDER BY with parameter +CREATE TABLE TEST(A INT, B INT); +> ok + +INSERT INTO TEST VALUES (1, 1), (1, 2), (2, 1), (2, 2); +> update count: 4 + +SELECT * FROM TEST ORDER BY ?, ? FETCH FIRST ROW ONLY; +{ +1, 2 +> A B +> - - +> 1 1 +> rows (ordered): 1 +-1, 2 +> A B +> - - +> 2 1 +> rows (ordered): 1 +1, -2 +> A B +> - - +> 1 2 +> rows (ordered): 1 +-1, -2 +> A B +> - - +> 2 2 +> rows (ordered): 1 +2, -1 +> A B +> - - +> 2 1 +> rows (ordered): 1 +} +> update count: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST1(A INT, B INT, C INT) AS SELECT 1, 2, 3; +> ok + +CREATE TABLE TEST2(A INT, D INT) AS SELECT 4, 5; +> ok + +SELECT * FROM TEST1, TEST2; +> A B C A D +> - - - - - +> 1 2 3 4 5 +> rows: 1 + +SELECT * EXCEPT (A) FROM TEST1; +> B C +> - - +> 2 3 +> rows: 1 + +SELECT * EXCEPT (TEST1.A) FROM TEST1; +> B C +> - - +> 2 3 +> rows: 1 + +SELECT * EXCEPT (PUBLIC.TEST1.A) FROM TEST1; +> B C +> - - +> 2 3 +> rows: 1 + +SELECT * EXCEPT (SCRIPT.PUBLIC.TEST1.A) FROM TEST1; +> B C +> - - +> 2 3 +> rows: 1 + +SELECT * EXCEPT (Z) FROM TEST1; +> exception COLUMN_NOT_FOUND_1 + +SELECT * EXCEPT (B, TEST1.B) FROM TEST1; +> exception DUPLICATE_COLUMN_NAME_1 + +SELECT * EXCEPT (A) FROM TEST1, TEST2; +> exception AMBIGUOUS_COLUMN_NAME_1 + +SELECT * EXCEPT (TEST1.A, B, TEST2.D) FROM TEST1, TEST2; +> C A +> - - +> 3 4 +> rows: 1 + +SELECT TEST1.*, TEST2.* FROM TEST1, TEST2; +> A B C A D +> - - - - - +> 1 2 3 4 5 +> 
rows: 1 + +SELECT TEST1.* EXCEPT (A), TEST2.* EXCEPT (A) FROM TEST1, TEST2; +> B C D +> - - - +> 2 3 5 +> rows: 1 + +SELECT TEST1.* EXCEPT (A), TEST2.* EXCEPT (D) FROM TEST1, TEST2; +> B C A +> - - - +> 2 3 4 +> rows: 1 + +SELECT * EXCEPT (T1.A, T2.D) FROM TEST1 T1, TEST2 T2; +> B C A +> - - - +> 2 3 4 +> rows: 1 + +DROP TABLE TEST1, TEST2; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, "VALUE" INT NOT NULL); +> ok + +INSERT INTO TEST VALUES (1, 1), (2, 1), (3, 2); +> update count: 3 + +SELECT ID, "VALUE" FROM TEST FOR UPDATE; +> ID VALUE +> -- ----- +> 1 1 +> 2 1 +> 3 2 +> rows: 3 + +-- Check that NULL row is returned from SELECT FOR UPDATE +CREATE TABLE T1(A INT PRIMARY KEY) AS VALUES 1, 2; +> ok + +CREATE TABLE T2(B INT PRIMARY KEY) AS VALUES 1; +> ok + +SELECT * FROM T1 LEFT JOIN T2 ON A = B FOR UPDATE; +> A B +> - ---- +> 1 1 +> 2 null +> rows: 2 + +DROP TABLE T1, T2; +> ok + +SELECT DISTINCT "VALUE" FROM TEST FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +SELECT DISTINCT ON("VALUE") ID, "VALUE" FROM TEST FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +SELECT SUM("VALUE") FROM TEST FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +SELECT ID FROM TEST GROUP BY "VALUE" FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +SELECT 1 FROM TEST HAVING TRUE FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT) AS SELECT X, X + 1 FROM SYSTEM_RANGE(1, 3); +> ok + +SELECT ID FROM TEST WHERE ID != ALL (SELECT ID FROM TEST WHERE ID IN(1, 3)); +> ID +> -- +> 2 +> rows: 1 + +SELECT (1, 3) > ANY (SELECT ID, V FROM TEST); +>> TRUE + +SELECT (1, 2) > ANY (SELECT ID, V FROM TEST); +>> FALSE + +SELECT (2, 3) = ANY (SELECT ID, V FROM TEST); +>> TRUE + +SELECT (3, 4) > ALL (SELECT ID, V FROM TEST); +>> FALSE + +DROP TABLE TEST; +> ok + +SELECT 1 = 
ALL (SELECT * FROM VALUES (NULL), (1), (2), (NULL) ORDER BY 1); +>> FALSE + +CREATE TABLE TEST(G INT, V INT); +> ok + +INSERT INTO TEST VALUES (10, 1), (11, 2), (20, 4); +> update count: 3 + +SELECT G / 10 G1, G / 10 G2, SUM(T.V) S FROM TEST T GROUP BY G / 10, G / 10; +> G1 G2 S +> -- -- - +> 1 1 3 +> 2 2 4 +> rows: 2 + +SELECT G / 10 G1, G / 10 G2, SUM(T.V) S FROM TEST T GROUP BY G2; +> G1 G2 S +> -- -- - +> 1 1 3 +> 2 2 4 +> rows: 2 + +DROP TABLE TEST; +> ok + +@reconnect off + +CALL RAND(0); +>> 0.730967787376657 + +SELECT RAND(), RAND() + 1, RAND() + 1, RAND() GROUP BY RAND() + 1; +> RAND() RAND() + 1 RAND() + 1 RAND() +> ------------------ ------------------ ------------------ ------------------ +> 0.6374174253501083 1.2405364156714858 1.2405364156714858 0.5504370051176339 +> rows: 1 + +SELECT RAND() A, RAND() + 1 B, RAND() + 1 C, RAND() D, RAND() + 2 E, RAND() + 3 F GROUP BY B, C, E, F; +> A B C D E F +> ------------------ ------------------ ------------------ ------------------ ------------------ ------------------ +> 0.8791825178724801 1.3332183994766498 1.3332183994766498 0.9412491794821144 2.3851891847407183 3.9848415401998087 +> rows: 1 + +@reconnect on + +CREATE TABLE TEST (A INT, B INT, C INT); +> ok + +INSERT INTO TEST VALUES (11, 12, 13), (21, 22, 23), (31, 32, 33); +> update count: 3 + +SELECT * FROM TEST WHERE (A, B) IN (VALUES (11, 12), (21, 22), (41, 42)); +> A B C +> -- -- -- +> 11 12 13 +> 21 22 23 +> rows: 2 + +SELECT * FROM TEST WHERE (A, B) = (VALUES (11, 12)); +> A B C +> -- -- -- +> 11 12 13 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A BIGINT, B INT) AS VALUES (1::BIGINT, 2); +> ok + +SELECT * FROM TEST WHERE (A, B) IN ((1, 2), (3, 4)); +> A B +> - - +> 1 2 +> rows: 1 + +UPDATE TEST SET A = 1000000000000; +> update count: 1 + +SELECT * FROM TEST WHERE (A, B) IN ((1, 2), (3, 4)); +> A B +> - - +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A BIGINT, B INT) AS VALUES (1, 2); +> ok + +SELECT * FROM TEST WHERE (A, B) 
IN ((1::BIGINT, 2), (3, 4)); +> A B +> - - +> 1 2 +> rows: 1 + +SELECT * FROM TEST WHERE (A, B) IN ((1000000000000, 2), (3, 4)); +> A B +> - - +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I) AS VALUES 1, 2, 3; +> ok + +SELECT COUNT(*) C FROM TEST HAVING C < 1; +> C +> - +> rows: 0 + +SELECT COUNT(*) C FROM TEST QUALIFY C < 1; +> C +> - +> rows: 0 + +DROP TABLE TEST; +> ok + +SELECT A, ROW_NUMBER() OVER (ORDER BY B) R +FROM (VALUES (1, 2), (2, 1), (3, 3)) T(A, B); +> A R +> - - +> 1 2 +> 2 1 +> 3 3 +> rows: 3 + +SELECT X, A, ROW_NUMBER() OVER (ORDER BY B) R +FROM (SELECT 1 X), (VALUES (1, 2), (2, 1), (3, 3)) T(A, B); +> X A R +> - - - +> 1 1 2 +> 1 2 1 +> 1 3 3 +> rows: 3 + +SELECT A, SUM(S) OVER (ORDER BY S) FROM + (SELECT A, SUM(B) FROM (VALUES (1, 2), (1, 3), (3, 5), (3, 10)) V(A, B) GROUP BY A) S(A, S); +> A SUM(S) OVER (ORDER BY S) +> - ------------------------ +> 1 5 +> 3 20 +> rows: 2 + +SELECT A, SUM(A) OVER W SUM FROM (VALUES 1, 2) T(A) WINDOW W AS (ORDER BY A); +> A SUM +> - --- +> 1 1 +> 2 3 +> rows: 2 + +SELECT A, B, C FROM (SELECT A, B, C FROM (VALUES (1, 2, 3)) V(A, B, C)); +> A B C +> - - - +> 1 2 3 +> rows: 1 + +SELECT * FROM (SELECT * FROM (VALUES (1, 2, 3)) V(A, B, C)); +> A B C +> - - - +> 1 2 3 +> rows: 1 + +SELECT * FROM + (SELECT X * X, Y FROM + (SELECT A + 5, B FROM + (VALUES (1, 2)) V(A, B) + ) T(X, Y) + ); +> X * X Y +> ----- - +> 36 2 +> rows: 1 + +CREATE TABLE TEST("_ROWID_" INT) AS VALUES 2; +> ok + +SELECT _ROWID_ S1, TEST._ROWID_ S2, PUBLIC.TEST._ROWID_ S3, SCRIPT.PUBLIC.TEST._ROWID_ S4, + "_ROWID_" U1, TEST."_ROWID_" U2, PUBLIC.TEST."_ROWID_" U3, SCRIPT.PUBLIC.TEST."_ROWID_" U4 + FROM TEST; +> S1 S2 S3 S4 U1 U2 U3 U4 +> -- -- -- -- -- -- -- -- +> 1 1 1 1 2 2 2 2 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT PRIMARY KEY); +> ok + +SELECT X.ID FROM TEST X JOIN TEST Y ON Y.ID IN (SELECT 1); +> ID +> -- +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT) AS VALUES (1, 10), (2, 20), (4, 
40); +> ok + +SELECT T1.A, T2.ARR FROM TEST T1 JOIN ( + SELECT A, ARRAY_AGG(B) OVER (ORDER BY B ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) ARR FROM TEST +) T2 ON T1.A = T2.A; +> A ARR +> - -------- +> 1 [20, 40] +> 2 [40] +> 4 null +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT UNIQUE); +> ok + +EXPLAIN SELECT * FROM TEST ORDER BY ID FOR UPDATE; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ ORDER BY 1 FOR UPDATE /* index sorted */ + +EXPLAIN SELECT * FROM TEST ORDER BY V; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.CONSTRAINT_INDEX_2 */ ORDER BY 2 /* index sorted */ + +EXPLAIN SELECT * FROM TEST ORDER BY V FOR UPDATE; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.CONSTRAINT_INDEX_2 */ ORDER BY 2 FOR UPDATE + +DROP TABLE TEST; +> ok + +-- The next tests should be at the end of this file + +SET MAX_MEMORY_ROWS = 1; +> ok + +CREATE TABLE TEST(I INT) AS SELECT * FROM SYSTEM_RANGE(1, 10); +> ok + +SELECT COUNT(*) FROM (SELECT I, SUM(I) S, COUNT(I) C FROM TEST GROUP BY I HAVING S + C <= 9 ORDER BY I); +>> 8 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT); +> ok + +EXPLAIN SELECT * FROM TEST WHERE A = 1 AND B = 1 OR A = 2 AND B = 2; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE (("A" = 1) AND ("B" = 1)) OR (("A" = 2) AND ("B" = 2)) + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT) AS VALUES (1, 2), (1, 3), (5, 5); +> ok + +SELECT (SELECT A, B FROM TEST ORDER BY A + B FETCH FIRST ROW ONLY); +>> ROW (1, 2) + +SELECT * FROM TEST UNION ALL SELECT * FROM TEST OFFSET 2 ROWS; +> A B +> - - +> 1 2 +> 1 3 +> 5 5 +> 5 5 +> rows: 4 + +SELECT (1, 2) IN (SELECT * FROM TEST UNION ALL SELECT * FROM TEST OFFSET 2 ROWS); +>> TRUE + +SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY A DESC, B DESC OFFSET 2 ROWS; +> A B +> - - +> 1 3 +> 1 
3 +> 1 2 +> 1 2 +> rows (ordered): 4 + +SELECT (1, 2) IN (SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY A DESC, B DESC OFFSET 2 ROWS); +>> TRUE + +SELECT (1, 2) IN (SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY A DESC, B DESC OFFSET 2 ROWS FETCH NEXT 1 ROW ONLY); +>> FALSE + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, NAME VARCHAR, DATA VARCHAR); +> ok + +-- This ORDER BY condition is currently forbidden +SELECT DISTINCT DATA FROM TEST ORDER BY (CASE WHEN EXISTS(SELECT * FROM TEST T WHERE T.NAME = 'A') THEN 1 ELSE 2 END); +> exception ORDER_BY_NOT_IN_RESULT + +SELECT DISTINCT DATA FROM TEST X ORDER BY (CASE WHEN EXISTS(SELECT * FROM TEST T WHERE T.ID = X.ID + 1) THEN 1 ELSE 2 END); +> exception ORDER_BY_NOT_IN_RESULT + +DROP TABLE TEST; +> ok + +-- Additional GROUP BY tests + +CREATE TABLE TEST(A INT, B INT, C INT) AS (VALUES + (NULL, NULL, NULL), (NULL, NULL, 1), (NULL, NULL, 2), + (NULL, 1, NULL), (NULL, 1, 1), (NULL, 1, 2), + (NULL, 2, NULL), (NULL, 2, 1), (NULL, 2, 2), + (1, NULL, NULL), (1, NULL, 1), (1, NULL, 2), + (1, 1, NULL), (1, 1, 1), (1, 1, 2), + (1, 2, NULL), (1, 2, 1), (1, 2, 2), + (2, NULL, NULL), (2, NULL, 1), (2, NULL, 2), + (2, 1, NULL), (2, 1, 1), (2, 1, 2), + (2, 2, NULL), (2, 2, 1), (2, 2, 2)); +> ok + +SELECT SUM(A), B, C FROM TEST GROUP BY B, C; +> SUM(A) B C +> ------ ---- ---- +> 3 1 1 +> 3 1 2 +> 3 1 null +> 3 2 1 +> 3 2 2 +> 3 2 null +> 3 null 1 +> 3 null 2 +> 3 null null +> rows: 9 + +EXPLAIN SELECT SUM(A), B, C FROM TEST GROUP BY B, C; +>> SELECT SUM("A"), "B", "C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "B", "C" + +SELECT SUM(A), B, C FROM TEST GROUP BY (B), C, (); +> SUM(A) B C +> ------ ---- ---- +> 3 1 1 +> 3 1 2 +> 3 1 null +> 3 2 1 +> 3 2 2 +> 3 2 null +> 3 null 1 +> 3 null 2 +> 3 null null +> rows: 9 + +EXPLAIN SELECT SUM(A), B, C FROM TEST GROUP BY (B), C, (); +>> SELECT SUM("A"), "B", "C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "B", "C" + +SELECT SUM(A), B, C 
FROM TEST GROUP BY (B, C); +> SUM(A) B C +> ------ ---- ---- +> 3 1 1 +> 3 1 2 +> 3 1 null +> 3 2 1 +> 3 2 2 +> 3 2 null +> 3 null 1 +> 3 null 2 +> 3 null null +> rows: 9 + +EXPLAIN SELECT SUM(A), B, C FROM TEST GROUP BY (B, C); +>> SELECT SUM("A"), "B", "C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "B", "C" + +SELECT COUNT(*) FROM TEST; +>> 27 + +EXPLAIN SELECT COUNT(*) FROM TEST; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +SELECT COUNT(*) FROM TEST GROUP BY (); +>> 27 + +EXPLAIN SELECT COUNT(*) FROM TEST GROUP BY (); +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +SELECT COUNT(*) FROM TEST WHERE FALSE; +>> 0 + +EXPLAIN SELECT COUNT(*) FROM TEST WHERE FALSE; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE + +SELECT COUNT(*) FROM TEST WHERE FALSE GROUP BY (); +>> 0 + +EXPLAIN SELECT COUNT(*) FROM TEST WHERE FALSE GROUP BY (); +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE + +SELECT COUNT(*) FROM TEST WHERE FALSE GROUP BY (), (); +>> 0 + +EXPLAIN SELECT COUNT(*) FROM TEST WHERE FALSE GROUP BY (), (); +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE + +SELECT 1 FROM TEST GROUP BY (); +>> 1 + +EXPLAIN SELECT 1 FROM TEST GROUP BY (); +>> SELECT 1 FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY () /* direct lookup */ + +EXPLAIN SELECT FALSE AND MAX(A) > 0 FROM TEST; +>> SELECT FALSE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY () /* direct lookup */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT PRIMARY KEY) AS (VALUES 1, 2, 3); +> ok + +SELECT A AS A1, A AS A2 FROM TEST GROUP BY A; +> A1 A2 +> -- -- +> 1 1 +> 2 2 +> 3 3 +> rows: 3 + +DROP TABLE TEST; +> ok + +-- Tests for SELECT without columns + +EXPLAIN SELECT *; +>> SELECT + +SELECT; +> +> +> +> rows: 1 + +SELECT FROM DUAL; +> +> +> +> rows: 1 + +SELECT * FROM 
DUAL JOIN (SELECT * FROM DUAL) ON 1 = 1; +> +> +> +> rows: 1 + +EXPLAIN SELECT * FROM DUAL JOIN (SELECT * FROM DUAL) ON 1 = 1; +>> SELECT FROM DUAL /* dual index */ INNER JOIN ( SELECT ) "_7" /* SELECT */ ON 1=1 + +SELECT WHERE FALSE; +> +> +> rows: 0 + +SELECT GROUP BY (); +> +> +> +> rows: 1 + +SELECT HAVING FALSE; +> +> +> rows: 0 + +SELECT QUALIFY FALSE; +> +> +> rows: 0 + +SELECT ORDER BY (SELECT 1); +> +> +> +> rows: 1 + +SELECT OFFSET 0 ROWS; +> +> +> +> rows: 1 + +SELECT FETCH FIRST 0 ROWS ONLY; +> +> +> rows: 0 + +CREATE TABLE TEST(A INT, B INT, C INT, D INT); +> ok + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) + C; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY ("A" + "B") + "C" + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B); +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" + +EXPLAIN SELECT 1 FROM (SELECT SUM(D) FROM TEST GROUP BY (A + B)) T; +>> SELECT 1 FROM ( SELECT SUM("D") FROM "PUBLIC"."TEST" GROUP BY "A" + "B" ) "T" /* SELECT SUM(D) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ GROUP BY A + B */ + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B), C; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B", "C" + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) HAVING TRUE; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" HAVING TRUE + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) WINDOW W AS (); +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) QUALIFY TRUE; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" QUALIFY TRUE + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) UNION VALUES 1; +>> (SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B") UNION (VALUES (1)) + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) EXCEPT VALUES 1; +>> 
(SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B") EXCEPT (VALUES (1)) + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) MINUS VALUES 1; +>> (SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B") EXCEPT (VALUES (1)) + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) INTERSECT VALUES 1; +>> (SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B") INTERSECT (VALUES (1)) + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) ORDER BY SUM(D); +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" ORDER BY 1 + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) OFFSET 0 ROWS; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" OFFSET 0 ROWS + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) FETCH FIRST ROW ONLY; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" FETCH FIRST ROW ONLY + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) LIMIT 1; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" FETCH FIRST ROW ONLY + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT) AS VALUES 1, 2; +> ok + +SELECT A, A FROM TEST GROUP BY A HAVING SUM(A) > 0; +> A A +> - - +> 1 1 +> 2 2 +> rows: 2 + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT X FROM SYSTEM_RANGE(1, 10) A ORDER BY (SELECT X FROM SYSTEM_RANGE(1, 20) B WHERE A.X = B.X); +>> SELECT "X" FROM SYSTEM_RANGE(1, 10) "A" /* range index */ ORDER BY (SELECT "X" FROM SYSTEM_RANGE(1, 20) "B" /* range index: X = A.X */ WHERE "A"."X" = "B"."X") + +EXPLAIN SELECT X FROM SYSTEM_RANGE(1, 10) ORDER BY 'a'; +>> SELECT "X" FROM SYSTEM_RANGE(1, 10) /* range index */ + +EXPLAIN SELECT (SELECT 1); +>> SELECT 1 + +EXPLAIN SELECT (SELECT DISTINCT 1); +>> SELECT 1 + +EXPLAIN SELECT 
(SELECT DISTINCT ON(RAND()) 1); +>> SELECT 1 + +EXPLAIN SELECT (SELECT 1 WHERE TRUE); +>> SELECT 1 + +EXPLAIN SELECT (SELECT 1 HAVING TRUE); +>> SELECT (SELECT 1 HAVING TRUE) + +EXPLAIN SELECT (SELECT 1 QUALIFY TRUE); +>> SELECT (SELECT 1 QUALIFY TRUE) + +EXPLAIN SELECT (VALUES 1, 2 OFFSET 1 ROW); +>> SELECT 2 + +EXPLAIN SELECT (VALUES 1, 2 OFFSET RAND() ROWS); +>> SELECT (VALUES (1), (2) OFFSET RAND() ROWS) + +EXPLAIN SELECT (VALUES 1 FETCH FIRST 2 ROWS ONLY); +>> SELECT 1 + +EXPLAIN SELECT (VALUES 1, 2 FETCH FIRST RAND() ROWS ONLY); +>> SELECT (VALUES (1), (2) FETCH FIRST RAND() ROWS ONLY) + +EXPLAIN SELECT X FROM SYSTEM_RANGE(1, 10) ORDER BY (SELECT 1); +>> SELECT "X" FROM SYSTEM_RANGE(1, 10) /* range index */ + +EXPLAIN SELECT X FROM SYSTEM_RANGE(1, 10) ORDER BY (SELECT RAND()); +>> SELECT "X" FROM SYSTEM_RANGE(1, 10) /* range index */ ORDER BY RAND() + +EXPLAIN SELECT (SELECT 1, RAND()); +>> SELECT ROW (1, RAND()) + +EXPLAIN SELECT (VALUES (1, RAND())); +>> SELECT ROW (1, RAND()) + +EXPLAIN SELECT (VALUES 1, RAND()); +>> SELECT (VALUES (1), (RAND())) + +EXPLAIN SELECT X FROM SYSTEM_RANGE(1, 10) ORDER BY X, (1+1), -X; +>> SELECT "X" FROM SYSTEM_RANGE(1, 10) /* range index */ ORDER BY 1, - "X" + + +CREATE TABLE T1 ( + T1_ID BIGINT PRIMARY KEY +); +> ok + +INSERT INTO T1 VALUES 1, 2, 3; +> update count: 3 + +CREATE TABLE T2 ( + T2_ID BIGINT PRIMARY KEY, + T1_ID BIGINT NOT NULL REFERENCES T1 +); +> ok + +INSERT INTO T2 VALUES (1, 1), (2, 1), (3, 2), (4, 3); +> update count: 4 + +SELECT * FROM (SELECT * FROM T1 FETCH FIRST 2 ROWS ONLY) T1 JOIN T2 USING (T1_ID); +> T1_ID T2_ID +> ----- ----- +> 1 1 +> 1 2 +> 2 3 +> rows: 3 + + +DROP TABLE T2, T1; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT C1 FROM (SELECT ' || (SELECT LISTAGG('1 C' || X) FROM SYSTEM_RANGE(1, 16384)) || ')'; +> ok + +DROP TABLE TEST; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT C1 FROM (SELECT ' || (SELECT LISTAGG('1 C' || X) FROM SYSTEM_RANGE(1, 16385)) || ')'; +> exception 
TOO_MANY_COLUMNS_1 + +CREATE TABLE TEST(A INT, B INT); +> ok + +CREATE INDEX TEST_IDX ON TEST(A, B); +> ok + +INSERT INTO TEST VALUES (1, 1), (1, 2), (2, 1), (2, 2); +> update count: 4 + +SELECT A, 1 AS X, B FROM TEST ORDER BY A, X, B DESC; +> A X B +> - - - +> 1 1 2 +> 1 1 1 +> 2 1 2 +> 2 1 1 +> rows (ordered): 4 + +EXPLAIN SELECT A, 1 AS X, B FROM TEST ORDER BY A, X, B DESC; +>> SELECT "A", 1 AS "X", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ ORDER BY 1, 2, 3 DESC + +DROP TABLE TEST; +> ok + +SELECT X FROM SYSTEM_RANGE(1, 2) ORDER BY X DESC FETCH FIRST 0xFFFFFFFF ROWS ONLY; +> X +> - +> 2 +> 1 +> rows (ordered): 2 + +SELECT ((SELECT 1 X) EXCEPT (SELECT 1 Y)) T; +> T +> ---- +> null +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/dml/table.sql b/h2/src/test/org/h2/test/scripts/queries/table.sql similarity index 68% rename from h2/src/test/org/h2/test/scripts/dml/table.sql rename to h2/src/test/org/h2/test/scripts/queries/table.sql index f1bd99a3c0..a4d234739b 100644 --- a/h2/src/test/org/h2/test/scripts/dml/table.sql +++ b/h2/src/test/org/h2/test/scripts/queries/table.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- @@ -51,5 +51,14 @@ SELECT (TABLE TEST FETCH FIRST ROW ONLY) "ROW"; > ROW (1, 1, 1) > rows: 1 +EXPLAIN TABLE TEST ORDER BY A; +>> TABLE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ ORDER BY 1 + +CREATE INDEX TEST_A_INDEX ON TEST(A); +> ok + +EXPLAIN TABLE TEST ORDER BY A; +>> TABLE "PUBLIC"."TEST" /* PUBLIC.TEST_A_INDEX */ ORDER BY 1 /* index sorted */ + DROP TABLE TEST; > ok diff --git a/h2/src/test/org/h2/test/scripts/queries/values.sql b/h2/src/test/org/h2/test/scripts/queries/values.sql new file mode 100644 index 0000000000..410945e759 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/queries/values.sql @@ -0,0 +1,115 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +VALUES (1, 2); +> C1 C2 +> -- -- +> 1 2 +> rows: 1 + +VALUES ROW (1, 2); +> C1 C2 +> -- -- +> 1 2 +> rows: 1 + +VALUES 1, 2; +> C1 +> -- +> 1 +> 2 +> rows: 2 + +VALUES 4, 3, 1, 2 ORDER BY 1 FETCH FIRST 75 PERCENT ROWS ONLY; +> C1 +> -- +> 1 +> 2 +> 3 +> rows (ordered): 3 + +SELECT * FROM (VALUES (1::BIGINT, 2)) T (A, B) WHERE (A, B) IN (VALUES(1, 2)); +> A B +> - - +> 1 2 +> rows: 1 + +SELECT * FROM (VALUES (1000000000000, 2)) T (A, B) WHERE (A, B) IN (VALUES(1, 2)); +> A B +> - - +> rows: 0 + +SELECT * FROM (VALUES (1, 2)) T (A, B) WHERE (A, B) IN (VALUES(1::BIGINT, 2)); +> A B +> - - +> 1 2 +> rows: 1 + +SELECT * FROM (VALUES (1, 2)) T (A, B) WHERE (A, B) IN (VALUES(1000000000000, 2)); +> A B +> - - +> rows: 0 + +EXPLAIN VALUES 1, (2), ROW(3); +>> VALUES (1), (2), (3) + +EXPLAIN VALUES (1, 2), (3, 4); +>> VALUES (1, 2), (3, 4) + +EXPLAIN SELECT * FROM (VALUES 1, 2) T(V); +>> SELECT "T"."V" FROM (VALUES (1), (2)) "T"("V") /* table scan */ + +EXPLAIN SELECT * FROM (VALUES 1, 2); +>> SELECT "_0"."C1" FROM (VALUES (1), (2)) "_0" /* table scan */ + +EXPLAIN SELECT * FROM (VALUES 1, 2 ORDER BY 1 DESC); +>> SELECT "_1"."C1" FROM ( VALUES (1), 
(2) ORDER BY 1 DESC ) "_1" /* VALUES (1), (2) ORDER BY 1 DESC */ + +-- Non-standard syntax +EXPLAIN SELECT * FROM VALUES 1, 2; +>> SELECT "_2"."C1" FROM (VALUES (1), (2)) "_2" /* table scan */ + +VALUES (1, 2), (3, 4), (5, 1) ORDER BY C1 + C2; +> C1 C2 +> -- -- +> 1 2 +> 5 1 +> 3 4 +> rows (ordered): 3 + +VALUES (1, 2), (3, 4), (5, 1) ORDER BY C1 + C2, C1 * C2; +> C1 C2 +> -- -- +> 1 2 +> 5 1 +> 3 4 +> rows (ordered): 3 + +VALUES (1, 2), (3, 4), (5, 1) ORDER BY C1 + C2, C1 * C2 OFFSET 1 ROW FETCH FIRST 1 ROW ONLY; +> C1 C2 +> -- -- +> 5 1 +> rows (ordered): 1 + +EXPLAIN VALUES (1, 2), (3, 4), (5, 1) ORDER BY C1 + C2, C1 * C2 OFFSET 1 ROW FETCH FIRST 1 ROW ONLY; +>> VALUES (1, 2), (3, 4), (5, 1) ORDER BY "C1" + "C2", "C1" * "C2" OFFSET 1 ROW FETCH NEXT ROW ONLY + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT C1 FROM (VALUES (' || (SELECT LISTAGG('1') FROM SYSTEM_RANGE(1, 16384)) || '))'; +> ok + +DROP TABLE TEST; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT C1 FROM (VALUES (' || (SELECT LISTAGG('1') FROM SYSTEM_RANGE(1, 16385)) || '))'; +> exception TOO_MANY_COLUMNS_1 + +VALUES (1), (1, 2); +> exception COLUMN_COUNT_DOES_NOT_MATCH + +EXPLAIN SELECT C1, 2 FROM (VALUES 1, 2, 3) T ORDER BY 1; +>> SELECT "C1", 2 FROM (VALUES (1), (2), (3)) "T" /* table scan */ ORDER BY 1 + +EXPLAIN SELECT C1, 2 FROM (VALUES 1, 2, 3) T ORDER BY (1); +>> SELECT "C1", 2 FROM (VALUES (1), (2), (3)) "T" /* table scan */ diff --git a/h2/src/test/org/h2/test/scripts/window.sql b/h2/src/test/org/h2/test/scripts/queries/window.sql similarity index 94% rename from h2/src/test/org/h2/test/scripts/window.sql rename to h2/src/test/org/h2/test/scripts/queries/window.sql index 86cb23ef18..7e1e8560ac 100644 --- a/h2/src/test/org/h2/test/scripts/window.sql +++ b/h2/src/test/org/h2/test/scripts/queries/window.sql @@ -1,5 +1,5 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). 
+-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- @@ -210,8 +210,8 @@ DROP TABLE TEST; > ok SELECT - ARRAY_AGG(T) OVER (ORDER BY T RANGE BETWEEN INTERVAL 1 DAY PRECEDING AND CURRENT ROW) C, - ARRAY_AGG(T) OVER (ORDER BY T RANGE BETWEEN INTERVAL 2 HOUR PRECEDING AND INTERVAL 1 HOUR PRECEDING) P, + ARRAY_AGG(T) OVER (ORDER BY T RANGE BETWEEN INTERVAL '1' DAY PRECEDING AND CURRENT ROW) C, + ARRAY_AGG(T) OVER (ORDER BY T RANGE BETWEEN INTERVAL '2' HOUR PRECEDING AND INTERVAL '1' HOUR PRECEDING) P, T FROM VALUES (TIME '00:00:00'), (TIME '01:30:00') TEST(T) ORDER BY T; > C P T > -------------------- ---------- -------- @@ -227,3 +227,6 @@ SELECT SUM(A) OVER (ORDER BY A, B RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOW SELECT SUM(A) OVER (ORDER BY A, B RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) S FROM VALUES (1, 2) T(A, B); > exception SYNTAX_ERROR_2 + +SELECT SUM(A) OVER (GROUPS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) S FROM VALUES (1, 2) T(A, B); +> exception SYNTAX_ERROR_2 diff --git a/h2/src/test/org/h2/test/scripts/query-optimisations.sql b/h2/src/test/org/h2/test/scripts/query-optimisations.sql deleted file mode 100644 index 82de791f2d..0000000000 --- a/h2/src/test/org/h2/test/scripts/query-optimisations.sql +++ /dev/null @@ -1,19 +0,0 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- - -create table person(firstname varchar, lastname varchar); -> ok - -create index person_1 on person(firstname, lastname); -> ok - -insert into person select convert(x,varchar) as firstname, (convert(x,varchar) || ' last') as lastname from system_range(1,100); -> update count: 100 - --- Issue #643: verify that when using an index, we use the IN part of the query, if that part of the query --- can directly use the index. 
--- -explain analyze SELECT * FROM person WHERE firstname IN ('FirstName1', 'FirstName2') AND lastname='LastName1'; ->> SELECT "PERSON"."FIRSTNAME", "PERSON"."LASTNAME" FROM "PUBLIC"."PERSON" /* PUBLIC.PERSON_1: FIRSTNAME IN('FirstName1', 'FirstName2') AND LASTNAME = 'LastName1' */ /* scanCount: 1 */ WHERE ("FIRSTNAME" IN('FirstName1', 'FirstName2')) AND ("LASTNAME" = 'LastName1') diff --git a/h2/src/test/org/h2/test/scripts/range_table.sql b/h2/src/test/org/h2/test/scripts/range_table.sql index 9a17c4d171..b3b758b2e4 100644 --- a/h2/src/test/org/h2/test/scripts/range_table.sql +++ b/h2/src/test/org/h2/test/scripts/range_table.sql @@ -1,25 +1,25 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). -- Initial Developer: H2 Group -- explain select * from system_range(1, 2) where x=x+1 and x=1; ->> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* PUBLIC.RANGE_INDEX: X = 1 */ WHERE (("X" = 1) AND ("X" = ("X" + 1))) AND (1 = ("X" + 1)) +>> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* range index: X = CAST(1 AS BIGINT) */ WHERE ("X" = CAST(1 AS BIGINT)) AND ("X" = ("X" + 1)) explain select * from system_range(1, 2) where not (x = 1 and x*2 = 2); ->> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* PUBLIC.RANGE_INDEX */ WHERE ("X" <> 1) OR (("X" * 2) <> 2) +>> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* range index */ WHERE ("X" <> CAST(1 AS BIGINT)) OR (("X" * 2) <> 2) explain select * from system_range(1, 10) where (NOT x >= 5); ->> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 10) /* PUBLIC.RANGE_INDEX: X < 5 */ WHERE "X" < 5 +>> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 10) /* range index: X < CAST(5 AS BIGINT) */ WHERE "X" < CAST(5 AS BIGINT) select (select t1.x from system_range(1,1) t2) from system_range(1,1) t1; -> 
SELECT T1.X FROM SYSTEM_RANGE(1, 1) T2 /* PUBLIC.RANGE_INDEX */ /* scanCount: 2 */ -> ---------------------------------------------------------------------------------- +> (SELECT T1.X FROM SYSTEM_RANGE(1, 1) T2) +> ---------------------------------------- > 1 > rows: 1 EXPLAIN PLAN FOR SELECT * FROM SYSTEM_RANGE(1, 20); ->> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 20) /* PUBLIC.RANGE_INDEX */ +>> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 20) /* range index */ select sum(x) from system_range(2, 1000) r where not exists(select * from system_range(2, 32) r2 where r.x>r2.x and mod(r.x, r2.x)=0); @@ -222,3 +222,14 @@ SELECT * FROM SYSTEM_RANGE(8, 1, -2) WHERE X BETWEEN 3 AND 7 ORDER BY 1 DESC; SELECT COUNT(*) FROM SYSTEM_RANGE(8, 1, -2) WHERE X BETWEEN 3 AND 7; >> 2 + +SELECT X FROM SYSTEM_RANGE(1, 2, ?); +{ +1 +> X +> - +> 1 +> 2 +> rows: 2 +}; +> update count: 0 diff --git a/h2/src/test/org/h2/test/scripts/testScript.sql b/h2/src/test/org/h2/test/scripts/testScript.sql index 63d56a28d3..dd74558e9e 100644 --- a/h2/src/test/org/h2/test/scripts/testScript.sql +++ b/h2/src/test/org/h2/test/scripts/testScript.sql @@ -1,33 +1,38 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- ---- special grammar and test cases --------------------------------------------------------------------------------------------- -create table test(id int) as select 1; +CREATE TABLE TEST(A INT, B INT) AS VALUES (1, 2), (3, 4), (5, 6); > ok -select * from test where id in (select id from test order by 'x'); -> ID -> -- -> 1 -> rows: 1 +UPDATE TOP (1) TEST SET B = 10; +> exception TABLE_OR_VIEW_NOT_FOUND_1 -drop table test; +SET MODE MSSQLServer; > ok -select x, x in(2, 3) i from system_range(1, 2) group by x; -> X I -> - ----- -> 1 FALSE -> 2 TRUE -> rows: 2 +UPDATE TOP (1) TEST SET B = 10; +> update count: 1 -select * from dual join(select x from dual) on 1=1; -> X X +SELECT COUNT(*) FILTER (WHERE B = 10) N, COUNT(*) FILTER (WHERE B <> 10) O FROM TEST; +> N O > - - -> 1 1 +> 1 2 > rows: 1 +UPDATE TEST SET B = 10 WHERE B <> 10; +> update count: 2 + +UPDATE TOP (1) TEST SET B = 10 LIMIT 1; +> exception SYNTAX_ERROR_1 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +--- special grammar and test cases --------------------------------------------------------------------------------------------- select 0 as x from system_range(1, 2) d group by d.x; > X > - @@ -102,34 +107,11 @@ create table test(id int, name varchar) as select 1, 'a'; drop table test; > ok -create sequence seq; -> ok - -select case seq.nextval when 2 then 'two' when 3 then 'three' when 1 then 'one' else 'other' end result from dual; -> RESULT -> ------ -> one -> rows: 1 - -drop sequence seq; -> ok - -select * from dual where x = x + 1 or x in(2, 0); -> X -> - -> rows: 0 - select * from system_range(1,1) order by x limit 3 offset 3; > X > - > rows (ordered): 0 -select * from dual where cast('a' || x as varchar_ignorecase) in ('A1', 'B1'); -> X -> - -> 1 -> rows: 1 - create sequence seq start with 65 increment by 1; > ok @@ -167,10 +149,7 @@ select * from (select * from test order by name limit 1) where id < 10; drop table test; > ok -create table test (id int not 
null, pid int); -> ok - -create index idx_test_pid on test (pid); +create table test (id int primary key, pid int); > ok alter table test add constraint fk_test foreign key (pid) @@ -308,73 +287,17 @@ update test set (id)=(id); drop table test; > ok -create table test(x int) as select x from system_range(1, 2); -> ok - -select * from (select rownum r from test) where r in (1, 2); -> R -> - -> 1 -> 2 -> rows: 2 - -select * from (select rownum r from test) where r = 1 or r = 2; -> R -> - -> 1 -> 2 -> rows: 2 - -drop table test; -> ok - select 2^2; > exception SYNTAX_ERROR_1 -select * from dual where x in (select x from dual group by x order by max(x)); -> X -> - -> 1 -> rows: 1 - -create table test(d decimal(1, 2)); -> exception INVALID_VALUE_SCALE_PRECISION - -call truncate_value('Test 123', 4, false); -> 'Test' -> ------ -> Test -> rows: 1 - -call truncate_value(1234567890.123456789, 4, false); -> exception NUMERIC_VALUE_OUT_OF_RANGE_1 - -call truncate_value(1234567890.123456789, 4, true); -> 1234567890.1234567 -> ------------------ -> 1234567890.1234567 -> rows: 1 - select * from dual where cast('xx' as varchar_ignorecase(1)) = 'X' and cast('x x ' as char(2)) = 'x'; -> X -> - -> 1 +> +> +> > rows: 1 explain select -cast(0 as real), -cast(0 as double); ->> SELECT 0.0, 0.0 FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX */ - -select () empty; -> EMPTY -> ------ -> ROW () -> rows: 1 - -select (1,) one_element; -> ONE_ELEMENT -> ----------- -> ROW (1) -> rows: 1 +>> SELECT CAST(0.0 AS REAL), CAST(0.0 AS DOUBLE PRECISION) select (1) one; > ONE @@ -389,12 +312,7 @@ insert into test values(1), (2), (4); > update count: 3 select * from test order by id limit -1; -> ID -> -- -> 1 -> 2 -> 4 -> rows (ordered): 3 +> exception INVALID_VALUE_2 select * from test order by id limit 0; > ID @@ -415,37 +333,7 @@ select * from test order by id limit 1+1; > rows (ordered): 2 select * from test order by id limit null; -> ID -> -- -> 1 -> 2 -> 4 -> rows (ordered): 3 - -select a.id, a.id 
in(select 4) x from test a, test b where a.id in (b.id, b.id - 1); -> ID X -> -- ----- -> 1 FALSE -> 1 FALSE -> 2 FALSE -> 4 TRUE -> rows: 4 - -select a.id, a.id in(select 4) x from test a, test b where a.id in (b.id, b.id - 1) group by a.id; -> ID X -> -- ----- -> 1 FALSE -> 2 FALSE -> 4 TRUE -> rows: 3 - -select a.id, 4 in(select a.id) x from test a, test b where a.id in (b.id, b.id - 1) group by a.id; -> ID X -> -- ----- -> 1 FALSE -> 2 FALSE -> 4 TRUE -> rows: 3 +> exception INVALID_VALUE_2 delete from test limit 0; > ok @@ -454,26 +342,11 @@ delete from test limit 1; > update count: 1 delete from test limit -1; -> update count: 2 +> exception INVALID_VALUE_2 drop table test; > ok -create domain x as int not null; -> ok - -create table test(id x); -> ok - -insert into test values(null); -> exception NULL_NOT_ALLOWED - -drop table test; -> ok - -drop domain x; -> ok - create table test(id int primary key); > ok @@ -481,11 +354,10 @@ insert into test(id) direct sorted select x from system_range(1, 100); > update count: 100 explain insert into test(id) direct sorted select x from system_range(1, 100); ->> INSERT INTO "PUBLIC"."TEST"("ID") DIRECT SORTED SELECT "X" FROM SYSTEM_RANGE(1, 100) /* PUBLIC.RANGE_INDEX */ +>> INSERT INTO "PUBLIC"."TEST"("ID") DIRECT SELECT "X" FROM SYSTEM_RANGE(1, 100) /* range index */ -explain select * from test limit 10 sample_size 10; -#+mvStore#>> SELECT "TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST 10 ROWS ONLY SAMPLE_SIZE 10 -#-mvStore#>> SELECT "TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ FETCH FIRST 10 ROWS ONLY SAMPLE_SIZE 10 +explain select * from test limit 10; +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST 10 ROWS ONLY drop table test; > ok @@ -497,13 +369,13 @@ insert into test values(1), (2), (3), (4); > update count: 4 explain analyze select * from test where id is null; ->> SELECT "TEST"."ID" FROM "PUBLIC"."TEST" /* 
PUBLIC.PRIMARY_KEY_2: ID IS NULL */ /* scanCount: 1 */ WHERE "ID" IS NULL +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID IS NULL */ /* scanCount: 1 */ WHERE "ID" IS NULL drop table test; > ok explain analyze select 1; ->> SELECT 1 FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX */ /* scanCount: 2 */ +>> SELECT 1 create table test(id int); > ok @@ -534,7 +406,7 @@ select 3 from (select * from dual) union all select 2 from dual; create table a(x int, y int); > ok -create unique index a_xy on a(x, y); +alter table a add constraint a_xy unique(x, y); > ok create table b(x int, y int, foreign key(x, y) references a(x, y)); @@ -569,22 +441,6 @@ select * from (select null as x) where x=1; > - > rows: 0 -create table test(id int primary key, d int); -> ok - -insert into test values(1,1), (2, 1); -> update count: 2 - -select id from test where id in (1, 2) and d = 1; -> ID -> -- -> 1 -> 2 -> rows: 2 - -drop table test; -> ok - create table test(id decimal(10, 2) primary key) as select 0; > ok @@ -612,7 +468,7 @@ select count(*) from (select 1 union (select 2 intersect select 2)) x; create table test(id varchar(1) primary key) as select 'X'; > ok -select count(*) from (select 1 from dual where x in ((select 1 union select 1))) a; +select count(*) from (select 1 from dual where 1 in ((select 1 union select 1))) a; > COUNT(*) > -------- > 1 @@ -633,11 +489,12 @@ drop table test; create table test(id int, constraint pk primary key(id), constraint x unique(id)); > ok -select constraint_name from information_schema.indexes where table_name = 'TEST'; +SELECT CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_NAME = 'TEST'; > CONSTRAINT_NAME > --------------- > PK -> rows: 1 +> X +> rows: 2 drop table test; > ok @@ -648,7 +505,7 @@ create table parent(id int primary key); create table child(id int, parent_id int, constraint child_parent foreign key (parent_id) references parent(id)); > ok -select constraint_name from 
information_schema.indexes where table_name = 'CHILD'; +SELECT CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_NAME = 'CHILD'; > CONSTRAINT_NAME > --------------- > CHILD_PARENT @@ -666,45 +523,6 @@ alter table test alter column id identity; drop table test; > ok -create table test(id int primary key, name varchar); -> ok - -alter table test alter column id int auto_increment; -> ok - -create table otherTest(id int primary key, name varchar); -> ok - -alter table otherTest add constraint fk foreign key(id) references test(id); -> ok - -alter table otherTest drop foreign key fk; -> ok - -create unique index idx on otherTest(name); -> ok - -alter table otherTest drop index idx; -> ok - -drop table otherTest; -> ok - -insert into test(id) values(1); -> update count: 1 - -alter table test change column id id2 int; -> ok - -select id2 from test; -> ID2 -> --- -> 1 -> rows: 1 - -drop table test; -> ok - create table test(id identity); > ok @@ -744,7 +562,7 @@ select * from(select 1 from system_range(1, 2) group by sin(x) order by sin(x)); > 1 > rows: 2 -create table parent as select 1 id, 2 x; +create table parent(id int primary key, x int) as select 1 id, 2 x; > ok create table child(id int references parent(id)) as select 1; @@ -765,17 +583,17 @@ create domain int as varchar; create memory table test(id int); > ok -script nodata nopasswords nosettings; +script nodata nopasswords nosettings noversion; > SCRIPT -> -------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> CREATE DOMAIN "INT" AS VARCHAR; -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" "INT" ); +> ----------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 4 +> CREATE DOMAIN "PUBLIC"."INT" AS CHARACTER VARYING; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" "PUBLIC"."INT" ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 4 SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS 
WHERE TABLE_NAME = 'TEST'; ->> 12 +>> CHARACTER VARYING drop table test; > ok @@ -881,9 +699,9 @@ drop table test; create table test(t0 timestamp(0), t1 timestamp(1), t4 timestamp(4)); > ok -select column_name, numeric_scale from information_schema.columns c where c.table_name = 'TEST' order by column_name; -> COLUMN_NAME NUMERIC_SCALE -> ----------- ------------- +select column_name, datetime_precision from information_schema.columns c where c.table_name = 'TEST' order by column_name; +> COLUMN_NAME DATETIME_PRECISION +> ----------- ------------------ > T0 0 > T1 1 > T4 4 @@ -892,43 +710,6 @@ select column_name, numeric_scale from information_schema.columns c where c.tabl drop table test; > ok -create table test(id int); -> ok - -insert into test values(null), (1); -> update count: 2 - -select * from test where id not in (select id from test where 1=0); -> ID -> ---- -> 1 -> null -> rows: 2 - -select * from test where null not in (select id from test where 1=0); -> ID -> ---- -> 1 -> null -> rows: 2 - -select * from test where not (id in (select id from test where 1=0)); -> ID -> ---- -> 1 -> null -> rows: 2 - -select * from test where not (null in (select id from test where 1=0)); -> ID -> ---- -> 1 -> null -> rows: 2 - -drop table test; -> ok - create table test(a int); > ok @@ -1063,26 +844,29 @@ create table test(id int primary key, lastname varchar, firstname varchar, paren alter table test add constraint name unique (lastname, firstname); > ok -SELECT CONSTRAINT_NAME, UNIQUE_INDEX_NAME, COLUMN_LIST FROM INFORMATION_SCHEMA.CONSTRAINTS ; -> CONSTRAINT_NAME UNIQUE_INDEX_NAME COLUMN_LIST -> --------------- ----------------- ------------------ -> CONSTRAINT_2 PRIMARY_KEY_2 ID -> CONSTRAINT_27 PRIMARY_KEY_2 PARENT -> NAME NAME_INDEX_2 LASTNAME,FIRSTNAME +SELECT CONSTRAINT_NAME, INDEX_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS; +> CONSTRAINT_NAME INDEX_NAME +> --------------- ------------------ +> CONSTRAINT_2 PRIMARY_KEY_2 +> CONSTRAINT_27 CONSTRAINT_INDEX_2 
+> NAME NAME_INDEX_2 > rows: 3 +SELECT CONSTRAINT_NAME, COLUMN_NAME FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE; +> CONSTRAINT_NAME COLUMN_NAME +> --------------- ----------- +> CONSTRAINT_2 ID +> CONSTRAINT_27 PARENT +> NAME FIRSTNAME +> NAME LASTNAME +> rows: 4 + drop table test; > ok -alter table information_schema.help rename to information_schema.help2; +ALTER TABLE INFORMATION_SCHEMA.INFORMATION_SCHEMA_CATALOG_NAME RENAME TO INFORMATION_SCHEMA.CAT; > exception FEATURE_NOT_SUPPORTED_1 -CREATE TABLE test (id int(25) NOT NULL auto_increment, name varchar NOT NULL, PRIMARY KEY (id,name)); -> ok - -drop table test; -> ok - CREATE TABLE test (id bigserial NOT NULL primary key); > ok @@ -1109,14 +893,14 @@ select * from test order by id; > 2 NaN NaN > rows (ordered): 3 -script nopasswords nosettings; +script nopasswords nosettings noversion; > SCRIPT -> ----------------------------------------------------------------------------------------------------------------------------------- -> -- 3 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT, "D" DOUBLE, "F" FLOAT ); +> ----------------------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> INSERT INTO "PUBLIC"."TEST" VALUES (0, POWER(0, -1), POWER(0, -1)), (1, (-POWER(0, -1)), (-POWER(0, -1))), (2, SQRT(-1), SQRT(-1)); -> rows: 4 +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER, "D" DOUBLE PRECISION, "F" FLOAT ); +> -- 3 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (0, 'Infinity', 'Infinity'), (1, '-Infinity', '-Infinity'), (2, 'NaN', 'NaN'); +> rows (ordered): 4 DROP TABLE TEST; > ok @@ -1144,31 +928,6 @@ drop schema a cascade; drop schema b cascade; > ok -create table t1 (id int primary key); -> ok - -create table t2 (id int primary key); -> ok - -insert into t1 select x from system_range(1, 1000); -> update count: 1000 - -insert into t2 select x from 
system_range(1, 1000); -> update count: 1000 - -explain select count(*) from t1 where t1.id in ( select t2.id from t2 ); -#+mvStore#>> SELECT COUNT(*) FROM "PUBLIC"."T1" /* PUBLIC.PRIMARY_KEY_A: ID IN(SELECT T2.ID FROM PUBLIC.T2 /++ PUBLIC.T2.tableScan ++/) */ WHERE "T1"."ID" IN( SELECT "T2"."ID" FROM "PUBLIC"."T2" /* PUBLIC.T2.tableScan */) -#-mvStore#>> SELECT COUNT(*) FROM "PUBLIC"."T1" /* PUBLIC.PRIMARY_KEY_A: ID IN(SELECT T2.ID FROM PUBLIC.T2 /++ PUBLIC.PRIMARY_KEY_A5 ++/) */ WHERE "T1"."ID" IN( SELECT "T2"."ID" FROM "PUBLIC"."T2" /* PUBLIC.PRIMARY_KEY_A5 */) - -select count(*) from t1 where t1.id in ( select t2.id from t2 ); -> COUNT(*) -> -------- -> 1000 -> rows: 1 - -drop table t1, t2; -> ok - CREATE TABLE p(d date); > ok @@ -1176,22 +935,16 @@ INSERT INTO p VALUES('-1-01-01'), ('0-01-01'), ('0001-01-01'); > update count: 3 select d, year(d), extract(year from d), cast(d as timestamp) from p; -> D YEAR(D) EXTRACT(YEAR FROM D) CAST(D AS TIMESTAMP) -> ---------- ------- -------------------- -------------------- -> -1-01-01 -1 -1 -1-01-01 00:00:00 -> 0-01-01 0 0 0-01-01 00:00:00 -> 0001-01-01 1 1 0001-01-01 00:00:00 +> D EXTRACT(YEAR FROM D) EXTRACT(YEAR FROM D) CAST(D AS TIMESTAMP) +> ----------- -------------------- -------------------- -------------------- +> -0001-01-01 -1 -1 -0001-01-01 00:00:00 +> 0000-01-01 0 0 0000-01-01 00:00:00 +> 0001-01-01 1 1 0001-01-01 00:00:00 > rows: 3 drop table p; > ok -(SELECT X FROM DUAL ORDER BY X+2) UNION SELECT X FROM DUAL; -> X -> - -> 1 -> rows: 1 - create table test(a int, b int default 1); > ok @@ -1266,10 +1019,10 @@ INSERT INTO TEST VALUES (1, 'Mouse', 'MOUSE'), (2, 'MOUSE', 'Mouse'); > update count: 2 SELECT * FROM TEST; -> ID LABEL LOOKUP -> -- ----- ------ -> 1 Mouse MOUSE -> 2 MOUSE Mouse +> ID LABEL LOOKUP +> -- ------ ------ +> 1 Mouse MOUSE +> 2 MOUSE Mouse > rows: 2 DROP TABLE TEST; @@ -1285,7 +1038,7 @@ call set(1, 2); > exception CAN_ONLY_ASSIGN_TO_VARIABLE_1 select x, set(@t, ifnull(@t, 0) + x) from 
system_range(1, 3); -> X SET(@T, (IFNULL(@T, 0) + X)) +> X SET(@T, COALESCE(@T, 0) + X) > - ---------------------------- > 1 1 > 2 3 @@ -1317,12 +1070,6 @@ select * from ((test d1 inner join test d2 on d1.id = d2.id) inner join test d3 drop table test; > ok -select count(*) from system_range(1, 2) where x in(1, 1, 1); -> COUNT(*) -> -------- -> 1 -> rows: 1 - create table person(id bigint auto_increment, name varchar(100)); > ok @@ -1389,32 +1136,15 @@ ALTER TABLE test ALTER COLUMN ID2 RENAME TO ID; drop table test; > ok -create table test(id int primary key, data array); -> ok - -insert into test values(1, ARRAY[1, 1]), (2, ARRAY[1, 2]), (3, ARRAY[1, 1, 1]); -> update count: 3 - -select * from test order by data; -> ID DATA -> -- --------- -> 1 [1, 1] -> 3 [1, 1, 1] -> 2 [1, 2] -> rows (ordered): 3 - -drop table test; -> ok - CREATE TABLE FOO (A CHAR(10)); > ok CREATE TABLE BAR AS SELECT * FROM FOO; > ok -select table_name, numeric_precision from information_schema.columns where column_name = 'A'; -> TABLE_NAME NUMERIC_PRECISION -> ---------- ----------------- +select table_name, character_maximum_length from information_schema.columns where column_name = 'A'; +> TABLE_NAME CHARACTER_MAXIMUM_LENGTH +> ---------- ------------------------ > BAR 10 > FOO 10 > rows: 2 @@ -1447,7 +1177,7 @@ where cnt < 1000 order by dir_num asc; explain select * from (select dir_num, count(*) as cnt from multi_pages t, b_holding bh where t.bh_id=bh.id and bh.site='Hello' group by dir_num) as x where cnt < 1000 order by dir_num asc; ->> SELECT "X"."DIR_NUM", "X"."CNT" FROM ( SELECT "DIR_NUM", COUNT(*) AS "CNT" FROM "PUBLIC"."MULTI_PAGES" "T" INNER JOIN "PUBLIC"."B_HOLDING" "BH" ON 1=1 WHERE ("BH"."SITE" = 'Hello') AND ("T"."BH_ID" = "BH"."ID") GROUP BY "DIR_NUM" ) "X" /* SELECT DIR_NUM, COUNT(*) AS CNT FROM PUBLIC.MULTI_PAGES T /++ PUBLIC.MULTI_PAGES.tableScan ++/ INNER JOIN PUBLIC.B_HOLDING BH /++ PUBLIC.PRIMARY_KEY_3: ID = T.BH_ID ++/ ON 1=1 WHERE (BH.SITE = 'Hello') AND (T.BH_ID = 
BH.ID) GROUP BY DIR_NUM HAVING COUNT(*) <= ?1: CNT < 1000 */ WHERE "CNT" < 1000 ORDER BY 1 +>> SELECT "X"."DIR_NUM", "X"."CNT" FROM ( SELECT "DIR_NUM", COUNT(*) AS "CNT" FROM "PUBLIC"."MULTI_PAGES" "T" INNER JOIN "PUBLIC"."B_HOLDING" "BH" ON 1=1 WHERE ("BH"."SITE" = 'Hello') AND ("T"."BH_ID" = "BH"."ID") GROUP BY "DIR_NUM" ) "X" /* SELECT DIR_NUM, COUNT(*) AS CNT FROM PUBLIC.MULTI_PAGES T /* PUBLIC.MULTI_PAGES.tableScan */ INNER JOIN PUBLIC.B_HOLDING BH /* PUBLIC.PRIMARY_KEY_3: ID = T.BH_ID */ ON 1=1 WHERE (BH.SITE = 'Hello') AND (T.BH_ID = BH.ID) GROUP BY DIR_NUM HAVING COUNT(*) <= ?1: CNT < CAST(1000 AS BIGINT) */ WHERE "CNT" < CAST(1000 AS BIGINT) ORDER BY 1 select dir_num, count(*) as cnt from multi_pages t, b_holding bh where t.bh_id=bh.id and bh.site='Hello' group by dir_num @@ -1462,14 +1192,6 @@ having count(*) < 1000 order by dir_num asc; drop table multi_pages, b_holding; > ok -select * from dual where x = 1000000000000000000000; -> X -> - -> rows: 0 - -select * from dual where x = 'Hello'; -> exception DATA_CONVERSION_ERROR_1 - create table test(id smallint primary key); > ok @@ -1477,11 +1199,10 @@ insert into test values(1), (2), (3); > update count: 3 explain select * from test where id = 1; ->> SELECT "TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 EXPLAIN SELECT * FROM TEST WHERE ID = (SELECT MAX(ID) FROM TEST); -#+mvStore#>> SELECT "TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = (SELECT MAX(ID) FROM PUBLIC.TEST /++ PUBLIC.TEST.tableScan ++/ /++ direct lookup ++/) */ WHERE "ID" = (SELECT MAX("ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */) -#-mvStore#>> SELECT "TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = (SELECT MAX(ID) FROM PUBLIC.TEST /++ PUBLIC.PRIMARY_KEY_2 ++/ /++ direct lookup ++/) */ WHERE "ID" = (SELECT MAX("ID") FROM "PUBLIC"."TEST" /* 
PUBLIC.PRIMARY_KEY_2 */ /* direct lookup */) +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = (SELECT MAX(ID) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ /* direct lookup */) */ WHERE "ID" = (SELECT MAX("ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */) drop table test; > ok @@ -1493,22 +1214,10 @@ insert into test values(1), (2), (3); > update count: 3 explain select * from test where id = 3; ->> SELECT "TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 3 */ WHERE "ID" = 3 +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 3 */ WHERE "ID" = 3 explain select * from test where id = 255; ->> SELECT "TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 255 */ WHERE "ID" = 255 - -drop table test; -> ok - -create table test(id int primary key); -> ok - -insert into test values(1), (2), (3); -> update count: 3 - -explain select * from test where id in(1, 2, null); ->> SELECT "TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID IN(1, 2, NULL) */ WHERE "ID" IN(1, 2, NULL) +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 255 */ WHERE "ID" = 255 drop table test; > ok @@ -1624,7 +1333,7 @@ DROP TABLE A; set autocommit true; > ok -CREATE TABLE PARENT(ID INT); +CREATE TABLE PARENT(ID INT PRIMARY KEY); > ok CREATE TABLE CHILD(PID INT); @@ -1778,7 +1487,7 @@ select * from test where name = -1 and name = id; > rows: 1 explain select * from test where name = -1 and name = id; ->> SELECT "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = -1 */ WHERE (("NAME" = -1) AND ("NAME" = "ID")) AND ("ID" = -1) +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = -1 */ WHERE ("NAME" = -1) AND ("NAME" = "ID") DROP TABLE TEST; > ok @@ -1819,10 +1528,10 @@ INSERT INTO TEST VALUES(1, TRUE, 'Hello'), (2, FALSE, 'World'); > update count: 2 EXPLAIN SELECT * FROM TEST 
WHERE FLAG; ->> SELECT "TEST"."ID", "TEST"."FLAG", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_FLAG: FLAG = TRUE */ WHERE "FLAG" +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."FLAG", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_FLAG: FLAG = TRUE */ WHERE "FLAG" EXPLAIN SELECT * FROM TEST WHERE FLAG AND NAME>'I'; ->> SELECT "TEST"."ID", "TEST"."FLAG", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_FLAG: FLAG = TRUE AND NAME > 'I' */ WHERE "FLAG" AND ("NAME" > 'I') +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."FLAG", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_FLAG: FLAG = TRUE AND NAME > 'I' */ WHERE "FLAG" AND ("NAME" > 'I') DROP TABLE TEST; > ok @@ -1852,7 +1561,7 @@ create table test(id int); > ok explain select id+1 a from test group by id+1; ->> SELECT ("ID" + 1) AS "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "ID" + 1 +>> SELECT "ID" + 1 AS "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "ID" + 1 drop table test; > ok @@ -1860,7 +1569,7 @@ drop table test; set autocommit off; > ok -set search_path = public, information_schema; +set schema_search_path = public, information_schema; > ok select table_name from tables where 1=0; @@ -1868,7 +1577,7 @@ select table_name from tables where 1=0; > ---------- > rows: 0 -set search_path = public; +set schema_search_path = public; > ok set autocommit on; @@ -1919,18 +1628,17 @@ insert into test set id = 3, c = 'abcde ', v = 'abcde'; > update count: 1 select distinct length(c) from test order by length(c); -> LENGTH(C) -> --------- -> 1 +> CHAR_LENGTH(C) +> -------------- > 5 -> rows (ordered): 2 +> rows (ordered): 1 select id, c, v, length(c), length(v) from test order by id; -> ID C V LENGTH(C) LENGTH(V) -> -- ----- ----- --------- --------- -> 1 a a 1 1 -> 2 a a 1 2 -> 3 abcde abcde 5 5 +> ID C V CHAR_LENGTH(C) CHAR_LENGTH(V) +> -- ----- ----- -------------- -------------- +> 1 a a 5 1 +> 2 a a 5 2 +> 3 abcde abcde 5 5 > rows (ordered): 3 select id 
from test where c='a' order by id; @@ -1965,13 +1673,13 @@ INSERT INTO TEST VALUES(1, '10', NULL), (2, '0', NULL); > update count: 2 SELECT LEAST(ID, C, NAME), GREATEST(ID, C, NAME), LEAST(NULL, C), GREATEST(NULL, NULL), ID FROM TEST ORDER BY ID; -> LEAST(ID, C, NAME) GREATEST(ID, C, NAME) LEAST(NULL, C) NULL ID -> ------------------ --------------------- -------------- ---- -- -> 1 10 null null 1 -> 0 2 null null 2 +> LEAST(ID, C, NAME) GREATEST(ID, C, NAME) LEAST(NULL, C) CAST(NULL AS CHARACTER VARYING) ID +> ------------------ --------------------- -------------- ------------------------------- -- +> 1 10 null null 1 +> 0 2 null null 2 > rows (ordered): 2 -DROP TABLE IF EXISTS TEST; +DROP TABLE TEST; > ok create table people (family varchar(1) not null, person varchar(1) not null); @@ -2005,21 +1713,6 @@ select (1, 2); > ROW (1, 2) > rows: 1 -create table array_test(x array); -> ok - -insert into array_test values(ARRAY[1, 2, 3]), (ARRAY[2, 3, 4]); -> update count: 2 - -select * from array_test where x = ARRAY[1, 2, 3]; -> X -> --------- -> [1, 2, 3] -> rows: 1 - -drop table array_test; -> ok - select * from (select 1), (select 2); > 1 2 > - - @@ -2065,21 +1758,6 @@ drop table t1; drop table t2; > ok -create constant abc value 1; -> ok - -call abc; -> 1 -> - -> 1 -> rows: 1 - -drop all objects; -> ok - -call abc; -> exception COLUMN_NOT_FOUND_1 - CREATE TABLE test (family_name VARCHAR_IGNORECASE(63) NOT NULL); > ok @@ -2120,26 +1798,25 @@ create memory table test(id int primary key, data clob); insert into test values(1, 'abc' || space(20)); > update count: 1 -script nopasswords nosettings blocksize 10; +script nopasswords nosettings noversion blocksize 10; > SCRIPT -> -------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CALL SYSTEM_COMBINE_BLOB(-1); -> CREATE ALIAS IF 
NOT EXISTS SYSTEM_COMBINE_BLOB FOR "org.h2.command.dml.ScriptCommand.combineBlob"; -> CREATE ALIAS IF NOT EXISTS SYSTEM_COMBINE_CLOB FOR "org.h2.command.dml.ScriptCommand.combineClob"; -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "DATA" CLOB ); -> CREATE PRIMARY KEY SYSTEM_LOB_STREAM_PRIMARY_KEY ON SYSTEM_LOB_STREAM(ID, PART); -> CREATE TABLE IF NOT EXISTS SYSTEM_LOB_STREAM(ID INT NOT NULL, PART INT NOT NULL, CDATA VARCHAR, BDATA BINARY); +> ---------------------------------------------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> DROP ALIAS IF EXISTS SYSTEM_COMBINE_BLOB; -> DROP ALIAS IF EXISTS SYSTEM_COMBINE_CLOB; -> DROP TABLE IF EXISTS SYSTEM_LOB_STREAM; -> INSERT INTO "PUBLIC"."TEST" VALUES (1, SYSTEM_COMBINE_CLOB(0)); +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "DATA" CHARACTER LARGE OBJECT ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> CREATE CACHED LOCAL TEMPORARY TABLE IF NOT EXISTS SYSTEM_LOB_STREAM(ID INT NOT NULL, PART INT NOT NULL, CDATA VARCHAR, BDATA VARBINARY); +> ALTER TABLE SYSTEM_LOB_STREAM ADD CONSTRAINT SYSTEM_LOB_STREAM_PRIMARY_KEY PRIMARY KEY(ID, PART); +> CREATE ALIAS IF NOT EXISTS SYSTEM_COMBINE_CLOB FOR 'org.h2.command.dml.ScriptCommand.combineClob'; +> CREATE ALIAS IF NOT EXISTS SYSTEM_COMBINE_BLOB FOR 'org.h2.command.dml.ScriptCommand.combineBlob'; > INSERT INTO SYSTEM_LOB_STREAM VALUES(0, 0, 'abc ', NULL); > INSERT INTO SYSTEM_LOB_STREAM VALUES(0, 1, ' ', NULL); > INSERT INTO SYSTEM_LOB_STREAM VALUES(0, 2, ' ', NULL); -> rows: 16 +> INSERT INTO "PUBLIC"."TEST" VALUES (1, SYSTEM_COMBINE_CLOB(0)); +> DROP TABLE IF EXISTS SYSTEM_LOB_STREAM; +> DROP ALIAS IF EXISTS SYSTEM_COMBINE_CLOB; +> DROP ALIAS IF EXISTS SYSTEM_COMBINE_BLOB; +> rows (ordered): 15 drop table test; > ok @@ -2160,36 +1837,6 @@ SELECT DISTINCT * 
FROM TEST ORDER BY ID; DROP TABLE TEST; > ok -create table Foo (A varchar(20), B integer); -> ok - -insert into Foo (A, B) values ('abcd', 1), ('abcd', 2); -> update count: 2 - -select * from Foo where A like 'abc%' escape '\' AND B=1; -> A B -> ---- - -> abcd 1 -> rows: 1 - -drop table Foo; -> ok - -create table test(id int, b binary); -> ok - -insert into test values(1, 'face'); -> update count: 1 - -select * from test where b = 'FaCe'; -> ID B -> -- ---- -> 1 face -> rows: 1 - -drop table test; -> ok - create sequence main_seq; > ok @@ -2202,11 +1849,11 @@ create sequence "TestSchema"."TestSeq"; create sequence "TestSchema"."ABC"; > ok -select currval('main_seq'), currval('TestSchema', 'TestSeq'), nextval('TestSchema', 'ABC'); -> CURRVAL('main_seq') CURRVAL('TestSchema', 'TestSeq') NEXTVAL('TestSchema', 'ABC') -> ------------------- -------------------------------- ---------------------------- -> 0 0 1 -> rows: 1 +select currval('main_seq'), currval('TestSchema', 'TestSeq'); +> exception CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1 + +select nextval('TestSchema', 'ABC'); +>> 1 set autocommit off; > ok @@ -2250,19 +1897,19 @@ CREATE TABLE parent(id int PRIMARY KEY); CREATE TABLE child(parentid int REFERENCES parent); > ok -select * from INFORMATION_SCHEMA.CROSS_REFERENCES; -> PKTABLE_CATALOG PKTABLE_SCHEMA PKTABLE_NAME PKCOLUMN_NAME FKTABLE_CATALOG FKTABLE_SCHEMA FKTABLE_NAME FKCOLUMN_NAME ORDINAL_POSITION UPDATE_RULE DELETE_RULE FK_NAME PK_NAME DEFERRABILITY -> --------------- -------------- ------------ ------------- --------------- -------------- ------------ ------------- ---------------- ----------- ----------- ------------ ------------- ------------- -> SCRIPT PUBLIC PARENT ID SCRIPT PUBLIC CHILD PARENTID 1 1 1 CONSTRAINT_3 PRIMARY_KEY_8 7 +TABLE INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME UNIQUE_CONSTRAINT_CATALOG UNIQUE_CONSTRAINT_SCHEMA UNIQUE_CONSTRAINT_NAME MATCH_OPTION UPDATE_RULE 
DELETE_RULE +> ------------------ ----------------- --------------- ------------------------- ------------------------ ---------------------- ------------ ----------- ----------- +> SCRIPT PUBLIC CONSTRAINT_3 SCRIPT PUBLIC CONSTRAINT_8 NONE RESTRICT RESTRICT > rows: 1 ALTER TABLE parent ADD COLUMN name varchar; > ok -select * from INFORMATION_SCHEMA.CROSS_REFERENCES; -> PKTABLE_CATALOG PKTABLE_SCHEMA PKTABLE_NAME PKCOLUMN_NAME FKTABLE_CATALOG FKTABLE_SCHEMA FKTABLE_NAME FKCOLUMN_NAME ORDINAL_POSITION UPDATE_RULE DELETE_RULE FK_NAME PK_NAME DEFERRABILITY -> --------------- -------------- ------------ ------------- --------------- -------------- ------------ ------------- ---------------- ----------- ----------- ------------ -------------- ------------- -> SCRIPT PUBLIC PARENT ID SCRIPT PUBLIC CHILD PARENTID 1 1 1 CONSTRAINT_3 PRIMARY_KEY_82 7 +TABLE INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME UNIQUE_CONSTRAINT_CATALOG UNIQUE_CONSTRAINT_SCHEMA UNIQUE_CONSTRAINT_NAME MATCH_OPTION UPDATE_RULE DELETE_RULE +> ------------------ ----------------- --------------- ------------------------- ------------------------ ---------------------- ------------ ----------- ----------- +> SCRIPT PUBLIC CONSTRAINT_3 SCRIPT PUBLIC CONSTRAINT_8 NONE RESTRICT RESTRICT > rows: 1 drop table parent, child; @@ -2284,10 +1931,10 @@ create table test(id int, name varchar); > ok explain select * from test; ->> SELECT "TEST"."ID", "TEST"."NAME" FROM "TEST_SCHEMA"."TEST" /* TEST_SCHEMA.TEST.tableScan */ +>> SELECT "TEST_SCHEMA"."TEST"."ID", "TEST_SCHEMA"."TEST"."NAME" FROM "TEST_SCHEMA"."TEST" /* TEST_SCHEMA.TEST.tableScan */ explain select * from public.test; ->> SELECT "TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ drop schema TEST_SCHEMA cascade; > ok @@ -2355,36 +2002,6 @@ select timestamp '2001-02-03T10:30:33'; > 2001-02-03 10:30:33 > rows: 1 
-CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'), (2, 'World'); -> update count: 2 - -select * from test where id in (select id from test); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -select * from test where id in ((select id from test)); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -select * from test where id in (((select id from test))); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -DROP TABLE TEST; -> ok - create table test(id int); > ok @@ -2460,14 +2077,14 @@ CREATE ALIAS PARSE_INT2 FOR "java.lang.Integer.parseInt(java.lang.String, int)"; > ok select min(SUBSTRING(random_uuid(), 15,1)='4') from system_range(1, 10); -> MIN(SUBSTRING(RANDOM_UUID(), 15, 1) = '4') -> ------------------------------------------ +> MIN(SUBSTRING(RANDOM_UUID() FROM 15 FOR 1) = '4') +> ------------------------------------------------- > TRUE > rows: 1 select min(8=bitand(12, PARSE_INT2(SUBSTRING(random_uuid(), 20,1), 16))) from system_range(1, 10); -> MIN(8 = BITAND(12, PUBLIC.PARSE_INT2(SUBSTRING(RANDOM_UUID(), 20, 1), 16))) -> --------------------------------------------------------------------------- +> MIN(8 = BITAND(12, PUBLIC.PARSE_INT2(SUBSTRING(RANDOM_UUID() FROM 20 FOR 1), 16))) +> ---------------------------------------------------------------------------------- > TRUE > rows: 1 @@ -2493,13 +2110,14 @@ insert into test values('aa'); insert into test values('AA'); > update count: 1 -script nodata nopasswords nosettings; +script nodata nopasswords nosettings noversion; > SCRIPT -> ------------------------------------------------------------------------------------- -> -- 2 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "NAME" VARCHAR CHECK ("NAME" = UPPER("NAME")) ); +> --------------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 3 +> CREATE 
MEMORY TABLE "PUBLIC"."TEST"( "NAME" CHARACTER VARYING ); +> -- 2 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" CHECK("NAME" = UPPER("NAME")) NOCHECK; +> rows (ordered): 4 drop table test; > ok @@ -2527,65 +2145,62 @@ insert into address(id, name, name2) values(3, 'test_abc', 'test@gmail'); insert into address2(name) values('test@abc'); > exception TABLE_OR_VIEW_NOT_FOUND_1 -CREATE DOMAIN STRING AS VARCHAR(255) DEFAULT '' NOT NULL; -> ok - -CREATE DOMAIN IF NOT EXISTS STRING AS VARCHAR(255) DEFAULT '' NOT NULL; +CREATE DOMAIN STRING AS VARCHAR(255) DEFAULT ''; > ok -CREATE DOMAIN STRING1 AS VARCHAR NULL; +CREATE DOMAIN IF NOT EXISTS STRING AS VARCHAR(255) DEFAULT ''; > ok -CREATE DOMAIN STRING2 AS VARCHAR NOT NULL; +CREATE DOMAIN STRING1 AS VARCHAR; > ok -CREATE DOMAIN STRING3 AS VARCHAR DEFAULT ''; +CREATE DOMAIN STRING2 AS VARCHAR DEFAULT ''; > ok -create domain string_x as string3; +create domain string_x as string2; > ok -create memory table test(a string, b string1, c string2, d string3); +create memory table test(a string, b string1, c string2); > ok -insert into test(c) values('x'); +insert into test(b) values('x'); > update count: 1 select * from test; -> A B C D -> - ---- - ------- -> null x -> rows: 1 - -select DOMAIN_NAME, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, PRECISION, SCALE, TYPE_NAME, SELECTIVITY, CHECK_CONSTRAINT, REMARKS, SQL from information_schema.domains; -> DOMAIN_NAME COLUMN_DEFAULT IS_NULLABLE DATA_TYPE PRECISION SCALE TYPE_NAME SELECTIVITY CHECK_CONSTRAINT REMARKS SQL -> ----------- -------------- ----------- --------- ---------- ----- --------- ----------- ------------------------------------------------------------------- ------- ------------------------------------------------------------------------------------------------------------------------------------ -> EMAIL null YES 12 200 0 VARCHAR 50 (POSITION('@', "VALUE") > 1) CREATE DOMAIN "EMAIL" AS VARCHAR(200) CHECK 
(POSITION('@', "VALUE") > 1) -> GMAIL '@gmail.com' YES 12 200 0 VARCHAR 50 ((POSITION('@', "VALUE") > 1) AND (POSITION('gmail', "VALUE") > 1)) CREATE DOMAIN "GMAIL" AS VARCHAR(200) DEFAULT '@gmail.com' CHECK ((POSITION('@', "VALUE") > 1) AND (POSITION('gmail', "VALUE") > 1)) -> STRING '' NO 12 255 0 VARCHAR 50 CREATE DOMAIN "STRING" AS VARCHAR(255) DEFAULT '' NOT NULL -> STRING1 null YES 12 2147483647 0 VARCHAR 50 CREATE DOMAIN "STRING1" AS VARCHAR -> STRING2 null NO 12 2147483647 0 VARCHAR 50 CREATE DOMAIN "STRING2" AS VARCHAR NOT NULL -> STRING3 '' YES 12 2147483647 0 VARCHAR 50 CREATE DOMAIN "STRING3" AS VARCHAR DEFAULT '' -> STRING_X '' YES 12 2147483647 0 VARCHAR 50 CREATE DOMAIN "STRING_X" AS VARCHAR DEFAULT '' -> rows: 7 +> A B C +> - - ------- +> x +> rows: 1 + +select DOMAIN_NAME, DOMAIN_DEFAULT, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, PARENT_DOMAIN_NAME, REMARKS from information_schema.domains; +> DOMAIN_NAME DOMAIN_DEFAULT DATA_TYPE CHARACTER_MAXIMUM_LENGTH PARENT_DOMAIN_NAME REMARKS +> ----------- -------------- ----------------- ------------------------ ------------------ ------- +> EMAIL null CHARACTER VARYING 200 null null +> GMAIL '@gmail.com' CHARACTER VARYING 200 EMAIL null +> STRING '' CHARACTER VARYING 255 null null +> STRING1 null CHARACTER VARYING 1048576 null null +> STRING2 '' CHARACTER VARYING 1048576 null null +> STRING_X null CHARACTER VARYING 1048576 STRING2 null +> rows: 6 -script nodata nopasswords nosettings; +script nodata nopasswords nosettings noversion; > SCRIPT -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +> ------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE DOMAIN "PUBLIC"."EMAIL" AS CHARACTER VARYING(200); +> CREATE DOMAIN 
"PUBLIC"."STRING" AS CHARACTER VARYING(255) DEFAULT ''; +> CREATE DOMAIN "PUBLIC"."STRING1" AS CHARACTER VARYING; +> CREATE DOMAIN "PUBLIC"."STRING2" AS CHARACTER VARYING DEFAULT ''; +> CREATE DOMAIN "PUBLIC"."GMAIL" AS "PUBLIC"."EMAIL" DEFAULT '@gmail.com'; +> CREATE DOMAIN "PUBLIC"."STRING_X" AS "PUBLIC"."STRING2"; +> CREATE MEMORY TABLE "PUBLIC"."ADDRESS"( "ID" INTEGER NOT NULL, "NAME" "PUBLIC"."EMAIL", "NAME2" "PUBLIC"."GMAIL" ); +> ALTER TABLE "PUBLIC"."ADDRESS" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_E" PRIMARY KEY("ID"); > -- 1 +/- SELECT COUNT(*) FROM PUBLIC.ADDRESS; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A" "PUBLIC"."STRING", "B" "PUBLIC"."STRING1", "C" "PUBLIC"."STRING2" ); > -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."ADDRESS" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_E" PRIMARY KEY("ID"); -> CREATE DOMAIN "EMAIL" AS VARCHAR(200) CHECK (POSITION('@', "VALUE") > 1); -> CREATE DOMAIN "GMAIL" AS VARCHAR(200) DEFAULT '@gmail.com' CHECK ((POSITION('@', "VALUE") > 1) AND (POSITION('gmail', "VALUE") > 1)); -> CREATE DOMAIN "STRING" AS VARCHAR(255) DEFAULT '' NOT NULL; -> CREATE DOMAIN "STRING1" AS VARCHAR; -> CREATE DOMAIN "STRING2" AS VARCHAR NOT NULL; -> CREATE DOMAIN "STRING3" AS VARCHAR DEFAULT ''; -> CREATE DOMAIN "STRING_X" AS VARCHAR DEFAULT ''; -> CREATE MEMORY TABLE "PUBLIC"."ADDRESS"( "ID" INT NOT NULL, "NAME" "EMAIL" CHECK (POSITION('@', "NAME") > 1), "NAME2" "GMAIL" DEFAULT '@gmail.com' CHECK ((POSITION('@', "NAME2") > 1) AND (POSITION('gmail', "NAME2") > 1)) ); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A" "STRING" DEFAULT '' NOT NULL, "B" "STRING1", "C" "STRING2" NOT NULL, "D" "STRING3" DEFAULT '' ); -> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 13 +> ALTER DOMAIN "PUBLIC"."EMAIL" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_3" CHECK(LOCATE('@', VALUE) > 1) NOCHECK; +> ALTER DOMAIN "PUBLIC"."GMAIL" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4" CHECK(LOCATE('gmail', VALUE) > 1) NOCHECK; +> rows (ordered): 14 drop table test; > ok @@ 
-2596,10 +2211,7 @@ drop domain string; drop domain string1; > ok -drop domain string2; -> ok - -drop domain string3; +drop domain string2 cascade; > ok drop domain string_x; @@ -2608,7 +2220,7 @@ drop domain string_x; drop table address; > ok -drop domain email; +drop domain email cascade; > ok drop domain gmail; @@ -2618,7 +2230,7 @@ create force view address_view as select * from address; > ok create table address(id identity, name varchar check instr(value, '@') > 1); -> exception COLUMN_NOT_FOUND_1 +> exception SYNTAX_ERROR_2 create table address(id identity, name varchar check instr(name, '@') > 1); > ok @@ -2632,14 +2244,14 @@ drop table address; create memory table a(k10 blob(10k), m20 blob(20m), g30 clob(30g)); > ok -script NODATA NOPASSWORDS NOSETTINGS drop; +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION DROP; > SCRIPT -> ----------------------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.A; -> CREATE MEMORY TABLE "PUBLIC"."A"( "K10" BLOB(10240), "M20" BLOB(20971520), "G30" CLOB(32212254720) ); +> ----------------------------------------------------------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; > DROP TABLE IF EXISTS "PUBLIC"."A" CASCADE; -> rows: 4 +> CREATE MEMORY TABLE "PUBLIC"."A"( "K10" BINARY LARGE OBJECT(10240), "M20" BINARY LARGE OBJECT(20971520), "G30" CHARACTER LARGE OBJECT(32212254720) ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.A; +> rows (ordered): 4 create table b(); > ok @@ -2674,55 +2286,6 @@ drop table a, a; drop table b, c; > ok -CREATE SCHEMA CONST; -> ok - -CREATE CONSTANT IF NOT EXISTS ONE VALUE 1; -> ok - -COMMENT ON CONSTANT ONE IS 'Eins'; -> ok - -CREATE CONSTANT IF NOT EXISTS ONE VALUE 1; -> ok - -CREATE CONSTANT CONST.ONE VALUE 1; -> ok - -SELECT CONSTANT_SCHEMA, CONSTANT_NAME, DATA_TYPE, REMARKS, SQL FROM INFORMATION_SCHEMA.CONSTANTS; -> CONSTANT_SCHEMA 
CONSTANT_NAME DATA_TYPE REMARKS SQL -> --------------- ------------- --------- ------- --- -> CONST ONE 4 1 -> PUBLIC ONE 4 Eins 1 -> rows: 2 - -SELECT ONE, CONST.ONE FROM DUAL; -> 1 1 -> - - -> 1 1 -> rows: 1 - -COMMENT ON CONSTANT ONE IS NULL; -> ok - -DROP SCHEMA CONST CASCADE; -> ok - -SELECT CONSTANT_SCHEMA, CONSTANT_NAME, DATA_TYPE, REMARKS, SQL FROM INFORMATION_SCHEMA.CONSTANTS; -> CONSTANT_SCHEMA CONSTANT_NAME DATA_TYPE REMARKS SQL -> --------------- ------------- --------- ------- --- -> PUBLIC ONE 4 1 -> rows: 1 - -DROP CONSTANT ONE; -> ok - -DROP CONSTANT IF EXISTS ONE; -> ok - -DROP CONSTANT IF EXISTS ONE; -> ok - CREATE TABLE A (ID_A int primary key); > ok @@ -2772,10 +2335,10 @@ insert into x values(0), (1), (10); SELECT t1.ID, (SELECT t1.id || ':' || AVG(t2.ID) FROM X t2) AS col2 FROM X t1; > ID COL2 -> -- ---- -> 0 0:3 -> 1 1:3 -> 10 10:3 +> -- --------------------- +> 0 0:3.6666666666666665 +> 1 1:3.6666666666666665 +> 10 10:3.6666666666666665 > rows: 3 drop table x; @@ -2900,7 +2463,7 @@ select * from test2 where name like 'HELLO'; > rows: 1 explain plan for select * from test2, test where test2.name = test.name; ->> SELECT "TEST2"."ID", "TEST2"."NAME", "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST2" /* PUBLIC.TEST2.tableScan */ INNER JOIN "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" +>> SELECT "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST2"."NAME", "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST2" /* PUBLIC.TEST2.tableScan */ INNER JOIN "PUBLIC"."TEST" /* PUBLIC.IDX_TEST_NAME */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" select * from test2, test where test2.name = test.name; > ID NAME ID NAME @@ -2910,7 +2473,7 @@ select * from test2, test where test2.name = test.name; > rows: 2 explain plan for select * from test, test2 where test2.name = test.name; ->> SELECT "TEST"."ID", "TEST"."NAME", "TEST2"."ID", "TEST2"."NAME" FROM "PUBLIC"."TEST2" /* PUBLIC.TEST2.tableScan */ INNER JOIN 
"PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME", "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST2"."NAME" FROM "PUBLIC"."TEST2" /* PUBLIC.TEST2.tableScan */ INNER JOIN "PUBLIC"."TEST" /* PUBLIC.IDX_TEST_NAME */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" select * from test, test2 where test2.name = test.name; > ID NAME ID NAME @@ -2923,7 +2486,7 @@ create index idx_test2_name on test2(name); > ok explain plan for select * from test2, test where test2.name = test.name; ->> SELECT "TEST2"."ID", "TEST2"."NAME", "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ INNER JOIN "PUBLIC"."TEST2" /* PUBLIC.IDX_TEST2_NAME: NAME = TEST.NAME */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" +>> SELECT "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST2"."NAME", "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_TEST_NAME */ INNER JOIN "PUBLIC"."TEST2" /* PUBLIC.IDX_TEST2_NAME: NAME = TEST.NAME */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" select * from test2, test where test2.name = test.name; > ID NAME ID NAME @@ -2933,7 +2496,7 @@ select * from test2, test where test2.name = test.name; > rows: 2 explain plan for select * from test, test2 where test2.name = test.name; ->> SELECT "TEST"."ID", "TEST"."NAME", "TEST2"."ID", "TEST2"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ INNER JOIN "PUBLIC"."TEST2" /* PUBLIC.IDX_TEST2_NAME: NAME = TEST.NAME */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME", "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST2"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_TEST_NAME */ INNER JOIN "PUBLIC"."TEST2" /* PUBLIC.IDX_TEST2_NAME: NAME = TEST.NAME */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" select * from test, test2 where test2.name = test.name; > ID NAME ID NAME @@ -3019,7 +2582,7 @@ where exists (select 1 from test t4 where t2.id=t4.id); > rows: 2 explain select * from test t1 
where id in(select id from test t2 where t1.id=t2.id); ->> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE "ID" IN( SELECT "ID" FROM "PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ WHERE "T1"."ID" = "T2"."ID") +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE "ID" IN( SELECT DISTINCT "ID" FROM "PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ WHERE "T1"."ID" = "T2"."ID") select * from test t1 where id in(select id from test t2 where t1.id=t2.id); > ID NAME @@ -3029,7 +2592,7 @@ select * from test t1 where id in(select id from test t2 where t1.id=t2.id); > rows: 2 explain select * from test t1 where id in(id, id+1); ->> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE "ID" IN("ID", ("ID" + 1)) +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE "ID" IN("ID", "ID" + 1) select * from test t1 where id in(id, id+1); > ID NAME @@ -3049,8 +2612,7 @@ select * from test t1 where id in(id); > rows: 2 explain select * from test t1 where id in(select id from test); -#+mvStore#>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(SELECT ID FROM PUBLIC.TEST /++ PUBLIC.TEST.tableScan ++/) */ WHERE "ID" IN( SELECT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) -#-mvStore#>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(SELECT ID FROM PUBLIC.TEST /++ PUBLIC.PRIMARY_KEY_2 ++/) */ WHERE "ID" IN( SELECT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */) +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(SELECT DISTINCT ID FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */) */ WHERE "ID" IN( SELECT DISTINCT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) select * from test t1 where id in(select id from test); > ID NAME @@ -3060,8 +2622,7 @@ select * from test t1 where id 
in(select id from test); > rows: 2 explain select * from test t1 where id in(1, select max(id) from test); -#+mvStore#>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(1, (SELECT MAX(ID) FROM PUBLIC.TEST /++ PUBLIC.TEST.tableScan ++/ /++ direct lookup ++/)) */ WHERE "ID" IN(1, (SELECT MAX("ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */)) -#-mvStore#>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(1, (SELECT MAX(ID) FROM PUBLIC.TEST /++ PUBLIC.PRIMARY_KEY_2 ++/ /++ direct lookup ++/)) */ WHERE "ID" IN(1, (SELECT MAX("ID") FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ /* direct lookup */)) +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(1, (SELECT MAX(ID) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ /* direct lookup */)) */ WHERE "ID" IN(1, (SELECT MAX("ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */)) select * from test t1 where id in(1, select max(id) from test); > ID NAME @@ -3109,55 +2670,6 @@ SELECT * FROM TEST WHERE foo = 123456789014567; DROP TABLE IF EXISTS TEST; > ok -create table test(v boolean); -> ok - -insert into test values(null), (true), (false); -> update count: 3 - -SELECT CASE WHEN NOT (false IN (null)) THEN false END; -> NULL -> ---- -> null -> rows: 1 - -select a.v as av, b.v as bv, a.v IN (b.v), not a.v IN (b.v) from test a, test b; -> AV BV A.V = B.V NOT (A.V = B.V) -> ----- ----- --------- --------------- -> FALSE FALSE TRUE FALSE -> FALSE TRUE FALSE TRUE -> FALSE null null null -> TRUE FALSE FALSE TRUE -> TRUE TRUE TRUE FALSE -> TRUE null null null -> null FALSE null null -> null TRUE null null -> null null null null -> rows: 9 - -select a.v as av, b.v as bv, a.v IN (b.v, null), not a.v IN (b.v, null) from test a, test b; -> AV BV A.V IN(B.V, NULL) NOT (A.V IN(B.V, NULL)) -> ----- ----- ----------------- ----------------------- -> FALSE FALSE TRUE FALSE -> FALSE TRUE 
null null -> FALSE null null null -> TRUE FALSE null null -> TRUE TRUE TRUE FALSE -> TRUE null null null -> null FALSE null null -> null TRUE null null -> null null null null -> rows: 9 - -drop table test; -> ok - -SELECT CASE WHEN NOT (false IN (null)) THEN false END; -> NULL -> ---- -> null -> rows: 1 - create table test(id int); > ok @@ -3173,15 +2685,9 @@ drop table test; > ok call select 1.0/3.0*3.0, 100.0/2.0, -25.0/100.0, 0.0/3.0, 6.9/2.0, 0.72179425150347250912311550800000 / 5314251955.21; -> SELECT 0.999999999999999999999999990, 50, -0.25, 0, 3.45, 1.35822361752313607260107721120531135706133161972E-10 FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX */ /* scanCount: 2 */ -> ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> ROW (0.999999999999999999999999990, 50, -0.25, 0, 3.45, 1.35822361752313607260107721120531135706133161972E-10) -> rows: 1 - -call (select x from dual where x is null); -> SELECT X FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX: X IS NULL */ /* scanCount: 1 */ WHERE X IS NULL -> ------------------------------------------------------------------------------------------------------- -> null +> ROW (0.99990, 50.0000, -0.25000000, 0.0000, 3.4500, 0.000000000135822361752313607260107721120531135706133162) +> ------------------------------------------------------------------------------------------------------------- +> ROW (0.99990, 50.0000, -0.25000000, 0.0000, 3.4500, 0.000000000135822361752313607260107721120531135706133162) > rows: 1 create sequence test_seq; @@ -3199,18 +2705,26 @@ alter table test add constraint nu unique(parent); alter table test add constraint fk foreign key(parent) references(id); > ok -select TABLE_NAME, NON_UNIQUE, INDEX_NAME, ORDINAL_POSITION, COLUMN_NAME, CARDINALITY, PRIMARY_KEY from INFORMATION_SCHEMA.INDEXES; -> TABLE_NAME NON_UNIQUE INDEX_NAME ORDINAL_POSITION COLUMN_NAME CARDINALITY 
PRIMARY_KEY -> ---------- ---------- ------------- ---------------- ----------- ----------- ----------- -> TEST FALSE NU_INDEX_2 1 PARENT 0 FALSE -> TEST FALSE PRIMARY_KEY_2 1 ID 0 TRUE -> TEST TRUE NI 1 PARENT 0 FALSE +SELECT TABLE_NAME, INDEX_NAME, INDEX_TYPE_NAME FROM INFORMATION_SCHEMA.INDEXES; +> TABLE_NAME INDEX_NAME INDEX_TYPE_NAME +> ---------- ------------- --------------- +> TEST NI INDEX +> TEST NU_INDEX_2 UNIQUE INDEX +> TEST PRIMARY_KEY_2 PRIMARY KEY +> rows: 3 + +SELECT TABLE_NAME, INDEX_NAME, ORDINAL_POSITION, COLUMN_NAME FROM INFORMATION_SCHEMA.INDEX_COLUMNS; +> TABLE_NAME INDEX_NAME ORDINAL_POSITION COLUMN_NAME +> ---------- ------------- ---------------- ----------- +> TEST NI 1 PARENT +> TEST NU_INDEX_2 1 PARENT +> TEST PRIMARY_KEY_2 1 ID > rows: 3 -select SEQUENCE_NAME, CURRENT_VALUE, INCREMENT, IS_GENERATED, REMARKS from INFORMATION_SCHEMA.SEQUENCES; -> SEQUENCE_NAME CURRENT_VALUE INCREMENT IS_GENERATED REMARKS -> ------------- ------------- --------- ------------ ------- -> TEST_SEQ 0 1 FALSE +select SEQUENCE_NAME, BASE_VALUE, INCREMENT, REMARKS from INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME BASE_VALUE INCREMENT REMARKS +> ------------- ---------- --------- ------- +> TEST_SEQ 1 1 null > rows: 1 drop table test; @@ -3253,14 +2767,14 @@ select count(*) from test where id = ((select id from test)); > exception SCALAR_SUBQUERY_CONTAINS_MORE_THAN_ONE_ROW select count(*) from test where id = ARRAY [(select id from test), 1]; -> exception COMPARING_ARRAY_TO_SCALAR +> exception TYPES_ARE_NOT_COMPARABLE_2 select count(*) from test where id = ((select id from test fetch first row only), 1); -> exception COLUMN_COUNT_DOES_NOT_MATCH +> exception TYPES_ARE_NOT_COMPARABLE_2 select (select id from test where 1=0) from test; -> SELECT ID FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE -> ------------------------------------------------------------------------- +> (SELECT ID FROM PUBLIC.TEST WHERE FALSE) +> 
---------------------------------------- > null > null > rows: 2 @@ -3275,14 +2789,14 @@ insert into test values(1, 'Y'); > update count: 1 call select a from test order by id; -> SELECT A FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2 */ /* scanCount: 2 */ ORDER BY =ID /* index sorted */ -> ------------------------------------------------------------------------------------------------------- +> (SELECT A FROM PUBLIC.TEST ORDER BY ID) +> --------------------------------------- > TRUE > rows (ordered): 1 select select a from test order by id; -> SELECT A FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2 */ /* scanCount: 2 */ ORDER BY =ID /* index sorted */ -> ------------------------------------------------------------------------------------------------------- +> (SELECT A FROM PUBLIC.TEST ORDER BY ID) +> --------------------------------------- > TRUE > rows: 1 @@ -3333,30 +2847,30 @@ CREATE memory TABLE sp1(S_NO VARCHAR(5) REFERENCES s, p_no VARCHAR(5) REFERENCES CREATE memory TABLE sp2(S_NO VARCHAR(5), p_no VARCHAR(5), qty INT, constraint c1 FOREIGN KEY (S_NO) references s, PRIMARY KEY (S_NO, p_no)); > ok -script NOPASSWORDS NOSETTINGS; +script NOPASSWORDS NOSETTINGS noversion; > SCRIPT -> ----------------------------------------------------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.P; +> -------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "PARENT" INTEGER ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> CREATE MEMORY TABLE "PUBLIC"."S"( "S_NO" CHARACTER VARYING(5) NOT NULL, "NAME" CHARACTER VARYING(16), "CITY" CHARACTER VARYING(16) ); +> ALTER TABLE "PUBLIC"."S" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_5" PRIMARY 
KEY("S_NO"); > -- 0 +/- SELECT COUNT(*) FROM PUBLIC.S; +> CREATE MEMORY TABLE "PUBLIC"."P"( "P_NO" CHARACTER VARYING(5) NOT NULL, "DESCR" CHARACTER VARYING(16), "COLOR" CHARACTER VARYING(8) ); +> ALTER TABLE "PUBLIC"."P" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_50" PRIMARY KEY("P_NO"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.P; +> CREATE MEMORY TABLE "PUBLIC"."SP1"( "S_NO" CHARACTER VARYING(5) NOT NULL, "P_NO" CHARACTER VARYING(5) NOT NULL, "QTY" INTEGER ); +> ALTER TABLE "PUBLIC"."SP1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_141" PRIMARY KEY("S_NO", "P_NO"); > -- 0 +/- SELECT COUNT(*) FROM PUBLIC.SP1; +> CREATE MEMORY TABLE "PUBLIC"."SP2"( "S_NO" CHARACTER VARYING(5) NOT NULL, "P_NO" CHARACTER VARYING(5) NOT NULL, "QTY" INTEGER ); +> ALTER TABLE "PUBLIC"."SP2" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_1417" PRIMARY KEY("S_NO", "P_NO"); > -- 0 +/- SELECT COUNT(*) FROM PUBLIC.SP2; -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."P" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_50_0" PRIMARY KEY("P_NO"); -> ALTER TABLE "PUBLIC"."S" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_5" PRIMARY KEY("S_NO"); > ALTER TABLE "PUBLIC"."SP1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_1" FOREIGN KEY("S_NO") REFERENCES "PUBLIC"."S"("S_NO") NOCHECK; > ALTER TABLE "PUBLIC"."SP1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_14" FOREIGN KEY("P_NO") REFERENCES "PUBLIC"."P"("P_NO") NOCHECK; -> ALTER TABLE "PUBLIC"."SP1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_141" PRIMARY KEY("S_NO", "P_NO"); -> ALTER TABLE "PUBLIC"."SP2" ADD CONSTRAINT "PUBLIC"."C1" FOREIGN KEY("S_NO") REFERENCES "PUBLIC"."S"("S_NO") NOCHECK; -> ALTER TABLE "PUBLIC"."SP2" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_1417" PRIMARY KEY("S_NO", "P_NO"); -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); > ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_27" FOREIGN KEY("PARENT") REFERENCES "PUBLIC"."TEST"("ID") NOCHECK; -> CREATE MEMORY TABLE "PUBLIC"."P"( "P_NO" VARCHAR(5) NOT NULL, "DESCR" VARCHAR(16), "COLOR" 
VARCHAR(8) ); -> CREATE MEMORY TABLE "PUBLIC"."S"( "S_NO" VARCHAR(5) NOT NULL, "NAME" VARCHAR(16), "CITY" VARCHAR(16) ); -> CREATE MEMORY TABLE "PUBLIC"."SP1"( "S_NO" VARCHAR(5) NOT NULL, "P_NO" VARCHAR(5) NOT NULL, "QTY" INT ); -> CREATE MEMORY TABLE "PUBLIC"."SP2"( "S_NO" VARCHAR(5) NOT NULL, "P_NO" VARCHAR(5) NOT NULL, "QTY" INT ); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "PARENT" INT ); -> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 20 +> ALTER TABLE "PUBLIC"."SP2" ADD CONSTRAINT "PUBLIC"."C1" FOREIGN KEY("S_NO") REFERENCES "PUBLIC"."S"("S_NO") NOCHECK; +> rows (ordered): 20 drop table test; > ok @@ -3373,10 +2887,10 @@ drop table s; drop table p; > ok -create table test (id identity, value int not null); +create table test (id identity, "VALUE" int not null); > ok -create primary key on test(id); +alter table test add primary key(id); > exception SECOND_PRIMARY_KEY alter table test drop primary key; @@ -3385,7 +2899,7 @@ alter table test drop primary key; alter table test drop primary key; > exception INDEX_NOT_FOUND_1 -create primary key on test(id, id, id); +alter table test add primary key(id, id, id); > ok alter table test drop primary key; @@ -3403,11 +2917,11 @@ create local temporary table test (id identity, b int, foreign key(b) references drop table test; > ok -script NOPASSWORDS NOSETTINGS drop; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION DROP; > SCRIPT > ------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 1 +> rows (ordered): 1 create local temporary table test1 (id identity); > ok @@ -3418,10 +2932,7 @@ create local temporary table test2 (id identity); alter table test2 add constraint test2_test1 foreign key (id) references test1; > ok -drop table test1; -> ok - -drop table test2; +drop table test1, test2; > ok create local temporary table test1 (id identity); @@ -3433,10 +2944,7 @@ create local temporary table test2 (id identity); alter table test2 add 
constraint test2_test1 foreign key (id) references test1; > ok -drop table test1; -> ok - -drop table test2; +drop table test1, test2; > ok set autocommit on; @@ -3463,9 +2971,9 @@ drop table test; create table test(id int primary key); > ok +-- Column A.ID cannot be referenced here explain select * from test a inner join test b left outer join test c on c.id = a.id; -#+mvStore#>> SELECT "A"."ID", "C"."ID", "B"."ID" FROM "PUBLIC"."TEST" "A" /* PUBLIC.TEST.tableScan */ LEFT OUTER JOIN "PUBLIC"."TEST" "C" /* PUBLIC.PRIMARY_KEY_2: ID = A.ID */ ON "C"."ID" = "A"."ID" INNER JOIN "PUBLIC"."TEST" "B" /* PUBLIC.TEST.tableScan */ ON 1=1 -#-mvStore#>> SELECT "A"."ID", "C"."ID", "B"."ID" FROM "PUBLIC"."TEST" "A" /* PUBLIC.PRIMARY_KEY_2 */ LEFT OUTER JOIN "PUBLIC"."TEST" "C" /* PUBLIC.PRIMARY_KEY_2: ID = A.ID */ ON "C"."ID" = "A"."ID" INNER JOIN "PUBLIC"."TEST" "B" /* PUBLIC.PRIMARY_KEY_2 */ ON 1=1 +> exception COLUMN_NOT_FOUND_1 SELECT T.ID FROM TEST "T"; > ID @@ -3608,30 +3116,6 @@ alter index if exists s.idx_id rename to s.x; alter index if exists s.x rename to s.index_id; > ok -alter sequence if exists s.seq restart with 10; -> ok - -create sequence s.seq cache 0; -> ok - -alter sequence if exists s.seq restart with 3; -> ok - -select s.seq.nextval as x; -> X -> - -> 3 -> rows: 1 - -drop sequence s.seq; -> ok - -create sequence s.seq cache 0; -> ok - -alter sequence s.seq restart with 10; -> ok - alter table s.test add constraint cu_id unique(id); > ok @@ -3650,22 +3134,20 @@ alter table s.test rename to testtab; alter table s.testtab rename to test; > ok -create trigger test_trigger before insert on s.test call "org.h2.test.db.TestTriggersConstraints"; +create trigger test_trigger before insert on s.test call 'org.h2.test.db.TestTriggersConstraints'; > ok -script NOPASSWORDS NOSETTINGS drop; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION DROP; > SCRIPT > ----------------------------------------------------------------------------------------------------------------------------- 
-> -- 0 +/- SELECT COUNT(*) FROM S.TEST; -> CREATE FORCE TRIGGER "S"."TEST_TRIGGER" BEFORE INSERT ON "S"."TEST" QUEUE 1024 CALL "org.h2.test.db.TestTriggersConstraints"; -> CREATE INDEX "S"."INDEX_ID" ON "S"."TEST"("ID"); -> CREATE MEMORY TABLE "S"."TEST"( "ID" INT ); -> CREATE SCHEMA IF NOT EXISTS "S" AUTHORIZATION "SA"; -> CREATE SEQUENCE "S"."SEQ" START WITH 10 CACHE 1; > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> DROP SEQUENCE IF EXISTS "S"."SEQ"; +> CREATE SCHEMA IF NOT EXISTS "S" AUTHORIZATION "SA"; > DROP TABLE IF EXISTS "S"."TEST" CASCADE; -> rows: 9 +> CREATE MEMORY TABLE "S"."TEST"( "ID" INTEGER ); +> -- 0 +/- SELECT COUNT(*) FROM S.TEST; +> CREATE INDEX "S"."INDEX_ID" ON "S"."TEST"("ID" NULLS FIRST); +> CREATE FORCE TRIGGER "S"."TEST_TRIGGER" BEFORE INSERT ON "S"."TEST" QUEUE 1024 CALL 'org.h2.test.db.TestTriggersConstraints'; +> rows (ordered): 7 drop trigger s.test_trigger; > ok @@ -3688,18 +3170,18 @@ alter table test add constraint abc foreign key(id) references (id); alter table test rename column id to i; > ok -script NOPASSWORDS NOSETTINGS drop; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION DROP; > SCRIPT -> ------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."ABC" FOREIGN KEY("I") REFERENCES "PUBLIC"."TEST"("I") NOCHECK; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("I"); -> CREATE INDEX "PUBLIC"."IDX_N_ID" ON "PUBLIC"."TEST"("NAME", "I"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "I" INT NOT NULL, "NAME" VARCHAR(255), "Y" INT AS ("I" + 1) ); +> -------------------------------------------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; > DROP TABLE IF EXISTS "PUBLIC"."TEST" CASCADE; -> INSERT INTO "PUBLIC"."TEST" VALUES (1, 'Hello', 2); -> rows: 8 +> 
CREATE MEMORY TABLE "PUBLIC"."TEST"( "I" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255), "Y" INTEGER GENERATED ALWAYS AS ("I" + 1) ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("I"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST"("I", "NAME") VALUES (1, 'Hello'); +> CREATE INDEX "PUBLIC"."IDX_N_ID" ON "PUBLIC"."TEST"("NAME" NULLS FIRST, "I" NULLS FIRST); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."ABC" FOREIGN KEY("I") REFERENCES "PUBLIC"."TEST"("I") NOCHECK; +> rows (ordered): 8 INSERT INTO TEST(i, name) VALUES(2, 'World'); > update count: 1 @@ -3788,7 +3270,7 @@ drop sequence seq1; create table test(a int primary key, b int, c int); > ok -create unique index idx_ba on test(b, a); +alter table test add constraint unique_ba unique(b, a); > ok alter table test add constraint abc foreign key(c, a) references test(b, a); @@ -3803,7 +3285,7 @@ drop table test; create table ADDRESS (ADDRESS_ID int primary key, ADDRESS_TYPE int not null, SERVER_ID int not null); > ok -create unique index idx_a on address(ADDRESS_TYPE, SERVER_ID); +alter table address add constraint unique_a unique(ADDRESS_TYPE, SERVER_ID); > ok create table SERVER (SERVER_ID int primary key, SERVER_TYPE int not null, ADDRESS_TYPE int); @@ -3818,10 +3300,7 @@ alter table SERVER add constraint server_const foreign key (ADDRESS_TYPE, SERVER insert into SERVER (SERVER_ID, SERVER_TYPE) values (1, 1); > update count: 1 -drop table address; -> ok - -drop table server; +drop table address, server; > ok CREATE TABLE PlanElements(id int primary key, name varchar, parent_id int, foreign key(parent_id) references(id) on delete cascade); @@ -3874,13 +3353,6 @@ DROP TABLE IF EXISTS CHILD; DROP TABLE IF EXISTS PARENT; > ok -(SELECT * FROM DUAL) UNION ALL (SELECT * FROM DUAL); -> X -> - -> 1 -> 1 -> rows: 2 - DECLARE GLOBAL TEMPORARY TABLE TEST(ID INT PRIMARY KEY); > ok @@ -3890,8 +3362,8 @@ SELECT * FROM TEST; > rows: 0 SELECT 
GROUP_CONCAT(ID) FROM TEST; -> LISTAGG(ID) -> ----------- +> LISTAGG(ID) WITHIN GROUP (ORDER BY NULL) +> ---------------------------------------- > null > rows: 1 @@ -3922,8 +3394,8 @@ INSERT INTO TEST VALUES(2, 'World'); > update count: 1 SELECT group_concat(name) FROM TEST group by id; -> LISTAGG(NAME) -> ------------- +> LISTAGG(NAME) WITHIN GROUP (ORDER BY NULL) +> ------------------------------------------ > Hello > World > rows: 2 @@ -3954,17 +3426,17 @@ create memory table test (id int primary key, im_ie varchar(10)); create sequence test_seq; > ok -script NODATA NOPASSWORDS NOSETTINGS drop; +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION DROP; > SCRIPT -> ------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "IM_IE" VARCHAR(10) ); -> CREATE SEQUENCE "PUBLIC"."TEST_SEQ" START WITH 1; +> -------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> DROP SEQUENCE IF EXISTS "PUBLIC"."TEST_SEQ"; > DROP TABLE IF EXISTS "PUBLIC"."TEST" CASCADE; -> rows: 7 +> DROP SEQUENCE IF EXISTS "PUBLIC"."TEST_SEQ"; +> CREATE SEQUENCE "PUBLIC"."TEST_SEQ" START WITH 1; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "IM_IE" CHARACTER VARYING(10) ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 7 drop sequence test_seq; > ok @@ -3989,24 +3461,8 @@ SELECT * FROM TEST; DROP TABLE TEST; > ok -CREATE MEMORY TABLE TEST(ID BIGINT NOT NULL IDENTITY(10, 5), NAME VARCHAR); -> ok - -INSERT INTO TEST(NAME) VALUES('Hello'), ('World'); -> update count: 2 - -SELECT * FROM TEST; -> ID NAME -> -- ----- -> 10 Hello -> 15 World -> rows: 2 - -DROP TABLE TEST; -> ok - 
CREATE CACHED TABLE account( -id INTEGER NOT NULL IDENTITY, +id INTEGER GENERATED BY DEFAULT AS IDENTITY, name VARCHAR NOT NULL, mail_address VARCHAR NOT NULL, UNIQUE(name), @@ -4015,7 +3471,7 @@ PRIMARY KEY(id) > ok CREATE CACHED TABLE label( -id INTEGER NOT NULL IDENTITY, +id INTEGER GENERATED BY DEFAULT AS IDENTITY, parent_id INTEGER NOT NULL, account_id INTEGER NOT NULL, name VARCHAR NOT NULL, @@ -4049,7 +3505,7 @@ drop table account; > ok --- constraints and alter table add column --------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT, PARENTID INT, FOREIGN KEY(PARENTID) REFERENCES(ID)); +CREATE TABLE TEST(ID INT PRIMARY KEY, PARENTID INT, FOREIGN KEY(PARENTID) REFERENCES(ID)); > ok INSERT INTO TEST VALUES(0, 0); @@ -4079,13 +3535,13 @@ SELECT * FROM TEST; DROP TABLE TEST; > ok -CREATE MEMORY TABLE A(X INT); +CREATE MEMORY TABLE A(X INT PRIMARY KEY); > ok CREATE MEMORY TABLE B(XX INT, CONSTRAINT B2A FOREIGN KEY(XX) REFERENCES A(X)); > ok -CREATE MEMORY TABLE C(X_MASTER INT); +CREATE MEMORY TABLE C(X_MASTER INT PRIMARY KEY); > ok ALTER TABLE A ADD CONSTRAINT A2C FOREIGN KEY(X) REFERENCES C(X_MASTER); @@ -4112,13 +3568,7 @@ insert into a values(2, 2); insert into b values(2); > update count: 1 -DROP TABLE IF EXISTS A; -> ok - -DROP TABLE IF EXISTS B; -> ok - -DROP TABLE IF EXISTS C; +DROP TABLE IF EXISTS A, B, C; > ok --- quoted keywords --------------------------------------------------------------------------------------------- @@ -4141,44 +3591,7 @@ SELECT "ROWNUM", ROWNUM, "SELECT" "AS", "PRIMARY" AS "X", "KEY", "NEXTVAL", "IND DROP TABLE "CREATE"; > ok ---- test case for number like string --------------------------------------------------------------------------------------------- -CREATE TABLE test (one bigint primary key, two bigint, three bigint); -> ok - -CREATE INDEX two ON test(two); -> ok - -INSERT INTO TEST VALUES(1, 2, 3), (10, 20, 30), (100, 200, 300); -> update count: 3 - -INSERT 
INTO TEST VALUES(2, 6, 9), (20, 60, 90), (200, 600, 900); -> update count: 3 - -SELECT * FROM test WHERE one LIKE '2%'; -> ONE TWO THREE -> --- --- ----- -> 2 6 9 -> 20 60 90 -> 200 600 900 -> rows: 3 - -SELECT * FROM test WHERE two LIKE '2%'; -> ONE TWO THREE -> --- --- ----- -> 1 2 3 -> 10 20 30 -> 100 200 300 -> rows: 3 - -SELECT * FROM test WHERE three LIKE '2%'; -> ONE TWO THREE -> --- --- ----- -> rows: 0 - -DROP TABLE TEST; -> ok - -CREATE TABLE PARENT(ID INT, NAME VARCHAR); +CREATE TABLE PARENT(ID INT PRIMARY KEY, NAME VARCHAR); > ok CREATE TABLE CHILD(ID INT, PARENTID INT, FOREIGN KEY(PARENTID) REFERENCES PARENT(ID)); @@ -4209,10 +3622,7 @@ SELECT * FROM CHILD; > 21 2 > rows: 4 -DROP TABLE PARENT; -> ok - -DROP TABLE CHILD; +DROP TABLE PARENT, CHILD; > ok --- @@ -4310,12 +3720,10 @@ update test set (id, name)=(select id+1, name || 'Ho' from test t1 where test.id > update count: 2 explain update test set (id, name)=(id+1, name || 'Hi'); -#+mvStore#>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "ID" = ARRAY_GET(ROW (("ID" + 1), ("NAME" || 'Hi')), 1), "NAME" = ARRAY_GET(ROW (("ID" + 1), ("NAME" || 'Hi')), 2) -#-mvStore#>> UPDATE "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ SET "ID" = ARRAY_GET(ROW (("ID" + 1), ("NAME" || 'Hi')), 1), "NAME" = ARRAY_GET(ROW (("ID" + 1), ("NAME" || 'Hi')), 2) +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "ID" = "ID" + 1, "NAME" = "NAME" || 'Hi' explain update test set (id, name)=(select id+1, name || 'Ho' from test t1 where test.id=t1.id); -#+mvStore#>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "ID" = ARRAY_GET((SELECT ("ID" + 1), ("NAME" || 'Ho') FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = TEST.ID */ WHERE "TEST"."ID" = "T1"."ID"), 1), "NAME" = ARRAY_GET((SELECT ("ID" + 1), ("NAME" || 'Ho') FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = TEST.ID */ WHERE "TEST"."ID" = "T1"."ID"), 2) -#-mvStore#>> UPDATE "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ SET "ID" = ARRAY_GET((SELECT 
("ID" + 1), ("NAME" || 'Ho') FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = TEST.ID */ WHERE "TEST"."ID" = "T1"."ID"), 1), "NAME" = ARRAY_GET((SELECT ("ID" + 1), ("NAME" || 'Ho') FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = TEST.ID */ WHERE "TEST"."ID" = "T1"."ID"), 2) +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET ("ID", "NAME") = (SELECT "ID" + 1, "NAME" || 'Ho' FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = TEST.ID */ WHERE "TEST"."ID" = "T1"."ID") select * from test; > ID NAME @@ -4340,17 +3748,17 @@ insert into test values(1, '', ''); insert into test values(2, 'Cafe', X'cafe'); > update count: 1 -script simple nopasswords nosettings; +script simple nopasswords nosettings noversion; > SCRIPT -> ------------------------------------------------------------------------------------- -> -- 3 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "C" CLOB, "B" BLOB ); +> ------------------------------------------------------------------------------------------------------------------ > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "C" CHARACTER LARGE OBJECT, "B" BINARY LARGE OBJECT ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 3 +/- SELECT COUNT(*) FROM PUBLIC.TEST; > INSERT INTO "PUBLIC"."TEST" VALUES(0, NULL, NULL); > INSERT INTO "PUBLIC"."TEST" VALUES(1, '', X''); > INSERT INTO "PUBLIC"."TEST" VALUES(2, 'Cafe', X'cafe'); -> rows: 7 +> rows (ordered): 7 drop table test; > ok @@ -4369,19 +3777,19 @@ insert into b select id+10, p+10 from b; > update count: 10 explain select * from b b0, b b1, b b2 where b1.p = b0.id and b2.p = b1.id and b0.id=10; ->> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* 
WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND (("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID")) +>> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND ("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID") explain select * from b b0, b b1, b b2, b b3 where b1.p = b0.id and b2.p = b1.id and b3.p = b2.id and b0.id=10; ->> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P", "B3"."ID", "B3"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN "PUBLIC"."B" "B3" /* PUBLIC.BP: P = B2.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND (("B3"."P" = "B2"."ID") AND (("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID"))) +>> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P", "B3"."ID", "B3"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN "PUBLIC"."B" "B3" /* PUBLIC.BP: P = B2.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND ("B3"."P" = "B2"."ID") AND ("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID") explain select * from b b0, b b1, b b2, b b3, b b4 where b1.p = b0.id and b2.p = b1.id and b3.p = b2.id and b4.p = b3.id and b0.id=10; ->> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P", 
"B3"."ID", "B3"."P", "B4"."ID", "B4"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN "PUBLIC"."B" "B3" /* PUBLIC.BP: P = B2.ID */ ON 1=1 /* WHERE B3.P = B2.ID */ INNER JOIN "PUBLIC"."B" "B4" /* PUBLIC.BP: P = B3.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND (("B4"."P" = "B3"."ID") AND (("B3"."P" = "B2"."ID") AND (("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID")))) +>> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P", "B3"."ID", "B3"."P", "B4"."ID", "B4"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN "PUBLIC"."B" "B3" /* PUBLIC.BP: P = B2.ID */ ON 1=1 /* WHERE B3.P = B2.ID */ INNER JOIN "PUBLIC"."B" "B4" /* PUBLIC.BP: P = B3.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND ("B3"."P" = "B2"."ID") AND ("B4"."P" = "B3"."ID") AND ("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID") analyze; > ok explain select * from b b0, b b1, b b2, b b3, b b4 where b1.p = b0.id and b2.p = b1.id and b3.p = b2.id and b4.p = b3.id and b0.id=10; ->> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P", "B3"."ID", "B3"."P", "B4"."ID", "B4"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN "PUBLIC"."B" "B3" /* PUBLIC.BP: P = B2.ID */ ON 1=1 /* WHERE B3.P = B2.ID */ INNER JOIN "PUBLIC"."B" "B4" /* PUBLIC.BP: P = B3.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND (("B4"."P" = "B3"."ID") AND (("B3"."P" = "B2"."ID") AND 
(("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID")))) +>> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P", "B3"."ID", "B3"."P", "B4"."ID", "B4"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN "PUBLIC"."B" "B3" /* PUBLIC.BP: P = B2.ID */ ON 1=1 /* WHERE B3.P = B2.ID */ INNER JOIN "PUBLIC"."B" "B4" /* PUBLIC.BP: P = B3.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND ("B3"."P" = "B2"."ID") AND ("B4"."P" = "B3"."ID") AND ("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID") drop table if exists b; > ok @@ -4407,24 +3815,16 @@ insert into test values > update count: 10 EXPLAIN SELECT * FROM TEST WHERE ID = 3; ->> SELECT "TEST"."ID", "TEST"."FIRST_NAME", "TEST"."NAME", "TEST"."STATE" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 3 */ WHERE "ID" = 3 - -SELECT SELECTIVITY(ID), SELECTIVITY(FIRST_NAME), -SELECTIVITY(NAME), SELECTIVITY(STATE) -FROM TEST WHERE ROWNUM()<100000; -> SELECTIVITY(ID) SELECTIVITY(FIRST_NAME) SELECTIVITY(NAME) SELECTIVITY(STATE) -> --------------- ----------------------- ----------------- ------------------ -> 100 60 80 10 -> rows: 1 +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."FIRST_NAME", "PUBLIC"."TEST"."NAME", "PUBLIC"."TEST"."STATE" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 3 */ WHERE "ID" = 3 explain select * from test where name='Smith' and first_name='Tom' and state=0; ->> SELECT "TEST"."ID", "TEST"."FIRST_NAME", "TEST"."NAME", "TEST"."STATE" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_FIRST_NAME: FIRST_NAME = 'Tom' */ WHERE ("STATE" = 0) AND (("NAME" = 'Smith') AND ("FIRST_NAME" = 'Tom')) +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."FIRST_NAME", "PUBLIC"."TEST"."NAME", "PUBLIC"."TEST"."STATE" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_FIRST_NAME: FIRST_NAME = 'Tom' */ WHERE ("STATE" = 0) AND ("NAME" = 
'Smith') AND ("FIRST_NAME" = 'Tom') alter table test alter column name selectivity 100; > ok explain select * from test where name='Smith' and first_name='Tom' and state=0; ->> SELECT "TEST"."ID", "TEST"."FIRST_NAME", "TEST"."NAME", "TEST"."STATE" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_NAME: NAME = 'Smith' */ WHERE ("STATE" = 0) AND (("NAME" = 'Smith') AND ("FIRST_NAME" = 'Tom')) +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."FIRST_NAME", "PUBLIC"."TEST"."NAME", "PUBLIC"."TEST"."STATE" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_NAME: NAME = 'Smith' */ WHERE ("STATE" = 0) AND ("NAME" = 'Smith') AND ("FIRST_NAME" = 'Tom') drop table test; > ok @@ -4438,7 +3838,7 @@ INSERT INTO O SELECT X, X+1 FROM SYSTEM_RANGE(1, 1000); EXPLAIN SELECT A.X FROM O B, O A, O F, O D, O C, O E, O G, O H, O I, O J WHERE 1=J.X and J.Y=I.X AND I.Y=H.X AND H.Y=G.X AND G.Y=F.X AND F.Y=E.X AND E.Y=D.X AND D.Y=C.X AND C.Y=B.X AND B.Y=A.X; ->> SELECT "A"."X" FROM "PUBLIC"."O" "J" /* PUBLIC.PRIMARY_KEY_4: X = 1 */ /* WHERE J.X = 1 */ INNER JOIN "PUBLIC"."O" "I" /* PUBLIC.PRIMARY_KEY_4: X = J.Y */ ON 1=1 /* WHERE J.Y = I.X */ INNER JOIN "PUBLIC"."O" "H" /* PUBLIC.PRIMARY_KEY_4: X = I.Y */ ON 1=1 /* WHERE I.Y = H.X */ INNER JOIN "PUBLIC"."O" "G" /* PUBLIC.PRIMARY_KEY_4: X = H.Y */ ON 1=1 /* WHERE H.Y = G.X */ INNER JOIN "PUBLIC"."O" "F" /* PUBLIC.PRIMARY_KEY_4: X = G.Y */ ON 1=1 /* WHERE G.Y = F.X */ INNER JOIN "PUBLIC"."O" "E" /* PUBLIC.PRIMARY_KEY_4: X = F.Y */ ON 1=1 /* WHERE F.Y = E.X */ INNER JOIN "PUBLIC"."O" "D" /* PUBLIC.PRIMARY_KEY_4: X = E.Y */ ON 1=1 /* WHERE E.Y = D.X */ INNER JOIN "PUBLIC"."O" "C" /* PUBLIC.PRIMARY_KEY_4: X = D.Y */ ON 1=1 /* WHERE D.Y = C.X */ INNER JOIN "PUBLIC"."O" "B" /* PUBLIC.PRIMARY_KEY_4: X = C.Y */ ON 1=1 /* WHERE C.Y = B.X */ INNER JOIN "PUBLIC"."O" "A" /* PUBLIC.PRIMARY_KEY_4: X = B.Y */ ON 1=1 WHERE ("B"."Y" = "A"."X") AND (("C"."Y" = "B"."X") AND (("D"."Y" = "C"."X") AND (("E"."Y" = "D"."X") AND (("F"."Y" = "E"."X") AND (("G"."Y" = "F"."X") AND (("H"."Y" = "G"."X") 
AND (("I"."Y" = "H"."X") AND (("J"."X" = 1) AND ("J"."Y" = "I"."X"))))))))) +>> SELECT "A"."X" FROM "PUBLIC"."O" "J" /* PUBLIC.PRIMARY_KEY_4: X = 1 */ /* WHERE J.X = 1 */ INNER JOIN "PUBLIC"."O" "I" /* PUBLIC.PRIMARY_KEY_4: X = J.Y */ ON 1=1 /* WHERE J.Y = I.X */ INNER JOIN "PUBLIC"."O" "H" /* PUBLIC.PRIMARY_KEY_4: X = I.Y */ ON 1=1 /* WHERE I.Y = H.X */ INNER JOIN "PUBLIC"."O" "G" /* PUBLIC.PRIMARY_KEY_4: X = H.Y */ ON 1=1 /* WHERE H.Y = G.X */ INNER JOIN "PUBLIC"."O" "F" /* PUBLIC.PRIMARY_KEY_4: X = G.Y */ ON 1=1 /* WHERE G.Y = F.X */ INNER JOIN "PUBLIC"."O" "E" /* PUBLIC.PRIMARY_KEY_4: X = F.Y */ ON 1=1 /* WHERE F.Y = E.X */ INNER JOIN "PUBLIC"."O" "D" /* PUBLIC.PRIMARY_KEY_4: X = E.Y */ ON 1=1 /* WHERE E.Y = D.X */ INNER JOIN "PUBLIC"."O" "C" /* PUBLIC.PRIMARY_KEY_4: X = D.Y */ ON 1=1 /* WHERE D.Y = C.X */ INNER JOIN "PUBLIC"."O" "B" /* PUBLIC.PRIMARY_KEY_4: X = C.Y */ ON 1=1 /* WHERE C.Y = B.X */ INNER JOIN "PUBLIC"."O" "A" /* PUBLIC.PRIMARY_KEY_4: X = B.Y */ ON 1=1 WHERE ("J"."X" = 1) AND ("I"."Y" = "H"."X") AND ("H"."Y" = "G"."X") AND ("G"."Y" = "F"."X") AND ("F"."Y" = "E"."X") AND ("E"."Y" = "D"."X") AND ("D"."Y" = "C"."X") AND ("C"."Y" = "B"."X") AND ("B"."Y" = "A"."X") AND ("J"."Y" = "I"."X") DROP TABLE O; > ok @@ -4466,7 +3866,7 @@ AND DID=D.ID AND EID=E.ID AND FID=F.ID AND GID=G.ID AND HID=H.ID; EXPLAIN SELECT COUNT(*) FROM PARENT, CHILD A, CHILD B, CHILD C, CHILD D, CHILD E, CHILD F, CHILD G, CHILD H WHERE AID=A.ID AND BID=B.ID AND CID=C.ID AND DID=D.ID AND EID=E.ID AND FID=F.ID AND GID=G.ID AND HID=H.ID; ->> SELECT COUNT(*) FROM "PUBLIC"."PARENT" /* PUBLIC.PARENT.tableScan */ INNER JOIN "PUBLIC"."CHILD" "A" /* PUBLIC.PRIMARY_KEY_3: ID = AID */ ON 1=1 /* WHERE AID = A.ID */ INNER JOIN "PUBLIC"."CHILD" "B" /* PUBLIC.PRIMARY_KEY_3: ID = BID */ ON 1=1 /* WHERE BID = B.ID */ INNER JOIN "PUBLIC"."CHILD" "C" /* PUBLIC.PRIMARY_KEY_3: ID = CID */ ON 1=1 /* WHERE CID = C.ID */ INNER JOIN "PUBLIC"."CHILD" "D" /* PUBLIC.PRIMARY_KEY_3: ID = DID */ ON 1=1 /* WHERE 
DID = D.ID */ INNER JOIN "PUBLIC"."CHILD" "E" /* PUBLIC.PRIMARY_KEY_3: ID = EID */ ON 1=1 /* WHERE EID = E.ID */ INNER JOIN "PUBLIC"."CHILD" "F" /* PUBLIC.PRIMARY_KEY_3: ID = FID */ ON 1=1 /* WHERE FID = F.ID */ INNER JOIN "PUBLIC"."CHILD" "G" /* PUBLIC.PRIMARY_KEY_3: ID = GID */ ON 1=1 /* WHERE GID = G.ID */ INNER JOIN "PUBLIC"."CHILD" "H" /* PUBLIC.PRIMARY_KEY_3: ID = HID */ ON 1=1 WHERE ("HID" = "H"."ID") AND (("GID" = "G"."ID") AND (("FID" = "F"."ID") AND (("EID" = "E"."ID") AND (("DID" = "D"."ID") AND (("CID" = "C"."ID") AND (("AID" = "A"."ID") AND ("BID" = "B"."ID"))))))) +>> SELECT COUNT(*) FROM "PUBLIC"."PARENT" /* PUBLIC.PARENT.tableScan */ INNER JOIN "PUBLIC"."CHILD" "A" /* PUBLIC.PRIMARY_KEY_3: ID = AID */ ON 1=1 /* WHERE AID = A.ID */ INNER JOIN "PUBLIC"."CHILD" "B" /* PUBLIC.PRIMARY_KEY_3: ID = BID */ ON 1=1 /* WHERE BID = B.ID */ INNER JOIN "PUBLIC"."CHILD" "C" /* PUBLIC.PRIMARY_KEY_3: ID = CID */ ON 1=1 /* WHERE CID = C.ID */ INNER JOIN "PUBLIC"."CHILD" "D" /* PUBLIC.PRIMARY_KEY_3: ID = DID */ ON 1=1 /* WHERE DID = D.ID */ INNER JOIN "PUBLIC"."CHILD" "E" /* PUBLIC.PRIMARY_KEY_3: ID = EID */ ON 1=1 /* WHERE EID = E.ID */ INNER JOIN "PUBLIC"."CHILD" "F" /* PUBLIC.PRIMARY_KEY_3: ID = FID */ ON 1=1 /* WHERE FID = F.ID */ INNER JOIN "PUBLIC"."CHILD" "G" /* PUBLIC.PRIMARY_KEY_3: ID = GID */ ON 1=1 /* WHERE GID = G.ID */ INNER JOIN "PUBLIC"."CHILD" "H" /* PUBLIC.PRIMARY_KEY_3: ID = HID */ ON 1=1 WHERE ("CID" = "C"."ID") AND ("DID" = "D"."ID") AND ("EID" = "E"."ID") AND ("FID" = "F"."ID") AND ("GID" = "G"."ID") AND ("HID" = "H"."ID") AND ("AID" = "A"."ID") AND ("BID" = "B"."ID") CREATE TABLE FAMILY(ID INT PRIMARY KEY, PARENTID INT); > ok @@ -4477,7 +3877,7 @@ INSERT INTO FAMILY SELECT X, X-1 FROM SYSTEM_RANGE(0, 1000); EXPLAIN SELECT COUNT(*) FROM CHILD A, CHILD B, FAMILY, CHILD C, CHILD D, PARENT, CHILD E, CHILD F, CHILD G WHERE FAMILY.ID=1 AND FAMILY.PARENTID=PARENT.ID AND AID=A.ID AND BID=B.ID AND CID=C.ID AND DID=D.ID AND EID=E.ID AND FID=F.ID AND 
GID=G.ID; ->> SELECT COUNT(*) FROM "PUBLIC"."FAMILY" /* PUBLIC.PRIMARY_KEY_7: ID = 1 */ /* WHERE FAMILY.ID = 1 */ INNER JOIN "PUBLIC"."PARENT" /* PUBLIC.PRIMARY_KEY_8: ID = FAMILY.PARENTID */ ON 1=1 /* WHERE FAMILY.PARENTID = PARENT.ID */ INNER JOIN "PUBLIC"."CHILD" "A" /* PUBLIC.PRIMARY_KEY_3: ID = AID */ ON 1=1 /* WHERE AID = A.ID */ INNER JOIN "PUBLIC"."CHILD" "B" /* PUBLIC.PRIMARY_KEY_3: ID = BID */ ON 1=1 /* WHERE BID = B.ID */ INNER JOIN "PUBLIC"."CHILD" "C" /* PUBLIC.PRIMARY_KEY_3: ID = CID */ ON 1=1 /* WHERE CID = C.ID */ INNER JOIN "PUBLIC"."CHILD" "D" /* PUBLIC.PRIMARY_KEY_3: ID = DID */ ON 1=1 /* WHERE DID = D.ID */ INNER JOIN "PUBLIC"."CHILD" "E" /* PUBLIC.PRIMARY_KEY_3: ID = EID */ ON 1=1 /* WHERE EID = E.ID */ INNER JOIN "PUBLIC"."CHILD" "F" /* PUBLIC.PRIMARY_KEY_3: ID = FID */ ON 1=1 /* WHERE FID = F.ID */ INNER JOIN "PUBLIC"."CHILD" "G" /* PUBLIC.PRIMARY_KEY_3: ID = GID */ ON 1=1 WHERE ("GID" = "G"."ID") AND (("FID" = "F"."ID") AND (("EID" = "E"."ID") AND (("DID" = "D"."ID") AND (("CID" = "C"."ID") AND (("BID" = "B"."ID") AND (("AID" = "A"."ID") AND (("FAMILY"."ID" = 1) AND ("FAMILY"."PARENTID" = "PARENT"."ID")))))))) +>> SELECT COUNT(*) FROM "PUBLIC"."FAMILY" /* PUBLIC.PRIMARY_KEY_7: ID = 1 */ /* WHERE FAMILY.ID = 1 */ INNER JOIN "PUBLIC"."PARENT" /* PUBLIC.PRIMARY_KEY_8: ID = FAMILY.PARENTID */ ON 1=1 /* WHERE FAMILY.PARENTID = PARENT.ID */ INNER JOIN "PUBLIC"."CHILD" "A" /* PUBLIC.PRIMARY_KEY_3: ID = AID */ ON 1=1 /* WHERE AID = A.ID */ INNER JOIN "PUBLIC"."CHILD" "B" /* PUBLIC.PRIMARY_KEY_3: ID = BID */ ON 1=1 /* WHERE BID = B.ID */ INNER JOIN "PUBLIC"."CHILD" "C" /* PUBLIC.PRIMARY_KEY_3: ID = CID */ ON 1=1 /* WHERE CID = C.ID */ INNER JOIN "PUBLIC"."CHILD" "D" /* PUBLIC.PRIMARY_KEY_3: ID = DID */ ON 1=1 /* WHERE DID = D.ID */ INNER JOIN "PUBLIC"."CHILD" "E" /* PUBLIC.PRIMARY_KEY_3: ID = EID */ ON 1=1 /* WHERE EID = E.ID */ INNER JOIN "PUBLIC"."CHILD" "F" /* PUBLIC.PRIMARY_KEY_3: ID = FID */ ON 1=1 /* WHERE FID = F.ID */ INNER JOIN 
"PUBLIC"."CHILD" "G" /* PUBLIC.PRIMARY_KEY_3: ID = GID */ ON 1=1 WHERE ("FAMILY"."ID" = 1) AND ("AID" = "A"."ID") AND ("BID" = "B"."ID") AND ("CID" = "C"."ID") AND ("DID" = "D"."ID") AND ("EID" = "E"."ID") AND ("FID" = "F"."ID") AND ("GID" = "G"."ID") AND ("FAMILY"."PARENTID" = "PARENT"."ID") DROP TABLE FAMILY; > ok @@ -4644,13 +4044,13 @@ SELECT DISTINCT TABLE_SCHEMA, TABLE_CATALOG FROM INFORMATION_SCHEMA.TABLES ORDER > rows (ordered): 1 SELECT * FROM INFORMATION_SCHEMA.SCHEMATA; -> CATALOG_NAME SCHEMA_NAME SCHEMA_OWNER DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME IS_DEFAULT REMARKS ID -> ------------ ------------------ ------------ -------------------------- ---------------------- ---------- ------- -- -> SCRIPT INFORMATION_SCHEMA SA Unicode OFF FALSE -1 -> SCRIPT PUBLIC SA Unicode OFF TRUE 0 +> CATALOG_NAME SCHEMA_NAME SCHEMA_OWNER DEFAULT_CHARACTER_SET_CATALOG DEFAULT_CHARACTER_SET_SCHEMA DEFAULT_CHARACTER_SET_NAME SQL_PATH DEFAULT_COLLATION_NAME REMARKS +> ------------ ------------------ ------------ ----------------------------- ---------------------------- -------------------------- -------- ---------------------- ------- +> SCRIPT INFORMATION_SCHEMA SA SCRIPT PUBLIC Unicode null OFF null +> SCRIPT PUBLIC SA SCRIPT PUBLIC Unicode null OFF null > rows: 2 -SELECT * FROM INFORMATION_SCHEMA.CATALOGS; +SELECT * FROM INFORMATION_SCHEMA.INFORMATION_SCHEMA_CATALOG_NAME; > CATALOG_NAME > ------------ > SCRIPT @@ -4664,10 +4064,10 @@ SELECT INFORMATION_SCHEMA.SCHEMATA.SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA; > rows: 2 SELECT INFORMATION_SCHEMA.SCHEMATA.* FROM INFORMATION_SCHEMA.SCHEMATA; -> CATALOG_NAME SCHEMA_NAME SCHEMA_OWNER DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME IS_DEFAULT REMARKS ID -> ------------ ------------------ ------------ -------------------------- ---------------------- ---------- ------- -- -> SCRIPT INFORMATION_SCHEMA SA Unicode OFF FALSE -1 -> SCRIPT PUBLIC SA Unicode OFF TRUE 0 +> CATALOG_NAME SCHEMA_NAME SCHEMA_OWNER 
DEFAULT_CHARACTER_SET_CATALOG DEFAULT_CHARACTER_SET_SCHEMA DEFAULT_CHARACTER_SET_NAME SQL_PATH DEFAULT_COLLATION_NAME REMARKS +> ------------ ------------------ ------------ ----------------------------- ---------------------------- -------------------------- -------- ---------------------- ------- +> SCRIPT INFORMATION_SCHEMA SA SCRIPT PUBLIC Unicode null OFF null +> SCRIPT PUBLIC SA SCRIPT PUBLIC Unicode null OFF null > rows: 2 CREATE SCHEMA TEST_SCHEMA AUTHORIZATION SA; @@ -4696,7 +4096,7 @@ create schema ClientServer_Schema AUTHORIZATION SA; CREATE TABLE ClientServer_Schema.PrimaryKey_Seq ( sequence_name VARCHAR(100) NOT NULL, -seq_number BIGINT NOT NULL, +seq_number BIGINT NOT NULL UNIQUE, CONSTRAINT X_PKPrimaryKey_Seq PRIMARY KEY (sequence_name) ); @@ -4706,10 +4106,7 @@ alter table Contact_Schema.Address add constraint abc foreign key(address_id) references ClientServer_Schema.PrimaryKey_Seq(seq_number); > ok -drop table ClientServer_Schema.PrimaryKey_Seq; -> ok - -drop table Contact_Schema.Address; +drop table ClientServer_Schema.PrimaryKey_Seq, Contact_Schema.Address; > ok drop schema Contact_Schema restrict; @@ -4722,14 +4119,14 @@ drop schema ClientServer_Schema restrict; CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY); > ok -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT > ------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL ); > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 4 +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 4 ALTER TABLE TEST ADD CREATEDATE VARCHAR(255) DEFAULT '2001-01-01' NOT NULL; > ok @@ -4777,12 
+4174,15 @@ ALTER TABLE TEST_SEQ ALTER COLUMN ID IDENTITY; > ok INSERT INTO TEST_SEQ VALUES(NULL, '1'); +> exception NULL_NOT_ALLOWED + +INSERT INTO TEST_SEQ VALUES(DEFAULT, '1'); > update count: 1 ALTER TABLE TEST_SEQ ALTER COLUMN ID RESTART WITH 10; > ok -INSERT INTO TEST_SEQ VALUES(NULL, '10'); +INSERT INTO TEST_SEQ VALUES(DEFAULT, '10'); > update count: 1 alter table test_seq drop primary key; @@ -4806,22 +4206,22 @@ SELECT * FROM TEST_SEQ ORDER BY ID; > 20 20 > rows (ordered): 4 -SCRIPT SIMPLE NOPASSWORDS NOSETTINGS; +SCRIPT SIMPLE NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT -> -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.TEST_SEQ; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CREATE INDEX "PUBLIC"."IDXNAME" ON "PUBLIC"."TEST"("NAME"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "NAME" VARCHAR(255) DEFAULT 1, "CREATEDATE" VARCHAR(255) DEFAULT '2001-01-01' NOT NULL, "MODIFY_DATE" TIMESTAMP ); -> CREATE MEMORY TABLE "PUBLIC"."TEST_SEQ"( "ID" INT DEFAULT 20 NOT NULL, "DATA" VARCHAR ); +> -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> INSERT INTO "PUBLIC"."TEST" VALUES(1, 'Hi', '2001-01-01', NULL); +> CREATE MEMORY TABLE "PUBLIC"."TEST_SEQ"( "ID" INTEGER DEFAULT 20 NOT NULL, "DATA" CHARACTER VARYING ); +> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.TEST_SEQ; > INSERT INTO "PUBLIC"."TEST_SEQ" VALUES(-1, '-1'); > INSERT INTO "PUBLIC"."TEST_SEQ" VALUES(1, '1'); > INSERT INTO "PUBLIC"."TEST_SEQ" VALUES(10, '10'); > INSERT INTO "PUBLIC"."TEST_SEQ" VALUES(20, '20'); -> rows: 12 +> CREATE MEMORY TABLE 
"PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255) DEFAULT 1, "CREATEDATE" CHARACTER VARYING(255) DEFAULT '2001-01-01' NOT NULL, "MODIFY_DATE" TIMESTAMP ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES(1, 'Hi', '2001-01-01', NULL); +> CREATE INDEX "PUBLIC"."IDXNAME" ON "PUBLIC"."TEST"("NAME" NULLS FIRST); +> rows (ordered): 12 CREATE UNIQUE INDEX IDX_NAME_ID ON TEST(ID, NAME); > ok @@ -4841,28 +4241,28 @@ ALTER TABLE TEST DROP NAME; DROP TABLE TEST_SEQ; > ok -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT -> ------------------------------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "CREATEDATE" VARCHAR(255) DEFAULT '2001-01-01' NOT NULL, "MODIFY_DATE" TIMESTAMP ); +> --------------------------------------------------------------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "CREATEDATE" CHARACTER VARYING(255) DEFAULT '2001-01-01' NOT NULL, "MODIFY_DATE" TIMESTAMP ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; > INSERT INTO "PUBLIC"."TEST" VALUES (1, '2001-01-01', NULL); -> rows: 5 +> rows (ordered): 5 ALTER TABLE TEST ADD NAME VARCHAR(255) NULL BEFORE CREATEDATE; > ok -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT -> 
---------------------------------------------------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "NAME" VARCHAR(255), "CREATEDATE" VARCHAR(255) DEFAULT '2001-01-01' NOT NULL, "MODIFY_DATE" TIMESTAMP ); +> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255), "CREATEDATE" CHARACTER VARYING(255) DEFAULT '2001-01-01' NOT NULL, "MODIFY_DATE" TIMESTAMP ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; > INSERT INTO "PUBLIC"."TEST" VALUES (1, NULL, '2001-01-01', NULL); -> rows: 5 +> rows (ordered): 5 UPDATE TEST SET NAME = 'Hi'; > update count: 1 @@ -4923,18 +4323,17 @@ select * from test; drop table test; > ok ---- autoIncrement ---------------------------------------------------------------------------------------------- CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR); > ok -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT -> ------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "NAME" VARCHAR ); +> --------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 4 +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" 
INTEGER NOT NULL, "NAME" CHARACTER VARYING ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 4 INSERT INTO TEST(ID, NAME) VALUES(1, 'Hi'), (2, 'World'); > update count: 2 @@ -4972,23 +4371,6 @@ SELECT TOP 2 * FROM TEST ORDER BY ID; > 2 World > rows (ordered): 2 -SELECT LIMIT (0+0) (2+0) * FROM TEST ORDER BY ID; -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows (ordered): 2 - -SELECT LIMIT (1+0) (2+0) NAME, -ID, ID _ID_ FROM TEST ORDER BY _ID_; -> NAME - ID _ID_ -> ----- ---- ---- -> World -2 2 -> with -3 3 -> rows (ordered): 2 - -EXPLAIN SELECT LIMIT (1+0) (2+0) * FROM TEST ORDER BY ID; ->> SELECT "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ ORDER BY 1 OFFSET 1 ROW FETCH NEXT 2 ROWS ONLY /* index sorted */ - SELECT * FROM TEST ORDER BY ID LIMIT 2+0 OFFSET 1+0; > ID NAME > -- ----- @@ -5035,7 +4417,7 @@ SELECT * FROM (SELECT ID FROM TEST GROUP BY ID); > rows: 5 EXPLAIN SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY ID LIMIT 2+0 OFFSET 1+0; ->> (SELECT "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) UNION ALL (SELECT "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) ORDER BY 1 OFFSET 1 ROW FETCH NEXT 2 ROWS ONLY +>> (SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) UNION ALL (SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) ORDER BY 1 OFFSET 1 ROW FETCH NEXT 2 ROWS ONLY EXPLAIN DELETE FROM TEST WHERE ID=1; >> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 @@ -5056,7 +4438,7 @@ SELECT * FROM TEST2COL WHERE A=0 AND B=0; > rows: 1 EXPLAIN SELECT * FROM TEST2COL WHERE A=0 AND B=0; ->> SELECT "TEST2COL"."A", "TEST2COL"."B", "TEST2COL"."C" FROM "PUBLIC"."TEST2COL" /* PUBLIC.PRIMARY_KEY_E: A = 0 AND B = 0 */ WHERE (("A" = 0) AND ("B" = 
0)) AND ("A" = "B") +>> SELECT "PUBLIC"."TEST2COL"."A", "PUBLIC"."TEST2COL"."B", "PUBLIC"."TEST2COL"."C" FROM "PUBLIC"."TEST2COL" /* PUBLIC.PRIMARY_KEY_E: A = 0 AND B = 0 */ WHERE ("A" = 0) AND ("B" = 0) SELECT * FROM TEST2COL WHERE A=0; > A B C @@ -5066,7 +4448,7 @@ SELECT * FROM TEST2COL WHERE A=0; > rows: 2 EXPLAIN SELECT * FROM TEST2COL WHERE A=0; ->> SELECT "TEST2COL"."A", "TEST2COL"."B", "TEST2COL"."C" FROM "PUBLIC"."TEST2COL" /* PUBLIC.PRIMARY_KEY_E: A = 0 */ WHERE "A" = 0 +>> SELECT "PUBLIC"."TEST2COL"."A", "PUBLIC"."TEST2COL"."B", "PUBLIC"."TEST2COL"."C" FROM "PUBLIC"."TEST2COL" /* PUBLIC.PRIMARY_KEY_E: A = 0 */ WHERE "A" = 0 SELECT * FROM TEST2COL WHERE B=0; > A B C @@ -5076,7 +4458,7 @@ SELECT * FROM TEST2COL WHERE B=0; > rows: 2 EXPLAIN SELECT * FROM TEST2COL WHERE B=0; ->> SELECT "TEST2COL"."A", "TEST2COL"."B", "TEST2COL"."C" FROM "PUBLIC"."TEST2COL" /* PUBLIC.TEST2COL.tableScan */ WHERE "B" = 0 +>> SELECT "PUBLIC"."TEST2COL"."A", "PUBLIC"."TEST2COL"."B", "PUBLIC"."TEST2COL"."C" FROM "PUBLIC"."TEST2COL" /* PUBLIC.TEST2COL.tableScan */ WHERE "B" = 0 DROP TABLE TEST2COL; > ok @@ -5120,8 +4502,8 @@ GRANT UPDATE ON TEST TO TEST_ROLE; GRANT TEST_ROLE TO TEST_USER; > ok -SELECT NAME FROM INFORMATION_SCHEMA.ROLES; -> NAME +SELECT ROLE_NAME FROM INFORMATION_SCHEMA.ROLES; +> ROLE_NAME > --------- > PUBLIC > TEST_ROLE @@ -5130,17 +4512,17 @@ SELECT NAME FROM INFORMATION_SCHEMA.ROLES; SELECT GRANTEE, GRANTEETYPE, GRANTEDROLE, RIGHTS, TABLE_SCHEMA, TABLE_NAME FROM INFORMATION_SCHEMA.RIGHTS; > GRANTEE GRANTEETYPE GRANTEDROLE RIGHTS TABLE_SCHEMA TABLE_NAME > --------- ----------- ----------- -------------- ------------ ---------- -> TEST_ROLE ROLE UPDATE PUBLIC TEST -> TEST_USER USER SELECT, INSERT PUBLIC TEST -> TEST_USER USER TEST_ROLE +> TEST_ROLE ROLE null UPDATE PUBLIC TEST +> TEST_USER USER TEST_ROLE null null null +> TEST_USER USER null SELECT, INSERT PUBLIC TEST > rows: 3 SELECT * FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES; -> GRANTOR GRANTEE TABLE_CATALOG 
TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE -> ------- --------- ------------- ------------ ---------- -------------- ------------ -> null TEST_ROLE SCRIPT PUBLIC TEST UPDATE NO -> null TEST_USER SCRIPT PUBLIC TEST INSERT NO -> null TEST_USER SCRIPT PUBLIC TEST SELECT NO +> GRANTOR GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE WITH_HIERARCHY +> ------- --------- ------------- ------------ ---------- -------------- ------------ -------------- +> null TEST_ROLE SCRIPT PUBLIC TEST UPDATE NO NO +> null TEST_USER SCRIPT PUBLIC TEST INSERT NO NO +> null TEST_USER SCRIPT PUBLIC TEST SELECT NO NO > rows: 3 SELECT * FROM INFORMATION_SCHEMA.COLUMN_PRIVILEGES; @@ -5160,15 +4542,15 @@ REVOKE TEST_ROLE FROM TEST_USER; SELECT GRANTEE, GRANTEETYPE, GRANTEDROLE, RIGHTS, TABLE_NAME FROM INFORMATION_SCHEMA.RIGHTS; > GRANTEE GRANTEETYPE GRANTEDROLE RIGHTS TABLE_NAME > --------- ----------- ----------- ------ ---------- -> TEST_ROLE ROLE UPDATE TEST -> TEST_USER USER SELECT TEST +> TEST_ROLE ROLE null UPDATE TEST +> TEST_USER USER null SELECT TEST > rows: 2 SELECT * FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES; -> GRANTOR GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE -> ------- --------- ------------- ------------ ---------- -------------- ------------ -> null TEST_ROLE SCRIPT PUBLIC TEST UPDATE NO -> null TEST_USER SCRIPT PUBLIC TEST SELECT NO +> GRANTOR GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE WITH_HIERARCHY +> ------- --------- ------------- ------------ ---------- -------------- ------------ -------------- +> null TEST_ROLE SCRIPT PUBLIC TEST UPDATE NO NO +> null TEST_USER SCRIPT PUBLIC TEST SELECT NO NO > rows: 2 DROP USER TEST_USER; @@ -5181,14 +4563,14 @@ DROP ROLE TEST_ROLE; > ok SELECT * FROM INFORMATION_SCHEMA.ROLES; -> NAME REMARKS ID -> ------ ------- -- -> PUBLIC 0 +> ROLE_NAME REMARKS +> --------- ------- +> PUBLIC null > rows: 1 SELECT * FROM INFORMATION_SCHEMA.RIGHTS; -> 
GRANTEE GRANTEETYPE GRANTEDROLE RIGHTS TABLE_SCHEMA TABLE_NAME ID -> ------- ----------- ----------- ------ ------------ ---------- -- +> GRANTEE GRANTEETYPE GRANTEDROLE RIGHTS TABLE_SCHEMA TABLE_NAME +> ------- ----------- ----------- ------ ------------ ---------- > rows: 0 --- plan ---------------------------------------------------------------------------------------------- @@ -5210,39 +4592,37 @@ EXPLAIN INSERT INTO TEST VALUES(1, 'Test'), (2, 'World'); >> INSERT INTO "PUBLIC"."TEST"("ID", "NAME") VALUES (1, 'Test'), (2, 'World') EXPLAIN INSERT INTO TEST SELECT DISTINCT ID+1, NAME FROM TEST; ->> INSERT INTO "PUBLIC"."TEST"("ID", "NAME") SELECT DISTINCT ("ID" + 1), "NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ +>> INSERT INTO "PUBLIC"."TEST"("ID", "NAME") SELECT DISTINCT "ID" + 1, "NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ EXPLAIN SELECT DISTINCT ID + 1, NAME FROM TEST; ->> SELECT DISTINCT ("ID" + 1), "NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ +>> SELECT DISTINCT "ID" + 1, "NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ EXPLAIN SELECT * FROM TEST WHERE 1=0; ->> SELECT "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE EXPLAIN SELECT TOP 1 * FROM TEST FOR UPDATE; ->> SELECT "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST ROW ONLY FOR UPDATE +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST ROW ONLY FOR UPDATE EXPLAIN SELECT COUNT(NAME) FROM TEST WHERE ID=1; >> SELECT COUNT("NAME") FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 EXPLAIN SELECT * FROM TEST WHERE (ID>=1 AND ID<=2) OR (ID>0 AND ID<3) AND (ID<>6) ORDER BY NAME NULLS FIRST, 1 NULLS LAST, (1+1) DESC; ->> SELECT "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST" /* 
PUBLIC.TEST.tableScan */ WHERE (("ID" >= 1) AND ("ID" <= 2)) OR (("ID" <> 6) AND (("ID" > 0) AND ("ID" < 3))) ORDER BY 2 NULLS FIRST, 1 NULLS LAST, =2 DESC +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE (("ID" >= 1) AND ("ID" <= 2)) OR (("ID" <> 6) AND ("ID" > 0) AND ("ID" < 3)) ORDER BY 2 NULLS FIRST, 1 NULLS LAST EXPLAIN SELECT * FROM TEST WHERE ID=1 GROUP BY NAME, ID; ->> SELECT "TEST"."ID", "TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 GROUP BY "NAME", "ID" +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 GROUP BY "NAME", "ID" EXPLAIN PLAN FOR UPDATE TEST SET NAME='Hello', ID=1 WHERE NAME LIKE 'T%' ESCAPE 'x'; -#+mvStore#>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "NAME" = 'Hello', "ID" = 1 WHERE "NAME" LIKE 'T%' ESCAPE 'x' -#-mvStore#>> UPDATE "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ SET "NAME" = 'Hello', "ID" = 1 WHERE "NAME" LIKE 'T%' ESCAPE 'x' +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "ID" = 1, "NAME" = 'Hello' WHERE "NAME" LIKE 'T%' ESCAPE 'x' EXPLAIN PLAN FOR DELETE FROM TEST; -#+mvStore#>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ -#-mvStore#>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ EXPLAIN PLAN FOR SELECT NAME, COUNT(*) FROM TEST GROUP BY NAME HAVING COUNT(*) > 1; >> SELECT "NAME", COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "NAME" HAVING COUNT(*) > 1 EXPLAIN PLAN FOR SELECT * FROM test t1 inner join test t2 on t1.id=t2.id and t2.name is not null where t1.id=1; ->> SELECT "T1"."ID", "T1"."NAME", "T2"."ID", "T2"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ /* WHERE T1.ID = 1 */ INNER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ ON 1=1 WHERE ("T1"."ID" = 1) AND (("T2"."NAME" IS NOT NULL) AND 
("T1"."ID" = "T2"."ID")) +>> SELECT "T1"."ID", "T1"."NAME", "T2"."ID", "T2"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ /* WHERE T1.ID = 1 */ INNER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ ON 1=1 WHERE ("T1"."ID" = 1) AND ("T2"."NAME" IS NOT NULL) AND ("T1"."ID" = "T2"."ID") EXPLAIN PLAN FOR SELECT * FROM test t1 left outer join test t2 on t1.id=t2.id and t2.name is not null where t1.id=1; >> SELECT "T1"."ID", "T1"."NAME", "T2"."ID", "T2"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ /* WHERE T1.ID = 1 */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ ON ("T2"."NAME" IS NOT NULL) AND ("T1"."ID" = "T2"."ID") WHERE "T1"."ID" = 1 @@ -5257,16 +4637,13 @@ EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE ID IN(1, 2); >> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(1, 2) */ WHERE "ID" IN(1, 2) EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE ID IN(SELECT ID FROM TEST); -#+mvStore#>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(SELECT ID FROM PUBLIC.TEST /++ PUBLIC.TEST.tableScan ++/) */ WHERE "ID" IN( SELECT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) -#-mvStore#>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(SELECT ID FROM PUBLIC.TEST /++ PUBLIC.PRIMARY_KEY_2 ++/) */ WHERE "ID" IN( SELECT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */) +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(SELECT DISTINCT ID FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */) */ WHERE "ID" IN( SELECT DISTINCT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE ID NOT IN(SELECT ID FROM TEST); -#+mvStore#>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE NOT ("ID" IN( SELECT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */)) 
-#-mvStore#>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE NOT ("ID" IN( SELECT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */)) +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE "ID" NOT IN( SELECT DISTINCT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) EXPLAIN PLAN FOR SELECT CAST(ID AS VARCHAR(255)) FROM TEST; -#+mvStore#>> SELECT CAST("ID" AS VARCHAR(255)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ -#-mvStore#>> SELECT CAST("ID" AS VARCHAR(255)) FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ +>> SELECT CAST("ID" AS CHARACTER VARYING(255)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ EXPLAIN PLAN FOR SELECT LEFT(NAME, 2) FROM TEST; >> SELECT LEFT("NAME", 2) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ @@ -5303,7 +4680,7 @@ SELECT * FROM SYSTEM_RANGE(1,2) UNION ALL SELECT * FROM SYSTEM_RANGE(1,2) ORDER > rows (ordered): 4 EXPLAIN (SELECT * FROM SYSTEM_RANGE(1,2) UNION ALL SELECT * FROM SYSTEM_RANGE(1,2) ORDER BY 1); ->> (SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* PUBLIC.RANGE_INDEX */) UNION ALL (SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* PUBLIC.RANGE_INDEX */) ORDER BY 1 +>> (SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* range index */) UNION ALL (SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* range index */) ORDER BY 1 CREATE TABLE CHILDREN(ID INT PRIMARY KEY, NAME VARCHAR(255), CLASS INT); > ok @@ -5344,7 +4721,7 @@ SELECT * FROM CHILDREN UNION ALL SELECT * FROM CHILDREN ORDER BY ID, NAME FOR UP > rows (ordered): 8 EXPLAIN SELECT * FROM CHILDREN UNION ALL SELECT * FROM CHILDREN ORDER BY ID, NAME FOR UPDATE; ->> (SELECT "CHILDREN"."ID", "CHILDREN"."NAME", "CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ FOR UPDATE) UNION ALL (SELECT "CHILDREN"."ID", "CHILDREN"."NAME", "CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ FOR UPDATE) ORDER BY 1, 2 FOR UPDATE +>> 
(SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ FOR UPDATE) UNION ALL (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ FOR UPDATE) ORDER BY 1, 2 FOR UPDATE SELECT 'Child', ID, NAME FROM CHILDREN UNION SELECT 'Class', ID, NAME FROM CLASSES; > 'Child' ID NAME @@ -5372,11 +4749,10 @@ SELECT * FROM CHILDREN EXCEPT SELECT * FROM CHILDREN WHERE CLASS=0; > rows: 3 EXPLAIN SELECT * FROM CHILDREN EXCEPT SELECT * FROM CHILDREN WHERE CLASS=0; ->> (SELECT "CHILDREN"."ID", "CHILDREN"."NAME", "CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) EXCEPT (SELECT "CHILDREN"."ID", "CHILDREN"."NAME", "CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ WHERE "CLASS" = 0) +>> (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) EXCEPT (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ WHERE "CLASS" = 0) EXPLAIN SELECT CLASS FROM CHILDREN INTERSECT SELECT ID FROM CLASSES; -#+mvStore#>> (SELECT "CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) INTERSECT (SELECT "ID" FROM "PUBLIC"."CLASSES" /* PUBLIC.CLASSES.tableScan */) -#-mvStore#>> (SELECT "CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) INTERSECT (SELECT "ID" FROM "PUBLIC"."CLASSES" /* PUBLIC.PRIMARY_KEY_5 */) +>> (SELECT "CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) INTERSECT (SELECT "ID" FROM "PUBLIC"."CLASSES" /* PUBLIC.CLASSES.tableScan */) SELECT CLASS FROM CHILDREN INTERSECT SELECT ID FROM CLASSES; > CLASS @@ -5387,7 +4763,7 @@ SELECT CLASS FROM CHILDREN INTERSECT SELECT ID FROM CLASSES; > rows: 3 EXPLAIN SELECT * FROM CHILDREN EXCEPT SELECT * FROM 
CHILDREN WHERE CLASS=0; ->> (SELECT "CHILDREN"."ID", "CHILDREN"."NAME", "CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) EXCEPT (SELECT "CHILDREN"."ID", "CHILDREN"."NAME", "CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ WHERE "CLASS" = 0) +>> (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) EXCEPT (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ WHERE "CLASS" = 0) SELECT * FROM CHILDREN CH, CLASSES CL WHERE CH.CLASS = CL.ID; > ID NAME CLASS ID NAME @@ -5475,7 +4851,7 @@ SELECT * FROM V_UNION WHERE ID=1; > rows: 2 EXPLAIN SELECT * FROM V_UNION WHERE ID=1; ->> SELECT "V_UNION"."ID", "V_UNION"."NAME", "V_UNION"."CLASS" FROM "PUBLIC"."V_UNION" /* (SELECT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /++ PUBLIC.PRIMARY_KEY_9: ID IS ?1 ++/ /++ scanCount: 2 ++/ WHERE CHILDREN.ID IS ?1) UNION ALL (SELECT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /++ PUBLIC.PRIMARY_KEY_9: ID IS ?1 ++/ /++ scanCount: 2 ++/ WHERE CHILDREN.ID IS ?1): ID = 1 */ WHERE "ID" = 1 +>> SELECT "PUBLIC"."V_UNION"."ID", "PUBLIC"."V_UNION"."NAME", "PUBLIC"."V_UNION"."CLASS" FROM "PUBLIC"."V_UNION" /* (SELECT PUBLIC.CHILDREN.ID, PUBLIC.CHILDREN.NAME, PUBLIC.CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.PRIMARY_KEY_9: ID IS NOT DISTINCT FROM ?1 */ /* scanCount: 2 */ WHERE PUBLIC.CHILDREN.ID IS NOT DISTINCT FROM ?1) UNION ALL (SELECT PUBLIC.CHILDREN.ID, PUBLIC.CHILDREN.NAME, PUBLIC.CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.PRIMARY_KEY_9: ID IS NOT DISTINCT FROM ?1 */ /* scanCount: 2 */ WHERE PUBLIC.CHILDREN.ID IS NOT DISTINCT FROM ?1): ID = 1 */ WHERE "ID" = 1 CREATE VIEW V_EXCEPT AS SELECT * FROM CHILDREN EXCEPT SELECT * FROM CHILDREN WHERE ID=2; > ok @@ -5487,7 +4863,7 @@ SELECT * FROM V_EXCEPT WHERE ID=1; > rows: 
1 EXPLAIN SELECT * FROM V_EXCEPT WHERE ID=1; ->> SELECT "V_EXCEPT"."ID", "V_EXCEPT"."NAME", "V_EXCEPT"."CLASS" FROM "PUBLIC"."V_EXCEPT" /* (SELECT DISTINCT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /++ PUBLIC.PRIMARY_KEY_9: ID IS ?1 ++/ /++ scanCount: 2 ++/ WHERE CHILDREN.ID IS ?1) EXCEPT (SELECT DISTINCT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /++ PUBLIC.PRIMARY_KEY_9: ID = 2 ++/ /++ scanCount: 2 ++/ WHERE ID = 2): ID = 1 */ WHERE "ID" = 1 +>> SELECT "PUBLIC"."V_EXCEPT"."ID", "PUBLIC"."V_EXCEPT"."NAME", "PUBLIC"."V_EXCEPT"."CLASS" FROM "PUBLIC"."V_EXCEPT" /* (SELECT DISTINCT PUBLIC.CHILDREN.ID, PUBLIC.CHILDREN.NAME, PUBLIC.CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.PRIMARY_KEY_9: ID IS NOT DISTINCT FROM ?1 */ /* scanCount: 2 */ WHERE PUBLIC.CHILDREN.ID IS NOT DISTINCT FROM ?1) EXCEPT (SELECT DISTINCT PUBLIC.CHILDREN.ID, PUBLIC.CHILDREN.NAME, PUBLIC.CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.PRIMARY_KEY_9: ID = 2 */ /* scanCount: 2 */ WHERE ID = 2): ID = 1 */ WHERE "ID" = 1 CREATE VIEW V_INTERSECT AS SELECT ID, NAME FROM CHILDREN INTERSECT SELECT * FROM CLASSES; > ok @@ -5498,7 +4874,7 @@ SELECT * FROM V_INTERSECT WHERE ID=1; > rows: 0 EXPLAIN SELECT * FROM V_INTERSECT WHERE ID=1; ->> SELECT "V_INTERSECT"."ID", "V_INTERSECT"."NAME" FROM "PUBLIC"."V_INTERSECT" /* (SELECT DISTINCT ID, NAME FROM PUBLIC.CHILDREN /++ PUBLIC.PRIMARY_KEY_9: ID IS ?1 ++/ /++ scanCount: 2 ++/ WHERE ID IS ?1) INTERSECT (SELECT DISTINCT CLASSES.ID, CLASSES.NAME FROM PUBLIC.CLASSES /++ PUBLIC.PRIMARY_KEY_5: ID IS ?1 ++/ /++ scanCount: 2 ++/ WHERE CLASSES.ID IS ?1): ID = 1 */ WHERE "ID" = 1 +>> SELECT "PUBLIC"."V_INTERSECT"."ID", "PUBLIC"."V_INTERSECT"."NAME" FROM "PUBLIC"."V_INTERSECT" /* (SELECT DISTINCT ID, NAME FROM PUBLIC.CHILDREN /* PUBLIC.PRIMARY_KEY_9: ID IS NOT DISTINCT FROM ?1 */ /* scanCount: 2 */ WHERE ID IS NOT DISTINCT FROM ?1) INTERSECT (SELECT DISTINCT PUBLIC.CLASSES.ID, PUBLIC.CLASSES.NAME FROM PUBLIC.CLASSES /* 
PUBLIC.PRIMARY_KEY_5: ID IS NOT DISTINCT FROM ?1 */ /* scanCount: 2 */ WHERE PUBLIC.CLASSES.ID IS NOT DISTINCT FROM ?1): ID = 1 */ WHERE "ID" = 1 DROP VIEW V_UNION; > ok @@ -5591,11 +4967,11 @@ SELECT * FROM TEST_ALL WHERE AID>=2; CREATE VIEW TEST_A_SUB AS SELECT * FROM TEST_A WHERE ID < 2; > ok -SELECT TABLE_NAME, SQL FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW'; -> TABLE_NAME SQL -> ---------- ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> TEST_ALL CREATE FORCE VIEW "PUBLIC"."TEST_ALL"("AID", "A_NAME", "BID", "B_NAME") AS SELECT "A"."ID" AS "AID", "A"."NAME" AS "A_NAME", "B"."ID" AS "BID", "B"."NAME" AS "B_NAME" FROM "PUBLIC"."TEST_A" "A" INNER JOIN "PUBLIC"."TEST_B" "B" ON 1=1 WHERE "A"."ID" = "B"."ID" -> TEST_A_SUB CREATE FORCE VIEW "PUBLIC"."TEST_A_SUB"("ID", "NAME") AS SELECT "TEST_A"."ID", "TEST_A"."NAME" FROM "PUBLIC"."TEST_A" WHERE "ID" < 2 +SELECT TABLE_NAME, VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME VIEW_DEFINITION +> ---------- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> TEST_ALL SELECT "A"."ID" AS "AID", "A"."NAME" AS "A_NAME", "B"."ID" AS "BID", "B"."NAME" AS "B_NAME" FROM "PUBLIC"."TEST_A" "A" INNER JOIN "PUBLIC"."TEST_B" "B" ON 1=1 WHERE "A"."ID" = "B"."ID" +> TEST_A_SUB SELECT "PUBLIC"."TEST_A"."ID", "PUBLIC"."TEST_A"."NAME" FROM "PUBLIC"."TEST_A" WHERE "ID" < 2 > rows: 2 SELECT * FROM TEST_A_SUB WHERE NAME IS NOT NULL; @@ -5650,243 +5026,100 @@ ROLLBACK TO SAVEPOINT NOT_EXISTING; > exception SAVEPOINT_IS_INVALID_1 ROLLBACK TO SAVEPOINT TEST; -> ok - -SELECT * FROM TEST; -> ID NAME -> -- ----- -> 1 Test2 -> rows: 1 - -ROLLBACK WORK; -> 
ok - -SELECT * FROM TEST; -> ID NAME -> -- ---- -> rows: 0 - -INSERT INTO TEST VALUES(1, 'Test3'); -> update count: 1 - -SAVEPOINT TEST3; -> ok - -INSERT INTO TEST VALUES(2, 'World2'); -> update count: 1 - -ROLLBACK TO SAVEPOINT TEST3; -> ok - -COMMIT WORK; -> ok - -SELECT * FROM TEST; -> ID NAME -> -- ----- -> 1 Test3 -> rows: 1 - -SET AUTOCOMMIT TRUE; -> ok - -DROP TABLE TEST; -> ok - ---- insert..select ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(0, 'Hello'); -> update count: 1 - -INSERT INTO TEST SELECT ID+1, NAME||'+' FROM TEST; -> update count: 1 - -INSERT INTO TEST SELECT ID+2, NAME||'+' FROM TEST; -> update count: 2 - -INSERT INTO TEST SELECT ID+4, NAME||'+' FROM TEST; -> update count: 4 - -SELECT * FROM TEST; -> ID NAME -> -- -------- -> 0 Hello -> 1 Hello+ -> 2 Hello+ -> 3 Hello++ -> 4 Hello+ -> 5 Hello++ -> 6 Hello++ -> 7 Hello+++ -> rows: 8 - -DROP TABLE TEST; -> ok - ---- syntax errors ---------------------------------------------------------------------------------------------- -CREATE SOMETHING STRANGE; -> exception SYNTAX_ERROR_2 - -SELECT T1.* T2; -> exception SYNTAX_ERROR_1 - -select replace('abchihihi', 'i', 'o') abcehohoho, replace('this is tom', 'i') 1e_th_st_om from test; -> exception SYNTAX_ERROR_1 - -select monthname(date )'005-0E9-12') d_set fm test; -> exception SYNTAX_ERROR_1 - -call substring('bob', 2, -1); -> '' -> -- -> -> rows: 1 - ---- like ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(0, NULL); -> update count: 1 - -INSERT INTO TEST VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO TEST VALUES(2, 'World'); -> update count: 1 - -INSERT INTO TEST VALUES(3, 'Word'); -> update count: 1 - -INSERT INTO TEST VALUES(4, 'Wo%'); -> update count: 1 - -SELECT * 
FROM TEST WHERE NAME IS NULL; -> ID NAME -> -- ---- -> 0 null -> rows: 1 - -SELECT * FROM TEST WHERE NAME IS NOT NULL; -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> 3 Word -> 4 Wo% -> rows: 4 - -SELECT * FROM TEST WHERE NAME BETWEEN 'H' AND 'Word'; -> ID NAME -> -- ----- -> 1 Hello -> 3 Word -> 4 Wo% -> rows: 3 - -SELECT * FROM TEST WHERE ID >= 2 AND ID <= 3 AND ID <> 2; -> ID NAME -> -- ---- -> 3 Word -> rows: 1 - -SELECT * FROM TEST WHERE ID>0 AND ID<4 AND ID!=2; -> ID NAME -> -- ----- -> 1 Hello -> 3 Word -> rows: 2 - -SELECT * FROM TEST WHERE 'Hello' LIKE '_el%'; -> ID NAME -> -- ----- -> 0 null -> 1 Hello -> 2 World -> 3 Word -> 4 Wo% -> rows: 5 - -SELECT * FROM TEST WHERE NAME LIKE 'Hello%'; -> ID NAME -> -- ----- -> 1 Hello -> rows: 1 - -SELECT * FROM TEST WHERE NAME ILIKE 'hello%'; -> ID NAME -> -- ----- -> 1 Hello -> rows: 1 - -SELECT * FROM TEST WHERE NAME ILIKE 'xxx%'; -> ID NAME -> -- ---- -> rows: 0 - -SELECT * FROM TEST WHERE NAME LIKE 'Wo%'; -> ID NAME -> -- ----- -> 2 World -> 3 Word -> 4 Wo% -> rows: 3 +> ok -SELECT * FROM TEST WHERE NAME LIKE 'Wo\%'; +SELECT * FROM TEST; > ID NAME -> -- ---- -> 4 Wo% +> -- ----- +> 1 Test2 > rows: 1 -SELECT * FROM TEST WHERE NAME LIKE 'WoX%' ESCAPE 'X'; -> ID NAME -> -- ---- -> 4 Wo% -> rows: 1 +ROLLBACK WORK; +> ok -SELECT * FROM TEST WHERE NAME LIKE 'Word_'; +SELECT * FROM TEST; > ID NAME > -- ---- > rows: 0 -SELECT * FROM TEST WHERE NAME LIKE '%Hello%'; -> ID NAME -> -- ----- -> 1 Hello -> rows: 1 +INSERT INTO TEST VALUES(1, 'Test3'); +> update count: 1 + +SAVEPOINT TEST3; +> ok + +INSERT INTO TEST VALUES(2, 'World2'); +> update count: 1 + +ROLLBACK TO SAVEPOINT TEST3; +> ok + +COMMIT WORK; +> ok -SELECT * FROM TEST WHERE 'Hello' LIKE NAME; +SELECT * FROM TEST; > ID NAME > -- ----- -> 1 Hello +> 1 Test3 > rows: 1 -SELECT T1.*, T2.* FROM TEST AS T1, TEST AS T2 WHERE T1.ID = T2.ID AND T1.NAME LIKE T2.NAME || '%'; -> ID NAME ID NAME -> -- ----- -- ----- -> 1 Hello 1 Hello -> 2 World 2 World -> 3 Word 3 Word -> 4 
Wo% 4 Wo% -> rows: 4 +SET AUTOCOMMIT TRUE; +> ok -SELECT ID, MAX(NAME) FROM TEST GROUP BY ID HAVING MAX(NAME) = 'World'; -> ID MAX(NAME) -> -- --------- -> 2 World -> rows: 1 +DROP TABLE TEST; +> ok -SELECT ID, MAX(NAME) FROM TEST GROUP BY ID HAVING MAX(NAME) LIKE 'World%'; -> ID MAX(NAME) -> -- --------- -> 2 World -> rows: 1 +--- insert..select ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(0, 'Hello'); +> update count: 1 + +INSERT INTO TEST SELECT ID+1, NAME||'+' FROM TEST; +> update count: 1 + +INSERT INTO TEST SELECT ID+2, NAME||'+' FROM TEST; +> update count: 2 + +INSERT INTO TEST SELECT ID+4, NAME||'+' FROM TEST; +> update count: 4 + +SELECT * FROM TEST; +> ID NAME +> -- -------- +> 0 Hello +> 1 Hello+ +> 2 Hello+ +> 3 Hello++ +> 4 Hello+ +> 5 Hello++ +> 6 Hello++ +> 7 Hello+++ +> rows: 8 DROP TABLE TEST; > ok +--- syntax errors ---------------------------------------------------------------------------------------------- +CREATE SOMETHING STRANGE; +> exception SYNTAX_ERROR_2 + +SELECT T1.* T2; +> exception SYNTAX_ERROR_1 + +select replace('abchihihi', 'i', 'o') abcehohoho, replace('this is tom', 'i') 1e_th_st_om from test; +> exception SYNTAX_ERROR_1 + +select monthname(date )'005-0E9-12') d_set fm test; +> exception SYNTAX_ERROR_1 + +call substring('bob', 2, -1); +> '' +> -- +> +> rows: 1 + --- exists ---------------------------------------------------------------------------------------------- CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); > ok @@ -5939,19 +5172,19 @@ SELECT * FROM TEST T WHERE T.ID = (SELECT T2.ID FROM TEST T2 WHERE T2.ID=T.ID); > rows: 3 SELECT (SELECT T2.NAME FROM TEST T2 WHERE T2.ID=T.ID), T.NAME FROM TEST T; -> SELECT T2.NAME FROM PUBLIC.TEST T2 /* PUBLIC.PRIMARY_KEY_2: ID = T.ID */ /* scanCount: 2 */ WHERE T2.ID = T.ID NAME -> 
-------------------------------------------------------------------------------------------------------------- ----- -> Hello Hello -> World World -> null null +> (SELECT T2.NAME FROM PUBLIC.TEST T2 WHERE T2.ID = T.ID) NAME +> ------------------------------------------------------- ----- +> Hello Hello +> World World +> null null > rows: 3 SELECT (SELECT SUM(T2.ID) FROM TEST T2 WHERE T2.ID>T.ID), T.ID FROM TEST T; -> SELECT SUM(T2.ID) FROM PUBLIC.TEST T2 /* PUBLIC.PRIMARY_KEY_2: ID > T.ID */ /* scanCount: 2 */ WHERE T2.ID > T.ID ID -> ----------------------------------------------------------------------------------------------------------------- -- -> 2 1 -> 3 0 -> null 2 +> (SELECT SUM(T2.ID) FROM PUBLIC.TEST T2 WHERE T2.ID > T.ID) ID +> ---------------------------------------------------------- -- +> 2 1 +> 3 0 +> null 2 > rows: 3 select * from test t where t.id+1 in (select id from test); @@ -5996,7 +5229,7 @@ DROP TABLE TEST; > ok --- group by ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(A INT, B INT, VALUE INT, UNIQUE(A, B)); +CREATE TABLE TEST(A INT, B INT, "VALUE" INT, UNIQUE(A, B)); > ok INSERT INTO TEST VALUES(?, ?, ?); @@ -6011,7 +5244,7 @@ NULL, 1, 10 }; > update count: 7 -SELECT A, B, COUNT(*) CAL, COUNT(A) CA, COUNT(B) CB, MIN(VALUE) MI, MAX(VALUE) MA, SUM(VALUE) S FROM TEST GROUP BY A, B; +SELECT A, B, COUNT(*) CAL, COUNT(A) CA, COUNT(B) CB, MIN("VALUE") MI, MAX("VALUE") MA, SUM("VALUE") S FROM TEST GROUP BY A, B; > A B CAL CA CB MI MA S > ---- ---- --- -- -- ---- ---- ---- > 0 0 1 1 1 -1 -1 -1 @@ -6027,32 +5260,37 @@ DROP TABLE TEST; > ok --- data types (blob, clob, varchar_ignorecase) ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT, XB BINARY, XBL BLOB, XO OTHER, XCL CLOB, XVI VARCHAR_IGNORECASE); +CREATE TABLE TEST(ID INT, XB BINARY(3), XBL BLOB, XO OTHER, XCL CLOB, XVI VARCHAR_IGNORECASE); > ok -INSERT INTO 
TEST VALUES(0, X '', '', '', '', ''); +INSERT INTO TEST VALUES(0, X'', X'', X'', '', ''); > update count: 1 -INSERT INTO TEST VALUES(1, X '0101', '0101', '0101', 'abc', 'aa'); +INSERT INTO TEST VALUES(1, X'0101', X'0101', X'0101', 'abc', 'aa'); > update count: 1 -INSERT INTO TEST VALUES(2, X '0AFF', '08FE', 'F0F1', 'AbCdEfG', 'ZzAaBb'); +INSERT INTO TEST VALUES(2, X'0AFF', X'08FE', X'F0F1', 'AbCdEfG', 'ZzAaBb'); > update count: 1 -INSERT INTO TEST VALUES(3, X '112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff', '112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff', '112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff', 'AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz', 
'AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz'); +INSERT INTO TEST VALUES(3, + X'112233', + X'112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff', + X'112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff', + 'AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz', + 
'AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz'); > update count: 1 INSERT INTO TEST VALUES(4, NULL, NULL, NULL, NULL, NULL); > update count: 1 -SELECT * FROM TEST; -> ID XB XBL XO XCL XVI -> -- ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> 0 -> 1 0101 0101 0101 abc aa -> 2 0aff 08fe f0f1 AbCdEfG ZzAaBb -> 3 112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff 112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff 
112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz -> 4 null null null null null +SELECT ID, XB, XBL, XO, XCL, XVI FROM TEST; +> ID XB XBL XO XCL XVI +> -- --------- --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> 0 X'000000' X'' X'' +> 1 X'010100' X'0101' X'0101' abc aa +> 2 X'0aff00' X'08fe' X'f0f1' AbCdEfG ZzAaBb +> 3 X'112233' X'112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff' 
X'112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff' AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz +> 4 null null null null null > rows: 5 SELECT ID FROM TEST WHERE XCL = XCL; @@ -6076,10 +5314,10 @@ SELECT ID FROM TEST WHERE XVI LIKE 'abc%'; > 3 > rows: 1 -SELECT 'abc', 'Papa Joe''s', CAST(-1 AS SMALLINT), CAST(2 AS BIGINT), CAST(0 AS DOUBLE), CAST('0a0f' AS BINARY) B, CAST(125 AS TINYINT), TRUE, FALSE FROM TEST WHERE ID=1; -> 'abc' 'Papa Joe''s' -1 2 0.0 B 125 TRUE FALSE -> ----- ------------- -- - --- ---- --- ---- ----- -> abc Papa Joe's -1 2 0.0 0a0f 125 TRUE FALSE +SELECT 'abc', 'Papa Joe''s', CAST(-1 AS SMALLINT), CAST(2 AS BIGINT), CAST(0 AS DOUBLE), CAST('0a0f' AS BINARY(4)) B, CAST(125 AS TINYINT), TRUE, FALSE FROM TEST WHERE ID=1; 
+> 'abc' 'Papa Joe''s' -1 2 0.0 B 125 TRUE FALSE +> ----- ------------- -- - --- ----------- --- ---- ----- +> abc Papa Joe's -1 2 0.0 X'30613066' 125 TRUE FALSE > rows: 1 -- ' This apostrophe is here to fix syntax highlighting in the text editors. @@ -6128,36 +5366,36 @@ SELECT * FROM TEST; > rows: 4 SELECT XD+1, XD-1, XD-XD FROM TEST; -> DATEADD('DAY', 1, XD) DATEADD('DAY', -1, XD) XD - XD -> --------------------- ---------------------- ---------------- -> 0001-02-04 0001-02-02 INTERVAL '0' DAY -> 0004-05-07 0004-05-05 INTERVAL '0' DAY -> 2000-01-01 1999-12-30 INTERVAL '0' DAY -> null null null +> DATEADD(DAY, 1, XD) DATEADD(DAY, -1, XD) XD - XD +> ------------------- -------------------- ---------------- +> 0001-02-04 0001-02-02 INTERVAL '0' DAY +> 0004-05-07 0004-05-05 INTERVAL '0' DAY +> 2000-01-01 1999-12-30 INTERVAL '0' DAY +> null null null > rows: 4 -SELECT ID, CAST(XT AS DATE) T2D, CAST(XTS AS DATE) TS2D, -CAST(XD AS TIME) D2T, CAST(XTS AS TIME(9)) TS2T, -CAST(XT AS TIMESTAMP) D2TS, CAST(XD AS TIMESTAMP) D2TS FROM TEST; -> ID T2D TS2D D2T TS2T D2TS D2TS -> ---- ---------- ---------- -------- ------------------ ------------------- ------------------- -> 0 1970-01-01 0002-03-04 00:00:00 00:00:00 1970-01-01 00:00:00 0001-02-03 00:00:00 -> 1 1970-01-01 0007-08-09 00:00:00 00:01:02 1970-01-01 01:02:03 0004-05-06 00:00:00 -> 2 1970-01-01 1999-12-31 00:00:00 23:59:59.123456789 1970-01-01 23:59:59 1999-12-31 00:00:00 -> null null null null null null null +SELECT ID, CAST(XTS AS DATE) TS2D, +CAST(XTS AS TIME(9)) TS2T, +CAST(XD AS TIMESTAMP) D2TS FROM TEST; +> ID TS2D TS2T D2TS +> ---- ---------- ------------------ ------------------- +> 0 0002-03-04 00:00:00 0001-02-03 00:00:00 +> 1 0007-08-09 00:01:02 0004-05-06 00:00:00 +> 2 1999-12-31 23:59:59.123456789 1999-12-31 00:00:00 +> null null null null > rows: 4 -SCRIPT SIMPLE NOPASSWORDS NOSETTINGS; +SCRIPT SIMPLE NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT > 
--------------------------------------------------------------------------------------------------------------------- -> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT, "XT" TIME, "XD" DATE, "XTS" TIMESTAMP(9) ); > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER, "XT" TIME, "XD" DATE, "XTS" TIMESTAMP(9) ); +> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.TEST; > INSERT INTO "PUBLIC"."TEST" VALUES(0, TIME '00:00:00', DATE '0001-02-03', TIMESTAMP '0002-03-04 00:00:00'); > INSERT INTO "PUBLIC"."TEST" VALUES(1, TIME '01:02:03', DATE '0004-05-06', TIMESTAMP '0007-08-09 00:01:02'); > INSERT INTO "PUBLIC"."TEST" VALUES(2, TIME '23:59:59', DATE '1999-12-31', TIMESTAMP '1999-12-31 23:59:59.123456789'); > INSERT INTO "PUBLIC"."TEST" VALUES(NULL, NULL, NULL, NULL); -> rows: 7 +> rows (ordered): 7 DROP TABLE TEST; > ok @@ -6429,7 +5667,7 @@ DROP TABLE TEST; CREATE TABLE CUSTOMER(ID INT PRIMARY KEY, NAME VARCHAR(255)); > ok -CREATE TABLE INVOICE(ID INT, CUSTOMER_ID INT, PRIMARY KEY(CUSTOMER_ID, ID), VALUE DECIMAL(10,2)); +CREATE TABLE INVOICE(ID INT, CUSTOMER_ID INT, PRIMARY KEY(CUSTOMER_ID, ID), "VALUE" DECIMAL(10,2)); > ok INSERT INTO CUSTOMER VALUES(?, ?); @@ -6483,10 +5721,7 @@ SELECT * FROM INVOICE WHERE CUSTOMER_ID IN(SELECT C.ID FROM CUSTOMER C); > rows: 5 SELECT * FROM CUSTOMER WHERE NAME IN('Lehmann', 20); -> ID NAME -> -- ------- -> 1 Lehmann -> rows: 1 +> exception DATA_CONVERSION_ERROR_1 SELECT * FROM CUSTOMER WHERE NAME NOT IN('Scott'); > ID NAME @@ -6640,7 +5875,7 @@ drop view s; drop table t; > ok -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255), VALUE DECIMAL(10,2)); +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255), "VALUE" DECIMAL(10,2)); > ok INSERT INTO TEST VALUES(?, ?, ?); @@ -6657,9 +5892,9 @@ INSERT INTO TEST VALUES(?, ?, ?); }; > update count: 9 -SELECT IFNULL(NAME, '') || ': ' || GROUP_CONCAT(VALUE ORDER BY NAME, VALUE DESC SEPARATOR ', ') FROM 
TEST GROUP BY NAME ORDER BY 1; -> (IFNULL(NAME, '') || ': ') || LISTAGG(VALUE, ', ') WITHIN GROUP (ORDER BY NAME, VALUE DESC) -> ------------------------------------------------------------------------------------------- +SELECT IFNULL(NAME, '') || ': ' || GROUP_CONCAT("VALUE" ORDER BY NAME, "VALUE" DESC SEPARATOR ', ') FROM TEST GROUP BY NAME ORDER BY 1; +> COALESCE(NAME, '') || ': ' || LISTAGG("VALUE", ', ') WITHIN GROUP (ORDER BY NAME, "VALUE" DESC) +> ----------------------------------------------------------------------------------------------- > : 3.10, -10.00 > Apples: 1.50, 1.20, 1.10 > Bananas: 2.50 @@ -6674,8 +5909,8 @@ SELECT GROUP_CONCAT(ID ORDER BY ID) FROM TEST; > rows: 1 SELECT STRING_AGG(ID,';') FROM TEST; -> LISTAGG(ID, ';') -> ----------------- +> LISTAGG(ID, ';') WITHIN GROUP (ORDER BY NULL) +> --------------------------------------------- > 1;2;3;4;5;6;7;8;9 > rows: 1 @@ -6706,24 +5941,24 @@ SELECT DISTINCT NAME FROM TEST ORDER BY NAME DESC NULLS LAST LIMIT 2 OFFSET 1; > Bananas > rows (ordered): 2 -SELECT NAME, COUNT(*), SUM(VALUE), MAX(VALUE), MIN(VALUE), AVG(VALUE), COUNT(DISTINCT VALUE) FROM TEST GROUP BY NAME; -> NAME COUNT(*) SUM(VALUE) MAX(VALUE) MIN(VALUE) AVG(VALUE) COUNT(DISTINCT VALUE) -> -------- -------- ---------- ---------- ---------- ----------------------------- --------------------- -> Apples 3 3.80 1.50 1.10 1.266666666666666666666666667 3 -> Bananas 1 2.50 2.50 2.50 2.5 1 -> Cherries 1 5.10 5.10 5.10 5.1 1 -> Oranges 2 3.85 2.05 1.80 1.925 2 -> null 2 -6.90 3.10 -10.00 -3.45 2 +SELECT NAME, COUNT(*), SUM("VALUE"), MAX("VALUE"), MIN("VALUE"), AVG("VALUE"), COUNT(DISTINCT "VALUE") FROM TEST GROUP BY NAME; +> NAME COUNT(*) SUM("VALUE") MAX("VALUE") MIN("VALUE") AVG("VALUE") COUNT(DISTINCT "VALUE") +> -------- -------- ------------ ------------ ------------ --------------- ----------------------- +> Apples 3 3.80 1.50 1.10 1.266666666667 3 +> Bananas 1 2.50 2.50 2.50 2.500000000000 1 +> Cherries 1 5.10 5.10 5.10 5.100000000000 1 
+> Oranges 2 3.85 2.05 1.80 1.925000000000 2 +> null 2 -6.90 3.10 -10.00 -3.450000000000 2 > rows: 5 -SELECT NAME, MAX(VALUE), MIN(VALUE), MAX(VALUE+1)*MIN(VALUE+1) FROM TEST GROUP BY NAME; -> NAME MAX(VALUE) MIN(VALUE) MAX(VALUE + 1) * MIN(VALUE + 1) -> -------- ---------- ---------- ------------------------------- -> Apples 1.50 1.10 5.2500 -> Bananas 2.50 2.50 12.2500 -> Cherries 5.10 5.10 37.2100 -> Oranges 2.05 1.80 8.5400 -> null 3.10 -10.00 -36.9000 +SELECT NAME, MAX("VALUE"), MIN("VALUE"), MAX("VALUE"+1)*MIN("VALUE"+1) FROM TEST GROUP BY NAME; +> NAME MAX("VALUE") MIN("VALUE") MAX("VALUE" + 1) * MIN("VALUE" + 1) +> -------- ------------ ------------ ----------------------------------- +> Apples 1.50 1.10 5.2500 +> Bananas 2.50 2.50 12.2500 +> Cherries 5.10 5.10 37.2100 +> Oranges 2.05 1.80 8.5400 +> null 3.10 -10.00 -36.9000 > rows: 5 DROP TABLE TEST; @@ -6799,7 +6034,7 @@ SELECT ID, '=', NAME FROM TEST ORDER BY 2 FOR UPDATE; > 1 = Hello > 2 = World > 3 = null -> rows (ordered): 3 +> rows: 3 DROP TABLE TEST; > ok @@ -6938,13 +6173,10 @@ CALL NEXT VALUE FOR TEST_LONG; > 90123456789012345 > rows: 1 -CALL IDENTITY(); ->> 90123456789012345 - -SELECT SEQUENCE_NAME, CURRENT_VALUE, INCREMENT FROM INFORMATION_SCHEMA.SEQUENCES; -> SEQUENCE_NAME CURRENT_VALUE INCREMENT +SELECT SEQUENCE_NAME, BASE_VALUE, INCREMENT FROM INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME BASE_VALUE INCREMENT > ------------- ----------------- --------- -> TEST_LONG 90123456789012345 -1 +> TEST_LONG 90123456789012344 -1 > rows: 1 SET AUTOCOMMIT TRUE; @@ -6976,23 +6208,29 @@ CREATE TABLE PARENT(A INT, B INT, PRIMARY KEY(A, B)); CREATE TABLE CHILD(ID INT PRIMARY KEY, PA INT, PB INT, CONSTRAINT AB FOREIGN KEY(PA, PB) REFERENCES PARENT(A, B)); > ok -SELECT * FROM INFORMATION_SCHEMA.CROSS_REFERENCES; -> PKTABLE_CATALOG PKTABLE_SCHEMA PKTABLE_NAME PKCOLUMN_NAME FKTABLE_CATALOG FKTABLE_SCHEMA FKTABLE_NAME FKCOLUMN_NAME ORDINAL_POSITION UPDATE_RULE DELETE_RULE FK_NAME PK_NAME DEFERRABILITY -> 
--------------- -------------- ------------ ------------- --------------- -------------- ------------ ------------- ---------------- ----------- ----------- ------- ------------- ------------- -> SCRIPT PUBLIC PARENT A SCRIPT PUBLIC CHILD PA 1 1 1 AB PRIMARY_KEY_8 7 -> SCRIPT PUBLIC PARENT B SCRIPT PUBLIC CHILD PB 2 1 1 AB PRIMARY_KEY_8 7 -> rows: 2 +TABLE INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME UNIQUE_CONSTRAINT_CATALOG UNIQUE_CONSTRAINT_SCHEMA UNIQUE_CONSTRAINT_NAME MATCH_OPTION UPDATE_RULE DELETE_RULE +> ------------------ ----------------- --------------- ------------------------- ------------------------ ---------------------- ------------ ----------- ----------- +> SCRIPT PUBLIC AB SCRIPT PUBLIC CONSTRAINT_8 NONE RESTRICT RESTRICT +> rows: 1 -DROP TABLE PARENT; -> ok +TABLE INFORMATION_SCHEMA.KEY_COLUMN_USAGE; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT +> ------------------ ----------------- --------------- ------------- ------------ ---------- ----------- ---------------- ----------------------------- +> SCRIPT PUBLIC AB SCRIPT PUBLIC CHILD PA 1 1 +> SCRIPT PUBLIC AB SCRIPT PUBLIC CHILD PB 2 2 +> SCRIPT PUBLIC CONSTRAINT_3 SCRIPT PUBLIC CHILD ID 1 null +> SCRIPT PUBLIC CONSTRAINT_8 SCRIPT PUBLIC PARENT A 1 null +> SCRIPT PUBLIC CONSTRAINT_8 SCRIPT PUBLIC PARENT B 2 null +> rows: 5 -DROP TABLE CHILD; +DROP TABLE PARENT, CHILD; > ok drop table if exists test; > ok -create table test(id int primary key, parent int, foreign key(id) references test(parent)); +create table test(id int primary key, parent int unique, foreign key(id) references test(parent)); > ok insert into test values(1, 1); @@ -7037,22 +6275,19 @@ CREATE MEMORY TABLE PARENT(ID INT PRIMARY KEY); CREATE MEMORY TABLE CHILD(ID INT, PARENT_ID INT, FOREIGN KEY(PARENT_ID) REFERENCES PARENT); > ok -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT 
NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT > ---------------------------------------------------------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.CHILD; +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."PARENT"( "ID" INTEGER NOT NULL ); +> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_8" PRIMARY KEY("ID"); > -- 0 +/- SELECT COUNT(*) FROM PUBLIC.PARENT; +> CREATE MEMORY TABLE "PUBLIC"."CHILD"( "ID" INTEGER, "PARENT_ID" INTEGER ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.CHILD; > ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_3" FOREIGN KEY("PARENT_ID") REFERENCES "PUBLIC"."PARENT"("ID") NOCHECK; -> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_8" PRIMARY KEY("ID"); -> CREATE MEMORY TABLE "PUBLIC"."CHILD"( "ID" INT, "PARENT_ID" INT ); -> CREATE MEMORY TABLE "PUBLIC"."PARENT"( "ID" INT NOT NULL ); -> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 7 - -DROP TABLE PARENT; -> ok +> rows (ordered): 7 -DROP TABLE CHILD; +DROP TABLE PARENT, CHILD; > ok CREATE TABLE TEST(ID INT, CONSTRAINT PK PRIMARY KEY(ID), NAME VARCHAR, PARENT INT, CONSTRAINT P FOREIGN KEY(PARENT) REFERENCES(ID)); @@ -7062,7 +6297,7 @@ ALTER TABLE TEST DROP PRIMARY KEY; > exception INDEX_BELONGS_TO_CONSTRAINT_2 ALTER TABLE TEST DROP CONSTRAINT PK; -> ok +> exception CONSTRAINT_IS_USED_BY_CONSTRAINT_2 INSERT INTO TEST VALUES(1, 'Frank', 1); > update count: 1 @@ -7082,7 +6317,7 @@ INSERT INTO TEST VALUES(4, 'Joe', 3); DROP TABLE TEST; > ok -CREATE MEMORY TABLE TEST(A_INT INT NOT NULL, B_INT INT NOT NULL, PRIMARY KEY(A_INT, B_INT)); +CREATE MEMORY TABLE TEST(A_INT INT NOT NULL, B_INT INT NOT NULL, PRIMARY KEY(A_INT, B_INT), CONSTRAINT U_B UNIQUE(B_INT)); > ok ALTER TABLE TEST ADD CONSTRAINT A_UNIQUE UNIQUE(A_INT); @@ -7100,14 +6335,15 @@ ALTER TABLE TEST DROP CONSTRAINT A_UNIQUE; ALTER TABLE TEST ADD CONSTRAINT C1 FOREIGN 
KEY(A_INT) REFERENCES TEST(B_INT); > ok -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT > -------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A_INT" INTEGER NOT NULL, "B_INT" INTEGER NOT NULL ); > -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."U_B" UNIQUE("B_INT"); > ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."C1" FOREIGN KEY("A_INT") REFERENCES "PUBLIC"."TEST"("B_INT") NOCHECK; -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A_INT" INT NOT NULL, "B_INT" INT NOT NULL ); -> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 4 +> rows (ordered): 5 ALTER TABLE TEST DROP CONSTRAINT C1; > ok @@ -7244,38 +6480,35 @@ SELECT * FROM B_TEST; > -1 XX > rows: 1 -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT > -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."A_TEST"( "A_INT" INTEGER NOT NULL, "A_VARCHAR" CHARACTER VARYING(255) DEFAULT 'x', "A_DATE" DATE, "A_DECIMAL" DECIMAL(10, 2) ); +> ALTER TABLE "PUBLIC"."A_TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_7" PRIMARY KEY("A_INT"); > -- 0 +/- SELECT COUNT(*) FROM PUBLIC.A_TEST; +> CREATE MEMORY TABLE "PUBLIC"."B_TEST"( "B_INT" INTEGER DEFAULT -1 NOT NULL, "B_VARCHAR" CHARACTER VARYING(255) DEFAULT NULL ); +> ALTER TABLE "PUBLIC"."B_TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_760" PRIMARY KEY("B_INT"); > -- 1 +/- SELECT COUNT(*) FROM PUBLIC.B_TEST; -> ALTER TABLE "PUBLIC"."A_TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_7" PRIMARY KEY("A_INT"); +> INSERT INTO "PUBLIC"."B_TEST" VALUES (-1, 'XX'); +> ALTER TABLE "PUBLIC"."A_TEST" ADD CONSTRAINT 
"PUBLIC"."MIN_LENGTH" CHECK(CHAR_LENGTH("A_VARCHAR") > 1) NOCHECK; +> ALTER TABLE "PUBLIC"."B_TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_76" CHECK(CHAR_LENGTH("B_VARCHAR") > 1) NOCHECK; > ALTER TABLE "PUBLIC"."A_TEST" ADD CONSTRAINT "PUBLIC"."DATE_UNIQUE" UNIQUE("A_DATE"); > ALTER TABLE "PUBLIC"."A_TEST" ADD CONSTRAINT "PUBLIC"."DATE_UNIQUE_2" UNIQUE("A_DATE"); -> ALTER TABLE "PUBLIC"."A_TEST" ADD CONSTRAINT "PUBLIC"."MIN_LENGTH" CHECK(LENGTH("A_VARCHAR") > 1) NOCHECK; > ALTER TABLE "PUBLIC"."B_TEST" ADD CONSTRAINT "PUBLIC"."B_UNIQUE" UNIQUE("B_INT"); > ALTER TABLE "PUBLIC"."B_TEST" ADD CONSTRAINT "PUBLIC"."C3" FOREIGN KEY("B_INT") REFERENCES "PUBLIC"."A_TEST"("A_INT") ON DELETE SET DEFAULT ON UPDATE SET DEFAULT NOCHECK; -> ALTER TABLE "PUBLIC"."B_TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_76" CHECK(LENGTH("B_VARCHAR") > 1) NOCHECK; -> ALTER TABLE "PUBLIC"."B_TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_760" PRIMARY KEY("B_INT"); -> CREATE MEMORY TABLE "PUBLIC"."A_TEST"( "A_INT" INT NOT NULL, "A_VARCHAR" VARCHAR(255) DEFAULT 'x', "A_DATE" DATE, "A_DECIMAL" DECIMAL(10, 2) ); -> CREATE MEMORY TABLE "PUBLIC"."B_TEST"( "B_INT" INT DEFAULT -1 NOT NULL, "B_VARCHAR" VARCHAR(255) DEFAULT NULL ); -> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> INSERT INTO "PUBLIC"."B_TEST" VALUES (-1, 'XX'); -> rows: 14 - -DROP TABLE A_TEST; -> ok +> rows (ordered): 14 -DROP TABLE B_TEST; +DROP TABLE A_TEST, B_TEST; > ok -CREATE MEMORY TABLE FAMILY(ID INT, NAME VARCHAR(20)); +CREATE MEMORY TABLE FAMILY(ID INT PRIMARY KEY, NAME VARCHAR(20)); > ok CREATE INDEX FAMILY_ID_NAME ON FAMILY(ID, NAME); > ok -CREATE MEMORY TABLE PARENT(ID INT, FAMILY_ID INT, NAME VARCHAR(20)); +CREATE MEMORY TABLE PARENT(ID INT, FAMILY_ID INT, NAME VARCHAR(20), UNIQUE(ID, FAMILY_ID)); > ok ALTER TABLE PARENT ADD CONSTRAINT PARENT_FAMILY FOREIGN KEY(FAMILY_ID) @@ -7345,51 +6578,55 @@ SELECT * FROM CHILD; > 201 null null Johann > rows: 4 -SCRIPT SIMPLE NOPASSWORDS NOSETTINGS; +SCRIPT SIMPLE NOPASSWORDS NOSETTINGS 
NOVERSION; > SCRIPT > ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."FAMILY"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(20) ); +> ALTER TABLE "PUBLIC"."FAMILY" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_7" PRIMARY KEY("ID"); > -- 1 +/- SELECT COUNT(*) FROM PUBLIC.FAMILY; +> INSERT INTO "PUBLIC"."FAMILY" VALUES(1, 'Capone'); +> CREATE INDEX "PUBLIC"."FAMILY_ID_NAME" ON "PUBLIC"."FAMILY"("ID" NULLS FIRST, "NAME" NULLS FIRST); +> CREATE MEMORY TABLE "PUBLIC"."PARENT"( "ID" INTEGER, "FAMILY_ID" INTEGER, "NAME" CHARACTER VARYING(20) ); > -- 1 +/- SELECT COUNT(*) FROM PUBLIC.PARENT; +> INSERT INTO "PUBLIC"."PARENT" VALUES(3, 1, 'Sue'); +> CREATE MEMORY TABLE "PUBLIC"."CHILD"( "ID" INTEGER, "PARENTID" INTEGER, "FAMILY_ID" INTEGER, "NAME" CHARACTER VARYING(20) ); > -- 4 +/- SELECT COUNT(*) FROM PUBLIC.CHILD; -> ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_3" UNIQUE("ID", "PARENTID"); -> ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT "PUBLIC"."PARENT_CHILD" FOREIGN KEY("PARENTID", "FAMILY_ID") REFERENCES "PUBLIC"."PARENT"("ID", "FAMILY_ID") ON DELETE SET NULL ON UPDATE CASCADE NOCHECK; -> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."PARENT_FAMILY" FOREIGN KEY("FAMILY_ID") REFERENCES "PUBLIC"."FAMILY"("ID") NOCHECK; -> CREATE INDEX "PUBLIC"."FAMILY_ID_NAME" ON "PUBLIC"."FAMILY"("ID", "NAME"); -> CREATE MEMORY TABLE "PUBLIC"."CHILD"( "ID" INT, "PARENTID" INT, "FAMILY_ID" INT, "NAME" VARCHAR(20) ); -> CREATE MEMORY TABLE "PUBLIC"."FAMILY"( "ID" INT, "NAME" VARCHAR(20) ); -> CREATE MEMORY TABLE "PUBLIC"."PARENT"( "ID" INT, "FAMILY_ID" INT, "NAME" VARCHAR(20) ); -> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; > INSERT INTO "PUBLIC"."CHILD" VALUES(100, 3, 1, 'Simon'); > INSERT INTO "PUBLIC"."CHILD" VALUES(101, 3, 1, 
'Sabine'); > INSERT INTO "PUBLIC"."CHILD" VALUES(200, NULL, NULL, 'Jim'); > INSERT INTO "PUBLIC"."CHILD" VALUES(201, NULL, NULL, 'Johann'); -> INSERT INTO "PUBLIC"."FAMILY" VALUES(1, 'Capone'); -> INSERT INTO "PUBLIC"."PARENT" VALUES(3, 1, 'Sue'); -> rows: 17 +> ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_3" UNIQUE("ID", "PARENTID"); +> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_8" UNIQUE("ID", "FAMILY_ID"); +> ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT "PUBLIC"."PARENT_CHILD" FOREIGN KEY("PARENTID", "FAMILY_ID") REFERENCES "PUBLIC"."PARENT"("ID", "FAMILY_ID") ON DELETE SET NULL ON UPDATE CASCADE NOCHECK; +> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."PARENT_FAMILY" FOREIGN KEY("FAMILY_ID") REFERENCES "PUBLIC"."FAMILY"("ID") NOCHECK; +> rows (ordered): 19 ALTER TABLE CHILD DROP CONSTRAINT PARENT_CHILD; > ok -SCRIPT SIMPLE NOPASSWORDS NOSETTINGS; +SCRIPT SIMPLE NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT > ------------------------------------------------------------------------------------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."FAMILY"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(20) ); +> ALTER TABLE "PUBLIC"."FAMILY" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_7" PRIMARY KEY("ID"); > -- 1 +/- SELECT COUNT(*) FROM PUBLIC.FAMILY; +> INSERT INTO "PUBLIC"."FAMILY" VALUES(1, 'Capone'); +> CREATE INDEX "PUBLIC"."FAMILY_ID_NAME" ON "PUBLIC"."FAMILY"("ID" NULLS FIRST, "NAME" NULLS FIRST); +> CREATE MEMORY TABLE "PUBLIC"."PARENT"( "ID" INTEGER, "FAMILY_ID" INTEGER, "NAME" CHARACTER VARYING(20) ); > -- 1 +/- SELECT COUNT(*) FROM PUBLIC.PARENT; +> INSERT INTO "PUBLIC"."PARENT" VALUES(3, 1, 'Sue'); +> CREATE MEMORY TABLE "PUBLIC"."CHILD"( "ID" INTEGER, "PARENTID" INTEGER, "FAMILY_ID" INTEGER, "NAME" CHARACTER VARYING(20) ); > -- 4 +/- SELECT COUNT(*) FROM PUBLIC.CHILD; -> ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT 
"PUBLIC"."CONSTRAINT_3" UNIQUE("ID", "PARENTID"); -> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."PARENT_FAMILY" FOREIGN KEY("FAMILY_ID") REFERENCES "PUBLIC"."FAMILY"("ID") NOCHECK; -> CREATE INDEX "PUBLIC"."FAMILY_ID_NAME" ON "PUBLIC"."FAMILY"("ID", "NAME"); -> CREATE MEMORY TABLE "PUBLIC"."CHILD"( "ID" INT, "PARENTID" INT, "FAMILY_ID" INT, "NAME" VARCHAR(20) ); -> CREATE MEMORY TABLE "PUBLIC"."FAMILY"( "ID" INT, "NAME" VARCHAR(20) ); -> CREATE MEMORY TABLE "PUBLIC"."PARENT"( "ID" INT, "FAMILY_ID" INT, "NAME" VARCHAR(20) ); -> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; > INSERT INTO "PUBLIC"."CHILD" VALUES(100, 3, 1, 'Simon'); > INSERT INTO "PUBLIC"."CHILD" VALUES(101, 3, 1, 'Sabine'); > INSERT INTO "PUBLIC"."CHILD" VALUES(200, NULL, NULL, 'Jim'); > INSERT INTO "PUBLIC"."CHILD" VALUES(201, NULL, NULL, 'Johann'); -> INSERT INTO "PUBLIC"."FAMILY" VALUES(1, 'Capone'); -> INSERT INTO "PUBLIC"."PARENT" VALUES(3, 1, 'Sue'); -> rows: 16 +> ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_3" UNIQUE("ID", "PARENTID"); +> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_8" UNIQUE("ID", "FAMILY_ID"); +> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."PARENT_FAMILY" FOREIGN KEY("FAMILY_ID") REFERENCES "PUBLIC"."FAMILY"("ID") NOCHECK; +> rows (ordered): 18 DELETE FROM PARENT; > update count: 1 @@ -7443,23 +6680,21 @@ SELECT * FROM INVOICE_LINE; > 1 101 20 Chair 540.40 > rows: 2 -DROP TABLE INVOICE; -> ok - -DROP TABLE INVOICE_LINE; +DROP TABLE INVOICE, INVOICE_LINE; > ok -CREATE MEMORY TABLE TEST(A INT, B INT, FOREIGN KEY (B) REFERENCES(A) ON UPDATE RESTRICT ON DELETE NO ACTION); +CREATE MEMORY TABLE TEST(A INT PRIMARY KEY, B INT, FOREIGN KEY (B) REFERENCES(A) ON UPDATE RESTRICT ON DELETE NO ACTION); > ok -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT -> ---------------------------------------------------------------------------------------------------------------------------- -> -- 0 
+/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" FOREIGN KEY("B") REFERENCES "PUBLIC"."TEST"("A") NOCHECK; -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A" INT, "B" INT ); +> ----------------------------------------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; -> rows: 4 +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A" INTEGER NOT NULL, "B" INTEGER ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("A"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_27" FOREIGN KEY("B") REFERENCES "PUBLIC"."TEST"("A") NOCHECK; +> rows (ordered): 5 DROP TABLE TEST; > ok @@ -7501,28 +6736,28 @@ ALTER TABLE TEST2_X RENAME TO TEST2; ALTER INDEX IDX_ID RENAME TO IDX_ID2; > ok -SCRIPT NOPASSWORDS NOSETTINGS; +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; > SCRIPT -> ------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST2; -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); -> CREATE INDEX "PUBLIC"."IDX_ID2" ON "PUBLIC"."TEST2"("ID"); -> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INT NOT NULL, "NAME" VARCHAR(255) ); -> CREATE MEMORY TABLE "PUBLIC"."TEST2"( "ID" INT ); +> -------------------------------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE USER IF NOT EXISTS "TEST_ADMIN" PASSWORD '' ADMIN; > CREATE USER IF NOT EXISTS "TEST" PASSWORD ''; > CREATE USER IF NOT EXISTS "TEST2" PASSWORD ''; -> CREATE USER IF NOT EXISTS "TEST_ADMIN" PASSWORD '' ADMIN; -> rows: 10 - -SELECT NAME, ADMIN FROM INFORMATION_SCHEMA.USERS; -> NAME ADMIN -> ---------- ----- -> SA true -> TEST false -> TEST2 false -> TEST_ADMIN true +> CREATE MEMORY 
TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255) ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> CREATE MEMORY TABLE "PUBLIC"."TEST2"( "ID" INTEGER ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST2; +> CREATE INDEX "PUBLIC"."IDX_ID2" ON "PUBLIC"."TEST2"("ID" NULLS FIRST); +> rows (ordered): 10 + +SELECT USER_NAME, IS_ADMIN FROM INFORMATION_SCHEMA.USERS; +> USER_NAME IS_ADMIN +> ---------- -------- +> SA TRUE +> TEST FALSE +> TEST2 FALSE +> TEST_ADMIN TRUE > rows: 4 DROP TABLE TEST2; @@ -7555,12 +6790,12 @@ CREATE USER SECURE SALT '001122' HASH '1122334455'; ALTER USER SECURE SET SALT '112233' HASH '2233445566'; > ok -SCRIPT NOSETTINGS; +SCRIPT NOSETTINGS NOVERSION; > SCRIPT > ------------------------------------------------------------------- > CREATE USER IF NOT EXISTS "SA" SALT '' HASH '' ADMIN; > CREATE USER IF NOT EXISTS "SECURE" SALT '112233' HASH '2233445566'; -> rows: 2 +> rows (ordered): 2 SET PASSWORD '123'; > ok @@ -7571,120 +6806,7 @@ SET AUTOCOMMIT TRUE; DROP USER SECURE; > ok ---- sequence with manual value ------------------ -drop table if exists test; -> ok - -CREATE TABLE TEST(ID bigint generated by default as identity (start with 1), name varchar); -> ok - -SET AUTOCOMMIT FALSE; -> ok - -insert into test(name) values('Hello'); -> update count: 1 - -insert into test(name) values('World'); -> update count: 1 - -call identity(); ->> 2 - -insert into test(id, name) values(1234567890123456, 'World'); -> update count: 1 - -call identity(); ->> 1234567890123456 - -insert into test(name) values('World'); -> update count: 1 - -call identity(); ->> 1234567890123457 - -select * from test order by id; -> ID NAME -> ---------------- ----- -> 1 Hello -> 2 World -> 1234567890123456 World -> 1234567890123457 World -> rows (ordered): 4 - -SET AUTOCOMMIT TRUE; -> ok - -drop table if exists test; -> ok - -CREATE TABLE TEST(ID bigint generated by 
default as identity (start with 1), name varchar); -> ok - -SET AUTOCOMMIT FALSE; -> ok - -insert into test(name) values('Hello'); -> update count: 1 - -insert into test(name) values('World'); -> update count: 1 - -call identity(); ->> 2 - -insert into test(id, name) values(1234567890123456, 'World'); -> update count: 1 - -call identity(); ->> 1234567890123456 - -insert into test(name) values('World'); -> update count: 1 - -call identity(); ->> 1234567890123457 - -select * from test order by id; -> ID NAME -> ---------------- ----- -> 1 Hello -> 2 World -> 1234567890123456 World -> 1234567890123457 World -> rows (ordered): 4 - -SET AUTOCOMMIT TRUE; -> ok - -drop table test; -> ok - --- test cases --------------------------------------------------------------------------------------------- -create memory table word(word_id integer, name varchar); -> ok - -alter table word alter column word_id integer(10) auto_increment; -> ok - -insert into word(name) values('Hello'); -> update count: 1 - -alter table word alter column word_id restart with 30872; -> ok - -insert into word(name) values('World'); -> update count: 1 - -select * from word; -> WORD_ID NAME -> ------- ----- -> 1 Hello -> 30872 World -> rows: 2 - -drop table word; -> ok - create table test(id int, name varchar); > ok @@ -7888,6 +7010,9 @@ alter table if exists z add constraint z_fk foreign key (id) references x (id); insert into z (id) values (1); > exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 +SET MODE MySQL; +> ok + alter table if exists y drop foreign key z_fk; > ok @@ -7897,6 +7022,9 @@ alter table if exists z drop foreign key z_fk; alter table if exists z drop foreign key z_fk; > exception CONSTRAINT_NOT_FOUND_1 +SET MODE Regular; +> ok + insert into z (id) values (1); > update count: 1 @@ -7928,7 +7056,7 @@ drop schema z cascade; > ok ----- Issue#493 ----- -create table test (year int, action varchar(10)); +create table test ("YEAR" int, action varchar(10)); > ok insert into test values 
(2015, 'order'), (2016, 'order'), (2014, 'order'); @@ -7937,7 +7065,7 @@ insert into test values (2015, 'order'), (2016, 'order'), (2014, 'order'); insert into test values (2014, 'execution'), (2015, 'execution'), (2016, 'execution'); > update count: 3 -select * from test where year in (select distinct year from test order by year desc limit 1 offset 0); +select * from test where "YEAR" in (select distinct "YEAR" from test order by "YEAR" desc limit 1 offset 0); > YEAR ACTION > ---- --------- > 2016 execution diff --git a/h2/src/test/org/h2/test/scripts/testSimple.sql b/h2/src/test/org/h2/test/scripts/testSimple.sql index e7bd40e145..ae5fc89bbc 100644 --- a/h2/src/test/org/h2/test/scripts/testSimple.sql +++ b/h2/src/test/org/h2/test/scripts/testSimple.sql @@ -1,30 +1,27 @@ --- Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
-- Initial Developer: H2 Group -- select 1000L / 10; >> 100 -select * from (select x as y from dual order by y); ->> 1 - -select a.x from dual a, dual b order by x; +select * from (select 1 as y from dual order by y); >> 1 select 1 from(select 2 from(select 1) a right join dual b) c; >> 1 select 1.00 / 3 * 0.00; ->> 0.00000000000000000000000000000 +>> 0.000000000000000000000000 select 1.00000 / 3 * 0.0000; ->> 0.0000000000000000000000000000000000 +>> 0.00000000000000000000000000000 select 1.0000000 / 3 * 0.00000; ->> 0.0000000000000000000000000000000000000 +>> 0.00000000000000000000000000000000 select 1.0000000 / 3 * 0.000000; ->> 0E-38 +>> 0.000000000000000000000000000000000 create table test(id null); > ok @@ -65,7 +62,7 @@ select N'test'; select E'test\\test'; >> test\test -create table a(id int) as select null; +create table a(id int unique) as select null; > ok create table b(id int references a(id)) as select null; @@ -77,48 +74,12 @@ delete from a; drop table a, b; > ok -create table test(a int, b int) as select 2, 0; -> ok - -create index idx on test(b, a); -> ok - -select count(*) from test where a in(2, 10) and b in(0, null); ->> 1 - -drop table test; -> ok - -create table test(a int, b int) as select 1, 0; -> ok - -create index idx on test(b, a); -> ok - -select count(*) from test where b in(null, 0) and a in(1, null); ->> 1 - -drop table test; -> ok - create cached temp table test(id identity) not persistent; > ok drop table test; > ok -create table test(a int, b int, unique(a, b)); -> ok - -insert into test values(1,1), (1,2); -> update count: 2 - -select count(*) from test where a in(1,2) and b in(1,2); ->> 2 - -drop table test; -> ok - create table test(id int); > ok @@ -137,13 +98,13 @@ select is_nullable from information_schema.columns c where c.table_name = 'TEST' alter table test alter column id set data type varchar; > ok -select type_name from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; ->> VARCHAR +select 
data_type from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; +>> CHARACTER VARYING alter table test alter column id type int; > ok -select type_name from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; +select data_type from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; >> INTEGER alter table test alter column id drop default; @@ -256,30 +217,6 @@ drop table test; select count(*)from((select 1 from dual limit 1)union(select 2 from dual limit 1)); >> 2 -select sum(cast(x as int)) from system_range(2147483547, 2147483637); ->> 195421006872 - -select sum(x) from system_range(9223372036854775707, 9223372036854775797); ->> 839326855353784593432 - -select sum(cast(100 as tinyint)) from system_range(1, 1000); ->> 100000 - -select sum(cast(100 as smallint)) from system_range(1, 1000); ->> 100000 - -select avg(cast(x as int)) from system_range(2147483547, 2147483637); ->> 2147483592 - -select avg(x) from system_range(9223372036854775707, 9223372036854775797); ->> 9223372036854775752 - -select avg(cast(100 as tinyint)) from system_range(1, 1000); ->> 100 - -select avg(cast(100 as smallint)) from system_range(1, 1000); ->> 100 - select datediff(yyyy, now(), now()); >> 0 @@ -343,7 +280,7 @@ drop table master, detail; drop all objects; > ok -create table test(id int, parent int references test(id) on delete cascade); +create table test(id int primary key, parent int references test(id) on delete cascade); > ok insert into test values(0, 0); @@ -358,27 +295,12 @@ delete from test2; drop table test2; > ok -SELECT X FROM dual GROUP BY X HAVING X=AVG(X); ->> 1 - -create view test_view(id,) as select * from dual; +create view test_view(id) as select * from dual; > ok drop view test_view; > ok -create table test(id int,); -> ok - -insert into test(id,) values(1,); -> update count: 1 - -merge into test(id,) key(id,) values(1,); -> update count: 1 - -drop table test; -> ok - SET 
MODE DB2; > ok @@ -478,7 +400,7 @@ ALTER TABLE TEST ALTER COLUMN ID RESTART WITH ?; }; > update count: 0 -INSERT INTO TEST VALUES(NULL); +INSERT INTO TEST VALUES(DEFAULT); > update count: 1 SELECT * FROM TEST; @@ -508,10 +430,10 @@ DROP SEQUENCE TEST_SEQ; create schema Contact; > ok -CREATE TABLE Account (id BIGINT); +CREATE TABLE Account (id BIGINT PRIMARY KEY); > ok -CREATE TABLE Person (id BIGINT, FOREIGN KEY (id) REFERENCES Account(id)); +CREATE TABLE Person (id BIGINT PRIMARY KEY, FOREIGN KEY (id) REFERENCES Account(id)); > ok CREATE TABLE Contact.Contact (id BIGINT, FOREIGN KEY (id) REFERENCES public.Person(id)); @@ -559,9 +481,6 @@ ALTER TABLE TEST DROP B; DROP TABLE TEST; > ok -select count(d.*) from dual d group by d.x; ->> 1 - create table test(id int); > ok @@ -585,12 +504,6 @@ drop table test; select replace(lpad('string', 10), ' ', '*'); >> ****string -select count(*) from (select * from dual union select * from dual) where x = 0; ->> 0 - -select count(*) from (select * from (select * from dual union select * from dual)) where x = 0; ->> 0 - select instr('abcisj','s', -1) from dual; >> 5 @@ -627,35 +540,6 @@ SELECT NAME FROM TEST WHERE NAME REGEXP 'WorldW'; drop table test; > ok -select * from (select x from (select x from dual)) where 1=x; ->> 1 - -CREATE VIEW TEST_VIEW AS SELECT X FROM (SELECT X FROM DUAL); -> ok - -SELECT * FROM TEST_VIEW; ->> 1 - -SELECT * FROM TEST_VIEW; ->> 1 - -DROP VIEW TEST_VIEW; -> ok - -SELECT X FROM (SELECT X, X AS "XY" FROM DUAL) WHERE X=1; ->> 1 - -SELECT X FROM (SELECT X, X AS "X Y" FROM DUAL) WHERE X=1; ->> 1 - -SELECT X FROM (SELECT X, X AS "X Y" FROM DUAL AS "D Z") WHERE X=1; ->> 1 - -select * from (select x from dual union select convert(x, int) from dual) where x=0; -> X -> - -> rows: 0 - create table test(id int); > ok @@ -735,7 +619,10 @@ drop table test; > ok select * from dual where 'a_z' like '%=_%' escape '='; ->> 1 +> +> +> +> rows: 1 create table test as select 1 from dual union all select 2 from dual; > ok 
@@ -771,7 +658,7 @@ CREATE VIEW TEST_VIEW AS SELECT COUNT(ID) X FROM TEST; > ok explain SELECT * FROM TEST_VIEW WHERE X>1; ->> SELECT "TEST_VIEW"."X" FROM "PUBLIC"."TEST_VIEW" /* SELECT COUNT(ID) AS X FROM PUBLIC.TEST /++ PUBLIC.TEST.tableScan ++/ HAVING COUNT("ID") >= ?1: X > 1 */ WHERE "X" > 1 +>> SELECT "PUBLIC"."TEST_VIEW"."X" FROM "PUBLIC"."TEST_VIEW" /* SELECT COUNT(ID) AS X FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ HAVING COUNT(ID) >= ?1: X > CAST(1 AS BIGINT) */ WHERE "X" > CAST(1 AS BIGINT) DROP VIEW TEST_VIEW; > ok @@ -815,18 +702,9 @@ create table table1(f1 int not null primary key); create table table2(f2 int not null primary key references table1(f1)); > ok -drop table table1; +drop table table1, table2; > ok -drop table table2; -> ok - -select case when 1=null then 1 else 2 end; ->> 2 - -select case (1) when 1 then 1 else 2 end; ->> 1 - create table test(id int); > ok @@ -927,25 +805,7 @@ select date '+0011-01-01'; >> 0011-01-01 select date'-0010-01-01'; ->> -10-01-01 - -create schema TEST_SCHEMA; -> ok - -create table TEST_SCHEMA.test(id int); -> ok - -create sequence TEST_SCHEMA.TEST_SEQ; -> ok - -select TEST_SCHEMA.TEST_SEQ.CURRVAL; ->> 0 - -select TEST_SCHEMA.TEST_SEQ.nextval; ->> 1 - -drop schema TEST_SCHEMA cascade; -> ok +>> -0010-01-01 create table test(id int); > ok @@ -975,12 +835,12 @@ create alias parse_long for "java.lang.Long.parseLong(java.lang.String)"; comment on alias parse_long is 'Parse a long with base'; > ok -select remarks from information_schema.function_aliases where alias_name = 'PARSE_LONG'; +select remarks from information_schema.routines where routine_name = 'PARSE_LONG'; >> Parse a long with base @reconnect -select remarks from information_schema.function_aliases where alias_name = 'PARSE_LONG'; +select remarks from information_schema.routines where routine_name = 'PARSE_LONG'; >> Parse a long with base drop alias parse_long; @@ -994,12 +854,12 @@ create role hr; comment on role hr is 'Human Resources'; > ok -select 
remarks from information_schema.roles where name = 'HR'; +select remarks from information_schema.roles where role_name = 'HR'; >> Human Resources @reconnect -select remarks from information_schema.roles where name = 'HR'; +select remarks from information_schema.roles where role_name = 'HR'; >> Human Resources create user abc password 'x'; @@ -1069,23 +929,6 @@ drop schema tests cascade; @reconnect -create constant abc value 1; -> ok - -comment on constant abc is 'One'; -> ok - -select remarks from information_schema.constants where constant_name = 'ABC'; ->> One - -@reconnect - -select remarks from information_schema.constants where constant_name = 'ABC'; ->> One - -drop constant abc; -> ok - drop table test; > ok @@ -1106,7 +949,7 @@ comment on constraint const1 is 'unique id'; comment on index IDX_ID is 'id_index'; > ok -select remarks from information_schema.constraints where constraint_name = 'CONST1'; +select remarks from information_schema.table_constraints where constraint_name = 'CONST1'; >> unique id select remarks from information_schema.indexes where index_name = 'IDX_ID'; @@ -1114,7 +957,7 @@ select remarks from information_schema.indexes where index_name = 'IDX_ID'; @reconnect -select remarks from information_schema.constraints where constraint_name = 'CONST1'; +select remarks from information_schema.table_constraints where constraint_name = 'CONST1'; >> unique id select remarks from information_schema.indexes where index_name = 'IDX_ID'; @@ -1131,23 +974,23 @@ create user sales password '1'; comment on user sales is 'mr. money'; > ok -select remarks from information_schema.users where name = 'SALES'; +select remarks from information_schema.users where user_name = 'SALES'; >> mr. money @reconnect -select remarks from information_schema.users where name = 'SALES'; +select remarks from information_schema.users where user_name = 'SALES'; >> mr. 
money alter user sales rename to SALES_USER; > ok -select remarks from information_schema.users where name = 'SALES_USER'; +select remarks from information_schema.users where user_name = 'SALES_USER'; >> mr. money @reconnect -select remarks from information_schema.users where name = 'SALES_USER'; +select remarks from information_schema.users where user_name = 'SALES_USER'; >> mr. money create table test(id int); @@ -1366,7 +1209,7 @@ select count(*) from test1 where a='abccccc'; >> 0 truncate table test1; -> ok +> update count: 8 insert into test1 values ('abcaaaa'); > update count: 1 diff --git a/h2/src/test/org/h2/test/server/TestAutoServer.java b/h2/src/test/org/h2/test/server/TestAutoServer.java index df29b978ed..72090a0130 100644 --- a/h2/src/test/org/h2/test/server/TestAutoServer.java +++ b/h2/src/test/org/h2/test/server/TestAutoServer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.server; @@ -9,6 +9,7 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.util.SortedProperties; @@ -29,20 +30,21 @@ public class TestAutoServer extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { testUnsupportedCombinations(); testAutoServer(false); + testSocketReadTimeout(false); if (!config.big) { testAutoServer(true); } testLinkedLocalTablesWithAutoServerReconnect(); } - private void testUnsupportedCombinations() throws SQLException { + private void testUnsupportedCombinations() { String[] urls = { "jdbc:h2:" + getTestName() + ";file_lock=no;auto_server=true", "jdbc:h2:" + getTestName() + ";file_lock=serialized;auto_server=true", @@ -50,7 +52,7 @@ private void testUnsupportedCombinations() throws SQLException { "jdbc:h2:mem:" + getTestName() + ";auto_server=true" }; for (String url : urls) { - assertThrows(SQLException.class, this).getConnection(url); + assertThrows(SQLException.class, () -> getConnection(url)); try { getConnection(url); fail(url); @@ -70,43 +72,102 @@ private void testAutoServer(boolean port) throws Exception { url += ";AUTO_SERVER_PORT=11111"; } String user = getUser(), password = getPassword(); - Connection connServer = getConnection(url + ";OPEN_NEW=TRUE", - user, password); + try (Connection connServer = getConnection(url + ";OPEN_NEW=TRUE", user, password)) { + int i = ITERATIONS; + for (; i > 0; i--) { + Thread.sleep(100); + SortedProperties prop = SortedProperties.loadProperties( + getBaseDir() + "/" + getTestName() + ".lock.db"); + String key = prop.getProperty("id"); + String server = prop.getProperty("server"); + if (server != null) { + String u2 = url.substring(url.indexOf(';')); + u2 = "jdbc:h2:tcp://" + server + "/" + key + u2; + Connection conn = DriverManager.getConnection(u2, user, password); + conn.close(); + int gotPort = Integer.parseInt(server.substring(server.lastIndexOf(':') + 1)); + if (port) { + assertEquals(11111, gotPort); + } + break; + } + } + if (i <= 0) { + fail(); + } + try (Connection conn = getConnection(url + ";OPEN_NEW=TRUE")) { + Statement 
stat = conn.createStatement(); + if (config.big) { + try { + stat.execute("SHUTDOWN"); + } catch (SQLException e) { + assertKnownException(e); + // the connection is closed + } + } + } + } + deleteDb("autoServer"); + } - int i = ITERATIONS; - for (; i > 0; i--) { - Thread.sleep(100); + + private void testSocketReadTimeout(boolean port) throws Exception { + if (config.memory || config.networked) { + return; + } + deleteDb(getTestName()); + String url = getURL(getTestName() + ";AUTO_SERVER=TRUE", true); + if (port) { + url += ";AUTO_SERVER_PORT=11111"; + } + String user = getUser(), password = getPassword(); + Connection connServer = getConnection(url + ";OPEN_NEW=TRUE", + user, password); + try { SortedProperties prop = SortedProperties.loadProperties( - getBaseDir() + "/" + getTestName() + ".lock.db"); + getBaseDir() + "/" + getTestName() + ".lock.db"); String key = prop.getProperty("id"); String server = prop.getProperty("server"); if (server != null) { String u2 = url.substring(url.indexOf(';')); - u2 = "jdbc:h2:tcp://" + server + "/" + key + u2; + //todo java.net.SocketTimeoutException: Read timed out + u2 = "jdbc:h2:tcp://" + server + "/" + key + u2 + ";NETWORK_TIMEOUT=100"; Connection conn = DriverManager.getConnection(u2, user, password); + Statement stat = conn.createStatement(); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, stat). 
+ executeQuery("SELECT MAX(RAND()) FROM SYSTEM_RANGE(1, 100000000)"); conn.close(); int gotPort = Integer.parseInt(server.substring(server.lastIndexOf(':') + 1)); if (port) { assertEquals(11111, gotPort); } - break; } - } - if (i <= 0) { - fail(); - } - Connection conn = getConnection(url + ";OPEN_NEW=TRUE"); - Statement stat = conn.createStatement(); - if (config.big) { + Connection conn = getConnection(url + ";OPEN_NEW=TRUE"); + Statement stat = conn.createStatement(); + if (config.big) { + try { + stat.execute("SHUTDOWN"); + } catch (SQLException e) { + assertKnownException(e); + // the connection is closed + } + } + conn.close(); + } finally { try { - stat.execute("SHUTDOWN"); + connServer.createStatement().execute("SHUTDOWN"); + if (config.big) { + fail("server should be down already"); + } } catch (SQLException e) { + assertTrue(config.big); assertKnownException(e); - // the connection is closed } + try { + connServer.close(); + } catch (SQLException ignore) {} } - conn.close(); - connServer.close(); + deleteDb("autoServer"); } diff --git a/h2/src/test/org/h2/test/server/TestInit.java b/h2/src/test/org/h2/test/server/TestInit.java index 4a4ae8574e..49a90f0ac3 100644 --- a/h2/src/test/org/h2/test/server/TestInit.java +++ b/h2/src/test/org/h2/test/server/TestInit.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.server; @@ -26,7 +26,7 @@ public class TestInit extends TestDb { * @param a ignored */ public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -41,7 +41,7 @@ public void test() throws Exception { Writer w = new OutputStreamWriter(FileUtils.newOutputStream(init1, false)); PrintWriter writer = new PrintWriter(w); - writer.println("create table test(id int identity, name varchar);"); + writer.println("create table test(id int generated by default as identity, name varchar);"); writer.println("insert into test(name) values('cat');"); writer.close(); diff --git a/h2/src/test/org/h2/test/server/TestJakartaWeb.java b/h2/src/test/org/h2/test/server/TestJakartaWeb.java new file mode 100644 index 0000000000..7d24757915 --- /dev/null +++ b/h2/src/test/org/h2/test/server/TestJakartaWeb.java @@ -0,0 +1,698 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.server; + +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.security.Principal; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Vector; + +import jakarta.servlet.AsyncContext; +import jakarta.servlet.DispatcherType; +import jakarta.servlet.RequestDispatcher; +import jakarta.servlet.ServletConfig; +import jakarta.servlet.ServletContext; +import jakarta.servlet.ServletException; +import jakarta.servlet.ServletInputStream; +import jakarta.servlet.ServletOutputStream; +import jakarta.servlet.ServletRequest; +import jakarta.servlet.ServletResponse; +import jakarta.servlet.WriteListener; +import jakarta.servlet.http.Cookie; +import jakarta.servlet.http.HttpServletRequest; +import jakarta.servlet.http.HttpServletResponse; +import jakarta.servlet.http.HttpSession; +import jakarta.servlet.http.HttpUpgradeHandler; +import jakarta.servlet.http.Part; + +import org.h2.server.web.JakartaWebServlet; +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.Utils10; + +/** + * Tests the Jakarta Web Servlet for the H2 Console. + */ +public class TestJakartaWeb extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testServlet(); + } + + private void testServlet() throws Exception { + JakartaWebServlet servlet = new JakartaWebServlet(); + final HashMap configMap = new HashMap<>(); + configMap.put("ifExists", ""); + configMap.put("", ""); + ServletConfig config = new ServletConfig() { + + @Override + public String getServletName() { + return "H2Console"; + } + + @Override + public Enumeration getInitParameterNames() { + return new Vector<>(configMap.keySet()).elements(); + } + + @Override + public String getInitParameter(String name) { + return configMap.get(name); + } + + @Override + public ServletContext getServletContext() { + return null; + } + + }; + servlet.init(config); + + + TestHttpServletRequest request = new TestHttpServletRequest(); + request.setPathInfo("/"); + TestHttpServletResponse response = new TestHttpServletResponse(); + TestServletOutputStream out = new TestServletOutputStream(); + response.setServletOutputStream(out); + servlet.doGet(request, response); + assertContains(out.toString(), "location.href = 'login.jsp"); + servlet.destroy(); + } + + /** + * A HTTP servlet request for testing. 
+ */ + static class TestHttpServletRequest implements HttpServletRequest { + + private String pathInfo; + + void setPathInfo(String pathInfo) { + this.pathInfo = pathInfo; + } + + @Override + public Object getAttribute(String name) { + return null; + } + + @Override + public Enumeration getAttributeNames() { + return new Vector().elements(); + } + + @Override + public String getCharacterEncoding() { + return null; + } + + @Override + public int getContentLength() { + return 0; + } + + @Override + public String getContentType() { + return null; + } + + @Override + public ServletInputStream getInputStream() throws IOException { + return null; + } + + @Override + public String getLocalAddr() { + return null; + } + + @Override + public String getLocalName() { + return null; + } + + @Override + public int getLocalPort() { + return 0; + } + + @Override + public Locale getLocale() { + return null; + } + + @Override + public Enumeration getLocales() { + return null; + } + + @Override + public String getParameter(String name) { + return null; + } + + @Override + public Map getParameterMap() { + return null; + } + + @Override + public Enumeration getParameterNames() { + return new Vector().elements(); + } + + @Override + public String[] getParameterValues(String name) { + return null; + } + + @Override + public String getProtocol() { + return null; + } + + @Override + public BufferedReader getReader() throws IOException { + return null; + } + + @Override + @Deprecated + public String getRealPath(String path) { + return null; + } + + @Override + public String getRemoteAddr() { + return null; + } + + @Override + public String getRemoteHost() { + return null; + } + + @Override + public int getRemotePort() { + return 0; + } + + @Override + public RequestDispatcher getRequestDispatcher(String name) { + return null; + } + + @Override + public String getScheme() { + return "http"; + } + + @Override + public String getServerName() { + return null; + } + + @Override + public int 
getServerPort() { + return 80; + } + + @Override + public boolean isSecure() { + return false; + } + + @Override + public void removeAttribute(String name) { + // ignore + } + + @Override + public void setAttribute(String name, Object value) { + // ignore + } + + @Override + public void setCharacterEncoding(String encoding) + throws UnsupportedEncodingException { + // ignore + } + + @Override + public String getAuthType() { + return null; + } + + @Override + public String getContextPath() { + return null; + } + + @Override + public Cookie[] getCookies() { + return null; + } + + @Override + public long getDateHeader(String x) { + return 0; + } + + @Override + public String getHeader(String name) { + return null; + } + + @Override + public Enumeration getHeaderNames() { + return null; + } + + @Override + public Enumeration getHeaders(String name) { + return null; + } + + @Override + public int getIntHeader(String name) { + return 0; + } + + @Override + public String getMethod() { + return null; + } + + @Override + public String getPathInfo() { + return pathInfo; + } + + @Override + public String getPathTranslated() { + return null; + } + + @Override + public String getQueryString() { + return null; + } + + @Override + public String getRemoteUser() { + return null; + } + + @Override + public String getRequestURI() { + return null; + } + + @Override + public StringBuffer getRequestURL() { + return null; + } + + @Override + public String getRequestedSessionId() { + return null; + } + + @Override + public String getServletPath() { + return null; + } + + @Override + public HttpSession getSession() { + return null; + } + + @Override + public HttpSession getSession(boolean x) { + return null; + } + + @Override + public Principal getUserPrincipal() { + return null; + } + + @Override + public boolean isRequestedSessionIdFromCookie() { + return false; + } + + @Override + public boolean isRequestedSessionIdFromURL() { + return false; + } + + @Override + @Deprecated + public 
boolean isRequestedSessionIdFromUrl() { + return false; + } + + @Override + public boolean isRequestedSessionIdValid() { + return false; + } + + @Override + public boolean isUserInRole(String x) { + return false; + } + + @Override + public java.util.Collection getParts() { + return null; + } + + @Override + public Part getPart(String name) { + return null; + } + + @Override + public boolean authenticate(HttpServletResponse response) { + return false; + } + + @Override + public void login(String username, String password) { + // ignore + } + + @Override + public void logout() { + // ignore + } + + @Override + public ServletContext getServletContext() { + return null; + } + + @Override + public AsyncContext startAsync() { + return null; + } + + @Override + public AsyncContext startAsync( + ServletRequest servletRequest, + ServletResponse servletResponse) { + return null; + } + + @Override + public boolean isAsyncStarted() { + return false; + } + + @Override + public boolean isAsyncSupported() { + return false; + } + + @Override + public AsyncContext getAsyncContext() { + return null; + } + + @Override + public DispatcherType getDispatcherType() { + return null; + } + + @Override + public long getContentLengthLong() { + return 0; + } + + @Override + public String changeSessionId() { + return null; + } + + @Override + public T upgrade(Class handlerClass) + throws IOException, ServletException { + return null; + } + + } + + /** + * A HTTP servlet response for testing. 
+ */ + static class TestHttpServletResponse implements HttpServletResponse { + + ServletOutputStream servletOutputStream; + + void setServletOutputStream(ServletOutputStream servletOutputStream) { + this.servletOutputStream = servletOutputStream; + } + + @Override + public void flushBuffer() throws IOException { + // ignore + } + + @Override + public int getBufferSize() { + return 0; + } + + @Override + public String getCharacterEncoding() { + return null; + } + + @Override + public String getContentType() { + return null; + } + + @Override + public Locale getLocale() { + return null; + } + + @Override + public ServletOutputStream getOutputStream() throws IOException { + return servletOutputStream; + } + + @Override + public PrintWriter getWriter() throws IOException { + return null; + } + + @Override + public boolean isCommitted() { + return false; + } + + @Override + public void reset() { + // ignore + } + + @Override + public void resetBuffer() { + // ignore + } + + @Override + public void setBufferSize(int arg0) { + // ignore + } + + @Override + public void setCharacterEncoding(String arg0) { + // ignore + } + + @Override + public void setContentLength(int arg0) { + // ignore + } + + @Override + public void setContentLengthLong(long arg0) { + // ignore + } + + @Override + public void setContentType(String arg0) { + // ignore + } + + @Override + public void setLocale(Locale arg0) { + // ignore + } + + @Override + public void addCookie(Cookie arg0) { + // ignore + } + + @Override + public void addDateHeader(String arg0, long arg1) { + // ignore + } + + @Override + public void addHeader(String arg0, String arg1) { + // ignore + } + + @Override + public void addIntHeader(String arg0, int arg1) { + // ignore + } + + @Override + public boolean containsHeader(String arg0) { + return false; + } + + @Override + public String encodeRedirectURL(String arg0) { + return null; + } + + @Override + @Deprecated + public String encodeRedirectUrl(String arg0) { + return null; + } 
+ + @Override + public String encodeURL(String arg0) { + return null; + } + + @Override + @Deprecated + public String encodeUrl(String arg0) { + return null; + } + + @Override + public void sendError(int arg0) throws IOException { + // ignore + } + + @Override + public void sendError(int arg0, String arg1) throws IOException { + // ignore + } + + @Override + public void sendRedirect(String arg0) throws IOException { + // ignore + } + + @Override + public void setDateHeader(String arg0, long arg1) { + // ignore + } + + @Override + public void setHeader(String arg0, String arg1) { + // ignore + } + + @Override + public void setIntHeader(String arg0, int arg1) { + // ignore + } + + @Override + public void setStatus(int arg0) { + // ignore + } + + @Override + @Deprecated + public void setStatus(int arg0, String arg1) { + // ignore + } + + @Override + public int getStatus() { + return 0; + } + + @Override + public String getHeader(String name) { + return null; + } + + @Override + public java.util.Collection getHeaders(String name) { + return null; + } + + @Override + public java.util.Collection getHeaderNames() { + return null; + } + + } + + /** + * A servlet output stream for testing. 
+ */ + static class TestServletOutputStream extends ServletOutputStream { + + private final ByteArrayOutputStream buff = new ByteArrayOutputStream(); + + @Override + public void write(int b) throws IOException { + buff.write(b); + } + + @Override + public String toString() { + return Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); + } + + @Override + public boolean isReady() { + return true; + } + + @Override + public void setWriteListener(WriteListener writeListener) { + // ignore + } + + } + +} diff --git a/h2/src/test/org/h2/test/server/TestNestedLoop.java b/h2/src/test/org/h2/test/server/TestNestedLoop.java index affb6df7e7..e085efed57 100644 --- a/h2/src/test/org/h2/test/server/TestNestedLoop.java +++ b/h2/src/test/org/h2/test/server/TestNestedLoop.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.server; @@ -26,7 +26,7 @@ public class TestNestedLoop extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -34,7 +34,7 @@ public void test() throws SQLException { deleteDb("nestedLoop"); Connection conn = getConnection("nestedLoop"); Statement stat = conn.createStatement(); - stat.execute("create table test(id int identity, name varchar)"); + stat.execute("create table test(id int generated by default as identity, name varchar)"); int len = getSize(1010, 10000); for (int i = 0; i < len; i++) { stat.execute("insert into test(name) values('Hello World')"); diff --git a/h2/src/test/org/h2/test/server/TestWeb.java b/h2/src/test/org/h2/test/server/TestWeb.java index e3f77c9982..f7cac62797 100644 --- a/h2/src/test/org/h2/test/server/TestWeb.java +++ b/h2/src/test/org/h2/test/server/TestWeb.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.server; @@ -15,7 +15,6 @@ import java.nio.charset.StandardCharsets; import java.security.Principal; import java.sql.Connection; -import java.sql.SQLException; import java.util.Enumeration; import java.util.HashMap; import java.util.Locale; @@ -26,6 +25,8 @@ import javax.servlet.DispatcherType; import javax.servlet.RequestDispatcher; import javax.servlet.ServletConfig; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; import javax.servlet.ServletInputStream; import javax.servlet.ServletOutputStream; import javax.servlet.ServletRequest; @@ -37,8 +38,6 @@ import javax.servlet.http.HttpSession; import javax.servlet.http.HttpUpgradeHandler; import javax.servlet.http.Part; -import javax.servlet.ServletContext; -import javax.servlet.ServletException; import org.h2.api.ErrorCode; import org.h2.engine.Constants; @@ -47,10 +46,10 @@ import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; -import org.h2.test.utils.AssertThrows; import org.h2.tools.Server; import org.h2.util.StringUtils; import org.h2.util.Task; +import org.h2.util.Utils10; /** * Tests the H2 Console application. @@ -65,7 +64,7 @@ public class TestWeb extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -85,8 +84,6 @@ private void testServlet() throws Exception { final HashMap configMap = new HashMap<>(); configMap.put("ifExists", ""); configMap.put("", ""); - configMap.put("", ""); - configMap.put("", ""); ServletConfig config = new ServletConfig() { @Override @@ -123,22 +120,10 @@ public ServletContext getServletContext() { servlet.destroy(); } - private static void testWrongParameters() { - new AssertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1) { - @Override - public void test() throws SQLException { - Server.createPgServer("-pgPort 8182"); - }}; - new AssertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1) { - @Override - public void test() throws SQLException { - Server.createTcpServer("-tcpPort 8182"); - }}; - new AssertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1) { - @Override - public void test() throws SQLException { - Server.createWebServer("-webPort=8182"); - }}; + private void testWrongParameters() { + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, () -> Server.createPgServer("-pgPort 8182")); + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, () -> Server.createTcpServer("-tcpPort 8182")); + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, () -> Server.createWebServer("-webPort=8182")); } private void testAlreadyRunning() throws Exception { @@ -191,12 +176,7 @@ private void testTools() throws Exception { result = client.get(url, "tools.do?tool=DeleteDbFiles&args=-dir," + getBaseDir() + ",-db," + getTestName()); - String fn = getBaseDir() + "/" + getTestName(); - if (config.mvStore) { - fn += Constants.SUFFIX_MV_FILE; - } else { - fn += Constants.SUFFIX_PAGE_FILE; - } + String fn = getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE; assertFalse(FileUtils.exists(fn)); result = client.get(url, "tools.do?tool=Restore&args=-dir," + getBaseDir() + ",-db," + getTestName() +",-file," + getBaseDir() + @@ -450,8 +430,23 @@ private void 
testWebApp() throws Exception { result = client.get(url, "query.do?sql=@cancel"); assertContains(result, "There is currently no running statement"); result = client.get(url, - "query.do?sql=@generated insert into test(id) values(test_sequence.nextval)"); + "query.do?sql=@generated insert into test(id) values(next value for test_sequence)"); assertContains(result, "ID1"); + result = client.get(url, + "query.do?sql=@generated(1) insert into test(id) values(next value for test_sequence)"); + assertContains(result, "ID2"); + result = client.get(url, + "query.do?sql=@generated(1, 1) insert into test(id) values(next value for test_sequence)"); + assertContains(result, "IDID33"); + result = client.get(url, + "query.do?sql=@generated(id) insert into test(id) values(next value for test_sequence)"); + assertContains(result, "ID4"); + result = client.get(url, + "query.do?sql=@generated(id, id) insert into test(id) values(next value for test_sequence)"); + assertContains(result, "IDID55"); + result = client.get(url, + "query.do?sql=@generated() insert into test(id) values(next value for test_sequence)"); + assertContains(result, "
          "); result = client.get(url, "query.do?sql=@maxrows 2000"); assertContains(result, "Max rowcount is set"); result = client.get(url, "query.do?sql=@password_hash user password"); @@ -461,20 +456,15 @@ private void testWebApp() throws Exception { assertContains(result, "Ok"); result = client.get(url, "query.do?sql=@catalogs"); assertContains(result, "PUBLIC"); - result = client.get(url, - "query.do?sql=@column_privileges null null null TEST null"); + result = client.get(url, "query.do?sql=@column_privileges null null TEST null"); assertContains(result, "PRIVILEGE"); - result = client.get(url, - "query.do?sql=@cross_references null null null TEST"); + result = client.get(url, "query.do?sql=@cross_references null null TEST null null TEST"); assertContains(result, "PKTABLE_NAME"); - result = client.get(url, - "query.do?sql=@exported_keys null null null TEST"); + result = client.get(url, "query.do?sql=@exported_keys null null TEST"); assertContains(result, "PKTABLE_NAME"); - result = client.get(url, - "query.do?sql=@imported_keys null null null TEST"); + result = client.get(url, "query.do?sql=@imported_keys null null TEST"); assertContains(result, "PKTABLE_NAME"); - result = client.get(url, - "query.do?sql=@primary_keys null null null TEST"); + result = client.get(url, "query.do?sql=@primary_keys null null TEST"); assertContains(result, "PK_NAME"); result = client.get(url, "query.do?sql=@procedures null null null"); assertContains(result, "PROCEDURE_NAME"); @@ -485,23 +475,22 @@ private void testWebApp() throws Exception { result = client.get(url, "query.do?sql=@table_privileges"); assertContains(result, "PRIVILEGE"); result = client.get(url, "query.do?sql=@table_types"); - assertContains(result, "SYSTEM TABLE"); + assertContains(result, "BASE TABLE"); result = client.get(url, "query.do?sql=@type_info"); - assertContains(result, "CLOB"); + assertContains(result, "CHARACTER LARGE OBJECT"); result = client.get(url, "query.do?sql=@version_columns"); 
assertContains(result, "PSEUDO_COLUMN"); result = client.get(url, "query.do?sql=@attributes"); - assertContains(result, "Feature not supported: "attributes""); + assertContains(result, "ATTR_NAME"); result = client.get(url, "query.do?sql=@super_tables"); assertContains(result, "SUPERTABLE_NAME"); result = client.get(url, "query.do?sql=@super_types"); - assertContains(result, "Feature not supported: "superTypes""); + assertContains(result, "SUPERTYPE_NAME"); result = client.get(url, "query.do?sql=@prof_start"); assertContains(result, "Ok"); result = client.get(url, "query.do?sql=@prof_stop"); assertContains(result, "Top Stack Trace(s)"); - result = client.get(url, - "query.do?sql=@best_row_identifier null null TEST"); + result = client.get(url, "query.do?sql=@best_row_identifier null null TEST"); assertContains(result, "SCOPE"); assertContains(result, "COLUMN_NAME"); assertContains(result, "ID"); @@ -725,7 +714,7 @@ public RequestDispatcher getRequestDispatcher(String name) { @Override public String getScheme() { - return null; + return "http"; } @Override @@ -735,7 +724,7 @@ public String getServerName() { @Override public int getServerPort() { - return 0; + return 80; } @Override @@ -1180,7 +1169,7 @@ public void write(int b) throws IOException { @Override public String toString() { - return new String(buff.toByteArray(), StandardCharsets.UTF_8); + return Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); } @Override diff --git a/h2/src/test/org/h2/test/server/WebClient.java b/h2/src/test/org/h2/test/server/WebClient.java index e724b879cc..a24d10a587 100644 --- a/h2/src/test/org/h2/test/server/WebClient.java +++ b/h2/src/test/org/h2/test/server/WebClient.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.server; @@ -24,7 +24,7 @@ public class WebClient { private String contentType; /** - * Open an URL and get the HTML data. + * Open a URL and get the HTML data. * * @param url the HTTP URL * @return the HTML as a string diff --git a/h2/src/test/org/h2/test/server/package.html b/h2/src/test/org/h2/test/server/package.html index 4d8720db7c..75974b6522 100644 --- a/h2/src/test/org/h2/test/server/package.html +++ b/h2/src/test/org/h2/test/server/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/store/CalculateHashConstant.java b/h2/src/test/org/h2/test/store/CalculateHashConstant.java index 403695be21..9399768d00 100644 --- a/h2/src/test/org/h2/test/store/CalculateHashConstant.java +++ b/h2/src/test/org/h2/test/store/CalculateHashConstant.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; diff --git a/h2/src/test/org/h2/test/store/CalculateHashConstantLong.java b/h2/src/test/org/h2/test/store/CalculateHashConstantLong.java index 83b160ad02..6dd2aba472 100644 --- a/h2/src/test/org/h2/test/store/CalculateHashConstantLong.java +++ b/h2/src/test/org/h2/test/store/CalculateHashConstantLong.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; diff --git a/h2/src/test/org/h2/test/store/FreeSpaceList.java b/h2/src/test/org/h2/test/store/FreeSpaceList.java index e0e9a7d452..b6cb3e9031 100644 --- a/h2/src/test/org/h2/test/store/FreeSpaceList.java +++ b/h2/src/test/org/h2/test/store/FreeSpaceList.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -61,7 +61,7 @@ public synchronized long allocate(int length) { return result * blockSize; } } - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Could not find a free page to allocate"); } @@ -85,12 +85,12 @@ public synchronized void markUsed(long pos, int length) { i++; } if (found == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Cannot find spot to mark as used in free list"); } if (start + required > found.start + found.length) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Runs over edge of free space"); } @@ -136,7 +136,7 @@ public synchronized void free(long pos, int length) { i++; } if (found == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Cannot find spot to mark as unused in free list"); } @@ -172,7 +172,7 @@ public synchronized void free(long pos, int length) { private int getBlockCount(int length) { if (length <= 0) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Free space invalid length"); } return MathUtils.roundUpInt(length, blockSize) / blockSize; diff --git 
a/h2/src/test/org/h2/test/store/FreeSpaceTree.java b/h2/src/test/org/h2/test/store/FreeSpaceTree.java index b38008084a..07931a9834 100644 --- a/h2/src/test/org/h2/test/store/FreeSpaceTree.java +++ b/h2/src/test/org/h2/test/store/FreeSpaceTree.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -85,7 +85,7 @@ public synchronized void markUsed(long pos, int length) { BlockRange x = new BlockRange(start, blocks); BlockRange prev = freeSpace.floor(x); if (prev == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Free space already marked"); } if (prev.start == start) { @@ -121,7 +121,7 @@ public synchronized void free(long pos, int length) { BlockRange x = new BlockRange(start, blocks); BlockRange next = freeSpace.ceiling(x); if (next == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Free space sentinel is missing"); } BlockRange prev = freeSpace.lower(x); @@ -156,7 +156,7 @@ private int getBlock(long pos) { private int getBlockCount(int length) { if (length <= 0) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Free space invalid length"); } return MathUtils.roundUpInt(length, blockSize) / blockSize; diff --git a/h2/src/test/org/h2/test/store/RowDataType.java b/h2/src/test/org/h2/test/store/RowDataType.java index 19ac58b0d5..ac4611f294 100644 --- a/h2/src/test/org/h2/test/store/RowDataType.java +++ b/h2/src/test/org/h2/test/store/RowDataType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -8,28 +8,31 @@ import java.nio.ByteBuffer; import org.h2.mvstore.DataUtils; import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.BasicDataType; import org.h2.mvstore.type.DataType; /** * A row type. */ -public class RowDataType implements DataType { +public class RowDataType extends BasicDataType { - static final String PREFIX = "org.h2.test.store.row"; - - private final DataType[] types; + private final DataType[] types; + @SuppressWarnings("unchecked") RowDataType(DataType[] types) { this.types = types; } @Override - public int compare(Object a, Object b) { - if (a == b) { + public Object[][] createStorage(int size) { + return new Object[size][]; + } + + @Override + public int compare(Object[] ax, Object[] bx) { + if (ax == bx) { return 0; } - Object[] ax = (Object[]) a; - Object[] bx = (Object[]) b; int al = ax.length; int bl = bx.length; int len = Math.min(al, bl); @@ -48,8 +51,7 @@ public int compare(Object a, Object b) { } @Override - public int getMemory(Object obj) { - Object[] x = (Object[]) obj; + public int getMemory(Object[] x) { int len = x.length; int memory = 0; for (int i = 0; i < len; i++) { @@ -58,20 +60,6 @@ public int getMemory(Object obj) { return memory; } - @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); - } - } - - @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } - } - @Override public Object[] read(ByteBuffer buff) { int len = DataUtils.readVarInt(buff); @@ -83,13 +71,11 @@ public Object[] read(ByteBuffer buff) { } @Override - public void 
write(WriteBuffer buff, Object obj) { - Object[] x = (Object[]) obj; + public void write(WriteBuffer buff, Object[] x) { int len = x.length; buff.putVarInt(len); for (int i = 0; i < len; i++) { types[i].write(buff, x[i]); } } - } diff --git a/h2/src/test/org/h2/test/store/SequenceMap.java b/h2/src/test/org/h2/test/store/SequenceMap.java index 074eea97d5..aa94a5f99c 100644 --- a/h2/src/test/org/h2/test/store/SequenceMap.java +++ b/h2/src/test/org/h2/test/store/SequenceMap.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -10,6 +10,7 @@ import java.util.Map; import java.util.Set; import org.h2.mvstore.MVMap; +import org.h2.mvstore.type.DataType; /** * A custom map returning the keys and values 1 .. 10. @@ -26,8 +27,8 @@ public class SequenceMap extends MVMap { */ int max = 10; - public SequenceMap(Map config) { - super(config); + public SequenceMap(Map config, DataType keyType, DataType valueType) { + super(config, keyType, valueType); } @Override @@ -50,11 +51,6 @@ public Long next() { return Long.valueOf(x++); } - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; } @@ -71,7 +67,7 @@ public int size() { public static class Builder extends MVMap.Builder { @Override public SequenceMap create(Map config) { - return new SequenceMap(config); + return new SequenceMap(config, getKeyType(), getValueType()); } } diff --git a/h2/src/test/org/h2/test/store/TestBenchmark.java b/h2/src/test/org/h2/test/store/TestBenchmark.java index b951fc4a69..1f720479d5 100644 --- a/h2/src/test/org/h2/test/store/TestBenchmark.java +++ b/h2/src/test/org/h2/test/store/TestBenchmark.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -32,7 +32,7 @@ public class TestBenchmark extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/store/TestCacheConcurrentLIRS.java b/h2/src/test/org/h2/test/store/TestCacheConcurrentLIRS.java index e360c43fde..4c4f4093c1 100644 --- a/h2/src/test/org/h2/test/store/TestCacheConcurrentLIRS.java +++ b/h2/src/test/org/h2/test/store/TestCacheConcurrentLIRS.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -23,7 +23,7 @@ public class TestCacheConcurrentLIRS extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/store/TestCacheLIRS.java b/h2/src/test/org/h2/test/store/TestCacheLIRS.java index 3b2442be5e..95b9c167e0 100644 --- a/h2/src/test/org/h2/test/store/TestCacheLIRS.java +++ b/h2/src/test/org/h2/test/store/TestCacheLIRS.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; @@ -24,7 +24,7 @@ public class TestCacheLIRS extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -78,24 +78,9 @@ private void testEdgeCases() { CacheLIRS test = createCache(1); test.put(1, 10, 100); assertEquals(0, test.size()); - try { - test.put(null, 10, 100); - fail(); - } catch (NullPointerException e) { - // expected - } - try { - test.put(1, null, 100); - fail(); - } catch (NullPointerException e) { - // expected - } - try { - test.setMaxMemory(0); - fail(); - } catch (IllegalArgumentException e) { - // expected - } + assertThrows(NullPointerException.class, () -> test.put(null, 10, 100)); + assertThrows(NullPointerException.class, () -> test.put(1, null, 100)); + assertThrows(IllegalArgumentException.class, () -> test.setMaxMemory(0)); } private void testSize() { diff --git a/h2/src/test/org/h2/test/store/TestCacheLongKeyLIRS.java b/h2/src/test/org/h2/test/store/TestCacheLongKeyLIRS.java index 2e057d4184..487f0d6c47 100644 --- a/h2/src/test/org/h2/test/store/TestCacheLongKeyLIRS.java +++ b/h2/src/test/org/h2/test/store/TestCacheLongKeyLIRS.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -24,7 +24,7 @@ public class TestCacheLongKeyLIRS extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -86,18 +86,8 @@ private void testEdgeCases() { CacheLongKeyLIRS test = createCache(1); test.put(1, 10, 100); assertEquals(0, test.size()); - try { - test.put(1, null, 100); - fail(); - } catch (IllegalArgumentException e) { - // expected - } - try { - test.setMaxMemory(0); - fail(); - } catch (IllegalArgumentException e) { - // expected - } + assertThrows(IllegalArgumentException.class, () -> test.put(1, null, 100)); + assertThrows(IllegalArgumentException.class, () -> test.setMaxMemory(0)); } private void testSize() { diff --git a/h2/src/test/org/h2/test/store/TestDataUtils.java b/h2/src/test/org/h2/test/store/TestDataUtils.java index ba248cc510..e6b2c4acaf 100644 --- a/h2/src/test/org/h2/test/store/TestDataUtils.java +++ b/h2/src/test/org/h2/test/store/TestDataUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -11,9 +11,9 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Random; - import org.h2.mvstore.Chunk; import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.WriteBuffer; import org.h2.test.TestBase; @@ -28,7 +28,7 @@ public class TestDataUtils extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -143,7 +143,7 @@ private void testMapRandomized() { HashMap map = DataUtils.parseMap(buff.toString()); assertNotNull(map); // ok - } catch (IllegalStateException e) { + } catch (MVStoreException e) { // ok - but not another exception } } diff --git a/h2/src/test/org/h2/test/store/TestDefrag.java b/h2/src/test/org/h2/test/store/TestDefrag.java index 07f08561e3..b78bab536d 100644 --- a/h2/src/test/org/h2/test/store/TestDefrag.java +++ b/h2/src/test/org/h2/test/store/TestDefrag.java @@ -1,37 +1,40 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; import static org.h2.engine.Constants.SUFFIX_MV_FILE; -import org.h2.test.TestBase; -import org.h2.test.TestDb; + import java.io.File; import java.sql.Connection; import java.sql.ResultSet; import java.sql.Statement; +import java.text.NumberFormat; + +import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test off-line compaction procedure used by SHUTDOWN DEFRAG command * * @author Andrei Tokar */ -public class TestDefrag extends TestDb -{ +public class TestDefrag extends TestDb { + /** * Run just this test. * * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public boolean isEnabled() { - return config.mvStore && !config.memory && config.big && !config.travis; + return !config.memory && config.big && !config.ci; } @Override @@ -39,19 +42,25 @@ public void test() throws Exception { String dbName = getTestName(); deleteDb(dbName); File dbFile = new File(getBaseDir(), dbName + SUFFIX_MV_FILE); + NumberFormat nf = NumberFormat.getInstance(); try (Connection c = getConnection(dbName)) { try (Statement st = c.createStatement()) { st.execute("CREATE TABLE IF NOT EXISTS test (id INT PRIMARY KEY, txt varchar)" + " AS SELECT x, x || SPACE(200) FROM SYSTEM_RANGE(1,10000000)"); + st.execute("checkpoint"); } long origSize = dbFile.length(); - assertTrue(origSize > 4_000_000_000L); + String message = "before defrag: " + nf.format(origSize); + trace(message); + assertTrue(message, origSize > 4_000_000_000L); try (Statement st = c.createStatement()) { st.execute("shutdown defrag"); } - long compactedSize = dbFile.length(); - assertTrue(compactedSize < 400_000_000); } + long compactedSize = dbFile.length(); + String message = "after defrag: " + nf.format(compactedSize); + trace(message); + assertTrue(message, compactedSize < 400_000_000L); try (Connection c = getConnection(dbName + ";LAZY_QUERY_EXECUTION=1")) { try (Statement st = c.createStatement()) { diff --git a/h2/src/test/org/h2/test/store/TestFreeSpace.java b/h2/src/test/org/h2/test/store/TestFreeSpace.java index b9b3bb8cb5..c4867a4eab 100644 --- a/h2/src/test/org/h2/test/store/TestFreeSpace.java +++ b/h2/src/test/org/h2/test/store/TestFreeSpace.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; @@ -23,7 +23,7 @@ public class TestFreeSpace extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); testMemoryUsage(); testPerformance(); } diff --git a/h2/src/test/org/h2/test/store/TestImmutableArray.java b/h2/src/test/org/h2/test/store/TestImmutableArray.java index 7b95eebc04..9b40fdf404 100644 --- a/h2/src/test/org/h2/test/store/TestImmutableArray.java +++ b/h2/src/test/org/h2/test/store/TestImmutableArray.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; diff --git a/h2/src/test/org/h2/test/store/TestKillProcessWhileWriting.java b/h2/src/test/org/h2/test/store/TestKillProcessWhileWriting.java index 8a69c50ffa..802949a8dd 100644 --- a/h2/src/test/org/h2/test/store/TestKillProcessWhileWriting.java +++ b/h2/src/test/org/h2/test/store/TestKillProcessWhileWriting.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -29,7 +29,7 @@ public class TestKillProcessWhileWriting extends TestBase { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.big = true; - test.test(); + test.testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/store/TestMVRTree.java b/h2/src/test/org/h2/test/store/TestMVRTree.java index 8932d900cf..4af60017df 100644 --- a/h2/src/test/org/h2/test/store/TestMVRTree.java +++ b/h2/src/test/org/h2/test/store/TestMVRTree.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -16,6 +16,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Iterator; +import java.util.Objects; import java.util.Random; import javax.imageio.ImageIO; @@ -24,7 +25,8 @@ import org.h2.mvstore.MVStore; import org.h2.mvstore.rtree.MVRTreeMap; -import org.h2.mvstore.rtree.SpatialKey; +import org.h2.mvstore.rtree.Spatial; +import org.h2.mvstore.db.SpatialKey; import org.h2.mvstore.type.StringDataType; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; @@ -40,7 +42,7 @@ public class TestMVRTree extends TestMVStore { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -58,50 +60,45 @@ public void test() { private void testRemoveAll() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - s = new MVStore.Builder().fileName(fileName). 
- pageSplitSize(100).open(); - MVRTreeMap map = s.openMap("data", - new MVRTreeMap.Builder()); - Random r = new Random(1); - for (int i = 0; i < 1000; i++) { - float x = r.nextFloat() * 50, y = r.nextFloat() * 50; - SpatialKey k = new SpatialKey(i % 100, x, x + 2, y, y + 1); - map.put(k, "i:" + i); + try (MVStore s = new MVStore.Builder().fileName(fileName).pageSplitSize(100).open()) { + MVRTreeMap map = s.openMap("data", new MVRTreeMap.Builder<>()); + Random r = new Random(1); + for (int i = 0; i < 1000; i++) { + float x = r.nextFloat() * 50, y = r.nextFloat() * 50; + Spatial k = new SpatialKey(i % 100, x, x + 2, y, y + 1); + map.put(k, "i:" + i); + } + s.commit(); + map.clear(); } - s.commit(); - map.clear(); - s.close(); } private void testRandomInsert() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - s = new MVStore.Builder().fileName(fileName). - pageSplitSize(100).open(); - MVRTreeMap map = s.openMap("data", - new MVRTreeMap.Builder()); - Random r = new Random(1); - for (int i = 0; i < 1000; i++) { - if (i % 100 == 0) { - r.setSeed(1); - } - float x = r.nextFloat() * 50, y = r.nextFloat() * 50; - SpatialKey k = new SpatialKey(i % 100, x, x + 2, y, y + 1); - map.put(k, "i:" + i); - if (i % 10 == 0) { - s.commit(); + try (MVStore s = new MVStore.Builder().fileName(fileName). 
+ pageSplitSize(100).open()) { + MVRTreeMap map = s.openMap("data", new MVRTreeMap.Builder<>()); + Random r = new Random(1); + for (int i = 0; i < 1000; i++) { + if (i % 100 == 0) { + r.setSeed(1); + } + float x = r.nextFloat() * 50, y = r.nextFloat() * 50; + Spatial k = new SpatialKey(i % 100, x, x + 2, y, y + 1); + map.put(k, "i:" + i); + if (i % 10 == 0) { + s.commit(); + } } } - s.close(); } private void testSpatialKey() { - SpatialKey a0 = new SpatialKey(0, 1, 2, 3, 4); - SpatialKey a1 = new SpatialKey(0, 1, 2, 3, 4); - SpatialKey b0 = new SpatialKey(1, 1, 2, 3, 4); - SpatialKey c0 = new SpatialKey(1, 1.1f, 2.2f, 3.3f, 4.4f); + Spatial a0 = new SpatialKey(0, 1, 2, 3, 4); + Spatial a1 = new SpatialKey(0, 1, 2, 3, 4); + Spatial b0 = new SpatialKey(1, 1, 2, 3, 4); + Spatial c0 = new SpatialKey(1, 1.1f, 2.2f, 3.3f, 4.4f); assertEquals(0, a0.hashCode()); assertEquals(1, b0.hashCode()); assertTrue(a0.equals(a0)); @@ -117,154 +114,149 @@ private void testSpatialKey() { private void testExample() { // create an in-memory store - MVStore s = MVStore.open(null); + try (MVStore s = MVStore.open(null)) { - // open an R-tree map - MVRTreeMap r = s.openMap("data", - new MVRTreeMap.Builder()); + // open an R-tree map + MVRTreeMap r = s.openMap("data", new MVRTreeMap.Builder<>()); - // add two key-value pairs - // the first value is the key id (to make the key unique) - // then the min x, max x, min y, max y - r.add(new SpatialKey(0, -3f, -2f, 2f, 3f), "left"); - r.add(new SpatialKey(1, 3f, 4f, 4f, 5f), "right"); + // add two key-value pairs + // the first value is the key id (to make the key unique) + // then the min x, max x, min y, max y + r.add(new SpatialKey(0, -3f, -2f, 2f, 3f), "left"); + r.add(new SpatialKey(1, 3f, 4f, 4f, 5f), "right"); - // iterate over the intersecting keys - Iterator it = r.findIntersectingKeys( - new SpatialKey(0, 0f, 9f, 3f, 6f)); - for (SpatialKey k; it.hasNext();) { - k = it.next(); - // System.out.println(k + ": " + r.get(k)); - 
assertNotNull(k); + // iterate over the intersecting keys + Iterator it = r.findIntersectingKeys( + new SpatialKey(0, 0f, 9f, 3f, 6f)); + for (Spatial k; it.hasNext(); ) { + k = it.next(); + // System.out.println(k + ": " + r.get(k)); + assertNotNull(k); + } } - s.close(); } private void testMany() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - s = openStore(fileName); - // s.setMaxPageSize(50); - MVRTreeMap r = s.openMap("data", - new MVRTreeMap.Builder().dimensions(2). - valueType(StringDataType.INSTANCE)); - // r.setQuadraticSplit(true); - Random rand = new Random(1); int len = 1000; - // long t = System.nanoTime(); - // Profiler prof = new Profiler(); - // prof.startCollecting(); - for (int i = 0; i < len; i++) { - float x = rand.nextFloat(), y = rand.nextFloat(); - float p = (float) (rand.nextFloat() * 0.000001); - SpatialKey k = new SpatialKey(i, x - p, x + p, y - p, y + p); - r.add(k, "" + i); - if (i > 0 && (i % len / 10) == 0) { - s.commit(); - } - if (i > 0 && (i % 10000) == 0) { - render(r, getBaseDir() + "/test.png"); + try (MVStore s = openStore(fileName)) { + // s.setMaxPageSize(50); + MVRTreeMap r = s.openMap("data", + new MVRTreeMap.Builder().dimensions(2). + valueType(StringDataType.INSTANCE)); + // r.setQuadraticSplit(true); + Random rand = new Random(1); + // long t = System.nanoTime(); + // Profiler prof = new Profiler(); + // prof.startCollecting(); + for (int i = 0; i < len; i++) { + float x = rand.nextFloat(), y = rand.nextFloat(); + float p = (float) (rand.nextFloat() * 0.000001); + Spatial k = new SpatialKey(i, x - p, x + p, y - p, y + p); + r.add(k, "" + i); + if (i > 0 && (i % len / 10) == 0) { + s.commit(); + } + if (i > 0 && (i % 10000) == 0) { + render(r, getBaseDir() + "/test.png"); + } } } - s.close(); - s = openStore(fileName); - r = s.openMap("data", - new MVRTreeMap.Builder().dimensions(2). 
- valueType(StringDataType.INSTANCE)); - rand = new Random(1); - for (int i = 0; i < len; i++) { - float x = rand.nextFloat(), y = rand.nextFloat(); - float p = (float) (rand.nextFloat() * 0.000001); - SpatialKey k = new SpatialKey(i, x - p, x + p, y - p, y + p); - assertEquals("" + i, r.get(k)); - } - assertEquals(len, r.size()); - int count = 0; - for (SpatialKey k : r.keySet()) { - assertNotNull(r.get(k)); - count++; - } - assertEquals(len, count); - rand = new Random(1); - for (int i = 0; i < len; i++) { - float x = rand.nextFloat(), y = rand.nextFloat(); - float p = (float) (rand.nextFloat() * 0.000001); - SpatialKey k = new SpatialKey(i, x - p, x + p, y - p, y + p); - r.remove(k); + try (MVStore s = openStore(fileName)) { + MVRTreeMap r = s.openMap("data", + new MVRTreeMap.Builder().dimensions(2). + valueType(StringDataType.INSTANCE)); + Random rand = new Random(1); + for (int i = 0; i < len; i++) { + float x = rand.nextFloat(), y = rand.nextFloat(); + float p = (float) (rand.nextFloat() * 0.000001); + Spatial k = new SpatialKey(i, x - p, x + p, y - p, y + p); + assertEquals("" + i, r.get(k)); + } + assertEquals(len, r.size()); + int count = 0; + for (Spatial k : r.keySet()) { + assertNotNull(r.get(k)); + count++; + } + assertEquals(len, count); + rand = new Random(1); + for (int i = 0; i < len; i++) { + float x = rand.nextFloat(), y = rand.nextFloat(); + float p = (float) (rand.nextFloat() * 0.000001); + Spatial k = new SpatialKey(i, x - p, x + p, y - p, y + p); + r.remove(k); + } + assertEquals(0, r.size()); } - assertEquals(0, r.size()); - s.close(); } private void testSimple() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - s = openStore(fileName); - MVRTreeMap r = s.openMap("data", - new MVRTreeMap.Builder().dimensions(2). - valueType(StringDataType.INSTANCE)); + try (MVStore s = openStore(fileName)) { + MVRTreeMap r = s.openMap("data", + new MVRTreeMap.Builder().dimensions(2). 
+ valueType(StringDataType.INSTANCE)); - add(r, "Bern", key(0, 46.57, 7.27, 124381)); - add(r, "Basel", key(1, 47.34, 7.36, 170903)); - add(r, "Zurich", key(2, 47.22, 8.33, 376008)); - add(r, "Lucerne", key(3, 47.03, 8.18, 77491)); - add(r, "Geneva", key(4, 46.12, 6.09, 191803)); - add(r, "Lausanne", key(5, 46.31, 6.38, 127821)); - add(r, "Winterthur", key(6, 47.30, 8.45, 102966)); - add(r, "St. Gallen", key(7, 47.25, 9.22, 73500)); - add(r, "Biel/Bienne", key(8, 47.08, 7.15, 51203)); - add(r, "Lugano", key(9, 46.00, 8.57, 54667)); - add(r, "Thun", key(10, 46.46, 7.38, 42623)); - add(r, "Bellinzona", key(11, 46.12, 9.01, 17373)); - add(r, "Chur", key(12, 46.51, 9.32, 33756)); - // render(r, getBaseDir() + "/test.png"); - ArrayList list = new ArrayList<>(r.size()); - for (SpatialKey x : r.keySet()) { - list.add(r.get(x)); - } - Collections.sort(list); - assertEquals("[Basel, Bellinzona, Bern, Biel/Bienne, Chur, Geneva, " + - "Lausanne, Lucerne, Lugano, St. Gallen, Thun, Winterthur, Zurich]", - list.toString()); + add(r, "Bern", key(0, 46.57, 7.27, 124381)); + add(r, "Basel", key(1, 47.34, 7.36, 170903)); + add(r, "Zurich", key(2, 47.22, 8.33, 376008)); + add(r, "Lucerne", key(3, 47.03, 8.18, 77491)); + add(r, "Geneva", key(4, 46.12, 6.09, 191803)); + add(r, "Lausanne", key(5, 46.31, 6.38, 127821)); + add(r, "Winterthur", key(6, 47.30, 8.45, 102966)); + add(r, "St. Gallen", key(7, 47.25, 9.22, 73500)); + add(r, "Biel/Bienne", key(8, 47.08, 7.15, 51203)); + add(r, "Lugano", key(9, 46.00, 8.57, 54667)); + add(r, "Thun", key(10, 46.46, 7.38, 42623)); + add(r, "Bellinzona", key(11, 46.12, 9.01, 17373)); + add(r, "Chur", key(12, 46.51, 9.32, 33756)); + // render(r, getBaseDir() + "/test.png"); + ArrayList list = new ArrayList<>(r.size()); + for (Spatial x : r.keySet()) { + list.add(r.get(x)); + } + Collections.sort(list); + assertEquals("[Basel, Bellinzona, Bern, Biel/Bienne, Chur, Geneva, " + + "Lausanne, Lucerne, Lugano, St. 
Gallen, Thun, Winterthur, Zurich]", + list.toString()); - SpatialKey k; - // intersection - list.clear(); - k = key(0, 47.34, 7.36, 0); - for (Iterator it = r.findIntersectingKeys(k); it.hasNext();) { - list.add(r.get(it.next())); - } - Collections.sort(list); - assertEquals("[Basel]", list.toString()); + // intersection + list.clear(); + Spatial k = key(0, 47.34, 7.36, 0); + for (Iterator it = r.findIntersectingKeys(k); it.hasNext(); ) { + list.add(r.get(it.next())); + } + Collections.sort(list); + assertEquals("[Basel]", list.toString()); - // contains - list.clear(); - k = key(0, 47.34, 7.36, 0); - for (Iterator it = r.findContainedKeys(k); it.hasNext();) { - list.add(r.get(it.next())); - } - assertEquals(0, list.size()); - k = key(0, 47.34, 7.36, 171000); - for (Iterator it = r.findContainedKeys(k); it.hasNext();) { - list.add(r.get(it.next())); + // contains + list.clear(); + k = key(0, 47.34, 7.36, 0); + for (Iterator it = r.findContainedKeys(k); it.hasNext(); ) { + list.add(r.get(it.next())); + } + assertEquals(0, list.size()); + k = key(0, 47.34, 7.36, 171000); + for (Iterator it = r.findContainedKeys(k); it.hasNext(); ) { + list.add(r.get(it.next())); + } + assertEquals("[Basel]", list.toString()); } - assertEquals("[Basel]", list.toString()); - - s.close(); } - private static void add(MVRTreeMap r, String name, SpatialKey k) { + private static void add(MVRTreeMap r, String name, Spatial k) { r.put(k, name); } - private static SpatialKey key(int id, double y, double x, int population) { + private static Spatial key(int id, double y, double x, int population) { float a = (float) ((int) x + (x - (int) x) * 5 / 3); float b = 50 - (float) ((int) y + (y - (int) y) * 5 / 3); float s = (float) Math.sqrt(population / 10000000.); - SpatialKey k = new SpatialKey(id, a - s, a + s, b - s, b + s); + Spatial k = new SpatialKey(id, a - s, a + s, b - s, b + s); return k; } @@ -282,23 +274,23 @@ private static void render(MVRTreeMap r, String fileName) { 
g2d.setColor(Color.BLACK); SpatialKey b = new SpatialKey(0, Float.MAX_VALUE, Float.MIN_VALUE, Float.MAX_VALUE, Float.MIN_VALUE); - for (SpatialKey x : r.keySet()) { + for (Spatial x : r.keySet()) { b.setMin(0, Math.min(b.min(0), x.min(0))); b.setMin(1, Math.min(b.min(1), x.min(1))); b.setMax(0, Math.max(b.max(0), x.max(0))); b.setMax(1, Math.max(b.max(1), x.max(1))); } // System.out.println(b); - for (SpatialKey x : r.keySet()) { + for (Spatial x : r.keySet()) { int[] rect = scale(b, x, width, height); g2d.drawRect(rect[0], rect[1], rect[2] - rect[0], rect[3] - rect[1]); String s = r.get(x); g2d.drawChars(s.toCharArray(), 0, s.length(), rect[0], rect[1] - 4); } g2d.setColor(Color.red); - ArrayList list = new ArrayList<>(); + ArrayList list = new ArrayList<>(); r.addNodeKeys(list, r.getRootPage()); - for (SpatialKey x : list) { + for (Spatial x : list) { int[] rect = scale(b, x, width, height); g2d.drawRect(rect[0], rect[1], rect[2] - rect[0], rect[3] - rect[1]); } @@ -311,7 +303,7 @@ private static void render(MVRTreeMap r, String fileName) { } } - private static int[] scale(SpatialKey b, SpatialKey x, int width, int height) { + private static int[] scale(Spatial b, Spatial x, int width, int height) { int[] rect = { (int) ((x.min(0) - b.min(0)) * (width * 0.9) / (b.max(0) - b.min(0)) + width * 0.05), @@ -331,117 +323,111 @@ private void testRandom() { } private void testRandomFind() { - MVStore s = openStore(null); - MVRTreeMap m = s.openMap("data", - new MVRTreeMap.Builder()); - int max = 100; - for (int x = 0; x < max; x++) { - for (int y = 0; y < max; y++) { - int id = x * max + y; - SpatialKey k = new SpatialKey(id, x, x, y, y); - m.put(k, id); - } - } - Random rand = new Random(1); - int operationCount = 1000; - for (int i = 0; i < operationCount; i++) { - int x1 = rand.nextInt(max), y1 = rand.nextInt(10); - int x2 = rand.nextInt(10), y2 = rand.nextInt(10); - int intersecting = Math.max(0, x2 - x1 + 1) * Math.max(0, y2 - y1 + 1); - int contained = Math.max(0, 
x2 - x1 - 1) * Math.max(0, y2 - y1 - 1); - SpatialKey k = new SpatialKey(0, x1, x2, y1, y2); - Iterator it = m.findContainedKeys(k); - int count = 0; - while (it.hasNext()) { - SpatialKey t = it.next(); - assertTrue(t.min(0) > x1); - assertTrue(t.min(1) > y1); - assertTrue(t.max(0) < x2); - assertTrue(t.max(1) < y2); - count++; + try (MVStore s = openStore(null)) { + MVRTreeMap m = s.openMap("data", new MVRTreeMap.Builder<>()); + int max = 100; + for (int x = 0; x < max; x++) { + for (int y = 0; y < max; y++) { + int id = x * max + y; + Spatial k = new SpatialKey(id, x, x, y, y); + m.put(k, id); + } } - assertEquals(contained, count); - it = m.findIntersectingKeys(k); - count = 0; - while (it.hasNext()) { - SpatialKey t = it.next(); - assertTrue(t.min(0) >= x1); - assertTrue(t.min(1) >= y1); - assertTrue(t.max(0) <= x2); - assertTrue(t.max(1) <= y2); - count++; + Random rand = new Random(1); + int operationCount = 1000; + for (int i = 0; i < operationCount; i++) { + int x1 = rand.nextInt(max), y1 = rand.nextInt(10); + int x2 = rand.nextInt(10), y2 = rand.nextInt(10); + int intersecting = Math.max(0, x2 - x1 + 1) * Math.max(0, y2 - y1 + 1); + int contained = Math.max(0, x2 - x1 - 1) * Math.max(0, y2 - y1 - 1); + Spatial k = new SpatialKey(0, x1, x2, y1, y2); + Iterator it = m.findContainedKeys(k); + int count = 0; + while (it.hasNext()) { + Spatial t = it.next(); + assertTrue(t.min(0) > x1); + assertTrue(t.min(1) > y1); + assertTrue(t.max(0) < x2); + assertTrue(t.max(1) < y2); + count++; + } + assertEquals(contained, count); + it = m.findIntersectingKeys(k); + count = 0; + while (it.hasNext()) { + Spatial t = it.next(); + assertTrue(t.min(0) >= x1); + assertTrue(t.min(1) >= y1); + assertTrue(t.max(0) <= x2); + assertTrue(t.max(1) <= y2); + count++; + } + assertEquals(intersecting, count); } - assertEquals(intersecting, count); } } private void testRandom(boolean quadraticSplit) { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - 
MVStore s = openStore(fileName); - - MVRTreeMap m = s.openMap("data", - new MVRTreeMap.Builder()); + try (MVStore s = openStore(fileName)) { + MVRTreeMap m = s.openMap("data", + new MVRTreeMap.Builder<>()); - m.setQuadraticSplit(quadraticSplit); - HashMap map = new HashMap<>(); - Random rand = new Random(1); - int operationCount = 10000; - int maxValue = 300; - for (int i = 0; i < operationCount; i++) { - int key = rand.nextInt(maxValue); - Random rk = new Random(key); - float x = rk.nextFloat(), y = rk.nextFloat(); - float p = (float) (rk.nextFloat() * 0.000001); - SpatialKey k = new SpatialKey(key, x - p, x + p, y - p, y + p); - String v = "" + rand.nextInt(); - Iterator it; - switch (rand.nextInt(5)) { - case 0: - log(i + ": put " + k + " = " + v + " " + m.size()); - m.put(k, v); - map.put(k, v); - break; - case 1: - log(i + ": remove " + k + " " + m.size()); - m.remove(k); - map.remove(k); - break; - case 2: { - p = (float) (rk.nextFloat() * 0.01); - k = new SpatialKey(key, x - p, x + p, y - p, y + p); - it = m.findIntersectingKeys(k); - while (it.hasNext()) { - SpatialKey n = it.next(); - String a = map.get(n); - assertNotNull(a); - } - break; - } - case 3: { - p = (float) (rk.nextFloat() * 0.01); - k = new SpatialKey(key, x - p, x + p, y - p, y + p); - it = m.findContainedKeys(k); - while (it.hasNext()) { - SpatialKey n = it.next(); - String a = map.get(n); - assertNotNull(a); + m.setQuadraticSplit(quadraticSplit); + HashMap map = new HashMap<>(); + Random rand = new Random(1); + int operationCount = 10000; + int maxValue = 300; + for (int i = 0; i < operationCount; i++) { + int key = rand.nextInt(maxValue); + Random rk = new Random(key); + float x = rk.nextFloat(), y = rk.nextFloat(); + float p = (float) (rk.nextFloat() * 0.000001); + Spatial k = new SpatialKey(key, x - p, x + p, y - p, y + p); + String v = "" + rand.nextInt(); + Iterator it; + switch (rand.nextInt(5)) { + case 0: + log(i + ": put " + k + " = " + v + " " + m.size()); + m.put(k, v); + 
map.put(k, v); + break; + case 1: + log(i + ": remove " + k + " " + m.size()); + m.remove(k); + map.remove(k); + break; + case 2: { + p = (float) (rk.nextFloat() * 0.01); + k = new SpatialKey(key, x - p, x + p, y - p, y + p); + it = m.findIntersectingKeys(k); + while (it.hasNext()) { + Spatial n = it.next(); + String a = map.get(n); + assertNotNull(a); + } + break; + } + case 3: { + p = (float) (rk.nextFloat() * 0.01); + k = new SpatialKey(key, x - p, x + p, y - p, y + p); + it = m.findContainedKeys(k); + while (it.hasNext()) { + Spatial n = it.next(); + String a = map.get(n); + assertNotNull(a); + } + break; + } + default: + String a = map.get(k); + String b = m.get(k); + assertTrue(Objects.equals(a, b)); + break; } - break; + assertEquals(map.size(), m.size()); } - default: - String a = map.get(k); - String b = m.get(k); - if (a == null || b == null) { - assertTrue(a == b); - } else { - assertEquals(a, b); - } - break; - } - assertEquals(map.size(), m.size()); } - s.close(); } - } diff --git a/h2/src/test/org/h2/test/store/TestMVStore.java b/h2/src/test/org/h2/test/store/TestMVStore.java index 0e569e49e1..3d5072b4b1 100644 --- a/h2/src/test/org/h2/test/store/TestMVStore.java +++ b/h2/src/test/org/h2/test/store/TestMVStore.java @@ -1,11 +1,10 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; -import java.lang.Thread.UncaughtExceptionHandler; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; @@ -24,6 +23,7 @@ import org.h2.mvstore.FileStore; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.OffHeapStore; import org.h2.mvstore.type.DataType; import org.h2.mvstore.type.ObjectDataType; @@ -31,7 +31,7 @@ import org.h2.store.fs.FilePath; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; +import org.h2.util.Utils; /** * Tests the MVStore. @@ -47,7 +47,7 @@ public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; test.config.big = true; - test.test(); + test.testFromMain(); } @Override @@ -84,6 +84,7 @@ public void test() throws Exception { testFileHeader(); testFileHeaderCorruption(); testIndexSkip(); + testIndexSkipReverse(); testMinMaxNextKey(); testStoreVersion(); testIterateOldVersion(); @@ -106,42 +107,45 @@ public void test() throws Exception { testRandom(); testKeyValueClasses(); testIterate(); + testIterateReverse(); testCloseTwice(); testSimple(); + testInvalidSettings(); // longer running tests testLargerThan2G(); } private void testRemoveMapRollback() { - MVStore store = new MVStore.Builder(). - open(); - MVMap map = store.openMap("test"); - map.put("1", "Hello"); - store.commit(); - store.removeMap(map); - store.rollback(); - assertTrue(store.hasMap("test")); - map = store.openMap("test"); - assertEquals("Hello", map.get("1")); - store.close(); + try (MVStore store = new MVStore.Builder(). 
+ open()) { + MVMap map = store.openMap("test"); + map.put("1", "Hello"); + store.commit(); + store.removeMap(map); + store.rollback(); + assertTrue(store.hasMap("test")); + map = store.openMap("test"); + assertEquals("Hello", map.get("1")); + } + FileUtils.createDirectories(getTestDir("")); String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - store = new MVStore.Builder(). + try (MVStore store = new MVStore.Builder(). autoCommitDisabled(). fileName(fileName). - open(); - map = store.openMap("test"); - map.put("1", "Hello"); - store.commit(); - store.removeMap(map); - store.rollback(); - assertTrue(store.hasMap("test")); - map = store.openMap("test"); - // the data will get back alive - assertEquals("Hello", map.get("1")); - store.close(); + open()) { + MVMap map = store.openMap("test"); + map.put("1", "Hello"); + store.commit(); + store.removeMap(map); + store.rollback(); + assertTrue(store.hasMap("test")); + map = store.openMap("test"); + // the data will get back alive + assertEquals("Hello", map.get("1")); + } } private void testProvidedFileStoreNotOpenedAndClosed() { @@ -170,38 +174,38 @@ public void close() { private void testVolatileMap() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore store = new MVStore.Builder(). + try (MVStore store = new MVStore.Builder(). fileName(fileName). - open(); - MVMap map = store.openMap("test"); - assertFalse(map.isVolatile()); - map.setVolatile(true); - assertTrue(map.isVolatile()); - map.put("1", "Hello"); - assertEquals("Hello", map.get("1")); - assertEquals(1, map.size()); - store.close(); - store = new MVStore.Builder(). + open()) { + MVMap map = store.openMap("test"); + assertFalse(map.isVolatile()); + map.setVolatile(true); + assertTrue(map.isVolatile()); + map.put("1", "Hello"); + assertEquals("Hello", map.get("1")); + assertEquals(1, map.size()); + } + try (MVStore store = new MVStore.Builder(). fileName(fileName). 
- open(); - assertTrue(store.hasMap("test")); - map = store.openMap("test"); - assertEquals(0, map.size()); - store.close(); + open()) { + assertTrue(store.hasMap("test")); + MVMap map = store.openMap("test"); + assertEquals(0, map.size()); + } } private void testEntrySet() { - MVStore s = new MVStore.Builder().open(); - MVMap map; - map = s.openMap("data"); - for (int i = 0; i < 20; i++) { - map.put(i, i * 10); - } - int next = 0; - for (Entry e : map.entrySet()) { - assertEquals(next, e.getKey().intValue()); - assertEquals(next * 10, e.getValue().intValue()); - next++; + try (MVStore s = new MVStore.Builder().open()) { + MVMap map = s.openMap("data"); + for (int i = 0; i < 20; i++) { + map.put(i, i * 10); + } + int next = 0; + for (Entry e : map.entrySet()) { + assertEquals(next, e.getKey().intValue()); + assertEquals(next * 10, e.getValue().intValue()); + next++; + } } } @@ -226,6 +230,7 @@ private void testCompressEmptyPage() { private void testCompressed() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); + String data = new String(new char[1000]).replace((char) 0, 'x'); long lastSize = 0; for (int level = 0; level <= 2; level++) { FileUtils.delete(fileName); @@ -235,196 +240,182 @@ private void testCompressed() { } else if (level == 2) { builder.compressHigh(); } - MVStore s = builder.open(); - MVMap map = s.openMap("data"); - String data = new String(new char[1000]).replace((char) 0, 'x'); - for (int i = 0; i < 400; i++) { - map.put(data + i, data); + try (MVStore s = builder.open()) { + MVMap map = s.openMap("data"); + for (int i = 0; i < 400; i++) { + map.put(data + i, data); + } } - s.close(); long size = FileUtils.size(fileName); if (level > 0) { assertTrue(size < lastSize); } lastSize = size; - s = new MVStore.Builder().fileName(fileName).open(); - map = s.openMap("data"); - for (int i = 0; i < 400; i++) { - assertEquals(data, map.get(data + i)); + try (MVStore s = new MVStore.Builder().fileName(fileName).open()) { + 
MVMap map = s.openMap("data"); + for (int i = 0; i < 400; i++) { + assertEquals(data, map.get(data + i)); + } } - s.close(); } } private void testFileFormatExample() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = MVStore.open(fileName); - MVMap map = s.openMap("data"); - for (int i = 0; i < 400; i++) { - map.put(i, "Hello"); - } - s.commit(); - for (int i = 0; i < 100; i++) { - map.put(0, "Hi"); + try (MVStore s = MVStore.open(fileName)) { + MVMap map = s.openMap("data"); + for (int i = 0; i < 400; i++) { + map.put(i, "Hello"); + } + s.commit(); + for (int i = 0; i < 100; i++) { + map.put(0, "Hi"); + } + s.commit(); } - s.commit(); - s.close(); // ;MVStoreTool.dump(fileName); } private void testMaxChunkLength() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = new MVStore.Builder().fileName(fileName).open(); - MVMap map = s.openMap("data"); - map.put(0, new byte[2 * 1024 * 1024]); - s.commit(); - map.put(1, new byte[10 * 1024]); - s.commit(); - MVMap meta = s.getMetaMap(); - Chunk c = Chunk.fromString(meta.get("chunk.1")); - assertTrue(c.maxLen < Integer.MAX_VALUE); - assertTrue(c.maxLenLive < Integer.MAX_VALUE); - s.close(); + try (MVStore s = new MVStore.Builder().fileName(fileName).open()) { + MVMap map = s.openMap("data"); + map.put(0, new byte[2 * 1024 * 1024]); + s.commit(); + map.put(1, new byte[10 * 1024]); + s.commit(); + MVMap layout = s.getLayoutMap(); + Chunk c = Chunk.fromString(layout.get(DataUtils.META_CHUNK + "1")); + assertTrue(c.maxLen < Integer.MAX_VALUE); + assertTrue(c.maxLenLive < Integer.MAX_VALUE); + } } private void testCacheInfo() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = new MVStore.Builder().fileName(fileName).cacheSize(2).open(); - assertEquals(2, s.getCacheSize()); - MVMap map; - map = s.openMap("data"); - byte[] data = new byte[1024]; - for (int i = 0; i < 1000; i++) { - map.put(i, 
data); - s.commit(); - if (i < 50) { - assertEquals(0, s.getCacheSizeUsed()); - } else if (i > 300) { - assertTrue(s.getCacheSizeUsed() >= 1); + try (MVStore s = new MVStore.Builder().fileName(fileName).cacheSize(2).open()) { + assertEquals(2, s.getCacheSize()); + MVMap map; + map = s.openMap("data"); + byte[] data = new byte[1024]; + for (int i = 0; i < 1000; i++) { + map.put(i, data); + s.commit(); + if (i < 50) { + assertEquals(0, s.getCacheSizeUsed()); + } else if (i > 300) { + assertTrue(s.getCacheSizeUsed() >= 1); + } } } - s.close(); - s = new MVStore.Builder().open(); - assertEquals(0, s.getCacheSize()); - assertEquals(0, s.getCacheSizeUsed()); - s.close(); + try (MVStore s = new MVStore.Builder().open()) { + assertEquals(0, s.getCacheSize()); + assertEquals(0, s.getCacheSizeUsed()); + } } - private void testVersionsToKeep() throws Exception { - MVStore s = new MVStore.Builder().open(); - assertEquals(5, s.getVersionsToKeep()); - MVMap map; - map = s.openMap("data"); - for (int i = 0; i < 20; i++) { - map.put(i, i); - s.commit(); - long version = s.getCurrentVersion(); - if (version >= 6) { - map.openVersion(version - 5); - try { - map.openVersion(version - 6); - fail(); - } catch (IllegalArgumentException e) { - // expected + private void testVersionsToKeep() { + try (MVStore s = new MVStore.Builder().open()) { + assertEquals(5, s.getVersionsToKeep()); + MVMap map = s.openMap("data"); + for (int i = 0; i < 20; i++) { + map.put(i, i); + s.commit(); + long version = s.getCurrentVersion(); + if (version >= 6) { + map.openVersion(version - 5); + assertThrows(IllegalArgumentException.class, () -> map.openVersion(version - 6)); } } } } private void testVersionsToKeep2() { - MVStore s = new MVStore.Builder().autoCommitDisabled().open(); - s.setVersionsToKeep(2); - final MVMap m = s.openMap("data"); - s.commit(); - assertEquals(1, s.getCurrentVersion()); - m.put(1, "version 1"); - s.commit(); - assertEquals(2, s.getCurrentVersion()); - m.put(1, "version 2"); - 
s.commit(); - assertEquals(3, s.getCurrentVersion()); - m.put(1, "version 3"); - s.commit(); - m.put(1, "version 4"); - assertEquals("version 4", m.openVersion(4).get(1)); - assertEquals("version 3", m.openVersion(3).get(1)); - assertEquals("version 2", m.openVersion(2).get(1)); - new AssertThrows(IllegalArgumentException.class) { - @Override - public void test() throws Exception { - m.openVersion(1); - } - }; - s.close(); + try (MVStore s = new MVStore.Builder().autoCommitDisabled().open()) { + s.setVersionsToKeep(2); + final MVMap m = s.openMap("data"); + s.commit(); + assertEquals(1, s.getCurrentVersion()); + m.put(1, "version 1"); + s.commit(); + assertEquals(2, s.getCurrentVersion()); + m.put(1, "version 2"); + s.commit(); + assertEquals(3, s.getCurrentVersion()); + m.put(1, "version 3"); + s.commit(); + m.put(1, "version 4"); + assertEquals("version 4", m.openVersion(4).get(1)); + assertEquals("version 3", m.openVersion(3).get(1)); + assertEquals("version 2", m.openVersion(2).get(1)); + assertThrows(IllegalArgumentException.class, () -> m.openVersion(1)); + } } - private void testRemoveMap() throws Exception { + private void testRemoveMap() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = new MVStore.Builder(). + try (MVStore s = new MVStore.Builder(). fileName(fileName). - open(); - MVMap map; - - map = s.openMap("data"); - map.put(1, 1); - assertEquals(1, map.get(1).intValue()); - s.commit(); - - s.removeMap(map); - s.commit(); + open()) { + MVMap map = s.openMap("data"); + map.put(1, 1); + assertEquals(1, map.get(1).intValue()); + s.commit(); - map = s.openMap("data"); - assertTrue(map.isEmpty()); - map.put(2, 2); + s.removeMap(map); + s.commit(); - s.close(); + map = s.openMap("data"); + assertTrue(map.isEmpty()); + map.put(2, 2); + } } - private void testIsEmpty() throws Exception { - MVStore s = new MVStore.Builder(). + private void testIsEmpty() { + try (MVStore s = new MVStore.Builder(). 
pageSplitSize(50). - open(); - Map m = s.openMap("data"); - m.put(1, new byte[50]); - m.put(2, new byte[50]); - m.put(3, new byte[50]); - m.remove(1); - m.remove(2); - m.remove(3); - assertEquals(0, m.size()); - assertTrue(m.isEmpty()); - s.close(); + open()) { + Map m = s.openMap("data"); + m.put(1, new byte[50]); + m.put(2, new byte[50]); + m.put(3, new byte[50]); + m.remove(1); + m.remove(2); + m.remove(3); + assertEquals(0, m.size()); + assertTrue(m.isEmpty()); + } } - private void testOffHeapStorage() throws Exception { + private void testOffHeapStorage() { OffHeapStore offHeap = new OffHeapStore(); - MVStore s = new MVStore.Builder(). - fileStore(offHeap). - open(); int count = 1000; - Map map = s.openMap("data"); - for (int i = 0; i < count; i++) { - map.put(i, "Hello " + i); - s.commit(); + try (MVStore s = new MVStore.Builder(). + fileStore(offHeap). + open()) { + Map map = s.openMap("data"); + for (int i = 0; i < count; i++) { + map.put(i, "Hello " + i); + s.commit(); + } + assertTrue(offHeap.getWriteCount() > count); } - assertTrue(offHeap.getWriteCount() > count); - s.close(); - s = new MVStore.Builder(). + try (MVStore s = new MVStore.Builder(). fileStore(offHeap). - open(); - map = s.openMap("data"); - for (int i = 0; i < count; i++) { - assertEquals("Hello " + i, map.get(i)); + open()) { + Map map = s.openMap("data"); + for (int i = 0; i < count; i++) { + assertEquals("Hello " + i, map.get(i)); + } } - s.close(); } - private void testNewerWriteVersion() throws Exception { + private void testNewerWriteVersion() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); MVStore s = new MVStore.Builder(). 
@@ -433,9 +424,9 @@ private void testNewerWriteVersion() throws Exception { open(); s.setRetentionTime(Integer.MAX_VALUE); Map header = s.getStoreHeader(); - assertEquals("1", header.get("format").toString()); - header.put("formatRead", "1"); - header.put("format", "2"); + assertEquals("2", header.get("format").toString()); + header.put("formatRead", "2"); + header.put("format", "3"); forceWriteStoreHeader(s); MVMap m = s.openMap("data"); forceWriteStoreHeader(s); @@ -448,9 +439,9 @@ private void testNewerWriteVersion() throws Exception { open(); header = s.getStoreHeader(); fail(header.toString()); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { assertEquals(DataUtils.ERROR_UNSUPPORTED_FORMAT, - DataUtils.getErrorCode(e.getMessage())); + e.getErrorCode()); } s = new MVStore.Builder(). encryptionKey("007".toCharArray()). @@ -474,13 +465,15 @@ private void testNewerWriteVersion() throws Exception { } - private void testCompactFully() throws Exception { + private void testCompactFully() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); MVStore s = new MVStore.Builder(). fileName(fileName). autoCommitDisabled(). open(); + s.setRetentionTime(0); + s.setVersionsToKeep(0); MVMap m; for (int i = 0; i < 100; i++) { m = s.openMap("data" + i); @@ -502,23 +495,13 @@ private void testCompactFully() throws Exception { private void testBackgroundExceptionListener() throws Exception { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - final AtomicReference exRef = - new AtomicReference<>(); - s = new MVStore.Builder(). + AtomicReference exRef = new AtomicReference<>(); + MVStore s = new MVStore.Builder(). fileName(fileName). - backgroundExceptionHandler(new UncaughtExceptionHandler() { - - @Override - public void uncaughtException(Thread t, Throwable e) { - exRef.set(e); - } - - }). + backgroundExceptionHandler((t, e) -> exRef.set(e)). 
open(); s.setAutoCommitDelay(10); - MVMap m; - m = s.openMap("data"); + MVMap m = s.openMap("data"); s.getFileStore().getFile().close(); try { m.put(1, "Hello"); @@ -530,12 +513,10 @@ public void uncaughtException(Thread t, Throwable e) { } Throwable e = exRef.get(); assertNotNull(e); - assertEquals(DataUtils.ERROR_WRITING_FAILED, - DataUtils.getErrorCode(e.getMessage())); - } catch (IllegalStateException e) { + checkErrorCode(DataUtils.ERROR_WRITING_FAILED, e); + } catch (MVStoreException e) { // sometimes it is detected right away - assertEquals(DataUtils.ERROR_CLOSED, - DataUtils.getErrorCode(e.getMessage())); + assertEquals(DataUtils.ERROR_CLOSED, e.getErrorCode()); } s.closeImmediately(); @@ -545,35 +526,32 @@ public void uncaughtException(Thread t, Throwable e) { private void testAtomicOperations() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap m; - s = new MVStore.Builder(). + try (MVStore s = new MVStore.Builder(). fileName(fileName). 
- open(); - m = s.openMap("data"); - - // putIfAbsent - assertNull(m.putIfAbsent(1, new byte[1])); - assertEquals(1, m.putIfAbsent(1, new byte[2]).length); - assertEquals(1, m.get(1).length); - - // replace - assertNull(m.replace(2, new byte[2])); - assertNull(m.get(2)); - assertEquals(1, m.replace(1, new byte[2]).length); - assertEquals(2, m.replace(1, new byte[3]).length); - assertEquals(3, m.replace(1, new byte[1]).length); - - // replace with oldValue - assertFalse(m.replace(1, new byte[2], new byte[10])); - assertTrue(m.replace(1, new byte[1], new byte[2])); - assertTrue(m.replace(1, new byte[2], new byte[1])); - - // remove - assertFalse(m.remove(1, new byte[2])); - assertTrue(m.remove(1, new byte[1])); - - s.close(); + open()) { + MVMap m = s.openMap("data"); + + // putIfAbsent + assertNull(m.putIfAbsent(1, new byte[1])); + assertEquals(1, m.putIfAbsent(1, new byte[2]).length); + assertEquals(1, m.get(1).length); + + // replace + assertNull(m.replace(2, new byte[2])); + assertNull(m.get(2)); + assertEquals(1, m.replace(1, new byte[2]).length); + assertEquals(2, m.replace(1, new byte[3]).length); + assertEquals(3, m.replace(1, new byte[1]).length); + + // replace with oldValue + assertFalse(m.replace(1, new byte[2], new byte[10])); + assertTrue(m.replace(1, new byte[1], new byte[2])); + assertTrue(m.replace(1, new byte[2], new byte[1])); + + // remove + assertFalse(m.remove(1, new byte[2])); + assertTrue(m.remove(1, new byte[1])); + } FileUtils.delete(fileName); } @@ -692,127 +670,102 @@ private void testWriteDelay() { private void testEncryptedFile() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap m; char[] passwordChars = "007".toCharArray(); - s = new MVStore.Builder(). - fileName(fileName). - encryptionKey(passwordChars). 
- open(); - assertEquals(0, passwordChars[0]); - assertEquals(0, passwordChars[1]); - assertEquals(0, passwordChars[2]); - assertTrue(FileUtils.exists(fileName)); - m = s.openMap("test"); - m.put(1, "Hello"); - assertEquals("Hello", m.get(1)); - s.close(); - - passwordChars = "008".toCharArray(); - try { - s = new MVStore.Builder(). - fileName(fileName). - encryptionKey(passwordChars).open(); - fail(); - } catch (IllegalStateException e) { - assertEquals(DataUtils.ERROR_FILE_CORRUPT, - DataUtils.getErrorCode(e.getMessage())); + try (MVStore s = new MVStore.Builder().fileName(fileName).encryptionKey(passwordChars).open()) { + assertPasswordErased(passwordChars); + assertTrue(FileUtils.exists(fileName)); + MVMap m = s.openMap("test"); + m.put(1, "Hello"); + assertEquals("Hello", m.get(1)); } - assertEquals(0, passwordChars[0]); - assertEquals(0, passwordChars[1]); - assertEquals(0, passwordChars[2]); + + char[] passwordChars2 = "008".toCharArray(); + assertThrows(DataUtils.ERROR_FILE_CORRUPT, + () -> new MVStore.Builder().fileName(fileName).encryptionKey(passwordChars2).open()); + assertPasswordErased(passwordChars2); passwordChars = "007".toCharArray(); - s = new MVStore.Builder(). - fileName(fileName). - encryptionKey(passwordChars).open(); - assertEquals(0, passwordChars[0]); - assertEquals(0, passwordChars[1]); - assertEquals(0, passwordChars[2]); - m = s.openMap("test"); - assertEquals("Hello", m.get(1)); - s.close(); + try (MVStore s = new MVStore.Builder().fileName(fileName).encryptionKey(passwordChars).open()) { + assertPasswordErased(passwordChars); + MVMap m = s.openMap("test"); + assertEquals("Hello", m.get(1)); + } FileUtils.setReadOnly(fileName); passwordChars = "007".toCharArray(); - s = new MVStore.Builder(). - fileName(fileName). 
- encryptionKey(passwordChars).open(); - assertTrue(s.getFileStore().isReadOnly()); - s.close(); + try (MVStore s = new MVStore.Builder().fileName(fileName).encryptionKey(passwordChars).open()) { + assertTrue(s.getFileStore().isReadOnly()); + } FileUtils.delete(fileName); assertFalse(FileUtils.exists(fileName)); } + private void assertPasswordErased(char[] passwordChars) { + assertEquals(0, passwordChars[0]); + assertEquals(0, passwordChars[1]); + assertEquals(0, passwordChars[2]); + } + private void testFileFormatChange() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap m; - s = openStore(fileName); - s.setRetentionTime(Integer.MAX_VALUE); - m = s.openMap("test"); - m.put(1, 1); - Map header = s.getStoreHeader(); - int format = Integer.parseInt(header.get("format").toString()); - assertEquals(1, format); - header.put("format", Integer.toString(format + 1)); - forceWriteStoreHeader(s); - s.close(); - try { - openStore(fileName).close(); - fail(); - } catch (IllegalStateException e) { - assertEquals(DataUtils.ERROR_UNSUPPORTED_FORMAT, - DataUtils.getErrorCode(e.getMessage())); - } + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(Integer.MAX_VALUE); + MVMap m = s.openMap("test"); + m.put(1, 1); + Map header = s.getStoreHeader(); + int format = Integer.parseInt(header.get("format").toString()); + assertEquals(2, format); + header.put("format", Integer.toString(format + 1)); + forceWriteStoreHeader(s); + } + assertThrows(DataUtils.ERROR_UNSUPPORTED_FORMAT, () -> openStore(fileName).close()); FileUtils.delete(fileName); } private void testRecreateMap() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - MVMap m = s.openMap("test"); - m.put(1, 1); - s.commit(); - s.removeMap(m); - s.close(); - s = openStore(fileName); - m = s.openMap("test"); - assertNull(m.get(1)); - s.close(); + try (MVStore s = openStore(fileName)) { + MVMap m = 
s.openMap("test"); + m.put(1, 1); + s.commit(); + s.removeMap(m); + } + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("test"); + assertNull(m.get(1)); + } } private void testRenameMapRollback() { - MVStore s = openStore(null); - MVMap map; - map = s.openMap("hello"); - map.put(1, 10); - long old = s.commit(); - s.renameMap(map, "world"); - map.put(2, 20); - assertEquals("world", map.getName()); - s.rollbackTo(old); - assertEquals("hello", map.getName()); - s.rollbackTo(0); - assertTrue(map.isClosed()); - s.close(); + try (MVStore s = openStore(null)) { + MVMap map = s.openMap("hello"); + map.put(1, 10); + long old = s.commit(); + s.renameMap(map, "world"); + map.put(2, 20); + assertEquals("world", map.getName()); + s.rollbackTo(old); + assertEquals("hello", map.getName()); + s.rollbackTo(0); + assertTrue(map.isClosed()); + } } private void testCustomMapType() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - Map seq = s.openMap("data", new SequenceMap.Builder()); - StringBuilder buff = new StringBuilder(); - for (long x : seq.keySet()) { - buff.append(x).append(';'); + try (MVStore s = openStore(fileName)) { + Map seq = s.openMap("data", new SequenceMap.Builder()); + StringBuilder buff = new StringBuilder(); + for (long x : seq.keySet()) { + buff.append(x).append(';'); + } + assertEquals("1;2;3;4;5;6;7;8;9;10;", buff.toString()); } - assertEquals("1;2;3;4;5;6;7;8;9;10;", buff.toString()); - s.close(); } private void testCacheSize() { @@ -821,94 +774,83 @@ private void testCacheSize() { } String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap map; - s = new MVStore.Builder(). + try (MVStore s = new MVStore.Builder(). fileName(fileName). autoCommitDisabled(). 
- compress().open(); - map = s.openMap("test"); - // add 10 MB of data - for (int i = 0; i < 1024; i++) { - map.put(i, new String(new char[10240])); + compress().open()) { + s.setReuseSpace(false); // disable free space scanning + MVMap map = s.openMap("test"); + // add 10 MB of data + for (int i = 0; i < 1024; i++) { + map.put(i, new String(new char[10240])); + } } - s.close(); int[] expectedReadsForCacheSize = { - 1880, 1789, 1616, 1374, 970, 711, 541 // compressed + 1880, 490, 476, 501, 476, 476, 541 // compressed // 1887, 1775, 1599, 1355, 1035, 732, 507 // uncompressed }; for (int cacheSize = 0; cacheSize <= 6; cacheSize += 1) { int cacheMB = 1 + 3 * cacheSize; - s = new MVStore.Builder(). + Utils.collectGarbage(); + try (MVStore s = new MVStore.Builder(). fileName(fileName). autoCommitDisabled(). - cacheSize(cacheMB).open(); - assertEquals(cacheMB, s.getCacheSize()); - map = s.openMap("test"); - for (int i = 0; i < 1024; i += 128) { - for (int j = 0; j < i; j++) { - String x = map.get(j); - assertEquals(10240, x.length()); + cacheSize(cacheMB).open()) { + assertEquals(cacheMB, s.getCacheSize()); + MVMap map = s.openMap("test"); + for (int i = 0; i < 1024; i += 128) { + for (int j = 0; j < i; j++) { + String x = map.get(j); + assertEquals(10240, x.length()); + } } + long readCount = s.getFileStore().getReadCount(); + int expected = expectedReadsForCacheSize[cacheSize]; + assertTrue("Cache " + cacheMB + "Mb, reads: " + readCount + " expected: " + expected + + " size: " + s.getFileStore().getReadBytes() + + " cache used: " + s.getCacheSizeUsed() + + " cache hits: " + s.getCache().getHits() + + " cache misses: " + s.getCache().getMisses() + + " cache requests: " + (s.getCache().getHits() + s.getCache().getMisses()) + + "", + Math.abs(100 - (100 * expected / readCount)) < 15); } - long readCount = s.getFileStore().getReadCount(); - int expected = expectedReadsForCacheSize[cacheSize]; - assertTrue("Cache "+cacheMB+"Mb, reads: " + readCount + " expected: " + 
expected + - " size: " + s.getFileStore().getReadBytes() + - " cache used: " + s.getCacheSizeUsed() + - " cache hits: " + s.getCache().getHits() + - " cache misses: " + s.getCache().getMisses() + - " cache requests: " + (s.getCache().getHits() + s.getCache().getMisses()) + - "", - Math.abs(100 - (100 * expected / readCount)) < 15); - s.close(); } - } private void testConcurrentOpen() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = new MVStore.Builder().fileName(fileName).open(); - try { - MVStore s1 = new MVStore.Builder().fileName(fileName).open(); - s1.close(); - fail(); - } catch (IllegalStateException e) { - // expected + try (MVStore s = new MVStore.Builder().fileName(fileName).open()) { + assertThrows(MVStoreException.class, () -> new MVStore.Builder().fileName(fileName).open().close()); + assertThrows(MVStoreException.class, + () -> new MVStore.Builder().fileName(fileName).readOnly().open().close()); + assertFalse(s.getFileStore().isReadOnly()); } - try { - MVStore s1 = new MVStore.Builder().fileName(fileName).readOnly().open(); - s1.close(); - fail(); - } catch (IllegalStateException e) { - // expected + try (MVStore s = new MVStore.Builder().fileName(fileName).readOnly().open()) { + assertTrue(s.getFileStore().isReadOnly()); } - assertFalse(s.getFileStore().isReadOnly()); - s.close(); - s = new MVStore.Builder().fileName(fileName).readOnly().open(); - assertTrue(s.getFileStore().isReadOnly()); - s.close(); } private void testFileHeader() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - s.setRetentionTime(Integer.MAX_VALUE); - long time = System.currentTimeMillis(); - Map m = s.getStoreHeader(); - assertEquals("1", m.get("format").toString()); - long creationTime = (Long) m.get("created"); - assertTrue(Math.abs(time - creationTime) < 100); - m.put("test", "123"); - forceWriteStoreHeader(s); - s.close(); - s = openStore(fileName); - 
Object test = s.getStoreHeader().get("test"); - assertNotNull(test); - assertEquals("123", test.toString()); - s.close(); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(Integer.MAX_VALUE); + long time = System.currentTimeMillis(); + Map m = s.getStoreHeader(); + assertEquals("2", m.get("format").toString()); + long creationTime = (Long) m.get("created"); + assertTrue(Math.abs(time - creationTime) < 100); + m.put("test", "123"); + forceWriteStoreHeader(s); + } + + try (MVStore s = openStore(fileName)) { + Object test = s.getStoreHeader().get("test"); + assertNotNull(test); + assertEquals("123", test.toString()); + } } private static void forceWriteStoreHeader(MVStore s) { @@ -945,71 +887,72 @@ private static void sleep(long ms) { private void testFileHeaderCorruption() throws Exception { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = new MVStore.Builder(). - fileName(fileName).pageSplitSize(1000).autoCommitDisabled().open(); - s.setRetentionTime(0); - MVMap map; - map = s.openMap("test"); - map.put(0, new byte[100]); - for (int i = 0; i < 10; i++) { - map = s.openMap("test" + i); - map.put(0, new byte[1000]); - s.commit(); - } - FileStore fs = s.getFileStore(); - long size = fs.getFile().size(); - for (int i = 0; i < 100; i++) { - map = s.openMap("test" + i); - s.removeMap(map); - s.commit(); - s.compact(100, 1); - if (fs.getFile().size() <= size) { - break; + MVStore.Builder builder = new MVStore.Builder(). 
+ fileName(fileName).pageSplitSize(1000).autoCommitDisabled(); + try (MVStore s = builder.open()) { + s.setRetentionTime(0); + MVMap map = s.openMap("test"); + map.put(0, new byte[100]); + for (int i = 0; i < 10; i++) { + map = s.openMap("test" + i); + map.put(0, new byte[1000]); + s.commit(); + } + FileStore fs = s.getFileStore(); + long size = fs.getFile().size(); + for (int i = 0; i < 100; i++) { + map = s.openMap("test" + i); + s.removeMap(map); + s.commit(); + s.compact(100, 1); + if (fs.getFile().size() <= size) { + break; + } } + // the last chunk is at the end + s.setReuseSpace(false); + map = s.openMap("test2"); + map.put(1, new byte[1000]); } - // the last chunk is at the end - s.setReuseSpace(false); - map = s.openMap("test2"); - map.put(1, new byte[1000]); - s.close(); + FilePath f = FilePath.get(fileName); int blockSize = 4 * 1024; // test corrupt file headers for (int i = 0; i <= blockSize; i += blockSize) { - FileChannel fc = f.open("rw"); - if (i == 0) { - // corrupt the last block (the end header) - fc.write(ByteBuffer.allocate(256), fc.size() - 256); - } - ByteBuffer buff = ByteBuffer.allocate(4 * 1024); - fc.read(buff, i); - String h = new String(buff.array(), StandardCharsets.UTF_8).trim(); - int idx = h.indexOf("fletcher:"); - int old = Character.digit(h.charAt(idx + "fletcher:".length()), 16); - int bad = (old + 1) & 15; - buff.put(idx + "fletcher:".length(), - (byte) Character.forDigit(bad, 16)); - buff.rewind(); - fc.write(buff, i); - fc.close(); + try (FileChannel fc = f.open("rw")) { + if (i == 0) { + // corrupt the last block (the end header) + fc.write(ByteBuffer.allocate(256), fc.size() - 256); + } + ByteBuffer buff = ByteBuffer.allocate(4 * 1024); + fc.read(buff, i); + String h = new String(buff.array(), StandardCharsets.UTF_8).trim(); + int idx = h.indexOf("fletcher:"); + int old = Character.digit(h.charAt(idx + "fletcher:".length()), 16); + int bad = (old + 1) & 15; + buff.put(idx + "fletcher:".length(), + (byte) 
Character.forDigit(bad, 16)); + + // now intentionally corrupt first or both headers + // note that headers may be overwritten upon successfull opening + for (int b = 0; b <= i; b += blockSize) { + buff.rewind(); + fc.write(buff, b); + } + } if (i == 0) { // if the first header is corrupt, the second // header should be used - s = openStore(fileName); - map = s.openMap("test"); - assertEquals(100, map.get(0).length); - map = s.openMap("test2"); - assertFalse(map.containsKey(1)); - s.close(); + try (MVStore s = openStore(fileName)) { + MVMap map = s.openMap("test"); + assertEquals(100, map.get(0).length); + map = s.openMap("test2"); + assertFalse(map.containsKey(1)); + } } else { // both headers are corrupt - try { - s = openStore(fileName); - fail(); - } catch (Exception e) { - // expected - } + assertThrows(Exception.class, () -> openStore(fileName)); } } } @@ -1070,69 +1013,87 @@ private void testIndexSkip() { assertEquals(map.size(), map.keyList().size()); } - private void testMinMaxNextKey() { - MVStore s = openStore(null); + private void testIndexSkipReverse() { + MVStore s = openStore(null, 4); MVMap map = s.openMap("test"); - map.put(10, 100); - map.put(20, 200); + for (int i = 0; i < 100; i += 2) { + map.put(i, 10 * i); + } + + Cursor c = map.cursor(50, null, true); + // skip must reset the root of the cursor + c.skip(10); + for (int i = 30; i >= 0; i -= 2) { + assertTrue(c.hasNext()); + assertEquals(i, c.next().intValue()); + } + assertFalse(c.hasNext()); + } + + private void testMinMaxNextKey() { + try (MVStore s = openStore(null)) { + MVMap map = s.openMap("test"); + map.put(10, 100); + map.put(20, 200); - assertEquals(10, map.firstKey().intValue()); - assertEquals(20, map.lastKey().intValue()); + assertEquals(10, map.firstKey().intValue()); + assertEquals(20, map.lastKey().intValue()); - assertEquals(20, map.ceilingKey(15).intValue()); - assertEquals(20, map.ceilingKey(20).intValue()); - assertEquals(10, map.floorKey(15).intValue()); - assertEquals(10, 
map.floorKey(10).intValue()); - assertEquals(20, map.higherKey(10).intValue()); - assertEquals(10, map.lowerKey(20).intValue()); + assertEquals(20, map.ceilingKey(15).intValue()); + assertEquals(20, map.ceilingKey(20).intValue()); + assertEquals(10, map.floorKey(15).intValue()); + assertEquals(10, map.floorKey(10).intValue()); + assertEquals(20, map.higherKey(10).intValue()); + assertEquals(10, map.lowerKey(20).intValue()); - final MVMap m = map; - assertEquals(10, m.ceilingKey(null).intValue()); - assertEquals(10, m.higherKey(null).intValue()); - assertNull(m.lowerKey(null)); - assertNull(m.floorKey(null)); + assertEquals(10, map.ceilingKey(null).intValue()); + assertEquals(10, map.higherKey(null).intValue()); + assertNull(map.lowerKey(null)); + assertNull(map.floorKey(null)); + } for (int i = 3; i < 20; i++) { - s = openStore(null, 4); - map = s.openMap("test"); - for (int j = 3; j < i; j++) { - map.put(j * 2, j * 20); - } - if (i == 3) { - assertNull(map.firstKey()); - assertNull(map.lastKey()); - } else { - assertEquals(6, map.firstKey().intValue()); - int max = (i - 1) * 2; - assertEquals(max, map.lastKey().intValue()); - - for (int j = 0; j < i * 2 + 2; j++) { - if (j > max) { - assertNull(map.ceilingKey(j)); - } else { - int ceiling = Math.max((j + 1) / 2 * 2, 6); - assertEquals(ceiling, map.ceilingKey(j).intValue()); - } + try (MVStore s = openStore(null, 4)) { + MVMap map = s.openMap("test"); + for (int j = 3; j < i; j++) { + map.put(j * 2, j * 20); + } + if (i == 3) { + assertNull(map.firstKey()); + assertNull(map.lastKey()); + } else { + assertEquals(6, map.firstKey().intValue()); + int max = (i - 1) * 2; + assertEquals(max, map.lastKey().intValue()); + + for (int j = 0; j < i * 2 + 2; j++) { + if (j > max) { + assertNull(map.ceilingKey(j)); + } else { + int ceiling = Math.max((j + 1) / 2 * 2, 6); + assertEquals(ceiling, map.ceilingKey(j).intValue()); + } - int floor = Math.min(max, Math.max(j / 2 * 2, 4)); - if (floor < 6) { - 
assertNull(map.floorKey(j)); - } else { - map.floorKey(j); - } + int floor = Math.min(max, Math.max(j / 2 * 2, 4)); + if (floor < 6) { + assertNull(map.floorKey(j)); + } else { + map.floorKey(j); + } - int lower = Math.min(max, Math.max((j - 1) / 2 * 2, 4)); - if (lower < 6) { - assertNull(map.lowerKey(j)); - } else { - assertEquals(lower, map.lowerKey(j).intValue()); - } + int lower = Math.min(max, Math.max((j - 1) / 2 * 2, 4)); + if (lower < 6) { + assertNull(map.lowerKey(j)); + } else { + assertEquals(lower, map.lowerKey(j).intValue()); + } - int higher = Math.max((j + 2) / 2 * 2, 6); - if (higher > max) { - assertNull(map.higherKey(j)); - } else { - assertEquals(higher, map.higherKey(j).intValue()); + int higher = Math.max((j + 2) / 2 * 2, 6); + if (higher > max) { + assertNull(map.higherKey(j)); + } else { + assertEquals(higher, map.higherKey(j).intValue()); + } } } } @@ -1142,68 +1103,71 @@ private void testMinMaxNextKey() { private void testStoreVersion() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = MVStore.open(fileName); - assertEquals(0, s.getCurrentVersion()); - assertEquals(0, s.getStoreVersion()); - s.setStoreVersion(0); - s.commit(); - s.setStoreVersion(1); - s.closeImmediately(); - s = MVStore.open(fileName); - assertEquals(1, s.getCurrentVersion()); - assertEquals(0, s.getStoreVersion()); - s.setStoreVersion(1); - s.close(); - s = MVStore.open(fileName); - assertEquals(2, s.getCurrentVersion()); - assertEquals(1, s.getStoreVersion()); - s.close(); - } + MVStore store = MVStore.open(fileName); + assertEquals(0, store.getCurrentVersion()); + assertEquals(0, store.getStoreVersion()); + store.setStoreVersion(0); + store.commit(); + store.setStoreVersion(1); + store.closeImmediately(); - private void testIterateOldVersion() { - MVStore s; - Map map; - s = new MVStore.Builder().open(); - map = s.openMap("test"); - int len = 100; - for (int i = 0; i < len; i++) { - map.put(i, 10 * i); + try (MVStore s = 
MVStore.open(fileName)) { + assertEquals(1, s.getCurrentVersion()); + assertEquals(0, s.getStoreVersion()); + s.setStoreVersion(1); } - Iterator it = map.keySet().iterator(); - s.commit(); - for (int i = 0; i < len; i += 2) { - map.remove(i); + + try (MVStore s = MVStore.open(fileName)) { + assertEquals(2, s.getCurrentVersion()); + assertEquals(1, s.getStoreVersion()); } - int count = 0; - while (it.hasNext()) { - it.next(); - count++; + } + + private void testIterateOldVersion() { + try (MVStore s = new MVStore.Builder().open()) { + Map map = s.openMap("test"); + int len = 100; + for (int i = 0; i < len; i++) { + map.put(i, 10 * i); + } + int count = 0; + MVStore.TxCounter txCounter = s.registerVersionUsage(); + try { + Iterator it = map.keySet().iterator(); + s.commit(); + for (int i = 0; i < len; i += 2) { + map.remove(i); + } + while (it.hasNext()) { + it.next(); + count++; + } + } finally { + s.deregisterVersionUsage(txCounter); + } + assertEquals(len, count); } - assertEquals(len, count); - s.close(); } private void testObjects() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - Map map; - s = new MVStore.Builder().fileName(fileName).open(); - map = s.openMap("test"); - map.put(1, "Hello"); - map.put("2", 200); - map.put(new Object[1], new Object[]{1, "2"}); - s.close(); + try (MVStore s = new MVStore.Builder().fileName(fileName).open()) { + Map map = s.openMap("test"); + map.put(1, "Hello"); + map.put("2", 200); + map.put(new Object[1], new Object[]{1, "2"}); + } - s = new MVStore.Builder().fileName(fileName).open(); - map = s.openMap("test"); - assertEquals("Hello", map.get(1).toString()); - assertEquals(200, ((Integer) map.get("2")).intValue()); - Object[] x = (Object[]) map.get(new Object[1]); - assertEquals(2, x.length); - assertEquals(1, ((Integer) x[0]).intValue()); - assertEquals("2", (String) x[1]); - s.close(); + try (MVStore s = new MVStore.Builder().fileName(fileName).open()) { + Map map = 
s.openMap("test"); + assertEquals("Hello", map.get(1).toString()); + assertEquals(200, ((Integer) map.get("2")).intValue()); + Object[] x = (Object[]) map.get(new Object[1]); + assertEquals(2, x.length); + assertEquals(1, ((Integer) x[0]).intValue()); + assertEquals("2", (String) x[1]); + } } private void testExample() { @@ -1211,22 +1175,19 @@ private void testExample() { FileUtils.delete(fileName); // open the store (in-memory if fileName is null) - MVStore s = MVStore.open(fileName); - - // create/get the map named "data" - MVMap map = s.openMap("data"); + try (MVStore s = MVStore.open(fileName)) { - // add and read some data - map.put(1, "Hello World"); - // System.out.println(map.get(1)); - - // close the store (this will persist changes) - s.close(); + // create/get the map named "data" + MVMap map = s.openMap("data"); - s = MVStore.open(fileName); - map = s.openMap("data"); - assertEquals("Hello World", map.get(1)); - s.close(); + // add and read some data + map.put(1, "Hello World"); + // System.out.println(map.get(1)); + } + try (MVStore s = MVStore.open(fileName)) { + MVMap map = s.openMap("data"); + assertEquals("Hello World", map.get(1)); + } } private void testExampleMvcc() { @@ -1234,45 +1195,43 @@ private void testExampleMvcc() { FileUtils.delete(fileName); // open the store (in-memory if fileName is null) - MVStore s = MVStore.open(fileName); + try (MVStore s = MVStore.open(fileName)) { - // create/get the map named "data" - MVMap map = s.openMap("data"); + // create/get the map named "data" + MVMap map = s.openMap("data"); - // add some data - map.put(1, "Hello"); - map.put(2, "World"); + // add some data + map.put(1, "Hello"); + map.put(2, "World"); - // get the current version, for later use - long oldVersion = s.getCurrentVersion(); + // get the current version, for later use + long oldVersion = s.getCurrentVersion(); - // from now on, the old version is read-only - s.commit(); + // from now on, the old version is read-only + s.commit(); - // 
more changes, in the new version - // changes can be rolled back if required - // changes always go into "head" (the newest version) - map.put(1, "Hi"); - map.remove(2); - - // access the old data (before the commit) - MVMap oldMap = - map.openVersion(oldVersion); - - // print the old version (can be done - // concurrently with further modifications) - // this will print "Hello" and "World": - // System.out.println(oldMap.get(1)); - assertEquals("Hello", oldMap.get(1)); - // System.out.println(oldMap.get(2)); - assertEquals("World", oldMap.get(2)); - - // print the newest version ("Hi") - // System.out.println(map.get(1)); - assertEquals("Hi", map.get(1)); - - // close the store - s.close(); + // more changes, in the new version + // changes can be rolled back if required + // changes always go into "head" (the newest version) + map.put(1, "Hi"); + map.remove(2); + + // access the old data (before the commit) + MVMap oldMap = + map.openVersion(oldVersion); + + // print the old version (can be done + // concurrently with further modifications) + // this will print "Hello" and "World": + // System.out.println(oldMap.get(1)); + assertEquals("Hello", oldMap.get(1)); + // System.out.println(oldMap.get(2)); + assertEquals("World", oldMap.get(2)); + + // print the newest version ("Hi") + // System.out.println(map.get(1)); + assertEquals("Hi", map.get(1)); + } } private void testOpenStoreCloseLoop() { @@ -1281,14 +1240,14 @@ private void testOpenStoreCloseLoop() { for (int k = 0; k < 1; k++) { // long t = System.nanoTime(); for (int j = 0; j < 3; j++) { - MVStore s = openStore(fileName); - Map m = s.openMap("data"); - for (int i = 0; i < 3; i++) { - Integer x = m.get("value"); - m.put("value", x == null ? 0 : x + 1); - s.commit(); + try (MVStore s = openStore(fileName)) { + Map m = s.openMap("data"); + for (int i = 0; i < 3; i++) { + Integer x = m.get("value"); + m.put("value", x == null ? 
0 : x + 1); + s.commit(); + } } - s.close(); } // System.out.println("open/close: " + // TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t)); @@ -1297,26 +1256,25 @@ private void testOpenStoreCloseLoop() { } private void testOldVersion() { - MVStore s; for (int op = 0; op <= 1; op++) { for (int i = 0; i < 5; i++) { - s = openStore(null); - s.setVersionsToKeep(Integer.MAX_VALUE); - MVMap m; - m = s.openMap("data"); - for (int j = 0; j < 5; j++) { - if (op == 1) { - m.put("1", "" + s.getCurrentVersion()); + try (MVStore s = openStore(null)) { + s.setVersionsToKeep(Integer.MAX_VALUE); + MVMap m; + m = s.openMap("data"); + for (int j = 0; j < 5; j++) { + if (op == 1) { + m.put("1", "" + s.getCurrentVersion()); + } + s.commit(); } - s.commit(); - } - for (int j = 0; j < s.getCurrentVersion(); j++) { - MVMap old = m.openVersion(j); - if (op == 1) { - assertEquals("" + j, old.get("1")); + for (int j = 0; j < s.getCurrentVersion(); j++) { + MVMap old = m.openVersion(j); + if (op == 1) { + assertEquals("" + j, old.get("1")); + } } } - s.close(); } } } @@ -1324,105 +1282,88 @@ private void testOldVersion() { private void testVersion() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - s = openStore(fileName); - s.setVersionsToKeep(100); - s.setAutoCommitDelay(0); - s.setRetentionTime(Integer.MAX_VALUE); - MVMap m = s.openMap("data"); - s.commit(); - long first = s.getCurrentVersion(); - m.put("0", "test"); - s.commit(); - m.put("1", "Hello"); - m.put("2", "World"); - for (int i = 10; i < 20; i++) { - m.put("" + i, "data"); - } - long old = s.getCurrentVersion(); - s.commit(); - m.put("1", "Hallo"); - m.put("2", "Welt"); - MVMap mFirst; - mFirst = m.openVersion(first); - assertEquals(0, mFirst.size()); - MVMap mOld; - assertEquals("Hallo", m.get("1")); - assertEquals("Welt", m.get("2")); - mOld = m.openVersion(old); - assertEquals("Hello", mOld.get("1")); - assertEquals("World", mOld.get("2")); - assertTrue(mOld.isReadOnly()); 
- long old3 = s.getCurrentVersion(); - assertEquals(3, old3); - s.commit(); + try (MVStore s = openStore(fileName)) { + s.setVersionsToKeep(100); + s.setAutoCommitDelay(0); + s.setRetentionTime(Integer.MAX_VALUE); + MVMap m = s.openMap("data"); + s.commit(); + long first = s.getCurrentVersion(); + assertEquals(1, first); + m.put("0", "test"); + s.commit(); + m.put("1", "Hello"); + m.put("2", "World"); + for (int i = 10; i < 20; i++) { + m.put("" + i, "data"); + } + long old = s.getCurrentVersion(); + s.commit(); + m.put("1", "Hallo"); + m.put("2", "Welt"); + MVMap mFirst; + mFirst = m.openVersion(first); + // openVersion() should restore map at last known state of the version specified + // not at the first known state, as it was before + assertEquals(1, mFirst.size()); + MVMap mOld; + assertEquals("Hallo", m.get("1")); + assertEquals("Welt", m.get("2")); + mOld = m.openVersion(old); + assertEquals("Hello", mOld.get("1")); + assertEquals("World", mOld.get("2")); + assertTrue(mOld.isReadOnly()); + long old3 = s.getCurrentVersion(); + assertEquals(3, old3); + s.commit(); - // the old version is still available - assertEquals("Hello", mOld.get("1")); - assertEquals("World", mOld.get("2")); + // the old version is still available + assertEquals("Hello", mOld.get("1")); + assertEquals("World", mOld.get("2")); - mOld = m.openVersion(old3); - assertEquals("Hallo", mOld.get("1")); - assertEquals("Welt", mOld.get("2")); + mOld = m.openVersion(old3); + assertEquals("Hallo", mOld.get("1")); + assertEquals("Welt", mOld.get("2")); - m.put("1", "Hi"); - assertEquals("Welt", m.remove("2")); - s.close(); - - s = openStore(fileName); - m = s.openMap("data"); - assertEquals("Hi", m.get("1")); - assertEquals(null, m.get("2")); - - // This test tries to cast in bronze some peculiar behaviour, - // which is rather implementation artifact then intentional. - // Once store is closed, only one single version of the data - // will exists upon re-opening - the latest. 
- // I hope nobody relies on this "multi-versioning". -/* - mOld = m.openVersion(old3); - assertEquals("Hallo", mOld.get("1")); - assertEquals("Welt", mOld.get("2")); -*/ + m.put("1", "Hi"); + assertEquals("Welt", m.remove("2")); + } - try { - m.openVersion(-3); - fail(); - } catch (IllegalArgumentException e) { - // expected + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + assertEquals("Hi", m.get("1")); + assertEquals(null, m.get("2")); + assertThrows(IllegalArgumentException.class, () -> m.openVersion(-3)); } - s.close(); } private void testTruncateFile() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap m; - s = openStore(fileName); - m = s.openMap("data"); - String data = new String(new char[10000]).replace((char) 0, 'x'); - for (int i = 1; i < 10; i++) { - m.put(i, data); - s.commit(); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + String data = new String(new char[10000]).replace((char) 0, 'x'); + for (int i = 1; i < 10; i++) { + m.put(i, data); + s.commit(); + } } - s.close(); long len = FileUtils.size(fileName); - s = openStore(fileName); - s.setRetentionTime(0); - // remove 75% - m = s.openMap("data"); - for (int i = 0; i < 10; i++) { - if (i % 4 != 0) { - sleep(2); - m.remove(i); - s.commit(); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(0); + // remove 75% + MVMap m = s.openMap("data"); + for (int i = 0; i < 10; i++) { + if (i % 4 != 0) { + sleep(2); + m.remove(i); + s.commit(); + } } + assertTrue(s.compact(100, 50 * 1024)); + // compaction alone will not guarantee file size reduction + s.compactMoveChunks(); } - assertTrue(s.compact(100, 50 * 1024)); - // compaction alone will not guarantee file size reduction - s.compactMoveChunks(); - s.close(); long len2 = FileUtils.size(fileName); assertTrue("len2: " + len2 + " len: " + len, len2 < len); } @@ -1430,236 +1371,236 @@ private void testTruncateFile() { private void 
testFastDelete() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap m; - s = openStore(fileName, 700); - m = s.openMap("data"); - for (int i = 0; i < 1000; i++) { - m.put(i, "Hello World"); - assertEquals(i + 1, m.size()); + try (MVStore s = openStore(fileName, 700)) { + MVMap m = s.openMap("data"); + for (int i = 0; i < 1000; i++) { + m.put(i, "Hello World"); + assertEquals(i + 1, m.size()); + } + assertEquals(1000, m.size()); + // memory calculations were adjusted, so as this out-of-the-thin-air number + assertEquals(93832, s.getUnsavedMemory()); + s.commit(); + assertEquals(2, s.getFileStore().getWriteCount()); } - assertEquals(1000, m.size()); - // memory calculations were adjusted, so as this out-of-the-thin-air number - assertEquals(93522, s.getUnsavedMemory()); - s.commit(); - assertEquals(2, s.getFileStore().getWriteCount()); - s.close(); - s = openStore(fileName); - m = s.openMap("data"); - m.clear(); - assertEquals(0, m.size()); - s.commit(); - // ensure only nodes are read, but not leaves - assertEquals(8, s.getFileStore().getReadCount()); - assertTrue(s.getFileStore().getWriteCount() < 5); - s.close(); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + m.clear(); + assertEquals(0, m.size()); + s.commit(); + // ensure only nodes are read, but not leaves + assertEquals(7, s.getFileStore().getReadCount()); + assertTrue(s.getFileStore().getWriteCount() < 5); + } } private void testRollback() { - MVStore s = MVStore.open(null); - MVMap m = s.openMap("m"); - m.put(1, -1); - s.commit(); - for (int i = 0; i < 10; i++) { - m.put(1, i); - s.rollback(); - assertEquals(i - 1, m.get(1).intValue()); - m.put(1, i); + try (MVStore s = MVStore.open(null)) { + MVMap m = s.openMap("m"); + m.put(1, -1); s.commit(); + for (int i = 0; i < 10; i++) { + m.put(1, i); + s.rollback(); + assertEquals(i - 1, m.get(1).intValue()); + m.put(1, i); + s.commit(); + } } } private void testRollbackStored() { 
String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVMap meta; - MVStore s = openStore(fileName); - assertEquals(45000, s.getRetentionTime()); - s.setRetentionTime(0); - assertEquals(0, s.getRetentionTime()); - s.setRetentionTime(45000); - assertEquals(45000, s.getRetentionTime()); - assertEquals(0, s.getCurrentVersion()); - assertFalse(s.hasUnsavedChanges()); - MVMap m = s.openMap("data"); - assertTrue(s.hasUnsavedChanges()); - MVMap m0 = s.openMap("data0"); - m.put("1", "Hello"); - assertEquals(1, s.commit()); - s.rollbackTo(1); - assertEquals(1, s.getCurrentVersion()); - assertEquals("Hello", m.get("1")); - // so a new version is created - m.put("1", "Hello"); - - long v2 = s.commit(); - assertEquals(2, v2); - assertEquals(2, s.getCurrentVersion()); - assertFalse(s.hasUnsavedChanges()); - assertEquals("Hello", m.get("1")); - s.close(); - - s = openStore(fileName); - s.setRetentionTime(45000); - assertEquals(2, s.getCurrentVersion()); - meta = s.getMetaMap(); - m = s.openMap("data"); - assertFalse(s.hasUnsavedChanges()); - assertEquals("Hello", m.get("1")); - m0 = s.openMap("data0"); - MVMap m1 = s.openMap("data1"); - m.put("1", "Hallo"); - m0.put("1", "Hallo"); - m1.put("1", "Hallo"); - assertEquals("Hallo", m.get("1")); - assertEquals("Hallo", m1.get("1")); - assertTrue(s.hasUnsavedChanges()); - s.rollbackTo(v2); - assertFalse(s.hasUnsavedChanges()); - assertNull(meta.get("name.data1")); - assertNull(m0.get("1")); - assertEquals("Hello", m.get("1")); - // no changes - no real commit here - assertEquals(2, s.commit()); - s.close(); - - s = openStore(fileName); - s.setRetentionTime(45000); - assertEquals(2, s.getCurrentVersion()); - meta = s.getMetaMap(); - assertNotNull(meta.get("name.data")); - assertNotNull(meta.get("name.data0")); - assertNull(meta.get("name.data1")); - m = s.openMap("data"); - m0 = s.openMap("data0"); - assertNull(m0.get("1")); - assertEquals("Hello", m.get("1")); - assertFalse(m0.isReadOnly()); - m.put("1", 
"Hallo"); - s.commit(); - long v3 = s.getCurrentVersion(); - assertEquals(3, v3); - s.close(); + long v2; + try (MVStore s = openStore(fileName)) { + assertEquals(45000, s.getRetentionTime()); + s.setRetentionTime(0); + assertEquals(0, s.getRetentionTime()); + s.setRetentionTime(45000); + assertEquals(45000, s.getRetentionTime()); + assertEquals(0, s.getCurrentVersion()); + assertFalse(s.hasUnsavedChanges()); + MVMap m = s.openMap("data"); + assertTrue(s.hasUnsavedChanges()); + MVMap m0 = s.openMap("data0"); + m.put("1", "Hello"); + assertEquals(1, s.commit()); + s.rollbackTo(1); + assertEquals(1, s.getCurrentVersion()); + assertEquals("Hello", m.get("1")); + // so a new version is created + m.put("1", "Hello"); + + v2 = s.commit(); + assertEquals(2, v2); + assertEquals(2, s.getCurrentVersion()); + assertFalse(s.hasUnsavedChanges()); + assertEquals("Hello", m.get("1")); + } + + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(45000); + assertEquals(2, s.getCurrentVersion()); + MVMap meta = s.getMetaMap(); + MVMap m = s.openMap("data"); + assertFalse(s.hasUnsavedChanges()); + assertEquals("Hello", m.get("1")); + MVMap m0 = s.openMap("data0"); + MVMap m1 = s.openMap("data1"); + m.put("1", "Hallo"); + m0.put("1", "Hallo"); + m1.put("1", "Hallo"); + assertEquals("Hallo", m.get("1")); + assertEquals("Hallo", m1.get("1")); + assertTrue(s.hasUnsavedChanges()); + s.rollbackTo(v2); + assertFalse(s.hasUnsavedChanges()); + assertNull(meta.get(DataUtils.META_NAME + "data1")); + assertNull(m0.get("1")); + assertEquals("Hello", m.get("1")); + // no changes - no real commit here + assertEquals(2, s.commit()); + } + + long v3; + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(45000); + assertEquals(2, s.getCurrentVersion()); + MVMap meta = s.getMetaMap(); + assertNotNull(meta.get(DataUtils.META_NAME + "data")); + assertNotNull(meta.get(DataUtils.META_NAME + "data0")); + assertNull(meta.get(DataUtils.META_NAME + "data1")); + MVMap m = s.openMap("data"); + 
MVMap m0 = s.openMap("data0"); + assertNull(m0.get("1")); + assertEquals("Hello", m.get("1")); + assertFalse(m0.isReadOnly()); + m.put("1", "Hallo"); + s.commit(); + v3 = s.getCurrentVersion(); + assertEquals(3, v3); + } - s = openStore(fileName); - s.setRetentionTime(45000); - assertEquals(3, s.getCurrentVersion()); - m = s.openMap("data"); - m.put("1", "Hi"); - s.close(); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(45000); + assertEquals(3, s.getCurrentVersion()); + MVMap m = s.openMap("data"); + m.put("1", "Hi"); + } - s = openStore(fileName); - s.setRetentionTime(45000); - m = s.openMap("data"); - assertEquals("Hi", m.get("1")); - s.rollbackTo(v3); - assertEquals("Hallo", m.get("1")); - s.close(); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(45000); + MVMap m = s.openMap("data"); + assertEquals("Hi", m.get("1")); + s.rollbackTo(v3); + assertEquals("Hallo", m.get("1")); + } - s = openStore(fileName); - s.setRetentionTime(45000); - m = s.openMap("data"); - assertEquals("Hallo", m.get("1")); - s.close(); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(45000); + MVMap m = s.openMap("data"); + assertEquals("Hallo", m.get("1")); + } } private void testRollbackInMemory() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName, 5); - s.setAutoCommitDelay(0); - assertEquals(0, s.getCurrentVersion()); - MVMap m = s.openMap("data"); - s.rollbackTo(0); - assertTrue(m.isClosed()); - assertEquals(0, s.getCurrentVersion()); - m = s.openMap("data"); + try (MVStore s = openStore(fileName, 5)) { + s.setAutoCommitDelay(0); + assertEquals(0, s.getCurrentVersion()); + MVMap m = s.openMap("data"); + s.rollbackTo(0); + assertTrue(m.isClosed()); + assertEquals(0, s.getCurrentVersion()); + m = s.openMap("data"); - MVMap m0 = s.openMap("data0"); - MVMap m2 = s.openMap("data2"); - m.put("1", "Hello"); - for (int i = 0; i < 10; i++) { - m2.put("" + i, "Test"); - } - long v1 = 
s.commit(); - assertEquals(1, v1); - assertEquals(1, s.getCurrentVersion()); - MVMap m1 = s.openMap("data1"); - assertEquals("Test", m2.get("1")); - m.put("1", "Hallo"); - m0.put("1", "Hallo"); - m1.put("1", "Hallo"); - m2.clear(); - assertEquals("Hallo", m.get("1")); - assertEquals("Hallo", m1.get("1")); - s.rollbackTo(v1); - assertEquals(1, s.getCurrentVersion()); - for (int i = 0; i < 10; i++) { - assertEquals("Test", m2.get("" + i)); - } - assertEquals("Hello", m.get("1")); - assertNull(m0.get("1")); - assertTrue(m1.isClosed()); - assertFalse(m0.isReadOnly()); - s.close(); + MVMap m0 = s.openMap("data0"); + MVMap m2 = s.openMap("data2"); + m.put("1", "Hello"); + for (int i = 0; i < 10; i++) { + m2.put("" + i, "Test"); + } + long v1 = s.commit(); + assertEquals(1, v1); + assertEquals(1, s.getCurrentVersion()); + MVMap m1 = s.openMap("data1"); + assertEquals("Test", m2.get("1")); + m.put("1", "Hallo"); + m0.put("1", "Hallo"); + m1.put("1", "Hallo"); + m2.clear(); + assertEquals("Hallo", m.get("1")); + assertEquals("Hallo", m1.get("1")); + s.rollbackTo(v1); + assertEquals(1, s.getCurrentVersion()); + for (int i = 0; i < 10; i++) { + assertEquals("Test", m2.get("" + i)); + } + assertEquals("Hello", m.get("1")); + assertNull(m0.get("1")); + assertTrue(m1.isClosed()); + assertFalse(m0.isReadOnly()); + } } private void testMeta() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - s.setRetentionTime(Integer.MAX_VALUE); - MVMap m = s.getMetaMap(); - assertEquals("[]", s.getMapNames().toString()); - MVMap data = s.openMap("data"); - data.put("1", "Hello"); - data.put("2", "World"); - s.commit(); - assertEquals(1, s.getCurrentVersion()); - - assertEquals("[data]", s.getMapNames().toString()); - assertEquals("data", s.getMapName(data.getId())); - assertNull(s.getMapName(s.getMetaMap().getId())); - assertNull(s.getMapName(data.getId() + 1)); + try (MVStore s = openStore(fileName)) { + 
s.setRetentionTime(Integer.MAX_VALUE); + MVMap m = s.getMetaMap(); + assertEquals("[]", s.getMapNames().toString()); + MVMap data = s.openMap("data"); + data.put("1", "Hello"); + data.put("2", "World"); + s.commit(); + assertEquals(1, s.getCurrentVersion()); - String id = s.getMetaMap().get("name.data"); - assertEquals("name:data", m.get("map." + id)); - assertEquals("Hello", data.put("1", "Hallo")); - s.commit(); - assertEquals("name:data", m.get("map." + id)); - assertTrue(m.get("root.1").length() > 0); - assertTrue(m.containsKey("chunk.1")); + assertEquals("[data]", s.getMapNames().toString()); + assertEquals("data", s.getMapName(data.getId())); + assertNull(s.getMapName(s.getMetaMap().getId())); + assertNull(s.getMapName(data.getId() + 1)); - assertEquals(2, s.getCurrentVersion()); + String id = s.getMetaMap().get(DataUtils.META_NAME + "data"); + assertEquals("name:data", m.get(DataUtils.META_MAP + id)); + assertEquals("Hello", data.put("1", "Hallo")); + s.commit(); + assertEquals("name:data", m.get(DataUtils.META_MAP + id)); + m = s.getLayoutMap(); + assertTrue(m.get(DataUtils.META_ROOT + id).length() > 0); + assertTrue(m.containsKey(DataUtils.META_CHUNK + "1")); - s.rollbackTo(1); - assertEquals("Hello", data.get("1")); - assertEquals("World", data.get("2")); + assertEquals(2, s.getCurrentVersion()); - s.close(); + s.rollbackTo(1); + assertEquals("Hello", data.get("1")); + assertEquals("World", data.get("2")); + } } private void testInMemory() { for (int j = 0; j < 1; j++) { - MVStore s = openStore(null); - // s.setMaxPageSize(10); - int len = 100; - // TreeMap m = new TreeMap(); - // HashMap m = New.hashMap(); - MVMap m = s.openMap("data"); - for (int i = 0; i < len; i++) { - assertNull(m.put(i, "Hello World")); - } - for (int i = 0; i < len; i++) { - assertEquals("Hello World", m.get(i)); - } - for (int i = 0; i < len; i++) { - assertEquals("Hello World", m.remove(i)); + try (MVStore s = openStore(null)) { + // s.setMaxPageSize(10); + int len = 100; + // 
TreeMap m = new TreeMap(); + // HashMap m = New.hashMap(); + MVMap m = s.openMap("data"); + for (int i = 0; i < len; i++) { + assertNull(m.put(i, "Hello World")); + } + for (int i = 0; i < len; i++) { + assertEquals("Hello World", m.get(i)); + } + for (int i = 0; i < len; i++) { + assertEquals("Hello World", m.remove(i)); + } + assertEquals(null, m.get(0)); + assertEquals(0, m.size()); } - assertEquals(null, m.get(0)); - assertEquals(0, m.size()); - s.close(); } } @@ -1669,29 +1610,29 @@ private void testLargeImport() { int len = 1000; for (int j = 0; j < 5; j++) { FileUtils.delete(fileName); - MVStore s = openStore(fileName, 40); - MVMap m = s.openMap("data", - new MVMap.Builder() - .valueType(new RowDataType(new DataType[] { - new ObjectDataType(), - StringDataType.INSTANCE, - StringDataType.INSTANCE }))); - - // Profiler prof = new Profiler(); - // prof.startCollecting(); - // long t = System.nanoTime(); - for (int i = 0; i < len;) { - Object[] o = new Object[3]; - o[0] = i; - o[1] = "Hello World"; - o[2] = "World"; - m.put(i, o); - i++; - if (i % 10000 == 0) { - s.commit(); + try (MVStore s = openStore(fileName, 40)) { + MVMap m = s.openMap("data", + new MVMap.Builder() + .valueType(new RowDataType(new DataType[]{ + new ObjectDataType(), + StringDataType.INSTANCE, + StringDataType.INSTANCE}))); + + // Profiler prof = new Profiler(); + // prof.startCollecting(); + // long t = System.nanoTime(); + for (int i = 0; i < len; ) { + Object[] o = new Object[3]; + o[0] = i; + o[1] = "Hello World"; + o[2] = "World"; + m.put(i, o); + i++; + if (i % 10000 == 0) { + s.commit(); + } } } - s.close(); // System.out.println(prof.getTop(5)); // System.out.println("store time " + // TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t)); @@ -1703,100 +1644,98 @@ private void testLargeImport() { private void testBtreeStore() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - s.close(); + MVStore store = 
openStore(fileName); + store.close(); - s = openStore(fileName); - MVMap m = s.openMap("data"); int count = 2000; - for (int i = 0; i < count; i++) { - assertNull(m.put(i, "hello " + i)); - assertEquals("hello " + i, m.get(i)); - } - s.commit(); - assertEquals("hello 0", m.remove(0)); - assertNull(m.get(0)); - for (int i = 1; i < count; i++) { - assertEquals("hello " + i, m.get(i)); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + for (int i = 0; i < count; i++) { + assertNull(m.put(i, "hello " + i)); + assertEquals("hello " + i, m.get(i)); + } + s.commit(); + assertEquals("hello 0", m.remove(0)); + assertNull(m.get(0)); + for (int i = 1; i < count; i++) { + assertEquals("hello " + i, m.get(i)); + } } - s.close(); - s = openStore(fileName); - m = s.openMap("data"); - assertNull(m.get(0)); - for (int i = 1; i < count; i++) { - assertEquals("hello " + i, m.get(i)); - } - for (int i = 1; i < count; i++) { - m.remove(i); - } - s.commit(); - assertNull(m.get(0)); - for (int i = 0; i < count; i++) { - assertNull(m.get(i)); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + assertNull(m.get(0)); + for (int i = 1; i < count; i++) { + assertEquals("hello " + i, m.get(i)); + } + for (int i = 1; i < count; i++) { + m.remove(i); + } + s.commit(); + assertNull(m.get(0)); + for (int i = 0; i < count; i++) { + assertNull(m.get(i)); + } } - s.close(); } private void testCompactMapNotOpen() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName, 1000); - MVMap m = s.openMap("data"); int factor = 100; - for (int j = 0; j < 10; j++) { - for (int i = j * factor; i < 10 * factor; i++) { - m.put(i, "Hello" + j); + try (MVStore s = openStore(fileName, 1000)) { + s.setAutoCommitDelay(0); + MVMap m = s.openMap("data"); + for (int j = 0; j < 10; j++) { + for (int i = j * factor; i < 10 * factor; i++) { + m.put(i, "Hello" + j); + } + s.commit(); } - s.commit(); } - s.close(); - s = 
openStore(fileName); - s.setRetentionTime(0); + try (MVStore s = openStore(fileName)) { + s.setAutoCommitDelay(0); + s.setRetentionTime(0); - Map meta = s.getMetaMap(); - int chunkCount1 = 0; - for (String k : meta.keySet()) { - if (k.startsWith("chunk.")) { - chunkCount1++; - } - } - s.compact(80, 1); - s.compact(80, 1); + Map layout = s.getLayoutMap(); + int chunkCount1 = getChunkCount(layout); + s.compact(80, 1); + s.compact(80, 1); - int chunkCount2 = 0; - for (String k : meta.keySet()) { - if (k.startsWith("chunk.")) { - chunkCount2++; - } - } - assertTrue(chunkCount2 >= chunkCount1); + int chunkCount2 = getChunkCount(layout); + assertTrue(chunkCount2 >= chunkCount1); - m = s.openMap("data"); - for (int i = 0; i < 10; i++) { - sleep(1); - boolean result = s.compact(50, 50 * 1024); - if (!result) { - break; + MVMap m = s.openMap("data"); + for (int i = 0; i < 10; i++) { + sleep(1); + boolean result = s.compact(50, 50 * 1024); + s.commit(); + if (!result) { + break; + } } - } - assertFalse(s.compact(50, 1024)); + assertFalse(s.compact(50, 1024)); + + int chunkCount3 = getChunkCount(layout); + + assertTrue(chunkCount1 + ">" + chunkCount2 + ">" + chunkCount3, + chunkCount3 < chunkCount1); - int chunkCount3 = 0; - for (String k : meta.keySet()) { - if (k.startsWith("chunk.")) { - chunkCount3++; + for (int i = 0; i < 10 * factor; i++) { + assertEquals("x" + i, "Hello" + (i / factor), m.get(i)); } } + } - assertTrue(chunkCount1 + ">" + chunkCount2 + ">" + chunkCount3, - chunkCount3 < chunkCount1); - - for (int i = 0; i < 10 * factor; i++) { - assertEquals("x" + i, "Hello" + (i / factor), m.get(i)); + private static int getChunkCount(Map layout) { + int chunkCount = 0; + for (String k : layout.keySet()) { + if (k.startsWith(DataUtils.META_CHUNK)) { + chunkCount++; + } } - s.close(); + return chunkCount; } private void testCompact() { @@ -1805,14 +1744,20 @@ private void testCompact() { long initialLength = 0; for (int j = 0; j < 20; j++) { sleep(2); - MVStore s = 
openStore(fileName); - s.setRetentionTime(0); - MVMap m = s.openMap("data"); - for (int i = 0; i < 100; i++) { - m.put(j + i, "Hello " + j); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(0); + s.setVersionsToKeep(0); + MVMap m = s.openMap("data"); + for (int i = 0; i < 100; i++) { + m.put(j + i, "Hello " + j); + } + trace("Before - fill rate: " + s.getFillRate() + "%, chunks fill rate: " + + s.getChunksFillRate() + ", len: " + FileUtils.size(fileName)); + s.compact(80, 2048); + s.compactMoveChunks(); + trace("After - fill rate: " + s.getFillRate() + "%, chunks fill rate: " + + s.getChunksFillRate() + ", len: " + FileUtils.size(fileName)); } - s.compact(80, 1024); - s.close(); long len = FileUtils.size(fileName); // System.out.println(" len:" + len); if (initialLength == 0) { @@ -1824,19 +1769,20 @@ private void testCompact() { } // long len = FileUtils.size(fileName); // System.out.println("len0: " + len); - MVStore s = openStore(fileName); - MVMap m = s.openMap("data"); - for (int i = 0; i < 100; i++) { - m.remove(i); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + for (int i = 0; i < 100; i++) { + m.remove(i); + } + s.compact(80, 1024); } - s.compact(80, 1024); - s.close(); + // len = FileUtils.size(fileName); // System.out.println("len1: " + len); - s = openStore(fileName); - m = s.openMap("data"); - s.compact(80, 1024); - s.close(); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + s.compact(80, 1024); + } // len = FileUtils.size(fileName); // System.out.println("len2: " + len); } @@ -1847,24 +1793,25 @@ private void testReuseSpace() { long initialLength = 0; for (int j = 0; j < 20; j++) { sleep(2); - MVStore s = openStore(fileName); - s.setRetentionTime(0); - MVMap m = s.openMap("data"); - for (int i = 0; i < 10; i++) { - m.put(i, "Hello"); - } - s.commit(); - for (int i = 0; i < 10; i++) { - assertEquals("Hello", m.get(i)); - assertEquals("Hello", m.remove(i)); + try (MVStore s = 
openStore(fileName)) { + s.setRetentionTime(0); + s.setVersionsToKeep(0); + MVMap m = s.openMap("data"); + for (int i = 0; i < 10; i++) { + m.put(i, "Hello"); + } + s.commit(); + for (int i = 0; i < 10; i++) { + assertEquals("Hello", m.get(i)); + assertEquals("Hello", m.remove(i)); + } } - s.close(); long len = FileUtils.size(fileName); if (initialLength == 0) { initialLength = len; } else { assertTrue("len: " + len + " initial: " + initialLength + " j: " + j, - len <= initialLength * 5); + len <= initialLength * 3); } } } @@ -1872,122 +1819,155 @@ private void testReuseSpace() { private void testRandom() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - MVMap m = s.openMap("data"); - TreeMap map = new TreeMap<>(); - Random r = new Random(1); - int operationCount = 1000; - int maxValue = 30; - Integer expected, got; - for (int i = 0; i < operationCount; i++) { - int k = r.nextInt(maxValue); - int v = r.nextInt(); - boolean compareAll; - switch (r.nextInt(3)) { - case 0: - log(i + ": put " + k + " = " + v); - expected = map.put(k, v); - got = m.put(k, v); - if (expected == null) { - assertNull(got); - } else { - assertEquals(expected, got); - } - compareAll = true; - break; - case 1: - log(i + ": remove " + k); - expected = map.remove(k); - got = m.remove(k); - if (expected == null) { - assertNull(got); - } else { - assertEquals(expected, got); - } - compareAll = true; - break; - default: - Integer a = map.get(k); - Integer b = m.get(k); - if (a == null || b == null) { - assertTrue(a == b); - } else { - assertEquals(a.intValue(), b.intValue()); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + TreeMap map = new TreeMap<>(); + Random r = new Random(1); + int operationCount = 1000; + int maxValue = 30; + Integer expected, got; + for (int i = 0; i < operationCount; i++) { + int k = r.nextInt(maxValue); + int v = r.nextInt(); + boolean compareAll; + switch (r.nextInt(3)) { + 
case 0: + log(i + ": put " + k + " = " + v); + expected = map.put(k, v); + got = m.put(k, v); + if (expected == null) { + assertNull(got); + } else { + assertEquals(expected, got); + } + compareAll = true; + break; + case 1: + log(i + ": remove " + k); + expected = map.remove(k); + got = m.remove(k); + if (expected == null) { + assertNull(got); + } else { + assertEquals(expected, got); + } + compareAll = true; + break; + default: + Integer a = map.get(k); + Integer b = m.get(k); + if (a == null || b == null) { + assertTrue(a == b); + } else { + assertEquals(a.intValue(), b.intValue()); + } + compareAll = false; + break; } - compareAll = false; - break; - } - if (compareAll) { - Iterator it = m.keyIterator(null); - Iterator itExpected = map.keySet().iterator(); - while (itExpected.hasNext()) { - assertTrue(it.hasNext()); - expected = itExpected.next(); - got = it.next(); - assertEquals(expected, got); + if (compareAll) { + Iterator it = m.keyIterator(null); + for (Integer integer : map.keySet()) { + assertTrue(it.hasNext()); + expected = integer; + got = it.next(); + assertEquals(expected, got); + } + assertFalse(it.hasNext()); } - assertFalse(it.hasNext()); } } - s.close(); } private void testKeyValueClasses() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - MVMap is = s.openMap("intString"); - is.put(1, "Hello"); - MVMap ii = s.openMap("intInt"); - ii.put(1, 10); - MVMap si = s.openMap("stringInt"); - si.put("Test", 10); - MVMap ss = s.openMap("stringString"); - ss.put("Hello", "World"); - s.close(); - s = openStore(fileName); - is = s.openMap("intString"); - assertEquals("Hello", is.get(1)); - ii = s.openMap("intInt"); - assertEquals(10, ii.get(1).intValue()); - si = s.openMap("stringInt"); - assertEquals(10, si.get("Test").intValue()); - ss = s.openMap("stringString"); - assertEquals("World", ss.get("Hello")); - s.close(); + try (MVStore s = openStore(fileName)) { + MVMap is = 
s.openMap("intString"); + is.put(1, "Hello"); + MVMap ii = s.openMap("intInt"); + ii.put(1, 10); + MVMap si = s.openMap("stringInt"); + si.put("Test", 10); + MVMap ss = s.openMap("stringString"); + ss.put("Hello", "World"); + } + + try (MVStore s = openStore(fileName)) { + MVMap is = s.openMap("intString"); + assertEquals("Hello", is.get(1)); + MVMap ii = s.openMap("intInt"); + assertEquals(10, ii.get(1).intValue()); + MVMap si = s.openMap("stringInt"); + assertEquals(10, si.get("Test").intValue()); + MVMap ss = s.openMap("stringString"); + assertEquals("World", ss.get("Hello")); + } } private void testIterate() { + int size = config.big ? 1000 : 10; String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - MVMap m = s.openMap("data"); - Iterator it = m.keyIterator(null); - assertFalse(it.hasNext()); - for (int i = 0; i < 10; i++) { - m.put(i, "hello " + i); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + Iterator it = m.keyIterator(null); + assertFalse(it.hasNext()); + for (int i = 0; i < size; i++) { + m.put(i, "hello " + i); + } + s.commit(); + it = m.keyIterator(null); + it.next(); + assertThrows(UnsupportedOperationException.class, it).remove(); + + it = m.keyIterator(null); + for (int i = 0; i < size; i++) { + assertTrue(it.hasNext()); + assertEquals(i, it.next().intValue()); + } + assertFalse(it.hasNext()); + assertThrows(NoSuchElementException.class, it).next(); + for (int j = 0; j < size; j++) { + it = m.keyIterator(j); + for (int i = j; i < size; i++) { + assertTrue(it.hasNext()); + assertEquals(i, it.next().intValue()); + } + assertFalse(it.hasNext()); + } } - s.commit(); - it = m.keyIterator(null); - it.next(); - assertThrows(UnsupportedOperationException.class, it).remove(); - - it = m.keyIterator(null); - for (int i = 0; i < 10; i++) { - assertTrue(it.hasNext()); - assertEquals(i, it.next().intValue()); - } - assertFalse(it.hasNext()); - 
assertThrows(NoSuchElementException.class, it).next(); - for (int j = 0; j < 10; j++) { - it = m.keyIterator(j); - for (int i = j; i < 10; i++) { + } + + private void testIterateReverse() { + int size = config.big ? 1000 : 10; + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + for (int i = 0; i < size; i++) { + m.put(i, "hello " + i); + } + s.commit(); + Iterator it = m.keyIteratorReverse(null); + it.next(); + assertThrows(UnsupportedOperationException.class, it).remove(); + + it = m.keyIteratorReverse(null); + for (int i = size - 1; i >= 0; i--) { assertTrue(it.hasNext()); assertEquals(i, it.next().intValue()); } assertFalse(it.hasNext()); + assertThrows(NoSuchElementException.class, it).next(); + for (int j = 0; j < size; j++) { + it = m.keyIteratorReverse(j); + for (int i = j; i >= 0; i--) { + assertTrue(it.hasNext()); + assertEquals(i, it.next().intValue()); + } + assertFalse(it.hasNext()); + } } - s.close(); } private void testCloseTwice() { @@ -2006,27 +1986,32 @@ private void testCloseTwice() { private void testSimple() { String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - MVMap m = s.openMap("data"); - for (int i = 0; i < 3; i++) { - m.put(i, "hello " + i); - } - s.commit(); - assertEquals("hello 0", m.remove(0)); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + for (int i = 0; i < 3; i++) { + m.put(i, "hello " + i); + } + s.commit(); + assertEquals("hello 0", m.remove(0)); - assertNull(m.get(0)); - for (int i = 1; i < 3; i++) { - assertEquals("hello " + i, m.get(i)); + assertNull(m.get(0)); + for (int i = 1; i < 3; i++) { + assertEquals("hello " + i, m.get(i)); + } } - s.close(); - s = openStore(fileName); - m = s.openMap("data"); - assertNull(m.get(0)); - for (int i = 1; i < 3; i++) { - assertEquals("hello " + i, m.get(i)); + try (MVStore s = 
openStore(fileName)) { + MVMap m = s.openMap("data"); + assertNull(m.get(0)); + for (int i = 1; i < 3; i++) { + assertEquals("hello " + i, m.get(i)); + } } - s.close(); + } + + private void testInvalidSettings() { + assertThrows(IllegalArgumentException.class, + () -> new MVStore.Builder().fileName("test").fileStore(new OffHeapStore()).open()); } private void testLargerThan2G() { diff --git a/h2/src/test/org/h2/test/store/TestMVStoreBenchmark.java b/h2/src/test/org/h2/test/store/TestMVStoreBenchmark.java index 7335e1ffae..fc587d290d 100644 --- a/h2/src/test/org/h2/test/store/TestMVStoreBenchmark.java +++ b/h2/src/test/org/h2/test/store/TestMVStoreBenchmark.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; diff --git a/h2/src/test/org/h2/test/store/TestMVStoreCachePerformance.java b/h2/src/test/org/h2/test/store/TestMVStoreCachePerformance.java index 9a3c8cdb1c..1576724447 100644 --- a/h2/src/test/org/h2/test/store/TestMVStoreCachePerformance.java +++ b/h2/src/test/org/h2/test/store/TestMVStoreCachePerformance.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; diff --git a/h2/src/test/org/h2/test/store/TestConcurrent.java b/h2/src/test/org/h2/test/store/TestMVStoreConcurrent.java similarity index 70% rename from h2/src/test/org/h2/test/store/TestConcurrent.java rename to h2/src/test/org/h2/test/store/TestMVStoreConcurrent.java index eb63e02813..e05fcb8bb4 100644 --- a/h2/src/test/org/h2/test/store/TestConcurrent.java +++ b/h2/src/test/org/h2/test/store/TestMVStoreConcurrent.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -13,16 +13,17 @@ import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.Arrays; -import java.util.Comparator; import java.util.ConcurrentModificationException; import java.util.Iterator; import java.util.Map; import java.util.Random; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; - +import org.h2.mvstore.Chunk; import org.h2.mvstore.DataUtils; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.WriteBuffer; import org.h2.mvstore.type.ObjectDataType; import org.h2.store.fs.FileChannelInputStream; @@ -33,7 +34,7 @@ /** * Tests concurrently accessing a tree map store. */ -public class TestConcurrent extends TestMVStore { +public class TestMVStoreConcurrent extends TestMVStore { /** * Run just this test. @@ -41,7 +42,7 @@ public class TestConcurrent extends TestMVStore { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -70,7 +71,7 @@ private void testInterruptReopenAsync() { } private void testInterruptReopenRetryNIO() { - testInterruptReopen("retry:nio:"); + testInterruptReopen("retry:"); } private void testInterruptReopen(String prefix) { @@ -107,11 +108,10 @@ public void call() throws Exception { private void testConcurrentSaveCompact() { String fileName = "memFS:" + getTestName(); FileUtils.delete(fileName); - final MVStore s = new MVStore.Builder(). + MVStore.Builder builder = new MVStore.Builder(). fileName(fileName). - cacheSize(0). - open(); - try { + cacheSize(0); + try (final MVStore s = builder.open()) { s.setRetentionTime(0); final MVMap dataMap = s.openMap("data"); Task task = new Task() { @@ -143,8 +143,6 @@ public void call() { s.commit(); } task.get(); - } finally { - s.close(); } } @@ -161,12 +159,7 @@ private void testConcurrentDataType() throws InterruptedException { new Object[]{ new byte[]{(byte) -1, (byte) 1}, 20L}, new Object[]{ new byte[]{(byte) 1, (byte) -1}, 5}, }; - Arrays.sort(data, new Comparator() { - @Override - public int compare(Object o1, Object o2) { - return type.compare(o1, o2); - } - }); + Arrays.sort(data, type::compare); Task[] tasks = new Task[2]; for (int i = 0; i < tasks.length; i++) { tasks[i] = new Task() { @@ -209,15 +202,17 @@ public void call() { private void testConcurrentAutoCommitAndChange() throws InterruptedException { String fileName = "memFS:" + getTestName(); FileUtils.delete(fileName); - final MVStore s = new MVStore.Builder(). - fileName(fileName).pageSplitSize(1000). 
- open(); - try { + MVStore.Builder builder = new MVStore.Builder() + .fileName(fileName) + .pageSplitSize(1000); + try (MVStore s = builder.open()) { s.setRetentionTime(1000); s.setAutoCommitDelay(1); + final CountDownLatch latch = new CountDownLatch(2); Task task = new Task() { @Override public void call() { + latch.countDown(); while (!stop) { s.compact(100, 1024 * 1024); } @@ -230,6 +225,7 @@ public void call() { Task task2 = new Task() { @Override public void call() { + latch.countDown(); while (!stop) { int i = counter.getAndIncrement(); dataMap.put(i, i * 10); @@ -242,7 +238,7 @@ public void call() { }; task.execute(); task2.execute(); - Thread.sleep(1); + latch.await(); for (int i = 0; !task.isFinished() && !task2.isFinished() && i < 1000; i++) { MVMap map = s.openMap("d" + (i % 3)); map.put(0, i); @@ -253,8 +249,6 @@ public void call() { for (int i = 0; i < counter.get(); i++) { assertEquals(10 * i, dataMap.get(i).intValue()); } - } finally { - s.close(); } } @@ -334,9 +328,7 @@ public void call() { private static void testConcurrentChangeAndGetVersion() throws InterruptedException { for (int test = 0; test < 10; test++) { - final MVStore s = new MVStore.Builder(). - autoCommitDisabled().open(); - try { + try (final MVStore s = new MVStore.Builder().autoCommitDisabled().open()) { s.setVersionsToKeep(10); final MVMap m = s.openMap("data"); m.put(1, 1); @@ -370,8 +362,6 @@ public void call() { } task.get(); s.commit(); - } finally { - s.close(); } } } @@ -392,10 +382,11 @@ private void testConcurrentFree() throws InterruptedException { } } s1.close(); - final MVStore s = new MVStore.Builder(). - fileName(fileName).autoCommitDisabled().open(); - try { + MVStore.Builder builder = new MVStore.Builder(). 
+ fileName(fileName).autoCommitDisabled(); + try (final MVStore s = builder.open()) { s.setRetentionTime(0); + s.setVersionsToKeep(0); final ArrayList> list = new ArrayList<>(count); for (int i = 0; i < count; i++) { MVMap m = s.openMap("d" + i); @@ -434,25 +425,29 @@ public void call() { task.get(); // this will mark old chunks as unused, // but not remove (and overwrite) them yet + MVMap m = s.openMap("dummy"); + m.put(0, 0); s.commit(); // this will remove them, so we end up with // one unused one, and one active one - MVMap m = s.openMap("dummy"); m.put(1, 1); s.commit(); m.put(2, 2); s.commit(); - MVMap meta = s.getMetaMap(); + MVMap layoutMap = s.getLayoutMap(); int chunkCount = 0; - for (String k : meta.keyList()) { - if (k.startsWith("chunk.")) { - chunkCount++; + for (String k : layoutMap.keyList()) { + if (k.startsWith(DataUtils.META_CHUNK)) { + // dead chunks may stay around for a little while + // discount them + Chunk chunk = Chunk.fromString(layoutMap.get(k)); + if (chunk.maxLenLive > 0) { + chunkCount++; + } } } assertTrue("" + chunkCount, chunkCount < 3); - } finally { - s.close(); } } } @@ -460,8 +455,7 @@ public void call() { private void testConcurrentStoreAndRemoveMap() throws InterruptedException { String fileName = "memFS:" + getTestName(); FileUtils.delete(fileName); - final MVStore s = openStore(fileName); - try { + try (MVStore s = openStore(fileName)) { int count = 200; for (int i = 0; i < count; i++) { MVMap m = s.openMap("d" + i); @@ -488,8 +482,6 @@ public void call() { } } task.get(); - } finally { - s.close(); } } @@ -497,8 +489,7 @@ private void testConcurrentStoreAndClose() throws InterruptedException { String fileName = "memFS:" + getTestName(); for (int i = 0; i < 10; i++) { FileUtils.delete(fileName); - final MVStore s = openStore(fileName); - try { + try (MVStore s = openStore(fileName)) { final AtomicInteger counter = new AtomicInteger(); Task task = new Task() { @Override @@ -523,18 +514,14 @@ public void call() { } Exception 
e = task.getException(); if (e != null) { - assertEquals(DataUtils.ERROR_CLOSED, - DataUtils.getErrorCode(e.getMessage())); + checkErrorCode(DataUtils.ERROR_CLOSED, e); } - } catch (IllegalStateException e) { + } catch (MVStoreException e) { // sometimes storing works, in which case // closing must fail - assertEquals(DataUtils.ERROR_WRITING_FAILED, - DataUtils.getErrorCode(e.getMessage())); + assertEquals(DataUtils.ERROR_WRITING_FAILED, e.getErrorCode()); task.get(); } - } finally { - s.close(); } } } @@ -543,9 +530,8 @@ public void call() { * Test the concurrent map implementation. */ private static void testConcurrentMap() throws InterruptedException { - final MVStore s = openStore(null); - final MVMap m = s.openMap("data"); - try { + try (MVStore s = openStore(null)) { + final MVMap m = s.openMap("data"); final int size = 20; final Random rand = new Random(1); Task task = new Task() { @@ -590,116 +576,114 @@ public void call() { Thread.sleep(1); } task.get(); - } finally { - s.close(); } } private void testConcurrentOnlineBackup() throws Exception { String fileName = getBaseDir() + "/" + getTestName(); String fileNameRestore = getBaseDir() + "/" + getTestName() + "2"; - final MVStore s = openStore(fileName); - final MVMap map = s.openMap("test"); - final Random r = new Random(); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - for (int i = 0; i < 10; i++) { - map.put(i, new byte[100 * r.nextInt(100)]); + try (final MVStore s = openStore(fileName)) { + final MVMap map = s.openMap("test"); + final Random r = new Random(); + Task task = new Task() { + @Override + public void call() throws Exception { + while (!stop) { + for (int i = 0; i < 10; i++) { + map.put(i, new byte[100 * r.nextInt(100)]); + } + s.commit(); + map.clear(); + s.commit(); + long len = s.getFileStore().size(); + if (len > 1024 * 1024) { + // slow down writing a lot + Thread.sleep(200); + } else if (len > 20 * 1024) { + // slow down writing + 
Thread.sleep(20); + } } - s.commit(); - map.clear(); - s.commit(); + } + }; + task.execute(); + try { + for (int i = 0; i < 10; i++) { + // System.out.println("test " + i); + s.setReuseSpace(false); + OutputStream out = new BufferedOutputStream( + new FileOutputStream(fileNameRestore)); long len = s.getFileStore().size(); - if (len > 1024 * 1024) { - // slow down writing a lot - Thread.sleep(200); - } else if (len > 20 * 1024) { - // slow down writing - Thread.sleep(20); + copyFileSlowly(s.getFileStore().getFile(), + len, out); + out.close(); + s.setReuseSpace(true); + MVStore s2 = openStore(fileNameRestore); + MVMap test = s2.openMap("test"); + for (Integer k : test.keySet()) { + test.get(k); } + s2.close(); + // let it compact + Thread.sleep(10); } + } finally { + task.get(); } - }; - task.execute(); - try { - for (int i = 0; i < 10; i++) { - // System.out.println("test " + i); - s.setReuseSpace(false); - OutputStream out = new BufferedOutputStream( - new FileOutputStream(fileNameRestore)); - long len = s.getFileStore().size(); - copyFileSlowly(s.getFileStore().getFile(), - len, out); - out.close(); - s.setReuseSpace(true); - MVStore s2 = openStore(fileNameRestore); - MVMap test = s2.openMap("test"); - for (Integer k : test.keySet()) { - test.get(k); - } - s2.close(); - // let it compact - Thread.sleep(10); - } - } finally { - task.get(); } - s.close(); } private static void copyFileSlowly(FileChannel file, long length, OutputStream out) throws Exception { file.position(0); - InputStream in = new BufferedInputStream(new FileChannelInputStream( - file, false)); - for (int j = 0; j < length; j++) { - int x = in.read(); - if (x < 0) { - break; + try (InputStream in = new BufferedInputStream(new FileChannelInputStream( + file, false))) { + for (int j = 0; j < length; j++) { + int x = in.read(); + if (x < 0) { + break; + } + out.write(x); } - out.write(x); } - in.close(); } private static void testConcurrentIterate() { - MVStore s = new 
MVStore.Builder().pageSplitSize(3).open(); - s.setVersionsToKeep(100); - final MVMap map = s.openMap("test"); - final int len = 10; - final Random r = new Random(); - Task task = new Task() { - @Override - public void call() { - while (!stop) { - int x = r.nextInt(len); - if (r.nextBoolean()) { - map.remove(x); - } else { - map.put(x, r.nextInt(100)); + try (MVStore s = new MVStore.Builder().pageSplitSize(3).open()) { + s.setVersionsToKeep(100); + final MVMap map = s.openMap("test"); + final int len = 10; + final Random r = new Random(); + Task task = new Task() { + @Override + public void call() { + while (!stop) { + int x = r.nextInt(len); + if (r.nextBoolean()) { + map.remove(x); + } else { + map.put(x, r.nextInt(100)); + } } } - } - }; - task.execute(); - try { - for (int k = 0; k < 10000; k++) { - Iterator it = map.keyIterator(r.nextInt(len)); - long old = map.getVersion(); - s.commit(); - while (map.getVersion() == old) { - Thread.yield(); - } - while (it.hasNext()) { - it.next(); + }; + task.execute(); + try { + for (int k = 0; k < 10000; k++) { + Iterator it = map.keyIterator(r.nextInt(len)); + long old = map.getVersion(); + s.commit(); + while (map.getVersion() == old) { + Thread.yield(); + } + while (it.hasNext()) { + it.next(); + } } + } finally { + task.get(); } - } finally { - task.get(); } - s.close(); } @@ -719,103 +703,102 @@ private void testConcurrentWrite() throws InterruptedException { private static void testConcurrentWrite(final AtomicInteger detected, final AtomicInteger notDetected) throws InterruptedException { - final MVStore s = openStore(null); - final MVMap m = s.openMap("data"); - final int size = 20; - final Random rand = new Random(1); - Task task = new Task() { - @Override - public void call() { - while (!stop) { - try { - if (rand.nextBoolean()) { - m.put(rand.nextInt(size), 1); - } else { - m.remove(rand.nextInt(size)); + try (final MVStore s = openStore(null)) { + final MVMap m = s.openMap("data"); + final int size = 20; + final 
Random rand = new Random(1); + Task task = new Task() { + @Override + public void call() { + while (!stop) { + try { + if (rand.nextBoolean()) { + m.put(rand.nextInt(size), 1); + } else { + m.remove(rand.nextInt(size)); + } + m.get(rand.nextInt(size)); + } catch (ConcurrentModificationException e) { + detected.incrementAndGet(); + } catch (NegativeArraySizeException + | ArrayIndexOutOfBoundsException + | IllegalArgumentException + | NullPointerException e) { + notDetected.incrementAndGet(); } - m.get(rand.nextInt(size)); - } catch (ConcurrentModificationException e) { - detected.incrementAndGet(); - } catch ( NegativeArraySizeException - | ArrayIndexOutOfBoundsException - | IllegalArgumentException - | NullPointerException e) { - notDetected.incrementAndGet(); } } - } - }; - task.execute(); - try { - Thread.sleep(1); - for (int j = 0; j < 10; j++) { - for (int i = 0; i < 10; i++) { - try { - if (rand.nextBoolean()) { - m.put(rand.nextInt(size), 2); - } else { - m.remove(rand.nextInt(size)); + }; + task.execute(); + try { + Thread.sleep(1); + for (int j = 0; j < 10; j++) { + for (int i = 0; i < 10; i++) { + try { + if (rand.nextBoolean()) { + m.put(rand.nextInt(size), 2); + } else { + m.remove(rand.nextInt(size)); + } + m.get(rand.nextInt(size)); + } catch (ConcurrentModificationException e) { + detected.incrementAndGet(); + } catch (NegativeArraySizeException + | ArrayIndexOutOfBoundsException + | NullPointerException + | IllegalArgumentException e) { + notDetected.incrementAndGet(); } - m.get(rand.nextInt(size)); - } catch (ConcurrentModificationException e) { - detected.incrementAndGet(); - } catch ( NegativeArraySizeException - | ArrayIndexOutOfBoundsException - | NullPointerException - | IllegalArgumentException e) { - notDetected.incrementAndGet(); } + s.commit(); + Thread.sleep(1); } - s.commit(); - Thread.sleep(1); + } finally { + task.get(); } - } finally { - task.get(); } - s.close(); } private static void testConcurrentRead() throws InterruptedException { 
- final MVStore s = openStore(null); - s.setVersionsToKeep(100); - final MVMap m = s.openMap("data"); - final int size = 3; - int x = (int) s.getCurrentVersion(); - for (int i = 0; i < size; i++) { - m.put(i, x); - } - s.commit(); - Task task = new Task() { - @Override - public void call() { - while (!stop) { - long v = s.getCurrentVersion() - 1; - Map old = m.openVersion(v); - for (int i = 0; i < size; i++) { - Integer x = old.get(i); - if (x == null || (int) v != x) { - Map old2 = m.openVersion(v); - throw new AssertionError(x + "<>" + v + " at " + i + " " + old2); + try (final MVStore s = openStore(null)) { + s.setVersionsToKeep(100); + final MVMap m = s.openMap("data"); + final int size = 3; + int x = (int) s.getCurrentVersion(); + for (int i = 0; i < size; i++) { + m.put(i, x); + } + s.commit(); + Task task = new Task() { + @Override + public void call() { + while (!stop) { + long v = s.getCurrentVersion() - 1; + Map old = m.openVersion(v); + for (int i = 0; i < size; i++) { + Integer x = old.get(i); + if (x == null || (int) v != x) { + Map old2 = m.openVersion(v); + throw new AssertionError(x + "<>" + v + " at " + i + " " + old2); + } } } } - } - }; - task.execute(); - try { - Thread.sleep(1); - for (int j = 0; j < 100; j++) { - x = (int) s.getCurrentVersion(); - for (int i = 0; i < size; i++) { - m.put(i, x); - } - s.commit(); + }; + task.execute(); + try { Thread.sleep(1); + for (int j = 0; j < 100; j++) { + x = (int) s.getCurrentVersion(); + for (int i = 0; i < size; i++) { + m.put(i, x); + } + s.commit(); + Thread.sleep(1); + } + } finally { + task.get(); } - } finally { - task.get(); } - s.close(); } - } diff --git a/h2/src/test/org/h2/test/store/TestMVStoreStopCompact.java b/h2/src/test/org/h2/test/store/TestMVStoreStopCompact.java index a7967ff3e8..b4c7a885f6 100644 --- a/h2/src/test/org/h2/test/store/TestMVStoreStopCompact.java +++ b/h2/src/test/org/h2/test/store/TestMVStoreStopCompact.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -28,14 +28,6 @@ public static void main(String... a) throws Exception { test.test(); } - @Override - public boolean isEnabled() { - if (!config.big) { - return false; - } - return true; - } - @Override public void test() throws Exception { for(int retentionTime = 10; retentionTime < 1000; retentionTime *= 10) { @@ -51,28 +43,38 @@ private void testStopCompact(int retentionTime, int timeout) throws InterruptedE FileUtils.delete(fileName); // store with a very small page size, to make sure // there are many leaf pages - MVStore s = new MVStore.Builder(). - fileName(fileName).open(); - s.setRetentionTime(retentionTime); - MVMap map = s.openMap("data"); - long start = System.currentTimeMillis(); - Random r = new Random(1); - for (int i = 0; i < 4000000; i++) { - long time = System.currentTimeMillis() - start; - if (time > timeout) { - break; + MVStore.Builder builder = new MVStore.Builder().fileName(fileName); + try (MVStore s = builder.open()) { + s.setRetentionTime(retentionTime); + s.setVersionsToKeep(0); + MVMap map = s.openMap("data"); + long start = System.currentTimeMillis(); + Random r = new Random(1); + for (int i = 0; i < 4_000_000; i++) { + long time = System.currentTimeMillis() - start; + if (time > timeout) { + break; + } + int x = r.nextInt(10_000_000); + map.put(x, "Hello World " + i * 10); + } + s.setAutoCommitDelay(100); + long oldWriteCount = s.getFileStore().getWriteCount(); + long totalWrites = 0; + // expect background write to stop after a few seconds + for (int i = 0; i < 50; i++) { + Thread.sleep(200); + long newWriteCount = s.getFileStore().getWriteCount(); + long delta = newWriteCount - oldWriteCount; + if (delta == 0) { + break; + } + totalWrites += 
delta; + oldWriteCount = newWriteCount; } - int x = r.nextInt(10000000); - map.put(x, "Hello World " + i * 10); + // expect that compaction didn't cause many writes + assertTrue("writeCount diff: " + retentionTime + "/" + timeout + " " + totalWrites, + totalWrites < 90); } - s.setAutoCommitDelay(100); - long oldWriteCount = s.getFileStore().getWriteCount(); - // expect background write to stop after 5 seconds - Thread.sleep(5000); - long newWriteCount = s.getFileStore().getWriteCount(); - // expect that compaction didn't cause many writes - assertTrue(newWriteCount - oldWriteCount < 30); - s.close(); } - } diff --git a/h2/src/test/org/h2/test/store/TestMVStoreTool.java b/h2/src/test/org/h2/test/store/TestMVStoreTool.java index f336219d97..a63d85a785 100644 --- a/h2/src/test/org/h2/test/store/TestMVStoreTool.java +++ b/h2/src/test/org/h2/test/store/TestMVStoreTool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -12,7 +12,8 @@ import org.h2.mvstore.MVStore; import org.h2.mvstore.MVStoreTool; import org.h2.mvstore.rtree.MVRTreeMap; -import org.h2.mvstore.rtree.SpatialKey; +import org.h2.mvstore.rtree.Spatial; +import org.h2.mvstore.db.SpatialKey; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; @@ -30,15 +31,7 @@ public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; test.config.big = true; - test.test(); - } - - @Override - public boolean isEnabled() { - if (config.memory) { - return false; - } - return true; + test.testFromMain(); } @Override @@ -109,7 +102,7 @@ private void testCompact() { start = System.currentTimeMillis(); MVStoreTool.compact(fileNameNew, false); - assertEquals(size2, FileUtils.size(fileNameNew)); + assertTrue(100L * Math.abs(size2 - FileUtils.size(fileNameNew)) / size2 < 1); MVStoreTool.compact(fileNameCompressed, true); assertEquals(size3, FileUtils.size(fileNameCompressed)); trace("Re-compacted in " + (System.currentTimeMillis() - start) + " ms."); @@ -138,7 +131,7 @@ private void assertEquals(MVStore a, MVStore b) { MVRTreeMap mb = b.openMap( mapName, new MVRTreeMap.Builder()); assertEquals(ma.sizeAsLong(), mb.sizeAsLong()); - for (Entry e : ma.entrySet()) { + for (Entry e : ma.entrySet()) { Object x = mb.get(e.getKey()); assertEquals(e.getValue(), x.toString()); } diff --git a/h2/src/test/org/h2/test/store/TestMVTableEngine.java b/h2/src/test/org/h2/test/store/TestMVTableEngine.java index cafcb54e9d..3c2d421eba 100644 --- a/h2/src/test/org/h2/test/store/TestMVTableEngine.java +++ b/h2/src/test/org/h2/test/store/TestMVTableEngine.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; import java.io.ByteArrayInputStream; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.StringReader; @@ -18,6 +19,7 @@ import java.sql.SQLException; import java.sql.Savepoint; import java.sql.Statement; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import org.h2.api.ErrorCode; @@ -26,14 +28,17 @@ import org.h2.jdbc.JdbcConnection; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; +import org.h2.mvstore.db.LobStorageMap; import org.h2.mvstore.tx.TransactionStore; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.tools.Recover; import org.h2.tools.Restore; +import org.h2.util.IOUtils; import org.h2.util.JdbcUtils; import org.h2.util.Task; +import org.h2.value.Value; /** * Tests the MVStore in a database. @@ -46,26 +51,24 @@ public class TestMVTableEngine extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public boolean isEnabled() { - if (!config.mvStore) { - return false; - } return true; } @Override public void test() throws Exception { +/* testLobCopy(); testLobReuse(); testShutdownDuringLobCreation(); testLobCreationThenShutdown(); testManyTransactions(); testAppendOnly(); - testLowRetentionTime(); + testNoRetentionTime(); testOldAndNew(); testTemporaryTables(); testUniqueIndex(); @@ -76,7 +79,9 @@ public void test() throws Exception { testMinMaxWithNull(); testTimeout(); testExplainAnalyze(); - testTransactionLogEmptyAfterCommit(); + if (!config.memory) { + testTransactionLogEmptyAfterCommit(); + } testShrinkDatabaseFile(); testTwoPhaseCommit(); testRecover(); @@ -91,11 +96,12 @@ public void test() throws Exception { testEncryption(); testReadOnly(); testReuseDiskSpace(); +*/ testDataTypes(); - testSimple(); - if (!config.travis) { - testReverseDeletePerformance(); - } +// testSimple(); +// if (!config.travis) { +// testReverseDeletePerformance(); +// } } private void testLobCopy() throws Exception { @@ -121,26 +127,26 @@ private void testLobCopy() throws Exception { private void testLobReuse() throws Exception { deleteDb(getTestName()); - Connection conn1 = getConnection(getTestName()); - Statement stat = conn1.createStatement(); - stat.execute("create table test(id identity primary key, lob clob)"); - byte[] buffer = new byte[8192]; - for (int i = 0; i < 20; i++) { - Connection conn2 = getConnection(getTestName()); - stat = conn2.createStatement(); - stat.execute("insert into test(lob) select space(1025) from system_range(1, 10)"); - stat.execute("delete from test where random() > 0.5"); - ResultSet rs = conn2.createStatement().executeQuery( - "select lob from test"); - while (rs.next()) { - InputStream is = rs.getBinaryStream(1); - while (is.read(buffer) != -1) { - // ignore + try (Connection conn1 = getConnection(getTestName())) { + 
Statement stat = conn1.createStatement(); + stat.execute("create table test(id identity primary key, lob clob)"); + byte[] buffer = new byte[8192]; + for (int i = 0; i < 20; i++) { + try (Connection conn2 = getConnection(getTestName())) { + stat = conn2.createStatement(); + stat.execute("insert into test(lob) select space(1025) from system_range(1, 10)"); + stat.execute("delete from test where random() > 0.5"); + ResultSet rs = conn2.createStatement().executeQuery( + "select lob from test"); + while (rs.next()) { + InputStream is = rs.getBinaryStream(1); + while (is.read(buffer) != -1) { + // ignore + } + } } } - conn2.close(); } - conn1.close(); } private void testShutdownDuringLobCreation() throws Exception { @@ -148,69 +154,62 @@ private void testShutdownDuringLobCreation() throws Exception { return; } deleteDb(getTestName()); - Connection conn = getConnection(getTestName()); - Statement stat = conn.createStatement(); - stat.execute("create table test(data clob) as select space(10000)"); - final PreparedStatement prep = conn - .prepareStatement("set @lob = ?"); - final AtomicBoolean end = new AtomicBoolean(); - Task t = new Task() { - - @Override - public void call() throws Exception { - prep.setBinaryStream(1, new InputStream() { - - int len; - - @Override - public int read() throws IOException { - if (len++ < 1024 * 1024 * 4) { - return 0; - } - end.set(true); - while (!stop) { - try { - Thread.sleep(1); - } catch (InterruptedException e) { - // ignore + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("create table test(data clob) as select space(10000)"); + final PreparedStatement prep = conn + .prepareStatement("set @lob = ?"); + final AtomicBoolean end = new AtomicBoolean(); + Task t = new Task() { + + @Override + public void call() throws Exception { + prep.setBinaryStream(1, new InputStream() { + + int len; + + @Override + public int read() throws IOException { + if (len++ < 1024 * 1024 * 4) 
{ + return 0; + } + end.set(true); + while (!stop) { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + // ignore + } } + return -1; } - return -1; - } - } , -1); + }, -1); + } + }; + t.execute(); + while (!end.get()) { + Thread.sleep(1); } - }; - t.execute(); - while (!end.get()) { - Thread.sleep(1); + stat.execute("checkpoint"); + stat.execute("shutdown immediately"); + Exception ex = t.getException(); + assertNotNull(ex); + IOUtils.closeSilently(conn); } - stat.execute("checkpoint"); - stat.execute("shutdown immediately"); - Exception ex = t.getException(); - assertNotNull(ex); - try { - conn.close(); - } catch (Exception e) { - // ignore + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("shutdown defrag"); } - conn = getConnection(getTestName()); - stat = conn.createStatement(); - stat.execute("shutdown defrag"); - try { - conn.close(); - } catch (Exception e) { - // ignore + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("select * " + + "from information_schema.settings " + + "where setting_name = 'info.PAGE_COUNT'"); + rs.next(); + int pages = rs.getInt(2); + // only one lob should remain (but it is small and compressed) + assertTrue("p:" + pages, pages <= 7); } - conn = getConnection(getTestName()); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * " + - "from information_schema.settings " + - "where name = 'info.PAGE_COUNT'"); - rs.next(); - int pages = rs.getInt(2); - // only one lob should remain (but it is small and compressed) - assertTrue("p:" + pages, pages < 4); - conn.close(); } private void testLobCreationThenShutdown() throws Exception { @@ -218,60 +217,53 @@ private void testLobCreationThenShutdown() throws Exception { return; } deleteDb(getTestName()); - Connection conn = getConnection(getTestName()); - Statement stat = conn.createStatement(); - 
stat.execute("create table test(id identity, data clob)"); - PreparedStatement prep = conn - .prepareStatement("insert into test values(?, ?)"); - for (int i = 0; i < 9; i++) { - prep.setInt(1, i); - int size = i * i * i * i * 1024; - prep.setCharacterStream(2, new StringReader(new String( - new char[size]))); - prep.execute(); + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id identity, data clob)"); + PreparedStatement prep = conn + .prepareStatement("insert into test values(?, ?)"); + for (int i = 0; i < 9; i++) { + prep.setInt(1, i); + int size = i * i * i * i * 1024; + prep.setCharacterStream(2, new StringReader(new String( + new char[size]))); + prep.execute(); + } + stat.execute("shutdown immediately"); + IOUtils.closeSilently(conn); } - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (Exception e) { - // ignore + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("drop all objects"); + stat.execute("shutdown defrag"); } - conn = getConnection(getTestName()); - stat = conn.createStatement(); - stat.execute("drop all objects"); - stat.execute("shutdown defrag"); - try { - conn.close(); - } catch (Exception e) { - // ignore + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("select * " + + "from information_schema.settings " + + "where setting_name = 'info.PAGE_COUNT'"); + rs.next(); + int pages = rs.getInt(2); + // no lobs should remain + assertTrue("p:" + pages, pages < 4); } - conn = getConnection(getTestName()); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * " + - "from information_schema.settings " + - "where name = 'info.PAGE_COUNT'"); - rs.next(); - int pages = rs.getInt(2); - // no lobs should remain - assertTrue("p:" + pages, pages < 4); - conn.close(); } private void 
testManyTransactions() throws Exception { deleteDb(getTestName()); - Connection conn = getConnection(getTestName()); - Statement stat = conn.createStatement(); - stat.execute("create table test()"); - conn.setAutoCommit(false); - stat.execute("insert into test values()"); + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("create table test()"); + conn.setAutoCommit(false); + stat.execute("insert into test values()"); - Connection conn2 = getConnection(getTestName()); - Statement stat2 = conn2.createStatement(); - for (long i = 0; i < 100000; i++) { - stat2.execute("insert into test values()"); + try (Connection conn2 = getConnection(getTestName())) { + Statement stat2 = conn2.createStatement(); + for (long i = 0; i < 100000; i++) { + stat2.execute("insert into test values()"); + } + } } - conn2.close(); - conn.close(); } private void testAppendOnly() throws Exception { @@ -279,225 +271,214 @@ private void testAppendOnly() throws Exception { return; } deleteDb(getTestName()); - Connection conn = getConnection(getTestName()); - Statement stat = conn.createStatement(); - stat.execute("set retention_time 0"); - for (int i = 0; i < 10; i++) { - stat.execute("create table dummy" + i + - " as select x, space(100) from system_range(1, 1000)"); - stat.execute("checkpoint"); + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("set retention_time 0"); + for (int i = 0; i < 10; i++) { + stat.execute("create table dummy" + i + + " as select x, space(100) from system_range(1, 1000)"); + stat.execute("checkpoint"); + } + stat.execute("create table test as select x from system_range(1, 1000)"); } - stat.execute("create table test as select x from system_range(1, 1000)"); - conn.close(); + String fileName = getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE; long fileSize = FileUtils.size(fileName); - conn = getConnection( - getTestName() + 
";reuse_space=false"); - stat = conn.createStatement(); - stat.execute("set retention_time 0"); - for (int i = 0; i < 10; i++) { - stat.execute("drop table dummy" + i); - stat.execute("checkpoint"); + try (Connection conn = getConnection(getTestName() + ";reuse_space=false")) { + Statement stat = conn.createStatement(); + stat.execute("set retention_time 0"); + for (int i = 0; i < 10; i++) { + stat.execute("drop table dummy" + i); + stat.execute("checkpoint"); + } + stat.execute("alter table test alter column x rename to y"); + stat.execute("select y from test where 1 = 0"); + stat.execute("create table test2 as select x from system_range(1, 1000)"); } - stat.execute("alter table test alter column x rename to y"); - stat.execute("select y from test where 1 = 0"); - stat.execute("create table test2 as select x from system_range(1, 1000)"); - conn.close(); - FileChannel fc = FileUtils.open(fileName, "rw"); - // undo all changes - fc.truncate(fileSize); - fc.close(); + try (FileChannel fc = FileUtils.open(fileName, "rw")) { + // undo all changes + fc.truncate(fileSize); + } - conn = getConnection(getTestName()); - stat = conn.createStatement(); - stat.execute("select * from dummy0 where 1 = 0"); - stat.execute("select * from dummy9 where 1 = 0"); - stat.execute("select x from test where 1 = 0"); - conn.close(); + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("select * from dummy0 where 1 = 0"); + stat.execute("select * from dummy9 where 1 = 0"); + stat.execute("select x from test where 1 = 0"); + } } - private void testLowRetentionTime() throws SQLException { + private void testNoRetentionTime() throws SQLException { deleteDb(getTestName()); - Connection conn = getConnection( - getTestName() + ";RETENTION_TIME=10;WRITE_DELAY=10"); - Statement stat = conn.createStatement(); - Connection conn2 = getConnection(getTestName()); - Statement stat2 = conn2.createStatement(); - stat.execute("create alias sleep 
as " + - "$$void sleep(int ms) throws Exception { Thread.sleep(ms); }$$"); - stat.execute("create table test(id identity, name varchar) " + - "as select x, 'Init' from system_range(0, 1999)"); - for (int i = 0; i < 10; i++) { - stat.execute("insert into test values(null, 'Hello')"); - // create and delete a large table: this will force compaction - stat.execute("create table temp(id identity, name varchar) as " + - "select x, space(1000000) from system_range(0, 10)"); - stat.execute("drop table temp"); - } - ResultSet rs = stat2 - .executeQuery("select *, sleep(1) from test order by id"); - for (int i = 0; i < 2000 + 10; i++) { - assertTrue(rs.next()); - assertEquals(i, rs.getInt(1)); + try (Connection conn = getConnection(getTestName() + ";RETENTION_TIME=0;WRITE_DELAY=10")) { + Statement stat = conn.createStatement(); + try (Connection conn2 = getConnection(getTestName())) { + Statement stat2 = conn2.createStatement(); + stat.execute("create alias sleep as " + + "$$void sleep(int ms) throws Exception { Thread.sleep(ms); }$$"); + stat.execute("create table test(id identity, name varchar) " + + "as select x, 'Init' from system_range(0, 1999)"); + for (int i = 0; i < 10; i++) { + stat.execute("insert into test values(null, 'Hello')"); + // create and delete a large table: this will force compaction + stat.execute("create table temp(id identity, name varchar) as " + + "select x, space(1000000) from system_range(0, 10)"); + stat.execute("drop table temp"); + } + ResultSet rs = stat2 + .executeQuery("select *, sleep(1) from test order by id"); + for (int i = 0; i < 2000 + 10; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertFalse(rs.next()); + } } - assertFalse(rs.next()); - conn2.close(); - conn.close(); } private void testOldAndNew() throws SQLException { if (config.memory) { return; } - Connection conn; deleteDb(getTestName()); String urlOld = getURL(getTestName() + ";MV_STORE=FALSE", true); String urlNew = getURL(getTestName() + 
";MV_STORE=TRUE", true); String url = getURL(getTestName(), true); - conn = getConnection(urlOld); - conn.createStatement().execute("create table test_old(id int)"); - conn.close(); - conn = getConnection(url); - conn.createStatement().execute("select * from test_old"); - conn.close(); - conn = getConnection(urlNew); - conn.createStatement().execute("create table test_new(id int)"); - conn.close(); - conn = getConnection(url); - conn.createStatement().execute("select * from test_new"); - conn.close(); - conn = getConnection(urlOld); - conn.createStatement().execute("select * from test_old"); - conn.close(); - conn = getConnection(urlNew); - conn.createStatement().execute("select * from test_new"); - conn.close(); + try (Connection conn = getConnection(urlOld)) { + conn.createStatement().execute("create table test_old(id int)"); + } + try (Connection conn = getConnection(url)) { + conn.createStatement().execute("select * from test_old"); + } + try (Connection conn = getConnection(urlNew)) { + conn.createStatement().execute("create table test_new(id int)"); + } + try (Connection conn = getConnection(url)) { + conn.createStatement().execute("select * from test_new"); + } + try (Connection conn = getConnection(urlOld)) { + conn.createStatement().execute("select * from test_old"); + } + try (Connection conn = getConnection(urlNew)) { + conn.createStatement().execute("select * from test_new"); + } } private void testTemporaryTables() throws SQLException { - Connection conn; - Statement stat; deleteDb(getTestName()); String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("set max_memory_rows 100"); - stat.execute("create table t1 as select x from system_range(1, 200)"); - stat.execute("create table t2 as select x from system_range(1, 200)"); - for (int i = 0; i < 20; i++) { - // this will create temporary results that - // internally use temporary tables, which are not all closed 
- stat.execute("select count(*) from t1 where t1.x in (select t2.x from t2)"); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + stat.execute("set max_memory_rows 100"); + stat.execute("create table t1 as select x from system_range(1, 200)"); + stat.execute("create table t2 as select x from system_range(1, 200)"); + for (int i = 0; i < 20; i++) { + // this will create temporary results that + // internally use temporary tables, which are not all closed + stat.execute("select count(*) from t1 where t1.x in (select t2.x from t2)"); + } } - conn.close(); - conn = getConnection(url); - stat = conn.createStatement(); - for (int i = 0; i < 20; i++) { - stat.execute("create table a" + i + "(id int primary key)"); - ResultSet rs = stat.executeQuery("select count(*) from a" + i); - rs.next(); - assertEquals(0, rs.getInt(1)); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + for (int i = 0; i < 20; i++) { + stat.execute("create table a" + i + "(id int primary key)"); + ResultSet rs = stat.executeQuery("select count(*) from a" + i); + rs.next(); + assertEquals(0, rs.getInt(1)); + } } - conn.close(); } private void testUniqueIndex() throws SQLException { - Connection conn; - Statement stat; deleteDb(getTestName()); String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test as select x, 0 from system_range(1, 5000)"); - stat.execute("create unique index on test(x)"); - ResultSet rs = stat.executeQuery("select * from test where x=1"); - assertTrue(rs.next()); - assertFalse(rs.next()); - conn.close(); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + stat.execute("create table test as select x, 0 from system_range(1, 5000)"); + stat.execute("create unique index on test(x)"); + ResultSet rs = stat.executeQuery("select * from test where x=1"); + 
assertTrue(rs.next()); + assertFalse(rs.next()); + } } private void testSecondaryIndex() throws SQLException { - Connection conn; - Statement stat; deleteDb(getTestName()); String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id int)"); - int size = 8 * 1024; - stat.execute("insert into test select mod(x * 111, " + size + ") " + - "from system_range(1, " + size + ")"); - stat.execute("create index on test(id)"); - ResultSet rs = stat.executeQuery( - "select count(*) from test inner join " + - "system_range(1, " + size + ") where " + - "id = mod(x * 111, " + size + ")"); - rs.next(); - assertEquals(size, rs.getInt(1)); - conn.close(); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id int)"); + int size = 8 * 1024; + stat.execute("insert into test select mod(x * 111, " + size + ") " + + "from system_range(1, " + size + ")"); + stat.execute("create index on test(id)"); + ResultSet rs = stat.executeQuery( + "select count(*) from test inner join " + + "system_range(1, " + size + ") where " + + "id = mod(x * 111, " + size + ")"); + rs.next(); + assertEquals(size, rs.getInt(1)); + } } private void testGarbageCollectionForLOB() throws SQLException { if (config.memory) { return; } - Connection conn; - Statement stat; deleteDb(getTestName()); String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id int, data blob)"); - stat.execute("insert into test select x, repeat('0', 10000) " + - "from system_range(1, 10)"); - stat.execute("drop table test"); - stat.execute("create table test2(id int, data blob)"); - PreparedStatement prep = conn.prepareStatement( - "insert into test2 values(?, ?)"); - prep.setInt(1, 1); - assertThrows(ErrorCode.IO_EXCEPTION_1, prep). 
- setBinaryStream(1, createFailingStream(new IOException())); - prep.setInt(1, 2); - assertThrows(ErrorCode.IO_EXCEPTION_1, prep). - setBinaryStream(1, createFailingStream(new IllegalStateException())); - conn.close(); - MVStore s = MVStore.open(getBaseDir()+ "/" + getTestName() + ".mv.db"); - assertTrue(s.hasMap("lobData")); - MVMap lobData = s.openMap("lobData"); - assertEquals(0, lobData.sizeAsLong()); - assertTrue(s.hasMap("lobMap")); - MVMap lobMap = s.openMap("lobMap"); - assertEquals(0, lobMap.sizeAsLong()); - assertTrue(s.hasMap("lobRef")); - MVMap lobRef = s.openMap("lobRef"); - assertEquals(0, lobRef.sizeAsLong()); - s.close(); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id int, data blob)"); + stat.execute("insert into test select x, repeat('0', 10000) " + + "from system_range(1, 10)"); + stat.execute("drop table test"); + stat.execute("create table test2(id int, data blob)"); + PreparedStatement prep = conn.prepareStatement( + "insert into test2 values(?, ?)"); + prep.setInt(1, 1); + assertThrows(ErrorCode.IO_EXCEPTION_1, prep). + setBinaryStream(1, createFailingStream(new IOException())); + prep.setInt(1, 2); + assertThrows(ErrorCode.IO_EXCEPTION_1, prep). 
+ setBinaryStream(1, createFailingStream(new IllegalStateException())); + } + try (MVStore s = MVStore.open(getBaseDir()+ "/" + getTestName() + ".mv.db")) { + assertTrue(s.hasMap("lobData")); + MVMap lobData = s.openMap("lobData"); + assertEquals(0, lobData.sizeAsLong()); + assertTrue(s.hasMap("lobMap")); + MVMap lobMap = s.openMap("lobMap"); + assertEquals(0, lobMap.sizeAsLong()); + assertTrue(s.hasMap("lobRef")); + MVMap lobRef = s.openMap("lobRef"); + assertEquals(0, lobRef.sizeAsLong()); + } } private void testSpatial() throws SQLException { - Connection conn; Statement stat; deleteDb(getTestName()); String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("call rand(1)"); - stat.execute("create table coordinates as select rand()*50 x, " + - "rand()*50 y from system_range(1, 5000)"); - stat.execute("create table test(id identity, data geometry)"); - stat.execute("create spatial index on test(data)"); - stat.execute("insert into test(data) select 'polygon(('||" + - "(1+x)||' '||(1+y)||', '||(2+x)||' '||(2+y)||', "+ - "'||(3+x)||' '||(1+y)||', '||(1+x)||' '||(1+y)||'))' from coordinates;"); - conn.close(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("call rand(1)"); + stat.execute("create table coordinates as select rand()*50 x, " + + "rand()*50 y from system_range(1, 5000)"); + stat.execute("create table test(id identity, data geometry)"); + stat.execute("create spatial index on test(data)"); + stat.execute("insert into test(data) select 'polygon(('||" + + "(1+x)||' '||(1+y)||', '||(2+x)||' '||(2+y)||', " + + "'||(3+x)||' '||(1+y)||', '||(1+x)||' '||(1+y)||'))' from coordinates;"); + } } private void testCount() throws Exception { @@ -505,175 +486,165 @@ private void testCount() throws Exception { return; } - Connection conn; - Connection conn2; Statement stat; Statement stat2; deleteDb(getTestName()); String url = 
getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id int)"); - stat.execute("create table test2(id int)"); - stat.execute("insert into test select x from system_range(1, 10000)"); - conn.close(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("create table test(id int)"); + stat.execute("create table test2(id int)"); + stat.execute("insert into test select x from system_range(1, 10000)"); + } - ResultSet rs; String plan; - conn2 = getConnection(url); - stat2 = conn2.createStatement(); - rs = stat2.executeQuery("explain analyze select count(*) from test"); - rs.next(); - plan = rs.getString(1); - assertTrue(plan, plan.indexOf("reads:") < 0); - - conn = getConnection(url); - stat = conn.createStatement(); - conn.setAutoCommit(false); - stat.execute("insert into test select x from system_range(1, 1000)"); - rs = stat.executeQuery("select count(*) from test"); - rs.next(); - assertEquals(11000, rs.getInt(1)); - - // not yet committed - rs = stat2.executeQuery("explain analyze select count(*) from test"); - rs.next(); - plan = rs.getString(1); - // transaction log is small, so no need to read the table - assertTrue(plan, plan.indexOf("reads:") < 0); - rs = stat2.executeQuery("select count(*) from test"); - rs.next(); - assertEquals(10000, rs.getInt(1)); - - stat.execute("insert into test2 select x from system_range(1, 11000)"); - rs = stat2.executeQuery("explain analyze select count(*) from test"); - rs.next(); - plan = rs.getString(1); - // transaction log is larger than the table, so read the table - assertContains(plan, "reads:"); - rs = stat2.executeQuery("select count(*) from test"); - rs.next(); - assertEquals(10000, rs.getInt(1)); + ResultSet rs; + try (Connection conn2 = getConnection(url)) { + stat2 = conn2.createStatement(); + rs = stat2.executeQuery("explain analyze select count(*) from test"); + rs.next(); + 
plan = rs.getString(1); + assertTrue(plan, !plan.contains("reads:")); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + conn.setAutoCommit(false); + stat.execute("insert into test select x from system_range(1, 1000)"); + rs = stat.executeQuery("select count(*) from test"); + rs.next(); + assertEquals(11000, rs.getInt(1)); + + // not yet committed + rs = stat2.executeQuery("explain analyze select count(*) from test"); + rs.next(); + plan = rs.getString(1); + // transaction log is small, so no need to read the table + assertTrue(plan, !plan.contains("reads:")); + rs = stat2.executeQuery("select count(*) from test"); + rs.next(); + assertEquals(10000, rs.getInt(1)); + + stat2.execute("set cache_size 1024"); // causes cache to be cleared, so reads will occur + + stat.execute("insert into test2 select x from system_range(1, 11000)"); + rs = stat2.executeQuery("explain analyze select count(*) from test"); + rs.next(); + plan = rs.getString(1); + // transaction log is larger than the table, so read the table + assertContains(plan, "reads:"); + rs = stat2.executeQuery("select count(*) from test"); + rs.next(); + assertEquals(10000, rs.getInt(1)); + } + } - conn2.close(); - conn.close(); } private void testMinMaxWithNull() throws Exception { - Connection conn; - Connection conn2; Statement stat; Statement stat2; deleteDb(getTestName()); String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(data int)"); - stat.execute("create index on test(data)"); - stat.execute("insert into test values(null), (2)"); - conn2 = getConnection(url); - stat2 = conn2.createStatement(); - conn.setAutoCommit(false); - conn2.setAutoCommit(false); - stat.execute("insert into test values(1)"); - ResultSet rs; - rs = stat.executeQuery("select min(data) from test"); - rs.next(); - assertEquals(1, rs.getInt(1)); - rs = stat2.executeQuery("select min(data) from 
test"); - rs.next(); - // not yet committed - assertEquals(2, rs.getInt(1)); - conn2.close(); - conn.close(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("create table test(data int)"); + stat.execute("create index on test(data)"); + stat.execute("insert into test values(null), (2)"); + try (Connection conn2 = getConnection(url)) { + stat2 = conn2.createStatement(); + conn.setAutoCommit(false); + conn2.setAutoCommit(false); + stat.execute("insert into test values(1)"); + ResultSet rs; + rs = stat.executeQuery("select min(data) from test"); + rs.next(); + assertEquals(1, rs.getInt(1)); + rs = stat2.executeQuery("select min(data) from test"); + rs.next(); + // not yet committed + assertEquals(2, rs.getInt(1)); + } + } } private void testTimeout() throws Exception { - Connection conn; - Connection conn2; Statement stat; Statement stat2; deleteDb(getTestName()); String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id identity, name varchar)"); - conn2 = getConnection(url); - stat2 = conn2.createStatement(); - conn.setAutoCommit(false); - conn2.setAutoCommit(false); - stat.execute("insert into test values(1, 'Hello')"); - assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat2). - execute("insert into test values(1, 'Hello')"); - conn2.close(); - conn.close(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("create table test(id identity, name varchar)"); + try (Connection conn2 = getConnection(url)) { + stat2 = conn2.createStatement(); + conn.setAutoCommit(false); + conn2.setAutoCommit(false); + stat.execute("insert into test values(1, 'Hello')"); + assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat2). 
+ execute("insert into test values(1, 'Hello')"); + } + } } private void testExplainAnalyze() throws Exception { if (config.memory) { return; } - Connection conn; Statement stat; deleteDb(getTestName()); - String url = getTestName() + ";MV_STORE=TRUE"; + String url = getTestName() + ";MV_STORE=TRUE;WRITE_DELAY=0"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id identity, name varchar) as " + - "select x, space(1000) from system_range(1, 1000)"); - ResultSet rs; - conn.close(); - conn = getConnection(url); - stat = conn.createStatement(); - rs = stat.executeQuery("explain analyze select * from test"); - rs.next(); - String plan = rs.getString(1); - // expect about 1000 reads - String readCount = plan.substring(plan.indexOf("reads: ")); - readCount = readCount.substring("reads: ".length(), readCount.indexOf('\n')); - int rc = Integer.parseInt(readCount); - assertTrue(plan, rc >= 60 && rc <= 80); -// assertTrue(plan, rc >= 1000 && rc <= 1200); - conn.close(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("create table test(id identity, name varchar) as " + + "select x, space(1000) from system_range(1, 1000)"); + } + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("explain analyze select * from test"); + rs.next(); + String plan = rs.getString(1); + // expect about 1000 reads + String readCount = plan.substring(plan.indexOf("reads: ")); + readCount = readCount.substring("reads: ".length(), readCount.indexOf('\n')); + int rc = Integer.parseInt(readCount); + assertTrue(plan, rc >= 60 && rc <= 80); + } } private void testTransactionLogEmptyAfterCommit() throws Exception { - Connection conn; Statement stat; deleteDb(getTestName()); String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create 
table test(id identity, name varchar)"); - stat.execute("set write_delay 0"); - conn.setAutoCommit(false); - PreparedStatement prep = conn.prepareStatement( - "insert into test(name) values(space(10000))"); - for (int j = 0; j < 100; j++) { - for (int i = 0; i < 100; i++) { - prep.execute(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("create table test(id identity, name varchar)"); + stat.execute("set write_delay 0"); + conn.setAutoCommit(false); + PreparedStatement prep = conn.prepareStatement( + "insert into test(name) values(space(10000))"); + for (int j = 0; j < 100; j++) { + for (int i = 0; i < 100; i++) { + prep.execute(); + } + conn.commit(); + } + stat.execute("shutdown immediately"); + } catch (Exception ignore) {/**/} + + String file = getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE; + assertTrue(new File(file).exists()); + try (MVStore store = MVStore.open(file)) { + TransactionStore t = new TransactionStore(store); + t.init(); + int openTransactions = t.getOpenTransactions().size(); + if (openTransactions != 0) { + fail("transaction log was not empty"); } - conn.commit(); - } - stat.execute("shutdown immediately"); - JdbcUtils.closeSilently(conn); - - String file = getBaseDir() + "/" + getTestName() + - Constants.SUFFIX_MV_FILE; - - MVStore store = MVStore.open(file); - TransactionStore t = new TransactionStore(store); - t.init(); - int openTransactions = t.getOpenTransactions().size(); - store.close(); - if (openTransactions != 0) { - fail("transaction log was not empty"); } } @@ -682,7 +653,8 @@ private void testShrinkDatabaseFile() throws Exception { return; } deleteDb(getTestName()); - String dbName = getTestName() + ";MV_STORE=TRUE"; + // set WRITE_DELAY=0 so the free-unused-space runs on commit + String dbName = getTestName() + ";MV_STORE=TRUE;WRITE_DELAY=0"; Connection conn; Statement stat; long maxSize = 0; @@ -699,8 +671,8 @@ private void testShrinkDatabaseFile() throws Exception 
{ retentionTime = 0; } ResultSet rs = stat.executeQuery( - "select value from information_schema.settings " + - "where name='RETENTION_TIME'"); + "select setting_value from information_schema.settings " + + "where setting_name='RETENTION_TIME'"); assertTrue(rs.next()); assertEquals(retentionTime, rs.getInt(1)); stat.execute("create table test(id int primary key, data varchar)"); @@ -727,7 +699,7 @@ private void testShrinkDatabaseFile() throws Exception { + Constants.SUFFIX_MV_FILE; long size = FileUtils.size(fileName); if (i < 10) { - maxSize = (int) (Math.max(size, maxSize) * 1.2); + maxSize = (int) Math.max(size * 1.2, maxSize); } else if (size > maxSize) { fail(i + " size: " + size + " max: " + maxSize); } @@ -943,15 +915,8 @@ private void testReferentialIntegrity() throws Exception { stat.execute("create table child(pid int)"); stat.execute("insert into parent values(1)"); stat.execute("insert into child values(2)"); - try { - stat.execute("alter table child add constraint cp " + - "foreign key(pid) references parent(id)"); - fail(); - } catch (SQLException e) { - assertEquals( - ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, - e.getErrorCode()); - } + assertThrows(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, stat).execute( + "alter table child add constraint cp foreign key(pid) references parent(id)"); stat.execute("update child set pid=1"); stat.execute("drop table child, parent"); @@ -959,15 +924,8 @@ private void testReferentialIntegrity() throws Exception { stat.execute("create table child(pid int)"); stat.execute("insert into parent values(1)"); stat.execute("insert into child values(2)"); - try { - stat.execute("alter table child add constraint cp " + - "foreign key(pid) references parent(id)"); - fail(); - } catch (SQLException e) { - assertEquals( - ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, - e.getErrorCode()); - } + assertThrows(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, stat).execute( + 
"alter table child add constraint cp foreign key(pid) references parent(id)"); stat.execute("drop table child, parent"); stat.execute("create table test(id identity, parent bigint, " + @@ -1135,15 +1093,13 @@ private void testReadOnly() throws Exception { private void testReuseDiskSpace() throws Exception { deleteDb(getTestName()); - String dbName = getTestName() + ";MV_STORE=TRUE"; + // set WRITE_DELAY=0 so the free-unused-space runs on commit + String dbName = getTestName() + ";MV_STORE=TRUE;WRITE_DELAY=0;RETENTION_TIME=0"; Connection conn; Statement stat; long maxSize = 0; for (int i = 0; i < 20; i++) { conn = getConnection(dbName); - Database db = (Database) ((JdbcConnection) conn). - getSession().getDataHandler(); - db.getStore().getMvStore().setRetentionTime(0); stat = conn.createStatement(); stat.execute("create table test(id int primary key, data varchar)"); stat.execute("insert into test select x, space(1000) " + @@ -1152,8 +1108,9 @@ private void testReuseDiskSpace() throws Exception { conn.close(); long size = FileUtils.size(getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE); +// trace("Pass #" + i + ": size=" + size); if (i < 10) { - maxSize = (int) (Math.max(size, maxSize) * 1.1); + maxSize = (int) (Math.max(size * 1.1, maxSize)); } else if (size > maxSize) { fail(i + " size: " + size + " max: " + maxSize); } @@ -1173,30 +1130,30 @@ private void testDataTypes() throws Exception { "by tinyint," + "sm smallint," + "bi bigint," + - "de decimal," + + "de decimal(5, 2)," + "re real,"+ "do double," + "ti time," + "da date," + "ts timestamp," + - "bin binary," + + "bin varbinary," + "uu uuid," + "bl blob," + "cl clob)"); stat.execute("insert into test values(1000, '', '', null, 0, 0, 0, " + "9, 2, 3, '10:00:00', '2001-01-01', " - + "'2010-10-10 10:10:10', x'00', 0, x'b1', 'clob')"); + + "'2010-10-10 10:10:10', x'00', '01234567-89AB-CDEF-0123-456789ABCDEF', x'b1', 'clob')"); stat.execute("insert into test values(1, 'vc', 'ch', true, 8, 16, 64, " + 
"123.00, 64.0, 32.0, '10:00:00', '2001-01-01', " - + "'2010-10-10 10:10:10', x'00', 0, x'b1', 'clob')"); + + "'2010-10-10 10:10:10', x'00', '01234567-89AB-CDEF-0123-456789ABCDEF', x'b1', 'clob')"); stat.execute("insert into test values(-1, " + "'quite a long string \u1234 \u00ff', 'ch', false, -8, -16, -64, " + "0, 0, 0, '10:00:00', '2001-01-01', " - + "'2010-10-10 10:10:10', SECURE_RAND(100), 0, x'b1', 'clob')"); + + "'2010-10-10 10:10:10', SECURE_RAND(100), RANDOM_UUID(), x'b1', 'clob')"); stat.execute("insert into test values(-1000, space(1000), 'ch', " + "false, -8, -16, -64, " + "1, 1, 1, '10:00:00', '2001-01-01', " - + "'2010-10-10 10:10:10', SECURE_RAND(100), 0, x'b1', 'clob')"); + + "'2010-10-10 10:10:10', SECURE_RAND(100), RANDOM_UUID(), x'b1', 'clob')"); if (!config.memory) { conn.close(); conn = getConnection(dbName); @@ -1207,26 +1164,25 @@ private void testDataTypes() throws Exception { rs.next(); assertEquals(1000, rs.getInt(1)); assertEquals("", rs.getString(2)); - assertEquals("", rs.getString(3)); + assertEquals(" ", rs.getString(3)); assertFalse(rs.getBoolean(4)); assertEquals(0, rs.getByte(5)); assertEquals(0, rs.getShort(6)); assertEquals(0, rs.getLong(7)); - assertEquals("9", rs.getBigDecimal(8).toString()); + assertEquals("9.00", rs.getBigDecimal(8).toString()); assertEquals(2d, rs.getDouble(9)); assertEquals(3d, rs.getFloat(10)); assertEquals("10:00:00", rs.getString(11)); assertEquals("2001-01-01", rs.getString(12)); assertEquals("2010-10-10 10:10:10", rs.getString(13)); assertEquals(1, rs.getBytes(14).length); - assertEquals("00000000-0000-0000-0000-000000000000", - rs.getString(15)); + assertEquals(UUID.fromString("01234567-89AB-CDEF-0123-456789ABCDEF"), rs.getObject(15)); assertEquals(1, rs.getBytes(16).length); assertEquals("clob", rs.getString(17)); rs.next(); assertEquals(1, rs.getInt(1)); assertEquals("vc", rs.getString(2)); - assertEquals("ch", rs.getString(3)); + assertEquals("ch ", rs.getString(3)); assertTrue(rs.getBoolean(4)); 
assertEquals(8, rs.getByte(5)); assertEquals(16, rs.getShort(6)); @@ -1238,69 +1194,68 @@ private void testDataTypes() throws Exception { assertEquals("2001-01-01", rs.getString(12)); assertEquals("2010-10-10 10:10:10", rs.getString(13)); assertEquals(1, rs.getBytes(14).length); - assertEquals("00000000-0000-0000-0000-000000000000", - rs.getString(15)); + assertEquals(UUID.fromString("01234567-89AB-CDEF-0123-456789ABCDEF"), rs.getObject(15)); assertEquals(1, rs.getBytes(16).length); assertEquals("clob", rs.getString(17)); rs.next(); assertEquals(-1, rs.getInt(1)); assertEquals("quite a long string \u1234 \u00ff", rs.getString(2)); - assertEquals("ch", rs.getString(3)); + assertEquals("ch ", rs.getString(3)); assertFalse(rs.getBoolean(4)); assertEquals(-8, rs.getByte(5)); assertEquals(-16, rs.getShort(6)); assertEquals(-64, rs.getLong(7)); - assertEquals("0", rs.getBigDecimal(8).toString()); + assertEquals("0.00", rs.getBigDecimal(8).toString()); assertEquals(0.0d, rs.getDouble(9)); assertEquals(0.0d, rs.getFloat(10)); assertEquals("10:00:00", rs.getString(11)); assertEquals("2001-01-01", rs.getString(12)); assertEquals("2010-10-10 10:10:10", rs.getString(13)); assertEquals(100, rs.getBytes(14).length); - assertEquals("00000000-0000-0000-0000-000000000000", - rs.getString(15)); + assertEquals(2, rs.getObject(15, UUID.class).variant()); assertEquals(1, rs.getBytes(16).length); assertEquals("clob", rs.getString(17)); rs.next(); assertEquals(-1000, rs.getInt(1)); assertEquals(1000, rs.getString(2).length()); - assertEquals("ch", rs.getString(3)); + assertEquals("ch ", rs.getString(3)); assertFalse(rs.getBoolean(4)); assertEquals(-8, rs.getByte(5)); assertEquals(-16, rs.getShort(6)); assertEquals(-64, rs.getLong(7)); - assertEquals("1", rs.getBigDecimal(8).toString()); + assertEquals("1.00", rs.getBigDecimal(8).toString()); assertEquals(1.0d, rs.getDouble(9)); assertEquals(1.0d, rs.getFloat(10)); assertEquals("10:00:00", rs.getString(11)); assertEquals("2001-01-01", 
rs.getString(12)); assertEquals("2010-10-10 10:10:10", rs.getString(13)); assertEquals(100, rs.getBytes(14).length); - assertEquals("00000000-0000-0000-0000-000000000000", - rs.getString(15)); + assertEquals(2, rs.getObject(15, UUID.class).variant()); assertEquals(1, rs.getBytes(16).length); assertEquals("clob", rs.getString(17)); stat.execute("drop table test"); stat.execute("create table test(id int, obj object, " + - "rs result_set, arr array, ig varchar_ignorecase)"); + "rs row(a int), arr1 int array, arr2 numeric(1000) array, ig varchar_ignorecase)"); PreparedStatement prep = conn.prepareStatement( - "insert into test values(?, ?, ?, ?, ?)"); + "insert into test values(?, ?, ?, ?, ?, ?)"); prep.setInt(1, 1); prep.setObject(2, new java.lang.AssertionError()); prep.setObject(3, stat.executeQuery("select 1 from dual")); prep.setObject(4, new Object[]{1, 2}); - prep.setObject(5, "test"); + prep.setObject(5, new Object[0]); + prep.setObject(6, "test"); prep.execute(); prep.setInt(1, 1); prep.setObject(2, new java.lang.AssertionError()); prep.setObject(3, stat.executeQuery("select 1 from dual")); - prep.setObject(4, new Object[]{ + prep.setObject(4, new Object[0]); + prep.setObject(5, new Object[]{ new BigDecimal(new String( new char[1000]).replace((char) 0, '1'))}); - prep.setObject(5, "test"); + prep.setObject(6, "test"); prep.execute(); if (!config.memory) { conn.close(); @@ -1358,12 +1313,7 @@ private void testSimple() throws Exception { assertEquals("Hello", rs.getString(2)); assertFalse(rs.next()); - try { - stat.execute("insert into test(id, name) values(10, 'Hello')"); - fail(); - } catch (SQLException e) { - assertEquals(e.toString(), ErrorCode.DUPLICATE_KEY_1, e.getErrorCode()); - } + assertThrows(ErrorCode.DUPLICATE_KEY_1, stat).execute("insert into test(id, name) values(10, 'Hello')"); rs = stat.executeQuery("select min(id), max(id), " + "min(name), max(name) from test"); @@ -1411,12 +1361,7 @@ private void testSimple() throws Exception { rs = 
stat.executeQuery("select count(*) from test"); rs.next(); assertEquals(3000, rs.getInt(1)); - try { - stat.execute("insert into test(id) values(1)"); - fail(); - } catch (SQLException e) { - assertEquals(ErrorCode.DUPLICATE_KEY_1, e.getErrorCode()); - } + assertThrows(ErrorCode.DUPLICATE_KEY_1, stat).execute("insert into test(id) values(1)"); stat.execute("delete from test"); stat.execute("insert into test(id, name) values(-1, 'Hello')"); rs = stat.executeQuery("select count(*) from test where id = -1"); diff --git a/h2/src/test/org/h2/test/store/TestObjectDataType.java b/h2/src/test/org/h2/test/store/TestObjectDataType.java index 8ca34abad8..8b4cc3adf1 100644 --- a/h2/src/test/org/h2/test/store/TestObjectDataType.java +++ b/h2/src/test/org/h2/test/store/TestObjectDataType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -28,7 +28,7 @@ public class TestObjectDataType extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -106,7 +106,6 @@ private void testCommonValues() { if (last != null) { int comp = ot.compare(x, last); if (comp <= 0) { - ot.compare(x, last); fail(x.getClass().getSimpleName() + ": " + x.toString() + " " + comp); } diff --git a/h2/src/test/org/h2/test/store/TestRandomMapOps.java b/h2/src/test/org/h2/test/store/TestRandomMapOps.java index e1d859cb40..b3f75b45a9 100644 --- a/h2/src/test/org/h2/test/store/TestRandomMapOps.java +++ b/h2/src/test/org/h2/test/store/TestRandomMapOps.java @@ -1,18 +1,23 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; import java.text.MessageFormat; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; +import java.util.Map; +import java.util.Objects; import java.util.Random; import java.util.TreeMap; +import org.h2.mvstore.Cursor; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; import org.h2.store.fs.FileUtils; +import org.h2.test.TestAll; import org.h2.test.TestBase; /** @@ -21,8 +26,10 @@ public class TestRandomMapOps extends TestBase { private static final boolean LOG = false; + private final Random r = new Random(); private int op; + /** * Run just this test. * @@ -30,56 +37,65 @@ public class TestRandomMapOps extends TestBase { */ public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); - test.config.big = true; - test.test(); + TestAll config = test.config; + config.big = true; +// config.memory = true; + + test.println(config.toString()); + for (int i = 0; i < 10; i++) { + test.testFromMain(); + test.println("Done pass #" + i); + } } @Override public void test() throws Exception { - testMap("memFS:randomOps.h3"); - FileUtils.delete("memFS:randomOps.h3"); + if (config.memory) { + testMap(null); + } else { + String fileName = "memFS:" + getTestName(); + testMap(fileName); + } } private void testMap(String fileName) { - int best = Integer.MAX_VALUE; - int bestSeed = 0; - Throwable failException = null; - int size = getSize(100, 1000); - for (int seed = 0; seed < 100; seed++) { - FileUtils.delete(fileName); - Throwable ex = null; + int size = getSize(500, 3000); + long seed = 0; +// seed = System.currentTimeMillis(); +// seed = -3407210256209708616L; + for (int cnt = 0; cnt < 100; cnt++) { try { testOps(fileName, size, seed); - continue; - } catch (Exception | AssertionError e) { - ex = e; - } - if (op < best) { - trace(seed); - bestSeed = seed; - best = op; - size = best; - failException = ex; - // System.out.println("seed:" + seed + " op:" + op + " " + ex); + } catch (Exception | AssertionError ex) { + println("seed:" + seed + " op:" + op + " " + ex); + throw ex; + } finally { + if (fileName != null) { + FileUtils.delete(fileName); + } } - } - if (failException != null) { - throw (AssertionError) new AssertionError("seed = " + bestSeed - + " op = " + best).initCause(failException); + seed = r.nextLong(); } } - private void testOps(String fileName, int size, int seed) { - FileUtils.delete(fileName); - MVStore s = openStore(fileName); - MVMap m = s.openMap("data"); - Random r = new Random(seed); + private void testOps(String fileName, int loopCount, long seed) { + r.setSeed(seed); op = 0; - TreeMap map = new TreeMap<>(); - for (; op < size; op++) { - int k = r.nextInt(100); - 
byte[] v = new byte[r.nextInt(10) * 10]; - int type = r.nextInt(12); + MVStore s = openStore(fileName); + int keysPerPage = s.getKeysPerPage(); + int keyRange = 2000; + MVMap m = s.openMap("data"); + TreeMap map = new TreeMap<>(); + int[] recentKeys = new int[2 * keysPerPage]; + for (; op < loopCount; op++) { + int k = r.nextInt(3 * keyRange / 2); + if (k >= keyRange) { + k = recentKeys[k % recentKeys.length]; + } else { + recentKeys[op % recentKeys.length] = k; + } + String v = k + "_Value_" + op; + int type = r.nextInt(15); switch (type) { case 0: case 1: @@ -100,23 +116,27 @@ private void testOps(String fileName, int size, int seed) { s.compact(90, 1024); break; case 7: - log(op, k, v, "m.clear()"); - m.clear(); - map.clear(); + if (op % 64 == 0) { + log(op, k, v, "m.clear()"); + m.clear(); + map.clear(); + } break; case 8: log(op, k, v, "s.commit()"); s.commit(); break; case 9: - log(op, k, v, "s.commit()"); - s.commit(); - log(op, k, v, "s.close()"); - s.close(); - log(op, k, v, "s = openStore(fileName)"); - s = openStore(fileName); - log(op, k, v, "m = s.openMap(\"data\")"); - m = s.openMap("data"); + if (fileName != null) { + log(op, k, v, "s.commit()"); + s.commit(); + log(op, k, v, "s.close()"); + s.close(); + log(op, k, v, "s = openStore(fileName)"); + s = openStore(fileName); + log(op, k, v, "m = s.openMap(\"data\")"); + m = s.openMap("data"); + } break; case 10: log(op, k, v, "s.commit()"); @@ -124,7 +144,30 @@ private void testOps(String fileName, int size, int seed) { log(op, k, v, "s.compactMoveChunks()"); s.compactMoveChunks(); break; - case 11: + case 11: { + int rangeSize = r.nextInt(2 * keysPerPage); + int step = r.nextBoolean() ? 1 : -1; + for (int i = 0; i < rangeSize; i++) { + log(op, k, v, "m.put({0}, {1})"); + m.put(k, v); + map.put(k, v); + k += step; + v = k + "_Value_" + op; + } + break; + } + case 12: { + int rangeSize = r.nextInt(2 * keysPerPage); + int step = r.nextBoolean() ? 
1 : -1; + for (int i = 0; i < rangeSize; i++) { + log(op, k, v, "m.remove({0})"); + m.remove(k); + map.remove(k); + k += step; + } + break; + } + default: log(op, k, v, "m.getKeyIndex({0})"); ArrayList keyList = new ArrayList<>(map.keySet()); int index = Collections.binarySearch(keyList, k, null); @@ -136,7 +179,7 @@ private void testOps(String fileName, int size, int seed) { } break; } - assertEqualsMapValues(map.get(k), m.get(k)); + assertEquals(map.get(k), m.get(k)); assertEquals(map.ceilingKey(k), m.ceilingKey(k)); assertEquals(map.floorKey(k), m.floorKey(k)); assertEquals(map.higherKey(k), m.higherKey(k)); @@ -147,27 +190,83 @@ private void testOps(String fileName, int size, int seed) { assertEquals(map.firstKey(), m.firstKey()); assertEquals(map.lastKey(), m.lastKey()); } + + int from = r.nextBoolean() ? r.nextInt(keyRange) : k + r.nextInt(2 * keysPerPage) - keysPerPage; + int to = r.nextBoolean() ? r.nextInt(keyRange) : from + r.nextInt(2 * keysPerPage) - keysPerPage; + + Cursor cursor; + Collection> entrySet; + String msg; + if (from <= to) { + msg = "(" + from + ", null)"; + cursor = m.cursor(from, null, false); + entrySet = map.tailMap(from).entrySet(); + assertEquals(msg, entrySet, cursor); + + msg = "(null, " + from + ")"; + cursor = m.cursor(null, from, false); + entrySet = map.headMap(from + 1).entrySet(); + assertEquals(msg, entrySet, cursor); + + msg = "(" + from + ", " + to + ")"; + cursor = m.cursor(from, to, false); + entrySet = map.subMap(from, to + 1).entrySet(); + assertEquals(msg, entrySet, cursor); + } + + if (from >= to) { + msg = "rev (" + from + ", null)"; + cursor = m.cursor(from, null, true); + entrySet = reverse(map.headMap(from + 1).entrySet()); + assertEquals(msg, entrySet, cursor); + + msg = "rev (null, "+from+")"; + cursor = m.cursor(null, from, true); + entrySet = reverse(map.tailMap(from).entrySet()); + assertEquals(msg, entrySet, cursor); + + msg = "rev (" + from + ", " + to + ")"; + cursor = m.cursor(from, to, true); + entrySet 
= reverse(map.subMap(to, from + 1).entrySet()); + assertEquals(msg, entrySet, cursor); + } } s.close(); } - private static MVStore openStore(String fileName) { - MVStore s = new MVStore.Builder().fileName(fileName). - pageSplitSize(50).autoCommitDisabled().open(); - s.setRetentionTime(1000); - return s; + private static Collection> reverse(Collection> entrySet) { + ArrayList> list = new ArrayList<>(entrySet); + Collections.reverse(list); + entrySet = list; + return entrySet; } - private void assertEqualsMapValues(byte[] x, byte[] y) { - if (x == null || y == null) { - if (x != y) { - assertTrue(x == y); - } - } else { - assertEquals(x.length, y.length); + private void assertEquals(String msg, Iterable> entrySet, Cursor cursor) { + int cnt = 0; + for (Map.Entry entry : entrySet) { + String message = msg + " " + cnt; + assertTrue(message, cursor.hasNext()); + assertEquals(message, entry.getKey(), cursor.next()); + assertEquals(message, entry.getKey(), cursor.getKey()); + assertEquals(message, entry.getValue(), cursor.getValue()); + ++cnt; + } + assertFalse(msg, cursor.hasNext()); + } + + public void assertEquals(String message, Object expected, Object actual) { + if (!Objects.equals(expected, actual)) { + fail(message + " expected: " + expected + " actual: " + actual); } } + private static MVStore openStore(String fileName) { + MVStore s = new MVStore.Builder().fileName(fileName) + .keysPerPage(7).autoCommitDisabled().open(); + s.setRetentionTime(1000); + return s; + } + /** * Log the operation * @@ -176,10 +275,9 @@ private void assertEqualsMapValues(byte[] x, byte[] y) { * @param v the value * @param msg the message */ - private static void log(int op, int k, byte[] v, String msg) { + private static void log(int op, int k, String v, String msg) { if (LOG) { - msg = MessageFormat.format(msg, k, - v == null ? 
null : "new byte[" + v.length + "]"); + msg = MessageFormat.format(msg, k, v); System.out.println(msg + "; // op " + op); } } diff --git a/h2/src/test/org/h2/test/store/TestShardedMap.java b/h2/src/test/org/h2/test/store/TestShardedMap.java index fd04138786..69345601c3 100644 --- a/h2/src/test/org/h2/test/store/TestShardedMap.java +++ b/h2/src/test/org/h2/test/store/TestShardedMap.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -21,7 +21,7 @@ public class TestShardedMap extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/store/TestSpinLock.java b/h2/src/test/org/h2/test/store/TestSpinLock.java index 904c237b0c..693d6ab53e 100644 --- a/h2/src/test/org/h2/test/store/TestSpinLock.java +++ b/h2/src/test/org/h2/test/store/TestSpinLock.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -29,7 +29,7 @@ public class TestSpinLock extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/store/TestStreamStore.java b/h2/src/test/org/h2/test/store/TestStreamStore.java index 5b3f43e7a0..1704fdad71 100644 --- a/h2/src/test/org/h2/test/store/TestStreamStore.java +++ b/h2/src/test/org/h2/test/store/TestStreamStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -35,11 +35,12 @@ public class TestStreamStore extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws IOException { + FileUtils.createDirectories(getBaseDir()); testMaxBlockKey(); testIOException(); testSaveCount(); @@ -85,8 +86,7 @@ private void testIOException() throws IOException { } fail(); } catch (IOException e) { - assertEquals(DataUtils.ERROR_BLOCK_NOT_FOUND, - DataUtils.getErrorCode(e.getMessage())); + checkErrorCode(DataUtils.ERROR_BLOCK_NOT_FOUND, e.getCause()); } } @@ -103,9 +103,9 @@ private void testSaveCount() throws IOException { for (int i = 0; i < 8 * 16; i++) { streamStore.put(new RandomStream(blockSize, i)); } - long writeCount = s.getFileStore().getWriteCount(); - assertTrue(writeCount > 2); s.close(); + long writeCount = s.getFileStore().getWriteCount(); + assertTrue(writeCount > 5); } private void testExceptionDuringStore() throws IOException { @@ -114,12 +114,10 @@ private void testExceptionDuringStore() throws IOException { HashMap map = new HashMap<>(); StreamStore s = new StreamStore(map); s.setMaxBlockSize(1024); - 
assertThrows(IOException.class, s). - put(createFailingStream(new IOException())); + assertThrows(IOException.class, () -> s.put(createFailingStream(new IOException()))); assertEquals(0, map.size()); // the runtime exception is converted to an IOException - assertThrows(IOException.class, s). - put(createFailingStream(new IllegalStateException())); + assertThrows(IOException.class, () -> s.put(createFailingStream(new IllegalStateException()))); assertEquals(0, map.size()); } @@ -231,29 +229,14 @@ public int read(byte[] b, int off, int len) { } - private void testDetectIllegalId() throws IOException { + private void testDetectIllegalId() { Map map = new HashMap<>(); StreamStore store = new StreamStore(map); - try { - store.length(new byte[]{3, 0, 0}); - fail(); - } catch (IllegalArgumentException e) { - // expected - } - try { - store.remove(new byte[]{3, 0, 0}); - fail(); - } catch (IllegalArgumentException e) { - // expected - } + assertThrows(IllegalArgumentException.class, () -> store.length(new byte[]{3, 0, 0})); + assertThrows(IllegalArgumentException.class, () -> store.remove(new byte[]{3, 0, 0})); map.put(0L, new byte[]{3, 0, 0}); InputStream in = store.get(new byte[]{2, 1, 0}); - try { - in.read(); - fail(); - } catch (IllegalArgumentException e) { - // expected - } + assertThrows(IllegalArgumentException.class, () -> in.read()); } private void testTreeStructure() throws IOException { diff --git a/h2/src/test/org/h2/test/store/TestTransactionStore.java b/h2/src/test/org/h2/test/store/TestTransactionStore.java index b19dbc0447..07fee7007d 100644 --- a/h2/src/test/org/h2/test/store/TestTransactionStore.java +++ b/h2/src/test/org/h2/test/store/TestTransactionStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; @@ -10,21 +10,25 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.AbstractMap.SimpleImmutableEntry; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Map.Entry; import java.util.Random; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; - import org.h2.mvstore.DataUtils; -import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.tx.Transaction; import org.h2.mvstore.tx.TransactionMap; import org.h2.mvstore.tx.TransactionStore; import org.h2.mvstore.tx.TransactionStore.Change; +import org.h2.mvstore.type.LongDataType; +import org.h2.mvstore.type.MetaType; import org.h2.mvstore.type.ObjectDataType; +import org.h2.mvstore.type.StringDataType; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.util.Task; @@ -40,7 +44,7 @@ public class TestTransactionStore extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -53,7 +57,6 @@ public void test() throws Exception { testConcurrentUpdate(); testRepeatedChange(); testTransactionAge(); - testStopWhileCommitting(); testGetModifiedMaps(); testKeyIterator(); testTwoPhaseCommit(); @@ -62,78 +65,140 @@ public void test() throws Exception { testSingleConnection(); testCompareWithPostgreSQL(); testStoreMultiThreadedReads(); + testCommitAfterMapRemoval(); + testDeadLock(); } private void testHCLFKey() { - MVStore s = MVStore.open(null); - final TransactionStore ts = new TransactionStore(s); - ts.init(); - Transaction t = ts.begin(); - ObjectDataType keyType = new ObjectDataType(); - TransactionMap map = t.openMap("test", keyType, keyType); - // firstKey() - assertNull(map.firstKey()); - // lastKey() - assertNull(map.lastKey()); - map.put(10L, 100L); - map.put(20L, 200L); - map.put(30L, 300L); - map.put(40L, 400L); - t.commit(); - t = ts.begin(); - map = t.openMap("test", keyType, keyType); - map.put(15L, 150L); - // The same transaction - assertEquals((Object) 15L, map.higherKey(10L)); - t = ts.begin(); - map = t.openMap("test", keyType, keyType); - // Another transaction - // higherKey() - assertEquals((Object) 20L, map.higherKey(10L)); - assertEquals((Object) 20L, map.higherKey(15L)); - assertNull(map.higherKey(40L)); - // ceilingKey() - assertEquals((Object) 10L, map.ceilingKey(10L)); - assertEquals((Object) 20L, map.ceilingKey(15L)); - assertEquals((Object) 40L, map.ceilingKey(40L)); - assertNull(map.higherKey(45L)); - // lowerKey() - assertNull(map.lowerKey(10L)); - assertEquals((Object) 10L, map.lowerKey(15L)); - assertEquals((Object) 10L, map.lowerKey(20L)); - assertEquals((Object) 20L, map.lowerKey(25L)); - // floorKey() - assertNull(map.floorKey(5L)); - assertEquals((Object) 10L, map.floorKey(10L)); - assertEquals((Object) 10L, map.floorKey(15L)); - assertEquals((Object) 30L, map.floorKey(35L)); - 
s.close(); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction t = ts.begin(); + LongDataType keyType = LongDataType.INSTANCE; + TransactionMap map = t.openMap("test", keyType, keyType); + // firstEntry() & firstKey() + assertNull(map.firstEntry()); + assertNull(map.firstKey()); + // lastEntry() & lastKey() + assertNull(map.lastEntry()); + assertNull(map.lastKey()); + map.put(10L, 100L); + map.put(20L, 200L); + map.put(30L, 300L); + map.put(40L, 400L); + t.commit(); + t = ts.begin(); + map = t.openMap("test", keyType, keyType); + map.put(15L, 150L); + // The same transaction + assertEquals(new SimpleImmutableEntry<>(15L, 150L), map.higherEntry(10L)); + assertEquals((Object) 15L, map.higherKey(10L)); + t = ts.begin(); + map = t.openMap("test", keyType, keyType); + // Another transaction + // firstEntry() & firstKey() + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.firstEntry()); + assertEquals((Object) 10L, map.firstKey()); + // lastEntry() & lastKey() + assertEquals(new SimpleImmutableEntry<>(40L, 400L),map.lastEntry()); + assertEquals((Object) 40L, map.lastKey()); + // higherEntry() & higherKey() + assertEquals(new SimpleImmutableEntry<>(20L, 200L), map.higherEntry(10L)); + assertEquals((Object) 20L, map.higherKey(10L)); + assertEquals(new SimpleImmutableEntry<>(20L, 200L), map.higherEntry(15L)); + assertEquals((Object) 20L, map.higherKey(15L)); + assertNull(map.higherEntry(40L)); + assertNull(map.higherKey(40L)); + // ceilingEntry() & ceilingKey() + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.ceilingEntry(10L)); + assertEquals((Object) 10L, map.ceilingKey(10L)); + assertEquals(new SimpleImmutableEntry<>(20L, 200L), map.ceilingEntry(15L)); + assertEquals((Object) 20L, map.ceilingKey(15L)); + assertEquals(new SimpleImmutableEntry<>(40L, 400L), map.ceilingEntry(40L)); + assertEquals((Object) 40L, map.ceilingKey(40L)); + assertNull(map.higherEntry(45L)); + 
assertNull(map.higherKey(45L)); + // lowerEntry() & lowerKey() + assertNull(map.lowerEntry(10L)); + assertNull(map.lowerKey(10L)); + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.lowerEntry(15L)); + assertEquals((Object) 10L, map.lowerKey(15L)); + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.lowerEntry(20L)); + assertEquals((Object) 10L, map.lowerKey(20L)); + assertEquals(new SimpleImmutableEntry<>(20L, 200L), map.lowerEntry(25L)); + assertEquals((Object) 20L, map.lowerKey(25L)); + // floorEntry() & floorKey() + assertNull(map.floorEntry(5L)); + assertNull(map.floorKey(5L)); + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.floorEntry(10L)); + assertEquals((Object) 10L, map.floorKey(10L)); + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.floorEntry(15L)); + assertEquals((Object) 10L, map.floorKey(15L)); + assertEquals(new SimpleImmutableEntry<>(30L, 300L), map.floorEntry(35L)); + assertEquals((Object) 30L, map.floorKey(35L)); + } } private static void testConcurrentAddRemove() throws InterruptedException { - MVStore s = MVStore.open(null); - int threadCount = 3; - final int keyCount = 2; - final TransactionStore ts = new TransactionStore(s); - ts.init(); + try (MVStore s = MVStore.open(null)) { + int threadCount = 3; + int keyCount = 2; + TransactionStore ts = new TransactionStore(s); + ts.init(); + + final Random r = new Random(1); + + Task[] tasks = new Task[threadCount]; + for (int i = 0; i < threadCount; i++) { + Task task = new Task() { + @Override + public void call() { + while (!stop) { + Transaction tx = ts.begin(); + TransactionMap map = tx.openMap("data"); + int k = r.nextInt(keyCount); + try { + map.remove(k); + map.put(k, r.nextInt()); + } catch (MVStoreException e) { + // ignore and retry + } + tx.commit(); + } + } + }; + task.execute(); + tasks[i] = task; + } + Thread.sleep(1000); + for (Task t : tasks) { + t.get(); + } + } + } + + private void testConcurrentAdd() { + try (MVStore s = MVStore.open(null)) { + 
TransactionStore ts = new TransactionStore(s); + ts.init(); - final Random r = new Random(1); + Random r = new Random(1); + + AtomicInteger key = new AtomicInteger(); + AtomicInteger failCount = new AtomicInteger(); - Task[] tasks = new Task[threadCount]; - for (int i = 0; i < threadCount; i++) { Task task = new Task() { @Override - public void call() throws Exception { - TransactionMap map = null; + public void call() { while (!stop) { + int k = key.get(); Transaction tx = ts.begin(); - map = tx.openMap("data"); - int k = r.nextInt(keyCount); + TransactionMap map = tx.openMap("data"); try { - map.remove(k); map.put(k, r.nextInt()); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { + failCount.incrementAndGet(); // ignore and retry } tx.commit(); @@ -142,167 +207,108 @@ public void call() throws Exception { }; task.execute(); - tasks[i] = task; - } - Thread.sleep(1000); - for (Task t : tasks) { - t.get(); - } - s.close(); - } - - private void testConcurrentAdd() { - MVStore s; - s = MVStore.open(null); - final TransactionStore ts = new TransactionStore(s); - ts.init(); - - final Random r = new Random(1); - - final AtomicInteger key = new AtomicInteger(); - final AtomicInteger failCount = new AtomicInteger(); - - Task task = new Task() { - - @Override - public void call() throws Exception { - Transaction tx = null; - TransactionMap map = null; - while (!stop) { - int k = key.get(); - tx = ts.begin(); - map = tx.openMap("data"); - try { - map.put(k, r.nextInt()); - } catch (IllegalStateException e) { - failCount.incrementAndGet(); - // ignore and retry - } - tx.commit(); + int count = 100000; + for (int i = 0; i < count; i++) { + key.set(i); + Transaction tx = ts.begin(); + TransactionMap map = tx.openMap("data"); + try { + map.put(i, r.nextInt()); + } catch (MVStoreException e) { + failCount.incrementAndGet(); + // ignore and retry + } + tx.commit(); + if (failCount.get() > 0 && i > 4000) { + // stop earlier, if possible + count = i; + break; } 
} - - }; - task.execute(); - Transaction tx = null; - int count = 100000; - TransactionMap map = null; - for (int i = 0; i < count; i++) { - int k = i; - key.set(k); - tx = ts.begin(); - map = tx.openMap("data"); - try { - map.put(k, r.nextInt()); - } catch (IllegalStateException e) { - failCount.incrementAndGet(); - // ignore and retry - } - tx.commit(); - if (failCount.get() > 0 && i > 4000) { - // stop earlier, if possible - count = i; - break; - } + task.get(); + // we expect at least 10% the operations were successful + assertTrue(failCount + " >= " + (count * 0.9), + failCount.get() < count * 0.9); + // we expect at least a few failures + assertTrue(failCount.toString(), failCount.get() > 0); } - task.get(); - // we expect at least 10% the operations were successful - assertTrue(failCount.toString() + " >= " + (count * 0.9), - failCount.get() < count * 0.9); - // we expect at least a few failures - assertTrue(failCount.toString(), failCount.get() > 0); - s.close(); } private void testCountWithOpenTransactions() { - MVStore s; - TransactionStore ts; - s = MVStore.open(null); - ts = new TransactionStore(s); - ts.init(); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - Transaction tx1 = ts.begin(); - TransactionMap map1 = tx1.openMap("data"); - int size = 150; - for (int i = 0; i < size; i++) { - map1.put(i, i * 10); - } - tx1.commit(); - tx1 = ts.begin(); - map1 = tx1.openMap("data"); - - Transaction tx2 = ts.begin(); - TransactionMap map2 = tx2.openMap("data"); - - Random r = new Random(1); - for (int i = 0; i < size * 3; i++) { - assertEquals("op: " + i, size, map1.size()); - assertEquals("op: " + i, size, (int) map1.sizeAsLong()); - // keep the first 10%, and add 10% - int k = size / 10 + r.nextInt(size); - if (r.nextBoolean()) { - map2.remove(k); - } else { - map2.put(k, i); + Transaction tx1 = ts.begin(); + TransactionMap map1 = tx1.openMap("data"); + int size = 150; + for (int i = 0; i < size; i++) { 
+ map1.put(i, i * 10); + } + tx1.commit(); + tx1 = ts.begin(); + map1 = tx1.openMap("data"); + + Transaction tx2 = ts.begin(); + TransactionMap map2 = tx2.openMap("data"); + + Random r = new Random(1); + for (int i = 0; i < size * 3; i++) { + assertEquals("op: " + i, size, map1.size()); + assertEquals("op: " + i, size, (int) map1.sizeAsLong()); + // keep the first 10%, and add 10% + int k = size / 10 + r.nextInt(size); + if (r.nextBoolean()) { + map2.remove(k); + } else { + map2.put(k, i); + } } } - s.close(); } private void testConcurrentUpdate() { - MVStore s; - TransactionStore ts; - s = MVStore.open(null); - ts = new TransactionStore(s); - ts.init(); - - Transaction tx1 = ts.begin(); - TransactionMap map1 = tx1.openMap("data"); - map1.put(1, 10); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - Transaction tx2 = ts.begin(); - TransactionMap map2 = tx2.openMap("data"); - try { - map2.put(1, 20); - fail(); - } catch (IllegalStateException e) { - assertEquals(DataUtils.ERROR_TRANSACTION_LOCKED, - DataUtils.getErrorCode(e.getMessage())); + Transaction tx1 = ts.begin(); + TransactionMap map1 = tx1.openMap("data"); + map1.put(1, 10); + + Transaction tx2 = ts.begin(); + TransactionMap map2 = tx2.openMap("data"); + assertThrows(DataUtils.ERROR_TRANSACTION_LOCKED, () -> map2.put(1, 20)); + assertEquals(10, map1.get(1).intValue()); + assertNull(map2.get(1)); + tx1.commit(); + assertEquals(10, map2.get(1).intValue()); } - assertEquals(10, map1.get(1).intValue()); - assertNull(map2.get(1)); - tx1.commit(); - assertEquals(10, map2.get(1).intValue()); - - s.close(); } private void testRepeatedChange() { - MVStore s; - TransactionStore ts; - s = MVStore.open(null); - ts = new TransactionStore(s); - ts.init(); - - Transaction tx0 = ts.begin(); - TransactionMap map0 = tx0.openMap("data"); - map0.put(1, -1); - tx0.commit(); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + 
ts.init(); - Transaction tx = ts.begin(); - TransactionMap map = tx.openMap("data"); - for (int i = 0; i < 2000; i++) { - map.put(1, i); - } + Transaction tx0 = ts.begin(); + TransactionMap map0 = tx0.openMap("data"); + map0.put(1, -1); + tx0.commit(); - Transaction tx2 = ts.begin(); - TransactionMap map2 = tx2.openMap("data"); - assertEquals(-1, map2.get(1).intValue()); + Transaction tx = ts.begin(); + TransactionMap map = tx.openMap("data"); + for (int i = 0; i < 2000; i++) { + map.put(1, i); + } - s.close(); + Transaction tx2 = ts.begin(); + TransactionMap map2 = tx2.openMap("data"); + assertEquals(-1, map2.get(1).intValue()); + } } - private void testTransactionAge() throws Exception { + private void testTransactionAge() { MVStore s; TransactionStore ts; s = MVStore.open(null); @@ -325,25 +331,19 @@ private void testTransactionAge() throws Exception { } s = MVStore.open(null); - ts = new TransactionStore(s); - ts.init(); - ts.setMaxTransactionId(16); + TransactionStore ts2 = new TransactionStore(s); + ts2.init(); + ts2.setMaxTransactionId(16); ArrayList fifo = new ArrayList<>(); int open = 0; for (int i = 0; i < 64; i++) { - Transaction t = null; if (open >= 16) { - try { - t = ts.begin(); - fail(); - } catch (IllegalStateException e) { - // expected - too many open - } + assertThrows(MVStoreException.class, () -> ts2.begin()); Transaction first = fifo.remove(0); first.commit(); open--; } - t = ts.begin(); + Transaction t = ts2.begin(); t.openMap("data").put(i, i); fifo.add(t); open++; @@ -351,320 +351,222 @@ private void testTransactionAge() throws Exception { s.close(); } - private void testStopWhileCommitting() throws Exception { - String fileName = getBaseDir() + "/testStopWhileCommitting.h3"; - FileUtils.delete(fileName); - Random r = new Random(0); + private void testGetModifiedMaps() { + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - for (int i = 0; i < 10;) { - MVStore s; - TransactionStore ts; - 
Transaction tx; - TransactionMap m; + Transaction tx = ts.begin(); + tx.openMap("m1"); + tx.openMap("m2"); + tx.openMap("m3"); + assertFalse(tx.getChanges(0).hasNext()); + tx.commit(); - s = MVStore.open(fileName); - ts = new TransactionStore(s); - ts.init(); tx = ts.begin(); - s.setReuseSpace(false); - m = tx.openMap("test"); - final String value = "x" + i; - for (int j = 0; j < 1000; j++) { - m.put(j, value); - } - final AtomicInteger state = new AtomicInteger(); - final MVStore store = s; - final MVMap other = s.openMap("other"); - Task task = new Task() { + TransactionMap m1 = tx.openMap("m1"); + TransactionMap m2 = tx.openMap("m2"); + TransactionMap m3 = tx.openMap("m3"); + m1.put("1", "100"); + long sp = tx.setSavepoint(); + m2.put("1", "100"); + m3.put("1", "100"); + Iterator it = tx.getChanges(sp); + assertTrue(it.hasNext()); + Change c = it.next(); + assertEquals("m3", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertTrue(it.hasNext()); + c = it.next(); + assertEquals("m2", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertFalse(it.hasNext()); + + it = tx.getChanges(0); + assertTrue(it.hasNext()); + c = it.next(); + assertEquals("m3", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertTrue(it.hasNext()); + c = it.next(); + assertEquals("m2", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertTrue(it.hasNext()); + c = it.next(); + assertEquals("m1", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertFalse(it.hasNext()); + + tx.rollbackToSavepoint(sp); + + it = tx.getChanges(0); + assertTrue(it.hasNext()); + c = it.next(); + assertEquals("m1", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertFalse(it.hasNext()); - @Override - public void call() throws Exception { - for (int i = 0; !stop; i++) { - state.set(i); - other.put(i, value); - store.commit(); - } - } - }; - 
task.execute(); - // wait for the task to start - while (state.get() < 1) { - Thread.yield(); - } - // commit while writing in the task tx.commit(); - // wait for the task to stop - task.get(); - store.close(); - s = MVStore.open(fileName); - // roll back a bit, until we have some undo log entries - for (int back = 0; back < 100; back++) { - int minus = r.nextInt(10); - s.rollbackTo(Math.max(0, s.getCurrentVersion() - minus)); - if (hasDataUndoLog(s)) { - break; - } - } - // re-open TransactionStore, because we rolled back - // underlying MVStore without rolling back TransactionStore - s.close(); - s = MVStore.open(fileName); - ts = new TransactionStore(s); - List list = ts.getOpenTransactions(); - if (list.size() != 0) { - tx = list.get(0); - if (tx.getStatus() == Transaction.STATUS_COMMITTED) { - i++; - } - } - s.close(); - FileUtils.delete(fileName); - assertFalse(FileUtils.exists(fileName)); - } - } - - private static boolean hasDataUndoLog(MVStore s) { - for (int i = 0; i < 255; i++) { - if (s.hasData(TransactionStore.getUndoLogName(true, 1))) { - return true; - } } - return false; } - private void testGetModifiedMaps() { - MVStore s = MVStore.open(null); - TransactionStore ts = new TransactionStore(s); - ts.init(); - Transaction tx; - TransactionMap m1, m2, m3; - long sp; - - tx = ts.begin(); - m1 = tx.openMap("m1"); - m2 = tx.openMap("m2"); - m3 = tx.openMap("m3"); - assertFalse(tx.getChanges(0).hasNext()); - tx.commit(); - - tx = ts.begin(); - m1 = tx.openMap("m1"); - m2 = tx.openMap("m2"); - m3 = tx.openMap("m3"); - m1.put("1", "100"); - sp = tx.setSavepoint(); - m2.put("1", "100"); - m3.put("1", "100"); - Iterator it = tx.getChanges(sp); - assertTrue(it.hasNext()); - Change c; - c = it.next(); - assertEquals("m3", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertTrue(it.hasNext()); - c = it.next(); - assertEquals("m2", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertFalse(it.hasNext()); - 
- it = tx.getChanges(0); - assertTrue(it.hasNext()); - c = it.next(); - assertEquals("m3", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertTrue(it.hasNext()); - c = it.next(); - assertEquals("m2", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertTrue(it.hasNext()); - c = it.next(); - assertEquals("m1", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertFalse(it.hasNext()); - - tx.rollbackToSavepoint(sp); - - it = tx.getChanges(0); - assertTrue(it.hasNext()); - c = it.next(); - assertEquals("m1", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertFalse(it.hasNext()); - - tx.commit(); + private void testKeyIterator() { + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - s.close(); - } + Transaction tx = ts.begin(); + TransactionMap m = tx.openMap("test"); + m.put("1", "Hello"); + m.put("2", "World"); + m.put("3", "."); + tx.commit(); - private void testKeyIterator() { - MVStore s = MVStore.open(null); - TransactionStore ts = new TransactionStore(s); - ts.init(); - Transaction tx, tx2; - TransactionMap m, m2; - Iterator it, it2; - Iterator> entryIt; - - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hello"); - m.put("2", "World"); - m.put("3", "."); - tx.commit(); - - tx2 = ts.begin(); - m2 = tx2.openMap("test"); - m2.remove("2"); - m2.put("3", "!"); - m2.put("4", "?"); - - tx = ts.begin(); - m = tx.openMap("test"); - it = m.keyIterator(null); - assertTrue(it.hasNext()); - assertEquals("1", it.next()); - assertTrue(it.hasNext()); - assertEquals("2", it.next()); - assertTrue(it.hasNext()); - assertEquals("3", it.next()); - assertFalse(it.hasNext()); - - entryIt = m.entrySet().iterator(); - assertTrue(entryIt.hasNext()); - assertEquals("1", entryIt.next().getKey()); - assertTrue(entryIt.hasNext()); - assertEquals("2", entryIt.next().getKey()); - assertTrue(entryIt.hasNext()); - 
assertEquals("3", entryIt.next().getKey()); - assertFalse(entryIt.hasNext()); - - it2 = m2.keyIterator(null); - assertTrue(it2.hasNext()); - assertEquals("1", it2.next()); - assertTrue(it2.hasNext()); - assertEquals("3", it2.next()); - assertTrue(it2.hasNext()); - assertEquals("4", it2.next()); - assertFalse(it2.hasNext()); + Transaction tx2 = ts.begin(); + TransactionMap m2 = tx2.openMap("test"); + m2.remove("2"); + m2.put("3", "!"); + m2.put("4", "?"); - s.close(); + tx = ts.begin(); + m = tx.openMap("test"); + Iterator it = m.keyIterator(null); + assertTrue(it.hasNext()); + assertEquals("1", it.next()); + assertTrue(it.hasNext()); + assertEquals("2", it.next()); + assertTrue(it.hasNext()); + assertEquals("3", it.next()); + assertFalse(it.hasNext()); + + Iterator> entryIt = m.entrySet().iterator(); + assertTrue(entryIt.hasNext()); + assertEquals("1", entryIt.next().getKey()); + assertTrue(entryIt.hasNext()); + assertEquals("2", entryIt.next().getKey()); + assertTrue(entryIt.hasNext()); + assertEquals("3", entryIt.next().getKey()); + assertFalse(entryIt.hasNext()); + + Iterator it2 = m2.keyIterator(null); + assertTrue(it2.hasNext()); + assertEquals("1", it2.next()); + assertTrue(it2.hasNext()); + assertEquals("3", it2.next()); + assertTrue(it2.hasNext()); + assertEquals("4", it2.next()); + assertFalse(it2.hasNext()); + } } private void testTwoPhaseCommit() { String fileName = getBaseDir() + "/testTwoPhaseCommit.h3"; FileUtils.delete(fileName); - MVStore s; - TransactionStore ts; - Transaction tx; - Transaction txOld; TransactionMap m; - List list; - s = MVStore.open(fileName); - ts = new TransactionStore(s); - ts.init(); - tx = ts.begin(); - assertEquals(null, tx.getName()); - tx.setName("first transaction"); - assertEquals("first transaction", tx.getName()); - assertEquals(1, tx.getId()); - assertEquals(Transaction.STATUS_OPEN, tx.getStatus()); - m = tx.openMap("test"); - m.put("1", "Hello"); - list = ts.getOpenTransactions(); - assertEquals(1, list.size()); - 
txOld = list.get(0); - assertTrue(tx.getId() == txOld.getId()); - assertEquals("first transaction", txOld.getName()); - s.commit(); - ts.close(); - s.close(); + try (MVStore s = MVStore.open(fileName)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction tx = ts.begin(); + assertEquals(null, tx.getName()); + tx.setName("first transaction"); + assertEquals("first transaction", tx.getName()); + assertEquals(1, tx.getId()); + assertEquals(Transaction.STATUS_OPEN, tx.getStatus()); + m = tx.openMap("test"); + m.put("1", "Hello"); + List list = ts.getOpenTransactions(); + assertEquals(1, list.size()); + Transaction txOld = list.get(0); + assertTrue(tx.getId() == txOld.getId()); + assertEquals("first transaction", txOld.getName()); + s.commit(); + ts.close(); + } - s = MVStore.open(fileName); - ts = new TransactionStore(s); - ts.init(); - tx = ts.begin(); - assertEquals(2, tx.getId()); - m = tx.openMap("test"); - assertEquals(null, m.get("1")); - m.put("2", "Hello"); - list = ts.getOpenTransactions(); - assertEquals(2, list.size()); - txOld = list.get(0); - assertEquals(1, txOld.getId()); - assertEquals(Transaction.STATUS_OPEN, txOld.getStatus()); - assertEquals("first transaction", txOld.getName()); - txOld.prepare(); - assertEquals(Transaction.STATUS_PREPARED, txOld.getStatus()); - txOld = list.get(1); - txOld.commit(); - s.commit(); - s.close(); + try (MVStore s = MVStore.open(fileName)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction tx = ts.begin(); + assertEquals(2, tx.getId()); + m = tx.openMap("test"); + assertEquals(null, m.get("1")); + m.put("2", "Hello"); + List list = ts.getOpenTransactions(); + assertEquals(2, list.size()); + Transaction txOld = list.get(0); + assertEquals(1, txOld.getId()); + assertEquals(Transaction.STATUS_OPEN, txOld.getStatus()); + assertEquals("first transaction", txOld.getName()); + txOld.prepare(); + assertEquals(Transaction.STATUS_PREPARED, txOld.getStatus()); + txOld = 
list.get(1); + txOld.commit(); + s.commit(); + } - s = MVStore.open(fileName); - ts = new TransactionStore(s); - ts.init(); - tx = ts.begin(); - m = tx.openMap("test"); - m.put("3", "Test"); - assertEquals(2, tx.getId()); - list = ts.getOpenTransactions(); - assertEquals(2, list.size()); - txOld = list.get(1); - assertEquals(2, txOld.getId()); - assertEquals(Transaction.STATUS_OPEN, txOld.getStatus()); - assertEquals(null, txOld.getName()); - txOld.rollback(); - txOld = list.get(0); - assertEquals(1, txOld.getId()); - assertEquals(Transaction.STATUS_PREPARED, txOld.getStatus()); - assertEquals("first transaction", txOld.getName()); - txOld.commit(); - assertEquals("Hello", m.get("1")); - s.close(); + try (MVStore s = MVStore.open(fileName)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction tx = ts.begin(); + m = tx.openMap("test"); + m.put("3", "Test"); + assertEquals(2, tx.getId()); + List list = ts.getOpenTransactions(); + assertEquals(2, list.size()); + Transaction txOld = list.get(1); + assertEquals(2, txOld.getId()); + assertEquals(Transaction.STATUS_OPEN, txOld.getStatus()); + assertEquals(null, txOld.getName()); + txOld.rollback(); + txOld = list.get(0); + assertEquals(1, txOld.getId()); + assertEquals(Transaction.STATUS_PREPARED, txOld.getStatus()); + assertEquals("first transaction", txOld.getName()); + txOld.commit(); + assertEquals("Hello", m.get("1")); + } FileUtils.delete(fileName); } private void testSavepoint() { - MVStore s = MVStore.open(null); - TransactionStore ts = new TransactionStore(s); - ts.init(); - Transaction tx; - TransactionMap m; + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hello"); - m.put("2", "World"); - m.put("1", "Hallo"); - m.remove("2"); - m.put("3", "!"); - long logId = tx.setSavepoint(); - m.put("1", "Hi"); - m.put("2", "."); - m.remove("3"); - tx.rollbackToSavepoint(logId); - 
assertEquals("Hallo", m.get("1")); - assertNull(m.get("2")); - assertEquals("!", m.get("3")); - tx.rollback(); - - tx = ts.begin(); - m = tx.openMap("test"); - assertNull(m.get("1")); - assertNull(m.get("2")); - assertNull(m.get("3")); - - ts.close(); - s.close(); + Transaction tx = ts.begin(); + TransactionMap m = tx.openMap("test"); + m.put("1", "Hello"); + m.put("2", "World"); + m.put("1", "Hallo"); + m.remove("2"); + m.put("3", "!"); + long logId = tx.setSavepoint(); + m.put("1", "Hi"); + m.put("2", "."); + m.remove("3"); + tx.rollbackToSavepoint(logId); + assertEquals("Hallo", m.get("1")); + assertNull(m.get("2")); + assertEquals("!", m.get("3")); + tx.rollback(); + + tx = ts.begin(); + m = tx.openMap("test"); + assertNull(m.get("1")); + assertNull(m.get("2")); + assertNull(m.get("3")); + + ts.close(); + } } private void testCompareWithPostgreSQL() throws Exception { @@ -688,334 +590,420 @@ private void testCompareWithPostgreSQL() throws Exception { statements.get(0).execute( "create table test(id int primary key, name varchar(255))"); - MVStore s = MVStore.open(null); - TransactionStore ts = new TransactionStore(s); - ts.init(); - for (int i = 0; i < connectionCount; i++) { - Statement stat = statements.get(i); - // 100 ms to avoid blocking (the test is single threaded) - stat.execute("set statement_timeout to 100"); - Connection c = stat.getConnection(); - c.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - c.setAutoCommit(false); - Transaction transaction = ts.begin(); - transactions.add(transaction); - TransactionMap map; - map = transaction.openMap("test"); - maps.add(map); - } - StringBuilder buff = new StringBuilder(); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + for (int i = 0; i < connectionCount; i++) { + Statement stat = statements.get(i); + // 100 ms to avoid blocking (the test is single threaded) + stat.execute("set statement_timeout to 100"); + Connection c = 
stat.getConnection(); + c.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + c.setAutoCommit(false); + Transaction transaction = ts.begin(); + transactions.add(transaction); + TransactionMap map; + map = transaction.openMap("test"); + maps.add(map); + } + StringBuilder buff = new StringBuilder(); - Random r = new Random(1); - try { - for (int i = 0; i < opCount; i++) { - int connIndex = r.nextInt(connectionCount); - Statement stat = statements.get(connIndex); - Transaction transaction = transactions.get(connIndex); - TransactionMap map = maps.get(connIndex); - if (transaction == null) { - transaction = ts.begin(); - map = transaction.openMap("test"); - transactions.set(connIndex, transaction); - maps.set(connIndex, map); - - // read all data, to get a snapshot - ResultSet rs = stat.executeQuery( - "select * from test order by id"); - buff.append(i).append(": [" + connIndex + "]="); - int size = 0; - while (rs.next()) { - buff.append(' '); - int k = rs.getInt(1); - String v = rs.getString(2); - buff.append(k).append(':').append(v); - assertEquals(v, map.get(k)); - size++; - } - buff.append('\n'); - if (size != map.sizeAsLong()) { - assertEquals(size, map.sizeAsLong()); - } - } - int x = r.nextInt(rowCount); - int y = r.nextInt(rowCount); - buff.append(i).append(": [" + connIndex + "]: "); - ResultSet rs = null; - switch (r.nextInt(7)) { - case 0: - buff.append("commit"); - stat.getConnection().commit(); - transaction.commit(); - transactions.set(connIndex, null); - break; - case 1: - buff.append("rollback"); - stat.getConnection().rollback(); - transaction.rollback(); - transactions.set(connIndex, null); - break; - case 2: - // insert or update - String old = map.get(x); - if (old == null) { - buff.append("insert " + x + "=" + y); - if (map.tryPut(x, "" + y)) { - stat.execute("insert into test values(" + x + ", '" + y + "')"); - } else { - buff.append(" -> row was locked"); - // the statement would time out in PostgreSQL - // TODO test sometimes if 
timeout occurs + Random r = new Random(1); + try { + for (int i = 0; i < opCount; i++) { + int connIndex = r.nextInt(connectionCount); + Statement stat = statements.get(connIndex); + Transaction transaction = transactions.get(connIndex); + TransactionMap map = maps.get(connIndex); + if (transaction == null) { + transaction = ts.begin(); + map = transaction.openMap("test"); + transactions.set(connIndex, transaction); + maps.set(connIndex, map); + + // read all data, to get a snapshot + ResultSet rs = stat.executeQuery( + "select * from test order by id"); + buff.append(i).append(": [" + connIndex + "]="); + int size = 0; + while (rs.next()) { + buff.append(' '); + int k = rs.getInt(1); + String v = rs.getString(2); + buff.append(k).append(':').append(v); + assertEquals(v, map.get(k)); + size++; } - } else { - buff.append("update " + x + "=" + y + " (old:" + old + ")"); - if (map.tryPut(x, "" + y)) { - int c = stat.executeUpdate("update test set name = '" + y - + "' where id = " + x); - assertEquals(1, c); - } else { - buff.append(" -> row was locked"); - // the statement would time out in PostgreSQL - // TODO test sometimes if timeout occurs + buff.append('\n'); + if (size != map.sizeAsLong()) { + assertEquals(size, map.sizeAsLong()); } } - break; - case 3: - buff.append("delete " + x); - try { - int c = stat.executeUpdate("delete from test where id = " + x); - if (c == 1) { - map.remove(x); - } else { - assertNull(map.get(x)); - } - } catch (SQLException e) { - assertNotNull(map.get(x)); - assertFalse(map.tryRemove(x)); - // PostgreSQL needs to rollback - buff.append(" -> rollback"); - stat.getConnection().rollback(); - transaction.rollback(); - transactions.set(connIndex, null); + int x = r.nextInt(rowCount); + int y = r.nextInt(rowCount); + buff.append(i).append(": [" + connIndex + "]: "); + ResultSet rs = null; + switch (r.nextInt(7)) { + case 0: + buff.append("commit"); + stat.getConnection().commit(); + transaction.commit(); + transactions.set(connIndex, 
null); + break; + case 1: + buff.append("rollback"); + stat.getConnection().rollback(); + transaction.rollback(); + transactions.set(connIndex, null); + break; + case 2: + // insert or update + String old = map.get(x); + if (old == null) { + buff.append("insert " + x + "=" + y); + if (map.tryPut(x, "" + y)) { + stat.execute("insert into test values(" + x + ", '" + y + "')"); + } else { + buff.append(" -> row was locked"); + // the statement would time out in PostgreSQL + // TODO test sometimes if timeout occurs + } + } else { + buff.append("update " + x + "=" + y + " (old:" + old + ")"); + if (map.tryPut(x, "" + y)) { + int c = stat.executeUpdate("update test set name = '" + y + + "' where id = " + x); + assertEquals(1, c); + } else { + buff.append(" -> row was locked"); + // the statement would time out in PostgreSQL + // TODO test sometimes if timeout occurs + } + } + break; + case 3: + buff.append("delete " + x); + try { + int c = stat.executeUpdate("delete from test where id = " + x); + if (c == 1) { + map.remove(x); + } else { + assertNull(map.get(x)); + } + } catch (SQLException e) { + assertNotNull(map.get(x)); + assertFalse(map.tryRemove(x)); + // PostgreSQL needs to rollback + buff.append(" -> rollback"); + stat.getConnection().rollback(); + transaction.rollback(); + transactions.set(connIndex, null); + } + break; + case 4: + case 5: + case 6: + rs = stat.executeQuery("select * from test where id = " + x); + String expected = rs.next() ? rs.getString(2) : null; + buff.append("select " + x + "=" + expected); + assertEquals("i:" + i, expected, map.get(x)); + break; } - break; - case 4: - case 5: - case 6: - rs = stat.executeQuery("select * from test where id = " + x); - String expected = rs.next() ? 
rs.getString(2) : null; - buff.append("select " + x + "=" + expected); - assertEquals("i:" + i, expected, map.get(x)); - break; + buff.append('\n'); } - buff.append('\n'); + } catch (Exception e) { + e.printStackTrace(); + fail(buff.toString()); } - } catch (Exception e) { - e.printStackTrace(); - fail(buff.toString()); - } - for (Statement stat : statements) { - stat.getConnection().close(); + for (Statement stat : statements) { + stat.getConnection().close(); + } + ts.close(); } - ts.close(); - s.close(); } private void testConcurrentTransactionsReadCommitted() { - MVStore s = MVStore.open(null); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - TransactionStore ts = new TransactionStore(s); - ts.init(); - Transaction tx1, tx2; - TransactionMap m1, m2; - - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - m1.put("1", "Hi"); - m1.put("3", "."); - tx1.commit(); - - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - m1.put("1", "Hello"); - m1.put("2", "World"); - m1.remove("3"); - tx1.commit(); - - // start new transaction to read old data - tx2 = ts.begin(); - m2 = tx2.openMap("test"); - - // start transaction tx1, update/delete/add - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - m1.put("1", "Hallo"); - m1.remove("2"); - m1.put("3", "!"); - - assertEquals("Hello", m2.get("1")); - assertEquals("World", m2.get("2")); - assertNull(m2.get("3")); - - tx1.commit(); - - assertEquals("Hallo", m2.get("1")); - assertNull(m2.get("2")); - assertEquals("!", m2.get("3")); - - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - m1.put("2", "World"); - - assertNull(m2.get("2")); - assertFalse(m2.tryRemove("2")); - assertFalse(m2.tryPut("2", "Welt")); - - tx2 = ts.begin(); - m2 = tx2.openMap("test"); - assertNull(m2.get("2")); - m1.remove("2"); - assertNull(m2.get("2")); - tx1.commit(); - - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - assertNull(m1.get("2")); - m1.put("2", "World"); - m1.put("2", "Welt"); - tx1.rollback(); - - tx1 
= ts.begin(); - m1 = tx1.openMap("test"); - assertNull(m1.get("2")); - - ts.close(); - s.close(); + Transaction tx1 = ts.begin(); + TransactionMap m1 = tx1.openMap("test"); + m1.put("1", "Hi"); + m1.put("3", "."); + tx1.commit(); + + tx1 = ts.begin(); + m1 = tx1.openMap("test"); + m1.put("1", "Hello"); + m1.put("2", "World"); + m1.remove("3"); + tx1.commit(); + + // start new transaction to read old data + Transaction tx2 = ts.begin(); + TransactionMap m2 = tx2.openMap("test"); + + // start transaction tx1, update/delete/add + tx1 = ts.begin(); + m1 = tx1.openMap("test"); + m1.put("1", "Hallo"); + m1.remove("2"); + m1.put("3", "!"); + + assertEquals("Hello", m2.get("1")); + assertEquals("World", m2.get("2")); + assertNull(m2.get("3")); + + tx1.commit(); + + assertEquals("Hallo", m2.get("1")); + assertNull(m2.get("2")); + assertEquals("!", m2.get("3")); + + tx1 = ts.begin(); + m1 = tx1.openMap("test"); + m1.put("2", "World"); + + assertNull(m2.get("2")); + assertFalse(m2.tryRemove("2")); + assertFalse(m2.tryPut("2", "Welt")); + + tx2 = ts.begin(); + m2 = tx2.openMap("test"); + assertNull(m2.get("2")); + m1.remove("2"); + assertNull(m2.get("2")); + tx1.commit(); + + tx1 = ts.begin(); + m1 = tx1.openMap("test"); + assertNull(m1.get("2")); + m1.put("2", "World"); + m1.put("2", "Welt"); + tx1.rollback(); + + tx1 = ts.begin(); + m1 = tx1.openMap("test"); + assertNull(m1.get("2")); + + ts.close(); + } } private void testSingleConnection() { - MVStore s = MVStore.open(null); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - TransactionStore ts = new TransactionStore(s); - ts.init(); + // add, rollback + Transaction tx = ts.begin(); + TransactionMap m = tx.openMap("test"); + m.put("1", "Hello"); + assertEquals("Hello", m.get("1")); + m.put("2", "World"); + assertEquals("World", m.get("2")); + tx.rollback(); + tx = ts.begin(); + m = tx.openMap("test"); + assertNull(m.get("1")); + assertNull(m.get("2")); - Transaction tx; 
- TransactionMap m; + // add, commit + tx = ts.begin(); + m = tx.openMap("test"); + m.put("1", "Hello"); + m.put("2", "World"); + assertEquals("Hello", m.get("1")); + assertEquals("World", m.get("2")); + tx.commit(); + tx = ts.begin(); + m = tx.openMap("test"); + assertEquals("Hello", m.get("1")); + assertEquals("World", m.get("2")); - // add, rollback - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hello"); - assertEquals("Hello", m.get("1")); - m.put("2", "World"); - assertEquals("World", m.get("2")); - tx.rollback(); - tx = ts.begin(); - m = tx.openMap("test"); - assertNull(m.get("1")); - assertNull(m.get("2")); - - // add, commit - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hello"); - m.put("2", "World"); - assertEquals("Hello", m.get("1")); - assertEquals("World", m.get("2")); - tx.commit(); - tx = ts.begin(); - m = tx.openMap("test"); - assertEquals("Hello", m.get("1")); - assertEquals("World", m.get("2")); - - // update+delete+insert, rollback - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hallo"); - m.remove("2"); - m.put("3", "!"); - assertEquals("Hallo", m.get("1")); - assertNull(m.get("2")); - assertEquals("!", m.get("3")); - tx.rollback(); - tx = ts.begin(); - m = tx.openMap("test"); - assertEquals("Hello", m.get("1")); - assertEquals("World", m.get("2")); - assertNull(m.get("3")); - - // update+delete+insert, commit - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hallo"); - m.remove("2"); - m.put("3", "!"); - assertEquals("Hallo", m.get("1")); - assertNull(m.get("2")); - assertEquals("!", m.get("3")); - tx.commit(); - tx = ts.begin(); - m = tx.openMap("test"); - assertEquals("Hallo", m.get("1")); - assertNull(m.get("2")); - assertEquals("!", m.get("3")); - - ts.close(); - s.close(); + // update+delete+insert, rollback + tx = ts.begin(); + m = tx.openMap("test"); + m.put("1", "Hallo"); + m.remove("2"); + m.put("3", "!"); + assertEquals("Hallo", m.get("1")); + assertNull(m.get("2")); + assertEquals("!", 
m.get("3")); + tx.rollback(); + tx = ts.begin(); + m = tx.openMap("test"); + assertEquals("Hello", m.get("1")); + assertEquals("World", m.get("2")); + assertNull(m.get("3")); + + // update+delete+insert, commit + tx = ts.begin(); + m = tx.openMap("test"); + m.put("1", "Hallo"); + m.remove("2"); + m.put("3", "!"); + assertEquals("Hallo", m.get("1")); + assertNull(m.get("2")); + assertEquals("!", m.get("3")); + tx.commit(); + tx = ts.begin(); + m = tx.openMap("test"); + assertEquals("Hallo", m.get("1")); + assertNull(m.get("2")); + assertEquals("!", m.get("3")); + + ts.close(); + } } - private static void testStoreMultiThreadedReads() throws Exception { - MVStore s = MVStore.open(null); - final TransactionStore ts = new TransactionStore(s); + private static void testStoreMultiThreadedReads() { + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction t = ts.begin(); + TransactionMap mapA = t.openMap("a"); + mapA.put(1, 0); + t.commit(); - ts.init(); - Transaction t = ts.begin(); - TransactionMap mapA = t.openMap("a"); - mapA.put(1, 0); - t.commit(); - - Task task = new Task() { - @Override - public void call() throws Exception { - for (int i = 0; !stop; i++) { + Task task = new Task() { + @Override + public void call() { + for (int i = 0; !stop; i++) { + Transaction tx = ts.begin(); + TransactionMap mapA = tx.openMap("a"); + while (!mapA.tryPut(1, i)) { + // repeat + } + tx.commit(); + + // map B transaction + // the other thread will get a map A uncommitted value, + // but by the time it tries to walk back to the committed + // value, the undoLog has changed + tx = ts.begin(); + TransactionMap mapB = tx.openMap("b"); + // put a new value to the map; this will cause a map B + // undoLog entry to be created with a null pre-image value + mapB.tryPut(i, -i); + // this is where the real race condition occurs: + // some other thread might get the B log entry + // for this transaction rather than the uncommitted A 
log + // entry it is expecting + tx.commit(); + } + } + }; + task.execute(); + try { + for (int i = 0; i < 10000; i++) { Transaction tx = ts.begin(); - TransactionMap mapA = tx.openMap("a"); - while (!mapA.tryPut(1, i)) { - // repeat + mapA = tx.openMap("a"); + if (mapA.get(1) == null) { + throw new AssertionError("key not found"); } tx.commit(); - - // map B transaction - // the other thread will get a map A uncommitted value, - // but by the time it tries to walk back to the committed - // value, the undoLog has changed - tx = ts.begin(); - TransactionMap mapB = tx.openMap("b"); - // put a new value to the map; this will cause a map B - // undoLog entry to be created with a null pre-image value - mapB.tryPut(i, -i); - // this is where the real race condition occurs: - // some other thread might get the B log entry - // for this transaction rather than the uncommitted A log - // entry it is expecting - tx.commit(); } + } finally { + task.get(); } - }; - task.execute(); - try { - for (int i = 0; i < 10000; i++) { - Transaction tx = ts.begin(); - mapA = tx.openMap("a"); - if (mapA.get(1) == null) { - throw new AssertionError("key not found"); - } - tx.commit(); + ts.close(); + } + } + + private void testCommitAfterMapRemoval() { + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction t = ts.begin(); + TransactionMap map = t.openMap("test", LongDataType.INSTANCE, StringDataType.INSTANCE); + map.put(1L, "A"); + s.removeMap("test"); + try { + t.commit(); + } finally { + // commit should not fail, but even if it does + // transaction should be cleanly removed and store remains operational + assertTrue(ts.getOpenTransactions().isEmpty()); + assertFalse(ts.hasMap("test")); + t = ts.begin(); + map = t.openMap("test", LongDataType.INSTANCE, StringDataType.INSTANCE); + assertTrue(map.isEmpty()); + map.put(2L, "B"); } - } finally { - task.get(); } - ts.close(); } + private void testDeadLock() { + int threadCount = 
2; + for (int i = 1; i < threadCount; i++) { + testDeadLock(threadCount, i); + } + } + + private void testDeadLock(int threadCount, int stepCount) { + try (MVStore s = MVStore.open(null)) { + s.setAutoCommitDelay(0); + TransactionStore ts = new TransactionStore(s, + new MetaType<>(null, s.backgroundExceptionHandler), new ObjectDataType(), 10000); + ts.init(); + Transaction t = ts.begin(); + TransactionMap m = t.openMap("test", LongDataType.INSTANCE, LongDataType.INSTANCE); + for (int i = 0; i < threadCount; i++) { + m.put((long)i, 0L); + } + t.commit(); + + CountDownLatch latch = new CountDownLatch(threadCount); + Task[] tasks = new Task[threadCount]; + for (int i = 0; i < threadCount; i++) { + long initialKey = i; + tasks[i] = new Task() { + @Override + public void call() throws Exception { + Transaction tx = ts.begin(); + try { + TransactionMap map = tx.openMap("test", LongDataType.INSTANCE, + LongDataType.INSTANCE); + long key = initialKey; + map.computeIfPresent(key, (k, v) -> v + 1); + latch.countDown(); + latch.await(); + for (int j = 0; j < stepCount; j++) { + key = (key + 1) % threadCount; + map.lock(key); + map.put(key, map.get(key) + 1); + } + tx.commit(); + } catch (Throwable e) { + tx.rollback(); + throw e; + } + } + }.execute(); + } + int failureCount = 0; + for (Task task : tasks) { + Exception exception = task.getException(); + if (exception != null) { + ++failureCount; + assertEquals(MVStoreException.class, exception.getClass()); + checkErrorCode(DataUtils.ERROR_TRANSACTIONS_DEADLOCK, exception); + } + } + assertEquals(" "+stepCount, stepCount, failureCount); + t = ts.begin(); + m = t.openMap("test", LongDataType.INSTANCE, LongDataType.INSTANCE); + int count = 0; + for (int i = 0; i < threadCount; i++) { + Long value = m.get((long) i); + assertNotNull("Key " + i, value); + count += value; + } + t.commit(); + assertEquals(" "+stepCount, (stepCount+1) * (threadCount - failureCount), count); + } + } } diff --git 
a/h2/src/test/org/h2/test/store/package.html b/h2/src/test/org/h2/test/store/package.html index 8a9fbaaaa8..f71790e7b3 100644 --- a/h2/src/test/org/h2/test/store/package.html +++ b/h2/src/test/org/h2/test/store/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/synth/BnfRandom.java b/h2/src/test/org/h2/test/synth/BnfRandom.java index 5f90e156bb..cc35923947 100644 --- a/h2/src/test/org/h2/test/synth/BnfRandom.java +++ b/h2/src/test/org/h2/test/synth/BnfRandom.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -152,18 +152,7 @@ private String getRandomFixed(int type) { @Override public void visitRuleList(boolean or, ArrayList list) { if (or) { - if (level > 10) { - if (level > 1000) { - // better than stack overflow - throw new AssertionError(); - } - list.get(0).accept(this); - return; - } - int idx = random.nextInt(list.size()); - level++; - list.get(idx).accept(this); - level--; + visitOr(list); return; } StringBuilder buff = new StringBuilder(); @@ -187,11 +176,42 @@ public void visitRuleOptional(Rule rule) { sql = ""; } + @Override + public void visitRuleOptional(ArrayList list) { + if (level > 10 ? 
random.nextInt(level) == 1 : random.nextInt(4) == 1) { + level++; + visitOr(list); + level--; + return; + } + sql = ""; + } + + private void visitOr(ArrayList list) throws AssertionError { + if (level > 10) { + if (level > 1000) { + // better than stack overflow + throw new AssertionError(); + } + list.get(0).accept(this); + return; + } + int idx = random.nextInt(list.size()); + level++; + list.get(idx).accept(this); + level--; + } + @Override public void visitRuleRepeat(boolean comma, Rule rule) { rule.accept(this); } + @Override + public void visitRuleExtension(Rule rule, boolean compatibility) { + rule.accept(this); + } + public void setSeed(int seed) { random.setSeed(seed); } diff --git a/h2/src/test/org/h2/test/synth/OutputCatcher.java b/h2/src/test/org/h2/test/synth/OutputCatcher.java index d179c8b27d..2ab3413d44 100644 --- a/h2/src/test/org/h2/test/synth/OutputCatcher.java +++ b/h2/src/test/org/h2/test/synth/OutputCatcher.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; diff --git a/h2/src/test/org/h2/test/synth/TestBtreeIndex.java b/h2/src/test/org/h2/test/synth/TestBtreeIndex.java index f97000e0df..42dfae5ec1 100644 --- a/h2/src/test/org/h2/test/synth/TestBtreeIndex.java +++ b/h2/src/test/org/h2/test/synth/TestBtreeIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth; diff --git a/h2/src/test/org/h2/test/synth/TestConcurrentUpdate.java b/h2/src/test/org/h2/test/synth/TestConcurrentUpdate.java index 9fd0f1ecba..072029b1a1 100644 --- a/h2/src/test/org/h2/test/synth/TestConcurrentUpdate.java +++ b/h2/src/test/org/h2/test/synth/TestConcurrentUpdate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -8,8 +8,12 @@ import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collection; import java.util.Random; +import java.util.concurrent.CountDownLatch; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.util.Task; @@ -29,21 +33,23 @@ public class TestConcurrentUpdate extends TestDb { */ public static void main(String... 
a) throws Exception { org.h2.test.TestAll config = new org.h2.test.TestAll(); - config.memory = true; - config.multiThreaded = true; +// config.memory = true; // config.mvStore = false; System.out.println(config); TestBase test = createCaller().init(config); for (int i = 0; i < 10; i++) { System.out.println("Pass #" + i); - test.config.beforeTest(); - test.test(); - test.config.afterTest(); + test.testFromMain(); } } @Override public void test() throws Exception { + testConcurrent(); + testConcurrentShutdown(); + } + + private void testConcurrent() throws Exception { deleteDb("concurrent"); final String url = getURL("concurrent;LOCK_TIMEOUT=2000", true); try (Connection conn = getConnection(url)) { @@ -113,4 +119,56 @@ public void call() throws Exception { assert success; } } + + private void testConcurrentShutdown() throws SQLException { + if (config.memory) { + return; + } + deleteDb(getTestName()); + final String url = getURL(getTestName(), true); + try (Connection connection = getConnection(url)) { + connection.createStatement().execute("create table test(id int primary key, v int)"); + connection.createStatement().execute("insert into test values(0, 0)"); + } + int len = 2; + final CountDownLatch latch = new CountDownLatch(len + 1); + Collection tasks = new ArrayList<>(); + + tasks.add(new Task() { + @Override + public void call() throws Exception { + try (Connection c = getConnection(url)) { + c.setAutoCommit(false); + c.createStatement().execute("insert into test values(1, 1)"); + latch.countDown(); + latch.await(); + } + } + }); + + for (int i = 0; i < len; i++) { + tasks.add(new Task() { + @Override + public void call() throws Exception { + try (Connection c = getConnection(url)) { + Statement stmt = c.createStatement(); + latch.countDown(); + latch.await(); + stmt.execute("shutdown"); + } + } + }); + } + for (Task task : tasks) { + task.execute(); + } + for (Task task : tasks) { + task.getException(); + } + try (Connection connection = 
getConnection(getTestName())) { + ResultSet rs = connection.createStatement().executeQuery("select count(*) from test"); + rs.next(); + assertEquals(1, rs.getInt(1)); + } + } } diff --git a/h2/src/test/org/h2/test/synth/TestCrashAPI.java b/h2/src/test/org/h2/test/synth/TestCrashAPI.java index bc3d956062..f88c3841e9 100644 --- a/h2/src/test/org/h2/test/synth/TestCrashAPI.java +++ b/h2/src/test/org/h2/test/synth/TestCrashAPI.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -30,14 +30,13 @@ import java.util.Arrays; import java.util.Calendar; import java.util.Comparator; +import java.util.GregorianCalendar; import java.util.HashMap; import java.util.Map; import org.h2.api.ErrorCode; -import org.h2.jdbc.JdbcConnection; import org.h2.store.FileLister; import org.h2.store.fs.FileUtils; -import org.h2.test.TestAll; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.test.scripts.TestScript; @@ -45,7 +44,6 @@ import org.h2.tools.Backup; import org.h2.tools.DeleteDbFiles; import org.h2.tools.Restore; -import org.h2.util.DateTimeUtils; import org.h2.util.MathUtils; /** @@ -83,7 +81,7 @@ public class TestCrashAPI extends TestDb implements Runnable { public static void main(String... 
a) throws Exception { System.setProperty("h2.delayWrongPasswordMin", "0"); System.setProperty("h2.delayWrongPasswordMax", "0"); - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -115,12 +113,7 @@ public void run() { private static void recoverAll() { org.h2.Driver.load(); File[] files = new File("temp/backup").listFiles(); - Arrays.sort(files, new Comparator() { - @Override - public int compare(File o1, File o2) { - return o1.getName().compareTo(o2.getName()); - } - }); + Arrays.sort(files, Comparator.comparing(File::getName)); for (File f : files) { if (!f.getName().startsWith("db-")) { continue; @@ -162,9 +155,15 @@ public void test() throws Exception { recoverAll(); return; } - if (config.mvStore || config.networked) { + + if (config.networked) { return; } + + TestScript script = new TestScript(); + statements = script.getAllStatements(config); + initMethods(); + int len = getSize(2, 6); Thread t = new Thread(this); try { @@ -336,7 +335,7 @@ private void testCase(int seed) throws SQLException { continue; } if (random.getInt(2000) == 0 && conn != null) { - ((JdbcConnection) conn).setPowerOffCount(random.getInt(50)); + setPowerOffCount(conn, random.getInt(50)); } Object o = objects.get(objectId); if (o == null) { @@ -503,7 +502,7 @@ private Object getRandomParam(Class type) { // TODO should use generated savepoints return null; } else if (type == Calendar.class) { - return DateTimeUtils.createGregorianCalendar(); + return new GregorianCalendar(); } else if (type == java.net.URL.class) { return null; } else if (type == java.math.BigDecimal.class) { @@ -533,18 +532,4 @@ private void initMethods() { } } - @Override - public TestBase init(TestAll conf) throws Exception { - super.init(conf); - if (config.mvStore || config.networked) { - return this; - } - startServerIfRequired(); - TestScript script = new TestScript(); - statements = script.getAllStatements(config); - initMethods(); - org.h2.Driver.load(); - return 
this; - } - } diff --git a/h2/src/test/org/h2/test/synth/TestDiskFull.java b/h2/src/test/org/h2/test/synth/TestDiskFull.java index 22df84f115..16e4a0a1c3 100644 --- a/h2/src/test/org/h2/test/synth/TestDiskFull.java +++ b/h2/src/test/org/h2/test/synth/TestDiskFull.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -28,17 +28,13 @@ public class TestDiskFull extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { fs = FilePathUnstable.register(); - if (config.mvStore) { - fs.setPartialWrites(true); - } else { - fs.setPartialWrites(false); - } + fs.setPartialWrites(true); try { test(Integer.MAX_VALUE); int max = Integer.MAX_VALUE - fs.getDiskFullCount() + 10; diff --git a/h2/src/test/org/h2/test/synth/TestFuzzOptimizations.java b/h2/src/test/org/h2/test/synth/TestFuzzOptimizations.java index 26d6f6796e..f029e1f361 100644 --- a/h2/src/test/org/h2/test/synth/TestFuzzOptimizations.java +++ b/h2/src/test/org/h2/test/synth/TestFuzzOptimizations.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -34,7 +34,7 @@ public class TestFuzzOptimizations extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/TestHalt.java b/h2/src/test/org/h2/test/synth/TestHalt.java index 3590d62821..f6fd68f5ee 100644 --- a/h2/src/test/org/h2/test/synth/TestHalt.java +++ b/h2/src/test/org/h2/test/synth/TestHalt.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -13,8 +13,8 @@ import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; -import java.text.SimpleDateFormat; -import java.util.Date; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; import java.util.Random; import org.h2.test.TestAll; import org.h2.test.TestBase; @@ -87,8 +87,7 @@ public abstract class TestHalt extends TestBase { */ protected Random random = new Random(); - private final SimpleDateFormat dateFormat = - new SimpleDateFormat("MM-dd HH:mm:ss "); + private final DateTimeFormatter dateFormat = DateTimeFormatter.ofPattern("MM-dd HH:mm:ss"); private int errorId; private int sequenceId; @@ -190,7 +189,7 @@ protected void traceOperation(String s, Exception e) { f.getParentFile().mkdirs(); try (FileWriter writer = new FileWriter(f, true)) { PrintWriter w = new PrintWriter(writer); - s = dateFormat.format(new Date()) + ": " + s; + s = dateFormat.format(LocalDateTime.now()) + " : " + s; w.println(s); if (e != null) { e.printStackTrace(w); @@ -297,7 +296,7 @@ protected void disconnect() { // lock.delete(); // System.gc(); // } -// Class.forName("org.apache.derby.jdbc.EmbeddedDriver").newInstance(); +// Class.forName("org.apache.derby.iapi.jdbc.AutoloadedDriver").newInstance(); // try { // return 
DriverManager.getConnection( // "jdbc:derby:test3;create=true", "sa", "sa"); @@ -323,7 +322,7 @@ protected void disconnect() { // void disconnectDerby() { // // super.disconnect(); // try { -// Class.forName("org.apache.derby.jdbc.EmbeddedDriver"); +// Class.forName("org.apache.derby.iapi.jdbc.AutoloadedDriver"); // DriverManager.getConnection( // "jdbc:derby:;shutdown=true", "sa", "sa"); // } catch (Exception e) { diff --git a/h2/src/test/org/h2/test/synth/TestHaltApp.java b/h2/src/test/org/h2/test/synth/TestHaltApp.java index 9905fac890..22b5d902e9 100644 --- a/h2/src/test/org/h2/test/synth/TestHaltApp.java +++ b/h2/src/test/org/h2/test/synth/TestHaltApp.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; diff --git a/h2/src/test/org/h2/test/synth/TestJoin.java b/h2/src/test/org/h2/test/synth/TestJoin.java index 967c9fc7ed..ca45c1aedf 100644 --- a/h2/src/test/org/h2/test/synth/TestJoin.java +++ b/h2/src/test/org/h2/test/synth/TestJoin.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -38,7 +38,7 @@ public class TestJoin extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -57,7 +57,7 @@ private void testJoin() throws Exception { Connection c2 = DriverManager.getConnection("jdbc:postgresql:test", "sa", "sa"); connections.add(c2); - // Class.forName("com.mysql.jdbc.Driver"); + // Class.forName("com.mysql.cj.jdbc.Driver"); // Connection c2 = // DriverManager.getConnection("jdbc:mysql://localhost/test", "sa", // "sa"); diff --git a/h2/src/test/org/h2/test/synth/TestKill.java b/h2/src/test/org/h2/test/synth/TestKill.java index c0a3e951cd..52baf41465 100644 --- a/h2/src/test/org/h2/test/synth/TestKill.java +++ b/h2/src/test/org/h2/test/synth/TestKill.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -36,7 +36,7 @@ public class TestKill extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/TestKillProcess.java b/h2/src/test/org/h2/test/synth/TestKillProcess.java index 2eb57474e0..b432222552 100644 --- a/h2/src/test/org/h2/test/synth/TestKillProcess.java +++ b/h2/src/test/org/h2/test/synth/TestKillProcess.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth; diff --git a/h2/src/test/org/h2/test/synth/TestKillRestart.java b/h2/src/test/org/h2/test/synth/TestKillRestart.java index 1de642b077..d9ed4920c5 100644 --- a/h2/src/test/org/h2/test/synth/TestKillRestart.java +++ b/h2/src/test/org/h2/test/synth/TestKillRestart.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; diff --git a/h2/src/test/org/h2/test/synth/TestKillRestartMulti.java b/h2/src/test/org/h2/test/synth/TestKillRestartMulti.java index 0996f7a12f..a8858e19ab 100644 --- a/h2/src/test/org/h2/test/synth/TestKillRestartMulti.java +++ b/h2/src/test/org/h2/test/synth/TestKillRestartMulti.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -59,11 +59,9 @@ public static void main(String... 
args) throws Exception { // the child process case SelfDestructor.startCountdown(CHILD_SELFDESTRUCT_TIMEOUT_MINS); new TestKillRestartMulti().test(args); - } - else - { + } else { // the standalone test case - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } } @@ -81,7 +79,7 @@ public boolean isEnabled() { @Override public void test() throws Exception { deleteDb("killRestartMulti"); - url = getURL("killRestartMulti", true); + url = getURL("killRestartMulti;RETENTION_TIME=0", true); user = getUser(); password = getPassword(); String selfDestruct = SelfDestructor.getPropertyString(60); @@ -318,7 +316,10 @@ private static void testConsistent(Connection conn) throws SQLException { rs.getString("NAME"); } } catch (SQLException e) { - if (e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1) { + if (e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 || + e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 || + e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2 + ) { // ok } else { throw e; diff --git a/h2/src/test/org/h2/test/synth/TestLimit.java b/h2/src/test/org/h2/test/synth/TestLimit.java index 2caf933ddb..5a063b0329 100644 --- a/h2/src/test/org/h2/test/synth/TestLimit.java +++ b/h2/src/test/org/h2/test/synth/TestLimit.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -8,6 +8,8 @@ import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; + +import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -26,7 +28,7 @@ public class TestLimit extends TestDb { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); // test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -38,8 +40,8 @@ public void test() throws Exception { "select x from system_range(1, 10)"); for (int maxRows = 0; maxRows < 12; maxRows++) { stat.setMaxRows(maxRows); - for (int limit = -2; limit < 12; limit++) { - for (int offset = -2; offset < 12; offset++) { + for (int limit = -1; limit < 12; limit++) { + for (int offset = -1; offset < 12; offset++) { int l = limit < 0 ? 10 : Math.min(10, limit); for (int d = 0; d < 2; d++) { int m = maxRows <= 0 ? 10 : Math.min(10, maxRows); @@ -47,9 +49,9 @@ public void test() throws Exception { if (offset > 0) { expected = Math.max(0, Math.min(10 - offset, expected)); } - String s = "select " + (d == 1 ? "distinct " : "") + - " * from test limit " + (limit == -2 ? "null" : limit) + - " offset " + (offset == -2 ? "null" : offset); + String s = "select " + (d == 1 ? "distinct " : "") + "* from test" + + (offset >= 0 ? " offset " + offset + " rows" : "") + + (limit >= 0 ? " fetch next " + limit + " rows only" : ""); assertRow(expected, s); String union = "(" + s + ") union (" + s + ")"; assertRow(expected, union); @@ -60,11 +62,13 @@ public void test() throws Exception { expected = Math.min(m, l * 2); union = "(" + s + ") union all (" + s + ")"; assertRow(expected, union); - for (int unionLimit = -2; unionLimit < 5; unionLimit++) { + for (int unionLimit = -1; unionLimit < 5; unionLimit++) { int e = unionLimit < 0 ? 20 : Math.min(20, unionLimit); e = Math.min(expected, e); - String u = union + " limit " + - (unionLimit == -2 ? 
"null" : unionLimit); + String u = union; + if (unionLimit >= 0) { + u += " fetch first " + unionLimit + " rows only"; + } assertRow(e, u); } } @@ -74,9 +78,7 @@ public void test() throws Exception { assertEquals(0, stat.executeUpdate("delete from test limit 0")); assertEquals(1, stat.executeUpdate("delete from test limit 1")); assertEquals(2, stat.executeUpdate("delete from test limit 2")); - assertEquals(7, stat.executeUpdate("delete from test limit null")); - stat.execute("insert into test select x from system_range(1, 10)"); - assertEquals(10, stat.executeUpdate("delete from test limit -1")); + assertThrows(ErrorCode.INVALID_VALUE_2, stat).executeUpdate("delete from test limit null"); conn.close(); deleteDb("limit"); } diff --git a/h2/src/test/org/h2/test/synth/TestMultiThreaded.java b/h2/src/test/org/h2/test/synth/TestMultiThreaded.java index f585dceb57..4e1fc87c9b 100644 --- a/h2/src/test/org/h2/test/synth/TestMultiThreaded.java +++ b/h2/src/test/org/h2/test/synth/TestMultiThreaded.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -32,9 +32,7 @@ public static void main(String... 
a) throws Exception { TestBase test = createCaller().init(config); for (int i = 0; i < 100; i++) { System.out.println("Pass #" + i); - test.config.beforeTest(); - test.test(); - test.config.afterTest(); + test.testFromMain(); } } @@ -138,7 +136,7 @@ public void test() throws Exception { int size = getSize(2, 20); Connection[] connList = new Connection[size]; for (int i = 0; i < size; i++) { - connList[i] = getConnection("multiThreaded;MULTI_THREADED=1"); + connList[i] = getConnection("multiThreaded"); } Connection conn = connList[0]; Statement stat = conn.createStatement(); diff --git a/h2/src/test/org/h2/test/synth/TestNestedJoins.java b/h2/src/test/org/h2/test/synth/TestNestedJoins.java index 2b571dc6bd..d72fde3143 100644 --- a/h2/src/test/org/h2/test/synth/TestNestedJoins.java +++ b/h2/src/test/org/h2/test/synth/TestNestedJoins.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -37,7 +37,7 @@ public class TestNestedJoins extends TestDb { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); // test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -62,16 +62,16 @@ private void testRandom() throws Exception { } // Derby doesn't work currently - // deleteDerby(); - // try { - // Class.forName("org.apache.derby.jdbc.EmbeddedDriver"); - // Connection c2 = DriverManager.getConnection( - // "jdbc:derby:" + getBaseDir() + - // "/derby/test;create=true", "sa", "sa"); - // dbs.add(c2.createStatement()); - // } catch (Exception e) { - // // database not installed - ok - // } + deleteDerby(); + try { + Class.forName("org.apache.derby.iapi.jdbc.AutoloadedDriver"); + Connection c2 = DriverManager.getConnection( + "jdbc:derby:" + getBaseDir() + + "/derby/test;create=true", "sa", "sa"); + dbs.add(c2.createStatement()); + } catch (Throwable e) { + // database not installed - ok + } String shortest = null; Throwable shortestEx = null; for (int i = 0; i < 10; i++) { @@ -244,7 +244,7 @@ private void testCases() throws Exception { // issue 288 assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat). 
execute("select 1 from dual a right outer join " + - "(select b.x from dual b) c on unknown.x = c.x, dual d"); + "(select b.x from dual b) c on unknown_table.x = c.x, dual d"); // issue 288 stat.execute("create table test(id int primary key)"); @@ -289,7 +289,6 @@ private void testCases() throws Exception { assertContains(sql, "("); stat.execute("drop table a, b, c"); - // see roadmap, tag: swapInnerJoinTables /* create table test(id int primary key, x int) as select x, x from system_range(1, 10); @@ -467,7 +466,8 @@ create table o(id int primary key) "on a.x = c.x"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT \"A\".\"X\", \"B\".\"X\", \"C\".\"X\", \"C\".\"Y\" FROM \"PUBLIC\".\"A\" " + + assertEquals("SELECT \"PUBLIC\".\"A\".\"X\", \"PUBLIC\".\"B\".\"X\", " + + "\"PUBLIC\".\"C\".\"X\", \"PUBLIC\".\"C\".\"Y\" FROM \"PUBLIC\".\"A\" " + "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + "LEFT OUTER JOIN \"PUBLIC\".\"C\" " + "ON \"B\".\"X\" = \"C\".\"Y\" ) " + @@ -548,7 +548,8 @@ create table o(id int primary key) "inner join c on c.x = 1) on a.x = b.x"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT \"A\".\"X\", \"B\".\"X\", \"C\".\"X\" FROM \"PUBLIC\".\"A\" " + + assertEquals("SELECT \"PUBLIC\".\"A\".\"X\", \"PUBLIC\".\"B\".\"X\", \"PUBLIC\".\"C\".\"X\" " + + "FROM \"PUBLIC\".\"A\" " + "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + "INNER JOIN \"PUBLIC\".\"C\" ON \"C\".\"X\" = 1 ) ON \"A\".\"X\" = \"B\".\"X\"", sql); stat.execute("drop table a, b, c"); @@ -601,7 +602,7 @@ create table o(id int primary key) "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + "INNER JOIN \"PUBLIC\".\"BASE\" \"B_BASE\" " + "ON (\"B_BASE\".\"DELETED\" = 0) AND (\"B\".\"PK\" = \"B_BASE\".\"PK\") ) " + - "ON TRUE INNER JOIN \"PUBLIC\".\"A\" ON 1=1 " + + "ON 1=1 INNER JOIN \"PUBLIC\".\"A\" ON 1=1 " + "WHERE \"A\".\"PK\" = \"A_BASE\".\"PK\"", sql); rs = stat.executeQuery( "select a.pk, a_base.pk, b.pk, b_base.pk from a " + diff --git 
a/h2/src/test/org/h2/test/synth/TestOuterJoins.java b/h2/src/test/org/h2/test/synth/TestOuterJoins.java index a4e309b7e1..41e97bbfd8 100644 --- a/h2/src/test/org/h2/test/synth/TestOuterJoins.java +++ b/h2/src/test/org/h2/test/synth/TestOuterJoins.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -36,7 +36,7 @@ public class TestOuterJoins extends TestDb { public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -61,12 +61,12 @@ private void testRandom() throws Exception { } deleteDerby(); try { - Class.forName("org.apache.derby.jdbc.EmbeddedDriver"); + Class.forName("org.apache.derby.iapi.jdbc.AutoloadedDriver"); Connection c2 = DriverManager.getConnection( "jdbc:derby:" + getBaseDir() + "/derby/test;create=true", "sa", "sa"); dbs.add(c2.createStatement()); - } catch (Exception e) { + } catch (Throwable e) { // database not installed - ok } String shortest = null; @@ -420,7 +420,8 @@ private void testCases() throws Exception { "on a.x = c.x"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT \"A\".\"X\", \"B\".\"X\", \"C\".\"X\", \"C\".\"Y\" FROM \"PUBLIC\".\"A\" " + + assertEquals("SELECT \"PUBLIC\".\"A\".\"X\", \"PUBLIC\".\"B\".\"X\", " + + "\"PUBLIC\".\"C\".\"X\", \"PUBLIC\".\"C\".\"Y\" FROM \"PUBLIC\".\"A\" " + "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + "LEFT OUTER JOIN \"PUBLIC\".\"C\" " + "ON \"B\".\"X\" = \"C\".\"Y\" ) " + @@ -501,7 +502,8 @@ private void testCases() throws Exception { "inner join c on c.x = 1) on a.x = b.x"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - 
assertEquals("SELECT \"A\".\"X\", \"B\".\"X\", \"C\".\"X\" FROM \"PUBLIC\".\"A\" " + + assertEquals("SELECT \"PUBLIC\".\"A\".\"X\", \"PUBLIC\".\"B\".\"X\", \"PUBLIC\".\"C\".\"X\" " + + "FROM \"PUBLIC\".\"A\" " + "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + "INNER JOIN \"PUBLIC\".\"C\" ON \"C\".\"X\" = 1 ) ON \"A\".\"X\" = \"B\".\"X\"", sql); stat.execute("drop table a, b, c"); @@ -553,7 +555,7 @@ private void testCases() throws Exception { "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + "INNER JOIN \"PUBLIC\".\"BASE\" \"B_BASE\" " + "ON (\"B_BASE\".\"DELETED\" = 0) AND (\"B\".\"PK\" = \"B_BASE\".\"PK\") ) " + - "ON TRUE INNER JOIN \"PUBLIC\".\"A\" ON 1=1 WHERE \"A\".\"PK\" = \"A_BASE\".\"PK\"", sql); + "ON 1=1 INNER JOIN \"PUBLIC\".\"A\" ON 1=1 WHERE \"A\".\"PK\" = \"A_BASE\".\"PK\"", sql); rs = stat.executeQuery("select a.pk, a_base.pk, b.pk, b_base.pk from a " + "inner join base a_base on a.pk = a_base.pk " + "left outer join (b inner join base b_base " + diff --git a/h2/src/test/org/h2/test/synth/TestPowerOffFs.java b/h2/src/test/org/h2/test/synth/TestPowerOffFs.java index 1860d213d6..443b7844d1 100644 --- a/h2/src/test/org/h2/test/synth/TestPowerOffFs.java +++ b/h2/src/test/org/h2/test/synth/TestPowerOffFs.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -28,7 +28,7 @@ public class TestPowerOffFs extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/TestPowerOffFs2.java b/h2/src/test/org/h2/test/synth/TestPowerOffFs2.java index 1397f853ea..1799b86fde 100644 --- a/h2/src/test/org/h2/test/synth/TestPowerOffFs2.java +++ b/h2/src/test/org/h2/test/synth/TestPowerOffFs2.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -39,7 +39,7 @@ public class TestPowerOffFs2 extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -217,7 +217,10 @@ private static void testConsistent(Connection conn) throws SQLException { rs.getString("NAME"); } } catch (SQLException e) { - if (e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1) { + if (e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 || + e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 || + e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2 + ) { // ok } else { throw e; diff --git a/h2/src/test/org/h2/test/synth/TestRandomCompare.java b/h2/src/test/org/h2/test/synth/TestRandomCompare.java index 84606541c1..7cf7657525 100644 --- a/h2/src/test/org/h2/test/synth/TestRandomCompare.java +++ b/h2/src/test/org/h2/test/synth/TestRandomCompare.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -33,7 +33,7 @@ public class TestRandomCompare extends TestDb { public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/TestRandomSQL.java b/h2/src/test/org/h2/test/synth/TestRandomSQL.java index 416e5f4ffd..9223e60d6c 100644 --- a/h2/src/test/org/h2/test/synth/TestRandomSQL.java +++ b/h2/src/test/org/h2/test/synth/TestRandomSQL.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -27,7 +27,7 @@ public class TestRandomSQL extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/TestReleaseSelectLock.java b/h2/src/test/org/h2/test/synth/TestReleaseSelectLock.java index 3a60f3e128..42907fe467 100644 --- a/h2/src/test/org/h2/test/synth/TestReleaseSelectLock.java +++ b/h2/src/test/org/h2/test/synth/TestReleaseSelectLock.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth; @@ -27,9 +27,7 @@ public class TestReleaseSelectLock extends TestDb { */ public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); - test.config.mvStore = false; - test.config.multiThreaded = true; - test.test(); + test.testFromMain(); } @Override @@ -54,26 +52,23 @@ private void runConcurrentSelects() throws InterruptedException { int tryCount = 500; int threadsCount = getSize(2, 4); for (int tryNumber = 0; tryNumber < tryCount; tryNumber++) { - final CountDownLatch allFinished = new CountDownLatch(threadsCount); + CountDownLatch allFinished = new CountDownLatch(threadsCount); for (int i = 0; i < threadsCount; i++) { - new Thread(new Runnable() { - @Override - public void run() { - try { - Connection conn = getConnection(TEST_DB_NAME); - PreparedStatement stmt = conn.prepareStatement("select id from test"); - ResultSet rs = stmt.executeQuery(); - while (rs.next()) { - rs.getInt(1); - } - stmt.close(); - conn.close(); - } catch (Exception e) { - throw new RuntimeException(e); - } finally { - allFinished.countDown(); + new Thread(() -> { + try { + Connection conn = getConnection(TEST_DB_NAME); + PreparedStatement stmt = conn.prepareStatement("select id from test"); + ResultSet rs = stmt.executeQuery(); + while (rs.next()) { + rs.getInt(1); } + stmt.close(); + conn.close(); + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + allFinished.countDown(); } }).start(); } diff --git a/h2/src/test/org/h2/test/synth/TestSimpleIndex.java b/h2/src/test/org/h2/test/synth/TestSimpleIndex.java index 3575e175dc..4a0337d45c 100644 --- a/h2/src/test/org/h2/test/synth/TestSimpleIndex.java +++ b/h2/src/test/org/h2/test/synth/TestSimpleIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -30,7 +30,7 @@ public class TestSimpleIndex extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/TestStringAggCompatibility.java b/h2/src/test/org/h2/test/synth/TestStringAggCompatibility.java deleted file mode 100644 index 04cb044ded..0000000000 --- a/h2/src/test/org/h2/test/synth/TestStringAggCompatibility.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.synth; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import org.h2.test.TestBase; -import org.h2.test.TestDb; - -/** - * Test for check compatibility with PostgreSQL function string_agg() - */ -public class TestStringAggCompatibility extends TestDb { - - private Connection conn; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - deleteDb(getTestName()); - conn = getConnection(getTestName()); - prepareDb(); - testWhenOrderByMissing(); - testWithOrderBy(); - conn.close(); - } - - private void testWithOrderBy() throws SQLException { - ResultSet result = query( - "select string_agg(b, ', ' order by b desc) from stringAgg group by a; "); - - assertTrue(result.next()); - assertEquals("3, 2, 1", result.getString(1)); - } - - private void testWhenOrderByMissing() throws SQLException { - ResultSet result = query("select string_agg(b, ', ') from stringAgg group by a; "); - - assertTrue(result.next()); - assertEquals("1, 2, 3", result.getString(1)); - } - - private ResultSet query(String q) throws SQLException { - PreparedStatement st = conn.prepareStatement(q); - - st.execute(); - - return st.getResultSet(); - } - - private void prepareDb() throws SQLException { - exec("create table stringAgg(\n" + - " a int not null,\n" + - " b varchar(50) not null\n" + - ");"); - - exec("insert into stringAgg values(1, '1')"); - exec("insert into stringAgg values(1, '2')"); - exec("insert into stringAgg values(1, '3')"); - - } - - private void exec(String sql) throws SQLException { - conn.prepareStatement(sql).execute(); - } -} diff --git a/h2/src/test/org/h2/test/synth/TestThreads.java b/h2/src/test/org/h2/test/synth/TestThreads.java index bab4b9275c..f88049ebfd 100644 --- a/h2/src/test/org/h2/test/synth/TestThreads.java +++ b/h2/src/test/org/h2/test/synth/TestThreads.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth; @@ -50,7 +50,7 @@ public TestThreads() { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/TestTimer.java b/h2/src/test/org/h2/test/synth/TestTimer.java index 3e279b045c..aae2e40ccc 100644 --- a/h2/src/test/org/h2/test/synth/TestTimer.java +++ b/h2/src/test/org/h2/test/synth/TestTimer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -31,7 +31,7 @@ public class TestTimer extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/package.html b/h2/src/test/org/h2/test/synth/package.html index 2bcc4eca20..31abc88978 100644 --- a/h2/src/test/org/h2/test/synth/package.html +++ b/h2/src/test/org/h2/test/synth/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/synth/sql/Column.java b/h2/src/test/org/h2/test/synth/sql/Column.java index b2ecb3ef04..e797507155 100644 --- a/h2/src/test/org/h2/test/synth/sql/Column.java +++ b/h2/src/test/org/h2/test/synth/sql/Column.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.sql; @@ -173,10 +173,6 @@ Value getRandomValue() { return Value.getRandom(config, type, precision, scale, isNullable); } -// Value getRandomValueNotNull() { -// return Value.getRandom(config, type, precision, scale, false); -// } - /** * Generate a random column. * diff --git a/h2/src/test/org/h2/test/synth/sql/Command.java b/h2/src/test/org/h2/test/synth/sql/Command.java index 32a8bce44d..00997cc057 100644 --- a/h2/src/test/org/h2/test/synth/sql/Command.java +++ b/h2/src/test/org/h2/test/synth/sql/Command.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; @@ -382,10 +382,6 @@ Result run(DbInterface db) throws Exception { return result; } -// public String getNextTableAlias() { -// return "S" + nextAlias++; -// } - /** * Get a random table alias name. * diff --git a/h2/src/test/org/h2/test/synth/sql/DbConnection.java b/h2/src/test/org/h2/test/synth/sql/DbConnection.java index 2dd515803a..803fc28b6b 100644 --- a/h2/src/test/org/h2/test/synth/sql/DbConnection.java +++ b/h2/src/test/org/h2/test/synth/sql/DbConnection.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/DbInterface.java b/h2/src/test/org/h2/test/synth/sql/DbInterface.java index f1fede0630..118b7030d3 100644 --- a/h2/src/test/org/h2/test/synth/sql/DbInterface.java +++ b/h2/src/test/org/h2/test/synth/sql/DbInterface.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/DbState.java b/h2/src/test/org/h2/test/synth/sql/DbState.java index 780e9d0fd7..0ecee56720 100644 --- a/h2/src/test/org/h2/test/synth/sql/DbState.java +++ b/h2/src/test/org/h2/test/synth/sql/DbState.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/Expression.java b/h2/src/test/org/h2/test/synth/sql/Expression.java index 925cd9ee02..50d615425f 100644 --- a/h2/src/test/org/h2/test/synth/sql/Expression.java +++ b/h2/src/test/org/h2/test/synth/sql/Expression.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/Index.java b/h2/src/test/org/h2/test/synth/sql/Index.java index 03cc8fdc4e..544a847667 100644 --- a/h2/src/test/org/h2/test/synth/sql/Index.java +++ b/h2/src/test/org/h2/test/synth/sql/Index.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/RandomGen.java b/h2/src/test/org/h2/test/synth/sql/RandomGen.java index 9e0cfb4773..50ce674372 100644 --- a/h2/src/test/org/h2/test/synth/sql/RandomGen.java +++ b/h2/src/test/org/h2/test/synth/sql/RandomGen.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/Result.java b/h2/src/test/org/h2/test/synth/sql/Result.java index 345cddca0b..556bf8c34b 100644 --- a/h2/src/test/org/h2/test/synth/sql/Result.java +++ b/h2/src/test/org/h2/test/synth/sql/Result.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/Row.java b/h2/src/test/org/h2/test/synth/sql/Row.java index 6c2fd81cf0..e60988b1d3 100644 --- a/h2/src/test/org/h2/test/synth/sql/Row.java +++ b/h2/src/test/org/h2/test/synth/sql/Row.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/Table.java b/h2/src/test/org/h2/test/synth/sql/Table.java index c4318371a1..abf1092715 100644 --- a/h2/src/test/org/h2/test/synth/sql/Table.java +++ b/h2/src/test/org/h2/test/synth/sql/Table.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; @@ -11,6 +11,7 @@ * Represents a table. */ class Table { + private final TestSynth config; private String name; private boolean temporary; diff --git a/h2/src/test/org/h2/test/synth/sql/TestSynth.java b/h2/src/test/org/h2/test/synth/sql/TestSynth.java index 2e933fbaed..389a914f88 100644 --- a/h2/src/test/org/h2/test/synth/sql/TestSynth.java +++ b/h2/src/test/org/h2/test/synth/sql/TestSynth.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.sql; @@ -60,7 +60,7 @@ public class TestSynth extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } /** @@ -296,11 +296,11 @@ public TestBase init(TestAll conf) throws Exception { addDatabase("org.h2.Driver", "jdbc:h2:" + getBaseDir() + "/synth/synth", "sa", "", false); - // addDatabase("com.mysql.jdbc.Driver", "jdbc:mysql://localhost/test", + // addDatabase("com.mysql.cj.jdbc.Driver", "jdbc:mysql://localhost/test", // "sa", ""); // addDatabase("org.h2.Driver", "jdbc:h2:synth;mode=mysql", "sa", ""); - // addDatabase("com.mysql.jdbc.Driver", "jdbc:mysql://localhost/test", + // addDatabase("com.mysql.cj.jdbc.Driver", "jdbc:mysql://localhost/test", // "sa", ""); // addDatabase("org.ldbc.jdbc.jdbcDriver", // "jdbc:ldbc:mysql://localhost/test", "sa", ""); diff --git a/h2/src/test/org/h2/test/synth/sql/Value.java b/h2/src/test/org/h2/test/synth/sql/Value.java index 5ccd1c5ea7..6707fee2f2 100644 --- a/h2/src/test/org/h2/test/synth/sql/Value.java +++ b/h2/src/test/org/h2/test/synth/sql/Value.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/package.html b/h2/src/test/org/h2/test/synth/sql/package.html index 939a0284cd..6826f682db 100644 --- a/h2/src/test/org/h2/test/synth/sql/package.html +++ b/h2/src/test/org/h2/test/synth/sql/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/synth/thread/TestMulti.java b/h2/src/test/org/h2/test/synth/thread/TestMulti.java index 054d0563dc..e7e16b7686 100644 --- a/h2/src/test/org/h2/test/synth/thread/TestMulti.java +++ b/h2/src/test/org/h2/test/synth/thread/TestMulti.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.thread; @@ -28,7 +28,7 @@ public class TestMulti extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/thread/TestMultiNews.java b/h2/src/test/org/h2/test/synth/thread/TestMultiNews.java index d2394171cb..4c2921f6bc 100644 --- a/h2/src/test/org/h2/test/synth/thread/TestMultiNews.java +++ b/h2/src/test/org/h2/test/synth/thread/TestMultiNews.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.thread; diff --git a/h2/src/test/org/h2/test/synth/thread/TestMultiNewsSimple.java b/h2/src/test/org/h2/test/synth/thread/TestMultiNewsSimple.java index a5fbbfbd98..fc043105d8 100644 --- a/h2/src/test/org/h2/test/synth/thread/TestMultiNewsSimple.java +++ b/h2/src/test/org/h2/test/synth/thread/TestMultiNewsSimple.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.thread; diff --git a/h2/src/test/org/h2/test/synth/thread/TestMultiOrder.java b/h2/src/test/org/h2/test/synth/thread/TestMultiOrder.java index 49c419f8ab..c10fec4850 100644 --- a/h2/src/test/org/h2/test/synth/thread/TestMultiOrder.java +++ b/h2/src/test/org/h2/test/synth/thread/TestMultiOrder.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.thread; @@ -134,7 +134,7 @@ void first() throws SQLException { c.createStatement().execute("create table customer(" + "id int primary key, name varchar, account decimal)"); c.createStatement().execute("create table orders(" + - "id int identity primary key, customer_id int, total decimal)"); + "id int generated by default as identity primary key, customer_id int, total decimal)"); c.createStatement().execute("create table orderLine(" + "order_id int, line_id int, text varchar, " + "amount decimal, primary key(order_id, line_id))"); diff --git a/h2/src/test/org/h2/test/synth/thread/TestMultiThread.java b/h2/src/test/org/h2/test/synth/thread/TestMultiThread.java index fa24ea3778..7ed64a1eb6 100644 --- a/h2/src/test/org/h2/test/synth/thread/TestMultiThread.java +++ b/h2/src/test/org/h2/test/synth/thread/TestMultiThread.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.thread; diff --git a/h2/src/test/org/h2/test/synth/thread/package.html b/h2/src/test/org/h2/test/synth/thread/package.html index f9e328fca2..6adf5e5236 100644 --- a/h2/src/test/org/h2/test/synth/thread/package.html +++ b/h2/src/test/org/h2/test/synth/thread/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/todo/TestDiskSpaceLeak.java b/h2/src/test/org/h2/test/todo/TestDiskSpaceLeak.java index a603d99c7e..51aff905d3 100644 --- a/h2/src/test/org/h2/test/todo/TestDiskSpaceLeak.java +++ b/h2/src/test/org/h2/test/todo/TestDiskSpaceLeak.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.todo; @@ -10,7 +10,7 @@ import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; -import org.h2.jdbc.JdbcConnection; +import org.h2.test.TestBase; import org.h2.tools.DeleteDbFiles; import org.h2.tools.Recover; import org.h2.util.JdbcUtils; @@ -49,8 +49,8 @@ public static void main(String... args) throws Exception { Recover.execute("data", "test"); new File("data/test.h2.sql").renameTo(new File("data/test." + i + ".sql")); conn = DriverManager.getConnection("jdbc:h2:data/test"); - // ((JdbcConnection) conn).setPowerOffCount(i); - ((JdbcConnection) conn).setPowerOffCount(28); + // TestBase.setPowerOffCount(conn, i); + TestBase.setPowerOffCount(conn, 28); String last = "connect"; try { conn.createStatement().execute("drop table test if exists"); diff --git a/h2/src/test/org/h2/test/todo/TestDropTableLarge.java b/h2/src/test/org/h2/test/todo/TestDropTableLarge.java index a106a92b29..3a050642a1 100644 --- a/h2/src/test/org/h2/test/todo/TestDropTableLarge.java +++ b/h2/src/test/org/h2/test/todo/TestDropTableLarge.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.todo; diff --git a/h2/src/test/org/h2/test/todo/TestLinkedTableFullCondition.java b/h2/src/test/org/h2/test/todo/TestLinkedTableFullCondition.java index 22b863535b..9770cf6e23 100644 --- a/h2/src/test/org/h2/test/todo/TestLinkedTableFullCondition.java +++ b/h2/src/test/org/h2/test/todo/TestLinkedTableFullCondition.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.todo; diff --git a/h2/src/test/org/h2/test/todo/TestTempTableCrash.java b/h2/src/test/org/h2/test/todo/TestTempTableCrash.java index fb576b9292..8a4e452975 100644 --- a/h2/src/test/org/h2/test/todo/TestTempTableCrash.java +++ b/h2/src/test/org/h2/test/todo/TestTempTableCrash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.todo; @@ -10,8 +10,7 @@ import java.sql.Statement; import java.util.Random; import java.util.concurrent.TimeUnit; - -import org.h2.store.fs.FilePathRec; +import org.h2.store.fs.rec.FilePathRec; import org.h2.test.unit.TestReopen; import org.h2.tools.DeleteDbFiles; diff --git a/h2/src/test/org/h2/test/todo/TestUndoLogLarge.java b/h2/src/test/org/h2/test/todo/TestUndoLogLarge.java index d2068af251..41a463ffb9 100644 --- a/h2/src/test/org/h2/test/todo/TestUndoLogLarge.java +++ b/h2/src/test/org/h2/test/todo/TestUndoLogLarge.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.todo; diff --git a/h2/src/test/org/h2/test/todo/package.html b/h2/src/test/org/h2/test/todo/package.html index c7d8192c1e..a99d84ed75 100644 --- a/h2/src/test/org/h2/test/todo/package.html +++ b/h2/src/test/org/h2/test/todo/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/todo/supportTemplates.txt b/h2/src/test/org/h2/test/todo/supportTemplates.txt index 76bb9b6eaf..f79ebc4ed3 100644 --- a/h2/src/test/org/h2/test/todo/supportTemplates.txt +++ b/h2/src/test/org/h2/test/todo/supportTemplates.txt @@ -5,7 +5,7 @@ and only then, once you are completely sure it is an issue, submit it here. The reason is that only very few people actively monitor the issue tracker. Before submitting a bug, please also check the FAQ: -http://www.h2database.com/html/faq.html +https://h2database.com/html/faq.html What steps will reproduce the problem? (simple SQL scripts or simple standalone applications are preferred) diff --git a/h2/src/test/org/h2/test/todo/tools.sql b/h2/src/test/org/h2/test/todo/tools.sql index e01e52c6e5..bd61c7a5d0 100644 --- a/h2/src/test/org/h2/test/todo/tools.sql +++ b/h2/src/test/org/h2/test/todo/tools.sql @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ diff --git a/h2/src/test/org/h2/test/trace/Arg.java b/h2/src/test/org/h2/test/trace/Arg.java index 7c252ed248..55038562d5 100644 --- a/h2/src/test/org/h2/test/trace/Arg.java +++ b/h2/src/test/org/h2/test/trace/Arg.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). */ /* * Licensed to the Apache Software Foundation (ASF) under one or more diff --git a/h2/src/test/org/h2/test/trace/Parser.java b/h2/src/test/org/h2/test/trace/Parser.java index 054d14e255..86e995ef58 100644 --- a/h2/src/test/org/h2/test/trace/Parser.java +++ b/h2/src/test/org/h2/test/trace/Parser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -188,7 +188,7 @@ private Arg parseValue() { number.indexOf('.') >= 0) { Double v = Double.parseDouble(number); return new Arg(double.class, v); - } else if (number.endsWith("L") || number.endsWith("l")) { + } else if (number.endsWith("l")) { Long v = Long.parseLong(number.substring(0, number.length() - 1)); return new Arg(long.class, v); } else { diff --git a/h2/src/test/org/h2/test/trace/Player.java b/h2/src/test/org/h2/test/trace/Player.java index 0835ccb36b..cf0a750200 100644 --- a/h2/src/test/org/h2/test/trace/Player.java +++ b/h2/src/test/org/h2/test/trace/Player.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
*/ /* * Licensed to the Apache Software Foundation (ASF) under one or more diff --git a/h2/src/test/org/h2/test/trace/Statement.java b/h2/src/test/org/h2/test/trace/Statement.java index 53da0e3593..6fcca9d58e 100644 --- a/h2/src/test/org/h2/test/trace/Statement.java +++ b/h2/src/test/org/h2/test/trace/Statement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). */ /* * Licensed to the Apache Software Foundation (ASF) under one or more diff --git a/h2/src/test/org/h2/test/trace/package.html b/h2/src/test/org/h2/test/trace/package.html index b593b49cec..5b4b294356 100644 --- a/h2/src/test/org/h2/test/trace/package.html +++ b/h2/src/test/org/h2/test/trace/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/unit/TestAnsCompression.java b/h2/src/test/org/h2/test/unit/TestAnsCompression.java index ea8df0b18a..32daf07048 100644 --- a/h2/src/test/org/h2/test/unit/TestAnsCompression.java +++ b/h2/src/test/org/h2/test/unit/TestAnsCompression.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -26,7 +26,7 @@ public class TestAnsCompression extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestAutoReconnect.java b/h2/src/test/org/h2/test/unit/TestAutoReconnect.java index b502e2cdb1..e275d3ed1e 100644 --- a/h2/src/test/org/h2/test/unit/TestAutoReconnect.java +++ b/h2/src/test/org/h2/test/unit/TestAutoReconnect.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -33,7 +33,7 @@ public class TestAutoReconnect extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } private void restart() throws SQLException, InterruptedException { @@ -65,21 +65,17 @@ private void testWrongUrl() throws Exception { deleteDb(getTestName()); Server tcp = Server.createTcpServer().start(); try { - conn = getConnection("jdbc:h2:" + getBaseDir() + - "/" + getTestName() + ";AUTO_SERVER=TRUE"); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this). - getConnection("jdbc:h2:" + getBaseDir() + - "/" + getTestName() + ";OPEN_NEW=TRUE"); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this). 
- getConnection("jdbc:h2:" + getBaseDir() + - "/" + getTestName() + ";OPEN_NEW=TRUE"); + conn = getConnection("jdbc:h2:" + getBaseDir() + '/' + getTestName() + ";AUTO_SERVER=TRUE"); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, + () -> getConnection("jdbc:h2:" + getBaseDir() + '/' + getTestName() + ";OPEN_NEW=TRUE")); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, + () -> getConnection("jdbc:h2:" + getBaseDir() + '/' + getTestName() + ";OPEN_NEW=TRUE")); conn.close(); - conn = getConnection("jdbc:h2:tcp://localhost:" + tcp.getPort() + - "/" + getBaseDir() + "/" + getTestName()); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this). - getConnection("jdbc:h2:" + getBaseDir() + - "/" + getTestName() + ";AUTO_SERVER=TRUE;OPEN_NEW=TRUE"); + conn = getConnection("jdbc:h2:tcp://localhost:" + tcp.getPort() + '/' + getBaseDir() + '/' // + + getTestName()); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, () -> getConnection( + "jdbc:h2:" + getBaseDir() + '/' + getTestName() + ";AUTO_SERVER=TRUE;OPEN_NEW=TRUE")); conn.close(); } finally { tcp.stop(); @@ -114,7 +110,7 @@ private void testReconnect() throws Exception { stat.execute("create table test(id identity, name varchar)"); restart(); PreparedStatement prep = conn.prepareStatement( - "insert into test values(null, ?)"); + "insert into test(name) values(?)"); restart(); prep.setString(1, "Hello"); restart(); @@ -166,6 +162,7 @@ private void testReconnect() throws Exception { if (i < 10) { throw e; } + break; } } restart(); @@ -187,32 +184,6 @@ private void testReconnect() throws Exception { /** * A database event listener used in this test. 
*/ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { - - @Override - public void closingDatabase() { - // ignore - } - - @Override - public void exceptionThrown(SQLException e, String sql) { - // ignore - } - - @Override - public void init(String u) { - // ignore - } - - @Override - public void opened() { - // ignore - } - - @Override - public void setProgress(int state, String name, int x, int max) { - // ignore - } + public static final class MyDatabaseEventListener implements DatabaseEventListener { } } diff --git a/h2/src/test/org/h2/test/unit/TestBinaryArithmeticStream.java b/h2/src/test/org/h2/test/unit/TestBinaryArithmeticStream.java index be8a86e39d..173691dd1d 100644 --- a/h2/src/test/org/h2/test/unit/TestBinaryArithmeticStream.java +++ b/h2/src/test/org/h2/test/unit/TestBinaryArithmeticStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -28,7 +28,7 @@ public class TestBinaryArithmeticStream extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestBinaryOperation.java b/h2/src/test/org/h2/test/unit/TestBinaryOperation.java new file mode 100644 index 0000000000..606d728d44 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestBinaryOperation.java @@ -0,0 +1,109 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import org.h2.engine.SessionLocal; +import org.h2.expression.BinaryOperation; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Operation0; +import org.h2.message.DbException; +import org.h2.test.TestBase; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Test the binary operation. + */ +public class TestBinaryOperation extends TestBase { + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testPlusMinus(BinaryOperation.OpType.PLUS); + testPlusMinus(BinaryOperation.OpType.MINUS); + testMultiply(); + testDivide(); + } + + private void testPlusMinus(BinaryOperation.OpType type) { + assertPrecisionScale(2, 0, 2, type, 1, 0, 1, 0); + assertPrecisionScale(3, 1, 2, type, 1, 1, 1, 0); + assertPrecisionScale(3, 1, 2, type, 1, 0, 1, 1); + } + + private void testMultiply() { + assertPrecisionScale(2, 0, 2, BinaryOperation.OpType.MULTIPLY, 1, 0, 1, 0); + assertPrecisionScale(2, 1, 2, BinaryOperation.OpType.MULTIPLY, 1, 1, 1, 0); + assertPrecisionScale(2, 1, 2, BinaryOperation.OpType.MULTIPLY, 1, 0, 1, 1); + } + + private void testDivide() { + assertPrecisionScale(3, 2, 2, BinaryOperation.OpType.DIVIDE, 1, 0, 1, 0); + assertPrecisionScale(3, 3, 2, BinaryOperation.OpType.DIVIDE, 1, 1, 1, 0); + assertPrecisionScale(3, 1, 2, BinaryOperation.OpType.DIVIDE, 1, 0, 1, 1); + assertPrecisionScale(25, 0, 10, BinaryOperation.OpType.DIVIDE, 1, 3, 9, 27); + } + + private void assertPrecisionScale(int expectedPrecision, int expectedScale, int expectedDecfloatPrecision, + BinaryOperation.OpType type, int precision1, int scale1, int precision2, int scale2) { + TestExpression left = new TestExpression(TypeInfo.getTypeInfo(Value.NUMERIC, precision1, scale1, null)); + TestExpression right = new 
TestExpression(TypeInfo.getTypeInfo(Value.NUMERIC, precision2, scale2, null)); + TypeInfo typeInfo = new BinaryOperation(type, left, right).optimize(null).getType(); + assertEquals(Value.NUMERIC, typeInfo.getValueType()); + assertEquals(expectedPrecision, typeInfo.getPrecision()); + assertEquals(expectedScale, typeInfo.getScale()); + left = new TestExpression(TypeInfo.getTypeInfo(Value.DECFLOAT, precision1, 0, null)); + right = new TestExpression(TypeInfo.getTypeInfo(Value.DECFLOAT, precision2, 0, null)); + typeInfo = new BinaryOperation(type, left, right).optimize(null).getType(); + assertEquals(Value.DECFLOAT, typeInfo.getValueType()); + assertEquals(expectedDecfloatPrecision, typeInfo.getPrecision()); + } + + private static final class TestExpression extends Operation0 { + + private final TypeInfo type; + + TestExpression(TypeInfo type) { + this.type = type; + } + + @Override + public Value getValue(SessionLocal session) { + throw DbException.getUnsupportedException(""); + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + throw DbException.getUnsupportedException(""); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return false; + } + + @Override + public int getCost() { + return 0; + } + + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestBitStream.java b/h2/src/test/org/h2/test/unit/TestBitStream.java index 88781d09f6..dd53cc55bc 100644 --- a/h2/src/test/org/h2/test/unit/TestBitStream.java +++ b/h2/src/test/org/h2/test/unit/TestBitStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -25,7 +25,7 @@ public class TestBitStream extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestBnf.java b/h2/src/test/org/h2/test/unit/TestBnf.java index 25569ec5e6..71f9113c64 100644 --- a/h2/src/test/org/h2/test/unit/TestBnf.java +++ b/h2/src/test/org/h2/test/unit/TestBnf.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -29,7 +29,7 @@ public class TestBnf extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -48,7 +48,7 @@ public void test() throws Exception { private void testModes(Connection conn) throws Exception { DbContents dbContents; dbContents = new DbContents(); - dbContents.readContents("jdbc:h2:test", conn); + dbContents.readContents("jdbc:h2:./test", conn); assertTrue(dbContents.isH2()); dbContents = new DbContents(); dbContents.readContents("jdbc:derby:test", conn); @@ -88,7 +88,7 @@ private void testProcedures(Connection conn, boolean isMySQLMode) "CREATE TABLE " + "TABLE_WITH_STRING_FIELD (STRING_FIELD VARCHAR(50), INT_FIELD integer)"); DbContents dbContents = new DbContents(); - dbContents.readContents("jdbc:h2:test", conn); + dbContents.readContents("jdbc:h2:./test", conn); assertTrue(dbContents.isH2()); assertFalse(dbContents.isDerby()); assertFalse(dbContents.isFirebird()); diff --git a/h2/src/test/org/h2/test/unit/TestCache.java 
b/h2/src/test/org/h2/test/unit/TestCache.java index 6a113c3613..4f71f0d317 100644 --- a/h2/src/test/org/h2/test/unit/TestCache.java +++ b/h2/src/test/org/h2/test/unit/TestCache.java @@ -1,19 +1,16 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; -import java.io.ByteArrayInputStream; -import java.io.InputStream; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.Random; - import org.h2.message.Trace; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -40,101 +37,17 @@ public class TestCache extends TestDb implements CacheWriter { public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); // test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override public void test() throws Exception { - if (!config.mvStore) { - testTQ(); - } testMemoryUsage(); testCache(); testCacheDb(false); testCacheDb(true); } - private void testTQ() throws Exception { - if (config.memory || config.reopen) { - return; - } - deleteDb("cache"); - Connection conn = getConnection( - "cache;LOG=0;UNDO_LOG=0"); - Statement stat = conn.createStatement(); - stat.execute("create table if not exists lob" + - "(id int primary key, data blob)"); - PreparedStatement prep = conn.prepareStatement( - "insert into lob values(?, ?)"); - Random r = new Random(1); - byte[] buff = new byte[2 * 1024 * 1024]; - for (int i = 0; i < 10; i++) { - prep.setInt(1, i); - r.nextBytes(buff); - prep.setBinaryStream(2, new ByteArrayInputStream(buff), -1); - prep.execute(); - } - stat.execute("create table if not exists test" + - "(id int primary key, data 
varchar)"); - prep = conn.prepareStatement("insert into test values(?, ?)"); - for (int i = 0; i < 20000; i++) { - prep.setInt(1, i); - prep.setString(2, "Hello"); - prep.execute(); - } - conn.close(); - testTQ("LRU", false); - testTQ("TQ", true); - } - - private void testTQ(String cacheType, boolean scanResistant) throws Exception { - Connection conn = getConnection( - "cache;CACHE_TYPE=" + cacheType + ";CACHE_SIZE=5120"); - Statement stat = conn.createStatement(); - PreparedStatement prep; - for (int k = 0; k < 10; k++) { - int rc; - prep = conn.prepareStatement( - "select * from test where id = ?"); - rc = getReadCount(stat); - for (int x = 0; x < 2; x++) { - for (int i = 0; i < 15000; i++) { - prep.setInt(1, i); - prep.executeQuery(); - } - } - int rcData = getReadCount(stat) - rc; - if (scanResistant && k > 0) { - // TQ is expected to keep the data rows in the cache - // even if the LOB is read once in a while - assertEquals(0, rcData); - } else { - assertTrue(rcData > 0); - } - rc = getReadCount(stat); - ResultSet rs = stat.executeQuery( - "select * from lob where id = " + k); - rs.next(); - InputStream in = rs.getBinaryStream(2); - while (in.read() >= 0) { - // ignore - } - in.close(); - int rcLob = getReadCount(stat) - rc; - assertTrue(rcLob > 0); - } - conn.close(); - } - - private static int getReadCount(Statement stat) throws Exception { - ResultSet rs; - rs = stat.executeQuery( - "select value from information_schema.settings " + - "where name = 'info.FILE_READ'"); - rs.next(); - return rs.getInt(1); - } - private void testMemoryUsage() throws SQLException { if (!config.traceTest) { return; @@ -169,8 +82,7 @@ private void testMemoryUsage() throws SQLException { // stat.execute("select data from test where data >= ''"); rs = stat.executeQuery( - "select value from information_schema.settings " + - "where name = 'info.CACHE_SIZE'"); + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'info.CACHE_SIZE'"); rs.next(); int calculated 
= rs.getInt(1); rs = null; @@ -186,12 +98,9 @@ private void testMemoryUsage() throws SQLException { " after closing: " + afterClose); } - private int getRealMemory() { + private static long getRealMemory() { StringUtils.clearCache(); Value.clearCache(); - eatMemory(100); - freeMemory(); - System.gc(); return Utils.getMemoryUsed(); } diff --git a/h2/src/test/org/h2/test/unit/TestCharsetCollator.java b/h2/src/test/org/h2/test/unit/TestCharsetCollator.java index 2c63fb96f9..e1fb1d13fa 100644 --- a/h2/src/test/org/h2/test/unit/TestCharsetCollator.java +++ b/h2/src/test/org/h2/test/unit/TestCharsetCollator.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -26,7 +26,7 @@ public class TestCharsetCollator extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @@ -37,15 +37,11 @@ public void test() throws Exception { testLengthComparison(); testCreationFromCompareMode(); testCreationFromCompareModeWithInvalidCharset(); + testCaseInsensitive(); } private void testCreationFromCompareModeWithInvalidCharset() { - try { - CompareMode.getCollator("CHARSET_INVALID"); - fail(); - } catch (UnsupportedCharsetException e) { - // expected - } + assertThrows(UnsupportedCharsetException.class, () -> CompareMode.getCollator("CHARSET_INVALID")); } private void testCreationFromCompareMode() { @@ -67,4 +63,11 @@ private void testNumberToCharacterComparison() { assertTrue(cp500Collator.compare("A", "1") < 0); assertTrue(utf8Collator.compare("A", "1") > 0); } + + private void testCaseInsensitive() { + CharsetCollator c = new CharsetCollator(StandardCharsets.UTF_8); + c.setStrength(Collator.SECONDARY); + assertEquals(0, c.compare("a", "A")); + } + } diff --git a/h2/src/test/org/h2/test/unit/TestClassLoaderLeak.java b/h2/src/test/org/h2/test/unit/TestClassLoaderLeak.java index 56dc387e45..1a6b4f4719 100644 --- a/h2/src/test/org/h2/test/unit/TestClassLoaderLeak.java +++ b/h2/src/test/org/h2/test/unit/TestClassLoaderLeak.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -39,7 +39,7 @@ public class TestClassLoaderLeak extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestClearReferences.java b/h2/src/test/org/h2/test/unit/TestClearReferences.java deleted file mode 100644 index 8606f25eac..0000000000 --- a/h2/src/test/org/h2/test/unit/TestClearReferences.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.io.File; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.util.ArrayList; - -import org.h2.test.TestBase; -import org.h2.util.MathUtils; -import org.h2.value.ValueInt; - -/** - * Tests if Tomcat would clear static fields when re-loading a web application. - * See also - * http://svn.apache.org/repos/asf/tomcat/trunk/java/org/apache/catalina - * /loader/WebappClassLoader.java - */ -public class TestClearReferences extends TestBase { - - private static final String[] KNOWN_REFRESHED = { - "org.h2.compress.CompressLZF.cachedHashTable", - "org.h2.engine.DbSettings.defaultSettings", - "org.h2.engine.SessionRemote.sessionFactory", - "org.h2.expression.function.DateTimeFunctions.MONTHS_AND_WEEKS", - "org.h2.expression.function.ToChar.NAMES", - "org.h2.jdbcx.JdbcDataSourceFactory.cachedTraceSystem", - "org.h2.store.RecoverTester.instance", - "org.h2.store.fs.FilePath.defaultProvider", - "org.h2.store.fs.FilePath.providers", - "org.h2.store.fs.FilePath.tempRandom", - "org.h2.store.fs.FilePathRec.recorder", - "org.h2.store.fs.FileMemData.data", - "org.h2.tools.CompressTool.cachedBuffer", - "org.h2.util.CloseWatcher.queue", - "org.h2.util.CloseWatcher.refs", - "org.h2.util.DateTimeUtils.timeZone", - "org.h2.util.MathUtils.cachedSecureRandom", - "org.h2.util.NetUtils.cachedLocalAddress", - "org.h2.util.StringUtils.softCache", - 
"org.h2.util.JdbcUtils.allowedClassNames", - "org.h2.util.JdbcUtils.allowedClassNamePrefixes", - "org.h2.util.JdbcUtils.userClassFactories", - "org.h2.util.Task.counter", - "org.h2.value.CompareMode.lastUsed", - "org.h2.value.Value.softCache", - "org.h2.value.ValueBytes.type", - "org.h2.value.ValueCollectionBase.type", - "org.h2.value.ValueDecimal.type", - "org.h2.value.ValueInterval.type", - "org.h2.value.ValueLob.type", - "org.h2.value.ValueLobDb.type", - "org.h2.value.ValueString.type", - }; - - /** - * Path to main sources. In IDE project may be located either in the root - * directory of repository or in the h2 subdirectory. - */ - private final String SOURCE_PATH = new File("h2/src/main/org/h2/Driver.java").exists() - ? "h2/src/main/" : "src/main/"; - - private boolean hasError; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - // initialize the known classes - MathUtils.secureRandomLong(); - ValueInt.get(1); - Class.forName("org.h2.store.fs.FileMemData"); - - clear(); - - if (hasError) { - fail("Tomcat may clear the field above when reloading the web app"); - } - for (String s : KNOWN_REFRESHED) { - String className = s.substring(0, s.lastIndexOf('.')); - String fieldName = s.substring(s.lastIndexOf('.') + 1); - Class clazz = Class.forName(className); - try { - clazz.getDeclaredField(fieldName); - } catch (Exception e) { - fail(s); - } - } - } - - private void clear() throws Exception { - ArrayList> classes = new ArrayList<>(); - findClasses(classes, new File("bin/org/h2")); - findClasses(classes, new File("temp/org/h2")); - for (Class clazz : classes) { - clearClass(clazz); - } - } - - private void findClasses(ArrayList> classes, File file) { - String name = file.getName(); - if (file.isDirectory()) { - if (name.equals("CVS") || name.equals(".svn")) { - return; - } - for (File f : 
file.listFiles()) { - findClasses(classes, f); - } - } else { - if (!name.endsWith(".class")) { - return; - } - if (name.indexOf('$') >= 0) { - return; - } - String className = file.getAbsolutePath().replace('\\', '/'); - className = className.substring(className.lastIndexOf("org/h2")); - String packageName = className.substring(0, className.lastIndexOf('/')); - if (!new File(SOURCE_PATH + packageName).exists()) { - return; - } - className = className.replace('/', '.'); - className = className.substring(0, className.length() - ".class".length()); - Class clazz = null; - try { - clazz = Class.forName(className); - } catch (NoClassDefFoundError e) { - if (e.toString().contains("lucene")) { - // Lucene is not in the classpath, OK - } - } catch (ClassNotFoundException e) { - fail("Could not load " + className + ": " + e.toString()); - } - if (clazz != null) { - classes.add(clazz); - } - } - } - - /** - * This is how Tomcat resets the fields as of 2009-01-30. - * - * @param clazz the class to clear - */ - private void clearClass(Class clazz) throws Exception { - Field[] fields; - try { - fields = clazz.getDeclaredFields(); - } catch (NoClassDefFoundError e) { - if (e.toString().contains("lucene")) { - // Lucene is not in the classpath, OK - return; - } else if (e.toString().contains("jts")) { - // JTS is not in the classpath, OK - return; - } else if (e.toString().contains("slf4j")) { - // slf4j is not in the classpath, OK - return; - } - throw e; - } - for (Field field : fields) { - if (field.getType().isPrimitive() || field.getName().contains("$")) { - continue; - } - int modifiers = field.getModifiers(); - if (!Modifier.isStatic(modifiers)) { - continue; - } - field.setAccessible(true); - Object o = field.get(null); - if (o == null) { - continue; - } - if (Modifier.isFinal(modifiers)) { - if (field.getType().getName().startsWith("java.")) { - continue; - } - if (field.getType().getName().startsWith("javax.")) { - continue; - } - clearInstance(o); - } else { - 
clearField(clazz.getName() + "." + field.getName() + " = " + o); - } - } - } - - private void clearInstance(Object instance) throws Exception { - for (Field field : instance.getClass().getDeclaredFields()) { - if (field.getType().isPrimitive() || field.getName().contains("$")) { - continue; - } - int modifiers = field.getModifiers(); - if (Modifier.isStatic(modifiers) && Modifier.isFinal(modifiers)) { - continue; - } - field.setAccessible(true); - Object o = field.get(instance); - if (o == null) { - continue; - } - // loadedByThisOrChild - if (o.getClass().getName().startsWith("java.lang.")) { - continue; - } - if (o.getClass().isArray() && o.getClass().getComponentType().isPrimitive()) { - continue; - } - clearField(instance.getClass().getName() + "." + field.getName() + " = " + o); - } - } - - private void clearField(String s) { - for (String k : KNOWN_REFRESHED) { - if (s.startsWith(k)) { - return; - } - } - hasError = true; - System.out.println(s); - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestCollation.java b/h2/src/test/org/h2/test/unit/TestCollation.java index 47aab48a91..7e0a9b1520 100644 --- a/h2/src/test/org/h2/test/unit/TestCollation.java +++ b/h2/src/test/org/h2/test/unit/TestCollation.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -23,7 +23,7 @@ public class TestCollation extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestCompress.java b/h2/src/test/org/h2/test/unit/TestCompress.java index ff784d4d2b..7aacdf6c0c 100644 --- a/h2/src/test/org/h2/test/unit/TestCompress.java +++ b/h2/src/test/org/h2/test/unit/TestCompress.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -43,7 +43,7 @@ public class TestCompress extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -159,7 +159,7 @@ private void testDatabase() throws Exception { int pageSize = Constants.DEFAULT_PAGE_SIZE; byte[] buff2 = new byte[pageSize]; byte[] test = new byte[2 * pageSize]; - compress.compress(buff2, pageSize, test, 0); + compress.compress(buff2, 0, pageSize, test, 0); for (int j = 0; j < 4; j++) { long time = System.nanoTime(); for (int i = 0; i < 1000; i++) { @@ -169,7 +169,7 @@ private void testDatabase() throws Exception { if (len < 0) { break; } - compress.compress(buff2, pageSize, test, 0); + compress.compress(buff2, 0, pageSize, test, 0); } in.close(); } @@ -186,7 +186,7 @@ private void testDatabase() throws Exception { if (len < 0) { break; } - int b = compress.compress(buff2, pageSize, test, 0); + int b = compress.compress(buff2, 0, pageSize, test, 0); byte[] data = Arrays.copyOf(test, b); comp.add(data); } diff --git a/h2/src/test/org/h2/test/unit/TestConcurrent.java b/h2/src/test/org/h2/test/unit/TestConcurrentJdbc.java similarity index 78% rename from 
h2/src/test/org/h2/test/unit/TestConcurrent.java rename to h2/src/test/org/h2/test/unit/TestConcurrentJdbc.java index e13c911e53..bf75f5f70e 100644 --- a/h2/src/test/org/h2/test/unit/TestConcurrent.java +++ b/h2/src/test/org/h2/test/unit/TestConcurrentJdbc.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -10,6 +10,7 @@ import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Statement; +import java.util.concurrent.CountDownLatch; import org.h2.api.ErrorCode; import org.h2.test.TestBase; @@ -18,7 +19,7 @@ /** * Test concurrent access to JDBC objects. */ -public class TestConcurrent extends TestBase { +public class TestConcurrentJdbc extends TestBase { /** * Run just this test. @@ -26,7 +27,7 @@ public class TestConcurrent extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -48,10 +49,12 @@ public void test() throws Exception { break; } final PreparedStatement prep = conn.prepareStatement(sql); + final CountDownLatch executedUpdate = new CountDownLatch(1); Task t = new Task() { @Override public void call() throws SQLException { while (!conn.isClosed()) { + executedUpdate.countDown(); switch (x % 6) { case 0: prep.executeQuery(); @@ -76,16 +79,21 @@ public void call() throws SQLException { } }; t.execute(); - Thread.sleep(100); + //Wait until the concurrent task has started + try { + executedUpdate.await(); + } catch (InterruptedException e) { + // ignore + } conn.close(); SQLException e = (SQLException) t.getException(); if (e != null) { if (ErrorCode.OBJECT_CLOSED != e.getErrorCode() && - ErrorCode.STATEMENT_WAS_CANCELED != e.getErrorCode()) { + ErrorCode.STATEMENT_WAS_CANCELED != e.getErrorCode() && + ErrorCode.DATABASE_CALLED_AT_SHUTDOWN != e.getErrorCode()) { throw e; } } } } - } diff --git a/h2/src/test/org/h2/test/unit/TestConnectionInfo.java b/h2/src/test/org/h2/test/unit/TestConnectionInfo.java index 7bfbdf7136..22405b1a1e 100644 --- a/h2/src/test/org/h2/test/unit/TestConnectionInfo.java +++ b/h2/src/test/org/h2/test/unit/TestConnectionInfo.java @@ -1,16 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.File; -import java.util.Properties; import org.h2.api.ErrorCode; import org.h2.engine.ConnectionInfo; -import org.h2.engine.SysProperties; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.tools.DeleteDbFiles; @@ -29,7 +27,7 @@ public class TestConnectionInfo extends TestDb { * @param a ignored */ public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -41,40 +39,29 @@ public void test() throws Exception { } private void testImplicitRelativePath() throws Exception { - if (SysProperties.IMPLICIT_RELATIVE_PATH) { - return; - } - assertThrows(ErrorCode.URL_RELATIVE_TO_CWD, this). - getConnection("jdbc:h2:" + getTestName()); - assertThrows(ErrorCode.URL_RELATIVE_TO_CWD, this). - getConnection("jdbc:h2:data/" + getTestName()); + assertThrows(ErrorCode.URL_RELATIVE_TO_CWD, () -> getConnection("jdbc:h2:" + getTestName())); + assertThrows(ErrorCode.URL_RELATIVE_TO_CWD, () -> getConnection("jdbc:h2:data/" + getTestName())); getConnection("jdbc:h2:./data/" + getTestName()).close(); DeleteDbFiles.execute("data", getTestName(), true); } private void testConnectInitError() throws Exception { - assertThrows(ErrorCode.SYNTAX_ERROR_2, this). - getConnection("jdbc:h2:mem:;init=error"); - assertThrows(ErrorCode.IO_EXCEPTION_2, this). - getConnection("jdbc:h2:mem:;init=runscript from 'wrong.file'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, () -> getConnection("jdbc:h2:mem:;init=error")); + assertThrows(ErrorCode.IO_EXCEPTION_2, () -> getConnection("jdbc:h2:mem:;init=runscript from 'wrong.file'")); } private void testConnectionInfo() { - Properties info = new Properties(); ConnectionInfo connectionInfo = new ConnectionInfo( "jdbc:h2:mem:" + getTestName() + - ";LOG=2" + ";ACCESS_MODE_DATA=rws" + ";INIT=CREATE this...\\;INSERT that..." 
+ ";IFEXISTS=TRUE", - info); + null, null, null); assertEquals("jdbc:h2:mem:" + getTestName(), connectionInfo.getURL()); - assertEquals("2", - connectionInfo.getProperty("LOG", "")); assertEquals("rws", connectionInfo.getProperty("ACCESS_MODE_DATA", "")); assertEquals("CREATE this...;INSERT that...", diff --git a/h2/src/test/org/h2/test/unit/TestDataPage.java b/h2/src/test/org/h2/test/unit/TestDataPage.java deleted file mode 100644 index 8bb057bde1..0000000000 --- a/h2/src/test/org/h2/test/unit/TestDataPage.java +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.math.BigDecimal; -import java.sql.Date; -import java.sql.Time; -import java.util.concurrent.TimeUnit; - -import org.h2.api.JavaObjectSerializer; -import org.h2.result.SimpleResult; -import org.h2.store.Data; -import org.h2.store.DataHandler; -import org.h2.store.FileStore; -import org.h2.store.LobStorageBackend; -import org.h2.test.TestBase; -import org.h2.util.SmallLRUCache; -import org.h2.util.TempFileDeleter; -import org.h2.value.CompareMode; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; -import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; -import org.h2.value.ValueInt; -import org.h2.value.ValueJavaObject; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueStringFixed; -import org.h2.value.ValueStringIgnoreCase; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; -import org.h2.value.ValueTimestampTimeZone; -import org.h2.value.ValueUuid; - -/** - * Data page 
tests. - */ -public class TestDataPage extends TestBase implements DataHandler { - - private boolean testPerformance; - private final CompareMode compareMode = CompareMode.getInstance(null, 0); - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() { - if (testPerformance) { - testPerformance(); - System.exit(0); - return; - } - testValues(); - testAll(); - } - - private static void testPerformance() { - Data data = Data.create(null, 1024, false); - for (int j = 0; j < 4; j++) { - long time = System.nanoTime(); - for (int i = 0; i < 100000; i++) { - data.reset(); - for (int k = 0; k < 30; k++) { - data.writeString("Hello World"); - } - } - // for (int i = 0; i < 5000000; i++) { - // data.reset(); - // for (int k = 0; k < 100; k++) { - // data.writeInt(k * k); - // } - // } - // for (int i = 0; i < 200000; i++) { - // data.reset(); - // for (int k = 0; k < 100; k++) { - // data.writeVarInt(k * k); - // } - // } - System.out.println("write: " + - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time) + - " ms"); - } - for (int j = 0; j < 4; j++) { - long time = System.nanoTime(); - for (int i = 0; i < 1000000; i++) { - data.reset(); - for (int k = 0; k < 30; k++) { - data.readString(); - } - } - // for (int i = 0; i < 3000000; i++) { - // data.reset(); - // for (int k = 0; k < 100; k++) { - // data.readVarInt(); - // } - // } - // for (int i = 0; i < 50000000; i++) { - // data.reset(); - // for (int k = 0; k < 100; k++) { - // data.readInt(); - // } - // } - System.out.println("read: " + - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time) + - " ms"); - } - } - - private void testValues() { - testValue(ValueNull.INSTANCE); - testValue(ValueBoolean.FALSE); - testValue(ValueBoolean.TRUE); - for (int i = 0; i < 256; i++) { - testValue(ValueByte.get((byte) i)); - } - for (int i = 0; i < 256 * 256; i += 10) { - 
testValue(ValueShort.get((short) i)); - } - for (int i = 0; i < 256 * 256; i += 10) { - testValue(ValueInt.get(i)); - testValue(ValueInt.get(-i)); - testValue(ValueLong.get(i)); - testValue(ValueLong.get(-i)); - } - testValue(ValueInt.get(Integer.MAX_VALUE)); - testValue(ValueInt.get(Integer.MIN_VALUE)); - for (long i = 0; i < Integer.MAX_VALUE; i += 10 + i / 4) { - testValue(ValueInt.get((int) i)); - testValue(ValueInt.get((int) -i)); - } - testValue(ValueLong.get(Long.MAX_VALUE)); - testValue(ValueLong.get(Long.MIN_VALUE)); - for (long i = 0; i >= 0; i += 10 + i / 4) { - testValue(ValueLong.get(i)); - testValue(ValueLong.get(-i)); - } - testValue(ValueDecimal.get(BigDecimal.ZERO)); - testValue(ValueDecimal.get(BigDecimal.ONE)); - testValue(ValueDecimal.get(BigDecimal.TEN)); - testValue(ValueDecimal.get(BigDecimal.ONE.negate())); - testValue(ValueDecimal.get(BigDecimal.TEN.negate())); - for (long i = 0; i >= 0; i += 10 + i / 4) { - testValue(ValueDecimal.get(new BigDecimal(i))); - testValue(ValueDecimal.get(new BigDecimal(-i))); - for (int j = 0; j < 200; j += 50) { - testValue(ValueDecimal.get(new BigDecimal(i).setScale(j))); - testValue(ValueDecimal.get(new BigDecimal(i * i).setScale(j))); - } - testValue(ValueDecimal.get(new BigDecimal(i * i))); - } - testValue(ValueDate.get(new Date(System.currentTimeMillis()))); - testValue(ValueDate.get(new Date(0))); - testValue(ValueTime.get(new Time(System.currentTimeMillis()))); - testValue(ValueTime.get(new Time(0))); - testValue(ValueTimestamp.fromMillis(System.currentTimeMillis())); - testValue(ValueTimestamp.fromMillis(0)); - testValue(ValueTimestampTimeZone.parse("2000-01-01 10:00:00")); - testValue(ValueJavaObject.getNoCopy(null, new byte[0], this)); - testValue(ValueJavaObject.getNoCopy(null, new byte[100], this)); - for (int i = 0; i < 300; i++) { - testValue(ValueBytes.getNoCopy(new byte[i])); - } - for (int i = 0; i < 65000; i += 10 + i) { - testValue(ValueBytes.getNoCopy(new byte[i])); - } - 
testValue(ValueUuid.getNewRandom()); - for (int i = 0; i < 100; i++) { - testValue(ValueString.get(new String(new char[i]))); - } - for (int i = 0; i < 65000; i += 10 + i) { - testValue(ValueString.get(new String(new char[i]))); - testValue(ValueStringFixed.get(new String(new char[i]))); - testValue(ValueStringIgnoreCase.get(new String(new char[i]))); - } - testValue(ValueFloat.get(0f)); - testValue(ValueFloat.get(1f)); - testValue(ValueFloat.get(-1f)); - testValue(ValueDouble.get(0)); - testValue(ValueDouble.get(1)); - testValue(ValueDouble.get(-1)); - for (int i = 0; i < 65000; i += 10 + i) { - for (double j = 0.1; j < 65000; j += 10 + j) { - testValue(ValueFloat.get((float) (i / j))); - testValue(ValueDouble.get(i / j)); - testValue(ValueFloat.get((float) -(i / j))); - testValue(ValueDouble.get(-(i / j))); - } - } - testValue(ValueArray.get(new Value[0])); - testValue(ValueArray.get(new Value[] { ValueBoolean.TRUE, - ValueInt.get(10) })); - - SimpleResult rs = new SimpleResult(); - rs.addColumn("ID", "ID", Value.INT, 0, 0); - rs.addColumn("NAME", "NAME", Value.STRING, 255, 0); - rs.addRow(ValueInt.get(1), ValueString.get("Hello")); - rs.addRow(ValueInt.get(2), ValueString.get("World")); - rs.addRow(ValueInt.get(3), ValueString.get("Peace")); - testValue(ValueResultSet.get(rs)); - } - - private void testValue(Value v) { - testValue(v, false); - switch (v.getValueType()) { - case Value.DATE: - case Value.TIME: - case Value.TIMESTAMP: - testValue(v, true); - } - } - - private void testValue(Value v, boolean storeLocalTime) { - Data data = Data.create(null, 1024, storeLocalTime); - data.checkCapacity((int) v.getType().getPrecision()); - data.writeValue(v); - data.writeInt(123); - data.reset(); - Value v2 = data.readValue(); - assertEquals(v.getValueType(), v2.getValueType()); - assertEquals(0, v.compareTo(v2, null, compareMode)); - assertEquals(123, data.readInt()); - } - - private void testAll() { - Data page = Data.create(this, 128, false); - - char[] data = new 
char[0x10000]; - for (int i = 0; i < data.length; i++) { - data[i] = (char) i; - } - String s = new String(data); - page.checkCapacity(s.length() * 4); - page.writeString(s); - int len = page.length(); - assertEquals(len, Data.getStringLen(s)); - page.reset(); - assertEquals(s, page.readString()); - page.reset(); - - page.writeString("H\u1111!"); - page.writeString("John\tBrack's \"how are you\" M\u1111ller"); - page.writeValue(ValueInt.get(10)); - page.writeValue(ValueString.get("test")); - page.writeValue(ValueFloat.get(-2.25f)); - page.writeValue(ValueDouble.get(10.40)); - page.writeValue(ValueNull.INSTANCE); - trace(new String(page.getBytes())); - page.reset(); - - trace(page.readString()); - trace(page.readString()); - trace(page.readValue().getInt()); - trace(page.readValue().getString()); - trace("" + page.readValue().getFloat()); - trace("" + page.readValue().getDouble()); - trace(page.readValue().toString()); - page.reset(); - - page.writeInt(0); - page.writeInt(Integer.MAX_VALUE); - page.writeInt(Integer.MIN_VALUE); - page.writeInt(1); - page.writeInt(-1); - page.writeInt(1234567890); - page.writeInt(54321); - trace(new String(page.getBytes())); - page.reset(); - trace(page.readInt()); - trace(page.readInt()); - trace(page.readInt()); - trace(page.readInt()); - trace(page.readInt()); - trace(page.readInt()); - trace(page.readInt()); - - page = null; - } - - @Override - public String getDatabasePath() { - return null; - } - - @Override - public FileStore openFile(String name, String mode, boolean mustExist) { - return null; - } - - @Override - public void checkPowerOff() { - // nothing to do - } - - @Override - public void checkWritingAllowed() { - // ok - } - - @Override - public int getMaxLengthInplaceLob() { - throw new AssertionError(); - } - - @Override - public String getLobCompressionAlgorithm(int type) { - throw new AssertionError(); - } - - @Override - public Object getLobSyncObject() { - return this; - } - - @Override - public SmallLRUCache 
getLobFileListCache() { - return null; - } - - @Override - public TempFileDeleter getTempFileDeleter() { - return TempFileDeleter.getInstance(); - } - - @Override - public LobStorageBackend getLobStorage() { - return null; - } - - @Override - public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, - int off, int length) { - return -1; - } - - @Override - public JavaObjectSerializer getJavaObjectSerializer() { - return null; - } - - @Override - public CompareMode getCompareMode() { - return compareMode; - } -} diff --git a/h2/src/test/org/h2/test/unit/TestDate.java b/h2/src/test/org/h2/test/unit/TestDate.java index e82f511b2b..739c6b1633 100644 --- a/h2/src/test/org/h2/test/unit/TestDate.java +++ b/h2/src/test/org/h2/test/unit/TestDate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -15,16 +15,20 @@ import java.util.TimeZone; import org.h2.api.ErrorCode; -import org.h2.message.DbException; +import org.h2.api.JavaObjectSerializer; +import org.h2.engine.CastDataProvider; +import org.h2.engine.Mode; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; import org.h2.util.DateTimeUtils; +import org.h2.util.LegacyDateTimeUtils; +import org.h2.util.TimeZoneProvider; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueDate; import org.h2.value.ValueDouble; import org.h2.value.ValueTime; import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; /** * Tests the date parsing. 
The problem is that some dates are not allowed @@ -34,13 +38,46 @@ */ public class TestDate extends TestBase { + static class SimpleCastDataProvider implements CastDataProvider { + + TimeZoneProvider currentTimeZone = DateTimeUtils.getTimeZone(); + + ValueTimestampTimeZone currentTimestamp = DateTimeUtils.currentTimestamp(currentTimeZone); + + @Override + public Mode getMode() { + return Mode.getRegular(); + } + + @Override + public ValueTimestampTimeZone currentTimestamp() { + return currentTimestamp; + } + + @Override + public TimeZoneProvider currentTimeZone() { + return currentTimeZone; + } + + @Override + public JavaObjectSerializer getJavaObjectSerializer() { + return null; + } + + @Override + public boolean zeroBasedEnums() { + return false; + } + + } + /** * Run just this test. * * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -57,20 +94,16 @@ public void test() throws SQLException { private void testValueDate() { assertEquals("2000-01-01", - ValueDate.get(Date.valueOf("2000-01-01")).getString()); - assertEquals("0-00-00", + LegacyDateTimeUtils.fromDate(null, null, Date.valueOf("2000-01-01")).getString()); + assertEquals("0000-00-00", ValueDate.fromDateValue(0).getString()); assertEquals("9999-12-31", ValueDate.parse("9999-12-31").getString()); assertEquals("-9999-12-31", ValueDate.parse("-9999-12-31").getString()); - assertEquals(Integer.MAX_VALUE + "-12-31", - ValueDate.parse(Integer.MAX_VALUE + "-12-31").getString()); - assertEquals(Integer.MIN_VALUE + "-12-31", - ValueDate.parse(Integer.MIN_VALUE + "-12-31").getString()); ValueDate d1 = ValueDate.parse("2001-01-01"); - assertEquals("2001-01-01", d1.getDate().toString()); - assertEquals("DATE '2001-01-01'", d1.getSQL()); + assertEquals("2001-01-01", LegacyDateTimeUtils.toDate(null, null, d1).toString()); + assertEquals("DATE '2001-01-01'", d1.getTraceSQL()); 
assertEquals("DATE '2001-01-01'", d1.toString()); assertEquals(Value.DATE, d1.getValueType()); long dv = d1.getDateValue(); @@ -78,7 +111,6 @@ private void testValueDate() { TypeInfo type = d1.getType(); assertEquals(d1.getString().length(), type.getDisplaySize()); assertEquals(ValueDate.PRECISION, type.getPrecision()); - assertEquals("java.sql.Date", d1.getObject().getClass().getName()); ValueDate d1b = ValueDate.parse("2001-01-01"); assertTrue(d1 == d1b); Value.clearCache(); @@ -94,50 +126,21 @@ private void testValueDate() { assertFalse(d2.equals(d1)); assertEquals(-1, d1.compareTo(d2, null, null)); assertEquals(1, d2.compareTo(d1, null, null)); - - // can't convert using java.util.Date - assertEquals( - Integer.MAX_VALUE + "-12-31 00:00:00", - ValueDate.parse(Integer.MAX_VALUE + "-12-31"). - convertTo(Value.TIMESTAMP).getString()); - assertEquals( - Integer.MIN_VALUE + "-12-31 00:00:00", - ValueDate.parse(Integer.MIN_VALUE + "-12-31"). - convertTo(Value.TIMESTAMP).getString()); - assertEquals( - "00:00:00", - ValueDate.parse(Integer.MAX_VALUE + "-12-31"). - convertTo(Value.TIME).getString()); - assertEquals( - "00:00:00", - ValueDate.parse(Integer.MIN_VALUE + "-12-31"). 
- convertTo(Value.TIME).getString()); } private void testValueTime() { - assertEquals("10:20:30", ValueTime.get(Time.valueOf("10:20:30")).getString()); + assertEquals("10:20:30", LegacyDateTimeUtils.fromTime(null, null, Time.valueOf("10:20:30")).getString()); assertEquals("00:00:00", ValueTime.fromNanos(0).getString()); assertEquals("23:59:59", ValueTime.parse("23:59:59").getString()); assertEquals("11:22:33.444555666", ValueTime.parse("11:22:33.444555666").getString()); - try { - ValueTime.parse("-00:00:00.000000001"); - fail(); - } catch (DbException ex) { - assertEquals(ErrorCode.INVALID_DATETIME_CONSTANT_2, ex.getErrorCode()); - } - try { - ValueTime.parse("24:00:00"); - fail(); - } catch (DbException ex) { - assertEquals(ErrorCode.INVALID_DATETIME_CONSTANT_2, ex.getErrorCode()); - } + assertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2, () -> ValueTime.parse("-00:00:00.000000001")); + assertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2, () -> ValueTime.parse("24:00:00")); ValueTime t1 = ValueTime.parse("11:11:11"); - assertEquals("11:11:11", t1.getTime().toString()); - assertEquals("1970-01-01", t1.getDate().toString()); - assertEquals("TIME '11:11:11'", t1.getSQL()); + assertEquals("11:11:11", LegacyDateTimeUtils.toTime(null, null, t1).toString()); + assertEquals("TIME '11:11:11'", t1.getTraceSQL()); assertEquals("TIME '11:11:11'", t1.toString()); assertEquals("05:35:35.5", t1.multiply(ValueDouble.get(0.5)).getString()); - assertEquals("22:22:22", t1.divide(ValueDouble.get(0.5)).getString()); + assertEquals("22:22:22", t1.divide(ValueDouble.get(0.5), TypeInfo.TYPE_TIME).getString()); assertEquals(Value.TIME, t1.getValueType()); long nanos = t1.getNanos(); assertEquals((int) ((nanos >>> 32) ^ nanos), t1.hashCode()); @@ -145,7 +148,6 @@ private void testValueTime() { TypeInfo type = t1.getType(); assertEquals(ValueTime.MAXIMUM_PRECISION, type.getDisplaySize()); assertEquals(ValueTime.MAXIMUM_PRECISION, type.getPrecision()); - assertEquals("java.sql.Time", 
t1.getObject().getClass().getName()); ValueTime t1b = ValueTime.parse("11:11:11"); assertTrue(t1 == t1b); Value.clearCache(); @@ -170,9 +172,9 @@ private void testValueTimestampWithTimezone() { String s = "2011-" + (m < 10 ? "0" : "") + m + "-" + (d < 10 ? "0" : "") + d + " " + (h < 10 ? "0" : "") + h + ":00:00"; - ValueTimestamp ts = ValueTimestamp.parse(s + "Z"); + ValueTimestamp ts = ValueTimestamp.parse(s + "Z", null); String s2 = ts.getString(); - ValueTimestamp ts2 = ValueTimestamp.parse(s2); + ValueTimestamp ts2 = ValueTimestamp.parse(s2, null); assertEquals(ts.getString(), ts2.getString()); } } @@ -182,36 +184,23 @@ private void testValueTimestampWithTimezone() { @SuppressWarnings("unlikely-arg-type") private void testValueTimestamp() { assertEquals( - "2001-02-03 04:05:06", ValueTimestamp.get( - Timestamp.valueOf( - "2001-02-03 04:05:06")).getString()); + "2001-02-03 04:05:06", + LegacyDateTimeUtils.fromTimestamp(null, null, Timestamp.valueOf("2001-02-03 04:05:06")).getString()); assertEquals( - "2001-02-03 04:05:06.001002003", ValueTimestamp.get( - Timestamp.valueOf( - "2001-02-03 04:05:06.001002003")).getString()); + "2001-02-03 04:05:06.001002003", + LegacyDateTimeUtils.fromTimestamp(null, null, Timestamp.valueOf("2001-02-03 04:05:06.001002003")) + .getString()); assertEquals( - "0-00-00 00:00:00", ValueTimestamp.fromDateValueAndNanos(0, 0).getString()); + "0000-00-00 00:00:00", ValueTimestamp.fromDateValueAndNanos(0, 0).getString()); assertEquals( "9999-12-31 23:59:59", - ValueTimestamp.parse( - "9999-12-31 23:59:59").getString()); + ValueTimestamp.parse("9999-12-31 23:59:59", null).getString()); - assertEquals( - Integer.MAX_VALUE + - "-12-31 01:02:03.04050607", - ValueTimestamp.parse(Integer.MAX_VALUE + - "-12-31 01:02:03.0405060708").getString()); - assertEquals( - Integer.MIN_VALUE + - "-12-31 01:02:03.04050607", - ValueTimestamp.parse(Integer.MIN_VALUE + - "-12-31 01:02:03.0405060708").getString()); - - ValueTimestamp t1 = 
ValueTimestamp.parse("2001-01-01 01:01:01.111"); - assertEquals("2001-01-01 01:01:01.111", t1.getTimestamp().toString()); - assertEquals("2001-01-01", t1.getDate().toString()); - assertEquals("01:01:01", t1.getTime().toString()); - assertEquals("TIMESTAMP '2001-01-01 01:01:01.111'", t1.getSQL()); + ValueTimestamp t1 = ValueTimestamp.parse("2001-01-01 01:01:01.111", null); + assertEquals("2001-01-01 01:01:01.111", LegacyDateTimeUtils.toTimestamp(null, null, t1).toString()); + assertEquals("2001-01-01", LegacyDateTimeUtils.toDate(null, null, t1).toString()); + assertEquals("01:01:01", LegacyDateTimeUtils.toTime(null, null, t1).toString()); + assertEquals("TIMESTAMP '2001-01-01 01:01:01.111'", t1.getTraceSQL()); assertEquals("TIMESTAMP '2001-01-01 01:01:01.111'", t1.toString()); assertEquals(Value.TIMESTAMP, t1.getValueType()); long dateValue = t1.getDateValue(); @@ -224,91 +213,82 @@ private void testValueTimestamp() { assertEquals(ValueTimestamp.MAXIMUM_PRECISION, type.getDisplaySize()); assertEquals(ValueTimestamp.MAXIMUM_PRECISION, type.getPrecision()); assertEquals(9, type.getScale()); - assertEquals("java.sql.Timestamp", t1.getObject().getClass().getName()); - ValueTimestamp t1b = ValueTimestamp.parse("2001-01-01 01:01:01.111"); + ValueTimestamp t1b = ValueTimestamp.parse("2001-01-01 01:01:01.111", null); assertTrue(t1 == t1b); Value.clearCache(); - t1b = ValueTimestamp.parse("2001-01-01 01:01:01.111"); + t1b = ValueTimestamp.parse("2001-01-01 01:01:01.111", null); assertFalse(t1 == t1b); assertTrue(t1.equals(t1)); assertTrue(t1.equals(t1b)); assertTrue(t1b.equals(t1)); assertEquals(0, t1.compareTo(t1b, null, null)); assertEquals(0, t1b.compareTo(t1, null, null)); - ValueTimestamp t2 = ValueTimestamp.parse("2002-02-02 02:02:02.222"); + ValueTimestamp t2 = ValueTimestamp.parse("2002-02-02 02:02:02.222", null); assertFalse(t1.equals(t2)); assertFalse(t2.equals(t1)); assertEquals(-1, t1.compareTo(t2, null, null)); assertEquals(1, t2.compareTo(t1, null, null)); - t1 
= ValueTimestamp.parse("2001-01-01 01:01:01.123456789"); + SimpleCastDataProvider provider = new SimpleCastDataProvider(); + t1 = ValueTimestamp.parse("2001-01-01 01:01:01.123456789", null); assertEquals("2001-01-01 01:01:01.123456789", t1.getString()); assertEquals("2001-01-01 01:01:01.123456789", - t1.convertScale(true, 10).getString()); - assertEquals("2001-01-01 01:01:01.123456789", - t1.convertScale(true, 9).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 9, null), provider).getString()); assertEquals("2001-01-01 01:01:01.12345679", - t1.convertScale(true, 8).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 8, null), provider).getString()); assertEquals("2001-01-01 01:01:01.1234568", - t1.convertScale(true, 7).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 7, null), provider).getString()); assertEquals("2001-01-01 01:01:01.123457", - t1.convertScale(true, 6).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 6, null), provider).getString()); assertEquals("2001-01-01 01:01:01.12346", - t1.convertScale(true, 5).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 5, null), provider).getString()); assertEquals("2001-01-01 01:01:01.1235", - t1.convertScale(true, 4).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 4, null), provider).getString()); assertEquals("2001-01-01 01:01:01.123", - t1.convertScale(true, 3).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 3, null), provider).getString()); assertEquals("2001-01-01 01:01:01.12", - t1.convertScale(true, 2).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 2, null), provider).getString()); assertEquals("2001-01-01 01:01:01.1", - t1.convertScale(true, 1).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 1, null), provider).getString()); assertEquals("2001-01-01 01:01:01", - t1.convertScale(true, 0).getString()); - t1 = 
ValueTimestamp.parse("-2001-01-01 01:01:01.123456789"); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 0, null), provider).getString()); + t1 = ValueTimestamp.parse("-2001-01-01 01:01:01.123456789", null); assertEquals("-2001-01-01 01:01:01.123457", - t1.convertScale(true, 6).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 6, null), provider).getString()); // classes do not match - assertFalse(ValueTimestamp.parse("2001-01-01"). + assertFalse(ValueTimestamp.parse("2001-01-01", null). equals(ValueDate.parse("2001-01-01"))); + provider.currentTimestamp = ValueTimestampTimeZone.fromDateValueAndNanos(DateTimeUtils.EPOCH_DATE_VALUE, 0, + provider.currentTimeZone.getTimeZoneOffsetUTC(0L)); assertEquals("2001-01-01 01:01:01", - ValueTimestamp.parse("2001-01-01").add( - ValueTime.parse("01:01:01")).getString()); + ValueTimestamp.parse("2001-01-01", null).add( + ValueTime.parse("01:01:01").convertTo(TypeInfo.TYPE_TIMESTAMP, provider)).getString()); assertEquals("1010-10-10 00:00:00", - ValueTimestamp.parse("1010-10-10 10:10:10").subtract( - ValueTime.parse("10:10:10")).getString()); + ValueTimestamp.parse("1010-10-10 10:10:10", null).subtract( + ValueTime.parse("10:10:10").convertTo(TypeInfo.TYPE_TIMESTAMP, provider)).getString()); assertEquals("-2001-01-01 01:01:01", - ValueTimestamp.parse("-2001-01-01").add( - ValueTime.parse("01:01:01")).getString()); + ValueTimestamp.parse("-2001-01-01", null).add( + ValueTime.parse("01:01:01").convertTo(TypeInfo.TYPE_TIMESTAMP, provider)).getString()); assertEquals("-1010-10-10 00:00:00", - ValueTimestamp.parse("-1010-10-10 10:10:10").subtract( - ValueTime.parse("10:10:10")).getString()); + ValueTimestamp.parse("-1010-10-10 10:10:10", null).subtract( + ValueTime.parse("10:10:10").convertTo(TypeInfo.TYPE_TIMESTAMP, provider)).getString()); assertEquals(0, DateTimeUtils.absoluteDayFromDateValue( - ValueTimestamp.parse("1970-01-01").getDateValue())); - assertEquals(0, ValueTimestamp.parse( - 
"1970-01-01").getTimeNanos()); - assertEquals(0, ValueTimestamp.parse( - "1970-01-01 00:00:00.000 UTC").getTimestamp().getTime()); - assertEquals(0, ValueTimestamp.parse( - "+1970-01-01T00:00:00.000Z").getTimestamp().getTime()); - assertEquals(0, ValueTimestamp.parse( - "1970-01-01T00:00:00.000+00:00").getTimestamp().getTime()); - assertEquals(0, ValueTimestamp.parse( - "1970-01-01T00:00:00.000-00:00").getTimestamp().getTime()); - new AssertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2) { - @Override - public void test() { - ValueTimestamp.parse("1970-01-01 00:00:00.000 ABC"); - } - }; - new AssertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2) { - @Override - public void test() { - ValueTimestamp.parse("1970-01-01T00:00:00.000+ABC"); - } - }; + ValueTimestamp.parse("1970-01-01", null).getDateValue())); + assertEquals(0, ValueTimestamp.parse("1970-01-01", null).getTimeNanos()); + assertEquals(0, LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("1970-01-01 00:00:00.000 UTC", null)).getTime()); + assertEquals(0, LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("+1970-01-01T00:00:00.000Z", null)).getTime()); + assertEquals(0, LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("1970-01-01T00:00:00.000+00:00", null)).getTime()); + assertEquals(0, LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("1970-01-01T00:00:00.000-00:00", null)).getTime()); + assertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2, + () -> ValueTimestamp.parse("1970-01-01 00:00:00.000 ABC", null)); + assertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2, + () -> ValueTimestamp.parse("1970-01-01T00:00:00.000+ABC", null)); } private void testAbsoluteDay() { @@ -342,7 +322,7 @@ private void testAbsoluteDay() { } private void testValidDate() { - Calendar c = DateTimeUtils.createGregorianCalendar(DateTimeUtils.UTC); + Calendar c = TestDateTimeUtils.createGregorianCalendar(LegacyDateTimeUtils.UTC); c.setLenient(false); for (int y = -2000; y < 
3000; y++) { for (int m = -3; m <= 14; m++) { @@ -352,7 +332,7 @@ private void testValidDate() { assertFalse(valid); } else if (d < 1 || d > 31) { assertFalse(valid); - } else if (y != 1582 && d >= 1 && d <= 27) { + } else if (d <= 27) { assertTrue(valid); } else { if (y <= 0) { @@ -408,8 +388,8 @@ private static void testCalculateLocalMillis() { } private static void testDate(int y, int m, int day) { - long millis = DateTimeUtils.getMillis( - TimeZone.getDefault(), y, m, day, 0, 0, 0, 0); + long millis = LegacyDateTimeUtils.getMillis(null, TimeZone.getDefault(), DateTimeUtils.dateValue(y, m, day), + 0); String st = new java.sql.Date(millis).toString(); int y2 = Integer.parseInt(st.substring(0, 4)); int m2 = Integer.parseInt(st.substring(5, 7)); @@ -444,33 +424,37 @@ public static ArrayList getDistinctTimeZones() { } private void testDateTimeUtils() { - ValueTimestamp ts1 = ValueTimestamp.parse("-999-08-07 13:14:15.16"); - ValueTimestamp ts2 = ValueTimestamp.parse("19999-08-07 13:14:15.16"); - ValueTime t1 = (ValueTime) ts1.convertTo(Value.TIME); - ValueTime t2 = (ValueTime) ts2.convertTo(Value.TIME); - ValueDate d1 = (ValueDate) ts1.convertTo(Value.DATE); - ValueDate d2 = (ValueDate) ts2.convertTo(Value.DATE); - assertEquals("-999-08-07 13:14:15.16", ts1.getString()); - assertEquals("-999-08-07", d1.getString()); - assertEquals("13:14:15.16", t1.getString()); - assertEquals("19999-08-07 13:14:15.16", ts2.getString()); - assertEquals("19999-08-07", d2.getString()); - assertEquals("13:14:15.16", t2.getString()); - ValueTimestamp ts1a = DateTimeUtils.convertTimestamp( - ts1.getTimestamp(), DateTimeUtils.createGregorianCalendar()); - ValueTimestamp ts2a = DateTimeUtils.convertTimestamp( - ts2.getTimestamp(), DateTimeUtils.createGregorianCalendar()); - assertEquals("-999-08-07 13:14:15.16", ts1a.getString()); - assertEquals("19999-08-07 13:14:15.16", ts2a.getString()); - - // test for bug on Java 1.8.0_60 in "Europe/Moscow" timezone. 
- // Doesn't affect most other timezones - long millis = 1407437460000L; - long ms = DateTimeUtils.getTimeUTCWithoutDst(millis); - ms += DateTimeUtils.getTimeZoneOffset(ms); - long result1 = DateTimeUtils.nanosFromLocalMillis(ms); - long result2 = DateTimeUtils.nanosFromLocalMillis(ms); - assertEquals(result1, result2); + TimeZone old = TimeZone.getDefault(); + /* + * java.util.TimeZone doesn't support LMT, so perform this test with + * fixed time zone offset + */ + TimeZone.setDefault(TimeZone.getTimeZone("GMT+01")); + DateTimeUtils.resetCalendar(); + try { + ValueTimestamp ts1 = ValueTimestamp.parse("-999-08-07 13:14:15.16", null); + ValueTimestamp ts2 = ValueTimestamp.parse("19999-08-07 13:14:15.16", null); + ValueTime t1 = (ValueTime) ts1.convertTo(TypeInfo.TYPE_TIME); + ValueTime t2 = (ValueTime) ts2.convertTo(TypeInfo.TYPE_TIME); + ValueDate d1 = ts1.convertToDate(null); + ValueDate d2 = ts2.convertToDate(null); + assertEquals("-0999-08-07 13:14:15.16", ts1.getString()); + assertEquals("-0999-08-07", d1.getString()); + assertEquals("13:14:15.16", t1.getString()); + assertEquals("19999-08-07 13:14:15.16", ts2.getString()); + assertEquals("19999-08-07", d2.getString()); + assertEquals("13:14:15.16", t2.getString()); + TimeZone timeZone = TimeZone.getDefault(); + ValueTimestamp ts1a = LegacyDateTimeUtils.fromTimestamp(null, timeZone, + LegacyDateTimeUtils.toTimestamp(null, null, ts1)); + ValueTimestamp ts2a = LegacyDateTimeUtils.fromTimestamp(null, timeZone, + LegacyDateTimeUtils.toTimestamp(null, null, ts2)); + assertEquals("-0999-08-07 13:14:15.16", ts1a.getString()); + assertEquals("19999-08-07 13:14:15.16", ts2a.getString()); + } finally { + TimeZone.setDefault(old); + DateTimeUtils.resetCalendar(); + } } } diff --git a/h2/src/test/org/h2/test/unit/TestDateIso8601.java b/h2/src/test/org/h2/test/unit/TestDateIso8601.java index 32baafa4bc..b3cfe3fe25 100644 --- a/h2/src/test/org/h2/test/unit/TestDateIso8601.java +++ 
b/h2/src/test/org/h2/test/unit/TestDateIso8601.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Robert Rathsack (firstName dot lastName at gmx dot de) */ package org.h2.test.unit; @@ -31,7 +31,7 @@ private enum Type { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } private static long parse(String s) { @@ -42,13 +42,13 @@ private static long parse(String s) { case DATE: return ValueDate.parse(s).getDateValue(); case TIMESTAMP: - return ValueTimestamp.parse(s).getDateValue(); + return ValueTimestamp.parse(s, null).getDateValue(); case TIMESTAMP_TIMEZONE_0: - return ValueTimestampTimeZone.parse(s + " 00:00:00.0Z").getDateValue(); + return ValueTimestampTimeZone.parse(s + " 00:00:00.0Z", null).getDateValue(); case TIMESTAMP_TIMEZONE_PLUS_18: - return ValueTimestampTimeZone.parse(s + " 00:00:00+18:00").getDateValue(); + return ValueTimestampTimeZone.parse(s + " 00:00:00+18:00", null).getDateValue(); case TIMESTAMP_TIMEZONE_MINUS_18: - return ValueTimestampTimeZone.parse(s + " 00:00:00-18:00").getDateValue(); + return ValueTimestampTimeZone.parse(s + " 00:00:00-18:00", null).getDateValue(); default: throw new IllegalStateException(); } diff --git a/h2/src/test/org/h2/test/unit/TestDateTimeUtils.java b/h2/src/test/org/h2/test/unit/TestDateTimeUtils.java index 4536e7eac6..e3aa5bf848 100644 --- a/h2/src/test/org/h2/test/unit/TestDateTimeUtils.java +++ b/h2/src/test/org/h2/test/unit/TestDateTimeUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -16,6 +16,7 @@ import org.h2.test.TestBase; import org.h2.util.DateTimeUtils; import org.h2.util.IntervalUtils; +import org.h2.util.LegacyDateTimeUtils; import org.h2.value.ValueInterval; import org.h2.value.ValueTimestamp; @@ -24,6 +25,19 @@ */ public class TestDateTimeUtils extends TestBase { + /** + * Creates a proleptic Gregorian calendar for the given timezone using the + * default locale. + * + * @param tz timezone for the calendar, is never null + * @return a new calendar instance. + */ + public static GregorianCalendar createGregorianCalendar(TimeZone tz) { + GregorianCalendar c = new GregorianCalendar(tz); + c.setGregorianChange(LegacyDateTimeUtils.PROLEPTIC_GREGORIAN_CHANGE); + return c; + } + /** * Run just this test. * @@ -39,7 +53,7 @@ public static void main(String... a) throws Exception { return; } } - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -51,6 +65,7 @@ public void test() throws Exception { testUTC2Value(false); testConvertScale(); testParseInterval(); + testGetTimeZoneOffset(); } private void testParseTimeNanosDB2Format() { @@ -66,7 +81,7 @@ private void testParseTimeNanosDB2Format() { * {@link DateTimeUtils#getIsoDayOfWeek(long)}. */ private void testDayOfWeek() { - GregorianCalendar gc = DateTimeUtils.createGregorianCalendar(DateTimeUtils.UTC); + GregorianCalendar gc = createGregorianCalendar(LegacyDateTimeUtils.UTC); for (int i = -1_000_000; i <= 1_000_000; i++) { gc.clear(); gc.setTimeInMillis(i * 86400000L); @@ -95,7 +110,7 @@ private void testDayOfWeek() { * {@link DateTimeUtils#getWeekYear(long, int, int)}. 
*/ private void testWeekOfYear() { - GregorianCalendar gc = new GregorianCalendar(DateTimeUtils.UTC); + GregorianCalendar gc = new GregorianCalendar(LegacyDateTimeUtils.UTC); for (int firstDay = 1; firstDay <= 7; firstDay++) { gc.setFirstDayOfWeek(firstDay); for (int minimalDays = 1; minimalDays <= 7; minimalDays++) { @@ -121,27 +136,29 @@ private void testDateValueFromDenormalizedDate() { assertEquals(dateValue(2001, 2, 28), DateTimeUtils.dateValueFromDenormalizedDate(2000, 14, 29)); assertEquals(dateValue(1999, 8, 1), DateTimeUtils.dateValueFromDenormalizedDate(2000, -4, -100)); assertEquals(dateValue(2100, 12, 31), DateTimeUtils.dateValueFromDenormalizedDate(2100, 12, 2000)); - assertEquals(dateValue(-100, 2, 29), DateTimeUtils.dateValueFromDenormalizedDate(-100, 2, 30)); + assertEquals(dateValue(-100, 2, 28), DateTimeUtils.dateValueFromDenormalizedDate(-100, 2, 30)); } private void testUTC2Value(boolean allTimeZones) { TimeZone def = TimeZone.getDefault(); GregorianCalendar gc = new GregorianCalendar(); - if (allTimeZones) { - try { - for (String id : TimeZone.getAvailableIDs()) { + String[] ids = allTimeZones ? 
TimeZone.getAvailableIDs() + : new String[] { def.getID(), "+10", + // Any time zone with DST in the future (JDK-8073446) + "America/New_York" }; + try { + for (String id : ids) { + if (allTimeZones) { System.out.println(id); - TimeZone tz = TimeZone.getTimeZone(id); - TimeZone.setDefault(tz); - DateTimeUtils.resetCalendar(); - testUTC2ValueImpl(tz, gc); } - } finally { - TimeZone.setDefault(def); + TimeZone tz = TimeZone.getTimeZone(id); + TimeZone.setDefault(tz); DateTimeUtils.resetCalendar(); + testUTC2ValueImpl(tz, gc); } - } else { - testUTC2ValueImpl(def, gc); + } finally { + TimeZone.setDefault(def); + DateTimeUtils.resetCalendar(); } } @@ -158,45 +175,64 @@ private void testUTC2ValueImpl(TimeZone tz, GregorianCalendar gc) { for (int j = 0; j < 48; j++) { gc.set(year, month - 1, day, j / 2, (j & 1) * 30, 0); long timeMillis = gc.getTimeInMillis(); - ValueTimestamp ts = DateTimeUtils.convertTimestamp(new Timestamp(timeMillis), gc); - timeMillis += DateTimeUtils.getTimeZoneOffset(timeMillis); - assertEquals(ts.getDateValue(), DateTimeUtils.dateValueFromLocalMillis(timeMillis)); - assertEquals(ts.getTimeNanos(), DateTimeUtils.nanosFromLocalMillis(timeMillis)); + ValueTimestamp ts = LegacyDateTimeUtils.fromTimestamp(null, null, new Timestamp(timeMillis)); + timeMillis += LegacyDateTimeUtils.getTimeZoneOffsetMillis(null, timeMillis); + assertEquals(ts.getDateValue(), LegacyDateTimeUtils.dateValueFromLocalMillis(timeMillis)); + assertEquals(ts.getTimeNanos(), LegacyDateTimeUtils.nanosFromLocalMillis(timeMillis)); } } } private void testConvertScale() { - assertEquals(555_555_555_555L, DateTimeUtils.convertScale(555_555_555_555L, 9)); - assertEquals(555_555_555_550L, DateTimeUtils.convertScale(555_555_555_554L, 8)); - assertEquals(555_555_555_500L, DateTimeUtils.convertScale(555_555_555_549L, 7)); - assertEquals(555_555_555_000L, DateTimeUtils.convertScale(555_555_555_499L, 6)); - assertEquals(555_555_550_000L, DateTimeUtils.convertScale(555_555_554_999L, 5)); - 
assertEquals(555_555_500_000L, DateTimeUtils.convertScale(555_555_549_999L, 4)); - assertEquals(555_555_000_000L, DateTimeUtils.convertScale(555_555_499_999L, 3)); - assertEquals(555_550_000_000L, DateTimeUtils.convertScale(555_554_999_999L, 2)); - assertEquals(555_500_000_000L, DateTimeUtils.convertScale(555_549_999_999L, 1)); - assertEquals(555_000_000_000L, DateTimeUtils.convertScale(555_499_999_999L, 0)); - assertEquals(555_555_555_555L, DateTimeUtils.convertScale(555_555_555_555L, 9)); - assertEquals(555_555_555_560L, DateTimeUtils.convertScale(555_555_555_555L, 8)); - assertEquals(555_555_555_600L, DateTimeUtils.convertScale(555_555_555_550L, 7)); - assertEquals(555_555_556_000L, DateTimeUtils.convertScale(555_555_555_500L, 6)); - assertEquals(555_555_560_000L, DateTimeUtils.convertScale(555_555_555_000L, 5)); - assertEquals(555_555_600_000L, DateTimeUtils.convertScale(555_555_550_000L, 4)); - assertEquals(555_556_000_000L, DateTimeUtils.convertScale(555_555_500_000L, 3)); - assertEquals(555_560_000_000L, DateTimeUtils.convertScale(555_555_000_000L, 2)); - assertEquals(555_600_000_000L, DateTimeUtils.convertScale(555_550_000_000L, 1)); - assertEquals(556_000_000_000L, DateTimeUtils.convertScale(555_500_000_000L, 0)); - assertEquals(100_999_999_999L, DateTimeUtils.convertScale(100_999_999_999L, 9)); - assertEquals(101_000_000_000L, DateTimeUtils.convertScale(100_999_999_999L, 8)); - assertEquals(101_000_000_000L, DateTimeUtils.convertScale(100_999_999_999L, 7)); - assertEquals(101_000_000_000L, DateTimeUtils.convertScale(100_999_999_999L, 6)); - assertEquals(101_000_000_000L, DateTimeUtils.convertScale(100_999_999_999L, 5)); - assertEquals(101_000_000_000L, DateTimeUtils.convertScale(100_999_999_999L, 4)); - assertEquals(101_000_000_000L, DateTimeUtils.convertScale(100_999_999_999L, 3)); - assertEquals(101_000_000_000L, DateTimeUtils.convertScale(100_999_999_999L, 2)); - assertEquals(101_000_000_000L, DateTimeUtils.convertScale(100_999_999_999L, 1)); - 
assertEquals(101_000_000_000L, DateTimeUtils.convertScale(100_999_999_999L, 0)); + assertEquals(555_555_555_555L, DateTimeUtils.convertScale(555_555_555_555L, 9, Long.MAX_VALUE)); + assertEquals(555_555_555_550L, DateTimeUtils.convertScale(555_555_555_554L, 8, Long.MAX_VALUE)); + assertEquals(555_555_555_500L, DateTimeUtils.convertScale(555_555_555_549L, 7, Long.MAX_VALUE)); + assertEquals(555_555_555_000L, DateTimeUtils.convertScale(555_555_555_499L, 6, Long.MAX_VALUE)); + assertEquals(555_555_550_000L, DateTimeUtils.convertScale(555_555_554_999L, 5, Long.MAX_VALUE)); + assertEquals(555_555_500_000L, DateTimeUtils.convertScale(555_555_549_999L, 4, Long.MAX_VALUE)); + assertEquals(555_555_000_000L, DateTimeUtils.convertScale(555_555_499_999L, 3, Long.MAX_VALUE)); + assertEquals(555_550_000_000L, DateTimeUtils.convertScale(555_554_999_999L, 2, Long.MAX_VALUE)); + assertEquals(555_500_000_000L, DateTimeUtils.convertScale(555_549_999_999L, 1, Long.MAX_VALUE)); + assertEquals(555_000_000_000L, DateTimeUtils.convertScale(555_499_999_999L, 0, Long.MAX_VALUE)); + assertEquals(555_555_555_555L, DateTimeUtils.convertScale(555_555_555_555L, 9, Long.MAX_VALUE)); + assertEquals(555_555_555_560L, DateTimeUtils.convertScale(555_555_555_555L, 8, Long.MAX_VALUE)); + assertEquals(555_555_555_600L, DateTimeUtils.convertScale(555_555_555_550L, 7, Long.MAX_VALUE)); + assertEquals(555_555_556_000L, DateTimeUtils.convertScale(555_555_555_500L, 6, Long.MAX_VALUE)); + assertEquals(555_555_560_000L, DateTimeUtils.convertScale(555_555_555_000L, 5, Long.MAX_VALUE)); + assertEquals(555_555_600_000L, DateTimeUtils.convertScale(555_555_550_000L, 4, Long.MAX_VALUE)); + assertEquals(555_556_000_000L, DateTimeUtils.convertScale(555_555_500_000L, 3, Long.MAX_VALUE)); + assertEquals(555_560_000_000L, DateTimeUtils.convertScale(555_555_000_000L, 2, Long.MAX_VALUE)); + assertEquals(555_600_000_000L, DateTimeUtils.convertScale(555_550_000_000L, 1, Long.MAX_VALUE)); + assertEquals(556_000_000_000L, 
DateTimeUtils.convertScale(555_500_000_000L, 0, Long.MAX_VALUE)); + assertEquals(100_999_999_999L, DateTimeUtils.convertScale(100_999_999_999L, 9, Long.MAX_VALUE)); + assertEquals(100_999_999_999L, DateTimeUtils.convertScale(100_999_999_999L, 9, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_999_999L, DateTimeUtils.convertScale(86_399_999_999_999L, 9, Long.MAX_VALUE)); + for (int i = 8; i >= 0; i--) { + assertEquals(101_000_000_000L, DateTimeUtils.convertScale(100_999_999_999L, i, Long.MAX_VALUE)); + assertEquals(101_000_000_000L, + DateTimeUtils.convertScale(100_999_999_999L, i, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_400_000_000_000L, DateTimeUtils.convertScale(86_399_999_999_999L, i, Long.MAX_VALUE)); + } + assertEquals(86_399_999_999_999L, + DateTimeUtils.convertScale(86_399_999_999_999L, 9, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_999_990L, + DateTimeUtils.convertScale(86_399_999_999_999L, 8, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_999_900L, + DateTimeUtils.convertScale(86_399_999_999_999L, 7, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_999_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 6, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_990_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 5, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_900_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 4, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_000_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 3, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_990_000_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 2, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_900_000_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 1, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_000_000_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 0, DateTimeUtils.NANOS_PER_DAY)); } private void testParseInterval() { @@ -269,4 +305,23 @@ 
private void testParseIntervalImpl(IntervalQualifier qualifier, boolean negative assertEquals(b.toString(), expected.getString()); } + private void testGetTimeZoneOffset() { + TimeZone old = TimeZone.getDefault(); + TimeZone timeZone = TimeZone.getTimeZone("Europe/Paris"); + TimeZone.setDefault(timeZone); + DateTimeUtils.resetCalendar(); + try { + long n = -1111971600; + assertEquals(3_600, DateTimeUtils.getTimeZone().getTimeZoneOffsetUTC(n - 1)); + assertEquals(3_600_000, LegacyDateTimeUtils.getTimeZoneOffsetMillis(null, n * 1_000 - 1)); + assertEquals(0, DateTimeUtils.getTimeZone().getTimeZoneOffsetUTC(n)); + assertEquals(0, LegacyDateTimeUtils.getTimeZoneOffsetMillis(null, n * 1_000)); + assertEquals(0, DateTimeUtils.getTimeZone().getTimeZoneOffsetUTC(n + 1)); + assertEquals(0, LegacyDateTimeUtils.getTimeZoneOffsetMillis(null, n * 1_000 + 1)); + } finally { + TimeZone.setDefault(old); + DateTimeUtils.resetCalendar(); + } + } + } diff --git a/h2/src/test/org/h2/test/unit/TestDbException.java b/h2/src/test/org/h2/test/unit/TestDbException.java index f88fb0e702..014b3d63e8 100644 --- a/h2/src/test/org/h2/test/unit/TestDbException.java +++ b/h2/src/test/org/h2/test/unit/TestDbException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -27,7 +27,7 @@ public class TestDbException extends TestBase { * ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestExit.java b/h2/src/test/org/h2/test/unit/TestExit.java index 6999f29fb7..472a627dc3 100644 --- a/h2/src/test/org/h2/test/unit/TestExit.java +++ b/h2/src/test/org/h2/test/unit/TestExit.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -133,13 +133,7 @@ static File getClosedFile() { /** * A database event listener used in this test. */ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { - - @Override - public void exceptionThrown(SQLException e, String sql) { - // nothing to do - } + public static final class MyDatabaseEventListener implements DatabaseEventListener { @Override public void closingDatabase() { @@ -150,21 +144,6 @@ public void closingDatabase() { } } - @Override - public void setProgress(int state, String name, int x, int max) { - // nothing to do - } - - @Override - public void init(String url) { - // nothing to do - } - - @Override - public void opened() { - // nothing to do - } - } } diff --git a/h2/src/test/org/h2/test/unit/TestFile.java b/h2/src/test/org/h2/test/unit/TestFile.java index f6288c7834..107d5e4099 100644 --- a/h2/src/test/org/h2/test/unit/TestFile.java +++ b/h2/src/test/org/h2/test/unit/TestFile.java @@ -1,15 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; import java.util.Random; -import org.h2.api.JavaObjectSerializer; import org.h2.store.DataHandler; import org.h2.store.FileStore; -import org.h2.store.LobStorageBackend; +import org.h2.store.LobStorageInterface; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.util.SmallLRUCache; @@ -27,7 +26,7 @@ public class TestFile extends TestBase implements DataHandler { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -151,11 +150,6 @@ public String getDatabasePath() { return null; } - @Override - public String getLobCompressionAlgorithm(int type) { - return null; - } - @Override public Object getLobSyncObject() { return null; @@ -182,7 +176,7 @@ public TempFileDeleter getTempFileDeleter() { } @Override - public LobStorageBackend getLobStorage() { + public LobStorageInterface getLobStorage() { return null; } @@ -192,11 +186,6 @@ public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, return -1; } - @Override - public JavaObjectSerializer getJavaObjectSerializer() { - return null; - } - @Override public CompareMode getCompareMode() { return CompareMode.getInstance(null, 0); diff --git a/h2/src/test/org/h2/test/unit/TestFileLock.java b/h2/src/test/org/h2/test/unit/TestFileLock.java index 97dea549c9..716c5b1d22 100644 --- a/h2/src/test/org/h2/test/unit/TestFileLock.java +++ b/h2/src/test/org/h2/test/unit/TestFileLock.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -46,7 +46,7 @@ private String getFile() { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -71,15 +71,14 @@ private void testFsFileLock() throws Exception { String url = "jdbc:h2:" + getBaseDir() + "/fileLock;FILE_LOCK=FS;OPEN_NEW=TRUE"; Connection conn = getConnection(url); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this) - .getConnection(url); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, () -> getConnection(url)); conn.close(); } private void testFutureModificationDate() throws Exception { File f = new File(getFile()); f.delete(); - f.createNewFile(); + assertTrue(f.createNewFile()); f.setLastModified(System.currentTimeMillis() + 10000); FileLock lock = new FileLock(new TraceSystem(null), getFile(), Constants.LOCK_SLEEP); @@ -88,19 +87,14 @@ private void testFutureModificationDate() throws Exception { } private void testSimple() { - FileLock lock1 = new FileLock(new TraceSystem(null), getFile(), - Constants.LOCK_SLEEP); - FileLock lock2 = new FileLock(new TraceSystem(null), getFile(), - Constants.LOCK_SLEEP); + FileLock lock1 = new FileLock(new TraceSystem(null), getFile(), Constants.LOCK_SLEEP); + FileLock lock2 = new FileLock(new TraceSystem(null), getFile(), Constants.LOCK_SLEEP); lock1.lock(FileLockMethod.FILE); - createClassProxy(FileLock.class); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, lock2).lock( - FileLockMethod.FILE); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, () -> lock2.lock(FileLockMethod.FILE)); lock1.unlock(); - lock2 = new FileLock(new TraceSystem(null), getFile(), - Constants.LOCK_SLEEP); - lock2.lock(FileLockMethod.FILE); - lock2.unlock(); + FileLock lock3 = new FileLock(new TraceSystem(null), getFile(), Constants.LOCK_SLEEP); + lock3.lock(FileLockMethod.FILE); + lock3.unlock(); } private void test(boolean allowSocketsLock) 
throws Exception { diff --git a/h2/src/test/org/h2/test/unit/TestFileLockProcess.java b/h2/src/test/org/h2/test/unit/TestFileLockProcess.java index 9e37adb991..b69846f180 100644 --- a/h2/src/test/org/h2/test/unit/TestFileLockProcess.java +++ b/h2/src/test/org/h2/test/unit/TestFileLockProcess.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -28,7 +28,7 @@ public class TestFileLockProcess extends TestDb { public static void main(String... args) throws Exception { SelfDestructor.startCountdown(60); if (args.length == 0) { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); return; } String url = args[0]; diff --git a/h2/src/test/org/h2/test/unit/TestFileLockSerialized.java b/h2/src/test/org/h2/test/unit/TestFileLockSerialized.java deleted file mode 100644 index 13186028cf..0000000000 --- a/h2/src/test/org/h2/test/unit/TestFileLockSerialized.java +++ /dev/null @@ -1,704 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.io.OutputStream; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -import org.h2.api.ErrorCode; -import org.h2.jdbc.JdbcConnection; -import org.h2.store.fs.FileUtils; -import org.h2.test.TestBase; -import org.h2.test.TestDb; -import org.h2.util.SortedProperties; -import org.h2.util.Task; - -/** - * Test the serialized (server-less) mode. - */ -public class TestFileLockSerialized extends TestDb { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public boolean isEnabled() { - if (config.mvStore) { - return false; - } - return true; - } - - @Override - public void test() throws Exception { - println("testSequence"); - testSequence(); - println("testAutoIncrement"); - testAutoIncrement(); - println("testSequenceFlush"); - testSequenceFlush(); - println("testLeftLogFiles"); - testLeftLogFiles(); - println("testWrongDatabaseInstanceOnReconnect"); - testWrongDatabaseInstanceOnReconnect(); - println("testCache()"); - testCache(); - println("testBigDatabase(false)"); - testBigDatabase(false); - println("testBigDatabase(true)"); - testBigDatabase(true); - println("testCheckpointInUpdateRaceCondition"); - testCheckpointInUpdateRaceCondition(); - println("testConcurrentUpdates"); - testConcurrentUpdates(); - println("testThreeMostlyReaders true"); - testThreeMostlyReaders(true); - println("testThreeMostlyReaders false"); - testThreeMostlyReaders(false); - println("testTwoReaders"); - testTwoReaders(); - println("testTwoWriters"); - testTwoWriters(); - println("testPendingWrite"); - testPendingWrite(); - println("testKillWriter"); - testKillWriter(); - 
println("testConcurrentReadWrite"); - testConcurrentReadWrite(); - deleteDb("fileLockSerialized"); - } - - private void testSequence() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized" + - ";FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE;RECONNECT_CHECK_DELAY=10"; - ResultSet rs; - Connection conn1 = getConnection(url); - Statement stat1 = conn1.createStatement(); - stat1.execute("create sequence seq"); - // 5 times RECONNECT_CHECK_DELAY - Thread.sleep(100); - rs = stat1.executeQuery("call seq.nextval"); - rs.next(); - conn1.close(); - } - - private void testSequenceFlush() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - ResultSet rs; - Connection conn1 = getConnection(url); - Statement stat1 = conn1.createStatement(); - stat1.execute("create sequence seq"); - rs = stat1.executeQuery("call seq.nextval"); - rs.next(); - assertEquals(1, rs.getInt(1)); - Connection conn2 = getConnection(url); - Statement stat2 = conn2.createStatement(); - rs = stat2.executeQuery("call seq.nextval"); - rs.next(); - assertEquals(2, rs.getInt(1)); - conn1.close(); - conn2.close(); - } - - private void testThreeMostlyReaders(final boolean write) throws Exception { - boolean longRun = false; - deleteDb("fileLockSerialized"); - final String url = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - - Connection conn = getConnection(url); - conn.createStatement().execute("create table test(id int) as select 1"); - conn.close(); - - final int len = 10; - final Exception[] ex = { null }; - final boolean[] stop = { false }; - Thread[] threads = new Thread[len]; - for (int i = 0; i < len; i++) { - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Connection c = getConnection(url); - PreparedStatement p = c - .prepareStatement("select * from test where id = ?"); - 
while (!stop[0]) { - Thread.sleep(100); - if (write) { - if (Math.random() > 0.9) { - c.createStatement().execute( - "update test set id = id"); - } - } - p.setInt(1, 1); - p.executeQuery(); - p.clearParameters(); - } - c.close(); - } catch (Exception e) { - ex[0] = e; - } - } - }); - t.start(); - threads[i] = t; - } - if (longRun) { - Thread.sleep(40000); - } else { - Thread.sleep(1000); - } - stop[0] = true; - for (int i = 0; i < len; i++) { - threads[i].join(); - } - if (ex[0] != null) { - throw ex[0]; - } - getConnection(url).close(); - } - - private void testTwoReaders() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - Connection conn1 = getConnection(url); - conn1.createStatement().execute("create table test(id int)"); - Connection conn2 = getConnection(url); - Statement stat2 = conn2.createStatement(); - stat2.execute("drop table test"); - stat2.execute("create table test(id identity) as select 1"); - conn2.close(); - conn1.close(); - getConnection(url).close(); - } - - private void testTwoWriters() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized"; - final String writeUrl = url + ";FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - Connection conn = getConnection(writeUrl, "sa", "sa"); - conn.createStatement() - .execute( - "create table test(id identity) as " + - "select x from system_range(1, 100)"); - conn.close(); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - Thread.sleep(10); - Connection c = getConnection(writeUrl, "sa", "sa"); - c.createStatement().execute("select * from test"); - c.close(); - } - } - }.execute(); - Thread.sleep(20); - for (int i = 0; i < 2; i++) { - conn = getConnection(writeUrl, "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("drop table test"); - stat.execute("create table test(id identity) as 
" + - "select x from system_range(1, 100)"); - conn.createStatement().execute("select * from test"); - conn.close(); - } - Thread.sleep(100); - conn = getConnection(writeUrl, "sa", "sa"); - conn.createStatement().execute("select * from test"); - conn.close(); - task.get(); - } - - private void testPendingWrite() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized"; - String writeUrl = url + - ";FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE;WRITE_DELAY=0"; - - Connection conn = getConnection(writeUrl, "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - Thread.sleep(100); - String propFile = getBaseDir() + "/fileLockSerialized.lock.db"; - SortedProperties p = SortedProperties.loadProperties(propFile); - p.setProperty("changePending", "true"); - p.setProperty("modificationDataId", "1000"); - try (OutputStream out = FileUtils.newOutputStream(propFile, false)) { - p.store(out, "test"); - } - Thread.sleep(100); - stat.execute("select * from test"); - conn.close(); - } - - private void testKillWriter() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized"; - String writeUrl = url + - ";FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE;WRITE_DELAY=0"; - - Connection conn = getConnection(writeUrl, "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - ((JdbcConnection) conn).setPowerOffCount(1); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, stat).execute( - "insert into test values(1)"); - - Connection conn2 = getConnection(writeUrl, "sa", "sa"); - Statement stat2 = conn2.createStatement(); - stat2.execute("insert into test values(1)"); - printResult(stat2, "select * from test"); - - conn2.close(); - - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - } - - private void testConcurrentReadWrite() throws Exception { - 
deleteDb("fileLockSerialized"); - - String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized"; - String writeUrl = url + ";FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - // ;TRACE_LEVEL_SYSTEM_OUT=3 - // String readUrl = writeUrl + ";ACCESS_MODE_DATA=R"; - - trace(" create database"); - Class.forName("org.h2.Driver"); - Connection conn = getConnection(writeUrl, "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - - Connection conn3 = getConnection(writeUrl, "sa", "sa"); - PreparedStatement prep3 = conn3 - .prepareStatement("insert into test values(?)"); - - Connection conn2 = getConnection(writeUrl, "sa", "sa"); - Statement stat2 = conn2.createStatement(); - printResult(stat2, "select * from test"); - - stat2.execute("create local temporary table temp(name varchar) not persistent"); - printResult(stat2, "select * from temp"); - - trace(" insert row 1"); - stat.execute("insert into test values(1)"); - trace(" insert row 2"); - prep3.setInt(1, 2); - prep3.execute(); - printResult(stat2, "select * from test"); - printResult(stat2, "select * from temp"); - - conn.close(); - conn2.close(); - conn3.close(); - } - - private void printResult(Statement stat, String sql) throws SQLException { - trace(" query: " + sql); - ResultSet rs = stat.executeQuery(sql); - int rowCount = 0; - while (rs.next()) { - trace(" " + rs.getString(1)); - rowCount++; - } - trace(" " + rowCount + " row(s)"); - } - - private void testConcurrentUpdates() throws Exception { - boolean longRun = false; - if (longRun) { - for (int waitTime = 100; waitTime < 10000; waitTime += 20) { - for (int howManyThreads = 1; howManyThreads < 10; howManyThreads++) { - testConcurrentUpdates(waitTime, howManyThreads, waitTime * - howManyThreads * 10); - } - } - } else { - testConcurrentUpdates(100, 4, 2000); - } - } - - private void testAutoIncrement() throws Exception { - boolean longRun = false; - if (longRun) { - for (int waitTime = 100; waitTime < 10000; 
waitTime += 20) { - for (int howManyThreads = 1; howManyThreads < 10; howManyThreads++) { - testAutoIncrement(waitTime, howManyThreads, 2000); - } - } - } else { - testAutoIncrement(400, 2, 2000); - } - } - - private void testAutoIncrement(final int waitTime, int howManyThreads, - int runTime) throws Exception { - println("testAutoIncrement waitTime: " + waitTime + - " howManyThreads: " + howManyThreads + " runTime: " + runTime); - deleteDb("fileLockSerialized"); - final String url = "jdbc:h2:" + - getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE;" + - "AUTO_RECONNECT=TRUE;MAX_LENGTH_INPLACE_LOB=8192;" + - "COMPRESS_LOB=DEFLATE;CACHE_SIZE=65536"; - - Connection conn = getConnection(url); - conn.createStatement().execute( - "create table test(id int auto_increment, id2 int)"); - conn.close(); - - final long endTime = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(runTime); - final Exception[] ex = { null }; - final Connection[] connList = new Connection[howManyThreads]; - final boolean[] stop = { false }; - final int[] nextInt = { 0 }; - Thread[] threads = new Thread[howManyThreads]; - for (int i = 0; i < howManyThreads; i++) { - final int finalNrOfConnection = i; - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Connection c = getConnection(url); - connList[finalNrOfConnection] = c; - while (!stop[0]) { - synchronized (nextInt) { - ResultSet rs = c.createStatement() - .executeQuery( - "select id, id2 from test"); - while (rs.next()) { - if (rs.getInt(1) != rs.getInt(2)) { - throw new Exception(Thread - .currentThread().getId() + - " nextInt: " + - nextInt[0] + - " rs.getInt(1): " + - rs.getInt(1) + - " rs.getInt(2): " + - rs.getInt(2)); - } - } - nextInt[0]++; - Statement stat = c.createStatement(); - stat.execute("insert into test (id2) values(" + - nextInt[0] + ")"); - ResultSet rsKeys = stat.getGeneratedKeys(); - while (rsKeys.next()) { - assertEquals(nextInt[0], rsKeys.getInt(1)); - } - 
rsKeys.close(); - } - Thread.sleep(waitTime); - } - c.close(); - } catch (Exception e) { - e.printStackTrace(); - ex[0] = e; - } - } - }); - t.start(); - threads[i] = t; - } - while ((ex[0] == null) && (System.nanoTime() < endTime)) { - Thread.sleep(10); - } - - stop[0] = true; - for (int i = 0; i < howManyThreads; i++) { - threads[i].join(); - } - if (ex[0] != null) { - throw ex[0]; - } - getConnection(url).close(); - deleteDb("fileLockSerialized"); - } - - private void testConcurrentUpdates(final int waitTime, int howManyThreads, - int runTime) throws Exception { - println("testConcurrentUpdates waitTime: " + waitTime + - " howManyThreads: " + howManyThreads + " runTime: " + runTime); - deleteDb("fileLockSerialized"); - final String url = "jdbc:h2:" + - getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE;" + - "AUTO_RECONNECT=TRUE;MAX_LENGTH_INPLACE_LOB=8192;" + - "COMPRESS_LOB=DEFLATE;CACHE_SIZE=65536"; - - Connection conn = getConnection(url); - conn.createStatement().execute("create table test(id int)"); - conn.createStatement().execute("insert into test values(1)"); - conn.close(); - - final long endTime = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(runTime); - final Exception[] ex = { null }; - final Connection[] connList = new Connection[howManyThreads]; - final boolean[] stop = { false }; - final int[] lastInt = { 1 }; - Thread[] threads = new Thread[howManyThreads]; - for (int i = 0; i < howManyThreads; i++) { - final int finalNrOfConnection = i; - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Connection c = getConnection(url); - connList[finalNrOfConnection] = c; - while (!stop[0]) { - ResultSet rs = c.createStatement().executeQuery( - "select * from test"); - rs.next(); - if (rs.getInt(1) != lastInt[0]) { - throw new Exception(finalNrOfConnection + - " Expected: " + lastInt[0] + " got " + - rs.getInt(1)); - } - Thread.sleep(waitTime); - if (Math.random() > 0.7) { - int newLastInt = (int) 
(Math.random() * 1000); - c.createStatement().execute( - "update test set id = " + newLastInt); - lastInt[0] = newLastInt; - } - } - c.close(); - } catch (Exception e) { - e.printStackTrace(); - ex[0] = e; - } - } - }); - t.start(); - threads[i] = t; - } - while ((ex[0] == null) && (System.nanoTime() < endTime)) { - Thread.sleep(10); - } - - stop[0] = true; - for (int i = 0; i < howManyThreads; i++) { - threads[i].join(); - } - if (ex[0] != null) { - throw ex[0]; - } - getConnection(url).close(); - deleteDb("fileLockSerialized"); - } - - /** - * If a checkpoint occurs between beforeWriting and checkWritingAllowed then - * the result of checkWritingAllowed is READ_ONLY, which is wrong. - * - * Also, if a checkpoint started before beforeWriting, and ends between - * between beforeWriting and checkWritingAllowed, then the same error - * occurs. - */ - private void testCheckpointInUpdateRaceCondition() throws Exception { - boolean longRun = false; - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - - Connection conn = getConnection(url); - conn.createStatement().execute("create table test(id int)"); - conn.createStatement().execute("insert into test values(1)"); - for (int i = 0; i < (longRun ? 10000 : 5); i++) { - Thread.sleep(402); - conn.createStatement().execute("update test set id = " + i); - } - conn.close(); - deleteDb("fileLockSerialized"); - } - - /** - * Caches must be cleared. 
Session.reconnect only closes the DiskFile (which - * is associated with the cache) if there is one session - */ - private void testCache() throws Exception { - deleteDb("fileLockSerialized"); - - String urlShared = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED"; - - Connection connShared1 = getConnection(urlShared); - Statement statement1 = connShared1.createStatement(); - Connection connShared2 = getConnection(urlShared); - Statement statement2 = connShared2.createStatement(); - - statement1.execute("create table test1(id int)"); - statement1.execute("insert into test1 values(1)"); - - ResultSet rs = statement1.executeQuery("select id from test1"); - rs.close(); - rs = statement2.executeQuery("select id from test1"); - rs.close(); - - statement1.execute("update test1 set id=2"); - Thread.sleep(500); - - rs = statement2.executeQuery("select id from test1"); - assertTrue(rs.next()); - assertEquals(2, rs.getInt(1)); - rs.close(); - - connShared1.close(); - connShared2.close(); - deleteDb("fileLockSerialized"); - } - - private void testWrongDatabaseInstanceOnReconnect() throws Exception { - deleteDb("fileLockSerialized"); - - String urlShared = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED"; - String urlForNew = urlShared + ";OPEN_NEW=TRUE"; - - Connection connShared1 = getConnection(urlShared); - Statement statement1 = connShared1.createStatement(); - Connection connShared2 = getConnection(urlShared); - Connection connNew = getConnection(urlForNew); - statement1.execute("create table test1(id int)"); - connShared1.close(); - connShared2.close(); - connNew.close(); - deleteDb("fileLockSerialized"); - } - - private void testBigDatabase(boolean withCache) { - boolean longRun = false; - final int howMuchRows = longRun ? 2000000 : 500000; - deleteDb("fileLockSerialized"); - int cacheSizeKb = withCache ? 
5000 : 0; - - final CountDownLatch importFinishedLatch = new CountDownLatch(1); - final CountDownLatch select1FinishedLatch = new CountDownLatch(1); - - final String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized" + - ";FILE_LOCK=SERIALIZED" + ";OPEN_NEW=TRUE" + ";CACHE_SIZE=" + - cacheSizeKb; - final Task importUpdateTask = new Task() { - @Override - public void call() throws Exception { - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int, id2 int)"); - for (int i = 0; i < howMuchRows; i++) { - stat.execute("insert into test values(" + i + ", " + i + - ")"); - } - importFinishedLatch.countDown(); - - select1FinishedLatch.await(); - - stat.execute("update test set id2=999 where id=500"); - conn.close(); - } - }; - importUpdateTask.execute(); - - Task selectTask = new Task() { - @Override - public void call() throws Exception { - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - importFinishedLatch.await(); - - ResultSet rs = stat - .executeQuery("select id2 from test where id=500"); - assertTrue(rs.next()); - assertEquals(500, rs.getInt(1)); - rs.close(); - select1FinishedLatch.countDown(); - - // wait until the other task finished - importUpdateTask.get(); - - // can't use the exact same query, otherwise it would use - // the query cache - rs = stat.executeQuery("select id2 from test where id=500+0"); - assertTrue(rs.next()); - assertEquals(999, rs.getInt(1)); - rs.close(); - conn.close(); - } - }; - selectTask.execute(); - - importUpdateTask.get(); - selectTask.get(); - deleteDb("fileLockSerialized"); - } - - private void testLeftLogFiles() throws Exception { - deleteDb("fileLockSerialized"); - - // without serialized - String url; - url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized"; - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int)"); - stat.execute("insert into test 
values(0)"); - conn.close(); - - List filesWithoutSerialized = FileUtils - .newDirectoryStream(getBaseDir()); - deleteDb("fileLockSerialized"); - - // with serialized - url = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED"; - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id int)"); - Thread.sleep(500); - stat.execute("insert into test values(0)"); - conn.close(); - - List filesWithSerialized = FileUtils - .newDirectoryStream(getBaseDir()); - if (filesWithoutSerialized.size() != filesWithSerialized.size()) { - for (int i = 0; i < filesWithoutSerialized.size(); i++) { - if (!filesWithSerialized - .contains(filesWithoutSerialized.get(i))) { - System.out - .println("File left from 'without serialized' mode: " + - filesWithoutSerialized.get(i)); - } - } - for (int i = 0; i < filesWithSerialized.size(); i++) { - if (!filesWithoutSerialized - .contains(filesWithSerialized.get(i))) { - System.out - .println("File left from 'with serialized' mode: " + - filesWithSerialized.get(i)); - } - } - fail("With serialized it must create the same files than without serialized"); - } - deleteDb("fileLockSerialized"); - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestFileSystem.java b/h2/src/test/org/h2/test/unit/TestFileSystem.java index 8e3d96bd50..8bd7dc1ee3 100644 --- a/h2/src/test/org/h2/test/unit/TestFileSystem.java +++ b/h2/src/test/org/h2/test/unit/TestFileSystem.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -12,7 +12,6 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; -import java.nio.channels.FileChannel.MapMode; import java.nio.channels.FileLock; import java.nio.channels.NonWritableChannelException; import java.sql.Connection; @@ -22,6 +21,7 @@ import java.sql.Statement; import java.util.List; import java.util.Random; +import java.util.concurrent.atomic.AtomicIntegerArray; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; import org.h2.dev.fs.FilePathZip2; @@ -29,11 +29,10 @@ import org.h2.mvstore.DataUtils; import org.h2.mvstore.cache.FilePathCache; import org.h2.store.fs.FilePath; -import org.h2.store.fs.FilePathEncrypt; -import org.h2.store.fs.FilePathRec; import org.h2.store.fs.FileUtils; +import org.h2.store.fs.encrypt.FilePathEncrypt; +import org.h2.store.fs.rec.FilePathRec; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; import org.h2.test.utils.FilePathDebug; import org.h2.tools.Backup; import org.h2.tools.DeleteDbFiles; @@ -53,7 +52,7 @@ public class TestFileSystem extends TestBase { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); // test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -63,7 +62,6 @@ public void test() throws Exception { testAbsoluteRelative(); testDirectories(getBaseDir()); testMoveTo(getBaseDir()); - testUnsupportedFeatures(getBaseDir()); FilePathZip2.register(); FilePath.register(new FilePathCache()); FilePathRec.register(); @@ -93,8 +91,7 @@ public void test() throws Exception { testFileSystem("rec:memFS:"); testUserHome(); try { - testFileSystem("nio:" + getBaseDir() + "/fs"); - testFileSystem("cache:nio:" + getBaseDir() + "/fs"); + testFileSystem("cache:" + getBaseDir() + "/fs"); testFileSystem("nioMapped:" + getBaseDir() + "/fs"); testFileSystem("encrypt:0007:" + getBaseDir() + "/fs"); testFileSystem("cache:encrypt:0007:" + getBaseDir() + "/fs"); @@ -204,7 +201,9 @@ private void testZipFileSystem(String prefix, Random r) throws IOException { private void testAbsoluteRelative() { assertFalse(FileUtils.isAbsolute("test/abc")); + assertFalse(FileUtils.isAbsolute("./test/abc")); assertTrue(FileUtils.isAbsolute("~/test/abc")); + assertTrue(FileUtils.isAbsolute("/test/abc")); } private void testMemFsDir() throws IOException { @@ -353,38 +352,14 @@ private void testDatabaseInJar() throws Exception { } private void testReadOnly(final String f) throws IOException { - new AssertThrows(IOException.class) { - @Override - public void test() throws IOException { - FileUtils.newOutputStream(f, false); - }}; - new AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.move(f, f); - }}; - new AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.move(f, f); - }}; - new AssertThrows(IOException.class) { - @Override - public void test() throws IOException { - FileUtils.createTempFile(f, ".tmp", false); - }}; + assertThrows(IOException.class, () -> FileUtils.newOutputStream(f, false)); + assertThrows(DbException.class, () -> 
FileUtils.move(f, f)); + assertThrows(DbException.class, () -> FileUtils.move(f, f)); + assertThrows(IOException.class, () -> FileUtils.createTempFile(f, ".tmp", false)); final FileChannel channel = FileUtils.open(f, "r"); - new AssertThrows(IOException.class) { - @Override - public void test() throws IOException { - channel.write(ByteBuffer.allocate(1)); - }}; - new AssertThrows(IOException.class) { - @Override - public void test() throws IOException { - channel.truncate(0); - }}; - assertTrue(null == channel.tryLock()); + assertThrows(NonWritableChannelException.class, () -> channel.write(ByteBuffer.allocate(1))); + assertThrows(IOException.class, () -> channel.truncate(0)); + assertNull(channel.tryLock()); channel.force(false); channel.close(); } @@ -427,27 +402,19 @@ private void testSetReadOnly(String fsBase) { } } - private static void testDirectories(String fsBase) { + private void testDirectories(String fsBase) { final String fileName = fsBase + "/testFile"; if (FileUtils.exists(fileName)) { FileUtils.delete(fileName); } if (FileUtils.createFile(fileName)) { - new AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.createDirectory(fileName); - }}; - new AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.createDirectories(fileName + "/test"); - }}; + assertThrows(DbException.class, () -> FileUtils.createDirectory(fileName)); + assertThrows(DbException.class, () -> FileUtils.createDirectories(fileName + "/test")); FileUtils.delete(fileName); } } - private static void testMoveTo(String fsBase) { + private void testMoveTo(String fsBase) { final String fileName = fsBase + "/testFile"; final String fileName2 = fsBase + "/testFile2"; if (FileUtils.exists(fileName)) { @@ -456,60 +423,10 @@ private static void testMoveTo(String fsBase) { if (FileUtils.createFile(fileName)) { FileUtils.move(fileName, fileName2); FileUtils.createFile(fileName); - new AssertThrows(DbException.class) { - @Override - public void 
test() { - FileUtils.move(fileName2, fileName); - }}; + assertThrows(DbException.class, () -> FileUtils.move(fileName2, fileName)); FileUtils.delete(fileName); FileUtils.delete(fileName2); - new AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.move(fileName, fileName2); - }}; - } - } - - private static void testUnsupportedFeatures(String fsBase) throws IOException { - final String fileName = fsBase + "/testFile"; - if (FileUtils.exists(fileName)) { - FileUtils.delete(fileName); - } - if (FileUtils.createFile(fileName)) { - final FileChannel channel = FileUtils.open(fileName, "rw"); - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.map(MapMode.PRIVATE, 0, channel.size()); - }}; - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.read(new ByteBuffer[]{ByteBuffer.allocate(10)}, 0, 0); - }}; - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.write(new ByteBuffer[]{ByteBuffer.allocate(10)}, 0, 0); - }}; - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.transferFrom(channel, 0, 0); - }}; - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.transferTo(0, 0, channel); - }}; - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.lock(); - }}; - channel.close(); - FileUtils.delete(fileName); + assertThrows(DbException.class, () -> FileUtils.move(fileName, fileName2)); } } @@ -574,18 +491,8 @@ private void testSimple(final String fsBase) throws Exception { FileUtils.readFully(channel, ByteBuffer.wrap(test, 0, 10000)); assertEquals(buffer, test); final FileChannel fc = channel; - new AssertThrows(IOException.class) { - @Override - 
public void test() throws Exception { - fc.write(ByteBuffer.wrap(test, 0, 10)); - } - }; - new AssertThrows(NonWritableChannelException.class) { - @Override - public void test() throws Exception { - fc.truncate(10); - } - }; + assertThrows(NonWritableChannelException.class, () -> fc.write(ByteBuffer.wrap(test, 0, 10))); + assertThrows(NonWritableChannelException.class, () -> fc.truncate(10)); channel.close(); long lastMod = FileUtils.lastModified(fsBase + "/test"); if (lastMod < time - 1999) { @@ -678,7 +585,6 @@ private void testRandomAccess(String fsBase, int seed) throws Exception { RandomAccessFile ra = new RandomAccessFile(file, "rw"); FileUtils.delete(s); FileChannel f = FileUtils.open(s, "rw"); - assertEquals(s, f.toString()); assertEquals(-1, f.read(ByteBuffer.wrap(new byte[1]))); f.force(true); Random random = new Random(seed); @@ -814,6 +720,8 @@ private void testConcurrent(String fsBase) throws Exception { final FileChannel f = FileUtils.open(s, "rw"); final int size = getSize(10, 50); f.write(ByteBuffer.allocate(size * 64 * 1024)); + AtomicIntegerArray locks = new AtomicIntegerArray(size); + AtomicIntegerArray expected = new AtomicIntegerArray(size); Random random = new Random(1); System.gc(); Task task = new Task() { @@ -823,18 +731,26 @@ public void call() throws Exception { while (!stop) { for (int pos = 0; pos < size; pos++) { byteBuff.clear(); - f.read(byteBuff, pos * 64 * 1024); + int e; + while (!locks.compareAndSet(pos, 0, 1)) { + } + try { + e = expected.get(pos); + f.read(byteBuff, pos * 64 * 1024); + } finally { + locks.set(pos, 0); + } byteBuff.position(0); int x = byteBuff.getInt(); int y = byteBuff.getInt(); - assertEquals(x, y); + assertEquals(e, x); + assertEquals(e, y); Thread.yield(); } } } }; task.execute(); - int[] data = new int[size]; try { ByteBuffer byteBuff = ByteBuffer.allocate(16); int operations = 10000; @@ -844,17 +760,31 @@ public void call() throws Exception { byteBuff.putInt(i); byteBuff.flip(); int pos = 
random.nextInt(size); - f.write(byteBuff, pos * 64 * 1024); - data[pos] = i; + while (!locks.compareAndSet(pos, 0, 1)) { + } + try { + f.write(byteBuff, pos * 64 * 1024); + expected.set(pos, i); + } finally { + locks.set(pos, 0); + } pos = random.nextInt(size); byteBuff.clear(); - f.read(byteBuff, pos * 64 * 1024); + int e; + while (!locks.compareAndSet(pos, 0, 1)) { + } + try { + e = expected.get(pos); + f.read(byteBuff, pos * 64 * 1024); + } finally { + locks.set(pos, 0); + } byteBuff.limit(16); byteBuff.position(0); int x = byteBuff.getInt(); int y = byteBuff.getInt(); - assertEquals(x, y); - assertEquals(data[pos], x); + assertEquals(e, x); + assertEquals(e, y); } } catch (Throwable e) { e.printStackTrace(); diff --git a/h2/src/test/org/h2/test/unit/TestFtp.java b/h2/src/test/org/h2/test/unit/TestFtp.java index dced4a7524..53ba7d2bcd 100644 --- a/h2/src/test/org/h2/test/unit/TestFtp.java +++ b/h2/src/test/org/h2/test/unit/TestFtp.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -26,7 +26,7 @@ public class TestFtp extends TestBase implements FtpEventListener { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestGeometryUtils.java b/h2/src/test/org/h2/test/unit/TestGeometryUtils.java index 33f76eb7f5..6b8f1b54c5 100644 --- a/h2/src/test/org/h2/test/unit/TestGeometryUtils.java +++ b/h2/src/test/org/h2/test/unit/TestGeometryUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -30,7 +30,7 @@ import org.h2.util.geometry.EWKTUtils.EWKTTarget; import org.h2.util.geometry.GeometryUtils; import org.h2.util.geometry.GeometryUtils.DimensionSystemTarget; -import org.h2.util.geometry.GeometryUtils.EnvelopeAndDimensionSystemTarget; +import org.h2.util.geometry.GeometryUtils.EnvelopeTarget; import org.h2.util.geometry.GeometryUtils.Target; import org.h2.util.geometry.JTSUtils; import org.h2.util.geometry.JTSUtils.GeometryTarget; @@ -41,6 +41,7 @@ import org.locationtech.jts.geom.GeometryCollection; import org.locationtech.jts.geom.GeometryFactory; import org.locationtech.jts.geom.Point; +import org.locationtech.jts.io.ParseException; import org.locationtech.jts.io.WKBWriter; import org.locationtech.jts.io.WKTReader; import org.locationtech.jts.io.WKTWriter; @@ -105,7 +106,7 @@ public class TestGeometryUtils extends TestBase { * ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -118,6 +119,8 @@ public void test() throws Exception { testMultiPolygon(); testGeometryCollection(); testEmptyPoint(); + testDimensionXY(); + testDimensionZ(); testDimensionM(); testDimensionZM(); testFiniteOnly(); @@ -130,7 +133,7 @@ private void testPoint() throws Exception { testGeometry("POINT (1 2)", 2); testGeometry("POINT (-1.3 15)", 2); testGeometry("POINT (-1E32 1.000001)", "POINT (-1E32 1.000001)", - "POINT (-100000000000000000000000000000000 1.000001)", 2); + "POINT (-100000000000000000000000000000000 1.000001)", 2, true); testGeometry("POINT Z (2.7 -3 34)", 3); assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("POINTZ(1 2 3)"))); assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("pointz(1 2 3)"))); @@ -146,7 +149,7 @@ private void testLineString() throws Exception { private void testPolygon() throws Exception { testGeometry("POLYGON ((-1 -2, 10 1, 2 20, -1 -2))", 2); - testGeometry("POLYGON EMPTY", 2); + testGeometry("POLYGON EMPTY", "POLYGON EMPTY", "POLYGON EMPTY", 2, false); testGeometry("POLYGON ((-1 -2, 10 1, 2 20, -1 -2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5))", 2); // TODO is EMPTY inner ring valid? 
testGeometry("POLYGON ((-1 -2, 10 1, 2 20, -1 -2), EMPTY)", 2); @@ -156,8 +159,8 @@ private void testPolygon() throws Exception { private void testMultiPoint() throws Exception { testGeometry("MULTIPOINT ((1 2), (3 4))", 2); // Alternative syntax - testGeometry("MULTIPOINT (1 2, 3 4)", "MULTIPOINT ((1 2), (3 4))", "MULTIPOINT ((1 2), (3 4))", 2); - testGeometry("MULTIPOINT (1 2)", "MULTIPOINT ((1 2))", "MULTIPOINT ((1 2))", 2); + testGeometry("MULTIPOINT (1 2, 3 4)", "MULTIPOINT ((1 2), (3 4))", "MULTIPOINT ((1 2), (3 4))", 2, true); + testGeometry("MULTIPOINT (1 2)", "MULTIPOINT ((1 2))", "MULTIPOINT ((1 2))", 2, true); testGeometry("MULTIPOINT EMPTY", 2); testGeometry("MULTIPOINT Z ((1 2 0.5), (3 4 -3))", 3); } @@ -190,38 +193,41 @@ private void testGeometryCollection() throws Exception { } private void testGeometry(String wkt, int numOfDimensions) throws Exception { - testGeometry(wkt, wkt, wkt, numOfDimensions); + testGeometry(wkt, wkt, wkt, numOfDimensions, true); } - private void testGeometry(String wkt, String h2Wkt, String jtsWkt, int numOfDimensions) throws Exception { - Geometry geometryFromJTS = new WKTReader().read(wkt); + private void testGeometry(String wkt, String h2Wkt, String jtsWkt, int numOfDimensions, boolean withEWKB) + throws Exception { + Geometry geometryFromJTS = readWKT(wkt); byte[] wkbFromJTS = new WKBWriter(numOfDimensions).write(geometryFromJTS); // Test WKB->WKT conversion assertEquals(h2Wkt, EWKTUtils.ewkb2ewkt(wkbFromJTS)); - // Test WKT->WKB conversion - assertEquals(wkbFromJTS, EWKTUtils.ewkt2ewkb(wkt)); + if (withEWKB) { + // Test WKT->WKB conversion + assertEquals(wkbFromJTS, EWKTUtils.ewkt2ewkb(wkt)); - // Test WKB->WKB no-op normalization - assertEquals(wkbFromJTS, EWKBUtils.ewkb2ewkb(wkbFromJTS)); + // Test WKB->WKB no-op normalization + assertEquals(wkbFromJTS, EWKBUtils.ewkb2ewkb(wkbFromJTS)); + } // Test WKB->Geometry conversion Geometry geometryFromH2 = JTSUtils.ewkb2geometry(wkbFromJTS); String got = new 
WKTWriter(numOfDimensions).write(geometryFromH2); if (!jtsWkt.equals(got)) { - if (!jtsWkt.replaceAll(" Z", "").equals(got)) { // JTS 1.15 - assertEquals(jtsWkt.replaceAll(" Z ", " Z"), got); // JTS 1.16 - } + assertEquals(jtsWkt.replaceAll(" Z ", " Z"), got); } - // Test Geometry->WKB conversion - assertEquals(wkbFromJTS, JTSUtils.geometry2ewkb(geometryFromJTS)); + if (withEWKB) { + // Test Geometry->WKB conversion + assertEquals(wkbFromJTS, JTSUtils.geometry2ewkb(geometryFromJTS)); + } // Test Envelope Envelope envelopeFromJTS = geometryFromJTS.getEnvelopeInternal(); testEnvelope(envelopeFromJTS, GeometryUtils.getEnvelope(wkbFromJTS)); - EnvelopeAndDimensionSystemTarget target = new EnvelopeAndDimensionSystemTarget(); + EnvelopeTarget target = new EnvelopeTarget(); EWKBUtils.parseEWKB(wkbFromJTS, target); testEnvelope(envelopeFromJTS, target.getEnvelope()); @@ -259,6 +265,65 @@ private void testEmptyPoint() { assertEquals(ewkb, JTSUtils.geometry2ewkb(p)); } + private void testDimensionXY() throws Exception { + byte[] ewkb = EWKTUtils.ewkt2ewkb("POINT (1 2)"); + assertEquals("POINT (1 2)", EWKTUtils.ewkb2ewkt(ewkb)); + Point p = (Point) JTSUtils.ewkb2geometry(ewkb); + CoordinateSequence cs = p.getCoordinateSequence(); + testDimensionXYCheckPoint(cs); + assertEquals(ewkb, JTSUtils.geometry2ewkb(p)); + testDimensions(GeometryUtils.DIMENSION_SYSTEM_XY, ewkb); + testValueGeometryProperties(ewkb); + + p = (Point) readWKT("POINT (1 2)"); + cs = p.getCoordinateSequence(); + testDimensionXYCheckPoint(cs); + ewkb = JTSUtils.geometry2ewkb(p); + assertEquals("POINT (1 2)", EWKTUtils.ewkb2ewkt(ewkb)); + p = (Point) JTSUtils.ewkb2geometry(ewkb); + cs = p.getCoordinateSequence(); + testDimensionXYCheckPoint(cs); + } + + private void testDimensionXYCheckPoint(CoordinateSequence cs) { + assertEquals(2, cs.getDimension()); + assertEquals(0, cs.getMeasures()); + assertEquals(1, cs.getOrdinate(0, X)); + assertEquals(2, cs.getOrdinate(0, Y)); + assertEquals(Double.NaN, cs.getZ(0)); + 
} + + private void testDimensionZ() throws Exception { + byte[] ewkb = EWKTUtils.ewkt2ewkb("POINT Z (1 2 3)"); + assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(ewkb)); + assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("POINTZ(1 2 3)"))); + assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("pointz(1 2 3)"))); + Point p = (Point) JTSUtils.ewkb2geometry(ewkb); + CoordinateSequence cs = p.getCoordinateSequence(); + testDimensionZCheckPoint(cs); + assertEquals(ewkb, JTSUtils.geometry2ewkb(p)); + testDimensions(GeometryUtils.DIMENSION_SYSTEM_XYZ, ewkb); + testValueGeometryProperties(ewkb); + + p = (Point) readWKT("POINT Z (1 2 3)"); + cs = p.getCoordinateSequence(); + testDimensionZCheckPoint(cs); + ewkb = JTSUtils.geometry2ewkb(p); + assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(ewkb)); + p = (Point) JTSUtils.ewkb2geometry(ewkb); + cs = p.getCoordinateSequence(); + testDimensionZCheckPoint(cs); + } + + private void testDimensionZCheckPoint(CoordinateSequence cs) { + assertEquals(3, cs.getDimension()); + assertEquals(0, cs.getMeasures()); + assertEquals(1, cs.getOrdinate(0, X)); + assertEquals(2, cs.getOrdinate(0, Y)); + assertEquals(3, cs.getOrdinate(0, Z)); + assertEquals(3, cs.getZ(0)); + } + private void testDimensionM() throws Exception { byte[] ewkb = EWKTUtils.ewkt2ewkb("POINT M (1 2 3)"); assertEquals("POINT M (1 2 3)", EWKTUtils.ewkb2ewkt(ewkb)); @@ -271,29 +336,23 @@ private void testDimensionM() throws Exception { testDimensions(GeometryUtils.DIMENSION_SYSTEM_XYM, ewkb); testValueGeometryProperties(ewkb); - if (JTSUtils.M_IS_SUPPORTED) { - p = (Point) new WKTReader().read("POINT M (1 2 3)"); - cs = p.getCoordinateSequence(); - assertEquals(3, cs.getDimension()); - assertEquals(1, (int) cs.getClass().getMethod("getMeasures").invoke(cs)); - assertEquals(1, cs.getOrdinate(0, 0)); - assertEquals(2, cs.getOrdinate(0, 1)); - assertEquals(3, cs.getOrdinate(0, 2)); - ewkb = JTSUtils.geometry2ewkb(p); - 
assertEquals("POINT M (1 2 3)", EWKTUtils.ewkb2ewkt(ewkb)); - p = (Point) JTSUtils.ewkb2geometry(ewkb); - cs = p.getCoordinateSequence(); - testDimensionMCheckPoint(cs); - assertEquals(1, (int) cs.getClass().getMethod("getMeasures").invoke(cs)); - } + p = (Point) readWKT("POINT M (1 2 3)"); + cs = p.getCoordinateSequence(); + testDimensionMCheckPoint(cs); + ewkb = JTSUtils.geometry2ewkb(p); + assertEquals("POINT M (1 2 3)", EWKTUtils.ewkb2ewkt(ewkb)); + p = (Point) JTSUtils.ewkb2geometry(ewkb); + cs = p.getCoordinateSequence(); + testDimensionMCheckPoint(cs); } private void testDimensionMCheckPoint(CoordinateSequence cs) { - assertEquals(4, cs.getDimension()); + assertEquals(3, cs.getDimension()); + assertEquals(1, cs.getMeasures()); assertEquals(1, cs.getOrdinate(0, X)); assertEquals(2, cs.getOrdinate(0, Y)); - assertEquals(Double.NaN, cs.getOrdinate(0, Z)); - assertEquals(3, cs.getOrdinate(0, M)); + assertEquals(3, cs.getOrdinate(0, 2)); + assertEquals(3, cs.getM(0)); } private void testDimensionZM() throws Exception { @@ -308,26 +367,25 @@ private void testDimensionZM() throws Exception { testDimensions(GeometryUtils.DIMENSION_SYSTEM_XYZM, ewkb); testValueGeometryProperties(ewkb); - if (JTSUtils.M_IS_SUPPORTED) { - p = (Point) new WKTReader().read("POINT ZM (1 2 3 4)"); - cs = p.getCoordinateSequence(); - testDimensionZMCheckPoint(cs); - assertEquals(1, (int) cs.getClass().getMethod("getMeasures").invoke(cs)); - ewkb = JTSUtils.geometry2ewkb(p); - assertEquals("POINT ZM (1 2 3 4)", EWKTUtils.ewkb2ewkt(ewkb)); - p = (Point) JTSUtils.ewkb2geometry(ewkb); - cs = p.getCoordinateSequence(); - testDimensionZMCheckPoint(cs); - assertEquals(1, (int) cs.getClass().getMethod("getMeasures").invoke(cs)); - } + p = (Point) readWKT("POINT ZM (1 2 3 4)"); + cs = p.getCoordinateSequence(); + testDimensionZMCheckPoint(cs); + ewkb = JTSUtils.geometry2ewkb(p); + assertEquals("POINT ZM (1 2 3 4)", EWKTUtils.ewkb2ewkt(ewkb)); + p = (Point) JTSUtils.ewkb2geometry(ewkb); + cs = 
p.getCoordinateSequence(); + testDimensionZMCheckPoint(cs); } private void testDimensionZMCheckPoint(CoordinateSequence cs) { assertEquals(4, cs.getDimension()); + assertEquals(1, cs.getMeasures()); assertEquals(1, cs.getOrdinate(0, X)); assertEquals(2, cs.getOrdinate(0, Y)); assertEquals(3, cs.getOrdinate(0, Z)); + assertEquals(3, cs.getZ(0)); assertEquals(4, cs.getOrdinate(0, M)); + assertEquals(4, cs.getM(0)); } private void testValueGeometryProperties(byte[] ewkb) { @@ -336,7 +394,8 @@ private void testValueGeometryProperties(byte[] ewkb) { EWKBUtils.parseEWKB(ewkb, target); int dimensionSystem = target.getDimensionSystem(); assertEquals(dimensionSystem, vg.getDimensionSystem()); - String formattedType = EWKTUtils.formatGeometryTypeAndDimensionSystem(vg.getTypeAndDimensionSystem()); + String formattedType = EWKTUtils + .formatGeometryTypeAndDimensionSystem(new StringBuilder(), vg.getTypeAndDimensionSystem()).toString(); assertTrue(EWKTUtils.ewkb2ewkt(ewkb).startsWith(formattedType)); switch (dimensionSystem) { case DIMENSION_SYSTEM_XY: @@ -369,12 +428,7 @@ private void testFiniteOnly() { } private void testFiniteOnly(byte[] ewkb, Target target) { - try { - EWKBUtils.parseEWKB(ewkb, target); - fail(target.getClass().getName() + ' ' + StringUtils.convertBytesToHex(ewkb)); - } catch (IllegalArgumentException e) { - // Expected - } + assertThrows(IllegalArgumentException.class, () -> EWKBUtils.parseEWKB(ewkb, target)); } private void testSRID() throws Exception { @@ -414,9 +468,6 @@ private void testDimensions(int expected, byte[] ewkb) { DimensionSystemTarget dst = new DimensionSystemTarget(); EWKBUtils.parseEWKB(ewkb, dst); assertEquals(expected, dst.getDimensionSystem()); - EnvelopeAndDimensionSystemTarget envelopeAndDimensionTarget = new EnvelopeAndDimensionSystemTarget(); - EWKBUtils.parseEWKB(ewkb, envelopeAndDimensionTarget); - assertEquals(expected, envelopeAndDimensionTarget.getDimensionSystem()); } private void testIntersectionAndUnion() { @@ -464,31 
+515,17 @@ private static double[] getEnvelope(Random r) { } private void testMixedGeometries() throws Exception { - try { - EWKTUtils.ewkt2ewkb(MIXED_WKT); - fail(); - } catch (IllegalArgumentException ex) { - // Expected - } - try { - EWKTUtils.ewkb2ewkt(MIXED_WKB); - fail(); - } catch (IllegalArgumentException ex) { - // Expected - } - try { - JTSUtils.ewkb2geometry(MIXED_WKB); - fail(); - } catch (IllegalArgumentException ex) { - // Expected - } + assertThrows(IllegalArgumentException.class, () -> EWKTUtils.ewkt2ewkb(MIXED_WKT)); + assertThrows(IllegalArgumentException.class, () -> EWKTUtils.ewkb2ewkt(MIXED_WKB)); + assertThrows(IllegalArgumentException.class, () -> JTSUtils.ewkb2geometry(MIXED_WKB)); Geometry g = new WKTReader().read(MIXED_WKT); - try { - JTSUtils.geometry2ewkb(g); - fail(); - } catch (IllegalArgumentException ex) { - // Expected - } + assertThrows(IllegalArgumentException.class, () -> JTSUtils.geometry2ewkb(g)); + } + + private static Geometry readWKT(String text) throws ParseException { + WKTReader reader = new WKTReader(); + reader.setIsOldJtsCoordinateSyntaxAllowed(false); + return reader.read(text); } } diff --git a/h2/src/test/org/h2/test/unit/TestIntArray.java b/h2/src/test/org/h2/test/unit/TestIntArray.java index c4690bf767..04ab6f905d 100644 --- a/h2/src/test/org/h2/test/unit/TestIntArray.java +++ b/h2/src/test/org/h2/test/unit/TestIntArray.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -20,7 +20,7 @@ public class TestIntArray extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestIntIntHashMap.java b/h2/src/test/org/h2/test/unit/TestIntIntHashMap.java deleted file mode 100644 index b2cd561a55..0000000000 --- a/h2/src/test/org/h2/test/unit/TestIntIntHashMap.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.util.Random; - -import org.h2.test.TestBase; -import org.h2.util.IntIntHashMap; - -/** - * Tests the IntHashMap class. - */ -public class TestIntIntHashMap extends TestBase { - - private final Random rand = new Random(); - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() { - IntIntHashMap map = new IntIntHashMap(); - map.put(1, 1); - map.put(1, 2); - assertEquals(1, map.size()); - map.put(0, 1); - map.put(0, 2); - assertEquals(2, map.size()); - rand.setSeed(10); - test(true); - test(false); - } - - private void test(boolean random) { - int len = 2000; - int[] x = new int[len]; - for (int i = 0; i < len; i++) { - int key = random ? 
rand.nextInt() : i; - x[i] = key; - } - IntIntHashMap map = new IntIntHashMap(); - for (int i = 0; i < len; i++) { - map.put(x[i], i); - } - for (int i = 0; i < len; i++) { - if (map.get(x[i]) != i) { - throw new AssertionError("get " + x[i] + " = " + map.get(i) + - " should be " + i); - } - } - for (int i = 1; i < len; i += 2) { - map.remove(x[i]); - } - for (int i = 1; i < len; i += 2) { - if (map.get(x[i]) != -1) { - throw new AssertionError("get " + x[i] + " = " + map.get(i) + - " should be <=0"); - } - } - for (int i = 1; i < len; i += 2) { - map.put(x[i], i); - } - for (int i = 0; i < len; i++) { - if (map.get(x[i]) != i) { - throw new AssertionError("get " + x[i] + " = " + map.get(i) + - " should be " + i); - } - } - } -} diff --git a/h2/src/test/org/h2/test/unit/TestIntPerfectHash.java b/h2/src/test/org/h2/test/unit/TestIntPerfectHash.java index b9e1229dea..1aa4209d0d 100644 --- a/h2/src/test/org/h2/test/unit/TestIntPerfectHash.java +++ b/h2/src/test/org/h2/test/unit/TestIntPerfectHash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; diff --git a/h2/src/test/org/h2/test/unit/TestInterval.java b/h2/src/test/org/h2/test/unit/TestInterval.java index 38c6d07dc3..ddbf276dc6 100644 --- a/h2/src/test/org/h2/test/unit/TestInterval.java +++ b/h2/src/test/org/h2/test/unit/TestInterval.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -27,7 +27,7 @@ public class TestInterval extends TestBase { * ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -68,12 +68,7 @@ private void testOfYearsGood(long years) { } private void testOfYearsBad(long years) { - try { - Interval.ofYears(years); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofYears(years)); } private void testOfMonths() { @@ -95,12 +90,7 @@ private void testOfMonthsGood(long months) { } private void testOfMonthsBad(long months) { - try { - Interval.ofMonths(months); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofMonths(months)); } private void testOfDays() { @@ -122,12 +112,7 @@ private void testOfDaysGood(long days) { } private void testOfDaysBad(long days) { - try { - Interval.ofDays(days); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofDays(days)); } private void testOfHours() { @@ -149,12 +134,7 @@ private void testOfHoursGood(long hours) { } private void testOfHoursBad(long hours) { - try { - Interval.ofHours(hours); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofHours(hours)); } private void testOfMinutes() { @@ -176,12 +156,7 @@ private void testOfMinutesGood(long minutes) { } private void testOfMinutesBad(long minutes) { - try { - Interval.ofMinutes(minutes); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofMinutes(minutes)); } private void testOfSeconds() { @@ -203,12 +178,7 @@ private void testOfSecondsGood(long seconds) { } private void testOfSecondsBad(long 
seconds) { - try { - Interval.ofSeconds(seconds); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofSeconds(seconds)); } private void testOfSeconds2() { @@ -250,12 +220,7 @@ private void testOfSeconds2Good(long seconds, int nanos) { } private void testOfSeconds2Bad(long seconds, int nanos) { - try { - Interval.ofSeconds(seconds, nanos); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofSeconds(seconds, nanos)); } private void testOfNanos() { @@ -317,12 +282,7 @@ private void testOfYearsMonthsGood(long years, int months) { } private void testOfYearsMonthsBad(long years, int months) { - try { - Interval.ofYearsMonths(years, months); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofYearsMonths(years, months)); } private void testOfDaysHours() { @@ -351,18 +311,13 @@ private void testOfDaysHoursGood(long days, int hours) { b.append('-'); } b.append(Math.abs(days)).append(' '); - StringUtils.appendZeroPadded(b, 2, Math.abs(hours)); + StringUtils.appendTwoDigits(b, Math.abs(hours)); b.append("' DAY TO HOUR"); assertEquals(b.toString(), i.toString()); } private void testOfDaysHoursBad(long days, int hours) { - try { - Interval.ofDaysHours(days, hours); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofDaysHours(days, hours)); } private void testOfDaysHoursMinutes() { @@ -397,20 +352,15 @@ private void testOfDaysHoursMinutesGood(long days, int hours, int minutes) { b.append('-'); } b.append(Math.abs(days)).append(' '); - StringUtils.appendZeroPadded(b, 2, Math.abs(hours)); + StringUtils.appendTwoDigits(b, Math.abs(hours)); b.append(':'); - StringUtils.appendZeroPadded(b, 2, Math.abs(minutes)); + StringUtils.appendTwoDigits(b, Math.abs(minutes)); b.append("' DAY TO 
MINUTE"); assertEquals(b.toString(), i.toString()); } private void testOfDaysHoursMinutesBad(long days, int hours, int minutes) { - try { - Interval.ofDaysHoursMinutes(days, hours, minutes); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofDaysHoursMinutes(days, hours, minutes)); } private void testOfDaysHoursMinutesSeconds() { @@ -453,22 +403,18 @@ private void testOfDaysHoursMinutesSecondsGood(long days, int hours, int minutes b.append('-'); } b.append(Math.abs(days)).append(' '); - StringUtils.appendZeroPadded(b, 2, Math.abs(hours)); + StringUtils.appendTwoDigits(b, Math.abs(hours)); b.append(':'); - StringUtils.appendZeroPadded(b, 2, Math.abs(minutes)); + StringUtils.appendTwoDigits(b, Math.abs(minutes)); b.append(':'); - StringUtils.appendZeroPadded(b, 2, Math.abs(seconds)); + StringUtils.appendTwoDigits(b, Math.abs(seconds)); b.append("' DAY TO SECOND"); assertEquals(b.toString(), i.toString()); } private void testOfDaysHoursMinutesSecondsBad(long days, int hours, int minutes, int seconds) { - try { - Interval.ofDaysHoursMinutesSeconds(days, hours, minutes, seconds); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, + () -> Interval.ofDaysHoursMinutesSeconds(days, hours, minutes, seconds)); } private void testOfHoursMinutes() { @@ -497,18 +443,13 @@ private void testOfHoursMinutesGood(long hours, int minutes) { b.append('-'); } b.append(Math.abs(hours)).append(':'); - StringUtils.appendZeroPadded(b, 2, Math.abs(minutes)); + StringUtils.appendTwoDigits(b, Math.abs(minutes)); b.append("' HOUR TO MINUTE"); assertEquals(b.toString(), i.toString()); } private void testOfHoursMinutesBad(long hours, int minutes) { - try { - Interval.ofHoursMinutes(hours, minutes); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofHoursMinutes(hours, minutes)); } private 
void testOfHoursMinutesSeconds() { @@ -545,20 +486,15 @@ private void testOfHoursMinutesSecondsGood(long hours, int minutes, int seconds) b.append('-'); } b.append(Math.abs(hours)).append(':'); - StringUtils.appendZeroPadded(b, 2, Math.abs(minutes)); + StringUtils.appendTwoDigits(b, Math.abs(minutes)); b.append(':'); - StringUtils.appendZeroPadded(b, 2, Math.abs(seconds)); + StringUtils.appendTwoDigits(b, Math.abs(seconds)); b.append("' HOUR TO SECOND"); assertEquals(b.toString(), i.toString()); } private void testOfHoursMinutesSecondsBad(long hours, int minutes, int seconds) { - try { - Interval.ofHoursMinutesSeconds(hours, minutes, seconds); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofHoursMinutesSeconds(hours, minutes, seconds)); } private void testOfMinutesSeconds() { @@ -589,18 +525,13 @@ private void testOfMinutesSecondsGood(long minutes, int seconds) { b.append('-'); } b.append(Math.abs(minutes)).append(':'); - StringUtils.appendZeroPadded(b, 2, Math.abs(seconds)); + StringUtils.appendTwoDigits(b, Math.abs(seconds)); b.append("' MINUTE TO SECOND"); assertEquals(b.toString(), i.toString()); } private void testOfMinutesSecondsBad(long minutes, int seconds) { - try { - Interval.ofMinutesSeconds(minutes, seconds); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Interval.ofMinutesSeconds(minutes, seconds)); } private static void stripTrailingZeroes(StringBuilder b) { diff --git a/h2/src/test/org/h2/test/unit/TestJakartaServlet.java b/h2/src/test/org/h2/test/unit/TestJakartaServlet.java new file mode 100644 index 0000000000..6f6cb83c03 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestJakartaServlet.java @@ -0,0 +1,437 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.io.InputStream; +import java.net.URL; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Enumeration; +import java.util.EventListener; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import jakarta.servlet.Filter; +import jakarta.servlet.FilterRegistration; +import jakarta.servlet.FilterRegistration.Dynamic; +import jakarta.servlet.RequestDispatcher; +import jakarta.servlet.Servlet; +import jakarta.servlet.ServletContext; +import jakarta.servlet.ServletContextEvent; +import jakarta.servlet.ServletException; +import jakarta.servlet.ServletRegistration; +import jakarta.servlet.SessionCookieConfig; +import jakarta.servlet.SessionTrackingMode; +import jakarta.servlet.descriptor.JspConfigDescriptor; +import org.h2.api.ErrorCode; +import org.h2.server.web.JakartaDbStarter; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Tests the JakartaDbStarter servlet. + * This test simulates a minimum servlet container environment. + */ +public class TestJakartaServlet extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + /** + * Minimum ServletContext implementation. + * Most methods are not implemented. 
+ */ + static class TestServletContext implements ServletContext { + + private final Properties initParams = new Properties(); + private final HashMap attributes = new HashMap<>(); + + @Override + public void setAttribute(String key, Object value) { + attributes.put(key, value); + } + + @Override + public Object getAttribute(String key) { + return attributes.get(key); + } + + @Override + public boolean setInitParameter(String key, String value) { + initParams.setProperty(key, value); + return true; + } + + @Override + public String getInitParameter(String key) { + return initParams.getProperty(key); + } + + @Override + public Enumeration getAttributeNames() { + throw new UnsupportedOperationException(); + } + + @Override + public ServletContext getContext(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public Enumeration getInitParameterNames() { + throw new UnsupportedOperationException(); + } + + @Override + public int getMajorVersion() { + throw new UnsupportedOperationException(); + } + + @Override + public String getMimeType(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public int getMinorVersion() { + throw new UnsupportedOperationException(); + } + + @Override + public RequestDispatcher getNamedDispatcher(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public String getRealPath(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public RequestDispatcher getRequestDispatcher(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public URL getResource(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public InputStream getResourceAsStream(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public Set getResourcePaths(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public String getServerInfo() { + throw new 
UnsupportedOperationException(); + } + + /** + * @deprecated as of servlet API 2.1 + */ + @Override + @Deprecated + public Servlet getServlet(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public String getServletContextName() { + throw new UnsupportedOperationException(); + } + + /** + * @deprecated as of servlet API 2.1 + */ + @Deprecated + @Override + public Enumeration getServletNames() { + throw new UnsupportedOperationException(); + } + + /** + * @deprecated as of servlet API 2.0 + */ + @Deprecated + @Override + public Enumeration getServlets() { + throw new UnsupportedOperationException(); + } + + @Override + public void log(String string) { + throw new UnsupportedOperationException(); + } + + /** + * @deprecated as of servlet API 2.1 + */ + @Deprecated + @Override + public void log(Exception exception, String string) { + throw new UnsupportedOperationException(); + } + + @Override + public void log(String string, Throwable throwable) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeAttribute(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public Dynamic addFilter(String arg0, String arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public Dynamic addFilter(String arg0, Filter arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public Dynamic addFilter(String arg0, Class arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public void addListener(String arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void addListener(T arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void addListener(Class arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public jakarta.servlet.ServletRegistration.Dynamic addServlet( + String arg0, String arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public 
jakarta.servlet.ServletRegistration.Dynamic addServlet( + String arg0, Servlet arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public jakarta.servlet.ServletRegistration.Dynamic addServlet( + String arg0, Class arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public T createFilter(Class arg0) + throws ServletException { + throw new UnsupportedOperationException(); + } + + @Override + public T createListener(Class arg0) + throws ServletException { + throw new UnsupportedOperationException(); + } + + @Override + public T createServlet(Class arg0) + throws ServletException { + throw new UnsupportedOperationException(); + } + + @Override + public void declareRoles(String... arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public ClassLoader getClassLoader() { + throw new UnsupportedOperationException(); + } + + @Override + public String getContextPath() { + throw new UnsupportedOperationException(); + } + + @Override + public Set getDefaultSessionTrackingModes() { + throw new UnsupportedOperationException(); + } + + @Override + public int getEffectiveMajorVersion() { + throw new UnsupportedOperationException(); + } + + @Override + public int getEffectiveMinorVersion() { + throw new UnsupportedOperationException(); + } + + @Override + public Set getEffectiveSessionTrackingModes() { + throw new UnsupportedOperationException(); + } + + @Override + public FilterRegistration getFilterRegistration(String arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public Map getFilterRegistrations() { + throw new UnsupportedOperationException(); + } + + @Override + public JspConfigDescriptor getJspConfigDescriptor() { + throw new UnsupportedOperationException(); + } + + @Override + public ServletRegistration getServletRegistration(String arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public Map getServletRegistrations() { + throw new UnsupportedOperationException(); 
+ } + + @Override + public SessionCookieConfig getSessionCookieConfig() { + throw new UnsupportedOperationException(); + } + + + @Override + public void setSessionTrackingModes(Set arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public String getVirtualServerName() { + throw new UnsupportedOperationException(); + } + + @Override + public ServletRegistration.Dynamic addJspFile(String servletName, String jspFile) { + throw new UnsupportedOperationException(); + } + + @Override + public int getSessionTimeout() { + throw new UnsupportedOperationException(); + } + + @Override + public void setSessionTimeout(int sessionTimeout) { + throw new UnsupportedOperationException(); + } + + @Override + public String getRequestCharacterEncoding() { + throw new UnsupportedOperationException(); + } + + @Override + public void setRequestCharacterEncoding(String encoding) { + throw new UnsupportedOperationException(); + } + + @Override + public String getResponseCharacterEncoding() { + throw new UnsupportedOperationException(); + } + + @Override + public void setResponseCharacterEncoding(String encoding) { + throw new UnsupportedOperationException(); + } + + } + + @Override + public boolean isEnabled() { + if (config.networked || config.memory) { + return false; + } + return true; + } + + @Override + public void test() throws SQLException { + JakartaDbStarter listener = new JakartaDbStarter(); + + TestServletContext context = new TestServletContext(); + String url = getURL("servlet", true); + context.setInitParameter("db.url", url); + context.setInitParameter("db.user", getUser()); + context.setInitParameter("db.password", getPassword()); + context.setInitParameter("db.tcpServer", "-tcpPort 8888"); + + ServletContextEvent event = new ServletContextEvent(context); + listener.contextInitialized(event); + + Connection conn1 = listener.getConnection(); + Connection conn1a = (Connection) context.getAttribute("connection"); + assertTrue(conn1 == conn1a); + Statement 
stat1 = conn1.createStatement(); + stat1.execute("CREATE TABLE T(ID INT)"); + + String u2 = url.substring(url.indexOf("servlet")); + u2 = "jdbc:h2:tcp://localhost:8888/" + getBaseDir() + "/" + u2; + Connection conn2 = DriverManager.getConnection( + u2, getUser(), getPassword()); + Statement stat2 = conn2.createStatement(); + stat2.execute("SELECT * FROM T"); + stat2.execute("DROP TABLE T"); + + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat1). + execute("SELECT * FROM T"); + conn2.close(); + + listener.contextDestroyed(event); + + // listener must be stopped + assertThrows(ErrorCode.CONNECTION_BROKEN_1, + () -> getConnection("jdbc:h2:tcp://localhost:8888/" + getBaseDir() + "/servlet", getUser(), + getPassword())); + + // connection must be closed + assertThrows(ErrorCode.OBJECT_CLOSED, stat1). + execute("SELECT * FROM DUAL"); + + deleteDb("servlet"); + + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestJmx.java b/h2/src/test/org/h2/test/unit/TestJmx.java index 32321bd37b..20f6aea825 100644 --- a/h2/src/test/org/h2/test/unit/TestJmx.java +++ b/h2/src/test/org/h2/test/unit/TestJmx.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -19,7 +19,6 @@ import org.h2.engine.Constants; import org.h2.test.TestBase; import org.h2.test.TestDb; -import org.h2.util.Utils; /** * Tests the JMX feature. @@ -33,8 +32,7 @@ public class TestJmx extends TestDb { */ public static void main(String... 
a) throws Exception { TestBase base = TestBase.createCaller().init(); - base.config.mvStore = false; - base.test(); + base.testFromMain(); } @Override @@ -70,31 +68,8 @@ public void test() throws Exception { getAttribute(name, "FileReadCount").toString()); assertEquals("0", mbeanServer. getAttribute(name, "FileWriteCount").toString()); - assertEquals("0", mbeanServer. - getAttribute(name, "FileWriteCountTotal").toString()); - if (config.mvStore) { - assertEquals("1", mbeanServer. - getAttribute(name, "LogMode").toString()); - mbeanServer.setAttribute(name, new Attribute("LogMode", 2)); - assertEquals("2", mbeanServer. - getAttribute(name, "LogMode").toString()); - } assertEquals("REGULAR", mbeanServer. getAttribute(name, "Mode").toString()); - if (config.multiThreaded) { - assertEquals("true", mbeanServer. - getAttribute(name, "MultiThreaded").toString()); - } else { - assertEquals("false", mbeanServer. - getAttribute(name, "MultiThreaded").toString()); - } - if (config.mvStore) { - assertEquals("true", mbeanServer. - getAttribute(name, "Mvcc").toString()); - } else { - assertEquals("false", mbeanServer. - getAttribute(name, "Mvcc").toString()); - } assertEquals("false", mbeanServer. getAttribute(name, "ReadOnly").toString()); assertEquals("1", mbeanServer. @@ -102,9 +77,8 @@ public void test() throws Exception { mbeanServer.setAttribute(name, new Attribute("TraceLevel", 0)); assertEquals("0", mbeanServer. getAttribute(name, "TraceLevel").toString()); - assertTrue(mbeanServer. 
- getAttribute(name, "Version").toString().startsWith("1.")); - assertEquals(14, info.getAttributes().length); + assertEquals(Constants.FULL_VERSION, mbeanServer.getAttribute(name, "Version").toString()); + assertEquals(10, info.getAttributes().length); result = mbeanServer.invoke(name, "listSettings", null, null).toString(); assertContains(result, "ANALYZE_AUTO"); @@ -114,11 +88,7 @@ public void test() throws Exception { result = mbeanServer.invoke(name, "listSessions", null, null).toString(); assertContains(result, "session id"); - if (config.mvStore) { - assertContains(result, "read lock"); - } else { - assertContains(result, "write lock"); - } + assertContains(result, "read lock"); assertEquals(2, info.getOperations().length); assertContains(info.getDescription(), "database"); @@ -148,47 +118,24 @@ public void test() throws Exception { if (config.memory) { assertEquals("0", mbeanServer. getAttribute(name, "CacheSizeMax").toString()); - } else if (config.mvStore) { - assertEquals("16384", mbeanServer. - getAttribute(name, "CacheSizeMax").toString()); } else { - int cacheSize = Utils.scaleForAvailableMemory( - Constants.CACHE_SIZE_DEFAULT); - assertEquals("" + cacheSize, mbeanServer. + assertEquals("16384", mbeanServer. getAttribute(name, "CacheSizeMax").toString()); } mbeanServer.setAttribute(name, new Attribute("CacheSizeMax", 1)); if (config.memory) { assertEquals("0", mbeanServer. getAttribute(name, "CacheSizeMax").toString()); - } else if (config.mvStore) { + } else { assertEquals("1024", mbeanServer. getAttribute(name, "CacheSizeMax").toString()); assertEquals("0", mbeanServer. getAttribute(name, "CacheSize").toString()); assertTrue(0 < (Long) mbeanServer. getAttribute(name, "FileReadCount")); - assertTrue(0 < (Long) mbeanServer. - getAttribute(name, "FileWriteCount")); - assertEquals("0", mbeanServer. - getAttribute(name, "FileWriteCountTotal").toString()); - } else { - assertEquals("1", mbeanServer. 
- getAttribute(name, "CacheSizeMax").toString()); - assertTrue(0 < (Integer) mbeanServer. - getAttribute(name, "CacheSize")); - assertTrue(0 < (Long) mbeanServer. - getAttribute(name, "FileSize")); - assertTrue(0 < (Long) mbeanServer. - getAttribute(name, "FileReadCount")); - assertTrue(0 < (Long) mbeanServer. - getAttribute(name, "FileWriteCount")); - assertTrue(0 < (Long) mbeanServer. - getAttribute(name, "FileWriteCountTotal")); + // FileWriteCount can be not yet updated and may return 0 + assertTrue(0 <= (Long) mbeanServer.getAttribute(name, "FileWriteCount")); } - mbeanServer.setAttribute(name, new Attribute("LogMode", 0)); - assertEquals("0", mbeanServer. - getAttribute(name, "LogMode").toString()); conn.close(); diff --git a/h2/src/test/org/h2/test/unit/TestJsonUtils.java b/h2/src/test/org/h2/test/unit/TestJsonUtils.java new file mode 100644 index 0000000000..35b3bae17f --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestJsonUtils.java @@ -0,0 +1,340 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.math.BigDecimal; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.Callable; + +import org.h2.test.TestBase; +import org.h2.util.json.JSONByteArrayTarget; +import org.h2.util.json.JSONBytesSource; +import org.h2.util.json.JSONItemType; +import org.h2.util.json.JSONStringSource; +import org.h2.util.json.JSONStringTarget; +import org.h2.util.json.JSONTarget; +import org.h2.util.json.JSONValidationTargetWithUniqueKeys; +import org.h2.util.json.JSONValidationTargetWithoutUniqueKeys; +import org.h2.util.json.JSONValueTarget; + +/** + * Tests the classes from org.h2.util.json package. 
+ */ +public class TestJsonUtils extends TestBase { + + private static final Charset[] CHARSETS = { StandardCharsets.UTF_8, StandardCharsets.UTF_16BE, + StandardCharsets.UTF_16LE, Charset.forName("UTF-32BE"), Charset.forName("UTF-32LE") }; + + private static final Callable> STRING_TARGET = () -> new JSONStringTarget(); + + private static final Callable> BYTES_TARGET = () -> new JSONByteArrayTarget(); + + private static final Callable> VALUE_TARGET = () -> new JSONValueTarget(); + + private static final Callable> JSON_VALIDATION_TARGET_WITHOUT_UNIQUE_KEYS = // + () -> new JSONValidationTargetWithoutUniqueKeys(); + + private static final Callable> JSON_VALIDATION_TARGET_WITH_UNIQUE_KEYS = // + () -> new JSONValidationTargetWithUniqueKeys(); + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testTargetErrorDetection(); + testSourcesAndTargets(); + testUtfError(); + testLongNesting(); + testEncodeString(); + } + + private void testTargetErrorDetection() throws Exception { + testTargetErrorDetection(STRING_TARGET); + testTargetErrorDetection(BYTES_TARGET); + testTargetErrorDetection(VALUE_TARGET); + testTargetErrorDetection(JSON_VALIDATION_TARGET_WITHOUT_UNIQUE_KEYS); + testTargetErrorDetection(JSON_VALIDATION_TARGET_WITH_UNIQUE_KEYS); + } + + private void testTargetErrorDetection(Callable> constructor) throws Exception { + assertThrows(RuntimeException.class, () -> constructor.call().endObject()); + assertThrows(RuntimeException.class, () -> constructor.call().endArray()); + // Unexpected member without object + assertThrows(RuntimeException.class, () -> constructor.call().member("1")); + // Unexpected member inside array + JSONTarget target1 = constructor.call(); + target1.startArray(); + assertThrows(RuntimeException.class, () -> target1.member("1")); + // Unexpected member without value 
+ JSONTarget target2 = constructor.call(); + target2.startObject(); + target2.member("1"); + assertThrows(RuntimeException.class, () -> target2.member("2")); + JSONTarget target3 = constructor.call(); + target3.startObject(); + target3.member("1"); + assertThrows(RuntimeException.class, () -> target3.endObject()); + // Unexpected value without member name + testJsonStringTargetErrorDetectionAllValues(() -> { + JSONTarget target = constructor.call(); + target.startObject(); + return target; + }); + // Unexpected second value + testJsonStringTargetErrorDetectionAllValues(() -> { + JSONTarget target = constructor.call(); + target.valueNull(); + return target; + }); + // No value + assertIncomplete(constructor.call()); + // Unclosed object + JSONTarget target = constructor.call(); + target.startObject(); + assertIncomplete(target); + // Unclosed array + target = constructor.call(); + target.startObject(); + assertIncomplete(target); + // End of array after start of object or vice versa + JSONTarget target6 = constructor.call(); + target6.startObject(); + assertThrows(RuntimeException.class, () -> target6.endArray()); + JSONTarget target7 = constructor.call(); + target7.startArray(); + assertThrows(RuntimeException.class, () -> target7.endObject()); + } + + private void assertIncomplete(JSONTarget target) { + assertThrows(RuntimeException.class, () -> target.getResult()); + } + + private void testJsonStringTargetErrorDetectionAllValues(Callable> initializer) throws Exception { + assertThrows(RuntimeException.class, () -> initializer.call().valueNull()); + assertThrows(RuntimeException.class, () -> initializer.call().valueFalse()); + assertThrows(RuntimeException.class, () -> initializer.call().valueTrue()); + assertThrows(RuntimeException.class, () -> initializer.call().valueNumber(BigDecimal.ONE)); + assertThrows(RuntimeException.class, () -> initializer.call().valueString("string")); + } + + private void testSourcesAndTargets() throws Exception { + 
testSourcesAndTargets("1", "1"); + testSourcesAndTargets("\uFEFF0", "0"); + testSourcesAndTargets("\uFEFF-1", "-1"); + testSourcesAndTargets("null", "null"); + testSourcesAndTargets("true", "true"); + testSourcesAndTargets("false", "false"); + testSourcesAndTargets("1.2", "1.2"); + testSourcesAndTargets("1.2e+1", "12"); + testSourcesAndTargets("10000.0", "10000.0"); + testSourcesAndTargets("\t\r\n 1.2E-1 ", "0.12"); + testSourcesAndTargets("9.99e99", "9.99E99"); + testSourcesAndTargets("\"\"", "\"\""); + testSourcesAndTargets("\"\\b\\f\\t\\r\\n\\\"\\/\\\\\\u0019\\u0020\"", "\"\\b\\f\\t\\r\\n\\\"/\\\\\\u0019 \""); + testSourcesAndTargets("{ }", "{}"); + testSourcesAndTargets("{\"a\" : 1}", "{\"a\":1}"); + testSourcesAndTargets("{\"a\" : 1, \"b\":[], \"c\":{}}", "{\"a\":1,\"b\":[],\"c\":{}}"); + testSourcesAndTargets("{\"a\" : 1, \"b\":[1,null, true,false,{}]}", "{\"a\":1,\"b\":[1,null,true,false,{}]}"); + testSourcesAndTargets("{\"1\" : [[[[[[[[[[11.1e-100]]]], null]]], {\n\r}]]]}", + "{\"1\":[[[[[[[[[[1.11E-99]]]],null]]],{}]]]}"); + testSourcesAndTargets("{\"b\":false,\"a\":1,\"a\":null}", "{\"b\":false,\"a\":1,\"a\":null}", true); + testSourcesAndTargets("[[{\"b\":false,\"a\":1,\"a\":null}]]", "[[{\"b\":false,\"a\":1,\"a\":null}]]", true); + testSourcesAndTargets("\"\uD800\uDFFF\"", "\"\uD800\uDFFF\""); + testSourcesAndTargets("\"\\uD800\\uDFFF\"", "\"\uD800\uDFFF\""); + testSourcesAndTargets("\"\u0700\"", "\"\u0700\""); + testSourcesAndTargets("\"\\u0700\"", "\"\u0700\""); + StringBuilder builder = new StringBuilder().append('"'); + for (int cp = 0x80; cp < Character.MIN_SURROGATE; cp++) { + builder.appendCodePoint(cp); + } + for (int cp = Character.MAX_SURROGATE + 1; cp < 0xfffe; cp++) { + builder.appendCodePoint(cp); + } + for (int cp = 0xffff; cp <= Character.MAX_CODE_POINT; cp++) { + builder.appendCodePoint(cp); + } + String s = builder.append('"').toString(); + testSourcesAndTargets(s, s); + testSourcesAndTargetsError("", true); + 
testSourcesAndTargetsError("\"", true); + testSourcesAndTargetsError("\"\\u", true); + testSourcesAndTargetsError("\u0080", true); + testSourcesAndTargetsError(".1", true); + testSourcesAndTargetsError("1.", true); + testSourcesAndTargetsError("1.1e", true); + testSourcesAndTargetsError("1.1e+", true); + testSourcesAndTargetsError("1.1e-", true); + testSourcesAndTargetsError("\b1", true); + testSourcesAndTargetsError("\"\\u", true); + testSourcesAndTargetsError("\"\\u0", true); + testSourcesAndTargetsError("\"\\u00", true); + testSourcesAndTargetsError("\"\\u000", true); + testSourcesAndTargetsError("\"\\u0000", true); + testSourcesAndTargetsError("{,}", true); + testSourcesAndTargetsError("{,,}", true); + testSourcesAndTargetsError("{}}", true); + testSourcesAndTargetsError("{\"a\":\"\":\"\"}", true); + testSourcesAndTargetsError("[]]", true); + testSourcesAndTargetsError("\"\\uZZZZ\"", true); + testSourcesAndTargetsError("\"\\x\"", true); + testSourcesAndTargetsError("\"\\", true); + testSourcesAndTargetsError("[1,", true); + testSourcesAndTargetsError("[1,,2]", true); + testSourcesAndTargetsError("[1,]", true); + testSourcesAndTargetsError("{\"a\":1,]", true); + testSourcesAndTargetsError("[1 2]", true); + testSourcesAndTargetsError("{\"a\"-1}", true); + testSourcesAndTargetsError("[1;2]", true); + testSourcesAndTargetsError("{\"a\":1,b:2}", true); + testSourcesAndTargetsError("{\"a\":1;\"b\":2}", true); + testSourcesAndTargetsError("fals", true); + testSourcesAndTargetsError("falsE", true); + testSourcesAndTargetsError("False", true); + testSourcesAndTargetsError("nul", true); + testSourcesAndTargetsError("nulL", true); + testSourcesAndTargetsError("Null", true); + testSourcesAndTargetsError("tru", true); + testSourcesAndTargetsError("truE", true); + testSourcesAndTargetsError("True", true); + testSourcesAndTargetsError("\"\uD800\"", false); + testSourcesAndTargetsError("\"\\uD800\"", true); + testSourcesAndTargetsError("\"\uDC00\"", false); + 
testSourcesAndTargetsError("\"\\uDC00\"", true); + testSourcesAndTargetsError("\"\uDBFF \"", false); + testSourcesAndTargetsError("\"\\uDBFF \"", true); + testSourcesAndTargetsError("\"\uDBFF\\\"", true); + testSourcesAndTargetsError("\"\\uDBFF\\\"", true); + testSourcesAndTargetsError("\"\uDFFF\uD800\"", false); + testSourcesAndTargetsError("\"\\uDFFF\\uD800\"", true); + } + + private void testSourcesAndTargets(String src, String expected) throws Exception { + testSourcesAndTargets(src, expected, false); + } + + private void testSourcesAndTargets(String src, String expected, boolean hasNonUniqueKeys) throws Exception { + JSONItemType itemType; + switch (expected.charAt(0)) { + case '[': + itemType = JSONItemType.ARRAY; + break; + case '{': + itemType = JSONItemType.OBJECT; + break; + default: + itemType = JSONItemType.SCALAR; + } + assertEquals(expected, JSONStringSource.parse(src, new JSONStringTarget())); + assertEquals(expected.getBytes(StandardCharsets.UTF_8), // + JSONStringSource.parse(src, new JSONByteArrayTarget())); + assertEquals(expected, JSONStringSource.parse(src, new JSONValueTarget()).toString()); + assertEquals(itemType, JSONStringSource.parse(src, new JSONValidationTargetWithoutUniqueKeys())); + if (hasNonUniqueKeys) { + testSourcesAndTargetsError(src, JSON_VALIDATION_TARGET_WITH_UNIQUE_KEYS, true); + } else { + assertEquals(itemType, JSONStringSource.parse(src, new JSONValidationTargetWithUniqueKeys())); + } + for (Charset charset : CHARSETS) { + assertEquals(expected, JSONBytesSource.parse(src.getBytes(charset), new JSONStringTarget())); + } + } + + private void testSourcesAndTargetsError(String src, boolean testBytes) throws Exception { + testSourcesAndTargetsError(src, STRING_TARGET, testBytes); + testSourcesAndTargetsError(src, BYTES_TARGET, testBytes); + testSourcesAndTargetsError(src, VALUE_TARGET, testBytes); + testSourcesAndTargetsError(src, JSON_VALIDATION_TARGET_WITHOUT_UNIQUE_KEYS, testBytes); + testSourcesAndTargetsError(src, 
JSON_VALIDATION_TARGET_WITH_UNIQUE_KEYS, testBytes); + } + + private void testSourcesAndTargetsError(String src, Callable> constructor, boolean testBytes) + throws Exception { + check: { + JSONTarget target = constructor.call(); + try { + JSONStringSource.parse(src, target); + } catch (IllegalArgumentException | IllegalStateException expected) { + // Expected + break check; + } + fail(); + } + /* + * String.getBytes() replaces invalid characters, so some tests are + * disabled. + */ + if (testBytes) { + JSONTarget target = constructor.call(); + try { + JSONBytesSource.parse(src.getBytes(StandardCharsets.UTF_8), target); + } catch (IllegalArgumentException | IllegalStateException expected) { + // Expected + return; + } + fail(); + } + } + + private void testUtfError() { + // 2 bytes + testUtfError(new byte[] { '"', (byte) 0xc2, (byte) 0xc0, '"' }); + testUtfError(new byte[] { '"', (byte) 0xc1, (byte) 0xbf, '"' }); + testUtfError(new byte[] { '"', (byte) 0xc2 }); + // 3 bytes + testUtfError(new byte[] { '"', (byte) 0xe1, (byte) 0xc0, (byte) 0x80, '"' }); + testUtfError(new byte[] { '"', (byte) 0xe1, (byte) 0x80, (byte) 0xc0, '"' }); + testUtfError(new byte[] { '"', (byte) 0xe0, (byte) 0x9f, (byte) 0xbf, '"' }); + testUtfError(new byte[] { '"', (byte) 0xe1, (byte) 0x80 }); + // 4 bytes + testUtfError(new byte[] { '"', (byte) 0xf1, (byte) 0xc0, (byte) 0x80, (byte) 0x80, '"' }); + testUtfError(new byte[] { '"', (byte) 0xf1, (byte) 0x80, (byte) 0xc0, (byte) 0x80, '"' }); + testUtfError(new byte[] { '"', (byte) 0xf1, (byte) 0x80, (byte) 0x80, (byte) 0xc0, '"' }); + testUtfError(new byte[] { '"', (byte) 0xf0, (byte) 0x8f, (byte) 0xbf, (byte) 0xbf, '"' }); + testUtfError(new byte[] { '"', (byte) 0xf4, (byte) 0x90, (byte) 0x80, (byte) 0x80, '"' }); + testUtfError(new byte[] { '"', (byte) 0xf1, (byte) 0x80, (byte) 0x80 }); + } + + private void testUtfError(byte[] bytes) { + assertThrows(IllegalArgumentException.class, + () -> JSONBytesSource.parse(bytes, new 
JSONValidationTargetWithoutUniqueKeys())); + } + + private void testLongNesting() { + final int halfLevel = 2048; + StringBuilder builder = new StringBuilder(halfLevel * 8); + for (int i = 0; i < halfLevel; i++) { + builder.append("{\"a\":["); + } + for (int i = 0; i < halfLevel; i++) { + builder.append("]}"); + } + String string = builder.toString(); + assertEquals(string, JSONStringSource.parse(string, new JSONStringTarget())); + byte[] bytes = string.getBytes(StandardCharsets.ISO_8859_1); + assertEquals(bytes, JSONBytesSource.normalize(bytes)); + } + + private void testEncodeString() { + testEncodeString("abc \"\u0001\u007f\u0080\u1000\uabcd\n'\t", + "\"abc \\\"\\u0001\u007f\u0080\u1000\uabcd\\n'\\t\"", + "\"abc \\\"\\u0001\u007f\\u0080\\u1000\\uabcd\\n\\u0027\\t\""); + } + + private void testEncodeString(String source, String expected, String expectedPrintable) { + assertEquals(expected, JSONStringTarget.encodeString(new StringBuilder(), source, false).toString()); + assertEquals(expectedPrintable, JSONStringTarget.encodeString(new StringBuilder(), source, true).toString()); + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestKeywords.java b/h2/src/test/org/h2/test/unit/TestKeywords.java index d826d3bead..b006b5dcb9 100644 --- a/h2/src/test/org/h2/test/unit/TestKeywords.java +++ b/h2/src/test/org/h2/test/unit/TestKeywords.java @@ -1,18 +1,26 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; +import java.io.IOException; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.Statement; +import java.time.Duration; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.Map.Entry; +import java.util.TreeSet; import org.h2.command.Parser; +import org.h2.command.Token; +import org.h2.command.Tokenizer; +import org.h2.message.DbException; import org.h2.test.TestBase; import org.h2.util.ParserUtil; import org.objectweb.asm.ClassReader; @@ -34,23 +42,456 @@ private enum TokenType { CONTEXT_SENSITIVE_KEYWORD; } - /** - * Run just this test. - * - * @param a - * ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + private static final HashSet SQL92_RESERVED_WORDS = toSet(new String[] { + + "ABSOLUTE", "ACTION", "ADD", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "ARE", "AS", "ASC", "ASSERTION", + "AT", "AUTHORIZATION", "AVG", + + "BEGIN", "BETWEEN", "BIT", "BIT_LENGTH", "BOTH", "BY", + + "CASCADE", "CASCADED", "CASE", "CAST", "CATALOG", "CHAR", "CHARACTER", "CHAR_LENGTH", "CHARACTER_LENGTH", + "CHECK", "CLOSE", "COALESCE", "COLLATE", "COLLATION", "COLUMN", "COMMIT", "CONNECT", "CONNECTION", + "CONSTRAINT", "CONSTRAINTS", "CONTINUE", "CONVERT", "CORRESPONDING", "COUNT", "CREATE", "CROSS", "CURRENT", + "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR", + + "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DEFERRABLE", "DEFERRED", "DELETE", + "DESC", "DESCRIBE", "DESCRIPTOR", "DIAGNOSTICS", "DISCONNECT", "DISTINCT", "DOMAIN", "DOUBLE", "DROP", + + "ELSE", "END", "END-EXEC", "ESCAPE", "EXCEPT", "EXCEPTION", "EXEC", "EXECUTE", "EXISTS", "EXTERNAL", + "EXTRACT", + + "FALSE", "FETCH", "FIRST", "FLOAT", "FOR", "FOREIGN", "FOUND", "FROM", "FULL", + + "GET", "GLOBAL", "GO", "GOTO", "GRANT", "GROUP", + + "HAVING", "HOUR", + 
+ "IDENTITY", "IMMEDIATE", "IN", "INDICATOR", "INITIALLY", "INNER", "INPUT", "INSENSITIVE", "INSERT", "INT", + "INTEGER", "INTERSECT", "INTERVAL", "INTO", "IS", "ISOLATION", + + "JOIN", + + "KEY", + + "LANGUAGE", "LAST", "LEADING", "LEFT", "LEVEL", "LIKE", "LOCAL", "LOWER", + + "MATCH", "MAX", "MIN", "MINUTE", "MODULE", "MONTH", + + "NAMES", "NATIONAL", "NATURAL", "NCHAR", "NEXT", "NO", "NOT", "NULL", "NULLIF", "NUMERIC", + + "OCTET_LENGTH", "OF", "ON", "ONLY", "OPEN", "OPTION", "OR", "ORDER", "OUTER", "OUTPUT", "OVERLAPS", + + "PAD", "PARTIAL", "POSITION", "PRECISION", "PREPARE", "PRESERVE", "PRIMARY", "PRIOR", "PRIVILEGES", + "PROCEDURE", "PUBLIC", + + "READ", "REAL", "REFERENCES", "RELATIVE", "RESTRICT", "REVOKE", "RIGHT", "ROLLBACK", "ROWS", + + "SCHEMA", "SCROLL", "SECOND", "SECTION", "SELECT", "SESSION", "SESSION_USER", "SET", "SIZE", "SMALLINT", + "SOME", "SPACE", "SQL", "SQLCODE", "SQLERROR", "SQLSTATE", "SUBSTRING", "SUM", "SYSTEM_USER", + + "TABLE", "TEMPORARY", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TO", "TRAILING", + "TRANSACTION", "TRANSLATE", "TRANSLATION", "TRIM", "TRUE", + + "UNION", "UNIQUE", "UNKNOWN", "UPDATE", "UPPER", "USAGE", "USER", "USING", + + "VALUE", "VALUES", "VARCHAR", "VARYING", "VIEW", + + "WHEN", "WHENEVER", "WHERE", "WITH", "WORK", "WRITE", + + "YEAR", + + "ZONE", + + }); + + private static final HashSet SQL1999_RESERVED_WORDS = toSet(new String[] { + + "ABSOLUTE", "ACTION", "ADD", "ADMIN", "AFTER", "AGGREGATE", "ALIAS", "ALL", "ALLOCATE", "ALTER", "AND", + "ANY", "ARE", "ARRAY", "AS", "ASC", "ASSERTION", "AT", "AUTHORIZATION", + + "BEFORE", "BEGIN", "BINARY", "BIT", "BLOB", "BOOLEAN", "BOTH", "BREADTH", "BY", + + "CALL", "CASCADE", "CASCADED", "CASE", "CAST", "CATALOG", "CHAR", "CHARACTER", "CHECK", "CLASS", "CLOB", + "CLOSE", "COLLATE", "COLLATION", "COLUMN", "COMMIT", "COMPLETION", "CONNECT", "CONNECTION", "CONSTRAINT", + "CONSTRAINTS", "CONSTRUCTOR", "CONTINUE", "CORRESPONDING", "CREATE", "CROSS", 
"CUBE", "CURRENT", + "CURRENT_DATE", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", + "CURSOR", "CYCLE", + + "DATA", "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DEFERRABLE", "DEFERRED", + "DELETE", "DEPTH", "DEREF", "DESC", "DESCRIBE", "DESCRIPTOR", "DESTROY", "DESTRUCTOR", "DETERMINISTIC", + "DICTIONARY", "DIAGNOSTICS", "DISCONNECT", "DISTINCT", "DOMAIN", "DOUBLE", "DROP", "DYNAMIC", + + "EACH", "ELSE", "END", "END-EXEC", "EQUALS", "ESCAPE", "EVERY", "EXCEPT", "EXCEPTION", "EXEC", "EXECUTE", + "EXTERNAL", + + "FALSE", "FETCH", "FIRST", "FLOAT", "FOR", "FOREIGN", "FOUND", "FROM", "FREE", "FULL", "FUNCTION", + + "GENERAL", "GET", "GLOBAL", "GO", "GOTO", "GRANT", "GROUP", "GROUPING", + + "HAVING", "HOST", "HOUR", + + "IDENTITY", "IGNORE", "IMMEDIATE", "IN", "INDICATOR", "INITIALIZE", "INITIALLY", "INNER", "INOUT", "INPUT", + "INSERT", "INT", "INTEGER", "INTERSECT", "INTERVAL", "INTO", "IS", "ISOLATION", "ITERATE", + + "JOIN", + + "KEY", + + "LANGUAGE", "LARGE", "LAST", "LATERAL", "LEADING", "LEFT", "LESS", "LEVEL", "LIKE", "LIMIT", "LOCAL", + "LOCALTIME", "LOCALTIMESTAMP", "LOCATOR", + + "MAP", "MATCH", "MINUTE", "MODIFIES", "MODIFY", "MODULE", "MONTH", + + "NAMES", "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NEXT", "NO", "NONE", "NOT", "NULL", "NUMERIC", + + "OBJECT", "OF", "OFF", "OLD", "ON", "ONLY", "OPEN", "OPERATION", "OPTION", "OR", "ORDER", "ORDINALITY", + "OUT", "OUTER", "OUTPUT", + + "PAD", "PARAMETER", "PARAMETERS", "PARTIAL", "PATH", "POSTFIX", "PRECISION", "PREFIX", "PREORDER", + "PREPARE", "PRESERVE", "PRIMARY", "PRIOR", "PRIVILEGES", "PROCEDURE", "PUBLIC", + + "READ", "READS", "REAL", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "RELATIVE", "RESTRICT", "RESULT", + "RETURN", "RETURNS", "REVOKE", "RIGHT", "ROLE", "ROLLBACK", "ROLLUP", "ROUTINE", "ROW", "ROWS", + + "SAVEPOINT", "SCHEMA", "SCROLL", "SCOPE", "SEARCH", "SECOND", "SECTION", "SELECT", "SEQUENCE", "SESSION", + 
"SESSION_USER", "SET", "SETS", "SIZE", "SMALLINT", "SOME", "SPACE", "SPECIFIC", "SPECIFICTYPE", "SQL", + "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", "START", "STATE", "STATEMENT", "STATIC", "STRUCTURE", + "SYSTEM_USER", + + "TABLE", "TEMPORARY", "TERMINATE", "THAN", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", + "TO", "TRAILING", "TRANSACTION", "TRANSLATION", "TREAT", "TRIGGER", "TRUE", + + "UNDER", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UPDATE", "USAGE", "USER", "USING", + + "VALUE", "VALUES", "VARCHAR", "VARIABLE", "VARYING", "VIEW", + + "WHEN", "WHENEVER", "WHERE", "WITH", "WITHOUT", "WORK", "WRITE", + + "YEAR", "ZONE", + + }); + + private static final HashSet SQL2003_RESERVED_WORDS = toSet(new String[] { + + "ABS", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "ARE", "ARRAY", "AS", "ASENSITIVE", "ASYMMETRIC", "AT", + "ATOMIC", "AUTHORIZATION", "AVG", + + "BEGIN", "BETWEEN", "BIGINT", "BINARY", "BLOB", "BOOLEAN", "BOTH", "BY", + + "CALL", "CALLED", "CARDINALITY", "CASCADED", "CASE", "CAST", "CEIL", "CEILING", "CHAR", "CHAR_LENGTH", + "CHARACTER", "CHARACTER_LENGTH", "CHECK", "CLOB", "CLOSE", "COALESCE", "COLLATE", "COLLECT", "COLUMN", + "COMMIT", "CONDITION", "CONNECT", "CONSTRAINT", "CONVERT", "CORR", "CORRESPONDING", "COUNT", "COVAR_POP", + "COVAR_SAMP", "CREATE", "CROSS", "CUBE", "CUME_DIST", "CURRENT", "CURRENT_DATE", + "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_TIME", "CURRENT_TIMESTAMP", + "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURRENT_USER", "CURSOR", "CYCLE", + + "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DELETE", "DENSE_RANK", "DEREF", + "DESCRIBE", "DETERMINISTIC", "DISCONNECT", "DISTINCT", "DOUBLE", "DROP", "DYNAMIC", + + "EACH", "ELEMENT", "ELSE", "END", "END-EXEC", "ESCAPE", "EVERY", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", + "EXP", "EXTERNAL", "EXTRACT", + + "FALSE", "FETCH", "FILTER", "FLOAT", "FLOOR", "FOR", "FOREIGN", "FREE", "FROM", "FULL", "FUNCTION", + "FUSION", + + 
"GET", "GLOBAL", "GRANT", "GROUP", "GROUPING", + + "HAVING", "HOLD", "HOUR", "IDENTITY", "IN", "INDICATOR", "INNER", "INOUT", "INSENSITIVE", + + "INSERT", "INT", "INTEGER", "INTERSECT", "INTERSECTION", "INTERVAL", "INTO", "IS", + + "JOIN", + + "LANGUAGE", "LARGE", "LATERAL", "LEADING", "LEFT", "LIKE", "LN", "LOCAL", "LOCALTIME", "LOCALTIMESTAMP", + "LOWER", + + "MATCH", "MAX", "MEMBER", "MERGE", "METHOD", "MIN", "MINUTE", "MOD", "MODIFIES", "MODULE", "MONTH", + "MULTISET", + + "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NO", "NONE", "NORMALIZE", "NOT", "NULL", "NULLIF", + "NUMERIC", + + "OCTET_LENGTH", "OF", "OLD", "ON", "ONLY", "OPEN", "OR", "ORDER", "OUT", "OUTER", "OVER", "OVERLAPS", + "OVERLAY", + + "PARAMETER", "PARTITION", "PERCENT_RANK", "PERCENTILE_CONT", "PERCENTILE_DISC", "POSITION", "POWER", + "PRECISION", "PREPARE", "PRIMARY", "PROCEDURE", + + "RANGE", "RANK", "READS", "REAL", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REGR_AVGX", // + "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", + "RELEASE", "RESULT", "RETURN", "RETURNS", "REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROW", "ROW_NUMBER", + "ROWS", + + "SAVEPOINT", "SCOPE", "SCROLL", "SEARCH", "SECOND", "SELECT", "SENSITIVE", "SESSION_USER", "SET", // + "SIMILAR", "SMALLINT", "SOME", "SPECIFIC", "SPECIFICTYPE", "SQL", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", + "SQRT", "START", "STATIC", "STDDEV_POP", "STDDEV_SAMP", "SUBMULTISET", "SUBSTRING", "SUM", "SYMMETRIC", + "SYSTEM", "SYSTEM_USER", + + "TABLE", "TABLESAMPLE", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TO", "TRAILING", + "TRANSLATE", "TRANSLATION", "TREAT", "TRIGGER", "TRIM", "TRUE", + + "UESCAPE", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UPDATE", "UPPER", "USER", "USING", + + "VALUE", "VALUES", "VAR_POP", "VAR_SAMP", "VARCHAR", "VARYING", + + "WHEN", "WHENEVER", "WHERE", "WIDTH_BUCKET", "WINDOW", "WITH", "WITHIN", "WITHOUT", + + "YEAR", + + }); + + private 
static final HashSet SQL2008_RESERVED_WORDS = toSet(new String[] { + + "ABS", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "ARE", "ARRAY", "AS", "ASENSITIVE", "ASYMMETRIC", "AT", + "ATOMIC", "AUTHORIZATION", "AVG", + + "BEGIN", "BETWEEN", "BIGINT", "BINARY", "BLOB", "BOOLEAN", "BOTH", "BY", + + "CALL", "CALLED", "CARDINALITY", "CASCADED", "CASE", "CAST", "CEIL", "CEILING", "CHAR", "CHAR_LENGTH", + "CHARACTER", "CHARACTER_LENGTH", "CHECK", "CLOB", "CLOSE", "COALESCE", "COLLATE", "COLLECT", "COLUMN", + "COMMIT", "CONDITION", "CONNECT", "CONSTRAINT", "CONVERT", "CORR", "CORRESPONDING", "COUNT", "COVAR_POP", + "COVAR_SAMP", "CREATE", "CROSS", "CUBE", "CUME_DIST", "CURRENT", "CURRENT_CATALOG", "CURRENT_DATE", + "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA", "CURRENT_TIME", + "CURRENT_TIMESTAMP", "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURRENT_USER", "CURSOR", "CYCLE", + + "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DELETE", "DENSE_RANK", "DEREF", + "DESCRIBE", "DETERMINISTIC", "DISCONNECT", "DISTINCT", "DOUBLE", "DROP", "DYNAMIC", + + "EACH", "ELEMENT", "ELSE", "END", "END-EXEC", "ESCAPE", "EVERY", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", + "EXP", "EXTERNAL", "EXTRACT", + + "FALSE", "FETCH", "FILTER", "FLOAT", "FLOOR", "FOR", "FOREIGN", "FREE", "FROM", "FULL", "FUNCTION", + "FUSION", + + "GET", "GLOBAL", "GRANT", "GROUP", "GROUPING", + + "HAVING", "HOLD", "HOUR", + + "IDENTITY", "IN", "INDICATOR", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INT", "INTEGER", "INTERSECT", + "INTERSECTION", "INTERVAL", "INTO", "IS", + + "JOIN", + + "LANGUAGE", "LARGE", "LATERAL", "LEADING", "LEFT", "LIKE", "LIKE_REGEX", "LN", "LOCAL", "LOCALTIME", + "LOCALTIMESTAMP", "LOWER", + + "MATCH", "MAX", "MEMBER", "MERGE", "METHOD", "MIN", "MINUTE", "MOD", "MODIFIES", "MODULE", "MONTH", + "MULTISET", + + "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NO", "NONE", "NORMALIZE", "NOT", "NULL", "NULLIF", + "NUMERIC", + + "OCTET_LENGTH", 
"OCCURRENCES_REGEX", "OF", "OLD", "ON", "ONLY", "OPEN", "OR", "ORDER", "OUT", "OUTER", + "OVER", "OVERLAPS", "OVERLAY", + + "PARAMETER", "PARTITION", "PERCENT_RANK", "PERCENTILE_CONT", "PERCENTILE_DISC", "POSITION", + "POSITION_REGEX", "POWER", "PRECISION", "PREPARE", "PRIMARY", "PROCEDURE", + + "RANGE", "RANK", "READS", "REAL", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REGR_AVGX", // + "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", + "RELEASE", "RESULT", "RETURN", "RETURNS", "REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROW", "ROW_NUMBER", + "ROWS", + + "SAVEPOINT", "SCOPE", "SCROLL", "SEARCH", "SECOND", "SELECT", "SENSITIVE", "SESSION_USER", "SET", // + "SIMILAR", "SMALLINT", "SOME", "SPECIFIC", "SPECIFICTYPE", "SQL", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", + "SQRT", "START", "STATIC", "STDDEV_POP", "STDDEV_SAMP", "SUBMULTISET", "SUBSTRING", "SUBSTRING_REGEX", + "SUM", "SYMMETRIC", "SYSTEM", "SYSTEM_USER", + + "TABLE", "TABLESAMPLE", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TO", "TRAILING", + "TRANSLATE", "TRANSLATE_REGEX", "TRANSLATION", "TREAT", "TRIGGER", "TRIM", "TRUE", + + "UESCAPE", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UPDATE", "UPPER", "USER", "USING", + + "VALUE", "VALUES", "VAR_POP", "VAR_SAMP", "VARBINARY", "VARCHAR", "VARYING", + + "WHEN", "WHENEVER", "WHERE", "WIDTH_BUCKET", "WINDOW", "WITH", "WITHIN", "WITHOUT", + + "YEAR", + + }); + + private static final HashSet SQL2011_RESERVED_WORDS = toSet(new String[] { + + "ABS", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "ARE", "ARRAY", "ARRAY_AGG", "ARRAY_MAX_CARDINALITY", // + "AS", "ASENSITIVE", "ASYMMETRIC", "AT", "ATOMIC", "AUTHORIZATION", "AVG", + + "BEGIN", "BEGIN_FRAME", "BEGIN_PARTITION", "BETWEEN", "BIGINT", "BINARY", "BLOB", "BOOLEAN", "BOTH", "BY", + + "CALL", "CALLED", "CARDINALITY", "CASCADED", "CASE", "CAST", "CEIL", "CEILING", "CHAR", "CHAR_LENGTH", + "CHARACTER", "CHARACTER_LENGTH", "CHECK", "CLOB", 
"CLOSE", "COALESCE", "COLLATE", "COLLECT", "COLUMN", + "COMMIT", "CONDITION", "CONNECT", "CONSTRAINT", "CONTAINS", "CONVERT", "CORR", "CORRESPONDING", "COUNT", + "COVAR_POP", "COVAR_SAMP", "CREATE", "CROSS", "CUBE", "CUME_DIST", "CURRENT", "CURRENT_CATALOG", + "CURRENT_DATE", "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_ROW", + "CURRENT_SCHEMA", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURRENT_USER", + "CURSOR", "CYCLE", + + "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DELETE", "DENSE_RANK", "DEREF", + "DESCRIBE", "DETERMINISTIC", "DISCONNECT", "DISTINCT", "DOUBLE", "DROP", "DYNAMIC", + + "EACH", "ELEMENT", "ELSE", "END", "END_FRAME", "END_PARTITION", "END-EXEC", "EQUALS", "ESCAPE", "EVERY", + "EXCEPT", "EXEC", "EXECUTE", "EXISTS", "EXP", "EXTERNAL", "EXTRACT", + + "FALSE", "FETCH", "FILTER", "FIRST_VALUE", "FLOAT", "FLOOR", "FOR", "FOREIGN", "FRAME_ROW", "FREE", "FROM", + "FULL", "FUNCTION", "FUSION", + + "GET", "GLOBAL", "GRANT", "GROUP", "GROUPING", "GROUPS", + + "HAVING", "HOLD", "HOUR", + + "IDENTITY", "IN", "INDICATOR", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INT", "INTEGER", "INTERSECT", + "INTERSECTION", "INTERVAL", "INTO", "IS", + + "JOIN", + + "LAG", "LANGUAGE", "LARGE", "LAST_VALUE", "LATERAL", "LEAD", "LEADING", "LEFT", "LIKE", "LIKE_REGEX", "LN", + "LOCAL", "LOCALTIME", "LOCALTIMESTAMP", "LOWER", + + "MATCH", "MAX", "MEMBER", "MERGE", "METHOD", "MIN", "MINUTE", "MOD", "MODIFIES", "MODULE", "MONTH", + "MULTISET", + + "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NO", "NONE", "NORMALIZE", "NOT", "NTH_VALUE", "NTILE", + "NULL", "NULLIF", "NUMERIC", + + "OCTET_LENGTH", "OCCURRENCES_REGEX", "OF", "OFFSET", "OLD", "ON", "ONLY", "OPEN", "OR", "ORDER", "OUT", + "OUTER", "OVER", "OVERLAPS", "OVERLAY", + + "PARAMETER", "PARTITION", "PERCENT", "PERCENT_RANK", "PERCENTILE_CONT", "PERCENTILE_DISC", "PERIOD", + "PORTION", "POSITION", "POSITION_REGEX", "POWER", "PRECEDES", 
"PRECISION", "PREPARE", "PRIMARY", + "PROCEDURE", + + "RANGE", "RANK", "READS", "REAL", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REGR_AVGX", // + "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", + "RELEASE", "RESULT", "RETURN", "RETURNS", "REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROW", "ROW_NUMBER", + "ROWS", + + "SAVEPOINT", "SCOPE", "SCROLL", "SEARCH", "SECOND", "SELECT", "SENSITIVE", "SESSION_USER", "SET", // + "SIMILAR", "SMALLINT", "SOME", "SPECIFIC", "SPECIFICTYPE", "SQL", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", + "SQRT", "START", "STATIC", "STDDEV_POP", "STDDEV_SAMP", "SUBMULTISET", "SUBSTRING", "SUBSTRING_REGEX", + "SUCCEEDS", "SUM", "SYMMETRIC", "SYSTEM", "SYSTEM_TIME", "SYSTEM_USER", + + "TABLE", "TABLESAMPLE", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TO", "TRAILING", + "TRANSLATE", "TRANSLATE_REGEX", "TRANSLATION", "TREAT", "TRIGGER", "TRUNCATE", "TRIM", "TRIM_ARRAY", // + "TRUE", + + "UESCAPE", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UPDATE", "UPPER", "USER", "USING", + + "VALUE", "VALUES", "VALUE_OF", "VAR_POP", "VAR_SAMP", "VARBINARY", "VARCHAR", "VARYING", "VERSIONING", + + "WHEN", "WHENEVER", "WHERE", "WIDTH_BUCKET", "WINDOW", "WITH", "WITHIN", "WITHOUT", + + "YEAR", + + }); + + private static final HashSet SQL2016_RESERVED_WORDS = toSet(new String[] { + + "ABS", "ACOS", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "ARE", "ARRAY", "ARRAY_AGG", + "ARRAY_MAX_CARDINALITY", "AS", "ASENSITIVE", "ASIN", "ASYMMETRIC", "AT", "ATAN", "ATOMIC", "AUTHORIZATION", + "AVG", + + "BEGIN", "BEGIN_FRAME", "BEGIN_PARTITION", "BETWEEN", "BIGINT", "BINARY", "BLOB", "BOOLEAN", "BOTH", "BY", + + "CALL", "CALLED", "CARDINALITY", "CASCADED", "CASE", "CAST", "CEIL", "CEILING", "CHAR", "CHAR_LENGTH", + "CHARACTER", "CHARACTER_LENGTH", "CHECK", "CLASSIFIER", "CLOB", "CLOSE", "COALESCE", "COLLATE", "COLLECT", + "COLUMN", "COMMIT", "CONDITION", "CONNECT", "CONSTRAINT", "CONTAINS", 
"CONVERT", "COPY", "CORR", + "CORRESPONDING", "COS", "COSH", "COUNT", "COVAR_POP", "COVAR_SAMP", "CREATE", "CROSS", "CUBE", "CUME_DIST", + "CURRENT", "CURRENT_CATALOG", "CURRENT_DATE", "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", + "CURRENT_ROLE", "CURRENT_ROW", "CURRENT_SCHEMA", "CURRENT_TIME", "CURRENT_TIMESTAMP", + "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURRENT_USER", "CURSOR", "CYCLE", + + "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECFLOAT", "DECLARE", "DEFAULT", "DEFINE", "DELETE", + "DENSE_RANK", "DEREF", "DESCRIBE", "DETERMINISTIC", "DISCONNECT", "DISTINCT", "DOUBLE", "DROP", "DYNAMIC", + + "EACH", "ELEMENT", "ELSE", "EMPTY", "END", "END_FRAME", "END_PARTITION", "END-EXEC", "EQUALS", "ESCAPE", + "EVERY", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", "EXP", "EXTERNAL", "EXTRACT", + + "FALSE", "FETCH", "FILTER", "FIRST_VALUE", "FLOAT", "FLOOR", "FOR", "FOREIGN", "FRAME_ROW", "FREE", "FROM", + "FULL", "FUNCTION", "FUSION", + + "GET", "GLOBAL", "GRANT", "GROUP", "GROUPING", "GROUPS", + + "HAVING", "HOLD", "HOUR", + + "IDENTITY", "IN", "INDICATOR", "INITIAL", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INT", "INTEGER", + "INTERSECT", "INTERSECTION", "INTERVAL", "INTO", "IS", + + "JOIN", "JSON_ARRAY", "JSON_ARRAYAGG", "JSON_EXISTS", "JSON_OBJECT", "JSON_OBJECTAGG", "JSON_QUERY", + "JSON_TABLE", "JSON_TABLE_PRIMITIVE", "JSON_VALUE", + + "LAG", "LANGUAGE", "LARGE", "LAST_VALUE", "LATERAL", "LEAD", "LEADING", "LEFT", "LIKE", "LIKE_REGEX", + "LISTAGG", "LN", "LOCAL", "LOCALTIME", "LOCALTIMESTAMP", "LOG", "LOG10", "LOWER", + + "MATCH", "MATCH_NUMBER", "MATCH_RECOGNIZE", "MATCHES", "MAX", "MEMBER", "MERGE", "METHOD", "MIN", "MINUTE", + "MOD", "MODIFIES", "MODULE", "MONTH", "MULTISET", + + "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NO", "NONE", "NORMALIZE", "NOT", "NTH_VALUE", "NTILE", + "NULL", "NULLIF", "NUMERIC", + + "OCTET_LENGTH", "OCCURRENCES_REGEX", "OF", "OFFSET", "OLD", "OMIT", "ON", "ONE", "ONLY", "OPEN", "OR", + "ORDER", "OUT", "OUTER", "OVER", 
"OVERLAPS", "OVERLAY", + + "PARAMETER", "PARTITION", "PATTERN", "PER", "PERCENT", "PERCENT_RANK", "PERCENTILE_CONT", // + "PERCENTILE_DISC", "PERIOD", "PORTION", "POSITION", "POSITION_REGEX", "POWER", "PRECEDES", "PRECISION", + "PREPARE", "PRIMARY", "PROCEDURE", "PTF", + + "RANGE", "RANK", "READS", "REAL", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REGR_AVGX", // + "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", + "RELEASE", "RESULT", "RETURN", "RETURNS", "REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROW", "ROW_NUMBER", + "ROWS", "RUNNING", + + "SAVEPOINT", "SCOPE", "SCROLL", "SEARCH", "SECOND", "SEEK", "SELECT", "SENSITIVE", "SESSION_USER", "SET", + "SHOW", "SIMILAR", "SIN", "SINH", "SKIP", "SMALLINT", "SOME", "SPECIFIC", "SPECIFICTYPE", "SQL", + "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", "SQRT", "START", "STATIC", "STDDEV_POP", "STDDEV_SAMP", + "SUBMULTISET", "SUBSET", "SUBSTRING", "SUBSTRING_REGEX", "SUCCEEDS", "SUM", "SYMMETRIC", "SYSTEM", + "SYSTEM_TIME", "SYSTEM_USER", + + "TABLE", "TABLESAMPLE", "TAN", "TANH", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", + "TO", "TRAILING", "TRANSLATE", "TRANSLATE_REGEX", "TRANSLATION", "TREAT", "TRIGGER", "TRIM", "TRIM_ARRAY", + "TRUE", "TRUNCATE", + + "UESCAPE", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UPDATE", "UPPER", "USER", "USING", + + "VALUE", "VALUES", "VALUE_OF", "VAR_POP", "VAR_SAMP", "VARBINARY", "VARCHAR", "VARYING", "VERSIONING", + + "WHEN", "WHENEVER", "WHERE", "WIDTH_BUCKET", "WINDOW", "WITH", "WITHIN", "WITHOUT", + + "YEAR", + + }); + + private static final HashSet STRICT_MODE_NON_KEYWORDS = toSet(new String[] { "LIMIT", "MINUS", "TOP" }); + + private static final HashSet ALL_RESEVED_WORDS; + + private static final HashMap TOKENS; + + static { + HashSet set = new HashSet<>(1024); + set.addAll(SQL92_RESERVED_WORDS); + set.addAll(SQL1999_RESERVED_WORDS); + set.addAll(SQL2003_RESERVED_WORDS); + set.addAll(SQL2008_RESERVED_WORDS); + 
set.addAll(SQL2011_RESERVED_WORDS); + set.addAll(SQL2016_RESERVED_WORDS); + ALL_RESEVED_WORDS = set; + HashMap tokens = new HashMap<>(); + processClass(Parser.class, tokens); + processClass(ParserUtil.class, tokens); + processClass(Token.class, tokens); + processClass(Tokenizer.class, tokens); + TOKENS = tokens; } - @Override - public void test() throws Exception { - final HashMap tokens = new HashMap<>(); - ClassReader r = new ClassReader(Parser.class.getResourceAsStream("Parser.class")); - r.accept(new ClassVisitor(Opcodes.ASM7) { + private static void processClass(Class clazz, HashMap tokens) { + ClassReader r; + try { + r = new ClassReader(clazz.getResourceAsStream(clazz.getSimpleName() + ".class")); + } catch (IOException e) { + throw DbException.convert(e); + } + r.accept(new ClassVisitor(Opcodes.ASM8) { @Override - public FieldVisitor visitField(int access, String name, String descriptor, String signature, + public FieldVisitor visitField(int access, String name, String descriptor, String signature, // Object value) { add(value); return null; @@ -59,7 +500,7 @@ public FieldVisitor visitField(int access, String name, String descriptor, Strin @Override public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) { - return new MethodVisitor(Opcodes.ASM7) { + return new MethodVisitor(Opcodes.ASM8) { @Override public void visitLdcInsn(Object value) { add(value); @@ -83,7 +524,7 @@ void add(Object value) { } } final TokenType type; - switch (ParserUtil.getSaveTokenType(s, false, 0, l, true)) { + switch (ParserUtil.getTokenType(s, false, true)) { case ParserUtil.IDENTIFIER: type = TokenType.IDENTIFIER; break; @@ -96,11 +537,50 @@ void add(Object value) { tokens.put(s, type); } }, ClassReader.SKIP_DEBUG | ClassReader.SKIP_FRAMES); - try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:keywords")) { + } + + private static HashSet toSet(String[] array) { + HashSet set = new HashSet<>((int) 
Math.ceil(array.length / .75)); + for (String reservedWord : array) { + if (!set.add(reservedWord)) { + throw new AssertionError(reservedWord); + } + } + return set; + } + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testParser(); + testInformationSchema(); + testMetaData(); + } + + private void testParser() throws Exception { + testParser(false); + testParser(true); + } + + private void testParser(boolean strictMode) throws Exception { + try (Connection conn = DriverManager + .getConnection("jdbc:h2:mem:keywords;MODE=" + (strictMode ? "STRICT" : "REGULAR"))) { Statement stat = conn.createStatement(); - for (Entry entry : tokens.entrySet()) { + for (Entry entry : TOKENS.entrySet()) { String s = entry.getKey(); TokenType type = entry.getValue(); + if (strictMode && STRICT_MODE_NON_KEYWORDS.contains(s)) { + type = TokenType.IDENTIFIER; + } Throwable exception1 = null, exception2 = null; try { stat.execute("CREATE TABLE " + s + '(' + s + " INT)"); @@ -121,6 +601,11 @@ void add(Object value) { assertFalse(rs.next()); assertEquals(s, rs.getMetaData().getColumnLabel(1)); } + try (ResultSet rs = stat.executeQuery("SELECT CASE " + s + " WHEN 10 THEN 1 END FROM " + s)) { + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + } stat.execute("DROP TABLE " + s); stat.execute("CREATE TABLE TEST(" + s + " VARCHAR) AS VALUES '-'"); String str; @@ -129,6 +614,32 @@ void add(Object value) { str = rs.getString(1); } stat.execute("DROP TABLE TEST"); + stat.execute("CREATE TABLE TEST(" + s + " INT) AS (VALUES 10)"); + try (ResultSet rs = stat.executeQuery("SELECT " + s + " V FROM TEST")) { + assertTrue(rs.next()); + assertEquals(10, rs.getInt(1)); + } + try (ResultSet rs = stat.executeQuery("SELECT TEST." 
+ s + " FROM TEST")) { + assertTrue(rs.next()); + assertEquals(10, rs.getInt(1)); + } + stat.execute("DROP TABLE TEST"); + stat.execute("CREATE TABLE TEST(" + s + " INT, _VALUE_ INT DEFAULT 1) AS VALUES (2, 2)"); + stat.execute("UPDATE TEST SET _VALUE_ = " + s); + try (ResultSet rs = stat.executeQuery("SELECT _VALUE_ FROM TEST")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + } + stat.execute("DROP TABLE TEST"); + try (ResultSet rs = stat.executeQuery("SELECT 1 DAY " + s)) { + assertEquals(s, rs.getMetaData().getColumnLabel(1)); + assertTrue(rs.next()); + assertEquals(Duration.ofDays(1L), rs.getObject(1, Duration.class)); + } + try (ResultSet rs = stat.executeQuery("SELECT 1 = " + s + " FROM (VALUES 1) T(" + s + ')')) { + rs.next(); + assertTrue(rs.getBoolean(1)); + } try (ResultSet rs = stat .executeQuery("SELECT ROW_NUMBER() OVER(" + s + ") WINDOW " + s + " AS ()")) { } @@ -172,4 +683,72 @@ void add(Object value) { } } + private void testInformationSchema() throws Exception { + try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:")) { + Statement stat = conn.createStatement(); + try (ResultSet rs = stat.executeQuery("SELECT TABLE_NAME, COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS")) { + while (rs.next()) { + String table = rs.getString(1); + if (isKeyword(table) && !table.equals("PARAMETERS")) { + fail("Table INFORMATION_SCHEMA.\"" + table + + "\" uses a keyword or SQL reserved word as its name."); + } + String column = rs.getString(2); + if (isKeyword(column)) { + fail("Column INFORMATION_SCHEMA." 
+ table + ".\"" + column + + "\" uses a keyword or SQL reserved word as its name."); + } + } + } + } + } + + private static boolean isKeyword(String identifier) { + return ALL_RESEVED_WORDS.contains(identifier) || ParserUtil.isKeyword(identifier, false); + } + + @SuppressWarnings("incomplete-switch") + private void testMetaData() throws Exception { + TreeSet set = new TreeSet<>(); + for (Entry entry : TOKENS.entrySet()) { + switch (entry.getValue()) { + case KEYWORD: + case CONTEXT_SENSITIVE_KEYWORD: { + String s = entry.getKey(); + if (!SQL2003_RESERVED_WORDS.contains(s)) { + set.add(s); + } + } + } + } + try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:")) { + assertEquals(setToString(set), conn.getMetaData().getSQLKeywords()); + } + try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:;MODE=STRICT")) { + TreeSet set2 = new TreeSet<>(set); + set2.removeAll(STRICT_MODE_NON_KEYWORDS); + assertEquals(setToString(set2), conn.getMetaData().getSQLKeywords()); + } + set.add("INTERSECTS"); + set.add("SYSDATE"); + set.add("SYSTIME"); + set.add("SYSTIMESTAMP"); + set.add("TODAY"); + try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:;OLD_INFORMATION_SCHEMA=TRUE")) { + assertEquals(setToString(set), conn.getMetaData().getSQLKeywords()); + } + } + + private static String setToString(TreeSet set) { + Iterator i = set.iterator(); + if (i.hasNext()) { + StringBuilder builder = new StringBuilder(i.next()); + while (i.hasNext()) { + builder.append(',').append(i.next()); + } + return builder.toString(); + } + return ""; + } + } diff --git a/h2/src/test/org/h2/test/unit/TestLocalResultFactory.java b/h2/src/test/org/h2/test/unit/TestLocalResultFactory.java deleted file mode 100644 index b8bb89b419..0000000000 --- a/h2/src/test/org/h2/test/unit/TestLocalResultFactory.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.Statement; -import java.util.concurrent.atomic.AtomicInteger; -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.result.LocalResult; -import org.h2.result.LocalResultFactory; -import org.h2.test.TestBase; - -/** - * Test {@link LocalResultFactory} setting. - */ -public class TestLocalResultFactory extends TestBase { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:localResultFactory;LOCAL_RESULT_FACTORY=\"" - + MyTestLocalResultFactory.class.getName() + '"')) { - Statement stat = conn.createStatement(); - - stat.execute("create table t1(id int, name varchar)"); - for (int i = 0; i < 1000; i++) { - stat.execute("insert into t1 values(" + i + ", 'name')"); - } - assertEquals(MyTestLocalResultFactory.COUNTER.get(), 0); - - stat.execute("select * from t1"); - assertEquals(MyTestLocalResultFactory.COUNTER.get(), 1); - } - } - - /** - * Test local result factory. - */ - public static class MyTestLocalResultFactory extends LocalResultFactory { - /** Call counter for the factory methods. 
*/ - static final AtomicInteger COUNTER = new AtomicInteger(); - - @Override public LocalResult create(Session session, Expression[] expressions, int visibleColumnCount) { - COUNTER.incrementAndGet(); - return LocalResultFactory.DEFAULT.create(session, expressions, visibleColumnCount); - } - - @Override public LocalResult create() { - COUNTER.incrementAndGet(); - return LocalResultFactory.DEFAULT.create(); - } - } -} diff --git a/h2/src/test/org/h2/test/unit/TestLocale.java b/h2/src/test/org/h2/test/unit/TestLocale.java index d7d867ad4a..0c91b9f6cd 100644 --- a/h2/src/test/org/h2/test/unit/TestLocale.java +++ b/h2/src/test/org/h2/test/unit/TestLocale.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -24,7 +24,7 @@ public class TestLocale extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestMVTempResult.java b/h2/src/test/org/h2/test/unit/TestMVTempResult.java index ce908303dc..3dacb86ead 100644 --- a/h2/src/test/org/h2/test/unit/TestMVTempResult.java +++ b/h2/src/test/org/h2/test/unit/TestMVTempResult.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; diff --git a/h2/src/test/org/h2/test/unit/TestMathUtils.java b/h2/src/test/org/h2/test/unit/TestMathUtils.java index df5630e431..80b2e74428 100644 --- a/h2/src/test/org/h2/test/unit/TestMathUtils.java +++ b/h2/src/test/org/h2/test/unit/TestMathUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -19,7 +19,7 @@ public class TestMathUtils extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -60,6 +60,10 @@ private void testNextPowerOf2Int() { for (int i = 0; i < testValues.length; i++) { assertEquals(resultValues[i], MathUtils.nextPowerOf2(testValues[i])); } + testValues = new int[] { Integer.MIN_VALUE, -1, largestPower2 + 1, Integer.MAX_VALUE }; + for (int v : testValues) { + assertThrows(IllegalArgumentException.class, () -> MathUtils.nextPowerOf2(v)); + } } } diff --git a/h2/src/test/org/h2/test/unit/TestMemoryEstimator.java b/h2/src/test/org/h2/test/unit/TestMemoryEstimator.java new file mode 100644 index 0000000000..31e0e4dc83 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestMemoryEstimator.java @@ -0,0 +1,120 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.nio.ByteBuffer; +import java.util.Random; +import java.util.concurrent.atomic.AtomicLong; +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.BasicDataType; +import org.h2.test.TestBase; +import org.h2.util.MemoryEstimator; + +/** + * Class TestMemoryEstimator. + *
            + *
          • 12/7/19 10:38 PM initial creation + *
          + * + * @author Andrei Tokar + */ +public class TestMemoryEstimator extends TestBase { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() { + testEstimator(); + testPageEstimator(); + } + + private void testEstimator() { + Random random = new Random(); + AtomicLong stat = new AtomicLong(); + TestDataType dataType = new TestDataType(); + int sum = 0; + int sum2 = 0; + int err2 = 0; + int size = 10000; + for (int i = 0; i < size; i++) { + int x = (int)Math.abs(100 + random.nextGaussian() * 30); + int y = MemoryEstimator.estimateMemory(stat, dataType, x); + sum += x; + sum2 += x * x; + err2 += (x - y) * (x - y); + } + int avg = sum / size; + double err = Math.sqrt(1.0 * err2 / sum2); + int pct = MemoryEstimator.samplingPct(stat); + String msg = "Avg=" + avg + ", err=" + err + ", pct=" + pct + " " + (dataType.getCount() * 100 / size); + assertTrue(msg, err < 0.3); + assertTrue(msg, pct <= 7); + } + + private void testPageEstimator() { + Random random = new Random(); + AtomicLong stat = new AtomicLong(); + TestDataType dataType = new TestDataType(); + long sum = 0; + long sum2 = 0; + long err2 = 0; + int size = 10000; + int pageSz; + for (int i = 0; i < size; i+=pageSz) { + pageSz = random.nextInt(48) + 1; + Integer[] storage = dataType.createStorage(pageSz); + int x = 0; + for (int k = 0; k < pageSz; k++) { + storage[k] = (int)Math.abs(100 + random.nextGaussian() * 30); + x += storage[k]; + } + int y = MemoryEstimator.estimateMemory(stat, dataType, storage, pageSz); + sum += x; + sum2 += x * x; + err2 += (x - y) * (x - y); + } + long avg = sum / size; + double err = Math.sqrt(1.0 * err2 / sum2); + int pct = MemoryEstimator.samplingPct(stat); + String msg = "Avg=" + avg + ", err=" + err + ", pct=" + pct + " " + (dataType.getCount() * 100 / size); + assertTrue(msg, err < 0.12); + assertTrue(msg, pct <= 4); 
+ } + + private static class TestDataType extends BasicDataType { + private int count; + + TestDataType() { + } + + public int getCount() { + return count; + } + + @Override + public int getMemory(Integer obj) { + ++count; + return obj; + } + + @Override + public void write(WriteBuffer buff, Integer obj) {} + + @Override + public Integer read(ByteBuffer buff) { return null; } + + @Override + public Integer[] createStorage(int size) { return new Integer[size]; } + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestMemoryUnmapper.java b/h2/src/test/org/h2/test/unit/TestMemoryUnmapper.java index 65aa095793..c2d320cb7c 100644 --- a/h2/src/test/org/h2/test/unit/TestMemoryUnmapper.java +++ b/h2/src/test/org/h2/test/unit/TestMemoryUnmapper.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -26,7 +26,7 @@ public class TestMemoryUnmapper extends TestBase { */ public static void main(String... a) throws Exception { if (a.length == 0) { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } else { ByteBuffer buffer = ByteBuffer.allocateDirect(10); System.exit(MemoryUnmapper.unmap(buffer) ? OK : UNAVAILABLE); diff --git a/h2/src/test/org/h2/test/unit/TestMode.java b/h2/src/test/org/h2/test/unit/TestMode.java index e34bd647a1..e8dd8a94fe 100644 --- a/h2/src/test/org/h2/test/unit/TestMode.java +++ b/h2/src/test/org/h2/test/unit/TestMode.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -19,7 +19,7 @@ public class TestMode extends TestBase { * @param a ignored */ public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestModifyOnWrite.java b/h2/src/test/org/h2/test/unit/TestModifyOnWrite.java deleted file mode 100644 index cf2bffcfe0..0000000000 --- a/h2/src/test/org/h2/test/unit/TestModifyOnWrite.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.Statement; - -import org.h2.engine.SysProperties; -import org.h2.store.fs.FileUtils; -import org.h2.test.TestBase; -import org.h2.test.TestDb; -import org.h2.util.IOUtils; -import org.h2.util.Utils; - -/** - * Test that the database file is only modified when writing to the database. - */ -public class TestModifyOnWrite extends TestDb { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - System.setProperty("h2.modifyOnWrite", "true"); - TestBase.createCaller().init().test(); - } - - @Override - public boolean isEnabled() { - if (!SysProperties.MODIFY_ON_WRITE) { - return false; - } - return true; - } - - @Override - public void test() throws Exception { - deleteDb("modifyOnWrite"); - String dbFile = getBaseDir() + "/modifyOnWrite.h2.db"; - assertFalse(FileUtils.exists(dbFile)); - Connection conn = getConnection("modifyOnWrite"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int)"); - conn.close(); - byte[] test = IOUtils.readBytesAndClose(FileUtils.newInputStream(dbFile), -1); - - conn = getConnection("modifyOnWrite"); - stat = conn.createStatement(); - ResultSet rs; - rs = stat.executeQuery("select * from test"); - assertFalse(rs.next()); - conn.close(); - assertTrue(FileUtils.exists(dbFile)); - byte[] test2 = IOUtils.readBytesAndClose(FileUtils.newInputStream(dbFile), -1); - assertEquals(test, test2); - - conn = getConnection("modifyOnWrite"); - stat = conn.createStatement(); - stat.execute("insert into test values(1)"); - conn.close(); - - conn = getConnection("modifyOnWrite"); - stat = conn.createStatement(); - rs = stat.executeQuery("select * from test"); - assertTrue(rs.next()); - conn.close(); - - test2 = IOUtils.readBytesAndClose(FileUtils.newInputStream(dbFile), -1); - assertFalse(Utils.compareSecure(test, test2)); - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestMultiThreadedKernel.java b/h2/src/test/org/h2/test/unit/TestMultiThreadedKernel.java index 75d94f8036..658bf5dfac 100644 --- a/h2/src/test/org/h2/test/unit/TestMultiThreadedKernel.java +++ b/h2/src/test/org/h2/test/unit/TestMultiThreadedKernel.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -29,7 +29,7 @@ public class TestMultiThreadedKernel extends TestDb implements Runnable { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -47,7 +47,7 @@ public void test() throws Exception { Thread[] list = new Thread[count]; for (int i = 0; i < count; i++) { TestMultiThreadedKernel r = new TestMultiThreadedKernel(); - r.url = getURL("multiThreadedKernel;MULTI_THREADED=1", true); + r.url = getURL("multiThreadedKernel", true); r.user = getUser(); r.password = getPassword(); r.master = this; @@ -70,7 +70,7 @@ public void run() { try { org.h2.Driver.load(); Connection conn = DriverManager.getConnection(url + - ";MULTI_THREADED=1;LOCK_MODE=3;WRITE_DELAY=0", + ";LOCK_MODE=3;WRITE_DELAY=0", user, password); conn.createStatement().execute( "CREATE TABLE TEST" + id + diff --git a/h2/src/test/org/h2/test/unit/TestNetUtils.java b/h2/src/test/org/h2/test/unit/TestNetUtils.java index 19ceac25d0..30bf100159 100644 --- a/h2/src/test/org/h2/test/unit/TestNetUtils.java +++ b/h2/src/test/org/h2/test/unit/TestNetUtils.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Sergi Vladykin */ package org.h2.test.unit; import java.io.IOException; +import java.net.InetAddress; import java.net.ServerSocket; import java.net.Socket; import java.util.HashSet; @@ -20,6 +21,7 @@ import org.h2.test.TestBase; import org.h2.util.NetUtils; import org.h2.util.Task; +import org.h2.util.Utils10; /** * Test the network utilities from {@link NetUtils}. 
@@ -41,7 +43,7 @@ public class TestNetUtils extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -50,6 +52,8 @@ public void test() throws Exception { testTlsSessionWithServerSideAnonymousDisabled(); testFrequentConnections(true, 100); testFrequentConnections(false, 1000); + testIpToShortForm(); + testTcpQuickack(); } /** @@ -58,7 +62,7 @@ public void test() throws Exception { * (no SSL certificate is needed). */ private void testAnonymousTlsSession() throws Exception { - if (BuildBase.getJavaVersion() >= 11) { + if (config.ci || BuildBase.getJavaVersion() >= 11) { // Issue #1303 return; } @@ -102,6 +106,10 @@ private void testAnonymousTlsSession() throws Exception { * instead, the server socket is altered. */ private void testTlsSessionWithServerSideAnonymousDisabled() throws Exception { + if (config.ci) { + // Issue #1303 + return; + } boolean ssl = true; Task task = null; ServerSocket serverSocket = null; @@ -267,4 +275,60 @@ public Exception getException() { } + private void testIpToShortForm() throws Exception { + testIpToShortForm("1.2.3.4", "1.2.3.4"); + testIpToShortForm("1:2:3:4:a:b:c:d", "1:2:3:4:a:b:c:d"); + testIpToShortForm("::1", "::1"); + testIpToShortForm("1::", "1::"); + testIpToShortForm("c1c1:0:0:2::fffe", "c1c1:0:0:2:0:0:0:fffe"); + } + + private void testIpToShortForm(String expected, String source) throws Exception { + byte[] addr = InetAddress.getByName(source).getAddress(); + testIpToShortForm(expected, addr, false); + if (expected.indexOf(':') >= 0) { + expected = '[' + expected + ']'; + } + testIpToShortForm(expected, addr, true); + } + + private void testIpToShortForm(String expected, byte[] addr, boolean addBrackets) { + assertEquals(expected, NetUtils.ipToShortForm(null, addr, addBrackets).toString()); + assertEquals(expected, NetUtils.ipToShortForm(new StringBuilder(), addr, 
addBrackets).toString()); + assertEquals(expected, + NetUtils.ipToShortForm(new StringBuilder("*"), addr, addBrackets).deleteCharAt(0).toString()); + } + + private void testTcpQuickack() { + final boolean ssl = !config.ci && BuildBase.getJavaVersion() < 11; + try (ServerSocket serverSocket = NetUtils.createServerSocket(PORT, ssl)) { + Thread thread = new Thread() { + @Override + public void run() { + try (Socket s = NetUtils.createLoopbackSocket(PORT, ssl)) { + s.getInputStream().read(); + } catch (IOException e) { + } + } + }; + thread.start(); + try (Socket socket = serverSocket.accept()) { + boolean supported = Utils10.setTcpQuickack(socket, true); + if (supported) { + assertTrue(Utils10.getTcpQuickack(socket)); + Utils10.setTcpQuickack(socket, false); + assertFalse(Utils10.getTcpQuickack(socket)); + } + socket.getOutputStream().write(1); + } finally { + try { + thread.join(); + } catch (InterruptedException e) { + } + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } diff --git a/h2/src/test/org/h2/test/unit/TestObjectDeserialization.java b/h2/src/test/org/h2/test/unit/TestObjectDeserialization.java index a16b7fc441..47274f014d 100644 --- a/h2/src/test/org/h2/test/unit/TestObjectDeserialization.java +++ b/h2/src/test/org/h2/test/unit/TestObjectDeserialization.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Noah Fontes */ package org.h2.test.unit; -import org.h2.message.DbException; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.util.JdbcUtils; import org.h2.util.StringUtils; @@ -33,7 +33,7 @@ public class TestObjectDeserialization extends TestBase { */ public static void main(String... 
a) throws Exception { System.setProperty("h2.useThreadContextClassLoader", "true"); - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -44,12 +44,8 @@ public void test() { private void testThreadContextClassLoader() { usesThreadContextClassLoader = false; Thread.currentThread().setContextClassLoader(new TestClassLoader()); - try { - JdbcUtils.deserialize(StringUtils.convertHexToBytes(OBJECT), null); - fail(); - } catch (DbException e) { - // expected - } + assertThrows(ErrorCode.DESERIALIZATION_FAILED_1, + () -> JdbcUtils.deserialize(StringUtils.convertHexToBytes(OBJECT), null)); assertTrue(usesThreadContextClassLoader); } diff --git a/h2/src/test/org/h2/test/unit/TestOldVersion.java b/h2/src/test/org/h2/test/unit/TestOldVersion.java deleted file mode 100644 index ba4d54b83d..0000000000 --- a/h2/src/test/org/h2/test/unit/TestOldVersion.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.lang.reflect.Method; -import java.net.URL; -import java.net.URLClassLoader; -import java.sql.Connection; -import java.sql.Driver; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Types; -import java.util.Properties; -import org.h2.api.ErrorCode; -import org.h2.test.TestBase; -import org.h2.test.TestDb; -import org.h2.tools.Server; - -/** - * Tests the compatibility with older versions - */ -public class TestOldVersion extends TestDb { - - private ClassLoader cl; - private Driver driver; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public boolean isEnabled() { - if (config.mvStore) { - return false; - } - return true; - } - - @Override - public void test() throws Exception { - cl = getClassLoader("file:ext/h2-1.2.127.jar"); - driver = getDriver(cl); - if (driver == null) { - println("not found: ext/h2-1.2.127.jar - test skipped"); - return; - } - Connection conn = driver.connect("jdbc:h2:mem:", null); - assertEquals("1.2.127 (2010-01-15)", conn.getMetaData() - .getDatabaseProductVersion()); - conn.close(); - testLobInFiles(); - testOldClientNewServer(); - } - - private void testLobInFiles() throws Exception { - deleteDb("oldVersion"); - Connection conn; - Statement stat; - conn = driver.connect("jdbc:h2:" + getBaseDir() + "/oldVersion", null); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key, b blob, c clob)"); - PreparedStatement prep = conn - .prepareStatement("insert into test values(?, ?, ?)"); - prep.setInt(1, 0); - prep.setNull(2, Types.BLOB); - prep.setNull(3, Types.CLOB); - prep.execute(); - prep.setInt(1, 1); - prep.setBytes(2, new byte[0]); - prep.setString(3, ""); - prep.execute(); - prep.setInt(1, 2); - prep.setBytes(2, new byte[5]); - prep.setString(3, "\u1234\u1234\u1234\u1234\u1234"); - prep.execute(); - prep.setInt(1, 3); - prep.setBytes(2, new byte[100000]); - prep.setString(3, new String(new char[100000])); - prep.execute(); - conn.close(); - conn = DriverManager.getConnection("jdbc:h2:" + getBaseDir() + - "/oldVersion", new Properties()); - stat = conn.createStatement(); - checkResult(stat.executeQuery("select * from test order by id")); - stat.execute("create table test2 as select * from test"); - checkResult(stat.executeQuery("select * from test2 order by id")); - stat.execute("delete from test"); - conn.close(); - } - - private void checkResult(ResultSet rs) throws SQLException { - rs.next(); - assertEquals(0, rs.getInt(1)); - assertEquals(null, 
rs.getBytes(2)); - assertEquals(null, rs.getString(3)); - rs.next(); - assertEquals(1, rs.getInt(1)); - assertEquals(new byte[0], rs.getBytes(2)); - assertEquals("", rs.getString(3)); - rs.next(); - assertEquals(2, rs.getInt(1)); - assertEquals(new byte[5], rs.getBytes(2)); - assertEquals("\u1234\u1234\u1234\u1234\u1234", rs.getString(3)); - rs.next(); - assertEquals(3, rs.getInt(1)); - assertEquals(new byte[100000], rs.getBytes(2)); - assertEquals(new String(new char[100000]), rs.getString(3)); - } - - private void testOldClientNewServer() throws Exception { - Server server = org.h2.tools.Server.createTcpServer(); - server.start(); - int port = server.getPort(); - assertThrows(ErrorCode.DRIVER_VERSION_ERROR_2, driver).connect( - "jdbc:h2:tcp://localhost:" + port + "/mem:test", null); - server.stop(); - - Class serverClass = cl.loadClass("org.h2.tools.Server"); - Method m; - m = serverClass.getMethod("createTcpServer", String[].class); - Object serverOld = m.invoke(null, new Object[] { new String[] { - "-tcpPort", "" + port } }); - m = serverOld.getClass().getMethod("start"); - m.invoke(serverOld); - Connection conn; - conn = org.h2.Driver.load().connect("jdbc:h2:mem:", null); - Statement stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("call 1"); - rs.next(); - assertEquals(1, rs.getInt(1)); - conn.close(); - m = serverOld.getClass().getMethod("stop"); - m.invoke(serverOld); - } - - private static ClassLoader getClassLoader(String jarFile) throws Exception { - URL[] urls = { new URL(jarFile) }; - return new URLClassLoader(urls, null) { - @Override - protected Class loadClass(String name, boolean resolve) throws ClassNotFoundException { - if (name.startsWith("org.h2.")) - return super.loadClass(name, resolve); - return TestOldVersion.class.getClassLoader().loadClass(name); - } - }; - } - - private static Driver getDriver(ClassLoader cl) throws Exception { - Class driverClass; - try { - driverClass = cl.loadClass("org.h2.Driver"); - } catch 
(ClassNotFoundException e) { - return null; - } - Method m = driverClass.getMethod("load"); - Driver driver = (Driver) m.invoke(null); - return driver; - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestOverflow.java b/h2/src/test/org/h2/test/unit/TestOverflow.java index 99ff38c49b..c23d34c858 100644 --- a/h2/src/test/org/h2/test/unit/TestOverflow.java +++ b/h2/src/test/org/h2/test/unit/TestOverflow.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -11,7 +11,7 @@ import org.h2.test.TestBase; import org.h2.value.Value; -import org.h2.value.ValueString; +import org.h2.value.ValueVarchar; /** * Tests numeric overflow on various data types. @@ -30,15 +30,15 @@ public class TestOverflow extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() { - test(Value.BYTE, Byte.MIN_VALUE, Byte.MAX_VALUE); - test(Value.INT, Integer.MIN_VALUE, Integer.MAX_VALUE); - test(Value.LONG, Long.MIN_VALUE, Long.MAX_VALUE); - test(Value.SHORT, Short.MIN_VALUE, Short.MAX_VALUE); + test(Value.TINYINT, Byte.MIN_VALUE, Byte.MAX_VALUE); + test(Value.INTEGER, Integer.MIN_VALUE, Integer.MAX_VALUE); + test(Value.BIGINT, Long.MIN_VALUE, Long.MAX_VALUE); + test(Value.SMALLINT, Short.MIN_VALUE, Short.MAX_VALUE); } private void test(int type, long minValue, long maxValue) { @@ -124,7 +124,7 @@ private boolean inRange(BigInteger v) { } private void add(long l) { - values.add(ValueString.get("" + l).convertTo(dataType)); + values.add(ValueVarchar.get("" + l).convertTo(dataType)); } } diff --git a/h2/src/test/org/h2/test/unit/TestPageStore.java b/h2/src/test/org/h2/test/unit/TestPageStore.java deleted file mode 100644 index 378ce83760..0000000000 --- a/h2/src/test/org/h2/test/unit/TestPageStore.java +++ /dev/null @@ -1,910 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.io.InputStream; -import java.io.InputStreamReader; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Random; -import java.util.Set; -import java.util.TreeSet; -import java.util.concurrent.TimeUnit; - -import org.h2.api.DatabaseEventListener; -import org.h2.api.ErrorCode; -import org.h2.result.Row; -import org.h2.result.RowImpl; -import org.h2.store.Page; -import org.h2.store.fs.FileUtils; -import org.h2.test.TestBase; -import org.h2.test.TestDb; -import org.h2.util.IOUtils; -import org.h2.util.JdbcUtils; - -/** - * Test the page store. - */ -public class TestPageStore extends TestDb { - - /** - * The events log. - */ - static StringBuilder eventBuffer = new StringBuilder(); - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public boolean isEnabled() { - if (config.memory) { - return false; - } - return true; - } - - @Override - public void test() throws Exception { - deleteDb(null); - testDropTempTable(); - testLogLimitFalsePositive(); - testLogLimit(); - testRecoverLobInDatabase(); - testWriteTransactionLogBeforeData(); - testDefrag(); - testInsertReverse(); - testInsertDelete(); - testCheckpoint(); - testDropRecreate(); - testDropAll(); - testCloseTempTable(); - testDuplicateKey(); - testUpdateOverflow(); - testTruncateReconnect(); - testReverseIndex(); - testLargeUpdates(); - testLargeInserts(); - testLargeDatabaseFastOpen(); - testUniqueIndexReopen(); - testLargeRows(); - testRecoverDropIndex(); - testDropPk(); - testCreatePkLater(); - testTruncate(); - testLargeIndex(); - testUniqueIndex(); - testCreateIndexLater(); - testFuzzOperations(); - deleteDb(null); - } - - private void testDropTempTable() throws SQLException { - deleteDb("pageStoreDropTemp"); - Connection c1 = getConnection("pageStoreDropTemp"); - Connection c2 = getConnection("pageStoreDropTemp"); - c1.setAutoCommit(false); - c2.setAutoCommit(false); - Statement s1 = c1.createStatement(); - Statement s2 = c2.createStatement(); - s1.execute("create local temporary table a(id int primary key)"); - s1.execute("insert into a values(1)"); - c1.commit(); - c1.close(); - s2.execute("create table b(id int primary key)"); - s2.execute("insert into b values(1)"); - c2.commit(); - s2.execute("checkpoint sync"); - s2.execute("shutdown immediately"); - try { - c2.close(); - } catch (SQLException e) { - // ignore - } - c1 = getConnection("pageStoreDropTemp"); - c1.close(); - deleteDb("pageStoreDropTemp"); - } - - private void testLogLimit() throws Exception { - if (config.mvStore) { - return; - } - deleteDb("pageStoreLogLimit"); - Connection conn, conn2; - Statement stat, stat2; - String url = "pageStoreLogLimit;TRACE_LEVEL_FILE=2"; - conn = 
getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - conn.setAutoCommit(false); - stat.execute("insert into test values(1)"); - - conn2 = getConnection(url); - stat2 = conn2.createStatement(); - stat2.execute("create table t2(id identity, name varchar)"); - stat2.execute("set max_log_size 1"); - for (int i = 0; i < 10; i++) { - stat2.execute("insert into t2(name) " + - "select space(100) from system_range(1, 1000)"); - } - InputStream in = FileUtils.newInputStream(getBaseDir() + - "/pageStoreLogLimit.trace.db"); - String s = IOUtils.readStringAndClose(new InputStreamReader(in), -1); - assertContains(s, "Transaction log could not be truncated"); - conn.commit(); - ResultSet rs = stat2.executeQuery("select * from test"); - assertTrue(rs.next()); - conn2.close(); - conn.close(); - } - - private void testLogLimitFalsePositive() throws Exception { - deleteDb("pageStoreLogLimitFalsePositive"); - String url = "pageStoreLogLimitFalsePositive;TRACE_LEVEL_FILE=2"; - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("set max_log_size 1"); - stat.execute("create table test(x varchar)"); - for (int i = 0; i < 300; ++i) { - stat.execute("insert into test values (space(2000))"); - } - stat.execute("checkpoint"); - InputStream in = FileUtils.newInputStream(getBaseDir() + - "/pageStoreLogLimitFalsePositive.trace.db"); - String s = IOUtils.readStringAndClose(new InputStreamReader(in), -1); - assertFalse(s.indexOf("Transaction log could not be truncated") > 0); - conn.close(); - } - - private void testRecoverLobInDatabase() throws SQLException { - deleteDb("pageStoreRecoverLobInDatabase"); - String url = getURL("pageStoreRecoverLobInDatabase;" + - "CACHE_SIZE=1", true); - Connection conn; - Statement stat; - conn = getConnection(url, getUser(), getPassword()); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key, name clob)"); - 
stat.execute("create index idx_id on test(id)"); - stat.execute("insert into test " + - "select x, space(1100+x) from system_range(1, 100)"); - Random r = new Random(1); - ArrayList list = new ArrayList<>(10); - for (int i = 0; i < 10; i++) { - Connection conn2 = getConnection(url, getUser(), getPassword()); - list.add(conn2); - Statement stat2 = conn2.createStatement(); - // conn2.setAutoCommit(false); - if (r.nextBoolean()) { - stat2.execute("update test set id = id where id = " + r.nextInt(100)); - } else { - stat2.execute("delete from test where id = " + r.nextInt(100)); - } - } - stat.execute("shutdown immediately"); - JdbcUtils.closeSilently(conn); - for (Connection c : list) { - JdbcUtils.closeSilently(c); - } - conn = getConnection(url, getUser(), getPassword()); - conn.close(); - } - - private void testWriteTransactionLogBeforeData() throws SQLException { - deleteDb("pageStoreWriteTransactionLogBeforeData"); - String url = getURL("pageStoreWriteTransactionLogBeforeData;" + - "CACHE_SIZE=16;WRITE_DELAY=1000000", true); - Connection conn; - Statement stat; - conn = getConnection(url, getUser(), getPassword()); - stat = conn.createStatement(); - stat.execute("create table test(name varchar) as select space(100000)"); - for (int i = 0; i < 100; i++) { - stat.execute("create table test" + i + "(id int) " + - "as select x from system_range(1, 1000)"); - } - conn.close(); - conn = getConnection(url, getUser(), getPassword()); - stat = conn.createStatement(); - stat.execute("drop table test0"); - stat.execute("select * from test"); - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (Exception e) { - // ignore - } - conn = getConnection(url, getUser(), getPassword()); - stat = conn.createStatement(); - for (int i = 1; i < 100; i++) { - stat.execute("select * from test" + i); - } - conn.close(); - } - - private void testDefrag() throws SQLException { - if (config.reopen || config.multiThreaded) { - return; - } - deleteDb("pageStoreDefrag"); - 
Connection conn = getConnection( - "pageStoreDefrag;LOG=0;UNDO_LOG=0;LOCK_MODE=0"); - Statement stat = conn.createStatement(); - int tableCount = 10; - int rowCount = getSize(1000, 100000); - for (int i = 0; i < tableCount; i++) { - stat.execute("create table test" + i + "(id int primary key, " + - "string1 varchar, string2 varchar, string3 varchar)"); - } - for (int j = 0; j < tableCount; j++) { - PreparedStatement prep = conn.prepareStatement( - "insert into test" + j + " values(?, ?, ?, ?)"); - for (int i = 0; i < rowCount; i++) { - prep.setInt(1, i); - prep.setInt(2, i); - prep.setInt(3, i); - prep.setInt(4, i); - prep.execute(); - } - } - stat.executeUpdate("shutdown defrag"); - conn.close(); - } - - private void testInsertReverse() throws SQLException { - deleteDb("pageStoreInsertReverse"); - Connection conn; - conn = getConnection("pageStoreInsertReverse"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key, data varchar)"); - stat.execute("insert into test select -x, space(100) " + - "from system_range(1, 1000)"); - stat.execute("drop table test"); - stat.execute("create table test(id int primary key, data varchar)"); - stat.execute("insert into test select -x, space(2048) " + - "from system_range(1, 1000)"); - conn.close(); - } - - private void testInsertDelete() { - Row[] x = new Row[0]; - Row r = new RowImpl(null, 0); - x = Page.insert(x, 0, 0, r); - assertTrue(x[0] == r); - Row r2 = new RowImpl(null, 0); - x = Page.insert(x, 1, 0, r2); - assertTrue(x[0] == r2); - assertTrue(x[1] == r); - Row r3 = new RowImpl(null, 0); - x = Page.insert(x, 2, 1, r3); - assertTrue(x[0] == r2); - assertTrue(x[1] == r3); - assertTrue(x[2] == r); - - x = Page.remove(x, 3, 1); - assertTrue(x[0] == r2); - assertTrue(x[1] == r); - x = Page.remove(x, 2, 0); - assertTrue(x[0] == r); - x = Page.remove(x, 1, 0); - } - - private void testCheckpoint() throws SQLException { - deleteDb("pageStoreCheckpoint"); - Connection conn; - conn = 
getConnection("pageStoreCheckpoint"); - Statement stat = conn.createStatement(); - stat.execute("create table test(data varchar)"); - stat.execute("create sequence seq"); - stat.execute("set max_log_size 1"); - conn.setAutoCommit(false); - stat.execute("insert into test select space(1000) from system_range(1, 1000)"); - long before = System.nanoTime(); - stat.execute("select nextval('SEQ') from system_range(1, 100000)"); - long after = System.nanoTime(); - // it's hard to test - basically it shouldn't checkpoint too often - if (after - before > TimeUnit.SECONDS.toNanos(20)) { - if (!config.reopen) { - fail("Checkpoint took " + TimeUnit.NANOSECONDS.toMillis(after - before) + " ms"); - } - } - stat.execute("drop table test"); - stat.execute("drop sequence seq"); - conn.close(); - } - - private void testDropRecreate() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreDropRecreate"); - Connection conn; - conn = getConnection("pageStoreDropRecreate"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int)"); - stat.execute("create index idx_test on test(id)"); - stat.execute("create table test2(id int)"); - stat.execute("drop table test"); - // this will re-used the object id of the test table, - // which is lower than the object id of test2 - stat.execute("create index idx_test on test2(id)"); - conn.close(); - conn = getConnection("pageStoreDropRecreate"); - conn.close(); - } - - private void testDropAll() throws SQLException { - deleteDb("pageStoreDropAll"); - Connection conn; - String url = "pageStoreDropAll"; - conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("CREATE TEMP TABLE A(A INT)"); - stat.execute("CREATE TABLE B(A VARCHAR IDENTITY)"); - stat.execute("CREATE TEMP TABLE C(A INT)"); - conn.close(); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("DROP ALL OBJECTS"); - conn.close(); - } - - private void testCloseTempTable() throws 
SQLException { - deleteDb("pageStoreCloseTempTable"); - Connection conn; - String url = "pageStoreCloseTempTable;CACHE_SIZE=0"; - conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("create local temporary table test(id int)"); - conn.rollback(); - Connection conn2 = getConnection(url); - Statement stat2 = conn2.createStatement(); - stat2.execute("create table test2 as select x from system_range(1, 5000)"); - stat2.execute("shutdown immediately"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn2).close(); - } - - private void testDuplicateKey() throws SQLException { - deleteDb("pageStoreDuplicateKey"); - Connection conn; - conn = getConnection("pageStoreDuplicateKey"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key, name varchar)"); - stat.execute("insert into test values(0, space(3000))"); - try { - stat.execute("insert into test values(0, space(3000))"); - } catch (SQLException e) { - // ignore - } - stat.execute("select * from test"); - conn.close(); - } - - private void testTruncateReconnect() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreTruncateReconnect"); - Connection conn; - conn = getConnection("pageStoreTruncateReconnect"); - conn.createStatement().execute( - "create table test(id int primary key, name varchar)"); - conn.createStatement().execute( - "insert into test(id) select x from system_range(1, 390)"); - conn.createStatement().execute("checkpoint"); - conn.createStatement().execute("shutdown immediately"); - JdbcUtils.closeSilently(conn); - conn = getConnection("pageStoreTruncateReconnect"); - conn.createStatement().execute("truncate table test"); - conn.createStatement().execute( - "insert into test(id) select x from system_range(1, 390)"); - conn.createStatement().execute("shutdown immediately"); - JdbcUtils.closeSilently(conn); - conn = 
getConnection("pageStoreTruncateReconnect"); - conn.close(); - } - - private void testUpdateOverflow() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreUpdateOverflow"); - Connection conn; - conn = getConnection("pageStoreUpdateOverflow"); - conn.createStatement().execute("create table test" + - "(id int primary key, name varchar)"); - conn.createStatement().execute( - "insert into test values(0, space(3000))"); - conn.createStatement().execute("checkpoint"); - conn.createStatement().execute("shutdown immediately"); - - JdbcUtils.closeSilently(conn); - conn = getConnection("pageStoreUpdateOverflow"); - conn.createStatement().execute("update test set id = 1"); - conn.createStatement().execute("shutdown immediately"); - - JdbcUtils.closeSilently(conn); - conn = getConnection("pageStoreUpdateOverflow"); - conn.close(); - } - - private void testReverseIndex() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreReverseIndex"); - Connection conn = getConnection("pageStoreReverseIndex"); - Statement stat = conn.createStatement(); - stat.execute("create table test(x int, y varchar default space(200))"); - for (int i = 30; i < 100; i++) { - stat.execute("insert into test(x) select null from system_range(1, " + i + ")"); - stat.execute("insert into test(x) select x from system_range(1, " + i + ")"); - stat.execute("create index idx on test(x desc, y)"); - ResultSet rs = stat.executeQuery("select min(x) from test"); - rs.next(); - assertEquals(1, rs.getInt(1)); - stat.execute("drop index idx"); - stat.execute("truncate table test"); - } - conn.close(); - } - - private void testLargeUpdates() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreLargeUpdates"); - Connection conn; - conn = getConnection("pageStoreLargeUpdates"); - Statement stat = conn.createStatement(); - int size = 1500; - stat.execute("call rand(1)"); - stat.execute( - "create table test(id int primary key, data varchar, 
test int) as " + - "select x, '', 123 from system_range(1, " + size + ")"); - Random random = new Random(1); - PreparedStatement prep = conn.prepareStatement( - "update test set data=space(?) where id=?"); - for (int i = 0; i < 2500; i++) { - int id = random.nextInt(size); - int newSize = random.nextInt(6000); - prep.setInt(1, newSize); - prep.setInt(2, id); - prep.execute(); - } - conn.close(); - conn = getConnection("pageStoreLargeUpdates"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * from test where test<>123"); - assertFalse(rs.next()); - conn.close(); - } - - private void testLargeInserts() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreLargeInserts"); - Connection conn; - conn = getConnection("pageStoreLargeInserts"); - Statement stat = conn.createStatement(); - stat.execute("create table test(data varchar)"); - stat.execute("insert into test values(space(1024 * 1024))"); - stat.execute("insert into test values(space(1024 * 1024))"); - conn.close(); - } - - private void testLargeDatabaseFastOpen() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreLargeDatabaseFastOpen"); - Connection conn; - String url = "pageStoreLargeDatabaseFastOpen"; - conn = getConnection(url); - conn.createStatement().execute( - "CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); - conn.createStatement().execute( - "create unique index idx_test_name on test(name)"); - conn.createStatement().execute( - "INSERT INTO TEST " + - "SELECT X, X || space(10) FROM SYSTEM_RANGE(1, 1000)"); - conn.close(); - conn = getConnection(url); - conn.createStatement().execute("DELETE FROM TEST WHERE ID=1"); - conn.createStatement().execute("CHECKPOINT"); - conn.createStatement().execute("SHUTDOWN IMMEDIATELY"); - try { - conn.close(); - } catch (SQLException e) { - // ignore - } - eventBuffer.setLength(0); - conn = getConnection(url + ";DATABASE_EVENT_LISTENER='" + - MyDatabaseEventListener.class.getName() 
+ "'"); - assertEquals("init;opened;", eventBuffer.toString()); - conn.close(); - } - - private void testUniqueIndexReopen() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreUniqueIndexReopen"); - Connection conn; - String url = "pageStoreUniqueIndexReopen"; - conn = getConnection(url); - conn.createStatement().execute( - "CREATE TABLE test(ID INT PRIMARY KEY, NAME VARCHAR(255))"); - conn.createStatement().execute( - "create unique index idx_test_name on test(name)"); - conn.createStatement().execute("INSERT INTO TEST VALUES(1, 'Hello')"); - conn.close(); - conn = getConnection(url); - assertThrows(ErrorCode.DUPLICATE_KEY_1, conn.createStatement()) - .execute("INSERT INTO TEST VALUES(2, 'Hello')"); - conn.close(); - } - - private void testLargeRows() throws Exception { - if (config.memory) { - return; - } - for (int i = 0; i < 10; i++) { - testLargeRows(i); - } - } - - private void testLargeRows(int seed) throws Exception { - deleteDb("pageStoreLargeRows"); - String url = getURL("pageStoreLargeRows;CACHE_SIZE=16", true); - Connection conn = null; - Statement stat = null; - int count = 0; - try { - Class.forName("org.h2.Driver"); - conn = DriverManager.getConnection(url); - stat = conn.createStatement(); - int tableCount = 1; - PreparedStatement[] insert = new PreparedStatement[tableCount]; - PreparedStatement[] deleteMany = new PreparedStatement[tableCount]; - PreparedStatement[] updateMany = new PreparedStatement[tableCount]; - for (int i = 0; i < tableCount; i++) { - stat.execute("create table test" + i + - "(id int primary key, name varchar)"); - stat.execute("create index idx_test" + i + " on test" + i + - "(name)"); - insert[i] = conn.prepareStatement("insert into test" + i + - " values(?, ? || space(?))"); - deleteMany[i] = conn.prepareStatement("delete from test" + i + - " where id between ? and ?"); - updateMany[i] = conn.prepareStatement("update test" + i + - " set name=? || space(?) where id between ? 
and ?"); - } - Random random = new Random(seed); - for (int i = 0; i < 1000; i++) { - count = i; - PreparedStatement p; - if (random.nextInt(100) < 95) { - p = insert[random.nextInt(tableCount)]; - p.setInt(1, i); - p.setInt(2, i); - if (random.nextInt(30) == 5) { - p.setInt(3, 3000); - } else { - p.setInt(3, random.nextInt(100)); - } - p.execute(); - } else if (random.nextInt(100) < 90) { - p = updateMany[random.nextInt(tableCount)]; - p.setInt(1, i); - p.setInt(2, random.nextInt(50)); - int first = random.nextInt(1 + i); - p.setInt(3, first); - p.setInt(4, first + random.nextInt(50)); - p.executeUpdate(); - } else { - p = deleteMany[random.nextInt(tableCount)]; - int first = random.nextInt(1 + i); - p.setInt(1, first); - p.setInt(2, first + random.nextInt(100)); - p.executeUpdate(); - } - } - conn.close(); - conn = DriverManager.getConnection(url); - conn.close(); - conn = DriverManager.getConnection(url); - stat = conn.createStatement(); - stat.execute("script to '" + getBaseDir() + "/pageStoreLargeRows.sql'"); - conn.close(); - FileUtils.delete(getBaseDir() + "/pageStoreLargeRows.sql"); - } catch (Exception e) { - if (stat != null) { - try { - stat.execute("shutdown immediately"); - } catch (SQLException e2) { - // ignore - } - } - if (conn != null) { - try { - conn.close(); - } catch (SQLException e2) { - // ignore - } - } - throw new RuntimeException("count: " + count, e); - } - } - - private void testRecoverDropIndex() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreRecoverDropIndex"); - Connection conn = getConnection("pageStoreRecoverDropIndex"); - Statement stat = conn.createStatement(); - stat.execute("set write_delay 0"); - stat.execute("create table test(id int, name varchar) " + - "as select x, x from system_range(1, 1400)"); - stat.execute("create index idx_name on test(name)"); - conn.close(); - conn = getConnection("pageStoreRecoverDropIndex"); - stat = conn.createStatement(); - stat.execute("drop index idx_name"); - 
stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (SQLException e) { - // ignore - } - conn = getConnection("pageStoreRecoverDropIndex;cache_size=1"); - conn.close(); - } - - private void testDropPk() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreDropPk"); - Connection conn; - Statement stat; - conn = getConnection("pageStoreDropPk"); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - stat.execute("insert into test values(" + Integer.MIN_VALUE + "), (" + - Integer.MAX_VALUE + ")"); - stat.execute("alter table test drop primary key"); - conn.close(); - conn = getConnection("pageStoreDropPk"); - stat = conn.createStatement(); - stat.execute("insert into test values(" + Integer.MIN_VALUE + "), (" + - Integer.MAX_VALUE + ")"); - conn.close(); - } - - private void testCreatePkLater() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreCreatePkLater"); - Connection conn; - Statement stat; - conn = getConnection("pageStoreCreatePkLater"); - stat = conn.createStatement(); - stat.execute("create table test(id int not null) as select 100"); - stat.execute("create primary key on test(id)"); - conn.close(); - conn = getConnection("pageStoreCreatePkLater"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * from test where id = 100"); - assertTrue(rs.next()); - conn.close(); - } - - private void testTruncate() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreTruncate"); - Connection conn = getConnection("pageStoreTruncate"); - Statement stat = conn.createStatement(); - stat.execute("set write_delay 0"); - stat.execute("create table test(id int) as select 1"); - stat.execute("truncate table test"); - stat.execute("insert into test values(1)"); - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (SQLException e) { - // ignore - } - conn = getConnection("pageStoreTruncate"); - 
conn.close(); - } - - private void testLargeIndex() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreLargeIndex"); - Connection conn = getConnection("pageStoreLargeIndex"); - conn.createStatement().execute( - "create table test(id varchar primary key, d varchar)"); - PreparedStatement prep = conn.prepareStatement( - "insert into test values(?, space(500))"); - for (int i = 0; i < 20000; i++) { - prep.setString(1, "" + i); - prep.executeUpdate(); - } - conn.close(); - } - - private void testUniqueIndex() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreUniqueIndex"); - Connection conn = getConnection("pageStoreUniqueIndex"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(ID INT UNIQUE)"); - stat.execute("INSERT INTO TEST VALUES(1)"); - conn.close(); - conn = getConnection("pageStoreUniqueIndex"); - assertThrows(ErrorCode.DUPLICATE_KEY_1, - conn.createStatement()).execute("INSERT INTO TEST VALUES(1)"); - conn.close(); - } - - private void testCreateIndexLater() throws SQLException { - deleteDb("pageStoreCreateIndexLater"); - Connection conn = getConnection("pageStoreCreateIndexLater"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(NAME VARCHAR) AS SELECT 1"); - stat.execute("CREATE INDEX IDX_N ON TEST(NAME)"); - stat.execute("INSERT INTO TEST SELECT X FROM SYSTEM_RANGE(20, 100)"); - stat.execute("INSERT INTO TEST SELECT X FROM SYSTEM_RANGE(1000, 1100)"); - stat.execute("SHUTDOWN IMMEDIATELY"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - conn = getConnection("pageStoreCreateIndexLater"); - conn.close(); - } - - private void testFuzzOperations() throws Exception { - int best = Integer.MAX_VALUE; - for (int i = 0; i < 10; i++) { - int x = testFuzzOperationsSeed(i, 10); - if (x >= 0 && x < best) { - best = x; - fail("op:" + x + " seed:" + i); - } - } - } - - private int testFuzzOperationsSeed(int seed, int len) throws SQLException { 
- deleteDb("pageStoreFuzz"); - Connection conn = getConnection("pageStoreFuzz"); - Statement stat = conn.createStatement(); - log("DROP TABLE IF EXISTS TEST;"); - stat.execute("DROP TABLE IF EXISTS TEST"); - log("CREATE TABLE TEST(ID INT PRIMARY KEY, " + - "NAME VARCHAR DEFAULT 'Hello World');"); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, " + - "NAME VARCHAR DEFAULT 'Hello World')"); - Set rows = new TreeSet<>(); - Random random = new Random(seed); - for (int i = 0; i < len; i++) { - int op = random.nextInt(3); - Integer x = random.nextInt(100); - switch (op) { - case 0: - if (!rows.contains(x)) { - log("insert into test(id) values(" + x + ");"); - stat.execute("INSERT INTO TEST(ID) VALUES(" + x + ");"); - rows.add(x); - } - break; - case 1: - if (rows.contains(x)) { - log("delete from test where id=" + x + ";"); - stat.execute("DELETE FROM TEST WHERE ID=" + x); - rows.remove(x); - } - break; - case 2: - conn.close(); - conn = getConnection("pageStoreFuzz"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); - log("--reconnect"); - for (int test : rows) { - if (!rs.next()) { - log("error: expected next"); - conn.close(); - return i; - } - int y = rs.getInt(1); - // System.out.println(" " + x); - if (y != test) { - log("error: " + y + " <> " + test); - conn.close(); - return i; - } - } - if (rs.next()) { - log("error: unexpected next"); - conn.close(); - return i; - } - } - } - conn.close(); - return -1; - } - - private void log(String m) { - trace(" " + m); - } - - /** - * A database event listener used in this test. 
- */ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { - - @Override - public void closingDatabase() { - event("closing"); - } - - @Override - public void exceptionThrown(SQLException e, String sql) { - event("exceptionThrown " + e + " " + sql); - } - - @Override - public void init(String url) { - event("init"); - } - - @Override - public void opened() { - event("opened"); - } - - @Override - public void setProgress(int state, String name, int x, int max) { - if (name.startsWith("SYS:SYS_ID")) { - // ignore - return; - } - switch (state) { - case DatabaseEventListener.STATE_STATEMENT_START: - case DatabaseEventListener.STATE_STATEMENT_END: - case DatabaseEventListener.STATE_STATEMENT_PROGRESS: - return; - } - event("setProgress " + state + " " + name + " " + x + " " + max); - } - - private static void event(String s) { - eventBuffer.append(s).append(';'); - } - } -} diff --git a/h2/src/test/org/h2/test/unit/TestPageStoreCoverage.java b/h2/src/test/org/h2/test/unit/TestPageStoreCoverage.java index a9f745f252..6cbf7a5791 100644 --- a/h2/src/test/org/h2/test/unit/TestPageStoreCoverage.java +++ b/h2/src/test/org/h2/test/unit/TestPageStoreCoverage.java @@ -1,18 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; -import java.nio.channels.FileChannel; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; - -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; @@ -32,7 +28,7 @@ public class TestPageStoreCoverage extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -50,7 +46,6 @@ public void test() throws Exception { testMoveRoot(); testBasic(); testReadOnly(); - testIncompleteCreate(); testBackupRestore(); testTrim(); testLongTransaction(); @@ -101,55 +96,54 @@ private void testMoveRoot() throws SQLException { } private void testRecoverTemp() throws SQLException { - Connection conn; - conn = getConnection(URL); - Statement stat = conn.createStatement(); - stat.execute("create cached temporary table test(id identity, name varchar)"); - stat.execute("create index idx_test_name on test(name)"); - stat.execute("create index idx_test_name2 on test(name, id)"); - stat.execute("create table test2(id identity, name varchar)"); - stat.execute("create index idx_test2_name on test2(name desc)"); - stat.execute("create index idx_test2_name2 on test2(name, id)"); - stat.execute("insert into test2 " + - "select null, space(10) from system_range(1, 10)"); - stat.execute("create table test3(id identity, name varchar)"); - stat.execute("checkpoint"); - conn.setAutoCommit(false); - stat.execute("create table test4(id identity, name varchar)"); - stat.execute("create index idx_test4_name2 on test(name, id)"); - stat.execute("insert into test " + - "select null, space(10) from system_range(1, 10)"); - stat.execute("insert into test3 " + - "select null, space(10) from system_range(1, 10)"); - stat.execute("insert into test4 " + - "select null, space(10) from system_range(1, 10)"); - stat.execute("truncate table test2"); - stat.execute("drop index idx_test_name"); - stat.execute("drop index idx_test2_name"); - stat.execute("drop table test2"); - stat.execute("insert into test " + - "select null, space(10) from system_range(1, 10)"); - stat.execute("shutdown immediately"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - conn = getConnection(URL); - stat = conn.createStatement(); - stat.execute("drop all 
objects"); - // re-allocate index root pages - for (int i = 0; i < 10; i++) { - stat.execute("create table test" + i + "(id identity, name varchar)"); + try (Connection conn = getConnection(URL)) { + Statement stat = conn.createStatement(); + stat.execute("create cached temporary table test(id identity, name varchar)"); + stat.execute("create index idx_test_name on test(name)"); + stat.execute("create index idx_test_name2 on test(name, id)"); + stat.execute("create table test2(id identity, name varchar)"); + stat.execute("create index idx_test2_name on test2(name desc)"); + stat.execute("create index idx_test2_name2 on test2(name, id)"); + stat.execute("insert into test2(name) " + + "select space(10) from system_range(1, 10)"); + stat.execute("create table test3(id identity, name varchar)"); + stat.execute("checkpoint"); + conn.setAutoCommit(false); + stat.execute("create table test4(id identity, name varchar)"); + stat.execute("create index idx_test4_name2 on test(name, id)"); + stat.execute("insert into test(name) " + + "select space(10) from system_range(1, 10)"); + stat.execute("insert into test3(name) " + + "select space(10) from system_range(1, 10)"); + stat.execute("insert into test4(name) " + + "select space(10) from system_range(1, 10)"); + stat.execute("truncate table test2"); + stat.execute("drop index idx_test_name"); + stat.execute("drop index idx_test2_name"); + stat.execute("drop table test2"); + stat.execute("insert into test(name) " + + "select space(10) from system_range(1, 10)"); + stat.execute("shutdown immediately"); } - stat.execute("checkpoint"); - for (int i = 0; i < 10; i++) { - stat.execute("drop table test" + i); + try (Connection conn = getConnection(URL)) { + Statement stat = conn.createStatement(); + stat.execute("drop all objects"); + // re-allocate index root pages + for (int i = 0; i < 10; i++) { + stat.execute("create table test" + i + "(id identity, name varchar)"); + } + stat.execute("checkpoint"); + for (int i = 0; i < 10; i++) 
{ + stat.execute("drop table test" + i); + } + for (int i = 0; i < 10; i++) { + stat.execute("create table test" + i + "(id identity, name varchar)"); + } + stat.execute("shutdown immediately"); } - for (int i = 0; i < 10; i++) { - stat.execute("create table test" + i + "(id identity, name varchar)"); + try (Connection conn = getConnection(URL)) { + conn.createStatement().execute("drop all objects"); } - stat.execute("shutdown immediately"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - conn = getConnection(URL); - conn.createStatement().execute("drop all objects"); - conn.close(); } private void testLongTransaction() throws SQLException { @@ -158,8 +152,8 @@ private void testLongTransaction() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test(id identity, name varchar)"); conn.setAutoCommit(false); - stat.execute("insert into test " + - "select null, space(10) from system_range(1, 10)"); + stat.execute("insert into test(name) " + + "select space(10) from system_range(1, 10)"); Connection conn2; conn2 = getConnection(URL); Statement stat2 = conn2.createStatement(); @@ -167,8 +161,8 @@ private void testLongTransaction() throws SQLException { // large transaction stat2.execute("create table test2(id identity, name varchar)"); stat2.execute("create index idx_test2_name on test2(name)"); - stat2.execute("insert into test2 " + - "select null, x || space(10000) from system_range(1, 100)"); + stat2.execute("insert into test2(name) " + + "select x || space(10000) from system_range(1, 100)"); stat2.execute("drop table test2"); conn2.close(); stat.execute("drop table test"); @@ -246,25 +240,4 @@ private void testBackupRestore() throws Exception { deleteDb("pageStore2"); } - private void testIncompleteCreate() throws Exception { - deleteDb("pageStoreCoverage"); - Connection conn; - String fileName = getBaseDir() + "/pageStore" + Constants.SUFFIX_PAGE_FILE; - conn = getConnection("pageStoreCoverage"); - Statement stat 
= conn.createStatement(); - stat.execute("drop table if exists INFORMATION_SCHEMA.LOB_DATA"); - stat.execute("drop table if exists INFORMATION_SCHEMA.LOB_MAP"); - conn.close(); - FileChannel f = FileUtils.open(fileName, "rw"); - // create a new database - conn = getConnection("pageStoreCoverage"); - conn.close(); - f = FileUtils.open(fileName, "rw"); - f.truncate(16); - // create a new database - conn = getConnection("pageStoreCoverage"); - conn.close(); - deleteDb("pageStoreCoverage"); - } - } diff --git a/h2/src/test/org/h2/test/unit/TestPattern.java b/h2/src/test/org/h2/test/unit/TestPattern.java index ffc1641368..4a56deb722 100644 --- a/h2/src/test/org/h2/test/unit/TestPattern.java +++ b/h2/src/test/org/h2/test/unit/TestPattern.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -21,7 +21,7 @@ public class TestPattern extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -44,7 +44,7 @@ private void testCompareModeReuse() { private void testPattern() { CompareMode mode = CompareMode.getInstance(null, 0); - CompareLike comp = new CompareLike(mode, "\\", null, null, null, false); + CompareLike comp = new CompareLike(mode, "\\", null, false, false, null, null, CompareLike.LikeType.LIKE); test(comp, "B", "%_"); test(comp, "A", "A%"); test(comp, "A", "A%%"); @@ -99,7 +99,7 @@ private String initPatternRegexp(String pattern, char escape) { for (int i = 0; i < len; i++) { char c = pattern.charAt(i); if (escape == c) { - if (i >= len) { + if (i >= len - 1) { fail("escape can't be last char"); } c = pattern.charAt(++i); diff --git a/h2/src/test/org/h2/test/unit/TestPerfectHash.java b/h2/src/test/org/h2/test/unit/TestPerfectHash.java index 04d1a53cf6..bc8cac777c 100644 --- a/h2/src/test/org/h2/test/unit/TestPerfectHash.java +++ b/h2/src/test/org/h2/test/unit/TestPerfectHash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -56,14 +56,7 @@ private static void largeFile(String s) throws IOException { RandomAccessFile f = new RandomAccessFile(fileName, "r"); byte[] data = new byte[(int) f.length()]; f.readFully(data); - UniversalHash hf = new UniversalHash() { - - @Override - public int hashCode(Text o, int index, int seed) { - return o.hashCode(index, seed); - } - - }; + UniversalHash hf = Text::hashCode; f.close(); HashSet set = new HashSet<>(); Text t = new Text(data, 0); @@ -149,16 +142,11 @@ private void testBrokenHashFunction() { } for (int test = 1; test < 10; test++) { final int badUntilLevel = test; - UniversalHash badHash = new UniversalHash() { - - @Override - public int hashCode(String o, int index, int seed) { - if (index < badUntilLevel) { - return 0; - } - return StringHash.getFastHash(o, index, seed); + UniversalHash badHash = (o, index, seed) -> { + if (index < badUntilLevel) { + return 0; } - + return StringHash.getFastHash(o, index, seed); }; byte[] desc = MinimalPerfectHash.generate(set, badHash); testMinimal(desc, set, badHash); diff --git a/h2/src/test/org/h2/test/unit/TestPgServer.java b/h2/src/test/org/h2/test/unit/TestPgServer.java index 824c8e6ac4..4a0a4741d7 100644 --- a/h2/src/test/org/h2/test/unit/TestPgServer.java +++ b/h2/src/test/org/h2/test/unit/TestPgServer.java @@ -1,10 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; +import java.lang.reflect.Field; import java.math.BigDecimal; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -20,15 +21,19 @@ import java.sql.Timestamp; import java.sql.Types; import java.util.Properties; -import java.util.concurrent.Callable; +import java.util.Set; +import java.util.TimeZone; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; + import org.h2.api.ErrorCode; +import org.h2.server.pg.PgServer; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.tools.Server; +import org.h2.util.DateTimeUtils; /** * Tests the PostgreSQL server protocol compliant implementation. @@ -43,7 +48,7 @@ public class TestPgServer extends TestDb { public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.memory = true; - test.test(); + test.testFromMain(); } @Override @@ -58,39 +63,14 @@ public boolean isEnabled() { public void test() throws Exception { // testPgAdapter() starts server by itself without a wait so run it first testPgAdapter(); - testLowerCaseIdentifiers(); - testKeyAlias(); testKeyAlias(); testCancelQuery(); - testBinaryTypes(); + testTextualAndBinaryTypes(); + testBinaryNumeric(); testDateTime(); testPrepareWithUnspecifiedType(); - } - - private void testLowerCaseIdentifiers() throws SQLException { - if (!getPgJdbcDriver()) { - return; - } - deleteDb("pgserver"); - Connection conn = getConnection( - "mem:pgserver;DATABASE_TO_LOWER=true", "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int, name varchar(255))"); - Server server = createPgServer("-baseDir", getBaseDir(), - "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", - "mem:pgserver"); - try { - Connection conn2; - conn2 = DriverManager.getConnection( - 
"jdbc:postgresql://localhost:5535/pgserver", "sa", "sa"); - stat = conn2.createStatement(); - stat.execute("select * from test"); - conn2.close(); - } finally { - server.stop(); - } - conn.close(); - deleteDb("pgserver"); + testOtherPgClients(); + testArray(); } private boolean getPgJdbcDriver() { @@ -136,6 +116,7 @@ private void testPgAdapter() throws SQLException { try { if (getPgJdbcDriver()) { testPgClient(); + testPgClientSimple(); } } finally { server.stop(); @@ -154,8 +135,8 @@ private void testCancelQuery() throws Exception { try { Connection conn = DriverManager.getConnection( "jdbc:postgresql://localhost:5535/pgserver", "sa", "sa"); - final Statement stat = conn.createStatement(); - stat.execute("create alias sleep for \"java.lang.Thread.sleep\""); + Statement stat = conn.createStatement(); + stat.execute("create alias sleep for 'java.lang.Thread.sleep'"); // create a table with 200 rows (cancel interval is 127) stat.execute("create table test(id int)"); @@ -163,12 +144,7 @@ private void testCancelQuery() throws Exception { stat.execute("insert into test (id) values (rand())"); } - Future future = executor.submit(new Callable() { - @Override - public Boolean call() throws SQLException { - return stat.execute("select id, sleep(5) from test"); - } - }); + Future future = executor.submit(() -> stat.execute("select id, sleep(5) from test")); // give it a little time to start and then cancel it Thread.sleep(100); @@ -200,6 +176,16 @@ private void testPgClient() throws SQLException { stat.execute("create table test(id int primary key, name varchar)"); stat.execute("create index idx_test_name on test(name, id)"); stat.execute("grant all on test to test"); + int userId; + try (ResultSet rs = stat.executeQuery("call db_object_id('USER', 'test')")) { + rs.next(); + userId = rs.getInt(1); + } + int indexId; + try (ResultSet rs = stat.executeQuery("call db_object_id('INDEX', 'public', 'idx_test_name')")) { + rs.next(); + indexId = rs.getInt(1); + } stat.close(); 
conn.close(); @@ -226,12 +212,14 @@ private void testPgClient() throws SQLException { prep.setInt(1, 1); prep.setString(2, "Hello"); prep.execute(); - rs = stat.executeQuery("select * from test"); + rs = stat.executeQuery("select *, null nul from test"); rs.next(); ResultSetMetaData rsMeta = rs.getMetaData(); assertEquals(Types.INTEGER, rsMeta.getColumnType(1)); assertEquals(Types.VARCHAR, rsMeta.getColumnType(2)); + assertEquals(Types.VARCHAR, rsMeta.getColumnType(3)); + assertEquals("test", rsMeta.getTableName(1)); prep.close(); assertEquals(1, rs.getInt(1)); @@ -250,14 +238,16 @@ private void testPgClient() throws SQLException { rs.close(); DatabaseMetaData dbMeta = conn.getMetaData(); rs = dbMeta.getTables(null, null, "TEST", null); - rs.next(); - assertEquals("TEST", rs.getString("TABLE_NAME")); assertFalse(rs.next()); - rs = dbMeta.getColumns(null, null, "TEST", null); + rs = dbMeta.getTables(null, null, "test", null); + assertTrue(rs.next()); + assertEquals("test", rs.getString("TABLE_NAME")); + assertFalse(rs.next()); + rs = dbMeta.getColumns(null, null, "test", null); rs.next(); - assertEquals("ID", rs.getString("COLUMN_NAME")); + assertEquals("id", rs.getString("COLUMN_NAME")); rs.next(); - assertEquals("NAME", rs.getString("COLUMN_NAME")); + assertEquals("name", rs.getString("COLUMN_NAME")); assertFalse(rs.next()); rs = dbMeta.getIndexInfo(null, null, "TEST", false, false); // index info is currently disabled @@ -274,7 +264,7 @@ private void testPgClient() throws SQLException { assertContains(s, "PostgreSQL"); s = rs.getString(2); s = rs.getString(3); - assertEquals(s, "PUBLIC"); + assertEquals(s, "public"); assertFalse(rs.next()); conn.setAutoCommit(false); @@ -288,11 +278,9 @@ private void testPgClient() throws SQLException { assertEquals("Hallo", rs.getString(2)); assertFalse(rs.next()); - rs = stat.executeQuery("select id, name, pg_get_userbyid(id) " + - "from information_schema.users order by id"); + rs = stat.executeQuery("select pg_get_userbyid(" 
+ userId + ')'); rs.next(); - assertEquals(rs.getString(2), rs.getString(3)); - assertFalse(rs.next()); + assertEquals("test", rs.getString(1)); rs.close(); rs = stat.executeQuery("select currTid2('x', 1)"); @@ -303,14 +291,18 @@ private void testPgClient() throws SQLException { rs.next(); assertTrue(rs.getBoolean(1)); + rs = stat.executeQuery("select has_schema_privilege(1, 'READ')"); + rs.next(); + assertTrue(rs.getBoolean(1)); + rs = stat.executeQuery("select has_database_privilege(1, 'READ')"); rs.next(); assertTrue(rs.getBoolean(1)); - rs = stat.executeQuery("select pg_get_userbyid(-1)"); + rs = stat.executeQuery("select pg_get_userbyid(1000000000)"); rs.next(); - assertEquals(null, rs.getString(1)); + assertEquals("unknown (OID=1000000000)", rs.getString(1)); rs = stat.executeQuery("select pg_encoding_to_char(0)"); rs.next(); @@ -332,40 +324,72 @@ private void testPgClient() throws SQLException { rs.next(); assertEquals("", rs.getString(1)); - rs = stat.executeQuery("select pg_get_oid('\"WRONG\"')"); + rs = stat.executeQuery("select 0::regclass"); rs.next(); assertEquals(0, rs.getInt(1)); - rs = stat.executeQuery("select pg_get_oid('TEST')"); - rs.next(); - assertTrue(rs.getInt(1) > 0); - rs = stat.executeQuery("select pg_get_indexdef(0, 0, false)"); rs.next(); - assertEquals("", rs.getString(1)); - - rs = stat.executeQuery("select id from information_schema.indexes " + - "where index_name='IDX_TEST_NAME'"); - rs.next(); - int indexId = rs.getInt(1); + assertNull(rs.getString(1)); rs = stat.executeQuery("select pg_get_indexdef("+indexId+", 0, false)"); rs.next(); - assertEquals( - "CREATE INDEX \"PUBLIC\".\"IDX_TEST_NAME\" ON \"PUBLIC\".\"TEST\"(\"NAME\", \"ID\")", + assertEquals("CREATE INDEX \"public\".\"idx_test_name\" ON \"public\".\"test\"" + + "(\"name\" NULLS LAST, \"id\" NULLS LAST)", rs.getString(1)); rs = stat.executeQuery("select pg_get_indexdef("+indexId+", null, false)"); rs.next(); - assertEquals( - "CREATE INDEX \"PUBLIC\".\"IDX_TEST_NAME\" ON 
\"PUBLIC\".\"TEST\"(\"NAME\", \"ID\")", - rs.getString(1)); + assertNull(rs.getString(1)); rs = stat.executeQuery("select pg_get_indexdef("+indexId+", 1, false)"); rs.next(); - assertEquals("NAME", rs.getString(1)); + assertEquals("name", rs.getString(1)); rs = stat.executeQuery("select pg_get_indexdef("+indexId+", 2, false)"); rs.next(); - assertEquals("ID", rs.getString(1)); + assertEquals("id", rs.getString(1)); + + rs = stat.executeQuery("select * from pg_type where oid = " + PgServer.PG_TYPE_VARCHAR_ARRAY); + rs.next(); + assertEquals("_varchar", rs.getString("typname")); + assertEquals("_varchar", rs.getObject("typname")); + assertEquals("b", rs.getString("typtype")); + assertEquals(",", rs.getString("typdelim")); + assertEquals(PgServer.PG_TYPE_VARCHAR, rs.getInt("typelem")); + + stat.setMaxRows(10); + rs = stat.executeQuery("select * from generate_series(0, 10)"); + assertNRows(rs, 10); + stat.setMaxRows(0); + + stat.setFetchSize(2); + rs = stat.executeQuery("select * from generate_series(0, 4)"); + assertNRows(rs, 5); + rs = stat.executeQuery("select * from generate_series(0, 1)"); + assertNRows(rs, 2); + stat.setFetchSize(0); + + conn.close(); + } + + private void assertNRows(ResultSet rs, int n) throws SQLException { + for (int i = 0; i < n; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertFalse(rs.next()); + } + private void testPgClientSimple() throws SQLException { + Connection conn = DriverManager.getConnection( + "jdbc:postgresql://localhost:5535/pgserver?preferQueryMode=simple", "sa", "sa"); + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("select 1"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + stat.setMaxRows(0); + stat.execute("create table test2(int integer)"); + stat.execute("drop table test2"); + assertThrows(SQLException.class, stat).execute("drop table test2"); conn.close(); } @@ -385,7 +409,7 @@ private void testKeyAlias() throws SQLException 
{ stat.execute("create table test(id int primary key, name varchar)"); ResultSet rs = stat.executeQuery( "select storage_type from information_schema.tables " + - "where table_name = 'TEST'"); + "where table_name = 'test'"); assertTrue(rs.next()); assertEquals("MEMORY", rs.getString(1)); @@ -395,7 +419,35 @@ private void testKeyAlias() throws SQLException { } } - private void testBinaryTypes() throws SQLException { + private static Set supportedBinaryOids; + + static { + try { + supportedBinaryOids = getSupportedBinaryOids(); + } catch (ReflectiveOperationException e) { + throw new RuntimeException(e); + } + } + + @SuppressWarnings("unchecked") + private static Set getSupportedBinaryOids() throws ReflectiveOperationException { + Field supportedBinaryOidsField = Class + .forName("org.postgresql.jdbc.PgConnection") + .getDeclaredField("SUPPORTED_BINARY_OIDS"); + supportedBinaryOidsField.setAccessible(true); + return (Set) supportedBinaryOidsField.get(null); + } + + private void testTextualAndBinaryTypes() throws SQLException { + testTextualAndBinaryTypes(false); + testTextualAndBinaryTypes(true); + // additional support of NUMERIC for Npgsql + supportedBinaryOids.add(1700); + testTextualAndBinaryTypes(true); + supportedBinaryOids.remove(1700); + } + + private void testTextualAndBinaryTypes(boolean binary) throws SQLException { if (!getPgJdbcDriver()) { return; } @@ -406,8 +458,11 @@ private void testBinaryTypes() throws SQLException { Properties props = new Properties(); props.setProperty("user", "sa"); props.setProperty("password", "sa"); + // force binary - props.setProperty("prepareThreshold", "-1"); + if (binary) { + props.setProperty("prepareThreshold", "-1"); + } Connection conn = DriverManager.getConnection( "jdbc:postgresql://localhost:5535/pgserver", props); @@ -415,12 +470,13 @@ private void testBinaryTypes() throws SQLException { stat.execute( "create table test(x1 varchar, x2 int, " + - "x3 smallint, x4 bigint, x5 double, x6 float, " + - "x7 real, x8 
boolean, x9 char, x10 bytea, " + - "x11 date, x12 time, x13 timestamp, x14 numeric)"); + "x3 smallint, x4 bigint, x5 double precision, x6 float, " + + "x7 real, x8 boolean, x9 char(3), x10 bytea, " + + "x11 date, x12 time, x13 timestamp, x14 numeric(25, 5)," + + "x15 time with time zone, x16 timestamp with time zone)"); PreparedStatement ps = conn.prepareStatement( - "insert into test values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"); + "insert into test values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"); ps.setString(1, "test"); ps.setInt(2, 12345678); ps.setShort(3, (short) 12345); @@ -430,13 +486,15 @@ private void testBinaryTypes() throws SQLException { ps.setFloat(7, 123.456f); ps.setBoolean(8, true); ps.setByte(9, (byte) 0xfe); - ps.setBytes(10, new byte[] { 'a', (byte) 0xfe, '\127' }); + ps.setBytes(10, new byte[] { 'a', (byte) 0xfe, '\127', 0, 127, '\\' }); ps.setDate(11, Date.valueOf("2015-01-31")); ps.setTime(12, Time.valueOf("20:11:15")); ps.setTimestamp(13, Timestamp.valueOf("2001-10-30 14:16:10.111")); ps.setBigDecimal(14, new BigDecimal("12345678901234567890.12345")); + ps.setTime(15, Time.valueOf("20:11:15")); + ps.setTimestamp(16, Timestamp.valueOf("2001-10-30 14:16:10.111")); ps.execute(); - for (int i = 1; i <= 14; i++) { + for (int i = 1; i <= 16; i++) { ps.setNull(i, Types.NULL); } ps.execute(); @@ -452,14 +510,16 @@ private void testBinaryTypes() throws SQLException { assertEquals(123.456f, rs.getFloat(7)); assertEquals(true, rs.getBoolean(8)); assertEquals((byte) 0xfe, rs.getByte(9)); - assertEquals(new byte[] { 'a', (byte) 0xfe, '\127' }, + assertEquals(new byte[] { 'a', (byte) 0xfe, '\127', 0, 127, '\\' }, rs.getBytes(10)); assertEquals(Date.valueOf("2015-01-31"), rs.getDate(11)); assertEquals(Time.valueOf("20:11:15"), rs.getTime(12)); assertEquals(Timestamp.valueOf("2001-10-30 14:16:10.111"), rs.getTimestamp(13)); assertEquals(new BigDecimal("12345678901234567890.12345"), rs.getBigDecimal(14)); + assertEquals(Time.valueOf("20:11:15"), rs.getTime(15)); + 
assertEquals(Timestamp.valueOf("2001-10-30 14:16:10.111"), rs.getTimestamp(16)); assertTrue(rs.next()); - for (int i = 1; i <= 14; i++) { + for (int i = 1; i <= 16; i++) { assertNull(rs.getObject(i)); } assertFalse(rs.next()); @@ -470,13 +530,13 @@ private void testBinaryTypes() throws SQLException { } } - private void testDateTime() throws SQLException { + private void testBinaryNumeric() throws SQLException { if (!getPgJdbcDriver()) { return; } - Server server = createPgServer( "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); + supportedBinaryOids.add(1700); try { Properties props = new Properties(); props.setProperty("user", "sa"); @@ -488,47 +548,107 @@ private void testDateTime() throws SQLException { "jdbc:postgresql://localhost:5535/pgserver", props); Statement stat = conn.createStatement(); - stat.execute( - "create table test(x1 date, x2 time, x3 timestamp)"); - - Date[] dates = { null, Date.valueOf("2017-02-20"), - Date.valueOf("1970-01-01"), Date.valueOf("1969-12-31"), - Date.valueOf("1940-01-10"), Date.valueOf("1950-11-10"), - Date.valueOf("1500-01-01")}; - Time[] times = { null, Time.valueOf("14:15:16"), - Time.valueOf("00:00:00"), Time.valueOf("23:59:59"), - Time.valueOf("00:10:59"), Time.valueOf("08:30:42"), - Time.valueOf("10:00:00")}; - Timestamp[] timestamps = { null, Timestamp.valueOf("2017-02-20 14:15:16.763"), - Timestamp.valueOf("1970-01-01 00:00:00"), Timestamp.valueOf("1969-12-31 23:59:59"), - Timestamp.valueOf("1940-01-10 00:10:59"), Timestamp.valueOf("1950-11-10 08:30:42.12"), - Timestamp.valueOf("1500-01-01 10:00:10")}; - int count = dates.length; - - PreparedStatement ps = conn.prepareStatement( - "insert into test values (?,?,?)"); - for (int i = 0; i < count; i++) { - ps.setDate(1, dates[i]); - ps.setTime(2, times[i]); - ps.setTimestamp(3, timestamps[i]); - ps.execute(); + try (ResultSet rs = stat.executeQuery("SELECT 1E-16383, 1E+1, 1E+89, 1E-16384")) { + rs.next(); + assertEquals(new 
BigDecimal("1E-16383"), rs.getBigDecimal(1)); + assertEquals(new BigDecimal("10"), rs.getBigDecimal(2)); + assertEquals(new BigDecimal("10").pow(89), rs.getBigDecimal(3)); + // TODO `SELECT 1E+90, 1E+131071` fails due to PgJDBC issue 1935 + try { + rs.getBigDecimal(4); + fail(); + } catch (IllegalArgumentException e) { + // PgJDBC doesn't support scale greater than 16383 + } } - - ResultSet rs = stat.executeQuery("select * from test"); - for (int i = 0; i < count; i++) { - assertTrue(rs.next()); - assertEquals(dates[i], rs.getDate(1)); - assertEquals(times[i], rs.getTime(2)); - assertEquals(timestamps[i], rs.getTimestamp(3)); + try (ResultSet rs = stat.executeQuery("SELECT 1E-32768")) { + fail(); + } catch (SQLException e) { + assertEquals("22003", e.getSQLState()); + } + try (ResultSet rs = stat.executeQuery("SELECT 1E+131072")) { + fail(); + } catch (SQLException e) { + assertEquals("22003", e.getSQLState()); } - assertFalse(rs.next()); conn.close(); } finally { + supportedBinaryOids.remove(1700); server.stop(); } } + private void testDateTime() throws SQLException { + if (!getPgJdbcDriver()) { + return; + } + TimeZone old = TimeZone.getDefault(); + /* + * java.util.TimeZone doesn't support LMT, so perform this test with + * fixed time zone offset + */ + TimeZone.setDefault(TimeZone.getTimeZone("GMT+01")); + DateTimeUtils.resetCalendar(); + try { + Server server = createPgServer( + "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); + try { + Properties props = new Properties(); + props.setProperty("user", "sa"); + props.setProperty("password", "sa"); + // force binary + props.setProperty("prepareThreshold", "-1"); + + Connection conn = DriverManager.getConnection( + "jdbc:postgresql://localhost:5535/pgserver", props); + Statement stat = conn.createStatement(); + + stat.execute( + "create table test(x1 date, x2 time, x3 timestamp)"); + + Date[] dates = { null, Date.valueOf("2017-02-20"), + Date.valueOf("1970-01-01"), 
Date.valueOf("1969-12-31"), + Date.valueOf("1940-01-10"), Date.valueOf("1950-11-10"), + Date.valueOf("1500-01-01")}; + Time[] times = { null, Time.valueOf("14:15:16"), + Time.valueOf("00:00:00"), Time.valueOf("23:59:59"), + Time.valueOf("00:10:59"), Time.valueOf("08:30:42"), + Time.valueOf("10:00:00")}; + Timestamp[] timestamps = { null, Timestamp.valueOf("2017-02-20 14:15:16.763"), + Timestamp.valueOf("1970-01-01 00:00:00"), Timestamp.valueOf("1969-12-31 23:59:59"), + Timestamp.valueOf("1940-01-10 00:10:59"), Timestamp.valueOf("1950-11-10 08:30:42.12"), + Timestamp.valueOf("1500-01-01 10:00:10")}; + int count = dates.length; + + PreparedStatement ps = conn.prepareStatement( + "insert into test values (?,?,?)"); + for (int i = 0; i < count; i++) { + ps.setDate(1, dates[i]); + ps.setTime(2, times[i]); + ps.setTimestamp(3, timestamps[i]); + ps.execute(); + } + + ResultSet rs = stat.executeQuery("select * from test"); + for (int i = 0; i < count; i++) { + assertTrue(rs.next()); + assertEquals(dates[i], rs.getDate(1)); + assertEquals(times[i], rs.getTime(2)); + assertEquals(timestamps[i], rs.getTimestamp(3)); + } + assertFalse(rs.next()); + + conn.close(); + } finally { + server.stop(); + } + } finally { + TimeZone.setDefault(old); + DateTimeUtils.resetCalendar(); + } + } + private void testPrepareWithUnspecifiedType() throws Exception { if (!getPgJdbcDriver()) { return; @@ -548,7 +668,7 @@ private void testPrepareWithUnspecifiedType() throws Exception { "jdbc:postgresql://localhost:5535/pgserver", props); Statement stmt = conn.createStatement(); - stmt.executeUpdate("create table t1 (id integer, value timestamp)"); + stmt.executeUpdate("create table t1 (id integer, v timestamp)"); stmt.close(); PreparedStatement pstmt = conn.prepareStatement("insert into t1 values(100500, ?)"); @@ -560,7 +680,7 @@ private void testPrepareWithUnspecifiedType() throws Exception { assertEquals(1, pstmt.executeUpdate()); pstmt.close(); - pstmt = conn.prepareStatement("SELECT * FROM t1 
WHERE value = ?"); + pstmt = conn.prepareStatement("SELECT * FROM t1 WHERE v = ?"); assertEquals(Types.TIMESTAMP, pstmt.getParameterMetaData().getParameterType(1)); pstmt.setObject(1, t); @@ -575,4 +695,219 @@ private void testPrepareWithUnspecifiedType() throws Exception { server.stop(); } } + + private void testOtherPgClients() throws SQLException { + if (!getPgJdbcDriver()) { + return; + } + + Server server = createPgServer( + "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); + try ( + Connection conn = DriverManager.getConnection( + "jdbc:postgresql://localhost:5535/pgserver", "sa", "sa"); + Statement stat = conn.createStatement(); + ) { + stat.execute( + "create table test(id serial primary key, x1 integer)"); + + // pgAdmin + stat.execute("SET client_min_messages=notice"); + try (ResultSet rs = stat.executeQuery("SELECT set_config('bytea_output','escape',false) " + + "FROM pg_settings WHERE name = 'bytea_output'")) { + assertFalse(rs.next()); + } + stat.execute("SET client_encoding='UNICODE'"); + try (ResultSet rs = stat.executeQuery("SELECT version()")) { + assertTrue(rs.next()); + assertNotNull(rs.getString("version")); + } + try (ResultSet rs = stat.executeQuery("SELECT " + + "db.oid as did, db.datname, db.datallowconn, " + + "pg_encoding_to_char(db.encoding) AS serverencoding, " + + "has_database_privilege(db.oid, 'CREATE') as cancreate, datlastsysoid " + + "FROM pg_database db WHERE db.datname = current_database()")) { + assertTrue(rs.next()); + assertEquals("pgserver", rs.getString("datname")); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT " + + "oid as id, rolname as name, rolsuper as is_superuser, " + + "CASE WHEN rolsuper THEN true ELSE rolcreaterole END as can_create_role, " + + "CASE WHEN rolsuper THEN true ELSE rolcreatedb END as can_create_db " + + "FROM pg_catalog.pg_roles WHERE rolname = current_user")) { + assertTrue(rs.next()); + assertEquals("sa", rs.getString("name")); + 
assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT " + + "db.oid as did, db.datname as name, ta.spcname as spcname, db.datallowconn, " + + "has_database_privilege(db.oid, 'CREATE') as cancreate, datdba as owner " + + "FROM pg_database db LEFT OUTER JOIN pg_tablespace ta ON db.dattablespace = ta.oid " + + "WHERE db.oid > 100000::OID")) { + assertTrue(rs.next()); + assertEquals("pgserver", rs.getString("name")); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT nsp.oid, nsp.nspname as name, " + + "has_schema_privilege(nsp.oid, 'CREATE') as can_create, " + + "has_schema_privilege(nsp.oid, 'USAGE') as has_usage " + + "FROM pg_namespace nsp WHERE nspname NOT LIKE 'pg\\_%' AND NOT (" + + "(nsp.nspname = 'pg_catalog' AND EXISTS (SELECT 1 FROM pg_class " + + "WHERE relname = 'pg_class' AND relnamespace = nsp.oid LIMIT 1)) OR " + + "(nsp.nspname = 'pgagent' AND EXISTS (SELECT 1 FROM pg_class " + + "WHERE relname = 'pga_job' AND relnamespace = nsp.oid LIMIT 1)) OR " + + "(nsp.nspname = 'information_schema' AND EXISTS (SELECT 1 FROM pg_class " + + "WHERE relname = 'tables' AND relnamespace = nsp.oid LIMIT 1))" + + ") ORDER BY nspname")) { + assertTrue(rs.next()); + assertEquals("public", rs.getString("name")); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT format_type(23, NULL)")) { + assertTrue(rs.next()); + assertEquals("INTEGER", rs.getString(1)); + assertFalse(rs.next()); + } + // pgAdmin sends `SET LOCAL join_collapse_limit=8`, but `LOCAL` is not supported yet + stat.execute("SET join_collapse_limit=8"); + + // HeidiSQL + try (ResultSet rs = stat.executeQuery("SHOW ssl")) { + assertTrue(rs.next()); + assertEquals("off", rs.getString(1)); + } + stat.execute("SET search_path TO 'public', '$user'"); + try (ResultSet rs = stat.executeQuery("SELECT *, NULL AS data_length, " + + "pg_relation_size(QUOTE_IDENT(t.TABLE_SCHEMA) || '.' 
|| QUOTE_IDENT(t.TABLE_NAME))::bigint " + + "AS index_length, " + + "c.reltuples, obj_description(c.oid) AS comment " + + "FROM \"information_schema\".\"tables\" AS t " + + "LEFT JOIN \"pg_namespace\" n ON t.table_schema = n.nspname " + + "LEFT JOIN \"pg_class\" c ON n.oid = c.relnamespace AND c.relname=t.table_name " + + "WHERE t.\"table_schema\"='public'")) { + assertTrue(rs.next()); + assertEquals("test", rs.getString("table_name")); + assertTrue(rs.getLong("index_length") >= 0L); // test pg_relation_size() + assertNull(rs.getString("comment")); // test obj_description() + } + try (ResultSet rs = stat.executeQuery("SELECT \"p\".\"proname\", \"p\".\"proargtypes\" " + + "FROM \"pg_catalog\".\"pg_namespace\" AS \"n\" " + + "JOIN \"pg_catalog\".\"pg_proc\" AS \"p\" ON \"p\".\"pronamespace\" = \"n\".\"oid\" " + + "WHERE \"n\".\"nspname\"='public'")) { + assertFalse(rs.next()); // "pg_proc" always empty + } + try (ResultSet rs = stat.executeQuery("SELECT DISTINCT a.attname AS column_name, " + + "a.attnum, a.atttypid, FORMAT_TYPE(a.atttypid, a.atttypmod) AS data_type, " + + "CASE a.attnotnull WHEN false THEN 'YES' ELSE 'NO' END AS IS_NULLABLE, " + + "com.description AS column_comment, pg_get_expr(def.adbin, def.adrelid) AS column_default, " + + "NULL AS character_maximum_length FROM pg_attribute AS a " + + "JOIN pg_class AS pgc ON pgc.oid = a.attrelid " + + "LEFT JOIN pg_description AS com ON (pgc.oid = com.objoid AND a.attnum = com.objsubid) " + + "LEFT JOIN pg_attrdef AS def ON (a.attrelid = def.adrelid AND a.attnum = def.adnum) " + + "WHERE a.attnum > 0 AND pgc.oid = a.attrelid AND pg_table_is_visible(pgc.oid) " + + "AND NOT a.attisdropped AND pgc.relname = 'test' ORDER BY a.attnum")) { + assertTrue(rs.next()); + assertEquals("id", rs.getString("column_name")); + assertTrue(rs.next()); + assertEquals("x1", rs.getString("column_name")); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SHOW ALL")) { + ResultSetMetaData rsMeta = rs.getMetaData(); 
+ assertEquals("name", rsMeta.getColumnName(1)); + assertEquals("setting", rsMeta.getColumnName(2)); + } + + // DBeaver + try (ResultSet rs = stat.executeQuery("SELECT t.oid,t.*,c.relkind FROM pg_catalog.pg_type t " + + "LEFT OUTER JOIN pg_class c ON c.oid=t.typrelid WHERE typnamespace=-1000")) { + // just no exception + } + stat.execute("SET search_path TO 'ab', 'c\"d', 'e''f'"); + try (ResultSet rs = stat.executeQuery("SHOW search_path")) { + assertTrue(rs.next()); + assertEquals("pg_catalog, ab, \"c\"\"d\", \"e'f\"", rs.getString("search_path")); + } + stat.execute("SET search_path TO ab, \"c\"\"d\", \"e'f\""); + try (ResultSet rs = stat.executeQuery("SHOW search_path")) { + assertTrue(rs.next()); + assertEquals("pg_catalog, ab, \"c\"\"d\", \"e'f\"", rs.getString("search_path")); + } + int oid; + try (ResultSet rs = stat.executeQuery("SELECT oid FROM pg_class WHERE relname = 'test'")) { + rs.next(); + oid = rs.getInt("oid"); + } + try (ResultSet rs = stat.executeQuery("SELECT i.*,i.indkey as keys," + + "c.relname,c.relnamespace,c.relam,c.reltablespace," + + "tc.relname as tabrelname,dsc.description," + + "pg_catalog.pg_get_expr(i.indpred, i.indrelid) as pred_expr," + + "pg_catalog.pg_get_expr(i.indexprs, i.indrelid, true) as expr," + + "pg_catalog.pg_relation_size(i.indexrelid) as index_rel_size," + + "pg_catalog.pg_stat_get_numscans(i.indexrelid) as index_num_scans " + + "FROM pg_catalog.pg_index i " + + "INNER JOIN pg_catalog.pg_class c ON c.oid=i.indexrelid " + + "INNER JOIN pg_catalog.pg_class tc ON tc.oid=i.indrelid " + + "LEFT OUTER JOIN pg_catalog.pg_description dsc ON i.indexrelid=dsc.objoid " + + "WHERE i.indrelid=" + oid + " ORDER BY c.relname")) { + // pg_index is empty + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT c.oid,c.*," + + "t.relname as tabrelname,rt.relnamespace as refnamespace,d.description " + + "FROM pg_catalog.pg_constraint c " + + "INNER JOIN pg_catalog.pg_class t ON t.oid=c.conrelid " + + "LEFT OUTER JOIN 
pg_catalog.pg_class rt ON rt.oid=c.confrelid " + + "LEFT OUTER JOIN pg_catalog.pg_description d ON d.objoid=c.oid " + + "AND d.objsubid=0 AND d.classoid='pg_constraint'::regclass WHERE c.conrelid=" + oid)) { + assertTrue(rs.next()); + assertEquals("test", rs.getString("tabrelname")); + assertEquals("p", rs.getString("contype")); + assertEquals(Short.valueOf((short) 1), ((Object[]) rs.getArray("conkey").getArray())[0]); + } + } finally { + server.stop(); + } + } + + private void testArray() throws Exception { + if (!getPgJdbcDriver()) { + return; + } + + Server server = createPgServer( + "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); + try ( + Connection conn = DriverManager.getConnection( + "jdbc:postgresql://localhost:5535/pgserver", "sa", "sa"); + Statement stat = conn.createStatement(); + ) { + stat.execute("CREATE TABLE test (id int primary key, x1 varchar array)"); + stat.execute("INSERT INTO test (id, x1) VALUES (1, ARRAY['abc', 'd\\\"e', '{,}'])"); + try (ResultSet rs = stat.executeQuery( + "SELECT x1 FROM test WHERE id = 1")) { + assertTrue(rs.next()); + Object[] arr = (Object[]) rs.getArray(1).getArray(); + assertEquals("abc", arr[0]); + assertEquals("d\\\"e", arr[1]); + assertEquals("{,}", arr[2]); + } + try (ResultSet rs = stat.executeQuery( + "SELECT data_type FROM information_schema.columns WHERE table_schema = 'pg_catalog' " + + "AND table_name = 'pg_database' AND column_name = 'datacl'")) { + assertTrue(rs.next()); + assertEquals("array", rs.getString(1)); + } + try (ResultSet rs = stat.executeQuery( + "SELECT data_type FROM information_schema.columns WHERE table_schema = 'pg_catalog' " + + "AND table_name = 'pg_tablespace' AND column_name = 'spcacl'")) { + assertTrue(rs.next()); + assertEquals("array", rs.getString(1)); + } + } finally { + server.stop(); + } + } + } diff --git a/h2/src/test/org/h2/test/unit/TestReader.java b/h2/src/test/org/h2/test/unit/TestReader.java index a90ac325a0..2ddb8fcb30 100644 --- 
a/h2/src/test/org/h2/test/unit/TestReader.java +++ b/h2/src/test/org/h2/test/unit/TestReader.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -25,7 +25,7 @@ public class TestReader extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -35,7 +35,7 @@ public void test() throws Exception { InputStream in = new ReaderInputStream(r); byte[] buff = IOUtils.readBytesAndClose(in, 0); InputStream in2 = new ByteArrayInputStream(buff); - Reader r2 = IOUtils.getBufferedReader(in2); + Reader r2 = IOUtils.getReader(in2); String s2 = IOUtils.readStringAndClose(r2, Integer.MAX_VALUE); assertEquals(s, s2); } diff --git a/h2/src/test/org/h2/test/unit/TestRecovery.java b/h2/src/test/org/h2/test/unit/TestRecovery.java index 006e21057c..3db1cc1d30 100644 --- a/h2/src/test/org/h2/test/unit/TestRecovery.java +++ b/h2/src/test/org/h2/test/unit/TestRecovery.java @@ -1,26 +1,22 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.ByteArrayOutputStream; -import java.io.InputStreamReader; import java.io.PrintStream; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; import java.sql.Connection; -import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.tools.DeleteDbFiles; import org.h2.tools.Recover; -import org.h2.util.IOUtils; +import org.h2.util.Utils10; /** * Tests database recovery. @@ -33,7 +29,7 @@ public class TestRecovery extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -46,31 +42,13 @@ public boolean isEnabled() { @Override public void test() throws Exception { - if (!config.mvStore) { - testRecoverTestMode(); - } testRecoverClob(); testRecoverFulltext(); - testRedoTransactions(); - testCorrupt(); - testWithTransactionLog(); testCompressedAndUncompressed(); testRunScript(); testRunScript2(); } - private void testRecoverTestMode() throws Exception { - String recoverTestLog = getBaseDir() + "/recovery.h2.db.log"; - FileUtils.delete(recoverTestLog); - deleteDb("recovery"); - Connection conn = getConnection("recovery;RECOVER_TEST=1"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int, name varchar)"); - stat.execute("drop all objects delete files"); - conn.close(); - assertTrue(FileUtils.exists(recoverTestLog)); - } - private void testRecoverClob() throws Exception { DeleteDbFiles.execute(getBaseDir(), "recovery", true); Connection conn = getConnection("recovery"); @@ -92,8 +70,7 @@ private void testRecoverFulltext() throws Exception { DeleteDbFiles.execute(getBaseDir(), "recovery", true); Connection conn = 
getConnection("recovery"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_INIT " + - "FOR \"org.h2.fulltext.FullTextLucene.init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_INIT FOR 'org.h2.fulltext.FullTextLucene.init'"); stat.execute("CALL FTL_INIT()"); stat.execute("create table test(id int primary key, name varchar) as " + "select 1, 'Hello'"); @@ -107,131 +84,6 @@ private void testRecoverFulltext() throws Exception { conn.close(); } - private void testRedoTransactions() throws Exception { - if (config.mvStore) { - // not needed for MV_STORE=TRUE - return; - } - DeleteDbFiles.execute(getBaseDir(), "recovery", true); - Connection conn = getConnection("recovery"); - Statement stat = conn.createStatement(); - stat.execute("set write_delay 0"); - stat.execute("create table test(id int primary key, name varchar)"); - stat.execute("insert into test select x, 'Hello' from system_range(1, 5)"); - stat.execute("create table test2(id int primary key)"); - stat.execute("drop table test2"); - stat.execute("update test set name = 'Hallo' where id < 3"); - stat.execute("delete from test where id = 1"); - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (Exception e) { - // ignore - } - Recover.main("-dir", getBaseDir(), "-db", "recovery", "-transactionLog"); - DeleteDbFiles.execute(getBaseDir(), "recovery", true); - conn = getConnection("recovery;init=runscript from '" + - getBaseDir() + "/recovery.h2.sql'"); - stat = conn.createStatement(); - ResultSet rs; - rs = stat.executeQuery("select * from test order by id"); - assertTrue(rs.next()); - assertEquals(2, rs.getInt(1)); - assertEquals("Hallo", rs.getString(2)); - assertTrue(rs.next()); - assertEquals(3, rs.getInt(1)); - assertEquals("Hello", rs.getString(2)); - assertTrue(rs.next()); - assertEquals(4, rs.getInt(1)); - assertEquals("Hello", rs.getString(2)); - assertTrue(rs.next()); - assertEquals(5, rs.getInt(1)); - assertEquals("Hello", 
rs.getString(2)); - assertFalse(rs.next()); - conn.close(); - } - - private void testCorrupt() throws Exception { - if (config.mvStore) { - // not needed for MV_STORE=TRUE - return; - } - DeleteDbFiles.execute(getBaseDir(), "recovery", true); - Connection conn = getConnection("recovery"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int, name varchar) as " + - "select 1, 'Hello World1'"); - conn.close(); - FileChannel f = FileUtils.open(getBaseDir() + "/recovery.h2.db", "rw"); - byte[] buff = new byte[Constants.DEFAULT_PAGE_SIZE]; - while (f.position() < f.size()) { - FileUtils.readFully(f, ByteBuffer.wrap(buff)); - if (new String(buff).contains("Hello World1")) { - buff[buff.length - 1]++; - f.position(f.position() - buff.length); - f.write(ByteBuffer.wrap(buff)); - } - } - f.close(); - Recover.main("-dir", getBaseDir(), "-db", "recovery"); - String script = IOUtils.readStringAndClose( - new InputStreamReader( - FileUtils.newInputStream(getBaseDir() + "/recovery.h2.sql")), -1); - assertContains(script, "checksum mismatch"); - assertContains(script, "dump:"); - assertContains(script, "Hello World2"); - } - - private void testWithTransactionLog() throws SQLException { - if (config.mvStore) { - // not needed for MV_STORE=TRUE - return; - } - DeleteDbFiles.execute(getBaseDir(), "recovery", true); - Connection conn = getConnection("recovery"); - Statement stat = conn.createStatement(); - stat.execute("create table truncate(id int primary key) as " + - "select x from system_range(1, 1000)"); - stat.execute("create table test(id int primary key, data int, text varchar)"); - stat.execute("create index on test(data, id)"); - stat.execute("insert into test direct select x, 0, null " + - "from system_range(1, 1000)"); - stat.execute("insert into test values(-1, -1, space(10000))"); - stat.execute("checkpoint"); - stat.execute("delete from test where id = -1"); - stat.execute("truncate table truncate"); - conn.setAutoCommit(false); - long 
base = 0; - while (true) { - ResultSet rs = stat.executeQuery( - "select value from information_schema.settings " + - "where name = 'info.FILE_WRITE'"); - rs.next(); - long count = rs.getLong(1); - if (base == 0) { - base = count; - } else if (count > base + 10) { - break; - } - stat.execute("update test set data=0"); - stat.execute("update test set text=space(10000) where id = 0"); - stat.execute("update test set data=1, text = null"); - conn.commit(); - } - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (Exception e) { - // expected - } - Recover.main("-dir", getBaseDir(), "-db", "recovery"); - conn = getConnection("recovery"); - conn.close(); - Recover.main("-dir", getBaseDir(), "-db", "recovery", "-removePassword"); - conn = getConnection("recovery", getUser(), ""); - conn.close(); - DeleteDbFiles.execute(getBaseDir(), "recovery", true); - } private void testCompressedAndUncompressed() throws SQLException { DeleteDbFiles.execute(getBaseDir(), "recovery", true); @@ -241,7 +93,6 @@ private void testCompressedAndUncompressed() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, data clob)"); stat.execute("insert into test values(1, space(10000))"); - stat.execute("set compress_lob lzf"); stat.execute("insert into test values(2, space(10000))"); conn.close(); Recover rec = new Recover(); @@ -265,7 +116,7 @@ private void testCompressedAndUncompressed() throws SQLException { DeleteDbFiles.execute(getBaseDir(), "recovery2", true); } - private void testRunScript() throws SQLException { + private void testRunScript() throws Exception { DeleteDbFiles.execute(getBaseDir(), "recovery", true); DeleteDbFiles.execute(getBaseDir(), "recovery2", true); org.h2.Driver.load(); @@ -279,7 +130,7 @@ private void testRunScript() throws SQLException { "select * from test"); stat.execute("create table a(id int primary key) as " + "select * from system_range(1, 100)"); - stat.execute("create table 
b(id int references a(id)) as " + + stat.execute("create table b(id int primary key references a(id)) as " + "select * from system_range(1, 100)"); stat.execute("create table lob(c clob, b blob) as " + "select space(10000) || 'end', SECURE_RAND(10000)"); @@ -294,9 +145,9 @@ private void testRunScript() throws SQLException { Recover rec = new Recover(); ByteArrayOutputStream buff = new ByteArrayOutputStream(); - rec.setOut(new PrintStream(buff)); + rec.setOut(new PrintStream(buff, false, "UTF-8")); rec.runTool("-dir", getBaseDir(), "-db", "recovery", "-trace"); - String out = new String(buff.toByteArray()); + String out = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); assertContains(out, "Created file"); Connection conn2 = getConnection("recovery2"); @@ -325,26 +176,21 @@ private void testRunScript() throws SQLException { FileUtils.deleteRecursive(dir, false); } - private void testRunScript2() throws SQLException { - if (!config.mvStore) { - // TODO Does not work in PageStore mode - return; - } + private void testRunScript2() throws Exception { DeleteDbFiles.execute(getBaseDir(), "recovery", true); DeleteDbFiles.execute(getBaseDir(), "recovery2", true); org.h2.Driver.load(); Connection conn = getConnection("recovery"); Statement stat = conn.createStatement(); stat.execute("SET COLLATION EN"); - stat.execute("SET BINARY_COLLATION UNSIGNED"); stat.execute("CREATE TABLE TEST(A VARCHAR)"); conn.close(); final Recover recover = new Recover(); final ByteArrayOutputStream buff = new ByteArrayOutputStream(); // capture the console output - recover.setOut(new PrintStream(buff)); + recover.setOut(new PrintStream(buff, false, "UTF-8")); recover.runTool("-dir", getBaseDir(), "-db", "recovery", "-trace"); - String consoleOut = new String(buff.toByteArray()); + String consoleOut = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); assertContains(consoleOut, "Created file"); Connection conn2 = getConnection("recovery2"); diff --git 
a/h2/src/test/org/h2/test/unit/TestReopen.java b/h2/src/test/org/h2/test/unit/TestReopen.java index 75e61b98c3..babf456eb9 100644 --- a/h2/src/test/org/h2/test/unit/TestReopen.java +++ b/h2/src/test/org/h2/test/unit/TestReopen.java @@ -1,24 +1,23 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; import java.sql.SQLException; import java.util.HashSet; -import java.util.Properties; import java.util.concurrent.TimeUnit; import org.h2.api.ErrorCode; import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; -import org.h2.store.fs.FilePathRec; import org.h2.store.fs.FileUtils; import org.h2.store.fs.Recorder; +import org.h2.store.fs.rec.FilePathRec; import org.h2.test.TestBase; import org.h2.tools.Recover; import org.h2.util.IOUtils; @@ -48,7 +47,7 @@ public class TestReopen extends TestBase implements Recorder { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -72,8 +71,7 @@ public void log(int op, String fileName, byte[] data, long x) { if (op != Recorder.WRITE && op != Recorder.TRUNCATE) { return; } - if (!fileName.endsWith(Constants.SUFFIX_PAGE_FILE) && - !fileName.endsWith(Constants.SUFFIX_MV_FILE)) { + if (!fileName.endsWith(Constants.SUFFIX_MV_FILE)) { return; } if (testing) { @@ -100,25 +98,16 @@ private synchronized void logDb(String fileName) { System.out.println("+ write #" + writeCount + " verify #" + verifyCount); try { - if (fileName.endsWith(Constants.SUFFIX_PAGE_FILE)) { - IOUtils.copyFiles(fileName, testDatabase + - Constants.SUFFIX_PAGE_FILE); - } else { - IOUtils.copyFiles(fileName, testDatabase + - Constants.SUFFIX_MV_FILE); - } + IOUtils.copyFiles(fileName, testDatabase + + Constants.SUFFIX_MV_FILE); verifyCount++; // avoid using the Engine class to avoid deadlocks - Properties p = new Properties(); - String userName = getUser(); - p.setProperty("user", userName); - p.setProperty("password", getPassword()); String url = "jdbc:h2:" + testDatabase + ";FILE_LOCK=NO;TRACE_LEVEL_FILE=0"; - ConnectionInfo ci = new ConnectionInfo(url, p); + ConnectionInfo ci = new ConnectionInfo(url, null, getUser(), getPassword()); Database database = new Database(ci, null); // close the database - Session session = database.getSystemSession(); + SessionLocal session = database.getSystemSession(); session.prepare("script to '" + testDatabase + ".sql'").query(0); session.prepare("shutdown immediately").update(); database.removeSession(null); @@ -156,17 +145,11 @@ private synchronized void logDb(String fileName) { } testDatabase += "X"; try { - if (fileName.endsWith(Constants.SUFFIX_PAGE_FILE)) { - IOUtils.copyFiles(fileName, testDatabase + - Constants.SUFFIX_PAGE_FILE); - } else { - IOUtils.copyFiles(fileName, testDatabase + - Constants.SUFFIX_MV_FILE); - } + IOUtils.copyFiles(fileName, testDatabase 
+ + Constants.SUFFIX_MV_FILE); // avoid using the Engine class to avoid deadlocks - Properties p = new Properties(); String url = "jdbc:h2:" + testDatabase + ";FILE_LOCK=NO"; - ConnectionInfo ci = new ConnectionInfo(url, p); + ConnectionInfo ci = new ConnectionInfo(url, null, null, null); Database database = new Database(ci, null); // close the database database.removeSession(null); diff --git a/h2/src/test/org/h2/test/unit/TestSampleApps.java b/h2/src/test/org/h2/test/unit/TestSampleApps.java index 16ccc5bd5e..2bcafae5b2 100644 --- a/h2/src/test/org/h2/test/unit/TestSampleApps.java +++ b/h2/src/test/org/h2/test/unit/TestSampleApps.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -19,6 +19,7 @@ import org.h2.tools.DeleteDbFiles; import org.h2.util.IOUtils; import org.h2.util.StringUtils; +import org.h2.util.Utils10; /** * Tests the sample apps. @@ -31,7 +32,7 @@ public class TestSampleApps extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -90,9 +91,11 @@ public void test() throws Exception { // process) testApp("The sum is 20.00", org.h2.samples.TriggerSample.class); testApp("Hello: 1\nWorld: 2", org.h2.samples.TriggerPassData.class); - testApp("table test:\n" + + testApp("Key 1 was generated\n" + + "Key 2 was generated\n\n" + + "TEST_TABLE:\n" + "1 Hallo\n\n" + - "test_view:\n" + + "TEST_VIEW:\n" + "1 Hallo", org.h2.samples.UpdatableView.class); testApp( @@ -135,7 +138,7 @@ private void testApp(String expected, Class clazz, String... 
args) out.flush(); System.setOut(oldOut); System.setErr(oldErr); - String s = new String(buff.toByteArray(), StandardCharsets.UTF_8); + String s = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); s = StringUtils.replaceAll(s, "\r\n", "\n"); s = s.trim(); expected = expected.trim(); diff --git a/h2/src/test/org/h2/test/unit/TestScriptReader.java b/h2/src/test/org/h2/test/unit/TestScriptReader.java index 467d37403f..6c430e9e76 100644 --- a/h2/src/test/org/h2/test/unit/TestScriptReader.java +++ b/h2/src/test/org/h2/test/unit/TestScriptReader.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -21,7 +21,7 @@ public class TestScriptReader extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -126,10 +126,17 @@ private static String randomStatement(Random random) { buff.append('*'); String[] ch = { ";", "-", "//", "/* ", "--", "\n", "\r", "a", "$" }; int l = random.nextInt(4); + int comments = 0; for (int j = 0; j < l; j++) { - buff.append(ch[random.nextInt(ch.length)]); + String s = ch[random.nextInt(ch.length)]; + buff.append(s); + if (s.equals("/* ")) { + comments++; + } + } + while (comments-- >= 0) { + buff.append("*/"); } - buff.append("*/"); } break; } @@ -188,12 +195,50 @@ private void testCommon() { assertEquals(null, source.readStatement()); source.close(); + s = "//"; + source = new ScriptReader(new StringReader(s)); + assertEquals("//", source.readStatement()); + assertTrue(source.isInsideRemark()); + assertFalse(source.isBlockRemark()); + source.close(); + // check handling of unclosed block comments s = "/*xxx"; source = new ScriptReader(new StringReader(s)); assertEquals("/*xxx", source.readStatement()); assertTrue(source.isBlockRemark()); source.close(); + + s = "/*xxx*"; + source = new ScriptReader(new StringReader(s)); + assertEquals("/*xxx*", source.readStatement()); + assertTrue(source.isBlockRemark()); + source.close(); + + s = "/*xxx* "; + source = new ScriptReader(new StringReader(s)); + assertEquals("/*xxx* ", source.readStatement()); + assertTrue(source.isBlockRemark()); + source.close(); + + s = "/*xxx/"; + source = new ScriptReader(new StringReader(s)); + assertEquals("/*xxx/", source.readStatement()); + assertTrue(source.isBlockRemark()); + source.close(); + + // nested comments + s = "/*/**/SCRIPT;*/"; + source = new ScriptReader(new StringReader(s)); + assertEquals("/*/**/SCRIPT;*/", source.readStatement()); + assertTrue(source.isBlockRemark()); + source.close(); + + s = "/* /* */ SCRIPT; */"; + source = new ScriptReader(new StringReader(s)); + assertEquals("/* /* */ SCRIPT; */", 
source.readStatement()); + assertTrue(source.isBlockRemark()); + source.close(); } } diff --git a/h2/src/test/org/h2/test/unit/TestSecurity.java b/h2/src/test/org/h2/test/unit/TestSecurity.java index 779062af16..7f3c97050c 100644 --- a/h2/src/test/org/h2/test/unit/TestSecurity.java +++ b/h2/src/test/org/h2/test/unit/TestSecurity.java @@ -1,18 +1,22 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.util.Arrays; +import java.util.Random; import org.h2.security.BlockCipher; import org.h2.security.CipherFactory; import org.h2.security.SHA256; +import org.h2.security.SHA3; import org.h2.test.TestBase; import org.h2.util.StringUtils; @@ -27,13 +31,14 @@ public class TestSecurity extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws SQLException { testConnectWithHash(); testSHA(); + testSHA3(); testAES(); testBlockCiphers(); testRemoveAnonFromLegacyAlgorithms(); @@ -177,6 +182,38 @@ private void checkSHA256(String message, String expected) { assertEquals(expected, hash); } + private void testSHA3() { + try { + MessageDigest md = MessageDigest.getInstance("SHA3-224"); + Random r = new Random(); + byte[] bytes1 = new byte[r.nextInt(1025)]; + byte[] bytes2 = new byte[256]; + r.nextBytes(bytes1); + r.nextBytes(bytes2); + testSHA3(md, SHA3.getSha3_224(), bytes1, bytes2); + testSHA3(MessageDigest.getInstance("SHA3-256"), SHA3.getSha3_256(), bytes1, bytes2); + testSHA3(MessageDigest.getInstance("SHA3-384"), SHA3.getSha3_384(), bytes1, bytes2); + testSHA3(MessageDigest.getInstance("SHA3-512"), SHA3.getSha3_512(), bytes1, bytes2); + } catch (NoSuchAlgorithmException e) { + // Java 8 doesn't support SHA-3 + } + } + + private void testSHA3(MessageDigest md1, SHA3 md2, byte[] bytes1, byte[] bytes2) { + md1.update(bytes1); + md2.update(bytes1); + md1.update(bytes2, 0, 1); + md2.update(bytes2, 0, 1); + md1.update(bytes2, 1, 33); + md2.update(bytes2, 1, 33); + md1.update(bytes2, 34, 222); + md2.update(bytes2, 34, 222); + assertEquals(md1.digest(), md2.digest()); + md1.update(bytes2, 1, 1); + md2.update(bytes2, 1, 1); + assertEquals(md1.digest(), md2.digest()); + } + private void testBlockCiphers() { for (String algorithm : new String[] { "AES", "FOG" }) { byte[] test = new byte[4096]; diff --git a/h2/src/test/org/h2/test/unit/TestServlet.java b/h2/src/test/org/h2/test/unit/TestServlet.java index 7c0f0c2d88..8dd911ced6 100644 --- a/h2/src/test/org/h2/test/unit/TestServlet.java +++ b/h2/src/test/org/h2/test/unit/TestServlet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -46,7 +46,7 @@ public class TestServlet extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } /** @@ -342,6 +342,41 @@ public String getVirtualServerName() { throw new UnsupportedOperationException(); } + @Override + public ServletRegistration.Dynamic addJspFile(String servletName, String jspFile) { + throw new UnsupportedOperationException(); + } + + @Override + public int getSessionTimeout() { + throw new UnsupportedOperationException(); + } + + @Override + public void setSessionTimeout(int sessionTimeout) { + throw new UnsupportedOperationException(); + } + + @Override + public String getRequestCharacterEncoding() { + throw new UnsupportedOperationException(); + } + + @Override + public void setRequestCharacterEncoding(String encoding) { + throw new UnsupportedOperationException(); + } + + @Override + public String getResponseCharacterEncoding() { + throw new UnsupportedOperationException(); + } + + @Override + public void setResponseCharacterEncoding(String encoding) { + throw new UnsupportedOperationException(); + } + } @Override @@ -380,16 +415,16 @@ public void test() throws SQLException { stat2.execute("SELECT * FROM T"); stat2.execute("DROP TABLE T"); - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat1). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat1). 
execute("SELECT * FROM T"); conn2.close(); listener.contextDestroyed(event); // listener must be stopped - assertThrows(ErrorCode.CONNECTION_BROKEN_1, this).getConnection( - "jdbc:h2:tcp://localhost:8888/" + getBaseDir() + "/servlet", - getUser(), getPassword()); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, + () -> getConnection("jdbc:h2:tcp://localhost:8888/" + getBaseDir() + "/servlet", getUser(), + getPassword())); // connection must be closed assertThrows(ErrorCode.OBJECT_CLOSED, stat1). diff --git a/h2/src/test/org/h2/test/unit/TestShell.java b/h2/src/test/org/h2/test/unit/TestShell.java index 376917ed37..36d9373293 100644 --- a/h2/src/test/org/h2/test/unit/TestShell.java +++ b/h2/src/test/org/h2/test/unit/TestShell.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -13,9 +13,11 @@ import java.io.PipedInputStream; import java.io.PipedOutputStream; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import org.h2.test.TestBase; import org.h2.tools.Shell; import org.h2.util.Task; +import org.h2.util.Utils10; /** * Test the shell tool. @@ -40,27 +42,27 @@ public class TestShell extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { Shell shell = new Shell(); ByteArrayOutputStream buff = new ByteArrayOutputStream(); - shell.setOut(new PrintStream(buff)); + shell.setOut(new PrintStream(buff, false, "UTF-8")); shell.runTool("-url", "jdbc:h2:mem:", "-driver", "org.h2.Driver", "-user", "sa", "-password", "sa", "-properties", "null", "-sql", "select 'Hello ' || 'World' as hi"); - String s = new String(buff.toByteArray()); + String s = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); assertContains(s, "HI"); assertContains(s, "Hello World"); assertContains(s, "(1 row, "); shell = new Shell(); buff = new ByteArrayOutputStream(); - shell.setOut(new PrintStream(buff)); + shell.setOut(new PrintStream(buff, false, "UTF-8")); shell.runTool("-help"); - s = new String(buff.toByteArray()); + s = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); assertContains(s, "Interactive command line tool to access a database using JDBC."); @@ -195,7 +197,7 @@ public void call() throws Exception { testOut.println("list"); read("sql> Result list mode is now on"); - testOut.println("select 1 first, 2 second;"); + testOut.println("select 1 first, 2 `second`;"); read("sql> FIRST : 1"); read("SECOND: 2"); read("(1 row, "); diff --git a/h2/src/test/org/h2/test/unit/TestSort.java b/h2/src/test/org/h2/test/unit/TestSort.java index ed97d7fd5d..ab7efe8e8b 100644 --- a/h2/src/test/org/h2/test/unit/TestSort.java +++ b/h2/src/test/org/h2/test/unit/TestSort.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -21,17 +21,14 @@ public class TestSort extends TestBase { /** * The number of times the compare method was called. */ - AtomicInteger compareCount = new AtomicInteger(); + private AtomicInteger compareCount = new AtomicInteger(); /** * The comparison object used in this test. */ - Comparator comp = new Comparator() { - @Override - public int compare(Long o1, Long o2) { - compareCount.incrementAndGet(); - return Long.compare(o1 >> 32, o2 >> 32); - } + Comparator comp = (o1, o2) -> { + compareCount.incrementAndGet(); + return Long.compare(o1 >> 32, o2 >> 32); }; private final Long[] array = new Long[100000]; @@ -43,7 +40,7 @@ public int compare(Long o1, Long o2) { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestStreams.java b/h2/src/test/org/h2/test/unit/TestStreams.java index a200d4d408..73a3c7cc36 100644 --- a/h2/src/test/org/h2/test/unit/TestStreams.java +++ b/h2/src/test/org/h2/test/unit/TestStreams.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -27,7 +27,7 @@ public class TestStreams extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestStringCache.java b/h2/src/test/org/h2/test/unit/TestStringCache.java index 1a1b45db41..ccfa2a18b9 100644 --- a/h2/src/test/org/h2/test/unit/TestStringCache.java +++ b/h2/src/test/org/h2/test/unit/TestStringCache.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -33,7 +33,7 @@ public class TestStringCache extends TestBase { * @param args the command line parameters */ public static void main(String... args) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); new TestStringCache().runBenchmark(); } @@ -156,12 +156,9 @@ private void testMultiThreads() throws InterruptedException { int threadCount = getSize(3, 100); Thread[] threads = new Thread[threadCount]; for (int i = 0; i < threadCount; i++) { - Thread t = new Thread(new Runnable() { - @Override - public void run() { - while (!stop) { - testString(); - } + Thread t = new Thread(() -> { + while (!stop) { + testString(); } }); threads[i] = t; diff --git a/h2/src/test/org/h2/test/unit/TestStringUtils.java b/h2/src/test/org/h2/test/unit/TestStringUtils.java index 410b2d025f..5115c4c374 100644 --- a/h2/src/test/org/h2/test/unit/TestStringUtils.java +++ b/h2/src/test/org/h2/test/unit/TestStringUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -8,13 +8,13 @@ import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; -import java.util.Date; import java.util.Random; -import org.h2.expression.function.DateTimeFunctions; + +import org.h2.expression.function.DateTimeFormatFunction; import org.h2.message.DbException; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; import org.h2.util.StringUtils; +import org.h2.value.ValueTimestampTimeZone; /** * Tests string utility methods. @@ -27,7 +27,7 @@ public class TestStringUtils extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -42,6 +42,7 @@ public void test() throws Exception { testReplaceAll(); testTrim(); testTrimSubstring(); + testTruncateString(); } private void testParseUInt31() { @@ -80,18 +81,9 @@ private void testHex() { StringUtils.convertHexToBytes("fAcE")); assertEquals(new byte[] { (byte) 0xfa, (byte) 0xce }, StringUtils.convertHexToBytes("FaCe")); - new AssertThrows(DbException.class) { @Override - public void test() { - StringUtils.convertHexToBytes("120"); - }}; - new AssertThrows(DbException.class) { @Override - public void test() { - StringUtils.convertHexToBytes("fast"); - }}; - new AssertThrows(DbException.class) { @Override - public void test() { - StringUtils.convertHexToBytes("012=abcf"); - }}; + assertThrows(DbException.class, () -> StringUtils.convertHexToBytes("120")); + assertThrows(DbException.class, () -> StringUtils.convertHexToBytes("fast")); + assertThrows(DbException.class, () -> StringUtils.convertHexToBytes("012=abcf")); } private void testPad() { @@ -113,7 +105,7 @@ private void testXML() { StringUtils.xmlText("Rand&Blue")); assertEquals("<<[[[]]]>>", StringUtils.xmlCData("<<[[[]]]>>")); - Date dt = DateTimeFunctions.parseDateTime( + ValueTimestampTimeZone dt 
= DateTimeFormatFunction.parseDateTime(null, "2001-02-03 04:05:06 GMT", "yyyy-MM-dd HH:mm:ss z", "en", "GMT"); String s = StringUtils.xmlStartDoc() @@ -123,19 +115,19 @@ private void testXML() { StringUtils.xmlComment("Test Comment\nZeile2") + StringUtils.xmlNode("channel", null, StringUtils.xmlNode("title", null, "H2 Database Engine") - + StringUtils.xmlNode("link", null, "http://www.h2database.com") + + StringUtils.xmlNode("link", null, "https://h2database.com") + StringUtils.xmlNode("description", null, "H2 Database Engine") + StringUtils.xmlNode("language", null, "en-us") + StringUtils.xmlNode("pubDate", null, - DateTimeFunctions.formatDateTime(dt, + DateTimeFormatFunction.formatDateTime(null, dt, "EEE, d MMM yyyy HH:mm:ss z", "en", "GMT")) + StringUtils.xmlNode("lastBuildDate", null, - DateTimeFunctions.formatDateTime(dt, + DateTimeFormatFunction.formatDateTime(null, dt, "EEE, d MMM yyyy HH:mm:ss z", "en", "GMT")) + StringUtils.xmlNode("item", null, StringUtils.xmlNode("title", null, "New Version 0.9.9.9.9") - + StringUtils.xmlNode("link", null, "http://www.h2database.com") + + StringUtils.xmlNode("link", null, "https://h2database.com") + StringUtils.xmlNode("description", null, StringUtils.xmlCData("\nNew Features\nTest\n"))))); assertEquals( @@ -149,14 +141,14 @@ private void testXML() { + " -->\n" + " \n" + " H2 Database Engine\n" - + " http://www.h2database.com\n" + + " https://h2database.com\n" + " H2 Database Engine\n" + " en-us\n" + " Sat, 3 Feb 2001 04:05:06 GMT\n" + " Sat, 3 Feb 2001 04:05:06 GMT\n" + " \n" + " New Version 0.9.9.9.9\n" - + " http://www.h2database.com\n" + + " https://h2database.com\n" + " \n" + " StringUtils.trimSubstring(" with (", 1, 8)); } private void testTrimSubstringImpl(String expected, String string, int startIndex, int endIndex) { @@ -299,4 +289,12 @@ private void testTrimSubstringImpl(String expected, String string, int startInde .trimSubstring(new StringBuilder(endIndex - startIndex), string, startIndex, 
endIndex).toString()); } + private void testTruncateString() { + assertEquals("", StringUtils.truncateString("", 1)); + assertEquals("", StringUtils.truncateString("a", 0)); + assertEquals("_\ud83d\ude00", StringUtils.truncateString("_\ud83d\ude00", 3)); + assertEquals("_", StringUtils.truncateString("_\ud83d\ude00", 2)); + assertEquals("_\ud83d", StringUtils.truncateString("_\ud83d_", 2)); + } + } diff --git a/h2/src/test/org/h2/test/unit/TestTimeStampWithTimeZone.java b/h2/src/test/org/h2/test/unit/TestTimeStampWithTimeZone.java index 3cb00a4d63..5d29fce860 100644 --- a/h2/src/test/org/h2/test/unit/TestTimeStampWithTimeZone.java +++ b/h2/src/test/org/h2/test/unit/TestTimeStampWithTimeZone.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -11,14 +11,18 @@ import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Types; +import java.time.OffsetDateTime; import java.util.TimeZone; -import org.h2.api.TimestampWithTimeZone; -import org.h2.engine.SysProperties; +import org.h2.engine.CastDataProvider; import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.util.DateTimeUtils; -import org.h2.util.LocalDateTimeUtils; +import org.h2.util.JSR310Utils; +import org.h2.util.LegacyDateTimeUtils; +import org.h2.util.TimeZoneProvider; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueDate; import org.h2.value.ValueTime; @@ -35,7 +39,7 @@ public class TestTimeStampWithTimeZone extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -65,95 +69,61 @@ private void test1() throws SQLException { ResultSet rs = stat.executeQuery("select t1 from test"); rs.next(); assertEquals("1970-01-01 12:00:00+00:15", rs.getString(1)); - TimestampWithTimeZone ts = test1_getTimestamp(rs); + OffsetDateTime ts = (OffsetDateTime) rs.getObject(1); assertEquals(1970, ts.getYear()); - assertEquals(1, ts.getMonth()); - assertEquals(1, ts.getDay()); - assertEquals(15, ts.getTimeZoneOffsetMins()); - TimestampWithTimeZone firstExpected = new TimestampWithTimeZone(1008673L, 43200000000000L, (short) 15); - assertEquals(firstExpected, ts); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("1970-01-01T12:00+00:15", rs.getObject(1, - LocalDateTimeUtils.OFFSET_DATE_TIME).toString()); - } + assertEquals(1, ts.getMonthValue()); + assertEquals(1, ts.getDayOfMonth()); + assertEquals(15 * 60, ts.getOffset().getTotalSeconds()); + OffsetDateTime expected = OffsetDateTime.parse("1970-01-01T12:00+00:15"); + assertEquals(expected, ts); + assertEquals("1970-01-01T12:00+00:15", rs.getObject(1, OffsetDateTime.class).toString()); rs.next(); - ts = test1_getTimestamp(rs); + ts = (OffsetDateTime) rs.getObject(1); assertEquals(2016, ts.getYear()); - assertEquals(9, ts.getMonth()); - assertEquals(24, ts.getDay()); - assertEquals(1, ts.getTimeZoneOffsetMins()); - assertEquals(1L, ts.getNanosSinceMidnight()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("2016-09-24T00:00:00.000000001+00:01", rs.getObject(1, - LocalDateTimeUtils.OFFSET_DATE_TIME).toString()); - } + assertEquals(9, ts.getMonthValue()); + assertEquals(24, ts.getDayOfMonth()); + assertEquals(1L, ts.toLocalTime().toNanoOfDay()); + assertEquals(60, ts.getOffset().getTotalSeconds()); + assertEquals("2016-09-24T00:00:00.000000001+00:01", rs.getObject(1, OffsetDateTime.class).toString()); rs.next(); - ts = 
test1_getTimestamp(rs); + ts = (OffsetDateTime) rs.getObject(1); assertEquals(2016, ts.getYear()); - assertEquals(9, ts.getMonth()); - assertEquals(24, ts.getDay()); - assertEquals(-1, ts.getTimeZoneOffsetMins()); - assertEquals(1L, ts.getNanosSinceMidnight()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("2016-09-24T00:00:00.000000001-00:01", rs.getObject(1, - LocalDateTimeUtils.OFFSET_DATE_TIME).toString()); - } + assertEquals(9, ts.getMonthValue()); + assertEquals(24, ts.getDayOfMonth()); + assertEquals(1L, ts.toLocalTime().toNanoOfDay()); + assertEquals(-60, ts.getOffset().getTotalSeconds()); + assertEquals("2016-09-24T00:00:00.000000001-00:01", rs.getObject(1, OffsetDateTime.class).toString()); rs.next(); - ts = test1_getTimestamp(rs); + ts = (OffsetDateTime) rs.getObject(1); assertEquals(2016, ts.getYear()); - assertEquals(1, ts.getMonth()); - assertEquals(1, ts.getDay()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("2016-01-01T05:00+10:00", rs.getObject(1, - LocalDateTimeUtils.OFFSET_DATE_TIME).toString()); - } + assertEquals(1, ts.getMonthValue()); + assertEquals(1, ts.getDayOfMonth()); + assertEquals("2016-01-01T05:00+10:00", rs.getObject(1, OffsetDateTime.class).toString()); rs.next(); - ts = test1_getTimestamp(rs); + ts = (OffsetDateTime) rs.getObject(1); assertEquals(2015, ts.getYear()); - assertEquals(12, ts.getMonth()); - assertEquals(31, ts.getDay()); - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("2015-12-31T19:00-10:00", rs.getObject(1, - LocalDateTimeUtils.OFFSET_DATE_TIME).toString()); - } + assertEquals(12, ts.getMonthValue()); + assertEquals(31, ts.getDayOfMonth()); + assertEquals("2015-12-31T19:00-10:00", rs.getObject(1, OffsetDateTime.class).toString()); ResultSetMetaData metaData = rs.getMetaData(); int columnType = metaData.getColumnType(1); - // 2014 is the value of Types.TIMESTAMP_WITH_TIMEZONE - // use the value instead of the reference because the code has to - // compile 
(on Java 1.7). Can be replaced with - // Types.TIMESTAMP_WITH_TIMEZONE - // once Java 1.8 is required. - assertEquals(2014, columnType); - if (SysProperties.RETURN_OFFSET_DATE_TIME && LocalDateTimeUtils.isJava8DateApiPresent()) { - assertEquals("java.time.OffsetDateTime", metaData.getColumnClassName(1)); - } else { - assertEquals("org.h2.api.TimestampWithTimeZone", metaData.getColumnClassName(1)); - } + assertEquals(Types.TIMESTAMP_WITH_TIMEZONE, columnType); + assertEquals("java.time.OffsetDateTime", metaData.getColumnClassName(1)); rs.close(); rs = stat.executeQuery("select cast(t1 as varchar) from test"); assertTrue(rs.next()); - assertEquals(firstExpected, rs.getObject(1, TimestampWithTimeZone.class)); + assertEquals(expected, rs.getObject(1, OffsetDateTime.class)); stat.close(); conn.close(); } - private static TimestampWithTimeZone test1_getTimestamp(ResultSet rs) throws SQLException { - Object o = rs.getObject(1); - if (SysProperties.RETURN_OFFSET_DATE_TIME && LocalDateTimeUtils.isJava8DateApiPresent()) { - ValueTimestampTimeZone value = LocalDateTimeUtils.offsetDateTimeToValue(o); - return new TimestampWithTimeZone(value.getDateValue(), value.getTimeNanos(), - value.getTimeZoneOffsetMins()); - } - return (TimestampWithTimeZone) o; - } - private void test2() { - ValueTimestampTimeZone a = ValueTimestampTimeZone.parse("1970-01-01 12:00:00.00+00:15"); - ValueTimestampTimeZone b = ValueTimestampTimeZone.parse("1970-01-01 12:00:01.00+01:15"); + ValueTimestampTimeZone a = ValueTimestampTimeZone.parse("1970-01-01 12:00:00.00+00:15", null); + ValueTimestampTimeZone b = ValueTimestampTimeZone.parse("1970-01-01 12:00:01.00+01:15", null); int c = a.compareTo(b, null, null); assertEquals(1, c); c = b.compareTo(a, null, null); @@ -161,8 +131,8 @@ private void test2() { } private void test3() { - ValueTimestampTimeZone a = ValueTimestampTimeZone.parse("1970-01-02 00:00:02.00+01:15"); - ValueTimestampTimeZone b = ValueTimestampTimeZone.parse("1970-01-01 
23:00:01.00+00:15"); + ValueTimestampTimeZone a = ValueTimestampTimeZone.parse("1970-01-02 00:00:02.00+01:15", null); + ValueTimestampTimeZone b = ValueTimestampTimeZone.parse("1970-01-01 23:00:01.00+00:15", null); int c = a.compareTo(b, null, null); assertEquals(1, c); c = b.compareTo(a, null, null); @@ -170,8 +140,8 @@ private void test3() { } private void test4() { - ValueTimestampTimeZone a = ValueTimestampTimeZone.parse("1970-01-02 00:00:01.00+01:15"); - ValueTimestampTimeZone b = ValueTimestampTimeZone.parse("1970-01-01 23:00:01.00+00:15"); + ValueTimestampTimeZone a = ValueTimestampTimeZone.parse("1970-01-02 00:00:01.00+01:15", null); + ValueTimestampTimeZone b = ValueTimestampTimeZone.parse("1970-01-01 23:00:01.00+00:15", null); int c = a.compareTo(b, null, null); assertEquals(0, c); c = b.compareTo(a, null, null); @@ -188,8 +158,8 @@ private void test5() throws SQLException { PreparedStatement preparedStatement = conn.prepareStatement("select id" + " from test5" + " where (t1 < ?)"); - Value value = ValueTimestampTimeZone.parse("2016-12-24 00:00:00.000000001+00:01"); - preparedStatement.setObject(1, value.getObject()); + Value value = ValueTimestampTimeZone.parse("2016-12-24 00:00:00.000000001+00:01", null); + preparedStatement.setObject(1, JSR310Utils.valueToOffsetDateTime(value, null)); ResultSet rs = preparedStatement.executeQuery(); @@ -215,34 +185,42 @@ private void testOrder() throws SQLException { conn.close(); } - private void testConversionsImpl(String timeStr, boolean testReverse) { - ValueTimestamp ts = ValueTimestamp.parse(timeStr); - ValueDate d = (ValueDate) ts.convertTo(Value.DATE); - ValueTime t = (ValueTime) ts.convertTo(Value.TIME); - ValueTimestampTimeZone tstz = ValueTimestampTimeZone.parse(timeStr); - assertEquals(ts, tstz.convertTo(Value.TIMESTAMP)); - assertEquals(d, tstz.convertTo(Value.DATE)); - assertEquals(t, tstz.convertTo(Value.TIME)); - assertEquals(ts.getTimestamp(), tstz.getTimestamp()); + private void 
testConversionsImpl(String timeStr, boolean testReverse, CastDataProvider provider) { + ValueTimestamp ts = ValueTimestamp.parse(timeStr, null); + ValueDate d = ts.convertToDate(provider); + ValueTime t = (ValueTime) ts.convertTo(TypeInfo.TYPE_TIME, provider); + ValueTimestampTimeZone tstz = ValueTimestampTimeZone.parse(timeStr, null); + assertEquals(ts, tstz.convertTo(TypeInfo.TYPE_TIMESTAMP, provider)); + assertEquals(d, tstz.convertToDate(provider)); + assertEquals(t, tstz.convertTo(TypeInfo.TYPE_TIME, provider)); + assertEquals(LegacyDateTimeUtils.toTimestamp(provider, null, ts), + LegacyDateTimeUtils.toTimestamp(provider, null, tstz)); if (testReverse) { - assertEquals(0, tstz.compareTo(ts.convertTo(Value.TIMESTAMP_TZ), null, null)); - assertEquals(d.convertTo(Value.TIMESTAMP).convertTo(Value.TIMESTAMP_TZ), - d.convertTo(Value.TIMESTAMP_TZ)); - assertEquals(t.convertTo(Value.TIMESTAMP).convertTo(Value.TIMESTAMP_TZ), - t.convertTo(Value.TIMESTAMP_TZ)); + assertEquals(0, tstz.compareTo(ts.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider), null, null)); + assertEquals(d.convertTo(TypeInfo.TYPE_TIMESTAMP, provider) + .convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider), + d.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider)); + assertEquals(t.convertTo(TypeInfo.TYPE_TIMESTAMP, provider) + .convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider), + t.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider)); } } private void testConversions() { + TestDate.SimpleCastDataProvider provider = new TestDate.SimpleCastDataProvider(); TimeZone current = TimeZone.getDefault(); try { for (String id : TimeZone.getAvailableIDs()) { + if (id.equals("GMT0")) { + continue; + } TimeZone.setDefault(TimeZone.getTimeZone(id)); + provider.currentTimeZone = TimeZoneProvider.ofId(id); DateTimeUtils.resetCalendar(); - testConversionsImpl("2017-12-05 23:59:30.987654321-12:00", true); - testConversionsImpl("2000-01-02 10:20:30.123456789+07:30", true); + testConversionsImpl("2017-12-05 23:59:30.987654321-12:00", 
true, provider); + testConversionsImpl("2000-01-02 10:20:30.123456789+07:30", true, provider); boolean testReverse = !"Africa/Monrovia".equals(id); - testConversionsImpl("1960-04-06 12:13:14.777666555+12:00", testReverse); + testConversionsImpl("1960-04-06 12:13:14.777666555+12:00", testReverse, provider); } } finally { TimeZone.setDefault(current); diff --git a/h2/src/test/org/h2/test/unit/TestTools.java b/h2/src/test/org/h2/test/unit/TestTools.java index 9c1dd11743..69b8c9a0b2 100644 --- a/h2/src/test/org/h2/test/unit/TestTools.java +++ b/h2/src/test/org/h2/test/unit/TestTools.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -45,7 +45,6 @@ import org.h2.test.TestBase; import org.h2.test.TestDb; import org.h2.test.trace.Player; -import org.h2.test.utils.AssertThrows; import org.h2.tools.Backup; import org.h2.tools.ChangeFileEncryption; import org.h2.tools.Console; @@ -61,6 +60,7 @@ import org.h2.tools.SimpleResultSet.SimpleArray; import org.h2.util.JdbcUtils; import org.h2.util.Task; +import org.h2.util.Utils10; import org.h2.value.ValueUuid; /** @@ -78,7 +78,7 @@ public class TestTools extends TestDb { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -101,7 +101,6 @@ public void test() throws Exception { testDeleteFiles(); testScriptRunscriptLob(); testServerMain(); - testRemove(); testConvertTraceFile(); testManagementDb(); testChangeFileEncryption(false); @@ -126,9 +125,7 @@ private void testTcpServerWithoutPort() throws Exception { s2.stop(); s1 = Server.createTcpServer("-tcpPort", "9123").start(); assertEquals(9123, s1.getPort()); - createClassProxy(Server.class); - assertThrows(ErrorCode.EXCEPTION_OPENING_PORT_2, - Server.createTcpServer("-tcpPort", "9123")).start(); + assertThrows(ErrorCode.EXCEPTION_OPENING_PORT_2, () -> Server.createTcpServer("-tcpPort", "9123").start()); s1.stop(); } @@ -179,9 +176,8 @@ private void testConsole() throws Exception { // trying to use the same port for two services should fail, // but also stop the first service - createClassProxy(c.getClass()); - assertThrows(ErrorCode.EXCEPTION_OPENING_PORT_2, c).runTool("-web", - "-webPort", "9002", "-tcp", "-tcpPort", "9002"); + assertThrows(ErrorCode.EXCEPTION_OPENING_PORT_2, + () -> c.runTool("-web", "-webPort", "9002", "-tcp", "-tcpPort", "9002")); c.runTool("-web", "-webPort", "9002"); } finally { @@ -216,14 +212,12 @@ public static void openBrowser(String url) { } private void testSimpleResultSet() throws Exception { - SimpleResultSet rs; rs = new SimpleResultSet(); rs.addColumn(null, 0, 0, 0); rs.addRow(1); - createClassProxy(rs.getClass()); - assertThrows(IllegalStateException.class, rs). 
- addColumn(null, 0, 0, 0); + SimpleResultSet r = rs; + assertThrows(IllegalStateException.class, () -> r.addColumn(null, 0, 0, 0)); assertEquals(ResultSet.TYPE_FORWARD_ONLY, rs.getType()); rs.next(); @@ -245,7 +239,7 @@ private void testSimpleResultSet() throws Exception { assertTrue(rs.getMetaData().isSigned(1)); assertFalse(rs.getMetaData().isWritable(1)); assertEquals("", rs.getMetaData().getCatalogName(1)); - assertEquals(null, rs.getMetaData().getColumnClassName(1)); + assertEquals(Void.class.getName(), rs.getMetaData().getColumnClassName(1)); assertEquals("NULL", rs.getMetaData().getColumnTypeName(1)); assertEquals("", rs.getMetaData().getSchemaName(1)); assertEquals("", rs.getMetaData().getTableName(1)); @@ -279,6 +273,7 @@ private void testSimpleResultSet() throws Exception { rs.addRow(BigInteger.ONE, null, true, null, BigDecimal.ONE, 1d, null, null, null, null, null); rs.addRow(BigInteger.ZERO, null, false, null, BigDecimal.ZERO, 0d, null, null, null, null, null); rs.addRow(null, null, null, null, null, null, null, null, null, null, null); + rs.addRow(null, null, true, null, null, null, null, null, null, null, null); rs.next(); @@ -396,6 +391,12 @@ private void testSimpleResultSet() throws Exception { assertNull(rs.getBinaryStream(12)); assertTrue(rs.wasNull()); + assertTrue(rs.next()); + assertTrue(rs.getBoolean(3)); + assertFalse(rs.wasNull()); + assertNull(rs.getObject(6, Float.class)); + assertTrue(rs.wasNull()); + // all updateX methods for (Method m: rs.getClass().getMethods()) { if (m.getName().startsWith("update")) { @@ -493,6 +494,7 @@ private void testSimpleResultSet() throws Exception { assertTrue(rs.next()); assertTrue(rs.next()); assertTrue(rs.next()); + assertTrue(rs.next()); assertFalse(rs.next()); assertThrows(ErrorCode.NO_DATA_AVAILABLE, (ResultSet) rs). 
getInt(1); @@ -506,7 +508,17 @@ private void testSimpleResultSet() throws Exception { rs.addRow(uuid); rs.next(); assertEquals(uuid, rs.getObject(1)); - assertEquals(uuid, ValueUuid.get(rs.getBytes(1)).getObject()); + assertEquals(uuid, ValueUuid.get(rs.getBytes(1)).getUuid()); + + assertTrue(rs.isWrapperFor(Object.class)); + assertTrue(rs.isWrapperFor(ResultSet.class)); + assertTrue(rs.isWrapperFor(rs.getClass())); + assertFalse(rs.isWrapperFor(Integer.class)); + assertTrue(rs == rs.unwrap(Object.class)); + assertTrue(rs == rs.unwrap(ResultSet.class)); + assertTrue(rs == rs.unwrap(rs.getClass())); + SimpleResultSet rs2 = rs; + assertThrows(ErrorCode.INVALID_VALUE_2, () -> rs2.unwrap(Integer.class)); } private void testJdbcDriverUtils() { @@ -519,12 +531,24 @@ private void testJdbcDriverUtils() { } catch (SQLException e) { assertEquals("08001", e.getSQLState()); } + try { + JdbcUtils.getConnection("javax.naming.InitialContext", "ldap://localhost/ds", "sa", ""); + fail("Expected SQLException: 08001"); + } catch (SQLException e) { + assertEquals("08001", e.getSQLState()); + assertEquals("Only java scheme is supported for JNDI lookups", e.getMessage()); + } + try { + JdbcUtils.getConnection("org.h2.Driver", "jdbc:h2:mem:", "sa", "", null, true); + fail("Expected SQLException: " + ErrorCode.REMOTE_DATABASE_NOT_FOUND_1); + } catch (SQLException e) { + assertEquals(ErrorCode.REMOTE_DATABASE_NOT_FOUND_1, e.getErrorCode()); + } } private void testWrongServer() throws Exception { // try to connect when the server is not running - assertThrows(ErrorCode.CONNECTION_BROKEN_1, this). 
- getConnection("jdbc:h2:tcp://localhost:9001/test"); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, () -> getConnection("jdbc:h2:tcp://localhost:9001/test")); final ServerSocket serverSocket = new ServerSocket(9001); Task task = new Task() { @Override @@ -543,12 +567,7 @@ public void call() throws Exception { try { task.execute(); Thread.sleep(100); - try { - getConnection("jdbc:h2:tcp://localhost:9001/test"); - fail(); - } catch (SQLException e) { - assertEquals(ErrorCode.CONNECTION_BROKEN_1, e.getErrorCode()); - } + assertThrows(ErrorCode.CONNECTION_BROKEN_1, () -> getConnection("jdbc:h2:tcp://localhost:9001/test")); } finally { serverSocket.close(); task.getException(); @@ -576,14 +595,14 @@ private void testDeleteFiles() throws SQLException { deleteDb("testDeleteFiles"); } - private void testServerMain() throws SQLException { + private void testServerMain() throws Exception { testNonSSL(); - if (!config.travis) { + if (!config.ci) { testSSL(); } } - private void testNonSSL() throws SQLException { + private void testNonSSL() throws Exception { String result; Connection conn; @@ -611,7 +630,7 @@ private void testNonSSL() throws SQLException { } } - private void testSSL() throws SQLException { + private void testSSL() throws Exception { String result; Connection conn; @@ -628,8 +647,8 @@ private void testSSL() throws SQLException { result = runServer(0, new String[]{"-tcpShutdown", "ssl://localhost:9001", "-tcpPassword", "abcdef"}); assertContains(result, "Shutting down"); - assertThrows(ErrorCode.CONNECTION_BROKEN_1, this). 
- getConnection("jdbc:h2:ssl://localhost:9001/mem:", "sa", "sa"); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, + () -> getConnection("jdbc:h2:ssl://localhost:9001/mem:", "sa", "sa")); result = runServer(0, new String[]{ "-ifNotExists", "-web", "-webPort", "9002", "-webAllowOthers", "-webSSL", @@ -652,16 +671,16 @@ private void testSSL() throws SQLException { "tcp://localhost:9006", "-tcpPassword", "abc", "-tcpShutdownForce"}); assertContains(result, "Shutting down"); stop.shutdown(); - assertThrows(ErrorCode.CONNECTION_BROKEN_1, this). - getConnection("jdbc:h2:tcp://localhost:9006/mem:", "sa", "sa"); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, + () -> getConnection("jdbc:h2:tcp://localhost:9006/mem:", "sa", "sa")); } finally { shutdownServers(); } } - private String runServer(int exitCode, String... args) { + private String runServer(int exitCode, String... args) throws Exception { ByteArrayOutputStream buff = new ByteArrayOutputStream(); - PrintStream ps = new PrintStream(buff); + PrintStream ps = new PrintStream(buff, false, "UTF-8"); if (server != null) { remainingServers.add(server); } @@ -676,8 +695,7 @@ private String runServer(int exitCode, String... 
args) { } assertEquals(exitCode, result); ps.flush(); - String s = new String(buff.toByteArray()); - return s; + return Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); } private void shutdownServers() { @@ -700,7 +718,7 @@ private void testConvertTraceFile() throws Exception { Connection conn = getConnection(url + ";TRACE_LEVEL_FILE=3", "sa", "sa"); Statement stat = conn.createStatement(); stat.execute( - "create table test(id int primary key, name varchar, amount decimal)"); + "create table test(id int primary key, name varchar, amount decimal(4, 2))"); PreparedStatement prep = conn.prepareStatement( "insert into test values(?, ?, ?)"); prep.setInt(1, 1); @@ -709,7 +727,7 @@ private void testConvertTraceFile() throws Exception { prep.executeUpdate(); stat.execute("create table test2(id int primary key,\n" + "a real, b double, c bigint,\n" + - "d smallint, e boolean, f binary, g date, h time, i timestamp)", + "d smallint, e boolean, f varbinary, g date, h time, i timestamp)", Statement.NO_GENERATED_KEYS); prep = conn.prepareStatement( "insert into test2 values(1, ?, ?, ?, ?, ?, ?, ?, ?, ?)"); @@ -752,8 +770,7 @@ private void testConvertTraceFile() throws Exception { private void testTraceFile(String url) throws SQLException { Connection conn; - Recover.main("-removePassword", "-dir", getBaseDir(), "-db", - "toolsConvertTraceFile"); + Recover.main("-dir", getBaseDir(), "-db", "toolsConvertTraceFile"); conn = getConnection(url, "sa", ""); Statement stat = conn.createStatement(); ResultSet rs; @@ -778,32 +795,6 @@ private void testTraceFile(String url) throws SQLException { conn.close(); } - private void testRemove() throws SQLException { - if (config.mvStore) { - return; - } - deleteDb("toolsRemove"); - org.h2.Driver.load(); - String url = "jdbc:h2:" + getBaseDir() + "/toolsRemove"; - Connection conn = getConnection(url, "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key, name varchar)"); 
- stat.execute("insert into test values(1, 'Hello')"); - conn.close(); - Recover.main("-dir", getBaseDir(), "-db", "toolsRemove", - "-removePassword"); - conn = getConnection(url, "sa", ""); - stat = conn.createStatement(); - ResultSet rs; - rs = stat.executeQuery("select * from test"); - rs.next(); - assertEquals(1, rs.getInt(1)); - assertEquals("Hello", rs.getString(2)); - conn.close(); - deleteDb("toolsRemove"); - FileUtils.delete(getBaseDir() + "/toolsRemove.h2.sql"); - } - private void testRecover() throws SQLException { if (config.memory) { return; @@ -876,7 +867,6 @@ private void testManagementDb() throws SQLException { } private void testScriptRunscriptLob() throws Exception { - org.h2.Driver.load(); String url = getURL("jdbc:h2:" + getBaseDir() + "/testScriptRunscriptLob", true); String user = "sa", password = "abc"; @@ -937,8 +927,7 @@ private void testScriptRunscriptLob() throws Exception { } - private void testScriptRunscript() throws SQLException { - org.h2.Driver.load(); + private void testScriptRunscript() throws Exception { String url = getURL("jdbc:h2:" + getBaseDir() + "/testScriptRunscript", true); String user = "sa", password = "abc"; @@ -975,10 +964,10 @@ private void testScriptRunscript() throws SQLException { "-quiet"); RunScript tool = new RunScript(); ByteArrayOutputStream buff = new ByteArrayOutputStream(); - tool.setOut(new PrintStream(buff)); + tool.setOut(new PrintStream(buff, false, "UTF-8")); tool.runTool("-url", url, "-user", user, "-password", password, "-script", fileName + ".txt", "-showResults"); - assertContains(buff.toString(), "Hello"); + assertContains(Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8), "Hello"); // test parsing of BLOCKSIZE option @@ -1017,14 +1006,9 @@ private void testBackupRestore() throws SQLException { .executeQuery("SELECT * FROM TEST"); assertTrue(rs.next()); assertFalse(rs.next()); - new AssertThrows(ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1) { - @Override - public void test() 
throws SQLException { - // must fail when the database is in use - Backup.main("-file", fileName, "-dir", getBaseDir(), "-db", - "testBackupRestore"); - } - }; + // must fail when the database is in use + assertThrows(ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1, + () -> Backup.main("-file", fileName, "-dir", getBaseDir(), "-db", "testBackupRestore")); conn.close(); DeleteDbFiles.main("-dir", getBaseDir(), "-db", "testBackupRestore", "-quiet"); @@ -1049,14 +1033,9 @@ private void testChangeFileEncryption(boolean split) throws SQLException { conn = getConnection(url, "sa", "def 123"); stat = conn.createStatement(); stat.execute("SELECT * FROM TEST"); - new AssertThrows(ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1) { - @Override - public void test() throws SQLException { - new ChangeFileEncryption().runTool(new String[] { "-dir", dir, - "-db", "testChangeFileEncryption", "-cipher", "AES", - "-decrypt", "def", "-quiet" }); - } - }; + assertThrows(ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1, + () -> new ChangeFileEncryption().runTool(new String[] { "-dir", dir, "-db", "testChangeFileEncryption", + "-cipher", "AES", "-decrypt", "def", "-quiet" })); conn.close(); args = new String[] { "-dir", dir, "-db", "testChangeFileEncryption", "-quiet" }; @@ -1075,14 +1054,8 @@ private void testChangeFileEncryptionWithWrongPassword() throws SQLException { conn.close(); // try with wrong password, this used to have a bug where it kept the // file handle open - new AssertThrows(SQLException.class) { - @Override - public void test() throws SQLException { - ChangeFileEncryption.execute(dir, "testChangeFileEncryption", - "AES", "wrong".toCharArray(), - "def".toCharArray(), true); - } - }; + assertThrows(SQLException.class, () -> ChangeFileEncryption.execute(dir, "testChangeFileEncryption", "AES", + "wrong".toCharArray(), "def".toCharArray(), true)); ChangeFileEncryption.execute(dir, "testChangeFileEncryption", "AES", "abc".toCharArray(), "def".toCharArray(), true); @@ -1104,19 +1077,15 @@ 
private void testServer() throws SQLException { "-tcpAllowOthers").start(); remainingServers.add(tcpServer); final int port = tcpServer.getPort(); - conn = getConnection("jdbc:h2:tcp://localhost:"+ port +"/test", "sa", ""); + conn = getConnection("jdbc:h2:tcp://localhost:" + port + "/test", "sa", ""); conn.close(); // must not be able to use a different base dir - new AssertThrows(ErrorCode.IO_EXCEPTION_1) { - @Override - public void test() throws SQLException { - getConnection("jdbc:h2:tcp://localhost:"+ port +"/../test", "sa", ""); - }}; - new AssertThrows(ErrorCode.IO_EXCEPTION_1) { - @Override - public void test() throws SQLException { - getConnection("jdbc:h2:tcp://localhost:"+port+"/../test2/test", "sa", ""); - }}; + assertThrows(ErrorCode.IO_EXCEPTION_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + port + "/../test", "sa", "")); + assertThrows(ErrorCode.IO_EXCEPTION_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + port + "/../test2/test", "sa", "")); + assertThrows(ErrorCode.WRONG_USER_OR_PASSWORD, + () -> Server.shutdownTcpServer("tcp://localhost:" + port, "", true, false)); tcpServer.stop(); Server tcpServerWithPassword = Server.createTcpServer( "-ifExists", @@ -1125,31 +1094,22 @@ public void test() throws SQLException { final int prt = tcpServerWithPassword.getPort(); remainingServers.add(tcpServerWithPassword); // must not be able to create new db - new AssertThrows(ErrorCode.DATABASE_NOT_FOUND_2) { - @Override - public void test() throws SQLException { - getConnection("jdbc:h2:tcp://localhost:"+prt+"/test2", "sa", ""); - }}; - new AssertThrows(ErrorCode.DATABASE_NOT_FOUND_2) { - @Override - public void test() throws SQLException { - getConnection("jdbc:h2:tcp://localhost:"+prt+"/test2;ifexists=false", "sa", ""); - }}; - conn = getConnection("jdbc:h2:tcp://localhost:"+prt+"/test", "sa", ""); + assertThrows(ErrorCode.REMOTE_DATABASE_NOT_FOUND_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + prt + "/test2", "sa", "")); + 
assertThrows(ErrorCode.REMOTE_DATABASE_NOT_FOUND_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + prt + "/test2;ifexists=false", "sa", "")); + conn = getConnection("jdbc:h2:tcp://localhost:" + prt + "/test", "sa", ""); conn.close(); - new AssertThrows(ErrorCode.WRONG_USER_OR_PASSWORD) { - @Override - public void test() throws SQLException { - Server.shutdownTcpServer("tcp://localhost:"+prt, "", true, false); - }}; - conn = getConnection("jdbc:h2:tcp://localhost:"+prt+"/test", "sa", ""); + assertThrows(ErrorCode.WRONG_USER_OR_PASSWORD, + () -> Server.shutdownTcpServer("tcp://localhost:" + prt, "", true, false)); + conn = getConnection("jdbc:h2:tcp://localhost:" + prt + "/test", "sa", ""); // conn.close(); - Server.shutdownTcpServer("tcp://localhost:"+prt, "abc", true, false); + Server.shutdownTcpServer("tcp://localhost:" + prt, "abc", true, false); // check that the database is closed deleteDb("test"); // server must have been closed - assertThrows(ErrorCode.CONNECTION_BROKEN_1, this). - getConnection("jdbc:h2:tcp://localhost:"+prt+"/test", "sa", ""); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + prt + "/test", "sa", "")); JdbcUtils.closeSilently(conn); // Test filesystem prefix and escape from baseDir deleteDb("testSplit"); @@ -1157,11 +1117,11 @@ public void test() throws SQLException { "-baseDir", getBaseDir(), "-tcpAllowOthers").start(); final int p = server.getPort(); - conn = getConnection("jdbc:h2:tcp://localhost:"+p+"/split:testSplit", "sa", ""); + conn = getConnection("jdbc:h2:tcp://localhost:" + p + "/split:testSplit", "sa", ""); conn.close(); - assertThrows(ErrorCode.IO_EXCEPTION_1, this). 
- getConnection("jdbc:h2:tcp://localhost:"+p+"/../test", "sa", ""); + assertThrows(ErrorCode.IO_EXCEPTION_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + p + "/../test", "sa", "")); server.stop(); deleteDb("testSplit"); @@ -1173,7 +1133,7 @@ public void test() throws SQLException { /** * A simple Clob implementation. */ - class SimpleClob implements Clob { + static class SimpleClob implements Clob { private final String data; @@ -1263,7 +1223,7 @@ public void truncate(long len) throws SQLException { /** * A simple Blob implementation. */ - class SimpleBlob implements Blob { + static class SimpleBlob implements Blob { private final byte[] data; diff --git a/h2/src/test/org/h2/test/unit/TestTraceSystem.java b/h2/src/test/org/h2/test/unit/TestTraceSystem.java index a3c73632ff..1c6c1e6af9 100644 --- a/h2/src/test/org/h2/test/unit/TestTraceSystem.java +++ b/h2/src/test/org/h2/test/unit/TestTraceSystem.java @@ -1,15 +1,17 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import org.h2.message.TraceSystem; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.util.Utils10; /** * Tests the trace system @@ -22,7 +24,7 @@ public class TestTraceSystem extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -48,14 +50,14 @@ private void testAdapter() { ts.close(); } - private void testTraceDebug() { + private void testTraceDebug() throws Exception { TraceSystem ts = new TraceSystem(null); ByteArrayOutputStream out = new ByteArrayOutputStream(); - ts.setSysOut(new PrintStream(out)); + ts.setSysOut(new PrintStream(out, false, "UTF-8")); ts.setLevelSystemOut(TraceSystem.DEBUG); ts.getTrace("test").debug(new Exception("error"), "test"); ts.close(); - String outString = new String(out.toByteArray()); + String outString = Utils10.byteArrayOutputStreamToString(out, StandardCharsets.UTF_8); assertContains(outString, "error"); assertContains(outString, "Exception"); assertContains(outString, "test"); diff --git a/h2/src/test/org/h2/test/unit/TestUpgrade.java b/h2/src/test/org/h2/test/unit/TestUpgrade.java new file mode 100644 index 0000000000..b448560ec9 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestUpgrade.java @@ -0,0 +1,112 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.nio.charset.StandardCharsets; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.Properties; +import java.util.Random; + +import org.h2.engine.Constants; +import org.h2.store.fs.FilePath; +import org.h2.store.fs.FileUtils; +import org.h2.test.TestBase; +import org.h2.tools.Upgrade; + +/** + * Tests upgrade utility. + */ +public class TestUpgrade extends TestBase { + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + deleteDb(); + testUpgrade(1, 2, 120); + testUpgrade(1, 4, 200); + } + + private void testUpgrade(int major, int minor, int build) throws Exception { + String baseDir = getBaseDir(); + String url = "jdbc:h2:" + baseDir + "/testUpgrade"; + Properties p = new Properties(); + p.put("user", "sa"); + p.put("password", "password"); + Random r = new Random(); + byte[] bytes = new byte[10_000]; + r.nextBytes(bytes); + String s = new String(bytes, StandardCharsets.ISO_8859_1); + java.sql.Driver driver = Upgrade.loadH2(build); + try { + assertEquals(major, driver.getMajorVersion()); + assertEquals(minor, driver.getMinorVersion()); + try (Connection conn = driver.connect(url, p)) { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID BIGINT AUTO_INCREMENT PRIMARY KEY, B BINARY, L BLOB, C CLOB)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(B, L, C) VALUES (?, ?, ?)"); + prep.setBytes(1, bytes); + prep.setBytes(2, bytes); + prep.setString(3, s); + prep.execute(); + } + } finally { + Upgrade.unloadH2(driver); + } + assertTrue(Upgrade.upgrade(url, p, build)); + try (Connection conn = DriverManager.getConnection(url, p)) { + Statement stat = conn.createStatement(); + try (ResultSet rs = stat.executeQuery("TABLE TEST")) { + assertTrue(rs.next()); + assertEquals(bytes, rs.getBytes(2)); + assertEquals(bytes, rs.getBytes(3)); + assertEquals(s, rs.getString(4)); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT COLUMN_NAME, DATA_TYPE, CHARACTER_OCTET_LENGTH" + + " FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION")) { + assertTrue(rs.next()); + assertEquals("ID", rs.getString(1)); + assertEquals("BIGINT", rs.getString(2)); + assertTrue(rs.next()); + assertEquals("B", rs.getString(1)); + assertEquals("BINARY VARYING", rs.getString(2)); + 
assertEquals(Constants.MAX_STRING_LENGTH, rs.getLong(3)); + assertTrue(rs.next()); + assertEquals("L", rs.getString(1)); + assertEquals("BINARY LARGE OBJECT", rs.getString(2)); + assertEquals(Long.MAX_VALUE, rs.getLong(3)); + assertTrue(rs.next()); + assertEquals("C", rs.getString(1)); + assertEquals("CHARACTER LARGE OBJECT", rs.getString(2)); + assertEquals(Long.MAX_VALUE, rs.getLong(3)); + assertFalse(rs.next()); + } + } + deleteDb(); + } + + private void deleteDb() { + for (FilePath p : FilePath.get(getBaseDir()).newDirectoryStream()) { + if (p.getName().startsWith("testUpgrade")) { + FileUtils.deleteRecursive(p.toString(), false); + } + } + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestUtils.java b/h2/src/test/org/h2/test/unit/TestUtils.java index 4ecae0cdf6..29fbefae65 100644 --- a/h2/src/test/org/h2/test/unit/TestUtils.java +++ b/h2/src/test/org/h2/test/unit/TestUtils.java @@ -1,22 +1,18 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.ByteArrayInputStream; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.Reader; import java.io.StringReader; import java.math.BigInteger; -import java.sql.Timestamp; -import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; -import java.util.Date; import java.util.Random; import org.h2.test.TestBase; import org.h2.util.Bits; @@ -39,7 +35,7 @@ public class TestUtils extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -152,17 +148,12 @@ private void testLongImpl2(byte[] buff, long x, long r) { } private void testSortTopN() { - Comparator comp = new Comparator() { - @Override - public int compare(Integer o1, Integer o2) { - return o1.compareTo(o2); - } - }; + Comparator comp = Comparator.naturalOrder(); Integer[] arr = new Integer[] {}; - Utils.sortTopN(arr, 0, 5, comp); + Utils.sortTopN(arr, 0, 0, comp); arr = new Integer[] { 1 }; - Utils.sortTopN(arr, 0, 5, comp); + Utils.sortTopN(arr, 0, 1, comp); arr = new Integer[] { 3, 5, 1, 4, 2 }; Utils.sortTopN(arr, 0, 2, comp); @@ -172,23 +163,19 @@ public int compare(Integer o1, Integer o2) { private void testSortTopNRandom() { Random rnd = new Random(); - Comparator comp = new Comparator() { - @Override - public int compare(Integer o1, Integer o2) { - return o1.compareTo(o2); - } - }; + Comparator comp = Comparator.naturalOrder(); for (int z = 0; z < 10000; z++) { - Integer[] arr = new Integer[1 + rnd.nextInt(500)]; - for (int i = 0; i < arr.length; i++) { + int length = 1 + rnd.nextInt(500); + Integer[] arr = new Integer[length]; + for (int i = 0; i < length; i++) { arr[i] = rnd.nextInt(50); } - Integer[] arr2 = Arrays.copyOf(arr, arr.length); - int offset = rnd.nextInt(arr.length); - int limit = rnd.nextInt(arr.length); - Utils.sortTopN(arr, offset, limit, comp); + Integer[] arr2 = Arrays.copyOf(arr, length); + int offset = rnd.nextInt(length); + int limit = rnd.nextInt(length - offset + 1); + Utils.sortTopN(arr, offset, offset + limit, comp); Arrays.sort(arr2, comp); - for (int i = offset, end = Math.min(offset + limit, arr.length); i < end; i++) { + for (int i = offset, end = offset + limit; i < end; i++) { if (!arr[i].equals(arr2[i])) { fail(offset + " " + end + "\n" + Arrays.toString(arr) + "\n" + Arrays.toString(arr2)); @@ -231,35 +218,10 @@ private void testReflectionUtils() throws Exception { // 
Instance methods long x = (Long) Utils.callMethod(instance, "longValue"); assertEquals(10, x); - // Static fields - String pathSeparator = (String) Utils - .getStaticField("java.io.File.pathSeparator"); - assertEquals(File.pathSeparator, pathSeparator); // Instance fields - String test = (String) Utils.getField(this, "testField"); - assertEquals(this.testField, test); - // Class present? - assertFalse(Utils.isClassPresent("abc")); - assertTrue(Utils.isClassPresent(getClass().getName())); Utils.callStaticMethod("java.lang.String.valueOf", "a"); Utils.callStaticMethod("java.awt.AWTKeyStroke.getAWTKeyStroke", 'x', java.awt.event.InputEvent.SHIFT_DOWN_MASK); - // Common comparable superclass - assertFalse(Utils.haveCommonComparableSuperclass( - Integer.class, - Long.class)); - assertTrue(Utils.haveCommonComparableSuperclass( - Integer.class, - Integer.class)); - assertTrue(Utils.haveCommonComparableSuperclass( - Timestamp.class, - Date.class)); - assertFalse(Utils.haveCommonComparableSuperclass( - ArrayList.class, - Long.class)); - assertFalse(Utils.haveCommonComparableSuperclass( - Integer.class, - ArrayList.class)); } private void testParseBooleanCheckFalse(String value) { @@ -308,18 +270,8 @@ private void testParseBoolean() { // Test other values assertFalse(Utils.parseBoolean("BAD", false, false)); assertTrue(Utils.parseBoolean("BAD", true, false)); - try { - Utils.parseBoolean("BAD", false, true); - fail(); - } catch (IllegalArgumentException e) { - // OK - } - try { - Utils.parseBoolean("BAD", true, true); - fail(); - } catch (IllegalArgumentException e) { - // OK - } + assertThrows(IllegalArgumentException.class, () -> Utils.parseBoolean("BAD", false, true)); + assertThrows(IllegalArgumentException.class, () -> Utils.parseBoolean("BAD", true, true)); } } diff --git a/h2/src/test/org/h2/test/unit/TestValue.java b/h2/src/test/org/h2/test/unit/TestValue.java index 5faa4a545f..d04d2e18b5 100644 --- a/h2/src/test/org/h2/test/unit/TestValue.java +++ 
b/h2/src/test/org/h2/test/unit/TestValue.java @@ -1,10 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; +import static org.h2.engine.Constants.MAX_ARRAY_CARDINALITY; +import static org.h2.engine.Constants.MAX_NUMERIC_PRECISION; +import static org.h2.engine.Constants.MAX_STRING_LENGTH; + import java.io.ByteArrayInputStream; import java.io.InputStreamReader; import java.math.BigDecimal; @@ -17,42 +21,36 @@ import java.sql.SQLException; import java.sql.Time; import java.sql.Timestamp; -import java.sql.Types; import java.util.Arrays; import java.util.Calendar; import java.util.TimeZone; import java.util.UUID; - import org.h2.api.ErrorCode; +import org.h2.api.H2Type; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.jdbc.JdbcConnection; -import org.h2.message.DbException; -import org.h2.result.ResultInterface; -import org.h2.result.SimpleResult; import org.h2.store.DataHandler; import org.h2.test.TestBase; import org.h2.test.TestDb; -import org.h2.test.utils.AssertThrows; -import org.h2.tools.SimpleResultSet; import org.h2.util.Bits; -import org.h2.value.DataType; +import org.h2.util.JdbcUtils; +import org.h2.util.LegacyDateTimeUtils; import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueArray; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDecimal; +import org.h2.value.ValueBlob; +import org.h2.value.ValueClob; import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; -import org.h2.value.ValueInt; import org.h2.value.ValueInterval; import org.h2.value.ValueJavaObject; -import org.h2.value.ValueLobDb; -import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; 
-import org.h2.value.ValueString; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; import org.h2.value.ValueTimestamp; +import org.h2.value.ValueToObjectConverter2; import org.h2.value.ValueUuid; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; /** * Tests features of values. @@ -65,15 +63,13 @@ public class TestValue extends TestDb { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws SQLException { - testResultSetOperations(); testBinaryAndUuid(); testCastTrim(); - testValueResultSet(); testDataType(); testArray(); testUUID(); @@ -85,60 +81,8 @@ public void test() throws SQLException { testModulusOperator(); testLobComparison(); testTypeInfo(); - } - - private void testResultSetOperations() throws SQLException { - SimpleResultSet rs = new SimpleResultSet(); - rs.setAutoClose(false); - rs.addColumn("X", Types.INTEGER, 10, 0); - rs.addRow(new Object[]{null}); - rs.next(); - for (int type = Value.NULL; type < Value.TYPE_COUNT; type++) { - if (type == 23) { - // a defunct experimental type - } else { - Value v = DataType.readValue(null, rs, 1, type); - assertTrue(v == ValueNull.INSTANCE); - } - } - testResultSetOperation(new byte[0]); - testResultSetOperation(1); - testResultSetOperation(Boolean.TRUE); - testResultSetOperation((byte) 1); - testResultSetOperation((short) 2); - testResultSetOperation((long) 3); - testResultSetOperation(4.0f); - testResultSetOperation(5.0d); - testResultSetOperation(new Date(6)); - testResultSetOperation(new Time(7)); - testResultSetOperation(new Timestamp(8)); - testResultSetOperation(new BigDecimal("9")); - testResultSetOperation(UUID.randomUUID()); - - SimpleResultSet rs2 = new SimpleResultSet(); - rs2.setAutoClose(false); - rs2.addColumn("X", Types.INTEGER, 10, 0); - rs2.addRow(new Object[]{1}); - rs2.next(); - testResultSetOperation(rs2); - 
- } - - private void testResultSetOperation(Object obj) throws SQLException { - SimpleResultSet rs = new SimpleResultSet(); - rs.setAutoClose(false); - int valueType = DataType.getTypeFromClass(obj.getClass()); - int sqlType = DataType.convertTypeToSQLType(valueType); - rs.addColumn("X", sqlType, 10, 0); - rs.addRow(new Object[]{obj}); - rs.next(); - Value v = DataType.readValue(null, rs, 1, valueType); - Value v2 = DataType.convertToValue(null, obj, valueType); - if (v.getValueType() == Value.RESULT_SET) { - assertEquals(v.toString(), v2.toString()); - } else { - assertTrue(v.equals(v2)); - } + testH2Type(); + testHigherType(); } private void testBinaryAndUuid() throws SQLException { @@ -147,7 +91,13 @@ private void testBinaryAndUuid() throws SQLException { PreparedStatement prep; ResultSet rs; // Check conversion to byte[] - prep = conn.prepareStatement("SELECT * FROM TABLE(X BINARY=?)"); + prep = conn.prepareStatement("SELECT * FROM TABLE(X BINARY(16)=?)"); + prep.setObject(1, new Object[] { uuid }); + rs = prep.executeQuery(); + rs.next(); + assertTrue(Arrays.equals(Bits.uuidToBytes(uuid), (byte[]) rs.getObject(1))); + // Check conversion to byte[] + prep = conn.prepareStatement("SELECT * FROM TABLE(X VARBINARY=?)"); prep.setObject(1, new Object[] { uuid }); rs = prep.executeQuery(); rs.next(); @@ -167,157 +117,79 @@ private void testCastTrim() { Value v; String spaces = new String(new char[100]).replace((char) 0, ' '); - v = ValueArray.get(new Value[] { ValueString.get("hello"), - ValueString.get("world") }); - assertEquals(10, v.getType().getPrecision()); - assertEquals(5, v.convertPrecision(5, true).getType().getPrecision()); - v = ValueArray.get(new Value[]{ValueString.get(""), ValueString.get("")}); - assertEquals(0, v.getType().getPrecision()); - assertEquals("['']", v.convertPrecision(1, true).toString()); + v = ValueArray.get(new Value[] { ValueVarchar.get("hello"), ValueVarchar.get("world") }, null); + TypeInfo typeInfo = 
TypeInfo.getTypeInfo(Value.ARRAY, 1L, 0, TypeInfo.TYPE_VARCHAR); + assertEquals(2, v.getType().getPrecision()); + assertEquals(1, v.castTo(typeInfo, null).getType().getPrecision()); + v = ValueArray.get(new Value[]{ValueVarchar.get(""), ValueVarchar.get("")}, null); + assertEquals(2, v.getType().getPrecision()); + assertEquals("ARRAY ['']", v.castTo(typeInfo, null).toString()); - v = ValueBytes.get(spaces.getBytes()); + v = ValueVarbinary.get(spaces.getBytes()); + typeInfo = TypeInfo.getTypeInfo(Value.VARBINARY, 10L, 0, null); assertEquals(100, v.getType().getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getType().getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getBytes().length); - assertEquals(32, v.convertPrecision(10, false).getBytes()[9]); - assertEquals(10, v.convertPrecision(10, true).getType().getPrecision()); - - final Value vd = ValueDecimal.get(new BigDecimal("1234567890.123456789")); - assertEquals(19, vd.getType().getPrecision()); - assertEquals("1234567890.1234567", vd.convertPrecision(10, true).getString()); - new AssertThrows(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1) { - @Override - public void test() { - vd.convertPrecision(10, false); - } - }; + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getBytes().length); + assertEquals(32, v.castTo(typeInfo, null).getBytes()[9]); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); - v = ValueLobDb.createSmallLob(Value.CLOB, spaces.getBytes(), 100); + v = ValueClob.createSmall(spaces.getBytes(), 100); + typeInfo = TypeInfo.getTypeInfo(Value.CLOB, 10L, 0, null); assertEquals(100, v.getType().getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getType().getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getString().length()); - assertEquals(" ", v.convertPrecision(10, false).getString()); - assertEquals(10, v.convertPrecision(10, true).getType().getPrecision()); + 
assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getString().length()); + assertEquals(" ", v.castTo(typeInfo, null).getString()); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); - v = ValueLobDb.createSmallLob(Value.BLOB, spaces.getBytes(), 100); - assertEquals(100, v.getType().getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getType().getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getBytes().length); - assertEquals(32, v.convertPrecision(10, false).getBytes()[9]); - assertEquals(10, v.convertPrecision(10, true).getType().getPrecision()); - - SimpleResult rs = new SimpleResult(); - rs.addColumn("X", "X", Value.INT, 0, 0); - rs.addRow(ValueInt.get(1)); - v = ValueResultSet.get(rs); - assertEquals(Integer.MAX_VALUE, v.getType().getPrecision()); - assertEquals(Integer.MAX_VALUE, v.convertPrecision(10, false).getType().getPrecision()); - assertEquals(1, v.convertPrecision(10, false).getResult().getRowCount()); - assertEquals(0, v.convertPrecision(10, true).getResult().getRowCount()); - assertEquals(Integer.MAX_VALUE, v.convertPrecision(10, true).getType().getPrecision()); - - v = ValueString.get(spaces); + v = ValueBlob.createSmall(spaces.getBytes()); + typeInfo = TypeInfo.getTypeInfo(Value.BLOB, 10L, 0, null); assertEquals(100, v.getType().getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getType().getPrecision()); - assertEquals(" ", v.convertPrecision(10, false).getString()); - assertEquals(" ", v.convertPrecision(10, true).getString()); - - } + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getBytes().length); + assertEquals(32, v.castTo(typeInfo, null).getBytes()[9]); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); - private void testValueResultSet() throws SQLException { - SimpleResultSet rs = new SimpleResultSet(); - 
rs.setAutoClose(false); - rs.addColumn("ID", Types.INTEGER, 0, 0); - rs.addColumn("NAME", Types.VARCHAR, 255, 0); - rs.addRow(1, "Hello"); - rs.addRow(2, "World"); - rs.addRow(3, "Peace"); - - testValueResultSetTest(ValueResultSet.get(null, rs, Integer.MAX_VALUE), Integer.MAX_VALUE, true); - rs.beforeFirst(); - testValueResultSetTest(ValueResultSet.get(null, rs, 2), 2, true); - - SimpleResult result = new SimpleResult(); - result.addColumn("ID", "ID", Value.INT, 0, 0); - result.addColumn("NAME", "NAME", Value.STRING, 255, 0); - result.addRow(ValueInt.get(1), ValueString.get("Hello")); - result.addRow(ValueInt.get(2), ValueString.get("World")); - result.addRow(ValueInt.get(3), ValueString.get("Peace")); - - ValueResultSet v = ValueResultSet.get(result); - testValueResultSetTest(v, Integer.MAX_VALUE, false); - - testValueResultSetTest(ValueResultSet.get(v.getResult(), Integer.MAX_VALUE), Integer.MAX_VALUE, false); - testValueResultSetTest(ValueResultSet.get(v.getResult(), 2), 2, false); - } + v = ValueVarchar.get(spaces); + typeInfo = TypeInfo.getTypeInfo(Value.VARCHAR, 10L, 0, null); + assertEquals(100, v.getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); + assertEquals(" ", v.castTo(typeInfo, null).getString()); + assertEquals(" ", v.castTo(typeInfo, null).getString()); - private void testValueResultSetTest(ValueResultSet v, int count, boolean fromSimple) { - ResultInterface res = v.getResult(); - assertEquals(2, res.getVisibleColumnCount()); - assertEquals("ID", res.getAlias(0)); - assertEquals("ID", res.getColumnName(0)); - TypeInfo type = res.getColumnType(0); - assertEquals(Value.INT, type.getValueType()); - assertEquals(ValueInt.PRECISION, type.getPrecision()); - assertEquals(0, type.getScale()); - assertEquals(ValueInt.DISPLAY_SIZE, type.getDisplaySize()); - assertEquals("NAME", res.getAlias(1)); - assertEquals("NAME", res.getColumnName(1)); - type = res.getColumnType(1); - assertEquals(Value.STRING, 
type.getValueType()); - assertEquals(255, type.getPrecision()); - assertEquals(0, type.getScale()); - assertEquals(255, type.getDisplaySize()); - if (count >= 1) { - assertTrue(res.next()); - assertEquals(new Value[] {ValueInt.get(1), ValueString.get("Hello")}, res.currentRow()); - if (count >= 2) { - assertTrue(res.next()); - assertEquals(new Value[] {ValueInt.get(2), ValueString.get("World")}, res.currentRow()); - if (count >= 3) { - assertTrue(res.next()); - assertEquals(new Value[] {ValueInt.get(3), ValueString.get("Peace")}, res.currentRow()); - } - } - } - assertFalse(res.next()); } private void testDataType() { - testDataType(Value.NULL, null); - testDataType(Value.NULL, Void.class); - testDataType(Value.NULL, void.class); - testDataType(Value.ARRAY, String[].class); - testDataType(Value.STRING, String.class); - testDataType(Value.INT, Integer.class); - testDataType(Value.LONG, Long.class); - testDataType(Value.BOOLEAN, Boolean.class); - testDataType(Value.DOUBLE, Double.class); - testDataType(Value.BYTE, Byte.class); - testDataType(Value.SHORT, Short.class); - testDataType(Value.FLOAT, Float.class); - testDataType(Value.BYTES, byte[].class); - testDataType(Value.UUID, UUID.class); - testDataType(Value.NULL, Void.class); - testDataType(Value.DECIMAL, BigDecimal.class); - testDataType(Value.RESULT_SET, ResultSet.class); - testDataType(Value.BLOB, ValueLobDb.class); - // see FIXME in DataType.getTypeFromClass - //testDataType(Value.CLOB, Value.ValueClob.class); - testDataType(Value.DATE, Date.class); - testDataType(Value.TIME, Time.class); - testDataType(Value.TIMESTAMP, Timestamp.class); - testDataType(Value.TIMESTAMP, java.util.Date.class); - testDataType(Value.CLOB, java.io.Reader.class); - testDataType(Value.CLOB, java.sql.Clob.class); - testDataType(Value.BLOB, java.io.InputStream.class); - testDataType(Value.BLOB, java.sql.Blob.class); - testDataType(Value.ARRAY, Object[].class); - testDataType(Value.JAVA_OBJECT, StringBuffer.class); + 
testDataType(TypeInfo.TYPE_NULL, null); + testDataType(TypeInfo.TYPE_NULL, Void.class); + testDataType(TypeInfo.TYPE_NULL, void.class); + testDataType(TypeInfo.getTypeInfo(Value.ARRAY, Integer.MAX_VALUE, 0, TypeInfo.TYPE_VARCHAR), String[].class); + testDataType(TypeInfo.TYPE_VARCHAR, String.class); + testDataType(TypeInfo.TYPE_INTEGER, Integer.class); + testDataType(TypeInfo.TYPE_BIGINT, Long.class); + testDataType(TypeInfo.TYPE_BOOLEAN, Boolean.class); + testDataType(TypeInfo.TYPE_DOUBLE, Double.class); + testDataType(TypeInfo.TYPE_TINYINT, Byte.class); + testDataType(TypeInfo.TYPE_SMALLINT, Short.class); + testDataType(TypeInfo.TYPE_REAL, Float.class); + testDataType(TypeInfo.TYPE_VARBINARY, byte[].class); + testDataType(TypeInfo.TYPE_UUID, UUID.class); + testDataType(TypeInfo.TYPE_NULL, Void.class); + testDataType(TypeInfo.TYPE_NUMERIC_FLOATING_POINT, BigDecimal.class); + testDataType(TypeInfo.TYPE_DATE, Date.class); + testDataType(TypeInfo.TYPE_TIME, Time.class); + testDataType(TypeInfo.TYPE_TIMESTAMP, Timestamp.class); + testDataType(TypeInfo.TYPE_TIMESTAMP, java.util.Date.class); + testDataType(TypeInfo.TYPE_CLOB, java.io.Reader.class); + testDataType(TypeInfo.TYPE_CLOB, java.sql.Clob.class); + testDataType(TypeInfo.TYPE_BLOB, java.io.InputStream.class); + testDataType(TypeInfo.TYPE_BLOB, java.sql.Blob.class); + testDataType(TypeInfo.getTypeInfo(Value.ARRAY, Integer.MAX_VALUE, 0, TypeInfo.TYPE_JAVA_OBJECT), + Object[].class); + testDataType(TypeInfo.TYPE_JAVA_OBJECT, StringBuffer.class); } - private void testDataType(int type, Class clazz) { - assertEquals(type, DataType.getTypeFromClass(clazz)); + private void testDataType(TypeInfo type, Class clazz) { + assertEquals(type, ValueToObjectConverter2.classToType(clazz)); } private void testDouble(boolean useFloat) { @@ -329,65 +201,72 @@ private void testDouble(boolean useFloat) { Double.POSITIVE_INFINITY, Double.NaN }; + int[] signum = { + -1, + -1, + 0, + 1, + 1, + 0 + }; Value[] values = new Value[d.length]; 
for (int i = 0; i < d.length; i++) { - Value v = useFloat ? (Value) ValueFloat.get((float) d[i]) + Value v = useFloat ? (Value) ValueReal.get((float) d[i]) : (Value) ValueDouble.get(d[i]); values[i] = v; - assertTrue(values[i].compareTypeSafe(values[i], null) == 0); + assertTrue(values[i].compareTypeSafe(values[i], null, null) == 0); assertTrue(v.equals(v)); - assertEquals(Integer.compare(i, 2), v.getSignum()); + assertEquals(signum[i], v.getSignum()); } for (int i = 0; i < d.length - 1; i++) { - assertTrue(values[i].compareTypeSafe(values[i+1], null) < 0); - assertTrue(values[i + 1].compareTypeSafe(values[i], null) > 0); + assertTrue(values[i].compareTypeSafe(values[i+1], null, null) < 0); + assertTrue(values[i + 1].compareTypeSafe(values[i], null, null) > 0); assertFalse(values[i].equals(values[i+1])); } } private void testTimestamp() { - ValueTimestamp valueTs = ValueTimestamp.parse("2000-01-15 10:20:30.333222111"); + ValueTimestamp valueTs = ValueTimestamp.parse("2000-01-15 10:20:30.333222111", null); Timestamp ts = Timestamp.valueOf("2000-01-15 10:20:30.333222111"); assertEquals(ts.toString(), valueTs.getString()); - assertEquals(ts, valueTs.getTimestamp()); + assertEquals(ts, LegacyDateTimeUtils.toTimestamp(null, null, valueTs)); Calendar c = Calendar.getInstance(TimeZone.getTimeZone("Europe/Berlin")); c.set(2018, 02, 25, 1, 59, 00); c.set(Calendar.MILLISECOND, 123); long expected = c.getTimeInMillis(); - ts = ValueTimestamp.parse("2018-03-25 01:59:00.123123123 Europe/Berlin").getTimestamp(); + ts = LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("2018-03-25 01:59:00.123123123 Europe/Berlin", null)); assertEquals(expected, ts.getTime()); assertEquals(123123123, ts.getNanos()); - ts = ValueTimestamp.parse("2018-03-25 01:59:00.123123123+01").getTimestamp(); + ts = LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("2018-03-25 01:59:00.123123123+01", null)); assertEquals(expected, ts.getTime()); assertEquals(123123123, 
ts.getNanos()); expected += 60000; // 1 minute - ts = ValueTimestamp.parse("2018-03-25 03:00:00.123123123 Europe/Berlin").getTimestamp(); + ts = LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("2018-03-25 03:00:00.123123123 Europe/Berlin", null)); assertEquals(expected, ts.getTime()); assertEquals(123123123, ts.getNanos()); - ts = ValueTimestamp.parse("2018-03-25 03:00:00.123123123+02").getTimestamp(); + ts = LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("2018-03-25 03:00:00.123123123+02", null)); assertEquals(expected, ts.getTime()); assertEquals(123123123, ts.getNanos()); } private void testArray() { - ValueArray src = ValueArray.get(String.class, - new Value[] {ValueString.get("1"), ValueString.get("22"), ValueString.get("333")}); - assertEquals(6, src.getType().getPrecision()); - assertSame(src, src.convertPrecision(5, false)); - assertSame(src, src.convertPrecision(6, true)); - ValueArray exp = ValueArray.get(String.class, - new Value[] {ValueString.get("1"), ValueString.get("22"), ValueString.get("33")}); - Value got = src.convertPrecision(5, true); + ValueArray src = ValueArray.get( + new Value[] {ValueVarchar.get("1"), ValueVarchar.get("22"), ValueVarchar.get("333")}, null); + assertEquals(3, src.getType().getPrecision()); + assertSame(src, src.castTo(TypeInfo.getTypeInfo(Value.ARRAY, 3L, 0, TypeInfo.TYPE_VARCHAR), null)); + ValueArray exp = ValueArray.get( + new Value[] {ValueVarchar.get("1"), ValueVarchar.get("22")}, null); + Value got = src.castTo(TypeInfo.getTypeInfo(Value.ARRAY, 2L, 0, TypeInfo.TYPE_VARCHAR), null); assertEquals(exp, got); - assertEquals(String.class, ((ValueArray) got).getComponentType()); - exp = ValueArray.get(String.class, new Value[] {ValueString.get("1"), ValueString.get("22")}); - got = src.convertPrecision(3, true); + assertEquals(Value.VARCHAR, ((ValueArray) got).getComponentType().getValueType()); + exp = ValueArray.get(TypeInfo.TYPE_VARCHAR, new Value[0], null); + got = 
src.castTo(TypeInfo.getTypeInfo(Value.ARRAY, 0L, 0, TypeInfo.TYPE_VARCHAR), null); assertEquals(exp, got); - assertEquals(String.class, ((ValueArray) got).getComponentType()); - exp = ValueArray.get(String.class, new Value[0]); - got = src.convertPrecision(0, true); - assertEquals(exp, got); - assertEquals(String.class, ((ValueArray) got).getComponentType()); + assertEquals(Value.VARCHAR, ((ValueArray) got).getComponentType().getValueType()); } private void testUUID() { @@ -408,40 +287,29 @@ private void testUUID() { String uuidStr = "12345678-1234-4321-8765-123456789012"; UUID origUUID = UUID.fromString(uuidStr); - ValueJavaObject valObj = ValueJavaObject.getNoCopy(origUUID, null, null); - Value valUUID = valObj.convertTo(Value.UUID); - assertTrue(valUUID instanceof ValueUuid); - assertTrue(valUUID.getString().equals(uuidStr)); - assertTrue(valUUID.getObject().equals(origUUID)); - - ValueJavaObject voString = ValueJavaObject.getNoCopy( - new String("This is not a ValueUuid object"), null, null); - try { - voString.convertTo(Value.UUID); - fail(); - } catch (DbException expected) { - } + ValueJavaObject valObj = ValueJavaObject.getNoCopy(JdbcUtils.serialize(origUUID, null)); + ValueUuid valUUID = valObj.convertToUuid(); + assertEquals(uuidStr, valUUID.getString()); + assertEquals(origUUID, valUUID.getUuid()); + + ValueJavaObject voString = ValueJavaObject.getNoCopy(JdbcUtils.serialize( + new String("This is not a ValueUuid object"), null)); + assertThrows(ErrorCode.DESERIALIZATION_FAILED_1, () -> voString.convertToUuid()); } private void testModulusDouble() { final ValueDouble vd1 = ValueDouble.get(12); - new AssertThrows(ErrorCode.DIVISION_BY_ZERO_1) { @Override - public void test() { - vd1.modulus(ValueDouble.get(0)); - }}; + assertThrows(ErrorCode.DIVISION_BY_ZERO_1, () -> vd1.modulus(ValueDouble.ZERO)); ValueDouble vd2 = ValueDouble.get(10); ValueDouble vd3 = vd1.modulus(vd2); assertEquals(2, vd3.getDouble()); } private void testModulusDecimal() { - final 
ValueDecimal vd1 = ValueDecimal.get(new BigDecimal(12)); - new AssertThrows(ErrorCode.DIVISION_BY_ZERO_1) { @Override - public void test() { - vd1.modulus(ValueDecimal.get(new BigDecimal(0))); - }}; - ValueDecimal vd2 = ValueDecimal.get(new BigDecimal(10)); - ValueDecimal vd3 = vd1.modulus(vd2); + final ValueNumeric vd1 = ValueNumeric.get(new BigDecimal(12)); + assertThrows(ErrorCode.DIVISION_BY_ZERO_1, () -> vd1.modulus(ValueNumeric.ZERO)); + ValueNumeric vd2 = ValueNumeric.get(new BigDecimal(10)); + Value vd3 = vd1.modulus(vd2); assertEquals(2, vd3.getDouble()); } @@ -463,7 +331,7 @@ private void testLobComparison() throws SQLException { assertEquals(1, testLobComparisonImpl(null, Value.BLOB, 512, 512, 210, 200)); assertEquals(1, testLobComparisonImpl(null, Value.CLOB, 512, 512, 'B', 'A')); try (Connection c = DriverManager.getConnection("jdbc:h2:mem:testValue")) { - Database dh = ((Session) ((JdbcConnection) c).getSession()).getDatabase(); + Database dh = ((SessionLocal) ((JdbcConnection) c).getSession()).getDatabase(); assertEquals(1, testLobComparisonImpl(dh, Value.BLOB, 1_024, 1_024, 210, 200)); assertEquals(1, testLobComparisonImpl(dh, Value.CLOB, 1_024, 1_024, 'B', 'A')); assertEquals(-1, testLobComparisonImpl(dh, Value.BLOB, 10_000, 10_000, 200, 210)); @@ -489,12 +357,12 @@ private static int testLobComparisonImpl(DataHandler dh, int type, int size1, in } Value lob1 = createLob(dh, type, bytes1); Value lob2 = createLob(dh, type, bytes2); - return lob1.compareTypeSafe(lob2, null); + return lob1.compareTypeSafe(lob2, null, null); } private static Value createLob(DataHandler dh, int type, byte[] bytes) { if (dh == null) { - return ValueLobDb.createSmallLob(type, bytes); + return type == Value.BLOB ? 
ValueBlob.createSmall(bytes) : ValueClob.createSmall(bytes); } ByteArrayInputStream in = new ByteArrayInputStream(bytes); if (type == Value.BLOB) { @@ -506,27 +374,21 @@ private static Value createLob(DataHandler dh, int type, byte[] bytes) { private void testTypeInfo() { testTypeInfoCheck(Value.UNKNOWN, -1, -1, -1, TypeInfo.TYPE_UNKNOWN); - try { - TypeInfo.getTypeInfo(Value.UNKNOWN); - fail(); - } catch (DbException ex) { - assertEquals(ErrorCode.UNKNOWN_DATA_TYPE_1, ex.getErrorCode()); - } + assertThrows(ErrorCode.UNKNOWN_DATA_TYPE_1, () -> TypeInfo.getTypeInfo(Value.UNKNOWN)); testTypeInfoCheck(Value.NULL, 1, 0, 4, TypeInfo.TYPE_NULL, TypeInfo.getTypeInfo(Value.NULL)); testTypeInfoCheck(Value.BOOLEAN, 1, 0, 5, TypeInfo.TYPE_BOOLEAN, TypeInfo.getTypeInfo(Value.BOOLEAN)); - testTypeInfoCheck(Value.BYTE, 3, 0, 4, TypeInfo.TYPE_BYTE, TypeInfo.getTypeInfo(Value.BYTE)); - testTypeInfoCheck(Value.SHORT, 5, 0, 6, TypeInfo.TYPE_SHORT, TypeInfo.getTypeInfo(Value.SHORT)); - testTypeInfoCheck(Value.INT, 10, 0, 11, TypeInfo.TYPE_INT, TypeInfo.getTypeInfo(Value.INT)); - testTypeInfoCheck(Value.LONG, 19, 0, 20, TypeInfo.TYPE_LONG, TypeInfo.getTypeInfo(Value.LONG)); + testTypeInfoCheck(Value.TINYINT, 8, 0, 4, TypeInfo.TYPE_TINYINT, TypeInfo.getTypeInfo(Value.TINYINT)); + testTypeInfoCheck(Value.SMALLINT, 16, 0, 6, TypeInfo.TYPE_SMALLINT, TypeInfo.getTypeInfo(Value.SMALLINT)); + testTypeInfoCheck(Value.INTEGER, 32, 0, 11, TypeInfo.TYPE_INTEGER, TypeInfo.getTypeInfo(Value.INTEGER)); + testTypeInfoCheck(Value.BIGINT, 64, 0, 20, TypeInfo.TYPE_BIGINT, TypeInfo.getTypeInfo(Value.BIGINT)); - testTypeInfoCheck(Value.FLOAT, 7, 0, 15, TypeInfo.TYPE_FLOAT, TypeInfo.getTypeInfo(Value.FLOAT)); - testTypeInfoCheck(Value.DOUBLE, 17, 0, 24, TypeInfo.TYPE_DOUBLE, TypeInfo.getTypeInfo(Value.DOUBLE)); - testTypeInfoCheck(Value.DECIMAL, Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE, - TypeInfo.TYPE_DECIMAL, TypeInfo.getTypeInfo(Value.DECIMAL)); - testTypeInfoCheck(Value.DECIMAL, 65_535, 
32_767, 65_537, TypeInfo.TYPE_DECIMAL_DEFAULT); + testTypeInfoCheck(Value.REAL, 24, 0, 15, TypeInfo.TYPE_REAL, TypeInfo.getTypeInfo(Value.REAL)); + testTypeInfoCheck(Value.DOUBLE, 53, 0, 24, TypeInfo.TYPE_DOUBLE, TypeInfo.getTypeInfo(Value.DOUBLE)); + testTypeInfoCheck(Value.NUMERIC, MAX_NUMERIC_PRECISION, MAX_NUMERIC_PRECISION / 2, MAX_NUMERIC_PRECISION + 2, + TypeInfo.TYPE_NUMERIC_FLOATING_POINT); testTypeInfoCheck(Value.TIME, 18, 9, 18, TypeInfo.TYPE_TIME, TypeInfo.getTypeInfo(Value.TIME)); for (int s = 0; s <= 9; s++) { @@ -546,30 +408,29 @@ private void testTypeInfo() { testTypeInfoCheck(Value.TIMESTAMP_TZ, d, s, d, TypeInfo.getTypeInfo(Value.TIMESTAMP_TZ, 0, s, null)); } - testTypeInfoCheck(Value.BYTES, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.getTypeInfo(Value.BYTES)); + testTypeInfoCheck(Value.BINARY, 1, 0, 2, TypeInfo.getTypeInfo(Value.BINARY)); + testTypeInfoCheck(Value.VARBINARY, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH * 2, + TypeInfo.getTypeInfo(Value.VARBINARY)); testTypeInfoCheck(Value.BLOB, Long.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.getTypeInfo(Value.BLOB)); testTypeInfoCheck(Value.CLOB, Long.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.getTypeInfo(Value.CLOB)); - testTypeInfoCheck(Value.STRING, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.TYPE_STRING, - TypeInfo.getTypeInfo(Value.STRING)); - testTypeInfoCheck(Value.STRING_FIXED, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, - TypeInfo.getTypeInfo(Value.STRING_FIXED)); - testTypeInfoCheck(Value.STRING_IGNORECASE, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, - TypeInfo.getTypeInfo(Value.STRING_IGNORECASE)); + testTypeInfoCheck(Value.VARCHAR, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH, TypeInfo.TYPE_VARCHAR, + TypeInfo.getTypeInfo(Value.VARCHAR)); + testTypeInfoCheck(Value.CHAR, 1, 0, 1, TypeInfo.getTypeInfo(Value.CHAR)); + testTypeInfoCheck(Value.VARCHAR_IGNORECASE, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH, + TypeInfo.getTypeInfo(Value.VARCHAR_IGNORECASE)); - testTypeInfoCheck(Value.ARRAY, 
Integer.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.TYPE_ARRAY, + testTypeInfoCheck(Value.ARRAY, MAX_ARRAY_CARDINALITY, 0, Integer.MAX_VALUE, TypeInfo.TYPE_ARRAY_UNKNOWN, TypeInfo.getTypeInfo(Value.ARRAY)); - testTypeInfoCheck(Value.RESULT_SET, Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE, - TypeInfo.TYPE_RESULT_SET, TypeInfo.getTypeInfo(Value.RESULT_SET)); - testTypeInfoCheck(Value.ROW, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.TYPE_ROW, + testTypeInfoCheck(Value.ROW, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.TYPE_ROW_EMPTY, TypeInfo.getTypeInfo(Value.ROW)); - testTypeInfoCheck(Value.JAVA_OBJECT, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.TYPE_JAVA_OBJECT, + testTypeInfoCheck(Value.JAVA_OBJECT, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH * 2, TypeInfo.TYPE_JAVA_OBJECT, TypeInfo.getTypeInfo(Value.JAVA_OBJECT)); testTypeInfoCheck(Value.UUID, 16, 0, 36, TypeInfo.TYPE_UUID, TypeInfo.getTypeInfo(Value.UUID)); - testTypeInfoCheck(Value.GEOMETRY, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.TYPE_GEOMETRY, + testTypeInfoCheck(Value.GEOMETRY, MAX_STRING_LENGTH, 0, Integer.MAX_VALUE, TypeInfo.TYPE_GEOMETRY, TypeInfo.getTypeInfo(Value.GEOMETRY)); - testTypeInfoCheck(Value.ENUM, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.TYPE_ENUM_UNDEFINED, + testTypeInfoCheck(Value.ENUM, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH, TypeInfo.TYPE_ENUM_UNDEFINED, TypeInfo.getTypeInfo(Value.ENUM)); testTypeInfoInterval1(Value.INTERVAL_YEAR); @@ -585,6 +446,9 @@ private void testTypeInfo() { testTypeInfoInterval1(Value.INTERVAL_HOUR_TO_MINUTE); testTypeInfoInterval2(Value.INTERVAL_HOUR_TO_SECOND); testTypeInfoInterval2(Value.INTERVAL_MINUTE_TO_SECOND); + + testTypeInfoCheck(Value.JSON, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH, TypeInfo.TYPE_JSON, + TypeInfo.getTypeInfo(Value.JSON)); } private void testTypeInfoInterval1(int type) { @@ -618,4 +482,71 @@ private void testTypeInfoCheck(int valueType, long precision, int scale, int dis assertEquals(displaySize, 
typeInfo.getDisplaySize()); } + private void testH2Type() { + assertEquals(Value.CHAR, (int) H2Type.CHAR.getVendorTypeNumber()); + assertEquals(Value.VARCHAR, (int) H2Type.VARCHAR.getVendorTypeNumber()); + assertEquals(Value.CLOB, (int) H2Type.CLOB.getVendorTypeNumber()); + assertEquals(Value.VARCHAR_IGNORECASE, (int) H2Type.VARCHAR_IGNORECASE.getVendorTypeNumber()); + assertEquals(Value.BINARY, (int) H2Type.BINARY.getVendorTypeNumber()); + assertEquals(Value.VARBINARY, (int) H2Type.VARBINARY.getVendorTypeNumber()); + assertEquals(Value.BLOB, (int) H2Type.BLOB.getVendorTypeNumber()); + assertEquals(Value.BOOLEAN, (int) H2Type.BOOLEAN.getVendorTypeNumber()); + assertEquals(Value.TINYINT, (int) H2Type.TINYINT.getVendorTypeNumber()); + assertEquals(Value.SMALLINT, (int) H2Type.SMALLINT.getVendorTypeNumber()); + assertEquals(Value.INTEGER, (int) H2Type.INTEGER.getVendorTypeNumber()); + assertEquals(Value.BIGINT, (int) H2Type.BIGINT.getVendorTypeNumber()); + assertEquals(Value.NUMERIC, (int) H2Type.NUMERIC.getVendorTypeNumber()); + assertEquals(Value.REAL, (int) H2Type.REAL.getVendorTypeNumber()); + assertEquals(Value.DOUBLE, (int) H2Type.DOUBLE_PRECISION.getVendorTypeNumber()); + assertEquals(Value.DECFLOAT, (int) H2Type.DECFLOAT.getVendorTypeNumber()); + assertEquals(Value.DATE, (int) H2Type.DATE.getVendorTypeNumber()); + assertEquals(Value.TIME, (int) H2Type.TIME.getVendorTypeNumber()); + assertEquals(Value.TIME_TZ, (int) H2Type.TIME_WITH_TIME_ZONE.getVendorTypeNumber()); + assertEquals(Value.TIMESTAMP, (int) H2Type.TIMESTAMP.getVendorTypeNumber()); + assertEquals(Value.TIMESTAMP_TZ, (int) H2Type.TIMESTAMP_WITH_TIME_ZONE.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_YEAR, (int) H2Type.INTERVAL_YEAR.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_MONTH, (int) H2Type.INTERVAL_MONTH.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_DAY, (int) H2Type.INTERVAL_DAY.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_HOUR, (int) 
H2Type.INTERVAL_HOUR.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_MINUTE, (int) H2Type.INTERVAL_MINUTE.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_SECOND, (int) H2Type.INTERVAL_SECOND.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_YEAR_TO_MONTH, (int) H2Type.INTERVAL_YEAR_TO_MONTH.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_DAY_TO_HOUR, (int) H2Type.INTERVAL_DAY_TO_HOUR.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_DAY_TO_MINUTE, (int) H2Type.INTERVAL_DAY_TO_MINUTE.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_DAY_TO_SECOND, (int) H2Type.INTERVAL_DAY_TO_SECOND.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_HOUR_TO_MINUTE, (int) H2Type.INTERVAL_HOUR_TO_MINUTE.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_HOUR_TO_SECOND, (int) H2Type.INTERVAL_HOUR_TO_SECOND.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_MINUTE_TO_SECOND, (int) H2Type.INTERVAL_MINUTE_TO_SECOND.getVendorTypeNumber()); + assertEquals(Value.JAVA_OBJECT, (int) H2Type.JAVA_OBJECT.getVendorTypeNumber()); + assertEquals(Value.ENUM, (int) H2Type.ENUM.getVendorTypeNumber()); + assertEquals(Value.GEOMETRY, (int) H2Type.GEOMETRY.getVendorTypeNumber()); + assertEquals(Value.JSON, (int) H2Type.JSON.getVendorTypeNumber()); + assertEquals(Value.UUID, (int) H2Type.UUID.getVendorTypeNumber()); + assertEquals(Value.ARRAY, (int) H2Type.array(H2Type.VARCHAR).getVendorTypeNumber()); + assertEquals(Value.ROW, (int) H2Type.row(H2Type.VARCHAR).getVendorTypeNumber()); + } + + private void testHigherType() { + testHigherTypeNumeric(15L, 6, 10L, 1, 5L, 6); + testHigherTypeNumeric(15L, 6, 5L, 6, 10L, 1); + TypeInfo intArray10 = TypeInfo.getTypeInfo(Value.ARRAY, 10, 0, TypeInfo.TYPE_INTEGER); + TypeInfo bigintArray1 = TypeInfo.getTypeInfo(Value.ARRAY, 1, 0, TypeInfo.TYPE_BIGINT); + TypeInfo bigintArray10 = TypeInfo.getTypeInfo(Value.ARRAY, 10, 0, TypeInfo.TYPE_BIGINT); + assertEquals(bigintArray10, TypeInfo.getHigherType(intArray10, bigintArray1)); + 
TypeInfo intArray10Array1 = TypeInfo.getTypeInfo(Value.ARRAY, 1, 0, intArray10); + TypeInfo bigintArray1Array10 = TypeInfo.getTypeInfo(Value.ARRAY, 10, 0, bigintArray1); + TypeInfo bigintArray10Array10 = TypeInfo.getTypeInfo(Value.ARRAY, 10, 0, bigintArray10); + assertEquals(bigintArray10Array10, TypeInfo.getHigherType(intArray10Array1, bigintArray1Array10)); + assertEquals(bigintArray10Array10, TypeInfo.getHigherType(intArray10, bigintArray1Array10)); + TypeInfo bigintArray10Array1 = TypeInfo.getTypeInfo(Value.ARRAY, 1, 0, bigintArray10); + assertEquals(bigintArray10Array1, TypeInfo.getHigherType(intArray10Array1, bigintArray1)); + } + + private void testHigherTypeNumeric(long expectedPrecision, int expectedScale, long precision1, int scale1, + long precision2, int scale2) { + assertEquals(TypeInfo.getTypeInfo(Value.NUMERIC, expectedPrecision, expectedScale, null), + TypeInfo.getHigherType(TypeInfo.getTypeInfo(Value.NUMERIC, precision1, scale1, null), + TypeInfo.getTypeInfo(Value.NUMERIC, precision2, scale2, null))); + } + } diff --git a/h2/src/test/org/h2/test/unit/TestValueMemory.java b/h2/src/test/org/h2/test/unit/TestValueMemory.java index 289c6a8a95..96ac632472 100644 --- a/h2/src/test/org/h2/test/unit/TestValueMemory.java +++ b/h2/src/test/org/h2/test/unit/TestValueMemory.java @@ -1,57 +1,63 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; import java.io.StringReader; import java.math.BigDecimal; import java.sql.SQLException; import java.util.ArrayList; import java.util.IdentityHashMap; import java.util.Random; - import org.h2.api.IntervalQualifier; -import org.h2.api.JavaObjectSerializer; import org.h2.engine.Constants; -import org.h2.result.SimpleResult; import org.h2.store.DataHandler; import org.h2.store.FileStore; -import org.h2.store.LobStorageFrontend; +import org.h2.store.LobStorageInterface; import org.h2.test.TestBase; import org.h2.test.utils.MemoryFootprint; +import org.h2.util.DateTimeUtils; import org.h2.util.SmallLRUCache; import org.h2.util.TempFileDeleter; import org.h2.util.Utils; import org.h2.value.CompareMode; -import org.h2.value.DataType; import org.h2.value.Value; import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBinary; +import org.h2.value.ValueBlob; import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; +import org.h2.value.ValueChar; +import org.h2.value.ValueClob; import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; +import org.h2.value.ValueDecfloat; import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; import org.h2.value.ValueGeometry; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; import org.h2.value.ValueInterval; import org.h2.value.ValueJavaObject; -import org.h2.value.ValueLong; +import org.h2.value.ValueJson; +import org.h2.value.ValueLob; import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; import org.h2.value.ValueRow; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueStringFixed; -import org.h2.value.ValueStringIgnoreCase; +import 
org.h2.value.ValueSmallint; import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; import org.h2.value.ValueTimestamp; import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueTinyint; import org.h2.value.ValueUuid; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; +import org.h2.value.ValueVarcharIgnoreCase; /** * Tests the memory consumption of values. Values can estimate how much memory @@ -59,10 +65,14 @@ */ public class TestValueMemory extends TestBase implements DataHandler { + private static final long MIN_ABSOLUTE_DAY = DateTimeUtils.absoluteDayFromDateValue(DateTimeUtils.MIN_DATE_VALUE); + + private static final long MAX_ABSOLUTE_DAY = DateTimeUtils.absoluteDayFromDateValue(DateTimeUtils.MAX_DATE_VALUE); + private final Random random = new Random(1); private final SmallLRUCache lobFileListCache = SmallLRUCache .newInstance(128); - private LobStorageFrontend lobStorage; + private LobStorageTest lobStorage; /** * Run just this test. @@ -73,7 +83,7 @@ public static void main(String... 
a) throws Exception { // run using -javaagent:ext/h2-1.2.139.jar TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -117,8 +127,8 @@ public void test() throws SQLException { } private void testCompare() { - ValueDecimal a = ValueDecimal.get(new BigDecimal("0.0")); - ValueDecimal b = ValueDecimal.get(new BigDecimal("-0.00")); + ValueNumeric a = ValueNumeric.get(new BigDecimal("0.0")); + ValueNumeric b = ValueNumeric.get(new BigDecimal("-0.00")); assertTrue(a.hashCode() != b.hashCode()); assertFalse(a.equals(b)); } @@ -164,41 +174,40 @@ private Value create(int type) throws SQLException { return ValueNull.INSTANCE; case Value.BOOLEAN: return ValueBoolean.FALSE; - case Value.BYTE: - return ValueByte.get((byte) random.nextInt()); - case Value.SHORT: - return ValueShort.get((short) random.nextInt()); - case Value.INT: - return ValueInt.get(random.nextInt()); - case Value.LONG: - return ValueLong.get(random.nextLong()); - case Value.DECIMAL: - return ValueDecimal.get(new BigDecimal(random.nextInt())); + case Value.TINYINT: + return ValueTinyint.get((byte) random.nextInt()); + case Value.SMALLINT: + return ValueSmallint.get((short) random.nextInt()); + case Value.INTEGER: + return ValueInteger.get(random.nextInt()); + case Value.BIGINT: + return ValueBigint.get(random.nextLong()); + case Value.NUMERIC: + return ValueNumeric.get(new BigDecimal(random.nextInt())); // + "12123344563456345634565234523451312312" case Value.DOUBLE: return ValueDouble.get(random.nextDouble()); - case Value.FLOAT: - return ValueFloat.get(random.nextFloat()); + case Value.REAL: + return ValueReal.get(random.nextFloat()); + case Value.DECFLOAT: + return ValueDecfloat.get(new BigDecimal(random.nextInt())); case Value.TIME: - return ValueTime.get(new java.sql.Time(random.nextLong())); + return ValueTime.fromNanos(randomTimeNanos()); + case Value.TIME_TZ: + return ValueTimeTimeZone.fromNanos(randomTimeNanos(), 
randomZoneOffset()); case Value.DATE: - return ValueDate.get(new java.sql.Date(random.nextLong())); + return ValueDate.fromDateValue(randomDateValue()); case Value.TIMESTAMP: - return ValueTimestamp.fromMillis(random.nextLong()); + return ValueTimestamp.fromDateValueAndNanos(randomDateValue(), randomTimeNanos()); case Value.TIMESTAMP_TZ: - // clamp to max legal value - long nanos = Math.max(Math.min(random.nextLong(), - 24L * 60 * 60 * 1000 * 1000 * 1000 - 1), 0); - int timeZoneOffsetMins = (int) (random.nextFloat() * (24 * 60)) - - (12 * 60); return ValueTimestampTimeZone.fromDateValueAndNanos( - random.nextLong(), nanos, (short) timeZoneOffsetMins); - case Value.BYTES: - return ValueBytes.get(randomBytes(random.nextInt(1000))); - case Value.STRING: - return ValueString.get(randomString(random.nextInt(100))); - case Value.STRING_IGNORECASE: - return ValueStringIgnoreCase.get(randomString(random.nextInt(100))); + randomDateValue(), randomTimeNanos(), randomZoneOffset()); + case Value.VARBINARY: + return ValueVarbinary.get(randomBytes(random.nextInt(1000))); + case Value.VARCHAR: + return ValueVarchar.get(randomString(random.nextInt(100))); + case Value.VARCHAR_IGNORECASE: + return ValueVarcharIgnoreCase.get(randomString(random.nextInt(100))); case Value.BLOB: { int len = (int) Math.abs(random.nextGaussian() * 10); byte[] data = randomBytes(len); @@ -210,23 +219,17 @@ private Value create(int type) throws SQLException { return getLobStorage().createClob(new StringReader(s), len); } case Value.ARRAY: - return ValueArray.get(createArray()); + return ValueArray.get(createArray(), null); case Value.ROW: return ValueRow.get(createArray()); - case Value.RESULT_SET: - return ValueResultSet.get(new SimpleResult()); case Value.JAVA_OBJECT: - return ValueJavaObject.getNoCopy(null, randomBytes(random.nextInt(100)), this); + return ValueJavaObject.getNoCopy(randomBytes(random.nextInt(100))); case Value.UUID: return ValueUuid.get(random.nextLong(), random.nextLong()); - case 
Value.STRING_FIXED: - return ValueStringFixed.get(randomString(random.nextInt(100))); + case Value.CHAR: + return ValueChar.get(randomString(random.nextInt(100))); case Value.GEOMETRY: - if (DataType.GEOMETRY_CLASS == null) { - return ValueNull.INSTANCE; - } - return ValueGeometry.get("POINT (" + random.nextInt(100) + " " + - random.nextInt(100) + ")"); + return ValueGeometry.get("POINT (" + random.nextInt(100) + ' ' + random.nextInt(100) + ')'); case Value.INTERVAL_YEAR: case Value.INTERVAL_MONTH: case Value.INTERVAL_DAY: @@ -246,16 +249,33 @@ private Value create(int type) throws SQLException { case Value.INTERVAL_HOUR_TO_MINUTE: return ValueInterval.from(IntervalQualifier.valueOf(type - Value.INTERVAL_YEAR), random.nextBoolean(), random.nextInt(Integer.MAX_VALUE), random.nextInt(12)); + case Value.JSON: + return ValueJson.fromJson("{\"key\":\"value\"}"); + case Value.BINARY: + return ValueBinary.get(randomBytes(random.nextInt(1000))); default: throw new AssertionError("type=" + type); } } + private long randomDateValue() { + return DateTimeUtils.dateValueFromAbsoluteDay( + (random.nextLong() & Long.MAX_VALUE) % (MAX_ABSOLUTE_DAY - MIN_ABSOLUTE_DAY + 1) + MIN_ABSOLUTE_DAY); + } + + private long randomTimeNanos() { + return (random.nextLong() & Long.MAX_VALUE) % DateTimeUtils.NANOS_PER_DAY; + } + + private short randomZoneOffset() { + return (short) (random.nextInt() % (18 * 60)); + } + private Value[] createArray() throws SQLException { int len = random.nextInt(20); Value[] list = new Value[len]; for (int i = 0; i < list.length; i++) { - list[i] = create(Value.STRING); + list[i] = create(Value.VARCHAR); } return list; } @@ -295,11 +315,6 @@ public String getDatabasePath() { return getBaseDir() + "/valueMemory"; } - @Override - public String getLobCompressionAlgorithm(int type) { - return "LZF"; - } - @Override public Object getLobSyncObject() { return this; @@ -326,9 +341,9 @@ public TempFileDeleter getTempFileDeleter() { } @Override - public LobStorageFrontend 
getLobStorage() { + public LobStorageInterface getLobStorage() { if (lobStorage == null) { - lobStorage = new LobStorageFrontend(this); + lobStorage = new LobStorageTest(); } return lobStorage; } @@ -339,13 +354,72 @@ public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, return -1; } - @Override - public JavaObjectSerializer getJavaObjectSerializer() { - return null; - } - @Override public CompareMode getCompareMode() { return CompareMode.getInstance(null, 0); } + + + private class LobStorageTest implements LobStorageInterface { + + LobStorageTest() { + } + + @Override + public void removeLob(ValueLob lob) { + // not stored in the database + } + + @Override + public InputStream getInputStream(long lobId, + long byteCount) throws IOException { + // this method is only implemented on the server side of a TCP connection + throw new IllegalStateException(); + } + + @Override + public InputStream getInputStream(long lobId, int tableId, + long byteCount) throws IOException { + // this method is only implemented on the server side of a TCP connection + throw new IllegalStateException(); + } + + @Override + public boolean isReadOnly() { + return false; + } + + @Override + public ValueLob copyLob(ValueLob old, int tableId) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeAllForTable(int tableId) { + throw new UnsupportedOperationException(); + } + + @Override + public ValueBlob createBlob(InputStream in, long maxLength) { + // need to use a temp file, because the input stream could come from + // the same database, which would create a weird situation (trying + // to read a block while writing something) + return ValueBlob.createTempBlob(in, maxLength, TestValueMemory.this); + } + + /** + * Create a CLOB object. 
+ * + * @param reader the reader + * @param maxLength the maximum length (-1 if not known) + * @return the LOB + */ + @Override + public ValueClob createClob(Reader reader, long maxLength) { + // need to use a temp file, because the input stream could come from + // the same database, which would create a weird situation (trying + // to read a block while writing something) + return ValueClob.createTempClob(reader, maxLength, TestValueMemory.this); + } + } } diff --git a/h2/src/test/org/h2/test/unit/package.html b/h2/src/test/org/h2/test/unit/package.html index de1ce786e3..f87035f40d 100644 --- a/h2/src/test/org/h2/test/unit/package.html +++ b/h2/src/test/org/h2/test/unit/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/utils/AssertThrows.java b/h2/src/test/org/h2/test/utils/AssertThrows.java deleted file mode 100644 index 582923764c..0000000000 --- a/h2/src/test/org/h2/test/utils/AssertThrows.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.utils; - -import java.lang.reflect.Method; -import java.sql.SQLException; -import org.h2.message.DbException; - -/** - * Helper class to simplify negative testing. Usage: - *
          - * new AssertThrows() { public void test() {
          - *     Integer.parseInt("not a number");
          - * }};
          - * 
          - */ -public abstract class AssertThrows { - - /** - * Create a new assertion object, and call the test method to verify the - * expected exception is thrown. - * - * @param expectedExceptionClass the expected exception class - */ - public AssertThrows(final Class expectedExceptionClass) { - this(new ResultVerifier() { - @Override - public boolean verify(Object returnValue, Throwable t, Method m, - Object... args) { - if (t == null) { - throw new AssertionError("Expected an exception of type " + - expectedExceptionClass.getSimpleName() + - " to be thrown, but the method returned successfully"); - } - if (!expectedExceptionClass.isAssignableFrom(t.getClass())) { - AssertionError ae = new AssertionError( - "Expected an exception of type\n" + - expectedExceptionClass.getSimpleName() + - " to be thrown, but the method under test " + - "threw an exception of type\n" + - t.getClass().getSimpleName() + - " (see in the 'Caused by' for the exception " + - "that was thrown)"); - ae.initCause(t); - throw ae; - } - return false; - } - }); - } - - /** - * Create a new assertion object, and call the test method to verify the - * expected exception is thrown. - */ - public AssertThrows() { - this(new ResultVerifier() { - @Override - public boolean verify(Object returnValue, Throwable t, Method m, - Object... args) { - if (t != null) { - throw new AssertionError("Expected an exception " + - "to be thrown, but the method returned successfully"); - } - // all exceptions are fine - return false; - } - }); - } - - /** - * Create a new assertion object, and call the test method to verify the - * expected exception is thrown. - * - * @param expectedErrorCode the error code of the exception - */ - public AssertThrows(final int expectedErrorCode) { - this(new ResultVerifier() { - @Override - public boolean verify(Object returnValue, Throwable t, Method m, - Object... 
args) { - int errorCode; - if (t instanceof DbException) { - errorCode = ((DbException) t).getErrorCode(); - } else if (t instanceof SQLException) { - errorCode = ((SQLException) t).getErrorCode(); - } else { - errorCode = 0; - } - if (errorCode != expectedErrorCode) { - AssertionError ae = new AssertionError( - "Expected an SQLException or DbException with error code " + - expectedErrorCode); - ae.initCause(t); - throw ae; - } - return false; - } - }); - } - - private AssertThrows(ResultVerifier verifier) { - try { - test(); - verifier.verify(null, null, null); - } catch (Exception e) { - verifier.verify(null, e, null); - } - } - - /** - * The test method that is called. - * - * @throws Exception the exception - */ - public abstract void test() throws Exception; - -} diff --git a/h2/src/test/org/h2/test/utils/FilePathDebug.java b/h2/src/test/org/h2/test/utils/FilePathDebug.java index c0f19e831c..13144377a0 100644 --- a/h2/src/test/org/h2/test/utils/FilePathDebug.java +++ b/h2/src/test/org/h2/test/utils/FilePathDebug.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.utils; diff --git a/h2/src/test/org/h2/test/utils/FilePathReorderWrites.java b/h2/src/test/org/h2/test/utils/FilePathReorderWrites.java index 3ec3d5e99d..a8d9c72f28 100644 --- a/h2/src/test/org/h2/test/utils/FilePathReorderWrites.java +++ b/h2/src/test/org/h2/test/utils/FilePathReorderWrites.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.utils; @@ -13,7 +13,7 @@ import java.nio.channels.FileLock; import java.util.ArrayList; import java.util.Random; -import org.h2.store.fs.FileBase; +import org.h2.store.fs.FileBaseDefault; import org.h2.store.fs.FilePath; import org.h2.store.fs.FilePathWrapper; import org.h2.util.IOUtils; @@ -150,7 +150,7 @@ public void delete() { /** * A write-reordering file implementation. */ -class FileReorderWrites extends FileBase { +class FileReorderWrites extends FileBaseDefault { private final FilePathReorderWrites file; /** @@ -186,40 +186,23 @@ public void implCloseChannel() throws IOException { closed = true; } - @Override - public long position() throws IOException { - return readBase.position(); - } - @Override public long size() throws IOException { return readBase.size(); } - @Override - public int read(ByteBuffer dst) throws IOException { - return readBase.read(dst); - } - @Override public int read(ByteBuffer dst, long pos) throws IOException { return readBase.read(dst, pos); } @Override - public FileChannel position(long pos) throws IOException { - readBase.position(pos); - return this; - } - - @Override - public FileChannel truncate(long newSize) throws IOException { + protected void implTruncate(long newSize) throws IOException { long oldSize = readBase.size(); if (oldSize <= newSize) { - return this; + return; } addOperation(new FileWriteOperation(id++, newSize, null)); - return this; } private int addOperation(FileWriteOperation op) throws IOException { @@ -266,11 +249,6 @@ public void force(boolean metaData) throws IOException { applyAll(); } - @Override - public int write(ByteBuffer src) throws IOException { - return write(src, readBase.position()); - } - @Override public int write(ByteBuffer src, long position) throws IOException { if (FilePathReorderWrites.isPartialWrites() && src.remaining() > 2) { diff --git a/h2/src/test/org/h2/test/utils/FilePathUnstable.java 
b/h2/src/test/org/h2/test/utils/FilePathUnstable.java index a92dd83570..6343bf5ab6 100644 --- a/h2/src/test/org/h2/test/utils/FilePathUnstable.java +++ b/h2/src/test/org/h2/test/utils/FilePathUnstable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.utils; diff --git a/h2/src/test/org/h2/test/utils/MemoryFootprint.java b/h2/src/test/org/h2/test/utils/MemoryFootprint.java index 8916ffccd6..ecfe077f82 100644 --- a/h2/src/test/org/h2/test/utils/MemoryFootprint.java +++ b/h2/src/test/org/h2/test/utils/MemoryFootprint.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.utils; @@ -9,7 +9,7 @@ import java.math.BigDecimal; import java.math.BigInteger; import org.h2.engine.Constants; -import org.h2.result.RowImpl; +import org.h2.result.Row; import org.h2.store.Data; import org.h2.util.Profiler; import org.h2.value.Value; @@ -33,8 +33,8 @@ public static void main(String... 
a) { print("BigDecimal", new BigDecimal("0")); print("BigInteger", new BigInteger("0")); print("String", new String("Hello")); - print("Data", Data.create(null, 10, false)); - print("Row", new RowImpl(new Value[0], 0)); + print("Data", Data.create(10)); + print("Row", Row.get(new Value[0], 0)); System.out.println(); for (int i = 1; i < 128; i += i) { diff --git a/h2/src/test/org/h2/test/utils/OutputCatcher.java b/h2/src/test/org/h2/test/utils/OutputCatcher.java index c0e6ada893..ef9362199a 100644 --- a/h2/src/test/org/h2/test/utils/OutputCatcher.java +++ b/h2/src/test/org/h2/test/utils/OutputCatcher.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.utils; @@ -46,7 +46,7 @@ public void stop() { System.setOut(out.print); System.err.flush(); System.setErr(err.print); - output = new String(buff.toByteArray()); + output = buff.toString(); } /** diff --git a/h2/src/test/org/h2/test/utils/ProxyCodeGenerator.java b/h2/src/test/org/h2/test/utils/ProxyCodeGenerator.java deleted file mode 100644 index 5560c47aca..0000000000 --- a/h2/src/test/org/h2/test/utils/ProxyCodeGenerator.java +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.utils; - -import java.io.PrintWriter; -import java.io.StringWriter; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.util.HashMap; -import java.util.TreeMap; -import java.util.TreeSet; -import org.h2.util.SourceCompiler; - -/** - * A code generator for class proxies. 
- */ -public class ProxyCodeGenerator { - - private static SourceCompiler compiler = new SourceCompiler(); - private static HashMap, Class> proxyMap = new HashMap<>(); - - private final TreeSet imports = new TreeSet<>(); - private final TreeMap methods = new TreeMap<>(); - private String packageName; - private String className; - private Class extendsClass; - private Constructor constructor; - - /** - * Check whether there is already a proxy class generated. - * - * @param c the class - * @return true if yes - */ - public static boolean isGenerated(Class c) { - return proxyMap.containsKey(c); - } - - /** - * Generate a proxy class. The returned class extends the given class. - * - * @param c the class to extend - * @return the proxy class - */ - public static Class getClassProxy(Class c) throws ClassNotFoundException { - Class p = proxyMap.get(c); - if (p != null) { - return p; - } - // TODO how to extend a class with private constructor - // TODO call right constructor - // TODO use the right package - ProxyCodeGenerator cg = new ProxyCodeGenerator(); - cg.setPackageName("bytecode"); - cg.generateClassProxy(c); - StringWriter sw = new StringWriter(); - cg.write(new PrintWriter(sw)); - String code = sw.toString(); - String proxy = "bytecode."+ c.getSimpleName() + "Proxy"; - compiler.setJavaSystemCompiler(false); - compiler.setSource(proxy, code); - // System.out.println(code); - Class px = compiler.getClass(proxy); - proxyMap.put(c, px); - return px; - } - - private void setPackageName(String packageName) { - this.packageName = packageName; - } - - /** - * Generate a class that implements all static methods of the given class, - * but as non-static. 
- * - * @param clazz the class to extend - */ - void generateStaticProxy(Class clazz) { - imports.clear(); - addImport(InvocationHandler.class); - addImport(Method.class); - addImport(clazz); - className = getClassName(clazz) + "Proxy"; - for (Method m : clazz.getDeclaredMethods()) { - if (Modifier.isStatic(m.getModifiers())) { - if (!Modifier.isPrivate(m.getModifiers())) { - addMethod(m); - } - } - } - } - - private void generateClassProxy(Class clazz) { - imports.clear(); - addImport(InvocationHandler.class); - addImport(Method.class); - addImport(clazz); - className = getClassName(clazz) + "Proxy"; - extendsClass = clazz; - int doNotOverride = Modifier.FINAL | Modifier.STATIC | - Modifier.PRIVATE | Modifier.ABSTRACT | Modifier.VOLATILE; - Class dc = clazz; - while (dc != null) { - addImport(dc); - for (Method m : dc.getDeclaredMethods()) { - if ((m.getModifiers() & doNotOverride) == 0) { - addMethod(m); - } - } - dc = dc.getSuperclass(); - } - for (Constructor c : clazz.getDeclaredConstructors()) { - if (Modifier.isPrivate(c.getModifiers())) { - continue; - } - if (constructor == null) { - constructor = c; - } else if (c.getParameterTypes().length < - constructor.getParameterTypes().length) { - constructor = c; - } - } - } - - private void addMethod(Method m) { - if (methods.containsKey(getMethodName(m))) { - // already declared in a subclass - return; - } - addImport(m.getReturnType()); - for (Class c : m.getParameterTypes()) { - addImport(c); - } - for (Class c : m.getExceptionTypes()) { - addImport(c); - } - methods.put(getMethodName(m), m); - } - - private static String getMethodName(Method m) { - StringBuilder buff = new StringBuilder(); - buff.append(m.getReturnType()).append(' '); - buff.append(m.getName()); - for (Class p : m.getParameterTypes()) { - buff.append(' '); - buff.append(p.getName()); - } - return buff.toString(); - } - - private void addImport(Class c) { - while (c.isArray()) { - c = c.getComponentType(); - } - if (!c.isPrimitive()) { - if 
(!"java.lang".equals(c.getPackage().getName())) { - imports.add(c.getName()); - } - } - } - - private static String getClassName(Class c) { - return getClassName(c, false); - } - - private static String getClassName(Class c, boolean varArg) { - if (varArg) { - c = c.getComponentType(); - } - String s = c.getSimpleName(); - while (true) { - c = c.getEnclosingClass(); - if (c == null) { - break; - } - s = c.getSimpleName() + "." + s; - } - if (varArg) { - return s + "..."; - } - return s; - } - - private void write(PrintWriter writer) { - if (packageName != null) { - writer.println("package " + packageName + ";"); - } - for (String imp : imports) { - writer.println("import " + imp + ";"); - } - writer.print("public class " + className); - if (extendsClass != null) { - writer.print(" extends " + getClassName(extendsClass)); - } - writer.println(" {"); - writer.println(" private final InvocationHandler ih;"); - writer.println(" public " + className + "() {"); - writer.println(" this(new InvocationHandler() {"); - writer.println(" public Object invoke(Object proxy,"); - writer.println(" Method method, Object[] args) " + - "throws Throwable {"); - writer.println(" return method.invoke(proxy, args);"); - writer.println(" }});"); - writer.println(" }"); - writer.println(" public " + className + "(InvocationHandler ih) {"); - if (constructor != null) { - writer.print(" super("); - int i = 0; - for (Class p : constructor.getParameterTypes()) { - if (i > 0) { - writer.print(", "); - } - if (p.isPrimitive()) { - if (p == boolean.class) { - writer.print("false"); - } else if (p == byte.class) { - writer.print("(byte) 0"); - } else if (p == char.class) { - writer.print("(char) 0"); - } else if (p == short.class) { - writer.print("(short) 0"); - } else if (p == int.class) { - writer.print("0"); - } else if (p == long.class) { - writer.print("0L"); - } else if (p == float.class) { - writer.print("0F"); - } else if (p == double.class) { - writer.print("0D"); - } - } else { - 
writer.print("null"); - } - i++; - } - writer.println(");"); - } - writer.println(" this.ih = ih;"); - writer.println(" }"); - writer.println(" @SuppressWarnings(\"unchecked\")"); - writer.println(" private static " + - "T convertException(Throwable e) {"); - writer.println(" if (e instanceof Error) {"); - writer.println(" throw (Error) e;"); - writer.println(" }"); - writer.println(" return (T) e;"); - writer.println(" }"); - for (Method m : methods.values()) { - Class retClass = m.getReturnType(); - writer.print(" "); - if (Modifier.isProtected(m.getModifiers())) { - // 'public' would also work - writer.print("protected "); - } else { - writer.print("public "); - } - writer.print(getClassName(retClass) + - " " + m.getName() + "("); - Class[] pc = m.getParameterTypes(); - for (int i = 0; i < pc.length; i++) { - Class p = pc[i]; - if (i > 0) { - writer.print(", "); - } - boolean varArg = i == pc.length - 1 && m.isVarArgs(); - writer.print(getClassName(p, varArg) + " p" + i); - } - writer.print(")"); - Class[] ec = m.getExceptionTypes(); - writer.print(" throws RuntimeException"); - if (ec.length > 0) { - for (Class e : ec) { - writer.print(", "); - writer.print(getClassName(e)); - } - } - writer.println(" {"); - writer.println(" try {"); - writer.print(" "); - if (retClass != void.class) { - writer.print("return ("); - if (retClass == boolean.class) { - writer.print("Boolean"); - } else if (retClass == byte.class) { - writer.print("Byte"); - } else if (retClass == char.class) { - writer.print("Character"); - } else if (retClass == short.class) { - writer.print("Short"); - } else if (retClass == int.class) { - writer.print("Integer"); - } else if (retClass == long.class) { - writer.print("Long"); - } else if (retClass == float.class) { - writer.print("Float"); - } else if (retClass == double.class) { - writer.print("Double"); - } else { - writer.print(getClassName(retClass)); - } - writer.print(") "); - } - writer.print("ih.invoke(this, "); - 
writer.println(getClassName(m.getDeclaringClass()) + - ".class.getDeclaredMethod(\"" + m.getName() + - "\","); - writer.print(" new Class[] {"); - int i = 0; - for (Class p : m.getParameterTypes()) { - if (i > 0) { - writer.print(", "); - } - writer.print(getClassName(p) + ".class"); - i++; - } - writer.println("}),"); - writer.print(" new Object[] {"); - for (i = 0; i < m.getParameterTypes().length; i++) { - if (i > 0) { - writer.print(", "); - } - writer.print("p" + i); - } - writer.println("});"); - writer.println(" } catch (Throwable e) {"); - writer.println(" throw convertException(e);"); - writer.println(" }"); - writer.println(" }"); - } - writer.println("}"); - writer.flush(); - } - - /** - * Format a method call, including arguments, for an exception message. - * - * @param m the method - * @param args the arguments - * @return the formatted string - */ - public static String formatMethodCall(Method m, Object... args) { - StringBuilder buff = new StringBuilder(); - buff.append(m.getName()).append('('); - for (int i = 0; i < args.length; i++) { - Object a = args[i]; - if (i > 0) { - buff.append(", "); - } - buff.append(a == null ? "null" : a.toString()); - } - buff.append(")"); - return buff.toString(); - } - -} diff --git a/h2/src/test/org/h2/test/utils/RandomDataUtils.java b/h2/src/test/org/h2/test/utils/RandomDataUtils.java new file mode 100644 index 0000000000..36b15e501c --- /dev/null +++ b/h2/src/test/org/h2/test/utils/RandomDataUtils.java @@ -0,0 +1,62 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.utils; + +import java.util.Random; + +/** + * Utilities for random data generation. + */ +public final class RandomDataUtils { + + /** + * Fills the specified character array with random printable code points + * from the limited set of Unicode code points with different length in + * UTF-8 representation. 
+ * + *

          + * Debuggers can have performance problems on some systems when displayed + * values have characters from many different blocks, because too many large + * separate fonts with different sets of glyphs can be needed. + *

          + * + * @param r + * the source of random data + * @param chars + * the character array to fill + */ + public static void randomChars(Random r, char[] chars) { + for (int i = 0, l = chars.length; i < l;) { + int from, to; + switch (r.nextInt(4)) { + case 3: + if (i + 1 < l) { + from = 0x1F030; + to = 0x1F093; + break; + } + //$FALL-THROUGH$ + default: + from = ' '; + to = '~'; + break; + case 1: + from = 0xA0; + to = 0x24F; + break; + case 2: + from = 0x2800; + to = 0x28FF; + break; + } + i += Character.toChars(from + r.nextInt(to - from + 1), chars, i); + } + } + + private RandomDataUtils() { + } + +} diff --git a/h2/src/test/org/h2/test/utils/ResultVerifier.java b/h2/src/test/org/h2/test/utils/ResultVerifier.java index 1c6fdf78e2..ed5d73c75e 100644 --- a/h2/src/test/org/h2/test/utils/ResultVerifier.java +++ b/h2/src/test/org/h2/test/utils/ResultVerifier.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.utils; diff --git a/h2/src/test/org/h2/test/utils/SelfDestructor.java b/h2/src/test/org/h2/test/utils/SelfDestructor.java index 143f8b70c3..6f11ffa745 100644 --- a/h2/src/test/org/h2/test/utils/SelfDestructor.java +++ b/h2/src/test/org/h2/test/utils/SelfDestructor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.utils; diff --git a/h2/src/test/org/h2/test/utils/TestColumnNamer.java b/h2/src/test/org/h2/test/utils/TestColumnNamer.java deleted file mode 100644 index e3e496632c..0000000000 --- a/h2/src/test/org/h2/test/utils/TestColumnNamer.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - */ -package org.h2.test.utils; - -import org.h2.expression.Expression; -import org.h2.expression.ValueExpression; -import org.h2.test.TestBase; -import org.h2.util.ColumnNamer; - -/** - * Tests the column name factory. - */ -public class TestColumnNamer extends TestBase { - - private String[] ids = new String[] { "ABC", "123", "a\n2", "a$c%d#e@f!.", null, - "VERYVERYVERYVERYVERYVERYLONGVERYVERYVERYVERYVERYVERYLONGVERYVERYVERYVERYVERYVERYLONG", "'!!!'", "'!!!!'", - "3.1415", "\r", "col1", "col1", "col1", - "col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2", - "col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2col2" }; - - private String[] expectedColumnName = { "ABC", "123", "a2", "acdef", "colName6", "VERYVERYVERYVERYVERYVERYLONGVE", - "colName8", "colName9", "31415", "colName11", "col1", "col1_2", "col1_3", "col2col2col2col2col2col2col2co", - "col2col2col2col2col2col2col2_2" }; - - /** - * This method is called when executing this application from the command - * line. 
- * - * @param args the command line parameters - */ - public static void main(String[] args) { - new TestColumnNamer().test(); - } - - @Override - public void test() { - ColumnNamer columnNamer = new ColumnNamer(null); - columnNamer.getConfiguration().configure("MAX_IDENTIFIER_LENGTH = 30"); - columnNamer.getConfiguration().configure("REGULAR_EXPRESSION_MATCH_ALLOWED = '[A-Za-z0-9_]+'"); - columnNamer.getConfiguration().configure("REGULAR_EXPRESSION_MATCH_DISALLOWED = '[^A-Za-z0-9_]+'"); - columnNamer.getConfiguration().configure("DEFAULT_COLUMN_NAME_PATTERN = 'colName$$'"); - columnNamer.getConfiguration().configure("GENERATE_UNIQUE_COLUMN_NAMES = 1"); - - int index = 0; - for (String id : ids) { - Expression columnExp = ValueExpression.getDefault(); - String newColumnName = columnNamer.getColumnName(columnExp, index + 1, id); - assertNotNull(newColumnName); - assertTrue(newColumnName.length() <= 30); - assertTrue(newColumnName.length() >= 1); - assertEquals(newColumnName, expectedColumnName[index]); - index++; - } - } -} diff --git a/h2/src/test/org/h2/test/utils/package.html b/h2/src/test/org/h2/test/utils/package.html index 9f02cacd86..c2468caa43 100644 --- a/h2/src/test/org/h2/test/utils/package.html +++ b/h2/src/test/org/h2/test/utils/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/WEB-INF/console.html b/h2/src/tools/WEB-INF/console.html index 6a54baa452..2ae76ab4a3 100644 --- a/h2/src/tools/WEB-INF/console.html +++ b/h2/src/tools/WEB-INF/console.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/WEB-INF/web.xml b/h2/src/tools/WEB-INF/web.xml index 424597fca1..b1b067f3ca 100644 --- a/h2/src/tools/WEB-INF/web.xml +++ b/h2/src/tools/WEB-INF/web.xml @@ -1,7 +1,7 @@ = 0) { - out.write(buffer, 0, read); - } - } - /** * Run the JaCoco code coverage. 
*/ @@ -136,41 +224,34 @@ private static void copy(InputStream in, OutputStream out) throws IOException { public void coverage() { compile(); downloadTest(); - downloadUsingMaven("ext/org.jacoco.agent-0.8.2.jar", - "org.jacoco", "org.jacoco.agent", "0.8.2", - "1402427761df5c7601ff6e06280764833ed727b5"); - try (ZipFile zipFile = new ZipFile(new File("ext/org.jacoco.agent-0.8.2.jar"))) { - final Enumeration e = zipFile.entries(); - while (e.hasMoreElements()) { - final ZipEntry zipEntry = e.nextElement(); - final String name = zipEntry.getName(); - if (name.equals("jacocoagent.jar")) { - try (InputStream in = zipFile.getInputStream(zipEntry); - FileOutputStream out = new FileOutputStream("ext/jacocoagent.jar")) { - copy(in, out); - } - } - } + downloadUsingMaven("ext/org.jacoco.agent-" + JACOCO_VERSION + ".jar", + "org.jacoco", "org.jacoco.agent", JACOCO_VERSION, + "0fd03a8ab78af3dd03b27647067efa72690d4922"); + URI uri = URI.create("jar:" + + Paths.get("ext/org.jacoco.agent-" + JACOCO_VERSION + ".jar").toAbsolutePath().toUri()); + try (FileSystem fs = FileSystems.newFileSystem(uri, Collections.emptyMap())) { + Files.copy(fs.getPath("jacocoagent.jar"), Paths.get("ext/jacocoagent.jar"), + StandardCopyOption.REPLACE_EXISTING); } catch (IOException ex) { throw new RuntimeException(ex); } - downloadUsingMaven("ext/org.jacoco.cli-0.8.2.jar", - "org.jacoco", "org.jacoco.cli", "0.8.2", - "9595c53358d0306900183b5a7e6a70c88171ab4c"); - downloadUsingMaven("ext/org.jacoco.core-0.8.2.jar", - "org.jacoco", "org.jacoco.core", "0.8.2", - "977b33afe2344a9ee801fd3317c54d8e1f9d7a79"); - downloadUsingMaven("ext/org.jacoco.report-0.8.2.jar", - "org.jacoco", "org.jacoco.report", "0.8.2", - "50e133cdfd2d31ca5702b73615be70f801d3ae26"); - downloadUsingMaven("ext/asm-commons-7.0.jar", - "org.ow2.asm", "asm-commons", "7.0", - "478006d07b7c561ae3a92ddc1829bca81ae0cdd1"); - downloadUsingMaven("ext/asm-tree-7.0.jar", - "org.ow2.asm", "asm-tree", "7.0", - 
"29bc62dcb85573af6e62e5b2d735ef65966c4180"); - downloadUsingMaven("ext/args4j-2.33.jar", - "args4j", "args4j", "2.33", + downloadUsingMaven("ext/org.jacoco.cli-" + JACOCO_VERSION + ".jar", + "org.jacoco", "org.jacoco.cli", JACOCO_VERSION, + "30155fcd37821879264365693055290dbfe984bb"); + downloadUsingMaven("ext/org.jacoco.core-" + JACOCO_VERSION + ".jar", + "org.jacoco", "org.jacoco.core", JACOCO_VERSION, + "1ac96769aa83e5492d1a1a694774f6baec4eb704"); + downloadUsingMaven("ext/org.jacoco.report-" + JACOCO_VERSION + ".jar", + "org.jacoco", "org.jacoco.report", JACOCO_VERSION, + "421e4aab2aaa809d1e66a96feb11f61ea698da19"); + downloadUsingMaven("ext/asm-commons-" + ASM_VERSION + ".jar", + "org.ow2.asm", "asm-commons", ASM_VERSION, + "019c7ba355f0737815205518e332a8dc08b417c6"); + downloadUsingMaven("ext/asm-tree-" + ASM_VERSION + ".jar", + "org.ow2.asm", "asm-tree", ASM_VERSION, + "dfcad5abbcff36f8bdad5647fe6f4972e958ad59"); + downloadUsingMaven("ext/args4j-" + ARGS4J_VERSION + ".jar", + "args4j", "args4j", ARGS4J_VERSION, "bd87a75374a6d6523de82fef51fc3cfe9baf9fc9"); delete(files("coverage")); @@ -179,17 +260,17 @@ public void coverage() { // JaCoCo does not support multiple versions of the same classes delete(files("coverage/bin/META-INF/versions")); String cp = "coverage/bin" + - File.pathSeparator + "ext/postgresql-42.2.5.jre7" + - File.pathSeparator + "ext/servlet-api-3.1.0.jar" + - File.pathSeparator + "ext/lucene-core-5.5.5.jar" + - File.pathSeparator + "ext/lucene-analyzers-common-5.5.5.jar" + - File.pathSeparator + "ext/lucene-queryparser-5.5.5.jar" + - File.pathSeparator + "ext/h2mig_pagestore_addon.jar" + - File.pathSeparator + "ext/org.osgi.core-4.2.0.jar" + - File.pathSeparator + "ext/org.osgi.enterprise-4.2.0.jar" + - File.pathSeparator + "ext/jts-core-1.15.0.jar" + - File.pathSeparator + "ext/slf4j-api-1.6.0.jar" + - File.pathSeparator + "ext/slf4j-nop-1.6.0.jar" + + File.pathSeparator + "ext/postgresql-" + PGJDBC_VERSION + ".jar" + + File.pathSeparator + 
"ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar" + + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/slf4j-nop-" + SLF4J_VERSION + ".jar" + File.pathSeparator + javaToolsJar; // Run tests execJava(args( @@ -204,13 +285,13 @@ public void coverage() { delete(files("coverage/bin/org/h2/sample")); // Generate report execJava(args("-cp", - "ext/org.jacoco.cli-0.8.2.jar" + File.pathSeparator - + "ext/org.jacoco.core-0.8.2.jar" + File.pathSeparator - + "ext/org.jacoco.report-0.8.2.jar" + File.pathSeparator - + "ext/asm-7.0.jar" + File.pathSeparator - + "ext/asm-commons-7.0.jar" + File.pathSeparator - + "ext/asm-tree-7.0.jar" + File.pathSeparator - + "ext/args4j-2.33.jar", + "ext/org.jacoco.cli-" + JACOCO_VERSION + ".jar" + File.pathSeparator + + "ext/org.jacoco.core-" + JACOCO_VERSION + ".jar" + File.pathSeparator + + "ext/org.jacoco.report-" + JACOCO_VERSION + ".jar" + File.pathSeparator + + "ext/asm-" + ASM_VERSION + ".jar" + File.pathSeparator + + "ext/asm-commons-" + ASM_VERSION + ".jar" + File.pathSeparator + + "ext/asm-tree-" + ASM_VERSION + ".jar" + File.pathSeparator + + "ext/args4j-" + ARGS4J_VERSION + ".jar", "org.jacoco.cli.internal.Main", "report", "coverage/jacoco.exec", "--classfiles", "coverage/bin", "--html", "coverage/report", "--sourcefiles", "h2/src/main")); @@ -236,9 +317,9 @@ private static String getTargetJavaVersion() { private void compileMVStore(boolean debugInfo) { clean(); 
mkdir("temp"); - String classpath = "temp"; - FileList files; - files = files("src/main/org/h2/mvstore"). + String classpath = "temp" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar"; + FileList files = files("src/main/org/h2/mvstore"). exclude("src/main/org/h2/mvstore/db/*"); StringList args = args(); if (debugInfo) { @@ -255,72 +336,11 @@ private void compileMVStore(boolean debugInfo) { javac(args, files); } - private void compile(boolean debugInfo, boolean clientOnly, - boolean basicResourcesOnly) { - clean(); - mkdir("temp"); - download(); - String classpath = "temp" + - File.pathSeparator + "ext/servlet-api-3.1.0.jar" + - File.pathSeparator + "ext/lucene-core-5.5.5.jar" + - File.pathSeparator + "ext/lucene-analyzers-common-5.5.5.jar" + - File.pathSeparator + "ext/lucene-queryparser-5.5.5.jar" + - File.pathSeparator + "ext/slf4j-api-1.6.0.jar" + - File.pathSeparator + "ext/org.osgi.core-4.2.0.jar" + - File.pathSeparator + "ext/org.osgi.enterprise-4.2.0.jar" + - File.pathSeparator + "ext/jts-core-1.15.0.jar" + - File.pathSeparator + "ext/asm-7.0.jar" + - File.pathSeparator + javaToolsJar; - FileList files; - if (clientOnly) { - files = files("src/main/org/h2/Driver.java"); - files.addAll(files("src/main/org/h2/jdbc")); - files.addAll(files("src/main/org/h2/jdbcx")); - } else { - files = files("src/main"); - } - StringList args = args(); - if (debugInfo) { - args = args.plus("-Xlint:unchecked", - "-d", "temp", "-sourcepath", "src/main", "-classpath", classpath); - } else { - args = args.plus("-Xlint:unchecked", "-g:none", - "-d", "temp", "-sourcepath", "src/main", "-classpath", classpath); - } - String version = getTargetJavaVersion(); - if (version != null) { - args = args.plus("-target", version, "-source", version); - } - javac(args, files); - - files = files("src/main/META-INF/services"); - copy("temp", files, "src/main"); - - if (!clientOnly) { - files = files("src/test"); - files.addAll(files("src/tools")); - //we don't use Junit for this 
test framework - files = files.exclude("src/test/org/h2/test/TestAllJunit.java"); - args = args("-Xlint:unchecked", "-Xlint:deprecation", - "-d", "temp", "-sourcepath", "src/test" + File.pathSeparator + "src/tools", - "-classpath", classpath); - if (version != null) { - args = args.plus("-target", version, "-source", version); - } - javac(args, files); - files = files("src/test"). - exclude("*.java"). - exclude("*/package.html"); - copy("temp", files, "src/test"); - } - resources(clientOnly, basicResourcesOnly); - } - private static void filter(String source, String target, String old, String replacement) { - String text = new String(readFile(new File(source))); + String text = new String(readFile(Paths.get(source))); text = replaceAll(text, old, replacement); - writeFile(new File(target), text.getBytes()); + writeFile(Paths.get(target), text.getBytes()); } /** @@ -335,8 +355,6 @@ public void docs() { java("org.h2.build.code.CheckJavadoc", null); java("org.h2.build.code.CheckTextFiles", null); java("org.h2.build.doc.GenerateDoc", null); - java("org.h2.build.doc.GenerateHelp", null); - java("org.h2.build.i18n.PrepareTranslation", null); java("org.h2.build.indexer.Indexer", null); java("org.h2.build.doc.MergeDocs", null); java("org.h2.build.doc.WebSite", null); @@ -357,43 +375,49 @@ public void download() { } private void downloadOrVerify(boolean offline) { - downloadOrVerify("ext/servlet-api-3.1.0.jar", - "javax/servlet", "javax.servlet-api", "3.1.0", - "3cd63d075497751784b2fa84be59432f4905bf7c", offline); - downloadOrVerify("ext/lucene-core-5.5.5.jar", - "org/apache/lucene", "lucene-core", "5.5.5", - "c34bcd9274859dc07cfed2a935aaca90c4f4b861", offline); - downloadOrVerify("ext/lucene-analyzers-common-5.5.5.jar", - "org/apache/lucene", "lucene-analyzers-common", "5.5.5", - "e6b3f5d1b33ed24da7eef0a72f8062bd4652700c", offline); - downloadOrVerify("ext/lucene-queryparser-5.5.5.jar", - "org/apache/lucene", "lucene-queryparser", "5.5.5", - 
"6c965eb5838a2ba58b0de0fd860a420dcda11937", offline); - downloadOrVerify("ext/slf4j-api-1.6.0.jar", - "org/slf4j", "slf4j-api", "1.6.0", - "b353147a7d51fcfcd818d8aa6784839783db0915", offline); - downloadOrVerify("ext/org.osgi.core-4.2.0.jar", - "org/osgi", "org.osgi.core", "4.2.0", - "66ab449ff3aa5c4adfc82c89025cc983b422eb95", offline); - downloadOrVerify("ext/org.osgi.enterprise-4.2.0.jar", - "org/osgi", "org.osgi.enterprise", "4.2.0", - "8634dcb0fc62196e820ed0f1062993c377f74972", offline); - downloadOrVerify("ext/jts-core-1.15.0.jar", - "org/locationtech/jts", "jts-core", "1.15.0", - "705981b7e25d05a76a3654e597dab6ba423eb79e", offline); - downloadOrVerify("ext/junit-4.12.jar", - "junit", "junit", "4.12", - "2973d150c0dc1fefe998f834810d68f278ea58ec", offline); - downloadUsingMaven("ext/asm-7.0.jar", - "org.ow2.asm", "asm", "7.0", - "d74d4ba0dee443f68fb2dcb7fcdb945a2cd89912"); + downloadOrVerify("ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar", + "javax/servlet", "javax.servlet-api", JAVAX_SERVLET_VERSION, + "a27082684a2ff0bf397666c3943496c44541d1ca", offline); + downloadOrVerify("ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar", + "jakarta/servlet", "jakarta.servlet-api", JAKARTA_SERVLET_VERSION, + "2e6b8ccde55522c879434ddec3714683ccae6867", offline); + downloadOrVerify("ext/lucene-core-" + LUCENE_VERSION + ".jar", + "org/apache/lucene", "lucene-core", LUCENE_VERSION, + "b275ca5f39b6dd45d5a7ecb49da65205ad2732ca", offline); + downloadOrVerify("ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar", + "org/apache/lucene", "lucene-analyzers-common", LUCENE_VERSION, + "2c4a7e8583e2061aa35db85705393b8b6e67a679", offline); + downloadOrVerify("ext/lucene-queryparser-" + LUCENE_VERSION + ".jar", + "org/apache/lucene", "lucene-queryparser", LUCENE_VERSION, + "96a104be314d0adaac163635610da8dfc5e4166e", offline); + downloadOrVerify("ext/slf4j-api-" + SLF4J_VERSION + ".jar", + "org/slf4j", "slf4j-api", SLF4J_VERSION, + 
"b5a4b6d16ab13e34a88fae84c35cd5d68cac922c", offline); + downloadOrVerify("ext/org.osgi.core-" + OSGI_VERSION + ".jar", + "org/osgi", "org.osgi.core", OSGI_VERSION, + "6e5e8cd3c9059c08e1085540442a490b59a7783c", offline); + downloadOrVerify("ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar", + "org/osgi", "org.osgi.enterprise", OSGI_VERSION, + "4f6e081c38b951204e2b6a60d33ab0a90bfa1ad3", offline); + downloadOrVerify("ext/jts-core-" + JTS_VERSION + ".jar", + "org/locationtech/jts", "jts-core", JTS_VERSION, + "7e1973b5babdd98734b1ab903fc1155714402eec", offline); + downloadOrVerify("ext/junit-jupiter-api-" + JUNIT_VERSION + ".jar", + "org.junit.jupiter", "junit-jupiter-api", JUNIT_VERSION, + "c9ba885abfe975cda123bf6f8f0a69a1b46956d0", offline); + downloadUsingMaven("ext/asm-" + ASM_VERSION + ".jar", + "org.ow2.asm", "asm", ASM_VERSION, + "3f5199523fb95304b44563f5d56d9f5a07270669"); + downloadUsingMaven("ext/apiguardian-" + APIGUARDIAN_VERSION + ".jar", + "org.apiguardian", "apiguardian-api", APIGUARDIAN_VERSION, + "fc9dff4bb36d627bdc553de77e1f17efd790876c"); } private void downloadOrVerify(String target, String group, String artifact, String version, String sha1Checksum, boolean offline) { if (offline) { - File targetFile = new File(target); - if (targetFile.exists()) { + Path targetFile = Paths.get(target); + if (Files.exists(targetFile)) { return; } println("Missing file: " + target); @@ -404,26 +428,21 @@ private void downloadOrVerify(String target, String group, String artifact, } private void downloadTest() { - // for TestUpgrade - download("ext/h2mig_pagestore_addon.jar", - "http://h2database.com/h2mig_pagestore_addon.jar", - "6dfafe1b86959c3ba4f7cf03e99535e8b9719965"); // for TestOldVersion downloadUsingMaven("ext/h2-1.2.127.jar", "com/h2database", "h2", "1.2.127", "056e784c7cf009483366ab9cd8d21d02fe47031a"); // for TestPgServer - downloadUsingMaven("ext/postgresql-42.2.5.jre7.jar", - "org.postgresql", "postgresql", "42.2.5.jre7", - 
"ec74f6f7885b7e791f84c7219a97964e9d0121e4"); + downloadUsingMaven("ext/postgresql-" + PGJDBC_VERSION + ".jar", + "org.postgresql", "postgresql", PGJDBC_VERSION, PGJDBC_HASH); // for TestTraceSystem - downloadUsingMaven("ext/slf4j-nop-1.6.0.jar", - "org/slf4j", "slf4j-nop", "1.6.0", - "4da67bb4a6eea5dc273f99c50ad2333eadb46f86"); + downloadUsingMaven("ext/slf4j-nop-" + SLF4J_VERSION + ".jar", + "org/slf4j", "slf4j-nop", SLF4J_VERSION, + "55d4c73dd343efebd236abfeb367c9ef41d55063"); } private static String getVersion() { - return getStaticValue("org.h2.engine.Constants", "getVersion"); + return getStaticField("org.h2.engine.Constants", "VERSION"); } private static String getJarSuffix() { @@ -439,7 +458,7 @@ public void installer() { jar(); docs(); try { - exec("soffice", args("-invisible", "macro:///Standard.Module1.H2Pdf")); + exec("soffice", args("--invisible", "macro:///Standard.Module1.H2Pdf")); copy("docs", files("../h2web/h2.pdf"), "../h2web"); } catch (Exception e) { println("OpenOffice / LibreOffice is not available or macros H2Pdf is not installed:"); @@ -451,7 +470,7 @@ public void installer() { println("Put content of h2/src/installer/openoffice.txt here."); println("Edit BaseDir variable value:"); - println(" BaseDir = \"" + new File(System.getProperty("user.dir")).getParentFile().toURI() + '"'); + println(" BaseDir = \"" + Paths.get(System.getProperty("user.dir")).getParent().toUri() + '"'); println("Close office application and try to build installer again."); println("********************************************************************************"); } @@ -464,38 +483,38 @@ public void installer() { zip("../h2web/h2.zip", files, "../", false, false); boolean installer = false; try { - exec("makensis", args("/v2", "src/installer/h2.nsi")); + exec("makensis", args(isWindows() ? 
"/V2" : "-V2", "src/installer/h2.nsi")); installer = true; } catch (Exception e) { println("NSIS is not available: " + e); } String buildDate = getStaticField("org.h2.engine.Constants", "BUILD_DATE"); - byte[] data = readFile(new File("../h2web/h2.zip")); + byte[] data = readFile(Paths.get("../h2web/h2.zip")); String sha1Zip = getSHA1(data), sha1Exe = null; - writeFile(new File("../h2web/h2-" + buildDate + ".zip"), data); + writeFile(Paths.get("../h2web/h2-" + buildDate + ".zip"), data); if (installer) { - data = readFile(new File("../h2web/h2-setup.exe")); + data = readFile(Paths.get("../h2web/h2-setup.exe")); sha1Exe = getSHA1(data); - writeFile(new File("../h2web/h2-setup-" + buildDate + ".exe"), data); + writeFile(Paths.get("../h2web/h2-setup-" + buildDate + ".exe"), data); } updateChecksum("../h2web/html/download.html", sha1Zip, sha1Exe); } - private static void updateChecksum(String fileName, String sha1Zip, - String sha1Exe) { - String checksums = new String(readFile(new File(fileName))); + private static void updateChecksum(String fileName, String sha1Zip, String sha1Exe) { + Path file = Paths.get(fileName); + String checksums = new String(readFile(file)); checksums = replaceAll(checksums, "", "(SHA1 checksum: " + sha1Zip + ")"); if (sha1Exe != null) { checksums = replaceAll(checksums, "", "(SHA1 checksum: " + sha1Exe + ")"); } - writeFile(new File(fileName), checksums.getBytes()); + writeFile(file, checksums.getBytes()); } - private static String canonicalPath(File file) { + private static String canonicalPath(Path file) { try { - return file.getCanonicalPath(); + return file.toRealPath().toString(); } catch (IOException e) { throw new RuntimeException(e); } @@ -503,8 +522,8 @@ private static String canonicalPath(File file) { private FileList excludeTestMetaInfFiles(FileList files) { FileList testMetaInfFiles = files("src/test/META-INF"); - int basePathLength = canonicalPath(new File("src/test")).length(); - for (File file : testMetaInfFiles) { + int 
basePathLength = canonicalPath(Paths.get("src/test")).length(); + for (Path file : testMetaInfFiles) { files = files.exclude(canonicalPath(file).substring(basePathLength + 1)); } return files; @@ -512,14 +531,10 @@ private FileList excludeTestMetaInfFiles(FileList files) { /** * Add META-INF/versions for Java 9+. - * - * @param includeCurrentTimestamp include CurrentTimestamp implementation */ - private void addVersions(boolean includeCurrentTimestamp) { + private void addVersions() { copy("temp/META-INF/versions/9", files("src/java9/precompiled"), "src/java9/precompiled"); - if (!includeCurrentTimestamp) { - delete(files("temp/META-INF/versions/9/org/h2/util/CurrentTimestamp.class")); - } + copy("temp/META-INF/versions/10", files("src/java10/precompiled"), "src/java10/precompiled"); } /** @@ -528,7 +543,7 @@ private void addVersions(boolean includeCurrentTimestamp) { @Description(summary = "Create the regular h2.jar file.") public void jar() { compile(); - addVersions(true); + addVersions(); manifest("src/main/META-INF/MANIFEST.MF"); FileList files = files("temp"). exclude("temp/org/h2/build/*"). @@ -550,41 +565,13 @@ public void jar() { filter("src/installer/h2w.bat", "bin/h2w.bat", "h2.jar", "h2" + getJarSuffix()); } - /** - * Create the h2client.jar. This only contains the remote JDBC - * implementation. - */ - @Description(summary = "Create h2client.jar with only the remote JDBC implementation.") - public void jarClient() { - compile(true, true, false); - addVersions(false); - manifest("src/installer/client/MANIFEST.MF"); - FileList files = files("temp"). - exclude("temp/org/h2/build/*"). - exclude("temp/org/h2/dev/*"). - exclude("temp/org/h2/java/*"). - exclude("temp/org/h2/jcr/*"). - exclude("temp/org/h2/mode/*"). - exclude("temp/org/h2/samples/*"). - exclude("temp/org/h2/test/*"). - exclude("*.bat"). - exclude("*.sh"). - exclude("*.txt"). 
- exclude("*.DS_Store"); - files = excludeTestMetaInfFiles(files); - long kb = jar("bin/h2-client" + getJarSuffix(), files, "temp"); - if (kb < 400 || kb > 600) { - throw new RuntimeException("Expected file size 400 - 600 KB, got: " + kb); - } - } - /** * Create the file h2mvstore.jar. This only contains the MVStore. */ @Description(summary = "Create h2mvstore.jar containing only the MVStore.") public void jarMVStore() { compileMVStore(true); - addVersions(false); + addVersions(); manifest("src/installer/mvstore/MANIFEST.MF"); FileList files = files("temp"); files.exclude("*.DS_Store"); @@ -592,40 +579,6 @@ public void jarMVStore() { jar("bin/h2-mvstore" + getJarSuffix(), files, "temp"); } - /** - * Create the file h2small.jar. This only contains the embedded database. - * Debug information is disabled. - */ - @Description(summary = "Create h2small.jar containing only the embedded database.") - public void jarSmall() { - compile(false, false, true); - addVersions(true); - manifest("src/installer/small/MANIFEST.MF"); - FileList files = files("temp"). - exclude("temp/org/h2/build/*"). - exclude("temp/org/h2/dev/*"). - exclude("temp/org/h2/jcr/*"). - exclude("temp/org/h2/java/*"). - exclude("temp/org/h2/jcr/*"). - exclude("temp/org/h2/samples/*"). - exclude("temp/org/h2/server/ftp/*"). - exclude("temp/org/h2/test/*"). - exclude("temp/org/h2/bnf/*"). - exclude("temp/org/h2/fulltext/*"). - exclude("temp/org/h2/jdbcx/*"). - exclude("temp/org/h2/jmx/*"). - exclude("temp/org/h2/server/*"). - exclude("temp/org/h2/tools/*"). - exclude("*.bat"). - exclude("*.sh"). - exclude("*.txt"). - exclude("*.DS_Store"); - files = excludeTestMetaInfFiles(files); - files.add(new File("temp/org/h2/tools/DeleteDbFiles.class")); - files.add(new File("temp/org/h2/tools/CompressTool.class")); - jar("bin/h2small" + getJarSuffix(), files, "temp"); - } - /** * Create the Javadocs of the API (incl. the JDBC API) and tools. 
*/ @@ -634,16 +587,17 @@ public void javadoc() { compileTools(); delete("docs"); mkdir("docs/javadoc"); - javadoc("-sourcepath", "src/main", "org.h2.jdbc", "org.h2.jdbcx", + javadoc("-sourcepath", "src/main", + "-d", "docs/javadoc", + "org.h2.jdbc", "org.h2.jdbcx", "org.h2.tools", "org.h2.api", "org.h2.engine", "org.h2.fulltext", "-classpath", - "ext/lucene-core-5.5.5.jar" + - File.pathSeparator + "ext/lucene-analyzers-common-5.5.5.jar" + - File.pathSeparator + "ext/lucene-queryparser-5.5.5.jar" + - File.pathSeparator + "ext/jts-core-1.15.0.jar", - "-docletpath", "bin" + File.pathSeparator + "temp", - "-doclet", "org.h2.build.doclet.Doclet"); - copy("docs/javadoc", files("src/docsrc/javadoc"), "src/docsrc/javadoc"); + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar"); } /** @@ -655,40 +609,41 @@ public void javadocImpl() { mkdir("docs/javadocImpl2"); javadoc("-sourcepath", "src/main" + // need to be disabled if not enough memory - // File.pathSeparator + "src/test" + + File.pathSeparator + "src/test" + File.pathSeparator + "src/tools", - // need to be disabled for java 7 - // "-Xdoclint:none", + "-Xdoclint:all,-missing", "-noindex", - "-tag", "h2.resource", "-d", "docs/javadocImpl2", "-classpath", javaToolsJar + - File.pathSeparator + "ext/slf4j-api-1.6.0.jar" + - File.pathSeparator + "ext/servlet-api-3.1.0.jar" + - File.pathSeparator + "ext/lucene-core-5.5.5.jar" + - File.pathSeparator + "ext/lucene-analyzers-common-5.5.5.jar" + - File.pathSeparator + "ext/lucene-queryparser-5.5.5.jar" + - File.pathSeparator + "ext/org.osgi.core-4.2.0.jar" + - File.pathSeparator + "ext/org.osgi.enterprise-4.2.0.jar" + - 
File.pathSeparator + "ext/jts-core-1.15.0.jar" + - File.pathSeparator + "ext/asm-7.0.jar" + - File.pathSeparator + "ext/junit-4.12.jar", - "-subpackages", "org.h2"); + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar" + + File.pathSeparator + "ext/asm-" + ASM_VERSION + ".jar" + + File.pathSeparator + "ext/junit-jupiter-api-" + JUNIT_VERSION + ".jar" + + File.pathSeparator + "ext/apiguardian-api-" + APIGUARDIAN_VERSION + ".jar", + "-subpackages", "org.h2", + "-exclude", "org.h2.dev:org.h2.java:org.h2.test:org.h2.build.code:org.h2.build.doc"); mkdir("docs/javadocImpl3"); javadoc("-sourcepath", "src/main", "-noindex", - "-tag", "h2.resource", "-d", "docs/javadocImpl3", "-classpath", javaToolsJar + - File.pathSeparator + "ext/slf4j-api-1.6.0.jar" + - File.pathSeparator + "ext/servlet-api-3.1.0.jar" + - File.pathSeparator + "ext/lucene-core-5.5.5.jar" + - File.pathSeparator + "ext/lucene-analyzers-common-5.5.5.jar" + - File.pathSeparator + "ext/lucene-queryparser-5.5.5.jar" + - File.pathSeparator + "ext/org.osgi.core-4.2.0.jar" + - File.pathSeparator + "ext/org.osgi.enterprise-4.2.0.jar" + - File.pathSeparator + "ext/jts-core-1.15.0.jar", + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION 
+ ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar", "-subpackages", "org.h2.mvstore", "-exclude", "org.h2.mvstore.db"); @@ -697,33 +652,33 @@ public void javadocImpl() { javadoc("-sourcepath", "src/main" + File.pathSeparator + "src/test" + File.pathSeparator + "src/tools", + "-d", "docs/javadoc", "-classpath", javaToolsJar + - File.pathSeparator + "ext/slf4j-api-1.6.0.jar" + - File.pathSeparator + "ext/servlet-api-3.1.0.jar" + - File.pathSeparator + "ext/lucene-core-5.5.5.jar" + - File.pathSeparator + "ext/lucene-analyzers-common-5.5.5.jar" + - File.pathSeparator + "ext/lucene-queryparser-5.5.5.jar" + - File.pathSeparator + "ext/org.osgi.core-4.2.0.jar" + - File.pathSeparator + "ext/org.osgi.enterprise-4.2.0.jar" + - File.pathSeparator + "ext/jts-core-1.15.0.jar" + - File.pathSeparator + "ext/asm-7.0.jar" + - File.pathSeparator + "ext/junit-4.12.jar", + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar" + + File.pathSeparator + "ext/asm-" + ASM_VERSION + ".jar" + + 
File.pathSeparator + "ext/junit-jupiter-api-" + JUNIT_VERSION + ".jar" + + File.pathSeparator + "ext/apiguardian-api-" + APIGUARDIAN_VERSION + ".jar", "-subpackages", "org.h2", - "-package", - "-docletpath", "bin" + File.pathSeparator + "temp", - "-doclet", "org.h2.build.doclet.Doclet"); - copy("docs/javadocImpl", files("src/docsrc/javadoc"), "src/docsrc/javadoc"); + "-package"); } private static void manifest(String path) { - String manifest = new String(readFile(new File(path)), StandardCharsets.UTF_8); + String manifest = new String(readFile(Paths.get(path)), StandardCharsets.UTF_8); manifest = replaceAll(manifest, "${version}", getVersion()); manifest = replaceAll(manifest, "${buildJdk}", getJavaSpecVersion()); String createdBy = System.getProperty("java.runtime.version") + " (" + System.getProperty("java.vm.vendor") + ")"; manifest = replaceAll(manifest, "${createdBy}", createdBy); mkdir("temp/META-INF"); - writeFile(new File("temp/META-INF/MANIFEST.MF"), manifest.getBytes()); + writeFile(Paths.get("temp/META-INF/MANIFEST.MF"), manifest.getBytes()); } /** @@ -738,10 +693,9 @@ public void mavenDeployCentral() { copy("docs", files, "src/main"); files = files("docs").keep("docs/org/*").keep("*.java"); files.addAll(files("docs").keep("docs/META-INF/*")); - String manifest = new String(readFile(new File( - "src/installer/source-manifest.mf"))); + String manifest = new String(readFile(Paths.get("src/installer/source-manifest.mf"))); manifest = replaceAll(manifest, "${version}", getVersion()); - writeFile(new File("docs/META-INF/MANIFEST.MF"), manifest.getBytes()); + writeFile(Paths.get("docs/META-INF/MANIFEST.MF"), manifest.getBytes()); jar("docs/h2-" + getVersion() + "-sources.jar", files, "docs"); delete("docs/org"); delete("docs/META-INF"); @@ -779,9 +733,9 @@ public void mavenDeployCentral() { // generate and deploy the h2*.jar file jar(); - String pom = new String(readFile(new File("src/installer/pom-template.xml"))); + String pom = new 
String(readFile(Paths.get("src/installer/pom-template.xml"))); pom = replaceAll(pom, "@version@", getVersion()); - writeFile(new File("bin/pom.xml"), pom.getBytes()); + writeFile(Paths.get("bin/pom.xml"), pom.getBytes()); execScript("mvn", args( "deploy:deploy-file", "-Dfile=bin/h2" + getJarSuffix(), @@ -799,10 +753,9 @@ public void mavenDeployCentral() { exclude("docs/org/h2/mvstore/db/*"). keep("*.java"); files.addAll(files("docs").keep("docs/META-INF/*")); - manifest = new String(readFile(new File( - "src/installer/source-manifest.mf"))); + manifest = new String(readFile(Paths.get("src/installer/source-mvstore-manifest.mf"))); manifest = replaceAll(manifest, "${version}", getVersion()); - writeFile(new File("docs/META-INF/MANIFEST.MF"), manifest.getBytes()); + writeFile(Paths.get("docs/META-INF/MANIFEST.MF"), manifest.getBytes()); jar("docs/h2-mvstore-" + getVersion() + "-sources.jar", files, "docs"); delete("docs/org"); delete("docs/META-INF"); @@ -838,9 +791,9 @@ public void mavenDeployCentral() { // generate and deploy the h2-mvstore-*.jar file jarMVStore(); - pom = new String(readFile(new File("src/installer/pom-mvstore-template.xml"))); + pom = new String(readFile(Paths.get("src/installer/pom-mvstore-template.xml"))); pom = replaceAll(pom, "@version@", getVersion()); - writeFile(new File("bin/pom.xml"), pom.getBytes()); + writeFile(Paths.get("bin/pom.xml"), pom.getBytes()); execScript("mvn", args( "deploy:deploy-file", "-Dfile=bin/h2-mvstore" + getJarSuffix(), @@ -860,12 +813,12 @@ public void mavenDeployCentral() { public void mavenInstallLocal() { // MVStore jarMVStore(); - String pom = new String(readFile(new File("src/installer/pom-mvstore-template.xml"))); - pom = replaceAll(pom, "@version@", "1.0-SNAPSHOT"); - writeFile(new File("bin/pom.xml"), pom.getBytes()); + String pom = new String(readFile(Paths.get("src/installer/pom-mvstore-template.xml"))); + pom = replaceAll(pom, "@version@", getVersion()); + writeFile(Paths.get("bin/pom.xml"), 
pom.getBytes()); execScript("mvn", args( "install:install-file", - "-Dversion=1.0-SNAPSHOT", + "-Dversion=" + getVersion(), "-Dfile=bin/h2-mvstore" + getJarSuffix(), "-Dpackaging=jar", "-DpomFile=bin/pom.xml", @@ -873,12 +826,12 @@ public void mavenInstallLocal() { "-DgroupId=com.h2database")); // database jar(); - pom = new String(readFile(new File("src/installer/pom-template.xml"))); - pom = replaceAll(pom, "@version@", "1.0-SNAPSHOT"); - writeFile(new File("bin/pom.xml"), pom.getBytes()); + pom = new String(readFile(Paths.get("src/installer/pom-template.xml"))); + pom = replaceAll(pom, "@version@", getVersion()); + writeFile(Paths.get("bin/pom.xml"), pom.getBytes()); execScript("mvn", args( "install:install-file", - "-Dversion=1.0-SNAPSHOT", + "-Dversion=" + getVersion(), "-Dfile=bin/h2" + getJarSuffix(), "-Dpackaging=jar", "-DpomFile=bin/pom.xml", @@ -901,36 +854,6 @@ public void offline() { } } - private void resources(boolean clientOnly, boolean basicOnly) { - if (!clientOnly) { - java("org.h2.build.doc.GenerateHelp", null); - javadoc("-sourcepath", "src/main", "org.h2.tools", "org.h2.jmx", - "-classpath", - "ext/lucene-core-5.5.5.jar" + - File.pathSeparator + "ext/lucene-analyzers-common-5.5.5.jar" + - File.pathSeparator + "ext/lucene-queryparser-5.5.5.jar" + - File.pathSeparator + "ext/jts-core-1.15.0.jar", - "-docletpath", "bin" + File.pathSeparator + "temp", - "-doclet", "org.h2.build.doclet.ResourceDoclet"); - } - FileList files = files("src/main"). - exclude("*.MF"). - exclude("*.java"). - exclude("*/package.html"). - exclude("*/java.sql.Driver"). 
- exclude("*.DS_Store"); - if (basicOnly) { - files = files.keep("src/main/org/h2/res/_messages_en.*"); - } - if (clientOnly) { - files = files.exclude("src/main/org/h2/res/help.csv"); - files = files.exclude("src/main/org/h2/res/h2*"); - files = files.exclude("src/main/org/h2/res/javadoc.properties"); - files = files.exclude("src/main/org/h2/server/*"); - } - zip("temp/org/h2/util/data.zip", files, "src/main", true, false); - } - /** * Just run the spellchecker. */ @@ -950,39 +873,42 @@ public void test() { /** * Compile and run all fast tests. This does not include the compile step. */ - @Description(summary = "Compile and run all tests for Travis (excl. the compile step).") - public void testTravis() { + @Description(summary = "Compile and run all tests for CI (excl. the compile step).") + public void testCI() { test(true); } - private void test(boolean travis) { + private void test(boolean ci) { downloadTest(); String cp = "temp" + File.pathSeparator + "bin" + - File.pathSeparator + "ext/postgresql-42.2.5.jre7.jar" + - File.pathSeparator + "ext/servlet-api-3.1.0.jar" + - File.pathSeparator + "ext/lucene-core-5.5.5.jar" + - File.pathSeparator + "ext/lucene-analyzers-common-5.5.5.jar" + - File.pathSeparator + "ext/lucene-queryparser-5.5.5.jar" + - File.pathSeparator + "ext/h2mig_pagestore_addon.jar" + - File.pathSeparator + "ext/org.osgi.core-4.2.0.jar" + - File.pathSeparator + "ext/org.osgi.enterprise-4.2.0.jar" + - File.pathSeparator + "ext/jts-core-1.15.0.jar" + - File.pathSeparator + "ext/slf4j-api-1.6.0.jar" + - File.pathSeparator + "ext/slf4j-nop-1.6.0.jar" + - File.pathSeparator + "ext/asm-7.0.jar" + + File.pathSeparator + "ext/postgresql-" + PGJDBC_VERSION + ".jar" + + File.pathSeparator + "ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" 
+ LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar" + + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/slf4j-nop-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/asm-" + ASM_VERSION + ".jar" + File.pathSeparator + javaToolsJar; int version = getJavaVersion(); if (version >= 9) { cp = "src/java9/precompiled" + File.pathSeparator + cp; + if (version >= 10) { + cp = "src/java10/precompiled" + File.pathSeparator + cp; + } } int ret; - if (travis) { + if (ci) { ret = execJava(args( "-ea", "-Xmx128m", "-XX:MaxDirectMemorySize=2g", "-cp", cp, - "org.h2.test.TestAll", "travis")); + "org.h2.test.TestAll", "ci")); } else { ret = execJava(args( "-ea", @@ -990,7 +916,7 @@ private void test(boolean travis) { "-cp", cp, "org.h2.test.TestAll")); } - // return a failure code for Jenkins/Travis/CI builds + // return a failure code for CI builds if (ret != 0) { System.exit(ret); } @@ -1138,8 +1064,7 @@ public void uploadBuild() { args = args.plus("-target", version, "-source", version); } javac(args, files); - String cp = "bin" + File.pathSeparator + "temp" + - File.pathSeparator + "ext/h2mig_pagestore_addon.jar"; + String cp = "bin" + File.pathSeparator + "temp"; execJava(args("-Xmx512m", "-cp", cp, "-Dh2.ftpPassword=" + password, "org.h2.build.doc.UploadBuild")); @@ -1162,8 +1087,8 @@ public void warConsole() { @Override protected String getLocalMavenDir() { String userHome = System.getProperty("user.home", ""); - File file = new File(userHome, ".m2/settings.xml"); - if (!file.exists()) { + Path file = Paths.get(userHome, ".m2/settings.xml"); + if (!Files.exists(file)) { return super.getLocalMavenDir(); } XMLParser p = new XMLParser(new String(BuildBase.readFile(file))); 
diff --git a/h2/src/tools/org/h2/build/BuildBase.java b/h2/src/tools/org/h2/build/BuildBase.java index fc64388ea7..830747fc2a 100644 --- a/h2/src/tools/org/h2/build/BuildBase.java +++ b/h2/src/tools/org/h2/build/BuildBase.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build; @@ -10,14 +10,12 @@ import java.io.BufferedReader; import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.FileOutputStream; import java.io.FilterOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.PrintStream; -import java.io.RandomAccessFile; import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; @@ -29,11 +27,17 @@ import java.lang.reflect.Modifier; import java.net.URL; import java.nio.charset.StandardCharsets; +import java.nio.file.FileAlreadyExistsException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.concurrent.TimeUnit; @@ -105,7 +109,7 @@ public String[] array() { /** * A list of files. 
*/ - public static class FileList extends ArrayList { + public static class FileList extends ArrayList { private static final long serialVersionUID = 1L; @@ -154,8 +158,8 @@ private FileList filter(boolean keep, String pattern) { // normalize / and \ pattern = BuildBase.replaceAll(pattern, "/", File.separator); FileList list = new FileList(); - for (File f : this) { - String path = f.getPath(); + for (Path f : this) { + String path = f.toString(); boolean match = start ? path.startsWith(pattern) : path.endsWith(pattern); if (match == keep) { list.add(f); @@ -304,12 +308,7 @@ protected void beep() { */ protected void projectHelp() { Method[] methods = getClass().getDeclaredMethods(); - Arrays.sort(methods, new Comparator() { - @Override - public int compare(Method a, Method b) { - return a.getName().compareTo(b.getName()); - } - }); + Arrays.sort(methods, Comparator.comparing(Method::getName)); sysOut.println("Targets:"); String description; for (Method m : methods) { @@ -328,7 +327,7 @@ public int compare(Method a, Method b) { sysOut.println(); } - private static boolean isWindows() { + protected static boolean isWindows() { return System.getProperty("os.name").toLowerCase().contains("windows"); } @@ -436,24 +435,6 @@ protected static String getStaticField(String className, String fieldName) { } } - /** - * Reads the value from a static method of a class using reflection. - * - * @param className the name of the class - * @param methodName the field name - * @return the value as a string - */ - protected static String getStaticValue(String className, String methodName) { - try { - Class clazz = Class.forName(className); - Method method = clazz.getMethod(methodName); - return method.invoke(null).toString(); - } catch (Exception e) { - throw new RuntimeException("Can not read value " + className + "." - + methodName + "()", e); - } - } - /** * Copy files to the specified target directory. 
* @@ -462,14 +443,13 @@ protected static String getStaticValue(String className, String methodName) { * @param baseDir the base directory */ protected void copy(String targetDir, FileList files, String baseDir) { - File target = new File(targetDir); - File base = new File(baseDir); - println("Copying " + files.size() + " files to " + target.getPath()); - String basePath = base.getPath(); - for (File f : files) { - File t = new File(target, removeBase(basePath, f.getPath())); + Path target = Paths.get(targetDir); + Path base = Paths.get(baseDir); + println("Copying " + files.size() + " files to " + target); + for (Path f : files) { + Path t = target.resolve(base.relativize(f)); byte[] data = readFile(f); - mkdirs(t.getParentFile()); + mkdirs(t.getParent()); writeFile(t, data); } } @@ -542,7 +522,12 @@ protected void javadoc(String...args) { "Generating ", })); } - Class clazz = Class.forName("com.sun.tools.javadoc.Main"); + Class clazz; + try { + clazz = Class.forName("jdk.javadoc.internal.tool.Main"); + } catch (Exception e) { + clazz = Class.forName("com.sun.tools.javadoc.Main"); + } Method execute = clazz.getMethod("execute", String[].class); result = (Integer) invoke(execute, null, new Object[] { args }); } catch (Exception e) { @@ -594,18 +579,18 @@ protected static String getSHA1(byte[] data) { */ protected void downloadUsingMaven(String target, String group, String artifact, String version, String sha1Checksum) { - String repoDir = "http://repo1.maven.org/maven2"; - File targetFile = new File(target); - if (targetFile.exists()) { + String repoDir = "https://repo1.maven.org/maven2"; + Path targetFile = Paths.get(target); + if (Files.exists(targetFile)) { return; } String repoFile = group.replace('.', '/') + "/" + artifact + "/" + version + "/" + artifact + "-" + version + ".jar"; - mkdirs(targetFile.getAbsoluteFile().getParentFile()); - String localMavenDir = getLocalMavenDir(); - if (new File(localMavenDir).exists()) { - File f = new File(localMavenDir, 
repoFile); - if (!f.exists()) { + mkdirs(targetFile.toAbsolutePath().getParent()); + Path localMavenDir = Paths.get(getLocalMavenDir()); + if (Files.isDirectory(localMavenDir)) { + Path f = localMavenDir.resolve(repoFile); + if (!Files.exists(f)) { try { execScript("mvn", args( "org.apache.maven.plugins:maven-dependency-plugin:2.1:get", @@ -615,7 +600,7 @@ protected void downloadUsingMaven(String target, String group, println("Could not download using Maven: " + e.toString()); } } - if (f.exists()) { + if (Files.exists(f)) { byte[] data = readFile(f); String got = getSHA1(data); if (sha1Checksum == null) { @@ -625,7 +610,7 @@ protected void downloadUsingMaven(String target, String group, throw new RuntimeException( "SHA1 checksum mismatch; got: " + got + " expected: " + sha1Checksum + - " for file " + f.getAbsolutePath()); + " for file " + f.toAbsolutePath()); } } writeFile(targetFile, data); @@ -650,11 +635,11 @@ protected String getLocalMavenDir() { * @param sha1Checksum the SHA-1 checksum or null */ protected void download(String target, String fileURL, String sha1Checksum) { - File targetFile = new File(target); - if (targetFile.exists()) { + Path targetFile = Paths.get(target); + if (Files.exists(targetFile)) { return; } - mkdirs(targetFile.getAbsoluteFile().getParentFile()); + mkdirs(targetFile.toAbsolutePath().getParent()); ByteArrayOutputStream buff = new ByteArrayOutputStream(); try { println("Downloading " + fileURL); @@ -664,7 +649,7 @@ protected void download(String target, String fileURL, String sha1Checksum) { int len = 0; while (true) { long now = System.nanoTime(); - if (now > last + TimeUnit.SECONDS.toNanos(1)) { + if (now - last > 1_000_000_000L) { println("Downloaded " + len + " bytes"); last = now; } @@ -701,7 +686,7 @@ protected void download(String target, String fileURL, String sha1Checksum) { */ protected FileList files(String dir) { FileList list = new FileList(); - addFiles(list, new File(dir)); + addFiles(list, Paths.get(dir)); return 
list; } @@ -715,42 +700,35 @@ protected static StringList args(String...args) { return new StringList(args); } - private void addFiles(FileList list, File file) { - if (file.getName().startsWith(".svn")) { + private static void addFiles(FileList list, Path file) { + if (file.getFileName().toString().startsWith(".svn")) { // ignore - } else if (file.isDirectory()) { - String path = file.getPath(); - for (String fileName : file.list()) { - addFiles(list, new File(path, fileName)); + } else if (Files.isDirectory(file)) { + try { + Files.walkFileTree(file, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + list.add(file); + return FileVisitResult.CONTINUE; + } + }); + } catch (IOException e) { + throw new RuntimeException("Error reading directory " + file, e); } } else { list.add(file); } } - private static String removeBase(String basePath, String path) { - if (path.startsWith(basePath)) { - path = path.substring(basePath.length()); - } - path = path.replace('\\', '/'); - if (path.startsWith("/")) { - path = path.substring(1); - } - return path; - } - /** * Create or overwrite a file. 
* * @param file the file * @param data the data to write */ - public static void writeFile(File file, byte[] data) { + public static void writeFile(Path file, byte[] data) { try { - RandomAccessFile ra = new RandomAccessFile(file, "rw"); - ra.write(data); - ra.setLength(data.length); - ra.close(); + Files.write(file, data); } catch (IOException e) { throw new RuntimeException("Error writing to file " + file, e); } @@ -762,28 +740,11 @@ public static void writeFile(File file, byte[] data) { * @param file the file * @return the data */ - public static byte[] readFile(File file) { - RandomAccessFile ra = null; + public static byte[] readFile(Path file) { try { - ra = new RandomAccessFile(file, "r"); - long len = ra.length(); - if (len >= Integer.MAX_VALUE) { - throw new RuntimeException("File " + file.getPath() + " is too large"); - } - byte[] buffer = new byte[(int) len]; - ra.readFully(buffer); - ra.close(); - return buffer; + return Files.readAllBytes(file); } catch (IOException e) { throw new RuntimeException("Error reading from file " + file, e); - } finally { - if (ra != null) { - try { - ra.close(); - } catch (IOException e) { - // ignore - } - } } } @@ -831,20 +792,17 @@ private static long zipOrJar(String destFile, FileList files, String basePath, boolean storeOnly, boolean sortBySuffix, boolean jar) { if (sortBySuffix) { // for better compressibility, sort by suffix, then name - Collections.sort(files, new Comparator() { - @Override - public int compare(File f1, File f2) { - String p1 = f1.getPath(); - String p2 = f2.getPath(); - int comp = getSuffix(p1).compareTo(getSuffix(p2)); - if (comp == 0) { - comp = p1.compareTo(p2); - } - return comp; + files.sort((f1, f2) -> { + String p1 = f1.toString(); + String p2 = f2.toString(); + int comp = getSuffix(p1).compareTo(getSuffix(p2)); + if (comp == 0) { + comp = p1.compareTo(p2); } + return comp; }); } else if (jar) { - Collections.sort(files, new Comparator() { + files.sort(new Comparator() { private int 
priority(String path) { if (path.startsWith("META-INF/")) { if (path.equals("META-INF/MANIFEST.MF")) { @@ -862,9 +820,9 @@ private int priority(String path) { } @Override - public int compare(File f1, File f2) { - String p1 = f1.getPath(); - String p2 = f2.getPath(); + public int compare(Path f1, Path f2) { + String p1 = f1.toString(); + String p2 = f2.toString(); int comp = Integer.compare(priority(p1), priority(p2)); if (comp != 0) { return comp; @@ -873,16 +831,16 @@ public int compare(File f1, File f2) { } }); } - mkdirs(new File(destFile).getAbsoluteFile().getParentFile()); - // normalize the path (replace / with \ if required) - basePath = new File(basePath).getPath(); + Path dest = Paths.get(destFile).toAbsolutePath(); + mkdirs(dest.getParent()); + Path base = Paths.get(basePath); try { - if (new File(destFile).isDirectory()) { + if (Files.isDirectory(dest)) { throw new IOException( "Can't create the file as a directory with this name already exists: " + destFile); } - OutputStream out = new BufferedOutputStream(new FileOutputStream(destFile)); + OutputStream out = new BufferedOutputStream(Files.newOutputStream(dest)); ZipOutputStream zipOut; if (jar) { zipOut = new JarOutputStream(out); @@ -893,14 +851,13 @@ public int compare(File f1, File f2) { zipOut.setMethod(ZipOutputStream.STORED); } zipOut.setLevel(Deflater.BEST_COMPRESSION); - for (File file : files) { - String fileName = file.getPath(); - String entryName = removeBase(basePath, fileName); + for (Path file : files) { + String entryName = base.relativize(file).toString().replace('\\', '/'); byte[] data = readFile(file); ZipEntry entry = new ZipEntry(entryName); CRC32 crc = new CRC32(); crc.update(data); - entry.setSize(file.length()); + entry.setSize(data.length); entry.setCrc(crc.getValue()); zipOut.putNextEntry(entry); zipOut.write(data); @@ -908,14 +865,14 @@ public int compare(File f1, File f2) { } zipOut.closeEntry(); zipOut.close(); - return new File(destFile).length() / 1024; + return 
Files.size(dest) / 1024; } catch (IOException e) { throw new RuntimeException("Error creating file " + destFile, e); } } /** - * Get the current java specification version (for example, 1.4). + * Get the current java specification version (for example, 1.8). * * @return the java specification version */ @@ -926,15 +883,15 @@ protected static String getJavaSpecVersion() { /** * Get the current Java version as integer value. * - * @return the Java version (7, 8, 9, 10, 11, etc) + * @return the Java version (8, 9, 10, 11, 12, 13, etc) */ public static int getJavaVersion() { - int version = 7; + int version = 8; String v = getJavaSpecVersion(); if (v != null) { int idx = v.indexOf('.'); if (idx >= 0) { - // 1.7, 1.8 + // 1.8 v = v.substring(idx + 1); } version = Integer.parseInt(v); @@ -944,8 +901,8 @@ public static int getJavaVersion() { private static List getPaths(FileList files) { StringList list = new StringList(); - for (File f : files) { - list.add(f.getPath()); + for (Path f : files) { + list.add(f.toString()); } return list; } @@ -1009,22 +966,17 @@ protected void java(String className, StringList args) { * @param dir the directory to create */ protected static void mkdir(String dir) { - File f = new File(dir); - if (f.exists()) { - if (f.isFile()) { - throw new RuntimeException("Can not create directory " + dir - + " because a file with this name exists"); - } - } else { - mkdirs(f); - } + mkdirs(Paths.get(dir)); } - private static void mkdirs(File f) { - if (!f.exists()) { - if (!f.mkdirs()) { - throw new RuntimeException("Can not create directory " + f.getAbsolutePath()); - } + private static void mkdirs(Path f) { + try { + Files.createDirectories(f); + } catch (FileAlreadyExistsException e) { + throw new RuntimeException("Can not create directory " + e.getFile() + + " because a file with this name exists"); + } catch (IOException e) { + throw new RuntimeException("Can not create directory " + f.toAbsolutePath()); } } @@ -1035,7 +987,7 @@ private static 
void mkdirs(File f) { */ protected void delete(String dir) { println("Deleting " + dir); - delete(new File(dir)); + deleteRecursive(Paths.get(dir)); } /** @@ -1044,21 +996,37 @@ protected void delete(String dir) { * @param files the name of the files to delete */ protected void delete(FileList files) { - for (File f : files) { - delete(f); + for (Path f : files) { + deleteRecursive(f); } } - private void delete(File file) { - if (file.exists()) { - if (file.isDirectory()) { - String path = file.getPath(); - for (String fileName : file.list()) { - delete(new File(path, fileName)); - } - } - if (!file.delete()) { - throw new RuntimeException("Can not delete " + file.getPath()); + /** + * Delete a file or a directory with its content. + * + * @param file the file or directory to delete + */ + public static void deleteRecursive(Path file) { + if (Files.exists(file)) { + try { + Files.walkFileTree(file, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + Files.delete(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { + if (exc == null) { + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + throw exc; + } + }); + } catch (IOException e) { + throw new RuntimeException("Can not delete " + file); } } } diff --git a/h2/src/tools/org/h2/build/code/AbbaDetect.java b/h2/src/tools/org/h2/build/code/AbbaDetect.java index b4cbcb7ee6..68bc0ab2d7 100644 --- a/h2/src/tools/org/h2/build/code/AbbaDetect.java +++ b/h2/src/tools/org/h2/build/code/AbbaDetect.java @@ -1,14 +1,18 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.code; -import java.io.File; import java.io.IOException; -import java.io.RandomAccessFile; import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; /** * Enable / disable AB-BA deadlock detector code. @@ -23,31 +27,29 @@ public class AbbaDetect { */ public static void main(String... args) throws Exception { String baseDir = "src/main"; - process(new File(baseDir), true); + Files.walkFileTree(Paths.get(baseDir), new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + process(file, true); + return FileVisitResult.CONTINUE; + } + }); } - private static void process(File file, boolean enable) throws IOException { - String name = file.getName(); - if (file.isDirectory()) { - if (name.equals("CVS") || name.equals(".svn")) { - return; - } - for (File f : file.listFiles()) { - process(f, enable); - } - return; - } + /** + * Process a file. 
+ * + * @param file the file + */ + static void process(Path file, boolean enable) throws IOException { + String name = file.getFileName().toString(); if (!name.endsWith(".java")) { return; } if (name.endsWith("AbbaDetector.java")) { return; } - RandomAccessFile in = new RandomAccessFile(file, "r"); - byte[] data = new byte[(int) file.length()]; - in.readFully(data); - in.close(); - String source = new String(data, StandardCharsets.UTF_8); + String source = new String(Files.readAllBytes(file), StandardCharsets.UTF_8); String original = source; source = disable(source); @@ -62,15 +64,13 @@ private static void process(File file, boolean enable) throws IOException { if (source.equals(original)) { return; } - File newFile = new File(file + ".new"); - RandomAccessFile out = new RandomAccessFile(newFile, "rw"); - out.write(source.getBytes(StandardCharsets.UTF_8)); - out.close(); + Path newFile = Paths.get(file.toString() + ".new"); + Files.write(newFile, source.getBytes(StandardCharsets.UTF_8)); - File oldFile = new File(file + ".old"); - file.renameTo(oldFile); - newFile.renameTo(file); - oldFile.delete(); + Path oldFile = Paths.get(file.toString() + ".old"); + Files.move(file, oldFile); + Files.move(newFile, file); + Files.delete(oldFile); } private static String disable(String source) { diff --git a/h2/src/tools/org/h2/build/code/CheckJavadoc.java b/h2/src/tools/org/h2/build/code/CheckJavadoc.java index ee31279059..a621e70fc8 100644 --- a/h2/src/tools/org/h2/build/code/CheckJavadoc.java +++ b/h2/src/tools/org/h2/build/code/CheckJavadoc.java @@ -1,13 +1,16 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.code; -import java.io.File; import java.io.IOException; -import java.io.RandomAccessFile; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; /** * This tool checks that for each .java file there is a package.html file, @@ -31,32 +34,28 @@ public static void main(String... args) throws Exception { } private void run() throws Exception { - String baseDir = "src"; - check(new File(baseDir)); + check(Paths.get("src")); if (errorCount > 0) { throw new Exception(errorCount + " errors found"); } } - private int check(File file) throws Exception { - String name = file.getName(); - if (file.isDirectory()) { - if (name.equals("CVS") || name.equals(".svn")) { - return 0; - } + private int check(Path file) throws Exception { + String name = file.getFileName().toString(); + if (Files.isDirectory(file)) { boolean foundPackageHtml = false, foundJava = false; - for (File f : file.listFiles()) { - int type = check(f); - if (type == 1) { - foundJava = true; - } else if (type == 2) { - foundPackageHtml = true; + try (DirectoryStream stream = Files.newDirectoryStream(file)) { + for (Path f : stream) { + int type = check(f); + if (type == 1) { + foundJava = true; + } else if (type == 2) { + foundPackageHtml = true; + } } } if (foundJava && !foundPackageHtml) { - System.out.println( - "No package.html file, but a Java file found at: " - + file.getAbsolutePath()); + System.out.println("No package.html file, but a Java file found at: " + file.toAbsolutePath()); errorCount++; } } else { @@ -70,60 +69,49 @@ private int check(File file) throws Exception { return 0; } - private void checkJavadoc(File file) throws IOException { - RandomAccessFile in = new RandomAccessFile(file, "r"); - byte[] data = new byte[(int) file.length()]; - in.readFully(data); - in.close(); - String text = new String(data); - int comment = text.indexOf("/**"); - if 
(comment < 0) { - System.out.println("No Javadoc comment: " + file.getAbsolutePath()); - errorCount++; - } - int pos = 0; - int lineNumber = 1; - boolean inComment = false; - while (true) { - int next = text.indexOf('\n', pos); - if (next < 0) { - break; - } - String rawLine = text.substring(pos, next); - if (rawLine.endsWith("\r")) { - rawLine = rawLine.substring(0, rawLine.length() - 1); - } + private void checkJavadoc(Path file) throws IOException { + List lines = Files.readAllLines(file); + boolean inComment = false, hasJavadoc = false; + for (int lineNumber = 0, size = lines.size(); lineNumber < size;) { + String rawLine = lines.get(lineNumber++); String line = rawLine.trim(); if (line.startsWith("/*")) { + if (!hasJavadoc && line.startsWith("/**")) { + hasJavadoc = true; + } inComment = true; } + int rawLength = rawLine.length(); if (inComment) { - if (rawLine.length() > MAX_COMMENT_LINE_SIZE + int i = line.indexOf("*/", 2); + if (i >= 0) { + inComment = false; + } + if (i == rawLength - 2 && rawLength > MAX_COMMENT_LINE_SIZE && !line.trim().startsWith("* http://") && !line.trim().startsWith("* https://")) { - System.out.println("Long line: " + file.getAbsolutePath() - + " (" + file.getName() + ":" + lineNumber + ")"); + System.out.println("Long line: " + file.toAbsolutePath() + + " (" + file.getFileName() + ":" + lineNumber + ")"); errorCount++; } - if (line.endsWith("*/")) { - inComment = false; - } } if (!inComment && line.startsWith("//")) { - if (rawLine.length() > MAX_COMMENT_LINE_SIZE + if (rawLength > MAX_COMMENT_LINE_SIZE && !line.trim().startsWith("// http://") && !line.trim().startsWith("// https://")) { - System.out.println("Long line: " + file.getAbsolutePath() - + " (" + file.getName() + ":" + lineNumber + ")"); + System.out.println("Long line: " + file.toAbsolutePath() + + " (" + file.getFileName() + ":" + lineNumber + ")"); errorCount++; } - } else if (!inComment && rawLine.length() > MAX_SOURCE_LINE_SIZE) { - System.out.println("Long line: " 
+ file.getAbsolutePath() - + " (" + file.getName() + ":" + lineNumber + ")"); + } else if (!inComment && rawLength > MAX_SOURCE_LINE_SIZE) { + System.out.println("Long line: " + file.toAbsolutePath() + + " (" + file.getFileName() + ":" + lineNumber + ")"); errorCount++; } - lineNumber++; - pos = next + 1; + } + if (!hasJavadoc) { + System.out.println("No Javadoc comment: " + file.toAbsolutePath()); + errorCount++; } } diff --git a/h2/src/tools/org/h2/build/code/CheckTextFiles.java b/h2/src/tools/org/h2/build/code/CheckTextFiles.java index 50d47b08e4..a8b3c2453b 100644 --- a/h2/src/tools/org/h2/build/code/CheckTextFiles.java +++ b/h2/src/tools/org/h2/build/code/CheckTextFiles.java @@ -1,13 +1,19 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.code; import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.RandomAccessFile; +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.Arrays; /** @@ -20,7 +26,7 @@ public class CheckTextFiles { private static final int MAX_SOURCE_LINE_SIZE = 120; // must contain "+" otherwise this here counts as well - private static final String COPYRIGHT1 = "Copyright 2004-201"; + private static final String COPYRIGHT1 = "Copyright 2004-2022"; private static final String COPYRIGHT2 = "H2 Group."; private static final String LICENSE = "Multiple-Licensed " + "under the MPL 2.0"; @@ -57,73 +63,57 @@ public static void main(String... 
args) throws Exception { } private void run() throws Exception { - String baseDir = "src"; - check(new File(baseDir)); + Files.walkFileTree(Paths.get("src"), new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + check(file); + return FileVisitResult.CONTINUE; + } + }); if (hasError) { throw new Exception("Errors found"); } } - private void check(File file) throws Exception { - String name = file.getName(); - if (file.isDirectory()) { - if (name.equals("CVS") || name.equals(".svn")) { - return; - } - for (File f : file.listFiles()) { - check(f); - } - } else { - String suffix = ""; - int lastDot = name.lastIndexOf('.'); - if (lastDot >= 0) { - suffix = name.substring(lastDot + 1); - } - boolean check = false, ignore = false; - for (String s : SUFFIX_CHECK) { - if (suffix.equals(s)) { - check = true; - } + void check(Path file) throws IOException { + String name = file.getFileName().toString(); + String suffix = ""; + int lastDot = name.lastIndexOf('.'); + if (lastDot >= 0) { + suffix = name.substring(lastDot + 1); + } + boolean check = false, ignore = false; + for (String s : SUFFIX_CHECK) { + if (suffix.equals(s)) { + check = true; } -// if (name.endsWith(".html") && name.indexOf("_ja") > 0) { -// int todoRemoveJapaneseFiles; -// // Japanese html files are UTF-8 at this time -// check = false; -// ignore = true; -// } - if (name.endsWith(".utf8.txt") || - (name.startsWith("_docs_") && - name.endsWith(".properties"))) { - check = false; + } + for (String s : SUFFIX_IGNORE) { + if (suffix.equals(s)) { ignore = true; } - for (String s : SUFFIX_IGNORE) { - if (suffix.equals(s)) { - ignore = true; - } - } - boolean checkLicense = true; - for (String ig : suffixIgnoreLicense) { - if (suffix.equals(ig) || name.endsWith(ig)) { - checkLicense = false; - break; - } - } - if (ignore == check) { - throw new RuntimeException("Unknown suffix: " + suffix - + " for file: " + file.getAbsolutePath()); - 
} - useCRLF = false; - for (String s : SUFFIX_CRLF) { - if (suffix.equals(s)) { - useCRLF = true; - break; - } + } + boolean checkLicense = true; + for (String ig : suffixIgnoreLicense) { + if (suffix.equals(ig) || name.endsWith(ig)) { + checkLicense = false; + break; } - if (check) { - checkOrFixFile(file, AUTO_FIX, checkLicense); + } + if (ignore == check) { + throw new RuntimeException("Unknown suffix: " + suffix + + " for file: " + file.toAbsolutePath()); + } + useCRLF = false; + for (String s : SUFFIX_CRLF) { + if (suffix.equals(s)) { + useCRLF = true; + break; } } + if (check) { + checkOrFixFile(file, AUTO_FIX, checkLicense); + } } /** @@ -136,13 +126,9 @@ private void check(File file) throws Exception { * @param fix automatically fix newline characters and trailing spaces * @param checkLicense check the license and copyright */ - public void checkOrFixFile(File file, boolean fix, boolean checkLicense) - throws Exception { - RandomAccessFile in = new RandomAccessFile(file, "r"); - byte[] data = new byte[(int) file.length()]; + public void checkOrFixFile(Path file, boolean fix, boolean checkLicense) throws IOException { + byte[] data = Files.readAllBytes(file); ByteArrayOutputStream out = fix ? 
new ByteArrayOutputStream() : null; - in.readFully(data); - in.close(); if (checkLicense) { if (data.length > COPYRIGHT1.length() + LICENSE.length()) { // don't check tiny files @@ -192,12 +178,15 @@ public void checkOrFixFile(File file, boolean fix, boolean checkLicense) lastWasWhitespace = false; line++; int lineLength = i - startLinePos; - if (file.getName().endsWith(".java")) { + if (file.getFileName().toString().endsWith(".java")) { if (i > 0 && data[i - 1] == '\r') { lineLength--; } if (lineLength > MAX_SOURCE_LINE_SIZE) { - fail(file, "line too long: " + lineLength, line); + String s = new String(data, startLinePos, lineLength).trim(); + if (!s.startsWith("// http://") && !s.startsWith("// https://")) { + fail(file, "line too long: " + lineLength, line); + } } } startLinePos = i; @@ -263,11 +252,8 @@ public void checkOrFixFile(File file, boolean fix, boolean checkLicense) if (fix) { byte[] changed = out.toByteArray(); if (!Arrays.equals(data, changed)) { - RandomAccessFile f = new RandomAccessFile(file, "rw"); - f.write(changed); - f.setLength(changed.length); - f.close(); - System.out.println("CHANGED: " + file.getName()); + Files.write(file, changed); + System.out.println("CHANGED: " + file.getFileName()); } } line = 1; @@ -288,11 +274,12 @@ public void checkOrFixFile(File file, boolean fix, boolean checkLicense) } } - private void fail(File file, String error, int line) { + private void fail(Path file, String error, int line) { + file = file.toAbsolutePath(); if (line <= 0) { line = 1; } - String name = file.getAbsolutePath(); + String name = file.toString(); int idx = name.lastIndexOf(File.separatorChar); if (idx >= 0) { name = name.replace(File.separatorChar, '.'); @@ -302,8 +289,7 @@ private void fail(File file, String error, int line) { name = name.substring(idx); } } - System.out.println("FAIL at " + name + " " + error + " " - + file.getAbsolutePath()); + System.out.println("FAIL at " + name + " " + error + " " + file.toAbsolutePath()); hasError = 
true; if (failOnError) { throw new RuntimeException("FAIL"); diff --git a/h2/src/tools/org/h2/build/code/package.html b/h2/src/tools/org/h2/build/code/package.html index dafd84f9e5..8f33d88b5b 100644 --- a/h2/src/tools/org/h2/build/code/package.html +++ b/h2/src/tools/org/h2/build/code/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/build/doc/BnfRailroad.java b/h2/src/tools/org/h2/build/doc/BnfRailroad.java index c1bd0eb1b5..033c3ac149 100644 --- a/h2/src/tools/org/h2/build/doc/BnfRailroad.java +++ b/h2/src/tools/org/h2/build/doc/BnfRailroad.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; @@ -123,6 +123,8 @@ static String getHtmlText(int type) { return "["; case RuleFixed.CLOSE_BRACKET: return "]"; + case RuleFixed.JSON_TEXT: + return "JSON text"; default: throw new AssertionError("type="+type); } @@ -133,15 +135,8 @@ public void visitRuleList(boolean or, ArrayList list) { StringBuilder buff = new StringBuilder(); if (or) { buff.append(""); - int i = 0; - for (Rule r : list) { - String a = i == 0 ? "t" : i == list.size() - 1 ? "l" : "k"; - i++; - buff.append(""); + for (int i = 0, l = list.size() - 1; i <= l; i++) { + visitOrItem(buff, list.get(i), i == 0 ? "t" : i == l ? "l" : "k"); } buff.append("
          "); - r.accept(this); - buff.append(html); - buff.append("
          "); } else { @@ -161,9 +156,7 @@ public void visitRuleList(boolean or, ArrayList list) { @Override public void visitRuleOptional(Rule rule) { StringBuilder buff = new StringBuilder(); - buff.append(""); - buff.append("" + - ""); + writeOptionalStart(buff); buff.append("" + "
           
          "); rule.accept(this); @@ -172,4 +165,36 @@ public void visitRuleOptional(Rule rule) { html = buff.toString(); } + @Override + public void visitRuleOptional(ArrayList list) { + StringBuilder buff = new StringBuilder(); + writeOptionalStart(buff); + for (int i = 0, l = list.size() - 1; i <= l; i++) { + visitOrItem(buff, list.get(i), i == l ? "l" : "k"); + } + buff.append("
          "); + html = buff.toString(); + } + + private static void writeOptionalStart(StringBuilder buff) { + buff.append(""); + buff.append("" + + ""); + } + + private void visitOrItem(StringBuilder buff, Rule r, String a) { + buff.append(""); + } + + @Override + public void visitRuleExtension(Rule rule, boolean compatibility) { + StringBuilder buff = new StringBuilder("
          "); + rule.accept(this); + html = buff.append(html).append("
          ").toString(); + } + } diff --git a/h2/src/tools/org/h2/build/doc/BnfSyntax.java b/h2/src/tools/org/h2/build/doc/BnfSyntax.java index 21fd9b6145..d1e8e6188e 100644 --- a/h2/src/tools/org/h2/build/doc/BnfSyntax.java +++ b/h2/src/tools/org/h2/build/doc/BnfSyntax.java @@ -1,10 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.StringTokenizer; import org.h2.bnf.Bnf; @@ -32,20 +33,79 @@ public String getHtml(Bnf bnf, String syntaxLines) { syntaxLines = StringUtils.replaceAll(syntaxLines, "\n ", "\n"); StringTokenizer tokenizer = Bnf.getTokenizer(syntaxLines); StringBuilder buff = new StringBuilder(); + ArrayDeque deque = new ArrayDeque<>(); + boolean extension = false; while (tokenizer.hasMoreTokens()) { String s = tokenizer.nextToken(); + if (s.equals("@c@")) { + if (!extension) { + extension = true; + buff.append(""); + } + s = skipAfterExtensionStart(tokenizer); + } else if (s.equals("@h2@")) { + if (!extension) { + extension = true; + buff.append(""); + } + s = skipAfterExtensionStart(tokenizer); + } + if (extension) { + if (s.length() == 1) { + char c = s.charAt(0); + switch (c) { + case '[': + deque.addLast(']'); + break; + case '{': + deque.addLast('}'); + break; + case ']': + case '}': + char c2 = deque.removeLast(); + if (c != c2) { + throw new AssertionError("Expected " + c2 + " got " + c); + } + break; + default: + if (deque.isEmpty()) { + deque.add('*'); + } + } + } else if (deque.isEmpty()) { + deque.add('*'); + } + } if (s.length() == 1 || StringUtils.toUpperEnglish(s).equals(s)) { buff.append(StringUtils.xmlText(s)); + if (extension && deque.isEmpty()) { + extension = false; + 
buff.append(""); + } continue; } buff.append(getLink(bnf, s)); } + if (extension) { + if (deque.size() != 1 || deque.getLast() != '*') { + throw new AssertionError("Expected " + deque.getLast() + " got end of data"); + } + buff.append(""); + } String s = buff.toString(); // ensure it works within XHTML comments s = StringUtils.replaceAll(s, "--", "--"); return s; } + private static String skipAfterExtensionStart(StringTokenizer tokenizer) { + String s; + do { + s = tokenizer.nextToken(); + } while (s.equals(" ")); + return s; + } + /** * Get the HTML link to the given token. * @@ -106,9 +166,19 @@ public void visitRuleOptional(Rule rule) { // not used } + @Override + public void visitRuleOptional(ArrayList list) { + // not used + } + @Override public void visitRuleRepeat(boolean comma, Rule rule) { // not used } + @Override + public void visitRuleExtension(Rule rule, boolean compatibility) { + // not used + } + } diff --git a/h2/src/tools/org/h2/build/doc/FileConverter.java b/h2/src/tools/org/h2/build/doc/FileConverter.java index 7cfa6ad427..3d71af469f 100644 --- a/h2/src/tools/org/h2/build/doc/FileConverter.java +++ b/h2/src/tools/org/h2/build/doc/FileConverter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; diff --git a/h2/src/tools/org/h2/build/doc/GenerateDoc.java b/h2/src/tools/org/h2/build/doc/GenerateDoc.java index 8f65a13686..4c7378324f 100644 --- a/h2/src/tools/org/h2/build/doc/GenerateDoc.java +++ b/h2/src/tools/org/h2/build/doc/GenerateDoc.java @@ -1,39 +1,44 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.sql.Connection; import java.sql.DriverManager; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.Statement; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import org.h2.bnf.Bnf; import org.h2.engine.Constants; import org.h2.server.web.PageParser; -import org.h2.util.IOUtils; -import org.h2.util.JdbcUtils; +import org.h2.tools.Csv; import org.h2.util.StringUtils; /** * This application generates sections of the documentation - * by converting the built-in help section (INFORMATION_SCHEMA.HELP) + * by converting the built-in help section * to cross linked html. */ public class GenerateDoc { - private static final String IN_HELP = "src/docsrc/help/help.csv"; - private String inDir = "src/docsrc/html"; - private String outDir = "docs/html"; + private static final String IN_HELP = "src/main/org/h2/res/help.csv"; + private Path inDir = Paths.get("src/docsrc/html"); + private Path outDir = Paths.get("docs/html"); private Connection conn; private final HashMap session = new HashMap<>(); @@ -52,22 +57,21 @@ public static void main(String... args) throws Exception { private void run(String... 
args) throws Exception { for (int i = 0; i < args.length; i++) { if (args[i].equals("-in")) { - inDir = args[++i]; + inDir = Paths.get(args[++i]); } else if (args[i].equals("-out")) { - outDir = args[++i]; + outDir = Paths.get(args[++i]); } } Class.forName("org.h2.Driver"); conn = DriverManager.getConnection("jdbc:h2:mem:"); - new File(outDir).mkdirs(); - new RailroadImages().run(outDir + "/images"); + Files.createDirectories(outDir); + new RailroadImages().run(outDir.resolve("images")); bnf = Bnf.getInstance(null); bnf.linkStatements(); - session.put("version", Constants.getVersion()); + session.put("version", Constants.VERSION); session.put("versionDate", Constants.BUILD_DATE); - session.put("stableVersion", Constants.getVersionStable()); - session.put("stableVersionDate", Constants.BUILD_DATE_STABLE); - // String help = "SELECT * FROM INFORMATION_SCHEMA.HELP WHERE SECTION"; + session.put("downloadRoot", + "https://github.com/h2database/h2database/releases/download/version-" + Constants.VERSION); String help = "SELECT ROWNUM ID, * FROM CSVREAD('" + IN_HELP + "', NULL, 'lineComment=#') WHERE SECTION "; map("commandsDML", @@ -82,8 +86,7 @@ private void run(String... args) throws Exception { help + "= 'Datetime fields' ORDER BY ID", true, false); map("otherGrammar", help + "= 'Other Grammar' ORDER BY ID", true, false); - map("functionsAggregate", - help + "= 'Functions (Aggregate)' ORDER BY ID", true, false); + map("functionsNumeric", help + "= 'Functions (Numeric)' ORDER BY ID", true, false); map("functionsString", @@ -92,59 +95,139 @@ private void run(String... 
args) throws Exception { help + "= 'Functions (Time and Date)' ORDER BY ID", true, false); map("functionsSystem", help + "= 'Functions (System)' ORDER BY ID", true, false); - map("functionsWindow", - help + "= 'Functions (Window)' ORDER BY ID", true, false); + map("functionsJson", + help + "= 'Functions (JSON)' ORDER BY ID", true, false); + map("functionsTable", + help + "= 'Functions (Table)' ORDER BY ID", true, false); + + map("aggregateFunctionsGeneral", + help + "= 'Aggregate Functions (General)' ORDER BY ID", true, false); + map("aggregateFunctionsBinarySet", + help + "= 'Aggregate Functions (Binary Set)' ORDER BY ID", true, false); + map("aggregateFunctionsOrdered", + help + "= 'Aggregate Functions (Ordered)' ORDER BY ID", true, false); + map("aggregateFunctionsHypothetical", + help + "= 'Aggregate Functions (Hypothetical Set)' ORDER BY ID", true, false); + map("aggregateFunctionsInverse", + help + "= 'Aggregate Functions (Inverse Distribution)' ORDER BY ID", true, false); + map("aggregateFunctionsJSON", + help + "= 'Aggregate Functions (JSON)' ORDER BY ID", true, false); + + map("windowFunctionsRowNumber", + help + "= 'Window Functions (Row Number)' ORDER BY ID", true, false); + map("windowFunctionsRank", + help + "= 'Window Functions (Rank)' ORDER BY ID", true, false); + map("windowFunctionsLeadLag", + help + "= 'Window Functions (Lead or Lag)' ORDER BY ID", true, false); + map("windowFunctionsNth", + help + "= 'Window Functions (Nth Value)' ORDER BY ID", true, false); + map("windowFunctionsOther", + help + "= 'Window Functions (Other)' ORDER BY ID", true, false); + map("dataTypes", help + "LIKE 'Data Types%' ORDER BY SECTION, ID", true, true); map("intervalDataTypes", help + "LIKE 'Interval Data Types%' ORDER BY SECTION, ID", true, true); - map("informationSchema", "SELECT TABLE_NAME TOPIC, " + - "GROUP_CONCAT(COLUMN_NAME " + - "ORDER BY ORDINAL_POSITION SEPARATOR ', ') SYNTAX " + - "FROM INFORMATION_SCHEMA.COLUMNS " + - "WHERE 
TABLE_SCHEMA='INFORMATION_SCHEMA' " + - "GROUP BY TABLE_NAME ORDER BY TABLE_NAME", false, false); - processAll(""); - conn.close(); - } + HashMap informationSchemaTables = new HashMap<>(); + HashMap informationSchemaColumns = new HashMap<>(512); + Csv csv = new Csv(); + csv.setLineCommentCharacter('#'); + try (ResultSet rs = csv.read("src/docsrc/help/information_schema.csv", null, null)) { + while (rs.next()) { + String tableName = rs.getString(1); + String columnName = rs.getString(2); + String description = rs.getString(3); + if (columnName != null) { + informationSchemaColumns.put(tableName == null ? columnName : tableName + '.' + columnName, + description); + } else { + informationSchemaTables.put(tableName, description); + } + } + } + int errorCount = 0; + try (Statement stat = conn.createStatement(); + PreparedStatement prep = conn.prepareStatement("SELECT COLUMN_NAME, " + + "DATA_TYPE_SQL('INFORMATION_SCHEMA', TABLE_NAME, 'TABLE', DTD_IDENTIFIER) DT " + + "FROM INFORMATION_SCHEMA.COLUMNS " + + "WHERE TABLE_SCHEMA = 'INFORMATION_SCHEMA' AND TABLE_NAME = ? ORDER BY ORDINAL_POSITION")) { + ResultSet rs = stat.executeQuery("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES " + + "WHERE TABLE_SCHEMA = 'INFORMATION_SCHEMA' ORDER BY TABLE_NAME"); - private void processAll(String dir) throws Exception { - if (dir.endsWith(".svn")) { - return; + ArrayList> list = new ArrayList<>(); + StringBuilder builder = new StringBuilder(); + while (rs.next()) { + HashMap map = new HashMap<>(8); + String table = rs.getString(1); + map.put("table", table); + map.put("link", "information_schema_" + StringUtils.urlEncode(table.toLowerCase())); + String description = informationSchemaTables.get(table); + if (description == null) { + System.out.println("No documentation for INFORMATION_SCHEMA." 
+ table); + errorCount++; + description = ""; + } + map.put("description", StringUtils.xmlText(description)); + prep.setString(1, table); + ResultSet rs2 = prep.executeQuery(); + builder.setLength(0); + while (rs2.next()) { + if (rs2.getRow() > 1) { + builder.append('\n'); + } + String column = rs2.getString(1); + description = informationSchemaColumns.get(table + '.' + column); + if (description == null) { + description = informationSchemaColumns.get(column); + if (description == null) { + System.out.println("No documentation for INFORMATION_SCHEMA." + table + '.' + column); + errorCount++; + description = ""; + } + } + builder.append(""); + } + map.put("columns", builder.toString()); + list.add(map); + } + putToMap("informationSchema", list); } - File[] list = new File(inDir + "/" + dir).listFiles(); - for (File file : list) { - if (file.isDirectory()) { - processAll(dir + file.getName()); - } else { - process(dir, file.getName()); + Files.walkFileTree(inDir, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + process(file); + return FileVisitResult.CONTINUE; } + }); + conn.close(); + if (errorCount > 0) { + throw new IOException(errorCount + (errorCount == 1 ? " error" : " errors") + " found"); } } - private void process(String dir, String fileName) throws Exception { - String inFile = inDir + "/" + dir + "/" + fileName; - String outFile = outDir + "/" + dir + "/" + fileName; - new File(outFile).getParentFile().mkdirs(); - FileOutputStream out = new FileOutputStream(outFile); - FileInputStream in = new FileInputStream(inFile); - byte[] bytes = IOUtils.readBytesAndClose(in, 0); - if (fileName.endsWith(".html")) { + /** + * Process a file. 
+ * + * @param inFile the file + */ + void process(Path inFile) throws IOException { + Path outFile = outDir.resolve(inDir.relativize(inFile)); + Files.createDirectories(outFile.getParent()); + byte[] bytes = Files.readAllBytes(inFile); + if (inFile.getFileName().toString().endsWith(".html")) { String page = new String(bytes); page = PageParser.parse(page, session); bytes = page.getBytes(); } - out.write(bytes); - out.close(); + Files.write(outFile, bytes); } private void map(String key, String sql, boolean railroads, boolean forDataTypes) throws Exception { - ResultSet rs = null; - Statement stat = null; - try { - stat = conn.createStatement(); - rs = stat.executeQuery(sql); + try (Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery(sql)) { ArrayList> list = new ArrayList<>(); while (rs.next()) { @@ -192,17 +275,19 @@ private void map(String key, String sql, boolean railroads, boolean forDataTypes list.add(map); } - session.put(key, list); - int div = 3; - int part = (list.size() + div - 1) / div; - for (int i = 0, start = 0; i < div; i++, start += part) { - List> listThird = list.subList(start, - Math.min(start + part, list.size())); - session.put(key + "-" + i, listThird); - } - } finally { - JdbcUtils.closeSilently(rs); - JdbcUtils.closeSilently(stat); + putToMap(key, list); + } + } + + private void putToMap(String key, ArrayList> list) { + session.put(key, list); + int div = 3; + int part = (list.size() + div - 1) / div; + for (int i = 0, start = 0; i < div; i++, start += part) { + int end = Math.min(start + part, list.size()); + List> listThird = start <= end ? 
list.subList(start, end) + : Collections.emptyList(); + session.put(key + '-' + i, listThird); } } @@ -267,15 +352,27 @@ private static String addLinks(String text) { int len = text.length(); int offset = 0; do { - int end = start + 7; - for (; end < len && !Character.isWhitespace(text.charAt(end)); end++) { - // Nothing to do + if (start > 2 && text.regionMatches(start - 2, "](https://h2database.com/html/", 0, 30)) { + int descEnd = start - 2; + int descStart = text.lastIndexOf('[', descEnd - 1) + 1; + int linkStart = start + 28; + int linkEnd = text.indexOf(')', start + 29); + buff.append(text, offset, descStart - 1) // + .append("") // + .append(text, descStart, descEnd) // + .append(""); + offset = linkEnd + 1; + } else { + int end = start + 7; + for (; end < len && !Character.isWhitespace(text.charAt(end)); end++) { + // Nothing to do + } + buff.append(text, offset, start) // + .append("") // + .append(text, start, end) // + .append(""); + offset = end; } - buff.append(text, offset, start) // - .append("") // - .append(text, start, end) // - .append(""); - offset = end; } while ((start = nextLink(text, offset)) >= 0); return buff.append(text, offset, len).toString(); } diff --git a/h2/src/tools/org/h2/build/doc/GenerateHelp.java b/h2/src/tools/org/h2/build/doc/GenerateHelp.java deleted file mode 100644 index 4ed8bb1fd0..0000000000 --- a/h2/src/tools/org/h2/build/doc/GenerateHelp.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.build.doc; - -import java.io.BufferedWriter; -import java.io.FileWriter; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.Types; -import org.h2.tools.Csv; -import org.h2.tools.SimpleResultSet; - -/** - * Generates the help.csv file that is included in the jar file. 
- */ -public class GenerateHelp { - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... args) throws Exception { - String in = "src/docsrc/help/help.csv"; - String out = "src/main/org/h2/res/help.csv"; - Csv csv = new Csv(); - csv.setLineCommentCharacter('#'); - ResultSet rs = csv.read(in, null, null); - SimpleResultSet rs2 = new SimpleResultSet(); - ResultSetMetaData meta = rs.getMetaData(); - int columnCount = meta.getColumnCount() - 1; - for (int i = 0; i < columnCount; i++) { - rs2.addColumn(meta.getColumnLabel(1 + i), Types.VARCHAR, 0, 0); - } - while (rs.next()) { - Object[] row = new Object[columnCount]; - for (int i = 0; i < columnCount; i++) { - String s = rs.getString(1 + i); - if (i == 3) { - int len = s.length(); - int end = 0; - for (; end < len; end++) { - char ch = s.charAt(end); - if (ch == '.') { - end++; - break; - } - if (ch == '"') { - do { - end++; - } while (end < len && s.charAt(end) != '"'); - } - } - s = s.substring(0, end); - } - row[i] = s; - } - rs2.addRow(row); - } - BufferedWriter writer = new BufferedWriter(new FileWriter(out)); - writer.write("# Copyright 2004-2019 H2 Group. " + - "Multiple-Licensed under the MPL 2.0,\n" + - "# and the EPL 1.0 " + - "(http://h2database.com/html/license.html).\n" + - "# Initial Developer: H2 Group\n"); - csv = new Csv(); - csv.setLineSeparator("\n"); - csv.write(writer, rs2); - } - -} diff --git a/h2/src/tools/org/h2/build/doc/LinkChecker.java b/h2/src/tools/org/h2/build/doc/LinkChecker.java index 5bff52e649..e857bf531d 100644 --- a/h2/src/tools/org/h2/build/doc/LinkChecker.java +++ b/h2/src/tools/org/h2/build/doc/LinkChecker.java @@ -1,21 +1,26 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; import java.io.File; -import java.io.FileReader; import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import org.h2.tools.Server; -import org.h2.util.IOUtils; import org.h2.util.StringUtils; /** @@ -35,10 +40,19 @@ public class LinkChecker { "#commands_index", "#grammar_index", "#functions_index", - "#tutorial_index" + "#functions_aggregate_index", + "#functions_window_index", + "#tutorial_index", + "docs/javadoc/" }; - private final HashMap targets = new HashMap<>(); + private static enum TargetKind { + FILE, ID + } + private final HashMap targets = new HashMap<>(); + /** + * Map of source link (i.e. tag) in the document, to the document path + */ private final HashMap links = new HashMap<>(); /** @@ -52,10 +66,10 @@ public static void main(String... args) throws Exception { } private void run(String... 
args) throws Exception { - String dir = "docs"; + Path dir = Paths.get("docs"); for (int i = 0; i < args.length; i++) { if ("-dir".equals(args[i])) { - dir = args[++i]; + dir = Paths.get(args[++i]); } } process(dir); @@ -125,7 +139,7 @@ private void listBadLinks() throws Exception { ArrayList errors = new ArrayList<>(); for (String link : links.keySet()) { if (!link.startsWith("http") && !link.endsWith("h2.pdf") - && link.indexOf("_ja.") < 0) { + && /* For Javadoc 8 */ !link.startsWith("docs/javadoc")) { if (targets.get(link) == null) { errors.add(links.get(link) + ": Link missing " + link); } @@ -137,7 +151,7 @@ private void listBadLinks() throws Exception { } } for (String name : targets.keySet()) { - if (targets.get(name).equals("id")) { + if (targets.get(name) == TargetKind.ID) { boolean ignore = false; for (String to : IGNORE_MISSING_LINKS_TO) { if (name.contains(to)) { @@ -154,34 +168,37 @@ private void listBadLinks() throws Exception { for (String error : errors) { System.out.println(error); } - if (errors.size() > 0) { + if (!errors.isEmpty()) { throw new Exception("Problems where found by the Link Checker"); } } - private void process(String path) throws Exception { - if (path.endsWith("/CVS") || path.endsWith("/.svn")) { - return; - } - File file = new File(path); - if (file.isDirectory()) { - for (String n : file.list()) { - process(path + "/" + n); + private void process(Path path) throws Exception { + Files.walkFileTree(path, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + processFile(file); + return FileVisitResult.CONTINUE; } - } else { - processFile(path); - } + }); } - private void processFile(String path) throws Exception { - targets.put(path, "file"); - String lower = StringUtils.toLowerEnglish(path); + /** + * Process a file. 
+ * + * @param file the file + */ + void processFile(Path file) throws IOException { + String path = file.toString(); + targets.put(path, TargetKind.FILE); + String fileName = file.getFileName().toString(); + String lower = StringUtils.toLowerEnglish(fileName); if (!lower.endsWith(".html") && !lower.endsWith(".htm")) { return; } - String fileName = new File(path).getName(); - String parent = path.substring(0, path.lastIndexOf('/')); - String html = IOUtils.readStringAndClose(new FileReader(path), -1); + Path parent = file.getParent(); + final String html = new String(Files.readAllBytes(file), StandardCharsets.UTF_8); + // find all the target fragments in the document (those elements marked with id attribute) int idx = -1; while (true) { idx = html.indexOf(" id=\"", idx + 1); @@ -195,9 +212,11 @@ private void processFile(String path) throws Exception { } String ref = html.substring(start, end); if (!ref.startsWith("_")) { - targets.put(path + "#" + ref, "id"); + targets.put(path + "#" + ref.replaceAll("%3C|<", "<").replaceAll("%3E|>", ">"), // + TargetKind.ID); } } + // find all the href links in the document idx = -1; while (true) { idx = html.indexOf(" href=\"", idx + 1); @@ -224,19 +243,22 @@ private void processFile(String path) throws Exception { } else if (ref.startsWith("#")) { ref = path + ref; } else { - String p = parent; + Path p = parent; while (ref.startsWith(".")) { if (ref.startsWith("./")) { ref = ref.substring(2); } else if (ref.startsWith("../")) { ref = ref.substring(3); - p = p.substring(0, p.lastIndexOf('/')); + p = p.getParent(); } } - ref = p + "/" + ref; + ref = p + File.separator + ref; } if (ref != null) { - links.put(ref, path); + links.put(ref.replace('/', File.separatorChar) // + .replaceAll("%5B", "[").replaceAll("%5D", "]") // + .replaceAll("%3C", "<").replaceAll("%3E", ">"), // + path); } } idx = -1; @@ -262,8 +284,9 @@ private void processFile(String path) throws Exception { if (type.equals("href")) { // already checked } else if 
(type.equals("id")) { - targets.put(path + "#" + ref, "id"); - } else { + // For Javadoc 8 + targets.put(path + "#" + ref, TargetKind.ID); + } else if (!type.equals("name")) { error(fileName, "Unsupported "); writer.println("H2 Documentation"); writer.println(""); writer.println("

          H2 Database Engine

          "); - writer.println("

          Version " + Constants.getFullVersion() + "

          "); + writer.println("

          Version " + Constants.FULL_VERSION + "

          "); writer.println(finalText); writer.println(""); writer.close(); @@ -77,6 +78,32 @@ private static String disableRailroads(String text) { return text; } + private static String addLegacyFontTag(String fileName, String text) { + int idx1 = text.indexOf("> 4)); + do { + builder.append(text, idx2, idx1); + boolean compat = text.regionMatches(idx1 + 17, "Compat\">", 0, 8); + boolean h2 = text.regionMatches(idx1 + 17, "H2\">", 0, 4); + if (compat == h2) { + throw new RuntimeException("Unknown BNF rule style in file " + fileName); + } + idx2 = text.indexOf("", idx1 + (compat ? 8 : 4)); + if (idx2 <= 0) { + throw new RuntimeException(" not found in file " + fileName); + } + idx2 += 7; + builder.append("") + .append(text, idx1, idx2).append(""); + idx1 = text.indexOf("= 0); + return builder.append(text, idx2, length).toString(); + } + private static String removeHeaderFooter(String fileName, String text) { // String start = " 0) { - int len = reader.read(data, off, length); - off += len; - length -= len; - } - reader.close(); - String s = new String(data); - return s; + return new String(Files.readAllBytes(Paths.get(BASE_DIR, fileName)), StandardCharsets.UTF_8); } } diff --git a/h2/src/tools/org/h2/build/doc/RailroadImages.java b/h2/src/tools/org/h2/build/doc/RailroadImages.java index cd3d3da090..c27620c303 100644 --- a/h2/src/tools/org/h2/build/doc/RailroadImages.java +++ b/h2/src/tools/org/h2/build/doc/RailroadImages.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.doc; @@ -10,8 +10,12 @@ import java.awt.Graphics2D; import java.awt.RenderingHints; import java.awt.image.BufferedImage; -import java.io.File; import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + import javax.imageio.ImageIO; /** @@ -24,26 +28,28 @@ public class RailroadImages { private static final int DIV = 2; private static final int STROKE = 6; - private String outDir; + private Path outDir; /** * This method is called when executing this application from the command * line. * * @param args the command line parameters + * @throws IOException on I/O exception */ - public static void main(String... args) { - new RailroadImages().run("docs/html/images"); + public static void main(String... args) throws IOException { + new RailroadImages().run(Paths.get("docs/html/images")); } /** * Create the images. * * @param out the target directory + * @throws IOException on I/O exception */ - void run(String out) { + void run(Path out) throws IOException { this.outDir = out; - new File(out).mkdirs(); + Files.createDirectories(outDir); BufferedImage img; Graphics2D g; @@ -111,8 +117,8 @@ private void savePng(BufferedImage img, String fileName) { RenderingHints.VALUE_INTERPOLATION_BILINEAR); g.drawImage(img, 0, 0, w / DIV, h / DIV, 0, 0, w, h, null); g.dispose(); - try { - ImageIO.write(smaller, "png", new File(outDir + "/" + fileName)); + try (OutputStream out = Files.newOutputStream(outDir.resolve(fileName))) { + ImageIO.write(smaller, "png", out); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/h2/src/tools/org/h2/build/doc/SpellChecker.java b/h2/src/tools/org/h2/build/doc/SpellChecker.java index 442c09c896..710191f6fa 100644 --- a/h2/src/tools/org/h2/build/doc/SpellChecker.java +++ b/h2/src/tools/org/h2/build/doc/SpellChecker.java @@ -1,12 +1,14 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; -import java.io.File; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -35,8 +37,7 @@ public class SpellChecker { private static final String DELIMITERS = " \n.();-\"=,*/{}_<>+\r:'@[]&\\!#|?$^%~`\t"; private static final String PREFIX_IGNORE = "abc"; - private static final String[] IGNORE_FILES = { "mainWeb.html", - "pg_catalog.sql" }; + private static final String[] IGNORE_FILES = { "mainWeb.html" }; // These are public so we can set them during development testing @@ -72,8 +73,8 @@ public static void main(String... args) throws IOException { } private void run(String dictionaryFileName, String dir) throws IOException { - process(new File(dictionaryFileName)); - process(new File(dir)); + process(Paths.get(dictionaryFileName)); + process(Paths.get(dir)); HashSet unused = new HashSet<>(); unused.addAll(dictionary); unused.removeAll(used); @@ -113,20 +114,20 @@ private void run(String dictionaryFileName, String dir) throws IOException { } } - private void process(File file) throws IOException { - String name = file.getName(); + private void process(Path file) throws IOException { + String name = file.getFileName().toString(); if (name.endsWith(".svn") || name.endsWith(".DS_Store")) { return; } if (name.startsWith("_") && name.indexOf("_en") < 0) { return; } - if (file.isDirectory()) { - for (File f : file.listFiles()) { + if (Files.isDirectory(file)) { + for (Path f : Files.newDirectoryStream(file)) { process(f); } } else { - String fileName = file.getAbsolutePath(); + String fileName = file.toAbsolutePath().toString(); int idx = 
fileName.lastIndexOf('.'); String suffix; if (idx < 0) { @@ -183,10 +184,7 @@ private void scan(String fileName, String text) { System.out.println(); } } - if (notFound.isEmpty()) { - return; - } - if (notFound.size() > 0) { + if (!notFound.isEmpty()) { System.out.println("file: " + fileName); for (String s : notFound) { System.out.print(s + " "); diff --git a/h2/src/tools/org/h2/build/doc/UploadBuild.java b/h2/src/tools/org/h2/build/doc/UploadBuild.java index f29b227bee..dc48eff0e8 100644 --- a/h2/src/tools/org/h2/build/doc/UploadBuild.java +++ b/h2/src/tools/org/h2/build/doc/UploadBuild.java @@ -1,19 +1,20 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileReader; import java.io.IOException; -import java.io.OutputStream; import java.io.StringReader; import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; @@ -22,11 +23,11 @@ import java.util.zip.Deflater; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; + import org.h2.dev.ftp.FtpClient; import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.utils.OutputCatcher; -import org.h2.util.IOUtils; import org.h2.util.ScriptReader; import org.h2.util.StringUtils; @@ -42,6 +43,7 @@ public class UploadBuild { * @param args the command line parameters */ public static 
void main(String... args) throws Exception { + System.exit(0); System.setProperty("h2.socketConnectTimeout", "30000"); String password = System.getProperty("h2.ftpPassword"); if (password == null) { @@ -50,12 +52,11 @@ public static void main(String... args) throws Exception { FtpClient ftp = FtpClient.open("h2database.com"); ftp.login("h2database", password); ftp.changeWorkingDirectory("/httpdocs"); - boolean coverage = new File("coverage/index.html").exists(); + Path coverageFile = Paths.get("coverage/index.html"); + boolean coverage = Files.exists(coverageFile); boolean coverageFailed; if (coverage) { - byte[] data = IOUtils.readBytesAndClose( - new FileInputStream("coverage/index.html"), -1); - String index = new String(data, StandardCharsets.ISO_8859_1); + String index = new String(Files.readAllBytes(coverageFile), StandardCharsets.ISO_8859_1); coverageFailed = index.contains("CLASS=\"h\""); while (true) { int idx = index.indexOf("
          "); - error = true; } else { - testOutput = "No log.txt"; - error = true; + Path logFile = Paths.get("log.txt"); + if (Files.exists(logFile)) { + testOutput = new String(Files.readAllBytes(logFile)); + testOutput = testOutput.replaceAll("\n", "
          "); + error = true; + } else { + testOutput = "No log.txt"; + error = true; + } } if (!ftp.exists("/httpdocs", "automated")) { ftp.makeDirectory("/httpdocs/automated"); @@ -125,11 +127,11 @@ public static void main(String... args) throws Exception { (error ? " FAILED" : "") + (coverageFailed ? " COVERAGE" : "") + "', '" + ts + - "', '
          Output" + - " - Coverage" + - " - Jar');\n"; buildSql += sql; Connection conn; @@ -141,8 +143,8 @@ public static void main(String... args) throws Exception { conn = DriverManager.getConnection("jdbc:h2v1_1:mem:"); } conn.createStatement().execute(buildSql); - String newsfeed = IOUtils.readStringAndClose( - new FileReader("src/tools/org/h2/build/doc/buildNewsfeed.sql"), -1); + String newsfeed = new String(Files.readAllBytes(Paths.get("src/tools/org/h2/build/doc/buildNewsfeed.sql")), + StandardCharsets.UTF_8); ScriptReader r = new ScriptReader(new StringReader(newsfeed)); Statement stat = conn.createStatement(); ResultSet rs = null; @@ -164,21 +166,21 @@ public static void main(String... args) throws Exception { new ByteArrayInputStream(content.getBytes())); ftp.store("/httpdocs/html/testOutput.html", new ByteArrayInputStream(testOutput.getBytes())); - String jarFileName = "bin/h2-" + Constants.getVersion() + ".jar"; + String jarFileName = "bin/h2-" + Constants.VERSION + ".jar"; if (FileUtils.exists(jarFileName)) { ftp.store("/httpdocs/automated/h2-latest.jar", - new FileInputStream(jarFileName)); + Files.newInputStream(Paths.get(jarFileName))); } if (coverage) { ftp.store("/httpdocs/coverage/overview.html", - new FileInputStream("coverage/overview.html")); + Files.newInputStream(Paths.get("coverage/overview.html"))); ftp.store("/httpdocs/coverage/coverage.zip", - new FileInputStream("coverage.zip")); + Files.newInputStream(Paths.get("coverage.zip"))); FileUtils.delete("coverage.zip"); } String mavenRepoDir = System.getProperty("user.home") + "/.m2/repository/"; - boolean mavenSnapshot = new File(mavenRepoDir + - "com/h2database/h2/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.jar").exists(); + boolean mavenSnapshot = Files.exists(Paths.get(mavenRepoDir + + "com/h2database/h2/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.jar")); if (mavenSnapshot) { if (!ftp.exists("/httpdocs", "m2-repo")) { ftp.makeDirectory("/httpdocs/m2-repo"); @@ -203,59 +205,48 @@ public static void main(String... 
args) throws Exception { } ftp.store("/httpdocs/m2-repo/com/h2database/h2" + "/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.pom", - new FileInputStream(mavenRepoDir + - "com/h2database/h2/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.pom")); + Files.newInputStream(Paths.get(mavenRepoDir + + "com/h2database/h2/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.pom"))); ftp.store("/httpdocs/m2-repo/com/h2database/h2" + "/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.jar", - new FileInputStream(mavenRepoDir + - "com/h2database/h2/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.jar")); + Files.newInputStream(Paths.get(mavenRepoDir + + "com/h2database/h2/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.jar"))); ftp.store("/httpdocs/m2-repo/com/h2database/h2-mvstore" + "/1.0-SNAPSHOT/h2-mvstore-1.0-SNAPSHOT.pom", - new FileInputStream(mavenRepoDir + - "com/h2database/h2-mvstore/1.0-SNAPSHOT/h2-mvstore-1.0-SNAPSHOT.pom")); + Files.newInputStream(Paths.get(mavenRepoDir + + "com/h2database/h2-mvstore/1.0-SNAPSHOT/h2-mvstore-1.0-SNAPSHOT.pom"))); ftp.store("/httpdocs/m2-repo/com/h2database/h2-mvstore" + "/1.0-SNAPSHOT/h2-mvstore-1.0-SNAPSHOT.jar", - new FileInputStream(mavenRepoDir + - "com/h2database/h2-mvstore/1.0-SNAPSHOT/h2-mvstore-1.0-SNAPSHOT.jar")); + Files.newInputStream(Paths.get(mavenRepoDir + + "com/h2database/h2-mvstore/1.0-SNAPSHOT/h2-mvstore-1.0-SNAPSHOT.jar"))); } ftp.close(); } - private static void zip(String destFile, String directory, boolean storeOnly) - throws IOException { - OutputStream out = new FileOutputStream(destFile); - ZipOutputStream zipOut = new ZipOutputStream(out); + private static void zip(String destFile, String directory, boolean storeOnly) throws IOException { + ZipOutputStream zipOut = new ZipOutputStream(Files.newOutputStream(Paths.get(destFile))); if (storeOnly) { zipOut.setMethod(ZipOutputStream.STORED); } zipOut.setLevel(Deflater.BEST_COMPRESSION); - addFiles(new File(directory), new File(directory), zipOut); + Path base = Paths.get(directory); + Files.walkFileTree(base, new SimpleFileVisitor() { + @Override + public FileVisitResult 
visitFile(Path file, BasicFileAttributes attrs) throws IOException { + byte[] data = Files.readAllBytes(file); + ZipEntry entry = new ZipEntry(base.relativize(file).toString().replace('\\', '/')); + CRC32 crc = new CRC32(); + crc.update(data); + entry.setSize(data.length); + entry.setCrc(crc.getValue()); + zipOut.putNextEntry(entry); + zipOut.write(data); + zipOut.closeEntry(); + return FileVisitResult.CONTINUE; + } + }); zipOut.finish(); zipOut.close(); } - private static void addFiles(File base, File file, ZipOutputStream out) - throws IOException { - if (file.isDirectory()) { - for (File f : file.listFiles()) { - addFiles(base, f, out); - } - } else { - String path = file.getAbsolutePath().substring(base.getAbsolutePath().length()); - path = path.replace('\\', '/'); - if (path.startsWith("/")) { - path = path.substring(1); - } - byte[] data = IOUtils.readBytesAndClose(new FileInputStream(file), -1); - ZipEntry entry = new ZipEntry(path); - CRC32 crc = new CRC32(); - crc.update(data); - entry.setSize(file.length()); - entry.setCrc(crc.getValue()); - out.putNextEntry(entry); - out.write(data); - out.closeEntry(); - } - } - } diff --git a/h2/src/tools/org/h2/build/doc/WebSite.java b/h2/src/tools/org/h2/build/doc/WebSite.java index a681c3d4ed..97bff93d03 100644 --- a/h2/src/tools/org/h2/build/doc/WebSite.java +++ b/h2/src/tools/org/h2/build/doc/WebSite.java @@ -1,19 +1,23 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.doc; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.nio.file.DirectoryStream; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.HashMap; +import org.h2.build.BuildBase; import org.h2.samples.Newsfeed; -import org.h2.util.IOUtils; import org.h2.util.StringUtils; /** @@ -29,8 +33,8 @@ public class WebSite { private static final String TRANSLATE_START = ""; - private static final String SOURCE_DIR = "docs"; - private static final String WEB_DIR = "../h2web"; + private static final Path SOURCE_DIR = Paths.get("docs"); + private static final Path WEB_DIR = Paths.get("../h2web"); private final HashMap fragments = new HashMap<>(); /** @@ -45,23 +49,19 @@ public static void main(String... 
args) throws Exception { private void run() throws Exception { // create the web site - deleteRecursive(new File(WEB_DIR)); + BuildBase.deleteRecursive(WEB_DIR); loadFragments(); - copy(new File(SOURCE_DIR), new File(WEB_DIR), true, true); + copy(SOURCE_DIR, WEB_DIR, true, true); Newsfeed.main(WEB_DIR + "/html"); // create the internal documentation - copy(new File(SOURCE_DIR), new File(SOURCE_DIR), true, false); + copy(SOURCE_DIR, SOURCE_DIR, true, false); } private void loadFragments() throws IOException { - File dir = new File(SOURCE_DIR, "html"); - for (File f : dir.listFiles()) { - if (f.getName().startsWith("fragments")) { - FileInputStream in = new FileInputStream(f); - byte[] bytes = IOUtils.readBytesAndClose(in, 0); - String page = new String(bytes, StandardCharsets.UTF_8); - fragments.put(f.getName(), page); + try (DirectoryStream stream = Files.newDirectoryStream(SOURCE_DIR.resolve("html"), "fragments*")) { + for (Path f : stream) { + fragments.put(f.getFileName().toString(), new String(Files.readAllBytes(f), StandardCharsets.UTF_8)); } } } @@ -102,65 +102,72 @@ private String replaceFragments(String fileName, String page) { return page; } - private void deleteRecursive(File dir) { - if (dir.isDirectory()) { - for (File f : dir.listFiles()) { - deleteRecursive(f); + private void copy(Path source, Path target, boolean replaceFragments, boolean web) throws IOException { + Files.walkFileTree(source, new SimpleFileVisitor() { + @Override + public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { + Files.createDirectories(target.resolve(source.relativize(dir))); + return FileVisitResult.CONTINUE; } - } - dir.delete(); + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + copyFile(file, target.resolve(source.relativize(file)), replaceFragments, web); + return super.visitFile(file, attrs); + } + }); } - private void copy(File source, File target, boolean 
replaceFragments, - boolean web) throws IOException { - if (source.isDirectory()) { - target.mkdirs(); - for (File f : source.listFiles()) { - copy(f, new File(target, f.getName()), replaceFragments, web); + /** + * Copy a file. + * + * @param source the source file + * @param target the target file + * @param replaceFragments whether to replace fragments + * @param web whether the target is a public web site (false for local documentation) + */ + void copyFile(Path source, Path target, boolean replaceFragments, boolean web) throws IOException { + String name = source.getFileName().toString(); + if (name.endsWith("onePage.html") || name.startsWith("fragments")) { + return; + } + if (web) { + if (name.endsWith("main.html")) { + return; } } else { - String name = source.getName(); - if (name.endsWith("onePage.html") || name.startsWith("fragments")) { + if (name.endsWith("mainWeb.html")) { return; } + } + byte[] bytes = Files.readAllBytes(source); + if (name.endsWith(".html")) { + String page = new String(bytes, StandardCharsets.UTF_8); if (web) { - if (name.endsWith("main.html") || name.endsWith("main_ja.html")) { - return; - } - } else { - if (name.endsWith("mainWeb.html") || name.endsWith("mainWeb_ja.html")) { - return; - } + page = StringUtils.replaceAll(page, ANALYTICS_TAG, ANALYTICS_SCRIPT); } - FileInputStream in = new FileInputStream(source); - byte[] bytes = IOUtils.readBytesAndClose(in, 0); - if (name.endsWith(".html")) { - String page = new String(bytes, StandardCharsets.UTF_8); - if (web) { - page = StringUtils.replaceAll(page, ANALYTICS_TAG, ANALYTICS_SCRIPT); - } - if (replaceFragments) { - page = replaceFragments(name, page); - page = StringUtils.replaceAll(page, "", "
          ");
          -                    page = StringUtils.replaceAll(page, "", "");
          -                }
          -                bytes = page.getBytes(StandardCharsets.UTF_8);
          +            if (replaceFragments) {
          +                page = replaceFragments(name, page);
          +                page = StringUtils.replaceAll(page, "", "
          ");
          +                page = StringUtils.replaceAll(page, "", "");
          +            }
          +            if (name.endsWith("changelog.html")) {
          +                page = page.replaceAll("Issue\\s+#?(\\d+)",
          +                        "Issue #$1");
          +                page = page.replaceAll("PR\\s+#?(\\d+)",
          +                        "PR #$1");
          +            }
          +            bytes = page.getBytes(StandardCharsets.UTF_8);
          +        }
          +        Files.write(target, bytes);
          +        if (web) {
          +            if (name.endsWith("mainWeb.html")) {
          +                Files.move(target, target.getParent().resolve("main.html"));
                       }
                   }
               }
          diff --git a/h2/src/tools/org/h2/build/doc/XMLChecker.java b/h2/src/tools/org/h2/build/doc/XMLChecker.java
          index 691c68dac5..3bb0d65b91 100644
          --- a/h2/src/tools/org/h2/build/doc/XMLChecker.java
          +++ b/h2/src/tools/org/h2/build/doc/XMLChecker.java
          @@ -1,16 +1,20 @@
           /*
          - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0,
          - * and the EPL 1.0 (http://h2database.com/html/license.html).
          + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
          + * and the EPL 1.0 (https://h2database.com/html/license.html).
            * Initial Developer: H2 Group
            */
           package org.h2.build.doc;
           
          -import java.io.File;
          -import java.io.FileReader;
          +import java.io.IOException;
          +import java.nio.charset.StandardCharsets;
          +import java.nio.file.FileVisitResult;
          +import java.nio.file.Files;
          +import java.nio.file.Path;
          +import java.nio.file.Paths;
          +import java.nio.file.SimpleFileVisitor;
          +import java.nio.file.attribute.BasicFileAttributes;
           import java.util.Stack;
           
          -import org.h2.util.IOUtils;
          -
           /**
            * This class checks that the HTML and XML part of the source code
            * is well-formed XML.
          @@ -24,35 +28,46 @@ public class XMLChecker {
                * @param args the command line parameters
                */
               public static void main(String... args) throws Exception {
          -        new XMLChecker().run(args);
          +        XMLChecker.run(args);
               }
           
          -    private void run(String... args) throws Exception {
          -        String dir = ".";
          +    private static void run(String... args) throws Exception {
          +        Path dir = Paths.get(".");
                   for (int i = 0; i < args.length; i++) {
                       if ("-dir".equals(args[i])) {
          -                dir = args[++i];
          +                dir = Paths.get(args[++i]);
                       }
                   }
          -        process(dir + "/src");
          -        process(dir + "/docs");
          +        process(dir.resolve("src"));
          +        process(dir.resolve("docs"));
               }
           
          -    private void process(String path) throws Exception {
          -        if (path.endsWith("/CVS") || path.endsWith("/.svn")) {
          -            return;
          -        }
          -        File file = new File(path);
          -        if (file.isDirectory()) {
          -            for (String name : file.list()) {
          -                process(path + "/" + name);
          +    private static void process(Path path) throws Exception {
          +        Files.walkFileTree(path, new SimpleFileVisitor() {
          +            @Override
          +            public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
          +                // For Javadoc 8
          +                if (dir.getFileName().toString().equals("javadoc")) {
          +                    return FileVisitResult.SKIP_SUBTREE;
          +                }
          +                return FileVisitResult.CONTINUE;
                       }
          -        } else {
          -            processFile(path);
          -        }
          +
          +            @Override
          +            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
          +                processFile(file);
          +                return FileVisitResult.CONTINUE;
          +            }
          +        });
               }
           
          -    private static void processFile(String fileName) throws Exception {
          +    /**
          +     * Process a file.
          +     *
          +     * @param file the file
          +     */
          +    static void processFile(Path file) throws IOException {
          +        String fileName = file.getFileName().toString();
                   int idx = fileName.lastIndexOf('.');
                   if (idx < 0) {
                       return;
          @@ -62,8 +77,7 @@ private static void processFile(String fileName) throws Exception {
                       return;
                   }
                   // System.out.println("Checking file:" + fileName);
          -        FileReader reader = new FileReader(fileName);
          -        String s = IOUtils.readStringAndClose(reader, -1);
          +        String s = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
                   Exception last = null;
                   try {
                       checkXML(s, !suffix.equals("xml"));
          @@ -80,16 +94,16 @@ private static void checkXML(String xml, boolean html) throws Exception {
                   // String lastElement = null;
                   // 
        • : replace
        • ([^\r]*[^<]*) with
        • $1
        • // use this for html file, for example if
        • is not closed - String[] noClose = {}; + String[] noClose = {"br", "hr", "input", "link", "meta", "wbr"}; XMLParser parser = new XMLParser(xml); Stack stack = new Stack<>(); boolean rootElement = false; - while (true) { + loop: for (;;) { int event = parser.next(); if (event == XMLParser.END_DOCUMENT) { break; } else if (event == XMLParser.START_ELEMENT) { - if (stack.size() == 0) { + if (stack.isEmpty()) { if (rootElement) { throw new Exception("Second root element at " + parser.getRemaining()); } @@ -112,8 +126,7 @@ private static void checkXML(String xml, boolean html) throws Exception { if (html) { for (String n : noClose) { if (name.equals(n)) { - throw new Exception("Unnecessary closing element " - + name + " at " + parser.getRemaining()); + continue loop; } } } @@ -141,7 +154,7 @@ private static void checkXML(String xml, boolean html) throws Exception { + parser.getRemaining()); } } - if (stack.size() != 0) { + if (!stack.isEmpty()) { throw new Exception("Unclosed root element"); } } diff --git a/h2/src/tools/org/h2/build/doc/XMLParser.java b/h2/src/tools/org/h2/build/doc/XMLParser.java index b70013b5d3..bf9cdaad85 100644 --- a/h2/src/tools/org/h2/build/doc/XMLParser.java +++ b/h2/src/tools/org/h2/build/doc/XMLParser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; diff --git a/h2/src/tools/org/h2/build/doc/buildNewsfeed.sql b/h2/src/tools/org/h2/build/doc/buildNewsfeed.sql index 058fa44110..bd04acf688 100644 --- a/h2/src/tools/org/h2/build/doc/buildNewsfeed.sql +++ b/h2/src/tools/org/h2/build/doc/buildNewsfeed.sql @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ @@ -8,14 +8,14 @@ CREATE TABLE CHANNEL(TITLE VARCHAR, LINK VARCHAR, DESC VARCHAR, LANGUAGE VARCHAR, PUB TIMESTAMP, LAST TIMESTAMP, AUTHOR VARCHAR); INSERT INTO CHANNEL VALUES('H2 Database Automated Build' , - 'http://www.h2database.com/html/build.html#automated', 'H2 Database Automated Build', 'en-us', NOW(), NOW(), 'Thomas Mueller'); + 'https://h2database.com/html/build.html#automated', 'H2 Database Automated Build', 'en-us', LOCALTIMESTAMP, LOCALTIMESTAMP, 'Thomas Mueller'); SELECT XMLSTARTDOC() || XMLNODE('feed', XMLATTR('xmlns', 'http://www.w3.org/2005/Atom') || XMLATTR('xml:lang', C.LANGUAGE), XMLNODE('title', XMLATTR('type', 'text'), C.TITLE) || XMLNODE('id', NULL, XMLTEXT(C.LINK)) || XMLNODE('author', NULL, XMLNODE('name', NULL, C.AUTHOR)) || - XMLNODE('link', XMLATTR('rel', 'self') || XMLATTR('href', 'http://www.h2database.com/automated/news.xml'), NULL) || + XMLNODE('link', XMLATTR('rel', 'self') || XMLATTR('href', 'https://h2database.com/automated/news.xml'), NULL) || XMLNODE('updated', NULL, FORMATDATETIME(C.LAST, 'yyyy-MM-dd''T''HH:mm:ss''Z''', 'en', 'GMT')) || GROUP_CONCAT( XMLNODE('entry', NULL, diff --git a/h2/src/tools/org/h2/build/doc/dictionary.txt b/h2/src/tools/org/h2/build/doc/dictionary.txt index 75849c11b8..9e48dba39b 100644 --- a/h2/src/tools/org/h2/build/doc/dictionary.txt +++ b/h2/src/tools/org/h2/build/doc/dictionary.txt @@ -25,8 +25,8 @@ amt analysis analytics analyze analyzed analyzer analyzers analyzing anatr ancestor anchor and andrew android andy anewarray ang angel angle angus animal animate aniseed anne annotate annotated annotation annotations annual ano anon anonymous another ans ansi ansorg answers ant anthony anti antialias antialiasing -anton antonio any anybody 
anyhow anyone anything anyway anywhere anzo apache -apart api apos app apparatus appear appears append appended appender appending +anton antonio any anybody anyhow anyone anything anyway anywhere anzo apache apart +api apiguardian apos app apparatus appear appears append appended appender appending appendix appends apple apples applet applets applicable application applications applied applies apply applying appreciate approach appropriate appropriateness approx approximate approximated approximation apps april aquiles arabic arbitrary @@ -55,9 +55,9 @@ becomes becoming been beep before begin beginning behalf behave behaving behavio behaviour behind being bel belgium believes bellinzona belong belonging belongs below bench benchmark benchmarks beneficial benefit bennet berger berkeley berlini bern bernd berne best beta better between beverages beware beyond bfff -bgcolor biased bid biel bienne big bigger biggest bigint biginteger bigserial +bgcolor biased bid biel bienne big bigger biggest bigint biginteger bigserial binlog bilinear bilingual billion bin binaries binary bind bindings bio biodiversity -biological bipush birth birthday biscuits bit bitand bitmap bitor bits bitwise +biological bipush birth birthday biscuits bit bitand bitmap bitnot bitor bits bitwise bitxor biz bjorn black blank blanked blanks bleyl blind blitz blob blobs block blocked blocking blockquote blocks blocksize blog blogs bloom blue blume blur bnf bnot boat bob bocher bodies body bogus bohlen bold bom bonita boo book @@ -75,7 +75,7 @@ cached caches caching cafe cajun cal calculate calculated calculates calculating calculation calculations calendar calendars call callable callback callbacks called caller calling calls cally caload came camel can cancel canceled canceling cancellation cancelled cancels candidates cannot canonical cap capabilities -capability capacity capitalization capitalize capitalized capone caps capture +capability capacity capitalization capitalize capitalized capone 
caps caption capture captured car card cardinal cardinality care careful carriage carrier cars cartesian cas cascade cascading case cases casesensitive casewhen cash casing casqueiro cast casting castore cat catalina catalog catalogs cataloguing catch catcher catches @@ -148,14 +148,14 @@ cryptoloop css csv csvread csvwrite cte ctid ctor ctrl ctx ctxsys cuaz cube cumulative cup curation curdate cure curious curly curr curren currency current currently currval cursor cursors curtime curtimestamp curve curves cust custom customarily customer customerid customers customizable customized customizer -customizers customizing cut cutover cvf cvs cwd cycle cycles cyclic cycling cyr +customizers customizing cut cutover cve cvf cvs cwd cycle cycles cyclic cycling cyr czech dadd daemon daffodil dagger dairy daload dalvik damage damages dan dance dangerous daniel dark darr darri dartifact darwin dash dashes dastore dat data database databaseaccess databases dataflyer datagram datalink datareader datasource datasources datastore datatext datatype datatypes datawriter date dateadd datediff datepart dates datestyle datetime datetimes datum david davide day daylight dayname dayofmonth dayofweek dayofyear days dba dbbench dbcp dbid -dbms dbname dbo dbs dbserv dbsnmp dclassifier dcmpg dcmpl dconst ddiv ddl +dbms dbname dbo dbs dbserv dbsnmp dclassifier dcmpg dcmpl dconst dderby ddiv ddl ddladmin deactivate deactivated deactivation dead deadlock deadlocked deadlocks deal dealing deallocate death debug debugging dec decade december decide decided decimal decision deck declaration declarations declarative declaratory declare declared @@ -221,7 +221,7 @@ english enhance enhanced enhancement enhancer enlarge enough enqueued ensp ensur ensures ensuring enter entered entering enterprise entire entities entity entrance entries entry enum enumerate enumerated enumerator enumerators enumeration env envelope environment environments enwiki eof eol epl epoch epoll epsilon equal equality equally 
-equals equipment equitable equiv equivalence equivalent equivalents era erable eremainder +equals equipment equitable equiv equivalence equivalent equivalents era erase eremainder eric erik err error errorlevel errors erwan ery esc escape escaped escapes escaping escargots ese espa essential essentials established estimate estimated estimates estimating estimation estoppel eta etc eth etl euml euro europe europeu euros eva eval @@ -244,7 +244,7 @@ fact factor factorial factories factory factual fadd fail failed failing fails f failures fair fake fall fallback falls faload false familiar families family faq far fashion fashioned fast faster fastest fastore fat fatal faulhaber fault favicon favorite fbj fcmpg fcmpl fconst fdiv feature features feb februar -february federal federated federation fedotovs fee feed feedback fees feff fetch +february federal federated federation fedora fedotovs fee feed feedback fees feff fetch fetched fetching few fewer ffeecc fffe fid field fields fiery fifo fifty file filed filename filepwd files filesystem fill filled filler fillers filling fills filo filter filtered filtering filters fin final finalization finalize finalizer @@ -379,7 +379,7 @@ longblob longer longest longitude longnvarchar longs longtext longvarbinary long look lookahead looking looks lookup lookups lookupswitch loop loopback looping loops loose lor lore lose losing loss losses lossless losslessly lost lot lots low lowast lower lowercase lowercased lowest loz lpad lrem lreturn lrm lru lsaquo -lshl lshr lsm lsquo lstore lsub lte ltrim lucene lucerne lugano lukas lumber +lshift lshl lshr lsm lsquo lstore lsub lte ltrim lucene lucerne lugano lukas lumber lumberjack luntbuild lushr lutin lxabcdef lxor lying lynx lzf mac macdonald machine machines maciej macr macro macromedia macros made magic magnolia magyar mahon mail mailing main mainly maintain maintained maintaining maintains @@ -393,7 +393,8 @@ matcher matches matching material materialized materials math 
mathematical mathematicians mathematics matrix matter matters maurice maven max maxbqualsize maxed maxgtridsize maximum maxlength maxrows maxvalue maxwidth may maybe mbean mbeans mcleod mdash mdd mddata mdsys mdtm mean meaning meaningful means meant -meantime meanwhile measurable measure measured meat mechanism media median medium +meantime meanwhile measurable measure measured measurement measurements +meat mechanism media median medium mediumblob mediumint mediumtext megabyte megabytes mehner meier meijer melbourne mem member members memcpy memmove memo memory mendonca mentioned menu merchantability merchantable merge merged merges merging meridian message @@ -469,7 +470,7 @@ periodically periods permanently permil permission permissions permits permitted permutation permutations perp persist persisted persistence persistent persister persisting persists person personal persons perspective pervasive pete peter petra pfgrc pfister pgdn pgup phane phantom phase phi philip philippe -philosophers phone php phrase phrases phromros physical pick picked pickle pico +philosophers phone php phrase phrases phromros physical pick picked pickle picks pico pid pieces pier pietrzak pilot piman ping pinned pipe piped pit pitest piv pivot pkcolumn pkcs pktable place placed placeholders places placing plain plaintext plan planned planner planning plans plant plenty platform platforms play player please @@ -556,7 +557,7 @@ rmd rmdir rmerr rmi rmiregistry rnd rnfr rnto road roadmap roads robert roc rogu rojas role roles roll rollback rollbacks rolled rolling rollover rolls roman room root rooted roots rot rotate round rounded rounding roundmagic rounds routine routinely routines row rowcount rowid rowlock rownum rows rowscn rowsize roy royalty rpad rpm rsa -rsaquo rsquo rss rtree rtrim ruby ruebezahl rule rules run rund rundll runnable +rsaquo rshift rsquo rss rtree rtrim ruby ruebezahl rule rules run rund rundll runnable runner runners running runs runscript runtime rwd rws sabine 
safari safe safely safes safety said sainsbury salary sale sales saload salt salz sam same sameorigin samp sample samples sanitize sanity sans sastore sat satisfy saturday sauce @@ -653,7 +654,7 @@ thus tick ticker tid tigers tilde time timed timely timeout timer times timestam timestampadd timestampdiff timestamps timezone timezones timing tiny tinyblob tinyint tinytext tip tips tired tis title titled titles tls tme tmendrscan tmfail tmjoin tmnoflags tmonephase tmp tmpdir tmresume tmstartrscan tmsuccess tmsuspend -tmueller tmzone toc today todescato todo tofu together toggle token tokenize +tmueller tmzone toast toc today todescato todo tofu together toggle token tokenize tokenizer tokens tolerant tom tomas tomcat tong too took tool toolbar toolkit tools toolset top topic topics toplink topology tort total totals touch toward tpc trace traces tracing track tracked tracker tracking tracks trade trademark @@ -810,4 +811,40 @@ presorted inclusion contexts aax mwd percentile cont interpolate mwa hypothetica isodow isoyear psql waiters reliably httpsdocs privileged narrow spending swallow locally uncomment builders -setjava lift +setjava lift hyperlinks lazarevn nikita lazarev lvl ispras bias dbff fals tru dfff +recognition spared hacky employing occupancy baos shifts littlejohn pushes scrub existent asterisked projections +omits redefined ensured arrayagg objectagg bmp uabcd prefixed incoherence aggressively smb invalidating filesystems +improper subcondition boxes negates abrupt chooses hindi updater zoned tolerable interference elimination +prepend honored evacuated peeked queued transforms inbounded fragmented unprotected adjustment supposedly alloted +housekeeping trail breadcrumb bets seasoned rewritable rpi eliminating projected reenterant varint races outcomes +sparsely shifting vacated evacuation bullet allocations projected evacuatable pin capable rewritable deficiency +successfull deduplication entrant mvmap sporadic irrelevant interrupts +sit sitting sooner 
hdr considering encounter compete quickack decrementing exhausting caveat aschoerk circular ident +scr ffffl suspend asap ldt lmt movement ago snapshotting paris phenomena backends quirks pgjdbc jupiter grab folds +umcfo iapi autoloaded derbyshared darkred coral mistyrose lightseagreen unmodifiable posix exc attrs relativize +quotient niomem niomapped obtaining rare occasions oversynchronizing disallows opponent adversarial broader decent tmv +prize secured stateful generification bracketed permissible opaque aside indexable daytime uncomparable reevaluates +pct sliding deliberately sampling grabs saw video keyed carries estimator restrain remainer magnitude placeholder +expandable jira meaningless iterated maliciously crafted cdef attention deserialized hurts absorb bufcnt digests +consumer reread relname proargtypes pronamespace relnamespace heidi proname reltuples collects trigraphs nspname +timetz timestamptz psycopg adbin attrdef objoid attnotnull adnum adrelid objsubid atttypid attname attisdropped pgc +attrelid currtid encodings + +rolconnlimit spcname indisclustered tgconstrname relhasoids rolcreaterole usecreatedb datconfig reltablespace relchecks +amname relhasindex tablespace reltriggers tgconstrrelid groname indrelid relhasrules classoid inhseqno tgargs datdba +indisunique rolinherit datacl rolvaliduntil datname indexprs usename typbasetype rolconfig relkind spcacl prorettype +datallowconn atthasdef dattablespace rolcreatedb inhrelid inhparent attlen rolname rolcanlogin aclitem datlastsysoid +indpred tgfoid indisprimary adsrc spcowner tgnargs typtype typinput rolcatupdate typnamespace tgrelid authid indexrelid +usesuper tgdeferrable rolpassword relam relpages tginitdeferred rolsuper autovacuum typnotnull spclocation cancreate +nsp pgagent pga awoken serverencoding untyped ambiguities tons lhs letting rhs opportunities specifications +usefully pipelining fetches reenable joiner visits dcl avxaaa german fold degree supertype overloads hierarchy locator 
+conrelid conkey tabrelname refnamespace dsc pred typrelid conname contype confrelid numscans beaver typdelim typelem +jsonb und decfloat attnums oids studio smells pvs mention statically deletable insertable reconstructed similarly +submissions explaining cycled assigns separation aimed ababab quotation cleanly beff cdab efgh +xnor bitnand bitcount nand bitnor bitxnor ulshift urshift rotates rotation rotateleft rotateright leaking incomparable +deref corr asensitive sqlexception avgy avgx lateral rollup syy reseved specifictype classifier sqlcode covar uescape +ptf overlay precedes regr slope sqlerror multiset submultiset inout sxx sxy intercept sqlwarning tablesample preorder +orientation eternal consideration erased fedc npgsql powers fffd uencode ampersand noversion ude considerable intro +entirely skeleton discouraged pearson coefficient squares covariance mytab debuggers fonts glyphs +filestore backstop tie breaker lockable lobtx btx waiter accounted aiobe spf resolvers generators +accidental wbr subtree recognising supplementary happier hasn officially rnrn diff --git a/h2/src/tools/org/h2/build/doc/package.html b/h2/src/tools/org/h2/build/doc/package.html index de7af3f9a8..339d88ba98 100644 --- a/h2/src/tools/org/h2/build/doc/package.html +++ b/h2/src/tools/org/h2/build/doc/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/build/doclet/Doclet.java b/h2/src/tools/org/h2/build/doclet/Doclet.java deleted file mode 100644 index c0141ce663..0000000000 --- a/h2/src/tools/org/h2/build/doclet/Doclet.java +++ /dev/null @@ -1,590 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.build.doclet; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Comparator; -import java.util.HashSet; -import org.h2.util.StringUtils; -import com.sun.javadoc.ClassDoc; -import com.sun.javadoc.ConstructorDoc; -import com.sun.javadoc.ExecutableMemberDoc; -import com.sun.javadoc.FieldDoc; -import com.sun.javadoc.LanguageVersion; -import com.sun.javadoc.MethodDoc; -import com.sun.javadoc.ParamTag; -import com.sun.javadoc.Parameter; -import com.sun.javadoc.RootDoc; -import com.sun.javadoc.Tag; -import com.sun.javadoc.ThrowsTag; -import com.sun.javadoc.Type; - -/** - * This class is a custom doclet implementation to generate the - * Javadoc for this product. - */ -public class Doclet { - - private static final boolean INTERFACES_ONLY = Boolean - .getBoolean("h2.interfacesOnly"); - private String destDir = System.getProperty("h2.javadocDestDir", - "docs/javadoc"); - private int errorCount; - private final HashSet errors = new HashSet<>(); - - /** - * This method is called by the javadoc framework and is required for all - * doclets. 
- * - * @param root the root - * @return true if successful - */ - public static boolean start(RootDoc root) throws IOException { - return new Doclet().startDoc(root); - } - - private boolean startDoc(RootDoc root) throws IOException { - ClassDoc[] classes = root.classes(); - String[][] options = root.options(); - for (String[] op : options) { - if (op[0].equals("destdir")) { - destDir = op[1]; - } - } - for (ClassDoc clazz : classes) { - processClass(clazz); - } - if (errorCount > 0) { - throw new IOException("FAILED: " + errorCount + " errors found"); - } - return true; - } - - private static String getClass(ClassDoc clazz) { - String name = clazz.name(); - if (clazz.qualifiedName().indexOf(".jdbc.") > 0 && name.startsWith("Jdbc")) { - return name.substring(4); - } - return name; - } - - private void processClass(ClassDoc clazz) throws IOException { - String packageName = clazz.containingPackage().name(); - String dir = destDir + "/" + packageName.replace('.', '/'); - (new File(dir)).mkdirs(); - String fileName = dir + "/" + clazz.name() + ".html"; - String className = getClass(clazz); - FileWriter out = new FileWriter(fileName); - PrintWriter writer = new PrintWriter(new BufferedWriter(out)); - writer.println(""); - String language = "en"; - writer.println(""); - writer.println("" + - ""); - writer.println(className); - writer.println("" + - ""); - writer.println(""); - writer.println(""); - writer.println("
        •  
          "); + r.accept(this); + buff.append(html); + buff.append("
          ").append(column).append("").append(rs2.getString(2)) + .append("
          ") + .append(StringUtils.xmlText(description)).append("
          " + - "" + - "
          " + - "
          "); - writer.println("

          " + className + "

          "); - writer.println(formatText(clazz.commentText()) + "

          "); - - // methods - ConstructorDoc[] constructors = clazz.constructors(); - MethodDoc[] methods = clazz.methods(); - ExecutableMemberDoc[] constructorsMethods = - new ExecutableMemberDoc[constructors.length - + methods.length]; - System.arraycopy(constructors, 0, constructorsMethods, 0, - constructors.length); - System.arraycopy(methods, 0, constructorsMethods, constructors.length, - methods.length); - Arrays.sort(constructorsMethods, new Comparator() { - @Override - public int compare(ExecutableMemberDoc a, ExecutableMemberDoc b) { - // sort static method before non-static methods - if (a.isStatic() != b.isStatic()) { - return a.isStatic() ? -1 : 1; - } - return a.name().compareTo(b.name()); - } - }); -// -// -// Arrays.sort(methods, new Comparator() { -// public int compare(MethodDoc a, MethodDoc b) { -// // sort static method before non-static methods -// if (a.isStatic() != b.isStatic()) { -// return a.isStatic() ? -1 : 1; -// } -// return a.name().compareTo(b.name()); -// } -// }); - ArrayList signatures = new ArrayList<>(); - boolean hasMethods = false; - int id = 0; - for (int i = 0; i < constructorsMethods.length; i++) { - ExecutableMemberDoc method = constructorsMethods[i]; - String name = method.name(); - if (skipMethod(method)) { - continue; - } - if (!hasMethods) { - writer.println("" + - "" + - ""); - hasMethods = true; - } - String type = getTypeName(method.isStatic(), false, - getReturnType(method)); - writer.println(""); - writer.println(""); - writer.println(""); - writer.println(""); - id++; - } - if (hasMethods) { - writer.println("
          Methods
          " + type + - ""); - Parameter[] params = method.parameters(); - StringBuilder buff = new StringBuilder(); - StringBuilder buffSignature = new StringBuilder(name); - buff.append('('); - for (int j = 0; j < params.length; j++) { - if (j > 0) { - buff.append(", "); - } - buffSignature.append('_'); - Parameter param = params[j]; - boolean isVarArgs = method.isVarArgs() && j == params.length - 1; - String typeName = getTypeName(false, isVarArgs, param.type()); - buff.append(typeName); - buffSignature.append(StringUtils.replaceAll(typeName, "[]", "-")); - buff.append(' '); - buff.append(param.name()); - } - buff.append(')'); - if (isDeprecated(method)) { - name = "" + name + ""; - } - String signature = buffSignature.toString(); - while (signatures.size() < i) { - signatures.add(null); - } - signatures.add(i, signature); - writer.println("" + - name + "" + buff.toString()); - String firstSentence = getFirstSentence(method.firstSentenceTags()); - if (firstSentence != null) { - writer.println("
          " + - formatText(firstSentence) + "
          "); - } - writer.println("
          " + - type + ""); - writeMethodDetails(writer, clazz, method, signature); - writer.println("
          "); - } - - // field overview - FieldDoc[] fields = clazz.fields(); - if (clazz.interfaces().length > 0) { - fields = clazz.interfaces()[0].fields(); - } - Arrays.sort(fields, new Comparator() { - @Override - public int compare(FieldDoc a, FieldDoc b) { - return a.name().compareTo(b.name()); - } - }); - int fieldId = 0; - for (FieldDoc field : fields) { - if (skipField(clazz, field)) { - continue; - } - String name = field.name(); - String text = field.commentText(); - if (text == null || text.trim().length() == 0) { - addError("Undocumented field (" + - getLink(clazz, field.position().line()) + ") " + name); - } - if (text != null && text.startsWith("INTERNAL")) { - continue; - } - if (fieldId == 0) { - writer.println("
          "); - } - String type = getTypeName(true, false, field.type()); - writer.println(""); - fieldId++; - } - if (fieldId > 0) { - writer.println("
          Fields
          " + type + - ""); - String constant = field.constantValueExpression(); - - // add a link (a name) if there is a tag - String link = getFieldLink(text, constant, clazz, name); - writer.print("" + name + ""); - if (constant == null) { - writer.println(); - } else { - writer.println(" = " + constant); - } - writer.println("
          "); - } - - // field details - Arrays.sort(fields, new Comparator() { - @Override - public int compare(FieldDoc a, FieldDoc b) { - String ca = a.constantValueExpression(); - if (ca == null) { - ca = a.name(); - } - String cb = b.constantValueExpression(); - if (cb == null) { - cb = b.name(); - } - return ca.compareTo(cb); - } - }); - for (FieldDoc field : fields) { - writeFieldDetails(writer, clazz, field); - } - - writer.println("
          "); - writer.close(); - out.close(); - } - - private void writeFieldDetails(PrintWriter writer, ClassDoc clazz, - FieldDoc field) { - if (skipField(clazz, field)) { - return; - } - String text = field.commentText(); - if (text.startsWith("INTERNAL")) { - return; - } - String name = field.name(); - String constant = field.constantValueExpression(); - String link = getFieldLink(text, constant, clazz, name); - writer.println("

          " + - name); - if (constant == null) { - writer.println(); - } else { - writer.println(" = " + constant); - } - writer.println("

          "); - writer.println("
          " + formatText(text) + "
          "); - writer.println("
          "); - } - - private void writeMethodDetails(PrintWriter writer, ClassDoc clazz, - ExecutableMemberDoc method, String signature) { - String name = method.name(); - if (skipMethod(method)) { - return; - } - Parameter[] params = method.parameters(); - StringBuilder builder = new StringBuilder(); - builder.append('('); - for (int i = 0, l = params.length; i < l; i++) { - if (i > 0) { - builder.append(", "); - } - boolean isVarArgs = method.isVarArgs() && i == params.length - 1; - Parameter p = params[i]; - builder.append(getTypeName(false, isVarArgs, p.type())); - builder.append(' '); - builder.append(p.name()); - } - builder.append(')'); - ClassDoc[] exceptions = method.thrownExceptions(); - if (exceptions.length > 0) { - builder.append(" throws "); - for (int i = 0, l = exceptions.length; i < l; i++) { - if (i > 0) { - builder.append(", "); - } - builder.append(exceptions[i].typeName()); - } - } - if (isDeprecated(method)) { - name = "" + name + ""; - } - writer.println("" + - name + "" + builder.toString()); - boolean hasComment = method.commentText() != null && - method.commentText().trim().length() != 0; - writer.println("
          " + - formatText(method.commentText()) + "
          "); - ParamTag[] paramTags = method.paramTags(); - ThrowsTag[] throwsTags = method.throwsTags(); - boolean hasThrowsTag = throwsTags != null && throwsTags.length > 0; - if (paramTags.length != params.length) { - if (hasComment && !method.commentText().startsWith("[")) { - // [Not supported] and such are not problematic - addError("Undocumented parameter(s) (" + - getLink(clazz, method.position().line()) + ") " + - name + " documented: " + paramTags.length + - " params: "+ params.length); - } - } - for (int j = 0; j < paramTags.length; j++) { - String paramName = paramTags[j].parameterName(); - String comment = paramTags[j].parameterComment(); - if (comment.trim().length() == 0) { - addError("Undocumented parameter (" + - getLink(clazz, method.position().line()) + ") " + - name + " " + paramName); - } - String p = paramName + " - " + comment; - if (j == 0) { - writer.println("
          Parameters:
          "); - } - writer.println("
          " + p + "
          "); - } - Tag[] returnTags = method.tags("return"); - Type returnType = getReturnType(method); - if (returnTags != null && returnTags.length > 0) { - writer.println("
          Returns:
          "); - String returnComment = returnTags[0].text(); - if (returnComment.trim().length() == 0) { - addError("Undocumented return value (" + - getLink(clazz, method.position().line()) + ") " + name); - } - writer.println("
          " + returnComment + "
          "); - } else if (returnType != null && !returnType.toString().equals("void")) { - if (hasComment && !method.commentText().startsWith("[") && - !hasThrowsTag) { - // [Not supported] and such are not problematic - // also not problematic are methods that always throw an - // exception - addError("Undocumented return value (" - + getLink(clazz, method.position().line()) + ") " - + name + " " + getReturnType(method)); - } - } - if (hasThrowsTag) { - writer.println("
          Throws:
          "); - for (ThrowsTag tag : throwsTags) { - String p = tag.exceptionName(); - String c = tag.exceptionComment(); - if (c.length() > 0) { - p += " - " + c; - } - writer.println("
          " + p + "
          "); - } - } - } - - private static String getLink(ClassDoc clazz, int line) { - String c = clazz.name(); - int x = c.lastIndexOf('.'); - if (x >= 0) { - c = c.substring(0, x); - } - return c + ".java:" + line; - } - - private String getFieldLink(String text, String constant, ClassDoc clazz, - String name) { - String link = constant != null ? constant : name.toLowerCase(); - int linkStart = text.indexOf(""); - if (linkStart >= 0) { - int linkEnd = text.indexOf("", linkStart); - link = text.substring(linkStart + "".length(), linkEnd); - if (constant != null && !constant.equals(link)) { - System.out.println("Wrong code tag? " + clazz.name() + "." + - name + - " code: " + link + " constant: " + constant); - errorCount++; - } - } - if (link.startsWith("\"")) { - link = name; - } else if (Character.isDigit(link.charAt(0))) { - link = "c" + link; - } - return link; - } - - private static String formatText(String text) { - if (text == null) { - return text; - } - text = StringUtils.replaceAll(text, "\n ", ""); - return text; - } - - private static boolean skipField(ClassDoc clazz, FieldDoc field) { - if (field.isPrivate() || field.containingClass() != clazz) { - return true; - } - return false; - } - - private boolean skipMethod(ExecutableMemberDoc method) { - ClassDoc clazz = method.containingClass(); - boolean isAbstract = method instanceof MethodDoc - && ((MethodDoc) method).isAbstract(); - boolean isInterface = clazz.isInterface() - || (clazz.isAbstract() && isAbstract); - if (INTERFACES_ONLY && !isInterface) { - return true; - } - String name = method.name(); - if (method.isPrivate() || name.equals("finalize")) { - return true; - } - if (method.isConstructor() - && method.getRawCommentText().trim().length() == 0) { - return true; - } - if (method.getRawCommentText().trim() - .startsWith("@deprecated INTERNAL")) { - return true; - } - String firstSentence = getFirstSentence(method.firstSentenceTags()); - String raw = method.getRawCommentText(); - if 
(firstSentence != null && firstSentence.startsWith("INTERNAL")) { - return true; - } - if ((firstSentence == null || firstSentence.trim().length() == 0) - && raw.indexOf("{@inheritDoc}") < 0) { - if (!doesOverride(method)) { - boolean setterOrGetter = name.startsWith("set") - && method.parameters().length == 1; - setterOrGetter |= name.startsWith("get") - && method.parameters().length == 0; - Type returnType = getReturnType(method); - setterOrGetter |= name.startsWith("is") - && method.parameters().length == 0 - && returnType != null - && returnType.toString().equals("boolean"); - boolean enumValueMethod = name.equals("values") || name.equals("valueOf"); - if (!setterOrGetter && !enumValueMethod) { - addError("Undocumented method " + " (" - + getLink(clazz, method.position().line()) + ") " - + clazz + "." + name + " " + raw); - return true; - } - } - } - return false; - } - - private static Type getReturnType(ExecutableMemberDoc method) { - if (method instanceof MethodDoc) { - MethodDoc m = (MethodDoc) method; - return m.returnType(); - } - return null; - } - - private void addError(String s) { - if (errors.add(s)) { - System.out.println(s); - errorCount++; - } - } - - private boolean doesOverride(ExecutableMemberDoc method) { - if (method.isConstructor()) { - return true; - } - ClassDoc clazz = method.containingClass(); - int parameterCount = method.parameters().length; - return foundMethod(clazz, false, method.name(), parameterCount); - } - - private boolean foundMethod(ClassDoc clazz, boolean include, - String methodName, int parameterCount) { - if (include) { - for (MethodDoc m : clazz.methods()) { - if (m.name().equals(methodName) - && m.parameters().length == parameterCount) { - return true; - } - } - } - for (ClassDoc doc : clazz.interfaces()) { - if (foundMethod(doc, true, methodName, parameterCount)) { - return true; - } - } - clazz = clazz.superclass(); - return clazz != null - && foundMethod(clazz, true, methodName, parameterCount); - } - - private 
static String getFirstSentence(Tag[] tags) { - String firstSentence = null; - if (tags.length > 0) { - Tag first = tags[0]; - firstSentence = first.text(); - } - return firstSentence; - } - - private static String getTypeName(boolean isStatic, boolean isVarArgs, - Type type) { - if (type == null) { - return ""; - } - String s = type.typeName() + type.dimension(); - if (isVarArgs) { - // remove the last "[]" and add "..." instead - s = s.substring(0, s.length() - 2) + "..."; - } - if (isStatic) { - s = "static " + s; - } - return s; - } - - private static boolean isDeprecated(ExecutableMemberDoc method) { - for (Tag t : method.tags()) { - if (t.kind().equals("@deprecated")) { - return true; - } - } - return false; - } - - /** - * Get the language version this doclet supports. - * - * @return the language version - */ - public static LanguageVersion languageVersion() { - // otherwise, isVarArgs always returns false - // (which sounds like a bug but is a feature :-) - return LanguageVersion.JAVA_1_5; - } - -} diff --git a/h2/src/tools/org/h2/build/doclet/ResourceDoclet.java b/h2/src/tools/org/h2/build/doclet/ResourceDoclet.java deleted file mode 100644 index 3b3a9963af..0000000000 --- a/h2/src/tools/org/h2/build/doclet/ResourceDoclet.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.build.doclet; - -import java.io.IOException; -import org.h2.build.doc.XMLParser; -import org.h2.build.indexer.HtmlConverter; -import org.h2.util.SortedProperties; -import com.sun.javadoc.ClassDoc; -import com.sun.javadoc.Doc; -import com.sun.javadoc.MethodDoc; -import com.sun.javadoc.RootDoc; -import com.sun.javadoc.Tag; - -/** - * This custom doclet generates resources from javadoc comments. - * Only comments that contain 'at resource' are included. - * Only class level and method level comments are supported. 
- */ -public class ResourceDoclet { - - private String destFile = System.getProperty("h2.javadocResourceFile", - "src/main/org/h2/res/javadoc.properties"); - - private final SortedProperties resources = new SortedProperties(); - - /** - * This method is called by the javadoc framework and is required for all - * doclets. - * - * @param root the root - * @return true if successful - */ - public static boolean start(RootDoc root) throws IOException { - return new ResourceDoclet().startDoc(root); - } - - private boolean startDoc(RootDoc root) throws IOException { - ClassDoc[] classes = root.classes(); - String[][] options = root.options(); - for (String[] op : options) { - if (op[0].equals("dest")) { - destFile = op[1]; - } - } - for (ClassDoc clazz : classes) { - processClass(clazz); - } - resources.store(destFile); - return true; - } - - private void processClass(ClassDoc clazz) { - String packageName = clazz.containingPackage().name(); - String className = clazz.name(); - addResource(packageName + "." + className, clazz); - - for (MethodDoc method : clazz.methods()) { - String name = method.name(); - addResource(packageName + "." + className + "." 
+ name, method); - } - } - - - private void addResource(String key, Doc doc) { - if (!isResource(doc)) { - return; - } - String xhtml = doc.commentText(); - XMLParser p = new XMLParser(xhtml); - StringBuilder buff = new StringBuilder(); - int column = 0; - int firstColumnSize = 0; - boolean inColumn = false; - while (p.hasNext()) { - String s; - switch (p.next()) { - case XMLParser.END_ELEMENT: - s = p.getName(); - if ("p".equals(s) || "tr".equals(s) || "br".equals(s)) { - buff.append('\n'); - } - break; - case XMLParser.START_ELEMENT: - s = p.getName(); - if ("table".equals(s)) { - buff.append('\n'); - } else if ("tr".equals(s)) { - column = 0; - } else if ("td".equals(s)) { - inColumn = true; - column++; - if (column == 2) { - buff.append('\t'); - } - } - break; - case XMLParser.CHARACTERS: - s = HtmlConverter.convertHtmlToString(p.getText().trim()); - if (inColumn && column == 1) { - firstColumnSize = Math.max(s.length(), firstColumnSize); - } - buff.append(s); - break; - } - } - for (int i = 0; i < buff.length(); i++) { - if (buff.charAt(i) == '\t') { - buff.deleteCharAt(i); - int length = i - buff.lastIndexOf("\n", i - 1); - for (int k = length; k < firstColumnSize + 3; k++) { - buff.insert(i, ' '); - } - } - } - String text = buff.toString().trim(); - resources.setProperty(key, text); - } - - private static boolean isResource(Doc doc) { - for (Tag t : doc.tags()) { - if (t.kind().equals("@h2.resource")) { - return true; - } - } - return false; - } - -} diff --git a/h2/src/tools/org/h2/build/i18n/PrepareTranslation.java b/h2/src/tools/org/h2/build/i18n/PrepareTranslation.java deleted file mode 100644 index acdeaec829..0000000000 --- a/h2/src/tools/org/h2/build/i18n/PrepareTranslation.java +++ /dev/null @@ -1,542 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.build.i18n; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Properties; -import java.util.Stack; -import org.h2.build.doc.XMLParser; -import org.h2.server.web.PageParser; -import org.h2.util.IOUtils; -import org.h2.util.SortedProperties; -import org.h2.util.StringUtils; - -/** - * This class updates the translation source code files by parsing - * the HTML documentation. It also generates the translated HTML - * documentation. - */ -public class PrepareTranslation { - private static final String MAIN_LANGUAGE = "en"; - private static final String[] EXCLUDE = { "commands.html", "datatypes.html", - "functions.html", "grammar.html" }; - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... 
args) throws Exception { - String baseDir = "src/docsrc/textbase"; - prepare(baseDir, "src/main/org/h2/res", true); - prepare(baseDir, "src/main/org/h2/server/web/res", true); - - // convert the txt files to properties files - PropertiesToUTF8.textUTF8ToProperties( - "src/docsrc/text/_docs_de.utf8.txt", - "src/docsrc/text/_docs_de.properties"); - PropertiesToUTF8.textUTF8ToProperties( - "src/docsrc/text/_docs_ja.utf8.txt", - "src/docsrc/text/_docs_ja.properties"); - - // create the .jsp files and extract the text in the main language - extractFromHtml("docs/html", "src/docsrc/text"); - - // add missing translations and create a new baseline - prepare(baseDir, "src/docsrc/text", false); - - // create the translated documentation - buildHtml("src/docsrc/text", "docs/html", "en"); - // buildHtml("src/docsrc/text", "docs/html", "de"); - // buildHtml("src/docsrc/text", "docs/html", "ja"); - - // convert the properties files back to utf8 text files, including the - // main language (to be used as a template) - PropertiesToUTF8.propertiesToTextUTF8( - "src/docsrc/text/_docs_en.properties", - "src/docsrc/text/_docs_en.utf8.txt"); - PropertiesToUTF8.propertiesToTextUTF8( - "src/docsrc/text/_docs_de.properties", - "src/docsrc/text/_docs_de.utf8.txt"); - PropertiesToUTF8.propertiesToTextUTF8( - "src/docsrc/text/_docs_ja.properties", - "src/docsrc/text/_docs_ja.utf8.txt"); - - // delete temporary files - for (File f : new File("src/docsrc/text").listFiles()) { - if (!f.getName().endsWith(".utf8.txt")) { - f.delete(); - } - } - } - - private static void buildHtml(String templateDir, String targetDir, - String language) throws IOException { - File[] list = new File(templateDir).listFiles(); - new File(targetDir).mkdirs(); - // load the main 'translation' - String propName = templateDir + "/_docs_" + MAIN_LANGUAGE - + ".properties"; - Properties prop = load(propName, false); - propName = templateDir + "/_docs_" + language + ".properties"; - if (!(new File(propName)).exists()) { - 
throw new IOException("Translation not found: " + propName); - } - Properties transProp = load(propName, false); - for (Object k : transProp.keySet()) { - String key = (String) k; - String t = transProp.getProperty(key); - // overload with translations, but not the ones starting with # - if (t.startsWith("##")) { - prop.put(key, t.substring(2)); - } else if (!t.startsWith("#")) { - prop.put(key, t); - } - } - ArrayList fileNames = new ArrayList<>(); - for (File f : list) { - String name = f.getName(); - if (!name.endsWith(".jsp")) { - continue; - } - // remove '.jsp' - name = name.substring(0, name.length() - 4); - fileNames.add(name); - } - for (File f : list) { - String name = f.getName(); - if (!name.endsWith(".jsp")) { - continue; - } - // remove '.jsp' - name = name.substring(0, name.length() - 4); - String template = IOUtils.readStringAndClose(new FileReader( - templateDir + "/" + name + ".jsp"), -1); - HashMap map = new HashMap<>(); - for (Object k : prop.keySet()) { - map.put(k.toString(), prop.get(k)); - } - String html = PageParser.parse(template, map); - html = StringUtils.replaceAll(html, "lang=\"" + MAIN_LANGUAGE - + "\"", "lang=\"" + language + "\""); - for (String n : fileNames) { - if ("frame".equals(n)) { - // don't translate 'frame.html' to 'frame_ja.html', - // otherwise we can't switch back to English - continue; - } - html = StringUtils.replaceAll(html, n + ".html\"", n + "_" - + language + ".html\""); - } - html = StringUtils.replaceAll(html, - "_" + MAIN_LANGUAGE + ".html\"", ".html\""); - String target; - if (language.equals(MAIN_LANGUAGE)) { - target = targetDir + "/" + name + ".html"; - } else { - target = targetDir + "/" + name + "_" + language + ".html"; - } - OutputStream out = new FileOutputStream(target); - OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8); - writer.write(html); - writer.close(); - } - } - - private static boolean exclude(String fileName) { - for (String e : EXCLUDE) { - if 
(fileName.endsWith(e)) { - return true; - } - } - return false; - } - - private static void extractFromHtml(String dir, String target) - throws Exception { - for (File f : new File(dir).listFiles()) { - String name = f.getName(); - if (!name.endsWith(".html")) { - continue; - } - if (exclude(name)) { - continue; - } - // remove '.html' - name = name.substring(0, name.length() - 5); - if (name.indexOf('_') >= 0) { - // ignore translated files - continue; - } - String template = extract(name, f, target); - FileWriter writer = new FileWriter(target + "/" + name + ".jsp"); - writer.write(template); - writer.close(); - } - } - - // private static boolean isText(String s) { - // if (s.length() < 2) { - // return false; - // } - // for (int i = 0; i < s.length(); i++) { - // char c = s.charAt(i); - // if (!Character.isDigit(c) && c != '.' && c != '-' && c != '+') { - // return true; - // } - // } - // return false; - // } - - private static String getSpace(String s, boolean start) { - if (start) { - for (int i = 0; i < s.length(); i++) { - if (!Character.isSpaceChar(s.charAt(i))) { - if (i == 0) { - return ""; - } - return s.substring(0, i); - } - } - return s; - } - for (int i = s.length() - 1; i >= 0; i--) { - if (!Character.isSpaceChar(s.charAt(i))) { - if (i == s.length() - 1) { - return ""; - } - return s.substring(i + 1, s.length()); - } - } - // if all spaces, return an empty string to avoid duplicate spaces - return ""; - } - - private static String extract(String documentName, File f, String target) - throws Exception { - String xml = IOUtils.readStringAndClose(new InputStreamReader( - new FileInputStream(f), StandardCharsets.UTF_8), -1); - // the template contains ${} instead of text - StringBuilder template = new StringBuilder(xml.length()); - int id = 0; - SortedProperties prop = new SortedProperties(); - XMLParser parser = new XMLParser(xml); - StringBuilder buff = new StringBuilder(); - Stack stack = new Stack<>(); - String tag = ""; - boolean ignoreEnd = 
false; - String nextKey = ""; - // for debugging - boolean templateIsCopy = false; - while (true) { - int event = parser.next(); - if (event == XMLParser.END_DOCUMENT) { - break; - } else if (event == XMLParser.CHARACTERS) { - String s = parser.getText(); - if (s.trim().length() == 0) { - if (buff.length() > 0) { - buff.append(s); - } else { - template.append(s); - } - } else if ("p".equals(tag) || "li".equals(tag) - || "a".equals(tag) || "td".equals(tag) - || "th".equals(tag) || "h1".equals(tag) - || "h2".equals(tag) || "h3".equals(tag) - || "h4".equals(tag) || "body".equals(tag) - || "b".equals(tag) || "code".equals(tag) - || "form".equals(tag) || "span".equals(tag) - || "em".equals(tag) || "div".equals(tag) - || "strong".equals(tag) || "label".equals(tag)) { - if (buff.length() == 0) { - nextKey = documentName + "_" + (1000 + id++) + "_" - + tag; - template.append(getSpace(s, true)); - } else if (templateIsCopy) { - buff.append(getSpace(s, true)); - } - buff.append(s); - } else if ("pre".equals(tag) || "title".equals(tag) - || "script".equals(tag) || "style".equals(tag)) { - // ignore, don't translate - template.append(s); - } else { - System.out.println(f.getName() - + " invalid wrapper tag for text: " + tag - + " text: " + s); - System.out.println(parser.getRemaining()); - throw new Exception(); - } - } else if (event == XMLParser.START_ELEMENT) { - stack.add(tag); - String name = parser.getName(); - if ("code".equals(name) || "a".equals(name) || "b".equals(name) - || "span".equals(name)) { - // keep tags if wrapped, but not if this is the wrapper - if (buff.length() > 0) { - buff.append(parser.getToken()); - ignoreEnd = false; - } else { - ignoreEnd = true; - template.append(parser.getToken()); - } - } else if ("p".equals(tag) || "li".equals(tag) - || "td".equals(tag) || "th".equals(tag) - || "h1".equals(tag) || "h2".equals(tag) - || "h3".equals(tag) || "h4".equals(tag) - || "body".equals(tag) || "form".equals(tag)) { - if (buff.length() > 0) { - if 
(templateIsCopy) { - template.append(buff.toString()); - } else { - template.append("${" + nextKey + "}"); - } - add(prop, nextKey, buff); - } - template.append(parser.getToken()); - } else { - template.append(parser.getToken()); - } - tag = name; - } else if (event == XMLParser.END_ELEMENT) { - String name = parser.getName(); - if ("code".equals(name) || "a".equals(name) || "b".equals(name) - || "span".equals(name) || "em".equals(name) || "strong".equals(name)) { - if (ignoreEnd) { - if (buff.length() > 0) { - if (templateIsCopy) { - template.append(buff.toString()); - } else { - template.append("${" + nextKey + "}"); - } - add(prop, nextKey, buff); - } - template.append(parser.getToken()); - } else { - if (buff.length() > 0) { - buff.append(parser.getToken()); - } - } - } else { - if (buff.length() > 0) { - if (templateIsCopy) { - template.append(buff.toString()); - } else { - template.append("${" + nextKey + "}"); - } - add(prop, nextKey, buff); - } - template.append(parser.getToken()); - } - tag = stack.pop(); - } else if (event == XMLParser.DTD) { - template.append(parser.getToken()); - } else if (event == XMLParser.COMMENT) { - template.append(parser.getToken()); - } else { - int eventType = parser.getEventType(); - throw new Exception("Unexpected event " + eventType + " at " - + parser.getRemaining()); - } - // if(!xml.startsWith(template.toString())) { - // System.out.println(nextKey); - // System.out.println(template.substring(template.length()-60) - // +";"); - // System.out.println(xml.substring(template.length()-60, - // template.length())); - // System.out.println(template.substring(template.length()-55) - // +";"); - // System.out.println(xml.substring(template.length()-55, - // template.length())); - // break; - // } - } - new File(target).mkdirs(); - String propFileName = target + "/_docs_" + MAIN_LANGUAGE + ".properties"; - Properties old = load(propFileName, false); - prop.putAll(old); - store(prop, propFileName, false); - String t = 
template.toString(); - if (templateIsCopy && !t.equals(xml)) { - for (int i = 0; i < Math.min(t.length(), xml.length()); i++) { - if (t.charAt(i) != xml.charAt(i)) { - int start = Math.max(0, i - 30), end = Math.min(i + 30, xml.length()); - t = t.substring(start, end); - xml = xml.substring(start, end); - } - } - System.out.println("xml--------------------------------------------------: "); - System.out.println(xml); - System.out.println("t---------------------------------------------------: "); - System.out.println(t); - System.exit(1); - } - return t; - } - - private static String clean(String text) { - if (text.indexOf('\r') < 0 && text.indexOf('\n') < 0) { - return text; - } - text = text.replace('\r', ' '); - text = text.replace('\n', ' '); - while (true) { - String s = StringUtils.replaceAll(text, " ", " "); - if (s.equals(text)) { - break; - } - text = s; - } - return text; - } - - private static void add(Properties prop, String document, StringBuilder text) { - String s = clean(text.toString()); - text.setLength(0); - prop.setProperty(document, s); - } - - private static void prepare(String baseDir, String path, boolean utf8) - throws IOException { - String suffix = utf8 ? 
".prop" : ".properties"; - File dir = new File(path); - File main = null; - ArrayList translations = new ArrayList<>(); - for (File f : dir.listFiles()) { - if (f.getName().endsWith(suffix) && f.getName().indexOf('_') >= 0) { - if (f.getName().endsWith("_" + MAIN_LANGUAGE + suffix)) { - main = f; - } else { - translations.add(f); - } - } - } - SortedProperties p = load(main.getAbsolutePath(), utf8); - Properties base = load(baseDir + "/" + main.getName(), utf8); - store(p, main.getAbsolutePath(), utf8); - for (File trans : translations) { - String language = trans.getName(); - language = language.substring(language.lastIndexOf('_') + 1, - language.lastIndexOf('.')); - prepare(p, base, trans, utf8); - } - store(p, baseDir + "/" + main.getName(), utf8); - } - - private static SortedProperties load(String fileName, boolean utf8) - throws IOException { - if (utf8) { - String s = new String(IOUtils.readBytesAndClose( - new FileInputStream(fileName), -1), StandardCharsets.UTF_8); - return SortedProperties.fromLines(s); - } - return SortedProperties.loadProperties(fileName); - } - - private static void store(SortedProperties p, String fileName, boolean utf8) - throws IOException { - if (utf8) { - String s = p.toLines(); - FileOutputStream f = new FileOutputStream(fileName); - f.write(s.getBytes(StandardCharsets.UTF_8)); - f.close(); - } else { - p.store(fileName); - } - } - - private static void prepare(Properties main, Properties base, File trans, - boolean utf8) throws IOException { - SortedProperties p = load(trans.getAbsolutePath(), utf8); - Properties oldTranslations = new Properties(); - for (Object k : base.keySet()) { - String key = (String) k; - String m = base.getProperty(key); - String t = p.getProperty(key); - if (t != null && !t.startsWith("#")) { - oldTranslations.setProperty(m, t); - } - } - HashSet toTranslate = new HashSet<>(); - // add missing keys, using # and the value from the main file - for (Object k : main.keySet()) { - String key = (String) k; - 
String now = main.getProperty(key); - if (!p.containsKey(key)) { - String t = oldTranslations.getProperty(now); - if (t == null) { - // System.out.println(trans.getName() + - // ": key " + key + " not found in " + - // "translation file; added # 'translation'"); - t = "#" + now; - p.put(key, t); - } else { - p.put(key, t); - } - } else { - String t = p.getProperty(key); - String last = base.getProperty(key); - if (t.startsWith("#") && !t.startsWith("##")) { - // not translated before - t = oldTranslations.getProperty(now); - if (t == null) { - t = "#" + now; - } - p.put(key, t); - } else if (last != null && !last.equals(now)) { - t = oldTranslations.getProperty(now); - if (t == null) { - // main data changed since the last run: review - // translation - System.out.println(trans.getName() + ": key " + key - + " changed, please review; last=" + last - + " now=" + now); - String old = p.getProperty(key); - t = "#" + now + " #" + old; - p.put(key, t); - } else { - p.put(key, t); - } - } - } - } - for (String key : toTranslate) { - String now = main.getProperty(key); - String t; - System.out - .println(trans.getName() - + ": key " - + key - + " not found in translation file; added dummy # 'translation'"); - t = "#" + now; - p.put(key, t); - } - // remove keys that don't exist in the main file - // (deleted or typo in the key) - for (Object k : new ArrayList<>(p.keySet())) { - String key = (String) k; - if (!main.containsKey(key)) { - p.remove(key); - } - } - store(p, trans.getAbsolutePath(), utf8); - } - -} diff --git a/h2/src/tools/org/h2/build/i18n/PropertiesToUTF8.java b/h2/src/tools/org/h2/build/i18n/PropertiesToUTF8.java deleted file mode 100644 index 592bbf055c..0000000000 --- a/h2/src/tools/org/h2/build/i18n/PropertiesToUTF8.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.build.i18n; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.InputStreamReader; -import java.io.LineNumberReader; -import java.io.OutputStreamWriter; -import java.io.PrintWriter; -import java.io.RandomAccessFile; -import java.nio.charset.StandardCharsets; -import java.util.Enumeration; -import java.util.Properties; -import org.h2.build.code.CheckTextFiles; -import org.h2.build.indexer.HtmlConverter; -import org.h2.util.IOUtils; -import org.h2.util.SortedProperties; -import org.h2.util.StringUtils; - -/** - * This class converts a file stored in the UTF-8 encoding format to - * a properties file and vice versa. - */ -public class PropertiesToUTF8 { - - private PropertiesToUTF8() { - // utility class - } - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... args) throws Exception { - convert("bin/org/h2/res"); - convert("bin/org/h2/server/web/res"); - } - - /** - * Convert a properties file to a UTF-8 text file. - * - * @param source the name of the properties file - * @param target the target file name - */ - static void propertiesToTextUTF8(String source, String target) - throws Exception { - if (!new File(source).exists()) { - return; - } - Properties prop = SortedProperties.loadProperties(source); - FileOutputStream out = new FileOutputStream(target); - PrintWriter writer = new PrintWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8)); - // keys is sorted - for (Enumeration en = prop.keys(); en.hasMoreElements();) { - String key = (String) en.nextElement(); - String value = prop.getProperty(key, null); - writer.print("@" + key + "\n"); - writer.print(value + "\n\n"); - } - writer.close(); - } - - /** - * Convert a translation file (in UTF-8) to a properties file (without - * special characters). 
- * - * @param source the source file name - * @param target the target file name - */ - static void textUTF8ToProperties(String source, String target) - throws Exception { - if (!new File(source).exists()) { - return; - } - try (LineNumberReader reader = new LineNumberReader(new InputStreamReader( - new FileInputStream(source), StandardCharsets.UTF_8))) { - SortedProperties prop = new SortedProperties(); - StringBuilder buff = new StringBuilder(); - String key = null; - boolean found = false; - while (true) { - String line = reader.readLine(); - if (line == null) { - break; - } - line = line.trim(); - if (line.length() == 0) { - continue; - } - if (line.startsWith("@")) { - if (key != null) { - prop.setProperty(key, buff.toString()); - buff.setLength(0); - } - found = true; - key = line.substring(1); - } else { - if (buff.length() > 0) { - buff.append(System.getProperty("line.separator")); - } - buff.append(line); - } - } - if (found) { - prop.setProperty(key, buff.toString()); - } - prop.store(target); - } - } - - private static void convert(String source) throws Exception { - for (File f : new File(source).listFiles()) { - if (!f.getName().endsWith(".properties")) { - continue; - } - FileInputStream in = new FileInputStream(f); - InputStreamReader r = new InputStreamReader(in, StandardCharsets.UTF_8); - String s = IOUtils.readStringAndClose(r, -1); - in.close(); - String name = f.getName(); - String utf8, html; - if (name.startsWith("utf8")) { - utf8 = HtmlConverter.convertHtmlToString(s); - html = HtmlConverter.convertStringToHtml(utf8); - RandomAccessFile out = new RandomAccessFile("_" + name.substring(4), "rw"); - out.write(html.getBytes()); - out.setLength(out.getFilePointer()); - out.close(); - } else { - new CheckTextFiles().checkOrFixFile(f, false, false); - html = s; - utf8 = HtmlConverter.convertHtmlToString(html); - // s = unescapeHtml(s); - utf8 = StringUtils.javaDecode(utf8); - FileOutputStream out = new FileOutputStream("_utf8" + f.getName()); - 
OutputStreamWriter w = new OutputStreamWriter(out, StandardCharsets.UTF_8); - w.write(utf8); - w.close(); - out.close(); - } - String java = StringUtils.javaEncode(utf8); - java = StringUtils.replaceAll(java, "\\r", "\r"); - java = StringUtils.replaceAll(java, "\\n", "\n"); - RandomAccessFile out = new RandomAccessFile("_java." + name, "rw"); - out.write(java.getBytes()); - out.setLength(out.getFilePointer()); - out.close(); - } - } - -} diff --git a/h2/src/tools/org/h2/build/indexer/HtmlConverter.java b/h2/src/tools/org/h2/build/indexer/HtmlConverter.java index 474a429459..7d226a84e4 100644 --- a/h2/src/tools/org/h2/build/indexer/HtmlConverter.java +++ b/h2/src/tools/org/h2/build/indexer/HtmlConverter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.indexer; diff --git a/h2/src/tools/org/h2/build/indexer/Indexer.java b/h2/src/tools/org/h2/build/indexer/Indexer.java index 27baf4b287..a324cce6c1 100644 --- a/h2/src/tools/org/h2/build/indexer/Indexer.java +++ b/h2/src/tools/org/h2/build/indexer/Indexer.java @@ -1,22 +1,23 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.indexer; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileWriter; +import java.io.IOException; import java.io.PrintWriter; import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.StringTokenizer; -import org.h2.util.IOUtils; import org.h2.util.StringUtils; /** @@ -55,6 +56,7 @@ public class Indexer { * line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { new Indexer().run(args); @@ -70,7 +72,7 @@ private void run(String... args) throws Exception { destDir = args[++i]; } } - File file = new File(dir); + Path directory = Paths.get(dir); setNoIndex("index.html", "html/header.html", "html/search.html", "html/frame.html", "html/fragments.html", "html/sourceError.html", "html/source.html", @@ -79,8 +81,14 @@ private void run(String... 
args) throws Exception { "javadoc/allclasses-noframe.html", "javadoc/constant-values.html", "javadoc/overview-frame.html", "javadoc/overview-summary.html", "javadoc/serialized-form.html"); - output = new PrintWriter(new FileWriter(destDir + "/index.js")); - readPages("", file, 0); + output = new PrintWriter(Files.newBufferedWriter(Paths.get(destDir + "/index.js"))); + Files.walkFileTree(directory, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + readPages(directory.relativize(file).toString().replace('\\', '/'), file); + return FileVisitResult.CONTINUE; + } + }); output.println("var pages=new Array();"); output.println("var ref=new Array();"); output.println("var ignored='';"); @@ -135,12 +143,7 @@ private void sortWords() { ignored = ignoredBuff.toString(); // TODO support A, B, C,... class links in the index file and use them // for combined AND searches - Collections.sort(wordList, new Comparator() { - @Override - public int compare(Word w0, Word w1) { - return w0.name.compareToIgnoreCase(w1.name); - } - }); + wordList.sort((w0, w1) -> w0.name.compareToIgnoreCase(w1.name)); } private void removeOverflowRelations() { @@ -165,12 +168,7 @@ private void removeOverflowRelations() { } private void sortPages() { - Collections.sort(pages, new Comparator() { - @Override - public int compare(Page p0, Page p1) { - return Integer.compare(p1.relations, p0.relations); - } - }); + pages.sort((p0, p1) -> Integer.compare(p1.relations, p0.relations)); for (int i = 0; i < pages.size(); i++) { pages.get(i).id = i; } @@ -183,22 +181,17 @@ private void listPages() { } } - private void readPages(String dir, File file, int level) throws Exception { - String name = file.getName(); - String fileName = dir.length() > 0 ? dir + "/" + name : level > 0 ? 
name : ""; - if (file.isDirectory()) { - for (File f : file.listFiles()) { - readPages(fileName, f, level + 1); - } - return; - } - String lower = StringUtils.toLowerEnglish(name); + /** + * Read the pages of a file. + * + * @param fileName the file name + * @param file the path + */ + void readPages(String fileName, Path file) throws IOException { + String lower = StringUtils.toLowerEnglish(fileName); if (!lower.endsWith(".html") && !lower.endsWith(".htm")) { return; } - if (lower.contains("_ja.")) { - return; - } if (!noIndex.contains(fileName)) { page = new Page(pages.size(), fileName); pages.add(page); @@ -255,9 +248,8 @@ private void listWords() { output.println("ignored='" + ignored.toLowerCase() + "';"); } - private void readPage(File file) throws Exception { - byte[] data = IOUtils.readBytesAndClose(new FileInputStream(file), 0); - String text = new String(data, StandardCharsets.UTF_8); + private void readPage(Path file) throws IOException { + String text = new String(Files.readAllBytes(file), StandardCharsets.UTF_8); StringTokenizer t = new StringTokenizer(text, "<> \r\n", true); boolean inTag = false; title = false; @@ -312,8 +304,9 @@ private void readPage(File file) throws Exception { } if (page.title == null || page.title.trim().length() == 0) { - System.out.println("Error: not title found in " + file.getName()); - page.title = file.getName(); + String title = file.getFileName().toString(); + System.out.println("Error: not title found in " + title); + page.title = title; } page.title = page.title.trim(); } diff --git a/h2/src/tools/org/h2/build/indexer/Page.java b/h2/src/tools/org/h2/build/indexer/Page.java index 193911744b..4950c9905c 100644 --- a/h2/src/tools/org/h2/build/indexer/Page.java +++ b/h2/src/tools/org/h2/build/indexer/Page.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.indexer; diff --git a/h2/src/tools/org/h2/build/indexer/Weight.java b/h2/src/tools/org/h2/build/indexer/Weight.java index 4eae0e1422..f44a95ec81 100644 --- a/h2/src/tools/org/h2/build/indexer/Weight.java +++ b/h2/src/tools/org/h2/build/indexer/Weight.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.indexer; diff --git a/h2/src/tools/org/h2/build/indexer/Word.java b/h2/src/tools/org/h2/build/indexer/Word.java index e31a97f4fe..4015491412 100644 --- a/h2/src/tools/org/h2/build/indexer/Word.java +++ b/h2/src/tools/org/h2/build/indexer/Word.java @@ -1,13 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.indexer; import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.Map.Entry; @@ -70,12 +68,7 @@ void addAll(Word other) { ArrayList getSortedWeights() { if (weightList == null) { weightList = new ArrayList<>(pages.values()); - Collections.sort(weightList, new Comparator() { - @Override - public int compare(Weight w0, Weight w1) { - return Integer.compare(w1.value, w0.value); - } - }); + weightList.sort((w0, w1) -> Integer.compare(w1.value, w0.value)); } return weightList; } diff --git a/h2/src/tools/org/h2/build/indexer/package.html b/h2/src/tools/org/h2/build/indexer/package.html index bf40933b1d..e982aed7a6 100644 --- a/h2/src/tools/org/h2/build/indexer/package.html +++ b/h2/src/tools/org/h2/build/indexer/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/build/package.html b/h2/src/tools/org/h2/build/package.html index e3a5aae2f9..b4d57cdf3f 100644 --- a/h2/src/tools/org/h2/build/package.html +++ b/h2/src/tools/org/h2/build/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/cache/CacheLIRS.java b/h2/src/tools/org/h2/dev/cache/CacheLIRS.java index 667f42d9a5..7667cb3a0c 100644 --- a/h2/src/tools/org/h2/dev/cache/CacheLIRS.java +++ b/h2/src/tools/org/h2/dev/cache/CacheLIRS.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.cache; @@ -26,7 +26,7 @@ *

          * This class implements an approximation of the LIRS replacement algorithm * invented by Xiaodong Zhang and Song Jiang as described in - * http://www.cse.ohio-state.edu/~zhang/lirs-sigmetrics-02.html with a few + * https://web.cse.ohio-state.edu/~zhang.574/lirs-sigmetrics-02.html with a few * smaller changes: An additional queue for non-resident entries is used, to * prevent unbound memory usage. The maximum size of this queue is at most the * size of the rest of the stack. About 6.25% of the mapped entries are cold. diff --git a/h2/src/tools/org/h2/dev/cache/package.html b/h2/src/tools/org/h2/dev/cache/package.html index 7e72611bff..b72f46deed 100644 --- a/h2/src/tools/org/h2/dev/cache/package.html +++ b/h2/src/tools/org/h2/dev/cache/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/cluster/ShardedMap.java b/h2/src/tools/org/h2/dev/cluster/ShardedMap.java index 59cf7759dc..2ac17eb658 100644 --- a/h2/src/tools/org/h2/dev/cluster/ShardedMap.java +++ b/h2/src/tools/org/h2/dev/cluster/ShardedMap.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.cluster; @@ -25,7 +25,7 @@ */ public class ShardedMap extends AbstractMap { - private final DataType keyType; + private final DataType keyType; /** * The shards. 
Each shard has a minimum and a maximum key (null for no @@ -276,11 +276,6 @@ public Entry next() { return e; } - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; } diff --git a/h2/src/tools/org/h2/dev/cluster/package.html b/h2/src/tools/org/h2/dev/cluster/package.html index 509c56232e..5e941c9d23 100644 --- a/h2/src/tools/org/h2/dev/cluster/package.html +++ b/h2/src/tools/org/h2/dev/cluster/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/fs/ArchiveTool.java b/h2/src/tools/org/h2/dev/fs/ArchiveTool.java index 3d01a0b332..08128e953e 100644 --- a/h2/src/tools/org/h2/dev/fs/ArchiveTool.java +++ b/h2/src/tools/org/h2/dev/fs/ArchiveTool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.fs; @@ -255,7 +255,7 @@ public int read() throws IOException { fileIn.close(); fileIn = null; } - if (files.size() == 0) { + if (files.isEmpty()) { // EOF return -1; } @@ -562,7 +562,7 @@ private static Iterator merge(final TreeSet segmentIn, final @Override public boolean hasNext() { - return segmentIn.size() > 0; + return !segmentIn.isEmpty(); } @Override @@ -578,11 +578,6 @@ public Chunk next() { return c; } - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; } @@ -953,7 +948,7 @@ public static Chunk read(DataInputStream in, boolean readKey) { } idList.add(x); } - if (idList.size() == 0) { + if (idList.isEmpty()) { // eof in.close(); return null; diff --git a/h2/src/tools/org/h2/dev/fs/ArchiveToolStore.java b/h2/src/tools/org/h2/dev/fs/ArchiveToolStore.java index ec78803783..6324d2f26b 100644 --- a/h2/src/tools/org/h2/dev/fs/ArchiveToolStore.java +++ b/h2/src/tools/org/h2/dev/fs/ArchiveToolStore.java 
@@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.fs; @@ -12,8 +12,6 @@ import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; import java.util.Map.Entry; import java.util.Random; import java.util.concurrent.TimeUnit; @@ -177,28 +175,22 @@ private void compress(String sourceDir) throws Exception { MVMap data = store.openMap("data" + segmentId); MVMap keepSegment = storeTemp.openMap("keep"); while (list.size() > 0) { - Collections.sort(list, new Comparator>() { - - @Override - public int compare(Cursor o1, - Cursor o2) { - int[] k1 = o1.getKey(); - int[] k2 = o2.getKey(); - int comp = 0; - for (int i = 0; i < k1.length - 1; i++) { - long x1 = k1[i]; - long x2 = k2[i]; - if (x1 > x2) { - comp = 1; - break; - } else if (x1 < x2) { - comp = -1; - break; - } - } - return comp; + list.sort((o1, o2) -> { + int[] k1 = o1.getKey(); + int[] k2 = o2.getKey(); + int comp = 0; + for (int i = 0; i < k1.length - 1; i++) { + long x1 = k1[i]; + long x2 = k2[i]; + if (x1 > x2) { + comp = 1; + break; + } else if (x1 < x2) { + comp = -1; + break; } - + } + return comp; }); Cursor top = list.get(0); int[] key = top.getKey(); @@ -395,28 +387,22 @@ private void expand(String targetDir) throws Exception { OutputStream file = null; int[] lastKey = null; while (list.size() > 0) { - Collections.sort(list, new Comparator>() { - - @Override - public int compare(Cursor o1, - Cursor o2) { - int[] k1 = o1.getKey(); - int[] k2 = o2.getKey(); - int comp = 0; - for (int i = 0; i < k1.length; i++) { - long x1 = k1[i]; - long x2 = k2[i]; - if (x1 > x2) { - comp = 1; - break; - } else if (x1 < x2) { - comp = -1; - break; - 
} + list.sort((o1, o2) -> { + int[] k1 = o1.getKey(); + int[] k2 = o2.getKey(); + int comp = 0; + for (int i = 0; i < k1.length; i++) { + long x1 = k1[i]; + long x2 = k2[i]; + if (x1 > x2) { + comp = 1; + break; + } else if (x1 < x2) { + comp = -1; + break; } - return comp; } - + return comp; }); Cursor top = list.get(0); int[] key = top.getKey(); diff --git a/h2/src/tools/org/h2/dev/fs/FilePathZip2.java b/h2/src/tools/org/h2/dev/fs/FilePathZip2.java index 60e1c4fd31..92578827e0 100644 --- a/h2/src/tools/org/h2/dev/fs/FilePathZip2.java +++ b/h2/src/tools/org/h2/dev/fs/FilePathZip2.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.fs; @@ -10,6 +10,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; +import java.nio.channels.Channels; import java.nio.channels.FileChannel; import java.nio.channels.FileLock; import java.util.ArrayList; @@ -19,10 +20,9 @@ import org.h2.message.DbException; import org.h2.store.fs.FakeFileChannel; import org.h2.store.fs.FileBase; -import org.h2.store.fs.FileChannelInputStream; import org.h2.store.fs.FilePath; -import org.h2.store.fs.FilePathDisk; import org.h2.store.fs.FileUtils; +import org.h2.store.fs.disk.FilePathDisk; import org.h2.util.IOUtils; /** @@ -243,7 +243,7 @@ public FilePath toRealPath() { @Override public InputStream newInputStream() throws IOException { - return new FileChannelInputStream(open("r"), true); + return Channels.newInputStream(open("r")); } @Override diff --git a/h2/src/tools/org/h2/dev/fs/FileShell.java b/h2/src/tools/org/h2/dev/fs/FileShell.java index 51a71c6530..be7ce88ba1 100644 --- a/h2/src/tools/org/h2/dev/fs/FileShell.java +++ b/h2/src/tools/org/h2/dev/fs/FileShell.java @@ -1,6 
+1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.fs; @@ -23,7 +23,6 @@ import org.h2.command.dml.BackupCommand; import org.h2.engine.Constants; -import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.store.fs.FileUtils; import org.h2.util.IOUtils; @@ -42,8 +41,9 @@ public class FileShell extends Tool { private String currentWorkingDirectory; /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. * + * * * * @@ -52,9 +52,9 @@ public class FileShell extends Tool { * *
          Supported options
          [-help] or [-?]Print the list of options
          [-verbose]Execute the given commands and exit
          * Multiple commands may be executed if separated by ; - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new FileShell().runTool(args); @@ -121,7 +121,7 @@ public void runTool(String... args) throws SQLException { private void promptLoop() { println(""); - println("Welcome to H2 File Shell " + Constants.getFullVersion()); + println("Welcome to H2 File Shell " + Constants.FULL_VERSION); println("Exit with Ctrl+C"); showHelp(); if (reader == null) { @@ -343,7 +343,7 @@ private static void zip(String zipFileName, String base, for (String fileName : source) { String f = FileUtils.toRealPath(fileName); if (!f.startsWith(base)) { - DbException.throwInternalError(f + " does not start with " + base); + throw DbException.getInternalError(f + " does not start with " + base); } if (f.endsWith(zipFileName)) { continue; @@ -388,17 +388,13 @@ private void unzip(String zipFileName, String targetDir) { } String fileName = entry.getName(); // restoring windows backups on linux and vice versa - fileName = fileName.replace('\\', - SysProperties.FILE_SEPARATOR.charAt(0)); - fileName = fileName.replace('/', - SysProperties.FILE_SEPARATOR.charAt(0)); - if (fileName.startsWith(SysProperties.FILE_SEPARATOR)) { + fileName = IOUtils.nameSeparatorsToNative(fileName); + if (fileName.startsWith(File.separator)) { fileName = fileName.substring(1); } OutputStream o = null; try { - o = FileUtils.newOutputStream(targetDir - + SysProperties.FILE_SEPARATOR + fileName, false); + o = FileUtils.newOutputStream(targetDir + File.separatorChar + fileName, false); IOUtils.copy(zipIn, o); o.close(); } finally { @@ -451,7 +447,7 @@ private String getFile(String f) { } String unwrapped = FileUtils.unwrap(f); String prefix = f.substring(0, f.length() - unwrapped.length()); - f = prefix + currentWorkingDirectory + SysProperties.FILE_SEPARATOR + unwrapped; + f = prefix + currentWorkingDirectory 
+ File.separatorChar + unwrapped; return FileUtils.toRealPath(f); } diff --git a/h2/src/tools/org/h2/dev/fs/package.html b/h2/src/tools/org/h2/dev/fs/package.html index 0de527b466..e541d95b76 100644 --- a/h2/src/tools/org/h2/dev/fs/package.html +++ b/h2/src/tools/org/h2/dev/fs/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/ftp/FtpClient.java b/h2/src/tools/org/h2/dev/ftp/FtpClient.java index 5cc12d2082..faf1f36239 100644 --- a/h2/src/tools/org/h2/dev/ftp/FtpClient.java +++ b/h2/src/tools/org/h2/dev/ftp/FtpClient.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.ftp; @@ -378,8 +378,7 @@ public String nameList(String dir) throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream(); IOUtils.copyAndClose(inData, out); readCode(226); - byte[] data = out.toByteArray(); - return new String(data); + return out.toString(); } /** @@ -395,8 +394,7 @@ public String list(String dir) throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream(); IOUtils.copyAndClose(inData, out); readCode(226); - byte[] data = out.toByteArray(); - return new String(data); + return out.toString(); } /** diff --git a/h2/src/tools/org/h2/dev/ftp/package.html b/h2/src/tools/org/h2/dev/ftp/package.html index 4b9332cb8d..fcfd171c67 100644 --- a/h2/src/tools/org/h2/dev/ftp/package.html +++ b/h2/src/tools/org/h2/dev/ftp/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/ftp/server/FtpControl.java b/h2/src/tools/org/h2/dev/ftp/server/FtpControl.java index 5b32887b01..7e0a42e22e 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/FtpControl.java +++ b/h2/src/tools/org/h2/dev/ftp/server/FtpControl.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.ftp.server; diff --git a/h2/src/tools/org/h2/dev/ftp/server/FtpData.java b/h2/src/tools/org/h2/dev/ftp/server/FtpData.java index 92ca751ff9..6faf76518b 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/FtpData.java +++ b/h2/src/tools/org/h2/dev/ftp/server/FtpData.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.ftp.server; diff --git a/h2/src/tools/org/h2/dev/ftp/server/FtpEvent.java b/h2/src/tools/org/h2/dev/ftp/server/FtpEvent.java index df70f364b3..55f91f8242 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/FtpEvent.java +++ b/h2/src/tools/org/h2/dev/ftp/server/FtpEvent.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.ftp.server; diff --git a/h2/src/tools/org/h2/dev/ftp/server/FtpEventListener.java b/h2/src/tools/org/h2/dev/ftp/server/FtpEventListener.java index 2f6d7ef7c1..e01a19aa9d 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/FtpEventListener.java +++ b/h2/src/tools/org/h2/dev/ftp/server/FtpEventListener.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.ftp.server; diff --git a/h2/src/tools/org/h2/dev/ftp/server/FtpServer.java b/h2/src/tools/org/h2/dev/ftp/server/FtpServer.java index 571ef7c307..176e5f1f60 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/FtpServer.java +++ b/h2/src/tools/org/h2/dev/ftp/server/FtpServer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.ftp.server; @@ -28,7 +28,7 @@ /** * Small FTP Server. Intended for ad-hoc networks in a secure environment. * Remote connections are possible. - * See also http://cr.yp.to/ftp.html http://www.ftpguide.com/ + * See also https://cr.yp.to/ftp.html http://www.ftpguide.com/ */ public class FtpServer extends Tool implements Service { @@ -91,9 +91,10 @@ public class FtpServer extends Tool implements Service { /** * When running without options, -tcp, -web, -browser, - * and -pg are started.
          - * Options are case sensitive. Supported options are: + * and -pg are started. + * Options are case sensitive. * + * * * * @@ -145,7 +146,6 @@ public class FtpServer extends Tool implements Service { * * *
          Supported options
          [-help] or [-?]Print the list of options
          [-web]
          [-trace]Print additional trace information; for all servers
          - * @h2.resource * * @param args the command line arguments */ diff --git a/h2/src/tools/org/h2/dev/ftp/server/package.html b/h2/src/tools/org/h2/dev/ftp/server/package.html index b1fe7a324e..29801cdf07 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/package.html +++ b/h2/src/tools/org/h2/dev/ftp/server/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/hash/IntPerfectHash.java b/h2/src/tools/org/h2/dev/hash/IntPerfectHash.java index 997c2e2093..58db01ff78 100644 --- a/h2/src/tools/org/h2/dev/hash/IntPerfectHash.java +++ b/h2/src/tools/org/h2/dev/hash/IntPerfectHash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.hash; diff --git a/h2/src/tools/org/h2/dev/hash/MinimalPerfectHash.java b/h2/src/tools/org/h2/dev/hash/MinimalPerfectHash.java index 566665a3df..3019f11b93 100644 --- a/h2/src/tools/org/h2/dev/hash/MinimalPerfectHash.java +++ b/h2/src/tools/org/h2/dev/hash/MinimalPerfectHash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.hash; diff --git a/h2/src/tools/org/h2/dev/hash/PerfectHash.java b/h2/src/tools/org/h2/dev/hash/PerfectHash.java index ebae905201..185c942ec1 100644 --- a/h2/src/tools/org/h2/dev/hash/PerfectHash.java +++ b/h2/src/tools/org/h2/dev/hash/PerfectHash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.hash; diff --git a/h2/src/tools/org/h2/dev/hash/package.html b/h2/src/tools/org/h2/dev/hash/package.html index 2d7ab69e14..f8d85f7907 100644 --- a/h2/src/tools/org/h2/dev/hash/package.html +++ b/h2/src/tools/org/h2/dev/hash/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/mail/SendMail.java.txt b/h2/src/tools/org/h2/dev/mail/SendMail.java.txt index 60a8370412..26018958b7 100644 --- a/h2/src/tools/org/h2/dev/mail/SendMail.java.txt +++ b/h2/src/tools/org/h2/dev/mail/SendMail.java.txt @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.mail; diff --git a/h2/src/tools/org/h2/dev/net/PgTcpRedirect.java b/h2/src/tools/org/h2/dev/net/PgTcpRedirect.java index 154386ca56..71ce3f98f1 100644 --- a/h2/src/tools/org/h2/dev/net/PgTcpRedirect.java +++ b/h2/src/tools/org/h2/dev/net/PgTcpRedirect.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.net; @@ -30,14 +30,14 @@ public class PgTcpRedirect { * @param args the command line parameters */ public static void main(String... args) throws Exception { - new PgTcpRedirect().loop(args); + loop(args); } - private void loop(String... args) throws Exception { + private static void loop(String... 
args) throws Exception { // MySQL protocol: // http://www.redferni.uklinux.net/mysql/MySQL-Protocol.html // PostgreSQL protocol: - // http://developer.postgresql.org/pgdocs/postgres/protocol.html + // https://www.postgresql.org/docs/devel/protocol.html // int portServer = 9083, portClient = 9084; // int portServer = 3306, portClient = 3307; // H2 PgServer @@ -66,7 +66,7 @@ private void loop(String... args) throws Exception { /** * This is the working thread of the TCP redirector. */ - private class TcpRedirectThread implements Runnable { + private static class TcpRedirectThread implements Runnable { private static final int STATE_INIT_CLIENT = 0, STATE_REGULAR = 1; private final Socket read, write; @@ -92,7 +92,7 @@ String readStringNull(InputStream in) throws IOException { return buff.toString(); } - private void println(String s) { + private static void println(String s) { if (DEBUG) { System.out.println(s); } @@ -385,7 +385,7 @@ private boolean processServer(InputStream inStream, break; } String msg = readStringNull(dataIn); - // http://developer.postgresql.org/pgdocs/postgres/protocol-error-fields.html + // https://www.postgresql.org/docs/devel/protocol-error-fields.html // S Severity // C Code: the SQLSTATE code // M Message @@ -420,7 +420,7 @@ private boolean processServer(InputStream inStream, break; } String msg = readStringNull(dataIn); - // http://developer.postgresql.org/pgdocs/postgres/protocol-error-fields.html + // https://www.postgresql.org/docs/devel/protocol-error-fields.html // S Severity // C Code: the SQLSTATE code // M Message diff --git a/h2/src/tools/org/h2/dev/net/package.html b/h2/src/tools/org/h2/dev/net/package.html index f8844fbaa3..4900db526b 100644 --- a/h2/src/tools/org/h2/dev/net/package.html +++ b/h2/src/tools/org/h2/dev/net/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/security/SecureKeyStoreBuilder.java b/h2/src/tools/org/h2/dev/security/SecureKeyStoreBuilder.java index 59b3c5aaa3..7deed4834c 100644 --- 
a/h2/src/tools/org/h2/dev/security/SecureKeyStoreBuilder.java +++ b/h2/src/tools/org/h2/dev/security/SecureKeyStoreBuilder.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.security; diff --git a/h2/src/tools/org/h2/dev/security/package.html b/h2/src/tools/org/h2/dev/security/package.html index 58ef8a4c9a..cb45245dd9 100644 --- a/h2/src/tools/org/h2/dev/security/package.html +++ b/h2/src/tools/org/h2/dev/security/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/sort/InPlaceStableMergeSort.java b/h2/src/tools/org/h2/dev/sort/InPlaceStableMergeSort.java index 9232b8c856..a442391953 100644 --- a/h2/src/tools/org/h2/dev/sort/InPlaceStableMergeSort.java +++ b/h2/src/tools/org/h2/dev/sort/InPlaceStableMergeSort.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.sort; diff --git a/h2/src/tools/org/h2/dev/sort/InPlaceStableQuicksort.java b/h2/src/tools/org/h2/dev/sort/InPlaceStableQuicksort.java index fa445565ed..dd0632e6ff 100644 --- a/h2/src/tools/org/h2/dev/sort/InPlaceStableQuicksort.java +++ b/h2/src/tools/org/h2/dev/sort/InPlaceStableQuicksort.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.sort; diff --git a/h2/src/tools/org/h2/dev/sort/package.html b/h2/src/tools/org/h2/dev/sort/package.html index 31fceb3282..3632158b6a 100644 --- a/h2/src/tools/org/h2/dev/sort/package.html +++ b/h2/src/tools/org/h2/dev/sort/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/util/AnsCompression.java b/h2/src/tools/org/h2/dev/util/AnsCompression.java index 9f0cdc9848..c27c8e37f4 100644 --- a/h2/src/tools/org/h2/dev/util/AnsCompression.java +++ b/h2/src/tools/org/h2/dev/util/AnsCompression.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/ArrayUtils.java b/h2/src/tools/org/h2/dev/util/ArrayUtils.java index 984f8fa3dd..657d7eafdb 100644 --- a/h2/src/tools/org/h2/dev/util/ArrayUtils.java +++ b/h2/src/tools/org/h2/dev/util/ArrayUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/Base64.java b/h2/src/tools/org/h2/dev/util/Base64.java index 3092d10a71..3606adfb02 100644 --- a/h2/src/tools/org/h2/dev/util/Base64.java +++ b/h2/src/tools/org/h2/dev/util/Base64.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/BinaryArithmeticStream.java b/h2/src/tools/org/h2/dev/util/BinaryArithmeticStream.java index fe43eb1669..e0cacb29b8 100644 --- a/h2/src/tools/org/h2/dev/util/BinaryArithmeticStream.java +++ b/h2/src/tools/org/h2/dev/util/BinaryArithmeticStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/BitStream.java b/h2/src/tools/org/h2/dev/util/BitStream.java index 9b9ef4429f..7968a4a4f2 100644 --- a/h2/src/tools/org/h2/dev/util/BitStream.java +++ b/h2/src/tools/org/h2/dev/util/BitStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/ConcurrentLinkedList.java b/h2/src/tools/org/h2/dev/util/ConcurrentLinkedList.java index cccd291490..bf82210857 100644 --- a/h2/src/tools/org/h2/dev/util/ConcurrentLinkedList.java +++ b/h2/src/tools/org/h2/dev/util/ConcurrentLinkedList.java @@ -1,15 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; import java.util.Iterator; -import org.h2.mvstore.DataUtils; - - /** * A very simple linked list that supports concurrent access. * Internally, it uses immutable objects. @@ -112,11 +109,6 @@ public K next() { return x; } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } diff --git a/h2/src/tools/org/h2/dev/util/ConcurrentLinkedListWithTail.java b/h2/src/tools/org/h2/dev/util/ConcurrentLinkedListWithTail.java index c368f33f56..72a2ebd786 100644 --- a/h2/src/tools/org/h2/dev/util/ConcurrentLinkedListWithTail.java +++ b/h2/src/tools/org/h2/dev/util/ConcurrentLinkedListWithTail.java @@ -1,14 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; import java.util.Iterator; -import org.h2.mvstore.DataUtils; - /** * A very simple linked list that supports concurrent access. * @@ -132,11 +130,6 @@ public K next() { return x; } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } diff --git a/h2/src/tools/org/h2/dev/util/ConcurrentRing.java b/h2/src/tools/org/h2/dev/util/ConcurrentRing.java index ff036ae1d5..73a06edd5e 100644 --- a/h2/src/tools/org/h2/dev/util/ConcurrentRing.java +++ b/h2/src/tools/org/h2/dev/util/ConcurrentRing.java @@ -1,14 +1,12 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; import java.util.Iterator; -import org.h2.mvstore.DataUtils; - /** * A ring buffer that supports concurrent access. * @@ -144,11 +142,6 @@ public K next() { return buffer[getIndex(readPos + offset++)]; } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } diff --git a/h2/src/tools/org/h2/dev/util/FileContentHash.java b/h2/src/tools/org/h2/dev/util/FileContentHash.java index 2e1d02c491..f815c37f6e 100644 --- a/h2/src/tools/org/h2/dev/util/FileContentHash.java +++ b/h2/src/tools/org/h2/dev/util/FileContentHash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/FileViewer.java b/h2/src/tools/org/h2/dev/util/FileViewer.java index 6453a229f2..d92cd51f36 100644 --- a/h2/src/tools/org/h2/dev/util/FileViewer.java +++ b/h2/src/tools/org/h2/dev/util/FileViewer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/ImmutableArray.java b/h2/src/tools/org/h2/dev/util/ImmutableArray.java index a4de8b324d..2cdcfb239b 100644 --- a/h2/src/tools/org/h2/dev/util/ImmutableArray.java +++ b/h2/src/tools/org/h2/dev/util/ImmutableArray.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; @@ -163,11 +163,6 @@ public K next() { return a.get(index++); } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } diff --git a/h2/src/tools/org/h2/dev/util/ImmutableArray2.java b/h2/src/tools/org/h2/dev/util/ImmutableArray2.java index cb137b975a..3e4130fbfe 100644 --- a/h2/src/tools/org/h2/dev/util/ImmutableArray2.java +++ b/h2/src/tools/org/h2/dev/util/ImmutableArray2.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; @@ -203,11 +203,6 @@ public K next() { return a.get(index++); } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } diff --git a/h2/src/tools/org/h2/dev/util/ImmutableArray3.java b/h2/src/tools/org/h2/dev/util/ImmutableArray3.java index 42f6f04d4a..93cde7be31 100644 --- a/h2/src/tools/org/h2/dev/util/ImmutableArray3.java +++ b/h2/src/tools/org/h2/dev/util/ImmutableArray3.java @@ -1,12 +1,11 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; import java.util.Iterator; -import org.h2.mvstore.DataUtils; /** * An immutable array. 
@@ -152,11 +151,6 @@ public K next() { return a.get(index++); } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } diff --git a/h2/src/tools/org/h2/dev/util/JavaProcessKiller.java b/h2/src/tools/org/h2/dev/util/JavaProcessKiller.java index 3f6b6ac056..4a45487e7f 100644 --- a/h2/src/tools/org/h2/dev/util/JavaProcessKiller.java +++ b/h2/src/tools/org/h2/dev/util/JavaProcessKiller.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/Migrate.java b/h2/src/tools/org/h2/dev/util/Migrate.java index b0c657f98b..b9e647a88d 100644 --- a/h2/src/tools/org/h2/dev/util/Migrate.java +++ b/h2/src/tools/org/h2/dev/util/Migrate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; @@ -37,7 +37,7 @@ public class Migrate { private static final String PASSWORD = "sa"; private static final File OLD_H2_FILE = new File("./h2-1.2.127.jar"); private static final String DOWNLOAD_URL = - "http://repo2.maven.org/maven2/com/h2database/h2/1.2.127/h2-1.2.127.jar"; + "https://repo1.maven.org/maven2/com/h2database/h2/1.2.127/h2-1.2.127.jar"; private static final String CHECKSUM = "056e784c7cf009483366ab9cd8d21d02fe47031a"; private static final String TEMP_SCRIPT = "backup.sql"; diff --git a/h2/src/tools/org/h2/dev/util/ReaderInputStream.java b/h2/src/tools/org/h2/dev/util/ReaderInputStream.java index 64ef569ac7..1bb9c6a74c 100644 --- a/h2/src/tools/org/h2/dev/util/ReaderInputStream.java +++ b/h2/src/tools/org/h2/dev/util/ReaderInputStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/RemovePasswords.java b/h2/src/tools/org/h2/dev/util/RemovePasswords.java index a58800caaf..9b915923f9 100644 --- a/h2/src/tools/org/h2/dev/util/RemovePasswords.java +++ b/h2/src/tools/org/h2/dev/util/RemovePasswords.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/ThreadDumpCleaner.java b/h2/src/tools/org/h2/dev/util/ThreadDumpCleaner.java index 591c432fd7..0405a9057e 100644 --- a/h2/src/tools/org/h2/dev/util/ThreadDumpCleaner.java +++ b/h2/src/tools/org/h2/dev/util/ThreadDumpCleaner.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/ThreadDumpFilter.java b/h2/src/tools/org/h2/dev/util/ThreadDumpFilter.java index ca7d0edfdc..acac8b9372 100644 --- a/h2/src/tools/org/h2/dev/util/ThreadDumpFilter.java +++ b/h2/src/tools/org/h2/dev/util/ThreadDumpFilter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/ThreadDumpInliner.java b/h2/src/tools/org/h2/dev/util/ThreadDumpInliner.java index 21e2c44df1..0ab1755a11 100644 --- a/h2/src/tools/org/h2/dev/util/ThreadDumpInliner.java +++ b/h2/src/tools/org/h2/dev/util/ThreadDumpInliner.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/package.html b/h2/src/tools/org/h2/dev/util/package.html index 2cd16b6ef7..39f23a4632 100644 --- a/h2/src/tools/org/h2/dev/util/package.html +++ b/h2/src/tools/org/h2/dev/util/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/java/ClassObj.java b/h2/src/tools/org/h2/java/ClassObj.java index 3584fb71bd..88a84beb2d 100644 --- a/h2/src/tools/org/h2/java/ClassObj.java +++ b/h2/src/tools/org/h2/java/ClassObj.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/Expr.java b/h2/src/tools/org/h2/java/Expr.java index 504f9c91e3..ed72d184bd 100644 --- a/h2/src/tools/org/h2/java/Expr.java +++ b/h2/src/tools/org/h2/java/Expr.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.java; @@ -80,7 +80,7 @@ public String asString() { StringBuilder buff = new StringBuilder(); initMethod(); if (method.isIgnore) { - if (args.size() == 0) { + if (args.isEmpty()) { // ignore } else if (args.size() == 1) { buff.append(args.get(0)); @@ -394,7 +394,7 @@ class NewExpr extends ExprBase { public String asString() { boolean refCount = type.refCount; StringBuilder buff = new StringBuilder(); - if (arrayInitExpr.size() > 0) { + if (!arrayInitExpr.isEmpty()) { if (refCount) { if (classObj.isPrimitive) { buff.append("ptr< array< " + classObj + " > >"); diff --git a/h2/src/tools/org/h2/java/Ignore.java b/h2/src/tools/org/h2/java/Ignore.java index 2ba9eec488..1ed8d3708f 100644 --- a/h2/src/tools/org/h2/java/Ignore.java +++ b/h2/src/tools/org/h2/java/Ignore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/JavaParser.java b/h2/src/tools/org/h2/java/JavaParser.java index b560963fa9..9eadb1ddae 100644 --- a/h2/src/tools/org/h2/java/JavaParser.java +++ b/h2/src/tools/org/h2/java/JavaParser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/Local.java b/h2/src/tools/org/h2/java/Local.java index ba949934a5..2df19d9527 100644 --- a/h2/src/tools/org/h2/java/Local.java +++ b/h2/src/tools/org/h2/java/Local.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/Statement.java b/h2/src/tools/org/h2/java/Statement.java index dde7bfd58b..13a5b2e8bf 100644 --- a/h2/src/tools/org/h2/java/Statement.java +++ b/h2/src/tools/org/h2/java/Statement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/Test.java b/h2/src/tools/org/h2/java/Test.java index 406bacdcae..9ce40aece4 100644 --- a/h2/src/tools/org/h2/java/Test.java +++ b/h2/src/tools/org/h2/java/Test.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/TestApp.java b/h2/src/tools/org/h2/java/TestApp.java index 0dfe136dca..cd848c6869 100644 --- a/h2/src/tools/org/h2/java/TestApp.java +++ b/h2/src/tools/org/h2/java/TestApp.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/io/PrintStream.java b/h2/src/tools/org/h2/java/io/PrintStream.java index 0311496971..4eed18ddb9 100644 --- a/h2/src/tools/org/h2/java/io/PrintStream.java +++ b/h2/src/tools/org/h2/java/io/PrintStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.io; diff --git a/h2/src/tools/org/h2/java/io/package.html b/h2/src/tools/org/h2/java/io/package.html index f131a92b95..fb9167e95f 100644 --- a/h2/src/tools/org/h2/java/io/package.html +++ b/h2/src/tools/org/h2/java/io/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/java/lang/Integer.java b/h2/src/tools/org/h2/java/lang/Integer.java index 2c63679439..94e98755e9 100644 --- a/h2/src/tools/org/h2/java/lang/Integer.java +++ b/h2/src/tools/org/h2/java/lang/Integer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/Long.java b/h2/src/tools/org/h2/java/lang/Long.java index b621a47995..fa99c22cd4 100644 --- a/h2/src/tools/org/h2/java/lang/Long.java +++ b/h2/src/tools/org/h2/java/lang/Long.java @@ -1,7 +1,7 @@ /* /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/Math.java b/h2/src/tools/org/h2/java/lang/Math.java index 85c4eab721..f32cc63669 100644 --- a/h2/src/tools/org/h2/java/lang/Math.java +++ b/h2/src/tools/org/h2/java/lang/Math.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/Object.java b/h2/src/tools/org/h2/java/lang/Object.java index 9c698c987d..2f7fb39921 100644 --- a/h2/src/tools/org/h2/java/lang/Object.java +++ b/h2/src/tools/org/h2/java/lang/Object.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/String.java b/h2/src/tools/org/h2/java/lang/String.java index 295d10b46d..7f316c6041 100644 --- a/h2/src/tools/org/h2/java/lang/String.java +++ b/h2/src/tools/org/h2/java/lang/String.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/StringBuilder.java b/h2/src/tools/org/h2/java/lang/StringBuilder.java index d068a66efe..3d7eb79f11 100644 --- a/h2/src/tools/org/h2/java/lang/StringBuilder.java +++ b/h2/src/tools/org/h2/java/lang/StringBuilder.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/System.java b/h2/src/tools/org/h2/java/lang/System.java index 04ea68221b..ba75438608 100644 --- a/h2/src/tools/org/h2/java/lang/System.java +++ b/h2/src/tools/org/h2/java/lang/System.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/package.html b/h2/src/tools/org/h2/java/lang/package.html index f131a92b95..fb9167e95f 100644 --- a/h2/src/tools/org/h2/java/lang/package.html +++ b/h2/src/tools/org/h2/java/lang/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/java/package.html b/h2/src/tools/org/h2/java/package.html index b4b2941d5a..0beb44f98c 100644 --- a/h2/src/tools/org/h2/java/package.html +++ b/h2/src/tools/org/h2/java/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/java/util/Arrays.java b/h2/src/tools/org/h2/java/util/Arrays.java index 09b59ccc5b..463625c980 100644 --- a/h2/src/tools/org/h2/java/util/Arrays.java +++ b/h2/src/tools/org/h2/java/util/Arrays.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.util; diff --git a/h2/src/tools/org/h2/java/util/package.html b/h2/src/tools/org/h2/java/util/package.html index f131a92b95..fb9167e95f 100644 --- a/h2/src/tools/org/h2/java/util/package.html +++ b/h2/src/tools/org/h2/java/util/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/jcr/Railroads.java b/h2/src/tools/org/h2/jcr/Railroads.java index 3fc88a5949..21d167bddf 100644 --- a/h2/src/tools/org/h2/jcr/Railroads.java +++ b/h2/src/tools/org/h2/jcr/Railroads.java @@ -1,15 +1,15 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jcr; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.InputStreamReader; import java.io.Reader; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.util.ArrayList; @@ -22,7 +22,6 @@ import org.h2.build.doc.RailroadImages; import org.h2.server.web.PageParser; import org.h2.tools.Csv; -import org.h2.util.IOUtils; import org.h2.util.StringUtils; /** @@ -38,6 +37,7 @@ public class Railroads { * line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { new Railroads().process(); @@ -56,21 +56,18 @@ private void process() throws Exception { private void processHtml(String fileName) throws Exception { String source = "src/tools/org/h2/jcr/"; String target = "docs/html/"; - byte[] s = BuildBase.readFile(new File(source + "stylesheet.css")); - BuildBase.writeFile(new File(target + "stylesheet.css"), s); - String inFile = source + fileName; - String outFile = target + fileName; - new File(outFile).getParentFile().mkdirs(); - FileOutputStream out = new FileOutputStream(outFile); - FileInputStream in = new FileInputStream(inFile); - byte[] bytes = IOUtils.readBytesAndClose(in, 0); + byte[] s = BuildBase.readFile(Paths.get(source + "stylesheet.css")); + BuildBase.writeFile(Paths.get(target + "stylesheet.css"), s); + Path inFile = Paths.get(source + fileName); + Path outFile = Paths.get(target + fileName); + Files.createDirectories(outFile.getParent()); + byte[] bytes = Files.readAllBytes(inFile) ; if (fileName.endsWith(".html")) { String page = new String(bytes); page = PageParser.parse(page, session); bytes = page.getBytes(); } - out.write(bytes); - out.close(); + Files.write(outFile, bytes); } private static Reader getReader() { diff --git a/h2/src/tools/org/h2/jcr/help.csv 
b/h2/src/tools/org/h2/jcr/help.csv index e90e725d27..2040b35e94 100644 --- a/h2/src/tools/org/h2/jcr/help.csv +++ b/h2/src/tools/org/h2/jcr/help.csv @@ -1,5 +1,5 @@ -# Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, -# and the EPL 1.0 (http://h2database.com/html/license.html). +# Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +# and the EPL 1.0 (https://h2database.com/html/license.html). # Initial Developer: H2 Group) "SECTION","TOPIC","SYNTAX","TEXT" diff --git a/h2/src/tools/org/h2/jcr/jcr-sql2.html b/h2/src/tools/org/h2/jcr/jcr-sql2.html index 43f6d21e18..4cf12dcc2d 100644 --- a/h2/src/tools/org/h2/jcr/jcr-sql2.html +++ b/h2/src/tools/org/h2/jcr/jcr-sql2.html @@ -1,7 +1,7 @@ @@ -45,9 +45,9 @@

          JCR 2.0 SQL-2 Grammar

          The diagrams are created with a small Java program and this BNF. The program uses the BNF parser / converter -of the H2 database engine. +of the H2 database engine.

          -Please send feedback to the Jackrabbit User List. +Please send feedback to the Jackrabbit User List.

          diff --git a/h2/src/tools/org/h2/jcr/package.html b/h2/src/tools/org/h2/jcr/package.html index 8de0f9e1df..225645d0ff 100644 --- a/h2/src/tools/org/h2/jcr/package.html +++ b/h2/src/tools/org/h2/jcr/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/jcr/stylesheet.css b/h2/src/tools/org/h2/jcr/stylesheet.css index c7e1e763db..47ea40c2a4 100644 --- a/h2/src/tools/org/h2/jcr/stylesheet.css +++ b/h2/src/tools/org/h2/jcr/stylesheet.css @@ -1,7 +1,7 @@ /* - * Copyright 2004-2019 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group */ td, input, select, textarea, body, code, pre, td, th {